From c15884ca68cc761bb4451a3166e7306395cea345 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Tue, 2 Oct 2012 00:34:28 +1300 Subject: [PATCH 0001/2699] start with some boilerplate --- ceph-proxy/config.yaml | 9 +++++++++ ceph-proxy/copyright | 4 ++++ ceph-proxy/metadata.yaml | 4 ++++ 3 files changed, 17 insertions(+) create mode 100644 ceph-proxy/config.yaml create mode 100644 ceph-proxy/copyright create mode 100644 ceph-proxy/metadata.yaml diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml new file mode 100644 index 00000000..5107e474 --- /dev/null +++ b/ceph-proxy/config.yaml @@ -0,0 +1,9 @@ +options: + fsid: + type: string + description: | + fsid of our cluster + osd-devices: + default: ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"] + description: | + the devices to format and set up as osd volumes diff --git a/ceph-proxy/copyright b/ceph-proxy/copyright new file mode 100644 index 00000000..44e9574f --- /dev/null +++ b/ceph-proxy/copyright @@ -0,0 +1,4 @@ +Copyright 2012 Canonical Ltd. + +Authors: + Paul Collins diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml new file mode 100644 index 00000000..dbc974d6 --- /dev/null +++ b/ceph-proxy/metadata.yaml @@ -0,0 +1,4 @@ +name: ceph-brolin +summary: distributed storage +description: | + This charm deploys Ceph. From 5d7fd3bc3e073504c745e17249b3d8438aed1716 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Tue, 2 Oct 2012 00:34:28 +1300 Subject: [PATCH 0002/2699] start with some boilerplate --- ceph-mon/config.yaml | 9 +++++++++ ceph-mon/copyright | 4 ++++ ceph-mon/metadata.yaml | 4 ++++ 3 files changed, 17 insertions(+) create mode 100644 ceph-mon/config.yaml create mode 100644 ceph-mon/copyright create mode 100644 ceph-mon/metadata.yaml diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml new file mode 100644 index 00000000..5107e474 --- /dev/null +++ b/ceph-mon/config.yaml @@ -0,0 +1,9 @@ +options: + fsid: + type: string + description: | + fsid of our cluster + osd-devices: + default: ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"] + description: | + the devices to format and set up as osd volumes diff --git a/ceph-mon/copyright b/ceph-mon/copyright new file mode 100644 index 00000000..44e9574f --- /dev/null +++ b/ceph-mon/copyright @@ -0,0 +1,4 @@ +Copyright 2012 Canonical Ltd. + +Authors: + Paul Collins diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml new file mode 100644 index 00000000..dbc974d6 --- /dev/null +++ b/ceph-mon/metadata.yaml @@ -0,0 +1,4 @@ +name: ceph-brolin +summary: distributed storage +description: | + This charm deploys Ceph. 
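A note on the two boilerplate patches above: config.yaml gives osd-devices a YAML-list default but declares no type, while fsid is a typed string with no default. [PATCH 0003] below re-types osd-devices as a single space-separated string, which is what the charm's hooks actually consume. A minimal Python sketch of that convention, assuming the config_get wrapper the series adds later in hooks/utils.py ([PATCH 0007]):

    # Sketch: consume the space-separated osd-devices value adopted in
    # [PATCH 0003]. config_get wraps juju's config-get tool, as in the
    # charm's hooks/utils.py.
    import subprocess

    def config_get(attribute):
        return subprocess.check_output(['config-get', attribute]).strip()

    for dev in config_get('osd-devices').split():
        subprocess.check_call(['ceph-disk-prepare', dev])

The same loop appears in shell form in the prototype start hook that [PATCH 0003] adds and [PATCH 0007] later deletes.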
From aaf17b1dbb35efb84b147e13f55b7d3d44ddf898 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Tue, 2 Oct 2012 22:25:11 +1300 Subject: [PATCH 0003/2699] add README and some prototype hooks --- ceph-proxy/README | 18 ++++++++++++++++++ ceph-proxy/config.yaml | 9 ++++++++- ceph-proxy/copyright | 1 + ceph-proxy/hooks/install | 19 +++++++++++++++++++ ceph-proxy/hooks/mon-relation | 5 +++++ ceph-proxy/hooks/mon-relation-changed | 1 + ceph-proxy/hooks/mon-relation-departed | 1 + ceph-proxy/hooks/mon-relation-joined | 1 + ceph-proxy/hooks/start | 13 +++++++++++++ ceph-proxy/metadata.yaml | 3 +++ 10 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/README create mode 100755 ceph-proxy/hooks/install create mode 100755 ceph-proxy/hooks/mon-relation create mode 120000 ceph-proxy/hooks/mon-relation-changed create mode 120000 ceph-proxy/hooks/mon-relation-departed create mode 120000 ceph-proxy/hooks/mon-relation-joined create mode 100644 ceph-proxy/hooks/start diff --git a/ceph-proxy/README b/ceph-proxy/README new file mode 100644 index 00000000..0b56260b --- /dev/null +++ b/ceph-proxy/README @@ -0,0 +1,18 @@ +This charm deploys a Ceph cluster. + +It uses the new-style Ceph deployment as reverse-engineered from the +Chef cookbook at https://github.com/ceph/ceph-cookbooks + +This charm is currently deliberately inflexible and potentially +destructive. It is designed to deploy on exactly three machines. +Each machine will run mon and osd. + +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used +to create the filesystems with a special GPT partition type. udev is +set up to mount such filesystems and start the osd daemons as their +storage becomes visible to the system (or after "udevadm trigger"). + +The Chef cookbook above performs some extra steps to generate an OSD +bootstrapping key and propagate it to the other nodes in the cluster. +Since all our OSDs run on nodes that also run mon, we don't need this +and did not implement it. diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 5107e474..0fca4bc8 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -4,6 +4,13 @@ options: description: | fsid of our cluster osd-devices: - default: ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"] + type: string + default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | the devices to format and set up as osd volumes + monitor-secret: + type: string + description: | + this value will become the "mon." key + to generate a suitable value, use + ceph-authtool /dev/stdout --name=mon. --gen-key diff --git a/ceph-proxy/copyright b/ceph-proxy/copyright index 44e9574f..8b380c23 100644 --- a/ceph-proxy/copyright +++ b/ceph-proxy/copyright @@ -2,3 +2,4 @@ Copyright 2012 Canonical Ltd. 
Authors: Paul Collins + James Page diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install new file mode 100755 index 00000000..b26b6872 --- /dev/null +++ b/ceph-proxy/hooks/install @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e +set -u + +# set up ceph package source +# XXX make this a charm config option +cat >/etc/apt/sources.list.d/ceph-brolin.list < Date: Tue, 2 Oct 2012 22:25:11 +1300 Subject: [PATCH 0004/2699] add README and some prototype hooks --- ceph-mon/README | 18 ++++++++++++++++++ ceph-mon/config.yaml | 9 ++++++++- ceph-mon/copyright | 1 + ceph-mon/hooks/install | 19 +++++++++++++++++++ ceph-mon/hooks/mon-relation | 5 +++++ ceph-mon/hooks/mon-relation-changed | 1 + ceph-mon/hooks/mon-relation-departed | 1 + ceph-mon/hooks/mon-relation-joined | 1 + ceph-mon/hooks/start | 13 +++++++++++++ ceph-mon/metadata.yaml | 3 +++ 10 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/README create mode 100755 ceph-mon/hooks/install create mode 100755 ceph-mon/hooks/mon-relation create mode 120000 ceph-mon/hooks/mon-relation-changed create mode 120000 ceph-mon/hooks/mon-relation-departed create mode 120000 ceph-mon/hooks/mon-relation-joined create mode 100644 ceph-mon/hooks/start diff --git a/ceph-mon/README b/ceph-mon/README new file mode 100644 index 00000000..0b56260b --- /dev/null +++ b/ceph-mon/README @@ -0,0 +1,18 @@ +This charm deploys a Ceph cluster. + +It uses the new-style Ceph deployment as reverse-engineered from the +Chef cookbook at https://github.com/ceph/ceph-cookbooks + +This charm is currently deliberately inflexible and potentially +destructive. It is designed to deploy on exactly three machines. +Each machine will run mon and osd. + +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used +to create the filesystems with a special GPT partition type. udev is +set up to mount such filesystems and start the osd daemons as their +storage becomes visible to the system (or after "udevadm trigger"). + +The Chef cookbook above performs some extra steps to generate an OSD +bootstrapping key and propagate it to the other nodes in the cluster. +Since all our OSDs run on nodes that also run mon, we don't need this +and did not implement it. diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 5107e474..0fca4bc8 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -4,6 +4,13 @@ options: description: | fsid of our cluster osd-devices: - default: ["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"] + type: string + default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | the devices to format and set up as osd volumes + monitor-secret: + type: string + description: | + this value will become the "mon." key + to generate a suitable value, use + ceph-authtool /dev/stdout --name=mon. --gen-key diff --git a/ceph-mon/copyright b/ceph-mon/copyright index 44e9574f..8b380c23 100644 --- a/ceph-mon/copyright +++ b/ceph-mon/copyright @@ -2,3 +2,4 @@ Copyright 2012 Canonical Ltd. 
Authors: Paul Collins + James Page diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install new file mode 100755 index 00000000..b26b6872 --- /dev/null +++ b/ceph-mon/hooks/install @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e +set -u + +# set up ceph package source +# XXX make this a charm config option +cat >/etc/apt/sources.list.d/ceph-brolin.list < Date: Tue, 2 Oct 2012 22:38:06 +1300 Subject: [PATCH 0005/2699] throw away prototype hooks, add hooks.py (stubs only atm) --- ceph-proxy/hooks/hooks.py | 39 ++++++++++++++++++++++++++ ceph-proxy/hooks/install | 20 +------------ ceph-proxy/hooks/mon-relation | 5 ---- ceph-proxy/hooks/mon-relation-changed | 2 +- ceph-proxy/hooks/mon-relation-departed | 2 +- ceph-proxy/hooks/mon-relation-joined | 2 +- 6 files changed, 43 insertions(+), 27 deletions(-) create mode 100755 ceph-proxy/hooks/hooks.py mode change 100755 => 120000 ceph-proxy/hooks/install delete mode 100755 ceph-proxy/hooks/mon-relation diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py new file mode 100755 index 00000000..b73bbdb2 --- /dev/null +++ b/ceph-proxy/hooks/hooks.py @@ -0,0 +1,39 @@ +#!/usr/bin/python + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Paul Collins +# + +import os +import subprocess +import sys + +def install(): + print "install" + +def config_changed(): + print "config_changed" + +def mon_relation(): + print "mon_relation" + +hooks = { + 'mon-relation-joined': mon_relation, + 'mon-relation-changed': mon_relation, + 'mon-relation-departed': mon_relation, + 'install': install, + 'config-changed': config_changed, +} + +hook = os.path.basename(sys.argv[0]) + +try: + hooks[hook]() +except: + subprocess.call(['juju-log', '-l', 'INFO', + "This charm doesn't know how to handle '%s'." % hook]) + +sys.exit(0) diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install deleted file mode 100755 index b26b6872..00000000 --- a/ceph-proxy/hooks/install +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -e -set -u - -# set up ceph package source -# XXX make this a charm config option -cat >/etc/apt/sources.list.d/ceph-brolin.list < Date: Tue, 2 Oct 2012 22:38:06 +1300 Subject: [PATCH 0006/2699] throw away prototype hooks, add hooks.py (stubs only atm) --- ceph-mon/hooks/hooks.py | 39 ++++++++++++++++++++++++++++ ceph-mon/hooks/install | 20 +------------- ceph-mon/hooks/mon-relation | 5 ---- ceph-mon/hooks/mon-relation-changed | 2 +- ceph-mon/hooks/mon-relation-departed | 2 +- ceph-mon/hooks/mon-relation-joined | 2 +- 6 files changed, 43 insertions(+), 27 deletions(-) create mode 100755 ceph-mon/hooks/hooks.py mode change 100755 => 120000 ceph-mon/hooks/install delete mode 100755 ceph-mon/hooks/mon-relation diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py new file mode 100755 index 00000000..b73bbdb2 --- /dev/null +++ b/ceph-mon/hooks/hooks.py @@ -0,0 +1,39 @@ +#!/usr/bin/python + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Paul Collins +# + +import os +import subprocess +import sys + +def install(): + print "install" + +def config_changed(): + print "config_changed" + +def mon_relation(): + print "mon_relation" + +hooks = { + 'mon-relation-joined': mon_relation, + 'mon-relation-changed': mon_relation, + 'mon-relation-departed': mon_relation, + 'install': install, + 'config-changed': config_changed, +} + +hook = os.path.basename(sys.argv[0]) + +try: + hooks[hook]() +except: + subprocess.call(['juju-log', '-l', 'INFO', + "This charm doesn't know how to handle '%s'." 
% hook]) + +sys.exit(0) diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install deleted file mode 100755 index b26b6872..00000000 --- a/ceph-mon/hooks/install +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -e -set -u - -# set up ceph package source -# XXX make this a charm config option -cat >/etc/apt/sources.list.d/ceph-brolin.list < Date: Tue, 2 Oct 2012 23:52:44 +1300 Subject: [PATCH 0007/2699] hackety hack --- ceph-proxy/config.yaml | 3 + ceph-proxy/hooks/ceph.py | 26 +++++++ ceph-proxy/hooks/config-changed | 1 + ceph-proxy/hooks/hooks.py | 29 +++++-- ceph-proxy/hooks/start | 13 ---- ceph-proxy/hooks/utils.py | 130 ++++++++++++++++++++++++++++++++ ceph-proxy/revision | 1 + ceph-proxy/templates/ceph.conf | 15 ++++ 8 files changed, 200 insertions(+), 18 deletions(-) create mode 100644 ceph-proxy/hooks/ceph.py create mode 120000 ceph-proxy/hooks/config-changed delete mode 100644 ceph-proxy/hooks/start create mode 100644 ceph-proxy/hooks/utils.py create mode 100644 ceph-proxy/revision create mode 100644 ceph-proxy/templates/ceph.conf diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 0fca4bc8..192c1b42 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -14,3 +14,6 @@ options: this value will become the "mon." key to generate a suitable value, use ceph-authtool /dev/stdout --name=mon. --gen-key + source: + type: string + default: ppa:ceph-ubuntu/dev diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py new file mode 100644 index 00000000..2049a07b --- /dev/null +++ b/ceph-proxy/hooks/ceph.py @@ -0,0 +1,26 @@ + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# + +import subprocess +import json +import os + +QUORUM = [ 'leader', 'peon' ] + +def is_quorum(): + cmd = [ + "ceph", + "--admin-daemon", + "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], + "mon_status" + ] + result = json.loads(subprocess.check_output(cmd)) + if result['state'] in QUORUM: + return True + else: + return False diff --git a/ceph-proxy/hooks/config-changed b/ceph-proxy/hooks/config-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/config-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b73bbdb2..7fad4db9 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -11,11 +11,31 @@ import subprocess import sys +import ceph +import utils + def install(): - print "install" + utils.juju_log('INFO', 'Begin install hook.') + utils.configure_source() + utils.install('ceph') + + # TODO: Install the upstart scripts. + utils.juju_log('INFO', 'End install hook.') def config_changed(): - print "config_changed" + utils.juju_log('INFO', 'Begin config-changed hook.') + fsid = utils.config_get('fsid') + if fsid == "": + utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') + sys.exit(1) + + monitor_secret = utils.config_get('monitor-secret') + if monitor_secret == "": + utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') + sys.exit(1) + + osd_devices = utils.config_get('osd-devices') + utils.juju_log('INFO', 'End config-changed hook.') def mon_relation(): print "mon_relation" @@ -32,8 +52,7 @@ def mon_relation(): try: hooks[hook]() -except: - subprocess.call(['juju-log', '-l', 'INFO', - "This charm doesn't know how to handle '%s'." % hook]) +except KeyError: + utils.juju_log('INFO', "This charm doesn't know how to handle '%s'." 
% hook) sys.exit(0) diff --git a/ceph-proxy/hooks/start b/ceph-proxy/hooks/start deleted file mode 100644 index f27ac7be..00000000 --- a/ceph-proxy/hooks/start +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh - -set -e -set -u - - -for disk in $(config-get osd-devices); do - ceph-disk-prepare $disk -done - -udevadm trigger --subsystem-match=block --action=add - -exit 0 diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py new file mode 100644 index 00000000..9b08a90d --- /dev/null +++ b/ceph-proxy/hooks/utils.py @@ -0,0 +1,130 @@ + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# +# Taken from lp:~james-page/charms/precise/ganglia/python-refactor +# + +import subprocess +import os +import sys + +def install (*pkgs): + cmd = [ + "apt-get", + "-y", + "install" + ] + for pkg in pkgs: + cmd.append(pkg) + subprocess.check_call(cmd) + +TEMPLATES_DIR="templates" + +try: + import jinja2 +except ImportError: + install('python-jinja2') + import jinja2 + +def render_template (template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) + +def configure_source(): + source = config_get("source") + if (source.startswith("ppa:") or + source.startswith("cloud:") or + source.startswith("http:")): + cmd = [ + "add-apt-repository", + source + ] + subprocess.check_call(cmd) + if source.startswith("http:"): + key = config_get("key") + cmd = [ + "apt-key", + "import", + key + ] + subprocess.check_call(cmd) + cmd = [ + "apt-get", + "update" + ] + subprocess.check_call(cmd) + +# Protocols +TCP="TCP" +UDP="UDP" + +def expose(port, protocol="TCP"): + cmd = [ + "open-port", + "%d/%s" % (port,protocol) + ] + subprocess.check_call(cmd) + +def juju_log(message,severity="INFO"): + cmd = [ + "juju-log", + "--log-level", severity, + message + ] + subprocess.check_call(cmd) + +def relation_ids(relation): + cmd = [ + "relation-ids", + relation + ] + return subprocess.check_output(cmd).split() + +def relation_list(rid): + cmd = [ + "relation-list", + "-r", rid, + ] + return subprocess.check_output(cmd).split() + +def relation_get(attribute,unit=None,rid=None): + cmd = [ + "relation-get", + ] + if rid: + cmd.append("-r") + cmd.append(rid) + cmd.append(attribute) + if unit: + cmd.append(unit) + return subprocess.check_output(cmd).strip() + +def relation_set(*kwargs): + cmd = [ + "relation-set" + ] + for k, v in kwargs.items(): + cmd.append("%s=%s" % (k,v)) + subprocess.check_call(cmd) + +def unit_get(attribute): + cmd = [ + "unit-get", + attribute + ] + return subprocess.check_output(cmd).strip() + +def config_get(attribute): + cmd = [ + "config-get", + attribute + ] + return subprocess.check_output(cmd).strip() + +def juju_log(level, message): + subprocess.call(['juju-log', '-l', level, message]) diff --git a/ceph-proxy/revision b/ceph-proxy/revision new file mode 100644 index 00000000..b1bd38b6 --- /dev/null +++ b/ceph-proxy/revision @@ -0,0 +1 @@ +13 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf new file mode 100644 index 00000000..d3bf0060 --- /dev/null +++ b/ceph-proxy/templates/ceph.conf @@ -0,0 +1,15 @@ +[global] + auth supported = none + keyring = /etc/ceph/$cluster.$name.keyring + mon host = + +[mon] + keyring = /var/lib/ceph/mon/$cluster-$id/keyring + +[mds] + keyring = /var/lib/ceph/mds/$cluster-$id/keyring + +[osd] + keyring = /var/lib/ceph/osd/$cluster-$id/keyring + osd journal size = 1000 + filestore 
xattr use omap = true From bb7fc923e40accc61e7dccfaeea38079e0c67671 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Tue, 2 Oct 2012 23:52:44 +1300 Subject: [PATCH 0008/2699] hackety hack --- ceph-mon/config.yaml | 3 + ceph-mon/hooks/ceph.py | 26 +++++++ ceph-mon/hooks/config-changed | 1 + ceph-mon/hooks/hooks.py | 29 ++++++-- ceph-mon/hooks/start | 13 ---- ceph-mon/hooks/utils.py | 130 ++++++++++++++++++++++++++++++++++ ceph-mon/revision | 1 + ceph-mon/templates/ceph.conf | 15 ++++ 8 files changed, 200 insertions(+), 18 deletions(-) create mode 100644 ceph-mon/hooks/ceph.py create mode 120000 ceph-mon/hooks/config-changed delete mode 100644 ceph-mon/hooks/start create mode 100644 ceph-mon/hooks/utils.py create mode 100644 ceph-mon/revision create mode 100644 ceph-mon/templates/ceph.conf diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 0fca4bc8..192c1b42 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -14,3 +14,6 @@ options: this value will become the "mon." key to generate a suitable value, use ceph-authtool /dev/stdout --name=mon. --gen-key + source: + type: string + default: ppa:ceph-ubuntu/dev diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py new file mode 100644 index 00000000..2049a07b --- /dev/null +++ b/ceph-mon/hooks/ceph.py @@ -0,0 +1,26 @@ + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# + +import subprocess +import json +import os + +QUORUM = [ 'leader', 'peon' ] + +def is_quorum(): + cmd = [ + "ceph", + "--admin-daemon", + "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], + "mon_status" + ] + result = json.loads(subprocess.check_output(cmd)) + if result['state'] in QUORUM: + return True + else: + return False diff --git a/ceph-mon/hooks/config-changed b/ceph-mon/hooks/config-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/config-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b73bbdb2..7fad4db9 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -11,11 +11,31 @@ import subprocess import sys +import ceph +import utils + def install(): - print "install" + utils.juju_log('INFO', 'Begin install hook.') + utils.configure_source() + utils.install('ceph') + + # TODO: Install the upstart scripts. + utils.juju_log('INFO', 'End install hook.') def config_changed(): - print "config_changed" + utils.juju_log('INFO', 'Begin config-changed hook.') + fsid = utils.config_get('fsid') + if fsid == "": + utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') + sys.exit(1) + + monitor_secret = utils.config_get('monitor-secret') + if monitor_secret == "": + utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') + sys.exit(1) + + osd_devices = utils.config_get('osd-devices') + utils.juju_log('INFO', 'End config-changed hook.') def mon_relation(): print "mon_relation" @@ -32,8 +52,7 @@ def mon_relation(): try: hooks[hook]() -except: - subprocess.call(['juju-log', '-l', 'INFO', - "This charm doesn't know how to handle '%s'." % hook]) +except KeyError: + utils.juju_log('INFO', "This charm doesn't know how to handle '%s'." 
% hook) sys.exit(0) diff --git a/ceph-mon/hooks/start b/ceph-mon/hooks/start deleted file mode 100644 index f27ac7be..00000000 --- a/ceph-mon/hooks/start +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh - -set -e -set -u - - -for disk in $(config-get osd-devices); do - ceph-disk-prepare $disk -done - -udevadm trigger --subsystem-match=block --action=add - -exit 0 diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py new file mode 100644 index 00000000..9b08a90d --- /dev/null +++ b/ceph-mon/hooks/utils.py @@ -0,0 +1,130 @@ + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# +# Taken from lp:~james-page/charms/precise/ganglia/python-refactor +# + +import subprocess +import os +import sys + +def install (*pkgs): + cmd = [ + "apt-get", + "-y", + "install" + ] + for pkg in pkgs: + cmd.append(pkg) + subprocess.check_call(cmd) + +TEMPLATES_DIR="templates" + +try: + import jinja2 +except ImportError: + install('python-jinja2') + import jinja2 + +def render_template (template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) + +def configure_source(): + source = config_get("source") + if (source.startswith("ppa:") or + source.startswith("cloud:") or + source.startswith("http:")): + cmd = [ + "add-apt-repository", + source + ] + subprocess.check_call(cmd) + if source.startswith("http:"): + key = config_get("key") + cmd = [ + "apt-key", + "import", + key + ] + subprocess.check_call(cmd) + cmd = [ + "apt-get", + "update" + ] + subprocess.check_call(cmd) + +# Protocols +TCP="TCP" +UDP="UDP" + +def expose(port, protocol="TCP"): + cmd = [ + "open-port", + "%d/%s" % (port,protocol) + ] + subprocess.check_call(cmd) + +def juju_log(message,severity="INFO"): + cmd = [ + "juju-log", + "--log-level", severity, + message + ] + subprocess.check_call(cmd) + +def relation_ids(relation): + cmd = [ + "relation-ids", + relation + ] + return subprocess.check_output(cmd).split() + +def relation_list(rid): + cmd = [ + "relation-list", + "-r", rid, + ] + return subprocess.check_output(cmd).split() + +def relation_get(attribute,unit=None,rid=None): + cmd = [ + "relation-get", + ] + if rid: + cmd.append("-r") + cmd.append(rid) + cmd.append(attribute) + if unit: + cmd.append(unit) + return subprocess.check_output(cmd).strip() + +def relation_set(*kwargs): + cmd = [ + "relation-set" + ] + for k, v in kwargs.items(): + cmd.append("%s=%s" % (k,v)) + subprocess.check_call(cmd) + +def unit_get(attribute): + cmd = [ + "unit-get", + attribute + ] + return subprocess.check_output(cmd).strip() + +def config_get(attribute): + cmd = [ + "config-get", + attribute + ] + return subprocess.check_output(cmd).strip() + +def juju_log(level, message): + subprocess.call(['juju-log', '-l', level, message]) diff --git a/ceph-mon/revision b/ceph-mon/revision new file mode 100644 index 00000000..b1bd38b6 --- /dev/null +++ b/ceph-mon/revision @@ -0,0 +1 @@ +13 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf new file mode 100644 index 00000000..d3bf0060 --- /dev/null +++ b/ceph-mon/templates/ceph.conf @@ -0,0 +1,15 @@ +[global] + auth supported = none + keyring = /etc/ceph/$cluster.$name.keyring + mon host = + +[mon] + keyring = /var/lib/ceph/mon/$cluster-$id/keyring + +[mds] + keyring = /var/lib/ceph/mds/$cluster-$id/keyring + +[osd] + keyring = /var/lib/ceph/osd/$cluster-$id/keyring + osd journal size = 1000 + filestore xattr use omap = true From 
ed033fbecd78316568b06ad6f5ecd42b233973a6 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 02:37:42 +1300 Subject: [PATCH 0009/2699] add get_mon_hosts() and blort it from config-changed --- ceph-proxy/hooks/hooks.py | 13 +++++++++++++ ceph-proxy/revision | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7fad4db9..434c0f90 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -24,6 +24,9 @@ def install(): def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') + + utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + fsid = utils.config_get('fsid') if fsid == "": utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') @@ -37,6 +40,16 @@ def config_changed(): osd_devices = utils.config_get('osd-devices') utils.juju_log('INFO', 'End config-changed hook.') +def get_mon_hosts(): + hosts = [] + hosts.append(utils.unit_get('private-address')) + + for relid in utils.relation_ids("mon"): + for unit in utils.relation_list(relid): + hosts.append(utils.relation_get('private-address', unit, relid)) + + return hosts + def mon_relation(): print "mon_relation" diff --git a/ceph-proxy/revision b/ceph-proxy/revision index b1bd38b6..98d9bcb7 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -13 +17 From ed1c35d6cfb8ba75d0a563dae9b2032febd32c53 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 02:37:42 +1300 Subject: [PATCH 0010/2699] add get_mon_hosts() and blort it from config-changed --- ceph-mon/hooks/hooks.py | 13 +++++++++++++ ceph-mon/revision | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7fad4db9..434c0f90 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -24,6 +24,9 @@ def install(): def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') + + utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + fsid = utils.config_get('fsid') if fsid == "": utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') @@ -37,6 +40,16 @@ def config_changed(): osd_devices = utils.config_get('osd-devices') utils.juju_log('INFO', 'End config-changed hook.') +def get_mon_hosts(): + hosts = [] + hosts.append(utils.unit_get('private-address')) + + for relid in utils.relation_ids("mon"): + for unit in utils.relation_list(relid): + hosts.append(utils.relation_get('private-address', unit, relid)) + + return hosts + def mon_relation(): print "mon_relation" diff --git a/ceph-mon/revision b/ceph-mon/revision index b1bd38b6..98d9bcb7 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -13 +17 From be8e1cf962af43c6a276870bf203b5e5e1687af8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 02:43:41 +1300 Subject: [PATCH 0011/2699] coerce private-address to an address with the magic of gethostbyname private-address may or may not be an address - it depends on the juju provider in use --- ceph-proxy/hooks/hooks.py | 6 ++++-- ceph-proxy/revision | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 434c0f90..1fe27ce4 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -9,6 +9,7 @@ import os import subprocess +import socket import sys import ceph @@ -42,11 +43,12 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(utils.unit_get('private-address')) + 
hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) for relid in utils.relation_ids("mon"): for unit in utils.relation_list(relid): - hosts.append(utils.relation_get('private-address', unit, relid)) + hosts.append(socket.gethostbyname( + utils.relation_get('private-address', unit, relid))) return hosts diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 98d9bcb7..3c032078 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -17 +18 From caaab050beb13117e0f7912af47cb9eae2702400 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 02:43:41 +1300 Subject: [PATCH 0012/2699] coerce private-address to an address with the magic of gethostbyname private-address may or may not be an address - it depends on the juju provider in use --- ceph-mon/hooks/hooks.py | 6 ++++-- ceph-mon/revision | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 434c0f90..1fe27ce4 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -9,6 +9,7 @@ import os import subprocess +import socket import sys import ceph @@ -42,11 +43,12 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(utils.unit_get('private-address')) + hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) for relid in utils.relation_ids("mon"): for unit in utils.relation_list(relid): - hosts.append(utils.relation_get('private-address', unit, relid)) + hosts.append(socket.gethostbyname( + utils.relation_get('private-address', unit, relid))) return hosts diff --git a/ceph-mon/revision b/ceph-mon/revision index 98d9bcb7..3c032078 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -17 +18 From fdf1659960fe4d0c1fadab54b74462419b4d837f Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 03:49:19 +1300 Subject: [PATCH 0013/2699] write out a plausible ceph.conf from config-changed and mon-relation-* add an "extra" newline to the template so that the generated ceph.conf ends with a newline --- ceph-proxy/hooks/hooks.py | 15 ++++++++++++++- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 3 ++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 1fe27ce4..f3b10eed 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -23,6 +23,14 @@ def install(): # TODO: Install the upstart scripts. 
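A note on the gethostbyname coercion in [PATCH 0011]/[PATCH 0012] above: as the commit message says, private-address may or may not be an address depending on the juju provider in use, and socket.gethostbyname() normalizes both cases, since it resolves a hostname via DNS but returns an IPv4 dotted-quad argument unchanged. A small illustration (the hostname is a hypothetical example):

    # Illustration of the [PATCH 0011] coercion: both a hostname and an
    # IPv4 address come back as an IPv4 address.
    import socket

    print socket.gethostbyname('10.5.0.17')           # '10.5.0.17'
    print socket.gethostbyname('node-3.example.com')  # e.g. '10.5.0.18', if DNS resolves it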
utils.juju_log('INFO', 'End install hook.') +def emit_cephconf(): + cephcontext = { + 'mon_hosts': ' '.join(get_mon_hosts()) + } + + with open('/etc/ceph/ceph.conf', 'w') as cephconf: + cephconf.write(utils.render_template('ceph.conf', cephcontext)) + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') @@ -39,6 +47,9 @@ def config_changed(): sys.exit(1) osd_devices = utils.config_get('osd-devices') + + emit_cephconf() + utils.juju_log('INFO', 'End config-changed hook.') def get_mon_hosts(): @@ -53,7 +64,9 @@ def get_mon_hosts(): return hosts def mon_relation(): - print "mon_relation" + utils.juju_log('INFO', 'Begin mon-relation hook.') + emit_cephconf() + utils.juju_log('INFO', 'End mon-relation hook.') hooks = { 'mon-relation-joined': mon_relation, diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 3c032078..7273c0fa 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -18 +25 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index d3bf0060..ca5bc8f9 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,7 +1,7 @@ [global] auth supported = none keyring = /etc/ceph/$cluster.$name.keyring - mon host = + mon host = {{ mon_hosts }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring @@ -13,3 +13,4 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = 1000 filestore xattr use omap = true + From b440b570b9384aab8e8a593bfe3300b108b6de56 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 03:49:19 +1300 Subject: [PATCH 0014/2699] write out a plausible ceph.conf from config-changed and mon-relation-* add an "extra" newline to the template so that the generated ceph.conf ends with a newline --- ceph-mon/hooks/hooks.py | 15 ++++++++++++++- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 3 ++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 1fe27ce4..f3b10eed 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -23,6 +23,14 @@ def install(): # TODO: Install the upstart scripts. 
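The emit_cephconf() helper introduced in [PATCH 0013]/[PATCH 0014] above is the first real use of the jinja2 render_template() helper from [PATCH 0007]: build a context dict, render templates/ceph.conf, and write the result to /etc/ceph/ceph.conf. A condensed, self-contained sketch of that flow (the mon_hosts and fsid values are illustrative placeholders, not real cluster data):

    # Condensed sketch of emit_cephconf() from [PATCH 0013], inlining the
    # render_template() helper added in [PATCH 0007].
    import jinja2

    def render_template(template_name, context, template_dir='templates'):
        templates = jinja2.Environment(
            loader=jinja2.FileSystemLoader(template_dir))
        return templates.get_template(template_name).render(context)

    cephcontext = {
        'mon_hosts': '10.0.0.1:6789 10.0.0.2:6789 10.0.0.3:6789',  # placeholder
        'fsid': 'a1b2c3d4-0000-0000-0000-000000000000',            # placeholder
    }
    with open('/etc/ceph/ceph.conf', 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))

The ':6789' suffixes match [PATCH 0015]/[PATCH 0016] below, which append the monitor port to each address; the fsid key only enters the context later, in [PATCH 0019]/[PATCH 0020].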
utils.juju_log('INFO', 'End install hook.') +def emit_cephconf(): + cephcontext = { + 'mon_hosts': ' '.join(get_mon_hosts()) + } + + with open('/etc/ceph/ceph.conf', 'w') as cephconf: + cephconf.write(utils.render_template('ceph.conf', cephcontext)) + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') @@ -39,6 +47,9 @@ def config_changed(): sys.exit(1) osd_devices = utils.config_get('osd-devices') + + emit_cephconf() + utils.juju_log('INFO', 'End config-changed hook.') def get_mon_hosts(): @@ -53,7 +64,9 @@ def get_mon_hosts(): return hosts def mon_relation(): - print "mon_relation" + utils.juju_log('INFO', 'Begin mon-relation hook.') + emit_cephconf() + utils.juju_log('INFO', 'End mon-relation hook.') hooks = { 'mon-relation-joined': mon_relation, diff --git a/ceph-mon/revision b/ceph-mon/revision index 3c032078..7273c0fa 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -18 +25 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index d3bf0060..ca5bc8f9 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,7 +1,7 @@ [global] auth supported = none keyring = /etc/ceph/$cluster.$name.keyring - mon host = + mon host = {{ mon_hosts }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring @@ -13,3 +13,4 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = 1000 filestore xattr use omap = true + From bdd357fe17b4c813a169b06e9413bf157ec087cf Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 04:12:49 +1300 Subject: [PATCH 0015/2699] append port to the monitor addresses, and rewrite ceph.conf from the upgrade-charm hook --- ceph-proxy/hooks/hooks.py | 21 +++++++++++++++------ ceph-proxy/revision | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index f3b10eed..8ba49f3e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -54,12 +54,15 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) + hosts.append(socket.gethostbyname(utils.unit_get('private-address')) + + ':6789') for relid in utils.relation_ids("mon"): for unit in utils.relation_list(relid): - hosts.append(socket.gethostbyname( - utils.relation_get('private-address', unit, relid))) + hosts.append( + socket.gethostbyname(utils.relation_get('private-address', + unit, relid)) + + ':6789') return hosts @@ -68,12 +71,18 @@ def mon_relation(): emit_cephconf() utils.juju_log('INFO', 'End mon-relation hook.') +def upgrade_charm(): + utils.juju_log('INFO', 'Begin upgrade-charm hook.') + emit_cephconf() + utils.juju_log('INFO', 'End upgrade-charm hook.') + hooks = { - 'mon-relation-joined': mon_relation, + 'config-changed': config_changed, + 'install': install, 'mon-relation-changed': mon_relation, 'mon-relation-departed': mon_relation, - 'install': install, - 'config-changed': config_changed, + 'mon-relation-joined': mon_relation, + 'upgrade-charm': upgrade_charm, } hook = os.path.basename(sys.argv[0]) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 7273c0fa..bb95160c 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -25 +33 From 3b90b6caf10bfebcac55188bef760accf461f0fc Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 04:12:49 +1300 Subject: [PATCH 0016/2699] append port to the monitor addresses, and rewrite ceph.conf from the upgrade-charm hook --- ceph-mon/hooks/hooks.py | 21 +++++++++++++++------ 
ceph-mon/revision | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index f3b10eed..8ba49f3e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -54,12 +54,15 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) + hosts.append(socket.gethostbyname(utils.unit_get('private-address')) + + ':6789') for relid in utils.relation_ids("mon"): for unit in utils.relation_list(relid): - hosts.append(socket.gethostbyname( - utils.relation_get('private-address', unit, relid))) + hosts.append( + socket.gethostbyname(utils.relation_get('private-address', + unit, relid)) + + ':6789') return hosts @@ -68,12 +71,18 @@ def mon_relation(): emit_cephconf() utils.juju_log('INFO', 'End mon-relation hook.') +def upgrade_charm(): + utils.juju_log('INFO', 'Begin upgrade-charm hook.') + emit_cephconf() + utils.juju_log('INFO', 'End upgrade-charm hook.') + hooks = { - 'mon-relation-joined': mon_relation, + 'config-changed': config_changed, + 'install': install, 'mon-relation-changed': mon_relation, 'mon-relation-departed': mon_relation, - 'install': install, - 'config-changed': config_changed, + 'mon-relation-joined': mon_relation, + 'upgrade-charm': upgrade_charm, } hook = os.path.basename(sys.argv[0]) diff --git a/ceph-mon/revision b/ceph-mon/revision index 7273c0fa..bb95160c 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -25 +33 From 0b7d3837829bc35b1450b732d1018042f0f28d07 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 05:21:10 +1300 Subject: [PATCH 0017/2699] missing symlink to hooks.py --- ceph-proxy/hooks/upgrade-charm | 1 + 1 file changed, 1 insertion(+) create mode 120000 ceph-proxy/hooks/upgrade-charm diff --git a/ceph-proxy/hooks/upgrade-charm b/ceph-proxy/hooks/upgrade-charm new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/upgrade-charm @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 643acd9bf974784d2fba18a7a12d5c5be6ee8e79 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 05:21:10 +1300 Subject: [PATCH 0018/2699] missing symlink to hooks.py --- ceph-mon/hooks/upgrade-charm | 1 + 1 file changed, 1 insertion(+) create mode 120000 ceph-mon/hooks/upgrade-charm diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/upgrade-charm @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 1587605da2516d51f10e5fce8951dd010d9b68d3 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:19:14 +1300 Subject: [PATCH 0019/2699] write fsid to ceph.conf --- ceph-proxy/hooks/hooks.py | 3 ++- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8ba49f3e..3fe6fa03 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -25,7 +25,8 @@ def install(): def emit_cephconf(): cephcontext = { - 'mon_hosts': ' '.join(get_mon_hosts()) + 'mon_hosts': ' '.join(get_mon_hosts().sort()), + 'fsid': utils.config_get('fsid'), } with open('/etc/ceph/ceph.conf', 'w') as cephconf: diff --git a/ceph-proxy/revision b/ceph-proxy/revision index bb95160c..a7873645 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -33 +34 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 
ca5bc8f9..32103fb5 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -2,6 +2,7 @@ auth supported = none keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} + fsid = {{ fsid }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 086ad1d11e0bbd60e18bb3726b5ee9108224d261 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:19:14 +1300 Subject: [PATCH 0020/2699] write fsid to ceph.conf --- ceph-mon/hooks/hooks.py | 3 ++- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8ba49f3e..3fe6fa03 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -25,7 +25,8 @@ def install(): def emit_cephconf(): cephcontext = { - 'mon_hosts': ' '.join(get_mon_hosts()) + 'mon_hosts': ' '.join(get_mon_hosts().sort()), + 'fsid': utils.config_get('fsid'), } with open('/etc/ceph/ceph.conf', 'w') as cephconf: diff --git a/ceph-mon/revision b/ceph-mon/revision index bb95160c..a7873645 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -33 +34 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index ca5bc8f9..32103fb5 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -2,6 +2,7 @@ auth supported = none keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} + fsid = {{ fsid }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From d8dccf598492a406f185f176143c096e7da0d4f7 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:19:53 +1300 Subject: [PATCH 0021/2699] import upstart scripts from argonaut --- .../files/upstart/ceph-create-keys.conf | 8 ++++ ceph-proxy/files/upstart/ceph-hotplug.conf | 11 ++++++ .../files/upstart/ceph-mon-all-starter.conf | 20 ++++++++++ ceph-proxy/files/upstart/ceph-mon-all.conf | 4 ++ ceph-proxy/files/upstart/ceph-mon.conf | 24 ++++++++++++ ceph-proxy/files/upstart/ceph-osd.conf | 37 +++++++++++++++++++ 6 files changed, 104 insertions(+) create mode 100644 ceph-proxy/files/upstart/ceph-create-keys.conf create mode 100644 ceph-proxy/files/upstart/ceph-hotplug.conf create mode 100644 ceph-proxy/files/upstart/ceph-mon-all-starter.conf create mode 100644 ceph-proxy/files/upstart/ceph-mon-all.conf create mode 100644 ceph-proxy/files/upstart/ceph-mon.conf create mode 100644 ceph-proxy/files/upstart/ceph-osd.conf diff --git a/ceph-proxy/files/upstart/ceph-create-keys.conf b/ceph-proxy/files/upstart/ceph-create-keys.conf new file mode 100644 index 00000000..6fb45818 --- /dev/null +++ b/ceph-proxy/files/upstart/ceph-create-keys.conf @@ -0,0 +1,8 @@ +description "Create Ceph client.admin key when possible" + +start on started ceph-mon +stop on runlevel [!2345] + +task + +exec /usr/sbin/ceph-create-keys --cluster="${cluster:-ceph}" -i "${id:-$(hostname)}" diff --git a/ceph-proxy/files/upstart/ceph-hotplug.conf b/ceph-proxy/files/upstart/ceph-hotplug.conf new file mode 100644 index 00000000..70204529 --- /dev/null +++ b/ceph-proxy/files/upstart/ceph-hotplug.conf @@ -0,0 +1,11 @@ +description "Ceph hotplug" + +start on block-device-added \ + DEVTYPE=partition \ + ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d +stop on runlevel [!2345] + +task +instance $DEVNAME + +exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME" diff --git a/ceph-proxy/files/upstart/ceph-mon-all-starter.conf b/ceph-proxy/files/upstart/ceph-mon-all-starter.conf new file mode 100644 index 00000000..f7188cb7 --- 
/dev/null +++ b/ceph-proxy/files/upstart/ceph-mon-all-starter.conf @@ -0,0 +1,20 @@ +description "Ceph MON (start all instances)" + +start on starting ceph-mon-all +stop on runlevel [!2345] + +task + +script + set -e + # TODO what's the valid charset for cluster names and mon ids? + find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \ + | while read f; do + if [ -e "/var/lib/ceph/mon/$f/done" ]; then + cluster="${f%%-*}" + id="${f#*-}" + + initctl emit ceph-mon cluster="$cluster" id="$id" + fi + done +end script diff --git a/ceph-proxy/files/upstart/ceph-mon-all.conf b/ceph-proxy/files/upstart/ceph-mon-all.conf new file mode 100644 index 00000000..006f2f20 --- /dev/null +++ b/ceph-proxy/files/upstart/ceph-mon-all.conf @@ -0,0 +1,4 @@ +description "Ceph monitor (all instances)" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] diff --git a/ceph-proxy/files/upstart/ceph-mon.conf b/ceph-proxy/files/upstart/ceph-mon.conf new file mode 100644 index 00000000..2cf7bfa5 --- /dev/null +++ b/ceph-proxy/files/upstart/ceph-mon.conf @@ -0,0 +1,24 @@ +description "Ceph MON" + +start on ceph-mon +stop on runlevel [!2345] or stopping ceph-mon-all + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-mon || { stop; exit 0; } + test -d "/var/lib/ceph/mon/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +# this breaks oneiric +#usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" + +exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-proxy/files/upstart/ceph-osd.conf b/ceph-proxy/files/upstart/ceph-osd.conf new file mode 100644 index 00000000..119ad000 --- /dev/null +++ b/ceph-proxy/files/upstart/ceph-osd.conf @@ -0,0 +1,37 @@ +description "Ceph OSD" + +start on ceph-osd +stop on runlevel [!2345] + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-osd || { stop; exit 0; } + test -d "/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph + + # update location in crush; put in some suitable defaults on the + # command line, ceph.conf can override what it wants + location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" + weight="$(ceph-conf --cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" + ceph \ + --cluster="${cluster:-ceph}" \ + --name="osd.$id" \ + --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ + osd crush set \ + -- \ + "$id" "osd.$id" "${weight:-1}" \ + pool=default \ + host="$(hostname -s)" \ + $location \ + || : +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f From 46980b0429a295a4719c9a3efb78257a3a0c1d68 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:19:53 +1300 Subject: [PATCH 0022/2699] import upstart scripts from argonaut --- ceph-mon/files/upstart/ceph-create-keys.conf | 8 ++++ ceph-mon/files/upstart/ceph-hotplug.conf | 11 ++++++ .../files/upstart/ceph-mon-all-starter.conf | 20 ++++++++++ ceph-mon/files/upstart/ceph-mon-all.conf | 4 ++ ceph-mon/files/upstart/ceph-mon.conf | 24 ++++++++++++ ceph-mon/files/upstart/ceph-osd.conf | 37 +++++++++++++++++++ 6 files changed, 104 insertions(+) create mode 100644 
ceph-mon/files/upstart/ceph-create-keys.conf create mode 100644 ceph-mon/files/upstart/ceph-hotplug.conf create mode 100644 ceph-mon/files/upstart/ceph-mon-all-starter.conf create mode 100644 ceph-mon/files/upstart/ceph-mon-all.conf create mode 100644 ceph-mon/files/upstart/ceph-mon.conf create mode 100644 ceph-mon/files/upstart/ceph-osd.conf diff --git a/ceph-mon/files/upstart/ceph-create-keys.conf b/ceph-mon/files/upstart/ceph-create-keys.conf new file mode 100644 index 00000000..6fb45818 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-create-keys.conf @@ -0,0 +1,8 @@ +description "Create Ceph client.admin key when possible" + +start on started ceph-mon +stop on runlevel [!2345] + +task + +exec /usr/sbin/ceph-create-keys --cluster="${cluster:-ceph}" -i "${id:-$(hostname)}" diff --git a/ceph-mon/files/upstart/ceph-hotplug.conf b/ceph-mon/files/upstart/ceph-hotplug.conf new file mode 100644 index 00000000..70204529 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-hotplug.conf @@ -0,0 +1,11 @@ +description "Ceph hotplug" + +start on block-device-added \ + DEVTYPE=partition \ + ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d +stop on runlevel [!2345] + +task +instance $DEVNAME + +exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME" diff --git a/ceph-mon/files/upstart/ceph-mon-all-starter.conf b/ceph-mon/files/upstart/ceph-mon-all-starter.conf new file mode 100644 index 00000000..f7188cb7 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-mon-all-starter.conf @@ -0,0 +1,20 @@ +description "Ceph MON (start all instances)" + +start on starting ceph-mon-all +stop on runlevel [!2345] + +task + +script + set -e + # TODO what's the valid charset for cluster names and mon ids? + find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \ + | while read f; do + if [ -e "/var/lib/ceph/mon/$f/done" ]; then + cluster="${f%%-*}" + id="${f#*-}" + + initctl emit ceph-mon cluster="$cluster" id="$id" + fi + done +end script diff --git a/ceph-mon/files/upstart/ceph-mon-all.conf b/ceph-mon/files/upstart/ceph-mon-all.conf new file mode 100644 index 00000000..006f2f20 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-mon-all.conf @@ -0,0 +1,4 @@ +description "Ceph monitor (all instances)" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] diff --git a/ceph-mon/files/upstart/ceph-mon.conf b/ceph-mon/files/upstart/ceph-mon.conf new file mode 100644 index 00000000..2cf7bfa5 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-mon.conf @@ -0,0 +1,24 @@ +description "Ceph MON" + +start on ceph-mon +stop on runlevel [!2345] or stopping ceph-mon-all + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-mon || { stop; exit 0; } + test -d "/var/lib/ceph/mon/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +# this breaks oneiric +#usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" + +exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-mon/files/upstart/ceph-osd.conf b/ceph-mon/files/upstart/ceph-osd.conf new file mode 100644 index 00000000..119ad000 --- /dev/null +++ b/ceph-mon/files/upstart/ceph-osd.conf @@ -0,0 +1,37 @@ +description "Ceph OSD" + +start on ceph-osd +stop on runlevel [!2345] + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-osd || { stop; exit 0; } + test -d 
"/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph + + # update location in crush; put in some suitable defaults on the + # command line, ceph.conf can override what it wants + location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" + weight="$(ceph-conf --cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" + ceph \ + --cluster="${cluster:-ceph}" \ + --name="osd.$id" \ + --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ + osd crush set \ + -- \ + "$id" "osd.$id" "${weight:-1}" \ + pool=default \ + host="$(hostname -s)" \ + $location \ + || : +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f From 18df4b1ba9189abe804e6bc4ee0ca17831fcdb4d Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:50:24 +1300 Subject: [PATCH 0023/2699] install upstart scripts --- ceph-proxy/hooks/hooks.py | 13 +++++++++++-- ceph-proxy/hooks/utils.py | 2 +- ceph-proxy/revision | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 3fe6fa03..c47f79a7 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -7,25 +7,32 @@ # Paul Collins # +import glob import os import subprocess +import shutil import socket import sys import ceph import utils +def install_upstart_scripts(): + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') + def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() utils.install('ceph') - # TODO: Install the upstart scripts. + install_upstart_scripts() + utils.juju_log('INFO', 'End install hook.') def emit_cephconf(): cephcontext = { - 'mon_hosts': ' '.join(get_mon_hosts().sort()), + 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), } @@ -65,6 +72,7 @@ def get_mon_hosts(): unit, relid)) + ':6789') + hosts.sort() return hosts def mon_relation(): @@ -75,6 +83,7 @@ def mon_relation(): def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() + install_upstart_scripts() utils.juju_log('INFO', 'End upgrade-charm hook.') hooks = { diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 9b08a90d..1d6af729 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -104,7 +104,7 @@ def relation_get(attribute,unit=None,rid=None): cmd.append(unit) return subprocess.check_output(cmd).strip() -def relation_set(*kwargs): +def relation_set(**kwargs): cmd = [ "relation-set" ] diff --git a/ceph-proxy/revision b/ceph-proxy/revision index a7873645..81b5c5d0 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -34 +37 From 924ffa6189dc245f7aa7704c8108dfe2ec5a975f Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 21:50:24 +1300 Subject: [PATCH 0024/2699] install upstart scripts --- ceph-mon/hooks/hooks.py | 13 +++++++++++-- ceph-mon/hooks/utils.py | 2 +- ceph-mon/revision | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 3fe6fa03..c47f79a7 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -7,25 +7,32 @@ # Paul Collins # +import glob import os import subprocess +import shutil import socket import sys import ceph import utils +def install_upstart_scripts(): + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') + def 
install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() utils.install('ceph') - # TODO: Install the upstart scripts. + install_upstart_scripts() + utils.juju_log('INFO', 'End install hook.') def emit_cephconf(): cephcontext = { - 'mon_hosts': ' '.join(get_mon_hosts().sort()), + 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), } @@ -65,6 +72,7 @@ def get_mon_hosts(): unit, relid)) + ':6789') + hosts.sort() return hosts def mon_relation(): @@ -75,6 +83,7 @@ def mon_relation(): def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() + install_upstart_scripts() utils.juju_log('INFO', 'End upgrade-charm hook.') hooks = { diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 9b08a90d..1d6af729 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -104,7 +104,7 @@ def relation_get(attribute,unit=None,rid=None): cmd.append(unit) return subprocess.check_output(cmd).strip() -def relation_set(*kwargs): +def relation_set(**kwargs): cmd = [ "relation-set" ] diff --git a/ceph-mon/revision b/ceph-mon/revision index a7873645..81b5c5d0 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -34 +37 From 94755baa73369672a0a1cac38a9449edb39802d8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:01:49 +1300 Subject: [PATCH 0025/2699] start a TODO --- ceph-proxy/TODO | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 ceph-proxy/TODO diff --git a/ceph-proxy/TODO b/ceph-proxy/TODO new file mode 100644 index 00000000..c4247485 --- /dev/null +++ b/ceph-proxy/TODO @@ -0,0 +1,8 @@ +== Minor == + + * fix tunables (http://tracker.newdream.net/issues/2210) + * more than 192 PGs + +== Major == + + * deploy more than 3 OSD hosts From a7740f7565511f72035b0e9fdd457775320e64ed Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:01:49 +1300 Subject: [PATCH 0026/2699] start a TODO --- ceph-mon/TODO | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 ceph-mon/TODO diff --git a/ceph-mon/TODO b/ceph-mon/TODO new file mode 100644 index 00000000..c4247485 --- /dev/null +++ b/ceph-mon/TODO @@ -0,0 +1,8 @@ +== Minor == + + * fix tunables (http://tracker.newdream.net/issues/2210) + * more than 192 PGs + +== Major == + + * deploy more than 3 OSD hosts From b79238ae0afc92392c38194267b11ebe5cc5b98a Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:02:28 +1300 Subject: [PATCH 0027/2699] wait for 3 potential monitors to show up, and then try to configure the cluster --- ceph-proxy/config.yaml | 7 +++++++ ceph-proxy/hooks/hooks.py | 33 +++++++++++++++++++++++++++++++++ ceph-proxy/revision | 2 +- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 192c1b42..a98d7570 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -8,6 +8,13 @@ options: default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | the devices to format and set up as osd volumes + monitor-count: + type: int + default: 3 + description: | + how many nodes to wait for before trying to create the monitor cluster + this number needs to be odd, and more than three is a waste except for + very large clusters monitor-secret: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c47f79a7..7425aa7e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -75,9 +75,42 @@ def get_mon_hosts(): hosts.sort() return hosts +def 
bootstrap_monitor_cluster(): + hostname = os.uname()[1] + done = "/var/lib/ceph/mon/ceph-%s/done" % hostname + secret = utils.config_get('monitor-secret') + keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname + + try: + with os.fdopen(os.open(done, os.O_WRONLY | os.O_CREAT | os.O_EXCL)) as d: + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring' '--name=mon.' + "--add-key=" % secret, + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + except: + os.unlink(done) + os.unlink(keyring) + raise + subprocess.check_call(['start', 'ceph-mon-all-starter']) + except OSError: + utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() + + moncount = utils.config_get('monitor-count') + if len(get_mon_hosts()) == moncount: + bootstrap_monitor_cluster() + else: + utils.juju_log('INFO', + "Not enough mons (%d), punting." % len(get_mon_hosts())) + utils.juju_log('INFO', 'End mon-relation hook.') def upgrade_charm(): diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 81b5c5d0..e522732c 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -37 +38 From 0bae1f241b6e1d2909a3f457ddbde8ed5f5f228f Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:02:28 +1300 Subject: [PATCH 0028/2699] wait for 3 potential monitors to show up, and then try to configure the cluster --- ceph-mon/config.yaml | 7 +++++++ ceph-mon/hooks/hooks.py | 33 +++++++++++++++++++++++++++++++++ ceph-mon/revision | 2 +- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 192c1b42..a98d7570 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -8,6 +8,13 @@ options: default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | the devices to format and set up as osd volumes + monitor-count: + type: int + default: 3 + description: | + how many nodes to wait for before trying to create the monitor cluster + this number needs to be odd, and more than three is a waste except for + very large clusters monitor-secret: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c47f79a7..7425aa7e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -75,9 +75,42 @@ def get_mon_hosts(): hosts.sort() return hosts +def bootstrap_monitor_cluster(): + hostname = os.uname()[1] + done = "/var/lib/ceph/mon/ceph-%s/done" % hostname + secret = utils.config_get('monitor-secret') + keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname + + try: + with os.fdopen(os.open(done, os.O_WRONLY | os.O_CREAT | os.O_EXCL)) as d: + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring' '--name=mon.' + "--add-key=" % secret, + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + except: + os.unlink(done) + os.unlink(keyring) + raise + subprocess.check_call(['start', 'ceph-mon-all-starter']) + except OSError: + utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() + + moncount = utils.config_get('monitor-count') + if len(get_mon_hosts()) == moncount: + bootstrap_monitor_cluster() + else: + utils.juju_log('INFO', + "Not enough mons (%d), punting." 
% len(get_mon_hosts())) + utils.juju_log('INFO', 'End mon-relation hook.') def upgrade_charm(): diff --git a/ceph-mon/revision b/ceph-mon/revision index 81b5c5d0..e522732c 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -37 +38 From 8a7673b16d5e4926a8e3ad58f9564466fd9e0ba8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:12:08 +1300 Subject: [PATCH 0029/2699] interpolation, do you speak it --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7425aa7e..0bb9ce72 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -86,7 +86,7 @@ def bootstrap_monitor_cluster(): try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring' '--name=mon.' - "--add-key=" % secret, + "--add-key=%s" % secret, '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', From 45497b8541ab01b7d0c177659e45fe521c023f10 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:12:08 +1300 Subject: [PATCH 0030/2699] interpolation, do you speak it --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7425aa7e..0bb9ce72 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -86,7 +86,7 @@ def bootstrap_monitor_cluster(): try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring' '--name=mon.' - "--add-key=" % secret, + "--add-key=%s" % secret, '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', From d3ba16b3d37e16ea1df76103824bee03703e72f8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:39:23 +1300 Subject: [PATCH 0031/2699] bootstrap_monitor_cluster: simplify (and make work) mon_relation: cast int to int so that i can int while i int --- ceph-proxy/hooks/hooks.py | 39 +++++++++++++++++++++------------------ ceph-proxy/revision | 2 +- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 0bb9ce72..fe8d779e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -81,30 +81,33 @@ def bootstrap_monitor_cluster(): secret = utils.config_get('monitor-secret') keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname - try: - with os.fdopen(os.open(done, os.O_WRONLY | os.O_CREAT | os.O_EXCL)) as d: - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring' '--name=mon.' 
- "--add-key=%s" % secret, - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - except: - os.unlink(done) - os.unlink(keyring) - raise - subprocess.check_call(['start', 'ceph-mon-all-starter']) - except OSError: + if os.path.exists(done): utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + else: + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + "--add-key=%s" % secret, + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + + subprocess.check_call(['start', 'ceph-mon-all-starter']) + except: + raise + finally: + os.unlink(keyring) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() - moncount = utils.config_get('monitor-count') + moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) == moncount: bootstrap_monitor_cluster() else: diff --git a/ceph-proxy/revision b/ceph-proxy/revision index e522732c..920a1396 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -38 +43 From 65415037d38b54f77e736e1531f99156a1db8b3b Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Wed, 3 Oct 2012 23:39:23 +1300 Subject: [PATCH 0032/2699] bootstrap_monitor_cluster: simplify (and make work) mon_relation: cast int to int so that i can int while i int --- ceph-mon/hooks/hooks.py | 39 +++++++++++++++++++++------------------ ceph-mon/revision | 2 +- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 0bb9ce72..fe8d779e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -81,30 +81,33 @@ def bootstrap_monitor_cluster(): secret = utils.config_get('monitor-secret') keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname - try: - with os.fdopen(os.open(done, os.O_WRONLY | os.O_CREAT | os.O_EXCL)) as d: - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring' '--name=mon.' 
- "--add-key=%s" % secret, - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - except: - os.unlink(done) - os.unlink(keyring) - raise - subprocess.check_call(['start', 'ceph-mon-all-starter']) - except OSError: + if os.path.exists(done): utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + else: + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + "--add-key=%s" % secret, + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + + subprocess.check_call(['start', 'ceph-mon-all-starter']) + except: + raise + finally: + os.unlink(keyring) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() - moncount = utils.config_get('monitor-count') + moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) == moncount: bootstrap_monitor_cluster() else: diff --git a/ceph-mon/revision b/ceph-mon/revision index e522732c..920a1396 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -38 +43 From 08a33be12eec8fff44b86b1f677d9cd0b7742292 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 02:32:36 +1300 Subject: [PATCH 0033/2699] use osd-devices for OSDs --- ceph-proxy/hooks/ceph.py | 8 +++++++- ceph-proxy/hooks/hooks.py | 27 +++++++++++++++++++++++++-- ceph-proxy/revision | 2 +- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 2049a07b..47137423 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -4,11 +4,13 @@ # # Authors: # James Page +# Paul Collins # -import subprocess import json import os +import subprocess +import time QUORUM = [ 'leader', 'peon' ] @@ -24,3 +26,7 @@ def is_quorum(): return True else: return False + +def wait_for_quorum(): + while not is_quorum(): + time.sleep(3) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index fe8d779e..b80a321e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -25,6 +25,7 @@ def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() utils.install('ceph') + utils.install('gdisk') # for ceph-disk-prepare install_upstart_scripts() @@ -54,10 +55,12 @@ def config_changed(): utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) - osd_devices = utils.config_get('osd-devices') - emit_cephconf() + if ceph.is_quorum(): + for dev in utils.config_get('osd-devices').split(' '): + osdize_and_activate(dev) + utils.juju_log('INFO', 'End config-changed hook.') def get_mon_hosts(): @@ -103,6 +106,21 @@ def bootstrap_monitor_cluster(): finally: os.unlink(keyring) +def osdize_and_activate(dev): + ceph.wait_for_quorum() + + # XXX hack for instances + subprocess.call(['umount', '/mnt']) + + if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + utils.juju_log('INFO', "Looks like %s is in use, skipping." 
% dev) + return True + + subprocess.call(['ceph-disk-prepare', dev]) + + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() @@ -110,6 +128,10 @@ def mon_relation(): moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) == moncount: bootstrap_monitor_cluster() + + ceph.wait_for_quorum() + for dev in utils.config_get('osd-devices').split(' '): + osdize_and_activate(dev) else: utils.juju_log('INFO', "Not enough mons (%d), punting." % len(get_mon_hosts())) @@ -120,6 +142,7 @@ def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() + utils.install('gdisk') # for ceph-disk-prepare utils.juju_log('INFO', 'End upgrade-charm hook.') hooks = { diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 920a1396..c739b42c 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -43 +44 From 42c8a015415347d13fed36db3bafcab7ce8fa777 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 02:32:36 +1300 Subject: [PATCH 0034/2699] use osd-devices for OSDs --- ceph-mon/hooks/ceph.py | 8 +++++++- ceph-mon/hooks/hooks.py | 27 +++++++++++++++++++++++++-- ceph-mon/revision | 2 +- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 2049a07b..47137423 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -4,11 +4,13 @@ # # Authors: # James Page +# Paul Collins # -import subprocess import json import os +import subprocess +import time QUORUM = [ 'leader', 'peon' ] @@ -24,3 +26,7 @@ def is_quorum(): return True else: return False + +def wait_for_quorum(): + while not is_quorum(): + time.sleep(3) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index fe8d779e..b80a321e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -25,6 +25,7 @@ def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() utils.install('ceph') + utils.install('gdisk') # for ceph-disk-prepare install_upstart_scripts() @@ -54,10 +55,12 @@ def config_changed(): utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) - osd_devices = utils.config_get('osd-devices') - emit_cephconf() + if ceph.is_quorum(): + for dev in utils.config_get('osd-devices').split(' '): + osdize_and_activate(dev) + utils.juju_log('INFO', 'End config-changed hook.') def get_mon_hosts(): @@ -103,6 +106,21 @@ def bootstrap_monitor_cluster(): finally: os.unlink(keyring) +def osdize_and_activate(dev): + ceph.wait_for_quorum() + + # XXX hack for instances + subprocess.call(['umount', '/mnt']) + + if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + utils.juju_log('INFO', "Looks like %s is in use, skipping." % dev) + return True + + subprocess.call(['ceph-disk-prepare', dev]) + + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() @@ -110,6 +128,10 @@ def mon_relation(): moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) == moncount: bootstrap_monitor_cluster() + + ceph.wait_for_quorum() + for dev in utils.config_get('osd-devices').split(' '): + osdize_and_activate(dev) else: utils.juju_log('INFO', "Not enough mons (%d), punting." 
% len(get_mon_hosts())) @@ -120,6 +142,7 @@ def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() + utils.install('gdisk') # for ceph-disk-prepare utils.juju_log('INFO', 'End upgrade-charm hook.') hooks = { diff --git a/ceph-mon/revision b/ceph-mon/revision index 920a1396..c739b42c 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -43 +44 From b52f8de5ea4b692327778ed6dba1e8c15cd05fc8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 02:52:54 +1300 Subject: [PATCH 0035/2699] make failure to open or connect to the ceph admin socket result in False --- ceph-proxy/hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 47137423..bbd41e1f 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -21,7 +21,12 @@ def is_quorum(): "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], "mon_status" ] - result = json.loads(subprocess.check_output(cmd)) + + try: + result = json.loads(subprocess.check_output(cmd)) + except CalledProcessError: + return False + if result['state'] in QUORUM: return True else: From 3718c5de109c875b1c1c2c73d3569d948e17f3b3 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 02:52:54 +1300 Subject: [PATCH 0036/2699] make failure to open or connect to the ceph admin socket result in False --- ceph-mon/hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 47137423..bbd41e1f 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -21,7 +21,12 @@ def is_quorum(): "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], "mon_status" ] - result = json.loads(subprocess.check_output(cmd)) + + try: + result = json.loads(subprocess.check_output(cmd)) + except CalledProcessError: + return False + if result['state'] in QUORUM: return True else: From 5a659daf721580d7547ca1898d531f2b515cdc69 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Oct 2012 16:07:47 +0100 Subject: [PATCH 0037/2699] Fixed subprocess exception handling in is_quorum, use socket.gethostname() for better hostname resolution, drop extra call for wait_for_quorum from osdize_and_activate --- ceph-proxy/hooks/ceph.py | 7 ++++--- ceph-proxy/hooks/hooks.py | 12 +++++------- ceph-proxy/hooks/utils.py | 4 ++++ ceph-proxy/revision | 2 +- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index bbd41e1f..99976cfe 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -11,20 +11,21 @@ import os import subprocess import time +import utils -QUORUM = [ 'leader', 'peon' ] +QUORUM = [ 'leader', 'peon' ] def is_quorum(): cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], + "/var/run/ceph/ceph-mon.%s.asok" % utils.get_unit_hostname(), "mon_status" ] try: result = json.loads(subprocess.check_output(cmd)) - except CalledProcessError: + except subprocess.CalledProcessError: return False if result['state'] in QUORUM: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b80a321e..9e9f2dd9 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -79,7 +79,7 @@ def get_mon_hosts(): return hosts def bootstrap_monitor_cluster(): - hostname = os.uname()[1] + hostname = utils.get_unit_hostname() done = "/var/lib/ceph/mon/ceph-%s/done" % hostname secret = utils.config_get('monitor-secret') keyring = 
"/var/lib/ceph/tmp/%s.mon.keyring" % hostname @@ -107,8 +107,6 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) def osdize_and_activate(dev): - ceph.wait_for_quorum() - # XXX hack for instances subprocess.call(['umount', '/mnt']) @@ -116,10 +114,10 @@ def osdize_and_activate(dev): utils.juju_log('INFO', "Looks like %s is in use, skipping." % dev) return True - subprocess.call(['ceph-disk-prepare', dev]) - - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 1d6af729..2c10ac1b 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -11,6 +11,7 @@ import subprocess import os import sys +import socket def install (*pkgs): cmd = [ @@ -128,3 +129,6 @@ def config_get(attribute): def juju_log(level, message): subprocess.call(['juju-log', '-l', level, message]) + +def get_unit_hostname(): + return socket.gethostname() diff --git a/ceph-proxy/revision b/ceph-proxy/revision index c739b42c..0691f67b 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -44 +52 From f0179d1b8f9fa2db86802700e0973ec9bbb0659a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Oct 2012 16:07:47 +0100 Subject: [PATCH 0038/2699] Fixed subprocess exception handling in is_quorum, use socket.gethostname() for better hostname resolution, drop extra call for wait_for_quorum from osdize_and_activate --- ceph-mon/hooks/ceph.py | 7 ++++--- ceph-mon/hooks/hooks.py | 12 +++++------- ceph-mon/hooks/utils.py | 4 ++++ ceph-mon/revision | 2 +- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index bbd41e1f..99976cfe 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -11,20 +11,21 @@ import os import subprocess import time +import utils -QUORUM = [ 'leader', 'peon' ] +QUORUM = [ 'leader', 'peon' ] def is_quorum(): cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.%s.asok" % os.uname()[1], + "/var/run/ceph/ceph-mon.%s.asok" % utils.get_unit_hostname(), "mon_status" ] try: result = json.loads(subprocess.check_output(cmd)) - except CalledProcessError: + except subprocess.CalledProcessError: return False if result['state'] in QUORUM: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b80a321e..9e9f2dd9 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -79,7 +79,7 @@ def get_mon_hosts(): return hosts def bootstrap_monitor_cluster(): - hostname = os.uname()[1] + hostname = utils.get_unit_hostname() done = "/var/lib/ceph/mon/ceph-%s/done" % hostname secret = utils.config_get('monitor-secret') keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname @@ -107,8 +107,6 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) def osdize_and_activate(dev): - ceph.wait_for_quorum() - # XXX hack for instances subprocess.call(['umount', '/mnt']) @@ -116,10 +114,10 @@ def osdize_and_activate(dev): utils.juju_log('INFO', "Looks like %s is in use, skipping." 
% dev) return True - subprocess.call(['ceph-disk-prepare', dev]) - - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 1d6af729..2c10ac1b 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -11,6 +11,7 @@ import subprocess import os import sys +import socket def install (*pkgs): cmd = [ @@ -128,3 +129,6 @@ def config_get(attribute): def juju_log(level, message): subprocess.call(['juju-log', '-l', level, message]) + +def get_unit_hostname(): + return socket.gethostname() diff --git a/ceph-mon/revision b/ceph-mon/revision index c739b42c..0691f67b 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -44 +52 From 6818b33b622a3f60a2cb5ebdfab08ef955f8524d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:28:49 +0100 Subject: [PATCH 0039/2699] Updates for pep8 compliance, added peer hinting to mon hook, removed redundant mon-relation-changed hook, updated copyright, added start/stop hooks to keep charm proof quiet --- ceph-proxy/copyright | 18 +++- ceph-proxy/hooks/ceph.py | 18 +++- ceph-proxy/hooks/hooks.py | 75 +++++++++++----- .../hooks/{mon-relation-changed => start} | 0 ceph-proxy/hooks/stop | 1 + ceph-proxy/hooks/utils.py | 87 +++++++++++-------- ceph-proxy/metadata.yaml | 7 +- 7 files changed, 138 insertions(+), 68 deletions(-) rename ceph-proxy/hooks/{mon-relation-changed => start} (100%) create mode 120000 ceph-proxy/hooks/stop diff --git a/ceph-proxy/copyright b/ceph-proxy/copyright index 8b380c23..4e1085af 100644 --- a/ceph-proxy/copyright +++ b/ceph-proxy/copyright @@ -1,5 +1,15 @@ -Copyright 2012 Canonical Ltd. +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 +Comment: The licensing of this charm is aligned to upstream ceph + as the ceph upstart integration is distributed as part of the charm. -Authors: - Paul Collins - James Page +Files: * +Copyright: 2012, Canonical Ltd. 
+License: LGPL-2.1 + +Files: files/upstart/* +Copyright: 2004-2010 by Sage Weil +License: LGPL-2.1 + +License: LGPL-2.1 + On Debian GNU/Linux system you can find the complete text of the + LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 99976cfe..9e3bfe99 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -13,13 +13,14 @@ import time import utils -QUORUM = [ 'leader', 'peon' ] +QUORUM = ['leader', 'peon'] + def is_quorum(): cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.%s.asok" % utils.get_unit_hostname(), + "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), "mon_status" ] @@ -33,6 +34,19 @@ def is_quorum(): else: return False + def wait_for_quorum(): while not is_quorum(): time.sleep(3) + + +def add_bootstrap_hint(peer): + cmd = [ + "ceph", + "--admin-daemon", + "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + "add_bootstrap_peer_hint", + peer + ] + # Ignore any errors for this call + subprocess.call(cmd) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 9e9f2dd9..f1451ef7 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -5,6 +5,7 @@ # # Authors: # Paul Collins +# James Page # import glob @@ -17,20 +18,20 @@ import ceph import utils + def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') + shutil.copy(x, '/etc/init/') + def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph') - utils.install('gdisk') # for ceph-disk-prepare - + utils.install('ceph', 'gdisk') install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + def emit_cephconf(): cephcontext = { 'mon_hosts': ' '.join(get_mon_hosts()), @@ -40,19 +41,21 @@ def emit_cephconf(): with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) fsid = utils.config_get('fsid') - if fsid == "": + if fsid == '': utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') sys.exit(1) monitor_secret = utils.config_get('monitor-secret') - if monitor_secret == "": - utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') + if monitor_secret == '': + utils.juju_log('CRITICAL', + 'No monitor-secret supplied, cannot proceed.') sys.exit(1) emit_cephconf() @@ -63,12 +66,13 @@ def config_changed(): utils.juju_log('INFO', 'End config-changed hook.') + def get_mon_hosts(): hosts = [] hosts.append(socket.gethostbyname(utils.unit_get('private-address')) + ':6789') - for relid in utils.relation_ids("mon"): + for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): hosts.append( socket.gethostbyname(utils.relation_get('private-address', @@ -78,19 +82,35 @@ def get_mon_hosts(): hosts.sort() return hosts + +def get_mon_addresses(): + hosts = [] + hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) + + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + hosts.append( + socket.gethostbyname(utils.relation_get('private-address', + unit, relid))) + + hosts.sort() + return hosts + + def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() - done = "/var/lib/ceph/mon/ceph-%s/done" % hostname + done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) 
secret = utils.config_get('monitor-secret') - keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): - utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + utils.juju_log('INFO', + 'bootstrap_monitor_cluster: mon already initialized.') else: try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', - "--add-key=%s" % secret, + '--add-key={}'.format(secret), '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', @@ -106,47 +126,53 @@ def bootstrap_monitor_cluster(): finally: os.unlink(keyring) + def osdize_and_activate(dev): # XXX hack for instances subprocess.call(['umount', '/mnt']) if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: - utils.juju_log('INFO', "Looks like %s is in use, skipping." % dev) - return True + utils.juju_log('INFO', + 'Looks like {} is in use, skipping.'.format(dev)) + else: + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() moncount = int(utils.config_get('monitor-count')) - if len(get_mon_hosts()) == moncount: + if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): osdize_and_activate(dev) + + for peer in get_mon_addresses(): + ceph.add_bootstrap_hint(peer) else: utils.juju_log('INFO', - "Not enough mons (%d), punting." % len(get_mon_hosts())) + 'Not enough mons ({}), punting.'.format( + len(get_mon_hosts()))) utils.juju_log('INFO', 'End mon-relation hook.') + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() - utils.install('gdisk') # for ceph-disk-prepare utils.juju_log('INFO', 'End upgrade-charm hook.') + hooks = { 'config-changed': config_changed, 'install': install, - 'mon-relation-changed': mon_relation, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, 'upgrade-charm': upgrade_charm, @@ -157,6 +183,7 @@ def upgrade_charm(): try: hooks[hook]() except KeyError: - utils.juju_log('INFO', "This charm doesn't know how to handle '%s'." 
% hook) + utils.juju_log('INFO', + 'This charm doesn't know how to handle '{}'.'.format(hook)) sys.exit(0) diff --git a/ceph-proxy/hooks/mon-relation-changed b/ceph-proxy/hooks/start similarity index 100% rename from ceph-proxy/hooks/mon-relation-changed rename to ceph-proxy/hooks/start diff --git a/ceph-proxy/hooks/stop b/ceph-proxy/hooks/stop new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/stop @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 2c10ac1b..d6cae2f9 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -13,17 +13,18 @@ import sys import socket -def install (*pkgs): + +def install(*pkgs): cmd = [ - "apt-get", - "-y", - "install" + 'apt-get', + '-y', + 'install' ] for pkg in pkgs: cmd.append(pkg) subprocess.check_call(cmd) -TEMPLATES_DIR="templates" +TEMPLATES_DIR = 'templates' try: import jinja2 @@ -31,104 +32,118 @@ def install (*pkgs): install('python-jinja2') import jinja2 -def render_template (template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir)) + +def render_template(template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir) + ) template = templates.get_template(template_name) return template.render(context) + def configure_source(): - source = config_get("source") - if (source.startswith("ppa:") or - source.startswith("cloud:") or - source.startswith("http:")): + source = config_get('source') + if (source.startswith('ppa:') or + source.startswith('cloud:') or + source.startswith('http:')): cmd = [ - "add-apt-repository", + 'add-apt-repository', source ] subprocess.check_call(cmd) - if source.startswith("http:"): - key = config_get("key") + if source.startswith('http:'): + key = config_get('key') cmd = [ - "apt-key", - "import", + 'apt-key', + 'import', key ] subprocess.check_call(cmd) cmd = [ - "apt-get", - "update" + 'apt-get', + 'update' ] subprocess.check_call(cmd) # Protocols -TCP="TCP" -UDP="UDP" +TCP = 'TCP' +UDP = 'UDP' + -def expose(port, protocol="TCP"): +def expose(port, protocol='TCP'): cmd = [ - "open-port", - "%d/%s" % (port,protocol) + 'open-port', + '{}/{}'.format(port, protocol) ] subprocess.check_call(cmd) -def juju_log(message,severity="INFO"): + +def juju_log(message, severity='INFO'): cmd = [ - "juju-log", - "--log-level", severity, + 'juju-log', + '--log-level', severity, message ] subprocess.check_call(cmd) + def relation_ids(relation): cmd = [ - "relation-ids", + 'relation-ids', relation ] return subprocess.check_output(cmd).split() + def relation_list(rid): cmd = [ - "relation-list", - "-r", rid, + 'relation-list', + '-r', rid, ] return subprocess.check_output(cmd).split() -def relation_get(attribute,unit=None,rid=None): + +def relation_get(attribute, unit=None, rid=None): cmd = [ - "relation-get", + 'relation-get', ] if rid: - cmd.append("-r") + cmd.append('-r') cmd.append(rid) cmd.append(attribute) if unit: cmd.append(unit) return subprocess.check_output(cmd).strip() + def relation_set(**kwargs): cmd = [ - "relation-set" + 'relation-set' ] for k, v in kwargs.items(): - cmd.append("%s=%s" % (k,v)) + cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + def unit_get(attribute): cmd = [ - "unit-get", + 'unit-get', attribute ] return subprocess.check_output(cmd).strip() + def config_get(attribute): cmd = [ - "config-get", + 'config-get', attribute ] return 
subprocess.check_output(cmd).strip() + def juju_log(level, message): subprocess.call(['juju-log', '-l', level, message]) + def get_unit_hostname(): return socket.gethostname() diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 906cdb4e..6a363437 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,7 +1,10 @@ name: ceph-brolin -summary: distributed storage +summary: Highly scalable distributed storage +maintainer: James Page , + Paul Collins description: | - This charm deploys Ceph. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. peers: mon: interface: ceph-brolin From 05b457624424cc89887ebfe3109b24fc20b1a4fb Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:28:49 +0100 Subject: [PATCH 0040/2699] Updates for pep8 compliance, added peer hinting to mon hook, removed redundant mon-relation-changed hook, updated copyright, added start/stop hooks to keep charm proof quiet --- ceph-mon/copyright | 18 +++- ceph-mon/hooks/ceph.py | 18 +++- ceph-mon/hooks/hooks.py | 75 +++++++++++----- .../hooks/{mon-relation-changed => start} | 0 ceph-mon/hooks/stop | 1 + ceph-mon/hooks/utils.py | 87 +++++++++++-------- ceph-mon/metadata.yaml | 7 +- 7 files changed, 138 insertions(+), 68 deletions(-) rename ceph-mon/hooks/{mon-relation-changed => start} (100%) create mode 120000 ceph-mon/hooks/stop diff --git a/ceph-mon/copyright b/ceph-mon/copyright index 8b380c23..4e1085af 100644 --- a/ceph-mon/copyright +++ b/ceph-mon/copyright @@ -1,5 +1,15 @@ -Copyright 2012 Canonical Ltd. +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 +Comment: The licensing of this charm is aligned to upstream ceph + as the ceph upstart integration is distributed as part of the charm. -Authors: - Paul Collins - James Page +Files: * +Copyright: 2012, Canonical Ltd. 
+License: LGPL-2.1 + +Files: files/upstart/* +Copyright: 2004-2010 by Sage Weil +License: LGPL-2.1 + +License: LGPL-2.1 + On Debian GNU/Linux system you can find the complete text of the + LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 99976cfe..9e3bfe99 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -13,13 +13,14 @@ import time import utils -QUORUM = [ 'leader', 'peon' ] +QUORUM = ['leader', 'peon'] + def is_quorum(): cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.%s.asok" % utils.get_unit_hostname(), + "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), "mon_status" ] @@ -33,6 +34,19 @@ def is_quorum(): else: return False + def wait_for_quorum(): while not is_quorum(): time.sleep(3) + + +def add_bootstrap_hint(peer): + cmd = [ + "ceph", + "--admin-daemon", + "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + "add_bootstrap_peer_hint", + peer + ] + # Ignore any errors for this call + subprocess.call(cmd) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 9e9f2dd9..f1451ef7 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -5,6 +5,7 @@ # # Authors: # Paul Collins +# James Page # import glob @@ -17,20 +18,20 @@ import ceph import utils + def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') + shutil.copy(x, '/etc/init/') + def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph') - utils.install('gdisk') # for ceph-disk-prepare - + utils.install('ceph', 'gdisk') install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + def emit_cephconf(): cephcontext = { 'mon_hosts': ' '.join(get_mon_hosts()), @@ -40,19 +41,21 @@ def emit_cephconf(): with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) fsid = utils.config_get('fsid') - if fsid == "": + if fsid == '': utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') sys.exit(1) monitor_secret = utils.config_get('monitor-secret') - if monitor_secret == "": - utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') + if monitor_secret == '': + utils.juju_log('CRITICAL', + 'No monitor-secret supplied, cannot proceed.') sys.exit(1) emit_cephconf() @@ -63,12 +66,13 @@ def config_changed(): utils.juju_log('INFO', 'End config-changed hook.') + def get_mon_hosts(): hosts = [] hosts.append(socket.gethostbyname(utils.unit_get('private-address')) + ':6789') - for relid in utils.relation_ids("mon"): + for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): hosts.append( socket.gethostbyname(utils.relation_get('private-address', @@ -78,19 +82,35 @@ def get_mon_hosts(): hosts.sort() return hosts + +def get_mon_addresses(): + hosts = [] + hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) + + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + hosts.append( + socket.gethostbyname(utils.relation_get('private-address', + unit, relid))) + + hosts.sort() + return hosts + + def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() - done = "/var/lib/ceph/mon/ceph-%s/done" % hostname + done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) secret = 
utils.config_get('monitor-secret') - keyring = "/var/lib/ceph/tmp/%s.mon.keyring" % hostname + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): - utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized, getting on with life.') + utils.juju_log('INFO', + 'bootstrap_monitor_cluster: mon already initialized.') else: try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', - "--add-key=%s" % secret, + '--add-key={}'.format(secret), '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', @@ -106,47 +126,53 @@ def bootstrap_monitor_cluster(): finally: os.unlink(keyring) + def osdize_and_activate(dev): # XXX hack for instances subprocess.call(['umount', '/mnt']) if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: - utils.juju_log('INFO', "Looks like %s is in use, skipping." % dev) - return True + utils.juju_log('INFO', + 'Looks like {} is in use, skipping.'.format(dev)) + else: + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() moncount = int(utils.config_get('monitor-count')) - if len(get_mon_hosts()) == moncount: + if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): osdize_and_activate(dev) + + for peer in get_mon_addresses(): + ceph.add_bootstrap_hint(peer) else: utils.juju_log('INFO', - "Not enough mons (%d), punting." % len(get_mon_hosts())) + 'Not enough mons ({}), punting.'.format( + len(get_mon_hosts()))) utils.juju_log('INFO', 'End mon-relation hook.') + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() - utils.install('gdisk') # for ceph-disk-prepare utils.juju_log('INFO', 'End upgrade-charm hook.') + hooks = { 'config-changed': config_changed, 'install': install, - 'mon-relation-changed': mon_relation, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, 'upgrade-charm': upgrade_charm, @@ -157,6 +183,7 @@ def upgrade_charm(): try: hooks[hook]() except KeyError: - utils.juju_log('INFO', "This charm doesn't know how to handle '%s'." 
% hook) + utils.juju_log('INFO', + 'This charm doesn't know how to handle '{}'.'.format(hook)) sys.exit(0) diff --git a/ceph-mon/hooks/mon-relation-changed b/ceph-mon/hooks/start similarity index 100% rename from ceph-mon/hooks/mon-relation-changed rename to ceph-mon/hooks/start diff --git a/ceph-mon/hooks/stop b/ceph-mon/hooks/stop new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/stop @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 2c10ac1b..d6cae2f9 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -13,17 +13,18 @@ import sys import socket -def install (*pkgs): + +def install(*pkgs): cmd = [ - "apt-get", - "-y", - "install" + 'apt-get', + '-y', + 'install' ] for pkg in pkgs: cmd.append(pkg) subprocess.check_call(cmd) -TEMPLATES_DIR="templates" +TEMPLATES_DIR = 'templates' try: import jinja2 @@ -31,104 +32,118 @@ def install (*pkgs): install('python-jinja2') import jinja2 -def render_template (template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir)) + +def render_template(template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir) + ) template = templates.get_template(template_name) return template.render(context) + def configure_source(): - source = config_get("source") - if (source.startswith("ppa:") or - source.startswith("cloud:") or - source.startswith("http:")): + source = config_get('source') + if (source.startswith('ppa:') or + source.startswith('cloud:') or + source.startswith('http:')): cmd = [ - "add-apt-repository", + 'add-apt-repository', source ] subprocess.check_call(cmd) - if source.startswith("http:"): - key = config_get("key") + if source.startswith('http:'): + key = config_get('key') cmd = [ - "apt-key", - "import", + 'apt-key', + 'import', key ] subprocess.check_call(cmd) cmd = [ - "apt-get", - "update" + 'apt-get', + 'update' ] subprocess.check_call(cmd) # Protocols -TCP="TCP" -UDP="UDP" +TCP = 'TCP' +UDP = 'UDP' + -def expose(port, protocol="TCP"): +def expose(port, protocol='TCP'): cmd = [ - "open-port", - "%d/%s" % (port,protocol) + 'open-port', + '{}/{}'.format(port, protocol) ] subprocess.check_call(cmd) -def juju_log(message,severity="INFO"): + +def juju_log(message, severity='INFO'): cmd = [ - "juju-log", - "--log-level", severity, + 'juju-log', + '--log-level', severity, message ] subprocess.check_call(cmd) + def relation_ids(relation): cmd = [ - "relation-ids", + 'relation-ids', relation ] return subprocess.check_output(cmd).split() + def relation_list(rid): cmd = [ - "relation-list", - "-r", rid, + 'relation-list', + '-r', rid, ] return subprocess.check_output(cmd).split() -def relation_get(attribute,unit=None,rid=None): + +def relation_get(attribute, unit=None, rid=None): cmd = [ - "relation-get", + 'relation-get', ] if rid: - cmd.append("-r") + cmd.append('-r') cmd.append(rid) cmd.append(attribute) if unit: cmd.append(unit) return subprocess.check_output(cmd).strip() + def relation_set(**kwargs): cmd = [ - "relation-set" + 'relation-set' ] for k, v in kwargs.items(): - cmd.append("%s=%s" % (k,v)) + cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + def unit_get(attribute): cmd = [ - "unit-get", + 'unit-get', attribute ] return subprocess.check_output(cmd).strip() + def config_get(attribute): cmd = [ - "config-get", + 'config-get', attribute ] return 
subprocess.check_output(cmd).strip() + def juju_log(level, message): subprocess.call(['juju-log', '-l', level, message]) + def get_unit_hostname(): return socket.gethostname() diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 906cdb4e..6a363437 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -1,7 +1,10 @@ name: ceph-brolin -summary: distributed storage +summary: Highly scalable distributed storage +maintainer: James Page , + Paul Collins description: | - This charm deploys Ceph. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. peers: mon: interface: ceph-brolin From 6018864bc010f7293d05d494a43a4707551f8752 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:40:40 +0100 Subject: [PATCH 0041/2699] Tidied config.yaml, added extra detail and key config option for arbitrary sources --- ceph-proxy/config.yaml | 47 +++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index a98d7570..a59f67e6 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -2,25 +2,52 @@ options: fsid: type: string description: | - fsid of our cluster - osd-devices: + fsid of the ceph cluster. To generate a suitable value use `uuid` + . + This configuration element is mandatory and the service will fail on + install if it is not provided. + monitor-secret: type: string - default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | - the devices to format and set up as osd volumes + This value will become the "mon." key. To generate a suitable value use: + . + ceph-authtool /dev/stdout --name=mon. --gen-key + . + This configuration element is mandatory and the service will fail on + install if it is not provided. monitor-count: type: int default: 3 description: | - how many nodes to wait for before trying to create the monitor cluster + How many nodes to wait for before trying to create the monitor cluster. This number needs to be odd, and more than three is a waste except for - very large clusters - monitor-secret: + very large clusters. + osd-devices: type: string + default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | - this value will become the "mon." key - to generate a suitable value, use - ceph-authtool /dev/stdout --name=mon. --gen-key + The devices to format and set up as osd volumes. + . + These devices are the range of devices that will be checked for and + used across all service units. source: type: string default: ppa:ceph-ubuntu/dev + description: | + Optional configuration to support use of additional sources such as: + . + - ppa:myteam/ppa + - cloud:folsom-proposed + - http://my.archive.com/ubuntu main + . + The last option should be used in conjunction with the key configuration + option. + . + Note that a minimum ceph version of 0.48.2 is required for use with this + charm; it is NOT provided by the packages in the main Ubuntu archive + for precise. + key: + type: string + description: | + Key ID to import to the apt keyring to support use with arbitrary source + configuration from outside of Launchpad archives or PPAs.
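The two mandatory options above (fsid and monitor-secret) have to be supplied when the service is deployed. A minimal sketch of one way to do that from a shell, assuming the juju client of this era; the file name ceph.yaml, the awk extraction of the key, and the add-unit sequence are illustrative choices, not part of the charm (ceph-brolin is the service name from metadata.yaml):

    # Generate the mandatory values with the tools the descriptions name.
    fsid=$(uuid)
    # ceph-authtool prints a small keyring; the base64 value after "key ="
    # is what monitor-secret wants.
    secret=$(ceph-authtool /dev/stdout --name=mon. --gen-key | awk '/key =/ { print $3 }')

    cat > ceph.yaml <<EOF
    ceph-brolin:
      fsid: $fsid
      monitor-secret: $secret
    EOF

    # Deploy, then add units until monitor-count (default 3) peers exist.
    juju deploy --config ceph.yaml ceph-brolin
    juju add-unit ceph-brolin
    juju add-unit ceph-brolin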
From 5c83533f386be699511a4dd8c7af1523a8aea8d9 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:40:40 +0100 Subject: [PATCH 0042/2699] Tidied config.yaml, added extra detail and key config option for arbitrary sources --- ceph-mon/config.yaml | 47 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index a98d7570..a59f67e6 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -2,25 +2,52 @@ options: fsid: type: string description: | - fsid of our cluster - osd-devices: + fsid of the ceph cluster. To generate a suitable value use `uuid` + . + This configuration element is mandatory and the service will fail on + install if it is not provided. + monitor-secret: type: string - default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | - the devices to format and set up as osd volumes + This value will become the "mon." key. To generate a suitable value use: + . + ceph-authtool /dev/stdout --name=mon. --gen-key + . + This configuration element is mandatory and the service will fail on + install if it is not provided. monitor-count: type: int default: 3 description: | - how many nodes to wait for before trying to create the monitor cluster + How many nodes to wait for before trying to create the monitor cluster. This number needs to be odd, and more than three is a waste except for - very large clusters - monitor-secret: + very large clusters. + osd-devices: type: string + default: /dev/sdb /dev/sdc /dev/sdd /dev/sde description: | - this value will become the "mon." key - to generate a suitable value, use - ceph-authtool /dev/stdout --name=mon. --gen-key + The devices to format and set up as osd volumes. + . + These devices are the range of devices that will be checked for and + used across all service units. source: type: string default: ppa:ceph-ubuntu/dev + description: | + Optional configuration to support use of additional sources such as: + . + - ppa:myteam/ppa + - cloud:folsom-proposed + - http://my.archive.com/ubuntu main + . + The last option should be used in conjunction with the key configuration + option. + . + Note that a minimum ceph version of 0.48.2 is required for use with this + charm; it is NOT provided by the packages in the main Ubuntu archive + for precise. + key: + type: string + description: | + Key ID to import to the apt keyring to support use with arbitrary source + configuration from outside of Launchpad archives or PPAs.
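Both copies of the charm decide whether the monitor cluster has formed by asking the local mon over its admin socket (is_quorum in hooks/ceph.py above). The same check can be run by hand on a deployed unit when a cluster refuses to bootstrap; a sketch, assuming the default cluster name and a socket named after the unit's hostname exactly as in the hook code:

    # Dump the local monitor's view of its state over the admin socket.
    ceph --admin-daemon /var/run/ceph/ceph-mon.$(hostname).asok mon_status

    # The hooks treat a reported "state" of "leader" or "peon" as quorate;
    # anything else (for example "probing" or "electing") means the monitor
    # cluster has not formed yet.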
From dc7e7a9ab2e13ba7c31c6a8fa11ce1a88e61303e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:50:14 +0100 Subject: [PATCH 0043/2699] Tidy redundant imports and fixup string lex --- ceph-proxy/hooks/ceph.py | 1 - ceph-proxy/hooks/hooks.py | 2 +- ceph-proxy/hooks/utils.py | 6 ------ 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 9e3bfe99..83b113cf 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -8,7 +8,6 @@ # import json -import os import subprocess import time import utils diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index f1451ef7..c9a5617e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -184,6 +184,6 @@ def upgrade_charm(): hooks[hook]() except KeyError: utils.juju_log('INFO', - 'This charm doesn't know how to handle '{}'.'.format(hook)) + "This charm doesn't know how to handle '{}'.".format(hook)) sys.exit(0) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index d6cae2f9..75123991 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -9,8 +9,6 @@ # import subprocess -import os -import sys import socket @@ -141,9 +139,5 @@ def config_get(attribute): return subprocess.check_output(cmd).strip() -def juju_log(level, message): - subprocess.call(['juju-log', '-l', level, message]) - - def get_unit_hostname(): return socket.gethostname() From cb57c01d38d45ce48948d131d773d13b2f06cdd9 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:50:14 +0100 Subject: [PATCH 0044/2699] Tidy redundant imports and fixup string lex --- ceph-mon/hooks/ceph.py | 1 - ceph-mon/hooks/hooks.py | 2 +- ceph-mon/hooks/utils.py | 6 ------ 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 9e3bfe99..83b113cf 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -8,7 +8,6 @@ # import json -import os import subprocess import time import utils diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index f1451ef7..c9a5617e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -184,6 +184,6 @@ def upgrade_charm(): hooks[hook]() except KeyError: utils.juju_log('INFO', - 'This charm doesn't know how to handle '{}'.'.format(hook)) + "This charm doesn't know how to handle '{}'.".format(hook)) sys.exit(0) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index d6cae2f9..75123991 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -9,8 +9,6 @@ # import subprocess -import os -import sys import socket @@ -141,9 +139,5 @@ def config_get(attribute): return subprocess.check_output(cmd).strip() -def juju_log(level, message): - subprocess.call(['juju-log', '-l', level, message]) - - def get_unit_hostname(): return socket.gethostname() From a2e86e3831a2ff22714a35ef89ed9b5bfc1c7e2b Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:56:29 +0100 Subject: [PATCH 0045/2699] Remove attribution of utils.py --- ceph-proxy/hooks/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 75123991..2d5ad307 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -5,7 +5,6 @@ # Authors: # James Page # -# Taken from lp:~james-page/charms/precise/ganglia/python-refactor # import subprocess From 64332c5e70e1f3717a91d7f5bc292e8bd3e6552f Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 09:56:29 +0100
Subject: [PATCH 0046/2699] Remove attribution of utils.py --- ceph-mon/hooks/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 75123991..2d5ad307 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -5,7 +5,6 @@ # Authors: # James Page # -# Taken from lp:~james-page/charms/precise/ganglia/python-refactor # import subprocess From 4b19c0e0ad03a3e68a83868f228e81bb1a3104db Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 10:09:59 +0100 Subject: [PATCH 0047/2699] Switch to using DNS for host->ip resolution as this is more provider-independent, use .format for string formatting --- ceph-proxy/hooks/hooks.py | 10 +++++----- ceph-proxy/hooks/utils.py | 9 +++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c9a5617e..a4ba1b5f 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -69,15 +69,15 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address')) - + ':6789') + hosts.append('{}:6789'.format(utils.get_host_ip())) for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): hosts.append( - socket.gethostbyname(utils.relation_get('private-address', - unit, relid)) - + ':6789') + '{}:6789'.format(utils.get_host_ip( + utils.relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 2d5ad307..e0cacb18 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -140,3 +140,12 @@ def config_get(attribute): return subprocess.check_output(cmd).strip() def get_unit_hostname(): return socket.gethostname() + + +def get_host_ip(hostname=unit_get('private-address')): + cmd = [ + 'dig', + '+short', + hostname + ] + return subprocess.check_call(cmd).strip() From 41e9dca55fdafb32ef3b9e5af2bffbf5f2d280d9 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 10:09:59 +0100 Subject: [PATCH 0048/2699] Switch to using DNS for host->ip resolution as this is more provider-independent, use .format for string formatting --- ceph-mon/hooks/hooks.py | 10 +++++----- ceph-mon/hooks/utils.py | 9 +++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c9a5617e..a4ba1b5f 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -69,15 +69,15 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address')) - + ':6789') + hosts.append('{}:6789'.format(utils.get_host_ip())) for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): hosts.append( - socket.gethostbyname(utils.relation_get('private-address', - unit, relid)) - + ':6789') + '{}:6789'.format(utils.get_host_ip( + utils.relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 2d5ad307..e0cacb18 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -140,3 +140,12 @@ def config_get(attribute): return subprocess.check_output(cmd).strip() def get_unit_hostname(): return socket.gethostname() + + +def get_host_ip(hostname=unit_get('private-address')): + cmd = [ + 'dig', + '+short', + hostname + ] + return subprocess.check_call(cmd).strip() From e3455b2373760fc6aadf869615d9435c11a53afd Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 22:18:58 +1300 Subject: [PATCH 0049/2699] bump
rev --- ceph-proxy/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 0691f67b..59343b09 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -52 +53 From e4e3a3ced628544a29f750961527f446b6b6f3c6 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 4 Oct 2012 22:18:58 +1300 Subject: [PATCH 0050/2699] bump rev --- ceph-mon/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/revision b/ceph-mon/revision index 0691f67b..59343b09 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -52 +53 From a1144f4df5f345b10f6b7187a1eef7b263f66eb9 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 10:32:03 +0100 Subject: [PATCH 0051/2699] Fixup juju_log signature, use check_output not check_call when getting IP addresses --- ceph-proxy/hooks/utils.py | 4 ++-- ceph-proxy/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index e0cacb18..d180aa42 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -75,7 +75,7 @@ def expose(port, protocol='TCP'): subprocess.check_call(cmd) -def juju_log(message, severity='INFO'): +def juju_log(severity, message): cmd = [ 'juju-log', '--log-level', severity, @@ -148,4 +148,4 @@ def get_host_ip(hostname=unit_get('private-address')): '+short', hostname ] - return subprocess.check_call(cmd).strip() + return subprocess.check_output(cmd).strip() diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 0691f67b..fb1e7bc8 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -52 +54 From 0aa98ad4b63f3140deac857a5bba1c0d00ceae50 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 10:32:03 +0100 Subject: [PATCH 0052/2699] Fixup juju_log signature, use check_output not check_call when getting IP addresses --- ceph-mon/hooks/utils.py | 4 ++-- ceph-mon/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index e0cacb18..d180aa42 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -75,7 +75,7 @@ def expose(port, protocol='TCP'): subprocess.check_call(cmd) -def juju_log(message, severity='INFO'): +def juju_log(severity, message): cmd = [ 'juju-log', '--log-level', severity, @@ -148,4 +148,4 @@ def get_host_ip(hostname=unit_get('private-address')): '+short', hostname ] - return subprocess.check_call(cmd).strip() + return subprocess.check_output(cmd).strip() diff --git a/ceph-mon/revision b/ceph-mon/revision index 0691f67b..fb1e7bc8 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -52 +54 From e606ca28acaa7d92ed478687cd184583a34d0a05 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 00:09:28 +1300 Subject: [PATCH 0053/2699] dox hax --- ceph-proxy/README | 31 ++++++++++++++++++------------- ceph-proxy/TODO | 1 + 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index 0b56260b..964545f4 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -1,18 +1,23 @@ -This charm deploys a Ceph cluster. +This charm deploys a Ceph cluster. It is currently deliberately +inflexible and potentially destructive. It is designed to deploy on +exactly three machines. Each machine will run mon and osd. cephx is +not enabled and sufficient keys are not yet deployed for it to be +enbled after the fact. 
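A side note on the host-resolution helper introduced in PATCH 0047/0048 and repaired in PATCH 0051/0052 above: subprocess.check_call() only returns the command's exit status (an int), so the original check_call(cmd).strip() could never work; check_output() is what actually captures dig's stdout. A minimal sketch of the corrected helper (assuming dig is installed and the name resolves):

    import subprocess

    def get_host_ip(hostname):
        # dig +short prints only the resolved address
        cmd = ['dig', '+short', hostname]
        return subprocess.check_output(cmd).strip()

One caveat carried over from the original: the real signature, get_host_ip(hostname=unit_get('private-address')), evaluates its default argument once at import time. Hook scripts run as short-lived processes, so this is harmless here, but the same pattern would cache a stale address in a long-running daemon.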
-It uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks +We use new-style Ceph deployment, as reverse-engineered from the Chef +cookbook at https://github.com/ceph/ceph-cookbooks, although we +selected a different strategy to form the monitor cluster. Since we +don't know the names *or* addresses of the machines in advance, we use +the relation-joined hook to wait for all three nodes to come up, and +then write their addresses to ceph.conf in the "mon host" parameter. +After we initialize the monitor cluster a quorum forms quickly, and +OSD bringup proceeds. -This charm is currently deliberately inflexible and potentially -destructive. It is designed to deploy on exactly three machines. -Each machine will run mon and osd. +See http://ceph.com/docs/master/dev/mon-bootstrap/ for more +information on Ceph monitor cluster deployment strategies and +pitfalls. The osds use so-called "OSD hotplugging". ceph-disk-prepare is used -to create the filesystems with a special GPT partition type. udev is -set up to mount such filesystems and start the osd daemons as their +to create the filesystems with a special GPT partition type. upstart +is set up to mount such filesystems and start the osd daemons as their storage becomes visible to the system (or after "udevadm trigger"). - -The Chef cookbook above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. -Since all our OSDs run on nodes that also run mon, we don't need this -and did not implement it. diff --git a/ceph-proxy/TODO b/ceph-proxy/TODO index c4247485..c1287fcc 100644 --- a/ceph-proxy/TODO +++ b/ceph-proxy/TODO @@ -6,3 +6,4 @@ == Major == * deploy more than 3 OSD hosts + * deploy keys so that cephx can be used From 6bcb5082fb5de93951eeebddb83e74908ef1eeed Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 00:09:28 +1300 Subject: [PATCH 0054/2699] dox hax --- ceph-mon/README | 31 ++++++++++++++++++------------- ceph-mon/TODO | 1 + 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/ceph-mon/README b/ceph-mon/README index 0b56260b..964545f4 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -1,18 +1,23 @@ -This charm deploys a Ceph cluster. +This charm deploys a Ceph cluster. It is currently deliberately +inflexible and potentially destructive. It is designed to deploy on +exactly three machines. Each machine will run mon and osd. cephx is +not enabled and sufficient keys are not yet deployed for it to be +enbled after the fact. -It uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks +We use new-style Ceph deployment, as reverse-engineered from the Chef +cookbook at https://github.com/ceph/ceph-cookbooks, although we +selected a different strategy to form the monitor cluster. Since we +don't know the names *or* addresses of the machines in advance, we use +the relation-joined hook to wait for all three nodes to come up, and +then write their addresses to ceph.conf in the "mon host" parameter. +After we initialize the monitor cluster a quorum forms quickly, and +OSD bringup proceeds. -This charm is currently deliberately inflexible and potentially -destructive. It is designed to deploy on exactly three machines. -Each machine will run mon and osd. +See http://ceph.com/docs/master/dev/mon-bootstrap/ for more +information on Ceph monitor cluster deployment strategies and +pitfalls. 
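The "mon host" mechanism described above maps directly onto get_mon_hosts() from PATCH 0047/0048: each unit resolves its own private address plus that of every peer on the mon relation, then sorts the result so that all units render an identical ceph.conf. Reconstructed from the diff (the utils.* functions are the charm's own wrappers around the juju relation tools):

    def get_mon_hosts():
        # local unit first; get_host_ip() defaults to our private-address
        hosts = ['{}:6789'.format(utils.get_host_ip())]
        for relid in utils.relation_ids('mon'):
            for unit in utils.relation_list(relid):
                addr = utils.relation_get('private-address', unit, relid)
                hosts.append('{}:6789'.format(utils.get_host_ip(addr)))
        # sorted, so every unit writes the same "mon host" line
        hosts.sort()
        return hosts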
The osds use so-called "OSD hotplugging". ceph-disk-prepare is used -to create the filesystems with a special GPT partition type. udev is -set up to mount such filesystems and start the osd daemons as their +to create the filesystems with a special GPT partition type. upstart +is set up to mount such filesystems and start the osd daemons as their storage becomes visible to the system (or after "udevadm trigger"). - -The Chef cookbook above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. -Since all our OSDs run on nodes that also run mon, we don't need this -and did not implement it. diff --git a/ceph-mon/TODO b/ceph-mon/TODO index c4247485..c1287fcc 100644 --- a/ceph-mon/TODO +++ b/ceph-mon/TODO @@ -6,3 +6,4 @@ == Major == * deploy more than 3 OSD hosts + * deploy keys so that cephx can be used From 70cbf4d492120c771ddbf57d2cc7450d54fddedc Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 14:24:12 +0100 Subject: [PATCH 0055/2699] Updated README verbosity, added checks to harden ceph admin-daemon usage in ceph utils --- ceph-proxy/README | 81 ++++++++++++++++++++++++++++++++++------ ceph-proxy/TODO | 8 ++++ ceph-proxy/config.yaml | 2 +- ceph-proxy/copyright | 4 +- ceph-proxy/hooks/ceph.py | 32 ++++++++++------ ceph-proxy/revision | 2 +- 6 files changed, 101 insertions(+), 28 deletions(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index 0b56260b..2d1159dd 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -1,18 +1,75 @@ +Overview +======== + +Ceph is a distributed storage and network file system designed to provide +excellent performance, reliability, and scalability. + +This charm deploys a Ceph cluster. -It uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks +Usage +===== + +The ceph charm has two pieces of mandatory configuration for which no defaults +are provided: + + fsid: + uuid specific to a ceph cluster used to ensure that different + clusters don't get mixed up - use `uuid` to generate one. + + monitor-secret: + a ceph generated key used by the daemons that manage the cluster + to control security. You can use the ceph-authtool command to + generate one: + + ceph-authtool /dev/stdout --name=mon. --gen-key + +These two pieces of configuration must NOT be changed post bootstrap; attempting +to do this will cause a reconfiguration error and new service units will not join +the existing ceph cluster. + +The charm also supports specification of the storage devices to use in the ceph +cluster. + + osd-devices: + A list of devices that the charm will attempt to detect, initialise and + activate as ceph storage. + + This can be a superset of the actual storage devices presented to + each service unit and can be changed post ceph bootstrap using `juju set`. + +At a minimum you must provide a juju config file during initial deployment +with the fsid and monitor-secret options: + + ceph: + fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 + monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== + osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde + +Specifying the osd-devices to use is also a good idea. + +By default the ceph cluster will not bootstrap until 3 service units have been +deployed and started; this is to ensure that a quorum is achieved prior to adding +storage devices.
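The three-unit rule in the paragraph above is enforced in the mon relation hook rather than by juju itself. A hypothetical sketch of the gate (the MIN_MONS name and the exact comparison are assumptions; the "Not enough mons" message and the surrounding calls are taken from the hooks.py diffs elsewhere in this series):

    MIN_MONS = 3  # per the README: no bootstrap until 3 units are up

    def mon_relation():
        moncount = len(get_mon_hosts())
        if moncount >= MIN_MONS:
            bootstrap_monitor_cluster()
            ceph.wait_for_quorum()
        else:
            utils.juju_log('INFO',
                           'Not enough mons ({}), punting.'.format(moncount))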
+ +Bootnotes +========= + +This charm uses the new-style Ceph deployment as reverse-engineered from the Chef +cookbook at https://github.com/ceph/ceph-cookbooks. + +This charm is currently deliberately inflexible and potentially destructive. It +is designed to deploy on exactly three machines. -This charm is currently deliberately inflexible and potentially -destructive. It is designed to deploy on exactly three machines. Each machine will run mon and osd. -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used -to create the filesystems with a special GPT partition type. udev is -set up to mount such filesystems and start the osd daemons as their -storage becomes visible to the system (or after "udevadm trigger"). +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create the +filesystems with a special GPT partition type. udev is set up to mount such +filesystems and start the osd daemons as their storage becomes visible to the +system (or after "udevadm trigger"). + +The Chef cookbook above performs some extra steps to generate an OSD bootstrapping +key and propagate it to the other nodes in the cluster. Since all our OSDs run on +nodes that also run mon, we don't need this and did not implement it. -The Chef cookbook above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. -Since all our OSDs run on nodes that also run mon, we don't need this -and did not implement it. +The charm does not currently implement cephx and it's explicitly turned off in the +configuration generated for ceph. diff --git a/ceph-proxy/TODO b/ceph-proxy/TODO index c4247485..8fcf6066 100644 --- a/ceph-proxy/TODO +++ b/ceph-proxy/TODO @@ -2,7 +2,15 @@ * fix tunables (http://tracker.newdream.net/issues/2210) * more than 192 PGs + * fixup data placement in crush to be host not osd driven == Major == * deploy more than 3 OSD hosts + +== Public Charm == + + * cephx support + * rel: remote OSD services (+bootstrap.osd keys for cephx) + * rel: remote MON clients (+client keys for cephx) + * rel: RADOS gateway (+client key for cephx) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index a59f67e6..b8314087 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -9,7 +9,7 @@ options: monitor-secret: type: string description: | - This value will become the "mon." key. To generate a suitable value use: + This value will become the mon. key. To generate a suitable value use: . ceph-authtool /dev/stdout --name=mon. --gen-key .
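Since the monitor-secret option above must be generated out of band, one illustrative way to script it (the keyring layout "[mon.]" followed by "key = <base64>" is the standard ceph-authtool output; the parsing helper itself is an assumption, not charm code):

    import subprocess

    def generate_monitor_secret():
        out = subprocess.check_output(
            ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])
        # keep only the base64 key that follows "key = "
        return out.split('key = ')[1].strip()

The returned value is what would be pasted into the monitor-secret option of the deployment config file.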
diff --git a/ceph-proxy/copyright b/ceph-proxy/copyright index 4e1085af..bdfae0e0 100644 --- a/ceph-proxy/copyright +++ b/ceph-proxy/copyright @@ -11,5 +11,5 @@ Copyright: 2004-2010 by Sage Weil License: LGPL-2.1 License: LGPL-2.1 - On Debian GNU/Linux system you can find the complete text of the - LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' + On Debian GNU/Linux system you can find the complete text of the + LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 83b113cf..f542ed9a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -11,25 +11,31 @@ import subprocess import time import utils +import os QUORUM = ['leader', 'peon'] def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + asok, "mon_status" ] - - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - - if result['state'] in QUORUM: - return True + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False else: return False @@ -40,12 +46,14 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + asok, "add_bootstrap_peer_hint", peer ] - # Ignore any errors for this call - subprocess.call(cmd) + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index fb1e7bc8..c3f407c0 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -54 +55 From ffad190a14c22f8e9cc9ec9c4b7d98ff457b6f43 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 14:24:12 +0100 Subject: [PATCH 0056/2699] Updated README verbosity, added checks to harden ceph admin-daemon usage in ceph utils --- ceph-mon/README | 81 +++++++++++++++++++++++++++++++++++------- ceph-mon/TODO | 8 +++++ ceph-mon/config.yaml | 2 +- ceph-mon/copyright | 4 +-- ceph-mon/hooks/ceph.py | 32 ++++++++++------- ceph-mon/revision | 2 +- 6 files changed, 101 insertions(+), 28 deletions(-) diff --git a/ceph-mon/README b/ceph-mon/README index 0b56260b..2d1159dd 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -1,18 +1,75 @@ +Overview +======== + +Ceph is a distributed storage and network file system designed to provide +excellent performance, reliability, and scalability. + This charm deploys a Ceph cluster. -It uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks +Usage +===== + +The ceph charm has two pieces of mandatory configuration for which no defaults +are provided: + + fsid: + uuid specific to a ceph cluster used to ensure that different + clusters don't get mixed up - use `uuid` to generate one. + + monitor-secret: + a ceph generated key used by the daemons that manage to cluster + to control security. You can use the ceph-authtool command to + generate one: + + ceph-authtool /dev/stdout --name=mon. 
--gen-key + +These two pieces of configuration must NOT be changed post bootstrap; attempting +to do this will cause a reconfiguration error and new service units will not join +the existing ceph cluster. + +The charm also supports specification of the storage devices to use in the ceph +cluster. + + osd-devices: + A list of devices that the charm will attempt to detect, initialise and + activate as ceph storage. + + This can be a superset of the actual storage devices presented to + each service unit and can be changed post ceph bootstrap using `juju set`. + +At a minimum you must provide a juju config file during initial deployment +with the fsid and monitor-secret options: + + ceph: + fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 + monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== + osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde + +Specifying the osd-devices to use is also a good idea. + +By default the ceph cluster will not bootstrap until 3 service units have been +deployed and started; this is to ensure that a quorum is achieved prior to adding +storage devices. + +Bootnotes +========= + +This charm uses the new-style Ceph deployment as reverse-engineered from the Chef +cookbook at https://github.com/ceph/ceph-cookbooks. + +This charm is currently deliberately inflexible and potentially destructive. It +is designed to deploy on exactly three machines. -This charm is currently deliberately inflexible and potentially -destructive. It is designed to deploy on exactly three machines. Each machine will run mon and osd. -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used -to create the filesystems with a special GPT partition type. udev is -set up to mount such filesystems and start the osd daemons as their -storage becomes visible to the system (or after "udevadm trigger"). +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create the +filesystems with a special GPT partition type. udev is set up to mount such +filesystems and start the osd daemons as their storage becomes visible to the +system (or after "udevadm trigger"). + +The Chef cookbook above performs some extra steps to generate an OSD bootstrapping +key and propagate it to the other nodes in the cluster. Since all our OSDs run on +nodes that also run mon, we don't need this and did not implement it. -The Chef cookbook above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. -Since all our OSDs run on nodes that also run mon, we don't need this -and did not implement it. +The charm does not currently implement cephx and it's explicitly turned off in the +configuration generated for ceph. diff --git a/ceph-mon/TODO b/ceph-mon/TODO index c4247485..8fcf6066 100644 --- a/ceph-mon/TODO +++ b/ceph-mon/TODO @@ -2,7 +2,15 @@ * fix tunables (http://tracker.newdream.net/issues/2210) * more than 192 PGs + * fixup data placement in crush to be host not osd driven == Major == * deploy more than 3 OSD hosts + +== Public Charm == + + * cephx support + * rel: remote OSD services (+bootstrap.osd keys for cephx) + * rel: remote MON clients (+client keys for cephx) + * rel: RADOS gateway (+client key for cephx) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index a59f67e6..b8314087 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -9,7 +9,7 @@ options: monitor-secret: type: string description: | - This value will become the "mon." key. To generate a suitable value use: + This value will become the mon. key.
To generate a suitable value use: . ceph-authtool /dev/stdout --name=mon. --gen-key . diff --git a/ceph-mon/copyright b/ceph-mon/copyright index 4e1085af..bdfae0e0 100644 --- a/ceph-mon/copyright +++ b/ceph-mon/copyright @@ -11,5 +11,5 @@ Copyright: 2004-2010 by Sage Weil License: LGPL-2.1 License: LGPL-2.1 - On Debian GNU/Linux system you can find the complete text of the - LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' + On Debian GNU/Linux system you can find the complete text of the + LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 83b113cf..f542ed9a 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -11,25 +11,31 @@ import subprocess import time import utils +import os QUORUM = ['leader', 'peon'] def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + asok, "mon_status" ] - - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - - if result['state'] in QUORUM: - return True + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False else: return False @@ -40,12 +46,14 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", - "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()), + asok, "add_bootstrap_peer_hint", peer ] - # Ignore any errors for this call - subprocess.call(cmd) + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) diff --git a/ceph-mon/revision b/ceph-mon/revision index fb1e7bc8..c3f407c0 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -54 +55 From 649333bad58c912474ad3228bea8e2845a1a481d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 14:47:13 +0100 Subject: [PATCH 0057/2699] Merge README updates from pjdc --- ceph-proxy/README | 50 +++++++++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index 2d1159dd..a886a83e 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -38,38 +38,50 @@ cluster. each service unit and can be changed post ceph bootstrap using `juju set`. At a minimum you must provide a juju config file during initial deployment -with the fsid and monitor-secret options: +with the fsid and monitor-secret options (contents of cepy.yaml below): - ceph: + ceph-brolin: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde Specifying the osd-devices to use is also a good idea. +Boot things up by using: + + juju deploy -n 3 --config ceph.yaml ceph-brolin + By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding -storage devices. +storage devices. -Bootnotes -========= +Technical Bootnotes +=================== -This charm uses the new-style Ceph deployment as reverse-engineered from the Chef -cookbook at https://github.com/ceph/ceph-cookbooks. 
+This charm is currently deliberately inflexible and potentially destructive. +It is designed to deploy on exactly three machines. Each machine will run mon +and osd. -This charm is currently deliberately inflexible and potentially destructive. It -is designed to deploy on exactly three machines. +This charm uses the new-style Ceph deployment as reverse-engineered from the +Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected +a different strategy to form the monitor cluster. Since we don't know the +names *or* addresses of the machines in advance, we use the relation-joined +hook to wait for all three nodes to come up, and then write their addresses +to ceph.conf in the "mon host" parameter. After we initialize the monitor +cluster a quorum forms quickly, and OSD bringup proceeds. -Each machine will run mon and osd. +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create +the filesystems with a special GPT partition type. udev is set up to mount +such filesystems and start the osd daemons as their storage becomes visible to +the system (or after "udevadm trigger"). -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create the -filesystems with a special GPT partition type. udev is set up to mount such -filesystems and start the osd daemons as their storage becomes visible to the -system (or after "udevadm trigger"). +The Chef cookbook above performs some extra steps to generate an OSD +bootstrapping key and propagate it to the other nodes in the cluster. Since +all OSDs run on nodes that also run mon, we don't need this and did not +implement it. -The Chef cookbook above performs some extra steps to generate an OSD bootstrapping -key and propagate it to the other nodes in the cluster. Since all our OSDs run on -nodes that also run mon, we don't need this and did not implement it. +The charm does not currently implement cephx and it's explicitly turned off in -The charm does not currently implement cephx and it's explicitly turned off in the -configuration generated for ceph. +the configuration generated for ceph. +See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph +monitor cluster deployment strategies and pitfalls. From 4df9a30215ed244a99f4cf692478bcceefeb4713 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 4 Oct 2012 14:47:13 +0100 Subject: [PATCH 0058/2699] Merge README updates from pjdc --- ceph-mon/README | 50 ++++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/ceph-mon/README b/ceph-mon/README index 2d1159dd..a886a83e 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -38,38 +38,50 @@ cluster. each service unit and can be changed post ceph bootstrap using `juju set`. At a minimum you must provide a juju config file during initial deployment -with the fsid and monitor-secret options: +with the fsid and monitor-secret options (contents of ceph.yaml below): - ceph: + ceph-brolin: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde Specifying the osd-devices to use is also a good idea. +Boot things up by using: + + juju deploy -n 3 --config ceph.yaml ceph-brolin + By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding -storage devices. +storage devices.
-Bootnotes -========= +Technical Bootnotes +=================== -This charm uses the new-style Ceph deployment as reverse-engineered from the Chef -cookbook at https://github.com/ceph/ceph-cookbooks. +This charm is currently deliberately inflexible and potentially destructive. +It is designed to deploy on exactly three machines. Each machine will run mon +and osd. -This charm is currently deliberately inflexible and potentially destructive. It -is designed to deploy on exactly three machines. +This charm uses the new-style Ceph deployment as reverse-engineered from the +Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected +a different strategy to form the monitor cluster. Since we don't know the +names *or* addresses of the machines in advance, we use the relation-joined +hook to wait for all three nodes to come up, and then write their addresses +to ceph.conf in the "mon host" parameter. After we initialize the monitor +cluster a quorum forms quickly, and OSD bringup proceeds. -Each machine will run mon and osd. +The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create +the filesystems with a special GPT partition type. udev is set up to mount +such filesystems and start the osd daemons as their storage becomes visible to +the system (or after "udevadm trigger"). -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create the -filesystems with a special GPT partition type. udev is set up to mount such -filesystems and start the osd daemons as their storage becomes visible to the -system (or after "udevadm trigger"). +The Chef cookbook above performs some extra steps to generate an OSD +bootstrapping key and propagate it to the other nodes in the cluster. Since +all OSDs run on nodes that also run mon, we don't need this and did not +implement it. -The Chef cookbook above performs some extra steps to generate an OSD bootstrapping -key and propagate it to the other nodes in the cluster. Since all our OSDs run on -nodes that also run mon, we don't need this and did not implement it. +The charm does not currently implement cephx and it's explicitly turned off in -The charm does not currently implement cephx and it's explicitly turned off in the -configuration generated for ceph. +the configuration generated for ceph. +See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph +monitor cluster deployment strategies and pitfalls.
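The admin-daemon hardening added back in PATCH 0055/0056 is worth restating compactly, because it is the pattern the charm now uses whenever it talks to a local mon: check that the admin socket exists before calling, and treat both a failed command and non-JSON output as "no quorum". Condensed from the ceph.py diff (utils.get_unit_hostname() is the charm's own helper):

    import json
    import os
    import subprocess

    QUORUM = ['leader', 'peon']

    def is_quorum():
        asok = '/var/run/ceph/ceph-mon.{}.asok'.format(utils.get_unit_hostname())
        if not os.path.exists(asok):
            # mon daemon not running yet, so certainly not in quorum
            return False
        cmd = ['ceph', '--admin-daemon', asok, 'mon_status']
        try:
            result = json.loads(subprocess.check_output(cmd))
        except (subprocess.CalledProcessError, ValueError):
            # command failed, or mon_status returned non-JSON output
            return False
        return result['state'] in QUORUM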
From 955a194ee2e0e1d83274ae9c107f22651c6415dc Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 5 Oct 2012 10:09:18 +0100 Subject: [PATCH 0059/2699] Removed bootstrap hinting - its not required for the method we are using to bootstrap the cluster --- ceph-proxy/hooks/hooks.py | 3 --- ceph-proxy/revision | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a4ba1b5f..35b6598f 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -152,9 +152,6 @@ def mon_relation(): ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): osdize_and_activate(dev) - - for peer in get_mon_addresses(): - ceph.add_bootstrap_hint(peer) else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( diff --git a/ceph-proxy/revision b/ceph-proxy/revision index c3f407c0..f6b91e0e 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -55 +56 From 1e33a98cd4f24aea3b962dc6235c3bb02c5482ee Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 5 Oct 2012 10:09:18 +0100 Subject: [PATCH 0060/2699] Removed bootstrap hinting - its not required for the method we are using to bootstrap the cluster --- ceph-mon/hooks/hooks.py | 3 --- ceph-mon/revision | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a4ba1b5f..35b6598f 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -152,9 +152,6 @@ def mon_relation(): ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): osdize_and_activate(dev) - - for peer in get_mon_addresses(): - ceph.add_bootstrap_hint(peer) else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( diff --git a/ceph-mon/revision b/ceph-mon/revision index c3f407c0..f6b91e0e 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -55 +56 From e64657012ce728267478f1f67da260815d2e8e79 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 22:15:13 +1300 Subject: [PATCH 0061/2699] hoist udevadm trigger, add start hook for redeployments --- ceph-proxy/hooks/hooks.py | 21 ++++++++++++++++----- ceph-proxy/revision | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a4ba1b5f..94574d0a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -62,7 +62,9 @@ def config_changed(): if ceph.is_quorum(): for dev in utils.config_get('osd-devices').split(' '): - osdize_and_activate(dev) + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) utils.juju_log('INFO', 'End config-changed hook.') @@ -127,7 +129,7 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) -def osdize_and_activate(dev): +def osdize(dev): # XXX hack for instances subprocess.call(['umount', '/mnt']) @@ -137,8 +139,6 @@ def osdize_and_activate(dev): else: if os.path.exists(dev): subprocess.call(['ceph-disk-prepare', dev]) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) def mon_relation(): @@ -151,7 +151,9 @@ def mon_relation(): ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): - osdize_and_activate(dev) + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) for peer in get_mon_addresses(): ceph.add_bootstrap_hint(peer) @@ -170,11 +172,20 @@ def upgrade_charm(): utils.juju_log('INFO', 'End upgrade-charm hook.') +def start(): + # In 
case we're being redeployed to the same machines, try + # to make sure everything is running as soon as possible. + subprocess.call(['start', 'ceph-mon-all-starter']) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + + hooks = { 'config-changed': config_changed, 'install': install, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, + 'start': start, 'upgrade-charm': upgrade_charm, } diff --git a/ceph-proxy/revision b/ceph-proxy/revision index c3f407c0..e1617e84 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -55 +57 From fc22a9a74c3f821ac5357211437eee20bccdc5cb Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 22:15:13 +1300 Subject: [PATCH 0062/2699] hoist udevadm trigger, add start hook for redeployments --- ceph-mon/hooks/hooks.py | 21 ++++++++++++++++----- ceph-mon/revision | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a4ba1b5f..94574d0a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -62,7 +62,9 @@ def config_changed(): if ceph.is_quorum(): for dev in utils.config_get('osd-devices').split(' '): - osdize_and_activate(dev) + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) utils.juju_log('INFO', 'End config-changed hook.') @@ -127,7 +129,7 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) -def osdize_and_activate(dev): +def osdize(dev): # XXX hack for instances subprocess.call(['umount', '/mnt']) @@ -137,8 +139,6 @@ def osdize_and_activate(dev): else: if os.path.exists(dev): subprocess.call(['ceph-disk-prepare', dev]) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) def mon_relation(): @@ -151,7 +151,9 @@ def mon_relation(): ceph.wait_for_quorum() for dev in utils.config_get('osd-devices').split(' '): - osdize_and_activate(dev) + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) for peer in get_mon_addresses(): ceph.add_bootstrap_hint(peer) @@ -170,11 +172,20 @@ def upgrade_charm(): utils.juju_log('INFO', 'End upgrade-charm hook.') +def start(): + # In case we're being redeployed to the same machines, try + # to make sure everything is running as soon as possible. 
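Taken together with the disk checks added in the following PATCH 0063/0064, OSD preparation settles into the shape below (a condensed sketch of the charm's own osdize(), not new behaviour): prepare a device only if it is not already a Ceph OSD and not in use, then fire a single hoisted udevadm trigger so upstart activates everything at once:

    import os
    import subprocess

    def osdize(dev):
        subprocess.call(['umount', '/mnt'])  # XXX hack for instances
        if ceph.is_osd_disk(dev):
            utils.juju_log('INFO',
                           'Looks like {} is already an OSD, skipping.'.format(dev))
            return
        # dev + '1' is the first partition; if mounted, the disk is in use
        if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0:
            utils.juju_log('INFO',
                           'Looks like {} is in use, skipping.'.format(dev))
            return
        if os.path.exists(dev):
            subprocess.call(['ceph-disk-prepare', dev])

    # callers prepare every configured device, then replay hotplug events once:
    #   subprocess.call(['udevadm', 'trigger',
    #                    '--subsystem-match=block', '--action=add'])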
+ subprocess.call(['start', 'ceph-mon-all-starter']) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + + hooks = { 'config-changed': config_changed, 'install': install, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, + 'start': start, 'upgrade-charm': upgrade_charm, } diff --git a/ceph-mon/revision b/ceph-mon/revision index c3f407c0..e1617e84 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -55 +57 From b0ee06410390e3d8db2b52d13670815d8a56a834 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 22:53:28 +1300 Subject: [PATCH 0063/2699] be more careful when intializing disks by checking for the ceph UUID in the GPT --- ceph-proxy/hooks/ceph.py | 12 ++++++++++++ ceph-proxy/hooks/hooks.py | 12 +++++++++--- ceph-proxy/revision | 2 +- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index f542ed9a..f570e4e3 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -57,3 +57,15 @@ def add_bootstrap_hint(peer): if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") + for line in info: + if line.startswith('Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D'): + return True + except subprocess.CalledProcessError: + pass + return False diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 94574d0a..48524523 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -133,12 +133,18 @@ def osdize(dev): # XXX hack for instances subprocess.call(['umount', '/mnt']) + if ceph.is_osd_disk(dev): + utils.juju_log('INFO', + 'Looks like {} is already an OSD, skipping.'.format(dev)) + return + if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: utils.juju_log('INFO', 'Looks like {} is in use, skipping.'.format(dev)) - else: - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) + return + + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) def mon_relation(): diff --git a/ceph-proxy/revision b/ceph-proxy/revision index e1617e84..1479e19b 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -57 +65 From 70f12e56dd5ea7640a40251e850fd6fbb0823734 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 22:53:28 +1300 Subject: [PATCH 0064/2699] be more careful when intializing disks by checking for the ceph UUID in the GPT --- ceph-mon/hooks/ceph.py | 12 ++++++++++++ ceph-mon/hooks/hooks.py | 12 +++++++++--- ceph-mon/revision | 2 +- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index f542ed9a..f570e4e3 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -57,3 +57,15 @@ def add_bootstrap_hint(peer): if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") + for line in info: + if line.startswith('Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D'): + return True + except subprocess.CalledProcessError: + pass + return False diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 94574d0a..48524523 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -133,12 +133,18 @@ def osdize(dev): # XXX hack for instances 
subprocess.call(['umount', '/mnt']) + if ceph.is_osd_disk(dev): + utils.juju_log('INFO', + 'Looks like {} is already an OSD, skipping.'.format(dev)) + return + if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: utils.juju_log('INFO', 'Looks like {} is in use, skipping.'.format(dev)) - else: - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) + return + + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) def mon_relation(): diff --git a/ceph-mon/revision b/ceph-mon/revision index e1617e84..1479e19b 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -57 +65 From e54017542e1108dc85584b025331c945f79987d1 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 23:49:55 +1300 Subject: [PATCH 0065/2699] drop unused function get_mon_addresses --- ceph-proxy/hooks/hooks.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 1cbcfab4..317e1489 100644 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -85,20 +85,6 @@ def get_mon_hosts(): return hosts -def get_mon_addresses(): - hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) - - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): - hosts.append( - socket.gethostbyname(utils.relation_get('private-address', - unit, relid))) - - hosts.sort() - return hosts - - def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) From f36393442fcfacf0584466e05fc523a88c49f9c8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Fri, 5 Oct 2012 23:49:55 +1300 Subject: [PATCH 0066/2699] drop unused function get_mon_addresses --- ceph-mon/hooks/hooks.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 1cbcfab4..317e1489 100644 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -85,20 +85,6 @@ def get_mon_hosts(): return hosts -def get_mon_addresses(): - hosts = [] - hosts.append(socket.gethostbyname(utils.unit_get('private-address'))) - - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): - hosts.append( - socket.gethostbyname(utils.relation_get('private-address', - unit, relid))) - - hosts.sort() - return hosts - - def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) From e17ab6611d08d928000f1117838ba6fcea460a27 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Sat, 6 Oct 2012 01:15:38 +1300 Subject: [PATCH 0067/2699] hide the hook indirection logic --- ceph-proxy/hooks/hooks.py | 24 ++++++++---------------- ceph-proxy/hooks/utils.py | 15 +++++++++++++-- ceph-proxy/revision | 2 +- 3 files changed, 22 insertions(+), 19 deletions(-) mode change 100644 => 100755 ceph-proxy/hooks/hooks.py diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py old mode 100644 new mode 100755 index 317e1489..00fbc412 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -169,21 +169,13 @@ def start(): '--subsystem-match=block', '--action=add']) -hooks = { - 'config-changed': config_changed, - 'install': install, - 'mon-relation-departed': mon_relation, - 'mon-relation-joined': mon_relation, - 'start': start, - 'upgrade-charm': upgrade_charm, -} - -hook = os.path.basename(sys.argv[0]) - -try: - hooks[hook]() -except KeyError: - utils.juju_log('INFO', - "This charm doesn't know how to 
handle '{}'.".format(hook)) +utils.do_hooks({ + 'config-changed': config_changed, + 'install': install, + 'mon-relation-departed': mon_relation, + 'mon-relation-joined': mon_relation, + 'start': start, + 'upgrade-charm': upgrade_charm, + }) sys.exit(0) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index d180aa42..74711438 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -3,12 +3,23 @@ # Copyright 2012 Canonical Ltd. # # Authors: -# James Page -# +# James Page +# Paul Collins # +import os import subprocess import socket +import sys + +def do_hooks(hooks): + hook = os.path.basename(sys.argv[0]) + + try: + hooks[hook]() + except KeyError: + utils.juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) def install(*pkgs): diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 1479e19b..b5489e5e 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -65 +69 From cc55842e358b42e055c0b2909921c325d81e58b8 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Sat, 6 Oct 2012 01:15:38 +1300 Subject: [PATCH 0068/2699] hide the hook indirection logic --- ceph-mon/hooks/hooks.py | 24 ++++++++---------------- ceph-mon/hooks/utils.py | 15 +++++++++++++-- ceph-mon/revision | 2 +- 3 files changed, 22 insertions(+), 19 deletions(-) mode change 100644 => 100755 ceph-mon/hooks/hooks.py diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py old mode 100644 new mode 100755 index 317e1489..00fbc412 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -169,21 +169,13 @@ def start(): '--subsystem-match=block', '--action=add']) -hooks = { - 'config-changed': config_changed, - 'install': install, - 'mon-relation-departed': mon_relation, - 'mon-relation-joined': mon_relation, - 'start': start, - 'upgrade-charm': upgrade_charm, -} - -hook = os.path.basename(sys.argv[0]) - -try: - hooks[hook]() -except KeyError: - utils.juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) +utils.do_hooks({ + 'config-changed': config_changed, + 'install': install, + 'mon-relation-departed': mon_relation, + 'mon-relation-joined': mon_relation, + 'start': start, + 'upgrade-charm': upgrade_charm, + }) sys.exit(0) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index d180aa42..74711438 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -3,12 +3,23 @@ # Copyright 2012 Canonical Ltd. 
# # Authors: -# James Page -# +# James Page +# Paul Collins # +import os import subprocess import socket +import sys + +def do_hooks(hooks): + hook = os.path.basename(sys.argv[0]) + + try: + hooks[hook]() + except KeyError: + utils.juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) def install(*pkgs): diff --git a/ceph-mon/revision b/ceph-mon/revision index 1479e19b..b5489e5e 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -65 +69 From ad7feb225a9a58894ad020f395d71a8ecac90db0 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Sat, 6 Oct 2012 02:16:19 +1300 Subject: [PATCH 0069/2699] fix pep8 e302 --- ceph-proxy/hooks/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 74711438..d9c9ef99 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -12,6 +12,7 @@ import socket import sys + def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) From c8a75d68f8c1b5f35c2388628f148fcff906a0b0 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Sat, 6 Oct 2012 02:16:19 +1300 Subject: [PATCH 0070/2699] fix pep8 e302 --- ceph-mon/hooks/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 74711438..d9c9ef99 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -12,6 +12,7 @@ import socket import sys + def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) From e7be026e9772c87bc70157b26d50ac87f529063a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 13:18:19 +0100 Subject: [PATCH 0071/2699] Reset execute bit on hooks.py --- ceph-proxy/hooks/hooks.py | 0 ceph-proxy/revision | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 ceph-proxy/hooks/hooks.py diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py old mode 100644 new mode 100755 diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 1479e19b..3fdcd7c4 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -65 +67 From cb8ae48bab6725519b7a68672ec3ced57f30e3d8 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 13:18:19 +0100 Subject: [PATCH 0072/2699] Reset execute bit on hooks.py --- ceph-mon/hooks/hooks.py | 0 ceph-mon/revision | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 ceph-mon/hooks/hooks.py diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py old mode 100644 new mode 100755 diff --git a/ceph-mon/revision b/ceph-mon/revision index 1479e19b..3fdcd7c4 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -65 +67 From 39e13751f394055313937b8f16cd189c2dc878c9 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 13:27:14 +0100 Subject: [PATCH 0073/2699] Fixup do_hook log calling to work --- ceph-proxy/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 9797ba64..64c578e1 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -19,8 +19,8 @@ def do_hooks(hooks): try: hooks[hook]() except KeyError: - utils.juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) + juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) def install(*pkgs): From 65c71dd1e9b793eb1863850068b2f7a79c91fc50 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 13:27:14 +0100 Subject: [PATCH 0074/2699] Fixup do_hook log calling to work --- 
ceph-mon/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 9797ba64..64c578e1 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -19,8 +19,8 @@ def do_hooks(hooks): try: hooks[hook]() except KeyError: - utils.juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) + juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) def install(*pkgs): From d3c33a502d7c7d424dca952121431848af1cb0ed Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 15:06:09 +0100 Subject: [PATCH 0075/2699] Added support for remote OSD services --- ceph-proxy/hooks/hooks.py | 28 +++++++++++++++++++++++++++- ceph-proxy/hooks/osd-relation-joined | 1 + ceph-proxy/hooks/utils.py | 8 +++++++- ceph-proxy/metadata.yaml | 7 +++++++ ceph-proxy/revision | 2 +- 5 files changed, 43 insertions(+), 3 deletions(-) create mode 120000 ceph-proxy/hooks/osd-relation-joined diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 4148c5d4..423b493a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -12,7 +12,6 @@ import os import subprocess import shutil -import socket import sys import ceph @@ -147,6 +146,8 @@ def mon_relation(): osdize(dev) subprocess.call(['udevadm', 'trigger', '--subsystem-match=block', '--action=add']) + + notify_osds() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( @@ -155,6 +156,30 @@ def mon_relation(): utils.juju_log('INFO', 'End mon-relation hook.') +def notify_osds(): + utils.juju_log('INFO', 'Begin notify_osds.') + + for relid in utils.relation_ids('osd'): + utils.relation_set(fsid=utils.config_get('fsid'), + rid=relid) + + utils.juju_log('INFO', 'End notify_osds.') + + +def osd_relation(): + utils.juju_log('INFO', 'Begin osd-relation hook.') + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - providing OSD with fsid') + utils.relation_set(fsid=utils.config_get('fsid')) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring fsid provision') + + utils.juju_log('INFO', 'End osd-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -175,6 +200,7 @@ def start(): 'install': install, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, + 'osd-relation-joined': osd_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) diff --git a/ceph-proxy/hooks/osd-relation-joined b/ceph-proxy/hooks/osd-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/osd-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 64c578e1..7e2da93c 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -129,8 +129,14 @@ def relation_set(**kwargs): cmd = [ 'relation-set' ] + args = [] for k, v in kwargs.items(): - cmd.append('{}={}'.format(k, v)) + if k == 'rid': + cmd.append('-r') + cmd.append(v) + else: + args.append('{}={}'.format(k, v)) + cmd += args subprocess.check_call(cmd) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 6a363437..3f0147da 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -8,3 +8,10 @@ description: | peers: mon: interface: ceph-brolin +provides: + client: + interface: ceph-client + osd: + interface: ceph-osd + radosgw: + interface: ceph-radosgw diff --git a/ceph-proxy/revision 
b/ceph-proxy/revision index b5489e5e..39f5b693 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -69 +71 From 683a4bf2a8dadea746a0fa704ab497f8396d19f9 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 15:06:09 +0100 Subject: [PATCH 0076/2699] Added support for remote OSD services --- ceph-mon/hooks/hooks.py | 28 +++++++++++++++++++++++++++- ceph-mon/hooks/osd-relation-joined | 1 + ceph-mon/hooks/utils.py | 8 +++++++- ceph-mon/metadata.yaml | 7 +++++++ ceph-mon/revision | 2 +- 5 files changed, 43 insertions(+), 3 deletions(-) create mode 120000 ceph-mon/hooks/osd-relation-joined diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 4148c5d4..423b493a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -12,7 +12,6 @@ import os import subprocess import shutil -import socket import sys import ceph @@ -147,6 +146,8 @@ def mon_relation(): osdize(dev) subprocess.call(['udevadm', 'trigger', '--subsystem-match=block', '--action=add']) + + notify_osds() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( @@ -155,6 +156,30 @@ def mon_relation(): utils.juju_log('INFO', 'End mon-relation hook.') +def notify_osds(): + utils.juju_log('INFO', 'Begin notify_osds.') + + for relid in utils.relation_ids('osd'): + utils.relation_set(fsid=utils.config_get('fsid'), + rid=relid) + + utils.juju_log('INFO', 'End notify_osds.') + + +def osd_relation(): + utils.juju_log('INFO', 'Begin osd-relation hook.') + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - providing OSD with fsid') + utils.relation_set(fsid=utils.config_get('fsid')) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring fsid provision') + + utils.juju_log('INFO', 'End osd-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -175,6 +200,7 @@ def start(): 'install': install, 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, + 'osd-relation-joined': osd_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) diff --git a/ceph-mon/hooks/osd-relation-joined b/ceph-mon/hooks/osd-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/osd-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 64c578e1..7e2da93c 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -129,8 +129,14 @@ def relation_set(**kwargs): cmd = [ 'relation-set' ] + args = [] for k, v in kwargs.items(): - cmd.append('{}={}'.format(k, v)) + if k == 'rid': + cmd.append('-r') + cmd.append(v) + else: + args.append('{}={}'.format(k, v)) + cmd += args subprocess.check_call(cmd) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 6a363437..3f0147da 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -8,3 +8,10 @@ description: | peers: mon: interface: ceph-brolin +provides: + client: + interface: ceph-client + osd: + interface: ceph-osd + radosgw: + interface: ceph-radosgw diff --git a/ceph-mon/revision b/ceph-mon/revision index b5489e5e..39f5b693 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -69 +71 From af55be8c31857847894b4caf9f8e9639b9c1af62 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 15:07:16 +0100 Subject: [PATCH 0077/2699] Initial ceph-osd charm --- ceph-osd/.bzrignore | 1 + ceph-osd/.project | 17 ++ ceph-osd/.pydevproject | 8 + ceph-osd/README | 87 ++++++++++ 
ceph-osd/TODO | 11 ++ ceph-osd/config.yaml | 37 ++++ ceph-osd/copyright | 15 ++ ceph-osd/files/upstart/ceph-create-keys.conf | 8 + ceph-osd/files/upstart/ceph-hotplug.conf | 11 ++ .../files/upstart/ceph-mon-all-starter.conf | 20 +++ ceph-osd/files/upstart/ceph-mon-all.conf | 4 + ceph-osd/files/upstart/ceph-mon.conf | 24 +++ ceph-osd/files/upstart/ceph-osd.conf | 37 ++++ ceph-osd/hooks/ceph.py | 73 ++++++++ ceph-osd/hooks/config-changed | 1 + ceph-osd/hooks/hooks.py | 145 ++++++++++++++++ ceph-osd/hooks/install | 1 + ceph-osd/hooks/mon-relation-changed | 1 + ceph-osd/hooks/mon-relation-departed | 1 + ceph-osd/hooks/start | 1 + ceph-osd/hooks/stop | 1 + ceph-osd/hooks/upgrade-charm | 1 + ceph-osd/hooks/utils.py | 163 ++++++++++++++++++ ceph-osd/metadata.yaml | 12 ++ ceph-osd/revision | 1 + ceph-osd/templates/ceph.conf | 17 ++ 26 files changed, 698 insertions(+) create mode 100644 ceph-osd/.bzrignore create mode 100644 ceph-osd/.project create mode 100644 ceph-osd/.pydevproject create mode 100644 ceph-osd/README create mode 100644 ceph-osd/TODO create mode 100644 ceph-osd/config.yaml create mode 100644 ceph-osd/copyright create mode 100644 ceph-osd/files/upstart/ceph-create-keys.conf create mode 100644 ceph-osd/files/upstart/ceph-hotplug.conf create mode 100644 ceph-osd/files/upstart/ceph-mon-all-starter.conf create mode 100644 ceph-osd/files/upstart/ceph-mon-all.conf create mode 100644 ceph-osd/files/upstart/ceph-mon.conf create mode 100644 ceph-osd/files/upstart/ceph-osd.conf create mode 100644 ceph-osd/hooks/ceph.py create mode 120000 ceph-osd/hooks/config-changed create mode 100755 ceph-osd/hooks/hooks.py create mode 120000 ceph-osd/hooks/install create mode 120000 ceph-osd/hooks/mon-relation-changed create mode 120000 ceph-osd/hooks/mon-relation-departed create mode 120000 ceph-osd/hooks/start create mode 120000 ceph-osd/hooks/stop create mode 120000 ceph-osd/hooks/upgrade-charm create mode 100644 ceph-osd/hooks/utils.py create mode 100644 ceph-osd/metadata.yaml create mode 100644 ceph-osd/revision create mode 100644 ceph-osd/templates/ceph.conf diff --git a/ceph-osd/.bzrignore b/ceph-osd/.bzrignore new file mode 100644 index 00000000..3a4edf69 --- /dev/null +++ b/ceph-osd/.bzrignore @@ -0,0 +1 @@ +.project diff --git a/ceph-osd/.project b/ceph-osd/.project new file mode 100644 index 00000000..c5e385b7 --- /dev/null +++ b/ceph-osd/.project @@ -0,0 +1,17 @@ + + + ceph-osd + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/ceph-osd/.pydevproject b/ceph-osd/.pydevproject new file mode 100644 index 00000000..bb30cc40 --- /dev/null +++ b/ceph-osd/.pydevproject @@ -0,0 +1,8 @@ + + +python 2.7 +Default + +/ceph-osd/hooks + + diff --git a/ceph-osd/README b/ceph-osd/README new file mode 100644 index 00000000..a886a83e --- /dev/null +++ b/ceph-osd/README @@ -0,0 +1,87 @@ +Overview +======== + +Ceph is a distributed storage and network file system designed to provide +excellent performance, reliability, and scalability. + +This charm deploys a Ceph cluster. + +Usage +===== + +The ceph charm has two pieces of mandatory configuration for which no defaults +are provided: + + fsid: + uuid specific to a ceph cluster used to ensure that different + clusters don't get mixed up - use `uuid` to generate one. + + monitor-secret: + a ceph generated key used by the daemons that manage to cluster + to control security. You can use the ceph-authtool command to + generate one: + + ceph-authtool /dev/stdout --name=mon. 
+
+These two pieces of configuration must NOT be changed post bootstrap; attempting
+to do this will cause a reconfiguration error and new service units will not join
+the existing ceph cluster.
+
+The charm also supports specification of the storage devices to use in the ceph
+cluster.
+
+  osd-devices:
+    A list of devices that the charm will attempt to detect, initialise and
+    activate as ceph storage.
+
+    This can be a superset of the actual storage devices presented to
+    each service unit and can be changed post ceph bootstrap using `juju set`.
+
+At a minimum you must provide a juju config file during initial deployment
+with the fsid and monitor-secret options (contents of ceph.yaml below):
+
+  ceph-brolin:
+    fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
+    monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==
+    osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde
+
+Specifying the osd-devices to use is also a good idea.
+
+Boot things up by using:
+
+  juju deploy -n 3 --config ceph.yaml ceph-brolin
+
+By default the ceph cluster will not bootstrap until 3 service units have been
+deployed and started; this is to ensure that a quorum is achieved prior to adding
+storage devices.
+
+Technical Bootnotes
+===================
+
+This charm is currently deliberately inflexible and potentially destructive.
+It is designed to deploy on exactly three machines.  Each machine will run mon
+and osd.
+
+This charm uses the new-style Ceph deployment as reverse-engineered from the
+Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected
+a different strategy to form the monitor cluster.  Since we don't know the
+names *or* addresses of the machines in advance, we use the relation-joined
+hook to wait for all three nodes to come up, and then write their addresses
+to ceph.conf in the "mon host" parameter.  After we initialize the monitor
+cluster a quorum forms quickly, and OSD bringup proceeds.
+
+The osds use so-called "OSD hotplugging".  ceph-disk-prepare is used to create
+the filesystems with a special GPT partition type.  udev is set up to mount
+such filesystems and start the osd daemons as their storage becomes visible to
+the system (or after "udevadm trigger").
+
+The Chef cookbook above performs some extra steps to generate an OSD
+bootstrapping key and propagate it to the other nodes in the cluster.  Since
+all OSDs run on nodes that also run mon, we don't need this and did not
+implement it.
+
+The charm does not currently implement cephx and it's explicitly turned off in
+the configuration generated for ceph.
+
+See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph
+monitor cluster deployment strategies and pitfalls.
diff --git a/ceph-osd/TODO b/ceph-osd/TODO
new file mode 100644
index 00000000..46549b7a
--- /dev/null
+++ b/ceph-osd/TODO
@@ -0,0 +1,11 @@
+== Minor ==
+
+ * fix tunables (http://tracker.newdream.net/issues/2210)
+ * more than 192 PGs
+ * fixup data placement in crush to be host not osd driven
+
+== Public Charm ==
+
+ * cephx support
+ * rel: remote MON clients (+client keys for cephx)
+ * rel: RADOS gateway (+client key for cephx)
diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
new file mode 100644
index 00000000..06222338
--- /dev/null
+++ b/ceph-osd/config.yaml
@@ -0,0 +1,37 @@
+options:
+  fsid:
+    type: string
+    description: |
+      fsid of the ceph cluster.  To generate a suitable value use `uuid`
+      .
+      This configuration element is mandatory and the service will fail on
+      install if it is not provided.
+  osd-devices:
+    type: string
+    default: /dev/sdb /dev/sdc /dev/sdd /dev/sde
+    description: |
+      The devices to format and set up as osd volumes.
+      .
+      These devices are the range of devices that will be checked for and
+      used across all service units.
+  source:
+    type: string
+    default: ppa:ceph-ubuntu/dev
+    description: |
+      Optional configuration to support use of additional sources such as:
+      .
+      - ppa:myteam/ppa
+      - cloud:folsom-proposed
+      - http://my.archive.com/ubuntu main
+      .
+      The last option should be used in conjunction with the key configuration
+      option.
+      .
+      Note that a minimum ceph version of 0.48.2 is required for use with this
+      charm which is NOT provided by the packages in the main Ubuntu archive
+      for precise.
+  key:
+    type: string
+    description: |
+      Key ID to import to the apt keyring to support use with arbitrary source
+      configuration from outside of Launchpad archives or PPAs.
diff --git a/ceph-osd/copyright b/ceph-osd/copyright
new file mode 100644
index 00000000..bdfae0e0
--- /dev/null
+++ b/ceph-osd/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+Comment: The licensing of this charm is aligned to upstream ceph
+ as the ceph upstart integration is distributed as part of the charm.
+
+Files: *
+Copyright: 2012, Canonical Ltd.
+License: LGPL-2.1
+
+Files: files/upstart/*
+Copyright: 2004-2010 by Sage Weil
+License: LGPL-2.1
+
+License: LGPL-2.1
+ On Debian GNU/Linux systems you can find the complete text of the
+ LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1'
diff --git a/ceph-osd/files/upstart/ceph-create-keys.conf b/ceph-osd/files/upstart/ceph-create-keys.conf
new file mode 100644
index 00000000..6fb45818
--- /dev/null
+++ b/ceph-osd/files/upstart/ceph-create-keys.conf
@@ -0,0 +1,8 @@
+description "Create Ceph client.admin key when possible"
+
+start on started ceph-mon
+stop on runlevel [!2345]
+
+task
+
+exec /usr/sbin/ceph-create-keys --cluster="${cluster:-ceph}" -i "${id:-$(hostname)}"
diff --git a/ceph-osd/files/upstart/ceph-hotplug.conf b/ceph-osd/files/upstart/ceph-hotplug.conf
new file mode 100644
index 00000000..70204529
--- /dev/null
+++ b/ceph-osd/files/upstart/ceph-hotplug.conf
@@ -0,0 +1,11 @@
+description "Ceph hotplug"
+
+start on block-device-added \
+    DEVTYPE=partition \
+    ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d
+stop on runlevel [!2345]
+
+task
+instance $DEVNAME
+
+exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME"
diff --git a/ceph-osd/files/upstart/ceph-mon-all-starter.conf b/ceph-osd/files/upstart/ceph-mon-all-starter.conf
new file mode 100644
index 00000000..f7188cb7
--- /dev/null
+++ b/ceph-osd/files/upstart/ceph-mon-all-starter.conf
@@ -0,0 +1,20 @@
+description "Ceph MON (start all instances)"
+
+start on starting ceph-mon-all
+stop on runlevel [!2345]
+
+task
+
+script
+    set -e
+    # TODO what's the valid charset for cluster names and mon ids?
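+    # Mon data directories are named "<cluster>-<id>"; the find below lists
+    # them, and a start event is only emitted for instances whose bootstrap
+    # completed (marked by a "done" file in the data directory).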
+ find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \ + | while read f; do + if [ -e "/var/lib/ceph/mon/$f/done" ]; then + cluster="${f%%-*}" + id="${f#*-}" + + initctl emit ceph-mon cluster="$cluster" id="$id" + fi + done +end script diff --git a/ceph-osd/files/upstart/ceph-mon-all.conf b/ceph-osd/files/upstart/ceph-mon-all.conf new file mode 100644 index 00000000..006f2f20 --- /dev/null +++ b/ceph-osd/files/upstart/ceph-mon-all.conf @@ -0,0 +1,4 @@ +description "Ceph monitor (all instances)" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] diff --git a/ceph-osd/files/upstart/ceph-mon.conf b/ceph-osd/files/upstart/ceph-mon.conf new file mode 100644 index 00000000..2cf7bfa5 --- /dev/null +++ b/ceph-osd/files/upstart/ceph-mon.conf @@ -0,0 +1,24 @@ +description "Ceph MON" + +start on ceph-mon +stop on runlevel [!2345] or stopping ceph-mon-all + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-mon || { stop; exit 0; } + test -d "/var/lib/ceph/mon/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +# this breaks oneiric +#usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" + +exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-osd/files/upstart/ceph-osd.conf b/ceph-osd/files/upstart/ceph-osd.conf new file mode 100644 index 00000000..119ad000 --- /dev/null +++ b/ceph-osd/files/upstart/ceph-osd.conf @@ -0,0 +1,37 @@ +description "Ceph OSD" + +start on ceph-osd +stop on runlevel [!2345] + +respawn +respawn limit 5 30 + +pre-start script + set -e + test -x /usr/bin/ceph-osd || { stop; exit 0; } + test -d "/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } + + install -d -m0755 /var/run/ceph + + # update location in crush; put in some suitable defaults on the + # command line, ceph.conf can override what it wants + location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" + weight="$(ceph-conf --cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" + ceph \ + --cluster="${cluster:-ceph}" \ + --name="osd.$id" \ + --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ + osd crush set \ + -- \ + "$id" "osd.$id" "${weight:-1}" \ + pool=default \ + host="$(hostname -s)" \ + $location \ + || : +end script + +instance ${cluster:-ceph}/$id +export cluster +export id + +exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py new file mode 100644 index 00000000..2a193d58 --- /dev/null +++ b/ceph-osd/hooks/ceph.py @@ -0,0 +1,73 @@ + +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Paul Collins +# + +import json +import subprocess +import time +import utils +import os + +QUORUM = ['leader', 'peon'] + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + if line.startswith( + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + ): + return True + except subprocess.CalledProcessError: + pass + return False diff --git a/ceph-osd/hooks/config-changed b/ceph-osd/hooks/config-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/config-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py new file mode 100755 index 00000000..97437a1d --- /dev/null +++ b/ceph-osd/hooks/hooks.py @@ -0,0 +1,145 @@ +#!/usr/bin/python + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# + +import glob +import os +import subprocess +import shutil +import sys + +import ceph +import utils + + +def install_upstart_scripts(): + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') + + +def install(): + utils.juju_log('INFO', 'Begin install hook.') + utils.configure_source() + utils.install('ceph', 'gdisk') + install_upstart_scripts() + utils.juju_log('INFO', 'End install hook.') + + +def emit_cephconf(): + cephcontext = { + 'mon_hosts': ' '.join(get_mon_hosts()), + 'fsid': get_fsid() + } + + with open('/etc/ceph/ceph.conf', 'w') as cephconf: + cephconf.write(utils.render_template('ceph.conf', cephcontext)) + + +def config_changed(): + utils.juju_log('INFO', 'Begin config-changed hook.') + + utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + + if get_fsid(): + utils.juju_log('INFO', 'cluster fsid detected, rescanning disks') + emit_cephconf() + for dev in utils.config_get('osd-devices').split(' '): + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + + utils.juju_log('INFO', 'End config-changed hook.') + + +def get_mon_hosts(): + hosts = [] + hosts.append('{}:6789'.format(utils.get_host_ip())) + + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + hosts.append( + '{}:6789'.format(utils.get_host_ip( + utils.relation_get('private-address', + unit, relid))) + ) + + hosts.sort() + return hosts + + +def get_fsid(): + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + fsid = utils.relation_get('fsid', + unit, relid) + if fsid != "": + return fsid + return None + + +def osdize(dev): + # XXX hack for instances + subprocess.call(['umount', '/mnt']) + + if ceph.is_osd_disk(dev): + 
utils.juju_log('INFO', + 'Looks like {} is already an OSD, skipping.' + .format(dev)) + return + + if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + utils.juju_log('INFO', + 'Looks like {} is in use, skipping.'.format(dev)) + return + + if os.path.exists(dev): + subprocess.call(['ceph-disk-prepare', dev]) + + +def mon_relation(): + utils.juju_log('INFO', 'Begin mon-relation hook.') + + if get_fsid(): + utils.juju_log('INFO', 'mon has provided fsid - scanning disks') + emit_cephconf() + for dev in utils.config_get('osd-devices').split(' '): + osdize(dev) + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + else: + utils.juju_log('INFO', + 'mon cluster has not yet provided fsid') + + utils.juju_log('INFO', 'End mon-relation hook.') + + +def upgrade_charm(): + utils.juju_log('INFO', 'Begin upgrade-charm hook.') + if get_fsid(): + emit_cephconf() + install_upstart_scripts() + utils.juju_log('INFO', 'End upgrade-charm hook.') + + +def start(): + # In case we're being redeployed to the same machines, try + # to make sure everything is running as soon as possible. + subprocess.call(['udevadm', 'trigger', + '--subsystem-match=block', '--action=add']) + + +utils.do_hooks({ + 'config-changed': config_changed, + 'install': install, + 'mon-relation-departed': mon_relation, + 'mon-relation-changed': mon_relation, + 'start': start, + 'upgrade-charm': upgrade_charm, + }) + +sys.exit(0) diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/install @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/mon-relation-changed b/ceph-osd/hooks/mon-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/mon-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/mon-relation-departed b/ceph-osd/hooks/mon-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/mon-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/start b/ceph-osd/hooks/start new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/start @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/stop b/ceph-osd/hooks/stop new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/stop @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/upgrade-charm @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py new file mode 100644 index 00000000..64c578e1 --- /dev/null +++ b/ceph-osd/hooks/utils.py @@ -0,0 +1,163 @@ + +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Paul Collins +# + +import os +import subprocess +import socket +import sys + + +def do_hooks(hooks): + hook = os.path.basename(sys.argv[0]) + + try: + hooks[hook]() + except KeyError: + juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) + + +def install(*pkgs): + cmd = [ + 'apt-get', + '-y', + 'install' + ] + for pkg in pkgs: + cmd.append(pkg) + subprocess.check_call(cmd) + +TEMPLATES_DIR = 'templates' + +try: + import jinja2 +except ImportError: + install('python-jinja2') + import jinja2 + + +def render_template(template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir) + ) + template = templates.get_template(template_name) + return template.render(context) + + +def configure_source(): + source = config_get('source') + if (source.startswith('ppa:') or + source.startswith('cloud:') or + source.startswith('http:')): + cmd = [ + 'add-apt-repository', + source + ] + subprocess.check_call(cmd) + if source.startswith('http:'): + key = config_get('key') + cmd = [ + 'apt-key', + 'import', + key + ] + subprocess.check_call(cmd) + cmd = [ + 'apt-get', + 'update' + ] + subprocess.check_call(cmd) + +# Protocols +TCP = 'TCP' +UDP = 'UDP' + + +def expose(port, protocol='TCP'): + cmd = [ + 'open-port', + '{}/{}'.format(port, protocol) + ] + subprocess.check_call(cmd) + + +def juju_log(severity, message): + cmd = [ + 'juju-log', + '--log-level', severity, + message + ] + subprocess.check_call(cmd) + + +def relation_ids(relation): + cmd = [ + 'relation-ids', + relation + ] + return subprocess.check_output(cmd).split() # IGNORE:E1103 + + +def relation_list(rid): + cmd = [ + 'relation-list', + '-r', rid, + ] + return subprocess.check_output(cmd).split() # IGNORE:E1103 + + +def relation_get(attribute, unit=None, rid=None): + cmd = [ + 'relation-get', + ] + if rid: + cmd.append('-r') + cmd.append(rid) + cmd.append(attribute) + if unit: + cmd.append(unit) + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def relation_set(**kwargs): + cmd = [ + 'relation-set' + ] + for k, v in kwargs.items(): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def unit_get(attribute): + cmd = [ + 'unit-get', + attribute + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def config_get(attribute): + cmd = [ + 'config-get', + attribute + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def get_unit_hostname(): + return socket.gethostname() + + +def get_host_ip(hostname=unit_get('private-address')): + cmd = [ + 'dig', + '+short', + hostname + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml new file mode 100644 index 00000000..530f4142 --- /dev/null +++ b/ceph-osd/metadata.yaml @@ -0,0 +1,12 @@ +name: ceph-osd +summary: Highly scalable distributed storage - OSD nodes +maintainer: James Page +description: | + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. + . + This charm provides the OSD personality for expanding storage nodes within + a ceph deployment. 
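+# The 'mon' relation below joins these storage units to the monitor cluster
+# provided by the ceph charm; the interface name has to match the 'osd'
+# interface that charm provides (ceph-osd).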
+requires:
+  mon:
+    interface: ceph-osd
diff --git a/ceph-osd/revision b/ceph-osd/revision
new file mode 100644
index 00000000..0cfbf088
--- /dev/null
+++ b/ceph-osd/revision
@@ -0,0 +1 @@
+2
diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf
new file mode 100644
index 00000000..32103fb5
--- /dev/null
+++ b/ceph-osd/templates/ceph.conf
@@ -0,0 +1,17 @@
+[global]
+  auth supported = none
+  keyring = /etc/ceph/$cluster.$name.keyring
+  mon host = {{ mon_hosts }}
+  fsid = {{ fsid }}
+
+[mon]
+  keyring = /var/lib/ceph/mon/$cluster-$id/keyring
+
+[mds]
+  keyring = /var/lib/ceph/mds/$cluster-$id/keyring
+
+[osd]
+  keyring = /var/lib/ceph/osd/$cluster-$id/keyring
+  osd journal size = 1000
+  filestore xattr use omap = true
+

From 622cc88f9d95f7d2b9781a27d33c0f0e5018d427 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 8 Oct 2012 16:58:16 +0100
Subject: [PATCH 0078/2699] Minor tweak to copyright

---
 ceph-radosgw/.bzrignore                    |   2 +
 ceph-radosgw/README                        |  58 +++++++
 ceph-radosgw/TODO                          |   5 +
 ceph-radosgw/config.yaml                   |  22 +++
 ceph-radosgw/copyright                     |   9 ++
 ceph-radosgw/files/www/s3gw.fcgi           |   2 +
 ceph-radosgw/hooks/config-changed          |   1 +
 ceph-radosgw/hooks/gateway-relation-joined |   1 +
 ceph-radosgw/hooks/hooks.py                | 139 ++++++++++++++++
 ceph-radosgw/hooks/install                 |   1 +
 ceph-radosgw/hooks/mon-relation-changed    |   1 +
 ceph-radosgw/hooks/mon-relation-departed   |   1 +
 ceph-radosgw/hooks/start                   |   1 +
 ceph-radosgw/hooks/stop                    |   1 +
 ceph-radosgw/hooks/upgrade-charm           |   1 +
 ceph-radosgw/hooks/utils.py                | 177 +++++++++++++++++++++
 ceph-radosgw/metadata.yaml                 |  15 ++
 ceph-radosgw/revision                      |   1 +
 ceph-radosgw/templates/ceph.conf           |   9 ++
 ceph-radosgw/templates/rgw                 |  25 +++
 20 files changed, 472 insertions(+)
 create mode 100644 ceph-radosgw/.bzrignore
 create mode 100644 ceph-radosgw/README
 create mode 100644 ceph-radosgw/TODO
 create mode 100644 ceph-radosgw/config.yaml
 create mode 100644 ceph-radosgw/copyright
 create mode 100755 ceph-radosgw/files/www/s3gw.fcgi
 create mode 120000 ceph-radosgw/hooks/config-changed
 create mode 120000 ceph-radosgw/hooks/gateway-relation-joined
 create mode 100755 ceph-radosgw/hooks/hooks.py
 create mode 120000 ceph-radosgw/hooks/install
 create mode 120000 ceph-radosgw/hooks/mon-relation-changed
 create mode 120000 ceph-radosgw/hooks/mon-relation-departed
 create mode 120000 ceph-radosgw/hooks/start
 create mode 120000 ceph-radosgw/hooks/stop
 create mode 120000 ceph-radosgw/hooks/upgrade-charm
 create mode 100644 ceph-radosgw/hooks/utils.py
 create mode 100644 ceph-radosgw/metadata.yaml
 create mode 100644 ceph-radosgw/revision
 create mode 100644 ceph-radosgw/templates/ceph.conf
 create mode 100644 ceph-radosgw/templates/rgw
diff --git a/ceph-radosgw/.bzrignore b/ceph-radosgw/.bzrignore
new file mode 100644
index 00000000..a9af2130
--- /dev/null
+++ b/ceph-radosgw/.bzrignore
@@ -0,0 +1,2 @@
+.project
+.pydevproject
diff --git a/ceph-radosgw/README b/ceph-radosgw/README
new file mode 100644
index 00000000..9bc2552d
--- /dev/null
+++ b/ceph-radosgw/README
@@ -0,0 +1,58 @@
+Overview
+========
+
+Ceph is a distributed storage and network file system designed to provide
+excellent performance, reliability, and scalability.
+
+This charm deploys the RADOS Gateway, an S3 and Swift compatible HTTP gateway
+for online object storage on top of a ceph cluster.
+
+This charm only supports the S3 gateway at this point in time.
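+
+As a concrete illustration of the S3 compatibility (not part of the charm
+itself), once the gateway is deployed and access credentials have been
+created with radosgw-admin (see Usage below), a client built on the
+python-boto library can drive it; a minimal sketch, with placeholder
+credentials and gateway address:
+
+  import boto
+  import boto.s3.connection
+
+  # plain HTTP on port 80, path-style bucket addressing
+  conn = boto.connect_s3(
+      aws_access_key_id='<access-key>',
+      aws_secret_access_key='<secret-key>',
+      host='<gateway-address>', port=80, is_secure=False,
+      calling_format=boto.s3.connection.OrdinaryCallingFormat())
+
+  bucket = conn.create_bucket('demo')
+  key = bucket.new_key('hello.txt')
+  key.set_contents_from_string('hello world')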
+
+Usage
+=====
+
+In order to use this charm, it is assumed that you have already deployed a ceph
+storage cluster using the 'ceph' charm with something like this::
+
+   juju deploy -n 3 --config ceph.yaml ceph
+
+To deploy the RADOS gateway simply do::
+
+   juju deploy ceph-radosgw
+   juju add-relation ceph-radosgw ceph
+
+You can then directly access the RADOS gateway by exposing the service::
+
+   juju expose ceph-radosgw
+
+The gateway can be accessed over port 80 (as shown in juju status exposed
+ports).
+
+Note that you will need to log in to one of the service units supporting the
+ceph-radosgw charm to generate some access credentials::
+
+   radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"
+
+Scale-out
+=========
+
+It's possible to scale out the RADOS Gateway itself::
+
+   juju add-unit -n 2 ceph-radosgw
+
+and then stick an HA load balancer on the front::
+
+   juju deploy haproxy
+   juju add-relation haproxy ceph-radosgw
+
+This should give you a bit more bang on the front end if you really need it.
+
+Bootnotes
+=========
+
+The Ceph RADOS Gateway makes use of a multiverse package,
+libapache2-mod-fastcgi.  As such it will try to automatically enable the
+multiverse pocket in /etc/apt/sources.list.  Note that there is nothing
+'wrong' with multiverse components - they typically have less liberal
+licensing policies or suchlike.
diff --git a/ceph-radosgw/TODO b/ceph-radosgw/TODO
new file mode 100644
index 00000000..cd30ef3b
--- /dev/null
+++ b/ceph-radosgw/TODO
@@ -0,0 +1,5 @@
+RADOS Gateway Charm
+-------------------
+
+ * cephx support
+ * Improved process control of radosgw daemon (too many restarts)
diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
new file mode 100644
index 00000000..1312bec9
--- /dev/null
+++ b/ceph-radosgw/config.yaml
@@ -0,0 +1,22 @@
+options:
+  source:
+    type: string
+    default: ppa:ceph-ubuntu/dev
+    description: |
+      Optional configuration to support use of additional sources such as:
+      .
+      - ppa:myteam/ppa
+      - cloud:folsom-proposed
+      - http://my.archive.com/ubuntu main
+      .
+      The last option should be used in conjunction with the key configuration
+      option.
+      .
+      Note that a minimum ceph version of 0.48.2 is required for use with this
+      charm which is NOT provided by the packages in the main Ubuntu archive
+      for precise.
+  key:
+    type: string
+    description: |
+      Key ID to import to the apt keyring to support use with arbitrary source
+      configuration from outside of Launchpad archives or PPAs.
diff --git a/ceph-radosgw/copyright b/ceph-radosgw/copyright
new file mode 100644
index 00000000..f35c8617
--- /dev/null
+++ b/ceph-radosgw/copyright
@@ -0,0 +1,9 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2012, Canonical Ltd.
+License: LGPL-2.1 + +License: LGPL-2.1 + On Debian GNU/Linux system you can find the complete text of the + LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' diff --git a/ceph-radosgw/files/www/s3gw.fcgi b/ceph-radosgw/files/www/s3gw.fcgi new file mode 100755 index 00000000..c0f4854a --- /dev/null +++ b/ceph-radosgw/files/www/s3gw.fcgi @@ -0,0 +1,2 @@ +#!/bin/sh +exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.rados.gateway \ No newline at end of file diff --git a/ceph-radosgw/hooks/config-changed b/ceph-radosgw/hooks/config-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/config-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/gateway-relation-joined b/ceph-radosgw/hooks/gateway-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/gateway-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py new file mode 100755 index 00000000..23f3babb --- /dev/null +++ b/ceph-radosgw/hooks/hooks.py @@ -0,0 +1,139 @@ +#!/usr/bin/python + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# + +import shutil +import subprocess +import sys +import glob +import os + +import utils + + +def install_www_scripts(): + for x in glob.glob('files/www/*'): + shutil.copy(x, '/var/www/') + + +def install(): + utils.juju_log('INFO', 'Begin install hook.') + utils.enable_pocket('multiverse') + utils.configure_source() + utils.install('radosgw', + 'libapache2-mod-fastcgi', + 'apache2') + utils.juju_log('INFO', 'End install hook.') + + +def emit_cephconf(): + # Ensure ceph directory actually exists + if not os.path.exists('/etc/ceph'): + os.makedirs('/etc/ceph') + + cephcontext = { + 'mon_hosts': ' '.join(get_mon_hosts()), + 'hostname': utils.get_unit_hostname() + } + + with open('/etc/ceph/ceph.conf', 'w') as cephconf: + cephconf.write(utils.render_template('ceph.conf', cephcontext)) + + +def emit_apacheconf(): + apachecontext = { + "hostname": utils.unit_get('private-address') + } + with open('/etc/apache2/sites-available/rgw', 'w') as apacheconf: + apacheconf.write(utils.render_template('rgw', apachecontext)) + + +def apache_sites(): + utils.juju_log('INFO', 'Begin apache_sites.') + subprocess.check_call(['a2dissite', 'default']) + subprocess.check_call(['a2ensite', 'rgw']) + utils.juju_log('INFO', 'End apache_sites.') + + +def apache_modules(): + utils.juju_log('INFO', 'Begin apache_sites.') + subprocess.check_call(['a2enmod', 'fastcgi']) + subprocess.check_call(['a2enmod', 'rewrite']) + utils.juju_log('INFO', 'End apache_sites.') + + +def apache_reload(): + subprocess.call(['service', 'apache2', 'reload']) + + +def config_changed(): + utils.juju_log('INFO', 'Begin config-changed hook.') + emit_cephconf() + emit_apacheconf() + install_www_scripts() + apache_sites() + apache_modules() + apache_reload() + utils.juju_log('INFO', 'End config-changed hook.') + + +def get_mon_hosts(): + hosts = [] + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + hosts.append( + '{}:6789'.format(utils.get_host_ip( + utils.relation_get('private-address', + unit, relid))) + ) + + hosts.sort() + return hosts + + +def mon_relation(): + utils.juju_log('INFO', 'Begin mon-relation hook.') + emit_cephconf() + restart() + utils.juju_log('INFO', 'End mon-relation hook.') + + +def gateway_relation(): + utils.juju_log('INFO', 'Begin gateway-relation hook.') + 
utils.relation_set(hostname=utils.unit_get('private-address'), + port=80) + utils.juju_log('INFO', 'Begin gateway-relation hook.') + + +def upgrade_charm(): + utils.juju_log('INFO', 'Begin upgrade-charm hook.') + utils.juju_log('INFO', 'End upgrade-charm hook.') + + +def start(): + # In case we're being redeployed to the same machines, try + # to make sure everything is running as soon as possible. + subprocess.call(['service', 'radosgw', 'start']) + utils.expose(port=80) + + +def restart(): + subprocess.call(['service', 'radosgw', 'restart']) + + +utils.do_hooks({ + 'install': install, + 'config-changed': config_changed, + 'mon-relation-departed': mon_relation, + 'mon-relation-changed': mon_relation, + 'gateway-relation-joined': gateway_relation, + 'start': start, + 'upgrade-charm': config_changed, # same function ATM + }) + +sys.exit(0) diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/install @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/mon-relation-changed b/ceph-radosgw/hooks/mon-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/mon-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/mon-relation-departed b/ceph-radosgw/hooks/mon-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/mon-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/start b/ceph-radosgw/hooks/start new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/start @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/stop b/ceph-radosgw/hooks/stop new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/stop @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/upgrade-charm @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py new file mode 100644 index 00000000..c6556dc7 --- /dev/null +++ b/ceph-radosgw/hooks/utils.py @@ -0,0 +1,177 @@ + +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Paul Collins +# + +import os +import subprocess +import socket +import sys +import re + + +def do_hooks(hooks): + hook = os.path.basename(sys.argv[0]) + + try: + hooks[hook]() + except KeyError: + juju_log('INFO', + "This charm doesn't know how to handle '{}'.".format(hook)) + + +def install(*pkgs): + cmd = [ + 'apt-get', + '-y', + 'install' + ] + for pkg in pkgs: + cmd.append(pkg) + subprocess.check_call(cmd) + +TEMPLATES_DIR = 'templates' + +try: + import jinja2 +except ImportError: + install('python-jinja2') + import jinja2 + + +def render_template(template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir) + ) + template = templates.get_template(template_name) + return template.render(context) + + +def configure_source(): + source = config_get('source') + if (source.startswith('ppa:') or + source.startswith('cloud:') or + source.startswith('http:')): + cmd = [ + 'add-apt-repository', + source + ] + subprocess.check_call(cmd) + if source.startswith('http:'): + key = config_get('key') + cmd = [ + 'apt-key', + 'import', + key + ] + subprocess.check_call(cmd) + cmd = [ + 'apt-get', + 'update' + ] + subprocess.check_call(cmd) + + +def enable_pocket(pocket): + apt_sources = "/etc/apt/sources.list" + with open(apt_sources, "r") as sources: + lines = sources.readlines() + with open(apt_sources, "w") as sources: + for line in lines: + if pocket in line: + sources.write(re.sub('^# deb', 'deb', line)) + else: + sources.write(line) + + +# Protocols +TCP = 'TCP' +UDP = 'UDP' + + +def expose(port, protocol='TCP'): + cmd = [ + 'open-port', + '{}/{}'.format(port, protocol) + ] + subprocess.check_call(cmd) + + +def juju_log(severity, message): + cmd = [ + 'juju-log', + '--log-level', severity, + message + ] + subprocess.check_call(cmd) + + +def relation_ids(relation): + cmd = [ + 'relation-ids', + relation + ] + return subprocess.check_output(cmd).split() # IGNORE:E1103 + + +def relation_list(rid): + cmd = [ + 'relation-list', + '-r', rid, + ] + return subprocess.check_output(cmd).split() # IGNORE:E1103 + + +def relation_get(attribute, unit=None, rid=None): + cmd = [ + 'relation-get', + ] + if rid: + cmd.append('-r') + cmd.append(rid) + cmd.append(attribute) + if unit: + cmd.append(unit) + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def relation_set(**kwargs): + cmd = [ + 'relation-set' + ] + for k, v in kwargs.items(): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def unit_get(attribute): + cmd = [ + 'unit-get', + attribute + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def config_get(attribute): + cmd = [ + 'config-get', + attribute + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 + + +def get_unit_hostname(): + return socket.gethostname() + + +def get_host_ip(hostname=unit_get('private-address')): + cmd = [ + 'dig', + '+short', + hostname + ] + return subprocess.check_output(cmd).strip() # IGNORE:E1103 diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml new file mode 100644 index 00000000..9db4b805 --- /dev/null +++ b/ceph-radosgw/metadata.yaml @@ -0,0 +1,15 @@ +name: ceph-radosgw +summary: Highly scalable distributed storage - RADOS HTTP Gateway +maintainer: James Page +description: | + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. + . 
+ This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols + for object storage. +requires: + mon: + interface: ceph-radosgw +provides: + gateway: + interface: http diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision new file mode 100644 index 00000000..b4de3947 --- /dev/null +++ b/ceph-radosgw/revision @@ -0,0 +1 @@ +11 diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf new file mode 100644 index 00000000..73389c63 --- /dev/null +++ b/ceph-radosgw/templates/ceph.conf @@ -0,0 +1,9 @@ +[global] + auth supported = none + mon host = {{ mon_hosts }} + +[client.radosgw.gateway] + host = {{ hostname }} + keyring = /etc/ceph/keyring.rados.gateway + rgw socket path = /tmp/radosgw.sock + log file = /var/log/ceph/radosgw.log diff --git a/ceph-radosgw/templates/rgw b/ceph-radosgw/templates/rgw new file mode 100644 index 00000000..7b3f8b6e --- /dev/null +++ b/ceph-radosgw/templates/rgw @@ -0,0 +1,25 @@ + + FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock + + + + ServerName {{ hostname }} + ServerAdmin ceph@ubuntu.com + DocumentRoot /var/www + RewriteEngine On + RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1¶ms=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] + + + Options +ExecCGI + AllowOverride All + SetHandler fastcgi-script + Order allow,deny + Allow from all + AuthBasicAuthoritative Off + + + AllowEncodedSlashes On + ErrorLog /var/log/apache2/error.log + CustomLog /var/log/apache2/access.log combined + ServerSignature Off + From d011147ec5bdf244b3589e2f9465cf75652683b2 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 17:10:10 +0100 Subject: [PATCH 0079/2699] Documentation updates --- ceph-osd/README | 85 ++++++++++------------------------------- ceph-osd/TODO | 13 ++----- ceph-osd/config.yaml | 7 ---- ceph-osd/hooks/hooks.py | 2 - ceph-osd/metadata.yaml | 6 +-- ceph-osd/revision | 2 +- 6 files changed, 27 insertions(+), 88 deletions(-) diff --git a/ceph-osd/README b/ceph-osd/README index a886a83e..4ed39562 100644 --- a/ceph-osd/README +++ b/ceph-osd/README @@ -4,84 +4,39 @@ Overview Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -This charm deploys a Ceph cluster. +This charm deploys additional Ceph OSD storage service units and should be +used in conjunction with the 'ceph' charm to scale out the amount of storage +available in a Ceph cluster. Usage ===== - -The ceph charm has two pieces of mandatory configuration for which no defaults -are provided: - - fsid: - uuid specific to a ceph cluster used to ensure that different - clusters don't get mixed up - use `uuid` to generate one. - - monitor-secret: - a ceph generated key used by the daemons that manage to cluster - to control security. You can use the ceph-authtool command to - generate one: - - ceph-authtool /dev/stdout --name=mon. --gen-key - -These two pieces of configuration must NOT be changed post bootstrap; attempting -todo this will cause a reconfiguration error and new service units will not join -the existing ceph cluster. - + The charm also supports specification of the storage devices to use in the ceph -cluster. +cluster:: osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. This this can be a superset of the actual storage devices presented to - each service unit and can be changed post ceph bootstrap using `juju set`. 
- -At a minimum you must provide a juju config file during initial deployment -with the fsid and monitor-secret options (contents of ceph.yaml below): + each service unit and can be changed post ceph-osd deployment using + `juju set`. - ceph-brolin: - fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 - monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== +For example:: + + ceph-osd: osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde -Specifying the osd-devices to use is also a good idea. - -Boot things up by using: - - juju deploy -n 3 --config ceph.yaml ceph-brolin +Boot things up by using:: -By default the ceph cluster will not bootstrap until 3 service units have been -deployed and started; this is to ensure that a quorum is achieved prior to adding -storage devices. + juju deploy -n 3 --config ceph.yaml ceph -Technical Bootnotes -=================== +You can then deploy this charm by simply doing:: -This charm is currently deliberately inflexible and potentially destructive. -It is designed to deploy on exactly three machines. Each machine will run mon -and osd. - -This charm uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected -a different strategy to form the monitor cluster. Since we don't know the -names *or* addresses of the machines in advance, we use the relation-joined -hook to wait for all three nodes to come up, and then write their addresses -to ceph.conf in the "mon host" parameter. After we initialize the monitor -cluster a quorum forms quickly, and OSD bringup proceeds. + juju deploy -n 10 --config ceph.yaml ceph-osd + juju add-relation ceph-osd ceph -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create -the filesystems with a special GPT partition type. udev is set up to mount -such filesystems and start the osd daemons as their storage becomes visible to -the system (or after "udevadm trigger"). +Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd +charm which will scan for the configured storage devices and add them to the +pool of available storage. -The Chef cookbook above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. Since -all OSDs run on nodes that also run mon, we don't need this and did not -implement it. - -The charm does not currently implement cephx and it's explicitly turned off in -the configuration generated for ceph. - -See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph -monitor cluster deployment strategies and pitfalls. + \ No newline at end of file diff --git a/ceph-osd/TODO b/ceph-osd/TODO index 46549b7a..e06e95c3 100644 --- a/ceph-osd/TODO +++ b/ceph-osd/TODO @@ -1,11 +1,4 @@ -== Minor == +Ceph OSD Charm +============== - * fix tunables (http://tracker.newdream.net/issues/2210) - * more than 192 PGs - * fixup data placement in crush to be host not osd driven - -== Public Charm == - - * cephx support - * rel: remote MON clients (+client keys for cephx) - * rel: RADOS gateway (+client key for cephx) + * cephx support diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 06222338..4ed9a914 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -1,11 +1,4 @@ options: - fsid: - type: string - description: | - fsid of the ceph cluster. To generate a suitable value use `uuid` - . - This configuration element is mandatory and the service will fail on - install if it is not provided.
osd-devices: type: string default: /dev/sdb /dev/sdc /dev/sdd /dev/sde diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 97437a1d..20b5144f 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -58,8 +58,6 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(utils.get_host_ip())) - for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): hosts.append( diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 530f4142..2f41d9b2 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -1,12 +1,12 @@ name: ceph-osd -summary: Highly scalable distributed storage - OSD nodes +summary: Highly scalable distributed storage - Ceph OSD storage maintainer: James Page description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. . - This charm provides the OSD personality for expanding storage nodes within - a ceph deployment. + This charm provides the Ceph OSD personality for expanding storage capacity + within a ceph deployment. requires: mon: interface: ceph-osd diff --git a/ceph-osd/revision b/ceph-osd/revision index 0cfbf088..00750edc 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -2 +3 From 8c8bce2cf4b4f765a0530f67c6928c87f437bd48 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 17:10:41 +0100 Subject: [PATCH 0080/2699] Update TODO now we have osd and radosgw charms --- ceph-proxy/TODO | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/ceph-proxy/TODO b/ceph-proxy/TODO index 8fcf6066..2916c337 100644 --- a/ceph-proxy/TODO +++ b/ceph-proxy/TODO @@ -1,16 +1,7 @@ -== Minor == +Ceph Charm +========== - * fix tunables (http://tracker.newdream.net/issues/2210) - * more than 192 PGs - * fixup data placement in crush to be host not osd driven - -== Major == - - * deploy more than 3 OSD hosts - -== Public Charm == - - * cephx support - * rel: remote OSD services (+bootstrap.osd keys for cephx) - * rel: remote MON clients (+client keys for cephx) - * rel: RADOS gateway (+client key for cephx) + * fix tunables (http://tracker.newdream.net/issues/2210) + * more than 192 PGs + * fixup data placement in crush to be host not osd driven + * cephx support From ab94b3a881c7fcb7b8443e1f5508bb0fd04fd288 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Oct 2012 17:10:41 +0100 Subject: [PATCH 0081/2699] Update TODO now we have osd and radosgw charms --- ceph-mon/TODO | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/ceph-mon/TODO b/ceph-mon/TODO index 8fcf6066..2916c337 100644 --- a/ceph-mon/TODO +++ b/ceph-mon/TODO @@ -1,16 +1,7 @@ -== Minor == +Ceph Charm +========== - * fix tunables (http://tracker.newdream.net/issues/2210) - * more than 192 PGs - * fixup data placement in crush to be host not osd driven - -== Major == - - * deploy more than 3 OSD hosts - -== Public Charm == - - * cephx support - * rel: remote OSD services (+bootstrap.osd keys for cephx) - * rel: remote MON clients (+client keys for cephx) - * rel: RADOS gateway (+client key for cephx) + * fix tunables (http://tracker.newdream.net/issues/2210) + * more than 192 PGs + * fixup data placement in crush to be host not osd driven + * cephx support From 57f33d4ec05ecdf7ffb86c469df22e0e64f5e758 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 12:18:01 +0100 Subject: [PATCH 0082/2699] Turn on cephx support by default --- 
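Note: `ceph auth get-or-create` is asymmetric in its output - the first call
creates the entity and prints the bare key, while later calls print a full
keyring section that has to be parsed. The get_osd_bootstrap_key() helper
added below handles both forms; its core logic, shown as a standalone sketch
(the helper name here is illustrative only):

    def parse_auth_output(raw):
        # 'create' prints just the key; 'get' prints a keyring section
        # of the form "[client.bootstrap-osd]\n\tkey = AQ..."
        if len(raw.splitlines()) == 1:
            return raw.strip()
        for line in raw.splitlines():
            if 'key' in line:
                return line.split(' = ')[1].strip()
        return None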
ceph-proxy/README | 3 --- ceph-proxy/TODO | 1 - ceph-proxy/hooks/ceph.py | 48 ++++++++++++++++++++++++++++++++++ ceph-proxy/hooks/hooks.py | 8 ++++-- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 2 +- 6 files changed, 56 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index a886a83e..6abd7a81 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -80,8 +80,5 @@ bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. -The charm does not currently implement cephx and its explicitly turned off in -the configuration generated for ceph. - See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph monitor cluster deployment strategies and pitfalls. diff --git a/ceph-proxy/TODO b/ceph-proxy/TODO index 2916c337..22e0889d 100644 --- a/ceph-proxy/TODO +++ b/ceph-proxy/TODO @@ -4,4 +4,3 @@ Ceph Charm * fix tunables (http://tracker.newdream.net/issues/2210) * more than 192 PGs * fixup data placement in crush to be host not osd driven - * cephx support diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 2a193d58..567ec3fa 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -71,3 +71,51 @@ def is_osd_disk(dev): except subprocess.CalledProcessError: pass return False + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + + +def get_osd_bootstrap_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.bootstrap-osd', + 'mon', '; '.join(_osd_bootstrap_caps) + ] + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 423b493a..c596ec30 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -142,6 +142,7 @@ def mon_relation(): bootstrap_monitor_cluster() ceph.wait_for_quorum() + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', @@ -161,6 +162,7 @@ def notify_osds(): for relid in utils.relation_ids('osd'): utils.relation_set(fsid=utils.config_get('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key(), rid=relid) utils.juju_log('INFO', 'End notify_osds.') @@ -171,8 +173,10 @@ def osd_relation(): if ceph.is_quorum(): utils.juju_log('INFO', - 'mon cluster in quorum - providing OSD with fsid') - utils.relation_set(fsid=utils.config_get('fsid')) + 'mon cluster in quorum - \ + providing OSD with fsid & keys') + utils.relation_set(fsid=utils.config_get('fsid'), + 
osd_bootstrap_key=ceph.get_osd_bootstrap_key()) else: utils.juju_log('INFO', 'mon cluster not in quorum - deferring fsid provision') diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 39f5b693..d15a2cc4 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -71 +80 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 32103fb5..072535f5 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = none + auth supported = cephx keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From fceb7c54fb8a619d8a99fdef137728649a7e7eb1 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 12:18:01 +0100 Subject: [PATCH 0083/2699] Turn on cephx support by default --- ceph-mon/README | 3 --- ceph-mon/TODO | 1 - ceph-mon/hooks/ceph.py | 48 ++++++++++++++++++++++++++++++++++++ ceph-mon/hooks/hooks.py | 8 ++++-- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 2 +- 6 files changed, 56 insertions(+), 8 deletions(-) diff --git a/ceph-mon/README b/ceph-mon/README index a886a83e..6abd7a81 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -80,8 +80,5 @@ bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. -The charm does not currently implement cephx and its explicitly turned off in -the configuration generated for ceph. - See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph monitor cluster deployment strategies and pitfalls. diff --git a/ceph-mon/TODO b/ceph-mon/TODO index 2916c337..22e0889d 100644 --- a/ceph-mon/TODO +++ b/ceph-mon/TODO @@ -4,4 +4,3 @@ Ceph Charm * fix tunables (http://tracker.newdream.net/issues/2210) * more than 192 PGs * fixup data placement in crush to be host not osd driven - * cephx support diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 2a193d58..567ec3fa 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -71,3 +71,51 @@ def is_osd_disk(dev): except subprocess.CalledProcessError: pass return False + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + + +def get_osd_bootstrap_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.bootstrap-osd', + 'mon', '; '.join(_osd_bootstrap_caps) + ] + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 423b493a..c596ec30 100755 --- 
a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -142,6 +142,7 @@ def mon_relation(): bootstrap_monitor_cluster() ceph.wait_for_quorum() + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', @@ -161,6 +162,7 @@ def notify_osds(): for relid in utils.relation_ids('osd'): utils.relation_set(fsid=utils.config_get('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key(), rid=relid) utils.juju_log('INFO', 'End notify_osds.') @@ -171,8 +173,10 @@ def osd_relation(): if ceph.is_quorum(): utils.juju_log('INFO', - 'mon cluster in quorum - providing OSD with fsid') - utils.relation_set(fsid=utils.config_get('fsid')) + 'mon cluster in quorum - \ + providing OSD with fsid & keys') + utils.relation_set(fsid=utils.config_get('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key()) else: utils.juju_log('INFO', 'mon cluster not in quorum - deferring fsid provision') diff --git a/ceph-mon/revision b/ceph-mon/revision index 39f5b693..d15a2cc4 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -71 +80 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 32103fb5..072535f5 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = none + auth supported = cephx keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From e2546ff54477560174b9055d5acfe3a572a569b3 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 12:19:16 +0100 Subject: [PATCH 0084/2699] Enable cephx support by default --- ceph-osd/TODO | 4 +-- ceph-osd/hooks/ceph.py | 48 ++++++++++++++++++++++++++++++++++++ ceph-osd/hooks/hooks.py | 1 + ceph-osd/revision | 2 +- ceph-osd/templates/ceph.conf | 2 +- 5 files changed, 53 insertions(+), 4 deletions(-) diff --git a/ceph-osd/TODO b/ceph-osd/TODO index e06e95c3..782a7a43 100644 --- a/ceph-osd/TODO +++ b/ceph-osd/TODO @@ -1,4 +1,4 @@ Ceph OSD Charm ============== - - * cephx support + + * Nothing TODO! 
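On the ceph-osd side, the counterpart of the mon charm's notify_osds() is the
mon relation hook changed below: the monitor cluster publishes
`osd_bootstrap_key` and the OSD units import it before preparing any disks.
Reduced to a sketch (the guard against a not-yet-provided key is illustrative
and not part of the hunk itself):

    # inside mon_relation(), once the mons have provided an fsid
    key = utils.relation_get('osd_bootstrap_key')
    if key:
        # writes /var/lib/ceph/bootstrap-osd/ceph.keyring via ceph-authtool
        ceph.import_osd_bootstrap_key(key)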
diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 2a193d58..567ec3fa 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -71,3 +71,51 @@ def is_osd_disk(dev): except subprocess.CalledProcessError: pass return False + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + + +def get_osd_bootstrap_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.bootstrap-osd', + 'mon', '; '.join(_osd_bootstrap_caps) + ] + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 20b5144f..96a15b3c 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -105,6 +105,7 @@ def mon_relation(): if get_fsid(): utils.juju_log('INFO', 'mon has provided fsid - scanning disks') emit_cephconf() + ceph.import_osd_bootstrap_key(utils.relation_get('osd_bootstrap_key')) for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', diff --git a/ceph-osd/revision b/ceph-osd/revision index 00750edc..b8626c4c 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -3 +4 diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 32103fb5..072535f5 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = none + auth supported = cephx keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From 59fce108fd58d2bbf6fbbb00a013ddecab4fb029 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 13:29:34 +0100 Subject: [PATCH 0085/2699] Add cephx support for radosgw --- ceph-proxy/hooks/ceph.py | 73 +++++++++++++++++++++--- ceph-proxy/hooks/hooks.py | 29 ++++++++++ ceph-proxy/hooks/radosgw-relation-joined | 1 + ceph-proxy/revision | 2 +- 4 files changed, 97 insertions(+), 8 deletions(-) create mode 120000 ceph-proxy/hooks/radosgw-relation-joined diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 567ec3fa..25873eb8 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -87,12 +87,14 @@ def import_osd_bootstrap_key(key): subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ 
rwx', + 'allow command mon getmap' + ] + } def get_osd_bootstrap_key(): @@ -104,8 +106,65 @@ def get_osd_bootstrap_key(): utils.get_unit_hostname() ), 'auth', 'get-or-create', 'client.bootstrap-osd', - 'mon', '; '.join(_osd_bootstrap_caps) ] + # Add capabilities + for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_radosgw_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.radosgw.gateway', + ] + # Add capabilities + for subsystem, subcaps in _radosgw_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c596ec30..6c703800 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -149,6 +149,7 @@ def mon_relation(): '--subsystem-match=block', '--action=add']) notify_osds() + notify_radosgws() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( @@ -168,6 +169,16 @@ def notify_osds(): utils.juju_log('INFO', 'End notify_osds.') +def notify_radosgws(): + utils.juju_log('INFO', 'Begin notify_radosgws.') + + for relid in utils.relation_ids('radosgw'): + utils.relation_set(radosgw_key=ceph.get_radosgw_key(), + rid=relid) + + utils.juju_log('INFO', 'End notify_radosgws.') + + def osd_relation(): utils.juju_log('INFO', 'Begin osd-relation hook.') @@ -184,6 +195,23 @@ def osd_relation(): utils.juju_log('INFO', 'End osd-relation hook.') +def radosgw_relation(): + utils.juju_log('INFO', 'Begin radosgw-relation hook.') + + utils.install('radosgw') # Install radosgw for admin tools + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - \ + providing radosgw with keys') + utils.relation_set(radosgw_key=ceph.get_radosgw_key()) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring key provision') + + utils.juju_log('INFO', 'End radosgw-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -205,6 +233,7 @@ def start(): 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, 'osd-relation-joined': osd_relation, + 'radosgw-relation-joined': radosgw_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) diff --git a/ceph-proxy/hooks/radosgw-relation-joined b/ceph-proxy/hooks/radosgw-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ 
b/ceph-proxy/hooks/radosgw-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/revision b/ceph-proxy/revision index d15a2cc4..dde92ddc 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -80 +82 From f600d25615527bf541d478dc7f9c76fee4fc0851 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 13:29:34 +0100 Subject: [PATCH 0086/2699] Add cephx support for radosgw --- ceph-mon/hooks/ceph.py | 73 +++++++++++++++++++++++--- ceph-mon/hooks/hooks.py | 29 ++++++++++ ceph-mon/hooks/radosgw-relation-joined | 1 + ceph-mon/revision | 2 +- 4 files changed, 97 insertions(+), 8 deletions(-) create mode 120000 ceph-mon/hooks/radosgw-relation-joined diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 567ec3fa..25873eb8 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -87,12 +87,14 @@ def import_osd_bootstrap_key(key): subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + } def get_osd_bootstrap_key(): @@ -104,8 +106,65 @@ def get_osd_bootstrap_key(): utils.get_unit_hostname() ), 'auth', 'get-or-create', 'client.bootstrap-osd', - 'mon', '; '.join(_osd_bootstrap_caps) ] + # Add capabilities + for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_radosgw_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.radosgw.gateway', + ] + # Add capabilities + for subsystem, subcaps in _radosgw_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c596ec30..6c703800 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -149,6 +149,7 @@ def mon_relation(): '--subsystem-match=block', '--action=add']) notify_osds() + notify_radosgws() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( @@ -168,6 +169,16 @@ def notify_osds(): utils.juju_log('INFO', 'End notify_osds.') +def notify_radosgws(): + utils.juju_log('INFO', 'Begin 
notify_radosgws.') + + for relid in utils.relation_ids('radosgw'): + utils.relation_set(radosgw_key=ceph.get_radosgw_key(), + rid=relid) + + utils.juju_log('INFO', 'End notify_radosgws.') + + def osd_relation(): utils.juju_log('INFO', 'Begin osd-relation hook.') @@ -184,6 +195,23 @@ def osd_relation(): utils.juju_log('INFO', 'End osd-relation hook.') +def radosgw_relation(): + utils.juju_log('INFO', 'Begin radosgw-relation hook.') + + utils.install('radosgw') # Install radosgw for admin tools + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - \ + providing radosgw with keys') + utils.relation_set(radosgw_key=ceph.get_radosgw_key()) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring key provision') + + utils.juju_log('INFO', 'End radosgw-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -205,6 +233,7 @@ def start(): 'mon-relation-departed': mon_relation, 'mon-relation-joined': mon_relation, 'osd-relation-joined': osd_relation, + 'radosgw-relation-joined': radosgw_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) diff --git a/ceph-mon/hooks/radosgw-relation-joined b/ceph-mon/hooks/radosgw-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/radosgw-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/revision b/ceph-mon/revision index d15a2cc4..dde92ddc 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -80 +82 From eb77fbe9ca0fe2993e204200b6eaf6f9d7d120a5 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 13:35:06 +0100 Subject: [PATCH 0087/2699] Added cephx support --- ceph-radosgw/README | 8 ++++++-- ceph-radosgw/TODO | 1 - ceph-radosgw/hooks/hooks.py | 15 +++++++++++---- ceph-radosgw/revision | 2 +- ceph-radosgw/templates/ceph.conf | 2 +- 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/README b/ceph-radosgw/README index 9bc2552d..af3872be 100644 --- a/ceph-radosgw/README +++ b/ceph-radosgw/README @@ -30,9 +30,13 @@ The gateway can be accessed over port 80 (as show in juju status exposed ports). Note that you will need to login to one of the service units supporting the -ceph-radosgw charm to generate some access credentials:: +ceph charm to generate some access credentials:: - radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph" + juju ssh ceph/0 \ + 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"' + +For security reasons the ceph-radosgw charm is not setup with appropriate +permissions to administer the ceph cluster. 
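
Annotation (illustrative sketch, not part of the patch): radosgw-admin can emit the
generated credentials as JSON, so the access/secret pair for the S3-style API can
be extracted programmatically rather than read off the terminal. The exact output
shape here is an assumption about the radosgw-admin of this era:

    import json
    import subprocess

    out = subprocess.check_output(
        ['radosgw-admin', 'user', 'create',
         '--uid=ubuntu', '--display-name=Ubuntu Ceph'])
    creds = json.loads(out)['keys'][0]
    # creds['access_key'] and creds['secret_key'] feed any S3-style client
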
Scale-out ========= diff --git a/ceph-radosgw/TODO b/ceph-radosgw/TODO index cd30ef3b..75ceb8d5 100644 --- a/ceph-radosgw/TODO +++ b/ceph-radosgw/TODO @@ -1,5 +1,4 @@ RADOS Gateway Charm ------------------- - * cephx support * Improved process control of radosgw daemon (to many restarts) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 23f3babb..ddff6e22 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -12,6 +12,7 @@ import sys import glob import os +import ceph import utils @@ -99,7 +100,10 @@ def get_mon_hosts(): def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() - restart() + key = utils.relation_get('radosgw_key') + if key != "": + ceph.import_radosgw_key(key) + restart() # TODO figure out a better way todo this utils.juju_log('INFO', 'End mon-relation hook.') @@ -116,14 +120,18 @@ def upgrade_charm(): def start(): - # In case we're being redeployed to the same machines, try - # to make sure everything is running as soon as possible. + subprocess.call(['service', 'radosgw', 'start']) + utils.expose(port=80) + + +def stop(): subprocess.call(['service', 'radosgw', 'start']) utils.expose(port=80) def restart(): subprocess.call(['service', 'radosgw', 'restart']) + utils.expose(port=80) utils.do_hooks({ @@ -132,7 +140,6 @@ def restart(): 'mon-relation-departed': mon_relation, 'mon-relation-changed': mon_relation, 'gateway-relation-joined': gateway_relation, - 'start': start, 'upgrade-charm': config_changed, # same function ATM }) diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index b4de3947..8351c193 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -11 +14 diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 73389c63..9dcbbd7c 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = none + auth supported = cephx mon host = {{ mon_hosts }} [client.radosgw.gateway] From dc2d5e5f493b5b05f6aba33d9b9991f46017e24b Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 14:04:32 +0100 Subject: [PATCH 0088/2699] Rename ceph-brolin -> ceph --- ceph-proxy/metadata.yaml | 4 ++-- ceph-proxy/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 3f0147da..92a2385a 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,4 +1,4 @@ -name: ceph-brolin +name: ceph summary: Highly scalable distributed storage maintainer: James Page , Paul Collins @@ -7,7 +7,7 @@ description: | excellent performance, reliability, and scalability. 
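
Annotation: note that in the ceph-radosgw hooks.py hunk above, the new stop()
inherits the unchanged context lines and so still runs "service radosgw start".
A corrected sketch of what stop() presumably intends (not what the patch lands):

    def stop():
        # actually stop the daemon instead of (re)starting it
        subprocess.call(['service', 'radosgw', 'stop'])
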
peers: mon: - interface: ceph-brolin + interface: ceph provides: client: interface: ceph-client diff --git a/ceph-proxy/revision b/ceph-proxy/revision index dde92ddc..76a8b2b7 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -82 +83 From e12817a86d60dce74d3dbe0d8cd8ef52d72c775d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 14:04:32 +0100 Subject: [PATCH 0089/2699] Rename ceph-brolin -> ceph --- ceph-mon/metadata.yaml | 4 ++-- ceph-mon/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 3f0147da..92a2385a 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -1,4 +1,4 @@ -name: ceph-brolin +name: ceph summary: Highly scalable distributed storage maintainer: James Page , Paul Collins @@ -7,7 +7,7 @@ description: | excellent performance, reliability, and scalability. peers: mon: - interface: ceph-brolin + interface: ceph provides: client: interface: ceph-client diff --git a/ceph-mon/revision b/ceph-mon/revision index dde92ddc..76a8b2b7 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -82 +83 From d5b27f932a6839513eae2cc8939f951cdb9db70e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:11:19 +0100 Subject: [PATCH 0090/2699] Added client hooks with basic permissions for cephx --- ceph-proxy/hooks/ceph.py | 44 ++++++++----------------- ceph-proxy/hooks/client-relation-joined | 1 + ceph-proxy/hooks/hooks.py | 28 ++++++++++++++++ 3 files changed, 43 insertions(+), 30 deletions(-) create mode 120000 ceph-proxy/hooks/client-relation-joined diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 25873eb8..33bbcf92 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -98,34 +98,7 @@ def import_osd_bootstrap_key(key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.bootstrap-osd', - ] - # Add capabilities - for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return get_named_key('bootstrap-osd', _osd_bootstrap_caps) _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -150,6 +123,17 @@ def import_radosgw_key(key): def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_named_key(name, caps=None): + caps = caps or _default_caps cmd = [ 'ceph', '--name', 'mon.', @@ -157,10 +141,10 @@ def get_radosgw_key(): '/var/lib/ceph/mon/ceph-{}/keyring'.format( utils.get_unit_hostname() ), - 'auth', 'get-or-create', 'client.radosgw.gateway', + 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in _radosgw_caps.iteritems(): + for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), diff --git a/ceph-proxy/hooks/client-relation-joined b/ceph-proxy/hooks/client-relation-joined new file mode 
120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/client-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 6c703800..7d97af40 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -179,6 +179,17 @@ def notify_radosgws(): utils.juju_log('INFO', 'End notify_radosgws.') +def notify_client(): + utils.juju_log('INFO', 'Begin notify_client.') + + for relid in utils.relation_ids('client'): + service_name = utils.relation_list(relid)[0].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name), + rid=relid) + + utils.juju_log('INFO', 'End notify_client.') + + def osd_relation(): utils.juju_log('INFO', 'Begin osd-relation hook.') @@ -212,6 +223,22 @@ def radosgw_relation(): utils.juju_log('INFO', 'End radosgw-relation hook.') +def client_relation(): + utils.juju_log('INFO', 'Begin client-relation hook.') + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - \ + providing client with keys') + service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name)) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring key provision') + + utils.juju_log('INFO', 'End client-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -234,6 +261,7 @@ def start(): 'mon-relation-joined': mon_relation, 'osd-relation-joined': osd_relation, 'radosgw-relation-joined': radosgw_relation, + 'client-relation-joined': client_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) From 2c5726f5b31154883ab88ac3b2efd4cb8e782950 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:11:19 +0100 Subject: [PATCH 0091/2699] Added client hooks with basic permissions for cephx --- ceph-mon/hooks/ceph.py | 44 +++++++++------------------ ceph-mon/hooks/client-relation-joined | 1 + ceph-mon/hooks/hooks.py | 28 +++++++++++++++++ 3 files changed, 43 insertions(+), 30 deletions(-) create mode 120000 ceph-mon/hooks/client-relation-joined diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 25873eb8..33bbcf92 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -98,34 +98,7 @@ def import_osd_bootstrap_key(key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.bootstrap-osd', - ] - # Add capabilities - for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return get_named_key('bootstrap-osd', _osd_bootstrap_caps) _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -150,6 +123,17 @@ def import_radosgw_key(key): def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_named_key(name, caps=None): + caps = caps or _default_caps cmd = [ 'ceph', '--name', 'mon.', 
@@ -157,10 +141,10 @@ def get_radosgw_key(): '/var/lib/ceph/mon/ceph-{}/keyring'.format( utils.get_unit_hostname() ), - 'auth', 'get-or-create', 'client.radosgw.gateway', + 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in _radosgw_caps.iteritems(): + for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), diff --git a/ceph-mon/hooks/client-relation-joined b/ceph-mon/hooks/client-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/client-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 6c703800..7d97af40 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -179,6 +179,17 @@ def notify_radosgws(): utils.juju_log('INFO', 'End notify_radosgws.') +def notify_client(): + utils.juju_log('INFO', 'Begin notify_client.') + + for relid in utils.relation_ids('client'): + service_name = utils.relation_list(relid)[0].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name), + rid=relid) + + utils.juju_log('INFO', 'End notify_client.') + + def osd_relation(): utils.juju_log('INFO', 'Begin osd-relation hook.') @@ -212,6 +223,22 @@ def radosgw_relation(): utils.juju_log('INFO', 'End radosgw-relation hook.') +def client_relation(): + utils.juju_log('INFO', 'Begin client-relation hook.') + + if ceph.is_quorum(): + utils.juju_log('INFO', + 'mon cluster in quorum - \ + providing client with keys') + service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name)) + else: + utils.juju_log('INFO', + 'mon cluster not in quorum - deferring key provision') + + utils.juju_log('INFO', 'End client-relation hook.') + + def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() @@ -234,6 +261,7 @@ def start(): 'mon-relation-joined': mon_relation, 'osd-relation-joined': osd_relation, 'radosgw-relation-joined': radosgw_relation, + 'client-relation-joined': client_relation, 'start': start, 'upgrade-charm': upgrade_charm, }) From 608a8d6020c8a2f75f4d7cafb90831ab85980bd1 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:12:25 +0100 Subject: [PATCH 0092/2699] Added ceph hook for cephx handling --- ceph-radosgw/hooks/ceph.py | 180 +++++++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 ceph-radosgw/hooks/ceph.py diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py new file mode 100644 index 00000000..25873eb8 --- /dev/null +++ b/ceph-radosgw/hooks/ceph.py @@ -0,0 +1,180 @@ + +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Paul Collins +# + +import json +import subprocess +import time +import utils +import os + +QUORUM = ['leader', 'peon'] + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + if line.startswith( + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + ): + return True + except subprocess.CalledProcessError: + pass + return False + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + } + + +def get_osd_bootstrap_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.bootstrap-osd', + ] + # Add capabilities + for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_radosgw_key(): + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + utils.get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.radosgw.gateway', + ] + # Add capabilities + for subsystem, subcaps in _radosgw_caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + output = subprocess.check_output(cmd).strip() # IGNORE:E1103 + # get-or-create appears to have different output depending + # 
on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(output.splitlines()) == 1: + key = output + else: + for element in output.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key From ca5c25eb1fd67366d2d02b375d4c4cf7b91a364e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:13:31 +0100 Subject: [PATCH 0093/2699] Set config setting to something sensible for quantal --- ceph-proxy/config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index b8314087..869e7ff5 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -24,7 +24,7 @@ options: very large clusters. osd-devices: type: string - default: /dev/sdb /dev/sdc /dev/sdd /dev/sde + default: /dev/vdb description: | The devices to format and set up as osd volumes. . @@ -32,7 +32,6 @@ options: used across all service units. source: type: string - default: ppa:ceph-ubuntu/dev description: | Optional configuration to support use of additional sources such as: . From 7c3dced19bea3b373fbf64247e255120b07b344d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:13:31 +0100 Subject: [PATCH 0094/2699] Set config setting to something sensible for quantal --- ceph-mon/config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index b8314087..869e7ff5 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -24,7 +24,7 @@ options: very large clusters. osd-devices: type: string - default: /dev/sdb /dev/sdc /dev/sdd /dev/sde + default: /dev/vdb description: | The devices to format and set up as osd volumes. . @@ -32,7 +32,6 @@ options: used across all service units. source: type: string - default: ppa:ceph-ubuntu/dev description: | Optional configuration to support use of additional sources such as: . From c25d6d918c84a97d6ec0701b49dbf851aab5e39f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:14:01 +0100 Subject: [PATCH 0095/2699] Set config setting to something sensible for quantal --- ceph-osd/config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 4ed9a914..9009b403 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -1,7 +1,7 @@ options: osd-devices: type: string - default: /dev/sdb /dev/sdc /dev/sdd /dev/sde + default: /dev/vdb description: | The devices to format and set up as osd volumes. . @@ -9,7 +9,6 @@ options: used across all service units. source: type: string - default: ppa:ceph-ubuntu/dev description: | Optional configuration to support use of additional sources such as: . From 80ab73278d042bf5ce91023ca47216d8e5579d63 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Oct 2012 16:14:23 +0100 Subject: [PATCH 0096/2699] Set config setting to something sensible for quantal --- ceph-radosgw/config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 1312bec9..5d9b4c40 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -1,7 +1,6 @@ options: source: type: string - default: ppa:ceph-ubuntu/dev description: | Optional configuration to support use of additional sources such as: . 
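
Annotation (illustrative sketch, not part of the patches): the "auth get-or-create"
output handling duplicated across the ceph.py copies above reduces to a small
parser. Restating the logic from those hunks as a standalone helper:

    def parse_key(output):
        # 'create' prints just the base64 key; 'get' prints a section like
        #   [client.bootstrap-osd]
        #           key = AQD...==
        # so a single-line response is the key itself, while anything longer
        # needs the 'key = ' line picked out.
        lines = output.strip().splitlines()
        if len(lines) == 1:
            return lines[0].strip()
        for line in lines:
            if 'key' in line:
                return line.split(' = ')[1].strip()
        return None
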
From 48c1e13fee349f2a5a1221d19ea4e045b36cff0b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:02:32 +0100 Subject: [PATCH 0097/2699] Updated contact info in README --- ceph-proxy/README | 8 ++++++++ ceph-proxy/hooks/hooks.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index 6abd7a81..07942748 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -54,6 +54,14 @@ Boot things up by using: By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices. + +Contact Information +=================== + +Author: Paul Collins , + James Page +Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug +Location: http://jujucharms.com/charms/ceph Technical Bootnotes =================== diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7d97af40..a114a05c 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -26,7 +26,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk') + utils.install('ceph', 'gdisk', 'ntp') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From d9513c8a4bfbdae361956e2ed89119fc8fd60457 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:02:32 +0100 Subject: [PATCH 0098/2699] Updated contact info in README --- ceph-mon/README | 8 ++++++++ ceph-mon/hooks/hooks.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ceph-mon/README b/ceph-mon/README index 6abd7a81..07942748 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -54,6 +54,14 @@ Boot things up by using: By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices. + +Contact Information +=================== + +Author: Paul Collins , + James Page +Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug +Location: http://jujucharms.com/charms/ceph Technical Bootnotes =================== diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7d97af40..a114a05c 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -26,7 +26,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk') + utils.install('ceph', 'gdisk', 'ntp') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From b9ea61e2b077e32bfa4c3db906c24a534525cd4f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:02:51 +0100 Subject: [PATCH 0099/2699] Updated contact info in README --- ceph-osd/README | 8 +++++++- ceph-osd/hooks/hooks.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ceph-osd/README b/ceph-osd/README index 4ed39562..0f3173af 100644 --- a/ceph-osd/README +++ b/ceph-osd/README @@ -39,4 +39,10 @@ You can then deploy this charm by simple doing:: Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd charm which will scan for the configured storage devices and add them to the pool of available storage. 
- \ No newline at end of file + +Contact Information +=================== + +Author: James Page +Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug +Location: http://jujucharms.com/charms/ceph-osd \ No newline at end of file diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 96a15b3c..b1edb38a 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -25,7 +25,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk') + utils.install('ceph', 'gdisk', 'ntp') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From d8fa4665aef18622125fd3d11d69b1053310554a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:03:18 +0100 Subject: [PATCH 0100/2699] Updated contact info in README --- ceph-radosgw/README | 7 +++++++ ceph-radosgw/hooks/hooks.py | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/README b/ceph-radosgw/README index af3872be..808f720c 100644 --- a/ceph-radosgw/README +++ b/ceph-radosgw/README @@ -52,6 +52,13 @@ and then stick a HA loadbalancer on the front:: Should give you a bit more bang on the front end if you really need it. +Contact Information +=================== + +Author: James Page +Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-radosgw/+filebug +Location: http://jujucharms.com/charms/ceph-radosgw + Bootnotes ========= diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ddff6e22..cfaeeb43 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -28,7 +28,8 @@ def install(): utils.configure_source() utils.install('radosgw', 'libapache2-mod-fastcgi', - 'apache2') + 'apache2', + 'ntp') utils.juju_log('INFO', 'End install hook.') From 389c21dfc3b0a1bbf70b605d4096bda8ec0c5737 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:40:44 +0100 Subject: [PATCH 0101/2699] TODO race condition and notify clients when cluster becomes quorate --- ceph-proxy/hooks/hooks.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a114a05c..7ed993bf 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -143,6 +143,10 @@ def mon_relation(): ceph.wait_for_quorum() + # TODO:Potential race condition between ceph-create-keys + # completing and the upstart hotplug OSD device stuff + # running which is dependent on the osd bootstrap keyring + # being in place. for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', @@ -150,6 +154,7 @@ def mon_relation(): notify_osds() notify_radosgws() + notify_client() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( From 2564317199c48879eb655341e4db2a231295fd70 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 10:40:44 +0100 Subject: [PATCH 0102/2699] TODO race condition and notify clients when cluster becomes quorate --- ceph-mon/hooks/hooks.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a114a05c..7ed993bf 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -143,6 +143,10 @@ def mon_relation(): ceph.wait_for_quorum() + # TODO:Potential race condition between ceph-create-keys + # completing and the upstart hotplug OSD device stuff + # running which is dependent on the osd bootstrap keyring + # being in place. 
for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', @@ -150,6 +154,7 @@ def mon_relation(): notify_osds() notify_radosgws() + notify_client() else: utils.juju_log('INFO', 'Not enough mons ({}), punting.'.format( From 20b04140cec42dcaca2cb8ee745416cd2dd2e456 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 11:20:48 +0100 Subject: [PATCH 0103/2699] Made ephemeral unmounting a configurable option --- ceph-proxy/config.yaml | 9 +++++++++ ceph-proxy/hooks/hooks.py | 5 +++-- ceph-proxy/revision | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 869e7ff5..bdc15200 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -30,6 +30,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + ephemeral-unmount: + type: string + description: | + Cloud instances provider ephermeral storage which is normally mounted + on /mnt. + . + Providing this option will force an unmount of the ephemeral device + so that it can be used as a OSD storage device. This is useful for + testing purposes (cloud deployment is not a typical use case). source: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7ed993bf..bc5c163a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -115,8 +115,9 @@ def bootstrap_monitor_cluster(): def osdize(dev): - # XXX hack for instances - subprocess.call(['umount', '/mnt']) + e_mountpoint = utils.config_get('ephemeral-unmount') + if e_mountpoint != "": + subprocess.call(['umount', e_mountpoint]) if ceph.is_osd_disk(dev): utils.juju_log('INFO', diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 76a8b2b7..871727de 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -83 +84 From c9b96bc62f660b13a013c4edc06d4274a8e14660 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 11:20:48 +0100 Subject: [PATCH 0104/2699] Made ephemeral unmounting a configurable option --- ceph-mon/config.yaml | 9 +++++++++ ceph-mon/hooks/hooks.py | 5 +++-- ceph-mon/revision | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 869e7ff5..bdc15200 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -30,6 +30,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + ephemeral-unmount: + type: string + description: | + Cloud instances provider ephermeral storage which is normally mounted + on /mnt. + . + Providing this option will force an unmount of the ephemeral device + so that it can be used as a OSD storage device. This is useful for + testing purposes (cloud deployment is not a typical use case). 
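
Annotation (illustrative sketch, not part of the patch): the new guard in osdize()
compares the option against the empty string. If utils.config_get were ever to
report an unset option as None rather than "", that comparison would still pass
and umount would be called with None; a defensive variant would be:

    e_mountpoint = utils.config_get('ephemeral-unmount')
    # truthiness covers both None and "" for an unset option
    if e_mountpoint:
        subprocess.call(['umount', e_mountpoint])
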
source: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7ed993bf..bc5c163a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -115,8 +115,9 @@ def bootstrap_monitor_cluster(): def osdize(dev): - # XXX hack for instances - subprocess.call(['umount', '/mnt']) + e_mountpoint = utils.config_get('ephemeral-unmount') + if e_mountpoint != "": + subprocess.call(['umount', e_mountpoint]) if ceph.is_osd_disk(dev): utils.juju_log('INFO', diff --git a/ceph-mon/revision b/ceph-mon/revision index 76a8b2b7..871727de 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -83 +84 From 273aa7633bada571a4d5f5d19c5a59b16ce85335 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 11:21:04 +0100 Subject: [PATCH 0105/2699] Made ephemeral unmounting a configurable option --- ceph-osd/config.yaml | 9 +++++++++ ceph-osd/hooks/hooks.py | 5 +++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 9009b403..93de8291 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -7,6 +7,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + ephemeral-unmount: + type: string + description: | + Cloud instances provider ephermeral storage which is normally mounted + on /mnt. + . + Providing this option will force an unmount of the ephemeral device + so that it can be used as a OSD storage device. This is useful for + testing purposes (cloud deployment is not a typical use case). source: type: string description: | diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index b1edb38a..74183b2d 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -81,8 +81,9 @@ def get_fsid(): def osdize(dev): - # XXX hack for instances - subprocess.call(['umount', '/mnt']) + e_mountpoint = utils.config_get('ephemeral-unmount') + if e_mountpoint != "": + subprocess.call(['umount', e_mountpoint]) if ceph.is_osd_disk(dev): utils.juju_log('INFO', From 6cd09311f3e74151c5815c9d67dfee21c6420e00 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 11:54:19 +0100 Subject: [PATCH 0106/2699] Remove references to brolin --- ceph-proxy/README | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/README b/ceph-proxy/README index 07942748..7d5d3de7 100644 --- a/ceph-proxy/README +++ b/ceph-proxy/README @@ -40,7 +40,7 @@ cluster. At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): - ceph-brolin: + ceph: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde @@ -49,7 +49,7 @@ Specifying the osd-devices to use is also a good idea. 
Boot things up by using: - juju deploy -n 3 --config ceph.yaml ceph-brolin + juju deploy -n 3 --config ceph.yaml ceph By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding From da5854980e28010f33a9f2ec0f2970706d54e9db Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 11:54:19 +0100 Subject: [PATCH 0107/2699] Remove references to brolin --- ceph-mon/README | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/README b/ceph-mon/README index 07942748..7d5d3de7 100644 --- a/ceph-mon/README +++ b/ceph-mon/README @@ -40,7 +40,7 @@ cluster. At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): - ceph-brolin: + ceph: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde @@ -49,7 +49,7 @@ Specifying the osd-devices to use is also a good idea. Boot things up by using: - juju deploy -n 3 --config ceph.yaml ceph-brolin + juju deploy -n 3 --config ceph.yaml ceph By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding From c69867305cc22a4f84d40fda16a4bdc0a31141ba Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 18:21:09 +0100 Subject: [PATCH 0108/2699] Gate initialization of OSD devices on presense of bootstrap keyring --- ceph-proxy/hooks/ceph.py | 5 +++++ ceph-proxy/hooks/hooks.py | 7 +++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 33bbcf92..440a0464 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -75,6 +75,11 @@ def is_osd_disk(dev): _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +def wait_for_bootstrap_osd_keyring(): + while (not os.path.exists(_bootstrap_keyring)): + time.sleep(3) + + def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index bc5c163a..47616c9a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -144,10 +144,9 @@ def mon_relation(): ceph.wait_for_quorum() - # TODO:Potential race condition between ceph-create-keys - # completing and the upstart hotplug OSD device stuff - # running which is dependent on the osd bootstrap keyring - # being in place. 
+ # bootstrap keyring must be present before OSD devices + # can be activated + ceph.wait_for_bootstrap_osd_keyring() for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', From 42171008708fb347a5066fd7fa5aea9406dc7edf Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Oct 2012 18:21:09 +0100 Subject: [PATCH 0109/2699] Gate initialization of OSD devices on presense of bootstrap keyring --- ceph-mon/hooks/ceph.py | 5 +++++ ceph-mon/hooks/hooks.py | 7 +++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 33bbcf92..440a0464 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -75,6 +75,11 @@ def is_osd_disk(dev): _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +def wait_for_bootstrap_osd_keyring(): + while (not os.path.exists(_bootstrap_keyring)): + time.sleep(3) + + def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index bc5c163a..47616c9a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -144,10 +144,9 @@ def mon_relation(): ceph.wait_for_quorum() - # TODO:Potential race condition between ceph-create-keys - # completing and the upstart hotplug OSD device stuff - # running which is dependent on the osd bootstrap keyring - # being in place. + # bootstrap keyring must be present before OSD devices + # can be activated + ceph.wait_for_bootstrap_osd_keyring() for dev in utils.config_get('osd-devices').split(' '): osdize(dev) subprocess.call(['udevadm', 'trigger', From 7fd18342b701eec907cb037b738257ac6b236ee0 Mon Sep 17 00:00:00 2001 From: James Page Date: Sat, 13 Oct 2012 19:38:47 +0100 Subject: [PATCH 0110/2699] Refactoring to: 1) osdize disks in config-changed hook - this can happen in advance of bootstrap 2) use bootstrap keyring presence to indicate bootstrapped in mon-relation --- ceph-proxy/hooks/ceph.py | 18 ++++++++++++++++-- ceph-proxy/hooks/hooks.py | 26 ++++++++------------------ 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 440a0464..9cb8985f 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -72,11 +72,25 @@ def is_osd_disk(dev): pass return False + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -def wait_for_bootstrap_osd_keyring(): - while (not os.path.exists(_bootstrap_keyring)): +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while (not is_bootstrapped()): time.sleep(3) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 47616c9a..bea64373 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -59,11 +59,11 @@ def config_changed(): emit_cephconf() - if ceph.is_quorum(): - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + for dev in utils.config_get('osd-devices').split(' '): + osdize(dev) + + if ceph.is_bootstrapped(): + ceph.rescan_osd_devices() utils.juju_log('INFO', 'End config-changed hook.') @@ -141,17 +141,8 @@ def mon_relation(): moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() - - 
ceph.wait_for_quorum() - - # bootstrap keyring must be present before OSD devices - # can be activated - ceph.wait_for_bootstrap_osd_keyring() - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) - + ceph.wait_for_bootstrap() + ceph.rescan_osd_devices() notify_osds() notify_radosgws() notify_client() @@ -200,8 +191,7 @@ def osd_relation(): if ceph.is_quorum(): utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing OSD with fsid & keys') + 'mon cluster in quorum - providing fsid & keys') utils.relation_set(fsid=utils.config_get('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key()) else: From 5432246b9c508e7f283ef17fc5ccdd1f18882e95 Mon Sep 17 00:00:00 2001 From: James Page Date: Sat, 13 Oct 2012 19:38:47 +0100 Subject: [PATCH 0111/2699] Refactoring to: 1) osdize disks in config-changed hook - this can happen in advance of bootstrap 2) use bootstrap keyring presence to indicate bootstrapped in mon-relation --- ceph-mon/hooks/ceph.py | 18 ++++++++++++++++-- ceph-mon/hooks/hooks.py | 26 ++++++++------------------ 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 440a0464..9cb8985f 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -72,11 +72,25 @@ def is_osd_disk(dev): pass return False + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -def wait_for_bootstrap_osd_keyring(): - while (not os.path.exists(_bootstrap_keyring)): +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while (not is_bootstrapped()): time.sleep(3) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 47616c9a..bea64373 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -59,11 +59,11 @@ def config_changed(): emit_cephconf() - if ceph.is_quorum(): - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + for dev in utils.config_get('osd-devices').split(' '): + osdize(dev) + + if ceph.is_bootstrapped(): + ceph.rescan_osd_devices() utils.juju_log('INFO', 'End config-changed hook.') @@ -141,17 +141,8 @@ def mon_relation(): moncount = int(utils.config_get('monitor-count')) if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() - - ceph.wait_for_quorum() - - # bootstrap keyring must be present before OSD devices - # can be activated - ceph.wait_for_bootstrap_osd_keyring() - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) - + ceph.wait_for_bootstrap() + ceph.rescan_osd_devices() notify_osds() notify_radosgws() notify_client() @@ -200,8 +191,7 @@ def osd_relation(): if ceph.is_quorum(): utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing OSD with fsid & keys') + 'mon cluster in quorum - providing fsid & keys') utils.relation_set(fsid=utils.config_get('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key()) else: From c3da23236bb8d801d3511ed40d04d383adabad78 Mon Sep 17 00:00:00 2001 From: James Page Date: Sat, 13 Oct 2012 20:52:35 +0100 Subject: [PATCH 0112/2699] Support use of single node ceph --- ceph-proxy/hooks/hooks.py | 9 +++++++-- ceph-proxy/revision | 2 +- 2 files 
changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index bea64373..570af21c 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -62,6 +62,12 @@ def config_changed(): for dev in utils.config_get('osd-devices').split(' '): osdize(dev) + # Support use of single node ceph + if (not ceph.is_bootstrapped() and + int(utils.config_get('monitor-count')) == 1): + bootstrap_monitor_cluster() + ceph.wait_for_bootstrap() + if ceph.is_bootstrapped(): ceph.rescan_osd_devices() @@ -245,8 +251,7 @@ def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. subprocess.call(['start', 'ceph-mon-all-starter']) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + ceph.rescan_osd_devices() utils.do_hooks({ diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 871727de..a862eb84 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -84 +85 From cd920905df56e7b54206193e399497c772259d01 Mon Sep 17 00:00:00 2001 From: James Page Date: Sat, 13 Oct 2012 20:52:35 +0100 Subject: [PATCH 0113/2699] Support use of single node ceph --- ceph-mon/hooks/hooks.py | 9 +++++++-- ceph-mon/revision | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index bea64373..570af21c 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -62,6 +62,12 @@ def config_changed(): for dev in utils.config_get('osd-devices').split(' '): osdize(dev) + # Support use of single node ceph + if (not ceph.is_bootstrapped() and + int(utils.config_get('monitor-count')) == 1): + bootstrap_monitor_cluster() + ceph.wait_for_bootstrap() + if ceph.is_bootstrapped(): ceph.rescan_osd_devices() @@ -245,8 +251,7 @@ def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. subprocess.call(['start', 'ceph-mon-all-starter']) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + ceph.rescan_osd_devices() utils.do_hooks({ diff --git a/ceph-mon/revision b/ceph-mon/revision index 871727de..a862eb84 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -84 +85 From 0cae49bcc0b29f420657e8dd2623752d3559e02a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 15 Oct 2012 11:13:36 +0100 Subject: [PATCH 0114/2699] Update bootstrap handling to ensure key and fsid are present before trying to init and start OSD devices. Refactor osd device rescanning into ceph package. 
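
Annotation (illustrative sketch, not part of the patches): wait_for_bootstrap() as
introduced above polls forever on a 3-second loop. A bounded variant, shown here
only as possible hardening around the charm's is_bootstrapped(), would keep a
wedged mon from hanging the hook indefinitely:

    import time

    def wait_for_bootstrap(timeout=600):
        # same 3-second poll as the charm, but give up eventually
        deadline = time.time() + timeout
        while not is_bootstrapped():
            if time.time() > deadline:
                raise RuntimeError('timed out waiting for bootstrap keyring')
            time.sleep(3)
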
--- ceph-osd/hooks/ceph.py | 13 +++++++++++++ ceph-osd/hooks/hooks.py | 27 ++++++++++++++------------- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 567ec3fa..3e466c50 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -72,9 +72,22 @@ def is_osd_disk(dev): pass return False + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 74183b2d..5a43ce00 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -31,8 +31,11 @@ def install(): def emit_cephconf(): + mon_hosts = get_mon_hosts() + utils.juju_log('INFO', 'Monitor hosts are ' + repr(mon_hosts)) + cephcontext = { - 'mon_hosts': ' '.join(get_mon_hosts()), + 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid() } @@ -43,15 +46,12 @@ def emit_cephconf(): def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') - utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) - - if get_fsid(): - utils.juju_log('INFO', 'cluster fsid detected, rescanning disks') + if ceph.is_bootstrapped(): + utils.juju_log('INFO', 'ceph bootstrapped, rescanning disks') emit_cephconf() for dev in utils.config_get('osd-devices').split(' '): osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + ceph.rescan_osd_devices() utils.juju_log('INFO', 'End config-changed hook.') @@ -103,17 +103,18 @@ def osdize(dev): def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') - if get_fsid(): - utils.juju_log('INFO', 'mon has provided fsid - scanning disks') + bootstrap_key = utils.relation_get('osd_bootstrap_key') + if (get_fsid() and + bootstrap_key != ""): + utils.juju_log('INFO', 'mon has provided fsid & key- scanning disks') emit_cephconf() - ceph.import_osd_bootstrap_key(utils.relation_get('osd_bootstrap_key')) + ceph.import_osd_bootstrap_key(bootstrap_key) for dev in utils.config_get('osd-devices').split(' '): osdize(dev) - subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + ceph.rescan_osd_devices() else: utils.juju_log('INFO', - 'mon cluster has not yet provided fsid') + 'mon cluster has not yet provided fsid & key') utils.juju_log('INFO', 'End mon-relation hook.') From 0b35ba6975efbeab85956defbe882e69b5c60559 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 15:13:45 +1300 Subject: [PATCH 0115/2699] if no key ID is supplied, do not try to apt-key import --- ceph-proxy/hooks/utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 7e2da93c..8efee2a5 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -62,12 +62,13 @@ def configure_source(): subprocess.check_call(cmd) if source.startswith('http:'): key = config_get('key') - cmd = [ - 'apt-key', - 'import', - key - ] - subprocess.check_call(cmd) + if key != "": + cmd = [ + 'apt-key', + 'import', + key + ] + subprocess.check_call(cmd) cmd = [ 'apt-get', 'update' From 5659a6a4d97eebb27d1897f4c327fe0dfeaf63fc Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 15:13:45 +1300 Subject: [PATCH 
0116/2699] if no key ID is supplied, do not try to apt-key import --- ceph-mon/hooks/utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 7e2da93c..8efee2a5 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -62,12 +62,13 @@ def configure_source(): subprocess.check_call(cmd) if source.startswith('http:'): key = config_get('key') - cmd = [ - 'apt-key', - 'import', - key - ] - subprocess.check_call(cmd) + if key != "": + cmd = [ + 'apt-key', + 'import', + key + ] + subprocess.check_call(cmd) cmd = [ 'apt-get', 'update' From b9e4f35a83989664ec2916f5729c2e2d879f746b Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 15:30:52 +1300 Subject: [PATCH 0117/2699] handle http repositories ourselves since add-apt-repository fails so badly --- ceph-proxy/hooks/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 8efee2a5..8ceb1d3e 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -53,14 +53,15 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): def configure_source(): source = config_get('source') if (source.startswith('ppa:') or - source.startswith('cloud:') or - source.startswith('http:')): + source.startswith('cloud:')): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) if source.startswith('http:'): + with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: + apt.write("deb " + source + "\n") key = config_get('key') if key != "": cmd = [ From 03041e204ac644644ecc62a8e43611da28febce5 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 15:30:52 +1300 Subject: [PATCH 0118/2699] handle http repositories ourselves since add-apt-repository fails so badly --- ceph-mon/hooks/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 8efee2a5..8ceb1d3e 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -53,14 +53,15 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): def configure_source(): source = config_get('source') if (source.startswith('ppa:') or - source.startswith('cloud:') or - source.startswith('http:')): + source.startswith('cloud:')): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) if source.startswith('http:'): + with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: + apt.write("deb " + source + "\n") key = config_get('key') if key != "": cmd = [ From 78524af3f008bee8576beb2345eba8ed46cb5670 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 19:19:52 +1300 Subject: [PATCH 0119/2699] add auth-supported config option --- ceph-proxy/config.yaml | 9 +++++++++ ceph-proxy/hooks/hooks.py | 1 + ceph-proxy/templates/ceph.conf | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index bdc15200..dd5ce654 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -6,6 +6,15 @@ options: . This configuration element is mandatory and the service will fail on install if it is not provided. + auth-supported: + type: string + default: cephx + description: | + Which authentication flavour to use. + . + Valid options are "cephx" and "none". If "none" is specified, + keys will still be created and deployed so that it can be + enabled later. 
monitor-secret: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 570af21c..69304901 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -33,6 +33,7 @@ def install(): def emit_cephconf(): cephcontext = { + 'auth_supported': utils.config_get('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), } diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 072535f5..2af77d0a 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = cephx + auth supported = {{ auth_supported }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From f525fb99847533431e07e02829c6010a297aa0ab Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Thu, 18 Oct 2012 19:19:52 +1300 Subject: [PATCH 0120/2699] add auth-supported config option --- ceph-mon/config.yaml | 9 +++++++++ ceph-mon/hooks/hooks.py | 1 + ceph-mon/templates/ceph.conf | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index bdc15200..dd5ce654 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -6,6 +6,15 @@ options: . This configuration element is mandatory and the service will fail on install if it is not provided. + auth-supported: + type: string + default: cephx + description: | + Which authentication flavour to use. + . + Valid options are "cephx" and "none". If "none" is specified, + keys will still be created and deployed so that it can be + enabled later. monitor-secret: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 570af21c..69304901 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -33,6 +33,7 @@ def install(): def emit_cephconf(): cephcontext = { + 'auth_supported': utils.config_get('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), } diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 072535f5..2af77d0a 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = cephx + auth supported = {{ auth_supported }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From fe6dd3dbb06a75e073107f3562118c884f129c0c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 19 Oct 2012 16:50:18 +0100 Subject: [PATCH 0121/2699] Added is_leader to ceph --- ceph-proxy/hooks/ceph.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 9cb8985f..a7c720f5 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -13,7 +13,9 @@ import utils import os -QUORUM = ['leader', 'peon'] +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] def is_quorum(): @@ -40,6 +42,30 @@ def is_quorum(): return False +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + def wait_for_quorum(): while not is_quorum(): 
time.sleep(3) From 1b46bb724dd47884167ab76811185fe72e583d17 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 19 Oct 2012 16:50:18 +0100 Subject: [PATCH 0122/2699] Added is_leader to ceph --- ceph-mon/hooks/ceph.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 9cb8985f..a7c720f5 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -13,7 +13,9 @@ import utils import os -QUORUM = ['leader', 'peon'] +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] def is_quorum(): @@ -40,6 +42,30 @@ def is_quorum(): return False +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + def wait_for_quorum(): while not is_quorum(): time.sleep(3) From 985113d07f62f9699a78a10b1f603a9f4eed110c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 19 Oct 2012 16:50:51 +0100 Subject: [PATCH 0123/2699] Added support for auth configuration from mons --- ceph-osd/hooks/hooks.py | 26 ++++++++++++++++++-------- ceph-osd/templates/ceph.conf | 2 +- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 5a43ce00..1421284e 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -35,6 +35,7 @@ def emit_cephconf(): utils.juju_log('INFO', 'Monitor hosts are ' + repr(mon_hosts)) cephcontext = { + 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid() } @@ -71,12 +72,20 @@ def get_mon_hosts(): def get_fsid(): + return get_conf('fsid') + + +def get_auth(): + return get_conf('auth') + + +def get_conf(name): for relid in utils.relation_ids('mon'): for unit in utils.relation_list(relid): - fsid = utils.relation_get('fsid', + conf = utils.relation_get(name, unit, relid) - if fsid != "": - return fsid + if conf != "": + return conf return None @@ -105,8 +114,9 @@ def mon_relation(): bootstrap_key = utils.relation_get('osd_bootstrap_key') if (get_fsid() and + get_auth() and bootstrap_key != ""): - utils.juju_log('INFO', 'mon has provided fsid & key- scanning disks') + utils.juju_log('INFO', 'mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) for dev in utils.config_get('osd-devices').split(' '): @@ -114,14 +124,15 @@ def mon_relation(): ceph.rescan_osd_devices() else: utils.juju_log('INFO', - 'mon cluster has not yet provided fsid & key') + 'mon cluster has not yet provided conf') utils.juju_log('INFO', 'End mon-relation hook.') def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') - if get_fsid(): + if (get_fsid() and + get_auth()): emit_cephconf() install_upstart_scripts() utils.juju_log('INFO', 'End upgrade-charm hook.') @@ -130,8 +141,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- subprocess.call(['udevadm', 'trigger', - '--subsystem-match=block', '--action=add']) + ceph.rescan_osd_devices() utils.do_hooks({ diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 072535f5..2af77d0a 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = cephx + auth supported = {{ auth_supported }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From 1096b28116292c895e2a29da5ce5f34584ee3ac1 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 19 Oct 2012 16:51:24 +0100 Subject: [PATCH 0124/2699] Added support for auth configuration from mons --- ceph-radosgw/hooks/hooks.py | 15 +++++++++++++++ ceph-radosgw/templates/ceph.conf | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cfaeeb43..f989594e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -39,6 +39,7 @@ def emit_cephconf(): os.makedirs('/etc/ceph') cephcontext = { + 'auth_supported': get_auth() or 'none', 'mon_hosts': ' '.join(get_mon_hosts()), 'hostname': utils.get_unit_hostname() } @@ -98,6 +99,20 @@ def get_mon_hosts(): return hosts +def get_auth(): + return get_conf('auth') + + +def get_conf(name): + for relid in utils.relation_ids('mon'): + for unit in utils.relation_list(relid): + conf = utils.relation_get(name, + unit, relid) + if conf != "": + return conf + return None + + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 9dcbbd7c..f5f2bcb2 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -1,5 +1,5 @@ [global] - auth supported = cephx + auth supported = {{ auth_supported }} mon host = {{ mon_hosts }} [client.radosgw.gateway] From cc71024159859a45fc5d1aa7406046a902a5ecd7 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:32:06 +0000 Subject: [PATCH 0125/2699] Add support for cloud: prefix for cloud archive, tidy OSD device handling to exit early when devices are not found. --- ceph-proxy/config.yaml | 4 ++-- ceph-proxy/hooks/hooks.py | 8 ++++++-- ceph-proxy/hooks/utils.py | 26 +++++++++++++++++++------- ceph-proxy/revision | 2 +- 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index dd5ce654..d08845b6 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -54,7 +54,7 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:folsom-proposed + - cloud:precise-proposed/folsom - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration @@ -62,7 +62,7 @@ options: . Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise. + for precise but is provided in the Folsom cloud archive. 
key: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 34cf9edc..44bdc3a0 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -122,6 +122,11 @@ def bootstrap_monitor_cluster(): def osdize(dev): + if not os.path.exists(dev): + utils.juju_log('INFO', + 'Path {} does not exist - bailing'.format(dev)) + return + e_mountpoint = utils.config_get('ephemeral-unmount') if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) @@ -137,8 +142,7 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['ceph-disk-prepare', dev]) def mon_relation(): diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index e3593e0a..5ae96bc0 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -50,24 +50,36 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +CLOUD_ARCHIVE = \ +""" # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + + def configure_source(): - source = config_get('source') - if (source.startswith('ppa:') or - source.startswith('cloud:')): + source = str(config_get('source')) + if not source: + return + if source.startswith('ppa:'): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) + if source.startswith('cloud:'): + install('ubuntu-cloud-keyring') + pocket = source.split(':')[1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: apt.write("deb " + source + "\n") key = config_get('key') - if key != "": + if key: cmd = [ 'apt-key', - 'import', - key + 'adv', '--keyserver keyserver.ubuntu.com', + '--recv-keys', key ] subprocess.check_call(cmd) cmd = [ diff --git a/ceph-proxy/revision b/ceph-proxy/revision index a862eb84..8cf5c1a2 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -85 +86 From 2536a3d0200de141fced2c7b14a47fae3c94e490 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:32:06 +0000 Subject: [PATCH 0126/2699] Add support for cloud: prefix for cloud archive, tidy OSD device handling to exit early when devices are not found. --- ceph-mon/config.yaml | 4 ++-- ceph-mon/hooks/hooks.py | 8 ++++++-- ceph-mon/hooks/utils.py | 26 +++++++++++++++++++------- ceph-mon/revision | 2 +- 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index dd5ce654..d08845b6 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -54,7 +54,7 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:folsom-proposed + - cloud:precise-proposed/folsom - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration @@ -62,7 +62,7 @@ options: . Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise. + for precise but is provided in the Folsom cloud archive. 
key: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 34cf9edc..44bdc3a0 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -122,6 +122,11 @@ def bootstrap_monitor_cluster(): def osdize(dev): + if not os.path.exists(dev): + utils.juju_log('INFO', + 'Path {} does not exist - bailing'.format(dev)) + return + e_mountpoint = utils.config_get('ephemeral-unmount') if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) @@ -137,8 +142,7 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['ceph-disk-prepare', dev]) def mon_relation(): diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index e3593e0a..5ae96bc0 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -50,24 +50,36 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +CLOUD_ARCHIVE = \ +""" # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + + def configure_source(): - source = config_get('source') - if (source.startswith('ppa:') or - source.startswith('cloud:')): + source = str(config_get('source')) + if not source: + return + if source.startswith('ppa:'): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) + if source.startswith('cloud:'): + install('ubuntu-cloud-keyring') + pocket = source.split(':')[1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: apt.write("deb " + source + "\n") key = config_get('key') - if key != "": + if key: cmd = [ 'apt-key', - 'import', - key + 'adv', '--keyserver keyserver.ubuntu.com', + '--recv-keys', key ] subprocess.check_call(cmd) cmd = [ diff --git a/ceph-mon/revision b/ceph-mon/revision index a862eb84..8cf5c1a2 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -85 +86 From 3a55e1de92ac860c0c02e1d1df53c046a3b01d1e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:36:03 +0000 Subject: [PATCH 0127/2699] Add full support for cloud archive use; add early check for device presens in osdize. --- ceph-osd/config.yaml | 4 ++-- ceph-osd/hooks/hooks.py | 8 ++++++-- ceph-osd/hooks/utils.py | 42 ++++++++++++++++++++++++++++++----------- ceph-osd/revision | 2 +- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 93de8291..e4ae50f5 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -22,7 +22,7 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:folsom-proposed + - cloud:precise-proposed/folsom - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration @@ -30,7 +30,7 @@ options: . Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise. + for precise but is provided in the Ubuntu cloud archive. 
key: type: string description: | diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 1421284e..42d59f01 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -90,6 +90,11 @@ def get_conf(name): def osdize(dev): + if not os.path.exists(dev): + utils.juju_log('INFO', + 'Path {} does not exist - bailing'.format(dev)) + return + e_mountpoint = utils.config_get('ephemeral-unmount') if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) @@ -105,8 +110,7 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - if os.path.exists(dev): - subprocess.call(['ceph-disk-prepare', dev]) + subprocess.call(['ceph-disk-prepare', dev]) def mon_relation(): diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 64c578e1..5ae96bc0 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -50,24 +50,38 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +CLOUD_ARCHIVE = \ +""" # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + + def configure_source(): - source = config_get('source') - if (source.startswith('ppa:') or - source.startswith('cloud:') or - source.startswith('http:')): + source = str(config_get('source')) + if not source: + return + if source.startswith('ppa:'): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) + if source.startswith('cloud:'): + install('ubuntu-cloud-keyring') + pocket = source.split(':')[1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: + apt.write("deb " + source + "\n") key = config_get('key') - cmd = [ - 'apt-key', - 'import', - key - ] - subprocess.check_call(cmd) + if key: + cmd = [ + 'apt-key', + 'adv', '--keyserver keyserver.ubuntu.com', + '--recv-keys', key + ] + subprocess.check_call(cmd) cmd = [ 'apt-get', 'update' @@ -129,8 +143,14 @@ def relation_set(**kwargs): cmd = [ 'relation-set' ] + args = [] for k, v in kwargs.items(): - cmd.append('{}={}'.format(k, v)) + if k == 'rid': + cmd.append('-r') + cmd.append(v) + else: + args.append('{}={}'.format(k, v)) + cmd += args subprocess.check_call(cmd) diff --git a/ceph-osd/revision b/ceph-osd/revision index b8626c4c..7ed6ff82 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -4 +5 From 9935d5bf79afd5b5a6b3fb643abc75ca54d7d7c8 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:45:09 +0000 Subject: [PATCH 0128/2699] Add feature to enable forced reformatting of OSD devices. --- ceph-osd/config.yaml | 9 +++++++++ ceph-osd/hooks/hooks.py | 10 +++++++++- ceph-osd/revision | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index e4ae50f5..beb67a27 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -7,6 +7,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-reformat: + type: string + description: | + By default, the charm will not re-format a device that already looks + as if it might be an OSD device. This is a safeguard to try to + prevent data loss. + . + Specifying this option (any value) forces a reformat of any OSD devices + found which are not already mounted. 
ephemeral-unmount: type: string description: | diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 42d59f01..a66c37fb 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -89,6 +89,13 @@ def get_conf(name): return None +def reformat_osd(): + if utils.config_get('osd-reformat') != "": + return True + else: + return False + + def osdize(dev): if not os.path.exists(dev): utils.juju_log('INFO', @@ -99,7 +106,8 @@ def osdize(dev): if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) - if ceph.is_osd_disk(dev): + if (ceph.is_osd_disk(dev) and not + reformat_osd()): utils.juju_log('INFO', 'Looks like {} is already an OSD, skipping.' .format(dev)) diff --git a/ceph-osd/revision b/ceph-osd/revision index 7ed6ff82..1e8b3149 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -5 +6 From d68011ea7eef06b7e42dfe39e959942a4b6e39bd Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:45:28 +0000 Subject: [PATCH 0129/2699] Add feature to enabled forced reformatting of OSD devices. --- ceph-proxy/config.yaml | 9 +++++++++ ceph-proxy/hooks/hooks.py | 10 +++++++++- ceph-proxy/revision | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d08845b6..ffc45a45 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -39,6 +39,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-reformat: + type: string + description: | + By default, the charm will not re-format a device that already looks + as if it might be an OSD device. This is a safeguard to try to + prevent data loss. + . + Specifying this option (any value) forces a reformat of any OSD devices + found which are not already mounted. ephemeral-unmount: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 44bdc3a0..b364b686 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -121,6 +121,13 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) +def reformat_osd(): + if utils.config_get('osd-reformat') != "": + return True + else: + return False + + def osdize(dev): if not os.path.exists(dev): utils.juju_log('INFO', @@ -131,7 +138,8 @@ def osdize(dev): if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) - if ceph.is_osd_disk(dev): + if (ceph.is_osd_disk(dev) and not + reformat_osd()): utils.juju_log('INFO', 'Looks like {} is already an OSD, skipping.' .format(dev)) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 8cf5c1a2..84df3526 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -86 +87 From 38d485b5dd256e2217d1a986fd5209d591ec69cf Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:45:28 +0000 Subject: [PATCH 0130/2699] Add feature to enabled forced reformatting of OSD devices. --- ceph-mon/config.yaml | 9 +++++++++ ceph-mon/hooks/hooks.py | 10 +++++++++- ceph-mon/revision | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d08845b6..ffc45a45 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -39,6 +39,15 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-reformat: + type: string + description: | + By default, the charm will not re-format a device that already looks + as if it might be an OSD device. This is a safeguard to try to + prevent data loss. + . 
+ Specifying this option (any value) forces a reformat of any OSD devices + found which are not already mounted. ephemeral-unmount: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 44bdc3a0..b364b686 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -121,6 +121,13 @@ def bootstrap_monitor_cluster(): os.unlink(keyring) +def reformat_osd(): + if utils.config_get('osd-reformat') != "": + return True + else: + return False + + def osdize(dev): if not os.path.exists(dev): utils.juju_log('INFO', @@ -131,7 +138,8 @@ def osdize(dev): if e_mountpoint != "": subprocess.call(['umount', e_mountpoint]) - if ceph.is_osd_disk(dev): + if (ceph.is_osd_disk(dev) and not + reformat_osd()): utils.juju_log('INFO', 'Looks like {} is already an OSD, skipping.' .format(dev)) diff --git a/ceph-mon/revision b/ceph-mon/revision index 8cf5c1a2..84df3526 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -86 +87 From 73737c5e70427e79cc789a7840d585f11b3b6af5 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 09:46:54 +0000 Subject: [PATCH 0131/2699] Add support for using Ubuntu cloud archive. --- ceph-radosgw/config.yaml | 2 +- ceph-radosgw/hooks/utils.py | 34 ++++++++++++++++++++++++---------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 5d9b4c40..96c0aeb1 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:folsom-proposed + - cloud:precise-proposed/folsom - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index c6556dc7..92fe6285 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -51,24 +51,38 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +CLOUD_ARCHIVE = \ +""" # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + + def configure_source(): - source = config_get('source') - if (source.startswith('ppa:') or - source.startswith('cloud:') or - source.startswith('http:')): + source = str(config_get('source')) + if not source: + return + if source.startswith('ppa:'): cmd = [ 'add-apt-repository', source ] subprocess.check_call(cmd) + if source.startswith('cloud:'): + install('ubuntu-cloud-keyring') + pocket = source.split(':')[1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: + apt.write("deb " + source + "\n") key = config_get('key') - cmd = [ - 'apt-key', - 'import', - key - ] - subprocess.check_call(cmd) + if key: + cmd = [ + 'apt-key', + 'adv', '--keyserver keyserver.ubuntu.com', + '--recv-keys', key + ] + subprocess.check_call(cmd) cmd = [ 'apt-get', 'update' From d7c68d5893f4fed0ccb80aef5885d8367f9600a4 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 10:00:27 +0000 Subject: [PATCH 0132/2699] General tidy of filesystem handling and ephemeral mount stealing. 
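As a rough illustration of the tidied flow (filesystem_mounted is from this patch; release_ephemeral is a hypothetical wrapper, assuming grep and umount as found on precise):

    import subprocess

    def filesystem_mounted(fs):
        # grep -wqs exits 0 only when the mountpoint appears as a
        # whole word in /proc/mounts.
        return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0

    def release_ephemeral(mountpoint):
        # Unmount the cloud ephemeral device only when it is actually
        # mounted, keeping repeated config-changed runs idempotent.
        if mountpoint and filesystem_mounted(mountpoint):
            subprocess.call(['umount', mountpoint])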
--- ceph-proxy/hooks/hooks.py | 19 ++++++++++++++----- ceph-proxy/revision | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b364b686..5b2117c3 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -60,6 +60,11 @@ def config_changed(): emit_cephconf() + e_mountpoint = utils.config_get('ephemeral-unmount') + if (e_mountpoint != "" and + filesystem_mounted(e_mountpoint)): + subprocess.call(['umount', e_mountpoint]) + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) @@ -134,10 +139,6 @@ def osdize(dev): 'Path {} does not exist - bailing'.format(dev)) return - e_mountpoint = utils.config_get('ephemeral-unmount') - if e_mountpoint != "": - subprocess.call(['umount', e_mountpoint]) - if (ceph.is_osd_disk(dev) and not reformat_osd()): utils.juju_log('INFO', @@ -145,7 +146,7 @@ def osdize(dev): .format(dev)) return - if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + if device_mounted(dev): utils.juju_log('INFO', 'Looks like {} is in use, skipping.'.format(dev)) return @@ -153,6 +154,14 @@ def osdize(dev): subprocess.call(['ceph-disk-prepare', dev]) +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 84df3526..d22307c4 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -87 +88 From a829f9574799039b8a4e0d9f76c0a6c37dce8dbc Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 10:00:27 +0000 Subject: [PATCH 0133/2699] General tidy of filesystem handling and ephemeral mount stealing. 
--- ceph-mon/hooks/hooks.py | 19 ++++++++++++++----- ceph-mon/revision | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b364b686..5b2117c3 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -60,6 +60,11 @@ def config_changed(): emit_cephconf() + e_mountpoint = utils.config_get('ephemeral-unmount') + if (e_mountpoint != "" and + filesystem_mounted(e_mountpoint)): + subprocess.call(['umount', e_mountpoint]) + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) @@ -134,10 +139,6 @@ def osdize(dev): 'Path {} does not exist - bailing'.format(dev)) return - e_mountpoint = utils.config_get('ephemeral-unmount') - if e_mountpoint != "": - subprocess.call(['umount', e_mountpoint]) - if (ceph.is_osd_disk(dev) and not reformat_osd()): utils.juju_log('INFO', @@ -145,7 +146,7 @@ def osdize(dev): .format(dev)) return - if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + if device_mounted(dev): utils.juju_log('INFO', 'Looks like {} is in use, skipping.'.format(dev)) return @@ -153,6 +154,14 @@ def osdize(dev): subprocess.call(['ceph-disk-prepare', dev]) +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() diff --git a/ceph-mon/revision b/ceph-mon/revision index 84df3526..d22307c4 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -87 +88 From e3dece38f67ff956659643eb1680be1f8c7340fc Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 12 Nov 2012 10:00:51 +0000 Subject: [PATCH 0134/2699] General tidy of filesystem handling and ephemeral mountpoint stealing. 
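A hedged aside on the companion check (device_mounted matches this patch; the partition-naming caveat is an assumption, not something the patch handles):

    import subprocess

    def device_mounted(dev):
        # A device in use shows up in /proc/mounts via its first
        # partition (ceph-disk-prepare creates a GPT partition), so
        # /dev/sdb is checked as /dev/sdb1. Devices with other naming
        # schemes (e.g. /dev/cciss/c0d0p1) would need different logic.
        return subprocess.call(['grep', '-wqs', dev + '1',
                                '/proc/mounts']) == 0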
--- ceph-osd/hooks/hooks.py | 19 ++++++++++++++----- ceph-osd/revision | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index a66c37fb..a8d9d899 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -47,6 +47,11 @@ def emit_cephconf(): def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') + e_mountpoint = utils.config_get('ephemeral-unmount') + if (e_mountpoint != "" and + filesystem_mounted(e_mountpoint)): + subprocess.call(['umount', e_mountpoint]) + if ceph.is_bootstrapped(): utils.juju_log('INFO', 'ceph bootstrapped, rescanning disks') emit_cephconf() @@ -102,10 +107,6 @@ def osdize(dev): 'Path {} does not exist - bailing'.format(dev)) return - e_mountpoint = utils.config_get('ephemeral-unmount') - if e_mountpoint != "": - subprocess.call(['umount', e_mountpoint]) - if (ceph.is_osd_disk(dev) and not reformat_osd()): utils.juju_log('INFO', @@ -113,7 +114,7 @@ def osdize(dev): .format(dev)) return - if subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0: + if device_mounted(dev): utils.juju_log('INFO', 'Looks like {} is in use, skipping.'.format(dev)) return @@ -121,6 +122,14 @@ def osdize(dev): subprocess.call(['ceph-disk-prepare', dev]) +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') diff --git a/ceph-osd/revision b/ceph-osd/revision index 1e8b3149..7f8f011e 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -6 +7 From 2b4031531e51f7ff7b54bff71db19134cbd74010 Mon Sep 17 00:00:00 2001 From: "Juan L. Negron" Date: Wed, 21 Nov 2012 17:09:33 -0800 Subject: [PATCH 0135/2699] Updating revision. --- ceph-radosgw/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index 8351c193..60d3b2f4 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -14 +15 From bba11ee4e275bdeeb322fecdc541eaf309b24a7b Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 22 Nov 2012 10:47:03 +0000 Subject: [PATCH 0136/2699] Updated default source to cloud-archive for precise charm branch --- ceph-proxy/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index ffc45a45..c2331798 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -59,6 +59,7 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string + default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . From a1251ca3682b2481a2127ee5cdd2643ef0e4be07 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 22 Nov 2012 10:47:03 +0000 Subject: [PATCH 0137/2699] Updated default source to cloud-archive for precise charm branch --- ceph-mon/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index ffc45a45..c2331798 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -59,6 +59,7 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string + default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . 
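To make the new default concrete, a minimal sketch of how configure_source expands a cloud: value (CLOUD_ARCHIVE is the template added earlier in this series; cloud_archive_line is an illustrative name):

    CLOUD_ARCHIVE = \
    """ # Ubuntu Cloud Archive
    deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
    """

    def cloud_archive_line(source):
        # 'cloud:precise-updates/folsom' -> 'precise-updates/folsom'
        pocket = source.split(':')[1]
        return CLOUD_ARCHIVE.format(pocket)

    # cloud_archive_line('cloud:precise-updates/folsom') yields a deb
    # line for the folsom pocket of the Ubuntu Cloud Archive.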
From 7583ebd8fe6c9a201cae0a9b64178aa9e5ee4843 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 22 Nov 2012 10:47:27 +0000 Subject: [PATCH 0138/2699] Updated default source to cloud-archive for precise charm branch --- ceph-osd/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index beb67a27..e8f9ad7f 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -27,6 +27,7 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string + default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . From f7aceb3715b3c21cf51ebb22d044ec0623ebaadb Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 22 Nov 2012 10:47:49 +0000 Subject: [PATCH 0139/2699] Updated default source to cloud-archive for precise charm branch --- ceph-radosgw/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 96c0aeb1..26c89856 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -1,6 +1,7 @@ options: source: type: string + default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . From 4b262dc093b4fd5759260dc977617735ad437d72 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 10:22:51 +0000 Subject: [PATCH 0140/2699] Updates for latest ceph upstream and utils refactor --- ceph-proxy/{README => README.md} | 0 ceph-proxy/config.yaml | 19 +++++++++++++++++- ceph-proxy/hooks/ceph.py | 15 +++++++++++++++ ceph-proxy/hooks/hooks.py | 33 ++++++++++++++++++++++++++------ ceph-proxy/hooks/utils.py | 18 ++++++++++++++--- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 6 ++++++ 7 files changed, 82 insertions(+), 11 deletions(-) rename ceph-proxy/{README => README.md} (100%) diff --git a/ceph-proxy/README b/ceph-proxy/README.md similarity index 100% rename from ceph-proxy/README rename to ceph-proxy/README.md diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index c2331798..baa947c5 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -39,6 +39,23 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-journal: + type: string + description: | + The device to use as a shared journal drive for all OSD's. By default + no journal device will be used. + . + Only supported with ceph >= 0.55. + osd-format: + type: string + default: xfs + description: | + Format of filesystem to use for OSD devices; supported formats include: + . + xfs (Default >= 0.55) + ext4 (Only option < 0.55) + . + Only supported with ceph >= 0.55. osd-reformat: type: string description: | @@ -47,7 +64,7 @@ options: prevent data loss. . Specifying this option (any value) forces a reformat of any OSD devices - found which are not already mounted. + found which are not already mounted. 
ephemeral-unmount: type: string description: | diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index a7c720f5..90a22a2d 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -12,6 +12,7 @@ import time import utils import os +import apt_pkg as apt LEADER = 'leader' PEON = 'peon' @@ -207,3 +208,17 @@ def get_named_key(name, caps=None): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key + + +def get_ceph_version(): + apt.init() + cache = apt.Cache() + pkg = cache['ceph'] + if pkg.current_ver: + return apt.upstream_version(pkg.current_ver.ver_str) + else: + return None + + +def version_compare(a, b): + return apt.version_compare(a, b) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 5b2117c3..b0a16b94 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -19,8 +19,11 @@ def install_upstart_scripts(): - for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') + # Only install upstart configurations for older versions + if ceph.version_compare(ceph.get_ceph_version(), + "0.55.1") < 0: + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') def install(): @@ -36,6 +39,7 @@ def emit_cephconf(): 'auth_supported': utils.config_get('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), + 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: @@ -53,7 +57,7 @@ def config_changed(): sys.exit(1) monitor_secret = utils.config_get('monitor-secret') - if monitor_secret == '': + if not monitor_secret: utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) @@ -61,7 +65,7 @@ def config_changed(): emit_cephconf() e_mountpoint = utils.config_get('ephemeral-unmount') - if (e_mountpoint != "" and + if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) @@ -127,7 +131,7 @@ def bootstrap_monitor_cluster(): def reformat_osd(): - if utils.config_get('osd-reformat') != "": + if utils.config_get('osd-reformat'): return True else: return False @@ -151,7 +155,24 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - subprocess.call(['ceph-disk-prepare', dev]) + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if ceph.version_compare(ceph.get_ceph_version(), + "0.55") >= 0: + osd_format = utils.config_get('osd-format') + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + os.path.exists(osd_journal)): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) def device_mounted(dev): diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 5ae96bc0..65d18fc2 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -136,7 +136,11 @@ def relation_get(attribute, unit=None, rid=None): cmd.append(attribute) if unit: cmd.append(unit) - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def relation_set(**kwargs): @@ -159,7 +163,11 @@ def unit_get(attribute): 'unit-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return 
value def config_get(attribute): @@ -167,7 +175,11 @@ def config_get(attribute): 'config-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def get_unit_hostname(): diff --git a/ceph-proxy/revision b/ceph-proxy/revision index d22307c4..7fe4e495 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -88 +91 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 2af77d0a..887ba82f 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,5 +1,11 @@ [global] +{% if version < "0.51" %} auth supported = {{ auth_supported }} +{% else %} + auth cluster required = {{ auth_supported }} + auth service required = {{ auth_supported }} + auth client required = {{ auth_supported }} +{% endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From b70c8359644a478755ce303ea3fc8e7e2de44749 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 10:22:51 +0000 Subject: [PATCH 0141/2699] Updates for latest ceph upstream and utils refactor --- ceph-mon/{README => README.md} | 0 ceph-mon/config.yaml | 19 ++++++++++++++++++- ceph-mon/hooks/ceph.py | 15 +++++++++++++++ ceph-mon/hooks/hooks.py | 33 +++++++++++++++++++++++++++------ ceph-mon/hooks/utils.py | 18 +++++++++++++++--- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 6 ++++++ 7 files changed, 82 insertions(+), 11 deletions(-) rename ceph-mon/{README => README.md} (100%) diff --git a/ceph-mon/README b/ceph-mon/README.md similarity index 100% rename from ceph-mon/README rename to ceph-mon/README.md diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index c2331798..baa947c5 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -39,6 +39,23 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-journal: + type: string + description: | + The device to use as a shared journal drive for all OSD's. By default + no journal device will be used. + . + Only supported with ceph >= 0.55. + osd-format: + type: string + default: xfs + description: | + Format of filesystem to use for OSD devices; supported formats include: + . + xfs (Default >= 0.55) + ext4 (Only option < 0.55) + . + Only supported with ceph >= 0.55. osd-reformat: type: string description: | @@ -47,7 +64,7 @@ options: prevent data loss. . Specifying this option (any value) forces a reformat of any OSD devices - found which are not already mounted. + found which are not already mounted. 
ephemeral-unmount: type: string description: | diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index a7c720f5..90a22a2d 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -12,6 +12,7 @@ import time import utils import os +import apt_pkg as apt LEADER = 'leader' PEON = 'peon' @@ -207,3 +208,17 @@ def get_named_key(name, caps=None): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key + + +def get_ceph_version(): + apt.init() + cache = apt.Cache() + pkg = cache['ceph'] + if pkg.current_ver: + return apt.upstream_version(pkg.current_ver.ver_str) + else: + return None + + +def version_compare(a, b): + return apt.version_compare(a, b) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 5b2117c3..b0a16b94 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -19,8 +19,11 @@ def install_upstart_scripts(): - for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') + # Only install upstart configurations for older versions + if ceph.version_compare(ceph.get_ceph_version(), + "0.55.1") < 0: + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') def install(): @@ -36,6 +39,7 @@ def emit_cephconf(): 'auth_supported': utils.config_get('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': utils.config_get('fsid'), + 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: @@ -53,7 +57,7 @@ def config_changed(): sys.exit(1) monitor_secret = utils.config_get('monitor-secret') - if monitor_secret == '': + if not monitor_secret: utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) @@ -61,7 +65,7 @@ def config_changed(): emit_cephconf() e_mountpoint = utils.config_get('ephemeral-unmount') - if (e_mountpoint != "" and + if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) @@ -127,7 +131,7 @@ def bootstrap_monitor_cluster(): def reformat_osd(): - if utils.config_get('osd-reformat') != "": + if utils.config_get('osd-reformat'): return True else: return False @@ -151,7 +155,24 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - subprocess.call(['ceph-disk-prepare', dev]) + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if ceph.version_compare(ceph.get_ceph_version(), + "0.55") >= 0: + osd_format = utils.config_get('osd-format') + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + os.path.exists(osd_journal)): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) def device_mounted(dev): diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 5ae96bc0..65d18fc2 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -136,7 +136,11 @@ def relation_get(attribute, unit=None, rid=None): cmd.append(attribute) if unit: cmd.append(unit) - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def relation_set(**kwargs): @@ -159,7 +163,11 @@ def unit_get(attribute): 'unit-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def 
config_get(attribute): @@ -167,7 +175,11 @@ def config_get(attribute): 'config-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def get_unit_hostname(): diff --git a/ceph-mon/revision b/ceph-mon/revision index d22307c4..7fe4e495 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -88 +91 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 2af77d0a..887ba82f 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,5 +1,11 @@ [global] +{% if version < "0.51" %} auth supported = {{ auth_supported }} +{% else %} + auth cluster required = {{ auth_supported }} + auth service required = {{ auth_supported }} + auth client required = {{ auth_supported }} +{% endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From 0e2149cbb0d2e02e02dc189d14e27d17da64d22c Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 10:31:03 +0000 Subject: [PATCH 0142/2699] Resync with ceph charm, updates for raring --- ceph-osd/{README => README.md} | 0 ceph-osd/config.yaml | 17 ++++++ ceph-osd/hooks/ceph.py | 108 ++++++++++++++++++++++++++++++--- ceph-osd/hooks/hooks.py | 44 +++++++++----- ceph-osd/hooks/utils.py | 18 +++++- ceph-osd/templates/ceph.conf | 6 ++ 6 files changed, 166 insertions(+), 27 deletions(-) rename ceph-osd/{README => README.md} (100%) diff --git a/ceph-osd/README b/ceph-osd/README.md similarity index 100% rename from ceph-osd/README rename to ceph-osd/README.md diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index e8f9ad7f..497eb8f4 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -7,6 +7,23 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + osd-journal: + type: string + description: | + The device to use as a shared journal drive for all OSD's. By default + no journal device will be used. + . + Only supported with ceph >= 0.55. + osd-format: + type: string + default: xfs + description: | + Format of filesystem to use for OSD devices; supported formats include: + . + xfs (Default >= 0.55) + ext4 (Only option < 0.55) + . + Only supported with ceph >= 0.55. 
osd-reformat: type: string description: | diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 3e466c50..90a22a2d 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -12,8 +12,11 @@ import time import utils import os +import apt_pkg as apt -QUORUM = ['leader', 'peon'] +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] def is_quorum(): @@ -40,6 +43,30 @@ def is_quorum(): return False +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + def wait_for_quorum(): while not is_quorum(): time.sleep(3) @@ -81,6 +108,7 @@ def rescan_osd_devices(): subprocess.call(cmd) + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -88,6 +116,11 @@ def is_bootstrapped(): return os.path.exists(_bootstrap_keyring) +def wait_for_bootstrap(): + while (not is_bootstrapped()): + time.sleep(3) + + def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ @@ -100,15 +133,53 @@ def import_osd_bootstrap_key(key): subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] + } def get_osd_bootstrap_key(): + return get_named_key('bootstrap-osd', _osd_bootstrap_caps) + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_named_key(name, caps=None): + caps = caps or _default_caps cmd = [ 'ceph', '--name', 'mon.', @@ -116,9 +187,14 @@ def get_osd_bootstrap_key(): '/var/lib/ceph/mon/ceph-{}/keyring'.format( utils.get_unit_hostname() ), - 'auth', 'get-or-create', 'client.bootstrap-osd', - 'mon', '; '.join(_osd_bootstrap_caps) + 'auth', 'get-or-create', 'client.{}'.format(name), ] + # Add capabilities + for subsystem, subcaps in caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' @@ -132,3 +208,17 @@ def get_osd_bootstrap_key(): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key + + +def get_ceph_version(): + apt.init() + cache = apt.Cache() + pkg = cache['ceph'] + if pkg.current_ver: + return apt.upstream_version(pkg.current_ver.ver_str) + else: + return None + + +def version_compare(a, b): + return apt.version_compare(a, b) 
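A sketch of how the version gate is meant to be used (assuming python-apt's apt_pkg, as shipped on precise; ceph_at_least is an illustrative helper, not part of the resync):

    import apt_pkg as apt

    def ceph_at_least(installed, wanted):
        # version_compare behaves like strcmp on Debian version
        # strings: negative, zero or positive. Callers gate features
        # such as upstart handling and ceph-disk-prepare options on it.
        apt.init()
        return apt.version_compare(installed, wanted) >= 0

    # e.g. ceph_at_least('0.56.1', '0.55') -> True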
diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index a8d9d899..91fb2703 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -18,8 +18,11 @@ def install_upstart_scripts(): - for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') + # Only install upstart configurations for older versions + if ceph.version_compare(ceph.get_ceph_version(), + "0.55.1") < 0: + for x in glob.glob('files/upstart/*.conf'): + shutil.copy(x, '/etc/init/') def install(): @@ -37,7 +40,8 @@ def emit_cephconf(): cephcontext = { 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), - 'fsid': get_fsid() + 'fsid': get_fsid(), + 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: @@ -48,7 +52,7 @@ def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') e_mountpoint = utils.config_get('ephemeral-unmount') - if (e_mountpoint != "" and + if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) @@ -89,13 +93,13 @@ def get_conf(name): for unit in utils.relation_list(relid): conf = utils.relation_get(name, unit, relid) - if conf != "": + if conf: return conf return None def reformat_osd(): - if utils.config_get('osd-reformat') != "": + if utils.config_get('osd-reformat'): return True else: return False @@ -119,7 +123,24 @@ def osdize(dev): 'Looks like {} is in use, skipping.'.format(dev)) return - subprocess.call(['ceph-disk-prepare', dev]) + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if ceph.version_compare(ceph.get_ceph_version(), + "0.55") >= 0: + osd_format = utils.config_get('osd-format') + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + os.path.exists(osd_journal)): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) def device_mounted(dev): @@ -136,7 +157,7 @@ def mon_relation(): bootstrap_key = utils.relation_get('osd_bootstrap_key') if (get_fsid() and get_auth() and - bootstrap_key != ""): + bootstrap_key): utils.juju_log('INFO', 'mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) @@ -159,18 +180,11 @@ def upgrade_charm(): utils.juju_log('INFO', 'End upgrade-charm hook.') -def start(): - # In case we're being redeployed to the same machines, try - # to make sure everything is running as soon as possible. 
- ceph.rescan_osd_devices() - - utils.do_hooks({ 'config-changed': config_changed, 'install': install, 'mon-relation-departed': mon_relation, 'mon-relation-changed': mon_relation, - 'start': start, 'upgrade-charm': upgrade_charm, }) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 5ae96bc0..65d18fc2 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -136,7 +136,11 @@ def relation_get(attribute, unit=None, rid=None): cmd.append(attribute) if unit: cmd.append(unit) - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def relation_set(**kwargs): @@ -159,7 +163,11 @@ def unit_get(attribute): 'unit-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def config_get(attribute): @@ -167,7 +175,11 @@ def config_get(attribute): 'config-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def get_unit_hostname(): diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 2af77d0a..887ba82f 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,5 +1,11 @@ [global] +{% if version < "0.51" %} auth supported = {{ auth_supported }} +{% else %} + auth cluster required = {{ auth_supported }} + auth service required = {{ auth_supported }} + auth client required = {{ auth_supported }} +{% endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} From c8d1d4d264e7f6144d29d0c628b8594129bb4316 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 17:37:31 +0000 Subject: [PATCH 0143/2699] Zap journal once --- ceph-proxy/hooks/ceph.py | 5 +++++ ceph-proxy/hooks/hooks.py | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 90a22a2d..b3b67f46 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -109,6 +109,11 @@ def rescan_osd_devices(): subprocess.call(cmd) +def zap_disk(dev): + cmd = ['sgdisk', '--zap-all', dev] + subprocess.check_call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b0a16b94..ec1453b0 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -45,6 +45,8 @@ def emit_cephconf(): with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) +JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') @@ -69,6 +71,14 @@ def config_changed(): filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + not os.path.exists(JOURNAL_ZAPPED) and + os.path.exists(osd_journal)): + ceph.zap_disk(osd_journal) + with open(JOURNAL_ZAPPED, 'w') as zapped: + zapped.write('DONE') + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) From a31230a742a4e249dd22eab0eb93bd88114a9c1f Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 17:37:31 +0000 Subject: [PATCH 0144/2699] Zap journal once --- ceph-mon/hooks/ceph.py | 5 +++++ ceph-mon/hooks/hooks.py | 10 ++++++++++ 2 files changed, 
15 insertions(+) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 90a22a2d..b3b67f46 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -109,6 +109,11 @@ def rescan_osd_devices(): subprocess.call(cmd) +def zap_disk(dev): + cmd = ['sgdisk', '--zap-all', dev] + subprocess.check_call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b0a16b94..ec1453b0 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -45,6 +45,8 @@ def emit_cephconf(): with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) +JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') @@ -69,6 +71,14 @@ def config_changed(): filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + not os.path.exists(JOURNAL_ZAPPED) and + os.path.exists(osd_journal)): + ceph.zap_disk(osd_journal) + with open(JOURNAL_ZAPPED, 'w') as zapped: + zapped.write('DONE') + for dev in utils.config_get('osd-devices').split(' '): osdize(dev) From 5c3707d7a1d18ee55a24640200fa7d03c2c012bf Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Dec 2012 17:40:05 +0000 Subject: [PATCH 0145/2699] Zap journal device once only --- ceph-osd/hooks/ceph.py | 5 +++++ ceph-osd/hooks/hooks.py | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 90a22a2d..b3b67f46 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -109,6 +109,11 @@ def rescan_osd_devices(): subprocess.call(cmd) +def zap_disk(dev): + cmd = ['sgdisk', '--zap-all', dev] + subprocess.check_call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 91fb2703..783d7816 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -47,6 +47,8 @@ def emit_cephconf(): with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) +JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' + def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') @@ -56,6 +58,14 @@ def config_changed(): filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) + osd_journal = utils.config_get('osd-journal') + if (osd_journal and + not os.path.exists(JOURNAL_ZAPPED) and + os.path.exists(osd_journal)): + ceph.zap_disk(osd_journal) + with open(JOURNAL_ZAPPED, 'w') as zapped: + zapped.write('DONE') + if ceph.is_bootstrapped(): utils.juju_log('INFO', 'ceph bootstrapped, rescanning disks') emit_cephconf() From 65996f09c7018616ebc35f0550ee30ae986999ae Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 18 Dec 2012 10:25:38 +0000 Subject: [PATCH 0146/2699] Add support for BTRFS --- ceph-proxy/config.yaml | 1 + ceph-proxy/hooks/ceph.py | 6 ++++++ ceph-proxy/hooks/hooks.py | 14 ++++++++------ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index baa947c5..7b2f56db 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -54,6 +54,7 @@ options: . xfs (Default >= 0.55) ext4 (Only option < 0.55) + btrfs (experimental and not recommended) . Only supported with ceph >= 0.55. 
osd-reformat: diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index b3b67f46..6502b183 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -85,6 +85,12 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' + ] + def is_osd_disk(dev): try: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index ec1453b0..a7609871 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -29,7 +29,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') @@ -53,16 +53,18 @@ def config_changed(): utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) - fsid = utils.config_get('fsid') - if fsid == '': + # Pre-flight checks + if not utils.config_get('fsid'): utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') sys.exit(1) - - monitor_secret = utils.config_get('monitor-secret') - if not monitor_secret: + if not utils.config_get('monitor-secret'): utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) + if utils.config_get('osd-format') not in ceph.DISK_FORMATS: + utils.juju_log('CRITICAL', + 'Invalid OSD disk format configuration specified') + sys.exit(1) emit_cephconf() From 06877378b90a316c3ff893bc5034cee280c51108 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 18 Dec 2012 10:25:38 +0000 Subject: [PATCH 0147/2699] Add support for BTRFS --- ceph-mon/config.yaml | 1 + ceph-mon/hooks/ceph.py | 6 ++++++ ceph-mon/hooks/hooks.py | 14 ++++++++------ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index baa947c5..7b2f56db 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -54,6 +54,7 @@ options: . xfs (Default >= 0.55) ext4 (Only option < 0.55) + btrfs (experimental and not recommended) . Only supported with ceph >= 0.55. 
osd-reformat: diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index b3b67f46..6502b183 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -85,6 +85,12 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' + ] + def is_osd_disk(dev): try: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index ec1453b0..a7609871 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -29,7 +29,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') @@ -53,16 +53,18 @@ def config_changed(): utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) - fsid = utils.config_get('fsid') - if fsid == '': + # Pre-flight checks + if not utils.config_get('fsid'): utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') sys.exit(1) - - monitor_secret = utils.config_get('monitor-secret') - if not monitor_secret: + if not utils.config_get('monitor-secret'): utils.juju_log('CRITICAL', 'No monitor-secret supplied, cannot proceed.') sys.exit(1) + if utils.config_get('osd-format') not in ceph.DISK_FORMATS: + utils.juju_log('CRITICAL', + 'Invalid OSD disk format configuration specified') + sys.exit(1) emit_cephconf() From c127049a6ee6c9c72d7f4eed6ad39ef7c880e53a Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 18 Dec 2012 10:26:09 +0000 Subject: [PATCH 0148/2699] Add support for BTRFS and validate osd-format --- ceph-osd/config.yaml | 1 + ceph-osd/hooks/ceph.py | 6 ++++++ ceph-osd/hooks/hooks.py | 8 +++++++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 497eb8f4..edcad69a 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -22,6 +22,7 @@ options: . xfs (Default >= 0.55) ext4 (Only option < 0.55) + btrfs (experimental and not recommended) . Only supported with ceph >= 0.55. 
osd-reformat: diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index b3b67f46..6502b183 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -85,6 +85,12 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' + ] + def is_osd_disk(dev): try: diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 783d7816..16286924 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -28,7 +28,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') @@ -53,6 +53,12 @@ def emit_cephconf(): def config_changed(): utils.juju_log('INFO', 'Begin config-changed hook.') + # Pre-flight checks + if utils.config_get('osd-format') not in ceph.DISK_FORMATS: + utils.juju_log('CRITICAL', + 'Invalid OSD disk format configuration specified') + sys.exit(1) + e_mountpoint = utils.config_get('ephemeral-unmount') if (e_mountpoint and filesystem_mounted(e_mountpoint)): From 715abe06a9164a52f0278c8131ae113c90445747 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 08:59:51 +0000 Subject: [PATCH 0149/2699] Update units, mark mon managed by upstart --- ceph-proxy/hooks/hooks.py | 3 +++ ceph-proxy/hooks/utils.py | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a7609871..84d8652c 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -115,6 +115,7 @@ def get_mon_hosts(): def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) + upstart = '/var/lib/ceph/mon/ceph-{}/upstart'.format(hostname) secret = utils.config_get('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) @@ -134,6 +135,8 @@ def bootstrap_monitor_cluster(): with open(done, 'w'): pass + with open(upstart, 'w'): + pass subprocess.check_call(['start', 'ceph-mon-all-starter']) except: diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 65d18fc2..c21d0d4c 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -11,16 +11,19 @@ import subprocess import socket import sys +import re def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) try: - hooks[hook]() + hook_func = hooks[hook] except KeyError: juju_log('INFO', "This charm doesn't know how to handle '{}'.".format(hook)) + else: + hook_func() def install(*pkgs): @@ -88,6 +91,18 @@ def configure_source(): ] subprocess.check_call(cmd) + +def enable_pocket(pocket): + apt_sources = "/etc/apt/sources.list" + with open(apt_sources, "r") as sources: + lines = sources.readlines() + with open(apt_sources, "w") as sources: + for line in lines: + if pocket in line: + sources.write(re.sub('^# deb', 'deb', line)) + else: + sources.write(line) + # Protocols TCP = 'TCP' UDP = 'UDP' From 05d1b5f471d0fe097d4ee7c2b6732a2eced525e9 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 08:59:51 +0000 Subject: [PATCH 0150/2699] Update units, mark mon managed by upstart --- ceph-mon/hooks/hooks.py | 3 +++ ceph-mon/hooks/utils.py | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a7609871..84d8652c 100755 --- 
a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -115,6 +115,7 @@ def get_mon_hosts(): def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) + upstart = '/var/lib/ceph/mon/ceph-{}/upstart'.format(hostname) secret = utils.config_get('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) @@ -134,6 +135,8 @@ def bootstrap_monitor_cluster(): with open(done, 'w'): pass + with open(upstart, 'w'): + pass subprocess.check_call(['start', 'ceph-mon-all-starter']) except: diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 65d18fc2..c21d0d4c 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -11,16 +11,19 @@ import subprocess import socket import sys +import re def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) try: - hooks[hook]() + hook_func = hooks[hook] except KeyError: juju_log('INFO', "This charm doesn't know how to handle '{}'.".format(hook)) + else: + hook_func() def install(*pkgs): @@ -88,6 +91,18 @@ def configure_source(): ] subprocess.check_call(cmd) + +def enable_pocket(pocket): + apt_sources = "/etc/apt/sources.list" + with open(apt_sources, "r") as sources: + lines = sources.readlines() + with open(apt_sources, "w") as sources: + for line in lines: + if pocket in line: + sources.write(re.sub('^# deb', 'deb', line)) + else: + sources.write(line) + # Protocols TCP = 'TCP' UDP = 'UDP' From 973c4b2faa00cc37ce83c14813de13e6d7c8abb9 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:08:56 +0000 Subject: [PATCH 0151/2699] Misc updates for upgrades to 0.55 series --- ceph-proxy/hooks/hooks.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 84d8652c..32f266d3 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -20,8 +20,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.version_compare(ceph.get_ceph_version(), - "0.55.1") < 0: + if ceph.get_ceph_version() >= "0.55.1": for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') @@ -112,6 +111,18 @@ def get_mon_hosts(): return hosts +def update_monfs(): + hostname = utils.get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if (os.path.exists(monfs) and + not os.path.exists(upstart)): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) @@ -172,8 +183,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.version_compare(ceph.get_ceph_version(), - "0.55") >= 0: + if ceph.get_ceph_version() >= "0.55": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') @@ -308,6 +318,7 @@ def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() + update_monfs() utils.juju_log('INFO', 'End upgrade-charm hook.') From e1424f6a8c47b88e09a48d7b1781322caab4bd3c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:08:56 +0000 Subject: [PATCH 0152/2699] Misc updates for upgrades to 0.55 series --- ceph-mon/hooks/hooks.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git 
a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 84d8652c..32f266d3 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -20,8 +20,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.version_compare(ceph.get_ceph_version(), - "0.55.1") < 0: + if ceph.get_ceph_version() >= "0.55.1": for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') @@ -112,6 +111,18 @@ def get_mon_hosts(): return hosts +def update_monfs(): + hostname = utils.get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if (os.path.exists(monfs) and + not os.path.exists(upstart)): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) @@ -172,8 +183,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.version_compare(ceph.get_ceph_version(), - "0.55") >= 0: + if ceph.get_ceph_version() >= "0.55": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') @@ -308,6 +318,7 @@ def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() install_upstart_scripts() + update_monfs() utils.juju_log('INFO', 'End upgrade-charm hook.') From d08c99cb81a06c52aadc7fd81e4888d28d282999 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:10:40 +0000 Subject: [PATCH 0153/2699] Correct upstart versioned installs --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 32f266d3..5df4c52e 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -20,7 +20,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.get_ceph_version() >= "0.55.1": + if ceph.get_ceph_version() < "0.55.1": for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') From 04238f04a9efb927631ec723417bf31d7c98f355 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:10:40 +0000 Subject: [PATCH 0154/2699] Correct upstart versioned installs --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 32f266d3..5df4c52e 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -20,7 +20,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.get_ceph_version() >= "0.55.1": + if ceph.get_ceph_version() < "0.55.1": for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') From e914b406ca8bfe57b9862e60b7761d83766909e5 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:11:24 +0000 Subject: [PATCH 0155/2699] Misc updates --- ceph-osd/hooks/hooks.py | 6 ++---- ceph-osd/hooks/utils.py | 17 ++++++++++++++++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 16286924..4187a3ec 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -19,8 +19,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.version_compare(ceph.get_ceph_version(), - "0.55.1") < 0: + if ceph.get_ceph_version() < "0.55.1": for x in glob.glob('files/upstart/*.conf'): 
shutil.copy(x, '/etc/init/') @@ -141,8 +140,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.version_compare(ceph.get_ceph_version(), - "0.55") >= 0: + if ceph.get_ceph_version() >= "0.55": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 65d18fc2..c21d0d4c 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -11,16 +11,19 @@ import subprocess import socket import sys +import re def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) try: - hooks[hook]() + hook_func = hooks[hook] except KeyError: juju_log('INFO', "This charm doesn't know how to handle '{}'.".format(hook)) + else: + hook_func() def install(*pkgs): @@ -88,6 +91,18 @@ def configure_source(): ] subprocess.check_call(cmd) + +def enable_pocket(pocket): + apt_sources = "/etc/apt/sources.list" + with open(apt_sources, "r") as sources: + lines = sources.readlines() + with open(apt_sources, "w") as sources: + for line in lines: + if pocket in line: + sources.write(re.sub('^# deb', 'deb', line)) + else: + sources.write(line) + # Protocols TCP = 'TCP' UDP = 'UDP' From 748edac9228b70373ad5d9dbe87b27c1a5da9be1 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 09:15:51 +0000 Subject: [PATCH 0156/2699] Resync utils, ceph, add support for keystone auth --- ceph-radosgw/config.yaml | 21 ++++++ ceph-radosgw/hooks/ceph.py | 117 +++++++++++++++++++++++-------- ceph-radosgw/hooks/hooks.py | 62 ++++++++++++++-- ceph-radosgw/hooks/utils.py | 32 +++++++-- ceph-radosgw/metadata.yaml | 2 + ceph-radosgw/revision | 2 +- ceph-radosgw/templates/ceph.conf | 17 +++++ 7 files changed, 211 insertions(+), 42 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 26c89856..8f3235f2 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -20,3 +20,24 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + # Keystone integration + operator-roles: + default: "Member,Admin" + type: string + description: | + Comma-separated list of Swift operator roles; used when integrating with + OpenStack Keystone. + region: + default: RegionOne + type: string + description: | + OpenStack region that the RADOS gateway supports; used when integrating with + OpenStack Keystone. + cache-size: + default: 500 + type: int + description: Number of keystone tokens to hold in local cache. + revocation-check-interval: + default: 600 + type: int + description: Interval between revocation checks to keystone. 
diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 25873eb8..afe29674 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -12,8 +12,11 @@ import time import utils import os +import apt_pkg as apt -QUORUM = ['leader', 'peon'] +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] def is_quorum(): @@ -40,6 +43,30 @@ def is_quorum(): return False +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + cmd = [ + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + def wait_for_quorum(): while not is_quorum(): time.sleep(3) @@ -58,6 +85,12 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' + ] + def is_osd_disk(dev): try: @@ -72,9 +105,33 @@ def is_osd_disk(dev): pass return False + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + +def zap_disk(dev): + cmd = ['sgdisk', '--zap-all', dev] + subprocess.check_call(cmd) + + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while (not is_bootstrapped()): + time.sleep(3) + + def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ @@ -98,34 +155,7 @@ def import_osd_bootstrap_key(key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.bootstrap-osd', - ] - # Add capabilities - for subsystem, subcaps in _osd_bootstrap_caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return get_named_key('bootstrap-osd', _osd_bootstrap_caps) _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -150,6 +180,17 @@ def import_radosgw_key(key): def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow r'], + 'osd': ['allow rwx'] + } + + +def get_named_key(name, caps=None): + caps = caps or _default_caps cmd = [ 'ceph', '--name', 'mon.', @@ -157,10 +198,10 @@ def get_radosgw_key(): '/var/lib/ceph/mon/ceph-{}/keyring'.format( utils.get_unit_hostname() ), - 'auth', 'get-or-create', 'client.radosgw.gateway', + 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in _radosgw_caps.iteritems(): + for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), @@ -178,3 +219,17 @@ def get_radosgw_key(): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key + + +def get_ceph_version(package=None): + apt.init() + 
cache = apt.Cache() + pkg = cache[package or 'ceph'] + if pkg.current_ver: + return apt.upstream_version(pkg.current_ver.ver_str) + else: + return None + + +def version_compare(a, b): + return apt.version_compare(a, b) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index f989594e..b239c1f0 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -22,6 +22,9 @@ def install_www_scripts(): shutil.copy(x, '/var/www/') +NSS_DIR='/var/lib/ceph/nss' + + def install(): utils.juju_log('INFO', 'Begin install hook.') utils.enable_pocket('multiverse') @@ -30,6 +33,7 @@ def install(): 'libapache2-mod-fastcgi', 'apache2', 'ntp') + os.makedirs(NSS_DIR) utils.juju_log('INFO', 'End install hook.') @@ -41,8 +45,17 @@ def emit_cephconf(): cephcontext = { 'auth_supported': get_auth() or 'none', 'mon_hosts': ' '.join(get_mon_hosts()), - 'hostname': utils.get_unit_hostname() + 'hostname': utils.get_unit_hostname(), + 'version': ceph.get_ceph_version('radosgw') } + + # Check to ensure that correct version of ceph is + # in use + if ceph.get_ceph_version('radosgw') >= "0.55": + # Add keystone configuration if found + ks_conf = get_keystone_conf() + if ks_conf: + cephcontext.update(ks_conf) with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(utils.render_template('ceph.conf', cephcontext)) @@ -108,16 +121,33 @@ def get_conf(name): for unit in utils.relation_list(relid): conf = utils.relation_get(name, unit, relid) - if conf != "": + if conf: return conf return None +def get_keystone_conf(): + for relid in utils.relation_ids('identity-service'): + for unit in utils.relation_list(relid): + ks_auth = { + 'auth_type': 'keystone', + 'auth_protocol': 'http', + 'auth_host': utils.relation_get('auth_host', unit, relid), + 'auth_port': utils.relation_get('auth_port', unit, relid), + 'admin_token': utils.relation_get('admin_token', unit, relid), + 'user_roles': utils.config_get('operator-roles'), + 'cache_size': utils.config_get('cache-size'), + 'revocation_check_interval': utils.config_get('revocation-check-interval') + } + if None not in ks_auth.itervalues(): + return ks_auth + return None + def mon_relation(): utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() key = utils.relation_get('radosgw_key') - if key != "": + if key: ceph.import_radosgw_key(key) restart() # TODO figure out a better way todo this utils.juju_log('INFO', 'End mon-relation hook.') @@ -141,7 +171,7 @@ def start(): def stop(): - subprocess.call(['service', 'radosgw', 'start']) + subprocess.call(['service', 'radosgw', 'stop']) utils.expose(port=80) @@ -150,6 +180,28 @@ def restart(): utils.expose(port=80) +def identity_joined(relid=None): + if ceph.get_ceph_version('radosgw') < "0.55": + utils.juju_log('ERROR', + 'Integration with keystone requires ceph >= 0.55') + sys.exit(1) + + hostname = utils.unit_get('private-address') + admin_url = 'http://{}:80/swift'.format(hostname) + internal_url = public_url = '{}/v1'.format(admin_url) + utils.relation_set(service='swift', + region=utils.config_get('region'), + public_url=public_url, internal_url=internal_url, + admin_url=admin_url, + requested_roles=utils.config_get('operator-roles'), + rid=relid) + + +def identity_changed(): + emit_cephconf() + restart() + + utils.do_hooks({ 'install': install, 'config-changed': config_changed, @@ -157,6 +209,8 @@ def restart(): 'mon-relation-changed': mon_relation, 'gateway-relation-joined': gateway_relation, 'upgrade-charm': config_changed, # same function ATM + 'identity-service-relation-joined': 
identity_joined, + 'identity-service-relation-changed': identity_changed }) sys.exit(0) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 92fe6285..158f7f46 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -18,10 +18,12 @@ def do_hooks(hooks): hook = os.path.basename(sys.argv[0]) try: - hooks[hook]() + hook_func = hooks[hook] except KeyError: juju_log('INFO', "This charm doesn't know how to handle '{}'.".format(hook)) + else: + hook_func() def install(*pkgs): @@ -101,7 +103,6 @@ def enable_pocket(pocket): else: sources.write(line) - # Protocols TCP = 'TCP' UDP = 'UDP' @@ -150,15 +151,26 @@ def relation_get(attribute, unit=None, rid=None): cmd.append(attribute) if unit: cmd.append(unit) - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def relation_set(**kwargs): cmd = [ 'relation-set' ] + args = [] for k, v in kwargs.items(): - cmd.append('{}={}'.format(k, v)) + if k == 'rid': + if v: + cmd.append('-r') + cmd.append(v) + else: + args.append('{}={}'.format(k, v)) + cmd += args subprocess.check_call(cmd) @@ -167,7 +179,11 @@ def unit_get(attribute): 'unit-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def config_get(attribute): @@ -175,7 +191,11 @@ def config_get(attribute): 'config-get', attribute ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + value = str(subprocess.check_output(cmd)).strip() + if value == "": + return None + else: + return value def get_unit_hostname(): diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 9db4b805..1e4a9f8e 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -10,6 +10,8 @@ description: | requires: mon: interface: ceph-radosgw + identity-service: + interface: keystone provides: gateway: interface: http diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index 60d3b2f4..aabe6ec3 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -15 +21 diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index f5f2bcb2..b71c20f4 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -1,5 +1,11 @@ [global] +{% if version < "0.51" %} auth supported = {{ auth_supported }} +{% else %} + auth cluster required = {{ auth_supported }} + auth service required = {{ auth_supported }} + auth client required = {{ auth_supported }} +{% endif %} mon host = {{ mon_hosts }} [client.radosgw.gateway] @@ -7,3 +13,14 @@ keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log + # Turn off 100-continue optimization as stock mod_fastcgi + # does not support it + rgw print continue = false +{% if auth_type == 'keystone' %} + rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ + rgw keystone admin token = {{ admin_token }} + rgw keystone accepted roles = {{ user_roles }} + rgw keystone token cache size = {{ cache_size }} + rgw keystone revocation interval = {{ revocation_check_interval }} +#nss db path = /var/lib/ceph/nss +{% endif %} \ No newline at end of file From 88035e9f8323aa707238bcb87d324701622c8eed Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 11 Jan 2013 10:28:17 +0000 Subject: [PATCH 0157/2699] Updates to docs for keystone integration --- 
ceph-radosgw/{README => README.md} | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) rename ceph-radosgw/{README => README.md} (70%) diff --git a/ceph-radosgw/README b/ceph-radosgw/README.md similarity index 70% rename from ceph-radosgw/README rename to ceph-radosgw/README.md index 808f720c..c5d7a0e2 100644 --- a/ceph-radosgw/README +++ b/ceph-radosgw/README.md @@ -29,15 +29,31 @@ You can then directly access the RADOS gateway by exposing the service:: The gateway can be accessed over port 80 (as show in juju status exposed ports). +Access +====== + Note that you will need to login to one of the service units supporting the ceph charm to generate some access credentials:: juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"' - For security reasons the ceph-radosgw charm is not setup with appropriate permissions to administer the ceph cluster. +Keystone Integration +==================== + +Ceph >= 0.55 integrates with OpenStack Keystone for authentication of Swift requests. + +This is enabled by relating the ceph-radosgw service with keystone:: + + juju deploy keystone + juju add-relation keystone ceph-radosgw + +If you try to relate the radosgw to keystone with an earlier version of ceph the hook +will error out to let you know. + Scale-out ========= @@ -62,8 +78,7 @@ Location: http://jujucharms.com/charms/ceph-radosgw Bootnotes ========= -The Ceph RADOS Gateway makes use of a multiverse package, -libapache2-mod-fastcgi. As such it will try to automatically enable the -multiverse pocket in /etc/apt/sources.list. Note that there is noting -'wrong' with multiverse components - they typically have less liberal -licensing policies or suchlike. +The Ceph RADOS Gateway makes use of a multiverse package libapache2-mod-fastcgi. +As such it will try to automatically enable the multiverse pocket in +/etc/apt/sources.list. Note that there is nothing 'wrong' with multiverse +components - they typically have less liberal licensing policies or suchlike.
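The add-relation step documented above is implemented by the identity_joined() hook added in the previous patch, which registers the gateway as a 'swift' endpoint with keystone. A sketch of the relation data it publishes, with illustrative values — the real charm reads the address via unit-get and the rest from config.yaml::

    # All values below are assumed for illustration only.
    hostname = '10.5.0.10'  # unit's private-address
    admin_url = 'http://{}:80/swift'.format(hostname)
    internal_url = public_url = '{}/v1'.format(admin_url)
    relation_settings = {
        'service': 'swift',
        'region': 'RegionOne',              # config: region
        'public_url': public_url,           # http://10.5.0.10:80/swift/v1
        'internal_url': internal_url,
        'admin_url': admin_url,             # http://10.5.0.10:80/swift
        'requested_roles': 'Member,Admin',  # config: operator-roles
    }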
From c6438d4f39eeba4f08301f8c38fcfd163863da76 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jan 2013 08:38:15 -0600 Subject: [PATCH 0158/2699] Improve host ip resolution - fixes issues with maas managed DNS --- ceph-proxy/hooks/utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index c21d0d4c..4aff2f14 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -44,6 +44,12 @@ def install(*pkgs): install('python-jinja2') import jinja2 +try: + import dns.resolver +except ImportError: + install('python-dnspython') + import dns.resolver + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -202,9 +208,17 @@ def get_unit_hostname(): def get_host_ip(hostname=unit_get('private-address')): - cmd = [ - 'dig', - '+short', - hostname - ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + pass + try: + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + except dns.resolver.NXDOMAIN: + pass + return None + From f87ef70e9eb277ebd93e4589f1725acec235d202 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jan 2013 08:38:15 -0600 Subject: [PATCH 0159/2699] Improve host ip resolution - fixes issues with maas managed DNS --- ceph-mon/hooks/utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index c21d0d4c..4aff2f14 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -44,6 +44,12 @@ def install(*pkgs): install('python-jinja2') import jinja2 +try: + import dns.resolver +except ImportError: + install('python-dnspython') + import dns.resolver + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -202,9 +208,17 @@ def get_unit_hostname(): def get_host_ip(hostname=unit_get('private-address')): - cmd = [ - 'dig', - '+short', - hostname - ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + pass + try: + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + except dns.resolver.NXDOMAIN: + pass + return None + From 9323a394e07d6cb618a12b2b9d382b41ca19688e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jan 2013 08:40:15 -0600 Subject: [PATCH 0160/2699] Improve host ip resolution - fixes issues with maas managed DNS --- ceph-osd/hooks/utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index c21d0d4c..4aff2f14 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -44,6 +44,12 @@ def install(*pkgs): install('python-jinja2') import jinja2 +try: + import dns.resolver +except ImportError: + install('python-dnspython') + import dns.resolver + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -202,9 +208,17 @@ def get_unit_hostname(): def get_host_ip(hostname=unit_get('private-address')): - cmd = [ - 'dig', - '+short', - hostname - ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + try: + # Test to see if already an IPv4 address + 
socket.inet_aton(hostname) + return hostname + except socket.error: + pass + try: + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + except dns.resolver.NXDOMAIN: + pass + return None + From 4eb00bad849ab15258632c5843b66f8d910bb588 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jan 2013 08:40:58 -0600 Subject: [PATCH 0161/2699] Improve host ip resolution - fixes issues with maas managed DNS --- ceph-radosgw/hooks/utils.py | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 158f7f46..4aff2f14 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -44,6 +44,12 @@ def install(*pkgs): install('python-jinja2') import jinja2 +try: + import dns.resolver +except ImportError: + install('python-dnspython') + import dns.resolver + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -165,9 +171,8 @@ def relation_set(**kwargs): args = [] for k, v in kwargs.items(): if k == 'rid': - if v: - cmd.append('-r') - cmd.append(v) + cmd.append('-r') + cmd.append(v) else: args.append('{}={}'.format(k, v)) cmd += args @@ -203,9 +208,17 @@ def get_unit_hostname(): def get_host_ip(hostname=unit_get('private-address')): - cmd = [ - 'dig', - '+short', - hostname - ] - return subprocess.check_output(cmd).strip() # IGNORE:E1103 + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + pass + try: + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + except dns.resolver.NXDOMAIN: + pass + return None + From a91b897145c4993983114f944b1dc68e03d751ed Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Mon, 28 Jan 2013 16:18:48 +1300 Subject: [PATCH 0162/2699] use ceph.list, not quantum.list --- ceph-proxy/hooks/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 5ae96bc0..f03eb8ac 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -72,7 +72,7 @@ def configure_source(): with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: + with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") key = config_get('key') if key: From 17698eea2b07f89601b0f4eda24f50aacc95f3c6 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Mon, 28 Jan 2013 16:18:48 +1300 Subject: [PATCH 0163/2699] use ceph.list, not quantum.list --- ceph-mon/hooks/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 5ae96bc0..f03eb8ac 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -72,7 +72,7 @@ def configure_source(): with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: + with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") key = config_get('key') if key: From ce17863e132d13286c315b6dc659ad82142a05fc Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Mon, 28 Jan 2013 16:24:05 +1300 Subject: [PATCH 0164/2699] use ceph.list, not quantum.list --- ceph-osd/hooks/utils.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 5ae96bc0..f03eb8ac 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -72,7 +72,7 @@ def configure_source(): with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: + with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") key = config_get('key') if key: From 58c26a0dcaa59c42fe8b16f3557d8cf907b6e496 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Feb 2013 16:00:28 +0000 Subject: [PATCH 0165/2699] Allow get_host_ip helper to raise exception in the event that an IP address cannot be resolved for the provided hostname. --- ceph-proxy/hooks/utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 4aff2f14..5ad0b246 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -213,12 +213,8 @@ def get_host_ip(hostname=unit_get('private-address')): socket.inet_aton(hostname) return hostname except socket.error: - pass - try: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address - except dns.resolver.NXDOMAIN: - pass - return None - From 407a7e1956f560365c775b8baaef5b006453c9e6 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Feb 2013 16:00:28 +0000 Subject: [PATCH 0166/2699] Allow get_host_ip helper to raise exception in the event that an IP address cannot be resolved for the provided hostname. --- ceph-mon/hooks/utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 4aff2f14..5ad0b246 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -213,12 +213,8 @@ def get_host_ip(hostname=unit_get('private-address')): socket.inet_aton(hostname) return hostname except socket.error: - pass - try: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address - except dns.resolver.NXDOMAIN: - pass - return None - From 43c9a96985620075d82dc384e0c7d8878f441ec4 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Feb 2013 16:01:17 +0000 Subject: [PATCH 0167/2699] Allow DNS problems to bubble up and break hooks as things are broken if this is the case.
--- ceph-osd/hooks/utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 4aff2f14..5ad0b246 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -213,12 +213,8 @@ def get_host_ip(hostname=unit_get('private-address')): socket.inet_aton(hostname) return hostname except socket.error: - pass - try: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address - except dns.resolver.NXDOMAIN: - pass - return None - From 43c9a96985620075d82dc384e0c7d8878f441ec4 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Feb 2013 16:01:50 +0000 Subject: [PATCH 0168/2699] Allow DNS problems to bubble up and break hooks as things are broken if this is the case. --- ceph-radosgw/hooks/utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 4aff2f14..5ad0b246 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -213,12 +213,8 @@ def get_host_ip(hostname=unit_get('private-address')): socket.inet_aton(hostname) return hostname except socket.error: - pass - try: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address - except dns.resolver.NXDOMAIN: - pass - return None - From 63993c807e27351a90a0de3854ef5d6348b807e1 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Tue, 12 Mar 2013 16:30:06 +0000 Subject: [PATCH 0169/2699] Added python-ceph to the list of installed packages, to make the ceph python API available. --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 5df4c52e..bdd32363 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -28,7 +28,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From e3ff91276196f42204a771a964714b42838e5570 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Tue, 12 Mar 2013 16:30:06 +0000 Subject: [PATCH 0170/2699] Added python-ceph to the list of installed packages, to make the ceph python API available. --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 5df4c52e..bdd32363 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -28,7 +28,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From bb1c3ee32112aad3e81d18c35803d2b57d6d99df Mon Sep 17 00:00:00 2001 From: "Jorge O. 
Castro" Date: Mon, 22 Apr 2013 15:49:09 -0400 Subject: [PATCH 0171/2699] Add categories --- ceph-proxy/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 92a2385a..0d84f430 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -5,6 +5,7 @@ maintainer: James Page , description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. +categories: ["misc"] peers: mon: interface: ceph From ef5bc7b488cd6656448794b13b99dfa4f783ede0 Mon Sep 17 00:00:00 2001 From: "Jorge O. Castro" Date: Mon, 22 Apr 2013 15:49:09 -0400 Subject: [PATCH 0172/2699] Add categories --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 92a2385a..0d84f430 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -5,6 +5,7 @@ maintainer: James Page , description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. +categories: ["misc"] peers: mon: interface: ceph From f6ccdb0ff65cef0b4186cde840e0d24171f821a0 Mon Sep 17 00:00:00 2001 From: Marco Ceppi Date: Thu, 25 Apr 2013 14:24:03 -0400 Subject: [PATCH 0173/2699] Added icon.svg --- ceph-proxy/icon.svg | 414 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 ceph-proxy/icon.svg diff --git a/ceph-proxy/icon.svg b/ceph-proxy/icon.svg new file mode 100644 index 00000000..de53ab2e --- /dev/null +++ b/ceph-proxy/icon.svg @@ -0,0 +1,414 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + From 849d1b9819c0be5c9544329aa9aac6c4e27e2803 Mon Sep 17 00:00:00 2001 From: Marco Ceppi Date: Thu, 25 Apr 2013 14:24:03 -0400 Subject: [PATCH 0174/2699] Added icon.svg --- ceph-mon/icon.svg | 414 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 ceph-mon/icon.svg diff --git a/ceph-mon/icon.svg b/ceph-mon/icon.svg new file mode 100644 index 00000000..de53ab2e --- /dev/null +++ b/ceph-mon/icon.svg @@ -0,0 +1,414 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + From f6c93eb04d1cae46db7996f539cade13aa4308ee Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 1 May 2013 12:45:53 +0100 Subject: [PATCH 0175/2699] Fixup issue with early hook execution failing to notify clients correctly --- ceph-proxy/hooks/hooks.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index bdd32363..274d8fe6 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -255,10 +255,12 @@ def notify_client(): utils.juju_log('INFO', 'Begin notify_client.') for relid in utils.relation_ids('client'): - service_name = utils.relation_list(relid)[0].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported'), - rid=relid) + units = utils.relation_list(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name), + auth=utils.config_get('auth-supported'), + rid=relid) utils.juju_log('INFO', 'End 
notify_client.') From 72d99a02d476aa234e2ce899cda8d8d10435215d Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 1 May 2013 12:45:53 +0100 Subject: [PATCH 0176/2699] Fixup issue with early hook execution failing to notify clients correctly --- ceph-mon/hooks/hooks.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index bdd32363..274d8fe6 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -255,10 +255,12 @@ def notify_client(): utils.juju_log('INFO', 'Begin notify_client.') for relid in utils.relation_ids('client'): - service_name = utils.relation_list(relid)[0].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported'), - rid=relid) + units = utils.relation_list(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + utils.relation_set(key=ceph.get_named_key(service_name), + auth=utils.config_get('auth-supported'), + rid=relid) utils.juju_log('INFO', 'End notify_client.') From be3bff0dd5a265b9401b4d86c41938292df73c20 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 10 May 2013 10:25:06 +0100 Subject: [PATCH 0177/2699] Removed typos from README file. Fixes LP1178301 --- ceph-radosgw/README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index c5d7a0e2..e405d017 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -2,17 +2,15 @@ Overview ======== Ceph is a distributed storage and network file system designed to provide -excellent performance, reliability, and scalability. +excellent performance, reliability and scalability. This charm deploys the RADOS Gateway, a S3 and Swift compatible HTTP gateway for online object storage on-top of a ceph cluster. -This charm only supports the S3 gateway at this point in time. - Usage ===== -In order to use this charm, it assumed that you have already deployed a ceph +In order to use this charm, it is assumed that you have already deployed a ceph storage cluster using the 'ceph' charm with something like this:: juju deploy -n 3 --config ceph.yaml ceph @@ -38,7 +36,7 @@ ceph charm to generate some access credentials:: juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"' -For security reasons the ceph-radosgw charm is not setup with appropriate +For security reasons the ceph-radosgw charm is not set up with appropriate permissions to administer the ceph cluster. 
Keystone Integration From ab305cb06dfaa314051a3fb6d6566bca819d5296 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:22:13 +0100 Subject: [PATCH 0178/2699] Add xfsprogs to list of install packages for argonaut --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 274d8fe6..f6542e33 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -28,7 +28,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From c5d779198d25e81bdf0bd34da7950890e0575bdd Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:22:13 +0100 Subject: [PATCH 0179/2699] Add xfsprogs to list of install packages for argonaut --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 274d8fe6..f6542e33 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -28,7 +28,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From 0456666497a85306318fa9adffe0de2300a706d8 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:23:31 +0100 Subject: [PATCH 0180/2699] Add xfsprogs to list of installed packages for argonaut update --- ceph-osd/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 4187a3ec..efb11fe7 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -27,7 +27,7 @@ def install_upstart_scripts(): def install(): utils.juju_log('INFO', 'Begin install hook.') utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools') + utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs') install_upstart_scripts() utils.juju_log('INFO', 'End install hook.') From cefd0ab0341681c5d36985b2056c5b8d944fce34 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:38:49 +0100 Subject: [PATCH 0181/2699] Ensure xfsprogs is installed on upgrade --- ceph-osd/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index efb11fe7..82b0ba3f 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -191,6 +191,7 @@ def upgrade_charm(): get_auth()): emit_cephconf() install_upstart_scripts() + utils.install('xfsprogs') utils.juju_log('INFO', 'End upgrade-charm hook.') From 922fe3b112fe4bfe9ed9bd1c3f84bf822f3f521a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:39:34 +0100 Subject: [PATCH 0182/2699] Ensure xfsprogs installed on upgrade --- ceph-proxy/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index f6542e33..08e8d913 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -319,6 +319,7 @@ def client_relation(): def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() + 
utils.install('xfsprogs') install_upstart_scripts() update_monfs() utils.juju_log('INFO', 'End upgrade-charm hook.') From 093539bfba09dd3f26153338c57a21d69ff220da Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:39:34 +0100 Subject: [PATCH 0183/2699] Ensure xfsprogs installed on upgrade --- ceph-mon/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index f6542e33..08e8d913 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -319,6 +319,7 @@ def client_relation(): def upgrade_charm(): utils.juju_log('INFO', 'Begin upgrade-charm hook.') emit_cephconf() + utils.install('xfsprogs') install_upstart_scripts() update_monfs() utils.juju_log('INFO', 'End upgrade-charm hook.') From 4467e94493348f34cd29645d89c471a7db71b834 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:46:24 +0100 Subject: [PATCH 0184/2699] Allow osd-format for ceph >= 0.48.3 --- ceph-proxy/config.yaml | 6 +++--- ceph-proxy/hooks/hooks.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 7b2f56db..cbc62b41 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -52,11 +52,11 @@ options: description: | Format of filesystem to use for OSD devices; supported formats include: . - xfs (Default >= 0.55) - ext4 (Only option < 0.55) + xfs (Default >= 0.48.3) + ext4 (Only option < 0.48.3) btrfs (experimental and not recommended) . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. osd-reformat: type: string description: | diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 08e8d913..8f72c258 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -183,7 +183,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.55": + if ceph.get_ceph_version() >= "0.48.3": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') From 596b2e1824bf1faeb27f6a27f5e86d007c366da1 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:46:24 +0100 Subject: [PATCH 0185/2699] Allow osd-format for ceph >= 0.48.3 --- ceph-mon/config.yaml | 6 +++--- ceph-mon/hooks/hooks.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 7b2f56db..cbc62b41 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -52,11 +52,11 @@ options: description: | Format of filesystem to use for OSD devices; supported formats include: . - xfs (Default >= 0.55) - ext4 (Only option < 0.55) + xfs (Default >= 0.48.3) + ext4 (Only option < 0.48.3) btrfs (experimental and not recommended) . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. 
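The version gate introduced in PATCH 0184/0185 compares Ceph version strings with Python's ordinary string operators (ceph.get_ceph_version() >= "0.48.3"). That happens to order "0.48.3" before "0.55", but lexicographic comparison is not a real version ordering: as strings, "0.9" sorts after "0.10". A more robust sketch using python-apt, which these charms already import in ceph.py as apt_pkg:

    import apt_pkg

    apt_pkg.init()

    def version_at_least(current, minimum):
        # Debian-style comparison: runs of digits compare numerically,
        # so '0.10' correctly sorts after '0.9'.
        return apt_pkg.version_compare(current, minimum) >= 0

    assert version_at_least('0.55', '0.48.3')
    assert version_at_least('0.10', '0.9')  # plain string comparison gets this wrong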
osd-reformat: type: string description: | diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 08e8d913..8f72c258 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -183,7 +183,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.55": + if ceph.get_ceph_version() >= "0.48.3": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') From fff8b925d42ab64450c2ad086e75050dc9845b73 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:48:23 +0100 Subject: [PATCH 0186/2699] Allow journal >= 0.48.3 --- ceph-osd/config.yaml | 10 +++++----- ceph-osd/hooks/hooks.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index edcad69a..8db7b035 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -13,18 +13,18 @@ options: The device to use as a shared journal drive for all OSD's. By default no journal device will be used. . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. osd-format: type: string default: xfs description: | Format of filesystem to use for OSD devices; supported formats include: . - xfs (Default >= 0.55) - ext4 (Only option < 0.55) - btrfs (experimental and not recommended) + xfs (Default >= 0.48.3) + ext4 (Only option < 0.48.3) + btrfs (experimental and not recommended) . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. osd-reformat: type: string description: | diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 82b0ba3f..5cdafcab 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -140,7 +140,7 @@ def osdize(dev): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.55": + if ceph.get_ceph_version() >= "0.48.3": osd_format = utils.config_get('osd-format') if osd_format: cmd.append('--fs-type') From 28df1e9385b17bd835be4adfdf56e0b8ee6d64b0 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:48:52 +0100 Subject: [PATCH 0187/2699] Journal also supported >= 0.48.3 --- ceph-proxy/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index cbc62b41..f5061360 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -45,7 +45,7 @@ options: The device to use as a shared journal drive for all OSD's. By default no journal device will be used. . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. osd-format: type: string default: xfs From 92a66fe0ac7dbb11d4732ac77fd7fe6912a84813 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 10 Jun 2013 14:48:52 +0100 Subject: [PATCH 0188/2699] Journal also supported >= 0.48.3 --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index cbc62b41..f5061360 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -45,7 +45,7 @@ options: The device to use as a shared journal drive for all OSD's. By default no journal device will be used. . - Only supported with ceph >= 0.55. + Only supported with ceph >= 0.48.3. 
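On the shared journal option re-enabled for ">= 0.48.3" in PATCH 0186-0188: when osd-journal names an existing block device, osdize() appends it to the ceph-disk-prepare invocation, so every OSD prepared on the unit journals to that one device (typically a faster disk than the data devices). A condensed sketch of the command assembly as the hooks perform it, with an illustrative helper name:

    import os

    def build_prepare_cmd(dev, osd_format=None, osd_journal=None):
        # Mirrors osdize(): an optional --fs-type (only honoured on
        # ceph >= 0.48.3), then the data device, then the shared journal
        # device when one is configured and present.
        cmd = ['ceph-disk-prepare']
        if osd_format:
            cmd += ['--fs-type', osd_format]
        cmd.append(dev)
        if osd_journal and os.path.exists(osd_journal):
            cmd.append(osd_journal)
        return cmd

    # build_prepare_cmd('/dev/sdb', 'xfs', '/dev/sdf') returns
    # ['ceph-disk-prepare', '--fs-type', 'xfs', '/dev/sdb', '/dev/sdf']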
osd-format: type: string default: xfs From b30393536313c9b4c246ec66027aabec5d54aeab Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Jun 2013 22:15:17 +0100 Subject: [PATCH 0189/2699] Fixup for Ceph 0.61.3 --- ceph-proxy/hooks/hooks.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8f72c258..b74eefb2 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -125,8 +125,9 @@ def update_monfs(): def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() - done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) - upstart = '/var/lib/ceph/mon/ceph-{}/upstart'.format(hostname) + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) secret = utils.config_get('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) @@ -134,6 +135,10 @@ def bootstrap_monitor_cluster(): utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized.') else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + os.makedirs('/var/run/ceph', mode=0755) + os.makedirs(path) + # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', From 7d390511062dee98c1beb4c53ad1a0c9f65f2cca Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Jun 2013 22:15:17 +0100 Subject: [PATCH 0190/2699] Fixup for Ceph 0.61.3 --- ceph-mon/hooks/hooks.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8f72c258..b74eefb2 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -125,8 +125,9 @@ def update_monfs(): def bootstrap_monitor_cluster(): hostname = utils.get_unit_hostname() - done = '/var/lib/ceph/mon/ceph-{}/done'.format(hostname) - upstart = '/var/lib/ceph/mon/ceph-{}/upstart'.format(hostname) + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) secret = utils.config_get('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) @@ -134,6 +135,10 @@ def bootstrap_monitor_cluster(): utils.juju_log('INFO', 'bootstrap_monitor_cluster: mon already initialized.') else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + os.makedirs('/var/run/ceph', mode=0755) + os.makedirs(path) + # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', From bcc12cdc06dd890002a78f6fd74985e60d414faf Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:10:07 +0100 Subject: [PATCH 0191/2699] Initial move to charm-helpers --- ceph-proxy/.pydevproject | 4 +- ceph-proxy/charm-helpers-sync.yaml | 4 + ceph-proxy/hooks/ceph.py | 4 +- ceph-proxy/hooks/charmhelpers/__init__.py | 0 .../hooks/charmhelpers/core/__init__.py | 0 ceph-proxy/hooks/charmhelpers/core/hookenv.py | 320 ++++++++++++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 261 ++++++++++++++ ceph-proxy/hooks/hooks.py | 247 +++++++------- 8 files changed, 720 insertions(+), 120 deletions(-) create mode 100644 ceph-proxy/charm-helpers-sync.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/hookenv.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/host.py diff --git a/ceph-proxy/.pydevproject 
b/ceph-proxy/.pydevproject index a61f5fbf..998e0aa1 100644 --- a/ceph-proxy/.pydevproject +++ b/ceph-proxy/.pydevproject @@ -1,7 +1,5 @@ - - - + python 2.7 Default diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml new file mode 100644 index 00000000..7ee93b78 --- /dev/null +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -0,0 +1,4 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 6502b183..5b107b99 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -10,7 +10,7 @@ import json import subprocess import time -import utils +#import utils import os import apt_pkg as apt @@ -18,6 +18,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/core/__init__.py b/ceph-proxy/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e28bfd98 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,320 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. 
+ try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['unit'] = local_unit() + context['rels'] = relations() + context['rel'] = relation_get() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return Serializable(json.loads( + subprocess.check_output(config_cmd_line) + )) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return Serializable(json.loads(subprocess.check_output(_args))) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in relation_settings.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in kwargs.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return Serializable(relation) + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached 
+def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + return decorated + return wrapper diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..cee4ee05 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -0,0 +1,261 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): + """Create or overwrite a file with the contents of a string""" + context = execution_environment() + context.update(kwargs) + log("Writing file {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + with open(path.format(**context), 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(fmtstr.format(**context)) + + +def render_template_file(source, destination, **kwargs): + """Create or overwrite a file using a template""" + log("Rendering template {} for {}".format(source, + destination)) + context = execution_environment() + with open(source.format(**context), 'r') as template: + 
write_file(destination.format(**context), template.read(), + **kwargs) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
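The restart_on_change helper above hashes each watched file (md5, via file_hash) before the wrapped function runs and restarts the mapped services for any file whose digest changed afterwards. A hedged usage sketch for these charms; the job name 'ceph-mon-all' is borrowed from the start hook further below, and the trivial body stands in for the real config rendering:

    from charmhelpers.core.host import restart_on_change

    @restart_on_change({'/etc/ceph/ceph.conf': ['ceph-mon-all']})
    def reconfigure():
        # ceph-mon-all is restarted only when the file's md5 digest
        # differs from the one taken before this function ran.
        with open('/etc/ceph/ceph.conf', 'w') as cephconf:
            cephconf.write('# rendered configuration\n')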
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(set(restarts)): + service('restart', service_name) + return wrapped_f + return wrap diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b74eefb2..d7029625 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -15,7 +15,35 @@ import sys import ceph -import utils +#import utils +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages, + mkdir + ) + +from utils import ( + render_template, + configure_source, + get_host_ip, + get_unit_hostname + ) + + +hooks = Hooks() def install_upstart_scripts(): @@ -25,54 +53,55 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') +@hooks.hook('install') def install(): - utils.juju_log('INFO', 'Begin install hook.') - utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs') + log('Begin install hook.') + configure_source() + apt_update(fatal=True) + apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + log('End install hook.') def emit_cephconf(): cephcontext = { - 'auth_supported': utils.config_get('auth-supported'), + 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': utils.config_get('fsid'), + 'fsid': config('fsid'), 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(utils.render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', cephcontext)) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +@hooks.hook('config-changed') def config_changed(): - utils.juju_log('INFO', 'Begin config-changed hook.') + log('Begin config-changed hook.') - utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks - if not utils.config_get('fsid'): - utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') + if not config('fsid'): + log('No fsid supplied, cannot proceed.', level=ERROR) sys.exit(1) - if not utils.config_get('monitor-secret'): - utils.juju_log('CRITICAL', - 'No monitor-secret supplied, cannot proceed.') + if not config('monitor-secret'): + log('No monitor-secret supplied, cannot proceed.', level=ERROR) sys.exit(1) - if utils.config_get('osd-format') not in ceph.DISK_FORMATS: - utils.juju_log('CRITICAL', - 'Invalid OSD disk format configuration specified') + if config('osd-format') not in ceph.DISK_FORMATS: + log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) emit_cephconf() - e_mountpoint = utils.config_get('ephemeral-unmount') + e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and os.path.exists(osd_journal)): @@ -80,31 +109,31 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in 
utils.config_get('osd-devices').split(' '): + for dev in config('osd-devices').split(' '): osdize(dev) # Support use of single node ceph if (not ceph.is_bootstrapped() and - int(utils.config_get('monitor-count')) == 1): + int(config('monitor-count')) == 1): bootstrap_monitor_cluster() ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): ceph.rescan_osd_devices() - utils.juju_log('INFO', 'End config-changed hook.') + log('End config-changed hook.') def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(utils.get_host_ip())) + hosts.append('{}:6789'.format(get_host_ip())) - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): + for relid in relation_ids('mon'): + for unit in related_units(relid): hosts.append( - '{}:6789'.format(utils.get_host_ip( - utils.relation_get('private-address', - unit, relid))) + '{}:6789'.format(get_host_ip( + relation_get('private-address', + unit, relid))) ) hosts.sort() @@ -112,7 +141,7 @@ def get_mon_hosts(): def update_monfs(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) upstart = '{}/upstart'.format(monfs) if (os.path.exists(monfs) and @@ -124,20 +153,19 @@ def update_monfs(): def bootstrap_monitor_cluster(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) upstart = '{}/upstart'.format(path) - secret = utils.config_get('monitor-secret') + secret = config('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): - utils.juju_log('INFO', - 'bootstrap_monitor_cluster: mon already initialized.') + log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - os.makedirs('/var/run/ceph', mode=0755) - os.makedirs(path) + mkdir('/var/run/ceph', perms=0755) + mkdir(path) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -162,7 +190,7 @@ def bootstrap_monitor_cluster(): def reformat_osd(): - if utils.config_get('osd-reformat'): + if config('osd-reformat'): return True else: return False @@ -170,31 +198,28 @@ def reformat_osd(): def osdize(dev): if not os.path.exists(dev): - utils.juju_log('INFO', - 'Path {} does not exist - bailing'.format(dev)) + log('Path {} does not exist - bailing'.format(dev)) return if (ceph.is_osd_disk(dev) and not reformat_osd()): - utils.juju_log('INFO', - 'Looks like {} is already an OSD, skipping.' + log('Looks like {} is already an OSD, skipping.' 
.format(dev)) return if device_mounted(dev): - utils.juju_log('INFO', - 'Looks like {} is in use, skipping.'.format(dev)) + log('Looks like {} is in use, skipping.'.format(dev)) return cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if ceph.get_ceph_version() >= "0.48.3": - osd_format = utils.config_get('osd-format') + osd_format = config('osd-format') if osd_format: cmd.append('--fs-type') cmd.append(osd_format) cmd.append(dev) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and os.path.exists(osd_journal)): cmd.append(osd_journal) @@ -213,11 +238,13 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 +@hooks.hook('mon-relation-departed') +@hooks.hook('mon-relation-joined') def mon_relation(): - utils.juju_log('INFO', 'Begin mon-relation hook.') + log('Begin mon-relation hook.') emit_cephconf() - moncount = int(utils.config_get('monitor-count')) + moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() ceph.wait_for_bootstrap() @@ -226,127 +253,115 @@ def mon_relation(): notify_radosgws() notify_client() else: - utils.juju_log('INFO', - 'Not enough mons ({}), punting.'.format( + log('Not enough mons ({}), punting.'.format( len(get_mon_hosts()))) - utils.juju_log('INFO', 'End mon-relation hook.') + log('End mon-relation hook.') def notify_osds(): - utils.juju_log('INFO', 'Begin notify_osds.') + log('Begin notify_osds.') - for relid in utils.relation_ids('osd'): - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + fsid=config('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_osds.') + log('End notify_osds.') def notify_radosgws(): - utils.juju_log('INFO', 'Begin notify_radosgws.') + log('Begin notify_radosgws.') - for relid in utils.relation_ids('radosgw'): - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('radosgw'): + relation_set(relation_id=relid, + radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_radosgws.') + log('End notify_radosgws.') def notify_client(): - utils.juju_log('INFO', 'Begin notify_client.') + log('Begin notify_client.') - for relid in utils.relation_ids('client'): - units = utils.relation_list(relid) + for relid in relation_ids('client'): + units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported'), - rid=relid) + relation_set(relation_id=relid, + key=ceph.get_named_key(service_name), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_client.') + log('End notify_client.') +@hooks.hook('osd-relation-joined') def osd_relation(): - utils.juju_log('INFO', 'Begin osd-relation hook.') + log('Begin osd-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - providing fsid & keys') - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing fsid & keys') + relation_set(fsid=config('fsid'), + 
osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring fsid provision') + log('mon cluster not in quorum - deferring fsid provision') - utils.juju_log('INFO', 'End osd-relation hook.') + log('End osd-relation hook.') +@hooks.hook('radosgw-relation-joined') def radosgw_relation(): - utils.juju_log('INFO', 'Begin radosgw-relation hook.') + log('Begin radosgw-relation hook.') - utils.install('radosgw') # Install radosgw for admin tools + # Install radosgw for admin tools + apt_install(packages=filter_installed_packages(['radosgw'])) if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing radosgw with keys') - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing radosgw with keys') + relation_set(radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End radosgw-relation hook.') + log('End radosgw-relation hook.') +@hooks.hook('client-relation-joined') def client_relation(): - utils.juju_log('INFO', 'Begin client-relation hook.') + log('Begin client-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing client with keys') - service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing client with keys') + service_name = remote_unit().split('/')[0] + relation_set(key=ceph.get_named_key(service_name), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End client-relation hook.') + log('End client-relation hook.') +@hooks.hook('upgrade-charm') def upgrade_charm(): - utils.juju_log('INFO', 'Begin upgrade-charm hook.') + log('Begin upgrade-charm hook.') emit_cephconf() - utils.install('xfsprogs') + apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() update_monfs() - utils.juju_log('INFO', 'End upgrade-charm hook.') + log('End upgrade-charm hook.') +@hooks.hook('start') def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
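Two details are worth flagging in this migration. First, PATCH 0189/0190 created /var/run/ceph and the mon directory with bare os.makedirs(), which raises OSError when the path already exists, so a re-executed hook died; the charm-helpers mkdir() swapped into bootstrap_monitor_cluster() above skips creation when the directory is already there. A minimal sketch of the idempotent form, using the Python 2 octal modes these hooks use:

    import os

    def ensure_dir(path, mode=0755):
        # Unlike bare os.makedirs(), re-running this is harmless when
        # the directory already exists.
        if not os.path.isdir(path):
            os.makedirs(path, mode)

    ensure_dir('/tmp/ceph-demo')
    ensure_dir('/tmp/ceph-demo')  # second call is a no-op

Second, the start hook in the hunk below switches from 'ceph-mon-all-starter' to 'ceph-mon-all', presumably tracking the upstart job name shipped by newer Ceph packages.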
- subprocess.call(['start', 'ceph-mon-all-starter']) + subprocess.call(['start', 'ceph-mon-all']) ceph.rescan_osd_devices() -utils.do_hooks({ - 'config-changed': config_changed, - 'install': install, - 'mon-relation-departed': mon_relation, - 'mon-relation-joined': mon_relation, - 'osd-relation-joined': osd_relation, - 'radosgw-relation-joined': radosgw_relation, - 'client-relation-joined': client_relation, - 'start': start, - 'upgrade-charm': upgrade_charm, - }) - -sys.exit(0) +try: + hooks.execute(sys.argv) +except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From 1b0cd6de07653b64482a5ece8497c3ec7c247182 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:10:07 +0100 Subject: [PATCH 0192/2699] Initial move to charm-helpers --- ceph-mon/.pydevproject | 4 +- ceph-mon/charm-helpers-sync.yaml | 4 + ceph-mon/hooks/ceph.py | 4 +- ceph-mon/hooks/charmhelpers/__init__.py | 0 ceph-mon/hooks/charmhelpers/core/__init__.py | 0 ceph-mon/hooks/charmhelpers/core/hookenv.py | 320 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 261 +++++++++++++++ ceph-mon/hooks/hooks.py | 247 +++++++------- 8 files changed, 720 insertions(+), 120 deletions(-) create mode 100644 ceph-mon/charm-helpers-sync.yaml create mode 100644 ceph-mon/hooks/charmhelpers/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/hookenv.py create mode 100644 ceph-mon/hooks/charmhelpers/core/host.py diff --git a/ceph-mon/.pydevproject b/ceph-mon/.pydevproject index a61f5fbf..998e0aa1 100644 --- a/ceph-mon/.pydevproject +++ b/ceph-mon/.pydevproject @@ -1,7 +1,5 @@ - - - + python 2.7 Default diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml new file mode 100644 index 00000000..7ee93b78 --- /dev/null +++ b/ceph-mon/charm-helpers-sync.yaml @@ -0,0 +1,4 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 6502b183..5b107b99 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -10,7 +10,7 @@ import json import subprocess import time -import utils +#import utils import os import apt_pkg as apt @@ -18,6 +18,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/core/__init__.py b/ceph-mon/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e28bfd98 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,320 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. 
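A design note on the hookenv helpers being synced into ceph-mon here, as into ceph-proxy in PATCH 0191: results of juju tool invocations such as config() and relation_get() are memoized per function-plus-arguments key, and relation_set() calls flush(local_unit()) so cached reads mentioning the local unit are evicted rather than served stale. A condensed sketch of the mechanism, not the shipped code:

    cache = {}

    def cached(func):
        def wrapper(*args, **kwargs):
            key = str((func, args, kwargs))
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]
        return wrapper

    def flush(key):
        # Evict every memoized entry whose key string mentions 'key'.
        for item in [k for k in cache if key in k]:
            del cache[item]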
+ ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['unit'] = local_unit() + context['rels'] = relations() + context['rel'] = relation_get() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return Serializable(json.loads( + subprocess.check_output(config_cmd_line) + )) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return Serializable(json.loads(subprocess.check_output(_args))) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in relation_settings.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in kwargs.items(): + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + 
reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return Serializable(relation) + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + return decorated + return wrapper diff --git a/ceph-mon/hooks/charmhelpers/core/host.py 
b/ceph-mon/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..cee4ee05 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -0,0 +1,261 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): + """Create or overwrite a file with the contents of a string""" + context = execution_environment() + context.update(kwargs) + log("Writing file {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + with open(path.format(**context), 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(fmtstr.format(**context)) + + +def render_template_file(source, 
destination, **kwargs): + """Create or overwrite a file using a template""" + log("Rendering template {} for {}".format(source, + destination)) + context = execution_environment() + with open(source.format(**context), 'r') as template: + write_file(destination.format(**context), template.read(), + **kwargs) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(set(restarts)): + service('restart', service_name) + return wrapped_f + return wrap diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b74eefb2..d7029625 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -15,7 +15,35 @@ import sys import ceph -import utils +#import utils +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages, + mkdir + ) + +from utils import ( + render_template, + configure_source, + get_host_ip, + get_unit_hostname + ) + + +hooks = Hooks() def install_upstart_scripts(): @@ -25,54 +53,55 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') +@hooks.hook('install') def install(): - utils.juju_log('INFO', 'Begin install hook.') - utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs') + log('Begin install hook.') + configure_source() + apt_update(fatal=True) + apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + log('End install hook.') def emit_cephconf(): cephcontext = { - 'auth_supported': utils.config_get('auth-supported'), + 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': utils.config_get('fsid'), + 'fsid': config('fsid'), 'version': ceph.get_ceph_version() } with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(utils.render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', cephcontext)) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +@hooks.hook('config-changed') def config_changed(): - utils.juju_log('INFO', 'Begin config-changed hook.') + log('Begin config-changed hook.') - utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts())) + log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks - if not utils.config_get('fsid'): - utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.') + if not config('fsid'): + log('No fsid supplied, cannot proceed.', level=ERROR) sys.exit(1) - if not utils.config_get('monitor-secret'): - utils.juju_log('CRITICAL', - 'No monitor-secret supplied, cannot proceed.') + if not config('monitor-secret'): + log('No monitor-secret supplied, cannot proceed.', level=ERROR) sys.exit(1) - if utils.config_get('osd-format') not in ceph.DISK_FORMATS: - utils.juju_log('CRITICAL', - 'Invalid OSD disk format configuration specified') + if config('osd-format') not in ceph.DISK_FORMATS: + log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) emit_cephconf() - e_mountpoint = utils.config_get('ephemeral-unmount') + e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and filesystem_mounted(e_mountpoint)): subprocess.call(['umount', e_mountpoint]) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and os.path.exists(osd_journal)): @@ -80,31 +109,31 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in 
utils.config_get('osd-devices').split(' '): + for dev in config('osd-devices').split(' '): osdize(dev) # Support use of single node ceph if (not ceph.is_bootstrapped() and - int(utils.config_get('monitor-count')) == 1): + int(config('monitor-count')) == 1): bootstrap_monitor_cluster() ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): ceph.rescan_osd_devices() - utils.juju_log('INFO', 'End config-changed hook.') + log('End config-changed hook.') def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(utils.get_host_ip())) + hosts.append('{}:6789'.format(get_host_ip())) - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): + for relid in relation_ids('mon'): + for unit in related_units(relid): hosts.append( - '{}:6789'.format(utils.get_host_ip( - utils.relation_get('private-address', - unit, relid))) + '{}:6789'.format(get_host_ip( + relation_get('private-address', + unit, relid))) ) hosts.sort() @@ -112,7 +141,7 @@ def get_mon_hosts(): def update_monfs(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) upstart = '{}/upstart'.format(monfs) if (os.path.exists(monfs) and @@ -124,20 +153,19 @@ def update_monfs(): def bootstrap_monitor_cluster(): - hostname = utils.get_unit_hostname() + hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) upstart = '{}/upstart'.format(path) - secret = utils.config_get('monitor-secret') + secret = config('monitor-secret') keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): - utils.juju_log('INFO', - 'bootstrap_monitor_cluster: mon already initialized.') + log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - os.makedirs('/var/run/ceph', mode=0755) - os.makedirs(path) + mkdir('/var/run/ceph', perms=0755) + mkdir(path) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -162,7 +190,7 @@ def bootstrap_monitor_cluster(): def reformat_osd(): - if utils.config_get('osd-reformat'): + if config('osd-reformat'): return True else: return False @@ -170,31 +198,28 @@ def reformat_osd(): def osdize(dev): if not os.path.exists(dev): - utils.juju_log('INFO', - 'Path {} does not exist - bailing'.format(dev)) + log('Path {} does not exist - bailing'.format(dev)) return if (ceph.is_osd_disk(dev) and not reformat_osd()): - utils.juju_log('INFO', - 'Looks like {} is already an OSD, skipping.' + log('Looks like {} is already an OSD, skipping.' 
.format(dev)) return if device_mounted(dev): - utils.juju_log('INFO', - 'Looks like {} is in use, skipping.'.format(dev)) + log('Looks like {} is in use, skipping.'.format(dev)) return cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if ceph.get_ceph_version() >= "0.48.3": - osd_format = utils.config_get('osd-format') + osd_format = config('osd-format') if osd_format: cmd.append('--fs-type') cmd.append(osd_format) cmd.append(dev) - osd_journal = utils.config_get('osd-journal') + osd_journal = config('osd-journal') if (osd_journal and os.path.exists(osd_journal)): cmd.append(osd_journal) @@ -213,11 +238,13 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 +@hooks.hook('mon-relation-departed') +@hooks.hook('mon-relation-joined') def mon_relation(): - utils.juju_log('INFO', 'Begin mon-relation hook.') + log('Begin mon-relation hook.') emit_cephconf() - moncount = int(utils.config_get('monitor-count')) + moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: bootstrap_monitor_cluster() ceph.wait_for_bootstrap() @@ -226,127 +253,115 @@ def mon_relation(): notify_radosgws() notify_client() else: - utils.juju_log('INFO', - 'Not enough mons ({}), punting.'.format( + log('Not enough mons ({}), punting.'.format( len(get_mon_hosts()))) - utils.juju_log('INFO', 'End mon-relation hook.') + log('End mon-relation hook.') def notify_osds(): - utils.juju_log('INFO', 'Begin notify_osds.') + log('Begin notify_osds.') - for relid in utils.relation_ids('osd'): - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + fsid=config('fsid'), + osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_osds.') + log('End notify_osds.') def notify_radosgws(): - utils.juju_log('INFO', 'Begin notify_radosgws.') + log('Begin notify_radosgws.') - for relid in utils.relation_ids('radosgw'): - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported'), - rid=relid) + for relid in relation_ids('radosgw'): + relation_set(relation_id=relid, + radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_radosgws.') + log('End notify_radosgws.') def notify_client(): - utils.juju_log('INFO', 'Begin notify_client.') + log('Begin notify_client.') - for relid in utils.relation_ids('client'): - units = utils.relation_list(relid) + for relid in relation_ids('client'): + units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported'), - rid=relid) + relation_set(relation_id=relid, + key=ceph.get_named_key(service_name), + auth=config('auth-supported')) - utils.juju_log('INFO', 'End notify_client.') + log('End notify_client.') +@hooks.hook('osd-relation-joined') def osd_relation(): - utils.juju_log('INFO', 'Begin osd-relation hook.') + log('Begin osd-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - providing fsid & keys') - utils.relation_set(fsid=utils.config_get('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing fsid & keys') + relation_set(fsid=config('fsid'), + 
osd_bootstrap_key=ceph.get_osd_bootstrap_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring fsid provision') + log('mon cluster not in quorum - deferring fsid provision') - utils.juju_log('INFO', 'End osd-relation hook.') + log('End osd-relation hook.') +@hooks.hook('radosgw-relation-joined') def radosgw_relation(): - utils.juju_log('INFO', 'Begin radosgw-relation hook.') + log('Begin radosgw-relation hook.') - utils.install('radosgw') # Install radosgw for admin tools + # Install radosgw for admin tools + apt_install(packages=filter_installed_packages(['radosgw'])) if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing radosgw with keys') - utils.relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing radosgw with keys') + relation_set(radosgw_key=ceph.get_radosgw_key(), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End radosgw-relation hook.') + log('End radosgw-relation hook.') +@hooks.hook('client-relation-joined') def client_relation(): - utils.juju_log('INFO', 'Begin client-relation hook.') + log('Begin client-relation hook.') if ceph.is_quorum(): - utils.juju_log('INFO', - 'mon cluster in quorum - \ - providing client with keys') - service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0] - utils.relation_set(key=ceph.get_named_key(service_name), - auth=utils.config_get('auth-supported')) + log('mon cluster in quorum - providing client with keys') + service_name = remote_unit().split('/')[0] + relation_set(key=ceph.get_named_key(service_name), + auth=config('auth-supported')) else: - utils.juju_log('INFO', - 'mon cluster not in quorum - deferring key provision') + log('mon cluster not in quorum - deferring key provision') - utils.juju_log('INFO', 'End client-relation hook.') + log('End client-relation hook.') +@hooks.hook('upgrade-charm') def upgrade_charm(): - utils.juju_log('INFO', 'Begin upgrade-charm hook.') + log('Begin upgrade-charm hook.') emit_cephconf() - utils.install('xfsprogs') + apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() update_monfs() - utils.juju_log('INFO', 'End upgrade-charm hook.') + log('End upgrade-charm hook.') +@hooks.hook('start') def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
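The dispatch change at the end of this patch replaces the old utils.do_hooks table with the charmhelpers Hooks registry. A minimal sketch of how such a registry works — inferred from the calls used in this patch (hooks.hook, hooks.execute, UnregisteredHookError), not the verbatim charmhelpers source:

    # Sketch of the Hooks registry pattern assumed above: decorated
    # functions are registered under one or more hook names, and
    # execute() dispatches on the basename of the invoked script
    # (each hook file is a symlink to hooks.py).
    import os


    class UnregisteredHookError(Exception):
        """Raised when the invoked hook has no registered handler."""
        pass


    class Hooks(object):
        def __init__(self):
            self._hooks = {}

        def hook(self, *hook_names):
            def wrapper(decorated):
                for hook_name in hook_names:
                    self._hooks[hook_name] = decorated
                return decorated
            return wrapper

        def execute(self, args):
            hook_name = os.path.basename(args[0])
            if hook_name in self._hooks:
                self._hooks[hook_name]()
            else:
                raise UnregisteredHookError(hook_name)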
- subprocess.call(['start', 'ceph-mon-all-starter']) + subprocess.call(['start', 'ceph-mon-all']) ceph.rescan_osd_devices() -utils.do_hooks({ - 'config-changed': config_changed, - 'install': install, - 'mon-relation-departed': mon_relation, - 'mon-relation-joined': mon_relation, - 'osd-relation-joined': osd_relation, - 'radosgw-relation-joined': radosgw_relation, - 'client-relation-joined': client_relation, - 'start': start, - 'upgrade-charm': upgrade_charm, - }) - -sys.exit(0) +try: + hooks.execute(sys.argv) +except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From 40e8fd13cd6bd2f57206ca3dc14b5d81139c4b6b Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:24:03 +0100 Subject: [PATCH 0193/2699] Pickup fixes for string handling --- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/ceph.py | 11 ++++--- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 31 ++++++++++++++----- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 7ee93b78..e55f90ca 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: ../..//charm-helpers/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 5b107b99..ed58b6ed 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -13,6 +13,9 @@ #import utils import os import apt_pkg as apt +from utils import ( + get_unit_hostname + ) LEADER = 'leader' PEON = 'peon' @@ -22,7 +25,7 @@ def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -46,7 +49,7 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -75,7 +78,7 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -198,7 +201,7 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() + get_unit_hostname() ), 'auth', 'get-or-create', 'client.{}'.format(name), ] diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index e28bfd98..b28240c6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -87,6 +87,14 @@ def __getattr__(self, attr): except KeyError: raise AttributeError(attr) + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + def json(self): "Serialize the object to json" return json.dumps(self.data) @@ -142,11 +150,13 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return Serializable(json.loads( - subprocess.check_output(config_cmd_line) - )) + value = json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value @cached @@ -159,19 +169,24 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return Serializable(json.loads(subprocess.check_output(_args))) + value = json.loads(subprocess.check_output(_args)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value def relation_set(relation_id=None, relation_settings={}, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in relation_settings.items(): - relation_cmd_line.append('{}={}'.format(k, v)) - for k, v in kwargs.items(): - relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) From d256aa0b3678ab0d153358bc16114c74badb7536 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:24:03 +0100 Subject: [PATCH 0194/2699] Pickup fixes for string handling --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/ceph.py | 11 +++++--- ceph-mon/hooks/charmhelpers/core/hookenv.py | 31 +++++++++++++++------ 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 7ee93b78..e55f90ca 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: ../..//charm-helpers/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 5b107b99..ed58b6ed 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -13,6 +13,9 @@ #import utils import os import apt_pkg as apt +from utils import ( + get_unit_hostname + ) LEADER = 'leader' PEON = 'peon' @@ -22,7 +25,7 @@ def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -46,7 +49,7 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -75,7 +78,7 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", @@ -198,7 +201,7 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() + get_unit_hostname() ), 'auth', 'get-or-create', 'client.{}'.format(name), ] diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e28bfd98..b28240c6 100644 --- 
a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -87,6 +87,14 @@ def __getattr__(self, attr): except KeyError: raise AttributeError(attr) + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + def json(self): "Serialize the object to json" return json.dumps(self.data) @@ -142,11 +150,13 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return Serializable(json.loads( - subprocess.check_output(config_cmd_line) - )) + value = json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value @cached @@ -159,19 +169,24 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return Serializable(json.loads(subprocess.check_output(_args))) + value = json.loads(subprocess.check_output(_args)) except ValueError: return None + if isinstance(value, dict): + return Serializable(value) + else: + return value def relation_set(relation_id=None, relation_settings={}, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in relation_settings.items(): - relation_cmd_line.append('{}={}'.format(k, v)) - for k, v in kwargs.items(): - relation_cmd_line.append('{}={}'.format(k, v)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) From a9d891fc59b07fde50854ab01373c3359b955b1f Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:38:19 +0100 Subject: [PATCH 0195/2699] Switch back to main charmhelpers branch --- ceph-proxy/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index e55f90ca..7ee93b78 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: ../..//charm-helpers/charm-helpers +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From f4584a03677e9921d65684b1bc58a8592008d1e6 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:38:19 +0100 Subject: [PATCH 0196/2699] Switch back to main charmhelpers branch --- ceph-mon/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index e55f90ca..7ee93b78 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: ../..//charm-helpers/charm-helpers +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 2456b826b3c79b31008fcc5947a0616e9f856a15 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:44:05 +0100 Subject: [PATCH 0197/2699] Minor tidy --- ceph-proxy/hooks/hooks.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index d7029625..d0f29274 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -42,7 +42,6 @@ get_unit_hostname ) - hooks = Hooks() @@ -238,8 +237,8 @@ def filesystem_mounted(fs): 
return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 -@hooks.hook('mon-relation-departed') -@hooks.hook('mon-relation-joined') +@hooks.hook('mon-relation-departed', + 'mon-relation-joined') def mon_relation(): log('Begin mon-relation hook.') emit_cephconf() @@ -361,7 +360,8 @@ def start(): ceph.rescan_osd_devices() -try: - hooks.execute(sys.argv) -except UnregisteredHookError as e: - log('Unknown hook {} - skipping.'.format(e)) +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From 542edc591bbe84f8460fb4e71836c06294aa74ef Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 23 Jun 2013 20:44:05 +0100 Subject: [PATCH 0198/2699] Minor tidy --- ceph-mon/hooks/hooks.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index d7029625..d0f29274 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -42,7 +42,6 @@ get_unit_hostname ) - hooks = Hooks() @@ -238,8 +237,8 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 -@hooks.hook('mon-relation-departed') -@hooks.hook('mon-relation-joined') +@hooks.hook('mon-relation-departed', + 'mon-relation-joined') def mon_relation(): log('Begin mon-relation hook.') emit_cephconf() @@ -361,7 +360,8 @@ def start(): ceph.rescan_osd_devices() -try: - hooks.execute(sys.argv) -except UnregisteredHookError as e: - log('Unknown hook {} - skipping.'.format(e)) +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) From d882eb7db754001c1589e884e1bcb11e77f85aaa Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:43:24 +0100 Subject: [PATCH 0199/2699] Trim down utils.py to remaining calls --- ceph-proxy/hooks/charmhelpers/core/host.py | 4 +- ceph-proxy/hooks/hooks.py | 2 +- ceph-proxy/hooks/utils.py | 155 ++++----------------- 3 files changed, 28 insertions(+), 133 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index cee4ee05..19951ed2 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import subprocess import hashlib +from collections import OrderedDict + from hookenv import log, execution_environment @@ -255,7 +257,7 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(set(restarts)): + for service_name in list(OrderedDict.fromkeys(restarts)): service('restart', service_name) return wrapped_f return wrap diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index d0f29274..819219dd 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -55,7 +55,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source() + configure_source(config('source')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index f5b17211..48c18716 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -7,47 +7,34 @@ # Paul Collins # -import os import subprocess import socket -import sys import re - - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - 
except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) +from charmhelpers.core.hookenv import ( + config, + unit_get, + cached + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages + ) TEMPLATES_DIR = 'templates' try: import jinja2 except ImportError: - install('python-jinja2') + apt_install(filter_installed_packages(['python-jinja2']), + fatal=True) import jinja2 try: import dns.resolver except ImportError: - install('python-dnspython') + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) import dns.resolver @@ -65,8 +52,7 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): """ -def configure_source(): - source = str(config_get('source')) +def configure_source(source=None): if not source: return if source.startswith('ppa:'): @@ -76,14 +62,15 @@ def configure_source(): ] subprocess.check_call(cmd) if source.startswith('cloud:'): - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") - key = config_get('key') + key = config('key') if key: cmd = [ 'apt-key', @@ -91,11 +78,7 @@ def configure_source(): '--recv-keys', key ] subprocess.check_call(cmd) - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) + apt_update(fatal=True) def enable_pocket(pocket): @@ -109,105 +92,15 @@ def enable_pocket(pocket): else: sources.write(line) -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ - 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - ] - if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid': - cmd.append('-r') - cmd.append(v) - else: - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def config_get(attribute): - cmd = [ - 'config-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - +@cached def get_unit_hostname(): return socket.gethostname() -def get_host_ip(hostname=unit_get('private-address')): +@cached +def get_host_ip(hostname=None): + 
hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From 5c6b0c25656832a1c0ca8f6889cb17960e1c8c0d Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:43:24 +0100 Subject: [PATCH 0200/2699] Trim down utils.py to remaining calls --- ceph-mon/hooks/charmhelpers/core/host.py | 4 +- ceph-mon/hooks/hooks.py | 2 +- ceph-mon/hooks/utils.py | 155 ++++------------------- 3 files changed, 28 insertions(+), 133 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index cee4ee05..19951ed2 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import subprocess import hashlib +from collections import OrderedDict + from hookenv import log, execution_environment @@ -255,7 +257,7 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(set(restarts)): + for service_name in list(OrderedDict.fromkeys(restarts)): service('restart', service_name) return wrapped_f return wrap diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index d0f29274..819219dd 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -55,7 +55,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source() + configure_source(config('source')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index f5b17211..48c18716 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -7,47 +7,34 @@ # Paul Collins # -import os import subprocess import socket -import sys import re - - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) +from charmhelpers.core.hookenv import ( + config, + unit_get, + cached + ) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages + ) TEMPLATES_DIR = 'templates' try: import jinja2 except ImportError: - install('python-jinja2') + apt_install(filter_installed_packages(['python-jinja2']), + fatal=True) import jinja2 try: import dns.resolver except ImportError: - install('python-dnspython') + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) import dns.resolver @@ -65,8 +52,7 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): """ -def configure_source(): - source = str(config_get('source')) +def configure_source(source=None): if not source: return if source.startswith('ppa:'): @@ -76,14 +62,15 @@ def configure_source(): ] subprocess.check_call(cmd) if source.startswith('cloud:'): - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") - key = config_get('key') + key = config('key') if key: 
cmd = [ 'apt-key', @@ -91,11 +78,7 @@ def configure_source(): '--recv-keys', key ] subprocess.check_call(cmd) - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) + apt_update(fatal=True) def enable_pocket(pocket): @@ -109,105 +92,15 @@ def enable_pocket(pocket): else: sources.write(line) -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ - 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - ] - if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid': - cmd.append('-r') - cmd.append(v) - else: - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def config_get(attribute): - cmd = [ - 'config-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - +@cached def get_unit_hostname(): return socket.gethostname() -def get_host_ip(hostname=unit_get('private-address')): +@cached +def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From 657e8be58071f293d8d3c59e9cb8d4eca87a3e04 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:44:02 +0100 Subject: [PATCH 0201/2699] Remove commented out import --- ceph-proxy/hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index ed58b6ed..d9f1f4dd 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -10,7 +10,6 @@ import json import subprocess import time -#import utils import os import apt_pkg as apt from utils import ( From ab6e640859be8caecc597ebf0652efc7fceb0d26 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 09:44:02 +0100 Subject: [PATCH 0202/2699] Remove commented out import --- ceph-mon/hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index ed58b6ed..d9f1f4dd 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -10,7 +10,6 @@ import json import subprocess import time -#import utils import os import apt_pkg as apt from utils import ( From be35c0420d4a50fc6842694b60c7770979dd736c Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:13:52 +0100 Subject: [PATCH 0203/2699] Add flake8 and charm proof target for Make --- ceph-proxy/Makefile | 5 ++ ceph-proxy/hooks/ceph.py | 127 +++++++++++++++++++++++----- ceph-proxy/hooks/hooks.py | 172 +++++++++----------------------------- ceph-proxy/hooks/utils.py | 14 ++-- ceph-proxy/metadata.yaml | 3 +- 5 files 
changed, 158 insertions(+), 163 deletions(-) create mode 100644 ceph-proxy/Makefile diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile new file mode 100644 index 00000000..1e9ee8da --- /dev/null +++ b/ceph-proxy/Makefile @@ -0,0 +1,5 @@ +#!/usr/bin/make + +lint: + flake8 --exclude hooks/charmhelpers hooks + charm proof diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index d9f1f4dd..ec6fb011 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -12,9 +12,14 @@ import time import os import apt_pkg as apt +from charmhelpers.core.host import ( + mkdir, + service_start, + log +) from utils import ( - get_unit_hostname - ) + get_unit_hostname +) LEADER = 'leader' PEON = 'peon' @@ -30,7 +35,7 @@ def is_quorum(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -54,7 +59,7 @@ def is_leader(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -84,7 +89,7 @@ def add_bootstrap_hint(peer): asok, "add_bootstrap_peer_hint", peer - ] + ] if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) @@ -93,7 +98,7 @@ def add_bootstrap_hint(peer): 'xfs', 'ext4', 'btrfs' - ] +] def is_osd_disk(dev): @@ -103,7 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): + ): return True except subprocess.CalledProcessError: pass @@ -114,7 +119,7 @@ def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', '--subsystem-match=block', '--action=add' - ] + ] subprocess.call(cmd) @@ -144,7 +149,7 @@ def import_osd_bootstrap_key(key): '--create-keyring', '--name=client.bootstrap-osd', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys @@ -152,10 +157,10 @@ def import_osd_bootstrap_key(key): 'mon': [ 'allow command osd create ...', 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', + r'allow command auth add * osd allow\ * mon allow\ rwx', 'allow command mon getmap' - ] - } + ] +} def get_osd_bootstrap_key(): @@ -173,14 +178,14 @@ def import_radosgw_key(key): '--create-keyring', '--name=client.radosgw.gateway', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_radosgw_key(): @@ -190,7 +195,7 @@ def get_radosgw_key(): _default_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_named_key(name, caps=None): @@ -200,16 +205,16 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), + get_unit_hostname() + ), 'auth', 'get-or-create', 'client.{}'.format(name), - ] + ] # Add capabilities for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), - ]) + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' @@ -225,6 +230,42 @@ def get_named_key(name, caps=None): return key +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already 
initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', perms=0755) + mkdir(path) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + with open(upstart, 'w'): + pass + + service_start('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + def get_ceph_version(): apt.init() cache = apt.Cache() @@ -237,3 +278,51 @@ def get_ceph_version(): def version_compare(a, b): return apt.version_compare(a, b) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(upstart): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if (is_osd_disk(dev) and not reformat_osd): + log('Looks like {} is already an OSD, skipping.'.format(dev)) + return + + if device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if get_ceph_version() >= "0.48.3": + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + if osd_journal and os.path.exists(osd_journal): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) + + +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 819219dd..75ced03d 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -10,37 +10,36 @@ import glob import os -import subprocess import shutil import sys import ceph #import utils from charmhelpers.core.hookenv import ( - log, - ERROR, - config, - relation_ids, - related_units, - relation_get, - relation_set, - remote_unit, - Hooks, - UnregisteredHookError - ) + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError +) from charmhelpers.core.host import ( - apt_install, - apt_update, - filter_installed_packages, - mkdir - ) + apt_install, + apt_update, + filter_installed_packages, + service_start, + umount +) from utils import ( - render_template, - configure_source, - get_host_ip, - get_unit_hostname - ) + render_template, + configure_source, + get_host_ip, +) hooks = Hooks() @@ -68,7 +67,7 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), 'version': ceph.get_ceph_version() - } + } with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) @@ -96,25 +95,23 @@ def config_changed(): emit_cephconf() e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and - filesystem_mounted(e_mountpoint)): - subprocess.call(['umount', e_mountpoint]) + if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): + umount(e_mountpoint) 
osd_journal = config('osd-journal') - if (osd_journal and - not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) + and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') for dev in config('osd-devices').split(' '): - osdize(dev) + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) # Support use of single node ceph - if (not ceph.is_bootstrapped() and - int(config('monitor-count')) == 1): - bootstrap_monitor_cluster() + if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): @@ -130,64 +127,14 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): hosts.append( - '{}:6789'.format(get_host_ip( - relation_get('private-address', - unit, relid))) - ) + '{}:6789'.format(get_host_ip(relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if (os.path.exists(monfs) and - not os.path.exists(upstart)): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(upstart, 'w'): - pass - - -def bootstrap_monitor_cluster(): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) - secret = config('monitor-secret') - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) - mkdir(path) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - - with open(done, 'w'): - pass - with open(upstart, 'w'): - pass - - subprocess.check_call(['start', 'ceph-mon-all-starter']) - except: - raise - finally: - os.unlink(keyring) - - def reformat_osd(): if config('osd-reformat'): return True @@ -195,48 +142,6 @@ def reformat_osd(): return False -def osdize(dev): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if (ceph.is_osd_disk(dev) and not - reformat_osd()): - log('Looks like {} is already an OSD, skipping.' 
- .format(dev)) - return - - if device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - cmd = ['ceph-disk-prepare'] - # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.48.3": - osd_format = config('osd-format') - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - cmd.append(dev) - osd_journal = config('osd-journal') - if (osd_journal and - os.path.exists(osd_journal)): - cmd.append(osd_journal) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - subprocess.call(cmd) - - -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - @hooks.hook('mon-relation-departed', 'mon-relation-joined') def mon_relation(): @@ -245,15 +150,15 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: - bootstrap_monitor_cluster() + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() ceph.rescan_osd_devices() notify_osds() notify_radosgws() notify_client() else: - log('Not enough mons ({}), punting.'.format( - len(get_mon_hosts()))) + log('Not enough mons ({}), punting.' + .format(len(get_mon_hosts()))) log('End mon-relation hook.') @@ -316,7 +221,6 @@ def radosgw_relation(): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) - if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') relation_set(radosgw_key=ceph.get_radosgw_key(), @@ -348,7 +252,7 @@ def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() - update_monfs() + ceph.update_monfs() log('End upgrade-charm hook.') @@ -356,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- subprocess.call(['start', 'ceph-mon-all']) + service_start('ceph-mon-all') ceph.rescan_osd_devices() diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 48c18716..8069bbec 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -14,12 +14,12 @@ config, unit_get, cached - ) +) from charmhelpers.core.host import ( apt_install, apt_update, filter_installed_packages - ) +) TEMPLATES_DIR = 'templates' @@ -40,14 +40,12 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) + loader=jinja2.FileSystemLoader(template_dir)) template = templates.get_template(template_name) return template.render(context) -CLOUD_ARCHIVE = \ -""" # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -59,7 +57,7 @@ def configure_source(source=None): cmd = [ 'add-apt-repository', source - ] + ] subprocess.check_call(cmd) if source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), @@ -76,7 +74,7 @@ def configure_source(source=None): 'apt-key', 'adv', '--keyserver keyserver.ubuntu.com', '--recv-keys', key - ] + ] subprocess.check_call(cmd) apt_update(fatal=True) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 0d84f430..c62ea5c8 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,7 +1,6 @@ name: ceph summary: Highly scalable distributed storage -maintainer: James Page , - Paul Collins +maintainer: James Page description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 4d4592bdae5115db102af7f1e3cabadadeb3a349 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:13:52 +0100 Subject: [PATCH 0204/2699] Add flake8 and charm proof target for Make --- ceph-mon/Makefile | 5 ++ ceph-mon/hooks/ceph.py | 127 ++++++++++++++++++++++++----- ceph-mon/hooks/hooks.py | 172 +++++++++------------------------------- ceph-mon/hooks/utils.py | 14 ++-- ceph-mon/metadata.yaml | 3 +- 5 files changed, 158 insertions(+), 163 deletions(-) create mode 100644 ceph-mon/Makefile diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile new file mode 100644 index 00000000..1e9ee8da --- /dev/null +++ b/ceph-mon/Makefile @@ -0,0 +1,5 @@ +#!/usr/bin/make + +lint: + flake8 --exclude hooks/charmhelpers hooks + charm proof diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index d9f1f4dd..ec6fb011 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -12,9 +12,14 @@ import time import os import apt_pkg as apt +from charmhelpers.core.host import ( + mkdir, + service_start, + log +) from utils import ( - get_unit_hostname - ) + get_unit_hostname +) LEADER = 'leader' PEON = 'peon' @@ -30,7 +35,7 @@ def is_quorum(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -54,7 +59,7 @@ def is_leader(): "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -84,7 +89,7 @@ def add_bootstrap_hint(peer): asok, "add_bootstrap_peer_hint", peer - ] + ] if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) @@ -93,7 +98,7 @@ def add_bootstrap_hint(peer): 'xfs', 'ext4', 'btrfs' - ] +] def is_osd_disk(dev): @@ -103,7 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 
'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): + ): return True except subprocess.CalledProcessError: pass @@ -114,7 +119,7 @@ def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', '--subsystem-match=block', '--action=add' - ] + ] subprocess.call(cmd) @@ -144,7 +149,7 @@ def import_osd_bootstrap_key(key): '--create-keyring', '--name=client.bootstrap-osd', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys @@ -152,10 +157,10 @@ def import_osd_bootstrap_key(key): 'mon': [ 'allow command osd create ...', 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', + r'allow command auth add * osd allow\ * mon allow\ rwx', 'allow command mon getmap' - ] - } + ] +} def get_osd_bootstrap_key(): @@ -173,14 +178,14 @@ def import_radosgw_key(key): '--create-keyring', '--name=client.radosgw.gateway', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_radosgw_key(): @@ -190,7 +195,7 @@ def get_radosgw_key(): _default_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_named_key(name, caps=None): @@ -200,16 +205,16 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), + get_unit_hostname() + ), 'auth', 'get-or-create', 'client.{}'.format(name), - ] + ] # Add capabilities for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), - ]) + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' @@ -225,6 +230,42 @@ def get_named_key(name, caps=None): return key +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', perms=0755) + mkdir(path) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + with open(upstart, 'w'): + pass + + service_start('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + def get_ceph_version(): apt.init() cache = apt.Cache() @@ -237,3 +278,51 @@ def get_ceph_version(): def version_compare(a, b): return apt.version_compare(a, b) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(upstart): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if (is_osd_disk(dev) and not reformat_osd): + log('Looks like {} is already an OSD, skipping.'.format(dev)) + return + + if device_mounted(dev): + log('Looks like {} is in 
use, skipping.'.format(dev)) + return + + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if get_ceph_version() >= "0.48.3": + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + if osd_journal and os.path.exists(osd_journal): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) + + +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 819219dd..75ced03d 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -10,37 +10,36 @@ import glob import os -import subprocess import shutil import sys import ceph #import utils from charmhelpers.core.hookenv import ( - log, - ERROR, - config, - relation_ids, - related_units, - relation_get, - relation_set, - remote_unit, - Hooks, - UnregisteredHookError - ) + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + relation_set, + remote_unit, + Hooks, + UnregisteredHookError +) from charmhelpers.core.host import ( - apt_install, - apt_update, - filter_installed_packages, - mkdir - ) + apt_install, + apt_update, + filter_installed_packages, + service_start, + umount +) from utils import ( - render_template, - configure_source, - get_host_ip, - get_unit_hostname - ) + render_template, + configure_source, + get_host_ip, +) hooks = Hooks() @@ -68,7 +67,7 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), 'version': ceph.get_ceph_version() - } + } with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) @@ -96,25 +95,23 @@ def config_changed(): emit_cephconf() e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and - filesystem_mounted(e_mountpoint)): - subprocess.call(['umount', e_mountpoint]) + if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): + umount(e_mountpoint) osd_journal = config('osd-journal') - if (osd_journal and - not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) + and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') for dev in config('osd-devices').split(' '): - osdize(dev) + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) # Support use of single node ceph - if (not ceph.is_bootstrapped() and - int(config('monitor-count')) == 1): - bootstrap_monitor_cluster() + if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): @@ -130,64 +127,14 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): hosts.append( - '{}:6789'.format(get_host_ip( - relation_get('private-address', - unit, relid))) - ) + '{}:6789'.format(get_host_ip(relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if (os.path.exists(monfs) and - not os.path.exists(upstart)): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with 
open(upstart, 'w'): - pass - - -def bootstrap_monitor_cluster(): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) - secret = config('monitor-secret') - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) - mkdir(path) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - - with open(done, 'w'): - pass - with open(upstart, 'w'): - pass - - subprocess.check_call(['start', 'ceph-mon-all-starter']) - except: - raise - finally: - os.unlink(keyring) - - def reformat_osd(): if config('osd-reformat'): return True @@ -195,48 +142,6 @@ def reformat_osd(): return False -def osdize(dev): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if (ceph.is_osd_disk(dev) and not - reformat_osd()): - log('Looks like {} is already an OSD, skipping.' - .format(dev)) - return - - if device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - cmd = ['ceph-disk-prepare'] - # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.48.3": - osd_format = config('osd-format') - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - cmd.append(dev) - osd_journal = config('osd-journal') - if (osd_journal and - os.path.exists(osd_journal)): - cmd.append(osd_journal) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - subprocess.call(cmd) - - -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - @hooks.hook('mon-relation-departed', 'mon-relation-joined') def mon_relation(): @@ -245,15 +150,15 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: - bootstrap_monitor_cluster() + ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() ceph.rescan_osd_devices() notify_osds() notify_radosgws() notify_client() else: - log('Not enough mons ({}), punting.'.format( - len(get_mon_hosts()))) + log('Not enough mons ({}), punting.' + .format(len(get_mon_hosts()))) log('End mon-relation hook.') @@ -316,7 +221,6 @@ def radosgw_relation(): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) - if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') relation_set(radosgw_key=ceph.get_radosgw_key(), @@ -348,7 +252,7 @@ def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() - update_monfs() + ceph.update_monfs() log('End upgrade-charm hook.') @@ -356,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
-    subprocess.call(['start', 'ceph-mon-all'])
+    service_start('ceph-mon-all')
     ceph.rescan_osd_devices()

diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py
index 48c18716..8069bbec 100644
--- a/ceph-mon/hooks/utils.py
+++ b/ceph-mon/hooks/utils.py
@@ -14,12 +14,12 @@
     config,
     unit_get,
     cached
-    )
+)
 from charmhelpers.core.host import (
     apt_install,
     apt_update,
     filter_installed_packages
-    )
+)

 TEMPLATES_DIR = 'templates'

@@ -40,14 +40,12 @@

 def render_template(template_name, context, template_dir=TEMPLATES_DIR):
     templates = jinja2.Environment(
-        loader=jinja2.FileSystemLoader(template_dir)
-        )
+        loader=jinja2.FileSystemLoader(template_dir))
     template = templates.get_template(template_name)
     return template.render(context)


-CLOUD_ARCHIVE = \
-""" # Ubuntu Cloud Archive
+CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive
 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
 """

@@ -59,7 +57,7 @@
         cmd = [
             'add-apt-repository',
             source
-            ]
+        ]
         subprocess.check_call(cmd)
     if source.startswith('cloud:'):
         apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
@@ -76,7 +74,7 @@
             'apt-key',
             'adv', '--keyserver keyserver.ubuntu.com',
             '--recv-keys', key
-            ]
+        ]
         subprocess.check_call(cmd)
     apt_update(fatal=True)

diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml
index 0d84f430..c62ea5c8 100644
--- a/ceph-mon/metadata.yaml
+++ b/ceph-mon/metadata.yaml
@@ -1,7 +1,6 @@
 name: ceph
 summary: Highly scalable distributed storage
-maintainer: James Page ,
-            Paul Collins
+maintainer: James Page
 description: |
  Ceph is a distributed storage and network file system designed to provide
  excellent performance, reliability, and scalability.

From 6b9ac00c3d9e8a5f4b583a94a2042d02ac7b861d Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 24 Jun 2013 10:14:42 +0100
Subject: [PATCH 0205/2699] Add sync target for charm-helpers

---
 ceph-proxy/Makefile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile
index 1e9ee8da..9e8e5c0f 100644
--- a/ceph-proxy/Makefile
+++ b/ceph-proxy/Makefile
@@ -3,3 +3,6 @@
 lint:
 	flake8 --exclude hooks/charmhelpers hooks
 	charm proof
+
+sync:
+	charm-helper-sync -c charm-helpers-sync.yaml

From 467cdd7239498b323fdecf7f8078a0592badf7a6 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 24 Jun 2013 10:14:42 +0100
Subject: [PATCH 0206/2699] Add sync target for charm-helpers

---
 ceph-mon/Makefile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile
index 1e9ee8da..9e8e5c0f 100644
--- a/ceph-mon/Makefile
+++ b/ceph-mon/Makefile
@@ -3,3 +3,6 @@
 lint:
 	flake8 --exclude hooks/charmhelpers hooks
 	charm proof
+
+sync:
+	charm-helper-sync -c charm-helpers-sync.yaml

From ae1116a2c33aad3346719b6d7f5d46af3e7f7bd1 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 24 Jun 2013 10:15:45 +0100
Subject: [PATCH 0207/2699] General tidy of README

---
 ceph-proxy/README.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md
index 7d5d3de7..89ea5807 100644
--- a/ceph-proxy/README.md
+++ b/ceph-proxy/README.md
@@ -15,28 +15,28 @@ are provided:
 fsid:
     uuid specific to a ceph cluster used to ensure that different
     clusters don't get mixed up - use `uuid` to generate one.
-
+
 monitor-secret:
     a ceph generated key used by the daemons that manage the cluster
     to control security.  You can use the ceph-authtool command to
     generate one:
-
+
     ceph-authtool /dev/stdout --name=mon. --gen-key
-
+
 These two pieces of configuration must NOT be changed post bootstrap;
 attempting to do this will cause a reconfiguration error and new service
 units will not join the existing ceph cluster.
-
+
 The charm also supports specification of the storage devices to use in the
 ceph cluster.

   osd-devices:
     A list of devices that the charm will attempt to detect, initialise and
     activate as ceph storage.
-
+
     This can be a superset of the actual storage devices presented to
     each service unit and can be changed post ceph bootstrap using `juju set`.
-
+
 At a minimum you must provide a juju config file during initial deployment
 with the fsid and monitor-secret options (contents of ceph.yaml below):

@@ -44,7 +44,7 @@ with the fsid and monitor-secret options (contents of ceph.yaml below):
     fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
     monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==
     osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde
-
+
 Specifying the osd-devices to use is also a good idea.

 Boot things up by using:

@@ -62,7 +62,7 @@ Author: Paul Collins , James Page
 Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug
 Location: http://jujucharms.com/charms/ceph
-
+
 Technical Bootnotes
 ===================

@@ -89,4 +89,4 @@ all OSDs run on nodes that also run mon, we don't need this and did not
 implement it.

 See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph
-monitor cluster deployment strategies and pitfalls.
+monitor cluster deployment strategies and pitfalls.

From 5fcabe95b5cb652238eccf34519e8376b6c198f3 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 24 Jun 2013 10:15:45 +0100
Subject: [PATCH 0208/2699] General tidy of README

---
 ceph-mon/README.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index 7d5d3de7..89ea5807 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -15,28 +15,28 @@ are provided:
 fsid:
     uuid specific to a ceph cluster used to ensure that different
     clusters don't get mixed up - use `uuid` to generate one.
-
+
 monitor-secret:
     a ceph generated key used by the daemons that manage the cluster
     to control security.  You can use the ceph-authtool command to
     generate one:
-
+
     ceph-authtool /dev/stdout --name=mon. --gen-key
-
+
 These two pieces of configuration must NOT be changed post bootstrap;
 attempting to do this will cause a reconfiguration error and new service
 units will not join the existing ceph cluster.
-
+
 The charm also supports specification of the storage devices to use in the
 ceph cluster.

   osd-devices:
     A list of devices that the charm will attempt to detect, initialise and
     activate as ceph storage.
-
+
     This can be a superset of the actual storage devices presented to
     each service unit and can be changed post ceph bootstrap using `juju set`.
-
+
 At a minimum you must provide a juju config file during initial deployment
 with the fsid and monitor-secret options (contents of ceph.yaml below):

@@ -44,7 +44,7 @@ with the fsid and monitor-secret options (contents of ceph.yaml below):
     fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
     monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==
     osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde
-
+
 Specifying the osd-devices to use is also a good idea.
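
The fsid is just a fresh UUID and the monitor-secret comes straight from ceph-authtool, so a ceph.yaml like the example above can be generated rather than typed by hand. A minimal sketch in Python, assuming ceph-common is installed on the admin machine; the flat layout mirrors the example above, though some juju versions expect the options nested under the service name:

    import subprocess
    import uuid

    # any fresh RFC 4122 UUID is a valid fsid
    fsid = str(uuid.uuid4())

    # ceph-authtool prints a small keyring snippet; keep only the key value
    out = subprocess.check_output(
        ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])
    secret = [line.split('=', 1)[1].strip()
              for line in out.splitlines() if '=' in line][0]

    with open('ceph.yaml', 'w') as cfg:
        cfg.write('fsid: {}\n'.format(fsid))
        cfg.write('monitor-secret: {}\n'.format(secret))
        cfg.write('osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde\n')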
Boot things up by using: @@ -62,7 +62,7 @@ Author: Paul Collins , James Page Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug Location: http://jujucharms.com/charms/ceph - + Technical Bootnotes =================== @@ -89,4 +89,4 @@ all OSDs run on nodes that also run mon, we don't need this and did not implement it. See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph -monitor cluster deployment strategies and pitfalls. +monitor cluster deployment strategies and pitfalls. From 279b93397510360892957bc2b58e9c1a5f2d3802 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:24:12 +0100 Subject: [PATCH 0209/2699] Switch to using restart for ceph-mon-all --- ceph-proxy/hooks/ceph.py | 4 ++-- ceph-proxy/hooks/hooks.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index ec6fb011..8e239de3 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -14,7 +14,7 @@ import apt_pkg as apt from charmhelpers.core.host import ( mkdir, - service_start, + service_restart, log ) from utils import ( @@ -259,7 +259,7 @@ def bootstrap_monitor_cluster(secret): with open(upstart, 'w'): pass - service_start('ceph-mon-all') + service_restart('ceph-mon-all') except: raise finally: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 75ced03d..872e192b 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -31,7 +31,7 @@ apt_install, apt_update, filter_installed_packages, - service_start, + service_restart, umount ) @@ -260,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. - service_start('ceph-mon-all') + service_restart('ceph-mon-all') ceph.rescan_osd_devices() From 8355e1387534c09969e0a88d6daf032b4bd554ee Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 10:24:12 +0100 Subject: [PATCH 0210/2699] Switch to using restart for ceph-mon-all --- ceph-mon/hooks/ceph.py | 4 ++-- ceph-mon/hooks/hooks.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index ec6fb011..8e239de3 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -14,7 +14,7 @@ import apt_pkg as apt from charmhelpers.core.host import ( mkdir, - service_start, + service_restart, log ) from utils import ( @@ -259,7 +259,7 @@ def bootstrap_monitor_cluster(secret): with open(upstart, 'w'): pass - service_start('ceph-mon-all') + service_restart('ceph-mon-all') except: raise finally: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 75ced03d..872e192b 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -31,7 +31,7 @@ apt_install, apt_update, filter_installed_packages, - service_start, + service_restart, umount ) @@ -260,7 +260,7 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
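
The one-line change below is the substance of these two patches: under upstart of this era, `start` against a job that is already running exits non-zero, so redeploying onto a live machine made this hook report failure, while `restart` succeeds whether or not the daemons were already up. Both wrappers reduce to the service() helper in charmhelpers.core.host, so the intended behaviour can be sketched as:

    from charmhelpers.core.host import service

    # service() shells out to service(8) and maps the exit code to a bool:
    #   service('start', 'ceph-mon-all')    # False if already running
    #   service('restart', 'ceph-mon-all')  # True from either state
    service('restart', 'ceph-mon-all')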
- service_start('ceph-mon-all') + service_restart('ceph-mon-all') ceph.rescan_osd_devices() From 0afb91e71df57145d65921162fc21d34a11b9428 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 11:24:47 +0100 Subject: [PATCH 0211/2699] Migrate to charm-helpers, tidy up flake8 --- ceph-osd/Makefile | 8 + ceph-osd/charm-helpers-sync.yaml | 4 + ceph-osd/hooks/ceph.py | 135 ++++++-- ceph-osd/hooks/charmhelpers/__init__.py | 0 ceph-osd/hooks/charmhelpers/core/__init__.py | 0 ceph-osd/hooks/charmhelpers/core/hookenv.py | 335 +++++++++++++++++++ ceph-osd/hooks/charmhelpers/core/host.py | 263 +++++++++++++++ ceph-osd/hooks/hooks.py | 177 +++++----- ceph-osd/hooks/utils.py | 165 ++------- 9 files changed, 832 insertions(+), 255 deletions(-) create mode 100644 ceph-osd/Makefile create mode 100644 ceph-osd/charm-helpers-sync.yaml create mode 100644 ceph-osd/hooks/charmhelpers/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/hookenv.py create mode 100644 ceph-osd/hooks/charmhelpers/core/host.py diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile new file mode 100644 index 00000000..fdb22de2 --- /dev/null +++ b/ceph-osd/Makefile @@ -0,0 +1,8 @@ +#!/usr/bin/make + +lint: + flake8 --exclude hooks/charmhelpers hooks + charm proof || : + +sync: + charm-helper-sync -c charm-helpers-sync.yaml diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml new file mode 100644 index 00000000..7ee93b78 --- /dev/null +++ b/ceph-osd/charm-helpers-sync.yaml @@ -0,0 +1,4 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 6502b183..8e239de3 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -10,23 +10,32 @@ import json import subprocess import time -import utils import os import apt_pkg as apt +from charmhelpers.core.host import ( + mkdir, + service_restart, + log +) +from utils import ( + get_unit_hostname +) LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -44,13 +53,13 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -73,14 +82,14 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "add_bootstrap_peer_hint", peer - ] + ] if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) @@ -89,7 +98,7 @@ def add_bootstrap_hint(peer): 'xfs', 'ext4', 'btrfs' - ] +] def is_osd_disk(dev): @@ -99,7 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): + ): return True except subprocess.CalledProcessError: pass @@ -110,7 +119,7 @@ def rescan_osd_devices(): cmd = [ 
'udevadm', 'trigger', '--subsystem-match=block', '--action=add' - ] + ] subprocess.call(cmd) @@ -140,7 +149,7 @@ def import_osd_bootstrap_key(key): '--create-keyring', '--name=client.bootstrap-osd', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys @@ -148,10 +157,10 @@ def import_osd_bootstrap_key(key): 'mon': [ 'allow command osd create ...', 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', + r'allow command auth add * osd allow\ * mon allow\ rwx', 'allow command mon getmap' - ] - } + ] +} def get_osd_bootstrap_key(): @@ -169,14 +178,14 @@ def import_radosgw_key(key): '--create-keyring', '--name=client.radosgw.gateway', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_radosgw_key(): @@ -186,7 +195,7 @@ def get_radosgw_key(): _default_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_named_key(name, caps=None): @@ -196,16 +205,16 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() - ), + get_unit_hostname() + ), 'auth', 'get-or-create', 'client.{}'.format(name), - ] + ] # Add capabilities for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), - ]) + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether its 'get' or 'create' @@ -221,6 +230,42 @@ def get_named_key(name, caps=None): return key +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + upstart = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', perms=0755) + mkdir(path) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + + with open(done, 'w'): + pass + with open(upstart, 'w'): + pass + + service_restart('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + def get_ceph_version(): apt.init() cache = apt.Cache() @@ -233,3 +278,51 @@ def get_ceph_version(): def version_compare(a, b): return apt.version_compare(a, b) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + upstart = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(upstart): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(upstart, 'w'): + pass + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if (is_osd_disk(dev) and not reformat_osd): + log('Looks like {} is already an OSD, skipping.'.format(dev)) + return + + if device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + cmd = ['ceph-disk-prepare'] + # Later versions of ceph support more options + if get_ceph_version() >= "0.48.3": + if 
osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + cmd.append(dev) + if osd_journal and os.path.exists(osd_journal): + cmd.append(osd_journal) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + subprocess.call(cmd) + + +def device_mounted(dev): + return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/core/__init__.py b/ceph-osd/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..b28240c6 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,335 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['unit'] = local_unit() + context['rels'] = relations() + context['rel'] = relation_get() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + value = json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + if isinstance(value, dict): + return Serializable(value) + else: + return value + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + value = json.loads(subprocess.check_output(_args)) + except ValueError: + return None + if isinstance(value, dict): + return Serializable(value) + else: + return value + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return Serializable(relation) + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + 
unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + return decorated + return wrapper diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..19951ed2 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -0,0 +1,263 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): + """Create or overwrite a file with the contents of a string""" + context = execution_environment() + context.update(kwargs) + log("Writing file {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + with open(path.format(**context), 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(fmtstr.format(**context)) + + +def render_template_file(source, destination, **kwargs): + """Create or overwrite a file using a template""" + log("Rendering template {} for {}".format(source, + destination)) + context = execution_environment() + with 
open(source.format(**context), 'r') as template: + write_file(destination.format(**context), template.read(), + **kwargs) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 5cdafcab..20162f28 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -14,7 +14,30 @@ import sys import ceph -import utils +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, + relation_ids, + related_units, + relation_get, + Hooks, + UnregisteredHookError +) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages, + umount +) + +from utils import ( + render_template, + configure_source, + get_host_ip, +) + +hooks = Hooks() def install_upstart_scripts(): @@ -24,72 +47,72 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') +@hooks.hook('install') def install(): - utils.juju_log('INFO', 'Begin install hook.') - utils.configure_source() - utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs') + log('Begin install hook.') + configure_source(config('source')) + apt_update(fatal=True) + apt_install(packages=ceph.PACKAGES, error=True) install_upstart_scripts() - utils.juju_log('INFO', 'End install hook.') + log('End install hook.') def emit_cephconf(): mon_hosts = get_mon_hosts() - utils.juju_log('INFO', 'Monitor hosts are ' + repr(mon_hosts)) + log('Monitor hosts are ' + repr(mon_hosts)) cephcontext = { 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid(), 'version': ceph.get_ceph_version() - } + } with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(utils.render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', cephcontext)) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +@hooks.hook('config-changed') def config_changed(): - utils.juju_log('INFO', 'Begin config-changed hook.') + log('Begin config-changed hook.') # Pre-flight checks - if utils.config_get('osd-format') not in ceph.DISK_FORMATS: - utils.juju_log('CRITICAL', - 'Invalid OSD disk format configuration specified') + if config('osd-format') not in ceph.DISK_FORMATS: + log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) - e_mountpoint = utils.config_get('ephemeral-unmount') - if (e_mountpoint and - filesystem_mounted(e_mountpoint)): - subprocess.call(['umount', e_mountpoint]) + e_mountpoint = config('ephemeral-unmount') + if (e_mountpoint and filesystem_mounted(e_mountpoint)): + umount(e_mountpoint) - osd_journal = utils.config_get('osd-journal') - if (osd_journal and - not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): + osd_journal = config('osd-journal') + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) + and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') if ceph.is_bootstrapped(): - utils.juju_log('INFO', 'ceph bootstrapped, rescanning disks') + log('ceph bootstrapped, rescanning disks') emit_cephconf() - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) + for dev in config('osd-devices').split(' '): + ceph.osdize(dev, config('osd-format'), + config('osd-journal'), config('osd-reformat')) ceph.rescan_osd_devices() - utils.juju_log('INFO', 'End config-changed hook.') + log('End 
config-changed hook.') def get_mon_hosts(): hosts = [] - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): + for relid in relation_ids('mon'): + for unit in related_units(relid): hosts.append( - '{}:6789'.format(utils.get_host_ip( - utils.relation_get('private-address', - unit, relid))) - ) + '{}:6789'.format(get_host_ip(relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts @@ -104,59 +127,22 @@ def get_auth(): def get_conf(name): - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): - conf = utils.relation_get(name, - unit, relid) + for relid in relation_ids('mon'): + for unit in related_units(relid): + conf = relation_get(name, + unit, relid) if conf: return conf return None def reformat_osd(): - if utils.config_get('osd-reformat'): + if config('osd-reformat'): return True else: return False -def osdize(dev): - if not os.path.exists(dev): - utils.juju_log('INFO', - 'Path {} does not exist - bailing'.format(dev)) - return - - if (ceph.is_osd_disk(dev) and not - reformat_osd()): - utils.juju_log('INFO', - 'Looks like {} is already an OSD, skipping.' - .format(dev)) - return - - if device_mounted(dev): - utils.juju_log('INFO', - 'Looks like {} is in use, skipping.'.format(dev)) - return - - cmd = ['ceph-disk-prepare'] - # Later versions of ceph support more options - if ceph.get_ceph_version() >= "0.48.3": - osd_format = utils.config_get('osd-format') - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - cmd.append(dev) - osd_journal = utils.config_get('osd-journal') - if (osd_journal and - os.path.exists(osd_journal)): - cmd.append(osd_journal) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - subprocess.call(cmd) - - def device_mounted(dev): return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 @@ -165,42 +151,39 @@ def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 +@hooks.hook('mon-relation-changed', + 'mon-relation-departed') def mon_relation(): - utils.juju_log('INFO', 'Begin mon-relation hook.') + log('Begin mon-relation hook.') - bootstrap_key = utils.relation_get('osd_bootstrap_key') - if (get_fsid() and - get_auth() and - bootstrap_key): - utils.juju_log('INFO', 'mon has provided conf- scanning disks') + bootstrap_key = relation_get('osd_bootstrap_key') + if get_fsid() and get_auth() and bootstrap_key: + log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) - for dev in utils.config_get('osd-devices').split(' '): - osdize(dev) + for dev in config('osd-devices').split(' '): + ceph.osdize(dev, config('osd-format'), + config('osd-journal'), config('osd-reformat')) ceph.rescan_osd_devices() else: - utils.juju_log('INFO', - 'mon cluster has not yet provided conf') + log('mon cluster has not yet provided conf') - utils.juju_log('INFO', 'End mon-relation hook.') + log('End mon-relation hook.') +@hooks.hook('upgrade-charm') def upgrade_charm(): - utils.juju_log('INFO', 'Begin upgrade-charm hook.') - if (get_fsid() and - get_auth()): + log('Begin upgrade-charm hook.') + if get_fsid() and get_auth(): emit_cephconf() install_upstart_scripts() - utils.install('xfsprogs') - utils.juju_log('INFO', 'End upgrade-charm hook.') - + apt_install(packages=filter_installed_packages(ceph.PACKAGES), + error=True) + log('End upgrade-charm hook.') -utils.do_hooks({ - 'config-changed': config_changed, - 'install': install, - 
'mon-relation-departed': mon_relation, - 'mon-relation-changed': mon_relation, - 'upgrade-charm': upgrade_charm, - }) -sys.exit(0) +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index f5b17211..8069bbec 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -7,95 +7,76 @@ # Paul Collins # -import os import subprocess import socket -import sys import re - - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) +from charmhelpers.core.hookenv import ( + config, + unit_get, + cached +) +from charmhelpers.core.host import ( + apt_install, + apt_update, + filter_installed_packages +) TEMPLATES_DIR = 'templates' try: import jinja2 except ImportError: - install('python-jinja2') + apt_install(filter_installed_packages(['python-jinja2']), + fatal=True) import jinja2 try: import dns.resolver except ImportError: - install('python-dnspython') + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) import dns.resolver def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) + loader=jinja2.FileSystemLoader(template_dir)) template = templates.get_template(template_name) return template.render(context) -CLOUD_ARCHIVE = \ -""" # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ -def configure_source(): - source = str(config_get('source')) +def configure_source(source=None): if not source: return if source.startswith('ppa:'): cmd = [ 'add-apt-repository', source - ] + ] subprocess.check_call(cmd) if source.startswith('cloud:'): - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) if source.startswith('http:'): with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: apt.write("deb " + source + "\n") - key = config_get('key') + key = config('key') if key: cmd = [ 'apt-key', 'adv', '--keyserver keyserver.ubuntu.com', '--recv-keys', key - ] + ] subprocess.check_call(cmd) - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) + apt_update(fatal=True) def enable_pocket(pocket): @@ -109,105 +90,15 @@ def enable_pocket(pocket): else: sources.write(line) -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ - 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - ] 
- if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid': - cmd.append('-r') - cmd.append(v) - else: - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def config_get(attribute): - cmd = [ - 'config-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - +@cached def get_unit_hostname(): return socket.gethostname() -def get_host_ip(hostname=unit_get('private-address')): +@cached +def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From 68e030bffac8ccdd206c9d946d346338e0191c71 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 11:40:54 +0100 Subject: [PATCH 0212/2699] Fixup parameter name in apt_install calls --- ceph-osd/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 20162f28..e6c5840e 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -52,7 +52,7 @@ def install(): log('Begin install hook.') configure_source(config('source')) apt_update(fatal=True) - apt_install(packages=ceph.PACKAGES, error=True) + apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() log('End install hook.') @@ -178,7 +178,7 @@ def upgrade_charm(): emit_cephconf() install_upstart_scripts() apt_install(packages=filter_installed_packages(ceph.PACKAGES), - error=True) + fatal=True) log('End upgrade-charm hook.') From 19333a5e695fc7e3362066c762b8742bf184e82d Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:43:48 +0100 Subject: [PATCH 0213/2699] Use charm-helpers add_source --- ceph-proxy/charm-helpers-sync.yaml | 3 +- .../hooks/charmhelpers/fetch/__init__.py | 56 +++++++++++++++++++ ceph-proxy/hooks/hooks.py | 4 +- 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/__init__.py diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 7ee93b78..77afba70 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_redux destination: hooks/charmhelpers include: - core + - fetch diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 00000000..ef059e24 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,56 @@ +from yaml import safe_load +from charmhelpers.core.hookenv import config +from charmhelpers.core.host import ( + apt_install, apt_update, filter_installed_packages +) +import subprocess + +CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', source]) + elif source.startswith('cloud:'): + 
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 872e192b..cf625469 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -34,10 +34,10 @@ service_restart, umount ) +from charmhelpers.fetch import add_source from utils import ( render_template, - configure_source, get_host_ip, ) @@ -54,7 +54,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source(config('source')) + add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() From d84796d986fa188975cbf9fde67014c59d9bc9ea Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:43:48 +0100 Subject: [PATCH 0214/2699] Use charm-helpers add_source --- ceph-mon/charm-helpers-sync.yaml | 3 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 56 +++++++++++++++++++ ceph-mon/hooks/hooks.py | 4 +- 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/fetch/__init__.py diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 7ee93b78..77afba70 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_redux destination: hooks/charmhelpers include: - core + - fetch diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 00000000..ef059e24 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,56 @@ +from yaml import safe_load +from charmhelpers.core.hookenv import config +from charmhelpers.core.host import ( + apt_install, apt_update, filter_installed_packages +) +import subprocess + +CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class 
SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 872e192b..cf625469 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -34,10 +34,10 @@ service_restart, umount ) +from charmhelpers.fetch import add_source from utils import ( render_template, - configure_source, get_host_ip, ) @@ -54,7 +54,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source(config('source')) + add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() From 0f74a13f07fd0bb1ee3284f8ccbba07629ae759f Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:56:40 +0100 Subject: [PATCH 0215/2699] Resync fetch helper --- ceph-proxy/hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index ef059e24..7068dd8d 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,7 @@ ) import subprocess -CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' def add_source(source, key=None): From a2033faa5dfcfb5ebb0be874b9fe1a83b055fc04 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 13:56:40 +0100 Subject: [PATCH 0216/2699] Resync fetch helper --- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index ef059e24..7068dd8d 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,7 @@ ) import subprocess -CLOUD_ARCHIVE = 'http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' def add_source(source, key=None): From b3d30187e231f9921ac81f7eb21c79f98e940ed0 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:06:51 +0100 Subject: [PATCH 0217/2699] Resync helpers --- ceph-proxy/hooks/charmhelpers/fetch/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 7068dd8d..19d83b8b 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,9 @@ ) import subprocess -CLOUD_ARCHIVE = 'deb 
http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" def add_source(source, key=None): From 80957e9875a2caf3752df3fd745ae982b93a9784 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:06:51 +0100 Subject: [PATCH 0218/2699] Resync helpers --- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 7068dd8d..19d83b8b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -5,7 +5,9 @@ ) import subprocess -CLOUD_ARCHIVE = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main' +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" def add_source(source, key=None): From ec3a0bce0f166591a5eeb59efda18f715e3f41b2 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:08:24 +0100 Subject: [PATCH 0219/2699] Use refactored charmhelpers add_source --- ceph-osd/charm-helpers-sync.yaml | 3 ++- ceph-osd/hooks/hooks.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 7ee93b78..77afba70 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_redux destination: hooks/charmhelpers include: - core + - fetch diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index e6c5840e..fb9d91ae 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -30,10 +30,10 @@ filter_installed_packages, umount ) +from charmhelpers.fetch import add_source from utils import ( render_template, - configure_source, get_host_ip, ) @@ -50,7 +50,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') - configure_source(config('source')) + add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() From f6f89f403a47e7054029a68e337dab2129d2dd6c Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:10:26 +0100 Subject: [PATCH 0220/2699] Add missing fetch helper --- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 ceph-osd/hooks/charmhelpers/fetch/__init__.py diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 00000000..19d83b8b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,58 @@ +from yaml import safe_load +from charmhelpers.core.hookenv import config +from charmhelpers.core.host import ( + apt_install, apt_update, filter_installed_packages +) +import subprocess + +CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + 
apt.write(CLOUD_ARCHIVE.format(pocket)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) From 362e7fdd0bb96d4504a4f8d7b6aeae3281634e55 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:52:52 +0100 Subject: [PATCH 0221/2699] Zap disks if reformatting is requested --- ceph-proxy/hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 8e239de3..e496820b 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -108,6 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -317,7 +318,11 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - subprocess.call(cmd) + + if reformat_osd: + zap_disk(dev) + + subprocess.check_call(cmd) def device_mounted(dev): From 1d2b09c15c2a44f0ca46700ab674361212cb5306 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:52:52 +0100 Subject: [PATCH 0222/2699] Zap disks if reformatting is requested --- ceph-mon/hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 8e239de3..e496820b 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -108,6 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -317,7 +318,11 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - subprocess.call(cmd) + + if reformat_osd: + zap_disk(dev) + + subprocess.check_call(cmd) def device_mounted(dev): From 0d40a072b0bdbb57f9db6b64b1a152a850538102 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 24 Jun 2013 14:57:51 +0100 Subject: [PATCH 0223/2699] Ensure ceph-disk calls fail on error and zap disks if requested --- ceph-osd/hooks/ceph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 8e239de3..e496820b 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -108,6 +108,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 
4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -317,7 +318,11 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - subprocess.call(cmd) + + if reformat_osd: + zap_disk(dev) + + subprocess.check_call(cmd) def device_mounted(dev): From 3e2e91a272be5cc5f5c354e2c27221d089990b15 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 12:03:02 +0100 Subject: [PATCH 0224/2699] Move to using new storage charmhelpers --- ceph-proxy/charm-helpers-sync.yaml | 2 + ceph-proxy/hooks/ceph.py | 13 ++++--- .../hooks/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/utils.py | 25 +++++++++++++ ceph-proxy/hooks/hooks.py | 7 +--- ceph-proxy/hooks/utils.py | 37 ------------------- 8 files changed, 37 insertions(+), 47 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 77afba70..032b9b64 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -3,3 +3,5 @@ destination: hooks/charmhelpers include: - core - fetch + - contrib.storage.linux: + - utils diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index e496820b..7f9666e1 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -17,6 +17,10 @@ service_restart, log ) +from charmhelpers.contrib.storage.linux.utils import ( + zap_disk, + is_block_device +) from utils import ( get_unit_hostname ) @@ -125,11 +129,6 @@ def rescan_osd_devices(): subprocess.call(cmd) -def zap_disk(dev): - cmd = ['sgdisk', '--zap-all', dev] - subprocess.check_call(cmd) - - _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -297,6 +296,10 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): log('Path {} does not exist - bailing'.format(dev)) return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + if (is_osd_disk(dev) and not reformat_osd): log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..5b9b6d47 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. 
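+ Note: stat() raises OSError for a missing path, so callers + should verify the path exists before calling, as osdize() does.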
+ + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index cf625469..b61e04b6 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -14,18 +14,15 @@ import sys import ceph -#import utils from charmhelpers.core.hookenv import ( - log, - ERROR, + log, ERROR, config, relation_ids, related_units, relation_get, relation_set, remote_unit, - Hooks, - UnregisteredHookError + Hooks, UnregisteredHookError ) from charmhelpers.core.host import ( apt_install, diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 8069bbec..a8868b69 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -7,17 +7,14 @@ # Paul Collins # -import subprocess import socket import re from charmhelpers.core.hookenv import ( - config, unit_get, cached ) from charmhelpers.core.host import ( apt_install, - apt_update, filter_installed_packages ) @@ -45,40 +42,6 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - - -def configure_source(source=None): - if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) - if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: - apt.write("deb " + source + "\n") - key = config('key') - if key: - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - apt_update(fatal=True) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From a934bc5f3b8aa2dadc3920cec43b246fc4451afc Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 12:03:02 +0100 Subject: [PATCH 0225/2699] Move to using new storage charmhelpers --- ceph-mon/charm-helpers-sync.yaml | 2 + ceph-mon/hooks/ceph.py | 13 ++++--- .../hooks/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/utils.py | 25 +++++++++++++ ceph-mon/hooks/hooks.py | 7 +--- ceph-mon/hooks/utils.py | 37 ------------------- 8 files changed, 37 insertions(+), 47 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 77afba70..032b9b64 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -3,3 +3,5 @@ destination: hooks/charmhelpers include: - core - fetch + - 
contrib.storage.linux: + - utils diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index e496820b..7f9666e1 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -17,6 +17,10 @@ service_restart, log ) +from charmhelpers.contrib.storage.linux.utils import ( + zap_disk, + is_block_device +) from utils import ( get_unit_hostname ) @@ -125,11 +129,6 @@ def rescan_osd_devices(): subprocess.call(cmd) -def zap_disk(dev): - cmd = ['sgdisk', '--zap-all', dev] - subprocess.check_call(cmd) - - _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -297,6 +296,10 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): log('Path {} does not exist - bailing'.format(dev)) return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + if (is_osd_disk(dev) and not reformat_osd): log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/ceph-mon/hooks/charmhelpers/contrib/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..5b9b6d47 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. 
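+ The wipe is immediate and irreversible; callers are expected to + gate it behind an explicit reformat option, as osdize() does.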
+ ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index cf625469..b61e04b6 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -14,18 +14,15 @@ import sys import ceph -#import utils from charmhelpers.core.hookenv import ( - log, - ERROR, + log, ERROR, config, relation_ids, related_units, relation_get, relation_set, remote_unit, - Hooks, - UnregisteredHookError + Hooks, UnregisteredHookError ) from charmhelpers.core.host import ( apt_install, diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 8069bbec..a8868b69 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -7,17 +7,14 @@ # Paul Collins # -import subprocess import socket import re from charmhelpers.core.hookenv import ( - config, unit_get, cached ) from charmhelpers.core.host import ( apt_install, - apt_update, filter_installed_packages ) @@ -45,40 +42,6 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - - -def configure_source(source=None): - if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) - if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: - apt.write("deb " + source + "\n") - key = config('key') - if key: - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - apt_update(fatal=True) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From ad802d6c11dc4f56db039688718a461758b03541 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 12:03:55 +0100 Subject: [PATCH 0226/2699] Use new storage charmhelpers, configure_source redux --- ceph-osd/charm-helpers-sync.yaml | 2 ++ ceph-osd/hooks/ceph.py | 13 ++++++----- ceph-osd/hooks/utils.py | 37 -------------------------------- 3 files changed, 10 insertions(+), 42 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 77afba70..032b9b64 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -3,3 +3,5 @@ destination: hooks/charmhelpers include: - core - fetch + - contrib.storage.linux: + - utils diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index e496820b..7f9666e1 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -17,6 +17,10 @@ service_restart, log ) +from charmhelpers.contrib.storage.linux.utils import ( + zap_disk, + is_block_device +) from utils import ( get_unit_hostname ) @@ -125,11 +129,6 @@ def rescan_osd_devices(): subprocess.call(cmd) -def zap_disk(dev): - cmd = ['sgdisk', '--zap-all', dev] - subprocess.check_call(cmd) - - _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -297,6 +296,10 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): log('Path {} does not exist - bailing'.format(dev)) return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + if (is_osd_disk(dev) and not reformat_osd): 
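# is_osd_disk() matched the OSD partition-type GUID, so leave the device alone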
log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 8069bbec..a8868b69 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -7,17 +7,14 @@ # Paul Collins # -import subprocess import socket import re from charmhelpers.core.hookenv import ( - config, unit_get, cached ) from charmhelpers.core.host import ( apt_install, - apt_update, filter_installed_packages ) @@ -45,40 +42,6 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - - -def configure_source(source=None): - if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) - if source.startswith('http:'): - with open('/etc/apt/sources.list.d/ceph.list', 'w') as apt: - apt.write("deb " + source + "\n") - key = config('key') - if key: - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - apt_update(fatal=True) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From 59c4c61a28e7f9e5f66dcf23d5ec96c0f5dcf316 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 12:04:08 +0100 Subject: [PATCH 0227/2699] Add missing storage helper --- .../hooks/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/utils.py | 25 +++++++++++++++ 4 files changed, 25 insertions(+) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..5b9b6d47 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. 
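+ Both GPT and MBR data structures are destroyed.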
+ + :param block_device: str: Full path of block device to clean. + ''' + check_call(['sgdisk', '--zap-all', block_device]) From ef342d24fdb83cd7280c9eb3047b49dd87012852 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 14:02:47 +0100 Subject: [PATCH 0228/2699] Use device_mounted from ceph --- ceph-osd/hooks/hooks.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index fb9d91ae..41bb4568 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -84,7 +84,7 @@ def config_changed(): sys.exit(1) e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and filesystem_mounted(e_mountpoint)): + if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): umount(e_mountpoint) osd_journal = config('osd-journal') @@ -143,14 +143,6 @@ def reformat_osd(): return False -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): From bb680292959bb7527666f5dfd544a7c020e2e490 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Jun 2013 14:04:30 +0100 Subject: [PATCH 0229/2699] Add ceph icon --- ceph-osd/icon.svg | 414 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 ceph-osd/icon.svg diff --git a/ceph-osd/icon.svg b/ceph-osd/icon.svg new file mode 100644 index 00000000..de53ab2e --- /dev/null +++ b/ceph-osd/icon.svg @@ -0,0 +1,414 @@ +[414 lines of SVG markup; the XML tags were lost in extraction, leaving only the text node "image/svg+xml"] From c925f1950ac255a8af7278cba1d75dc8fb934c6b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:03:26 +0100 Subject: [PATCH 0230/2699] Resync helpers from trunk --- ceph-proxy/Makefile | 6 +- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 3 + .../hooks/charmhelpers/fetch/__init__.py | 92 ++++++++++++++++++- 4 files changed, 96 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 9e8e5c0f..71dfd409 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -1,8 +1,8 @@ #!/usr/bin/make lint: - flake8 --exclude hooks/charmhelpers hooks - charm proof + @flake8 --exclude hooks/charmhelpers hooks + @charm proof sync: - charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 032b9b64..21c0bc63 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_redux +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index b28240c6..b1ede910 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -333,3 +333,6 @@ def wrapper(decorated): self.register(decorated.__name__, decorated) return decorated return wrapper + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 19d83b8b..e3c42424 
100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,11 +1,21 @@ +import importlib from yaml import safe_load -from charmhelpers.core.hookenv import config from charmhelpers.core.host import ( - apt_install, apt_update, filter_installed_packages + apt_install, + apt_update, + filter_installed_packages +) +from urlparse import ( + urlparse, + urlunparse, ) import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,3 +66,79 @@ def configure_sources(update=False, add_source(sources[src_num], keys[src_num]) if update: apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in order +# from least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this module's submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list From e5d25720c9fa33aec73e69b72c3b7a360b4d03c6 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:03:26 +0100 Subject: [PATCH 0231/2699] Resync helpers from trunk --- ceph-mon/Makefile | 6 +- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 3 + ceph-mon/hooks/charmhelpers/fetch/__init__.py | 92 ++++++++++++++++++- 4 files changed, 96 insertions(+), 7 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 9e8e5c0f..71dfd409 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -1,8 +1,8 @@ #!/usr/bin/make lint: - flake8 --exclude hooks/charmhelpers hooks - charm proof + @flake8 --exclude hooks/charmhelpers hooks + @charm proof sync: - charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 032b9b64..21c0bc63 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_redux +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index b28240c6..b1ede910 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -333,3 +333,6 @@ def wrapper(decorated): self.register(decorated.__name__, decorated) return decorated return wrapper + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 19d83b8b..e3c42424 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,11 +1,21 @@ +import importlib from yaml import safe_load -from charmhelpers.core.hookenv import config from charmhelpers.core.host import ( - apt_install, apt_update, filter_installed_packages + apt_install, + apt_update, + filter_installed_packages +) +from urlparse import ( + urlparse, + urlunparse, ) import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,3 +66,79 @@ def configure_sources(update=False, add_source(sources[src_num], keys[src_num]) if update: apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in order +# from least- to most-specific URL matching. 
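+# install_remote() filters handlers on can_handle() and then tries each +# matching handler's install() in turn.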
+FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this module's submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list From ed3a58446715253792d568370453ac4d7615a036 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:04:42 +0100 Subject: [PATCH 0232/2699] Resync charm helpers from trunk --- ceph-osd/Makefile | 6 +- ceph-osd/charm-helpers-sync.yaml | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 3 + ceph-osd/hooks/charmhelpers/fetch/__init__.py | 92 ++++++++++++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 43 +++++++++ ceph-osd/hooks/hooks.py | 1 - 6 files changed, 139 insertions(+), 8 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/fetch/archiveurl.py diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index fdb22de2..9da53536 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -1,8 +1,8 @@ #!/usr/bin/make lint: - flake8 --exclude hooks/charmhelpers hooks - charm proof || : + @flake8 --exclude hooks/charmhelpers hooks + @charm proof || true sync: - charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 032b9b64..21c0bc63 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_redux +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core 
diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index b28240c6..b1ede910 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -333,3 +333,6 @@ def wrapper(decorated): self.register(decorated.__name__, decorated) return decorated return wrapper + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 19d83b8b..e3c42424 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,11 +1,21 @@ +import importlib from yaml import safe_load -from charmhelpers.core.hookenv import config from charmhelpers.core.host import ( - apt_install, apt_update, filter_installed_packages + apt_install, + apt_update, + filter_installed_packages +) +from urlparse import ( + urlparse, + urlunparse, ) import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) -CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,3 +66,79 @@ def configure_sources(update=False, add_source(sources[src_num], keys[src_num]) if update: apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in order +# from least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this module's submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..09ac69e3 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,43 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + return UnhandledSource(e.reason) + except OSError as e: + return UnhandledSource(e.strerror) + finally: + if os.path.isfile(dld_file): + os.unlink(dld_file) + return extract(dld_file) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 41bb4568..95b917e4 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -9,7 +9,6 @@ import glob import os -import subprocess import shutil import sys From d994c54aa556304d19d85158e67a4b35136fab47 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:05:03 +0100 Subject: [PATCH 0233/2699] Add missing charmhelpers files --- .../hooks/charmhelpers/fetch/archiveurl.py | 43 +++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..09ac69e3 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,43 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme 
not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + return UnhandledSource(e.reason) + except OSError as e: + return UnhandledSource(e.strerror) + finally: + if os.path.isfile(dld_file): + os.unlink(dld_file) + return extract(dld_file) From f0b20f0576419f37f895509bed6dff520b5b5e1d Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 10:05:03 +0100 Subject: [PATCH 0234/2699] Add missing charmhelpers files --- .../hooks/charmhelpers/fetch/archiveurl.py | 43 +++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 ceph-mon/hooks/charmhelpers/fetch/archiveurl.py diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..09ac69e3 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,43 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + return UnhandledSource(e.reason) + except OSError as e: + return UnhandledSource(e.strerror) + finally: + if os.path.isfile(dld_file): + os.unlink(dld_file) + return extract(dld_file) From 4ba83a0b74bea37a790fd1a3c2a653626003cd6c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 12:45:41 +0100 Subject: [PATCH 0235/2699] Resync with proposed enabled charmhelpers --- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/charmhelpers/core/host.py | 22 ++++++++++++++----- .../hooks/charmhelpers/fetch/__init__.py | 10 ++++++++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 21c0bc63..45b8a15a 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_proposed destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 19951ed2..d60d982d 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ 
b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -48,13 +48,13 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): log('creating user {0}'.format(username)) cmd = ['useradd'] if system_user or password is None: - cmd.append('--system') + cmd.append('--system') else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -261,3 +261,13 @@ def wrapped_f(*args): service('restart', service_name) return wrapped_f return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index e3c42424..5a306257 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -3,7 +3,8 @@ from charmhelpers.core.host import ( apt_install, apt_update, - filter_installed_packages + filter_installed_packages, + lsb_release ) from urlparse import ( urlparse, @@ -18,6 +19,9 @@ CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" def add_source(source, key=None): @@ -30,6 +34,10 @@ def add_source(source, key=None): pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'import', key]) From 51e7bb8a382ca601adbcb68d7d4010523cdfa6c0 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 12:45:41 +0100 Subject: [PATCH 0236/2699] Resync with proposed enabled charmhelpers --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/core/host.py | 22 ++++++++++++++----- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 10 ++++++++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 21c0bc63..45b8a15a 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_proposed destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 19951ed2..d60d982d 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -48,13 +48,13 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): log('creating user {0}'.format(username)) cmd = ['useradd'] if system_user or password is None: - cmd.append('--system') + cmd.append('--system') else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -261,3 +261,13 @@ def wrapped_f(*args): service('restart', 
service_name) return wrapped_f return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index e3c42424..5a306257 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -3,7 +3,8 @@ from charmhelpers.core.host import ( apt_install, apt_update, - filter_installed_packages + filter_installed_packages, + lsb_release ) from urlparse import ( urlparse, @@ -18,6 +19,9 @@ CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" def add_source(source, key=None): @@ -30,6 +34,10 @@ def add_source(source, key=None): pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'import', key]) From 1835a4d94803b2761460a8cb22d8941ee83f7cd7 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 12:46:27 +0100 Subject: [PATCH 0237/2699] Resync with configure_source supporting proposed --- ceph-osd/charm-helpers-sync.yaml | 2 +- ceph-osd/hooks/charmhelpers/core/host.py | 22 ++++++++++++++----- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 10 ++++++++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 21c0bc63..45b8a15a 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/configure_source_proposed destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 19951ed2..d60d982d 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -48,13 +48,13 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): log('creating user {0}'.format(username)) cmd = ['useradd'] if system_user or password is None: - cmd.append('--system') + cmd.append('--system') else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -261,3 +261,13 @@ def wrapped_f(*args): service('restart', service_name) return wrapped_f return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index e3c42424..5a306257 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -3,7 +3,8 @@ from charmhelpers.core.host import ( apt_install, apt_update, - filter_installed_packages + 
filter_installed_packages, + lsb_release ) from urlparse import ( urlparse, @@ -18,6 +19,9 @@ CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" def add_source(source, key=None): @@ -30,6 +34,10 @@ def add_source(source, key=None): pocket = source.split(':')[-1] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'import', key]) From ef54065cd7df8834625c17d42ced999ccb4049e8 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 21:51:57 +0100 Subject: [PATCH 0238/2699] Resync with trunk of charm-helpers --- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 24 ++++++++----------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 45b8a15a..21c0bc63 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_proposed +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index b1ede910..e57ea25c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -108,11 +108,12 @@ def execution_environment(): """A convenient bundling of the current execution context""" context = {} context['conf'] = config() - context['reltype'] = relation_type() - context['relid'] = relation_id() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() context['unit'] = local_unit() context['rels'] = relations() - context['rel'] = relation_get() context['env'] = os.environ return context @@ -150,13 +151,9 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - value = json.loads(subprocess.check_output(config_cmd_line)) + return json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value @cached @@ -169,13 +166,9 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - value = json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value def relation_set(relation_id=None, relation_settings={}, **kwargs): @@ -222,7 +215,7 @@ def relation_for_unit(unit=None, rid=None): if key.endswith('-list'): relation[key] = relation[key].split() relation['__unit__'] = unit - return Serializable(relation) + return relation @cached @@ -331,6 +324,9 @@ def wrapper(decorated): self.register(hook_name, decorated) else: self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) return decorated return wrapper From 0f8fa955d55ef9bebd65f731e12bd348edd74a99 Mon Sep 17 00:00:00 2001 From: 
James Page Date: Wed, 3 Jul 2013 21:51:57 +0100 Subject: [PATCH 0239/2699] Resync with trunk of charm-helpers --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 24 +++++++++------------ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 45b8a15a..21c0bc63 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_proposed +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index b1ede910..e57ea25c 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -108,11 +108,12 @@ def execution_environment(): """A convenient bundling of the current execution context""" context = {} context['conf'] = config() - context['reltype'] = relation_type() - context['relid'] = relation_id() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() context['unit'] = local_unit() context['rels'] = relations() - context['rel'] = relation_get() context['env'] = os.environ return context @@ -150,13 +151,9 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - value = json.loads(subprocess.check_output(config_cmd_line)) + return json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value @cached @@ -169,13 +166,9 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - value = json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value def relation_set(relation_id=None, relation_settings={}, **kwargs): @@ -222,7 +215,7 @@ def relation_for_unit(unit=None, rid=None): if key.endswith('-list'): relation[key] = relation[key].split() relation['__unit__'] = unit - return Serializable(relation) + return relation @cached @@ -331,6 +324,9 @@ def wrapper(decorated): self.register(hook_name, decorated) else: self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) return decorated return wrapper From b17d692b36e85eccf8b3f60af8cd663e0024a410 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 3 Jul 2013 21:53:22 +0100 Subject: [PATCH 0240/2699] resync with trunk of charm-helpers --- ceph-osd/charm-helpers-sync.yaml | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 24 +++++++++------------ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 45b8a15a..21c0bc63 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/configure_source_proposed +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index b1ede910..e57ea25c 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -108,11 +108,12 @@ def 
execution_environment(): """A convenient bundling of the current execution context""" context = {} context['conf'] = config() - context['reltype'] = relation_type() - context['relid'] = relation_id() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() context['unit'] = local_unit() context['rels'] = relations() - context['rel'] = relation_get() context['env'] = os.environ return context @@ -150,13 +151,9 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - value = json.loads(subprocess.check_output(config_cmd_line)) + return json.loads(subprocess.check_output(config_cmd_line)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value @cached @@ -169,13 +166,9 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - value = json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args)) except ValueError: return None - if isinstance(value, dict): - return Serializable(value) - else: - return value def relation_set(relation_id=None, relation_settings={}, **kwargs): @@ -222,7 +215,7 @@ def relation_for_unit(unit=None, rid=None): if key.endswith('-list'): relation[key] = relation[key].split() relation['__unit__'] = unit - return Serializable(relation) + return relation @cached @@ -331,6 +324,9 @@ def wrapper(decorated): self.register(hook_name, decorated) else: self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) return decorated return wrapper From 9d9c45c94eae9b504327e1b628f671291ba11952 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Jul 2013 09:32:38 +0100 Subject: [PATCH 0241/2699] Fixup dodgy disk detection --- ceph-proxy/hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 7f9666e1..acdabe85 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -112,7 +112,6 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: From 2ac0a174fc5dee8435c22795d58d38e16168f448 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Jul 2013 09:32:38 +0100 Subject: [PATCH 0242/2699] Fixup dodgy disk detection --- ceph-mon/hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 7f9666e1..acdabe85 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -112,7 +112,6 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: From 9da40dbbd2a6d4842677ab2680f7ce6c2a4acc3e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 8 Jul 2013 09:33:02 +0100 Subject: [PATCH 0243/2699] Fixup dodgy disk detection --- ceph-osd/hooks/ceph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 7f9666e1..acdabe85 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -112,7 +112,6 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - 
'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: From 8c754a951b61e2b8c9c9890b5793f47097be8d4b Mon Sep 17 00:00:00 2001 From: "Jorge O. Castro" Date: Thu, 11 Jul 2013 15:45:38 -0400 Subject: [PATCH 0244/2699] Add category --- ceph-proxy/metadata.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 0d84f430..0e416cc6 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -5,7 +5,8 @@ maintainer: James Page , description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -categories: ["misc"] +categories: + - file-servers peers: mon: interface: ceph From 78087b3bde8ddcdb49f9a6ecb581113d8a633296 Mon Sep 17 00:00:00 2001 From: "Jorge O. Castro" Date: Thu, 11 Jul 2013 15:45:38 -0400 Subject: [PATCH 0245/2699] Add category --- ceph-mon/metadata.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 0d84f430..0e416cc6 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -5,7 +5,8 @@ maintainer: James Page , description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -categories: ["misc"] +categories: + - file-servers peers: mon: interface: ceph From c38d35a6066c9d172b7affb51d6553fea4e7a9ab Mon Sep 17 00:00:00 2001 From: Antonio Rosales Date: Wed, 17 Jul 2013 00:23:14 -0500 Subject: [PATCH 0246/2699] Adding Ceph icon. --- ceph-radosgw/icon.svg | 414 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 ceph-radosgw/icon.svg diff --git a/ceph-radosgw/icon.svg b/ceph-radosgw/icon.svg new file mode 100644 index 00000000..de53ab2e --- /dev/null +++ b/ceph-radosgw/icon.svg @@ -0,0 +1,414 @@ + [414 added lines of SVG markup omitted: the XML tags were lost in extraction, leaving only bare '+' line markers and the embedded metadata value image/svg+xml] From 8fed7c6113d5aefb874b2d0e93cf969a2d172493 Mon Sep 17 00:00:00 2001 From: Nick Veitch Date: Wed, 17 Jul 2013 19:11:40 +0100 Subject: [PATCH 0247/2699] minor spelling/grammar edits to the README --- ceph-proxy/README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 89ea5807..282148af 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -24,11 +24,11 @@ are provided: ceph-authtool /dev/stdout --name=mon. --gen-key These two pieces of configuration must NOT be changed post bootstrap; attempting
Since we don't know the -names *or* addresses of the machines in advance, we use the relation-joined +a different strategy to form the monitor cluster. Since we don't know the +names *or* addresses of the machines in advance, we use the _relation-joined_ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create -the filesystems with a special GPT partition type. udev is set up to mounti -such filesystems and start the osd daemons as their storage becomes visible to -the system (or after "udevadm trigger"). +The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to +create the filesystems with a special GPT partition type. *udev* is set up +to mount such filesystems and start the osd daemons as their storage becomes +visible to the system (or after "udevadm trigger"). -The Chef cookbook above performs some extra steps to generate an OSD +The Chef cookbook mentioned above performs some extra steps to generate an OSD bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. From 82a2cf127e6e461a937b05e68516002362f899b4 Mon Sep 17 00:00:00 2001 From: Nick Veitch Date: Wed, 17 Jul 2013 19:11:40 +0100 Subject: [PATCH 0248/2699] minor spelling/grammar edits to the README --- ceph-mon/README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 89ea5807..282148af 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -24,11 +24,11 @@ are provided: ceph-authtool /dev/stdout --name=mon. --gen-key These two pieces of configuration must NOT be changed post bootstrap; attempting -todo this will cause a reconfiguration error and new service units will not join +to do this will cause a reconfiguration error and new service units will not join the existing ceph cluster. -The charm also supports specification of the storage devices to use in the ceph -cluster. +The charm also supports the specification of storage devices to be used in the +ceph cluster. osd-devices: A list of devices that the charm will attempt to detect, initialise and @@ -72,18 +72,18 @@ and osd. This charm uses the new-style Ceph deployment as reverse-engineered from the Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected -a different strategy to form the monitor cluster. Since we don't know the -names *or* addresses of the machines in advance, we use the relation-joined +a different strategy to form the monitor cluster. Since we don't know the +names *or* addresses of the machines in advance, we use the _relation-joined_ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". ceph-disk-prepare is used to create -the filesystems with a special GPT partition type. udev is set up to mounti -such filesystems and start the osd daemons as their storage becomes visible to -the system (or after "udevadm trigger"). +The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to +create the filesystems with a special GPT partition type. 
*udev* is set up +to mount such filesystems and start the osd daemons as their storage becomes +visible to the system (or after "udevadm trigger"). -The Chef cookbook above performs some extra steps to generate an OSD +The Chef cookbook mentioned above performs some extra steps to generate an OSD bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. From 3977a1a2f22cef2e03a43b1d4c836309f89617bd Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 11:44:16 +0100 Subject: [PATCH 0249/2699] Add support for use of directories instead of devices for OSD's --- ceph-osd/config.yaml | 3 +++ ceph-osd/hooks/ceph.py | 46 ++++++++++++++++++++++++++++++++++++++--- ceph-osd/hooks/hooks.py | 14 +++++++++---- ceph-osd/revision | 2 +- 4 files changed, 57 insertions(+), 8 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 8db7b035..d915cc1f 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -7,6 +7,9 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + . + For ceph >= 0.56.6 these can also be directories instead of devices - the + charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string description: | diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index acdabe85..83ab4c2d 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -15,14 +15,18 @@ from charmhelpers.core.host import ( mkdir, service_restart, - log +) +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, - is_block_device + is_block_device, ) from utils import ( - get_unit_hostname + get_unit_hostname, ) LEADER = 'leader' @@ -119,6 +123,16 @@ def is_osd_disk(dev): return False +def start_osds(devices): + if get_ceph_version() < "0.56.6": + # Only supports block devices - force a rescan + rescan_osd_devices() + else: + # Use ceph-disk-activate for later ceph versions + for dev_or_path in devices: + subprocess.check_call(['ceph-disk-activate', dev_or_path]) + + def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', @@ -291,6 +305,13 @@ def update_monfs(): def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, reformat_osd) + else: + osdize_dir(dev) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -327,6 +348,25 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): subprocess.check_call(cmd) +def osdize_dir(path): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if get_ceph_version() < "0.56.6": + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path) + cmd = [ + 'ceph-disk-prepare', + '--data-dir', + path + ] + subprocess.check_call(cmd) + + def device_mounted(dev): return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 95b917e4..2d7dd502 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -96,10 +96,10 @@ def config_changed(): if ceph.is_bootstrapped(): log('ceph bootstrapped, rescanning disks') emit_cephconf() - for dev in config('osd-devices').split(' '): 
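
The start_osds helper added in patch 0249 leans on the OSD hotplugging described in the READMEs: prepared partitions carry a special GPT type GUID, and udev rules mount them and start the daemons as they appear. The diff truncates rescan_osd_devices after 'udevadm', 'trigger', so the match flags below are an assumption about what a block-device rescan looks like, not a quote from the charm:

    import subprocess

    def rescan_osd_devices():
        # Replay "add" events for block devices so the ceph udev rules
        # re-examine any freshly prepared OSD partitions.
        subprocess.check_call([
            'udevadm', 'trigger',
            '--subsystem-match=block',
            '--action=add',
        ])
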
+ for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), config('osd-reformat')) - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) log('End config-changed hook.') @@ -142,6 +142,12 @@ def reformat_osd(): return False +def get_devices(): + if config('osd-devices'): + return config('osd-devices').split(' ') + else: + return [] + @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): @@ -152,10 +158,10 @@ def mon_relation(): log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) - for dev in config('osd-devices').split(' '): + for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), config('osd-reformat')) - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/revision b/ceph-osd/revision index 7f8f011e..45a4fb75 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -7 +8 From 66661833bc58a5b73f15aaeb964752541c110a1c Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 11:44:50 +0100 Subject: [PATCH 0250/2699] Add support for use of directories instead of devices for OSD's --- ceph-proxy/README.md | 15 +++++++------ ceph-proxy/config.yaml | 3 +++ ceph-proxy/hooks/ceph.py | 46 ++++++++++++++++++++++++++++++++++++--- ceph-proxy/hooks/hooks.py | 16 ++++++++++---- ceph-proxy/revision | 2 +- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 282148af..40fe82c4 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -27,15 +27,20 @@ These two pieces of configuration must NOT be changed post bootstrap; attempting to do this will cause a reconfiguration error and new service units will not join the existing ceph cluster. -The charm also supports the specification of storage devices to be used in the +The charm also supports the specification of storage devices to be used in the ceph cluster. osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. - This this can be a superset of the actual storage devices presented to - each service unit and can be changed post ceph bootstrap using `juju set`. + This can be a superset of the actual storage devices presented to each + service unit and can be changed post ceph bootstrap using `juju set`. + + The full path of each device must be provided, e.g. /dev/vdb. + + For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of + directories instead of devices is also supported. At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): @@ -66,10 +71,6 @@ Location: http://jujucharms.com/charms/ceph Technical Bootnotes =================== -This charm is currently deliberately inflexible and potentially destructive. -It is designed to deploy on exactly three machines. Each machine will run mon -and osd. - This charm uses the new-style Ceph deployment as reverse-engineered from the Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected a different strategy to form the monitor cluster. Since we don't know the diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index f5061360..bac32bca 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -39,6 +39,9 @@ options: . 
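
The get_devices() helper introduced here is a small guard around the osd-devices option: the option is a single space-separated string, and calling .split on an unset (None) value would raise AttributeError. A doctest-style sketch, with the config lookup replaced by a parameter for illustration:

    def get_devices(value):
        """Parse a space-separated osd-devices setting.

        >>> get_devices('/dev/vdb /srv/ceph-osd')
        ['/dev/vdb', '/srv/ceph-osd']
        >>> get_devices(None)
        []
        """
        return value.split(' ') if value else []
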
These devices are the range of devices that will be checked for and used across all service units. + . + For ceph >= 0.56.6 these can also be directories instead of devices - the + charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string description: | diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index acdabe85..83ab4c2d 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -15,14 +15,18 @@ from charmhelpers.core.host import ( mkdir, service_restart, - log +) +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, - is_block_device + is_block_device, ) from utils import ( - get_unit_hostname + get_unit_hostname, ) LEADER = 'leader' @@ -119,6 +123,16 @@ def is_osd_disk(dev): return False +def start_osds(devices): + if get_ceph_version() < "0.56.6": + # Only supports block devices - force a rescan + rescan_osd_devices() + else: + # Use ceph-disk-activate for later ceph versions + for dev_or_path in devices: + subprocess.check_call(['ceph-disk-activate', dev_or_path]) + + def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', @@ -291,6 +305,13 @@ def update_monfs(): def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, reformat_osd) + else: + osdize_dir(dev) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -327,6 +348,25 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): subprocess.check_call(cmd) +def osdize_dir(path): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if get_ceph_version() < "0.56.6": + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path) + cmd = [ + 'ceph-disk-prepare', + '--data-dir', + path + ] + subprocess.check_call(cmd) + + def device_mounted(dev): return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index b61e04b6..700e1f6b 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -102,7 +102,7 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in config('osd-devices').split(' '): + for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), reformat_osd()) @@ -112,7 +112,7 @@ def config_changed(): ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) log('End config-changed hook.') @@ -139,6 +139,13 @@ def reformat_osd(): return False +def get_devices(): + if config('osd-devices'): + return config('osd-devices').split(' ') + else: + return [] + + @hooks.hook('mon-relation-departed', 'mon-relation-joined') def mon_relation(): @@ -149,7 +156,7 @@ def mon_relation(): if len(get_mon_hosts()) >= moncount: ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) notify_osds() notify_radosgws() notify_client() @@ -258,7 +265,8 @@ def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
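
One wrinkle in the osdize_dir hunk above: the bare `raise` after the ERROR log has no active exception to re-raise, so at runtime it fails with its own error rather than a meaningful one. A sketch of the same guard with an explicit exception; the exception class here is our choice, not the charm's:

    def assert_dir_osds_supported(ceph_version):
        # Directory-backed OSDs need ceph-disk's --data-dir support,
        # which arrived in 0.56.6.
        if ceph_version < "0.56.6":
            raise RuntimeError(
                'directory OSDs require ceph >= 0.56.6, '
                'found {}'.format(ceph_version))
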
service_restart('ceph-mon-all') - ceph.rescan_osd_devices() + if ceph.is_bootstrapped(): + ceph.start_osds(get_devices()) if __name__ == '__main__': diff --git a/ceph-proxy/revision b/ceph-proxy/revision index cd5b0252..f906e184 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -92 +96 From c13c02510b0e9db383f170b2df9281fdc30c02cb Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 11:44:50 +0100 Subject: [PATCH 0251/2699] Add support for use of directories instead of devices for OSD's --- ceph-mon/README.md | 15 +++++++------- ceph-mon/config.yaml | 3 +++ ceph-mon/hooks/ceph.py | 46 ++++++++++++++++++++++++++++++++++++++--- ceph-mon/hooks/hooks.py | 16 ++++++++++---- ceph-mon/revision | 2 +- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 282148af..40fe82c4 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -27,15 +27,20 @@ These two pieces of configuration must NOT be changed post bootstrap; attempting to do this will cause a reconfiguration error and new service units will not join the existing ceph cluster. -The charm also supports the specification of storage devices to be used in the +The charm also supports the specification of storage devices to be used in the ceph cluster. osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. - This this can be a superset of the actual storage devices presented to - each service unit and can be changed post ceph bootstrap using `juju set`. + This can be a superset of the actual storage devices presented to each + service unit and can be changed post ceph bootstrap using `juju set`. + + The full path of each device must be provided, e.g. /dev/vdb. + + For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of + directories instead of devices is also supported. At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): @@ -66,10 +71,6 @@ Location: http://jujucharms.com/charms/ceph Technical Bootnotes =================== -This charm is currently deliberately inflexible and potentially destructive. -It is designed to deploy on exactly three machines. Each machine will run mon -and osd. - This charm uses the new-style Ceph deployment as reverse-engineered from the Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected a different strategy to form the monitor cluster. Since we don't know the diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index f5061360..bac32bca 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -39,6 +39,9 @@ options: . These devices are the range of devices that will be checked for and used across all service units. + . + For ceph >= 0.56.6 these can also be directories instead of devices - the + charm assumes anything not starting with /dev is a directory instead. 
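
Condensing the dispatch these patches add (paraphrased from the hunks above, not a drop-in replacement): a /dev prefix selects the block-device path, anything else is prepared in place as a directory. The osdize_dev body is stubbed here; the full version appears in the diffs.

    import os
    import subprocess

    def osdize(dev, osd_format, osd_journal, reformat_osd=False):
        if dev.startswith('/dev'):
            osdize_dev(dev, osd_format, osd_journal, reformat_osd)
        else:
            osdize_dir(dev)

    def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False):
        # Stub: block devices go through ceph-disk-prepare directly.
        subprocess.check_call(['ceph-disk-prepare', dev])

    def osdize_dir(path):
        # The charm treats an existing 'upstart' marker file as proof
        # the directory already hosts an OSD and bails out early.
        if os.path.exists(os.path.join(path, 'upstart')):
            return
        if not os.path.isdir(path):
            os.makedirs(path)
        subprocess.check_call(['ceph-disk-prepare', '--data-dir', path])

Note the heuristic's reach: a udev path such as /dev/disk/by-id/... still counts as a device, while a relative path is quietly treated as a directory.
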
osd-journal: type: string description: | diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index acdabe85..83ab4c2d 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -15,14 +15,18 @@ from charmhelpers.core.host import ( mkdir, service_restart, - log +) +from charmhelpers.core.hookenv import ( + log, + ERROR, + config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, - is_block_device + is_block_device, ) from utils import ( - get_unit_hostname + get_unit_hostname, ) LEADER = 'leader' @@ -119,6 +123,16 @@ def is_osd_disk(dev): return False +def start_osds(devices): + if get_ceph_version() < "0.56.6": + # Only supports block devices - force a rescan + rescan_osd_devices() + else: + # Use ceph-disk-activate for later ceph versions + for dev_or_path in devices: + subprocess.check_call(['ceph-disk-activate', dev_or_path]) + + def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', @@ -291,6 +305,13 @@ def update_monfs(): def osdize(dev, osd_format, osd_journal, reformat_osd=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, reformat_osd) + else: + osdize_dir(dev) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -327,6 +348,25 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False): subprocess.check_call(cmd) +def osdize_dir(path): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if get_ceph_version() < "0.56.6": + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path) + cmd = [ + 'ceph-disk-prepare', + '--data-dir', + path + ] + subprocess.check_call(cmd) + + def device_mounted(dev): return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index b61e04b6..700e1f6b 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -102,7 +102,7 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in config('osd-devices').split(' '): + for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), reformat_osd()) @@ -112,7 +112,7 @@ def config_changed(): ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) log('End config-changed hook.') @@ -139,6 +139,13 @@ def reformat_osd(): return False +def get_devices(): + if config('osd-devices'): + return config('osd-devices').split(' ') + else: + return [] + + @hooks.hook('mon-relation-departed', 'mon-relation-joined') def mon_relation(): @@ -149,7 +156,7 @@ def mon_relation(): if len(get_mon_hosts()) >= moncount: ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - ceph.rescan_osd_devices() + ceph.start_osds(get_devices()) notify_osds() notify_radosgws() notify_client() @@ -258,7 +265,8 @@ def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
service_restart('ceph-mon-all') - ceph.rescan_osd_devices() + if ceph.is_bootstrapped(): + ceph.start_osds(get_devices()) if __name__ == '__main__': diff --git a/ceph-mon/revision b/ceph-mon/revision index cd5b0252..f906e184 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -92 +96 From 4401c6ef62c7b131f542b3b3606208ff75ea86be Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:23:02 +0100 Subject: [PATCH 0252/2699] Ensure disks exist before activating --- ceph-proxy/hooks/ceph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 83ab4c2d..69cfec4b 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -130,7 +130,8 @@ def start_osds(devices): else: # Use ceph-disk-activate for later ceph versions for dev_or_path in devices: - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + if os.path.exists(dev_or_path): + subprocess.check_call(['ceph-disk-activate', dev_or_path]) def rescan_osd_devices(): From 19269e0b484bcd3e56beda6d4daf32ab122bed64 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:23:02 +0100 Subject: [PATCH 0253/2699] Ensure disks exist before activating --- ceph-mon/hooks/ceph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 83ab4c2d..69cfec4b 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -130,7 +130,8 @@ def start_osds(devices): else: # Use ceph-disk-activate for later ceph versions for dev_or_path in devices: - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + if os.path.exists(dev_or_path): + subprocess.check_call(['ceph-disk-activate', dev_or_path]) def rescan_osd_devices(): From 81018df6f47e3fb39ae70068a30e2fc3a980a8bd Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:24:45 +0100 Subject: [PATCH 0254/2699] Ensure dev/patch exists before activating --- ceph-osd/hooks/ceph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 83ab4c2d..69cfec4b 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -130,7 +130,8 @@ def start_osds(devices): else: # Use ceph-disk-activate for later ceph versions for dev_or_path in devices: - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + if os.path.exists(dev_or_path): + subprocess.check_call(['ceph-disk-activate', dev_or_path]) def rescan_osd_devices(): From e525f225b2e8414ce2567309cc334e382168fde3 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:30:53 +0100 Subject: [PATCH 0255/2699] Don't try to create osd-bootstrap key - it should always exist --- ceph-proxy/hooks/ceph.py | 41 +++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 69cfec4b..fd3ad6a0 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -19,7 +19,6 @@ from charmhelpers.core.hookenv import ( log, ERROR, - config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -177,8 +176,32 @@ def import_osd_bootstrap_key(key): } +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in 
element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + def get_osd_bootstrap_key(): - return get_named_key('bootstrap-osd', _osd_bootstrap_caps) + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get', 'client.bootstrap-osd', + ] + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -229,19 +252,7 @@ def get_named_key(name, caps=None): subsystem, '; '.join(subcaps), ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 def bootstrap_monitor_cluster(secret): From 0c7a8db8edefee70f3eefdfdd790f9fedd49a341 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:30:53 +0100 Subject: [PATCH 0256/2699] Don't try to create osd-bootstrap key - it should always exist --- ceph-mon/hooks/ceph.py | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 69cfec4b..fd3ad6a0 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -19,7 +19,6 @@ from charmhelpers.core.hookenv import ( log, ERROR, - config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -177,8 +176,32 @@ def import_osd_bootstrap_key(key): } +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + def get_osd_bootstrap_key(): - return get_named_key('bootstrap-osd', _osd_bootstrap_caps) + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get', 'client.bootstrap-osd', + ] + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -229,19 +252,7 @@ def get_named_key(name, caps=None): subsystem, '; '.join(subcaps), ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 def bootstrap_monitor_cluster(secret): From bf69ea9a870868c762ad33328e30d5884747395e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:32:19 +0100 Subject: [PATCH 0257/2699] Don't try to create osd-bootstrap key - it should always exist --- ceph-osd/hooks/ceph.py | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 
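
The parse_key helper factored out in patches 0255-0257 papers over an inconsistency in the ceph CLI: 'auth get-or-create' prints just the secret when it creates a key, while 'auth get' prints a whole keyring section. A worked example; the key material below is a made-up sample, not output from a real cluster:

    create_output = "AQBoVHNSAAAAABAAoC2yBPnJyLvhDB6S2pucGQ=="
    get_output = (
        "[client.bootstrap-osd]\n"
        "\tkey = AQBoVHNSAAAAABAAoC2yBPnJyLvhDB6S2pucGQ==\n"
    )

    def parse_key(raw_key):
        # One line: already the bare secret. Otherwise extract the
        # 'key = ...' entry from the keyring section.
        if len(raw_key.splitlines()) == 1:
            return raw_key
        for element in raw_key.splitlines():
            if 'key' in element:
                return element.split(' = ')[1].strip()

    assert parse_key(create_output) == parse_key(get_output)
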
26 insertions(+), 15 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 69cfec4b..fd3ad6a0 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -19,7 +19,6 @@ from charmhelpers.core.hookenv import ( log, ERROR, - config, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -177,8 +176,32 @@ def import_osd_bootstrap_key(key): } +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + def get_osd_bootstrap_key(): - return get_named_key('bootstrap-osd', _osd_bootstrap_caps) + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get', 'client.bootstrap-osd', + ] + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -229,19 +252,7 @@ def get_named_key(name, caps=None): subsystem, '; '.join(subcaps), ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 def bootstrap_monitor_cluster(secret): From e5cdb2ad9f860a35934d3da09f5c25ec3b11284b Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:47:16 +0100 Subject: [PATCH 0258/2699] Don't prepare disks until cluster is bootstrapped --- ceph-proxy/hooks/hooks.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 700e1f6b..8fd69502 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -102,16 +102,15 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), - reformat_osd()) - # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): + for dev in get_devices(): + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) ceph.start_osds(get_devices()) log('End config-changed hook.') @@ -156,6 +155,9 @@ def mon_relation(): if len(get_mon_hosts()) >= moncount: ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() + for dev in get_devices(): + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) ceph.start_osds(get_devices()) notify_osds() notify_radosgws() From 4f2d1d9daa978ba6c138e57bda93bc825ef3c5fa Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:47:16 +0100 Subject: [PATCH 0259/2699] Don't prepare disks until cluster is bootstrapped --- ceph-mon/hooks/hooks.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git 
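
Patches 0258/0259 ("Don't prepare disks until cluster is bootstrapped") fix a sequencing hazard: presumably osdize needs a live monitor quorum to allocate and register new OSDs, so it must not run first. The resulting shape of config-changed, condensed from the hunk above with helpers as defined in the earlier patches:

    def config_changed():
        # Single-node deployments can bootstrap immediately.
        if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
            ceph.bootstrap_monitor_cluster(config('monitor-secret'))
            ceph.wait_for_bootstrap()
        # Prepare and start OSDs only once the mons have a quorum.
        if ceph.is_bootstrapped():
            for dev in get_devices():
                ceph.osdize(dev, config('osd-format'),
                            config('osd-journal'), reformat_osd())
            ceph.start_osds(get_devices())

The same ordering is applied to the mon-relation hook once enough peers have joined.
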
a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 700e1f6b..8fd69502 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -102,16 +102,15 @@ def config_changed(): with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), - reformat_osd()) - # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() if ceph.is_bootstrapped(): + for dev in get_devices(): + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) ceph.start_osds(get_devices()) log('End config-changed hook.') @@ -156,6 +155,9 @@ def mon_relation(): if len(get_mon_hosts()) >= moncount: ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() + for dev in get_devices(): + ceph.osdize(dev, config('osd-format'), config('osd-journal'), + reformat_osd()) ceph.start_osds(get_devices()) notify_osds() notify_radosgws() From eb4870abca358887acfd06e01bf210f26b98d9be Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:47:42 +0100 Subject: [PATCH 0260/2699] Bump revision --- ceph-proxy/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index f906e184..c17e934b 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -96 +97 From 167e4600f259ba664be8ba4fdfe0af5ed3107f53 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 12:47:42 +0100 Subject: [PATCH 0261/2699] Bump revision --- ceph-mon/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/revision b/ceph-mon/revision index f906e184..c17e934b 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -96 +97 From b8cace2176201a45c76927e29f42720b85284ff8 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:05:25 +0100 Subject: [PATCH 0262/2699] Tweak handling for OSD startup --- ceph-proxy/hooks/ceph.py | 12 ++++++------ ceph-proxy/revision | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index fd3ad6a0..5152dde5 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -123,13 +123,13 @@ def is_osd_disk(dev): def start_osds(devices): - if get_ceph_version() < "0.56.6": - # Only supports block devices - force a rescan - rescan_osd_devices() - else: - # Use ceph-disk-activate for later ceph versions + # Scan for ceph block devices + rescan_osd_devices() + if get_ceph_version() >= "0.56.6": + # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if os.path.exists(dev_or_path): + if (os.path.exists(dev_or_path) and + os.path.isdir(dev_or_path)): subprocess.check_call(['ceph-disk-activate', dev_or_path]) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index c17e934b..6529ff88 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -97 +98 From 30171cb60b62e9e50cecaaea88d160d5cc8c4299 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:05:25 +0100 Subject: [PATCH 0263/2699] Tweak handling for OSD startup --- ceph-mon/hooks/ceph.py | 12 ++++++------ ceph-mon/revision | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index fd3ad6a0..5152dde5 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -123,13 +123,13 @@ 
def is_osd_disk(dev): def start_osds(devices): - if get_ceph_version() < "0.56.6": - # Only supports block devices - force a rescan - rescan_osd_devices() - else: - # Use ceph-disk-activate for later ceph versions + # Scan for ceph block devices + rescan_osd_devices() + if get_ceph_version() >= "0.56.6": + # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if os.path.exists(dev_or_path): + if (os.path.exists(dev_or_path) and + os.path.isdir(dev_or_path)): subprocess.check_call(['ceph-disk-activate', dev_or_path]) diff --git a/ceph-mon/revision b/ceph-mon/revision index c17e934b..6529ff88 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -97 +98 From f59626fdf53a1ff3eb3c37f078f12490cecb5e73 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:05:55 +0100 Subject: [PATCH 0264/2699] Tweak handling for OSD startup --- ceph-osd/hooks/ceph.py | 12 ++++++------ ceph-osd/revision | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index fd3ad6a0..5152dde5 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -123,13 +123,13 @@ def is_osd_disk(dev): def start_osds(devices): - if get_ceph_version() < "0.56.6": - # Only supports block devices - force a rescan - rescan_osd_devices() - else: - # Use ceph-disk-activate for later ceph versions + # Scan for ceph block devices + rescan_osd_devices() + if get_ceph_version() >= "0.56.6": + # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if os.path.exists(dev_or_path): + if (os.path.exists(dev_or_path) and + os.path.isdir(dev_or_path)): subprocess.check_call(['ceph-disk-activate', dev_or_path]) diff --git a/ceph-osd/revision b/ceph-osd/revision index 45a4fb75..ec635144 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -8 +9 From bd87cbde642aa489e28aaf0a3f9664cba99909ba Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:38:34 +0100 Subject: [PATCH 0265/2699] Deal with osd bootstrap key upgrade challenges --- ceph-proxy/hooks/ceph.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 5152dde5..edadad3a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -175,6 +175,12 @@ def import_osd_bootstrap_key(key): ] } +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + def parse_key(raw_key): # get-or-create appears to have different output depending @@ -192,16 +198,15 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get', 'client.bootstrap-osd', - ] - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps) + return key _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" From 6dbaa75b6153cdaf8df3630b6a1d17359ee5ba60 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:38:34 +0100 Subject: [PATCH 0266/2699] Deal with osd bootstrap key upgrade challenges --- ceph-mon/hooks/ceph.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) 
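
A caveat on the get_ceph_version() >= "0.56.6" tests used throughout these hunks: that is lexicographic string comparison. It happens to hold for the versions the charms targeted, but it ranks "0.100.0" below "0.56.6". The charms do not do the following; a numeric comparison for contrast, assuming plain dotted-numeric version strings:

    def version_tuple(version):
        """'0.56.6' -> (0, 56, 6), so ordering is numeric."""
        return tuple(int(part) for part in version.split('.'))

    assert version_tuple('0.100.0') > version_tuple('0.56.6')
    assert '0.100.0' < '0.56.6'  # string ordering disagrees
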
diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 5152dde5..edadad3a 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -175,6 +175,12 @@ def import_osd_bootstrap_key(key): ] } +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + def parse_key(raw_key): # get-or-create appears to have different output depending @@ -192,16 +198,15 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get', 'client.bootstrap-osd', - ] - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps) + return key _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" From 1938062d7e69d95ac9c0f043ae13e28369712212 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:38:41 +0100 Subject: [PATCH 0267/2699] Deal with osd bootstrap key upgrade challenges --- ceph-proxy/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 6529ff88..3ad5abd0 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -98 +99 From 2d5298b29fa79252f54d59eb057c84e67c561b93 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:38:41 +0100 Subject: [PATCH 0268/2699] Deal with osd bootstrap key upgrade challenges --- ceph-mon/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/revision b/ceph-mon/revision index 6529ff88..3ad5abd0 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -98 +99 From ea2f810d1ed689526650f49384705767ad1a3734 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:41:45 +0100 Subject: [PATCH 0269/2699] Fixups for osd bootstrap key handling --- ceph-osd/hooks/ceph.py | 25 +++++++++++++++---------- ceph-osd/revision | 2 +- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 5152dde5..edadad3a 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -175,6 +175,12 @@ def import_osd_bootstrap_key(key): ] } +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + def parse_key(raw_key): # get-or-create appears to have different output depending @@ -192,16 +198,15 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get', 'client.bootstrap-osd', - ] - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('client.bootstrap-osd', + _osd_bootstrap_caps) + return key _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" diff --git a/ceph-osd/revision b/ceph-osd/revision index ec635144..f599e28b 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -9 +10 From 8f142b806c86e4fcb2caa9e8c1f738e6c9102c37 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 
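
Patches 0265-0269 wrap bootstrap-key retrieval in a fallback: newer monitors understand the managed "allow profile bootstrap-osd" capability, and when that request fails the charm retries with the older explicit capability list. The shape of it, with one deliberate difference flagged: the diffs use a bare except:, whereas catching CalledProcessError (what check_output raises) would be the narrower choice:

    import subprocess

    def get_osd_bootstrap_key():
        try:
            # Preferred: the mon-managed bootstrap-osd profile.
            return get_named_key('bootstrap-osd',
                                 _osd_bootstrap_caps_profile)
        except subprocess.CalledProcessError:
            # Older monitors reject 'allow profile ...'; fall back to
            # the explicit old-style capabilities.
            return get_named_key('bootstrap-osd', _osd_bootstrap_caps)

Here get_named_key and the two caps dictionaries are as defined in the diffs above.
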
14:42:48 +0100 Subject: [PATCH 0270/2699] Tidy --- ceph-osd/hooks/ceph.py | 3 +-- ceph-osd/hooks/hooks.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index edadad3a..a3735456 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -128,8 +128,7 @@ def start_osds(devices): if get_ceph_version() >= "0.56.6": # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if (os.path.exists(dev_or_path) and - os.path.isdir(dev_or_path)): + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): subprocess.check_call(['ceph-disk-activate', dev_or_path]) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 2d7dd502..1424eef1 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -148,6 +148,7 @@ def get_devices(): else: return [] + @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): From e4cdbfb7c42e512a0b8333d1100dd696958cd6a6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:57:22 +0100 Subject: [PATCH 0271/2699] Correct osd bootstrap key name --- ceph-osd/hooks/ceph.py | 4 ++-- ceph-osd/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index a3735456..fca22549 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -199,11 +199,11 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): try: # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps_profile) except: # If that fails try with the older style permissions - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps) return key diff --git a/ceph-osd/revision b/ceph-osd/revision index f599e28b..b4de3947 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -10 +11 From 4888bfaa0411c169f8954879b854d68f079c4e8d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:57:46 +0100 Subject: [PATCH 0272/2699] Correct osd bootstrap key name --- ceph-proxy/hooks/ceph.py | 7 +++---- ceph-proxy/revision | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index edadad3a..fca22549 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -128,8 +128,7 @@ def start_osds(devices): if get_ceph_version() >= "0.56.6": # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if (os.path.exists(dev_or_path) and - os.path.isdir(dev_or_path)): + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): subprocess.check_call(['ceph-disk-activate', dev_or_path]) @@ -200,11 +199,11 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): try: # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps_profile) except: # If that fails try with the older style permissions - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps) return key diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 3ad5abd0..29d6383b 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -99 +100 From adc40240ec5a5d5c9748f89c41991afe8287357d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 27 Aug 2013 14:57:46 +0100 Subject: [PATCH 
0273/2699] Correct osd bootstrap key name --- ceph-mon/hooks/ceph.py | 7 +++---- ceph-mon/revision | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index edadad3a..fca22549 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -128,8 +128,7 @@ def start_osds(devices): if get_ceph_version() >= "0.56.6": # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: - if (os.path.exists(dev_or_path) and - os.path.isdir(dev_or_path)): + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): subprocess.check_call(['ceph-disk-activate', dev_or_path]) @@ -200,11 +199,11 @@ def parse_key(raw_key): def get_osd_bootstrap_key(): try: # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps_profile) except: # If that fails try with the older style permissions - key = get_named_key('client.bootstrap-osd', + key = get_named_key('bootstrap-osd', _osd_bootstrap_caps) return key diff --git a/ceph-mon/revision b/ceph-mon/revision index 3ad5abd0..29d6383b 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -99 +100 From bdb6825e1413bb5e41fff9cd85d3f3e647349979 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:11:04 +0100 Subject: [PATCH 0274/2699] Update default mon permissions to have rw for pool creation, upgrade keys on charm upgrade --- ceph-proxy/hooks/ceph.py | 17 +++++++++++++++-- ceph-proxy/hooks/hooks.py | 14 ++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index fca22549..461e4416 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -224,7 +224,7 @@ def import_radosgw_key(key): # OSD caps taken from ceph-create-keys _radosgw_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } @@ -234,7 +234,7 @@ def get_radosgw_key(): _default_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } @@ -259,6 +259,19 @@ def get_named_key(name, caps=None): return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 +def upgrade_key_caps(key, caps): + ''' Upgrade key to have capabilities caps ''' + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.iteritems(): + cmd.extend([subsystem, "'{}'".format(subcaps)]) + subprocess.check_call(cmd) + + def bootstrap_monitor_cluster(secret): hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8fd69502..e8a790f9 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -206,6 +206,19 @@ def notify_client(): log('End notify_client.') +def upgrade_keys(): + ''' Ceph now required mon allow rw for pool creation ''' + if len(relation_ids('radosgw')) > 0: + ceph.upgrade_key_caps('radosgw.gateway', + ceph._radosgw_caps) + for relid in relation_ids('client'): + units = related_units(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + ceph.upgrade_key_caps(service_name, + ceph._default_caps) + + @hooks.hook('osd-relation-joined') def osd_relation(): log('Begin osd-relation hook.') @@ -259,6 +272,7 @@ def upgrade_charm(): apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() ceph.update_monfs() + upgrade_keys() 
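
The new upgrade_key_caps drives 'ceph auth caps', which replaces a key's capabilities wholesale; each subsystem name and its capability string travel as separate argv elements, so no shell quoting is involved. For the radosgw key, the upgrade amounts to the equivalent of this illustrative invocation:

    import subprocess

    subprocess.check_call([
        'ceph', 'auth', 'caps', 'client.radosgw.gateway',
        'mon', 'allow rw',
        'osd', 'allow rwx',
    ])
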
log('End upgrade-charm hook.') From 9791c7cfa1bc7bf8195a801b3303d00f70223a0f Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:11:04 +0100 Subject: [PATCH 0275/2699] Update default mon permissions to have rw for pool creation, upgrade keys on charm upgrade --- ceph-mon/hooks/ceph.py | 17 +++++++++++++++-- ceph-mon/hooks/hooks.py | 14 ++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index fca22549..461e4416 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -224,7 +224,7 @@ def import_radosgw_key(key): # OSD caps taken from ceph-create-keys _radosgw_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } @@ -234,7 +234,7 @@ def get_radosgw_key(): _default_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } @@ -259,6 +259,19 @@ def get_named_key(name, caps=None): return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 +def upgrade_key_caps(key, caps): + ''' Upgrade key to have capabilities caps ''' + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.iteritems(): + cmd.extend([subsystem, "'{}'".format(subcaps)]) + subprocess.check_call(cmd) + + def bootstrap_monitor_cluster(secret): hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8fd69502..e8a790f9 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -206,6 +206,19 @@ def notify_client(): log('End notify_client.') +def upgrade_keys(): + ''' Ceph now required mon allow rw for pool creation ''' + if len(relation_ids('radosgw')) > 0: + ceph.upgrade_key_caps('radosgw.gateway', + ceph._radosgw_caps) + for relid in relation_ids('client'): + units = related_units(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + ceph.upgrade_key_caps(service_name, + ceph._default_caps) + + @hooks.hook('osd-relation-joined') def osd_relation(): log('Begin osd-relation hook.') @@ -259,6 +272,7 @@ def upgrade_charm(): apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() ceph.update_monfs() + upgrade_keys() log('End upgrade-charm hook.') From eea38776f6444b9e4ee0c385238ea99c18fd6667 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:13:17 +0100 Subject: [PATCH 0276/2699] Prefix key upgrade with client --- ceph-proxy/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index e8a790f9..80fbe13b 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -209,13 +209,13 @@ def notify_client(): def upgrade_keys(): ''' Ceph now required mon allow rw for pool creation ''' if len(relation_ids('radosgw')) > 0: - ceph.upgrade_key_caps('radosgw.gateway', + ceph.upgrade_key_caps('client.radosgw.gateway', ceph._radosgw_caps) for relid in relation_ids('client'): units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - ceph.upgrade_key_caps(service_name, + ceph.upgrade_key_caps('client.{}'.format(service_name), ceph._default_caps) From 5712fad5e10051148748df358ad57da02afe266a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:13:17 +0100 Subject: [PATCH 0277/2699] Prefix key upgrade with client --- ceph-mon/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index e8a790f9..80fbe13b 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -209,13 +209,13 @@ def notify_client(): def upgrade_keys(): ''' Ceph now required mon allow rw for pool creation ''' if len(relation_ids('radosgw')) > 0: - ceph.upgrade_key_caps('radosgw.gateway', + ceph.upgrade_key_caps('client.radosgw.gateway', ceph._radosgw_caps) for relid in relation_ids('client'): units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - ceph.upgrade_key_caps(service_name, + ceph.upgrade_key_caps('client.{}'.format(service_name), ceph._default_caps) From fa735d22088762bcd3b30ae80e4dfe8e0712d5e6 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:19:06 +0100 Subject: [PATCH 0278/2699] Fixup subcap handling --- ceph-proxy/hooks/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 461e4416..d165d26a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -268,7 +268,7 @@ def upgrade_key_caps(key, caps): 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): - cmd.extend([subsystem, "'{}'".format(subcaps)]) + cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) From 7e812e8498de09f0301433bf31757530999ab96a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 11:19:06 +0100 Subject: [PATCH 0279/2699] Fixup subcap handling --- ceph-mon/hooks/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 461e4416..d165d26a 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -268,7 +268,7 @@ def upgrade_key_caps(key, caps): 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): - cmd.extend([subsystem, "'{}'".format(subcaps)]) + cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) From 20fc96dfa9afb58236ecf4aa139dd6c0ed606f0b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Sep 2013 11:17:13 +0100 Subject: [PATCH 0280/2699] Adds missing identity-service-relation-* hook links Fixes: bug 1229645 --- ceph-radosgw/hooks/identity-service-relation-changed | 1 + ceph-radosgw/hooks/identity-service-relation-joined | 1 + 2 files changed, 2 insertions(+) create mode 120000 ceph-radosgw/hooks/identity-service-relation-changed create mode 120000 ceph-radosgw/hooks/identity-service-relation-joined diff --git a/ceph-radosgw/hooks/identity-service-relation-changed b/ceph-radosgw/hooks/identity-service-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/identity-service-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/identity-service-relation-joined b/ceph-radosgw/hooks/identity-service-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/identity-service-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 5942c5f22bc8dcdd13ca46450e50b3dfacdf2f3c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Sep 2013 11:19:47 +0100 Subject: [PATCH 0281/2699] bumped revision --- ceph-radosgw/revision | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index aabe6ec3..2bd5a0a9 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -21 +22 From 
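
The "Fixup subcap handling" pair corrects a quoting bug in that command construction: subcaps is a list, so "'{}'".format(subcaps) embeds the list's Python repr, brackets and quotes included, in the capability string, and the extra single quotes were never needed because subprocess passes each argument verbatim. Compare:

    subcaps = ['allow rw']

    broken = "'{}'".format(subcaps)  # "'['allow rw']'" - quoted list repr
    fixed = '; '.join(subcaps)       # 'allow rw'

    assert fixed == 'allow rw'
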
404b65de8905a12e4930fb9ddcc8fa31fde8cb81 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Sep 2013 12:29:07 +0100 Subject: [PATCH 0282/2699] [trivial] fix relation_set handling problem --- ceph-radosgw/hooks/utils.py | 4 ++-- ceph-radosgw/revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 5ad0b246..972bc99e 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -170,10 +170,10 @@ def relation_set(**kwargs): ] args = [] for k, v in kwargs.items(): - if k == 'rid': + if k == 'rid' and v: cmd.append('-r') cmd.append(v) - else: + elif k != 'rid': args.append('{}={}'.format(k, v)) cmd += args subprocess.check_call(cmd) diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index 2bd5a0a9..7273c0fa 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -22 +25 From 08773f71436aa95d4adb35a029fec42adcf8d17e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 10 Oct 2013 11:46:24 +0100 Subject: [PATCH 0283/2699] Sync with alternatives charmhelpers, installed ceph.conf as alternative for charm co-existence --- ceph-proxy/charm-helpers-sync.yaml | 3 +- .../contrib/openstack/__init__.py | 0 .../contrib/openstack/alternatives.py | 17 +++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 10 +- ceph-proxy/hooks/charmhelpers/core/host.py | 124 +++++++----------- .../hooks/charmhelpers/fetch/__init__.py | 65 ++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 19 ++- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 49 +++++++ ceph-proxy/hooks/hooks.py | 23 +++- ceph-proxy/hooks/utils.py | 2 +- ceph-proxy/revision | 2 +- 11 files changed, 214 insertions(+), 100 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 21c0bc63..38dc4108 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,7 +1,8 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/os-alternatives destination: hooks/charmhelpers include: - core - fetch - contrib.storage.linux: - utils + - contrib.openstack.alternatives diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py new file mode 100644 index 00000000..b413259c --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -0,0 +1,17 @@ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index e57ea25c..2b06706c 100644 --- 
a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -143,6 +143,11 @@ def remote_unit(): return os.environ['JUJU_REMOTE_UNIT'] +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + @cached def config(scope=None): "Juju charm configuration" @@ -192,7 +197,7 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] return [] @@ -203,7 +208,7 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] @cached @@ -330,5 +335,6 @@ def wrapper(decorated): return decorated return wrapper + def charm_dir(): return os.environ.get('CHARM_DIR') diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index d60d982d..1a63bf89 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -5,33 +5,36 @@ # Nick Moffitt # Matthew Wedgwood -import apt_pkg import os import pwd import grp +import random +import string import subprocess import hashlib from collections import OrderedDict -from hookenv import log, execution_environment +from hookenv import log def service_start(service_name): - service('start', service_name) + return service('start', service_name) def service_stop(service_name): - service('stop', service_name) + return service('stop', service_name) def service_restart(service_name): - service('restart', service_name) + return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): - if not service('reload', service_name) and restart_on_failure: - service('restart', service_name) + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result def service(action, service_name): @@ -39,6 +42,18 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user""" try: @@ -74,36 +89,33 @@ def add_user_to_group(username, group): def rsync(from_path, to_path, flags='-r', options=None): """Replicate the contents of a path""" - context = execution_environment() options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] cmd.extend(options) - cmd.append(from_path.format(**context)) - cmd.append(to_path.format(**context)) + cmd.append(from_path) + cmd.append(to_path) log(" ".join(cmd)) return subprocess.check_output(cmd).strip() def symlink(source, destination): """Create a symbolic link""" - context = execution_environment() log("Symlinking {} as {}".format(source, destination)) cmd = [ 'ln', '-sf', - source.format(**context), - destination.format(**context) + source, + destination, ] subprocess.check_call(cmd) def mkdir(path, owner='root', group='root', perms=0555, 
force=False): """Create a directory""" - context = execution_environment() log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) if os.path.exists(realpath): if force and not os.path.isdir(realpath): @@ -114,71 +126,15 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): +def write_file(path, content, owner='root', group='root', perms=0444): """Create or overwrite a file with the contents of a string""" - context = execution_environment() - context.update(kwargs) - log("Writing file {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid - with open(path.format(**context), 'w') as target: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - target.write(fmtstr.format(**context)) - - -def render_template_file(source, destination, **kwargs): - """Create or overwrite a file using a template""" - log("Rendering template {} for {}".format(source, - destination)) - context = execution_environment() - with open(source.format(**context), 'r') as template: - write_file(destination.format(**context), template.read(), - **kwargs) - - -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - apt_pkg.init() - cache = apt_pkg.Cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, basestring): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + target.write(content) def mount(device, mountpoint, options=None, persist=False): @@ -271,3 +227,15 @@ def lsb_release(): k, v = l.split('=') d[k.strip()] = v.strip() return d + + +def pwgen(length=None): + '''Generate a random password.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 5a306257..b2f96467 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,9 +1,6 @@ import importlib from yaml import safe_load from charmhelpers.core.host import ( -
apt_install, - apt_update, - filter_installed_packages, lsb_release ) from urlparse import ( @@ -15,6 +12,7 @@ config, log, ) +import apt_pkg CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -24,10 +22,67 @@ """ +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): if ((source.startswith('ppa:') or source.startswith('http:'))): - subprocess.check_call(['add-apt-repository', source]) + subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -79,6 +134,7 @@ def configure_sources(update=False, # least- to most-specific URL matching. FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', ) @@ -98,6 +154,7 @@ def install_remote(source): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
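# A minimal sketch of the handler contract described in the comment above (a
# hypothetical class, not part of this sync): can_handle() returns True to
# claim a source, or a string explaining why it cannot, and install() returns
# the local path it produced.
#   class LocalFileFetchHandler(BaseFetchHandler):
#       def can_handle(self, source):
#           return source.startswith('file://') or 'only file:// URLs supported'
#       def install(self, source):
#           return source[len('file://'):]  # already on disk, nothing to fetch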
handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None for handler in handlers: try: installed_to = handler.install(source) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 09ac69e3..e35b8f15 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -8,6 +8,7 @@ get_archive_handler, extract, ) +from charmhelpers.core.host import mkdir class ArchiveUrlFetchHandler(BaseFetchHandler): @@ -24,20 +25,24 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc response = urllib2.urlopen(source) - with open(dest, 'w') as dest_file: - dest_file.write(response.read()) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e def install(self, source): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) except urllib2.URLError as e: - return UnhandledSource(e.reason) + raise UnhandledSource(e.reason) except OSError as e: - return UnhandledSource(e.strerror) - finally: - if os.path.isfile(dld_file): - os.unlink(dld_file) + raise UnhandledSource(e.strerror) return extract(dld_file) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 00000000..c348b4bb --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir + diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 80fbe13b..59dfe3d6 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -22,16 +22,22 @@ relation_get, relation_set, remote_unit, - Hooks, UnregisteredHookError + Hooks, UnregisteredHookError, + service_name ) + from charmhelpers.core.host import ( + service_restart, + umount, + mkdir +) +from charmhelpers.fetch import (
apt_install, apt_update, filter_installed_packages, - service_restart, - umount + add_source ) -from charmhelpers.fetch import add_source +from charmhelpers.contrib.openstack.alternatives import install_alternative from utils import ( render_template, @@ -65,9 +71,14 @@ def emit_cephconf(): 'fsid': config('fsid'), 'version': ceph.get_ceph_version() } - - with open('/etc/ceph/ceph.conf', 'w') as cephconf: + # Install ceph.conf as an alternative to support + # co-existence with other charms that write this file + charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) + mkdir(os.path.dirname(charm_ceph_conf)) + with open(charm_ceph_conf, 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) + install_alternative('ceph.conf', '/etc/ceph/ceph.conf', + charm_ceph_conf, 100) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index a8868b69..c1044a45 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -13,7 +13,7 @@ unit_get, cached ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, filter_installed_packages ) diff --git a/ceph-proxy/revision b/ceph-proxy/revision index 29d6383b..a9c8fe82 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -100 +103 From 135279292aa37e2669b75eb9fb475d94294390b1 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 10 Oct 2013 11:46:24 +0100 Subject: [PATCH 0284/2699] Sync with alternatives charmhelpers, installed ceph.conf as alternative for charm co-existence --- ceph-mon/charm-helpers-sync.yaml | 3 +- .../contrib/openstack/__init__.py | 0 .../contrib/openstack/alternatives.py | 17 +++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 10 +- ceph-mon/hooks/charmhelpers/core/host.py | 124 +++++++----------- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 65 ++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 19 ++- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 49 +++++++ ceph-mon/hooks/hooks.py | 23 +++- ceph-mon/hooks/utils.py | 2 +- ceph-mon/revision | 2 +- 11 files changed, 214 insertions(+), 100 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/bzrurl.py diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 21c0bc63..38dc4108 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,7 +1,8 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/os-alternatives destination: hooks/charmhelpers include: - core - fetch - contrib.storage.linux: - utils + - contrib.openstack.alternatives diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py new file mode 100644 index 00000000..b413259c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -0,0 +1,17 @@ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move 
existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e57ea25c..2b06706c 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -143,6 +143,11 @@ def remote_unit(): return os.environ['JUJU_REMOTE_UNIT'] +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + @cached def config(scope=None): "Juju charm configuration" @@ -192,7 +197,7 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] return [] @@ -203,7 +208,7 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] @cached @@ -330,5 +335,6 @@ def wrapper(decorated): return decorated return wrapper + def charm_dir(): return os.environ.get('CHARM_DIR') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d60d982d..1a63bf89 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -5,33 +5,36 @@ # Nick Moffitt # Matthew Wedgwood -import apt_pkg import os import pwd import grp +import random +import string import subprocess import hashlib from collections import OrderedDict -from hookenv import log, execution_environment +from hookenv import log def service_start(service_name): - service('start', service_name) + return service('start', service_name) def service_stop(service_name): - service('stop', service_name) + return service('stop', service_name) def service_restart(service_name): - service('restart', service_name) + return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): - if not service('reload', service_name) and restart_on_failure: - service('restart', service_name) + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result def service(action, service_name): @@ -39,6 +42,18 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user""" try: @@ -74,36 +89,33 @@ def add_user_to_group(username, group): def rsync(from_path, to_path, flags='-r', options=None): """Replicate the contents of a path""" - context = execution_environment() options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] cmd.extend(options) - cmd.append(from_path.format(**context)) - cmd.append(to_path.format(**context)) + cmd.append(from_path) + cmd.append(to_path) log(" ".join(cmd)) return 
subprocess.check_output(cmd).strip() def symlink(source, destination): """Create a symbolic link""" - context = execution_environment() log("Symlinking {} as {}".format(source, destination)) cmd = [ 'ln', '-sf', - source.format(**context), - destination.format(**context) + source, + destination, ] subprocess.check_call(cmd) def mkdir(path, owner='root', group='root', perms=0555, force=False): """Create a directory""" - context = execution_environment() log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) if os.path.exists(realpath): if force and not os.path.isdir(realpath): @@ -114,71 +126,15 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): +def write_file(path, content, owner='root', group='root', perms=0444): """Create or overwrite a file with the contents of a string""" - context = execution_environment() - context.update(kwargs) - log("Writing file {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid - with open(path.format(**context), 'w') as target: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - target.write(fmtstr.format(**context)) - - -def render_template_file(source, destination, **kwargs): - """Create or overwrite a file using a template""" - log("Rendering template {} for {}".format(source, - destination)) - context = execution_environment() - with open(source.format(**context), 'r') as template: - write_file(destination.format(**context), template.read(), - **kwargs) - - -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - apt_pkg.init() - cache = apt_pkg.Cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, basestring): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + target.write(content) def mount(device, mountpoint, options=None, persist=False): @@ -271,3 +227,15 @@ def lsb_release(): k, v = l.split('=') d[k.strip()] = v.strip() return d + + +def pwgen(length=None): + '''Generate a random password.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] +
return(''.join(random_chars)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 5a306257..b2f96467 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,9 +1,6 @@ import importlib from yaml import safe_load from charmhelpers.core.host import ( - apt_install, - apt_update, - filter_installed_packages, lsb_release ) from urlparse import ( @@ -15,6 +12,7 @@ config, log, ) +import apt_pkg CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -24,10 +22,67 @@ """ +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): if ((source.startswith('ppa:') or source.startswith('http:'))): - subprocess.check_call(['add-apt-repository', source]) + subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -79,6 +134,7 @@ def configure_sources(update=False, # least- to most-specific URL matching. FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', ) @@ -98,6 +154,7 @@ def install_remote(source): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
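# Hedged note on the installed_to = None initialisation added just above (the
# rest of install_remote() is outside this hunk): without it, a source that no
# handler installs successfully would leave installed_to unbound, and any
# post-loop reference to it would raise UnboundLocalError; seeding it with
# None gives the code after the loop a defined value to test.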
handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None for handler in handlers: try: installed_to = handler.install(source) diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 09ac69e3..e35b8f15 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -8,6 +8,7 @@ get_archive_handler, extract, ) +from charmhelpers.core.host import mkdir class ArchiveUrlFetchHandler(BaseFetchHandler): @@ -24,20 +25,24 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc response = urllib2.urlopen(source) - with open(dest, 'w') as dest_file: - dest_file.write(response.read()) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e def install(self, source): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) except urllib2.URLError as e: - return UnhandledSource(e.reason) + raise UnhandledSource(e.reason) except OSError as e: - return UnhandledSource(e.strerror) - finally: - if os.path.isfile(dld_file): - os.unlink(dld_file) + raise UnhandledSource(e.strerror) return extract(dld_file) diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 00000000..c348b4bb --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir + diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 80fbe13b..59dfe3d6 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -22,16 +22,22 @@ relation_get, relation_set, remote_unit, - Hooks, UnregisteredHookError + Hooks, UnregisteredHookError, + service_name ) + from charmhelpers.core.host import ( + service_restart, + umount, + mkdir +) +from charmhelpers.fetch import ( apt_install, apt_update,
filter_installed_packages, - service_restart, - umount + add_source ) -from charmhelpers.fetch import add_source +from charmhelpers.contrib.openstack.alternatives import install_alternative from utils import ( render_template, @@ -65,9 +71,14 @@ def emit_cephconf(): 'fsid': config('fsid'), 'version': ceph.get_ceph_version() } - - with open('/etc/ceph/ceph.conf', 'w') as cephconf: + # Install ceph.conf as an alternative to support + # co-existence with other charms that write this file + charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) + mkdir(os.path.dirname(charm_ceph_conf)) + with open(charm_ceph_conf, 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) + install_alternative('ceph.conf', '/etc/ceph/ceph.conf', + charm_ceph_conf, 100) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index a8868b69..c1044a45 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -13,7 +13,7 @@ unit_get, cached ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, filter_installed_packages ) diff --git a/ceph-mon/revision b/ceph-mon/revision index 29d6383b..a9c8fe82 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -100 +103 From 0fb0b3ed7d29540b4b67bcc43860bc2b5d9ed0ea Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 10 Oct 2013 11:49:36 +0100 Subject: [PATCH 0285/2699] Sync with alternatives charmhelpers, installed ceph.conf as alternative for charm co-existence --- ceph-osd/charm-helpers-sync.yaml | 3 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 10 +- ceph-osd/hooks/charmhelpers/core/host.py | 124 +++++++----------- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 65 ++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 19 ++- ceph-osd/hooks/hooks.py | 21 ++- ceph-osd/hooks/utils.py | 2 +- ceph-osd/revision | 2 +- 8 files changed, 147 insertions(+), 99 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 21c0bc63..38dc4108 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,7 +1,8 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/os-alternatives destination: hooks/charmhelpers include: - core - fetch - contrib.storage.linux: - utils + - contrib.openstack.alternatives diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index e57ea25c..2b06706c 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -143,6 +143,11 @@ def remote_unit(): return os.environ['JUJU_REMOTE_UNIT'] +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + @cached def config(scope=None): "Juju charm configuration" @@ -192,7 +197,7 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] return [] @@ -203,7 +208,7 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] @cached @@ -330,5 +335,6 @@ def wrapper(decorated): return decorated return wrapper + def charm_dir(): 
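# CHARM_DIR is exported by the juju unit agent for every hook invocation and
# points at the root of the unpacked charm, typically something like
# /var/lib/juju/agents/unit-<service>-<n>/charm; using os.environ.get() means
# charm_dir() returns None instead of raising KeyError when called outside a
# hook context.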
return os.environ.get('CHARM_DIR') diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index d60d982d..1a63bf89 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -5,33 +5,36 @@ # Nick Moffitt # Matthew Wedgwood -import apt_pkg import os import pwd import grp +import random +import string import subprocess import hashlib from collections import OrderedDict -from hookenv import log, execution_environment +from hookenv import log def service_start(service_name): - service('start', service_name) + return service('start', service_name) def service_stop(service_name): - service('stop', service_name) + return service('stop', service_name) def service_restart(service_name): - service('restart', service_name) + return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): - if not service('reload', service_name) and restart_on_failure: - service('restart', service_name) + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result def service(action, service_name): @@ -39,6 +42,18 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user""" try: @@ -74,36 +89,33 @@ def add_user_to_group(username, group): def rsync(from_path, to_path, flags='-r', options=None): """Replicate the contents of a path""" - context = execution_environment() options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] cmd.extend(options) - cmd.append(from_path.format(**context)) - cmd.append(to_path.format(**context)) + cmd.append(from_path) + cmd.append(to_path) log(" ".join(cmd)) return subprocess.check_output(cmd).strip() def symlink(source, destination): """Create a symbolic link""" - context = execution_environment() log("Symlinking {} as {}".format(source, destination)) cmd = [ 'ln', '-sf', - source.format(**context), - destination.format(**context) + source, + destination, ] subprocess.check_call(cmd) def mkdir(path, owner='root', group='root', perms=0555, force=False): """Create a directory""" - context = execution_environment() log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) if os.path.exists(realpath): if force and not os.path.isdir(realpath): @@ -114,71 +126,15 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): +def write_file(path, content, owner='root', group='root', perms=0444): """Create or overwrite a file with the contents of a string""" - context = execution_environment() - context.update(kwargs) - log("Writing file {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner.format(**context)).pw_uid - gid = grp.getgrnam(group.format(**context)).gr_gid - with open(path.format(**context), 'w') as 
target: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - target.write(fmtstr.format(**context)) - - -def render_template_file(source, destination, **kwargs): - """Create or overwrite a file using a template""" - log("Rendering template {} for {}".format(source, - destination)) - context = execution_environment() - with open(source.format(**context), 'r') as template: - write_file(destination.format(**context), template.read(), - **kwargs) - - -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - apt_pkg.init() - cache = apt_pkg.Cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, basestring): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + target.write(content) def mount(device, mountpoint, options=None, persist=False): @@ -271,3 +227,15 @@ def lsb_release(): k, v = l.split('=') d[k.strip()] = v.strip() return d + + +def pwgen(length=None): + '''Generate a random password.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 5a306257..b2f96467 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,9 +1,6 @@ import importlib from yaml import safe_load from charmhelpers.core.host import ( - apt_install, - apt_update, - filter_installed_packages, lsb_release ) from urlparse import ( @@ -15,6 +12,7 @@ config, log, ) +import apt_pkg CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -24,10 +22,67 @@ """ +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: +
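# subprocess.check_call() raises CalledProcessError on a non-zero apt-get
# exit, so fatal=True turns a failed install into a failed hook, while the
# subprocess.call() branch below discards the exit status and lets the hook
# carry on.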
subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): if ((source.startswith('ppa:') or source.startswith('http:'))): - subprocess.check_call(['add-apt-repository', source]) + subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -79,6 +134,7 @@ def configure_sources(update=False, # least- to most-specific URL matching. FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', ) @@ -98,6 +154,7 @@ def install_remote(source): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None for handler in handlers: try: installed_to = handler.install(source) diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 09ac69e3..e35b8f15 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -8,6 +8,7 @@ get_archive_handler, extract, ) +from charmhelpers.core.host import mkdir class ArchiveUrlFetchHandler(BaseFetchHandler): @@ -24,20 +25,24 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc response = urllib2.urlopen(source) - with open(dest, 'w') as dest_file: - dest_file.write(response.read()) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e def install(self, source): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) except urllib2.URLError as e: - return UnhandledSource(e.reason) + raise UnhandledSource(e.reason) except OSError as e: - return UnhandledSource(e.strerror) - finally: - if os.path.isfile(dld_file): - os.unlink(dld_file) + raise UnhandledSource(e.strerror) return extract(dld_file) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 1424eef1..0176d651 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -21,21 +21,27 @@ related_units, relation_get, Hooks, - UnregisteredHookError + UnregisteredHookError, + service_name ) from charmhelpers.core.host import ( + umount, + mkdir +) +from charmhelpers.fetch import ( + add_source, apt_install, apt_update, filter_installed_packages, - umount ) -from charmhelpers.fetch import add_source from utils import ( render_template, get_host_ip, ) +from charmhelpers.contrib.openstack.alternatives import install_alternative + hooks = Hooks() @@ -66,9 +72,14 @@ def emit_cephconf(): 'fsid': get_fsid(), 'version': ceph.get_ceph_version() } - - with open('/etc/ceph/ceph.conf', 'w') as cephconf: + #
Install ceph.conf as an alternative to support + # co-existence with other charms that write this file + charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) + mkdir(os.path.dirname(charm_ceph_conf)) + with open(charm_ceph_conf, 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) + install_alternative('ceph.conf', '/etc/ceph/ceph.conf', + charm_ceph_conf, 90) JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index a8868b69..c1044a45 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -13,7 +13,7 @@ unit_get, cached ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, filter_installed_packages ) diff --git a/ceph-osd/revision b/ceph-osd/revision index b4de3947..b1bd38b6 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -11 +13 From dc5d7e0910f7e4c3746d68207056f1c151397056 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 10 Oct 2013 11:49:40 +0100 Subject: [PATCH 0286/2699] Sync with alternatives charmhelpers, installed ceph.conf as alternative for charm co-existence --- .../contrib/openstack/__init__.py | 0 .../contrib/openstack/alternatives.py | 17 +++++++ ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 49 +++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/bzrurl.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py new file mode 100644 index 00000000..b413259c --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -0,0 +1,17 @@ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 00000000..c348b4bb --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle 
{}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir + From 13c0bd28ac0ac700e2ab9f90766635aec84f51a3 Mon Sep 17 00:00:00 2001 From: Juju Management User Date: Tue, 12 Nov 2013 16:27:20 +0000 Subject: [PATCH 0287/2699] Add preinstall hook and update charm-helpers-sync.yaml to pull in additional charmhelpers module to support it --- ceph-proxy/charm-helpers-sync.yaml | 1 + .../hooks/charmhelpers/payload/__init__.py | 1 + .../hooks/charmhelpers/payload/execd.py | 50 +++++++++++++++++++ ceph-proxy/hooks/hooks.py | 2 + 4 files changed, 54 insertions(+) create mode 100644 ceph-proxy/hooks/charmhelpers/payload/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/payload/execd.py diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 21c0bc63..b30e44a2 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -5,3 +5,4 @@ include: - fetch - contrib.storage.linux: - utils + - payload.execd diff --git a/ceph-proxy/hooks/charmhelpers/payload/__init__.py b/ceph-proxy/hooks/charmhelpers/payload/__init__.py new file mode 100644 index 00000000..fc9fbc08 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/payload/__init__.py @@ -0,0 +1 @@ +"Tools for working with files injected into a charm just before deployment." diff --git a/ceph-proxy/hooks/charmhelpers/payload/execd.py b/ceph-proxy/hooks/charmhelpers/payload/execd.py new file mode 100644 index 00000000..6476a75f --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/payload/execd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. 
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 80fbe13b..8b405abf 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -32,6 +32,7 @@ umount ) from charmhelpers.fetch import add_source +from charmhelpers.payload.execd import execd_preinstall from utils import ( render_template, @@ -51,6 +52,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') + execd_preinstall() add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) From 1b727513812805d115f1f6ffd21e444e0d129911 Mon Sep 17 00:00:00 2001 From: Juju Management User Date: Tue, 12 Nov 2013 16:27:20 +0000 Subject: [PATCH 0288/2699] Add preinstall hook and update charm-helpers-sync.yaml to pull in additional charmhelpers module to support it --- ceph-mon/charm-helpers-sync.yaml | 1 + .../hooks/charmhelpers/payload/__init__.py | 1 + ceph-mon/hooks/charmhelpers/payload/execd.py | 50 +++++++++++++++++++ ceph-mon/hooks/hooks.py | 2 + 4 files changed, 54 insertions(+) create mode 100644 ceph-mon/hooks/charmhelpers/payload/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/payload/execd.py diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 21c0bc63..b30e44a2 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -5,3 +5,4 @@ include: - fetch - contrib.storage.linux: - utils + - payload.execd diff --git a/ceph-mon/hooks/charmhelpers/payload/__init__.py b/ceph-mon/hooks/charmhelpers/payload/__init__.py new file mode 100644 index 00000000..fc9fbc08 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/payload/__init__.py @@ -0,0 +1 @@ +"Tools for working with files injected into a charm just before deployment." diff --git a/ceph-mon/hooks/charmhelpers/payload/execd.py b/ceph-mon/hooks/charmhelpers/payload/execd.py new file mode 100644 index 00000000..6476a75f --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/payload/execd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. 
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 80fbe13b..8b405abf 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -32,6 +32,7 @@ umount ) from charmhelpers.fetch import add_source +from charmhelpers.payload.execd import execd_preinstall from utils import ( render_template, @@ -51,6 +52,7 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): log('Begin install hook.') + execd_preinstall() add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) From 1999da808f804267e26fbfbcc4cbd5783daf7598 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 13 Nov 2013 22:09:26 +0000 Subject: [PATCH 0289/2699] Resync trunk of charm-helpers --- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 101 ++++++++++++++---- ceph-proxy/hooks/charmhelpers/core/host.py | 24 +++-- .../hooks/charmhelpers/fetch/__init__.py | 86 ++++++++++++--- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 2 +- 5 files changed, 169 insertions(+), 46 deletions(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 38dc4108..c8ee8f59 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/os-alternatives +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 2b06706c..bb196dfa 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -9,6 +9,7 @@ import yaml import subprocess import UserDict +from subprocess import CalledProcessError CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -21,7 +22,7 @@ def cached(func): - ''' Cache return values for multiple executions of func + args + """Cache return values for multiple executions of func + args For example: @@ -32,7 +33,7 @@ def unit_get(attribute): unit_get('test') will cache the result of unit_get + 'test' for future calls. 
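The preinstall support synced in patches 0287/0288 works purely by convention: execd_preinstall() runs every executable named charm-pre-install under $CHARM_DIR/exec.d/*/. A minimal sketch of how a deployment would use it — the 00-apt-proxy payload is a hypothetical example; only the directory layout and the script name come from execd.py itself:

    import os
    from charmhelpers.payload.execd import execd_preinstall

    # Hypothetical payload injected into the charm before deployment:
    #   $CHARM_DIR/exec.d/00-apt-proxy/charm-pre-install
    charm_dir = os.environ.get('CHARM_DIR', '.')
    payload = os.path.join(charm_dir, 'exec.d', '00-apt-proxy')
    os.makedirs(payload)
    script = os.path.join(payload, 'charm-pre-install')
    with open(script, 'w') as f:
        f.write('#!/bin/sh\necho preparing host before package install\n')
    os.chmod(script, 0755)

    execd_preinstall()  # runs the script above via execd_run('charm-pre-install')

Because execd_run() only logs failures unless die_on_error is set, a broken payload does not abort the install hook by default.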
- ''' + """ def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) @@ -46,8 +47,8 @@ def wrapper(*args, **kwargs): def flush(key): - ''' Flushes any entries from function cache where the - key is found in the function+args ''' + """Flushes any entries from function cache where the + key is found in the function+args """ flush_list = [] for item in cache: if key in item: @@ -57,7 +58,7 @@ def flush(key): def log(message, level=None): - "Write a message to the juju log" + """Write a message to the juju log""" command = ['juju-log'] if level: command += ['-l', level] @@ -66,7 +67,7 @@ def log(message, level=None): class Serializable(UserDict.IterableUserDict): - "Wrapper, an object that can be serialized to yaml or json" + """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object @@ -96,11 +97,11 @@ def __setstate__(self, state): self.data = state def json(self): - "Serialize the object to json" + """Serialize the object to json""" return json.dumps(self.data) def yaml(self): - "Serialize the object to yaml" + """Serialize the object to yaml""" return yaml.dump(self.data) @@ -119,38 +120,38 @@ def execution_environment(): def in_relation_hook(): - "Determine whether we're running in a relation hook" + """Determine whether we're running in a relation hook""" return 'JUJU_RELATION' in os.environ def relation_type(): - "The scope for the current relation hook" + """The scope for the current relation hook""" return os.environ.get('JUJU_RELATION', None) def relation_id(): - "The relation ID for the current relation hook" + """The relation ID for the current relation hook""" return os.environ.get('JUJU_RELATION_ID', None) def local_unit(): - "Local unit ID" + """Local unit ID""" return os.environ['JUJU_UNIT_NAME'] def remote_unit(): - "The remote unit for the current relation hook" + """The remote unit for the current relation hook""" return os.environ['JUJU_REMOTE_UNIT'] def service_name(): - "The name service group this unit belongs to" + """The name service group this unit belongs to""" return local_unit().split('/')[0] @cached def config(scope=None): - "Juju charm configuration" + """Juju charm configuration""" config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) @@ -163,6 +164,7 @@ def config(scope=None): @cached def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" _args = ['relation-get', '--format=json'] if rid: _args.append('-r') @@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None): return json.loads(subprocess.check_output(_args)) except ValueError: return None + except CalledProcessError, e: + if e.returncode == 2: + return None + raise def relation_set(relation_id=None, relation_settings={}, **kwargs): + """Set relation information for the current unit""" relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs): @cached def relation_ids(reltype=None): - "A list of relation_ids" + """A list of relation_ids""" reltype = reltype or relation_type() relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: @@ -203,7 +210,7 @@ def relation_ids(reltype=None): @cached def related_units(relid=None): - "A list of related units" + """A list of related units""" relid = relid or relation_id() units_cmd_line = ['relation-list', '--format=json'] if relid is not None: @@ -213,7 +220,7 @@ def 
related_units(relid=None): @cached def relation_for_unit(unit=None, rid=None): - "Get the json represenation of a unit's relation" + """Get the json represenation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None): @cached def relations_for_id(relid=None): - "Get relations of a specific relation ID" + """Get relations of a specific relation ID""" relation_data = [] relid = relid or relation_ids() for unit in related_units(relid): @@ -237,7 +244,7 @@ def relations_for_id(relid=None): @cached def relations_of_type(reltype=None): - "Get relations of a specific type" + """Get relations of a specific type""" relation_data = [] reltype = reltype or relation_type() for relid in relation_ids(reltype): @@ -249,7 +256,7 @@ def relations_of_type(reltype=None): @cached def relation_types(): - "Get a list of relation types supported by this charm" + """Get a list of relation types supported by this charm""" charmdir = os.environ.get('CHARM_DIR', '') mdf = open(os.path.join(charmdir, 'metadata.yaml')) md = yaml.safe_load(mdf) @@ -264,6 +271,7 @@ def relation_types(): @cached def relations(): + """Get a nested dictionary of relation data for all related units""" rels = {} for reltype in relation_types(): relids = {} @@ -277,15 +285,35 @@ def relations(): return rels +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + def open_port(port, protocol="TCP"): - "Open a service network port" + """Open a service network port""" _args = ['open-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) def close_port(port, protocol="TCP"): - "Close a service network port" + """Close a service network port""" _args = ['close-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) @@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"): @cached def unit_get(attribute): + """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: return json.loads(subprocess.check_output(_args)) @@ -301,22 +330,46 @@ def unit_get(attribute): def unit_private_ip(): + """Get this unit's private IP address""" return unit_get('private-address') class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" pass class Hooks(object): + """A convenient handler for hook functions. + + Example: + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + ... + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + ... 
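The new is_relation_made() helper above gives hooks a cheap way to distinguish "relation exists" from "relation has actually exchanged data". A usage sketch; the identity-service relation name and the keys are illustrative assumptions, not part of this patch:

    from charmhelpers.core.hookenv import is_relation_made, log

    def configure_auth():
        # By default only private-address is checked; passing a list makes
        # every key mandatory before the relation counts as established.
        if is_relation_made('identity-service',
                            keys=['service_host', 'service_port']):
            log('identity-service relation complete; configuring auth')
        else:
            log('identity-service relation not ready; deferring')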
+ + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + def __init__(self): super(Hooks, self).__init__() self._hooks = {} def register(self, name, function): + """Register a hook""" self._hooks[name] = function def execute(self, args): + """Execute a registered hook based on args[0]""" hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() @@ -324,6 +377,7 @@ def execute(self, args): raise UnregisteredHookError(hook_name) def hook(self, *hook_names): + """Decorator, registering them as hooks""" def wrapper(decorated): for hook_name in hook_names: self.register(hook_name, decorated) @@ -337,4 +391,5 @@ def wrapper(decorated): def charm_dir(): + """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 1a63bf89..4a6a4a8c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -19,18 +19,22 @@ def service_start(service_name): + """Start a system service""" return service('start', service_name) def service_stop(service_name): + """Stop a system service""" return service('stop', service_name) def service_restart(service_name): + """Restart a system service""" return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): + """Reload a system service, optionally falling back to restart if reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False): def service(action, service_name): + """Control a system service""" cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 def service_running(service): + """Determine whether a system service is running""" try: output = subprocess.check_output(['service', service, 'status']) except subprocess.CalledProcessError: @@ -55,7 +61,7 @@ def service_running(service): def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user""" + """Add a user to the system""" try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444): def mount(device, mountpoint, options=None, persist=False): - '''Mount a filesystem''' + """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: cmd_args.extend(['-o', options]) @@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False): def umount(mountpoint, persist=False): - '''Unmount a filesystem''' + """Unmount a filesystem""" cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) @@ -169,7 +175,7 @@ def umount(mountpoint, persist=False): def mounts(): - '''List of all mounted volumes as [[mountpoint,device],[...]]''' + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" with open('/proc/mounts') as f: # [['/mount/point','/dev/path'],[...]] system_mounts = [m[1::-1] for m in [l.strip().split() @@ -178,7 +184,7 @@ def mounts(): def file_hash(path): - ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + """Generate a md5 hash of the contents of 'path' or None if not found """ if os.path.exists(path): h = hashlib.md5() with open(path, 'r') as 
source: @@ -189,7 +195,7 @@ def file_hash(path): def restart_on_change(restart_map): - ''' Restart services based on configuration files changing + """Restart services based on configuration files changing This function is used a decorator, for example @@ -202,7 +208,7 @@ def ceph_client_changed(): In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. - ''' + """ def wrap(f): def wrapped_f(*args): checksums = {} @@ -220,7 +226,7 @@ def wrapped_f(*args): def lsb_release(): - '''Return /etc/lsb-release in a dict''' + """Return /etc/lsb-release in a dict""" d = {} with open('/etc/lsb-release', 'r') as lsb: for l in lsb: @@ -230,7 +236,7 @@ def lsb_release(): def pwgen(length=None): - '''Generate a random pasword.''' + """Generate a random pasword.""" if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index b2f96467..f83e7b7d 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -13,6 +13,7 @@ log, ) import apt_pkg +import os CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -20,6 +21,32 @@ PROPOSED_POCKET = """# Proposed deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted """ +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', +} def filter_installed_packages(packages): @@ -40,8 +67,10 @@ def filter_installed_packages(packages): def apt_install(packages, options=None, fatal=False): """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') if isinstance(packages, basestring): @@ -50,10 +79,14 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, env=env) else: - subprocess.call(cmd) + subprocess.call(cmd, env=env) def apt_update(fatal=False): @@ -67,7 +100,7 @@ def apt_update(fatal=False): def apt_purge(packages, 
fatal=False): """Purge one or more packages""" - cmd = ['apt-get', '-y', 'purge'] + cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, basestring): cmd.append(packages) else: @@ -79,16 +112,37 @@ def apt_purge(packages, fatal=False): subprocess.call(cmd) +def apt_hold(packages, fatal=False): + """Hold one or more packages""" + cmd = ['apt-mark', 'hold'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Holding {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): - if ((source.startswith('ppa:') or - source.startswith('http:'))): + if (source.startswith('ppa:') or + source.startswith('http:') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) elif source == 'proposed': release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -118,8 +172,11 @@ def configure_sources(update=False, Note that 'null' (a.k.a. None) should not be quoted. """ sources = safe_load(config(sources_var)) - keys = safe_load(config(keys_var)) - if isinstance(sources, basestring) and isinstance(keys, basestring): + keys = config(keys_var) + if keys is not None: + keys = safe_load(keys) + if isinstance(sources, basestring) and ( + keys is None or isinstance(keys, basestring)): add_source(sources, keys) else: if not len(sources) == len(keys): @@ -172,7 +229,9 @@ def install_from_config(config_var_name): class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): """Returns True if the source can be handled. 
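Taken together, the fetch changes in this resync make package handling deterministic: every accepted spelling of a cloud archive pocket is normalised through CLOUD_ARCHIVE_POCKETS before sources.list.d is written, unknown pockets fail fast with SourceConfigError instead of producing a broken sources entry, and apt itself runs non-interactively. Roughly (package names illustrative):

    from charmhelpers.fetch import add_source, apt_update, apt_install

    # 'cloud:havana', 'cloud:precise-updates/havana' and
    # 'cloud:precise-havana/updates' all resolve to the same pocket.
    add_source('cloud:precise-updates/havana')
    apt_update(fatal=True)

    # With options left as None, apt-get now gets --assume-yes plus
    # Dpkg::Options::=--force-confold, and DEBIAN_FRONTEND defaults to
    # noninteractive, so unattended installs no longer block on prompts.
    apt_install(['ceph', 'ceph-common'], fatal=True)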
Otherwise returns a string explaining why it cannot""" @@ -200,10 +259,13 @@ def plugins(fetch_handlers=None): for handler_name in fetch_handlers: package, classname = handler_name.rsplit('.', 1) try: - handler_class = getattr(importlib.import_module(package), classname) + handler_class = getattr( + importlib.import_module(package), + classname) plugin_list.append(handler_class()) except (ImportError, AttributeError): # Skip missing plugins so that they can be ommitted from # installation if desired - log("FetchHandler {} not found, skipping plugin".format(handler_name)) + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) return plugin_list diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index c348b4bb..db5dd9a3 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -12,6 +12,7 @@ apt_install("python-bzrlib") from bzrlib.branch import Branch + class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): @@ -46,4 +47,3 @@ def install(self, source): except OSError as e: raise UnhandledSource(e.strerror) return dest_dir - From eb3ef69a5336e7db826b74b756499e0d2f2ab54f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 13 Nov 2013 22:09:26 +0000 Subject: [PATCH 0290/2699] Resync trunk of charm-helpers --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 101 ++++++++++++++---- ceph-mon/hooks/charmhelpers/core/host.py | 24 +++-- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 86 ++++++++++++--- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 2 +- 5 files changed, 169 insertions(+), 46 deletions(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 38dc4108..c8ee8f59 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/os-alternatives +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 2b06706c..bb196dfa 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -9,6 +9,7 @@ import yaml import subprocess import UserDict +from subprocess import CalledProcessError CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -21,7 +22,7 @@ def cached(func): - ''' Cache return values for multiple executions of func + args + """Cache return values for multiple executions of func + args For example: @@ -32,7 +33,7 @@ def unit_get(attribute): unit_get('test') will cache the result of unit_get + 'test' for future calls. 
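Elsewhere in this resync, core/host.py's restart_on_change() pairs with file_hash(): it snapshots the md5 of each mapped file around the hook body and restarts the mapped services only when a file actually changed. A sketch in the style of this charm's hooks — the service name and the writer body are assumptions:

    from charmhelpers.core.host import restart_on_change

    @restart_on_change({'/etc/ceph/ceph.conf': ['ceph-mon-all']})
    def emit_cephconf():
        # If the rendered bytes are identical, the hashes taken before
        # and after this body match and no restart is issued.
        with open('/etc/ceph/ceph.conf', 'w') as conf:
            conf.write(render_ceph_conf())  # hypothetical renderer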
- ''' + """ def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) @@ -46,8 +47,8 @@ def wrapper(*args, **kwargs): def flush(key): - ''' Flushes any entries from function cache where the - key is found in the function+args ''' + """Flushes any entries from function cache where the + key is found in the function+args """ flush_list = [] for item in cache: if key in item: @@ -57,7 +58,7 @@ def flush(key): def log(message, level=None): - "Write a message to the juju log" + """Write a message to the juju log""" command = ['juju-log'] if level: command += ['-l', level] @@ -66,7 +67,7 @@ def log(message, level=None): class Serializable(UserDict.IterableUserDict): - "Wrapper, an object that can be serialized to yaml or json" + """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object @@ -96,11 +97,11 @@ def __setstate__(self, state): self.data = state def json(self): - "Serialize the object to json" + """Serialize the object to json""" return json.dumps(self.data) def yaml(self): - "Serialize the object to yaml" + """Serialize the object to yaml""" return yaml.dump(self.data) @@ -119,38 +120,38 @@ def execution_environment(): def in_relation_hook(): - "Determine whether we're running in a relation hook" + """Determine whether we're running in a relation hook""" return 'JUJU_RELATION' in os.environ def relation_type(): - "The scope for the current relation hook" + """The scope for the current relation hook""" return os.environ.get('JUJU_RELATION', None) def relation_id(): - "The relation ID for the current relation hook" + """The relation ID for the current relation hook""" return os.environ.get('JUJU_RELATION_ID', None) def local_unit(): - "Local unit ID" + """Local unit ID""" return os.environ['JUJU_UNIT_NAME'] def remote_unit(): - "The remote unit for the current relation hook" + """The remote unit for the current relation hook""" return os.environ['JUJU_REMOTE_UNIT'] def service_name(): - "The name service group this unit belongs to" + """The name service group this unit belongs to""" return local_unit().split('/')[0] @cached def config(scope=None): - "Juju charm configuration" + """Juju charm configuration""" config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) @@ -163,6 +164,7 @@ def config(scope=None): @cached def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" _args = ['relation-get', '--format=json'] if rid: _args.append('-r') @@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None): return json.loads(subprocess.check_output(_args)) except ValueError: return None + except CalledProcessError, e: + if e.returncode == 2: + return None + raise def relation_set(relation_id=None, relation_settings={}, **kwargs): + """Set relation information for the current unit""" relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs): @cached def relation_ids(reltype=None): - "A list of relation_ids" + """A list of relation_ids""" reltype = reltype or relation_type() relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: @@ -203,7 +210,7 @@ def relation_ids(reltype=None): @cached def related_units(relid=None): - "A list of related units" + """A list of related units""" relid = relid or relation_id() units_cmd_line = ['relation-list', '--format=json'] if relid is not None: @@ -213,7 +220,7 @@ def 
related_units(relid=None): @cached def relation_for_unit(unit=None, rid=None): - "Get the json represenation of a unit's relation" + """Get the json represenation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None): @cached def relations_for_id(relid=None): - "Get relations of a specific relation ID" + """Get relations of a specific relation ID""" relation_data = [] relid = relid or relation_ids() for unit in related_units(relid): @@ -237,7 +244,7 @@ def relations_for_id(relid=None): @cached def relations_of_type(reltype=None): - "Get relations of a specific type" + """Get relations of a specific type""" relation_data = [] reltype = reltype or relation_type() for relid in relation_ids(reltype): @@ -249,7 +256,7 @@ def relations_of_type(reltype=None): @cached def relation_types(): - "Get a list of relation types supported by this charm" + """Get a list of relation types supported by this charm""" charmdir = os.environ.get('CHARM_DIR', '') mdf = open(os.path.join(charmdir, 'metadata.yaml')) md = yaml.safe_load(mdf) @@ -264,6 +271,7 @@ def relation_types(): @cached def relations(): + """Get a nested dictionary of relation data for all related units""" rels = {} for reltype in relation_types(): relids = {} @@ -277,15 +285,35 @@ def relations(): return rels +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + def open_port(port, protocol="TCP"): - "Open a service network port" + """Open a service network port""" _args = ['open-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) def close_port(port, protocol="TCP"): - "Close a service network port" + """Close a service network port""" _args = ['close-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) @@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"): @cached def unit_get(attribute): + """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: return json.loads(subprocess.check_output(_args)) @@ -301,22 +330,46 @@ def unit_get(attribute): def unit_private_ip(): + """Get this unit's private IP address""" return unit_get('private-address') class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" pass class Hooks(object): + """A convenient handler for hook functions. + + Example: + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + ... + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + ... 
+ + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + def __init__(self): super(Hooks, self).__init__() self._hooks = {} def register(self, name, function): + """Register a hook""" self._hooks[name] = function def execute(self, args): + """Execute a registered hook based on args[0]""" hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() @@ -324,6 +377,7 @@ def execute(self, args): raise UnregisteredHookError(hook_name) def hook(self, *hook_names): + """Decorator, registering them as hooks""" def wrapper(decorated): for hook_name in hook_names: self.register(hook_name, decorated) @@ -337,4 +391,5 @@ def wrapper(decorated): def charm_dir(): + """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 1a63bf89..4a6a4a8c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -19,18 +19,22 @@ def service_start(service_name): + """Start a system service""" return service('start', service_name) def service_stop(service_name): + """Stop a system service""" return service('stop', service_name) def service_restart(service_name): + """Restart a system service""" return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): + """Reload a system service, optionally falling back to restart if reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False): def service(action, service_name): + """Control a system service""" cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 def service_running(service): + """Determine whether a system service is running""" try: output = subprocess.check_output(['service', service, 'status']) except subprocess.CalledProcessError: @@ -55,7 +61,7 @@ def service_running(service): def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user""" + """Add a user to the system""" try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444): def mount(device, mountpoint, options=None, persist=False): - '''Mount a filesystem''' + """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: cmd_args.extend(['-o', options]) @@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False): def umount(mountpoint, persist=False): - '''Unmount a filesystem''' + """Unmount a filesystem""" cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) @@ -169,7 +175,7 @@ def umount(mountpoint, persist=False): def mounts(): - '''List of all mounted volumes as [[mountpoint,device],[...]]''' + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" with open('/proc/mounts') as f: # [['/mount/point','/dev/path'],[...]] system_mounts = [m[1::-1] for m in [l.strip().split() @@ -178,7 +184,7 @@ def mounts(): def file_hash(path): - ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + """Generate a md5 hash of the contents of 'path' or None if not found """ if os.path.exists(path): h = hashlib.md5() with open(path, 'r') as source: @@ 
-189,7 +195,7 @@ def file_hash(path): def restart_on_change(restart_map): - ''' Restart services based on configuration files changing + """Restart services based on configuration files changing This function is used a decorator, for example @@ -202,7 +208,7 @@ def ceph_client_changed(): In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. - ''' + """ def wrap(f): def wrapped_f(*args): checksums = {} @@ -220,7 +226,7 @@ def wrapped_f(*args): def lsb_release(): - '''Return /etc/lsb-release in a dict''' + """Return /etc/lsb-release in a dict""" d = {} with open('/etc/lsb-release', 'r') as lsb: for l in lsb: @@ -230,7 +236,7 @@ def lsb_release(): def pwgen(length=None): - '''Generate a random pasword.''' + """Generate a random pasword.""" if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index b2f96467..f83e7b7d 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -13,6 +13,7 @@ log, ) import apt_pkg +import os CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -20,6 +21,32 @@ PROPOSED_POCKET = """# Proposed deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted """ +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', +} def filter_installed_packages(packages): @@ -40,8 +67,10 @@ def filter_installed_packages(packages): def apt_install(packages, options=None, fatal=False): """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') if isinstance(packages, basestring): @@ -50,10 +79,14 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, env=env) else: - subprocess.call(cmd) + subprocess.call(cmd, env=env) def apt_update(fatal=False): @@ -67,7 +100,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): 
"""Purge one or more packages""" - cmd = ['apt-get', '-y', 'purge'] + cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, basestring): cmd.append(packages) else: @@ -79,16 +112,37 @@ def apt_purge(packages, fatal=False): subprocess.call(cmd) +def apt_hold(packages, fatal=False): + """Hold one or more packages""" + cmd = ['apt-mark', 'hold'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Holding {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): - if ((source.startswith('ppa:') or - source.startswith('http:'))): + if (source.startswith('ppa:') or + source.startswith('http:') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) elif source == 'proposed': release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -118,8 +172,11 @@ def configure_sources(update=False, Note that 'null' (a.k.a. None) should not be quoted. """ sources = safe_load(config(sources_var)) - keys = safe_load(config(keys_var)) - if isinstance(sources, basestring) and isinstance(keys, basestring): + keys = config(keys_var) + if keys is not None: + keys = safe_load(keys) + if isinstance(sources, basestring) and ( + keys is None or isinstance(keys, basestring)): add_source(sources, keys) else: if not len(sources) == len(keys): @@ -172,7 +229,9 @@ def install_from_config(config_var_name): class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): """Returns True if the source can be handled. 
Otherwise returns a string explaining why it cannot""" @@ -200,10 +259,13 @@ def plugins(fetch_handlers=None): for handler_name in fetch_handlers: package, classname = handler_name.rsplit('.', 1) try: - handler_class = getattr(importlib.import_module(package), classname) + handler_class = getattr( + importlib.import_module(package), + classname) plugin_list.append(handler_class()) except (ImportError, AttributeError): # Skip missing plugins so that they can be ommitted from # installation if desired - log("FetchHandler {} not found, skipping plugin".format(handler_name)) + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) return plugin_list diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index c348b4bb..db5dd9a3 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -12,6 +12,7 @@ apt_install("python-bzrlib") from bzrlib.branch import Branch + class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): @@ -46,4 +47,3 @@ def install(self, source): except OSError as e: raise UnhandledSource(e.strerror) return dest_dir - From 85cbc7798aa3a9e44a25db90fe81d5db7202021b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 13 Nov 2013 22:10:09 +0000 Subject: [PATCH 0291/2699] Resync with charm-helpers trunk --- ceph-osd/charm-helpers-sync.yaml | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 101 ++++++++++++++---- ceph-osd/hooks/charmhelpers/core/host.py | 24 +++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 86 ++++++++++++--- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 2 +- 5 files changed, 169 insertions(+), 46 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 38dc4108..c8ee8f59 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/os-alternatives +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 2b06706c..bb196dfa 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -9,6 +9,7 @@ import yaml import subprocess import UserDict +from subprocess import CalledProcessError CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -21,7 +22,7 @@ def cached(func): - ''' Cache return values for multiple executions of func + args + """Cache return values for multiple executions of func + args For example: @@ -32,7 +33,7 @@ def unit_get(attribute): unit_get('test') will cache the result of unit_get + 'test' for future calls. 
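The @cached decorator whose docstring appears above keys its cache on str((func, args, kwargs)), and the companion flush() evicts every entry whose key string contains a given fragment. The same pattern in miniature (standalone; names invented for illustration):

    cache = {}

    def cached(func):
        def wrapper(*args, **kwargs):
            key = str((func, args, kwargs))
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]
        return wrapper

    @cached
    def lookup(attribute):
        print('computing %s' % attribute)   # printed once per attribute
        return attribute.upper()

    lookup('test')   # computes and caches
    lookup('test')   # served from the module-level cache
    # flush('lookup') in the real helper would evict this entry.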
- ''' + """ def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) @@ -46,8 +47,8 @@ def wrapper(*args, **kwargs): def flush(key): - ''' Flushes any entries from function cache where the - key is found in the function+args ''' + """Flushes any entries from function cache where the + key is found in the function+args """ flush_list = [] for item in cache: if key in item: @@ -57,7 +58,7 @@ def flush(key): def log(message, level=None): - "Write a message to the juju log" + """Write a message to the juju log""" command = ['juju-log'] if level: command += ['-l', level] @@ -66,7 +67,7 @@ def log(message, level=None): class Serializable(UserDict.IterableUserDict): - "Wrapper, an object that can be serialized to yaml or json" + """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object @@ -96,11 +97,11 @@ def __setstate__(self, state): self.data = state def json(self): - "Serialize the object to json" + """Serialize the object to json""" return json.dumps(self.data) def yaml(self): - "Serialize the object to yaml" + """Serialize the object to yaml""" return yaml.dump(self.data) @@ -119,38 +120,38 @@ def execution_environment(): def in_relation_hook(): - "Determine whether we're running in a relation hook" + """Determine whether we're running in a relation hook""" return 'JUJU_RELATION' in os.environ def relation_type(): - "The scope for the current relation hook" + """The scope for the current relation hook""" return os.environ.get('JUJU_RELATION', None) def relation_id(): - "The relation ID for the current relation hook" + """The relation ID for the current relation hook""" return os.environ.get('JUJU_RELATION_ID', None) def local_unit(): - "Local unit ID" + """Local unit ID""" return os.environ['JUJU_UNIT_NAME'] def remote_unit(): - "The remote unit for the current relation hook" + """The remote unit for the current relation hook""" return os.environ['JUJU_REMOTE_UNIT'] def service_name(): - "The name service group this unit belongs to" + """The name service group this unit belongs to""" return local_unit().split('/')[0] @cached def config(scope=None): - "Juju charm configuration" + """Juju charm configuration""" config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) @@ -163,6 +164,7 @@ def config(scope=None): @cached def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" _args = ['relation-get', '--format=json'] if rid: _args.append('-r') @@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None): return json.loads(subprocess.check_output(_args)) except ValueError: return None + except CalledProcessError, e: + if e.returncode == 2: + return None + raise def relation_set(relation_id=None, relation_settings={}, **kwargs): + """Set relation information for the current unit""" relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs): @cached def relation_ids(reltype=None): - "A list of relation_ids" + """A list of relation_ids""" reltype = reltype or relation_type() relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: @@ -203,7 +210,7 @@ def relation_ids(reltype=None): @cached def related_units(relid=None): - "A list of related units" + """A list of related units""" relid = relid or relation_id() units_cmd_line = ['relation-list', '--format=json'] if relid is not None: @@ -213,7 +220,7 @@ def 
related_units(relid=None): @cached def relation_for_unit(unit=None, rid=None): - "Get the json represenation of a unit's relation" + """Get the json represenation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None): @cached def relations_for_id(relid=None): - "Get relations of a specific relation ID" + """Get relations of a specific relation ID""" relation_data = [] relid = relid or relation_ids() for unit in related_units(relid): @@ -237,7 +244,7 @@ def relations_for_id(relid=None): @cached def relations_of_type(reltype=None): - "Get relations of a specific type" + """Get relations of a specific type""" relation_data = [] reltype = reltype or relation_type() for relid in relation_ids(reltype): @@ -249,7 +256,7 @@ def relations_of_type(reltype=None): @cached def relation_types(): - "Get a list of relation types supported by this charm" + """Get a list of relation types supported by this charm""" charmdir = os.environ.get('CHARM_DIR', '') mdf = open(os.path.join(charmdir, 'metadata.yaml')) md = yaml.safe_load(mdf) @@ -264,6 +271,7 @@ def relation_types(): @cached def relations(): + """Get a nested dictionary of relation data for all related units""" rels = {} for reltype in relation_types(): relids = {} @@ -277,15 +285,35 @@ def relations(): return rels +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + def open_port(port, protocol="TCP"): - "Open a service network port" + """Open a service network port""" _args = ['open-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) def close_port(port, protocol="TCP"): - "Close a service network port" + """Close a service network port""" _args = ['close-port'] _args.append('{}/{}'.format(port, protocol)) subprocess.check_call(_args) @@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"): @cached def unit_get(attribute): + """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: return json.loads(subprocess.check_output(_args)) @@ -301,22 +330,46 @@ def unit_get(attribute): def unit_private_ip(): + """Get this unit's private IP address""" return unit_get('private-address') class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" pass class Hooks(object): + """A convenient handler for hook functions. + + Example: + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + ... + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + ... 
+ + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + def __init__(self): super(Hooks, self).__init__() self._hooks = {} def register(self, name, function): + """Register a hook""" self._hooks[name] = function def execute(self, args): + """Execute a registered hook based on args[0]""" hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() @@ -324,6 +377,7 @@ def execute(self, args): raise UnregisteredHookError(hook_name) def hook(self, *hook_names): + """Decorator, registering them as hooks""" def wrapper(decorated): for hook_name in hook_names: self.register(hook_name, decorated) @@ -337,4 +391,5 @@ def wrapper(decorated): def charm_dir(): + """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 1a63bf89..4a6a4a8c 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -19,18 +19,22 @@ def service_start(service_name): + """Start a system service""" return service('start', service_name) def service_stop(service_name): + """Stop a system service""" return service('stop', service_name) def service_restart(service_name): + """Restart a system service""" return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): + """Reload a system service, optionally falling back to restart if reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False): def service(action, service_name): + """Control a system service""" cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 def service_running(service): + """Determine whether a system service is running""" try: output = subprocess.check_output(['service', service, 'status']) except subprocess.CalledProcessError: @@ -55,7 +61,7 @@ def service_running(service): def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user""" + """Add a user to the system""" try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444): def mount(device, mountpoint, options=None, persist=False): - '''Mount a filesystem''' + """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: cmd_args.extend(['-o', options]) @@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False): def umount(mountpoint, persist=False): - '''Unmount a filesystem''' + """Unmount a filesystem""" cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) @@ -169,7 +175,7 @@ def umount(mountpoint, persist=False): def mounts(): - '''List of all mounted volumes as [[mountpoint,device],[...]]''' + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" with open('/proc/mounts') as f: # [['/mount/point','/dev/path'],[...]] system_mounts = [m[1::-1] for m in [l.strip().split() @@ -178,7 +184,7 @@ def mounts(): def file_hash(path): - ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + """Generate a md5 hash of the contents of 'path' or None if not found """ if os.path.exists(path): h = hashlib.md5() with open(path, 'r') as source: @@ 
-189,7 +195,7 @@ def file_hash(path): def restart_on_change(restart_map): - ''' Restart services based on configuration files changing + """Restart services based on configuration files changing This function is used a decorator, for example @@ -202,7 +208,7 @@ def ceph_client_changed(): In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. - ''' + """ def wrap(f): def wrapped_f(*args): checksums = {} @@ -220,7 +226,7 @@ def wrapped_f(*args): def lsb_release(): - '''Return /etc/lsb-release in a dict''' + """Return /etc/lsb-release in a dict""" d = {} with open('/etc/lsb-release', 'r') as lsb: for l in lsb: @@ -230,7 +236,7 @@ def lsb_release(): def pwgen(length=None): - '''Generate a random pasword.''' + """Generate a random pasword.""" if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index b2f96467..f83e7b7d 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -13,6 +13,7 @@ log, ) import apt_pkg +import os CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -20,6 +21,32 @@ PROPOSED_POCKET = """# Proposed deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted """ +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', +} def filter_installed_packages(packages): @@ -40,8 +67,10 @@ def filter_installed_packages(packages): def apt_install(packages, options=None, fatal=False): """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') if isinstance(packages, basestring): @@ -50,10 +79,14 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, env=env) else: - subprocess.call(cmd) + subprocess.call(cmd, env=env) def apt_update(fatal=False): @@ -67,7 +100,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): 
"""Purge one or more packages""" - cmd = ['apt-get', '-y', 'purge'] + cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, basestring): cmd.append(packages) else: @@ -79,16 +112,37 @@ def apt_purge(packages, fatal=False): subprocess.call(cmd) +def apt_hold(packages, fatal=False): + """Hold one or more packages""" + cmd = ['apt-mark', 'hold'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Holding {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + def add_source(source, key=None): - if ((source.startswith('ppa:') or - source.startswith('http:'))): + if (source.startswith('ppa:') or + source.startswith('http:') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) elif source.startswith('cloud:'): apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) elif source == 'proposed': release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -118,8 +172,11 @@ def configure_sources(update=False, Note that 'null' (a.k.a. None) should not be quoted. """ sources = safe_load(config(sources_var)) - keys = safe_load(config(keys_var)) - if isinstance(sources, basestring) and isinstance(keys, basestring): + keys = config(keys_var) + if keys is not None: + keys = safe_load(keys) + if isinstance(sources, basestring) and ( + keys is None or isinstance(keys, basestring)): add_source(sources, keys) else: if not len(sources) == len(keys): @@ -172,7 +229,9 @@ def install_from_config(config_var_name): class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): """Returns True if the source can be handled. 
Otherwise returns a string explaining why it cannot""" @@ -200,10 +259,13 @@ def plugins(fetch_handlers=None): for handler_name in fetch_handlers: package, classname = handler_name.rsplit('.', 1) try: - handler_class = getattr(importlib.import_module(package), classname) + handler_class = getattr( + importlib.import_module(package), + classname) plugin_list.append(handler_class()) except (ImportError, AttributeError): # Skip missing plugins so that they can be ommitted from # installation if desired - log("FetchHandler {} not found, skipping plugin".format(handler_name)) + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) return plugin_list diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index c348b4bb..db5dd9a3 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -12,6 +12,7 @@ apt_install("python-bzrlib") from bzrlib.branch import Branch + class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): @@ -46,4 +47,3 @@ def install(self, source): except OSError as e: raise UnhandledSource(e.strerror) return dest_dir - From 73460dbc48d9c305c1c958bc61787bc24689d52a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Dec 2013 10:44:00 +0000 Subject: [PATCH 0292/2699] Add icehouse cloud targets --- ceph-proxy/hooks/charmhelpers/core/host.py | 44 +++++++++++++++++++ .../hooks/charmhelpers/fetch/__init__.py | 10 ++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 4a6a4a8c..c8c81b28 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -245,3 +245,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..1f4f6315 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 
'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } From 0dc831dbc9c3a7deea2b868910ab92f6714f7421 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Dec 2013 10:44:00 +0000 Subject: [PATCH 0293/2699] Add icehouse cloud targets --- ceph-mon/hooks/charmhelpers/core/host.py | 44 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/fetch/__init__.py | 10 ++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 4a6a4a8c..c8c81b28 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -245,3 +245,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..1f4f6315 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } From 0e0e1ab18b30113ff81009435aa52284b37015ea Mon Sep 17 00:00:00 2001 From: James Page 
Date: Wed, 4 Dec 2013 10:44:51 +0000 Subject: [PATCH 0294/2699] Add icehouse cloud support --- ceph-osd/hooks/charmhelpers/core/host.py | 44 +++++++++++++++++++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 10 ++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 4a6a4a8c..c8c81b28 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -245,3 +245,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..1f4f6315 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } From f50eec7f9e3d2dff2d98f8ee7cbe2c911ef36399 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 12 Dec 2013 10:58:43 +0000 Subject: [PATCH 0295/2699] Adds configurable osd-journal-size option Fixes: bug 1259919 --- ceph-proxy/config.yaml | 11 +++++++++++ ceph-proxy/hooks/hooks.py | 3 ++- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index bac32bca..654eb887 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -49,6 +49,17 @@ options: no journal device will be used. . Only supported with ceph >= 0.48.3. + osd-journal-size: + type: int + default: 1024 + description: | + Ceph osd journal size. 
The journal size should be at least twice the + product of the expected drive speed and the filestore max sync + interval. However, the most common practice is to partition the journal + drive (often an SSD), and mount it such that Ceph uses the entire + partition for the journal. + . + Only supported with ceph >= 0.48.3. osd-format: type: string default: xfs diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 537019e5..82fb3323 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -71,7 +71,8 @@ def emit_cephconf(): 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), - 'version': ceph.get_ceph_version() + 'version': ceph.get_ceph_version(), + 'osd_journal_size': config('osd-journal-size') } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-proxy/revision b/ceph-proxy/revision index a9c8fe82..b16e5f75 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -103 +104 diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 887ba82f..107beece 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -18,6 +18,6 @@ [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = 1000 + osd journal size = {{ osd_journal_size }} filestore xattr use omap = true From 9d218163639051f5e9d664951a6eead3f1c29b7b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 12 Dec 2013 10:58:43 +0000 Subject: [PATCH 0296/2699] Adds configurable osd-journal-size option Fixes: bug 1259919 --- ceph-mon/config.yaml | 11 +++++++++++ ceph-mon/hooks/hooks.py | 3 ++- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index bac32bca..654eb887 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -49,6 +49,17 @@ options: no journal device will be used. . Only supported with ceph >= 0.48.3. + osd-journal-size: + type: int + default: 1024 + description: | + Ceph osd journal size. The journal size should be at least twice the + product of the expected drive speed and the filestore max sync + interval. However, the most common practice is to partition the journal + drive (often an SSD), and mount it such that Ceph uses the entire + partition for the journal. + . + Only supported with ceph >= 0.48.3.
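As a rough sanity check of the sizing rule in the option description above (the figures below are assumptions for illustration, not values taken from the charm): a drive sustaining about 100 MB/s with a filestore max sync interval of 5 seconds suggests a journal of at least 2 * 100 * 5 = 1000 MB, which lines up with the 1024 MB default.

    # Illustrative sketch of the sizing rule above; both inputs are assumptions.
    expected_drive_speed_mb_s = 100        # assumed sustained write speed
    filestore_max_sync_interval_s = 5      # assumed sync interval
    journal_size_mb = 2 * expected_drive_speed_mb_s * filestore_max_sync_interval_s
    print(journal_size_mb)                 # 1000, close to the 1024 default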
osd-format: type: string default: xfs diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 537019e5..82fb3323 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -71,7 +71,8 @@ def emit_cephconf(): 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), - 'version': ceph.get_ceph_version() + 'version': ceph.get_ceph_version(), + 'osd_journal_size': config('osd-journal-size') } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-mon/revision b/ceph-mon/revision index a9c8fe82..b16e5f75 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -103 +104 diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 887ba82f..107beece 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -18,6 +18,6 @@ [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = 1000 + osd journal size = {{ osd_journal_size }} filestore xattr use omap = true From dda2fbb43d5c040699ae85bd9e7873e06526fb41 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 12 Dec 2013 11:17:23 +0000 Subject: [PATCH 0297/2699] Adds configurable osd-journal-size option Fixes: bug 1259919 --- ceph-osd/config.yaml | 11 +++++++++++ ceph-osd/hooks/hooks.py | 3 ++- ceph-osd/revision | 2 +- ceph-osd/templates/ceph.conf | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index d915cc1f..dfeae00b 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -17,6 +17,17 @@ options: no journal device will be used. . Only supported with ceph >= 0.48.3. + osd-journal-size: + type: int + default: 1024 + description: | + Ceph osd journal size. The journal size should be at least twice the + product of the expected drive speed and the filestore max sync + interval. However, the most common practice is to partition the journal + drive (often an SSD), and mount it such that Ceph uses the entire + partition for the journal. + . + Only supported with ceph >= 0.48.3.
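For reference, the {{ osd_journal_size }} placeholder written into templates/ceph.conf above is ordinary Jinja2, which the charms render via render_template() in hooks/utils.py. A minimal sketch of the substitution, assuming the 1024 default:

    from jinja2 import Template

    # Render just the templated [osd] line; 1024 is the charm's default value.
    line = Template("osd journal size = {{ osd_journal_size }}")
    print(line.render(osd_journal_size=1024))  # osd journal size = 1024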
osd-format: type: string default: xfs diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 0176d651..7b536b8d 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -70,7 +70,8 @@ def emit_cephconf(): 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid(), - 'version': ceph.get_ceph_version() + 'version': ceph.get_ceph_version(), + 'osd_journal_size': config('osd-journal-size') } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-osd/revision b/ceph-osd/revision index b1bd38b6..8351c193 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -13 +14 diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 887ba82f..107beece 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -18,6 +18,6 @@ [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = 1000 + osd journal size = {{ osd_journal_size }} filestore xattr use omap = true From 247d15e4c5fca41b84d23739cad54b490a8d87ea Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 24 Jan 2014 15:31:23 +0000 Subject: [PATCH 0298/2699] Deal with apache24 --- ceph-radosgw/hooks/hooks.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b239c1f0..579e22d4 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -65,13 +65,16 @@ def emit_apacheconf(): apachecontext = { "hostname": utils.unit_get('private-address') } - with open('/etc/apache2/sites-available/rgw', 'w') as apacheconf: + with open('/etc/apache2/sites-available/rgw.conf', 'w') as apacheconf: apacheconf.write(utils.render_template('rgw', apachecontext)) def apache_sites(): utils.juju_log('INFO', 'Begin apache_sites.') - subprocess.check_call(['a2dissite', 'default']) + if os.path.exists('/etc/apache2/sites-available/000-default.conf'): + subprocess.check_call(['a2dissite', '000-default']) + else: + subprocess.check_call(['a2dissite', 'default']) subprocess.check_call(['a2ensite', 'rgw']) utils.juju_log('INFO', 'End apache_sites.') From c12a1cdcaeb9d4882af5fc66cde0e69e40c2a430 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 24 Jan 2014 16:02:57 +0000 Subject: [PATCH 0299/2699] Rebase on charm-helpers --- ceph-radosgw/Makefile | 8 + ceph-radosgw/charm-helpers-sync.yaml | 9 + ceph-radosgw/hooks/ceph.py | 43 +- ceph-radosgw/hooks/charmhelpers/__init__.py | 0 .../hooks/charmhelpers/contrib/__init__.py | 0 .../contrib/openstack/__init__.py | 0 .../contrib/openstack/alternatives.py | 17 + .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/utils.py | 25 ++ .../hooks/charmhelpers/core/__init__.py | 0 .../hooks/charmhelpers/core/hookenv.py | 395 ++++++++++++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 291 +++++++++++++ .../hooks/charmhelpers/fetch/__init__.py | 279 +++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 48 +++ .../hooks/charmhelpers/fetch/bzrurl.py | 49 +++ .../hooks/charmhelpers/payload/__init__.py | 1 + .../hooks/charmhelpers/payload/execd.py | 50 +++ ceph-radosgw/hooks/hooks.py | 172 ++++---- ceph-radosgw/hooks/utils.py | 171 +------- ceph-radosgw/metadata.yaml | 2 + 21 files changed, 1294 insertions(+), 266 deletions(-) create mode 100644 ceph-radosgw/Makefile create mode 100644 ceph-radosgw/charm-helpers-sync.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/__init__.py create mode 
100644 ceph-radosgw/hooks/charmhelpers/contrib/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/hookenv.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/host.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py create mode 100644 ceph-radosgw/hooks/charmhelpers/payload/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/payload/execd.py diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile new file mode 100644 index 00000000..71dfd409 --- /dev/null +++ b/ceph-radosgw/Makefile @@ -0,0 +1,8 @@ +#!/usr/bin/make + +lint: + @flake8 --exclude hooks/charmhelpers hooks + @charm proof + +sync: + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/ceph-radosgw/charm-helpers-sync.yaml b/ceph-radosgw/charm-helpers-sync.yaml new file mode 100644 index 00000000..0963bbcd --- /dev/null +++ b/ceph-radosgw/charm-helpers-sync.yaml @@ -0,0 +1,9 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core + - fetch + - contrib.storage.linux: + - utils + - payload.execd + - contrib.openstack.alternatives diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index afe29674..71f569d4 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -10,23 +10,24 @@ import json import subprocess import time -import utils import os import apt_pkg as apt +from socket import gethostname as get_unit_hostname + LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -44,13 +45,13 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "mon_status" - ] + ] if os.path.exists(asok): try: result = json.loads(subprocess.check_output(cmd)) @@ -73,14 +74,14 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ "ceph", "--admin-daemon", asok, "add_bootstrap_peer_hint", peer - ] + ] if os.path.exists(asok): # Ignore any errors for this call subprocess.call(cmd) @@ -89,7 +90,7 @@ def add_bootstrap_hint(peer): 'xfs', 'ext4', 'btrfs' - ] +] def is_osd_disk(dev): @@ -99,7 +100,7 @@ def is_osd_disk(dev): for line in info: if line.startswith( 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): + ): return True except subprocess.CalledProcessError: pass @@ -110,7 +111,7 @@ def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', '--subsystem-match=block', '--action=add' - ] + ]
subprocess.call(cmd) @@ -140,7 +141,7 @@ def import_osd_bootstrap_key(key): '--create-keyring', '--name=client.bootstrap-osd', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys @@ -148,10 +149,10 @@ 'mon': [ 'allow command osd create ...', 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', + r'allow command auth add * osd allow\ * mon allow\ rwx', 'allow command mon getmap' - ] - } + ] +} def get_osd_bootstrap_key(): @@ -169,14 +170,14 @@ def import_radosgw_key(key): '--create-keyring', '--name=client.radosgw.gateway', '--add-key={}'.format(key) - ] + ] subprocess.check_call(cmd) # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_radosgw_key(): @@ -186,7 +187,7 @@ _default_caps = { 'mon': ['allow r'], 'osd': ['allow rwx'] - } +} def get_named_key(name, caps=None): @@ -196,16 +197,16 @@ '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - utils.get_unit_hostname() - ), + get_unit_hostname() + ), 'auth', 'get-or-create', 'client.{}'.format(name), - ] + ] # Add capabilities for subsystem, subcaps in caps.iteritems(): cmd.extend([ subsystem, '; '.join(subcaps), - ]) + ]) output = subprocess.check_output(cmd).strip() # IGNORE:E1103 # get-or-create appears to have different output depending # on whether it's 'get' or 'create' diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py new file mode 100644 index 00000000..b413259c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -0,0 +1,17 @@ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..c40218f0 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from
subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..bb196dfa --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,395 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict +from subprocess import CalledProcessError + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper.
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + """The relation ID for the current relation hook""" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + """The name of the service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + except CalledProcessError, e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + """Set relation information for the current unit""" + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json representation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a
specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the requested attribute for this unit via unit-get""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example: + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + ... + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + ...
+ + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + """Return the root directory of the current charm""" + return os.environ.get('CHARM_DIR') diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..c8c81b28 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -0,0 +1,291 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import pwd +import grp +import random +import string +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log + + +def service_start(service_name): + """Start a system service""" + return service('start', service_name) + + +def service_stop(service_name): + """Stop a system service""" + return service('stop', service_name) + + +def service_restart(service_name): + """Restart a system service""" + return service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + """Reload a system service, optionally falling back to restart if reload fails""" + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result + + +def service(action, service_name): + """Control a system service""" + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + """Determine whether a system service is running""" + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user to the system""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + options = options 
or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def mount(device, mountpoint, options=None, persist=False): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + """Generate an md5 hash of the contents of 'path' or None if not found """ + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + """Restart services based on configuration files changing + + This function is used as a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function.
+ """ + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def pwgen(length=None): + """Generate a random pasword.""" + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 00000000..1f4f6315 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,279 @@ +import importlib +from yaml import safe_load +from charmhelpers.core.host import ( + lsb_release +) +from urlparse import ( + urlparse, + urlunparse, +) +import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) +import apt_pkg +import os + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 
'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', +} + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_hold(packages, fatal=False): + """Hold one or more packages""" + cmd = ['apt-mark', 'hold'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Holding {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def add_source(source, key=None): + if (source.startswith('ppa:') or + source.startswith('http:') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + if key: + subprocess.check_call(['apt-key', 
'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = config(keys_var) + if keys is not None: + keys = safe_load(keys) + if isinstance(sources, basestring) and ( + keys is None or isinstance(keys, basestring)): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this module's submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source.
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr( + importlib.import_module(package), + classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) + return plugin_list diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..e35b8f15 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,48 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + def install(self, source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + raise UnhandledSource(e.reason) + except OSError as e: + raise UnhandledSource(e.strerror) + return extract(dld_file) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 00000000..db5dd9a3 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/ceph-radosgw/hooks/charmhelpers/payload/__init__.py b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py new file mode 100644 index 00000000..fc9fbc08 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py @@ -0,0 +1 @@ +"Tools for working with files injected into a charm just before deployment." diff --git a/ceph-radosgw/hooks/charmhelpers/payload/execd.py b/ceph-radosgw/hooks/charmhelpers/payload/execd.py new file mode 100644 index 00000000..6476a75f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/payload/execd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within execd_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}.
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 579e22d4..0ff90fd0 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -14,7 +14,32 @@ import os import ceph -import utils +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + config, + unit_get, + open_port, + relation_set, + log, + Hooks, UnregisteredHookError, +) +from charmhelpers.fetch import ( + apt_update, + apt_install, + add_source, +) +from utils import ( + render_template, + get_host_ip, + enable_pocket +) + +from charmhelpers.payload.execd import execd_preinstall +import socket.gethostname as get_unit_hostname + +hooks = Hooks() def install_www_scripts(): @@ -22,19 +47,20 @@ def install_www_scripts(): shutil.copy(x, '/var/www/') -NSS_DIR='/var/lib/ceph/nss' +NSS_DIR = '/var/lib/ceph/nss' +@hooks.hook('install') def install(): - utils.juju_log('INFO', 'Begin install hook.') - utils.enable_pocket('multiverse') - utils.configure_source() - utils.install('radosgw', - 'libapache2-mod-fastcgi', - 'apache2', - 'ntp') + execd_preinstall() + enable_pocket('multiverse') + add_source(config('source'), config('key')) + apt_update(fatal=True) + apt_install(['radosgw', + 'libapache2-mod-fastcgi', + 'apache2', + 'ntp'], fatal=True) os.makedirs(NSS_DIR) - utils.juju_log('INFO', 'End install hook.') def emit_cephconf(): @@ -45,71 +71,67 @@ def emit_cephconf(): cephcontext = { 'auth_supported': get_auth() or 'none', 'mon_hosts': ' '.join(get_mon_hosts()), - 'hostname': utils.get_unit_hostname(), + 'hostname': get_unit_hostname(), 'version': ceph.get_ceph_version('radosgw') - } - - # Check to ensure that correct version of ceph is + } + + # Check to ensure that correct version of ceph is # in use - if ceph.get_ceph_version('radosgw') >= "0.55": + if ceph.get_ceph_version('radosgw') >= "0.55": # Add keystone configuration if found ks_conf = get_keystone_conf() if ks_conf: cephcontext.update(ks_conf) with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(utils.render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', cephcontext)) def emit_apacheconf(): apachecontext = { - "hostname": utils.unit_get('private-address') - } + "hostname": unit_get('private-address') + } with open('/etc/apache2/sites-available/rgw.conf', 'w') as apacheconf: - apacheconf.write(utils.render_template('rgw', apachecontext)) + apacheconf.write(render_template('rgw', apachecontext)) def apache_sites(): - utils.juju_log('INFO', 'Begin apache_sites.') if os.path.exists('/etc/apache2/sites-available/000-default.conf'): subprocess.check_call(['a2dissite', '000-default']) else: subprocess.check_call(['a2dissite', 'default']) subprocess.check_call(['a2ensite', 'rgw']) - utils.juju_log('INFO', 'End apache_sites.') def apache_modules(): - utils.juju_log('INFO', 'Begin apache_sites.') subprocess.check_call(['a2enmod', 'fastcgi']) subprocess.check_call(['a2enmod', 'rewrite']) - utils.juju_log('INFO', 'End apache_sites.') def apache_reload(): subprocess.call(['service', 'apache2', 'reload']) +@hooks.hook('upgrade-charm', + 'config-changed') def config_changed(): - utils.juju_log('INFO', 'Begin config-changed hook.') emit_cephconf() emit_apacheconf() install_www_scripts() apache_sites() 
apache_modules() apache_reload() - utils.juju_log('INFO', 'End config-changed hook.') def get_mon_hosts(): hosts = [] - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): + for relid in relation_ids('mon'): + for unit in related_units(relid): hosts.append( - '{}:6789'.format(utils.get_host_ip( - utils.relation_get('private-address', - unit, relid))) - ) + '{}:6789'.format(get_host_ip( + relation_get('private-address', + unit, relid))) + ) hosts.sort() return hosts @@ -120,100 +142,90 @@ def get_auth(): def get_conf(name): - for relid in utils.relation_ids('mon'): - for unit in utils.relation_list(relid): - conf = utils.relation_get(name, - unit, relid) + for relid in relation_ids('mon'): + for unit in related_units(relid): + conf = relation_get(name, + unit, relid) if conf: return conf return None + def get_keystone_conf(): - for relid in utils.relation_ids('identity-service'): - for unit in utils.relation_list(relid): + for relid in relation_ids('identity-service'): + for unit in related_units(relid): ks_auth = { 'auth_type': 'keystone', 'auth_protocol': 'http', - 'auth_host': utils.relation_get('auth_host', unit, relid), - 'auth_port': utils.relation_get('auth_port', unit, relid), - 'admin_token': utils.relation_get('admin_token', unit, relid), - 'user_roles': utils.config_get('operator-roles'), - 'cache_size': utils.config_get('cache-size'), - 'revocation_check_interval': utils.config_get('revocation-check-interval') + 'auth_host': relation_get('auth_host', unit, relid), + 'auth_port': relation_get('auth_port', unit, relid), + 'admin_token': relation_get('admin_token', unit, relid), + 'user_roles': config('operator-roles'), + 'cache_size': config('cache-size'), + 'revocation_check_interval': + config('revocation-check-interval') } if None not in ks_auth.itervalues(): return ks_auth return None +@hooks.hook('mon-relation-departed', + 'mon-relation-changed') def mon_relation(): - utils.juju_log('INFO', 'Begin mon-relation hook.') emit_cephconf() - key = utils.relation_get('radosgw_key') + key = relation_get('radosgw_key') if key: ceph.import_radosgw_key(key) restart() # TODO figure out a better way todo this - utils.juju_log('INFO', 'End mon-relation hook.') +@hooks.hook('gateway-relation-joined') def gateway_relation(): - utils.juju_log('INFO', 'Begin gateway-relation hook.') - utils.relation_set(hostname=utils.unit_get('private-address'), - port=80) - utils.juju_log('INFO', 'Begin gateway-relation hook.') - - -def upgrade_charm(): - utils.juju_log('INFO', 'Begin upgrade-charm hook.') - utils.juju_log('INFO', 'End upgrade-charm hook.') + relation_set(hostname=unit_get('private-address'), + port=80) def start(): subprocess.call(['service', 'radosgw', 'start']) - utils.expose(port=80) + open_port(port=80) def stop(): subprocess.call(['service', 'radosgw', 'stop']) - utils.expose(port=80) + open_port(port=80) def restart(): subprocess.call(['service', 'radosgw', 'restart']) - utils.expose(port=80) + open_port(port=80) +@hooks.hook('identity-service-relation-joined', + 'identity-service-relation-changed') def identity_joined(relid=None): if ceph.get_ceph_version('radosgw') < "0.55": - utils.juju_log('ERROR', - 'Integration with keystone requires ceph >= 0.55') + log('Integration with keystone requires ceph >= 0.55') sys.exit(1) - hostname = utils.unit_get('private-address') + hostname = unit_get('private-address') admin_url = 'http://{}:80/swift'.format(hostname) internal_url = public_url = '{}/v1'.format(admin_url) - utils.relation_set(service='swift', - 
region=utils.config_get('region'), - public_url=public_url, internal_url=internal_url, - admin_url=admin_url, - requested_roles=utils.config_get('operator-roles'), - rid=relid) + relation_set(service='swift', + region=config('region'), + public_url=public_url, internal_url=internal_url, + admin_url=admin_url, + requested_roles=config('operator-roles'), + rid=relid) def identity_changed(): emit_cephconf() - restart() - + restart() -utils.do_hooks({ - 'install': install, - 'config-changed': config_changed, - 'mon-relation-departed': mon_relation, - 'mon-relation-changed': mon_relation, - 'gateway-relation-joined': gateway_relation, - 'upgrade-charm': config_changed, # same function ATM - 'identity-service-relation-joined': identity_joined, - 'identity-service-relation-changed': identity_changed - }) -sys.exit(0) +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 972bc99e..2a9a9c71 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -7,97 +7,35 @@ # Paul Collins # -import os -import subprocess import socket -import sys import re - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) +from charmhelpers.core.hookenv import unit_get +from charmhelpers.fetch import apt_install TEMPLATES_DIR = 'templates' try: import jinja2 except ImportError: - install('python-jinja2') + apt_install('python-jinja2', fatal=True) import jinja2 try: import dns.resolver except ImportError: - install('python-dnspython') + apt_install('python-dnspython', fatal=True) import dns.resolver def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) + loader=jinja2.FileSystemLoader(template_dir) + ) template = templates.get_template(template_name) return template.render(context) -CLOUD_ARCHIVE = \ -""" # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - - -def configure_source(): - source = str(config_get('source')) - if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(pocket)) - if source.startswith('http:'): - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: - apt.write("deb " + source + "\n") - key = config_get('key') - if key: - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: @@ -109,103 +47,6 @@ def enable_pocket(pocket): else: sources.write(line) -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ 
- 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - return subprocess.check_output(cmd).split() # IGNORE:E1103 - - -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - ] - if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid' and v: - cmd.append('-r') - cmd.append(v) - elif k != 'rid': - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def config_get(attribute): - cmd = [ - 'config-get', - attribute - ] - value = str(subprocess.check_output(cmd)).strip() - if value == "": - return None - else: - return value - - -def get_unit_hostname(): - return socket.gethostname() - def get_host_ip(hostname=unit_get('private-address')): try: diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 1e4a9f8e..f4ca7a1a 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -7,6 +7,8 @@ description: | . This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols for object storage. +categories: + - misc requires: mon: interface: ceph-radosgw From 238c42f5e32384668cbc72a12d396e91f19afa18 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 24 Jan 2014 16:04:49 +0000 Subject: [PATCH 0300/2699] Fixup imports --- ceph-radosgw/hooks/ceph.py | 2 +- ceph-radosgw/hooks/hooks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 71f569d4..d75ce956 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -13,7 +13,7 @@ import os import apt_pkg as apt -import socket.gethostname as get_unit_hostname +from socket import gethostname as get_unit_hostname LEADER = 'leader' PEON = 'peon' diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 0ff90fd0..de94a940 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -37,7 +37,7 @@ ) from charmhelpers.payload.execd import execd_preinstall -import socket.gethostname as get_unit_hostname +from socket import gethostname as get_unit_hostname hooks = Hooks() From fb5f8481d5c4829e25854cf6fec2ab199b4484b2 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 24 Jan 2014 16:21:42 +0000 Subject: [PATCH 0301/2699] Add apache helper, fixup site handling --- ceph-radosgw/hooks/hooks.py | 10 +++++++--- ceph-radosgw/hooks/utils.py | 8 ++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index de94a940..7a8f8c08 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -33,7 +33,8 @@ from utils import ( render_template, get_host_ip, - enable_pocket + enable_pocket, + is_apache_24 ) from charmhelpers.payload.execd import execd_preinstall @@ -91,12 +92,15 @@ def emit_apacheconf(): apachecontext = { "hostname": unit_get('private-address') } - with 
open('/etc/apache2/sites-available/rgw.conf', 'w') as apacheconf: + site_conf = '/etc/apache2/sites-available/rgw' + if is_apache_24(): + site_conf = '/etc/apache2/sites-available/rgw.conf' + with open(site_conf, 'w') as apacheconf: apacheconf.write(render_template('rgw', apachecontext)) def apache_sites(): - if os.path.exists('/etc/apache2/sites-available/000-default.conf'): + if is_apache_24(): subprocess.check_call(['a2dissite', '000-default']) else: subprocess.check_call(['a2dissite', 'default']) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 2a9a9c71..b8e16623 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -9,6 +9,7 @@ import socket import re +import os from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install @@ -59,3 +60,10 @@ def get_host_ip(hostname=unit_get('private-address')): answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address + + +def is_apache_24(): + if os.path.exists('/etc/apache2/conf-available'): + return True + else: + return False From ad8dd1ff6055e3ce6ee89efa0a31a0b6f0e625b3 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 24 Jan 2014 17:20:33 +0000 Subject: [PATCH 0302/2699] Add pydev stuff --- ceph-radosgw/.project | 17 +++++++++++++++++ ceph-radosgw/.pydevproject | 8 ++++++++ 2 files changed, 25 insertions(+) create mode 100644 ceph-radosgw/.project create mode 100644 ceph-radosgw/.pydevproject diff --git a/ceph-radosgw/.project b/ceph-radosgw/.project new file mode 100644 index 00000000..51d6166f --- /dev/null +++ b/ceph-radosgw/.project @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8"?> +<projectDescription> + <name>ceph-radosgw</name> + <comment></comment> + <projects> + </projects> + <buildSpec> + <buildCommand> + <name>org.python.pydev.PyDevBuilder</name> + <arguments> + </arguments> + </buildCommand> + </buildSpec> + <natures> + <nature>org.python.pydev.pythonNature</nature> + </natures> +</projectDescription> diff --git a/ceph-radosgw/.pydevproject b/ceph-radosgw/.pydevproject new file mode 100644 index 00000000..98cc65d3 --- /dev/null +++ b/ceph-radosgw/.pydevproject @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<?eclipse-pydev version="1.0"?><pydev_project> +<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property> +<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property> +<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH"> +<path>/ceph-radosgw/hooks</path> +</pydev_pathproperty> +</pydev_project> From 086b5b80dc9ad1ce4ff99cf3fbf918110f769f28 Mon Sep 17 00:00:00 2001 From: "Jorge O. Castro" Date: Mon, 27 Jan 2014 16:34:35 -0500 Subject: [PATCH 0303/2699] Revised readme for audit. --- ceph-proxy/README.md | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 40fe82c4..c8c7af76 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -1,16 +1,15 @@ -Overview -======== +# Overview Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. This charm deploys a Ceph cluster. +juju -Usage -===== +# Usage The ceph charm has two pieces of mandatory configuration for which no defaults -are provided: +are provided. You _must_ set these configuration options before deployment or the charm will not work: fsid: uuid specific to a ceph cluster used to ensure that different @@ -60,16 +59,29 @@ By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices.
-Contact Information -=================== +## Scale Out Usage -Author: Paul Collins , - James Page -Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug -Location: http://jujucharms.com/charms/ceph +You can use the Ceph OSD and Ceph Radosgw charms: -Technical Bootnotes -=================== +- [Ceph OSD](https://jujucharms.com/precise/ceph-osd) +- [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) + +# Contact Information + +## Authors + +- Paul Collins , +- James Page + +Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph/+filebug) + +## Ceph + +- [Ceph website](http://ceph.com) +- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) +- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) + +# Technical Footnotes This charm uses the new-style Ceph deployment as reverse-engineered from the Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected @@ -82,12 +94,11 @@ cluster a quorum forms quickly, and OSD bringup proceeds. The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to create the filesystems with a special GPT partition type. *udev* is set up to mount such filesystems and start the osd daemons as their storage becomes -visible to the system (or after "udevadm trigger"). +visible to the system (or after `udevadm trigger`). The Chef cookbook mentioned above performs some extra steps to generate an OSD bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. -See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph -monitor cluster deployment strategies and pitfalls. +See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. From c9e814d31d49295d0d56d8549b7dc1f0874a97ef Mon Sep 17 00:00:00 2001 From: "Jorge O. Castro" Date: Mon, 27 Jan 2014 16:34:35 -0500 Subject: [PATCH 0304/2699] Revised readme for audit. --- ceph-mon/README.md | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 40fe82c4..c8c7af76 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -1,16 +1,15 @@ -Overview -======== +# Overview Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. This charm deploys a Ceph cluster. +juju -Usage -===== +# Usage The ceph charm has two pieces of mandatory configuration for which no defaults -are provided: +are provided. You _must_ set these configuration options before deployment or the charm will not work: fsid: uuid specific to a ceph cluster used to ensure that different @@ -60,16 +59,29 @@ By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices. 
-Contact Information -=================== +## Scale Out Usage -Author: Paul Collins , - James Page -Report bugs at: http://bugs.launchpad.net/charms/+source/ceph/+filebug -Location: http://jujucharms.com/charms/ceph +You can use the Ceph OSD and Ceph Radosgw charms: -Technical Bootnotes -=================== +- [Ceph OSD](https://jujucharms.com/precise/ceph-osd) +- [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) + +# Contact Information + +## Authors + +- Paul Collins , +- James Page + +Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph/+filebug) + +## Ceph + +- [Ceph website](http://ceph.com) +- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) +- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) + +# Technical Footnotes This charm uses the new-style Ceph deployment as reverse-engineered from the Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected @@ -82,12 +94,11 @@ cluster a quorum forms quickly, and OSD bringup proceeds. The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to create the filesystems with a special GPT partition type. *udev* is set up to mount such filesystems and start the osd daemons as their storage becomes -visible to the system (or after "udevadm trigger"). +visible to the system (or after `udevadm trigger`). The Chef cookbook mentioned above performs some extra steps to generate an OSD bootstrapping key and propagate it to the other nodes in the cluster. Since all OSDs run on nodes that also run mon, we don't need this and did not implement it. -See http://ceph.com/docs/master/dev/mon-bootstrap/ for more information on Ceph -monitor cluster deployment strategies and pitfalls. +See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. 
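The two revised READMEs above spell out the mandatory configuration and the three-unit bootstrap rule, but stop short of a concrete deployment recipe. The following is a minimal sketch rather than text from the charms: every value is a placeholder, and it assumes the second mandatory option is named monitor-secret (as in contemporary revisions of these charms) and a juju client of this era that accepts -n and --config:

    # ceph.yaml -- illustrative only; generate a real fsid with uuidgen
    # and a real monitor-secret with ceph-authtool --gen-key
    ceph:
      fsid: 'ecbb8960-0e21-11e2-b495-83a88f44db01'
      monitor-secret: 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ=='
      osd-devices: '/dev/sdb /dev/sdc /dev/sdd /dev/sde'

    juju deploy -n 3 --config ceph.yaml ceph

Asking for three units in a single deploy matches the bootstrap behaviour described above: the monitors wait for a quorum of three before any storage devices are added. Scale-out then uses the charms linked from the Scale Out Usage section, along the lines of juju deploy ceph-osd followed by juju add-relation ceph-osd ceph (endpoint names assumed from the charms of this era).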
From 083e2882f69f7a8a329c052256be15c07e94c8d1 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 19 Mar 2014 09:45:14 +0000 Subject: [PATCH 0305/2699] Fixup execution of correct function for identity-changed --- ceph-radosgw/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 7a8f8c08..3f125f0d 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -205,8 +205,7 @@ def restart(): open_port(port=80) -@hooks.hook('identity-service-relation-joined', - 'identity-service-relation-changed') +@hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): if ceph.get_ceph_version('radosgw') < "0.55": log('Integration with keystone requires ceph >= 0.55') @@ -223,6 +222,7 @@ def identity_joined(relid=None): rid=relid) +@hooks.hook('identity-service-relation-changed') def identity_changed(): emit_cephconf() restart() From 671b0c6ce0d17a477725d0a8744eec836afeb96d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Mar 2014 13:38:46 +0000 Subject: [PATCH 0306/2699] Resynced helpers --- .../charmhelpers/contrib/storage/linux/utils.py | 3 ++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 6 ++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 12 +++++++++--- ceph-proxy/hooks/charmhelpers/fetch/__init__.py | 10 ++++++++-- ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py | 15 +++++++++++++++ 5 files changed, 40 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index c8c81b28..cfd26847 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 1f4f6315..c05e0335 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -135,8 +135,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. 
Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -156,7 +160,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From b9218446b88d2398f1952e007d2aa8e8675506ed Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Mar 2014 13:38:46 +0000 Subject: [PATCH 0307/2699] Resynced helpers --- .../charmhelpers/contrib/storage/linux/utils.py | 3 ++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 6 ++++++ ceph-mon/hooks/charmhelpers/core/host.py | 12 +++++++++--- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 10 ++++++++-- ceph-mon/hooks/charmhelpers/fetch/archiveurl.py | 15 +++++++++++++++ 5 files changed, 40 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index c8c81b28..cfd26847 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 1f4f6315..c05e0335 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -135,8 +135,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. 
Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -156,7 +160,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From a6027192cd1b825d89454172826a0d95e0bf3296 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Mar 2014 13:41:32 +0000 Subject: [PATCH 0308/2699] Resynced helpers --- ceph-proxy/charm-helpers-sync.yaml | 2 +- .../hooks/charmhelpers/fetch/__init__.py | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 0963bbcd..0839dc50 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/icehouse destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index c05e0335..97a19912 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] From 
a05deb22f79dacd35913ed0f880900282d47ffa7 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Mar 2014 13:41:32 +0000 Subject: [PATCH 0309/2699] Resynced helpers --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 0963bbcd..0839dc50 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/icehouse destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index c05e0335..97a19912 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] From a60cf60b5c31e6cf9eb26dfebad9ddf1295215b9 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Mar 2014 13:41:40 +0000 Subject: [PATCH 0310/2699] Resynced helpers --- ceph-osd/charm-helpers-sync.yaml | 2 +- .../contrib/storage/linux/utils.py | 3 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 6 ++++ ceph-osd/hooks/charmhelpers/core/host.py | 12 +++++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 33 +++++++++++++++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 15 +++++++++ 6 files changed, 64 insertions(+), 7 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index c8ee8f59..bd0d84f4 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/icehouse destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index c8c81b28..cfd26847 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 1f4f6315..97a19912 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] @@ -135,8 +158,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. 
Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -156,7 +183,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From 3fe9afdc390f6e1affb28d6831e3907ebe2cd000 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 12:26:40 +0000 Subject: [PATCH 0311/2699] [hopem] synced charm-helpers --- .../contrib/storage/linux/utils.py | 3 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 6 ++ ceph-proxy/hooks/charmhelpers/core/host.py | 56 ++++++++++++++++++- .../hooks/charmhelpers/fetch/__init__.py | 43 +++++++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 15 +++++ 5 files changed, 116 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 4a6a4a8c..cfd26847 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap @@ -245,3 +251,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..97a19912 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', 
+ 'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } @@ -89,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] @@ -127,8 +158,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -148,7 +183,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From e6d497962a770c911748951145cb9d909f5f49da Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 12:26:40 +0000 Subject: [PATCH 0312/2699] [hopem] synced charm-helpers --- .../contrib/storage/linux/utils.py | 3 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 6 ++ 
ceph-mon/hooks/charmhelpers/core/host.py | 56 ++++++++++++++++++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 43 +++++++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 15 +++++ 5 files changed, 116 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 4a6a4a8c..cfd26847 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap @@ -245,3 +251,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = 
subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..97a19912 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } @@ -89,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] @@ -127,8 +158,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. 
Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -148,7 +183,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From 035ae4989afd1f162fa7e62248bd7af308d9c5c0 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 12:26:40 +0000 Subject: [PATCH 0313/2699] [hopem] synced charm-helpers --- .../contrib/storage/linux/utils.py | 3 +- .../hooks/charmhelpers/core/hookenv.py | 6 ++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 12 +++++-- .../hooks/charmhelpers/fetch/__init__.py | 33 +++++++++++++++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 15 +++++++++ 5 files changed, 63 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index c40218f0..5349c3ea 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
''' - check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index c8c81b28..cfd26847 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 1f4f6315..97a19912 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] @@ -135,8 +158,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. 
Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -156,7 +183,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From 8896e68090e1d502879494ec2bd1b1259f9f0eb1 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 12:26:41 +0000 Subject: [PATCH 0314/2699] [hopem] synced charm-helpers --- .../contrib/storage/linux/utils.py | 3 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 6 ++ ceph-osd/hooks/charmhelpers/core/host.py | 56 ++++++++++++++++++- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 43 +++++++++++++- .../hooks/charmhelpers/fetch/archiveurl.py | 15 +++++ 5 files changed, 116 insertions(+), 7 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 5b9b6d47..5349c3ea 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -22,4 +22,5 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
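Alongside the fetch changes, this round of syncs extends restart_on_change with a stopstart flag (see the core/host.py hunks in this and the neighbouring patches): instead of restarting each affected service in place, it can stop every service first and only then start them all, which can matter when the services depend on one another. A toy sketch of just the ordering logic:

    from collections import OrderedDict

    def plan_actions(services, stopstart=False):
        # Deduplicate while preserving order, as the helper does.
        services = list(OrderedDict.fromkeys(services))
        if not stopstart:
            return [('restart', s) for s in services]
        # stopstart=True: all stops first, then all starts.
        return [(a, s) for a in ('stop', 'start') for s in services]

    print(plan_actions(['apache2', 'haproxy', 'apache2'], stopstart=True))
    # [('stop', 'apache2'), ('stop', 'haproxy'),
    #  ('start', 'apache2'), ('start', 'haproxy')]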
''' - check_call(['sgdisk', '--zap-all', block_device]) + check_call(['sgdisk', '--zap-all', '--clear', + '--mbrtogpt', block_device]) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index bb196dfa..505c202d 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -8,6 +8,7 @@ import json import yaml import subprocess +import sys import UserDict from subprocess import CalledProcessError @@ -149,6 +150,11 @@ def service_name(): return local_unit().split('/')[0] +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + @cached def config(scope=None): """Juju charm configuration""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 4a6a4a8c..cfd26847 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -194,7 +194,7 @@ def file_hash(path): return None -def restart_on_change(restart_map): +def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing This function is used a decorator, for example @@ -219,8 +219,14 @@ def wrapped_f(*args): for path in restart_map: if checksums[path] != file_hash(path): restarts += restart_map[path] - for service_name in list(OrderedDict.fromkeys(restarts)): - service('restart', service_name) + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) return wrapped_f return wrap @@ -245,3 +251,47 @@ def pwgen(length=None): random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, basestring): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + interfaces.append(line.split()[1].replace(":", "")) + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd) + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index f83e7b7d..97a19912 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -44,8 +44,16 @@ 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', 'havana/proposed': 'precise-proposed/havana', - 'precies-havana/proposed': 'precise-proposed/havana', + 
'precise-havana/proposed': 'precise-proposed/havana', 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', } @@ -89,6 +97,29 @@ def apt_install(packages, options=None, fatal=False): subprocess.call(cmd, env=env) +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + + env = os.environ.copy() + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + subprocess.check_call(cmd, env=env) + else: + subprocess.call(cmd, env=env) + + def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] @@ -127,8 +158,12 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + if source is None: + log('Source is not present. Skipping') + return + if (source.startswith('ppa:') or - source.startswith('http:') or + source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): subprocess.check_call(['add-apt-repository', '--yes', source]) @@ -148,7 +183,9 @@ def add_source(source, key=None): with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) if key: - subprocess.check_call(['apt-key', 'import', key]) + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'keyserver.ubuntu.com', '--recv', + key]) class SourceConfigError(Exception): diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index e35b8f15..87e7071a 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -1,5 +1,7 @@ import os import urllib2 +import urlparse + from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource @@ -24,6 +26,19 @@ def can_handle(self, source): def download(self, source, dest): # propogate all exceptions # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + if proto in ('http', 'https'): + auth, barehost = urllib2.splituser(netloc) + if auth is not None: + source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) + username, password = urllib2.splitpasswd(auth) + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) response = urllib2.urlopen(source) try: with open(dest, 'w') as dest_file: From 17021e0d60a3e5a3818f6e7cd0b2667c9b2a9edc Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Mar 2014 17:03:59 +0000 Subject: [PATCH 0315/2699] Resynced helpers --- ceph-proxy/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml 
b/ceph-proxy/charm-helpers-sync.yaml index 0839dc50..0963bbcd 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/icehouse +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 64b32f50f00084b4843c0c86ebf249155dbc1bf9 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Mar 2014 17:03:59 +0000 Subject: [PATCH 0316/2699] Resynced helpers --- ceph-mon/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 0839dc50..0963bbcd 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/icehouse +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 7a31744a89b32be456d8483c99135eeeecef545e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 25 Mar 2014 17:04:06 +0000 Subject: [PATCH 0317/2699] Resynced helpers --- ceph-osd/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index bd0d84f4..c8ee8f59 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/icehouse +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 42e58652a8f523c961e8ee017e2f9f1d3d9cb5eb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 18:44:22 +0000 Subject: [PATCH 0318/2699] [hopem] Added use-syslog cfg option to allow logging to syslog --- ceph-proxy/config.yaml | 5 +++++ ceph-proxy/hooks/hooks.py | 3 ++- ceph-proxy/revision | 2 +- ceph-proxy/templates/ceph.conf | 4 ++++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 654eb887..6adafb29 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -110,3 +110,8 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. 
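The config.yaml stanza above only declares the option; the hooks.py hunk that follows feeds it into the ceph.conf template context as a lowercase string, since Juju hands the charm a Python boolean while ceph.conf expects the literal tokens true/false. The conversion is simply:

    def syslog_context(use_syslog):
        # Juju's config() yields a Python bool; ceph.conf wants the
        # literal strings "true"/"false".
        return {'use_syslog': str(use_syslog).lower()}

    assert syslog_context(True) == {'use_syslog': 'true'}
    assert syslog_context(False) == {'use_syslog': 'false'}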
diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 82fb3323..11e1cab8 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -72,7 +72,8 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), 'version': ceph.get_ceph_version(), - 'osd_journal_size': config('osd-journal-size') + 'osd_journal_size': config('osd-journal-size'), + 'use_syslog': str(config('use-syslog')).lower() } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-proxy/revision b/ceph-proxy/revision index b16e5f75..ffda4e73 100644 --- a/ceph-proxy/revision +++ b/ceph-proxy/revision @@ -1 +1 @@ -104 +105 \ No newline at end of file diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 107beece..d82ca820 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -9,6 +9,10 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} + err to syslog = {{ use_syslog }} + clog to syslog = {{ use_syslog }} + mon cluster log to syslog = {{ use_syslog }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From ec86551e7b6e916f087e7b449641b0b6bacf74f5 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 18:44:22 +0000 Subject: [PATCH 0319/2699] [hopem] Added use-syslog cfg option to allow logging to syslog --- ceph-mon/config.yaml | 5 +++++ ceph-mon/hooks/hooks.py | 3 ++- ceph-mon/revision | 2 +- ceph-mon/templates/ceph.conf | 4 ++++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 654eb887..6adafb29 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -110,3 +110,8 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. 
diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 82fb3323..11e1cab8 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -72,7 +72,8 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), 'version': ceph.get_ceph_version(), - 'osd_journal_size': config('osd-journal-size') + 'osd_journal_size': config('osd-journal-size'), + 'use_syslog': str(config('use-syslog')).lower() } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-mon/revision b/ceph-mon/revision index b16e5f75..ffda4e73 100644 --- a/ceph-mon/revision +++ b/ceph-mon/revision @@ -1 +1 @@ -104 +105 \ No newline at end of file diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 107beece..d82ca820 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -9,6 +9,10 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} + err to syslog = {{ use_syslog }} + clog to syslog = {{ use_syslog }} + mon cluster log to syslog = {{ use_syslog }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From d3532b8907dd5c8d1a373fb5bfee46b0da5bc66a Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 18:44:22 +0000 Subject: [PATCH 0320/2699] [hopem] Added use-syslog cfg option to allow logging to syslog --- ceph-radosgw/config.yaml | 5 +++++ ceph-radosgw/hooks/hooks.py | 3 ++- ceph-radosgw/revision | 2 +- ceph-radosgw/templates/ceph.conf | 3 +++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 8f3235f2..0cb65e7e 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -41,3 +41,8 @@ options: default: 600 type: int description: Interval between revocation checks to keystone. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. 
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3f125f0d..9073a252 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -73,7 +73,8 @@ def emit_cephconf(): 'auth_supported': get_auth() or 'none', 'mon_hosts': ' '.join(get_mon_hosts()), 'hostname': get_unit_hostname(), - 'version': ceph.get_ceph_version('radosgw') + 'version': ceph.get_ceph_version('radosgw'), + 'use_syslog': str(config('use-syslog')).lower() } # Check to ensure that correct version of ceph is diff --git a/ceph-radosgw/revision b/ceph-radosgw/revision index 7273c0fa..6f4247a6 100644 --- a/ceph-radosgw/revision +++ b/ceph-radosgw/revision @@ -1 +1 @@ -25 +26 diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index b71c20f4..14f088f7 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -7,6 +7,9 @@ auth client required = {{ auth_supported }} {% endif %} mon host = {{ mon_hosts }} + log to syslog = {{ use_syslog }} + err to syslog = {{ use_syslog }} + clog to syslog = {{ use_syslog }} [client.radosgw.gateway] host = {{ hostname }} From d3fc3e95381d6873cf288fd542c5f546ab96344b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 25 Mar 2014 18:44:23 +0000 Subject: [PATCH 0321/2699] [hopem] Added use-syslog cfg option to allow logging to syslog --- ceph-osd/config.yaml | 5 +++++ ceph-osd/hooks/hooks.py | 3 ++- ceph-osd/revision | 2 +- ceph-osd/templates/ceph.conf | 3 +++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index dfeae00b..43176274 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -78,3 +78,8 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. 
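All four charms gain the same syslog lines in templates/ceph.conf (the mon-side templates additionally set "mon cluster log to syslog"); rendering is a plain Jinja2-style substitution of the use_syslog value built above. A self-contained sketch, assuming the jinja2 package (the charms' own render helper is not shown in these patches):

    from jinja2 import Template

    SNIPPET = ("log to syslog = {{ use_syslog }}\n"
               "err to syslog = {{ use_syslog }}\n"
               "clog to syslog = {{ use_syslog }}\n")

    print(Template(SNIPPET).render(use_syslog='true'))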
diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 7b536b8d..1340b993 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -71,7 +71,8 @@ def emit_cephconf(): 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid(), 'version': ceph.get_ceph_version(), - 'osd_journal_size': config('osd-journal-size') + 'osd_journal_size': config('osd-journal-size'), + 'use_syslog': str(config('use-syslog')).lower() } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-osd/revision b/ceph-osd/revision index 8351c193..3f10ffe7 100644 --- a/ceph-osd/revision +++ b/ceph-osd/revision @@ -1 +1 @@ -14 +15 \ No newline at end of file diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 107beece..cd81d440 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -9,6 +9,9 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} + err to syslog = {{ use_syslog }} + clog to syslog = {{ use_syslog }} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 866bbfc447787cd0b533f718f3c6e6a2c30caeee Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 14 Apr 2014 15:39:48 +0100 Subject: [PATCH 0322/2699] Resync helpers for better disk cleaning --- .../charmhelpers/contrib/storage/linux/utils.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 5349c3ea..eed99ae3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -2,7 +2,9 @@ from stat import S_ISBLK from subprocess import ( - check_call + check_call, + check_output, + call ) @@ -22,5 +24,12 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' - check_call(['sgdisk', '--zap-all', '--clear', - '--mbrtogpt', block_device]) + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--mbrtogpt', + '--clear', block_device]) + dev_end = check_output(['blockdev', '--getsz', block_device]) + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) From 5840a641c6d4a71b65e1a02d177095db192f8263 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 14 Apr 2014 15:39:48 +0100 Subject: [PATCH 0323/2699] Resync helpers for better disk cleaning --- .../charmhelpers/contrib/storage/linux/utils.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 5349c3ea..eed99ae3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -2,7 +2,9 @@ from stat import S_ISBLK from subprocess import ( - check_call + check_call, + check_output, + call ) @@ -22,5 +24,12 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
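The three "better disk cleaning" patches rewrite zap_disk around a known sgdisk quirk: it sometimes exits non-zero even when the zap succeeded, so the call is demoted from check_call to call and the on-disk metadata is scrubbed explicitly with dd. Zeroing the first 1 MiB clears the MBR and primary GPT header; zeroing the last 100 sectors clears the backup GPT stored at the end of the disk (blockdev --getsz reports the size in 512-byte sectors). The seek arithmetic, as a standalone sketch:

    from subprocess import check_output

    def backup_gpt_seek(block_device, tail_sectors=100):
        # blockdev --getsz prints the device size in 512-byte sectors;
        # dd seeks here and zeroes the final sectors, where the backup
        # GPT header and partition entries live.
        size = int(check_output(['blockdev', '--getsz',
                                 block_device]).split()[0])
        return size - tail_sectors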
''' - check_call(['sgdisk', '--zap-all', '--clear', - '--mbrtogpt', block_device]) + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--mbrtogpt', + '--clear', block_device]) + dev_end = check_output(['blockdev', '--getsz', block_device]) + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) From 6bf6f083046ae01e909c9aa65db591f361bf786f Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 14 Apr 2014 15:40:20 +0100 Subject: [PATCH 0324/2699] Resync helpers for better disk cleaning --- .../charmhelpers/contrib/storage/linux/utils.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 5349c3ea..eed99ae3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -2,7 +2,9 @@ from stat import S_ISBLK from subprocess import ( - check_call + check_call, + check_output, + call ) @@ -22,5 +24,12 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' - check_call(['sgdisk', '--zap-all', '--clear', - '--mbrtogpt', block_device]) + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--mbrtogpt', + '--clear', block_device]) + dev_end = check_output(['blockdev', '--getsz', block_device]) + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) From c2ee3c57fcc27528ab9f95bb348963750aa094fd Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski R Date: Thu, 24 Apr 2014 13:27:30 -0400 Subject: [PATCH 0325/2699] Applies patch for fixing bug lp:1308557 --- ceph-radosgw/hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 97a19912..dce7db4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -184,7 +184,7 @@ def add_source(source, key=None): apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'keyserver.ubuntu.com', '--recv', + 'hkp://keyserver.ubuntu.com:80', '--recv', key]) From 7c423e4424e88653cf7fd607b35cab6ad8328831 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski R Date: Thu, 24 Apr 2014 13:28:21 -0400 Subject: [PATCH 0326/2699] Applies patch for fixing bug lp:1308557 --- ceph-proxy/hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 97a19912..dce7db4c 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -184,7 +184,7 @@ def add_source(source, key=None): apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'keyserver.ubuntu.com', '--recv', + 'hkp://keyserver.ubuntu.com:80', '--recv', key]) From b39f745dbcfe79fe8543527c677d4d5f0ba43676 Mon Sep 17 
00:00:00 2001 From: Jorge Niedbalski R Date: Thu, 24 Apr 2014 13:28:21 -0400 Subject: [PATCH 0327/2699] Applies patch for fixing bug lp:1308557 --- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 97a19912..dce7db4c 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -184,7 +184,7 @@ def add_source(source, key=None): apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'keyserver.ubuntu.com', '--recv', + 'hkp://keyserver.ubuntu.com:80', '--recv', key]) From 2aee69ef97a5140d579b579a44b1c0e8ad2ddb94 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 2 May 2014 10:53:33 +0100 Subject: [PATCH 0328/2699] Update charmhelpers alongside other ceph charms, add publish target to makefile --- ceph-osd/Makefile | 4 ++++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 9da53536..18a2771c 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -6,3 +6,7 @@ lint: sync: @charm-helper-sync -c charm-helpers-sync.yaml + +publish: + bzr push lp:charms/ceph-osd + bzr push lp:charms/trusty/ceph-osd diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 97a19912..dce7db4c 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -184,7 +184,7 @@ def add_source(source, key=None): apt.write(PROPOSED_POCKET.format(release)) if key: subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'keyserver.ubuntu.com', '--recv', + 'hkp://keyserver.ubuntu.com:80', '--recv', key]) From c3280c379f5d34eda0615a7856ebc3d1111a977f Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 13:37:27 +0200 Subject: [PATCH 0329/2699] Updating charm-helpers from lp:charm-helpers revision 153 --- .../contrib/storage/linux/utils.py | 33 +++- .../hooks/charmhelpers/core/hookenv.py | 99 ++++++++++- ceph-radosgw/hooks/charmhelpers/core/host.py | 14 ++ .../hooks/charmhelpers/fetch/__init__.py | 160 +++++++++++------- 4 files changed, 236 insertions(+), 70 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 5349c3ea..b87ef26d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,8 +1,11 @@ -from os import stat +import os +import re from stat import S_ISBLK from subprocess import ( - check_call + check_call, + check_output, + call ) @@ -12,7 +15,9 @@ def is_block_device(path): :returns: boolean: True if path is a block device, False if not. ''' - return S_ISBLK(stat(path).st_mode) + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) def zap_disk(block_device): @@ -22,5 +27,23 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. 
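The lp:1308557 fixes in patches 0325 through 0328 are one-liners: the keyserver argument changes from the bare hostname to hkp://keyserver.ubuntu.com:80. HKP normally runs on port 11371, which restrictive egress firewalls often block, while port 80 is almost universally reachable. The resulting call, exactly as add_source now issues it:

    import subprocess

    def import_key(key_id):
        # Port 80 instead of HKP's default 11371, which firewalls
        # commonly block (lp:1308557).
        subprocess.check_call(['apt-key', 'adv',
                               '--keyserver', 'hkp://keyserver.ubuntu.com:80',
                               '--recv', key_id])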
''' - check_call(['sgdisk', '--zap-all', '--clear', - '--mbrtogpt', block_device]) + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--mbrtogpt', + '--clear', block_device]) + dev_end = check_output(['blockdev', '--getsz', block_device]) + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. + ''' + out = check_output(['mount']) + return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 505c202d..c2e66f66 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -155,6 +155,100 @@ def hook_name(): return os.path.basename(sys.argv[0]) +class Config(dict): + """A Juju charm config dictionary that can write itself to + disk (as json) and track which values have changed since + the previous hook invocation. + + Do not instantiate this object directly - instead call + ``hookenv.config()`` + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> config['mykey'] = 'myval' + >>> config.save() + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + >>> # don't forget to save at the end of hook! + >>> config.save() + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + + def load_previous(self, path=None): + """Load previous copy of config from disk so that current values + can be compared to previous values. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + + def changed(self, key): + """Return true if the value for this key has changed since + the last save. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no "previous" value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + Preserves items in _prev_dict that do not exist in self. 
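With this sync, hookenv.config() called without a scope returns the new Config dictionary rather than a plain dict, so a hook can compare current values against those seen on the previous invocation. The intended pattern, per the class docstring (this only runs inside a Juju hook, where config-get is available):

    from charmhelpers.core import hookenv

    def config_changed():
        cfg = hookenv.config()
        if cfg.changed('osd-devices'):
            pass  # react to the new device list here
        # Persist so the next hook can call previous()/changed().
        cfg.save()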
+ + """ + if self._prev_dict: + for k, v in self._prev_dict.iteritems(): + if k not in self: + self[k] = v + with open(self.path, 'w') as f: + json.dump(self, f) + + @cached def config(scope=None): """Juju charm configuration""" @@ -163,7 +257,10 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads(subprocess.check_output(config_cmd_line)) + if scope is not None: + return config_data + return Config(config_data) except ValueError: return None diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index cfd26847..186147f6 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -12,6 +12,7 @@ import string import subprocess import hashlib +import apt_pkg from collections import OrderedDict @@ -295,3 +296,16 @@ def get_nic_hwaddr(nic): if 'link/ether' in words: hwaddr = words[words.index('link/ether') + 1] return hwaddr + + +def cmp_pkgrevno(package, revno, pkgcache=None): + '''Compare supplied revno with the revno of the installed package + 1 => Installed revno is greater than supplied arg + 0 => Installed revno is the same as supplied arg + -1 => Installed revno is less than supplied arg + ''' + if not pkgcache: + apt_pkg.init() + pkgcache = apt_pkg.Cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index dce7db4c..e1e17dae 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +import time from yaml import safe_load from charmhelpers.core.host import ( lsb_release @@ -15,6 +16,7 @@ import apt_pkg import os + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,10 +58,62 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', } +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
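core/host.py also gains cmp_pkgrevno, a thin wrapper over python-apt's version comparison, presumably so charms can branch on the installed package version. A hedged usage sketch (requires python-apt and an installed ceph package; the 'ceph' name and '0.56' threshold are illustrative, not taken from these patches):

    from charmhelpers.core.host import cmp_pkgrevno

    # Returns >0 if installed is newer, 0 if equal, <0 if older.
    if cmp_pkgrevno('ceph', '0.56') >= 0:
        print('installed ceph is at least 0.56')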
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + def filter_installed_packages(packages): """Returns a list of packages that require installation""" apt_pkg.init() + + # Tell apt to build an in-memory cache to prevent race conditions (if + # another process is already building the cache). + apt_pkg.config.set("Dir::Cache::pkgcache", "") + cache = apt_pkg.Cache() _pkgs = [] for package in packages: @@ -87,14 +141,7 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_upgrade(options=None, fatal=False, dist=False): @@ -109,24 +156,13 @@ def apt_upgrade(options=None, fatal=False, dist=False): else: cmd.append('upgrade') log("Upgrading with options: {}".format(options)) - - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_purge(packages, fatal=False): @@ -137,10 +173,7 @@ def apt_purge(packages, fatal=False): else: cmd.extend(packages) log("Purging {}".format(packages)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_hold(packages, fatal=False): @@ -151,6 +184,7 @@ def apt_hold(packages, fatal=False): else: cmd.extend(packages) log("Holding {}".format(packages)) + if fatal: subprocess.check_call(cmd) else: @@ -188,10 +222,6 @@ def add_source(source, key=None): key]) -class SourceConfigError(Exception): - pass - - def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): @@ -224,17 +254,6 @@ def configure_sources(update=False, if update: apt_update(fatal=True) -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) - - -class UnhandledSource(Exception): - pass - def install_remote(source): """ @@ -265,30 +284,6 @@ def install_from_config(config_var_name): return install_remote(source) -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. 
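filter_installed_packages also picks up a subtle fix in this sync: apt_pkg is told to build its binary package cache in memory (Dir::Cache::pkgcache set to the empty string) so that two processes regenerating /var/cache/apt/pkgcache.bin at once cannot race each other. A sketch along the same lines using python-apt:

    import apt_pkg

    def missing_packages(packages):
        apt_pkg.init()
        # Build the cache in memory so a concurrent apt run rebuilding
        # the on-disk cache cannot race us.
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        cache = apt_pkg.Cache()
        missing = []
        for name in packages:
            try:
                pkg = cache[name]
            except KeyError:        # unknown to apt entirely
                missing.append(name)
                continue
            if not pkg.current_ver:  # known but not installed
                missing.append(name)
        return missing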
Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - def plugins(fetch_handlers=None): if not fetch_handlers: fetch_handlers = FETCH_HANDLERS @@ -306,3 +301,40 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list + + +def _run_apt_command(cmd, fatal=False): + """ + Run an APT command, checking output and retrying if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError, e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) From e6d3ac98bafde6f64d86f67fa1a306725563c8e1 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 13:38:45 +0200 Subject: [PATCH 0330/2699] Updating charm-helpers from lp:charm-helpers revision 153 --- .../contrib/storage/linux/utils.py | 24 ++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 99 ++++++++++- ceph-proxy/hooks/charmhelpers/core/host.py | 14 ++ .../hooks/charmhelpers/fetch/__init__.py | 160 +++++++++++------- 4 files changed, 227 insertions(+), 70 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index eed99ae3..b87ef26d 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,4 +1,5 @@ -from os import stat +import os +import re from stat import S_ISBLK from subprocess import ( @@ -14,7 +15,9 @@ def is_block_device(path): :returns: boolean: True if path is a block device, False if not. ''' - return S_ISBLK(stat(path).st_mode) + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) def zap_disk(block_device): @@ -29,7 +32,18 @@ def zap_disk(block_device): '--clear', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]) gpt_end = int(dev_end.split()[0]) - 100 - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), - 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. 
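Every apt entry point above (apt_install, apt_upgrade, apt_update, apt_purge) now delegates to _run_apt_command. Its fatal path retries while apt-get exits with status 100, the code apt returns when it cannot take the dpkg lock, sleeping APT_NO_LOCK_RETRY_DELAY seconds between attempts up to APT_NO_LOCK_RETRY_COUNT times. The retry skeleton, reduced to its essentials (unlike the helper, this sketch re-raises immediately on any other failure):

    import subprocess
    import time

    APT_NO_LOCK = 100      # apt-get's "couldn't acquire lock" status
    RETRY_DELAY = 10
    RETRY_COUNT = 30

    def run_fatal(cmd, env=None):
        for attempt in range(RETRY_COUNT + 1):
            try:
                return subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as exc:
                if exc.returncode != APT_NO_LOCK or attempt == RETRY_COUNT:
                    raise
                time.sleep(RETRY_DELAY)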
+ ''' + out = check_output(['mount']) + return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 505c202d..c2e66f66 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -155,6 +155,100 @@ def hook_name(): return os.path.basename(sys.argv[0]) +class Config(dict): + """A Juju charm config dictionary that can write itself to + disk (as json) and track which values have changed since + the previous hook invocation. + + Do not instantiate this object directly - instead call + ``hookenv.config()`` + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> config['mykey'] = 'myval' + >>> config.save() + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + >>> # don't forget to save at the end of hook! + >>> config.save() + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + + def load_previous(self, path=None): + """Load previous copy of config from disk so that current values + can be compared to previous values. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + + def changed(self, key): + """Return true if the value for this key has changed since + the last save. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no "previous" value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + Preserves items in _prev_dict that do not exist in self. 
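The new is_device_mounted helper shipped to all the charms here checks the mount table with a regex of the device path followed by a partition number, so any mounted partition of the disk counts as "mounted", presumably as a guard before destructive operations like zap_disk. A worked example of the match:

    import re

    mount_output = "/dev/sdb1 on /srv type ext4 (rw)\n"
    device = '/dev/sdb'
    print(bool(re.search(device + r"[0-9]+\b", mount_output)))  # True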
+ + """ + if self._prev_dict: + for k, v in self._prev_dict.iteritems(): + if k not in self: + self[k] = v + with open(self.path, 'w') as f: + json.dump(self, f) + + @cached def config(scope=None): """Juju charm configuration""" @@ -163,7 +257,10 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads(subprocess.check_output(config_cmd_line)) + if scope is not None: + return config_data + return Config(config_data) except ValueError: return None diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index cfd26847..186147f6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,6 +12,7 @@ import string import subprocess import hashlib +import apt_pkg from collections import OrderedDict @@ -295,3 +296,16 @@ def get_nic_hwaddr(nic): if 'link/ether' in words: hwaddr = words[words.index('link/ether') + 1] return hwaddr + + +def cmp_pkgrevno(package, revno, pkgcache=None): + '''Compare supplied revno with the revno of the installed package + 1 => Installed revno is greater than supplied arg + 0 => Installed revno is the same as supplied arg + -1 => Installed revno is less than supplied arg + ''' + if not pkgcache: + apt_pkg.init() + pkgcache = apt_pkg.Cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index dce7db4c..e1e17dae 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +import time from yaml import safe_load from charmhelpers.core.host import ( lsb_release @@ -15,6 +16,7 @@ import apt_pkg import os + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,10 +58,62 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', } +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + def filter_installed_packages(packages): """Returns a list of packages that require installation""" apt_pkg.init() + + # Tell apt to build an in-memory cache to prevent race conditions (if + # another process is already building the cache). + apt_pkg.config.set("Dir::Cache::pkgcache", "") + cache = apt_pkg.Cache() _pkgs = [] for package in packages: @@ -87,14 +141,7 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_upgrade(options=None, fatal=False, dist=False): @@ -109,24 +156,13 @@ def apt_upgrade(options=None, fatal=False, dist=False): else: cmd.append('upgrade') log("Upgrading with options: {}".format(options)) - - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_purge(packages, fatal=False): @@ -137,10 +173,7 @@ def apt_purge(packages, fatal=False): else: cmd.extend(packages) log("Purging {}".format(packages)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_hold(packages, fatal=False): @@ -151,6 +184,7 @@ def apt_hold(packages, fatal=False): else: cmd.extend(packages) log("Holding {}".format(packages)) + if fatal: subprocess.check_call(cmd) else: @@ -188,10 +222,6 @@ def add_source(source, key=None): key]) -class SourceConfigError(Exception): - pass - - def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): @@ -224,17 +254,6 @@ def configure_sources(update=False, if update: apt_update(fatal=True) -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) - - -class UnhandledSource(Exception): - pass - def install_remote(source): """ @@ -265,30 +284,6 @@ def install_from_config(config_var_name): return install_remote(source) -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. 
Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - def plugins(fetch_handlers=None): if not fetch_handlers: fetch_handlers = FETCH_HANDLERS @@ -306,3 +301,40 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list + + +def _run_apt_command(cmd, fatal=False): + """ + Run an APT command, checking output and retrying if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError, e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) From 5edc6c3dbc35965d4ea0ff2c7b1a451032994b24 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 13:38:45 +0200 Subject: [PATCH 0331/2699] Updating charm-helpers from lp:charm-helpers revision 153 --- .../contrib/storage/linux/utils.py | 24 ++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 99 ++++++++++- ceph-mon/hooks/charmhelpers/core/host.py | 14 ++ ceph-mon/hooks/charmhelpers/fetch/__init__.py | 160 +++++++++++------- 4 files changed, 227 insertions(+), 70 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index eed99ae3..b87ef26d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,4 +1,5 @@ -from os import stat +import os +import re from stat import S_ISBLK from subprocess import ( @@ -14,7 +15,9 @@ def is_block_device(path): :returns: boolean: True if path is a block device, False if not. ''' - return S_ISBLK(stat(path).st_mode) + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) def zap_disk(block_device): @@ -29,7 +32,18 @@ def zap_disk(block_device): '--clear', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]) gpt_end = int(dev_end.split()[0]) - 100 - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), - 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. 
+ ''' + out = check_output(['mount']) + return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 505c202d..c2e66f66 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -155,6 +155,100 @@ def hook_name(): return os.path.basename(sys.argv[0]) +class Config(dict): + """A Juju charm config dictionary that can write itself to + disk (as json) and track which values have changed since + the previous hook invocation. + + Do not instantiate this object directly - instead call + ``hookenv.config()`` + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> config['mykey'] = 'myval' + >>> config.save() + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + >>> # don't forget to save at the end of hook! + >>> config.save() + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + + def load_previous(self, path=None): + """Load previous copy of config from disk so that current values + can be compared to previous values. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + + def changed(self, key): + """Return true if the value for this key has changed since + the last save. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no "previous" value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + Preserves items in _prev_dict that do not exist in self. 
+ + """ + if self._prev_dict: + for k, v in self._prev_dict.iteritems(): + if k not in self: + self[k] = v + with open(self.path, 'w') as f: + json.dump(self, f) + + @cached def config(scope=None): """Juju charm configuration""" @@ -163,7 +257,10 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads(subprocess.check_output(config_cmd_line)) + if scope is not None: + return config_data + return Config(config_data) except ValueError: return None diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index cfd26847..186147f6 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,6 +12,7 @@ import string import subprocess import hashlib +import apt_pkg from collections import OrderedDict @@ -295,3 +296,16 @@ def get_nic_hwaddr(nic): if 'link/ether' in words: hwaddr = words[words.index('link/ether') + 1] return hwaddr + + +def cmp_pkgrevno(package, revno, pkgcache=None): + '''Compare supplied revno with the revno of the installed package + 1 => Installed revno is greater than supplied arg + 0 => Installed revno is the same as supplied arg + -1 => Installed revno is less than supplied arg + ''' + if not pkgcache: + apt_pkg.init() + pkgcache = apt_pkg.Cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index dce7db4c..e1e17dae 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +import time from yaml import safe_load from charmhelpers.core.host import ( lsb_release @@ -15,6 +16,7 @@ import apt_pkg import os + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,10 +58,62 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', } +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + def filter_installed_packages(packages): """Returns a list of packages that require installation""" apt_pkg.init() + + # Tell apt to build an in-memory cache to prevent race conditions (if + # another process is already building the cache). + apt_pkg.config.set("Dir::Cache::pkgcache", "") + cache = apt_pkg.Cache() _pkgs = [] for package in packages: @@ -87,14 +141,7 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_upgrade(options=None, fatal=False, dist=False): @@ -109,24 +156,13 @@ def apt_upgrade(options=None, fatal=False, dist=False): else: cmd.append('upgrade') log("Upgrading with options: {}".format(options)) - - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_purge(packages, fatal=False): @@ -137,10 +173,7 @@ def apt_purge(packages, fatal=False): else: cmd.extend(packages) log("Purging {}".format(packages)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_hold(packages, fatal=False): @@ -151,6 +184,7 @@ def apt_hold(packages, fatal=False): else: cmd.extend(packages) log("Holding {}".format(packages)) + if fatal: subprocess.check_call(cmd) else: @@ -188,10 +222,6 @@ def add_source(source, key=None): key]) -class SourceConfigError(Exception): - pass - - def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): @@ -224,17 +254,6 @@ def configure_sources(update=False, if update: apt_update(fatal=True) -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) - - -class UnhandledSource(Exception): - pass - def install_remote(source): """ @@ -265,30 +284,6 @@ def install_from_config(config_var_name): return install_remote(source) -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. 
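Every apt wrapper above now delegates to a single _run_apt_command() helper (added at the bottom of this file), which, when fatal=True, retries while apt exits with APT_NO_LOCK instead of dying on a transient dpkg lock. Call sites are unchanged; a typical fatal install (package list illustrative):

    from charmhelpers.fetch import apt_install, filter_installed_packages

    # With fatal=True this now retries while apt-get exits 100 (lock held),
    # up to APT_NO_LOCK_RETRY_COUNT attempts APT_NO_LOCK_RETRY_DELAY seconds
    # apart (30 x 10s), then lets CalledProcessError propagate.
    apt_install(filter_installed_packages(['ceph', 'gdisk']), fatal=True)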
Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - def plugins(fetch_handlers=None): if not fetch_handlers: fetch_handlers = FETCH_HANDLERS @@ -306,3 +301,40 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list + + +def _run_apt_command(cmd, fatal=False): + """ + Run an APT command, checking output and retrying if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError, e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) From a90277fff45628179d599f571331b56df29398a6 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 13:39:52 +0200 Subject: [PATCH 0332/2699] Updating charm-helpers from lp:charm-helpers revision 153 --- .../contrib/storage/linux/utils.py | 24 ++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 99 ++++++++++- ceph-osd/hooks/charmhelpers/core/host.py | 14 ++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 160 +++++++++++------- 4 files changed, 227 insertions(+), 70 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index eed99ae3..b87ef26d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,4 +1,5 @@ -from os import stat +import os +import re from stat import S_ISBLK from subprocess import ( @@ -14,7 +15,9 @@ def is_block_device(path): :returns: boolean: True if path is a block device, False if not. ''' - return S_ISBLK(stat(path).st_mode) + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) def zap_disk(block_device): @@ -29,7 +32,18 @@ def zap_disk(block_device): '--clear', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]) gpt_end = int(dev_end.split()[0]) - 100 - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) - check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), - 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. 
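is_device_mounted() (closed out just below) scans mount(8) output with a regex rather than grepping /proc/mounts for one hard-coded partition, so any numbered partition of the device counts as mounted. A worked example of the match, with invented mount output:

    import re

    mounts = "/dev/sdb2 on /srv type ext4 (rw)\n"
    # any partition number after the device path matches ...
    assert re.search('/dev/sdb' + r"[0-9]+\b", mounts)
    # ... but a disk mounted whole, with no partition suffix, does not:
    assert not re.search('/dev/sdc' + r"[0-9]+\b",
                         "/dev/sdc on /mnt type ext4 (rw)\n")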
+ ''' + out = check_output(['mount']) + return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 505c202d..c2e66f66 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -155,6 +155,100 @@ def hook_name(): return os.path.basename(sys.argv[0]) +class Config(dict): + """A Juju charm config dictionary that can write itself to + disk (as json) and track which values have changed since + the previous hook invocation. + + Do not instantiate this object directly - instead call + ``hookenv.config()`` + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> config['mykey'] = 'myval' + >>> config.save() + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + >>> # don't forget to save at the end of hook! + >>> config.save() + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + + def load_previous(self, path=None): + """Load previous copy of config from disk so that current values + can be compared to previous values. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + + def changed(self, key): + """Return true if the value for this key has changed since + the last save. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no "previous" value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + Preserves items in _prev_dict that do not exist in self. 
+ + """ + if self._prev_dict: + for k, v in self._prev_dict.iteritems(): + if k not in self: + self[k] = v + with open(self.path, 'w') as f: + json.dump(self, f) + + @cached def config(scope=None): """Juju charm configuration""" @@ -163,7 +257,10 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - return json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads(subprocess.check_output(config_cmd_line)) + if scope is not None: + return config_data + return Config(config_data) except ValueError: return None diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index cfd26847..186147f6 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -12,6 +12,7 @@ import string import subprocess import hashlib +import apt_pkg from collections import OrderedDict @@ -295,3 +296,16 @@ def get_nic_hwaddr(nic): if 'link/ether' in words: hwaddr = words[words.index('link/ether') + 1] return hwaddr + + +def cmp_pkgrevno(package, revno, pkgcache=None): + '''Compare supplied revno with the revno of the installed package + 1 => Installed revno is greater than supplied arg + 0 => Installed revno is the same as supplied arg + -1 => Installed revno is less than supplied arg + ''' + if not pkgcache: + apt_pkg.init() + pkgcache = apt_pkg.Cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index dce7db4c..e1e17dae 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +import time from yaml import safe_load from charmhelpers.core.host import ( lsb_release @@ -15,6 +16,7 @@ import apt_pkg import os + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ @@ -56,10 +58,62 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', } +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + def filter_installed_packages(packages): """Returns a list of packages that require installation""" apt_pkg.init() + + # Tell apt to build an in-memory cache to prevent race conditions (if + # another process is already building the cache). + apt_pkg.config.set("Dir::Cache::pkgcache", "") + cache = apt_pkg.Cache() _pkgs = [] for package in packages: @@ -87,14 +141,7 @@ def apt_install(packages, options=None, fatal=False): cmd.extend(packages) log("Installing {} with options: {}".format(packages, options)) - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_upgrade(options=None, fatal=False, dist=False): @@ -109,24 +156,13 @@ def apt_upgrade(options=None, fatal=False, dist=False): else: cmd.append('upgrade') log("Upgrading with options: {}".format(options)) - - env = os.environ.copy() - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - subprocess.check_call(cmd, env=env) - else: - subprocess.call(cmd, env=env) + _run_apt_command(cmd, fatal) def apt_update(fatal=False): """Update local apt cache""" cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_purge(packages, fatal=False): @@ -137,10 +173,7 @@ def apt_purge(packages, fatal=False): else: cmd.extend(packages) log("Purging {}".format(packages)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) + _run_apt_command(cmd, fatal) def apt_hold(packages, fatal=False): @@ -151,6 +184,7 @@ def apt_hold(packages, fatal=False): else: cmd.extend(packages) log("Holding {}".format(packages)) + if fatal: subprocess.check_call(cmd) else: @@ -188,10 +222,6 @@ def add_source(source, key=None): key]) -class SourceConfigError(Exception): - pass - - def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): @@ -224,17 +254,6 @@ def configure_sources(update=False, if update: apt_update(fatal=True) -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) - - -class UnhandledSource(Exception): - pass - def install_remote(source): """ @@ -265,30 +284,6 @@ def install_from_config(config_var_name): return install_remote(source) -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. 
Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - def plugins(fetch_handlers=None): if not fetch_handlers: fetch_handlers = FETCH_HANDLERS @@ -306,3 +301,40 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list + + +def _run_apt_command(cmd, fatal=False): + """ + Run an APT command, checking output and retrying if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError, e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) From 7da11696d5ddb9124f606e3ad235bc1522797d7a Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 14:15:38 +0200 Subject: [PATCH 0333/2699] Make ceph use the charm-helpers "is_device_mounted" function instead of defining its own. --- ceph-proxy/hooks/ceph.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index d165d26a..cd81cc41 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -23,6 +23,7 @@ from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, + is_device_mounted ) from utils import ( get_unit_hostname, @@ -353,7 +354,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): log('Looks like {} is already an OSD, skipping.'.format(dev)) return - if device_mounted(dev): + if is_device_mounted(dev): log('Looks like {} is in use, skipping.'.format(dev)) return @@ -396,9 +397,5 @@ def osdize_dir(path): subprocess.check_call(cmd) -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 From 9145d99f5d2ed1c84c073ee6344d19ed34027181 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 14:15:38 +0200 Subject: [PATCH 0334/2699] Make ceph use the charm-helpers "is_device_mounted" function instead of defining its own. 
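The helper being deleted recognised only the first partition (dev + '1') in /proc/mounts; the shared is_device_mounted() matches any partition number, so a device carrying only, say, /dev/sdb2 is now skipped as well. After this change the guard in osdize_dev() reduces to (sketch, device name illustrative):

    from charmhelpers.contrib.storage.linux.utils import is_device_mounted
    from charmhelpers.core.hookenv import log

    def skip_if_in_use(dev):
        # was: subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0
        if is_device_mounted(dev):
            log('Looks like {} is in use, skipping.'.format(dev))
            return True
        return False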
--- ceph-mon/hooks/ceph.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index d165d26a..cd81cc41 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -23,6 +23,7 @@ from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, + is_device_mounted ) from utils import ( get_unit_hostname, @@ -353,7 +354,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): log('Looks like {} is already an OSD, skipping.'.format(dev)) return - if device_mounted(dev): + if is_device_mounted(dev): log('Looks like {} is in use, skipping.'.format(dev)) return @@ -396,9 +397,5 @@ def osdize_dir(path): subprocess.check_call(cmd) -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 From d37c546d68b9595d7201ac6282b2330ec2503d26 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Mon, 19 May 2014 14:16:15 +0200 Subject: [PATCH 0335/2699] Make ceph-osd use the charm-helpers "is_device_mounted" function instead of defining its own. --- ceph-osd/hooks/ceph.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index fca22549..21b919df 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -23,6 +23,7 @@ from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, + is_device_mounted, ) from utils import ( get_unit_hostname, @@ -340,7 +341,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): log('Looks like {} is already an OSD, skipping.'.format(dev)) return - if device_mounted(dev): + if is_device_mounted(dev): log('Looks like {} is in use, skipping.'.format(dev)) return @@ -383,9 +384,5 @@ def osdize_dir(path): subprocess.check_call(cmd) -def device_mounted(dev): - return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0 - - def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 From fa965f2fe7b06c94a17f79ffe52e6cd448763ad4 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 21 May 2014 11:09:21 +0100 Subject: [PATCH 0336/2699] [trivial] Add publish target to Makefile --- ceph-proxy/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 71dfd409..cc53a956 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -6,3 +6,7 @@ lint: sync: @charm-helper-sync -c charm-helpers-sync.yaml + +publish: lint + bzr push lp:charms/ceph + bzr push lp:charms/trusty/ceph From 31476fe4418c1a20c7a6b86a8f26a87439b0c269 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 21 May 2014 11:09:21 +0100 Subject: [PATCH 0337/2699] [trivial] Add publish target to Makefile --- ceph-mon/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 71dfd409..cc53a956 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -6,3 +6,7 @@ lint: sync: @charm-helper-sync -c charm-helpers-sync.yaml + +publish: lint + bzr push lp:charms/ceph + bzr push lp:charms/trusty/ceph From 5aa62be18c232fa87977febfb6623f53a2bcb648 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 21 May 2014 11:10:19 +0100 Subject: [PATCH 0338/2699] [trivial] Add publish target to makefile --- ceph-radosgw/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 
71dfd409..cc53a956 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -6,3 +6,7 @@ lint: sync: @charm-helper-sync -c charm-helpers-sync.yaml + +publish: lint + bzr push lp:charms/ceph + bzr push lp:charms/trusty/ceph From 6107270ebc912ce01dd068be689a22451b615bd4 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 21 May 2014 11:10:54 +0100 Subject: [PATCH 0339/2699] [trivial] Fixup target for publish --- ceph-radosgw/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index cc53a956..8b8b72d0 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -8,5 +8,5 @@ sync: @charm-helper-sync -c charm-helpers-sync.yaml publish: lint - bzr push lp:charms/ceph - bzr push lp:charms/trusty/ceph + bzr push lp:charms/ceph-radosgw + bzr push lp:charms/trusty/ceph-radosgw From 645750eeb471f8662c40341b496fa2cf70848306 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Jun 2014 14:06:51 +0100 Subject: [PATCH 0340/2699] resync helpers for juno support --- ceph-proxy/hooks/charmhelpers/fetch/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index e1e17dae..e8e837a5 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -56,6 +56,15 @@ 'icehouse/proposed': 'precise-proposed/icehouse', 'precise-icehouse/proposed': 'precise-proposed/icehouse', 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', } # The order of this list is very important. Handlers should be listed in from From ba95cc56d798595394f94b563efa1b7ecbe90bea Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Jun 2014 14:06:51 +0100 Subject: [PATCH 0341/2699] resync helpers for juno support --- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index e1e17dae..e8e837a5 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -56,6 +56,15 @@ 'icehouse/proposed': 'precise-proposed/icehouse', 'precise-icehouse/proposed': 'precise-proposed/icehouse', 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', } # The order of this list is very important. 
Handlers should be listed in from From da26bb185ecd347c1c6b45d615824ee8030a992c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Jun 2014 14:07:02 +0100 Subject: [PATCH 0342/2699] resync helpers for juno support --- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index e1e17dae..e8e837a5 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -56,6 +56,15 @@ 'icehouse/proposed': 'precise-proposed/icehouse', 'precise-icehouse/proposed': 'precise-proposed/icehouse', 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', } # The order of this list is very important. Handlers should be listed in from From bc3dc17838e08b1f6a6cf182afbc6cf9e9702031 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Jun 2014 14:07:17 +0100 Subject: [PATCH 0343/2699] resync helpers for juno support --- ceph-radosgw/hooks/charmhelpers/fetch/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index e1e17dae..e8e837a5 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -56,6 +56,15 @@ 'icehouse/proposed': 'precise-proposed/icehouse', 'precise-icehouse/proposed': 'precise-proposed/icehouse', 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', } # The order of this list is very important. 
Handlers should be listed in from From 9e1334c8713d4779e0da898163294cfd2a76d339 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 6 Jun 2014 13:29:32 +0100 Subject: [PATCH 0344/2699] Add support for splitting public and cluster networks --- ceph-proxy/charm-helpers-sync.yaml | 1 + ceph-proxy/config.yaml | 10 ++ ceph-proxy/hooks/ceph.py | 2 +- .../charmhelpers/contrib/network/__init__.py | 0 .../hooks/charmhelpers/contrib/network/ip.py | 69 +++++++++++ ceph-proxy/hooks/charmhelpers/core/fstab.py | 114 ++++++++++++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 26 +++- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 3 +- ceph-proxy/hooks/hooks.py | 41 +++++-- ceph-proxy/hooks/utils.py | 11 +- ceph-proxy/templates/ceph.conf | 8 ++ 11 files changed, 263 insertions(+), 22 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/network/ip.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/fstab.py diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index 0963bbcd..afb9e42b 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -7,3 +7,4 @@ include: - utils - payload.execd - contrib.openstack.alternatives + - contrib.network.ip diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 6adafb29..d6422431 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -115,3 +115,13 @@ options: default: False description: | If set to True, supporting services will log to syslog. + ceph-public-network: + type: string + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) + ceph-cluster-network: + type: string + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index cd81cc41..734acc8c 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -284,7 +284,7 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) + mkdir('/var/run/ceph', perms=0o755) mkdir(path) # end changes for Ceph >= 0.61.3 try: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py new file mode 100644 index 00000000..44c7c975 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -0,0 +1,69 @@ +import sys + +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + ERROR, log, +) + +try: + import netifaces +except ImportError: + apt_install('python-netifaces') + import netifaces + +try: + import netaddr +except ImportError: + apt_install('python-netaddr') + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def get_address_in_network(network, fallback=None, fatal=False): + """ + Get an IPv4 address within the network from the host. + + Args: + network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + fallback (str): If no address is found, return fallback. 
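get_address_in_network() is the piece the charm uses to pick its address on the ceph public network: it walks the host's interfaces and returns the first IPv4 address falling inside the given CIDR. A sketch, with invented addresses:

    from charmhelpers.contrib.network.ip import get_address_in_network

    # First local IPv4 address inside 192.168.0.0/24; if the unit has no
    # address in that network, the fallback is returned instead.
    addr = get_address_in_network('192.168.0.0/24', fallback='10.0.0.5')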
+ fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ + + def not_found_error_out(): + log("No IP address found in network: %s" % network, + level=ERROR) + sys.exit(1) + + if network is None: + if fallback is not None: + return fallback + else: + if fatal: + not_found_error_out() + + _validate_cidr(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in netaddr.IPNetwork(network): + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + not_found_error_out() + + return None diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cdd72616 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split(" "))) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 
186147f6..46bfd36a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -17,6 +17,7 @@ from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +36,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +146,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +169,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +183,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 11e1cab8..a4df35c6 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -43,6 +43,7 @@ from utils import ( render_template, get_host_ip, + get_public_addr, ) hooks = Hooks() @@ -73,7 +74,9 @@ def emit_cephconf(): 'fsid': config('fsid'), 'version': ceph.get_ceph_version(), 'osd_journal_size': config('osd-journal-size'), - 'use_syslog': str(config('use-syslog')).lower() + 'use_syslog': str(config('use-syslog')).lower(), + 'ceph_public_network': config('ceph-public-network'), + 'ceph_cluster_network': config('ceph-cluster-network'), } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -133,14 +136,13 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(get_host_ip())) + hosts.append('{}:6789'.format(get_public_addr())) for relid in relation_ids('mon'): for unit in related_units(relid): - hosts.append( - 
'{}:6789'.format(get_host_ip(relation_get('private-address', - unit, relid))) - ) + addr = relation_get('ceph_public_addr', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts @@ -160,8 +162,15 @@ def get_devices(): return [] +@hooks.hook('mon-relation-joined') +def mon_relation_joined(): + for relid in relation_ids('mon'): + relation_set(relation_id=relid, + ceph_public_addr=get_public_addr()) + + @hooks.hook('mon-relation-departed', - 'mon-relation-joined') + 'mon-relation-changed') def mon_relation(): log('Begin mon-relation hook.') emit_cephconf() @@ -191,7 +200,8 @@ def notify_osds(): relation_set(relation_id=relid, fsid=config('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) log('End notify_osds.') @@ -202,7 +212,8 @@ def notify_radosgws(): for relid in relation_ids('radosgw'): relation_set(relation_id=relid, radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) log('End notify_radosgws.') @@ -216,7 +227,8 @@ def notify_client(): service_name = units[0].split('/')[0] relation_set(relation_id=relid, key=ceph.get_named_key(service_name), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) log('End notify_client.') @@ -242,7 +254,8 @@ def osd_relation(): log('mon cluster in quorum - providing fsid & keys') relation_set(fsid=config('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring fsid provision') @@ -258,7 +271,8 @@ def radosgw_relation(): if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring key provision') @@ -273,7 +287,8 @@ def client_relation(): log('mon cluster in quorum - providing client with keys') service_name = remote_unit().split('/')[0] relation_set(key=ceph.get_named_key(service_name), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring key provision') diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index c1044a45..b2937f15 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -11,13 +11,16 @@ import re from charmhelpers.core.hookenv import ( unit_get, - cached + cached, + config ) from charmhelpers.fetch import ( apt_install, filter_installed_packages ) +from charmhelpers.contrib.network import ip + TEMPLATES_DIR = 'templates' try: @@ -72,3 +75,9 @@ def get_host_ip(hostname=None): answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address + + +@cached +def get_public_addr(): + return ip.get_address_in_network( + config('ceph-public-network'), fallback=get_host_ip()) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index d82ca820..9626d9ad 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -9,11 +9,19 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} err to syslog 
= {{ use_syslog }} clog to syslog = {{ use_syslog }} mon cluster log to syslog = {{ use_syslog }} +{%- if ceph_public_network is string %} + public network = {{ ceph_public_network }} +{%- endif %} +{%- if ceph_cluster_network is string %} + cluster network = {{ ceph_cluster_network }} +{%- endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 3963d0e0a2a3325e67447142cce7af84ae0000be Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 6 Jun 2014 13:29:32 +0100 Subject: [PATCH 0345/2699] Add support for splitting public and cluster networks --- ceph-mon/charm-helpers-sync.yaml | 1 + ceph-mon/config.yaml | 10 ++ ceph-mon/hooks/ceph.py | 2 +- .../charmhelpers/contrib/network/__init__.py | 0 .../hooks/charmhelpers/contrib/network/ip.py | 69 +++++++++++ ceph-mon/hooks/charmhelpers/core/fstab.py | 114 ++++++++++++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 26 +++- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 3 +- ceph-mon/hooks/hooks.py | 41 +++++-- ceph-mon/hooks/utils.py | 11 +- ceph-mon/templates/ceph.conf | 8 ++ 11 files changed, 263 insertions(+), 22 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/network/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/network/ip.py create mode 100644 ceph-mon/hooks/charmhelpers/core/fstab.py diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index 0963bbcd..afb9e42b 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -7,3 +7,4 @@ include: - utils - payload.execd - contrib.openstack.alternatives + - contrib.network.ip diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 6adafb29..d6422431 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -115,3 +115,13 @@ options: default: False description: | If set to True, supporting services will log to syslog. 
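The ceph-public-network and ceph-cluster-network options (added just below, mirroring the ceph-proxy change above) feed through emit_cephconf() into the "public network" and "cluster network" settings in the ceph.conf template. A hypothetical deployment config exercising the split, with invented subnets:

    # deploy-config.yaml, for: juju deploy --config deploy-config.yaml ceph
    ceph:
      ceph-public-network: 10.20.0.0/24
      ceph-cluster-network: 10.30.0.0/24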
+ ceph-public-network: + type: string + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) + ceph-cluster-network: + type: string + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index cd81cc41..734acc8c 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -284,7 +284,7 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) + mkdir('/var/run/ceph', perms=0o755) mkdir(path) # end changes for Ceph >= 0.61.3 try: diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py new file mode 100644 index 00000000..44c7c975 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -0,0 +1,69 @@ +import sys + +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + ERROR, log, +) + +try: + import netifaces +except ImportError: + apt_install('python-netifaces') + import netifaces + +try: + import netaddr +except ImportError: + apt_install('python-netaddr') + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def get_address_in_network(network, fallback=None, fatal=False): + """ + Get an IPv4 address within the network from the host. + + Args: + network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + fallback (str): If no address is found, return fallback. + fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ + + def not_found_error_out(): + log("No IP address found in network: %s" % network, + level=ERROR) + sys.exit(1) + + if network is None: + if fallback is not None: + return fallback + else: + if fatal: + not_found_error_out() + + _validate_cidr(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in netaddr.IPNetwork(network): + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + not_found_error_out() + + return None diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cdd72616 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split(" "))) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 186147f6..46bfd36a 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -17,6 +17,7 @@ from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +36,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +146,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, 
options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +169,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +183,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 11e1cab8..a4df35c6 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -43,6 +43,7 @@ from utils import ( render_template, get_host_ip, + get_public_addr, ) hooks = Hooks() @@ -73,7 +74,9 @@ def emit_cephconf(): 'fsid': config('fsid'), 'version': ceph.get_ceph_version(), 'osd_journal_size': config('osd-journal-size'), - 'use_syslog': str(config('use-syslog')).lower() + 'use_syslog': str(config('use-syslog')).lower(), + 'ceph_public_network': config('ceph-public-network'), + 'ceph_cluster_network': config('ceph-cluster-network'), } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -133,14 +136,13 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(get_host_ip())) + hosts.append('{}:6789'.format(get_public_addr())) for relid in relation_ids('mon'): for unit in related_units(relid): - hosts.append( - '{}:6789'.format(get_host_ip(relation_get('private-address', - unit, relid))) - ) + addr = relation_get('ceph_public_addr', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts @@ -160,8 +162,15 @@ def get_devices(): return [] +@hooks.hook('mon-relation-joined') +def mon_relation_joined(): + for relid in relation_ids('mon'): + relation_set(relation_id=relid, + ceph_public_addr=get_public_addr()) + + @hooks.hook('mon-relation-departed', - 'mon-relation-joined') + 'mon-relation-changed') def mon_relation(): log('Begin mon-relation hook.') emit_cephconf() @@ -191,7 +200,8 @@ def notify_osds(): relation_set(relation_id=relid, fsid=config('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) log('End notify_osds.') @@ -202,7 +212,8 @@ def notify_radosgws(): for relid in relation_ids('radosgw'): relation_set(relation_id=relid, radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + 
ceph_public_addr=get_public_addr()) log('End notify_radosgws.') @@ -216,7 +227,8 @@ def notify_client(): service_name = units[0].split('/')[0] relation_set(relation_id=relid, key=ceph.get_named_key(service_name), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) log('End notify_client.') @@ -242,7 +254,8 @@ def osd_relation(): log('mon cluster in quorum - providing fsid & keys') relation_set(fsid=config('fsid'), osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring fsid provision') @@ -258,7 +271,8 @@ def radosgw_relation(): if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring key provision') @@ -273,7 +287,8 @@ def client_relation(): log('mon cluster in quorum - providing client with keys') service_name = remote_unit().split('/')[0] relation_set(key=ceph.get_named_key(service_name), - auth=config('auth-supported')) + auth=config('auth-supported'), + ceph_public_addr=get_public_addr()) else: log('mon cluster not in quorum - deferring key provision') diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index c1044a45..b2937f15 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -11,13 +11,16 @@ import re from charmhelpers.core.hookenv import ( unit_get, - cached + cached, + config ) from charmhelpers.fetch import ( apt_install, filter_installed_packages ) +from charmhelpers.contrib.network import ip + TEMPLATES_DIR = 'templates' try: @@ -72,3 +75,9 @@ def get_host_ip(hostname=None): answers = dns.resolver.query(hostname, 'A') if answers: return answers[0].address + + +@cached +def get_public_addr(): + return ip.get_address_in_network( + config('ceph-public-network'), fallback=get_host_ip()) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index d82ca820..9626d9ad 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -9,11 +9,19 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} mon cluster log to syslog = {{ use_syslog }} +{%- if ceph_public_network is string %} + public network = {{ ceph_public_network }} +{%- endif %} +{%- if ceph_cluster_network is string %} + cluster network = {{ ceph_cluster_network }} +{%- endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 7933bc94b65ca1b5c3a0c0585d52623c22fab2cd Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 6 Jun 2014 13:34:39 +0100 Subject: [PATCH 0346/2699] Add support for splitting public and cluster networks --- ceph-osd/charm-helpers-sync.yaml | 1 + ceph-osd/config.yaml | 10 ++ .../charmhelpers/contrib/network/__init__.py | 0 .../hooks/charmhelpers/contrib/network/ip.py | 69 +++++++++++ ceph-osd/hooks/charmhelpers/core/fstab.py | 114 ++++++++++++++++++ ceph-osd/hooks/charmhelpers/core/host.py | 26 +++- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 3 +- ceph-osd/hooks/hooks.py | 12 +- ceph-osd/templates/ceph.conf | 8 ++ 9 files changed, 231 insertions(+), 12 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/network/__init__.py create mode 100644 
ceph-osd/hooks/charmhelpers/contrib/network/ip.py create mode 100644 ceph-osd/hooks/charmhelpers/core/fstab.py diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index c8ee8f59..1d9081b7 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -6,3 +6,4 @@ include: - contrib.storage.linux: - utils - contrib.openstack.alternatives + - contrib.network.ip diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 43176274..767c92a3 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -83,3 +83,13 @@ options: default: False description: | If set to True, supporting services will log to syslog. + ceph-public-network: + type: string + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) + ceph-cluster-network: + type: string + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py new file mode 100644 index 00000000..44c7c975 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -0,0 +1,69 @@ +import sys + +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + ERROR, log, +) + +try: + import netifaces +except ImportError: + apt_install('python-netifaces') + import netifaces + +try: + import netaddr +except ImportError: + apt_install('python-netaddr') + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def get_address_in_network(network, fallback=None, fatal=False): + """ + Get an IPv4 address within the network from the host. + + Args: + network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + fallback (str): If no address is found, return fallback. + fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ + + def not_found_error_out(): + log("No IP address found in network: %s" % network, + level=ERROR) + sys.exit(1) + + if network is None: + if fallback is not None: + return fallback + else: + if fatal: + not_found_error_out() + + _validate_cidr(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in netaddr.IPNetwork(network): + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + not_found_error_out() + + return None diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cdd72616 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split(" "))) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 186147f6..46bfd36a 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -17,6 +17,7 @@ from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +36,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +146,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, 
options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +169,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +183,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 1340b993..a0d6cf42 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -72,7 +72,9 @@ def emit_cephconf(): 'fsid': get_fsid(), 'version': ceph.get_ceph_version(), 'osd_journal_size': config('osd-journal-size'), - 'use_syslog': str(config('use-syslog')).lower() + 'use_syslog': str(config('use-syslog')).lower(), + 'ceph_public_network': config('ceph-public-network'), + 'ceph_cluster_network': config('ceph-cluster-network'), } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -121,10 +123,10 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - hosts.append( - '{}:6789'.format(get_host_ip(relation_get('private-address', - unit, relid))) - ) + addr = relation_get('ceph_public_addr', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) + if addr is not None: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index cd81d440..bd7854a6 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -9,10 +9,18 @@ keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} + log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +{%- if ceph_public_network is string %} + public network = {{ ceph_public_network }} +{%- endif %} +{%- if ceph_cluster_network is string %} + cluster network = {{ ceph_cluster_network }} +{%- endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 67c5066412932eb09bd8956aeb58c66d7709152b Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 6 Jun 2014 14:15:47 +0000 Subject: [PATCH 0347/2699] Add missing changed hook symlink --- ceph-proxy/hooks/mon-relation-changed | 1 + 1 file changed, 1 insertion(+) create mode 120000 ceph-proxy/hooks/mon-relation-changed diff --git a/ceph-proxy/hooks/mon-relation-changed b/ceph-proxy/hooks/mon-relation-changed new file mode 120000 index 
00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/mon-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From e45332c545c91b454d20a4748648d9ff1e0a1c18 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 6 Jun 2014 14:15:47 +0000 Subject: [PATCH 0348/2699] Add missing changed hook symlink --- ceph-mon/hooks/mon-relation-changed | 1 + 1 file changed, 1 insertion(+) create mode 120000 ceph-mon/hooks/mon-relation-changed diff --git a/ceph-mon/hooks/mon-relation-changed b/ceph-mon/hooks/mon-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/mon-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 75e40844d7860ba32f8a02d447aefe680c4bb3ac Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Jun 2014 15:19:21 +0100 Subject: [PATCH 0349/2699] Rework to use ceph-public-address, drop logging --- ceph-proxy/hooks/hooks.py | 104 ++++++++++++++------------------------ ceph-proxy/hooks/utils.py | 4 +- 2 files changed, 40 insertions(+), 68 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a4df35c6..cc049cb0 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -58,13 +58,11 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): - log('Begin install hook.') execd_preinstall() add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - log('End install hook.') def emit_cephconf(): @@ -92,8 +90,6 @@ def emit_cephconf(): @hooks.hook('config-changed') def config_changed(): - log('Begin config-changed hook.') - log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks @@ -131,8 +127,6 @@ def config_changed(): reformat_osd()) ceph.start_osds(get_devices()) - log('End config-changed hook.') - def get_mon_hosts(): hosts = [] @@ -140,7 +134,7 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph_public_addr', unit, relid) or \ + addr = relation_get('ceph-public-address', unit, relid) or \ get_host_ip(relation_get('private-address', unit, relid)) hosts.append('{}:6789'.format(addr)) @@ -172,7 +166,6 @@ def mon_relation_joined(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): - log('Begin mon-relation hook.') emit_cephconf() moncount = int(config('monitor-count')) @@ -190,47 +183,20 @@ def mon_relation(): log('Not enough mons ({}), punting.' 
.format(len(get_mon_hosts()))) - log('End mon-relation hook.') - def notify_osds(): - log('Begin notify_osds.') - for relid in relation_ids('osd'): - relation_set(relation_id=relid, - fsid=config('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_osds.') + osd_relation(relid) def notify_radosgws(): - log('Begin notify_radosgws.') - for relid in relation_ids('radosgw'): - relation_set(relation_id=relid, - radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_radosgws.') + radosgw_relation(relid) def notify_client(): - log('Begin notify_client.') - for relid in relation_ids('client'): - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] - relation_set(relation_id=relid, - key=ceph.get_named_key(service_name), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_client.') + client_relation(relid) def upgrade_keys(): @@ -247,63 +213,69 @@ def upgrade_keys(): @hooks.hook('osd-relation-joined') -def osd_relation(): - log('Begin osd-relation hook.') - +def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') - relation_set(fsid=config('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + data = { + 'fsid': config('fsid'), + 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring fsid provision') - log('End osd-relation hook.') - @hooks.hook('radosgw-relation-joined') -def radosgw_relation(): - log('Begin radosgw-relation hook.') - +def radosgw_relation(relid=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') - relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + data = { + 'fsid': config('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') - log('End radosgw-relation hook.') - @hooks.hook('client-relation-joined') -def client_relation(): - log('Begin client-relation hook.') - +def client_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') - service_name = remote_unit().split('/')[0] - relation_set(key=ceph.get_named_key(service_name), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + service_name = None + if relid is None: + service_name = remote_unit().split('/')[0] + else: + units = related_units(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + if service_name is not None: + data = { + 'key': ceph.get_named_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') - log('End client-relation hook.') - @hooks.hook('upgrade-charm') def upgrade_charm(): - log('Begin upgrade-charm hook.') 
emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() ceph.update_monfs() upgrade_keys() - log('End upgrade-charm hook.') @hooks.hook('start') diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index b2937f15..21fd1a5f 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -79,5 +79,5 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - return ip.get_address_in_network( - config('ceph-public-network'), fallback=get_host_ip()) + return ip.get_address_in_network(config('ceph-public-network'), + fallback=get_host_ip()) From 3fec5a6ba4f078627e5caab6e75774d4a0f13bc7 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Jun 2014 15:19:21 +0100 Subject: [PATCH 0350/2699] Rework to use ceph-public-address, drop logging --- ceph-mon/hooks/hooks.py | 104 +++++++++++++++------------------------- ceph-mon/hooks/utils.py | 4 +- 2 files changed, 40 insertions(+), 68 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a4df35c6..cc049cb0 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -58,13 +58,11 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): - log('Begin install hook.') execd_preinstall() add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - log('End install hook.') def emit_cephconf(): @@ -92,8 +90,6 @@ def emit_cephconf(): @hooks.hook('config-changed') def config_changed(): - log('Begin config-changed hook.') - log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks @@ -131,8 +127,6 @@ def config_changed(): reformat_osd()) ceph.start_osds(get_devices()) - log('End config-changed hook.') - def get_mon_hosts(): hosts = [] @@ -140,7 +134,7 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph_public_addr', unit, relid) or \ + addr = relation_get('ceph-public-address', unit, relid) or \ get_host_ip(relation_get('private-address', unit, relid)) hosts.append('{}:6789'.format(addr)) @@ -172,7 +166,6 @@ def mon_relation_joined(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): - log('Begin mon-relation hook.') emit_cephconf() moncount = int(config('monitor-count')) @@ -190,47 +183,20 @@ def mon_relation(): log('Not enough mons ({}), punting.' 
.format(len(get_mon_hosts()))) - log('End mon-relation hook.') - def notify_osds(): - log('Begin notify_osds.') - for relid in relation_ids('osd'): - relation_set(relation_id=relid, - fsid=config('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_osds.') + osd_relation(relid) def notify_radosgws(): - log('Begin notify_radosgws.') - for relid in relation_ids('radosgw'): - relation_set(relation_id=relid, - radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_radosgws.') + radosgw_relation(relid) def notify_client(): - log('Begin notify_client.') - for relid in relation_ids('client'): - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] - relation_set(relation_id=relid, - key=ceph.get_named_key(service_name), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) - - log('End notify_client.') + client_relation(relid) def upgrade_keys(): @@ -247,63 +213,69 @@ def upgrade_keys(): @hooks.hook('osd-relation-joined') -def osd_relation(): - log('Begin osd-relation hook.') - +def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') - relation_set(fsid=config('fsid'), - osd_bootstrap_key=ceph.get_osd_bootstrap_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + data = { + 'fsid': config('fsid'), + 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring fsid provision') - log('End osd-relation hook.') - @hooks.hook('radosgw-relation-joined') -def radosgw_relation(): - log('Begin radosgw-relation hook.') - +def radosgw_relation(relid=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) if ceph.is_quorum(): log('mon cluster in quorum - providing radosgw with keys') - relation_set(radosgw_key=ceph.get_radosgw_key(), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + data = { + 'fsid': config('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') - log('End radosgw-relation hook.') - @hooks.hook('client-relation-joined') -def client_relation(): - log('Begin client-relation hook.') - +def client_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') - service_name = remote_unit().split('/')[0] - relation_set(key=ceph.get_named_key(service_name), - auth=config('auth-supported'), - ceph_public_addr=get_public_addr()) + service_name = None + if relid is None: + service_name = remote_unit().split('/')[0] + else: + units = related_units(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + if service_name is not None: + data = { + 'key': ceph.get_named_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + } + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') - log('End client-relation hook.') - @hooks.hook('upgrade-charm') def upgrade_charm(): - log('Begin upgrade-charm hook.') 
emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) install_upstart_scripts() ceph.update_monfs() upgrade_keys() - log('End upgrade-charm hook.') @hooks.hook('start') diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index b2937f15..21fd1a5f 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -79,5 +79,5 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - return ip.get_address_in_network( - config('ceph-public-network'), fallback=get_host_ip()) + return ip.get_address_in_network(config('ceph-public-network'), + fallback=get_host_ip()) From bce6dcd6f6f555db04d3b3cab7d029250fa69374 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Jun 2014 15:21:02 +0100 Subject: [PATCH 0351/2699] Rework for ceph-public-address, drop logging --- ceph-osd/hooks/hooks.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index a0d6cf42..a7f3f371 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -54,12 +54,10 @@ def install_upstart_scripts(): @hooks.hook('install') def install(): - log('Begin install hook.') add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() - log('End install hook.') def emit_cephconf(): @@ -90,8 +88,6 @@ def emit_cephconf(): @hooks.hook('config-changed') def config_changed(): - log('Begin config-changed hook.') - # Pre-flight checks if config('osd-format') not in ceph.DISK_FORMATS: log('Invalid OSD disk format configuration specified', level=ERROR) @@ -116,14 +112,12 @@ def config_changed(): config('osd-journal'), config('osd-reformat')) ceph.start_osds(get_devices()) - log('End config-changed hook.') - def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph_public_addr', unit, relid) or \ + addr = relation_get('ceph-public-address', unit, relid) or \ get_host_ip(relation_get('private-address', unit, relid)) if addr is not None: hosts.append('{}:6789'.format(addr)) @@ -167,8 +161,6 @@ def get_devices(): @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): - log('Begin mon-relation hook.') - bootstrap_key = relation_get('osd_bootstrap_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') @@ -181,18 +173,14 @@ def mon_relation(): else: log('mon cluster has not yet provided conf') - log('End mon-relation hook.') - @hooks.hook('upgrade-charm') def upgrade_charm(): - log('Begin upgrade-charm hook.') if get_fsid() and get_auth(): emit_cephconf() install_upstart_scripts() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) - log('End upgrade-charm hook.') if __name__ == '__main__': From d2e6c367051952364bdd3850d6ca02cb48b154ca Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 11:56:36 +0100 Subject: [PATCH 0352/2699] Always use ceph-public-address for quorum --- ceph-proxy/hooks/hooks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index cc049cb0..fa68ff78 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -134,9 +134,9 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) or \ - get_host_ip(relation_get('private-address', unit, relid)) - 
hosts.append('{}:6789'.format(addr)) + addr = relation_get('ceph-public-address', unit, relid) + if addr is not None: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts From 469401490e955547a35629c6c753aeefe7a8556b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 11:56:36 +0100 Subject: [PATCH 0353/2699] Always use ceph-public-address for quorum --- ceph-mon/hooks/hooks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index cc049cb0..fa68ff78 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -134,9 +134,9 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) or \ - get_host_ip(relation_get('private-address', unit, relid)) - hosts.append('{}:6789'.format(addr)) + addr = relation_get('ceph-public-address', unit, relid) + if addr is not None: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts From 2443e3a9fe17b866281589e0f9573c4d279b29f1 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 12:04:57 +0100 Subject: [PATCH 0354/2699] Set correct key on mon-relation-joined, trigger on upgrade for backwards compat --- ceph-proxy/hooks/hooks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index fa68ff78..9041dba6 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -160,7 +160,9 @@ def get_devices(): def mon_relation_joined(): for relid in relation_ids('mon'): relation_set(relation_id=relid, - ceph_public_addr=get_public_addr()) + relation_settings={ + 'ceph-public-address': get_public_addr() + }) @hooks.hook('mon-relation-departed', @@ -276,6 +278,7 @@ def upgrade_charm(): install_upstart_scripts() ceph.update_monfs() upgrade_keys() + mon_relation_joined() @hooks.hook('start') From 0955438ee618270f234e763d4d1a41221071eaaf Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 12:04:57 +0100 Subject: [PATCH 0355/2699] Set correct key on mon-relation-joined, trigger on upgrade for backwards compat --- ceph-mon/hooks/hooks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index fa68ff78..9041dba6 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -160,7 +160,9 @@ def get_devices(): def mon_relation_joined(): for relid in relation_ids('mon'): relation_set(relation_id=relid, - ceph_public_addr=get_public_addr()) + relation_settings={ + 'ceph-public-address': get_public_addr() + }) @hooks.hook('mon-relation-departed', @@ -276,6 +278,7 @@ def upgrade_charm(): install_upstart_scripts() ceph.update_monfs() upgrade_keys() + mon_relation_joined() @hooks.hook('start') From 10b5d5aadb3b8c79b566a573ac73c41bb64a432b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 12:06:09 +0100 Subject: [PATCH 0356/2699] Tidy lint --- ceph-proxy/hooks/hooks.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 9041dba6..26ca452d 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -42,7 +42,6 @@ from utils import ( render_template, - get_host_ip, get_public_addr, ) @@ -160,9 +159,8 @@ def get_devices(): def mon_relation_joined(): for relid in relation_ids('mon'): relation_set(relation_id=relid, - relation_settings={ - 'ceph-public-address': get_public_addr() - }) + 
relation_settings={'ceph-public-address': + get_public_addr()}) @hooks.hook('mon-relation-departed', From e9aede28d0548235040d691b2d523bab1968cc89 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Jun 2014 12:06:09 +0100 Subject: [PATCH 0357/2699] Tidy lint --- ceph-mon/hooks/hooks.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 9041dba6..26ca452d 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -42,7 +42,6 @@ from utils import ( render_template, - get_host_ip, get_public_addr, ) @@ -160,9 +159,8 @@ def get_devices(): def mon_relation_joined(): for relid in relation_ids('mon'): relation_set(relation_id=relid, - relation_settings={ - 'ceph-public-address': get_public_addr() - }) + relation_settings={'ceph-public-address': + get_public_addr()}) @hooks.hook('mon-relation-departed', From 3629548dffbdc7d623f15ce7ed45981493bbef2c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Jun 2014 14:48:27 +0100 Subject: [PATCH 0358/2699] Resync helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 12 +++--- .../contrib/storage/linux/utils.py | 1 + ceph-proxy/hooks/charmhelpers/core/fstab.py | 4 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 9 +++-- ceph-proxy/hooks/charmhelpers/core/host.py | 14 ++++--- .../hooks/charmhelpers/fetch/__init__.py | 40 +++++++++++-------- 6 files changed, 47 insertions(+), 33 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 44c7c975..15a6731c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -30,12 +30,12 @@ def get_address_in_network(network, fallback=None, fatal=False): """ Get an IPv4 address within the network from the host. - Args: - network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - fallback (str): If no address is found, return fallback. - fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ def not_found_error_out(): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. 
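For reference, a minimal sketch of how the resynced get_address_in_network() helper resolves the charm's public address, as wired into get_public_addr() in hooks/utils.py above; the interface layout, CIDR, and resulting address are illustrative assumptions only:

    import netaddr
    import netifaces

    def pick_address(network, fallback=None):
        # Condensed form of get_address_in_network(): return the first
        # IPv4 address on any local interface that falls inside `network`.
        net = netaddr.IPNetwork(network)
        for iface in netifaces.interfaces():
            for entry in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
                cidr = netaddr.IPNetwork("%s/%s" % (entry['addr'], entry['netmask']))
                if cidr in net:
                    return str(cidr.ip)
        return fallback

    # Assuming eth1 carries 10.5.0.12/24, get_public_addr() with
    # ceph-public-network=10.5.0.0/24 would resolve to '10.5.0.12';
    # with no matching interface it falls back to get_host_ip().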
diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index cdd72616..cfaf0a65 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -48,9 +48,11 @@ def __init__(self, path=None): file.__init__(self, self._path, 'r+') def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs return Fstab.Entry(*filter( lambda x: x not in ('', None), - line.strip("\n").split(" "))) + line.strip("\n").split())) @property def entries(self): diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... + pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 46bfd36a..8b617a42 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,7 +12,6 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict @@ -212,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used as a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... 
+ pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -314,10 +313,13 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() pkgcache = apt_pkg.Cache() diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The fragment needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) From e1e136d7b81053c1cd2992df930b85bec93bac8c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Jun 2014 14:48:27 +0100 Subject: [PATCH 0359/2699] Resync helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 12 +++--- .../contrib/storage/linux/utils.py | 1 + ceph-mon/hooks/charmhelpers/core/fstab.py | 4 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 9 +++-- ceph-mon/hooks/charmhelpers/core/host.py | 14 ++++--- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 40 +++++++++++-------- 6 files changed, 47 insertions(+), 33 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 44c7c975..15a6731c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -30,12 +30,12 @@ def get_address_in_network(network, fallback=None, fatal=False): """ Get an IPv4 address within the network from the host. - Args: - network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - fallback (str): If no address is found, return fallback. - fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ def not_found_error_out(): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. 
diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index cdd72616..cfaf0a65 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -48,9 +48,11 @@ def __init__(self, path=None): file.__init__(self, self._path, 'r+') def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs return Fstab.Entry(*filter( lambda x: x not in ('', None), - line.strip("\n").split(" "))) + line.strip("\n").split())) @property def entries(self): diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... + pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 46bfd36a..8b617a42 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,7 +12,6 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict @@ -212,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used as a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... 
+ pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -314,10 +313,13 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() pkgcache = apt_pkg.Cache() diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The fragment needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) From 08927b1d1b110425137620efbdea58d03bfced77 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Jun 2014 14:48:50 +0100 Subject: [PATCH 0360/2699] resync helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 12 +++--- .../contrib/storage/linux/utils.py | 1 + ceph-osd/hooks/charmhelpers/core/fstab.py | 4 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 9 +++-- ceph-osd/hooks/charmhelpers/core/host.py | 14 ++++--- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 40 +++++++++++-------- 6 files changed, 47 insertions(+), 33 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 44c7c975..15a6731c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -30,12 +30,12 @@ def get_address_in_network(network, fallback=None, fatal=False): """ Get an IPv4 address within the network from the host. - Args: - network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - fallback (str): If no address is found, return fallback. - fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). + """ def not_found_error_out(): diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. 
diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index cdd72616..cfaf0a65 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -48,9 +48,11 @@ def __init__(self, path=None): file.__init__(self, self._path, 'r+') def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs return Fstab.Entry(*filter( lambda x: x not in ('', None), - line.strip("\n").split(" "))) + line.strip("\n").split())) @property def entries(self): diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... + pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 46bfd36a..8b617a42 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -12,7 +12,6 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict @@ -212,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used as a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... 
+ pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -314,10 +313,13 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() pkgcache = apt_pkg.Cache() diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The fragment needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) From 51c66ed92407ff94a08918a6d477bb6540b68c15 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 4 Jul 2014 12:23:17 +0100 Subject: [PATCH 0361/2699] Resync helpers for IPv6 support --- ceph-proxy/charm-helpers-sync.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 109 +++++++++++++++++- 2 files changed, 107 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index afb9e42b..f282af34 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/network-splits destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 15a6731c..e0f9eb66 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -28,7 +28,7 @@ def _validate_cidr(network): def get_address_in_network(network, fallback=None, fatal=False): """ - Get an IPv4 address within the network from the host. + Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. @@ -51,14 +51,23 @@ def not_found_error_out(): not_found_error_out() _validate_cidr(network) + network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) - if netifaces.AF_INET in addresses: + if network.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in netaddr.IPNetwork(network): + if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + netmask = addr['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + netmask)) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -67,3 +76,97 @@ def not_found_error_out(): not_found_error_out() return None + + +def is_ipv6(address): + '''Determine whether provided address is IPv6 or not''' + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! 
+ return False + else: + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. + """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if address in cidr: + if key == 'iface': + return iface + else: + return addr[key] + return None + + +def get_iface_for_address(address): + """Determine the physical interface to which an IP address could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns str: Interface name or None if address is not bindable. + """ + return _get_for_address(address, 'iface') + + +def get_netmask_for_address(address): + """Determine the netmask of the physical interface to which an IP address + could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns str: Netmask of configured interface or None if address is + not bindable. 
+ """ + return _get_for_address(address, 'netmask') From cb014602b87d01fb074a66d12bc3dd34679fe7d5 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 4 Jul 2014 12:23:17 +0100 Subject: [PATCH 0362/2699] Resync helpers for IPv6 support --- ceph-mon/charm-helpers-sync.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 109 +++++++++++++++++- 2 files changed, 107 insertions(+), 4 deletions(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index afb9e42b..f282af34 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/network-splits destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 15a6731c..e0f9eb66 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -28,7 +28,7 @@ def _validate_cidr(network): def get_address_in_network(network, fallback=None, fatal=False): """ - Get an IPv4 address within the network from the host. + Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. @@ -51,14 +51,23 @@ def not_found_error_out(): not_found_error_out() _validate_cidr(network) + network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) - if netifaces.AF_INET in addresses: + if network.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in netaddr.IPNetwork(network): + if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + netmask = addr['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + netmask)) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -67,3 +76,97 @@ def not_found_error_out(): not_found_error_out() return None + + +def is_ipv6(address): + '''Determine whether provided address is IPv6 or not''' + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + else: + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. 
+ + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. + """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if address in cidr: + if key == 'iface': + return iface + else: + return addr[key] + return None + + +def get_iface_for_address(address): + """Determine the physical interface to which an IP address could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns str: Interface name or None if address is not bindable. + """ + return _get_for_address(address, 'iface') + + +def get_netmask_for_address(address): + """Determine the netmask of the physical interface to which an IP address + could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns str: Netmask of configured interface or None if address is + not bindable. + """ + return _get_for_address(address, 'netmask') From e964e06929bff0df9900c158ede548fb0d337468 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 4 Jul 2014 12:24:14 +0100 Subject: [PATCH 0363/2699] Resync helpers for IPv6 support --- ceph-osd/charm-helpers-sync.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 109 +++++++++++++++++- 2 files changed, 107 insertions(+), 4 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 1d9081b7..f379a60a 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/network-splits destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 15a6731c..e0f9eb66 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -28,7 +28,7 @@ def _validate_cidr(network): def get_address_in_network(network, fallback=None, fatal=False): """ - Get an IPv4 address within the network from the host. + Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'.
@@ -51,14 +51,23 @@ def not_found_error_out(): not_found_error_out() _validate_cidr(network) + network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) - if netifaces.AF_INET in addresses: + if network.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in netaddr.IPNetwork(network): + if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + netmask = addr['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + netmask)) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -67,3 +76,97 @@ def not_found_error_out(): not_found_error_out() return None + + +def is_ipv6(address): + '''Determine whether provided address is IPv6 or not''' + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + else: + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. + """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if 'fe80' not in addr['addr']: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if address in cidr: + if key == 'iface': + return iface + else: + return addr[key] + return None + + +def get_iface_for_address(address): + """Determine the physical interface to which an IP address could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. 
+ :returns str: Interface name or None if address is not bindable. + """ + return _get_for_address(address, 'iface') + + +def get_netmask_for_address(address): + """Determine the netmask of the physical interface to which an IP address + could be bound + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns str: Netmask of configured interface or None if address is + not bindable. + """ + return _get_for_address(address, 'netmask') From 7b7f6ffc0a095c379f82e26101db5c512166f7b3 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 15 Jul 2014 10:41:35 +0100 Subject: [PATCH 0364/2699] Pass zap disk to prepare command if need be --- ceph-proxy/hooks/ceph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 734acc8c..7b5effd4 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -364,6 +364,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if osd_format: cmd.append('--fs-type') cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') cmd.append(dev) if osd_journal and os.path.exists(osd_journal): cmd.append(osd_journal) From 3ebca1c5d09eceada143ce5b72ee958f4f14c319 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 15 Jul 2014 10:41:35 +0100 Subject: [PATCH 0365/2699] Pass zap disk to prepare command if need be --- ceph-mon/hooks/ceph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 734acc8c..7b5effd4 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -364,6 +364,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if osd_format: cmd.append('--fs-type') cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') cmd.append(dev) if osd_journal and os.path.exists(osd_journal): cmd.append(osd_journal) From 39ba66ea4dc1bdffdb4db49993fbf981b3e65759 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 15 Jul 2014 10:42:36 +0100 Subject: [PATCH 0366/2699] Pass zap disk to prepare command if need be --- ceph-osd/hooks/ceph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 21b919df..7c65e117 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -351,6 +351,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if osd_format: cmd.append('--fs-type') cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') cmd.append(dev) if osd_journal and os.path.exists(osd_journal): cmd.append(osd_journal) From 3da43582d489adce6ccc179e549da47c1b011e93 Mon Sep 17 00:00:00 2001 From: Bjorn Tillenius Date: Wed, 23 Jul 2014 07:06:18 +0000 Subject: [PATCH 0367/2699] Create the Apt cache in memory only. If the cache is written to disk, there's a race condition in that other applications creating the Apt cache might delete our files.
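For context, python-apt exposes this as the memonly flag on apt.Cache. A minimal sketch of the behaviour the change below relies on (assuming the python-apt bindings are available; the 'ceph' package name is only an illustration):

    import apt

    # memonly=True parses the package lists straight into memory; no
    # pkgcache.bin is written to disk where a concurrent apt invocation
    # could delete or replace it mid-read.
    cache = apt.Cache(memonly=True)
    pkg = cache['ceph']
    if pkg.installed:
        print(pkg.installed.version)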
--- ceph-radosgw/hooks/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index d75ce956..371514e7 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -224,7 +224,7 @@ def get_named_key(name, caps=None): def get_ceph_version(package=None): apt.init() - cache = apt.Cache() + cache = apt.Cache(memonly=True) pkg = cache[package or 'ceph'] if pkg.current_ver: return apt.upstream_version(pkg.current_ver.ver_str) From 62364af1395934252d6f088d1bb8f7fefbba3b60 Mon Sep 17 00:00:00 2001 From: Bjorn Tillenius Date: Wed, 23 Jul 2014 07:54:11 +0000 Subject: [PATCH 0368/2699] It's using the apt_pkg package, not apt. --- ceph-radosgw/hooks/ceph.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 371514e7..2810a0b2 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -11,7 +11,7 @@ import subprocess import time import os -import apt_pkg as apt +import apt_pkg from socket import gethostname as get_unit_hostname @@ -223,14 +223,18 @@ def get_named_key(name, caps=None): def get_ceph_version(package=None): - apt.init() - cache = apt.Cache(memonly=True) + apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. + apt_pkg.config.set("Dir::Cache::pkgcache", "") + cache = apt_pkg.Cache() pkg = cache[package or 'ceph'] if pkg.current_ver: - return apt.upstream_version(pkg.current_ver.ver_str) + return apt_pkg.upstream_version(pkg.current_ver.ver_str) else: return None def version_compare(a, b): - return apt.version_compare(a, b) + return apt_pkg.version_compare(a, b) From ede4fedbdf42813a99749d322887ce573a49472c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:28:18 +0100 Subject: [PATCH 0369/2699] Use charm-helper for pkg version comparison --- ceph-proxy/hooks/ceph.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index cd81cc41..bef03ebb 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -11,10 +11,10 @@ import subprocess import time import os -import apt_pkg as apt from charmhelpers.core.host import ( mkdir, service_restart, + cmp_pkgrevno, ) from charmhelpers.core.hookenv import ( log, @@ -126,7 +126,7 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if get_ceph_version() >= "0.56.6": + if cmp_pkgrevno('ceph', "0.56.6") >= 0: # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): @@ -309,20 +309,6 @@ def bootstrap_monitor_cluster(secret): os.unlink(keyring) -def get_ceph_version(): - apt.init() - cache = apt.Cache() - pkg = cache['ceph'] - if pkg.current_ver: - return apt.upstream_version(pkg.current_ver.ver_str) - else: - return None - - -def version_compare(a, b): - return apt.version_compare(a, b) - - def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -360,7 +346,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if get_ceph_version() >= "0.48.3": + if cmp_pkgrevno('ceph', "0.48.3") >= 0: if osd_format: cmd.append('--fs-type') cmd.append(osd_format) @@ -383,7 +369,7 
@@ def osdize_dir(path): log('Path {} is already configured as an OSD - bailing'.format(path)) return - if get_ceph_version() < "0.56.6": + if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) raise From fdffdf75f32264c8c0865fea9f9ba30c272e5ecf Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:28:18 +0100 Subject: [PATCH 0370/2699] Use charm-helper for pkg version comparison --- ceph-mon/hooks/ceph.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index cd81cc41..bef03ebb 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -11,10 +11,10 @@ import subprocess import time import os -import apt_pkg as apt from charmhelpers.core.host import ( mkdir, service_restart, + cmp_pkgrevno, ) from charmhelpers.core.hookenv import ( log, @@ -126,7 +126,7 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if get_ceph_version() >= "0.56.6": + if cmp_pkgrevno('ceph', "0.56.6") >= 0: # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): @@ -309,20 +309,6 @@ def bootstrap_monitor_cluster(secret): os.unlink(keyring) -def get_ceph_version(): - apt.init() - cache = apt.Cache() - pkg = cache['ceph'] - if pkg.current_ver: - return apt.upstream_version(pkg.current_ver.ver_str) - else: - return None - - -def version_compare(a, b): - return apt.version_compare(a, b) - - def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -360,7 +346,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if get_ceph_version() >= "0.48.3": + if cmp_pkgrevno('ceph', "0.48.3") >= 0: if osd_format: cmd.append('--fs-type') cmd.append(osd_format) @@ -383,7 +369,7 @@ def osdize_dir(path): log('Path {} is already configured as an OSD - bailing'.format(path)) return - if get_ceph_version() < "0.56.6": + if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) raise From db45f223c700ad97c0cf926b6ba417e2489aeb33 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:31:16 +0100 Subject: [PATCH 0371/2699] Use helper in hooks as well --- ceph-proxy/hooks/hooks.py | 7 ++++--- ceph-proxy/templates/ceph.conf | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 11e1cab8..1827f811 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -29,7 +29,8 @@ from charmhelpers.core.host import ( service_restart, umount, - mkdir + mkdir, + cmp_pkgrevno ) from charmhelpers.fetch import ( apt_install, @@ -50,7 +51,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.get_ceph_version() < "0.55.1": + if cmp_pkgrevno('ceph', "0.55.1") < 0: for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') @@ -71,7 +72,7 @@ def emit_cephconf(): 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), - 'version': ceph.get_ceph_version(), + 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower() } diff --git a/ceph-proxy/templates/ceph.conf 
b/ceph-proxy/templates/ceph.conf index d82ca820..b828eeb3 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,5 +1,5 @@ [global] -{% if version < "0.51" %} +{% if old_auth %} auth supported = {{ auth_supported }} {% else %} auth cluster required = {{ auth_supported }} From baa3d4142a9b05d1636385ab6d927bcf9b6f0162 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:31:16 +0100 Subject: [PATCH 0372/2699] Use helper in hooks as well --- ceph-mon/hooks/hooks.py | 7 ++++--- ceph-mon/templates/ceph.conf | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 11e1cab8..1827f811 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -29,7 +29,8 @@ from charmhelpers.core.host import ( service_restart, umount, - mkdir + mkdir, + cmp_pkgrevno ) from charmhelpers.fetch import ( apt_install, @@ -50,7 +51,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.get_ceph_version() < "0.55.1": + if cmp_pkgrevno('ceph', "0.55.1") < 0: for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') @@ -71,7 +72,7 @@ def emit_cephconf(): 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': config('fsid'), - 'version': ceph.get_ceph_version(), + 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower() } diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index d82ca820..b828eeb3 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,5 +1,5 @@ [global] -{% if version < "0.51" %} +{% if old_auth %} auth supported = {{ auth_supported }} {% else %} auth cluster required = {{ auth_supported }} From aeac00384b04769426cd2b270f0c405fc5e399f8 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:34:57 +0100 Subject: [PATCH 0373/2699] Use charm-helper for version comparisons --- ceph-osd/hooks/ceph.py | 22 ++++------------------ ceph-osd/hooks/hooks.py | 7 ++++--- ceph-osd/templates/ceph.conf | 2 +- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 21b919df..95535c41 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -11,10 +11,10 @@ import subprocess import time import os -import apt_pkg as apt from charmhelpers.core.host import ( mkdir, service_restart, + cmp_pkgrevno ) from charmhelpers.core.hookenv import ( log, @@ -126,7 +126,7 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if get_ceph_version() >= "0.56.6": + if cmp_pkgrevno('ceph', '0.56.6') >= 0: # Use ceph-disk-activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): @@ -296,20 +296,6 @@ def bootstrap_monitor_cluster(secret): os.unlink(keyring) -def get_ceph_version(): - apt.init() - cache = apt.Cache() - pkg = cache['ceph'] - if pkg.current_ver: - return apt.upstream_version(pkg.current_ver.ver_str) - else: - return None - - -def version_compare(a, b): - return apt.version_compare(a, b) - - def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -347,7 +333,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if get_ceph_version() >= 
"0.48.3": + if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: cmd.append('--fs-type') cmd.append(osd_format) @@ -370,7 +356,7 @@ def osdize_dir(path): log('Path {} is already configured as an OSD - bailing'.format(path)) return - if get_ceph_version() < "0.56.6": + if cmp_pkgrevno('ceph', '0.56.6.') < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) raise diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 1340b993..2e933c1f 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -26,7 +26,8 @@ ) from charmhelpers.core.host import ( umount, - mkdir + mkdir, + cmp_pkgrevno ) from charmhelpers.fetch import ( add_source, @@ -47,7 +48,7 @@ def install_upstart_scripts(): # Only install upstart configurations for older versions - if ceph.get_ceph_version() < "0.55.1": + if cmp_pkgrevno('ceph', "0.55.1") < 0: for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') @@ -70,7 +71,7 @@ def emit_cephconf(): 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid(), - 'version': ceph.get_ceph_version(), + 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower() } diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index cd81d440..49b6ba56 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,5 +1,5 @@ [global] -{% if version < "0.51" %} +{% if old_auth %} auth supported = {{ auth_supported }} {% else %} auth cluster required = {{ auth_supported }} From f28ea61522cbc40ff449d320cadf449ae8d0042a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Jul 2014 12:37:44 +0100 Subject: [PATCH 0374/2699] Use charm-helper for version comparisons --- ceph-radosgw/hooks/ceph.py | 19 ------------------- ceph-radosgw/hooks/hooks.py | 7 ++++--- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 2810a0b2..ffff7fc0 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -11,7 +11,6 @@ import subprocess import time import os -import apt_pkg from socket import gethostname as get_unit_hostname @@ -220,21 +219,3 @@ def get_named_key(name, caps=None): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key - - -def get_ceph_version(package=None): - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. 
- apt_pkg.config.set("Dir::Cache::pkgcache", "") - cache = apt_pkg.Cache() - pkg = cache[package or 'ceph'] - if pkg.current_ver: - return apt_pkg.upstream_version(pkg.current_ver.ver_str) - else: - return None - - -def version_compare(a, b): - return apt_pkg.version_compare(a, b) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 9073a252..41b3641f 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -38,6 +38,7 @@ ) from charmhelpers.payload.execd import execd_preinstall +from charmhelpers.core.host import cmp_pkgrevno from socket import gethostname as get_unit_hostname hooks = Hooks() @@ -73,13 +74,13 @@ def emit_cephconf(): 'auth_supported': get_auth() or 'none', 'mon_hosts': ' '.join(get_mon_hosts()), 'hostname': get_unit_hostname(), - 'version': ceph.get_ceph_version('radosgw'), + 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower() } # Check to ensure that correct version of ceph is # in use - if ceph.get_ceph_version('radosgw') >= "0.55": + if cmp_pkgrevno('radosgw', '0.55') >= 0: # Add keystone configuration if found ks_conf = get_keystone_conf() if ks_conf: @@ -208,7 +209,7 @@ def restart(): @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): - if ceph.get_ceph_version('radosgw') < "0.55": + if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) From a1c29c4b0a1d7047b8e83a433aa0290cfc5b943a Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:40:31 +0100 Subject: [PATCH 0375/2699] Resync helpers --- .../contrib/storage/linux/utils.py | 1 + ceph-proxy/hooks/charmhelpers/core/hookenv.py | 9 ++-- ceph-proxy/hooks/charmhelpers/core/host.py | 44 ++++++++++++++----- .../hooks/charmhelpers/fetch/__init__.py | 40 ++++++++++------- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 3 +- 5 files changed, 64 insertions(+), 33 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... 
+ pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 186147f6..d934f940 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,11 +12,11 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +35,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +182,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... + pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,12 +313,19 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. 
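+ # Setting "Dir::Cache::pkgcache" to an empty string disables the + # on-disk binary cache entirely, so apt_pkg parses the package lists + # into memory on every call - slightly slower, but safe against a + # concurrent process removing pkgcache.bin mid-read.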
+ apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. """ - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: From da49b1fc1c58d997e48125b8318d40151459efa4 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:40:31 +0100 Subject: [PATCH 0376/2699] Resync helpers --- .../contrib/storage/linux/utils.py | 1 + ceph-mon/hooks/charmhelpers/core/hookenv.py | 9 ++-- ceph-mon/hooks/charmhelpers/core/host.py | 44 ++++++++++++++----- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 40 ++++++++++------- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 3 +- 5 files changed, 64 insertions(+), 33 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 
b87ef26d..8d0f6116 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... + pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 186147f6..d934f940 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,11 +12,11 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +35,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +182,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files 
changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... + pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,12 +313,19 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. + apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
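+ + With the example above, safe_load yields + sources == ['ppa:foo', 'http://example.com/repo precise main'] and + keys == [None, 'a1b2c3d4']; lists of different lengths raise + SourceConfigError.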
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: From fe71c84974b9711c4f59ef77890c1b56c495ca92 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:41:12 +0100 Subject: [PATCH 0377/2699] Resync helpers --- .../contrib/storage/linux/utils.py | 1 + ceph-osd/hooks/charmhelpers/core/fstab.py | 116 ++++++++++++++++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 9 +- ceph-osd/hooks/charmhelpers/core/host.py | 44 +++++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 40 +++--- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 3 +- 6 files changed, 180 insertions(+), 33 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/fstab.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cfaf0a65 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... 
+ pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 186147f6..d934f940 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -12,11 +12,11 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +35,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +182,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... + pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,12 +313,19 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. 
+ apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. """ - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: From bb1fdf42c578e128690e9d3983b95b32ded8fe9e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:42:51 +0100 Subject: [PATCH 0378/2699] Add missing fstab package --- ceph-proxy/hooks/charmhelpers/core/fstab.py | 116 ++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 ceph-proxy/hooks/charmhelpers/core/fstab.py diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cfaf0a65 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski 
R. ' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) From 26fec1042c85cbb8c03216d0c01d4b74e2bdff14 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:42:51 +0100 Subject: [PATCH 0379/2699] Add missing fstab package --- ceph-mon/hooks/charmhelpers/core/fstab.py | 116 ++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 ceph-mon/hooks/charmhelpers/core/fstab.py diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cfaf0a65 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) From 3db3b6a78c31aa5b984455eb0eb2ddcbfe07c0e4 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:43:27 +0100 Subject: [PATCH 0380/2699] Resync helpers --- .../contrib/storage/linux/utils.py | 1 + ceph-radosgw/hooks/charmhelpers/core/fstab.py | 116 ++++++++++++++++++ .../hooks/charmhelpers/core/hookenv.py | 9 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 44 +++++-- .../hooks/charmhelpers/fetch/__init__.py | 40 +++--- .../hooks/charmhelpers/fetch/bzrurl.py | 3 +- 6 files changed, 180 insertions(+), 33 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/core/fstab.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26d..8d0f6116 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device 
is mounted, and False if it isn't. diff --git a/ceph-radosgw/hooks/charmhelpers/core/fstab.py b/ceph-radosgw/hooks/charmhelpers/core/fstab.py new file mode 100644 index 00000000..cfaf0a65 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index c2e66f66..c9530433 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... 
+ pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... + pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 186147f6..d934f940 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -12,11 +12,11 @@ import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +35,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +182,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... + pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,12 +313,19 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. 
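+        # (Setting "Dir::Cache::pkgcache" to an empty string disables the
+        # on-disk binary cache file, so the cache is only ever built in
+        # memory.)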
+ apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index e8e837a5..5be512ce 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. """ - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a3..0e580e47 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ def branch(self, source, dest): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: From 69b956233fd6c772247aa56689f4ea6beed2c70d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 10:48:17 +0100 Subject: [PATCH 0381/2699] Fixup template --- ceph-radosgw/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 14f088f7..89287606 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -1,5 +1,5 @@ [global] -{% if version < "0.51" %} +{% if old_auth %} auth supported = {{ auth_supported }} {% else %} auth cluster required = {{ auth_supported }} From 
48731ae0900860579bf29a54d44e7066153dc556 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 15:17:01 +0100 Subject: [PATCH 0382/2699] Deal with IPv6 addresses --- ceph-proxy/hooks/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 26ca452d..7c657bf9 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -39,6 +39,7 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative +from charmhelpers.contrib.network.ip import is_ipv6 from utils import ( render_template, @@ -134,6 +135,8 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) + if is_ipv6(addr): + addr = '[{}]'.format(addr) if addr is not None: hosts.append('{}:6789'.format(addr)) From 16770f14669b2f5d3c99208e9faac700f29cdb3a Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 15:17:01 +0100 Subject: [PATCH 0383/2699] Deal with IPv6 addresses --- ceph-mon/hooks/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 26ca452d..7c657bf9 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -39,6 +39,7 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative +from charmhelpers.contrib.network.ip import is_ipv6 from utils import ( render_template, @@ -134,6 +135,8 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) + if is_ipv6(addr): + addr = '[{}]'.format(addr) if addr is not None: hosts.append('{}:6789'.format(addr)) From 8bd3bb995efe88c137b24ceab6456c81eae22694 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 15:18:21 +0100 Subject: [PATCH 0384/2699] Deal with IPv6 addresses --- ceph-proxy/hooks/hooks.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7c657bf9..d5834c2a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -130,15 +130,20 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(get_public_addr())) + addr = get_public_addr() + if is_ipv6(addr): + hosts.append('[{}]:6789'.format(addr)) + else: + hosts.append('{}:6789'.format(addr)) for relid in relation_ids('mon'): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) - if is_ipv6(addr): - addr = '[{}]'.format(addr) if addr is not None: - hosts.append('{}:6789'.format(addr)) + if is_ipv6(addr): + hosts.append('[{}]:6789'.format(addr)) + else: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts From b6029340bdf75f7c87022ad21de7b594bb36fd5e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 15:18:21 +0100 Subject: [PATCH 0385/2699] Deal with IPv6 addresses --- ceph-mon/hooks/hooks.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7c657bf9..d5834c2a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -130,15 +130,20 @@ def config_changed(): def get_mon_hosts(): hosts = [] - hosts.append('{}:6789'.format(get_public_addr())) + addr = get_public_addr() + if is_ipv6(addr): + hosts.append('[{}]:6789'.format(addr)) + else: 
+ hosts.append('{}:6789'.format(addr)) for relid in relation_ids('mon'): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) - if is_ipv6(addr): - addr = '[{}]'.format(addr) if addr is not None: - hosts.append('{}:6789'.format(addr)) + if is_ipv6(addr): + hosts.append('[{}]:6789'.format(addr)) + else: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts From f10f9df911bea6a63316c3b71a1cc5d7f5e24497 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 24 Jul 2014 15:29:47 +0100 Subject: [PATCH 0386/2699] Deal with IPv6 addresses --- ceph-osd/hooks/hooks.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index a7f3f371..5fa7844f 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -41,6 +41,7 @@ ) from charmhelpers.contrib.openstack.alternatives import install_alternative +from charmhelpers.contrib.network.ip import is_ipv6 hooks = Hooks() @@ -120,8 +121,10 @@ def get_mon_hosts(): addr = relation_get('ceph-public-address', unit, relid) or \ get_host_ip(relation_get('private-address', unit, relid)) if addr is not None: - hosts.append('{}:6789'.format(addr)) - + if is_ipv6(addr): + hosts.append('[{}]:6789'.format(addr)) + else: + hosts.append('{}:6789'.format(addr)) hosts.sort() return hosts From 86af4147b460670862909ebaa69d2ff1bf8f1d25 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 25 Jul 2014 09:08:17 +0100 Subject: [PATCH 0387/2699] Rebase --- ceph-proxy/charm-helpers-sync.yaml | 2 +- ceph-proxy/hooks/charmhelpers/core/host.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-sync.yaml index f282af34..afb9e42b 100644 --- a/ceph-proxy/charm-helpers-sync.yaml +++ b/ceph-proxy/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/network-splits +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 8b617a42..d934f940 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -322,6 +322,10 @@ def cmp_pkgrevno(package, revno, pkgcache=None): import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. 
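+        # (Setting "Dir::Cache::pkgcache" to an empty string disables the
+        # on-disk binary cache file, so the cache is only ever built in
+        # memory.)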
+ apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) From 2977b46f84fcab5d65d24b7b1fa232a1f4511292 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 25 Jul 2014 09:08:17 +0100 Subject: [PATCH 0388/2699] Rebase --- ceph-mon/charm-helpers-sync.yaml | 2 +- ceph-mon/hooks/charmhelpers/core/host.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-sync.yaml index f282af34..afb9e42b 100644 --- a/ceph-mon/charm-helpers-sync.yaml +++ b/ceph-mon/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/network-splits +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 8b617a42..d934f940 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -322,6 +322,10 @@ def cmp_pkgrevno(package, revno, pkgcache=None): import apt_pkg if not pkgcache: apt_pkg.init() + # Force Apt to build its cache in memory. That way we avoid race + # conditions with other applications building the cache in the same + # place. + apt_pkg.config.set("Dir::Cache::pkgcache", "") pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) From c74f0a891ba50f71bfe69b6552c2562b92d64dd9 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Jul 2014 15:50:31 +0100 Subject: [PATCH 0389/2699] Only directly zap the disk for older ceph versions --- ceph-proxy/hooks/ceph.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 31178d46..11376009 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -359,9 +359,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - - if reformat_osd: - zap_disk(dev) + if reformat_osd: + zap_disk(dev) subprocess.check_call(cmd) From 0af094f00709498feb10129dbbc7b69e64d1d87b Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Jul 2014 15:50:31 +0100 Subject: [PATCH 0390/2699] Only directly zap the disk for older ceph versions --- ceph-mon/hooks/ceph.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 31178d46..11376009 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -359,9 +359,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - - if reformat_osd: - zap_disk(dev) + if reformat_osd: + zap_disk(dev) subprocess.check_call(cmd) From 71aef42293983f68b1cda20a1585ba79054811ea Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Jul 2014 15:50:47 +0100 Subject: [PATCH 0391/2699] Only directly zap the disk for older ceph versions --- ceph-osd/hooks/ceph.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 07e22eb4..172e560a 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -346,9 +346,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): # Just provide the device - no other options # for older versions of ceph cmd.append(dev) - - if reformat_osd: - zap_disk(dev) + if 
reformat_osd: + zap_disk(dev) subprocess.check_call(cmd) From a8d5ab66ff1585202d63aed113aeb6f57932dfd3 Mon Sep 17 00:00:00 2001 From: Seyeong Kim Date: Wed, 6 Aug 2014 14:02:36 +0900 Subject: [PATCH 0392/2699] change template ceph.conf configuration name from client.radosgw.gateway to client.rados.gateway --- ceph-radosgw/templates/ceph.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 89287606..1b46aa42 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -11,7 +11,7 @@ err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} -[client.radosgw.gateway] +[client.rados.gateway] host = {{ hostname }} keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock @@ -26,4 +26,4 @@ rgw keystone token cache size = {{ cache_size }} rgw keystone revocation interval = {{ revocation_check_interval }} #nss db path = /var/lib/ceph/nss -{% endif %} \ No newline at end of file +{% endif %} From 1007e799b7c432fdc0cdf4574f251edfd1208db1 Mon Sep 17 00:00:00 2001 From: Seyeong Kim Date: Thu, 7 Aug 2014 19:09:14 +0900 Subject: [PATCH 0393/2699] s3gw.fcgi should have same configuration name in ceph.conf --- ceph-radosgw/files/www/s3gw.fcgi | 2 +- ceph-radosgw/templates/ceph.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/files/www/s3gw.fcgi b/ceph-radosgw/files/www/s3gw.fcgi index c0f4854a..e766fcb9 100755 --- a/ceph-radosgw/files/www/s3gw.fcgi +++ b/ceph-radosgw/files/www/s3gw.fcgi @@ -1,2 +1,2 @@ #!/bin/sh -exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.rados.gateway \ No newline at end of file +exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 1b46aa42..d57943c8 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -11,7 +11,7 @@ err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} -[client.rados.gateway] +[client.radosgw.gateway] host = {{ hostname }} keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock From 2e5824b62040ae6dc7718e443cd8e5a2ce9dc457 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Mon, 11 Aug 2014 19:04:42 +0800 Subject: [PATCH 0394/2699] Support ceph for IPv6. 
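This adds a prefer-ipv6 option. When it is set, the charm takes its own
global IPv6 address from get_ipv6_addr() rather than from the unit's
private-address, publishes it on the mon, radosgw and client relations,
and writes it into ceph.conf as the public and cluster address.
Wherever a host:port string is built, an IPv6 literal is wrapped in
square brackets so that the trailing :6789 cannot be misread as part of
the address. A minimal sketch of that formatting rule (format_mon_host
is a hypothetical name used only for illustration, not a helper added
by this patch):

    from charmhelpers.contrib.network.ip import is_ipv6

    def format_mon_host(addr, port=6789):
        # IPv6 literals must be bracketed, otherwise the colons in the
        # address are ambiguous with the host:port separator.
        if is_ipv6(addr):
            return '[{}]:{}'.format(addr, port)
        return '{}:{}'.format(addr, port)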
--- ceph-proxy/config.yaml | 3 + .../hooks/charmhelpers/contrib/network/ip.py | 20 +- .../contrib/storage/linux/utils.py | 3 + ceph-proxy/hooks/charmhelpers/core/host.py | 35 +- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 305 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ ceph-proxy/hooks/hooks.py | 44 ++- ceph-proxy/hooks/utils.py | 6 + ceph-proxy/templates/ceph.conf | 4 + 11 files changed, 594 insertions(+), 4 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/templating.py diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d6422431..b617a1f7 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -125,3 +125,6 @@ options: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + prefer-ipv6: + type: boolean + default: False diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..7edbcc48 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,21 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. 
''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index d934f940..ca7780df 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..f08e6d78 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,305 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Traditional charm authoring is focused on implementing hooks. That is, + the charm author is thinking in terms of "What hook am I handling; what + does this hook need to do?" However, in most cases, the real question + should be "Do I have the information I need to configure and start this + piece of software and, if so, what are the steps for doing so?" The + ServiceManager framework tries to bring the focus to the data and the + setup tasks, in the most declarative way possible. 
+ + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. 
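+
+        On the 'stop' hook, all registered services are stopped; on any
+        other hook, relation data is provided first and each registered
+        service is then reconfigured according to its readiness.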
+ """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + + def provide_data(self): + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + if provider._is_ready(data): + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. 
+ """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. 
+ + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 1ca06db0..ead950fb 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -40,7 +40,10 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative -from charmhelpers.contrib.network.ip import is_ipv6 +from charmhelpers.contrib.network.ip import ( + is_ipv6, + get_ipv6_addr, +) from utils import ( render_template, @@ -67,6 +70,11 @@ def install(): def emit_cephconf(): + if config('prefer-ipv6'): + host_ip = '[%s]' % get_ipv6_addr() + #else: + # host_ip = '0.0.0.0' + cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -76,6 +84,7 @@ def emit_cephconf(): 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), + 'host_ip': host_ip, } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -139,7 +148,11 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) + if config('prefer-ipv6'): + addr = relation_get('ceph-public-address', unit, relid) + else: + addr = relation_get('private-address', unit, relid) + if addr is not None: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) @@ -176,6 +189,14 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() + + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: @@ -254,9 +275,28 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) + + log('End radosgw-relation hook.') + 
@hooks.hook('client-relation-joined') def client_relation(relid=None): + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) + if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 21fd1a5f..695409a3 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -20,6 +20,9 @@ ) from charmhelpers.contrib.network import ip +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, +) TEMPLATES_DIR = 'templates' @@ -64,6 +67,9 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return '[%s]' % get_ipv6_addr() + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 4ef0c66e..03e5d04c 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -33,3 +33,7 @@ osd journal size = {{ osd_journal_size }} filestore xattr use omap = true + host = {{ hostname }} + public addr = {{ host_ip }} + cluster addr = {{ host_ip }} + From 994cbab309abbff96b8b8f89c24029c1c8d8ab1a Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Mon, 11 Aug 2014 19:04:42 +0800 Subject: [PATCH 0395/2699] Support ceph for IPv6. --- ceph-mon/config.yaml | 3 + .../hooks/charmhelpers/contrib/network/ip.py | 20 +- .../contrib/storage/linux/utils.py | 3 + ceph-mon/hooks/charmhelpers/core/host.py | 35 +- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 305 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ ceph-mon/hooks/hooks.py | 44 ++- ceph-mon/hooks/utils.py | 6 + ceph-mon/templates/ceph.conf | 4 + 11 files changed, 594 insertions(+), 4 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-mon/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-mon/hooks/charmhelpers/core/templating.py diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d6422431..b617a1f7 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -125,3 +125,6 @@ options: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + prefer-ipv6: + type: boolean + default: False diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..7edbcc48 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,21 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." 
% iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d934f940..ca7780df 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..f08e6d78 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,305 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = 
['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Traditional charm authoring is focused on implementing hooks. That is, + the charm author is thinking in terms of "What hook am I handling; what + does this hook need to do?" However, in most cases, the real question + should be "Do I have the information I need to configure and start this + piece of software and, if so, what are the steps for doing so?" The + ServiceManager framework tries to bring the focus to the data and the + setup tasks, in the most declarative way possible. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. 
+ + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + + def provide_data(self): + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + if provider._is_ready(data): + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. 
+ + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
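
To make the dispatch flow above concrete, here is a minimal sketch of a hooks script built on this framework; the service name, port, and data are placeholders, not part of the synced code::

    from charmhelpers.core.services.base import ServiceManager

    def assemble_manager():
        return ServiceManager([
            {
                'service': 'myservice',              # an init job name (placeholder)
                'ports': [6789],
                'required_data': [{'fsid': 'abc'}],  # any truthy mapping counts as ready
            },
        ])

    if __name__ == '__main__':
        # manage() inspects hookenv.hook_name(): 'stop' fires the stop chain,
        # anything else runs provide_data() and then reconfigure_services().
        assemble_manager().manage()

One script like this can be symlinked for every hook, since the same `manage()` call does the right thing in each.
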
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. 
+ """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 1ca06db0..ead950fb 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -40,7 +40,10 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative -from charmhelpers.contrib.network.ip import is_ipv6 +from charmhelpers.contrib.network.ip import ( + is_ipv6, + get_ipv6_addr, +) from utils import ( render_template, @@ -67,6 +70,11 @@ def install(): def emit_cephconf(): + if config('prefer-ipv6'): + host_ip = '[%s]' % get_ipv6_addr() + #else: + # host_ip = '0.0.0.0' + cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -76,6 +84,7 @@ def emit_cephconf(): 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), + 'host_ip': host_ip, } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -139,7 +148,11 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) + if config('prefer-ipv6'): + addr = relation_get('ceph-public-address', unit, relid) + else: + addr = relation_get('private-address', unit, relid) + if addr is not None: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) @@ -176,6 +189,14 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() + + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: @@ -254,9 +275,28 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) + + log('End radosgw-relation hook.') + @hooks.hook('client-relation-joined') def client_relation(relid=None): + if config('prefer-ipv6'): + host = '[%s]' % get_ipv6_addr() + else: + host = unit_get('private-address') + relation_data = {} + relation_data['private-address'] = host + relation_set(**relation_data) + if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 21fd1a5f..695409a3 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -20,6 +20,9 @@ ) from charmhelpers.contrib.network import ip +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, +) TEMPLATES_DIR = 'templates' @@ -64,6 +67,9 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return '[%s]' % get_ipv6_addr() + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 4ef0c66e..03e5d04c 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -33,3 +33,7 @@ osd journal size = {{ 
osd_journal_size }} filestore xattr use omap = true + host = {{ hostname }} + public addr = {{ host_ip }} + cluster addr = {{ host_ip }} + From dabf8c8411dec310848c49a7fb92abb98371048f Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 15 Aug 2014 10:06:49 +0100 Subject: [PATCH 0396/2699] Add option to ignore device errors --- ceph-osd/config.yaml | 10 ++++++++++ ceph-osd/hooks/ceph.py | 19 ++++++++++++++----- ceph-osd/hooks/hooks.py | 6 ++++-- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 767c92a3..e63d36d5 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -48,6 +48,16 @@ options: . Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. + ignore-device-errors: + type: boolean + default: False + description: | + By default, the charm will raise errors if a whitelisted device is found, + but for some reason the charm is unable to initialize the device for use + by Ceph. + . + Setting this option to 'True' will result in the charm classifying such + problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string description: | diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 172e560a..3ef84921 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -18,7 +18,7 @@ ) from charmhelpers.core.hookenv import ( log, - ERROR, + ERROR, WARNING ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -307,14 +307,16 @@ def update_monfs(): pass -def osdize(dev, osd_format, osd_journal, reformat_osd=False): +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, reformat_osd) + osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors) else: osdize_dir(dev) -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -349,7 +351,14 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if reformat_osd: zap_disk(dev) - subprocess.check_call(cmd) + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Enable to initialize device: {}'.format(dev), WARNING) + else: + log('Enable to initialize device: {}'.format(dev), ERROR) + raise e def osdize_dir(path): diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 554d93ec..6379fcb7 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -111,7 +111,8 @@ def config_changed(): emit_cephconf() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), - config('osd-journal'), config('osd-reformat')) + config('osd-journal'), config('osd-reformat'), + config('ignore-device-errors')) ceph.start_osds(get_devices()) @@ -172,7 +173,8 @@ def mon_relation(): ceph.import_osd_bootstrap_key(bootstrap_key) for dev in get_devices(): ceph.osdize(dev, config('osd-format'), - config('osd-journal'), config('osd-reformat')) + config('osd-journal'), config('osd-reformat'), + config('ignore-device-errors')) ceph.start_osds(get_devices()) else: log('mon cluster has not yet provided conf') From fdfb789d234c8605fbd0d0d1dd095914e054981a Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 15 Aug 2014 10:09:48 +0100 Subject: [PATCH 0397/2699] Fixup bust point --- ceph-osd/hooks/ceph.py | 2 +- 
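
The try/except added to `osdize_dev()` above boils down to the following pattern; this is a stand-alone illustration, not charm code::

    import subprocess

    def prepare(dev, ignore_errors=False):
        try:
            # stand-in for the ceph-disk-prepare invocation
            subprocess.check_call(['false'])
        except subprocess.CalledProcessError:
            if not ignore_errors:
                raise                   # hook fails, as before
            print('WARNING: unable to initialize device: %s' % dev)

    prepare('/dev/sdb', ignore_errors=True)   # warns and carries on
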
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 3ef84921..7b297323 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -366,7 +366,7 @@ def osdize_dir(path): log('Path {} is already configured as an OSD - bailing'.format(path)) return - if cmp_pkgrevno('ceph', '0.56.6.') < 0: + if cmp_pkgrevno('ceph', '0.56.6') < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) raise From 2cb4e2db6cd364b036112c42c706eb537ac1b093 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 15 Aug 2014 10:21:25 +0100 Subject: [PATCH 0398/2699] Add support for ignoring device problems --- ceph-proxy/config.yaml | 10 ++++++++++ ceph-proxy/hooks/ceph.py | 22 ++++++++++++++++------ ceph-proxy/hooks/hooks.py | 4 ++-- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d6422431..17db7920 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -80,6 +80,16 @@ options: . Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. + ignore-device-errors: + type: boolean + default: False + description: | + By default, the charm will raise errors if a whitelisted device is found, + but for some reason the charm is unable to initialize the device for use + by Ceph. + . + Setting this option to 'True' will result in the charm classifying such + problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string description: | diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 11376009..bd34a8c3 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -19,11 +19,12 @@ from charmhelpers.core.hookenv import ( log, ERROR, + WARNING, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, - is_device_mounted + is_device_mounted, ) from utils import ( get_unit_hostname, @@ -320,14 +321,16 @@ def update_monfs(): pass -def osdize(dev, osd_format, osd_journal, reformat_osd=False): +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, reformat_osd) + osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors) else: osdize_dir(dev) -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -346,7 +349,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if cmp_pkgrevno('ceph', "0.48.3") >= 0: + if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: cmd.append('--fs-type') cmd.append(osd_format) @@ -362,7 +365,14 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if reformat_osd: zap_disk(dev) - subprocess.check_call(cmd) + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Enable to initialize device: {}'.format(dev), WARNING) + else: + log('Enable to initialize device: {}'.format(dev), ERROR) + raise e def osdize_dir(path): diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 1ca06db0..ce72a583 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -125,7 +125,7 @@ def config_changed(): if ceph.is_bootstrapped(): for dev in 
get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), - reformat_osd()) + reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) @@ -183,7 +183,7 @@ def mon_relation(): ceph.wait_for_bootstrap() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), - reformat_osd()) + reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) notify_osds() notify_radosgws() From fbf8a241b31007a4c8e07aac681067e2bdcc948e Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 15 Aug 2014 10:21:25 +0100 Subject: [PATCH 0399/2699] Add support for ignoring device problems --- ceph-mon/config.yaml | 10 ++++++++++ ceph-mon/hooks/ceph.py | 22 ++++++++++++++++------ ceph-mon/hooks/hooks.py | 4 ++-- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d6422431..17db7920 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -80,6 +80,16 @@ options: . Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. + ignore-device-errors: + type: boolean + default: False + description: | + By default, the charm will raise errors if a whitelisted device is found, + but for some reason the charm is unable to initialize the device for use + by Ceph. + . + Setting this option to 'True' will result in the charm classifying such + problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string description: | diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 11376009..bd34a8c3 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -19,11 +19,12 @@ from charmhelpers.core.hookenv import ( log, ERROR, + WARNING, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, - is_device_mounted + is_device_mounted, ) from utils import ( get_unit_hostname, @@ -320,14 +321,16 @@ def update_monfs(): pass -def osdize(dev, osd_format, osd_journal, reformat_osd=False): +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, reformat_osd) + osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors) else: osdize_dir(dev) -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -346,7 +349,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options - if cmp_pkgrevno('ceph', "0.48.3") >= 0: + if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: cmd.append('--fs-type') cmd.append(osd_format) @@ -362,7 +365,14 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False): if reformat_osd: zap_disk(dev) - subprocess.check_call(cmd) + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Enable to initialize device: {}'.format(dev), WARNING) + else: + log('Enable to initialize device: {}'.format(dev), ERROR) + raise e def osdize_dir(path): diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 1ca06db0..ce72a583 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -125,7 +125,7 @@ def config_changed(): if ceph.is_bootstrapped(): for dev in get_devices(): ceph.osdize(dev, 
config('osd-format'), config('osd-journal'), - reformat_osd()) + reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) @@ -183,7 +183,7 @@ def mon_relation(): ceph.wait_for_bootstrap() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), config('osd-journal'), - reformat_osd()) + reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) notify_osds() notify_radosgws() From 774633252c975e6fe43f54c3a30ee41d30e22d92 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 20 Aug 2014 15:24:10 +0000 Subject: [PATCH 0400/2699] Remove default value for source config option, allowing it to get set to None, which allows the charm to use the default distro packages. --- ceph-proxy/config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d6422431..9e60721b 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -91,12 +91,11 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string - default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:precise-proposed/folsom + - cloud:precise-proposed/icehouse - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration From 897db952219acfbc7016ebea5e9cee05133da1bf Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 20 Aug 2014 15:24:10 +0000 Subject: [PATCH 0401/2699] Remove default value for source config option, allowing it to get set to None, which allows the charm to use the default distro packages. --- ceph-mon/config.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d6422431..9e60721b 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -91,12 +91,11 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string - default: cloud:precise-updates/folsom description: | Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:precise-proposed/folsom + - cloud:precise-proposed/icehouse - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration From 48024eba36c9d973a30b41a8f2695fee21bc11f6 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:26:04 +0000 Subject: [PATCH 0402/2699] Move charm-helpers-sync.yaml to charm-helpers-hooks.yaml and add charm-helpers-tests.yaml. 
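
With the default removed (patches 0400/0401 above), `config('source')` simply returns None and the distro archive is used. The consuming pattern is roughly the following sketch, which assumes the charm's existing 'key' option::

    from charmhelpers.core.hookenv import config
    from charmhelpers.fetch import add_source, apt_update

    source = config('source')    # None unless the operator sets it
    if source:
        add_source(source, config('key'))
        apt_update(fatal=True)
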
--- ceph-proxy/Makefile | 3 ++- .../{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} | 0 ceph-proxy/charm-helpers-tests.yaml | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) rename ceph-proxy/{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} (100%) create mode 100644 ceph-proxy/charm-helpers-tests.yaml diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index cc53a956..f2070593 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -5,7 +5,8 @@ lint: @charm proof sync: - @charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-hooks.yaml + @charm-helper-sync -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph diff --git a/ceph-proxy/charm-helpers-sync.yaml b/ceph-proxy/charm-helpers-hooks.yaml similarity index 100% rename from ceph-proxy/charm-helpers-sync.yaml rename to ceph-proxy/charm-helpers-hooks.yaml diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet From 2be03ab28b172757dbe0716f8801ec5bf16231a8 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:26:04 +0000 Subject: [PATCH 0403/2699] Move charm-helpers-sync.yaml to charm-helpers-hooks.yaml and add charm-helpers-tests.yaml. --- ceph-mon/Makefile | 3 ++- .../{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} | 0 ceph-mon/charm-helpers-tests.yaml | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) rename ceph-mon/{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} (100%) create mode 100644 ceph-mon/charm-helpers-tests.yaml diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index cc53a956..f2070593 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -5,7 +5,8 @@ lint: @charm proof sync: - @charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers-hooks.yaml + @charm-helper-sync -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph diff --git a/ceph-mon/charm-helpers-sync.yaml b/ceph-mon/charm-helpers-hooks.yaml similarity index 100% rename from ceph-mon/charm-helpers-sync.yaml rename to ceph-mon/charm-helpers-hooks.yaml diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/ceph-mon/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet From e62867a414deede570f87b5d637bf64bc0ec7967 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:40:17 +0000 Subject: [PATCH 0404/2699] Automatically pull down charm_helpers_sync when 'make sync' is called. 
--- ceph-proxy/Makefile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index f2070593..a78fc6d2 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -1,12 +1,18 @@ #!/usr/bin/make +PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks @charm proof -sync: - @charm-helper-sync -c charm-helpers-hooks.yaml - @charm-helper-sync -c charm-helpers-tests.yaml +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph From 3d129c04cf013c442dce166d050733f0281c0af0 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:40:17 +0000 Subject: [PATCH 0405/2699] Automatically pull down charm_helpers_sync when 'make sync' is called. --- ceph-mon/Makefile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index f2070593..a78fc6d2 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -1,12 +1,18 @@ #!/usr/bin/make +PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks @charm proof -sync: - @charm-helper-sync -c charm-helpers-hooks.yaml - @charm-helper-sync -c charm-helpers-tests.yaml +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph From 792870bcfe550807a92de567fd7c958b966ec92a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:42:17 +0000 Subject: [PATCH 0406/2699] Sync with charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 20 +- .../contrib/storage/linux/utils.py | 3 + ceph-proxy/hooks/charmhelpers/core/hookenv.py | 3 +- ceph-proxy/hooks/charmhelpers/core/host.py | 35 +- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 310 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ .../hooks/charmhelpers/fetch/__init__.py | 45 ++- ceph-proxy/tests/charmhelpers/__init__.py | 0 .../tests/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/amulet/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 71 ++++ .../charmhelpers/contrib/amulet/utils.py | 176 ++++++++++ .../contrib/openstack/__init__.py | 0 .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 61 ++++ .../contrib/openstack/amulet/utils.py | 275 ++++++++++++++++ 18 files changed, 1170 insertions(+), 7 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/templating.py create mode 100644 ceph-proxy/tests/charmhelpers/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 
ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..7edbcc48 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,21 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. 
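
A hedged usage sketch of the new `get_ipv6_addr()` helper, formatted the way the hooks above build mon host entries::

    from charmhelpers.contrib.network.ip import get_ipv6_addr

    addr = get_ipv6_addr(iface='eth0')   # raises if eth0 lacks a global IPv6 address
    mon_host = '[%s]:6789' % addr        # bracketed, as get_mon_hosts() emits
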
''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index c9530433..eb4aa092 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -285,8 +285,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index d934f940..ca7780df 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..6b5a1b9f --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,310 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 
'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following format (all keys except + 'service' are optional):: + + { + "service": <service name>, + "required_data": <list of required data contexts>, + "provided_data": <list of provided data contexts>, + "data_ready": <one or more callbacks>, + "data_lost": <one or more callbacks>, + "start": <one or more callbacks>, + "stop": <one or more callbacks>, + "ports": <list of ports to manage>, + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the + 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service.
+ + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. 
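
The `provide_data()` loop above only calls into objects from the 'provided_data' list; a provider can be as small as this hypothetical sketch (the optional `_is_ready()` check is applied only when the provider defines it)::

    class ClusterPeers(object):
        name = 'cluster'    # matched against '<name>-relation-(joined|changed)'

        def provide_data(self):
            # whatever this unit should publish on the relation
            return {'private-address': '192.0.2.1'}
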
+ """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. 
+ """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 5be512ce..61633d8c 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -122,6 +123,7 @@ def filter_installed_packages(packages): # Tell apt to build an in-memory cache to prevent race conditions (if # another process is already building the cache). apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") cache = apt_pkg.Cache() _pkgs = [] @@ -201,6 +203,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver, + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automatically, so should not be provided. + """ if source is None: log('Source is not present. Skipping') return @@ -225,10 +248,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +274,8 @@ Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The fragment needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source().
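
Hedged examples matching the `add_source()` docstring above::

    from charmhelpers.fetch import add_source

    add_source('ppa:charmers/example')   # PPA; key added automatically
    add_source('cloud:icehouse')         # official cloud archive pocket

    # placeholder key material; a real ASCII-armoured key body would follow
    ascii_key = '-----BEGIN PGP PUBLIC KEY BLOCK-----\n...'
    add_source('deb https://stub:key@private.example.com/ubuntu trusty main',
               key=ascii_key)
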
Example config: install_sources: | diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/tests/charmhelpers/contrib/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..8c0af487 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,71 @@ +import amulet + +import os + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're focused on testing and other_services are the other + charms that come from the charm store. + """ + name, units = range(2) + + if this_service[name] != os.path.basename(os.getcwd()): + s = this_service[name] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + self.d.add(this_service[name], units=this_service[units]) + + for svc in other_services: + if self.series: + self.d.add(svc[name], + charm='cs:{}/{}'.format(self.series, svc[name]), + units=svc[units]) + else: + self.d.add(svc[name], units=svc[units]) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup() + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..c843333f --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,176 @@ +import ConfigParser +import io +import logging +import re +import sys +import time + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. 
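
A hypothetical test skeleton driving the freshly synced `AmuletDeployment`; charm names, unit counts, and relation endpoints are illustrative only::

    from charmhelpers.contrib.amulet.deployment import AmuletDeployment

    class CephBasicDeployment(AmuletDeployment):
        def test_100_units_started(self):
            pass   # assertions against self.d.sentry would go here

    t = CephBasicDeployment(series='precise')
    # note: _add_services() insists the cwd basename matches 'ceph'
    t._add_services(('ceph', 3), [('mysql', 1)])      # (name, units) tuples
    t._add_relations({'ceph:client': 'mysql:ceph'})   # hypothetical endpoints
    t._configure_services({'ceph': {'monitor-count': '3'}})
    t._deploy()
    t.run_tests()   # runs every method whose name starts with 'test_'
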
+ """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Validate services. + + Verify the specified services are running on the corresponding + service units. + """ + for k, v in commands.iteritems(): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + """ + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool. 
+ """ + for k, v in expected.iteritems(): + if k in actual: + if (isinstance(v, basestring) or + isinstance(v, bool) or + isinstance(v, (int, long))): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. + """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..9179eeb1 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,61 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
+ """ + + def __init__(self, series=None, openstack=None, source=None): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + if svc[name] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + if svc[name] in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..bd327bdc --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,275 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + + if not os.path.exists(cirros_img): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, cirros_img) + f.close() + + with open(cirros_img) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = 
nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From 48b7fd19c2fa2c1ea1bc6669dce57e1914031737 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 25 Aug 2014 18:42:17 +0000 Subject: [PATCH 0407/2699] Sync with charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 20 +- .../contrib/storage/linux/utils.py | 3 + ceph-mon/hooks/charmhelpers/core/hookenv.py | 3 +- ceph-mon/hooks/charmhelpers/core/host.py | 35 +- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 310 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ ceph-mon/hooks/charmhelpers/fetch/__init__.py | 45 ++- ceph-mon/tests/charmhelpers/__init__.py | 0 .../tests/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/amulet/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 71 ++++ .../charmhelpers/contrib/amulet/utils.py | 176 ++++++++++ .../contrib/openstack/__init__.py | 0 .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 61 ++++ .../contrib/openstack/amulet/utils.py | 275 ++++++++++++++++ 18 files changed, 1170 insertions(+), 7 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-mon/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-mon/hooks/charmhelpers/core/templating.py create mode 100644 ceph-mon/tests/charmhelpers/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..7edbcc48 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,21 @@ def _get_for_address(address, key): get_iface_for_address = 
partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have a global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index c9530433..eb4aa092 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -285,8 +285,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d934f940..ca7780df 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_pkg.Cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid =
pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..6b5a1b9f --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,310 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. 
The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. 
+ """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. 
+ """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 5be512ce..61633d8c 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -122,6 +123,7 @@ def filter_installed_packages(packages): # Tell apt to build an in-memory cache to prevent race conditions (if # another process is already building the cache). apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") cache = apt_pkg.Cache() _pkgs = [] @@ -201,6 +203,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII-format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver, + placing your Juju environment at risk. PPA and cloud archive keys + are securely added automatically, so they should not be provided. + """ if source is None: log('Source is not present. Skipping') return @@ -225,10 +248,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +274,8 @@ def configure_sources(update=False, Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The fragment needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source().
Example config: install_sources: | diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/contrib/__init__.py b/ceph-mon/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..8c0af487 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,71 @@ +import amulet + +import os + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're focused on testing and other_services are the other + charms that come from the charm store. + """ + name, units = range(2) + + if this_service[name] != os.path.basename(os.getcwd()): + s = this_service[name] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + self.d.add(this_service[name], units=this_service[units]) + + for svc in other_services: + if self.series: + self.d.add(svc[name], + charm='cs:{}/{}'.format(self.series, svc[name]), + units=svc[units]) + else: + self.d.add(svc[name], units=svc[units]) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup() + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..c843333f --- /dev/null +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,176 @@ +import ConfigParser +import io +import logging +import re +import sys +import time + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. 
+ """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Validate services. + + Verify the specified services are running on the corresponding + service units. + """ + for k, v in commands.iteritems(): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + """ + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool. 
+ """ + for k, v in expected.iteritems(): + if k in actual: + if (isinstance(v, basestring) or + isinstance(v, bool) or + isinstance(v, (int, long))): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. + """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..9179eeb1 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,61 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
+ """ + + def __init__(self, series=None, openstack=None, source=None): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + if svc[name] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + if svc[name] in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..bd327bdc --- /dev/null +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,275 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + + if not os.path.exists(cirros_img): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, cirros_img) + f.close() + + with open(cirros_img) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = 
nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From 5fbfb6ffeab5622ba6a9444f94bb72a36aa5cb32 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 26 Aug 2014 02:06:25 +0000 Subject: [PATCH 0408/2699] Add Amulet basic tests --- ceph-proxy/Makefile | 9 +- ceph-proxy/tests/00-setup | 8 + ceph-proxy/tests/12-basic-precise-grizzly | 11 + ceph-proxy/tests/13-basic-precise-havana | 11 + ceph-proxy/tests/14-basic-precise-icehouse | 11 + ceph-proxy/tests/15-basic-trusty-icehouse | 9 + ceph-proxy/tests/README | 47 ++++ ceph-proxy/tests/basic_deployment.py | 302 +++++++++++++++++++++ 8 files changed, 407 insertions(+), 1 deletion(-) create mode 100755 ceph-proxy/tests/00-setup create mode 100755 ceph-proxy/tests/12-basic-precise-grizzly create mode 100755 ceph-proxy/tests/13-basic-precise-havana create mode 100755 ceph-proxy/tests/14-basic-precise-icehouse create mode 100755 ceph-proxy/tests/15-basic-trusty-icehouse create mode 100644 ceph-proxy/tests/README create mode 100644 ceph-proxy/tests/basic_deployment.py diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index a78fc6d2..b57c4e08 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -2,9 +2,16 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers hooks tests @charm proof +test: + @echo Starting Amulet tests... 
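+	# juju test runs each executable under tests/; the -p flag passes
+	# the named environment variables (here the optional
+	# AMULET_HTTP_PROXY proxy setting) through to the test environment.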
+ # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY + bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup new file mode 100755 index 00000000..62f40029 --- /dev/null +++ b/ceph-proxy/tests/00-setup @@ -0,0 +1,8 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet +sudo apt-get install --yes python-keystoneclient diff --git a/ceph-proxy/tests/12-basic-precise-grizzly b/ceph-proxy/tests/12-basic-precise-grizzly new file mode 100755 index 00000000..0fa08342 --- /dev/null +++ b/ceph-proxy/tests/12-basic-precise-grizzly @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-grizzly.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-grizzly', + source='cloud:precise-updates/grizzly') + deployment.run_tests() diff --git a/ceph-proxy/tests/13-basic-precise-havana b/ceph-proxy/tests/13-basic-precise-havana new file mode 100755 index 00000000..8a299afc --- /dev/null +++ b/ceph-proxy/tests/13-basic-precise-havana @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-havana.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-havana', + source='cloud:precise-updates/havana') + deployment.run_tests() diff --git a/ceph-proxy/tests/14-basic-precise-icehouse b/ceph-proxy/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..020cd751 --- /dev/null +++ b/ceph-proxy/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/ceph-proxy/tests/15-basic-trusty-icehouse b/ceph-proxy/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..f67fea91 --- /dev/null +++ b/ceph-proxy/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/ceph-proxy/tests/README b/ceph-proxy/tests/README new file mode 100644 index 00000000..6eb04415 --- /dev/null +++ b/ceph-proxy/tests/README @@ -0,0 +1,47 @@ +This directory provides Amulet tests that focus on verification of ceph +deployments. + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. 
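+
+  If the proxy mentioned above is needed, export it before running the
+  tests (hypothetical proxy URL):
+
+    export AMULET_HTTP_PROXY=http://squid.example.com:3128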
+ + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. + +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py new file mode 100644 index 00000000..8529a10e --- /dev/null +++ b/ceph-proxy/tests/basic_deployment.py @@ -0,0 +1,302 @@ +#!/usr/bin/python + +import amulet +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class CephBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph deployment.""" + + def __init__(self, series=None, openstack=None, source=None): + """Deploy the entire test environment.""" + super(CephBasicDeployment, self).__init__(series, openstack, source) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, including the number of units, + where ceph is local, and mysql and cinder are from the charm + store. 
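+        Here 'local' means the ceph charm under test is deployed from
+        this working tree rather than from the charm store.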
+ """ + this_service = ('ceph', 3) + other_services = [('mysql', 1), ('keystone', 1), + ('rabbitmq-server', 1), ('nova-compute', 1), + ('glance', 1), ('cinder', 1)] + super(CephBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:ceph': 'ceph:client', + 'keystone:shared-db': 'mysql:shared-db', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'glance:ceph': 'ceph:client', + 'cinder:shared-db': 'mysql:shared-db', + 'cinder:identity-service': 'keystone:identity-service', + 'cinder:amqp': 'rabbitmq-server:amqp', + 'cinder:image-service': 'glance:image-service', + 'cinder:ceph': 'ceph:client' + } + super(CephBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + mysql_config = {'dataset-size': '50%'} + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt' + } + if self._get_openstack_release() >= self.precise_grizzly: + ceph_config['osd-devices'] = '/dev/vdb /srv/ceph' + else: + ceph_config['osd-devices'] = '/dev/vdb' + + configs = {'keystone': keystone_config, + 'mysql': mysql_config, + 'cinder': cinder_config, + 'ceph': ceph_config} + super(CephBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = u.authenticate_keystone_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + 
self.demo_user, + 'password', + self.demo_tenant) + + def _ceph_osd_id(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa + + def test_services(self): + """Verify the expected services are running on the service units.""" + ceph_services = ['status ceph-mon-all', + 'status ceph-mon id=`hostname`'] + commands = { + self.mysql_sentry: ['status mysql'], + self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], + self.nova_compute_sentry: ['status nova-compute'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', + 'status glance-api'], + self.cinder_sentry: ['status cinder-api', + 'status cinder-scheduler', + 'status cinder-volume'] + } + if self._get_openstack_release() >= self.precise_grizzly: + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) + ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + else: + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_services.append(ceph_osd0) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_ceph_nova_client_relation(self): + """Verify the ceph to nova ceph-client relation data.""" + unit = self.ceph0_sentry + relation = ['client', 'nova-compute:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to nova ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_nova_ceph_client_relation(self): + """Verify the nova to ceph ceph-client relation data.""" + unit = self.nova_compute_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('nova to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_glance_client_relation(self): + """Verify the ceph to glance ceph-client relation data.""" + unit = self.ceph1_sentry + relation = ['client', 'glance:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to glance ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_glance_ceph_client_relation(self): + """Verify the glance to ceph ceph-client relation data.""" + unit = self.glance_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('glance to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_cinder_client_relation(self): + """Verify the ceph to cinder ceph-client relation data.""" + unit = self.ceph2_sentry + relation = ['client', 'cinder:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null 
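+            # 'key' is the cephx key the charm publishes for its client;
+            # u.not_null only asserts that some value was set.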
+ } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to cinder ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_cinder_ceph_client_relation(self): + """Verify the cinder to ceph ceph-client relation data.""" + unit = self.cinder_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('cinder to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_config(self): + """Verify the data in the ceph config file.""" + unit = self.ceph0_sentry + conf = '/etc/ceph/ceph.conf' + expected = { + 'global': { + 'keyring': '/etc/ceph/$cluster.$name.keyring', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false', + 'mon cluster log to syslog': 'false' + }, + 'mon': { + 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' + }, + 'mds': { + 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' + }, + 'osd': { + 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', + 'osd journal size': '1024', + 'filestore xattr use omap': 'true' + }, + } + if self._get_openstack_release() >= self.precise_grizzly: + expected['global']['auth cluster required'] = 'none' + expected['global']['auth service required'] = 'none' + expected['global']['auth client required'] = 'none' + else: + expected['global']['auth supported'] = 'none' + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify the specified services are restarted on config change.""" + # NOTE(coreycb): Test not implemented but should it be? ceph services + # aren't restarted by charm after config change. Should + # they be restarted? + if self._get_openstack_release() >= self.precise_essex: + u.log.error("Test not implemented") + return From 9e4b0968b39673c6d6eba3c015b84a3c37f06d45 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 26 Aug 2014 02:06:25 +0000 Subject: [PATCH 0409/2699] Add Amulet basic tests --- ceph-mon/Makefile | 9 +- ceph-mon/tests/00-setup | 8 + ceph-mon/tests/12-basic-precise-grizzly | 11 + ceph-mon/tests/13-basic-precise-havana | 11 + ceph-mon/tests/14-basic-precise-icehouse | 11 + ceph-mon/tests/15-basic-trusty-icehouse | 9 + ceph-mon/tests/README | 47 ++++ ceph-mon/tests/basic_deployment.py | 302 +++++++++++++++++++++++ 8 files changed, 407 insertions(+), 1 deletion(-) create mode 100755 ceph-mon/tests/00-setup create mode 100755 ceph-mon/tests/12-basic-precise-grizzly create mode 100755 ceph-mon/tests/13-basic-precise-havana create mode 100755 ceph-mon/tests/14-basic-precise-icehouse create mode 100755 ceph-mon/tests/15-basic-trusty-icehouse create mode 100644 ceph-mon/tests/README create mode 100644 ceph-mon/tests/basic_deployment.py diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index a78fc6d2..b57c4e08 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -2,9 +2,16 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers hooks tests @charm proof +test: + @echo Starting Amulet tests... 
+ # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY + bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup new file mode 100755 index 00000000..62f40029 --- /dev/null +++ b/ceph-mon/tests/00-setup @@ -0,0 +1,8 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet +sudo apt-get install --yes python-keystoneclient diff --git a/ceph-mon/tests/12-basic-precise-grizzly b/ceph-mon/tests/12-basic-precise-grizzly new file mode 100755 index 00000000..0fa08342 --- /dev/null +++ b/ceph-mon/tests/12-basic-precise-grizzly @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-grizzly.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-grizzly', + source='cloud:precise-updates/grizzly') + deployment.run_tests() diff --git a/ceph-mon/tests/13-basic-precise-havana b/ceph-mon/tests/13-basic-precise-havana new file mode 100755 index 00000000..8a299afc --- /dev/null +++ b/ceph-mon/tests/13-basic-precise-havana @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-havana.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-havana', + source='cloud:precise-updates/havana') + deployment.run_tests() diff --git a/ceph-mon/tests/14-basic-precise-icehouse b/ceph-mon/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..020cd751 --- /dev/null +++ b/ceph-mon/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/ceph-mon/tests/15-basic-trusty-icehouse b/ceph-mon/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..f67fea91 --- /dev/null +++ b/ceph-mon/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/ceph-mon/tests/README b/ceph-mon/tests/README new file mode 100644 index 00000000..6eb04415 --- /dev/null +++ b/ceph-mon/tests/README @@ -0,0 +1,47 @@ +This directory provides Amulet tests that focus on verification of ceph +deployments. + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. 
+ + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. + +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py new file mode 100644 index 00000000..8529a10e --- /dev/null +++ b/ceph-mon/tests/basic_deployment.py @@ -0,0 +1,302 @@ +#!/usr/bin/python + +import amulet +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class CephBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph deployment.""" + + def __init__(self, series=None, openstack=None, source=None): + """Deploy the entire test environment.""" + super(CephBasicDeployment, self).__init__(series, openstack, source) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, including the number of units, + where ceph is local, and mysql and cinder are from the charm + store. 
+ """ + this_service = ('ceph', 3) + other_services = [('mysql', 1), ('keystone', 1), + ('rabbitmq-server', 1), ('nova-compute', 1), + ('glance', 1), ('cinder', 1)] + super(CephBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:ceph': 'ceph:client', + 'keystone:shared-db': 'mysql:shared-db', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'glance:ceph': 'ceph:client', + 'cinder:shared-db': 'mysql:shared-db', + 'cinder:identity-service': 'keystone:identity-service', + 'cinder:amqp': 'rabbitmq-server:amqp', + 'cinder:image-service': 'glance:image-service', + 'cinder:ceph': 'ceph:client' + } + super(CephBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + mysql_config = {'dataset-size': '50%'} + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt' + } + if self._get_openstack_release() >= self.precise_grizzly: + ceph_config['osd-devices'] = '/dev/vdb /srv/ceph' + else: + ceph_config['osd-devices'] = '/dev/vdb' + + configs = {'keystone': keystone_config, + 'mysql': mysql_config, + 'cinder': cinder_config, + 'ceph': ceph_config} + super(CephBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = u.authenticate_keystone_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + 
self.demo_user, + 'password', + self.demo_tenant) + + def _ceph_osd_id(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa + + def test_services(self): + """Verify the expected services are running on the service units.""" + ceph_services = ['status ceph-mon-all', + 'status ceph-mon id=`hostname`'] + commands = { + self.mysql_sentry: ['status mysql'], + self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], + self.nova_compute_sentry: ['status nova-compute'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', + 'status glance-api'], + self.cinder_sentry: ['status cinder-api', + 'status cinder-scheduler', + 'status cinder-volume'] + } + if self._get_openstack_release() >= self.precise_grizzly: + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) + ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + else: + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_services.append(ceph_osd0) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_ceph_nova_client_relation(self): + """Verify the ceph to nova ceph-client relation data.""" + unit = self.ceph0_sentry + relation = ['client', 'nova-compute:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to nova ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_nova_ceph_client_relation(self): + """Verify the nova to ceph ceph-client relation data.""" + unit = self.nova_compute_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('nova to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_glance_client_relation(self): + """Verify the ceph to glance ceph-client relation data.""" + unit = self.ceph1_sentry + relation = ['client', 'glance:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to glance ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_glance_ceph_client_relation(self): + """Verify the glance to ceph ceph-client relation data.""" + unit = self.glance_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('glance to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_cinder_client_relation(self): + """Verify the ceph to cinder ceph-client relation data.""" + unit = self.ceph2_sentry + relation = ['client', 'cinder:ceph'] + expected = { + 'private-address': u.valid_ip, + 'auth': 'none', + 'key': u.not_null 
+ } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph to cinder ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_cinder_ceph_client_relation(self): + """Verify the cinder to ceph ceph-client relation data.""" + unit = self.cinder_sentry + relation = ['ceph', 'ceph:client'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('cinder to ceph ceph-client', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_config(self): + """Verify the data in the ceph config file.""" + unit = self.ceph0_sentry + conf = '/etc/ceph/ceph.conf' + expected = { + 'global': { + 'keyring': '/etc/ceph/$cluster.$name.keyring', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false', + 'mon cluster log to syslog': 'false' + }, + 'mon': { + 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' + }, + 'mds': { + 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' + }, + 'osd': { + 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', + 'osd journal size': '1024', + 'filestore xattr use omap': 'true' + }, + } + if self._get_openstack_release() >= self.precise_grizzly: + expected['global']['auth cluster required'] = 'none' + expected['global']['auth service required'] = 'none' + expected['global']['auth client required'] = 'none' + else: + expected['global']['auth supported'] = 'none' + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify the specified services are restarted on config change.""" + # NOTE(coreycb): Test not implemented but should it be? ceph services + # aren't restarted by charm after config change. Should + # they be restarted? 
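+        # precise_essex is the lowest value in the release enum (0), so
+        # this guard is always true and the test only logs a reminder.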
+ if self._get_openstack_release() >= self.precise_essex: + u.log.error("Test not implemented") + return From acc468cefad5bb0b17785b8e9526b509541e41fe Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 26 Aug 2014 02:06:58 +0000 Subject: [PATCH 0410/2699] Remove leading whitespace from templates/ceph.conf (ConfigParser can't parse) --- ceph-proxy/templates/ceph.conf | 36 +++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 4ef0c66e..3b0d91f1 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,35 +1,35 @@ [global] {% if old_auth %} - auth supported = {{ auth_supported }} +auth supported = {{ auth_supported }} {% else %} - auth cluster required = {{ auth_supported }} - auth service required = {{ auth_supported }} - auth client required = {{ auth_supported }} +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} {% endif %} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} - fsid = {{ fsid }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +fsid = {{ fsid }} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} - mon cluster log to syslog = {{ use_syslog }} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +mon cluster log to syslog = {{ use_syslog }} {%- if ceph_public_network is string %} - public network = {{ ceph_public_network }} +public network = {{ ceph_public_network }} {%- endif %} {%- if ceph_cluster_network is string %} - cluster network = {{ ceph_cluster_network }} +cluster network = {{ ceph_cluster_network }} {%- endif %} [mon] - keyring = /var/lib/ceph/mon/$cluster-$id/keyring +keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] - keyring = /var/lib/ceph/mds/$cluster-$id/keyring +keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] - keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = {{ osd_journal_size }} - filestore xattr use omap = true +keyring = /var/lib/ceph/osd/$cluster-$id/keyring +osd journal size = {{ osd_journal_size }} +filestore xattr use omap = true From bde69fd965afe4325c1c822f8b0d3a4c05bec8d3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 26 Aug 2014 02:06:58 +0000 Subject: [PATCH 0411/2699] Remove leading whitespace from templates/ceph.conf (ConfigParser can't parse) --- ceph-mon/templates/ceph.conf | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 4ef0c66e..3b0d91f1 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,35 +1,35 @@ [global] {% if old_auth %} - auth supported = {{ auth_supported }} +auth supported = {{ auth_supported }} {% else %} - auth cluster required = {{ auth_supported }} - auth service required = {{ auth_supported }} - auth client required = {{ auth_supported }} +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} {% endif %} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} - fsid = {{ fsid }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +fsid = {{ fsid }} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ 
use_syslog }} - mon cluster log to syslog = {{ use_syslog }} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +mon cluster log to syslog = {{ use_syslog }} {%- if ceph_public_network is string %} - public network = {{ ceph_public_network }} +public network = {{ ceph_public_network }} {%- endif %} {%- if ceph_cluster_network is string %} - cluster network = {{ ceph_cluster_network }} +cluster network = {{ ceph_cluster_network }} {%- endif %} [mon] - keyring = /var/lib/ceph/mon/$cluster-$id/keyring +keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] - keyring = /var/lib/ceph/mds/$cluster-$id/keyring +keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] - keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = {{ osd_journal_size }} - filestore xattr use omap = true +keyring = /var/lib/ceph/osd/$cluster-$id/keyring +osd journal size = {{ osd_journal_size }} +filestore xattr use omap = true From 89c41f36817ae6a422e39d191cd0a711d56dc900 Mon Sep 17 00:00:00 2001 From: Yaguang Tang Date: Thu, 28 Aug 2014 18:50:56 +0800 Subject: [PATCH 0412/2699] Get keystone endpoint auth protocol from keystone --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 41b3641f..ec3e1269 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -162,7 +162,7 @@ def get_keystone_conf(): for unit in related_units(relid): ks_auth = { 'auth_type': 'keystone', - 'auth_protocol': 'http', + 'auth_protocol': relation_get('auth_protocol', unit, relid), 'auth_host': relation_get('auth_host', unit, relid), 'auth_port': relation_get('auth_port', unit, relid), 'admin_token': relation_get('admin_token', unit, relid), From 69a94e91c32af7cd0c3c17eea2ef09c226e26167 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 12 Sep 2014 14:12:47 +0200 Subject: [PATCH 0413/2699] Fixed typo in log message. --- ceph-osd/hooks/ceph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 7b297323..ad2a2d51 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -355,9 +355,9 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, subprocess.check_call(cmd) except subprocess.CalledProcessError as e: if ignore_errors: - log('Enable to initialize device: {}'.format(dev), WARNING) + log('Unable to initialize device: {}'.format(dev), WARNING) else: - log('Enable to initialize device: {}'.format(dev), ERROR) + log('Unable to initialize device: {}'.format(dev), ERROR) raise e From d79ff15393809ebbee7515a9ca3bafaad9e83e98 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 12 Sep 2014 14:13:36 +0200 Subject: [PATCH 0414/2699] Fixed typo in log message. 
--- ceph-proxy/hooks/ceph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index bd34a8c3..b0d7be17 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -369,9 +369,9 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, subprocess.check_call(cmd) except subprocess.CalledProcessError as e: if ignore_errors: - log('Enable to initialize device: {}'.format(dev), WARNING) + log('Unable to initialize device: {}'.format(dev), WARNING) else: - log('Enable to initialize device: {}'.format(dev), ERROR) + log('Unable to initialize device: {}'.format(dev), ERROR) raise e From 0df0cc87f04e06ba83a0f5e468969933f671d2ff Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 12 Sep 2014 14:13:36 +0200 Subject: [PATCH 0415/2699] Fixed typo in log message. --- ceph-mon/hooks/ceph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index bd34a8c3..b0d7be17 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -369,9 +369,9 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, subprocess.check_call(cmd) except subprocess.CalledProcessError as e: if ignore_errors: - log('Enable to initialize device: {}'.format(dev), WARNING) + log('Unable to initialize device: {}'.format(dev), WARNING) else: - log('Enable to initialize device: {}'.format(dev), ERROR) + log('Unable to initialize device: {}'.format(dev), ERROR) raise e From 776403f990bc6f532a69917e5039f3e941061bf6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 15 Sep 2014 11:50:24 +0100 Subject: [PATCH 0416/2699] [hopem,r=] Add config option to allow apache2 and libapache-mod-fastcgi to be installed from ceph.com. This enables support for patched packages contanining http 100-continue support. --- ceph-radosgw/config.yaml | 10 ++++++++++ ceph-radosgw/hooks/hooks.py | 32 ++++++++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 0cb65e7e..ca1a8f95 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -46,3 +46,13 @@ options: default: False description: | If set to True, supporting services will log to syslog. + use-ceph-optimised-packages: + type: boolean + default: false + description: | + By default apache2 and libapache2-mod-fastcgi will be installed from the + Ubuntu archives. This option allows for an alternate ceph.com install + source which contains patched versions with added support for HTTP + 100-continue. See the following page for more info: + + http://ceph.com/docs/dumpling/radosgw/manual-install/#continue-support diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 41b3641f..716f2724 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -30,6 +30,7 @@ apt_install, add_source, ) +from charmhelpers.core.host import lsb_release from utils import ( render_template, get_host_ip, @@ -52,16 +53,38 @@ def install_www_scripts(): NSS_DIR = '/var/lib/ceph/nss' -@hooks.hook('install') -def install(): - execd_preinstall() - enable_pocket('multiverse') +def install_ceph_optimised_packages(): + """Inktank provides patched/optimised packages for HTTP 100-continue support + that does has not yet been ported to upstream. These can optionally be + installed from ceph.com archives. 
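+
+    On a trusty host, for example, this adds sources of the form
+    http://gitbuilder.ceph.com/apache2-deb-trusty-x86_64-basic/ref/master
+    (derived from the prolog/epilog strings below).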
+ """ + prolog = "http://gitbuilder.ceph.com/" + epilog = "-x86_64-basic/ref/master" + rel = lsb_release()['DISTRIB_CODENAME'] + fastcgi_source = "%slibapache-mod-fastcgi-deb-%s%s" % (prolog, rel, epilog) + apache_source = "%sapache2-deb-%s%s" % (prolog, rel, epilog) + + for source in [fastcgi_source, apache_source]: + add_source(source, key='6EAEAE2203C3951A') + + +def install_packages(): add_source(config('source'), config('key')) + if config('use-ceph-optimised-packages'): + install_ceph_optimised_packages() + apt_update(fatal=True) apt_install(['radosgw', 'libapache2-mod-fastcgi', 'apache2', 'ntp'], fatal=True) + + +@hooks.hook('install') +def install(): + execd_preinstall() + enable_pocket('multiverse') + install_packages() os.makedirs(NSS_DIR) @@ -121,6 +144,7 @@ def apache_reload(): @hooks.hook('upgrade-charm', 'config-changed') def config_changed(): + install_packages() emit_cephconf() emit_apacheconf() install_www_scripts() From c566f6ca6d1fefeccbc63b0d0523796fb5fb69ef Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 15 Sep 2014 11:53:30 +0100 Subject: [PATCH 0417/2699] fixed lint issue --- ceph-radosgw/hooks/hooks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 716f2724..5eaeea0c 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -54,9 +54,9 @@ def install_www_scripts(): def install_ceph_optimised_packages(): - """Inktank provides patched/optimised packages for HTTP 100-continue support - that does has not yet been ported to upstream. These can optionally be - installed from ceph.com archives. + """Inktank provides patched/optimised packages for HTTP 100-continue + support that does has not yet been ported to upstream. These can + optionally be installed from ceph.com archives. 
""" prolog = "http://gitbuilder.ceph.com/" epilog = "-x86_64-basic/ref/master" From 9e30461364bf91e9a0193934b19e94a922420add Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Sep 2014 15:11:53 +0100 Subject: [PATCH 0418/2699] Sync charm-helpers to switch to using in memory apt cache --- ceph-radosgw/.bzrignore | 1 + ceph-radosgw/Makefile | 10 +- .../contrib/storage/linux/utils.py | 3 + .../hooks/charmhelpers/core/hookenv.py | 55 ++- ceph-radosgw/hooks/charmhelpers/core/host.py | 43 ++- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 313 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ .../hooks/charmhelpers/fetch/__init__.py | 63 +++- .../hooks/charmhelpers/fetch/archiveurl.py | 40 +++ 11 files changed, 670 insertions(+), 36 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/templating.py diff --git a/ceph-radosgw/.bzrignore b/ceph-radosgw/.bzrignore index a9af2130..0879cd47 100644 --- a/ceph-radosgw/.bzrignore +++ b/ceph-radosgw/.bzrignore @@ -1,2 +1,3 @@ .project .pydevproject +bin diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 8b8b72d0..bb4e7b58 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -1,11 +1,17 @@ #!/usr/bin/make +PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks @charm proof -sync: - @charm-helper-sync -c charm-helpers-sync.yaml +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. 
+ + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
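+
+        Illustrative use (names chosen for the example):
+
+            cfg = hookenv.config()
+            cfg.implicit_save = False  # opt out of the automatic save
+            cfg['state'] = 'configured'
+            cfg.save()                 # now required explicitly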
""" if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index d934f940..b85b0280 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -320,12 +332,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. 
- apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..87ecb130 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,313 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. 
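# --- Editor's illustrative sketch, not part of the patch: usage of the chdir()
# context manager and chownr() helper added to core/host.py above. The path and
# the ceph user/group are examples, not taken from the charm.
import os
from charmhelpers.core.host import chdir, chownr

with chdir('/var/lib/ceph'):      # cwd switched, restored on exit
    print(os.getcwd())
chownr('/var/lib/ceph', 'ceph', 'ceph')  # recursive chown, skips broken links
# --- end sketch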
If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. 
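# --- Editor's sketch, not part of the patch: with this framework a charm would
# typically point every hook at one script that just calls manage(). The
# service definition below is a minimal made-up example.
from charmhelpers.core import hookenv
from charmhelpers.core.services.base import ServiceManager

manager = ServiceManager([{
    'service': 'radosgw',                 # hypothetical service name
    'ports': [80],
    'required_data': [hookenv.config()],  # any dict-like, truthy when ready
}])
manager.manage()  # stops/starts/reconfigures based on hookenv.hook_name()
# --- end sketch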
+ """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. 
+ """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. 
This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexicographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it.
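# --- Editor's sketch, not part of the patch: rendering a file with the new
# core/templating helper. The template name and context keys are examples;
# a matching templates/ceph.conf would have to exist in the charm.
from charmhelpers.core.templating import render

render(source='ceph.conf',
       target='/etc/ceph/ceph.conf',
       context={'auth_supported': 'cephx',
                'mon_hosts': '10.0.0.1:6789'},
       owner='root', group='root', perms=0o644)
# --- end sketch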
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 5be512ce..8e9d3804 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -136,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ if source is None: log('Source is not present. 
Skipping') return @@ -225,10 +250,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +276,8 @@ def configure_sources(update=False, Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The fragment needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). Example config: install_sources: | diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..1b11fa03 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -12,7 +14,17 @@ ) from charmhelpers.core.host import mkdir +""" +This class is a plugin for charmhelpers.fetch.install_remote. +It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. + +Example usage: +install_remote("https://example.com/some/archive.tar.gz") +# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + +See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. +""" class ArchiveUrlFetchHandler(BaseFetchHandler): """Handler for archives via generic URLs""" def can_handle(self, source): @@ -61,3 +73,31 @@ def install(self, source): except OSError as e: raise UnhandledSource(e.strerror) return extract(dld_file) + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + if validate == 'sha1' and len(hashsum) != 40: + raise ValueError("HashSum must be = 40 characters when using sha1" + " validation") + if validate == 'md5' and len(hashsum) != 32: + raise ValueError("HashSum must be = 32 characters when using md5" + " validation") + tempfile, headers = urlretrieve(url) + self.validate_file(tempfile, hashsum, validate) + return tempfile + + # Predicate method that returns status of hash matching expected hash.
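# --- Editor's sketch, not part of the patch: driving the new checksum helpers
# on ArchiveUrlFetchHandler. The URL and digest are placeholders; a ValueError
# is raised if the downloaded file does not match the expected digest.
from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

handler = ArchiveUrlFetchHandler()
path = handler.download_and_validate(
    'http://example.com/archive.tar.gz',
    'da39a3ee5e6b4b0d3255bfef95601890afd80709',  # 40-char sha1 expected
    validate='sha1')
# on success, path is the downloaded temporary file
# --- end sketch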
+ def validate_file(self, source, hashsum, vmethod='sha1'): + if vmethod != 'sha1' and vmethod != 'md5': + raise ValueError("Validation Method not supported") + + if vmethod == 'md5': + m = hashlib.md5() + if vmethod == 'sha1': + m = hashlib.sha1() + with open(source) as f: + for line in f: + m.update(line) + if hashsum != m.hexdigest(): + msg = "Hash Mismatch on {} expected {} got {}" + raise ValueError(msg.format(source, hashsum, m.hexdigest())) From cbf697a516707ba2a6a9bca442a1cfc7a383d7d9 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 18 Sep 2014 22:25:16 +0800 Subject: [PATCH 0419/2699] Do IPv6 check for supported version. --- ceph-proxy/hooks/hooks.py | 26 +++++++++++++++++--------- ceph-proxy/hooks/utils.py | 13 ++++++++++++- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index ead950fb..865cf4bd 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -23,7 +23,8 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name + service_name, + unit_get ) from charmhelpers.core.host import ( @@ -42,12 +43,13 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( is_ipv6, - get_ipv6_addr, + get_ipv6_addr ) from utils import ( render_template, get_public_addr, + setup_ipv6 ) hooks = Hooks() @@ -59,6 +61,9 @@ def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') + if config('prefer-ipv6'): + setup_ipv6() + @hooks.hook('install') def install(): @@ -71,9 +76,7 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): - host_ip = '[%s]' % get_ipv6_addr() - #else: - # host_ip = '0.0.0.0' + host_ip = '%s' % get_ipv6_addr() cephcontext = { 'auth_supported': config('auth-supported'), @@ -102,6 +105,9 @@ def emit_cephconf(): def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) + if config('prefer-ipv6'): + setup_ipv6() + # Pre-flight checks if not config('fsid'): log('No fsid supplied, cannot proceed.', level=ERROR) @@ -189,11 +195,12 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() - + if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') + relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) @@ -276,7 +283,7 @@ def radosgw_relation(relid=None): log('mon cluster not in quorum - deferring key provision') if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') @@ -290,9 +297,10 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') + relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 695409a3..cc8eee63 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -19,9 +19,13 @@ filter_installed_packages ) +from charmhelpers.core.host import ( + lsb_release +) + from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, + get_ipv6_addr ) TEMPLATES_DIR = 'templates' @@ -87,3 +91,10 @@ def get_host_ip(hostname=None): def get_public_addr(): return 
ip.get_address_in_network(config('ceph-public-network'), fallback=get_host_ip()) + + +def setup_ipv6(): + ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) + if ubuntu_rel < 14.04: + raise Exception("IPv6 is not supported for Ubuntu " + "versions less than Trusty 14.04") From f6eeffc83b853f0dee63fa18a89be81c413103b6 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 18 Sep 2014 22:25:16 +0800 Subject: [PATCH 0420/2699] Do IPv6 check for supported version. --- ceph-mon/hooks/hooks.py | 26 +++++++++++++++++--------- ceph-mon/hooks/utils.py | 13 ++++++++++++- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index ead950fb..865cf4bd 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -23,7 +23,8 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name + service_name, + unit_get ) from charmhelpers.core.host import ( @@ -42,12 +43,13 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( is_ipv6, - get_ipv6_addr, + get_ipv6_addr ) from utils import ( render_template, get_public_addr, + setup_ipv6 ) hooks = Hooks() @@ -59,6 +61,9 @@ def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') + if config('prefer-ipv6'): + setup_ipv6() + @hooks.hook('install') def install(): @@ -71,9 +76,7 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): - host_ip = '[%s]' % get_ipv6_addr() - #else: - # host_ip = '0.0.0.0' + host_ip = '%s' % get_ipv6_addr() cephcontext = { 'auth_supported': config('auth-supported'), @@ -102,6 +105,9 @@ def emit_cephconf(): def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) + if config('prefer-ipv6'): + setup_ipv6() + # Pre-flight checks if not config('fsid'): log('No fsid supplied, cannot proceed.', level=ERROR) @@ -189,11 +195,12 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() - + if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') + relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) @@ -276,7 +283,7 @@ def radosgw_relation(relid=None): log('mon cluster not in quorum - deferring key provision') if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') @@ -290,9 +297,10 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): if config('prefer-ipv6'): - host = '[%s]' % get_ipv6_addr() + host = get_ipv6_addr() else: host = unit_get('private-address') + relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 695409a3..cc8eee63 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -19,9 +19,13 @@ filter_installed_packages ) +from charmhelpers.core.host import ( + lsb_release +) + from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, + get_ipv6_addr ) TEMPLATES_DIR = 'templates' @@ -87,3 +91,10 @@ def get_host_ip(hostname=None): def get_public_addr(): return ip.get_address_in_network(config('ceph-public-network'), fallback=get_host_ip()) + + +def setup_ipv6(): + ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) + if ubuntu_rel < 14.04: + raise Exception("IPv6 is not supported for Ubuntu " + 
"versions less than Trusty 14.04") From 178303fcc0e29c98aa5a2c11b700b592ff9a31e6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 18 Sep 2014 18:52:25 +0100 Subject: [PATCH 0421/2699] [hopem,r=] Adds IPv6 support for ceph osd. --- ceph-osd/config.yaml | 3 +++ ceph-osd/hooks/hooks.py | 46 ++++++++++++++++++++++++++++++++---- ceph-osd/hooks/utils.py | 30 +++++++---------------- ceph-osd/templates/ceph.conf | 3 +++ 4 files changed, 56 insertions(+), 26 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index e63d36d5..50f14e6d 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -103,3 +103,6 @@ options: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + prefer-ipv6: + type: boolean + default: False \ No newline at end of file diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 6379fcb7..c7d79f2d 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -15,14 +15,17 @@ import ceph from charmhelpers.core.hookenv import ( log, + WARNING, ERROR, config, relation_ids, related_units, relation_get, + relation_set, Hooks, UnregisteredHookError, - service_name + service_name, + unit_get ) from charmhelpers.core.host import ( umount, @@ -38,11 +41,14 @@ from utils import ( render_template, - get_host_ip, + setup_ipv6 ) from charmhelpers.contrib.openstack.alternatives import install_alternative -from charmhelpers.contrib.network.ip import is_ipv6 +from charmhelpers.contrib.network.ip import ( + is_ipv6, + get_ipv6_addr +) hooks = Hooks() @@ -58,6 +64,10 @@ def install_upstart_scripts(): def install(): add_source(config('source'), config('key')) apt_update(fatal=True) + + if config('prefer-ipv6'): + setup_ipv6() + apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() @@ -76,6 +86,14 @@ def emit_cephconf(): 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), } + + if config('prefer-ipv6'): + host_ip = get_ipv6_addr() + if host_ip: + cephcontext['host_ip'] = host_ip + else: + log("Unable to obtain host address", level=WARNING) + # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) @@ -95,6 +113,9 @@ def config_changed(): log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) + if config('prefer-ipv6'): + setup_ipv6() + e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): umount(e_mountpoint) @@ -120,8 +141,12 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) or \ - get_host_ip(relation_get('private-address', unit, relid)) + if config('prefer-ipv6'): + addr = relation_get('ceph-public-address', unit, relid) or \ + get_ipv6_addr() + else: + addr = relation_get('private-address', unit, relid) + if addr is not None: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) @@ -166,6 +191,17 @@ def get_devices(): @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): + if config('prefer-ipv6'): + host = get_ipv6_addr() + else: + host = unit_get('private-address') + + if host: + relation_data = {'private-address': host} + relation_set(**relation_data) + else: + log("Unable to obtain host address", level=WARNING) + bootstrap_key = relation_get('osd_bootstrap_key') if get_fsid() and 
get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index c1044a45..d4bbd38f 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -10,7 +10,6 @@ import socket import re from charmhelpers.core.hookenv import ( - unit_get, cached ) from charmhelpers.fetch import ( @@ -18,6 +17,10 @@ filter_installed_packages ) +from charmhelpers.core.host import ( + lsb_release +) + TEMPLATES_DIR = 'templates' try: @@ -27,13 +30,6 @@ fatal=True) import jinja2 -try: - import dns.resolver -except ImportError: - apt_install(filter_installed_packages(['python-dnspython']), - fatal=True) - import dns.resolver - def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -59,16 +55,8 @@ def get_unit_hostname(): return socket.gethostname() -@cached -def get_host_ip(hostname=None): - hostname = hostname or unit_get('private-address') - try: - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address +def setup_ipv6(): + ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) + if ubuntu_rel < 14.04: + raise Exception("IPv6 is not supported for Ubuntu " + "versions less than Trusty 14.04") diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index def993c3..44336ca0 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -32,3 +32,6 @@ osd journal size = {{ osd_journal_size }} filestore xattr use omap = true + host = {{ hostname }} + public addr = {{ host_ip }} + cluster addr = {{ host_ip }} \ No newline at end of file From 12b7ad7b64b10f8e40135e11f05a932b9254220b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 08:57:33 +0100 Subject: [PATCH 0422/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 33 +- .../contrib/storage/linux/utils.py | 3 + ceph-osd/hooks/charmhelpers/core/hookenv.py | 55 ++- ceph-osd/hooks/charmhelpers/core/host.py | 43 ++- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 313 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 125 +++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 63 +++- .../hooks/charmhelpers/fetch/archiveurl.py | 40 +++ 10 files changed, 693 insertions(+), 35 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-osd/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-osd/hooks/charmhelpers/core/templating.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..f8cc1975 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,34 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + 
iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) + + +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not a valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - See which values in the dictionary have changed since the previous hook. - For values that have changed, see what the previous value was. - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup.
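# --- Editor's sketch, not part of the patch: how the two new ip.py helpers
# synced above combine. get_ipv6_addr() returns the first global (non
# link-local, non-VIP) address on the interface; format_ipv6_addr() brackets it
# for use in URLs. The interface and address shown are examples.
from charmhelpers.contrib.network.ip import get_ipv6_addr, format_ipv6_addr

addr = get_ipv6_addr(iface='eth0')  # e.g. '2001:db8::10'
url = 'http://%s:6789/' % format_ipv6_addr(addr)  # 'http://[2001:db8::10]:6789/'
# --- end sketch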
+ + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. """ if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index d934f940..b85b0280 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -320,12 +332,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. 
That way we avoid race - # conditions with other applications building the cache in the same - # place. - apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/ceph-osd/hooks/charmhelpers/core/services/__init__.py b/ceph-osd/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..87ecb130 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,313 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. 
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. 
+ """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. 
+ """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. 
+        This is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a template, for use as a ready action.
+    """
+    def __init__(self, source, target, owner='root', group='root', perms=0444):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        context = {}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+        templating.render(self.source, self.target, context,
+                          self.owner, self.group, self.perms)
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py
new file mode 100644
index 00000000..2c638853
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/core/templating.py
@@ -0,0 +1,51 @@
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    Note: Using this requires python-jinja2; if it is not installed, calling
+    this will attempt to use charmhelpers.fetch.apt_install to install it.
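[Editorial aside] A short usage sketch of render(); the template name, target path, and context keys here are invented for illustration:

    from charmhelpers.core import templating

    # Renders $CHARM_DIR/templates/ceph.conf to /etc/ceph/ceph.conf,
    # owned root:root with mode 0444 (the defaults).
    templating.render(
        source='ceph.conf',
        target='/etc/ceph/ceph.conf',
        context={'fsid': 'some-fsid', 'mon_hosts': '10.0.0.1 10.0.0.2'},
    )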
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        apt_install('python-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if templates_dir is None:
+        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+    loader = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        source = source
+        template = loader.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    host.mkdir(os.path.dirname(target))
+    host.write_file(target, content, owner, group, perms)
diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py
index 5be512ce..8e9d3804 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py
@@ -1,4 +1,5 @@
 import importlib
+from tempfile import NamedTemporaryFile
 import time
 from yaml import safe_load
 from charmhelpers.core.host import (
@@ -116,14 +117,7 @@ def base_url(self, url):

 def filter_installed_packages(packages):
     """Returns a list of packages that require installation"""
-    import apt_pkg
-    apt_pkg.init()
-
-    # Tell apt to build an in-memory cache to prevent race conditions (if
-    # another process is already building the cache).
-    apt_pkg.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt_pkg.Cache()
+    cache = apt_cache()
     _pkgs = []
     for package in packages:
         try:
@@ -136,6 +130,16 @@ def filter_installed_packages(packages):
     return _pkgs


+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    import apt_pkg
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
 def apt_install(packages, options=None, fatal=False):
     """Install one or more packages"""
     if options is None:
@@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False):


 def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples:
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver,
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -225,10 +250,23 @@ def add_source(source, key=None):
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])


 def configure_sources(update=False,
@@ -238,7 +276,8 @@ def configure_sources(update=False,
     Configure multiple sources from charm configuration.

     The lists are encoded as yaml fragments in the configuration.
-    The fragment needs to be included as a string.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().

     Example config:
         install_sources: |
diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
index 87e7071a..1b11fa03 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
@@ -1,6 +1,8 @@
 import os
 import urllib2
+from urllib import urlretrieve
 import urlparse
+import hashlib

 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -12,7 +14,17 @@
 )
 from charmhelpers.core.host import mkdir

+"""
+This class is a plugin for charmhelpers.fetch.install_remote.
+It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/.
+
+Example usage:
+install_remote("https://example.com/some/archive.tar.gz")
+# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/.
+
+See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types.
+"""
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """Handler for archives via generic URLs"""
     def can_handle(self, source):
@@ -61,3 +73,31 @@ def install(self, source):
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return extract(dld_file)
+
+    # Mandatory file validation via SHA-1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        if validate == 'sha1' and len(hashsum) != 40:
+            raise ValueError("HashSum must be = 40 characters when using sha1"
+                             " validation")
+        if validate == 'md5' and len(hashsum) != 32:
+            raise ValueError("HashSum must be = 32 characters when using md5"
+                             " validation")
+        tempfile, headers = urlretrieve(url)
+        self.validate_file(tempfile, hashsum, validate)
+        return tempfile
+
+    # Validate a file's hash; raises ValueError if the digest does not match.
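[Editorial aside] A hedged usage sketch of the validation pair above; the URL and digest are invented:

    handler = ArchiveUrlFetchHandler()
    # Fetch to a temporary file and check its 40-character SHA-1 digest;
    # note that validate_file() raises ValueError on a mismatch rather
    # than returning a status.
    path = handler.download_and_validate(
        'https://example.com/some/archive.tar.gz',
        'da39a3ee5e6b4b0d3255bfef95601890afd80709',
        validate='sha1')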
+ def validate_file(self, source, hashsum, vmethod='sha1'): + if vmethod != 'sha1' and vmethod != 'md5': + raise ValueError("Validation Method not supported") + + if vmethod == 'md5': + m = hashlib.md5() + if vmethod == 'sha1': + m = hashlib.sha1() + with open(source) as f: + for line in f: + m.update(line) + if hashsum != m.hexdigest(): + msg = "Hash Mismatch on {} expected {} got {}" + raise ValueError(msg.format(source, hashsum, m.hexdigest())) From e16cf8ed263b85ffc8804cf2b6addbb7900f569c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 09:10:31 +0100 Subject: [PATCH 0423/2699] fixed get_mon_hosts() --- ceph-osd/hooks/hooks.py | 6 ++++-- ceph-osd/hooks/utils.py | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index c7d79f2d..45f092f2 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -41,6 +41,7 @@ from utils import ( render_template, + get_host_ip, setup_ipv6 ) @@ -143,9 +144,10 @@ def get_mon_hosts(): for unit in related_units(relid): if config('prefer-ipv6'): addr = relation_get('ceph-public-address', unit, relid) or \ - get_ipv6_addr() + relation_get('private-address', unit, relid) else: - addr = relation_get('private-address', unit, relid) + addr = relation_get('ceph-public-address', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) if addr is not None: if is_ipv6(addr): diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index d4bbd38f..09e27307 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -10,6 +10,7 @@ import socket import re from charmhelpers.core.hookenv import ( + unit_get, cached ) from charmhelpers.fetch import ( @@ -30,6 +31,13 @@ fatal=True) import jinja2 +try: + import dns.resolver +except ImportError: + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) + import dns.resolver + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( @@ -55,6 +63,21 @@ def get_unit_hostname(): return socket.gethostname() +@cached +def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + + def setup_ipv6(): ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) if ubuntu_rel < 14.04: From 415acda46c0e7aa7a5fb42854ad6d7ceafd72ef7 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 10:50:33 +0100 Subject: [PATCH 0424/2699] fixed get_public_addr() for ipv6 --- ceph-proxy/hooks/utils.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index cc8eee63..676a4abc 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -25,6 +25,7 @@ from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( + is_ipv6, get_ipv6_addr ) @@ -71,9 +72,6 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): - if config('prefer-ipv6'): - return '[%s]' % get_ipv6_addr() - hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address @@ -89,8 +87,14 @@ def get_host_ip(hostname=None): @cached def 
get_public_addr(): - return ip.get_address_in_network(config('ceph-public-network'), - fallback=get_host_ip()) + addr = config('ceph-public-network') + if config('prefer-ipv6'): + if addr and is_ipv6_addr(addr): + return addr + else: + return get_ipv6_addr() + else: + return ip.get_address_in_network(addr, fallback=get_host_ip()) def setup_ipv6(): From 57c25143930fb7ef19d7e6a89fb215f171bf1bcb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 10:50:33 +0100 Subject: [PATCH 0425/2699] fixed get_public_addr() for ipv6 --- ceph-mon/hooks/utils.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index cc8eee63..676a4abc 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -25,6 +25,7 @@ from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( + is_ipv6, get_ipv6_addr ) @@ -71,9 +72,6 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): - if config('prefer-ipv6'): - return '[%s]' % get_ipv6_addr() - hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address @@ -89,8 +87,14 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - return ip.get_address_in_network(config('ceph-public-network'), - fallback=get_host_ip()) + addr = config('ceph-public-network') + if config('prefer-ipv6'): + if addr and is_ipv6_addr(addr): + return addr + else: + return get_ipv6_addr() + else: + return ip.get_address_in_network(addr, fallback=get_host_ip()) def setup_ipv6(): From b7076d1d868c7db3185c35860232d50961bdfc37 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 10:51:46 +0100 Subject: [PATCH 0426/2699] fixed get_public_addr() for ipv6 --- ceph-proxy/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 676a4abc..a9373807 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -89,10 +89,10 @@ def get_host_ip(hostname=None): def get_public_addr(): addr = config('ceph-public-network') if config('prefer-ipv6'): - if addr and is_ipv6_addr(addr): + if addr and is_ipv6(addr): return addr else: - return get_ipv6_addr() + return get_ipv6_addr() else: return ip.get_address_in_network(addr, fallback=get_host_ip()) From fd094b4185535832bad61cb33c76de5d06a55cff Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 10:51:46 +0100 Subject: [PATCH 0427/2699] fixed get_public_addr() for ipv6 --- ceph-mon/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 676a4abc..a9373807 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -89,10 +89,10 @@ def get_host_ip(hostname=None): def get_public_addr(): addr = config('ceph-public-network') if config('prefer-ipv6'): - if addr and is_ipv6_addr(addr): + if addr and is_ipv6(addr): return addr else: - return get_ipv6_addr() + return get_ipv6_addr() else: return ip.get_address_in_network(addr, fallback=get_host_ip()) From 38f8f0fe31ec2e381e106297da3460107dbd4b85 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Sep 2014 10:57:53 +0100 Subject: [PATCH 0428/2699] fixed get_mon_hosts() --- ceph-osd/hooks/hooks.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 45f092f2..ff029aad 100755 --- a/ceph-osd/hooks/hooks.py +++ 
b/ceph-osd/hooks/hooks.py
@@ -142,11 +142,11 @@ def get_mon_hosts():
     hosts = []
     for relid in relation_ids('mon'):
         for unit in related_units(relid):
-            if config('prefer-ipv6'):
-                addr = relation_get('ceph-public-address', unit, relid) or \
-                    relation_get('private-address', unit, relid)
-            else:
-                addr = relation_get('ceph-public-address', unit, relid) or \
+            addr = relation_get('ceph-public-address', unit, relid)
+            if not addr:
+                if config('prefer-ipv6'):
+                    addr = relation_get('private-address', unit, relid)
+                else:
+                    addr = get_host_ip(relation_get('private-address', unit, relid))

             if addr is not None:

From 09e8ca4c5b67ec7d2778e2ed27d30e741fc810d9 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Fri, 19 Sep 2014 11:00:18 +0000
Subject: [PATCH 0429/2699] Add ha support to ceph-radosgw

---
 ceph-radosgw/charm-helpers-sync.yaml | 4 +
 ceph-radosgw/config.yaml | 23 ++
 .../contrib/hahelpers/__init__.py | 0
 .../charmhelpers/contrib/hahelpers/apache.py | 59 +++++
 .../charmhelpers/contrib/hahelpers/cluster.py | 226 ++++++++++++++++++
 .../charmhelpers/contrib/network/__init__.py | 0
 .../hooks/charmhelpers/contrib/network/ip.py | 187 +++++++++++++++
 ceph-radosgw/hooks/cluster-relation-changed | 1 +
 ceph-radosgw/hooks/cluster-relation-joined | 1 +
 ceph-radosgw/hooks/ha-relation-changed | 1 +
 ceph-radosgw/hooks/ha-relation-joined | 1 +
 ceph-radosgw/hooks/hooks.py | 88 ++++++-
 ceph-radosgw/hooks/utils.py | 1 +
 ceph-radosgw/metadata.yaml | 6 +
 14 files changed, 594 insertions(+), 4 deletions(-)
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py
 create mode 120000 ceph-radosgw/hooks/cluster-relation-changed
 create mode 120000 ceph-radosgw/hooks/cluster-relation-joined
 create mode 120000 ceph-radosgw/hooks/ha-relation-changed
 create mode 120000 ceph-radosgw/hooks/ha-relation-joined

diff --git a/ceph-radosgw/charm-helpers-sync.yaml b/ceph-radosgw/charm-helpers-sync.yaml
index 0963bbcd..16e1d202 100644
--- a/ceph-radosgw/charm-helpers-sync.yaml
+++ b/ceph-radosgw/charm-helpers-sync.yaml
@@ -5,5 +5,9 @@ include:
     - fetch
     - contrib.storage.linux:
         - utils
+    - contrib.hahelpers:
+        - apache
+        - cluster
     - payload.execd
     - contrib.openstack.alternatives
+    - contrib.network.ip
diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index 0cb65e7e..38a14adc 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -17,6 +17,7 @@ options:
       for precise.
   key:
     type: string
+    default:
     description: |
       Key ID to import to the apt keyring to support use with arbitrary
       source configuration from outside of Launchpad archives or PPA's.
@@ -46,3 +47,25 @@ options:
     default: False
     description: |
       If set to True, supporting services will log to syslog.
+  vip:
+    type: string
+    default:
+    description: |
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
+  ha-bindiface:
+    type: string
+    default: eth0
+    description: |
+      Default network interface on which the HA cluster will bind for
+      communication with the other members of the HA Cluster.
+ ha-mcastport: + type: int + default: 5414 + description: | + Default multicast port number that will be used to communicate between + HA Cluster nodes. + # Network configuration options + # by default all access is over 'private-address' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..8d5fb8ba --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,59 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..7151b1d0 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,226 @@ +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. +""" + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, + WARNING, + unit_get, +) + + +class HAIncompleteConfig(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 2. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. 
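[Editorial aside] The 'oldest surviving unit' rule reduces to comparing unit numbers, as in this illustrative sketch (unit names invented):

    # oldest_peer() returns True only when no peer has a lower unit number
    # than the local unit (here ceph/0 against peers ceph/1 and ceph/2).
    local_no = int('ceph/0'.split('/')[1])                     # -> 0
    peer_nos = [int(p.split('/')[1]) for p in ['ceph/1', 'ceph/2']]
    is_oldest = all(no > local_no for no in peer_nos)          # -> True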
+ """ + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_leader(resource): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + """ + cmd = [ + "crm", "resource", + "show", resource + ] + try: + status = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + return False + else: + if get_unit_hostname() in status: + return True + else: + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + if config_get('use-https') == "yes": + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ssl_cert', rid=r_id, unit=unit), + relation_get('ssl_key', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the API service + ''' + i = 0 + if len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
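[Editorial aside] A worked example of the port arithmetic in these two helpers (numbers illustrative): with a standard public port of 80 on a clustered unit that also terminates HTTPS,

    public_port = 80
    # determine_api_port(): -10 for haproxy/clustering, -10 more for HTTPS
    api_port = public_port - (2 * 10)       # -> 60
    # determine_apache_port(): only the clustering step applies
    apache_port = public_port - (1 * 10)    # -> 70

so the API service binds two steps below the public port while the intermediate listener binds one step below.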
+ + public_port: int: standard public port for given service + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if len(peer_units()) > 0 or is_clustered(): + i += 1 + return public_port - (i * 10) + + +def get_hacluster_config(): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip + + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] + conf = {} + for setting in settings: + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in conf.iteritems() if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = unit_get('private-address') + return '%s://%s' % (scheme, addr) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py new file mode 100644 index 00000000..f8cc1975 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -0,0 +1,187 @@ +import sys + +from functools import partial + +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + ERROR, log, config, +) + +try: + import netifaces +except ImportError: + apt_install('python-netifaces') + import netifaces + +try: + import netaddr +except ImportError: + apt_install('python-netaddr') + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def get_address_in_network(network, fallback=None, fatal=False): + """ + Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). 
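[Editorial aside] For example, on a hypothetical unit whose eth0 holds 192.168.1.5/24:

    get_address_in_network('192.168.1.0/24')                  # -> '192.168.1.5'
    get_address_in_network('10.0.0.0/8', fallback='1.2.3.4')  # -> '1.2.3.4'
    get_address_in_network(None, fallback='1.2.3.4')          # -> '1.2.3.4'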
+ + """ + + def not_found_error_out(): + log("No IP address found in network: %s" % network, + level=ERROR) + sys.exit(1) + + if network is None: + if fallback is not None: + return fallback + else: + if fatal: + not_found_error_out() + + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + not_found_error_out() + + return None + + +def is_ipv6(address): + '''Determine whether provided address is IPv6 or not''' + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + else: + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. 
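[Editorial aside] The two partials defined just below reuse this lookup; on the same hypothetical host as above:

    get_iface_for_address('192.168.1.5')     # -> 'eth0'
    get_netmask_for_address('192.168.1.5')   # -> '255.255.255.0'
    get_iface_for_address('10.9.9.9')        # -> None (not bindable here)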
+ """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if address in cidr: + if key == 'iface': + return iface + else: + return addr[key] + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) + + +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address diff --git a/ceph-radosgw/hooks/cluster-relation-changed b/ceph-radosgw/hooks/cluster-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/cluster-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/cluster-relation-joined b/ceph-radosgw/hooks/cluster-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/cluster-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/ha-relation-changed b/ceph-radosgw/hooks/ha-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/ha-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/ha-relation-joined b/ceph-radosgw/hooks/ha-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/ha-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 41b3641f..74bdcf69 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -13,7 +13,7 @@ import glob import os import ceph - +import charmhelpers.contrib.hahelpers.cluster as cluster from charmhelpers.core.hookenv import ( relation_get, relation_ids, @@ -22,7 +22,7 @@ unit_get, open_port, relation_set, - log, + log, ERROR, Hooks, UnregisteredHookError, ) from charmhelpers.fetch import ( @@ -34,13 +34,18 @@ render_template, get_host_ip, enable_pocket, - is_apache_24 + is_apache_24, + CEPHRG_HA_RES, ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import cmp_pkgrevno from socket import gethostname as 
get_unit_hostname +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address +) hooks = Hooks() @@ -212,8 +217,13 @@ def identity_joined(relid=None): if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) + if not cluster.eligible_leader(CEPHRG_HA_RES): + return + if cluster.is_clustered(): + hostname = config('vip') + else: + hostname = unit_get('private-address') - hostname = unit_get('private-address') admin_url = 'http://{}:80/swift'.format(hostname) internal_url = public_url = '{}/v1'.format(admin_url) relation_set(service='swift', @@ -230,6 +240,76 @@ def identity_changed(): restart() +@hooks.hook('cluster-relation-changed', + 'cluster-relation-joined') +def cluster_changed(): + print "Do cluster changed actions here" + + +@hooks.hook('ha-relation-joined') +def ha_relation_joined(): + # Obtain the config values necessary for the cluster config. These + # include multicast port and interface to bind to. + corosync_bindiface = config('ha-bindiface') + corosync_mcastport = config('ha-mcastport') + vip = config('vip') + if not vip: + log('Unable to configure hacluster as vip not provided', + level=ERROR) + sys.exit(1) + # Obtain resources + # SWIFT_HA_RES = 'grp_swift_vips' + resources = { + 'res_cephrg_haproxy': 'lsb:haproxy' + } + resource_params = { + 'res_cephrg_haproxy': 'op monitor interval="5s"' + } + + vip_group = [] + for vip in vip.split(): + iface = get_iface_for_address(vip) + if iface is not None: + vip_key = 'res_cephrg_{}_vip'.format(iface) + resources[vip_key] = 'ocf:heartbeat:IPaddr2' + resource_params[vip_key] = ( + 'params ip="{vip}" cidr_netmask="{netmask}"' + ' nic="{iface}"'.format(vip=vip, + iface=iface, + netmask=get_netmask_for_address(vip)) + ) + vip_group.append(vip_key) + + if len(vip_group) >= 1: + relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)}) + + init_services = { + 'res_cephrg_haproxy': 'haproxy' + } + clones = { + 'cl_cephrg_haproxy': 'res_cephrg_haproxy' + } + + relation_set(init_services=init_services, + corosync_bindiface=corosync_bindiface, + corosync_mcastport=corosync_mcastport, + resources=resources, + resource_params=resource_params, + clones=clones) + + +@hooks.hook('ha-relation-changed') +def ha_relation_changed(): + clustered = relation_get('clustered') + if clustered and cluster.is_leader(CEPHRG_HA_RES): + log('Cluster configured, notifying other services and' + 'updating keystone endpoint configuration') + # Tell all related services to start using + # the VIP instead + for r_id in relation_ids('identity-service'): + identity_joined(relid=r_id) + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b8e16623..e4b7dfa8 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -14,6 +14,7 @@ from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install +CEPHRG_HA_RES = 'grp_cephrg_vips' TEMPLATES_DIR = 'templates' try: diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index f4ca7a1a..07ae7e70 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,6 +14,12 @@ requires: interface: ceph-radosgw identity-service: interface: keystone + ha: + interface: hacluster + scope: container provides: gateway: interface: http +peers: + cluster: + interface: swift-ha From 9fff6d2c9cb1400e58dd89249117e36584ec4b42 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sat, 20 Sep 2014 01:05:07 +0800 
Subject: [PATCH 0430/2699] Use get_ipv6_addr()[0] directly --- .../hooks/charmhelpers/contrib/network/ip.py | 110 +++++++++++++++--- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 55 ++++++--- ceph-proxy/hooks/charmhelpers/core/host.py | 8 +- .../hooks/charmhelpers/core/services/base.py | 26 +++-- .../hooks/charmhelpers/fetch/__init__.py | 63 ++++++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 40 +++++++ ceph-proxy/hooks/hooks.py | 8 +- ceph-proxy/hooks/utils.py | 2 +- 8 files changed, 252 insertions(+), 60 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 7edbcc48..b859a097 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,19 +157,102 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - return ipv6_addr[0] - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. 
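[Editorial aside] Against a hypothetical host with eth0 plus an alias eth0:1, the reworked helpers behave roughly as follows (addresses invented):

    get_ipv4_addr('eth0')                    # -> ['10.0.0.10']
    get_ipv4_addr('eth0', inc_aliases=True)  # -> ['10.0.0.10', '10.0.0.11']
    get_iface_addr('eth9', fatal=False)      # -> [] instead of raising
    get_ipv6_addr('eth0')[0]                 # first global (non-fe80) address

The list-valued return is why callers in this patch switch to get_ipv6_addr()[0].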
+ """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. 
:param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. """ if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index ca7780df..b85b0280 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -332,13 +332,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. - apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index f08e6d78..87ecb130 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -17,20 +17,13 @@ def __init__(self, services=None): """ Register a list of services, given their definitions. - Traditional charm authoring is focused on implementing hooks. That is, - the charm author is thinking in terms of "What hook am I handling; what - does this hook need to do?" However, in most cases, the real question - should be "Do I have the information I need to configure and start this - piece of software and, if so, what are the steps for doing so?" The - ServiceManager framework tries to bring the focus to the data and the - setup tasks, in the most declarative way possible. 
- Service definitions are dicts in the following formats (all keys except 'service' are optional):: { "service": , "required_data": , + "provided_data": , "data_ready": , "data_lost": , "start": , @@ -44,6 +37,10 @@ def __init__(self, services=None): of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more information. + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + The 'data_ready' value should be either a single callback, or a list of callbacks, to be called when all items in 'required_data' pass `is_ready()`. Each callback will be called with the service name as the only parameter. @@ -121,14 +118,25 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ hook_name = hookenv.hook_name() for service in self.services.values(): for provider in service.get('provided_data', []): if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): data = provider.provide_data() - if provider._is_ready(data): + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: hookenv.relation_set(None, data) def reconfigure_services(self, *service_names): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 5be512ce..8e9d3804 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -136,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver,
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -225,10 +250,23 @@ def add_source(source, key=None):
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])


 def configure_sources(update=False,
@@ -238,7 +276,8 @@ def configure_sources(update=False,
     Configure multiple sources from charm configuration.

     The lists are encoded as yaml fragments in the configuration.
-    The fragment needs to be included as a string.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().

     Example config:
         install_sources: |
diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
index 87e7071a..1b11fa03 100644
--- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
+++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
@@ -1,6 +1,8 @@
 import os
 import urllib2
+from urllib import urlretrieve
 import urlparse
+import hashlib

 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -12,7 +14,17 @@
 )
 from charmhelpers.core.host import mkdir

+"""
+This class is a plugin for charmhelpers.fetch.install_remote.
+It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/.
+
+Example usage:
+install_remote("https://example.com/some/archive.tar.gz")
+# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/.
+
+See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types.
+"""
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """Handler for archives via generic URLs"""
     def can_handle(self, source):
@@ -61,3 +73,31 @@ def install(self, source):
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return extract(dld_file)
+
+    # Mandatory file validation via SHA-1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        if validate == 'sha1' and len(hashsum) != 40:
+            raise ValueError("HashSum must be = 40 characters when using sha1"
+                             " validation")
+        if validate == 'md5' and len(hashsum) != 32:
+            raise ValueError("HashSum must be = 32 characters when using md5"
+                             " validation")
+        tempfile, headers = urlretrieve(url)
+        self.validate_file(tempfile, hashsum, validate)
+        return tempfile
+
+    # Validate a file's hash; raises ValueError if the digest does not match.
+ def validate_file(self, source, hashsum, vmethod='sha1'): + if vmethod != 'sha1' and vmethod != 'md5': + raise ValueError("Validation Method not supported") + + if vmethod == 'md5': + m = hashlib.md5() + if vmethod == 'sha1': + m = hashlib.sha1() + with open(source) as f: + for line in f: + m.update(line) + if hashsum != m.hexdigest(): + msg = "Hash Mismatch on {} expected {} got {}" + raise ValueError(msg.format(source, hashsum, m.hexdigest())) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 865cf4bd..209fa651 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -76,7 +76,7 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): - host_ip = '%s' % get_ipv6_addr() + host_ip = '%s' % get_ipv6_addr()[0] cephcontext = { 'auth_supported': config('auth-supported'), @@ -197,7 +197,7 @@ def mon_relation(): emit_cephconf() if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') @@ -283,7 +283,7 @@ def radosgw_relation(relid=None): log('mon cluster not in quorum - deferring key provision') if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') @@ -297,7 +297,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index a9373807..f5e7e85d 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -92,7 +92,7 @@ def get_public_addr(): if addr and is_ipv6(addr): return addr else: - return get_ipv6_addr() + return get_ipv6_addr()[0] else: return ip.get_address_in_network(addr, fallback=get_host_ip()) From b05003341cc549c4c822e55a2fa380fd89af9f28 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sat, 20 Sep 2014 01:05:07 +0800 Subject: [PATCH 0431/2699] Use get_ipv6_addr()[0] directly --- .../hooks/charmhelpers/contrib/network/ip.py | 110 +++++++++++++++--- ceph-mon/hooks/charmhelpers/core/hookenv.py | 55 ++++++--- ceph-mon/hooks/charmhelpers/core/host.py | 8 +- .../hooks/charmhelpers/core/services/base.py | 26 +++-- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 63 ++++++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 40 +++++++ ceph-mon/hooks/hooks.py | 8 +- ceph-mon/hooks/utils.py | 2 +- 8 files changed, 252 insertions(+), 60 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 7edbcc48..b859a097 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,19 +157,102 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. 
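[Editorial aside] A quick sketch of the bracket-wrapping behaviour (addresses invented):

    format_ipv6_addr('2001:db8::1')   # -> '[2001:db8::1]'
    format_ipv6_addr('192.168.1.1')   # -> None (logs an error)

which lets the result drop straight into a URL such as http://[2001:db8::1]:6789/.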
+ """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - return ipv6_addr[0] - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. 
""" if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. """ if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index ca7780df..b85b0280 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -332,13 +332,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. - apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index f08e6d78..87ecb130 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -17,20 +17,13 @@ def __init__(self, services=None): """ Register a list of services, given their definitions. - Traditional charm authoring is focused on implementing hooks. That is, - the charm author is thinking in terms of "What hook am I handling; what - does this hook need to do?" However, in most cases, the real question - should be "Do I have the information I need to configure and start this - piece of software and, if so, what are the steps for doing so?" The - ServiceManager framework tries to bring the focus to the data and the - setup tasks, in the most declarative way possible. - Service definitions are dicts in the following formats (all keys except 'service' are optional):: { "service": , "required_data": , + "provided_data": , "data_ready": , "data_lost": , "start": , @@ -44,6 +37,10 @@ def __init__(self, services=None): of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more information. + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + The 'data_ready' value should be either a single callback, or a list of callbacks, to be called when all items in 'required_data' pass `is_ready()`. Each callback will be called with the service name as the only parameter. 
@@ -121,14 +118,25 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ hook_name = hookenv.hook_name() for service in self.services.values(): for provider in service.get('provided_data', []): if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): data = provider.provide_data() - if provider._is_ready(data): + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: hookenv.relation_set(None, data) def reconfigure_services(self, *service_names): diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 5be512ce..8e9d3804 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -136,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ if source is None: log('Source is not present. 
Skipping') return @@ -225,10 +250,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +276,8 @@ def configure_sources(update=False, Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The frament needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). Example config: install_sources: | diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..1b11fa03 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -12,7 +14,17 @@ ) from charmhelpers.core.host import mkdir +""" +This class is a plugin for charmhelpers.fetch.install_remote. +It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. + +Example usage: +install_remote("https://example.com/some/archive.tar.gz") +# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + +See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. +""" class ArchiveUrlFetchHandler(BaseFetchHandler): """Handler for archives via generic URLs""" def can_handle(self, source): @@ -61,3 +73,31 @@ def install(self, source): except OSError as e: raise UnhandledSource(e.strerror) return extract(dld_file) + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + if validate == 'sha1' and len(hashsum) != 40: + raise ValueError("HashSum must be = 40 characters when using sha1" + " validation") + if validate == 'md5' and len(hashsum) != 32: + raise ValueError("HashSum must be = 32 characters when using md5" + " validation") + tempfile, headers = urlretrieve(url) + self.validate_file(tempfile, hashsum, validate) + return tempfile + + # Predicate method that returns status of hash matching expected hash. 
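+    # (A mismatch raises ValueError; on success the method simply
+    # returns None.)  Hypothetical sha1 example, 40 hex digits:
+    #   self.validate_file('/tmp/archive.tgz',
+    #                      'da39a3ee5e6b4b0d3255bfef95601890afd80709')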
+ def validate_file(self, source, hashsum, vmethod='sha1'): + if vmethod != 'sha1' and vmethod != 'md5': + raise ValueError("Validation Method not supported") + + if vmethod == 'md5': + m = hashlib.md5() + if vmethod == 'sha1': + m = hashlib.sha1() + with open(source) as f: + for line in f: + m.update(line) + if hashsum != m.hexdigest(): + msg = "Hash Mismatch on {} expected {} got {}" + raise ValueError(msg.format(source, hashsum, m.hexdigest())) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 865cf4bd..209fa651 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -76,7 +76,7 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): - host_ip = '%s' % get_ipv6_addr() + host_ip = '%s' % get_ipv6_addr()[0] cephcontext = { 'auth_supported': config('auth-supported'), @@ -197,7 +197,7 @@ def mon_relation(): emit_cephconf() if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') @@ -283,7 +283,7 @@ def radosgw_relation(relid=None): log('mon cluster not in quorum - deferring key provision') if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') @@ -297,7 +297,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index a9373807..f5e7e85d 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -92,7 +92,7 @@ def get_public_addr(): if addr and is_ipv6(addr): return addr else: - return get_ipv6_addr() + return get_ipv6_addr()[0] else: return ip.get_address_in_network(addr, fallback=get_host_ip()) From 4d9479a73fdb23c36ac898839e6261126e78fb9d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 21 Sep 2014 20:32:02 +0100 Subject: [PATCH 0432/2699] synced ~xianghui/charm-helpers/format-ipv6 --- .../hooks/charmhelpers/contrib/network/ip.py | 109 +++++++++++++++--- 1 file changed, 90 insertions(+), 19 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index f8cc1975..b859a097 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,24 +157,6 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): - try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) - - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) - - return ipv6_addr[0] - - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) - - def format_ipv6_addr(address): """ IPv6 needs to be wrapped with [] in url link to parse correctly. 
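
For context, the bracket wrapping is what lets an IPv6 address appear in
a URL, where bare colons would otherwise be read as a port separator; a
hypothetical example:

    addr = format_ipv6_addr('2001:db8::1')   # -> '[2001:db8::1]'
    if addr:
        url = 'http://%s:6789/' % addr
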
@@ -185,3 +168,91 @@ def format_ipv6_addr(address): level=ERROR) address = None return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False From 862f91d12589d52c49ef1309011ea657ad0c6b91 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 21 Sep 2014 20:33:49 +0100 Subject: [PATCH 0433/2699] adjuested to new get_ipv6_addr --- ceph-osd/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index ff029aad..12b076f4 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -89,7 +89,7 @@ def emit_cephconf(): } if config('prefer-ipv6'): - host_ip = get_ipv6_addr() + host_ip = get_ipv6_addr()[0] if host_ip: cephcontext['host_ip'] = host_ip else: @@ -194,7 +194,7 @@ def get_devices(): 'mon-relation-departed') def mon_relation(): if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') From ef6bf7cee20f8612fb63304706a7d60612f8228f Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Mon, 22 Sep 2014 09:56:47 +0800 Subject: [PATCH 0434/2699] Set host_ip for ipv4. --- ceph-proxy/hooks/hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 209fa651..f4c3d6b3 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -77,6 +77,8 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): host_ip = '%s' % get_ipv6_addr()[0] + else: + host_ip = unit_get('private-address') cephcontext = { 'auth_supported': config('auth-supported'), From ce1c94ff42e5eeeb8aef1139bcd4a9e552c883c4 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Mon, 22 Sep 2014 09:56:47 +0800 Subject: [PATCH 0435/2699] Set host_ip for ipv4. 
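
The change gives host_ip an IPv4 fallback so that emit_cephconf() always
defines it; a restatement of the hunk below:

    if config('prefer-ipv6'):
        host_ip = '%s' % get_ipv6_addr()[0]    # first global IPv6 address
    else:
        host_ip = unit_get('private-address')  # IPv4 case
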
--- ceph-mon/hooks/hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 209fa651..f4c3d6b3 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -77,6 +77,8 @@ def install(): def emit_cephconf(): if config('prefer-ipv6'): host_ip = '%s' % get_ipv6_addr()[0] + else: + host_ip = unit_get('private-address') cephcontext = { 'auth_supported': config('auth-supported'), From ed908d002ee0818182874f5458bb7b7f747ea9d3 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Sep 2014 21:02:27 +0100 Subject: [PATCH 0436/2699] synced lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 --- ceph-proxy/bin/charm_helpers_sync.py | 225 ++++++++++++++++++ ceph-proxy/charm-helpers-hooks.yaml | 2 +- ceph-proxy/charm-helpers-tests.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- ceph-proxy/hooks/charmhelpers/core/host.py | 31 ++- .../hooks/charmhelpers/core/services/base.py | 3 + .../charmhelpers/core/services/helpers.py | 124 +++++++++- .../hooks/charmhelpers/fetch/__init__.py | 21 +- .../hooks/charmhelpers/fetch/archiveurl.py | 86 +++---- 9 files changed, 444 insertions(+), 58 deletions(-) create mode 100644 ceph-proxy/bin/charm_helpers_sync.py diff --git a/ceph-proxy/bin/charm_helpers_sync.py b/ceph-proxy/bin/charm_helpers_sync.py new file mode 100644 index 00000000..03bf64dc --- /dev/null +++ b/ceph-proxy/bin/charm_helpers_sync.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# +# Copyright 2013 Canonical Ltd. + +# Authors: +# Adam Gandelman +# + +import logging +import optparse +import os +import subprocess +import shutil +import sys +import tempfile +import yaml + +from fnmatch import fnmatch + +CHARM_HELPERS_BRANCH = 'lp:charm-helpers' + + +def parse_config(conf_file): + if not os.path.isfile(conf_file): + logging.error('Invalid config file: %s.' % conf_file) + return False + return yaml.load(open(conf_file).read()) + + +def clone_helpers(work_dir, branch): + dest = os.path.join(work_dir, 'charm-helpers') + logging.info('Checking out %s to %s.' % (branch, dest)) + cmd = ['bzr', 'checkout', '--lightweight', branch, dest] + subprocess.check_call(cmd) + return dest + + +def _module_path(module): + return os.path.join(*module.split('.')) + + +def _src_path(src, module): + return os.path.join(src, 'charmhelpers', _module_path(module)) + + +def _dest_path(dest, module): + return os.path.join(dest, _module_path(module)) + + +def _is_pyfile(path): + return os.path.isfile(path + '.py') + + +def ensure_init(path): + ''' + ensure directories leading up to path are importable, omitting + parent directory, eg path='/hooks/helpers/foo'/: + hooks/ + hooks/helpers/__init__.py + hooks/helpers/foo/__init__.py + ''' + for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): + _i = os.path.join(d, '__init__.py') + if not os.path.exists(_i): + logging.info('Adding missing __init__.py: %s' % _i) + open(_i, 'wb').close() + + +def sync_pyfile(src, dest): + src = src + '.py' + src_dir = os.path.dirname(src) + logging.info('Syncing pyfile: %s -> %s.' 
% (src, dest)) + if not os.path.exists(dest): + os.makedirs(dest) + shutil.copy(src, dest) + if os.path.isfile(os.path.join(src_dir, '__init__.py')): + shutil.copy(os.path.join(src_dir, '__init__.py'), + dest) + ensure_init(dest) + + +def get_filter(opts=None): + opts = opts or [] + if 'inc=*' in opts: + # do not filter any files, include everything + return None + + def _filter(dir, ls): + incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] + _filter = [] + for f in ls: + _f = os.path.join(dir, f) + + if not os.path.isdir(_f) and not _f.endswith('.py') and incs: + if True not in [fnmatch(_f, inc) for inc in incs]: + logging.debug('Not syncing %s, does not match include ' + 'filters (%s)' % (_f, incs)) + _filter.append(f) + else: + logging.debug('Including file, which matches include ' + 'filters (%s): %s' % (incs, _f)) + elif (os.path.isfile(_f) and not _f.endswith('.py')): + logging.debug('Not syncing file: %s' % f) + _filter.append(f) + elif (os.path.isdir(_f) and not + os.path.isfile(os.path.join(_f, '__init__.py'))): + logging.debug('Not syncing directory: %s' % f) + _filter.append(f) + return _filter + return _filter + + +def sync_directory(src, dest, opts=None): + if os.path.exists(dest): + logging.debug('Removing existing directory: %s' % dest) + shutil.rmtree(dest) + logging.info('Syncing directory: %s -> %s.' % (src, dest)) + + shutil.copytree(src, dest, ignore=get_filter(opts)) + ensure_init(dest) + + +def sync(src, dest, module, opts=None): + if os.path.isdir(_src_path(src, module)): + sync_directory(_src_path(src, module), _dest_path(dest, module), opts) + elif _is_pyfile(_src_path(src, module)): + sync_pyfile(_src_path(src, module), + os.path.dirname(_dest_path(dest, module))) + else: + logging.warn('Could not sync: %s. Neither a pyfile or directory, ' + 'does it even exist?' % module) + + +def parse_sync_options(options): + if not options: + return [] + return options.split(',') + + +def extract_options(inc, global_options=None): + global_options = global_options or [] + if global_options and isinstance(global_options, basestring): + global_options = [global_options] + if '|' not in inc: + return (inc, global_options) + inc, opts = inc.split('|') + return (inc, parse_sync_options(opts) + global_options) + + +def sync_helpers(include, src, dest, options=None): + if not os.path.isdir(dest): + os.makedirs(dest) + + global_options = parse_sync_options(options) + + for inc in include: + if isinstance(inc, str): + inc, opts = extract_options(inc, global_options) + sync(src, dest, inc, opts) + elif isinstance(inc, dict): + # could also do nested dicts here. + for k, v in inc.iteritems(): + if isinstance(v, list): + for m in v: + inc, opts = extract_options(m, global_options) + sync(src, dest, '%s.%s' % (k, inc), opts) + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('-c', '--config', action='store', dest='config', + default=None, help='helper config file') + parser.add_option('-D', '--debug', action='store_true', dest='debug', + default=False, help='debug') + parser.add_option('-b', '--branch', action='store', dest='branch', + help='charm-helpers bzr branch (overrides config)') + parser.add_option('-d', '--destination', action='store', dest='dest_dir', + help='sync destination dir (overrides config)') + (opts, args) = parser.parse_args() + + if opts.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + if opts.config: + logging.info('Loading charm helper config from %s.' 
% opts.config) + config = parse_config(opts.config) + if not config: + logging.error('Could not parse config from %s.' % opts.config) + sys.exit(1) + else: + config = {} + + if 'branch' not in config: + config['branch'] = CHARM_HELPERS_BRANCH + if opts.branch: + config['branch'] = opts.branch + if opts.dest_dir: + config['destination'] = opts.dest_dir + + if 'destination' not in config: + logging.error('No destination dir. specified as option or config.') + sys.exit(1) + + if 'include' not in config: + if not args: + logging.error('No modules to sync specified as option or config.') + sys.exit(1) + config['include'] = [] + [config['include'].append(a) for a in args] + + sync_options = None + if 'options' in config: + sync_options = config['options'] + tmpd = tempfile.mkdtemp() + try: + checkout = clone_helpers(tmpd, config['branch']) + sync_helpers(config['include'], checkout, config['destination'], + options=sync_options) + except Exception, e: + logging.error("Could not sync: %s" % e) + raise e + finally: + logging.debug('Cleaning up %s' % tmpd) + shutil.rmtree(tmpd) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index afb9e42b..afbb1bd2 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index 48b12f6f..25fc96d2 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: tests/charmhelpers include: - contrib.amulet diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index b859a097..37ecbbed 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -5,7 +5,9 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + WARNING, + ERROR, + log ) try: @@ -164,9 +166,9 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not an valid ipv6 address: %s" % address, - level=ERROR) + log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None + return address diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index b85b0280..3ac70143 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + + :param str checksum: Value of the checksum used to validate the file. 
+ :param str hash_type: Hash algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index 6b5a1b9f..87ecb130 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. 
+ + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..20a20ac6 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -311,22 +311,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. 
+ Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 1b11fa03..d1dcbc33 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -12,21 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash -""" -This class is a plugin for charmhelpers.fetch.install_remote. -It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. -Example usage: -install_remote("https://example.com/some/archive.tar.gz") -# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + Can fetch from http, https, ftp, and file URLs. -See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. -""" -class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -36,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -60,7 +64,29 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the :param:`source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. 
+ :param str hash_type: Algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -72,32 +98,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - if validate == 'sha1' and len(hashsum) != 40: - raise ValueError("HashSum must be = 40 characters when using sha1" - " validation") - if validate == 'md5' and len(hashsum) != 32: - raise ValueError("HashSum must be = 32 characters when using md5" - " validation") - tempfile, headers = urlretrieve(url) - self.validate_file(tempfile, hashsum, validate) - return tempfile - - # Predicate method that returns status of hash matching expected hash. - def validate_file(self, source, hashsum, vmethod='sha1'): - if vmethod != 'sha1' and vmethod != 'md5': - raise ValueError("Validation Method not supported") - - if vmethod == 'md5': - m = hashlib.md5() - if vmethod == 'sha1': - m = hashlib.sha1() - with open(source) as f: - for line in f: - m.update(line) - if hashsum != m.hexdigest(): - msg = "Hash Mismatch on {} expected {} got {}" - raise ValueError(msg.format(source, hashsum, m.hexdigest())) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From 3ada5d0471caadd32b443c165f24b8a89f99e367 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Sep 2014 21:02:27 +0100 Subject: [PATCH 0437/2699] synced lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 --- ceph-mon/bin/charm_helpers_sync.py | 225 ++++++++++++++++++ ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/charm-helpers-tests.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- ceph-mon/hooks/charmhelpers/core/host.py | 31 ++- .../hooks/charmhelpers/core/services/base.py | 3 + .../charmhelpers/core/services/helpers.py | 124 +++++++++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 21 +- .../hooks/charmhelpers/fetch/archiveurl.py | 86 +++---- 9 files changed, 444 insertions(+), 58 deletions(-) create mode 100644 ceph-mon/bin/charm_helpers_sync.py diff --git a/ceph-mon/bin/charm_helpers_sync.py b/ceph-mon/bin/charm_helpers_sync.py new file mode 100644 index 00000000..03bf64dc --- /dev/null +++ b/ceph-mon/bin/charm_helpers_sync.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# +# Copyright 2013 Canonical Ltd. + +# Authors: +# Adam Gandelman +# + +import logging +import optparse +import os +import subprocess +import shutil +import sys +import tempfile +import yaml + +from fnmatch import fnmatch + +CHARM_HELPERS_BRANCH = 'lp:charm-helpers' + + +def parse_config(conf_file): + if not os.path.isfile(conf_file): + logging.error('Invalid config file: %s.' % conf_file) + return False + return yaml.load(open(conf_file).read()) + + +def clone_helpers(work_dir, branch): + dest = os.path.join(work_dir, 'charm-helpers') + logging.info('Checking out %s to %s.' 
% (branch, dest)) + cmd = ['bzr', 'checkout', '--lightweight', branch, dest] + subprocess.check_call(cmd) + return dest + + +def _module_path(module): + return os.path.join(*module.split('.')) + + +def _src_path(src, module): + return os.path.join(src, 'charmhelpers', _module_path(module)) + + +def _dest_path(dest, module): + return os.path.join(dest, _module_path(module)) + + +def _is_pyfile(path): + return os.path.isfile(path + '.py') + + +def ensure_init(path): + ''' + ensure directories leading up to path are importable, omitting + parent directory, eg path='/hooks/helpers/foo'/: + hooks/ + hooks/helpers/__init__.py + hooks/helpers/foo/__init__.py + ''' + for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): + _i = os.path.join(d, '__init__.py') + if not os.path.exists(_i): + logging.info('Adding missing __init__.py: %s' % _i) + open(_i, 'wb').close() + + +def sync_pyfile(src, dest): + src = src + '.py' + src_dir = os.path.dirname(src) + logging.info('Syncing pyfile: %s -> %s.' % (src, dest)) + if not os.path.exists(dest): + os.makedirs(dest) + shutil.copy(src, dest) + if os.path.isfile(os.path.join(src_dir, '__init__.py')): + shutil.copy(os.path.join(src_dir, '__init__.py'), + dest) + ensure_init(dest) + + +def get_filter(opts=None): + opts = opts or [] + if 'inc=*' in opts: + # do not filter any files, include everything + return None + + def _filter(dir, ls): + incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] + _filter = [] + for f in ls: + _f = os.path.join(dir, f) + + if not os.path.isdir(_f) and not _f.endswith('.py') and incs: + if True not in [fnmatch(_f, inc) for inc in incs]: + logging.debug('Not syncing %s, does not match include ' + 'filters (%s)' % (_f, incs)) + _filter.append(f) + else: + logging.debug('Including file, which matches include ' + 'filters (%s): %s' % (incs, _f)) + elif (os.path.isfile(_f) and not _f.endswith('.py')): + logging.debug('Not syncing file: %s' % f) + _filter.append(f) + elif (os.path.isdir(_f) and not + os.path.isfile(os.path.join(_f, '__init__.py'))): + logging.debug('Not syncing directory: %s' % f) + _filter.append(f) + return _filter + return _filter + + +def sync_directory(src, dest, opts=None): + if os.path.exists(dest): + logging.debug('Removing existing directory: %s' % dest) + shutil.rmtree(dest) + logging.info('Syncing directory: %s -> %s.' % (src, dest)) + + shutil.copytree(src, dest, ignore=get_filter(opts)) + ensure_init(dest) + + +def sync(src, dest, module, opts=None): + if os.path.isdir(_src_path(src, module)): + sync_directory(_src_path(src, module), _dest_path(dest, module), opts) + elif _is_pyfile(_src_path(src, module)): + sync_pyfile(_src_path(src, module), + os.path.dirname(_dest_path(dest, module))) + else: + logging.warn('Could not sync: %s. Neither a pyfile or directory, ' + 'does it even exist?' 
% module) + + +def parse_sync_options(options): + if not options: + return [] + return options.split(',') + + +def extract_options(inc, global_options=None): + global_options = global_options or [] + if global_options and isinstance(global_options, basestring): + global_options = [global_options] + if '|' not in inc: + return (inc, global_options) + inc, opts = inc.split('|') + return (inc, parse_sync_options(opts) + global_options) + + +def sync_helpers(include, src, dest, options=None): + if not os.path.isdir(dest): + os.makedirs(dest) + + global_options = parse_sync_options(options) + + for inc in include: + if isinstance(inc, str): + inc, opts = extract_options(inc, global_options) + sync(src, dest, inc, opts) + elif isinstance(inc, dict): + # could also do nested dicts here. + for k, v in inc.iteritems(): + if isinstance(v, list): + for m in v: + inc, opts = extract_options(m, global_options) + sync(src, dest, '%s.%s' % (k, inc), opts) + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('-c', '--config', action='store', dest='config', + default=None, help='helper config file') + parser.add_option('-D', '--debug', action='store_true', dest='debug', + default=False, help='debug') + parser.add_option('-b', '--branch', action='store', dest='branch', + help='charm-helpers bzr branch (overrides config)') + parser.add_option('-d', '--destination', action='store', dest='dest_dir', + help='sync destination dir (overrides config)') + (opts, args) = parser.parse_args() + + if opts.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + if opts.config: + logging.info('Loading charm helper config from %s.' % opts.config) + config = parse_config(opts.config) + if not config: + logging.error('Could not parse config from %s.' % opts.config) + sys.exit(1) + else: + config = {} + + if 'branch' not in config: + config['branch'] = CHARM_HELPERS_BRANCH + if opts.branch: + config['branch'] = opts.branch + if opts.dest_dir: + config['destination'] = opts.dest_dir + + if 'destination' not in config: + logging.error('No destination dir. 
specified as option or config.') + sys.exit(1) + + if 'include' not in config: + if not args: + logging.error('No modules to sync specified as option or config.') + sys.exit(1) + config['include'] = [] + [config['include'].append(a) for a in args] + + sync_options = None + if 'options' in config: + sync_options = config['options'] + tmpd = tempfile.mkdtemp() + try: + checkout = clone_helpers(tmpd, config['branch']) + sync_helpers(config['include'], checkout, config['destination'], + options=sync_options) + except Exception, e: + logging.error("Could not sync: %s" % e) + raise e + finally: + logging.debug('Cleaning up %s' % tmpd) + shutil.rmtree(tmpd) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index afb9e42b..afbb1bd2 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index 48b12f6f..25fc96d2 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: tests/charmhelpers include: - contrib.amulet diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index b859a097..37ecbbed 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -5,7 +5,9 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + WARNING, + ERROR, + log ) try: @@ -164,9 +166,9 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not an valid ipv6 address: %s" % address, - level=ERROR) + log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None + return address diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index b85b0280..3ac70143 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ :raises ChecksumError: If the file fails the checksum + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index 6b5a1b9f..87ecb130 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
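+
+    Example (hypothetical option names)::
+
+        config = RequiredConfig('fsid', 'monitor-secret')
+        if config:
+            # all required options were changed from their defaults;
+            # the full charm config is available under 'config'
+            fsid = config['config']['fsid']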
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..20a20ac6 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -311,22 +311,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. 
If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 1b11fa03..d1dcbc33 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -12,21 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash -""" -This class is a plugin for charmhelpers.fetch.install_remote. -It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. -Example usage: -install_remote("https://example.com/some/archive.tar.gz") -# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + Can fetch from http, https, ftp, and file URLs. -See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. -""" -class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -36,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -60,7 +64,29 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the :param:`source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
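The lighter download_and_validate() helper above covers the fetch-without-extract case; a sketch with an invented URL and digest:

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    # Fetches to a temporary file and checks its SHA-1; raises
    # charmhelpers.core.host.ChecksumError on mismatch.
    path = handler.download_and_validate(
        'http://example.com/blob.bin',
        'da39a3ee5e6b4b0d3255bfef95601890afd80709')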
+ """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -72,32 +98,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - if validate == 'sha1' and len(hashsum) != 40: - raise ValueError("HashSum must be = 40 characters when using sha1" - " validation") - if validate == 'md5' and len(hashsum) != 32: - raise ValueError("HashSum must be = 32 characters when using md5" - " validation") - tempfile, headers = urlretrieve(url) - self.validate_file(tempfile, hashsum, validate) - return tempfile - - # Predicate method that returns status of hash matching expected hash. - def validate_file(self, source, hashsum, vmethod='sha1'): - if vmethod != 'sha1' and vmethod != 'md5': - raise ValueError("Validation Method not supported") - - if vmethod == 'md5': - m = hashlib.md5() - if vmethod == 'sha1': - m = hashlib.sha1() - with open(source) as f: - for line in f: - m.update(line) - if hashsum != m.hexdigest(): - msg = "Hash Mismatch on {} expected {} got {}" - raise ValueError(msg.format(source, hashsum, m.hexdigest())) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From 94b3e9f5c81017edbbdf8abac30a6845b10bb45c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Sep 2014 21:03:44 +0100 Subject: [PATCH 0438/2699] synced lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 --- ceph-osd/charm-helpers-sync.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- ceph-osd/hooks/charmhelpers/core/host.py | 31 ++++- .../charmhelpers/core/services/helpers.py | 124 +++++++++++++++++- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 21 ++- .../hooks/charmhelpers/fetch/archiveurl.py | 86 ++++++------ 6 files changed, 215 insertions(+), 57 deletions(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index 1d9081b7..c6b2beb2 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index b859a097..37ecbbed 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -5,7 +5,9 @@ from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + WARNING, + ERROR, + log ) try: @@ -164,9 +166,9 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not an valid ipv6 address: %s" % address, - level=ERROR) + log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None + return address diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index b85b0280..3ac70143 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not 
found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. 
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. 
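A typical instantiation, wired into a services definition as a data_ready callback; the template name is illustrative:

    from charmhelpers.core.services.helpers import TemplateCallback

    render_conf = TemplateCallback(source='ceph.conf.j2',
                                   target='/etc/ceph/ceph.conf',
                                   owner='root', group='root', perms=0444)
    # The services framework later invokes it as
    # render_conf(manager, service_name, event_name).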
+ + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..20a20ac6 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -311,22 +311,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 1b11fa03..d1dcbc33 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -12,21 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash -""" -This class is a plugin for charmhelpers.fetch.install_remote. -It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. -Example usage: -install_remote("https://example.com/some/archive.tar.gz") -# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + Can fetch from http, https, ftp, and file URLs. -See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. -""" -class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -36,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. 
+ :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -60,7 +64,29 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the :param:`source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -72,32 +98,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - if validate == 'sha1' and len(hashsum) != 40: - raise ValueError("HashSum must be = 40 characters when using sha1" - " validation") - if validate == 'md5' and len(hashsum) != 32: - raise ValueError("HashSum must be = 32 characters when using md5" - " validation") - tempfile, headers = urlretrieve(url) - self.validate_file(tempfile, hashsum, validate) - return tempfile - - # Predicate method that returns status of hash matching expected hash. 
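The fragment handling this rewrite lands on (urlparse.parse_qs plus hashlib.algorithms) can be mirrored standalone; the URL and digests below are made up:

    import hashlib
    import urlparse  # Python 2, as in this module

    url = 'http://example.com/file.tgz#sha256=abc123&md5=def456'
    options = urlparse.parse_qs(urlparse.urlparse(url).fragment)
    for algo, values in options.items():
        # parse_qs yields lists; hashlib.algorithms names the supported
        # constructors ('md5', 'sha1', 'sha224', ...).
        if algo in hashlib.algorithms:
            print('would check %s digest %s' % (algo, values[0]))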
- def validate_file(self, source, hashsum, vmethod='sha1'): - if vmethod != 'sha1' and vmethod != 'md5': - raise ValueError("Validation Method not supported") - - if vmethod == 'md5': - m = hashlib.md5() - if vmethod == 'sha1': - m = hashlib.sha1() - with open(source) as f: - for line in f: - m.update(line) - if hashsum != m.hexdigest(): - msg = "Hash Mismatch on {} expected {} got {}" - raise ValueError(msg.format(source, hashsum, m.hexdigest())) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From 87a9bcdc2a791244d760867019dc2a15fb3631d2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 23 Sep 2014 11:01:29 +0100 Subject: [PATCH 0439/2699] reset charm-helpers sync path to lp:charm-helpers --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- ceph-proxy/charm-helpers-tests.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index afbb1bd2..afb9e42b 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index 25fc96d2..48b12f6f 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: tests/charmhelpers include: - contrib.amulet From ac788ccfc85f0406382c89d8c72ee938ceac868b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 23 Sep 2014 11:01:29 +0100 Subject: [PATCH 0440/2699] reset charm-helpers sync path to lp:charm-helpers --- ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/charm-helpers-tests.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index afbb1bd2..afb9e42b 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index 25fc96d2..48b12f6f 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: tests/charmhelpers include: - contrib.amulet From b856d6b48446552acdab21a748fb339709584eb3 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 23 Sep 2014 11:01:35 +0100 Subject: [PATCH 0441/2699] reset charm-helpers sync path to lp:charm-helpers --- ceph-osd/charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-sync.yaml index c6b2beb2..1d9081b7 100644 --- a/ceph-osd/charm-helpers-sync.yaml +++ b/ceph-osd/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 437e7a50635fb917145ced1c1df4f51e98652a56 Mon Sep 17 00:00:00 2001 From: Edward 
Hope-Morley Date: Wed, 24 Sep 2014 13:12:45 +0100 Subject: [PATCH 0442/2699] applied jamespage review fixes --- ceph-proxy/hooks/hooks.py | 55 +++++++++++----------------------- ceph-proxy/hooks/utils.py | 26 +++++++--------- ceph-proxy/templates/ceph.conf | 7 +---- 3 files changed, 29 insertions(+), 59 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7e1dfade..bdd66546 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -23,8 +23,7 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name, - unit_get + service_name ) from charmhelpers.core.host import ( @@ -42,14 +41,14 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + is_ipv6 ) from utils import ( render_template, get_public_addr, - setup_ipv6 + assert_charm_supports_ipv6, + get_host_ip ) hooks = Hooks() @@ -62,7 +61,7 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') if config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() @hooks.hook('install') @@ -75,11 +74,6 @@ def install(): def emit_cephconf(): - if config('prefer-ipv6'): - host_ip = '%s' % get_ipv6_addr()[0] - else: - host_ip = unit_get('private-address') - cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -88,8 +82,7 @@ def emit_cephconf(): 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network'), - 'host_ip': host_ip, + 'ceph_cluster_network': config('ceph-cluster-network') } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -105,10 +98,10 @@ def emit_cephconf(): @hooks.hook('config-changed') def config_changed(): - log('Monitor hosts are ' + repr(get_mon_hosts())) - if config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() + + log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks if not config('fsid'): @@ -157,10 +150,11 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): if config('prefer-ipv6'): - addr = relation_get('ceph-public-address', unit, relid) + r_attr = 'ceph-public-address' else: - addr = relation_get('private-address', unit, relid) + r_attr = 'private-address' + addr = relation_get(r_attr, unit, relid) if addr is not None: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) @@ -188,23 +182,16 @@ def get_devices(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): - relation_set(relation_id=relid, - relation_settings={'ceph-public-address': - get_public_addr()}) + settings = {'ceph-public-address': get_public_addr()} + relation_set(relation_id=relid, relation_settings=settings) @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): emit_cephconf() - - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) moncount = int(config('monitor-count')) @@ -284,11 +271,7 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - 
else: - host = unit_get('private-address') - + host = get_host_ip() relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) @@ -298,11 +281,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - + host = get_host_ip() relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index f5e7e85d..cd0255d0 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -25,7 +25,6 @@ from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( - is_ipv6, get_ipv6_addr ) @@ -72,6 +71,9 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return get_ipv6_addr()[0] + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address @@ -87,18 +89,12 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - addr = config('ceph-public-network') - if config('prefer-ipv6'): - if addr and is_ipv6(addr): - return addr - else: - return get_ipv6_addr()[0] - else: - return ip.get_address_in_network(addr, fallback=get_host_ip()) - - -def setup_ipv6(): - ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) - if ubuntu_rel < 14.04: - raise Exception("IPv6 is not supported for Ubuntu " + network = config('ceph-public-network') + return ip.get_address_in_network(network, fallback=get_host_ip()) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 3cce3b74..078b035c 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -31,9 +31,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true - - host = {{ hostname }} - public addr = {{ host_ip }} - cluster addr = {{ host_ip }} - +filestore xattr use omap = true \ No newline at end of file From fe81fc83735f79d1cf4478759e723fce62309ab8 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 13:12:45 +0100 Subject: [PATCH 0443/2699] applied jamespage review fixes --- ceph-mon/hooks/hooks.py | 55 +++++++++++------------------------- ceph-mon/hooks/utils.py | 26 ++++++++--------- ceph-mon/templates/ceph.conf | 7 +---- 3 files changed, 29 insertions(+), 59 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7e1dfade..bdd66546 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -23,8 +23,7 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name, - unit_get + service_name ) from charmhelpers.core.host import ( @@ -42,14 +41,14 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + is_ipv6 ) from utils import ( render_template, get_public_addr, - setup_ipv6 + assert_charm_supports_ipv6, + get_host_ip ) hooks = Hooks() @@ -62,7 +61,7 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') if 
config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() @hooks.hook('install') @@ -75,11 +74,6 @@ def install(): def emit_cephconf(): - if config('prefer-ipv6'): - host_ip = '%s' % get_ipv6_addr()[0] - else: - host_ip = unit_get('private-address') - cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -88,8 +82,7 @@ def emit_cephconf(): 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network'), - 'host_ip': host_ip, + 'ceph_cluster_network': config('ceph-cluster-network') } # Install ceph.conf as an alternative to support # co-existence with other charms that write this file @@ -105,10 +98,10 @@ def emit_cephconf(): @hooks.hook('config-changed') def config_changed(): - log('Monitor hosts are ' + repr(get_mon_hosts())) - if config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() + + log('Monitor hosts are ' + repr(get_mon_hosts())) # Pre-flight checks if not config('fsid'): @@ -157,10 +150,11 @@ def get_mon_hosts(): for relid in relation_ids('mon'): for unit in related_units(relid): if config('prefer-ipv6'): - addr = relation_get('ceph-public-address', unit, relid) + r_attr = 'ceph-public-address' else: - addr = relation_get('private-address', unit, relid) + r_attr = 'private-address' + addr = relation_get(r_attr, unit, relid) if addr is not None: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) @@ -188,23 +182,16 @@ def get_devices(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): - relation_set(relation_id=relid, - relation_settings={'ceph-public-address': - get_public_addr()}) + settings = {'ceph-public-address': get_public_addr()} + relation_set(relation_id=relid, relation_settings=settings) @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): emit_cephconf() - - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) moncount = int(config('monitor-count')) @@ -284,11 +271,7 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - + host = get_host_ip() relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) @@ -298,11 +281,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - + host = get_host_ip() relation_data = {} relation_data['private-address'] = host relation_set(**relation_data) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index f5e7e85d..cd0255d0 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -25,7 +25,6 @@ from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( - is_ipv6, get_ipv6_addr ) @@ -72,6 +71,9 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return get_ipv6_addr()[0] + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address @@ -87,18 +89,12 @@ def get_host_ip(hostname=None): @cached 
def get_public_addr(): - addr = config('ceph-public-network') - if config('prefer-ipv6'): - if addr and is_ipv6(addr): - return addr - else: - return get_ipv6_addr()[0] - else: - return ip.get_address_in_network(addr, fallback=get_host_ip()) - - -def setup_ipv6(): - ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) - if ubuntu_rel < 14.04: - raise Exception("IPv6 is not supported for Ubuntu " + network = config('ceph-public-network') + return ip.get_address_in_network(network, fallback=get_host_ip()) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 3cce3b74..078b035c 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -31,9 +31,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true - - host = {{ hostname }} - public addr = {{ host_ip }} - cluster addr = {{ host_ip }} - +filestore xattr use omap = true \ No newline at end of file From a2814549df1dfab2bd9dbb77f7c5c80bab1b5418 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 13:54:40 +0100 Subject: [PATCH 0444/2699] applied jamespage review fixes --- ceph-osd/hooks/hooks.py | 35 +++++++++++------------------------ ceph-osd/hooks/utils.py | 18 +++++++++++++----- ceph-osd/templates/ceph.conf | 6 +----- 3 files changed, 25 insertions(+), 34 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 12b076f4..24731833 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -24,8 +24,7 @@ relation_set, Hooks, UnregisteredHookError, - service_name, - unit_get + service_name ) from charmhelpers.core.host import ( umount, @@ -42,13 +41,12 @@ from utils import ( render_template, get_host_ip, - setup_ipv6 + assert_charm_supports_ipv6 ) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + is_ipv6 ) hooks = Hooks() @@ -67,7 +65,7 @@ def install(): apt_update(fatal=True) if config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() @@ -88,13 +86,6 @@ def emit_cephconf(): 'ceph_cluster_network': config('ceph-cluster-network'), } - if config('prefer-ipv6'): - host_ip = get_ipv6_addr()[0] - if host_ip: - cephcontext['host_ip'] = host_ip - else: - log("Unable to obtain host address", level=WARNING) - # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) @@ -115,7 +106,7 @@ def config_changed(): sys.exit(1) if config('prefer-ipv6'): - setup_ipv6() + assert_charm_supports_ipv6() e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): @@ -144,12 +135,12 @@ def get_mon_hosts(): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) if not addr: - if config('prefer-ipv6'): - addr = relation_get('private-address', unit, relid) - else: - get_host_ip(relation_get('private-address', unit, relid)) + addr = relation_get('private-address', unit, relid) + if not config('prefer-ipv6'): + # This 
will verify ipv4 address + addr = get_host_ip(addr) - if addr is not None: + if addr: if is_ipv6(addr): hosts.append('[{}]:6789'.format(addr)) else: @@ -193,11 +184,7 @@ def get_devices(): @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): - if config('prefer-ipv6'): - host = get_ipv6_addr()[0] - else: - host = unit_get('private-address') - + host = get_host_ip() if host: relation_data = {'private-address': host} relation_set(**relation_data) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 09e27307..384ca7bb 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -11,7 +11,8 @@ import re from charmhelpers.core.hookenv import ( unit_get, - cached + cached, + config ) from charmhelpers.fetch import ( apt_install, @@ -22,6 +23,10 @@ lsb_release ) +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + TEMPLATES_DIR = 'templates' try: @@ -66,6 +71,9 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): hostname = hostname or unit_get('private-address') + if config('prefer-ipv6'): + return hostname or get_ipv6_addr()[0] + try: # Test to see if already an IPv4 address socket.inet_aton(hostname) @@ -78,8 +86,8 @@ def get_host_ip(hostname=None): return answers[0].address -def setup_ipv6(): - ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) - if ubuntu_rel < 14.04: - raise Exception("IPv6 is not supported for Ubuntu " +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 44336ca0..5e71b636 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -30,8 +30,4 @@ [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} - filestore xattr use omap = true - - host = {{ hostname }} - public addr = {{ host_ip }} - cluster addr = {{ host_ip }} \ No newline at end of file + filestore xattr use omap = true \ No newline at end of file From d3f5e4ee7b694deeaf65c00843ed259dcee92468 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 13:56:03 +0100 Subject: [PATCH 0445/2699] applied jamespage review fixes --- ceph-proxy/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index cd0255d0..cb2ab2fb 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -71,10 +71,10 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') if config('prefer-ipv6'): - return get_ipv6_addr()[0] + return hostname or get_ipv6_addr()[0] - hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From dd32d916022137fdc4600e1e6ce6c5034514acb2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 13:56:03 +0100 Subject: [PATCH 0446/2699] applied jamespage review fixes --- ceph-mon/hooks/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index cd0255d0..cb2ab2fb 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -71,10 +71,10 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): + hostname = hostname or unit_get('private-address') if 
config('prefer-ipv6'): - return get_ipv6_addr()[0] + return hostname or get_ipv6_addr()[0] - hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) From 6ac96fc0aa050cef1065c4a38d94b02f63dfe4f3 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:28:44 +0100 Subject: [PATCH 0447/2699] fixed ceph.conf newline issue and get_host_ip() --- ceph-proxy/hooks/utils.py | 2 +- ceph-proxy/templates/ceph.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index cb2ab2fb..825c0797 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -71,10 +71,10 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): - hostname = hostname or unit_get('private-address') if config('prefer-ipv6'): return hostname or get_ipv6_addr()[0] + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 078b035c..78035e1c 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -31,4 +31,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true \ No newline at end of file +filestore xattr use omap = true From fc9e7156b6dc67b67fdd7eccb808a7ea8766f714 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:28:44 +0100 Subject: [PATCH 0448/2699] fixed ceph.conf newline issue and get_host_ip() --- ceph-mon/hooks/utils.py | 2 +- ceph-mon/templates/ceph.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index cb2ab2fb..825c0797 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -71,10 +71,10 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): - hostname = hostname or unit_get('private-address') if config('prefer-ipv6'): return hostname or get_ipv6_addr()[0] + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 078b035c..78035e1c 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -31,4 +31,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true \ No newline at end of file +filestore xattr use omap = true From fcceb7e52607f609235fba24ae7fec0a00b8acad Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:29:10 +0100 Subject: [PATCH 0449/2699] fixed ceph.conf newline issue and get_host_ip() --- ceph-osd/hooks/utils.py | 2 +- ceph-osd/templates/ceph.conf | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 384ca7bb..60b8fc27 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -70,10 +70,10 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): - hostname = hostname or unit_get('private-address') if config('prefer-ipv6'): return hostname or get_ipv6_addr()[0] + hostname = hostname or unit_get('private-address') try: # Test to see if already an IPv4 address socket.inet_aton(hostname) diff --git 
a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 5e71b636..a9144436 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -30,4 +30,5 @@ [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} - filestore xattr use omap = true \ No newline at end of file + filestore xattr use omap = true + \ No newline at end of file From 18cdf11878b3861d7f8771df72f710311db80fb8 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:51:16 +0100 Subject: [PATCH 0450/2699] fixed ceph.conf newline issue --- ceph-osd/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index a9144436..def993c3 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -31,4 +31,4 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true - \ No newline at end of file + From 23a3cc2a8f1879a38fefd9b03b551d11231b227c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:51:38 +0100 Subject: [PATCH 0451/2699] fixed ceph.conf newline issue --- ceph-proxy/templates/ceph.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 78035e1c..3b0d91f1 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -32,3 +32,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true + From b414cb2cda68748533d51963c5766bb8ffcc1e02 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 14:51:38 +0100 Subject: [PATCH 0452/2699] fixed ceph.conf newline issue --- ceph-mon/templates/ceph.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 78035e1c..3b0d91f1 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -32,3 +32,4 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true + From 8d8a400e26d2c36f12d9f71386d1feffd0836214 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 16:23:40 +0100 Subject: [PATCH 0453/2699] small refactor --- ceph-proxy/hooks/hooks.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index bdd66546..dcbbd543 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -271,9 +271,8 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - host = get_host_ip() relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) log('End radosgw-relation hook.') @@ -281,9 +280,8 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - host = get_host_ip() relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) if ceph.is_quorum(): From 53fa5ddae2a205d5620cd602b2138c5c4132732b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Sep 2014 16:23:40 +0100 Subject: [PATCH 0454/2699] small refactor --- 
ceph-mon/hooks/hooks.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index bdd66546..dcbbd543 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -271,9 +271,8 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - host = get_host_ip() relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) log('End radosgw-relation hook.') @@ -281,9 +280,8 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - host = get_host_ip() relation_data = {} - relation_data['private-address'] = host + relation_data['private-address'] = get_host_ip() relation_set(**relation_data) if ceph.is_quorum(): From 6a16c03e73484cfbd463adc624b5730ca37f10b2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:31:59 +0100 Subject: [PATCH 0455/2699] [hopem] Adds ipv6 privacy extensions deploy note to config.yaml --- ceph-osd/config.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 50f14e6d..3d420050 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -105,4 +105,13 @@ options: 192.168.0.0/24) prefer-ipv6: type: boolean - default: False \ No newline at end of file + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy must be + disabled and a non-temporary address must be configured/available on + your network interface. From 4688f06916196db7a0c949a0a5f8fd8e7ab14112 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:32:36 +0100 Subject: [PATCH 0456/2699] [hopem] Adds ipv6 privacy extensions deploy note to config.yaml --- ceph-proxy/config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 53d9f9aa..422b009f 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -137,3 +137,12 @@ options: prefer-ipv6: type: boolean default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy must be + disabled and a non-temporary address must be configured/available on + your network interface. From 26bfe4286640adad8031ddda1553b4e0cfb557fd Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:32:36 +0100 Subject: [PATCH 0457/2699] [hopem] Adds ipv6 privacy extensions deploy note to config.yaml --- ceph-mon/config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 53d9f9aa..422b009f 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -137,3 +137,12 @@ options: prefer-ipv6: type: boolean default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . 
+ NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy must be + disabled and a non-temporary address must be configured/available on + your network interface. From c328e9cf34766df6da86022123a89dd2c21a4665 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:43:12 +0100 Subject: [PATCH 0458/2699] Fixed minor typo in config.yaml --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 3d420050..0eb89279 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -112,6 +112,6 @@ options: is expected. . NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy must be + order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. From a0d169d049faf6c60eabb9b2f014ec63e75066e7 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:43:49 +0100 Subject: [PATCH 0459/2699] Fixed minor typo in config.yaml --- ceph-proxy/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 422b009f..a0404a11 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -143,6 +143,6 @@ options: is expected. . NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy must be + order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. From f5cdc4bf830fa34ce43c25218b868b449128ddb8 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:43:49 +0100 Subject: [PATCH 0460/2699] Fixed minor typo in config.yaml --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 422b009f..a0404a11 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -143,6 +143,6 @@ options: is expected. . NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy must be + order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. 
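Stepping back, the get_mon_hosts() reworks in the preceding patches all reduce to one formatting rule: bracket IPv6 literals before appending the mon port. A standalone mirror of that rule, with invented addresses:

    from charmhelpers.contrib.network.ip import is_ipv6

    def format_mon_host(addr, port=6789):
        # Mirrors the hook logic: IPv6 literals need [] before :port.
        if is_ipv6(addr):
            return '[{}]:{}'.format(addr, port)
        return '{}:{}'.format(addr, port)

    print(format_mon_host('10.0.0.10'))     # 10.0.0.10:6789
    print(format_mon_host('2001:db8::10'))  # [2001:db8::10]:6789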
From 028dc70ccfc9f16c944f250620c94b9ec023246f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 26 Sep 2014 09:19:54 +0100 Subject: [PATCH 0461/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 110 ++++++++++++++-- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 56 +++++--- ceph-proxy/hooks/charmhelpers/core/host.py | 43 ++++-- .../hooks/charmhelpers/core/services/base.py | 3 + .../charmhelpers/core/services/helpers.py | 124 +++++++++++++++++- .../hooks/charmhelpers/fetch/__init__.py | 44 +++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 53 +++++++- .../charmhelpers/contrib/amulet/deployment.py | 19 +-- .../contrib/openstack/amulet/deployment.py | 37 +++++- .../contrib/openstack/amulet/utils.py | 9 +- 10 files changed, 421 insertions(+), 77 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 7edbcc48..b859a097 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,19 +157,102 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - return ipv6_addr[0] - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index eb4aa092..af8fe2db 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. 
+ + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. """ if self._prev_dict: @@ -465,9 +486,10 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -478,6 +500,10 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index ca7780df..d7ce1e4c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing @@ -332,13 +357,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. - apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index 6b5a1b9f..87ecb130 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. 
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. 
+ + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 61633d8c..32a673d6 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -117,15 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -138,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -206,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main @@ -309,22 +312,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..8c045650 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -10,11 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -24,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -48,7 +64,30 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -60,4 +99,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 8c0af487..e0e850dd 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -24,10 +24,10 @@ def _add_services(self, this_service, other_services): """Add services. Add services to the deployment where this_service is the local charm - that we're focused on testing and other_services are the other - charms that come from the charm store. + that we're testing and other_services are the other services that + are being used in the amulet tests. """ - name, units = range(2) + name, units, location = range(3) if this_service[name] != os.path.basename(os.getcwd()): s = this_service[name] @@ -37,12 +37,13 @@ def _add_services(self, this_service, other_services): self.d.add(this_service[name], units=this_service[units]) for svc in other_services: - if self.series: - self.d.add(svc[name], - charm='cs:{}/{}'.format(self.series, svc[name]), - units=svc[units]) + if len(svc) > 2: + branch_location = svc[location] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc[name]), else: - self.d.add(svc[name], units=svc[units]) + branch_location = None + self.d.add(svc[name], charm=branch_location, units=svc[units]) def _add_relations(self, relations): """Add all of the relations for the services.""" @@ -57,7 +58,7 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" try: - self.d.setup() + self.d.setup(timeout=900) self.d.sentry.wait(timeout=900) except amulet.helpers.TimeoutError: amulet.raise_status(amulet.FAIL, msg="Deployment timed out") diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..10d3b506 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,6 @@ +from bzrlib.branch import Branch +import os +import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -16,11 +19,41 @@ def __init__(self, series=None, openstack=None, source=None): self.openstack = openstack self.source = source + def _is_dev_branch(self): + """Determine if branch being tested is a dev (i.e. next) branch.""" + branch = Branch.open(os.getcwd()) + parent = branch.get_parent() + pattern = re.compile("^.*/next/$") + if (pattern.match(parent)): + return True + else: + return False + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + If the branch being tested is a dev branch, then determine the + development branch locations for the other services. 
Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ def create_cirros_image(self, glance, image_name): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) From bb9a35011be90599a7beebbf9a99c6242ac42bab Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 26 Sep 2014 09:19:54 +0100 Subject: [PATCH 0462/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 110 ++++++++++++++-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 56 +++++--- ceph-mon/hooks/charmhelpers/core/host.py | 43 ++++-- .../hooks/charmhelpers/core/services/base.py | 3 + .../charmhelpers/core/services/helpers.py | 124 +++++++++++++++++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 44 +++++-- .../hooks/charmhelpers/fetch/archiveurl.py | 53 +++++++- .../charmhelpers/contrib/amulet/deployment.py | 19 +-- .../contrib/openstack/amulet/deployment.py | 37 +++++- .../contrib/openstack/amulet/utils.py | 9 +- 10 files changed, 421 insertions(+), 77 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 7edbcc48..b859a097 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,19 +157,102 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse 
correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - return ipv6_addr[0] - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index eb4aa092..af8fe2db 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ def load_previous(self, path=None): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ def changed(self, key): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. 
""" if self._prev_dict: @@ -238,7 +253,13 @@ def previous(self, key): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. """ if self._prev_dict: @@ -465,9 +486,10 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -478,6 +500,10 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index ca7780df..d7ce1e4c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing @@ -332,13 +357,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. 
- apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index 6b5a1b9f..87ecb130 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ def manage(self): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 61633d8c..32a673d6 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -117,15 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). 
- apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -138,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -206,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main @@ -309,22 +312,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..8c045650 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -10,11 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -24,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. 
+ """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -48,7 +64,30 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -60,4 +99,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 8c0af487..e0e850dd 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -24,10 +24,10 @@ def _add_services(self, this_service, other_services): """Add services. Add services to the deployment where this_service is the local charm - that we're focused on testing and other_services are the other - charms that come from the charm store. + that we're testing and other_services are the other services that + are being used in the amulet tests. 
""" - name, units = range(2) + name, units, location = range(3) if this_service[name] != os.path.basename(os.getcwd()): s = this_service[name] @@ -37,12 +37,13 @@ def _add_services(self, this_service, other_services): self.d.add(this_service[name], units=this_service[units]) for svc in other_services: - if self.series: - self.d.add(svc[name], - charm='cs:{}/{}'.format(self.series, svc[name]), - units=svc[units]) + if len(svc) > 2: + branch_location = svc[location] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc[name]), else: - self.d.add(svc[name], units=svc[units]) + branch_location = None + self.d.add(svc[name], charm=branch_location, units=svc[units]) def _add_relations(self, relations): """Add all of the relations for the services.""" @@ -57,7 +58,7 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" try: - self.d.setup() + self.d.setup(timeout=900) self.d.sentry.wait(timeout=900) except amulet.helpers.TimeoutError: amulet.raise_status(amulet.FAIL, msg="Deployment timed out") diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..10d3b506 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,6 @@ +from bzrlib.branch import Branch +import os +import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -16,11 +19,41 @@ def __init__(self, series=None, openstack=None, source=None): self.openstack = openstack self.source = source + def _is_dev_branch(self): + """Determine if branch being tested is a dev (i.e. next) branch.""" + branch = Branch.open(os.getcwd()) + parent = branch.get_parent() + pattern = re.compile("^.*/next/$") + if (pattern.match(parent)): + return True + else: + return False + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + If the branch being tested is a dev branch, then determine the + development branch locations for the other services. 
Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ def create_cirros_image(self, glance, image_name): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) From 1c3c313e226482a98cec3d75dfd415e87b6842b5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 26 Sep 2014 09:25:57 +0100 Subject: [PATCH 0463/2699] Sync charmhelpers --- ceph-osd/.bzrignore | 1 + ceph-osd/Makefile | 10 +- .../hooks/charmhelpers/contrib/network/ip.py | 102 ++++++ .../contrib/storage/linux/utils.py | 3 + ceph-osd/hooks/charmhelpers/core/hookenv.py | 59 +++- ceph-osd/hooks/charmhelpers/core/host.py | 74 ++++- .../charmhelpers/core/services/__init__.py | 2 + .../hooks/charmhelpers/core/services/base.py | 313 ++++++++++++++++++ .../charmhelpers/core/services/helpers.py | 239 +++++++++++++ .../hooks/charmhelpers/core/templating.py | 51 +++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 85 ++++- .../hooks/charmhelpers/fetch/archiveurl.py | 53 ++- 12 files changed, 944 insertions(+), 48 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/services/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/services/base.py create mode 100644 ceph-osd/hooks/charmhelpers/core/services/helpers.py create mode 100644 ceph-osd/hooks/charmhelpers/core/templating.py diff --git a/ceph-osd/.bzrignore b/ceph-osd/.bzrignore index 3a4edf69..221610be 100644 --- a/ceph-osd/.bzrignore +++ b/ceph-osd/.bzrignore @@ -1 +1,2 @@ .project +bin diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 11aa3028..5c19fc1b 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -1,11 +1,17 @@ #!/usr/bin/make +PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks @charm 
proof || true -sync: - @charm-helper-sync -c charm-helpers-sync.yaml +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml publish: lint bzr push lp:charms/ceph-osd diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..b859a097 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,4 @@ +import glob import sys from functools import partial @@ -154,3 +155,104 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index c9530433..af8fe2db 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. 
+
+    In normal usage you don't need to call this method directly - it
+    is called automatically at object initialization.
+
     :param path:
@@ -218,8 +233,8 @@ def load_previous(self, path=None):
             self._prev_dict = json.load(f)

     def changed(self, key):
-        """Return true if the value for this key has changed since
-        the last save.
+        """Return True if the current value for this key is different from
+        the previous value.

         """
         if self._prev_dict is None:
@@ -228,7 +243,7 @@ def changed(self, key):

     def previous(self, key):
         """Return previous value for this key, or None if there
-        is no "previous" value.
+        is no previous value.

         """
         if self._prev_dict:
@@ -238,7 +253,13 @@ def save(self):
         """Save this config to disk.

-        Preserves items in _prev_dict that do not exist in self.
+        If the charm is using the :mod:`Services Framework <services>`
+        or :meth:`@hook <Hooks.hook>` decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.

         """
         if self._prev_dict:
@@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None):
             raise


-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
@@ -464,9 +486,10 @@ def config_changed():
             hooks.execute(sys.argv)
     """

-    def __init__(self):
+    def __init__(self, config_save=True):
         super(Hooks, self).__init__()
         self._hooks = {}
+        self._config_save = config_save

     def register(self, name, function):
         """Register a hook"""
@@ -477,6 +500,10 @@ def execute(self, args):
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
             self._hooks[hook_name]()
+            if self._config_save:
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
         else:
             raise UnregisteredHookError(hook_name)

diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py
index d934f940..d7ce1e4c 100644
--- a/ceph-osd/hooks/charmhelpers/core/host.py
+++ b/ceph-osd/hooks/charmhelpers/core/host.py
@@ -12,6 +12,8 @@ import string
 import subprocess
 import hashlib
+import shutil
+from contextlib import contextmanager

 from collections import OrderedDict

@@ -52,7 +54,7 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -62,6 +64,16 @@ def service_running(service):
         return False


+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        return 'unrecognized service' not in e.output
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -197,10 +209,15 @@ def mounts():
     return system_mounts


-def file_hash(path):
-    """Generate a md5 hash of the contents of 'path' or None if not found """
+def file_hash(path, hash_type='md5'):
+    """
+    Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
     if os.path.exists(path):
-        h = hashlib.md5()
+        h = getattr(hashlib, hash_type)()
         with open(path, 'r') as source:
             h.update(source.read())  # IGNORE:E1101 - it does have update
         return h.hexdigest()
@@ -208,6 +225,26 @@ def file_hash(path):
         return None


+def check_hash(path, checksum, hash_type='md5'):
+    """
+    Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    pass
+
+
 def restart_on_change(restart_map, stopstart=False):
     """Restart services based on configuration files changing

@@ -320,12 +357,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     '''
     import apt_pkg
+    from charmhelpers.fetch import apt_cache
     if not pkgcache:
-        apt_pkg.init()
-        # Force Apt to build its cache in memory. That way we avoid race
-        # conditions with other applications building the cache in the same
-        # place.
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        pkgcache = apt_pkg.Cache()
+        pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)
diff --git a/ceph-osd/hooks/charmhelpers/core/services/__init__.py b/ceph-osd/hooks/charmhelpers/core/services/__init__.py
new file mode 100644
index 00000000..e8039a84
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/core/services/__init__.py
@@ -0,0 +1,2 @@
+from .base import *
+from .helpers import *
diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..87ecb130
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/core/services/base.py
@@ -0,0 +1,313 @@
+import os
+import re
+import json
+from collections import Iterable
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+ Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. 
+ + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..7067b94b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,239 @@ +import os +import yaml +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. 
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+    required_keys = []
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if name is not None:
+            self.name = name
+        if additional_required_keys is not None:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'db'
+    interface = 'mysql'
+    required_keys = ['host', 'user', 'password', 'database']
+
+
+class HttpRelation(RelationContext):
+    """
+    Relation context for the `http` interface.
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready action. 
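Wired into a service definition, this callback is normally used through its `template` alias (a sketch; the service and file names are invented, and MonRelation is the context sketched earlier)::

    from charmhelpers.core import services

    definition = {
        'service': 'ceph-mon',
        'required_data': [MonRelation()],
        'data_ready': [
            services.template(source='ceph.conf',
                              target='/etc/ceph/ceph.conf',
                              owner='root', perms=0444),
        ],
    }
    services.ServiceManager([definition]).manage()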
+ + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 5be512ce..32a673d6 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ def base_url(self, url): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). 
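Outside the services framework, the render() helper added above can also be called directly from a hook (a sketch; paths and context values are invented)::

    from charmhelpers.core.templating import render

    render(source='ceph.conf',          # $CHARM_DIR/templates/ceph.conf
           target='/etc/ceph/ceph.conf',
           context={'mon_hosts': ['10.0.0.1', '10.0.0.2']},
           perms=0444)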
-    apt_pkg.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt_pkg.Cache()
+    cache = apt_cache()
     _pkgs = []
     for package in packages:
         try:
@@ -136,6 +130,16 @@ def filter_installed_packages(packages):
     return _pkgs


+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    import apt_pkg
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
 def apt_install(packages, options=None, fatal=False):
     """Install one or more packages"""
     if options is None:
@@ -201,6 +205,28 @@ def apt_hold(packages, fatal=False):


 def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples::
+
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -225,10 +251,23 @@ def add_source(source, key=None):
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])


 def configure_sources(update=False,
@@ -238,7 +277,8 @@ def configure_sources(update=False,
     Configure multiple sources from charm configuration.

     The lists are encoded as yaml fragments in the configuration.
-    The fragment needs to be included as a string.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().

     Example config:
         install_sources: |
@@ -272,22 +312,35 @@ def configure_sources(update=False,
         apt_update(fatal=True)


-def install_remote(source):
+def install_remote(source, *args, **kwargs):
     """
     Install a file tree from a remote source

     The specified source should be a url of the form:
         scheme://[host]/path[#[option=value][&...]]

-    Schemes supported are based on this module's submodules
-    Options supported are submodule-specific"""
+    Schemes supported are based on this module's submodules.
+    Options supported are submodule-specific.
+    Additional arguments are passed through to the submodule.
+
+    For example::
+
+        dest = install_remote('http://example.com/archive.tgz',
+                              checksum='deadbeef',
+                              hash_type='sha1')
+
+    This will download `archive.tgz`, validate it using SHA1 and, if
+    the file is ok, extract it and return the directory in which it
+    was extracted. If the checksum fails, it will raise
+    :class:`charmhelpers.core.host.ChecksumError`.
+    """
     # We ONLY check for True here because can_handle may return a string
     # explaining why it can't handle a given source.
     handlers = [h for h in plugins() if h.can_handle(source) is True]
     installed_to = None
     for handler in handlers:
         try:
-            installed_to = handler.install(source)
+            installed_to = handler.install(source, *args, **kwargs)
         except UnhandledSource:
             pass
     if not installed_to:
diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
index 87e7071a..8c045650 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py
@@ -1,6 +1,8 @@
 import os
 import urllib2
+from urllib import urlretrieve
 import urlparse
+import hashlib

 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -10,11 +12,19 @@
     get_archive_handler,
     extract,
 )
-from charmhelpers.core.host import mkdir
+from charmhelpers.core.host import mkdir, check_hash


 class ArchiveUrlFetchHandler(BaseFetchHandler):
-    """Handler for archives via generic URLs"""
+    """
+    Handler to download archive files from arbitrary URLs.
+
+    Can fetch from http, https, ftp, and file URLs.
+
+    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+
+    Installs the contents of the archive in $CHARM_DIR/fetched/.
+    """
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@ def can_handle(self, source):
         return False

     def download(self, source, dest):
+        """
+        Download an archive file.
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local path location to download archive file to.
+        """
         # propagate all exceptions
         # URLError, OSError, etc
         proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@ def download(self, source, dest):
             os.unlink(dest)
             raise e

-    def install(self, source):
+    # Mandatory file validation via SHA1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        tempfile, headers = urlretrieve(url)
+        check_hash(tempfile, hashsum, validate)
+        return tempfile
+
+    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+        """
+        Download and install an archive file, with optional checksum validation.
+
+        The checksum can also be given on the `source` URL's fragment.
+        For example::
+
+            handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
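Charm code usually reaches the handler through install_remote(), but it can also be driven directly; the two checksum forms below are equivalent (a sketch; the URL and digest are invented)::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    dest = handler.install('http://example.com/pkg.tgz',
                           checksum='deadbeef', hash_type='sha1')
    dest = handler.install('http://example.com/pkg.tgz#sha1=deadbeef')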
+ + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -60,4 +99,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From 3e274cc45f1579d30f8ed7de62cec44086342493 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 13:40:57 +0100 Subject: [PATCH 0464/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 68 +++++++++++++++---- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 10 +-- ceph-proxy/hooks/charmhelpers/core/host.py | 12 ++-- .../hooks/charmhelpers/fetch/__init__.py | 3 +- .../hooks/charmhelpers/fetch/archiveurl.py | 13 ++-- .../charmhelpers/contrib/amulet/deployment.py | 19 +++--- .../contrib/openstack/amulet/deployment.py | 37 +++++++++- .../contrib/openstack/amulet/utils.py | 9 +-- 8 files changed, 127 insertions(+), 44 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 37ecbbed..19f654d3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,6 @@ import glob +import re +import subprocess import sys from functools import partial @@ -172,7 +174,8 @@ def format_ipv6_addr(address): return address -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): """ Return the assigned IP address for a given interface, if any, or []. """ @@ -212,26 +215,67 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=T if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) return addresses get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IPv6 address for a given interface, if any, or []. +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, exc_list=exc_list) - remotly_addressable = [] - for address in addresses: - if not address.startswith('fe80'): - remotly_addressable.append(address) - if fatal and not remotly_addressable: - raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface)
-    return remotly_addressable
+
+    if addresses:
+        global_addrs = []
+        for addr in addresses:
+            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
+            m = re.match(key_scope_link_local, addr)
+            if m:
+                eui_64_mac = m.group(1)
+                iface = m.group(2)
+            else:
+                global_addrs.append(addr)
+
+        if global_addrs:
+            # Make sure any found global addresses are not temporary
+            cmd = ['ip', 'addr', 'show', iface]
+            out = subprocess.check_output(cmd)
+            if dynamic_only:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
+            else:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
+
+            addrs = []
+            for line in out.split('\n'):
+                line = line.strip()
+                m = re.match(key, line)
+                if m and 'temporary' not in line:
+                    # Return the first valid address we find
+                    for addr in global_addrs:
+                        if m.group(1) == addr:
+                            if not dynamic_only or \
+                               m.group(1).endswith(eui_64_mac):
+                                addrs.append(addr)
+
+            if addrs:
+                return addrs
+
+    if fatal:
+        raise Exception("Interface '%s' doesn't have a scope global "
+                        "non-temporary ipv6 address." % iface)
+
+    return []


 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py
index 324987e6..af8fe2db 100644
--- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py
@@ -486,9 +486,10 @@ def config_changed():
             hooks.execute(sys.argv)
     """

-    def __init__(self):
+    def __init__(self, config_save=True):
         super(Hooks, self).__init__()
         self._hooks = {}
+        self._config_save = config_save

     def register(self, name, function):
         """Register a hook"""
@@ -499,9 +500,10 @@ def execute(self, args):
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
             self._hooks[hook_name]()
-            cfg = config()
-            if cfg.implicit_save:
-                cfg.save()
+            if self._config_save:
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
         else:
             raise UnregisteredHookError(hook_name)
diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py
index 3ac70143..d7ce1e4c 100644
--- a/ceph-proxy/hooks/charmhelpers/core/host.py
+++ b/ceph-proxy/hooks/charmhelpers/core/host.py
@@ -68,8 +68,8 @@ def service_available(service_name):
     """Determine whether a system service is available"""
     try:
         subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        return False
+    except subprocess.CalledProcessError as e:
+        return 'unrecognized service' not in e.output
     else:
         return True

@@ -229,12 +229,12 @@ def check_hash(path, checksum, hash_type='md5'):
     """
     Validate a file using a cryptographic checksum.

-    :param str checksum: Value of the checksum used to validate the file.
-    :param str hash_type: Hash algorithm used to generate :param:`checksum`.
-        Can be any hash algorithm supported by :mod:`hashlib`,
-        such as md5, sha1, sha256, sha512, etc.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
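In hook code the validation step then reads roughly as follows (a sketch; the path and digest are invented)::

    from charmhelpers.core import hookenv
    from charmhelpers.core.host import ChecksumError, check_hash

    path = '/var/tmp/archive.tgz'
    try:
        check_hash(path, 'd41d8cd98f00b204e9800998ecf8427e', 'md5')
    except ChecksumError as e:
        hookenv.log('corrupt download: %s' % e, level=hookenv.ERROR)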
     :raises ChecksumError: If the file fails the checksum
+
     """
     actual_checksum = file_hash(path, hash_type)
     if checksum != actual_checksum:
diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py
index 20a20ac6..32a673d6 100644
--- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py
+++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py
@@ -208,7 +208,8 @@ def add_source(source, key=None):
     """Add a package source to this system.

     @param source: a URL or sources.list entry, as supported by
-    add-apt-repository(1). Examples:
+    add-apt-repository(1). Examples::
+
         ppa:charmers/example
         deb https://stub:key@private.example.com/ubuntu trusty main
diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
index d1dcbc33..8c045650 100644
--- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
+++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py
@@ -74,18 +74,19 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
         """
         Download and install an archive file, with optional checksum validation.

-        The checksum can also be given on the :param:`source` URL's fragment.
+        The checksum can also be given on the `source` URL's fragment.
         For example::

             handler.install('http://example.com/file.tgz#sha1=deadbeef')

         :param str source: URL pointing to an archive file.
-        :param str dest: Local destination path to install to. If not given,
-                         installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
         :param str checksum: If given, validate the archive file after download.
-        :param str hash_type: Algorithm used to generate :param:`checksum`.
-            Can be any hash algorithm supported by :mod:`hashlib`,
-            such as md5, sha1, sha256, sha512, etc.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
         """
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py
index 8c0af487..e0e850dd 100644
--- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py
@@ -24,10 +24,10 @@ def _add_services(self, this_service, other_services):
         """Add services.

         Add services to the deployment where this_service is the local charm
-        that we're focused on testing and other_services are the other
-        charms that come from the charm store.
+        that we're testing and other_services are the other services that
+        are being used in the amulet tests.
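With the new three-tuple form, a test can pin a branch for individual services while the rest fall back to the charm store (a sketch; the charm names and branch URL are invented)::

    this_service = ('ceph', 3)
    other_services = [
        ('mysql', 1),    # two-tuple: resolved via self.series, if set
        ('rabbitmq-server', 1,
         'lp:~openstack-charmers/charms/trusty/rabbitmq-server/next'),
    ]
    deployment._add_services(this_service, other_services)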
""" - name, units = range(2) + name, units, location = range(3) if this_service[name] != os.path.basename(os.getcwd()): s = this_service[name] @@ -37,12 +37,13 @@ def _add_services(self, this_service, other_services): self.d.add(this_service[name], units=this_service[units]) for svc in other_services: - if self.series: - self.d.add(svc[name], - charm='cs:{}/{}'.format(self.series, svc[name]), - units=svc[units]) + if len(svc) > 2: + branch_location = svc[location] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc[name]), else: - self.d.add(svc[name], units=svc[units]) + branch_location = None + self.d.add(svc[name], charm=branch_location, units=svc[units]) def _add_relations(self, relations): """Add all of the relations for the services.""" @@ -57,7 +58,7 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" try: - self.d.setup() + self.d.setup(timeout=900) self.d.sentry.wait(timeout=900) except amulet.helpers.TimeoutError: amulet.raise_status(amulet.FAIL, msg="Deployment timed out") diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..10d3b506 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,6 @@ +from bzrlib.branch import Branch +import os +import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -16,11 +19,41 @@ def __init__(self, series=None, openstack=None, source=None): self.openstack = openstack self.source = source + def _is_dev_branch(self): + """Determine if branch being tested is a dev (i.e. next) branch.""" + branch = Branch.open(os.getcwd()) + parent = branch.get_parent() + pattern = re.compile("^.*/next/$") + if (pattern.match(parent)): + return True + else: + return False + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + If the branch being tested is a dev branch, then determine the + development branch locations for the other services. 
Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ def create_cirros_image(self, glance, image_name): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) From ff63397fdbb1dd3af1a4abdd72e642e895e8ea61 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 13:40:57 +0100 Subject: [PATCH 0465/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 68 +++++++++++++++---- ceph-mon/hooks/charmhelpers/core/hookenv.py | 10 +-- ceph-mon/hooks/charmhelpers/core/host.py | 12 ++-- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 3 +- .../hooks/charmhelpers/fetch/archiveurl.py | 13 ++-- .../charmhelpers/contrib/amulet/deployment.py | 19 +++--- .../contrib/openstack/amulet/deployment.py | 37 +++++++++- .../contrib/openstack/amulet/utils.py | 9 +-- 8 files changed, 127 insertions(+), 44 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 37ecbbed..19f654d3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,6 @@ import glob +import re +import subprocess import sys from functools import partial @@ -172,7 +174,8 @@ def format_ipv6_addr(address): return address -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): """ Return the assigned IP address for a given interface, if any, or []. 
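A quick sketch of how the interface-address helpers above are typically called (the interface names are assumptions)::

    from charmhelpers.contrib.network.ip import get_iface_addr, get_ipv4_addr

    # first IPv4 address on eth0, tolerating a missing interface
    addrs = get_ipv4_addr('eth0', fatal=False)
    # include alias interfaces such as eth0:1 as well
    all_addrs = get_iface_addr(iface='eth0', inc_aliases=True, fatal=False)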
""" @@ -212,26 +215,67 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=T if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) return addresses get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IPv6 address for a given interface, if any, or []. +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, exc_list=exc_list) - remotly_addressable = [] - for address in addresses: - if not address.startswith('fe80'): - remotly_addressable.append(address) - if fatal and not remotly_addressable: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) - return remotly_addressable + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd) + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' doesn't have a scope global " + "non-temporary ipv6 address." 
% iface)
+
+    return []


 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py
index 324987e6..af8fe2db 100644
--- a/ceph-mon/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py
@@ -486,9 +486,10 @@ def config_changed():
             hooks.execute(sys.argv)
     """

-    def __init__(self):
+    def __init__(self, config_save=True):
         super(Hooks, self).__init__()
         self._hooks = {}
+        self._config_save = config_save

     def register(self, name, function):
         """Register a hook"""
@@ -499,9 +500,10 @@ def execute(self, args):
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
             self._hooks[hook_name]()
-            cfg = config()
-            if cfg.implicit_save:
-                cfg.save()
+            if self._config_save:
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
         else:
             raise UnregisteredHookError(hook_name)
diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py
index 3ac70143..d7ce1e4c 100644
--- a/ceph-mon/hooks/charmhelpers/core/host.py
+++ b/ceph-mon/hooks/charmhelpers/core/host.py
@@ -68,8 +68,8 @@ def service_available(service_name):
     """Determine whether a system service is available"""
     try:
         subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        return False
+    except subprocess.CalledProcessError as e:
+        return 'unrecognized service' not in e.output
     else:
         return True

@@ -229,12 +229,12 @@ def check_hash(path, checksum, hash_type='md5'):
     """
     Validate a file using a cryptographic checksum.

-    :param str checksum: Value of the checksum used to validate the file.
-    :param str hash_type: Hash algorithm used to generate :param:`checksum`.
-        Can be any hash algorithm supported by :mod:`hashlib`,
-        such as md5, sha1, sha256, sha512, etc.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
     :raises ChecksumError: If the file fails the checksum
+
     """
     actual_checksum = file_hash(path, hash_type)
     if checksum != actual_checksum:
diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py
index 20a20ac6..32a673d6 100644
--- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py
+++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py
@@ -208,7 +208,8 @@ def add_source(source, key=None):
     """Add a package source to this system.

     @param source: a URL or sources.list entry, as supported by
-    add-apt-repository(1). Examples:
+    add-apt-repository(1). Examples::
+
         ppa:charmers/example
         deb https://stub:key@private.example.com/ubuntu trusty main
diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py
index d1dcbc33..8c045650 100644
--- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py
+++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py
@@ -74,18 +74,19 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
         """
         Download and install an archive file, with optional checksum validation.

-        The checksum can also be given on the :param:`source` URL's fragment.
+        The checksum can also be given on the `source` URL's fragment.
         For example::

             handler.install('http://example.com/file.tgz#sha1=deadbeef')

         :param str source: URL pointing to an archive file.
-        :param str dest: Local destination path to install to. If not given,
-                         installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
         :param str checksum: If given, validate the archive file after download.
-        :param str hash_type: Algorithm used to generate :param:`checksum`.
-            Can be any hash algorithm supported by :mod:`hashlib`,
-            such as md5, sha1, sha256, sha512, etc.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
         """
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py
index 8c0af487..e0e850dd 100644
--- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py
@@ -24,10 +24,10 @@ def _add_services(self, this_service, other_services):
         """Add services.

         Add services to the deployment where this_service is the local charm
-        that we're focused on testing and other_services are the other
-        charms that come from the charm store.
+        that we're testing and other_services are the other services that
+        are being used in the amulet tests.
         """
-        name, units = range(2)
+        name, units, location = range(3)

         if this_service[name] != os.path.basename(os.getcwd()):
             s = this_service[name]
@@ -37,12 +37,13 @@ def _add_services(self, this_service, other_services):
         self.d.add(this_service[name], units=this_service[units])

         for svc in other_services:
-            if self.series:
-                self.d.add(svc[name],
-                           charm='cs:{}/{}'.format(self.series, svc[name]),
-                           units=svc[units])
+            if len(svc) > 2:
+                branch_location = svc[location]
+            elif self.series:
+                branch_location = 'cs:{}/{}'.format(self.series, svc[name])
+            else:
+                branch_location = None
+            self.d.add(svc[name], charm=branch_location, units=svc[units])

     def _add_relations(self, relations):
         """Add all of the relations for the services."""
@@ -57,7 +58,7 @@ def _configure_services(self, configs):
     def _deploy(self):
         """Deploy environment and wait for all hooks to finish executing."""
         try:
-            self.d.setup()
+            self.d.setup(timeout=900)
             self.d.sentry.wait(timeout=900)
         except amulet.helpers.TimeoutError:
             amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 9179eeb1..10d3b506 100644
--- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -1,3 +1,6 @@
+from bzrlib.branch import Branch
+import os
+import re
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -16,11 +19,41 @@ def __init__(self, series=None, openstack=None, source=None):
         self.openstack = openstack
         self.source = source

+    def _is_dev_branch(self):
+        """Determine if branch being tested is a dev (i.e. next) branch."""
+        branch = Branch.open(os.getcwd())
+        parent = branch.get_parent()
+        pattern = re.compile("^.*/next/$")
+        if (pattern.match(parent)):
+            return True
+        else:
+            return False
+
+    def _determine_branch_locations(self, other_services):
+        """Determine the branch locations for the other services.
+
+        If the branch being tested is a dev branch, then determine the
+        development branch locations for the other services.
Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ def create_cirros_image(self, glance, image_name): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) From 76a529d06ad78fdf70b0153e4b08de202079d015 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 13:40:58 +0100 Subject: [PATCH 0466/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 68 +++++++++++++++---- ceph-osd/hooks/charmhelpers/core/hookenv.py | 10 +-- ceph-osd/hooks/charmhelpers/core/host.py | 12 ++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 3 +- .../hooks/charmhelpers/fetch/archiveurl.py | 13 ++-- 5 files changed, 77 insertions(+), 29 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 37ecbbed..19f654d3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,6 @@ import glob +import re +import subprocess import sys from functools import partial @@ -172,7 +174,8 @@ def format_ipv6_addr(address): return address -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): """ Return the assigned IP address for a given interface, if any, or []. """ @@ -212,26 +215,67 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=T if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." 
% (iface, inet_type)) + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) return addresses get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IPv6 address for a given interface, if any, or []. +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, exc_list=exc_list) - remotly_addressable = [] - for address in addresses: - if not address.startswith('fe80'): - remotly_addressable.append(address) - if fatal and not remotly_addressable: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) - return remotly_addressable + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd) + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' doesn't have a scope global " + "non-temporary ipv6 address." 
% iface) + + return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 324987e6..af8fe2db 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -486,9 +486,10 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -499,9 +500,10 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() - cfg = config() - if cfg.implicit_save: - cfg.save() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 3ac70143..d7ce1e4c 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -229,12 +229,12 @@ def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum + """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 20a20ac6..32a673d6 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -208,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index d1dcbc33..8c045650 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -74,18 +74,19 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): """ Download and install an archive file, with optional checksum validation. - The checksum can also be given on the :param:`source` URL's fragment. + The checksum can also be given on the `source` URL's fragment. For example:: handler.install('http://example.com/file.tgz#sha1=deadbeef') :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. 
+ :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') From b5a02f4658929fc947219534ac20bb575bf0ef37 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 26 Sep 2014 20:57:01 +0000 Subject: [PATCH 0467/2699] Sync charm-helpers --- .../charmhelpers/contrib/amulet/deployment.py | 25 ++++---- .../contrib/openstack/amulet/deployment.py | 57 +++++++++---------- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index e0e850dd..d859d367 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -25,25 +25,30 @@ def _add_services(self, this_service, other_services): Add services to the deployment where this_service is the local charm that we're testing and other_services are the other services that - are being used in the amulet tests. + are being used in the local amulet tests. """ - name, units, location = range(3) - - if this_service[name] != os.path.basename(os.getcwd()): - s = this_service[name] + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] msg = "The charm's root directory name needs to be {}".format(s) amulet.raise_status(amulet.FAIL, msg=msg) - self.d.add(this_service[name], units=this_service[units]) + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) for svc in other_services: - if len(svc) > 2: - branch_location = svc[location] + if 'location' in svc: + branch_location = svc['location'] elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc[name]), + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), else: branch_location = None - self.d.add(svc[name], charm=branch_location, units=svc[units]) + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 10d3b506..495ebdb6 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,6 +1,3 @@ -from bzrlib.branch import Branch -import os -import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -13,62 +10,60 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. 
""" - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) self.openstack = openstack self.source = source - - def _is_dev_branch(self): - """Determine if branch being tested is a dev (i.e. next) branch.""" - branch = Branch.open(os.getcwd()) - parent = branch.get_parent() - pattern = re.compile("^.*/next/$") - if (pattern.match(parent)): - return True - else: - return False + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come out. + self.current_next = "trusty" def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. - If the branch being tested is a dev branch, then determine the - development branch locations for the other services. Otherwise, - the default charm store branches will be used.""" - name = 0 - if self._is_dev_branch(): - updated_services = [] + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: for svc in other_services: - if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: - location = 'lp:charms/{}'.format(svc[name]) + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) else: - temp = 'lp:~openstack-charmers/charms/trusty/{}/next' - location = temp.format(svc[name]) - updated_services.append(svc + (location,)) - other_services = updated_services + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" - name = 0 other_services = self._determine_branch_locations(other_services) + super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) + services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] if self.openstack: for svc in services: - if svc[name] not in use_source: + if svc['name'] not in use_source: config = {'openstack-origin': self.openstack} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc[name] in use_source: + if svc['name'] in use_source: config = {'source': self.source} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) def _configure_services(self, configs): """Configure all of the services.""" From ba4f50c5861ea8a238a9b63f195ae3ecf4db4368 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 26 Sep 2014 20:57:01 +0000 Subject: [PATCH 0468/2699] Sync charm-helpers --- .../charmhelpers/contrib/amulet/deployment.py | 25 ++++---- .../contrib/openstack/amulet/deployment.py | 57 +++++++++---------- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index e0e850dd..d859d367 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ 
-25,25 +25,30 @@ def _add_services(self, this_service, other_services): Add services to the deployment where this_service is the local charm that we're testing and other_services are the other services that - are being used in the amulet tests. + are being used in the local amulet tests. """ - name, units, location = range(3) - - if this_service[name] != os.path.basename(os.getcwd()): - s = this_service[name] + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] msg = "The charm's root directory name needs to be {}".format(s) amulet.raise_status(amulet.FAIL, msg=msg) - self.d.add(this_service[name], units=this_service[units]) + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) for svc in other_services: - if len(svc) > 2: - branch_location = svc[location] + if 'location' in svc: + branch_location = svc['location'] elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc[name]), + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), else: branch_location = None - self.d.add(svc[name], charm=branch_location, units=svc[units]) + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 10d3b506..495ebdb6 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,6 +1,3 @@ -from bzrlib.branch import Branch -import os -import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -13,62 +10,60 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) self.openstack = openstack self.source = source - - def _is_dev_branch(self): - """Determine if branch being tested is a dev (i.e. next) branch.""" - branch = Branch.open(os.getcwd()) - parent = branch.get_parent() - pattern = re.compile("^.*/next/$") - if (pattern.match(parent)): - return True - else: - return False + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come out. + self.current_next = "trusty" def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. - If the branch being tested is a dev branch, then determine the - development branch locations for the other services. 
Otherwise, - the default charm store branches will be used.""" - name = 0 - if self._is_dev_branch(): - updated_services = [] + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: for svc in other_services: - if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: - location = 'lp:charms/{}'.format(svc[name]) + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) else: - temp = 'lp:~openstack-charmers/charms/trusty/{}/next' - location = temp.format(svc[name]) - updated_services.append(svc + (location,)) - other_services = updated_services + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" - name = 0 other_services = self._determine_branch_locations(other_services) + super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) + services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] if self.openstack: for svc in services: - if svc[name] not in use_source: + if svc['name'] not in use_source: config = {'openstack-origin': self.openstack} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc[name] in use_source: + if svc['name'] in use_source: config = {'source': self.source} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) def _configure_services(self, configs): """Configure all of the services.""" From 8286062ab17ef4b7b6cdef36e14a1b5a4d389cf0 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 02:28:51 +0000 Subject: [PATCH 0469/2699] Move charm-helpers-sync.yaml to charm-helpers-hooks.yaml and add charm-helpers-tests.yaml. 
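
Splitting the sync config means the helper sync now runs once per
target: charm-helpers-hooks.yaml pulls helper modules into hooks/,
while charm-helpers-tests.yaml pulls the amulet test helpers into
tests/. As a minimal sketch only, this is roughly what the Makefile's
`sync` target (shown below) does, expressed in Python; the
sync_charm_helpers function itself is illustrative and not part of
this patch:

    import subprocess

    def sync_charm_helpers(configs=('charm-helpers-hooks.yaml',
                                    'charm-helpers-tests.yaml')):
        # Run one charm-helpers sync per config file, mirroring the
        # two invocations in the Makefile's `sync` target.
        for cfg in configs:
            subprocess.check_call(
                ['python', 'bin/charm_helpers_sync.py', '-c', cfg])
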
--- ceph-osd/Makefile | 3 ++- .../{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} | 0 ceph-osd/charm-helpers-tests.yaml | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) rename ceph-osd/{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} (100%) create mode 100644 ceph-osd/charm-helpers-tests.yaml diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 5c19fc1b..d7a13bf8 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -11,7 +11,8 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-osd diff --git a/ceph-osd/charm-helpers-sync.yaml b/ceph-osd/charm-helpers-hooks.yaml similarity index 100% rename from ceph-osd/charm-helpers-sync.yaml rename to ceph-osd/charm-helpers-hooks.yaml diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/ceph-osd/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet From 1da7d3e2d0fb0c0f764613514a3a97b3f3f25fde Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 02:57:08 +0000 Subject: [PATCH 0470/2699] Move charm-helpers-sync.yaml to charm-helpers-hooks.yaml and add charm-helpers-tests.yaml. --- ceph-radosgw/Makefile | 3 ++- .../{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} | 0 ceph-radosgw/charm-helpers-tests.yaml | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) rename ceph-radosgw/{charm-helpers-sync.yaml => charm-helpers-hooks.yaml} (100%) create mode 100644 ceph-radosgw/charm-helpers-tests.yaml diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index bb4e7b58..e312fd81 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -11,7 +11,8 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/charm-helpers-sync.yaml b/ceph-radosgw/charm-helpers-hooks.yaml similarity index 100% rename from ceph-radosgw/charm-helpers-sync.yaml rename to ceph-radosgw/charm-helpers-hooks.yaml diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/ceph-radosgw/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet From 849414068b2b6213f79330e3ebf57ed0d7848cd3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 17:18:26 +0000 Subject: [PATCH 0471/2699] Sync charm-helpers. 
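
The synced test helpers give the charm a common amulet deployment
harness. As a minimal sketch, a ceph-osd test might drive them as
follows; the service, relation, and config names here are
illustrative assumptions rather than part of this patch, while the
dict-based service specs and constructor arguments follow the helper
code added below:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )

    class CephOsdBasicDeployment(OpenStackAmuletDeployment):
        """Example deployment built on the synced helpers."""

        def __init__(self, series='trusty', openstack=None,
                     source=None, stable=True):
            super(CephOsdBasicDeployment, self).__init__(
                series, openstack, source, stable)
            # this_service['name'] must match the charm's root
            # directory name, per _add_services().
            this_service = {'name': 'ceph-osd', 'units': 3}
            # 'units' defaults to 1 and 'location' is optional;
            # stable vs next branch locations are filled in by
            # _determine_branch_locations().
            other_services = [{'name': 'ceph', 'units': 3}]
            self._add_services(this_service, other_services)
            self._add_relations({'ceph-osd:mon': 'ceph:osd'})
            self._configure_services({'ceph': {'monitor-count': '3'}})
            self._deploy()
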
--- ceph-osd/tests/charmhelpers/__init__.py | 0 .../tests/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/amulet/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 77 +++++ .../charmhelpers/contrib/amulet/utils.py | 176 +++++++++++ .../contrib/openstack/__init__.py | 0 .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 90 ++++++ .../contrib/openstack/amulet/utils.py | 276 ++++++++++++++++++ 9 files changed, 619 insertions(+) create mode 100644 ceph-osd/tests/charmhelpers/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/contrib/__init__.py b/ceph-osd/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..d859d367 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,77 @@ +import amulet + +import os + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. 
+ """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..c843333f --- /dev/null +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,176 @@ +import ConfigParser +import io +import logging +import re +import sys +import time + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Validate services. + + Verify the specified services are running on the corresponding + service units. 
+ """ + for k, v in commands.iteritems(): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + """ + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool. + """ + for k, v in expected.iteritems(): + if k in actual: + if (isinstance(v, basestring) or + isinstance(v, bool) or + isinstance(v, (int, long))): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. + """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. 
+ """ + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..1a882fdb --- /dev/null +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,90 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, stable=True): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. 
+ + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..0f312b99 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,276 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. 
+ + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + 
opener.retrieve(cirros_url, local_path) + f.close() + + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From ccb04231f89d61d03ec93eaf6d0c2a3653e03d33 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 17:18:59 +0000 Subject: [PATCH 0472/2699] Remove leading whitespace from templates/ceph.conf (ConfigParser can't parse) --- ceph-osd/templates/ceph.conf | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index def993c3..ed6b8205 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,34 +1,34 @@ [global] {% if old_auth %} - auth supported = {{ auth_supported }} +auth supported = {{ auth_supported }} {% else %} - auth cluster required = {{ auth_supported }} - auth service required = {{ auth_supported }} - auth client required = {{ auth_supported }} +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} {% endif %} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} - fsid = {{ fsid }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +fsid = {{ fsid }} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} 
{%- if ceph_public_network is string %} - public network = {{ ceph_public_network }} +public network = {{ ceph_public_network }} {%- endif %} {%- if ceph_cluster_network is string %} - cluster network = {{ ceph_cluster_network }} +cluster network = {{ ceph_cluster_network }} {%- endif %} [mon] - keyring = /var/lib/ceph/mon/$cluster-$id/keyring +keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] - keyring = /var/lib/ceph/mds/$cluster-$id/keyring +keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] - keyring = /var/lib/ceph/osd/$cluster-$id/keyring - osd journal size = {{ osd_journal_size }} - filestore xattr use omap = true +keyring = /var/lib/ceph/osd/$cluster-$id/keyring +osd journal size = {{ osd_journal_size }} +filestore xattr use omap = true From 2774b9f4a5bad0d862c1803e9df99ab53c8ea94c Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 17:33:59 +0000 Subject: [PATCH 0473/2699] Sync charm-helpers. --- .../hooks/charmhelpers/core/hookenv.py | 10 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 35 ++- .../charmhelpers/core/services/helpers.py | 124 +++++++- .../hooks/charmhelpers/fetch/__init__.py | 24 +- .../hooks/charmhelpers/fetch/archiveurl.py | 87 +++--- ceph-radosgw/tests/charmhelpers/__init__.py | 0 .../tests/charmhelpers/contrib/__init__.py | 0 .../charmhelpers/contrib/amulet/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 77 +++++ .../charmhelpers/contrib/amulet/utils.py | 176 +++++++++++ .../contrib/openstack/__init__.py | 0 .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 90 ++++++ .../contrib/openstack/amulet/utils.py | 276 ++++++++++++++++++ 14 files changed, 839 insertions(+), 60 deletions(-) create mode 100644 ceph-radosgw/tests/charmhelpers/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 324987e6..af8fe2db 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -486,9 +486,10 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -499,9 +500,10 @@ def execute(self, args): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() - cfg = config() - if cfg.implicit_save: - cfg.save() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index b85b0280..d7ce1e4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def 
service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ def provide_data(self): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. 
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. 
+ + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..32a673d6 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -208,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main @@ -311,22 +312,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this module's submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 1b11fa03..8c045650 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -12,21 +12,19 @@ get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash -""" -This class is a plugin for charmhelpers.fetch.install_remote. -It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. -Example usage: -install_remote("https://example.com/some/archive.tar.gz") -# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + Can fetch from http, https, ftp, and file URLs. -See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. -""" -class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+ + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -36,6 +34,12 @@ def can_handle(self, source): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -60,7 +64,30 @@ def download(self, source, dest): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash algorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -72,32 +99,10 @@ def install(self, source): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - if validate == 'sha1' and len(hashsum) != 40: - raise ValueError("HashSum must be = 40 characters when using sha1" - " validation") - if validate == 'md5' and len(hashsum) != 32: - raise ValueError("HashSum must be = 32 characters when using md5" - " validation") - tempfile, headers = urlretrieve(url) - self.validate_file(tempfile, hashsum, validate) - return tempfile - - # Predicate method that returns status of hash matching expected hash.
- def validate_file(self, source, hashsum, vmethod='sha1'): - if vmethod != 'sha1' and vmethod != 'md5': - raise ValueError("Validation Method not supported") - - if vmethod == 'md5': - m = hashlib.md5() - if vmethod == 'sha1': - m = hashlib.sha1() - with open(source) as f: - for line in f: - m.update(line) - if hashsum != m.hexdigest(): - msg = "Hash Mismatch on {} expected {} got {}" - raise ValueError(msg.format(source, hashsum, m.hexdigest())) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..d859d367 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,77 @@ +import amulet + +import os + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. 
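The fragment handling that replaces the removed validate_file helper hands everything to check_hash, keyed on digest names that hashlib recognises. A standalone sketch of that parsing, with an invented URL:

    import hashlib
    import urlparse

    url_parts = urlparse.urlparse('http://example.com/file.tgz#sha1=deadbeef')
    options = urlparse.parse_qs(url_parts.fragment)   # {'sha1': ['deadbeef']}
    for key, value in options.items():
        if key in hashlib.algorithms:                 # md5, sha1, sha256, ...
            print('would validate %s digest %s' % (key, value[0]))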
+ """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..c843333f --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,176 @@ +import ConfigParser +import io +import logging +import re +import sys +import time + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Validate services. + + Verify the specified services are running on the corresponding + service units. 
+ """ + for k, v in commands.iteritems(): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + """ + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool. + """ + for k, v in expected.iteritems(): + if k in actual: + if (isinstance(v, basestring) or + isinstance(v, bool) or + isinstance(v, (int, long))): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. + """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. 
+ """ + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..1a882fdb --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,90 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, stable=True): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. 
+ + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..0f312b99 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,276 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: 
{}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From 4148b62d9aaef8f179f2054f9e068aa8bfa0005e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 18:15:47 +0000 Subject: [PATCH 0474/2699] Amulet test fixes: * Makefile: Only run precise-icehouse and trusty-icehouse tests by default and increase test timeout * t/00-setup: Add more required dependencies * t/README: Mention charm-tools dependency * t/basic_deployment.py: - Specify unstable charm deployment - Use dicts in add_services --- ceph-proxy/Makefile | 3 ++- ceph-proxy/tests/00-setup | 6 ++++-- ceph-proxy/tests/README | 6 ++++++ ceph-proxy/tests/basic_deployment.py | 19 ++++++++++--------- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index b57c4e08..70e95fbb 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -10,7 +10,8 @@ test: # coreycb 
note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index 62f40029..1243ec43 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -4,5 +4,7 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet -sudo apt-get install --yes python-keystoneclient +sudo apt-get install --yes python-amulet \ + python-keystoneclient \ + python-glanceclient \ + python-novaclient diff --git a/ceph-proxy/tests/README b/ceph-proxy/tests/README index 6eb04415..8072a8b0 100644 --- a/ceph-proxy/tests/README +++ b/ceph-proxy/tests/README @@ -1,6 +1,12 @@ This directory provides Amulet tests that focus on verification of ceph deployments. +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + If you use a web proxy server to access the web, you'll need to set the AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 8529a10e..86548963 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -17,9 +17,9 @@ class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=False): """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source) + super(CephBasicDeployment, self).__init__(series, openstack, source, stable) self._add_services() self._add_relations() self._configure_services() @@ -29,14 +29,15 @@ def __init__(self, series=None, openstack=None, source=None): def _add_services(self): """Add services - Add the services that we're testing, including the number of units, - where ceph is local, and mysql and cinder are from the charm - store. + Add the services that we're testing, where ceph is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). 
""" - this_service = ('ceph', 3) - other_services = [('mysql', 1), ('keystone', 1), - ('rabbitmq-server', 1), ('nova-compute', 1), - ('glance', 1), ('cinder', 1)] + this_service = {'name': 'ceph', 'units': 3} + other_services = [{'name': 'mysql'}, {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, {'name': 'cinder'}] super(CephBasicDeployment, self)._add_services(this_service, other_services) From f32e9bfd8aa815a23802dab6fd580414533716a4 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 18:15:47 +0000 Subject: [PATCH 0475/2699] Amulet test fixes: * Makefile: Only run precise-icehouse and trusty-icehouse tests by default and increase test timeout * t/00-setup: Add more required dependencies * t/README: Mention charm-tools dependency * t/basic_deployment.py: - Specify unstable charm deployment - Use dicts in add_services --- ceph-mon/Makefile | 3 ++- ceph-mon/tests/00-setup | 6 ++++-- ceph-mon/tests/README | 6 ++++++ ceph-mon/tests/basic_deployment.py | 19 ++++++++++--------- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index b57c4e08..70e95fbb 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -10,7 +10,8 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index 62f40029..1243ec43 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -4,5 +4,7 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet -sudo apt-get install --yes python-keystoneclient +sudo apt-get install --yes python-amulet \ + python-keystoneclient \ + python-glanceclient \ + python-novaclient diff --git a/ceph-mon/tests/README b/ceph-mon/tests/README index 6eb04415..8072a8b0 100644 --- a/ceph-mon/tests/README +++ b/ceph-mon/tests/README @@ -1,6 +1,12 @@ This directory provides Amulet tests that focus on verification of ceph deployments. +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + If you use a web proxy server to access the web, you'll need to set the AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. 
diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 8529a10e..86548963 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -17,9 +17,9 @@ class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=False): """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source) + super(CephBasicDeployment, self).__init__(series, openstack, source, stable) self._add_services() self._add_relations() self._configure_services() @@ -29,14 +29,15 @@ def __init__(self, series=None, openstack=None, source=None): def _add_services(self): """Add services - Add the services that we're testing, including the number of units, - where ceph is local, and mysql and cinder are from the charm - store. + Add the services that we're testing, where ceph is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). """ - this_service = ('ceph', 3) - other_services = [('mysql', 1), ('keystone', 1), - ('rabbitmq-server', 1), ('nova-compute', 1), - ('glance', 1), ('cinder', 1)] + this_service = {'name': 'ceph', 'units': 3} + other_services = [{'name': 'mysql'}, {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, {'name': 'cinder'}] super(CephBasicDeployment, self)._add_services(this_service, other_services) From 294336110f3b5b071c4de0905fe75e8bedf6d4b4 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 18:17:20 +0000 Subject: [PATCH 0476/2699] Add basic amulet tests. --- ceph-osd/Makefile | 10 +- ceph-osd/tests/00-setup | 10 + ceph-osd/tests/14-basic-precise-icehouse | 11 + ceph-osd/tests/15-basic-trusty-icehouse | 9 + ceph-osd/tests/README | 53 +++++ ceph-osd/tests/basic_deployment.py | 281 +++++++++++++++++++++++ 6 files changed, 373 insertions(+), 1 deletion(-) create mode 100755 ceph-osd/tests/00-setup create mode 100755 ceph-osd/tests/14-basic-precise-icehouse create mode 100755 ceph-osd/tests/15-basic-trusty-icehouse create mode 100644 ceph-osd/tests/README create mode 100644 ceph-osd/tests/basic_deployment.py diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index d7a13bf8..6f53deac 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -2,9 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers hooks tests @charm proof || true +test: + @echo Starting Amulet tests... 
+ # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/00-setup new file mode 100755 index 00000000..1243ec43 --- /dev/null +++ b/ceph-osd/tests/00-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet \ + python-keystoneclient \ + python-glanceclient \ + python-novaclient diff --git a/ceph-osd/tests/14-basic-precise-icehouse b/ceph-osd/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..13aee613 --- /dev/null +++ b/ceph-osd/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on precise-icehouse.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/ceph-osd/tests/15-basic-trusty-icehouse b/ceph-osd/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..9079f5e3 --- /dev/null +++ b/ceph-osd/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-icehouse.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/ceph-osd/tests/README b/ceph-osd/tests/README new file mode 100644 index 00000000..643eb8dd --- /dev/null +++ b/ceph-osd/tests/README @@ -0,0 +1,53 @@ +This directory provides Amulet tests that focus on verification of ceph-osd +deployments. + +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. + + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. 
+ +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py new file mode 100644 index 00000000..2f0542b2 --- /dev/null +++ b/ceph-osd/tests/basic_deployment.py @@ -0,0 +1,281 @@ +#!/usr/bin/python import amulet + +import amulet +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class CephOsdBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph-osd deployment.""" + + def __init__(self, series=None, openstack=None, source=None, + stable=False): + """Deploy the entire test environment.""" + super(CephOsdBasicDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where ceph-osd is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). 
+ """ + this_service = {'name': 'ceph-osd'} + other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, + {'name': 'keystone'}, {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, {'name': 'glance'}, + {'name': 'cinder'}] + super(CephOsdBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:ceph': 'ceph:client', + 'keystone:shared-db': 'mysql:shared-db', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'glance:ceph': 'ceph:client', + 'cinder:shared-db': 'mysql:shared-db', + 'cinder:identity-service': 'keystone:identity-service', + 'cinder:amqp': 'rabbitmq-server:amqp', + 'cinder:image-service': 'glance:image-service', + 'cinder:ceph': 'ceph:client', + 'ceph-osd:mon': 'ceph:osd' + } + super(CephOsdBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + mysql_config = {'dataset-size': '50%'} + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' + } + ceph_osd_config = { + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' + } + + configs = {'keystone': keystone_config, + 'mysql': mysql_config, + 'cinder': cinder_config, + 'ceph': ceph_config, + 'ceph-osd': ceph_osd_config} + super(CephOsdBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = 
u.authenticate_keystone_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + def _ceph_osd_id(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa + + def test_services(self): + """Verify the expected services are running on the service units.""" + commands = { + self.mysql_sentry: ['status mysql'], + self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], + self.nova_compute_sentry: ['status nova-compute'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', + 'status glance-api'], + self.cinder_sentry: ['status cinder-api', + 'status cinder-scheduler', + 'status cinder-volume'] + } + ceph_services = ['status ceph-mon-all', + 'status ceph-mon id=`hostname`'] + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) + ceph_osd_services = [ceph_osd0, ceph_osd1, 'status ceph-osd-all'] + ceph_services.extend(ceph_osd_services) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + commands[self.ceph_osd_sentry] = ceph_osd_services + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_ceph_osd_ceph_relation(self): + """Verify the ceph-osd to ceph relation data.""" + unit = self.ceph_osd_sentry + relation = ['mon', 'ceph:osd'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph-osd to ceph', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph0_to_ceph_osd_relation(self): + """Verify the ceph0 to ceph-osd relation data.""" + unit = self.ceph0_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph0 to ceph-osd', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph1_to_ceph_osd_relation(self): + """Verify the ceph1 to ceph-osd relation data.""" + unit = self.ceph1_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph1 to ceph-osd', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph2_to_ceph_osd_relation(self): + """Verify the ceph2 to ceph-osd relation data.""" + unit = self.ceph2_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph2 to ceph-osd', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def 
test_ceph_config(self): + """Verify the data in the ceph config file.""" + unit = self.ceph_osd_sentry + conf = '/etc/ceph/ceph.conf' + expected = { + 'global': { + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none', + 'keyring': '/etc/ceph/$cluster.$name.keyring', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false' + }, + 'mon': { + 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' + }, + 'mds': { + 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' + }, + 'osd': { + 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', + 'osd journal size': '1024', + 'filestore xattr use omap': 'true' + }, + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify the specified services are restarted on config change.""" + # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs + # aren't restarted by charm after config change. Should + # they be restarted? + if self._get_openstack_release() >= self.precise_essex: + u.log.error("Test not implemented") + return From ca6b1ac06b9b3adb99469030d8b64b50d9e0423c Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Sat, 27 Sep 2014 19:16:33 +0000 Subject: [PATCH 0477/2699] Remove leading whitespace from templates/ceph.conf (ConfigParser can't parse) --- ceph-radosgw/templates/ceph.conf | 40 ++++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index d57943c8..a94483d1 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -1,29 +1,29 @@ [global] {% if old_auth %} - auth supported = {{ auth_supported }} +auth supported = {{ auth_supported }} {% else %} - auth cluster required = {{ auth_supported }} - auth service required = {{ auth_supported }} - auth client required = {{ auth_supported }} +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} {% endif %} - mon host = {{ mon_hosts }} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} +mon host = {{ mon_hosts }} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} [client.radosgw.gateway] - host = {{ hostname }} - keyring = /etc/ceph/keyring.rados.gateway - rgw socket path = /tmp/radosgw.sock - log file = /var/log/ceph/radosgw.log - # Turn off 100-continue optimization as stock mod_fastcgi - # does not support it - rgw print continue = false +host = {{ hostname }} +keyring = /etc/ceph/keyring.rados.gateway +rgw socket path = /tmp/radosgw.sock +log file = /var/log/ceph/radosgw.log +# Turn off 100-continue optimization as stock mod_fastcgi +# does not support it +rgw print continue = false {% if auth_type == 'keystone' %} - rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ - rgw keystone admin token = {{ admin_token }} - rgw keystone accepted roles = {{ user_roles }} - rgw keystone token cache size = {{ cache_size }} - rgw keystone revocation interval = {{ revocation_check_interval }} +rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ +rgw keystone admin token = {{ admin_token }} +rgw 
keystone accepted roles = {{ user_roles }} +rgw keystone token cache size = {{ cache_size }} +rgw keystone revocation interval = {{ revocation_check_interval }} #nss db path = /var/lib/ceph/nss {% endif %} From 7cae7349f13b59e04caead4b9903d7a404e52410 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sun, 28 Sep 2014 12:41:06 +0800 Subject: [PATCH 0478/2699] Ceph need to set public/cluster network for IPv6. --- ceph-proxy/hooks/hooks.py | 11 ++++++++++- ceph-proxy/hooks/utils.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index dcbbd543..98cdd34b 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -48,7 +48,8 @@ render_template, get_public_addr, assert_charm_supports_ipv6, - get_host_ip + get_host_ip, + get_network ) hooks = Hooks() @@ -84,6 +85,14 @@ def emit_cephconf(): 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network') } + + if config('prefer-ipv6'): + network = get_network() + if not config('ceph-public-network'): + cephcontext['ceph_public_network'] = network + if not config('ceph-cluster-network'): + cephcontext['ceph_cluster_network'] = network + # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 825c0797..791dc54e 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -98,3 +98,33 @@ def assert_charm_supports_ipv6(): if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") + + +def get_network(iface="eth0"): + try: + try: + import netifaces + except ImportError: + apt_install('python-netifaces') + import netifaces + + try: + from netaddr import IPNetwork + except ImportError: + apt_install('python-netaddr', fatal=True) + from netaddr import IPNetwork + + ipv6_address = get_ipv6_addr(iface)[0] + ifa_addrs = netifaces.ifaddresses(iface) + + for ifaddr in ifa_addrs[netifaces.AF_INET6]: + if ipv6_address == ifaddr['addr']: + network = "{}/{}".format(ifaddr['addr'], + ifaddr['netmask']) + ip = IPNetwork(network) + return str(ip.network) + + except ValueError: + raise Exception("Invalid interface '%s'" % iface) + + raise Exception("No valid network found in interface '%s'" % iface) From 3ae351ef46a43767c0f5967b2f66f38dcafb3990 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sun, 28 Sep 2014 12:41:06 +0800 Subject: [PATCH 0479/2699] Ceph need to set public/cluster network for IPv6. 
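The get_network helper in the patch above joins an interface's IPv6 address and netmask from netifaces and lets netaddr normalise the result to the network address. The core idea, sketched with an invented address and a prefix length for brevity:

    from netaddr import IPNetwork

    # Invented address; the helper builds this string from netifaces data.
    ip = IPNetwork('2001:db8::10/64')
    print(str(ip.network))  # -> 2001:db8:: (the value used as the ceph network)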
--- ceph-mon/hooks/hooks.py | 11 ++++++++++- ceph-mon/hooks/utils.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index dcbbd543..98cdd34b 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -48,7 +48,8 @@ render_template, get_public_addr, assert_charm_supports_ipv6, - get_host_ip + get_host_ip, + get_network ) hooks = Hooks() @@ -84,6 +85,14 @@ def emit_cephconf(): 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network') } + + if config('prefer-ipv6'): + network = get_network() + if not config('ceph-public-network'): + cephcontext['ceph_public_network'] = network + if not config('ceph-cluster-network'): + cephcontext['ceph_cluster_network'] = network + # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 825c0797..791dc54e 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -98,3 +98,33 @@ def assert_charm_supports_ipv6(): if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") + + +def get_network(iface="eth0"): + try: + try: + import netifaces + except ImportError: + apt_install('python-netifaces') + import netifaces + + try: + from netaddr import IPNetwork + except ImportError: + apt_install('python-netaddr', fatal=True) + from netaddr import IPNetwork + + ipv6_address = get_ipv6_addr(iface)[0] + ifa_addrs = netifaces.ifaddresses(iface) + + for ifaddr in ifa_addrs[netifaces.AF_INET6]: + if ipv6_address == ifaddr['addr']: + network = "{}/{}".format(ifaddr['addr'], + ifaddr['netmask']) + ip = IPNetwork(network) + return str(ip.network) + + except ValueError: + raise Exception("Invalid interface '%s'" % iface) + + raise Exception("No valid network found in interface '%s'" % iface) From 2672fb4fb81c26a57092bb9e7000745dbc5ced49 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sun, 28 Sep 2014 14:00:11 +0800 Subject: [PATCH 0480/2699] Use public/cluster addr for osd, osd mon can't parse ipv6 network. 
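The hooks change that follows drops network derivation in favour of a direct address: with prefer-ipv6 set and no explicit networks configured, the unit's own dynamic IPv6 address lands in the template context. Distilled into a hypothetical helper (config values invented):

    def ipv6_addr_context(cfg, dynamic_ipv6_address):
        ctx = {}
        if cfg.get('prefer-ipv6'):
            if not cfg.get('ceph-public-network'):
                ctx['public_addr'] = dynamic_ipv6_address
            if not cfg.get('ceph-cluster-network'):
                ctx['cluster_addr'] = dynamic_ipv6_address
        return ctx

    print(ipv6_addr_context({'prefer-ipv6': True}, '2001:db8::10'))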
--- ceph-proxy/hooks/hooks.py | 12 ++++++------ ceph-proxy/hooks/utils.py | 30 ------------------------------ ceph-proxy/templates/ceph.conf | 6 ++++++ 3 files changed, 12 insertions(+), 36 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 98cdd34b..9bdd776a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -41,15 +41,15 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6 + is_ipv6, + get_ipv6_addr ) from utils import ( render_template, get_public_addr, assert_charm_supports_ipv6, - get_host_ip, - get_network + get_host_ip ) hooks = Hooks() @@ -87,11 +87,11 @@ def emit_cephconf(): } if config('prefer-ipv6'): - network = get_network() + dynamic_ipv6_address = get_ipv6_addr()[0] if not config('ceph-public-network'): - cephcontext['ceph_public_network'] = network + cephcontext['public_addr'] = dynamic_ipv6_address if not config('ceph-cluster-network'): - cephcontext['ceph_cluster_network'] = network + cephcontext['cluster_addr'] = dynamic_ipv6_address # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 791dc54e..825c0797 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -98,33 +98,3 @@ def assert_charm_supports_ipv6(): if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") - - -def get_network(iface="eth0"): - try: - try: - import netifaces - except ImportError: - apt_install('python-netifaces') - import netifaces - - try: - from netaddr import IPNetwork - except ImportError: - apt_install('python-netaddr', fatal=True) - from netaddr import IPNetwork - - ipv6_address = get_ipv6_addr(iface)[0] - ifa_addrs = netifaces.ifaddresses(iface) - - for ifaddr in ifa_addrs[netifaces.AF_INET6]: - if ipv6_address == ifaddr['addr']: - network = "{}/{}".format(ifaddr['addr'], - ifaddr['netmask']) - ip = IPNetwork(network) - return str(ip.network) - - except ValueError: - raise Exception("Invalid interface '%s'" % iface) - - raise Exception("No valid network found in interface '%s'" % iface) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 3b0d91f1..5ea82d18 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -33,3 +33,9 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} From aaaaebd8b87f047cd627f10d20a5ee9444f0df31 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sun, 28 Sep 2014 14:00:11 +0800 Subject: [PATCH 0481/2699] Use public/cluster addr for osd, osd mon can't parse ipv6 network. 
--- ceph-mon/hooks/hooks.py | 12 ++++++------ ceph-mon/hooks/utils.py | 30 ------------------------------ ceph-mon/templates/ceph.conf | 6 ++++++ 3 files changed, 12 insertions(+), 36 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 98cdd34b..9bdd776a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -41,15 +41,15 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6 + is_ipv6, + get_ipv6_addr ) from utils import ( render_template, get_public_addr, assert_charm_supports_ipv6, - get_host_ip, - get_network + get_host_ip ) hooks = Hooks() @@ -87,11 +87,11 @@ def emit_cephconf(): } if config('prefer-ipv6'): - network = get_network() + dynamic_ipv6_address = get_ipv6_addr()[0] if not config('ceph-public-network'): - cephcontext['ceph_public_network'] = network + cephcontext['public_addr'] = dynamic_ipv6_address if not config('ceph-cluster-network'): - cephcontext['ceph_cluster_network'] = network + cephcontext['cluster_addr'] = dynamic_ipv6_address # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 791dc54e..825c0797 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -98,33 +98,3 @@ def assert_charm_supports_ipv6(): if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") - - -def get_network(iface="eth0"): - try: - try: - import netifaces - except ImportError: - apt_install('python-netifaces') - import netifaces - - try: - from netaddr import IPNetwork - except ImportError: - apt_install('python-netaddr', fatal=True) - from netaddr import IPNetwork - - ipv6_address = get_ipv6_addr(iface)[0] - ifa_addrs = netifaces.ifaddresses(iface) - - for ifaddr in ifa_addrs[netifaces.AF_INET6]: - if ipv6_address == ifaddr['addr']: - network = "{}/{}".format(ifaddr['addr'], - ifaddr['netmask']) - ip = IPNetwork(network) - return str(ip.network) - - except ValueError: - raise Exception("Invalid interface '%s'" % iface) - - raise Exception("No valid network found in interface '%s'" % iface) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 3b0d91f1..5ea82d18 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -33,3 +33,9 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} From 00e8ae8af765ab0f2585dadbc7890b40c2957939 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 28 Sep 2014 23:26:29 +0100 Subject: [PATCH 0482/2699] minor cleanup --- ceph-proxy/hooks/hooks.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 9bdd776a..c25f01e3 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -199,8 +199,7 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) moncount = int(config('monitor-count')) @@ -280,8 +279,7 @@ def 
radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) log('End radosgw-relation hook.') @@ -289,8 +287,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) if ceph.is_quorum(): From f5669edd9e07f7783d33dabc3f43b4d6baaa1ffd Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 28 Sep 2014 23:26:29 +0100 Subject: [PATCH 0483/2699] minor cleanup --- ceph-mon/hooks/hooks.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 9bdd776a..c25f01e3 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -199,8 +199,7 @@ def mon_relation_joined(): 'mon-relation-changed') def mon_relation(): emit_cephconf() - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) moncount = int(config('monitor-count')) @@ -280,8 +279,7 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) log('End radosgw-relation hook.') @@ -289,8 +287,7 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - relation_data = {} - relation_data['private-address'] = get_host_ip() + relation_data = {'private-address': get_host_ip()} relation_set(**relation_data) if ceph.is_quorum(): From 8de7bc35e619cff4d5f0420b834c9f86bb9e4194 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 29 Sep 2014 01:57:43 +0000 Subject: [PATCH 0484/2699] Add basic Amulet tests. --- ceph-radosgw/Makefile | 10 +- ceph-radosgw/tests/00-setup | 10 + ceph-radosgw/tests/14-basic-precise-icehouse | 11 + ceph-radosgw/tests/15-basic-trusty-icehouse | 9 + ceph-radosgw/tests/README | 53 +++ ceph-radosgw/tests/basic_deployment.py | 320 +++++++++++++++++++ 6 files changed, 412 insertions(+), 1 deletion(-) create mode 100755 ceph-radosgw/tests/00-setup create mode 100755 ceph-radosgw/tests/14-basic-precise-icehouse create mode 100755 ceph-radosgw/tests/15-basic-trusty-icehouse create mode 100644 ceph-radosgw/tests/README create mode 100644 ceph-radosgw/tests/basic_deployment.py diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index e312fd81..5c9ade39 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -2,9 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers hooks tests @charm proof +test: + @echo Starting Amulet tests... 
+ # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/00-setup new file mode 100755 index 00000000..1243ec43 --- /dev/null +++ b/ceph-radosgw/tests/00-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet \ + python-keystoneclient \ + python-glanceclient \ + python-novaclient diff --git a/ceph-radosgw/tests/14-basic-precise-icehouse b/ceph-radosgw/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..8a3ab031 --- /dev/null +++ b/ceph-radosgw/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on precise-icehouse.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/ceph-radosgw/tests/15-basic-trusty-icehouse b/ceph-radosgw/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..34588d2f --- /dev/null +++ b/ceph-radosgw/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on trusty-icehouse.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/ceph-radosgw/tests/README b/ceph-radosgw/tests/README new file mode 100644 index 00000000..003fbe25 --- /dev/null +++ b/ceph-radosgw/tests/README @@ -0,0 +1,53 @@ +This directory provides Amulet tests that focus on verification of +ceph-radosgw deployments. + +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. + + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. 
+ +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py new file mode 100644 index 00000000..b93f964e --- /dev/null +++ b/ceph-radosgw/tests/basic_deployment.py @@ -0,0 +1,320 @@ +#!/usr/bin/python + +import amulet +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class CephRadosGwBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph-radosgw deployment.""" + + def __init__(self, series=None, openstack=None, source=None, stable=False): + """Deploy the entire test environment.""" + super(CephRadosGwBasicDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where ceph-radosgw is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). 
+ """ + this_service = {'name': 'ceph-radosgw'} + other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, + {'name': 'keystone'}, {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, {'name': 'glance'}, + {'name': 'cinder'}] + super(CephRadosGwBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:ceph': 'ceph:client', + 'keystone:shared-db': 'mysql:shared-db', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'glance:ceph': 'ceph:client', + 'cinder:shared-db': 'mysql:shared-db', + 'cinder:identity-service': 'keystone:identity-service', + 'cinder:amqp': 'rabbitmq-server:amqp', + 'cinder:image-service': 'glance:image-service', + 'cinder:ceph': 'ceph:client', + 'ceph-radosgw:mon': 'ceph:radosgw', + 'ceph-radosgw:identity-service': 'keystone:identity-service' + } + super(CephRadosGwBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + mysql_config = {'dataset-size': '50%'} + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' + } + + configs = {'keystone': keystone_config, + 'mysql': mysql_config, + 'cinder': cinder_config, + 'ceph': ceph_config} + super(CephRadosGwBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph_radosgw_sentry = self.d.sentry.unit['ceph-radosgw/0'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = u.authenticate_keystone_user(self.keystone, + self.demo_user, 
+ 'password', + self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + def _ceph_osd_id(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa + + def test_services(self): + """Verify the expected services are running on the service units.""" + ceph_services = ['status ceph-mon-all', + 'status ceph-mon id=`hostname`'] + commands = { + self.mysql_sentry: ['status mysql'], + self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], + self.nova_compute_sentry: ['status nova-compute'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', + 'status glance-api'], + self.cinder_sentry: ['status cinder-api', + 'status cinder-scheduler', + 'status cinder-volume'], + self.ceph_radosgw_sentry: ['status radosgw-all'] + } + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) + ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_ceph_radosgw_ceph_relation(self): + """Verify the ceph-radosgw to ceph relation data.""" + unit = self.ceph_radosgw_sentry + relation = ['mon', 'ceph:radosgw'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph-radosgw to ceph', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph0_ceph_radosgw_relation(self): + """Verify the ceph0 to ceph-radosgw relation data.""" + unit = self.ceph0_sentry + relation = ['radosgw', 'ceph-radosgw:mon'] + expected = { + 'private-address': u.valid_ip, + 'radosgw_key': u.not_null, + 'auth': 'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph0 to ceph-radosgw', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph1_ceph_radosgw_relation(self): + """Verify the ceph1 to ceph-radosgw relation data.""" + unit = self.ceph1_sentry + relation = ['radosgw', 'ceph-radosgw:mon'] + expected = { + 'private-address': u.valid_ip, + 'radosgw_key': u.not_null, + 'auth': 'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph1 to ceph-radosgw', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph2_ceph_radosgw_relation(self): + """Verify the ceph2 to ceph-radosgw relation data.""" + unit = self.ceph2_sentry + relation = ['radosgw', 'ceph-radosgw:mon'] + expected = { + 'private-address': u.valid_ip, + 'radosgw_key': u.not_null, + 'auth': 'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph2 to ceph-radosgw', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_radosgw_keystone_relation(self): + """Verify the 
ceph-radosgw to keystone relation data.""" + unit = self.ceph_radosgw_sentry + relation = ['identity-service', 'keystone:identity-service'] + expected = { + 'service': 'swift', + 'region': 'RegionOne', + 'public_url': u.valid_url, + 'internal_url': u.valid_url, + 'private-address': u.valid_ip, + 'requested_roles': 'Member,Admin', + 'admin_url': u.valid_url + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph-radosgw to keystone', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_keystone_ceph_radosgw_relation(self): + """Verify the keystone to ceph-radosgw relation data.""" + unit = self.keystone_sentry + relation = ['identity-service', 'ceph-radosgw:identity-service'] + expected = { + 'service_protocol': 'http', + 'service_tenant': 'services', + 'admin_token': 'ubuntutesting', + 'service_password': u.not_null, + 'service_port': '5000', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'private-address': u.valid_ip, + 'https_keystone': 'False', + 'auth_host': u.valid_ip, + 'service_username': 'swift', + 'service_tenant_id': u.not_null, + 'service_host': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('keystone to ceph-radosgw', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_config(self): + """Verify the data in the ceph config file.""" + unit = self.ceph_radosgw_sentry + conf = '/etc/ceph/ceph.conf' + keystone_sentry = self.keystone_sentry + relation = keystone_sentry.relation('identity-service', + 'ceph-radosgw:identity-service') + keystone_ip = relation['auth_host'] + expected = { + 'global': { + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false' + }, + 'client.radosgw.gateway': { + 'keyring': '/etc/ceph/keyring.rados.gateway', + 'rgw socket path': '/tmp/radosgw.sock', + 'log file': '/var/log/ceph/radosgw.log', + 'rgw print continue': 'false', + 'rgw keystone url': 'http://{}:35357/'.format(keystone_ip), + 'rgw keystone admin token': 'ubuntutesting', + 'rgw keystone accepted roles': 'Member,Admin', + 'rgw keystone token cache size': '500', + 'rgw keystone revocation interval': '600' + }, + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify the specified services are restarted on config change.""" + # NOTE(coreycb): Test not implemented but should it be? ceph-radosgw + # svcs aren't restarted by charm after config change + # Should they be restarted? 
+ if self._get_openstack_release() >= self.precise_essex: + u.log.error("Test not implemented") + return From defa033dc72f45fde1b9f03e2d68fb4d71138d2d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 29 Sep 2014 11:55:42 +0100 Subject: [PATCH 0485/2699] fixup ceph.conf template (needs newline at EOF) --- ceph-proxy/templates/ceph.conf | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 5ea82d18..e168f54a 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -22,6 +22,13 @@ public network = {{ ceph_public_network }} cluster network = {{ ceph_cluster_network }} {%- endif %} +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring @@ -33,9 +40,3 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true -{% if public_addr %} -public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} -cluster addr = {{ cluster_addr }} -{%- endif %} From 90dcfbd32016c6d5d944aeef4493eafef897929e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 29 Sep 2014 11:55:42 +0100 Subject: [PATCH 0486/2699] fixup ceph.conf template (needs newline at EOF) --- ceph-mon/templates/ceph.conf | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 5ea82d18..e168f54a 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -22,6 +22,13 @@ public network = {{ ceph_public_network }} cluster network = {{ ceph_cluster_network }} {%- endif %} +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring @@ -33,9 +40,3 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true -{% if public_addr %} -public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} -cluster addr = {{ cluster_addr }} -{%- endif %} From 0738d0eac83d7b7e2eed40a989776747d2bf41c2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 29 Sep 2014 12:00:26 +0100 Subject: [PATCH 0487/2699] set private/public addr like in ceph charm --- ceph-osd/hooks/hooks.py | 10 +++++++++- ceph-osd/templates/ceph.conf | 8 ++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 24731833..d0e2bbef 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -46,7 +46,8 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6 + is_ipv6, + get_ipv6_addr ) hooks = Hooks() @@ -86,6 +87,13 @@ def emit_cephconf(): 'ceph_cluster_network': config('ceph-cluster-network'), } + if config('prefer-ipv6'): + dynamic_ipv6_address = get_ipv6_addr()[0] + if not config('ceph-public-network'): + cephcontext['public_addr'] = dynamic_ipv6_address + if not config('ceph-cluster-network'): + cephcontext['cluster_addr'] = dynamic_ipv6_address + # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) diff --git a/ceph-osd/templates/ceph.conf 
b/ceph-osd/templates/ceph.conf index def993c3..6177b050 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -21,6 +21,14 @@ cluster network = {{ ceph_cluster_network }} {%- endif %} +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} + + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 237e0bd8ca24a7b9eb268cf1112d4c641ccd558d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 29 Sep 2014 12:28:21 +0100 Subject: [PATCH 0488/2699] only set client relation setting if mon quorum --- ceph-proxy/hooks/hooks.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c25f01e3..44290159 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -287,9 +287,6 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) - if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None @@ -304,6 +301,7 @@ def client_relation(relid=None): 'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), + 'private-address': get_host_ip() } relation_set(relation_id=relid, relation_settings=data) From e85f403f451dbc492877aa2200d6ca354bff3666 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 29 Sep 2014 12:28:21 +0100 Subject: [PATCH 0489/2699] only set client relation setting if mon quorum --- ceph-mon/hooks/hooks.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c25f01e3..44290159 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -287,9 +287,6 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') def client_relation(relid=None): - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) - if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None @@ -304,6 +301,7 @@ def client_relation(relid=None): 'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), + 'private-address': get_host_ip() } relation_set(relation_id=relid, relation_settings=data) From 045e11a2f49db10a6dc688b6c9c53129f7c0751b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 29 Sep 2014 20:39:40 +0000 Subject: [PATCH 0490/2699] Sync charm-helpers to pick up lint fix. --- .../tests/charmhelpers/contrib/openstack/amulet/deployment.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 1a882fdb..3c7f422a 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -16,7 +16,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come out. + # Note(coreycb): this needs to be changed when new next branches come + # out. 
self.current_next = "trusty" def _determine_branch_locations(self, other_services): From a8205538c4d78a59c2c18640bdd7822811ac5609 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 29 Sep 2014 20:41:39 +0000 Subject: [PATCH 0491/2699] Sync charm-helpers and fix lint errors. --- ceph-proxy/tests/basic_deployment.py | 3 ++- .../charmhelpers/contrib/openstack/amulet/deployment.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 86548963..d073d08b 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -19,7 +19,8 @@ class CephBasicDeployment(OpenStackAmuletDeployment): def __init__(self, series=None, openstack=None, source=None, stable=False): """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source, stable) + super(CephBasicDeployment, self).__init__(series, openstack, source, + stable) self._add_services() self._add_relations() self._configure_services() diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 495ebdb6..3c7f422a 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -16,7 +16,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come out. + # Note(coreycb): this needs to be changed when new next branches come + # out. self.current_next = "trusty" def _determine_branch_locations(self, other_services): @@ -51,7 +52,8 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] if self.openstack: for svc in services: From 368fae4434e739e95795e6c495ca9ef90204168a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 29 Sep 2014 20:41:39 +0000 Subject: [PATCH 0492/2699] Sync charm-helpers and fix lint errors. 
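Besides the line-length lint fix, the sync adds ceph-osd and ceph-radosgw to the helper's use_source list, so Amulet deployments configure those charms through their 'source' option. A simplified, illustrative sketch of that branch (the 'openstack-origin' side is an assumption from the wider helper, not shown in this hunk):

    def origin_option(service,
                      use_source=('mysql', 'mongodb', 'rabbitmq-server',
                                  'ceph', 'ceph-osd', 'ceph-radosgw')):
        # Storage and infrastructure charms take a raw 'source'; the
        # OpenStack service charms take 'openstack-origin'.
        return 'source' if service in use_source else 'openstack-origin'

    print(origin_option('ceph-osd'))   # source
    print(origin_option('glance'))     # openstack-origin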
--- ceph-mon/tests/basic_deployment.py | 3 ++- .../charmhelpers/contrib/openstack/amulet/deployment.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 86548963..d073d08b 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -19,7 +19,8 @@ class CephBasicDeployment(OpenStackAmuletDeployment): def __init__(self, series=None, openstack=None, source=None, stable=False): """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source, stable) + super(CephBasicDeployment, self).__init__(series, openstack, source, + stable) self._add_services() self._add_relations() self._configure_services() diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 495ebdb6..3c7f422a 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -16,7 +16,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come out. + # Note(coreycb): this needs to be changed when new next branches come + # out. self.current_next = "trusty" def _determine_branch_locations(self, other_services): @@ -51,7 +52,8 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] if self.openstack: for svc in services: From 56819da4677702a54678228b899b7e8eaad1111e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 29 Sep 2014 20:44:50 +0000 Subject: [PATCH 0493/2699] Sync charm-helpers to pick up lint fix. --- .../tests/charmhelpers/contrib/openstack/amulet/deployment.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 1a882fdb..3c7f422a 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -16,7 +16,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come out. + # Note(coreycb): this needs to be changed when new next branches come + # out. self.current_next = "trusty" def _determine_branch_locations(self, other_services): From c5e3110141bccce6dd319bdc29373b3e19fa8290 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 30 Sep 2014 11:06:10 +0800 Subject: [PATCH 0494/2699] Refactor codes. 
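The refactor drops the open-coded is_ipv6() branching in favour of charmhelpers' format_ipv6_addr(), and relies on peers publishing ceph-public-address rather than switching on prefer-ipv6. A self-contained sketch of the host-string handling this converges on; the stand-in below assumes, as the hook code implies, that format_ipv6_addr() returns '[addr]' for IPv6 input and None otherwise:

    import netaddr

    def format_ipv6_addr(addr):
        # stand-in for charmhelpers.contrib.network.ip.format_ipv6_addr
        return '[{}]'.format(addr) if netaddr.valid_ipv6(addr) else None

    for addr in ('10.0.0.21', '2001:db8::21'):
        print('{}:6789'.format(format_ipv6_addr(addr) or addr))
    # 10.0.0.21:6789
    # [2001:db8::21]:6789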
--- ceph-proxy/hooks/hooks.py | 43 ++++++++++----------------------------- ceph-proxy/hooks/utils.py | 6 +++--- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 44290159..8a6c26c8 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -41,15 +41,14 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + get_ipv6_addr, + format_ipv6_addr ) from utils import ( render_template, get_public_addr, - assert_charm_supports_ipv6, - get_host_ip + assert_charm_supports_ipv6 ) hooks = Hooks() @@ -61,9 +60,6 @@ def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') - if config('prefer-ipv6'): - assert_charm_supports_ipv6() - @hooks.hook('install') def install(): @@ -83,7 +79,7 @@ def emit_cephconf(): 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network') + 'ceph_cluster_network': config('ceph-cluster-network'), } if config('prefer-ipv6'): @@ -151,24 +147,14 @@ def config_changed(): def get_mon_hosts(): hosts = [] addr = get_public_addr() - if is_ipv6(addr): - hosts.append('[{}]:6789'.format(addr)) - else: - hosts.append('{}:6789'.format(addr)) + hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) for relid in relation_ids('mon'): for unit in related_units(relid): - if config('prefer-ipv6'): - r_attr = 'ceph-public-address' - else: - r_attr = 'private-address' - - addr = relation_get(r_attr, unit, relid) + addr = relation_get('ceph-public-address', unit, relid) if addr is not None: - if is_ipv6(addr): - hosts.append('[{}]:6789'.format(addr)) - else: - hosts.append('{}:6789'.format(addr)) + hosts.append('{}:6789'.format( + format_ipv6_addr(addr) or addr)) hosts.sort() return hosts @@ -191,16 +177,15 @@ def get_devices(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): - settings = {'ceph-public-address': get_public_addr()} - relation_set(relation_id=relid, relation_settings=settings) + relation_set(relation_id=relid, + relation_settings={'ceph-public-address': + get_public_addr()}) @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): emit_cephconf() - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: @@ -279,11 +264,6 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) - - log('End radosgw-relation hook.') - @hooks.hook('client-relation-joined') def client_relation(relid=None): @@ -301,7 +281,6 @@ def client_relation(relid=None): 'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), - 'private-address': get_host_ip() } relation_set(relation_id=relid, relation_settings=data) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 825c0797..ada3563b 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -72,7 +72,7 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): if config('prefer-ipv6'): - return hostname or 
get_ipv6_addr()[0] + return get_ipv6_addr()[0] hostname = hostname or unit_get('private-address') try: @@ -89,8 +89,8 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - network = config('ceph-public-network') - return ip.get_address_in_network(network, fallback=get_host_ip()) + return ip.get_address_in_network(config('ceph-public-network'), + fallback=get_host_ip()) def assert_charm_supports_ipv6(): From 9a2e5a58ae8fd074a41508539da55f1afad00cce Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 30 Sep 2014 11:06:10 +0800 Subject: [PATCH 0495/2699] Refactor codes. --- ceph-mon/hooks/hooks.py | 43 +++++++++++------------------------------ ceph-mon/hooks/utils.py | 6 +++--- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 44290159..8a6c26c8 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -41,15 +41,14 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + get_ipv6_addr, + format_ipv6_addr ) from utils import ( render_template, get_public_addr, - assert_charm_supports_ipv6, - get_host_ip + assert_charm_supports_ipv6 ) hooks = Hooks() @@ -61,9 +60,6 @@ def install_upstart_scripts(): for x in glob.glob('files/upstart/*.conf'): shutil.copy(x, '/etc/init/') - if config('prefer-ipv6'): - assert_charm_supports_ipv6() - @hooks.hook('install') def install(): @@ -83,7 +79,7 @@ def emit_cephconf(): 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network') + 'ceph_cluster_network': config('ceph-cluster-network'), } if config('prefer-ipv6'): @@ -151,24 +147,14 @@ def config_changed(): def get_mon_hosts(): hosts = [] addr = get_public_addr() - if is_ipv6(addr): - hosts.append('[{}]:6789'.format(addr)) - else: - hosts.append('{}:6789'.format(addr)) + hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) for relid in relation_ids('mon'): for unit in related_units(relid): - if config('prefer-ipv6'): - r_attr = 'ceph-public-address' - else: - r_attr = 'private-address' - - addr = relation_get(r_attr, unit, relid) + addr = relation_get('ceph-public-address', unit, relid) if addr is not None: - if is_ipv6(addr): - hosts.append('[{}]:6789'.format(addr)) - else: - hosts.append('{}:6789'.format(addr)) + hosts.append('{}:6789'.format( + format_ipv6_addr(addr) or addr)) hosts.sort() return hosts @@ -191,16 +177,15 @@ def get_devices(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): - settings = {'ceph-public-address': get_public_addr()} - relation_set(relation_id=relid, relation_settings=settings) + relation_set(relation_id=relid, + relation_settings={'ceph-public-address': + get_public_addr()}) @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): emit_cephconf() - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: @@ -279,11 +264,6 @@ def radosgw_relation(relid=None): else: log('mon cluster not in quorum - deferring key provision') - relation_data = {'private-address': get_host_ip()} - relation_set(**relation_data) - - log('End radosgw-relation hook.') - @hooks.hook('client-relation-joined') def 
client_relation(relid=None): @@ -301,7 +281,6 @@ def client_relation(relid=None): 'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), - 'private-address': get_host_ip() } relation_set(relation_id=relid, relation_settings=data) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 825c0797..ada3563b 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -72,7 +72,7 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): if config('prefer-ipv6'): - return hostname or get_ipv6_addr()[0] + return get_ipv6_addr()[0] hostname = hostname or unit_get('private-address') try: @@ -89,8 +89,8 @@ def get_host_ip(hostname=None): @cached def get_public_addr(): - network = config('ceph-public-network') - return ip.get_address_in_network(network, fallback=get_host_ip()) + return ip.get_address_in_network(config('ceph-public-network'), + fallback=get_host_ip()) def assert_charm_supports_ipv6(): From 6a82db2cf19292e78e4003224333586b1ae926c7 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 30 Sep 2014 11:22:30 +0800 Subject: [PATCH 0496/2699] Refactor codes. --- ceph-osd/hooks/hooks.py | 12 ------------ ceph-osd/hooks/utils.py | 2 +- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index d0e2bbef..63bc3f2d 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -15,13 +15,11 @@ import ceph from charmhelpers.core.hookenv import ( log, - WARNING, ERROR, config, relation_ids, related_units, relation_get, - relation_set, Hooks, UnregisteredHookError, service_name @@ -65,9 +63,6 @@ def install(): add_source(config('source'), config('key')) apt_update(fatal=True) - if config('prefer-ipv6'): - assert_charm_supports_ipv6() - apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() @@ -192,13 +187,6 @@ def get_devices(): @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): - host = get_host_ip() - if host: - relation_data = {'private-address': host} - relation_set(**relation_data) - else: - log("Unable to obtain host address", level=WARNING) - bootstrap_key = relation_get('osd_bootstrap_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 60b8fc27..6e8d2a54 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -71,7 +71,7 @@ def get_unit_hostname(): @cached def get_host_ip(hostname=None): if config('prefer-ipv6'): - return hostname or get_ipv6_addr()[0] + return get_ipv6_addr()[0] hostname = hostname or unit_get('private-address') try: From 88bc16b41443699217e7b062c4333048df93f3e0 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 30 Sep 2014 11:37:16 +0800 Subject: [PATCH 0497/2699] Refactor codes. 
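Same consolidation for ceph-osd's get_mon_hosts(): prefer the ceph-public-address a monitor published on the relation and only fall back to resolving its private-address. Note the format_ipv6_addr(addr or addr) slip in this hunk, which the following "Fix error." patch corrects to format_ipv6_addr(addr) or addr. A hypothetical sketch of the intended lookup (the resolver and addresses are illustrative):

    import netaddr

    def _fmt(addr):
        # stand-in for charmhelpers' format_ipv6_addr()
        return '[{}]'.format(addr) if netaddr.valid_ipv6(addr) else None

    def mon_host(settings, resolve=lambda host: host):
        addr = settings.get('ceph-public-address') or \
            resolve(settings['private-address'])
        return '{}:6789'.format(_fmt(addr) or addr)

    print(mon_host({'ceph-public-address': '2001:db8::21'}))  # [2001:db8::21]:6789
    print(mon_host({'private-address': '10.0.0.21'}))         # 10.0.0.21:6789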
--- ceph-osd/hooks/hooks.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 63bc3f2d..390288ed 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -44,8 +44,8 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( - is_ipv6, - get_ipv6_addr + get_ipv6_addr, + format_ipv6_addr ) hooks = Hooks() @@ -136,18 +136,12 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) - if not addr: - addr = relation_get('private-address', unit, relid) - if not config('prefer-ipv6'): - # This will verify ipv4 address - addr = get_host_ip(addr) + addr = relation_get('ceph-public-address', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) if addr: - if is_ipv6(addr): - hosts.append('[{}]:6789'.format(addr)) - else: - hosts.append('{}:6789'.format(addr)) + hosts.append('{}:6789'.format(format_ipv6_addr(addr or addr))) + hosts.sort() return hosts From 8490a92e00047253b29cfc391036e9e8a59c1e41 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 30 Sep 2014 11:41:06 +0800 Subject: [PATCH 0498/2699] Fix error. --- ceph-osd/hooks/hooks.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 390288ed..fdf38798 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -62,7 +62,6 @@ def install_upstart_scripts(): def install(): add_source(config('source'), config('key')) apt_update(fatal=True) - apt_install(packages=ceph.PACKAGES, fatal=True) install_upstart_scripts() @@ -140,7 +139,7 @@ def get_mon_hosts(): get_host_ip(relation_get('private-address', unit, relid)) if addr: - hosts.append('{}:6789'.format(format_ipv6_addr(addr or addr))) + hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) hosts.sort() return hosts From deb2a65c3994e1defdb6601ef4c02d1c0d2db6db Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 30 Sep 2014 13:49:23 +0100 Subject: [PATCH 0499/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 19f654d3..9a3c2bfa 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -5,6 +5,7 @@ from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( WARNING, @@ -222,12 +223,50 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which 
'%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """If no iface provided, inject net iface inferred from unit private + address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, dynamic_only=True): """Get assigned IPv6 address for a given interface. Returns list of addresses found. If no address found, returns empty list. + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. From 71fc66d44ff6f5fbd64aa03dbd1a80c80a9eaf25 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 30 Sep 2014 13:49:23 +0100 Subject: [PATCH 0500/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 19f654d3..9a3c2bfa 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -5,6 +5,7 @@ from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( WARNING, @@ -222,12 +223,50 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """If no iface provided, inject net iface inferred from unit private + address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, dynamic_only=True): """Get assigned IPv6 address for a given interface. Returns list of addresses found. If no address found, returns empty list. + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. 
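The synced get_iface_from_addr() lets the helpers work out which interface carries a given address, and the sniff_iface decorator uses it so callers of get_ipv6_addr() no longer need to hard-code eth0. A minimal sketch of the decorator pattern, with the interface lookup stubbed out (the real code infers it from unit_get('private-address')):

    def sniff_iface(f):
        def iface_sniffer(*args, **kwargs):
            if not kwargs.get('iface', None):
                kwargs['iface'] = 'eth0'   # stand-in for get_iface_from_addr(...)
            return f(*args, **kwargs)
        return iface_sniffer

    @sniff_iface
    def get_ipv6_addr(iface=None, **kwargs):
        return ['2001:db8::10']            # hypothetical address on iface

    print(get_ipv6_addr())                 # callers need not name an iface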
From 142d6138ea6c6fb48af0e4b36521792d28186d68 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 30 Sep 2014 13:49:42 +0100 Subject: [PATCH 0501/2699] synced charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 19f654d3..9a3c2bfa 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -5,6 +5,7 @@ from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( WARNING, @@ -222,12 +223,50 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """If no iface provided, inject net iface inferred from unit private + address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, dynamic_only=True): """Get assigned IPv6 address for a given interface. Returns list of addresses found. If no address found, returns empty list. + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. From 81d56b569d2769600e36239144832e13e35d1e68 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 6 Oct 2014 23:07:41 +0100 Subject: [PATCH 0502/2699] [trivial] Tidy configuration lint. --- ceph-proxy/config.yaml | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index a0404a11..95e94857 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -1,6 +1,7 @@ options: fsid: type: string + default: description: | fsid of the ceph cluster. To generate a suitable value use `uuid` . @@ -17,6 +18,7 @@ options: enabled later. monitor-secret: type: string + default: description: | This value will become the mon. key. To generate a suitable value use: . @@ -44,6 +46,7 @@ options: charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string + default: description: | The device to use as a shared journal drive for all OSD's. By default no journal device will be used. @@ -73,6 +76,7 @@ options: Only supported with ceph >= 0.48.3. 
osd-reformat: type: string + default: description: | By default, the charm will not re-format a device that already looks as if it might be an OSD device. This is a safeguard to try to @@ -92,6 +96,7 @@ options: problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string + default: description: | Cloud instances provide ephemeral storage which is normally mounted on /mnt. @@ -101,6 +106,7 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string + default: description: | Optional configuration to support use of additional sources such as: . @@ -116,6 +122,7 @@ options: for precise but is provided in the Folsom cloud archive. key: type: string + default: description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPAs. @@ -125,15 +132,17 @@ options: description: | If set to True, supporting services will log to syslog. ceph-public-network: - type: string - description: | - The IP address and netmask of the public (front-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) ceph-cluster-network: - type: string - description: | - The IP address and netmask of the cluster (back-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) prefer-ipv6: type: boolean default: False From 87573b082a72dea31c3547fd5fedf2fcedf8540e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 6 Oct 2014 23:07:41 +0100 Subject: [PATCH 0503/2699] [trivial] Tidy configuration lint. --- ceph-mon/config.yaml | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index a0404a11..95e94857 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -1,6 +1,7 @@ options: fsid: type: string + default: description: | fsid of the ceph cluster. To generate a suitable value use `uuid` . @@ -17,6 +18,7 @@ options: enabled later. monitor-secret: type: string + default: description: | This value will become the mon. key. To generate a suitable value use: . @@ -44,6 +46,7 @@ options: charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string + default: description: | The device to use as a shared journal drive for all OSD's. By default no journal device will be used. @@ -73,6 +76,7 @@ options: Only supported with ceph >= 0.48.3. osd-reformat: type: string + default: description: | By default, the charm will not re-format a device that already looks as if it might be an OSD device. This is a safeguard to try to @@ -92,6 +96,7 @@ options: problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string + default: description: | Cloud instances provide ephemeral storage which is normally mounted on /mnt. @@ -101,6 +106,7 @@ options: testing purposes (cloud deployment is not a typical use case). source: type: string + default: description: | Optional configuration to support use of additional sources such as: . @@ -116,6 +122,7 @@ options: for precise but is provided in the Folsom cloud archive. key: type: string + default: description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPAs.
@@ -125,15 +132,17 @@ options: description: | If set to True, supporting services will log to syslog. ceph-public-network: - type: string - description: | - The IP address and netmask of the public (front-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) ceph-cluster-network: - type: string - description: | - The IP address and netmask of the cluster (back-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) prefer-ipv6: type: boolean default: False From b7136fa826c9e5f9ecd9d5d1a62dfaf937b9f33e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 6 Oct 2014 23:11:14 +0100 Subject: [PATCH 0504/2699] [trivial] Tidy configuration lint --- ceph-osd/config.yaml | 22 ++++++++++++++-------- ceph-osd/metadata.yaml | 2 ++ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 0eb89279..a694f290 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -12,6 +12,7 @@ options: charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string + default: description: | The device to use as a shared journal drive for all OSD's. By default no journal device will be used. @@ -41,6 +42,7 @@ options: Only supported with ceph >= 0.48.3. osd-reformat: type: string + default: description: | By default, the charm will not re-format a device that already looks as if it might be an OSD device. This is a safeguard to try to @@ -60,6 +62,7 @@ options: problems as warnings only and will not result in a hook error. ephemeral-unmount: type: string + default: description: | Cloud instances provide ephemeral storage which is normally mounted on /mnt. @@ -85,6 +88,7 @@ options: for precise but is provided in the Ubuntu cloud archive. key: type: string + default: description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPAs. @@ -94,15 +98,17 @@ options: description: | If set to True, supporting services will log to syslog. ceph-public-network: - type: string - description: | - The IP address and netmask of the public (front-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24) ceph-cluster-network: - type: string - description: | - The IP address and netmask of the cluster (back-side) network (e.g., - 192.168.0.0/24) + type: string + default: + description: | + The IP address and netmask of the cluster (back-side) network (e.g., + 192.168.0.0/24) prefer-ipv6: type: boolean default: False diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 2f41d9b2..7ac72fa8 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -1,6 +1,8 @@ name: ceph-osd summary: Highly scalable distributed storage - Ceph OSD storage maintainer: James Page +categories: + - misc description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From fe934894223993a92629d4aba0676879063aac5c Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 7 Oct 2014 10:42:11 +0200 Subject: [PATCH 0505/2699] - Added a sysctl configuration option for passing ceph-specific runtime flags.
- Modified config.yaml to expose the sysctl option, by default it is "" - Synced sysctl from charmhelpers --- ceph-osd/config.yaml | 6 ++++ ceph-osd/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++++++++++ ceph-osd/hooks/hooks.py | 6 ++++ 3 files changed, 46 insertions(+) create mode 100644 ceph-osd/hooks/charmhelpers/core/sysctl.py diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index a694f290..ae1b4336 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -121,3 +121,9 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + sysctl: + type: string + default: "" + description: | + YAML formatted associative array of sysctl values, e.g.: + '{ kernel.pid_max : 4194303 }' diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index fdf38798..37e77121 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -36,6 +36,8 @@ filter_installed_packages, ) +from charmhelpers.core.sysctl import create as create_sysctl + from utils import ( render_template, get_host_ip, @@ -110,6 +112,10 @@ def config_changed(): if config('prefer-ipv6'): assert_charm_supports_ipv6() + sysctl_dict = config('sysctl') + if sysctl_dict: + create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf') + e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): umount(e_mountpoint) From 5347b01fa32440da7c76cf16a63035a0264e953b Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 7 Oct 2014 11:01:56 +0200 Subject: [PATCH 0506/2699] - Moved sysctl logic to charmhelpers to make it compatible with ceph-osd --- ceph-proxy/config.yaml | 8 ++++- ceph-proxy/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++++++++ ceph-proxy/hooks/hooks.py | 6 ++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/sysctl.py diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 95e94857..218f7af0 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -154,4 +154,10 @@ options: NOTE: these charms do not currently support IPv6 privacy extension. In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on - your network interface. 
+ sysctl: + type: string + default: "" + description: | + YAML formatted associative array of sysctl values, e.g.: + '{ kernel.pid_max : 4194303 }' diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8a6c26c8..4b8fb452 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -45,6 +45,8 @@ format_ipv6_addr ) +from charmhelpers.core.sysctl import create as create_sysctl + from utils import ( render_template, get_public_addr, @@ -119,6 +121,10 @@ def config_changed(): log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) + sysctl_dict = config('sysctl') + if sysctl_dict: + create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') + emit_cephconf() e_mountpoint = config('ephemeral-unmount') From 41024bd54cd70e2344d9d971a7348055ea8457d7 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 7 Oct 2014 11:01:56 +0200 Subject: [PATCH 0507/2699] - Moved sysctl logic to charmhelpers to make it compatible with ceph-osd --- ceph-mon/config.yaml | 8 ++++- ceph-mon/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++++++++++ ceph-mon/hooks/hooks.py | 6 ++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/sysctl.py diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 95e94857..218f7af0 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -154,4 +154,10 @@ options: NOTE: these charms do not currently support IPv6 privacy extension. In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on - your network interface. + your network interface. + sysctl: + type: string + default: "" + description: | + YAML formatted associative array of sysctl values, e.g.: + '{ kernel.pid_max : 4194303 }' diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8a6c26c8..4b8fb452 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -45,6 +45,8 @@ format_ipv6_addr ) +from charmhelpers.core.sysctl import create as create_sysctl + from utils import ( render_template, get_public_addr, @@ -119,6 +121,10 @@ def config_changed(): log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) + sysctl_dict = config('sysctl') + if sysctl_dict: + create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') + emit_cephconf() e_mountpoint = config('ephemeral-unmount') From a16f1875a59fa23c1e44c86119f46de1d9192017 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Oct 2014 08:25:29 +0100 Subject: [PATCH 0508/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 10 +++-- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 6 +++ ceph-proxy/hooks/charmhelpers/core/host.py | 10 ++++- ceph-proxy/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++ .../hooks/charmhelpers/fetch/__init__.py | 4 ++ ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 44 +++++++++++++++++++ 6 files changed, 103 insertions(+), 5 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/sysctl.py create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..e62e5655 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -140,7 +140,8 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface @@ -149,11 +150,14 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] return None diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index af8fe2db..083a7090 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -214,6 +214,12 @@ def __getitem__(self, key): except KeyError: 
return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + dict.keys(self))) + def load_previous(self, path=None): """Load previous copy of config from disk. diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index d7ce1e4c..0b8bdc50 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -6,13 +6,13 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager from collections import OrderedDict @@ -317,7 +317,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 32a673d6..6724d293 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -72,6 +72,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -218,6 +219,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,6 +253,8 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: raise SourceConfigError("Unknown source: {!r}".format(source)) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..7d672460 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,44 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + #TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir From 0648b7e12aca74acf04e61c770cd5bb170fd78a3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Oct 2014 08:25:29 +0100 Subject: [PATCH 0509/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 10 +++-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 6 +++ ceph-mon/hooks/charmhelpers/core/host.py | 10 ++++- ceph-mon/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++ ceph-mon/hooks/charmhelpers/fetch/__init__.py | 4 ++ ceph-mon/hooks/charmhelpers/fetch/giturl.py | 44 +++++++++++++++++++ 6 files changed, 103 insertions(+), 5 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/sysctl.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..e62e5655 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -140,7 +140,8 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface @@ -149,11 +150,14 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return 
iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] return None diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index af8fe2db..083a7090 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -214,6 +214,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + dict.keys(self))) + def load_previous(self, path=None): """Load previous copy of config from disk. diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d7ce1e4c..0b8bdc50 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -6,13 +6,13 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager from collections import OrderedDict @@ -317,7 +317,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 32a673d6..6724d293 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -72,6 +72,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -218,6 +219,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,6 +253,8 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: raise SourceConfigError("Unknown source: {!r}".format(source)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..7d672460 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,44 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + #TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir From f67a5fed236c758ed5f03d8d90b58a580b60ed2a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Oct 2014 08:28:36 +0100 Subject: [PATCH 0510/2699] Sync charmhelpers --- .../hooks/charmhelpers/core/hookenv.py | 6 +++ ceph-radosgw/hooks/charmhelpers/core/host.py | 10 ++++- .../hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++ .../hooks/charmhelpers/fetch/__init__.py | 4 ++ .../hooks/charmhelpers/fetch/giturl.py | 44 +++++++++++++++++++ 5 files changed, 96 insertions(+), 2 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/core/sysctl.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index af8fe2db..083a7090 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -214,6 +214,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + dict.keys(self))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
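The keys() override added to Config above merges the keys saved by the previous hook run (_prev_dict) with those set in the current one, so code that iterates over the config still sees options that were only present in the saved copy. Reduced to plain dicts, the merge is just a set union (a sketch of the effect only; the real Config class also loads and saves itself on disk):

    def merged_keys(current, previous):
        # Union of previously-saved and current config keys, mirroring
        # the Config.keys() override above.
        previous = previous or {}
        return list(set(previous) | set(current))

    saved = {'fsid': 'some-uuid', 'osd-devices': '/dev/sdb'}
    now = {'osd-devices': '/dev/sdb'}
    print(sorted(merged_keys(now, saved)))  # ['fsid', 'osd-devices']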
diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index d7ce1e4c..0b8bdc50 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -6,13 +6,13 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager from collections import OrderedDict @@ -317,7 +317,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 32a673d6..6724d293 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -72,6 +72,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -218,6 +219,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,6 +253,8 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: raise SourceConfigError("Unknown source: {!r}".format(source)) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..7d672460 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,44 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + #TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir From 547e003887d9aa33995a74d26985c65b690ce484 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Oct 2014 08:56:51 +0100 Subject: [PATCH 0511/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 10 +++-- ceph-osd/hooks/charmhelpers/core/hookenv.py | 6 +++ ceph-osd/hooks/charmhelpers/core/host.py | 10 ++++- ceph-osd/hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 4 ++ ceph-osd/hooks/charmhelpers/fetch/giturl.py | 44 +++++++++++++++++++ 6 files changed, 103 insertions(+), 5 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/sysctl.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..e62e5655 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -140,7 +140,8 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface @@ -149,11 +150,14 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': 
return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] return None diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index af8fe2db..083a7090 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -214,6 +214,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + dict.keys(self))) + def load_previous(self, path=None): """Load previous copy of config from disk. diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index d7ce1e4c..0b8bdc50 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -6,13 +6,13 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager from collections import OrderedDict @@ -317,7 +317,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 32a673d6..6724d293 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -72,6 +72,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -218,6 +219,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,6 +253,8 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: raise SourceConfigError("Unknown source: {!r}".format(source)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..7d672460 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,44 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + #TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir From d46f499fb40f0eb514744d5cf0bd497fd1aa2dff Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Oct 2014 22:30:35 -0500 Subject: [PATCH 0512/2699] [bradm] initial nrpe checks --- ceph-proxy/metadata.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index aa299038..3d0a7f48 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -10,6 +10,9 @@ peers: mon: interface: ceph provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container client: interface: ceph-client osd: From af7254fdef365a54ce4dada4783e7ac71ccea5f3 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Oct 2014 22:30:35 -0500 Subject: [PATCH 0513/2699] [bradm] initial nrpe checks --- ceph-mon/metadata.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index aa299038..3d0a7f48 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -10,6 +10,9 @@ peers: mon: interface: ceph provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container client: interface: ceph-client osd: From 7b9f812b50354c4e995cccb8dd7a4de218b46222 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Oct 2014 22:30:35 -0500 Subject: [PATCH 0514/2699] [bradm] initial nrpe checks --- ceph-osd/metadata.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 7ac72fa8..6bd1fbd3 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -1,6 +1,10 @@ name: ceph-osd summary: Highly scalable distributed storage - Ceph OSD storage maintainer: James Page +provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container categories: - misc description: | From 
c0f6fe60f65e764dcc8e828cfcd51eb25a2de63f Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 30 Oct 2014 16:57:10 +1000 Subject: [PATCH 0515/2699] [bradm] Initial nrpe checks --- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/config.yaml | 11 + ceph-proxy/files/nagios/check_ceph_status.py | 44 ++++ .../files/nagios/collect_ceph_status.sh | 18 ++ .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 219 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 156 +++++++++++++ ceph-proxy/hooks/hooks.py | 46 +++- .../nrpe-external-master-relation-changed | 1 + .../nrpe-external-master-relation-joined | 1 + ceph-proxy/metadata.yaml | 4 + 11 files changed, 500 insertions(+), 1 deletion(-) create mode 100755 ceph-proxy/files/nagios/check_ceph_status.py create mode 100755 ceph-proxy/files/nagios/collect_ceph_status.sh create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 ceph-proxy/hooks/nrpe-external-master-relation-changed create mode 120000 ceph-proxy/hooks/nrpe-external-master-relation-joined diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index afb9e42b..f6978678 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -8,3 +8,4 @@ include: - payload.execd - contrib.openstack.alternatives - contrib.network.ip + - contrib.charmsupport diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 95e94857..1581052c 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -155,3 +155,14 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. 
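The nagios_context value is only a prefix: the nrpe helper added below combines it with the unit name to build the host name registered with Nagios, which is what keeps two environments running the same services distinguishable on a shared Nagios server. Roughly (a sketch of the naming rule used by NRPE.__init__ further down):

    def nagios_hostname(nagios_context, unit_name):
        # 'juju' + 'ceph/0' -> 'juju-ceph-0', as in NRPE.__init__ below.
        return '{}-{}'.format(nagios_context, unit_name.replace('/', '-'))

    print(nagios_hostname('juju', 'ceph/0'))     # juju-ceph-0
    print(nagios_hostname('staging', 'ceph/0'))  # staging-ceph-0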
+ diff --git a/ceph-proxy/files/nagios/check_ceph_status.py b/ceph-proxy/files/nagios/check_ceph_status.py new file mode 100755 index 00000000..cb8d1a1a --- /dev/null +++ b/ceph-proxy/files/nagios/check_ceph_status.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Canonical +# All Rights Reserved +# Author: Jacek Nykis + +import re +import argparse +import subprocess +import nagios_plugin + + +def check_ceph_status(args): + if args.status_file: + nagios_plugin.check_file_freshness(args.status_file, 3600) + with open(args.status_file, "r") as f: + lines = f.readlines() + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + else: + lines = subprocess.check_output(["ceph", "status"]).split('\n') + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + + if ('health' not in status_data + or 'monmap' not in status_data + or 'osdmap'not in status_data): + raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete') + + if status_data['health'] != 'HEALTH_OK': + msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health']) + raise nagios_plugin.CriticalError(msg) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) + if osds.group(1) > osds.group(2): # not all OSDs are "up" + msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( + osds.group(1), osds.group(2)) + raise nagios_plugin.CriticalError(msg) + print "All OK" + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Check ceph status') + parser.add_argument('-f', '--file', dest='status_file', + default=False, help='Optional file with "ceph status" output') + args = parser.parse_args() + nagios_plugin.try_check(check_ceph_status, args) diff --git a/ceph-proxy/files/nagios/collect_ceph_status.sh b/ceph-proxy/files/nagios/collect_ceph_status.sh new file mode 100755 index 00000000..dbdd3acf --- /dev/null +++ b/ceph-proxy/files/nagios/collect_ceph_status.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright (C) 2014 Canonical +# All Rights Reserved +# Author: Jacek Nykis + +LOCK=/var/lock/ceph-status.lock +lockfile-create -r2 --lock-name $LOCK > /dev/null 2>&1 +if [ $? -ne 0 ]; then + exit 1 +fi +trap "rm -f $LOCK > /dev/null 2>&1" exit + +DATA_DIR="/var/lib/nagios" +if [ ! -d $DATA_DIR ]; then + mkdir -p $DATA_DIR +fi + +ceph status >${DATA_DIR}/cat-ceph-status.txt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..1815dad2 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,219 @@ +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) 
+# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname) + + def write_service_config(self, nagios_context, hostname): + for f in os.listdir(NRPE.nagios_exportdir): + if 
re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_context, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self, hostname=None): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + for rid in relation_ids("local-monitors"): + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..0f905dff --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,156 @@ +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas: + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as specified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g.: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. 
+ +Usage: + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def pre_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Ephemeral storage was requested, but there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes.
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8a6c26c8..a0befde5 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -23,13 +23,16 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name + service_name, + relations_of_type ) from charmhelpers.core.host import ( service_restart, umount, mkdir, + write_file, + rsync, cmp_pkgrevno ) from charmhelpers.fetch import ( @@ -51,8 +54,15 @@ assert_charm_supports_ipv6 ) +from charmhelpers.contrib.charmsupport.nrpe import NRPE + hooks = Hooks() +NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' +SCRIPTS_DIR = '/usr/local/bin' +STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' +STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -143,6 +153,9 @@ def config_changed(): reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + def get_mon_hosts(): hosts = [] @@ -307,6 +320,37 @@ def start(): ceph.start_osds(get_devices()) +@hooks.hook('nrpe-external-master-relation-joined') +@hooks.hook('nrpe-external-master-relation-changed') +def update_nrpe_config(): + log('Refreshing nagios checks') + if os.path.isdir(NAGIOS_PLUGINS): + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', + 'check_ceph_status.py'), + os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py')) + + script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh') + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', + 'nagios', 'collect_ceph_status.sh'), + script) + cronjob = "{} root {}\n".format('*/5 * * * *', script) + write_file(STATUS_CRONFILE, cronjob) + + # Find out if nrpe set nagios_hostname + hostname = None + for rel in relations_of_type('nrpe-external-master'): + if 'nagios_hostname' in rel: + hostname = rel['nagios_hostname'] + break + nrpe = NRPE(hostname=hostname) + nrpe.add_check( + shortname="ceph", + description='Check Ceph health', + check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) + ) + nrpe.write() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-proxy/hooks/nrpe-external-master-relation-changed b/ceph-proxy/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/nrpe-external-master-relation-joined b/ceph-proxy/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 3d0a7f48..9fab75a8 100644 --- a/ceph-proxy/metadata.yaml +++ 
b/ceph-proxy/metadata.yaml @@ -19,3 +19,7 @@ provides: interface: ceph-osd radosgw: interface: ceph-radosgw + nrpe-external-master: + interface: nrpe-external-master + scope: container + gets: [nagios_hostname, nagios_host_context] From 989fc1a84905740b23b313472c2aee0e86b4d19a Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 30 Oct 2014 16:57:10 +1000 Subject: [PATCH 0516/2699] [bradm] Initial nrpe checks --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/config.yaml | 11 + ceph-mon/files/nagios/check_ceph_status.py | 44 ++++ ceph-mon/files/nagios/collect_ceph_status.sh | 18 ++ .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 219 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 156 +++++++++++++ ceph-mon/hooks/hooks.py | 46 +++- .../nrpe-external-master-relation-changed | 1 + .../nrpe-external-master-relation-joined | 1 + ceph-mon/metadata.yaml | 4 + 11 files changed, 500 insertions(+), 1 deletion(-) create mode 100755 ceph-mon/files/nagios/check_ceph_status.py create mode 100755 ceph-mon/files/nagios/collect_ceph_status.sh create mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 ceph-mon/hooks/nrpe-external-master-relation-changed create mode 120000 ceph-mon/hooks/nrpe-external-master-relation-joined diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index afb9e42b..f6978678 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -8,3 +8,4 @@ include: - payload.execd - contrib.openstack.alternatives - contrib.network.ip + - contrib.charmsupport diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 95e94857..1581052c 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -155,3 +155,14 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. 
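A note on how the nagios_context option above is consumed: when the nrpe relation does not hand over a host name, the NRPE helper added in this patch composes one from nagios_context plus the unit name. A minimal sketch of that fallback, where the unit name "myservice/0" is only an example value:

    # Fallback used by NRPE.__init__ in charmhelpers' nrpe.py (added below).
    nagios_context = "juju"                       # the config option above
    unit_name = "myservice/0".replace('/', '-')   # local_unit(), slash mangled
    hostname = "{}-{}".format(nagios_context, unit_name)
    assert hostname == "juju-myservice-0"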
+
diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py
new file mode 100755
index 00000000..cb8d1a1a
--- /dev/null
+++ b/ceph-mon/files/nagios/check_ceph_status.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2014 Canonical
+# All Rights Reserved
+# Author: Jacek Nykis
+
+import re
+import argparse
+import subprocess
+import nagios_plugin
+
+
+def check_ceph_status(args):
+    if args.status_file:
+        nagios_plugin.check_file_freshness(args.status_file, 3600)
+        with open(args.status_file, "r") as f:
+            lines = f.readlines()
+        status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+    else:
+        lines = subprocess.check_output(["ceph", "status"]).split('\n')
+        status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+
+    if ('health' not in status_data
+            or 'monmap' not in status_data
+            or 'osdmap' not in status_data):
+        raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete')
+
+    if status_data['health'] != 'HEALTH_OK':
+        msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health'])
+        raise nagios_plugin.CriticalError(msg)
+    osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap'])
+    if int(osds.group(1)) > int(osds.group(2)):  # not all OSDs are "up"
+        msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format(
+            osds.group(1), osds.group(2))
+        raise nagios_plugin.CriticalError(msg)
+    print "All OK"
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Check ceph status')
+    parser.add_argument('-f', '--file', dest='status_file',
+                        default=False, help='Optional file with "ceph status" output')
+    args = parser.parse_args()
+    nagios_plugin.try_check(check_ceph_status, args)
diff --git a/ceph-mon/files/nagios/collect_ceph_status.sh b/ceph-mon/files/nagios/collect_ceph_status.sh
new file mode 100755
index 00000000..dbdd3acf
--- /dev/null
+++ b/ceph-mon/files/nagios/collect_ceph_status.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright (C) 2014 Canonical
+# All Rights Reserved
+# Author: Jacek Nykis
+
+LOCK=/var/lock/ceph-status.lock
+lockfile-create -r2 --lock-name $LOCK > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+trap "rm -f $LOCK > /dev/null 2>&1" exit
+
+DATA_DIR="/var/lib/nagios"
+if [ ! -d $DATA_DIR ]; then
+    mkdir -p $DATA_DIR
+fi
+
+ceph status >${DATA_DIR}/cat-ceph-status.txt
diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 00000000..1815dad2
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,219 @@
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Matthew Wedgwood
+
+import subprocess
+import pwd
+import grp
+import os
+import re
+import shlex
+import yaml
+
+from charmhelpers.core.hookenv import (
+    config,
+    local_unit,
+    log,
+    relation_ids,
+    relation_set,
+)
+
+from charmhelpers.core.host import service
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+#   provides:
+#     (...)
+#     nrpe-external-master:
+#       interface: nrpe-external-master
+#       scope: container
+#
+#   and/or
+#
+#   provides:
+#     (...)
+# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname) + + def write_service_config(self, nagios_context, hostname): + for f in os.listdir(NRPE.nagios_exportdir): + if 
re.search('.*{}.cfg'.format(self.command), f): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_context, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self, hostname=None): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + for rid in relation_ids("local-monitors"): + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..0f905dff --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,156 @@ +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas: + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as sepecified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. 
+ +Usage: + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def post_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Ephemeral storage was requested, but there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. 
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8a6c26c8..a0befde5 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -23,13 +23,16 @@ relation_set, remote_unit, Hooks, UnregisteredHookError, - service_name + service_name, + relations_of_type ) from charmhelpers.core.host import ( service_restart, umount, mkdir, + write_file, + rsync, cmp_pkgrevno ) from charmhelpers.fetch import ( @@ -51,8 +54,15 @@ assert_charm_supports_ipv6 ) +from charmhelpers.contrib.charmsupport.nrpe import NRPE + hooks = Hooks() +NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' +SCRIPTS_DIR = '/usr/local/bin' +STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' +STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -143,6 +153,9 @@ def config_changed(): reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + def get_mon_hosts(): hosts = [] @@ -307,6 +320,37 @@ def start(): ceph.start_osds(get_devices()) +@hooks.hook('nrpe-external-master-relation-joined') +@hooks.hook('nrpe-external-master-relation-changed') +def update_nrpe_config(): + log('Refreshing nagios checks') + if os.path.isdir(NAGIOS_PLUGINS): + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', + 'check_ceph_status.py'), + os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py')) + + script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh') + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', + 'nagios', 'collect_ceph_status.sh'), + script) + cronjob = "{} root {}\n".format('*/5 * * * *', script) + write_file(STATUS_CRONFILE, cronjob) + + # Find out if nrpe set nagios_hostname + hostname = None + for rel in relations_of_type('nrpe-external-master'): + if 'nagios_hostname' in rel: + hostname = rel['nagios_hostname'] + break + nrpe = NRPE(hostname=hostname) + nrpe.add_check( + shortname="ceph", + description='Check Ceph health', + check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) + ) + nrpe.write() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-mon/hooks/nrpe-external-master-relation-changed b/ceph-mon/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/nrpe-external-master-relation-joined b/ceph-mon/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 3d0a7f48..9fab75a8 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml 
@@ -19,3 +19,7 @@ provides: interface: ceph-osd radosgw: interface: ceph-radosgw + nrpe-external-master: + interface: nrpe-external-master + scope: container + gets: [nagios_hostname, nagios_host_context] From 3fdf9dacdcd46179f30bdfa6c70587b9a662aa00 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 4 Nov 2014 17:04:13 +1000 Subject: [PATCH 0517/2699] [bradm] Tweaked check to include host context and unit name --- ceph-proxy/hooks/hooks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index a0befde5..441d5a74 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -22,6 +22,7 @@ relation_get, relation_set, remote_unit, + local_unit, Hooks, UnregisteredHookError, service_name, relations_of_type @@ -341,11 +342,15 @@ def update_nrpe_config(): for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] + host_context = rel['nagios_host_context'] break nrpe = NRPE(hostname=hostname) + + current_unit = "%s:%s" % (host_context, local_unit()) + nrpe.add_check( shortname="ceph", - description='Check Ceph health', + description='Check Ceph health {%s}' % current_unit, check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) ) nrpe.write() From 0b24efa41a26f432f5a3063d67afe1296a01542b Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 4 Nov 2014 17:04:13 +1000 Subject: [PATCH 0518/2699] [bradm] Tweaked check to include host context and unit name --- ceph-mon/hooks/hooks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index a0befde5..441d5a74 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -22,6 +22,7 @@ relation_get, relation_set, remote_unit, + local_unit, Hooks, UnregisteredHookError, service_name, relations_of_type @@ -341,11 +342,15 @@ def update_nrpe_config(): for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] + host_context = rel['nagios_host_context'] break nrpe = NRPE(hostname=hostname) + + current_unit = "%s:%s" % (host_context, local_unit()) + nrpe.add_check( shortname="ceph", - description='Check Ceph health', + description='Check Ceph health {%s}' % current_unit, check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) ) nrpe.write() From f038603cd3b4b983c27e9665ec3b77a6e8c9ffa5 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 4 Nov 2014 17:05:18 +1000 Subject: [PATCH 0519/2699] [bradm] Added nrpe check --- ceph-osd/charm-helpers-hooks.yaml | 1 + ceph-osd/config.yaml | 11 + ceph-osd/files/nagios/check_ceph_status.py | 44 ++++ ceph-osd/files/nagios/collect_ceph_status.sh | 18 ++ .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 222 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 156 ++++++++++++ ceph-osd/hooks/hooks.py | 27 +++ .../nrpe-external-master-relation-changed | 1 + .../nrpe-external-master-relation-joined | 1 + 10 files changed, 481 insertions(+) create mode 100755 ceph-osd/files/nagios/check_ceph_status.py create mode 100755 ceph-osd/files/nagios/collect_ceph_status.sh create mode 100644 ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 ceph-osd/hooks/nrpe-external-master-relation-changed create mode 120000 
ceph-osd/hooks/nrpe-external-master-relation-joined diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 1d9081b7..8f5373ec 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -7,3 +7,4 @@ include: - utils - contrib.openstack.alternatives - contrib.network.ip + - contrib.charmsupport diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index a694f290..efd5cd90 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -121,3 +121,14 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + diff --git a/ceph-osd/files/nagios/check_ceph_status.py b/ceph-osd/files/nagios/check_ceph_status.py new file mode 100755 index 00000000..cb8d1a1a --- /dev/null +++ b/ceph-osd/files/nagios/check_ceph_status.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Canonical +# All Rights Reserved +# Author: Jacek Nykis + +import re +import argparse +import subprocess +import nagios_plugin + + +def check_ceph_status(args): + if args.status_file: + nagios_plugin.check_file_freshness(args.status_file, 3600) + with open(args.status_file, "r") as f: + lines = f.readlines() + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + else: + lines = subprocess.check_output(["ceph", "status"]).split('\n') + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + + if ('health' not in status_data + or 'monmap' not in status_data + or 'osdmap'not in status_data): + raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete') + + if status_data['health'] != 'HEALTH_OK': + msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health']) + raise nagios_plugin.CriticalError(msg) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) + if osds.group(1) > osds.group(2): # not all OSDs are "up" + msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( + osds.group(1), osds.group(2)) + raise nagios_plugin.CriticalError(msg) + print "All OK" + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Check ceph status') + parser.add_argument('-f', '--file', dest='status_file', + default=False, help='Optional file with "ceph status" output') + args = parser.parse_args() + nagios_plugin.try_check(check_ceph_status, args) diff --git a/ceph-osd/files/nagios/collect_ceph_status.sh b/ceph-osd/files/nagios/collect_ceph_status.sh new file mode 100755 index 00000000..dbdd3acf --- /dev/null +++ b/ceph-osd/files/nagios/collect_ceph_status.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright (C) 2014 Canonical +# All Rights Reserved +# Author: Jacek Nykis + +LOCK=/var/lock/ceph-status.lock +lockfile-create -r2 --lock-name $LOCK > /dev/null 2>&1 +if [ $? -ne 0 ]; then + exit 1 +fi +trap "rm -f $LOCK > /dev/null 2>&1" exit + +DATA_DIR="/var/lib/nagios" +if [ ! 
-d $DATA_DIR ]; then + mkdir -p $DATA_DIR +fi + +ceph status >${DATA_DIR}/cat-ceph-status.txt diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..51b62d39 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,222 @@ +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. 
ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/', + os.path.join(os.environ['CHARM_DIR'], + 'files/nrpe-external-master'), + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname) + + def write_service_config(self, nagios_context, hostname): + for f in os.listdir(NRPE.nagios_exportdir): + if re.search('.*{}.cfg'.format(self.command), f): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_context, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self, hostname=None): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def write(self): + try: + 
nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + for rid in relation_ids("local-monitors"): + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..0f905dff --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,156 @@ +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas: + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as sepecified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. + +Usage: + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def post_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', 
hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Ephemeral storage was requested, but there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. + ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index fdf38798..5e67473c 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -20,6 +20,8 @@ relation_ids, related_units, relation_get, + relations_of_type, + local_unit, Hooks, UnregisteredHookError, service_name @@ -48,6 +50,8 @@ format_ipv6_addr ) +from charmhelpers.contrib.charmsupport.nrpe import NRPE + hooks = Hooks() @@ -203,6 +207,29 @@ def upgrade_charm(): fatal=True) +@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +def update_nrpe_config(): + # Find out if nrpe set nagios_hostname + hostname = None + for rel in relations_of_type('nrpe-external-master'): + if 'nagios_hostname' in rel: + hostname = rel['nagios_hostname'] + host_context = rel['nagios_host_context'] + break + nrpe = NRPE(hostname=hostname) + apt_install('python-dbus') + + current_unit = "%s:%s" % (host_context, local_unit()) + + nrpe.add_check( + shortname='ceph-osd', + description='process check {%s}' % current_unit, + check_cmd = 'check_upstart_job ceph-osd', + ) + + nrpe.write() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-osd/hooks/nrpe-external-master-relation-changed b/ceph-osd/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ 
b/ceph-osd/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/nrpe-external-master-relation-joined b/ceph-osd/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 1ac7721fe834ffe5aa9343bafd478a6d40d49de2 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 6 Nov 2014 17:26:04 +1000 Subject: [PATCH 0520/2699] [bradm] Check if host_context is defined before using it --- ceph-proxy/hooks/hooks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 441d5a74..4747a09f 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -339,6 +339,7 @@ def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = None + host_context = None for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] @@ -346,7 +347,10 @@ def update_nrpe_config(): break nrpe = NRPE(hostname=hostname) - current_unit = "%s:%s" % (host_context, local_unit()) + if host_context: + current_unit = "%s:%s" % (host_context, local_unit()) + else: + current_unit = local_unit() nrpe.add_check( shortname="ceph", From e77249a31161d8db52e682255a495c77a9b177dd Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 6 Nov 2014 17:26:04 +1000 Subject: [PATCH 0521/2699] [bradm] Check if host_context is defined before using it --- ceph-mon/hooks/hooks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 441d5a74..4747a09f 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -339,6 +339,7 @@ def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = None + host_context = None for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] @@ -346,7 +347,10 @@ def update_nrpe_config(): break nrpe = NRPE(hostname=hostname) - current_unit = "%s:%s" % (host_context, local_unit()) + if host_context: + current_unit = "%s:%s" % (host_context, local_unit()) + else: + current_unit = local_unit() nrpe.add_check( shortname="ceph", From 349d684537df5b7ba1933977ec0ea23abe33cdb0 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 6 Nov 2014 17:27:21 +1000 Subject: [PATCH 0522/2699] [bradm] Check if host_context is defined before using it, add check_upstart_job --- .../nrpe-external-master/check_upstart_job | 72 +++++++++++++++++++ ceph-osd/hooks/hooks.py | 6 +- 2 files changed, 77 insertions(+), 1 deletion(-) create mode 100755 ceph-osd/files/nrpe-external-master/check_upstart_job diff --git a/ceph-osd/files/nrpe-external-master/check_upstart_job b/ceph-osd/files/nrpe-external-master/check_upstart_job new file mode 100755 index 00000000..94efb95e --- /dev/null +++ b/ceph-osd/files/nrpe-external-master/check_upstart_job @@ -0,0 +1,72 @@ +#!/usr/bin/python + +# +# Copyright 2012, 2013 Canonical Ltd. 
+#
+# Author: Paul Collins
+#
+# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html
+#
+
+import sys
+
+import dbus
+
+
+class Upstart(object):
+    def __init__(self):
+        self._bus = dbus.SystemBus()
+        self._upstart = self._bus.get_object('com.ubuntu.Upstart',
+                                             '/com/ubuntu/Upstart')
+
+    def get_job(self, job_name):
+        path = self._upstart.GetJobByName(job_name,
+                                          dbus_interface='com.ubuntu.Upstart0_6')
+        return self._bus.get_object('com.ubuntu.Upstart', path)
+
+    def get_properties(self, job):
+        path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job')
+        instance = self._bus.get_object('com.ubuntu.Upstart', path)
+        return instance.GetAll('com.ubuntu.Upstart0_6.Instance',
+                               dbus_interface=dbus.PROPERTIES_IFACE)
+
+    def get_job_instances(self, job_name):
+        job = self.get_job(job_name)
+        paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job')
+        return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths]
+
+    def get_job_instance_properties(self, job):
+        return job.GetAll('com.ubuntu.Upstart0_6.Instance',
+                          dbus_interface=dbus.PROPERTIES_IFACE)
+
+try:
+    upstart = Upstart()
+    try:
+        job = upstart.get_job(sys.argv[1])
+        props = upstart.get_properties(job)
+
+        if props['state'] == 'running':
+            print 'OK: %s is running' % sys.argv[1]
+            sys.exit(0)
+        else:
+            print 'CRITICAL: %s is not running' % sys.argv[1]
+            sys.exit(2)
+
+    except dbus.DBusException as e:
+        instances = upstart.get_job_instances(sys.argv[1])
+        propses = [upstart.get_job_instance_properties(instance) for instance in instances]
+        states = dict([(props['name'], props['state']) for props in propses])
+        if len(states) != states.values().count('running'):
+            not_running = []
+            for name in states.keys():
+                if states[name] != 'running':
+                    not_running.append(name)
+            print 'CRITICAL: %d instances of %s not running: %s' % \
+                (len(not_running), sys.argv[1], ', '.join(not_running))
+            sys.exit(2)
+        else:
+            print 'OK: %d instances of %s running' % (len(states), sys.argv[1])
+
+except dbus.DBusException as e:
+    print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1]
+    sys.exit(2)
+
diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py
index 5e67473c..b47514aa 100755
--- a/ceph-osd/hooks/hooks.py
+++ b/ceph-osd/hooks/hooks.py
@@ -211,6 +211,7 @@ def upgrade_charm():
 def update_nrpe_config():
     # Find out if nrpe set nagios_hostname
     hostname = None
+    host_context = None
     for rel in relations_of_type('nrpe-external-master'):
         if 'nagios_hostname' in rel:
             hostname = rel['nagios_hostname']
@@ -219,7 +220,10 @@ def update_nrpe_config():
     nrpe = NRPE(hostname=hostname)
     apt_install('python-dbus')
 
-    current_unit = "%s:%s" % (host_context, local_unit())
+    if host_context:
+        current_unit = "%s:%s" % (host_context, local_unit())
+    else:
+        current_unit = local_unit()
 
     nrpe.add_check(
         shortname='ceph-osd',
From 4b04c6d128c6cf55ec8ee9d0598a65391668d4eb Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Fri, 7 Nov 2014 14:16:26 +0100
Subject: [PATCH 0523/2699] Added Ceph broker support to allow clients to
 request resources, e.g. pools, as opposed to creating them themselves.

This simplifies the logic on the client side and reduces the risk of race
conditions by shifting execution to the ceph charm itself.
Backwards-compatibility with clients that do not yet support this approach
is maintained.
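Before the diff itself, a sketch of what a broker request looks like. The op names and their parameters ('create_pool' with pool/replicas, 'create_keyring' with user/group) come straight from the ceph_broker.process_requests() code added below; how the payload is serialized onto the relation is not shown in this excerpt, so the json encoding and the 'broker_req' key here are assumptions for illustration:

    # Hypothetical client-side request list for ceph_broker.process_requests().
    import json

    reqs = [
        {'op': 'create_pool', 'pool': 'cinder', 'replicas': 3},
        {'op': 'create_keyring', 'user': 'cinder', 'group': 'cinder'},
    ]
    payload = json.dumps(reqs)  # e.g. relation_set(broker_req=payload)
    # On the ceph side, process_requests(reqs) returns 0 on success and 1
    # when a request is missing required parameters.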
--- ceph-proxy/Makefile | 6 +- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/hooks/ceph_broker.py | 47 +++ .../hooks/charmhelpers/contrib/network/ip.py | 2 - .../contrib/storage/linux/ceph.py | 388 ++++++++++++++++++ .../charmhelpers/core/services/__init__.py | 4 +- .../hooks/charmhelpers/fetch/__init__.py | 2 +- ceph-proxy/hooks/client-relation-changed | 1 + ceph-proxy/hooks/hooks.py | 39 +- ceph-proxy/unit_tests/test_ceph_broker.py | 11 + 10 files changed, 487 insertions(+), 14 deletions(-) create mode 100644 ceph-proxy/hooks/ceph_broker.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 120000 ceph-proxy/hooks/client-relation-changed create mode 100644 ceph-proxy/unit_tests/test_ceph_broker.py diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 70e95fbb..e29ab2b7 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -2,9 +2,13 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests + @flake8 --exclude hooks/charmhelpers hooks tests unit_tests @charm proof +unit_test: + @echo Starting unit tests... + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + test: @echo Starting Amulet tests... # coreycb note: The -v should only be temporary until Amulet sends diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index afb9e42b..c401e72e 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -5,6 +5,7 @@ include: - fetch - contrib.storage.linux: - utils + - ceph - payload.execd - contrib.openstack.alternatives - contrib.network.ip diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py new file mode 100644 index 00000000..cf1df89b --- /dev/null +++ b/ceph-proxy/hooks/ceph_broker.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# +# Copyright 2014 Canonical Ltd. +# +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR +) +from charmhelpers.contrib.storage.linux.ceph import ( + create_pool, + pool_exists, + ensure_ceph_keyring +) + + +def process_requests(reqs): + """Process a Ceph broker request from a ceph client.""" + log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + for req in reqs: + op = req.get('op') + log("Processing op='%s'" % (op), level=INFO) + # Use admin client since we do not have other client key locations + # setup to use them for these operations. 
+ svc = 'admin' + if op == "create_pool": + pool = req.get('pool') + replicas = req.get('replicas') + if not all([pool, replicas]): + log("Missing parameter(s)", level=ERROR) + return 1 + + if not pool_exists(service=svc, name=pool): + log("Creating pool '%s'" % (pool), level=INFO) + create_pool(service=svc, name=pool, replicas=replicas) + else: + log("Pool '%s' already exists" % (pool), level=INFO) + elif op == "create_keyring": + user = req.get('user') + group = req.get('group') + if not all([user, group]): + log("Missing parameter(s)", level=ERROR) + return 1 + + ensure_ceph_keyring(service=svc, user=user, group=group) + + return 0 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index e62e5655..c4bfeadb 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -8,7 +8,6 @@ from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, ERROR, log ) @@ -175,7 +174,6 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None return address diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..598ec263 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,388 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, + WARNING, + ERROR +) + +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) + +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} + log to syslog = {use_syslog} + err to syslog = {use_syslog} + clog to syslog = {use_syslog} +""" + + +def install(): + ''' Basic Ceph client installation ''' + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + ''' Check to see if a RADOS block device exists ''' + try: + out = check_output(['rbd', 'list', '--id', service, + '--pool', pool]) + except CalledProcessError: + return False + else: + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + ''' Create a new RADOS block device ''' + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + ''' Check to see if a RADOS pool already exists ''' + try: + out = check_output(['rados', '--id', service, 'lspools']) + except CalledProcessError: + return False + else: + return name in out + + +def get_osds(service): + ''' + Return a list of all Ceph Object Storage Daemons + currently in the cluster + ''' + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', 
'--id', service, + 'osd', 'ls', '--format=json'])) + else: + return None + + +def create_pool(service, name, replicas=3): + ''' Create a new RADOS pool ''' + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pgnum = (len(osds) * 100 / replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pgnum = 200 + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'create', + name, str(pgnum) + ] + check_call(cmd) + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', name, + 'size', str(replicas) + ] + check_call(cmd) + + +def delete_pool(service, name): + ''' Delete a RADOS pool from ceph ''' + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'delete', + name, '--yes-i-really-really-mean-it' + ] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + ''' Create a new Ceph keyring containing key''' + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + return + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.{}'.format(service), + '--add-key={}'.format(key) + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + ''' Create a file containing key ''' + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + return + with open(keyfile, 'w') as fd: + fd.write(key) + log('ceph: Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(): + ''' Query named relation 'ceph' to detemine current nodes ''' + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth, use_syslog): + ''' Perform basic configuration of Ceph ''' + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + ''' Determine whether a RADOS block device is mapped locally ''' + try: + out = check_output(['rbd', 'showmapped']) + except CalledProcessError: + return False + else: + return name in out + + +def map_block_storage(service, pool, image): + ''' Map a RADOS block device for local use ''' + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + ''' Determine whether a filesytems is already mounted ''' + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + ''' Make a new filesystem on the specified block device ''' + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + ''' Migrate data in data_src_dst to blk_device and then remount ''' + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + ''' Load a kernel module and configure for auto-load on reboot ''' + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + ''' Copy files from src to dst ''' + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """ + NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. 
+ + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool {}.'.format(pool)) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image ({}).'.format(rbd_img)) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('ceph: Stopping services {} prior to migrating data.' + .format(svc)) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('ceph: Starting service {} after migrating data.' + .format(svc)) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + ''' + Ensures a ceph keyring is created for a named service + and optionally ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + ''' + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + return True + + +def ceph_version(): + ''' Retrieve the local version of ceph ''' + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd) + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 6724d293..2398e8ed 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -256,7 +256,7 @@ def add_source(source, key=None): elif source == 'distro': pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: diff --git a/ceph-proxy/hooks/client-relation-changed b/ceph-proxy/hooks/client-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/client-relation-changed @@ -0,0 +1 @@ +hooks.py \ No 
newline at end of file diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 8a6c26c8..301a87ad 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -9,6 +9,7 @@ # import glob +import json import os import shutil import sys @@ -50,6 +51,9 @@ get_public_addr, assert_charm_supports_ipv6 ) +from ceph_broker import ( + process_requests +) hooks = Hooks() @@ -215,7 +219,7 @@ def notify_radosgws(): def notify_client(): for relid in relation_ids('client'): - client_relation(relid) + client_relation_joined(relid) def upgrade_keys(): @@ -266,28 +270,47 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') -def client_relation(relid=None): +def client_relation_joined(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None if relid is None: - service_name = remote_unit().split('/')[0] + units = [remote_unit()] + service_name = units[0].split('/')[0] else: units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] + if service_name is not None: - data = { - 'key': ceph.get_named_key(service_name), - 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), - } + data = {'key': ceph.get_named_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr()} relation_set(relation_id=relid, relation_settings=data) + + client_relation_changed(relid=relid) else: log('mon cluster not in quorum - deferring key provision') +@hooks.hook('client-relation-changed') +def client_relation_changed(relid=None): + if ceph.is_quorum(): + resp = None + settings = relation_get(rid=relid) + if 'broker_req' in settings: + req = settings['broker_req'] + log("Broker request received") + resp = process_requests(json.loads(req)) + + if resp is not None: + relation_set(relation_id=relid, + relation_settings={'broker_rsp': resp}) + else: + log('mon cluster not in quorum') + + @hooks.hook('upgrade-charm') def upgrade_charm(): emit_cephconf() diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py new file mode 100644 index 00000000..faca4587 --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -0,0 +1,11 @@ +#import mock +import unittest + + +class CephBrokerTestCase(unittest.TestCase): + + def setUp(self): + super(CephBrokerTestCase, self).setUp() + + def test_process_requests(self): + pass From 05d19ec993fc18bc48ccbc1a4fffc5bc168a80f6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 7 Nov 2014 14:16:26 +0100 Subject: [PATCH 0524/2699] Added Ceph broker support to allow clients to request resources e.g. pools as opposed to creating them themselves. This hopefully simplifies the logic at the client side and reduces the risk of race conditions by shifting execution to the ceph charm itself. Backwards-compatibility with clients that don't want to/yet support this approach is maintained. 
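(Editorial aside, not part of the patch: for orientation, a minimal sketch of
what a client charm might put on the wire under this first revision of the
protocol. The op names and keys ('create_pool'/'pool'/'replicas' and
'create_keyring'/'user'/'group') are taken from the broker code below; the
pool and user names are hypothetical, and the reply at this stage is the bare
0/1 exit code that hooks.py publishes as 'broker_rsp'. The create_keyring op
is dropped again two patches later, in "client still has to do keyring
create".)

    import json
    from charmhelpers.core.hookenv import relation_set, relation_get

    # Ask the ceph charm to create a replicated pool and a keyring.
    reqs = [{'op': 'create_pool', 'pool': 'mypool', 'replicas': 3},
            {'op': 'create_keyring', 'user': 'myuser', 'group': 'mygrp'}]
    relation_set(relation_settings={'broker_req': json.dumps(reqs)})

    # On a subsequent -changed hook, read the broker's verdict.
    rsp = relation_get('broker_rsp')  # 0 on success, 1 on error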
--- ceph-mon/Makefile | 6 +- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/hooks/ceph_broker.py | 47 +++ .../hooks/charmhelpers/contrib/network/ip.py | 2 - .../contrib/storage/linux/ceph.py | 388 ++++++++++++++++++ .../charmhelpers/core/services/__init__.py | 4 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- ceph-mon/hooks/client-relation-changed | 1 + ceph-mon/hooks/hooks.py | 39 +- ceph-mon/unit_tests/test_ceph_broker.py | 11 + 10 files changed, 487 insertions(+), 14 deletions(-) create mode 100644 ceph-mon/hooks/ceph_broker.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 120000 ceph-mon/hooks/client-relation-changed create mode 100644 ceph-mon/unit_tests/test_ceph_broker.py diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 70e95fbb..e29ab2b7 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -2,9 +2,13 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests + @flake8 --exclude hooks/charmhelpers hooks tests unit_tests @charm proof +unit_test: + @echo Starting unit tests... + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + test: @echo Starting Amulet tests... # coreycb note: The -v should only be temporary until Amulet sends diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index afb9e42b..c401e72e 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -5,6 +5,7 @@ include: - fetch - contrib.storage.linux: - utils + - ceph - payload.execd - contrib.openstack.alternatives - contrib.network.ip diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py new file mode 100644 index 00000000..cf1df89b --- /dev/null +++ b/ceph-mon/hooks/ceph_broker.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# +# Copyright 2014 Canonical Ltd. +# +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR +) +from charmhelpers.contrib.storage.linux.ceph import ( + create_pool, + pool_exists, + ensure_ceph_keyring +) + + +def process_requests(reqs): + """Process a Ceph broker request from a ceph client.""" + log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + for req in reqs: + op = req.get('op') + log("Processing op='%s'" % (op), level=INFO) + # Use admin client since we do not have other client key locations + # setup to use them for these operations. 
+ svc = 'admin' + if op == "create_pool": + pool = req.get('pool') + replicas = req.get('replicas') + if not all([pool, replicas]): + log("Missing parameter(s)", level=ERROR) + return 1 + + if not pool_exists(service=svc, name=pool): + log("Creating pool '%s'" % (pool), level=INFO) + create_pool(service=svc, name=pool, replicas=replicas) + else: + log("Pool '%s' already exists" % (pool), level=INFO) + elif op == "create_keyring": + user = req.get('user') + group = req.get('group') + if not all([user, group]): + log("Missing parameter(s)", level=ERROR) + return 1 + + ensure_ceph_keyring(service=svc, user=user, group=group) + + return 0 diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index e62e5655..c4bfeadb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -8,7 +8,6 @@ from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, ERROR, log ) @@ -175,7 +174,6 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None return address diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..598ec263 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,388 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, + WARNING, + ERROR +) + +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) + +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} + log to syslog = {use_syslog} + err to syslog = {use_syslog} + clog to syslog = {use_syslog} +""" + + +def install(): + ''' Basic Ceph client installation ''' + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + ''' Check to see if a RADOS block device exists ''' + try: + out = check_output(['rbd', 'list', '--id', service, + '--pool', pool]) + except CalledProcessError: + return False + else: + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + ''' Create a new RADOS block device ''' + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + ''' Check to see if a RADOS pool already exists ''' + try: + out = check_output(['rados', '--id', service, 'lspools']) + except CalledProcessError: + return False + else: + return name in out + + +def get_osds(service): + ''' + Return a list of all Ceph Object Storage Daemons + currently in the cluster + ''' + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', 
service, + 'osd', 'ls', '--format=json'])) + else: + return None + + +def create_pool(service, name, replicas=3): + ''' Create a new RADOS pool ''' + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pgnum = (len(osds) * 100 / replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pgnum = 200 + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'create', + name, str(pgnum) + ] + check_call(cmd) + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', name, + 'size', str(replicas) + ] + check_call(cmd) + + +def delete_pool(service, name): + ''' Delete a RADOS pool from ceph ''' + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'delete', + name, '--yes-i-really-really-mean-it' + ] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + ''' Create a new Ceph keyring containing key''' + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + return + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.{}'.format(service), + '--add-key={}'.format(key) + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + ''' Create a file containing key ''' + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + return + with open(keyfile, 'w') as fd: + fd.write(key) + log('ceph: Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(): + ''' Query named relation 'ceph' to detemine current nodes ''' + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth, use_syslog): + ''' Perform basic configuration of Ceph ''' + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + ''' Determine whether a RADOS block device is mapped locally ''' + try: + out = check_output(['rbd', 'showmapped']) + except CalledProcessError: + return False + else: + return name in out + + +def map_block_storage(service, pool, image): + ''' Map a RADOS block device for local use ''' + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + ''' Determine whether a filesytems is already mounted ''' + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + ''' Make a new filesystem on the specified block device ''' + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + ''' Migrate data in data_src_dst to blk_device and then remount ''' + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + ''' Load a kernel module and configure for auto-load on reboot ''' + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + ''' Copy files from src to dst ''' + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """ + NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. 
+ + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool {}.'.format(pool)) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image ({}).'.format(rbd_img)) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('ceph: Stopping services {} prior to migrating data.' + .format(svc)) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('ceph: Starting service {} after migrating data.' + .format(svc)) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + ''' + Ensures a ceph keyring is created for a named service + and optionally ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + ''' + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + return True + + +def ceph_version(): + ''' Retrieve the local version of ceph ''' + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd) + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 6724d293..2398e8ed 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -256,7 +256,7 @@ def add_source(source, key=None): elif source == 'distro': pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: diff --git a/ceph-mon/hooks/client-relation-changed b/ceph-mon/hooks/client-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/client-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff 
--git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 8a6c26c8..301a87ad 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -9,6 +9,7 @@ # import glob +import json import os import shutil import sys @@ -50,6 +51,9 @@ get_public_addr, assert_charm_supports_ipv6 ) +from ceph_broker import ( + process_requests +) hooks = Hooks() @@ -215,7 +219,7 @@ def notify_radosgws(): def notify_client(): for relid in relation_ids('client'): - client_relation(relid) + client_relation_joined(relid) def upgrade_keys(): @@ -266,28 +270,47 @@ def radosgw_relation(relid=None): @hooks.hook('client-relation-joined') -def client_relation(relid=None): +def client_relation_joined(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing client with keys') service_name = None if relid is None: - service_name = remote_unit().split('/')[0] + units = [remote_unit()] + service_name = units[0].split('/')[0] else: units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] + if service_name is not None: - data = { - 'key': ceph.get_named_key(service_name), - 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), - } + data = {'key': ceph.get_named_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr()} relation_set(relation_id=relid, relation_settings=data) + + client_relation_changed(relid=relid) else: log('mon cluster not in quorum - deferring key provision') +@hooks.hook('client-relation-changed') +def client_relation_changed(relid=None): + if ceph.is_quorum(): + resp = None + settings = relation_get(rid=relid) + if 'broker_req' in settings: + req = settings['broker_req'] + log("Broker request received") + resp = process_requests(json.loads(req)) + + if resp is not None: + relation_set(relation_id=relid, + relation_settings={'broker_rsp': resp}) + else: + log('mon cluster not in quorum') + + @hooks.hook('upgrade-charm') def upgrade_charm(): emit_cephconf() diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py new file mode 100644 index 00000000..faca4587 --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -0,0 +1,11 @@ +#import mock +import unittest + + +class CephBrokerTestCase(unittest.TestCase): + + def setUp(self): + super(CephBrokerTestCase, self).setUp() + + def test_process_requests(self): + pass From 95fac205ce1c71be0f169cd1e08c941f47c1d246 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 7 Nov 2014 15:26:58 +0100 Subject: [PATCH 0525/2699] client still has to do keyring create --- ceph-proxy/hooks/ceph_broker.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index cf1df89b..280f05f9 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -9,8 +9,7 @@ ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, - pool_exists, - ensure_ceph_keyring + pool_exists ) @@ -35,13 +34,5 @@ def process_requests(reqs): create_pool(service=svc, name=pool, replicas=replicas) else: log("Pool '%s' already exists" % (pool), level=INFO) - elif op == "create_keyring": - user = req.get('user') - group = req.get('group') - if not all([user, group]): - log("Missing parameter(s)", level=ERROR) - return 1 - - ensure_ceph_keyring(service=svc, user=user, group=group) return 0 From d50a46fec8ff65c7c61ab546495176b6ae4e7019 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 7 Nov 2014 15:26:58 +0100 
Subject: [PATCH 0526/2699] client still has to do keyring create --- ceph-mon/hooks/ceph_broker.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index cf1df89b..280f05f9 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -9,8 +9,7 @@ ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, - pool_exists, - ensure_ceph_keyring + pool_exists ) @@ -35,13 +34,5 @@ def process_requests(reqs): create_pool(service=svc, name=pool, replicas=replicas) else: log("Pool '%s' already exists" % (pool), level=INFO) - elif op == "create_keyring": - user = req.get('user') - group = req.get('group') - if not all([user, group]): - log("Missing parameter(s)", level=ERROR) - return 1 - - ensure_ceph_keyring(service=svc, user=user, group=group) return 0 From fc46a818581db5809cf4ebaae8d4085c941aae72 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 8 Nov 2014 20:15:17 +0000 Subject: [PATCH 0527/2699] added unit tests --- ceph-proxy/hooks/__init__.py | 0 ceph-proxy/hooks/ceph_broker.py | 11 ++++-- ceph-proxy/hooks/hooks.py | 17 +++++----- ceph-proxy/unit_tests/__init__.py | 2 ++ ceph-proxy/unit_tests/test_ceph_broker.py | 41 +++++++++++++++++++++-- 5 files changed, 57 insertions(+), 14 deletions(-) create mode 100644 ceph-proxy/hooks/__init__.py create mode 100644 ceph-proxy/unit_tests/__init__.py diff --git a/ceph-proxy/hooks/__init__.py b/ceph-proxy/hooks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 280f05f9..ccbc2adb 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -23,16 +23,21 @@ def process_requests(reqs): # setup to use them for these operations. svc = 'admin' if op == "create_pool": - pool = req.get('pool') + pool = req.get('name') replicas = req.get('replicas') if not all([pool, replicas]): log("Missing parameter(s)", level=ERROR) return 1 if not pool_exists(service=svc, name=pool): - log("Creating pool '%s'" % (pool), level=INFO) + log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + level=INFO) create_pool(service=svc, name=pool, replicas=replicas) else: - log("Pool '%s' already exists" % (pool), level=INFO) + log("Pool '%s' already exists - skipping create" % (pool), + level=INFO) + else: + log("Unknown operation '%s'" % (op)) + return 1 return 0 diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 301a87ad..11aa5ed7 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -296,17 +296,18 @@ def client_relation_joined(relid=None): @hooks.hook('client-relation-changed') def client_relation_changed(relid=None): - if ceph.is_quorum(): - resp = None + """Process broker requests from ceph client relations.""" + if ceph.is_quorum() and ceph.is_leader(): settings = relation_get(rid=relid) if 'broker_req' in settings: req = settings['broker_req'] - log("Broker request received") - resp = process_requests(json.loads(req)) - - if resp is not None: - relation_set(relation_id=relid, - relation_settings={'broker_rsp': resp}) + log("Broker request received from ceph client") + exit_code = process_requests(json.loads(req)) + # Construct JSON response dict allowing other data to be added as + # and when we need it. 
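 # (editorial aside, not part of the patch: from this revision on the
 # reply published as broker_rsp is JSON, e.g. '{"exit_code": 0}',
 # instead of a bare integer)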
+ resp = json.dumps({'exit_code': exit_code}) + relation_set(relation_id=relid, + relation_settings={'broker_rsp': resp}) else: log('mon cluster not in quorum') diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py new file mode 100644 index 00000000..f80aab3d --- /dev/null +++ b/ceph-proxy/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import sys +sys.path.append('hooks') diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index faca4587..0d4a337d 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -1,11 +1,46 @@ -#import mock +import mock import unittest +import ceph_broker + class CephBrokerTestCase(unittest.TestCase): def setUp(self): super(CephBrokerTestCase, self).setUp() - def test_process_requests(self): - pass + @mock.patch('ceph_broker.log') + def test_process_requests_noop(self, mock_log): + rc = ceph_broker.process_requests([{}]) + self.assertEqual(rc, 1) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid(self, mock_log): + rc = ceph_broker.process_requests([{'op': 'invalid_op'}]) + self.assertEqual(rc, 1) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool(self, mock_log, mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', + 'replicas': 3}]) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3) + self.assertEqual(rc, 0) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_exists(self, mock_log, + mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = True + rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', + 'replicas': 3}]) + mock_pool_exists.assert_called_with(service='admin', name='foo') + self.assertFalse(mock_create_pool.called) + self.assertEqual(rc, 0) From ea939c814d0c136ad906d765ef3748b7555cf47c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 8 Nov 2014 20:15:17 +0000 Subject: [PATCH 0528/2699] added unit tests --- ceph-mon/hooks/__init__.py | 0 ceph-mon/hooks/ceph_broker.py | 11 +++++-- ceph-mon/hooks/hooks.py | 17 +++++----- ceph-mon/unit_tests/__init__.py | 2 ++ ceph-mon/unit_tests/test_ceph_broker.py | 41 +++++++++++++++++++++++-- 5 files changed, 57 insertions(+), 14 deletions(-) create mode 100644 ceph-mon/hooks/__init__.py create mode 100644 ceph-mon/unit_tests/__init__.py diff --git a/ceph-mon/hooks/__init__.py b/ceph-mon/hooks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 280f05f9..ccbc2adb 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -23,16 +23,21 @@ def process_requests(reqs): # setup to use them for these operations. 
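 # (editorial aside, not part of the patch: the hunk below renames the
 # request key from 'pool' to 'name', so a client op now looks like
 #     {'op': 'create_pool', 'name': 'foo', 'replicas': 3}
 # which is the shape the unit tests added in this pair of patches
 # exercise)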
svc = 'admin' if op == "create_pool": - pool = req.get('pool') + pool = req.get('name') replicas = req.get('replicas') if not all([pool, replicas]): log("Missing parameter(s)", level=ERROR) return 1 if not pool_exists(service=svc, name=pool): - log("Creating pool '%s'" % (pool), level=INFO) + log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + level=INFO) create_pool(service=svc, name=pool, replicas=replicas) else: - log("Pool '%s' already exists" % (pool), level=INFO) + log("Pool '%s' already exists - skipping create" % (pool), + level=INFO) + else: + log("Unknown operation '%s'" % (op)) + return 1 return 0 diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 301a87ad..11aa5ed7 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -296,17 +296,18 @@ def client_relation_joined(relid=None): @hooks.hook('client-relation-changed') def client_relation_changed(relid=None): - if ceph.is_quorum(): - resp = None + """Process broker requests from ceph client relations.""" + if ceph.is_quorum() and ceph.is_leader(): settings = relation_get(rid=relid) if 'broker_req' in settings: req = settings['broker_req'] - log("Broker request received") - resp = process_requests(json.loads(req)) - - if resp is not None: - relation_set(relation_id=relid, - relation_settings={'broker_rsp': resp}) + log("Broker request received from ceph client") + exit_code = process_requests(json.loads(req)) + # Construct JSON response dict allowing other data to be added as + # and when we need it. + resp = json.dumps({'exit_code': exit_code}) + relation_set(relation_id=relid, + relation_settings={'broker_rsp': resp}) else: log('mon cluster not in quorum') diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py new file mode 100644 index 00000000..f80aab3d --- /dev/null +++ b/ceph-mon/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import sys +sys.path.append('hooks') diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index faca4587..0d4a337d 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -1,11 +1,46 @@ -#import mock +import mock import unittest +import ceph_broker + class CephBrokerTestCase(unittest.TestCase): def setUp(self): super(CephBrokerTestCase, self).setUp() - def test_process_requests(self): - pass + @mock.patch('ceph_broker.log') + def test_process_requests_noop(self, mock_log): + rc = ceph_broker.process_requests([{}]) + self.assertEqual(rc, 1) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid(self, mock_log): + rc = ceph_broker.process_requests([{'op': 'invalid_op'}]) + self.assertEqual(rc, 1) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool(self, mock_log, mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', + 'replicas': 3}]) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3) + self.assertEqual(rc, 0) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_exists(self, mock_log, + mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = True + rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', + 'replicas': 
3}]) + mock_pool_exists.assert_called_with(service='admin', name='foo') + self.assertFalse(mock_create_pool.called) + self.assertEqual(rc, 0) From 46da614bff97663528f185882233c57f4efbf515 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 8 Nov 2014 21:13:40 +0000 Subject: [PATCH 0529/2699] more --- ceph-proxy/hooks/ceph_broker.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index ccbc2adb..dd91b05d 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -23,12 +23,17 @@ def process_requests(reqs): # setup to use them for these operations. svc = 'admin' if op == "create_pool": - pool = req.get('name') - replicas = req.get('replicas') - if not all([pool, replicas]): - log("Missing parameter(s)", level=ERROR) + params = {'pool': req.get('name'), + 'replicas': req.get('replicas')} + if not all(params.iteritems()): + log("Missing parameter(s): %s" % + (' '.join([k for k in params.iterkeys() + if not params[k]])), + level=ERROR) return 1 + pool = params['pool'] + replicas = params['replicas'] if not pool_exists(service=svc, name=pool): log("Creating pool '%s' (replicas=%s)" % (pool, replicas), level=INFO) From e1deaef60472be6ce3376eab693865edfbd56daf Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 8 Nov 2014 21:13:40 +0000 Subject: [PATCH 0530/2699] more --- ceph-mon/hooks/ceph_broker.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index ccbc2adb..dd91b05d 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -23,12 +23,17 @@ def process_requests(reqs): # setup to use them for these operations. svc = 'admin' if op == "create_pool": - pool = req.get('name') - replicas = req.get('replicas') - if not all([pool, replicas]): - log("Missing parameter(s)", level=ERROR) + params = {'pool': req.get('name'), + 'replicas': req.get('replicas')} + if not all(params.iteritems()): + log("Missing parameter(s): %s" % + (' '.join([k for k in params.iterkeys() + if not params[k]])), + level=ERROR) return 1 + pool = params['pool'] + replicas = params['replicas'] if not pool_exists(service=svc, name=pool): log("Creating pool '%s' (replicas=%s)" % (pool, replicas), level=INFO) From c033f24b8b0311d77c4ba1c6290f257714dda360 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 11:54:40 +0000 Subject: [PATCH 0531/2699] Moved more broker code into ceph_broker Added support for versioning api Added unit tests --- ceph-proxy/hooks/ceph_broker.py | 50 +++++++++++++++++++---- ceph-proxy/hooks/hooks.py | 25 ++++++------ ceph-proxy/unit_tests/test_ceph_broker.py | 46 ++++++++++++++++----- 3 files changed, 89 insertions(+), 32 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index dd91b05d..8ab77c5f 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -2,6 +2,8 @@ # # Copyright 2014 Canonical Ltd. # +import json + from charmhelpers.core.hookenv import ( log, INFO, @@ -13,8 +15,37 @@ ) +def decode(f): + def decode_inner(req): + return json.dumps(f(json.loads(req))) + + return decode_inner + + +@decode def process_requests(reqs): - """Process a Ceph broker request from a ceph client.""" + """Process a Ceph broker request from a ceph client. + + This is a versioned api. We choose the api version based on provided + version from client. 
+ """ + version = reqs.get('version') + if version == 1: + return process_requests_v1(reqs['ops']) + + msg = ("Missing or invalid api version (%s)" % (version)) + return {'exit_code': 1, 'stderr': msg} + + +def process_requests_v1(reqs): + """Process a v1 requests from a ceph client. + + Takes a list of requests (dicts) and processes each one until it hits an + error. + + Upon completion of all ops or if an error is found, a response dict is + returned containing exit code and any extra info. + """ log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') @@ -26,11 +57,11 @@ def process_requests(reqs): params = {'pool': req.get('name'), 'replicas': req.get('replicas')} if not all(params.iteritems()): - log("Missing parameter(s): %s" % - (' '.join([k for k in params.iterkeys() - if not params[k]])), - level=ERROR) - return 1 + msg = ("Missing parameter(s): %s" % + (' '.join([k for k in params.iterkeys() + if not params[k]]))) + log(msg, level=ERROR) + return {'exit_code': 1, 'stderr': msg} pool = params['pool'] replicas = params['replicas'] @@ -42,7 +73,8 @@ def process_requests(reqs): log("Pool '%s' already exists - skipping create" % (pool), level=INFO) else: - log("Unknown operation '%s'" % (op)) - return 1 + msg = "Unknown operation '%s'" % (op) + log(msg, level=ERROR) + return {'exit_code': 1, 'stderr': msg} - return 0 + return {'exit_code': 0} diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 11aa5ed7..ed3da210 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -9,14 +9,15 @@ # import glob -import json import os import shutil import sys import ceph from charmhelpers.core.hookenv import ( - log, ERROR, + log, + INFO, + ERROR, config, relation_ids, related_units, @@ -26,7 +27,6 @@ Hooks, UnregisteredHookError, service_name ) - from charmhelpers.core.host import ( service_restart, umount, @@ -45,7 +45,6 @@ get_ipv6_addr, format_ipv6_addr ) - from utils import ( render_template, get_public_addr, @@ -297,17 +296,17 @@ def client_relation_joined(relid=None): @hooks.hook('client-relation-changed') def client_relation_changed(relid=None): """Process broker requests from ceph client relations.""" - if ceph.is_quorum() and ceph.is_leader(): + if ceph.is_quorum(): settings = relation_get(rid=relid) if 'broker_req' in settings: - req = settings['broker_req'] - log("Broker request received from ceph client") - exit_code = process_requests(json.loads(req)) - # Construct JSON response dict allowing other data to be added as - # and when we need it. 
- resp = json.dumps({'exit_code': exit_code}) - relation_set(relation_id=relid, - relation_settings={'broker_rsp': resp}) + if not ceph.is_leader(): + log("Not leader - ignoring broker request", level=INFO) + else: + req = settings['broker_req'] + log("Broker request received from ceph client") + rsp = process_requests(req) + relation_set(relation_id=relid, + relation_settings={'broker_rsp': rsp}) else: log('mon cluster not in quorum') diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index 0d4a337d..2d50e2a6 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -1,3 +1,4 @@ +import json import mock import unittest @@ -11,13 +12,34 @@ def setUp(self): @mock.patch('ceph_broker.log') def test_process_requests_noop(self, mock_log): - rc = ceph_broker.process_requests([{}]) - self.assertEqual(rc, 1) + req = json.dumps({'version': 1, 'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), {'exit_code': 0}) + + @mock.patch('ceph_broker.log') + def test_process_requests_missing_api_version(self, mock_log): + req = json.dumps({'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), {'exit_code': 1, + 'stderr': + ('Missing or invalid api version ' + '(None)')}) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_version(self, mock_log): + req = json.dumps({'version': 2, 'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), + {'exit_code': 1, + 'stderr': 'Missing or invalid api version (2)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid(self, mock_log): - rc = ceph_broker.process_requests([{'op': 'invalid_op'}]) - self.assertEqual(rc, 1) + reqs = json.dumps({'version': 1, 'ops': [{'op': 'invalid_op'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc), + {'exit_code': 1, + 'stderr': "Unknown operation 'invalid_op'"}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -25,12 +47,14 @@ def test_process_requests_invalid(self, mock_log): def test_process_requests_create_pool(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = False - rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', - 'replicas': 3}]) + reqs = json.dumps({'version': 1, + 'ops': [{'op': 'create_pool', 'name': + 'foo', 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', replicas=3) - self.assertEqual(rc, 0) + self.assertEqual(json.loads(rc), {'exit_code': 0}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -39,8 +63,10 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = True - rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', - 'replicas': 3}]) + reqs = json.dumps({'version': 1, + 'ops': [{'op': 'create_pool', 'name': 'foo', + 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') self.assertFalse(mock_create_pool.called) - self.assertEqual(rc, 0) + self.assertEqual(json.loads(rc), {'exit_code': 0}) From b98415a174277f22ee79c61d78b05e9bc1265391 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 11:54:40 +0000 Subject: [PATCH 
0532/2699] Moved more broker code into ceph_broker Added support for versioning api Added unit tests --- ceph-mon/hooks/ceph_broker.py | 50 ++++++++++++++++++++----- ceph-mon/hooks/hooks.py | 25 ++++++------- ceph-mon/unit_tests/test_ceph_broker.py | 46 ++++++++++++++++++----- 3 files changed, 89 insertions(+), 32 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index dd91b05d..8ab77c5f 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -2,6 +2,8 @@ # # Copyright 2014 Canonical Ltd. # +import json + from charmhelpers.core.hookenv import ( log, INFO, @@ -13,8 +15,37 @@ ) +def decode(f): + def decode_inner(req): + return json.dumps(f(json.loads(req))) + + return decode_inner + + +@decode def process_requests(reqs): - """Process a Ceph broker request from a ceph client.""" + """Process a Ceph broker request from a ceph client. + + This is a versioned api. We choose the api version based on provided + version from client. + """ + version = reqs.get('version') + if version == 1: + return process_requests_v1(reqs['ops']) + + msg = ("Missing or invalid api version (%s)" % (version)) + return {'exit_code': 1, 'stderr': msg} + + +def process_requests_v1(reqs): + """Process a v1 requests from a ceph client. + + Takes a list of requests (dicts) and processes each one until it hits an + error. + + Upon completion of all ops or if an error is found, a response dict is + returned containing exit code and any extra info. + """ log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') @@ -26,11 +57,11 @@ def process_requests(reqs): params = {'pool': req.get('name'), 'replicas': req.get('replicas')} if not all(params.iteritems()): - log("Missing parameter(s): %s" % - (' '.join([k for k in params.iterkeys() - if not params[k]])), - level=ERROR) - return 1 + msg = ("Missing parameter(s): %s" % + (' '.join([k for k in params.iterkeys() + if not params[k]]))) + log(msg, level=ERROR) + return {'exit_code': 1, 'stderr': msg} pool = params['pool'] replicas = params['replicas'] @@ -42,7 +73,8 @@ def process_requests(reqs): log("Pool '%s' already exists - skipping create" % (pool), level=INFO) else: - log("Unknown operation '%s'" % (op)) - return 1 + msg = "Unknown operation '%s'" % (op) + log(msg, level=ERROR) + return {'exit_code': 1, 'stderr': msg} - return 0 + return {'exit_code': 0} diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 11aa5ed7..ed3da210 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -9,14 +9,15 @@ # import glob -import json import os import shutil import sys import ceph from charmhelpers.core.hookenv import ( - log, ERROR, + log, + INFO, + ERROR, config, relation_ids, related_units, @@ -26,7 +27,6 @@ Hooks, UnregisteredHookError, service_name ) - from charmhelpers.core.host import ( service_restart, umount, @@ -45,7 +45,6 @@ get_ipv6_addr, format_ipv6_addr ) - from utils import ( render_template, get_public_addr, @@ -297,17 +296,17 @@ def client_relation_joined(relid=None): @hooks.hook('client-relation-changed') def client_relation_changed(relid=None): """Process broker requests from ceph client relations.""" - if ceph.is_quorum() and ceph.is_leader(): + if ceph.is_quorum(): settings = relation_get(rid=relid) if 'broker_req' in settings: - req = settings['broker_req'] - log("Broker request received from ceph client") - exit_code = process_requests(json.loads(req)) - # Construct JSON response dict allowing other data to be added as - # and 
when we need it. - resp = json.dumps({'exit_code': exit_code}) - relation_set(relation_id=relid, - relation_settings={'broker_rsp': resp}) + if not ceph.is_leader(): + log("Not leader - ignoring broker request", level=INFO) + else: + req = settings['broker_req'] + log("Broker request received from ceph client") + rsp = process_requests(req) + relation_set(relation_id=relid, + relation_settings={'broker_rsp': rsp}) else: log('mon cluster not in quorum') diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 0d4a337d..2d50e2a6 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -1,3 +1,4 @@ +import json import mock import unittest @@ -11,13 +12,34 @@ def setUp(self): @mock.patch('ceph_broker.log') def test_process_requests_noop(self, mock_log): - rc = ceph_broker.process_requests([{}]) - self.assertEqual(rc, 1) + req = json.dumps({'version': 1, 'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), {'exit_code': 0}) + + @mock.patch('ceph_broker.log') + def test_process_requests_missing_api_version(self, mock_log): + req = json.dumps({'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), {'exit_code': 1, + 'stderr': + ('Missing or invalid api version ' + '(None)')}) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_version(self, mock_log): + req = json.dumps({'version': 2, 'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), + {'exit_code': 1, + 'stderr': 'Missing or invalid api version (2)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid(self, mock_log): - rc = ceph_broker.process_requests([{'op': 'invalid_op'}]) - self.assertEqual(rc, 1) + reqs = json.dumps({'version': 1, 'ops': [{'op': 'invalid_op'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc), + {'exit_code': 1, + 'stderr': "Unknown operation 'invalid_op'"}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -25,12 +47,14 @@ def test_process_requests_invalid(self, mock_log): def test_process_requests_create_pool(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = False - rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', - 'replicas': 3}]) + reqs = json.dumps({'version': 1, + 'ops': [{'op': 'create_pool', 'name': + 'foo', 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', replicas=3) - self.assertEqual(rc, 0) + self.assertEqual(json.loads(rc), {'exit_code': 0}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -39,8 +63,10 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = True - rc = ceph_broker.process_requests([{'op': 'create_pool', 'name': 'foo', - 'replicas': 3}]) + reqs = json.dumps({'version': 1, + 'ops': [{'op': 'create_pool', 'name': 'foo', + 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') self.assertFalse(mock_create_pool.called) - self.assertEqual(rc, 0) + self.assertEqual(json.loads(rc), {'exit_code': 0}) From ae6588e1c928000e1a43dd1af99c151bb4686010 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 12:58:04 +0000 Subject: [PATCH 
0533/2699] cleanup --- ceph-proxy/hooks/ceph_broker.py | 12 +++++------ ceph-proxy/unit_tests/test_ceph_broker.py | 26 +++++++++++------------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 8ab77c5f..adba2ce8 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -29,12 +29,12 @@ def process_requests(reqs): This is a versioned api. We choose the api version based on provided version from client. """ - version = reqs.get('version') + version = reqs.get('api-version') if version == 1: return process_requests_v1(reqs['ops']) msg = ("Missing or invalid api version (%s)" % (version)) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} def process_requests_v1(reqs): @@ -53,7 +53,7 @@ def process_requests_v1(reqs): # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' - if op == "create_pool": + if op == "create-pool": params = {'pool': req.get('name'), 'replicas': req.get('replicas')} if not all(params.iteritems()): @@ -61,7 +61,7 @@ def process_requests_v1(reqs): (' '.join([k for k in params.iterkeys() if not params[k]]))) log(msg, level=ERROR) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} pool = params['pool'] replicas = params['replicas'] @@ -75,6 +75,6 @@ def process_requests_v1(reqs): else: msg = "Unknown operation '%s'" % (op) log(msg, level=ERROR) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} - return {'exit_code': 0} + return {'exit-code': 0} diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index 2d50e2a6..0176d119 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -12,33 +12,33 @@ def setUp(self): @mock.patch('ceph_broker.log') def test_process_requests_noop(self, mock_log): - req = json.dumps({'version': 1, 'ops': []}) + req = json.dumps({'api-version': 1, 'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.log') def test_process_requests_missing_api_version(self, mock_log): req = json.dumps({'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit_code': 1, + self.assertEqual(json.loads(rc), {'exit-code': 1, 'stderr': ('Missing or invalid api version ' '(None)')}) @mock.patch('ceph_broker.log') def test_process_requests_invalid_api_version(self, mock_log): - req = json.dumps({'version': 2, 'ops': []}) + req = json.dumps({'api-version': 2, 'ops': []}) rc = ceph_broker.process_requests(req) self.assertEqual(json.loads(rc), - {'exit_code': 1, + {'exit-code': 1, 'stderr': 'Missing or invalid api version (2)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid(self, mock_log): - reqs = json.dumps({'version': 1, 'ops': [{'op': 'invalid_op'}]}) + reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) rc = ceph_broker.process_requests(reqs) self.assertEqual(json.loads(rc), - {'exit_code': 1, + {'exit-code': 1, 'stderr': "Unknown operation 'invalid_op'"}) @mock.patch('ceph_broker.create_pool') @@ -47,14 +47,14 @@ def test_process_requests_invalid(self, mock_log): def test_process_requests_create_pool(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = False - reqs = 
json.dumps({'version': 1, - 'ops': [{'op': 'create_pool', 'name': + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', replicas=3) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -63,10 +63,10 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = True - reqs = json.dumps({'version': 1, - 'ops': [{'op': 'create_pool', 'name': 'foo', + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') self.assertFalse(mock_create_pool.called) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) From fd3df1251c9b6b7fd8d8ea861dbdb58dddf1b7b9 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 12:58:04 +0000 Subject: [PATCH 0534/2699] cleanup --- ceph-mon/hooks/ceph_broker.py | 12 ++++++------ ceph-mon/unit_tests/test_ceph_broker.py | 26 ++++++++++++------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 8ab77c5f..adba2ce8 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -29,12 +29,12 @@ def process_requests(reqs): This is a versioned api. We choose the api version based on provided version from client. """ - version = reqs.get('version') + version = reqs.get('api-version') if version == 1: return process_requests_v1(reqs['ops']) msg = ("Missing or invalid api version (%s)" % (version)) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} def process_requests_v1(reqs): @@ -53,7 +53,7 @@ def process_requests_v1(reqs): # Use admin client since we do not have other client key locations # setup to use them for these operations. 
svc = 'admin' - if op == "create_pool": + if op == "create-pool": params = {'pool': req.get('name'), 'replicas': req.get('replicas')} if not all(params.iteritems()): @@ -61,7 +61,7 @@ def process_requests_v1(reqs): (' '.join([k for k in params.iterkeys() if not params[k]]))) log(msg, level=ERROR) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} pool = params['pool'] replicas = params['replicas'] @@ -75,6 +75,6 @@ def process_requests_v1(reqs): else: msg = "Unknown operation '%s'" % (op) log(msg, level=ERROR) - return {'exit_code': 1, 'stderr': msg} + return {'exit-code': 1, 'stderr': msg} - return {'exit_code': 0} + return {'exit-code': 0} diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 2d50e2a6..0176d119 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -12,33 +12,33 @@ def setUp(self): @mock.patch('ceph_broker.log') def test_process_requests_noop(self, mock_log): - req = json.dumps({'version': 1, 'ops': []}) + req = json.dumps({'api-version': 1, 'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.log') def test_process_requests_missing_api_version(self, mock_log): req = json.dumps({'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit_code': 1, + self.assertEqual(json.loads(rc), {'exit-code': 1, 'stderr': ('Missing or invalid api version ' '(None)')}) @mock.patch('ceph_broker.log') def test_process_requests_invalid_api_version(self, mock_log): - req = json.dumps({'version': 2, 'ops': []}) + req = json.dumps({'api-version': 2, 'ops': []}) rc = ceph_broker.process_requests(req) self.assertEqual(json.loads(rc), - {'exit_code': 1, + {'exit-code': 1, 'stderr': 'Missing or invalid api version (2)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid(self, mock_log): - reqs = json.dumps({'version': 1, 'ops': [{'op': 'invalid_op'}]}) + reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) rc = ceph_broker.process_requests(reqs) self.assertEqual(json.loads(rc), - {'exit_code': 1, + {'exit-code': 1, 'stderr': "Unknown operation 'invalid_op'"}) @mock.patch('ceph_broker.create_pool') @@ -47,14 +47,14 @@ def test_process_requests_invalid(self, mock_log): def test_process_requests_create_pool(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = False - reqs = json.dumps({'version': 1, - 'ops': [{'op': 'create_pool', 'name': + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', replicas=3) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @@ -63,10 +63,10 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, mock_create_pool): mock_pool_exists.return_value = True - reqs = json.dumps({'version': 1, - 'ops': [{'op': 'create_pool', 'name': 'foo', + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') 
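# A minimal sketch, not part of this patch, of the client/broker exchange
# these tests encode once the keys go hyphenated (the pool name 'foo' and
# the replicas value are the tests' example data, not fixed API values):
#
#   import json
#   ok = ceph_broker.process_requests(
#       json.dumps({'api-version': 1,
#                   'ops': [{'op': 'create-pool',
#                            'name': 'foo', 'replicas': 3}]}))
#   # json.loads(ok) == {'exit-code': 0}
#   bad = ceph_broker.process_requests(json.dumps({'ops': []}))
#   # json.loads(bad) == {'exit-code': 1,
#   #                     'stderr': 'Missing or invalid api version (None)'}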
self.assertFalse(mock_create_pool.called) - self.assertEqual(json.loads(rc), {'exit_code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) From dec6430d96054fb8d4c9c203e16a2f82e8269b55 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 16:56:19 +0000 Subject: [PATCH 0535/2699] catch unexpected error and inform caller --- ceph-proxy/hooks/ceph_broker.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index adba2ce8..72887ac1 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -29,9 +29,16 @@ def process_requests(reqs): This is a versioned api. We choose the api version based on provided version from client. """ - version = reqs.get('api-version') - if version == 1: - return process_requests_v1(reqs['ops']) + try: + version = reqs.get('api-version') + if version == 1: + return process_requests_v1(reqs['ops']) + except Exception as exc: + log(str(exc), level=ERROR) + msg = ("Unexpected error occurred while processing requests: %s" % + (reqs)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} msg = ("Missing or invalid api version (%s)" % (version)) return {'exit-code': 1, 'stderr': msg} From 6cdbcedb0a5614ae7b2fdcd95381b5d9f53c5cdb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sun, 9 Nov 2014 16:56:19 +0000 Subject: [PATCH 0536/2699] catch unexpected error and inform caller --- ceph-mon/hooks/ceph_broker.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index adba2ce8..72887ac1 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -29,9 +29,16 @@ def process_requests(reqs): This is a versioned api. We choose the api version based on provided version from client. """ - version = reqs.get('api-version') - if version == 1: - return process_requests_v1(reqs['ops']) + try: + version = reqs.get('api-version') + if version == 1: + return process_requests_v1(reqs['ops']) + except Exception as exc: + log(str(exc), level=ERROR) + msg = ("Unexpected error occurred while processing requests: %s" % + (reqs)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} msg = ("Missing or invalid api version (%s)" % (version)) return {'exit-code': 1, 'stderr': msg} From 98142c1119f6032e6b04dc08d2aba9a2ca6830eb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 10 Nov 2014 10:46:09 +0000 Subject: [PATCH 0537/2699] fixed docstring typo --- ceph-proxy/hooks/ceph_broker.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 72887ac1..616c0f9b 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -24,10 +24,10 @@ def decode_inner(req): @decode def process_requests(reqs): - """Process a Ceph broker request from a ceph client. + """Process Ceph broker request(s). - This is a versioned api. We choose the api version based on provided - version from client. + This is a versioned api. API version must be supplied by the client making + the request. """ try: version = reqs.get('api-version') @@ -45,13 +45,13 @@ def process_requests(reqs): def process_requests_v1(reqs): - """Process a v1 requests from a ceph client. + """Process v1 requests. - Takes a list of requests (dicts) and processes each one until it hits an - error. 
+ Takes a list of requests (dicts) and processes each one. If an error is + found, processing stops and the client is notified in the response. - Upon completion of all ops or if an error is found, a response dict is - returned containing exit code and any extra info. + Returns a response dict containing the exit code (non-zero if any + operation failed along with an explanation). """ log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: From 7b4025588a95effcbbf39c33c1ae2359617f4445 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 10 Nov 2014 10:46:09 +0000 Subject: [PATCH 0538/2699] fixed docstring typo --- ceph-mon/hooks/ceph_broker.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 72887ac1..616c0f9b 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -24,10 +24,10 @@ def decode_inner(req): @decode def process_requests(reqs): - """Process a Ceph broker request from a ceph client. + """Process Ceph broker request(s). - This is a versioned api. We choose the api version based on provided - version from client. + This is a versioned api. API version must be supplied by the client making + the request. """ try: version = reqs.get('api-version') @@ -45,13 +45,13 @@ def process_requests(reqs): def process_requests_v1(reqs): - """Process a v1 requests from a ceph client. + """Process v1 requests. - Takes a list of requests (dicts) and processes each one until it hits an - error. + Takes a list of requests (dicts) and processes each one. If an error is + found, processing stops and the client is notified in the response. - Upon completion of all ops or if an error is found, a response dict is - returned containing exit code and any extra info. + Returns a response dict containing the exit code (non-zero if any + operation failed along with an explanation). 
""" log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: From 6af7a2be7e33c5159500190ab75cea0dd7862087 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 15 Nov 2014 09:32:03 -0600 Subject: [PATCH 0539/2699] Remove mutable method default --- ceph-radosgw/hooks/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b8e16623..dc4a9379 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -49,8 +49,10 @@ def enable_pocket(pocket): sources.write(line) -def get_host_ip(hostname=unit_get('private-address')): +def get_host_ip(hostname=None): try: + if not hostname: + hostname=unit_get('private-address') # Test to see if already an IPv4 address socket.inet_aton(hostname) return hostname From 69bdfa26b8bf7cb267c4982bada9876cff8798de Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 15 Nov 2014 09:32:24 -0600 Subject: [PATCH 0540/2699] Start to add unit_tests --- ceph-radosgw/.coverage | 10 + ceph-radosgw/.coveragerc | 6 + ceph-radosgw/Makefile | 4 + ceph-radosgw/unit_tests/__init__.py | 3 + ceph-radosgw/unit_tests/test_ceph.py | 124 +++++++++++ ceph-radosgw/unit_tests/test_hooks.py | 303 ++++++++++++++++++++++++++ ceph-radosgw/unit_tests/test_utils.py | 119 ++++++++++ 7 files changed, 569 insertions(+) create mode 100644 ceph-radosgw/.coverage create mode 100644 ceph-radosgw/.coveragerc create mode 100644 ceph-radosgw/unit_tests/__init__.py create mode 100644 ceph-radosgw/unit_tests/test_ceph.py create mode 100644 ceph-radosgw/unit_tests/test_hooks.py create mode 100644 ceph-radosgw/unit_tests/test_utils.py diff --git a/ceph-radosgw/.coverage b/ceph-radosgw/.coverage new file mode 100644 index 00000000..a257f1b6 --- /dev/null +++ b/ceph-radosgw/.coverage @@ -0,0 +1,10 @@ +€}q(U collectorqUcoverage v3.7.1qUlinesq}q(US/home/liam/branches/paris-train/next-org/ceph-radosgw-next/unit_tests/test_utils.pyq]q(KKKKKKK +KKKKKKKKKKKK#K$K%K&K'K*K-K/K0K1K2K3K4K5K7K8K9K:K;K=K>K?KBKDKEKGKHKJKKKOKRKSKUKXKZK[K]K`KhKnKoKqKsKtKvKweUZ/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/charmhelpers/core/host.pyq]q (KKK K +K K K KKKKKKKKK"K'K0K6KCKMKcKnKyK…K•K K¦K¬K½KËKÔKäKôKõKøMM'M3MJMPM[MeMuM~eU^/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/charmhelpers/payload/execd.pyq +]q (KKKKK K KK$K0eUY/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/charmhelpers/__init__.pyq ]q KaU_/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/charmhelpers/fetch/__init__.pyq]q(KKKKKKK K KKKKKKKKK K!K"K$K%K&K'K(K)K*K,K-K.K/K0K1K2K4K5K6K7K8K9K:KK?K@KAKBKCKKKNKOKPKSKTKWKXK[K\K_KaKcKhKmKpKwK†KK’K“K•K–K—K˜K™KœKK¡K°K¶KÁKÐMMMM?McMiM|eUH/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/ceph.pyq]q(K +K K K KKKKKKKKKKKKK K!K"K#K$K%K&K'K(K)K*K+K,K-K.K/K0K1K2K3K4K5K6K7K8K9K;K=K>K?K@KAKCKFKGKHKIKKKLKMKNKOKPKQKRKSKTKUKVKWKYKZK[K\K]K^K_K`KaKbKcKdKeKgKjKmKnKoKpKrKsKvKwKxKyK{K|K}K~KK€KK‚KƒK„K…K‡KˆK‰KŠK‹KŒKKŽKK“K”K•K–K—K˜K™KšK›KœKKžKŸK K¡K¢K£K¤K¥K¦K§K©K°K±K²K³K´KµK¶K·K¸K¹KºK»K¼K½K¾K¿KÀKÁKÂKÃKÅeUQ/home/liam/branches/paris-train/next-org/ceph-radosgw-next/unit_tests/__init__.pyq]q(KKeUI/home/liam/branches/paris-train/next-org/ceph-radosgw-next/hooks/hooks.pyq]q(K +K K K KKKKK!K"K)K*K+K-K0K1K2K5K8K=K>K?K@KAKCKDKGKHKIKJKLKMKNKOKPKSKUKVKWKXK[K]K^K`KaKbKcKdKeKjKlKmKnKpKqKtKuKvKxKyKzK{K|KK€KKƒK„K‡KˆK‰KŒKKK‘K“K”K•K–K—K˜K™KœKKžKŸK 
diff --git a/ceph-radosgw/.coveragerc b/ceph-radosgw/.coveragerc new file mode 100644 index 00000000..0e6369e1 --- /dev/null +++ b/ceph-radosgw/.coveragerc @@ -0,0 +1,6 @@ +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + if __name__ == .__main__.: +include= + hooks/ceph.py diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 5c9ade39..02f8c75d 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -5,6 +5,10 @@ lint: @flake8 --exclude hooks/charmhelpers hooks tests @charm proof +unit_test: + @$(PYTHON) /usr/bin/nosetests unit_tests +# @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + test: @echo Starting Amulet tests...
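# Illustrative invocation of the unit_test target added above, assuming the
# python-nose package is installed on the development machine:
#     make unit_test
# The commented-out recipe line is the coverage-enabled variant; with the
# new .coveragerc it would restrict collection to hooks/ceph.py.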
# coreycb note: The -v should only be temporary until Amulet sends diff --git a/ceph-radosgw/unit_tests/__init__.py b/ceph-radosgw/unit_tests/__init__.py new file mode 100644 index 00000000..afaed60c --- /dev/null +++ b/ceph-radosgw/unit_tests/__init__.py @@ -0,0 +1,3 @@ +import sys + +sys.path.append('hooks/') diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py new file mode 100644 index 00000000..2e096082 --- /dev/null +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -0,0 +1,124 @@ +from mock import call, patch, MagicMock +from test_utils import CharmTestCase, patch_open + +import ceph + +TO_PATCH = [ + 'get_unit_hostname', + 'os', + 'subprocess', + 'time', +] + +class CephRadosGWCephTests(CharmTestCase): + + def setUp(self): + super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) + + def test_is_quorum_leader(self): + self.get_unit_hostname.return_value = 'myhost' + self.subprocess.check_output.return_value = '{"state": "leader"}' + self.assertEqual(ceph.is_quorum(), True) + + def test_is_quorum_notleader(self): + self.get_unit_hostname.return_value = 'myhost' + self.subprocess.check_output.return_value = '{"state": "notleader"}' + self.assertEqual(ceph.is_quorum(), False) + + def test_is_quorum_valerror(self): + self.get_unit_hostname.return_value = 'myhost' + self.subprocess.check_output.return_value = "'state': 'bob'}" + self.assertEqual(ceph.is_quorum(), False) + + def test_is_leader(self): + self.get_unit_hostname.return_value = 'myhost' + self.os.path.exists.return_value = True + self.subprocess.check_output.return_value = '{"state": "leader"}' + self.assertEqual(ceph.is_leader(), True) + + def test_is_leader_notleader(self): + self.get_unit_hostname.return_value = 'myhost' + self.os.path.exists.return_value = True + self.subprocess.check_output.return_value = '{"state": "notleader"}' + self.assertEqual(ceph.is_leader(), False) + + def test_is_leader_valerror(self): + self.get_unit_hostname.return_value = 'myhost' + self.os.path.exists.return_value = True + self.subprocess.check_output.return_value = "'state': 'bob'}" + self.assertEqual(ceph.is_leader(), False) + + def test_is_leader_noasok(self): + self.get_unit_hostname.return_value = 'myhost' + self.os.path.exists.return_value = False + self.assertEqual(ceph.is_leader(), False) + +# def test_wait_for_quorum_yes(self): +# _is_quorum = self.patch('is_quorum') +# _is_quorum.return_value = False +# self.time.return_value = None +# ceph.wait_for_quorum() +# self.time.sleep.assert_called_with(3) + +# def test_wait_for_quorum_no(self): +# _is_quorum = self.patch('is_quorum') +# _is_quorum.return_value = True +# ceph.wait_for_quorum() +# self.assertFalse(self.time.sleep.called) + + def test_add_bootstrap_hint(self): + self.get_unit_hostname.return_value = 'myhost' + cmd = [ + "ceph", + "--admin-daemon", + '/var/run/ceph/ceph-mon.myhost.asok', + "add_bootstrap_peer_hint", + 'mypeer' + ] + self.os.path.exists.return_value = True + ceph.add_bootstrap_hint('mypeer') + self.subprocess.call.assert_called_with(cmd) + + def test_add_bootstrap_hint_noasok(self): + self.get_unit_hostname.return_value = 'myhost' + self.os.path.exists.return_value = False + ceph.add_bootstrap_hint('mypeer') + self.assertFalse(self.subprocess.call.called) + + def test_is_osd_disk(self): + # XXX Insert real sgdisk output + self.subprocess.check_output.return_value = 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), True) + + def test_is_osd_disk_no(self): + # XXX Insert 
real sgdisk output + self.subprocess.check_output.return_value = 'Partition GUID code: 5FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), False) + + def test_rescan_osd_devices(self): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + ceph.rescan_osd_devices() + self.subprocess.call.assert_called_with(cmd) + + def test_zap_disk(self): + cmd = [ + 'sgdisk', '--zap-all', '/dev/fmd0', + ] + ceph.zap_disk('/dev/fmd0') + self.subprocess.check_call.assert_called_with(cmd) + + def test_import_osd_bootstrap_key(self): + self.os.path.exists.return_value = False + cmd = [ + 'ceph-authtool', + '/var/lib/ceph/bootstrap-osd/ceph.keyring', + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key=mykey', + ] + ceph.import_osd_bootstrap_key('mykey') + self.subprocess.check_call.assert_called_with(cmd) + diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py new file mode 100644 index 00000000..a49f2b90 --- /dev/null +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -0,0 +1,303 @@ + +from mock import call, patch, MagicMock +from test_utils import CharmTestCase, patch_open + +import hooks as ceph_hooks + +TO_PATCH = [ + 'add_source', + 'apt_update', + 'apt_install', + 'config', + 'cmp_pkgrevno', + 'execd_preinstall', + 'enable_pocket', + 'get_host_ip', + 'get_unit_hostname', + 'glob', + 'is_apache_24', + 'log', + 'lsb_release', + 'open_port', + 'os', + 'related_units', + 'relation_ids', + 'relation_set', + 'relation_get', + 'render_template', + 'shutil', + 'subprocess', + 'sys', + 'unit_get', +] + +class CephRadosGWTests(CharmTestCase): + + def setUp(self): + super(CephRadosGWTests, self).setUp(ceph_hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.test_config.set('source', 'distro') + self.test_config.set('key', 'secretkey') + self.test_config.set('use-syslog', False) + + def test_install_www_scripts(self): + self.glob.glob.return_value = ['files/www/bob'] + ceph_hooks.install_www_scripts() + self.shutil.copy.assert_called_with('files/www/bob', '/var/www/') + + def test_install_ceph_optimised_packages(self): + self.lsb_release.return_value = {'DISTRIB_CODENAME': 'vivid'} + git_url = 'http://gitbuilder.ceph.com' + fastcgi_source = ('http://gitbuilder.ceph.com/' + 'libapache-mod-fastcgi-deb-vivid-x86_64-basic/ref/master') + apache_source = ('http://gitbuilder.ceph.com/' + 'apache2-deb-vivid-x86_64-basic/ref/master') + calls = [ + call(fastcgi_source, key='6EAEAE2203C3951A'), + call(apache_source, key='6EAEAE2203C3951A'), + ] + ceph_hooks.install_ceph_optimised_packages() + self.add_source.assert_has_calls(calls) + + def test_install_packages(self): + self.test_config.set('use-ceph-optimised-packages', '') + ceph_hooks.install_packages() + self.add_source.assert_called_with('distro', 'secretkey') + self.apt_update.assert_called() + self.apt_install.assert_called_with(['radosgw', + 'libapache2-mod-fastcgi', + 'apache2', + 'ntp'], fatal=True) + + def test_install_optimised_packages(self): + self.test_config.set('use-ceph-optimised-packages', True) + _install_packages = self.patch('install_ceph_optimised_packages') + ceph_hooks.install_packages() + self.add_source.assert_called_with('distro', 'secretkey') + self.apt_update.assert_called() + _install_packages.assert_called() + self.apt_install.assert_called_with(['radosgw', + 'libapache2-mod-fastcgi', + 'apache2', + 'ntp'], fatal=True) + + def test_install(self): + _install_packages = self.patch('install_packages') + 
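# (Aside, not part of the patch: self.patch() is the CharmTestCase helper
# added in test_utils.py later in this diff. It wraps
# mock.patch.object(self.obj, name), starts the patcher, registers the
# cleanup via addCleanup(), and returns the started Mock so the test can
# assert on it after the hook under test runs.)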
ceph_hooks.install() + self.execd_preinstall.assert_called() + _install_packages.assert_called() + self.enable_pocket.assert_called_with('multiverse') + self.os.makedirs.called_with('/var/lib/ceph/nss') + + def test_emit_cephconf(self): + _get_keystone_conf = self.patch('get_keystone_conf') + _get_auth = self.patch('get_auth') + _get_mon_hosts = self.patch('get_mon_hosts') + _get_auth.return_value = 'cephx' + _get_keystone_conf.return_value = {'keystone_key': 'keystone_value'} + _get_mon_hosts.return_value = ['10.0.0.1:6789', '10.0.0.2:6789'] + self.get_unit_hostname.return_value = 'bob' + self.os.path.exists.return_value = False + cephcontext = { + 'auth_supported': 'cephx', + 'mon_hosts': '10.0.0.1:6789 10.0.0.2:6789', + 'hostname': 'bob', + 'old_auth': False, + 'use_syslog': 'false', + 'keystone_key': 'keystone_value', + } + self.cmp_pkgrevno.return_value = 1 + with patch_open() as (_open, _file): + ceph_hooks.emit_cephconf() + self.os.makedirs.assert_called_with('/etc/ceph') + _open.assert_called_with('/etc/ceph/ceph.conf', 'w') + self.render_template.assert_called_with('ceph.conf', cephcontext) + + def test_emit_apacheconf(self): + self.is_apache_24.return_value = True + self.unit_get.return_value = '10.0.0.1' + apachecontext = { + "hostname": '10.0.0.1', + } + with patch_open() as (_open, _file): + ceph_hooks.emit_apacheconf() + _open.assert_called_with('/etc/apache2/sites-available/rgw.conf', 'w') + self.render_template.assert_called_with('rgw', apachecontext) + + def test_apache_sites24(self): + self.is_apache_24.return_value = True + ceph_hooks.apache_sites() + calls = [ + call(['a2dissite', '000-default']), + call(['a2ensite', 'rgw']), + ] + self.subprocess.check_call.assert_has_calls(calls) + + def test_apache_sites22(self): + self.is_apache_24.return_value = False + ceph_hooks.apache_sites() + calls = [ + call(['a2dissite', 'default']), + call(['a2ensite', 'rgw']), + ] + self.subprocess.check_call.assert_has_calls(calls) + + def test_apache_modules(self): + ceph_hooks.apache_modules() + calls = [ + call(['a2enmod', 'fastcgi']), + call(['a2enmod', 'rewrite']), + ] + self.subprocess.check_call.assert_has_calls(calls) + + def test_apache_reload(self): + ceph_hooks.apache_reload() + calls = [ + call(['service', 'apache2', 'reload']), + ] + self.subprocess.call.assert_has_calls(calls) + + def test_config_changed(self): + _install_packages = self.patch('install_packages') + _emit_cephconf = self.patch('emit_cephconf') + _emit_apacheconf = self.patch('emit_apacheconf') + _install_www_scripts = self.patch('install_www_scripts') + _apache_sites = self.patch('apache_sites') + _apache_modules = self.patch('apache_modules') + _apache_reload = self.patch('apache_reload') + ceph_hooks.config_changed() + _install_packages.assert_called() + _emit_cephconf.assert_called() + _emit_apacheconf.assert_called() + _install_www_scripts.assert_called() + _apache_sites.assert_called() + _apache_modules.assert_called() + _apache_reload.assert_called() + + def test_get_mon_hosts(self): + self.relation_ids.return_value = ['monrelid'] + self.related_units.return_value = ['monunit'] + self.relation_get.return_value = '10.0.0.1' + self.get_host_ip.return_value = '10.0.0.1' + self.assertEquals(ceph_hooks.get_mon_hosts(), ['10.0.0.1:6789']) + + def test_get_conf(self): + self.relation_ids.return_value = ['monrelid'] + self.related_units.return_value = ['monunit'] + self.relation_get.return_value = 'bob' + self.assertEquals(ceph_hooks.get_conf('key'), 'bob') + + def test_get_conf_nomatch(self): + 
self.relation_ids.return_value = ['monrelid'] + self.related_units.return_value = ['monunit'] + self.relation_get.return_value = '' + self.assertEquals(ceph_hooks.get_conf('key'), None) + + def test_get_auth(self): + self.relation_ids.return_value = ['monrelid'] + self.related_units.return_value = ['monunit'] + self.relation_get.return_value = 'bob' + self.assertEquals(ceph_hooks.get_auth(), 'bob') + + def test_get_keystone_conf(self): + self.test_config.set('operator-roles', 'admin') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '21') + self.relation_ids.return_value = ['idrelid'] + self.related_units.return_value = ['idunit'] + def _relation_get(key, unit, relid): + ks_dict = { + 'auth_protocol': 'https', + 'auth_host': '10.0.0.2', + 'auth_port': '8090', + 'admin_token': 'sectocken', + } + return ks_dict[key] + self.relation_get.side_effect = _relation_get + self.assertEquals(ceph_hooks.get_keystone_conf(), + {'auth_type': 'keystone', + 'auth_protocol': 'https', + 'admin_token': 'sectocken', + 'user_roles': 'admin', + 'auth_host': '10.0.0.2', + 'cache_size': '42', + 'auth_port': '8090', + 'revocation_check_interval': '21'}) + + def test_get_keystone_conf_missinginfo(self): + self.test_config.set('operator-roles', 'admin') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '21') + self.relation_ids.return_value = ['idrelid'] + self.related_units.return_value = ['idunit'] + def _relation_get(key, unit, relid): + ks_dict = { + 'auth_protocol': 'https', + 'auth_host': '10.0.0.2', + 'auth_port': '8090', + } + return ks_dict[key] if key in ks_dict else None + self.relation_get.side_effect = _relation_get + self.assertEquals(ceph_hooks.get_keystone_conf(), None) + + def test_mon_relation(self): + _emit_cephconf = self.patch('emit_cephconf') + _ceph = self.patch('ceph') + _restart = self.patch('restart') + self.relation_get.return_value = 'seckey' + ceph_hooks.mon_relation() + _restart.assert_called() + _ceph.import_radosgw_key.assert_called_with('seckey') + + def test_mon_relation_nokey(self): + _emit_cephconf = self.patch('emit_cephconf') + _ceph = self.patch('ceph') + _restart = self.patch('restart') + self.relation_get.return_value = None + ceph_hooks.mon_relation() + self.assertFalse(_ceph.import_radosgw_key.called) + self.assertFalse(_restart.called) + + def test_gateway_relation(self): + self.unit_get.return_value = 'myserver' + ceph_hooks.gateway_relation() + self.relation_set.assert_called_with(hostname='myserver', port=80) + + def test_start(self): + ceph_hooks.start() + self.subprocess.call.assert_called_with(['service', 'radosgw', 'start']) + + def test_stop(self): + ceph_hooks.stop() + self.subprocess.call.assert_called_with(['service', 'radosgw', 'stop']) + + def test_restart(self): + ceph_hooks.restart() + self.subprocess.call.assert_called_with(['service', 'radosgw', 'restart']) + + def test_identity_joined_early_version(self): + self.cmp_pkgrevno.return_value = -1 + ceph_hooks.identity_joined() + self.sys.exit.assert_called_with(1) + + def test_identity_joined(self): + self.cmp_pkgrevno.return_value = 1 + self.test_config.set('region', 'region1') + self.test_config.set('operator-roles', 'admin') + self.unit_get.return_value = 'myserv' + ceph_hooks.identity_joined(relid='rid') + self.relation_set.assert_called_with(service='swift', + region='region1', + public_url='http://myserv:80/swift/v1', + internal_url='http://myserv:80/swift/v1', + requested_roles='admin', + rid='rid', +
admin_url='http://myserv:80/swift') + + def test_identity_changed(self): + _emit_cephconf = self.patch('emit_cephconf') + _restart = self.patch('restart') + ceph_hooks.identity_changed() + _emit_cephconf.assert_called() + _restart.assert_called() + diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py new file mode 100644 index 00000000..526a61f7 --- /dev/null +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -0,0 +1,119 @@ +import logging +import os +import unittest +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + + +def load_config(): + '''Walk backwards from __file__ looking for config.yaml, + load and return the 'options' section. + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % __file__) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + '''Load default charm config from config.yaml and return as a dict. + If no default is set in config.yaml, its value is None. + ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr is None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + Yields the mock for "open" and "file", respectively.
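    A usage sketch, mirroring test_emit_cephconf earlier in this patch
    (the path is that test's example value, not something patch_open
    itself dictates):

        with patch_open() as (_open, _file):
            ceph_hooks.emit_cephconf()
            _open.assert_called_with('/etc/ceph/ceph.conf', 'w')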
+ ''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From 850c52c7275338e18626a3624c2fc43912dd60d2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 15 Nov 2014 09:33:21 -0600 Subject: [PATCH 0541/2699] Exclude .coverage from bzr --- ceph-radosgw/.bzrignore | 2 ++ ceph-radosgw/.coverage | 10 ---------- 2 files changed, 2 insertions(+), 10 deletions(-) delete mode 100644 ceph-radosgw/.coverage diff --git a/ceph-radosgw/.bzrignore b/ceph-radosgw/.bzrignore index 0879cd47..e06c3a88 100644 --- a/ceph-radosgw/.bzrignore +++ b/ceph-radosgw/.bzrignore @@ -1,3 +1,5 @@ .project .pydevproject bin +.coveragerc +.coverage diff --git a/ceph-radosgw/.coverage b/ceph-radosgw/.coverage deleted file mode 100644 index a257f1b6..00000000 --- a/ceph-radosgw/.coverage +++ /dev/null @@ -1,10 +0,0 @@ -[10 lines of binary data removed: the coverage v3.7.1 pickle added in patch 0540, omitted here as unreproducible mojibake]
From db48f121fed1d01a02a8448f94b15cb2b9f13203 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Mon, 17 Nov 2014 12:28:49 +1000 Subject: [PATCH 0542/2699] [bradm] Fixes from pep8 run --- ceph-osd/hooks/hooks.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index b47514aa..cfcc8b65 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -207,7 +207,8 @@ def upgrade_charm(): fatal=True) -@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +@hooks.hook('nrpe-external-master-relation-joined', + 'nrpe-external-master-relation-changed') def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = None @@ -228,7 +229,7 @@ def update_nrpe_config(): nrpe.add_check( shortname='ceph-osd', description='process check {%s}' % current_unit, - check_cmd = 'check_upstart_job ceph-osd', + check_cmd='check_upstart_job ceph-osd', ) nrpe.write() From 06602c44eaa89d0d35c7b139924d56ca5fb14839 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 18 Nov 2014 11:06:09 +1000 Subject: [PATCH 0543/2699] [bradm] Removed nagios check files that were moved to nrpe-external-master charm --- .../nrpe-external-master/check_upstart_job | 72 ------------------- 1 file changed, 72 deletions(-) delete mode 100755 ceph-osd/files/nrpe-external-master/check_upstart_job diff --git a/ceph-osd/files/nrpe-external-master/check_upstart_job b/ceph-osd/files/nrpe-external-master/check_upstart_job deleted file mode 100755 index 94efb95e..00000000 --- a/ceph-osd/files/nrpe-external-master/check_upstart_job +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python - -# -# Copyright 2012, 2013 Canonical Ltd.
-# -# Author: Paul Collins -# -# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html -# - -import sys - -import dbus - - -class Upstart(object): - def __init__(self): - self._bus = dbus.SystemBus() - self._upstart = self._bus.get_object('com.ubuntu.Upstart', - '/com/ubuntu/Upstart') - def get_job(self, job_name): - path = self._upstart.GetJobByName(job_name, - dbus_interface='com.ubuntu.Upstart0_6') - return self._bus.get_object('com.ubuntu.Upstart', path) - - def get_properties(self, job): - path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job') - instance = self._bus.get_object('com.ubuntu.Upstart', path) - return instance.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - - def get_job_instances(self, job_name): - job = self.get_job(job_name) - paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job') - return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths] - - def get_job_instance_properties(self, job): - return job.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - -try: - upstart = Upstart() - try: - job = upstart.get_job(sys.argv[1]) - props = upstart.get_properties(job) - - if props['state'] == 'running': - print 'OK: %s is running' % sys.argv[1] - sys.exit(0) - else: - print 'CRITICAL: %s is not running' % sys.argv[1] - sys.exit(2) - - except dbus.DBusException as e: - instances = upstart.get_job_instances(sys.argv[1]) - propses = [upstart.get_job_instance_properties(instance) for instance in instances] - states = dict([(props['name'], props['state']) for props in propses]) - if len(states) != states.values().count('running'): - not_running = [] - for name in states.keys(): - if states[name] != 'running': - not_running.append(name) - print 'CRITICAL: %d instances of %s not running: %s' % \ - (len(not_running), sys.argv[1], not_running.join(', ')) - sys.exit(2) - else: - print 'OK: %d instances of %s running' % (len(states), sys.argv[1]) - -except dbus.DBusException as e: - print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1] - sys.exit(2) - From bf9d828349fac4a55f6a0d726d3d9b9a75a245e9 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 19 Nov 2014 15:33:12 -0600 Subject: [PATCH 0544/2699] fixed up log levels --- ceph-proxy/hooks/ceph_broker.py | 15 +++++++++------ ceph-proxy/hooks/hooks.py | 10 ++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 616c0f9b..9fced945 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -6,23 +6,25 @@ from charmhelpers.core.hookenv import ( log, + DEBUG, INFO, - ERROR + ERROR, ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, - pool_exists + pool_exists, ) -def decode(f): +def decode_req_encode_rsp(f): + """Decorator to decode incoming requests and encode responses.""" def decode_inner(req): return json.dumps(f(json.loads(req))) return decode_inner -@decode +@decode_req_encode_rsp def process_requests(reqs): """Process Ceph broker request(s). 
@@ -33,6 +35,7 @@ def process_requests(reqs): version = reqs.get('api-version') if version == 1: return process_requests_v1(reqs['ops']) + except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % @@ -56,7 +59,7 @@ def process_requests_v1(reqs): log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % (op), level=INFO) + log("Processing op='%s'" % (op), level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' @@ -78,7 +81,7 @@ def process_requests_v1(reqs): create_pool(service=svc, name=pool, replicas=replicas) else: log("Pool '%s' already exists - skipping create" % (pool), - level=INFO) + level=DEBUG) else: msg = "Unknown operation '%s'" % (op) log(msg, level=ERROR) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index ed3da210..90144018 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -16,7 +16,7 @@ import ceph from charmhelpers.core.hookenv import ( log, - INFO, + DEBUG, ERROR, config, relation_ids, @@ -300,15 +300,13 @@ def client_relation_changed(relid=None): settings = relation_get(rid=relid) if 'broker_req' in settings: if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=INFO) + log("Not leader - ignoring broker request", level=DEBUG) else: - req = settings['broker_req'] - log("Broker request received from ceph client") - rsp = process_requests(req) + rsp = process_requests(settings['broker_req']) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: - log('mon cluster not in quorum') + log('mon cluster not in quorum', level=DEBUG) @hooks.hook('upgrade-charm') From 4dd3534c0ecf072ac2a83151d574f4153bb41f45 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 19 Nov 2014 15:33:12 -0600 Subject: [PATCH 0545/2699] fixed up log levels --- ceph-mon/hooks/ceph_broker.py | 15 +++++++++------ ceph-mon/hooks/hooks.py | 10 ++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 616c0f9b..9fced945 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -6,23 +6,25 @@ from charmhelpers.core.hookenv import ( log, + DEBUG, INFO, - ERROR + ERROR, ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, - pool_exists + pool_exists, ) -def decode(f): +def decode_req_encode_rsp(f): + """Decorator to decode incoming requests and encode responses.""" def decode_inner(req): return json.dumps(f(json.loads(req))) return decode_inner -@decode +@decode_req_encode_rsp def process_requests(reqs): """Process Ceph broker request(s). @@ -33,6 +35,7 @@ def process_requests(reqs): version = reqs.get('api-version') if version == 1: return process_requests_v1(reqs['ops']) + except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % @@ -56,7 +59,7 @@ def process_requests_v1(reqs): log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % (op), level=INFO) + log("Processing op='%s'" % (op), level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. 
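# (Illustrative sketch, not part of this patch, of the contract the renamed
# decode_req_encode_rsp decorator gives callers -- raw JSON string in, raw
# JSON string out; the payload is an assumed example:
#     raw = '{"api-version": 1, "ops": []}'
#     rsp = process_requests(raw)
#     # the decorator json.loads()s raw, hands the resulting dict to the
#     # wrapped function, and json.dumps()s the dict it returns, so rsp is
#     # a string such as '{"exit-code": 0}'
# )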
svc = 'admin' @@ -78,7 +81,7 @@ def process_requests_v1(reqs): create_pool(service=svc, name=pool, replicas=replicas) else: log("Pool '%s' already exists - skipping create" % (pool), - level=INFO) + level=DEBUG) else: msg = "Unknown operation '%s'" % (op) log(msg, level=ERROR) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index ed3da210..90144018 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -16,7 +16,7 @@ import ceph from charmhelpers.core.hookenv import ( log, - INFO, + DEBUG, ERROR, config, relation_ids, @@ -300,15 +300,13 @@ def client_relation_changed(relid=None): settings = relation_get(rid=relid) if 'broker_req' in settings: if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=INFO) + log("Not leader - ignoring broker request", level=DEBUG) else: - req = settings['broker_req'] - log("Broker request received from ceph client") - rsp = process_requests(req) + rsp = process_requests(settings['broker_req']) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: - log('mon cluster not in quorum') + log('mon cluster not in quorum', level=DEBUG) @hooks.hook('upgrade-charm') From 1dbfac35d8de2bee8d892f3ea03ef48122100f9e Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Thu, 20 Nov 2014 18:19:08 -0300 Subject: [PATCH 0546/2699] [config] renamed empty string to None , according to @jamespage --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index ae1b4336..091a7dc4 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -123,7 +123,7 @@ options: your network interface. sysctl: type: string - default: "" + default: description: | YAML formatted associative array of sysctl values, e.g.: '{ kernel.pid_max : 4194303 }' From 8bf5edc6880c8bf53ba2c94f467ee6add517e069 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Thu, 20 Nov 2014 18:24:58 -0300 Subject: [PATCH 0547/2699] [config] removed empty string from sysctl config param --- ceph-proxy/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 218f7af0..e90d7b41 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -157,7 +157,7 @@ options: your network interface. sysctl: type: string - default: "" + default: description: | YAML formatted associative array of sysctl values, e.g.: '{ kernel.pid_max : 4194303 }' From b2108316eb25f5b246f03b3771e9a7c6fce1edf7 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Thu, 20 Nov 2014 18:24:58 -0300 Subject: [PATCH 0548/2699] [config] removed empty string from sysctl config param --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 218f7af0..e90d7b41 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -157,7 +157,7 @@ options: your network interface. 
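# Illustrative operator usage of the sysctl option below once the
# empty-string default is dropped (command form assumed from the juju 1.x
# era of these patches; the kernel.pid_max value is the example given in
# the option's own description):
#   juju set ceph-mon sysctl="{ kernel.pid_max: 4194303 }"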
sysctl: type: string - default: "" + default: description: | YAML formatted associative array of sysctl values, e.g.: '{ kernel.pid_max : 4194303 }' From 7586e19e2290c2d67baa0909817a95c0418ce641 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 25 Nov 2014 13:52:35 -0300 Subject: [PATCH 0549/2699] [all] make sync for charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 112 +++++++++--------- .../contrib/storage/linux/utils.py | 5 +- ceph-osd/hooks/charmhelpers/core/fstab.py | 18 +-- ceph-osd/hooks/charmhelpers/core/hookenv.py | 36 ++++-- ceph-osd/hooks/charmhelpers/core/host.py | 49 +++++--- .../charmhelpers/core/services/__init__.py | 4 +- .../charmhelpers/core/services/helpers.py | 12 +- .../hooks/charmhelpers/core/templating.py | 3 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 30 +++-- .../hooks/charmhelpers/fetch/archiveurl.py | 69 ++++++++--- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 6 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 10 +- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- 15 files changed, 229 insertions(+), 138 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..8dc83165 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def not_found_error_out(): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def not_found_error_out(): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! 
return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -140,57 +137,63 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. - """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -198,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -217,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." 
% (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -237,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -247,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. """ def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -291,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: @@ -313,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." % iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. 
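A recurring pattern in this charmhelpers sync, sketched here with an assumed
device path: subprocess.check_output() returns bytes on Python 3, so call
sites now decode before doing any text matching:

    out = check_output(['mount']).decode('UTF-8')
    mounted = bool(re.search(r'/dev/vdb' + r"\b", out))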
''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. ' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ def __init__(self, device, mountpoint, filesystem, options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ def __init__(self, path=None): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ def _hydrate_entry(self, line): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ def add_entry(self, entry): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ def remove_entry(self, entry): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index af8fe2db..99e5d208 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +72,12 @@ def log(message, level=None): subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +219,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
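[A brief aside on the Fstab change above, since it is the least mechanical part of this sync: io.FileIO is an unbuffered binary stream on both Python 2 and 3, so every read yields bytes and every write must be handed bytes, hence the encode/decode('us-ascii') calls. A minimal standalone sketch of that round trip, not part of the patch; the path below is hypothetical.]

import io

# io.FileIO deals strictly in bytes on Python 2 and 3 alike, which is
# what lets the patched Fstab share one code path across both versions.
with io.FileIO('/tmp/fstab-example', 'w+') as f:        # hypothetical path
    f.write('/dev/sdb /srv ext4 defaults 0 0\n'.encode('us-ascii'))
    f.seek(0)
    for raw in f.readlines():
        line = raw.decode('us-ascii')                   # bytes -> str
        if line.strip() and not line.startswith('#'):
            device, mountpoint = line.split()[:2]       # '/dev/sdb', '/srv'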
@@ -263,7 +274,7 @@ def save(self): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -278,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -297,10 +309,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +324,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -329,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -340,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -449,7 +463,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index d7ce1e4c..e6783d9b 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 
'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -115,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +135,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +151,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +182,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +196,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +223,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +302,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,18 +311,24 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces @@ -329,7 +340,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" 
for line in ip_output: words = line.split() @@ -340,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git a/ceph-osd/hooks/charmhelpers/core/services/__init__.py b/ceph-osd/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-osd/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ def store_context(self, file_name, config_data): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ def read_context(self, file_name): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. 
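[The perms changes running through the last few hunks (mkdir, write_file, store_context, TemplateCallback, render) are one mechanical fix: Python 3 dropped the bare 0444 octal literal, while the 0o444 spelling is accepted by Python 2.6+ as well. A short sketch of the idiom, not part of the patch; the file path is made up.]

import os
import stat

# 0o444 is the portable spelling of octal 444, i.e. r--r--r--.
assert 0o444 == stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH

with open('/tmp/rendered.conf', 'w') as f:   # hypothetical target file
    # Tighten to owner-only read/write before writing sensitive data,
    # the same move store_context() makes with 0o600 above.
    os.fchmod(f.fileno(), 0o600)
    f.write('key = value\n')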
diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 32a673d6..0a126fc3 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -72,6 +74,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +221,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,12 +255,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..8a4624b2 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,23 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +30,24 @@ from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -42,20 +75,20 @@ def download(self, source, dest): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +124,21 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ def install(self, source): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index d859d367..3d3ef339 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,6 +1,6 @@ import amulet - import os +import six class AmuletDeployment(object): @@ -52,12 +52,12 @@ def _add_services(self, this_service, other_services): def _add_relations(self, relations): """Add all of the relations for the services.""" - for k, v in relations.iteritems(): + for k, v in six.iteritems(relations): self.d.relate(k, v) def _configure_services(self, configs): 
"""Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _deploy(self): diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index c843333f..d333e63b 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -5,6 +5,8 @@ import sys import time +import six + class AmuletUtils(object): """Amulet utilities. @@ -58,7 +60,7 @@ def validate_services(self, commands): Verify the specified services are running on the corresponding service units. """ - for k, v in commands.iteritems(): + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) if code != 0: @@ -100,11 +102,11 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. """ - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: - if (isinstance(v, basestring) or + if (isinstance(v, six.string_types) or isinstance(v, bool) or - isinstance(v, (int, long))): + isinstance(v, six.integer_types)): if v != actual[k]: return "{}:{}".format(k, actual[k]) elif not v(actual[k]): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): expected service catalog endpoints. 
""" self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: From 005c439fae3a04d17c99ee6d77b209a7d4c944f1 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 25 Nov 2014 14:04:02 -0300 Subject: [PATCH 0550/2699] [all] sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 112 +++++++++--------- .../contrib/storage/linux/utils.py | 5 +- ceph-proxy/hooks/charmhelpers/core/fstab.py | 18 +-- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 36 ++++-- ceph-proxy/hooks/charmhelpers/core/host.py | 49 +++++--- .../charmhelpers/core/services/__init__.py | 4 +- .../charmhelpers/core/services/helpers.py | 12 +- .../hooks/charmhelpers/core/templating.py | 3 +- .../hooks/charmhelpers/fetch/__init__.py | 30 +++-- .../hooks/charmhelpers/fetch/archiveurl.py | 69 ++++++++--- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 6 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 48 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 10 +- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- 16 files changed, 277 insertions(+), 138 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..8dc83165 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). 
- """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def not_found_error_out(): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def not_found_error_out(): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -140,57 +137,63 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. 
- """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -198,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -217,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -237,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -247,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. """ def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -291,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: @@ -313,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." 
% iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. ''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ def __init__(self, device, mountpoint, filesystem, options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ def __init__(self, path=None): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ def _hydrate_entry(self, line): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ def add_entry(self, entry): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ def remove_entry(self, entry): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index af8fe2db..99e5d208 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +72,12 @@ def log(message, level=None): subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +219,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
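[An aside on the keys() override added above: plain dict.keys() would miss options that only exist in the previously saved config, so the override unions both key sets. A toy illustration in the same spirit, not from the patch; the option names are invented.]

# Union of saved and current option names, as Config.keys() above intends.
prev_dict = {'fsid': 'abc123', 'source': 'distro'}    # saved on a prior run
current = {'source': 'cloud:icehouse'}                # current hook run

keys = list(set(list(prev_dict.keys()) + list(current.keys())))
assert sorted(keys) == ['fsid', 'source']   # 'fsid' survives from the save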
@@ -263,7 +274,7 @@ def save(self): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -278,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -297,10 +309,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +324,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -329,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -340,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -449,7 +463,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index d7ce1e4c..e6783d9b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', 
service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -115,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +135,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +151,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +182,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +196,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +223,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +302,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,18 +311,24 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces @@ -329,7 +340,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = 
subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -340,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ def store_context(self, file_name, config_data): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ def read_context(self, file_name): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. 
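[One behavioural change worth calling out from the host.py hunks above: list_nics() now recognises VLAN-tagged bond interfaces, which `ip addr` prints with an @parent suffix. A sketch against a fabricated line of ip output, mirroring the regex added in the patch.]

import re

# Fabricated 'ip addr show' line for a VLAN on a bond.
line = '5: bond0.100@bond0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue'

matched = re.search(r'.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
    interface = matched.groups()[0]                 # 'bond0.100'
else:
    interface = line.split()[1].replace(':', '')    # pre-patch behaviour
assert interface == 'bond0.100'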
diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 32a673d6..0a126fc3 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -72,6 +74,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +221,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,12 +255,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..8a4624b2 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,23 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +30,24 @@ from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -42,20 +75,20 @@ def download(self, source, dest): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +124,21 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ def install(self, source): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..61684cb6 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,48 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for 
git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index d859d367..3d3ef339 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,6 +1,6 @@ import amulet - import os +import six class AmuletDeployment(object): @@ -52,12 +52,12 @@ def _add_services(self, this_service, other_services): def _add_relations(self, relations): """Add all of the relations for the services.""" - for k, v in relations.iteritems(): + for k, v in six.iteritems(relations): self.d.relate(k, v) def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _deploy(self): diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index c843333f..d333e63b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -5,6 +5,8 @@ import sys import time +import six + class AmuletUtils(object): """Amulet utilities. @@ -58,7 +60,7 @@ def validate_services(self, commands): Verify the specified services are running on the corresponding service units. """ - for k, v in commands.iteritems(): + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) if code != 0: @@ -100,11 +102,11 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. 
""" - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: - if (isinstance(v, basestring) or + if (isinstance(v, six.string_types) or isinstance(v, bool) or - isinstance(v, (int, long))): + isinstance(v, six.integer_types)): if v != actual[k]: return "{}:{}".format(k, actual[k]) elif not v(actual[k]): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): expected service catalog endpoints. """ self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: From 1f4c7d7848024f2a51d4a5d1a1b7fa0dd7c305e2 Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 25 Nov 2014 14:04:02 -0300 Subject: [PATCH 0551/2699] [all] sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 112 +++++++++--------- .../contrib/storage/linux/utils.py | 5 +- ceph-mon/hooks/charmhelpers/core/fstab.py | 18 +-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 36 ++++-- ceph-mon/hooks/charmhelpers/core/host.py | 49 +++++--- .../charmhelpers/core/services/__init__.py | 4 +- .../charmhelpers/core/services/helpers.py | 12 +- .../hooks/charmhelpers/core/templating.py | 3 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 30 +++-- .../hooks/charmhelpers/fetch/archiveurl.py | 69 ++++++++--- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 6 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 48 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 10 +- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- 16 files changed, 277 insertions(+), 138 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/fetch/giturl.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..8dc83165 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import 
( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def not_found_error_out(): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def not_found_error_out(): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -140,57 +137,63 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. 
+ + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. - """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -198,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -217,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -237,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -247,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. """ def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -291,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: @@ -313,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." 
% iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. ''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ def __init__(self, device, mountpoint, filesystem, options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ def __init__(self, path=None): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ def _hydrate_entry(self, line): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ def add_entry(self, entry): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ def remove_entry(self, entry): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index af8fe2db..99e5d208 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +72,12 @@ def log(message, level=None): subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +219,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
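# Illustrative sketch (not part of the patch) of the six.iteritems()
# substitution the hunks below apply throughout this sync: six
# dispatches to dict.iteritems() on Python 2 and dict.items() on
# Python 3, so one source tree runs on both. Dict contents are
# hypothetical.
import six

prev = {'fsid': 'abc123', 'osd-devices': '/dev/sdb'}
for k, v in six.iteritems(prev):
    print('%s=%s' % (k, v))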
@@ -263,7 +274,7 @@ def save(self): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -278,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -297,10 +309,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +324,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -329,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -340,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -449,7 +463,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d7ce1e4c..e6783d9b 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 
'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -115,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +135,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +151,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +182,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +196,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +223,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +302,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,18 +311,24 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces @@ -329,7 +340,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" 
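# Usage sketch for the bond-interface regex list_nics() gains above;
# the sample 'ip addr show' line is illustrative, not captured output.
import re

line = '42: bond0.123@bond0: <BROADCAST,MULTICAST,UP> mtu 1500'
matched = re.search(r'.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
    print(matched.groups()[0])   # -> bond0.123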
for line in ip_output: words = line.split() @@ -340,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ def store_context(self, file_name, config_data): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ def read_context(self, file_name): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. 
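# Minimal sketch of the Python 2/3 import shim the fetch modules below
# adopt: the same names get bound from urllib.parse on Python 3 and
# from urlparse on Python 2, so callers stay version-agnostic.
import six

if six.PY3:
    from urllib.parse import urlparse, urlunparse
else:
    from urlparse import urlparse, urlunparse

print(urlparse('http://example.com/pkg.tgz#sha1=abcd').fragment)   # -> sha1=abcd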
diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 32a673d6..0a126fc3 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -72,6 +74,7 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +221,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,12 +255,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..8a4624b2 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,23 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +30,24 @@ from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
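# Usage sketch for the splituser()/splitpasswd() helpers defined above,
# which stand in for urllib2.splituser/splitpasswd (not usable via six);
# the netloc value is made up for illustration.
netloc = 'alice:s3cret@example.com'
auth, barehost = splituser(netloc)        # ('alice:s3cret', 'example.com')
username, password = splitpasswd(auth)    # ('alice', 's3cret')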
@@ -42,20 +75,20 @@ def download(self, source, dest): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +124,21 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ def install(self, source): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..61684cb6 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,48 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches 
via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index d859d367..3d3ef339 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,6 +1,6 @@ import amulet - import os +import six class AmuletDeployment(object): @@ -52,12 +52,12 @@ def _add_services(self, this_service, other_services): def _add_relations(self, relations): """Add all of the relations for the services.""" - for k, v in relations.iteritems(): + for k, v in six.iteritems(relations): self.d.relate(k, v) def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _deploy(self): diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index c843333f..d333e63b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -5,6 +5,8 @@ import sys import time +import six + class AmuletUtils(object): """Amulet utilities. @@ -58,7 +60,7 @@ def validate_services(self, commands): Verify the specified services are running on the corresponding service units. """ - for k, v in commands.iteritems(): + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) if code != 0: @@ -100,11 +102,11 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. 
""" - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: - if (isinstance(v, basestring) or + if (isinstance(v, six.string_types) or isinstance(v, bool) or - isinstance(v, (int, long))): + isinstance(v, six.integer_types)): if v != actual[k]: return "{}:{}".format(k, actual[k]) elif not v(actual[k]): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): expected service catalog endpoints. """ self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: From e8e2cf5d45ecf2c017224a9cb77284db8328108b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 09:07:27 +0000 Subject: [PATCH 0552/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 7 - .../contrib/storage/linux/ceph.py | 180 ++++++++---------- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 2 +- 3 files changed, 84 insertions(+), 105 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 779ada11..8dc83165 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -7,7 +7,6 @@ from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log ) @@ -176,13 +175,7 @@ def format_ipv6_addr(address): addresses. 
""" if is_ipv6(address): -<<<<<<< TREE return "[%s]" % address -======= - address = "[%s]" % address - else: - address = None ->>>>>>> MERGE-SOURCE return None diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 598ec263..d47dc228 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,85 @@ def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) - else: - return None + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None def create_pool(service, name, replicas=3): - ''' Create a new RADOS pool ''' + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +145,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' % keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +218,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' 
% (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +273,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -302,8 +286,7 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, blk_device, fstype, system_services=[], replicas=3): - """ - NOTE: This function must only be called from a single service unit for + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -317,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) + log('Creating new pool {}.'.format(pool), level=INFO) create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -340,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 4140282b..99e5d208 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -223,7 +223,7 @@ def keys(self): prev_keys = [] if self._prev_dict is not None: prev_keys = self._prev_dict.keys() - return list(set(prev_keys + dict.keys(self))) + return list(set(prev_keys + list(dict.keys(self)))) def load_previous(self, path=None): """Load previous copy of config from disk. From ada890c53063537ede2c5bf1cb7d5b0ed4fa5f1a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 09:07:27 +0000 Subject: [PATCH 0553/2699] Sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 7 - .../contrib/storage/linux/ceph.py | 180 ++++++++---------- ceph-mon/hooks/charmhelpers/core/hookenv.py | 2 +- 3 files changed, 84 insertions(+), 105 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 779ada11..8dc83165 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -7,7 +7,6 @@ from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log ) @@ -176,13 +175,7 @@ def format_ipv6_addr(address): addresses. 
""" if is_ipv6(address): -<<<<<<< TREE return "[%s]" % address -======= - address = "[%s]" % address - else: - address = None ->>>>>>> MERGE-SOURCE return None diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 598ec263..d47dc228 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,85 @@ def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) - else: - return None + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None def create_pool(service, name, replicas=3): - ''' Create a new RADOS pool ''' + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +145,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' % keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +218,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' 
% (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +273,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -302,8 +286,7 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, blk_device, fstype, system_services=[], replicas=3): - """ - NOTE: This function must only be called from a single service unit for + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -317,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) + log('Creating new pool {}.'.format(pool), level=INFO) create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -340,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
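Note: the lookup loop in this helper only breaks out of the inner (per-unit) scan, so a later relation id with no published key reassigns the variable to None again. A fully short-circuiting sketch of the same lookup, for comparison:

    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
        if key:
            break  # also stop scanning any further relations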
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 4140282b..99e5d208 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -223,7 +223,7 @@ def keys(self): prev_keys = [] if self._prev_dict is not None: prev_keys = self._prev_dict.keys() - return list(set(prev_keys + dict.keys(self))) + return list(set(prev_keys + list(dict.keys(self)))) def load_previous(self, path=None): """Load previous copy of config from disk. From 991b50ccb1344b815fe53b40b4080058e568296c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 11:05:13 +0000 Subject: [PATCH 0554/2699] Merged charmhelpers --- .../charmhelpers/contrib/hahelpers/apache.py | 13 +- .../charmhelpers/contrib/hahelpers/cluster.py | 26 +- .../hooks/charmhelpers/contrib/network/ip.py | 246 +++++++++++++++--- .../contrib/storage/linux/utils.py | 5 +- ceph-radosgw/hooks/charmhelpers/core/fstab.py | 18 +- .../hooks/charmhelpers/core/hookenv.py | 32 ++- ceph-radosgw/hooks/charmhelpers/core/host.py | 39 +-- .../charmhelpers/core/services/__init__.py | 4 +- .../charmhelpers/core/services/helpers.py | 12 +- .../hooks/charmhelpers/core/templating.py | 3 +- .../hooks/charmhelpers/fetch/__init__.py | 26 +- .../hooks/charmhelpers/fetch/archiveurl.py | 69 +++-- .../hooks/charmhelpers/fetch/bzrurl.py | 6 +- .../hooks/charmhelpers/fetch/giturl.py | 8 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 10 +- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- 18 files changed, 391 insertions(+), 139 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 8d5fb8ba..6616ffff 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,20 +20,27 @@ ) -def get_cert(): +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') if not (cert and key): log("Inspecting identity-service relations for SSL certificate.", level=INFO) cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert', + cert = relation_get(ssl_cert_attr, rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key', + key = relation_get(ssl_key_attr, rid=r_id, unit=unit) return (cert, key) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 7151b1d0..52ce4b7c 100644 
--- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,9 +13,10 @@ import subprocess import os - from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -77,7 +78,7 @@ def is_crm_leader(resource): "show", resource ] try: - status = subprocess.check_output(cmd) + status = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -139,10 +140,9 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ssl_cert', rid=r_id, unit=unit), - relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), ] # NOTE: works around (LP: #1203241) @@ -151,34 +151,42 @@ def https(): return False -def determine_api_port(public_port): +def determine_api_port(public_port, singlenode_mode=False): ''' Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the API service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 if https(): i += 1 return public_port - (i * 10) -def determine_apache_port(public_port): +def determine_apache_port(public_port, singlenode_mode=False): ''' Description: Determine correct apache listening port based on public IP + state of the cluster. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the HAProxy service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 return public_port - (i * 10) @@ -198,7 +206,7 @@ def get_hacluster_config(): for setting in settings: conf[setting] = config_get(setting) missing = [] - [missing.append(s) for s, v in conf.iteritems() if v is None] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] if missing: log('Insufficient config data to configure hacluster.', level=ERROR) raise HAIncompleteConfig diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index f8cc1975..8dc83165 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,13 @@ -import sys +import glob +import re +import subprocess from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + log ) try: @@ -28,29 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. 
For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -62,6 +64,7 @@ def not_found_error_out(): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -74,20 +77,20 @@ def not_found_error_out(): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -105,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -132,56 +137,215 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. 
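Note: a quick illustration of this helper's contract as defined here (addresses are illustrative, from documentation ranges):

    format_ipv6_addr('2001:db8::1')   # -> '[2001:db8::1]'
    format_ipv6_addr('192.0.2.1')     # -> None (not an IPv6 address)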
+ """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any.""" + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) - return ipv6_addr[0] + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + else: + ifaces = [iface] -def format_ipv6_addr(address): + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. 
""" - if is_ipv6(address): - address = "[%s]" % address - else: - log("Not an valid ipv6 address: %s" % address, - level=ERROR) - address = None - return address + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." % iface) + + return [] + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] + + +def is_bridge_member(nic): + """Check if a given nic is a member of a bridge.""" + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + + return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. ''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/fstab.py b/ceph-radosgw/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/fstab.py +++ b/ceph-radosgw/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ def __init__(self, device, mountpoint, filesystem, options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ def __init__(self, path=None): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ def _hydrate_entry(self, line): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ def add_entry(self, entry): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ def remove_entry(self, entry): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 083a7090..99e5d208 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +72,12 @@ def log(message, level=None): subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -218,7 +223,7 @@ def keys(self): prev_keys = [] if self._prev_dict is not None: prev_keys = self._prev_dict.keys() - return list(set(prev_keys + dict.keys(self))) + return list(set(prev_keys + list(dict.keys(self)))) def load_previous(self, path=None): """Load previous copy of config from disk. 
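Note: the keys() change is one of the recurring Python 3 fixes in this sync. On Python 3, dict.keys() returns a view object that cannot be concatenated to a list with +, so both operands are materialised first:

    prev_keys = ['a', 'b']
    current = {'b': 1, 'c': 2}
    # prev_keys + current.keys()  -> TypeError under Python 3
    merged = list(set(list(prev_keys) + list(current.keys())))
    sorted(merged)  # ['a', 'b', 'c'] on both interpreters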
@@ -269,7 +274,7 @@ def save(self): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -284,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -303,10 +309,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -318,7 +324,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -335,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -346,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -455,7 +463,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 0b8bdc50..e6783d9b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -14,11 +14,12 @@ import subprocess import hashlib from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError 
as e: return 'unrecognized service' not in e.output else: @@ -115,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +135,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +151,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +182,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +196,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +223,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +302,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,14 +311,14 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): @@ -335,7 +340,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -346,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git 
a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ def store_context(self, file_name, config_data): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ def read_context(self, file_name): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. 
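Note: the permission defaults changing from 0444 to 0o444 (and 0600/0755 likewise, above) are another Python 3 requirement: bare leading-zero octal literals were removed from the language, while the 0o prefix is accepted by Python 2.6+ and Python 3 alike:

    perms = 0o444          # read-only for owner, group and other
    assert perms == 292    # 4*64 + 4*8 + 4
    # perms = 0444         -> SyntaxError on Python 3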
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 6724d293..0a126fc3 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -149,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -182,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -193,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -256,11 +258,11 @@ def add_source(source, key=None): elif source == 'distro': pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -297,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -401,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..8a4624b2 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,23 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + 
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +30,24 @@ from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. @@ -42,20 +75,20 @@ def download(self, source, dest): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +124,21 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ def install(self, source): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", 
branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index 7d672460..61684cb6 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + try: from git import Repo except ImportError: @@ -17,7 +21,7 @@ class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - #TODO (mattyw) no support for ssh git@ yet + # TODO (mattyw) no support for ssh git@ yet if url_parts.scheme not in ('http', 'https', 'git'): return False else: @@ -36,7 +40,7 @@ def install(self, source, branch="master"): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) except OSError as e: diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py index d859d367..3d3ef339 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,6 +1,6 @@ import amulet - import os +import six class AmuletDeployment(object): @@ -52,12 +52,12 @@ def _add_services(self, this_service, other_services): def _add_relations(self, relations): """Add all of the relations for the services.""" - for k, v in relations.iteritems(): + for k, v in six.iteritems(relations): self.d.relate(k, v) def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _deploy(self): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index c843333f..d333e63b 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -5,6 +5,8 @@ import sys import time +import six + class AmuletUtils(object): """Amulet utilities. @@ -58,7 +60,7 @@ def validate_services(self, commands): Verify the specified services are running on the corresponding service units. """ - for k, v in commands.iteritems(): + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) if code != 0: @@ -100,11 +102,11 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. 
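Note: six.string_types and six.integer_types, used throughout these test helpers, are the portable spellings of the removed basestring and long builtins:

    import six
    isinstance(u'vip', six.string_types)     # True on 2 (str/unicode) and 3 (str)
    isinstance(10 ** 20, six.integer_types)  # covers long on 2, int on 3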
""" - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: - if (isinstance(v, basestring) or + if (isinstance(v, six.string_types) or isinstance(v, bool) or - isinstance(v, (int, long))): + isinstance(v, six.integer_types)): if v != actual[k]: return "{}:{}".format(k, actual[k]) elif not v(actual[k]): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): expected service catalog endpoints. """ self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: From 7c1db524fbbc0a0e421efe9ad4b3c5e4a80c61ea Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 13:27:36 +0000 Subject: [PATCH 0555/2699] Fix net splits vip config --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + ceph-radosgw/hooks/hooks.py | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 16e1d202..b6e048c8 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -11,3 +11,4 @@ include: - payload.execd - contrib.openstack.alternatives - contrib.network.ip + - charmhelpers.contrib.openstack.ip diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3ebcbee6..f194285a 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -47,6 +47,11 @@ get_iface_for_address, get_netmask_for_address ) +from charmhelpers.contrib.openstack.ip import ( + canonical_url, + PUBLIC, INTERNAL, ADMIN +) + hooks = Hooks() @@ -244,13 +249,13 @@ def identity_joined(relid=None): sys.exit(1) if not cluster.eligible_leader(CEPHRG_HA_RES): return - if cluster.is_clustered(): - hostname = config('vip') - else: - hostname = unit_get('private-address') - admin_url = 'http://{}:80/swift'.format(hostname) - internal_url = public_url = '{}/v1'.format(admin_url) + port = 80 + admin_url = '%s:%i' % (canonical_url(CONFIGS, ADMIN), port) + internal_url = '%s:%s/v1' % \ + (canonical_url(CONFIGS, INTERNAL), port) + public_url = '%s:%s/v1' % \ + (canonical_url(CONFIGS, PUBLIC), port) relation_set(service='swift', 
region=config('region'), public_url=public_url, internal_url=internal_url, From fa6d3642a9b8968f39688dd9406815b2dd28469c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 13:30:03 +0000 Subject: [PATCH 0556/2699] Add required charmhelper module --- ceph-radosgw/charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/openstack/ip.py | 93 +++++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index b6e048c8..39d2bf71 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -11,4 +11,4 @@ include: - payload.execd - contrib.openstack.alternatives - contrib.network.ip - - charmhelpers.contrib.openstack.ip + - contrib.openstack.ip diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..f062c807 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,93 @@ +from charmhelpers.core.hookenv import ( + config, + unit_get, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' + +ADDRESS_MAP = { + PUBLIC: { + 'config': 'os-public-network', + 'fallback': 'public-address' + }, + INTERNAL: { + 'config': 'os-internal-network', + 'fallback': 'private-address' + }, + ADMIN: { + 'config': 'os-admin-network', + 'fallback': 'private-address' + } +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + return '%s://%s' % (scheme, address) + + +def resolve_address(endpoint_type=PUBLIC): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured. 
+ + :param endpoint_type: Network endpoing type + """ + resolved_address = None + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + clustered = is_clustered() + if clustered: + if not net_addr: + # If no net-splits defined, we expect a single vip + resolved_address = vips[0] + else: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + resolved_address = get_address_in_network(net_addr, fallback_addr) + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. (net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address From 614124e0e5681cf1b4b84f694cc79d2c73befa05 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 13:36:57 +0000 Subject: [PATCH 0557/2699] Need config object --- ceph-radosgw/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index f194285a..b5bed990 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -244,6 +244,7 @@ def restart(): @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): + CONFIGS = register_configs() if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) From bece9504d5bdb2c2c0640d5d762090805729a9bd Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 13:56:20 +0000 Subject: [PATCH 0558/2699] Rather than port charm to use standard contexts, define local canonical_url to mimic behaviour of charmhelpers.contrib.openstack.ip.canonical_url --- ceph-radosgw/hooks/hooks.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b5bed990..fd92ed4e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -45,11 +45,12 @@ from charmhelpers.contrib.network.ip import ( get_iface_for_address, - get_netmask_for_address + get_netmask_for_address, + is_ipv6, ) from charmhelpers.contrib.openstack.ip import ( - canonical_url, - PUBLIC, INTERNAL, ADMIN + resolve_address, + PUBLIC, INTERNAL, ADMIN, ) hooks = Hooks() @@ -242,9 +243,18 @@ def restart(): open_port(port=80) +# XXX Define local canonical_url until charm has been updated to use the +# standard context architecture. 
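Note: with the net-split support above, a clustered unit picks whichever configured vip falls inside the endpoint's network, via is_address_in_network(). Roughly, with illustrative values:

    vips = '10.5.100.1 192.168.20.1'.split()
    net_addr = '192.168.20.0/24'   # e.g. config('os-public-network')
    vip = [v for v in vips if is_address_in_network(net_addr, v)][0]
    # vip == '192.168.20.1'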
+def canonical_url(configs, endpoint_type=PUBLIC): + scheme = 'http' + address = resolve_address(endpoint_type) + if is_ipv6(address) + address = "[{}]".format(address) + return '%s://%s' % (scheme, address) + + @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): - CONFIGS = register_configs() if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) @@ -252,11 +262,11 @@ def identity_joined(relid=None): return port = 80 - admin_url = '%s:%i' % (canonical_url(CONFIGS, ADMIN), port) + admin_url = '%s:%i' % (canonical_url(ADMIN), port) internal_url = '%s:%s/v1' % \ - (canonical_url(CONFIGS, INTERNAL), port) + (canonical_url(INTERNAL), port) public_url = '%s:%s/v1' % \ - (canonical_url(CONFIGS, PUBLIC), port) + (canonical_url(PUBLIC), port) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, From 1a8591feed11286086f398a11098253314459ab1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 14:20:35 +0000 Subject: [PATCH 0559/2699] Fixed typo --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index fd92ed4e..ed382ef1 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -248,7 +248,7 @@ def restart(): def canonical_url(configs, endpoint_type=PUBLIC): scheme = 'http' address = resolve_address(endpoint_type) - if is_ipv6(address) + if is_ipv6(address): address = "[{}]".format(address) return '%s://%s' % (scheme, address) From 4e3af22d70b5f22580bac262f24ee6641c52f331 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 14:54:58 +0000 Subject: [PATCH 0560/2699] Fix keystone endponit --- ceph-radosgw/hooks/hooks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ed382ef1..0abc4328 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -262,10 +262,10 @@ def identity_joined(relid=None): return port = 80 - admin_url = '%s:%i' % (canonical_url(ADMIN), port) - internal_url = '%s:%s/v1' % \ + admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port) + internal_url = '%s:%s/swift/v1' % \ (canonical_url(INTERNAL), port) - public_url = '%s:%s/v1' % \ + public_url = '%s:%s/swift/v1' % \ (canonical_url(PUBLIC), port) relation_set(service='swift', region=config('region'), From b5dcafa22288dd01cc394fa5e79d5875ae034a38 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 15:46:51 +0000 Subject: [PATCH 0561/2699] Update endpoints, if needed on config-changed --- ceph-radosgw/hooks/hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 0abc4328..759dba18 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -162,6 +162,8 @@ def config_changed(): apache_sites() apache_modules() apache_reload() + for r_id in relation_ids('identity-service'): + identity_joined(relid=r_id) def get_mon_hosts(): From bfaf066a1dcd506097b732c572de893d48cec376 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Nov 2014 15:57:11 +0000 Subject: [PATCH 0562/2699] If cluster has changed we may be ready to register vip so trigger identity_joined --- ceph-radosgw/hooks/hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 759dba18..3cb4b0e2 100755 --- 
a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -286,7 +286,8 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') def cluster_changed(): - print "Do cluster changed actions here" + for r_id in relation_ids('identity-service'): + identity_joined(relid=r_id) @hooks.hook('ha-relation-joined') From f83f7f763a2770ffcd6e9f130ae17ad95d27b34a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 1 Dec 2014 17:35:21 +0000 Subject: [PATCH 0563/2699] Add __init__.py to charmhelpers sync & perform sync to get fix for recent charmhelpers break --- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index c401e72e..4586c919 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,6 +1,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: + - __init__ - core - fetch - contrib.storage.linux: diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From ffc1ecbb248b1741f33f872787a48cd517928786 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 1 Dec 2014 17:35:21 +0000 Subject: [PATCH 0564/2699] Add __init__.py to charmhelpers sync & perform sync to get fix for recent charmhelpers break --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index c401e72e..4586c919 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,6 +1,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: + - __init__ - core - fetch - contrib.storage.linux: diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
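Note: the bootstrap shim added here installs the charm's Python dependencies with apt before first import, selecting the python- or python3- package to match the running interpreter. The same pattern, generalised (package names are illustrative):

    import subprocess
    import sys

    def ensure_apt_package(py2_pkg, py3_pkg):
        # Pick the distro package matching the interpreter and install it.
        pkg = py2_pkg if sys.version_info.major == 2 else py3_pkg
        subprocess.check_call(['apt-get', 'install', '-y', pkg])

    # e.g. ensure_apt_package('python-six', 'python3-six')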
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From 8f60a9cfea2c028f763e85375e0c61c57bf70054 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 16:46:45 +0000 Subject: [PATCH 0565/2699] Remove __init__ from charm-helpers yaml. --- ceph-proxy/charm-helpers-hooks.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 4586c919..c401e72e 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,7 +1,6 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - - __init__ - core - fetch - contrib.storage.linux: From 134bb8a0c93748e4f202582e14650e3d7e467def Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 16:46:45 +0000 Subject: [PATCH 0566/2699] Remove __init__ from charm-helpers yaml. --- ceph-mon/charm-helpers-hooks.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 4586c919..c401e72e 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,7 +1,6 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - - __init__ - core - fetch - contrib.storage.linux: From 00971f3a0bbd2038eb48447423813a18670b2593 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 16:47:24 +0000 Subject: [PATCH 0567/2699] Remove hooks/charmhelpers/__init__.py and re-sync charm-helpers. --- ceph-proxy/hooks/charmhelpers/__init__.py | 22 ------------------- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 20 +++++++++++++---- ceph-proxy/hooks/charmhelpers/core/host.py | 22 ++++++++++++++++++- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 9 +++++--- 4 files changed, 43 insertions(+), 30 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index b46e2e23..e69de29b 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -1,22 +0,0 @@ -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
-import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 99e5d208..69ae4564 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -68,6 +68,8 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) @@ -393,21 +395,31 @@ def relations_of_type(reltype=None): return relation_data +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" - charmdir = os.environ.get('CHARM_DIR', '') - mdf = open(os.path.join(charmdir, 'metadata.yaml')) - md = yaml.safe_load(mdf) rel_types = [] + md = metadata() for key in ('provides', 'requires', 'peers'): section = md.get(key) if section: rel_types.extend(section.keys()) - mdf.close() return rel_types +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + @cached def relations(): """Get a nested dictionary of relation data for all related units""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index e6783d9b..c6f1680a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -101,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -368,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 61684cb6..f3aa2821 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -34,11 +34,14 @@ def clone(self, source, dest, branch): repo = Repo.clone_from(source, dest) repo.git.checkout(branch) - def install(self, source, branch="master"): + def 
install(self, source, branch="master", dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: From 345fda0a4d7a7775aaa7baad5fa68758806d16ac Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 16:47:24 +0000 Subject: [PATCH 0568/2699] Remove hooks/charmhelpers/__init__.py and re-sync charm-helpers. --- ceph-mon/hooks/charmhelpers/__init__.py | 22 --------------------- ceph-mon/hooks/charmhelpers/core/hookenv.py | 20 +++++++++++++++---- ceph-mon/hooks/charmhelpers/core/host.py | 22 ++++++++++++++++++++- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 9 ++++++--- 4 files changed, 43 insertions(+), 30 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index b46e2e23..e69de29b 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -1,22 +0,0 @@ -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 99e5d208..69ae4564 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -68,6 +68,8 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) @@ -393,21 +395,31 @@ def relations_of_type(reltype=None): return relation_data +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" - charmdir = os.environ.get('CHARM_DIR', '') - mdf = open(os.path.join(charmdir, 'metadata.yaml')) - md = yaml.safe_load(mdf) rel_types = [] + md = metadata() for key in ('provides', 'requires', 'peers'): section = md.get(key) if section: rel_types.extend(section.keys()) - mdf.close() return rel_types +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + @cached def relations(): """Get a nested dictionary of relation data for all related units""" diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index e6783d9b..c6f1680a 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -101,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): 
return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -368,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 61684cb6..f3aa2821 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -34,11 +34,14 @@ def clone(self, source, dest, branch): repo = Repo.clone_from(source, dest) repo.git.checkout(branch) - def install(self, source, branch="master"): + def install(self, source, branch="master", dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: From 626920f200b066c65a0f941d5f03c80ef8798347 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 17:47:07 +0000 Subject: [PATCH 0569/2699] Sync charm-helpers and actually pick up charmhelpers/__init__.py this time. --- ceph-proxy/hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ ceph-proxy/tests/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-proxy/tests/charmhelpers/__init__.py +++ b/ceph-proxy/tests/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
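The add_group() helper shown above is idempotent: it first looks the group up with grp.getgrnam() and only shells out to addgroup when that raises KeyError, returning the grp struct either way. A short usage sketch (the group name is illustrative):

from charmhelpers.core.host import add_group

# Runs: addgroup --system ceph, but only if the group does not exist yet.
group_info = add_group('ceph', system_group=True)
print(group_info.gr_gid)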
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From bedbd290e98bc61941acfc7fdea806a417e86b16 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 17:47:07 +0000 Subject: [PATCH 0570/2699] Sync charm-helpers and actually pick up charmhelpers/__init__.py this time. --- ceph-mon/hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ ceph-mon/tests/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-mon/tests/charmhelpers/__init__.py +++ b/ceph-mon/tests/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From 37edd33ec138c390d9f4d431fcd110eb4e5aeb99 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 15 Dec 2014 09:23:46 +0000 Subject: [PATCH 0571/2699] [trivial] Resync charm-helpers --- ceph-proxy/hooks/charmhelpers/core/templating.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 83133fa4..569eaed6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target)) + host.mkdir(os.path.dirname(target), owner, group) host.write_file(target, content, owner, group, perms) From 8f414098f3ac34d88fe5cd4c1744fd122f0cf05c Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 15 Dec 2014 09:23:46 +0000 Subject: [PATCH 0572/2699] [trivial] Resync charm-helpers --- ceph-mon/hooks/charmhelpers/core/templating.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 83133fa4..569eaed6 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target)) + host.mkdir(os.path.dirname(target), owner, group) host.write_file(target, content, owner, group, perms) From 02f06b44abbce243f5235d1ad48d5c718a665af4 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 15 Dec 2014 09:56:18 +0000 Subject: [PATCH 0573/2699] [trivial] Resync charm-helpers --- ceph-osd/hooks/charmhelpers/__init__.py | 22 +++++++++++++++++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 22 ++++++++++++++----- ceph-osd/hooks/charmhelpers/core/host.py | 22 ++++++++++++++++++- .../hooks/charmhelpers/core/templating.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 17 +++++++++----- ceph-osd/tests/charmhelpers/__init__.py | 22 +++++++++++++++++++ 6 files changed, 95 insertions(+), 12 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
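The one-line templating fix being resynced for each charm above matters whenever render() has to create the target directory: previously the parent directory was made with mkdir()'s default root ownership even though the rendered file itself received the requested owner and group. After the fix both are consistent; inside render() the tail end is roughly:

# before: parent directory created with default root:root ownership
host.mkdir(os.path.dirname(target))
# after: parent directory gets the same ownership as the rendered file
host.mkdir(os.path.dirname(target), owner, group)
host.write_file(target, content, owner, group, perms)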
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 4140282b..69ae4564 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -68,6 +68,8 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) @@ -223,7 +225,7 @@ def keys(self): prev_keys = [] if self._prev_dict is not None: prev_keys = self._prev_dict.keys() - return list(set(prev_keys + dict.keys(self))) + return list(set(prev_keys + list(dict.keys(self)))) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -393,21 +395,31 @@ def relations_of_type(reltype=None): return relation_data +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" - charmdir = os.environ.get('CHARM_DIR', '') - mdf = open(os.path.join(charmdir, 'metadata.yaml')) - md = yaml.safe_load(mdf) rel_types = [] + md = metadata() for key in ('provides', 'requires', 'peers'): section = md.get(key) if section: rel_types.extend(section.keys()) - mdf.close() return rel_types +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + @cached def relations(): """Get a nested dictionary of relation data for all related units""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index e6783d9b..c6f1680a 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -101,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -368,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 83133fa4..569eaed6 100644 --- 
a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target)) + host.mkdir(os.path.dirname(target), owner, group) host.write_file(target, content, owner, group, perms) diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 7d672460..f3aa2821 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -5,6 +5,10 @@ ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + try: from git import Repo except ImportError: @@ -17,7 +21,7 @@ class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - #TODO (mattyw) no support for ssh git@ yet + # TODO (mattyw) no support for ssh git@ yet if url_parts.scheme not in ('http', 'https', 'git'): return False else: @@ -30,13 +34,16 @@ def clone(self, source, dest, branch): repo = Repo.clone_from(source, dest) repo.git.checkout(branch) - def install(self, source, branch="master"): + def install(self, source, branch="master", dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) except OSError as e: diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-osd/tests/charmhelpers/__init__.py +++ b/ceph-osd/tests/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From e2775d2739f4eafd839f41ffb5c0c774462dd6cb Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 15 Dec 2014 21:13:11 +0000 Subject: [PATCH 0574/2699] Drop render_template() and use charm-helpers render() instead. 
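With this change the charm writes ceph.conf through the shared templating helper, which also applies ownership and permissions. A minimal sketch of the call as it now appears in hooks.py; the target path and context contents are illustrative:

from charmhelpers.core.templating import render

context = {'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc'}  # illustrative
render('ceph.conf', '/var/lib/charm/ceph/ceph.conf', context, perms=0o644)

render() resolves the source template against the charm's templates directory, so no jinja2 Environment needs to be constructed by hand.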
--- ceph-proxy/hooks/hooks.py | 5 ++--- ceph-proxy/hooks/utils.py | 16 ---------------- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7566ce4c..98eeebd0 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -46,9 +46,9 @@ format_ipv6_addr ) from charmhelpers.core.sysctl import create as create_sysctl +from charmhelpers.core.templating import render from utils import ( - render_template, get_public_addr, assert_charm_supports_ipv6 ) @@ -98,8 +98,7 @@ def emit_cephconf(): # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) mkdir(os.path.dirname(charm_ceph_conf)) - with open(charm_ceph_conf, 'w') as cephconf: - cephconf.write(render_template('ceph.conf', cephcontext)) + render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index ada3563b..5a196d47 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -28,15 +28,6 @@ get_ipv6_addr ) -TEMPLATES_DIR = 'templates' - -try: - import jinja2 -except ImportError: - apt_install(filter_installed_packages(['python-jinja2']), - fatal=True) - import jinja2 - try: import dns.resolver except ImportError: @@ -45,13 +36,6 @@ import dns.resolver -def render_template(template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir)) - template = templates.get_template(template_name) - return template.render(context) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From 9077df9117e95c0e842535bf91b4470d1674e0fc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 15 Dec 2014 21:13:11 +0000 Subject: [PATCH 0575/2699] Drop render_template() and use charm-helpers render() instead. 
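For reference, the private helper removed here was a thin jinja2 wrapper; its core was equivalent to the following sketch (TEMPLATES_DIR was 'templates'):

import jinja2

def render_template(template_name, context, template_dir='templates'):
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    return env.get_template(template_name).render(context)

charmhelpers' render() performs the same lookup-and-render step but additionally writes the result with the requested owner, group and permissions, which is why the explicit open()/write() in hooks.py disappears as well.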
--- ceph-mon/hooks/hooks.py | 5 ++--- ceph-mon/hooks/utils.py | 16 ---------------- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7566ce4c..98eeebd0 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -46,9 +46,9 @@ format_ipv6_addr ) from charmhelpers.core.sysctl import create as create_sysctl +from charmhelpers.core.templating import render from utils import ( - render_template, get_public_addr, assert_charm_supports_ipv6 ) @@ -98,8 +98,7 @@ def emit_cephconf(): # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) mkdir(os.path.dirname(charm_ceph_conf)) - with open(charm_ceph_conf, 'w') as cephconf: - cephconf.write(render_template('ceph.conf', cephcontext)) + render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index ada3563b..5a196d47 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -28,15 +28,6 @@ get_ipv6_addr ) -TEMPLATES_DIR = 'templates' - -try: - import jinja2 -except ImportError: - apt_install(filter_installed_packages(['python-jinja2']), - fatal=True) - import jinja2 - try: import dns.resolver except ImportError: @@ -45,13 +36,6 @@ import dns.resolver -def render_template(template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir)) - template = templates.get_template(template_name) - return template.render(context) - - def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: From f039790085a1be7ff4dcad5d165aea0c7596d7bd Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 17 Dec 2014 14:51:22 +0000 Subject: [PATCH 0576/2699] Sync charm-helpers. --- .../contrib/storage/linux/ceph.py | 43 +++++++++++++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 11 +++-- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. 
+ """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): From a787954ec7bc4cc490cf0719471b75c94c46099b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 17 Dec 2014 14:51:22 +0000 Subject: [PATCH 0577/2699] Sync charm-helpers. --- .../contrib/storage/linux/ceph.py | 43 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 11 +++-- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. 
+ """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): From 234b1c0fab66d3aaa43f7e87f051d2524f207595 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 9 Jan 2015 16:20:04 +0000 Subject: [PATCH 0578/2699] Fix typo from last fix --- ceph-proxy/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 6baf03b2..132c8f45 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -163,6 +163,7 @@ options: persistently e.g. '{ kernel.pid_max : 4194303 }'. nagios_context: default: "juju" + description: | Used by the nrpe-external-master subordinate charm. A string that will be prepended to instance name to set the host name in nagios. So for instance the hostname would be something like: From 153887fee50473194a68e73ed27026e777880e6c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 9 Jan 2015 16:20:04 +0000 Subject: [PATCH 0579/2699] Fix typo from last fix --- ceph-mon/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 6baf03b2..132c8f45 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -163,6 +163,7 @@ options: persistently e.g. '{ kernel.pid_max : 4194303 }'. nagios_context: default: "juju" + description: | Used by the nrpe-external-master subordinate charm. A string that will be prepended to instance name to set the host name in nagios. 
So for instance the hostname would be something like:
From 3c8943e0879c77ccfa8de765b966da6ffef3fa68 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:03:58 +0000 Subject: [PATCH 0580/2699] Use nrpe functions from charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 99 ++++++++++++++++++- .../contrib/charmsupport/volumes.py | 7 +- .../contrib/storage/linux/ceph.py | 43 ++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 11 ++- .../hooks/charmhelpers/fetch/__init__.py | 9 +- ceph-proxy/hooks/hooks.py | 26 ++--- 6 files changed, 165 insertions(+), 30 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1815dad2..f3a936d0 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,6 +18,7 @@ log, relation_ids, relation_set, + relations_of_type, ) from charmhelpers.core.host import service @@ -54,6 +55,12 @@ # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup # # 3.
monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py index 0f905dff..d61aa47f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -2,7 +2,8 @@ Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. -Configuration stanzas: +Configuration stanzas:: + volume-ephemeral: type: boolean default: true @@ -20,7 +21,8 @@ is 'true' and no volume-map value is set. Use 'juju set' to set a value and 'juju resolved' to complete configuration. 
-Usage: +Usage:: + from charmsupport.volumes import configure_volume, VolumeConfigurationError from charmsupport.hookenv import log, ERROR def post_mount_hook(): @@ -34,6 +36,7 @@ def post_mount_hook(): after_change=post_mount_hook) except VolumeConfigurationError: log('Storage could not be configured', ERROR) + ''' # XXX: Known limitations diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', 
} # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 20a6bd8e..b3c04893 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -24,7 +24,6 @@ relation_get, relation_set, remote_unit, - local_unit, Hooks, UnregisteredHookError, service_name, relations_of_type @@ -60,7 +59,7 @@ process_requests ) -from charmhelpers.contrib.charmsupport.nrpe import NRPE +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() @@ -351,6 +350,8 @@ def start(): @hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): + # python-dbus is used by check_upstart_job + apt_install('python-dbus') log('Refreshing nagios checks') if os.path.isdir(NAGIOS_PLUGINS): rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', @@ -365,26 +366,15 @@ def update_nrpe_config(): write_file(STATUS_CRONFILE, cronjob) # Find out if nrpe set nagios_hostname - hostname = None - host_context = None - for rel in relations_of_type('nrpe-external-master'): - if 'nagios_hostname' in rel: - hostname = rel['nagios_hostname'] - host_context = rel['nagios_host_context'] - break - nrpe = NRPE(hostname=hostname) - - if host_context: - current_unit = "%s:%s" % (host_context, local_unit()) - else: - current_unit = local_unit() - - nrpe.add_check( + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe_setup.add_check( shortname="ceph", description='Check Ceph health {%s}' % current_unit, check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) ) - nrpe.write() + nrpe_setup.write() if __name__ == '__main__': From 9ffc1561f0ef3765fc80775174f843d5c5fdf018 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:03:58 +0000 Subject: [PATCH 0581/2699] Use rnpe functions from charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 99 ++++++++++++++++++- .../contrib/charmsupport/volumes.py | 7 +- .../contrib/storage/linux/ceph.py | 43 ++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 11 ++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 9 +- ceph-mon/hooks/hooks.py | 26 ++--- 6 files changed, 165 insertions(+), 30 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1815dad2..f3a936d0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,6 +18,7 @@ log, relation_ids, relation_set, + relations_of_type, ) from charmhelpers.core.host import service @@ -54,6 +55,12 @@ # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup # # 3. 
Add custom checks (Nagios plugins) to files/nrpe-external-master # @@ -138,7 +145,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname): + def write(self, nagios_context, hostname, nagios_servicegroups=None): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -150,16 +157,21 @@ def write(self, nagios_context, hostname): log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: - self.write_service_config(nagios_context, hostname) + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) - def write_service_config(self, nagios_context, hostname): + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups=None): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) + if not nagios_servicegroups: + nagios_servicegroups = nagios_context + templ_vars = { 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_context, + 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, @@ -183,6 +195,10 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = 'juju' self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -208,7 +224,8 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname) + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } @@ -217,3 +234,75 @@ def write(self): for rid in relation_ids("local-monitors"): relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = 
'/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py index 0f905dff..d61aa47f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -2,7 +2,8 @@ Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. -Configuration stanzas: +Configuration stanzas:: + volume-ephemeral: type: boolean default: true @@ -20,7 +21,8 @@ is 'true' and no volume-map value is set. Use 'juju set' to set a value and 'juju resolved' to complete configuration. -Usage: +Usage:: + from charmsupport.volumes import configure_volume, VolumeConfigurationError from charmsupport.hookenv import log, ERROR def post_mount_hook(): @@ -34,6 +36,7 @@ def post_mount_hook(): after_change=post_mount_hook) except VolumeConfigurationError: log('Storage could not be configured', ERROR) + ''' # XXX: Known limitations diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. 
+ """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', } # The order of this list is very important. 
Handlers should be listed in from diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 20a6bd8e..b3c04893 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -24,7 +24,6 @@ relation_get, relation_set, remote_unit, - local_unit, Hooks, UnregisteredHookError, service_name, relations_of_type @@ -60,7 +59,7 @@ process_requests ) -from charmhelpers.contrib.charmsupport.nrpe import NRPE +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() @@ -351,6 +350,8 @@ def start(): @hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): + # python-dbus is used by check_upstart_job + apt_install('python-dbus') log('Refreshing nagios checks') if os.path.isdir(NAGIOS_PLUGINS): rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', @@ -365,26 +366,15 @@ def update_nrpe_config(): write_file(STATUS_CRONFILE, cronjob) # Find out if nrpe set nagios_hostname - hostname = None - host_context = None - for rel in relations_of_type('nrpe-external-master'): - if 'nagios_hostname' in rel: - hostname = rel['nagios_hostname'] - host_context = rel['nagios_host_context'] - break - nrpe = NRPE(hostname=hostname) - - if host_context: - current_unit = "%s:%s" % (host_context, local_unit()) - else: - current_unit = local_unit() - - nrpe.add_check( + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe_setup.add_check( shortname="ceph", description='Check Ceph health {%s}' % current_unit, check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) ) - nrpe.write() + nrpe_setup.write() if __name__ == '__main__': From 3e4e88830e6b6cfc4653354a1f8476e51557e487 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:03:59 +0000 Subject: [PATCH 0582/2699] Use nrpe functions from charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 102 ++++++++++++++++-- .../contrib/charmsupport/volumes.py | 7 +- ceph-osd/hooks/charmhelpers/core/host.py | 11 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 9 +- ceph-osd/hooks/hooks.py | 28 ++--- 5 files changed, 121 insertions(+), 36 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 51b62d39..f3a936d0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,6 +18,7 @@ log, relation_ids, relation_set, + relations_of_type, ) from charmhelpers.core.host import service @@ -54,6 +55,12 @@ # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup # # 3.
Add custom checks (Nagios plugins) to files/nrpe-external-master # @@ -125,9 +132,6 @@ def __init__(self, shortname, description, check_cmd): def _locate_cmd(self, check_cmd): search_path = ( - '/', - os.path.join(os.environ['CHARM_DIR'], - 'files/nrpe-external-master'), '/usr/lib/nagios/plugins', '/usr/local/lib/nagios/plugins', ) @@ -141,7 +145,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname): + def write(self, nagios_context, hostname, nagios_servicegroups=None): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -153,16 +157,21 @@ def write(self, nagios_context, hostname): log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: - self.write_service_config(nagios_context, hostname) + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) - def write_service_config(self, nagios_context, hostname): + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups=None): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) + if not nagios_servicegroups: + nagios_servicegroups = nagios_context + templ_vars = { 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_context, + 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, @@ -186,6 +195,10 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = 'juju' self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -211,7 +224,8 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname) + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } @@ -220,3 +234,75 @@ def write(self): for rid in relation_ids("local-monitors"): relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, 
unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py index 0f905dff..d61aa47f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -2,7 +2,8 @@ Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. -Configuration stanzas: +Configuration stanzas:: + volume-ephemeral: type: boolean default: true @@ -20,7 +21,8 @@ is 'true' and no volume-map value is set. Use 'juju set' to set a value and 'juju resolved' to complete configuration. -Usage: +Usage:: + from charmsupport.volumes import configure_volume, VolumeConfigurationError from charmsupport.hookenv import log, ERROR def post_mount_hook(): @@ -34,6 +36,7 @@ def post_mount_hook(): after_change=post_mount_hook) except VolumeConfigurationError: log('Storage could not be configured', ERROR) + ''' # XXX: Known limitations diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 
'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index cb18f8e5..b0e3a54f 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -20,8 +20,6 @@ relation_ids, related_units, relation_get, - relations_of_type, - local_unit, Hooks, UnregisteredHookError, service_name @@ -51,7 +49,7 @@ format_ipv6_addr ) -from charmhelpers.contrib.charmsupport.nrpe import NRPE +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() @@ -215,29 +213,17 @@ def upgrade_charm(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): - # Find out if nrpe set nagios_hostname - hostname = None - host_context = None - for rel in relations_of_type('nrpe-external-master'): - if 'nagios_hostname' in rel: - hostname = rel['nagios_hostname'] - host_context = rel['nagios_host_context'] - break - nrpe = NRPE(hostname=hostname) + # python-dbus is used by check_upstart_job apt_install('python-dbus') - - if host_context: - current_unit = "%s:%s" % (host_context, local_unit()) - else: - current_unit = local_unit() - - nrpe.add_check( + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe_setup.add_check( shortname='ceph-osd', description='process check {%s}' % current_unit, check_cmd='check_upstart_job ceph-osd', ) - - nrpe.write() + nrpe_setup.write() if __name__ == '__main__': From 863e355a0a981c649199e88c48301b33cc46e573 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:04:48 +0000 Subject: [PATCH 0583/2699] Add decorators.py after charmhelpers sync --- .../hooks/charmhelpers/core/decorators.py | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 ceph-proxy/hooks/charmhelpers/core/decorators.py diff --git a/ceph-proxy/hooks/charmhelpers/core/decorators.py b/ceph-proxy/hooks/charmhelpers/core/decorators.py new file mode 100644 index 00000000..029a4ef4 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/decorators.py @@ -0,0 +1,41 @@ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
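+
+    Illustrative usage (a sketch; the wrapped command is hypothetical)::
+
+        @retry_on_exception(3, base_delay=2,
+                            exc_type=subprocess.CalledProcessError)
+        def start_mon():
+            subprocess.check_call(['service', 'ceph-mon-all', 'start'])
+
+    With num_retries=3 and base_delay=2 the function is attempted up to
+    four times, sleeping 2s, 4s and then 6s between attempts, since the
+    delay is base_delay * multiplier and multiplier grows by one per
+    failure.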
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 From 8c1d25e36adfc0ebed495f7f8caf4e8a51817d25 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:04:48 +0000 Subject: [PATCH 0584/2699] Add decorators.py after charmhelpers sync --- .../hooks/charmhelpers/core/decorators.py | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 ceph-mon/hooks/charmhelpers/core/decorators.py diff --git a/ceph-mon/hooks/charmhelpers/core/decorators.py b/ceph-mon/hooks/charmhelpers/core/decorators.py new file mode 100644 index 00000000..029a4ef4 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/decorators.py @@ -0,0 +1,41 @@ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 From 7c07cde3ce334f2e213586e05947a733e48944c4 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:04:49 +0000 Subject: [PATCH 0585/2699] Add decorators.py after charmhelpers sync --- ceph-osd/.coverage | 1 + .../hooks/charmhelpers/core/decorators.py | 41 +++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 ceph-osd/.coverage create mode 100644 ceph-osd/hooks/charmhelpers/core/decorators.py diff --git a/ceph-osd/.coverage b/ceph-osd/.coverage new file mode 100644 index 00000000..a78d4453 --- /dev/null +++ b/ceph-osd/.coverage @@ -0,0 +1 @@ +€}q(U collectorqUcoverage v3.7.1qUlinesq}qUA/home/liam/branches/merges/nagios/ceph-osd/unit_tests/__init__.pyq]qKasu. \ No newline at end of file diff --git a/ceph-osd/hooks/charmhelpers/core/decorators.py b/ceph-osd/hooks/charmhelpers/core/decorators.py new file mode 100644 index 00000000..029a4ef4 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/decorators.py @@ -0,0 +1,41 @@ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 From 6014754d68a166c17358ecbe16946ea16eab5d54 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 15:28:48 +0000 Subject: [PATCH 0586/2699] Be explicit about which unit to get settings info from --- ceph-proxy/hooks/hooks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 7566ce4c..c811bdb2 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -303,12 +303,17 @@ def client_relation_joined(relid=None): def client_relation_changed(relid=None): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - settings = relation_get(rid=relid) - if 'broker_req' in settings: + broker_req = None + for unit in related_units(relid): + # relation_get('ceph-public-address', unit, relid) + settings = relation_get(unit=unit, rid=relid) + if 'broker_req' in settings: + broker_req = settings['broker_req'] + if broker_req: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - rsp = process_requests(settings['broker_req']) + rsp = process_requests(broker_req) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: From e7555a6ffe016b6e852bb6ca81529d1c916ae246 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 15:28:48 +0000 Subject: [PATCH 0587/2699] Be explicit about which unit to get settings info from --- ceph-mon/hooks/hooks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 7566ce4c..c811bdb2 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -303,12 +303,17 @@ def client_relation_joined(relid=None): def client_relation_changed(relid=None): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - settings = relation_get(rid=relid) - if 'broker_req' in settings: + broker_req = None + for unit in related_units(relid): + # relation_get('ceph-public-address', unit, relid) + settings = relation_get(unit=unit, rid=relid) + if 'broker_req' in settings: + broker_req = settings['broker_req'] + if broker_req: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - rsp = process_requests(settings['broker_req']) + rsp = process_requests(broker_req) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: From e32ca6c51262a26c15d3cd2fb3ffe57dbd4302b1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 16:28:59 +0000 Subject: [PATCH 0588/2699] Fix bug by removing superfluous call to client_relation_changed in light of mp feedback --- ceph-proxy/hooks/hooks.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c811bdb2..d9043138 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -293,27 +293,20 @@ def client_relation_joined(relid=None): 'ceph-public-address': get_public_addr()} relation_set(relation_id=relid, 
relation_settings=data) - - client_relation_changed(relid=relid) else: log('mon cluster not in quorum - deferring key provision') @hooks.hook('client-relation-changed') -def client_relation_changed(relid=None): +def client_relation_changed(): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - broker_req = None - for unit in related_units(relid): - # relation_get('ceph-public-address', unit, relid) - settings = relation_get(unit=unit, rid=relid) - if 'broker_req' in settings: - broker_req = settings['broker_req'] - if broker_req: + settings = relation_get(rid=relid) + if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - rsp = process_requests(broker_req) + rsp = process_requests(settings['broker_req']) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: From aa963aca886e3f8438431a65e674e27315e6705d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 16:28:59 +0000 Subject: [PATCH 0589/2699] Fix bug by removing superfluous call to client_relation_changed in light of mp feedback --- ceph-mon/hooks/hooks.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c811bdb2..d9043138 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -293,27 +293,20 @@ def client_relation_joined(relid=None): 'ceph-public-address': get_public_addr()} relation_set(relation_id=relid, relation_settings=data) - - client_relation_changed(relid=relid) else: log('mon cluster not in quorum - deferring key provision') @hooks.hook('client-relation-changed') -def client_relation_changed(relid=None): +def client_relation_changed(): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - broker_req = None - for unit in related_units(relid): - # relation_get('ceph-public-address', unit, relid) - settings = relation_get(unit=unit, rid=relid) - if 'broker_req' in settings: - broker_req = settings['broker_req'] - if broker_req: + settings = relation_get(rid=relid) + if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - rsp = process_requests(broker_req) + rsp = process_requests(settings['broker_req']) relation_set(relation_id=relid, relation_settings={'broker_rsp': rsp}) else: From 4479752935579048c010d8f83861b7888943e960 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Jan 2015 10:30:25 +0000 Subject: [PATCH 0590/2699] Remove reference to relid since it is no longer used in client_relation_changed() --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index d9043138..c2020770 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -301,7 +301,7 @@ def client_relation_joined(relid=None): def client_relation_changed(): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - settings = relation_get(rid=relid) + settings = relation_get() if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) From fed2b922028d6881aed775cc39fe217ef863c093 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Jan 2015 10:30:25 +0000 Subject: [PATCH 0591/2699] Remove reference to relid since it is no longer used in client_relation_changed() --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index d9043138..c2020770 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -301,7 +301,7 @@ def client_relation_joined(relid=None): def client_relation_changed(): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - settings = relation_get(rid=relid) + settings = relation_get() if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) From eddbfe949e83caead84584a4691bd0f161188b97 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Jan 2015 10:32:04 +0000 Subject: [PATCH 0592/2699] Remove another reference to relid since it is no longer used in client_relation_changed() --- ceph-proxy/hooks/hooks.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c2020770..d3709ffa 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -307,8 +307,7 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - relation_set(relation_id=relid, - relation_settings={'broker_rsp': rsp}) + relation_set(relation_settings={'broker_rsp': rsp}) else: log('mon cluster not in quorum', level=DEBUG) From e30d6bef396df34e71d3e78f5344d16e5f5b5bff Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Jan 2015 10:32:04 +0000 Subject: [PATCH 0593/2699] Remove another reference to relid since it is no longer used in client_relation_changed() --- ceph-mon/hooks/hooks.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c2020770..d3709ffa 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -307,8 +307,7 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - relation_set(relation_id=relid, - relation_settings={'broker_rsp': rsp}) + relation_set(relation_settings={'broker_rsp': rsp}) else: log('mon cluster not in quorum', level=DEBUG) From a4e36acae066e5640677900afbb6868a07438589 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 07:57:51 +0000 Subject: [PATCH 0594/2699] Add tmp debug --- ceph-radosgw/hooks/hooks.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3cb4b0e2..ee9827ad 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -257,18 +257,22 @@ def canonical_url(configs, endpoint_type=PUBLIC): @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): + log('LY identity_joined called relid: ' + str(relid), level=ERROR) if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) if not cluster.eligible_leader(CEPHRG_HA_RES): + log('LY identity_joined exiting not eligible_leader', level=ERROR) return + log('LY identity_joined I am eligible_leader', level=ERROR) port = 80 admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port) internal_url = '%s:%s/swift/v1' % \ (canonical_url(INTERNAL), port) public_url = '%s:%s/swift/v1' % \ (canonical_url(PUBLIC), port) + log('LY identity_joined setting endpoint for public_url: ' + public_url, level=ERROR) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, @@ -286,7 +290,9 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 
'cluster-relation-joined') def cluster_changed(): + log('LY cluster_changed Cluster has changed calling identity_joined', level=ERROR) for r_id in relation_ids('identity-service'): + log('LY cluster_changed Cluster has changed calling identity_joined: ' + r_id, level=ERROR) identity_joined(relid=r_id) From eed7ac83893acebf756849c5f2f18f15af0ed15b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 08:08:08 +0000 Subject: [PATCH 0595/2699] More debug --- ceph-radosgw/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ee9827ad..4a16e4f7 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -266,6 +266,7 @@ def identity_joined(relid=None): return log('LY identity_joined I am eligible_leader', level=ERROR) + log('LY identity_joined cluster state: ' + str(cluster.is_clustered()), level=ERROR) port = 80 admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port) internal_url = '%s:%s/swift/v1' % \ From 18ae9ac2ad2b06cb26544596920a63382be80c8b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 08:22:24 +0000 Subject: [PATCH 0596/2699] Fix bug with wrong keyword being used in relation_set --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4a16e4f7..d8d4c200 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -279,7 +279,7 @@ def identity_joined(relid=None): public_url=public_url, internal_url=internal_url, admin_url=admin_url, requested_roles=config('operator-roles'), - rid=relid) + relation_id=relid) @hooks.hook('identity-service-relation-changed') From ddba2cf91f4ae5a578f244503d53c9e21ea47333 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 08:29:48 +0000 Subject: [PATCH 0597/2699] Remove debug --- ceph-radosgw/hooks/hooks.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index d8d4c200..880222ae 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -257,23 +257,18 @@ def canonical_url(configs, endpoint_type=PUBLIC): @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): - log('LY identity_joined called relid: ' + str(relid), level=ERROR) if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') sys.exit(1) if not cluster.eligible_leader(CEPHRG_HA_RES): - log('LY identity_joined exiting not eligible_leader', level=ERROR) return - log('LY identity_joined I am eligible_leader', level=ERROR) - log('LY identity_joined cluster state: ' + str(cluster.is_clustered()), level=ERROR) port = 80 admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port) internal_url = '%s:%s/swift/v1' % \ (canonical_url(INTERNAL), port) public_url = '%s:%s/swift/v1' % \ (canonical_url(PUBLIC), port) - log('LY identity_joined setting endpoint for public_url: ' + public_url, level=ERROR) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, @@ -291,9 +286,7 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') def cluster_changed(): - log('LY cluster_changed Cluster has changed calling identity_joined', level=ERROR) for r_id in relation_ids('identity-service'): - log('LY cluster_changed Cluster has changed calling identity_joined: ' + r_id, level=ERROR) identity_joined(relid=r_id) From 
b96e0c3b2662e1db1a8af3645f16df4df8e24f6f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 08:49:35 +0000 Subject: [PATCH 0598/2699] Debug back as bug is still not squashed --- ceph-radosgw/hooks/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 880222ae..75928305 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -269,6 +269,7 @@ def identity_joined(relid=None): (canonical_url(INTERNAL), port) public_url = '%s:%s/swift/v1' % \ (canonical_url(PUBLIC), port) + log('LY identity_joined relation_set public_url=%s relation_id=%s' % (public_url, str(relid)), level=ERROR) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, @@ -286,7 +287,9 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') def cluster_changed(): + log('LY In cluster_changed triggering identity_joined', level=ERROR) for r_id in relation_ids('identity-service'): + log('LY In cluster_changed triggering identity_joined for relid: ' + r_id, level=ERROR) identity_joined(relid=r_id) From 7051f59097e8ef95ce8dfc08877ef237eda49ace Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 14 Jan 2015 09:10:04 +0000 Subject: [PATCH 0599/2699] Add support of use of embedded webserver --- ceph-radosgw/config.yaml | 10 ++++++++ ceph-radosgw/hooks/hooks.py | 44 +++++++++++++++++++++++--------- ceph-radosgw/templates/ceph.conf | 6 ++++- 3 files changed, 47 insertions(+), 13 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 469bef34..5a70b7e4 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -57,3 +57,13 @@ options: 100-continue. See the following page for more info: http://ceph.com/docs/dumpling/radosgw/manual-install/#continue-support + use-embedded-webserver: + type: boolean + default: false + description: | + Newer versions of the Ceph RADOS Gateway support use of an embedded web + container instead of Apache + mod-fastcgi, avoiding some of the nuances + of using the stock mod-fastcgi packages from Ubuntu. + . + Enable this option to disable use of Apache and enable the embedded + web container feature. 
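For reference, a deployer exercises this option with the 'juju set' syntax this tree already uses elsewhere (an illustrative sketch, not part of the patch):

    juju set ceph-radosgw use-embedded-webserver=true

With the option enabled, the hook changes below purge the Apache packages instead of installing them, and the ceph.conf template emits "rgw frontends = civetweb port=80" in place of the mod-fastcgi settings.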
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cdc7053a..ad432f58 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -28,9 +28,13 @@ from charmhelpers.fetch import ( apt_update, apt_install, + apt_purge, add_source, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + restart_on_change +) from utils import ( render_template, get_host_ip, @@ -68,16 +72,29 @@ def install_ceph_optimised_packages(): add_source(source, key='6EAEAE2203C3951A') +PACKAGES = [ + 'radosgw', + 'ntp', +] + +APACHE_PACKAGES = [ + 'libapache2-mod-fastcgi', + 'apache2', +] + + def install_packages(): add_source(config('source'), config('key')) - if config('use-ceph-optimised-packages'): + if (config('use-ceph-optimised-packages') and + not config('use-embedded-webserver')): install_ceph_optimised_packages() apt_update(fatal=True) - apt_install(['radosgw', - 'libapache2-mod-fastcgi', - 'apache2', - 'ntp'], fatal=True) + apt_install(PACKAGES, fatal=True) + if config('use-embedded-webserver'): + apt_purge(APACHE_PACKAGES) + else: + apt_install(APACHE_PACKAGES, fatal=True) @hooks.hook('install') @@ -98,7 +115,8 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'hostname': get_unit_hostname(), 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, - 'use_syslog': str(config('use-syslog')).lower() + 'use_syslog': str(config('use-syslog')).lower(), + 'embedded_webserver': config('use-embedded-webserver'), } # Check to ensure that correct version of ceph is @@ -143,14 +161,16 @@ def apache_reload(): @hooks.hook('upgrade-charm', 'config-changed') +@restart_on_change({'/etc/ceph/ceph.con', ['radosgw']}) def config_changed(): install_packages() emit_cephconf() - emit_apacheconf() - install_www_scripts() - apache_sites() - apache_modules() - apache_reload() + if not config('use-embedded-webserver'): + emit_apacheconf() + install_www_scripts() + apache_sites() + apache_modules() + apache_reload() def get_mon_hosts(): diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index a94483d1..5c1473ba 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -16,9 +16,13 @@ host = {{ hostname }} keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log +{% if embedded_webserver %} +rgw frontends = civetweb port=80 +{% else %} # Turn off 100-continue optimization as stock mod_fastcgi # does not support it -rgw print continue = false +rgw print continue = false +{% endif %} {% if auth_type == 'keystone' %} rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ rgw keystone admin token = {{ admin_token }} From f4cd496c5dfb6f1f0a0c72652b061cce7ed37181 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 14 Jan 2015 09:13:44 +0000 Subject: [PATCH 0600/2699] fixups --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ad432f58..119771e7 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -161,7 +161,7 @@ def apache_reload(): @hooks.hook('upgrade-charm', 'config-changed') -@restart_on_change({'/etc/ceph/ceph.con', ['radosgw']}) +@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def config_changed(): install_packages() emit_cephconf() From 7312906471ecb736b1387f4db221b342d1502bf8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 09:31:59 +0000 
Subject: [PATCH 0601/2699] Always tell keystone to use the vip --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 75928305..fa301bd6 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -348,7 +348,7 @@ def ha_relation_joined(): @hooks.hook('ha-relation-changed') def ha_relation_changed(): clustered = relation_get('clustered') - if clustered and cluster.is_leader(CEPHRG_HA_RES): + if clustered: log('Cluster configured, notifying other services and' 'updating keystone endpoint configuration') # Tell all related services to start using From b9357b5f77c804fa9cd5044bf02bb01026ad076c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 14 Jan 2015 16:48:07 +0000 Subject: [PATCH 0602/2699] Enable haproxy --- ceph-radosgw/charm-helpers-hooks.yaml | 4 +- ceph-radosgw/hooks/ceph_radosgw_context.py | 29 + ceph-radosgw/hooks/charmhelpers/__init__.py | 22 + .../charmhelpers/contrib/hahelpers/cluster.py | 38 +- .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 92 ++ .../contrib/openstack/amulet/utils.py | 278 +++++ .../charmhelpers/contrib/openstack/context.py | 1038 +++++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 223 ++++ .../contrib/openstack/templates/__init__.py | 2 + .../contrib/openstack/templates/ceph.conf | 15 + .../contrib/openstack/templates/haproxy.cfg | 58 + .../templates/openstack_https_frontend | 24 + .../templates/openstack_https_frontend.conf | 24 + .../contrib/openstack/templating.py | 279 +++++ .../charmhelpers/contrib/openstack/utils.py | 625 ++++++++++ .../charmhelpers/contrib/python/__init__.py | 0 .../charmhelpers/contrib/python/packages.py | 77 ++ .../contrib/storage/linux/ceph.py | 428 +++++++ .../contrib/storage/linux/loopback.py | 62 + .../charmhelpers/contrib/storage/linux/lvm.py | 89 ++ .../hooks/charmhelpers/core/decorators.py | 41 + .../hooks/charmhelpers/core/hookenv.py | 20 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 33 +- .../hooks/charmhelpers/core/templating.py | 2 +- .../hooks/charmhelpers/fetch/__init__.py | 9 +- .../hooks/charmhelpers/fetch/giturl.py | 9 +- ceph-radosgw/hooks/hooks.py | 14 +- ceph-radosgw/hooks/utils.py | 34 +- ceph-radosgw/templates/ceph.conf | 2 +- ceph-radosgw/templates/rgw | 2 +- ceph-radosgw/tests/charmhelpers/__init__.py | 22 + 32 files changed, 3561 insertions(+), 34 deletions(-) create mode 100644 ceph-radosgw/hooks/ceph_radosgw_context.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py create mode 100644 
ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/decorators.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 39d2bf71..768108d0 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -9,6 +9,8 @@ include: - apache - cluster - payload.execd - - contrib.openstack.alternatives + - contrib.openstack|inc=* - contrib.network.ip - contrib.openstack.ip + - contrib.storage.linux + - contrib.python.packages diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py new file mode 100644 index 00000000..79c295c1 --- /dev/null +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -0,0 +1,29 @@ +from charmhelpers.contrib.openstack import context +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_apache_port, +) + + +class HAProxyContext(context.HAProxyContext): + + def __call__(self): + ctxt = super(HAProxyContext, self).__call__() + + # Apache ports + a_cephradosgw_api = determine_apache_port(80, + singlenode_mode=True) + + port_mapping = { + 'cephradosgw-server': [ + 80, a_cephradosgw_api] + } + + ctxt['cephradosgw_bind_port'] = determine_api_port( + 80, + singlenode_mode=True, + ) + + # for haproxy.conf + ctxt['service_ports'] = port_mapping + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 52ce4b7c..912b2fe3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,6 +13,7 @@ import subprocess import os + from socket import gethostname as get_unit_hostname import six @@ -28,12 +29,19 @@ WARNING, unit_get, ) +from charmhelpers.core.decorators import ( + retry_on_exception, +) class HAIncompleteConfig(Exception): pass +class CRMResourceNotFound(Exception): + pass + + def is_elected_leader(resource): """ Returns True if the charm executing this is the elected cluster leader. 
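The hunk that follows decorates is_crm_leader() with @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) to avoid the false negatives described in LP #1396246. Given the decorator's delay = base_delay * multiplier schedule, a resource that crm keeps reporting as NOT running is re-polled after 2, 4, 6, 8 and 10 seconds (roughly 30 seconds in total) before CRMResourceNotFound finally escapes. An illustrative sketch of what a direct caller sees (the resource name is made up; the charms normally reach this via is_leader()):

    try:
        leader = is_crm_leader('res_rgw_vip')
    except CRMResourceNotFound:
        # ~30s of retries exhausted: treat this unit as a non-leader
        leader = False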
@@ -68,24 +76,30 @@ def is_clustered(): return False -def is_crm_leader(resource): +@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. """ - cmd = [ - "crm", "resource", - "show", resource - ] + cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd).decode('UTF-8') + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") except subprocess.CalledProcessError: - return False - else: - if get_unit_hostname() in status: - return True - else: - return False + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False def is_leader(resource): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..f3fee074 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,92 @@ +import six +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, stable=True): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. 
+ + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..3e0cc61c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,278 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +import six + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
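+
+        A sketch of the shape expected (keys mirror the 'actual' dict
+        assembled below; the values shown are placeholders)::
+
+            expected = {'id': endpoint_id,
+                        'region': 'RegionOne',
+                        'adminurl': admin_url,
+                        'internalurl': internal_url,
+                        'publicurl': public_url,
+                        'service_id': service_id}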
+ """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = 
nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 00000000..eaa89a67 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,1038 @@ +import json +import os +import time +from base64 import b64decode +from subprocess import check_call + +import six + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + WARNING, + ERROR, +) + +from charmhelpers.core.sysctl import create as sysctl_create + +from charmhelpers.core.host import ( + mkdir, + write_file, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_address_in_network, +) +from charmhelpers.contrib.openstack.utils import get_host_ip + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] + + +class OSContextError(Exception): + pass + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +def config_flags_parser(config_flags): + """Parses config flags string into dict. + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ + if config_flags.find('==') >= 0: + log("config_flags is not in expected format (key=value)", level=ERROR) + raise OSContextError + + # strip the following from each value. + post_strippers = ' ,' + # we strip any leading/trailing '=' or ' ' from the string then + # split on '='. 
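+    # Worked example (illustrative): the string "a=1,b=2,c=3,4"
+    # parses to {'a': '1', 'b': '2', 'c': '3,4'} -- a comma only
+    # starts a new pair when it is followed by another 'key=',
+    # otherwise it stays attached to the preceding value.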
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = {}
+    for i in range(0, limit - 1):
+        current = split[i]
+        next = split[i + 1]
+        vindex = next.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next
+        else:
+            value = next[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this is not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                log("Invalid config value(s) at index %s" % (i), level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
+
+
+class OSContextGenerator(object):
+    """Base class for all context generators."""
+    interfaces = []
+
+    def __call__(self):
+        raise NotImplementedError
+
+
+class SharedDBContext(OSContextGenerator):
+    interfaces = ['shared-db']
+
+    def __init__(self,
+                 database=None, user=None, relation_prefix=None, ssl_dir=None):
+        """Allows inspecting relation for settings prefixed with
+        relation_prefix. This is useful for parsing access for multiple
+        databases returned via the shared-db interface (e.g. nova_password,
+        quantum_password)
+        """
+        self.relation_prefix = relation_prefix
+        self.database = database
+        self.user = user
+        self.ssl_dir = ssl_dir
+
+    def __call__(self):
+        self.database = self.database or config('database')
+        self.user = self.user or config('database-user')
+        if None in [self.database, self.user]:
+            log("Could not generate shared_db context. Missing required charm "
+                "config options. (database name and user)", level=ERROR)
+            raise OSContextError
+
+        ctxt = {}
+
+        # NOTE(jamespage) if mysql charm provides a network upon which
+        # access to the database should be made, reconfigure relation
+        # with the service unit's local address and defer execution
+        access_network = relation_get('access-network')
+        if access_network is not None:
+            if self.relation_prefix is not None:
+                hostname_key = "{}_hostname".format(self.relation_prefix)
+            else:
+                hostname_key = "hostname"
+            access_hostname = get_address_in_network(access_network,
+                                                     unit_get('private-address'))
+            set_hostname = relation_get(attribute=hostname_key,
+                                        unit=local_unit())
+            if set_hostname != access_hostname:
+                relation_set(relation_settings={hostname_key: access_hostname})
+                return ctxt  # Defer any further hook execution for now....
+
+        password_setting = 'password'
+        if self.relation_prefix:
+            password_setting = self.relation_prefix + '_password'
+
+        for rid in relation_ids('shared-db'):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                host = rdata.get('db_host')
+                host = format_ipv6_addr(host) or host
+                ctxt = {
+                    'database_host': host,
+                    'database': self.database,
+                    'database_user': self.user,
+                    'database_password': rdata.get(password_setting),
+                    'database_type': 'mysql'
+                }
+                if context_complete(ctxt):
+                    db_ssl(rdata, ctxt, self.ssl_dir)
+                    return ctxt
+        return {}
+
+
+class PostgresqlDBContext(OSContextGenerator):
+    interfaces = ['pgsql-db']
+
+    def __init__(self, database=None):
+        self.database = database
+
+    def __call__(self):
+        self.database = self.database or config('database')
+        if self.database is None:
+            log('Could not generate postgresql_db context. Missing required '
+                'charm config options. (database name)', level=ERROR)
+            raise OSContextError
+
+        ctxt = {}
+        for rid in relation_ids(self.interfaces[0]):
+            for unit in related_units(rid):
+                rel_host = relation_get('host', rid=rid, unit=unit)
+                rel_user = relation_get('user', rid=rid, unit=unit)
+                rel_passwd = relation_get('password', rid=rid, unit=unit)
+                ctxt = {'database_host': rel_host,
+                        'database': self.database,
+                        'database_user': rel_user,
+                        'database_password': rel_passwd,
+                        'database_type': 'postgresql'}
+                if context_complete(ctxt):
+                    return ctxt
+
+        return {}
+
+
+def db_ssl(rdata, ctxt, ssl_dir):
+    if 'ssl_ca' in rdata and ssl_dir:
+        ca_path = os.path.join(ssl_dir, 'db-client.ca')
+        with open(ca_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_ca']))
+
+        ctxt['database_ssl_ca'] = ca_path
+    elif 'ssl_ca' in rdata:
+        log("Charm not setup for ssl support but ssl ca found", level=INFO)
+        return ctxt
+
+    if 'ssl_cert' in rdata:
+        cert_path = os.path.join(
+            ssl_dir, 'db-client.cert')
+        if not os.path.exists(cert_path):
+            log("Waiting 1m for ssl client cert validity", level=INFO)
+            time.sleep(60)
+
+        with open(cert_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_cert']))
+
+        ctxt['database_ssl_cert'] = cert_path
+        key_path = os.path.join(ssl_dir, 'db-client.key')
+        with open(key_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_key']))
+
+        ctxt['database_ssl_key'] = key_path
+
+    return ctxt
+
+
+class IdentityServiceContext(OSContextGenerator):
+    interfaces = ['identity-service']
+
+    def __call__(self):
+        log('Generating template context for identity-service', level=DEBUG)
+        ctxt = {}
+        for rid in relation_ids('identity-service'):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                serv_host = rdata.get('service_host')
+                serv_host = format_ipv6_addr(serv_host) or serv_host
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+                svc_protocol = rdata.get('service_protocol') or 'http'
+                auth_protocol = rdata.get('auth_protocol') or 'http'
+                ctxt = {'service_port': rdata.get('service_port'),
+                        'service_host': serv_host,
+                        'auth_host': auth_host,
+                        'auth_port': rdata.get('auth_port'),
+                        'admin_tenant_name': rdata.get('service_tenant'),
+                        'admin_user': rdata.get('service_username'),
+                        'admin_password': rdata.get('service_password'),
+                        'service_protocol': svc_protocol,
+                        'auth_protocol': auth_protocol}
+                if context_complete(ctxt):
+                    # NOTE(jamespage) this is required for >= icehouse
+                    # so a missing value just indicates keystone needs
+                    # upgrading
+                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+                    return ctxt
+
+        return {}
+
+
+class AMQPContext(OSContextGenerator):
+
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+        self.ssl_dir = ssl_dir
+        self.rel_name = rel_name
+        self.relation_prefix = relation_prefix
+        self.interfaces = [rel_name]
+
+    def __call__(self):
+        log('Generating template context for amqp', level=DEBUG)
+        conf = config()
+        if self.relation_prefix:
+            user_setting = '%s-rabbit-user' % (self.relation_prefix)
+            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
+        else:
+            user_setting = 'rabbit-user'
+            vhost_setting = 'rabbit-vhost'
+
+        try:
+            username = conf[user_setting]
+            vhost = conf[vhost_setting]
+        except KeyError as e:
+            log('Could not generate amqp context. Missing required charm '
+                'config options: %s.'
% e, level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.rel_name): + ha_vip_only = False + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'w') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! + break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + + if not context_complete(ctxt): + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + auth = None + key = None + use_syslog = str(config('use-syslog')).lower() + for rid in relation_ids('ceph'): + for unit in related_units(rid): + auth = relation_get('auth', rid=rid, unit=unit) + key = relation_get('key', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr + mon_hosts.append(ceph_addr) + + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
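+
+    For example, a charm's own generator might provide the port map that
+    the shipped haproxy.cfg template consumes, along the lines of::
+
+        {'service_ports': {'my_service': [8080, 8070]}}
+
+    where the first port is the haproxy frontend and the second is the
+    backend port the service itself listens on.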
+ """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + if config('prefer-ipv6'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] + else: + addr = get_host_ip(unit_get('private-address')) + + l_unit = local_unit().replace('/', '-') + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('prefer-ipv6'): + ctxt['ipv6'] = True + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + ctxt['stat_port'] = ':::8888' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + ctxt['stat_port'] = ':8888' + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. 
+    looks something like::
+
+        {
+            'namespace': 'cinder',
+            'private_address': 'iscsi.mycinderhost.com',
+            'endpoints': [(8776, 8766), (8777, 8767)]
+        }
+
+    The endpoints list consists of tuples mapping external ports
+    to internal ports.
+    """
+    interfaces = ['https']
+
+    # charms should inherit this context and set external ports
+    # and service namespace accordingly.
+    external_ports = []
+    service_namespace = None
+
+    def enable_modules(self):
+        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
+        check_call(cmd)
+
+    def configure_cert(self, cn=None):
+        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
+        mkdir(path=ssl_dir)
+        cert, key = get_cert(cn)
+        if cn:
+            cert_filename = 'cert_{}'.format(cn)
+            key_filename = 'key_{}'.format(cn)
+        else:
+            cert_filename = 'cert'
+            key_filename = 'key'
+
+        write_file(path=os.path.join(ssl_dir, cert_filename),
+                   content=b64decode(cert))
+        write_file(path=os.path.join(ssl_dir, key_filename),
+                   content=b64decode(key))
+
+    def configure_ca(self):
+        ca_cert = get_ca_cert()
+        if ca_cert:
+            install_ca_cert(b64decode(ca_cert))
+
+    def canonical_names(self):
+        """Figure out which canonical names clients will use to access this
+        service.
+        """
+        cns = []
+        for r_id in relation_ids('identity-service'):
+            for unit in related_units(r_id):
+                rdata = relation_get(rid=r_id, unit=unit)
+                for k in rdata:
+                    if k.startswith('ssl_key_'):
+                        # NOTE: slice off the prefix rather than lstrip(),
+                        # since lstrip() strips a character set, not a prefix.
+                        cns.append(k[len('ssl_key_'):])
+
+        return sorted(list(set(cns)))
+
+    def get_network_addresses(self):
+        """For each network configured, return corresponding address and vip
+        (if available).
+
+        Returns a list of tuples of the form:
+
+            [(address_in_net_a, vip_in_net_a),
+             (address_in_net_b, vip_in_net_b),
+             ...]
+
+        or, if no vip(s) available:
+
+            [(address_in_net_a, address_in_net_a),
+             (address_in_net_b, address_in_net_b),
+             ...]
+        """
+        addresses = []
+        if config('vip'):
+            vips = config('vip').split()
+        else:
+            vips = []
+
+        for net_type in ['os-internal-network', 'os-admin-network',
+                         'os-public-network']:
+            addr = get_address_in_network(config(net_type),
+                                          unit_get('private-address'))
+            if len(vips) > 1 and is_clustered():
+                if not config(net_type):
+                    log("Multiple networks configured but net_type "
+                        "is None (%s)."
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + self.configure_ca() + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + for cn in self.canonical_names(): + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + + with open(_file, 'wb') as out: + out.write(self.plugin + '\n') + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + 
self.network_manager)
+        config = neutron_plugin_attribute(self.plugin, 'config',
+                                          self.network_manager)
+        calico_ctxt = {'core_plugin': driver,
+                       'neutron_plugin': 'Calico',
+                       'neutron_security_groups': self.neutron_security_groups,
+                       'local_ip': unit_private_ip(),
+                       'config': config}
+
+        return calico_ctxt
+
+    def neutron_ctxt(self):
+        if https():
+            proto = 'https'
+        else:
+            proto = 'http'
+
+        if is_clustered():
+            host = config('vip')
+        else:
+            host = unit_get('private-address')
+
+        ctxt = {'network_manager': self.network_manager,
+                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
+        return ctxt
+
+    def __call__(self):
+        self._ensure_packages()
+
+        if self.network_manager not in ['quantum', 'neutron']:
+            return {}
+
+        if not self.plugin:
+            return {}
+
+        ctxt = self.neutron_ctxt()
+
+        if self.plugin == 'ovs':
+            ctxt.update(self.ovs_ctxt())
+        elif self.plugin in ['nvp', 'nsx']:
+            ctxt.update(self.nvp_ctxt())
+        elif self.plugin == 'n1kv':
+            ctxt.update(self.n1kv_ctxt())
+        elif self.plugin == 'Calico':
+            ctxt.update(self.calico_ctxt())
+
+        alchemy_flags = config('neutron-alchemy-flags')
+        if alchemy_flags:
+            flags = config_flags_parser(alchemy_flags)
+            ctxt['neutron_alchemy_flags'] = flags
+
+        self._save_flag_file()
+        return ctxt
+
+
+class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some OpenStack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
+    def __call__(self):
+        config_flags = config(self._charm_flag)
+        if not config_flags:
+            return {}
+
+        return {self._template_flag:
+                config_flags_parser(config_flags)}
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+    """
+    Responsible for inspecting relations to subordinates that
+    may be exporting required config via a json blob.
+
+    The subordinate interface allows subordinates to export their
+    configuration requirements to the principal for multiple config
+    files and multiple services. For example, a subordinate that has
+    interfaces to both glance and nova may export the following yaml
+    blob as json::
+
+        glance:
+            /etc/glance/glance-api.conf:
+                sections:
+                    DEFAULT:
+                        - [key1, value1]
+            /etc/glance/glance-registry.conf:
+                MYSECTION:
+                    - [key2, value2]
+        nova:
+            /etc/nova/nova.conf:
+                sections:
+                    DEFAULT:
+                        - [key3, value3]
+
+
+    It is then up to the principal charms to subscribe this context to
+    the service+config file it is interested in.  Configuration data will
+    be available in the template context, in glance's case, as::
+
+        ctxt = {
+            ... other context ...
+            'subordinate_config': {
+                'DEFAULT': {
+                    'key1': 'value1',
+                },
+                'MYSECTION': {
+                    'key2': 'value2',
+                },
+            }
+        }
+    """
+
+    def __init__(self, service, config_file, interface):
+        """
+        :param service        : Service name key to query in any subordinate
+                                data found
+        :param config_file    : Service's config file to query sections
+        :param interface      : Subordinate interface to inspect
+        """
+        self.service = service
+        self.config_file = config_file
+        self.interface = interface
+
+    def __call__(self):
+        ctxt = {'sections': {}}
+        for rid in relation_ids(self.interface):
+            for unit in related_units(rid):
+                sub_config = relation_get('subordinate_configuration',
+                                          rid=rid, unit=unit)
+                if sub_config and sub_config != '':
+                    try:
+                        sub_config = json.loads(sub_config)
+                    except:
+                        log('Could not parse JSON from subordinate_config '
+                            'setting from %s' % rid, level=ERROR)
+                        continue
+
+                    if self.service not in sub_config:
+                        log('Found subordinate_config on %s but it contained '
+                            'nothing for %s service' % (rid, self.service),
+                            level=INFO)
+                        continue
+
+                    sub_config = sub_config[self.service]
+                    if self.config_file not in sub_config:
+                        log('Found subordinate_config on %s but it contained '
+                            'nothing for %s' % (rid, self.config_file),
+                            level=INFO)
+                        continue
+
+                    sub_config = sub_config[self.config_file]
+                    for k, v in six.iteritems(sub_config):
+                        if k == 'sections':
+                            for section, config_dict in six.iteritems(v):
+                                log("adding section '%s'" % (section),
+                                    level=DEBUG)
+                                ctxt[k][section] = config_dict
+                        else:
+                            ctxt[k] = v
+
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+class WorkerConfigContext(OSContextGenerator):
+
+    @property
+    def num_cpus(self):
+        try:
+            from psutil import NUM_CPUS
+        except ImportError:
+            apt_install('python-psutil', fatal=True)
+            from psutil import NUM_CPUS
+
+        return NUM_CPUS
+
+    def __call__(self):
+        multiplier = config('worker-multiplier') or 0
+        ctxt = {"workers": self.num_cpus * multiplier}
+        return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+
+        return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+
+    def __init__(self, zmq_relation='zeromq-configuration',
+                 amqp_relation='amqp'):
+        """
+        :param zmq_relation: Name of Zeromq relation to check
+        """
+        self.zmq_relation = zmq_relation
+        self.amqp_relation = amqp_relation
+
+    def __call__(self):
+        ctxt = {'notifications': 'False'}
+        if is_relation_made(self.amqp_relation):
+            ctxt['notifications'] = "True"
+
+        return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+    """This context checks whether the 'sysctl' option is set in the charm
+    configuration and, if so, renders its contents to a file."""
+    def __call__(self):
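+        # The 'sysctl' option is typically a YAML map in string form, e.g.
+        # "{net.ipv4.ip_forward: 1}", which sysctl_create renders into
+        # /etc/sysctl.d/50-<charm-name>.conf.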
+        sysctl_dict = config('sysctl')
+        if sysctl_dict:
+            sysctl_create(sysctl_dict,
+                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+        return {'sysctl': sysctl_dict}
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py
new file mode 100644
index 00000000..095cc24b
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -0,0 +1,223 @@
+# Various utilities for dealing with Neutron and the renaming from Quantum.
+
+from subprocess import check_output
+
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+    ERROR,
+)
+
+from charmhelpers.contrib.openstack.utils import os_release
+
+
+def headers_package():
+    """Ensures the correct linux-headers package for the running kernel is
+    installed, for building the DKMS package."""
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+    return 'linux-headers-%s' % kver
+
+QUANTUM_CONF_DIR = '/etc/quantum'
+
+
+def kernel_version():
+    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+    kver = kver.split('.')
+    return (int(kver[0]), int(kver[1]))
+
+
+def determine_dkms_package():
+    """ Determine which DKMS package should be used based on kernel version """
+    # NOTE: 3.13 kernels have support for GRE and VXLAN native
+    if kernel_version() >= (3, 13):
+        return []
+    else:
+        return ['openvswitch-datapath-dkms']
+
+
+# legacy
+
+
+def quantum_plugins():
+    from charmhelpers.contrib.openstack import context
+    return {
+        'ovs': {
+            'config': '/etc/quantum/plugins/openvswitch/'
+                      'ovs_quantum_plugin.ini',
+            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
+                      'OVSQuantumPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
+            'services': ['quantum-plugin-openvswitch-agent'],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['quantum-plugin-openvswitch-agent']],
+            'server_packages': ['quantum-server',
+                                'quantum-plugin-openvswitch'],
+            'server_services': ['quantum-server']
+        },
+        'nvp': {
+            'config': '/etc/quantum/plugins/nicira/nvp.ini',
+            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
+                      'QuantumPlugin.NvpPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
+            'services': [],
+            'packages': [],
+            'server_packages': ['quantum-server',
+                                'quantum-plugin-nicira'],
+            'server_services': ['quantum-server']
+        }
+    }
+
+NEUTRON_CONF_DIR = '/etc/neutron'
+
+
+def neutron_plugins():
+    from charmhelpers.contrib.openstack import context
+    release = os_release('nova-common')
+    plugins = {
+        'ovs': {
+            'config': '/etc/neutron/plugins/openvswitch/'
+                      'ovs_neutron_plugin.ini',
+            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
+ 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' + 'NeutronPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] + } + } + if release >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." 
% (manager),
+            level=ERROR)
+        raise Exception
+
+    try:
+        _plugin = plugins[plugin]
+    except KeyError:
+        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
+        raise Exception
+
+    try:
+        return _plugin[attr]
+    except KeyError:
+        return None
+
+
+def network_manager():
+    '''
+    Deals with the renaming of Quantum to Neutron in H and any situations
+    that require compatibility (e.g. deploying H with network-manager=quantum,
+    upgrading from G).
+    '''
+    release = os_release('nova-common')
+    manager = config('network-manager').lower()
+
+    if manager not in ['quantum', 'neutron']:
+        return manager
+
+    if release in ['essex']:
+        # E does not support neutron
+        log('Neutron networking not supported in Essex.', level=ERROR)
+        raise Exception
+    elif release in ['folsom', 'grizzly']:
+        # neutron is named quantum in F and G
+        return 'quantum'
+    else:
+        # ensure accurate naming for all releases post-H
+        return 'neutron'
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py
new file mode 100644
index 00000000..0b49ad28
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py
@@ -0,0 +1,2 @@
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
new file mode 100644
index 00000000..81a9719f
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -0,0 +1,15 @@
+###############################################################################
+# [ WARNING ]
+# configuration file maintained by Juju
+# local changes may be overwritten.
+###############################################################################
+[global]
+{% if auth -%}
+ auth_supported = {{ auth }}
+ keyring = /etc/ceph/$cluster.$name.keyring
+ mon host = {{ mon_hosts }}
+{% endif -%}
+ log to syslog = {{ use_syslog }}
+ err to syslog = {{ use_syslog }}
+ clog to syslog = {{ use_syslog }}
+
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
new file mode 100644
index 00000000..ad875f16
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -0,0 +1,58 @@
+global
+    log {{ local_host }} local0
+    log {{ local_host }} local1 notice
+    maxconn 20000
+    user haproxy
+    group haproxy
+    spread-checks 0
+
+defaults
+    log global
+    mode tcp
+    option tcplog
+    option dontlognull
+    retries 3
+    timeout queue 1000
+    timeout connect 1000
+{% if haproxy_client_timeout -%}
+    timeout client {{ haproxy_client_timeout }}
+{% else -%}
+    timeout client 30000
+{% endif -%}
+
+{% if haproxy_server_timeout -%}
+    timeout server {{ haproxy_server_timeout }}
+{% else -%}
+    timeout server 30000
+{% endif -%}
+
+listen stats {{ stat_port }}
+    mode http
+    stats enable
+    stats hide-version
+    stats realm Haproxy\ Statistics
+    stats uri /
+    stats auth admin:password
+
+{% if frontends -%}
+{% for service, ports in service_ports.items() -%}
+frontend tcp-in_{{ service }}
+    bind *:{{ ports[0] }}
+    {% if ipv6 -%}
+    bind :::{{ ports[0] }}
+    {% endif -%}
+    {% for frontend in frontends -%}
+    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
+    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
+    {% endfor -%}
+    default_backend {{ service }}_{{ default_backend }}
+
+{% for frontend in frontends -%}
+backend {{ service }}_{{ frontend }}
+    balance leastconn
+    {% for unit, address in frontends[frontend]['backends'].items() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+{% endfor -%}
+{% endfor -%}
+{% endif -%}
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 00000000..ce28fa3f
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,24 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
new file mode 100644
index 00000000..ce28fa3f
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
@@ -0,0 +1,24 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 00000000..33df0675
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,279 @@
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+    # python-jinja2 may not be installed yet, or we're running unittests.
+    FileSystemLoader = ChoiceLoader = Environment = exceptions = None
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If a release-specific templates directory
+    is missing under templates_dir, it is omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, e.g.::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=INFO)
+    return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+    def __init__(self, config_file, contexts):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
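+                # (each interface is recorded once only, the first time a
+                # context providing it completes)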
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms. It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='folsom')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release. This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    OpenStack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, e.g.
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution. Doing so may require inspecting service relations, charm
+    config, etc. When registered, a config file is associated with a list
+    of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template. See context.py for more info.
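+
+    **Release upgrades**
+
+    When a charm moves to a newer OpenStack release, set_release() (defined
+    below) resets the template environment so that templates from the newer
+    release directory take precedence on the next render.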
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. + """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + + log('Rendering from template: %s' % _tmpl, level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in six.iterkeys(self.templates)] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in six.itervalues(self.templates)] + return interfaces diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..ddd40ce5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,625 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict +from functools import wraps + +import subprocess +import json +import os +import socket +import sys + +import six +import yaml + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, + INFO, + relation_ids, + relation_set +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + +from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.python.packages import pip_install +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), +]) + +# The ugly duckling +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), + ('1.13.1', 'icehouse'), + ('1.13.0', 'icehouse'), + ('1.12.0', 'icehouse'), + ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), +]) + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % 
codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. + ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. 
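+            # e.g. 'cloud:precise-folsom/staging' becomes the
+            # 'ppa:ubuntu-cloud-archive/folsom-staging' PPA.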
+            os_rel = ca_rel.split('/')[0]
+            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
+            cmd = 'add-apt-repository -y %s' % ppa
+            subprocess.check_call(cmd.split(' '))
+            return
+
+        # map charm config options to actual archive pockets.
+        pockets = {
+            'folsom': 'precise-updates/folsom',
+            'folsom/updates': 'precise-updates/folsom',
+            'folsom/proposed': 'precise-proposed/folsom',
+            'grizzly': 'precise-updates/grizzly',
+            'grizzly/updates': 'precise-updates/grizzly',
+            'grizzly/proposed': 'precise-proposed/grizzly',
+            'havana': 'precise-updates/havana',
+            'havana/updates': 'precise-updates/havana',
+            'havana/proposed': 'precise-proposed/havana',
+            'icehouse': 'precise-updates/icehouse',
+            'icehouse/updates': 'precise-updates/icehouse',
+            'icehouse/proposed': 'precise-proposed/icehouse',
+            'juno': 'trusty-updates/juno',
+            'juno/updates': 'trusty-updates/juno',
+            'juno/proposed': 'trusty-proposed/juno',
+            'kilo': 'trusty-updates/kilo',
+            'kilo/updates': 'trusty-updates/kilo',
+            'kilo/proposed': 'trusty-proposed/kilo',
+        }
+
+        try:
+            pocket = pockets[ca_rel]
+        except KeyError:
+            e = 'Invalid Cloud Archive release specified: %s' % rel
+            error_out(e)
+
+        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
+        apt_install('ubuntu-cloud-keyring', fatal=True)
+
+        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
+            f.write(src)
+    else:
+        error_out("Invalid openstack-release specified: %s" % rel)
+
+
+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
+    """
+    Write an rc file in the charm-delivered directory containing
+    exported environment variables provided by env_vars. Any charm scripts run
+    outside the juju hook environment can source this scriptrc to obtain
+    updated config information necessary to perform health checks or
+    service changes.
+    """
+    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
+    if not os.path.exists(os.path.dirname(juju_rc_path)):
+        os.mkdir(os.path.dirname(juju_rc_path))
+    with open(juju_rc_path, 'wb') as rc_script:
+        rc_script.write(
+            "#!/bin/bash\n")
+        [rc_script.write('export %s=%s\n' % (u, p))
+         for u, p in six.iteritems(env_vars) if u != "script_path"]
+
+
+def openstack_upgrade_available(package):
+    """
+    Determines if an OpenStack upgrade is available from installation
+    source, based on version of installed package.
+
+    :param package: str: Name of installed package.
+
+    :returns: bool: True if the configured installation source offers
+              a newer version of package.
+    """
+
+    import apt_pkg as apt
+    src = config('openstack-origin')
+    cur_vers = get_os_version_package(package)
+    available_vers = get_os_version_install_source(src)
+    apt.init()
+    return apt.version_compare(available_vers, cur_vers) == 1
+
+
+def ensure_block_device(block_device):
+    '''
+    Confirm block_device, create as loopback if necessary.
+
+    :param block_device: str: Full path of block device to ensure.
+
+    :returns: str: Full path of ensured block device.
+    '''
+    _none = ['None', 'none', None]
+    if block_device in _none:
+        error_out('ensure_block_device(): Missing required input: '
+                  'block_device=%s.'
+ % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + return ns_query(hostname) + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install('python-dnspython') + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . 
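A rough value trace of this reverse-lookup path may help; the address and names are invented, and dnspython is assumed to behave as documented for PTR queries:

    # get_hostname('192.0.2.10') with dnspython available:
    #   rev    = dns.reversename.from_address('192.0.2.10')
    #            -> '10.2.0.192.in-addr.arpa.'
    #   result = ns_query(rev) -> e.g. 'ceph-node-1.example.com.'
    #   fqdn=True  -> 'ceph-node-1.example.com'  (trailing dot stripped below)
    #   fqdn=False -> 'ceph-node-1'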
+ if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """Returns true if openstack-origin-git is specified.""" + return config('openstack-origin-git') != "None" + + +requirements_dir = None + + +def git_clone_and_install(file_name, core_project): + """Clone/install all OpenStack repos specified in yaml config file.""" + global requirements_dir + + if file_name == "None": + return + + yaml_file = os.path.join(charm_dir(), file_name) + + # clone/install the requirements project first + installed = _git_clone_and_install_subset(yaml_file, + whitelist=['requirements']) + if 'requirements' not in installed: + error_out('requirements git repository must be specified') + + # clone/install all other projects except requirements and the core project + blacklist = ['requirements', core_project] + _git_clone_and_install_subset(yaml_file, blacklist=blacklist, + update_requirements=True) + + # clone/install the core project + whitelist = [core_project] + installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist, + update_requirements=True) + if core_project not in installed: + error_out('{} git repository must be specified'.format(core_project)) + + +def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[], + update_requirements=False): + """Clone/install subset of OpenStack repos specified in yaml config file.""" + global requirements_dir + installed = [] + + with open(yaml_file, 'r') as fd: + projects = yaml.load(fd) + for proj, val in projects.items(): + # The project subset is chosen based on the following 3 rules: + # 1) If project is in blacklist, we don't clone/install it, period. + # 2) If whitelist is empty, we clone/install everything else. + # 3) If whitelist is not empty, we clone/install everything in the + # whitelist. + if proj in blacklist: + continue + if whitelist and proj not in whitelist: + continue + repo = val['repository'] + branch = val['branch'] + repo_dir = _git_clone_and_install_single(repo, branch, + update_requirements) + if proj == 'requirements': + requirements_dir = repo_dir + installed.append(proj) + return installed + + +def _git_clone_and_install_single(repo, branch, update_requirements=False): + """Clone and install a single git repository.""" + dest_parent_dir = "/mnt/openstack-git/" + dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo)) + + if not os.path.exists(dest_parent_dir): + juju_log('Host dir not mounted at {}. 
' + 'Creating directory there instead.'.format(dest_parent_dir)) + os.mkdir(dest_parent_dir) + + if not os.path.exists(dest_dir): + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch) + else: + repo_dir = dest_dir + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + pip_install(repo_dir) + + return repo_dir + + +def _git_update_requirements(package_dir, reqs_dir): + """Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt.""" + orig_dir = os.getcwd() + os.chdir(reqs_dir) + cmd = "python update.py {}".format(package_dir) + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from global-requirements.txt".format(package)) + os.chdir(orig_dir) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py new file mode 100644 index 00000000..78162b1b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# coding: utf-8 + +__author__ = "Jorge Niedbalski " + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import log + +try: + from pip import main as pip_execute +except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as pip_execute + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, **options): + """Install a requirements file """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, **options): + """Install a python package""" + command = ["install"] + + available_options = ('proxy', 'src', 'log', "index-url", ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) diff 
--git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..6ebeab5c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,428 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} + log to syslog = {use_syslog} + err to syslog = {use_syslog} + clog to syslog = {use_syslog} +""" + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + # Calculate the number of placement groups based + # on upstream recommended best practices. 
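Before the code below queries the cluster, the arithmetic is worth seeing in isolation: roughly 100 placement groups per OSD, shared across the pool's replicas. A minimal standalone sketch with an assumed OSD count:

    # sketch of the heuristic used below; the OSD count is made up
    osds = list(range(12))        # pretend get_osds() reported 12 OSDs
    replicas = 3
    pgnum = (len(osds) * 100 // replicas) if osds else 200
    print(pgnum)                  # -> 400; ceph < 0.56 falls back to 200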
+ osds = get_osds(service)
+ if osds:
+ pgnum = (len(osds) * 100 // replicas)
+ else:
+ # NOTE(james-page): Default to 200 for older ceph versions
+ # which don't support OSD query from cli
+ pgnum = 200
+
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
+ check_call(cmd)
+
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
+ str(replicas)]
+ check_call(cmd)
+
+
+def delete_pool(service, name):
+ """Delete a RADOS pool from ceph."""
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
+ '--yes-i-really-really-mean-it']
+ check_call(cmd)
+
+
+def _keyfile_path(service):
+ return KEYFILE.format(service)
+
+
+def _keyring_path(service):
+ return KEYRING.format(service)
+
+
+def create_keyring(service, key):
+ """Create a new Ceph keyring containing key."""
+ keyring = _keyring_path(service)
+ if os.path.exists(keyring):
+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
+ return
+
+ cmd = ['ceph-authtool', keyring, '--create-keyring',
+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
+ check_call(cmd)
+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+
+
+def delete_keyring(service):
+ """Delete an existing Ceph keyring."""
+ keyring = _keyring_path(service)
+ if not os.path.exists(keyring):
+ log('Keyring does not exist at %s' % keyring, level=WARNING)
+ return
+
+ os.remove(keyring)
+ log('Deleted ring at %s.' % keyring, level=INFO)
+
+
+def create_key_file(service, key):
+ """Create a file containing key."""
+ keyfile = _keyfile_path(service)
+ if os.path.exists(keyfile):
+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
+ return
+
+ with open(keyfile, 'w') as fd:
+ fd.write(key)
+
+ log('Created new keyfile at %s.' % keyfile, level=INFO)
+
+
+def get_ceph_nodes():
+ """Query named relation 'ceph' to determine current nodes."""
+ hosts = []
+ for r_id in relation_ids('ceph'):
+ for unit in related_units(r_id):
+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
+
+ return hosts
+
+
+def configure(service, key, auth, use_syslog):
+ """Perform basic configuration of Ceph."""
+ create_keyring(service, key)
+ create_key_file(service, key)
+ hosts = get_ceph_nodes()
+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
+ ceph_conf.write(CEPH_CONF.format(auth=auth,
+ keyring=_keyring_path(service),
+ mon_hosts=",".join(map(str, hosts)),
+ use_syslog=use_syslog))
+ modprobe('rbd')
+
+
+def image_mapped(name):
+ """Determine whether a RADOS block device is mapped locally."""
+ try:
+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
+ except CalledProcessError:
+ return False
+
+ return name in out
+
+
+def map_block_storage(service, pool, image):
+ """Map a RADOS block device for local use."""
+ cmd = [
+ 'rbd',
+ 'map',
+ '{}/{}'.format(pool, image),
+ '--user',
+ service,
+ '--secret',
+ _keyfile_path(service),
+ ]
+ check_call(cmd)
+
+
+def filesystem_mounted(fs):
+ """Determine whether a filesystem is already mounted."""
+ return fs in [f for f, m in mounts()]
+
+
+def make_filesystem(blk_device, fstype='ext4', timeout=10):
+ """Make a new filesystem on the specified block device."""
+ count = 0
+ e_noent = os.errno.ENOENT
+ while not os.path.exists(blk_device):
+ if count >= timeout:
+ log('Gave up waiting on block device %s' % blk_device,
+ level=ERROR)
+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
+
+ log('Waiting for block device %s to appear' % blk_device,
+ level=DEBUG)
+ count += 1
+ time.sleep(1)
+ else:
+ log('Formatting block device %s as
filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. 
+ """ + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 00000000..a22c3d7b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,62 @@ +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If it a loopback device is not mapped to file, a new one will be created. + + TODO: Confirm size of found loopback device. 
+
+
+def ensure_loopback_device(path, size):
+ '''
+ Ensure a loopback device exists for a given backing file path and size.
+ If a loopback device is not mapped to the file, a new one will be created.
+
+ TODO: Confirm size of found loopback device.
+
+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+ '''
+ for d, f in six.iteritems(loopback_devices()):
+ if f == path:
+ return d
+
+ if not os.path.exists(path):
+ cmd = ['truncate', '--size', size, path]
+ check_call(cmd)
+
+ return create_loopback(path)
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 00000000..0aa65f4f
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,89 @@
+from subprocess import (
+ CalledProcessError,
+ check_call,
+ check_output,
+ Popen,
+ PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+ '''
+ Deactivate any volume group associated with an LVM physical volume.
+
+ :param block_device: str: Full path to LVM physical volume
+ '''
+ vg = list_lvm_volume_group(block_device)
+ if vg:
+ cmd = ['vgchange', '-an', vg]
+ check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+ '''
+ Determine whether a block device is initialized as an LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: boolean: True if block device is a PV, False if not.
+ '''
+ try:
+ check_output(['pvdisplay', block_device])
+ return True
+ except CalledProcessError:
+ return False
+
+
+def remove_lvm_physical_volume(block_device):
+ '''
+ Remove LVM PV signatures from a given block device.
+
+ :param block_device: str: Full path of block device to scrub.
+ '''
+ p = Popen(['pvremove', '-ff', block_device],
+ stdin=PIPE)
+ p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+ '''
+ List LVM volume group associated with a given block device.
+
+ Assumes block device is a valid LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: str: Name of volume group associated with block device or None
+ '''
+ vg = None
+ pvd = check_output(['pvdisplay', block_device]).splitlines()
+ for l in pvd:
+ l = l.decode('UTF-8')
+ if l.strip().startswith('VG Name'):
+ vg = ' '.join(l.strip().split()[2:])
+ return vg
+
+
+def create_lvm_physical_volume(block_device):
+ '''
+ Initialize a block device as an LVM physical volume.
+
+ :param block_device: str: Full path of block device to initialize.
+
+ '''
+ check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+ '''
+ Create an LVM volume group backed by a given block device.
+
+ Assumes block device has already been initialized as an LVM PV.
+
+ :param volume_group: str: Name of volume group to create.
+ :block_device: str: Full path of PV-initialized block device.
+ '''
+ check_call(['vgcreate', volume_group, block_device])
diff --git a/ceph-radosgw/hooks/charmhelpers/core/decorators.py b/ceph-radosgw/hooks/charmhelpers/core/decorators.py
new file mode 100644
index 00000000..029a4ef4
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/core/decorators.py
@@ -0,0 +1,41 @@
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+# Edward Hope-Morley
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+ """If the decorated function raises exception exc_type, allow num_retries
+ retry attempts before raising the exception.
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 99e5d208..69ae4564 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -68,6 +68,8 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) @@ -393,21 +395,31 @@ def relations_of_type(reltype=None): return relation_data +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" - charmdir = os.environ.get('CHARM_DIR', '') - mdf = open(os.path.join(charmdir, 'metadata.yaml')) - md = yaml.safe_load(mdf) rel_types = [] + md = metadata() for key in ('provides', 'requires', 'peers'): section = md.get(key) if section: rel_types.extend(section.keys()) - mdf.close() return rel_types +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + @cached def relations(): """Get a nested dictionary of relation data for all related units""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index e6783d9b..5221120c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -101,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -142,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -368,8 +391,8 @@ def cmp_pkgrevno(package, revno, 
pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 83133fa4..569eaed6 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target)) + host.mkdir(os.path.dirname(target), owner, group) host.write_file(target, content, owner, group, perms) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index 61684cb6..f3aa2821 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -34,11 +34,14 @@ def clone(self, source, dest, branch): repo = Repo.clone_from(source, dest) repo.git.checkout(branch) - def install(self, source, branch="master"): + def install(self, source, branch="master", dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4a5b7271..c54b111a 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -41,6 +41,7 @@ enable_pocket, is_apache_24, CEPHRG_HA_RES, + register_configs, ) from charmhelpers.payload.execd import execd_preinstall @@ -58,6 +59,7 @@ ) hooks = Hooks() +CONFIGS = register_configs() def install_www_scripts(): @@ -86,6 +88,7 @@ def install_ceph_optimised_packages(): PACKAGES = [ 'radosgw', 'ntp', + 'haproxy', ] APACHE_PACKAGES = [ @@ -172,10 +175,12 @@ def apache_reload(): @hooks.hook('upgrade-charm', 'config-changed') -@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) +@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], + '/etc/haproxy/haproxy.cfg': ['haproxy']}) def config_changed(): install_packages() emit_cephconf() + CONFIGS.write_all() if not config('use-embedded-webserver'): emit_apacheconf() install_www_scripts() @@ 
-236,6 +241,7 @@ def get_keystone_conf(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') +@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): emit_cephconf() key = relation_get('radosgw_key') @@ -289,7 +295,6 @@ def identity_joined(relid=None): (canonical_url(INTERNAL), port) public_url = '%s:%s/swift/v1' % \ (canonical_url(PUBLIC), port) - log('LY identity_joined relation_set public_url=%s relation_id=%s' % (public_url, str(relid)), level=ERROR) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, @@ -299,6 +304,7 @@ def identity_joined(relid=None): @hooks.hook('identity-service-relation-changed') +@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def identity_changed(): emit_cephconf() restart() @@ -306,10 +312,10 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') +@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_changed(): - log('LY In cluster_changed triggering identity_joined', level=ERROR) + CONFIGS.write_all() for r_id in relation_ids('identity-service'): - log('LY In cluster_changed triggering identity_joined for relid: ' + r_id, level=ERROR) identity_joined(relid=r_id) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index e4b7dfa8..25b2b173 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -10,12 +10,27 @@ import socket import re import os - +from copy import deepcopy +from collections import OrderedDict from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install +from charmhelpers.contrib.openstack import context, templating +from charmhelpers.contrib.openstack.utils import os_release + +import ceph_radosgw_context CEPHRG_HA_RES = 'grp_cephrg_vips' TEMPLATES_DIR = 'templates' +TEMPLATES = 'templates/' +HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' + +BASE_RESOURCE_MAP = OrderedDict([ + (HAPROXY_CONF, { + 'contexts': [context.HAProxyContext(singlenode_mode=True), + ceph_radosgw_context.HAProxyContext()], + 'services': ['haproxy'], + }), +]) try: import jinja2 @@ -30,6 +45,23 @@ import dns.resolver +def resource_map(): + ''' + Dynamically generate a map of resources that will be managed for a single + hook execution. 
+ ''' + resource_map = deepcopy(BASE_RESOURCE_MAP) + return resource_map + + +def register_configs(release='icehouse'): + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) + for cfg, rscs in resource_map().iteritems(): + configs.register(cfg, rscs['contexts']) + return configs + + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( loader=jinja2.FileSystemLoader(template_dir) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 5c1473ba..85c72c5a 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -17,7 +17,7 @@ keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log {% if embedded_webserver %} -rgw frontends = civetweb port=80 +rgw frontends = civetweb port=70 {% else %} # Turn off 100-continue optimization as stock mod_fastcgi # does not support it diff --git a/ceph-radosgw/templates/rgw b/ceph-radosgw/templates/rgw index 7b3f8b6e..6101e1bc 100644 --- a/ceph-radosgw/templates/rgw +++ b/ceph-radosgw/templates/rgw @@ -2,7 +2,7 @@ FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock - + ServerName {{ hostname }} ServerAdmin ceph@ubuntu.com DocumentRoot /var/www diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/ceph-radosgw/tests/charmhelpers/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From e72e387cdc0e816fe82e4f8ee791f814029ffa7f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 15 Jan 2015 11:10:34 +0000 Subject: [PATCH 0603/2699] Update apache to listen on 70 rather than 80 to avoid clashing with haproxy --- ceph-radosgw/files/ports.conf | 11 +++++++++++ ceph-radosgw/hooks/hooks.py | 5 +++++ 2 files changed, 16 insertions(+) create mode 100644 ceph-radosgw/files/ports.conf diff --git a/ceph-radosgw/files/ports.conf b/ceph-radosgw/files/ports.conf new file mode 100644 index 00000000..83a775fe --- /dev/null +++ b/ceph-radosgw/files/ports.conf @@ -0,0 +1,11 @@ +Listen 70 + + + Listen 443 + + + + Listen 443 + + +# vim: syntax=apache ts=4 sw=4 sts=4 sr noet diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index c54b111a..338b1fe0 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -173,6 +173,10 @@ def apache_reload(): subprocess.call(['service', 'apache2', 'reload']) +def apache_ports(): + shutil.copy('files/ports.conf', '/etc/apache2/ports.conf') + + @hooks.hook('upgrade-charm', 'config-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], @@ -186,6 +190,7 @@ def config_changed(): install_www_scripts() apache_sites() apache_modules() + apache_ports() apache_reload() for r_id in relation_ids('identity-service'): 
identity_joined(relid=r_id)

From 2ae077dd9c738b270579b87585ea76fc38268604 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 15 Jan 2015 15:21:36 +0000
Subject: [PATCH 0604/2699] All nodes should register the same vip info so don't check on leader

---
ceph-radosgw/hooks/hooks.py | 2 --
1 file changed, 2 deletions(-)

diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index 338b1fe0..bce7668c 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -291,8 +291,6 @@ def identity_joined(relid=None):
if cmp_pkgrevno('radosgw', '0.55') < 0:
log('Integration with keystone requires ceph >= 0.55')
sys.exit(1)
- if not cluster.eligible_leader(CEPHRG_HA_RES):
- return
port = 80
admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port)

From 7b53456043bfb35eeb6524501f04a683de80d972 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 15 Jan 2015 15:43:50 +0000
Subject: [PATCH 0605/2699] Fix lint

---
ceph-radosgw/hooks/hooks.py | 3 +--
ceph-radosgw/hooks/utils.py | 1 -
2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index bce7668c..9b3699ac 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -13,7 +13,6 @@
import glob
import os
import ceph
-import charmhelpers.contrib.hahelpers.cluster as cluster
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
@@ -175,7 +174,7 @@ def apache_reload():
def apache_ports():
shutil.copy('files/ports.conf', '/etc/apache2/ports.conf')
-
+

@hooks.hook('upgrade-charm',
'config-changed')
diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index 25b2b173..8e30d809 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -15,7 +15,6 @@
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.openstack import context, templating
-from charmhelpers.contrib.openstack.utils import os_release

import ceph_radosgw_context

From 1102a91d20f18d1656d0b3e3ae52786609da4ba4 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 15 Jan 2015 16:18:33 +0000
Subject: [PATCH 0606/2699] Added comment to explain icehouse hardcoding

---
ceph-radosgw/hooks/utils.py | 2 ++
1 file changed, 2 insertions(+)

diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index 8e30d809..d9d5d3dd 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -53,6 +53,8 @@ def resource_map():
return resource_map

+# Hardcoded to icehouse to enable use of charmhelper templating/context tools
+# Ideally these functions would support non-OpenStack services
def register_configs(release='icehouse'):
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
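register_configs() leaves the charm with one renderer that knows every file in resource_map(). A companion pattern common in these charms, sketched here against the same structures (restart_map is not part of this patch):

    def restart_map():
        # Invert resource_map() into {config_file: [services]} so it can be
        # handed to @restart_on_change, yielding e.g.
        # {'/etc/haproxy/haproxy.cfg': ['haproxy']}
        return dict((cfg, rscs['services'])
                    for cfg, rscs in resource_map().iteritems())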
From 12dc2e397d6bb3a975865b2bd9936305dc3b5dd7 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Fri, 16 Jan 2015 10:54:36 +0000
Subject: [PATCH 0607/2699] [hopem,r=] Set kernel.pid_max to a high value to avoid problems with large clusters (> 20 OSDs) recovering.

---
ceph-proxy/config.yaml | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml
index 132c8f45..d087510c 100644
--- a/ceph-proxy/config.yaml
+++ b/ceph-proxy/config.yaml
@@ -157,10 +157,12 @@ options:
your network interface.
sysctl:
type: string
- default: ''
+ default: '{ kernel.pid_max : 2097152 }'
description: |
- persistently e.g. '{ kernel.pid_max : 4194303 }'.
+ persistently. As a default we set pid_max to a high value to avoid
+ problems with large numbers (>20) of OSDs recovering. Very large clusters
+ should set this value even higher (max 4194303).
nagios_context:
default: "juju"
description: |

From e62d5fb47f89bcea105ac0dfe8dec9c5e0e5cce5 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Fri, 16 Jan 2015 10:54:36 +0000
Subject: [PATCH 0608/2699] [hopem,r=] Set kernel.pid_max to a high value to avoid problems with large clusters (> 20 OSDs) recovering.

---
ceph-mon/config.yaml | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 132c8f45..d087510c 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -157,10 +157,12 @@ options:
your network interface.
sysctl:
type: string
- default: ''
+ default: '{ kernel.pid_max : 2097152 }'
description: |
- persistently e.g. '{ kernel.pid_max : 4194303 }'.
+ persistently. As a default we set pid_max to a high value to avoid
+ problems with large numbers (>20) of OSDs recovering. Very large clusters
+ should set this value even higher (max 4194303).
nagios_context:
default: "juju"
description: |

From 833d22ec10f27a7fa526775d6b95c071f7644c15 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Fri, 16 Jan 2015 10:56:36 +0000
Subject: [PATCH 0609/2699] [hopem,r=] Set kernel.pid_max to a high value to avoid problems with large clusters (> 20 OSDs) recovering.

---
ceph-osd/config.yaml | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index cf4809a0..ccaa8b46 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -123,10 +123,12 @@ options:
your network interface.
sysctl:
type: string
- default: ''
+ default: '{ kernel.pid_max : 2097152 }'
description: |
- persistently e.g. '{ kernel.pid_max : 4194303 }'.
+ persistently. As a default we set pid_max to a high value to avoid
+ problems with large numbers (>20) of OSDs recovering. Very large clusters
+ should set this value even higher (max 4194303).
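The new default is itself a small YAML document. charm-helpers carries a core.sysctl helper for this (it appears in the sync that follows); a minimal sketch of the idea, with an assumed helper name and target path:

    import yaml
    from subprocess import check_call

    def apply_sysctl(sysctl_yaml, sysctl_file='/etc/sysctl.d/50-ceph-charm.conf'):
        # '{ kernel.pid_max : 2097152 }' -> {'kernel.pid_max': 2097152}
        settings = yaml.safe_load(sysctl_yaml)
        with open(sysctl_file, 'w') as f:
            for key, value in settings.items():
                f.write('{}={}\n'.format(key, value))
        # load the new values persistently written above
        check_call(['sysctl', '-p', sysctl_file])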
nagios_context: default: "juju" description: | From 6eec22b0d85457385951521e3f0a8aa79ab5a9c0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Jan 2015 09:46:20 +0000 Subject: [PATCH 0610/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-proxy/hooks/charmhelpers/__init__.py | 16 +++++++++ .../hooks/charmhelpers/contrib/__init__.py | 15 ++++++++ .../contrib/charmsupport/__init__.py | 15 ++++++++ .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +++++++++ .../contrib/charmsupport/volumes.py | 16 +++++++++ .../charmhelpers/contrib/network/__init__.py | 15 ++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/alternatives.py | 16 +++++++++ .../charmhelpers/contrib/storage/__init__.py | 15 ++++++++ .../contrib/storage/linux/__init__.py | 15 ++++++++ .../contrib/storage/linux/ceph.py | 27 ++++++++++++++ .../contrib/storage/linux/utils.py | 16 +++++++++ .../hooks/charmhelpers/core/__init__.py | 15 ++++++++ .../hooks/charmhelpers/core/decorators.py | 16 +++++++++ ceph-proxy/hooks/charmhelpers/core/fstab.py | 16 +++++++++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 16 +++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 35 ++++++++++++++++--- .../charmhelpers/core/services/__init__.py | 16 +++++++++ .../hooks/charmhelpers/core/services/base.py | 16 +++++++++ .../charmhelpers/core/services/helpers.py | 16 +++++++++ ceph-proxy/hooks/charmhelpers/core/sysctl.py | 16 +++++++++ .../hooks/charmhelpers/core/templating.py | 16 +++++++++ .../hooks/charmhelpers/fetch/__init__.py | 16 +++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 16 +++++++++ ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 26 +++++++++++++- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 20 +++++++++++ .../hooks/charmhelpers/payload/__init__.py | 16 +++++++++ .../hooks/charmhelpers/payload/execd.py | 16 +++++++++ ceph-proxy/tests/charmhelpers/__init__.py | 16 +++++++++ .../tests/charmhelpers/contrib/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 16 +++++++++ .../charmhelpers/contrib/amulet/utils.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ 38 files changed, 636 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. 
import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f3a936d0..0fd0a9d8 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. # diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py index d61aa47f..320961b9 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 8dc83165..98b17544 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import glob import re import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py index b413259c..ef77caf3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Helper for managing alternatives for file conflict resolution ''' import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1479f4f3..31ea7f9e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # @@ -157,6 +173,17 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + def create_key_file(service, key): """Create a file containing key.""" keyfile = _keyfile_path(service) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index c6a15e14..c8373b72 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from stat import S_ISBLK diff --git a/ceph-proxy/hooks/charmhelpers/core/__init__.py b/ceph-proxy/hooks/charmhelpers/core/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/hooks/charmhelpers/core/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/hooks/charmhelpers/core/decorators.py b/ceph-proxy/hooks/charmhelpers/core/decorators.py index 029a4ef4..bb05620b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/decorators.py +++ b/ceph-proxy/hooks/charmhelpers/core/decorators.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2014 Canonical Ltd. # diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index 0adf0db3..be7de248 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import io diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 69ae4564..cf552b39 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. # diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 5221120c..cf2cbe14 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # @@ -168,10 +184,10 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -389,6 +405,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): * 0 => Installed revno is the same as supplied arg * -1 => Installed revno is less than supplied arg + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. ''' import apt_pkg if not pkgcache: @@ -407,13 +426,21 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group): +def chownr(path, owner, group, follow_links=True): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: - os.chown(full, uid, gid) + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py index 69dde79a..0928158b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index 87ecb130..c5534e4c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os import re import json diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 163a7932..5e3af9da 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import yaml from charmhelpers.core import hookenv diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py index 0f299630..d642a371 100644 --- a/ceph-proxy/hooks/charmhelpers/core/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import yaml diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 569eaed6..97669092 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.core import host diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index aceadea4..792e629a 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import importlib from tempfile import NamedTemporaryFile import time diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 8a4624b2..d25a0ddd 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import hashlib import re diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index 8ef48f30..3531315a 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -11,10 +27,12 @@ try: from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors except ImportError: from charmhelpers.fetch import apt_install apt_install("python-bzrlib") from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors class BzrUrlFetchHandler(BaseFetchHandler): @@ -34,9 +52,15 @@ def branch(self, source, dest): if url_parts.scheme == "lp": from bzrlib.plugin import load_plugins load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) try: remote_branch = Branch.open(source) - remote_branch.bzrdir.sprout(dest).open_branch() + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() except Exception as e: raise e diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index f3aa2821..5376786b 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -16,6 +32,8 @@ apt_install("python-git") from git import Repo +from git.exc import GitCommandError + class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" @@ -46,6 +64,8 @@ def install(self, source, branch="master", dest=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-proxy/hooks/charmhelpers/payload/__init__.py b/ceph-proxy/hooks/charmhelpers/payload/__init__.py index fc9fbc08..e6f42497 100644 --- a/ceph-proxy/hooks/charmhelpers/payload/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/payload/__init__.py @@ -1 +1,17 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Tools for working with files injected into a charm just before deployment." 
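The giturl.py hunk above tightens install()'s error handling: a failed clone now surfaces as UnhandledSource, the signal the fetch machinery uses to fall through to another handler, instead of escaping as a raw GitCommandError traceback. The bzrurl.py hunk reworks branch() in a similar spirit, creating or opening a local branch at the destination and pushing into it rather than sprouting, so fetching into an already-initialised directory no longer fails. Below is a minimal sketch of the git-side pattern, assuming GitPython is installed; fetch_repo and the UnhandledSource class here are illustrative stand-ins, not the charm-helpers API itself.

# Error-translation pattern from the GitUrlFetchHandler.install() hunk.
# Assumes GitPython; the names below are stand-ins for the charm-helpers ones.
from git import Repo
from git.exc import GitCommandError


class UnhandledSource(Exception):
    """Signals that this handler cannot fetch the given source."""


def fetch_repo(source, dest, branch='master'):
    try:
        # clone_from forwards branch= to `git clone --branch`.
        Repo.clone_from(source, dest, branch=branch)
    except GitCommandError as e:
        # The patch uses e.message, a Python 2 idiom; str(e) is the
        # portable spelling of the same thing.
        raise UnhandledSource(str(e))
    except OSError as e:
        raise UnhandledSource(e.strerror)
    return dest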
diff --git a/ceph-proxy/hooks/charmhelpers/payload/execd.py b/ceph-proxy/hooks/charmhelpers/payload/execd.py index 6476a75f..4d4d81a6 100644 --- a/ceph-proxy/hooks/charmhelpers/payload/execd.py +++ b/ceph-proxy/hooks/charmhelpers/payload/execd.py @@ -1,5 +1,21 @@ #!/usr/bin/env python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import sys import subprocess diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-proxy/tests/charmhelpers/__init__.py +++ b/ceph-proxy/tests/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/ceph-proxy/tests/charmhelpers/contrib/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/__init__.py +++ b/ceph-proxy/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 3d3ef339..367d6b47 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import amulet import os import six diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index d333e63b..3464b873 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import ConfigParser import io import logging diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
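Back in the storage/linux/ceph.py hunk earlier in this patch, delete_keyring is the new inverse of create_keyring: a missing keyring is treated as a warning-level no-op rather than an error, which keeps teardown hooks idempotent. A standalone sketch of that shape follows; the path layout and log stub are assumptions for illustration, not the real charm-helpers implementations.

import os


def log(msg, level='INFO'):
    # Stand-in for charmhelpers.core.hookenv.log().
    print('[%s] %s' % (level, msg))


def _keyring_path(service):
    # Assumed layout; charm-helpers derives a per-service keyring path.
    return '/etc/ceph/ceph.client.%s.keyring' % service


def delete_keyring(service):
    keyring = _keyring_path(service)
    if not os.path.exists(keyring):
        # Nothing to do: deleting an absent keyring should not fail the hook.
        log('Keyring does not exist at %s' % keyring, level='WARNING')
        return
    os.remove(keyring)
    log('Deleted ring at %s.' % keyring, level='INFO')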
diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import logging import os import time From 61cb904ce429544a82e6837d40ea02e015a825b1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Jan 2015 09:46:20 +0000 Subject: [PATCH 0611/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-mon/hooks/charmhelpers/__init__.py | 16 +++++++++ .../hooks/charmhelpers/contrib/__init__.py | 15 ++++++++ .../contrib/charmsupport/__init__.py | 15 ++++++++ .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +++++++++ .../contrib/charmsupport/volumes.py | 16 +++++++++ .../charmhelpers/contrib/network/__init__.py | 15 ++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/alternatives.py | 16 +++++++++ .../charmhelpers/contrib/storage/__init__.py | 15 ++++++++ .../contrib/storage/linux/__init__.py | 15 ++++++++ .../contrib/storage/linux/ceph.py | 27 ++++++++++++++ .../contrib/storage/linux/utils.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/core/__init__.py | 15 ++++++++ .../hooks/charmhelpers/core/decorators.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/core/fstab.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 35 ++++++++++++++++--- .../charmhelpers/core/services/__init__.py | 16 +++++++++ .../hooks/charmhelpers/core/services/base.py | 16 +++++++++ .../charmhelpers/core/services/helpers.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/core/sysctl.py | 16 +++++++++ .../hooks/charmhelpers/core/templating.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/fetch/__init__.py | 16 +++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 26 +++++++++++++- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 20 +++++++++++ .../hooks/charmhelpers/payload/__init__.py | 16 +++++++++ ceph-mon/hooks/charmhelpers/payload/execd.py | 16 +++++++++ ceph-mon/tests/charmhelpers/__init__.py | 16 +++++++++ .../tests/charmhelpers/contrib/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 16 +++++++++ .../charmhelpers/contrib/amulet/utils.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ 38 files changed, 636 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. 
import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f3a936d0..0fd0a9d8 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. # diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py index d61aa47f..320961b9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 8dc83165..98b17544 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import glob import re import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py index b413259c..ef77caf3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Helper for managing alternatives for file conflict resolution ''' import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1479f4f3..31ea7f9e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # @@ -157,6 +173,17 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + def create_key_file(service, key): """Create a file containing key.""" keyfile = _keyfile_path(service) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index c6a15e14..c8373b72 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from stat import S_ISBLK diff --git a/ceph-mon/hooks/charmhelpers/core/__init__.py b/ceph-mon/hooks/charmhelpers/core/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/hooks/charmhelpers/core/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/core/decorators.py b/ceph-mon/hooks/charmhelpers/core/decorators.py index 029a4ef4..bb05620b 100644 --- a/ceph-mon/hooks/charmhelpers/core/decorators.py +++ b/ceph-mon/hooks/charmhelpers/core/decorators.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2014 Canonical Ltd. # diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index 0adf0db3..be7de248 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import io diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 69ae4564..cf552b39 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. # diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 5221120c..cf2cbe14 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # @@ -168,10 +184,10 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -389,6 +405,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): * 0 => Installed revno is the same as supplied arg * -1 => Installed revno is less than supplied arg + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. ''' import apt_pkg if not pkgcache: @@ -407,13 +426,21 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group): +def chownr(path, owner, group, follow_links=True): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: - os.chown(full, uid, gid) + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py index 69dde79a..0928158b 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index 87ecb130..c5534e4c 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os import re import json diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 163a7932..5e3af9da 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import yaml from charmhelpers.core import hookenv diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index 0f299630..d642a371 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import yaml diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 569eaed6..97669092 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.core import host diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index aceadea4..792e629a 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import importlib from tempfile import NamedTemporaryFile import time diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 8a4624b2..d25a0ddd 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import hashlib import re diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index 8ef48f30..3531315a 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -11,10 +27,12 @@ try: from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors except ImportError: from charmhelpers.fetch import apt_install apt_install("python-bzrlib") from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors class BzrUrlFetchHandler(BaseFetchHandler): @@ -34,9 +52,15 @@ def branch(self, source, dest): if url_parts.scheme == "lp": from bzrlib.plugin import load_plugins load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) try: remote_branch = Branch.open(source) - remote_branch.bzrdir.sprout(dest).open_branch() + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() except Exception as e: raise e diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index f3aa2821..5376786b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -16,6 +32,8 @@ apt_install("python-git") from git import Repo +from git.exc import GitCommandError + class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" @@ -46,6 +64,8 @@ def install(self, source, branch="master", dest=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-mon/hooks/charmhelpers/payload/__init__.py b/ceph-mon/hooks/charmhelpers/payload/__init__.py index fc9fbc08..e6f42497 100644 --- a/ceph-mon/hooks/charmhelpers/payload/__init__.py +++ b/ceph-mon/hooks/charmhelpers/payload/__init__.py @@ -1 +1,17 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Tools for working with files injected into a charm just before deployment." 
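The bzrurl change above replaces the old one-shot sprout of the remote branch with a push into a locally created branch followed by a working-tree update, so fetching into a destination that already exists updates it in place instead of failing. A minimal sketch of the new flow, assuming bzrlib is importable and using hypothetical source/dest paths (it mirrors the calls in the diff; it is not part of the charm itself):

    # Sketch of the reworked BzrUrlFetchHandler.branch() logic.
    from bzrlib.branch import Branch
    from bzrlib import bzrdir, workingtree, errors

    def fetch_bzr_branch(source, dest):
        # Create a local branch at dest, or reuse one left by a prior run.
        try:
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            local_branch = Branch.open(dest)
        # Mirror the remote history into the local branch, then bring the
        # checked-out working tree up to date with it.
        remote_branch = Branch.open(source)
        remote_branch.push(local_branch)
        workingtree.WorkingTree.open(dest).update()

    # Hypothetical paths, for illustration only:
    fetch_bzr_branch('/srv/mirrors/charm-helpers', '/tmp/charm-helpers')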
diff --git a/ceph-mon/hooks/charmhelpers/payload/execd.py b/ceph-mon/hooks/charmhelpers/payload/execd.py index 6476a75f..4d4d81a6 100644 --- a/ceph-mon/hooks/charmhelpers/payload/execd.py +++ b/ceph-mon/hooks/charmhelpers/payload/execd.py @@ -1,5 +1,21 @@ #!/usr/bin/env python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import sys import subprocess diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-mon/tests/charmhelpers/__init__.py +++ b/ceph-mon/tests/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/ceph-mon/tests/charmhelpers/contrib/__init__.py b/ceph-mon/tests/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/tests/charmhelpers/contrib/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 3d3ef339..367d6b47 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import amulet import os import six diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index d333e63b..3464b873 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import ConfigParser import io import logging diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
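The giturl change in this sync wraps the clone in a GitCommandError handler and re-raises the failure as UnhandledSource, so a bad URL or branch is reported through the fetch machinery rather than escaping as a raw GitPython error. A short sketch of the pattern, assuming GitPython is installed; UnhandledSource is stubbed here to stand in for the charmhelpers.fetch exception:

    # Sketch of the error handling added to GitUrlFetchHandler.install().
    from git import Repo
    from git.exc import GitCommandError

    class UnhandledSource(Exception):
        """Stand-in for charmhelpers.fetch.UnhandledSource."""

    def git_install(source, dest_dir, branch='master'):
        try:
            # Repo.clone_from is the call the handler's clone() drives.
            Repo.clone_from(source, dest_dir, branch=branch)
        except GitCommandError as e:
            # The synced code uses e.message, which exists only on
            # Python 2; str(e) is the portable spelling.
            raise UnhandledSource(str(e))
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir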
diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import logging import os import time From 44d1bedbc6b1b4c511299a3331d6b217e17c94ef Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Jan 2015 11:51:28 +0000 Subject: [PATCH 0612/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-osd/hooks/charmhelpers/__init__.py | 16 +++++++++ .../hooks/charmhelpers/contrib/__init__.py | 15 ++++++++ .../contrib/charmsupport/__init__.py | 15 ++++++++ .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +++++++++ .../contrib/charmsupport/volumes.py | 16 +++++++++ .../charmhelpers/contrib/network/__init__.py | 15 ++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/alternatives.py | 16 +++++++++ .../charmhelpers/contrib/storage/__init__.py | 15 ++++++++ .../contrib/storage/linux/__init__.py | 15 ++++++++ .../contrib/storage/linux/utils.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/core/__init__.py | 15 ++++++++ .../hooks/charmhelpers/core/decorators.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/core/fstab.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/core/host.py | 35 ++++++++++++++++--- .../charmhelpers/core/services/__init__.py | 16 +++++++++ .../hooks/charmhelpers/core/services/base.py | 16 +++++++++ .../charmhelpers/core/services/helpers.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/core/sysctl.py | 16 +++++++++ .../hooks/charmhelpers/core/templating.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/fetch/__init__.py | 16 +++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 16 +++++++++ ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 26 +++++++++++++- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 20 +++++++++++ ceph-osd/tests/charmhelpers/__init__.py | 16 +++++++++ .../tests/charmhelpers/contrib/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 16 +++++++++ .../charmhelpers/contrib/amulet/utils.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ 35 files changed, 577 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. 
import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f3a936d0..0fd0a9d8 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. # diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py index d61aa47f..320961b9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 8dc83165..98b17544 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import glob import re import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py index b413259c..ef77caf3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Helper for managing alternatives for file conflict resolution ''' import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index c6a15e14..c8373b72 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from stat import S_ISBLK diff --git a/ceph-osd/hooks/charmhelpers/core/__init__.py b/ceph-osd/hooks/charmhelpers/core/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/hooks/charmhelpers/core/__init__.py +++ b/ceph-osd/hooks/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/hooks/charmhelpers/core/decorators.py b/ceph-osd/hooks/charmhelpers/core/decorators.py index 029a4ef4..bb05620b 100644 --- a/ceph-osd/hooks/charmhelpers/core/decorators.py +++ b/ceph-osd/hooks/charmhelpers/core/decorators.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2014 Canonical Ltd. # diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index 0adf0db3..be7de248 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. 
' import io diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 69ae4564..cf552b39 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. # diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 5221120c..cf2cbe14 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # @@ -168,10 +184,10 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -389,6 +405,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): * 0 => Installed revno is the same as supplied arg * -1 => Installed revno is less than supplied arg + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. 
''' import apt_pkg if not pkgcache: @@ -407,13 +426,21 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group): +def chownr(path, owner, group, follow_links=True): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: - os.chown(full, uid, gid) + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/ceph-osd/hooks/charmhelpers/core/services/__init__.py b/ceph-osd/hooks/charmhelpers/core/services/__init__.py index 69dde79a..0928158b 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-osd/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index 87ecb130..c5534e4c 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re import json diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 163a7932..5e3af9da 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. 
If not, see . + import os import yaml from charmhelpers.core import hookenv diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index 0f299630..d642a371 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import yaml diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 569eaed6..97669092 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.core import host diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index aceadea4..792e629a 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import importlib from tempfile import NamedTemporaryFile import time diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 8a4624b2..d25a0ddd 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import hashlib import re diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index 8ef48f30..3531315a 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -11,10 +27,12 @@ try: from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors except ImportError: from charmhelpers.fetch import apt_install apt_install("python-bzrlib") from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors class BzrUrlFetchHandler(BaseFetchHandler): @@ -34,9 +52,15 @@ def branch(self, source, dest): if url_parts.scheme == "lp": from bzrlib.plugin import load_plugins load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) try: remote_branch = Branch.open(source) - remote_branch.bzrdir.sprout(dest).open_branch() + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() except Exception as e: raise e diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index f3aa2821..5376786b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -16,6 +32,8 @@ apt_install("python-git") from git import Repo +from git.exc import GitCommandError + class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" @@ -46,6 +64,8 @@ def install(self, source, branch="master", dest=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-osd/tests/charmhelpers/__init__.py +++ b/ceph-osd/tests/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/ceph-osd/tests/charmhelpers/contrib/__init__.py b/ceph-osd/tests/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/tests/charmhelpers/contrib/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
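The host.py changes in this sync adjust two helpers. mkdir() now applies os.chown() and an explicit os.chmod() on every path through the function, since os.makedirs() filters its mode argument through the umask and would otherwise leave the requested perms unapplied. chownr() gains a follow_links flag that selects between os.chown and os.lchown for the recursive walk, and lchownr() is the convenience spelling for the no-follow case. A usage sketch (the path, user, and group are hypothetical):

    from charmhelpers.core.host import chownr, lchownr

    # Recursive chown that dereferences symlinks (the prior behaviour,
    # still the default via follow_links=True):
    chownr('/var/lib/ceph', owner='ceph', group='ceph')

    # Recursive chown acting on the symlinks themselves (os.lchown),
    # leaving the link targets untouched:
    lchownr('/var/lib/ceph', owner='ceph', group='ceph')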
diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index 3d3ef339..367d6b47 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import amulet import os import six diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index d333e63b..3464b873 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import ConfigParser import io import logging diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import logging import os import time From a4d15e9425f73b611755475c502e37ea25b559a3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Jan 2015 11:53:19 +0000 Subject: [PATCH 0613/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-radosgw/hooks/charmhelpers/__init__.py | 16 +++++++++ .../hooks/charmhelpers/contrib/__init__.py | 15 ++++++++ .../contrib/hahelpers/__init__.py | 15 ++++++++ .../charmhelpers/contrib/hahelpers/apache.py | 16 +++++++++ .../charmhelpers/contrib/hahelpers/cluster.py | 22 +++++++++++- .../charmhelpers/contrib/network/__init__.py | 15 ++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/alternatives.py | 16 +++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ .../charmhelpers/contrib/openstack/context.py | 16 +++++++++ .../charmhelpers/contrib/openstack/ip.py | 16 +++++++++ .../charmhelpers/contrib/openstack/neutron.py | 16 +++++++++ .../contrib/openstack/templates/__init__.py | 16 +++++++++ .../contrib/openstack/templating.py | 16 +++++++++ .../charmhelpers/contrib/openstack/utils.py | 16 +++++++++ .../charmhelpers/contrib/python/__init__.py | 15 ++++++++ .../charmhelpers/contrib/python/packages.py | 21 ++++++++++- .../charmhelpers/contrib/storage/__init__.py | 15 ++++++++ .../contrib/storage/linux/__init__.py | 15 ++++++++ .../contrib/storage/linux/ceph.py | 16 +++++++++ .../contrib/storage/linux/loopback.py | 16 +++++++++ .../charmhelpers/contrib/storage/linux/lvm.py | 16 +++++++++ .../contrib/storage/linux/utils.py | 16 +++++++++ .../hooks/charmhelpers/core/__init__.py | 15 ++++++++ .../hooks/charmhelpers/core/decorators.py | 16 +++++++++ ceph-radosgw/hooks/charmhelpers/core/fstab.py | 16 +++++++++ .../hooks/charmhelpers/core/hookenv.py | 16 +++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 35 ++++++++++++++++--- .../charmhelpers/core/services/__init__.py | 16 +++++++++ .../hooks/charmhelpers/core/services/base.py | 16 +++++++++ .../charmhelpers/core/services/helpers.py | 16 +++++++++ .../hooks/charmhelpers/core/sysctl.py | 16 +++++++++ .../hooks/charmhelpers/core/templating.py | 16 +++++++++ .../hooks/charmhelpers/fetch/__init__.py | 16 +++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 16 +++++++++ .../hooks/charmhelpers/fetch/bzrurl.py | 26 +++++++++++++- .../hooks/charmhelpers/fetch/giturl.py | 20 +++++++++++ .../hooks/charmhelpers/payload/__init__.py | 16 +++++++++ .../hooks/charmhelpers/payload/execd.py | 16 +++++++++ ceph-radosgw/tests/charmhelpers/__init__.py | 16 +++++++++ .../tests/charmhelpers/contrib/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/__init__.py | 15 ++++++++ .../charmhelpers/contrib/amulet/deployment.py | 16 +++++++++ .../charmhelpers/contrib/amulet/utils.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ 51 files changed, 840 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 6616ffff..00917195 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 912b2fe3..9a2588b6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # @@ -205,19 +221,23 @@ def determine_apache_port(public_port, singlenode_mode=False): return public_port - (i * 10) -def get_hacluster_config(): +def get_hacluster_config(exclude_keys=None): ''' Obtains all relevant configuration from charm configuration required for initiating a relation to hacluster: ha-bindiface, ha-mcastport, vip + param: exclude_keys: list of setting key(s) to be excluded. returns: dict: A dict containing settings keyed by setting name. raises: HAIncompleteConfig if settings are missing. ''' settings = ['ha-bindiface', 'ha-mcastport', 'vip'] conf = {} for setting in settings: + if exclude_keys and setting in exclude_keys: + continue + conf[setting] = config_get(setting) missing = [] [missing.append(s) for s, v in six.iteritems(conf) if v is None] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 8dc83165..98b17544 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import glob import re import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py index b413259c..ef77caf3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Helper for managing alternatives for file conflict resolution ''' import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
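Earlier in this sync, the hahelpers/cluster.py hunk extends get_hacluster_config() with an exclude_keys filter so a charm can skip settings it derives itself rather than reading them from charm config. A minimal usage sketch (the excluded key is illustrative; assumes the synced charm-helpers tree is importable from a hook):

    from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config

    # Skip 'vip' because this charm computes it elsewhere; only
    # ha-bindiface and ha-mcastport are read via config_get().
    conf = get_hacluster_config(exclude_keys=['vip'])

Since excluded settings never enter the returned dict, the missing-value check ignores them too, and HAIncompleteConfig is raised only for keys the caller actually requested.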
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import logging import os import time diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index eaa89a67..c7c4cd4a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import json import os import time diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index f062c807..9eabed73 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from charmhelpers.core.hookenv import ( config, unit_get, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 095cc24b..902757fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Various utilies for dealing with Neutron and the renaming from Quantum. from subprocess import check_output diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py index 0b49ad28..75876796 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # dummy __init__.py to fool syncer into thinking this is a syncable python # module diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 33df0675..24cb272b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import six diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index ddd40ce5..26259a03 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,21 @@ #!/usr/bin/python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Common python helper functions used for OpenStack charms. from collections import OrderedDict from functools import wraps diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index 78162b1b..d848a120 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # coding: utf-8 +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ __author__ = "Jorge Niedbalski " from charmhelpers.fetch import apt_install, apt_update @@ -35,7 +51,7 @@ def pip_install_requirements(requirements, **options): pip_execute(command) -def pip_install(package, fatal=False, **options): +def pip_install(package, fatal=False, upgrade=False, **options): """Install a python package""" command = ["install"] @@ -43,6 +59,9 @@ def pip_install(package, fatal=False, **options): for option in parse_options(options, available_options): command.append(option) + if upgrade: + command.append('--upgrade') + if isinstance(package, list): command.extend(package) else: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 6ebeab5c..31ea7f9e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. 
If not, see . + # # Copyright 2012 Canonical Ltd. # diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index a22c3d7b..c296f098 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from subprocess import ( diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index 0aa65f4f..34b5f71a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from subprocess import ( CalledProcessError, check_call, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index c6a15e14..c8373b72 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from stat import S_ISBLK diff --git a/ceph-radosgw/hooks/charmhelpers/core/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/core/decorators.py b/ceph-radosgw/hooks/charmhelpers/core/decorators.py index 029a4ef4..bb05620b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/decorators.py +++ b/ceph-radosgw/hooks/charmhelpers/core/decorators.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2014 Canonical Ltd. # diff --git a/ceph-radosgw/hooks/charmhelpers/core/fstab.py b/ceph-radosgw/hooks/charmhelpers/core/fstab.py index 0adf0db3..be7de248 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/fstab.py +++ b/ceph-radosgw/hooks/charmhelpers/core/fstab.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import io diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 69ae4564..cf552b39 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. # diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 5221120c..cf2cbe14 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # @@ -168,10 +184,10 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -389,6 +405,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): * 0 => Installed revno is the same as supplied arg * -1 => Installed revno is less than supplied arg + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. ''' import apt_pkg if not pkgcache: @@ -407,13 +426,21 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group): +def chownr(path, owner, group, follow_links=True): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: - os.chown(full, uid, gid) + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py index 69dde79a..0928158b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index 87ecb130..c5534e4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re import json diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 163a7932..5e3af9da 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import yaml from charmhelpers.core import hookenv diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py index 0f299630..d642a371 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import yaml diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 569eaed6..97669092 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.core import host diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index aceadea4..792e629a 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import importlib from tempfile import NamedTemporaryFile import time diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 8a4624b2..d25a0ddd 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import hashlib import re diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index 8ef48f30..3531315a 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -11,10 +27,12 @@ try: from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors except ImportError: from charmhelpers.fetch import apt_install apt_install("python-bzrlib") from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors class BzrUrlFetchHandler(BaseFetchHandler): @@ -34,9 +52,15 @@ def branch(self, source, dest): if url_parts.scheme == "lp": from bzrlib.plugin import load_plugins load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) try: remote_branch = Branch.open(source) - remote_branch.bzrdir.sprout(dest).open_branch() + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() except Exception as e: raise e diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index f3aa2821..5376786b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -16,6 +32,8 @@ apt_install("python-git") from git import Repo +from git.exc import GitCommandError + class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" @@ -46,6 +64,8 @@ def install(self, source, branch="master", dest=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-radosgw/hooks/charmhelpers/payload/__init__.py b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py index fc9fbc08..e6f42497 100644 --- a/ceph-radosgw/hooks/charmhelpers/payload/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py @@ -1 +1,17 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Tools for working with files injected into a charm just before deployment." 
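The giturl.py hunk above maps GitPython's GitCommandError onto the fetch layer's UnhandledSource, so an unreachable URL or a missing branch fails the same way as any other unusable source. A rough sketch of that pattern, assuming GitPython is installed (the function name and error type below are illustrative, not the charm's real code):

    from git import Repo
    from git.exc import GitCommandError

    def fetch_branch(source, dest, branch='master'):
        # Translate the library's failure into a single expected error
        # type instead of leaking GitPython internals to the caller.
        try:
            Repo.clone_from(source, dest, branch=branch)
        except GitCommandError as exc:
            raise RuntimeError('unhandled source: %s' % exc)

The bzrurl.py hunk is similar in spirit: instead of sprouting the remote branch into dest, it creates (or reopens) a local branch at dest and pushes into it, which also copes with dest already being a bzr control directory.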
diff --git a/ceph-radosgw/hooks/charmhelpers/payload/execd.py b/ceph-radosgw/hooks/charmhelpers/payload/execd.py index 6476a75f..4d4d81a6 100644 --- a/ceph-radosgw/hooks/charmhelpers/payload/execd.py +++ b/ceph-radosgw/hooks/charmhelpers/payload/execd.py @@ -1,5 +1,21 @@ #!/usr/bin/env python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import sys import subprocess diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/ceph-radosgw/tests/charmhelpers/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py index 3d3ef339..367d6b47 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import amulet import os import six diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index d333e63b..3464b873 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import ConfigParser import io import logging diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import logging import os import time From 97ed8b71308f1843ee100610bdfc785d3c6140e4 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 29 Jan 2015 11:14:06 -0500 Subject: [PATCH 0614/2699] Switch amulet tests to use stable branches --- ceph-proxy/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index d073d08b..b04bb60b 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -17,7 +17,7 @@ class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" - def __init__(self, series=None, openstack=None, source=None, stable=False): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Deploy the entire test environment.""" super(CephBasicDeployment, self).__init__(series, openstack, source, stable) From fa28743b4d1c18669c739458860132b20e9acccb Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 29 Jan 2015 11:14:06 -0500 Subject: [PATCH 0615/2699] Switch amulet tests to use stable branches --- ceph-mon/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index d073d08b..b04bb60b 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -17,7 +17,7 @@ class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" - def __init__(self, series=None, openstack=None, source=None, stable=False): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Deploy the entire test environment.""" super(CephBasicDeployment, self).__init__(series, openstack, source, stable) From 197164919c8c41103ffd849533439f8643679be1 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 29 Jan 2015 11:14:30 -0500 Subject: [PATCH 0616/2699] Switch amulet tests to use stable branches --- ceph-osd/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 2f0542b2..65ffca36 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -18,7 +18,7 @@ class CephOsdBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph-osd deployment.""" def __init__(self, series=None, openstack=None, source=None, - stable=False): + stable=True): """Deploy the entire test environment.""" super(CephOsdBasicDeployment, self).__init__(series, openstack, source, stable) From 0164a757717ecf94f4d862c7cddd0cc2db4a2f9f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 4 Feb 2015 15:23:38 +0000 Subject: [PATCH 0617/2699] [gnuoy,trivial] Fix charm proof warning --- ceph-osd/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index ccaa8b46..8034a6d8 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -131,6 +131,7 @@ options: should set this value even higher (max 4194303). nagios_context: default: "juju" + type: string description: | Used by the nrpe-external-master subordinate charm. 
A string that will be prepended to instance name to set the host name From a5804e3859ee2581fa343fe95a4d41b3f3a6fb6f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 4 Feb 2015 15:31:08 +0000 Subject: [PATCH 0618/2699] [gnuoy,trivial] Fix charm proof warning --- ceph-proxy/config.yaml | 1 + ceph-proxy/metadata.yaml | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d087510c..a1d44461 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -165,6 +165,7 @@ options: should set this value even higher (max 4194303). nagios_context: default: "juju" + type: string description: | Used by the nrpe-external-master subordinate charm. A string that will be prepended to instance name to set the host name diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 9fab75a8..e918708e 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -22,4 +22,3 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container - gets: [nagios_hostname, nagios_host_context] From 8dcbe6e29edd38c94ca79791f78f0c026201efbe Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 4 Feb 2015 15:31:08 +0000 Subject: [PATCH 0619/2699] [gnuoy,trivial] Fix charm proof warning --- ceph-mon/config.yaml | 1 + ceph-mon/metadata.yaml | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d087510c..a1d44461 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -165,6 +165,7 @@ options: should set this value even higher (max 4194303). nagios_context: default: "juju" + type: string description: | Used by the nrpe-external-master subordinate charm. A string that will be prepended to instance name to set the host name diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 9fab75a8..e918708e 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -22,4 +22,3 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container - gets: [nagios_hostname, nagios_host_context] From 482b009c7616e5e629c206b0325f5e01e025b295 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 10 Feb 2015 08:05:05 +0000 Subject: [PATCH 0620/2699] Misc ceph test fixes --- ceph-radosgw/.coveragerc | 2 ++ ceph-radosgw/Makefile | 3 +-- ceph-radosgw/unit_tests/test_ceph.py | 7 ++++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/.coveragerc b/ceph-radosgw/.coveragerc index 0e6369e1..fcfbae91 100644 --- a/ceph-radosgw/.coveragerc +++ b/ceph-radosgw/.coveragerc @@ -4,3 +4,5 @@ exclude_lines = if __name__ == .__main__.: include= hooks/ceph.py + hooks/hooks.py + hooks/utils.py diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 02f8c75d..562ab110 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -6,8 +6,7 @@ lint: @charm proof unit_test: - @$(PYTHON) /usr/bin/nosetests unit_tests -# @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests test: @echo Starting Amulet tests... 
diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 2e096082..722b0f0d 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -121,4 +121,9 @@ def test_import_osd_bootstrap_key(self): ] ceph.import_osd_bootstrap_key('mykey') self.subprocess.check_call.assert_called_with(cmd) - + + def test_is_bootstrapped(self): + self.os.path.exists.return_value = True + self.assertEqual(ceph.is_bootstrapped(), True) + self.os.path.exists.return_value = False + self.assertEqual(ceph.is_bootstrapped(), False) From 20a3e71bd91e94455f186ae3dc3ffce4ead87b6e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 10 Feb 2015 10:30:13 +0000 Subject: [PATCH 0621/2699] Finish unit tests and fix lint --- ceph-radosgw/.coveragerc | 1 - ceph-radosgw/Makefile | 2 +- ceph-radosgw/hooks/utils.py | 2 +- ceph-radosgw/unit_tests/test_ceph.py | 112 +++++++++++++++---- ceph-radosgw/unit_tests/test_hooks.py | 151 ++++++++++++++++++++------ 5 files changed, 208 insertions(+), 60 deletions(-) diff --git a/ceph-radosgw/.coveragerc b/ceph-radosgw/.coveragerc index fcfbae91..61e98080 100644 --- a/ceph-radosgw/.coveragerc +++ b/ceph-radosgw/.coveragerc @@ -5,4 +5,3 @@ exclude_lines = include= hooks/ceph.py hooks/hooks.py - hooks/utils.py diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 562ab110..0af1d0c2 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -2,7 +2,7 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests + @flake8 --exclude hooks/charmhelpers hooks tests unit_tests @charm proof unit_test: diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b7793bbd..7c96bfb1 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -86,7 +86,7 @@ def enable_pocket(pocket): def get_host_ip(hostname=None): try: if not hostname: - hostname=unit_get('private-address') + hostname = unit_get('private-address') # Test to see if already an IPv4 address socket.inet_aton(hostname) return hostname diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 722b0f0d..04459769 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -1,5 +1,4 @@ -from mock import call, patch, MagicMock -from test_utils import CharmTestCase, patch_open +from test_utils import CharmTestCase import ceph @@ -10,26 +9,34 @@ 'time', ] + class CephRadosGWCephTests(CharmTestCase): def setUp(self): super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) def test_is_quorum_leader(self): + self.os.path.exists.return_value = True self.get_unit_hostname.return_value = 'myhost' self.subprocess.check_output.return_value = '{"state": "leader"}' self.assertEqual(ceph.is_quorum(), True) def test_is_quorum_notleader(self): + self.os.path.exists.return_value = True self.get_unit_hostname.return_value = 'myhost' self.subprocess.check_output.return_value = '{"state": "notleader"}' self.assertEqual(ceph.is_quorum(), False) def test_is_quorum_valerror(self): + self.os.path.exists.return_value = True self.get_unit_hostname.return_value = 'myhost' self.subprocess.check_output.return_value = "'state': 'bob'}" self.assertEqual(ceph.is_quorum(), False) + def test_is_quorum_no_asok(self): + self.os.path.exists.return_value = False + self.assertEqual(ceph.is_quorum(), False) + def test_is_leader(self): self.get_unit_hostname.return_value = 'myhost' self.os.path.exists.return_value = True @@ -53,18 +60,31 @@ def 
test_is_leader_noasok(self): self.os.path.exists.return_value = False self.assertEqual(ceph.is_leader(), False) -# def test_wait_for_quorum_yes(self): -# _is_quorum = self.patch('is_quorum') -# _is_quorum.return_value = False -# self.time.return_value = None -# ceph.wait_for_quorum() -# self.time.sleep.assert_called_with(3) + def test_wait_for_quorum_yes(self): + results = [True, False] + + def quorum(): + return results.pop() + _is_quorum = self.patch('is_quorum') + _is_quorum.side_effect = quorum + ceph.wait_for_quorum() + self.time.sleep.assert_called_with(3) -# def test_wait_for_quorum_no(self): -# _is_quorum = self.patch('is_quorum') -# _is_quorum.return_value = True -# ceph.wait_for_quorum() -# self.assertFalse(self.time.sleep.called) + def test_wait_for_quorum_no(self): + _is_quorum = self.patch('is_quorum') + _is_quorum.return_value = True + ceph.wait_for_quorum() + self.assertFalse(self.time.sleep.called) + + def test_wait_for_bootstrap(self): + results = [True, False] + + def bootstrapped(): + return results.pop() + _is_bootstrapped = self.patch('is_bootstrapped') + _is_bootstrapped.side_effect = bootstrapped + ceph.wait_for_bootstrap() + self.time.sleep.assert_called_with(3) def test_add_bootstrap_hint(self): self.get_unit_hostname.return_value = 'myhost' @@ -77,7 +97,7 @@ def test_add_bootstrap_hint(self): ] self.os.path.exists.return_value = True ceph.add_bootstrap_hint('mypeer') - self.subprocess.call.assert_called_with(cmd) + self.subprocess.call.assert_called_with(cmd) def test_add_bootstrap_hint_noasok(self): self.get_unit_hostname.return_value = 'myhost' @@ -87,14 +107,16 @@ def test_add_bootstrap_hint_noasok(self): def test_is_osd_disk(self): # XXX Insert real sgdisk output - self.subprocess.check_output.return_value = 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + self.subprocess.check_output.return_value = \ + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), True) - + def test_is_osd_disk_no(self): # XXX Insert real sgdisk output - self.subprocess.check_output.return_value = 'Partition GUID code: 5FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + self.subprocess.check_output.return_value = \ + 'Partition GUID code: 5FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), False) - + def test_rescan_osd_devices(self): cmd = [ 'udevadm', 'trigger', @@ -102,7 +124,7 @@ def test_rescan_osd_devices(self): ] ceph.rescan_osd_devices() self.subprocess.call.assert_called_with(cmd) - + def test_zap_disk(self): cmd = [ 'sgdisk', '--zap-all', '/dev/fmd0', @@ -121,9 +143,55 @@ def test_import_osd_bootstrap_key(self): ] ceph.import_osd_bootstrap_key('mykey') self.subprocess.check_call.assert_called_with(cmd) - + def test_is_bootstrapped(self): self.os.path.exists.return_value = True - self.assertEqual(ceph.is_bootstrapped(), True) + self.assertEqual(ceph.is_bootstrapped(), True) self.os.path.exists.return_value = False - self.assertEqual(ceph.is_bootstrapped(), False) + self.assertEqual(ceph.is_bootstrapped(), False) + + def test_import_radosgw_key(self): + self.os.path.exists.return_value = False + ceph.import_radosgw_key('mykey') + cmd = [ + 'ceph-authtool', + '/etc/ceph/keyring.rados.gateway', + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key=mykey' + ] + self.subprocess.check_call.assert_called_with(cmd) + + def test_get_named_key_create(self): + self.get_unit_hostname.return_value = "myhost" + self.subprocess.check_output.return_value = """ + +[client.dummy] + key = 
AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ== +""" + self.assertEqual(ceph.get_named_key('dummy'), + 'AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ==') + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-myhost/keyring', + 'auth', 'get-or-create', 'client.dummy', + 'mon', 'allow r', 'osd', 'allow rwx' + ] + self.subprocess.check_output.assert_called_with(cmd) + + def test_get_named_key_get(self): + self.get_unit_hostname.return_value = "myhost" + key = "AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ==" + self.subprocess.check_output.return_value = key + self.assertEqual(ceph.get_named_key('dummy'), key) + cmd = [ + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-myhost/keyring', + 'auth', 'get-or-create', 'client.dummy', + 'mon', 'allow r', 'osd', 'allow rwx' + ] + self.subprocess.check_output.assert_called_with(cmd) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index a49f2b90..06731787 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -2,17 +2,27 @@ from mock import call, patch, MagicMock from test_utils import CharmTestCase, patch_open +import utils +_reg = utils.register_configs + +utils.register_configs = MagicMock() + import hooks as ceph_hooks +utils.register_configs = _reg + TO_PATCH = [ 'add_source', 'apt_update', 'apt_install', + 'apt_purge', 'config', 'cmp_pkgrevno', 'execd_preinstall', 'enable_pocket', 'get_host_ip', + 'get_iface_for_address', + 'get_netmask_for_address', 'get_unit_hostname', 'glob', 'is_apache_24', @@ -25,12 +35,14 @@ 'relation_set', 'relation_get', 'render_template', + 'resolve_address', 'shutil', 'subprocess', 'sys', 'unit_get', ] + class CephRadosGWTests(CharmTestCase): def setUp(self): @@ -47,10 +59,11 @@ def test_install_www_scripts(self): def test_install_ceph_optimised_packages(self): self.lsb_release.return_value = {'DISTRIB_CODENAME': 'vivid'} - git_url = 'http://gitbuilder.ceph.com' - fastcgi_source = ('http://gitbuilder.ceph.com/' + fastcgi_source = ( + 'http://gitbuilder.ceph.com/' 'libapache-mod-fastcgi-deb-vivid-x86_64-basic/ref/master') - apache_source = ('http://gitbuilder.ceph.com/' + apache_source = ( + 'http://gitbuilder.ceph.com/' 'apache2-deb-vivid-x86_64-basic/ref/master') calls = [ call(fastcgi_source, key='6EAEAE2203C3951A'), @@ -64,22 +77,33 @@ def test_install_packages(self): ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') self.apt_update.assert_called() - self.apt_install.assert_called_with(['radosgw', - 'libapache2-mod-fastcgi', - 'apache2', - 'ntp'], fatal=True) + self.apt_install.assert_called_with(['libapache2-mod-fastcgi', + 'apache2'], fatal=True) + + def test_install_optimised_packages_no_embedded(self): + self.test_config.set('use-ceph-optimised-packages', True) + self.test_config.set('use-embedded-webserver', False) + _install_packages = self.patch('install_ceph_optimised_packages') + ceph_hooks.install_packages() + self.add_source.assert_called_with('distro', 'secretkey') + self.apt_update.assert_called() + _install_packages.assert_called() + self.apt_install.assert_called_with(['libapache2-mod-fastcgi', + 'apache2'], fatal=True) - def test_install_optimised_packages(self): + def test_install_optimised_packages_embedded(self): self.test_config.set('use-ceph-optimised-packages', True) + self.test_config.set('use-embedded-webserver', True) _install_packages = self.patch('install_ceph_optimised_packages') ceph_hooks.install_packages() 
self.add_source.assert_called_with('distro', 'secretkey') self.apt_update.assert_called() _install_packages.assert_called() self.apt_install.assert_called_with(['radosgw', - 'libapache2-mod-fastcgi', - 'apache2', - 'ntp'], fatal=True) + 'ntp', + 'haproxy'], fatal=True) + self.apt_purge.assert_called_with(['libapache2-mod-fastcgi', + 'apache2']) def test_install(self): _install_packages = self.patch('install_packages') @@ -105,9 +129,10 @@ def test_emit_cephconf(self): 'old_auth': False, 'use_syslog': 'false', 'keystone_key': 'keystone_value', + 'embedded_webserver': False, } self.cmp_pkgrevno.return_value = 1 - with patch_open() as (_open, _file): + with patch_open() as (_open, _file): ceph_hooks.emit_cephconf() self.os.makedirs.assert_called_with('/etc/ceph') _open.assert_called_with('/etc/ceph/ceph.conf', 'w') @@ -119,9 +144,10 @@ def test_emit_apacheconf(self): apachecontext = { "hostname": '10.0.0.1', } - with patch_open() as (_open, _file): + vhost_file = '/etc/apache2/sites-available/rgw.conf' + with patch_open() as (_open, _file): ceph_hooks.emit_apacheconf() - _open.assert_called_with('/etc/apache2/sites-available/rgw.conf', 'w') + _open.assert_called_with(vhost_file, 'w') self.render_template.assert_called_with('rgw', apachecontext) def test_apache_sites24(self): @@ -205,6 +231,7 @@ def test_get_keystone_conf(self): self.test_config.set('revocation-check-interval', '21') self.relation_ids.return_value = ['idrelid'] self.related_units.return_value = ['idunit'] + def _relation_get(key, unit, relid): ks_dict = { 'auth_protocol': 'https', @@ -214,15 +241,15 @@ def _relation_get(key, unit, relid): } return ks_dict[key] self.relation_get.side_effect = _relation_get - self.assertEquals(ceph_hooks.get_keystone_conf(), - {'auth_type': 'keystone', - 'auth_protocol': 'https', - 'admin_token': 'sectocken', - 'user_roles': 'admin', - 'auth_host': '10.0.0.2', - 'cache_size': '42', - 'auth_port': '8090', - 'revocation_check_interval': '21'}) + self.assertEquals(ceph_hooks.get_keystone_conf(), { + 'auth_type': 'keystone', + 'auth_protocol': 'https', + 'admin_token': 'sectocken', + 'user_roles': 'admin', + 'auth_host': '10.0.0.2', + 'cache_size': '42', + 'auth_port': '8090', + 'revocation_check_interval': '21'}) def test_get_keystone_conf_missinginfo(self): self.test_config.set('operator-roles', 'admin') @@ -230,6 +257,7 @@ def test_get_keystone_conf_missinginfo(self): self.test_config.set('revocation-check-interval', '21') self.relation_ids.return_value = ['idrelid'] self.related_units.return_value = ['idunit'] + def _relation_get(key, unit, relid): ks_dict = { 'auth_protocol': 'https', @@ -248,6 +276,7 @@ def test_mon_relation(self): ceph_hooks.mon_relation() _restart.assert_called() _ceph.import_radosgw_key.assert_called_with('seckey') + _emit_cephconf.assert_called() def test_mon_relation_nokey(self): _emit_cephconf = self.patch('emit_cephconf') @@ -257,6 +286,7 @@ def test_mon_relation_nokey(self): ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) self.assertFalse(_restart.called) + _emit_cephconf.assert_called() def test_gateway_relation(self): self.unit_get.return_value = 'myserver' @@ -265,15 +295,18 @@ def test_gateway_relation(self): def test_start(self): ceph_hooks.start() - self.subprocess.call.assert_called_with(['service', 'radosgw', 'start']) + cmd = ['service', 'radosgw', 'start'] + self.subprocess.call.assert_called_with(cmd) def test_stop(self): ceph_hooks.stop() - self.subprocess.call.assert_called_with(['service', 'radosgw', 'stop']) + cmd = 
['service', 'radosgw', 'stop'] + self.subprocess.call.assert_called_with(cmd) - def test_start(self): + def test_restart(self): ceph_hooks.restart() - self.subprocess.call.assert_called_with(['service', 'radosgw', 'restart']) + cmd = ['service', 'radosgw', 'restart'] + self.subprocess.call.assert_called_with(cmd) def test_identity_joined_early_version(self): self.cmp_pkgrevno.return_value = -1 @@ -282,18 +315,20 @@ def test_identity_joined_early_version(self): def test_identity_joined(self): self.cmp_pkgrevno.return_value = 1 + self.resolve_address.return_value = 'myserv' self.test_config.set('region', 'region1') self.test_config.set('operator-roles', 'admin') self.unit_get.return_value = 'myserv' ceph_hooks.identity_joined(relid='rid') - self.relation_set.assert_called_with(service='swift', - region='region1', - public_url='http://myserv:80/swift/v1', - internal_url='http://myserv:80/swift/v1', - requested_roles='admin', - rid='rid', - admin_url='http://myserv:80/swift') - + self.relation_set.assert_called_with( + service='swift', + region='region1', + public_url='http://myserv:80/swift/v1', + internal_url='http://myserv:80/swift/v1', + requested_roles='admin', + relation_id='rid', + admin_url='http://myserv:80/swift') + def test_identity_changed(self): _emit_cephconf = self.patch('emit_cephconf') _restart = self.patch('restart') @@ -301,3 +336,49 @@ def test_identity_changed(self): _emit_cephconf.assert_called() _restart.assert_called() + def test_canonical_url_ipv6(self): + ipv6_addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348' + self.resolve_address.return_value = ipv6_addr + self.assertEquals(ceph_hooks.canonical_url({}), + 'http://[%s]' % ipv6_addr) + + @patch.object(ceph_hooks, 'CONFIGS') + def test_cluster_changed(self, configs): + _id_joined = self.patch('identity_joined') + self.relation_ids.return_value = ['rid'] + ceph_hooks.cluster_changed() + configs.write_all.assert_called() + _id_joined.assert_called_with(relid='rid') + + def test_ha_relation_joined_no_vip(self): + self.test_config.set('vip', '') + ceph_hooks.ha_relation_joined() + self.sys.exit.assert_called_with(1) + + def test_ha_relation_joined_vip(self): + self.test_config.set('ha-bindiface', 'eth8') + self.test_config.set('ha-mcastport', '5000') + self.test_config.set('vip', '10.0.0.10') + self.get_iface_for_address.return_value = 'eth7' + self.get_netmask_for_address.return_value = '255.255.0.0' + ceph_hooks.ha_relation_joined() + eth_params = ('params ip="10.0.0.10" cidr_netmask="255.255.0.0" ' + 'nic="eth7"') + resources = {'res_cephrg_haproxy': 'lsb:haproxy', + 'res_cephrg_eth7_vip': 'ocf:heartbeat:IPaddr2'} + resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"', + 'res_cephrg_eth7_vip': eth_params} + self.relation_set.assert_called_with( + init_services={'res_cephrg_haproxy': 'haproxy'}, + corosync_bindiface='eth8', + corosync_mcastport='5000', + resource_params=resource_params, + resources=resources, + clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'}) + + def test_ha_relation_changed(self): + _id_joined = self.patch('identity_joined') + self.relation_get.return_value = True + self.relation_ids.return_value = ['rid'] + ceph_hooks.ha_relation_changed() + _id_joined.assert_called_with(relid='rid') From de3f7e5f803fba38e51efc5e782f63db3861feb2 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 11 Feb 2015 00:39:09 +0900 Subject: [PATCH 0622/2699] increase more sysctl values by default to allow 256k threads per process and 2M threads in total, LP: #1420339 --- ceph-osd/config.yaml | 9 +++++---- 1 
file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index ccaa8b46..a2fd8fbd 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -123,12 +123,13 @@ options: your network interface. sysctl: type: string - default: '{ kernel.pid_max : 2097152 }' + default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, kernel.threads-max: 2097152 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set - persistently. As a default we set pid_max to a high value to avoid - problems with large numbers (>20) of OSDs recovering. very large clusters - should set this value even higher (max 4194303). + persistently. As a default we set pid_max, max_map_count and + threads-max to a high value to avoid problems with large numbers (>20) + of OSDs recovering. very large clusters should set this value even + higher (max 4194303). nagios_context: default: "juju" description: | From 1f49a352c256d404afc2f45793203b00d3d07c60 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 12 Feb 2015 09:05:54 +0000 Subject: [PATCH 0623/2699] Hack to patch out implicit install of python-dns and subsequent import of the dns module that is triggered by importing utils --- ceph-radosgw/unit_tests/test_hooks.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 06731787..3df4b786 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -2,7 +2,16 @@ from mock import call, patch, MagicMock from test_utils import CharmTestCase, patch_open -import utils +dnsmock = MagicMock() +modules = { + 'dns': dnsmock, + 'dns.resolver': dnsmock, +} +module_patcher = patch.dict('sys.modules', modules) +module_patcher.start() +with patch('charmhelpers.fetch.apt_install'): + import utils + _reg = utils.register_configs utils.register_configs = MagicMock() From 01c0b9e0f30c13d256392146c9719076529cb99c Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 15:23:52 +0900 Subject: [PATCH 0624/2699] tweak config.yaml description --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index a2fd8fbd..26bbb61d 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -128,7 +128,7 @@ options: YAML-formatted associative array of sysctl key/value pairs to be set persistently. As a default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) - of OSDs recovering. very large clusters should set this value even + of OSDs recovering. very large clusters should set pid_max even higher (max 4194303). nagios_context: default: "juju" From 02a6f5592cade3a0b3ba2e3d368e4d56d3180624 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 15:35:20 +0900 Subject: [PATCH 0625/2699] increase more sysctl values by default to allow 256k threads per process and 2M threads in total, LP: #1420339 --- ceph-proxy/config.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d087510c..16c5d0c6 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -157,12 +157,13 @@ options: your network interface. 
sysctl: type: string - default: '{ kernel.pid_max : 2097152 }' + default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, kernel.threads-max: 2097152 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set - persistently. As a default we set pid_max to a high value to avoid - problems with large numbers (>20) of OSDs recovering. very large clusters - should set this value even higher (max 4194303). + persistently. As a default we set pid_max, max_map_count and + threads-max to a high value to avoid problems with large numbers (>20) + of OSDs recovering. very large clusters should set pid_max even + higher (max 4194303). nagios_context: default: "juju" description: | From 76c82f4a403932475e5a10f7d3f71b292be9795e Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 15:35:20 +0900 Subject: [PATCH 0626/2699] increase more sysctl values by default to allow 256k threads per process and 2M threads in total, LP: #1420339 --- ceph-mon/config.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d087510c..16c5d0c6 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -157,12 +157,13 @@ options: your network interface. sysctl: type: string - default: '{ kernel.pid_max : 2097152 }' + default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, kernel.threads-max: 2097152 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set - persistently. As a default we set pid_max to a high value to avoid - problems with large numbers (>20) of OSDs recovering. very large clusters - should set this value even higher (max 4194303). + persistently. As a default we set pid_max, max_map_count and + threads-max to a high value to avoid problems with large numbers (>20) + of OSDs recovering. very large clusters should set pid_max even + higher (max 4194303). nagios_context: default: "juju" description: | From daee7f310f4561e8089ada331a3be51acfbf0377 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 18:42:32 +0900 Subject: [PATCH 0627/2699] update config.yaml description --- ceph-proxy/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 16c5d0c6..22d34248 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -162,7 +162,7 @@ options: YAML-formatted associative array of sysctl key/value pairs to be set persistently. As a default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) - of OSDs recovering. very large clusters should set pid_max even + of OSDs recovering. very large clusters should set those values even higher (max 4194303). nagios_context: default: "juju" From 894169945a78d60427f6dd3b7d198546f18873c3 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 18:42:32 +0900 Subject: [PATCH 0628/2699] update config.yaml description --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 16c5d0c6..22d34248 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -162,7 +162,7 @@ options: YAML-formatted associative array of sysctl key/value pairs to be set persistently. As a default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) - of OSDs recovering. very large clusters should set pid_max even + of OSDs recovering. 
very large clusters should set those values even higher (max 4194303). nagios_context: default: "juju" From be34960c90f9193390bb92c273c637a1a3fbb994 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 16 Feb 2015 18:43:14 +0900 Subject: [PATCH 0629/2699] update config.yaml description --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 26bbb61d..712c37d2 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -128,7 +128,7 @@ options: YAML-formatted associative array of sysctl key/value pairs to be set persistently. As a default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) - of OSDs recovering. very large clusters should set pid_max even + of OSDs recovering. very large clusters should set those values even higher (max 4194303). nagios_context: default: "juju" From b690e93a8309b122764751327712e6d9d87e8928 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 18 Feb 2015 15:46:35 +0900 Subject: [PATCH 0630/2699] fix lint warning --- ceph-proxy/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 22d34248..6c5b74e2 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -165,6 +165,7 @@ options: of OSDs recovering. very large clusters should set those values even higher (max 4194303). nagios_context: + type: string default: "juju" description: | Used by the nrpe-external-master subordinate charm. From 9654e9aaa08845f50c18397db98a76105513994d Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 18 Feb 2015 15:46:35 +0900 Subject: [PATCH 0631/2699] fix lint warning --- ceph-mon/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 22d34248..6c5b74e2 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -165,6 +165,7 @@ options: of OSDs recovering. very large clusters should set those values even higher (max 4194303). nagios_context: + type: string default: "juju" description: | Used by the nrpe-external-master subordinate charm. From 341f7cd036f34f24e51d7d52838aba5e41fc2fca Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 18 Feb 2015 15:47:34 +0900 Subject: [PATCH 0632/2699] fix lint warning --- ceph-osd/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 712c37d2..04f01026 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -131,6 +131,7 @@ options: of OSDs recovering. very large clusters should set those values even higher (max 4194303). nagios_context: + type: string default: "juju" description: | Used by the nrpe-external-master subordinate charm. 
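The sysctl option reworked in patches 0622 through 0629 above is a YAML-formatted string that the charm parses at hook time and writes out as a sysctl.conf fragment before loading it with sysctl -p. A minimal sketch of that flow, assuming a hypothetical target path; the parsing and file layout follow the charmhelpers.core.sysctl.create() helper synced in the next patch:

import subprocess

import yaml


def apply_sysctl_option(sysctl_yaml):
    # sysctl_yaml is the charm config value, e.g. the default from these
    # patches: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288,
    # kernel.threads-max: 2097152 }'
    settings = yaml.safe_load(sysctl_yaml)
    # Hypothetical target path; any sysctl.d-style fragment would do.
    sysctl_file = '/etc/sysctl.d/50-ceph-charm.conf'
    with open(sysctl_file, 'w') as fd:
        for key, value in settings.items():
            fd.write("{}={}\n".format(key, value))
    # Apply the new values immediately rather than waiting for a reboot.
    subprocess.check_call(['sysctl', '-p', sysctl_file])

With the default shown above, yaml.safe_load returns a plain dict, so the same code handles whatever key/value set an operator supplies through juju config.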
From c5766431d8448b25b11bb1dcc916190ce617cc45 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 12:11:25 +1000 Subject: [PATCH 0633/2699] [bradm] Sync charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 46 +- ceph-proxy/hooks/charmhelpers/core/fstab.py | 8 +- ceph-proxy/hooks/charmhelpers/core/host.py | 10 +- .../hooks/charmhelpers/core/strutils.py | 42 ++ ceph-proxy/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- .../hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 2 +- .../contrib/openstack/amulet/deployment.py | 7 +- 10 files changed, 600 insertions(+), 38 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..8229f6b5 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -214,7 +213,7 @@ def __init__(self, hostname=None): if 'nagios_servicegroups' in self.config: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + 
nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. ' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. """ def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-proxy/hooks/charmhelpers/core/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. ' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. 
The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? 
and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class 
GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From 9459355fae8fa80c749c75f2aa1b2e690ee2575c Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 12:11:25 +1000 Subject: [PATCH 0634/2699] [bradm] Sync charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 46 +- ceph-mon/hooks/charmhelpers/core/fstab.py | 8 +- ceph-mon/hooks/charmhelpers/core/host.py | 10 +- ceph-mon/hooks/charmhelpers/core/strutils.py | 42 ++ ceph-mon/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- ceph-mon/hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 2 +- .../contrib/openstack/amulet/deployment.py | 7 +- 10 files changed, 600 insertions(+), 38 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-mon/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..8229f6b5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -214,7 +213,7 @@ def __init__(self, hostname=None): if 'nagios_servicegroups' in self.config: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = 
self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. ' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. 
""" def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. 
' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. 
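
For orientation, a minimal sketch of driving this Storage class directly, e.g. from a scratch shell outside a deployed charm. It assumes the module is importable as charmhelpers.core.unitdata and passes an explicit path so the CHARM_DIR default is not needed; note that revision history is only recorded inside a hook_scope, and writes of an unchanged value are skipped::

    from charmhelpers.core.unitdata import Storage

    db = Storage(path='/tmp/demo-unit-state.db')  # throwaway database
    db.set('x', 1)                    # no open scope: no revision row
    with db.hook_scope('install'):
        db.set('x', 1)                # unchanged value: write is skipped
        db.set('x', 2)                # recorded against the 'install' hook
    print(list(db.gethistory('x', deserialize=True)))
    # -> [(1, u'x', 2, u'install', datetime(...))]
    db.close()
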
+ + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. 
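
As a quick illustration of the delta contract (a sketch, assuming a fresh database): unchanged keys are omitted, changed keys map to Delta(previous, current) tuples, and nothing is persisted until update() is called::

    from charmhelpers.core.unitdata import Storage

    db = Storage(path='/tmp/demo-delta.db')
    db.update({'debug': False, 'port': 80}, prefix='config.')

    d = db.delta({'debug': True, 'port': 80}, 'config.')
    assert d.debug == (False, True)   # changed: Delta(previous, current)
    assert 'port' not in d            # unchanged keys are omitted
    db.update({'debug': True, 'port': 80}, 'config.')  # persist the new values
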
+ """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. 
+ # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From f440c7bcf5fd3bf2b0fe3a6edc43d9b76b2b1717 Mon Sep 17 
00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 12:12:47 +1000 Subject: [PATCH 0635/2699] [bradm] Add nagios_servicegroups config option --- ceph-proxy/config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index a1d44461..a3d8d5b9 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -173,3 +173,9 @@ options: juju-myservice-0 If you're running multiple environments with the same services in them this allows you to differentiate between them. + nagios_servicegroups: + default: "" + type: string + description: | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup From 5db406fe66cd65518bee8a42dd37108eee8fa011 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 12:12:47 +1000 Subject: [PATCH 0636/2699] [bradm] Add nagios_servicegroups config option --- ceph-mon/config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index a1d44461..a3d8d5b9 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -173,3 +173,9 @@ options: juju-myservice-0 If you're running multiple environments with the same services in them this allows you to differentiate between them. + nagios_servicegroups: + default: "" + type: string + description: | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup From 8167552255fdf1454ea28f874984dcd53d436eac Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 12:15:44 +1000 Subject: [PATCH 0637/2699] [bradm] Sync charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 46 +- ceph-osd/hooks/charmhelpers/core/fstab.py | 8 +- ceph-osd/hooks/charmhelpers/core/host.py | 10 +- ceph-osd/hooks/charmhelpers/core/strutils.py | 42 ++ ceph-osd/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 2 +- .../contrib/openstack/amulet/deployment.py | 7 +- 10 files changed, 600 insertions(+), 38 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-osd/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..8229f6b5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not 
nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -214,7 +213,7 @@ def __init__(self, hostname=None): if 'nagios_servicegroups' in self.config: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. 
' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. """ def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. 
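
The accepted spellings, as a short sketch mirroring the implementation below (input is stripped and lowercased; anything else raises ValueError)::

    from charmhelpers.core.strutils import bool_from_string

    bool_from_string('Yes')     # True
    bool_from_string(' f ')     # False
    bool_from_string('maybe')   # raises ValueError (uninterpretable string)
    bool_from_string(None)      # raises ValueError (non-string input)
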
+ """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. ' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. 
The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? 
and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class 
GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From fcd215ffaacddcc3dbc17c410d78f1999b657545 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 13:29:52 +1000 Subject: [PATCH 0638/2699] [bradm] Add nagios_servicegroups config option --- ceph-osd/config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 8034a6d8..1e4bb317 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -139,3 +139,9 @@ options: juju-myservice-0 If you're running multiple environments with the same services in them this allows you to differentiate between them. + nagios_servicegroups: + default: "" + type: string + description: | + A comma-separated list of nagios servicegroups. 
+ If left empty, the nagios_context will be used as the servicegroup From dcfe3fddaef23255ed232610e271a12fff147352 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 20 Feb 2015 10:08:35 +1000 Subject: [PATCH 0639/2699] [bradm] Handle case of empty nagios_servicegroups setting --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 +++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8229f6b5..9d961cfb 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -210,7 +210,7 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: self.nagios_servicegroups = self.nagios_context diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. 
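
A hedged sketch of driving the new helper from a test, where u is the amulet utils instance, sentry_unit is a deployed sentry unit, and the restart command is a placeholder::

    mtime = u.get_sentry_time(sentry_unit)           # epoch seconds on the unit
    sentry_unit.run('service ceph-mon-all restart')  # placeholder restart
    if not u.service_restarted_since(sentry_unit, mtime, 'ceph-mon',
                                     sleep_time=10, retry_count=3):
        amulet.raise_status(amulet.FAIL, msg='service did not restart')
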
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) From e22d0af1acf0999074ce9c17ce28e3792950c448 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 20 Feb 2015 10:08:35 +1000 Subject: [PATCH 0640/2699] [bradm] Handle case of empty nagios_servicegroups setting --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 +++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8229f6b5..9d961cfb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -210,7 +210,7 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: self.nagios_servicegroups = self.nagios_context diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) From 643276a3cea89184fcb18891413911d62f31a139 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 20 Feb 2015 10:10:05 +1000 Subject: [PATCH 0641/2699] [bradm] Handle case of empty nagios_servicegroups setting --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 +++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8229f6b5..9d961cfb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -210,7 +210,7 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: self.nagios_servicegroups = self.nagios_context diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) From 256da94996234abc20ec71ef25f9e1026711133c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Feb 2015 10:59:26 +0000 Subject: [PATCH 0642/2699] [trivial] charmhelpers sync --- .../charmhelpers/contrib/charmsupport/nrpe.py | 48 +- ceph-proxy/hooks/charmhelpers/core/fstab.py | 8 +- ceph-proxy/hooks/charmhelpers/core/host.py | 10 +- .../hooks/charmhelpers/core/strutils.py | 42 ++ ceph-proxy/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- .../hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 ++++- .../contrib/openstack/amulet/deployment.py | 7 +- 11 files changed, 723 insertions(+), 41 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..9d961cfb 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -211,10 +210,10 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f 
' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. ' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. 
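
 A minimal sketch of the decorator in use (map contents assumed);
 the wrapped hook body runs first, then any service whose watched
 file hash changed is restarted:

 @restart_on_change({'/etc/ceph/ceph.conf': ['ceph-mon']})
 def config_changed(**kwargs):
 pass # kwargs now pass through, per the change below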
""" def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-proxy/hooks/charmhelpers/core/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. 
' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. 
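+
+ A minimal usage sketch (path assumed)::
+
+ db = Storage('/tmp/demo.db')
+ db.set('installed', True)
+ assert db.get('installed') is True
+ db.flush() # commit now; close() alone rolls back
+ db.close()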
+ + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. 
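+
+ A small sketch (prior state assumed)::
+
+ kv.update({'a': 1}, prefix='cfg.')
+ d = kv.delta({'a': 2, 'b': 3}, 'cfg.')
+ # d == {'a': Delta(previous=1, current=2),
+ # 'b': Delta(previous=None, current=3)}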
+ """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. 
+ # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, 
mtime, service,
+ pgrep_full=False, sleep_time=20,
+ retry_count=2):
+ """Check if the service was started after a given time.
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check for the service on
+ mtime (float): The epoch time to check against
+ service (string): service name to look for in process table
+ pgrep_full (boolean): Use full command line search mode with pgrep
+ sleep_time (int): Seconds to sleep before looking for process
+ retry_count (int): If service is not found, how many times to retry
+
+ Returns:
+ bool: True if service found and its start time is newer than mtime,
+ False if service is older than mtime or if service was
+ not found.
+ """
+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
+ time.sleep(sleep_time)
+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
+ pgrep_full)
+ while retry_count > 0 and not proc_start_time:
+ self.log.debug('No pid file found for service %s, will retry %i '
+ 'more times' % (service, retry_count))
+ time.sleep(30)
+ proc_start_time = self._get_proc_start_time(sentry_unit, service,
+ pgrep_full)
+ retry_count = retry_count - 1
+
+ if not proc_start_time:
+ self.log.warn('No proc start time found, assuming service did '
+ 'not start')
+ return False
+ if proc_start_time >= mtime:
+ self.log.debug('proc start time is newer than provided mtime '
+ '(%s >= %s)' % (proc_start_time, mtime))
+ return True
+ else:
+ self.log.warn('proc start time (%s) is older than provided mtime '
+ '(%s), service did not restart' % (proc_start_time,
+ mtime))
+ return False
+
+ def config_updated_since(self, sentry_unit, filename, mtime,
+ sleep_time=20):
+ """Check if file was modified after a given time.
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check the file mtime on
+ filename (string): The file to check mtime of
+ mtime (float): The epoch time to check against
+ sleep_time (int): Seconds to sleep before checking the file
+
+ Returns:
+ bool: True if file was modified more recently than mtime, False if
+ file was modified before mtime.
+ """
+ self.log.debug('Checking %s updated since %s' % (filename, mtime))
+ time.sleep(sleep_time)
+ file_mtime = self._get_file_mtime(sentry_unit, filename)
+ if file_mtime >= mtime:
+ self.log.debug('File mtime is newer than provided mtime '
+ '(%s >= %s)' % (file_mtime, mtime))
+ return True
+ else:
+ self.log.warn('File mtime %s is older than provided mtime %s'
+ % (file_mtime, mtime))
+ return False
+
+ def validate_service_config_changed(self, sentry_unit, mtime, service,
+ filename, pgrep_full=False,
+ sleep_time=20, retry_count=2):
+ """Check service and file were updated after mtime
+
+ Args:
+ sentry_unit (sentry): The sentry unit to check for the service on
+ mtime (float): The epoch time to check against
+ service (string): service name to look for in process table
+ filename (string): The file to check mtime of
+ pgrep_full (boolean): Use full command line search mode with pgrep
+ sleep_time (int): Seconds to sleep before looking for process
+ retry_count (int): If service is not found, how many times to retry
+
+ Typical Usage:
+ u = OpenStackAmuletUtils(ERROR)
+ ...
+ mtime = u.get_sentry_time(self.cinder_sentry)
+ self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+ if not u.validate_service_config_changed(self.cinder_sentry,
+ mtime,
+ 'cinder-api',
+ '/etc/cinder/cinder.conf'):
+ amulet.raise_status(amulet.FAIL, msg='update failed')
+ Returns:
+ bool: True if both service and file were updated/restarted after
+ mtime, False if service is older than mtime or if service was
+ not found or if filename was modified before mtime.
+ """
+ self.log.debug('Checking %s restarted since %s' % (service, mtime))
+ time.sleep(sleep_time)
+ service_restart = self.service_restarted_since(sentry_unit, mtime,
+ service,
+ pgrep_full=pgrep_full,
+ sleep_time=0,
+ retry_count=retry_count)
+ config_update = self.config_updated_since(sentry_unit, filename, mtime,
+ sleep_time=0)
+ return service_restart and config_update
+
+ def get_sentry_time(self, sentry_unit):
+ """Return current epoch time on a sentry"""
+ cmd = "date +'%s'"
+ return float(sentry_unit.run(cmd)[0])
+
 def relation_error(self, name, data):
 return 'unexpected relation data in {} - {}'.format(name, data)
diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index c50d3ec6..0cfeaa4c 100644
--- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services):
 services.append(this_service)
 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
 'ceph-osd', 'ceph-radosgw']
+ # OpenStack subordinate charms do not expose an origin option as that
+ # is controlled by the principal charm
+ ignore = ['neutron-openvswitch']
 if self.openstack:
 for svc in services:
- if svc['name'] not in use_source:
+ if svc['name'] not in use_source + ignore:
 config = {'openstack-origin': self.openstack}
 self.d.configure(svc['name'], config)
 if self.source:
 for svc in services:
- if svc['name'] in use_source:
+ if svc['name'] in use_source and svc['name'] not in ignore:
 config = {'source': self.source}
 self.d.configure(svc['name'], config)

From 4f186057a5e8c8a36f5f426d4998c03634e8313d Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Tue, 24 Feb 2015 10:59:26 +0000
Subject: [PATCH 0643/2699] [trivial] charmhelpers sync

---
 .../charmhelpers/contrib/charmsupport/nrpe.py | 48 +-
 ceph-mon/hooks/charmhelpers/core/fstab.py | 8 +-
 ceph-mon/hooks/charmhelpers/core/host.py | 10 +-
 ceph-mon/hooks/charmhelpers/core/strutils.py | 42 ++
 ceph-mon/hooks/charmhelpers/core/sysctl.py | 20 +-
 .../hooks/charmhelpers/core/templating.py | 6 +-
 ceph-mon/hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++
 .../hooks/charmhelpers/fetch/archiveurl.py | 20 +-
 ceph-mon/hooks/charmhelpers/fetch/giturl.py | 2 +-
 .../charmhelpers/contrib/amulet/utils.py | 124 ++++-
 .../contrib/openstack/amulet/deployment.py | 7 +-
 11 files changed, 723 insertions(+), 41 deletions(-)
 create mode 100644 ceph-mon/hooks/charmhelpers/core/strutils.py
 create mode 100644 ceph-mon/hooks/charmhelpers/core/unitdata.py

diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 0fd0a9d8..9d961cfb 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -24,6 +24,8 @@
 import pwd
 import grp
 import os
+import glob
+import shutil
 import re
 import shlex
 import yaml
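The hunks below include the same empty-servicegroups guard applied in
the two preceding patches; a sketch of the truthy-check pattern
(config values assumed):

    config = {'nagios_context': 'juju', 'nagios_servicegroups': ''}
    groups = config.get('nagios_servicegroups') or config['nagios_context']
    # the empty string is falsy, so groups == 'juju'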
@@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -211,10 +210,10 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. 
' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. """ def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. 
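+
+ A doctest-style sketch::
+
+ >>> bool_from_string(' Yes ')
+ True
+ >>> bool_from_string('f')
+ False
+ >>> bool_from_string(1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Unable to interpret non-string value '1' as boolean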
+ """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. ' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. 
The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? 
and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
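+
+    Because __call__ yields the storage object plus the config and
+    relation deltas, the context manager can also be unpacked directly.
+    A minimal sketch (the 'deploy-count' key is illustrative, not part
+    of the API)::
+
+        hook_data = unitdata.HookData()
+
+        with hook_data() as (db, conf_delta, rels_delta):
+            for key, (previous, current) in conf_delta.items():
+                print('config %s: %r -> %r' % (key, previous, current))
+            db.set('deploy-count', (db.get('deploy-count') or 0) + 1)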
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class 
GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. + """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. 
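+
+        A sketch of typical use, in the style of the other helpers in
+        this class (the 'loglevel' option and file path are
+        illustrative)::
+
+            u = OpenStackAmuletUtils(ERROR)
+            mtime = u.get_sentry_time(self.ceph_sentry)
+            self.d.configure('ceph', {'loglevel': '5'})
+            if not u.config_updated_since(self.ceph_sentry,
+                                          '/etc/ceph/ceph.conf', mtime):
+                amulet.raise_status(amulet.FAIL, msg='config not updated')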
+ + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
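+
+        Note: with the defaults above a failed check blocks for roughly
+        sleep_time + retry_count * 30 seconds before returning False,
+        since the retry loop in service_restarted_since sleeps 30
+        seconds per attempt.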
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From 0ba9587b0878367a5f3ccc1230adc9023dc3f25f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Feb 2015 11:00:08 +0000 Subject: [PATCH 0644/2699] [trivial] charmhelpers sync --- .../charmhelpers/contrib/charmsupport/nrpe.py | 48 +- ceph-osd/hooks/charmhelpers/core/fstab.py | 8 +- ceph-osd/hooks/charmhelpers/core/host.py | 10 +- ceph-osd/hooks/charmhelpers/core/strutils.py | 42 ++ ceph-osd/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 ++++- .../contrib/openstack/amulet/deployment.py | 7 +- 11 files changed, 723 insertions(+), 41 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-osd/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..9d961cfb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ def write(self, nagios_context, hostname, nagios_servicegroups=None): nagios_servicegroups) 
def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -211,10 +210,10 @@ def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. 
' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. """ def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. 
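+
+    Doctest-style illustration, inferred from the implementation below
+    (matching is case-insensitive and surrounding whitespace is
+    stripped)::
+
+        >>> bool_from_string('Yes')
+        True
+        >>> bool_from_string(' f ')
+        False
+        >>> bool_from_string('maybe')
+        Traceback (most recent call last):
+            ...
+        ValueError: Unable to interpret string value 'maybe' as boolean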
+ """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. ' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. 
The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? 
and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class 
GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. + """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. 
+ + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From bc733052a52d4b1cab49f9bb0c058ca47b5d9862 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Feb 2015 11:02:02 +0000 Subject: [PATCH 0645/2699] [trivial] charmhelpers sync --- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +- .../contrib/openstack/amulet/deployment.py | 7 +- .../contrib/openstack/files/__init__.py | 18 + .../contrib/openstack/files/check_haproxy.sh | 32 ++ .../files/check_haproxy_queue_depth.sh | 30 ++ .../charmhelpers/contrib/openstack/ip.py | 37 ++ .../charmhelpers/contrib/openstack/utils.py | 1 + .../charmhelpers/contrib/python/packages.py | 4 +- ceph-radosgw/hooks/charmhelpers/core/fstab.py | 8 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 10 +- .../hooks/charmhelpers/core/strutils.py | 42 ++ .../hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 6 +- .../hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- .../hooks/charmhelpers/fetch/giturl.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 124 ++++- .../contrib/openstack/amulet/deployment.py | 7 +- 18 files changed, 812 insertions(+), 39 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py create mode 100755 ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh create mode 100755 ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh create mode 100644 ceph-radosgw/hooks/charmhelpers/core/strutils.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/unitdata.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 9a2588b6..9333efc3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -48,6 +48,9 @@ from charmhelpers.core.decorators import ( retry_on_exception, ) +from charmhelpers.core.strutils import ( + bool_from_string, +) class HAIncompleteConfig(Exception): @@ -164,7 +167,8 @@ def https(): . returns: boolean ''' - if config_get('use-https') == "yes": + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): return True if config_get('ssl_cert') and config_get('ssl_key'): return True diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py new file mode 100644 index 00000000..75876796 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh new file mode 100755 index 00000000..eb8527f5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -0,0 +1,32 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +CRITICAL=0 +NOTACTIVE='' +LOGFILE=/var/log/nagios/check_haproxy.log +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); +do + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') + if [ $? 
!= 0 ]; then + date >> $LOGFILE + echo $output >> $LOGFILE + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 + CRITICAL=1 + NOTACTIVE="${NOTACTIVE} $appserver" + fi +done + +if [ $CRITICAL = 1 ]; then + echo "CRITICAL:${NOTACTIVE}" + exit 2 +fi + +echo "OK: All haproxy instances looking good" +exit 0 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh new file mode 100755 index 00000000..3ebb5329 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +# These should be config options at some stage +CURRQthrsh=0 +MAXQthrsh=100 + +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) + +for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') +do + CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) + MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) + + if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then + echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" + exit 2 + fi +done + +echo "OK: All haproxy queue depths looking good" +exit 0 + diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 9eabed73..29bbddcb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -26,6 +26,8 @@ ) from charmhelpers.contrib.hahelpers.cluster import is_clustered +from functools import partial + PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' @@ -107,3 +109,38 @@ def resolve_address(endpoint_type=PUBLIC): "clustered=%s)" % (net_type, clustered)) return resolved_address + + +def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, + override=None): + """Returns the correct endpoint URL to advertise to Keystone. + + This method provides the correct endpoint URL which should be advertised to + the keystone charm for endpoint creation. This method allows for the url to + be overridden to force a keystone endpoint to have specific URL for any of + the defined scopes (admin, internal, public). + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param url_template: str format string for creating the url template. Only + two values will be passed - the scheme+hostname + returned by the canonical_url and the port. + :param endpoint_type: str endpoint type to resolve. + :param override: str the name of the config option which overrides the + endpoint URL defined by the charm itself. None will + disable any overrides (default). + """ + if override: + # Return any user-defined overrides for the keystone endpoint URL. 
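+        # (Illustrative note: 'override' names a charm config option,
+        # e.g. an operator-set 'os-public-endpoint-url'; when present
+        # its value is returned, stripped, in place of the templated
+        # URL built below.)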
+ user_value = config(override) + if user_value: + return user_value.strip() + + return url_template % (canonical_url(configs, endpoint_type), port) + + +public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) + +internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) + +admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 26259a03..af2b3596 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -103,6 +103,7 @@ ('2.1.0', 'juno'), ('2.2.0', 'juno'), ('2.2.1', 'kilo'), + ('2.2.2', 'kilo'), ]) DEFAULT_LOOPBACK_SIZE = '5G' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index d848a120..8659516b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = "Jorge Niedbalski " - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import log @@ -29,6 +27,8 @@ apt_install('python-pip') from pip import main as pip_execute +__author__ = "Jorge Niedbalski " + def parse_options(given, available): """Given a set of options, check if available""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/fstab.py b/ceph-radosgw/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/fstab.py +++ b/ceph-radosgw/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. ' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ def entries(self): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ def remove_entry(self, entry): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def ceph_client_changed(): ceph_client_changed function. 
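    A note on the change below: wrapped_f now forwards keyword
    arguments, so a decorated hook such as (restart map illustrative)::

        @restart_on_change({'/etc/ceph/ceph.conf': ['ceph-osd']})
        def ceph_client_changed(service_name=None):
            pass

    can be called as ceph_client_changed(service_name='ceph'); with
    the old *args-only signature that call raised TypeError.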
""" def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. 
' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook +execution environment (including relation data, config data, etc.), +set up a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelpers.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookkeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hooks.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data structure capabilities (dicts, lists, ints, booleans, etc.). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, it's very helpful to understand which values +have actually changed and how they have changed. The storage +provides a delta method for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change; it needs to +be explicitly saved via the 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated with the hook name. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code.
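+ + A minimal lifecycle sketch (the path argument is optional and + defaults to $CHARM_DIR/.unit-state.db, per __init__ below; the + path shown is illustrative):: + + db = Storage('/tmp/unit-state.db') + with db.hook_scope('install'): + db.set('installed', True) + db.close()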
+ + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. 
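+ + Keys present only in mapping come back as Delta(None, new_value), + keys present only in storage as Delta(old_value, None); unchanged + keys are omitted from the result.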
+ """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. 
+ # So logic dependent on revision is not particularly + # useful; however, it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 3464b873..65219d33 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -187,6 +192,121 @@ def service_restarted(self, sentry_unit, service, filename, else: return False + def
service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service has been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time is newer than mtime, + False if service is older than mtime or if service was + not found. + """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime ' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime. + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ...
+ mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf'): + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file were updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. + """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # OpenStack subordinate charms do not expose an origin option as that + # is controlled by the principal + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From eaaab8271d2e8caaa6bb082be74dc22adc6d53f7 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Mar 2015 09:53:54 +0000 Subject: [PATCH 0646/2699] Automated resync of charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 85 ++++++++++++++++++- .../charmhelpers/core/services/helpers.py | 16 +++- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 98b17544..fff6d5ca 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -17,13 +17,16 @@ import glob import re import subprocess +import six +import socket from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - log + log, + WARNING, ) try: @@ -365,3 +368,83 @@ def is_bridge_member(nic): return True return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address.
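+ + Note: this relies on socket.inet_aton(), which also accepts + abbreviated IPv4 forms such as '127.1' and does not recognise + IPv6 addresses.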
+ """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . + if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 5e3af9da..15b21664 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -45,12 +45,14 @@ class RelationContext(dict): """ name = None interface = None - required_keys = [] def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + if name is not None: self.name = name - if additional_required_keys is not None: + if additional_required_keys: self.required_keys.extend(additional_required_keys) self.get_data() @@ -134,7 +136,10 @@ class MysqlRelation(RelationContext): """ name = 'db' interface = 'mysql' - required_keys = ['host', 'user', 'password', 'database'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + super(HttpRelation).__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -146,7 +151,10 @@ class HttpRelation(RelationContext): """ name = 'website' interface = 'http' - required_keys = ['host', 'port'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + super(HttpRelation).__init__(self, *args, **kwargs) def provide_data(self): return { From 5091a9ad357da3d645472d64a5182caf84e9fec4 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Mar 2015 09:53:54 +0000 Subject: [PATCH 0647/2699] Automated resync of charm-helpers --- .../hooks/charmhelpers/contrib/network/ip.py | 85 ++++++++++++++++++- .../charmhelpers/core/services/helpers.py | 16 +++- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 98b17544..fff6d5ca 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -17,13 +17,16 @@ import glob import re import subprocess +import six +import socket from functools import partial from 
charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - log + log, + WARNING, ) try: @@ -365,3 +368,83 @@ def is_bridge_member(nic): return True return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . + if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 5e3af9da..15b21664 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -45,12 +45,14 @@ class RelationContext(dict): """ name = None interface = None - required_keys = [] def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + if name is not None: self.name = name - if additional_required_keys is not None: + if additional_required_keys: self.required_keys.extend(additional_required_keys) self.get_data() @@ -134,7 +136,10 @@ class MysqlRelation(RelationContext): """ name = 'db' interface = 'mysql' - required_keys = ['host', 'user', 'password', 'database'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + super(HttpRelation).__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -146,7 +151,10 @@ class HttpRelation(RelationContext): """ name = 'website' interface = 'http' - required_keys = ['host', 'port'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + super(HttpRelation).__init__(self, *args, **kwargs) def provide_data(self): return { From 372f548d367952f005d636645996ec9daca89837 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Mar 2015 09:54:11 +0000 Subject: [PATCH 0648/2699] Automated resync of charm-helpers --- ceph-osd/.coverage | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 85 ++++++++++++++++++- .../charmhelpers/core/services/helpers.py | 16 +++- 3 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 ceph-osd/.coverage diff --git 
a/ceph-osd/.coverage b/ceph-osd/.coverage new file mode 100644 index 00000000..d7f7b879 --- /dev/null +++ b/ceph-osd/.coverage @@ -0,0 +1 @@ +€}q(U collectorqUcoverage v3.7.1qUlinesq}qUF/home/jamespage/src/charms/next-resync/ceph-osd/unit_tests/__init__.pyq]qKasu. \ No newline at end of file diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 98b17544..fff6d5ca 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -17,13 +17,16 @@ import glob import re import subprocess +import six +import socket from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - log + log, + WARNING, ) try: @@ -365,3 +368,83 @@ def is_bridge_member(nic): return True return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . 
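+ # (reverse-DNS answers are fully qualified and end with the + # DNS root dot, e.g. 'host.example.com.')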
+ if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 5e3af9da..15b21664 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -45,12 +45,14 @@ class RelationContext(dict): """ name = None interface = None - required_keys = [] def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + if name is not None: self.name = name - if additional_required_keys is not None: + if additional_required_keys: self.required_keys.extend(additional_required_keys) self.get_data() @@ -134,7 +136,10 @@ class MysqlRelation(RelationContext): """ name = 'db' interface = 'mysql' - required_keys = ['host', 'user', 'password', 'database'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + super(HttpRelation).__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -146,7 +151,10 @@ class HttpRelation(RelationContext): """ name = 'website' interface = 'http' - required_keys = ['host', 'port'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + super(HttpRelation).__init__(self, *args, **kwargs) def provide_data(self): return { From e382d3c812859c37393055d0e594914b20631f0e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:40:42 +0000 Subject: [PATCH 0649/2699] Detect and configure mons correctly for systemd ubuntu --- ceph-proxy/hooks/ceph.py | 20 +++++++++++++++++--- ceph-proxy/hooks/hooks.py | 5 ++++- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index b0d7be17..25d04e51 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -15,11 +15,13 @@ mkdir, service_restart, cmp_pkgrevno, + lsb_release ) from charmhelpers.core.hookenv import ( log, ERROR, WARNING, + cached ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -274,11 +276,20 @@ def upgrade_key_caps(key, caps): subprocess.check_call(cmd) +@cached +def systemd(): + return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + + def bootstrap_monitor_cluster(secret): hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): @@ -300,10 +311,13 @@ def bootstrap_monitor_cluster(secret): with open(done, 'w'): pass - with open(upstart, 'w'): + with open(init_marker, 'w'): pass - service_restart('ceph-mon-all') + if systemd(): + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') except: raise finally: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 6c2e723a..681c4376 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -338,7 +338,10 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
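# The 'ceph-mon-all' umbrella job exists only under upstart; # systemd-based releases (vivid onwards, per ceph.systemd()) manage # a single 'ceph-mon' unit instead.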
- service_restart('ceph-mon-all') + if ceph.systemd(): + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') if ceph.is_bootstrapped(): ceph.start_osds(get_devices()) From 11b8512d426fd09973e6559353c15de4b0645001 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:40:42 +0000 Subject: [PATCH 0650/2699] Detect and configure mons correctly for systemd ubuntu --- ceph-mon/hooks/ceph.py | 20 +++++++++++++++++--- ceph-mon/hooks/hooks.py | 5 ++++- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index b0d7be17..25d04e51 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -15,11 +15,13 @@ mkdir, service_restart, cmp_pkgrevno, + lsb_release ) from charmhelpers.core.hookenv import ( log, ERROR, WARNING, + cached ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -274,11 +276,20 @@ def upgrade_key_caps(key, caps): subprocess.check_call(cmd) +@cached +def systemd(): + return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + + def bootstrap_monitor_cluster(secret): hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): @@ -300,10 +311,13 @@ def bootstrap_monitor_cluster(secret): with open(done, 'w'): pass - with open(upstart, 'w'): + with open(init_marker, 'w'): pass - service_restart('ceph-mon-all') + if systemd(): + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') except: raise finally: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 6c2e723a..681c4376 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -338,7 +338,10 @@ def upgrade_charm(): def start(): # In case we're being redeployed to the same machines, try # to make sure everything is running as soon as possible. 
- service_restart('ceph-mon-all') + if ceph.systemd(): + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') if ceph.is_bootstrapped(): ceph.start_osds(get_devices()) From 0f0c9b9c3b5d3e59e6cfa531d1b423a7c7c8ee8e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:55:28 +0000 Subject: [PATCH 0651/2699] Enable ceph-mon on boot post bootstrap --- ceph-proxy/hooks/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 25d04e51..795c53bb 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -315,6 +315,7 @@ def bootstrap_monitor_cluster(secret): pass if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) service_restart('ceph-mon') else: service_restart('ceph-mon-all') From c0a8b7b82c318b53d6ab1bb85568c50aabee0bab Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:55:28 +0000 Subject: [PATCH 0652/2699] Enable ceph-mon on boot post bootstrap --- ceph-mon/hooks/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 25d04e51..795c53bb 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -315,6 +315,7 @@ def bootstrap_monitor_cluster(secret): pass if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) service_restart('ceph-mon') else: service_restart('ceph-mon-all') From 29d022f5f1579933ca68bfc4b325b2ca7c63dd6a Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:59:39 +0000 Subject: [PATCH 0653/2699] Deal with upgrades - unlikely --- ceph-proxy/hooks/ceph.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 795c53bb..d4b98b58 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -328,11 +328,14 @@ def bootstrap_monitor_cluster(secret): def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(upstart): + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): # Mark mon as managed by upstart so that # it gets started correctly on reboots - with open(upstart, 'w'): + with open(init_marker, 'w'): pass From 1188342ecb0b9ba5ccddef642d3bb0b3d22443d9 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Mar 2015 17:59:39 +0000 Subject: [PATCH 0654/2699] Deal with upgrades - unlikely --- ceph-mon/hooks/ceph.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 795c53bb..d4b98b58 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -328,11 +328,14 @@ def bootstrap_monitor_cluster(secret): def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(upstart): + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): # Mark mon as managed by upstart so that # it gets started correctly on reboots - with open(upstart, 'w'): + with open(init_marker, 'w'): pass From 5c6a1742bcad7519bbc58635870aaa67bd3bb3d4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 26 Mar 2015
09:03:52 -0700 Subject: [PATCH 0655/2699] [trivial] fix amulet missing https_keystone setting error --- ceph-radosgw/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index b93f964e..44c0e442 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -262,7 +262,6 @@ def test_keystone_ceph_radosgw_relation(self): 'auth_port': '35357', 'auth_protocol': 'http', 'private-address': u.valid_ip, - 'https_keystone': 'False', 'auth_host': u.valid_ip, 'service_username': 'swift', 'service_tenant_id': u.not_null, From 6870da281528b0aa214e8f9259487f59c7bbcfee Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 16 Apr 2015 11:27:24 +0100 Subject: [PATCH 0656/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 41 ++++++++++++++++++- ceph-proxy/hooks/charmhelpers/core/host.py | 6 ++- .../charmhelpers/core/services/helpers.py | 4 +- .../hooks/charmhelpers/core/strutils.py | 4 +- .../hooks/charmhelpers/core/unitdata.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 4 +- .../contrib/openstack/amulet/deployment.py | 29 +++++++++++-- 7 files changed, 79 insertions(+), 11 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index cf552b39..86f805f1 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -20,11 +20,13 @@ # Authors: # Charm Helpers Developers +from __future__ import print_function import os import json import yaml import subprocess import sys +import errno from subprocess import CalledProcessError import six @@ -87,7 +89,18 @@ def log(message, level=None): if not isinstance(message, six.string_types): message = repr(message) command += [message] - subprocess.call(command) + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise class Serializable(UserDict): @@ -566,3 +579,29 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. 
+ + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index b771c611..830822af 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -339,12 +339,16 @@ def lsb_release(): def pwgen(length=None): """Generate a random password.""" if length is None: + # A weak PRNG is ok for picking a random length length = random.choice(range(35, 45)) alphanumeric_chars = [ l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() random_chars = [ - random.choice(alphanumeric_chars) for _ in range(length)] + random_generator.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 15b21664..3eb5fb44 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -139,7 +139,7 @@ class MysqlRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'user', 'password', 'database'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -154,7 +154,7 @@ class HttpRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'port'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) def provide_data(self): return { diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py index efc4402e..a2a784aa 100644 --- a/ceph-proxy/hooks/charmhelpers/core/strutils.py +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -33,9 +33,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't']: + if value in ['y', 'yes', 'true', 't', 'on']: return True - elif value in ['n', 'no', 'false', 'f']: + elif value in ['n', 'no', 'false', 'f', 'off']: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 3000134a..406a35c5 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -443,7 +443,7 @@ def _record_hook(self, hookenv): data = hookenv.execution_environment() self.conf = conf_delta = self.kv.delta(data['conf'], 'config') self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', data['env']) + self.kv.set('env', dict(data['env'])) self.kv.set('unit', data['unit']) self.kv.set('relid', data.get('relid')) return conf_delta, rels_delta diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 65219d33..5088b1d1 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -118,6 +118,9 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluates a variable and returns a bool.
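For example (values illustrative): expected = {'user': 'admin', 'id': lambda v: len(v) == 36}.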
""" + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or @@ -134,7 +137,6 @@ def _validate_dict_data(self, expected, actual): def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) - self.log.debug('actual: {}'.format(repr(actual))) return self._validate_dict_data(expected, actual) def _validate_list_data(self, expected, actual): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0cfeaa4c..fef96384 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -15,6 +15,7 @@ # along with charm-helpers. If not, see . import six +from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -43,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + base_charms = ['mysql', 'mongodb'] if self.stable: for svc in other_services: @@ -100,12 +101,34 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse) = range(6) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, ('precise', 'cloud:precise-havana'): self.precise_havana, ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse} + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo} return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] From b0e712b50fe2f113f460e1d3606a268b483805bb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 16 Apr 2015 11:27:24 +0100 Subject: [PATCH 0657/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-mon/hooks/charmhelpers/core/hookenv.py | 41 ++++++++++++++++++- ceph-mon/hooks/charmhelpers/core/host.py | 6 ++- .../charmhelpers/core/services/helpers.py | 4 +- ceph-mon/hooks/charmhelpers/core/strutils.py | 4 +- ceph-mon/hooks/charmhelpers/core/unitdata.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 4 +- .../contrib/openstack/amulet/deployment.py | 29 +++++++++++-- 7 files changed, 79 insertions(+), 11 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index cf552b39..86f805f1 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -20,11 +20,13 @@ # Authors: # Charm Helpers Developers +from __future__ import print_function import os import json import yaml import subprocess import sys +import errno from subprocess import CalledProcessError import six @@ -87,7 +89,18 @@ def log(message, level=None): if not isinstance(message, six.string_types): message = repr(message) command += [message] - subprocess.call(command) + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise class Serializable(UserDict): @@ -566,3 +579,29 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index b771c611..830822af 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -339,12 +339,16 @@ def lsb_release(): def pwgen(length=None): """Generate a random pasword.""" if length is None: + # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) alphanumeric_chars = [ l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() random_chars = [ - random.choice(alphanumeric_chars) for _ in range(length)] + random_generator.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 15b21664..3eb5fb44 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -139,7 +139,7 @@ class MysqlRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'user', 'password', 'database'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -154,7 +154,7 @@ class HttpRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'port'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) def provide_data(self): return { diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index efc4402e..a2a784aa 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -33,9 +33,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't']: + if value in ['y', 'yes', 'true', 't', 'on']: return True - elif value in ['n', 'no', 'false', 'f']: + elif value in ['n', 'no', 'false', 'f', 'off']: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 3000134a..406a35c5 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -443,7 +443,7 @@ def _record_hook(self, hookenv): data = hookenv.execution_environment() self.conf = conf_delta = self.kv.delta(data['conf'], 'config') self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', data['env']) + self.kv.set('env', dict(data['env'])) self.kv.set('unit', data['unit']) self.kv.set('relid', data.get('relid')) return conf_delta, rels_delta diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 65219d33..5088b1d1 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -118,6 +118,9 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluates a variable and returns a bool.
""" + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or @@ -134,7 +137,6 @@ def _validate_dict_data(self, expected, actual): def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) - self.log.debug('actual: {}'.format(repr(actual))) return self._validate_dict_data(expected, actual) def _validate_list_data(self, expected, actual): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0cfeaa4c..fef96384 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -15,6 +15,7 @@ # along with charm-helpers. If not, see . import six +from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -43,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + base_charms = ['mysql', 'mongodb'] if self.stable: for svc in other_services: @@ -100,12 +101,34 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse) = range(6) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, ('precise', 'cloud:precise-havana'): self.precise_havana, ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse} + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo} return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] From 2054e43c76fd6873b95cb880c3e1e7db1d371c05 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:29 +0000 Subject: [PATCH 0658/2699] auto flip amulet debug on --- ceph-proxy/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index d073d08b..9bfa3023 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -11,7 +11,7 @@ ) # Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(ERROR) +u = OpenStackAmuletUtils(DEBUG) class CephBasicDeployment(OpenStackAmuletDeployment): From 28c0d3ca116b026728f7e58b711f8d70a4b47fd4 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:29 +0000 Subject: [PATCH 0659/2699] auto flip amulet debug on --- ceph-mon/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index d073d08b..9bfa3023 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -11,7 +11,7 @@ ) # Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(ERROR) +u = OpenStackAmuletUtils(DEBUG) class CephBasicDeployment(OpenStackAmuletDeployment): From 5565eba85e08b3a13de6e1de3b082ab90b433abb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:30 +0000 Subject: [PATCH 0660/2699] auto flip amulet debug on --- ceph-osd/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 2f0542b2..ca9d1592 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -11,7 +11,7 @@ ) # Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(ERROR) +u = OpenStackAmuletUtils(DEBUG) class CephOsdBasicDeployment(OpenStackAmuletDeployment): From c40aaaea155186eb88eb682ae6d0ef5d2ddafdad Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:30 +0000 Subject: [PATCH 0661/2699] auto flip amulet debug on --- ceph-radosgw/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 44c0e442..dd0d0ef1 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -11,7 +11,7 @@ ) # Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(ERROR) +u = OpenStackAmuletUtils(DEBUG) class CephRadosGwBasicDeployment(OpenStackAmuletDeployment): From f1c8d4c8836ebe044f6bddee64653cf77f9b9ca8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:37 +0000 Subject: [PATCH 0662/2699] auto remove amulet tests for unsupported releases --- ceph-proxy/tests/12-basic-precise-grizzly | 11 ----------- ceph-proxy/tests/13-basic-precise-havana | 11 ----------- 2 files changed, 22 deletions(-) delete mode 100755 ceph-proxy/tests/12-basic-precise-grizzly delete mode 100755 ceph-proxy/tests/13-basic-precise-havana diff --git a/ceph-proxy/tests/12-basic-precise-grizzly 
b/ceph-proxy/tests/12-basic-precise-grizzly deleted file mode 100755 index 0fa08342..00000000 --- a/ceph-proxy/tests/12-basic-precise-grizzly +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-grizzly.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-grizzly', - source='cloud:precise-updates/grizzly') - deployment.run_tests() diff --git a/ceph-proxy/tests/13-basic-precise-havana b/ceph-proxy/tests/13-basic-precise-havana deleted file mode 100755 index 8a299afc..00000000 --- a/ceph-proxy/tests/13-basic-precise-havana +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-havana.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-havana', - source='cloud:precise-updates/havana') - deployment.run_tests() From 1b66669c21c6199432df2dc03948d6ff1b3f8c10 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:37 +0000 Subject: [PATCH 0663/2699] auto remove amulet tests for unsupported releases --- ceph-mon/tests/12-basic-precise-grizzly | 11 ----------- ceph-mon/tests/13-basic-precise-havana | 11 ----------- 2 files changed, 22 deletions(-) delete mode 100755 ceph-mon/tests/12-basic-precise-grizzly delete mode 100755 ceph-mon/tests/13-basic-precise-havana diff --git a/ceph-mon/tests/12-basic-precise-grizzly b/ceph-mon/tests/12-basic-precise-grizzly deleted file mode 100755 index 0fa08342..00000000 --- a/ceph-mon/tests/12-basic-precise-grizzly +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-grizzly.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-grizzly', - source='cloud:precise-updates/grizzly') - deployment.run_tests() diff --git a/ceph-mon/tests/13-basic-precise-havana b/ceph-mon/tests/13-basic-precise-havana deleted file mode 100755 index 8a299afc..00000000 --- a/ceph-mon/tests/13-basic-precise-havana +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-havana.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-havana', - source='cloud:precise-updates/havana') - deployment.run_tests() From c4853f2554256c78f00b50e187ab0910b2306a27 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:51 +0000 Subject: [PATCH 0664/2699] auto add AMULET_OS_VIP to preserved env vars in makefile --- ceph-proxy/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index e29ab2b7..6ab4e3ac 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -14,7 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: From f98f3af9038e04f321d8430cfbaf9d75026bd2c7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:51 +0000 
Subject: [PATCH 0665/2699] auto add AMULET_OS_VIP to preserved env vars in makefile --- ceph-mon/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index e29ab2b7..6ab4e3ac 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -14,7 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: From 220190ef2048ec3a675c98044a2b44cd92d3b3ed Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:52 +0000 Subject: [PATCH 0666/2699] auto add AMULET_OS_VIP to preserved env vars in makefile --- ceph-osd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 07a18e5e..fe366d16 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -14,7 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: From be55f985edbc6e0661badde5904096b3f7086dbe Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:31:52 +0000 Subject: [PATCH 0667/2699] auto add AMULET_OS_VIP to preserved env vars in makefile --- ceph-radosgw/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 0af1d0c2..ab8082c9 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -13,7 +13,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse bin/charm_helpers_sync.py: From 477c026e8dfb21632443652762c8212666aad2a8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:00 +0000 Subject: [PATCH 0668/2699] auto Makefile test target (amulet): bump juju test timeout to 2700s (same value as the juju-deployer default). Also remove explicit test names, which will cause all +x files in ./tests to be executed (as bundletester does by default). 
--- ceph-proxy/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 6ab4e3ac..befc8dc0 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -14,8 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ - 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: @mkdir -p bin From 218a6bc6460daaeab9494d61c50622e2622c932a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:00 +0000 Subject: [PATCH 0669/2699] auto Makefile test target (amulet): bump juju test timeout to 2700s (same value as the juju-deployer default). Also remove explicit test names, which will cause all +x files in ./tests to be executed (as bundletester does by default). --- ceph-mon/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 6ab4e3ac..befc8dc0 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -14,8 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ - 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: @mkdir -p bin From c7cf61dd07258d2455f22ecda1da0fd8004318ce Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:00 +0000 Subject: [PATCH 0670/2699] auto Makefile test target (amulet): bump juju test timeout to 2700s (same value as the juju-deployer default). Also remove explicit test names, which will cause all +x files in ./tests to be executed (as bundletester does by default). --- ceph-osd/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index fe366d16..cba4f868 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -14,8 +14,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ - 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: @mkdir -p bin From 83ed911012a2774cb4866e82299ebc6e6ac5585c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:01 +0000 Subject: [PATCH 0671/2699] auto Makefile test target (amulet): bump juju test timeout to 2700s (same value as the juju-deployer default). Also remove explicit test names, which will cause all +x files in ./tests to be executed (as bundletester does by default). 
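Dropping the explicit test names defers to the runner's default discovery: every executable file under ./tests is executed. Roughly, and only as a sketch of that behaviour rather than the actual bundletester code:

    import os
    import stat

    def discover_tests(tests_dir='tests'):
        # Every +x file in ./tests counts as a test, run in lexical
        # order (so 00-setup still runs first).
        for name in sorted(os.listdir(tests_dir)):
            path = os.path.join(tests_dir, name)
            mode = os.stat(path).st_mode
            if os.path.isfile(path) and mode & stat.S_IXUSR:
                yield path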
--- ceph-radosgw/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index ab8082c9..eab0e2ea 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -13,8 +13,7 @@ test: # coreycb note: The -v should only be temporary until Amulet sends # raise_status() messages to stderr: # https://bugs.launchpad.net/amulet/+bug/1320357 - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 900 \ - 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: @mkdir -p bin From 3000b8e2de605ee78d10f401e05899f041a47cf3 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:38 +0000 Subject: [PATCH 0672/2699] auto sync charmhelpers --- .../charmhelpers/contrib/openstack/amulet/deployment.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index fef96384..11d49a7c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -101,7 +101,8 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, + self.utopic_juno, self.vivid_kilo) = range(10) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, @@ -110,7 +111,9 @@ def _get_openstack_release(self): ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo} + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): From 0a340a2d9dd23ef015f7f5a0d52dd8da320605d6 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:38 +0000 Subject: [PATCH 0673/2699] auto sync charmhelpers --- .../charmhelpers/contrib/openstack/amulet/deployment.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index fef96384..11d49a7c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -101,7 +101,8 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, + self.utopic_juno, self.vivid_kilo) = range(10) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, @@ -110,7 +111,9 @@ def _get_openstack_release(self): ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo} + 
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): From ce0829363ec0c696a8b22d73ebb20a168c979f8a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:48 +0000 Subject: [PATCH 0674/2699] auto sync charmhelpers --- ceph-osd/hooks/charmhelpers/core/hookenv.py | 41 ++++++++++++++++++- ceph-osd/hooks/charmhelpers/core/host.py | 6 ++- .../charmhelpers/core/services/helpers.py | 4 +- ceph-osd/hooks/charmhelpers/core/strutils.py | 4 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 4 +- .../contrib/openstack/amulet/deployment.py | 32 +++++++++++++-- 7 files changed, 82 insertions(+), 11 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index cf552b39..86f805f1 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -20,11 +20,13 @@ # Authors: # Charm Helpers Developers +from __future__ import print_function import os import json import yaml import subprocess import sys +import errno from subprocess import CalledProcessError import six @@ -87,7 +89,18 @@ def log(message, level=None): if not isinstance(message, six.string_types): message = repr(message) command += [message] - subprocess.call(command) + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise class Serializable(UserDict): @@ -566,3 +579,29 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index b771c611..830822af 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -339,12 +339,16 @@ def lsb_release(): def pwgen(length=None): """Generate a random pasword.""" if length is None: + # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) alphanumeric_chars = [ l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() random_chars = [ - random.choice(alphanumeric_chars) for _ in range(length)] + random_generator.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 15b21664..3eb5fb44 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -139,7 +139,7 @@ class MysqlRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'user', 'password', 'database'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -154,7 +154,7 @@ class HttpRelation(RelationContext): def __init__(self, *args, **kwargs): self.required_keys = ['host', 'port'] - super(HttpRelation).__init__(self, *args, **kwargs) + RelationContext.__init__(self, *args, **kwargs) def provide_data(self): return { diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index efc4402e..a2a784aa 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -33,9 +33,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't']: + if value in ['y', 'yes', 'true', 't', 'on']: return True - elif value in ['n', 'no', 'false', 'f']: + elif value in ['n', 'no', 'false', 'f', 'off']: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 3000134a..406a35c5 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -443,7 +443,7 @@ def _record_hook(self, hookenv): data = hookenv.execution_environment() self.conf = conf_delta = self.kv.delta(data['conf'], 'config') self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', data['env']) + self.kv.set('env', dict(data['env'])) self.kv.set('unit', data['unit']) self.kv.set('relid', data.get('relid')) return conf_delta, rels_delta diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 65219d33..5088b1d1 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -118,6 +118,9 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. 
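Condensed, the pwgen() change above comes down to where the randomness is drawn from: the password characters now come from random.SystemRandom(), which reads the OS entropy source (/dev/urandom), while the non-secret length may still use the seedable module-level PRNG. A runnable sketch:

    import random
    import string

    def pwgen(length=None):
        # Condensed sketch of the fixed helper above.
        if length is None:
            # The length is not secret; the default PRNG is fine here.
            length = random.choice(range(35, 45))
        # Filter out easily-confused characters, as the original does.
        chars = [c for c in string.ascii_letters + string.digits
                 if c not in 'l0QD1vAEIOUaeiou']
        rng = random.SystemRandom()  # backed by os.urandom()
        return ''.join(rng.choice(chars) for _ in range(length))

    print(pwgen(12))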
""" + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or @@ -134,7 +137,6 @@ def _validate_dict_data(self, expected, actual): def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) - self.log.debug('actual: {}'.format(repr(actual))) return self._validate_dict_data(expected, actual) def _validate_list_data(self, expected, actual): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0cfeaa4c..11d49a7c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -15,6 +15,7 @@ # along with charm-helpers. If not, see . import six +from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -43,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + base_charms = ['mysql', 'mongodb'] if self.stable: for svc in other_services: @@ -100,12 +101,37 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse) = range(6) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, + self.utopic_juno, self.vivid_kilo) = range(10) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, ('precise', 'cloud:precise-havana'): self.precise_havana, ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse} + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] From 3c4eeb3f75ecde5df79c728338bd24fa909e66bf Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:32:59 +0000 Subject: [PATCH 0675/2699] auto sync charmhelpers --- .../hooks/charmhelpers/contrib/network/ip.py | 85 ++++- .../contrib/openstack/amulet/deployment.py | 32 +- .../charmhelpers/contrib/openstack/context.py | 304 +++++++++++++++++- .../charmhelpers/contrib/openstack/neutron.py | 83 +++++ .../contrib/openstack/templates/git.upstart | 17 + .../templates/section-keystone-authtoken | 9 + .../openstack/templates/section-rabbitmq-oslo | 22 ++ .../openstack/templates/section-zeromq | 14 + .../charmhelpers/contrib/openstack/utils.py | 272 ++++++++-------- .../hooks/charmhelpers/core/hookenv.py | 41 ++- ceph-radosgw/hooks/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/services/helpers.py | 16 +- .../hooks/charmhelpers/core/strutils.py | 4 +- .../hooks/charmhelpers/core/unitdata.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 4 +- .../contrib/openstack/amulet/deployment.py | 32 +- 16 files changed, 775 insertions(+), 168 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/git.upstart create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-zeromq diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 98b17544..fff6d5ca 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -17,13 +17,16 @@ import glob import re import subprocess +import six +import socket from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - log + log, + WARNING, ) try: @@ -365,3 +368,83 @@ def is_bridge_member(nic): return True return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. 
+ """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . + if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 0cfeaa4c..11d49a7c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -15,6 +15,7 @@ # along with charm-helpers. If not, see . import six +from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -43,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + base_charms = ['mysql', 'mongodb'] if self.stable: for svc in other_services: @@ -100,12 +101,37 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse) = range(6) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, + self.utopic_juno, self.vivid_kilo) = range(10) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, ('precise', 'cloud:precise-havana'): self.precise_havana, ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse} + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index c7c4cd4a..400eaf8e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -16,11 +16,13 @@ import json import os +import re import time from base64 import b64decode from subprocess import check_call import six +import yaml from charmhelpers.fetch import ( apt_install, @@ -45,8 +47,11 @@ ) from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + list_nics, + get_nic_hwaddr, mkdir, write_file, ) @@ -63,16 +68,22 @@ ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, + get_ipv4_addr, get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, is_address_in_network, + is_bridge_member, ) from charmhelpers.contrib.openstack.utils import get_host_ip - CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] @@ -104,9 +115,41 @@ def context_complete(ctxt): def config_flags_parser(config_flags): """Parses config flags string into dict. + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + {'key1': 'value1', + 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + {'key1', 'value1', + 'key2', 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + {'key1', 'subkey1=value1, subkey2=value2'} + The provided config_flags string may be a list of comma-separated values which themselves may be comma-separated list of values. """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. 
+ colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return yaml.safe_load(config_flags) + if config_flags.find('==') >= 0: log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError @@ -191,7 +234,7 @@ def __call__(self): unit=local_unit()) if set_hostname != access_hostname: relation_set(relation_settings={hostname_key: access_hostname}) - return ctxt # Defer any further hook execution for now.... + return None # Defer any further hook execution for now.... password_setting = 'password' if self.relation_prefix: @@ -277,12 +320,29 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): - interfaces = ['identity-service'] + + def __init__(self, service=None, service_user=None, rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] def __call__(self): - log('Generating template context for identity-service', level=DEBUG) + log('Generating template context for ' + self.rel_name, level=DEBUG) ctxt = {} - for rid in relation_ids('identity-service'): + + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. + cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') @@ -291,15 +351,16 @@ def __call__(self): auth_host = format_ipv6_addr(auth_host) or auth_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' - ctxt = {'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol} + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol}) + if context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs @@ -398,6 +459,11 @@ def __call__(self): ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + if not context_complete(ctxt): return {} @@ -677,7 +743,14 @@ def __call__(self): 'endpoints': [], 'ext_ports': []} - for cn in self.canonical_names(): + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + cn = resolve_address(endpoint_type=INTERNAL) self.configure_cert(cn) addresses = self.get_network_addresses() @@ -740,6 +813,19 @@ def 
ovs_ctxt(self): return ovs_ctxt + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + def nvp_ctxt(self): driver = neutron_plugin_attribute(self.plugin, 'driver', self.network_manager) @@ -823,6 +909,8 @@ def __call__(self): ctxt.update(self.n1kv_ctxt()) elif self.plugin == 'Calico': ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -833,6 +921,48 @@ def __call__(self): return ctxt +class NeutronPortContext(OSContextGenerator): + NIC_PREFIXES = ['eth', 'bond'] + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. + """ + if not ports: + return None + + hwaddr_to_nic = {} + hwaddr_to_ip = {} + for nic in list_nics(self.NIC_PREFIXES): + hwaddr = get_nic_hwaddr(nic) + hwaddr_to_nic[hwaddr] = nic + addresses = get_ipv4_addr(nic, fatal=False) + addresses += get_ipv6_addr(iface=nic, fatal=False) + hwaddr_to_ip[hwaddr] = addresses + + resolved = [] + mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) + for entry in ports: + if re.match(mac_regex, entry): + # NIC is in known NICs and does NOT hace an IP address + if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: + # If the nic is part of a bridge then don't use it + if is_bridge_member(hwaddr_to_nic[entry]): + continue + + # Entry is a MAC address for a valid interface that doesn't + # have an IP address assigned yet. + resolved.append(hwaddr_to_nic[entry]) + else: + # If the passed entry is not a MAC address, assume it's a valid + # interface, and that the user put it there on purpose (we can + # trust it to be the real external network). + resolved.append(entry) + + return resolved + + class OSConfigFlagContext(OSContextGenerator): """Provides support for user-defined config flags. @@ -1021,6 +1151,8 @@ def __call__(self): for unit in related_units(rid): ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt @@ -1052,3 +1184,145 @@ def __call__(self): sysctl_create(sysctl_dict, '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Return + defaults if it is not present. 
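The resolve_ports() method above accepts a mixed list of MAC addresses and interface names: a MAC is translated to its NIC only if that NIC has no IP address yet and is not already a bridge member, while plain names are trusted as given. A toy walk-through with made-up data (the bridge-membership check is elided):

    import re

    # The same pattern the helper compiles.
    mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)

    # Made-up stand-ins for list_nics()/get_nic_hwaddr() results:
    hwaddr_to_nic = {'52:54:00:aa:bb:cc': 'eth1'}
    hwaddr_to_ip = {'52:54:00:aa:bb:cc': []}   # no address bound yet

    resolved = []
    for entry in ['52:54:00:aa:bb:cc', 'eth2']:
        if mac_regex.match(entry):
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                resolved.append(hwaddr_to_nic[entry])  # MAC -> 'eth1'
        else:
            resolved.append(entry)                     # trusted as-is

    print(resolved)  # ['eth1', 'eth2']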
+ ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + portmap = parse_data_port_mappings(ports) + ports = portmap.values() + resolved = self.resolve_ports(ports) + normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + if port in ports}) + if resolved: + return {bridge: normalized[port] for bridge, port in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.values(): + ports = mappings.values() + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt["devs"] = '\\n'.join(ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + } + if context_complete(ctxt): + return ctxt + return {} diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 902757fe..02c92e9c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -16,6 +16,7 @@ # Various utilies for dealing with Neutron and the renaming from Quantum. +import six from subprocess import check_output from charmhelpers.core.hookenv import ( @@ -179,6 +180,19 @@ def neutron_plugins(): 'nova-api-metadata']], 'server_packages': ['neutron-server', 'calico-control'], 'server_services': ['neutron-server'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -237,3 +251,72 @@ def network_manager(): else: # ensure accurate naming for all releases post-H return 'neutron' + + +def parse_mappings(mappings): + parsed = {} + if mappings: + mappings = mappings.split(' ') + for m in mappings: + p = m.partition(':') + if p[1] == ':': + parsed[p[0].strip()] = p[2].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port mappings. + + Returns dict of the form {bridge:port}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. + _mappings = {default_bridge: mappings.split(' ')[0]} + + bridges = _mappings.keys() + ports = _mappings.values() + if len(set(bridges)) != len(bridges): + raise Exception("It is not allowed to have more than one port " + "configured on the same bridge") + + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + Returns dict of the form {provider: (start, end)}. 
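Concretely, the mapping parsers above turn space-delimited, colon-separated pairs into dicts. Expected results for some hypothetical inputs, per the code as synced:

    # parse_mappings('physnet1:br-ex physnet2:br-data')
    #   -> {'physnet1': 'br-ex', 'physnet2': 'br-data'}
    #
    # parse_data_port_mappings('br-ex:eth1 br-data:eth2')
    #   -> {'br-ex': 'eth1', 'br-data': 'eth2'}
    #
    # parse_data_port_mappings('eth1')   # legacy port-only config
    #   -> {'br-data': 'eth1'}           # default_bridge fills in
    #
    # parse_vlan_range_mappings('physnet1:1000:2000')
    #   -> {'physnet1': ('1000', '2000')}
    #
    # Duplicate bridges or duplicate ports raise an Exception via the
    # set()-length checks in parse_data_port_mappings().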
+ """ + _mappings = parse_mappings(mappings) + if not _mappings: + return {} + + mappings = {} + for p, r in six.iteritems(_mappings): + mappings[p] = tuple(r.split(':')) + + return mappings diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/git.upstart new file mode 100644 index 00000000..4bed404b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/git.upstart @@ -0,0 +1,17 @@ +description "{{ service_description }}" +author "Juju {{ service_name }} Charm " + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec start-stop-daemon --start --chuid {{ user_name }} \ + --chdir {{ start_dir }} --name {{ process_name }} \ + --exec {{ executable_name }} -- \ + {% for config_file in config_files -%} + --config-file={{ config_file }} \ + {% endfor -%} + {% if log_file -%} + --log-file={{ log_file }} + {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken new file mode 100644 index 00000000..2a37edd5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo new file mode 100644 index 00000000..b444c9c9 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo @@ -0,0 +1,22 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +[oslo_messaging_rabbit] +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-zeromq new file mode 100644 index 00000000..95f1a76c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = redis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = ring +{% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index af2b3596..f90a0289 100644 --- 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,12 +23,17 @@ import subprocess import json import os -import socket import sys import six import yaml +from charmhelpers.contrib.network import ip + +from charmhelpers.core import ( + unitdata, +) + from charmhelpers.core.hookenv import ( config, log as juju_log, @@ -329,6 +334,21 @@ def configure_installation_source(rel): error_out("Invalid openstack-release specified: %s" % rel) +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + def save_script_rc(script_path="scripts/scriptrc", **env_vars): """ Write an rc file in the charm-delivered directory containing @@ -421,77 +441,10 @@ def clean_storage(block_device): else: zap_disk(block_device) - -def is_ip(address): - """ - Returns True if address is a valid IP address. - """ - try: - # Test to see if already an IPv4 address - socket.inet_aton(address) - return True - except socket.error: - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - apt_install('python-dnspython') - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, six.string_types): - rtype = 'A' - else: - return None - - answers = dns.resolver.query(address, rtype) - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - return ns_query(hostname) - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - apt_install('python-dnspython') - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - if not result: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): @@ -535,82 +488,106 @@ def wrapped_f(*args): def git_install_requested(): - """Returns true if openstack-origin-git is specified.""" - return config('openstack-origin-git') != "None" + """ + Returns true if openstack-origin-git is specified. + """ + return config('openstack-origin-git') is not None requirements_dir = None -def git_clone_and_install(file_name, core_project): - """Clone/install all OpenStack repos specified in yaml config file.""" +def git_clone_and_install(projects_yaml, core_project): + """ + Clone/install all specified OpenStack repositories. 
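Among the additions above, config_value_changed() persists each option's last-seen value in the unit's key/value store and only reports a change on later runs. A self-contained sketch, with a toy class standing in for config() and unitdata.kv():

    class FakeKV(object):
        # Illustration-only stand-in for charmhelpers.core.unitdata.kv().
        def __init__(self):
            self._data = {}

        def get(self, key):
            return self._data.get(key)

        def set(self, key, value):
            self._data[key] = value

    def config_value_changed(db, option, current):
        # Mirrors the helper above with the Juju plumbing factored out.
        saved = db.get(option)
        db.set(option, current)
        if saved is None:
            return False   # first run: nothing recorded yet
        return current != saved

    db = FakeKV()
    print(config_value_changed(db, 'source', 'cloud:trusty-juno'))  # False
    print(config_value_changed(db, 'source', 'cloud:trusty-juno'))  # False
    print(config_value_changed(db, 'source', 'cloud:trusty-kilo'))  # True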
+ + The expected format of projects_yaml is: + repositories: + - {name: keystone, + repository: 'git://git.openstack.org/openstack/keystone.git', + branch: 'stable/icehouse'} + - {name: requirements, + repository: 'git://git.openstack.org/openstack/requirements.git', + branch: 'stable/icehouse'} + directory: /mnt/openstack-git + http_proxy: http://squid.internal:3128 + https_proxy: https://squid.internal:3128 + + The directory, http_proxy, and https_proxy keys are optional. + """ global requirements_dir + parent_dir = '/mnt/openstack-git' - if file_name == "None": + if not projects_yaml: return - yaml_file = os.path.join(charm_dir(), file_name) + projects = yaml.load(projects_yaml) + _git_validate_projects_yaml(projects, core_project) - # clone/install the requirements project first - installed = _git_clone_and_install_subset(yaml_file, - whitelist=['requirements']) - if 'requirements' not in installed: - error_out('requirements git repository must be specified') + old_environ = dict(os.environ) - # clone/install all other projects except requirements and the core project - blacklist = ['requirements', core_project] - _git_clone_and_install_subset(yaml_file, blacklist=blacklist, - update_requirements=True) + if 'http_proxy' in projects.keys(): + os.environ['http_proxy'] = projects['http_proxy'] + if 'https_proxy' in projects.keys(): + os.environ['https_proxy'] = projects['https_proxy'] - # clone/install the core project - whitelist = [core_project] - installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist, - update_requirements=True) - if core_project not in installed: - error_out('{} git repository must be specified'.format(core_project)) + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + for p in projects['repositories']: + repo = p['repository'] + branch = p['branch'] + if p['name'] == 'requirements': + repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + update_requirements=False) + requirements_dir = repo_dir + else: + repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + update_requirements=True) -def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[], - update_requirements=False): - """Clone/install subset of OpenStack repos specified in yaml config file.""" - global requirements_dir - installed = [] - - with open(yaml_file, 'r') as fd: - projects = yaml.load(fd) - for proj, val in projects.items(): - # The project subset is chosen based on the following 3 rules: - # 1) If project is in blacklist, we don't clone/install it, period. - # 2) If whitelist is empty, we clone/install everything else. - # 3) If whitelist is not empty, we clone/install everything in the - # whitelist. - if proj in blacklist: - continue - if whitelist and proj not in whitelist: - continue - repo = val['repository'] - branch = val['branch'] - repo_dir = _git_clone_and_install_single(repo, branch, - update_requirements) - if proj == 'requirements': - requirements_dir = repo_dir - installed.append(proj) - return installed - - -def _git_clone_and_install_single(repo, branch, update_requirements=False): - """Clone and install a single git repository.""" - dest_parent_dir = "/mnt/openstack-git/" - dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo)) - - if not os.path.exists(dest_parent_dir): - juju_log('Host dir not mounted at {}. 
' - 'Creating directory there instead.'.format(dest_parent_dir)) - os.mkdir(dest_parent_dir) + os.environ = old_environ + + +def _git_validate_projects_yaml(projects, core_project): + """ + Validate the projects yaml. + """ + _git_ensure_key_exists('repositories', projects) + + for project in projects['repositories']: + _git_ensure_key_exists('name', project.keys()) + _git_ensure_key_exists('repository', project.keys()) + _git_ensure_key_exists('branch', project.keys()) + + if projects['repositories'][0]['name'] != 'requirements': + error_out('{} git repo must be specified first'.format('requirements')) + + if projects['repositories'][-1]['name'] != core_project: + error_out('{} git repo must be specified last'.format(core_project)) + + +def _git_ensure_key_exists(key, keys): + """ + Ensure that key exists in keys. + """ + if key not in keys: + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) + + +def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): + """ + Clone and install a single git repository. + """ + dest_dir = os.path.join(parent_dir, os.path.basename(repo)) + + if not os.path.exists(parent_dir): + juju_log('Directory already exists at {}. ' + 'No need to create directory.'.format(parent_dir)) + os.mkdir(parent_dir) if not os.path.exists(dest_dir): juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch) else: repo_dir = dest_dir @@ -627,16 +604,39 @@ def _git_clone_and_install_single(repo, branch, update_requirements=False): def _git_update_requirements(package_dir, reqs_dir): - """Update from global requirements. + """ + Update from global requirements. - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt.""" + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt. + """ orig_dir = os.getcwd() os.chdir(reqs_dir) - cmd = "python update.py {}".format(package_dir) + cmd = ['python', 'update.py', package_dir] try: - subprocess.check_call(cmd.split(' ')) + subprocess.check_call(cmd) except subprocess.CalledProcessError: package = os.path.basename(package_dir) error_out("Error updating {} from global-requirements.txt".format(package)) os.chdir(orig_dir) + + +def git_src_dir(projects_yaml, project): + """ + Return the directory where the specified project's source is located. 
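Given the projects_yaml layout shown in the docstring above, the lookup helpers reduce to simple dict and path work; git_src_dir() (whose body follows) is a good example. Worked through with the docstring's own sample values:

    import os

    projects = {
        'directory': '/mnt/openstack-git',   # optional override
        'repositories': [
            {'name': 'requirements',
             'repository': 'git://git.openstack.org/openstack/requirements.git',
             'branch': 'stable/icehouse'},
            {'name': 'keystone',
             'repository': 'git://git.openstack.org/openstack/keystone.git',
             'branch': 'stable/icehouse'},
        ],
    }

    # git_src_dir(projects_yaml, 'keystone') boils down to:
    repo = [p for p in projects['repositories'] if p['name'] == 'keystone'][0]
    print(os.path.join(projects['directory'],
                       os.path.basename(repo['repository'])))
    # -> /mnt/openstack-git/keystone.git

    # _git_validate_projects_yaml() additionally enforces ordering:
    # 'requirements' first, the core project last.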
+ """ + parent_dir = '/mnt/openstack-git' + + if not projects_yaml: + return + + projects = yaml.load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + if p['name'] == project: + return os.path.join(parent_dir, os.path.basename(p['repository'])) + + return None diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index cf552b39..86f805f1 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -20,11 +20,13 @@ # Authors: # Charm Helpers Developers +from __future__ import print_function import os import json import yaml import subprocess import sys +import errno from subprocess import CalledProcessError import six @@ -87,7 +89,18 @@ def log(message, level=None): if not isinstance(message, six.string_types): message = repr(message) command += [message] - subprocess.call(command) + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise class Serializable(UserDict): @@ -566,3 +579,29 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index b771c611..830822af 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -339,12 +339,16 @@ def lsb_release(): def pwgen(length=None): """Generate a random pasword.""" if length is None: + # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) alphanumeric_chars = [ l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() random_chars = [ - random.choice(alphanumeric_chars) for _ in range(length)] + random_generator.choice(alphanumeric_chars) for _ in range(length)] return(''.join(random_chars)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 5e3af9da..3eb5fb44 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -45,12 +45,14 @@ class RelationContext(dict): """ name = None interface = None - required_keys = [] def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + if name is not None: self.name = name - if additional_required_keys is not None: + if additional_required_keys: self.required_keys.extend(additional_required_keys) self.get_data() @@ -134,7 +136,10 @@ class MysqlRelation(RelationContext): """ name = 'db' interface = 'mysql' - required_keys = ['host', 'user', 'password', 'database'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -146,7 +151,10 @@ class HttpRelation(RelationContext): """ name = 'website' interface = 'http' - required_keys = ['host', 'port'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) def provide_data(self): return { diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index efc4402e..a2a784aa 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -33,9 +33,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't']: + if value in ['y', 'yes', 'true', 't', 'on']: return True - elif value in ['n', 'no', 'false', 'f']: + elif value in ['n', 'no', 'false', 'f', 'off']: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 3000134a..406a35c5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -443,7 +443,7 @@ def _record_hook(self, hookenv): data = hookenv.execution_environment() self.conf = conf_delta = self.kv.delta(data['conf'], 'config') self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', data['env']) + self.kv.set('env', dict(data['env'])) self.kv.set('unit', data['unit']) self.kv.set('relid', data.get('relid')) return conf_delta, rels_delta diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 65219d33..5088b1d1 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -118,6 +118,9 @@ def _validate_dict_data(self, expected, actual): longs, or can be a function that evaluate a variable and returns a bool. 
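The services/helpers.py change above, which moves required_keys off the class and into __init__, avoids a classic Python pitfall: a mutable class attribute is shared by every instance, so one relation context calling extend() leaked extra keys into every other context. A minimal illustration, with class names invented for the sketch:

    class Shared(object):
        required_keys = []               # one list shared by every instance

    class PerInstance(object):
        def __init__(self):
            self.required_keys = []      # a fresh list per instance

    a, b = Shared(), Shared()
    a.required_keys.extend(['host'])
    print(b.required_keys)               # ['host'], the key leaked into b

    c, d = PerInstance(), PerInstance()
    c.required_keys.extend(['host'])
    print(d.required_keys)               # []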
""" + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + for k, v in six.iteritems(expected): if k in actual: if (isinstance(v, six.string_types) or @@ -134,7 +137,6 @@ def _validate_dict_data(self, expected, actual): def validate_relation_data(self, sentry_unit, relation, expected): """Validate actual relation data based on expected relation data.""" actual = sentry_unit.relation(relation[0], relation[1]) - self.log.debug('actual: {}'.format(repr(actual))) return self._validate_dict_data(expected, actual) def _validate_list_data(self, expected, actual): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0cfeaa4c..11d49a7c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -15,6 +15,7 @@ # along with charm-helpers. If not, see . import six +from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -43,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + base_charms = ['mysql', 'mongodb'] if self.stable: for svc in other_services: @@ -100,12 +101,37 @@ def _get_openstack_release(self): """ (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse) = range(6) + self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, + self.utopic_juno, self.vivid_kilo) = range(10) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, ('precise', 'cloud:precise-havana'): self.precise_havana, ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse} + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] From d31e07ad05f82b00bb1a381f80e193bb0851c51b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:35:49 +0000 Subject: [PATCH 0676/2699] auto rename amulet tests --- .../{14-basic-precise-icehouse => 014-basic-precise-icehouse} | 0 .../tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename ceph-proxy/tests/{14-basic-precise-icehouse => 014-basic-precise-icehouse} (100%) rename ceph-proxy/tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} (100%) diff --git a/ceph-proxy/tests/14-basic-precise-icehouse b/ceph-proxy/tests/014-basic-precise-icehouse similarity index 100% rename from ceph-proxy/tests/14-basic-precise-icehouse rename to ceph-proxy/tests/014-basic-precise-icehouse diff --git a/ceph-proxy/tests/15-basic-trusty-icehouse b/ceph-proxy/tests/015-basic-trusty-icehouse similarity index 100% rename from ceph-proxy/tests/15-basic-trusty-icehouse rename to ceph-proxy/tests/015-basic-trusty-icehouse From 238122e0a8a5d8d41477d6514f1cb474636d0de6 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:35:49 +0000 Subject: [PATCH 0677/2699] auto rename amulet tests --- .../{14-basic-precise-icehouse => 014-basic-precise-icehouse} | 0 .../tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename ceph-mon/tests/{14-basic-precise-icehouse => 014-basic-precise-icehouse} (100%) rename ceph-mon/tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} (100%) diff --git a/ceph-mon/tests/14-basic-precise-icehouse b/ceph-mon/tests/014-basic-precise-icehouse similarity index 100% rename from ceph-mon/tests/14-basic-precise-icehouse rename to ceph-mon/tests/014-basic-precise-icehouse diff --git a/ceph-mon/tests/15-basic-trusty-icehouse b/ceph-mon/tests/015-basic-trusty-icehouse similarity index 100% rename from ceph-mon/tests/15-basic-trusty-icehouse rename to ceph-mon/tests/015-basic-trusty-icehouse From 747ae201c64cdaa0e31765372558c1353f157b9d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:35:50 +0000 Subject: [PATCH 0678/2699] auto rename amulet tests --- .../{14-basic-precise-icehouse => 014-basic-precise-icehouse} | 0 .../tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename ceph-osd/tests/{14-basic-precise-icehouse => 014-basic-precise-icehouse} (100%) rename ceph-osd/tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} (100%) diff --git a/ceph-osd/tests/14-basic-precise-icehouse b/ceph-osd/tests/014-basic-precise-icehouse similarity index 100% rename from ceph-osd/tests/14-basic-precise-icehouse rename to ceph-osd/tests/014-basic-precise-icehouse diff --git a/ceph-osd/tests/15-basic-trusty-icehouse b/ceph-osd/tests/015-basic-trusty-icehouse similarity index 100% rename from ceph-osd/tests/15-basic-trusty-icehouse rename to ceph-osd/tests/015-basic-trusty-icehouse From 24906f989857c23298dfe1451191e1fdb3e6fd0a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:35:51 +0000 Subject: [PATCH 0679/2699] auto rename amulet tests --- 
.../{14-basic-precise-icehouse => 014-basic-precise-icehouse} | 0 .../tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename ceph-radosgw/tests/{14-basic-precise-icehouse => 014-basic-precise-icehouse} (100%) rename ceph-radosgw/tests/{15-basic-trusty-icehouse => 015-basic-trusty-icehouse} (100%) diff --git a/ceph-radosgw/tests/14-basic-precise-icehouse b/ceph-radosgw/tests/014-basic-precise-icehouse similarity index 100% rename from ceph-radosgw/tests/14-basic-precise-icehouse rename to ceph-radosgw/tests/014-basic-precise-icehouse diff --git a/ceph-radosgw/tests/15-basic-trusty-icehouse b/ceph-radosgw/tests/015-basic-trusty-icehouse similarity index 100% rename from ceph-radosgw/tests/15-basic-trusty-icehouse rename to ceph-radosgw/tests/015-basic-trusty-icehouse From dcb17ff3d8b274722063d7b1a8e4b5731be5284e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:09 +0000 Subject: [PATCH 0680/2699] auto add amulet tests for supported releases --- ceph-osd/tests/016-basic-trusty-juno | 11 +++++++++++ ceph-osd/tests/017-basic-trusty-kilo | 11 +++++++++++ ceph-osd/tests/018-basic-utopic-juno | 9 +++++++++ ceph-osd/tests/019-basic-vivid-kilo | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100755 ceph-osd/tests/016-basic-trusty-juno create mode 100755 ceph-osd/tests/017-basic-trusty-kilo create mode 100755 ceph-osd/tests/018-basic-utopic-juno create mode 100755 ceph-osd/tests/019-basic-vivid-kilo diff --git a/ceph-osd/tests/016-basic-trusty-juno b/ceph-osd/tests/016-basic-trusty-juno new file mode 100755 index 00000000..5606a174 --- /dev/null +++ b/ceph-osd/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-juno.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/ceph-osd/tests/017-basic-trusty-kilo b/ceph-osd/tests/017-basic-trusty-kilo new file mode 100755 index 00000000..5bb258e8 --- /dev/null +++ b/ceph-osd/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-kilo.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/ceph-osd/tests/018-basic-utopic-juno b/ceph-osd/tests/018-basic-utopic-juno new file mode 100755 index 00000000..6241fb10 --- /dev/null +++ b/ceph-osd/tests/018-basic-utopic-juno @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on utopic-juno.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='utopic') + deployment.run_tests() diff --git a/ceph-osd/tests/019-basic-vivid-kilo b/ceph-osd/tests/019-basic-vivid-kilo new file mode 100755 index 00000000..6fd3f288 --- /dev/null +++ b/ceph-osd/tests/019-basic-vivid-kilo @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on vivid-kilo.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='vivid') + deployment.run_tests() From c209b7eb4984a0c62745aa4d821e90513c86fe70 Mon Sep 17 
00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:09 +0000 Subject: [PATCH 0681/2699] auto add amulet tests for supported releases --- ceph-proxy/tests/016-basic-trusty-juno | 11 +++++++++++ ceph-proxy/tests/017-basic-trusty-kilo | 11 +++++++++++ ceph-proxy/tests/018-basic-utopic-juno | 9 +++++++++ ceph-proxy/tests/019-basic-vivid-kilo | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100755 ceph-proxy/tests/016-basic-trusty-juno create mode 100755 ceph-proxy/tests/017-basic-trusty-kilo create mode 100755 ceph-proxy/tests/018-basic-utopic-juno create mode 100755 ceph-proxy/tests/019-basic-vivid-kilo diff --git a/ceph-proxy/tests/016-basic-trusty-juno b/ceph-proxy/tests/016-basic-trusty-juno new file mode 100755 index 00000000..28c7684e --- /dev/null +++ b/ceph-proxy/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-juno.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/017-basic-trusty-kilo new file mode 100755 index 00000000..0a787b22 --- /dev/null +++ b/ceph-proxy/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-kilo.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/ceph-proxy/tests/018-basic-utopic-juno b/ceph-proxy/tests/018-basic-utopic-juno new file mode 100755 index 00000000..9b9b760f --- /dev/null +++ b/ceph-proxy/tests/018-basic-utopic-juno @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on utopic-juno.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='utopic') + deployment.run_tests() diff --git a/ceph-proxy/tests/019-basic-vivid-kilo b/ceph-proxy/tests/019-basic-vivid-kilo new file mode 100755 index 00000000..934261b5 --- /dev/null +++ b/ceph-proxy/tests/019-basic-vivid-kilo @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on vivid-kilo.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='vivid') + deployment.run_tests() From 12aa0c4120300bfada0c013324bc3f8c43cc71de Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:09 +0000 Subject: [PATCH 0682/2699] auto add amulet tests for supported releases --- ceph-mon/tests/016-basic-trusty-juno | 11 +++++++++++ ceph-mon/tests/017-basic-trusty-kilo | 11 +++++++++++ ceph-mon/tests/018-basic-utopic-juno | 9 +++++++++ ceph-mon/tests/019-basic-vivid-kilo | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100755 ceph-mon/tests/016-basic-trusty-juno create mode 100755 ceph-mon/tests/017-basic-trusty-kilo create mode 100755 ceph-mon/tests/018-basic-utopic-juno create mode 100755 ceph-mon/tests/019-basic-vivid-kilo diff --git a/ceph-mon/tests/016-basic-trusty-juno b/ceph-mon/tests/016-basic-trusty-juno new file mode 100755 index 00000000..28c7684e --- /dev/null +++ b/ceph-mon/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on 
trusty-juno.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/ceph-mon/tests/017-basic-trusty-kilo b/ceph-mon/tests/017-basic-trusty-kilo new file mode 100755 index 00000000..0a787b22 --- /dev/null +++ b/ceph-mon/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-kilo.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/ceph-mon/tests/018-basic-utopic-juno b/ceph-mon/tests/018-basic-utopic-juno new file mode 100755 index 00000000..9b9b760f --- /dev/null +++ b/ceph-mon/tests/018-basic-utopic-juno @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on utopic-juno.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='utopic') + deployment.run_tests() diff --git a/ceph-mon/tests/019-basic-vivid-kilo b/ceph-mon/tests/019-basic-vivid-kilo new file mode 100755 index 00000000..934261b5 --- /dev/null +++ b/ceph-mon/tests/019-basic-vivid-kilo @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on vivid-kilo.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='vivid') + deployment.run_tests() From 50d49bf6dd4cdf5e4e4c82bea4435e67a9a9731e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:11 +0000 Subject: [PATCH 0683/2699] auto add amulet tests for supported releases --- ceph-radosgw/tests/016-basic-trusty-juno | 11 +++++++++++ ceph-radosgw/tests/017-basic-trusty-kilo | 11 +++++++++++ ceph-radosgw/tests/018-basic-utopic-juno | 9 +++++++++ ceph-radosgw/tests/019-basic-vivid-kilo | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100755 ceph-radosgw/tests/016-basic-trusty-juno create mode 100755 ceph-radosgw/tests/017-basic-trusty-kilo create mode 100755 ceph-radosgw/tests/018-basic-utopic-juno create mode 100755 ceph-radosgw/tests/019-basic-vivid-kilo diff --git a/ceph-radosgw/tests/016-basic-trusty-juno b/ceph-radosgw/tests/016-basic-trusty-juno new file mode 100755 index 00000000..d5de9b14 --- /dev/null +++ b/ceph-radosgw/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on trusty-juno.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/ceph-radosgw/tests/017-basic-trusty-kilo b/ceph-radosgw/tests/017-basic-trusty-kilo new file mode 100755 index 00000000..3335b188 --- /dev/null +++ b/ceph-radosgw/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on trusty-kilo.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/ceph-radosgw/tests/018-basic-utopic-juno 
b/ceph-radosgw/tests/018-basic-utopic-juno new file mode 100755 index 00000000..4e4f5e59 --- /dev/null +++ b/ceph-radosgw/tests/018-basic-utopic-juno @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on utopic-juno.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='utopic') + deployment.run_tests() diff --git a/ceph-radosgw/tests/019-basic-vivid-kilo b/ceph-radosgw/tests/019-basic-vivid-kilo new file mode 100755 index 00000000..9238de85 --- /dev/null +++ b/ceph-radosgw/tests/019-basic-vivid-kilo @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on vivid-kilo.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='vivid') + deployment.run_tests() From ddc49cd3bd964cb9db0da5976d6c5164bb0830a7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:21 +0000 Subject: [PATCH 0684/2699] auto disable kilo amulet tests (until later confirmed as functional) --- ceph-osd/tests/017-basic-trusty-kilo | 0 ceph-osd/tests/019-basic-vivid-kilo | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 ceph-osd/tests/017-basic-trusty-kilo mode change 100755 => 100644 ceph-osd/tests/019-basic-vivid-kilo diff --git a/ceph-osd/tests/017-basic-trusty-kilo b/ceph-osd/tests/017-basic-trusty-kilo old mode 100755 new mode 100644 diff --git a/ceph-osd/tests/019-basic-vivid-kilo b/ceph-osd/tests/019-basic-vivid-kilo old mode 100755 new mode 100644 From e29fc4ff6ecc0bd67b5f5a457eb776b9656686a8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:21 +0000 Subject: [PATCH 0685/2699] auto disable kilo amulet tests (until later confirmed as functional) --- ceph-proxy/tests/017-basic-trusty-kilo | 0 ceph-proxy/tests/019-basic-vivid-kilo | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 ceph-proxy/tests/017-basic-trusty-kilo mode change 100755 => 100644 ceph-proxy/tests/019-basic-vivid-kilo diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/017-basic-trusty-kilo old mode 100755 new mode 100644 diff --git a/ceph-proxy/tests/019-basic-vivid-kilo b/ceph-proxy/tests/019-basic-vivid-kilo old mode 100755 new mode 100644 From ec0d640b6fdf47f8c265ec28dba3cc9edf106732 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:21 +0000 Subject: [PATCH 0686/2699] auto disable kilo amulet tests (until later confirmed as functional) --- ceph-mon/tests/017-basic-trusty-kilo | 0 ceph-mon/tests/019-basic-vivid-kilo | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 ceph-mon/tests/017-basic-trusty-kilo mode change 100755 => 100644 ceph-mon/tests/019-basic-vivid-kilo diff --git a/ceph-mon/tests/017-basic-trusty-kilo b/ceph-mon/tests/017-basic-trusty-kilo old mode 100755 new mode 100644 diff --git a/ceph-mon/tests/019-basic-vivid-kilo b/ceph-mon/tests/019-basic-vivid-kilo old mode 100755 new mode 100644 From f23c848b0e035463a3f4a1e1db76ffa2c43fa201 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 16 Apr 2015 21:36:22 +0000 Subject: [PATCH 0687/2699] auto disable kilo amulet tests (until later confirmed as functional) --- ceph-radosgw/tests/017-basic-trusty-kilo | 0 ceph-radosgw/tests/019-basic-vivid-kilo | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 ceph-radosgw/tests/017-basic-trusty-kilo mode 
change 100755 => 100644 ceph-radosgw/tests/019-basic-vivid-kilo diff --git a/ceph-radosgw/tests/017-basic-trusty-kilo b/ceph-radosgw/tests/017-basic-trusty-kilo old mode 100755 new mode 100644 diff --git a/ceph-radosgw/tests/019-basic-vivid-kilo b/ceph-radosgw/tests/019-basic-vivid-kilo old mode 100755 new mode 100644 From 8dca19c6291025c8f3dc0584f44f46a287ebd8b3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sun, 19 Apr 2015 10:01:44 +0100 Subject: [PATCH 0688/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9d961cfb..95a79c2e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -247,7 +247,9 @@ def write(self): service('restart', 'nagios-nrpe-server') - for rid in relation_ids("local-monitors"): + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: relation_set(relation_id=rid, monitors=yaml.dump(monitors)) From e9bcaf997a3a24a61ef316fe8a75ee2a5633b065 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sun, 19 Apr 2015 10:01:44 +0100 Subject: [PATCH 0689/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9d961cfb..95a79c2e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -247,7 +247,9 @@ def write(self): service('restart', 'nagios-nrpe-server') - for rid in relation_ids("local-monitors"): + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: relation_set(relation_id=rid, monitors=yaml.dump(monitors)) From 4d54540a340060821f8a30acaba8f5399178b447 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 23 Apr 2015 15:53:03 +0100 Subject: [PATCH 0690/2699] [gnuoy,trivial] Pre-release charmhelper sync --- .../charmhelpers/contrib/amulet/utils.py | 9 +++++++- .../contrib/openstack/amulet/deployment.py | 21 +++++++++++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 5088b1d1..f61c2e8b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -79,6 +79,9 @@ def validate_services(self, commands): for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None @@ -86,7 +89,11 @@ def validate_services(self, commands): def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) - config = ConfigParser.ConfigParser() + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. 
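The allow_no_value flag referenced in this note (the upstream bug link follows just below) is what lets ConfigParser accept mysql-style flag options that carry no '=value' part. A small sketch in the helper's Python 2 idiom; the my.cnf snippet is invented:

    import io
    import ConfigParser   # the module is named 'configparser' on Python 3

    my_cnf = u"[mysqld]\nskip-external-locking\nbind-address = 0.0.0.0\n"

    config = ConfigParser.ConfigParser(allow_no_value=True)
    config.readfp(io.StringIO(my_cnf))

    print(config.get('mysqld', 'bind-address'))           # 0.0.0.0
    print(config.get('mysqld', 'skip-external-locking'))  # None (flag only)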
+ # https://bugs.python.org/issue7005 + config = ConfigParser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 11d49a7c..461a702f 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -46,15 +46,22 @@ def _determine_branch_locations(self, other_services): stable or next branches for the other_services.""" base_charms = ['mysql', 'mongodb'] + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + if self.stable: for svc in other_services: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: for svc in other_services: if svc['name'] in base_charms: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, @@ -99,10 +106,12 @@ def _get_openstack_release(self): Return an integer representing the enum value of the openstack release. """ + # Must be ordered by OpenStack release (not by Ubuntu release): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, - self.utopic_juno, self.vivid_kilo) = range(10) + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo) = range(10) + releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, From bdeae3adc51a3dc340d03c0ea88a403971d0f477 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 23 Apr 2015 15:53:03 +0100 Subject: [PATCH 0691/2699] [gnuoy,trivial] Pre-release charmhelper sync --- .../charmhelpers/contrib/amulet/utils.py | 9 +++++++- .../contrib/openstack/amulet/deployment.py | 21 +++++++++++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 5088b1d1..f61c2e8b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -79,6 +79,9 @@ def validate_services(self, commands): for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None @@ -86,7 +89,11 @@ def validate_services(self, commands): def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) - config = ConfigParser.ConfigParser() + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. 
+ # https://bugs.python.org/issue7005 + config = ConfigParser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 11d49a7c..461a702f 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -46,15 +46,22 @@ def _determine_branch_locations(self, other_services): stable or next branches for the other_services.""" base_charms = ['mysql', 'mongodb'] + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + if self.stable: for svc in other_services: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: for svc in other_services: if svc['name'] in base_charms: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, @@ -99,10 +106,12 @@ def _get_openstack_release(self): Return an integer representing the enum value of the openstack release. """ + # Must be ordered by OpenStack release (not by Ubuntu release): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, - self.utopic_juno, self.vivid_kilo) = range(10) + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo) = range(10) + releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, From 0169b0d044833f4f86b059b6aa7a554235c9b83b Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 28 Apr 2015 18:18:24 -0300 Subject: [PATCH 0692/2699] Use ceph-public-address instead of private-address when relating to mon --- ceph-radosgw/hooks/hooks.py | 8 +++----- ceph-radosgw/unit_tests/test_hooks.py | 9 +++++++-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 9b3699ac..fbb1952c 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -199,11 +199,9 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - hosts.append( - '{}:6789'.format(get_host_ip( - relation_get('private-address', - unit, relid))) - ) + host_ip = get_host_ip(relation_get('ceph-public-address', + unit, relid)) + hosts.append('{}:6789'.format(host_ip)) hosts.sort() return hosts diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index f494ab74..46279486 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -213,8 +213,13 @@ def test_config_changed(self): def test_get_mon_hosts(self): self.relation_ids.return_value = ['monrelid'] self.related_units.return_value = ['monunit'] - self.relation_get.return_value = '10.0.0.1' - self.get_host_ip.return_value = '10.0.0.1' + + def rel_get(k, *args): + return {'private-address': '127.0.0.1', + 'ceph-public-address': '10.0.0.1'}[k] + + self.relation_get.side_effect = rel_get + self.get_host_ip.side_effect = lambda x: x self.assertEquals(ceph_hooks.get_mon_hosts(), 
['10.0.0.1:6789']) def test_get_conf(self): From 69792746e3ecd137333ccbb573d2dde76e2a5b9c Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 28 Apr 2015 18:20:22 -0300 Subject: [PATCH 0693/2699] Fix lint warning --- ceph-radosgw/unit_tests/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index bd257485..060fba97 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -25,7 +25,7 @@ def load_config(): if not config: logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % file) + 'of %s. ' % f) raise Exception return yaml.safe_load(open(config).read())['options'] From 1b6b605b801b8b2161c48a3e76409adfab1bad75 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 2 Jun 2015 11:51:37 -0700 Subject: [PATCH 0694/2699] [wolsen,r=] Add support for overriding public endpoint addresses. Adds in the config option for overriding public endpoint addresses and introduces a unit tests to ensure that the override for the public address is functioning correctly. Closes-Bug: #1398182 --- ceph-radosgw/config.yaml | 12 +++ .../charmhelpers/contrib/openstack/ip.py | 76 ++++++++----------- ceph-radosgw/hooks/hooks.py | 19 +---- ceph-radosgw/unit_tests/test_hooks.py | 36 +++++++-- 4 files changed, 77 insertions(+), 66 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index c6ad45a1..2bc7732d 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -87,3 +87,15 @@ options: description: | Default multicast port number that will be used to communicate between HA Cluster nodes. + endpoint-public-name: + type: string + default: + description: | + The hostname or address of the public endpoints created for ceph-radosgw + in the keystone identity provider. + . + This value will be used for public endpoints. For example, an + endpoint-public-name set to 'files.example.com' with will create + the following public endpoint for the ceph-radosgw. + . + https://files.example.com:80/swift/v1 \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 29bbddcb..16394e35 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -26,8 +26,6 @@ ) from charmhelpers.contrib.hahelpers.cluster import is_clustered -from functools import partial - PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' @@ -35,15 +33,18 @@ ADDRESS_MAP = { PUBLIC: { 'config': 'os-public-network', - 'fallback': 'public-address' + 'fallback': 'public-address', + 'override': 'endpoint-public-name', }, INTERNAL: { 'config': 'os-internal-network', - 'fallback': 'private-address' + 'fallback': 'private-address', + 'override': 'endpoint-internal-name', }, ADMIN: { 'config': 'os-admin-network', - 'fallback': 'private-address' + 'fallback': 'private-address', + 'override': 'endpoint-admin-name', } } @@ -57,15 +58,37 @@ def canonical_url(configs, endpoint_type=PUBLIC): :param endpoint_type: str endpoint type to resolve. :param returns: str base URL for services on the current service unit. """ - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' - address = resolve_address(endpoint_type) + scheme = _get_scheme(configs) + + # Allow the user to override the address which is used. 
This is + # useful for proxy services or exposing a public endpoint url, etc. + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if addr_override: + address = addr_override + else: + address = resolve_address(endpoint_type) if is_ipv6(address): address = "[{}]".format(address) + return '%s://%s' % (scheme, address) +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. + """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + def resolve_address(endpoint_type=PUBLIC): """Return unit address depending on net config. @@ -109,38 +132,3 @@ def resolve_address(endpoint_type=PUBLIC): "clustered=%s)" % (net_type, clustered)) return resolved_address - - -def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, - override=None): - """Returns the correct endpoint URL to advertise to Keystone. - - This method provides the correct endpoint URL which should be advertised to - the keystone charm for endpoint creation. This method allows for the url to - be overridden to force a keystone endpoint to have specific URL for any of - the defined scopes (admin, internal, public). - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param url_template: str format string for creating the url template. Only - two values will be passed - the scheme+hostname - returned by the canonical_url and the port. - :param endpoint_type: str endpoint type to resolve. - :param override: str the name of the config option which overrides the - endpoint URL defined by the charm itself. None will - disable any overrides (default). - """ - if override: - # Return any user-defined overrides for the keystone endpoint URL. - user_value = config(override) - if user_value: - return user_value.strip() - - return url_template % (canonical_url(configs, endpoint_type), port) - - -public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) - -internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) - -admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index fbb1952c..f2032ac2 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -50,10 +50,9 @@ from charmhelpers.contrib.network.ip import ( get_iface_for_address, get_netmask_for_address, - is_ipv6, ) from charmhelpers.contrib.openstack.ip import ( - resolve_address, + canonical_url, PUBLIC, INTERNAL, ADMIN, ) @@ -273,16 +272,6 @@ def restart(): open_port(port=80) -# XXX Define local canonical_url until charm has been updated to use the -# standard context architecture. 
-def canonical_url(configs, endpoint_type=PUBLIC): - scheme = 'http' - address = resolve_address(endpoint_type) - if is_ipv6(address): - address = "[{}]".format(address) - return '%s://%s' % (scheme, address) - - @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): if cmp_pkgrevno('radosgw', '0.55') < 0: @@ -290,11 +279,11 @@ def identity_joined(relid=None): sys.exit(1) port = 80 - admin_url = '%s:%i/swift' % (canonical_url(ADMIN), port) + admin_url = '%s:%i/swift' % (canonical_url(None, ADMIN), port) internal_url = '%s:%s/swift/v1' % \ - (canonical_url(INTERNAL), port) + (canonical_url(None, INTERNAL), port) public_url = '%s:%s/swift/v1' % \ - (canonical_url(PUBLIC), port) + (canonical_url(None, PUBLIC), port) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 46279486..dcafac1c 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -8,6 +8,7 @@ CharmTestCase, patch_open ) +from charmhelpers.contrib.openstack.ip import PUBLIC dnsmock = MagicMock() modules = { @@ -45,7 +46,6 @@ 'relation_set', 'relation_get', 'render_template', - 'resolve_address', 'shutil', 'subprocess', 'sys', @@ -323,14 +323,18 @@ def test_restart(self): cmd = ['service', 'radosgw', 'restart'] self.subprocess.call.assert_called_with(cmd) - def test_identity_joined_early_version(self): + @patch('charmhelpers.contrib.openstack.ip.config') + def test_identity_joined_early_version(self, _config): self.cmp_pkgrevno.return_value = -1 ceph_hooks.identity_joined() self.sys.exit.assert_called_with(1) - def test_identity_joined(self): + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_identity_joined(self, _config, _resolve_address): self.cmp_pkgrevno.return_value = 1 - self.resolve_address.return_value = 'myserv' + _resolve_address.return_value = 'myserv' + _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') self.test_config.set('operator-roles', 'admin') self.unit_get.return_value = 'myserv' @@ -344,6 +348,22 @@ def test_identity_joined(self): relation_id='rid', admin_url='http://myserv:80/swift') + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_identity_joined_public_name(self, _config, _resolve_address): + _config.side_effect = self.test_config.get + self.test_config.set('endpoint-public-name', 'files.example.com') + _resolve_address.return_value = 'myserv' + ceph_hooks.identity_joined(relid='rid') + self.relation_set.assert_called_with( + service='swift', + region='RegionOne', + public_url='http://files.example.com:80/swift/v1', + internal_url='http://myserv:80/swift/v1', + requested_roles='Member,Admin', + relation_id='rid', + admin_url='http://myserv:80/swift') + def test_identity_changed(self): _emit_cephconf = self.patch('emit_cephconf') _restart = self.patch('restart') @@ -351,10 +371,12 @@ def test_identity_changed(self): _emit_cephconf.assert_called() _restart.assert_called() - def test_canonical_url_ipv6(self): + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_canonical_url_ipv6(self, _resolve_address, _config): ipv6_addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348' - self.resolve_address.return_value = ipv6_addr - 
self.assertEquals(ceph_hooks.canonical_url({}), + _resolve_address.return_value = ipv6_addr + self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), 'http://[%s]' % ipv6_addr) @patch.object(ceph_hooks, 'CONFIGS') From 001b2a988d5646ddcd6563966bc91297b3a3eb3b Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 2 Jun 2015 12:27:01 -0700 Subject: [PATCH 0695/2699] Update charmhelpers.contrib.openstack.ip --- .../hooks/charmhelpers/contrib/openstack/ip.py | 17 +++++++++-------- ceph-radosgw/unit_tests/test_hooks.py | 17 +++++++++++------ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 16394e35..45531b57 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -60,14 +60,7 @@ def canonical_url(configs, endpoint_type=PUBLIC): """ scheme = _get_scheme(configs) - # Allow the user to override the address which is used. This is - # useful for proxy services or exposing a public endpoint url, etc. - override_key = ADDRESS_MAP[endpoint_type]['override'] - addr_override = config(override_key) - if addr_override: - address = addr_override - else: - address = resolve_address(endpoint_type) + address = resolve_address(endpoint_type) if is_ipv6(address): address = "[{}]".format(address) @@ -101,6 +94,14 @@ def resolve_address(endpoint_type=PUBLIC): :param endpoint_type: Network endpoing type """ resolved_address = None + + # Allow the user to override the address which is used. This is + # useful for proxy services or exposing a public endpoint url, etc. + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if addr_override: + return addr_override + vips = config('vip') if vips: vips = vips.split() diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index dcafac1c..d4e11706 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -348,12 +348,14 @@ def test_identity_joined(self, _config, _resolve_address): relation_id='rid', admin_url='http://myserv:80/swift') - @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.is_clustered') + @patch('charmhelpers.contrib.openstack.ip.unit_get') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined_public_name(self, _config, _resolve_address): + def test_identity_joined_public_name(self, _config, _unit_get, _is_clustered): _config.side_effect = self.test_config.get self.test_config.set('endpoint-public-name', 'files.example.com') - _resolve_address.return_value = 'myserv' + _unit_get.return_value = 'myserv' + _is_clustered.return_value = False ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', @@ -371,11 +373,14 @@ def test_identity_changed(self): _emit_cephconf.assert_called() _restart.assert_called() - @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.is_clustered') + @patch('charmhelpers.contrib.openstack.ip.unit_get') @patch('charmhelpers.contrib.openstack.ip.config') - def test_canonical_url_ipv6(self, _resolve_address, _config): + def test_canonical_url_ipv6(self, _config, _unit_get, _is_clustered): ipv6_addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348' - _resolve_address.return_value = ipv6_addr + _config.side_effect = self.test_config.get + _unit_get.return_value 
= ipv6_addr + _is_clustered.return_value = False self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), 'http://[%s]' % ipv6_addr) From 955eb1684038bbb6ca76642f9cff6b1fe2ebdd48 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Wed, 3 Jun 2015 11:19:53 -0700 Subject: [PATCH 0696/2699] Change config option to os-public-hostname and lint error. --- ceph-radosgw/config.yaml | 4 ++-- ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py | 6 +++--- ceph-radosgw/unit_tests/test_hooks.py | 5 +++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 2bc7732d..3a33b00d 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -87,7 +87,7 @@ options: description: | Default multicast port number that will be used to communicate between HA Cluster nodes. - endpoint-public-name: + os-public-hostname: type: string default: description: | @@ -95,7 +95,7 @@ options: in the keystone identity provider. . This value will be used for public endpoints. For example, an - endpoint-public-name set to 'files.example.com' with will create + os-public-hostname set to 'files.example.com' with will create the following public endpoint for the ceph-radosgw. . https://files.example.com:80/swift/v1 \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 45531b57..6e18c98a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -34,17 +34,17 @@ PUBLIC: { 'config': 'os-public-network', 'fallback': 'public-address', - 'override': 'endpoint-public-name', + 'override': 'os-public-hostname', }, INTERNAL: { 'config': 'os-internal-network', 'fallback': 'private-address', - 'override': 'endpoint-internal-name', + 'override': 'os-internal-hostname', }, ADMIN: { 'config': 'os-admin-network', 'fallback': 'private-address', - 'override': 'endpoint-admin-name', + 'override': 'os-admin-hostname', } } diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index d4e11706..2f4d3033 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -351,9 +351,10 @@ def test_identity_joined(self, _config, _resolve_address): @patch('charmhelpers.contrib.openstack.ip.is_clustered') @patch('charmhelpers.contrib.openstack.ip.unit_get') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined_public_name(self, _config, _unit_get, _is_clustered): + def test_identity_joined_public_name(self, _config, _unit_get, + _is_clustered): _config.side_effect = self.test_config.get - self.test_config.set('endpoint-public-name', 'files.example.com') + self.test_config.set('os-public-hostname', 'files.example.com') _unit_get.return_value = 'myserv' _is_clustered.return_value = False ceph_hooks.identity_joined(relid='rid') From 86d13ba6a35e366b41d3c6a79bf73d10f1acf27f Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Thu, 4 Jun 2015 16:06:40 -0700 Subject: [PATCH 0697/2699] c-h sync. 
unit test updates for sync --- .../charmhelpers/contrib/hahelpers/cluster.py | 25 +++ .../contrib/openstack/amulet/deployment.py | 21 ++- .../charmhelpers/contrib/openstack/ip.py | 32 +++- .../charmhelpers/contrib/openstack/neutron.py | 15 +- .../charmhelpers/contrib/openstack/utils.py | 81 +++++++-- .../charmhelpers/contrib/python/packages.py | 33 +++- .../hooks/charmhelpers/core/hookenv.py | 157 ++++++++++++++++-- ceph-radosgw/hooks/charmhelpers/core/host.py | 2 +- .../hooks/charmhelpers/core/services/base.py | 43 +++-- .../hooks/charmhelpers/fetch/__init__.py | 2 +- .../hooks/charmhelpers/fetch/giturl.py | 12 +- .../charmhelpers/contrib/amulet/utils.py | 9 +- .../contrib/openstack/amulet/deployment.py | 21 ++- ceph-radosgw/unit_tests/test_hooks.py | 6 + 14 files changed, 383 insertions(+), 76 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 9333efc3..c555d7aa 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -52,6 +52,8 @@ bool_from_string, ) +DC_RESOURCE_NAME = 'DC' + class HAIncompleteConfig(Exception): pass @@ -95,6 +97,27 @@ def is_clustered(): return False +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + return False + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + return False + + @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) def is_crm_leader(resource, retry=False): """ @@ -104,6 +127,8 @@ def is_crm_leader(resource, retry=False): We allow this operation to be retried to avoid the possibility of getting a false negative. See LP #1396246 for more info. 
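is_crm_dc() above determines leadership by scraping 'crm status' output for the Designated Controller line. Against sample output (the DC line is the one quoted in the hunk's comment; the header line is invented), the parsing reduces to:

    status = (
        "Last updated: Thu Apr 23 15:53:03 2015\n"   # invented header line
        "Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum\n"
    )

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            current_dc = line.split(':')[1].split()[0]

    print(current_dc)   # juju-lytrusty-machine-2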
""" + if resource == DC_RESOURCE_NAME: + return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 11d49a7c..461a702f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -46,15 +46,22 @@ def _determine_branch_locations(self, other_services): stable or next branches for the other_services.""" base_charms = ['mysql', 'mongodb'] + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + if self.stable: for svc in other_services: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: for svc in other_services: if svc['name'] in base_charms: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, @@ -99,10 +106,12 @@ def _get_openstack_release(self): Return an integer representing the enum value of the openstack release. """ + # Must be ordered by OpenStack release (not by Ubuntu release): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, - self.utopic_juno, self.vivid_kilo) = range(10) + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo) = range(10) + releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 6e18c98a..3dca6dc1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( config, unit_get, + service_name, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -82,6 +83,26 @@ def _get_scheme(configs): return scheme +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + def resolve_address(endpoint_type=PUBLIC): """Return unit address depending on net config. @@ -93,14 +114,9 @@ def resolve_address(endpoint_type=PUBLIC): :param endpoint_type: Network endpoing type """ - resolved_address = None - - # Allow the user to override the address which is used. This is - # useful for proxy services or exposing a public endpoint url, etc. 
- override_key = ADDRESS_MAP[endpoint_type]['override'] - addr_override = config(override_key) - if addr_override: - return addr_override + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address vips = config('vip') if vips: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 02c92e9c..b3aa3d4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -256,11 +256,14 @@ def network_manager(): def parse_mappings(mappings): parsed = {} if mappings: - mappings = mappings.split(' ') + mappings = mappings.split() for m in mappings: p = m.partition(':') - if p[1] == ':': - parsed[p[0].strip()] = p[2].strip() + key = p[0].strip() + if p[1]: + parsed[key] = p[2].strip() + else: + parsed[key] = '' return parsed @@ -283,13 +286,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'): Returns dict of the form {bridge:port}. """ _mappings = parse_mappings(mappings) - if not _mappings: + if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. - _mappings = {default_bridge: mappings.split(' ')[0]} + _mappings = {default_bridge: mappings.split()[0]} bridges = _mappings.keys() ports = _mappings.values() @@ -309,6 +312,8 @@ def parse_vlan_range_mappings(mappings): Mappings must be a space-delimited list of provider:start:end mappings. + The start:end range is optional and may be omitted. + Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f90a0289..d795a358 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -53,9 +53,13 @@ get_ipv6_addr ) +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.fetch import apt_install, apt_cache, install_remote -from charmhelpers.contrib.python.packages import pip_install from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -497,7 +501,17 @@ def git_install_requested(): requirements_dir = None -def git_clone_and_install(projects_yaml, core_project): +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a dictionary. + """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project, depth=1): """ Clone/install all specified OpenStack repositories. @@ -510,23 +524,22 @@ def git_clone_and_install(projects_yaml, core_project): repository: 'git://git.openstack.org/openstack/requirements.git', branch: 'stable/icehouse'} directory: /mnt/openstack-git - http_proxy: http://squid.internal:3128 - https_proxy: https://squid.internal:3128 + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url The directory, http_proxy, and https_proxy keys are optional. 
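The parse_mappings() rewrite in the neutron helper above now splits on any whitespace and keeps keys that arrive without a value. Its behaviour, restated as a self-contained sketch pinned by asserts:

    def parse_mappings(mappings):
        parsed = {}
        if mappings:
            for m in mappings.split():
                key, sep, value = m.partition(':')
                parsed[key.strip()] = value.strip() if sep else ''
        return parsed

    assert parse_mappings('physnet1:br-ex   physnet2') == \
        {'physnet1': 'br-ex', 'physnet2': ''}
    assert parse_mappings(None) == {}

The empty-string values are what parse_data_port_mappings() then checks for when deciding whether a port-only (backwards-compatible) config was supplied.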
""" global requirements_dir parent_dir = '/mnt/openstack-git' + http_proxy = None - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) + projects = _git_yaml_load(projects_yaml) _git_validate_projects_yaml(projects, core_project) old_environ = dict(os.environ) if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] os.environ['http_proxy'] = projects['http_proxy'] if 'https_proxy' in projects.keys(): os.environ['https_proxy'] = projects['https_proxy'] @@ -534,15 +547,19 @@ def git_clone_and_install(projects_yaml, core_project): if 'directory' in projects.keys(): parent_dir = projects['directory'] + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + for p in projects['repositories']: repo = p['repository'] branch = p['branch'] if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, update_requirements=False) requirements_dir = repo_dir else: - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, update_requirements=True) os.environ = old_environ @@ -574,7 +591,8 @@ def _git_ensure_key_exists(key, keys): error_out('openstack-origin-git key \'{}\' is missing'.format(key)) -def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, + update_requirements): """ Clone and install a single git repository. """ @@ -587,7 +605,8 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements) if not os.path.exists(dest_dir): juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, + depth=depth) else: repo_dir = dest_dir @@ -598,7 +617,12 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements) _git_update_requirements(repo_dir, requirements_dir) juju_log('Installing git repo from dir: {}'.format(repo_dir)) - pip_install(repo_dir) + if http_proxy: + pip_install(repo_dir, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + else: + pip_install(repo_dir, + venv=os.path.join(parent_dir, 'venv')) return repo_dir @@ -621,16 +645,27 @@ def _git_update_requirements(package_dir, reqs_dir): os.chdir(orig_dir) +def git_pip_venv_dir(projects_yaml): + """ + Return the pip virtualenv path. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + return os.path.join(parent_dir, 'venv') + + def git_src_dir(projects_yaml, project): """ Return the directory where the specified project's source is located. """ parent_dir = '/mnt/openstack-git' - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) + projects = _git_yaml_load(projects_yaml) if 'directory' in projects.keys(): parent_dir = projects['directory'] @@ -640,3 +675,15 @@ def git_src_dir(projects_yaml, project): return os.path.join(parent_dir, os.path.basename(p['repository'])) return None + + +def git_yaml_value(projects_yaml, key): + """ + Return the value in projects_yaml for the specified key. 
+ """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index 8659516b..07b0c1d7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import os +import subprocess + from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import charm_dir, log try: from pip import main as pip_execute @@ -51,11 +54,15 @@ def pip_install_requirements(requirements, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): """Install a python package""" - command = ["install"] + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] - available_options = ('proxy', 'src', 'log', "index-url", ) + available_options = ('proxy', 'src', 'log', 'index-url', ) for option in parse_options(options, available_options): command.append(option) @@ -69,7 +76,10 @@ def pip_install(package, fatal=False, upgrade=False, **options): log("Installing {} package with options: {}".format(package, command)) - pip_execute(command) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) def pip_uninstall(package, **options): @@ -94,3 +104,16 @@ def pip_list(): """Returns the list of current python installed packages """ return pip_execute(["list"]) + + +def pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + apt_install('python-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 86f805f1..117429fd 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -21,12 +21,14 @@ # Charm Helpers Developers from __future__ import print_function +from functools import wraps import os import json import yaml import subprocess import sys import errno +import tempfile from subprocess import CalledProcessError import six @@ -58,15 +60,17 @@ def unit_get(attribute): will cache the result of unit_get + 'test' for future calls. """ + @wraps(func) def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) try: return cache[key] except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res + pass # Drop out of the exception handler scope. 
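The packages.py change above routes installs through the virtualenv's own pip binary whenever a venv path is supplied, instead of the in-process pip module. The core of that dispatch as a sketch (package name and path are illustrative):

    import os
    import subprocess

    def pip_install_sketch(package, venv=None):
        if venv:
            # The venv's pip installs into the venv, not system site-packages.
            subprocess.check_call([os.path.join(venv, 'bin/pip'),
                                   'install', package])
        else:
            from pip import main as pip_execute  # in-process pip
            pip_execute(['install', package])

    # pip_install_sketch('pbr', venv='/mnt/openstack-git/venv')

Shelling out is what makes the venv effective: an imported pip always targets the interpreter it was imported into.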
+            res = func(*args, **kwargs) +            cache[key] = res +            return res     return wrapper   @@ -178,7 +182,7 @@ def local_unit():   def remote_unit():     """The remote unit for the current relation hook""" -    return os.environ['JUJU_REMOTE_UNIT'] +    return os.environ.get('JUJU_REMOTE_UNIT', None)   def service_name(): @@ -250,6 +254,12 @@ def __getitem__(self, key):         except KeyError:             return (self._prev_dict or {})[key]  +    def get(self, key, default=None): +        try: +            return self[key] +        except KeyError: +            return default +     def keys(self):         prev_keys = []         if self._prev_dict is not None: @@ -353,18 +363,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):     """Set relation information for the current unit"""     relation_settings = relation_settings if relation_settings else {}     relation_cmd_line = ['relation-set'] +    accepts_file = "--file" in subprocess.check_output( +        relation_cmd_line + ["--help"], universal_newlines=True)     if relation_id is not None:         relation_cmd_line.extend(('-r', relation_id)) -    for k, v in (list(relation_settings.items()) + list(kwargs.items())): -        if v is None: -            relation_cmd_line.append('{}='.format(k)) -        else: -            relation_cmd_line.append('{}={}'.format(k, v)) -    subprocess.check_call(relation_cmd_line) +    settings = relation_settings.copy() +    settings.update(kwargs) +    for key, value in settings.items(): +        # Force value to be a string: it always should, but some call +        # sites pass in things like dicts or numbers. +        if value is not None: +            settings[key] = "{}".format(value) +    if accepts_file: +        # --file was introduced in Juju 1.23.2. Use it by default if +        # available, since otherwise we'll break if the relation data is +        # too big. Ideally we should tell relation-set to read the data from +        # stdin, but that feature is broken in 1.23.2: Bug #1454678. +        with tempfile.NamedTemporaryFile(delete=False) as settings_file: +            settings_file.write(yaml.safe_dump(settings).encode("utf-8")) +        subprocess.check_call( +            relation_cmd_line + ["--file", settings_file.name]) +        os.remove(settings_file.name) +    else: +        for key, value in settings.items(): +            if value is None: +                relation_cmd_line.append('{}='.format(key)) +            else: +                relation_cmd_line.append('{}={}'.format(key, value)) +        subprocess.check_call(relation_cmd_line)     # Flush cache of any relation-gets for local unit     flush(local_unit())   +def relation_clear(r_id=None): +    ''' Clears any relation data already set on relation r_id ''' +    settings = relation_get(rid=r_id, +                            unit=local_unit()) +    for setting in settings: +        if setting not in ['public-address', 'private-address']: +            settings[setting] = None +    relation_set(relation_id=r_id, +                 **settings) + + @cached def relation_ids(reltype=None):     """A list of relation_ids""" @@ -509,6 +550,11 @@ def unit_get(attribute):         return None   +def unit_public_ip(): +    """Get this unit's public IP address""" +    return unit_get('public-address') + + def unit_private_ip():     """Get this unit's private IP address"""     return unit_get('private-address') @@ -605,3 +651,94 @@ def action_fail(message):      The results set by action_set are preserved."""     subprocess.check_call(['action-fail', message]) + + +def status_set(workload_state, message): +    """Set the workload state with a message + +    Use status-set to set the workload state with a message which is visible +    to the user via juju status. If the status-set command is not found then +    assume this is juju < 1.23 and juju-log the message instead. + +    workload_state -- valid juju workload state.
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state + + If the status-set command is not found then assume this is juju < 1.23 and + return 'unknown' + """ + cmd = ['status-get'] + try: + raw_status = subprocess.check_output(cmd, universal_newlines=True) + status = raw_status.rstrip() + return status + except OSError as e: + if e.errno == errno.ENOENT: + return 'unknown' + else: + raise + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.iteritems(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 830822af..0d2ab4b4 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -90,7 +90,7 @@ def service_available(service_name): ['service', service_name, 'status'], stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: - return 'unrecognized service' not in e.output + return b'unrecognized service' not in e.output else: return True diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index c5534e4c..98d344e1 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -15,9 +15,9 @@ # along with charm-helpers. If not, see . 
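translate_exc(), added above, converts one exception type into another so that on older Juju (where the is-leader and leader-get binaries do not exist and exec raises OSError) callers see a uniform NotImplementedError. The pattern in isolation:

    def translate_exc(from_exc, to_exc):
        def inner(f):
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except from_exc:
                    raise to_exc
            return wrapper
        return inner

    @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
    def leaderless():
        raise OSError('is-leader: command not found')

    try:
        leaderless()
    except NotImplementedError:
        print('leadership API unavailable on this Juju')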
import os -import re import json -from collections import Iterable +from inspect import getargspec +from collections import Iterable, OrderedDict from charmhelpers.core import host from charmhelpers.core import hookenv @@ -119,7 +119,7 @@ def __init__(self, services=None): """ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') self._ready = None - self.services = {} + self.services = OrderedDict() for service in services or []: service_name = service['service'] self.services[service_name] = service @@ -132,8 +132,8 @@ def manage(self): if hook_name == 'stop': self.stop_services() else: - self.provide_data() self.reconfigure_services() + self.provide_data() cfg = hookenv.config() if cfg.implicit_save: cfg.save() @@ -145,15 +145,36 @@ def provide_data(self): A provider must have a `name` attribute, which indicates which relation to set data on, and a `provide_data()` method, which returns a dict of data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. """ - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 792e629a..9a1a2515 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -158,7 +158,7 @@ def filter_installed_packages(packages): def apt_cache(in_memory=True): """Build and return an apt cache""" - import apt_pkg + from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index 93aae87b..ddc25b7e 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ def can_handle(self, source): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) 
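The reworked provide_data() above uses getargspec() to support both old zero-argument providers and new ones that accept context. For a bound method, getargspec() still reports self, hence the len(args) > 1 test. Standalone:

    from inspect import getargspec

    class OldProvider(object):
        def provide_data(self):
            return {'v': 1}

    class NewProvider(object):
        def provide_data(self, remote_service, service_ready):
            return {'v': 1, 'for': remote_service}

    for provider in (OldProvider(), NewProvider()):
        argspec = getargspec(provider.provide_data)
        if len(argspec.args) > 1:   # 'self' plus the two context arguments
            data = provider.provide_data('glance', True)
        else:
            data = provider.provide_data()
        print(data)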
- repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,7 +65,7 @@ def install(self, source, branch="master", dest=None): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, depth) except GitCommandError as e: raise UnhandledSource(e.message) except OSError as e: diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 5088b1d1..f61c2e8b 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -79,6 +79,9 @@ def validate_services(self, commands): for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None @@ -86,7 +89,11 @@ def validate_services(self, commands): def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) - config = ConfigParser.ConfigParser() + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = ConfigParser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 11d49a7c..461a702f 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -46,15 +46,22 @@ def _determine_branch_locations(self, other_services): stable or next branches for the other_services.""" base_charms = ['mysql', 'mongodb'] + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + if self.stable: for svc in other_services: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: for svc in other_services: if svc['name'] in base_charms: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, @@ -99,10 +106,12 @@ def _get_openstack_release(self): Return an integer representing the enum value of the openstack release. 
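The depth parameter threaded through install_remote() lands here as GitPython's clone depth; depth=1 gives a shallow clone holding only the branch tip, which is all an install needs. Side by side (URL and paths are illustrative):

    from git import Repo

    # Full history:
    Repo.clone_from('git://example.com/project.git', '/tmp/project',
                    branch='master')
    # Tip commit only -- far less data to transfer:
    Repo.clone_from('git://example.com/project.git', '/tmp/project-shallow',
                    branch='master', depth=1)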
""" + # Must be ordered by OpenStack release (not by Ubuntu release): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, - self.utopic_juno, self.vivid_kilo) = range(10) + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo) = range(10) + releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 2f4d3033..fe568690 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -323,12 +323,16 @@ def test_restart(self): cmd = ['service', 'radosgw', 'restart'] self.subprocess.call.assert_called_with(cmd) + @patch('charmhelpers.contrib.openstack.ip.service_name', + lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined_early_version(self, _config): self.cmp_pkgrevno.return_value = -1 ceph_hooks.identity_joined() self.sys.exit.assert_called_with(1) + @patch('charmhelpers.contrib.openstack.ip.service_name', + lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.resolve_address') @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined(self, _config, _resolve_address): @@ -348,6 +352,8 @@ def test_identity_joined(self, _config, _resolve_address): relation_id='rid', admin_url='http://myserv:80/swift') + @patch('charmhelpers.contrib.openstack.ip.service_name', + lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.is_clustered') @patch('charmhelpers.contrib.openstack.ip.unit_get') @patch('charmhelpers.contrib.openstack.ip.config') From 75e81192bdfe4877e85ac248b431879872ad8131 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:40:45 +0000 Subject: [PATCH 0698/2699] Update README, Makefile and amulet test dependencies Remove unsupported release logic Add nova, cinder and glance rbd config inspection Enable Vivid tests, prep for Wily Add debug logging Add osd pool inspection Add functional tests for ceph-backed cinder and glance Add basic cli functional checks. --- ceph-proxy/Makefile | 13 +- ceph-proxy/charm-helpers-tests.yaml | 3 +- ceph-proxy/tests/00-setup | 2 + ceph-proxy/tests/017-basic-trusty-kilo | 0 ceph-proxy/tests/019-basic-vivid-kilo | 0 ceph-proxy/tests/README | 24 ++ ceph-proxy/tests/basic_deployment.py | 426 +++++++++++++++++++++---- ceph-proxy/tests/tests.yaml | 16 + 8 files changed, 413 insertions(+), 71 deletions(-) mode change 100644 => 100755 ceph-proxy/tests/017-basic-trusty-kilo mode change 100644 => 100755 ceph-proxy/tests/019-basic-vivid-kilo create mode 100644 ceph-proxy/tests/tests.yaml diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index befc8dc0..69bb5727 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -2,18 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests unit_tests + @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ + hooks tests unit_tests @charm proof -unit_test: +test: + @# Bundletester expects unit tests here. @echo Starting unit tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests -test: +functional_test: @echo Starting Amulet tests... 
-	# coreycb note: The -v should only be temporary until Amulet sends -	# raise_status() messages to stderr: -	#     https://bugs.launchpad.net/amulet/+bug/1320357 	@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700  bin/charm_helpers_sync.py: @@ -22,7 +21,7 @@ bin/charm_helpers_sync.py:         > bin/charm_helpers_sync.py  sync: bin/charm_helpers_sync.py -	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml +#	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml 	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml  publish: lint diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index 48b12f6f..987d84cb 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +#branch: lp:charm-helpers +branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/ destination: tests/charmhelpers include:     - contrib.amulet diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index 1243ec43..54f560ca 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -5,6 +5,8 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ +                            python-distro-info \                             python-keystoneclient \                             python-glanceclient \ +                            python-cinderclient \                             python-novaclient diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/017-basic-trusty-kilo old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/019-basic-vivid-kilo b/ceph-proxy/tests/019-basic-vivid-kilo old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/README b/ceph-proxy/tests/README index 8072a8b0..da63fb67 100644 --- a/ceph-proxy/tests/README +++ b/ceph-proxy/tests/README @@ -1,6 +1,30 @@ This directory provides Amulet tests that focus on verification of ceph deployments.  +test_* methods are called in lexical sort order. + +Test name convention to ensure desired test order: +  1xx service and endpoint checks +  2xx relation checks +  3xx config checks +  4xx functional checks +  9xx restarts and other final checks + +Common uses of ceph relations in bundle deployments: +  - [ nova-compute, ceph ] +  - [ glance, ceph ] +  - [ cinder, cinder-ceph ] +  - [ cinder-ceph, ceph ] + +More detailed relations of ceph service in a common deployment: +  relations: +    client: +    - cinder-ceph +    - glance +    - nova-compute +    mon: +    - ceph + In order to run tests, you'll need charm-tools installed (in addition to juju, of course):     sudo add-apt-repository ppa:juju/stable diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 9bfa3023..09922e86 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -1,18 +1,26 @@ #!/usr/bin/python  import amulet +import json +import time  from charmhelpers.contrib.openstack.amulet.deployment import (     OpenStackAmuletDeployment )  from charmhelpers.contrib.openstack.amulet.utils import (  # noqa     OpenStackAmuletUtils,     DEBUG, -    ERROR +    #ERROR )  # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG)  +# Resource names and constants +IMAGE_NAME = 'cirros-image-1' +POOLS = ['data', 'metadata', 'rbd', 'cinder', 'glance'] +CINDER_POOL = 3 +GLANCE_POOL = 4 +  class CephBasicDeployment(OpenStackAmuletDeployment):     """Amulet tests on a basic ceph deployment."""  @@ -35,10 +43,12 @@ def _add_services(self):            compatible with the local charm (e.g. stable or next).
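The numbering scheme documented in the new README section relies on test runners invoking test_* methods in lexical (string) sort order, so a three-digit prefix pins execution order without any runner support. A quick check:

    names = ['test_999_final', 'test_200_relations',
             'test_100_services', 'test_410_cinder_vol_create']
    print(sorted(names))
    # ['test_100_services', 'test_200_relations',
    #  'test_410_cinder_vol_create', 'test_999_final']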
""" this_service = {'name': 'ceph', 'units': 3} - other_services = [{'name': 'mysql'}, {'name': 'keystone'}, + other_services = [{'name': 'mysql'}, + {'name': 'keystone'}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, - {'name': 'glance'}, {'name': 'cinder'}] + {'name': 'glance'}, + {'name': 'cinder'}] super(CephBasicDeployment, self)._add_services(this_service, other_services) @@ -74,12 +84,9 @@ def _configure_services(self): 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', 'osd-reformat': 'yes', - 'ephemeral-unmount': '/mnt' + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' } - if self._get_openstack_release() >= self.precise_grizzly: - ceph_config['osd-devices'] = '/dev/vdb /srv/ceph' - else: - ceph_config['osd-devices'] = '/dev/vdb' configs = {'keystone': keystone_config, 'mysql': mysql_config, @@ -88,27 +95,44 @@ def _configure_services(self): super(CephBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): - """Perform final initialization before tests get run.""" + """Perform final initialization original tests get run.""" # Access the sentries for inspecting service units self.mysql_sentry = self.d.sentry.unit['mysql/0'] self.keystone_sentry = self.d.sentry.unit['keystone/0'] self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] self.ceph0_sentry = self.d.sentry.unit['ceph/0'] self.ceph1_sentry = self.d.sentry.unit['ceph/1'] self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + u.log.debug('openstack release val: {}'.format( + self._get_openstack_release())) + u.log.debug('openstack release str: {}'.format( + self._get_openstack_release_string())) + + # Let things settle a bit original moving forward + time.sleep(30) # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin', password='openstack', tenant='admin') - + # Authenticate admin with cinder endpoint + self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, + username='admin', + password='openstack', + tenant='admin') # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) + # Authenticate admin with nova endpoint + self.nova = u.authenticate_nova_user(self.keystone, + user='admin', + password='openstack', + tenant='admin') + # Create a demo tenant/role/user self.demo_tenant = 'demoTenant' self.demo_role = 'demoRole' @@ -139,41 +163,83 @@ def _ceph_osd_id(self, index): """Produce a shell command that will return a ceph-osd id.""" return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa - def test_services(self): + def _ceph_df(self, sentry_unit): + """Return dict of ceph df json output""" + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + u.log.debug(msg) + amulet.raise_status(amulet.FAIL, msg=msg) + + df = json.loads(output) + return df + + def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Return ceph pool name, object count and disk space used + for the specified pool ID number.""" + df = self._ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + 
obj_count = df['pools'][pool_id]['stats']['objects'] +        kb_used = df['pools'][pool_id]['stats']['kb_used'] +        u.log.debug('Ceph {} pool (ID {}): {} objects, ' +                    '{} kb used'.format(pool_name, +                                        pool_id, +                                        obj_count, +                                        kb_used)) +        return pool_name, obj_count, kb_used + +    def _validate_pool_samples(self, samples, resource_type="item", +                               sample_type="resource pool"): +        """Validate ceph pool samples taken over time, such as pool +        object counts or pool kb used, before adding, after adding, and +        after deleting items which affect those pool attributes.""" +        original, created, deleted = range(3) + +        if samples[created] <= samples[original] or \ +                samples[deleted] >= samples[created]: +            msg = ('Ceph {} samples ({}) ' +                   'unexpected.'.format(sample_type, samples)) +            return msg +        else: +            u.log.debug('Ceph {} samples (OK): ' +                        '{}'.format(sample_type, samples)) +            return None + +    def test_100_services(self):         """Verify the expected services are running on the service units.""" -        ceph_services = ['status ceph-mon-all', -                         'status ceph-mon id=`hostname`'] -        commands = { -            self.mysql_sentry: ['status mysql'], -            self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], -            self.nova_compute_sentry: ['status nova-compute'], -            self.keystone_sentry: ['status keystone'], -            self.glance_sentry: ['status glance-registry', -                                 'status glance-api'], -            self.cinder_sentry: ['status cinder-api', -                                 'status cinder-scheduler', -                                 'status cinder-volume'] +        ceph_services = [ +            'ceph-mon-all', +            'ceph-mon id=`hostname`', +            'ceph-osd-all', +            'ceph-osd id={}'.format(self._ceph_osd_id(0)), +            'ceph-osd id={}'.format(self._ceph_osd_id(1)) +        ] + +        services = { +            self.mysql_sentry: ['mysql'], +            self.rabbitmq_sentry: ['rabbitmq-server'], +            self.nova_sentry: ['nova-compute'], +            self.keystone_sentry: ['keystone'], +            self.glance_sentry: ['glance-registry', +                                 'glance-api'], +            self.cinder_sentry: ['cinder-api', +                                 'cinder-scheduler', +                                 'cinder-volume'], +            self.ceph0_sentry: ceph_services, +            self.ceph1_sentry: ceph_services, +            self.ceph2_sentry: ceph_services         }  -        if self._get_openstack_release() >= self.precise_grizzly: -            ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) -            ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) -            ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) -            commands[self.ceph0_sentry] = ceph_services -            commands[self.ceph1_sentry] = ceph_services -            commands[self.ceph2_sentry] = ceph_services -        else: -            ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) -            ceph_services.append(ceph_osd0) -            commands[self.ceph0_sentry] = ceph_services -            commands[self.ceph1_sentry] = ceph_services -            commands[self.ceph2_sentry] = ceph_services -        ret = u.validate_services(commands) +        ret = u.validate_services_by_name(services)         if ret:             amulet.raise_status(amulet.FAIL, msg=ret)  -    def test_ceph_nova_client_relation(self): +    def test_200_ceph_nova_client_relation(self):         """Verify the ceph to nova ceph-client relation data.""" +        u.log.debug('Checking ceph:nova-compute ceph relation data...')         unit = self.ceph0_sentry         relation = ['client', 'nova-compute:ceph']         expected = { @@ -187,9 +253,10 @@ def test_ceph_nova_client_relation(self):             message = u.relation_error('ceph to nova ceph-client', ret)             amulet.raise_status(amulet.FAIL, msg=message)  -    def test_nova_ceph_client_relation(self): -        """Verify the nova to ceph ceph-client relation data.""" -        unit = self.nova_compute_sentry +    def test_201_nova_ceph_client_relation(self): +        """Verify the nova to ceph client relation data.""" +
u.log.debug('Checking nova-compute:ceph ceph-client relation data...') + unit = self.nova_sentry relation = ['ceph', 'ceph:client'] expected = { 'private-address': u.valid_ip @@ -200,8 +267,9 @@ def test_nova_ceph_client_relation(self): message = u.relation_error('nova to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_glance_client_relation(self): + def test_202_ceph_glance_client_relation(self): """Verify the ceph to glance ceph-client relation data.""" + u.log.debug('Checking ceph:glance client relation data...') unit = self.ceph1_sentry relation = ['client', 'glance:ceph'] expected = { @@ -215,8 +283,9 @@ def test_ceph_glance_client_relation(self): message = u.relation_error('ceph to glance ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_glance_ceph_client_relation(self): - """Verify the glance to ceph ceph-client relation data.""" + def test_203_glance_ceph_client_relation(self): + """Verify the glance to ceph client relation data.""" + u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry relation = ['ceph', 'ceph:client'] expected = { @@ -228,8 +297,9 @@ def test_glance_ceph_client_relation(self): message = u.relation_error('glance to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_cinder_client_relation(self): + def test_204_ceph_cinder_client_relation(self): """Verify the ceph to cinder ceph-client relation data.""" + u.log.debug('Checking ceph:cinder ceph relation data...') unit = self.ceph2_sentry relation = ['client', 'cinder:ceph'] expected = { @@ -243,8 +313,9 @@ def test_ceph_cinder_client_relation(self): message = u.relation_error('ceph to cinder ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_cinder_ceph_client_relation(self): + def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation data.""" + u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry relation = ['ceph', 'ceph:client'] expected = { @@ -256,8 +327,9 @@ def test_cinder_ceph_client_relation(self): message = u.relation_error('cinder to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_config(self): + def test_300_ceph_config(self): """Verify the data in the ceph config file.""" + u.log.debug('Checking ceph config file data...') unit = self.ceph0_sentry conf = '/etc/ceph/ceph.conf' expected = { @@ -267,7 +339,10 @@ def test_ceph_config(self): 'log to syslog': 'false', 'err to syslog': 'false', 'clog to syslog': 'false', - 'mon cluster log to syslog': 'false' + 'mon cluster log to syslog': 'false', + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none' }, 'mon': { 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' @@ -281,12 +356,6 @@ def test_ceph_config(self): 'filestore xattr use omap': 'true' }, } - if self._get_openstack_release() >= self.precise_grizzly: - expected['global']['auth cluster required'] = 'none' - expected['global']['auth service required'] = 'none' - expected['global']['auth client required'] = 'none' - else: - expected['global']['auth supported'] = 'none' for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) @@ -294,11 +363,242 @@ def test_ceph_config(self): message = "ceph config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_restart_on_config_change(self): - """Verify the specified services are 
restarted on config change.""" - # NOTE(coreycb): Test not implemented but should it be? ceph services - # aren't restarted by charm after config change. Should - # they be restarted? - if self._get_openstack_release() >= self.precise_essex: - u.log.error("Test not implemented") - return + def test_302_cinder_rbd_config(self): + """Verify the cinder config file data regarding ceph.""" + u.log.debug('Checking cinder (rbd) config file data...') + unit = self.cinder_sentry + conf = '/etc/cinder/cinder.conf' + expected = { + 'DEFAULT': { + 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "cinder (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_304_glance_rbd_config(self): + """Verify the glance config file data regarding ceph.""" + u.log.debug('Checking glance (rbd) config file data...') + unit = self.glance_sentry + conf = '/etc/glance/glance-api.conf' + expected = { + 'DEFAULT': { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "glance (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_306_nova_rbd_config(self): + """Verify the nova config file data regarding ceph.""" + u.log.debug('Checking nova (rbd) config file data...') + unit = self.nova_sentry + conf = '/etc/nova/nova.conf' + expected = { + 'libvirt': { + 'rbd_pool': 'nova', + 'rbd_user': 'nova-compute', + 'rbd_secret_uuid': u.not_null + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "nova (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_400_ceph_check_osd_pools(self): + """Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present.""" + u.log.debug('Checking pools on ceph units...') + + cmd = 'sudo ceph osd lspools' + results = [] + sentries = [ + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + + for sentry_unit in sentries: + output, code = sentry_unit.run(cmd) + results.append(output) + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + u.log.debug(msg) + if code != 0: + amulet.raise_status(amulet.FAIL, msg=msg) + + # Check for presence of all pools on this unit + for pool in POOLS: + if pool not in output: + msg = ('{} does not have pool: ' + '{}'.format(sentry_unit.info['unit_name'], pool)) + amulet.raise_status(amulet.FAIL, msg=msg) + u.log.debug('{} has the expected ' + 'pools.'.format(sentry_unit.info['unit_name'])) + + # Check that lspool produces the same output on all units + if len(set(results)) == 1: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') + else: + u.log.debug('Pool list results: {}'.format(results)) + msg = 'Pool list results are not identical on all ceph units.' 
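test_400 above collapses the per-unit `ceph osd lspools` outputs into a set to assert cluster-wide agreement: identical strings collapse to one element, any divergence leaves more. The trick in miniature:

    results = ['0 data,1 metadata,2 rbd,', '0 data,1 metadata,2 rbd,']
    assert len(set(results)) == 1        # all units agree
    results.append('0 data,1 metadata,')
    assert len(set(results)) != 1        # a unit disagrees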
+ amulet.raise_status(amulet.FAIL, msg=msg) + + def test_410_ceph_cinder_vol_create(self): + """Create and confirm a ceph-backed cinder volume, and inspect + ceph cinder pool object count as the volume is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + + # Check ceph cinder pool object count, disk space usage and pool name + u.log.debug('Checking ceph cinder pool original samples...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'cinder' + if pool_name != expected: + msg = ('Ceph pool {} unexpected name (actual, expected): ' + '{}. {}'.format(CINDER_POOL, pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed cinder volume + cinder_vol = u.create_cinder_volume(self.cinder) + + # Re-check ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool samples after volume create...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed cinder volume + u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") + + # Final check, ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool after volume delete...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph cinder pool object count samples over time + ret = self._validate_pool_samples(samples=obj_count_samples, + resource_type="cinder volume", + sample_type="pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph cinder pool disk space usage samples over time + ret = self._validate_pool_samples(samples=pool_size_samples, + resource_type="cinder volume", + sample_type="pool disk usage size") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_412_ceph_glance_image_create_delete(self): + """Create and confirm a ceph-backed glance image, and inspect + ceph glance pool object count as the image is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + + # Check ceph glance pool object count, disk space usage and pool name + u.log.debug('Checking ceph glance pool original samples...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'glance' + if pool_name != expected: + msg = ('Ceph glance pool {} unexpected name (actual, ' + 'expected): {}. 
{}'.format(GLANCE_POOL, + pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed glance image + glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + + # Re-check ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image create...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed glance image + u.delete_resource(self.glance.images, + glance_img, msg="glance image") + + # Final check, ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image delete...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph glance pool object count samples over time + ret = self._validate_pool_samples(samples=obj_count_samples, + resource_type="glance image", + sample_type="pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph glance pool disk space usage samples over time + ret = self._validate_pool_samples(samples=pool_size_samples, + resource_type="glance image", + sample_type="pool disk usage size") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_499_ceph_cmds_exit_zero(self): + """Check that all ceph commands in a list return zero on all + ceph units listed.""" + sentry_units = [ + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + commands = [ + 'sudo ceph -s', + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + 'sudo ceph osd pool get data size', + 'sudo ceph osd pool get data pg_num', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. + + def test_999(self): + u.log.error('Fake fail!') + raise diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml new file mode 100644 index 00000000..348aae57 --- /dev/null +++ b/ceph-proxy/tests/tests.yaml @@ -0,0 +1,16 @@ +bootstrap: true +reset: true +virtualenv: true +makefile: + - lint + - test +sources: + - ppa:juju/stable +packages: + - amulet + - python-amulet + - python-distro-info + - python-keystoneclient + - python-glanceclient + - python-cinderclient + - python-novaclient From 35e8e25330d792ceee5e2d6be92a6b2aeb5562a5 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:40:45 +0000 Subject: [PATCH 0699/2699] Update README, Makefile and amulet test dependencies Remove unsupported release logic Add nova, cinder and glance rbd config inspection Enable Vivid tests, prep for Wily Add debug logging Add osd pool inspection Add functional tests for ceph-backed cinder and glance Add basic cli functional checks. 
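The 4xx tests above (repeated verbatim for ceph-mon in the next patch) share one sampling pattern: measure a pool before creating a resource, after creating it, and again after deleting it, then require strict growth followed by shrinkage. The acceptance rule in _validate_pool_samples reduces to:

    def samples_ok(samples):
        original, created, deleted = samples
        return created > original and deleted < created

    assert samples_ok([10, 15, 10])        # volume created, then removed
    assert not samples_ok([10, 10, 10])    # pool never changed: suspicious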
--- ceph-mon/Makefile                  |  13 +- ceph-mon/charm-helpers-tests.yaml  |   3 +- ceph-mon/tests/00-setup            |   2 + ceph-mon/tests/017-basic-trusty-kilo |   0 ceph-mon/tests/019-basic-vivid-kilo  |   0 ceph-mon/tests/README              |  24 ++ ceph-mon/tests/basic_deployment.py | 426 +++++++++++++++++++++++---- ceph-mon/tests/tests.yaml          |  16 + 8 files changed, 413 insertions(+), 71 deletions(-) mode change 100644 => 100755 ceph-mon/tests/017-basic-trusty-kilo mode change 100644 => 100755 ceph-mon/tests/019-basic-vivid-kilo create mode 100644 ceph-mon/tests/tests.yaml  diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index befc8dc0..69bb5727 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -2,18 +2,17 @@ PYTHON := /usr/bin/env python  lint: -	@flake8 --exclude hooks/charmhelpers hooks tests unit_tests +	@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ +        hooks tests unit_tests 	@charm proof  -unit_test: +test: +	@# Bundletester expects unit tests here. 	@echo Starting unit tests... 	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests  -test: +functional_test: 	@echo Starting Amulet tests... -	# coreycb note: The -v should only be temporary until Amulet sends -	# raise_status() messages to stderr: -	#     https://bugs.launchpad.net/amulet/+bug/1320357 	@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700  bin/charm_helpers_sync.py: @@ -22,7 +21,7 @@ bin/charm_helpers_sync.py:         > bin/charm_helpers_sync.py  sync: bin/charm_helpers_sync.py -	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml +#	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml 	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml  publish: lint diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index 48b12f6f..987d84cb 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -1,4 +1,5 @@ -branch: lp:charm-helpers +#branch: lp:charm-helpers +branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/ destination: tests/charmhelpers include:     - contrib.amulet diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index 1243ec43..54f560ca 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -5,6 +5,8 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ +                            python-distro-info \                             python-keystoneclient \                             python-glanceclient \ +                            python-cinderclient \                             python-novaclient diff --git a/ceph-mon/tests/017-basic-trusty-kilo b/ceph-mon/tests/017-basic-trusty-kilo old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/019-basic-vivid-kilo b/ceph-mon/tests/019-basic-vivid-kilo old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/README b/ceph-mon/tests/README index 8072a8b0..da63fb67 100644 --- a/ceph-mon/tests/README +++ b/ceph-mon/tests/README @@ -1,6 +1,30 @@ This directory provides Amulet tests that focus on verification of ceph deployments.  +test_* methods are called in lexical sort order.
+ +Test name convention to ensure desired test order: +  1xx service and endpoint checks +  2xx relation checks +  3xx config checks +  4xx functional checks +  9xx restarts and other final checks + +Common uses of ceph relations in bundle deployments: +  - [ nova-compute, ceph ] +  - [ glance, ceph ] +  - [ cinder, cinder-ceph ] +  - [ cinder-ceph, ceph ] + +More detailed relations of ceph service in a common deployment: +  relations: +    client: +    - cinder-ceph +    - glance +    - nova-compute +    mon: +    - ceph + In order to run tests, you'll need charm-tools installed (in addition to juju, of course):     sudo add-apt-repository ppa:juju/stable diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 9bfa3023..09922e86 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -1,18 +1,26 @@ #!/usr/bin/python  import amulet +import json +import time  from charmhelpers.contrib.openstack.amulet.deployment import (     OpenStackAmuletDeployment )  from charmhelpers.contrib.openstack.amulet.utils import (  # noqa     OpenStackAmuletUtils,     DEBUG, -    ERROR +    #ERROR )  # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG)  +# Resource names and constants +IMAGE_NAME = 'cirros-image-1' +POOLS = ['data', 'metadata', 'rbd', 'cinder', 'glance'] +CINDER_POOL = 3 +GLANCE_POOL = 4 +  class CephBasicDeployment(OpenStackAmuletDeployment):     """Amulet tests on a basic ceph deployment."""  @@ -35,10 +43,12 @@ def _add_services(self):            compatible with the local charm (e.g. stable or next).         """         this_service = {'name': 'ceph', 'units': 3} -        other_services = [{'name': 'mysql'}, {'name': 'keystone'}, +        other_services = [{'name': 'mysql'}, +                          {'name': 'keystone'},                           {'name': 'rabbitmq-server'},                           {'name': 'nova-compute'}, -                          {'name': 'glance'}, {'name': 'cinder'}] +                          {'name': 'glance'}, +                          {'name': 'cinder'}]         super(CephBasicDeployment, self)._add_services(this_service,                                                        other_services) @@ -74,12 +84,9 @@ def _configure_services(self):             'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',             'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',             'osd-reformat': 'yes', -            'ephemeral-unmount': '/mnt' +            'ephemeral-unmount': '/mnt', +            'osd-devices': '/dev/vdb /srv/ceph'         } -        if self._get_openstack_release() >= self.precise_grizzly: -            ceph_config['osd-devices'] = '/dev/vdb /srv/ceph' -        else: -            ceph_config['osd-devices'] = '/dev/vdb'          configs = {'keystone': keystone_config,                    'mysql': mysql_config, @@ -88,27 +95,44 @@ def _configure_services(self):         super(CephBasicDeployment, self)._configure_services(configs)      def _initialize_tests(self): -        """Perform final initialization before tests get run.""" +        """Perform final initialization before tests get run."""         # Access the sentries for inspecting service units         self.mysql_sentry = self.d.sentry.unit['mysql/0']         self.keystone_sentry = self.d.sentry.unit['keystone/0']         self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] -        self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] +        self.nova_sentry = self.d.sentry.unit['nova-compute/0']         self.glance_sentry = self.d.sentry.unit['glance/0']         self.cinder_sentry = self.d.sentry.unit['cinder/0']         self.ceph0_sentry = self.d.sentry.unit['ceph/0']         self.ceph1_sentry = self.d.sentry.unit['ceph/1']         self.ceph2_sentry = self.d.sentry.unit['ceph/2'] +        u.log.debug('openstack release val: {}'.format( +            self._get_openstack_release())) +        u.log.debug('openstack release str: {}'.format( +            self._get_openstack_release_string())) + +        # Let things settle a bit before moving forward +        time.sleep(30)          # Authenticate
admin with keystone         self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,                                                       user='admin',                                                       password='openstack',                                                       tenant='admin') - +        # Authenticate admin with cinder endpoint +        self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, +                                                  username='admin', +                                                  password='openstack', +                                                  tenant='admin')         # Authenticate admin with glance endpoint         self.glance = u.authenticate_glance_admin(self.keystone)  +        # Authenticate admin with nova endpoint +        self.nova = u.authenticate_nova_user(self.keystone, +                                             user='admin', +                                             password='openstack', +                                             tenant='admin') +         # Create a demo tenant/role/user         self.demo_tenant = 'demoTenant'         self.demo_role = 'demoRole' @@ -139,41 +163,83 @@ def _ceph_osd_id(self, index):         """Produce a shell command that will return a ceph-osd id."""         return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1)  # noqa  -    def test_services(self): +    def _ceph_df(self, sentry_unit): +        """Return dict of ceph df json output""" +        cmd = 'sudo ceph df --format=json' +        output, code = sentry_unit.run(cmd) +        if code != 0: +            msg = ('{} `{}` returned {} ' +                   '{}'.format(sentry_unit.info['unit_name'], +                               cmd, code, output)) +            u.log.debug(msg) +            amulet.raise_status(amulet.FAIL, msg=msg) + +        df = json.loads(output) +        return df + +    def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): +        """Return ceph pool name, object count and disk space used +        for the specified pool ID number.""" +        df = self._ceph_df(sentry_unit) +        pool_name = df['pools'][pool_id]['name'] +        obj_count = df['pools'][pool_id]['stats']['objects'] +        kb_used = df['pools'][pool_id]['stats']['kb_used'] +        u.log.debug('Ceph {} pool (ID {}): {} objects, ' +                    '{} kb used'.format(pool_name, +                                        pool_id, +                                        obj_count, +                                        kb_used)) +        return pool_name, obj_count, kb_used + +    def _validate_pool_samples(self, samples, resource_type="item", +                               sample_type="resource pool"): +        """Validate ceph pool samples taken over time, such as pool +        object counts or pool kb used, before adding, after adding, and +        after deleting items which affect those pool attributes.""" +        original, created, deleted = range(3) + +        if samples[created] <= samples[original] or \ +                samples[deleted] >= samples[created]: +            msg = ('Ceph {} samples ({}) ' +                   'unexpected.'.format(sample_type, samples)) +            return msg +        else: +            u.log.debug('Ceph {} samples (OK): ' +                        '{}'.format(sample_type, samples)) +            return None + +    def test_100_services(self):         """Verify the expected services are running on the service units.""" -        ceph_services = ['status ceph-mon-all', -                         'status ceph-mon id=`hostname`'] -        commands = { -            self.mysql_sentry: ['status mysql'], -            self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], -            self.nova_compute_sentry: ['status nova-compute'], -            self.keystone_sentry: ['status keystone'], -            self.glance_sentry: ['status glance-registry', -                                 'status glance-api'], -            self.cinder_sentry: ['status cinder-api', -                                 'status cinder-scheduler', -                                 'status cinder-volume'] +        ceph_services = [ +            'ceph-mon-all', +            'ceph-mon id=`hostname`', +            'ceph-osd-all', +            'ceph-osd id={}'.format(self._ceph_osd_id(0)), +            'ceph-osd id={}'.format(self._ceph_osd_id(1)) +        ] + +        services = { +            self.mysql_sentry: ['mysql'], +            self.rabbitmq_sentry: ['rabbitmq-server'], +            self.nova_sentry: ['nova-compute'], +            self.keystone_sentry: ['keystone'], +            self.glance_sentry: ['glance-registry', +                                 'glance-api'], +            self.cinder_sentry: ['cinder-api', +                                 'cinder-scheduler', +                                 'cinder-volume'], +            self.ceph0_sentry: ceph_services, +            self.ceph1_sentry: ceph_services, +
self.ceph2_sentry: ceph_services } - if self._get_openstack_release() >= self.precise_grizzly: - ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) - ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) - ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) - commands[self.ceph0_sentry] = ceph_services - commands[self.ceph1_sentry] = ceph_services - commands[self.ceph2_sentry] = ceph_services - else: - ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) - ceph_services.append(ceph_osd0) - commands[self.ceph0_sentry] = ceph_services - commands[self.ceph1_sentry] = ceph_services - commands[self.ceph2_sentry] = ceph_services - ret = u.validate_services(commands) + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_ceph_nova_client_relation(self): + def test_200_ceph_nova_client_relation(self): """Verify the ceph to nova ceph-client relation data.""" + u.log.debug('Checking ceph:nova-compute ceph relation data...') unit = self.ceph0_sentry relation = ['client', 'nova-compute:ceph'] expected = { @@ -187,9 +253,10 @@ def test_ceph_nova_client_relation(self): message = u.relation_error('ceph to nova ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_nova_ceph_client_relation(self): - """Verify the nova to ceph ceph-client relation data.""" - unit = self.nova_compute_sentry + def test_201_nova_ceph_client_relation(self): + """Verify the nova to ceph client relation data.""" + u.log.debug('Checking nova-compute:ceph ceph-client relation data...') + unit = self.nova_sentry relation = ['ceph', 'ceph:client'] expected = { 'private-address': u.valid_ip @@ -200,8 +267,9 @@ def test_nova_ceph_client_relation(self): message = u.relation_error('nova to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_glance_client_relation(self): + def test_202_ceph_glance_client_relation(self): """Verify the ceph to glance ceph-client relation data.""" + u.log.debug('Checking ceph:glance client relation data...') unit = self.ceph1_sentry relation = ['client', 'glance:ceph'] expected = { @@ -215,8 +283,9 @@ def test_ceph_glance_client_relation(self): message = u.relation_error('ceph to glance ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_glance_ceph_client_relation(self): - """Verify the glance to ceph ceph-client relation data.""" + def test_203_glance_ceph_client_relation(self): + """Verify the glance to ceph client relation data.""" + u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry relation = ['ceph', 'ceph:client'] expected = { @@ -228,8 +297,9 @@ def test_glance_ceph_client_relation(self): message = u.relation_error('glance to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_cinder_client_relation(self): + def test_204_ceph_cinder_client_relation(self): """Verify the ceph to cinder ceph-client relation data.""" + u.log.debug('Checking ceph:cinder ceph relation data...') unit = self.ceph2_sentry relation = ['client', 'cinder:ceph'] expected = { @@ -243,8 +313,9 @@ def test_ceph_cinder_client_relation(self): message = u.relation_error('ceph to cinder ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_cinder_ceph_client_relation(self): + def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation data.""" + u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry 
relation = ['ceph', 'ceph:client'] expected = { @@ -256,8 +327,9 @@ def test_cinder_ceph_client_relation(self): message = u.relation_error('cinder to ceph ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_config(self): + def test_300_ceph_config(self): """Verify the data in the ceph config file.""" + u.log.debug('Checking ceph config file data...') unit = self.ceph0_sentry conf = '/etc/ceph/ceph.conf' expected = { @@ -267,7 +339,10 @@ def test_ceph_config(self): 'log to syslog': 'false', 'err to syslog': 'false', 'clog to syslog': 'false', - 'mon cluster log to syslog': 'false' + 'mon cluster log to syslog': 'false', + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none' }, 'mon': { 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' @@ -281,12 +356,6 @@ def test_ceph_config(self): 'filestore xattr use omap': 'true' }, } - if self._get_openstack_release() >= self.precise_grizzly: - expected['global']['auth cluster required'] = 'none' - expected['global']['auth service required'] = 'none' - expected['global']['auth client required'] = 'none' - else: - expected['global']['auth supported'] = 'none' for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) @@ -294,11 +363,242 @@ def test_ceph_config(self): message = "ceph config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_restart_on_config_change(self): - """Verify the specified services are restarted on config change.""" - # NOTE(coreycb): Test not implemented but should it be? ceph services - # aren't restarted by charm after config change. Should - # they be restarted? - if self._get_openstack_release() >= self.precise_essex: - u.log.error("Test not implemented") - return + def test_302_cinder_rbd_config(self): + """Verify the cinder config file data regarding ceph.""" + u.log.debug('Checking cinder (rbd) config file data...') + unit = self.cinder_sentry + conf = '/etc/cinder/cinder.conf' + expected = { + 'DEFAULT': { + 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "cinder (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_304_glance_rbd_config(self): + """Verify the glance config file data regarding ceph.""" + u.log.debug('Checking glance (rbd) config file data...') + unit = self.glance_sentry + conf = '/etc/glance/glance-api.conf' + expected = { + 'DEFAULT': { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "glance (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_306_nova_rbd_config(self): + """Verify the nova config file data regarding ceph.""" + u.log.debug('Checking nova (rbd) config file data...') + unit = self.nova_sentry + conf = '/etc/nova/nova.conf' + expected = { + 'libvirt': { + 'rbd_pool': 'nova', + 'rbd_user': 'nova-compute', + 'rbd_secret_uuid': u.not_null + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "nova (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def 
test_400_ceph_check_osd_pools(self): + """Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present.""" + u.log.debug('Checking pools on ceph units...') + + cmd = 'sudo ceph osd lspools' + results = [] + sentries = [ + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + + for sentry_unit in sentries: + output, code = sentry_unit.run(cmd) + results.append(output) + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + u.log.debug(msg) + if code != 0: + amulet.raise_status(amulet.FAIL, msg=msg) + + # Check for presence of all pools on this unit + for pool in POOLS: + if pool not in output: + msg = ('{} does not have pool: ' + '{}'.format(sentry_unit.info['unit_name'], pool)) + amulet.raise_status(amulet.FAIL, msg=msg) + u.log.debug('{} has the expected ' + 'pools.'.format(sentry_unit.info['unit_name'])) + + # Check that lspool produces the same output on all units + if len(set(results)) == 1: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') + else: + u.log.debug('Pool list results: {}'.format(results)) + msg = 'Pool list results are not identical on all ceph units.' + amulet.raise_status(amulet.FAIL, msg=msg) + + def test_410_ceph_cinder_vol_create(self): + """Create and confirm a ceph-backed cinder volume, and inspect + ceph cinder pool object count as the volume is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + + # Check ceph cinder pool object count, disk space usage and pool name + u.log.debug('Checking ceph cinder pool original samples...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'cinder' + if pool_name != expected: + msg = ('Ceph pool {} unexpected name (actual, expected): ' + '{}. 
{}'.format(CINDER_POOL, pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed cinder volume + cinder_vol = u.create_cinder_volume(self.cinder) + + # Re-check ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool samples after volume create...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed cinder volume + u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") + + # Final check, ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool after volume delete...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=CINDER_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph cinder pool object count samples over time + ret = self._validate_pool_samples(samples=obj_count_samples, + resource_type="cinder volume", + sample_type="pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph cinder pool disk space usage samples over time + ret = self._validate_pool_samples(samples=pool_size_samples, + resource_type="cinder volume", + sample_type="pool disk usage size") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_412_ceph_glance_image_create_delete(self): + """Create and confirm a ceph-backed glance image, and inspect + ceph glance pool object count as the image is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + + # Check ceph glance pool object count, disk space usage and pool name + u.log.debug('Checking ceph glance pool original samples...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'glance' + if pool_name != expected: + msg = ('Ceph glance pool {} unexpected name (actual, ' + 'expected): {}. 
{}'.format(GLANCE_POOL, + pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed glance image + glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + + # Re-check ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image create...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed glance image + u.delete_resource(self.glance.images, + glance_img, msg="glance image") + + # Final check, ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image delete...') + pool_name, obj_count, kb_used = self._take_ceph_pool_sample( + sentry_unit, pool_id=GLANCE_POOL) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph glance pool object count samples over time + ret = self._validate_pool_samples(samples=obj_count_samples, + resource_type="glance image", + sample_type="pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph glance pool disk space usage samples over time + ret = self._validate_pool_samples(samples=pool_size_samples, + resource_type="glance image", + sample_type="pool disk usage size") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_499_ceph_cmds_exit_zero(self): + """Check that all ceph commands in a list return zero on all + ceph units listed.""" + sentry_units = [ + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + commands = [ + 'sudo ceph -s', + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + 'sudo ceph osd pool get data size', + 'sudo ceph osd pool get data pg_num', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. + + def test_999(self): + u.log.error('Fake fail!') + raise diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml new file mode 100644 index 00000000..348aae57 --- /dev/null +++ b/ceph-mon/tests/tests.yaml @@ -0,0 +1,16 @@ +bootstrap: true +reset: true +virtualenv: true +makefile: + - lint + - test +sources: + - ppa:juju/stable +packages: + - amulet + - python-amulet + - python-distro-info + - python-keystoneclient + - python-glanceclient + - python-cinderclient + - python-novaclient From 97fe6fd90d977f6ba5f0218b74f83bcbedb51958 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:41:25 +0000 Subject: [PATCH 0700/2699] sync tests/charmhelpers --- .../charmhelpers/contrib/amulet/utils.py | 144 +++++++++++++++-- .../contrib/openstack/amulet/deployment.py | 8 +- .../contrib/openstack/amulet/utils.py | 151 +++++++++++++++++- 3 files changed, 288 insertions(+), 15 deletions(-) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index f61c2e8b..47c4555c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -15,13 +15,15 @@ # along with charm-helpers. If not, see . 
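The create/delete tests above (test_410 and test_412) share one sampling pattern: record a pool's object count and kb_used before creating a resource, again after creating it, and once more after deleting it, then require the middle sample to be the peak. A minimal standalone sketch of that pattern, with a hypothetical pool_stats helper standing in for the sentry plumbing (it shells out to the ceph CLI, so it assumes a host with a running cluster):

    import json
    import subprocess

    def pool_stats(pool_id):
        # Same query the tests issue through the sentry: `ceph df --format=json`.
        raw = subprocess.check_output(['sudo', 'ceph', 'df', '--format=json'])
        stats = json.loads(raw)['pools'][pool_id]['stats']
        return stats['objects'], stats['kb_used']

    def samples_ok(original, created, deleted):
        # Mirrors _validate_pool_samples: the count must rise after the
        # resource is created and fall again after it is deleted.
        return original < created and deleted < created

The same monotonicity check backs both the object-count and the disk-usage assertions in those tests.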
import ConfigParser +import distro_info import io import logging +import os import re +import six import sys import time - -import six +import urlparse class AmuletUtils(object): @@ -33,6 +35,7 @@ class AmuletUtils(object): def __init__(self, log_level=logging.ERROR): self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() def get_logger(self, name="amulet-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -70,12 +73,44 @@ def valid_url(self, url): else: return False - def validate_services(self, commands): - """Validate services. + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg - Verify the specified services are running on the corresponding + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding service units. - """ + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) @@ -86,6 +121,41 @@ def validate_services(self, commands): return "command `{}` returned {}".format(cmd, str(code)) return None + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name == "rabbitmq-server"): + # init is systemd + cmd = 'sudo service {} status'.format(service_name) + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + + output, code = sentry_unit.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) @@ -104,6 +174,9 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): @@ -112,10 +185,23 @@ def validate_config_data(self, sentry_unit, config_file, section, for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) - if config.get(section, k) != expected[k]: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, config.get(section, k), k, expected[k]) - return None + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + else: + # handle not_null, valid_ip boolean comparison methods, etc. + if v(actual): + return None + else: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) def _validate_dict_data(self, expected, actual): """Validate dictionary data. @@ -321,3 +407,41 @@ def relation_error(self, name, data): def endpoint_error(self, name, data): return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + self.log.debug('Ubuntu release list: {}'.format(_release_list)) + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. 
+ + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + msg = ('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + self.log.debug(msg) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return msg + return None diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702f..c664c9d0 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -110,7 +110,8 @@ def _get_openstack_release(self): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ def _get_openstack_release(self): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,6 +141,7 @@ def _get_openstack_release_string(self): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918a..593437eb 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,15 +16,16 @@ import logging import os +import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client -import six - from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -37,7 +38,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +52,8 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. 
""" + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +80,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +97,7 @@ def validate_tenant_data(self, expected, actual): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +119,7 @@ def validate_role_data(self, expected, actual): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +140,7 @@ def validate_user_data(self, expected, actual): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +162,29 @@ def validate_flavor_data(self, expected, actual): Validate a list of actual flavors vs a list of expected flavors. """ + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +194,7 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,12 +202,21 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = 
keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, @@ -195,6 +224,7 @@ def authenticate_nova_user(self, keystone, user, password, tenant): def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +265,11 @@ def create_cirros_image(self, glance, image_name): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +289,8 @@ def delete_image(self, glance, image): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +313,11 @@ def create_instance(self, nova, image_name, instance_name, flavor): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +334,106 @@ def delete_instance(self, nova, instance): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): + """Add and confirm a new volume, 1GB by default.""" + self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) + vol_new = cinder.volumes.create(display_name=vol_name, size=1) + vol_id = vol_new.id + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Create volume status wait") + if ret: + return vol_new + else: + self.log.error('Failed to create volume.') + return None + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False From a8f90ad19e087906ab61ca1a17c6eba204efb7c5 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:41:25 +0000 Subject: [PATCH 0701/2699] sync tests/charmhelpers --- .../charmhelpers/contrib/amulet/utils.py | 144 +++++++++++++++-- .../contrib/openstack/amulet/deployment.py | 8 +- .../contrib/openstack/amulet/utils.py | 151 +++++++++++++++++- 3 files changed, 288 insertions(+), 15 deletions(-) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index f61c2e8b..47c4555c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -15,13 +15,15 @@ # along with charm-helpers. If not, see . 
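The two polling helpers added by this sync, delete_resource and resource_reaches_status, are what the new 4xx functional tests build on. A rough usage sketch, assuming an already-authenticated cinder client and the `u` utils object from the tests (the volume name is illustrative):

    # Create a volume, wait for 'available', then delete and confirm removal.
    vol = cinder.volumes.create(display_name='smoke-vol', size=1)
    if u.resource_reaches_status(cinder.volumes, vol.id,
                                 expected_stat='available',
                                 msg='volume create', max_wait=120):
        u.delete_resource(cinder.volumes, vol.id,
                          msg='cinder volume', max_wait=120)

Both helpers poll on a four-second interval until max_wait elapses, so the default 120 seconds allows roughly 30 checks.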
import ConfigParser +import distro_info import io import logging +import os import re +import six import sys import time - -import six +import urlparse class AmuletUtils(object): @@ -33,6 +35,7 @@ class AmuletUtils(object): def __init__(self, log_level=logging.ERROR): self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() def get_logger(self, name="amulet-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -70,12 +73,44 @@ def valid_url(self, url): else: return False - def validate_services(self, commands): - """Validate services. + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg - Verify the specified services are running on the corresponding + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding service units. - """ + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) @@ -86,6 +121,41 @@ def validate_services(self, commands): return "command `{}` returned {}".format(cmd, str(code)) return None + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name == "rabbitmq-server"): + # init is systemd + cmd = 'sudo service {} status'.format(service_name) + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + + output, code = sentry_unit.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) @@ -104,6 +174,9 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): @@ -112,10 +185,23 @@ def validate_config_data(self, sentry_unit, config_file, section, for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) - if config.get(section, k) != expected[k]: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, config.get(section, k), k, expected[k]) - return None + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + else: + # handle not_null, valid_ip boolean comparison methods, etc. + if v(actual): + return None + else: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) def _validate_dict_data(self, expected, actual): """Validate dictionary data. @@ -321,3 +407,41 @@ def relation_error(self, name, data): def endpoint_error(self, name, data): return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + self.log.debug('Ubuntu release list: {}'.format(_release_list)) + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. 
+ + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + msg = ('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + self.log.debug(msg) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return msg + return None diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702f..c664c9d0 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -110,7 +110,8 @@ def _get_openstack_release(self): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ def _get_openstack_release(self): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,6 +141,7 @@ def _get_openstack_release_string(self): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918a..593437eb 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,15 +16,16 @@ import logging import os +import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client -import six - from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -37,7 +38,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +52,8 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. 
""" + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +80,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +97,7 @@ def validate_tenant_data(self, expected, actual): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +119,7 @@ def validate_role_data(self, expected, actual): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +140,7 @@ def validate_user_data(self, expected, actual): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +162,29 @@ def validate_flavor_data(self, expected, actual): Validate a list of actual flavors vs a list of expected flavors. """ + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +194,7 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,12 +202,21 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = 
keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, @@ -195,6 +224,7 @@ def authenticate_nova_user(self, keystone, user, password, tenant): def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +265,11 @@ def create_cirros_image(self, glance, image_name): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +289,8 @@ def delete_image(self, glance, image): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +313,11 @@ def create_instance(self, nova, image_name, instance_name, flavor): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +334,106 @@ def delete_instance(self, nova, instance): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): + """Add and confirm a new volume, 1GB by default.""" + self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) + vol_new = cinder.volumes.create(display_name=vol_name, size=1) + vol_id = vol_new.id + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Create volume status wait") + if ret: + return vol_new + else: + self.log.error('Failed to create volume.') + return None + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False From 755c08a62eec129a0a0268d27bbba5ef5d477ffa Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:42:45 +0000 Subject: [PATCH 0702/2699] set ch sync yaml and makefile --- ceph-proxy/Makefile | 2 +- ceph-proxy/charm-helpers-tests.yaml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 69bb5727..541b1fa8 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -21,7 +21,7 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py -# $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index 987d84cb..48b12f6f 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -1,5 +1,4 @@ -#branch: lp:charm-helpers -branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/ +branch: lp:charm-helpers destination: tests/charmhelpers include: - contrib.amulet From 912ab15da4a182e75205cb8d3180b7d295ba20a3 Mon Sep 17 
00:00:00 2001 From: Ryan Beisner Date: Mon, 15 Jun 2015 20:42:45 +0000 Subject: [PATCH 0703/2699] set ch sync yaml and makefile --- ceph-mon/Makefile | 2 +- ceph-mon/charm-helpers-tests.yaml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 69bb5727..541b1fa8 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -21,7 +21,7 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py -# $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index 987d84cb..48b12f6f 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -1,5 +1,4 @@ -#branch: lp:charm-helpers -branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/ +branch: lp:charm-helpers destination: tests/charmhelpers include: - contrib.amulet From 0a03b5d0dfde90fd76d85c6e5afe11dbd0723003 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 16 Jun 2015 13:26:28 +0000 Subject: [PATCH 0704/2699] lint cleanup; fix test dependency typo; remove fakefail --- ceph-proxy/metadata.yaml | 2 +- ceph-proxy/tests/00-setup | 2 +- ceph-proxy/tests/basic_deployment.py | 6 +----- ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index e918708e..d67159ee 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -4,7 +4,7 @@ maintainer: James Page description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -categories: +tags: - file-servers peers: mon: diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index 54f560ca..d6882caf 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -5,7 +5,7 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ - python-distroinfo \ + python-distro-info \ python-keystoneclient \ python-glanceclient \ python-cinderclient \ diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 09922e86..70025170 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -185,7 +185,7 @@ def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] u.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(_pool_name, + '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) @@ -598,7 +598,3 @@ def test_499_ceph_cmds_exit_zero(self): # FYI: No restart check as ceph services do not restart # when charm config changes, unless monitor count increases. 
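The python-distro-info dependency being fixed in this commit is what the synced helpers import to order Ubuntu releases and choose between upstart and systemd. A condensed sketch of that logic from get_ubuntu_releases and validate_services_by_name (standalone, not charm code):

    import distro_info

    # Codenames ordered oldest to newest; 'vivid' is the first systemd release.
    releases = distro_info.UbuntuDistroInfo().all
    systemd_switch = releases.index('vivid')

    def status_cmd(release, service_name):
        # Upstart syntax before vivid, systemd syntax from vivid onward.
        if releases.index(release) >= systemd_switch:
            return 'sudo service {} status'.format(service_name)
        return 'sudo status {}'.format(service_name)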
- - def test_999(self): - u.log.error('Fake fail!') - raise diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 47c4555c..f04cee25 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -423,7 +423,7 @@ def file_to_url(self, file_rel_path): def check_commands_on_units(self, commands, sentry_units): """Check that all commands in a list exit zero on all sentry units in a list. - + :param commands: list of bash commands :param sentry_units: list of sentry unit pointers :returns: None if successful; Failure message otherwise From c4a31c45c8b2e8acfa82588ae0a9b413dae62b1c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 16 Jun 2015 13:26:28 +0000 Subject: [PATCH 0705/2699] lint cleanup; fix test dependency typo; remove fakefail --- ceph-mon/metadata.yaml | 2 +- ceph-mon/tests/00-setup | 2 +- ceph-mon/tests/basic_deployment.py | 6 +----- ceph-mon/tests/charmhelpers/contrib/amulet/utils.py | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index e918708e..d67159ee 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -4,7 +4,7 @@ maintainer: James Page description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -categories: +tags: - file-servers peers: mon: diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index 54f560ca..d6882caf 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -5,7 +5,7 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ - python-distroinfo \ + python-distro-info \ python-keystoneclient \ python-glanceclient \ python-cinderclient \ diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 09922e86..70025170 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -185,7 +185,7 @@ def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] u.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(_pool_name, + '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) @@ -598,7 +598,3 @@ def test_499_ceph_cmds_exit_zero(self): # FYI: No restart check as ceph services do not restart # when charm config changes, unless monitor count increases. - - def test_999(self): - u.log.error('Fake fail!') - raise diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 47c4555c..f04cee25 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -423,7 +423,7 @@ def file_to_url(self, file_rel_path): def check_commands_on_units(self, commands, sentry_units): """Check that all commands in a list exit zero on all sentry units in a list. 
- + :param commands: list of bash commands :param sentry_units: list of sentry unit pointers :returns: None if successful; Failure message otherwise From fa50947e0aec0a95c068a36efd2791309b1e414c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 16 Jun 2015 17:30:53 +0000 Subject: [PATCH 0706/2699] refactor for >=K pool and glance differences --- ceph-proxy/tests/basic_deployment.py | 77 +++++++++++++++++++--------- 1 file changed, 52 insertions(+), 25 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 70025170..81ca801b 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -17,9 +17,6 @@ # Resource names and constants IMAGE_NAME = 'cirros-image-1' -POOLS = ['data', 'metadata', 'rbd', 'cinder', 'glance'] -CINDER_POOL = 3 -GLANCE_POOL = 4 class CephBasicDeployment(OpenStackAmuletDeployment): @@ -159,6 +156,26 @@ def _initialize_tests(self): 'password', self.demo_tenant) + def _ceph_expected_pools(self): + """Return a dict of expected ceph pools based on + Ubuntu-OpenStack release""" + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + return { + 'rbd': 0, + 'cinder': 1, + 'glance': 2 + } + else: + # Juno or earlier + return { + 'data': 0, + 'metadata': 1, + 'rbd': 2, + 'cinder': 3, + 'glance': 4 + } + def _ceph_osd_id(self, index): """Produce a shell command that will return a ceph-osd id.""" return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa @@ -384,15 +401,23 @@ def test_304_glance_rbd_config(self): u.log.debug('Checking glance (rbd) config file data...') unit = self.glance_sentry conf = '/etc/glance/glance-api.conf' - expected = { - 'DEFAULT': { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } + config = { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' } + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + config['stores'] = 'glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store' # noqa + section = 'glance_store' + else: + # Juno or earlier + section = 'DEFAULT' + + expected = {section: config} for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: @@ -423,6 +448,7 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Checking pools on ceph units...') cmd = 'sudo ceph osd lspools' + pools = self._ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -441,7 +467,7 @@ def test_400_ceph_check_osd_pools(self): amulet.raise_status(amulet.FAIL, msg=msg) # Check for presence of all pools on this unit - for pool in POOLS: + for pool in pools: if pool not in output: msg = ('{} does not have pool: ' '{}'.format(sentry_unit.info['unit_name'], pool)) @@ -465,18 +491,20 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] + pools = self._ceph_expected_pools() + cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name u.log.debug('Checking ceph cinder pool original samples...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) 
pool_size_samples.append(kb_used) expected = 'cinder' if pool_name != expected: msg = ('Ceph pool {} unexpected name (actual, expected): ' - '{}. {}'.format(CINDER_POOL, pool_name, expected)) + '{}. {}'.format(cinder_pool, pool_name, expected)) amulet.raise_status(amulet.FAIL, msg=msg) # Create ceph-backed cinder volume @@ -486,7 +514,7 @@ def test_410_ceph_cinder_vol_create(self): time.sleep(10) u.log.debug('Checking ceph cinder pool samples after volume create...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -497,7 +525,7 @@ def test_410_ceph_cinder_vol_create(self): time.sleep(10) u.log.debug('Checking ceph cinder pool after volume delete...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -522,18 +550,20 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] + pools = self._ceph_expected_pools() + glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name u.log.debug('Checking ceph glance pool original samples...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) expected = 'glance' if pool_name != expected: msg = ('Ceph glance pool {} unexpected name (actual, ' - 'expected): {}. {}'.format(GLANCE_POOL, + 'expected): {}. {}'.format(glance_pool, pool_name, expected)) amulet.raise_status(amulet.FAIL, msg=msg) @@ -544,7 +574,7 @@ def test_412_ceph_glance_image_create_delete(self): time.sleep(10) u.log.debug('Checking ceph glance pool samples after image create...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -556,7 +586,7 @@ def test_412_ceph_glance_image_create_delete(self): time.sleep(10) u.log.debug('Checking ceph glance pool samples after image delete...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -575,22 +605,19 @@ def test_412_ceph_glance_image_create_delete(self): amulet.raise_status(amulet.FAIL, msg=ret) def test_499_ceph_cmds_exit_zero(self): - """Check that all ceph commands in a list return zero on all - ceph units listed.""" + """Check basic functionality of ceph cli commands against + all ceph units.""" sentry_units = [ self.ceph0_sentry, self.ceph1_sentry, self.ceph2_sentry ] commands = [ - 'sudo ceph -s', 'sudo ceph health', 'sudo ceph mds stat', 'sudo ceph pg stat', 'sudo ceph osd stat', 'sudo ceph mon stat', - 'sudo ceph osd pool get data size', - 'sudo ceph osd pool get data pg_num', ] ret = u.check_commands_on_units(commands, sentry_units) if ret: From e644cf6fd08574ce7a901d838f71282236629a9e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 16 Jun 2015 17:30:53 +0000 Subject: [PATCH 0707/2699] refactor for >=K pool and glance differences --- ceph-mon/tests/basic_deployment.py | 77 ++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 25 deletions(-) 
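Two simple mechanics drive the pool checks in these twin patches: a release-gated map of expected pool names to IDs (the default data/metadata pools disappear at Kilo), and a substring test against `ceph osd lspools` output. A condensed standalone sketch; the lspools string below is illustrative, not captured output:

    # Expected pools by release, mirroring _ceph_expected_pools() above.
    def expected_pools(kilo_or_later):
        if kilo_or_later:
            return {'rbd': 0, 'cinder': 1, 'glance': 2}
        return {'data': 0, 'metadata': 1, 'rbd': 2, 'cinder': 3, 'glance': 4}

    # Hypothetical Juno-era output; lspools emits "<id> <name>," per pool.
    output = '0 data,1 metadata,2 rbd,3 cinder,4 glance,'
    missing = [p for p in expected_pools(False) if p not in output]
    assert not missing, 'missing pools: {}'.format(missing)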
diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 70025170..81ca801b 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -17,9 +17,6 @@ # Resource names and constants IMAGE_NAME = 'cirros-image-1' -POOLS = ['data', 'metadata', 'rbd', 'cinder', 'glance'] -CINDER_POOL = 3 -GLANCE_POOL = 4 class CephBasicDeployment(OpenStackAmuletDeployment): @@ -159,6 +156,26 @@ def _initialize_tests(self): 'password', self.demo_tenant) + def _ceph_expected_pools(self): + """Return a dict of expected ceph pools based on + Ubuntu-OpenStack release""" + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + return { + 'rbd': 0, + 'cinder': 1, + 'glance': 2 + } + else: + # Juno or earlier + return { + 'data': 0, + 'metadata': 1, + 'rbd': 2, + 'cinder': 3, + 'glance': 4 + } + def _ceph_osd_id(self, index): """Produce a shell command that will return a ceph-osd id.""" return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa @@ -384,15 +401,23 @@ def test_304_glance_rbd_config(self): u.log.debug('Checking glance (rbd) config file data...') unit = self.glance_sentry conf = '/etc/glance/glance-api.conf' - expected = { - 'DEFAULT': { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } + config = { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' } + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + config['stores'] = 'glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store' # noqa + section = 'glance_store' + else: + # Juno or earlier + section = 'DEFAULT' + + expected = {section: config} for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: @@ -423,6 +448,7 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Checking pools on ceph units...') cmd = 'sudo ceph osd lspools' + pools = self._ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -441,7 +467,7 @@ def test_400_ceph_check_osd_pools(self): amulet.raise_status(amulet.FAIL, msg=msg) # Check for presence of all pools on this unit - for pool in POOLS: + for pool in pools: if pool not in output: msg = ('{} does not have pool: ' '{}'.format(sentry_unit.info['unit_name'], pool)) @@ -465,18 +491,20 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] + pools = self._ceph_expected_pools() + cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name u.log.debug('Checking ceph cinder pool original samples...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) expected = 'cinder' if pool_name != expected: msg = ('Ceph pool {} unexpected name (actual, expected): ' - '{}. {}'.format(CINDER_POOL, pool_name, expected)) + '{}. 
{}'.format(cinder_pool, pool_name, expected)) amulet.raise_status(amulet.FAIL, msg=msg) # Create ceph-backed cinder volume @@ -486,7 +514,7 @@ def test_410_ceph_cinder_vol_create(self): time.sleep(10) u.log.debug('Checking ceph cinder pool samples after volume create...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -497,7 +525,7 @@ def test_410_ceph_cinder_vol_create(self): time.sleep(10) u.log.debug('Checking ceph cinder pool after volume delete...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=CINDER_POOL) + sentry_unit, pool_id=cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -522,18 +550,20 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] + pools = self._ceph_expected_pools() + glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name u.log.debug('Checking ceph glance pool original samples...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) expected = 'glance' if pool_name != expected: msg = ('Ceph glance pool {} unexpected name (actual, ' - 'expected): {}. {}'.format(GLANCE_POOL, + 'expected): {}. {}'.format(glance_pool, pool_name, expected)) amulet.raise_status(amulet.FAIL, msg=msg) @@ -544,7 +574,7 @@ def test_412_ceph_glance_image_create_delete(self): time.sleep(10) u.log.debug('Checking ceph glance pool samples after image create...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -556,7 +586,7 @@ def test_412_ceph_glance_image_create_delete(self): time.sleep(10) u.log.debug('Checking ceph glance pool samples after image delete...') pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=GLANCE_POOL) + sentry_unit, pool_id=glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -575,22 +605,19 @@ def test_412_ceph_glance_image_create_delete(self): amulet.raise_status(amulet.FAIL, msg=ret) def test_499_ceph_cmds_exit_zero(self): - """Check that all ceph commands in a list return zero on all - ceph units listed.""" + """Check basic functionality of ceph cli commands against + all ceph units.""" sentry_units = [ self.ceph0_sentry, self.ceph1_sentry, self.ceph2_sentry ] commands = [ - 'sudo ceph -s', 'sudo ceph health', 'sudo ceph mds stat', 'sudo ceph pg stat', 'sudo ceph osd stat', 'sudo ceph mon stat', - 'sudo ceph osd pool get data size', - 'sudo ceph osd pool get data pg_num', ] ret = u.check_commands_on_units(commands, sentry_units) if ret: From d850e01b10393907170a20bc1fbc0aa61ffbff3c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 14:52:04 +0000 Subject: [PATCH 0708/2699] move reusable ceph test pieces into charmhelpers; resync tests/charmhelpers --- ceph-proxy/tests/basic_deployment.py | 124 ++++-------------- .../contrib/openstack/amulet/deployment.py | 20 +++ .../contrib/openstack/amulet/utils.py | 64 +++++++++ 3 files changed, 109 insertions(+), 99 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py 
b/ceph-proxy/tests/basic_deployment.py index 81ca801b..188cfaed 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -1,7 +1,6 @@ #!/usr/bin/python import amulet -import json import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -156,83 +155,14 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def _ceph_expected_pools(self): - """Return a dict of expected ceph pools based on - Ubuntu-OpenStack release""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - return { - 'rbd': 0, - 'cinder': 1, - 'glance': 2 - } - else: - # Juno or earlier - return { - 'data': 0, - 'metadata': 1, - 'rbd': 2, - 'cinder': 3, - 'glance': 4 - } - - def _ceph_osd_id(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa - - def _ceph_df(self, sentry_unit): - """Return dict of ceph df json output""" - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - u.log.debug(msg) - amulet.raise_status(amulet.FAIL, msg=msg) - - df = json.loads(output) - return df - - def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Return ceph pool name, object count and disk space used - for the specified pool ID number.""" - df = self._ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] - u.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) - return pool_name, obj_count, kb_used - - def _validate_pool_samples(self, samples, resource_type="item", - sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes.""" - original, created, deleted = range(3) - - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg - else: - u.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - def test_100_services(self): """Verify the expected services are running on the service units.""" ceph_services = [ 'ceph-mon-all', 'ceph-mon id=`hostname`', 'ceph-osd-all', - 'ceph-osd id={}'.format(self._ceph_osd_id(0)), - 'ceph-osd id={}'.format(self._ceph_osd_id(1)) + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] services = { @@ -448,7 +378,7 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Checking pools on ceph units...') cmd = 'sudo ceph osd lspools' - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -491,13 +421,13 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name u.log.debug('Checking ceph cinder pool original samples...') - pool_name, obj_count, kb_used = 
self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -513,8 +443,8 @@ def test_410_ceph_cinder_vol_create(self): # Re-check ceph cinder pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph cinder pool samples after volume create...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -524,22 +454,20 @@ def test_410_ceph_cinder_vol_create(self): # Final check, ceph cinder pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph cinder pool after volume delete...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) # Validate ceph cinder pool object count samples over time - ret = self._validate_pool_samples(samples=obj_count_samples, - resource_type="cinder volume", - sample_type="pool object count") + ret = u.validate_ceph_pool_samples(obj_count_samples, + "cinder pool object count") if ret: amulet.raise_status(amulet.FAIL, msg=ret) # Validate ceph cinder pool disk space usage samples over time - ret = self._validate_pool_samples(samples=pool_size_samples, - resource_type="cinder volume", - sample_type="pool disk usage size") + ret = u.validate_ceph_pool_samples(pool_size_samples, + "cinder pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) @@ -550,13 +478,13 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name u.log.debug('Checking ceph glance pool original samples...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -573,8 +501,8 @@ def test_412_ceph_glance_image_create_delete(self): # Re-check ceph glance pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph glance pool samples after image create...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -585,22 +513,20 @@ def test_412_ceph_glance_image_create_delete(self): # Final check, ceph glance pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph glance pool samples after image delete...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) # Validate ceph glance pool object count samples over time - ret = self._validate_pool_samples(samples=obj_count_samples, - resource_type="glance image", - 
sample_type="pool object count") + ret = u.validate_ceph_pool_samples(obj_count_samples, + "glance pool object count") if ret: amulet.raise_status(amulet.FAIL, msg=ret) # Validate ceph glance pool disk space usage samples over time - ret = self._validate_pool_samples(samples=pool_size_samples, - resource_type="glance image", - sample_type="pool disk usage size") + ret = u.validate_ceph_pool_samples(pool_size_samples, + "glance pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c664c9d0..73e025d7 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -148,3 +148,23 @@ def _get_openstack_release_string(self): return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self): + """Return a dict of expected ceph pools based on + Ubuntu-OpenStack release""" + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + return { + 'rbd': 0, + 'cinder': 1, + 'glance': 2 + } + else: + # Juno or earlier + return { + 'data': 0, + 'metadata': 1, + 'rbd': 2, + 'cinder': 3, + 'glance': 4 + } diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 593437eb..ddadd9fa 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import json import logging import os import six @@ -437,3 +438,66 @@ def resource_reaches_status(self, resource, resource_id, self.log.debug('{} never reached expected status: ' '{}'.format(resource_id, expected_stat)) return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" + " | grep -o '[0-9]*'`".format(index + 1)) + return cmd + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, + pool_id, + obj_count, + kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + msg = ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + return msg + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None From 24010323e52684c652f6cc36a0b5ff5b11dab490 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 14:52:04 +0000 Subject: [PATCH 0709/2699] move reusable ceph test pieces into charmhelpers; resync tests/charmhelpers --- ceph-mon/tests/basic_deployment.py | 124 ++++-------------- .../contrib/openstack/amulet/deployment.py | 20 +++ .../contrib/openstack/amulet/utils.py | 64 +++++++++ 3 files changed, 109 insertions(+), 99 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 81ca801b..188cfaed 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -1,7 +1,6 @@ #!/usr/bin/python import amulet -import json import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -156,83 +155,14 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def _ceph_expected_pools(self): - """Return a dict of expected ceph pools based on - Ubuntu-OpenStack release""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - return { - 'rbd': 0, - 'cinder': 1, - 'glance': 2 - } - else: - # Juno or earlier - return { - 'data': 0, - 'metadata': 1, - 'rbd': 2, - 'cinder': 3, - 'glance': 4 - } - - def _ceph_osd_id(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa - - def _ceph_df(self, sentry_unit): - """Return dict of ceph df json output""" - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - u.log.debug(msg) - amulet.raise_status(amulet.FAIL, msg=msg) - - df = json.loads(output) - return df - - def _take_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Return ceph pool name, object count and disk space used - for the specified pool ID number.""" - df = self._ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = 
df['pools'][pool_id]['stats']['kb_used'] - u.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) - return pool_name, obj_count, kb_used - - def _validate_pool_samples(self, samples, resource_type="item", - sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes.""" - original, created, deleted = range(3) - - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg - else: - u.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - def test_100_services(self): """Verify the expected services are running on the service units.""" ceph_services = [ 'ceph-mon-all', 'ceph-mon id=`hostname`', 'ceph-osd-all', - 'ceph-osd id={}'.format(self._ceph_osd_id(0)), - 'ceph-osd id={}'.format(self._ceph_osd_id(1)) + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] services = { @@ -448,7 +378,7 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Checking pools on ceph units...') cmd = 'sudo ceph osd lspools' - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -491,13 +421,13 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name u.log.debug('Checking ceph cinder pool original samples...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -513,8 +443,8 @@ def test_410_ceph_cinder_vol_create(self): # Re-check ceph cinder pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph cinder pool samples after volume create...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -524,22 +454,20 @@ def test_410_ceph_cinder_vol_create(self): # Final check, ceph cinder pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph cinder pool after volume delete...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=cinder_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) # Validate ceph cinder pool object count samples over time - ret = self._validate_pool_samples(samples=obj_count_samples, - resource_type="cinder volume", - sample_type="pool object count") + ret = u.validate_ceph_pool_samples(obj_count_samples, + "cinder pool object count") if ret: amulet.raise_status(amulet.FAIL, msg=ret) # Validate ceph cinder pool disk space usage samples over time - ret = self._validate_pool_samples(samples=pool_size_samples, - resource_type="cinder volume", - sample_type="pool disk usage size") + ret = 
u.validate_ceph_pool_samples(pool_size_samples, + "cinder pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) @@ -550,13 +478,13 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self._ceph_expected_pools() + pools = self.get_ceph_expected_pools() glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name u.log.debug('Checking ceph glance pool original samples...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -573,8 +501,8 @@ def test_412_ceph_glance_image_create_delete(self): # Re-check ceph glance pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph glance pool samples after image create...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) @@ -585,22 +513,20 @@ def test_412_ceph_glance_image_create_delete(self): # Final check, ceph glance pool object count and disk usage time.sleep(10) u.log.debug('Checking ceph glance pool samples after image delete...') - pool_name, obj_count, kb_used = self._take_ceph_pool_sample( - sentry_unit, pool_id=glance_pool) + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) # Validate ceph glance pool object count samples over time - ret = self._validate_pool_samples(samples=obj_count_samples, - resource_type="glance image", - sample_type="pool object count") + ret = u.validate_ceph_pool_samples(obj_count_samples, + "glance pool object count") if ret: amulet.raise_status(amulet.FAIL, msg=ret) # Validate ceph glance pool disk space usage samples over time - ret = self._validate_pool_samples(samples=pool_size_samples, - resource_type="glance image", - sample_type="pool disk usage size") + ret = u.validate_ceph_pool_samples(pool_size_samples, + "glance pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index c664c9d0..73e025d7 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -148,3 +148,23 @@ def _get_openstack_release_string(self): return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self): + """Return a dict of expected ceph pools based on + Ubuntu-OpenStack release""" + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + return { + 'rbd': 0, + 'cinder': 1, + 'glance': 2 + } + else: + # Juno or earlier + return { + 'data': 0, + 'metadata': 1, + 'rbd': 2, + 'cinder': 3, + 'glance': 4 + } diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 593437eb..ddadd9fa 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,6 +14,7 @@ # You 
should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import json import logging import os import six @@ -437,3 +438,66 @@ def resource_reaches_status(self, resource, resource_id, self.log.debug('{} never reached expected status: ' '{}'.format(resource_id, expected_stat)) return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" + " | grep -o '[0-9]*'`".format(index + 1)) + return cmd + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, + pool_id, + obj_count, + kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. 
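Spelled out as a standalone check with illustrative sample values, the three-sample rule reduces to two comparisons:

    # Samples taken before create, after create, and after delete.
    samples = [10, 12, 10]
    original, created, deleted = range(3)

    if samples[created] <= samples[original] or \
            samples[deleted] >= samples[created]:
        print('Ceph samples ({}) unexpected.'.format(samples))
    else:
        print('Ceph samples (OK): {}'.format(samples))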
+ + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + msg = ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + return msg + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None From a077feee036bf469c9014ebd0ffffb5b60e1439f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 15:12:00 +0000 Subject: [PATCH 0710/2699] lint cleanup --- ceph-proxy/tests/basic_deployment.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 188cfaed..57a0b449 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -341,7 +341,9 @@ def test_304_glance_rbd_config(self): if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later - config['stores'] = 'glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store' # noqa + config['stores'] = ('glance.store.filesystem.Store,' + 'glance.store.http.Store,' + 'glance.store.rbd.Store') section = 'glance_store' else: # Juno or earlier From 960a2b69e4bbfcf49ee42a0493d314bb011bddd1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 15:12:00 +0000 Subject: [PATCH 0711/2699] lint cleanup --- ceph-mon/tests/basic_deployment.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 188cfaed..57a0b449 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -341,7 +341,9 @@ def test_304_glance_rbd_config(self): if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later - config['stores'] = 'glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store' # noqa + config['stores'] = ('glance.store.filesystem.Store,' + 'glance.store.http.Store,' + 'glance.store.rbd.Store') section = 'glance_store' else: # Juno or earlier From 47af5e537b4960896e189e8e97c836006bbc0191 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 16:19:09 +0000 Subject: [PATCH 0712/2699] amulet tests - update test coverage, enable vivid, prep for wily --- ceph-osd/Makefile | 9 +- ceph-osd/metadata.yaml | 2 +- ceph-osd/tests/00-setup | 2 + ceph-osd/tests/017-basic-trusty-kilo | 0 ceph-osd/tests/019-basic-vivid-kilo | 0 ceph-osd/tests/README | 17 ++ ceph-osd/tests/basic_deployment.py | 357 +++++++++++++++++++++++---- ceph-osd/tests/tests.yaml | 16 ++ 8 files changed, 350 insertions(+), 53 deletions(-) mode change 100644 => 100755 ceph-osd/tests/017-basic-trusty-kilo mode change 100644 => 100755 ceph-osd/tests/019-basic-vivid-kilo create mode 100644 ceph-osd/tests/tests.yaml diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index cba4f868..28e56143 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -2,18 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests unit_tests + @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ + hooks tests unit_tests @charm proof unit_test: + @# Bundletester expects unit tests here. @echo Starting unit tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests -test: +functional_test: @echo Starting Amulet tests... 
- # coreycb note: The -v should only be temporary until Amulet sends - # raise_status() messages to stderr: - # https://bugs.launchpad.net/amulet/+bug/1320357 @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 6bd1fbd3..2f571eff 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -5,7 +5,7 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container -categories: +tags: - misc description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/00-setup index 1243ec43..d6882caf 100755 --- a/ceph-osd/tests/00-setup +++ b/ceph-osd/tests/00-setup @@ -5,6 +5,8 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ + python-distro-info \ python-keystoneclient \ python-glanceclient \ + python-cinderclient \ python-novaclient diff --git a/ceph-osd/tests/017-basic-trusty-kilo b/ceph-osd/tests/017-basic-trusty-kilo old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/019-basic-vivid-kilo b/ceph-osd/tests/019-basic-vivid-kilo old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/README b/ceph-osd/tests/README index 643eb8dd..5280a1b2 100644 --- a/ceph-osd/tests/README +++ b/ceph-osd/tests/README @@ -1,6 +1,23 @@ This directory provides Amulet tests that focus on verification of ceph-osd deployments. +test_* methods are called in lexical sort order. + +Test name convention to ensure desired test order: + 1xx service and endpoint checks + 2xx relation checks + 3xx config checks + 4xx functional checks + 9xx restarts and other final checks + +Common uses of ceph-osd relations in bundle deployments: + - - "ceph-osd:mon" + - "ceph:osd" + +More detailed relations of ceph-osd service in a common deployment: + relations: +???? + In order to run tests, you'll need charm-tools installed (in addition to juju, of course): sudo add-apt-repository ppa:juju/stable diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index ca9d1592..010b3b1d 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -1,18 +1,22 @@ -#!/usr/bin/python import amulet +#!/usr/bin/python import amulet +import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa +from charmhelpers.contrib.openstack.amulet.utils import ( OpenStackAmuletUtils, DEBUG, - ERROR + #ERROR ) # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) +# Resource names and constants +IMAGE_NAME = 'cirros-image-1' + class CephOsdBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph-osd deployment.""" @@ -36,9 +40,12 @@ def _add_services(self): compatible with the local charm (e.g. stable or next). 
""" this_service = {'name': 'ceph-osd'} - other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, - {'name': 'keystone'}, {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, {'name': 'glance'}, + other_services = [{'name': 'ceph', 'units': 3}, + {'name': 'mysql'}, + {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, {'name': 'cinder'}] super(CephOsdBasicDeployment, self)._add_services(this_service, other_services) @@ -98,13 +105,20 @@ def _initialize_tests(self): self.mysql_sentry = self.d.sentry.unit['mysql/0'] self.keystone_sentry = self.d.sentry.unit['keystone/0'] self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] self.ceph0_sentry = self.d.sentry.unit['ceph/0'] self.ceph1_sentry = self.d.sentry.unit['ceph/1'] self.ceph2_sentry = self.d.sentry.unit['ceph/2'] self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] + u.log.debug('openstack release val: {}'.format( + self._get_openstack_release())) + u.log.debug('openstack release str: {}'.format( + self._get_openstack_release_string())) + + # Let things settle a bit original moving forward + time.sleep(30) # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, @@ -112,9 +126,20 @@ def _initialize_tests(self): password='openstack', tenant='admin') + # Authenticate admin with cinder endpoint + self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, + username='admin', + password='openstack', + tenant='admin') # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) + # Authenticate admin with nova endpoint + self.nova = u.authenticate_nova_user(self.keystone, + user='admin', + password='openstack', + tenant='admin') + # Create a demo tenant/role/user self.demo_tenant = 'demoTenant' self.demo_role = 'demoRole' @@ -141,39 +166,36 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def _ceph_osd_id(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa - - def test_services(self): + def test_100_services(self): """Verify the expected services are running on the service units.""" - commands = { - self.mysql_sentry: ['status mysql'], - self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], - self.nova_compute_sentry: ['status nova-compute'], - self.keystone_sentry: ['status keystone'], - self.glance_sentry: ['status glance-registry', - 'status glance-api'], - self.cinder_sentry: ['status cinder-api', - 'status cinder-scheduler', - 'status cinder-volume'] + ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + + services = { + self.mysql_sentry: ['mysql'], + self.rabbitmq_sentry: ['rabbitmq-server'], + self.nova_sentry: ['nova-compute'], + self.keystone_sentry: ['keystone'], + self.glance_sentry: ['glance-registry', + 'glance-api'], + self.cinder_sentry: ['cinder-api', + 'cinder-scheduler', + 'cinder-volume'], + self.ceph0_sentry: ceph_services, + self.ceph1_sentry: ceph_services, + self.ceph2_sentry: ceph_services } - ceph_services = ['status 
ceph-mon-all', - 'status ceph-mon id=`hostname`'] - ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) - ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) - ceph_osd_services = [ceph_osd0, ceph_osd1, 'status ceph-osd-all'] - ceph_services.extend(ceph_osd_services) - commands[self.ceph0_sentry] = ceph_services - commands[self.ceph1_sentry] = ceph_services - commands[self.ceph2_sentry] = ceph_services - commands[self.ceph_osd_sentry] = ceph_osd_services - - ret = u.validate_services(commands) + + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_ceph_osd_ceph_relation(self): + def test_200_ceph_osd_ceph_relation(self): """Verify the ceph-osd to ceph relation data.""" unit = self.ceph_osd_sentry relation = ['mon', 'ceph:osd'] @@ -186,8 +208,9 @@ def test_ceph_osd_ceph_relation(self): message = u.relation_error('ceph-osd to ceph', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph0_to_ceph_osd_relation(self): + def test_201_ceph0_to_ceph_osd_relation(self): """Verify the ceph0 to ceph-osd relation data.""" + u.log.debug('Checking ceph0:ceph-osd mon relation data...') unit = self.ceph0_sentry relation = ['osd', 'ceph-osd:mon'] expected = { @@ -203,8 +226,9 @@ def test_ceph0_to_ceph_osd_relation(self): message = u.relation_error('ceph0 to ceph-osd', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph1_to_ceph_osd_relation(self): + def test_202_ceph1_to_ceph_osd_relation(self): """Verify the ceph1 to ceph-osd relation data.""" + u.log.debug('Checking ceph1:ceph-osd mon relation data...') unit = self.ceph1_sentry relation = ['osd', 'ceph-osd:mon'] expected = { @@ -220,8 +244,9 @@ def test_ceph1_to_ceph_osd_relation(self): message = u.relation_error('ceph1 to ceph-osd', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph2_to_ceph_osd_relation(self): + def test_203_ceph2_to_ceph_osd_relation(self): """Verify the ceph2 to ceph-osd relation data.""" + u.log.debug('Checking ceph2:ceph-osd mon relation data...') unit = self.ceph2_sentry relation = ['osd', 'ceph-osd:mon'] expected = { @@ -237,8 +262,9 @@ def test_ceph2_to_ceph_osd_relation(self): message = u.relation_error('ceph2 to ceph-osd', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_config(self): + def test_300_ceph_osd_config(self): """Verify the data in the ceph config file.""" + u.log.debug('Checking ceph config file data...') unit = self.ceph_osd_sentry conf = '/etc/ceph/ceph.conf' expected = { @@ -271,11 +297,248 @@ def test_ceph_config(self): message = "ceph config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_restart_on_config_change(self): - """Verify the specified services are restarted on config change.""" - # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs - # aren't restarted by charm after config change. Should - # they be restarted? 
- if self._get_openstack_release() >= self.precise_essex: - u.log.error("Test not implemented") - return + def test_302_cinder_rbd_config(self): + """Verify the cinder config file data regarding ceph.""" + u.log.debug('Checking cinder (rbd) config file data...') + unit = self.cinder_sentry + conf = '/etc/cinder/cinder.conf' + expected = { + 'DEFAULT': { + 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "cinder (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_304_glance_rbd_config(self): + """Verify the glance config file data regarding ceph.""" + u.log.debug('Checking glance (rbd) config file data...') + unit = self.glance_sentry + conf = '/etc/glance/glance-api.conf' + config = { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' + } + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + config['stores'] = ('glance.store.filesystem.Store,' + 'glance.store.http.Store,' + 'glance.store.rbd.Store') + section = 'glance_store' + else: + # Juno or earlier + section = 'DEFAULT' + + expected = {section: config} + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "glance (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_306_nova_rbd_config(self): + """Verify the nova config file data regarding ceph.""" + u.log.debug('Checking nova (rbd) config file data...') + unit = self.nova_sentry + conf = '/etc/nova/nova.conf' + expected = { + 'libvirt': { + 'rbd_pool': 'nova', + 'rbd_user': 'nova-compute', + 'rbd_secret_uuid': u.not_null + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "nova (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_400_ceph_check_osd_pools(self): + """Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present.""" + u.log.debug('Checking pools on ceph units...') + + cmd = 'sudo ceph osd lspools' + pools = self.get_ceph_expected_pools() + results = [] + sentries = [ + self.ceph_osd_sentry, + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + + for sentry_unit in sentries: + output, code = sentry_unit.run(cmd) + results.append(output) + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + u.log.debug(msg) + if code != 0: + amulet.raise_status(amulet.FAIL, msg=msg) + + # Check for presence of all pools on this unit + for pool in pools: + if pool not in output: + msg = ('{} does not have pool: ' + '{}'.format(sentry_unit.info['unit_name'], pool)) + amulet.raise_status(amulet.FAIL, msg=msg) + u.log.debug('{} has the expected ' + 'pools.'.format(sentry_unit.info['unit_name'])) + + # Check that lspool produces the same output on all units + if len(set(results)) == 1: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') + else: + u.log.debug('Pool list results: {}'.format(results)) + msg = 'Pool list results are not identical on all ceph units.' 
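    # The identical-output comparison above leans on set semantics; a tiny
    # illustrative sketch (results strings are hypothetical, not captured):
    # results = ['0 rbd,1 cinder,2 glance,'] * 3
    # assert len(set(results)) == 1   # identical output collapses to one element
    # results[2] = '0 rbd,1 cinder,'
    # assert len(set(results)) > 1    # any divergent unit breaks the invariant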
+ amulet.raise_status(amulet.FAIL, msg=msg) + + def test_410_ceph_cinder_vol_create(self): + """Create and confirm a ceph-backed cinder volume, and inspect + ceph cinder pool object count as the volume is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + pools = self.get_ceph_expected_pools() + cinder_pool = pools['cinder'] + + # Check ceph cinder pool object count, disk space usage and pool name + u.log.debug('Checking ceph cinder pool original samples...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'cinder' + if pool_name != expected: + msg = ('Ceph pool {} unexpected name (actual, expected): ' + '{}. {}'.format(cinder_pool, pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed cinder volume + cinder_vol = u.create_cinder_volume(self.cinder) + + # Re-check ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool samples after volume create...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed cinder volume + u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") + + # Final check, ceph cinder pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph cinder pool after volume delete...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + cinder_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph cinder pool object count samples over time + ret = u.validate_ceph_pool_samples(obj_count_samples, + "cinder pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph cinder pool disk space usage samples over time + ret = u.validate_ceph_pool_samples(pool_size_samples, + "cinder pool disk usage") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_412_ceph_glance_image_create_delete(self): + """Create and confirm a ceph-backed glance image, and inspect + ceph glance pool object count as the image is created + and deleted.""" + sentry_unit = self.ceph0_sentry + obj_count_samples = [] + pool_size_samples = [] + pools = self.get_ceph_expected_pools() + glance_pool = pools['glance'] + + # Check ceph glance pool object count, disk space usage and pool name + u.log.debug('Checking ceph glance pool original samples...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + expected = 'glance' + if pool_name != expected: + msg = ('Ceph glance pool {} unexpected name (actual, ' + 'expected): {}. 
{}'.format(glance_pool, + pool_name, expected)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create ceph-backed glance image + glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + + # Re-check ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image create...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Delete ceph-backed glance image + u.delete_resource(self.glance.images, + glance_img, msg="glance image") + + # Final check, ceph glance pool object count and disk usage + time.sleep(10) + u.log.debug('Checking ceph glance pool samples after image delete...') + pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, + glance_pool) + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + # Validate ceph glance pool object count samples over time + ret = u.validate_ceph_pool_samples(obj_count_samples, + "glance pool object count") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Validate ceph glance pool disk space usage samples over time + ret = u.validate_ceph_pool_samples(pool_size_samples, + "glance pool disk usage") + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_499_ceph_cmds_exit_zero(self): + """Check basic functionality of ceph cli commands against + all ceph units.""" + sentry_units = [ + self.ceph_osd_sentry, + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + commands = [ + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml new file mode 100644 index 00000000..348aae57 --- /dev/null +++ b/ceph-osd/tests/tests.yaml @@ -0,0 +1,16 @@ +bootstrap: true +reset: true +virtualenv: true +makefile: + - lint + - test +sources: + - ppa:juju/stable +packages: + - amulet + - python-amulet + - python-distro-info + - python-keystoneclient + - python-glanceclient + - python-cinderclient + - python-novaclient From 8a9a523d01d53ded59c8f7c3947a5848d51691c9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 16:33:37 +0000 Subject: [PATCH 0713/2699] sync tests/charmhelpers --- .../charmhelpers/contrib/amulet/utils.py | 153 ++++++++++++- .../contrib/openstack/amulet/deployment.py | 47 +++- .../contrib/openstack/amulet/utils.py | 215 +++++++++++++++++- 3 files changed, 394 insertions(+), 21 deletions(-) diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 5088b1d1..f04cee25 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -15,13 +15,15 @@ # along with charm-helpers. If not, see . 
import ConfigParser +import distro_info import io import logging +import os import re +import six import sys import time - -import six +import urlparse class AmuletUtils(object): @@ -33,6 +35,7 @@ class AmuletUtils(object): def __init__(self, log_level=logging.ERROR): self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() def get_logger(self, name="amulet-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -70,15 +73,85 @@ def valid_url(self, url): else: return False - def validate_services(self, commands): - """Validate services. + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg - Verify the specified services are running on the corresponding + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding service units. - """ + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name == "rabbitmq-server"): + # init is systemd + cmd = 'sudo service {} status'.format(service_name) + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + + output, code = sentry_unit.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) if code != 0: return "command `{}` returned {}".format(cmd, str(code)) return None @@ -86,7 +159,11 @@ def validate_services(self, commands): def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) - config = ConfigParser.ConfigParser() + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = ConfigParser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -97,6 +174,9 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): @@ -105,10 +185,23 @@ def validate_config_data(self, sentry_unit, config_file, section, for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) - if config.get(section, k) != expected[k]: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, config.get(section, k), k, expected[k]) - return None + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + else: + # handle not_null, valid_ip boolean comparison methods, etc. + if v(actual): + return None + else: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) def _validate_dict_data(self, expected, actual): """Validate dictionary data. 
@@ -314,3 +407,41 @@ def relation_error(self, name, data): def endpoint_error(self, name, data): return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + self.log.debug('Ubuntu release list: {}'.format(_release_list)) + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. + + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + msg = ('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + self.log.debug(msg) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return msg + return None diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 11d49a7c..73e025d7 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -46,15 +46,22 @@ def _determine_branch_locations(self, other_services): stable or next branches for the other_services.""" base_charms = ['mysql', 'mongodb'] + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + if self.stable: for svc in other_services: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: for svc in other_services: if svc['name'] in base_charms: - temp = 'lp:charms/{}' - svc['location'] = temp.format(svc['name']) + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) else: temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, @@ -99,10 +106,13 @@ def _get_openstack_release(self): Return an integer representing the enum value of the openstack release. 
""" + # Must be ordered by OpenStack release (not by Ubuntu release): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.trusty_kilo, - self.utopic_juno, self.vivid_kilo) = range(10) + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) + releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, @@ -112,8 +122,10 @@ def _get_openstack_release(self): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -129,9 +141,30 @@ def _get_openstack_release_string(self): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self): + """Return a dict of expected ceph pools based on + Ubuntu-OpenStack release""" + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + return { + 'rbd': 0, + 'cinder': 1, + 'glance': 2 + } + else: + # Juno or earlier + return { + 'data': 0, + 'metadata': 1, + 'rbd': 2, + 'cinder': 3, + 'glance': 4 + } diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918a..ddadd9fa 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,17 +14,19 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import json import logging import os +import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client -import six - from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -37,7 +39,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +53,8 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +81,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. 
""" + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +98,7 @@ def validate_tenant_data(self, expected, actual): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +120,7 @@ def validate_role_data(self, expected, actual): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +141,7 @@ def validate_user_data(self, expected, actual): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +163,29 @@ def validate_flavor_data(self, expected, actual): Validate a list of actual flavors vs a list of expected flavors. """ + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +195,7 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,12 +203,21 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = 
keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, @@ -195,6 +225,7 @@ def authenticate_nova_user(self, keystone, user, password, tenant): def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +266,11 @@ def create_cirros_image(self, glance, image_name): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +290,8 @@ def delete_image(self, glance, image): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +314,11 @@ def create_instance(self, nova, image_name, instance_name, flavor): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +335,169 @@ def delete_instance(self, nova, instance): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): + """Add and confirm a new volume, 1GB by default.""" + self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) + vol_new = cinder.volumes.create(display_name=vol_name, size=1) + vol_id = vol_new.id + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Create volume status wait") + if ret: + return vol_new + else: + self.log.error('Failed to create volume.') + return None + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" + " | grep -o '[0-9]*'`".format(index + 1)) + return cmd + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, + pool_id, + obj_count, + kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + msg = ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + return msg + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None From 52784966f5f46c102fd92b94cf5525a21106f691 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Jun 2015 16:59:15 +0000 Subject: [PATCH 0714/2699] sync hooks/charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 4 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 157 ++++++++++++++++-- ceph-osd/hooks/charmhelpers/core/host.py | 32 +++- .../hooks/charmhelpers/core/services/base.py | 43 +++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 12 +- 6 files changed, 215 insertions(+), 35 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9d961cfb..95a79c2e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -247,7 +247,9 @@ def write(self): service('restart', 'nagios-nrpe-server') - for rid in relation_ids("local-monitors"): + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 86f805f1..117429fd 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -21,12 +21,14 @@ # Charm Helpers Developers from __future__ import print_function +from functools import wraps import os import json import yaml import subprocess import sys import errno +import tempfile from subprocess import CalledProcessError import six @@ -58,15 +60,17 @@ def unit_get(attribute): will cache the result of unit_get + 'test' for future calls. """ + @wraps(func) def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) try: return cache[key] except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res + pass # Drop out of the exception handler scope. 
+ res = func(*args, **kwargs) + cache[key] = res + return res return wrapper @@ -178,7 +182,7 @@ def local_unit(): def remote_unit(): """The remote unit for the current relation hook""" - return os.environ['JUJU_REMOTE_UNIT'] + return os.environ.get('JUJU_REMOTE_UNIT', None) def service_name(): @@ -250,6 +254,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + def keys(self): prev_keys = [] if self._prev_dict is not None: @@ -353,18 +363,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (list(relation_settings.items()) + list(kwargs.items())): - if v is None: - relation_cmd_line.append('{}='.format(k)) - else: - relation_cmd_line.append('{}={}'.format(k, v)) - subprocess.check_call(relation_cmd_line) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + @cached def relation_ids(reltype=None): """A list of relation_ids""" @@ -509,6 +550,11 @@ def unit_get(attribute): return None +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + def unit_private_ip(): """Get this unit's private IP address""" return unit_get('private-address') @@ -605,3 +651,94 @@ def action_fail(message): The results set by action_set are preserved.""" subprocess.check_call(['action-fail', message]) + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. 
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state + + If the status-set command is not found then assume this is juju < 1.23 and + return 'unknown' + """ + cmd = ['status-get'] + try: + raw_status = subprocess.check_output(cmd, universal_newlines=True) + status = raw_status.rstrip() + return status + except OSError as e: + if e.errno == errno.ENOENT: + return 'unknown' + else: + raise + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.iteritems(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 830822af..901a4cfe 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -24,6 +24,7 @@ import os import re import pwd +import glob import grp import random import string @@ -90,7 +91,7 @@ def service_available(service_name): ['service', service_name, 'status'], stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: - return 'unrecognized service' not in e.output + return b'unrecognized service' not in e.output else: return True @@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'): return None +def path_hash(path): + """ + Generate a hash checksum of all files matching 'path'. Standard wildcards + like '*' and '?' are supported, see documentation for the 'glob' module for + more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. 
@@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False): @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + '/etc/apache/sites-enabled/*': [ 'apache2' ] }) - def ceph_client_changed(): + def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. """ def wrap(f): def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) + checksums = {path: path_hash(path) for path in restart_map} f(*args, **kwargs) restarts = [] for path in restart_map: - if checksums[path] != file_hash(path): + if path_hash(path) != checksums[path]: restarts += restart_map[path] services_list = list(OrderedDict.fromkeys(restarts)) if not stopstart: diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index c5534e4c..98d344e1 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -15,9 +15,9 @@ # along with charm-helpers. If not, see . import os -import re import json -from collections import Iterable +from inspect import getargspec +from collections import Iterable, OrderedDict from charmhelpers.core import host from charmhelpers.core import hookenv @@ -119,7 +119,7 @@ def __init__(self, services=None): """ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') self._ready = None - self.services = {} + self.services = OrderedDict() for service in services or []: service_name = service['service'] self.services[service_name] = service @@ -132,8 +132,8 @@ def manage(self): if hook_name == 'stop': self.stop_services() else: - self.provide_data() self.reconfigure_services() + self.provide_data() cfg = hookenv.config() if cfg.implicit_save: cfg.save() @@ -145,15 +145,36 @@ def provide_data(self): A provider must have a `name` attribute, which indicates which relation to set data on, and a `provide_data()` method, which returns a dict of data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
""" - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 792e629a..9a1a2515 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -158,7 +158,7 @@ def filter_installed_packages(packages): def apt_cache(in_memory=True): """Build and return an apt cache""" - import apt_pkg + from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 93aae87b..ddc25b7e 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ def can_handle(self, source): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,7 +65,7 @@ def install(self, source, branch="master", dest=None): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, depth) except GitCommandError as e: raise UnhandledSource(e.message) except OSError as e: From 2b69ec40253f41c1c59438844058638bcbaa8e23 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Sat, 20 Jun 2015 11:28:43 +0000 Subject: [PATCH 0715/2699] Add ceph process ID checks; Limit ceph service status checks to upstart systems. 
--- ceph-proxy/tests/basic_deployment.py | 49 +++++++++--- .../charmhelpers/contrib/amulet/utils.py | 78 +++++++++++++++++++ 2 files changed, 116 insertions(+), 11 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 57a0b449..967bcee7 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -155,15 +155,31 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def test_100_services(self): + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_processes = { + 'ceph-mon': 1, + 'ceph-mon': 1, + 'ceph-osd': 2 + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph0_sentry: ceph_processes, + self.ceph1_sentry: ceph_processes, + self.ceph2_sentry: ceph_processes + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_102_services(self): """Verify the expected services are running on the service units.""" - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] services = { self.mysql_sentry: ['mysql'], @@ -175,11 +191,22 @@ def test_100_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph0_sentry: ceph_services, - self.ceph1_sentry: ceph_services, - self.ceph2_sentry: ceph_services } + if self._get_openstack_release() < self.vivid_kilo: + # For upstart systems only. Ceph services under systemd + # are checked by process name instead. + ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + services[self.ceph0_sentry] = ceph_services + services[self.ceph1_sentry] = ceph_services + services[self.ceph2_sentry] = ceph_services + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index f04cee25..33e82176 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -445,3 +445,81 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return msg return None + + def get_process_id_list(self, sentry_unit, process_name): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param process_name: Process name + :returns: List of process IDs + """ + cmd = 'pidof {}'.format(process_name) + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes): + """Construct a dict containing unit sentries, process names, and + process IDs.""" + pid_dict = {} + for sentry_unit, process_list in unit_processes.iteritems(): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list(sentry_unit, process) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + msg = ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + return msg + + for (e_sentry, e_proc_names) in expected.iteritems(): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + msg = ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + return msg + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + msg = ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + return msg + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + msg = ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + return msg + + a_pids_length = len(a_pids) + if e_pids_length != a_pids_length: + msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' + '{} ({})'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids_length, + a_pids)) + return msg + else: + msg = ('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids)) + self.log.debug(msg) + return None From 7cdc854aed93d05150f85006ee1a0ddf3d7273ba Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Sat, 20 Jun 2015 11:28:43 +0000 Subject: [PATCH 0716/2699] Add ceph process ID checks; Limit ceph service status checks to upstart systems. 
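
As in the ceph-proxy change, service-name checks are now limited to
upstart releases; from vivid onward the ceph daemons are verified by
process count instead. A rough sketch of the gate used in
test_102_services below, assuming the release enums provided by
OpenStackAmuletDeployment and `u` as the amulet utils instance:

    services = {self.mysql_sentry: ['mysql']}

    if self._get_openstack_release() < self.vivid_kilo:
        # Upstart only: named ceph jobs exist and can be queried
        # with `sudo status <job>` by validate_services_by_name().
        services[self.ceph0_sentry] = [
            'ceph-mon-all',
            'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
        ]

    ret = u.validate_services_by_name(services)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
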
--- ceph-mon/tests/basic_deployment.py | 49 +++++++++--- .../charmhelpers/contrib/amulet/utils.py | 78 +++++++++++++++++++ 2 files changed, 116 insertions(+), 11 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 57a0b449..967bcee7 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -155,15 +155,31 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def test_100_services(self): + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_processes = { + 'ceph-mon': 1, + 'ceph-mon': 1, + 'ceph-osd': 2 + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph0_sentry: ceph_processes, + self.ceph1_sentry: ceph_processes, + self.ceph2_sentry: ceph_processes + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_102_services(self): """Verify the expected services are running on the service units.""" - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] services = { self.mysql_sentry: ['mysql'], @@ -175,11 +191,22 @@ def test_100_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph0_sentry: ceph_services, - self.ceph1_sentry: ceph_services, - self.ceph2_sentry: ceph_services } + if self._get_openstack_release() < self.vivid_kilo: + # For upstart systems only. Ceph services under systemd + # are checked by process name instead. + ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + services[self.ceph0_sentry] = ceph_services + services[self.ceph1_sentry] = ceph_services + services[self.ceph2_sentry] = ceph_services + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index f04cee25..33e82176 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -445,3 +445,81 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return msg return None + + def get_process_id_list(self, sentry_unit, process_name): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param process_name: Process name + :returns: List of process IDs + """ + cmd = 'pidof {}'.format(process_name) + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes): + """Construct a dict containing unit sentries, process names, and + process IDs.""" + pid_dict = {} + for sentry_unit, process_list in unit_processes.iteritems(): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list(sentry_unit, process) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + msg = ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + return msg + + for (e_sentry, e_proc_names) in expected.iteritems(): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + msg = ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + return msg + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + msg = ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + return msg + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + msg = ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + return msg + + a_pids_length = len(a_pids) + if e_pids_length != a_pids_length: + msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' + '{} ({})'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids_length, + a_pids)) + return msg + else: + msg = ('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids)) + self.log.debug(msg) + return None From 6e1e4beb8890073774ed68fa10b8f2216ff6eaeb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Sat, 20 Jun 2015 15:27:28 +0000 Subject: [PATCH 0717/2699] update tests for process checking on systemd systems; misc cleanup. --- ceph-osd/Makefile | 2 +- ceph-osd/tests/basic_deployment.py | 53 ++++++++++--- .../charmhelpers/contrib/amulet/utils.py | 78 +++++++++++++++++++ 3 files changed, 121 insertions(+), 12 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 28e56143..c6c967d0 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -6,7 +6,7 @@ lint: hooks tests unit_tests @charm proof -unit_test: +test: @# Bundletester expects unit tests here. @echo Starting unit tests... 
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 010b3b1d..cb785957 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -166,15 +166,32 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def test_100_services(self): + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_processes = { + 'ceph-mon': 1, + 'ceph-mon': 1, + 'ceph-osd': 2 + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph0_sentry: ceph_processes, + self.ceph1_sentry: ceph_processes, + self.ceph2_sentry: ceph_processes, + self.ceph_osd_sentry: {'ceph-osd': 2} + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_102_services(self): """Verify the expected services are running on the service units.""" - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] services = { self.mysql_sentry: ['mysql'], @@ -186,17 +203,31 @@ def test_100_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph0_sentry: ceph_services, - self.ceph1_sentry: ceph_services, - self.ceph2_sentry: ceph_services } + if self._get_openstack_release() < self.vivid_kilo: + # For upstart systems only. Ceph services under systemd + # are checked by process name instead. + ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + services[self.ceph0_sentry] = ceph_services + services[self.ceph1_sentry] = ceph_services + services[self.ceph2_sentry] = ceph_services + + #!? add check for ceph_osd_sentry upstart services + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) def test_200_ceph_osd_ceph_relation(self): """Verify the ceph-osd to ceph relation data.""" + u.log.debug('Checking ceph-osd:ceph mon relation data...') unit = self.ceph_osd_sentry relation = ['mon', 'ceph:osd'] expected = { diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index f04cee25..33e82176 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -445,3 +445,81 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return msg return None + + def get_process_id_list(self, sentry_unit, process_name): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param process_name: Process name + :returns: List of process IDs + """ + cmd = 'pidof {}'.format(process_name) + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes): + """Construct a dict containing unit sentries, process names, and + process IDs.""" + pid_dict = {} + for sentry_unit, process_list in unit_processes.iteritems(): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list(sentry_unit, process) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + msg = ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + return msg + + for (e_sentry, e_proc_names) in expected.iteritems(): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + msg = ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + return msg + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + msg = ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + return msg + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + msg = ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + return msg + + a_pids_length = len(a_pids) + if e_pids_length != a_pids_length: + msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' + '{} ({})'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids_length, + a_pids)) + return msg + else: + msg = ('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids)) + self.log.debug(msg) + return None From 4cede9c4612403e147f5962b10cb520b9bbf6e91 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 15:04:24 +0000 Subject: [PATCH 0718/2699] Refactor osd pool checks for flexibility and reusability in other ceph-related charms (radosgw). 
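
get_ceph_pools() turns `ceph osd lspools` output into a name:id dict,
while get_ceph_expected_pools() now returns only the expected pool
names, optionally including the radosgw pools. A short sketch of how
the two fit together, assuming a deployed ceph unit sentry:

    # lspools output such as '0 data,1 metadata,2 rbd,3 cinder,4 glance,'
    # parses to {'data': 0, 'metadata': 1, 'rbd': 2, 'cinder': 3,
    # 'glance': 4}.
    pools = u.get_ceph_pools(self.ceph0_sentry)

    # Expected pool names vary with the OpenStack release; passing
    # radosgw=True appends '.rgw.root', '.rgw.control', '.rgw',
    # '.rgw.gc' and '.users.uid'.
    expected_pools = self.get_ceph_expected_pools()

    for expected_pool in expected_pools:
        if expected_pool not in pools:
            amulet.raise_status(
                amulet.FAIL,
                msg='{} does not have pool: {}'.format(
                    self.ceph0_sentry.info['unit_name'], expected_pool))
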
--- ceph-proxy/tests/basic_deployment.py | 45 +++++++++---------- .../charmhelpers/contrib/amulet/utils.py | 16 +++++++ .../contrib/openstack/amulet/deployment.py | 42 ++++++++++------- .../contrib/openstack/amulet/utils.py | 24 ++++++++++ 4 files changed, 88 insertions(+), 39 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 967bcee7..c31cad0e 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -406,8 +406,7 @@ def test_400_ceph_check_osd_pools(self): identical, and expect specific pools to be present.""" u.log.debug('Checking pools on ceph units...') - cmd = 'sudo ceph osd lspools' - pools = self.get_ceph_expected_pools() + expected_pools = self.get_ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -415,33 +414,31 @@ def test_400_ceph_check_osd_pools(self): self.ceph2_sentry ] + # Check for presence of expected pools on each unit + u.log.debug('Expected pools: {}'.format(expected_pools)) for sentry_unit in sentries: - output, code = sentry_unit.run(cmd) - results.append(output) - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - u.log.debug(msg) - if code != 0: - amulet.raise_status(amulet.FAIL, msg=msg) - - # Check for presence of all pools on this unit - for pool in pools: - if pool not in output: + pools = u.get_ceph_pools(sentry_unit) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], pool)) + '{}'.format(sentry_unit.info['unit_name'], + expected_pool)) amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has the expected ' + u.log.debug('{} has (at least) the expected ' 'pools.'.format(sentry_unit.info['unit_name'])) - # Check that lspool produces the same output on all units - if len(set(results)) == 1: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - else: + # Check that all units returned the same pool name:id data + ret = u.validate_list_of_identical_dicts(results) + if ret: u.log.debug('Pool list results: {}'.format(results)) - msg = 'Pool list results are not identical on all ceph units.' 
+ msg = ('{}; Pool list results are not identical on all ' + 'ceph units.'.format(ret)) amulet.raise_status(amulet.FAIL, msg=msg) + else: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect @@ -450,7 +447,7 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name @@ -507,7 +504,7 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 33e82176..c5fa1edc 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -523,3 +523,19 @@ def validate_unit_process_ids(self, expected, actual): a_pids)) self.log.debug(msg) return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + msg = 'Dicts within list are identical' + self.log.debug(msg) + else: + msg = 'Dicts within list are not identical' + return msg + + return None diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 73e025d7..84850bd3 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -149,22 +149,34 @@ def _get_openstack_release_string(self): else: return releases[self.series] - def get_ceph_expected_pools(self): - """Return a dict of expected ceph pools based on - Ubuntu-OpenStack release""" + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools based on Ubuntu-OpenStack + release and whether ceph radosgw is flagged as present or not.""" + if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later - return { - 'rbd': 0, - 'cinder': 1, - 'glance': 2 - } + pools = [ + 'rbd', + 'cinder', + 'glance' + ] else: # Juno or earlier - return { - 'data': 0, - 'metadata': 1, - 'rbd': 2, - 'cinder': 3, - 'glance': 4 - } + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index ddadd9fa..84bf84ce 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -445,6 +445,30 @@ def get_ceph_osd_id_cmd(self, index): " | grep -o '[0-9]*'`".format(index + 1)) return cmd + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a 
single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + def get_ceph_df(self, sentry_unit): """Return dict of ceph df json output, including ceph pool state. From 98c2422dc82a80ae6d84029fdaeb50b8392593ae Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 15:04:24 +0000 Subject: [PATCH 0719/2699] Refactor osd pool checks for flexibility and reusability in other ceph-related charms (radosgw). --- ceph-mon/tests/basic_deployment.py | 45 +++++++++---------- .../charmhelpers/contrib/amulet/utils.py | 16 +++++++ .../contrib/openstack/amulet/deployment.py | 42 ++++++++++------- .../contrib/openstack/amulet/utils.py | 24 ++++++++++ 4 files changed, 88 insertions(+), 39 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 967bcee7..c31cad0e 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -406,8 +406,7 @@ def test_400_ceph_check_osd_pools(self): identical, and expect specific pools to be present.""" u.log.debug('Checking pools on ceph units...') - cmd = 'sudo ceph osd lspools' - pools = self.get_ceph_expected_pools() + expected_pools = self.get_ceph_expected_pools() results = [] sentries = [ self.ceph0_sentry, @@ -415,33 +414,31 @@ def test_400_ceph_check_osd_pools(self): self.ceph2_sentry ] + # Check for presence of expected pools on each unit + u.log.debug('Expected pools: {}'.format(expected_pools)) for sentry_unit in sentries: - output, code = sentry_unit.run(cmd) - results.append(output) - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - u.log.debug(msg) - if code != 0: - amulet.raise_status(amulet.FAIL, msg=msg) - - # Check for presence of all pools on this unit - for pool in pools: - if pool not in output: + pools = u.get_ceph_pools(sentry_unit) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], pool)) + '{}'.format(sentry_unit.info['unit_name'], + expected_pool)) amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has the expected ' + u.log.debug('{} has (at least) the expected ' 'pools.'.format(sentry_unit.info['unit_name'])) - # Check that lspool produces the same output on all units - if len(set(results)) == 1: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - else: + # Check that all units returned the same pool name:id data + ret = u.validate_list_of_identical_dicts(results) + if ret: u.log.debug('Pool list results: {}'.format(results)) - msg = 'Pool list results are not identical on all ceph units.' 
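The get_ceph_pools helper synced above turns the comma-separated
`ceph osd lspools` output of this era into a name:id dict.  The parsing
step in isolation, using the sample output from the code comment:

    output = '0 data,1 metadata,2 rbd,3 cinder,4 glance,'

    pools = {}
    for entry in output.split(','):
        parts = entry.split(' ')
        if len(parts) == 2:        # the trailing comma yields an empty entry
            pool_id, pool_name = parts
            pools[pool_name] = int(pool_id)

    assert pools == {'data': 0, 'metadata': 1, 'rbd': 2,
                     'cinder': 3, 'glance': 4}

The length-2 guard exists only to skip that empty trailing entry; every
real entry is an id, one space, and a pool name.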
+ msg = ('{}; Pool list results are not identical on all ' + 'ceph units.'.format(ret)) amulet.raise_status(amulet.FAIL, msg=msg) + else: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect @@ -450,7 +447,7 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name @@ -507,7 +504,7 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 33e82176..c5fa1edc 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -523,3 +523,19 @@ def validate_unit_process_ids(self, expected, actual): a_pids)) self.log.debug(msg) return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + msg = 'Dicts within list are identical' + self.log.debug(msg) + else: + msg = 'Dicts within list are not identical' + return msg + + return None diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 73e025d7..84850bd3 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -149,22 +149,34 @@ def _get_openstack_release_string(self): else: return releases[self.series] - def get_ceph_expected_pools(self): - """Return a dict of expected ceph pools based on - Ubuntu-OpenStack release""" + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools based on Ubuntu-OpenStack + release and whether ceph radosgw is flagged as present or not.""" + if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later - return { - 'rbd': 0, - 'cinder': 1, - 'glance': 2 - } + pools = [ + 'rbd', + 'cinder', + 'glance' + ] else: # Juno or earlier - return { - 'data': 0, - 'metadata': 1, - 'rbd': 2, - 'cinder': 3, - 'glance': 4 - } + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index ddadd9fa..84bf84ce 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -445,6 +445,30 @@ def get_ceph_osd_id_cmd(self, index): " | grep -o '[0-9]*'`".format(index + 1)) return cmd + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + 
pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + def get_ceph_df(self, sentry_unit): """Return dict of ceph df json output, including ceph pool state. From af9bda23f5e0122a4f91c1c4aea48e5089456895 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 16:11:44 +0000 Subject: [PATCH 0720/2699] Refactor osd pool checks for flexibility and reusability in other ceph-related charms (radosgw). --- ceph-osd/tests/basic_deployment.py | 45 +++++++++---------- .../charmhelpers/contrib/amulet/utils.py | 16 +++++++ .../contrib/openstack/amulet/deployment.py | 42 ++++++++++------- .../contrib/openstack/amulet/utils.py | 24 ++++++++++ 4 files changed, 88 insertions(+), 39 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index cb785957..0e485f33 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -397,8 +397,7 @@ def test_400_ceph_check_osd_pools(self): identical, and expect specific pools to be present.""" u.log.debug('Checking pools on ceph units...') - cmd = 'sudo ceph osd lspools' - pools = self.get_ceph_expected_pools() + expected_pools = self.get_ceph_expected_pools() results = [] sentries = [ self.ceph_osd_sentry, @@ -407,33 +406,31 @@ def test_400_ceph_check_osd_pools(self): self.ceph2_sentry ] + # Check for presence of expected pools on each unit + u.log.debug('Expected pools: {}'.format(expected_pools)) for sentry_unit in sentries: - output, code = sentry_unit.run(cmd) - results.append(output) - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - u.log.debug(msg) - if code != 0: - amulet.raise_status(amulet.FAIL, msg=msg) - - # Check for presence of all pools on this unit - for pool in pools: - if pool not in output: + pools = u.get_ceph_pools(sentry_unit) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], pool)) + '{}'.format(sentry_unit.info['unit_name'], + expected_pool)) amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has the expected ' + u.log.debug('{} has (at least) the expected ' 'pools.'.format(sentry_unit.info['unit_name'])) - # Check that lspool produces the same output on all units - if len(set(results)) == 1: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - else: + # Check that all units returned the same pool name:id data + ret = u.validate_list_of_identical_dicts(results) + if ret: u.log.debug('Pool list results: {}'.format(results)) - msg = 'Pool list results are not identical on all ceph units.' 
+ msg = ('{}; Pool list results are not identical on all ' + 'ceph units.'.format(ret)) amulet.raise_status(amulet.FAIL, msg=msg) + else: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect @@ -442,7 +439,7 @@ def test_410_ceph_cinder_vol_create(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) cinder_pool = pools['cinder'] # Check ceph cinder pool object count, disk space usage and pool name @@ -499,7 +496,7 @@ def test_412_ceph_glance_image_create_delete(self): sentry_unit = self.ceph0_sentry obj_count_samples = [] pool_size_samples = [] - pools = self.get_ceph_expected_pools() + pools = u.get_ceph_pools(self.ceph0_sentry) glance_pool = pools['glance'] # Check ceph glance pool object count, disk space usage and pool name diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 33e82176..c5fa1edc 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -523,3 +523,19 @@ def validate_unit_process_ids(self, expected, actual): a_pids)) self.log.debug(msg) return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + msg = 'Dicts within list are identical' + self.log.debug(msg) + else: + msg = 'Dicts within list are not identical' + return msg + + return None diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 73e025d7..84850bd3 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -149,22 +149,34 @@ def _get_openstack_release_string(self): else: return releases[self.series] - def get_ceph_expected_pools(self): - """Return a dict of expected ceph pools based on - Ubuntu-OpenStack release""" + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools based on Ubuntu-OpenStack + release and whether ceph radosgw is flagged as present or not.""" + if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later - return { - 'rbd': 0, - 'cinder': 1, - 'glance': 2 - } + pools = [ + 'rbd', + 'cinder', + 'glance' + ] else: # Juno or earlier - return { - 'data': 0, - 'metadata': 1, - 'rbd': 2, - 'cinder': 3, - 'glance': 4 - } + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index ddadd9fa..84bf84ce 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -445,6 +445,30 @@ def get_ceph_osd_id_cmd(self, index): " | grep -o '[0-9]*'`".format(index + 1)) return cmd + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + 
pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + def get_ceph_df(self, sentry_unit): """Return dict of ceph df json output, including ceph pool state. From e8039c5f42c185fb6e48c418cb7d74318dda4c45 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:36:31 +0000 Subject: [PATCH 0721/2699] amulet tests - update test coverage, enable vivid, prep for wily add basic functional checks sync tests/charmhelpers --- ceph-radosgw/Makefile | 12 +- ceph-radosgw/metadata.yaml | 2 +- ceph-radosgw/tests/00-setup | 5 +- ceph-radosgw/tests/017-basic-trusty-kilo | 0 ceph-radosgw/tests/019-basic-vivid-kilo | 0 ceph-radosgw/tests/basic_deployment.py | 290 +++++++++++++++--- .../charmhelpers/contrib/amulet/utils.py | 238 +++++++++++++- .../contrib/openstack/amulet/deployment.py | 40 ++- .../contrib/openstack/amulet/utils.py | 251 ++++++++++++++- 9 files changed, 769 insertions(+), 69 deletions(-) mode change 100644 => 100755 ceph-radosgw/tests/017-basic-trusty-kilo mode change 100644 => 100755 ceph-radosgw/tests/019-basic-vivid-kilo diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index eab0e2ea..ef348086 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -2,17 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks tests unit_tests + @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ + hooks tests unit_tests @charm proof -unit_test: +test: + @# Bundletester expects unit tests here. + @echo Starting unit tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests -test: +functional_test: @echo Starting Amulet tests... - # coreycb note: The -v should only be temporary until Amulet sends - # raise_status() messages to stderr: - # https://bugs.launchpad.net/amulet/+bug/1320357 @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 07ae7e70..a798ddef 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -7,7 +7,7 @@ description: | . This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols for object storage. 
-categories: +tags: - misc requires: mon: diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/00-setup index 1243ec43..d6a3c75a 100755 --- a/ceph-radosgw/tests/00-setup +++ b/ceph-radosgw/tests/00-setup @@ -5,6 +5,9 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ + python-cinderclient \ + python-distro-info \ python-keystoneclient \ python-glanceclient \ - python-novaclient + python-novaclient \ + python-swiftclient diff --git a/ceph-radosgw/tests/017-basic-trusty-kilo b/ceph-radosgw/tests/017-basic-trusty-kilo old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/019-basic-vivid-kilo b/ceph-radosgw/tests/019-basic-vivid-kilo old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index dd0d0ef1..c09125a8 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -1,13 +1,14 @@ #!/usr/bin/python import amulet +import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa +from charmhelpers.contrib.openstack.amulet.utils import ( OpenStackAmuletUtils, DEBUG, - ERROR + #ERROR ) # Use DEBUG to turn on debug logging @@ -35,9 +36,12 @@ def _add_services(self): compatible with the local charm (e.g. stable or next). """ this_service = {'name': 'ceph-radosgw'} - other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, - {'name': 'keystone'}, {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, {'name': 'glance'}, + other_services = [{'name': 'ceph', 'units': 3}, + {'name': 'mysql'}, + {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, {'name': 'cinder'}] super(CephRadosGwBasicDeployment, self)._add_services(this_service, other_services) @@ -92,13 +96,20 @@ def _initialize_tests(self): self.mysql_sentry = self.d.sentry.unit['mysql/0'] self.keystone_sentry = self.d.sentry.unit['keystone/0'] self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] self.ceph0_sentry = self.d.sentry.unit['ceph/0'] self.ceph1_sentry = self.d.sentry.unit['ceph/1'] self.ceph2_sentry = self.d.sentry.unit['ceph/2'] self.ceph_radosgw_sentry = self.d.sentry.unit['ceph-radosgw/0'] + u.log.debug('openstack release val: {}'.format( + self._get_openstack_release())) + u.log.debug('openstack release str: {}'.format( + self._get_openstack_release_string())) + + # Let things settle a bit original moving forward + time.sleep(30) # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, @@ -135,39 +146,76 @@ def _initialize_tests(self): 'password', self.demo_tenant) - def _ceph_osd_id(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa + # Authenticate radosgw user using swift api + ks_obj_rel = self.keystone_sentry.relation('identity-service', + 'ceph-radosgw:identity-service') + self.swift = u.authenticate_swift_user(self.keystone, + user=ks_obj_rel['service_username'], + password=ks_obj_rel['service_password'], + 
tenant=ks_obj_rel['service_tenant']) + + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_processes = { + 'ceph-mon': 1, + 'ceph-mon': 1, + 'ceph-osd': 2 + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph_radosgw_sentry: {'radosgw': 1}, + self.ceph0_sentry: ceph_processes, + self.ceph1_sentry: ceph_processes, + self.ceph2_sentry: ceph_processes + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) - def test_services(self): + def test_102_services(self): """Verify the expected services are running on the service units.""" - ceph_services = ['status ceph-mon-all', - 'status ceph-mon id=`hostname`'] - commands = { - self.mysql_sentry: ['status mysql'], - self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], - self.nova_compute_sentry: ['status nova-compute'], - self.keystone_sentry: ['status keystone'], - self.glance_sentry: ['status glance-registry', - 'status glance-api'], - self.cinder_sentry: ['status cinder-api', - 'status cinder-scheduler', - 'status cinder-volume'], - self.ceph_radosgw_sentry: ['status radosgw-all'] + + services = { + self.mysql_sentry: ['mysql'], + self.rabbitmq_sentry: ['rabbitmq-server'], + self.nova_sentry: ['nova-compute'], + self.keystone_sentry: ['keystone'], + self.glance_sentry: ['glance-registry', + 'glance-api'], + self.cinder_sentry: ['cinder-api', + 'cinder-scheduler', + 'cinder-volume'], } - ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) - ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) - ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all']) - commands[self.ceph0_sentry] = ceph_services - commands[self.ceph1_sentry] = ceph_services - commands[self.ceph2_sentry] = ceph_services - - ret = u.validate_services(commands) + + if self._get_openstack_release() < self.vivid_kilo: + # For upstart systems only. Ceph services under systemd + # are checked by process name instead. 
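test_100_ceph_processes above builds an expected process-count map per unit
and validates it with pidof via get_unit_process_ids and
validate_unit_process_ids.  Incidentally, the ceph_processes literal
repeats the 'ceph-mon' key with the same value; Python keeps the last
occurrence, so the dict is simply {'ceph-mon': 1, 'ceph-osd': 2}.  The core
of the check, reduced to a single unit (sentry API as used elsewhere in
these tests):

    expected = {'ceph-mon': 1, 'ceph-osd': 2}

    def process_pids(sentry_unit, name):
        output, code = sentry_unit.run('pidof {}'.format(name))
        if code != 0:
            raise RuntimeError('pidof {} failed on {}'.format(
                name, sentry_unit.info['unit_name']))
        return str(output).split()

    def check_process_counts(sentry_unit, expected):
        for name, count in expected.items():
            pids = process_pids(sentry_unit, name)
            if len(pids) != count:
                return '{}: expected {} x {}, got {}'.format(
                    sentry_unit.info['unit_name'], count, name, pids)
        return None        # None means all counts matched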
+ ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + services[self.ceph0_sentry] = ceph_services + services[self.ceph1_sentry] = ceph_services + services[self.ceph2_sentry] = ceph_services + services[self.ceph_radosgw_sentry] = ['radosgw-all'] + + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_ceph_radosgw_ceph_relation(self): + def test_200_ceph_radosgw_ceph_relation(self): """Verify the ceph-radosgw to ceph relation data.""" + u.log.debug('Checking ceph-radosgw:mon to ceph:radosgw ' + 'relation data...') unit = self.ceph_radosgw_sentry relation = ['mon', 'ceph:radosgw'] expected = { @@ -179,8 +227,9 @@ def test_ceph_radosgw_ceph_relation(self): message = u.relation_error('ceph-radosgw to ceph', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph0_ceph_radosgw_relation(self): + def test_201_ceph0_ceph_radosgw_relation(self): """Verify the ceph0 to ceph-radosgw relation data.""" + u.log.debug('Checking ceph0:radosgw radosgw:mon relation data...') unit = self.ceph0_sentry relation = ['radosgw', 'ceph-radosgw:mon'] expected = { @@ -196,8 +245,9 @@ def test_ceph0_ceph_radosgw_relation(self): message = u.relation_error('ceph0 to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph1_ceph_radosgw_relation(self): + def test_202_ceph1_ceph_radosgw_relation(self): """Verify the ceph1 to ceph-radosgw relation data.""" + u.log.debug('Checking ceph1:radosgw ceph-radosgw:mon relation data...') unit = self.ceph1_sentry relation = ['radosgw', 'ceph-radosgw:mon'] expected = { @@ -213,8 +263,9 @@ def test_ceph1_ceph_radosgw_relation(self): message = u.relation_error('ceph1 to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph2_ceph_radosgw_relation(self): + def test_203_ceph2_ceph_radosgw_relation(self): """Verify the ceph2 to ceph-radosgw relation data.""" + u.log.debug('Checking ceph2:radosgw ceph-radosgw:mon relation data...') unit = self.ceph2_sentry relation = ['radosgw', 'ceph-radosgw:mon'] expected = { @@ -230,8 +281,10 @@ def test_ceph2_ceph_radosgw_relation(self): message = u.relation_error('ceph2 to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_radosgw_keystone_relation(self): + def test_204_ceph_radosgw_keystone_relation(self): """Verify the ceph-radosgw to keystone relation data.""" + u.log.debug('Checking ceph-radosgw to keystone id service ' + 'relation data...') unit = self.ceph_radosgw_sentry relation = ['identity-service', 'keystone:identity-service'] expected = { @@ -249,8 +302,10 @@ def test_ceph_radosgw_keystone_relation(self): message = u.relation_error('ceph-radosgw to keystone', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_keystone_ceph_radosgw_relation(self): + def test_205_keystone_ceph_radosgw_relation(self): """Verify the keystone to ceph-radosgw relation data.""" + u.log.debug('Checking keystone to ceph-radosgw id service ' + 'relation data...') unit = self.keystone_sentry relation = ['identity-service', 'ceph-radosgw:identity-service'] expected = { @@ -273,8 +328,9 @@ def test_keystone_ceph_radosgw_relation(self): message = u.relation_error('keystone to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_ceph_config(self): + def test_300_ceph_radosgw_config(self): """Verify the data in the ceph config file.""" + 
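The upstart service names built below embed get_ceph_osd_id_cmd output,
which is not an id but a backquoted shell fragment; the actual id is
resolved on the unit when the status command runs.  Composing the command
for the first OSD, using the helper as defined in the synced amulet utils:

    def get_ceph_osd_id_cmd(index):
        # Grab the Nth ceph-osd instance name from initctl and strip it
        # down to the numeric id.
        return ("`initctl list | grep 'ceph-osd ' "
                "| awk 'NR=={} {{ print $2 }}'"
                " | grep -o '[0-9]*'`".format(index + 1))

    cmd = 'sudo status ceph-osd id={}'.format(get_ceph_osd_id_cmd(0))
    # -> sudo status ceph-osd id=`initctl list | grep 'ceph-osd ' | ...`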
u.log.debug('Checking ceph config file data...') unit = self.ceph_radosgw_sentry conf = '/etc/ceph/ceph.conf' keystone_sentry = self.keystone_sentry @@ -309,11 +365,153 @@ def test_ceph_config(self): message = "ceph config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_restart_on_config_change(self): - """Verify the specified services are restarted on config change.""" - # NOTE(coreycb): Test not implemented but should it be? ceph-radosgw - # svcs aren't restarted by charm after config change - # Should they be restarted? - if self._get_openstack_release() >= self.precise_essex: - u.log.error("Test not implemented") - return + def test_302_cinder_rbd_config(self): + """Verify the cinder config file data regarding ceph.""" + u.log.debug('Checking cinder (rbd) config file data...') + unit = self.cinder_sentry + conf = '/etc/cinder/cinder.conf' + expected = { + 'DEFAULT': { + 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "cinder (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_304_glance_rbd_config(self): + """Verify the glance config file data regarding ceph.""" + u.log.debug('Checking glance (rbd) config file data...') + unit = self.glance_sentry + conf = '/etc/glance/glance-api.conf' + config = { + 'default_store': 'rbd', + 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', + 'rbd_store_user': 'glance', + 'rbd_store_pool': 'glance', + 'rbd_store_chunk_size': '8' + } + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + config['stores'] = ('glance.store.filesystem.Store,' + 'glance.store.http.Store,' + 'glance.store.rbd.Store') + section = 'glance_store' + else: + # Juno or earlier + section = 'DEFAULT' + + expected = {section: config} + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "glance (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_306_nova_rbd_config(self): + """Verify the nova config file data regarding ceph.""" + u.log.debug('Checking nova (rbd) config file data...') + unit = self.nova_sentry + conf = '/etc/nova/nova.conf' + expected = { + 'libvirt': { + 'rbd_pool': 'nova', + 'rbd_user': 'nova-compute', + 'rbd_secret_uuid': u.not_null + } + } + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "nova (rbd) config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_400_ceph_check_osd_pools(self): + """Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present.""" + u.log.debug('Checking pools on ceph units...') + + expected_pools = self.get_ceph_expected_pools(radosgw=True) + results = [] + sentries = [ + self.ceph_radosgw_sentry, + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + + # Check for presence of expected pools on each unit + u.log.debug('Expected pools: {}'.format(expected_pools)) + for sentry_unit in sentries: + pools = u.get_ceph_pools(sentry_unit) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: + msg = ('{} does not have pool: ' + '{}'.format(sentry_unit.info['unit_name'], + expected_pool)) + amulet.raise_status(amulet.FAIL, msg=msg) + u.log.debug('{} has (at least) the expected ' + 
'pools.'.format(sentry_unit.info['unit_name'])) + + # Check that all units returned the same pool name:id data + ret = u.validate_list_of_identical_dicts(results) + if ret: + u.log.debug('Pool list results: {}'.format(results)) + msg = ('{}; Pool list results are not identical on all ' + 'ceph units.'.format(ret)) + amulet.raise_status(amulet.FAIL, msg=msg) + else: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') + + def test_402_swift_api_connection(self): + """Simple api call to confirm basic service functionality""" + u.log.debug('Checking basic radosgw functionality via swift api...') + headers, containers = self.swift.get_account() + assert('content-type' in headers.keys()) + assert(containers == []) + + def test_498_radosgw_cmds_exit_zero(self): + """Check basic functionality of radosgw cli commands against + the ceph_radosgw unit.""" + sentry_units = [self.ceph_radosgw_sentry] + commands = [ + 'sudo radosgw-admin regions list', + 'sudo radosgw-admin bucket list', + 'sudo radosgw-admin zone list', + 'sudo radosgw-admin metadata list', + 'sudo radosgw-admin gc list' + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_499_ceph_cmds_exit_zero(self): + """Check basic functionality of ceph cli commands against + all ceph units.""" + sentry_units = [ + self.ceph_radosgw_sentry, + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] + commands = [ + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # Note(beisner): need to add basic object store functional checks. + + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index f61c2e8b..c5fa1edc 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -15,13 +15,15 @@ # along with charm-helpers. If not, see . import ConfigParser +import distro_info import io import logging +import os import re +import six import sys import time - -import six +import urlparse class AmuletUtils(object): @@ -33,6 +35,7 @@ class AmuletUtils(object): def __init__(self, log_level=logging.ERROR): self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() def get_logger(self, name="amulet-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -70,12 +73,44 @@ def valid_url(self, url): else: return False - def validate_services(self, commands): - """Validate services. + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. 
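test_402 above is the basic radosgw functional check: authenticate against
keystone with the radosgw service credentials taken from the relation, then
make one swift API call.  Outside amulet, the same smoke test boils down to
the following sketch (endpoint and credentials are hypothetical):

    import swiftclient

    conn = swiftclient.Connection(
        authurl='http://10.0.0.10:5000/v2.0',   # keystone, hypothetical
        user='s3_swift', key='secret',          # radosgw service creds
        tenant_name='services', auth_version='2.0')

    headers, containers = conn.get_account()
    assert 'content-type' in headers            # the gateway responded
    assert containers == []                     # fresh gateway, no containers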
+ + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg - Verify the specified services are running on the corresponding + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding service units. - """ + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + for k, v in six.iteritems(commands): for cmd in v: output, code = k.run(cmd) @@ -86,6 +121,41 @@ def validate_services(self, commands): return "command `{}` returned {}".format(cmd, str(code)) return None + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. + + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name == "rabbitmq-server"): + # init is systemd + cmd = 'sudo service {} status'.format(service_name) + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + + output, code = sentry_unit.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) @@ -104,6 +174,9 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. 
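validate_services_by_name above keys the status command off the Ubuntu
release: vivid is where systemd became the default init, and
rabbitmq-server is special-cased to the `service` form even on upstart
releases.  The selection logic on its own (release list abridged):

    UBUNTU_RELEASES = ['precise', 'quantal', 'raring', 'saucy', 'trusty',
                       'utopic', 'vivid', 'wily']   # ordered, abridged
    SYSTEMD_SWITCH = UBUNTU_RELEASES.index('vivid')

    def status_cmd(release, service_name):
        if (UBUNTU_RELEASES.index(release) >= SYSTEMD_SWITCH or
                service_name == 'rabbitmq-server'):
            return 'sudo service {} status'.format(service_name)
        return 'sudo status {}'.format(service_name)

    assert status_cmd('trusty', 'glance-api') == 'sudo status glance-api'
    assert status_cmd('vivid', 'glance-api') == \
        'sudo service glance-api status'
    assert status_cmd('trusty', 'rabbitmq-server') == \
        'sudo service rabbitmq-server status'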
""" + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) config = self._get_config(sentry_unit, config_file) if section != 'DEFAULT' and not config.has_section(section): @@ -112,10 +185,23 @@ def validate_config_data(self, sentry_unit, config_file, section, for k in expected.keys(): if not config.has_option(section, k): return "section [{}] is missing option {}".format(section, k) - if config.get(section, k) != expected[k]: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, config.get(section, k), k, expected[k]) - return None + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + else: + # handle not_null, valid_ip boolean comparison methods, etc. + if v(actual): + return None + else: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) def _validate_dict_data(self, expected, actual): """Validate dictionary data. @@ -321,3 +407,135 @@ def relation_error(self, name, data): def endpoint_error(self, name, data): return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + self.log.debug('Ubuntu release list: {}'.format(_release_list)) + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. + + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + msg = ('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + self.log.debug(msg) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return msg + return None + + def get_process_id_list(self, sentry_unit, process_name): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param process_name: Process name + :returns: List of process IDs + """ + cmd = 'pidof {}'.format(process_name) + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes): + """Construct a dict containing unit sentries, process names, and + process IDs.""" + pid_dict = {} + for sentry_unit, process_list in unit_processes.iteritems(): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list(sentry_unit, process) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + msg = ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + return msg + + for (e_sentry, e_proc_names) in expected.iteritems(): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + msg = ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + return msg + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + msg = ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + return msg + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + msg = ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + return msg + + a_pids_length = len(a_pids) + if e_pids_length != a_pids_length: + msg = ('PID count mismatch. 
{} ({}) expected, actual: {}, ' + '{} ({})'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids_length, + a_pids)) + return msg + else: + msg = ('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, + e_proc_name, + e_pids_length, + a_pids)) + self.log.debug(msg) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + msg = 'Dicts within list are identical' + self.log.debug(msg) + else: + msg = 'Dicts within list are not identical' + return msg + + return None diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702f..84850bd3 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -110,7 +110,8 @@ def _get_openstack_release(self): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ def _get_openstack_release(self): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,9 +141,42 @@ def _get_openstack_release_string(self): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools based on Ubuntu-OpenStack + release and whether ceph radosgw is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918a..bcdb4b3c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,16 +14,19 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
+import json import logging import os +import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client - -import six +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -37,7 +40,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +54,8 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +82,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +99,7 @@ def validate_tenant_data(self, expected, actual): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +121,7 @@ def validate_role_data(self, expected, actual): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +142,7 @@ def validate_user_data(self, expected, actual): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +164,29 @@ def validate_flavor_data(self, expected, actual): Validate a list of actual flavors vs a list of expected flavors. 
""" + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +196,7 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,19 +204,40 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +278,11 @@ def create_cirros_image(self, glance, image_name): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + 
self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +302,8 @@ def delete_image(self, glance, image): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +326,11 @@ def create_instance(self, nova, image_name, instance_name, flavor): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +347,193 @@ def delete_instance(self, nova, instance): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): + """Add and confirm a new volume, 1GB by default.""" + self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) + vol_new = cinder.volumes.create(display_name=vol_name, size=1) + vol_id = vol_new.id + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Create volume status wait") + if ret: + return vol_new + else: + self.log.error('Failed to create volume.') + return None + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. 
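delete_resource above and the resource_reaches_status helper it pairs with
share one polling idea: re-run a client list/get call on a fixed interval
until the expected condition holds or max_wait expires.  The pattern,
stripped of logging:

    import time

    def wait_for(predicate, max_wait=120, interval=4):
        """Poll predicate() until it is true or max_wait seconds pass."""
        for _ in range(max_wait // interval):
            if predicate():
                return True
            time.sleep(interval)
        return False

    # e.g. confirm a cinder volume becomes available:
    #   wait_for(lambda: cinder.volumes.get(vol_id).status == 'available')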
Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" + " | grep -o '[0-9]*'`".format(index + 1)) + return cmd + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + raise RuntimeError(msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, + pool_id, + obj_count, + kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. 
The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + msg = ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + return msg + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None From 5e83f7041e3d3939372c1f61ea762601efec41d2 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:46:04 +0000 Subject: [PATCH 0722/2699] sync hooks/charmhelpers --- .../charmhelpers/contrib/hahelpers/cluster.py | 29 +++- .../contrib/openstack/amulet/deployment.py | 8 +- .../contrib/openstack/amulet/utils.py | 125 +++++++++++++++++- .../charmhelpers/contrib/openstack/context.py | 2 +- .../charmhelpers/contrib/openstack/neutron.py | 10 +- .../charmhelpers/contrib/openstack/utils.py | 9 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 30 ++++- 7 files changed, 191 insertions(+), 22 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index c555d7aa..aa0b515d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -44,6 +44,7 @@ ERROR, WARNING, unit_get, + is_leader as juju_is_leader ) from charmhelpers.core.decorators import ( retry_on_exception, @@ -63,17 +64,30 @@ class CRMResourceNotFound(Exception): pass +class CRMDCNotFound(Exception): + pass + + def is_elected_leader(resource): """ Returns True if the charm executing this is the elected cluster leader. It relies on two mechanisms to determine leadership: - 1. If the charm is part of a corosync cluster, call corosync to + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to determine leadership. - 2. If the charm is not part of a corosync cluster, the leader is + 3. If the charm is not part of a corosync cluster, the leader is determined as being "the alive unit with the lowest unit numer". In other words, the oldest surviving unit. 
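The three leadership mechanisms enumerated above reduce to a short cascade,
condensed here from the hunk that follows (oldest_peer and peer_units are
the pre-existing charmhelpers fallbacks, shown as a sketch rather than the
exact function body):

    def is_elected_leader(resource):
        try:
            return juju_is_leader()          # 1. juju leader election
        except NotImplementedError:
            log('Juju leadership election feature not enabled'
                ', using fallback support', level=WARNING)
        if is_clustered():
            return is_crm_leader(resource)   # 2. ask corosync/pacemaker
        return oldest_peer(peer_units())     # 3. oldest surviving unit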
""" + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + if is_clustered(): if not is_crm_leader(resource): log('Deferring action to CRM leader.', level=INFO) @@ -106,8 +120,9 @@ def is_crm_dc(): status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) if not isinstance(status, six.text_type): status = six.text_type(status, "utf-8") - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): @@ -115,10 +130,14 @@ def is_crm_dc(): current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + return False -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702f..c664c9d0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -110,7 +110,8 @@ def _get_openstack_release(self): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ def _get_openstack_release(self): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,6 +141,7 @@ def _get_openstack_release_string(self): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918a..576bf0b5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,15 +16,15 @@ import logging import os +import six import time import urllib import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client -import six - from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -37,7 +37,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. 
""" def __init__(self, log_level=ERROR): @@ -51,6 +51,8 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +79,7 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +96,7 @@ def validate_tenant_data(self, expected, actual): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +118,7 @@ def validate_role_data(self, expected, actual): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +139,7 @@ def validate_user_data(self, expected, actual): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +161,20 @@ def validate_flavor_data(self, expected, actual): Validate a list of actual flavors vs a list of expected flavors. """ + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +184,7 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,12 +192,21 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, 
token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, @@ -195,6 +214,7 @@ def authenticate_nova_user(self, keystone, user, password, tenant): def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +255,11 @@ def create_cirros_image(self, glance, image_name): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +279,8 @@ def delete_image(self, glance, image): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +303,11 @@ def create_instance(self, nova, image_name, instance_name, flavor): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +324,90 @@ def delete_instance(self, nova, instance): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
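For example, a test can remove a glance image and block until the
deletion is confirmed (the client and image objects are illustrative):

    deleted = self.delete_resource(glance.images, image.id,
                                   msg='glance image')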
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 400eaf8e..ab400060 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -240,7 +240,7 @@ def __call__(self): if self.relation_prefix: password_setting = self.relation_prefix + '_password' - for rid in relation_ids('shared-db'): + for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index b3aa3d4c..f7b72352 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -172,14 +172,16 @@ def neutron_plugins(): 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', - 'nova-api-metadata'], + 'nova-api-metadata', + 'etcd'], 'packages': [[headers_package()] + determine_dkms_package(), ['calico-compute', 'bird', 'neutron-dhcp-agent', - 'nova-api-metadata']], - 'server_packages': ['neutron-server', 'calico-control'], - 'server_services': 
['neutron-server'] + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] }, 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index d795a358..7c16fdbd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -79,6 +79,7 @@ ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) @@ -91,6 +92,7 @@ ('2014.1', 'icehouse'), ('2014.2', 'juno'), ('2015.1', 'kilo'), + ('2015.2', 'liberty'), ]) # The ugly duckling @@ -113,6 +115,7 @@ ('2.2.0', 'juno'), ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), + ('2.3.0', 'liberty'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -321,6 +324,9 @@ def configure_installation_source(rel): 'kilo': 'trusty-updates/kilo', 'kilo/updates': 'trusty-updates/kilo', 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', } try: @@ -641,7 +647,8 @@ def _git_update_requirements(package_dir, reqs_dir): subprocess.check_call(cmd) except subprocess.CalledProcessError: package = os.path.basename(package_dir) - error_out("Error updating {} from global-requirements.txt".format(package)) + error_out("Error updating {} from " + "global-requirements.txt".format(package)) os.chdir(orig_dir) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 0d2ab4b4..901a4cfe 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -24,6 +24,7 @@ import os import re import pwd +import glob import grp import random import string @@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'): return None +def path_hash(path): + """ + Generate a hash checksum of all files matching 'path'. Standard wildcards + like '*' and '?' are supported, see documentation for the 'glob' module for + more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. @@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False): @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + '/etc/apache/sites-enabled/*': [ 'apache2' ] }) - def ceph_client_changed(): + def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. 
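The comparison is driven by the new path_hash() helper above, so the
apache2 entry resolves to a per-file checksum map along these lines
(file names and hash values illustrative):

    path_hash('/etc/apache/sites-enabled/*')
    # => {'/etc/apache/sites-enabled/a.conf': '1bb77...',
    #     '/etc/apache/sites-enabled/b.conf': '9f04d...'}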
""" def wrap(f): def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) + checksums = {path: path_hash(path) for path in restart_map} f(*args, **kwargs) restarts = [] for path in restart_map: - if checksums[path] != file_hash(path): + if path_hash(path) != checksums[path]: restarts += restart_map[path] services_list = list(OrderedDict.fromkeys(restarts)) if not stopstart: From 4f77ff0de3478054340a700cdf279c208beb0d58 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:48:19 +0000 Subject: [PATCH 0723/2699] sync hooks/charmhelpers --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 157 ++++++++++++++++-- ceph-proxy/hooks/charmhelpers/core/host.py | 32 +++- .../hooks/charmhelpers/core/services/base.py | 43 +++-- .../hooks/charmhelpers/fetch/__init__.py | 2 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 12 +- 5 files changed, 212 insertions(+), 34 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 86f805f1..117429fd 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -21,12 +21,14 @@ # Charm Helpers Developers from __future__ import print_function +from functools import wraps import os import json import yaml import subprocess import sys import errno +import tempfile from subprocess import CalledProcessError import six @@ -58,15 +60,17 @@ def unit_get(attribute): will cache the result of unit_get + 'test' for future calls. """ + @wraps(func) def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) try: return cache[key] except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res return wrapper @@ -178,7 +182,7 @@ def local_unit(): def remote_unit(): """The remote unit for the current relation hook""" - return os.environ['JUJU_REMOTE_UNIT'] + return os.environ.get('JUJU_REMOTE_UNIT', None) def service_name(): @@ -250,6 +254,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + def keys(self): prev_keys = [] if self._prev_dict is not None: @@ -353,18 +363,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (list(relation_settings.items()) + list(kwargs.items())): - if v is None: - relation_cmd_line.append('{}='.format(k)) - else: - relation_cmd_line.append('{}={}'.format(k, v)) - subprocess.check_call(relation_cmd_line) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. 
Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + @cached def relation_ids(reltype=None): """A list of relation_ids""" @@ -509,6 +550,11 @@ def unit_get(attribute): return None +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + def unit_private_ip(): """Get this unit's private IP address""" return unit_get('private-address') @@ -605,3 +651,94 @@ def action_fail(message): The results set by action_set are preserved.""" subprocess.check_call(['action-fail', message]) + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. 
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state + + If the status-set command is not found then assume this is juju < 1.23 and + return 'unknown' + """ + cmd = ['status-get'] + try: + raw_status = subprocess.check_output(cmd, universal_newlines=True) + status = raw_status.rstrip() + return status + except OSError as e: + if e.errno == errno.ENOENT: + return 'unknown' + else: + raise + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.iteritems(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 830822af..901a4cfe 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -24,6 +24,7 @@ import os import re import pwd +import glob import grp import random import string @@ -90,7 +91,7 @@ def service_available(service_name): ['service', service_name, 'status'], stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: - return 'unrecognized service' not in e.output + return b'unrecognized service' not in e.output else: return True @@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'): return None +def path_hash(path): + """ + Generate a hash checksum of all files matching 'path'. Standard wildcards + like '*' and '?' are supported, see documentation for the 'glob' module for + more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. 
@@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False): @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + '/etc/apache/sites-enabled/*': [ 'apache2' ] }) - def ceph_client_changed(): + def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. """ def wrap(f): def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) + checksums = {path: path_hash(path) for path in restart_map} f(*args, **kwargs) restarts = [] for path in restart_map: - if checksums[path] != file_hash(path): + if path_hash(path) != checksums[path]: restarts += restart_map[path] services_list = list(OrderedDict.fromkeys(restarts)) if not stopstart: diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index c5534e4c..98d344e1 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -15,9 +15,9 @@ # along with charm-helpers. If not, see . import os -import re import json -from collections import Iterable +from inspect import getargspec +from collections import Iterable, OrderedDict from charmhelpers.core import host from charmhelpers.core import hookenv @@ -119,7 +119,7 @@ def __init__(self, services=None): """ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') self._ready = None - self.services = {} + self.services = OrderedDict() for service in services or []: service_name = service['service'] self.services[service_name] = service @@ -132,8 +132,8 @@ def manage(self): if hook_name == 'stop': self.stop_services() else: - self.provide_data() self.reconfigure_services() + self.provide_data() cfg = hookenv.config() if cfg.implicit_save: cfg.save() @@ -145,15 +145,36 @@ def provide_data(self): A provider must have a `name` attribute, which indicates which relation to set data on, and a `provide_data()` method, which returns a dict of data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
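A provider using both optional parameters might look like this
(sketch only; the relation name and payload are illustrative):

    class DatabaseProvider(object):
        name = 'shared-db'

        def provide_data(self, remote_service, service_ready):
            # Tailor the payload to the connected remote service.
            return {'allowed-units': remote_service,
                    'ready': service_ready}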
""" - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 792e629a..9a1a2515 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -158,7 +158,7 @@ def filter_installed_packages(packages): def apt_cache(in_memory=True): """Build and return an apt cache""" - import apt_pkg + from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 93aae87b..ddc25b7e 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ def can_handle(self, source): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,7 +65,7 @@ def install(self, source, branch="master", dest=None): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, depth) except GitCommandError as e: raise UnhandledSource(e.message) except OSError as e: From 60a732b9640e4094cdccbfcd7e3395d260b6201d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:48:19 +0000 Subject: [PATCH 0724/2699] sync hooks/charmhelpers --- ceph-mon/hooks/charmhelpers/core/hookenv.py | 157 ++++++++++++++++-- ceph-mon/hooks/charmhelpers/core/host.py | 32 +++- .../hooks/charmhelpers/core/services/base.py | 43 +++-- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 12 +- 5 files changed, 212 insertions(+), 34 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 86f805f1..117429fd 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -21,12 +21,14 @@ # Charm Helpers Developers from __future__ import print_function +from functools import wraps import os import json import yaml import subprocess import sys 
import errno +import tempfile from subprocess import CalledProcessError import six @@ -58,15 +60,17 @@ def unit_get(attribute): will cache the result of unit_get + 'test' for future calls. """ + @wraps(func) def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) try: return cache[key] except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res return wrapper @@ -178,7 +182,7 @@ def local_unit(): def remote_unit(): """The remote unit for the current relation hook""" - return os.environ['JUJU_REMOTE_UNIT'] + return os.environ.get('JUJU_REMOTE_UNIT', None) def service_name(): @@ -250,6 +254,12 @@ def __getitem__(self, key): except KeyError: return (self._prev_dict or {})[key] + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + def keys(self): prev_keys = [] if self._prev_dict is not None: @@ -353,18 +363,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (list(relation_settings.items()) + list(kwargs.items())): - if v is None: - relation_cmd_line.append('{}='.format(k)) - else: - relation_cmd_line.append('{}={}'.format(k, v)) - subprocess.check_call(relation_cmd_line) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
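+ # The net effect is an invocation along these lines (path illustrative,
+ # produced by NamedTemporaryFile below; the file holds the settings
+ # dict dumped as YAML):
+ #   relation-set -r db:1 --file /tmp/tmpAbC123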
+ with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) # Flush cache of any relation-gets for local unit flush(local_unit()) +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + @cached def relation_ids(reltype=None): """A list of relation_ids""" @@ -509,6 +550,11 @@ def unit_get(attribute): return None +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + def unit_private_ip(): """Get this unit's private IP address""" return unit_get('private-address') @@ -605,3 +651,94 @@ def action_fail(message): The results set by action_set are preserved.""" subprocess.check_call(['action-fail', message]) + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. + message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state + + If the status-set command is not found then assume this is juju < 1.23 and + return 'unknown' + """ + cmd = ['status-get'] + try: + raw_status = subprocess.check_output(cmd, universal_newlines=True) + status = raw_status.rstrip() + return status + except OSError as e: + if e.errno == errno.ENOENT: + return 'unknown' + else: + raise + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju 
leader set value(s)""" + log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.iteritems(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 830822af..901a4cfe 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -24,6 +24,7 @@ import os import re import pwd +import glob import grp import random import string @@ -90,7 +91,7 @@ def service_available(service_name): ['service', service_name, 'status'], stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: - return 'unrecognized service' not in e.output + return b'unrecognized service' not in e.output else: return True @@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'): return None +def path_hash(path): + """ + Generate a hash checksum of all files matching 'path'. Standard wildcards + like '*' and '?' are supported, see documentation for the 'glob' module for + more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. @@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False): @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + '/etc/apache/sites-enabled/*': [ 'apache2' ] }) - def ceph_client_changed(): + def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. """ def wrap(f): def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) + checksums = {path: path_hash(path) for path in restart_map} f(*args, **kwargs) restarts = [] for path in restart_map: - if checksums[path] != file_hash(path): + if path_hash(path) != checksums[path]: restarts += restart_map[path] services_list = list(OrderedDict.fromkeys(restarts)) if not stopstart: diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index c5534e4c..98d344e1 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -15,9 +15,9 @@ # along with charm-helpers. If not, see . 
import os -import re import json -from collections import Iterable +from inspect import getargspec +from collections import Iterable, OrderedDict from charmhelpers.core import host from charmhelpers.core import hookenv @@ -119,7 +119,7 @@ def __init__(self, services=None): """ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') self._ready = None - self.services = {} + self.services = OrderedDict() for service in services or []: service_name = service['service'] self.services[service_name] = service @@ -132,8 +132,8 @@ def manage(self): if hook_name == 'stop': self.stop_services() else: - self.provide_data() self.reconfigure_services() + self.provide_data() cfg = hookenv.config() if cfg.implicit_save: cfg.save() @@ -145,15 +145,36 @@ def provide_data(self): A provider must have a `name` attribute, which indicates which relation to set data on, and a `provide_data()` method, which returns a dict of data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. """ - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 792e629a..9a1a2515 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -158,7 +158,7 @@ def filter_installed_packages(packages): def apt_cache(in_memory=True): """Build and return an apt cache""" - import apt_pkg + from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 93aae87b..ddc25b7e 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ def can_handle(self, source): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - repo = Repo.clone_from(source, 
dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,7 +65,7 @@ def install(self, source, branch="master", dest=None): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, depth) except GitCommandError as e: raise UnhandledSource(e.message) except OSError as e: From 48c33cc04b58f329799fdf06a86158004209bf08 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:49:23 +0000 Subject: [PATCH 0725/2699] sync tests/charmhelpers --- .../charmhelpers/contrib/openstack/amulet/utils.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84bf84ce..bcdb4b3c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -26,6 +26,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -223,6 +224,17 @@ def authenticate_nova_user(self, keystone, user, password, tenant): return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" self.log.debug('Creating glance image ({})...'.format(image_name)) From 7f14bdaa9b6ffbddd85960f7d5ae310cdd5e6741 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:49:23 +0000 Subject: [PATCH 0726/2699] sync tests/charmhelpers --- .../charmhelpers/contrib/openstack/amulet/utils.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84bf84ce..bcdb4b3c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -26,6 +26,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -223,6 +224,17 @@ def authenticate_nova_user(self, keystone, user, password, tenant): return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + 
endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" self.log.debug('Creating glance image ({})...'.format(image_name)) From 67ec2790d8d543782815286ae5de8ad15c5fd7d9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 22 Jun 2015 17:53:22 +0000 Subject: [PATCH 0727/2699] lint cleanup --- ceph-radosgw/tests/basic_deployment.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index c09125a8..a520a654 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -147,12 +147,14 @@ def _initialize_tests(self): self.demo_tenant) # Authenticate radosgw user using swift api - ks_obj_rel = self.keystone_sentry.relation('identity-service', - 'ceph-radosgw:identity-service') - self.swift = u.authenticate_swift_user(self.keystone, - user=ks_obj_rel['service_username'], - password=ks_obj_rel['service_password'], - tenant=ks_obj_rel['service_tenant']) + ks_obj_rel = self.keystone_sentry.relation( + 'identity-service', + 'ceph-radosgw:identity-service') + self.swift = u.authenticate_swift_user( + self.keystone, + user=ks_obj_rel['service_username'], + password=ks_obj_rel['service_password'], + tenant=ks_obj_rel['service_tenant']) def test_100_ceph_processes(self): """Verify that the expected service processes are running From e54881b1f1595ad21fbe22b4d8138bcd4174aeb9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 23 Jun 2015 23:25:26 +0000 Subject: [PATCH 0728/2699] re-sync tests/charmhelpers for cinder/glance helper updates --- ceph-proxy/tests/basic_deployment.py | 5 +- .../contrib/openstack/amulet/utils.py | 177 ++++++++++++------ 2 files changed, 120 insertions(+), 62 deletions(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index c31cad0e..abdb917b 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -14,9 +14,6 @@ # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) -# Resource names and constants -IMAGE_NAME = 'cirros-image-1' - class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" @@ -522,7 +519,7 @@ def test_412_ceph_glance_image_create_delete(self): amulet.raise_status(amulet.FAIL, msg=msg) # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + glance_img = u.create_cirros_image(self.glance, "cirros-image-1") # Re-check ceph glance pool object count and disk usage time.sleep(10) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcdb4b3c..bba8458e 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -236,8 +236,17 @@ def authenticate_swift_user(self, keystone, user, password, tenant): auth_version='2.0') def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - self.log.debug('Creating glance image ({})...'.format(image_name)) + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
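Typical test usage, as in basic_deployment.py (the image name is
arbitrary):

    image = u.create_cirros_image(self.glance, 'cirros-image-1')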
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -246,33 +255,51 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) return image @@ -283,22 +310,7 @@ def delete_image(self, glance, image): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.') self.log.debug('Deleting glance image ({})...'.format(image)) - num_before = len(list(glance.images.list())) - glance.images.delete(image) - - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True + return self.delete_resource(glance.images, image, msg='glance image') def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" @@ -331,22 +343,7 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') - return False - - return True + return self.delete_resource(nova.servers, instance, msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -362,19 +359,83 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): _keypair = nova.keypairs.create(name=keypair_name) return _keypair - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): - """Add and confirm a new volume, 1GB by default.""" - self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) - vol_new = cinder.volumes.create(display_name=vol_name, size=1) - vol_id = vol_new.id + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, or + optionally as a clone of an existing volume, or optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. 
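For example, to create a bootable volume from a glance image (the
cinder client and image id are illustrative):

    vol = u.create_cinder_volume(self.cinder, vol_name='demo-vol',
                                 vol_size=1, img_id=image.id)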
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input + if img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume from glance image ' + '({})...'.format(img_id)) + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + raise RuntimeError(msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + raise RuntimeError(msg) + + # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, expected_stat="available", - msg="Create volume status wait") - if ret: - return vol_new + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) else: - self.log.error('Failed to create volume.') - return None + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) + + return vol_new def delete_resource(self, resource, resource_id, msg="resource", max_wait=120): From df09bbdf23c275f43659109352b7f690091a30cf Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 23 Jun 2015 23:25:26 +0000 Subject: [PATCH 0729/2699] re-sync tests/charmhelpers for cinder/glance helper updates --- ceph-mon/tests/basic_deployment.py | 5 +- .../contrib/openstack/amulet/utils.py | 177 ++++++++++++------ 2 files changed, 120 insertions(+), 62 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index c31cad0e..abdb917b 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -14,9 +14,6 @@ # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) -# Resource names and constants -IMAGE_NAME = 'cirros-image-1' - class CephBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph deployment.""" @@ -522,7 +519,7 @@ def test_412_ceph_glance_image_create_delete(self): amulet.raise_status(amulet.FAIL, msg=msg) # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + glance_img = u.create_cirros_image(self.glance, "cirros-image-1") # Re-check ceph glance pool object count and disk usage time.sleep(10) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcdb4b3c..bba8458e 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -236,8 +236,17 @@ def authenticate_swift_user(self, keystone, user, password, tenant): auth_version='2.0') def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - self.log.debug('Creating glance image ({})...'.format(image_name)) + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
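(The docstring's parameter list continues below.) On the test side, the pointer returned by create_cirros_image() feeds straight into the generic cleanup helper rather than the deprecated delete_image(), so the test_412 flow shown above reduces to roughly this sketch, with the ceph pool sampling elided:

    glance_img = u.create_cirros_image(self.glance, 'cirros-image-1')
    # ... re-check ceph glance pool object count and disk usage ...
    u.delete_resource(self.glance.images, glance_img, msg='glance image')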
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -246,33 +255,51 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
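(The hunk continues below with the attribute re-validation.) One behavioural consequence: create_cirros_image() no longer returns None on failure, it raises. A caller that wants the classic amulet failure report can wrap it, as in this hypothetical test snippet:

    import amulet

    try:
        image = u.create_cirros_image(self.glance, 'cirros-image-1')
    except RuntimeError as e:
        amulet.raise_status(amulet.FAIL, msg=str(e))

Patch 0735 below goes a step further and moves the amulet.raise_status() calls into the helpers themselves.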
+ raise RuntimeError(msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) return image @@ -283,22 +310,7 @@ def delete_image(self, glance, image): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.') self.log.debug('Deleting glance image ({})...'.format(image)) - num_before = len(list(glance.images.list())) - glance.images.delete(image) - - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True + return self.delete_resource(glance.images, image, msg='glance image') def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" @@ -331,22 +343,7 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') - return False - - return True + return self.delete_resource(nova.servers, instance, msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -362,19 +359,83 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): _keypair = nova.keypairs.create(name=keypair_name) return _keypair - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): - """Add and confirm a new volume, 1GB by default.""" - self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) - vol_new = cinder.volumes.create(display_name=vol_name, size=1) - vol_id = vol_new.id + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, or + optionally as a clone of an existing volume, or optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. 
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input + if img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume from glance image ' + '({})...'.format(img_id)) + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + raise RuntimeError(msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + raise RuntimeError(msg) + + # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, expected_stat="available", - msg="Create volume status wait") - if ret: - return vol_new + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) else: - self.log.error('Failed to create volume.') - return None + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) + + return vol_new def delete_resource(self, resource, resource_id, msg="resource", max_wait=120): From 8a1d8a767373363666970490d1f8944b61bb769e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 23 Jun 2015 23:35:27 +0000 Subject: [PATCH 0730/2699] re-sync tests/charmhelpers for cinder/glance helper updates --- ceph-osd/tests/basic_deployment.py | 5 +- .../contrib/openstack/amulet/utils.py | 189 ++++++++++++------ 2 files changed, 132 insertions(+), 62 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 0e485f33..102ece16 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -14,9 +14,6 @@ # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) -# Resource names and constants -IMAGE_NAME = 'cirros-image-1' - class CephOsdBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceph-osd deployment.""" @@ -514,7 +511,7 @@ def test_412_ceph_glance_image_create_delete(self): amulet.raise_status(amulet.FAIL, msg=msg) # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, IMAGE_NAME) + glance_img = u.create_cirros_image(self.glance, 'cirros-image-1') # Re-check ceph glance pool object count and disk usage time.sleep(10) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84bf84ce..bba8458e 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -26,6 +26,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -223,9 +224,29 @@ def authenticate_nova_user(self, keystone, user, password, tenant): return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - self.log.debug('Creating glance image ({})...'.format(image_name)) + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
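(The docstring's parameter list continues below.) Patch 0730 also backfills authenticate_swift_user() into the ceph-osd copy; the ceph-mon and ceph-radosgw trees already carried it, as the context lines of the neighbouring diffs show. A typical use, assuming keystone credentials from the deployment fixtures:

    swift = u.authenticate_swift_user(self.keystone,
                                      user='demoUser',
                                      password='password',
                                      tenant='demoTenant')
    # swiftclient's Connection.get_account() returns a
    # (headers, list-of-containers) tuple.
    headers, containers = swift.get_account()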
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -234,33 +255,51 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) return image @@ -271,22 +310,7 @@ def delete_image(self, glance, image): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.') self.log.debug('Deleting glance image ({})...'.format(image)) - num_before = len(list(glance.images.list())) - glance.images.delete(image) - - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True + return self.delete_resource(glance.images, image, msg='glance image') def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" @@ -319,22 +343,7 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') - return False - - return True + return self.delete_resource(nova.servers, instance, msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -350,19 +359,83 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): _keypair = nova.keypairs.create(name=keypair_name) return _keypair - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): - """Add and confirm a new volume, 1GB by default.""" - self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) - vol_new = cinder.volumes.create(display_name=vol_name, size=1) - vol_id = vol_new.id + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, or + optionally as a clone of an existing volume, or optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. 
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input + if img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume from glance image ' + '({})...'.format(img_id)) + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + raise RuntimeError(msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + raise RuntimeError(msg) + + # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, expected_stat="available", - msg="Create volume status wait") - if ret: - return vol_new + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) else: - self.log.error('Failed to create volume.') - return None + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) + + return vol_new def delete_resource(self, resource, resource_id, msg="resource", max_wait=120): From 81e3abd3efb3b7b2855b9f20a84ee1f14fafcba9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 23 Jun 2015 23:38:47 +0000 Subject: [PATCH 0731/2699] re-sync tests/charmhelpers --- .../contrib/openstack/amulet/utils.py | 177 ++++++++++++------ 1 file changed, 119 insertions(+), 58 deletions(-) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcdb4b3c..bba8458e 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -236,8 +236,17 @@ def authenticate_swift_user(self, keystone, user, password, tenant): auth_version='2.0') def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - self.log.debug('Creating glance image ({})...'.format(image_name)) + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -246,33 +255,51 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) return image @@ -283,22 +310,7 @@ def delete_image(self, glance, image): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.') self.log.debug('Deleting glance image ({})...'.format(image)) - num_before = len(list(glance.images.list())) - glance.images.delete(image) - - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True + return self.delete_resource(glance.images, image, msg='glance image') def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" @@ -331,22 +343,7 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') - return False - - return True + return self.delete_resource(nova.servers, instance, msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -362,19 +359,83 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): _keypair = nova.keypairs.create(name=keypair_name) return _keypair - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1): - """Add and confirm a new volume, 1GB by default.""" - self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size)) - vol_new = cinder.volumes.create(display_name=vol_name, size=1) - vol_id = vol_new.id + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, or + optionally as a clone of an existing volume, or optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. 
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input + if img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume from glance image ' + '({})...'.format(img_id)) + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + raise RuntimeError(msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + raise RuntimeError(msg) + + # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, expected_stat="available", - msg="Create volume status wait") - if ret: - return vol_new + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ raise RuntimeError(msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) else: - self.log.error('Failed to create volume.') - return None + msg = ('Volume validation failed, {}'.format(msg_attr)) + raise RuntimeError(msg) + + return vol_new def delete_resource(self, resource, resource_id, msg="resource", max_wait=120): From 49a1a13d33c71439987a84246728c2a0e890fc22 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 23 Jun 2015 23:46:44 +0000 Subject: [PATCH 0732/2699] re-sync hooks/charmhelpers --- ceph-radosgw/Makefile | 2 +- .../charmhelpers/contrib/openstack/utils.py | 20 ++++++++++++------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index ef348086..5b96e872 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -22,7 +22,7 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml +# @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 7c16fdbd..28532c98 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -555,6 +555,11 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + # Upgrade setuptools from default virtualenv version. The default version + # in trusty breaks update.py in global requirements master branch. + pip_install('setuptools', upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + for p in projects['repositories']: repo = p['repository'] branch = p['branch'] @@ -616,24 +621,24 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, else: repo_dir = dest_dir + venv = os.path.join(parent_dir, 'venv') + if update_requirements: if not requirements_dir: error_out('requirements repo must be cloned before ' 'updating from global requirements.') - _git_update_requirements(repo_dir, requirements_dir) + _git_update_requirements(venv, repo_dir, requirements_dir) juju_log('Installing git repo from dir: {}'.format(repo_dir)) if http_proxy: - pip_install(repo_dir, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) + pip_install(repo_dir, proxy=http_proxy, venv=venv) else: - pip_install(repo_dir, - venv=os.path.join(parent_dir, 'venv')) + pip_install(repo_dir, venv=venv) return repo_dir -def _git_update_requirements(package_dir, reqs_dir): +def _git_update_requirements(venv, package_dir, reqs_dir): """ Update from global requirements. 
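(The docstring continues below.) The substance of this hunk is that update.py now runs under the virtualenv's interpreter instead of whatever python resolves to on the host, so it sees the setuptools upgraded earlier in git_clone_and_install(). Distilled into a standalone sketch of the idea, not the helper itself:

    import os
    import subprocess

    def run_update_in_venv(venv, package_dir):
        # Hypothetical distillation: invoke update.py with the venv's
        # interpreter so venv-local packages (the freshly upgraded
        # setuptools in particular) are the ones in effect.
        python = os.path.join(venv, 'bin/python')
        subprocess.check_call([python, 'update.py', package_dir])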
@@ -642,7 +647,8 @@ def _git_update_requirements(package_dir, reqs_dir): """ orig_dir = os.getcwd() os.chdir(reqs_dir) - cmd = ['python', 'update.py', package_dir] + python = os.path.join(venv, 'bin/python') + cmd = [python, 'update.py', package_dir] try: subprocess.check_call(cmd) except subprocess.CalledProcessError: From 475dbd2b116bc078ca871e1de8dd8e1d94a520a0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 25 Jun 2015 21:28:20 +0000 Subject: [PATCH 0733/2699] cleanup --- ceph-radosgw/Makefile | 2 +- ceph-radosgw/tests/tests.yaml | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 ceph-radosgw/tests/tests.yaml diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 5b96e872..ef348086 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -22,7 +22,7 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml -# @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml new file mode 100644 index 00000000..8906788c --- /dev/null +++ b/ceph-radosgw/tests/tests.yaml @@ -0,0 +1,17 @@ +bootstrap: true +reset: true +virtualenv: true +makefile: + - lint + - test +sources: + - ppa:juju/stable +packages: + - amulet + - python-amulet + - python-cinderclient + - python-distro-info + - python-keystoneclient + - python-glanceclient + - python-novaclient + - python-swiftclient From a1c79823bbcb365ceccf9c1432e4665d97b93dd9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 13:55:41 +0000 Subject: [PATCH 0734/2699] resync hooks/charmhelpers --- ceph-radosgw/Makefile | 2 +- .../charmhelpers/contrib/python/packages.py | 2 + .../hooks/charmhelpers/core/hookenv.py | 128 +++++++++++++----- .../hooks/charmhelpers/core/services/base.py | 21 +-- 4 files changed, 107 insertions(+), 46 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index ef348086..5b96e872 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -22,7 +22,7 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml +# @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index 07b0c1d7..10b32e33 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -36,6 +36,8 @@ def parse_options(given, available): """Given a set of options, check if available""" for key, value in sorted(given.items()): + if not value: + continue if key in available: yield "--{0}={1}".format(key, value) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 117429fd..0add16d4 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -21,7 +21,9 @@ # Charm Helpers Developers from __future__ import print_function +from distutils.version import LooseVersion from functools import wraps +import glob import os import json import yaml @@ -242,29 +244,7 @@ def __init__(self, *args, **kw): 
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() - - def __getitem__(self, key): - """For regular dict lookups, check the current juju config first, - then the previous (saved) copy. This ensures that user-saved values - will be returned by a dict lookup. - - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) + atexit(self._implicit_save) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -283,6 +263,9 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) + for k, v in self._prev_dict.items(): + if k not in self: + self[k] = v def changed(self, key): """Return True if the current value for this key is different from @@ -314,13 +297,13 @@ def save(self): instance. """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v with open(self.path, 'w') as f: json.dump(self, f) + def _implicit_save(self): + if self.implicit_save: + self.save() + @cached def config(scope=None): @@ -587,10 +570,14 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self, config_save=True): + def __init__(self, config_save=None): super(Hooks, self).__init__() self._hooks = {} - self._config_save = config_save + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. + if config_save is not None: + config().implicit_save = config_save def register(self, name, function): """Register a hook""" @@ -598,13 +585,16 @@ def register(self, name, function): def execute(self, args): """Execute a registered hook based on args[0]""" + _run_atstart() hook_name = os.path.basename(args[0]) if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() else: raise UnregisteredHookError(hook_name) @@ -732,13 +722,79 @@ def leader_get(attribute=None): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def leader_set(settings=None, **kwargs): """Juju leader set value(s)""" - log("Juju leader-set '%s'" % (settings), level=DEBUG) + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) cmd = ['leader-set'] settings = settings or {} settings.update(kwargs) - for k, v in settings.iteritems(): + for k, v in settings.items(): if v is None: cmd.append('{}='.format(k)) else: cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. 
'1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. + - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework as been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index 98d344e1..a42660ca 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -128,15 +128,18 @@ def manage(self): """ Handle the current hook by doing The Right Thing with the registered services. 
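(The docstring and body of manage() continue below.) The net effect of the hookenv changes synced above: config persistence is now an atexit callback (Config.__init__ registers _implicit_save), and both Hooks.execute() and ServiceManager.manage() bracket the hook body with _run_atstart()/_run_atexit(), skipping the atexit callbacks when the hook fails. Charm code keeps the documented shape and inherits the behaviour; a minimal sketch:

    import sys
    from charmhelpers.core import hookenv

    def config_changed():
        hookenv.log('reconfiguring')

    hooks = hookenv.Hooks()
    hooks.register('config-changed', config_changed)

    if __name__ == '__main__':
        # On clean exit _run_atexit() fires and the implicit config
        # save happens; on an exception or non-zero SystemExit it
        # is skipped.
        hooks.execute(sys.argv)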
""" - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - cfg = hookenv.config() - if cfg.implicit_save: - cfg.save() + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() def provide_data(self): """ From 3d98837ef678dc061ed776973a76873f142c391e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 13:56:27 +0000 Subject: [PATCH 0735/2699] resync tests/charmhelpers --- ceph-radosgw/Makefile | 2 +- .../charmhelpers/contrib/amulet/utils.py | 84 +++++++++---------- .../contrib/openstack/amulet/deployment.py | 11 +-- .../contrib/openstack/amulet/utils.py | 52 ++++++------ 4 files changed, 73 insertions(+), 76 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 5b96e872..ef348086 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -22,7 +22,7 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml -# @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index c5fa1edc..3de26afd 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import amulet import ConfigParser import distro_info import io @@ -173,6 +174,11 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. """ self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, @@ -195,20 +201,18 @@ def validate_config_data(self, sentry_unit, config_file, section, if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) - else: - # handle not_null, valid_ip boolean comparison methods, etc. - if v(actual): - return None - else: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluate a variable and returns a + longs, or can be a function that evaluates a variable and returns a bool. 
""" self.log.debug('actual: {}'.format(repr(actual))) @@ -219,8 +223,10 @@ def _validate_dict_data(self, expected, actual): if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): + # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: @@ -435,15 +441,13 @@ def check_commands_on_units(self, commands, sentry_units): for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: - msg = ('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - self.log.debug(msg) + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return msg + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) return None def get_process_id_list(self, sentry_unit, process_name): @@ -460,7 +464,7 @@ def get_process_id_list(self, sentry_unit, process_name): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() def get_unit_process_ids(self, unit_processes): @@ -481,47 +485,37 @@ def validate_unit_process_ids(self, expected, actual): self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): - msg = ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - return msg + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in expected.iteritems(): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: - msg = ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - return msg + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): - msg = ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - return msg + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: - msg = ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - return msg + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) if e_pids_length != a_pids_length: - msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' - '{} ({})'.format(e_sentry_name, - e_proc_name, - e_pids_length, - a_pids_length, - a_pids)) - return msg + return ('PID count mismatch. 
{} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids_length, + a_pids)) else: - msg = ('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, - e_proc_name, - e_pids_length, - a_pids)) - self.log.debug(msg) + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): @@ -532,10 +526,8 @@ def validate_list_of_identical_dicts(self, list_of_dicts): self.log.debug('Hashes: {}'.format(hashes)) if len(set(hashes)) == 1: - msg = 'Dicts within list are identical' - self.log.debug(msg) + self.log.debug('Dicts within list are identical') else: - msg = 'Dicts within list are not identical' - return msg + return 'Dicts within list are not identical' return None diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 84850bd3..b01e6cb8 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -79,9 +79,9 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Openstack subordinate charms do not expose an origin option as that - # is controlled by the principle - ignore = ['neutron-openvswitch'] + # Most OpenStack subordinate charms do not expose an origin option + # as that is controlled by the principle. + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] if self.openstack: for svc in services: @@ -150,8 +150,9 @@ def _get_openstack_release_string(self): return releases[self.series] def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools based on Ubuntu-OpenStack - release and whether ceph radosgw is flagged as present or not.""" + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index bba8458e..03f79277 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import amulet import json import logging import os @@ -177,6 +178,7 @@ def tenant_exists(self, keystone, tenant): def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. service_ip = \ keystone_sentry.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -279,7 +281,7 @@ def create_cirros_image(self, glance, image_name): msg='Image status wait') if not ret: msg = 'Glance image failed to reach expected state.' 
- raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new image self.log.debug('Validating image attributes...') @@ -299,7 +301,7 @@ def create_cirros_image(self, glance, image_name): self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return image @@ -343,7 +345,8 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, msg='nova instance') + return self.delete_resource(nova.servers, instance, + msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -361,8 +364,8 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, or - optionally as a clone of an existing volume, or optionally + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally from a snapshot. Wait for the new volume status to reach the expected status, validate and return a resource pointer. @@ -373,29 +376,33 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, :param snap_id: optional snapshot id to use :returns: cinder volume pointer """ - # Handle parameter input + # Handle parameter input and avoid impossible combinations if img_id and not src_vol_id and not snap_id: - self.log.debug('Creating cinder volume from glance image ' - '({})...'.format(img_id)) + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') bootable = 'true' elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume self.log.debug('Cloning cinder volume...') bootable = cinder.volumes.get(src_vol_id).bootable elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot self.log.debug('Creating cinder volume from snapshot...') snap = cinder.volume_snapshots.find(id=snap_id) vol_size = snap.size snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id bootable = cinder.volumes.get(snap_vol_id).bootable elif not img_id and not src_vol_id and not snap_id: + # Create volume self.log.debug('Creating cinder volume...') bootable = 'false' else: + # Impossible combination of parameters msg = ('Invalid method use - name:{} size:{} img_id:{} ' 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, img_id, src_vol_id, snap_id)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Create new volume try: @@ -407,7 +414,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, @@ -415,7 +422,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, msg="Volume status wait") if not ret: msg = 'Cinder volume failed to reach expected state.' 
- raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new volume self.log.debug('Validating volume attributes...') @@ -433,7 +440,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return vol_new @@ -514,9 +521,9 @@ def resource_reaches_status(self, resource, resource_id, def get_ceph_osd_id_cmd(self, index): """Produce a shell command that will return a ceph-osd id.""" - cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" - " | grep -o '[0-9]*'`".format(index + 1)) - return cmd + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) def get_ceph_pools(self, sentry_unit): """Return a dict of ceph pools from a single ceph unit, with @@ -528,7 +535,7 @@ def get_ceph_pools(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): @@ -554,7 +561,7 @@ def get_ceph_df(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return json.loads(output) def get_ceph_pool_sample(self, sentry_unit, pool_id=0): @@ -571,10 +578,8 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) return pool_name, obj_count, kb_used def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): @@ -591,9 +596,8 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): original, created, deleted = range(3) if samples[created] <= samples[original] or \ samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) else: self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) From dc49fcf4252480fcc87bf4bc2db36402bbc6cd23 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 14:23:18 +0000 Subject: [PATCH 0736/2699] resync hooks/charmhelpers --- ceph-osd/hooks/charmhelpers/core/hookenv.py | 128 +++++++++++++----- .../hooks/charmhelpers/core/services/base.py | 21 +-- 2 files changed, 104 insertions(+), 45 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 117429fd..0add16d4 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -21,7 +21,9 @@ # Charm Helpers Developers from __future__ import print_function +from distutils.version import LooseVersion from functools import wraps +import glob import os import json import yaml @@ -242,29 +244,7 @@ def __init__(self, *args, **kw): self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() - - def __getitem__(self, key): - """For regular dict lookups, check the 
current juju config first, - then the previous (saved) copy. This ensures that user-saved values - will be returned by a dict lookup. - - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) + atexit(self._implicit_save) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -283,6 +263,9 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) + for k, v in self._prev_dict.items(): + if k not in self: + self[k] = v def changed(self, key): """Return True if the current value for this key is different from @@ -314,13 +297,13 @@ def save(self): instance. """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v with open(self.path, 'w') as f: json.dump(self, f) + def _implicit_save(self): + if self.implicit_save: + self.save() + @cached def config(scope=None): @@ -587,10 +570,14 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self, config_save=True): + def __init__(self, config_save=None): super(Hooks, self).__init__() self._hooks = {} - self._config_save = config_save + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. + if config_save is not None: + config().implicit_save = config_save def register(self, name, function): """Register a hook""" @@ -598,13 +585,16 @@ def register(self, name, function): def execute(self, args): """Execute a registered hook based on args[0]""" + _run_atstart() hook_name = os.path.basename(args[0]) if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() else: raise UnregisteredHookError(hook_name) @@ -732,13 +722,79 @@ def leader_get(attribute=None): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def leader_set(settings=None, **kwargs): """Juju leader set value(s)""" - log("Juju leader-set '%s'" % (settings), level=DEBUG) + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) cmd = ['leader-set'] settings = settings or {} settings.update(kwargs) - for k, v in settings.iteritems(): + for k, v in settings.items(): if v is None: cmd.append('{}='.format(k)) else: cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. 
In particular:
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py
index 98d344e1..a42660ca 100644
--- a/ceph-osd/hooks/charmhelpers/core/services/base.py
+++ b/ceph-osd/hooks/charmhelpers/core/services/base.py
@@ -128,15 +128,18 @@ def manage(self):
         """
         Handle the current hook by doing The Right Thing with the
         registered services.
         """
-        hook_name = hookenv.hook_name()
-        if hook_name == 'stop':
-            self.stop_services()
-        else:
-            self.reconfigure_services()
-            self.provide_data()
-        cfg = hookenv.config()
-        if cfg.implicit_save:
-            cfg.save()
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()

     def provide_data(self):
         """
From aa9b0fc06d5a72f986415bbea1581bb1b151a2a2 Mon Sep 17 00:00:00 2001
From: Ryan Beisner
Date: Mon, 29 Jun 2015 14:24:05 +0000
Subject: [PATCH 0737/2699] resync tests/charmhelpers

---
 .../charmhelpers/contrib/amulet/utils.py      | 84 +++++++++----------
 .../contrib/openstack/amulet/deployment.py    | 11 +--
 .../contrib/openstack/amulet/utils.py         | 52 ++++++------
 3 files changed, 72 insertions(+), 75 deletions(-)

diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
index c5fa1edc..3de26afd 100644
--- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
+++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see .

+import amulet
 import ConfigParser
 import distro_info
 import io
@@ -173,6 +174,11 @@ def validate_config_data(self, sentry_unit, config_file, section,
         Verify that the specified section of the config file contains
         the expected option key:value pairs.
+
+        Compare expected dictionary data vs actual dictionary data.
+        The values in the 'expected' dictionary can be strings, bools, ints,
+        longs, or can be a function that evaluates a variable and returns a
+        bool.
""" self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, @@ -195,20 +201,18 @@ def validate_config_data(self, sentry_unit, config_file, section, if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) - else: - # handle not_null, valid_ip boolean comparison methods, etc. - if v(actual): - return None - else: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluate a variable and returns a + longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('actual: {}'.format(repr(actual))) @@ -219,8 +223,10 @@ def _validate_dict_data(self, expected, actual): if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): + # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: @@ -435,15 +441,13 @@ def check_commands_on_units(self, commands, sentry_units): for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: - msg = ('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - self.log.debug(msg) + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return msg + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) return None def get_process_id_list(self, sentry_unit, process_name): @@ -460,7 +464,7 @@ def get_process_id_list(self, sentry_unit, process_name): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() def get_unit_process_ids(self, unit_processes): @@ -481,47 +485,37 @@ def validate_unit_process_ids(self, expected, actual): self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): - msg = ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - return msg + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in expected.iteritems(): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: - msg = ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - return msg + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): - msg = ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - return msg + return ('Process name count mismatch. 
expected, actual: {}, '
+                        '{}'.format(len(expected), len(actual)))

             for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
                     zip(e_proc_names.items(), a_proc_names.items()):
                 if e_proc_name != a_proc_name:
-                    msg = ('Process name mismatch. expected, actual: {}, '
-                           '{}'.format(e_proc_name, a_proc_name))
-                    return msg
+                    return ('Process name mismatch. expected, actual: {}, '
+                            '{}'.format(e_proc_name, a_proc_name))

                 a_pids_length = len(a_pids)
                 if e_pids_length != a_pids_length:
-                    msg = ('PID count mismatch. {} ({}) expected, actual: {}, '
-                           '{} ({})'.format(e_sentry_name,
-                                            e_proc_name,
-                                            e_pids_length,
-                                            a_pids_length,
-                                            a_pids))
-                    return msg
+                    return ('PID count mismatch. {} ({}) expected, actual: '
+                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
+                                                 e_pids_length, a_pids_length,
+                                                 a_pids))
                 else:
-                    msg = ('PID check OK: {} {} {}: '
-                           '{}'.format(e_sentry_name,
-                                       e_proc_name,
-                                       e_pids_length,
-                                       a_pids))
-                    self.log.debug(msg)
+                    self.log.debug('PID check OK: {} {} {}: '
+                                   '{}'.format(e_sentry_name, e_proc_name,
+                                               e_pids_length, a_pids))
         return None

     def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -532,10 +526,8 @@ def validate_list_of_identical_dicts(self, list_of_dicts):
         self.log.debug('Hashes: {}'.format(hashes))

         if len(set(hashes)) == 1:
-            msg = 'Dicts within list are identical'
-            self.log.debug(msg)
+            self.log.debug('Dicts within list are identical')
         else:
-            msg = 'Dicts within list are not identical'
-            return msg
+            return 'Dicts within list are not identical'

         return None
diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 84850bd3..b01e6cb8 100644
--- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -79,9 +79,9 @@ def _add_services(self, this_service, other_services):
             services.append(this_service)
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                       'ceph-osd', 'ceph-radosgw']
-        # Openstack subordinate charms do not expose an origin option as that
-        # is controlled by the principle
-        ignore = ['neutron-openvswitch']
+        # Most OpenStack subordinate charms do not expose an origin option
+        # as that is controlled by the principal.
+        ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']

         if self.openstack:
             for svc in services:
@@ -150,8 +150,9 @@ def _get_openstack_release_string(self):
         return releases[self.series]

     def get_ceph_expected_pools(self, radosgw=False):
-        """Return a list of expected ceph pools based on Ubuntu-OpenStack
-        release and whether ceph radosgw is flagged as present or not."""
+        """Return a list of expected ceph pools in a ceph + cinder + glance
+        test scenario, based on OpenStack release and whether ceph radosgw
+        is flagged as present or not."""

         if self._get_openstack_release() >= self.trusty_kilo:
             # Kilo or later
diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
index bba8458e..03f79277 100644
--- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see .
+import amulet import json import logging import os @@ -177,6 +178,7 @@ def tenant_exists(self, keystone, tenant): def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. service_ip = \ keystone_sentry.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -279,7 +281,7 @@ def create_cirros_image(self, glance, image_name): msg='Image status wait') if not ret: msg = 'Glance image failed to reach expected state.' - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new image self.log.debug('Validating image attributes...') @@ -299,7 +301,7 @@ def create_cirros_image(self, glance, image_name): self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return image @@ -343,7 +345,8 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, msg='nova instance') + return self.delete_resource(nova.servers, instance, + msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -361,8 +364,8 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, or - optionally as a clone of an existing volume, or optionally + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally from a snapshot. Wait for the new volume status to reach the expected status, validate and return a resource pointer. 
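The create_cinder_volume() hunks that follow hinge on the volume having exactly one source: a glance image, a source volume, a snapshot, or none of the three (a blank volume); any other combination must fail fast before an API call is made. A minimal standalone sketch of that mutual-exclusion check, for illustration only (the resolve_volume_source() helper below is a hypothetical name, not charmhelpers or cinder API):

# Sketch of the source-selection logic used by create_cinder_volume().
# resolve_volume_source() is a hypothetical helper for illustration only.


def resolve_volume_source(img_id=None, src_vol_id=None, snap_id=None):
    """Return the requested volume source, or raise if ambiguous."""
    sources = {'image': img_id, 'clone': src_vol_id, 'snapshot': snap_id}
    supplied = sorted(name for name, value in sources.items() if value)
    if len(supplied) > 1:
        # Mirrors the final else branch of create_cinder_volume():
        # impossible combinations of parameters are rejected up front.
        raise ValueError('Invalid method use - supply at most one of '
                         'img_id, src_vol_id, snap_id; got {}'.format(supplied))
    return supplied[0] if supplied else 'blank'


if __name__ == '__main__':
    assert resolve_volume_source(img_id='deadbeef') == 'image'
    assert resolve_volume_source(snap_id='cafe') == 'snapshot'
    assert resolve_volume_source() == 'blank'
    try:
        resolve_volume_source(img_id='deadbeef', snap_id='cafe')
    except ValueError as exc:
        print('rejected as expected: {}'.format(exc))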
@@ -373,29 +376,33 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, :param snap_id: optional snapshot id to use :returns: cinder volume pointer """ - # Handle parameter input + # Handle parameter input and avoid impossible combinations if img_id and not src_vol_id and not snap_id: - self.log.debug('Creating cinder volume from glance image ' - '({})...'.format(img_id)) + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') bootable = 'true' elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume self.log.debug('Cloning cinder volume...') bootable = cinder.volumes.get(src_vol_id).bootable elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot self.log.debug('Creating cinder volume from snapshot...') snap = cinder.volume_snapshots.find(id=snap_id) vol_size = snap.size snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id bootable = cinder.volumes.get(snap_vol_id).bootable elif not img_id and not src_vol_id and not snap_id: + # Create volume self.log.debug('Creating cinder volume...') bootable = 'false' else: + # Impossible combination of parameters msg = ('Invalid method use - name:{} size:{} img_id:{} ' 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, img_id, src_vol_id, snap_id)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Create new volume try: @@ -407,7 +414,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, @@ -415,7 +422,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, msg="Volume status wait") if not ret: msg = 'Cinder volume failed to reach expected state.' 
- raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new volume self.log.debug('Validating volume attributes...') @@ -433,7 +440,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return vol_new @@ -514,9 +521,9 @@ def resource_reaches_status(self, resource, resource_id, def get_ceph_osd_id_cmd(self, index): """Produce a shell command that will return a ceph-osd id.""" - cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" - " | grep -o '[0-9]*'`".format(index + 1)) - return cmd + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) def get_ceph_pools(self, sentry_unit): """Return a dict of ceph pools from a single ceph unit, with @@ -528,7 +535,7 @@ def get_ceph_pools(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): @@ -554,7 +561,7 @@ def get_ceph_df(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return json.loads(output) def get_ceph_pool_sample(self, sentry_unit, pool_id=0): @@ -571,10 +578,8 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) return pool_name, obj_count, kb_used def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): @@ -591,9 +596,8 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): original, created, deleted = range(3) if samples[created] <= samples[original] or \ samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) else: self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) From cb2affd0a17b09409759445d6d41a0e12d67bd79 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 14:25:06 +0000 Subject: [PATCH 0738/2699] resync hooks/charmhelpers --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 128 +++++++++++++----- .../hooks/charmhelpers/core/services/base.py | 21 +-- 2 files changed, 104 insertions(+), 45 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 117429fd..0add16d4 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -21,7 +21,9 @@ # Charm Helpers Developers from __future__ import print_function +from distutils.version import LooseVersion from functools import wraps +import glob import os import json import yaml @@ -242,29 +244,7 @@ def __init__(self, *args, **kw): self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() - - def __getitem__(self, key): - """For regular dict lookups, 
check the current juju config first, - then the previous (saved) copy. This ensures that user-saved values - will be returned by a dict lookup. - - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) + atexit(self._implicit_save) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -283,6 +263,9 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) + for k, v in self._prev_dict.items(): + if k not in self: + self[k] = v def changed(self, key): """Return True if the current value for this key is different from @@ -314,13 +297,13 @@ def save(self): instance. """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v with open(self.path, 'w') as f: json.dump(self, f) + def _implicit_save(self): + if self.implicit_save: + self.save() + @cached def config(scope=None): @@ -587,10 +570,14 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self, config_save=True): + def __init__(self, config_save=None): super(Hooks, self).__init__() self._hooks = {} - self._config_save = config_save + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. + if config_save is not None: + config().implicit_save = config_save def register(self, name, function): """Register a hook""" @@ -598,13 +585,16 @@ def register(self, name, function): def execute(self, args): """Execute a registered hook based on args[0]""" + _run_atstart() hook_name = os.path.basename(args[0]) if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() else: raise UnregisteredHookError(hook_name) @@ -732,13 +722,79 @@ def leader_get(attribute=None): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def leader_set(settings=None, **kwargs): """Juju leader set value(s)""" - log("Juju leader-set '%s'" % (settings), level=DEBUG) + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) cmd = ['leader-set'] settings = settings or {} settings.update(kwargs) - for k, v in settings.iteritems(): + for k, v in settings.items(): if v is None: cmd.append('{}='.format(k)) else: cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. 
In particular:
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py
index 98d344e1..a42660ca 100644
--- a/ceph-proxy/hooks/charmhelpers/core/services/base.py
+++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py
@@ -128,15 +128,18 @@ def manage(self):
         """
         Handle the current hook by doing The Right Thing with the
         registered services.
         """
-        hook_name = hookenv.hook_name()
-        if hook_name == 'stop':
-            self.stop_services()
-        else:
-            self.reconfigure_services()
-            self.provide_data()
-        cfg = hookenv.config()
-        if cfg.implicit_save:
-            cfg.save()
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()

     def provide_data(self):
         """
From f45504d0740579ff159100397e5d5bd5e09ee3aa Mon Sep 17 00:00:00 2001
From: Ryan Beisner
Date: Mon, 29 Jun 2015 14:25:06 +0000
Subject: [PATCH 0739/2699] resync hooks/charmhelpers

---
 ceph-mon/hooks/charmhelpers/core/hookenv.py   | 128 +++++++++++++-----
 .../hooks/charmhelpers/core/services/base.py  |  21 +--
 2 files changed, 104 insertions(+), 45 deletions(-)

diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py
index 117429fd..0add16d4 100644
--- a/ceph-mon/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py
@@ -21,7 +21,9 @@
 # Charm Helpers Developers

 from __future__ import print_function
+from distutils.version import LooseVersion
 from functools import wraps
+import glob
 import os
 import json
 import yaml
@@ -242,29 +244,7 @@ def __init__(self, *args, **kw):
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
             self.load_previous()
-
-    def __getitem__(self, key):
-        """For regular dict lookups, check the current juju config first,
-        then the previous (saved) copy. This ensures that user-saved values
-        will be returned by a dict lookup.
- - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) + atexit(self._implicit_save) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -283,6 +263,9 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) + for k, v in self._prev_dict.items(): + if k not in self: + self[k] = v def changed(self, key): """Return True if the current value for this key is different from @@ -314,13 +297,13 @@ def save(self): instance. """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v with open(self.path, 'w') as f: json.dump(self, f) + def _implicit_save(self): + if self.implicit_save: + self.save() + @cached def config(scope=None): @@ -587,10 +570,14 @@ def config_changed(): hooks.execute(sys.argv) """ - def __init__(self, config_save=True): + def __init__(self, config_save=None): super(Hooks, self).__init__() self._hooks = {} - self._config_save = config_save + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. + if config_save is not None: + config().implicit_save = config_save def register(self, name, function): """Register a hook""" @@ -598,13 +585,16 @@ def register(self, name, function): def execute(self, args): """Execute a registered hook based on args[0]""" + _run_atstart() hook_name = os.path.basename(args[0]) if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() else: raise UnregisteredHookError(hook_name) @@ -732,13 +722,79 @@ def leader_get(attribute=None): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def leader_set(settings=None, **kwargs): """Juju leader set value(s)""" - log("Juju leader-set '%s'" % (settings), level=DEBUG) + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) cmd = ['leader-set'] settings = settings or {} settings.update(kwargs) - for k, v in settings.iteritems(): + for k, v in settings.items(): if v is None: cmd.append('{}='.format(k)) else: cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. 
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py
index 98d344e1..a42660ca 100644
--- a/ceph-mon/hooks/charmhelpers/core/services/base.py
+++ b/ceph-mon/hooks/charmhelpers/core/services/base.py
@@ -128,15 +128,18 @@ def manage(self):
         """
         Handle the current hook by doing The Right Thing with the
         registered services.
         """
-        hook_name = hookenv.hook_name()
-        if hook_name == 'stop':
-            self.stop_services()
-        else:
-            self.reconfigure_services()
-            self.provide_data()
-        cfg = hookenv.config()
-        if cfg.implicit_save:
-            cfg.save()
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()

     def provide_data(self):
         """
From eeb323e6dcaabb7f08a7576f5b6ea1ecd4991d15 Mon Sep 17 00:00:00 2001
From: Ryan Beisner
Date: Mon, 29 Jun 2015 14:25:54 +0000
Subject: [PATCH 0740/2699] resync tests/charmhelpers

---
 .../charmhelpers/contrib/amulet/utils.py      | 84 +++++++++----------
 .../contrib/openstack/amulet/deployment.py    | 11 +--
 .../contrib/openstack/amulet/utils.py         | 52 ++++++------
 3 files changed, 72 insertions(+), 75 deletions(-)

diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py
index c5fa1edc..3de26afd 100644
--- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py
+++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see .
""" self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, @@ -195,20 +201,18 @@ def validate_config_data(self, sentry_unit, config_file, section, if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) - else: - # handle not_null, valid_ip boolean comparison methods, etc. - if v(actual): - return None - else: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluate a variable and returns a + longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('actual: {}'.format(repr(actual))) @@ -219,8 +223,10 @@ def _validate_dict_data(self, expected, actual): if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): + # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: @@ -435,15 +441,13 @@ def check_commands_on_units(self, commands, sentry_units): for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: - msg = ('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - self.log.debug(msg) + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return msg + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) return None def get_process_id_list(self, sentry_unit, process_name): @@ -460,7 +464,7 @@ def get_process_id_list(self, sentry_unit, process_name): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() def get_unit_process_ids(self, unit_processes): @@ -481,47 +485,37 @@ def validate_unit_process_ids(self, expected, actual): self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): - msg = ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - return msg + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in expected.iteritems(): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: - msg = ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - return msg + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): - msg = ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - return msg + return ('Process name count mismatch. 
expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: - msg = ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - return msg + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) if e_pids_length != a_pids_length: - msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' - '{} ({})'.format(e_sentry_name, - e_proc_name, - e_pids_length, - a_pids_length, - a_pids)) - return msg + return ('PID count mismatch. {} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids_length, + a_pids)) else: - msg = ('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, - e_proc_name, - e_pids_length, - a_pids)) - self.log.debug(msg) + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): @@ -532,10 +526,8 @@ def validate_list_of_identical_dicts(self, list_of_dicts): self.log.debug('Hashes: {}'.format(hashes)) if len(set(hashes)) == 1: - msg = 'Dicts within list are identical' - self.log.debug(msg) + self.log.debug('Dicts within list are identical') else: - msg = 'Dicts within list are not identical' - return msg + return 'Dicts within list are not identical' return None diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 84850bd3..b01e6cb8 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -79,9 +79,9 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Openstack subordinate charms do not expose an origin option as that - # is controlled by the principle - ignore = ['neutron-openvswitch'] + # Most OpenStack subordinate charms do not expose an origin option + # as that is controlled by the principle. + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] if self.openstack: for svc in services: @@ -150,8 +150,9 @@ def _get_openstack_release_string(self): return releases[self.series] def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools based on Ubuntu-OpenStack - release and whether ceph radosgw is flagged as present or not.""" + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index bba8458e..03f79277 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
+import amulet import json import logging import os @@ -177,6 +178,7 @@ def tenant_exists(self, keystone, tenant): def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. service_ip = \ keystone_sentry.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -279,7 +281,7 @@ def create_cirros_image(self, glance, image_name): msg='Image status wait') if not ret: msg = 'Glance image failed to reach expected state.' - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new image self.log.debug('Validating image attributes...') @@ -299,7 +301,7 @@ def create_cirros_image(self, glance, image_name): self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return image @@ -343,7 +345,8 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, msg='nova instance') + return self.delete_resource(nova.servers, instance, + msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -361,8 +364,8 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, or - optionally as a clone of an existing volume, or optionally + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally from a snapshot. Wait for the new volume status to reach the expected status, validate and return a resource pointer. 
@@ -373,29 +376,33 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, :param snap_id: optional snapshot id to use :returns: cinder volume pointer """ - # Handle parameter input + # Handle parameter input and avoid impossible combinations if img_id and not src_vol_id and not snap_id: - self.log.debug('Creating cinder volume from glance image ' - '({})...'.format(img_id)) + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') bootable = 'true' elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume self.log.debug('Cloning cinder volume...') bootable = cinder.volumes.get(src_vol_id).bootable elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot self.log.debug('Creating cinder volume from snapshot...') snap = cinder.volume_snapshots.find(id=snap_id) vol_size = snap.size snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id bootable = cinder.volumes.get(snap_vol_id).bootable elif not img_id and not src_vol_id and not snap_id: + # Create volume self.log.debug('Creating cinder volume...') bootable = 'false' else: + # Impossible combination of parameters msg = ('Invalid method use - name:{} size:{} img_id:{} ' 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, img_id, src_vol_id, snap_id)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Create new volume try: @@ -407,7 +414,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, @@ -415,7 +422,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, msg="Volume status wait") if not ret: msg = 'Cinder volume failed to reach expected state.' 
- raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new volume self.log.debug('Validating volume attributes...') @@ -433,7 +440,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return vol_new @@ -514,9 +521,9 @@ def resource_reaches_status(self, resource, resource_id, def get_ceph_osd_id_cmd(self, index): """Produce a shell command that will return a ceph-osd id.""" - cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" - " | grep -o '[0-9]*'`".format(index + 1)) - return cmd + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) def get_ceph_pools(self, sentry_unit): """Return a dict of ceph pools from a single ceph unit, with @@ -528,7 +535,7 @@ def get_ceph_pools(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): @@ -554,7 +561,7 @@ def get_ceph_df(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return json.loads(output) def get_ceph_pool_sample(self, sentry_unit, pool_id=0): @@ -571,10 +578,8 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) return pool_name, obj_count, kb_used def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): @@ -591,9 +596,8 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): original, created, deleted = range(3) if samples[created] <= samples[original] or \ samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) else: self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) From 6b778811a75d710fe3d062743a556c2a0477e707 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 14:25:54 +0000 Subject: [PATCH 0741/2699] resync tests/charmhelpers --- .../charmhelpers/contrib/amulet/utils.py | 84 +++++++++---------- .../contrib/openstack/amulet/deployment.py | 11 +-- .../contrib/openstack/amulet/utils.py | 52 ++++++------ 3 files changed, 72 insertions(+), 75 deletions(-) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index c5fa1edc..3de26afd 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
+import amulet import ConfigParser import distro_info import io @@ -173,6 +174,11 @@ def validate_config_data(self, sentry_unit, config_file, section, Verify that the specified section of the config file contains the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. """ self.log.debug('Validating config file data ({} in {} on {})' '...'.format(section, config_file, @@ -195,20 +201,18 @@ def validate_config_data(self, sentry_unit, config_file, section, if actual != v: return "section [{}] {}:{} != expected {}:{}".format( section, k, actual, k, expected[k]) - else: - # handle not_null, valid_ip boolean comparison methods, etc. - if v(actual): - return None - else: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None def _validate_dict_data(self, expected, actual): """Validate dictionary data. Compare expected dictionary data vs actual dictionary data. The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluate a variable and returns a + longs, or can be a function that evaluates a variable and returns a bool. """ self.log.debug('actual: {}'.format(repr(actual))) @@ -219,8 +223,10 @@ def _validate_dict_data(self, expected, actual): if (isinstance(v, six.string_types) or isinstance(v, bool) or isinstance(v, six.integer_types)): + # handle explicit values if v != actual[k]: return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip elif not v(actual[k]): return "{}:{}".format(k, actual[k]) else: @@ -435,15 +441,13 @@ def check_commands_on_units(self, commands, sentry_units): for cmd in commands: output, code = sentry_unit.run(cmd) if code == 0: - msg = ('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - self.log.debug(msg) + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return msg + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) return None def get_process_id_list(self, sentry_unit, process_name): @@ -460,7 +464,7 @@ def get_process_id_list(self, sentry_unit, process_name): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() def get_unit_process_ids(self, unit_processes): @@ -481,47 +485,37 @@ def validate_unit_process_ids(self, expected, actual): self.log.debug('Actual PIDs: {}'.format(actual)) if len(actual) != len(expected): - msg = ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - return msg + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) for (e_sentry, e_proc_names) in expected.iteritems(): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: - msg = ('Expected sentry ({}) not found in actual dict data.' 
-                       '{}'.format(e_sentry_name, e_sentry))
-                return msg
+                return ('Expected sentry ({}) not found in actual dict data.'
+                        '{}'.format(e_sentry_name, e_sentry))

             if len(e_proc_names.keys()) != len(a_proc_names.keys()):
-                msg = ('Process name count mismatch. expected, actual: {}, '
-                       '{}'.format(len(expected), len(actual)))
-                return msg
+                return ('Process name count mismatch. expected, actual: {}, '
+                        '{}'.format(len(expected), len(actual)))

             for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
                     zip(e_proc_names.items(), a_proc_names.items()):
                 if e_proc_name != a_proc_name:
-                    msg = ('Process name mismatch. expected, actual: {}, '
-                           '{}'.format(e_proc_name, a_proc_name))
-                    return msg
+                    return ('Process name mismatch. expected, actual: {}, '
+                            '{}'.format(e_proc_name, a_proc_name))

                 a_pids_length = len(a_pids)
                 if e_pids_length != a_pids_length:
-                    msg = ('PID count mismatch. {} ({}) expected, actual: {}, '
-                           '{} ({})'.format(e_sentry_name,
-                                            e_proc_name,
-                                            e_pids_length,
-                                            a_pids_length,
-                                            a_pids))
-                    return msg
+                    return ('PID count mismatch. {} ({}) expected, actual: '
+                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
+                                                 e_pids_length, a_pids_length,
+                                                 a_pids))
                 else:
-                    msg = ('PID check OK: {} {} {}: '
-                           '{}'.format(e_sentry_name,
-                                       e_proc_name,
-                                       e_pids_length,
-                                       a_pids))
-                    self.log.debug(msg)
+                    self.log.debug('PID check OK: {} {} {}: '
+                                   '{}'.format(e_sentry_name, e_proc_name,
+                                               e_pids_length, a_pids))
         return None

     def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -532,10 +526,8 @@ def validate_list_of_identical_dicts(self, list_of_dicts):
         self.log.debug('Hashes: {}'.format(hashes))

         if len(set(hashes)) == 1:
-            msg = 'Dicts within list are identical'
-            self.log.debug(msg)
+            self.log.debug('Dicts within list are identical')
         else:
-            msg = 'Dicts within list are not identical'
-            return msg
+            return 'Dicts within list are not identical'

         return None
diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 84850bd3..b01e6cb8 100644
--- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -79,9 +79,9 @@ def _add_services(self, this_service, other_services):
             services.append(this_service)
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                       'ceph-osd', 'ceph-radosgw']
-        # Openstack subordinate charms do not expose an origin option as that
-        # is controlled by the principle
-        ignore = ['neutron-openvswitch']
+        # Most OpenStack subordinate charms do not expose an origin option
+        # as that is controlled by the principal.
+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] if self.openstack: for svc in services: @@ -150,8 +150,9 @@ def _get_openstack_release_string(self): return releases[self.series] def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools based on Ubuntu-OpenStack - release and whether ceph radosgw is flagged as present or not.""" + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" if self._get_openstack_release() >= self.trusty_kilo: # Kilo or later diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index bba8458e..03f79277 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import amulet import json import logging import os @@ -177,6 +178,7 @@ def tenant_exists(self, keystone, tenant): def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. service_ip = \ keystone_sentry.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -279,7 +281,7 @@ def create_cirros_image(self, glance, image_name): msg='Image status wait') if not ret: msg = 'Glance image failed to reach expected state.' - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new image self.log.debug('Validating image attributes...') @@ -299,7 +301,7 @@ def create_cirros_image(self, glance, image_name): self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return image @@ -343,7 +345,8 @@ def delete_instance(self, nova, instance): self.log.warn('/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.') self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, msg='nova instance') + return self.delete_resource(nova.servers, instance, + msg='nova instance') def create_or_get_keypair(self, nova, keypair_name="testkey"): """Create a new keypair, or return pointer if it already exists.""" @@ -361,8 +364,8 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, or - optionally as a clone of an existing volume, or optionally + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally from a snapshot. Wait for the new volume status to reach the expected status, validate and return a resource pointer. 
@@ -373,29 +376,33 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, :param snap_id: optional snapshot id to use :returns: cinder volume pointer """ - # Handle parameter input + # Handle parameter input and avoid impossible combinations if img_id and not src_vol_id and not snap_id: - self.log.debug('Creating cinder volume from glance image ' - '({})...'.format(img_id)) + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') bootable = 'true' elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume self.log.debug('Cloning cinder volume...') bootable = cinder.volumes.get(src_vol_id).bootable elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot self.log.debug('Creating cinder volume from snapshot...') snap = cinder.volume_snapshots.find(id=snap_id) vol_size = snap.size snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id bootable = cinder.volumes.get(snap_vol_id).bootable elif not img_id and not src_vol_id and not snap_id: + # Create volume self.log.debug('Creating cinder volume...') bootable = 'false' else: + # Impossible combination of parameters msg = ('Invalid method use - name:{} size:{} img_id:{} ' 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, img_id, src_vol_id, snap_id)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Create new volume try: @@ -407,7 +414,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Wait for volume to reach available status ret = self.resource_reaches_status(cinder.volumes, vol_id, @@ -415,7 +422,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, msg="Volume status wait") if not ret: msg = 'Cinder volume failed to reach expected state.' 
- raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Re-validate new volume self.log.debug('Validating volume attributes...') @@ -433,7 +440,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, self.log.debug(msg_attr) else: msg = ('Volume validation failed, {}'.format(msg_attr)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return vol_new @@ -514,9 +521,9 @@ def resource_reaches_status(self, resource, resource_id, def get_ceph_osd_id_cmd(self, index): """Produce a shell command that will return a ceph-osd id.""" - cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" - " | grep -o '[0-9]*'`".format(index + 1)) - return cmd + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) def get_ceph_pools(self, sentry_unit): """Return a dict of ceph pools from a single ceph unit, with @@ -528,7 +535,7 @@ def get_ceph_pools(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): @@ -554,7 +561,7 @@ def get_ceph_df(self, sentry_unit): msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) - raise RuntimeError(msg) + amulet.raise_status(amulet.FAIL, msg=msg) return json.loads(output) def get_ceph_pool_sample(self, sentry_unit, pool_id=0): @@ -571,10 +578,8 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): obj_count = df['pools'][pool_id]['stats']['objects'] kb_used = df['pools'][pool_id]['stats']['kb_used'] self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, - pool_id, - obj_count, - kb_used)) + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) return pool_name, obj_count, kb_used def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): @@ -591,9 +596,8 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): original, created, deleted = range(3) if samples[created] <= samples[original] or \ samples[deleted] >= samples[created]: - msg = ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - return msg + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) else: self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) From 60816b19dd2490cf2553574d8d36f4542d043c86 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 18:36:46 +0000 Subject: [PATCH 0742/2699] Update publish target in makefile; update 00-setup and tests.yaml for dependencies. 
--- ceph-radosgw/Makefile | 2 +- ceph-radosgw/tests/00-setup | 5 +++-- ceph-radosgw/tests/tests.yaml | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index ef348086..edf686fd 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -24,6 +24,6 @@ sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint +publish: lint test bzr push lp:charms/ceph-radosgw bzr push lp:charms/trusty/ceph-radosgw diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/00-setup index d6a3c75a..ee5f0332 100755 --- a/ceph-radosgw/tests/00-setup +++ b/ceph-radosgw/tests/00-setup @@ -7,7 +7,8 @@ sudo apt-get update --yes sudo apt-get install --yes python-amulet \ python-cinderclient \ python-distro-info \ - python-keystoneclient \ python-glanceclient \ - python-novaclient \ + python-heatclient \ + python-keystoneclient \ + python-novaclient python-swiftclient diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 8906788c..db533a1e 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -11,7 +11,8 @@ packages: - python-amulet - python-cinderclient - python-distro-info - - python-keystoneclient - python-glanceclient + - python-heatclient + - python-keystoneclient - python-novaclient - python-swiftclient From 8adcfea194cc3c9c6c3d96b8e5df928b4618bbea Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 18:38:42 +0000 Subject: [PATCH 0743/2699] Update publish target in makefile; update 00-setup and tests.yaml for dependencies. --- ceph-osd/.coverage | 3 ++- ceph-osd/Makefile | 2 +- ceph-osd/tests/00-setup | 6 ++++-- ceph-osd/tests/tests.yaml | 6 ++++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/ceph-osd/.coverage b/ceph-osd/.coverage index d3596333..de6e074f 100644 --- a/ceph-osd/.coverage +++ b/ceph-osd/.coverage @@ -1 +1,2 @@ -€}q(U collectorqUcoverage v3.7.1qUlinesq}q(UF/home/jamespage/src/charms/next-resync/ceph-osd/unit_tests/__init__.pyq]qKaUQ/home/jamespage/src/charms/landing-beisner-resync/ceph-osd/unit_tests/__init__.pyq]q Kauu. \ No newline at end of file +€}q(U collectorqUcoverage v3.7.1qUlinesq}q(UF/home/jamespage/src/charms/next-resync/ceph-osd/unit_tests/__init__.pyq]qKaUQ/home/jamespage/src/charms/landing-beisner-resync/ceph-osd/unit_tests/__init__.pyq]q KaU5/home/ubuntu/bzr/next/ceph-osd/unit_tests/__init__.pyq +]q Kauu. 
\ No newline at end of file diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index c6c967d0..1e4695f7 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -24,6 +24,6 @@ sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint +publish: lint test bzr push lp:charms/ceph-osd bzr push lp:charms/trusty/ceph-osd diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/00-setup index d6882caf..ee5f0332 100755 --- a/ceph-osd/tests/00-setup +++ b/ceph-osd/tests/00-setup @@ -5,8 +5,10 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ + python-cinderclient \ python-distro-info \ - python-keystoneclient \ python-glanceclient \ - python-cinderclient \ + python-heatclient \ + python-keystoneclient \ python-novaclient + python-swiftclient diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 348aae57..db533a1e 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -9,8 +9,10 @@ sources: packages: - amulet - python-amulet + - python-cinderclient - python-distro-info - - python-keystoneclient - python-glanceclient - - python-cinderclient + - python-heatclient + - python-keystoneclient - python-novaclient + - python-swiftclient From 4a69ad90642f82bd6e2112f27ef989143c5775f7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 18:39:56 +0000 Subject: [PATCH 0744/2699] Update publish target in makefile; update 00-setup and tests.yaml for dependencies. --- ceph-proxy/Makefile | 2 +- ceph-proxy/tests/00-setup | 6 ++++-- ceph-proxy/tests/tests.yaml | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 541b1fa8..93e2758b 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -24,6 +24,6 @@ sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint +publish: lint test bzr push lp:charms/ceph bzr push lp:charms/trusty/ceph diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index d6882caf..ee5f0332 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -5,8 +5,10 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ + python-cinderclient \ python-distro-info \ - python-keystoneclient \ python-glanceclient \ - python-cinderclient \ + python-heatclient \ + python-keystoneclient \ python-novaclient + python-swiftclient diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 348aae57..db533a1e 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -9,8 +9,10 @@ sources: packages: - amulet - python-amulet + - python-cinderclient - python-distro-info - - python-keystoneclient - python-glanceclient - - python-cinderclient + - python-heatclient + - python-keystoneclient - python-novaclient + - python-swiftclient From 2f6c6eb05281377d676dd71d308c968236125904 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 18:39:56 +0000 Subject: [PATCH 0745/2699] Update publish target in makefile; update 00-setup and tests.yaml for dependencies. 
--- ceph-mon/Makefile | 2 +- ceph-mon/tests/00-setup | 6 ++++-- ceph-mon/tests/tests.yaml | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 541b1fa8..93e2758b 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -24,6 +24,6 @@ sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint +publish: lint test bzr push lp:charms/ceph bzr push lp:charms/trusty/ceph diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index d6882caf..ee5f0332 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -5,8 +5,10 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes sudo apt-get install --yes python-amulet \ + python-cinderclient \ python-distro-info \ - python-keystoneclient \ python-glanceclient \ - python-cinderclient \ + python-heatclient \ + python-keystoneclient \ python-novaclient + python-swiftclient diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 348aae57..db533a1e 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -9,8 +9,10 @@ sources: packages: - amulet - python-amulet + - python-cinderclient - python-distro-info - - python-keystoneclient - python-glanceclient - - python-cinderclient + - python-heatclient + - python-keystoneclient - python-novaclient + - python-swiftclient From fe52849687182a408f7de82ace4c501559b371ef Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 19:25:36 +0000 Subject: [PATCH 0746/2699] fix 00-setup --- ceph-radosgw/tests/00-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/00-setup index ee5f0332..dd9158fd 100755 --- a/ceph-radosgw/tests/00-setup +++ b/ceph-radosgw/tests/00-setup @@ -10,5 +10,5 @@ sudo apt-get install --yes python-amulet \ python-glanceclient \ python-heatclient \ python-keystoneclient \ - python-novaclient + python-novaclient \ python-swiftclient From 9d4d8d2b70d775d1045779c853b154b76c085086 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 19:25:50 +0000 Subject: [PATCH 0747/2699] fix 00-setup --- ceph-osd/tests/00-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/00-setup index ee5f0332..dd9158fd 100755 --- a/ceph-osd/tests/00-setup +++ b/ceph-osd/tests/00-setup @@ -10,5 +10,5 @@ sudo apt-get install --yes python-amulet \ python-glanceclient \ python-heatclient \ python-keystoneclient \ - python-novaclient + python-novaclient \ python-swiftclient From 97bd7c109ac7a5a44162f66b96f066f8ec469980 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 19:26:04 +0000 Subject: [PATCH 0748/2699] fix 00-setup --- ceph-proxy/tests/00-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index ee5f0332..dd9158fd 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -10,5 +10,5 @@ sudo apt-get install --yes python-amulet \ python-glanceclient \ python-heatclient \ python-keystoneclient \ - python-novaclient + python-novaclient \ python-swiftclient From 0c8d2bd524e5aa895dc1b6fb6cefabf4d3d624f9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 19:26:04 +0000 Subject: [PATCH 0749/2699] fix 00-setup --- ceph-mon/tests/00-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index ee5f0332..dd9158fd 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -10,5 +10,5 @@ sudo apt-get install --yes python-amulet \ python-glanceclient \ python-heatclient \ python-keystoneclient \ - python-novaclient + python-novaclient \ python-swiftclient From 88f05d48918b058bff78d25548996a140e2ffb76 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 20:13:41 +0000 Subject: [PATCH 0750/2699] update test --- ceph-proxy/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index abdb917b..de9cfcd5 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -158,7 +158,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, 'ceph-mon': 1, 'ceph-osd': 2 } From b607b69fbc34144ae4d80188fe54c474babf9f5a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 20:13:41 +0000 Subject: [PATCH 0751/2699] update test --- ceph-mon/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index abdb917b..de9cfcd5 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -158,7 +158,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, 'ceph-mon': 1, 'ceph-osd': 2 } From 739c44775a7b7e63aabf2a7ee36901852796408d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 20:14:43 +0000 Subject: [PATCH 0752/2699] update test --- ceph-osd/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 102ece16..720a24fb 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -169,7 +169,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, 'ceph-mon': 1, 'ceph-osd': 2 } From 658a0a0f56b14095ae331b1e658adff24ea83b16 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 20:15:06 +0000 Subject: [PATCH 0753/2699] update test --- ceph-radosgw/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index a520a654..06d061fe 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -162,7 +162,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, 'ceph-mon': 1, 'ceph-osd': 2 } From 57cc10ed56cfb7a3a7f0f133b790522af74b8b23 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 29 Jun 2015 21:20:01 +0000 Subject: [PATCH 0754/2699] add pre-kilo ceph-mon service status checks --- ceph-osd/tests/basic_deployment.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 720a24fb..4aba7fc0 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -8,7 +8,7 @@ from charmhelpers.contrib.openstack.amulet.utils import ( OpenStackAmuletUtils, DEBUG, - #ERROR + # ERROR ) # Use DEBUG to turn on debug logging @@ -214,8 +214,11 @@ def 
test_102_services(self): services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services services[self.ceph2_sentry] = ceph_services - - #!? add check for ceph_osd_sentry upstart services + services[self.ceph_osd_sentry] = [ + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] ret = u.validate_services_by_name(services) if ret: From fb2a87b08f7afc7f69d095c0cea8f8a6e8505334 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 1 Jul 2015 14:47:18 +0000 Subject: [PATCH 0755/2699] update tags for consistency with other openstack charms --- ceph-radosgw/metadata.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a798ddef..59d6d9d9 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -8,6 +8,9 @@ description: | This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols for object storage. tags: + - openstack + - storage + - file-servers - misc requires: mon: From e97fc9133464a9d01600d165fbaa7a58c1feaece Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 1 Jul 2015 14:47:29 +0000 Subject: [PATCH 0756/2699] update tags for consistency with other openstack charms --- ceph-osd/metadata.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 2f571eff..74e207e3 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -6,7 +6,10 @@ provides: interface: nrpe-external-master scope: container tags: - - misc + - openstack + - storage + - file-servers + - misc description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 2a3685698b4e2d7f3535fdc5f8c550ee8ead92cb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 1 Jul 2015 14:47:39 +0000 Subject: [PATCH 0757/2699] update tags for consistency with other openstack charms --- ceph-proxy/metadata.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index d67159ee..5afda9ed 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -5,7 +5,10 @@ description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. tags: - - file-servers + - openstack + - storage + - file-servers + - misc peers: mon: interface: ceph From fda1175c4b2b60322b260cf2e976cdb6b40aaf41 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 1 Jul 2015 14:47:39 +0000 Subject: [PATCH 0758/2699] update tags for consistency with other openstack charms --- ceph-mon/metadata.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index d67159ee..5afda9ed 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -5,7 +5,10 @@ description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. 
tags: - - file-servers + - openstack + - storage + - file-servers + - misc peers: mon: interface: ceph From 111b32c27bece4fa9a204de1bc61b3eb101cbe3c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 10 Jul 2015 15:12:01 +0100 Subject: [PATCH 0759/2699] [trivial] Cleanup config.yaml Partially-Closes-Bug: 1473426 --- ceph-osd/config.yaml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index fb314719..fdba28f5 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -4,10 +4,10 @@ options: default: /dev/vdb description: | The devices to format and set up as osd volumes. - . + These devices are the range of devices that will be checked for and used across all service units. - . + For ceph >= 0.56.6 these can also be directories instead of devices - the charm assumes anything not starting with /dev is a directory instead. osd-journal: @@ -16,7 +16,7 @@ options: description: | The device to use as a shared journal drive for all OSD's. By default no journal device will be used. - . + Only supported with ceph >= 0.48.3. osd-journal-size: type: int @@ -27,18 +27,18 @@ options: interval. However, the most common practice is to partition the journal drive (often an SSD), and mount it such that Ceph uses the entire partition for the journal. - . + Only supported with ceph >= 0.48.3. osd-format: type: string default: xfs description: | Format of filesystem to use for OSD devices; supported formats include: - . + xfs (Default >= 0.48.3) ext4 (Only option < 0.48.3) btrfs (experimental and not recommended) - . + Only supported with ceph >= 0.48.3. osd-reformat: type: string @@ -47,7 +47,7 @@ options: By default, the charm will not re-format a device that already looks as if it might be an OSD device. This is a safeguard to try to prevent data loss. - . + Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. ignore-device-errors: @@ -57,7 +57,7 @@ options: By default, the charm will raise errors if a whitelisted device is found, but for some reason the charm is unable to initialize the device for use by Ceph. - . + Setting this option to 'True' will result in the charm classifying such problems as warnings only and will not result in a hook error. ephemeral-unmount: @@ -66,23 +66,23 @@ options: description: | Cloud instances provider ephermeral storage which is normally mounted on /mnt. - . + Providing this option will force an unmount of the ephemeral device so that it can be used as a OSD storage device. This is useful for testing purposes (cloud deployment is not a typical use case). source: type: string - default: cloud:precise-updates/folsom + default: description: | Optional configuration to support use of additional sources such as: - . + - ppa:myteam/ppa - - cloud:precise-proposed/folsom + - cloud:trusty-proposed/kilo - http://my.archive.com/ubuntu main - . + The last option should be used in conjunction with the key configuration option. - . + Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive for precise but is provided in the Ubuntu cloud archive. 
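For context on the source option documented above: operators set it with the juju 1.x CLI of the period (for example, juju set ceph-osd source=cloud:trusty-proposed/kilo), and the charm consumes it through charm-helpers. A minimal sketch of the charm-side plumbing, assuming the usual install-hook pattern; add_source(source, key=None) is the charmhelpers.fetch helper synced elsewhere in this series:

    from charmhelpers.core.hookenv import config
    from charmhelpers.fetch import add_source, apt_update

    # Register the extra archive named by the 'source' option; 'key' carries
    # the archive's GPG signing key when a private archive is configured.
    add_source(config('source'), config('key'))
    # Refresh the package index so the new pocket's packages are visible.
    apt_update(fatal=True)
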
From defe4da78b3647b7f2f536a45be688a0107e9563 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 10 Jul 2015 15:14:09 +0100 Subject: [PATCH 0760/2699] [trivial] Cleanup config.yaml Partially-Closes-Bug: 1473426 --- ceph-radosgw/config.yaml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 21f02fbf..8a9e76eb 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -1,20 +1,20 @@ options: source: type: string - default: cloud:precise-updates/folsom + default: description: | Optional configuration to support use of additional sources such as: - . + - ppa:myteam/ppa - - cloud:precise-proposed/folsom + - cloud:trusty-proposed/kilo - http://my.archive.com/ubuntu main - . + The last option should be used in conjunction with the key configuration option. - . + Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise. + for precise but is provided in the Ubuntu cloud archive. key: type: string default: @@ -32,8 +32,8 @@ options: default: RegionOne type: string description: | - OpenStack region that the RADOS gateway supports; used when integrating with - OpenStack Keystone. + OpenStack region that the RADOS gateway supports; used when integrating + with OpenStack Keystone. cache-size: default: 500 type: int @@ -51,12 +51,12 @@ options: type: boolean default: false description: | - By default apache2 and libapache2-mod-fastcgi will be installed from the - Ubuntu archives. This option allows for an alternate ceph.com install - source which contains patched versions with added support for HTTP - 100-continue. See the following page for more info: + By default apache2 and libapache2-mod-fastcgi will be installed from the + Ubuntu archives. This option allows for an alternate ceph.com install + source which contains patched versions with added support for HTTP + 100-continue. See the following page for more info: - http://ceph.com/docs/dumpling/radosgw/manual-install/#continue-support + http://ceph.com/docs/dumpling/radosgw/manual-install/#continue-support use-embedded-webserver: type: boolean default: false @@ -64,7 +64,7 @@ options: Newer versions of the Ceph RADOS Gateway support use of an embedded web container instead of Apache + mod-fastcgi, avoiding some of the nuances of using the stock mod-fastcgi packages from Ubuntu. - . + Enable this option to disable use of Apache and enable the embedded web container feature. vip: @@ -72,7 +72,7 @@ options: default: description: | Virtual IP(s) to use to front API services in HA configuration. - . + If multiple networks are being used, a VIP should be provided for each network, separated by spaces. ha-bindiface: @@ -93,9 +93,9 @@ options: description: | The hostname or address of the public endpoints created for ceph-radosgw in the keystone identity provider. - . + This value will be used for public endpoints. For example, an os-public-hostname set to 'files.example.com' with will create the following public endpoint for the ceph-radosgw: - . 
+ https://files.example.com:80/swift/v1 From a84fdc6946479944b152641146404a9a5ca87c18 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 10 Jul 2015 15:14:18 +0100 Subject: [PATCH 0761/2699] [trivial] Cleanup config.yaml Partially-Closes-Bug: 1473426 --- ceph-proxy/config.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 433543c8..b1880de1 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -109,17 +109,17 @@ options: default: description: | Optional configuration to support use of additional sources such as: - . + - ppa:myteam/ppa - - cloud:precise-proposed/icehouse + - cloud:trusty-proposed/kilo - http://my.archive.com/ubuntu main - . + The last option should be used in conjunction with the key configuration option. - . + Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Folsom cloud archive. + for precise but is provided in the Ubuntu cloud archive. key: type: string default: @@ -150,7 +150,7 @@ options: If True enables IPv6 support. The charm will expect network interfaces to be configured with an IPv6 address. If set to False (default) IPv4 is expected. - . + NOTE: these charms do not currently support IPv6 privacy extension. In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on From 5fa28a1fa6899c49291051eb980f59ba17f07e23 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 10 Jul 2015 15:14:18 +0100 Subject: [PATCH 0762/2699] [trivial] Cleanup config.yaml Partially-Closes-Bug: 1473426 --- ceph-mon/config.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 433543c8..b1880de1 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -109,17 +109,17 @@ options: default: description: | Optional configuration to support use of additional sources such as: - . + - ppa:myteam/ppa - - cloud:precise-proposed/icehouse + - cloud:trusty-proposed/kilo - http://my.archive.com/ubuntu main - . + The last option should be used in conjunction with the key configuration option. - . + Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Folsom cloud archive. + for precise but is provided in the Ubuntu cloud archive. key: type: string default: @@ -150,7 +150,7 @@ options: If True enables IPv6 support. The charm will expect network interfaces to be configured with an IPv6 address. If set to False (default) IPv4 is expected. - . + NOTE: these charms do not currently support IPv6 privacy extension. 
In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on From 57b0e131dc7d57ffec3bc01239f5a4362fa42d6d Mon Sep 17 00:00:00 2001 From: JuanJo Ciarlante Date: Wed, 15 Jul 2015 16:57:27 -0300 Subject: [PATCH 0763/2699] [jjo, r=] fix nrpe ceph-osd check: use /var/lib/ceph/osd/ceph-*/whoami to loop over "status ceph-osd id=X" - fixes lp#1474989 --- ceph-osd/hooks/hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index b0e3a54f..e28e9c1c 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -221,7 +221,8 @@ def update_nrpe_config(): nrpe_setup.add_check( shortname='ceph-osd', description='process check {%s}' % current_unit, - check_cmd='check_upstart_job ceph-osd', + check_cmd=('/bin/cat /var/lib/ceph/osd/ceph-*/whoami |' + 'xargs -I@ status ceph-osd id=@ && exit 0 || exit 2') ) nrpe_setup.write() From 31db1871f65c7194903ae1da3c2fefec25aee11c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 29 Jul 2015 11:48:21 +0100 Subject: [PATCH 0764/2699] [gnuoy,trivial] Pre-release charmhelper sync --- .../contrib/storage/linux/ceph.py | 12 ++--- .../contrib/storage/linux/utils.py | 2 +- ceph-proxy/hooks/charmhelpers/core/files.py | 45 +++++++++++++++++++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 4 +- ceph-proxy/hooks/charmhelpers/core/host.py | 36 ++++++++++++--- .../charmhelpers/core/services/helpers.py | 4 +- .../hooks/charmhelpers/fetch/__init__.py | 23 +++++++--- .../hooks/charmhelpers/fetch/archiveurl.py | 8 +++- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 2 +- 9 files changed, 112 insertions(+), 24 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/files.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 31ea7f9e..00dbffb4 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -60,12 +60,12 @@ KEYFILE = '/etc/ceph/ceph.client.{}.key' CEPH_CONF = """[global] - auth supported = {auth} - keyring = {keyring} - mon host = {mon_hosts} - log to syslog = {use_syslog} - err to syslog = {use_syslog} - clog to syslog = {use_syslog} +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} """ diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..e2769e49 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/files.py b/ceph-proxy/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 0add16d4..15b09d11 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -263,7 +264,7 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -761,6 +762,7 @@ def atstart(callback, *args, **kwargs): This is useful for modules and classes to perform initialization and inject behavior. In particular: + - Run common code before all of your hooks, such as logging the hook name or interesting relation data. - Defer object or module initialization that requires a hook diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 901a4cfe..8ae8ef86 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False): return service_result +def service_pause(service_name, init_dir=None): + """Pause a system service. + + Stop it, and prevent it from starting again at boot.""" + if init_dir is None: + init_dir = "/etc/init" + stopped = service_stop(service_name) + # XXX: Support systemd too + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + return stopped + + +def service_resume(service_name, init_dir=None): + """Resume a system service. + + Reenable starting again at boot. 
Start the service""" + # XXX: Support systemd too + if init_dir is None: + init_dir = "/etc/init" + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + started = service_start(service_name) + return started + + def service(action, service_name): """Control a system service""" cmd = ['service', service_name, action] @@ -140,11 +170,7 @@ def add_group(group_name, system_group=False): def add_user_to_group(username, group): """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] + cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 3eb5fb44..8005c415 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback): action. :param str source: The template source file, relative to - `$CHARM_DIR/templates` - + `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file + """ def __init__(self, source, target, owner='root', group='root', perms=0o444): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 9a1a2515..0a3bb969 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark""" + cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: @@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False): log("Holding {}".format(packages)) if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, universal_newlines=True) else: - subprocess.call(cmd) + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) def add_source(source, key=None): @@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs): for handler in handlers: try: installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') if not installed_to: raise UnhandledSource("No handler found for source {}".format(source)) return installed_to diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index 8dfce505..efd7f9f0 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? 
It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. return "Wrong source type" if get_archive_handler(self.base_url(source)): return True @@ -155,7 +157,11 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): else: algorithms = hashlib.algorithms_available if key in algorithms: - check_hash(dld_file, value, key) + if len(value) != 1: + raise TypeError( + "Expected 1 hash value, not %d" % len(value)) + expected = value[0] + check_hash(dld_file, expected, key) if checksum: check_hash(dld_file, checksum, hash_type) return extract(dld_file, dest) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index ddc25b7e..f023b26d 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -67,7 +67,7 @@ def install(self, source, branch="master", dest=None, depth=None): try: self.clone(source, dest_dir, branch, depth) except GitCommandError as e: - raise UnhandledSource(e.message) + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir From e1e620729138e820095f5faf1dc8cedb42da50aa Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 29 Jul 2015 11:48:21 +0100 Subject: [PATCH 0765/2699] [gnuoy,trivial] Pre-release charmhelper sync --- .../contrib/storage/linux/ceph.py | 12 ++--- .../contrib/storage/linux/utils.py | 2 +- ceph-mon/hooks/charmhelpers/core/files.py | 45 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 4 +- ceph-mon/hooks/charmhelpers/core/host.py | 36 ++++++++++++--- .../charmhelpers/core/services/helpers.py | 4 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 23 +++++++--- .../hooks/charmhelpers/fetch/archiveurl.py | 8 +++- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 2 +- 9 files changed, 112 insertions(+), 24 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/files.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 31ea7f9e..00dbffb4 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -60,12 +60,12 @@ KEYFILE = '/etc/ceph/ceph.client.{}.key' CEPH_CONF = """[global] - auth supported = {auth} - keyring = {keyring} - mon host = {mon_hosts} - log to syslog = {use_syslog} - err to syslog = {use_syslog} - clog to syslog = {use_syslog} +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} """ diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..e2769e49 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/ceph-mon/hooks/charmhelpers/core/files.py b/ceph-mon/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 0add16d4..15b09d11 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -263,7 +264,7 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -761,6 +762,7 @@ def atstart(callback, *args, **kwargs): This is useful for modules and classes to perform initialization and inject behavior. In particular: + - Run common code before all of your hooks, such as logging the hook name or interesting relation data. - Defer object or module initialization that requires a hook diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 901a4cfe..8ae8ef86 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False): return service_result +def service_pause(service_name, init_dir=None): + """Pause a system service. + + Stop it, and prevent it from starting again at boot.""" + if init_dir is None: + init_dir = "/etc/init" + stopped = service_stop(service_name) + # XXX: Support systemd too + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + return stopped + + +def service_resume(service_name, init_dir=None): + """Resume a system service. + + Reenable starting again at boot. 
Start the service""" + # XXX: Support systemd too + if init_dir is None: + init_dir = "/etc/init" + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + started = service_start(service_name) + return started + + def service(action, service_name): """Control a system service""" cmd = ['service', service_name, action] @@ -140,11 +170,7 @@ def add_group(group_name, system_group=False): def add_user_to_group(username, group): """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] + cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 3eb5fb44..8005c415 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback): action. :param str source: The template source file, relative to - `$CHARM_DIR/templates` - + `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file + """ def __init__(self, source, target, owner='root', group='root', perms=0o444): diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 9a1a2515..0a3bb969 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark""" + cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: @@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False): log("Holding {}".format(packages)) if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, universal_newlines=True) else: - subprocess.call(cmd) + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) def add_source(source, key=None): @@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs): for handler in handlers: try: installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') if not installed_to: raise UnhandledSource("No handler found for source {}".format(source)) return installed_to diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index 8dfce505..efd7f9f0 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? 
It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. return "Wrong source type" if get_archive_handler(self.base_url(source)): return True @@ -155,7 +157,11 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): else: algorithms = hashlib.algorithms_available if key in algorithms: - check_hash(dld_file, value, key) + if len(value) != 1: + raise TypeError( + "Expected 1 hash value, not %d" % len(value)) + expected = value[0] + check_hash(dld_file, expected, key) if checksum: check_hash(dld_file, checksum, hash_type) return extract(dld_file, dest) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index ddc25b7e..f023b26d 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -67,7 +67,7 @@ def install(self, source, branch="master", dest=None, depth=None): try: self.clone(source, dest_dir, branch, depth) except GitCommandError as e: - raise UnhandledSource(e.message) + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir From 3ba53f0474d8cc56b76a29088cf4e8a14eab47b9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 29 Jul 2015 17:23:16 +0000 Subject: [PATCH 0766/2699] remove amulet tests for unsupported releases --- ceph-proxy/tests/018-basic-utopic-juno | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-proxy/tests/018-basic-utopic-juno diff --git a/ceph-proxy/tests/018-basic-utopic-juno b/ceph-proxy/tests/018-basic-utopic-juno deleted file mode 100755 index 9b9b760f..00000000 --- a/ceph-proxy/tests/018-basic-utopic-juno +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on utopic-juno.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='utopic') - deployment.run_tests() From 44d1fd9eeabe9a7758f0182e10068b00062104a9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 29 Jul 2015 17:23:16 +0000 Subject: [PATCH 0767/2699] remove amulet tests for unsupported releases --- ceph-mon/tests/018-basic-utopic-juno | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-mon/tests/018-basic-utopic-juno diff --git a/ceph-mon/tests/018-basic-utopic-juno b/ceph-mon/tests/018-basic-utopic-juno deleted file mode 100755 index 9b9b760f..00000000 --- a/ceph-mon/tests/018-basic-utopic-juno +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on utopic-juno.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='utopic') - deployment.run_tests() From 109377fd828f284713f0e367706cf398bee149f0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 29 Jul 2015 17:23:18 +0000 Subject: [PATCH 0768/2699] remove amulet tests for unsupported releases --- ceph-osd/tests/018-basic-utopic-juno | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-osd/tests/018-basic-utopic-juno diff --git a/ceph-osd/tests/018-basic-utopic-juno b/ceph-osd/tests/018-basic-utopic-juno deleted file mode 100755 index 6241fb10..00000000 --- a/ceph-osd/tests/018-basic-utopic-juno +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph-osd deployment on utopic-juno.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='utopic') - deployment.run_tests() From 
60518af80731bd7eac3eaef734bd3122c4835a43 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 29 Jul 2015 17:23:19 +0000 Subject: [PATCH 0769/2699] remove amulet tests for unsupported releases --- ceph-radosgw/tests/018-basic-utopic-juno | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-radosgw/tests/018-basic-utopic-juno diff --git a/ceph-radosgw/tests/018-basic-utopic-juno b/ceph-radosgw/tests/018-basic-utopic-juno deleted file mode 100755 index 4e4f5e59..00000000 --- a/ceph-radosgw/tests/018-basic-utopic-juno +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph-radosgw deployment on utopic-juno.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='utopic') - deployment.run_tests() From fdbc718f69709eaf93bb35eab80d4e426674e4ad Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 31 Jul 2015 14:11:12 +0100 Subject: [PATCH 0770/2699] [gnuoy,trivial] Pre-release charmhelper sync to pickup cli module --- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/hooks/charmhelpers/cli/__init__.py | 195 ++++++++++++++++++ .../hooks/charmhelpers/cli/benchmark.py | 36 ++++ ceph-proxy/hooks/charmhelpers/cli/commands.py | 32 +++ ceph-proxy/hooks/charmhelpers/cli/host.py | 31 +++ ceph-proxy/hooks/charmhelpers/cli/unitdata.py | 39 ++++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 106 +++++++++- .../hooks/charmhelpers/core/unitdata.py | 78 +++++-- 8 files changed, 497 insertions(+), 21 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/cli/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/cli/benchmark.py create mode 100644 ceph-proxy/hooks/charmhelpers/cli/commands.py create mode 100644 ceph-proxy/hooks/charmhelpers/cli/host.py create mode 100644 ceph-proxy/hooks/charmhelpers/cli/unitdata.py diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index b75fd927..eeee6f8c 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - cli - fetch - contrib.storage.linux: - utils diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/hooks/charmhelpers/cli/__init__.py new file mode 100644 index 00000000..7118daf5 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import inspect +import argparse +import sys + +from six.moves import zip + +from charmhelpers.core import unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + kwargs = {} + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + if argspec.keywords: + for kwarg in argspec.keywords.items(): + kwargs[kwarg] = getattr(arguments, kwarg) + output = arguments.func(*vargs, **kwargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if unitdata._KV: + unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/ceph-proxy/hooks/charmhelpers/cli/benchmark.py b/ceph-proxy/hooks/charmhelpers/cli/benchmark.py new file mode 100644 index 00000000..b23c16ce --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/benchmark.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/ceph-proxy/hooks/charmhelpers/cli/commands.py b/ceph-proxy/hooks/charmhelpers/cli/commands.py new file mode 100644 index 00000000..443ff05d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/commands.py @@ -0,0 +1,32 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +import host # noqa +import benchmark # noqa +import unitdata # noqa +from charmhelpers.core import hookenv # noqa diff --git a/ceph-proxy/hooks/charmhelpers/cli/host.py b/ceph-proxy/hooks/charmhelpers/cli/host.py new file mode 100644 index 00000000..58e78d6b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/host.py @@ -0,0 +1,31 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/ceph-proxy/hooks/charmhelpers/cli/unitdata.py b/ceph-proxy/hooks/charmhelpers/cli/unitdata.py new file mode 100644 index 00000000..d1cd95bf --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/unitdata.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import unitdata + + +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") +def unitdata_cmd(subparser): + nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') + get_cmd.add_argument('key', help='Key to retrieve the value of') + get_cmd.set_defaults(action='get', value=None) + set_cmd = nested.add_parser('set', help='Store data') + set_cmd.add_argument('key', help='Key to set') + set_cmd.add_argument('value', help='Value to store') + set_cmd.set_defaults(action='set') + + def _unitdata_cmd(action, key, value): + if action == 'get': + return unitdata.kv().get(key) + elif action == 'set': + unitdata.kv().set(key, value) + unitdata.kv().flush() + return '' + return _unitdata_cmd diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 15b09d11..6e4fb686 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers.cli import cmdline + import six if not six.PY3: from UserDict import UserDict @@ -173,9 +175,20 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cmdline.subcommand() +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -188,14 +201,27 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) +@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return 
local_unit().split('/')[0] +@cmdline.subcommand() +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -468,6 +494,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -644,6 +727,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ def config_changed(): import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ def config_changed(): class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. 
+ Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. @@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ def close(self): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ def get(self, key, default=None, record=False): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value From 3d95e24e7805f87975a40264c5f8ff6c366cc7a7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 31 Jul 2015 14:11:12 +0100 Subject: [PATCH 0771/2699] [gnuoy,trivial] Pre-release charmhelper sync to pickup cli module --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/hooks/charmhelpers/cli/__init__.py | 195 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/cli/benchmark.py | 36 ++++ ceph-mon/hooks/charmhelpers/cli/commands.py | 32 +++ ceph-mon/hooks/charmhelpers/cli/host.py | 31 +++ ceph-mon/hooks/charmhelpers/cli/unitdata.py | 39 ++++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 106 +++++++++- ceph-mon/hooks/charmhelpers/core/unitdata.py | 78 ++++++-- 8 files changed, 497 insertions(+), 21 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/cli/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/cli/benchmark.py create mode 100644 ceph-mon/hooks/charmhelpers/cli/commands.py create mode 100644 ceph-mon/hooks/charmhelpers/cli/host.py create mode 100644 ceph-mon/hooks/charmhelpers/cli/unitdata.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index b75fd927..eeee6f8c 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - cli - fetch - contrib.storage.linux: - utils diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py new file mode 100644 index 00000000..7118daf5 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
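The ceph-mon copy of the cli module below is byte-identical to the ceph-proxy one. For reference, the OutputFormatter it defines can also be driven directly; a small sketch with assumed data (standard library plus PyYAML only):

    import sys
    from charmhelpers.cli import OutputFormatter

    fmt = OutputFormatter(sys.stdout)
    # Emit the same data in two of the supported formats
    fmt.format_output({'fsid': 'a1b2', 'monitor-count': 3}, 'json')
    fmt.format_output([('key', 'value'), ('fsid', 'a1b2')], 'csv')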
+ +import inspect +import argparse +import sys + +from six.moves import zip + +from charmhelpers.core import unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + kwargs = {} + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + if argspec.keywords: + for kwarg in argspec.keywords.items(): + kwargs[kwarg] = getattr(arguments, kwarg) + output = arguments.func(*vargs, **kwargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if unitdata._KV: + unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/ceph-mon/hooks/charmhelpers/cli/benchmark.py b/ceph-mon/hooks/charmhelpers/cli/benchmark.py new file mode 100644 index 00000000..b23c16ce --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/benchmark.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/ceph-mon/hooks/charmhelpers/cli/commands.py b/ceph-mon/hooks/charmhelpers/cli/commands.py new file mode 100644 index 00000000..443ff05d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/commands.py @@ -0,0 +1,32 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +import host # noqa +import benchmark # noqa +import unitdata # noqa +from charmhelpers.core import hookenv # noqa diff --git a/ceph-mon/hooks/charmhelpers/cli/host.py b/ceph-mon/hooks/charmhelpers/cli/host.py new file mode 100644 index 00000000..58e78d6b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/host.py @@ -0,0 +1,31 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/ceph-mon/hooks/charmhelpers/cli/unitdata.py b/ceph-mon/hooks/charmhelpers/cli/unitdata.py new file mode 100644 index 00000000..d1cd95bf --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/unitdata.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import unitdata + + +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") +def unitdata_cmd(subparser): + nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') + get_cmd.add_argument('key', help='Key to retrieve the value of') + get_cmd.set_defaults(action='get', value=None) + set_cmd = nested.add_parser('set', help='Store data') + set_cmd.add_argument('key', help='Key to set') + set_cmd.add_argument('value', help='Value to store') + set_cmd.set_defaults(action='set') + + def _unitdata_cmd(action, key, value): + if action == 'get': + return unitdata.kv().get(key) + elif action == 'set': + unitdata.kv().set(key, value) + unitdata.kv().flush() + return '' + return _unitdata_cmd diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 15b09d11..6e4fb686 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers.cli import cmdline + import six if not six.PY3: from UserDict import UserDict @@ -173,9 +175,20 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cmdline.subcommand() +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -188,14 +201,27 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) +@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] 
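A usage sketch for the remote_service_name helper added in the next hunk, assuming it runs inside a Juju relation hook where JUJU_REMOTE_UNIT is set to, say, "mysql/0" (the relation id below is hypothetical):

    from charmhelpers.core.hookenv import remote_service_name

    # Current relation: derives the service from JUJU_REMOTE_UNIT
    remote_service_name()        # -> 'mysql'
    # Explicit relation id: the first related unit is used
    remote_service_name('db:7')  # -> e.g. 'mysql', or None if no units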
+@cmdline.subcommand() +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -468,6 +494,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -644,6 +727,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ def config_changed(): import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ def config_changed(): class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. 
+ Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. @@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ def close(self): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ def get(self, key, default=None, record=False): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value From b935fc1e7e65012e3bcc7885ef98f47ab34b317a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2015 14:59:51 +0100 Subject: [PATCH 0772/2699] [gnuoy,trivial] Pre-release charmhelper sync to pickup leadership election peer migration fix --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 6e4fb686..18860f59 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -34,7 +34,22 @@ import tempfile from subprocess import CalledProcessError -from charmhelpers.cli import cmdline +try: + from charmhelpers.cli import cmdline +except ImportError as e: + # due to the anti-pattern of partially synching charmhelpers directly + # into charms, it's possible that charmhelpers.cli is not available; + # if that's the case, they don't really care about using the cli anyway, + # so mock it out + if str(e) == 'No module named cli': + class cmdline(object): + @classmethod + def subcommand(cls, *args, **kwargs): + def _wrap(func): + return func + return _wrap + else: + raise import six if not six.PY3: From 9cc02d9456f2d8bfe2826911f0d1b020f7b3205a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2015 14:59:51 +0100 Subject: [PATCH 0773/2699] [gnuoy,trivial] Pre-release charmhelper sync to pickup leadership election peer migration fix --- ceph-mon/hooks/charmhelpers/core/hookenv.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 6e4fb686..18860f59 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -34,7 +34,22 @@ import tempfile from subprocess import CalledProcessError -from charmhelpers.cli import cmdline +try: + from charmhelpers.cli import cmdline +except ImportError as e: + # due to the anti-pattern of partially synching charmhelpers directly + # into charms, it's possible that charmhelpers.cli is not available; + # if that's the case, they don't really care about using the cli anyway, + # so mock it out + if str(e) == 'No module named cli': + class cmdline(object): + @classmethod + def subcommand(cls, *args, **kwargs): + def _wrap(func): + return func + return _wrap + else: + raise import six if not six.PY3: From 1887c9dba4a25dc93610f14477bc49de089bfc55 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2015 15:53:01 +0100 Subject: [PATCH 0774/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + .../hooks/charmhelpers/cli/__init__.py | 195 ++++++++++++ .../hooks/charmhelpers/cli/benchmark.py | 36 +++ .../hooks/charmhelpers/cli/commands.py | 32 ++ ceph-radosgw/hooks/charmhelpers/cli/host.py | 31 ++ .../hooks/charmhelpers/cli/unitdata.py | 39 +++ .../contrib/openstack/amulet/deployment.py | 39 ++- .../contrib/openstack/amulet/utils.py | 287 +++++++++++++++--- .../charmhelpers/contrib/openstack/context.py | 79 +++-- .../contrib/openstack/templates/ceph.conf | 12 +- 
.../contrib/openstack/templating.py | 4 +- .../charmhelpers/contrib/openstack/utils.py | 14 +- .../contrib/storage/linux/ceph.py | 12 +- .../contrib/storage/linux/utils.py | 2 +- ceph-radosgw/hooks/charmhelpers/core/files.py | 45 +++ .../hooks/charmhelpers/core/hookenv.py | 125 +++++++- ceph-radosgw/hooks/charmhelpers/core/host.py | 36 ++- .../charmhelpers/core/services/helpers.py | 4 +- .../hooks/charmhelpers/core/unitdata.py | 78 +++-- .../hooks/charmhelpers/fetch/__init__.py | 23 +- .../hooks/charmhelpers/fetch/archiveurl.py | 8 +- .../hooks/charmhelpers/fetch/giturl.py | 2 +- 22 files changed, 962 insertions(+), 142 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/benchmark.py create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/commands.py create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/host.py create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/unitdata.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/files.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 768108d0..f8185cbb 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - cli - fetch - contrib.storage.linux: - utils diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py new file mode 100644 index 00000000..7118daf5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
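As in the other charms' copies of this module, describe_arguments (defined at the bottom of the file below) maps a Python signature onto argparse arguments: defaulted parameters become options, the rest positionals. A quick illustration; the example function is hypothetical:

    from __future__ import print_function
    from charmhelpers.cli import describe_arguments

    def example(key, value=None):
        """Hypothetical function, not part of the sync."""

    for args, kwargs in describe_arguments(example):
        print(args, kwargs)
    # ('--value',) {'default': None}
    # ('key',) {}

Note the defaulted parameter is yielded first, so options are registered before positionals.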
+ +import inspect +import argparse +import sys + +from six.moves import zip + +from charmhelpers.core import unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + kwargs = {} + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + if argspec.keywords: + for kwarg in argspec.keywords.items(): + kwargs[kwarg] = getattr(arguments, kwarg) + output = arguments.func(*vargs, **kwargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if unitdata._KV: + unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py b/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py new file mode 100644 index 00000000..b23c16ce --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/ceph-radosgw/hooks/charmhelpers/cli/commands.py b/ceph-radosgw/hooks/charmhelpers/cli/commands.py new file mode 100644 index 00000000..443ff05d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/commands.py @@ -0,0 +1,32 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +import host # noqa +import benchmark # noqa +import unitdata # noqa +from charmhelpers.core import hookenv # noqa diff --git a/ceph-radosgw/hooks/charmhelpers/cli/host.py b/ceph-radosgw/hooks/charmhelpers/cli/host.py new file mode 100644 index 00000000..58e78d6b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/host.py @@ -0,0 +1,31 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py new file mode 100644 index 00000000..d1cd95bf --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import unitdata + + +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") +def unitdata_cmd(subparser): + nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') + get_cmd.add_argument('key', help='Key to retrieve the value of') + get_cmd.set_defaults(action='get', value=None) + set_cmd = nested.add_parser('set', help='Store data') + set_cmd.add_argument('key', help='Key to set') + set_cmd.add_argument('value', help='Value to store') + set_cmd.set_defaults(action='set') + + def _unitdata_cmd(action, key, value): + if action == 'get': + return unitdata.kv().get(key) + elif action == 'set': + unitdata.kv().set(key, value) + unitdata.kv().flush() + return '' + return _unitdata_cmd diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index c664c9d0..b01e6cb8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -79,9 +79,9 @@ def _add_services(self, this_service, other_services): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Openstack subordinate charms do not expose an origin option as that - # is controlled by the principle - ignore = ['neutron-openvswitch'] + # Most OpenStack subordinate charms do not expose an origin option + # as that is controlled by the principle. 
+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] if self.openstack: for svc in services: @@ -148,3 +148,36 @@ def _get_openstack_release_string(self): return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 576bf0b5..03f79277 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,16 +14,20 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import amulet +import json import logging import os import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -171,6 +175,16 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" @@ -212,9 +226,29 @@ def authenticate_nova_user(self, keystone, user, password, tenant): return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - self.log.debug('Creating glance image ({})...'.format(image_name)) + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -223,33 +257,51 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Image validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
 
         return image
 
@@ -260,22 +312,7 @@ def delete_image(self, glance, image):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_image.')
         self.log.debug('Deleting glance image ({})...'.format(image))
-        num_before = len(list(glance.images.list()))
-        glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(glance.images, image, msg='glance image')
 
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
@@ -308,22 +345,8 @@ def delete_instance(self, nova, instance):
         self.log.warn('/!\\ DEPRECATION WARNING: use '
                       'delete_resource instead of delete_instance.')
         self.log.debug('Deleting instance ({})...'.format(instance))
-        num_before = len(list(nova.servers.list()))
-        nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')
 
     def create_or_get_keypair(self, nova, keypair_name="testkey"):
         """Create a new keypair, or return pointer if it already exists."""
@@ -339,6 +362,88 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"):
         _keypair = nova.keypairs.create(name=keypair_name)
         return _keypair
 
+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot. Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + def delete_resource(self, resource, resource_id, msg="resource", max_wait=120): """Delete one openstack resource, such as one instance, keypair, @@ -350,6 +455,8 @@ def delete_resource(self, resource, resource_id, :param max_wait: maximum wait time in seconds :returns: True if successful, otherwise False """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) num_before = len(list(resource.list())) resource.delete(resource_id) @@ -411,3 +518,87 @@ def resource_reaches_status(self, resource, resource_id, self.log.debug('{} never reached expected status: ' '{}'.format(resource_id, expected_stat)) return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index ab400060..ab2ebac1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -122,21 +122,24 @@ def config_flags_parser(config_flags): of specifying multiple key value pairs within the same string. For example, a string in the format of 'key1=value1, key2=value2' will return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. + + {'key1': 'value1', + 'key2': 'value2'}. 2. A string in the above format, but supporting a comma-delimited list of values for the same key. For example, a string in the format of 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} + + {'key1', 'value1', + 'key2', 'value2,value3,value4'} 3. A string containing a colon character (:) prior to an equal character (=) will be treated as yaml and parsed as such. This can be used to specify more complex key value pairs. For example, a string in the format of 'key1: subkey1=value1, subkey2=value2' will return a dict of: - {'key1', 'subkey1=value1, subkey2=value2'} + + {'key1', 'subkey1=value1, subkey2=value2'} The provided config_flags string may be a list of comma-separated values which themselves may be comma-separated list of values. 
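The three input forms accepted by config_flags_parser are easiest to see side by side. A minimal sketch, assuming the synced charmhelpers tree is on the import path; the expected results restate the documented intent, with the second form's output shown with the values its input actually contains ('value3,value4,value5'):

    from charmhelpers.contrib.openstack.context import config_flags_parser

    # 1. Plain comma-delimited key=value pairs.
    config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}

    # 2. A comma-delimited list of values for a single key: everything
    #    after 'key2=' is kept together as that key's value.
    config_flags_parser('key1=value1, key2=value3,value4,value5')
    # -> {'key1': 'value1', 'key2': 'value3,value4,value5'}

    # 3. A colon before the first '=' marks the string as yaml-style; the
    #    raw remainder becomes the value.
    config_flags_parser('key1: subkey1=value1, subkey2=value2')
    # -> {'key1': 'subkey1=value1, subkey2=value2'}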
@@ -891,8 +894,6 @@ def neutron_ctxt(self): return ctxt def __call__(self): - self._ensure_packages() - if self.network_manager not in ['quantum', 'neutron']: return {} @@ -1050,13 +1051,22 @@ def __init__(self, service, config_file, interface): :param config_file : Service's config file to query sections :param interface : Subordinate interface to inspect """ - self.service = service self.config_file = config_file - self.interface = interface + if isinstance(service, list): + self.services = service + else: + self.services = [service] + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] def __call__(self): ctxt = {'sections': {}} - for rid in relation_ids(self.interface): + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: for unit in related_units(rid): sub_config = relation_get('subordinate_configuration', rid=rid, unit=unit) @@ -1068,29 +1078,32 @@ def __call__(self): 'setting from %s' % rid, level=ERROR) continue - if self.service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service), - level=INFO) - continue - - sub_config = sub_config[self.service] - if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) - continue - - sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): - if k == 'sections': - for section, config_dict in six.iteritems(v): - log("adding section '%s'" % (section), - level=DEBUG) - ctxt[k][section] = config_dict - else: - ctxt[k] = v - + for service in self.services: + if service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, service), + level=INFO) + continue + + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s' % (rid, self.config_file), + level=INFO) + continue + + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index 81a9719f..b99851cc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -5,11 +5,11 @@ ############################################################################### [global] {% if auth -%} - auth_supported = {{ auth }} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} +auth_supported = {{ auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} {% endif -%} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 
24cb272b..021d8cf9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -29,8 +29,8 @@ try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. - FileSystemLoader = ChoiceLoader = Environment = exceptions = None + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions class OSConfigException(Exception): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 28532c98..4dd000c3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -522,6 +522,7 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): Clone/install all specified OpenStack repositories. The expected format of projects_yaml is: + repositories: - {name: keystone, repository: 'git://git.openstack.org/openstack/keystone.git', @@ -529,11 +530,13 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): - {name: requirements, repository: 'git://git.openstack.org/openstack/requirements.git', branch: 'stable/icehouse'} + directory: /mnt/openstack-git http_proxy: squid-proxy-url https_proxy: squid-proxy-url - The directory, http_proxy, and https_proxy keys are optional. + The directory, http_proxy, and https_proxy keys are optional. + """ global requirements_dir parent_dir = '/mnt/openstack-git' @@ -555,10 +558,11 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): pip_create_virtualenv(os.path.join(parent_dir, 'venv')) - # Upgrade setuptools from default virtualenv version. The default version - # in trusty breaks update.py in global requirements master branch. - pip_install('setuptools', upgrade=True, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) + # Upgrade setuptools and pip from default virtualenv versions. The default + # versions in trusty break master OpenStack branch deployments. 
+ for p in ['pip', 'setuptools']: + pip_install(p, upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) for p in projects['repositories']: repo = p['repository'] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 31ea7f9e..00dbffb4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -60,12 +60,12 @@ KEYFILE = '/etc/ceph/ceph.client.{}.key' CEPH_CONF = """[global] - auth supported = {auth} - keyring = {keyring} - mon host = {mon_hosts} - log to syslog = {use_syslog} - err to syslog = {use_syslog} - clog to syslog = {use_syslog} +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..e2769e49 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/files.py b/ceph-radosgw/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. 
+ """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 0add16d4..18860f59 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -33,6 +34,23 @@ import tempfile from subprocess import CalledProcessError +try: + from charmhelpers.cli import cmdline +except ImportError as e: + # due to the anti-pattern of partially synching charmhelpers directly + # into charms, it's possible that charmhelpers.cli is not available; + # if that's the case, they don't really care about using the cli anyway, + # so mock it out + if str(e) == 'No module named cli': + class cmdline(object): + @classmethod + def subcommand(cls, *args, **kwargs): + def _wrap(func): + return func + return _wrap + else: + raise + import six if not six.PY3: from UserDict import UserDict @@ -172,9 +190,20 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cmdline.subcommand() +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -187,14 +216,27 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) +@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] +@cmdline.subcommand() +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -263,7 +305,7 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -467,6 +509,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. 
+ """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -643,6 +742,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message @@ -761,6 +875,7 @@ def atstart(callback, *args, **kwargs): This is useful for modules and classes to perform initialization and inject behavior. In particular: + - Run common code before all of your hooks, such as logging the hook name or interesting relation data. - Defer object or module initialization that requires a hook diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 901a4cfe..8ae8ef86 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False): return service_result +def service_pause(service_name, init_dir=None): + """Pause a system service. + + Stop it, and prevent it from starting again at boot.""" + if init_dir is None: + init_dir = "/etc/init" + stopped = service_stop(service_name) + # XXX: Support systemd too + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + return stopped + + +def service_resume(service_name, init_dir=None): + """Resume a system service. + + Reenable starting again at boot. 
Start the service""" + # XXX: Support systemd too + if init_dir is None: + init_dir = "/etc/init" + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + started = service_start(service_name) + return started + + def service(action, service_name): """Control a system service""" cmd = ['service', service_name, action] @@ -140,11 +170,7 @@ def add_group(group_name, system_group=False): def add_user_to_group(username, group): """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] + cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 3eb5fb44..8005c415 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback): action. :param str source: The template source file, relative to - `$CHARM_DIR/templates` - + `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file + """ def __init__(self, source, target, owner='root', group='root', perms=0o444): diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ def config_changed(): import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ def config_changed(): class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. + Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
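The revised persistence contract described above is worth a concrete sketch, assuming the synced charmhelpers tree is importable: kv() hands back the shared Storage instance, and nothing reaches the sqlite file until flush() is called.

    from charmhelpers.core import unitdata

    db = unitdata.kv()                 # $CHARM_DIR/.unit-state.db by default,
                                       # or $UNIT_STATE_DB when set (see below)
    db.set('osd-bootstrapped', True)   # staged in the open transaction only
    db.get('osd-bootstrapped')         # -> True, visible within this process
    db.flush()                         # commit; omit this and the change is lost

This is also why the _unitdata_cmd helper earlier flushes explicitly after a 'set', and why CommandLine.run() flushes the shared _KV instance once a subcommand has produced its output.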
@@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ def close(self): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ def get(self, key, default=None, record=False): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 9a1a2515..0a3bb969 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark""" + cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: @@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False): log("Holding {}".format(packages)) if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, universal_newlines=True) else: - subprocess.call(cmd) + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) def add_source(source, key=None): @@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs): for handler in handlers: try: installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') if not installed_to: raise UnhandledSource("No handler found for source {}".format(source)) return installed_to diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 8dfce505..efd7f9f0 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. 
return "Wrong source type" if get_archive_handler(self.base_url(source)): return True @@ -155,7 +157,11 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): else: algorithms = hashlib.algorithms_available if key in algorithms: - check_hash(dld_file, value, key) + if len(value) != 1: + raise TypeError( + "Expected 1 hash value, not %d" % len(value)) + expected = value[0] + check_hash(dld_file, expected, key) if checksum: check_hash(dld_file, checksum, hash_type) return extract(dld_file, dest) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index ddc25b7e..f023b26d 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -67,7 +67,7 @@ def install(self, source, branch="master", dest=None, depth=None): try: self.clone(source, dest_dir, branch, depth) except GitCommandError as e: - raise UnhandledSource(e.message) + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir From cf2997c2caf4f1924c9bf5c7fe0cf497d1f2fc8b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2015 15:53:05 +0100 Subject: [PATCH 0775/2699] [gnuoy,trivial] Pre-release charmhelper sync --- ceph-osd/charm-helpers-hooks.yaml | 1 + ceph-osd/hooks/charmhelpers/cli/__init__.py | 195 ++++++++++++++++++ ceph-osd/hooks/charmhelpers/cli/benchmark.py | 36 ++++ ceph-osd/hooks/charmhelpers/cli/commands.py | 32 +++ ceph-osd/hooks/charmhelpers/cli/host.py | 31 +++ ceph-osd/hooks/charmhelpers/cli/unitdata.py | 39 ++++ .../contrib/storage/linux/utils.py | 2 +- ceph-osd/hooks/charmhelpers/core/files.py | 45 ++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 125 ++++++++++- ceph-osd/hooks/charmhelpers/core/host.py | 36 +++- .../charmhelpers/core/services/helpers.py | 4 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 78 +++++-- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 23 ++- .../hooks/charmhelpers/fetch/archiveurl.py | 8 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 2 +- 15 files changed, 618 insertions(+), 39 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/cli/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/cli/benchmark.py create mode 100644 ceph-osd/hooks/charmhelpers/cli/commands.py create mode 100644 ceph-osd/hooks/charmhelpers/cli/host.py create mode 100644 ceph-osd/hooks/charmhelpers/cli/unitdata.py create mode 100644 ceph-osd/hooks/charmhelpers/core/files.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 8f5373ec..c8c54766 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - cli - fetch - contrib.storage.linux: - utils diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py new file mode 100644 index 00000000..7118daf5 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import inspect +import argparse +import sys + +from six.moves import zip + +from charmhelpers.core import unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + kwargs = {} + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + if argspec.keywords: + for kwarg in argspec.keywords.items(): + kwargs[kwarg] = getattr(arguments, kwarg) + output = arguments.func(*vargs, **kwargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if unitdata._KV: + unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/ceph-osd/hooks/charmhelpers/cli/benchmark.py b/ceph-osd/hooks/charmhelpers/cli/benchmark.py new file mode 100644 index 00000000..b23c16ce --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/benchmark.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/ceph-osd/hooks/charmhelpers/cli/commands.py b/ceph-osd/hooks/charmhelpers/cli/commands.py new file mode 100644 index 00000000..443ff05d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/commands.py @@ -0,0 +1,32 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +import host # noqa +import benchmark # noqa +import unitdata # noqa +from charmhelpers.core import hookenv # noqa diff --git a/ceph-osd/hooks/charmhelpers/cli/host.py b/ceph-osd/hooks/charmhelpers/cli/host.py new file mode 100644 index 00000000..58e78d6b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/host.py @@ -0,0 +1,31 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/ceph-osd/hooks/charmhelpers/cli/unitdata.py b/ceph-osd/hooks/charmhelpers/cli/unitdata.py new file mode 100644 index 00000000..d1cd95bf --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/unitdata.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import unitdata + + +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") +def unitdata_cmd(subparser): + nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') + get_cmd.add_argument('key', help='Key to retrieve the value of') + get_cmd.set_defaults(action='get', value=None) + set_cmd = nested.add_parser('set', help='Store data') + set_cmd.add_argument('key', help='Key to set') + set_cmd.add_argument('value', help='Value to store') + set_cmd.set_defaults(action='set') + + def _unitdata_cmd(action, key, value): + if action == 'get': + return unitdata.kv().get(key) + elif action == 'set': + unitdata.kv().set(key, value) + unitdata.kv().flush() + return '' + return _unitdata_cmd diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..e2769e49 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/ceph-osd/hooks/charmhelpers/core/files.py b/ceph-osd/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 0add16d4..18860f59 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -33,6 +34,23 @@ import tempfile from subprocess import CalledProcessError +try: + from charmhelpers.cli import cmdline +except ImportError as e: + # due to the anti-pattern of partially synching charmhelpers directly + # into charms, it's possible that charmhelpers.cli is not available; + # if that's the case, they don't really care about using the cli anyway, + # so mock it out + if str(e) == 'No module named cli': + class cmdline(object): + @classmethod + def subcommand(cls, *args, **kwargs): + def _wrap(func): + return func + return _wrap + else: + raise + import six if not six.PY3: from UserDict import UserDict @@ -172,9 +190,20 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cmdline.subcommand() +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -187,14 +216,27 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) +@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] +@cmdline.subcommand() +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', 
os.path.basename(sys.argv[0])) class Config(dict): @@ -263,7 +305,7 @@ def load_previous(self, path=None): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -467,6 +509,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -643,6 +742,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message @@ -761,6 +875,7 @@ def atstart(callback, *args, **kwargs): This is useful for modules and classes to perform initialization and inject behavior. In particular: + - Run common code before all of your hooks, such as logging the hook name or interesting relation data. - Defer object or module initialization that requires a hook diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 901a4cfe..8ae8ef86 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False): return service_result +def service_pause(service_name, init_dir=None): + """Pause a system service. 
+ + Stop it, and prevent it from starting again at boot.""" + if init_dir is None: + init_dir = "/etc/init" + stopped = service_stop(service_name) + # XXX: Support systemd too + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + return stopped + + +def service_resume(service_name, init_dir=None): + """Resume a system service. + + Reenable starting again at boot. Start the service""" + # XXX: Support systemd too + if init_dir is None: + init_dir = "/etc/init" + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + started = service_start(service_name) + return started + + def service(action, service_name): """Control a system service""" cmd = ['service', service_name, action] @@ -140,11 +170,7 @@ def add_group(group_name, system_group=False): def add_user_to_group(username, group): """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] + cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 3eb5fb44..8005c415 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback): action. :param str source: The template source file, relative to - `$CHARM_DIR/templates` - + `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file + """ def __init__(self, source, target, owner='root', group='root', perms=0o444): diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ def config_changed(): import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ def config_changed(): class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. + Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
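
Note on the Storage docstring change above: writes are now documented as staged until flush() commits them. A minimal sketch of the resulting pattern, assuming the charmhelpers package layout shown in these patches ('example-key' is purely illustrative, not a key the charms use):

    from charmhelpers.core import unitdata

    kv = unitdata.kv()
    kv.set('example-key', {'osd-count': 3})  # staged in the open sqlite transaction
    kv.flush()                               # nothing is persisted until this call
    print(kv.get('example-key'))             # -> {'osd-count': 3}
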
@@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ def close(self): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ def get(self, key, default=None, record=False): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 9a1a2515..0a3bb969 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark""" + cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: @@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False): log("Holding {}".format(packages)) if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, universal_newlines=True) else: - subprocess.call(cmd) + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) def add_source(source, key=None): @@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs): for handler in handlers: try: installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') if not installed_to: raise UnhandledSource("No handler found for source {}".format(source)) return installed_to diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 8dfce505..efd7f9f0 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. 
return "Wrong source type" if get_archive_handler(self.base_url(source)): return True @@ -155,7 +157,11 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): else: algorithms = hashlib.algorithms_available if key in algorithms: - check_hash(dld_file, value, key) + if len(value) != 1: + raise TypeError( + "Expected 1 hash value, not %d" % len(value)) + expected = value[0] + check_hash(dld_file, expected, key) if checksum: check_hash(dld_file, checksum, hash_type) return extract(dld_file, dest) diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index ddc25b7e..f023b26d 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -67,7 +67,7 @@ def install(self, source, branch="master", dest=None, depth=None): try: self.clone(source, dest_dir, branch, depth) except GitCommandError as e: - raise UnhandledSource(e.message) + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir From 91a02f499e23c654c87d1ea480f1da3147cb0e06 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 7 Aug 2015 12:15:24 +0300 Subject: [PATCH 0776/2699] Patching in changes form https://code.launchpad.net/~james-page/charm-helpers/lp-1475247/+merge/265125 The ceph disk zapping fails sometimes when issued in a single command, as described in https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b This branche's purpose is to avoid a full charmhelpers sync before release. --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e49..1e57941a 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 From 0885e51ed88e606f05a4a45c49f329a6077b4a1e Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 7 Aug 2015 12:15:24 +0300 Subject: [PATCH 0777/2699] Patching in changes form https://code.launchpad.net/~james-page/charm-helpers/lp-1475247/+merge/265125 The ceph disk zapping fails sometimes when issued in a single command, as described in https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b This branche's purpose is to avoid a full charmhelpers sync before release. 
--- ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e49..1e57941a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 From cfdce5add65ae946b70271cfb89a7576f47c7f02 Mon Sep 17 00:00:00 2001 From: Christopher Glass Date: Fri, 7 Aug 2015 12:18:11 +0300 Subject: [PATCH 0778/2699] Patching in changes from https://code.launchpad.net/~james-page/charm-helpers/lp-1475247/+merge/265125 The Ceph disk zapping sometimes fails when issued as a single command, as described in https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b This branch's purpose is to avoid a full charmhelpers sync before release. --- ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e49..1e57941a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 From d9c9e350dc5f1f8c41fc3e2192479acb33b10c32 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Aug 2015 20:51:43 -0400 Subject: [PATCH 0779/2699] [corey.bryant,r=trivial] Sync charm-helpers to pick up Liberty support. 
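
Among other things, this sync adds Liberty entries to CLOUD_ARCHIVE_POCKETS (see the fetch/__init__.py hunk below). Illustration only, assuming the usual 'cloud:' prefix handling in add_source:

    from charmhelpers.fetch import add_source, apt_update

    add_source('cloud:trusty-liberty')  # resolves to the trusty-updates/liberty pocket
    apt_update(fatal=True)
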
--- ceph-proxy/hooks/charmhelpers/cli/__init__.py | 6 +- ceph-proxy/hooks/charmhelpers/cli/commands.py | 8 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 21 +--- ceph-proxy/hooks/charmhelpers/core/host.py | 25 ++++- .../charmhelpers/core/services/helpers.py | 20 +++- .../hooks/charmhelpers/fetch/__init__.py | 8 ++ .../charmhelpers/contrib/amulet/utils.py | 105 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 4 +- 8 files changed, 141 insertions(+), 56 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/hooks/charmhelpers/cli/__init__.py index 7118daf5..16d52cc4 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ def run(self): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/ceph-proxy/hooks/charmhelpers/cli/commands.py b/ceph-proxy/hooks/charmhelpers/cli/commands.py index 443ff05d..7e91db00 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/commands.py +++ b/ceph-proxy/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . 
import hookenv # noqa diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 18860f59..a35d006b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def wrapper(*args, **kwargs): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 8ae8ef86..ec659eef 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None): stopped = service_stop(service_name) # XXX: Support systemd too override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") return stopped @@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None): if init_dir is None: init_dir = "/etc/init" override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) started = service_start(service_name) @@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +290,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. 
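
A note on the host.py hunk above: upstart only reads /etc/init/<job>.override, so the earlier '<job>.conf.override' file was ignored and a "paused" service could still start at boot. A sketch of the corrected naming, with an illustrative service name rather than one the charms use:

    import os

    def override_path(service_name, init_dir='/etc/init'):
        # '<job>.override' is the filename upstart actually honours
        return os.path.join(init_dir, '{}.override'.format(service_name))

    assert override_path('example-service') == '/etc/init/example-service.override'
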
diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 8005c415..3f677833 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 0a3bb969..cd0b783c 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 3de26afd..7816c934 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,23 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
-import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import subprocess import sys import time -import urlparse + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -142,19 +148,23 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +174,7 @@ def _get_config(self, unit, filename): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -450,15 +460,20 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. :returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +482,23 @@ def get_process_id_list(self, sentry_unit, process_name): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. 
+ """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +512,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +531,23 @@ def validate_unit_process_ids(self, expected, actual): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +567,30 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return 'Dicts within list are not identical' return None + + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. 
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..07ee2ef1 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: From f0775fa6d375bc13828c4a6b70e354963c0d5f7d Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Aug 2015 20:51:43 -0400 Subject: [PATCH 0780/2699] [corey.bryant,r=trivial] Sync charm-helpers to pick up Liberty support. --- ceph-mon/hooks/charmhelpers/cli/__init__.py | 6 +- ceph-mon/hooks/charmhelpers/cli/commands.py | 8 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 21 +--- ceph-mon/hooks/charmhelpers/core/host.py | 25 ++++- .../charmhelpers/core/services/helpers.py | 20 +++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 8 ++ .../charmhelpers/contrib/amulet/utils.py | 105 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 4 +- 8 files changed, 141 insertions(+), 56 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 7118daf5..16d52cc4 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ def run(self): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/ceph-mon/hooks/charmhelpers/cli/commands.py b/ceph-mon/hooks/charmhelpers/cli/commands.py index 443ff05d..7e91db00 100644 --- a/ceph-mon/hooks/charmhelpers/cli/commands.py +++ b/ceph-mon/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . 
import hookenv # noqa diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 18860f59..a35d006b 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def wrapper(*args, **kwargs): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 8ae8ef86..ec659eef 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None): stopped = service_stop(service_name) # XXX: Support systemd too override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") return stopped @@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None): if init_dir is None: init_dir = "/etc/init" override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) started = service_start(service_name) @@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +290,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. 
diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 8005c415..3f677833 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 0a3bb969..cd0b783c 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 3de26afd..7816c934 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,23 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
-import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import subprocess import sys import time -import urlparse + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -142,19 +148,23 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +174,7 @@ def _get_config(self, unit, filename): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -450,15 +460,20 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. :returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +482,23 @@ def get_process_id_list(self, sentry_unit, process_name): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. 
+ """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +512,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +531,23 @@ def validate_unit_process_ids(self, expected, actual): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +567,30 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return 'Dicts within list are not identical' return None + + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. 
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..07ee2ef1 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: From d8e0faeb99006c74dddc913601227512b97b92a3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2015 14:50:16 +0100 Subject: [PATCH 0781/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- ceph-proxy/hooks/charmhelpers/cli/hookenv.py | 23 ++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 77 ++++++++++++++++--- .../hooks/charmhelpers/core/hugepage.py | 62 +++++++++++++++ 3 files changed, 151 insertions(+), 11 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/cli/hookenv.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/hugepage.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/hookenv.py b/ceph-proxy/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 00000000..265c816e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index ec659eef..29e8fee0 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -417,25 +417,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. + + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/ceph-proxy/hooks/charmhelpers/core/hugepage.py b/ceph-proxy/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..ba4340ff --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) From a1be0b96a9a5c13c8b27fe4cae1deb3e07a0a5f4 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2015 14:50:16 +0100 Subject: [PATCH 0782/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- ceph-mon/hooks/charmhelpers/cli/hookenv.py | 23 ++++++ ceph-mon/hooks/charmhelpers/core/host.py | 77 +++++++++++++++++--- ceph-mon/hooks/charmhelpers/core/hugepage.py | 62 ++++++++++++++++ 3 files changed, 151 insertions(+), 11 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/cli/hookenv.py create mode 100644 ceph-mon/hooks/charmhelpers/core/hugepage.py diff --git a/ceph-mon/hooks/charmhelpers/cli/hookenv.py b/ceph-mon/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 00000000..265c816e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index ec659eef..29e8fee0 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -417,25 +417,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. + + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/ceph-mon/hooks/charmhelpers/core/hugepage.py b/ceph-mon/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..ba4340ff --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
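Note the `hookenv.relation_id._wrapped` registrations in the new cli/hookenv.py: relation_id and friends are wrapped by hookenv's memoizing @cached decorator, and this sync makes the decorator expose the undecorated function as `_wrapped`, presumably so the CLI dispatch can see and call the real function rather than the caching wrapper. A sketch of that pattern, assuming nothing beyond functools:

    from functools import wraps

    def cached(func):
        """Memoize by positional args, but keep the raw callable reachable."""
        cache = {}

        @wraps(func)
        def wrapper(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

        wrapper._wrapped = func   # escape hatch: call without the cache
        return wrapper

    @cached
    def lookup(key):
        print('computing', key)
        return key.upper()

    lookup('a')            # computes
    lookup('a')            # served from the cache, no print
    lookup._wrapped('a')   # bypasses the cache entirely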
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) From a7d6264103a095bc5426445394c97c986effc5ad Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2015 14:50:42 +0100 Subject: [PATCH 0783/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- ceph-osd/hooks/charmhelpers/cli/__init__.py | 6 +- ceph-osd/hooks/charmhelpers/cli/commands.py | 8 +- ceph-osd/hooks/charmhelpers/cli/hookenv.py | 23 ++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 21 +--- ceph-osd/hooks/charmhelpers/core/host.py | 102 ++++++++++++++--- ceph-osd/hooks/charmhelpers/core/hugepage.py | 62 +++++++++++ .../charmhelpers/core/services/helpers.py | 20 +++- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 8 ++ .../charmhelpers/contrib/amulet/utils.py | 105 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 4 +- 10 files changed, 292 insertions(+), 67 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/cli/hookenv.py create mode 100644 ceph-osd/hooks/charmhelpers/core/hugepage.py diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py index 7118daf5..16d52cc4 100644 --- a/ceph-osd/hooks/charmhelpers/cli/__init__.py +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ def run(self): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - 
kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/ceph-osd/hooks/charmhelpers/cli/commands.py b/ceph-osd/hooks/charmhelpers/cli/commands.py index 443ff05d..7e91db00 100644 --- a/ceph-osd/hooks/charmhelpers/cli/commands.py +++ b/ceph-osd/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . import hookenv # noqa diff --git a/ceph-osd/hooks/charmhelpers/cli/hookenv.py b/ceph-osd/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 00000000..265c816e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 18860f59..a35d006b 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def wrapper(*args, **kwargs): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current 
relation)""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 8ae8ef86..29e8fee0 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None): stopped = service_stop(service_name) # XXX: Support systemd too override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") return stopped @@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None): if init_dir is None: init_dir = "/etc/init" override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) started = service_start(service_name) @@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +290,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. @@ -396,25 +417,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/ceph-osd/hooks/charmhelpers/core/hugepage.py b/ceph-osd/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..ba4340ff --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True): + """Enable hugepages on system. 
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 8005c415..3f677833 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 0a3bb969..cd0b783c 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 
'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 3de26afd..7816c934 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,23 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import subprocess import sys import time -import urlparse + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -142,19 +148,23 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +174,7 @@ def _get_config(self, unit, filename): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -450,15 +460,20 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. 
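The validate_services_by_name() change above picks the status command by init system: from the systemd switchover release onward a plain `sudo service X status` suffices and the exit code is authoritative, while on upstart releases `sudo status X` exits 0 even for stopped jobs, so the output must also contain "start/running". A sketch of that branching, assuming a run(cmd) helper that returns (output, exit_code) as amulet's sentry.run does:

    def service_status_cmd(release_idx, systemd_idx, service):
        """Pick the right status invocation for the init system in use."""
        if (release_idx >= systemd_idx or
                service in ('rabbitmq-server', 'apache2')):
            # systemd (or a sysv wrapper): exit code alone is authoritative
            return 'sudo service {} status'.format(service), None
        # upstart: exit code is 0 even when stopped, so match the output too
        return 'sudo status {}'.format(service), 'start/running'

    def service_running(run, release_idx, systemd_idx, service):
        cmd, needle = service_status_cmd(release_idx, systemd_idx, service)
        output, code = run(cmd)
        return code == 0 and (needle is None or needle in output)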
:returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +482,23 @@ def get_process_id_list(self, sentry_unit, process_name): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +512,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +531,23 @@ def validate_unit_process_ids(self, expected, actual): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +567,30 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return 'Dicts within list are not identical' return None + + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. 
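The expect_success flag added to get_process_id_list() above works by shell trickery: appending `|| exit 0 && exit 1` inverts pidof's verdict, so an absent process (pidof non-zero) becomes exit 0 and an unexpectedly present one becomes exit 1; `-x` additionally lets pidof match scripts, not just binaries. A local sketch of the same inversion, assuming only the standard library and a POSIX shell:

    import subprocess

    def pids_of(process, expect_present=True):
        cmd = 'pidof -x {}'.format(process)   # -x also matches scripts
        if not expect_present:
            # invert the verdict: absent -> exit 0, present -> exit 1
            cmd += ' || exit 0 && exit 1'
        # raises CalledProcessError when the expectation is not met
        output = subprocess.check_output(cmd, shell=True)
        return output.decode().split()

    print(pids_of('init'))                                   # expected present
    print(pids_of('no-such-daemon', expect_present=False))   # expected absent -> []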
+ + _check_output parameter is used for dependency injection. + """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..07ee2ef1 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: From da5da122299fc4c61d1bb793db20352472858f76 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 10:44:41 +0000 Subject: [PATCH 0784/2699] Charmhelper sync --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index eeee6f8c..d4035311 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/cepg-broker destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb4..24e41b01 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import shutil import json import time +import uuid from subprocess import ( check_call, @@ -413,6 +414,7 @@ class CephBrokerRq(object): """ def __init__(self, api_version=1): self.api_version = api_version + self.rq_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): @@ -421,7 +423,8 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'rq-id': self.rq_id}) class CephBrokerRsp(object): @@ -435,6 +438,10 @@ def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) + @property + def req_id(self): + return self.rsp.get('rq-id') + @property def exit_code(self): return self.rsp.get('exit-code') From fd1f499f4f5d8000c3e22036a5b7baa3891fe475 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 10:44:41 +0000 Subject: [PATCH 0785/2699] Charmhelper sync --- ceph-mon/charm-helpers-hooks.yaml | 2 +- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 
deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index eeee6f8c..d4035311 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/cepg-broker destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb4..24e41b01 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import shutil import json import time +import uuid from subprocess import ( check_call, @@ -413,6 +414,7 @@ class CephBrokerRq(object): """ def __init__(self, api_version=1): self.api_version = api_version + self.rq_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): @@ -421,7 +423,8 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'rq-id': self.rq_id}) class CephBrokerRsp(object): @@ -435,6 +438,10 @@ def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) + @property + def req_id(self): + return self.rsp.get('rq-id') + @property def exit_code(self): return self.rsp.get('exit-code') From 4a603af2640ced3af58f1f800c2a511bc36967ef Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 11:14:24 +0000 Subject: [PATCH 0786/2699] Process reqid if there is one --- ceph-proxy/hooks/ceph_broker.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 9fced945..00ce4b6f 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -31,10 +31,14 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. """ + rq_id = reqs.get('rq_id') try: version = reqs.get('api-version') if version == 1: - return process_requests_v1(reqs['ops']) + resp = process_requests_v1(reqs['ops']) + if rq_id: + resp['rq-id'] = rq_id + return resp except Exception as exc: log(str(exc), level=ERROR) @@ -44,7 +48,10 @@ def process_requests(reqs): return {'exit-code': 1, 'stderr': msg} msg = ("Missing or invalid api version (%s)" % (version)) - return {'exit-code': 1, 'stderr': msg} + resp = {'exit-code': 1, 'stderr': msg} + if rq_id: + resp['rq-id'] = rq_id + return resp def process_requests_v1(reqs): From bea81f93e39c0104279650f1b58c8fc71583841b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 11:14:24 +0000 Subject: [PATCH 0787/2699] Process reqid if there is one --- ceph-mon/hooks/ceph_broker.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 9fced945..00ce4b6f 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -31,10 +31,14 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. 
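From here on, the series threads a request id end to end: the client stamps each broker request with a uuid, the monitor echoes it back in the response, and the client can then tell a fresh answer from a stale broker_rsp left over on the relation. A minimal sketch of the echo on the processing side, with handle_ops standing in for the real process_requests_v1:

    import json

    def process(encoded_req, handle_ops):
        req = json.loads(encoded_req)
        if req.get('api-version') == 1:
            resp = handle_ops(req['ops'])     # e.g. create the pools
        else:
            resp = {'exit-code': 1,
                    'stderr': 'Missing or invalid api version'}
        if req.get('request-id'):
            resp['request-id'] = req['request-id']  # echo for correlation
        return json.dumps(resp)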
""" + rq_id = reqs.get('rq_id') try: version = reqs.get('api-version') if version == 1: - return process_requests_v1(reqs['ops']) + resp = process_requests_v1(reqs['ops']) + if rq_id: + resp['rq-id'] = rq_id + return resp except Exception as exc: log(str(exc), level=ERROR) @@ -44,7 +48,10 @@ def process_requests(reqs): return {'exit-code': 1, 'stderr': msg} msg = ("Missing or invalid api version (%s)" % (version)) - return {'exit-code': 1, 'stderr': msg} + resp = {'exit-code': 1, 'stderr': msg} + if rq_id: + resp['rq-id'] = rq_id + return resp def process_requests_v1(reqs): From 26500c95338f7f29e6e271b73967a860c6b0b4ea Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 11:41:50 +0000 Subject: [PATCH 0788/2699] Fix broker req id typo --- ceph-proxy/hooks/ceph_broker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 00ce4b6f..570d7d5a 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -31,7 +31,7 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. """ - rq_id = reqs.get('rq_id') + rq_id = reqs.get('rq-id') try: version = reqs.get('api-version') if version == 1: From 30c4e6c4a2e38093cd85914df26132753749fe90 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Aug 2015 11:41:50 +0000 Subject: [PATCH 0789/2699] Fix broker req id typo --- ceph-mon/hooks/ceph_broker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 00ce4b6f..570d7d5a 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -31,7 +31,7 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. """ - rq_id = reqs.get('rq_id') + rq_id = reqs.get('rq-id') try: version = reqs.get('api-version') if version == 1: From 39373dfebe01f7c21e9af33bdc8076c6f2d17a59 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 10:55:32 +0100 Subject: [PATCH 0790/2699] Fix request id --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- ceph-proxy/hooks/ceph_broker.py | 10 +++++----- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index d4035311..eeee6f8c 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/cepg-broker +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 570d7d5a..92463692 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -31,13 +31,13 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. 
""" - rq_id = reqs.get('rq-id') + request_id = reqs.get('request-id') try: version = reqs.get('api-version') if version == 1: resp = process_requests_v1(reqs['ops']) - if rq_id: - resp['rq-id'] = rq_id + if request_id: + resp['request-id'] = request_id return resp except Exception as exc: @@ -49,8 +49,8 @@ def process_requests(reqs): msg = ("Missing or invalid api version (%s)" % (version)) resp = {'exit-code': 1, 'stderr': msg} - if rq_id: - resp['rq-id'] = rq_id + if request_id: + resp['request-id'] = request_id return resp diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 24e41b01..a28d9441 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -424,7 +424,7 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'rq-id': self.rq_id}) + 'request-id': self.rq_id}) class CephBrokerRsp(object): @@ -440,7 +440,7 @@ def __init__(self, encoded_rsp): @property def req_id(self): - return self.rsp.get('rq-id') + return self.rsp.get('request-id') @property def exit_code(self): From 6a22cfe12e7db9e2dfc20db8600e94b364de637c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 10:55:32 +0100 Subject: [PATCH 0791/2699] Fix request id --- ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/hooks/ceph_broker.py | 10 +++++----- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index d4035311..eeee6f8c 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/cepg-broker +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 570d7d5a..92463692 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -31,13 +31,13 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. 
""" - rq_id = reqs.get('rq-id') + request_id = reqs.get('request-id') try: version = reqs.get('api-version') if version == 1: resp = process_requests_v1(reqs['ops']) - if rq_id: - resp['rq-id'] = rq_id + if request_id: + resp['request-id'] = request_id return resp except Exception as exc: @@ -49,8 +49,8 @@ def process_requests(reqs): msg = ("Missing or invalid api version (%s)" % (version)) resp = {'exit-code': 1, 'stderr': msg} - if rq_id: - resp['rq-id'] = rq_id + if request_id: + resp['request-id'] = request_id return resp diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 24e41b01..a28d9441 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -424,7 +424,7 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'rq-id': self.rq_id}) + 'request-id': self.rq_id}) class CephBrokerRsp(object): @@ -440,7 +440,7 @@ def __init__(self, encoded_rsp): @property def req_id(self): - return self.rsp.get('rq-id') + return self.rsp.get('request-id') @property def exit_code(self): From f641213e60a981f843c3f8dcb82b9c6932b5568b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 11:41:06 +0000 Subject: [PATCH 0792/2699] Ch sync --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- .../contrib/storage/linux/ceph.py | 27 ++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index eeee6f8c..d4035311 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/cepg-broker destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index a28d9441..368bb18f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -36,6 +36,7 @@ CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, related_units, @@ -414,7 +415,7 @@ class CephBrokerRq(object): """ def __init__(self, api_version=1): self.api_version = api_version - self.rq_id = str(uuid.uuid1()) + self.request_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): @@ -424,7 +425,7 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'request-id': self.rq_id}) + 'request-id': self.request_id}) class CephBrokerRsp(object): @@ -434,12 +435,16 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. 
""" + VALID = 0 + ABSENT = 1 + INVALID = 2 + def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) @property - def req_id(self): + def request_id(self): return self.rsp.get('request-id') @property @@ -449,3 +454,19 @@ def exit_code(self): @property def exit_msg(self): return self.rsp.get('stderr') + + def validate_request_id(self): + pending_request_id = None + pending_request_raw = relation_get(attribute='broker_req', + unit=local_unit()) + if pending_request_raw: + pending_request = json.loads(pending_request_raw) + pending_request_id = pending_request.get('request-id') + if not self.request_id: + # back compat + return self.ABSENT + + if pending_request_id and self.request_id != pending_request_id: + return self.INVALID + + return self.VALID From 7e0bb0211e8161c2204b37d4412ab1b256df3e0f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 11:41:06 +0000 Subject: [PATCH 0793/2699] Ch sync --- ceph-mon/charm-helpers-hooks.yaml | 2 +- .../contrib/storage/linux/ceph.py | 27 ++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index eeee6f8c..d4035311 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/cepg-broker destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index a28d9441..368bb18f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -36,6 +36,7 @@ CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, related_units, @@ -414,7 +415,7 @@ class CephBrokerRq(object): """ def __init__(self, api_version=1): self.api_version = api_version - self.rq_id = str(uuid.uuid1()) + self.request_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): @@ -424,7 +425,7 @@ def add_op_create_pool(self, name, replica_count=3): @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'request-id': self.rq_id}) + 'request-id': self.request_id}) class CephBrokerRsp(object): @@ -434,12 +435,16 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. 
""" + VALID = 0 + ABSENT = 1 + INVALID = 2 + def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) @property - def req_id(self): + def request_id(self): return self.rsp.get('request-id') @property @@ -449,3 +454,19 @@ def exit_code(self): @property def exit_msg(self): return self.rsp.get('stderr') + + def validate_request_id(self): + pending_request_id = None + pending_request_raw = relation_get(attribute='broker_req', + unit=local_unit()) + if pending_request_raw: + pending_request = json.loads(pending_request_raw) + pending_request_id = pending_request.get('request-id') + if not self.request_id: + # back compat + return self.ABSENT + + if pending_request_id and self.request_id != pending_request_id: + return self.INVALID + + return self.VALID From fc8e92798161ab8d9bc020c4a695ea8e056d7e27 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:24:26 +0000 Subject: [PATCH 0794/2699] Charm helper sync --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 368bb18f..613d19d5 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -463,10 +463,14 @@ def validate_request_id(self): pending_request = json.loads(pending_request_raw) pending_request_id = pending_request.get('request-id') if not self.request_id: + log('Request has no request-id'.format(svc), level=DEBUG) # back compat return self.ABSENT if pending_request_id and self.request_id != pending_request_id: + log('request-id {} does not match expected request-id ' + '{}'.format(self.request_id, pending_request_id), level=DEBUG) return self.INVALID + log('request-id {} is expected'.format(self.request_id) return self.VALID From 48fd6ad61fd0a9d68c3dce78b95923f230b3dea5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:24:26 +0000 Subject: [PATCH 0795/2699] Charm helper sync --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 368bb18f..613d19d5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -463,10 +463,14 @@ def validate_request_id(self): pending_request = json.loads(pending_request_raw) pending_request_id = pending_request.get('request-id') if not self.request_id: + log('Request has no request-id'.format(svc), level=DEBUG) # back compat return self.ABSENT if pending_request_id and self.request_id != pending_request_id: + log('request-id {} does not match expected request-id ' + '{}'.format(self.request_id, pending_request_id), level=DEBUG) return self.INVALID + log('request-id {} is expected'.format(self.request_id) return self.VALID From dac0befb1d0825f17c0a316f39d1d8bdc34d1e73 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:32:45 +0000 Subject: [PATCH 0796/2699] ch sync --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 613d19d5..27319b07 100644 --- 
a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -472,5 +472,5 @@ def validate_request_id(self): '{}'.format(self.request_id, pending_request_id), level=DEBUG) return self.INVALID - log('request-id {} is expected'.format(self.request_id) + log('request-id {} is expected'.format(self.request_id)) return self.VALID From db111e38f716e5666590004fc08718796be8dbd2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:32:45 +0000 Subject: [PATCH 0797/2699] ch sync --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 613d19d5..27319b07 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -472,5 +472,5 @@ def validate_request_id(self): '{}'.format(self.request_id, pending_request_id), level=DEBUG) return self.INVALID - log('request-id {} is expected'.format(self.request_id) + log('request-id {} is expected'.format(self.request_id)) return self.VALID From cb9af55433cdb8c33bdb483361878d317c97c248 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:55:54 +0000 Subject: [PATCH 0798/2699] ch sync --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 27319b07..9d035604 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -417,6 +417,7 @@ def __init__(self, api_version=1): self.api_version = api_version self.request_id = str(uuid.uuid1()) self.ops = [] + log('Received request {}'.format(self.request_id)) def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, From 2661342892f12fe9635514d4ba323dfa270640d8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 13:55:54 +0000 Subject: [PATCH 0799/2699] ch sync --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 27319b07..9d035604 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -417,6 +417,7 @@ def __init__(self, api_version=1): self.api_version = api_version self.request_id = str(uuid.uuid1()) self.ops = [] + log('Received request {}'.format(self.request_id)) def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, From 2defa27cdb638becbfbe6b91c2c253151d177456 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 14:12:59 +0000 Subject: [PATCH 0800/2699] More logging --- ceph-proxy/hooks/ceph_broker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 92463692..de75518e 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -35,6 +35,7 @@ def process_requests(reqs): try: version = reqs.get('api-version') if version == 1: + log('Processing request {}'.format(request_id)) resp = process_requests_v1(reqs['ops']) if 
request_id: resp['request-id'] = request_id From aafe2078e37996abbf02c79a5f9cc7ce929cc172 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 21 Aug 2015 14:12:59 +0000 Subject: [PATCH 0801/2699] More logging --- ceph-mon/hooks/ceph_broker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 92463692..de75518e 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -35,6 +35,7 @@ def process_requests(reqs): try: version = reqs.get('api-version') if version == 1: + log('Processing request {}'.format(request_id)) resp = process_requests_v1(reqs['ops']) if request_id: resp['request-id'] = request_id From f0049a9f86a86b457a131e9a84b6d34d18bc215d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 06:51:15 +0000 Subject: [PATCH 0802/2699] Generate unit specific broker resp --- ceph-proxy/hooks/hooks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 681c4376..1055f6c3 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -319,7 +319,13 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - relation_set(relation_settings={'broker_rsp': rsp}) + unit_id = remote_unit().replace('/', '-') + unit_response_key = 'broker_rsp_' + unit_id + data = { + 'broker_rsp': rsp, + unit_response_key: rsp, + } + relation_set(relation_settings=data) else: log('mon cluster not in quorum', level=DEBUG) From 2aafd7ec7b719b2b41641d86c0aed6ddaad0b405 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 06:51:15 +0000 Subject: [PATCH 0803/2699] Generate unit specific broker resp --- ceph-mon/hooks/hooks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 681c4376..1055f6c3 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -319,7 +319,13 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - relation_set(relation_settings={'broker_rsp': rsp}) + unit_id = remote_unit().replace('/', '-') + unit_response_key = 'broker_rsp_' + unit_id + data = { + 'broker_rsp': rsp, + unit_response_key: rsp, + } + relation_set(relation_settings=data) else: log('mon cluster not in quorum', level=DEBUG) From f6bd5d42c5ce328017b4690aadaf5da1340b7780 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 06:52:32 +0000 Subject: [PATCH 0804/2699] ch sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9d035604..767902db 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ relation_get, relation_ids, related_units, + remote_unit, log, DEBUG, INFO, @@ -417,7 +418,6 @@ def __init__(self, api_version=1): self.api_version = api_version self.request_id = str(uuid.uuid1()) self.ops = [] - log('Received request {}'.format(self.request_id)) def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, @@ -475,3 +475,14 @@ def validate_request_id(self): log('request-id {} is 
expected'.format(self.request_id)) return self.VALID + +def duplicate_broker_requests(encoded_req1, encoded_req2): + req1 = json.loads(encoded_req1) + req2 = json.loads(encoded_req2) + if len(req1['ops']) != len(req2['ops']): + return False + for req_no in range(0,len(req1['ops'])): + for key in ['replicas', 'name', 'op']: + if req1['ops'][req_no][key] != req2['ops'][req_no][key]: + return False + return True From 76d570ce2ee8805ded921ced79bd78802b3bc257 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 06:52:32 +0000 Subject: [PATCH 0805/2699] ch sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9d035604..767902db 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ relation_get, relation_ids, related_units, + remote_unit, log, DEBUG, INFO, @@ -417,7 +418,6 @@ def __init__(self, api_version=1): self.api_version = api_version self.request_id = str(uuid.uuid1()) self.ops = [] - log('Received request {}'.format(self.request_id)) def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, @@ -475,3 +475,14 @@ def validate_request_id(self): log('request-id {} is expected'.format(self.request_id)) return self.VALID + +def duplicate_broker_requests(encoded_req1, encoded_req2): + req1 = json.loads(encoded_req1) + req2 = json.loads(encoded_req2) + if len(req1['ops']) != len(req2['ops']): + return False + for req_no in range(0,len(req1['ops'])): + for key in ['replicas', 'name', 'op']: + if req1['ops'][req_no][key] != req2['ops'][req_no][key]: + return False + return True From 296032bb0279a5acbc2805c70e53115f937042ed Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 07:17:15 +0000 Subject: [PATCH 0806/2699] Ch sync --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 767902db..ce7dd2c9 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -477,6 +477,8 @@ def validate_request_id(self): return self.VALID def duplicate_broker_requests(encoded_req1, encoded_req2): + if not encoded_req1 or not encoded_req2: + return False req1 = json.loads(encoded_req1) req2 = json.loads(encoded_req2) if len(req1['ops']) != len(req2['ops']): From c3852181b9c3459ff59298d6c36706a72d4514b9 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 07:17:15 +0000 Subject: [PATCH 0807/2699] Ch sync --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 767902db..ce7dd2c9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -477,6 +477,8 @@ def validate_request_id(self): return self.VALID def duplicate_broker_requests(encoded_req1, encoded_req2): + if not encoded_req1 or not encoded_req2: + return False req1 = json.loads(encoded_req1) req2 = json.loads(encoded_req2) if len(req1['ops']) != 
len(req2['ops']): From e2a10a221334bb0a0e50c0fc8d3c14f5e8df3564 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 08:26:06 +0000 Subject: [PATCH 0808/2699] ch sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index ce7dd2c9..bcc8f9ab 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -488,3 +488,18 @@ def duplicate_broker_requests(encoded_req1, encoded_req2): if req1['ops'][req_no][key] != req2['ops'][req_no][key]: return False return True + +def broker_request_completed(encoded_req): + req = json.loads(encoded_req) + broker_key = get_broker_rsp_key() + for rid in relation_ids('ceph'): + for unit in related_units(rid): + rdata = relation_get(attribute=broker_key, rid=rid, unit=unit) + if rdata: + rsp = CephBrokerRsp(rdata) + if not rsp.exit_code: + return True + return False + +def get_broker_rsp_key(): + return 'broker_rsp_' + local_unit().replace('/', '-') From 9405dccf46fbca91c7a2416c8822ffc16f087cb5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 22 Aug 2015 08:26:06 +0000 Subject: [PATCH 0809/2699] ch sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index ce7dd2c9..bcc8f9ab 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -488,3 +488,18 @@ def duplicate_broker_requests(encoded_req1, encoded_req2): if req1['ops'][req_no][key] != req2['ops'][req_no][key]: return False return True + +def broker_request_completed(encoded_req): + req = json.loads(encoded_req) + broker_key = get_broker_rsp_key() + for rid in relation_ids('ceph'): + for unit in related_units(rid): + rdata = relation_get(attribute=broker_key, rid=rid, unit=unit) + if rdata: + rsp = CephBrokerRsp(rdata) + if not rsp.exit_code: + return True + return False + +def get_broker_rsp_key(): + return 'broker_rsp_' + local_unit().replace('/', '-') From 11e763247315e305fa9b2955cce5a11c8c750f93 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 07:16:52 +0000 Subject: [PATCH 0810/2699] Send flag in legacy request to show that unit specific reply is expected --- ceph-proxy/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 1055f6c3..fb749e8a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -322,6 +322,7 @@ def client_relation_changed(): unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker_rsp_' + unit_id data = { + 'unit_targeted_reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From 1c5f30cea6a1f204cd69d8e72d74000f43e6544a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 07:16:52 +0000 Subject: [PATCH 0811/2699] Send flag in legacy request to show that unit specific reply is expected --- ceph-mon/hooks/hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 1055f6c3..fb749e8a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -322,6 +322,7 @@ def client_relation_changed(): unit_id = remote_unit().replace('/', '-') 
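A note on the pattern patches 0802/0803 establish and the surrounding patches refine: the mon unit publishes its reply twice, once under the legacy broker_rsp key and once under a key derived from the requesting unit's name, so several client units sharing one relation can each pick out their own response. A minimal sketch of the derivation, using an illustrative helper name:

    def unit_response_key(remote_unit_name):
        # Juju unit names look like 'glance/0'; the '/' is normalised
        # to '-' for use in relation settings. Patches 0812/0813 below
        # switch the prefix from 'broker_rsp_' to 'broker-rsp-'.
        return 'broker_rsp_' + remote_unit_name.replace('/', '-')

    assert unit_response_key('glance/0') == 'broker_rsp_glance-0'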
unit_response_key = 'broker_rsp_' + unit_id data = { + 'unit_targeted_reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From 5ff5c32820de51362c8ccbb202e06cccd36ae6e2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 09:16:45 +0000 Subject: [PATCH 0812/2699] Use '-' in relation vars to be consistent with other charms --- ceph-proxy/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index fb749e8a..ed36409f 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -320,9 +320,9 @@ def client_relation_changed(): else: rsp = process_requests(settings['broker_req']) unit_id = remote_unit().replace('/', '-') - unit_response_key = 'broker_rsp_' + unit_id + unit_response_key = 'broker-rsp-' + unit_id data = { - 'unit_targeted_reponses': True, + 'unit-targeted-reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From 560ed0ca567ac460e1676f4d49520012bed5c3c1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 09:16:45 +0000 Subject: [PATCH 0813/2699] Use '-' in relation vars to be consistent with other charms --- ceph-mon/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index fb749e8a..ed36409f 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -320,9 +320,9 @@ def client_relation_changed(): else: rsp = process_requests(settings['broker_req']) unit_id = remote_unit().replace('/', '-') - unit_response_key = 'broker_rsp_' + unit_id + unit_response_key = 'broker-rsp-' + unit_id data = { - 'unit_targeted_reponses': True, + 'unit-targeted-reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From 051d525c877a185bd2c1717c6f4bc671ca5be9ca Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 09:16:50 +0000 Subject: [PATCH 0814/2699] ch sync --- .../contrib/storage/linux/ceph.py | 78 +++++++++++++++++-- 1 file changed, 70 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index bcc8f9ab..410e151d 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ local_unit, relation_get, relation_ids, + relation_set, related_units, remote_unit, log, @@ -476,7 +477,43 @@ def validate_request_id(self): log('request-id {} is expected'.format(self.request_id)) return self.VALID -def duplicate_broker_requests(encoded_req1, encoded_req2): +def request_states(request_needed): + """Return dict showing if a request has been sent and completed per rid""" + complete = [] + issued = {} + requests = {} + for rid in relation_ids('ceph'): + complete = False + previous_request = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) + sent = equivalent_broker_requests(previous_request, request_needed.request) + if sent: + complete = broker_request_completed(previous_request, rid) + else: + complete = False + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + return requests + +def request_sent(request_needed): + """Check to see if a matching request has been sent""" + states = request_states(request_needed) + for rid in states.keys(): + if not states[rid]['sent']: + return False + return True + +def request_complete(request_needed): + """Check to see if a matching request has been completed""" + states = 
request_states(request_needed) + for rid in states.keys(): + if not states[rid]['complete']: + return False + return True + +def equivalent_broker_requests(encoded_req1, encoded_req2): + """Check to see if two requests are equivalent (ignore request id)""" if not encoded_req1 or not encoded_req2: return False req1 = json.loads(encoded_req1) @@ -489,17 +526,42 @@ def duplicate_broker_requests(encoded_req1, encoded_req2): return False return True -def broker_request_completed(encoded_req): +def broker_request_completed(encoded_req, rid): + """Check if a given request has been completed on the given relation""" req = json.loads(encoded_req) broker_key = get_broker_rsp_key() - for rid in relation_ids('ceph'): - for unit in related_units(rid): - rdata = relation_get(attribute=broker_key, rid=rid, unit=unit) - if rdata: - rsp = CephBrokerRsp(rdata) + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == req.get('request-id'): if not rsp.exit_code: return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. + if rdata.get('broker_rsp'): + if rdata.get('unit-targeted-reponses'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies') + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies') + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True return False def get_broker_rsp_key(): - return 'broker_rsp_' + local_unit().replace('/', '-') + """Return broker request key for this unit""" + return 'broker-rsp-' + local_unit().replace('/', '-') + +def send_request_if_needed(rq): + """Send broker request if one has not already been sent""" + if request_sent(rq): + log('Request already sent but not complete, not sending new request') + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(rq.request_id)) + relation_set(relation_id=rid, broker_req=rq.request) From ad49e633c5da4ee54d717c318aaee20f6ff54529 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 24 Aug 2015 09:16:50 +0000 Subject: [PATCH 0815/2699] ch sync --- .../contrib/storage/linux/ceph.py | 78 +++++++++++++++++-- 1 file changed, 70 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index bcc8f9ab..410e151d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ local_unit, relation_get, relation_ids, + relation_set, related_units, remote_unit, log, @@ -476,7 +477,43 @@ def validate_request_id(self): log('request-id {} is expected'.format(self.request_id)) return self.VALID -def duplicate_broker_requests(encoded_req1, encoded_req2): +def request_states(request_needed): + """Return dict showing if a request has been sent and completed per rid""" + complete = [] + issued = {} + requests = {} + for rid in relation_ids('ceph'): + complete = False + previous_request = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) + sent = equivalent_broker_requests(previous_request, request_needed.request) + if sent: + complete = broker_request_completed(previous_request, rid) + else: + complete = False + requests[rid] = { + 'sent': sent, + 
'complete': complete, + } + return requests + +def request_sent(request_needed): + """Check to see if a matching request has been sent""" + states = request_states(request_needed) + for rid in states.keys(): + if not states[rid]['sent']: + return False + return True + +def request_complete(request_needed): + """Check to see if a matching request has been completed""" + states = request_states(request_needed) + for rid in states.keys(): + if not states[rid]['complete']: + return False + return True + +def equivalent_broker_requests(encoded_req1, encoded_req2): + """Check to see if two requests are equivalent (ignore request id)""" if not encoded_req1 or not encoded_req2: return False req1 = json.loads(encoded_req1) @@ -489,17 +526,42 @@ def duplicate_broker_requests(encoded_req1, encoded_req2): return False return True -def broker_request_completed(encoded_req): +def broker_request_completed(encoded_req, rid): + """Check if a given request has been completed on the given relation""" req = json.loads(encoded_req) broker_key = get_broker_rsp_key() - for rid in relation_ids('ceph'): - for unit in related_units(rid): - rdata = relation_get(attribute=broker_key, rid=rid, unit=unit) - if rdata: - rsp = CephBrokerRsp(rdata) + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == req.get('request-id'): if not rsp.exit_code: return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. + if rdata.get('broker_rsp'): + if rdata.get('unit-targeted-reponses'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies') + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies') + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True return False def get_broker_rsp_key(): - return 'broker_rsp_' + local_unit().replace('/', '-') + """Return broker request key for this unit""" + return 'broker-rsp-' + local_unit().replace('/', '-') + +def send_request_if_needed(rq): + """Send broker request if one has not already been sent""" + if request_sent(rq): + log('Request already sent but not complete, not sending new request') + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(rq.request_id)) + relation_set(relation_id=rid, broker_req=rq.request) From fb266003dd1cc202baab46ec2fac37ef178d3698 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Aug 2015 15:14:26 +0100 Subject: [PATCH 0816/2699] ch sync --- .../contrib/storage/linux/ceph.py | 36 +++++-------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 410e151d..db8459f0 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -41,7 +41,6 @@ relation_ids, relation_set, related_units, - remote_unit, log, DEBUG, INFO, @@ -437,9 +436,6 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. 
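Worth noting before the hunk continues: the comparison in use at this point in the series, equivalent_broker_requests, deliberately ignores the request id and compares only the op contents, so a request re-issued with a fresh uuid still counts as already sent. An illustrative round trip with invented ids, assuming the helper is imported from charmhelpers.contrib.storage.linux.ceph:

    import json

    a = json.dumps({'api-version': 1, 'request-id': 'aaaa-1111',
                    'ops': [{'op': 'create-pool', 'name': 'glance',
                             'replicas': 3}]})
    b = json.dumps({'api-version': 1, 'request-id': 'bbbb-2222',
                    'ops': [{'op': 'create-pool', 'name': 'glance',
                             'replicas': 3}]})
    assert equivalent_broker_requests(a, b)  # ids differ, ops match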
""" - VALID = 0 - ABSENT = 1 - INVALID = 2 def __init__(self, encoded_rsp): self.api_version = None @@ -457,30 +453,10 @@ def exit_code(self): def exit_msg(self): return self.rsp.get('stderr') - def validate_request_id(self): - pending_request_id = None - pending_request_raw = relation_get(attribute='broker_req', - unit=local_unit()) - if pending_request_raw: - pending_request = json.loads(pending_request_raw) - pending_request_id = pending_request.get('request-id') - if not self.request_id: - log('Request has no request-id'.format(svc), level=DEBUG) - # back compat - return self.ABSENT - - if pending_request_id and self.request_id != pending_request_id: - log('request-id {} does not match expected request-id ' - '{}'.format(self.request_id, pending_request_id), level=DEBUG) - return self.INVALID - - log('request-id {} is expected'.format(self.request_id)) - return self.VALID def request_states(request_needed): """Return dict showing if a request has been sent and completed per rid""" complete = [] - issued = {} requests = {} for rid in relation_ids('ceph'): complete = False @@ -496,22 +472,25 @@ def request_states(request_needed): } return requests + def request_sent(request_needed): - """Check to see if a matching request has been sent""" + """Check to see if a matching request has been sent""" states = request_states(request_needed) for rid in states.keys(): if not states[rid]['sent']: return False return True + def request_complete(request_needed): - """Check to see if a matching request has been completed""" + """Check to see if a matching request has been completed""" states = request_states(request_needed) for rid in states.keys(): if not states[rid]['complete']: return False return True + def equivalent_broker_requests(encoded_req1, encoded_req2): """Check to see if two requests are equivalent (ignore request id)""" if not encoded_req1 or not encoded_req2: @@ -520,12 +499,13 @@ def equivalent_broker_requests(encoded_req1, encoded_req2): req2 = json.loads(encoded_req2) if len(req1['ops']) != len(req2['ops']): return False - for req_no in range(0,len(req1['ops'])): + for req_no in range(0, len(req1['ops'])): for key in ['replicas', 'name', 'op']: if req1['ops'][req_no][key] != req2['ops'][req_no][key]: return False return True + def broker_request_completed(encoded_req, rid): """Check if a given request has been completed on the given relation""" req = json.loads(encoded_req) @@ -553,10 +533,12 @@ def broker_request_completed(encoded_req, rid): return True return False + def get_broker_rsp_key(): """Return broker request key for this unit""" return 'broker-rsp-' + local_unit().replace('/', '-') + def send_request_if_needed(rq): """Send broker request if one has not already been sent""" if request_sent(rq): From b7cbcaece9b767061ae8b5e98bb197d39c69ab6c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Aug 2015 15:14:26 +0100 Subject: [PATCH 0817/2699] ch sync --- .../contrib/storage/linux/ceph.py | 36 +++++-------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 410e151d..db8459f0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -41,7 +41,6 @@ relation_ids, relation_set, related_units, - remote_unit, log, DEBUG, INFO, @@ -437,9 +436,6 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. 
""" - VALID = 0 - ABSENT = 1 - INVALID = 2 def __init__(self, encoded_rsp): self.api_version = None @@ -457,30 +453,10 @@ def exit_code(self): def exit_msg(self): return self.rsp.get('stderr') - def validate_request_id(self): - pending_request_id = None - pending_request_raw = relation_get(attribute='broker_req', - unit=local_unit()) - if pending_request_raw: - pending_request = json.loads(pending_request_raw) - pending_request_id = pending_request.get('request-id') - if not self.request_id: - log('Request has no request-id'.format(svc), level=DEBUG) - # back compat - return self.ABSENT - - if pending_request_id and self.request_id != pending_request_id: - log('request-id {} does not match expected request-id ' - '{}'.format(self.request_id, pending_request_id), level=DEBUG) - return self.INVALID - - log('request-id {} is expected'.format(self.request_id)) - return self.VALID def request_states(request_needed): """Return dict showing if a request has been sent and completed per rid""" complete = [] - issued = {} requests = {} for rid in relation_ids('ceph'): complete = False @@ -496,22 +472,25 @@ def request_states(request_needed): } return requests + def request_sent(request_needed): - """Check to see if a matching request has been sent""" + """Check to see if a matching request has been sent""" states = request_states(request_needed) for rid in states.keys(): if not states[rid]['sent']: return False return True + def request_complete(request_needed): - """Check to see if a matching request has been completed""" + """Check to see if a matching request has been completed""" states = request_states(request_needed) for rid in states.keys(): if not states[rid]['complete']: return False return True + def equivalent_broker_requests(encoded_req1, encoded_req2): """Check to see if two requests are equivalent (ignore request id)""" if not encoded_req1 or not encoded_req2: @@ -520,12 +499,13 @@ def equivalent_broker_requests(encoded_req1, encoded_req2): req2 = json.loads(encoded_req2) if len(req1['ops']) != len(req2['ops']): return False - for req_no in range(0,len(req1['ops'])): + for req_no in range(0, len(req1['ops'])): for key in ['replicas', 'name', 'op']: if req1['ops'][req_no][key] != req2['ops'][req_no][key]: return False return True + def broker_request_completed(encoded_req, rid): """Check if a given request has been completed on the given relation""" req = json.loads(encoded_req) @@ -553,10 +533,12 @@ def broker_request_completed(encoded_req, rid): return True return False + def get_broker_rsp_key(): """Return broker request key for this unit""" return 'broker-rsp-' + local_unit().replace('/', '-') + def send_request_if_needed(rq): """Send broker request if one has not already been sent""" if request_sent(rq): From 0a45ffc5d63979429e59c56e5b0d41561af5f219 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Aug 2015 15:43:06 +0100 Subject: [PATCH 0818/2699] Fix lint and add unit_test updates --- ceph-proxy/hooks/hooks.py | 2 +- ceph-proxy/unit_tests/test_ceph_broker.py | 27 +++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index ed36409f..da978e66 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -319,7 +319,7 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = remote_unit().replace('/', '-') + unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + 
unit_id data = { 'unit-targeted-reponses': True, diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index 0176d119..fc698174 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -70,3 +70,30 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists.assert_called_with(service='admin', name='foo') self.assertFalse(mock_create_pool.called) self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'request-id': '1ef5aede', + 'ops': [{'op': 'create-pool', 'name': + 'foo', 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3) + self.assertEqual(json.loads(rc)['exit-code'], 0) + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_rid(self, mock_log): + reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', + 'ops': [{'op': 'create-pool'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc)['exit-code'], 1) + self.assertEqual(json.loads(rc)['stderr'], + "Missing or invalid api version (0)") + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From 01dd12306398e66799882b8cbc03c933ba1ecc42 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Aug 2015 15:43:06 +0100 Subject: [PATCH 0819/2699] Fix lint and add unit_test updates --- ceph-mon/hooks/hooks.py | 2 +- ceph-mon/unit_tests/test_ceph_broker.py | 27 +++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index ed36409f..da978e66 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -319,7 +319,7 @@ def client_relation_changed(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = remote_unit().replace('/', '-') + unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id data = { 'unit-targeted-reponses': True, diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 0176d119..fc698174 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -70,3 +70,30 @@ def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists.assert_called_with(service='admin', name='foo') self.assertFalse(mock_create_pool.called) self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'request-id': '1ef5aede', + 'ops': [{'op': 'create-pool', 'name': + 'foo', 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3) + 
self.assertEqual(json.loads(rc)['exit-code'], 0) + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_rid(self, mock_log): + reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', + 'ops': [{'op': 'create-pool'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc)['exit-code'], 1) + self.assertEqual(json.loads(rc)['stderr'], + "Missing or invalid api version (0)") + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From 31485af526eadff9179dbe48f1353499e0f55e0e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 3 Sep 2015 10:42:00 +0100 Subject: [PATCH 0820/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- .../hooks/charmhelpers/contrib/network/ip.py | 6 +++++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..67b4dccc 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index a35d006b..ab53a780 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): From e27b15c59a5c8012421e5ed395712dd9e38af296 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 3 Sep 2015 10:42:00 +0100 Subject: [PATCH 0821/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- .../hooks/charmhelpers/contrib/network/ip.py | 6 +++++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..67b4dccc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py 
b/ceph-mon/hooks/charmhelpers/core/hookenv.py index a35d006b..ab53a780 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): From a9517cff38ac19d5de49bcaac1a629b703015fd9 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 3 Sep 2015 10:42:18 +0100 Subject: [PATCH 0822/2699] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- .../hooks/charmhelpers/contrib/network/ip.py | 6 +++++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..67b4dccc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index a35d006b..ab53a780 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): From b732c20a617eb6c12e4cb1838576cd7e5c8119a7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:28:52 +0100 Subject: [PATCH 0823/2699] Remove unit-targeted-reponses as the presence of a request-id in broker_rsp achieves the same thing --- ceph-proxy/hooks/hooks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index da978e66..9d0db776 100755 --- a/ceph-proxy/hooks/hooks.py +++ 
b/ceph-proxy/hooks/hooks.py @@ -322,7 +322,6 @@ def client_relation_changed(): unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id data = { - 'unit-targeted-reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From 8426eb7fa307295709f27147696151b20cb784f8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:28:52 +0100 Subject: [PATCH 0824/2699] Remove unit-targeted-reponses as the presence of a request-id in broker_rsp achieves the same thing --- ceph-mon/hooks/hooks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index da978e66..9d0db776 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -322,7 +322,6 @@ def client_relation_changed(): unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id data = { - 'unit-targeted-reponses': True, 'broker_rsp': rsp, unit_response_key: rsp, } From cd240076bbc5132ef3c0e4a1d01a4449f753e930 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:33:49 +0100 Subject: [PATCH 0825/2699] Fix lint --- ceph-proxy/hooks/ceph_broker.py | 4 +++- ceph-proxy/hooks/hooks.py | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index de75518e..e162dcb2 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -35,10 +35,11 @@ def process_requests(reqs): try: version = reqs.get('api-version') if version == 1: - log('Processing request {}'.format(request_id)) + log('Processing request {}'.format(request_id), level=DEBUG) resp = process_requests_v1(reqs['ops']) if request_id: resp['request-id'] = request_id + return resp except Exception as exc: @@ -52,6 +53,7 @@ def process_requests(reqs): resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id + return resp diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 9d0db776..bc11596a 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -321,6 +321,8 @@ def client_relation_changed(): rsp = process_requests(settings['broker_req']) unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id + # broker_rsp is being left for backward compatibility, + # unit_response_key superscedes it data = { 'broker_rsp': rsp, unit_response_key: rsp, From 6365f6083935f70fa4291b915d9586802242c238 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:33:49 +0100 Subject: [PATCH 0826/2699] Fix lint --- ceph-mon/hooks/ceph_broker.py | 4 +++- ceph-mon/hooks/hooks.py | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index de75518e..e162dcb2 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -35,10 +35,11 @@ def process_requests(reqs): try: version = reqs.get('api-version') if version == 1: - log('Processing request {}'.format(request_id)) + log('Processing request {}'.format(request_id), level=DEBUG) resp = process_requests_v1(reqs['ops']) if request_id: resp['request-id'] = request_id + return resp except Exception as exc: @@ -52,6 +53,7 @@ def process_requests(reqs): resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id + return resp diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 9d0db776..bc11596a 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -321,6 +321,8 @@ def 
client_relation_changed(): rsp = process_requests(settings['broker_req']) unit_id = remote_unit().replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id + # broker_rsp is being left for backward compatibility, + # unit_response_key superscedes it data = { 'broker_rsp': rsp, unit_response_key: rsp, From 140bfc892ac72fec9f5931aad0d879b35ce298b1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:34:36 +0100 Subject: [PATCH 0827/2699] charmhelper sync --- .../contrib/storage/linux/ceph.py | 144 +++++++++++++----- 1 file changed, 102 insertions(+), 42 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index db8459f0..5f7135b0 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -414,10 +414,16 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1): + def __init__(self, api_version=1, ops=None, request_id=None): self.api_version = api_version - self.request_id = str(uuid.uuid1()) - self.ops = [] + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + if ops: + self.ops = ops + else: + self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, @@ -428,6 +434,28 @@ def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, 'request-id': self.request_id}) + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op']: + if self.ops[req_no][key] != other.ops[req_no][key]: + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + class CephBrokerRsp(object): """Ceph broker response. @@ -454,17 +482,42 @@ def exit_msg(self): return self.rsp.get('stderr') -def request_states(request_needed): - """Return dict showing if a request has been sent and completed per rid""" +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + ops=request_data['ops'], + request_id=request_data['request-id']) + return request + + +def get_request_states(request): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. 
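Taken together these helpers give client charms an idempotent flow: build the request, skip the work if an equivalent request has already completed, otherwise send it at most once per relation. A sketch of the intended call pattern, where ceph_changed and configure_pools are hypothetical charm-side names:

    def ceph_changed():
        rq = CephBrokerRq()
        rq.add_op_create_pool(name='glance', replica_count=3)
        if is_request_complete(rq):
            configure_pools()  # hypothetical charm-specific follow-up
        else:
            send_request_if_needed(rq)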
+ + @param request: A CephBrokerRq object + """ complete = [] requests = {} for rid in relation_ids('ceph'): complete = False - previous_request = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) - sent = equivalent_broker_requests(previous_request, request_needed.request) - if sent: - complete = broker_request_completed(previous_request, rid) + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_broker_request_complete(previous_request, rid) else: + sent = False complete = False requests[rid] = { 'sent': sent, @@ -473,48 +526,47 @@ def request_states(request_needed): return requests -def request_sent(request_needed): - """Check to see if a matching request has been sent""" - states = request_states(request_needed) +def is_request_sent(request): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) for rid in states.keys(): if not states[rid]['sent']: return False return True -def request_complete(request_needed): - """Check to see if a matching request has been completed""" - states = request_states(request_needed) +def is_request_complete(request): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) for rid in states.keys(): if not states[rid]['complete']: return False return True -def equivalent_broker_requests(encoded_req1, encoded_req2): - """Check to see if two requests are equivalent (ignore request id)""" - if not encoded_req1 or not encoded_req2: - return False - req1 = json.loads(encoded_req1) - req2 = json.loads(encoded_req2) - if len(req1['ops']) != len(req2['ops']): - return False - for req_no in range(0, len(req1['ops'])): - for key in ['replicas', 'name', 'op']: - if req1['ops'][req_no][key] != req2['ops'][req_no][key]: - return False - return True - +def is_broker_request_complete(request, rid): + """Check if a given request has been completed on the given relation -def broker_request_completed(encoded_req, rid): - """Check if a given request has been completed on the given relation""" - req = json.loads(encoded_req) + @param request: A CephBrokerRq object + @param rid: Relation ID + """ broker_key = get_broker_rsp_key() for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) if rdata.get(broker_key): rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == req.get('request-id'): + if rsp.request_id == request.request_id: if not rsp.exit_code: return True else: @@ -524,10 +576,10 @@ def broker_request_completed(encoded_req, rid): if rdata.get('broker_rsp'): if rdata.get('unit-targeted-reponses'): log('Ignoring legacy broker_rsp without unit key as remote ' - 'service supports unit specific replies') + 'service supports unit specific replies', level=DEBUG) else: log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies') + 'supports unit specific replies', level=DEBUG) rsp = CephBrokerRsp(rdata['broker_rsp']) if not rsp.exit_code: return True @@ -535,15 +587,23 @@ def broker_request_completed(encoded_req, rid): def get_broker_rsp_key(): - """Return broker request key for this unit""" + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information 
back to this unit + """ return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(rq): - """Send broker request if one has not already been sent""" - if request_sent(rq): - log('Request already sent but not complete, not sending new request') +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) else: for rid in relation_ids('ceph'): - log('Sending request {}'.format(rq.request_id)) - relation_set(relation_id=rid, broker_req=rq.request) + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) From 0a03044ac8f7821aa5e9902ac70879b167242a9b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:34:36 +0100 Subject: [PATCH 0828/2699] charmhelper sync --- .../contrib/storage/linux/ceph.py | 144 +++++++++++++----- 1 file changed, 102 insertions(+), 42 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index db8459f0..5f7135b0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -414,10 +414,16 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1): + def __init__(self, api_version=1, ops=None, request_id=None): self.api_version = api_version - self.request_id = str(uuid.uuid1()) - self.ops = [] + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + if ops: + self.ops = ops + else: + self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, @@ -428,6 +434,28 @@ def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, 'request-id': self.request_id}) + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op']: + if self.ops[req_no][key] != other.ops[req_no][key]: + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + class CephBrokerRsp(object): """Ceph broker response. @@ -454,17 +482,42 @@ def exit_msg(self): return self.rsp.get('stderr') -def request_states(request_needed): - """Return dict showing if a request has been sent and completed per rid""" +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + ops=request_data['ops'], + request_id=request_data['request-id']) + return request + + +def get_request_states(request): + """Return a dict of requests per relation id with their corresponding + completion state. 
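The __eq__ added above makes the "functionally equivalent" test concrete: two requests compare equal when their api versions and ops match, even though their uuid1 request ids are per-instance. For example:

    rq1 = CephBrokerRq()
    rq1.add_op_create_pool(name='glance', replica_count=3)
    rq2 = CephBrokerRq()
    rq2.add_op_create_pool(name='glance', replica_count=3)
    assert rq1 == rq2                        # api version and ops match
    assert rq1.request_id != rq2.request_id  # ids stay distinct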
+ + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ complete = [] requests = {} for rid in relation_ids('ceph'): complete = False - previous_request = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) - sent = equivalent_broker_requests(previous_request, request_needed.request) - if sent: - complete = broker_request_completed(previous_request, rid) + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_broker_request_complete(previous_request, rid) else: + sent = False complete = False requests[rid] = { 'sent': sent, @@ -473,48 +526,47 @@ def request_states(request_needed): return requests -def request_sent(request_needed): - """Check to see if a matching request has been sent""" - states = request_states(request_needed) +def is_request_sent(request): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) for rid in states.keys(): if not states[rid]['sent']: return False return True -def request_complete(request_needed): - """Check to see if a matching request has been completed""" - states = request_states(request_needed) +def is_request_complete(request): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) for rid in states.keys(): if not states[rid]['complete']: return False return True -def equivalent_broker_requests(encoded_req1, encoded_req2): - """Check to see if two requests are equivalent (ignore request id)""" - if not encoded_req1 or not encoded_req2: - return False - req1 = json.loads(encoded_req1) - req2 = json.loads(encoded_req2) - if len(req1['ops']) != len(req2['ops']): - return False - for req_no in range(0, len(req1['ops'])): - for key in ['replicas', 'name', 'op']: - if req1['ops'][req_no][key] != req2['ops'][req_no][key]: - return False - return True - +def is_broker_request_complete(request, rid): + """Check if a given request has been completed on the given relation -def broker_request_completed(encoded_req, rid): - """Check if a given request has been completed on the given relation""" - req = json.loads(encoded_req) + @param request: A CephBrokerRq object + @param rid: Relation ID + """ broker_key = get_broker_rsp_key() for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) if rdata.get(broker_key): rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == req.get('request-id'): + if rsp.request_id == request.request_id: if not rsp.exit_code: return True else: @@ -524,10 +576,10 @@ def broker_request_completed(encoded_req, rid): if rdata.get('broker_rsp'): if rdata.get('unit-targeted-reponses'): log('Ignoring legacy broker_rsp without unit key as remote ' - 'service supports unit specific replies') + 'service supports unit specific replies', level=DEBUG) else: log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies') + 'supports unit specific replies', level=DEBUG) rsp = CephBrokerRsp(rdata['broker_rsp']) if not rsp.exit_code: return True @@ -535,15 +587,23 @@ def broker_request_completed(encoded_req, rid): def get_broker_rsp_key(): - 
"""Return broker request key for this unit""" + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(rq): - """Send broker request if one has not already been sent""" - if request_sent(rq): - log('Request already sent but not complete, not sending new request') +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) else: for rid in relation_ids('ceph'): - log('Sending request {}'.format(rq.request_id)) - relation_set(relation_id=rid, broker_req=rq.request) + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) From 4788886ca336d0b14dd988ae7df6635e57673317 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:42:20 +0100 Subject: [PATCH 0829/2699] Charmhelper sync --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 5f7135b0..0caaabe3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -574,7 +574,8 @@ def is_broker_request_complete(request, rid): # remote ceph cluster does not support unit targeted replies or it # has not processed our request yet. if rdata.get('broker_rsp'): - if rdata.get('unit-targeted-reponses'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): log('Ignoring legacy broker_rsp without unit key as remote ' 'service supports unit specific replies', level=DEBUG) else: From 432b38d79b24338c6014b351e565f7c4e55eb290 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 4 Sep 2015 11:42:20 +0100 Subject: [PATCH 0830/2699] Charmhelper sync --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 5f7135b0..0caaabe3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -574,7 +574,8 @@ def is_broker_request_complete(request, rid): # remote ceph cluster does not support unit targeted replies or it # has not processed our request yet. 
if rdata.get('broker_rsp'): - if rdata.get('unit-targeted-reponses'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): log('Ignoring legacy broker_rsp without unit key as remote ' 'service supports unit specific replies', level=DEBUG) else: From 86a026c2bc08d67c86d611e3aa32f618ee5b28c2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 7 Sep 2015 09:23:57 +0100 Subject: [PATCH 0831/2699] Charm helper sync --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- .../contrib/storage/linux/ceph.py | 62 +++++++++++++++++-- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index d4035311..eeee6f8c 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/cepg-broker +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0caaabe3..c5aa2dde 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -414,21 +414,26 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1, ops=None, request_id=None): + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: self.request_id = request_id else: self.request_id = str(uuid.uuid1()) - if ops: - self.ops = ops - else: - self.ops = [] + self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count}) + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, @@ -482,6 +487,51 @@ def exit_msg(self): return self.rsp.get('stderr') +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
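The serialized request is what the CephBrokerRq.request property produces, approximately as follows; the request-id is a uuid1 string, so it is fresh for each instance:

    import json
    import uuid

    payload = json.dumps({'api-version': 1,
                          'request-id': str(uuid.uuid1()),
                          'ops': [{'op': 'create-pool', 'name': 'glance',
                                   'replicas': 3}]})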
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + def get_previous_request(rid): """Return the last ceph broker request sent on a given relation @@ -493,8 +543,8 @@ def get_previous_request(rid): if broker_req: request_data = json.loads(broker_req) request = CephBrokerRq(api_version=request_data['api-version'], - ops=request_data['ops'], request_id=request_data['request-id']) + request.set_ops(request_data['ops']) return request From c99b2b0cc154ff7686d28eb0c07b7f90ef6e9bdc Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 7 Sep 2015 09:23:57 +0100 Subject: [PATCH 0832/2699] Charm helper sync --- ceph-mon/charm-helpers-hooks.yaml | 2 +- .../contrib/storage/linux/ceph.py | 62 +++++++++++++++++-- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index d4035311..eeee6f8c 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/cepg-broker +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0caaabe3..c5aa2dde 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -414,21 +414,26 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1, ops=None, request_id=None): + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: self.request_id = request_id else: self.request_id = str(uuid.uuid1()) - if ops: - self.ops = ops - else: - self.ops = [] + self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count}) + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + @property def request(self): return json.dumps({'api-version': self.api_version, 'ops': self.ops, @@ -482,6 +487,51 @@ def exit_msg(self): return self.rsp.get('stderr') +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. 
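
The get_previous_request() change above rebuilds a request from the JSON stored
on the relation, injecting the stored ops via set_ops() so that old and new
requests can be compared for equivalence. A cut-down, standalone sketch of that
round-trip, mirroring only the CephBrokerRq pieces shown in this sync:

    import json
    import uuid

    class CephBrokerRq(object):
        # Minimal mirror of the helper class above, enough for the round-trip.
        def __init__(self, api_version=1, request_id=None):
            self.api_version = api_version
            self.request_id = request_id or str(uuid.uuid1())
            self.ops = []

        def add_op_create_pool(self, name, replica_count=3):
            self.ops.append({'op': 'create-pool', 'name': name,
                             'replicas': replica_count})

        def set_ops(self, ops):
            self.ops = ops

        @property
        def request(self):
            return json.dumps({'api-version': self.api_version,
                               'ops': self.ops,
                               'request-id': self.request_id})

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3)

    # Round-trip through JSON as get_previous_request() does with broker_req.
    data = json.loads(rq.request)
    rebuilt = CephBrokerRq(api_version=data['api-version'],
                           request_id=data['request-id'])
    rebuilt.set_ops(data['ops'])
    assert rebuilt.request == rq.request
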
Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + def get_previous_request(rid): """Return the last ceph broker request sent on a given relation @@ -493,8 +543,8 @@ def get_previous_request(rid): if broker_req: request_data = json.loads(broker_req) request = CephBrokerRq(api_version=request_data['api-version'], - ops=request_data['ops'], request_id=request_data['request-id']) + request.set_ops(request_data['ops']) return request From 4cc64440206c8e1c5d2e8b06fc14828697001fa8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 10 Sep 2015 10:29:50 +0100 Subject: [PATCH 0833/2699] Charm helper sync --- .../contrib/storage/linux/ceph.py | 10 +- .../charmhelpers/contrib/amulet/utils.py | 284 +++++++++++--- .../contrib/openstack/amulet/deployment.py | 25 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++++ 4 files changed, 620 insertions(+), 58 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index c5aa2dde..b4fda124 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -545,6 +545,7 @@ def get_previous_request(rid): request = CephBrokerRq(api_version=request_data['api-version'], request_id=request_data['request-id']) request.set_ops(request_data['ops']) + return request @@ -565,14 +566,16 @@ def get_request_states(request): previous_request = get_previous_request(rid) if request == previous_request: sent = True - complete = is_broker_request_complete(previous_request, rid) + complete = is_request_complete_for_rid(previous_request, rid) else: sent = False complete = False + requests[rid] = { 'sent': sent, 'complete': complete, } + return requests @@ -587,6 +590,7 @@ def is_request_sent(request): for rid in states.keys(): if not states[rid]['sent']: return False + return True @@ -602,10 +606,11 @@ def is_request_complete(request): for rid in states.keys(): if not states[rid]['complete']: return False + return True -def is_broker_request_complete(request, rid): +def is_request_complete_for_rid(request, rid): """Check if a given request has been completed on the given relation @param request: A CephBrokerRq object @@ -634,6 +639,7 @@ def is_broker_request_complete(request, rid): 
rsp = CephBrokerRsp(rdata['broker_rsp']) if not rsp.exit_code: return True + return False diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 7816c934..6770f26b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -19,9 +19,11 @@ import logging import os import re +import socket import subprocess import sys import time +import uuid import amulet import distro_info @@ -114,7 +116,7 @@ def validate_services(self, commands): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -269,33 +271,52 @@ def _get_dir_mtime(self, sentry_unit, directory): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. - """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. 
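
The reworked _get_proc_start_time() above derives a service's start time from
the mtime of its /proc/<pid> directory rather than parsing pgrep output. A
standalone sketch of the same technique; pidof is assumed present, and
check_output raises CalledProcessError when no process matches:

    import os
    import subprocess

    def proc_start_time(service):
        # pidof prints space-separated pids; take the first and use the mtime
        # of its /proc/<pid> directory as an epoch start time.
        pids = subprocess.check_output(['pidof', service]).decode().split()
        return os.stat('/proc/{}'.format(pids[0])).st_mtime
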
pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -304,15 +325,15 @@ def service_restarted(self, sentry_unit, service, filename, return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=2, retry_sleep_time=30): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep + pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Seconds to sleep before looking for process retry_count (int): If service is not found, how many times to retry @@ -321,30 +342,44 @@ def service_restarted_since(self, sentry_unit, mtime, service, False if service is older than mtime or if service was not found. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError: + # NOTE(beisner) - race avoidance, proc may not exist yet. 
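
The retry loop introduced above exists because the /proc entry can vanish, or
not yet exist, between the pid lookup and the stat, so IOError is swallowed and
the probe retried. The shape of that pattern, extracted; the function and
argument names here are illustrative, not part of the helpers:

    import time

    def poll_with_retries(fetch, retry_count=2, retry_sleep_time=30):
        # Retry a racy lookup, sleeping between attempts and returning None
        # if every attempt fails, as service_restarted_since does above.
        value = None
        tries = 0
        while tries <= retry_count and not value:
            try:
                value = fetch()
            except IOError:
                time.sleep(retry_sleep_time)
            tries += 1
        return value
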
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed'.format(tries, service, unit_name)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, @@ -374,8 +409,9 @@ def config_updated_since(self, sentry_unit, filename, mtime, return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=2, + retry_sleep_time=30): """Check service and file were updated after mtime Args: @@ -383,9 +419,10 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -402,15 +439,25 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=0) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -428,7 +475,6 @@ def get_ubuntu_releases(self): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -568,6 +614,142 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return None + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. 
+ + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. + + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 07ee2ef1..63155d8d 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,8 +44,15 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
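
port_knock_tcp() above is a plain socket-connect probe: resolve the host if
possible, then attempt a timed TCP connect. A minimal sketch with the same
shape (the localhost/22 defaults mirror the helper's and are only
illustrative):

    import socket

    def tcp_port_open(host='localhost', port=22, timeout=15):
        try:
            connect_host = socket.gethostbyname(host)
        except socket.error:
            connect_host = host  # resolution failed: try the name as given
        try:
            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            knock.settimeout(timeout)
            knock.connect((connect_host, port))
            knock.close()
            return True
        except socket.error:
            return False
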
+ force_series_current = ['nrpe'] + if self.series in ['precise', 'trusty']: base_series = self.series else: @@ -53,11 +60,17 @@ def _determine_branch_locations(self, other_services): if self.stable: for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) else: for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -77,21 +90,23 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..b1397419 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
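
The _add_services() change above splits charms three ways: those configured via
openstack-origin, those taking a source option, and subordinates that take
neither. The decision, extracted as a sketch (the source string in the usage
line is invented for illustration):

    # Lists as defined in the deployment helper above.
    use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                  'ceph-osd', 'ceph-radosgw']
    no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']

    def origin_config(name, openstack=None, source=None):
        # Return the config dict the deployment would apply to this service.
        if openstack and name not in use_source + no_origin:
            return {'openstack-origin': openstack}
        if source and name in use_source and name not in no_origin:
            return {'source': source}
        return {}

    assert origin_config('ceph', source='cloud:trusty-liberty') == \
        {'source': 'cloud:trusty-liberty'}
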
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
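
rmq_ssl_is_enabled_on_unit() above reduces to a small truth table over three
facts: whether a port was requested, whether that port string appears in the
config, and whether 'ssl' appears at all. Compactly, returning None for the
combination the helper treats as a hard failure:

    def ssl_status(conf_contents, port=None):
        conf_ssl = 'ssl' in conf_contents
        conf_port = str(port) in conf_contents
        if port and conf_port and conf_ssl:
            return True    # enabled on the expected port
        if port and not conf_port and conf_ssl:
            return False   # enabled, but not on the expected port
        if not port and conf_ssl:
            return True    # enabled, port not checked
        if not port and not conf_ssl:
            return False   # disabled
        return None        # port given but no ssl stanza: unknown condition
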
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
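
connect_amqp_by_unit() above defaults the port from the ssl flag when none is
given: 5671 for AMQP over SSL, 5672 for plain AMQP. That logic in isolation:

    def amqp_port(ssl=False, port=None):
        # Default-port selection as in the connect helper above.
        return port or (5671 if ssl else 5672)

    assert amqp_port() == 5672
    assert amqp_port(ssl=True) == 5671
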
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' 
+ amulet.raise_status(amulet.FAIL, msg) From 3a512c38d31c872c5fc11ef10df75c055a09c2d8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 10 Sep 2015 10:29:50 +0100 Subject: [PATCH 0834/2699] Charm helper sync --- .../contrib/storage/linux/ceph.py | 10 +- .../charmhelpers/contrib/amulet/utils.py | 284 +++++++++++--- .../contrib/openstack/amulet/deployment.py | 25 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++++ 4 files changed, 620 insertions(+), 58 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index c5aa2dde..b4fda124 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -545,6 +545,7 @@ def get_previous_request(rid): request = CephBrokerRq(api_version=request_data['api-version'], request_id=request_data['request-id']) request.set_ops(request_data['ops']) + return request @@ -565,14 +566,16 @@ def get_request_states(request): previous_request = get_previous_request(rid) if request == previous_request: sent = True - complete = is_broker_request_complete(previous_request, rid) + complete = is_request_complete_for_rid(previous_request, rid) else: sent = False complete = False + requests[rid] = { 'sent': sent, 'complete': complete, } + return requests @@ -587,6 +590,7 @@ def is_request_sent(request): for rid in states.keys(): if not states[rid]['sent']: return False + return True @@ -602,10 +606,11 @@ def is_request_complete(request): for rid in states.keys(): if not states[rid]['complete']: return False + return True -def is_broker_request_complete(request, rid): +def is_request_complete_for_rid(request, rid): """Check if a given request has been completed on the given relation @param request: A CephBrokerRq object @@ -634,6 +639,7 @@ def is_broker_request_complete(request, rid): rsp = CephBrokerRsp(rdata['broker_rsp']) if not rsp.exit_code: return True + return False diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 7816c934..6770f26b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -19,9 +19,11 @@ import logging import os import re +import socket import subprocess import sys import time +import uuid import amulet import distro_info @@ -114,7 +116,7 @@ def validate_services(self, commands): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -269,33 +271,52 @@ def _get_dir_mtime(self, sentry_unit, directory): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. 
- """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -304,15 +325,15 @@ def service_restarted(self, sentry_unit, service, filename, return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=2, retry_sleep_time=30): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep + pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Seconds to sleep before looking for process retry_count (int): If service is not found, how many times to retry @@ -321,30 +342,44 @@ def service_restarted_since(self, sentry_unit, mtime, service, False if service is older than mtime or if service was not found. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed'.format(tries, service, unit_name)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, @@ -374,8 +409,9 @@ def config_updated_since(self, sentry_unit, filename, mtime, return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=2, + retry_sleep_time=30): """Check service and file were updated after mtime Args: @@ -383,9 +419,10 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -402,15 +439,25 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. 
""" - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=0) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -428,7 +475,6 @@ def get_ubuntu_releases(self): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -568,6 +614,142 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return None + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. 
Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. + + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. + + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 07ee2ef1..63155d8d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,8 +44,15 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] + # Force these charms to current series even when using an older series. + # ie. 
Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. + force_series_current = ['nrpe'] + if self.series in ['precise', 'trusty']: base_series = self.series else: @@ -53,11 +60,17 @@ def _determine_branch_locations(self, other_services): if self.stable: for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) else: for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -77,21 +90,23 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..b1397419 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
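# configure_rmq_ssl_on/off here re-run their validator every 4 seconds
# until it stops returning an error string or max_wait is exhausted. A
# generic, runnable sketch of that confirm loop (names illustrative):
import time

def wait_until_ok(check, max_wait=60, interval=4):
    """Re-run check() until it returns None (success) or time runs out.

    check() follows the validator convention above: None when the
    condition holds, an error-message string otherwise. Returns the
    last error, or None on success.
    """
    tries = 0
    err = check()
    while err and tries < (max_wait / interval):
        time.sleep(interval)
        err = check()
        tries += 1
    return err

# wait_until_ok(lambda: None) -> None  (condition already holds)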
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
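# A minimal end-to-end sketch of the pika calls these helpers make:
# connect with PlainCredentials, declare the durable test queue,
# publish, then basic_get and ack. Host and credentials are
# illustrative, and this follows the pika 0.x API in use here (pika 1.x
# renames some parameters, e.g. the ssl handling).
import pika

params = pika.ConnectionParameters(
    host='localhost', port=5672,  # 5671 would be the ssl default
    credentials=pika.PlainCredentials('testuser1', 'changeme'))
conn = pika.BlockingConnection(params)
chan = conn.channel()
chan.queue_declare(queue='test', auto_delete=False, durable=True)
chan.basic_publish(exchange='', routing_key='test', body='hello')

method_frame, _, body = chan.basic_get('test')
if method_frame:                 # None means the queue was empty
    chan.basic_ack(method_frame.delivery_tag)
    print(body)

chan.close()
conn.close()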
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) From e17b236d010db3fc2a10bdf1811ddf81cde5f9d4 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:35:02 +0100 Subject: [PATCH 0835/2699] Ensure python2 is installed before hook execution --- ceph-proxy/hooks/hooks.py | 2 +- ceph-proxy/hooks/install | 21 ++++++++++++++++++++- ceph-proxy/hooks/install.real | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-proxy/hooks/install create mode 120000 ceph-proxy/hooks/install.real diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index bc11596a..2306fcff 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -76,7 +76,7 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') -@hooks.hook('install') +@hooks.hook('install.real') def install(): execd_preinstall() add_source(config('source'), config('key')) diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install deleted file mode 120000 index 9416ca6a..00000000 --- a/ceph-proxy/hooks/install +++ /dev/null @@ -1 +0,0 @@ -hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install new file mode 100755 index 00000000..83a9d3ce --- /dev/null +++ b/ceph-proxy/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. 
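# An aside on the wrapper that follows (the same script is added to each
# ceph-* charm): in "dpkg -s ${pkg} 2>&1 > /dev/null" the redirects run
# left to right, so stderr is duplicated to the terminal before stdout
# is discarded; the exit-status test still works, but "is not installed"
# noise leaks through. A fully quiet probe would swap the order, e.g.:
is_installed() {
    dpkg -s "$1" > /dev/null 2>&1
}
# usage: is_installed python-yaml || apt-get -y install python-yaml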
+ +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done + +exec ./hooks/install.real diff --git a/ceph-proxy/hooks/install.real b/ceph-proxy/hooks/install.real new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-proxy/hooks/install.real @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From dff2ccd4fb08255b5146c27f88dd865c462cea44 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:35:02 +0100 Subject: [PATCH 0836/2699] Ensure python2 is installed before hook execution --- ceph-mon/hooks/hooks.py | 2 +- ceph-mon/hooks/install | 21 ++++++++++++++++++++- ceph-mon/hooks/install.real | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-mon/hooks/install create mode 120000 ceph-mon/hooks/install.real diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index bc11596a..2306fcff 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -76,7 +76,7 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') -@hooks.hook('install') +@hooks.hook('install.real') def install(): execd_preinstall() add_source(config('source'), config('key')) diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install deleted file mode 120000 index 9416ca6a..00000000 --- a/ceph-mon/hooks/install +++ /dev/null @@ -1 +0,0 @@ -hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install new file mode 100755 index 00000000..83a9d3ce --- /dev/null +++ b/ceph-mon/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done + +exec ./hooks/install.real diff --git a/ceph-mon/hooks/install.real b/ceph-mon/hooks/install.real new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-mon/hooks/install.real @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 2d6168ce1e937a7e9b9c1c87272c58b06c3e42f9 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:35:49 +0100 Subject: [PATCH 0837/2699] Ensure python2 is installed before hook execution --- ceph-osd/hooks/hooks.py | 2 +- ceph-osd/hooks/install | 21 ++++++++++++++++++++- ceph-osd/hooks/install.real | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-osd/hooks/install create mode 120000 ceph-osd/hooks/install.real diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index e28e9c1c..9d9a3d4c 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -61,7 +61,7 @@ def install_upstart_scripts(): shutil.copy(x, '/etc/init/') -@hooks.hook('install') +@hooks.hook('install.real') def install(): add_source(config('source'), config('key')) apt_update(fatal=True) diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install deleted file mode 120000 index 9416ca6a..00000000 --- a/ceph-osd/hooks/install +++ /dev/null @@ -1 +0,0 @@ -hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install new file mode 100755 index 00000000..83a9d3ce --- /dev/null +++ b/ceph-osd/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done + +exec ./hooks/install.real diff --git a/ceph-osd/hooks/install.real b/ceph-osd/hooks/install.real new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-osd/hooks/install.real @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From c32cf82e01f461d20a0a332c69f23b72d7af463f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:36:25 +0100 Subject: [PATCH 0838/2699] Ensure python2 is installed before hook execution --- ceph-radosgw/hooks/hooks.py | 2 +- ceph-radosgw/hooks/install | 21 ++++++++++++++++++++- ceph-radosgw/hooks/install.real | 1 + 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-radosgw/hooks/install create mode 120000 ceph-radosgw/hooks/install.real diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index f2032ac2..ceb20d10 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -109,7 +109,7 @@ def install_packages(): apt_install(APACHE_PACKAGES, fatal=True) -@hooks.hook('install') +@hooks.hook('install.real') def install(): execd_preinstall() enable_pocket('multiverse') diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install deleted file mode 120000 index 9416ca6a..00000000 --- a/ceph-radosgw/hooks/install +++ /dev/null @@ -1 +0,0 @@ -hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install new file mode 100755 index 00000000..83a9d3ce --- /dev/null +++ b/ceph-radosgw/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done + +exec ./hooks/install.real diff --git a/ceph-radosgw/hooks/install.real b/ceph-radosgw/hooks/install.real new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/install.real @@ -0,0 +1 @@ +hooks.py \ No newline at end of file From 49abfc7e8bc75d6daf2ce987ed1aee2d8287d58a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 1 Oct 2015 20:02:04 +0000 Subject: [PATCH 0839/2699] [corey.bryant,trivial] Sync charm-helpers --- .../hooks/charmhelpers/cli/__init__.py | 6 +- .../hooks/charmhelpers/cli/commands.py | 8 +- .../hooks/charmhelpers/cli/hookenv.py | 23 + .../hooks/charmhelpers/contrib/network/ip.py | 14 +- .../contrib/openstack/amulet/deployment.py | 34 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 134 +++++- .../charmhelpers/contrib/openstack/neutron.py | 57 ++- .../contrib/openstack/templates/ceph.conf | 6 + .../contrib/openstack/templating.py | 32 +- .../charmhelpers/contrib/openstack/utils.py | 315 +++++++++++- .../contrib/storage/linux/ceph.py | 239 +++++++++- .../contrib/storage/linux/utils.py | 5 +- .../hooks/charmhelpers/core/hookenv.py | 71 +-- ceph-radosgw/hooks/charmhelpers/core/host.py | 146 ++++-- .../hooks/charmhelpers/core/hugepage.py | 69 +++ .../hooks/charmhelpers/core/kernel.py | 68 +++ .../charmhelpers/core/services/helpers.py | 20 +- .../hooks/charmhelpers/core/strutils.py | 30 ++ .../hooks/charmhelpers/fetch/__init__.py | 8 + .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 449 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 34 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++ 24 files changed, 2240 insertions(+), 252 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/cli/hookenv.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/hugepage.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/kernel.py diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py index 7118daf5..16d52cc4 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ def run(self): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/ceph-radosgw/hooks/charmhelpers/cli/commands.py b/ceph-radosgw/hooks/charmhelpers/cli/commands.py index 443ff05d..7e91db00 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/commands.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . 
import hookenv # noqa diff --git a/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py b/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 00000000..265c816e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..7f3b66b1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr @@ -435,8 +437,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..722bc645 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
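# The network/ip.py hunk above hardens charmhelpers' import-on-demand
# pattern: apt_update now runs before apt_install and both are fatal, so
# a stale package index no longer leaves the import broken. Outside a
# charm the same shape reduces to the sketch below, with subprocess
# standing in for charmhelpers.fetch (python-netaddr is the Python 2
# package name used in the hunk):
import subprocess

try:
    import netaddr
except ImportError:
    subprocess.check_call(['apt-get', 'update'])
    subprocess.check_call(['apt-get', '-y', 'install', 'python-netaddr'])
    import netaddr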
+ force_series_current = ['nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..2b3087ea 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index ab2ebac1..49c04de0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import glob import json import os import re @@ -50,6 +51,8 @@ from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, list_nics, get_nic_hwaddr, mkdir, @@ -192,10 +195,50 @@ def config_flags_parser(config_flags): class OSContextGenerator(object): """Base class for all context generators.""" interfaces = [] + related = False + complete = False + missing_data = [] def __call__(self): raise NotImplementedError + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
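# Distilled from the context_complete helper added here: a context dict
# is complete only when no value is None or empty, and the offending
# keys become missing_data. A free-standing sketch of just that
# predicate:
def context_is_complete(ctxt):
    """Return (complete, missing_keys) for a context dict."""
    missing = sorted(k for k, v in ctxt.items() if v is None or v == '')
    return (not missing, missing)

# context_is_complete({'auth': 'cephx', 'key': ''})     -> (False, ['key'])
# context_is_complete({'auth': 'cephx', 'key': 'AQ=='}) -> (True, [])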
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] @@ -211,6 +254,7 @@ def __init__(self, self.database = database self.user = user self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] def __call__(self): self.database = self.database or config('database') @@ -244,6 +288,7 @@ def __call__(self): password_setting = self.relation_prefix + '_password' for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') @@ -255,7 +300,7 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } - if context_complete(ctxt): + if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt return {} @@ -276,6 +321,7 @@ def __call__(self): ctxt = {} for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rel_host = relation_get('host', rid=rid, unit=unit) rel_user = relation_get('user', rid=rid, unit=unit) @@ -285,7 +331,7 @@ def __call__(self): 'database_user': rel_user, 'database_password': rel_passwd, 'database_type': 'postgresql'} - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} @@ -346,6 +392,7 @@ def __call__(self): ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') @@ -364,7 +411,7 @@ def __call__(self): 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol}) - if context_complete(ctxt): + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading @@ -403,6 +450,7 @@ def __call__(self): ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False + self.related = True for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True @@ -435,7 +483,7 @@ def __call__(self): ha_vip_only = relation_get('ha-vip-only', rid=rid, unit=unit) is not None - if context_complete(ctxt): + if self.context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: log("Charm not setup for ssl support but ssl ca " @@ -467,7 +515,7 @@ def __call__(self): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) - if not context_complete(ctxt): + if not self.complete: return {} return ctxt @@ -483,13 +531,15 @@ def __call__(self): log('Generating template context for ceph', level=DEBUG) mon_hosts = [] - auth = None - key = None - use_syslog = str(config('use-syslog')).lower() + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } for 
rid in relation_ids('ceph'): for unit in related_units(rid): - auth = relation_get('auth', rid=rid, unit=unit) - key = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) ceph_pub_addr = relation_get('ceph-public-address', rid=rid, unit=unit) unit_priv_addr = relation_get('private-address', rid=rid, @@ -498,15 +548,12 @@ def __call__(self): ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog} + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - if not context_complete(ctxt): + if not self.context_complete(ctxt): return {} ensure_packages(['ceph-common']) @@ -893,6 +940,18 @@ def neutron_ctxt(self): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -912,6 +971,8 @@ def __call__(self): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -923,7 +984,6 @@ def __call__(self): class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] def resolve_ports(self, ports): """Resolve NICs not yet bound to bridge(s) @@ -935,7 +995,18 @@ def resolve_ports(self, ports): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + hwaddr = get_nic_hwaddr(nic) hwaddr_to_nic[hwaddr] = nic addresses = get_ipv4_addr(nic, fatal=False) @@ -961,7 +1032,8 @@ def resolve_ports(self, ports): # trust it to be the real external network). resolved.append(entry) - return resolved + # Ensure no duplicates + return list(set(resolved)) class OSConfigFlagContext(OSContextGenerator): @@ -1280,15 +1352,19 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: + # Map of {port/mac:bridge} portmap = parse_data_port_mappings(ports) - ports = portmap.values() + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. resolved = self.resolve_ports(ports) + # FIXME: is this necessary? 
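# The DataPortContext rework in this hunk inverts the data-port mapping:
# config now parses to {port-or-mac: bridge} and the context emits
# {resolved-nic: bridge}. A sketch of the normalisation step alone, with
# an illustrative mac-to-nic table standing in for get_nic_hwaddr():
HW_TO_NIC = {'52:54:00:aa:bb:cc': 'eth1'}  # made up for the example

def normalize(portmap):
    """Map {port-or-mac: bridge} to {nic: bridge}."""
    return {HW_TO_NIC.get(port, port): bridge
            for port, bridge in portmap.items()}

# normalize({'52:54:00:aa:bb:cc': 'br-data', 'eth2': 'br-ex'})
#   -> {'eth1': 'br-data', 'eth2': 'br-ex'}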
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {bridge: normalized[port] for bridge, port in + return {normalized[port]: bridge for port, bridge in six.iteritems(portmap) if port in normalized.keys()} return None @@ -1299,12 +1375,22 @@ class PhyNICMTUContext(DataPortContext): def __call__(self): ctxt = {} mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.values(): - ports = mappings.values() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) napi_settings = NeutronAPIContext()() mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) if mtu: - ctxt["devs"] = '\\n'.join(ports) + ctxt["devs"] = '\\n'.join(all_ports) ctxt['mtu'] = mtu return ctxt @@ -1336,6 +1422,6 @@ def __call__(self): 'auth_protocol': rdata.get('auth_protocol') or 'http', } - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index f7b72352..2a59d86b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,20 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -255,17 +269,30 @@ def network_manager(): return 'neutron' -def parse_mappings(mappings): +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ parsed = {} if mappings: mappings = mappings.split() for m in mappings: p = m.partition(':') - key = p[0].strip() - if p[1]: - parsed[key] = p[2].strip() + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue else: - parsed[key] = '' + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() return parsed @@ -283,25 +310,25 @@ def parse_bridge_mappings(mappings): def parse_data_port_mappings(mappings, default_bridge='br-data'): """Parse data port mappings. - Mappings must be a space-delimited list of bridge:port mappings. + Mappings must be a space-delimited list of bridge:port. - Returns dict of the form {bridge:port}. + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. 
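# parse_mappings above keys on the lvalue by default and on the rvalue
# with key_rvalue=True, which is what lets several mac:bridge pairs
# share one bridge across units. A runnable copy of the parser plus both
# orientations:
def parse_mappings(mappings, key_rvalue=False):
    parsed = {}
    if mappings:
        for m in mappings.split():
            p = m.partition(':')
            if key_rvalue:
                if not p[1]:       # no rvalue present: skip the entry
                    continue
                key, val = p[2].strip(), p[0].strip()
            else:
                key, val = p[0].strip(), p[2].strip()
            parsed[key] = val
    return parsed

# parse_mappings('physnet1:br-ex physnet2:br-data')
#   -> {'physnet1': 'br-ex', 'physnet2': 'br-data'}
# parse_mappings('br-ex:eth1 br-data:eth2', key_rvalue=True)
#   -> {'eth1': 'br-ex', 'eth2': 'br-data'}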
""" - _mappings = parse_mappings(mappings) + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. - _mappings = {default_bridge: mappings.split()[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") + _mappings = {mappings.split()[0]: default_bridge} + ports = _mappings.keys() if len(set(ports)) != len(ports): raise Exception("It is not allowed to have the same port configured " "on more than one bridge") diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index b99851cc..33ceee25 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -13,3 +13,9 @@ log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 021d8cf9..e5e3cb1b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -18,7 +18,7 @@ import six -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, ERROR, @@ -29,6 +29,7 @@ try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: + apt_update(fatal=True) apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -112,7 +113,7 @@ def context(self): def complete_contexts(self): ''' - Return a list of interfaces that have atisfied contexts. + Return a list of interfaces that have satisfied contexts. ''' if self._complete_contexts: return self._complete_contexts @@ -293,3 +294,30 @@ def complete_contexts(self): [interfaces.extend(i.complete_contexts()) for i in six.itervalues(self.templates)] return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. 
Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 4dd000c3..eefcf08b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. @@ -24,8 +22,10 @@ import json import os import sys +import re import six +import traceback import yaml from charmhelpers.contrib.network import ip @@ -35,12 +35,16 @@ ) from charmhelpers.core.hookenv import ( + action_fail, + action_set, config, log as juju_log, charm_dir, INFO, relation_ids, - relation_set + relation_set, + status_set, + hook_name ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -50,7 +54,8 @@ ) from charmhelpers.contrib.network.ip import ( - get_ipv6_addr + get_ipv6_addr, + is_ipv6, ) from charmhelpers.contrib.python.packages import ( @@ -69,7 +74,6 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') - UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -116,8 +120,40 @@ ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), ]) +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0.0', 'liberty'), + ]), + 'neutron-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'cinder-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'keystone': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'horizon-common': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'heat-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'glance-common': OrderedDict([ + ('11.0.0', 'liberty'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8.0.0', 'liberty'), + ]), +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -167,9 +203,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -201,20 +237,31 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) - - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: 
- e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + if match: + vers = match.group(0) + + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) def get_os_version_package(pkg, fatal=True): @@ -392,7 +439,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 @@ -469,6 +520,12 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + kwargs = {'database': database, 'username': database_user, 'hostname': json.dumps(hosts)} @@ -704,3 +761,217 @@ def git_yaml_value(projects_yaml, key): return projects[key] return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? + for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. 
"functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            if generic_interface not in missing_relations:
+                missing_relations.append(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case relation ID exists but departing
+                if ('departed' in hook_name() or 'broken' in hook_name()) \
+                        and related_interface in hook_name():
+                    state = 'blocked'
+                    if generic_interface not in missing_relations:
+                        missing_relations.append(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface), "WARN")
+                # Normal case relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relation's interface, {}, is related but has "
+                             "no units in the relation."
+                             "".format(generic_interface, related_interface), "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in incomplete_relations \
+                    and generic_interface not in missing_relations:
+                incomplete_relations.append(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    # Run charm specific checks
+    if charm_func:
+        charm_state, charm_message = charm_func(configs)
+        if charm_state != 'active' and charm_state != 'unknown':
+            state = workload_state_compare(state, charm_state)
+            if message:
+                message = "{} {}".format(message, charm_message)
+            else:
+                message = charm_message
+
+    # Set to active if all requirements have been met
+    if state == 'active':
+        message = "Unit is ready"
+        juju_log(message, "INFO")
+
+    status_set(state, message)
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
+
+
+def incomplete_relation_data(configs, required_interfaces):
+    """
+    Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+    required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in the
+    list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points.
Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb4..83f264db 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import shutil import json import time +import uuid from subprocess import ( check_call, @@ -35,8 +36,10 @@ CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, + relation_set, related_units, log, DEBUG, @@ -56,6 +59,8 @@ apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -288,17 +293,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from 
src to dst."""
     for item in os.listdir(src):
@@ -411,17 +405,52 @@ class CephBrokerRq(object):
 
     The API is versioned and defaults to version 1.
     """
-    def __init__(self, api_version=1):
+    def __init__(self, api_version=1, request_id=None):
         self.api_version = api_version
+        if request_id:
+            self.request_id = request_id
+        else:
+            self.request_id = str(uuid.uuid1())
         self.ops = []
 
     def add_op_create_pool(self, name, replica_count=3):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count})
 
+    def set_ops(self, ops):
+        """Set request ops to provided value.
+
+        Useful for injecting ops that come from a previous request
+        to allow comparisons to ensure validity.
+        """
+        self.ops = ops
+
     @property
     def request(self):
-        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+                           'request-id': self.request_id})
+
+    def _ops_equal(self, other):
+        if len(self.ops) == len(other.ops):
+            for req_no in range(0, len(self.ops)):
+                for key in ['replicas', 'name', 'op']:
+                    if self.ops[req_no][key] != other.ops[req_no][key]:
+                        return False
+        else:
+            return False
+        return True
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if self.api_version == other.api_version and \
+                self._ops_equal(other):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
 
 
 class CephBrokerRsp(object):
@@ -431,10 +460,15 @@ class CephBrokerRsp(object):
 
     The API is versioned and defaults to version 1.
     """
+
     def __init__(self, encoded_rsp):
         self.api_version = None
         self.rsp = json.loads(encoded_rsp)
 
+    @property
+    def request_id(self):
+        return self.rsp.get('request-id')
+
     @property
     def exit_code(self):
         return self.rsp.get('exit-code')
@@ -442,3 +476,182 @@ def exit_code(self):
     @property
     def exit_msg(self):
         return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+#     send_request_if_needed,
+#     is_request_complete,
+#     CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+#     rq = CephBrokerRq()
+#     rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+#     if is_request_complete(rq):
+#         <Request complete actions>
+#     else:
+#         send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+#  'ceph:8': {
+#      'ceph/0': {
+#          'auth': 'cephx',
+#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+#          'ceph-public-address': '10.5.44.103',
+#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+#          'private-address': '10.5.44.103',
+#      },
+#      'glance/0': {
+#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+#                         '"ops": [{"replicas": 3, "name": "glance", '
+#                         '"op": "create-pool"}]}'),
+#          'private-address': '10.5.44.109',
+#      },
+#  }
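
Taken together, the request-id and the ops-based equality above mean a charm can rebuild its request from scratch on every hook invocation and still detect that an equivalent request was already sent. A small illustrative sketch, assuming this module is importable:

    # Two logically identical requests: the uuid-based request_id differs,
    # but __eq__ compares only api_version and the op dictionaries.
    rq1 = CephBrokerRq()
    rq1.add_op_create_pool(name='glance', replica_count=3)

    rq2 = CephBrokerRq()
    rq2.add_op_create_pool(name='glance', replica_count=3)

    assert rq1 == rq2                        # equivalent, so no resend needed
    assert rq1.request_id != rq2.request_id  # ids are still unique per object
    print(rq1.request)  # the JSON payload set as 'broker_req' on the relation
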
+
+def get_previous_request(rid):
+    """Return the last ceph broker request sent on a given relation
+
+    @param rid: Relation id to query for request
+    """
+    request = None
+    broker_req = relation_get(attribute='broker_req', rid=rid,
+                              unit=local_unit())
+    if broker_req:
+        request_data = json.loads(broker_req)
+        request = CephBrokerRq(api_version=request_data['api-version'],
+                               request_id=request_data['request-id'])
+        request.set_ops(request_data['ops'])
+
+    return request
+
+
+def get_request_states(request):
+    """Return a dict of requests per relation id with their corresponding
+    completion state.
+
+    This allows a charm, which has a request for ceph, to see whether there is
+    an equivalent request already being processed and if so what state that
+    request is in.
+
+    @param request: A CephBrokerRq object
+    """
+    complete = []
+    requests = {}
+    for rid in relation_ids('ceph'):
+        complete = False
+        previous_request = get_previous_request(rid)
+        if request == previous_request:
+            sent = True
+            complete = is_request_complete_for_rid(previous_request, rid)
+        else:
+            sent = False
+            complete = False
+
+        requests[rid] = {
+            'sent': sent,
+            'complete': complete,
+        }
+
+    return requests
+
+
+def is_request_sent(request):
+    """Check to see if a functionally equivalent request has already been sent
+
+    Returns True if a similar request has been sent
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request)
+    for rid in states.keys():
+        if not states[rid]['sent']:
+            return False
+
+    return True
+
+
+def is_request_complete(request):
+    """Check to see if a functionally equivalent request has already been
+    completed
+
+    Returns True if a similar request has been completed
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request)
+    for rid in states.keys():
+        if not states[rid]['complete']:
+            return False
+
+    return True
+
+
+def is_request_complete_for_rid(request, rid):
+    """Check if a given request has been completed on the given relation
+
+    @param request: A CephBrokerRq object
+    @param rid: Relation ID
+    """
+    broker_key = get_broker_rsp_key()
+    for unit in related_units(rid):
+        rdata = relation_get(rid=rid, unit=unit)
+        if rdata.get(broker_key):
+            rsp = CephBrokerRsp(rdata.get(broker_key))
+            if rsp.request_id == request.request_id:
+                if not rsp.exit_code:
+                    return True
+        else:
+            # The remote unit sent no reply targeted at this unit so either the
+            # remote ceph cluster does not support unit targeted replies or it
+            # has not processed our request yet.
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e49..1e57941a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 18860f59..c2bee134 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def wrapper(*args, **kwargs): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def 
remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -642,6 +623,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -786,21 +799,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 8ae8ef86..cb3c527e 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -63,32 +63,48 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_start(service_name) return started @@ -148,6 +164,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +306,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. @@ -396,25 +433,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/ceph-radosgw/hooks/charmhelpers/core/hugepage.py b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..4aaca3f5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+
+    Args:
+    user (str) -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to Mount hugepages
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
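
A sketch of how a charm hook might call the helper above; the user name and page count are illustrative only, and the call must run as root on the unit:

    # Illustrative only: reserve 1024 x 2MB hugepages for a hypothetical
    # 'qemu-user', mounted at the default hugetlbfs mountpoint. This writes
    # /etc/sysctl.d/10-hugepage.conf and an fstab entry as shown above.
    from charmhelpers.core.hugepage import hugepage_support

    hugepage_support('qemu-user', group='hugetlb', nr_hugepages=1024,
                     mnt_point='/run/hugepages/kvm', pagesize='2MB',
                     mount=True)
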
diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel.py b/ceph-radosgw/hooks/charmhelpers/core/kernel.py
new file mode 100644
index 00000000..5dc64952
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/core/kernel.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = "Jorge Niedbalski <niedbalski@ubuntu.com>"
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+from subprocess import check_call, check_output
+import re
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    check_call(cmd)
+    if persist:
+        with open('/etc/modules', 'r+') as modules:
+            if module not in modules.read():
+                modules.write(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return check_output(['lsmod'],
+                        universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image"""
+    return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py
index 8005c415..3f677833 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py
@@ -16,7 +16,9 @@
 
 import os
 import yaml
+
 from charmhelpers.core import hookenv
+from charmhelpers.core import host
 from charmhelpers.core import templating
 
 from charmhelpers.core.services.base import ManagerCallback
@@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback):
 
     :param str source: The template source file, relative to
        `$CHARM_DIR/templates`
+
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
-
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
    """
    def __init__(self, source, target,
-                 owner='root', group='root', perms=0o444):
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms
+        self.on_change_action = on_change_action
 
    def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
 
 
 # Convenience aliases for templates
diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py
index a2a784aa..7e3f9693 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py
@@ -18,6 +18,7 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
 import six
+import re
 
 
 def bool_from_string(value):
@@ -40,3 +41,32 @@ def bool_from_string(value):
 
     msg = "Unable to interpret string value '%s' as boolean" % (value)
     raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
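
The suffix table above maps K/M/G/T/P (with an optional trailing B) to powers of 1024, so the helper behaves as in this small sketch:

    from charmhelpers.core.strutils import bytes_from_string

    assert bytes_from_string('512K') == 512 * 1024
    assert bytes_from_string('2MB') == 2 * 1024 ** 2
    assert bytes_from_string('1G') == 1024 ** 3
    # Values without a digits-then-suffix shape raise ValueError.
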
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py
index 0a3bb969..cd0b783c 100644
--- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py
+++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py
@@ -90,6 +90,14 @@
     'kilo/proposed': 'trusty-proposed/kilo',
     'trusty-kilo/proposed': 'trusty-proposed/kilo',
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
 }
 
 # The order of this list is very important. Handlers should be listed in from
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py
index 367d6b47..d451698d 100644
--- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py
@@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services):
         if 'units' not in this_service:
             this_service['units'] = 1
 
-        self.d.add(this_service['name'], units=this_service['units'])
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'))
 
         for svc in other_services:
             if 'location' in svc:
@@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services):
             if 'units' not in svc:
                 svc['units'] = 1
 
-            self.d.add(svc['name'], charm=branch_location, units=svc['units'])
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'))
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py
index 3de26afd..2591a9b1 100644
--- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py
+++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py
@@ -14,17 +14,25 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
-import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import socket +import subprocess import sys import time -import urlparse +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -108,7 +116,7 @@ def validate_services(self, commands): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -142,19 +150,23 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +176,7 @@ def _get_config(self, unit, filename): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -259,33 +271,52 @@ def _get_dir_mtime(self, sentry_unit, directory): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. 
- """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -294,78 +325,122 @@ def service_restarted(self, sentry_unit, service, filename, return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, False if service is older than mtime or if service was not found. 
""" - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed\n{}'.format(tries, service, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against - sleep_time (int): Seconds to sleep before looking for process + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if - file was modified before mtime, + file was modified before mtime, or if file not found. """ - self.log.debug('Checking %s updated since %s' % (filename, mtime)) + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) - file_mtime = self._get_file_mtime(sentry_unit, filename) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. 
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s)' % (file_mtime, mtime)) + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) return True else: - self.log.warn('File mtime %s is older than provided mtime %s' - % (file_mtime, mtime)) + self.log.warn('File mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check service and file were updated after mtime Args: @@ -373,9 +448,10 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -392,15 +468,27 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -418,7 +506,6 @@ def get_ubuntu_releases(self): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -450,15 +537,20 @@ def check_commands_on_units(self, commands, sentry_units): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. :returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +559,23 @@ def get_process_id_list(self, sentry_unit, process_name): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +589,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +608,23 @@ def validate_unit_process_ids(self, expected, actual): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +644,175 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return 'Dicts within list are not identical' return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' 
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time.  Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+# amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output):
+        """Run the named action on a given unit sentry.
+
+        _check_output parameter is used for dependency injection.
+
+        @return action_id.
+        """
+        unit_id = unit_sentry.info["unit_name"]
+        command = ["juju", "action", "do", "--format=json", unit_id, action]
+        self.log.info("Running command: %s\n" % " ".join(command))
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        action_id = data[u'Action queued with id']
+        return action_id
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+
+        _check_output parameter is used for dependency injection.
+        """
+        command = ["juju", "action", "fetch", "--format=json", "--wait=0",
+                   action_id]
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        return data.get(u"status") == "completed"
+
+    def status_get(self, unit):
+        """Return the current service status of this unit."""
+        raw_status, return_code = unit.run(
+            "status-get --format=json --include-data")
+        if return_code != 0:
+            return ("unknown", "")
+        status = json.loads(raw_status)
+        return (status["status"], status["message"])
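
In an amulet test the two action helpers pair up as below; 'u' is an AmuletUtils instance and 'sentry_unit' an amulet sentry, both assumed to come from the surrounding test harness:

    # Queue a (hypothetical) 'pause' action on a unit, then block on
    # 'juju action fetch --wait=0' until a result is available.
    action_id = u.run_action(sentry_unit, 'pause')
    assert u.wait_on_action(action_id), "'pause' action did not complete"
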
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..722bc645 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ def _determine_branch_locations(self, other_services): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. + force_series_current = ['nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + + # Charms which can not use openstack-origin, ie. 
many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..2b3087ea 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
+ + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) From b1646010c850253366a87a5d1dd3f780146557e7 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 11:06:36 +0100 Subject: [PATCH 0840/2699] Add basic status assessment for monitor role --- ceph-proxy/hooks/ceph.py | 4 +++- ceph-proxy/hooks/hooks.py | 46 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index d4b98b58..1d77030a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -21,7 +21,8 @@ log, ERROR, WARNING, - cached + cached, + status_set, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -365,6 +366,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Looks like {} is in use, skipping.'.format(dev)) return + status_set('maintenance', 'Initializing device {}'.format(dev)) cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index 2306fcff..c680a4aa 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -26,7 +26,9 @@ remote_unit, Hooks, UnregisteredHookError, service_name, - relations_of_type + relations_of_type, + status_set, + local_unit, ) from charmhelpers.core.host import ( service_restart, @@ -152,6 +154,7 @@ def config_changed(): # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() @@ -181,6 +184,20 @@ def get_mon_hosts(): return hosts +def get_peer_units(): + ''' + Returns a dictionary of unit names from the mon peer relation with + a flag indicating whether the unit has presented its address + ''' + units = {} + units[local_unit()] = True + for relid in relation_ids('mon'): + for unit in related_units(relid): + addr = relation_get('ceph-public-address', unit, relid) + units[unit] = addr is not None + return units + + def reformat_osd(): if config('osd-reformat'): return True @@ -210,6 +227,7 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: + status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() for dev in get_devices(): @@ -384,8 +402,34 @@ def update_nrpe_config(): nrpe_setup.write() +def assess_status(): + '''Assess status of current unit''' + moncount = int(config('monitor-count')) + units = get_peer_units() + # not enough peers and mon_count > 1 + if len(units.keys()) < moncount: + status_set('blocked', 'Insufficient peer units to bootstrap' + ' cluster (require {})'.format(moncount)) + return + + # mon_count > 1, peers, but no ceph-public-address + ready = sum(1 for unit_ready in units.itervalues() if unit_ready) + if ready < moncount: + status_set('waiting', 'Peer units detected, waiting for addresses') + return + + # active - bootstrapped + 
quorum status check + if ceph.is_bootstrapped() and ceph.is_quorum(): + status_set('active', 'Unit active and clustered') + else: + # Unit should be running and clustered, but no quorum + # TODO: should this be blocked or waiting? + status_set('blocked', 'Unit not clustered (no quorum)') + + if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) + assess_status() From 98e61fc4b2dc547af614517c404834d1e8957d12 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 11:06:36 +0100 Subject: [PATCH 0841/2699] Add basic status assessment for monitor role --- ceph-mon/hooks/ceph.py | 4 +++- ceph-mon/hooks/hooks.py | 46 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index d4b98b58..1d77030a 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -21,7 +21,8 @@ log, ERROR, WARNING, - cached + cached, + status_set, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -365,6 +366,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Looks like {} is in use, skipping.'.format(dev)) return + status_set('maintenance', 'Initializing device {}'.format(dev)) cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index 2306fcff..c680a4aa 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -26,7 +26,9 @@ remote_unit, Hooks, UnregisteredHookError, service_name, - relations_of_type + relations_of_type, + status_set, + local_unit, ) from charmhelpers.core.host import ( service_restart, @@ -152,6 +154,7 @@ def config_changed(): # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() @@ -181,6 +184,20 @@ def get_mon_hosts(): return hosts +def get_peer_units(): + ''' + Returns a dictionary of unit names from the mon peer relation with + a flag indicating whether the unit has presented its address + ''' + units = {} + units[local_unit()] = True + for relid in relation_ids('mon'): + for unit in related_units(relid): + addr = relation_get('ceph-public-address', unit, relid) + units[unit] = addr is not None + return units + + def reformat_osd(): if config('osd-reformat'): return True @@ -210,6 +227,7 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: + status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() for dev in get_devices(): @@ -384,8 +402,34 @@ def update_nrpe_config(): nrpe_setup.write() +def assess_status(): + '''Assess status of current unit''' + moncount = int(config('monitor-count')) + units = get_peer_units() + # not enough peers and mon_count > 1 + if len(units.keys()) < moncount: + status_set('blocked', 'Insufficient peer units to bootstrap' + ' cluster (require {})'.format(moncount)) + return + + # mon_count > 1, peers, but no ceph-public-address + ready = sum(1 for unit_ready in units.itervalues() if unit_ready) + if ready < moncount: + status_set('waiting', 'Peer units detected, waiting for addresses') + return + + # active - bootstrapped + quorum status check + if ceph.is_bootstrapped() and 
ceph.is_quorum(): + status_set('active', 'Unit active and clustered') + else: + # Unit should be running and clustered, but no quorum + # TODO: should this be blocked or waiting? + status_set('blocked', 'Unit not clustered (no quorum)') + + if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) + assess_status() From 8df55a3cefc13470a1ff67aa66a61d1eae83426f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 11:21:45 +0100 Subject: [PATCH 0842/2699] Make unit messaging consistent --- ceph-proxy/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/hooks.py index c680a4aa..ccd575ab 100755 --- a/ceph-proxy/hooks/hooks.py +++ b/ceph-proxy/hooks/hooks.py @@ -420,7 +420,7 @@ def assess_status(): # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): - status_set('active', 'Unit active and clustered') + status_set('active', 'Unit is ready and clustered') else: # Unit should be running and clustered, but no quorum # TODO: should this be blocked or waiting? From 36aee1f24473c0fbf0c1afbc6d3794cf8cfb6c3f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 11:21:45 +0100 Subject: [PATCH 0843/2699] Make unit messaging consistent --- ceph-mon/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/hooks.py index c680a4aa..ccd575ab 100755 --- a/ceph-mon/hooks/hooks.py +++ b/ceph-mon/hooks/hooks.py @@ -420,7 +420,7 @@ def assess_status(): # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): - status_set('active', 'Unit active and clustered') + status_set('active', 'Unit is ready and clustered') else: # Unit should be running and clustered, but no quorum # TODO: should this be blocked or waiting? 
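The status patches above (applied in parallel to ceph-proxy and ceph-mon) share one three-way decision ladder: blocked when the peer count can never satisfy monitor-count, waiting when peers exist but have not yet published ceph-public-address, and active only once the cluster is bootstrapped with quorum. A minimal, illustration-only sketch of that ladder as a pure function (the function name and the trailing driver call are invented for the example; the charm itself reads this state from hook tools rather than taking it as arguments):

    def assess(moncount, units, bootstrapped, quorum):
        # units maps unit name -> True once that peer has presented its
        # ceph-public-address on the mon relation.
        if len(units) < moncount:
            return ('blocked',
                    'Insufficient peer units to bootstrap'
                    ' cluster (require {})'.format(moncount))
        ready = sum(1 for unit_ready in units.values() if unit_ready)
        if ready < moncount:
            return ('waiting', 'Peer units detected, waiting for addresses')
        if bootstrapped and quorum:
            return ('active', 'Unit is ready and clustered')
        return ('blocked', 'Unit not clustered (no quorum)')

    # Example: monitor-count=3, all peers related, one still silent.
    assess(3, {'ceph-mon1': True, 'ceph-mon2': True, 'ceph-mon3': False},
           False, False)
    # -> ('waiting', 'Peer units detected, waiting for addresses')

Because assess_status() is called after hooks.execute() in the __main__ block, the workload status is re-evaluated on every hook invocation, not only when an event explicitly changes it.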
From 55ce7919d062907bcb554d0f012373363b33be35 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 11:44:28 +0100 Subject: [PATCH 0844/2699] Add basic status support --- ceph-osd/hooks/ceph.py | 14 +++++++++++++- ceph-osd/hooks/hooks.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index ad2a2d51..f9448db2 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -18,7 +18,8 @@ ) from charmhelpers.core.hookenv import ( log, - ERROR, WARNING + ERROR, WARNING, + status_set, ) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, @@ -333,6 +334,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Looks like {} is in use, skipping.'.format(dev)) return + status_set('maintenance', 'Initializing device {}'.format(dev)) cmd = ['ceph-disk-prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: @@ -382,3 +384,13 @@ def osdize_dir(path): def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + +def get_running_osds(): + '''Returns a list of the pids of the current running OSD daemons''' + cmd = ['pgrep', 'ceph-osd'] + try: + result = subprocess.check_output(cmd) + return result.split() + except subprocess.CalledProcessError: + return [] diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/hooks.py index 9d9a3d4c..7ef5d796 100755 --- a/ceph-osd/hooks/hooks.py +++ b/ceph-osd/hooks/hooks.py @@ -22,7 +22,8 @@ relation_get, Hooks, UnregisteredHookError, - service_name + service_name, + status_set, ) from charmhelpers.core.host import ( umount, @@ -227,8 +228,34 @@ def update_nrpe_config(): nrpe_setup.write() +def assess_status(): + '''Assess status of current unit''' + # Check for mon relation + if len(relation_ids('mon')) < 1: + status_set('blocked', 'Missing relation: monitor') + return + + # Check for monitors with presented addresses + # Check for bootstrap key presentation + monitors = get_mon_hosts() + if len(monitors) < 1 or not get_conf('osd_bootstrap_key'): + status_set('waiting', 'Incomplete relation: monitor') + return + + # Check for OSD device creation parity i.e. 
at least some devices + # must have been presented and used for this charm to be operational + running_osds = ceph.get_running_osds() + if not running_osds: + status_set('blocked', + 'No block devices detected using current configuration') + else: + status_set('active', + 'Unit is ready ({} OSD)'.format(len(running_osds))) + + if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) + assess_status() From a8dd9f0827ee9b8a8de299699cbacaf7dc041b0a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:36 +0000 Subject: [PATCH 0845/2699] update amulet test dependency setup file --- ceph-proxy/tests/00-setup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/00-setup index dd9158fd..94e5611f 100755 --- a/ceph-proxy/tests/00-setup +++ b/ceph-proxy/tests/00-setup @@ -4,11 +4,14 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet \ +sudo apt-get install --yes amulet \ + distro-info-data \ python-cinderclient \ python-distro-info \ python-glanceclient \ python-heatclient \ python-keystoneclient \ + python-neutronclient \ python-novaclient \ + python-pika \ python-swiftclient From 5d9ec7cff005395169518464f9ae9c2ca94c541c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:36 +0000 Subject: [PATCH 0846/2699] update amulet test dependency setup file --- ceph-mon/tests/00-setup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/00-setup index dd9158fd..94e5611f 100755 --- a/ceph-mon/tests/00-setup +++ b/ceph-mon/tests/00-setup @@ -4,11 +4,14 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet \ +sudo apt-get install --yes amulet \ + distro-info-data \ python-cinderclient \ python-distro-info \ python-glanceclient \ python-heatclient \ python-keystoneclient \ + python-neutronclient \ python-novaclient \ + python-pika \ python-swiftclient From 9c388918b0348fc3ba44830eac1f8c09292b7c2b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:37 +0000 Subject: [PATCH 0847/2699] update bundletester test plan yaml file --- ceph-proxy/tests/tests.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index db533a1e..64e3e2d1 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -8,11 +8,13 @@ sources: - ppa:juju/stable packages: - amulet - - python-amulet + - distro-info-data - python-cinderclient - python-distro-info - python-glanceclient - python-heatclient - python-keystoneclient + - python-neutronclient - python-novaclient + - python-pika - python-swiftclient From d27c01666c4a43f3cf3113c3f4c565aaee419af0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:37 +0000 Subject: [PATCH 0848/2699] update bundletester test plan yaml file --- ceph-mon/tests/tests.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index db533a1e..64e3e2d1 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -8,11 +8,13 @@ sources: - ppa:juju/stable packages: - amulet - - python-amulet + - distro-info-data - python-cinderclient - python-distro-info - python-glanceclient - python-heatclient - python-keystoneclient + - 
python-neutronclient - python-novaclient + - python-pika - python-swiftclient From 47bad7aa0223464671e0c64ee1df94042f08ef6b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:38 +0000 Subject: [PATCH 0849/2699] update amulet test dependency setup file --- ceph-osd/tests/00-setup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/00-setup index dd9158fd..94e5611f 100755 --- a/ceph-osd/tests/00-setup +++ b/ceph-osd/tests/00-setup @@ -4,11 +4,14 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet \ +sudo apt-get install --yes amulet \ + distro-info-data \ python-cinderclient \ python-distro-info \ python-glanceclient \ python-heatclient \ python-keystoneclient \ + python-neutronclient \ python-novaclient \ + python-pika \ python-swiftclient From 3ef13a4f58386dd01097e570413e4284a6233e3d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:38 +0000 Subject: [PATCH 0850/2699] update bundletester test plan yaml file --- ceph-osd/tests/tests.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index db533a1e..64e3e2d1 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -8,11 +8,13 @@ sources: - ppa:juju/stable packages: - amulet - - python-amulet + - distro-info-data - python-cinderclient - python-distro-info - python-glanceclient - python-heatclient - python-keystoneclient + - python-neutronclient - python-novaclient + - python-pika - python-swiftclient From 29184412fcb6ddc61133b547f230634b00efc788 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:39 +0000 Subject: [PATCH 0851/2699] update amulet test dependency setup file --- ceph-radosgw/tests/00-setup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/00-setup index dd9158fd..94e5611f 100755 --- a/ceph-radosgw/tests/00-setup +++ b/ceph-radosgw/tests/00-setup @@ -4,11 +4,14 @@ set -ex sudo add-apt-repository --yes ppa:juju/stable sudo apt-get update --yes -sudo apt-get install --yes python-amulet \ +sudo apt-get install --yes amulet \ + distro-info-data \ python-cinderclient \ python-distro-info \ python-glanceclient \ python-heatclient \ python-keystoneclient \ + python-neutronclient \ python-novaclient \ + python-pika \ python-swiftclient From 4b21432bb0519ab2795f1d6e67b39be4f0241935 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:11:39 +0000 Subject: [PATCH 0852/2699] update bundletester test plan yaml file --- ceph-radosgw/tests/tests.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index db533a1e..64e3e2d1 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -8,11 +8,13 @@ sources: - ppa:juju/stable packages: - amulet - - python-amulet + - distro-info-data - python-cinderclient - python-distro-info - python-glanceclient - python-heatclient - python-keystoneclient + - python-neutronclient - python-novaclient + - python-pika - python-swiftclient From b7effc451642a5ac4199d9ea3c5453e377738b85 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:36:46 +0000 Subject: [PATCH 0853/2699] update makefile unit test target --- ceph-proxy/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/Makefile 
b/ceph-proxy/Makefile index 93e2758b..5e54804a 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -9,7 +9,7 @@ lint: test: @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests functional_test: @echo Starting Amulet tests... From c1afc3b5dcf78a248afebb8cd2aced1d4dfcd4fe Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:36:46 +0000 Subject: [PATCH 0854/2699] update makefile unit test target --- ceph-mon/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 93e2758b..5e54804a 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -9,7 +9,7 @@ lint: test: @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests functional_test: @echo Starting Amulet tests... From ee1245fc52f687e9c8936201c9ca75d1b0dccac0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Oct 2015 14:38:06 +0000 Subject: [PATCH 0855/2699] update makefile unit test target --- ceph-radosgw/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index edf686fd..6b3b3430 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -9,7 +9,7 @@ lint: test: @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests functional_test: @echo Starting Amulet tests... 
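The renames that follow (hooks.py becoming ceph_hooks.py, with every hook symlink repointed) rely on the standard charm dispatch pattern: juju executes hooks by file name, and the charmhelpers Hooks class routes on that name, so a single Python module can back every symlink, including the new update-status hook. A rough sketch of the pattern, simplified from charmhelpers.core.hookenv (the class below is a stand-in for illustration, not the real implementation):

    import os
    import sys

    class Hooks(object):
        """Register handlers by hook name and dispatch on argv[0]."""
        def __init__(self):
            self._hooks = {}

        def hook(self, *hook_names):
            def wrapper(decorated):
                for name in hook_names:
                    self._hooks[name] = decorated
                return decorated
            return wrapper

        def execute(self, args):
            # Symlinks such as mon-relation-joined -> ceph_hooks.py make
            # this basename the name of the hook being run.
            hook_name = os.path.basename(args[0])
            if hook_name not in self._hooks:
                # The real class raises UnregisteredHookError here.
                raise Exception(hook_name)
            self._hooks[hook_name]()

    hooks = Hooks()

    @hooks.hook('mon-relation-joined', 'mon-relation-changed')
    def mon_relation():
        pass

    if __name__ == '__main__':
        hooks.execute(sys.argv)

The update-status symlink is what makes the end-of-run assess_status() call fire periodically: juju invokes that hook on a timer, dispatch falls through the UnregisteredHookError branch (no 'update-status' handler is registered in the patch shown), the except block logs and skips it, and assess_status() still runs.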
From 1a76264367a24e22221ea64e42c3213a448437a6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:06 +0100 Subject: [PATCH 0856/2699] Add some unit tests to cover service status --- ceph-proxy/hooks/{hooks.py => ceph_hooks.py} | 0 ceph-proxy/hooks/client-relation-changed | 2 +- ceph-proxy/hooks/client-relation-joined | 2 +- ceph-proxy/hooks/config-changed | 2 +- ceph-proxy/hooks/install.real | 2 +- ceph-proxy/hooks/mon-relation-changed | 2 +- ceph-proxy/hooks/mon-relation-departed | 2 +- ceph-proxy/hooks/mon-relation-joined | 2 +- .../nrpe-external-master-relation-changed | 2 +- .../nrpe-external-master-relation-joined | 2 +- ceph-proxy/hooks/osd-relation-joined | 2 +- ceph-proxy/hooks/radosgw-relation-joined | 2 +- ceph-proxy/hooks/start | 2 +- ceph-proxy/hooks/stop | 2 +- ceph-proxy/hooks/update-status | 1 + ceph-proxy/hooks/upgrade-charm | 2 +- ceph-proxy/unit_tests/test_status.py | 95 ++++++++++++++ ceph-proxy/unit_tests/test_utils.py | 121 ++++++++++++++++++ 18 files changed, 231 insertions(+), 14 deletions(-) rename ceph-proxy/hooks/{hooks.py => ceph_hooks.py} (100%) create mode 120000 ceph-proxy/hooks/update-status create mode 100644 ceph-proxy/unit_tests/test_status.py create mode 100644 ceph-proxy/unit_tests/test_utils.py diff --git a/ceph-proxy/hooks/hooks.py b/ceph-proxy/hooks/ceph_hooks.py similarity index 100% rename from ceph-proxy/hooks/hooks.py rename to ceph-proxy/hooks/ceph_hooks.py diff --git a/ceph-proxy/hooks/client-relation-changed b/ceph-proxy/hooks/client-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/client-relation-changed +++ b/ceph-proxy/hooks/client-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/client-relation-joined b/ceph-proxy/hooks/client-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/client-relation-joined +++ b/ceph-proxy/hooks/client-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/config-changed b/ceph-proxy/hooks/config-changed index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/config-changed +++ b/ceph-proxy/hooks/config-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/install.real b/ceph-proxy/hooks/install.real index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/install.real +++ b/ceph-proxy/hooks/install.real @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mon-relation-changed b/ceph-proxy/hooks/mon-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/mon-relation-changed +++ b/ceph-proxy/hooks/mon-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mon-relation-departed b/ceph-proxy/hooks/mon-relation-departed index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/mon-relation-departed +++ b/ceph-proxy/hooks/mon-relation-departed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mon-relation-joined b/ceph-proxy/hooks/mon-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/mon-relation-joined +++ b/ceph-proxy/hooks/mon-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git 
a/ceph-proxy/hooks/nrpe-external-master-relation-changed b/ceph-proxy/hooks/nrpe-external-master-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/nrpe-external-master-relation-changed +++ b/ceph-proxy/hooks/nrpe-external-master-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/nrpe-external-master-relation-joined b/ceph-proxy/hooks/nrpe-external-master-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/nrpe-external-master-relation-joined +++ b/ceph-proxy/hooks/nrpe-external-master-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/osd-relation-joined b/ceph-proxy/hooks/osd-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/osd-relation-joined +++ b/ceph-proxy/hooks/osd-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/radosgw-relation-joined b/ceph-proxy/hooks/radosgw-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/radosgw-relation-joined +++ b/ceph-proxy/hooks/radosgw-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/start b/ceph-proxy/hooks/start index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/start +++ b/ceph-proxy/hooks/start @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/stop b/ceph-proxy/hooks/stop index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/stop +++ b/ceph-proxy/hooks/stop @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/update-status b/ceph-proxy/hooks/update-status new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/update-status @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/upgrade-charm b/ceph-proxy/hooks/upgrade-charm index 9416ca6a..52d96630 120000 --- a/ceph-proxy/hooks/upgrade-charm +++ b/ceph-proxy/hooks/upgrade-charm @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py new file mode 100644 index 00000000..bd8b0241 --- /dev/null +++ b/ceph-proxy/unit_tests/test_status.py @@ -0,0 +1,95 @@ +import mock +import test_utils + +with mock.patch('utils.get_unit_hostname'): + import ceph_hooks as hooks + +TO_PATCH = [ + 'status_set', + 'config', + 'ceph', + 'relation_ids', + 'relation_get', + 'related_units', + 'local_unit', +] + +NO_PEERS = { + 'ceph-mon1': True +} + +ENOUGH_PEERS_INCOMPLETE = { + 'ceph-mon1': True, + 'ceph-mon2': False, + 'ceph-mon3': False, +} + +ENOUGH_PEERS_COMPLETE = { + 'ceph-mon1': True, + 'ceph-mon2': True, + 'ceph-mon3': True, +} + + +class ServiceStatusTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.test_config.set('monitor-count', 3) + self.local_unit.return_value = 'ceph-mon1' + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_no_peers(self, _peer_units): + _peer_units.return_value = NO_PEERS + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + @mock.patch.object(hooks, 
'get_peer_units') + def test_assess_status_peers_incomplete(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_INCOMPLETE + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_peers_complete_active(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + hooks.assess_status() + self.status_set.assert_called_with('active', mock.ANY) + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_peers_complete_down(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + self.ceph.is_bootstrapped.return_value = False + self.ceph.is_quorum.return_value = False + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + def test_get_peer_units_no_peers(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = [] + self.assertEquals({'ceph-mon1': True}, + hooks.get_peer_units()) + + def test_get_peer_units_peers_incomplete(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = ['ceph-mon2', + 'ceph-mon3'] + self.relation_get.return_value = None + self.assertEquals({'ceph-mon1': True, + 'ceph-mon2': False, + 'ceph-mon3': False}, + hooks.get_peer_units()) + + def test_get_peer_units_peers_complete(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = ['ceph-mon2', + 'ceph-mon3'] + self.relation_get.side_effect = ['ceph-mon2', + 'ceph-mon3'] + self.assertEquals({'ceph-mon1': True, + 'ceph-mon2': True, + 'ceph-mon3': True}, + hooks.get_peer_units()) \ No newline at end of file diff --git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py new file mode 100644 index 00000000..663a0488 --- /dev/null +++ b/ceph-proxy/unit_tests/test_utils.py @@ -0,0 +1,121 @@ +import logging +import unittest +import os +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + + +def load_config(): + ''' + Walk backwords from __file__ looking for config.yaml, load and return the + 'options' section' + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % f) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. 
+ ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr is None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From bb746c056958fd7d944c6a40ee336bf93215d07a Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:06 +0100 Subject: [PATCH 0857/2699] Add some unit tests to cover service status --- ceph-mon/hooks/{hooks.py => ceph_hooks.py} | 0 ceph-mon/hooks/client-relation-changed | 2 +- ceph-mon/hooks/client-relation-joined | 2 +- ceph-mon/hooks/config-changed | 2 +- ceph-mon/hooks/install.real | 2 +- ceph-mon/hooks/mon-relation-changed | 2 +- ceph-mon/hooks/mon-relation-departed | 2 +- ceph-mon/hooks/mon-relation-joined | 2 +- .../nrpe-external-master-relation-changed | 2 +- .../nrpe-external-master-relation-joined | 2 +- ceph-mon/hooks/osd-relation-joined | 2 +- ceph-mon/hooks/radosgw-relation-joined | 2 +- ceph-mon/hooks/start | 2 +- ceph-mon/hooks/stop | 2 +- ceph-mon/hooks/update-status | 1 + ceph-mon/hooks/upgrade-charm | 2 +- ceph-mon/unit_tests/test_status.py | 95 ++++++++++++++ ceph-mon/unit_tests/test_utils.py | 121 ++++++++++++++++++ 18 files changed, 231 insertions(+), 14 deletions(-) rename ceph-mon/hooks/{hooks.py => ceph_hooks.py} (100%) create mode 120000 ceph-mon/hooks/update-status create mode 100644 ceph-mon/unit_tests/test_status.py create mode 100644 ceph-mon/unit_tests/test_utils.py diff --git a/ceph-mon/hooks/hooks.py b/ceph-mon/hooks/ceph_hooks.py similarity index 100% rename from ceph-mon/hooks/hooks.py rename to ceph-mon/hooks/ceph_hooks.py diff --git a/ceph-mon/hooks/client-relation-changed b/ceph-mon/hooks/client-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/client-relation-changed +++ b/ceph-mon/hooks/client-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of 
file diff --git a/ceph-mon/hooks/client-relation-joined b/ceph-mon/hooks/client-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/client-relation-joined +++ b/ceph-mon/hooks/client-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/config-changed b/ceph-mon/hooks/config-changed index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/config-changed +++ b/ceph-mon/hooks/config-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/install.real b/ceph-mon/hooks/install.real index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/install.real +++ b/ceph-mon/hooks/install.real @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-changed b/ceph-mon/hooks/mon-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/mon-relation-changed +++ b/ceph-mon/hooks/mon-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-departed b/ceph-mon/hooks/mon-relation-departed index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/mon-relation-departed +++ b/ceph-mon/hooks/mon-relation-departed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-joined b/ceph-mon/hooks/mon-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/mon-relation-joined +++ b/ceph-mon/hooks/mon-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/nrpe-external-master-relation-changed b/ceph-mon/hooks/nrpe-external-master-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/nrpe-external-master-relation-changed +++ b/ceph-mon/hooks/nrpe-external-master-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/nrpe-external-master-relation-joined b/ceph-mon/hooks/nrpe-external-master-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/nrpe-external-master-relation-joined +++ b/ceph-mon/hooks/nrpe-external-master-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-relation-joined b/ceph-mon/hooks/osd-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/osd-relation-joined +++ b/ceph-mon/hooks/osd-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/radosgw-relation-joined b/ceph-mon/hooks/radosgw-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/radosgw-relation-joined +++ b/ceph-mon/hooks/radosgw-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/start b/ceph-mon/hooks/start index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/start +++ b/ceph-mon/hooks/start @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/stop b/ceph-mon/hooks/stop index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/stop +++ b/ceph-mon/hooks/stop @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git 
a/ceph-mon/hooks/update-status b/ceph-mon/hooks/update-status new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/update-status @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm index 9416ca6a..52d96630 120000 --- a/ceph-mon/hooks/upgrade-charm +++ b/ceph-mon/hooks/upgrade-charm @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py new file mode 100644 index 00000000..bd8b0241 --- /dev/null +++ b/ceph-mon/unit_tests/test_status.py @@ -0,0 +1,95 @@ +import mock +import test_utils + +with mock.patch('utils.get_unit_hostname'): + import ceph_hooks as hooks + +TO_PATCH = [ + 'status_set', + 'config', + 'ceph', + 'relation_ids', + 'relation_get', + 'related_units', + 'local_unit', +] + +NO_PEERS = { + 'ceph-mon1': True +} + +ENOUGH_PEERS_INCOMPLETE = { + 'ceph-mon1': True, + 'ceph-mon2': False, + 'ceph-mon3': False, +} + +ENOUGH_PEERS_COMPLETE = { + 'ceph-mon1': True, + 'ceph-mon2': True, + 'ceph-mon3': True, +} + + +class ServiceStatusTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.test_config.set('monitor-count', 3) + self.local_unit.return_value = 'ceph-mon1' + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_no_peers(self, _peer_units): + _peer_units.return_value = NO_PEERS + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_peers_incomplete(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_INCOMPLETE + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_peers_complete_active(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + hooks.assess_status() + self.status_set.assert_called_with('active', mock.ANY) + + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_peers_complete_down(self, _peer_units): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + self.ceph.is_bootstrapped.return_value = False + self.ceph.is_quorum.return_value = False + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + def test_get_peer_units_no_peers(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = [] + self.assertEquals({'ceph-mon1': True}, + hooks.get_peer_units()) + + def test_get_peer_units_peers_incomplete(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = ['ceph-mon2', + 'ceph-mon3'] + self.relation_get.return_value = None + self.assertEquals({'ceph-mon1': True, + 'ceph-mon2': False, + 'ceph-mon3': False}, + hooks.get_peer_units()) + + def test_get_peer_units_peers_complete(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = ['ceph-mon2', + 'ceph-mon3'] + self.relation_get.side_effect = ['ceph-mon2', + 'ceph-mon3'] + self.assertEquals({'ceph-mon1': True, + 'ceph-mon2': True, + 'ceph-mon3': True}, + hooks.get_peer_units()) \ No newline at end of file diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py new file mode 100644 index 
00000000..663a0488 --- /dev/null +++ b/ceph-mon/unit_tests/test_utils.py @@ -0,0 +1,121 @@ +import logging +import unittest +import os +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + + +def load_config(): + ''' + Walk backwords from __file__ looking for config.yaml, load and return the + 'options' section' + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % f) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. + ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr is None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. 
+ + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From d95bd069660d34e54a41f5a1acdea57a0109217e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:31 +0100 Subject: [PATCH 0858/2699] Tidy lint --- ceph-proxy/unit_tests/test_status.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index bd8b0241..6fc136a5 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ b/ceph-proxy/unit_tests/test_status.py @@ -92,4 +92,5 @@ def test_get_peer_units_peers_complete(self): self.assertEquals({'ceph-mon1': True, 'ceph-mon2': True, 'ceph-mon3': True}, - hooks.get_peer_units()) \ No newline at end of file + hooks.get_peer_units()) + From 6bbe110347e664ea5afa4d151a0d8464588d770e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:31 +0100 Subject: [PATCH 0859/2699] Tidy lint --- ceph-mon/unit_tests/test_status.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index bd8b0241..6fc136a5 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -92,4 +92,5 @@ def test_get_peer_units_peers_complete(self): self.assertEquals({'ceph-mon1': True, 'ceph-mon2': True, 'ceph-mon3': True}, - hooks.get_peer_units()) \ No newline at end of file + hooks.get_peer_units()) + From 12cca792cf1f93e353b39c4b87b55d4f5a9e4db0 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:54 +0100 Subject: [PATCH 0860/2699] Tidy harder --- ceph-proxy/unit_tests/test_status.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index 6fc136a5..973f2b6e 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ b/ceph-proxy/unit_tests/test_status.py @@ -93,4 +93,3 @@ def test_get_peer_units_peers_complete(self): 'ceph-mon2': True, 'ceph-mon3': True}, hooks.get_peer_units()) - From ec779f9fd5a866c121488548e37f997c5ffc4fef Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:02:54 +0100 Subject: [PATCH 0861/2699] Tidy harder --- ceph-mon/unit_tests/test_status.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 6fc136a5..973f2b6e 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -93,4 +93,3 @@ def test_get_peer_units_peers_complete(self): 'ceph-mon2': True, 'ceph-mon3': True}, hooks.get_peer_units()) - From 7394648cf128d9c56eed083eae663b72d095d7e6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:15:38 +0100 Subject: [PATCH 0862/2699] Add unit tests for service status --- ceph-osd/.coveragerc | 7 + ceph-osd/.pydevproject | 1 + ceph-osd/Makefile | 9 +- ceph-osd/hooks/{hooks.py => ceph_hooks.py} | 0 ceph-osd/hooks/config-changed | 2 +- ceph-osd/hooks/install.real | 2 +- ceph-osd/hooks/mon-relation-changed | 2 +- ceph-osd/hooks/mon-relation-departed | 2 +- .../nrpe-external-master-relation-changed | 2 +- .../nrpe-external-master-relation-joined | 2 +- ceph-osd/hooks/start | 2 +- ceph-osd/hooks/stop | 2 +- ceph-osd/hooks/update-status | 1 + ceph-osd/hooks/upgrade-charm | 2 +- ceph-osd/setup.cfg | 5 + 
ceph-osd/unit_tests/__init__.py | 2 + ceph-osd/unit_tests/test_status.py | 56 ++++++++ ceph-osd/unit_tests/test_utils.py | 121 ++++++++++++++++++ 18 files changed, 209 insertions(+), 11 deletions(-) create mode 100644 ceph-osd/.coveragerc rename ceph-osd/hooks/{hooks.py => ceph_hooks.py} (100%) create mode 120000 ceph-osd/hooks/update-status create mode 100644 ceph-osd/setup.cfg create mode 100644 ceph-osd/unit_tests/test_status.py create mode 100644 ceph-osd/unit_tests/test_utils.py diff --git a/ceph-osd/.coveragerc b/ceph-osd/.coveragerc new file mode 100644 index 00000000..7f7b5be3 --- /dev/null +++ b/ceph-osd/.coveragerc @@ -0,0 +1,7 @@ +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + if __name__ == .__main__.: +include= + hooks/hooks.py + hooks/ceph*.py diff --git a/ceph-osd/.pydevproject b/ceph-osd/.pydevproject index bb30cc40..be2105d0 100644 --- a/ceph-osd/.pydevproject +++ b/ceph-osd/.pydevproject @@ -4,5 +4,6 @@ Default /ceph-osd/hooks +/ceph-osd/unit_tests diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index de395a59..306c8444 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -3,9 +3,14 @@ PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - hooks tests unit_tests + hooks tests unit_tests @charm proof +test: + @# Bundletester expects unit tests here. + @echo Starting unit tests... + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + functional_test: @echo Starting Amulet tests... @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 @@ -13,7 +18,7 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml diff --git a/ceph-osd/hooks/hooks.py b/ceph-osd/hooks/ceph_hooks.py similarity index 100% rename from ceph-osd/hooks/hooks.py rename to ceph-osd/hooks/ceph_hooks.py diff --git a/ceph-osd/hooks/config-changed b/ceph-osd/hooks/config-changed index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/config-changed +++ b/ceph-osd/hooks/config-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/install.real b/ceph-osd/hooks/install.real index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/install.real +++ b/ceph-osd/hooks/install.real @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/mon-relation-changed b/ceph-osd/hooks/mon-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/mon-relation-changed +++ b/ceph-osd/hooks/mon-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/mon-relation-departed b/ceph-osd/hooks/mon-relation-departed index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/mon-relation-departed +++ b/ceph-osd/hooks/mon-relation-departed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/nrpe-external-master-relation-changed b/ceph-osd/hooks/nrpe-external-master-relation-changed index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/nrpe-external-master-relation-changed +++ b/ceph-osd/hooks/nrpe-external-master-relation-changed @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file 
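The run of one-line symlink diffs here all follow from the hooks.py to ceph_hooks.py rename at the top of this patch: Juju fires a hook by executing the file named after the event, and each of those files is a symlink into a single Python entry point that dispatches on its own invocation name. A minimal sketch of that pattern, assuming only the charmhelpers API already used throughout this series (the hook bodies are placeholders, not the charm's real code):

import sys

from charmhelpers.core.hookenv import (
    Hooks,
    UnregisteredHookError,
    log,
)

hooks = Hooks()


@hooks.hook('config-changed', 'upgrade-charm')
def config_changed():
    pass  # placeholder body


@hooks.hook('update-status')
def update_status():
    pass  # placeholder; the new update-status symlink resolves here


if __name__ == '__main__':
    try:
        # Juju executes e.g. hooks/update-status, a symlink to ceph_hooks.py;
        # execute() keys on the basename in sys.argv[0] to pick the handler.
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))
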
diff --git a/ceph-osd/hooks/nrpe-external-master-relation-joined b/ceph-osd/hooks/nrpe-external-master-relation-joined index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/nrpe-external-master-relation-joined +++ b/ceph-osd/hooks/nrpe-external-master-relation-joined @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/start b/ceph-osd/hooks/start index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/start +++ b/ceph-osd/hooks/start @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/stop b/ceph-osd/hooks/stop index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/stop +++ b/ceph-osd/hooks/stop @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/update-status b/ceph-osd/hooks/update-status new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-osd/hooks/update-status @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm index 9416ca6a..52d96630 120000 --- a/ceph-osd/hooks/upgrade-charm +++ b/ceph-osd/hooks/upgrade-charm @@ -1 +1 @@ -hooks.py \ No newline at end of file +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/setup.cfg b/ceph-osd/setup.cfg new file mode 100644 index 00000000..37083b62 --- /dev/null +++ b/ceph-osd/setup.cfg @@ -0,0 +1,5 @@ +[nosetests] +verbosity=2 +with-coverage=1 +cover-erase=1 +cover-package=hooks diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index e69de29b..f80aab3d 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import sys +sys.path.append('hooks') diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py new file mode 100644 index 00000000..0cededfc --- /dev/null +++ b/ceph-osd/unit_tests/test_status.py @@ -0,0 +1,56 @@ +import mock +import test_utils + +with mock.patch('utils.get_unit_hostname'): + import ceph_hooks as hooks + +TO_PATCH = [ + 'status_set', + 'config', + 'ceph', + 'relation_ids', + 'relation_get', + 'related_units', + 'get_conf', +] + +CEPH_MONS = [ + 'ceph/0', + 'ceph/1', + 'ceph/2', +] + +class ServiceStatusTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + + def test_assess_status_no_monitor_relation(self): + self.relation_ids.return_value = [] + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + def test_assess_status_monitor_relation_incomplete(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = CEPH_MONS + self.get_conf.return_value = None + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + + def test_assess_status_monitor_complete_no_disks(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = CEPH_MONS + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = [] + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + def test_assess_status_monitor_complete_disks(self): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = CEPH_MONS + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = ['12345', + '67890'] + hooks.assess_status() + 
self.status_set.assert_called_with('active', mock.ANY) diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py new file mode 100644 index 00000000..663a0488 --- /dev/null +++ b/ceph-osd/unit_tests/test_utils.py @@ -0,0 +1,121 @@ +import logging +import unittest +import os +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + + +def load_config(): + ''' + Walk backwords from __file__ looking for config.yaml, load and return the + 'options' section' + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % f) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. + ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr is None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. 
+ + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From fe7b5e13c0c7a145f15440ec2955408bad7d9e5e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:16:21 +0100 Subject: [PATCH 0863/2699] Tidy import of hooks --- ceph-osd/unit_tests/test_status.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index 0cededfc..1b5ab315 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -1,8 +1,7 @@ import mock import test_utils -with mock.patch('utils.get_unit_hostname'): - import ceph_hooks as hooks +import ceph_hooks as hooks TO_PATCH = [ 'status_set', From ae7c252765fa62f63dafa6f056cb0389edb77ca0 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:16:42 +0100 Subject: [PATCH 0864/2699] Tidy imports --- ceph-proxy/unit_tests/test_status.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index 973f2b6e..c4330185 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ b/ceph-proxy/unit_tests/test_status.py @@ -1,8 +1,7 @@ import mock import test_utils -with mock.patch('utils.get_unit_hostname'): - import ceph_hooks as hooks +import ceph_hooks as hooks TO_PATCH = [ 'status_set', From a36659e56e99048a46d14dcd874c89d9c9238554 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 6 Oct 2015 21:16:42 +0100 Subject: [PATCH 0865/2699] Tidy imports --- ceph-mon/unit_tests/test_status.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 973f2b6e..c4330185 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -1,8 +1,7 @@ import mock import test_utils -with mock.patch('utils.get_unit_hostname'): - import ceph_hooks as hooks +import ceph_hooks as hooks TO_PATCH = [ 'status_set', From 98a9bb30efa4547c073187095a92d5a3b0a4231b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 7 Oct 2015 14:26:08 +0000 Subject: [PATCH 0866/2699] Start move over to contexts for ceph.conf to pave way for workload status --- ceph-radosgw/hooks/ceph_radosgw_context.py | 80 +++++++++++++++++++++- ceph-radosgw/hooks/hooks.py | 30 +++++++- ceph-radosgw/hooks/utils.py | 42 +++++++++++- 3 files changed, 146 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 79c295c1..d6fca54b 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -3,7 +3,14 @@ determine_api_port, determine_apache_port, ) - +from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.core.hookenv import ( + config, + relation_ids, + related_units, + relation_get, +) +import socket class HAProxyContext(context.HAProxyContext): @@ -27,3 +34,74 @@ def __call__(self): # for haproxy.conf ctxt['service_ports'] = port_mapping return ctxt + + +class IdentityServiceContext(context.IdentityServiceContext): + interfaces = ['identity-service'] + + def __call__(self): + ctxt = super(IdentityServiceContext, self).__call__() + if not ctxt: + return + + for relid in relation_ids('identity-service'): + for unit in 
related_units(relid): + if not ctxt.get('admin_token'): + ctxt['admin_token'] = \ + relation_get('admin_token', unit, relid) + + ctxt['auth_type'] = 'keystone' + ctxt['user_roles'] = config('operator-roles') + ctxt['cache_size'] = config('cache-size') + ctxt['revocation_check_interval'] = config('revocation-check-interval') + if self.context_complete(ctxt): + return ctxt + + return {} + + +class MonContext(context.OSContextGenerator): + interfaces = ['mon'] + + def __call__(self): + if not relation_ids('mon'): + return {} + hosts = [] + auth = 'none' + for relid in relation_ids('mon'): + for unit in related_units(relid): + host_ip = self.get_host_ip(relation_get('ceph-public-address', + unit, relid)) + hosts.append('{}:6789'.format(host_ip)) + _auth = relation_get('auth', unit, relid) + if _auth: + auth = _auth + hosts.sort() + ctxt = { + 'auth_supported': auth, + 'mon_hosts': ' '.join(hosts), + 'hostname': socket.gethostname(), + 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, + 'use_syslog': str(config('use-syslog')).lower(), + 'embedded_webserver': config('use-embedded-webserver'), + } + + if self.context_complete(ctxt): + print ctxt + return ctxt + + return {} + + def get_host_ip(self, hostname=None): + try: + if not hostname: + hostname = unit_get('private-address') + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ceb20d10..4912b5e1 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -23,6 +23,7 @@ relation_set, log, ERROR, Hooks, UnregisteredHookError, + status_set, ) from charmhelpers.fetch import ( apt_update, @@ -32,7 +33,7 @@ ) from charmhelpers.core.host import ( lsb_release, - restart_on_change + restart_on_change, ) from utils import ( render_template, @@ -41,6 +42,8 @@ is_apache_24, CEPHRG_HA_RES, register_configs, + REQUIRED_INTERFACES, + check_optional_relations, ) from charmhelpers.payload.execd import execd_preinstall @@ -55,7 +58,9 @@ canonical_url, PUBLIC, INTERNAL, ADMIN, ) - +from charmhelpers.contrib.openstack.utils import ( + os_workload_status, +) hooks = Hooks() CONFIGS = register_configs() @@ -96,6 +101,7 @@ def install_ceph_optimised_packages(): def install_packages(): + status_set('maintenance', 'Installing apt packages') add_source(config('source'), config('key')) if (config('use-ceph-optimised-packages') and not config('use-embedded-webserver')): @@ -110,7 +116,10 @@ def install_packages(): @hooks.hook('install.real') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) def install(): + status_set('maintenance', 'Executing pre-install') execd_preinstall() enable_pocket('multiverse') install_packages() @@ -139,6 +148,7 @@ def emit_cephconf(): if ks_conf: cephcontext.update(ks_conf) + print cephcontext with open('/etc/ceph/ceph.conf', 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) @@ -177,6 +187,8 @@ def apache_ports(): @hooks.hook('upgrade-charm', 'config-changed') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], '/etc/haproxy/haproxy.cfg': ['haproxy']}) def config_changed(): @@ -184,6 +196,7 @@ def config_changed(): emit_cephconf() 
CONFIGS.write_all() if not config('use-embedded-webserver'): + status_set('maintenance', 'configuring apache') emit_apacheconf() install_www_scripts() apache_sites() @@ -236,12 +249,15 @@ def get_keystone_conf(): config('revocation-check-interval') } if None not in ks_auth.itervalues(): + print ks_auth return ks_auth return None @hooks.hook('mon-relation-departed', 'mon-relation-changed') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): emit_cephconf() @@ -273,6 +289,8 @@ def restart(): @hooks.hook('identity-service-relation-joined') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) def identity_joined(relid=None): if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') @@ -293,6 +311,8 @@ def identity_joined(relid=None): @hooks.hook('identity-service-relation-changed') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def identity_changed(): emit_cephconf() @@ -301,6 +321,8 @@ def identity_changed(): @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_changed(): CONFIGS.write_all() @@ -309,6 +331,8 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) def ha_relation_joined(): # Obtain the config values necessary for the cluster config. These # include multicast port and interface to bind to. @@ -361,6 +385,8 @@ def ha_relation_joined(): @hooks.hook('ha-relation-changed') +@os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) def ha_relation_changed(): clustered = relation_get('clustered') if clustered: diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 7c96bfb1..95c80595 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -1,4 +1,3 @@ - # # Copyright 2012 Canonical Ltd. # @@ -12,16 +11,26 @@ import os from copy import deepcopy from collections import OrderedDict -from charmhelpers.core.hookenv import unit_get +from charmhelpers.core.hookenv import unit_get, relation_ids, status_get from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack import context, templating +from charmhelpers.contrib.openstack.utils import set_os_workload_status +from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config +from charmhelpers.core.host import cmp_pkgrevno import ceph_radosgw_context +# The interface is said to be satisfied if anyone of the interfaces in the +# list has a complete context. 
+REQUIRED_INTERFACES = { + 'identity': ['identity-service'], + 'mon': ['ceph-radosgw'], +} CEPHRG_HA_RES = 'grp_cephrg_vips' TEMPLATES_DIR = 'templates' TEMPLATES = 'templates/' HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' +CEPH_CONF = '/etc/ceph/ceph.conf' BASE_RESOURCE_MAP = OrderedDict([ (HAPROXY_CONF, { @@ -29,6 +38,10 @@ ceph_radosgw_context.HAProxyContext()], 'services': ['haproxy'], }), + (CEPH_CONF, { + 'contexts': [ceph_radosgw_context.MonContext()], + 'services': ['radosgw'], + }), ]) try: @@ -58,7 +71,13 @@ def resource_map(): def register_configs(release='icehouse'): configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) - for cfg, rscs in resource_map().iteritems(): + CONFIGS = resource_map() + if cmp_pkgrevno('radosgw', '0.55') >= 0: + # Add keystone configuration if found + CONFIGS[CEPH_CONF]['contexts'].append( + ceph_radosgw_context.IdentityServiceContext() + ) + for cfg, rscs in CONFIGS.iteritems(): configs.register(cfg, rscs['contexts']) return configs @@ -103,3 +122,20 @@ def is_apache_24(): return True else: return False + + +def check_optional_relations(configs): + required_interfaces = {} + if relation_ids('ha'): + required_interfaces['ha'] = ['cluster'] + try: + get_hacluster_config() + except: + return ('blocked', + 'hacluster missing configuration: ' + 'vip, vip_iface, vip_cidr') + if required_interfaces: + set_os_workload_status(configs, required_interfaces) + return status_get() + else: + return 'unknown', 'No optional relations' From 0cf6646b47c78885477ce2b1ff2c2bc3f2f9fedb Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 8 Oct 2015 04:41:59 -0700 Subject: [PATCH 0867/2699] Tidy lint --- ceph-osd/unit_tests/test_status.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index 1b5ab315..f4342d28 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -19,6 +19,7 @@ 'ceph/2', ] + class ServiceStatusTestCase(test_utils.CharmTestCase): def setUp(self): From da95e2c405c0b63f7c56051400872bb92cfb510e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 8 Oct 2015 12:13:08 +0000 Subject: [PATCH 0868/2699] Added unit tests --- ceph-radosgw/hooks/ceph_radosgw_context.py | 41 ++--- .../charmhelpers/contrib/openstack/context.py | 1 + ceph-radosgw/hooks/hooks.py | 100 +----------- ceph-radosgw/hooks/install | 2 +- ceph-radosgw/hooks/utils.py | 15 +- ceph-radosgw/unit_tests/test_hooks.py | 143 ++++-------------- 6 files changed, 64 insertions(+), 238 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index d6fca54b..dfca00c6 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -9,8 +9,11 @@ relation_ids, related_units, relation_get, + unit_get, ) import socket +import dns.resolver + class HAProxyContext(context.HAProxyContext): @@ -36,20 +39,21 @@ def __call__(self): return ctxt -class IdentityServiceContext(context.IdentityServiceContext): - interfaces = ['identity-service'] +class IdentityServiceContext(context.IdentityServiceContext): + interfaces = ['identity-service'] - def __call__(self): - ctxt = super(IdentityServiceContext, self).__call__() - if not ctxt: + def __call__(self): + ctxt = super(IdentityServiceContext, self).__call__() + if not ctxt: return + ctxt['admin_token'] = None for relid in relation_ids('identity-service'): for unit in related_units(relid): if not ctxt.get('admin_token'): 
ctxt['admin_token'] = \ relation_get('admin_token', unit, relid) - + ctxt['auth_type'] = 'keystone' ctxt['user_roles'] = config('operator-roles') ctxt['cache_size'] = config('cache-size') @@ -57,11 +61,11 @@ def __call__(self): if self.context_complete(ctxt): return ctxt - return {} + return {} -class MonContext(context.OSContextGenerator): - interfaces = ['mon'] +class MonContext(context.OSContextGenerator): + interfaces = ['ceph-radosgw'] def __call__(self): if not relation_ids('mon'): @@ -70,12 +74,14 @@ def __call__(self): auth = 'none' for relid in relation_ids('mon'): for unit in related_units(relid): - host_ip = self.get_host_ip(relation_get('ceph-public-address', - unit, relid)) - hosts.append('{}:6789'.format(host_ip)) - _auth = relation_get('auth', unit, relid) - if _auth: - auth = _auth + ceph_public_addr = relation_get('ceph-public-address', unit, + relid) + if ceph_public_addr: + host_ip = self.get_host_ip(ceph_public_addr) + hosts.append('{}:6789'.format(host_ip)) + _auth = relation_get('auth', unit, relid) + if _auth: + auth = _auth hosts.sort() ctxt = { 'auth_supported': auth, @@ -84,13 +90,12 @@ def __call__(self): 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), 'embedded_webserver': config('use-embedded-webserver'), - } + } if self.context_complete(ctxt): - print ctxt return ctxt - return {} + return {} def get_host_ip(self, hostname=None): try: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 49c04de0..faa4d79b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -574,6 +574,7 @@ def __call__(self): if not relation_ids('cluster') and not self.singlenode_mode: return {} + print "config('prefer-ipv6'): {}".format(config('prefer-ipv6')) if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4912b5e1..8898706b 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -16,7 +16,6 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, - related_units, config, unit_get, open_port, @@ -37,7 +36,6 @@ ) from utils import ( render_template, - get_host_ip, enable_pocket, is_apache_24, CEPHRG_HA_RES, @@ -48,7 +46,6 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import cmp_pkgrevno -from socket import gethostname as get_unit_hostname from charmhelpers.contrib.network.ip import ( get_iface_for_address, @@ -59,7 +56,7 @@ PUBLIC, INTERNAL, ADMIN, ) from charmhelpers.contrib.openstack.utils import ( - os_workload_status, + set_os_workload_status, ) hooks = Hooks() CONFIGS = register_configs() @@ -116,42 +113,15 @@ def install_packages(): @hooks.hook('install.real') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) def install(): status_set('maintenance', 'Executing pre-install') execd_preinstall() enable_pocket('multiverse') install_packages() os.makedirs(NSS_DIR) - - -def emit_cephconf(): - # Ensure ceph directory actually exists if not os.path.exists('/etc/ceph'): os.makedirs('/etc/ceph') - cephcontext = { - 'auth_supported': get_auth() or 'none', - 'mon_hosts': ' '.join(get_mon_hosts()), - 'hostname': get_unit_hostname(), - 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, - 'use_syslog': str(config('use-syslog')).lower(), - 
'embedded_webserver': config('use-embedded-webserver'), - } - - # Check to ensure that correct version of ceph is - # in use - if cmp_pkgrevno('radosgw', '0.55') >= 0: - # Add keystone configuration if found - ks_conf = get_keystone_conf() - if ks_conf: - cephcontext.update(ks_conf) - - print cephcontext - with open('/etc/ceph/ceph.conf', 'w') as cephconf: - cephconf.write(render_template('ceph.conf', cephcontext)) - def emit_apacheconf(): apachecontext = { @@ -187,13 +157,10 @@ def apache_ports(): @hooks.hook('upgrade-charm', 'config-changed') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], '/etc/haproxy/haproxy.cfg': ['haproxy']}) def config_changed(): install_packages() - emit_cephconf() CONFIGS.write_all() if not config('use-embedded-webserver'): status_set('maintenance', 'configuring apache') @@ -207,60 +174,11 @@ def config_changed(): identity_joined(relid=r_id) -def get_mon_hosts(): - hosts = [] - for relid in relation_ids('mon'): - for unit in related_units(relid): - host_ip = get_host_ip(relation_get('ceph-public-address', - unit, relid)) - hosts.append('{}:6789'.format(host_ip)) - - hosts.sort() - return hosts - - -def get_auth(): - return get_conf('auth') - - -def get_conf(name): - for relid in relation_ids('mon'): - for unit in related_units(relid): - conf = relation_get(name, - unit, relid) - if conf: - return conf - return None - - -def get_keystone_conf(): - for relid in relation_ids('identity-service'): - for unit in related_units(relid): - ks_auth = { - 'auth_type': 'keystone', - 'auth_protocol': - relation_get('auth_protocol', unit, relid) or "http", - 'auth_host': relation_get('auth_host', unit, relid), - 'auth_port': relation_get('auth_port', unit, relid), - 'admin_token': relation_get('admin_token', unit, relid), - 'user_roles': config('operator-roles'), - 'cache_size': config('cache-size'), - 'revocation_check_interval': - config('revocation-check-interval') - } - if None not in ks_auth.itervalues(): - print ks_auth - return ks_auth - return None - - @hooks.hook('mon-relation-departed', 'mon-relation-changed') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): - emit_cephconf() + CONFIGS.write_all() key = relation_get('radosgw_key') if key: ceph.import_radosgw_key(key) @@ -289,8 +207,6 @@ def restart(): @hooks.hook('identity-service-relation-joined') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) def identity_joined(relid=None): if cmp_pkgrevno('radosgw', '0.55') < 0: log('Integration with keystone requires ceph >= 0.55') @@ -311,18 +227,14 @@ def identity_joined(relid=None): @hooks.hook('identity-service-relation-changed') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def identity_changed(): - emit_cephconf() + CONFIGS.write_all() restart() @hooks.hook('cluster-relation-changed', 'cluster-relation-joined') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_changed(): CONFIGS.write_all() @@ -331,8 +243,6 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) def ha_relation_joined(): # Obtain the config values 
necessary for the cluster config. These # include multicast port and interface to bind to. @@ -385,8 +295,6 @@ def ha_relation_joined(): @hooks.hook('ha-relation-changed') -@os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) def ha_relation_changed(): clustered = relation_get('clustered') if clustered: @@ -403,3 +311,5 @@ def ha_relation_changed(): hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) + set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, + charm_func=check_optional_relations) diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install index 83a9d3ce..fa9f910f 100755 --- a/ceph-radosgw/hooks/install +++ b/ceph-radosgw/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'jinja2' 'dnspython') check_and_install() { pkg="${1}-${2}" diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 95c80595..9681a3c7 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -9,10 +9,11 @@ import socket import re import os +import dns.resolver +import jinja2 from copy import deepcopy from collections import OrderedDict from charmhelpers.core.hookenv import unit_get, relation_ids, status_get -from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack import context, templating from charmhelpers.contrib.openstack.utils import set_os_workload_status from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config @@ -44,18 +45,6 @@ }), ]) -try: - import jinja2 -except ImportError: - apt_install('python-jinja2', fatal=True) - import jinja2 - -try: - import dns.resolver -except ImportError: - apt_install('python-dnspython', fatal=True) - import dns.resolver - def resource_map(): ''' diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index fe568690..77b15031 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -23,6 +23,7 @@ import hooks as ceph_hooks TO_PATCH = [ + 'CONFIGS', 'add_source', 'apt_update', 'apt_install', @@ -31,22 +32,20 @@ 'cmp_pkgrevno', 'execd_preinstall', 'enable_pocket', - 'get_host_ip', 'get_iface_for_address', 'get_netmask_for_address', - 'get_unit_hostname', 'glob', 'is_apache_24', 'log', 'lsb_release', 'open_port', 'os', - 'related_units', 'relation_ids', 'relation_set', 'relation_get', 'render_template', 'shutil', + 'status_set', 'subprocess', 'sys', 'unit_get', @@ -123,30 +122,30 @@ def test_install(self): self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') - def test_emit_cephconf(self): - _get_keystone_conf = self.patch('get_keystone_conf') - _get_auth = self.patch('get_auth') - _get_mon_hosts = self.patch('get_mon_hosts') - _get_auth.return_value = 'cephx' - _get_keystone_conf.return_value = {'keystone_key': 'keystone_value'} - _get_mon_hosts.return_value = ['10.0.0.1:6789', '10.0.0.2:6789'] - self.get_unit_hostname.return_value = 'bob' - self.os.path.exists.return_value = False - cephcontext = { - 'auth_supported': 'cephx', - 'mon_hosts': '10.0.0.1:6789 10.0.0.2:6789', - 'hostname': 'bob', - 'old_auth': False, - 'use_syslog': 'false', - 'keystone_key': 'keystone_value', - 'embedded_webserver': False, - } - self.cmp_pkgrevno.return_value = 1 - with patch_open() as (_open, _file): - 
ceph_hooks.emit_cephconf() - self.os.makedirs.assert_called_with('/etc/ceph') - _open.assert_called_with('/etc/ceph/ceph.conf', 'w') - self.render_template.assert_called_with('ceph.conf', cephcontext) +# def test_emit_cephconf(self): +# _get_keystone_conf = self.patch('get_keystone_conf') +# _get_auth = self.patch('get_auth') +# _get_mon_hosts = self.patch('get_mon_hosts') +# _get_auth.return_value = 'cephx' +# _get_keystone_conf.return_value = {'keystone_key': 'keystone_value'} +# _get_mon_hosts.return_value = ['10.0.0.1:6789', '10.0.0.2:6789'] +# self.get_unit_hostname.return_value = 'bob' +# self.os.path.exists.return_value = False +# cephcontext = { +# 'auth_supported': 'cephx', +# 'mon_hosts': '10.0.0.1:6789 10.0.0.2:6789', +# 'hostname': 'bob', +# 'old_auth': False, +# 'use_syslog': 'false', +# 'keystone_key': 'keystone_value', +# 'embedded_webserver': False, +# } +# self.cmp_pkgrevno.return_value = 1 +# with patch_open() as (_open, _file): +# ceph_hooks.emit_cephconf() +# self.os.makedirs.assert_called_with('/etc/ceph') +# _open.assert_called_with('/etc/ceph/ceph.conf', 'w') +# self.render_template.assert_called_with('ceph.conf', cephcontext) def test_emit_apacheconf(self): self.is_apache_24.return_value = True @@ -195,7 +194,6 @@ def test_apache_reload(self): def test_config_changed(self): _install_packages = self.patch('install_packages') - _emit_cephconf = self.patch('emit_cephconf') _emit_apacheconf = self.patch('emit_apacheconf') _install_www_scripts = self.patch('install_www_scripts') _apache_sites = self.patch('apache_sites') @@ -203,105 +201,30 @@ def test_config_changed(self): _apache_reload = self.patch('apache_reload') ceph_hooks.config_changed() _install_packages.assert_called() - _emit_cephconf.assert_called() + self.CONFIGS.write_all.assert_called_with() _emit_apacheconf.assert_called() _install_www_scripts.assert_called() _apache_sites.assert_called() _apache_modules.assert_called() _apache_reload.assert_called() - def test_get_mon_hosts(self): - self.relation_ids.return_value = ['monrelid'] - self.related_units.return_value = ['monunit'] - - def rel_get(k, *args): - return {'private-address': '127.0.0.1', - 'ceph-public-address': '10.0.0.1'}[k] - - self.relation_get.side_effect = rel_get - self.get_host_ip.side_effect = lambda x: x - self.assertEquals(ceph_hooks.get_mon_hosts(), ['10.0.0.1:6789']) - - def test_get_conf(self): - self.relation_ids.return_value = ['monrelid'] - self.related_units.return_value = ['monunit'] - self.relation_get.return_value = 'bob' - self.assertEquals(ceph_hooks.get_conf('key'), 'bob') - - def test_get_conf_nomatch(self): - self.relation_ids.return_value = ['monrelid'] - self.related_units.return_value = ['monunit'] - self.relation_get.return_value = '' - self.assertEquals(ceph_hooks.get_conf('key'), None) - - def test_get_auth(self): - self.relation_ids.return_value = ['monrelid'] - self.related_units.return_value = ['monunit'] - self.relation_get.return_value = 'bob' - self.assertEquals(ceph_hooks.get_auth(), 'bob') - - def test_get_keystone_conf(self): - self.test_config.set('operator-roles', 'admin') - self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '21') - self.relation_ids.return_value = ['idrelid'] - self.related_units.return_value = ['idunit'] - - def _relation_get(key, unit, relid): - ks_dict = { - 'auth_protocol': 'https', - 'auth_host': '10.0.0.2', - 'auth_port': '8090', - 'admin_token': 'sectocken', - } - return ks_dict[key] - self.relation_get.side_effect = _relation_get - 
self.assertEquals(ceph_hooks.get_keystone_conf(), { - 'auth_type': 'keystone', - 'auth_protocol': 'https', - 'admin_token': 'sectocken', - 'user_roles': 'admin', - 'auth_host': '10.0.0.2', - 'cache_size': '42', - 'auth_port': '8090', - 'revocation_check_interval': '21'}) - - def test_get_keystone_conf_missinginfo(self): - self.test_config.set('operator-roles', 'admin') - self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '21') - self.relation_ids.return_value = ['idrelid'] - self.related_units.return_value = ['idunit'] - - def _relation_get(key, unit, relid): - ks_dict = { - 'auth_protocol': 'https', - 'auth_host': '10.0.0.2', - 'auth_port': '8090', - } - return ks_dict[key] if key in ks_dict else None - self.relation_get.side_effect = _relation_get - self.assertEquals(ceph_hooks.get_keystone_conf(), None) - def test_mon_relation(self): - _emit_cephconf = self.patch('emit_cephconf') _ceph = self.patch('ceph') _restart = self.patch('restart') self.relation_get.return_value = 'seckey' ceph_hooks.mon_relation() _restart.assert_called() _ceph.import_radosgw_key.assert_called_with('seckey') - _emit_cephconf.assert_called() + self.CONFIGS.write_all.assert_called_with() def test_mon_relation_nokey(self): - _emit_cephconf = self.patch('emit_cephconf') _ceph = self.patch('ceph') _restart = self.patch('restart') self.relation_get.return_value = None ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) self.assertFalse(_restart.called) - _emit_cephconf.assert_called() + self.CONFIGS.write_all.assert_called_with() def test_gateway_relation(self): self.unit_get.return_value = 'myserver' @@ -374,10 +297,9 @@ def test_identity_joined_public_name(self, _config, _unit_get, admin_url='http://myserv:80/swift') def test_identity_changed(self): - _emit_cephconf = self.patch('emit_cephconf') _restart = self.patch('restart') ceph_hooks.identity_changed() - _emit_cephconf.assert_called() + self.CONFIGS.write_all.assert_called_with() _restart.assert_called() @patch('charmhelpers.contrib.openstack.ip.is_clustered') @@ -391,12 +313,11 @@ def test_canonical_url_ipv6(self, _config, _unit_get, _is_clustered): self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), 'http://[%s]' % ipv6_addr) - @patch.object(ceph_hooks, 'CONFIGS') - def test_cluster_changed(self, configs): + def test_cluster_changed(self): _id_joined = self.patch('identity_joined') self.relation_ids.return_value = ['rid'] ceph_hooks.cluster_changed() - configs.write_all.assert_called() + self.CONFIGS.write_all.assert_called_with() _id_joined.assert_called_with(relid='rid') def test_ha_relation_joined_no_vip(self): From 29ea1899077f91af0dd1ffde9a795526c023a4a0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 8 Oct 2015 12:13:59 +0000 Subject: [PATCH 0869/2699] ceph_radosgw_context.py unit tests --- .../unit_tests/test_ceph_radosgw_context.py | 177 ++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 ceph-radosgw/unit_tests/test_ceph_radosgw_context.py diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py new file mode 100644 index 00000000..efad4d5e --- /dev/null +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -0,0 +1,177 @@ +from mock import patch + +import ceph_radosgw_context as context + +from test_utils import CharmTestCase +import charmhelpers + +TO_PATCH = [ + 'config', + 'relation_get', + 'relation_ids', + 'related_units', + 'cmp_pkgrevno', + 'socket', +] + + +class 
HAProxyContextTests(CharmTestCase): + def setUp(self): + super(HAProxyContextTests, self).setUp(context, TO_PATCH) + self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + @patch('charmhelpers.contrib.openstack.context.get_host_ip') + @patch('charmhelpers.contrib.openstack.context.config') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') + @patch('charmhelpers.contrib.openstack.context.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, + _ctxtconfig, _get_host_ip, _local_unit, _unit_get): + _get_host_ip.return_value = '10.0.0.10' + _unit_get.return_value = '10.0.0.10' + _ctxtconfig.side_effect = self.test_config.get + _haconfig.side_effect = self.test_config.get + _harelation_ids.return_value = [] + haproxy_context = context.HAProxyContext() + expect = { + 'cephradosgw_bind_port': 70, + 'service_ports': {'cephradosgw-server': [80, 70]} + } + self.assertEqual(expect, haproxy_context()) + + +class IdentityServiceContextTest(CharmTestCase): + + def setUp(self): + super(IdentityServiceContextTest, self).setUp(context, TO_PATCH) + self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') + @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + @patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, + _format_ipv6_addr): + self.test_config.set('operator-roles', 'Babel') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '7500000') + self.test_relation.set({'admin_token': 'ubuntutesting'}) + self.relation_ids.return_value = ['identity-service:5'] + self.related_units.return_value = ['keystone/0'] + _format_ipv6_addr.return_value = False + _rids.return_value = 'rid1' + _runits.return_value = 'runit' + _ctxt_comp.return_value = True + id_data = { + 'service_port': 9876, + 'service_host': '127.0.0.4', + 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'service_tenant': 'ten', + 'service_username': 'admin', + 'service_password': 'adminpass', + } + _rget.return_value = id_data + ids_ctxt = context.IdentityServiceContext() + expect = { + 'admin_password': 'adminpass', + 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'admin_tenant_name': 'ten', + 'admin_token': 'ubuntutesting', + 'admin_user': 'admin', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'auth_protocol': 'http', + 'auth_type': 'keystone', + 'cache_size': '42', + 'revocation_check_interval': '7500000', + 'service_host': '127.0.0.4', + 'service_port': 9876, + 'service_protocol': 'http', + 'user_roles': 'Babel', + } + self.assertEqual(expect, ids_ctxt()) + + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') + @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + 
@patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, + _ctxt_comp, _format_ipv6_addr): + self.test_config.set('operator-roles', 'Babel') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '7500000') + self.test_relation.set({}) + self.relation_ids.return_value = ['identity-service:5'] + self.related_units.return_value = ['keystone/0'] + _format_ipv6_addr.return_value = False + _rids.return_value = 'rid1' + _runits.return_value = 'runit' + _ctxt_comp.return_value = True + id_data = { + 'service_port': 9876, + 'service_host': '127.0.0.4', + 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'service_tenant': 'ten', + 'service_username': 'admin', + 'service_password': 'adminpass', + } + _rget.return_value = id_data + ids_ctxt = context.IdentityServiceContext() + self.assertEqual({}, ids_ctxt()) + + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt_no_rels(self, _log, _rids): + _rids.return_value = [] + ids_ctxt = context.IdentityServiceContext() + self.assertEquals(ids_ctxt(), None) + + +class MonContextTest(CharmTestCase): + + def setUp(self): + super(MonContextTest, self).setUp(context, TO_PATCH) + self.config.side_effect = self.test_config.get + + def test_ctxt(self): + self.socket.gethostname.return_value = '10.0.0.10' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return 'cephx' + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + expect = { + 'auth_supported': 'cephx', + 'embedded_webserver': False, + 'hostname': '10.0.0.10', + 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'old_auth': False, + 'use_syslog': 'false' + } + self.assertEqual(expect, mon_ctxt()) + + def test_ctxt_missing_data(self): + self.socket.gethostname.return_value = '10.0.0.10' + mon_ctxt = context.MonContext() + self.relation_get.return_value = None + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.assertEqual({}, mon_ctxt()) From cacf59145b26cc68742527e5e51de6bcfcd91fb6 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 8 Oct 2015 12:31:24 +0000 Subject: [PATCH 0870/2699] Cant compare pkgrevno of a package thats not installed --- ceph-radosgw/hooks/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 9681a3c7..c431a359 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -18,6 +18,7 @@ from charmhelpers.contrib.openstack.utils import set_os_workload_status from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.fetch import filter_installed_packages import ceph_radosgw_context @@ -61,7 +62,8 @@ def register_configs(release='icehouse'): configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) CONFIGS = resource_map() - if 
cmp_pkgrevno('radosgw', '0.55') >= 0: + pkg = 'radosgw' + if not filter_installed_packages([pkg]) and cmp_pkgrevno(pkg, '0.55') >= 0: # Add keystone configuration if found CONFIGS[CEPH_CONF]['contexts'].append( ceph_radosgw_context.IdentityServiceContext() From fb6ff154d01f2eac68062164772515bf14311c5d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 8 Oct 2015 14:17:51 +0100 Subject: [PATCH 0871/2699] Tidyup --- .../charmhelpers/contrib/openstack/context.py | 1 - ceph-radosgw/unit_tests/test_hooks.py | 25 ------------------- 2 files changed, 26 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index faa4d79b..49c04de0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -574,7 +574,6 @@ def __call__(self): if not relation_ids('cluster') and not self.singlenode_mode: return {} - print "config('prefer-ipv6'): {}".format(config('prefer-ipv6')) if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 77b15031..ec9279d7 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -122,31 +122,6 @@ def test_install(self): self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') -# def test_emit_cephconf(self): -# _get_keystone_conf = self.patch('get_keystone_conf') -# _get_auth = self.patch('get_auth') -# _get_mon_hosts = self.patch('get_mon_hosts') -# _get_auth.return_value = 'cephx' -# _get_keystone_conf.return_value = {'keystone_key': 'keystone_value'} -# _get_mon_hosts.return_value = ['10.0.0.1:6789', '10.0.0.2:6789'] -# self.get_unit_hostname.return_value = 'bob' -# self.os.path.exists.return_value = False -# cephcontext = { -# 'auth_supported': 'cephx', -# 'mon_hosts': '10.0.0.1:6789 10.0.0.2:6789', -# 'hostname': 'bob', -# 'old_auth': False, -# 'use_syslog': 'false', -# 'keystone_key': 'keystone_value', -# 'embedded_webserver': False, -# } -# self.cmp_pkgrevno.return_value = 1 -# with patch_open() as (_open, _file): -# ceph_hooks.emit_cephconf() -# self.os.makedirs.assert_called_with('/etc/ceph') -# _open.assert_called_with('/etc/ceph/ceph.conf', 'w') -# self.render_template.assert_called_with('ceph.conf', cephcontext) - def test_emit_apacheconf(self): self.is_apache_24.return_value = True self.unit_get.return_value = '10.0.0.1' From 464db2d773d67c5c21453d79be2a4f5a39390624 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Oct 2015 10:51:42 +0000 Subject: [PATCH 0872/2699] Identity relation should be an optional interface for status checks as not all versions support it. 
Catch inconsistent auth passed back by mons --- ceph-radosgw/hooks/ceph_radosgw_context.py | 13 +++++++++++-- ceph-radosgw/hooks/utils.py | 3 ++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index dfca00c6..fc8e6cc7 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -5,7 +5,9 @@ ) from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.core.hookenv import ( + WARNING, config, + log, relation_ids, related_units, relation_get, @@ -71,7 +73,7 @@ def __call__(self): if not relation_ids('mon'): return {} hosts = [] - auth = 'none' + auths = [] for relid in relation_ids('mon'): for unit in related_units(relid): ceph_public_addr = relation_get('ceph-public-address', unit, @@ -81,7 +83,14 @@ def __call__(self): hosts.append('{}:6789'.format(host_ip)) _auth = relation_get('auth', unit, relid) if _auth: - auth = _auth + auths.append(_auth) + if len(set(auths)) != 1: + e=("Inconsistent or absent auth returned by mon units. Setting " + "auth_supported to 'none'") + log(e, level=WARNING) + auth = 'none' + else: + auth = auths[0] hosts.sort() ctxt = { 'auth_supported': auth, diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index c431a359..7ecae7a8 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -25,7 +25,6 @@ # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. REQUIRED_INTERFACES = { - 'identity': ['identity-service'], 'mon': ['ceph-radosgw'], } CEPHRG_HA_RES = 'grp_cephrg_vips' @@ -125,6 +124,8 @@ def check_optional_relations(configs): return ('blocked', 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') + if cmp_pkgrevno(pkg, '0.55') >= 0 and relation_ids('identity-service'): + required_interfaces['identity'] = ['identity-service'] if required_interfaces: set_os_workload_status(configs, required_interfaces) return status_get() From 106c88f34698171735de32d2246e89b91910cbb4 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Oct 2015 10:56:01 +0000 Subject: [PATCH 0873/2699] Fix lint and add unit test to test inconsistent auths --- ceph-radosgw/hooks/ceph_radosgw_context.py | 4 +-- ceph-radosgw/hooks/utils.py | 3 ++- .../unit_tests/test_ceph_radosgw_context.py | 25 +++++++++++++++++++ 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index fc8e6cc7..1e079904 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -85,8 +85,8 @@ def __call__(self): if _auth: auths.append(_auth) if len(set(auths)) != 1: - e=("Inconsistent or absent auth returned by mon units. Setting " - "auth_supported to 'none'") + e = ("Inconsistent or absent auth returned by mon units. 
Setting " + "auth_supported to 'none'") log(e, level=WARNING) auth = 'none' else: diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 7ecae7a8..d14c6ecb 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -124,7 +124,8 @@ def check_optional_relations(configs): return ('blocked', 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') - if cmp_pkgrevno(pkg, '0.55') >= 0 and relation_ids('identity-service'): + if cmp_pkgrevno('radosgw', '0.55') >= 0 and \ + relation_ids('identity-service'): required_interfaces['identity'] = ['identity-service'] if required_interfaces: set_os_workload_status(configs, required_interfaces) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index efad4d5e..5bd801d7 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -7,6 +7,7 @@ TO_PATCH = [ 'config', + 'log', 'relation_get', 'relation_ids', 'related_units', @@ -175,3 +176,27 @@ def test_ctxt_missing_data(self): self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] self.assertEqual({}, mon_ctxt()) + + def test_ctxt_inconsistent_auths(self): + self.socket.gethostname.return_value = '10.0.0.10' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + auths = ['cephx', 'cephy', 'cephz'] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return auths.pop() + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + expect = { + 'auth_supported': 'none', + 'embedded_webserver': False, + 'hostname': '10.0.0.10', + 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'old_auth': False, + 'use_syslog': 'false' + } + self.assertEqual(expect, mon_ctxt()) From 3ce08a6d78cd931b5d798c5a2c61cdfe78b4d1d3 Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 12 Oct 2015 07:48:38 -0700 Subject: [PATCH 0874/2699] [thedac, trivial] Add test_ctxt_consistent_auths --- .../unit_tests/test_ceph_radosgw_context.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 5bd801d7..4394f683 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -200,3 +200,27 @@ def _relation_get(attr, unit, rid): 'use_syslog': 'false' } self.assertEqual(expect, mon_ctxt()) + + def test_ctxt_consistent_auths(self): + self.socket.gethostname.return_value = '10.0.0.10' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + auths = ['cephx', 'cephx', 'cephx'] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return auths.pop() + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + expect = { + 'auth_supported': 'cephx', + 'embedded_webserver': False, + 'hostname': '10.0.0.10', + 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'old_auth': False, + 'use_syslog': 'false' + } + self.assertEqual(expect, mon_ctxt()) From 668a39ee59441151e6409ab833381c15368d700e Mon Sep 17 00:00:00 
2001 From: James Page Date: Fri, 30 Oct 2015 11:15:38 +0900 Subject: [PATCH 0875/2699] Add tox configurations and requirements definitions --- ceph-proxy/.testr.conf | 8 +++++ ceph-proxy/hooks/ceph_hooks.py | 4 +-- .../requirements/requirements-precise.txt | 6 ++++ .../requirements/requirements-trusty.txt | 7 ++++ ceph-proxy/requirements/test-requirements.txt | 7 ++++ ceph-proxy/tox.ini | 35 +++++++++++++++++++ 6 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 ceph-proxy/.testr.conf create mode 100644 ceph-proxy/requirements/requirements-precise.txt create mode 100644 ceph-proxy/requirements/requirements-trusty.txt create mode 100644 ceph-proxy/requirements/test-requirements.txt create mode 100644 ceph-proxy/tox.ini diff --git a/ceph-proxy/.testr.conf b/ceph-proxy/.testr.conf new file mode 100644 index 00000000..801646bb --- /dev/null +++ b/ceph-proxy/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index ccd575ab..9d733637 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -146,8 +146,8 @@ def config_changed(): umount(e_mountpoint) osd_journal = config('osd-journal') - if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) - and os.path.exists(osd_journal)): + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and + os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') diff --git a/ceph-proxy/requirements/requirements-precise.txt b/ceph-proxy/requirements/requirements-precise.txt new file mode 100644 index 00000000..21ee7d12 --- /dev/null +++ b/ceph-proxy/requirements/requirements-precise.txt @@ -0,0 +1,6 @@ +PyYAML==3.10 +simplejson==2.3.2 +netifaces==0.8 +netaddr==0.7.10 +Jinja2==2.6 +six==1.1.0 diff --git a/ceph-proxy/requirements/requirements-trusty.txt b/ceph-proxy/requirements/requirements-trusty.txt new file mode 100644 index 00000000..b73a7e45 --- /dev/null +++ b/ceph-proxy/requirements/requirements-trusty.txt @@ -0,0 +1,7 @@ +PyYAML>=3.10 +simplejson>=3.3.1 +netifaces>=0.8 +netaddr>=0.7.10 +Jinja2>=2.7.2 +six>=1.5.2 +dnspython diff --git a/ceph-proxy/requirements/test-requirements.txt b/ceph-proxy/requirements/test-requirements.txt new file mode 100644 index 00000000..ff4fb63b --- /dev/null +++ b/ceph-proxy/requirements/test-requirements.txt @@ -0,0 +1,7 @@ +testtools +coverage +mock +flake8==2.1.0 +# No version required +charm-tools +os-testr diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini new file mode 100644 index 00000000..e9cf1d53 --- /dev/null +++ b/ceph-proxy/tox.ini @@ -0,0 +1,35 @@ +[tox] +# Default to current LTS +envlist = lint,py27-trusty +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +install_command = + pip install --allow-unverified python-apt {opts} {packages} +commands = ostestr {posargs} + +[testenv:py27-precise] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-precise.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:py27-trusty] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:lint] +basepython = python2.7 +deps = 
-r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt +commands = flake8 {posargs} hooks unit_tests tests + charm proof + +[testenv:venv] +commands = {posargs} + +[flake8] +ignore = E402,E226 +exclude = hooks/charmhelpers From b95ed78e81a3b15f24e46c9a0120104abd8cf17a Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 11:15:38 +0900 Subject: [PATCH 0876/2699] Add tox configurations and requirements definitions --- ceph-mon/.testr.conf | 8 +++++ ceph-mon/hooks/ceph_hooks.py | 4 +-- .../requirements/requirements-precise.txt | 6 ++++ ceph-mon/requirements/requirements-trusty.txt | 7 ++++ ceph-mon/requirements/test-requirements.txt | 7 ++++ ceph-mon/tox.ini | 35 +++++++++++++++++++ 6 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 ceph-mon/.testr.conf create mode 100644 ceph-mon/requirements/requirements-precise.txt create mode 100644 ceph-mon/requirements/requirements-trusty.txt create mode 100644 ceph-mon/requirements/test-requirements.txt create mode 100644 ceph-mon/tox.ini diff --git a/ceph-mon/.testr.conf b/ceph-mon/.testr.conf new file mode 100644 index 00000000..801646bb --- /dev/null +++ b/ceph-mon/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ccd575ab..9d733637 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -146,8 +146,8 @@ def config_changed(): umount(e_mountpoint) osd_journal = config('osd-journal') - if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) - and os.path.exists(osd_journal)): + if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and + os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) with open(JOURNAL_ZAPPED, 'w') as zapped: zapped.write('DONE') diff --git a/ceph-mon/requirements/requirements-precise.txt b/ceph-mon/requirements/requirements-precise.txt new file mode 100644 index 00000000..21ee7d12 --- /dev/null +++ b/ceph-mon/requirements/requirements-precise.txt @@ -0,0 +1,6 @@ +PyYAML==3.10 +simplejson==2.3.2 +netifaces==0.8 +netaddr==0.7.10 +Jinja2==2.6 +six==1.1.0 diff --git a/ceph-mon/requirements/requirements-trusty.txt b/ceph-mon/requirements/requirements-trusty.txt new file mode 100644 index 00000000..b73a7e45 --- /dev/null +++ b/ceph-mon/requirements/requirements-trusty.txt @@ -0,0 +1,7 @@ +PyYAML>=3.10 +simplejson>=3.3.1 +netifaces>=0.8 +netaddr>=0.7.10 +Jinja2>=2.7.2 +six>=1.5.2 +dnspython diff --git a/ceph-mon/requirements/test-requirements.txt b/ceph-mon/requirements/test-requirements.txt new file mode 100644 index 00000000..ff4fb63b --- /dev/null +++ b/ceph-mon/requirements/test-requirements.txt @@ -0,0 +1,7 @@ +testtools +coverage +mock +flake8==2.1.0 +# No version required +charm-tools +os-testr diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini new file mode 100644 index 00000000..e9cf1d53 --- /dev/null +++ b/ceph-mon/tox.ini @@ -0,0 +1,35 @@ +[tox] +# Default to current LTS +envlist = lint,py27-trusty +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +install_command = + pip install --allow-unverified python-apt {opts} {packages} +commands = ostestr {posargs} + +[testenv:py27-precise] +basepython = python2.7 +deps = 
-r{toxinidir}/requirements/requirements-precise.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:py27-trusty] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:lint] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt +commands = flake8 {posargs} hooks unit_tests tests + charm proof + +[testenv:venv] +commands = {posargs} + +[flake8] +ignore = E402,E226 +exclude = hooks/charmhelpers From 68c38eb3ee98ae1156ea4594b56f1da422ec82f1 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 11:22:54 +0900 Subject: [PATCH 0877/2699] Add tox support --- ceph-osd/.bzrignore | 1 + ceph-osd/.testr.conf | 8 ++++ ceph-osd/.testrepository/0 | 28 ++++++++++++++ ceph-osd/.testrepository/1 | 28 ++++++++++++++ ceph-osd/.testrepository/failing | 0 ceph-osd/.testrepository/format | 1 + ceph-osd/.testrepository/next-stream | 1 + ceph-osd/.testrepository/times.dbm | Bin 0 -> 12288 bytes ceph-osd/hooks/ceph_hooks.py | 6 +-- .../requirements/requirements-precise.txt | 6 +++ ceph-osd/requirements/requirements-trusty.txt | 7 ++++ ceph-osd/requirements/test-requirements.txt | 7 ++++ ceph-osd/tox.ini | 35 ++++++++++++++++++ 13 files changed, 125 insertions(+), 3 deletions(-) create mode 100644 ceph-osd/.testr.conf create mode 100644 ceph-osd/.testrepository/0 create mode 100644 ceph-osd/.testrepository/1 create mode 100644 ceph-osd/.testrepository/failing create mode 100644 ceph-osd/.testrepository/format create mode 100644 ceph-osd/.testrepository/next-stream create mode 100644 ceph-osd/.testrepository/times.dbm create mode 100644 ceph-osd/requirements/requirements-precise.txt create mode 100644 ceph-osd/requirements/requirements-trusty.txt create mode 100644 ceph-osd/requirements/test-requirements.txt create mode 100644 ceph-osd/tox.ini diff --git a/ceph-osd/.bzrignore b/ceph-osd/.bzrignore index df8ebfbf..6ae62c65 100644 --- a/ceph-osd/.bzrignore +++ b/ceph-osd/.bzrignore @@ -1,3 +1,4 @@ .coverage .project +.tox bin diff --git a/ceph-osd/.testr.conf b/ceph-osd/.testr.conf new file mode 100644 index 00000000..801646bb --- /dev/null +++ b/ceph-osd/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/ceph-osd/.testrepository/0 b/ceph-osd/.testrepository/0 new file mode 100644 index 00000000..9bf1a840 --- /dev/null +++ b/ceph-osd/.testrepository/0 @@ -0,0 +1,28 @@ +time: 2015-10-30 02:21:00.720250Z +tags: worker-0 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks +time: 2015-10-30 02:21:00.754778Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks [ multipart +] +tags: -worker-0 +time: 2015-10-30 02:21:00.730741Z +tags: worker-1 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete +time: 2015-10-30 02:21:00.758154Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete [ multipart +] +tags: -worker-1 +time: 2015-10-30 02:21:00.743827Z +tags: worker-2 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation +time: 
2015-10-30 02:21:00.772254Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation [ multipart +] +tags: -worker-2 +time: 2015-10-30 02:21:00.755015Z +tags: worker-3 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks +time: 2015-10-30 02:21:00.786637Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks [ multipart +] +tags: -worker-3 diff --git a/ceph-osd/.testrepository/1 b/ceph-osd/.testrepository/1 new file mode 100644 index 00000000..4b986ba9 --- /dev/null +++ b/ceph-osd/.testrepository/1 @@ -0,0 +1,28 @@ +time: 2015-10-30 02:22:03.291297Z +tags: worker-0 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks +time: 2015-10-30 02:22:03.320167Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks [ multipart +] +tags: -worker-0 +time: 2015-10-30 02:22:03.297768Z +tags: worker-3 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete +time: 2015-10-30 02:22:03.324847Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete [ multipart +] +tags: -worker-3 +time: 2015-10-30 02:22:03.309743Z +tags: worker-2 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation +time: 2015-10-30 02:22:03.338165Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation [ multipart +] +tags: -worker-2 +time: 2015-10-30 02:22:03.304786Z +tags: worker-1 +test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks +time: 2015-10-30 02:22:03.342692Z +successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks [ multipart +] +tags: -worker-1 diff --git a/ceph-osd/.testrepository/failing b/ceph-osd/.testrepository/failing new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/.testrepository/format b/ceph-osd/.testrepository/format new file mode 100644 index 00000000..d00491fd --- /dev/null +++ b/ceph-osd/.testrepository/format @@ -0,0 +1 @@ +1 diff --git a/ceph-osd/.testrepository/next-stream b/ceph-osd/.testrepository/next-stream new file mode 100644 index 00000000..0cfbf088 --- /dev/null +++ b/ceph-osd/.testrepository/next-stream @@ -0,0 +1 @@ +2 diff --git a/ceph-osd/.testrepository/times.dbm b/ceph-osd/.testrepository/times.dbm new file mode 100644 index 0000000000000000000000000000000000000000..a2a93fd6cff6dded6b92b259c174ca5229f2c29d GIT binary patch literal 12288 zcmeI%ze>a~9KiAPhC_#5Tm%;%!BVT(yF~;C!S$Rklrv!Qu9YU|Te!M72`)a4tDCD& zVbWf}6Sq6W;akZ6@Jsn*N+N_1kxTw7cKc%6S9V<5@)aR^U9plso?VIq*E8AMe~%Bl zAs-{e36tL*ZYIC=hjB^-5I_I{1Q0*~0R#|0009IL_&b50XY{`K7>M{)KFbSvDnAFV z#fJa_2q1s}0tg_000IagfWW^Nh@vz}v*B@;MT@Gm%4%ax;&WweVHYMD>*l_k>T%n; zas%gu(cO%~7;VgQqN-{&uU(9~QH`DzwyY~3{3su#Cu;`}k*D?iZl=3.10 +simplejson>=3.3.1 +netifaces>=0.8 +netaddr>=0.7.10 +Jinja2>=2.7.2 +six>=1.5.2 +dnspython diff --git a/ceph-osd/requirements/test-requirements.txt b/ceph-osd/requirements/test-requirements.txt new file mode 100644 index 00000000..ff4fb63b --- /dev/null +++ b/ceph-osd/requirements/test-requirements.txt @@ -0,0 +1,7 @@ +testtools +coverage +mock +flake8==2.1.0 +# No version required +charm-tools +os-testr diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini new file mode 100644 index 00000000..e9cf1d53 --- /dev/null +++ b/ceph-osd/tox.ini @@ -0,0 +1,35 @@ 
+[tox] +# Default to current LTS +envlist = lint,py27-trusty +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +install_command = + pip install --allow-unverified python-apt {opts} {packages} +commands = ostestr {posargs} + +[testenv:py27-precise] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-precise.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:py27-trusty] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt + +[testenv:lint] +basepython = python2.7 +deps = -r{toxinidir}/requirements/requirements-trusty.txt + -r{toxinidir}/requirements/test-requirements.txt +commands = flake8 {posargs} hooks unit_tests tests + charm proof + +[testenv:venv] +commands = {posargs} + +[flake8] +ignore = E402,E226 +exclude = hooks/charmhelpers From 7de10466ce872edd804a966679b3f43053dae849 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 11:23:36 +0900 Subject: [PATCH 0878/2699] Drop testrepository from bzr --- ceph-osd/.bzrignore | 1 + ceph-osd/.testrepository/0 | 28 --------------------------- ceph-osd/.testrepository/1 | 28 --------------------------- ceph-osd/.testrepository/failing | 0 ceph-osd/.testrepository/format | 1 - ceph-osd/.testrepository/next-stream | 1 - ceph-osd/.testrepository/times.dbm | Bin 12288 -> 0 bytes 7 files changed, 1 insertion(+), 58 deletions(-) delete mode 100644 ceph-osd/.testrepository/0 delete mode 100644 ceph-osd/.testrepository/1 delete mode 100644 ceph-osd/.testrepository/failing delete mode 100644 ceph-osd/.testrepository/format delete mode 100644 ceph-osd/.testrepository/next-stream delete mode 100644 ceph-osd/.testrepository/times.dbm diff --git a/ceph-osd/.bzrignore b/ceph-osd/.bzrignore index 6ae62c65..ed5e46cf 100644 --- a/ceph-osd/.bzrignore +++ b/ceph-osd/.bzrignore @@ -1,4 +1,5 @@ .coverage .project .tox +.testrepository bin diff --git a/ceph-osd/.testrepository/0 b/ceph-osd/.testrepository/0 deleted file mode 100644 index 9bf1a840..00000000 --- a/ceph-osd/.testrepository/0 +++ /dev/null @@ -1,28 +0,0 @@ -time: 2015-10-30 02:21:00.720250Z -tags: worker-0 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks -time: 2015-10-30 02:21:00.754778Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks [ multipart -] -tags: -worker-0 -time: 2015-10-30 02:21:00.730741Z -tags: worker-1 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete -time: 2015-10-30 02:21:00.758154Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete [ multipart -] -tags: -worker-1 -time: 2015-10-30 02:21:00.743827Z -tags: worker-2 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation -time: 2015-10-30 02:21:00.772254Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation [ multipart -] -tags: -worker-2 -time: 2015-10-30 02:21:00.755015Z -tags: worker-3 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks -time: 2015-10-30 02:21:00.786637Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks [ multipart -] -tags: -worker-3 diff --git a/ceph-osd/.testrepository/1 b/ceph-osd/.testrepository/1 deleted file mode 100644 index 4b986ba9..00000000 --- a/ceph-osd/.testrepository/1 +++ 
/dev/null @@ -1,28 +0,0 @@ -time: 2015-10-30 02:22:03.291297Z -tags: worker-0 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks -time: 2015-10-30 02:22:03.320167Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_disks [ multipart -] -tags: -worker-0 -time: 2015-10-30 02:22:03.297768Z -tags: worker-3 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete -time: 2015-10-30 02:22:03.324847Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_relation_incomplete [ multipart -] -tags: -worker-3 -time: 2015-10-30 02:22:03.309743Z -tags: worker-2 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation -time: 2015-10-30 02:22:03.338165Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_no_monitor_relation [ multipart -] -tags: -worker-2 -time: 2015-10-30 02:22:03.304786Z -tags: worker-1 -test: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks -time: 2015-10-30 02:22:03.342692Z -successful: unit_tests.test_status.ServiceStatusTestCase.test_assess_status_monitor_complete_no_disks [ multipart -] -tags: -worker-1 diff --git a/ceph-osd/.testrepository/failing b/ceph-osd/.testrepository/failing deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-osd/.testrepository/format b/ceph-osd/.testrepository/format deleted file mode 100644 index d00491fd..00000000 --- a/ceph-osd/.testrepository/format +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/ceph-osd/.testrepository/next-stream b/ceph-osd/.testrepository/next-stream deleted file mode 100644 index 0cfbf088..00000000 --- a/ceph-osd/.testrepository/next-stream +++ /dev/null @@ -1 +0,0 @@ -2 diff --git a/ceph-osd/.testrepository/times.dbm b/ceph-osd/.testrepository/times.dbm deleted file mode 100644 index a2a93fd6cff6dded6b92b259c174ca5229f2c29d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI%ze>a~9KiAPhC_#5Tm%;%!BVT(yF~;C!S$Rklrv!Qu9YU|Te!M72`)a4tDCD& zVbWf}6Sq6W;akZ6@Jsn*N+N_1kxTw7cKc%6S9V<5@)aR^U9plso?VIq*E8AMe~%Bl zAs-{e36tL*ZYIC=hjB^-5I_I{1Q0*~0R#|0009IL_&b50XY{`K7>M{)KFbSvDnAFV z#fJa_2q1s}0tg_000IagfWW^Nh@vz}v*B@;MT@Gm%4%ax;&WweVHYMD>*l_k>T%n; zas%gu(cO%~7;VgQqN-{&uU(9~QH`DzwyY~3{3su#Cu;`}k*D?iZl Date: Fri, 30 Oct 2015 12:30:49 +0900 Subject: [PATCH 0879/2699] Make things a lot simpler --- ...requirements-trusty.txt => requirements.txt} | 0 .../requirements/requirements-precise.txt | 6 ------ .../{requirements => }/test-requirements.txt | 0 ceph-proxy/tox.ini | 17 ++++++----------- 4 files changed, 6 insertions(+), 17 deletions(-) rename ceph-proxy/{requirements/requirements-trusty.txt => requirements.txt} (100%) delete mode 100644 ceph-proxy/requirements/requirements-precise.txt rename ceph-proxy/{requirements => }/test-requirements.txt (100%) diff --git a/ceph-proxy/requirements/requirements-trusty.txt b/ceph-proxy/requirements.txt similarity index 100% rename from ceph-proxy/requirements/requirements-trusty.txt rename to ceph-proxy/requirements.txt diff --git a/ceph-proxy/requirements/requirements-precise.txt b/ceph-proxy/requirements/requirements-precise.txt deleted file mode 100644 index 21ee7d12..00000000 --- a/ceph-proxy/requirements/requirements-precise.txt +++ /dev/null @@ -1,6 +0,0 @@ -PyYAML==3.10 -simplejson==2.3.2 -netifaces==0.8 -netaddr==0.7.10 -Jinja2==2.6 -six==1.1.0 diff --git 
a/ceph-proxy/requirements/test-requirements.txt b/ceph-proxy/test-requirements.txt similarity index 100% rename from ceph-proxy/requirements/test-requirements.txt rename to ceph-proxy/test-requirements.txt diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index e9cf1d53..6bdf5757 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -1,6 +1,6 @@ [tox] # Default to current LTS -envlist = lint,py27-trusty +envlist = lint,py27 skipsdist = True [testenv] @@ -10,20 +10,15 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -[testenv:py27-precise] +[testenv:py27] basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-precise.txt - -r{toxinidir}/requirements/test-requirements.txt - -[testenv:py27-trusty] -basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt [testenv:lint] basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests charm proof From 70a066c8ce94e6911efa430a726027aa7c829631 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 12:30:49 +0900 Subject: [PATCH 0880/2699] Make things a lot simpler --- ...requirements-trusty.txt => requirements.txt} | 0 ceph-mon/requirements/requirements-precise.txt | 6 ------ .../{requirements => }/test-requirements.txt | 0 ceph-mon/tox.ini | 17 ++++++----------- 4 files changed, 6 insertions(+), 17 deletions(-) rename ceph-mon/{requirements/requirements-trusty.txt => requirements.txt} (100%) delete mode 100644 ceph-mon/requirements/requirements-precise.txt rename ceph-mon/{requirements => }/test-requirements.txt (100%) diff --git a/ceph-mon/requirements/requirements-trusty.txt b/ceph-mon/requirements.txt similarity index 100% rename from ceph-mon/requirements/requirements-trusty.txt rename to ceph-mon/requirements.txt diff --git a/ceph-mon/requirements/requirements-precise.txt b/ceph-mon/requirements/requirements-precise.txt deleted file mode 100644 index 21ee7d12..00000000 --- a/ceph-mon/requirements/requirements-precise.txt +++ /dev/null @@ -1,6 +0,0 @@ -PyYAML==3.10 -simplejson==2.3.2 -netifaces==0.8 -netaddr==0.7.10 -Jinja2==2.6 -six==1.1.0 diff --git a/ceph-mon/requirements/test-requirements.txt b/ceph-mon/test-requirements.txt similarity index 100% rename from ceph-mon/requirements/test-requirements.txt rename to ceph-mon/test-requirements.txt diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index e9cf1d53..6bdf5757 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -1,6 +1,6 @@ [tox] # Default to current LTS -envlist = lint,py27-trusty +envlist = lint,py27 skipsdist = True [testenv] @@ -10,20 +10,15 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -[testenv:py27-precise] +[testenv:py27] basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-precise.txt - -r{toxinidir}/requirements/test-requirements.txt - -[testenv:py27-trusty] -basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt [testenv:lint] basepython = python2.7 -deps = 
-r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests charm proof From e733a618bc987ff6e2b5f14a02fa2bd5e6dbd9c4 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 12:37:15 +0900 Subject: [PATCH 0881/2699] Make things a lot simpler --- ...equirements-trusty.txt => requirements.txt} | 0 ceph-osd/requirements/requirements-precise.txt | 6 ------ .../{requirements => }/test-requirements.txt | 0 ceph-osd/tox.ini | 18 ++++++------------ 4 files changed, 6 insertions(+), 18 deletions(-) rename ceph-osd/{requirements/requirements-trusty.txt => requirements.txt} (100%) delete mode 100644 ceph-osd/requirements/requirements-precise.txt rename ceph-osd/{requirements => }/test-requirements.txt (100%) diff --git a/ceph-osd/requirements/requirements-trusty.txt b/ceph-osd/requirements.txt similarity index 100% rename from ceph-osd/requirements/requirements-trusty.txt rename to ceph-osd/requirements.txt diff --git a/ceph-osd/requirements/requirements-precise.txt b/ceph-osd/requirements/requirements-precise.txt deleted file mode 100644 index 21ee7d12..00000000 --- a/ceph-osd/requirements/requirements-precise.txt +++ /dev/null @@ -1,6 +0,0 @@ -PyYAML==3.10 -simplejson==2.3.2 -netifaces==0.8 -netaddr==0.7.10 -Jinja2==2.6 -six==1.1.0 diff --git a/ceph-osd/requirements/test-requirements.txt b/ceph-osd/test-requirements.txt similarity index 100% rename from ceph-osd/requirements/test-requirements.txt rename to ceph-osd/test-requirements.txt diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index e9cf1d53..4e328e48 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -1,6 +1,5 @@ [tox] -# Default to current LTS -envlist = lint,py27-trusty +envlist = lint,py27 skipsdist = True [testenv] @@ -10,20 +9,15 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -[testenv:py27-precise] +[testenv:py27] basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-precise.txt - -r{toxinidir}/requirements/test-requirements.txt - -[testenv:py27-trusty] -basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt [testenv:lint] basepython = python2.7 -deps = -r{toxinidir}/requirements/requirements-trusty.txt - -r{toxinidir}/requirements/test-requirements.txt +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests charm proof From e566087d396fdf3780bca500597f5e57f3b6cb74 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 15:04:43 +0900 Subject: [PATCH 0882/2699] Resync tox integration --- ceph-proxy/requirements.txt | 18 +++++++++++------- ceph-proxy/test-requirements.txt | 13 +++++++------ ceph-proxy/tox.ini | 1 - 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index b73a7e45..426002dc 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -1,7 +1,11 @@ -PyYAML>=3.10 -simplejson>=3.3.1 -netifaces>=0.8 -netaddr>=0.7.10 -Jinja2>=2.7.2 -six>=1.5.2 -dnspython +# The order of packages is significant, because pip processes them in the order +# of appearance. 
Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +PyYAML>=3.1.0 +simplejson>=2.2.0 +netifaces>=0.10.4 +netaddr>=0.7.12,!=0.7.16 +Jinja2>=2.6 # BSD License (3 clause) +six>=1.9.0 +dnspython>=1.12.0 +psutil>=1.1.1,<2.0.0 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index ff4fb63b..3af44d73 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -1,7 +1,8 @@ -testtools -coverage -mock -flake8==2.1.0 -# No version required +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 charm-tools -os-testr diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 6bdf5757..4e328e48 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -1,5 +1,4 @@ [tox] -# Default to current LTS envlist = lint,py27 skipsdist = True From 6ede5f46bd738a6f37a66cb625eb73c32172059c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 15:04:43 +0900 Subject: [PATCH 0883/2699] Resync tox integration --- ceph-mon/requirements.txt | 18 +++++++++++------- ceph-mon/test-requirements.txt | 13 +++++++------ ceph-mon/tox.ini | 1 - 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index b73a7e45..426002dc 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -1,7 +1,11 @@ -PyYAML>=3.10 -simplejson>=3.3.1 -netifaces>=0.8 -netaddr>=0.7.10 -Jinja2>=2.7.2 -six>=1.5.2 -dnspython +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +PyYAML>=3.1.0 +simplejson>=2.2.0 +netifaces>=0.10.4 +netaddr>=0.7.12,!=0.7.16 +Jinja2>=2.6 # BSD License (3 clause) +six>=1.9.0 +dnspython>=1.12.0 +psutil>=1.1.1,<2.0.0 diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index ff4fb63b..3af44d73 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -1,7 +1,8 @@ -testtools -coverage -mock -flake8==2.1.0 -# No version required +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 charm-tools -os-testr diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 6bdf5757..4e328e48 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -1,5 +1,4 @@ [tox] -# Default to current LTS envlist = lint,py27 skipsdist = True From 9ae3bb2d981fa6e00edd87b0adea2c927896d84d Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 30 Oct 2015 15:05:27 +0900 Subject: [PATCH 0884/2699] resync requirements with openstack upstream --- ceph-osd/requirements.txt | 18 +++++++++++------- ceph-osd/test-requirements.txt | 13 +++++++------ 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index b73a7e45..426002dc 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -1,7 +1,11 @@ -PyYAML>=3.10 -simplejson>=3.3.1 -netifaces>=0.8 -netaddr>=0.7.10 -Jinja2>=2.7.2 -six>=1.5.2 -dnspython +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +PyYAML>=3.1.0 +simplejson>=2.2.0 +netifaces>=0.10.4 +netaddr>=0.7.12,!=0.7.16 +Jinja2>=2.6 # BSD License (3 clause) +six>=1.9.0 +dnspython>=1.12.0 +psutil>=1.1.1,<2.0.0 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index ff4fb63b..3af44d73 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -1,7 +1,8 @@ -testtools -coverage -mock -flake8==2.1.0 -# No version required +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 charm-tools -os-testr From 08d5c65a9e40b5a986ccbb2310a50fb9b7250698 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 3 Nov 2015 11:58:54 +0000 Subject: [PATCH 0885/2699] Add tox support --- ceph-radosgw/.bzrignore | 6 +++--- ceph-radosgw/.testr.conf | 8 +++++++ ceph-radosgw/requirements.txt | 11 ++++++++++ ceph-radosgw/test-requirements.txt | 8 +++++++ ceph-radosgw/tox.ini | 29 ++++++++++++++++++++++++++ ceph-radosgw/unit_tests/test_hooks.py | 30 +++++++++++++-------------- 6 files changed, 74 insertions(+), 18 deletions(-) create mode 100644 ceph-radosgw/.testr.conf create mode 100644 ceph-radosgw/requirements.txt create mode 100644 ceph-radosgw/test-requirements.txt create mode 100644 ceph-radosgw/tox.ini diff --git a/ceph-radosgw/.bzrignore b/ceph-radosgw/.bzrignore index e06c3a88..9ad1b67f 100644 --- a/ceph-radosgw/.bzrignore +++ b/ceph-radosgw/.bzrignore @@ -1,5 +1,5 @@ -.project -.pydevproject bin -.coveragerc .coverage +.testrepository +.tox +tags diff --git a/ceph-radosgw/.testr.conf b/ceph-radosgw/.testr.conf new file mode 100644 index 00000000..801646bb --- /dev/null +++ b/ceph-radosgw/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt new file mode 100644 index 00000000..426002dc --- /dev/null +++ b/ceph-radosgw/requirements.txt @@ -0,0 +1,11 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +PyYAML>=3.1.0 +simplejson>=2.2.0 +netifaces>=0.10.4 +netaddr>=0.7.12,!=0.7.16 +Jinja2>=2.6 # BSD License (3 clause) +six>=1.9.0 +dnspython>=1.12.0 +psutil>=1.1.1,<2.0.0 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt new file mode 100644 index 00000000..3af44d73 --- /dev/null +++ b/ceph-radosgw/test-requirements.txt @@ -0,0 +1,8 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 +charm-tools diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini new file mode 100644 index 00000000..4e328e48 --- /dev/null +++ b/ceph-radosgw/tox.ini @@ -0,0 +1,29 @@ +[tox] +envlist = lint,py27 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +install_command = + pip install --allow-unverified python-apt {opts} {packages} +commands = ostestr {posargs} + +[testenv:py27] +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:lint] +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} hooks unit_tests tests + charm proof + +[testenv:venv] +commands = {posargs} + +[flake8] +ignore = E402,E226 +exclude = hooks/charmhelpers diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index ec9279d7..9b17cd10 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -85,7 +85,7 @@ def test_install_packages(self): self.test_config.set('use-ceph-optimised-packages', '') ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') - self.apt_update.assert_called() + self.assertTrue(self.apt_update.called) self.apt_install.assert_called_with(['libapache2-mod-fastcgi', 'apache2'], fatal=True) @@ -95,8 +95,8 @@ def test_install_optimised_packages_no_embedded(self): _install_packages = self.patch('install_ceph_optimised_packages') ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') - self.apt_update.assert_called() - _install_packages.assert_called() + self.assertTrue(self.apt_update.called) + self.assertTrue(_install_packages.called) self.apt_install.assert_called_with(['libapache2-mod-fastcgi', 'apache2'], fatal=True) @@ -106,8 +106,8 @@ def test_install_optimised_packages_embedded(self): _install_packages = self.patch('install_ceph_optimised_packages') ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') - self.apt_update.assert_called() - _install_packages.assert_called() + self.assertTrue(self.apt_update.called) + self.assertFalse(_install_packages.called) self.apt_install.assert_called_with(['radosgw', 'ntp', 'haproxy'], fatal=True) @@ -117,8 +117,8 @@ def test_install_optimised_packages_embedded(self): def test_install(self): _install_packages = self.patch('install_packages') ceph_hooks.install() - self.execd_preinstall.assert_called() - _install_packages.assert_called() + self.assertTrue(self.execd_preinstall.called) + self.assertTrue(_install_packages.called) self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') @@ -175,20 +175,20 @@ def test_config_changed(self): _apache_modules = self.patch('apache_modules') _apache_reload = self.patch('apache_reload') ceph_hooks.config_changed() - _install_packages.assert_called() + self.assertTrue(_install_packages.called) self.CONFIGS.write_all.assert_called_with() - _emit_apacheconf.assert_called() - _install_www_scripts.assert_called() - _apache_sites.assert_called() - _apache_modules.assert_called() - _apache_reload.assert_called() + self.assertTrue(_emit_apacheconf.called) + self.assertTrue(_install_www_scripts.called) + self.assertTrue(_apache_sites.called) + self.assertTrue(_apache_modules.called) + self.assertTrue(_apache_reload.called) def test_mon_relation(self): _ceph = self.patch('ceph') _restart = 
self.patch('restart') self.relation_get.return_value = 'seckey' ceph_hooks.mon_relation() - _restart.assert_called() + self.assertTrue(_restart.called) _ceph.import_radosgw_key.assert_called_with('seckey') self.CONFIGS.write_all.assert_called_with() @@ -275,7 +275,7 @@ def test_identity_changed(self): _restart = self.patch('restart') ceph_hooks.identity_changed() self.CONFIGS.write_all.assert_called_with() - _restart.assert_called() + self.assertTrue(_restart.called) @patch('charmhelpers.contrib.openstack.ip.is_clustered') @patch('charmhelpers.contrib.openstack.ip.unit_get') From fc7c45d8e545142a1791c61e5f7da80be583d1e4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 12 Nov 2015 16:31:42 +0000 Subject: [PATCH 0886/2699] [hopem,r=] Enable s3 keystone when using keystone for auth. Closes-Bug: 1515688 --- ceph-radosgw/templates/ceph.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 85c72c5a..e1c95fce 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -29,5 +29,6 @@ rgw keystone admin token = {{ admin_token }} rgw keystone accepted roles = {{ user_roles }} rgw keystone token cache size = {{ cache_size }} rgw keystone revocation interval = {{ revocation_check_interval }} +rgw s3 auth use keystone = true #nss db path = /var/lib/ceph/nss {% endif %} From 779809ef313b16c4c9076b938db9e23174ea09b2 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 Nov 2015 10:29:56 +0000 Subject: [PATCH 0887/2699] Update maintainer --- ceph-proxy/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 5afda9ed..3c5f9262 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,6 +1,6 @@ name: ceph summary: Highly scalable distributed storage -maintainer: James Page +maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 26f3d8e597eb16e350c5c49d7c03cc8ec0964120 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 Nov 2015 10:29:56 +0000 Subject: [PATCH 0888/2699] Update maintainer --- ceph-mon/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 5afda9ed..3c5f9262 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -1,6 +1,6 @@ name: ceph summary: Highly scalable distributed storage -maintainer: James Page +maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. 
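Note on the test changes in PATCH 0885 above: they systematically replace calls such as _restart.assert_called() with self.assertTrue(_restart.called). The likely motivation is a known footgun in the mock releases these charms pin (mock>=1.2 on Python 2.7): a Mock auto-creates any attribute you access, so invoking a not-yet-existing assert_called() method just fabricates and calls a child mock, and the "assertion" silently passes even when the mock was never invoked. Checking the .called flag asserts for real on every mock version. Below is a minimal, self-contained sketch of both behaviours; the class and variable names are illustrative only and are not part of the charm code.

import unittest

try:
    from unittest import mock  # Python 3.3+
except ImportError:
    import mock  # external package, as used by these Python 2 charms


class MockCalledIdiomTest(unittest.TestCase):
    def test_called_flag_asserts_for_real(self):
        restart = mock.Mock()
        # Nothing has invoked restart() yet, so the flag must be False.
        # On old mock releases, restart.assert_called() at this point
        # would NOT fail: attribute access fabricates a child mock, so
        # the "assertion" is a no-op call that always succeeds.
        self.assertFalse(restart.called)
        restart()
        # After a call, .called flips to True on every mock version.
        self.assertTrue(restart.called)


if __name__ == '__main__':
    unittest.main()

The same flag also supports the negative checks used later in this series (for example self.assertFalse(_ceph.import_radosgw_key.called) in PATCH 0891), which a bare assert_called() could not express.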
From f6e07a20b2fee4836bd021402dddd00f789c3ce8 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 Nov 2015 10:30:34 +0000 Subject: [PATCH 0889/2699] Update maintainer --- ceph-osd/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 74e207e3..dad778db 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -1,6 +1,6 @@ name: ceph-osd summary: Highly scalable distributed storage - Ceph OSD storage -maintainer: James Page +maintainer: OpenStack Charmers provides: nrpe-external-master: interface: nrpe-external-master From 0c67b60551330c172d182603acf044c03dffed4e Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 Nov 2015 10:31:05 +0000 Subject: [PATCH 0890/2699] Update maintainer --- ceph-radosgw/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 59d6d9d9..a442d5d8 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -1,6 +1,6 @@ name: ceph-radosgw summary: Highly scalable distributed storage - RADOS HTTP Gateway -maintainer: James Page +maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 29791f7f4c0645bbbd75f84bea4252e20074ba1c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 11:56:09 +0000 Subject: [PATCH 0891/2699] [hopem,r=] Configure RGW pools with optimal settings. Partially-Closes-Bug: 1476749 --- ceph-radosgw/config.yaml | 8 ++++++ ceph-radosgw/hooks/ceph.py | 38 +++++++++++++++++++++++++++ ceph-radosgw/hooks/hooks.py | 24 ++++++++++++----- ceph-radosgw/unit_tests/test_hooks.py | 15 +++++++++++ 4 files changed, 79 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 8a9e76eb..325ccfd8 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -99,3 +99,11 @@ options: the following public endpoint for the ceph-radosgw: https://files.example.com:80/swift/v1 + ceph-osd-replication-count: + type: int + default: 3 + description: | + This value dictates the number of replicas ceph must make of any object + it stores within RGW pools. Note that once the RGW pools have been + created, changing this value will not have any effect (although it can be + changed in ceph by manually configuring your ceph cluster). diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index ffff7fc0..a42e6c2f 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -14,6 +14,14 @@ from socket import gethostname as get_unit_hostname +from charmhelpers.core.hookenv import ( + config, +) + +from charmhelpers.contrib.storage.linux.ceph import ( + CephBrokerRq, +) + LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] @@ -219,3 +227,33 @@ def get_named_key(name, caps=None): if 'key' in element: key = element.split(' = ')[1].strip() # IGNORE:E1103 return key + + +def get_create_rgw_pools_rq(): + """Pre-create RGW pools so that they have the correct settings. + + When RGW creates its own pools it will create them with non-optimal + settings (LP: #1476749). 
+ """ + rq = CephBrokerRq() + replicas = config('ceph-osd-replication-count') + + # Buckets likely to contain the most data and therefore requiring the most + # PGs + heavy = ['.rgw.buckets'] + + for pool in heavy: + rq.add_op_create_pool(name=pool, replica_count=replicas) + + # TODO: we want these pools to have a smaller pg_num/pgp_num than the + # others but do not currently have the ability to override this with the + # broker api (LP: #1517846). Right now omit this so that the remaining + # pools are created when the RGW is installed. + # + # Buckets not expected to contain too much data + #light = ['.rgw', '.rgw.buckets.index', '.rgw.control', '.rgw.gc', + # '.rgw.root'] + #for pool in light: + # rq.add_op_create_pool(name=pool, replica_count=replicas) + + return rq diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8898706b..0f923855 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -20,7 +20,9 @@ unit_get, open_port, relation_set, - log, ERROR, + log, + DEBUG, + ERROR, Hooks, UnregisteredHookError, status_set, ) @@ -58,6 +60,11 @@ from charmhelpers.contrib.openstack.utils import ( set_os_workload_status, ) +from charmhelpers.contrib.storage.linux.ceph import ( + send_request_if_needed, + is_request_complete, +) + hooks = Hooks() CONFIGS = register_configs() @@ -178,11 +185,16 @@ def config_changed(): 'mon-relation-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): - CONFIGS.write_all() - key = relation_get('radosgw_key') - if key: - ceph.import_radosgw_key(key) - restart() # TODO figure out a better way todo this + rq = ceph.get_create_rgw_pools_rq() + if is_request_complete(rq): + log('Broker request complete', level=DEBUG) + CONFIGS.write_all() + key = relation_get('radosgw_key') + if key: + ceph.import_radosgw_key(key) + restart() # TODO figure out a better way todo this + else: + send_request_if_needed(rq) @hooks.hook('gateway-relation-joined') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 9b17cd10..b507320b 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -183,6 +183,7 @@ def test_config_changed(self): self.assertTrue(_apache_modules.called) self.assertTrue(_apache_reload.called) + @patch.object(ceph_hooks, 'is_request_complete', lambda *args: True) def test_mon_relation(self): _ceph = self.patch('ceph') _restart = self.patch('restart') @@ -192,6 +193,7 @@ def test_mon_relation(self): _ceph.import_radosgw_key.assert_called_with('seckey') self.CONFIGS.write_all.assert_called_with() + @patch.object(ceph_hooks, 'is_request_complete', lambda *args: True) def test_mon_relation_nokey(self): _ceph = self.patch('ceph') _restart = self.patch('restart') @@ -201,6 +203,19 @@ def test_mon_relation_nokey(self): self.assertFalse(_restart.called) self.CONFIGS.write_all.assert_called_with() + @patch.object(ceph_hooks, 'send_request_if_needed') + @patch.object(ceph_hooks, 'is_request_complete', lambda *args: False) + def test_mon_relation_send_broker_request(self, + mock_send_request_if_needed): + _ceph = self.patch('ceph') + _restart = self.patch('restart') + self.relation_get.return_value = 'seckey' + ceph_hooks.mon_relation() + self.assertFalse(_restart.called) + self.assertFalse(_ceph.import_radosgw_key.called) + self.assertFalse(self.CONFIGS.called) + self.assertTrue(mock_send_request_if_needed.called) + def test_gateway_relation(self): self.unit_get.return_value = 'myserver' 
ceph_hooks.gateway_relation() From 0b284a06358812074e21c69569a2a0c119f891d7 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 12:00:57 +0000 Subject: [PATCH 0892/2699] [hopem,r=] Add support for RGW relation to handle broker requests. Partially-Closes-Bug: 1476749 --- ceph-proxy/hooks/ceph_hooks.py | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 9d733637..ed6f2c14 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -290,16 +290,26 @@ def osd_relation(relid=None): def radosgw_relation(relid=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) + + """Process broker request(s).""" if ceph.is_quorum(): - log('mon cluster in quorum - providing radosgw with keys') - data = { - 'fsid': config('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), - } - relation_set(relation_id=relid, - relation_settings=data) + settings = relation_get(rid=relid) + if 'broker_req' in settings: + if not ceph.is_leader(): + log("Not leader - ignoring broker request", level=DEBUG) + else: + rsp = process_requests(settings['broker_req']) + unit_id = remote_unit().replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + log('mon cluster in quorum - providing radosgw with keys') + data = { + 'fsid': config('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + unit_response_key: rsp, + } + relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From 44cda5344a6402c89b9082498cff0d3548c2a06e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 12:00:57 +0000 Subject: [PATCH 0893/2699] [hopem,r=] Add support for RGW relation to handle broker requests. 
Partially-Closes-Bug: 1476749 --- ceph-mon/hooks/ceph_hooks.py | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 9d733637..ed6f2c14 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -290,16 +290,26 @@ def osd_relation(relid=None): def radosgw_relation(relid=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) + + """Process broker request(s).""" if ceph.is_quorum(): - log('mon cluster in quorum - providing radosgw with keys') - data = { - 'fsid': config('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), - } - relation_set(relation_id=relid, - relation_settings=data) + settings = relation_get(rid=relid) + if 'broker_req' in settings: + if not ceph.is_leader(): + log("Not leader - ignoring broker request", level=DEBUG) + else: + rsp = process_requests(settings['broker_req']) + unit_id = remote_unit().replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + log('mon cluster in quorum - providing radosgw with keys') + data = { + 'fsid': config('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + unit_response_key: rsp, + } + relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From d5aff2955da3af614d28760a80251f0b30929d61 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 13:37:13 +0000 Subject: [PATCH 0894/2699] charmhelpers sync --- .../hooks/charmhelpers/cli/__init__.py | 6 +- .../contrib/openstack/amulet/deployment.py | 104 +++++++++++++++++- .../contrib/openstack/amulet/utils.py | 28 ++++- .../charmhelpers/contrib/openstack/context.py | 34 ++++-- .../charmhelpers/contrib/openstack/neutron.py | 18 ++- .../charmhelpers/contrib/openstack/utils.py | 23 +++- .../contrib/storage/linux/ceph.py | 18 +-- .../contrib/storage/linux/loopback.py | 10 ++ .../hooks/charmhelpers/core/hookenv.py | 14 +++ ceph-radosgw/hooks/charmhelpers/core/host.py | 37 ++++++- .../hooks/charmhelpers/core/hugepage.py | 2 + .../charmhelpers/core/services/helpers.py | 7 +- .../hooks/charmhelpers/core/templating.py | 19 +++- .../hooks/charmhelpers/fetch/__init__.py | 2 +- .../contrib/openstack/amulet/deployment.py | 104 +++++++++++++++++- .../contrib/openstack/amulet/utils.py | 28 ++++- 16 files changed, 408 insertions(+), 46 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 722bc645..0506491b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py 
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. @@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services): stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + self.log.info('OpenStackAmuletDeployment: determine branch locations') + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] @@ -82,6 +109,8 @@ def _determine_branch_locations(self, other_services): def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -95,7 +124,8 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: @@ -111,9 +141,79 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. 
+ + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 2b3087ea..388b60e6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): '{}'.format(sample_type, samples)) return None -# rabbitmq/amqp specific helpers: + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. 
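[Editor's note: _auto_wait_for_status() above reduces test flakiness by blocking on Juju extended status rather than fixed sleeps. Its service-selection logic and the contains-vs-exact match semantics are small enough to re-create standalone; the helper name and service names below are illustrative:

    import re

    def select_services(all_services, exclude_services=None, include_only=None):
        # Mirrors the guard in _auto_wait_for_status(): the two
        # filters are mutually exclusive.
        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')
        if include_only:
            return list(include_only)
        return list(set(all_services) - set(exclude_services or []))

    # Default match: any status containing 'ready', case-insensitive.
    message = re.compile('.*ready.*', re.IGNORECASE)
    services = select_services(['ceph-mon', 'ceph-osd', 'rabbitmq-server'],
                               include_only=['rabbitmq-server'])
    service_messages = {service: message for service in services}
    # service_messages is what gets handed to sentry.wait_for_messages().

    # Anchored patterns force an exact match rather than a substring one:
    exact = re.compile('^Unit is ready and clustered$')
    assert exact.search('Unit is ready and clustered')
    assert not exact.search('Unit is ready')
]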
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 49c04de0..48216338 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -952,6 +952,19 @@ def pg_ctxt(self): 'config': config} return ovs_ctxt + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -973,6 +986,8 @@ def __call__(self): ctxt.update(self.nuage_ctxt()) elif self.plugin == 'plumgrid': ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -1105,7 +1120,7 @@ class SubordinateConfigContext(OSContextGenerator): ctxt = { ... other context ... 
- 'subordinate_config': { + 'subordinate_configuration': { 'DEFAULT': { 'key1': 'value1', }, @@ -1146,22 +1161,23 @@ def __call__(self): try: sub_config = json.loads(sub_config) except: - log('Could not parse JSON from subordinate_config ' - 'setting from %s' % rid, level=ERROR) + log('Could not parse JSON from ' + 'subordinate_configuration setting from %s' + % rid, level=ERROR) continue for service in self.services: if service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, service), - level=INFO) + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s service' + % (rid, service), level=INFO) continue sub_config = sub_config[service] if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s' + % (rid, self.config_file), level=INFO) continue sub_config = sub_config[self.config_file] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 2a59d86b..d17c847e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -204,11 +204,25 @@ def neutron_plugins(): database=config('database'), ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['plumgrid-lxc'], - ['iovisor-dkms']], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], 'server_packages': ['neutron-server', 'neutron-plugin-plumgrid'], 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [[headers_package()] + determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index eefcf08b..fc479a30 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -26,6 +26,7 @@ import six import traceback +import uuid import yaml from charmhelpers.contrib.network import ip @@ -41,6 +42,7 @@ log as juju_log, charm_dir, INFO, + related_units, relation_ids, relation_set, status_set, @@ -121,6 +123,7 @@ ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), ('2.4.0', 'liberty'), + ('2.5.0', 'liberty'), ]) # >= Liberty version->codename mapping @@ -858,7 +861,9 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): if charm_state != 'active' and charm_state != 'unknown': state = workload_state_compare(state, charm_state) if message: - message = "{} {}".format(message, charm_message) + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) else: message = charm_message @@ -975,3 +980,19 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): action_set({'outcome': 'no upgrade available.'}) return ret + + +def remote_restart(rel_name, remote_service=None): + trigger = { + 
'restart-trigger': str(uuid.uuid4()), + } + if remote_service: + trigger['remote-service'] = remote_service + for rid in relation_ids(rel_name): + # This subordinate can be related to two seperate services using + # different subordinate relations so only issue the restart if + # the principle is conencted down the relation we think it is + if related_units(relid=rid): + relation_set(relation_id=rid, + relation_settings=trigger, + ) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 83f264db..fd43371e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -202,10 +202,10 @@ def create_key_file(service, key): log('Created new keyfile at %s.' % keyfile, level=INFO) -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" hosts = [] - for r_id in relation_ids('ceph'): + for r_id in relation_ids(relation): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) @@ -357,14 +357,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None): +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. Returns False if no ceph key is available in relation state. """ key = None - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: @@ -540,7 +540,7 @@ def get_previous_request(rid): return request -def get_request_states(request): +def get_request_states(request, relation='ceph'): """Return a dict of requests per relation id with their corresponding completion state. @@ -552,7 +552,7 @@ def get_request_states(request): """ complete = [] requests = {} - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): complete = False previous_request = get_previous_request(rid) if request == previous_request: @@ -643,7 +643,7 @@ def get_broker_rsp_key(): return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(request): +def send_request_if_needed(request, relation='ceph'): """Send broker request if an equivalent request has not already been sent @param request: A CephBrokerRq object @@ -652,6 +652,6 @@ def send_request_if_needed(request): log('Request already sent but not complete, not sending new request', level=DEBUG) else: - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index c296f098..3a3f5146 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -76,3 +76,13 @@ def ensure_loopback_device(path, size): check_call(cmd) return create_loopback(path) + + +def is_mapped_loopback_device(device): + """ + Checks if a given device name is an existing/mapped loopback device. 
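[Editor's note: the new remote_restart() helper above leans on a detail of Juju relation data: -changed hooks only fire when a value actually changes, so writing a fresh uuid guarantees the remote principal sees a new 'restart-trigger' value on every call. A minimal illustration; the service name is hypothetical:

    import uuid

    # Each call produces a distinct value, so every invocation
    # propagates as a relation change on the remote side.
    trigger = {'restart-trigger': str(uuid.uuid4())}
    trigger['remote-service'] = 'nova-compute'   # hypothetical target
    print(trigger)
]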
+ :param device: str: Full path to the device (eg, /dev/loop1). + :returns: str: Path to the backing file if is a loopback device + empty string otherwise + """ + return loopback_devices().get(device, "") diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index c2bee134..454b52ae 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -820,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index cb3c527e..579871bc 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -67,7 +67,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - stopped = service_stop(service_name) + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if os.path.exists(upstart_file): @@ -105,7 +107,9 @@ def service_resume(service_name, init_dir="/etc/init", "Unable to detect {0} as either Upstart {1} or SysV {2}".format( service_name, upstart_file, sysv_file)) - started = service_start(service_name) + started = service_running(service_name) + if not started: + started = service_start(service_name) return started @@ -566,7 +570,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -574,6 +585,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -584,3 +599,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
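[Editor's note: is_mapped_loopback_device() builds on loopback_devices(), which parses losetup output into a device-to-backing-file mapping; the empty-string return doubles as a falsy miss. A standalone sketch of the lookup semantics, with an illustrative mapping standing in for real losetup output:

    # Example of what loopback_devices() might return on a host with
    # one mapped loop device.
    loopbacks = {'/dev/loop1': '/srv/ceph-loop1.img'}

    def is_mapped_loopback_device(device):
        return loopbacks.get(device, "")

    assert is_mapped_loopback_device('/dev/loop1') == '/srv/ceph-loop1.img'
    assert is_mapped_loopback_device('/dev/sdb') == ""   # falsy for non-loop
]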
+ raise NotImplementedError() diff --git a/ceph-radosgw/hooks/charmhelpers/core/hugepage.py b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py index 4aaca3f5..a783ad94 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hugepage.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py @@ -46,6 +46,8 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 3f677833..12d768e6 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -249,16 +249,18 @@ class TemplateCallback(ManagerCallback): :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): pre_checksum = '' @@ -269,7 +271,8 @@ def __call__(self, manager, service_name, event_name): for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 45319998..239719d4 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. @@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' 
% (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index cd0b783c..5f831c35 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -225,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 722bc645..0506491b 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. 
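[Editor's note: render() can now be driven by any jinja2 loader instead of a single templates directory. One place this helps is falling back from charm-local templates to shared ones; a hedged sketch, with illustrative paths and template name (ChoiceLoader and FileSystemLoader are real jinja2 classes):

    from jinja2 import ChoiceLoader, FileSystemLoader

    loader = ChoiceLoader([
        FileSystemLoader('templates'),                   # charm-local first
        FileSystemLoader('/usr/share/charm-templates'),  # shared fallback
    ])
    # render('ceph.conf', '/etc/ceph/ceph.conf', context,
    #        template_loader=loader)
]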
@@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services): stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + self.log.info('OpenStackAmuletDeployment: determine branch locations') + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] @@ -82,6 +109,8 @@ def _determine_branch_locations(self, other_services): def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -95,7 +124,8 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: @@ -111,9 +141,79 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. 
+ """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 2b3087ea..388b60e6 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): '{}'.format(sample_type, samples)) return None -# rabbitmq/amqp specific helpers: + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. 
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 From db9cf63d9c88fd7d557294000e5ef2f1051a7a05 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 13:48:55 +0000 Subject: [PATCH 0895/2699] charmhelpers sync --- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 10 +++++----- ceph-radosgw/hooks/hooks.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index fd43371e..cd737bbb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -570,14 +570,14 @@ def get_request_states(request, relation='ceph'): return requests -def is_request_sent(request): +def is_request_sent(request, relation='ceph'): """Check to see if a functionally equivalent request has already been sent Returns True if a similair request has been sent @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['sent']: return False @@ -585,7 +585,7 @@ def is_request_sent(request): return True -def is_request_complete(request): +def is_request_complete(request, relation='ceph'): """Check to see if a functionally equivalent request has already been completed @@ -593,7 +593,7 @@ def is_request_complete(request): @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['complete']: return False @@ -648,7 +648,7 @@ def send_request_if_needed(request, relation='ceph'): @param request: A CephBrokerRq object """ - if is_request_sent(request): + if is_request_sent(request, relation=relation): log('Request already sent but not complete, not sending new request', level=DEBUG) else: diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 0f923855..53765655 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -194,7 +194,7 @@ def mon_relation(): ceph.import_radosgw_key(key) restart() # TODO figure out a better way todo this else: - send_request_if_needed(rq) + send_request_if_needed(rq, relation='mon') @hooks.hook('gateway-relation-joined') From 3cadd35cba049dbad8d619f0006489f58f285bea Mon Sep 17 00:00:00 2001 From: Edward 
Hope-Morley Date: Thu, 19 Nov 2015 13:49:54 +0000 Subject: [PATCH 0896/2699] more --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 53765655..3047fde9 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -186,7 +186,7 @@ def config_changed(): @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): rq = ceph.get_create_rgw_pools_rq() - if is_request_complete(rq): + if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all() key = relation_get('radosgw_key') From 08882691626729fc4ac8f76c10dca3db3c55580f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 15:38:13 +0000 Subject: [PATCH 0897/2699] add radosgw-relation-changed rel --- ceph-proxy/hooks/ceph_hooks.py | 1 + ceph-proxy/hooks/radosgw-relation-changed | 1 + 2 files changed, 2 insertions(+) create mode 120000 ceph-proxy/hooks/radosgw-relation-changed diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index ed6f2c14..48a2a455 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -286,6 +286,7 @@ def osd_relation(relid=None): log('mon cluster not in quorum - deferring fsid provision') +@hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None): # Install radosgw for admin tools diff --git a/ceph-proxy/hooks/radosgw-relation-changed b/ceph-proxy/hooks/radosgw-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/radosgw-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file From 49bb8dd33ba02ac4fb1b48b409c3b14e5cf032cc Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 15:38:13 +0000 Subject: [PATCH 0898/2699] add radosgw-relation-changed rel --- ceph-mon/hooks/ceph_hooks.py | 1 + ceph-mon/hooks/radosgw-relation-changed | 1 + 2 files changed, 2 insertions(+) create mode 120000 ceph-mon/hooks/radosgw-relation-changed diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ed6f2c14..48a2a455 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -286,6 +286,7 @@ def osd_relation(relid=None): log('mon cluster not in quorum - deferring fsid provision') +@hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None): # Install radosgw for admin tools diff --git a/ceph-mon/hooks/radosgw-relation-changed b/ceph-mon/hooks/radosgw-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/radosgw-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file From b4d9dbed9e7b98321133876b9e5b8bc84df26d79 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:16:51 +0000 Subject: [PATCH 0899/2699] charmhelper sync to get fix for bug 1517846 --- ceph-proxy/hooks/charmhelpers/cli/__init__.py | 6 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 52 ++++++-- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- .../contrib/storage/linux/ceph.py | 78 +++++------ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 46 +++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 85 +++++++++--- .../hooks/charmhelpers/core/hugepage.py | 11 +- ceph-proxy/hooks/charmhelpers/core/kernel.py | 68 ++++++++++ .../charmhelpers/core/services/helpers.py | 7 +- .../hooks/charmhelpers/core/strutils.py 
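[Editor's note: two things are going on in the commits above. First, the broker helpers grew a relation= parameter because they previously hard-coded relation_ids('ceph'); ceph-radosgw talks to the monitors over a relation named 'mon', so without the parameter its broker requests were checked against a relation with no units and never completed. Second, radosgw-relation-changed is registered alongside -joined so the mons re-run key and broker processing when the gateway updates its relation data; both hook files are symlinks to ceph_hooks.py, and dispatch happens on the hook's basename. A minimal sketch of that dispatch pattern, with an illustrative registry:

    import os

    def radosgw_relation():
        print('handling radosgw relation data')

    registry = {
        'radosgw-relation-joined': radosgw_relation,
        'radosgw-relation-changed': radosgw_relation,
    }

    def execute(argv, registry):
        # charmhelpers' Hooks.execute() routes on the file name the
        # hook was invoked as, so one function serves many hooks.
        registry[os.path.basename(argv[0])]()

    execute(['hooks/radosgw-relation-changed'], registry)
]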
| 30 +++++ .../hooks/charmhelpers/core/templating.py | 19 ++- .../hooks/charmhelpers/fetch/__init__.py | 2 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 72 +++++++--- .../contrib/openstack/amulet/deployment.py | 123 ++++++++++++++++-- .../contrib/openstack/amulet/utils.py | 30 ++++- 16 files changed, 527 insertions(+), 116 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/kernel.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 95a79c2e..65b1a27e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -148,6 +148,13 @@ def __init__(self, shortname, description, check_cmd): self.description = description self.check_cmd = self._locate_cmd(check_cmd) + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', @@ -163,9 +170,21 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( - self.command) + nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( @@ -180,9 +199,7 @@ def write(self, nagios_context, hostname, nagios_servicegroups): def write_service_config(self, nagios_context, hostname, nagios_servicegroups): - for f in os.listdir(NRPE.nagios_exportdir): - if re.search('.*{}.cfg'.format(self.command), f): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) + self._remove_service_files() templ_vars = { 'nagios_hostname': hostname, @@ -192,8 +209,7 @@ def write_service_config(self, nagios_context, hostname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = '{}/service__{}_{}.cfg'.format( - NRPE.nagios_exportdir, hostname, self.command) + nrpe_service_file = self._get_service_filename(hostname) with 
open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) @@ -218,12 +234,32 @@ def __init__(self, hostname=None): if hostname: self.hostname = hostname else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. + if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 67b4dccc..7f3b66b1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index b4fda124..bfed4aaa 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -26,6 +26,7 @@ import os import shutil +import six import json import time import uuid @@ -59,6 +60,8 @@ apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -123,29 +126,37 @@ def get_osds(service): return None -def create_pool(service, name, replicas=3): +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. 
+ osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] check_call(cmd) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) + update_pool(service, name, settings={'size': str(replicas)}) def delete_pool(service, name): @@ -200,10 +211,10 @@ def create_key_file(service, key): log('Created new keyfile at %s.' % keyfile, level=INFO) -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" hosts = [] - for r_id in relation_ids('ceph'): + for r_id in relation_ids(relation): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) @@ -291,17 +302,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -366,14 +366,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None): +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. Returns False if no ceph key is available in relation state. """ key = None - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: @@ -549,7 +549,7 @@ def get_previous_request(rid): return request -def get_request_states(request): +def get_request_states(request, relation='ceph'): """Return a dict of requests per relation id with their corresponding completion state. 
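[Editor's note: create_pool() now accepts an explicit pg_num, keeping the previous behaviour as a fallback heuristic: roughly 100 placement groups per OSD, divided by the replica count, and a flat 200 when the OSD count cannot be queried from the CLI. The heuristic standalone (function name is illustrative):

    def recommended_pg_num(osd_count, replicas=3):
        # ~100 PGs per OSD, shared across replicas; 200 is the
        # fallback for older ceph without CLI OSD query support.
        if osd_count:
            return osd_count * 100 // replicas
        return 200

    assert recommended_pg_num(9) == 300
    assert recommended_pg_num(0) == 200
]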
@@ -561,7 +561,7 @@ def get_request_states(request): """ complete = [] requests = {} - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): complete = False previous_request = get_previous_request(rid) if request == previous_request: @@ -579,14 +579,14 @@ def get_request_states(request): return requests -def is_request_sent(request): +def is_request_sent(request, relation='ceph'): """Check to see if a functionally equivalent request has already been sent Returns True if a similair request has been sent @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['sent']: return False @@ -594,7 +594,7 @@ def is_request_sent(request): return True -def is_request_complete(request): +def is_request_complete(request, relation='ceph'): """Check to see if a functionally equivalent request has already been completed @@ -602,7 +602,7 @@ def is_request_complete(request): @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['complete']: return False @@ -652,15 +652,15 @@ def get_broker_rsp_key(): return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(request): +def send_request_if_needed(request, relation='ceph'): """Send broker request if an equivalent request has not already been sent @param request: A CephBrokerRq object """ - if is_request_sent(request): + if is_request_sent(request, relation=relation): log('Request already sent but not complete, not sending new request', level=DEBUG) else: - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index ab53a780..454b52ae 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -623,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -788,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def 
inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 29e8fee0..579871bc 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -63,33 +63,53 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" - stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - started = service_start(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + + started = service_running(service_name) + if not started: + started = service_start(service_name) return started @@ -550,7 +570,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. 
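[Editor's note: a subtle fix in the hookenv sync above is the @wraps added to translate_exc's inner wrapper, so decorated helpers keep their __name__ and __doc__; without it, anything layered on top that inspects the function sees 'inner_translate_exc2' instead. A self-contained demonstration of the effect; the translation body is a simplified stand-in:

    from functools import wraps

    def translate_exc(from_exc, to_exc):
        def inner_translate_exc1(f):
            @wraps(f)   # preserve f's metadata on the wrapper
            def inner_translate_exc2(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except from_exc:
                    raise to_exc()
            return inner_translate_exc2
        return inner_translate_exc1

    @translate_exc(KeyError, RuntimeError)
    def lookup(d, k):
        """Fetch k from d, mapping KeyError to RuntimeError."""
        return d[k]

    assert lookup.__name__ == 'lookup'   # 'inner_translate_exc2' without @wraps
]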
+ + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -558,6 +585,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -568,3 +599,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() diff --git a/ceph-proxy/hooks/charmhelpers/core/hugepage.py b/ceph-proxy/hooks/charmhelpers/core/hugepage.py index ba4340ff..a783ad94 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hugepage.py +++ b/ceph-proxy/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. Args: @@ -44,11 +46,18 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
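[Editor's note: hugepage_support() gained two guards above: max_map_count is raised to at least twice nr_hugepages, and set_shmmax optionally lifts kernel.shmmax so a shared-memory segment can actually span the configured hugepages. The shmmax minimum is the page size in bytes times the page count; a worked example with the defaults, where the current shmmax value is illustrative:

    # Assuming pagesize='2MB' and nr_hugepages=256.
    pagesize_bytes = 2 * 1024 ** 2            # bytes_from_string('2MB')
    shmmax_minsize = pagesize_bytes * 256     # 536870912
    shmmax_current = 33554432                 # illustrative `sysctl -n kernel.shmmax`
    if shmmax_minsize > shmmax_current:
        sysctl_settings = {'kernel.shmmax': shmmax_minsize}
]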
+ +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 3f677833..12d768e6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -249,16 +249,18 @@ class TemplateCallback(ManagerCallback): :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): pre_checksum = '' @@ -269,7 +271,8 @@ def __call__(self, manager, service_name, event_name): for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py index a2a784aa..7e3f9693 100644 --- a/ceph-proxy/hooks/charmhelpers/core/strutils.py +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
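[Editor's note: the new kernel.py consolidates module helpers that previously lived inline in ceph.py. is_module_loaded() works by anchoring a regex against lsmod output, where each data line starts with the module name followed by spaces; the anchor prevents prefix false-positives. Runnable against canned output (the sample and keyword argument are illustrative):

    import re

    sample_lsmod = (
        "Module                  Size  Used by\n"
        "rbd                    73728  0\n"
        "libceph               315392  1 rbd\n"
    )   # illustrative `lsmod` output

    def is_module_loaded(module, lsmod_output=sample_lsmod):
        return len(re.findall('^%s[ ]+' % module, lsmod_output, re.M)) > 0

    assert is_module_loaded('rbd')
    assert not is_module_loaded('rb')   # 'rbd' line does not match '^rb '
]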
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as bytes" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 45319998..239719d4 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. @@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' % (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets.
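To make the arithmetic behind the set_shmmax path added to hugepage_support() earlier in this patch concrete: kernel.shmmax must be at least the total bytes spanned by the requested hugepages before one shared-memory segment can cover them all. A worked sketch using the bytes_from_string() helper just synced (assumes charmhelpers is importable; the numbers are illustrative):

    from charmhelpers.core.strutils import bytes_from_string

    nr_hugepages = 256
    pagesize = '2MB'
    # bytes_from_string('2MB') == 2 * 1024 ** 2 == 2097152, so the
    # minimum shmmax for 256 x 2 MiB pages is 536870912 (512 MiB).
    shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
    print(shmmax_minsize)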
+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index cd0b783c..5f831c35 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -225,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 6770f26b..2591a9b1 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -326,7 +326,7 @@ def service_restarted(self, sentry_unit, service, filename, def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, - retry_count=2, retry_sleep_time=30): + retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: @@ -334,8 +334,9 @@ def service_restarted_since(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, @@ -359,11 +360,12 @@ def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) - except IOError: + except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
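The retry rework in service_restarted_since() above (and in config_updated_since() below) is the usual poll-until-ready loop: probe, swallow the not-there-yet IOError, sleep, repeat. A generic sketch of that shape; the helper name and defaults are illustrative, not charm-helpers API:

    import time

    def poll_until(probe, retry_count=30, retry_sleep_time=10):
        # probe() returns a truthy value once the resource exists, and may
        # raise IOError while it does not yet (the race the NOTE(beisner)
        # comments work around).
        for _attempt in range(retry_count + 1):
            try:
                result = probe()
                if result:
                    return result
            except IOError:
                pass  # not there yet; fall through to the sleep
            time.sleep(retry_sleep_time)
        return None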
# https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed'.format(tries, service, unit_name)) + 'failed\n{}'.format(tries, service, + unit_name, e)) time.sleep(retry_sleep_time) tries += 1 @@ -383,35 +385,62 @@ def service_restarted_since(self, sentry_unit, mtime, service, return False def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against - sleep_time (int): Seconds to sleep before looking for process + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if - file was modified before mtime, + file was modified before mtime, or if file not found. """ - self.log.debug('Checking %s updated since %s' % (filename, mtime)) + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) - file_mtime = self._get_file_mtime(sentry_unit, filename) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s)' % (file_mtime, mtime)) + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) return True else: - self.log.warn('File mtime %s is older than provided mtime %s' - % (file_mtime, mtime)) + self.log.warn('File mtime is older than provided mtime ' + '(%s < %s) on %s' % (file_mtime, + mtime, unit_name)) return False def validate_service_config_changed(self, sentry_unit, mtime, service, filename, pgrep_full=None, - sleep_time=20, retry_count=2, - retry_sleep_time=30): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check service and file were updated after mtime Args: @@ -456,7 +485,9 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, sentry_unit, filename, mtime, - sleep_time=0) + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) return service_restart and config_update @@ -776,3 +807,12 @@ def wait_on_action(self, action_id, _check_output=subprocess.check_output): output = _check_output(command, universal_newlines=True) data = json.loads(output) return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"],
status["message"]) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 63155d8d..0506491b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. 
@@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services): stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + self.log.info('OpenStackAmuletDeployment: determine branch locations') + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] @@ -58,19 +85,17 @@ def _determine_branch_locations(self, other_services): else: base_series = self.current_next - if self.stable: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -79,10 +104,13 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -96,7 +124,8 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: @@ -112,9 +141,79 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. 
+ :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index b1397419..388b60e6 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): '{}'.format(sample_type, samples)) return None -# rabbitmq/amqp specific helpers: + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. 
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -752,7 +768,7 @@ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): self.log.debug('SSL is enabled @{}:{} ' '({})'.format(host, port, unit_name)) return True - elif not port and not conf_ssl: + elif not conf_ssl: self.log.debug('SSL not enabled @{}:{} ' '({})'.format(host, port, unit_name)) return False @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 From f881148f604e3934f7190d7532b9b8d6822b3db9 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:16:51 +0000 Subject: [PATCH 0900/2699] charmhelper sync to get fix for bug 1517846 --- ceph-mon/hooks/charmhelpers/cli/__init__.py | 6 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 52 ++++++-- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- .../contrib/storage/linux/ceph.py | 78 +++++------ ceph-mon/hooks/charmhelpers/core/hookenv.py | 46 +++++++ ceph-mon/hooks/charmhelpers/core/host.py | 85 +++++++++--- ceph-mon/hooks/charmhelpers/core/hugepage.py | 11 +- ceph-mon/hooks/charmhelpers/core/kernel.py | 68 ++++++++++ .../charmhelpers/core/services/helpers.py | 7 +- ceph-mon/hooks/charmhelpers/core/strutils.py | 30 +++++ .../hooks/charmhelpers/core/templating.py | 19 ++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 72 +++++++--- .../contrib/openstack/amulet/deployment.py | 123 ++++++++++++++++-- .../contrib/openstack/amulet/utils.py | 30 ++++- 16 files changed, 527 insertions(+), 116 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/kernel.py diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 95a79c2e..65b1a27e 100644 --- 
a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -148,6 +148,13 @@ def __init__(self, shortname, description, check_cmd): self.description = description self.check_cmd = self._locate_cmd(check_cmd) + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', @@ -163,9 +170,21 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( - self.command) + nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( @@ -180,9 +199,7 @@ def write(self, nagios_context, hostname, nagios_servicegroups): def write_service_config(self, nagios_context, hostname, nagios_servicegroups): - for f in os.listdir(NRPE.nagios_exportdir): - if re.search('.*{}.cfg'.format(self.command), f): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) + self._remove_service_files() templ_vars = { 'nagios_hostname': hostname, @@ -192,8 +209,7 @@ def write_service_config(self, nagios_context, hostname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = '{}/service__{}_{}.cfg'.format( - NRPE.nagios_exportdir, hostname, self.command) + nrpe_service_file = self._get_service_filename(hostname) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) @@ -218,12 +234,32 @@ def __init__(self, hostname=None): if hostname: self.hostname = hostname else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. 
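A side note on the cli/__init__.py hunk earlier in this sync: switching `from charmhelpers.core import unitdata` to `import charmhelpers.core.unitdata` makes the _KV lookup go through the package attribute on every call, so if something later rebinds charmhelpers.core.unitdata (a test mock, say), the new object is honoured; the from-import style keeps using whatever was bound at import time. The underlying binding rule, shown with a plain attribute on a stand-in module (self-contained, not charm-helpers code):

    import types

    settings = types.ModuleType('settings')  # stand-in for a real module
    settings.FLAG = False

    FLAG = settings.FLAG   # what 'from settings import FLAG' would bind
    settings.FLAG = True   # the binding is changed after import

    print(FLAG)            # False -- the copied binding is stale
    print(settings.FLAG)   # True  -- attribute access sees the update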
+ if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 67b4dccc..7f3b66b1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index b4fda124..bfed4aaa 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -26,6 +26,7 @@ import os import shutil +import six import json import time import uuid @@ -59,6 +60,8 @@ apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -123,29 +126,37 @@ def get_osds(service): return None -def create_pool(service, name, replicas=3): +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] check_call(cmd) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) + update_pool(service, name, settings={'size': str(replicas)}) def delete_pool(service, name): @@ -200,10 +211,10 @@ def create_key_file(service, key): log('Created new keyfile at %s.' 
% keyfile, level=INFO) -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" hosts = [] - for r_id in relation_ids('ceph'): + for r_id in relation_ids(relation): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) @@ -291,17 +302,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -366,14 +366,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None): +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. Returns False if no ceph key is available in relation state. """ key = None - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: @@ -549,7 +549,7 @@ def get_previous_request(rid): return request -def get_request_states(request): +def get_request_states(request, relation='ceph'): """Return a dict of requests per relation id with their corresponding completion state. @@ -561,7 +561,7 @@ def get_request_states(request): """ complete = [] requests = {} - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): complete = False previous_request = get_previous_request(rid) if request == previous_request: @@ -579,14 +579,14 @@ def get_request_states(request): return requests -def is_request_sent(request): +def is_request_sent(request, relation='ceph'): """Check to see if a functionally equivalent request has already been sent Returns True if a similair request has been sent @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['sent']: return False @@ -594,7 +594,7 @@ def is_request_sent(request): return True -def is_request_complete(request): +def is_request_complete(request, relation='ceph'): """Check to see if a functionally equivalent request has already been completed @@ -602,7 +602,7 @@ def is_request_complete(request): @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['complete']: return False @@ -652,15 +652,15 @@ def get_broker_rsp_key(): return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(request): +def send_request_if_needed(request, relation='ceph'): """Send broker request if an equivalent request has not already been sent @param request: A CephBrokerRq object """ - if is_request_sent(request): + if is_request_sent(request, relation=relation): log('Request already sent but not complete, not sending new request', level=DEBUG) else: - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): log('Sending request 
{}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index ab53a780..454b52ae 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -623,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -788,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 29e8fee0..579871bc 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -63,33 +63,53 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" - stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - started = service_start(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + + started = service_running(service_name) + if not started: + started = service_start(service_name) return started @@ -550,7 +570,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -558,6 +585,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -568,3 +599,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() diff --git a/ceph-mon/hooks/charmhelpers/core/hugepage.py b/ceph-mon/hooks/charmhelpers/core/hugepage.py index ba4340ff..a783ad94 100644 --- a/ceph-mon/hooks/charmhelpers/core/hugepage.py +++ b/ceph-mon/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. 
Args: @@ -44,11 +46,18 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 3f677833..12d768e6 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -249,16 +249,18 @@ class TemplateCallback(ManagerCallback): :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + 
on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): pre_checksum = '' @@ -269,7 +271,8 @@ def __call__(self, manager, service_name, event_name): for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index a2a784aa..7e3f9693 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. + + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as bytes" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 45319998..239719d4 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. @@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' % (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets.
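The template_loader plumbing synced through TemplateCallback and render() above lets a charm hand jinja2 any loader it likes instead of the single charm-local templates directory. A hedged usage sketch, meant to run inside a hook as root; the directories, template name, and context are invented for illustration, and it assumes python-jinja2 is installed:

    from jinja2 import ChoiceLoader, FileSystemLoader

    from charmhelpers.core import templating

    # Prefer site-specific overrides, then fall back to the templates the
    # charm ships; both paths are examples only.
    loader = ChoiceLoader([
        FileSystemLoader('templates/overrides'),
        FileSystemLoader('templates/default'),
    ])
    templating.render('ceph.conf', '/etc/ceph/ceph.conf',
                      context={'fsid': 'example-fsid'},
                      template_loader=loader)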
+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index cd0b783c..5f831c35 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -225,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 6770f26b..2591a9b1 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -326,7 +326,7 @@ def service_restarted(self, sentry_unit, service, filename, def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, - retry_count=2, retry_sleep_time=30): + retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: @@ -334,8 +334,9 @@ def service_restarted_since(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, @@ -359,11 +360,12 @@ def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) - except IOError: + except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
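The amulet deployment change above forwards an optional 'constraints' key from each service definition to Deployment.add(). In a test that would look something like the following; all service names, unit counts, and constraint values are examples only:

    # Passed to _add_services(); 'constraints' is new in this sync and
    # becomes juju machine constraints for the deployed units.
    this_service = {
        'name': 'ceph-mon',
        'units': 3,
        'constraints': {'mem': '4G'},
    }
    other_services = [
        {'name': 'ceph-osd', 'units': 3,
         'constraints': {'mem': '8G', 'root-disk': '32G'}},
        {'name': 'rabbitmq-server'},  # no key: .get() passes None through
    ]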
# https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed'.format(tries, service, unit_name)) + 'failed\n{}'.format(tries, service, + unit_name, e)) time.sleep(retry_sleep_time) tries += 1 @@ -383,35 +385,62 @@ def service_restarted_since(self, sentry_unit, mtime, service, return False def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against - sleep_time (int): Seconds to sleep before looking for process + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if - file was modified before mtime, + file was modified before mtime, or if file not found. """ - self.log.debug('Checking %s updated since %s' % (filename, mtime)) + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) - file_mtime = self._get_file_mtime(sentry_unit, filename) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s)' % (file_mtime, mtime)) + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) return True else: - self.log.warn('File mtime %s is older than provided mtime %s' - % (file_mtime, mtime)) + self.log.warn('File mtime is older than provided mtime ' + '(%s < %s) on %s' % (file_mtime, + mtime, unit_name)) return False def validate_service_config_changed(self, sentry_unit, mtime, service, filename, pgrep_full=None, - sleep_time=20, retry_count=2, - retry_sleep_time=30): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check service and file were updated after mtime Args: @@ -456,7 +485,9 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, sentry_unit, filename, mtime, - sleep_time=0) + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) return service_restart and config_update @@ -776,3 +807,12 @@ def wait_on_action(self, action_id, _check_output=subprocess.check_output): output = _check_output(command, universal_newlines=True) data = json.loads(output) return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"],
status["message"]) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 63155d8d..0506491b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. 
@@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services): stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + self.log.info('OpenStackAmuletDeployment: determine branch locations') + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] @@ -58,19 +85,17 @@ def _determine_branch_locations(self, other_services): else: base_series = self.current_next - if self.stable: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -79,10 +104,13 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -96,7 +124,8 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: @@ -112,9 +141,79 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. 
+ :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index b1397419..388b60e6 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): '{}'.format(sample_type, samples)) return None -# rabbitmq/amqp specific helpers: + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. 
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -752,7 +768,7 @@ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): self.log.debug('SSL is enabled @{}:{} ' '({})'.format(host, port, unit_name)) return True - elif not port and not conf_ssl: + elif not conf_ssl: self.log.debug('SSL not enabled @{}:{} ' '({})'.format(host, port, unit_name)) return False @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 From 776a0fcd15c34551d12186ccbc79471117d14b57 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:41:35 +0000 Subject: [PATCH 0901/2699] charmhelper sync to get fix for bug 1517846 --- ceph-proxy/hooks/charmhelpers/cli/__init__.py | 6 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 52 ++++++-- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- .../contrib/storage/linux/ceph.py | 78 +++++------ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 46 +++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 85 +++++++++--- .../hooks/charmhelpers/core/hugepage.py | 11 +- ceph-proxy/hooks/charmhelpers/core/kernel.py | 68 ++++++++++ .../charmhelpers/core/services/helpers.py | 7 +- .../hooks/charmhelpers/core/strutils.py | 30 +++++ .../hooks/charmhelpers/core/templating.py | 19 ++- .../hooks/charmhelpers/fetch/__init__.py | 2 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 72 +++++++--- .../contrib/openstack/amulet/deployment.py | 123 ++++++++++++++++-- .../contrib/openstack/amulet/utils.py | 30 ++++- 16 files changed, 527 insertions(+), 116 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/kernel.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 95a79c2e..65b1a27e 100644 --- 
a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -148,6 +148,13 @@ def __init__(self, shortname, description, check_cmd): self.description = description self.check_cmd = self._locate_cmd(check_cmd) + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', @@ -163,9 +170,21 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( - self.command) + nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( @@ -180,9 +199,7 @@ def write(self, nagios_context, hostname, nagios_servicegroups): def write_service_config(self, nagios_context, hostname, nagios_servicegroups): - for f in os.listdir(NRPE.nagios_exportdir): - if re.search('.*{}.cfg'.format(self.command), f): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) + self._remove_service_files() templ_vars = { 'nagios_hostname': hostname, @@ -192,8 +209,7 @@ def write_service_config(self, nagios_context, hostname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = '{}/service__{}_{}.cfg'.format( - NRPE.nagios_exportdir, hostname, self.command) + nrpe_service_file = self._get_service_filename(hostname) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) @@ -218,12 +234,32 @@ def __init__(self, hostname=None): if hostname: self.hostname = hostname else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. 
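For orientation, a hedged sketch of how a charm typically drives these NRPE helpers, including the remove_check() method introduced in this hunk; the check names and command here are invented for illustration:

    from charmhelpers.contrib.charmsupport import nrpe

    hostname = nrpe.get_nagios_hostname()
    checks = nrpe.NRPE(hostname=hostname)
    checks.add_check(
        shortname='ceph_mon',
        description='Verify a ceph-mon process is running',
        check_cmd='check_procs -c 1:1 -C ceph-mon',
    )
    # Removal needs only the shortname; description and check_cmd fall
    # back to placeholders, as the code continuing below shows.
    checks.remove_check(shortname='stale_check')
    checks.write()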
+ if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 67b4dccc..7f3b66b1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index b4fda124..bfed4aaa 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -26,6 +26,7 @@ import os import shutil +import six import json import time import uuid @@ -59,6 +60,8 @@ apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -123,29 +126,37 @@ def get_osds(service): return None -def create_pool(service, name, replicas=3): +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] check_call(cmd) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) + update_pool(service, name, settings={'size': str(replicas)}) def delete_pool(service, name): @@ -200,10 +211,10 @@ def create_key_file(service, key): log('Created new keyfile at %s.' 
% keyfile, level=INFO) -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" hosts = [] - for r_id in relation_ids('ceph'): + for r_id in relation_ids(relation): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) @@ -291,17 +302,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -366,14 +366,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None): +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. Returns False if no ceph key is available in relation state. """ key = None - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: @@ -549,7 +549,7 @@ def get_previous_request(rid): return request -def get_request_states(request): +def get_request_states(request, relation='ceph'): """Return a dict of requests per relation id with their corresponding completion state. @@ -561,7 +561,7 @@ def get_request_states(request): """ complete = [] requests = {} - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): complete = False previous_request = get_previous_request(rid) if request == previous_request: @@ -579,14 +579,14 @@ def get_request_states(request): return requests -def is_request_sent(request): +def is_request_sent(request, relation='ceph'): """Check to see if a functionally equivalent request has already been sent Returns True if a similair request has been sent @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['sent']: return False @@ -594,7 +594,7 @@ def is_request_sent(request): return True -def is_request_complete(request): +def is_request_complete(request, relation='ceph'): """Check to see if a functionally equivalent request has already been completed @@ -602,7 +602,7 @@ def is_request_complete(request): @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['complete']: return False @@ -652,15 +652,15 @@ def get_broker_rsp_key(): return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(request): +def send_request_if_needed(request, relation='ceph'): """Send broker request if an equivalent request has not already been sent @param request: A CephBrokerRq object """ - if is_request_sent(request): + if is_request_sent(request, relation=relation): log('Request already sent but not complete, not sending new request', level=DEBUG) else: - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): log('Sending request 
{}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index ab53a780..454b52ae 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -623,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -788,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 29e8fee0..579871bc 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -63,33 +63,53 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" - stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
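A short usage sketch of the reworked pause/resume helpers, assuming an Upstart-managed service (a SysV init script would take the update-rc.d branch instead):

    from charmhelpers.core.host import service_pause, service_resume

    # Stops the job if it is running and writes /etc/init/ceph-mon.override
    # containing 'manual'; raises ValueError when neither an Upstart .conf
    # nor a SysV init script can be found.
    service_pause('ceph-mon')

    # Removes the override (or runs 'update-rc.d ... enable') and then
    # starts the service only if it is not already running.
    service_resume('ceph-mon')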
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - started = service_start(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + + started = service_running(service_name) + if not started: + started = service_start(service_name) return started @@ -550,7 +570,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -558,6 +585,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -568,3 +599,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() diff --git a/ceph-proxy/hooks/charmhelpers/core/hugepage.py b/ceph-proxy/hooks/charmhelpers/core/hugepage.py index ba4340ff..a783ad94 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hugepage.py +++ b/ceph-proxy/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. 
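Before moving on to hugepages, two of the host.py additions just above in a minimal sketch; the path and ownership are examples, not taken from the charm:

    from charmhelpers.core.host import chownr, get_total_ram

    # chowntopdir=True now chowns /var/lib/ceph itself as well as its
    # children, skipping the top entry only when it is a broken symlink.
    chownr('/var/lib/ceph', 'ceph', 'ceph', chowntopdir=True)

    # MemTotal from /proc/meminfo, converted to bytes.
    ram_bytes = get_total_ram()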
Args: @@ -44,11 +46,18 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 3f677833..12d768e6 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -249,16 +249,18 @@ class TemplateCallback(ManagerCallback): :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + 
on_change_action=None, template_loader=None):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms
        self.on_change_action = on_change_action
+        self.template_loader = template_loader
    def __call__(self, manager, service_name, event_name):
        pre_checksum = ''
@@ -269,7 +271,8 @@ def __call__(self, manager, service_name, event_name):
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
-                          self.owner, self.group, self.perms)
+                          self.owner, self.group, self.perms,
+                          template_loader=self.template_loader)
        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log(
diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py
index a2a784aa..7e3f9693 100644
--- a/ceph-proxy/hooks/charmhelpers/core/strutils.py
+++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py
@@ -18,6 +18,7 @@
 # along with charm-helpers.  If not, see .
 import six
+import re
 def bool_from_string(value):
@@ -40,3 +41,32 @@ def bool_from_string(value):
    msg = "Unable to interpret string value '%s' as boolean" % (value)
    raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py
index 45319998..239719d4 100644
--- a/ceph-proxy/hooks/charmhelpers/core/templating.py
+++ b/ceph-proxy/hooks/charmhelpers/core/templating.py
@@ -21,7 +21,7 @@
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8'):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
    """
    Render a template.
@@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root',
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions
-    if templates_dir is None:
-        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
-    loader = Environment(loader=FileSystemLoader(templates_dir))
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        source = source
-        template = loader.get_template(source)
+        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
-    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+    target_dir = os.path.dirname(target)
+    if not os.path.exists(target_dir):
+        # This is a terrible default directory permission, as the file
+        # or its siblings will often contain secrets.
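A hedged sketch of the new template_loader hook in render(), using an in-memory jinja2 loader; the template body and target path are invented for the example:

    from jinja2 import DictLoader
    from charmhelpers.core import templating

    loader = DictLoader({'ceph.conf': '[global]\nfsid = {{ fsid }}\n'})
    templating.render(source='ceph.conf',
                      target='/etc/ceph/ceph.conf',
                      context={'fsid': 'some-fsid'},
                      # Bypasses the charm's templates/ directory entirely.
                      template_loader=loader)

    # The strutils addition above parses human-readable sizes, e.g.
    # bytes_from_string('2MB') == 2 * 1024 ** 2.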
+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index cd0b783c..5f831c35 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -225,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 6770f26b..2591a9b1 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -326,7 +326,7 @@ def service_restarted(self, sentry_unit, service, filename, def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, - retry_count=2, retry_sleep_time=30): + retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: @@ -334,8 +334,9 @@ def service_restarted_since(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, @@ -359,11 +360,12 @@ def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) - except IOError: + except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
                self.log.debug('Attempt {} to get {} proc start time on {} '
-                               'failed'.format(tries, service, unit_name))
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
                time.sleep(retry_sleep_time)
                tries += 1
@@ -383,35 +385,62 @@ def service_restarted_since(self, sentry_unit, mtime, service,
            return False
    def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20):
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
        """Check if file was modified after a given time.
        Args:
          sentry_unit (sentry): The sentry unit to check the file mtime on
          filename (string): The file to check mtime of
          mtime (float): The epoch time to check against
-          sleep_time (int): Seconds to sleep before looking for process
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
        Returns:
          bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime,
+                file was modified before mtime, or if file not found.
        """
-        self.log.debug('Checking %s updated since %s' % (filename, mtime))
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
        time.sleep(sleep_time)
-        file_mtime = self._get_file_mtime(sentry_unit, filename)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
        if file_mtime >= mtime:
            self.log.debug('File mtime is newer than provided mtime '
-                           '(%s >= %s)' % (file_mtime, mtime))
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
            return True
        else:
-            self.log.warn('File mtime %s is older than provided mtime %s'
-                          % (file_mtime, mtime))
+            self.log.warn('File mtime is older than provided mtime '
+                          '(%s < %s) on %s' % (file_mtime,
+                                               mtime, unit_name))
            return False
    def validate_service_config_changed(self, sentry_unit, mtime, service,
                                        filename, pgrep_full=None,
-                                        sleep_time=20, retry_count=2,
-                                        retry_sleep_time=30):
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
        """Check service and file were updated after mtime
        Args:
@@ -456,7 +485,9 @@ def validate_service_config_changed(self, sentry_unit, mtime, service,
            sentry_unit,
            filename,
            mtime,
-            sleep_time=0)
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
        return service_restart and config_update
@@ -776,3 +807,12 @@ def wait_on_action(self, action_id, _check_output=subprocess.check_output):
        output = _check_output(command, universal_newlines=True)
        data = json.loads(output)
        return data.get(u"status") == "completed"
+
+    def status_get(self, unit):
+        """Return the current service status of this unit."""
+        raw_status, return_code = unit.run(
+            "status-get --format=json --include-data")
+        if return_code != 0:
+            return ("unknown", "")
+        status = json.loads(raw_status)
+        return (status["status"],
status["message"]) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 63155d8d..0506491b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. 
@@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services):
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services."""
+        self.log.info('OpenStackAmuletDeployment:  determine branch locations')
+
        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']
@@ -58,19 +85,17 @@ def _determine_branch_locations(self, other_services):
        else:
            base_series = self.current_next
-        if self.stable:
-            for svc in other_services:
-                if svc['name'] in force_series_current:
-                    base_series = self.current_next
-
+        for svc in other_services:
+            if svc['name'] in force_series_current:
+                base_series = self.current_next
+            # If a location has been explicitly set, use it
+            if svc.get('location'):
+                continue
+            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(base_series,
                                              svc['name'])
-        else:
-            for svc in other_services:
-                if svc['name'] in force_series_current:
-                    base_series = self.current_next
-
+            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(base_series,
@@ -79,10 +104,13 @@ def _determine_branch_locations(self, other_services):
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])
+
        return other_services
    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
+        self.log.info('OpenStackAmuletDeployment:  adding services')
+
        other_services = self._determine_branch_locations(other_services)
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -96,7 +124,8 @@ def _add_services(self, this_service, other_services):
                  'ceph-osd', 'ceph-radosgw']
        # Charms which can not use openstack-origin, ie. many subordinates
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
+        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
        if self.openstack:
            for svc in services:
@@ -112,9 +141,79 @@ def _add_services(self, this_service, other_services):
    def _configure_services(self, configs):
        """Configure all of the services."""
+        self.log.info('OpenStackAmuletDeployment:  configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)
+    def _auto_wait_for_status(self, message=None, exclude_services=None,
+                              include_only=None, timeout=1800):
+        """Wait for all units to have a specific extended status, except
+        for any defined as excluded.  Unless specified via message, any
+        status containing any case of 'ready' will be considered a match.
+
+        Examples of message usage:
+
+          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+          Wait for all units to reach this status (exact match):
+              message = re.compile('^Unit is ready and clustered$')
+
+          Wait for all units to reach any one of these (exact match):
+              message = re.compile('Unit is ready|OK|Ready')
+
+          Wait for at least one unit to reach this status (exact match):
+              message = {'ready'}
+
+        See Amulet's sentry.wait_for_messages() for message usage detail.
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjunction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjunction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None.  Raises if timeout is hit.
+        """
+        self.log.info('Waiting for extended status on units...')
+
+        all_services = self.d.services.keys()
+
+        if exclude_services and include_only:
+            raise ValueError('exclude_services can not be used '
+                             'with include_only')
+
+        if message:
+            if isinstance(message, re._pattern_type):
+                match = message.pattern
+            else:
+                match = message
+
+            self.log.debug('Custom extended status wait match: '
+                           '{}'.format(match))
+        else:
+            self.log.debug('Default extended status wait match:  contains '
+                           'READY (case-insensitive)')
+            message = re.compile('.*ready.*', re.IGNORECASE)
+
+        if exclude_services:
+            self.log.debug('Excluding services from extended status match: '
+                           '{}'.format(exclude_services))
+        else:
+            exclude_services = []
+
+        if include_only:
+            services = include_only
+        else:
+            services = list(set(all_services) - set(exclude_services))
+
+        self.log.debug('Waiting up to {}s for extended status on services: '
+                       '{}'.format(timeout, services))
+        service_messages = {service: message for service in services}
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+        self.log.info('OK')
+
    def _get_openstack_release(self):
        """Get openstack release.
diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py
index b1397419..388b60e6 100644
--- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -18,6 +18,7 @@
 import json
 import logging
 import os
+import re
 import six
 import time
 import urllib
@@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
                           '{}'.format(sample_type, samples))
            return None
-# rabbitmq/amqp specific helpers:
+    # rabbitmq/amqp specific helpers:
+
+    def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+        """Wait for rmq units' extended status to show cluster readiness,
+        after an optional initial sleep period.
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -752,7 +768,7 @@ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): self.log.debug('SSL is enabled @{}:{} ' '({})'.format(host, port, unit_name)) return True - elif not port and not conf_ssl: + elif not conf_ssl: self.log.debug('SSL not enabled @{}:{} ' '({})'.format(host, port, unit_name)) return False @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 From aba39146c1160ad4f43bbe9a273b9f1f2151523e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:41:35 +0000 Subject: [PATCH 0902/2699] charmhelper sync to get fix for bug 1517846 --- ceph-mon/hooks/charmhelpers/cli/__init__.py | 6 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 52 ++++++-- .../hooks/charmhelpers/contrib/network/ip.py | 8 +- .../contrib/storage/linux/ceph.py | 78 +++++------ ceph-mon/hooks/charmhelpers/core/hookenv.py | 46 +++++++ ceph-mon/hooks/charmhelpers/core/host.py | 85 +++++++++--- ceph-mon/hooks/charmhelpers/core/hugepage.py | 11 +- ceph-mon/hooks/charmhelpers/core/kernel.py | 68 ++++++++++ .../charmhelpers/core/services/helpers.py | 7 +- ceph-mon/hooks/charmhelpers/core/strutils.py | 30 +++++ .../hooks/charmhelpers/core/templating.py | 19 ++- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 72 +++++++--- .../contrib/openstack/amulet/deployment.py | 123 ++++++++++++++++-- .../contrib/openstack/amulet/utils.py | 30 ++++- 16 files changed, 527 insertions(+), 116 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/kernel.py diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 95a79c2e..65b1a27e 100644 --- 
a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -148,6 +148,13 @@ def __init__(self, shortname, description, check_cmd): self.description = description self.check_cmd = self._locate_cmd(check_cmd) + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', @@ -163,9 +170,21 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( - self.command) + nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( @@ -180,9 +199,7 @@ def write(self, nagios_context, hostname, nagios_servicegroups): def write_service_config(self, nagios_context, hostname, nagios_servicegroups): - for f in os.listdir(NRPE.nagios_exportdir): - if re.search('.*{}.cfg'.format(self.command), f): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) + self._remove_service_files() templ_vars = { 'nagios_hostname': hostname, @@ -192,8 +209,7 @@ def write_service_config(self, nagios_context, hostname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = '{}/service__{}_{}.cfg'.format( - NRPE.nagios_exportdir, hostname, self.command) + nrpe_service_file = self._get_service_filename(hostname) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) @@ -218,12 +234,32 @@ def __init__(self, hostname=None): if hostname: self.hostname = hostname else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. 
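The storage/linux/ceph.py hunks that follow in this same sync give create_pool() an explicit pg_num and split replica handling into update_pool(); a hedged sketch of the resulting calls, with the client and pool names invented for illustration:

    from charmhelpers.contrib.storage.linux.ceph import (
        create_pool,
        update_pool,
    )

    # An explicit placement-group count skips the OSD-based calculation.
    create_pool('admin', 'rbd-pool', replicas=3, pg_num=128)

    # Issues 'ceph --id admin osd pool set rbd-pool size 2'.
    update_pool('admin', 'rbd-pool', settings={'size': '2'})

    # The broker helpers likewise accept a relation name now, e.g.
    # send_request_if_needed(rq, relation='mon').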
+ if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 67b4dccc..7f3b66b1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index b4fda124..bfed4aaa 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -26,6 +26,7 @@ import os import shutil +import six import json import time import uuid @@ -59,6 +60,8 @@ apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -123,29 +126,37 @@ def get_osds(service): return None -def create_pool(service, name, replicas=3): +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] check_call(cmd) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) + update_pool(service, name, settings={'size': str(replicas)}) def delete_pool(service, name): @@ -200,10 +211,10 @@ def create_key_file(service, key): log('Created new keyfile at %s.' 
% keyfile, level=INFO) -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" hosts = [] - for r_id in relation_ids('ceph'): + for r_id in relation_ids(relation): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) @@ -291,17 +302,6 @@ def place_data_on_block_device(blk_device, data_src_dst): os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -366,14 +366,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None): +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. Returns False if no ceph key is available in relation state. """ key = None - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: @@ -549,7 +549,7 @@ def get_previous_request(rid): return request -def get_request_states(request): +def get_request_states(request, relation='ceph'): """Return a dict of requests per relation id with their corresponding completion state. @@ -561,7 +561,7 @@ def get_request_states(request): """ complete = [] requests = {} - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): complete = False previous_request = get_previous_request(rid) if request == previous_request: @@ -579,14 +579,14 @@ def get_request_states(request): return requests -def is_request_sent(request): +def is_request_sent(request, relation='ceph'): """Check to see if a functionally equivalent request has already been sent Returns True if a similair request has been sent @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['sent']: return False @@ -594,7 +594,7 @@ def is_request_sent(request): return True -def is_request_complete(request): +def is_request_complete(request, relation='ceph'): """Check to see if a functionally equivalent request has already been completed @@ -602,7 +602,7 @@ def is_request_complete(request): @param request: A CephBrokerRq object """ - states = get_request_states(request) + states = get_request_states(request, relation=relation) for rid in states.keys(): if not states[rid]['complete']: return False @@ -652,15 +652,15 @@ def get_broker_rsp_key(): return 'broker-rsp-' + local_unit().replace('/', '-') -def send_request_if_needed(request): +def send_request_if_needed(request, relation='ceph'): """Send broker request if an equivalent request has not already been sent @param request: A CephBrokerRq object """ - if is_request_sent(request): + if is_request_sent(request, relation=relation): log('Request already sent but not complete, not sending new request', level=DEBUG) else: - for rid in relation_ids('ceph'): + for rid in relation_ids(relation): log('Sending request 
{}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index ab53a780..454b52ae 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get a peer relation id if a peer relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -623,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute="", storage_id=""): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=""): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -788,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 29e8fee0..579871bc 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -63,33 +63,53 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" - stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. 
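A minimal sketch of the hookenv additions a little above in this same sync; the storage name follows the usual metadata.yaml conventions and is an assumption here:

    from charmhelpers.core.hookenv import (
        peer_relation_id,
        storage_get,
        storage_list,
    )

    # First relation id of the charm's peer relation, or None.
    rid = peer_relation_id()

    # Iterate over attached 'osd-devices' storage instances; on older
    # jujus without the storage-list tool, storage_list() returns [].
    for sid in storage_list('osd-devices'):
        location = storage_get('location', sid)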
Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - started = service_start(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + + started = service_running(service_name) + if not started: + started = service_start(service_name) return started @@ -550,7 +570,14 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """ + Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -558,6 +585,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -568,3 +599,19 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + '''The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + ''' + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() diff --git a/ceph-mon/hooks/charmhelpers/core/hugepage.py b/ceph-mon/hooks/charmhelpers/core/hugepage.py index ba4340ff..a783ad94 100644 --- a/ceph-mon/hooks/charmhelpers/core/hugepage.py +++ b/ceph-mon/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. 
Args: @@ -44,11 +46,18 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 3f677833..12d768e6 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -249,16 +249,18 @@ class TemplateCallback(ManagerCallback): :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + 
on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): pre_checksum = '' @@ -269,7 +271,8 @@ def __call__(self, manager, service_name, event_name): for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index a2a784aa..7e3f9693 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. + + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as bytes" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 45319998..239719d4 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. @@ -52,17 +52,24 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' % (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. 
+ host.mkdir(os.path.dirname(target), owner, group, perms=0o755) host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index cd0b783c..5f831c35 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -225,12 +225,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 6770f26b..2591a9b1 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -326,7 +326,7 @@ def service_restarted(self, sentry_unit, service, filename, def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, - retry_count=2, retry_sleep_time=30): + retry_count=30, retry_sleep_time=10): """Check if service was started after a given time. Args: @@ -334,8 +334,9 @@ def service_restarted_since(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time is newer than mtime, @@ -359,11 +360,12 @@ def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full) self.log.debug('Attempt {} to get {} proc start time on {} ' 'OK'.format(tries, service, unit_name)) - except IOError: + except IOError as e: # NOTE(beisner) - race avoidance, proc may not exist yet. 
# https://bugs.launchpad.net/charm-helpers/+bug/1474030 self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed'.format(tries, service, unit_name)) + 'failed\n{}'.format(tries, service, + unit_name, e)) time.sleep(retry_sleep_time) tries += 1 @@ -383,35 +385,62 @@ def service_restarted_since(self, sentry_unit, mtime, service, return False def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against - sleep_time (int): Seconds to sleep before looking for process + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if - file was modified before mtime, + file was modified before mtime, or if file not found. """ - self.log.debug('Checking %s updated since %s' % (filename, mtime)) + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) - file_mtime = self._get_file_mtime(sentry_unit, filename) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s)' % (file_mtime, mtime)) + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) return True else: - self.log.warn('File mtime %s is older than provided mtime %s' - % (file_mtime, mtime)) + self.log.warn('File mtime is older than provided mtime ' + '(%s < %s) on %s' % (file_mtime, + mtime, unit_name)) return False def validate_service_config_changed(self, sentry_unit, mtime, service, filename, pgrep_full=None, - sleep_time=20, retry_count=2, - retry_sleep_time=30): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check service and file were updated after mtime Args: @@ -456,7 +485,9 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, sentry_unit, filename, mtime, - sleep_time=0) + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) return service_restart and config_update @@ -776,3 +807,12 @@ def wait_on_action(self, action_id, _check_output=subprocess.check_output): output = _check_output(command, universal_newlines=True) data = json.loads(output) return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], 
status["message"]) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 63155d8d..0506491b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,6 +47,22 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. 
@@ -45,6 +70,8 @@ def _determine_branch_locations(self, other_services): stable or next (dev) branch, and based on this, use the corresponding stable or next branches for the other_services.""" + self.log.info('OpenStackAmuletDeployment: determine branch locations') + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] @@ -58,19 +85,17 @@ def _determine_branch_locations(self, other_services): else: base_series = self.current_next - if self.stable: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -79,10 +104,13 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -96,7 +124,8 @@ def _add_services(self, this_service, other_services): 'ceph-osd', 'ceph-radosgw'] # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] if self.openstack: for svc in services: @@ -112,9 +141,79 @@ def _add_services(self, this_service, other_services): def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjunction with include_only. 
+ :param include_only: List of juju service names to exclusively check, + not to be used in conjunction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index b1397419..388b60e6 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -604,7 +605,22 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): '{}'.format(sample_type, samples)) return None -# rabbitmq/amqp specific helpers: + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. 
Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + def add_rmq_test_user(self, sentry_units, username="testuser1", password="changeme"): """Add a test user via the first rmq juju unit, check connection as @@ -752,7 +768,7 @@ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): self.log.debug('SSL is enabled @{}:{} ' '({})'.format(host, port, unit_name)) return True - elif not port and not conf_ssl: + elif not conf_ssl: self.log.debug('SSL not enabled @{}:{} ' '({})'.format(host, port, unit_name)) return False @@ -805,7 +821,10 @@ def configure_rmq_ssl_on(self, sentry_units, deployment, if port: config['ssl_port'] = port - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 @@ -832,7 +851,10 @@ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): # Disable RMQ SSL config = {'ssl': 'off'} - deployment.configure('rabbitmq-server', config) + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) # Confirm tries = 0 From 35b7d542a8fd1fd63d4c6e073bbbbe6f912da54b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:58:38 +0000 Subject: [PATCH 0903/2699] add pool create pg_num override support --- ceph-proxy/hooks/ceph_broker.py | 11 ++++++++++- ceph-proxy/unit_tests/test_ceph_broker.py | 21 +++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index e162dcb2..e9d5e5a6 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -83,12 +83,21 @@ def process_requests_v1(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + # Mandatory params pool = params['pool'] replicas = params['replicas'] + + # Optional params + pg_num = req.get('pg_num') + if pg_num: + # Ensure string + pg_num = str(pg_num) + if not pool_exists(service=svc, name=pool): log("Creating pool '%s' (replicas=%s)" % (pool, replicas), level=INFO) - create_pool(service=svc, name=pool, replicas=replicas) + create_pool(service=svc, name=pool, replicas=replicas, + pg_num=pg_num) else: log("Pool '%s' already exists - skipping create" % (pool), level=DEBUG) diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index fc698174..93ca98ec 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -53,7 +53,24 @@ def test_process_requests_create_pool(self, mock_log, mock_pool_exists, rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3) + replicas=3, pg_num=None) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num(self, mock_log, + mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': + 
'foo', 'replicas': 3, + 'pg_num': 100}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num='100') self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.create_pool') @@ -84,7 +101,7 @@ def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3) + replicas=3, pg_num=None) self.assertEqual(json.loads(rc)['exit-code'], 0) self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From 7e04a163e5a487ac2d27851e266baed12e5ddf05 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 17:58:38 +0000 Subject: [PATCH 0904/2699] add pool create pg_num override support --- ceph-mon/hooks/ceph_broker.py | 11 ++++++++++- ceph-mon/unit_tests/test_ceph_broker.py | 21 +++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index e162dcb2..e9d5e5a6 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -83,12 +83,21 @@ def process_requests_v1(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + # Mandatory params pool = params['pool'] replicas = params['replicas'] + + # Optional params + pg_num = req.get('pg_num') + if pg_num: + # Ensure string + pg_num = str(pg_num) + if not pool_exists(service=svc, name=pool): log("Creating pool '%s' (replicas=%s)" % (pool, replicas), level=INFO) - create_pool(service=svc, name=pool, replicas=replicas) + create_pool(service=svc, name=pool, replicas=replicas, + pg_num=pg_num) else: log("Pool '%s' already exists - skipping create" % (pool), level=DEBUG) diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index fc698174..93ca98ec 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -53,7 +53,24 @@ def test_process_requests_create_pool(self, mock_log, mock_pool_exists, rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3) + replicas=3, pg_num=None) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num(self, mock_log, + mock_pool_exists, + mock_create_pool): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': + 'foo', 'replicas': 3, + 'pg_num': 100}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num='100') self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.create_pool') @@ -84,7 +101,7 @@ def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3) + replicas=3, pg_num=None) self.assertEqual(json.loads(rc)['exit-code'], 0) 
self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From ede530c3ab4e6f11e4939df989cf94ed1c4801fd Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 18:02:46 +0000 Subject: [PATCH 0905/2699] fix u/t --- ceph-radosgw/unit_tests/test_hooks.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index b507320b..2651065f 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -183,7 +183,8 @@ def test_config_changed(self): self.assertTrue(_apache_modules.called) self.assertTrue(_apache_reload.called) - @patch.object(ceph_hooks, 'is_request_complete', lambda *args: True) + @patch.object(ceph_hooks, 'is_request_complete', + lambda *args, **kwargs: True) def test_mon_relation(self): _ceph = self.patch('ceph') _restart = self.patch('restart') @@ -193,7 +194,8 @@ def test_mon_relation(self): _ceph.import_radosgw_key.assert_called_with('seckey') self.CONFIGS.write_all.assert_called_with() - @patch.object(ceph_hooks, 'is_request_complete', lambda *args: True) + @patch.object(ceph_hooks, 'is_request_complete', + lambda *args, **kwargs: True) def test_mon_relation_nokey(self): _ceph = self.patch('ceph') _restart = self.patch('restart') @@ -204,7 +206,8 @@ def test_mon_relation_nokey(self): self.CONFIGS.write_all.assert_called_with() @patch.object(ceph_hooks, 'send_request_if_needed') - @patch.object(ceph_hooks, 'is_request_complete', lambda *args: False) + @patch.object(ceph_hooks, 'is_request_complete', + lambda *args, **kwargs: False) def test_mon_relation_send_broker_request(self, mock_send_request_if_needed): _ceph = self.patch('ceph') From f88fc198be1053c0aec159561b1ae28c39da048e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 18:05:59 +0000 Subject: [PATCH 0906/2699] add pre-config of lighter pools --- ceph-radosgw/hooks/ceph.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index a42e6c2f..911810b8 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -245,15 +245,11 @@ def get_create_rgw_pools_rq(): for pool in heavy: rq.add_op_create_pool(name=pool, replica_count=replicas) - # TODO: we want these pools to have a smaller pg_num/pgp_num than the - # others but do not currently have the ability to override this with the - # broker api (LP: #1517846). Right now omit this so that the remaining - # pools are created when the RGW is installed. 
- # - # Buckets not expected to contain too much data - #light = ['.rgw', '.rgw.buckets.index', '.rgw.control', '.rgw.gc', - # '.rgw.root'] - #for pool in light: - # rq.add_op_create_pool(name=pool, replica_count=replicas) + # NOTE: we want these pools to have a smaller pg_num/pgp_num than the + # others since they are not expected to contain as much data + light = ['.rgw', '.rgw.buckets.index', '.rgw.control', '.rgw.gc', + '.rgw.root'] + for pool in light: + rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=100) return rq From 2205c50556ad2d96278d0f79cf8c3287f9f9341d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 18:14:14 +0000 Subject: [PATCH 0907/2699] add pool create pg_num override support --- ceph-proxy/hooks/ceph_broker.py | 6 ++++++ ceph-proxy/unit_tests/test_ceph_broker.py | 25 ++++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index e9d5e5a6..bd23d435 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -12,6 +12,7 @@ ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, + get_osds, pool_exists, ) @@ -90,6 +91,11 @@ def process_requests_v1(reqs): # Optional params pg_num = req.get('pg_num') if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(svc) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + # Ensure string pg_num = str(pg_num) diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index 93ca98ec..8f08cdc7 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -56,12 +56,15 @@ def test_process_requests_create_pool(self, mock_log, mock_pool_exists, replicas=3, pg_num=None) self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.get_osds') @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num(self, mock_log, mock_pool_exists, - mock_create_pool): + mock_create_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'create-pool', 'name': @@ -73,6 +76,26 @@ def test_process_requests_create_pool_w_pg_num(self, mock_log, replicas=3, pg_num='100') self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.get_osds') + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, + mock_pool_exists, + mock_create_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': + 'foo', 'replicas': 3, + 'pg_num': 300}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num='100') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') From d513a5a340b9e6e977d8b3f485516fcf3f90a8b4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 18:14:14 +0000 Subject: [PATCH 0908/2699] add pool create pg_num override support --- 
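Note: the cap applied in this pair of patches follows the usual guideline of roughly 100 placement groups per OSD. A quick arithmetic check using the values from the new unit tests (an illustrative sketch, not part of the patch itself):

    # 3 OSDs and replicas=3 give a ceiling of len(osds) * 100 // replicas == 100,
    # so a requested pg_num of 300 is clamped to 100 before being handed to
    # create_pool() as a string.
    osds = [0, 1, 2]
    replicas = 3
    pg_num = min(300, (len(osds) * 100 // replicas))
    assert str(pg_num) == '100'
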
ceph-mon/hooks/ceph_broker.py | 6 ++++++ ceph-mon/unit_tests/test_ceph_broker.py | 25 ++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index e9d5e5a6..bd23d435 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -12,6 +12,7 @@ ) from charmhelpers.contrib.storage.linux.ceph import ( create_pool, + get_osds, pool_exists, ) @@ -90,6 +91,11 @@ def process_requests_v1(reqs): # Optional params pg_num = req.get('pg_num') if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(svc) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + # Ensure string pg_num = str(pg_num) diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 93ca98ec..8f08cdc7 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -56,12 +56,15 @@ def test_process_requests_create_pool(self, mock_log, mock_pool_exists, replicas=3, pg_num=None) self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.get_osds') @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num(self, mock_log, mock_pool_exists, - mock_create_pool): + mock_create_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'create-pool', 'name': @@ -73,6 +76,26 @@ def test_process_requests_create_pool_w_pg_num(self, mock_log, replicas=3, pg_num='100') self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.get_osds') + @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, + mock_pool_exists, + mock_create_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', 'name': + 'foo', 'replicas': 3, + 'pg_num': 300}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_create_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num='100') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + @mock.patch('ceph_broker.create_pool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') From d6b0998b3752cbc31b61e524da04f9641b47f60b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 18:48:34 +0000 Subject: [PATCH 0909/2699] sync ch from lp:~hopem/charm-helpers/lp1517846.2 --- .../contrib/storage/linux/ceph.py | 52 ++++++++++++------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index cd737bbb..1235389e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -26,6 +26,7 @@ import os import shutil +import six import json import time import uuid @@ -125,29 +126,37 @@ def get_osds(service): return None -def create_pool(service, name, replicas=3): +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + 
cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] check_call(cmd) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) + update_pool(service, name, settings={'size': str(replicas)}) def delete_pool(service, name): @@ -413,9 +422,16 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3): + def add_op_create_pool(self, name, replica_count=3, pg_num=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + """ self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count}) + 'replicas': replica_count, 'pg_num': pg_num}) def set_ops(self, ops): """Set request ops to provided value. @@ -433,8 +449,8 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op']: - if self.ops[req_no][key] != other.ops[req_no][key]: + for key in ['replicas', 'name', 'op', 'pg_num']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: return False From ec8740bee8ee9e0cad2a688295107b5818c06a1d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 19:14:57 +0000 Subject: [PATCH 0910/2699] more sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index bfed4aaa..1235389e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,9 +422,16 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3): + def add_op_create_pool(self, name, replica_count=3, pg_num=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. 
+ """ self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count}) + 'replicas': replica_count, 'pg_num': pg_num}) def set_ops(self, ops): """Set request ops to provided value. @@ -442,8 +449,8 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op']: - if self.ops[req_no][key] != other.ops[req_no][key]: + for key in ['replicas', 'name', 'op', 'pg_num']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: return False From 0d0d964382c70ebdc440fd5b062af00b323ee89b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 19:14:57 +0000 Subject: [PATCH 0911/2699] more sync --- .../charmhelpers/contrib/storage/linux/ceph.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index bfed4aaa..1235389e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,9 +422,16 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3): + def add_op_create_pool(self, name, replica_count=3, pg_num=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + """ self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count}) + 'replicas': replica_count, 'pg_num': pg_num}) def set_ops(self, ops): """Set request ops to provided value. @@ -442,8 +449,8 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op']: - if self.ops[req_no][key] != other.ops[req_no][key]: + for key in ['replicas', 'name', 'op', 'pg_num']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: return False From 2f8ccc248318ec23fd6b4c0b012fca5796e1d5ac Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 20:01:19 +0000 Subject: [PATCH 0912/2699] added full list of rgw pools --- ceph-radosgw/hooks/ceph.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 911810b8..87395bbc 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -234,6 +234,10 @@ def get_create_rgw_pools_rq(): When RGW creates its own pools it will create them with non-optimal settings (LP: #1476749). + + NOTE: see http://docs.ceph.com/docs/master/radosgw/config-ref/#pools and + http://docs.ceph.com/docs/master/radosgw/config/#create-pools for + list of supported/required pools. 
""" rq = CephBrokerRq() replicas = config('ceph-osd-replication-count') @@ -247,8 +251,20 @@ def get_create_rgw_pools_rq(): # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data - light = ['.rgw', '.rgw.buckets.index', '.rgw.control', '.rgw.gc', - '.rgw.root'] + light = ['.rgw', + '.rgw.root', + '.rgw.control', + '.rgw.gc', + '.rgw.buckets', + '.rgw.buckets.index', + '.rgw.buckets.extra', + '.log', + '.intent-log' + '.usage', + '.users' + '.users.email' + '.users.swift' + '.users.uid'] for pool in light: rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=100) From 04a6c77ee7d752eb18a7e5c2cecc481676a7170a Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 19 Nov 2015 20:18:10 +0000 Subject: [PATCH 0913/2699] make lightweight pool pg_num configurable --- ceph-radosgw/config.yaml | 14 ++++++++++++++ ceph-radosgw/hooks/ceph.py | 3 ++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 325ccfd8..ea0bdd17 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -107,3 +107,17 @@ options: it stores within RGW pools. Note that once the RGW pools have been created, changing this value will not have any effect (although it can be changed in ceph by manually configuring your ceph cluster). + rgw-lightweight-pool-pg-num: + type: int + default: 64 + description: | + When the Rados Gatway is installed it, by default, creates pools with + pg_num 8 which, in the majority of cases is suboptimal. A few rgw pools + tend to carry more data than others e.g. .rgw.buckets tends to be larger + than most. So, for pools with greater requirements than others the charm + will apply the optimal value i.e. corresponding to the number of OSDs + up+in the cluster at the time the pool is created. For others it will use + this value which can be altered depending on how big you cluster is. Note + that once a pool has been created, changes to this setting will be + ignored. 
+ diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 87395bbc..335716c2 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -265,7 +265,8 @@ def get_create_rgw_pools_rq(): '.users.email', '.users.swift', '.users.uid'] + pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: - rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=100) + rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num) return rq From 7ffe13e4dd3bd04ecf11eedcc8519ccd951394da Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Mon, 23 Nov 2015 17:12:54 +0800 Subject: [PATCH 0914/2699] Sync charmhelpers to update storage helpers --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 4 ++-- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 454b52ae..e70f1ca5 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -637,7 +637,7 @@ def unit_private_ip(): @cached -def storage_get(attribute="", storage_id=""): +def storage_get(attribute=None, storage_id=None): """Get storage attributes""" _args = ['storage-get', '--format=json'] if storage_id: @@ -651,7 +651,7 @@ def storage_get(attribute="", storage_id=""): @cached -def storage_list(storage_name=""): +def storage_list(storage_name=None): """List the storage IDs for the unit""" _args = ['storage-list', '--format=json'] if storage_name: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index 3531315a..8ec69692 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -64,11 +64,15 @@ def branch(self, source, dest): except Exception as e: raise e - def install(self, source): + def install(self, source, dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: From 3fb67101c72fc89ae3229b70dd6548a3e884fcae Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Mon, 23 Nov 2015 17:12:54 +0800 Subject: [PATCH 0915/2699] Sync charmhelpers to update storage helpers --- ceph-mon/hooks/charmhelpers/core/hookenv.py | 4 ++-- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 454b52ae..e70f1ca5 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -637,7 +637,7 @@ def unit_private_ip(): @cached -def storage_get(attribute="", storage_id=""): +def storage_get(attribute=None, storage_id=None): """Get storage attributes""" _args = ['storage-get', '--format=json'] if storage_id: @@ -651,7 +651,7 @@ def storage_get(attribute="", storage_id=""): @cached -def storage_list(storage_name=""): +def storage_list(storage_name=None): """List the storage IDs for the unit""" _args = ['storage-list', '--format=json'] if storage_name: diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index 
3531315a..8ec69692 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -64,11 +64,15 @@ def branch(self, source, dest): except Exception as e: raise e - def install(self, source): + def install(self, source, dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: From d171d2e981fe30558ef5805bc63912a8134f89bb Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Mon, 23 Nov 2015 17:13:18 +0800 Subject: [PATCH 0916/2699] Add osd-devices and osd-journal block storage Add the "osd-devices" block-type storage, with minimum of 0 and no maximum. Volumes assigned to a unit will be added as OSDs. Also, add the "osd-journal" block-type storage, with minimum of 0 and maximum of 1. The osd-journal storage, if supplied, will be used in favour of osd-journal configuration if supplied. For now, handling of osd-journal is static, just as the configuration method was before. Removing or adding the journal after deployment is not currently supported. This is possible with further changes, but requires stopping Ceph and migrating the existing journal, and so out of scope here. --- ceph-proxy/config.yaml | 3 +- ceph-proxy/hooks/ceph_hooks.py | 42 +++++++++++++++---- ceph-proxy/hooks/osd-devices-storage-attached | 1 + .../hooks/osd-devices-storage-detaching | 1 + ceph-proxy/metadata.yaml | 9 ++++ 5 files changed, 48 insertions(+), 8 deletions(-) create mode 120000 ceph-proxy/hooks/osd-devices-storage-attached create mode 120000 ceph-proxy/hooks/osd-devices-storage-detaching diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index b1880de1..891c7400 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -40,7 +40,8 @@ options: The devices to format and set up as osd volumes. . These devices are the range of devices that will be checked for and - used across all service units. + used across all service units, in addition to any volumes attached + via the --storage flag during deployment. . For ceph >= 0.56.6 these can also be directories instead of devices - the charm assumes anything not starting with /dev is a directory instead. 
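Note: the hook changes below lean on the storage-get/storage-list helpers synced in the two charm-helpers patches above. A minimal sketch of how the new code resolves attached volumes (the storage ids and device path shown are hypothetical):

    from charmhelpers.core.hookenv import storage_get, storage_list

    # Each attached 'osd-devices' storage instance exposes its block device
    # path through the 'location' attribute.
    for sid in storage_list('osd-devices'):   # e.g. ['osd-devices/0']
        dev = storage_get('location', sid)    # e.g. '/dev/vdb'
        # dev is then osdized alongside any devices listed in the
        # osd-devices config option.
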
diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 9d733637..8a041cbe 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -29,6 +29,8 @@ relations_of_type, status_set, local_unit, + storage_get, + storage_list ) from charmhelpers.core.host import ( service_restart, @@ -145,7 +147,7 @@ def config_changed(): if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) - osd_journal = config('osd-journal') + osd_journal = get_osd_journal() if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) @@ -158,14 +160,34 @@ def config_changed(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() + storage_changed() + + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + + +@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching') +def storage_changed(): if ceph.is_bootstrapped(): for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), + ceph.osdize(dev, config('osd-format'), get_osd_journal(), reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) - if relations_of_type('nrpe-external-master'): - update_nrpe_config() + +def get_osd_journal(): + ''' + Returns the block device path to use for the OSD journal, if any. + + If there is an osd-journal storage instance attached, it will be + used as the journal. Otherwise, the osd-journal configuration will + be returned. + ''' + storage_ids = storage_list('osd-journal') + if storage_ids: + # There can be at most one osd-journal storage instance. + return storage_get('location', storage_ids[0]) + return config('osd-journal') def get_mon_hosts(): @@ -207,9 +229,15 @@ def reformat_osd(): def get_devices(): if config('osd-devices'): - return config('osd-devices').split(' ') + devices = config('osd-devices').split(' ') else: - return [] + devices = [] + # List storage instances for the 'osd-devices' + # store declared for this charm too, and add + # their block device paths to the list. 
+ storage_ids = storage_list('osd-devices') + devices.extend((storage_get('location', s) for s in storage_ids)) + return devices @@ -231,7 +259,7 @@ def mon_relation(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), + ceph.osdize(dev, config('osd-format'), get_osd_journal(), reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) notify_osds() diff --git a/ceph-proxy/hooks/osd-devices-storage-attached b/ceph-proxy/hooks/osd-devices-storage-attached new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/osd-devices-storage-attached @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/osd-devices-storage-detaching b/ceph-proxy/hooks/osd-devices-storage-detaching new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/osd-devices-storage-detaching @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 3c5f9262..b8843305 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -25,3 +25,12 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container +storage: + osd-devices: + type: block + multiple: + range: 0- + osd-journal: + type: block + multiple: + range: 0-1 From 3c12bab15dc9db03406aaeab0fb88f18f60a3b94 Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Mon, 23 Nov 2015 17:13:18 +0800 Subject: [PATCH 0917/2699] Add osd-devices and osd-journal block storage Add the "osd-devices" block-type storage, with a minimum of 0 and no maximum. Volumes assigned to a unit will be added as OSDs. Also, add the "osd-journal" block-type storage, with a minimum of 0 and a maximum of 1. The osd-journal storage, if supplied, will be used in favour of the osd-journal configuration option. For now, handling of osd-journal is static, just as the configuration method was before. Removing or adding the journal after deployment is not currently supported. This is possible with further changes, but requires stopping Ceph and migrating the existing journal, and so is out of scope here. --- ceph-mon/config.yaml | 3 +- ceph-mon/hooks/ceph_hooks.py | 42 ++++++++++++++++---- ceph-mon/hooks/osd-devices-storage-attached | 1 + ceph-mon/hooks/osd-devices-storage-detaching | 1 + ceph-mon/metadata.yaml | 9 +++++ 5 files changed, 48 insertions(+), 8 deletions(-) create mode 120000 ceph-mon/hooks/osd-devices-storage-attached create mode 120000 ceph-mon/hooks/osd-devices-storage-detaching diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index b1880de1..891c7400 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -40,7 +40,8 @@ options: The devices to format and set up as osd volumes. . These devices are the range of devices that will be checked for and - used across all service units. + used across all service units, in addition to any volumes attached + via the --storage flag during deployment. . For ceph >= 0.56.6 these can also be directories instead of devices - the charm assumes anything not starting with /dev is a directory instead.
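As the commit message above notes, the two storage bindings behave differently: osd-devices storage extends the configured device list, while osd-journal storage overrides the osd-journal config option. A standalone sketch of the override (editorial illustration, not part of the patch; storage IDs and paths are invented):

    JOURNAL_LOCATIONS = {'osd-journal/0': '/dev/vdd'}  # hypothetical mapping

    def get_osd_journal(journal_storage_ids, configured_journal):
        # metadata.yaml caps osd-journal at one instance (range: 0-1), so
        # the first attached instance, if any, wins over the config option.
        if journal_storage_ids:
            return JOURNAL_LOCATIONS[journal_storage_ids[0]]
        return configured_journal

    print(get_osd_journal(['osd-journal/0'], '/dev/sde'))  # -> /dev/vdd
    print(get_osd_journal([], '/dev/sde'))                 # -> /dev/sde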
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 9d733637..8a041cbe 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -29,6 +29,8 @@ relations_of_type, status_set, local_unit, + storage_get, + storage_list ) from charmhelpers.core.host import ( service_restart, @@ -145,7 +147,7 @@ def config_changed(): if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) - osd_journal = config('osd-journal') + osd_journal = get_osd_journal() if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and os.path.exists(osd_journal)): ceph.zap_disk(osd_journal) @@ -158,14 +160,34 @@ def config_changed(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() + storage_changed() + + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + + +@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching') +def storage_changed(): if ceph.is_bootstrapped(): for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), + ceph.osdize(dev, config('osd-format'), get_osd_journal(), reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) - if relations_of_type('nrpe-external-master'): - update_nrpe_config() + +def get_osd_journal(): + ''' + Returns the block device path to use for the OSD journal, if any. + + If there is an osd-journal storage instance attached, it will be + used as the journal. Otherwise, the osd-journal configuration will + be returned. + ''' + storage_ids = storage_list('osd-journal') + if storage_ids: + # There can be at most one osd-journal storage instance. + return storage_get('location', storage_ids[0]) + return config('osd-journal') def get_mon_hosts(): @@ -207,9 +229,15 @@ def reformat_osd(): def get_devices(): if config('osd-devices'): - return config('osd-devices').split(' ') + devices = config('osd-devices').split(' ') else: - return [] + devices = [] + # List storage instances for the 'osd-devices' + # store declared for this charm too, and add + # their block device paths to the list. 
+ storage_ids = storage_list('osd-devices') + devices.extend((storage_get('location', s) for s in storage_ids)) + return devices @hooks.hook('mon-relation-joined') @@ -231,7 +259,7 @@ def mon_relation(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), config('osd-journal'), + ceph.osdize(dev, config('osd-format'), get_osd_journal(), reformat_osd(), config('ignore-device-errors')) ceph.start_osds(get_devices()) notify_osds() diff --git a/ceph-mon/hooks/osd-devices-storage-attached b/ceph-mon/hooks/osd-devices-storage-attached new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/osd-devices-storage-attached @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-devices-storage-detaching b/ceph-mon/hooks/osd-devices-storage-detaching new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/osd-devices-storage-detaching @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 3c5f9262..b8843305 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -25,3 +25,12 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container +storage: + osd-devices: + type: block + multiple: + range: 0- + osd-journal: + type: block + multiple: + range: 0-1 From dae3ebf7503ff5be03f268b8dd77be5eb8ab9f78 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 25 Nov 2015 11:09:14 +0000 Subject: [PATCH 0918/2699] [hopem,r=] Make RGW port configurable. Closes-Bug: 1517551 --- ceph-radosgw/config.yaml | 5 ++++ ceph-radosgw/hooks/ceph_radosgw_context.py | 11 ++++---- ceph-radosgw/hooks/hooks.py | 25 +++++++++++++------ ceph-radosgw/templates/ceph.conf | 2 +- ceph-radosgw/{files => templates}/ports.conf | 2 +- ceph-radosgw/templates/rgw | 2 +- .../unit_tests/test_ceph_radosgw_context.py | 9 ++++--- ceph-radosgw/unit_tests/test_hooks.py | 2 ++ 8 files changed, 40 insertions(+), 18 deletions(-) rename ceph-radosgw/{files => templates}/ports.conf (88%) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 8a9e76eb..2860510e 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -21,6 +21,11 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + port: + type: int + default: 80 + description: | + The port that the RADOS Gateway will listen on. 
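A note on the port arithmetic used throughout this patch: when haproxy (and optionally apache for TLS) sits in front of radosgw, the backend must bind a port other than the configured public one. determine_apache_port() steps the port down for each proxy layer, which is why the unit-test expectations later in this patch use port 70 against the default public port of 80. A rough standalone sketch of that derivation follows; the 10-per-layer step is an assumption about the charm-helpers internals of this era, not something this patch defines.

    def determine_apache_port_sketch(public_port, singlenode_mode=False,
                                     clustered=False, https=False):
        # Each proxy layer in front of the service shifts the bind port
        # down by 10 (assumed charm-helpers convention).
        layers = 1 if (singlenode_mode or clustered) else 0
        layers += 1 if https else 0
        return public_port - 10 * layers

    print(determine_apache_port_sketch(80, singlenode_mode=True))  # -> 70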
# Keystone integration operator-roles: default: "Member,Admin" diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 1e079904..6adac61d 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -21,18 +21,17 @@ class HAProxyContext(context.HAProxyContext): def __call__(self): ctxt = super(HAProxyContext, self).__call__() + port = config('port') # Apache ports - a_cephradosgw_api = determine_apache_port(80, - singlenode_mode=True) + a_cephradosgw_api = determine_apache_port(port, singlenode_mode=True) port_mapping = { - 'cephradosgw-server': [ - 80, a_cephradosgw_api] + 'cephradosgw-server': [port, a_cephradosgw_api] } ctxt['cephradosgw_bind_port'] = determine_api_port( - 80, + port, singlenode_mode=True, ) @@ -99,6 +98,8 @@ def __call__(self): 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), 'embedded_webserver': config('use-embedded-webserver'), + 'port': determine_apache_port(config('port'), + singlenode_mode=True) } if self.context_complete(ctxt): diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8898706b..18b8a95b 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -34,6 +34,9 @@ lsb_release, restart_on_change, ) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, +) from utils import ( render_template, enable_pocket, @@ -58,6 +61,9 @@ from charmhelpers.contrib.openstack.utils import ( set_os_workload_status, ) + +APACHE_PORTS_CONF = '/etc/apache2/ports.conf' + hooks = Hooks() CONFIGS = register_configs() @@ -125,7 +131,8 @@ def install(): def emit_apacheconf(): apachecontext = { - "hostname": unit_get('private-address') + "hostname": unit_get('private-address'), + "port": determine_apache_port(config('port'), singlenode_mode=True) } site_conf = '/etc/apache2/sites-available/rgw' if is_apache_24(): @@ -152,7 +159,11 @@ def apache_reload(): def apache_ports(): - shutil.copy('files/ports.conf', '/etc/apache2/ports.conf') + portscontext = { + "port": determine_apache_port(config('port'), singlenode_mode=True) + } + with open(APACHE_PORTS_CONF, 'w') as portsconf: + portsconf.write(render_template('ports.conf', portscontext)) @hooks.hook('upgrade-charm', @@ -188,22 +199,22 @@ def mon_relation(): @hooks.hook('gateway-relation-joined') def gateway_relation(): relation_set(hostname=unit_get('private-address'), - port=80) + port=config('port')) def start(): subprocess.call(['service', 'radosgw', 'start']) - open_port(port=80) + open_port(port=config('port')) def stop(): subprocess.call(['service', 'radosgw', 'stop']) - open_port(port=80) + open_port(port=config('port')) def restart(): subprocess.call(['service', 'radosgw', 'restart']) - open_port(port=80) + open_port(port=config('port')) @hooks.hook('identity-service-relation-joined') @@ -212,7 +223,7 @@ def identity_joined(relid=None): log('Integration with keystone requires ceph >= 0.55') sys.exit(1) - port = 80 + port = config('port') admin_url = '%s:%i/swift' % (canonical_url(None, ADMIN), port) internal_url = '%s:%s/swift/v1' % \ (canonical_url(None, INTERNAL), port) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index e1c95fce..4213d324 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -17,7 +17,7 @@ keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log {% if embedded_webserver %} 
-rgw frontends = civetweb port=70 +rgw frontends = civetweb port={{ port }} {% else %} # Turn off 100-continue optimization as stock mod_fastcgi # does not support it diff --git a/ceph-radosgw/files/ports.conf b/ceph-radosgw/templates/ports.conf similarity index 88% rename from ceph-radosgw/files/ports.conf rename to ceph-radosgw/templates/ports.conf index 83a775fe..9b011ec1 100644 --- a/ceph-radosgw/files/ports.conf +++ b/ceph-radosgw/templates/ports.conf @@ -1,4 +1,4 @@ -Listen 70 +Listen {{ port }} Listen 443 diff --git a/ceph-radosgw/templates/rgw b/ceph-radosgw/templates/rgw index 6101e1bc..7a3e4724 100644 --- a/ceph-radosgw/templates/rgw +++ b/ceph-radosgw/templates/rgw @@ -2,7 +2,7 @@ FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock - + ServerName {{ hostname }} ServerAdmin ceph@ubuntu.com DocumentRoot /var/www diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 4394f683..4a043418 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -165,7 +165,8 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'port': 70 } self.assertEqual(expect, mon_ctxt()) @@ -197,7 +198,8 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'port': 70 } self.assertEqual(expect, mon_ctxt()) @@ -221,6 +223,7 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'port': 70 } self.assertEqual(expect, mon_ctxt()) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 9b17cd10..0f428ef3 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -127,6 +127,7 @@ def test_emit_apacheconf(self): self.unit_get.return_value = '10.0.0.1' apachecontext = { "hostname": '10.0.0.1', + "port": 70, } vhost_file = '/etc/apache2/sites-available/rgw.conf' with patch_open() as (_open, _file): @@ -167,6 +168,7 @@ def test_apache_reload(self): ] self.subprocess.call.assert_has_calls(calls) + @patch.object(ceph_hooks, 'apache_ports', lambda *args: True) def test_config_changed(self): _install_packages = self.patch('install_packages') _emit_apacheconf = self.patch('emit_apacheconf') From ef3628e11b41fb0228a271a9a2e2d1ed1f9f0cd4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 26 Nov 2015 16:20:26 +0000 Subject: [PATCH 0919/2699] [hopem,r=] Add debug and verbose config options. Closes-Bug: 1520236 --- ceph-radosgw/config.yaml | 4 ++++ ceph-radosgw/hooks/ceph_radosgw_context.py | 1 + ceph-radosgw/templates/ceph.conf | 1 + ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 9 ++++++--- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 8a9e76eb..6294eb65 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -1,4 +1,8 @@ options: + loglevel: + default: 1 + type: int + description: RadosGW debug level. Max is 20. 
source: type: string default: diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 1e079904..5cdd82a0 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -99,6 +99,7 @@ def __call__(self): 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), 'embedded_webserver': config('use-embedded-webserver'), + 'loglevel': config('loglevel'), } if self.context_complete(ctxt): diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index e1c95fce..e62e1e25 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -10,6 +10,7 @@ mon host = {{ mon_hosts }} log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +debug rgw = {{ loglevel }}/5 [client.radosgw.gateway] host = {{ hostname }} diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 4394f683..7a58d4e9 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -165,7 +165,8 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'loglevel': 1, } self.assertEqual(expect, mon_ctxt()) @@ -197,7 +198,8 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'loglevel': 1, } self.assertEqual(expect, mon_ctxt()) @@ -221,6 +223,7 @@ def _relation_get(attr, unit, rid): 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, - 'use_syslog': 'false' + 'use_syslog': 'false', + 'loglevel': 1, } self.assertEqual(expect, mon_ctxt()) From b4da6851632670460663e95cbb48e2b9f877d224 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 2 Dec 2015 11:09:50 +0000 Subject: [PATCH 0920/2699] [hopem,r=] Don't disable HTTP 100-Continue if using ceph optimised packages with Apache. 
Closes-Bug: 1515387 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 7 +++++++ ceph-radosgw/templates/ceph.conf | 2 +- ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 5cdd82a0..d4dd4a6d 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -102,6 +102,13 @@ def __call__(self): 'loglevel': config('loglevel'), } + if (config('use-ceph-optimised-packages') and + not config('use-embedded-webserver')): + ctxt['disable_100_continue'] = False + else: + # NOTE: currently only applied if NOT using embedded webserver + ctxt['disable_100_continue'] = True + if self.context_complete(ctxt): return ctxt diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index e62e1e25..af26e72c 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -19,7 +19,7 @@ rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log {% if embedded_webserver %} rgw frontends = civetweb port=70 -{% else %} +{% elif disable_100_continue %} # Turn off 100-continue optimization as stock mod_fastcgi # does not support it rgw print continue = false diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 7a58d4e9..f7e873b6 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -162,6 +162,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'cephx', 'embedded_webserver': False, + 'disable_100_continue': True, 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, @@ -195,6 +196,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'none', 'embedded_webserver': False, + 'disable_100_continue': True, 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, @@ -220,6 +222,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'cephx', 'embedded_webserver': False, + 'disable_100_continue': True, 'hostname': '10.0.0.10', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, From 44052bcd2ec472353fe0bf474d1220651c4429bb Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 3 Dec 2015 15:02:21 -0800 Subject: [PATCH 0921/2699] Fix lp:1522130 Add sane haproxy timeout defaults and make them configurable. --- ceph-radosgw/config.yaml | 24 +++++++++++++++++++ .../contrib/openstack/templates/haproxy.cfg | 12 ++++++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 6294eb65..d2421afc 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -103,3 +103,27 @@ options: the following public endpoint for the ceph-radosgw: https://files.example.com:80/swift/v1 + haproxy-server-timeout: + type: int + default: + description: | + Server timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 30000ms is used. + haproxy-client-timeout: + type: int + default: + description: | + Client timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 30000ms is used. 
+ haproxy-queue-timeout: + type: int + default: + description: | + Queue timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 5000ms is used. + haproxy-connect-timeout: + type: int + default: + description: | + Connect timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 5000ms is used. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index ad875f16..4a3e2d7a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -12,8 +12,16 @@ defaults option tcplog option dontlognull retries 3 - timeout queue 1000 - timeout connect 1000 +{% if haproxy_queue_timeout -%} + timeout queue {{ haproxy_queue_timeout }} +{% else -%} + timeout queue 5000 +{% endif -%} +{% if haproxy_connect_timeout -%} + timeout connect {{ haproxy_connect_timeout }} +{% else -%} + timeout connect 5000 +{% endif -%} {% if haproxy_client_timeout -%} timeout client {{ haproxy_client_timeout }} {% else -%} From 08497b1b13f73b143e090a8f25e4b872a1d6b1d2 Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 7 Dec 2015 14:51:36 -0800 Subject: [PATCH 0922/2699] Add haproxy context for new timeout values. Fix template whitespace --- .../charmhelpers/contrib/openstack/context.py | 6 +++++ .../contrib/openstack/templates/haproxy.cfg | 25 +++++++++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 49c04de0..1aee3caa 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -626,6 +626,12 @@ def __call__(self): if config('haproxy-client-timeout'): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + if config('prefer-ipv6'): ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 4a3e2d7a..8721d8a1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -12,27 +12,26 @@ defaults option tcplog option dontlognull retries 3 -{% if haproxy_queue_timeout -%} +{%- if haproxy_queue_timeout %} timeout queue {{ haproxy_queue_timeout }} -{% else -%} +{%- else %} timeout queue 5000 -{% endif -%} -{% if haproxy_connect_timeout -%} +{%- endif %} +{%- if haproxy_connect_timeout %} timeout connect {{ haproxy_connect_timeout }} -{% else -%} +{%- else %} timeout connect 5000 -{% endif -%} -{% if haproxy_client_timeout -%} +{%- endif %} +{%- if haproxy_client_timeout %} timeout client {{ haproxy_client_timeout }} -{% else -%} +{%- else %} timeout client 30000 -{% endif -%} - -{% if haproxy_server_timeout -%} +{%- endif %} +{%- if haproxy_server_timeout %} timeout server {{ haproxy_server_timeout }} -{% else -%} +{%- else %} timeout server 30000 -{% endif -%} +{%- endif %} listen stats {{ stat_port }} mode http 
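The template changes above fall back to hard-coded defaults whenever a timeout option is unset. The same effect could be had with Jinja2's built-in default filter, sketched below with illustrative values (an editorial aside, not what the charm does). The charm keeps explicit {% if %}/{% else %} blocks instead, and the whitespace-control fix in PATCH 0922 above exists precisely because those blocks otherwise left stray blank lines in the rendered haproxy.cfg.

    from jinja2 import Template

    t = Template("timeout queue {{ haproxy_queue_timeout|default(5000) }}\n"
                 "timeout connect {{ haproxy_connect_timeout|default(5000) }}")
    print(t.render())                            # falls back to 5000/5000
    print(t.render(haproxy_queue_timeout=9000))  # value passed by the context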
From b78ca2ad01e2ad024c535a771c9620d778f0b447 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 21 Dec 2015 11:59:34 +0100 Subject: [PATCH 0923/2699] * Handle multiple journals * Select least-used journal disk instead of blind RR * Add safeguard for OSD journals * Avoid zapping journals repeatedly, add check for journal partition type * Check that journal devices don't overlap with data devices --- ceph-osd/hooks/ceph.py | 44 ++++++++++++++++++++++------ ceph-osd/hooks/ceph_hooks.py | 56 +++++++++++++++++++++++++++--------- 2 files changed, 79 insertions(+), 21 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index f9448db2..269e5fbf 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -18,7 +18,7 @@ ) from charmhelpers.core.hookenv import ( log, - ERROR, WARNING, + ERROR, WARNING, DEBUG, status_set, ) from charmhelpers.contrib.storage.linux.utils import ( @@ -109,16 +109,20 @@ def add_bootstrap_hint(peer): 'btrfs' ] +CEPH_PARTITIONS = [ + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal + ] def is_osd_disk(dev): try: info = subprocess.check_output(['sgdisk', '-i', '1', dev]) info = info.split("\n") # IGNORE:E1103 for line in info: - if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): - return True + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True except subprocess.CalledProcessError: pass return False @@ -307,6 +311,28 @@ def update_monfs(): with open(upstart, 'w'): pass +def maybe_zap_journal(journal_dev): + if (is_osd_disk(journal_dev)): + log('Looks like {} is already an OSD data or journal, skipping.'.format( + journal_dev)) + return + zap_disk(journal_dev) + log("Zapped journal device {}".format(journal_dev)) + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = subprocess.check_output(cmd).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + +def find_least_used_journal(journal_devices): + usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] def osdize(dev, osd_format, osd_journal, reformat_osd=False, ignore_errors=False): @@ -327,7 +353,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, return if (is_osd_disk(dev) and not reformat_osd): - log('Looks like {} is already an OSD, skipping.'.format(dev)) + log('Looks like {} is already an OSD data or journal, skipping.'.format(dev)) return if is_device_mounted(dev): @@ -344,8 +370,9 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if reformat_osd: cmd.append('--zap-disk') cmd.append(dev) - if osd_journal and os.path.exists(osd_journal): - cmd.append(osd_journal) + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) else: # Just provide the device - no other options # for older versions of ceph @@ -354,6 +381,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, zap_disk(dev) try: + log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) except subprocess.CalledProcessError as e: if ignore_errors: diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index cb3e08be..f35bb705 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ 
-16,6 +16,7 @@ from charmhelpers.core.hookenv import ( log, ERROR, + DEBUG, config, relation_ids, related_units, @@ -103,6 +104,28 @@ def emit_cephconf(): JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +def read_zapped_journals(): + if os.path.exists(JOURNAL_ZAPPED): + with open(JOURNAL_ZAPPED) as zapfile: + zapped = set( + filter(None, + [l.strip() for l in zapfile.readlines()])) + log("read zapped: {}".format(zapped), level=DEBUG) + return zapped + return set() + +def write_zapped_journals(journal_devs): + with open(JOURNAL_ZAPPED, 'w') as zapfile: + log("write zapped: {}".format(journal_devs), + level=DEBUG) + zapfile.write('\n'.join(sorted(list(journal_devs)))) + +def check_overlap(journaldevs, datadevs): + if not journaldevs.isdisjoint(datadevs): + msg = "Journal/data devices mustn't overlap; journal: {0}, data: {1}".format( + journaldevs, datadevs) + log(msg, level=ERROR) + raise ValueError(msg) @hooks.hook('config-changed') def config_changed(): @@ -121,20 +144,24 @@ def config_changed(): e_mountpoint = config('ephemeral-unmount') if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): umount(e_mountpoint) - - osd_journal = config('osd-journal') - if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): - ceph.zap_disk(osd_journal) - with open(JOURNAL_ZAPPED, 'w') as zapped: - zapped.write('DONE') + prepare_disks_and_activate() + +def prepare_disks_and_activate(): + osd_journal = get_journal_devices() + check_overlap(osd_journal, set(get_devices())) + log("got journal devs: {}".format(osd_journal), level=DEBUG) + already_zapped = read_zapped_journals() + non_zapped = osd_journal - already_zapped + for journ in non_zapped: + ceph.maybe_zap_journal(journ) + write_zapped_journals(osd_journal) if ceph.is_bootstrapped(): log('ceph bootstrapped, rescanning disks') emit_cephconf() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), - config('osd-journal'), config('osd-reformat'), + osd_journal, config('osd-reformat'), config('ignore-device-errors')) ceph.start_osds(get_devices()) @@ -184,6 +211,13 @@ def get_devices(): else: return [] +def get_journal_devices(): + osd_journal = config('osd-journal') + if not osd_journal: + return set() + osd_journal = [l.strip() for l in config('osd-journal').split(' ')] + osd_journal = set(filter(os.path.exists, osd_journal)) + return osd_journal @hooks.hook('mon-relation-changed', 'mon-relation-departed') @@ -193,11 +227,7 @@ def mon_relation(): log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), - config('osd-journal'), config('osd-reformat'), - config('ignore-device-errors')) - ceph.start_osds(get_devices()) + prepare_disks_and_activate() else: log('mon cluster has not yet provided conf') From beb5efde5a9f1ea9471f1f064183dfd58571394b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 26 Dec 2015 19:21:00 -0500 Subject: [PATCH 0924/2699] [hopem,r=] Configure rados gateway nss with CA and signing certs from keystone so that it can decrypt revoked token list from keystone. 
Partially-Closes-Bug: 1520339 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 7 ++ ceph-radosgw/hooks/hooks.py | 123 ++++++++++++++++++++- ceph-radosgw/templates/ceph.conf | 4 +- ceph-radosgw/unit_tests/test_hooks.py | 18 ++- 4 files changed, 141 insertions(+), 11 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 1e079904..839731e0 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -13,6 +13,7 @@ relation_get, unit_get, ) +import os import socket import dns.resolver @@ -101,6 +102,12 @@ def __call__(self): 'embedded_webserver': config('use-embedded-webserver'), } + certs_path = '/var/lib/ceph/nss' + paths = [os.path.join(certs_path, 'ca.pem'), + os.path.join(certs_path, 'signing_certificate.pem')] + if all([os.path.isfile(p) for p in paths]): + ctxt['cms'] = True + if self.context_complete(ctxt): return ctxt diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8898706b..91c43bd0 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -13,14 +13,19 @@ import glob import os import ceph + from charmhelpers.core.hookenv import ( relation_get, relation_ids, + related_units, config, unit_get, open_port, relation_set, - log, ERROR, + log, + DEBUG, + WARNING, + ERROR, Hooks, UnregisteredHookError, status_set, ) @@ -43,9 +48,11 @@ REQUIRED_INTERFACES, check_optional_relations, ) - from charmhelpers.payload.execd import execd_preinstall -from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.core.host import ( + cmp_pkgrevno, + mkdir, +) from charmhelpers.contrib.network.ip import ( get_iface_for_address, @@ -89,6 +96,11 @@ def install_ceph_optimised_packages(): 'radosgw', 'ntp', 'haproxy', + 'libnss3-tools', + 'python-keystoneclient', + 'python-six', # Ensures correct version is installed for precise + # since python-keystoneclient does not pull in icehouse + # version ] APACHE_PACKAGES = [ @@ -155,6 +167,99 @@ def apache_ports(): shutil.copy('files/ports.conf', '/etc/apache2/ports.conf') +def setup_keystone_certs(unit=None, rid=None): + """ + Get CA and signing certs from Keystone used to decrypt revoked token list. 
+ """ + import requests + try: + # Kilo and newer + from keystoneclient.exceptions import ConnectionRefused + except ImportError: + # Juno and older + from keystoneclient.exceptions import ConnectionError as \ + ConnectionRefused + + from keystoneclient.v2_0 import client + + certs_path = '/var/lib/ceph/nss' + mkdir(certs_path) + + rdata = relation_get(unit=unit, rid=rid) + auth_protocol = rdata.get('auth_protocol', 'http') + + required_keys = ['admin_token', 'auth_host', 'auth_port'] + settings = {} + for key in required_keys: + settings[key] = rdata.get(key) + + if not all(settings.values()): + log("Missing relation settings (%s) - skipping cert setup" % + (', '.join([k for k in settings.keys() if not settings[k]])), + level=DEBUG) + return + + auth_endpoint = "%s://%s:%s/v2.0" % (auth_protocol, settings['auth_host'], + settings['auth_port']) + keystone = client.Client(token=settings['admin_token'], + endpoint=auth_endpoint) + + # CA + try: + # Kilo and newer + ca_cert = keystone.certificates.get_ca_certificate() + except AttributeError: + # Juno and older + ca_cert = requests.request('GET', auth_endpoint + + '/certificates/ca').text + except ConnectionRefused: + log("Error connecting to keystone - skipping ca/signing cert setup", + level=WARNING) + return + + if ca_cert: + log("Updating ca cert from keystone", level=DEBUG) + ca = os.path.join(certs_path, 'ca.pem') + with open(ca, 'w') as fd: + fd.write(ca_cert) + + out = subprocess.check_output(['openssl', 'x509', '-in', ca, + '-pubkey']) + p = subprocess.Popen(['certutil', '-d', certs_path, '-A', '-n', 'ca', + '-t', 'TCu,Cu,Tuw'], stdin=subprocess.PIPE) + p.communicate(out) + else: + log("No ca cert available from keystone", level=DEBUG) + + # Signing cert + try: + # Kilo and newer + signing_cert = keystone.certificates.get_signing_certificate() + except AttributeError: + # Juno and older + signing_cert = requests.request('GET', auth_endpoint + + '/certificates/signing').text + except ConnectionRefused: + log("Error connecting to keystone - skipping ca/signing cert setup", + level=WARNING) + return + + if signing_cert: + log("Updating signing cert from keystone", level=DEBUG) + signing_cert_path = os.path.join(certs_path, 'signing_certificate.pem') + with open(signing_cert_path, 'w') as fd: + fd.write(signing_cert) + + out = subprocess.check_output(['openssl', 'x509', '-in', + signing_cert_path, '-pubkey']) + p = subprocess.Popen(['certutil', '-A', '-d', certs_path, '-n', + 'signing_cert', '-t', 'P,P,P'], + stdin=subprocess.PIPE) + p.communicate(out) + else: + log("No signing cert available from keystone", level=DEBUG) + + @hooks.hook('upgrade-charm', 'config-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], @@ -170,8 +275,9 @@ def config_changed(): apache_modules() apache_ports() apache_reload() + for r_id in relation_ids('identity-service'): - identity_joined(relid=r_id) + identity_changed(relid=r_id) @hooks.hook('mon-relation-departed', @@ -225,10 +331,17 @@ def identity_joined(relid=None): requested_roles=config('operator-roles'), relation_id=relid) + if relid: + for unit in related_units(relid): + setup_keystone_certs(unit=unit, rid=relid) + else: + setup_keystone_certs() + @hooks.hook('identity-service-relation-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) -def identity_changed(): +def identity_changed(relid=None): + identity_joined(relid) CONFIGS.write_all() restart() diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index e1c95fce..a4626d58 100644 --- 
a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -30,5 +30,7 @@ rgw keystone accepted roles = {{ user_roles }} rgw keystone token cache size = {{ cache_size }} rgw keystone revocation interval = {{ revocation_check_interval }} rgw s3 auth use keystone = true -#nss db path = /var/lib/ceph/nss +{% if cms -%} +nss db path = /var/lib/ceph/nss +{% endif %} {% endif %} diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 9b17cd10..fecc8b71 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -43,6 +43,7 @@ 'relation_ids', 'relation_set', 'relation_get', + 'related_units', 'render_template', 'shutil', 'status_set', @@ -108,9 +109,8 @@ def test_install_optimised_packages_embedded(self): self.add_source.assert_called_with('distro', 'secretkey') self.assertTrue(self.apt_update.called) self.assertFalse(_install_packages.called) - self.apt_install.assert_called_with(['radosgw', - 'ntp', - 'haproxy'], fatal=True) + self.apt_install.assert_called_with(ceph_hooks.PACKAGES, + fatal=True) self.apt_purge.assert_called_with(['libapache2-mod-fastcgi', 'apache2']) @@ -167,6 +167,7 @@ def test_apache_reload(self): ] self.subprocess.call.assert_has_calls(calls) + @patch.object(ceph_hooks, 'mkdir', lambda *args: None) def test_config_changed(self): _install_packages = self.patch('install_packages') _emit_apacheconf = self.patch('emit_apacheconf') @@ -221,12 +222,15 @@ def test_restart(self): cmd = ['service', 'radosgw', 'restart'] self.subprocess.call.assert_called_with(cmd) + @patch.object(ceph_hooks, 'setup_keystone_certs') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined_early_version(self, _config): + def test_identity_joined_early_version(self, _config, + mock_setup_keystone_certs): self.cmp_pkgrevno.return_value = -1 ceph_hooks.identity_joined() + self.assertTrue(mock_setup_keystone_certs.called) self.sys.exit.assert_called_with(1) @patch('charmhelpers.contrib.openstack.ip.service_name', @@ -234,6 +238,7 @@ def test_identity_joined_early_version(self, _config): @patch('charmhelpers.contrib.openstack.ip.resolve_address') @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined(self, _config, _resolve_address): + self.related_units = ['unit/0'] self.cmp_pkgrevno.return_value = 1 _resolve_address.return_value = 'myserv' _config.side_effect = self.test_config.get @@ -257,6 +262,7 @@ def test_identity_joined(self, _config, _resolve_address): @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined_public_name(self, _config, _unit_get, _is_clustered): + self.related_units = ['unit/0'] _config.side_effect = self.test_config.get self.test_config.set('os-public-hostname', 'files.example.com') _unit_get.return_value = 'myserv' @@ -271,11 +277,13 @@ def test_identity_joined_public_name(self, _config, _unit_get, relation_id='rid', admin_url='http://myserv:80/swift') - def test_identity_changed(self): + @patch.object(ceph_hooks, 'identity_joined') + def test_identity_changed(self, mock_identity_joined): _restart = self.patch('restart') ceph_hooks.identity_changed() self.CONFIGS.write_all.assert_called_with() self.assertTrue(_restart.called) + self.assertTrue(mock_identity_joined.called) @patch('charmhelpers.contrib.openstack.ip.is_clustered') @patch('charmhelpers.contrib.openstack.ip.unit_get') From 658142de09db1857044bb969248310a1ab354ccd Mon Sep 17 
00:00:00 2001 From: Corey Bryant Date: Mon, 4 Jan 2016 16:25:48 -0500 Subject: [PATCH 0925/2699] [corey.bryant,r=trivial] Sync charm-helpers. --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 +- .../hooks/charmhelpers/contrib/network/ip.py | 42 +- .../contrib/storage/linux/ceph.py | 400 +++++++++++++++++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 44 +- ceph-proxy/hooks/charmhelpers/core/host.py | 28 +- .../charmhelpers/core/services/helpers.py | 16 +- .../hooks/charmhelpers/core/templating.py | 20 +- .../hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 2 +- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 44 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 38 +- .../contrib/openstack/amulet/deployment.py | 8 +- 12 files changed, 551 insertions(+), 115 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 65b1a27e..2f246429 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -296,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'): :param str relation_name: Name of relation nrpe sub joined to """ for rel in relations_of_type(relation_name): - if 'nagios_hostname' in rel: + if 'nagios_host_context' in rel: return rel['nagios_host_context'] @@ -337,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc if os.path.exists(upstart_init): - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + # Don't add a check for these services from neutron-gateway + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 7f3b66b1..998f00c1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -53,7 +53,7 @@ def _validate_cidr(network): def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) + errmsg = ("No IP address found in network(s): %s" % network) raise ValueError(errmsg) @@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). 
@@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False): else: return None - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1235389e..60ae52b8 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,10 +23,11 @@ # James Page # Adam Gandelman # +import bisect +import six import os import shutil -import six import json import time import uuid @@ -73,35 +74,372 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. 
" + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + def __init__(self, message): + super(PoolCreationError, self).__init__(message) - apt_install('ceph-common', fatal=True) +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + # Flush the cache and wait for it to return + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size): + """ + :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for + erasure coded pools + :return: int. The number of pgs to use. 
+ """ + validator(value=pool_size, valid_type=int) + osds = get_osds(self.service) + if not osds: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return 200 + + # Calculate based on Ceph best practices + if osds < 5: + return 128 + elif 5 < osds < 10: + return 512 + elif 10 < osds < 50: + return 4096 + else: + estimate = (osds * 100) / pool_size + # Return the next nearest power of 2 + index = bisect.bisect_right(powers_of_two, estimate) + return powers_of_two[index] + + +class ReplicatedPool(Pool): + def __init__(self, service, name, replicas=2): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + pgs = self.get_pgs(self.replicas) + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default"): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information so we can properly size the pgs + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), + level=ERROR) + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), + level=ERROR) + raise PoolCreationError( + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) + + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + 'erasure', self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param name: + :return: + """ try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + try: + check_call(cmd) except CalledProcessError: - return False + raise - return rbd_img in out + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. 
Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :return: six.string_types or None. The current cache mode of the pool + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']).decode('UTF-8') + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: @@ -126,6 +464,33 @@ def get_osds(service): return None +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + def update_pool(client, pool, settings): cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): @@ -414,6 +779,7 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index e70f1ca5..2dd70bc9 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -492,7 +492,7 @@ def relation_types(): @cached def peer_relation_id(): - '''Get a peer relation id if a peer relation has been joined, else None.''' + '''Get the peers relation id if a peers relation has been joined, else None.''' md = metadata() section = md.get('peers') if section: @@ -517,12 +517,12 @@ def relation_to_interface(relation_name): def relation_to_role_and_interface(relation_name): """ Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. """ _metadata = metadata() - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') if interface: return role, interface @@ -534,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name): """ Given a role and interface name, return a list of relation names for the current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peer``). + of ``provides``, ``requires``, or ``peers``). :returns: A list of relation names. """ @@ -555,7 +555,7 @@ def interface_to_relations(interface_name): :returns: A list of relation names.
""" results = [] - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): results.extend(role_and_interface_to_relations(role, interface_name)) return results @@ -878,6 +878,40 @@ def leader_set(settings=None, **kwargs): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 579871bc..c5fd81ca 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -146,8 +146,22 @@ def service_available(service_name): return True -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" +def adduser(username, password=None, shell='/bin/bash', system_user=False, + primary_group=None, secondary_groups=None): + """ + Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to their username + :param list secondary_groups: Optional list of additional groups + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -162,6 +176,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): '--shell', shell, '--password', password, ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 12d768e6..24237042 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -243,13 +243,15 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` - :param str target: The target to write the rendered template to + :param str target: The target to write the rendered template to (or None) :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template """ def __init__(self, source, target, owner='root', group='root', perms=0o444, @@ -267,12 +269,14 @@ def __call__(self, manager, service_name, event_name): if self.on_change_action and os.path.isfile(self.target): pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) - context = {} + context = {'ctx': {}} for ctx in service.get('required_data', []): context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( @@ -281,6 +285,8 @@ def __call__(self, manager, service_name, event_name): else: self.on_change_action() + return result + # Convenience aliases for templates render_template = template = TemplateCallback diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 239719d4..d2d8eafe 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -27,7 +27,8 @@ def render(source, target, context, owner='root', group='root', The `source` path, if not absolute, is relative to the `templates_dir`. - The `target` path should be absolute. + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. 
The context should be a dict containing the values to be replaced in the template. @@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root', If omitted, `templates_dir` defaults to the `templates` folder in the charm. + The rendered template will be written to the file as well as being returned + as a string. + Note: Using this requires python-jinja2; if it is not installed, calling this will attempt to use charmhelpers.fetch.apt_install to install it. """ @@ -67,9 +71,11 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 5f831c35..db0d86a2 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -98,6 +98,14 @@ 'liberty/proposed': 'trusty-proposed/liberty', 'trusty-liberty/proposed': 'trusty-proposed/liberty', 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', } # The order of this list is very important. Handlers should be listed in from @@ -411,7 +419,7 @@ def plugins(fetch_handlers=None): importlib.import_module(package), classname) plugin_list.append(handler_class()) - except (ImportError, AttributeError): + except NotImplementedError: # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index efd7f9f0..b8e0943d 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -108,7 +108,7 @@ def download(self, source, dest): install_opener(opener) response = urlopen(source) try: - with open(dest, 'w') as dest_file: + with open(dest, 'wb') as dest_file: dest_file.write(response.read()) except Exception as e: if os.path.isfile(dest): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index 8ec69692..cafd27f7 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -15,54 +15,40 @@ # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
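As a brief aside on the render() change above: with target=None the rendered text is now returned without any file being written. A minimal sketch of that usage, not part of the patch itself; the template name and context keys here are hypothetical:

    from charmhelpers.core.hookenv import log
    from charmhelpers.core.templating import render

    # target=None: render and return the text; nothing is written to disk
    ceph_conf = render(source='ceph.conf', target=None,
                       context={'fsid': 'some-fsid', 'auth': 'cephx'})
    log('rendered ceph.conf ({} bytes)'.format(len(ceph_conf)))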
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors +if filter_installed_packages(['bzr']) != []: + apt_install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) else: return True def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e + if os.path.exists(dest): + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + else: + check_call(['bzr', 'branch', source, dest]) def install(self, source, dest=None): url_parts = self.parse_url(source) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index f023b26d..bbf89d5c 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -15,24 +15,19 @@ # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
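For context on the bzrurl.py rewrite above, a minimal usage sketch (illustrative only; the branch URL and destination are hypothetical, and bzr is assumed installable via apt as the module-level guard requires):

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:charm-helpers'):
        # branches on first fetch; pulls with --overwrite on later fetches
        handler.branch('lp:charm-helpers', '/tmp/charm-helpers')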
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('GitPython does not support Python 3') - -try: - from git import Repo -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-git") - from git import Repo - -from git.exc import GitCommandError # noqa E402 +if filter_installed_packages(['git']) != []: + apt_install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): @@ -40,19 +35,24 @@ class GitUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git'): + if url_parts.scheme not in ('http', 'https', 'git', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) else: return True - def clone(self, source, dest, branch, depth=None): + def clone(self, source, dest, branch="master", depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if depth: - Repo.clone_from(source, dest, branch=branch, depth=depth) + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] else: - Repo.clone_from(source, dest, branch=branch) + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', str(depth)]) + check_call(cmd) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) @@ -66,8 +66,6 @@ def install(self, source, branch="master", dest=None, depth=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) - except GitCommandError as e: - raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0506491b..58b1a79c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -225,7 +225,8 @@ def _get_openstack_release(self): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +238,11 @@ def _get_openstack_release(self): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +259,7 @@ def _get_openstack_release_string(self): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] From c0d34ce12e15934a5efdc97dc64ffcf183ecc536 Mon Sep
17 00:00:00 2001 From: Corey Bryant Date: Mon, 4 Jan 2016 16:25:48 -0500 Subject: [PATCH 0926/2699] [corey.bryant,r=trivial] Sync charm-helpers. --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 +- .../hooks/charmhelpers/contrib/network/ip.py | 42 +- .../contrib/storage/linux/ceph.py | 400 +++++++++++++++++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 44 +- ceph-mon/hooks/charmhelpers/core/host.py | 28 +- .../charmhelpers/core/services/helpers.py | 16 +- .../hooks/charmhelpers/core/templating.py | 20 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 44 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 38 +- .../contrib/openstack/amulet/deployment.py | 8 +- 12 files changed, 551 insertions(+), 115 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 65b1a27e..2f246429 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -296,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'): :param str relation_name: Name of relation nrpe sub joined to """ for rel in relations_of_type(relation_name): - if 'nagios_hostname' in rel: + if 'nagios_host_context' in rel: return rel['nagios_host_context'] @@ -337,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc if os.path.exists(upstart_init): - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + # Don't add a check for these services from neutron-gateway + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 7f3b66b1..998f00c1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -53,7 +53,7 @@ def _validate_cidr(network): def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) + errmsg = ("No IP address found in network(s): %s" % network) raise ValueError(errmsg) @@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). 
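The hunk below teaches get_address_in_network() to accept several space-delimited CIDRs and return the first local match. A short usage sketch under that assumption (the networks and fallback address are illustrative):

    from charmhelpers.contrib.network.ip import get_address_in_network

    # Networks are checked left to right; the first one with a matching
    # local interface address wins, otherwise the fallback is returned.
    addr = get_address_in_network('192.168.1.0/24 10.5.0.0/16',
                                  fallback='127.0.0.1')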
@@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False): else: return None - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1235389e..60ae52b8 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,10 +23,11 @@ # James Page # Adam Gandelman # +import bisect +import six import os import shutil -import six import json import time import uuid @@ -73,35 +74,372 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. 
" + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + def __init__(self, message): + super(PoolCreationError, self).__init__(message) - apt_install('ceph-common', fatal=True) +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + # Flush the cache and wait for it to return + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size): + """ + :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for + erasure coded pools + :return: int. The number of pgs to use. 
+ """ + validator(value=pool_size, valid_type=int) + osds = get_osds(self.service) + if not osds: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return 200 + + # Calculate based on Ceph best practices + if osds < 5: + return 128 + elif 5 < osds < 10: + return 512 + elif 10 < osds < 50: + return 4096 + else: + estimate = (osds * 100) / pool_size + # Return the next nearest power of 2 + index = bisect.bisect_right(powers_of_two, estimate) + return powers_of_two[index] + + +class ReplicatedPool(Pool): + def __init__(self, service, name, replicas=2): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + pgs = self.get_pgs(self.replicas) + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default"): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information so we can properly size the pgs + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), + level=ERROR) + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), + level=ERROR) + raise PoolCreationError( + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) + + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + 'erasure', self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param name: + :return: + """ try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + try: + check_call(cmd) except CalledProcessError: - return False + raise - return rbd_img in out + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', str(max_bytes)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Remove the byte quota from a RADOS pool in ceph (sets max_bytes back to 0). + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details. + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row'] + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None.
Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name. + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types + :return: bool. True if the profile exists, otherwise False + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types.
The Ceph user name to run the command under + :param pool_name: six.string_types + :return: six.string_types or None. The current cache mode of the pool + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']).decode('UTF-8') + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: @@ -126,6 +464,33 @@ def get_osds(service): return None +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + def update_pool(client, pool, settings): cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): @@ -414,6 +779,7 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e70f1ca5..2dd70bc9 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -492,7 +492,7 @@ def relation_types(): @cached def peer_relation_id(): - '''Get a peer relation id if a peer relation has been joined, else None.''' + '''Get the peers relation id if a peers relation has been joined, else None.''' md = metadata() section = md.get('peers') if section: @@ -517,12 +517,12 @@ def relation_to_interface(relation_name): def relation_to_role_and_interface(relation_name): """ Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. """ _metadata = metadata() - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') if interface: return role, interface @@ -534,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name): """ Given a role and interface name, return a list of relation names for the current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peer``). + of ``provides``, ``requires``, or ``peers``). :returns: A list of relation names. """ @@ -555,7 +555,7 @@ def interface_to_relations(interface_name): :returns: A list of relation names.
""" results = [] - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): results.extend(role_and_interface_to_relations(role, interface_name)) return results @@ -878,6 +878,40 @@ def leader_set(settings=None, **kwargs): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 579871bc..c5fd81ca 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -146,8 +146,22 @@ def service_available(service_name): return True -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" +def adduser(username, password=None, shell='/bin/bash', system_user=False, + primary_group=None, secondary_groups=None): + """ + Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to their username + :param list secondary_groups: Optional list of additional groups + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -162,6 +176,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): '--shell', shell, '--password', password, ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 12d768e6..24237042 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -243,13 +243,15 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` - :param str target: The target to write the rendered template to + :param str target: The target to write the rendered template to (or None) :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template """ def __init__(self, source, target, owner='root', group='root', perms=0o444, @@ -267,12 +269,14 @@ def __call__(self, manager, service_name, event_name): if self.on_change_action and os.path.isfile(self.target): pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) - context = {} + context = {'ctx': {}} for ctx in service.get('required_data', []): context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( @@ -281,6 +285,8 @@ def __call__(self, manager, service_name, event_name): else: self.on_change_action() + return result + # Convenience aliases for templates render_template = template = TemplateCallback diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 239719d4..d2d8eafe 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -27,7 +27,8 @@ def render(source, target, context, owner='root', group='root', The `source` path, if not absolute, is relative to the `templates_dir`. - The `target` path should be absolute. + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. 
The context should be a dict containing the values to be replaced in the template. @@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root', If omitted, `templates_dir` defaults to the `templates` folder in the charm. + The rendered template will be written to the file as well as being returned + as a string. + Note: Using this requires python-jinja2; if it is not installed, calling this will attempt to use charmhelpers.fetch.apt_install to install it. """ @@ -67,9 +71,11 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 5f831c35..db0d86a2 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -98,6 +98,14 @@ 'liberty/proposed': 'trusty-proposed/liberty', 'trusty-liberty/proposed': 'trusty-proposed/liberty', 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', } # The order of this list is very important. Handlers should be listed in from @@ -411,7 +419,7 @@ def plugins(fetch_handlers=None): importlib.import_module(package), classname) plugin_list.append(handler_class()) - except (ImportError, AttributeError): + except NotImplementedError: # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index efd7f9f0..b8e0943d 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -108,7 +108,7 @@ def download(self, source, dest): install_opener(opener) response = urlopen(source) try: - with open(dest, 'w') as dest_file: + with open(dest, 'wb') as dest_file: dest_file.write(response.read()) except Exception as e: if os.path.isfile(dest): diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index 8ec69692..cafd27f7 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -15,54 +15,40 @@ # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
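As a small sketch of what the new Mitaka entries above enable (assuming a trusty unit and the add_source()/apt_update() helpers from this same charm-helpers tree):

    from charmhelpers.fetch import add_source, apt_update

    # 'cloud:trusty-mitaka' resolves through CLOUD_ARCHIVE_POCKETS to the
    # trusty-updates/mitaka pocket of the Ubuntu Cloud Archive.
    add_source('cloud:trusty-mitaka')
    apt_update(fatal=True)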
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors +if filter_installed_packages(['bzr']) != []: + apt_install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) else: return True def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e + if os.path.exists(dest): + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + else: + check_call(['bzr', 'branch', source, dest]) def install(self, source, dest=None): url_parts = self.parse_url(source) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index f023b26d..bbf89d5c 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -15,24 +15,19 @@ # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
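The module-level guard above replaces the old bzrlib import dance with a plain package check. The same idiom in isolation, as a sketch:

    from charmhelpers.fetch import apt_install, filter_installed_packages

    # filter_installed_packages() returns only the packages still missing,
    # so this installs bzr once and is a no-op afterwards.
    missing = filter_installed_packages(['bzr'])
    if missing:
        apt_install(missing, fatal=True)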
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('GitPython does not support Python 3') - -try: - from git import Repo -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-git") - from git import Repo - -from git.exc import GitCommandError # noqa E402 +if filter_installed_packages(['git']) != []: + apt_install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): @@ -40,19 +35,24 @@ class GitUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git'): + if url_parts.scheme not in ('http', 'https', 'git', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) else: return True - def clone(self, source, dest, branch, depth=None): + def clone(self, source, dest, branch="master", depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if depth: - Repo.clone_from(source, dest, branch=branch, depth=depth) + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] else: - Repo.clone_from(source, dest, branch=branch) + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', str(depth)]) + check_call(cmd) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) @@ -66,8 +66,6 @@ def install(self, source, branch="master", dest=None, depth=None): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) - except GitCommandError as e: - raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0506491b..58b1a79c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -225,7 +225,8 @@ def _get_openstack_release(self): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +238,11 @@ def _get_openstack_release(self): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +259,7 @@ def _get_openstack_release_string(self): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] From d16d2703ededf3bd3fe5f1ca690b32564d18f18f Mon Sep 17
00:00:00 2001 From: Chris MacNaughton Date: Fri, 8 Jan 2016 14:02:43 -0500 Subject: [PATCH 0927/2699] charmhelpers sync --- .../charmhelpers/contrib/openstack/utils.py | 1011 +++++++++++++++++ ceph-proxy/hooks/charmhelpers/core/host.py | 51 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 7 +- 3 files changed, 1047 insertions(+), 22 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..2af4476d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,1011 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Common python helper functions used for OpenStack charms. +from collections import OrderedDict +from functools import wraps + +import subprocess +import json +import os +import sys +import re + +import six +import traceback +import uuid +import yaml + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import ( + unitdata, +) + +from charmhelpers.core.hookenv import ( + action_fail, + action_set, + config, + log as juju_log, + charm_dir, + INFO, + related_units, + relation_ids, + relation_set, + status_set, + hook_name +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, +) + +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + +from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), +]) + +# The ugly duckling +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + 
('1.9.1', 'havana'), + ('1.9.0', 'havana'), + ('1.13.1', 'icehouse'), + ('1.13.0', 'icehouse'), + ('1.12.0', 'icehouse'), + ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), + ('2.2.2', 'kilo'), + ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), + ('2.5.0', 'liberty'), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0', 'liberty'), + ('13.0', 'mitaka'), + ]), + 'neutron-common': OrderedDict([ + ('7.0', 'liberty'), + ('8.0', 'mitaka'), + ]), + 'cinder-common': OrderedDict([ + ('7.0', 'liberty'), + ('8.0', 'mitaka'), + ]), + 'keystone': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), + 'horizon-common': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0', 'liberty'), + ('6.0', 'mitaka'), + ]), + 'heat-common': OrderedDict([ + ('5.0', 'liberty'), + ('6.0', 'mitaka'), + ]), + 'glance-common': OrderedDict([ + ('11.0', 'liberty'), + ('12.0', 'mitaka'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. 
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + if 'swift' in pkg.name: + # Fully x.y.z match for swift versions + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + else: + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + return SWIFT_CODENAMES[vers] + else: + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. + ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + import apt_pkg as apt + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. 
+ ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """ + Returns true if openstack-origin-git is specified. + """ + return config('openstack-origin-git') is not None + + +requirements_dir = None + + +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a dictionary. + """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project): + """ + Clone/install all specified OpenStack repositories. + + The expected format of projects_yaml is: + + repositories: + - {name: keystone, + repository: 'git://git.openstack.org/openstack/keystone.git', + branch: 'stable/icehouse'} + - {name: requirements, + repository: 'git://git.openstack.org/openstack/requirements.git', + branch: 'stable/icehouse'} + + directory: /mnt/openstack-git + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url + + The directory, http_proxy, and https_proxy keys are optional. 
+ + """ + global requirements_dir + parent_dir = '/mnt/openstack-git' + http_proxy = None + + projects = _git_yaml_load(projects_yaml) + _git_validate_projects_yaml(projects, core_project) + + old_environ = dict(os.environ) + + if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] + os.environ['http_proxy'] = projects['http_proxy'] + if 'https_proxy' in projects.keys(): + os.environ['https_proxy'] = projects['https_proxy'] + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + + # Upgrade setuptools and pip from default virtualenv versions. The default + # versions in trusty break master OpenStack branch deployments. + for p in ['pip', 'setuptools']: + pip_install(p, upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + + for p in projects['repositories']: + repo = p['repository'] + branch = p['branch'] + depth = '1' + if 'depth' in p.keys(): + depth = p['depth'] + if p['name'] == 'requirements': + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=False) + requirements_dir = repo_dir + else: + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=True) + + os.environ = old_environ + + +def _git_validate_projects_yaml(projects, core_project): + """ + Validate the projects yaml. + """ + _git_ensure_key_exists('repositories', projects) + + for project in projects['repositories']: + _git_ensure_key_exists('name', project.keys()) + _git_ensure_key_exists('repository', project.keys()) + _git_ensure_key_exists('branch', project.keys()) + + if projects['repositories'][0]['name'] != 'requirements': + error_out('{} git repo must be specified first'.format('requirements')) + + if projects['repositories'][-1]['name'] != core_project: + error_out('{} git repo must be specified last'.format(core_project)) + + +def _git_ensure_key_exists(key, keys): + """ + Ensure that key exists in keys. + """ + if key not in keys: + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) + + +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, + update_requirements): + """ + Clone and install a single git repository. + """ + if not os.path.exists(parent_dir): + juju_log('Directory does not exist at {}. ' + 'Creating it.'.format(parent_dir)) + os.mkdir(parent_dir) + + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) + + venv = os.path.join(parent_dir, 'venv') + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(venv, repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + if http_proxy: + pip_install(repo_dir, proxy=http_proxy, venv=venv) + else: + pip_install(repo_dir, venv=venv) + + return repo_dir + + +def _git_update_requirements(venv, package_dir, reqs_dir): + """ + Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt.
+ """ + orig_dir = os.getcwd() + os.chdir(reqs_dir) + python = os.path.join(venv, 'bin/python') + cmd = [python, 'update.py', package_dir] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from " + "global-requirements.txt".format(package)) + os.chdir(orig_dir) + + +def git_pip_venv_dir(projects_yaml): + """ + Return the pip virtualenv path. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + return os.path.join(parent_dir, 'venv') + + +def git_src_dir(projects_yaml, project): + """ + Return the directory where the specified project's source is located. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + if p['name'] == project: + return os.path.join(parent_dir, os.path.basename(p['repository'])) + + return None + + +def git_yaml_value(projects_yaml, key): + """ + Return the value in projects_yaml for the specified key. + """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? + for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. ".format(generic_interface), 'WARN') + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + else: + # Relation ID exists but no related unit + if not missing_data: + # Edge case relation ID exists but departing + if ('departed' in hook_name() or 'broken' in hook_name()) \ + and related_interface in hook_name(): + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." 
+ "".format(generic_interface, related_interface), "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relation's interface, {}, is related but has " + "no units in the relation." + "".format(generic_interface, related_interface), "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. " + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in incomplete_relations \ + and generic_interface not in missing_relations: + incomplete_relations.append(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + # Run charm specific checks + if charm_func: + charm_state, charm_message = charm_func(configs) + if charm_state != 'active' and charm_state != 'unknown': + state = workload_state_compare(state, charm_state) + if message: + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + + # Set to active if all requirements have been met + if state == 'active': + message = "Unit is ready" + juju_log(message, "INFO") + + status_set(state, message) + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """ + Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. + + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if any one of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points.
Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret + + +def remote_restart(rel_name, remote_service=None): + trigger = { + 'restart-trigger': str(uuid.uuid4()), + } + if remote_service: + trigger['remote-service'] = remote_service + for rid in relation_ids(rel_name): + # This subordinate can be related to two separate services using + # different subordinate relations so only issue the restart if + # the principal is connected down the relation we think it is + if related_units(relid=rid): + relation_set(relation_id=rid, + relation_settings=trigger, + ) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index c5fd81ca..710fdab9 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -72,7 +72,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): stopped = service_stop(service_name) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh:
@@ -80,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "disable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) return stopped @@ -94,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init", Reenable starting again at boot. Start the service""" upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): @@ -102,9 +106,9 @@ def service_resume(service_name, init_dir="/etc/init", elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "enable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) started = service_running(service_name) @@ -115,23 +119,29 @@ def service_resume(service_name, init_dir="/etc/init", def service(action, service_name): """Control a system service""" - cmd = ['service', service_name, action] + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 -def service_running(service): +def service_running(service_name): """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False + if init_is_systemd(): + return service('is-active', service_name) else: - if ("start/running" in output or "is running" in output): - return True - else: + try: + output = subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False def service_available(service_name): @@ -146,6 +156,13 @@ def service_available(service_name): return True +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + return os.path.isdir(SYSTEMD_SYSTEM) + + def adduser(username, password=None, shell='/bin/bash', system_user=False, primary_group=None, secondary_groups=None): """ diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index bbf89d5c..9ad8dc60 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -22,7 +22,6 @@ filter_installed_packages, apt_install, ) -from charmhelpers.core.host import mkdir if filter_installed_packages(['git']) != []: apt_install(['git']) @@ -50,8 +49,8 @@ def clone(self, source, dest, branch="master", depth=None): cmd = ['git', '-C', dest, 'pull', source, branch] else: cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) + if depth: + cmd.extend(['--depth', depth]) check_call(cmd) def install(self, source, branch="master", dest=None, 
depth=None): @@ -62,8 +61,6 @@ def install(self, source, branch="master", dest=None, depth=None): else: dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) except OSError as e: From b7578260edc8c37617dc30e810e9cfc50021e85c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 8 Jan 2016 14:02:43 -0500 Subject: [PATCH 0928/2699] charmhelpers sync --- .../charmhelpers/contrib/openstack/utils.py | 1011 +++++++++++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 51 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 7 +- 3 files changed, 1047 insertions(+), 22 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..2af4476d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,1011 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict +from functools import wraps + +import subprocess +import json +import os +import sys +import re + +import six +import traceback +import uuid +import yaml + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import ( + unitdata, +) + +from charmhelpers.core.hookenv import ( + action_fail, + action_set, + config, + log as juju_log, + charm_dir, + INFO, + related_units, + relation_ids, + relation_set, + status_set, + hook_name +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, +) + +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + +from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), +]) + +# The ugly duckling +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), + ('1.13.1', 'icehouse'), + ('1.13.0', 'icehouse'), + ('1.12.0', 'icehouse'), + ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), + ('2.2.2', 'kilo'), + ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), + ('2.5.0', 'liberty'), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0', 'liberty'), + ('13.0', 'mitaka'), + ]), + 'neutron-common': OrderedDict([ + ('7.0', 'liberty'), + ('8.0', 'mitaka'), + ]), + 'cinder-common': OrderedDict([ + ('7.0', 'liberty'), + ('8.0', 'mitaka'), + ]), + 'keystone': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), + 'horizon-common': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0', 'liberty'), + ('6.0', 'mitaka'), + ]), + 'heat-common': OrderedDict([ + ('5.0', 'liberty'), + ('6.0', 'mitaka'), + ]), + 'glance-common': OrderedDict([ + ('11.0', 'liberty'), + ('12.0', 'mitaka'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8.0', 'liberty'), + ('9.0', 'mitaka'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = 
lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + if 'swift' in pkg.name: + # Fully x.y.z match for swift versions + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + else: + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + return SWIFT_CODENAMES[vers] + else: + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. 
+ ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. + pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. 
+ """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + import apt_pkg as apt + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. + ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' 
% + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """ + Returns true if openstack-origin-git is specified. + """ + return config('openstack-origin-git') is not None + + +requirements_dir = None + + +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a dictionary. + """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project): + """ + Clone/install all specified OpenStack repositories. + + The expected format of projects_yaml is: + + repositories: + - {name: keystone, + repository: 'git://git.openstack.org/openstack/keystone.git', + branch: 'stable/icehouse'} + - {name: requirements, + repository: 'git://git.openstack.org/openstack/requirements.git', + branch: 'stable/icehouse'} + + directory: /mnt/openstack-git + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url + + The directory, http_proxy, and https_proxy keys are optional. + + """ + global requirements_dir + parent_dir = '/mnt/openstack-git' + http_proxy = None + + projects = _git_yaml_load(projects_yaml) + _git_validate_projects_yaml(projects, core_project) + + old_environ = dict(os.environ) + + if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] + os.environ['http_proxy'] = projects['http_proxy'] + if 'https_proxy' in projects.keys(): + os.environ['https_proxy'] = projects['https_proxy'] + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + + # Upgrade setuptools and pip from default virtualenv versions. The default + # versions in trusty break master OpenStack branch deployments. 
+ for p in ['pip', 'setuptools']: + pip_install(p, upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + + for p in projects['repositories']: + repo = p['repository'] + branch = p['branch'] + depth = '1' + if 'depth' in p.keys(): + depth = p['depth'] + if p['name'] == 'requirements': + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=False) + requirements_dir = repo_dir + else: + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=True) + + os.environ = old_environ + + +def _git_validate_projects_yaml(projects, core_project): + """ + Validate the projects yaml. + """ + _git_ensure_key_exists('repositories', projects) + + for project in projects['repositories']: + _git_ensure_key_exists('name', project.keys()) + _git_ensure_key_exists('repository', project.keys()) + _git_ensure_key_exists('branch', project.keys()) + + if projects['repositories'][0]['name'] != 'requirements': + error_out('{} git repo must be specified first'.format('requirements')) + + if projects['repositories'][-1]['name'] != core_project: + error_out('{} git repo must be specified last'.format(core_project)) + + +def _git_ensure_key_exists(key, keys): + """ + Ensure that key exists in keys. + """ + if key not in keys: + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) + + +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, + update_requirements): + """ + Clone and install a single git repository. + """ + if not os.path.exists(parent_dir): + juju_log('Directory does not exist at {}. ' + 'Creating it.'.format(parent_dir)) + os.mkdir(parent_dir) + + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) + + venv = os.path.join(parent_dir, 'venv') + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(venv, repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + if http_proxy: + pip_install(repo_dir, proxy=http_proxy, venv=venv) + else: + pip_install(repo_dir, venv=venv) + + return repo_dir + + +def _git_update_requirements(venv, package_dir, reqs_dir): + """ + Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt. + """ + orig_dir = os.getcwd() + os.chdir(reqs_dir) + python = os.path.join(venv, 'bin/python') + cmd = [python, 'update.py', package_dir] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from " + "global-requirements.txt".format(package)) + os.chdir(orig_dir) + + +def git_pip_venv_dir(projects_yaml): + """ + Return the pip virtualenv path. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + return os.path.join(parent_dir, 'venv') + + +def git_src_dir(projects_yaml, project): + """ + Return the directory where the specified project's source is located.
+ """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + if p['name'] == project: + return os.path.join(parent_dir, os.path.basename(p['repository'])) + + return None + + +def git_yaml_value(projects_yaml, key): + """ + Return the value in projects_yaml for the specified key. + """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? + for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. ".format(generic_interface), 'WARN') + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + else: + # Relation ID exists but no related unit + if not missing_data: + # Edge case relation ID exists but departing + if ('departed' in hook_name() or 'broken' in hook_name()) \ + and related_interface in hook_name(): + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." + "".format(generic_interface, related_interface), "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relation's interface, {}, is related but has + no units in the relation." + "".format(generic_interface, related_interface), "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. 
" + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in incomplete_relations \ + and generic_interface not in missing_relations: + incomplete_relations.append(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + # Run charm specific checks + if charm_func: + charm_state, charm_message = charm_func(configs) + if charm_state != 'active' and charm_state != 'unknown': + state = workload_state_compare(state, charm_state) + if message: + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + + # Set to active if all requirements have been met + if state == 'active': + message = "Unit is ready" + juju_log(message, "INFO") + + status_set(state, message) + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """ + Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. + + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if anyone of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points. Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. 
+ + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret + + +def remote_restart(rel_name, remote_service=None): + trigger = { + 'restart-trigger': str(uuid.uuid4()), + } + if remote_service: + trigger['remote-service'] = remote_service + for rid in relation_ids(rel_name): + # This subordinate can be related to two separate services using + # different subordinate relations so only issue the restart if + # the principal is connected down the relation we think it is + if related_units(relid=rid): + relation_set(relation_id=rid, + relation_settings=trigger, + ) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index c5fd81ca..710fdab9 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -72,7 +72,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): stopped = service_stop(service_name) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: @@ -80,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "disable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) return stopped @@ -94,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init", Reenable starting again at boot. 
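# A standalone sketch of the init-system detection pattern these host.py
# hunks introduce (illustration only, not part of the committed diff).
# systemd is inferred from the presence of /run/systemd/system, and service
# control is dispatched to systemctl or to the legacy `service` wrapper.
import os
import subprocess

SYSTEMD_SYSTEM = '/run/systemd/system'


def init_is_systemd():
    # The directory exists only when systemd is running as PID 1.
    return os.path.isdir(SYSTEMD_SYSTEM)


def service(action, service_name):
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]  # e.g. systemctl disable ceph-mon
    else:
        cmd = ['service', service_name, action]    # upstart / sysvinit fallback
    return subprocess.call(cmd) == 0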
Start the service""" upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): @@ -102,9 +106,9 @@ def service_resume(service_name, init_dir="/etc/init", elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "enable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) started = service_running(service_name) @@ -115,23 +119,29 @@ def service_resume(service_name, init_dir="/etc/init", def service(action, service_name): """Control a system service""" - cmd = ['service', service_name, action] + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 -def service_running(service): +def service_running(service_name): """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False + if init_is_systemd(): + return service('is-active', service_name) else: - if ("start/running" in output or "is running" in output): - return True - else: + try: + output = subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False def service_available(service_name): @@ -146,6 +156,13 @@ def service_available(service_name): return True +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + return os.path.isdir(SYSTEMD_SYSTEM) + + def adduser(username, password=None, shell='/bin/bash', system_user=False, primary_group=None, secondary_groups=None): """ diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index bbf89d5c..9ad8dc60 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -22,7 +22,6 @@ filter_installed_packages, apt_install, ) -from charmhelpers.core.host import mkdir if filter_installed_packages(['git']) != []: apt_install(['git']) @@ -50,8 +49,8 @@ def clone(self, source, dest, branch="master", depth=None): cmd = ['git', '-C', dest, 'pull', source, branch] else: cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) + if depth: + cmd.extend(['--depth', depth]) check_call(cmd) def install(self, source, branch="master", dest=None, depth=None): @@ -62,8 +61,6 @@ def install(self, source, branch="master", dest=None, depth=None): else: dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) except OSError as e: From 86ff84b13e291f0cdfda3e3ec154db4a04069db3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 8 Jan 2016 14:03:51 -0500 Subject: [PATCH 0929/2699] update to ue ceph-disk prepare instead of ceph-disk-prepare --- ceph-proxy/README.md | 
2 +- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/files/upstart/ceph-hotplug.conf | 2 +- ceph-proxy/hooks/ceph.py | 9 +++++---- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index c8c7af76..9fac8245 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -91,7 +91,7 @@ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to +The osds use so-called "OSD hotplugging". **ceph-disk prepare** is used to create the filesystems with a special GPT partition type. *udev* is set up to mount such filesystems and start the osd daemons as their storage becomes visible to the system (or after `udevadm trigger`). diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index eeee6f8c..3177ba1c 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -9,5 +9,6 @@ include: - ceph - payload.execd - contrib.openstack.alternatives + - contrib.openstack.utils - contrib.network.ip - contrib.charmsupport diff --git a/ceph-proxy/files/upstart/ceph-hotplug.conf b/ceph-proxy/files/upstart/ceph-hotplug.conf index 70204529..d82e7c84 100644 --- a/ceph-proxy/files/upstart/ceph-hotplug.conf +++ b/ceph-proxy/files/upstart/ceph-hotplug.conf @@ -8,4 +8,4 @@ stop on runlevel [!2345] task instance $DEVNAME -exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME" +exec /usr/sbin/ceph-disk activate --mount -- "$DEVNAME" diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 1d77030a..ff3a7a40 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -131,10 +131,10 @@ def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk-activate for directory based OSD's + # Use ceph-disk activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) def rescan_osd_devices(): @@ -367,7 +367,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, return status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk-prepare'] + cmd = ['ceph-disk', 'prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: @@ -407,7 +407,8 @@ def osdize_dir(path): mkdir(path) cmd = [ - 'ceph-disk-prepare', + 'ceph-disk', + 'prepare', '--data-dir', path ] From 1f1bcb2ea6794994b917534e7eaddffa3e167728 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 8 Jan 2016 14:03:51 -0500 Subject: [PATCH 0930/2699] update to use ceph-disk prepare instead of ceph-disk-prepare --- ceph-mon/README.md | 2 +- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/files/upstart/ceph-hotplug.conf | 2 +- ceph-mon/hooks/ceph.py | 9 +++++---- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index c8c7af76..9fac8245 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -91,7 +91,7 @@ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. 
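A hedged aside on the rename this pair of commits performs, not part of the
committed diffs: newer ceph packages ship a single ceph-disk executable with
prepare/activate subcommands instead of the hyphenated ceph-disk-prepare and
ceph-disk-activate wrappers, so every call site switches to the two-token
form. A minimal sketch of the calls the charm ends up making:

    import os
    import subprocess

    def osdize_sketch(dev_or_path):
        # WARNING: 'prepare' formats block devices for use as an OSD.
        if os.path.isdir(dev_or_path):
            subprocess.check_call(
                ['ceph-disk', 'prepare', '--data-dir', dev_or_path])
        else:
            subprocess.check_call(['ceph-disk', 'prepare', dev_or_path])
        # Activation is normally udev-driven; the upstart hotplug job runs:
        #   ceph-disk activate --mount -- "$DEVNAME"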
After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". **ceph-disk-prepare** is used to +The osds use so-called "OSD hotplugging". **ceph-disk prepare** is used to create the filesystems with a special GPT partition type. *udev* is set up to mount such filesystems and start the osd daemons as their storage becomes visible to the system (or after `udevadm trigger`). diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index eeee6f8c..3177ba1c 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -9,5 +9,6 @@ include: - ceph - payload.execd - contrib.openstack.alternatives + - contrib.openstack.utils - contrib.network.ip - contrib.charmsupport diff --git a/ceph-mon/files/upstart/ceph-hotplug.conf b/ceph-mon/files/upstart/ceph-hotplug.conf index 70204529..d82e7c84 100644 --- a/ceph-mon/files/upstart/ceph-hotplug.conf +++ b/ceph-mon/files/upstart/ceph-hotplug.conf @@ -8,4 +8,4 @@ stop on runlevel [!2345] task instance $DEVNAME -exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME" +exec /usr/sbin/ceph-disk activate --mount -- "$DEVNAME" diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 1d77030a..ff3a7a40 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -131,10 +131,10 @@ def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk-activate for directory based OSD's + # Use ceph-disk activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) def rescan_osd_devices(): @@ -367,7 +367,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, return status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk-prepare'] + cmd = ['ceph-disk', 'prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: @@ -407,7 +407,8 @@ def osdize_dir(path): mkdir(path) cmd = [ - 'ceph-disk-prepare', + 'ceph-disk', + 'prepare', '--data-dir', path ] From 6d70d475f4abf31e6c31c2b69e253b1cec179fa6 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:24 +0000 Subject: [PATCH 0931/2699] remove amulet tests for unsupported releases --- ceph-proxy/tests/019-basic-vivid-kilo | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-proxy/tests/019-basic-vivid-kilo diff --git a/ceph-proxy/tests/019-basic-vivid-kilo b/ceph-proxy/tests/019-basic-vivid-kilo deleted file mode 100755 index 934261b5..00000000 --- a/ceph-proxy/tests/019-basic-vivid-kilo +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on vivid-kilo.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='vivid') - deployment.run_tests() From 4ddecbca42be81449468536cb8fe367f8ee75bb1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:24 +0000 Subject: [PATCH 0932/2699] remove amulet tests for unsupported releases --- ceph-mon/tests/019-basic-vivid-kilo | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-mon/tests/019-basic-vivid-kilo diff --git a/ceph-mon/tests/019-basic-vivid-kilo b/ceph-mon/tests/019-basic-vivid-kilo deleted file mode 100755 index 
934261b5..00000000 --- a/ceph-mon/tests/019-basic-vivid-kilo +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on vivid-kilo.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='vivid') - deployment.run_tests() From a28aaf16a5261af701b3fd8888aa974851e50dc4 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:25 +0000 Subject: [PATCH 0933/2699] remove amulet tests for unsupported releases --- ceph-osd/tests/019-basic-vivid-kilo | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-osd/tests/019-basic-vivid-kilo diff --git a/ceph-osd/tests/019-basic-vivid-kilo b/ceph-osd/tests/019-basic-vivid-kilo deleted file mode 100755 index 6fd3f288..00000000 --- a/ceph-osd/tests/019-basic-vivid-kilo +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph-osd deployment on vivid-kilo.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='vivid') - deployment.run_tests() From 3d0650d864f59bade0bcbc15f2ffc72c43b98224 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:26 +0000 Subject: [PATCH 0934/2699] remove amulet tests for unsupported releases --- ceph-radosgw/tests/019-basic-vivid-kilo | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-radosgw/tests/019-basic-vivid-kilo diff --git a/ceph-radosgw/tests/019-basic-vivid-kilo b/ceph-radosgw/tests/019-basic-vivid-kilo deleted file mode 100755 index 9238de85..00000000 --- a/ceph-radosgw/tests/019-basic-vivid-kilo +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph-radosgw deployment on vivid-kilo.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='vivid') - deployment.run_tests() From 82073fbd5ce07f72d097c04f3cfb13955d78b504 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:49 +0000 Subject: [PATCH 0935/2699] Move 00-setup to prevent extra, unnecessary bootstrap in test runs. --- ceph-proxy/Makefile | 1 + ceph-proxy/tests/{ => setup}/00-setup | 0 2 files changed, 1 insertion(+) rename ceph-proxy/tests/{ => setup}/00-setup (100%) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 5e54804a..42b6f4a7 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -13,6 +13,7 @@ test: functional_test: @echo Starting Amulet tests... + @tests/setup/00-setup @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-proxy/tests/00-setup b/ceph-proxy/tests/setup/00-setup similarity index 100% rename from ceph-proxy/tests/00-setup rename to ceph-proxy/tests/setup/00-setup From 33c7b36ca9e07670064509b1048b99fc3a7e35c6 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:49 +0000 Subject: [PATCH 0936/2699] Move 00-setup to prevent extra, unnecessary bootstrap in test runs. --- ceph-mon/Makefile | 1 + ceph-mon/tests/{ => setup}/00-setup | 0 2 files changed, 1 insertion(+) rename ceph-mon/tests/{ => setup}/00-setup (100%) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 5e54804a..42b6f4a7 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -13,6 +13,7 @@ test: functional_test: @echo Starting Amulet tests... 
+ @tests/setup/00-setup @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-mon/tests/00-setup b/ceph-mon/tests/setup/00-setup similarity index 100% rename from ceph-mon/tests/00-setup rename to ceph-mon/tests/setup/00-setup From d816fff0125fea47abea068b2b110b57549d1aea Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:50 +0000 Subject: [PATCH 0937/2699] Move 00-setup to prevent extra, unnecessary bootstrap in test runs. --- ceph-osd/Makefile | 1 + ceph-osd/tests/{ => setup}/00-setup | 0 2 files changed, 1 insertion(+) rename ceph-osd/tests/{ => setup}/00-setup (100%) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 306c8444..38a0db81 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -13,6 +13,7 @@ test: functional_test: @echo Starting Amulet tests... + @tests/setup/00-setup @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-osd/tests/00-setup b/ceph-osd/tests/setup/00-setup similarity index 100% rename from ceph-osd/tests/00-setup rename to ceph-osd/tests/setup/00-setup From 1279dcc7289a4ee9ee545e00713960d26cdb808f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:44:50 +0000 Subject: [PATCH 0938/2699] Move 00-setup to prevent extra, unnecessary bootstrap in test runs. --- ceph-radosgw/Makefile | 1 + ceph-radosgw/tests/{ => setup}/00-setup | 0 2 files changed, 1 insertion(+) rename ceph-radosgw/tests/{ => setup}/00-setup (100%) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 6b3b3430..7ada5682 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -13,6 +13,7 @@ test: functional_test: @echo Starting Amulet tests... + @tests/setup/00-setup @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 bin/charm_helpers_sync.py: diff --git a/ceph-radosgw/tests/00-setup b/ceph-radosgw/tests/setup/00-setup similarity index 100% rename from ceph-radosgw/tests/00-setup rename to ceph-radosgw/tests/setup/00-setup From 8f8247420ddcf0bf209a5809321805ed47a7a76b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:09 +0000 Subject: [PATCH 0939/2699] Re-number amulet test file names; add missing combos as not-executable for now. 
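Each numbered test file added below is the same small stub, varying only in
the Ubuntu series and, for Ubuntu Cloud Archive combos, the openstack/source
pockets; native combos such as xenial-mitaka pass only the series. A minimal
sketch of the shape, distilled from the files that follow:

    #!/usr/bin/python

    """Amulet tests on a basic ceph deployment on <series>-<release>."""

    from basic_deployment import CephBasicDeployment

    if __name__ == '__main__':
        deployment = CephBasicDeployment(
            series='trusty',
            openstack='cloud:trusty-liberty',       # UCA pocket for OpenStack
            source='cloud:trusty-updates/liberty')  # apt source for ceph
        deployment.run_tests()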
--- ceph-proxy/tests/018-basic-trusty-liberty | 11 +++++++++++ ceph-proxy/tests/019-basic-trusty-mitaka | 11 +++++++++++ ceph-proxy/tests/020-basic-wily-liberty | 9 +++++++++ ceph-proxy/tests/021-basic-xenial-mitaka | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100644 ceph-proxy/tests/018-basic-trusty-liberty create mode 100644 ceph-proxy/tests/019-basic-trusty-mitaka create mode 100644 ceph-proxy/tests/020-basic-wily-liberty create mode 100644 ceph-proxy/tests/021-basic-xenial-mitaka diff --git a/ceph-proxy/tests/018-basic-trusty-liberty b/ceph-proxy/tests/018-basic-trusty-liberty new file mode 100644 index 00000000..f339371b --- /dev/null +++ b/ceph-proxy/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/019-basic-trusty-mitaka new file mode 100644 index 00000000..f339371b --- /dev/null +++ b/ceph-proxy/tests/019-basic-trusty-mitaka @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-proxy/tests/020-basic-wily-liberty b/ceph-proxy/tests/020-basic-wily-liberty new file mode 100644 index 00000000..b0d8096b --- /dev/null +++ b/ceph-proxy/tests/020-basic-wily-liberty @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on wily-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='wily') + deployment.run_tests() diff --git a/ceph-proxy/tests/021-basic-xenial-mitaka b/ceph-proxy/tests/021-basic-xenial-mitaka new file mode 100644 index 00000000..ae3d3350 --- /dev/null +++ b/ceph-proxy/tests/021-basic-xenial-mitaka @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial') + deployment.run_tests() From 901ea1a29d4ecbaa7ccddb8423925e0b6ddcd789 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:09 +0000 Subject: [PATCH 0940/2699] Re-number amulet test file names; add missing combos as not-executable for now. 
--- ceph-mon/tests/018-basic-trusty-liberty | 11 +++++++++++ ceph-mon/tests/019-basic-trusty-mitaka | 11 +++++++++++ ceph-mon/tests/020-basic-wily-liberty | 9 +++++++++ ceph-mon/tests/021-basic-xenial-mitaka | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100644 ceph-mon/tests/018-basic-trusty-liberty create mode 100644 ceph-mon/tests/019-basic-trusty-mitaka create mode 100644 ceph-mon/tests/020-basic-wily-liberty create mode 100644 ceph-mon/tests/021-basic-xenial-mitaka diff --git a/ceph-mon/tests/018-basic-trusty-liberty b/ceph-mon/tests/018-basic-trusty-liberty new file mode 100644 index 00000000..f339371b --- /dev/null +++ b/ceph-mon/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-mon/tests/019-basic-trusty-mitaka b/ceph-mon/tests/019-basic-trusty-mitaka new file mode 100644 index 00000000..f339371b --- /dev/null +++ b/ceph-mon/tests/019-basic-trusty-mitaka @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-mon/tests/020-basic-wily-liberty b/ceph-mon/tests/020-basic-wily-liberty new file mode 100644 index 00000000..b0d8096b --- /dev/null +++ b/ceph-mon/tests/020-basic-wily-liberty @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on wily-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='wily') + deployment.run_tests() diff --git a/ceph-mon/tests/021-basic-xenial-mitaka b/ceph-mon/tests/021-basic-xenial-mitaka new file mode 100644 index 00000000..ae3d3350 --- /dev/null +++ b/ceph-mon/tests/021-basic-xenial-mitaka @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial') + deployment.run_tests() From f14a60f75d33093317e8998f5058d3f735284483 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:10 +0000 Subject: [PATCH 0941/2699] Re-number amulet test file names; add missing combos as not-executable for now. 
--- ceph-osd/tests/018-basic-trusty-liberty | 11 +++++++++++ ceph-osd/tests/019-basic-trusty-mitaka | 11 +++++++++++ ceph-osd/tests/020-basic-wily-liberty | 9 +++++++++ ceph-osd/tests/021-basic-xenial-mitaka | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100644 ceph-osd/tests/018-basic-trusty-liberty create mode 100644 ceph-osd/tests/019-basic-trusty-mitaka create mode 100644 ceph-osd/tests/020-basic-wily-liberty create mode 100644 ceph-osd/tests/021-basic-xenial-mitaka diff --git a/ceph-osd/tests/018-basic-trusty-liberty b/ceph-osd/tests/018-basic-trusty-liberty new file mode 100644 index 00000000..98eb440f --- /dev/null +++ b/ceph-osd/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-liberty.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-osd/tests/019-basic-trusty-mitaka b/ceph-osd/tests/019-basic-trusty-mitaka new file mode 100644 index 00000000..98eb440f --- /dev/null +++ b/ceph-osd/tests/019-basic-trusty-mitaka @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-liberty.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-osd/tests/020-basic-wily-liberty b/ceph-osd/tests/020-basic-wily-liberty new file mode 100644 index 00000000..5e943af5 --- /dev/null +++ b/ceph-osd/tests/020-basic-wily-liberty @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on wily-liberty.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='wily') + deployment.run_tests() diff --git a/ceph-osd/tests/021-basic-xenial-mitaka b/ceph-osd/tests/021-basic-xenial-mitaka new file mode 100644 index 00000000..07741bdd --- /dev/null +++ b/ceph-osd/tests/021-basic-xenial-mitaka @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on xenial-mitaka.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='xenial') + deployment.run_tests() From 279ab0a62551d2b14208641ddf761a68df309e0e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:11 +0000 Subject: [PATCH 0942/2699] Re-number amulet test file names; add missing combos as not-executable for now. 
--- ceph-radosgw/tests/018-basic-trusty-liberty | 11 +++++++++++ ceph-radosgw/tests/019-basic-trusty-mitaka | 11 +++++++++++ ceph-radosgw/tests/020-basic-wily-liberty | 9 +++++++++ ceph-radosgw/tests/021-basic-xenial-mitaka | 9 +++++++++ 4 files changed, 40 insertions(+) create mode 100644 ceph-radosgw/tests/018-basic-trusty-liberty create mode 100644 ceph-radosgw/tests/019-basic-trusty-mitaka create mode 100644 ceph-radosgw/tests/020-basic-wily-liberty create mode 100644 ceph-radosgw/tests/021-basic-xenial-mitaka diff --git a/ceph-radosgw/tests/018-basic-trusty-liberty b/ceph-radosgw/tests/018-basic-trusty-liberty new file mode 100644 index 00000000..8093f583 --- /dev/null +++ b/ceph-radosgw/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on trusty-liberty.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-radosgw/tests/019-basic-trusty-mitaka b/ceph-radosgw/tests/019-basic-trusty-mitaka new file mode 100644 index 00000000..8093f583 --- /dev/null +++ b/ceph-radosgw/tests/019-basic-trusty-mitaka @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on trusty-liberty.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-radosgw/tests/020-basic-wily-liberty b/ceph-radosgw/tests/020-basic-wily-liberty new file mode 100644 index 00000000..05c7bf23 --- /dev/null +++ b/ceph-radosgw/tests/020-basic-wily-liberty @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on wily-liberty.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='wily') + deployment.run_tests() diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/021-basic-xenial-mitaka new file mode 100644 index 00000000..44130bb5 --- /dev/null +++ b/ceph-radosgw/tests/021-basic-xenial-mitaka @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-radosgw deployment on xenial-mitaka.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='xenial') + deployment.run_tests() From 3f48d08da3fb012b456367ea0379fc4e9484174e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:33 +0000 Subject: [PATCH 0943/2699] Update tests/README --- ceph-proxy/tests/README | 140 +++++++++++++++++++++++++--------------- 1 file changed, 88 insertions(+), 52 deletions(-) diff --git a/ceph-proxy/tests/README b/ceph-proxy/tests/README index da63fb67..79c5b063 100644 --- a/ceph-proxy/tests/README +++ b/ceph-proxy/tests/README @@ -1,77 +1,113 @@ -This directory provides Amulet tests that focus on verification of ceph -deployments. +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. -test_* methods are called in lexical sort order. +Reference: lp:openstack-charm-testing for full test bundles. 
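Since amulet runs test_* methods in lexical order, the numbering convention
described in this README maps directly onto method names. A minimal sketch,
with class and method names assumed for illustration:

    class CephBasicDeployment(object):
        # Real suites derive from an amulet deployment base class
        # (assumed: OpenStackAmuletDeployment).

        def test_100_ceph_processes(self):
            """1xx: service and endpoint checks."""

        def test_200_client_relation(self):
            """2xx: relation checks."""

        def test_900_restart_on_config_change(self):
            """9xx: restarts, config changes, actions, final checks."""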
-Test name convention to ensure desired test order: +A single topology and configuration is defined and deployed, once for each of +the defined Ubuntu:OpenStack release combos. The ongoing goal is for this +charm to always possess tests and combo definitions for all currently-supported +release combinations of U:OS. + +test_* methods are called in lexical sort order, as with most runners. However, +each individual test method should be idempotent and expected to pass regardless +of run order or Ubuntu:OpenStack combo. When writing or modifying tests, +ensure that every individual test is not dependent on another test_ method. + +Test naming convention, purely for code organization purposes: 1xx service and endpoint checks 2xx relation checks 3xx config checks 4xx functional checks - 9xx restarts and other final checks - -Common uses of ceph relations in bundle deployments: - - [ nova-compute, ceph ] - - [ glance, ceph ] - - [ cinder, cinder-ceph ] - - [ cinder-ceph, ceph ] - -More detailed relations of ceph service in a common deployment: - relations: - client: - - cinder-ceph - - glance - - nova-compute - mon: - - ceph - -In order to run tests, you'll need charm-tools installed (in addition to -juju, of course): + 9xx restarts, config changes, actions and other final checks + +In order to run tests, charm-tools and juju must be installed: sudo add-apt-repository ppa:juju/stable sudo apt-get update - sudo apt-get install charm-tools + sudo apt-get install charm-tools juju juju-deployer amulet + +Alternatively, tests may be exercised with proposed or development versions +of juju and related tools: + + # juju proposed version + sudo add-apt-repository ppa:juju/proposed + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer + + # juju development version + sudo add-apt-repository ppa:juju/devel + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer -If you use a web proxy server to access the web, you'll need to set the -AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. +Some tests may need to download files. If a web proxy server is required in +the environment, the AMULET_HTTP_PROXY environment variable must be set and +passed into the juju test command. This is unrelated to juju's http proxy +settings or behavior. The following examples demonstrate different ways that tests can be executed. All examples are run from the charm's root directory. - * To run all tests (starting with 00-setup): + * To run all +x tests in the tests directory: - make test + bzr branch lp:charms/trusty/foo + cd foo + make functional_test - * To run a specific test module (or modules): + * To run the tests against a specific release combo as defined in tests/: - juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - * To run a specific test module (or modules), and keep the environment - deployed after a failure: + * To run tests and keep the juju environment deployed after a failure: - juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse * To re-run a test module against an already deployed environment (one that was deployed by a previous call to 'juju test --set-e'): - ./tests/15-basic-trusty-icehouse - -For debugging and test development purposes, all code should be idempotent. 
-In other words, the code should have the ability to be re-run without changing -the results beyond the initial run. This enables editing and re-running of a -test module against an already deployed environment, as described above. - -Manual debugging tips: - - * Set the following env vars before using the OpenStack CLI as admin: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=admin + ./tests/015-basic-trusty-icehouse + + * Even with --set-e, `juju test` will tear down the deployment when all + tests pass. The following work flow may be more effective when + iterating on test writing. + + bzr branch lp:charms/trusty/foo + cd foo + ./tests/setup/00-setup + juju bootstrap + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + + * There may be test definitions in the tests/ dir which are not set +x + executable. This is generally true for deprecated releases, or for + upcoming releases which are not yet validated and enabled. To enable + and run these tests: + bzr branch lp:charms/trusty/foo + cd foo + ls tests + chmod +x tests/017-basic-trusty-kilo + ./tests/setup/00-setup + juju bootstrap + ./tests/017-basic-trusty-kilo + + +Additional notes: + + * Use DEBUG to turn on debug logging, use ERROR otherwise. + u = OpenStackAmuletUtils(ERROR) + u = OpenStackAmuletUtils(DEBUG) + + * To interact with the deployed environment: export OS_USERNAME=admin export OS_PASSWORD=openstack + export OS_TENANT_NAME=admin export OS_REGION_NAME=RegionOne - - * Set the following env vars before using the OpenStack CLI as demoUser: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=demoTenant - export OS_USERNAME=demoUser - export OS_PASSWORD=password - export OS_REGION_NAME=RegionOne + export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 + keystone user-list + glance image-list From 9f7f2a4992a99c75af80d880d6718792b024d09b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:33 +0000 Subject: [PATCH 0944/2699] Update tests/README --- ceph-mon/tests/README | 140 ++++++++++++++++++++++++++---------------- 1 file changed, 88 insertions(+), 52 deletions(-) diff --git a/ceph-mon/tests/README b/ceph-mon/tests/README index da63fb67..79c5b063 100644 --- a/ceph-mon/tests/README +++ b/ceph-mon/tests/README @@ -1,77 +1,113 @@ -This directory provides Amulet tests that focus on verification of ceph -deployments. +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. -test_* methods are called in lexical sort order. +Reference: lp:openstack-charm-testing for full test bundles. -Test name convention to ensure desired test order: +A single topology and configuration is defined and deployed, once for each of +the defined Ubuntu:OpenStack release combos. The ongoing goal is for this +charm to always possess tests and combo definitions for all currently-supported +release combinations of U:OS. + +test_* methods are called in lexical sort order, as with most runners. However, +each individual test method should be idempotent and expected to pass regardless +of run order or Ubuntu:OpenStack combo. 
When writing or modifying tests, +ensure that every individual test is not dependent on another test_ method. + +Test naming convention, purely for code organization purposes: 1xx service and endpoint checks 2xx relation checks 3xx config checks 4xx functional checks - 9xx restarts and other final checks - -Common uses of ceph relations in bundle deployments: - - [ nova-compute, ceph ] - - [ glance, ceph ] - - [ cinder, cinder-ceph ] - - [ cinder-ceph, ceph ] - -More detailed relations of ceph service in a common deployment: - relations: - client: - - cinder-ceph - - glance - - nova-compute - mon: - - ceph - -In order to run tests, you'll need charm-tools installed (in addition to -juju, of course): + 9xx restarts, config changes, actions and other final checks + +In order to run tests, charm-tools and juju must be installed: sudo add-apt-repository ppa:juju/stable sudo apt-get update - sudo apt-get install charm-tools + sudo apt-get install charm-tools juju juju-deployer amulet + +Alternatively, tests may be exercised with proposed or development versions +of juju and related tools: + + # juju proposed version + sudo add-apt-repository ppa:juju/proposed + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer + + # juju development version + sudo add-apt-repository ppa:juju/devel + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer -If you use a web proxy server to access the web, you'll need to set the -AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. +Some tests may need to download files. If a web proxy server is required in +the environment, the AMULET_HTTP_PROXY environment variable must be set and +passed into the juju test command. This is unrelated to juju's http proxy +settings or behavior. The following examples demonstrate different ways that tests can be executed. All examples are run from the charm's root directory. - * To run all tests (starting with 00-setup): + * To run all +x tests in the tests directory: - make test + bzr branch lp:charms/trusty/foo + cd foo + make functional_test - * To run a specific test module (or modules): + * To run the tests against a specific release combo as defined in tests/: - juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - * To run a specific test module (or modules), and keep the environment - deployed after a failure: + * To run tests and keep the juju environment deployed after a failure: - juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse * To re-run a test module against an already deployed environment (one that was deployed by a previous call to 'juju test --set-e'): - ./tests/15-basic-trusty-icehouse - -For debugging and test development purposes, all code should be idempotent. -In other words, the code should have the ability to be re-run without changing -the results beyond the initial run. This enables editing and re-running of a -test module against an already deployed environment, as described above. 
- -Manual debugging tips: - - * Set the following env vars before using the OpenStack CLI as admin: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=admin + ./tests/015-basic-trusty-icehouse + + * Even with --set-e, `juju test` will tear down the deployment when all + tests pass. The following work flow may be more effective when + iterating on test writing. + + bzr branch lp:charms/trusty/foo + cd foo + ./tests/setup/00-setup + juju bootstrap + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + + * There may be test definitions in the tests/ dir which are not set +x + executable. This is generally true for deprecated releases, or for + upcoming releases which are not yet validated and enabled. To enable + and run these tests: + bzr branch lp:charms/trusty/foo + cd foo + ls tests + chmod +x tests/017-basic-trusty-kilo + ./tests/setup/00-setup + juju bootstrap + ./tests/017-basic-trusty-kilo + + +Additional notes: + + * Use DEBUG to turn on debug logging, use ERROR otherwise. + u = OpenStackAmuletUtils(ERROR) + u = OpenStackAmuletUtils(DEBUG) + + * To interact with the deployed environment: export OS_USERNAME=admin export OS_PASSWORD=openstack + export OS_TENANT_NAME=admin export OS_REGION_NAME=RegionOne - - * Set the following env vars before using the OpenStack CLI as demoUser: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=demoTenant - export OS_USERNAME=demoUser - export OS_PASSWORD=password - export OS_REGION_NAME=RegionOne + export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 + keystone user-list + glance image-list From d5e9b6b452bb9611574200e13a695d08dda3721e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:34 +0000 Subject: [PATCH 0945/2699] Update tests/README --- ceph-osd/tests/README | 129 ++++++++++++++++++++++++++++-------------- 1 file changed, 86 insertions(+), 43 deletions(-) diff --git a/ceph-osd/tests/README b/ceph-osd/tests/README index 5280a1b2..79c5b063 100644 --- a/ceph-osd/tests/README +++ b/ceph-osd/tests/README @@ -1,70 +1,113 @@ -This directory provides Amulet tests that focus on verification of ceph-osd -deployments. +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. -test_* methods are called in lexical sort order. +Reference: lp:openstack-charm-testing for full test bundles. -Test name convention to ensure desired test order: +A single topology and configuration is defined and deployed, once for each of +the defined Ubuntu:OpenStack release combos. The ongoing goal is for this +charm to always possess tests and combo definitions for all currently-supported +release combinations of U:OS. + +test_* methods are called in lexical sort order, as with most runners. However, +each individual test method should be idempotent and expected to pass regardless +of run order or Ubuntu:OpenStack combo. When writing or modifying tests, +ensure that every individual test is not dependent on another test_ method. 
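A test that must pass regardless of run order typically creates and cleans
up its own fixtures instead of relying on an earlier test's side effects. A
minimal sketch of that pattern (helper names are hypothetical):

    def test_410_pool_create_delete(self):
        """Create, verify and delete a pool; leaves no state behind."""
        pool = 'amulet-scratch'
        self._create_pool(pool)          # hypothetical helper
        assert self._pool_exists(pool)   # hypothetical helper
        self._delete_pool(pool)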
+ +Test naming convention, purely for code organization purposes: 1xx service and endpoint checks 2xx relation checks 3xx config checks 4xx functional checks - 9xx restarts and other final checks + 9xx restarts, config changes, actions and other final checks -Common uses of ceph-osd relations in bundle deployments: - - - "ceph-osd:mon" - - "ceph:osd" +In order to run tests, charm-tools and juju must be installed: + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer amulet -More detailed relations of ceph-osd service in a common deployment: - relations: -???? +Alternatively, tests may be exercised with proposed or development versions +of juju and related tools: -In order to run tests, you'll need charm-tools installed (in addition to -juju, of course): - sudo add-apt-repository ppa:juju/stable + # juju proposed version + sudo add-apt-repository ppa:juju/proposed + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer + + # juju development version + sudo add-apt-repository ppa:juju/devel sudo apt-get update - sudo apt-get install charm-tools + sudo apt-get install charm-tools juju juju-deployer -If you use a web proxy server to access the web, you'll need to set the -AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. +Some tests may need to download files. If a web proxy server is required in +the environment, the AMULET_HTTP_PROXY environment variable must be set and +passed into the juju test command. This is unrelated to juju's http proxy +settings or behavior. The following examples demonstrate different ways that tests can be executed. All examples are run from the charm's root directory. - * To run all tests (starting with 00-setup): + * To run all +x tests in the tests directory: - make test + bzr branch lp:charms/trusty/foo + cd foo + make functional_test - * To run a specific test module (or modules): + * To run the tests against a specific release combo as defined in tests/: - juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - * To run a specific test module (or modules), and keep the environment - deployed after a failure: + * To run tests and keep the juju environment deployed after a failure: - juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse * To re-run a test module against an already deployed environment (one that was deployed by a previous call to 'juju test --set-e'): - ./tests/15-basic-trusty-icehouse - -For debugging and test development purposes, all code should be idempotent. -In other words, the code should have the ability to be re-run without changing -the results beyond the initial run. This enables editing and re-running of a -test module against an already deployed environment, as described above. - -Manual debugging tips: - - * Set the following env vars before using the OpenStack CLI as admin: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=admin + ./tests/015-basic-trusty-icehouse + + * Even with --set-e, `juju test` will tear down the deployment when all + tests pass. The following work flow may be more effective when + iterating on test writing. 
+ + bzr branch lp:charms/trusty/foo + cd foo + ./tests/setup/00-setup + juju bootstrap + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + + * There may be test definitions in the tests/ dir which are not set +x + executable. This is generally true for deprecated releases, or for + upcoming releases which are not yet validated and enabled. To enable + and run these tests: + bzr branch lp:charms/trusty/foo + cd foo + ls tests + chmod +x tests/017-basic-trusty-kilo + ./tests/setup/00-setup + juju bootstrap + ./tests/017-basic-trusty-kilo + + +Additional notes: + + * Use DEBUG to turn on debug logging, use ERROR otherwise. + u = OpenStackAmuletUtils(ERROR) + u = OpenStackAmuletUtils(DEBUG) + + * To interact with the deployed environment: export OS_USERNAME=admin export OS_PASSWORD=openstack + export OS_TENANT_NAME=admin export OS_REGION_NAME=RegionOne - - * Set the following env vars before using the OpenStack CLI as demoUser: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=demoTenant - export OS_USERNAME=demoUser - export OS_PASSWORD=password - export OS_REGION_NAME=RegionOne + export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 + keystone user-list + glance image-list From c552a5142c8393e1c21a9571f0b739a69b0ca036 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:34 +0000 Subject: [PATCH 0946/2699] Update tests/README --- ceph-radosgw/tests/README | 120 ++++++++++++++++++++++++++++---------- 1 file changed, 90 insertions(+), 30 deletions(-) diff --git a/ceph-radosgw/tests/README b/ceph-radosgw/tests/README index 003fbe25..79c5b063 100644 --- a/ceph-radosgw/tests/README +++ b/ceph-radosgw/tests/README @@ -1,53 +1,113 @@ -This directory provides Amulet tests that focus on verification of -ceph-radosgw deployments. +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. -In order to run tests, you'll need charm-tools installed (in addition to -juju, of course): +Reference: lp:openstack-charm-testing for full test bundles. + +A single topology and configuration is defined and deployed, once for each of +the defined Ubuntu:OpenStack release combos. The ongoing goal is for this +charm to always possess tests and combo definitions for all currently-supported +release combinations of U:OS. + +test_* methods are called in lexical sort order, as with most runners. However, +each individual test method should be idempotent and expected to pass regardless +of run order or Ubuntu:OpenStack combo. When writing or modifying tests, +ensure that every individual test is not dependent on another test_ method. 
+ +Test naming convention, purely for code organization purposes: + 1xx service and endpoint checks + 2xx relation checks + 3xx config checks + 4xx functional checks + 9xx restarts, config changes, actions and other final checks + +In order to run tests, charm-tools and juju must be installed: sudo add-apt-repository ppa:juju/stable sudo apt-get update - sudo apt-get install charm-tools + sudo apt-get install charm-tools juju juju-deployer amulet + +Alternatively, tests may be exercised with proposed or development versions +of juju and related tools: + + # juju proposed version + sudo add-apt-repository ppa:juju/proposed + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer + + # juju development version + sudo add-apt-repository ppa:juju/devel + sudo apt-get update + sudo apt-get install charm-tools juju juju-deployer -If you use a web proxy server to access the web, you'll need to set the -AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. +Some tests may need to download files. If a web proxy server is required in +the environment, the AMULET_HTTP_PROXY environment variable must be set and +passed into the juju test command. This is unrelated to juju's http proxy +settings or behavior. The following examples demonstrate different ways that tests can be executed. All examples are run from the charm's root directory. - * To run all tests (starting with 00-setup): + * To run all +x tests in the tests directory: - make test + bzr branch lp:charms/trusty/foo + cd foo + make functional_test - * To run a specific test module (or modules): + * To run the tests against a specific release combo as defined in tests/: - juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - * To run a specific test module (or modules), and keep the environment - deployed after a failure: + * To run tests and keep the juju environment deployed after a failure: - juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + bzr branch lp:charms/trusty/foo + cd foo + juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse * To re-run a test module against an already deployed environment (one that was deployed by a previous call to 'juju test --set-e'): - ./tests/15-basic-trusty-icehouse + ./tests/015-basic-trusty-icehouse -For debugging and test development purposes, all code should be idempotent. -In other words, the code should have the ability to be re-run without changing -the results beyond the initial run. This enables editing and re-running of a -test module against an already deployed environment, as described above. + * Even with --set-e, `juju test` will tear down the deployment when all + tests pass. The following work flow may be more effective when + iterating on test writing. -Manual debugging tips: + bzr branch lp:charms/trusty/foo + cd foo + ./tests/setup/00-setup + juju bootstrap + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse + # make some changes, run tests again + ./tests/015-basic-trusty-icehouse - * Set the following env vars before using the OpenStack CLI as admin: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=admin + * There may be test definitions in the tests/ dir which are not set +x + executable. 
This is generally true for deprecated releases, or for + upcoming releases which are not yet validated and enabled. To enable + and run these tests: + bzr branch lp:charms/trusty/foo + cd foo + ls tests + chmod +x tests/017-basic-trusty-kilo + ./tests/setup/00-setup + juju bootstrap + ./tests/017-basic-trusty-kilo + + +Additional notes: + + * Use DEBUG to turn on debug logging, use ERROR otherwise. + u = OpenStackAmuletUtils(ERROR) + u = OpenStackAmuletUtils(DEBUG) + + * To interact with the deployed environment: export OS_USERNAME=admin export OS_PASSWORD=openstack + export OS_TENANT_NAME=admin export OS_REGION_NAME=RegionOne - - * Set the following env vars before using the OpenStack CLI as demoUser: - export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 - export OS_TENANT_NAME=demoTenant - export OS_USERNAME=demoUser - export OS_PASSWORD=password - export OS_REGION_NAME=RegionOne + export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 + keystone user-list + glance image-list From b5f8765d240ced9d159358a949179ac83ff69f0d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:48 +0000 Subject: [PATCH 0947/2699] Update bundletester testplan yaml file --- ceph-osd/tests/tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 64e3e2d1..4d17631b 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,5 @@ bootstrap: true -reset: true +reset: false virtualenv: true makefile: - lint @@ -9,6 +9,7 @@ sources: packages: - amulet - distro-info-data + - python-ceilometerclient - python-cinderclient - python-distro-info - python-glanceclient From 696753b9e4e603ffcfc1c29584ddfc126b64f234 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:48 +0000 Subject: [PATCH 0948/2699] Update bundletester testplan yaml file --- ceph-proxy/tests/tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 64e3e2d1..4d17631b 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,5 +1,5 @@ bootstrap: true -reset: true +reset: false virtualenv: true makefile: - lint @@ -9,6 +9,7 @@ sources: packages: - amulet - distro-info-data + - python-ceilometerclient - python-cinderclient - python-distro-info - python-glanceclient From eb135cabda4d19dbeec37fb90d7c5d4199f27758 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:48 +0000 Subject: [PATCH 0949/2699] Update bundletester testplan yaml file --- ceph-mon/tests/tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 64e3e2d1..4d17631b 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,5 @@ bootstrap: true -reset: true +reset: false virtualenv: true makefile: - lint @@ -9,6 +9,7 @@ sources: packages: - amulet - distro-info-data + - python-ceilometerclient - python-cinderclient - python-distro-info - python-glanceclient From 07d1a4704d27a7d50d172d5902a09869a5f073b1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Jan 2016 21:45:49 +0000 Subject: [PATCH 0950/2699] Update bundletester testplan yaml file --- ceph-radosgw/tests/tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 64e3e2d1..4d17631b 100644 --- 
a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,5 +1,5 @@ bootstrap: true -reset: true +reset: false virtualenv: true makefile: - lint @@ -9,6 +9,7 @@ sources: packages: - amulet - distro-info-data + - python-ceilometerclient - python-cinderclient - python-distro-info - python-glanceclient From 720e7827153b384f34b82a63050713d6b44afe31 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:18:55 -0500 Subject: [PATCH 0951/2699] update to work with infernalis in addition to firefly --- ceph-proxy/charm-helpers-hooks.yaml | 1 - ceph-proxy/hooks/ceph.py | 80 +++++++++++++++++++++++++++-- ceph-proxy/hooks/ceph_hooks.py | 2 +- 3 files changed, 76 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 3177ba1c..eeee6f8c 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -9,6 +9,5 @@ include: - ceph - payload.execd - contrib.openstack.alternatives - - contrib.openstack.utils - contrib.network.ip - contrib.charmsupport diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index ff3a7a40..be2251e8 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -11,8 +11,10 @@ import subprocess import time import os +import re from charmhelpers.core.host import ( mkdir, + chownr, service_restart, cmp_pkgrevno, lsb_release @@ -24,6 +26,9 @@ cached, status_set, ) +from charmhelpers.fetch import ( + apt_cache +) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, @@ -39,10 +44,58 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +def get_version(): + '''Derive Ceph release from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed.
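
Note: the version gate introduced here is what flips the charm between running daemons as root and as the unprivileged ceph user. A standalone sketch of the mapping, assuming apt reports upstream versions such as "0.94.7" (hammer) or "9.2.0" (infernalis); parse_release and daemon_user are illustrative names, not charm code:

    import re

    def parse_release(vers):
        # Keep only the leading "x.y" and compare numerically, as
        # get_version() above does.
        match = re.match(r'^(\d+)\.(\d+)', vers)
        if match:
            vers = match.group(0)
        return float(vers)

    def daemon_user(vers):
        # Pre-infernalis releases (0.x) ran everything as root; infernalis
        # (9.x) introduces a dedicated unprivileged "ceph" user.
        return 'ceph' if parse_release(vers) > 1 else 'root'

    assert daemon_user('0.94.7') == 'root'   # hammer
    assert daemon_user('9.2.0') == 'ceph'    # infernalis
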
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -67,6 +120,9 @@ def is_quorum(): def is_leader(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -96,6 +152,9 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -161,6 +220,9 @@ def wait_for_bootstrap(): def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _bootstrap_keyring, '--create-keyring', @@ -219,6 +281,9 @@ def get_osd_bootstrap_key(): def import_radosgw_key(key): if not os.path.exists(_radosgw_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _radosgw_keyring, '--create-keyring', @@ -247,6 +312,9 @@ def get_radosgw_key(): def get_named_key(name, caps=None): caps = caps or _default_caps cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph', '--name', 'mon.', '--keyring', @@ -270,7 +338,7 @@ def upgrade_key_caps(key, caps): # Not the MON leader OR not clustered return cmd = [ - 'ceph', 'auth', 'caps', key + "sudo", "-u", ceph_user(),'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): cmd.extend([subsystem, '; '.join(subcaps)]) @@ -297,8 +365,8 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0o755) - mkdir(path) + mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -307,9 +375,11 @@ def bootstrap_monitor_cluster(secret): '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', + # '--setuser', ceph_user(), + # '--setgroup', ceph_user(), '-i', hostname, '--keyring', keyring]) - + chownr(path, ceph_user(), ceph_user()) with open(done, 'w'): pass with open(init_marker, 'w'): @@ -405,7 +475,7 @@ def osdize_dir(path): level=ERROR) raise - mkdir(path) + mkdir(path, owner=ceph_user(), group=ceph_user()) cmd = [ 'ceph-disk', 'prepare', diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 8a041cbe..387191ee 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -111,7 +111,7 @@ def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) - mkdir(os.path.dirname(charm_ceph_conf)) + mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) From 6e9d07a45a20b1dcfbde3ece6cd5fa4f70b142c5 Mon Sep 17 
00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:18:55 -0500 Subject: [PATCH 0952/2699] update to work with infernalis in addition to firefly --- ceph-mon/charm-helpers-hooks.yaml | 1 - ceph-mon/hooks/ceph.py | 80 +++++++++++++++++++++++++++++-- ceph-mon/hooks/ceph_hooks.py | 2 +- 3 files changed, 76 insertions(+), 7 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 3177ba1c..eeee6f8c 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -9,6 +9,5 @@ include: - ceph - payload.execd - contrib.openstack.alternatives - - contrib.openstack.utils - contrib.network.ip - contrib.charmsupport diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index ff3a7a40..be2251e8 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -11,8 +11,10 @@ import subprocess import time import os +import re from charmhelpers.core.host import ( mkdir, + chownr, service_restart, cmp_pkgrevno, lsb_release @@ -24,6 +26,9 @@ cached, status_set, ) +from charmhelpers.fetch import ( + apt_cache +) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, @@ -39,10 +44,58 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +def get_version(): + '''Derive Ceph release from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed.
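
Note: in the hunks that follow, every admin-socket helper (is_quorum, is_leader, add_bootstrap_hint) gains a "sudo -u <ceph user>" prefix so the queries work whether the daemon runs as root or as ceph. The resulting command shape, as a hedged sketch (the asok path and mon_status subcommand mirror the surrounding code; error handling omitted):

    import json
    import subprocess

    def mon_status(hostname, user='ceph'):
        asok = "/var/run/ceph/ceph-mon.{}.asok".format(hostname)
        cmd = ["sudo", "-u", user,
               "ceph", "--admin-daemon", asok, "mon_status"]
        # The monitor answers on its admin socket with a JSON document.
        return json.loads(subprocess.check_output(cmd))
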
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -67,6 +120,9 @@ def is_quorum(): def is_leader(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -96,6 +152,9 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -161,6 +220,9 @@ def wait_for_bootstrap(): def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _bootstrap_keyring, '--create-keyring', @@ -219,6 +281,9 @@ def get_osd_bootstrap_key(): def import_radosgw_key(key): if not os.path.exists(_radosgw_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _radosgw_keyring, '--create-keyring', @@ -247,6 +312,9 @@ def get_radosgw_key(): def get_named_key(name, caps=None): caps = caps or _default_caps cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph', '--name', 'mon.', '--keyring', @@ -270,7 +338,7 @@ def upgrade_key_caps(key, caps): # Not the MON leader OR not clustered return cmd = [ - 'ceph', 'auth', 'caps', key + "sudo", "-u", ceph_user(),'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): cmd.extend([subsystem, '; '.join(subcaps)]) @@ -297,8 +365,8 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0o755) - mkdir(path) + mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -307,9 +375,11 @@ def bootstrap_monitor_cluster(secret): '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', + # '--setuser', ceph_user(), + # '--setgroup', ceph_user(), '-i', hostname, '--keyring', keyring]) - + chownr(path, ceph_user(), ceph_user()) with open(done, 'w'): pass with open(init_marker, 'w'): @@ -405,7 +475,7 @@ def osdize_dir(path): level=ERROR) raise - mkdir(path) + mkdir(path, owner=ceph_user(), group=ceph_user()) cmd = [ 'ceph-disk', 'prepare', diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8a041cbe..387191ee 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -111,7 +111,7 @@ def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) - mkdir(os.path.dirname(charm_ceph_conf)) + mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) From 57ba5c51cea90a8e9cef61470348902579a6b7fd Mon Sep 17 00:00:00 
2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:24:30 -0500 Subject: [PATCH 0953/2699] remove commented out addition --- ceph-proxy/hooks/ceph.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index be2251e8..b7a9cc89 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -375,8 +375,6 @@ def bootstrap_monitor_cluster(secret): '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', - # '--setuser', ceph_user(), - # '--setgroup', ceph_user(), '-i', hostname, '--keyring', keyring]) chownr(path, ceph_user(), ceph_user()) From 45fe48f860af09d04d5c46f5e48b9dc2134fb745 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:24:30 -0500 Subject: [PATCH 0954/2699] remove commented out addition --- ceph-mon/hooks/ceph.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index be2251e8..b7a9cc89 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -375,8 +375,6 @@ def bootstrap_monitor_cluster(secret): '--cap', 'mon', 'allow *']) subprocess.check_call(['ceph-mon', '--mkfs', - # '--setuser', ceph_user(), - # '--setgroup', ceph_user(), '-i', hostname, '--keyring', keyring]) chownr(path, ceph_user(), ceph_user()) From 8af80b5246c86758776764acf7afc822a5bdf41d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:46:02 -0500 Subject: [PATCH 0955/2699] update for lint --- ceph-proxy/hooks/ceph.py | 14 +++++++------- ceph-proxy/hooks/ceph_hooks.py | 3 ++- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index b7a9cc89..b3e6fead 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -12,6 +12,7 @@ import time import os import re +import sys from charmhelpers.core.host import ( mkdir, chownr, @@ -44,6 +45,7 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def ceph_user(): if get_version() > 1: return 'ceph' @@ -60,16 +62,12 @@ def get_version(): try: pkg = cache[package] except: - if not fatal: - return None # the package is unknown to the current apt cache. e = 'Could not determine version of package with no installation '\ 'candidate: %s' % package error_out(e) if not pkg.current_ver: - if not fatal: - return None # package is known, but no version is currently installed. 
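
Note: with the commented-out --setuser/--setgroup flags removed above, ownership of the freshly created mon store is handed to the ceph user after the fact instead. The equivalent sequence as a sketch (hostname and keyring path are illustrative, not charm values):

    import subprocess
    from charmhelpers.core.host import chownr

    hostname = 'juju-machine-0'  # illustrative
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    subprocess.check_call(['ceph-mon', '--mkfs', '-i', hostname,
                           '--keyring', '/var/lib/ceph/tmp/keyring'])
    # Recursively chown the store so the daemon can run unprivileged.
    chownr(path, 'ceph', 'ceph')
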
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) @@ -86,7 +84,8 @@ def get_version(): def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') + log("FATAL ERROR: %s" % msg, + level=ERROR) sys.exit(1) @@ -338,7 +337,7 @@ def upgrade_key_caps(key, caps): # Not the MON leader OR not clustered return cmd = [ - "sudo", "-u", ceph_user(),'ceph', 'auth', 'caps', key + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): cmd.extend([subsystem, '; '.join(subcaps)]) @@ -365,7 +364,8 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 387191ee..3a383938 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -111,7 +111,8 @@ def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) - mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) + mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), + group=ceph.ceph_user()) render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) From 966852ee3dfd45a207083dedd63e98bcae2c6a76 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Jan 2016 10:46:02 -0500 Subject: [PATCH 0956/2699] update for lint --- ceph-mon/hooks/ceph.py | 14 +++++++------- ceph-mon/hooks/ceph_hooks.py | 3 ++- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index b7a9cc89..b3e6fead 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -12,6 +12,7 @@ import time import os import re +import sys from charmhelpers.core.host import ( mkdir, chownr, @@ -44,6 +45,7 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + def ceph_user(): if get_version() > 1: return 'ceph' @@ -60,16 +62,12 @@ def get_version(): try: pkg = cache[package] except: - if not fatal: - return None # the package is unknown to the current apt cache. e = 'Could not determine version of package with no installation '\ 'candidate: %s' % package error_out(e) if not pkg.current_ver: - if not fatal: - return None # package is known, but no version is currently installed. 
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) @@ -86,7 +84,8 @@ def get_version(): def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') + log("FATAL ERROR: %s" % msg, + level=ERROR) sys.exit(1) @@ -338,7 +337,7 @@ def upgrade_key_caps(key, caps): # Not the MON leader OR not clustered return cmd = [ - "sudo", "-u", ceph_user(),'ceph', 'auth', 'caps', key + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.iteritems(): cmd.extend([subsystem, '; '.join(subcaps)]) @@ -365,7 +364,8 @@ def bootstrap_monitor_cluster(secret): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 387191ee..3a383938 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -111,7 +111,8 @@ def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) - mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) + mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), + group=ceph.ceph_user()) render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) From 83e9c2876298f92068fc93824077da52b4028ed8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 12 Jan 2016 09:16:54 -0500 Subject: [PATCH 0957/2699] add support for infernalis --- ceph-osd/hooks/ceph.py | 126 ++++++++++++++++++++++++++++++----- ceph-osd/hooks/ceph_hooks.py | 3 +- 2 files changed, 110 insertions(+), 19 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index f9448db2..158620fc 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -11,16 +11,25 @@ import subprocess import time import os +import re +import sys from charmhelpers.core.host import ( mkdir, + chownr, service_restart, - cmp_pkgrevno + cmp_pkgrevno, + lsb_release ) from charmhelpers.core.hookenv import ( log, - ERROR, WARNING, + ERROR, + WARNING, + cached, status_set, ) +from charmhelpers.fetch import ( + apt_cache +) from charmhelpers.contrib.storage.linux.utils import ( zap_disk, is_block_device, @@ -37,9 +46,55 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +def get_version(): + '''Derive Ceph release from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + # package is known, but no version is currently installed. 
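
Note: get_version() leans on python-apt to normalise whatever version string the archive installed before the x.y truncation. A hedged example of that call in isolation (requires python-apt; apt_pkg.init() sets up the versioning system first):

    import apt_pkg

    apt_pkg.init()
    # Strips the packaging revision, leaving the bare upstream version
    # that get_version()'s regex then truncates to "x.y".
    print(apt_pkg.upstream_version('9.2.0-0ubuntu1'))  # -> '9.2.0'
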
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: %s" % msg, + level=ERROR) + sys.exit(1) + + def is_quorum(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -64,6 +119,9 @@ def is_quorum(): def is_leader(): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -93,6 +151,9 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) cmd = [ + "sudo", + "-u", + ceph_user(), "ceph", "--admin-daemon", asok, @@ -127,11 +188,11 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if cmp_pkgrevno('ceph', '0.56.6') >= 0: - # Use ceph-disk-activate for directory based OSD's + if cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's for dev_or_path in devices: if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk-activate', dev_or_path]) + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) def rescan_osd_devices(): @@ -158,6 +219,9 @@ def wait_for_bootstrap(): def import_osd_bootstrap_key(key): if not os.path.exists(_bootstrap_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _bootstrap_keyring, '--create-keyring', @@ -216,6 +280,9 @@ def get_osd_bootstrap_key(): def import_radosgw_key(key): if not os.path.exists(_radosgw_keyring): cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph-authtool', _radosgw_keyring, '--create-keyring', @@ -244,6 +311,9 @@ def get_radosgw_key(): def get_named_key(name, caps=None): caps = caps or _default_caps cmd = [ + "sudo", + "-u", + ceph_user(), 'ceph', '--name', 'mon.', '--keyring', @@ -261,19 +331,29 @@ def get_named_key(name, caps=None): return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 +@cached +def systemd(): + return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + + def bootstrap_monitor_cluster(secret): hostname = get_unit_hostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) - upstart = '{}/upstart'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) if os.path.exists(done): log('bootstrap_monitor_cluster: mon already initialized.') else: # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', perms=0755) - mkdir(path) + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: subprocess.check_call(['ceph-authtool', keyring, @@ -284,13 +364,17 @@ def bootstrap_monitor_cluster(secret): subprocess.check_call(['ceph-mon', '--mkfs', '-i', hostname, '--keyring', keyring]) - + chownr(path, ceph_user(), ceph_user()) with open(done, 'w'): pass - with open(upstart, 'w'): + with open(init_marker, 'w'): pass - service_restart('ceph-mon-all') + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + 
service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') except: raise finally: @@ -300,11 +384,14 @@ def bootstrap_monitor_cluster(secret): def update_monfs(): hostname = get_unit_hostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - upstart = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(upstart): + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): # Mark mon as managed by upstart so that # it gets start correctly on reboots - with open(upstart, 'w'): + with open(init_marker, 'w'): pass @@ -335,7 +422,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, return status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk-prepare'] + cmd = ['ceph-disk', 'prepare'] # Later versions of ceph support more options if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: @@ -368,14 +455,17 @@ def osdize_dir(path): log('Path {} is already configured as an OSD - bailing'.format(path)) return - if cmp_pkgrevno('ceph', '0.56.6') < 0: + if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) raise - mkdir(path) + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) cmd = [ - 'ceph-disk-prepare', + 'sudo', '-u', ceph_user(), + 'ceph-disk', + 'prepare', '--data-dir', path ] diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index cb3e08be..9ff5d443 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -95,7 +95,8 @@ def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) - mkdir(os.path.dirname(charm_ceph_conf)) + mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), + group=ceph.ceph_user()) with open(charm_ceph_conf, 'w') as cephconf: cephconf.write(render_template('ceph.conf', cephcontext)) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', From 57a66f660a42e41d32faace396c01db386a2cf19 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 12 Jan 2016 09:17:36 -0500 Subject: [PATCH 0958/2699] fix permissions when creating OSD with directory --- ceph-proxy/hooks/ceph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index b3e6fead..a32fa58e 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -473,8 +473,10 @@ def osdize_dir(path): level=ERROR) raise - mkdir(path, owner=ceph_user(), group=ceph_user()) + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) cmd = [ + 'sudo', '-u', ceph_user(), 'ceph-disk', 'prepare', '--data-dir', From 32b71386b49e06c32063179f0f9d1eb6511a9867 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 12 Jan 2016 09:17:36 -0500 Subject: [PATCH 0959/2699] fix permissions when creating OSD with directory --- ceph-mon/hooks/ceph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index b3e6fead..a32fa58e 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -473,8 +473,10 @@ def osdize_dir(path): level=ERROR) raise - mkdir(path, owner=ceph_user(), group=ceph_user()) + mkdir(path, owner=ceph_user(), 
group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) cmd = [ + 'sudo', '-u', ceph_user(), 'ceph-disk', 'prepare', '--data-dir', From 199ce85152ff6a1c8d11851f4ad8026f0f0f267c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 13 Jan 2016 12:14:23 +0200 Subject: [PATCH 0960/2699] [trivial] fix setup_keystone_certs() exception handler --- ceph-radosgw/hooks/hooks.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 91c43bd0..cecff3e7 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -206,12 +206,13 @@ def setup_keystone_certs(unit=None, rid=None): # CA try: - # Kilo and newer - ca_cert = keystone.certificates.get_ca_certificate() - except AttributeError: - # Juno and older - ca_cert = requests.request('GET', auth_endpoint + - '/certificates/ca').text + try: + # Kilo and newer + ca_cert = keystone.certificates.get_ca_certificate() + except AttributeError: + # Juno and older + ca_cert = requests.request('GET', auth_endpoint + + '/certificates/ca').text except ConnectionRefused: log("Error connecting to keystone - skipping ca/signing cert setup", level=WARNING) @@ -233,12 +234,13 @@ def setup_keystone_certs(unit=None, rid=None): # Signing cert try: - # Kilo and newer - signing_cert = keystone.certificates.get_signing_certificate() - except AttributeError: - # Juno and older - signing_cert = requests.request('GET', auth_endpoint + - '/certificates/signing').text + try: + # Kilo and newer + signing_cert = keystone.certificates.get_signing_certificate() + except AttributeError: + # Juno and older + signing_cert = requests.request('GET', auth_endpoint + + '/certificates/signing').text except ConnectionRefused: log("Error connecting to keystone - skipping ca/signing cert setup", level=WARNING) From ac66599e0027363a0a0dbf7e40d278b5d09d2e69 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 13 Jan 2016 14:45:54 +0200 Subject: [PATCH 0961/2699] [hopem,r=] Add loglevel config option. Closes-Bug: 1520236 --- ceph-proxy/config.yaml | 4 ++++ ceph-proxy/hooks/ceph_hooks.py | 1 + ceph-proxy/templates/ceph.conf | 2 ++ 3 files changed, 7 insertions(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 891c7400..d47f6764 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -1,4 +1,8 @@ options: + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. 
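
Note: the setup_keystone_certs() rework above separates two failure modes: the inner try picks the API generation (AttributeError on a pre-Kilo keystoneclient), while the outer one catches connection failures on either path. Generic shape of the pattern, with hypothetical stand-in names:

    def get_ca_cert(keystone, http_fallback):
        try:
            try:
                # Kilo and newer expose a certificates manager
                return keystone.certificates.get_ca_certificate()
            except AttributeError:
                # Juno and older: fetch from the raw HTTP endpoint
                return http_fallback()
        except IOError:
            # stand-in for ConnectionRefused; the real handler logs a
            # warning and returns without configuring certs
            return None
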
fsid: type: string default: diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 8a041cbe..fd68dde3 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -99,6 +99,7 @@ def emit_cephconf(): 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), + 'loglevel': config('loglevel'), } if config('prefer-ipv6'): diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index e168f54a..64f52a6c 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -14,6 +14,8 @@ log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} mon cluster log to syslog = {{ use_syslog }} +debug mon = {{ loglevel }}/5 +debug osd = {{ loglevel }}/5 {%- if ceph_public_network is string %} public network = {{ ceph_public_network }} From 580a6eafc3ce0aa3e66e8cf76df858703c727884 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 13 Jan 2016 14:45:54 +0200 Subject: [PATCH 0962/2699] [hopem,r=] Add loglevel config option. Closes-Bug: 1520236 --- ceph-mon/config.yaml | 4 ++++ ceph-mon/hooks/ceph_hooks.py | 1 + ceph-mon/templates/ceph.conf | 2 ++ 3 files changed, 7 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 891c7400..d47f6764 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -1,4 +1,8 @@ options: + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. fsid: type: string default: diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8a041cbe..fd68dde3 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -99,6 +99,7 @@ def emit_cephconf(): 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), + 'loglevel': config('loglevel'), } if config('prefer-ipv6'): diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index e168f54a..64f52a6c 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -14,6 +14,8 @@ log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} mon cluster log to syslog = {{ use_syslog }} +debug mon = {{ loglevel }}/5 +debug osd = {{ loglevel }}/5 {%- if ceph_public_network is string %} public network = {{ ceph_public_network }} From fd191037cf85e1de3f190b25d1647504cf04187b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 13 Jan 2016 14:48:57 +0200 Subject: [PATCH 0963/2699] [hopem,r=] Add loglevel config option. Closes-Bug: 1520236 --- ceph-osd/config.yaml | 4 ++++ ceph-osd/hooks/ceph_hooks.py | 1 + ceph-osd/templates/ceph.conf | 1 + 3 files changed, 6 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index fdba28f5..2dc2a586 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -1,4 +1,8 @@ options: + loglevel: + default: 1 + type: int + description: OSD debug level. Max is 20. 
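
Note: the template lines above follow Ceph's "log level/memory level" convention, so loglevel=1 renders as "debug mon = 1/5" (on-disk logging at 1, in-memory logging stays at 5). A quick render sketch using python-jinja2 directly rather than the charms' templating helpers:

    from jinja2 import Template

    snippet = ("debug mon = {{ loglevel }}/5\n"
               "debug osd = {{ loglevel }}/5")
    print(Template(snippet).render(loglevel=1))
    # debug mon = 1/5
    # debug osd = 1/5
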
osd-devices: type: string default: /dev/vdb diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index cb3e08be..3079f871 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -83,6 +83,7 @@ def emit_cephconf(): 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), + 'loglevel': config('loglevel'), } if config('prefer-ipv6'): diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 39728c17..0696142a 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -13,6 +13,7 @@ fsid = {{ fsid }} log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +debug osd = {{ loglevel }}/5 {%- if ceph_public_network is string %} public network = {{ ceph_public_network }} From b93c908d7cfe9a1053aa54ea463a41a7646afb83 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 18 Jan 2016 16:42:36 +0000 Subject: [PATCH 0964/2699] Add configuration option for toggling use of direct io for OSD journals --- ceph-osd/config.yaml | 5 +++++ ceph-osd/hooks/ceph_hooks.py | 1 + ceph-osd/templates/ceph.conf | 1 + 3 files changed, 7 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 2dc2a586..2afa6afc 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -152,3 +152,8 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup + use-direct-io: + default: True + type: boolean + description: Configure use of direct IO for OSD journals. + diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index aa22646c..dc0815ff 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -84,6 +84,7 @@ def emit_cephconf(): 'ceph_public_network': config('ceph-public-network'), 'ceph_cluster_network': config('ceph-cluster-network'), 'loglevel': config('loglevel'), + 'dio': str(config('use-direct-io')).lower(), } if config('prefer-ipv6'): diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 0696142a..edd4f646 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -40,4 +40,5 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true +journal dio = {{ dio }} From e7d1d71ae2e5f208e2ec37afd658bb6de2eca858 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 19 Jan 2016 12:47:41 +0000 Subject: [PATCH 0965/2699] Fix typo in mitaka amulet test definition --- ceph-osd/tests/019-basic-trusty-mitaka | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/tests/019-basic-trusty-mitaka b/ceph-osd/tests/019-basic-trusty-mitaka index 98eb440f..537bfa42 100644 --- a/ceph-osd/tests/019-basic-trusty-mitaka +++ b/ceph-osd/tests/019-basic-trusty-mitaka @@ -1,11 +1,11 @@ #!/usr/bin/python -"""Amulet tests on a basic ceph-osd deployment on trusty-liberty.""" +"""Amulet tests on a basic ceph-osd deployment on trusty-mitaka.""" from basic_deployment import CephOsdBasicDeployment if __name__ == '__main__': deployment = CephOsdBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') deployment.run_tests() From e31343a580662380daded0e4b26cce0d65407b8c Mon Sep 17 
00:00:00 2001 From: Ryan Beisner Date: Tue, 19 Jan 2016 12:47:41 +0000 Subject: [PATCH 0966/2699] Fix typo in mitaka amulet test definition --- ceph-proxy/tests/019-basic-trusty-mitaka | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/019-basic-trusty-mitaka index f339371b..2eca19d6 100644 --- a/ceph-proxy/tests/019-basic-trusty-mitaka +++ b/ceph-proxy/tests/019-basic-trusty-mitaka @@ -1,11 +1,11 @@ #!/usr/bin/python -"""Amulet tests on a basic ceph deployment on trusty-liberty.""" +"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') deployment.run_tests() From a16b6a3d0768b2e9fac904eb72c45a59ff9af808 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 19 Jan 2016 12:47:41 +0000 Subject: [PATCH 0967/2699] Fix typo in mitaka amulet test definition --- ceph-mon/tests/019-basic-trusty-mitaka | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/tests/019-basic-trusty-mitaka b/ceph-mon/tests/019-basic-trusty-mitaka index f339371b..2eca19d6 100644 --- a/ceph-mon/tests/019-basic-trusty-mitaka +++ b/ceph-mon/tests/019-basic-trusty-mitaka @@ -1,11 +1,11 @@ #!/usr/bin/python -"""Amulet tests on a basic ceph deployment on trusty-liberty.""" +"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') deployment.run_tests() From 990749e01e49f7dbe49cc3de1fd3bed368aaf1c2 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 19 Jan 2016 12:47:42 +0000 Subject: [PATCH 0968/2699] Fix typo in mitaka amulet test definition --- ceph-radosgw/tests/019-basic-trusty-mitaka | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/tests/019-basic-trusty-mitaka b/ceph-radosgw/tests/019-basic-trusty-mitaka index 8093f583..7f4eca48 100644 --- a/ceph-radosgw/tests/019-basic-trusty-mitaka +++ b/ceph-radosgw/tests/019-basic-trusty-mitaka @@ -1,11 +1,11 @@ #!/usr/bin/python -"""Amulet tests on a basic ceph-radosgw deployment on trusty-liberty.""" +"""Amulet tests on a basic ceph-radosgw deployment on trusty-mitaka.""" from basic_deployment import CephRadosGwBasicDeployment if __name__ == '__main__': deployment = CephRadosGwBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') deployment.run_tests() From b093786f61155a6a9eb9eeaac7fdd8585f648acc Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Jan 2016 10:49:49 +0000 Subject: [PATCH 0969/2699] [trivial] catch requests.exceptions.ConnectionError if KS connection fails --- ceph-radosgw/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 60de3c9f..c7929ba8 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -224,7 +224,7 @@ def setup_keystone_certs(unit=None, rid=None): # Juno and older ca_cert = 
requests.request('GET', auth_endpoint + '/certificates/ca').text - except ConnectionRefused: + except (ConnectionRefused, requests.exceptions.ConnectionError): log("Error connecting to keystone - skipping ca/signing cert setup", level=WARNING) return @@ -252,7 +252,7 @@ def setup_keystone_certs(unit=None, rid=None): # Juno and older signing_cert = requests.request('GET', auth_endpoint + '/certificates/signing').text - except ConnectionRefused: + except (ConnectionRefused, requests.exceptions.ConnectionError): log("Error connecting to keystone - skipping ca/signing cert setup", level=WARNING) return From 2d51b5336f624c01f53651725c19dc9510840bf0 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 22 Jan 2016 16:57:11 +0100 Subject: [PATCH 0970/2699] Make writing the zapfile safer --- ceph-osd/hooks/ceph_hooks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f35bb705..3a7637f1 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -11,6 +11,7 @@ import os import shutil import sys +import tempfile import ceph from charmhelpers.core.hookenv import ( @@ -115,10 +116,12 @@ def read_zapped_journals(): return set() def write_zapped_journals(journal_devs): - with open(JOURNAL_ZAPPED, 'w') as zapfile: + tmpfh, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfh, 'wb') as zapfile: log("write zapped: {}".format(journal_devs), level=DEBUG) zapfile.write('\n'.join(sorted(list(journal_devs)))) + os.rename(tmpfile, JOURNAL_ZAPPED) def check_overlap(journaldevs, datadevs): if not journaldevs.isdisjoint(datadevs): From 5c03713c439768c843bab22587375b88e9f79292 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 22 Jan 2016 22:29:34 +0000 Subject: [PATCH 0971/2699] sync charmhelpers for mitaka cloud archive support --- ceph-osd/hooks/charmhelpers/cli/__init__.py | 6 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 66 ++- .../hooks/charmhelpers/contrib/network/ip.py | 50 +-- ceph-osd/hooks/charmhelpers/core/hookenv.py | 88 +++- ceph-osd/hooks/charmhelpers/core/host.py | 203 +++++++--- ceph-osd/hooks/charmhelpers/core/hugepage.py | 11 +- ceph-osd/hooks/charmhelpers/core/kernel.py | 68 ++++ .../charmhelpers/core/services/helpers.py | 19 +- ceph-osd/hooks/charmhelpers/core/strutils.py | 30 ++ .../hooks/charmhelpers/core/templating.py | 29 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 12 +- .../hooks/charmhelpers/fetch/archiveurl.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 54 +-- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 41 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../charmhelpers/contrib/amulet/utils.py | 344 +++++++++++++--- .../contrib/openstack/amulet/deployment.py | 143 ++++++- .../contrib/openstack/amulet/utils.py | 381 ++++++++++++++++++ 18 files changed, 1313 insertions(+), 240 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/kernel.py diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py index 16d52cc4..2d37ab31 100644 --- a/ceph-osd/hooks/charmhelpers/cli/__init__.py +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -20,7 +20,7 @@ from six.moves import zip -from charmhelpers.core import unitdata +import charmhelpers.core.unitdata class OutputFormatter(object): @@ -163,8 +163,8 @@ def run(self): if getattr(arguments.func, '_cli_no_output', False): output = '' self.formatter.format_output(output, arguments.format) - if unitdata._KV: - unitdata._KV.flush() + if 
charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() cmdline = CommandLine() diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 95a79c2e..2f246429 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -148,6 +148,13 @@ def __init__(self, shortname, description, check_cmd): self.description = description self.check_cmd = self._locate_cmd(check_cmd) + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', @@ -163,9 +170,21 @@ def _locate_cmd(self, check_cmd): log('Check command not found: {}'.format(parts[0])) return '' + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( - self.command) + nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( @@ -180,9 +199,7 @@ def write(self, nagios_context, hostname, nagios_servicegroups): def write_service_config(self, nagios_context, hostname, nagios_servicegroups): - for f in os.listdir(NRPE.nagios_exportdir): - if re.search('.*{}.cfg'.format(self.command), f): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) + self._remove_service_files() templ_vars = { 'nagios_hostname': hostname, @@ -192,8 +209,7 @@ def write_service_config(self, nagios_context, hostname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = '{}/service__{}_{}.cfg'.format( - NRPE.nagios_exportdir, hostname, self.command) + nrpe_service_file = self._get_service_filename(hostname) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) @@ -218,12 +234,32 @@ def __init__(self, hostname=None): if hostname: self.hostname = hostname else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. 
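
Note: the zapfile change above ("Make writing the zapfile safer") is the standard atomic-write idiom: write to a temporary file, then rename() it into place so a crash never leaves a truncated file behind. Distilled sketch; POSIX rename() is only atomic within a single filesystem, so the dir= argument here keeps the temporary file next to its target:

    import os
    import tempfile

    def write_atomically(path, data):
        tmpfh, tmpfile = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        with os.fdopen(tmpfh, 'w') as f:
            f.write(data)
        # Readers see either the old contents or the new, never a mix.
        os.rename(tmpfile, path)
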
+ if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid @@ -260,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'): :param str relation_name: Name of relation nrpe sub joined to """ for rel in relations_of_type(relation_name): - if 'nagios_hostname' in rel: + if 'nagios_host_context' in rel: return rel['nagios_host_context'] @@ -301,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc if os.path.exists(upstart_init): - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + # Don't add a check for these services from neutron-gateway + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 67b4dccc..998f00c1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr @@ -51,7 +53,7 @@ def _validate_cidr(network): def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) + errmsg = ("No IP address found in network(s): %s" % network) raise ValueError(errmsg) @@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). 
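
Note: get_address_in_network() now accepts a space-delimited list of CIDRs and tests each interface address for membership with netaddr. The membership check it relies on, in isolation:

    import netaddr

    addr = netaddr.IPNetwork("192.168.1.10/24")  # interface addr + netmask
    net = netaddr.IPNetwork("192.168.1.0/24")    # one configured network
    assert addr in net
    print(str(addr.ip))  # -> 192.168.1.10
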
@@ -73,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False): else: return None - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index ab53a780..2dd70bc9 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -490,6 +490,19 @@ def relation_types(): return rel_types +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + @cached def relation_to_interface(relation_name): """ @@ -504,12 +517,12 @@ def relation_to_interface(relation_name): def relation_to_role_and_interface(relation_name): """ Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. """ _metadata = metadata() - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') if interface: return role, interface @@ -521,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name): """ Given a role and interface name, return a list of relation names for the current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peer``). + of ``provides``, ``requires``, or ``peers``). :returns: A list of relation names. """ @@ -542,7 +555,7 @@ def interface_to_relations(interface_name): :returns: A list of relation names. 
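
Note: the role key is "peers", matching the section name in metadata.yaml, which the corrected call sites above now use consistently. Hedged usage (the interface name and result are illustrative for a ceph charm):

    from charmhelpers.core.hookenv import role_and_interface_to_relations

    # Relation names declared under "peers:" in metadata.yaml that use
    # the given interface, e.g. ['mon'].
    print(role_and_interface_to_relations('peers', 'ceph'))
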
""" results = [] - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): results.extend(role_and_interface_to_relations(role, interface_name)) return results @@ -623,6 +636,38 @@ def unit_private_ip(): return unit_get('private-address') +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + class UnregisteredHookError(Exception): """Raised when an undefined hook is called""" pass @@ -788,6 +833,7 @@ def status_get(): def translate_exc(from_exc, to_exc): def inner_translate_exc1(f): + @wraps(f) def inner_translate_exc2(*args, **kwargs): try: return f(*args, **kwargs) @@ -832,6 +878,40 @@ def leader_set(settings=None, **kwargs): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 29e8fee0..a7720906 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -63,55 +63,86 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. 
Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" - stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + stopped = True + if service_running(service_name): + stopped = service_stop(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - started = service_start(service_name) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + + started = service_running(service_name) + if not started: + started = service_start(service_name) return started def service(action, service_name): """Control a system service""" - cmd = ['service', service_name, action] + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 -def service_running(service): +def service_running(service_name): """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False + if init_is_systemd(): + return service('is-active', service_name) else: - if ("start/running" in output or "is running" in output): - return True - else: + try: + output = subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: return False + else: + if ("start/running" in output or "is running" in output or + "up and running" in output): + return True + else: + return False def service_available(service_name): @@ -126,8 +157,29 @@ def service_available(service_name): return True -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True 
if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', system_user=False, + primary_group=None, secondary_groups=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. + + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -142,6 +194,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): '--shell', shell, '--password', password, ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -239,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444): def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab - """ + """Remove the given mountpoint entry from /etc/fstab""" return Fstab.remove_by_mountpoint(mp) def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file - """ + """Adds the given device entry to the /etc/fstab file""" return Fstab.add(dev, mp, fs, options=options) @@ -302,8 +362,7 @@ def fstab_mount(mountpoint): def file_hash(path, hash_type='md5'): - """ - Generate a hash checksum of the contents of 'path' or None if not found. + """Generate a hash checksum of the contents of 'path' or None if not found. :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. @@ -318,10 +377,9 @@ def file_hash(path, hash_type='md5'): def path_hash(path): - """ - Generate a hash checksum of all files matching 'path'. Standard wildcards - like '*' and '?' are supported, see documentation for the 'glob' module for - more information. + """Generate a hash checksum of all files matching 'path'. Standard + wildcards like '*' and '?' are supported, see documentation for the 'glob' + module for more information. :return: dict: A { filename: hash } dictionary for all matched files. Empty if none found. @@ -333,8 +391,7 @@ def path_hash(path): def check_hash(path, checksum, hash_type='md5'): - """ - Validate a file using a cryptographic checksum. + """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. 
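For reference, the file_hash/path_hash contract being reworked above reduces to the following (a simplified sketch, assuming hashlib supports the named algorithm):

import glob
import hashlib
import os

def file_hash(path, hash_type='md5'):
    # hash of the file contents, or None if the file does not exist
    if not os.path.exists(path):
        return None
    digest = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        digest.update(source.read())
    return digest.hexdigest()

def path_hash(pattern):
    # {filename: hash} for every glob match; empty dict if none matched
    return {name: file_hash(name) for name in glob.iglob(pattern)}

check_hash() then just compares file_hash(path, hash_type) against the expected checksum and raises ChecksumError on mismatch.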
@@ -349,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'): class ChecksumError(ValueError): + """A class derived from Value error to indicate the checksum failed.""" pass @@ -454,7 +512,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): - '''Return a list of nics of given type(s)''' + """Return a list of nics of given type(s)""" if isinstance(nic_type, six.string_types): int_types = [nic_type] else: @@ -496,12 +554,13 @@ def list_nics(nic_type=None): def set_nic_mtu(nic, mtu): - '''Set MTU on a network interface''' + """Set the Maximum Transmission Unit (MTU) on a network interface.""" cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] subprocess.check_call(cmd) def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" @@ -513,6 +572,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" @@ -523,7 +583,7 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): - '''Compare supplied revno with the revno of the installed package + """Compare supplied revno with the revno of the installed package * 1 => Installed revno is greater than supplied arg * 0 => Installed revno is the same as supplied arg @@ -532,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): This function imports apt_cache function from charmhelpers.fetch if the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. - ''' + """ import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache @@ -542,15 +602,30 @@ def cmp_pkgrevno(package, revno, pkgcache=None): @contextmanager -def chdir(d): +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ cur = os.getcwd() try: - yield os.chdir(d) + yield os.chdir(directory) finally: os.chdir(cur) -def chownr(path, owner, group, follow_links=True): +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
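A condensed recap of what chownr now does with the new chowntopdir flag (a simplified sketch without the follow_links handling; chownr_sketch is an illustrative name):

import grp
import os
import pwd

def chownr_sketch(path, owner, group, chowntopdir=False):
    # resolve names to ids once, then walk the tree; path itself is
    # only chowned when chowntopdir is True, matching the diff above
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    if chowntopdir and os.path.exists(path):
        os.chown(path, uid, gid)
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            if os.path.exists(full):  # skip broken symlinks
                os.chown(full, uid, gid)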
+ :param bool follow_links: Also Chown links if True + :param bool chowntopdir: Also chown path itself if True + """ uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid if follow_links: @@ -558,6 +633,10 @@ def chownr(path, owner, group, follow_links=True): else: chown = os.lchown + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) @@ -567,4 +646,28 @@ def chownr(path, owner, group, follow_links=True): def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ chownr(path, owner, group, follow_links=False) + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() diff --git a/ceph-osd/hooks/charmhelpers/core/hugepage.py b/ceph-osd/hooks/charmhelpers/core/hugepage.py index ba4340ff..a783ad94 100644 --- a/ceph-osd/hooks/charmhelpers/core/hugepage.py +++ b/ceph-osd/hooks/charmhelpers/core/hugepage.py @@ -25,11 +25,13 @@ fstab_mount, mkdir, ) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True): + pagesize='2MB', mount=True, set_shmmax=False): """Enable hugepages on system. Args: @@ -44,11 +46,18 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256, group_info = add_group(group) gid = group_info.gr_gid add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid, } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) lfstab = fstab.Fstab() diff --git a/ceph-osd/hooks/charmhelpers/core/kernel.py b/ceph-osd/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 3f677833..24237042 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -243,33 +243,40 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` - :param str target: The target to write the rendered template to + :param str target: The target to write the rendered template to (or None) :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template """ def __init__(self, source, target, owner='root', group='root', perms=0o444, - on_change_action=None): + on_change_action=None, template_loader=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms self.on_change_action = on_change_action + self.template_loader = template_loader def __call__(self, manager, service_name, event_name): pre_checksum = '' if self.on_change_action and os.path.isfile(self.target): pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) - context = {} + context = {'ctx': {}} for ctx in service.get('required_data', []): context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( @@ -278,6 +285,8 @@ def __call__(self, manager, service_name, event_name): else: 
self.on_change_action() + return result + # Convenience aliases for templates render_template = template = TemplateCallback diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index a2a784aa..7e3f9693 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. + + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 45319998..d2d8eafe 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -21,13 +21,14 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): """ Render a template. The `source` path, if not absolute, is relative to the `templates_dir`. - The `target` path should be absolute. + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. The context should be a dict containing the values to be replaced in the template. @@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root', If omitted, `templates_dir` defaults to the `templates` folder in the charm. + The rendered template will be written to the file as well as being returned + as a string. + Note: Using this requires python-jinja2; if it is not installed, calling this will attempt to use charmhelpers.fetch.apt_install to install it. """ @@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root', apt_install('python-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) try: source = source - template = loader.get_template(source) + template = template_env.get_template(source) except exceptions.TemplateNotFound as e: hookenv.log('Could not load template %s from %s.' 
% (source, templates_dir), level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index cd0b783c..db0d86a2 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -98,6 +98,14 @@ 'liberty/proposed': 'trusty-proposed/liberty', 'trusty-liberty/proposed': 'trusty-proposed/liberty', 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', } # The order of this list is very important. Handlers should be listed in from @@ -225,12 +233,12 @@ def apt_purge(packages, fatal=False): def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark""" + log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) - log("Holding {}".format(packages)) if fatal: subprocess.check_call(cmd, universal_newlines=True) @@ -411,7 +419,7 @@ def plugins(fetch_handlers=None): importlib.import_module(package), classname) plugin_list.append(handler_class()) - except (ImportError, AttributeError): + except NotImplementedError: # Skip missing plugins so that they can be ommitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index efd7f9f0..b8e0943d 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -108,7 +108,7 @@ def download(self, source, dest): install_opener(opener) response = urlopen(source) try: - with open(dest, 'w') as dest_file: + with open(dest, 'wb') as dest_file: dest_file.write(response.read()) except Exception as e: if os.path.isfile(dest): diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index 3531315a..cafd27f7 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -15,60 +15,50 @@ # along with charm-helpers. If not, see . 
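The templating change above lets callers render without touching disk: when target is None, render() simply returns the rendered string. A minimal jinja2 sketch of that path (the template name and context are illustrative):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
content = env.get_template('ceph.conf.tmpl').render({'fsid': 'some-fsid'})
# charmhelpers' render() now returns this string, and only writes a file
# when a target path is supplied.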
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors +if filter_installed_packages(['bzr']) != []: + apt_install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) else: return True def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e + if os.path.exists(dest): + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + else: + check_call(['bzr', 'branch', source, dest]) - def install(self, source): + def install(self, source, dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index f023b26d..65ed5319 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -15,24 +15,18 @@ # along with charm-helpers. If not, see . 
import os +from subprocess import check_call, CalledProcessError from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) -from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('GitPython does not support Python 3') - -try: - from git import Repo -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-git") - from git import Repo - -from git.exc import GitCommandError # noqa E402 +if filter_installed_packages(['git']) != []: + apt_install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): @@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git'): + if url_parts.scheme not in ('http', 'https', 'git', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) else: return True - def clone(self, source, dest, branch, depth=None): + def clone(self, source, dest, branch="master", depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if depth: - Repo.clone_from(source, dest, branch=branch, depth=depth) + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] else: - Repo.clone_from(source, dest, branch=branch) + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', depth]) + check_call(cmd) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) @@ -62,11 +61,9 @@ def install(self, source, branch="master", dest=None, depth=None): else: dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) - except GitCommandError as e: + except CalledProcessError as e: raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ def _add_services(self, this_service, other_services): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 7816c934..2591a9b1 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -19,9 +19,11 @@ import logging import os import re +import socket 
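The rewritten git fetch handler above drives the CLI with a clone-or-pull decision instead of GitPython. A standalone sketch of that logic; note the str() around depth, since subprocess arguments must be strings if a caller passes an integer:

import os
from subprocess import check_call

def clone_or_pull(source, dest, branch='master', depth=None):
    if os.path.exists(dest):
        # existing checkout: update it in place
        check_call(['git', '-C', dest, 'pull', source, branch])
    else:
        cmd = ['git', 'clone', source, dest, '--branch', branch]
        if depth:
            cmd.extend(['--depth', str(depth)])
        check_call(cmd)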
import subprocess import sys import time +import uuid import amulet import distro_info @@ -114,7 +116,7 @@ def validate_services(self, commands): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -269,33 +271,52 @@ def _get_dir_mtime(self, sentry_unit, directory): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. - """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -304,78 +325,122 @@ def service_restarted(self, sentry_unit, service, filename, return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=30, retry_sleep_time=10): """Check if service was been started after a given time. Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process - retry_count (int): If service is not found, how many times to retry + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if service found and its start time it newer than mtime, False if service is older than mtime or if service was not found. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, proc may not exist yet. 
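The new retry loops in service_restarted_since and config_updated_since share one shape, generalized here (retry_on_ioerror is an illustrative helper, not part of the library):

import time

def retry_on_ioerror(fn, retry_count=30, retry_sleep_time=10):
    # retry while the sampled value is falsy or the probe raises
    # IOError, then give up and return None, as the loops above do
    tries = 0
    result = None
    while tries <= retry_count and not result:
        try:
            result = fn()
        except IOError:
            time.sleep(retry_sleep_time)
        tries += 1
    return result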
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed\n{}'.format(tries, service, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20): + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check if file was modified after a given time. Args: sentry_unit (sentry): The sentry unit to check the file mtime on filename (string): The file to check mtime of mtime (float): The epoch time to check against - sleep_time (int): Seconds to sleep before looking for process + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry Returns: bool: True if file was modified more recently than mtime, False if - file was modified before mtime, + file was modified before mtime, or if file not found. """ - self.log.debug('Checking %s updated since %s' % (filename, mtime)) + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) time.sleep(sleep_time) - file_mtime = self._get_file_mtime(sentry_unit, filename) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. 
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + if file_mtime >= mtime: self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s)' % (file_mtime, mtime)) + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) return True else: - self.log.warn('File mtime %s is older than provided mtime %s' - % (file_mtime, mtime)) + self.log.warn('File mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=30, + retry_sleep_time=10): """Check service and file were updated after mtime Args: @@ -383,9 +448,10 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -402,15 +468,27 @@ def validate_service_config_changed(self, sentry_unit, mtime, service, mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -428,7 +506,6 @@ def get_ubuntu_releases(self): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -568,6 +645,142 @@ def validate_list_of_identical_dicts(self, list_of_dicts): return None + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). 
Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. 
+ + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. + + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. @@ -594,3 +807,12 @@ def wait_on_action(self, action_id, _check_output=subprocess.check_output): output = _check_output(command, universal_newlines=True) data = json.loads(output) return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 07ee2ef1..cbaad10d 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -14,12 +14,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import logging +import re +import sys import six from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +DEBUG = logging.DEBUG +ERROR = logging.ERROR + class OpenStackAmuletDeployment(AmuletDeployment): """OpenStack amulet deployment. @@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. 
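The probe used by port_knock_tcp reduces to a connect-then-close attempt, sketched standalone:

import socket

def tcp_port_open(host, port=22, timeout=15):
    # True if something accepts the connection within the timeout
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.settimeout(timeout)
        probe.connect((host, port))
        probe.close()
        return True
    except socket.error:
        return False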
""" - def __init__(self, series=None, openstack=None, source=None, stable=True): + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') self.openstack = openstack self.source = source self.stable = stable @@ -38,26 +47,55 @@ def __init__(self, series=None, openstack=None, source=None, stable=True): # out. self.current_next = "trusty" + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + def _determine_branch_locations(self, other_services): """Determine the branch locations for the other services. Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the lp:~openstack-charmers namespace base_charms = ['mysql', 'mongodb', 'nrpe'] + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. + force_series_current = ['nrpe'] + if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,10 +104,13 @@ def _determine_branch_locations(self, other_services): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, @@ -77,29 +118,103 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] + + # Charms which can not use openstack-origin, ie. 
many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) def _configure_services(self, configs): """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') for service, config in six.iteritems(configs): self.d.configure(service, config) + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + def _get_openstack_release(self): """Get openstack release. 
@@ -111,7 +226,8 @@ def _get_openstack_release(self): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -123,9 +239,11 @@ def _get_openstack_release(self): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -142,6 +260,7 @@ def _get_openstack_release_string(self): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..388b60e6 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -18,6 +18,7 @@ import json import logging import os +import re import six import time import urllib @@ -27,6 +28,7 @@ import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +604,382 @@ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
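get_rmq_cluster_running_nodes relies on string-chopping Erlang terms into something json.loads accepts. A standalone sketch with a constructed sample (the sample output is illustrative, not captured from a real cluster):

import json

def running_nodes(str_stat):
    # cluster_status output is Erlang terms, not JSON, hence the
    # slice-and-requote before json.loads (same approach as above)
    if 'running_nodes' not in str_stat:
        return []
    start = str_stat.find('{running_nodes,') + 15
    end = str_stat.find(']},', start) + 1
    return json.loads(str_stat[start:end].replace("'", '"'))

sample = ("[{nodes,[{disc,['rabbit@a','rabbit@b']}]},"
          "{running_nodes,['rabbit@a','rabbit@b']},"
          "{partitions,[]}]")
# running_nodes(sample) -> ['rabbit@a', 'rabbit@b']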
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+
+        :param sentry_unit: sentry unit pointer
+        :param message: amqp message string
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: None. Raises exception if publish failed.
+        """
+        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+                                                                    message))
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+
+        # NOTE(beisner): extra debug here re: pika hang potential:
+        #   https://github.com/pika/pika/issues/297
+        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+        self.log.debug('Defining channel...')
+        channel = connection.channel()
+        self.log.debug('Declaring queue...')
+        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+        self.log.debug('Publishing message...')
+        channel.basic_publish(exchange='', routing_key=queue, body=message)
+        self.log.debug('Closing channel...')
+        channel.close()
+        self.log.debug('Closing connection...')
+        connection.close()
+
+    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+                                 username="testuser1",
+                                 password="changeme",
+                                 ssl=False, port=None):
+        """Get an amqp message from a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: amqp message body as string. Raise if get fails.
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
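+            # NOTE(editor): channel.basic_get() is a non-blocking poll; an
+            # empty method frame simply means the queue held no message,
+            # which these helpers treat as a hard test failure.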
+ amulet.raise_status(amulet.FAIL, msg) From c1bf954fe4318f2625c37e4d2b3beeb2003bbeff Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 22 Jan 2016 22:32:15 +0000 Subject: [PATCH 0972/2699] enable liberty amulet test targets --- ceph-osd/tests/018-basic-trusty-liberty | 0 ceph-osd/tests/020-basic-wily-liberty | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ceph-osd/tests/018-basic-trusty-liberty mode change 100644 => 100755 ceph-osd/tests/020-basic-wily-liberty diff --git a/ceph-osd/tests/018-basic-trusty-liberty b/ceph-osd/tests/018-basic-trusty-liberty old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/020-basic-wily-liberty b/ceph-osd/tests/020-basic-wily-liberty old mode 100644 new mode 100755 From 831a46d0379ccce60c86d6a6fa30be3bfeac6c07 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 11:10:14 -0500 Subject: [PATCH 0973/2699] remove osd stuff --- ceph-proxy/README.md | 28 -------- ceph-proxy/config.yaml | 72 ------------------- ceph-proxy/files/upstart/ceph-osd.conf | 37 ---------- ceph-proxy/hooks/ceph_hooks.py | 70 +----------------- ceph-proxy/hooks/osd-devices-storage-attached | 1 - .../hooks/osd-devices-storage-detaching | 1 - ceph-proxy/hooks/osd-relation-joined | 1 - ceph-proxy/metadata.yaml | 11 +-- ceph-proxy/tests/basic_deployment.py | 30 ++++---- 9 files changed, 20 insertions(+), 231 deletions(-) delete mode 100644 ceph-proxy/files/upstart/ceph-osd.conf delete mode 120000 ceph-proxy/hooks/osd-devices-storage-attached delete mode 120000 ceph-proxy/hooks/osd-devices-storage-detaching delete mode 120000 ceph-proxy/hooks/osd-relation-joined diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 9fac8245..9235b3df 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -26,30 +26,12 @@ These two pieces of configuration must NOT be changed post bootstrap; attempting to do this will cause a reconfiguration error and new service units will not join the existing ceph cluster. -The charm also supports the specification of storage devices to be used in the -ceph cluster. - - osd-devices: - A list of devices that the charm will attempt to detect, initialise and - activate as ceph storage. - - This can be a superset of the actual storage devices presented to each - service unit and can be changed post ceph bootstrap using `juju set`. - - The full path of each device must be provided, e.g. /dev/vdb. - - For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of - directories instead of devices is also supported. - At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): ceph: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== - osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde - -Specifying the osd-devices to use is also a good idea. Boot things up by using: @@ -91,14 +73,4 @@ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". **ceph-disk prepare** is used to -create the filesystems with a special GPT partition type. *udev* is set up -to mount such filesystems and start the osd daemons as their storage becomes -visible to the system (or after `udevadm trigger`). 
- -The Chef cookbook mentioned above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. Since -all OSDs run on nodes that also run mon, we don't need this and did not -implement it. - See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d47f6764..6a5f8c3d 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -37,78 +37,6 @@ options: How many nodes to wait for before trying to create the monitor cluster this number needs to be odd, and more than three is a waste except for very large clusters. - osd-devices: - type: string - default: /dev/vdb - description: | - The devices to format and set up as osd volumes. - . - These devices are the range of devices that will be checked for and - used across all service units, in addition to any volumes attached - via the --storage flag during deployment. - . - For ceph >= 0.56.6 these can also be directories instead of devices - the - charm assumes anything not starting with /dev is a directory instead. - osd-journal: - type: string - default: - description: | - The device to use as a shared journal drive for all OSD's. By default - no journal device will be used. - . - Only supported with ceph >= 0.48.3. - osd-journal-size: - type: int - default: 1024 - description: | - Ceph osd journal size. The journal size should be at least twice the - product of the expected drive speed multiplied by filestore max sync - interval. However, the most common practice is to partition the journal - drive (often an SSD), and mount it such that Ceph uses the entire - partition for the journal. - . - Only supported with ceph >= 0.48.3. - osd-format: - type: string - default: xfs - description: | - Format of filesystem to use for OSD devices; supported formats include: - . - xfs (Default >= 0.48.3) - ext4 (Only option < 0.48.3) - btrfs (experimental and not recommended) - . - Only supported with ceph >= 0.48.3. - osd-reformat: - type: string - default: - description: | - By default, the charm will not re-format a device that already looks - as if it might be an OSD device. This is a safeguard to try to - prevent data loss. - . - Specifying this option (any value) forces a reformat of any OSD devices - found which are not already mounted. - ignore-device-errors: - type: boolean - default: False - description: | - By default, the charm will raise errors if a whitelisted device is found, - but for some reason the charm is unable to initialize the device for use - by Ceph. - . - Setting this option to 'True' will result in the charm classifying such - problems as warnings only and will not result in a hook error. - ephemeral-unmount: - type: string - default: - description: | - Cloud instances provider ephermeral storage which is normally mounted - on /mnt. - . - Providing this option will force an unmount of the ephemeral device - so that it can be used as a OSD storage device. This is useful for - testing purposes (cloud deployment is not a typical use case). 
source: type: string default: diff --git a/ceph-proxy/files/upstart/ceph-osd.conf b/ceph-proxy/files/upstart/ceph-osd.conf deleted file mode 100644 index 119ad000..00000000 --- a/ceph-proxy/files/upstart/ceph-osd.conf +++ /dev/null @@ -1,37 +0,0 @@ -description "Ceph OSD" - -start on ceph-osd -stop on runlevel [!2345] - -respawn -respawn limit 5 30 - -pre-start script - set -e - test -x /usr/bin/ceph-osd || { stop; exit 0; } - test -d "/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } - - install -d -m0755 /var/run/ceph - - # update location in crush; put in some suitable defaults on the - # command line, ceph.conf can override what it wants - location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" - weight="$(ceph-conf --cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" - ceph \ - --cluster="${cluster:-ceph}" \ - --name="osd.$id" \ - --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ - osd crush set \ - -- \ - "$id" "osd.$id" "${weight:-1}" \ - pool=default \ - host="$(hostname -s)" \ - $location \ - || : -end script - -instance ${cluster:-ceph}/$id -export cluster -export id - -exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 518830c0..fced655a 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -28,9 +28,7 @@ service_name, relations_of_type, status_set, - local_unit, - storage_get, - storage_list + local_unit ) from charmhelpers.core.host import ( service_restart, @@ -135,9 +133,6 @@ def config_changed(): if not config('monitor-secret'): log('No monitor-secret supplied, cannot proceed.', level=ERROR) sys.exit(1) - if config('osd-format') not in ceph.DISK_FORMATS: - log('Invalid OSD disk format configuration specified', level=ERROR) - sys.exit(1) sysctl_dict = config('sysctl') if sysctl_dict: @@ -145,53 +140,16 @@ def config_changed(): emit_cephconf() - e_mountpoint = config('ephemeral-unmount') - if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): - umount(e_mountpoint) - - osd_journal = get_osd_journal() - if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): - ceph.zap_disk(osd_journal) - with open(JOURNAL_ZAPPED, 'w') as zapped: - zapped.write('DONE') - # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - storage_changed() - if relations_of_type('nrpe-external-master'): update_nrpe_config() -@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching') -def storage_changed(): - if ceph.is_bootstrapped(): - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), get_osd_journal(), - reformat_osd(), config('ignore-device-errors')) - ceph.start_osds(get_devices()) - - -def get_osd_journal(): - ''' - Returns the block device path to use for the OSD journal, if any. - - If there is an osd-journal storage instance attached, it will be - used as the journal. Otherwise, the osd-journal configuration will - be returned. - ''' - storage_ids = storage_list('osd-journal') - if storage_ids: - # There can be at most one osd-journal storage instance. 
- return storage_get('location', storage_ids[0]) - return config('osd-journal') - - def get_mon_hosts(): hosts = [] addr = get_public_addr() @@ -222,26 +180,6 @@ def get_peer_units(): return units -def reformat_osd(): - if config('osd-reformat'): - return True - else: - return False - - -def get_devices(): - if config('osd-devices'): - devices = config('osd-devices').split(' ') - else: - devices = [] - # List storage instances for the 'osd-devices' - # store declared for this charm too, and add - # their block device paths to the list. - storage_ids = storage_list('osd-devices') - devices.extend((storage_get('location', s) for s in storage_ids)) - return devices - - @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): @@ -260,10 +198,6 @@ def mon_relation(): status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), get_osd_journal(), - reformat_osd(), config('ignore-device-errors')) - ceph.start_osds(get_devices()) notify_osds() notify_radosgws() notify_client() @@ -409,8 +343,6 @@ def start(): service_restart('ceph-mon') else: service_restart('ceph-mon-all') - if ceph.is_bootstrapped(): - ceph.start_osds(get_devices()) @hooks.hook('nrpe-external-master-relation-joined') diff --git a/ceph-proxy/hooks/osd-devices-storage-attached b/ceph-proxy/hooks/osd-devices-storage-attached deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/osd-devices-storage-attached +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/osd-devices-storage-detaching b/ceph-proxy/hooks/osd-devices-storage-detaching deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/osd-devices-storage-detaching +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/osd-relation-joined b/ceph-proxy/hooks/osd-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/osd-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index b8843305..238ef07b 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,4 +1,4 @@ -name: ceph +name: ceph-mon summary: Highly scalable distributed storage maintainer: OpenStack Charmers description: | @@ -25,12 +25,3 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container -storage: - osd-devices: - type: block - multiple: - range: 0- - osd-journal: - type: block - multiple: - range: 0-1 diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 385bc118..73a4370e 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -35,9 +35,10 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). 
""" - this_service = {'name': 'ceph', 'units': 3} + this_service = {'name': 'ceph-mon', 'units': 3} other_services = [{'name': 'mysql'}, {'name': 'keystone'}, + {'name': 'ceph-osd', 'units': 3}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, @@ -51,17 +52,18 @@ def _add_relations(self): 'nova-compute:shared-db': 'mysql:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph:client', + 'nova-compute:ceph': 'ceph-mon:client', 'keystone:shared-db': 'mysql:shared-db', 'glance:shared-db': 'mysql:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph:client', + 'glance:ceph': 'ceph-mon:client', 'cinder:shared-db': 'mysql:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph:client' + 'cinder:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd' } super(CephBasicDeployment, self)._add_relations(relations) @@ -76,6 +78,9 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + } + + ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph' @@ -84,7 +89,8 @@ def _configure_services(self): configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph': ceph_config} + 'ceph-mon': ceph_config, + 'ceph-osd': ceph_osd_config} super(CephBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): @@ -96,9 +102,9 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -211,7 +217,7 @@ def test_200_ceph_nova_client_relation(self): """Verify the ceph to nova ceph-client relation data.""" u.log.debug('Checking ceph:nova-compute ceph relation data...') unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph'] + relation = ['client', 'nova-compute:ceph-mon'] expected = { 'private-address': u.valid_ip, 'auth': 'none', @@ -227,7 +233,7 @@ def test_201_nova_ceph_client_relation(self): """Verify the nova to ceph client relation data.""" u.log.debug('Checking nova-compute:ceph ceph-client relation data...') unit = self.nova_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -257,7 +263,7 @@ def test_203_glance_ceph_client_relation(self): """Verify the glance to ceph client relation data.""" u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -287,7 +293,7 @@ def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation 
data.""" u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } From 666579ecf1eb9b83a176d77133bae66a2c5e574d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 11:10:14 -0500 Subject: [PATCH 0974/2699] remove osd stuff --- ceph-mon/README.md | 28 -------- ceph-mon/config.yaml | 72 -------------------- ceph-mon/files/upstart/ceph-osd.conf | 37 ---------- ceph-mon/hooks/ceph_hooks.py | 70 +------------------ ceph-mon/hooks/osd-devices-storage-attached | 1 - ceph-mon/hooks/osd-devices-storage-detaching | 1 - ceph-mon/hooks/osd-relation-joined | 1 - ceph-mon/metadata.yaml | 11 +-- ceph-mon/tests/basic_deployment.py | 30 ++++---- 9 files changed, 20 insertions(+), 231 deletions(-) delete mode 100644 ceph-mon/files/upstart/ceph-osd.conf delete mode 120000 ceph-mon/hooks/osd-devices-storage-attached delete mode 120000 ceph-mon/hooks/osd-devices-storage-detaching delete mode 120000 ceph-mon/hooks/osd-relation-joined diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 9fac8245..9235b3df 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -26,30 +26,12 @@ These two pieces of configuration must NOT be changed post bootstrap; attempting to do this will cause a reconfiguration error and new service units will not join the existing ceph cluster. -The charm also supports the specification of storage devices to be used in the -ceph cluster. - - osd-devices: - A list of devices that the charm will attempt to detect, initialise and - activate as ceph storage. - - This can be a superset of the actual storage devices presented to each - service unit and can be changed post ceph bootstrap using `juju set`. - - The full path of each device must be provided, e.g. /dev/vdb. - - For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of - directories instead of devices is also supported. - At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): ceph: fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== - osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde - -Specifying the osd-devices to use is also a good idea. Boot things up by using: @@ -91,14 +73,4 @@ hook to wait for all three nodes to come up, and then write their addresses to ceph.conf in the "mon host" parameter. After we initialize the monitor cluster a quorum forms quickly, and OSD bringup proceeds. -The osds use so-called "OSD hotplugging". **ceph-disk prepare** is used to -create the filesystems with a special GPT partition type. *udev* is set up -to mount such filesystems and start the osd daemons as their storage becomes -visible to the system (or after `udevadm trigger`). - -The Chef cookbook mentioned above performs some extra steps to generate an OSD -bootstrapping key and propagate it to the other nodes in the cluster. Since -all OSDs run on nodes that also run mon, we don't need this and did not -implement it. - See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. 
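
As an aside, the fsid and monitor-secret values the README above asks for
can be generated ahead of deployment. A minimal sketch, assuming
ceph-authtool is installed on the local machine and mirroring the
generate_monitor_secret() helper added in patch 0977 below:

    import subprocess
    import uuid

    def make_fsid():
        # Any unique identifier works; the charm itself uses uuid.uuid1().
        return str(uuid.uuid1())

    def make_monitor_secret():
        # Ask ceph-authtool for a fresh mon. key and keep only the base64
        # key material after the "key =" prefix of its stdout report.
        out = subprocess.check_output(
            ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])
        return out.decode('UTF-8').split('=', 1)[1].strip()

    if __name__ == '__main__':
        print('ceph:')
        print('  fsid: {}'.format(make_fsid()))
        print('  monitor-secret: {}'.format(make_monitor_secret()))

The printed YAML snippet is suitable for passing to juju at deploy time,
e.g. via a --config file.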
diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index d47f6764..6a5f8c3d 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -37,78 +37,6 @@ options: How many nodes to wait for before trying to create the monitor cluster this number needs to be odd, and more than three is a waste except for very large clusters. - osd-devices: - type: string - default: /dev/vdb - description: | - The devices to format and set up as osd volumes. - . - These devices are the range of devices that will be checked for and - used across all service units, in addition to any volumes attached - via the --storage flag during deployment. - . - For ceph >= 0.56.6 these can also be directories instead of devices - the - charm assumes anything not starting with /dev is a directory instead. - osd-journal: - type: string - default: - description: | - The device to use as a shared journal drive for all OSD's. By default - no journal device will be used. - . - Only supported with ceph >= 0.48.3. - osd-journal-size: - type: int - default: 1024 - description: | - Ceph osd journal size. The journal size should be at least twice the - product of the expected drive speed multiplied by filestore max sync - interval. However, the most common practice is to partition the journal - drive (often an SSD), and mount it such that Ceph uses the entire - partition for the journal. - . - Only supported with ceph >= 0.48.3. - osd-format: - type: string - default: xfs - description: | - Format of filesystem to use for OSD devices; supported formats include: - . - xfs (Default >= 0.48.3) - ext4 (Only option < 0.48.3) - btrfs (experimental and not recommended) - . - Only supported with ceph >= 0.48.3. - osd-reformat: - type: string - default: - description: | - By default, the charm will not re-format a device that already looks - as if it might be an OSD device. This is a safeguard to try to - prevent data loss. - . - Specifying this option (any value) forces a reformat of any OSD devices - found which are not already mounted. - ignore-device-errors: - type: boolean - default: False - description: | - By default, the charm will raise errors if a whitelisted device is found, - but for some reason the charm is unable to initialize the device for use - by Ceph. - . - Setting this option to 'True' will result in the charm classifying such - problems as warnings only and will not result in a hook error. - ephemeral-unmount: - type: string - default: - description: | - Cloud instances provider ephermeral storage which is normally mounted - on /mnt. - . - Providing this option will force an unmount of the ephemeral device - so that it can be used as a OSD storage device. This is useful for - testing purposes (cloud deployment is not a typical use case). 
source: type: string default: diff --git a/ceph-mon/files/upstart/ceph-osd.conf b/ceph-mon/files/upstart/ceph-osd.conf deleted file mode 100644 index 119ad000..00000000 --- a/ceph-mon/files/upstart/ceph-osd.conf +++ /dev/null @@ -1,37 +0,0 @@ -description "Ceph OSD" - -start on ceph-osd -stop on runlevel [!2345] - -respawn -respawn limit 5 30 - -pre-start script - set -e - test -x /usr/bin/ceph-osd || { stop; exit 0; } - test -d "/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } - - install -d -m0755 /var/run/ceph - - # update location in crush; put in some suitable defaults on the - # command line, ceph.conf can override what it wants - location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" - weight="$(ceph-conf --cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" - ceph \ - --cluster="${cluster:-ceph}" \ - --name="osd.$id" \ - --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ - osd crush set \ - -- \ - "$id" "osd.$id" "${weight:-1}" \ - pool=default \ - host="$(hostname -s)" \ - $location \ - || : -end script - -instance ${cluster:-ceph}/$id -export cluster -export id - -exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 518830c0..fced655a 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -28,9 +28,7 @@ service_name, relations_of_type, status_set, - local_unit, - storage_get, - storage_list + local_unit ) from charmhelpers.core.host import ( service_restart, @@ -135,9 +133,6 @@ def config_changed(): if not config('monitor-secret'): log('No monitor-secret supplied, cannot proceed.', level=ERROR) sys.exit(1) - if config('osd-format') not in ceph.DISK_FORMATS: - log('Invalid OSD disk format configuration specified', level=ERROR) - sys.exit(1) sysctl_dict = config('sysctl') if sysctl_dict: @@ -145,53 +140,16 @@ def config_changed(): emit_cephconf() - e_mountpoint = config('ephemeral-unmount') - if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): - umount(e_mountpoint) - - osd_journal = get_osd_journal() - if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and - os.path.exists(osd_journal)): - ceph.zap_disk(osd_journal) - with open(JOURNAL_ZAPPED, 'w') as zapped: - zapped.write('DONE') - # Support use of single node ceph if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - storage_changed() - if relations_of_type('nrpe-external-master'): update_nrpe_config() -@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching') -def storage_changed(): - if ceph.is_bootstrapped(): - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), get_osd_journal(), - reformat_osd(), config('ignore-device-errors')) - ceph.start_osds(get_devices()) - - -def get_osd_journal(): - ''' - Returns the block device path to use for the OSD journal, if any. - - If there is an osd-journal storage instance attached, it will be - used as the journal. Otherwise, the osd-journal configuration will - be returned. - ''' - storage_ids = storage_list('osd-journal') - if storage_ids: - # There can be at most one osd-journal storage instance. 
- return storage_get('location', storage_ids[0]) - return config('osd-journal') - - def get_mon_hosts(): hosts = [] addr = get_public_addr() @@ -222,26 +180,6 @@ def get_peer_units(): return units -def reformat_osd(): - if config('osd-reformat'): - return True - else: - return False - - -def get_devices(): - if config('osd-devices'): - devices = config('osd-devices').split(' ') - else: - devices = [] - # List storage instances for the 'osd-devices' - # store declared for this charm too, and add - # their block device paths to the list. - storage_ids = storage_list('osd-devices') - devices.extend((storage_get('location', s) for s in storage_ids)) - return devices - - @hooks.hook('mon-relation-joined') def mon_relation_joined(): for relid in relation_ids('mon'): @@ -260,10 +198,6 @@ def mon_relation(): status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - for dev in get_devices(): - ceph.osdize(dev, config('osd-format'), get_osd_journal(), - reformat_osd(), config('ignore-device-errors')) - ceph.start_osds(get_devices()) notify_osds() notify_radosgws() notify_client() @@ -409,8 +343,6 @@ def start(): service_restart('ceph-mon') else: service_restart('ceph-mon-all') - if ceph.is_bootstrapped(): - ceph.start_osds(get_devices()) @hooks.hook('nrpe-external-master-relation-joined') diff --git a/ceph-mon/hooks/osd-devices-storage-attached b/ceph-mon/hooks/osd-devices-storage-attached deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/osd-devices-storage-attached +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-devices-storage-detaching b/ceph-mon/hooks/osd-devices-storage-detaching deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/osd-devices-storage-detaching +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-relation-joined b/ceph-mon/hooks/osd-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/osd-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index b8843305..238ef07b 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -1,4 +1,4 @@ -name: ceph +name: ceph-mon summary: Highly scalable distributed storage maintainer: OpenStack Charmers description: | @@ -25,12 +25,3 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container -storage: - osd-devices: - type: block - multiple: - range: 0- - osd-journal: - type: block - multiple: - range: 0-1 diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 385bc118..73a4370e 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -35,9 +35,10 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). 
""" - this_service = {'name': 'ceph', 'units': 3} + this_service = {'name': 'ceph-mon', 'units': 3} other_services = [{'name': 'mysql'}, {'name': 'keystone'}, + {'name': 'ceph-osd', 'units': 3}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, @@ -51,17 +52,18 @@ def _add_relations(self): 'nova-compute:shared-db': 'mysql:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph:client', + 'nova-compute:ceph': 'ceph-mon:client', 'keystone:shared-db': 'mysql:shared-db', 'glance:shared-db': 'mysql:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph:client', + 'glance:ceph': 'ceph-mon:client', 'cinder:shared-db': 'mysql:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph:client' + 'cinder:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd' } super(CephBasicDeployment, self)._add_relations(relations) @@ -76,6 +78,9 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + } + + ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph' @@ -84,7 +89,8 @@ def _configure_services(self): configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph': ceph_config} + 'ceph-mon': ceph_config, + 'ceph-osd': ceph_osd_config} super(CephBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): @@ -96,9 +102,9 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -211,7 +217,7 @@ def test_200_ceph_nova_client_relation(self): """Verify the ceph to nova ceph-client relation data.""" u.log.debug('Checking ceph:nova-compute ceph relation data...') unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph'] + relation = ['client', 'nova-compute:ceph-mon'] expected = { 'private-address': u.valid_ip, 'auth': 'none', @@ -227,7 +233,7 @@ def test_201_nova_ceph_client_relation(self): """Verify the nova to ceph client relation data.""" u.log.debug('Checking nova-compute:ceph ceph-client relation data...') unit = self.nova_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -257,7 +263,7 @@ def test_203_glance_ceph_client_relation(self): """Verify the glance to ceph client relation data.""" u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -287,7 +293,7 @@ def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation 
data.""" u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph-mon', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } From ce4cbd7b53a6bbab1fd7bd842b2ce8a62da3d834 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 11:11:49 -0500 Subject: [PATCH 0975/2699] setup fsid/mon secret if not configured --- ceph-proxy/hooks/ceph_hooks.py | 44 +++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index fced655a..1c3ce05d 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -23,6 +23,8 @@ related_units, relation_get, relation_set, + leader_set, leader_get, + is_leader, remote_unit, Hooks, UnregisteredHookError, service_name, @@ -91,7 +93,7 @@ def emit_cephconf(): cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), @@ -126,13 +128,28 @@ def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) - # Pre-flight checks - if not config('fsid'): - log('No fsid supplied, cannot proceed.', level=ERROR) - sys.exit(1) - if not config('monitor-secret'): - log('No monitor-secret supplied, cannot proceed.', level=ERROR) - sys.exit(1) + if is_leader(): + if not leader_get('fsid') or not leader_get('monitor-secret'): + if config('fsid'): + fsid = config('fsid') + else: + fsid = "{}".format(uuid.uuid1()) + if config('monitor-secret'): + mon_secret = config('monitor-secret') + else: + mon_secret = "{}".format(ceph.generate_monitor_secret()) + status_set('maintenance', 'Creating FSID and Monitor Secret') + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + log("Settings for the cluster are: {}".format(opts)) + leader_set(opts) + else: + if leader_get('fsid') is None or leader_get('monitor-secret') is None: + log('still waiting for leader to setup keys') + status_set('waiting', 'Waiting for leader to setup keys') + sys.exit(0) sysctl_dict = config('sysctl') if sysctl_dict: @@ -191,12 +208,17 @@ def mon_relation_joined(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): + if leader_get('monitor-secret') is None: + log('still waiting for leader to setup keys') + status_set('waiting', 'Waiting for leader to setup keys') + return emit_cephconf() moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: status_set('maintenance', 'Bootstrapping MON cluster') - ceph.bootstrap_monitor_cluster(config('monitor-secret')) + + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() notify_osds() notify_radosgws() @@ -239,7 +261,7 @@ def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') data = { - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), @@ -268,7 +290,7 @@ def radosgw_relation(relid=None): unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') data = { - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), 
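+            # NOTE(editor): the fsid is read from Juju leader storage (set in
+            # config_changed above), so every mon unit publishes the same
+            # cluster identity to radosgw.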
'ceph-public-address': get_public_addr(), From 52c345d48593930e5c446e58130fadd8c3171608 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 11:11:49 -0500 Subject: [PATCH 0976/2699] setup fsid/mon secret if not configured --- ceph-mon/hooks/ceph_hooks.py | 44 +++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index fced655a..1c3ce05d 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -23,6 +23,8 @@ related_units, relation_get, relation_set, + leader_set, leader_get, + is_leader, remote_unit, Hooks, UnregisteredHookError, service_name, @@ -91,7 +93,7 @@ def emit_cephconf(): cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), @@ -126,13 +128,28 @@ def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) - # Pre-flight checks - if not config('fsid'): - log('No fsid supplied, cannot proceed.', level=ERROR) - sys.exit(1) - if not config('monitor-secret'): - log('No monitor-secret supplied, cannot proceed.', level=ERROR) - sys.exit(1) + if is_leader(): + if not leader_get('fsid') or not leader_get('monitor-secret'): + if config('fsid'): + fsid = config('fsid') + else: + fsid = "{}".format(uuid.uuid1()) + if config('monitor-secret'): + mon_secret = config('monitor-secret') + else: + mon_secret = "{}".format(ceph.generate_monitor_secret()) + status_set('maintenance', 'Creating FSID and Monitor Secret') + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + log("Settings for the cluster are: {}".format(opts)) + leader_set(opts) + else: + if leader_get('fsid') is None or leader_get('monitor-secret') is None: + log('still waiting for leader to setup keys') + status_set('waiting', 'Waiting for leader to setup keys') + sys.exit(0) sysctl_dict = config('sysctl') if sysctl_dict: @@ -191,12 +208,17 @@ def mon_relation_joined(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') def mon_relation(): + if leader_get('monitor-secret') is None: + log('still waiting for leader to setup keys') + status_set('waiting', 'Waiting for leader to setup keys') + return emit_cephconf() moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: status_set('maintenance', 'Bootstrapping MON cluster') - ceph.bootstrap_monitor_cluster(config('monitor-secret')) + + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() notify_osds() notify_radosgws() @@ -239,7 +261,7 @@ def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') data = { - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), @@ -268,7 +290,7 @@ def radosgw_relation(relid=None): unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') data = { - 'fsid': config('fsid'), + 'fsid': leader_get('fsid'), 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), From b907af8bb6068f0881fed30807c0bf236a0ed0df Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 13:14:51 -0500 Subject: [PATCH 
0977/2699] fixing hooks --- ceph-proxy/hooks/ceph.py | 13 +++++++++++++ ceph-proxy/hooks/ceph_hooks.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index a32fa58e..f3ce6910 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -230,6 +230,19 @@ def import_osd_bootstrap_key(key): ] subprocess.check_call(cmd) + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 1c3ce05d..5caeb8fe 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -12,7 +12,7 @@ import os import shutil import sys - +import uuid import ceph from charmhelpers.core.hookenv import ( log, From 5f4f8fc21eef63cd3038042364ead3e0e9ae9d28 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Jan 2016 13:14:51 -0500 Subject: [PATCH 0978/2699] fixing hooks --- ceph-mon/hooks/ceph.py | 13 +++++++++++++ ceph-mon/hooks/ceph_hooks.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index a32fa58e..f3ce6910 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -230,6 +230,19 @@ def import_osd_bootstrap_key(key): ] subprocess.check_call(cmd) + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 1c3ce05d..5caeb8fe 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -12,7 +12,7 @@ import os import shutil import sys - +import uuid import ceph from charmhelpers.core.hookenv import ( log, From 353c095fa87d99076a802d245af03ede85a5c623 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Jan 2016 07:57:01 -0500 Subject: [PATCH 0979/2699] lint updates --- ceph-proxy/hooks/ceph_hooks.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 5caeb8fe..3419ccbe 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -17,7 +17,6 @@ from charmhelpers.core.hookenv import ( log, DEBUG, - ERROR, config, relation_ids, related_units, @@ -34,7 +33,6 @@ ) from charmhelpers.core.host import ( service_restart, - umount, mkdir, write_file, rsync, @@ -217,7 +215,6 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: status_set('maintenance', 'Bootstrapping MON cluster') - ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() notify_osds() From 1ad4e64e908adb92b939c12ddda173548fb3b6f8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Jan 2016 07:57:01 -0500 Subject: [PATCH 0980/2699] lint updates --- ceph-mon/hooks/ceph_hooks.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 5caeb8fe..3419ccbe 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -17,7 +17,6 @@ from charmhelpers.core.hookenv import ( log, DEBUG, - ERROR, config, 
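+    # NOTE(editor): ERROR became unused once the hard pre-flight exits were
+    # replaced by leader-driven fsid/monitor-secret setup in patch 0976.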
    relation_ids,
     related_units,
@@ -34,7 +33,6 @@
 )
 from charmhelpers.core.host import (
     service_restart,
-    umount,
     mkdir,
     write_file,
     rsync,
@@ -217,7 +215,6 @@ def mon_relation():
     moncount = int(config('monitor-count'))
     if len(get_mon_hosts()) >= moncount:
         status_set('maintenance', 'Bootstrapping MON cluster')
-
         ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
         ceph.wait_for_bootstrap()
         notify_osds()

From 252542d96a66d1c2bd56c370d7f00c9a1c3f2b4e Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Tue, 26 Jan 2016 15:24:34 -0500
Subject: [PATCH 0981/2699] charmhelpers sync / fixing relations in basic_deployment

---
 ceph-proxy/hooks/charmhelpers/core/host.py    | 67 ++++++++++++-------
 ceph-proxy/hooks/charmhelpers/fetch/giturl.py |  4 +-
 ceph-proxy/tests/basic_deployment.py          | 11 ++-
 .../contrib/openstack/amulet/deployment.py    |  5 +-
 4 files changed, 52 insertions(+), 35 deletions(-)

diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py
index 710fdab9..a7720906 100644
--- a/ceph-proxy/hooks/charmhelpers/core/host.py
+++ b/ceph-proxy/hooks/charmhelpers/core/host.py
@@ -138,7 +138,8 @@ def service_running(service_name):
         except subprocess.CalledProcessError:
             return False
         else:
-            if ("start/running" in output or "is running" in output):
+            if ("start/running" in output or "is running" in output or
+                    "up and running" in output):
                 return True
             else:
                 return False
@@ -160,13 +161,13 @@ def service_available(service_name):


 def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
     return os.path.isdir(SYSTEMD_SYSTEM)


 def adduser(username, password=None, shell='/bin/bash', system_user=False,
             primary_group=None, secondary_groups=None):
-    """
-    Add a user to the system.
+    """Add a user to the system.

     Will log but otherwise succeed if the user already exists.

@@ -174,7 +175,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
     :param str password: Password for user; if ``None``, create a system user
     :param str shell: The default shell for the user
     :param bool system_user: Whether to create a login or system user
-    :param str primary_group: Primary group for user; defaults to their username
+    :param str primary_group: Primary group for user; defaults to username
     :param list secondary_groups: Optional list of additional groups

     :returns: The password database entry struct, as returned by `pwd.getpwnam`
@@ -300,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):


 def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab
-    """
+    """Remove the given mountpoint entry from /etc/fstab"""
     return Fstab.remove_by_mountpoint(mp)


 def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file
-    """
+    """Adds the given device entry to the /etc/fstab file"""
     return Fstab.add(dev, mp, fs, options=options)


@@ -363,8 +362,7 @@ def fstab_mount(mountpoint):


 def file_hash(path, hash_type='md5'):
-    """
-    Generate a hash checksum of the contents of 'path' or None if not found.
+    """Generate a hash checksum of the contents of 'path' or None if not found.

     :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
         such as md5, sha1, sha256, sha512, etc.
@@ -379,10 +377,9 @@ def file_hash(path, hash_type='md5'):


 def path_hash(path):
-    """
-    Generate a hash checksum of all files matching 'path'. Standard wildcards
-    like '*' and '?' are supported, see documentation for the 'glob' module for
-    more information.
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.

     :return: dict: A { filename: hash } dictionary for all matched files.
         Empty if none found.
@@ -394,8 +391,7 @@ def path_hash(path):


 def check_hash(path, checksum, hash_type='md5'):
-    """
-    Validate a file using a cryptographic checksum.
+    """Validate a file using a cryptographic checksum.

     :param str checksum: Value of the checksum used to validate the file.
     :param str hash_type: Hash algorithm used to generate `checksum`.
@@ -410,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'):


 class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
     pass


@@ -515,7 +512,7 @@ def get_bond_master(interface):


 def list_nics(nic_type=None):
-    '''Return a list of nics of given type(s)'''
+    """Return a list of nics of given type(s)"""
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
@@ -557,12 +554,13 @@ def list_nics(nic_type=None):


 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)


 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
@@ -574,6 +572,7 @@ def get_nic_mtu(nic):


 def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
@@ -584,7 +583,7 @@ def get_nic_hwaddr(nic):


 def cmp_pkgrevno(package, revno, pkgcache=None):
-    '''Compare supplied revno with the revno of the installed package
+    """Compare supplied revno with the revno of the installed package

     *  1 => Installed revno is greater than supplied arg
     *  0 => Installed revno is the same as supplied arg
@@ -593,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     This function imports apt_cache function from charmhelpers.fetch if
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
-    '''
+    """
     import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache
@@ -603,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None):


 @contextmanager
-def chdir(d):
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
     cur = os.getcwd()
     try:
-        yield os.chdir(d)
+        yield os.chdir(directory)
     finally:
         os.chdir(cur)


 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """
-    Recursively change user and group ownership of files and directories
+    """Recursively change user and group ownership of files and directories
     in given path. Doesn't chown path itself by default, only its children.

+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
:param bool follow_links: Also Chown links if True :param bool chowntopdir: Also chown path itself if True """ @@ -639,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ chownr(path, owner, group, follow_links=False) def get_total_ram(): - '''The total amount of system RAM in bytes. + """The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine. - ''' + """ with open('/proc/meminfo', 'r') as f: for line in f.readlines(): if line: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 9ad8dc60..65ed5319 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -15,7 +15,7 @@ # along with charm-helpers. If not, see . import os -from subprocess import check_call +from subprocess import check_call, CalledProcessError from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -63,6 +63,8 @@ def install(self, source, branch="master", dest=None, depth=None): branch_name) try: self.clone(source, dest_dir, branch, depth) + except CalledProcessError as e: + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 73a4370e..3503f2d1 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -102,6 +102,7 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] @@ -164,8 +165,7 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, - 'ceph-osd': 2 + 'ceph-mon': 1 } # Units with process names and PID quantities expected @@ -193,6 +193,8 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], + self.ceph_osd_sentry: ['ceph-osd', + 'ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -200,10 +202,7 @@ def test_102_services(self): # are checked by process name instead. 
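+            # NOTE(editor): with the OSD role split into the ceph-osd charm,
+            # only the monitor jobs remain on these units; the per-unit
+            # ceph-osd jobs are now asserted on the ceph-osd sentry above.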
ceph_services = [ 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + 'ceph-mon id=`hostname`' ] services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 58b1a79c..d2ede320 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -121,11 +121,12 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: From 8deee4ddddf10c5fd99521c92d75ae40f04f7d01 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Jan 2016 15:24:34 -0500 Subject: [PATCH 0982/2699] charmhelpers sync / fixing relations in basic_deployment --- ceph-mon/hooks/charmhelpers/core/host.py | 67 ++++++++++++------- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 4 +- ceph-mon/tests/basic_deployment.py | 11 ++- .../contrib/openstack/amulet/deployment.py | 5 +- 4 files changed, 52 insertions(+), 35 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 710fdab9..a7720906 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -138,7 +138,8 @@ def service_running(service_name): except subprocess.CalledProcessError: return False else: - if ("start/running" in output or "is running" in output): + if ("start/running" in output or "is running" in output or + "up and running" in output): return True else: return False @@ -160,13 +161,13 @@ def service_available(service_name): def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" return os.path.isdir(SYSTEMD_SYSTEM) def adduser(username, password=None, shell='/bin/bash', system_user=False, primary_group=None, secondary_groups=None): - """ - Add a user to the system. + """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -174,7 +175,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param str password: Password for user; if ``None``, create a system user :param str shell: The default shell for the user :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to their username + :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups :returns: The password database entry struct, as returned by `pwd.getpwnam` @@ -300,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444): def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab - """ + """Remove the given mountpoint entry from /etc/fstab""" return Fstab.remove_by_mountpoint(mp) def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file - """ + """Adds the given device entry to the /etc/fstab file""" return Fstab.add(dev, mp, fs, options=options) @@ -363,8 +362,7 @@ def fstab_mount(mountpoint): def file_hash(path, hash_type='md5'): - """ - Generate a hash checksum of the contents of 'path' or None if not found. + """Generate a hash checksum of the contents of 'path' or None if not found. :param str hash_type: Any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. @@ -379,10 +377,9 @@ def file_hash(path, hash_type='md5'): def path_hash(path): - """ - Generate a hash checksum of all files matching 'path'. Standard wildcards - like '*' and '?' are supported, see documentation for the 'glob' module for - more information. + """Generate a hash checksum of all files matching 'path'. Standard + wildcards like '*' and '?' are supported, see documentation for the 'glob' + module for more information. :return: dict: A { filename: hash } dictionary for all matched files. Empty if none found. @@ -394,8 +391,7 @@ def path_hash(path): def check_hash(path, checksum, hash_type='md5'): - """ - Validate a file using a cryptographic checksum. + """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`.
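(A minimal usage sketch of the check_hash() API documented above; illustrative only — the path and digest are invented, and we assume check_hash raises the ChecksumError defined in the next hunk on a mismatch:)

    from charmhelpers.core.host import check_hash, ChecksumError

    try:
        # hash_type may be any algorithm hashlib supports (md5, sha1,
        # sha256, ...); this digest is just the md5 of an empty file.
        check_hash('/tmp/example.tar.gz',
                   'd41d8cd98f00b204e9800998ecf8427e',
                   hash_type='md5')
    except ChecksumError:
        # The file on disk did not match the expected checksum.
        pass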
@@ -410,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'): class ChecksumError(ValueError): + """A class derived from ValueError to indicate the checksum failed.""" pass @@ -515,7 +512,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): - '''Return a list of nics of given type(s)''' + """Return a list of nics of given type(s)""" if isinstance(nic_type, six.string_types): int_types = [nic_type] else: @@ -557,12 +554,13 @@ def list_nics(nic_type=None): def set_nic_mtu(nic, mtu): - '''Set MTU on a network interface''' + """Set the Maximum Transmission Unit (MTU) on a network interface.""" cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] subprocess.check_call(cmd) def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" @@ -574,6 +572,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" @@ -584,7 +583,7 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): - '''Compare supplied revno with the revno of the installed package + """Compare supplied revno with the revno of the installed package * 1 => Installed revno is greater than supplied arg * 0 => Installed revno is the same as supplied arg @@ -593,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): This function imports apt_cache function from charmhelpers.fetch if the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. - ''' + """ import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache @@ -603,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None): @contextmanager -def chdir(d): +def chdir(directory): + """Change the current working directory to a different directory for a code + block and restore the previous directory after the block exits. Useful to + run commands from a specified directory. + + :param str directory: The directory path to change to for this context. + """ cur = os.getcwd() try: - yield os.chdir(d) + yield os.chdir(directory) finally: os.chdir(cur) def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """ - Recursively change user and group ownership of files and directories + """Recursively change user and group ownership of files and directories in given path. Doesn't chown path itself by default, only its children. + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. :param bool follow_links: Also Chown links if True :param bool chowntopdir: Also chown path itself if True """ @@ -639,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid.
+ """ chownr(path, owner, group, follow_links=False) def get_total_ram(): - '''The total amount of system RAM in bytes. + """The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine. - ''' + """ with open('/proc/meminfo', 'r') as f: for line in f.readlines(): if line: diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 9ad8dc60..65ed5319 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -15,7 +15,7 @@ # along with charm-helpers. If not, see . import os -from subprocess import check_call +from subprocess import check_call, CalledProcessError from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -63,6 +63,8 @@ def install(self, source, branch="master", dest=None, depth=None): branch_name) try: self.clone(source, dest_dir, branch, depth) + except CalledProcessError as e: + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 73a4370e..3503f2d1 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -102,6 +102,7 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] @@ -164,8 +165,7 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { - 'ceph-mon': 1, - 'ceph-osd': 2 + 'ceph-mon': 1 } # Units with process names and PID quantities expected @@ -193,6 +193,8 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], + self.ceph_osd_sentry: ['ceph-osd', + 'ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -200,10 +202,7 @@ def test_102_services(self): # are checked by process name instead. ceph_services = [ 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + 'ceph-mon id=`hostname`' ] services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 58b1a79c..d2ede320 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -121,11 +121,12 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: From c657a13a31ab0773c8cd0233a1a52a5a443d3b05 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 28 Jan 2016 10:45:57 +0100 Subject: [PATCH 0983/2699] fix relations --- ceph-proxy/hooks/osd-relation-joined | 1 + ceph-proxy/tests/basic_deployment.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 120000 ceph-proxy/hooks/osd-relation-joined diff --git a/ceph-proxy/hooks/osd-relation-joined b/ceph-proxy/hooks/osd-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/osd-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 3503f2d1..7b1d9c15 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -26,6 +26,11 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._add_relations() self._configure_services() self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['mysql'] + self._auto_wait_for_status(exclude_services=exclude_services) + self._initialize_tests() def _add_services(self): From 192b53aa21ff400f216cb2cf7c3e9cfbcd94d995 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 28 Jan 2016 10:45:57 +0100 Subject: [PATCH 0984/2699] fix relations --- ceph-mon/hooks/osd-relation-joined | 1 + ceph-mon/tests/basic_deployment.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 120000 ceph-mon/hooks/osd-relation-joined diff --git a/ceph-mon/hooks/osd-relation-joined b/ceph-mon/hooks/osd-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/osd-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 3503f2d1..7b1d9c15 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -26,6 +26,11 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._add_relations() self._configure_services() self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['mysql'] + self._auto_wait_for_status(exclude_services=exclude_services) + self._initialize_tests() def _add_services(self): From 289f48b8936bb4325cbed8f25e7845e213e4254d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 28 Jan 2016 18:21:15 +0100 Subject: [PATCH 0985/2699] fix tests --- ceph-proxy/files/upstart/ceph-mon.conf | 1 + ceph-proxy/templates/ceph.conf | 5 ----- ceph-proxy/tests/basic_deployment.py | 23 +++++++---------------- 3 files changed, 8 insertions(+), 21 deletions(-) diff --git a/ceph-proxy/files/upstart/ceph-mon.conf b/ceph-proxy/files/upstart/ceph-mon.conf index 2cf7bfa5..74a4b643 100644 --- a/ceph-proxy/files/upstart/ceph-mon.conf +++ b/ceph-proxy/files/upstart/ceph-mon.conf @@ -22,3 +22,4 @@ export id #usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f + diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 64f52a6c..f64db7cb 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -37,8 
+37,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring -[osd] -keyring = /var/lib/ceph/osd/$cluster-$id/keyring -osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true - diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 7b1d9c15..c009503c 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -116,9 +116,6 @@ def _initialize_tests(self): u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) - # Let things settle a bit original moving forward - time.sleep(30) - # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin', @@ -198,8 +195,7 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph_osd_sentry: ['ceph-osd', - 'ceph-osd-all'], + self.ceph_osd_sentry: ['ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -219,9 +215,9 @@ def test_102_services(self): def test_200_ceph_nova_client_relation(self): """Verify the ceph to nova ceph-client relation data.""" - u.log.debug('Checking ceph:nova-compute ceph relation data...') + u.log.debug('Checking ceph:nova-compute ceph-mon relation data...') unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph-mon'] + relation = ['client', 'nova-compute:ceph'] expected = { 'private-address': u.valid_ip, 'auth': 'none', @@ -230,14 +226,14 @@ def test_200_ceph_nova_client_relation(self): ret = u.validate_relation_data(unit, relation, expected) if ret: - message = u.relation_error('ceph to nova ceph-client', ret) + message = u.relation_error('ceph-mon to nova ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_201_nova_ceph_client_relation(self): """Verify the nova to ceph client relation data.""" u.log.debug('Checking nova-compute:ceph ceph-client relation data...') unit = self.nova_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -267,7 +263,7 @@ def test_203_glance_ceph_client_relation(self): """Verify the glance to ceph client relation data.""" u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -297,7 +293,7 @@ def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation data.""" u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -330,11 +326,6 @@ def test_300_ceph_config(self): 'mds': { 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' }, - 'osd': { - 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', - 'osd journal size': '1024', - 'filestore xattr use omap': 'true' - }, } for section, pairs in expected.iteritems(): From af952d061ff32b2c3cb750accceb20ba8eac43f7 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 28 Jan 2016 18:21:15 +0100 Subject: [PATCH 0986/2699] fix tests --- ceph-mon/files/upstart/ceph-mon.conf | 1 + ceph-mon/templates/ceph.conf | 5 ----- ceph-mon/tests/basic_deployment.py | 23 +++++++---------------- 3 files changed, 8 insertions(+), 21 deletions(-) diff --git a/ceph-mon/files/upstart/ceph-mon.conf 
b/ceph-mon/files/upstart/ceph-mon.conf index 2cf7bfa5..74a4b643 100644 --- a/ceph-mon/files/upstart/ceph-mon.conf +++ b/ceph-mon/files/upstart/ceph-mon.conf @@ -22,3 +22,4 @@ export id #usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f + diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 64f52a6c..f64db7cb 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -37,8 +37,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring -[osd] -keyring = /var/lib/ceph/osd/$cluster-$id/keyring -osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true - diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 7b1d9c15..c009503c 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -116,9 +116,6 @@ def _initialize_tests(self): u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) - # Let things settle a bit original moving forward - time.sleep(30) - # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin', @@ -198,8 +195,7 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph_osd_sentry: ['ceph-osd', - 'ceph-osd-all'], + self.ceph_osd_sentry: ['ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -219,9 +215,9 @@ def test_102_services(self): def test_200_ceph_nova_client_relation(self): """Verify the ceph to nova ceph-client relation data.""" - u.log.debug('Checking ceph:nova-compute ceph relation data...') + u.log.debug('Checking ceph:nova-compute ceph-mon relation data...') unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph-mon'] + relation = ['client', 'nova-compute:ceph'] expected = { 'private-address': u.valid_ip, 'auth': 'none', @@ -230,14 +226,14 @@ def test_200_ceph_nova_client_relation(self): ret = u.validate_relation_data(unit, relation, expected) if ret: - message = u.relation_error('ceph to nova ceph-client', ret) + message = u.relation_error('ceph-mon to nova ceph-client', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_201_nova_ceph_client_relation(self): """Verify the nova to ceph client relation data.""" u.log.debug('Checking nova-compute:ceph ceph-client relation data...') unit = self.nova_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -267,7 +263,7 @@ def test_203_glance_ceph_client_relation(self): """Verify the glance to ceph client relation data.""" u.log.debug('Checking glance:ceph client relation data...') unit = self.glance_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -297,7 +293,7 @@ def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation data.""" u.log.debug('Checking cinder:ceph ceph relation data...') unit = self.cinder_sentry - relation = ['ceph-mon', 'ceph-mon:client'] + relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip } @@ -330,11 +326,6 @@ def test_300_ceph_config(self): 'mds': { 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' }, - 'osd': { - 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', - 'osd journal size': '1024', 
- 'filestore xattr use omap': 'true' - }, } for section, pairs in expected.iteritems(): From bdb8a22d31abf938ffee753d798d893c4f1708fa Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 29 Jan 2016 17:39:39 +0100 Subject: [PATCH 0987/2699] update hook order --- ceph-proxy/hooks/ceph_hooks.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 3419ccbe..5a381ecb 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -126,6 +126,12 @@ def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) + sysctl_dict = config('sysctl') + if sysctl_dict: + create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + if is_leader(): if not leader_get('fsid') or not leader_get('monitor-secret'): if config('fsid'): @@ -149,10 +155,6 @@ def config_changed(): status_set('waiting', 'Waiting for leader to setup keys') sys.exit(0) - sysctl_dict = config('sysctl') - if sysctl_dict: - create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') - emit_cephconf() # Support use of single node ceph @@ -161,9 +163,6 @@ def config_changed(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - if relations_of_type('nrpe-external-master'): - update_nrpe_config() - def get_mon_hosts(): hosts = [] From 3a636be69ff8a1999c3f6997d8d0d0fb2af8e32f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 29 Jan 2016 17:39:39 +0100 Subject: [PATCH 0988/2699] update hook order --- ceph-mon/hooks/ceph_hooks.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 3419ccbe..5a381ecb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -126,6 +126,12 @@ def config_changed(): log('Monitor hosts are ' + repr(get_mon_hosts())) + sysctl_dict = config('sysctl') + if sysctl_dict: + create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + if is_leader(): if not leader_get('fsid') or not leader_get('monitor-secret'): if config('fsid'): @@ -149,10 +155,6 @@ def config_changed(): status_set('waiting', 'Waiting for leader to setup keys') sys.exit(0) - sysctl_dict = config('sysctl') - if sysctl_dict: - create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') - emit_cephconf() # Support use of single node ceph @@ -161,9 +163,6 @@ def config_changed(): ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - if relations_of_type('nrpe-external-master'): - update_nrpe_config() - def get_mon_hosts(): hosts = [] From cef0c4a534007e09db76ae2b1ba8a2060458f28d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Sun, 31 Jan 2016 10:41:45 +0100 Subject: [PATCH 0989/2699] add actions from ceph charm --- ceph-proxy/actions.yaml | 4 ++++ ceph-proxy/actions/pause | 6 ++++++ ceph-proxy/actions/resume | 6 ++++++ 3 files changed, 16 insertions(+) create mode 100644 ceph-proxy/actions.yaml create mode 100755 ceph-proxy/actions/pause create mode 100755 ceph-proxy/actions/resume diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml new file mode 100644 index 00000000..d69cfb05 --- /dev/null +++ b/ceph-proxy/actions.yaml @@ -0,0 +1,4 @@ +pause: + description: Pause ceph health operations +resume: + description: Resume ceph health operations \ No newline at end of file 
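(The pause action that follows simply sets the cluster-wide nodown and noout flags, so OSDs are neither marked down nor have their data rebalanced away during maintenance; a rough Python equivalent of the two shell scripts below, shown only as an illustrative sketch:)

    import subprocess

    def pause_health():
        # Same effect as `ceph osd set nodown && ceph osd set noout`:
        # suppress down-marking and data migration during maintenance.
        for flag in ('nodown', 'noout'):
            subprocess.check_call(['ceph', 'osd', 'set', flag])

    def resume_health():
        # Reverses pause_health() by clearing both flags again.
        for flag in ('nodown', 'noout'):
            subprocess.check_call(['ceph', 'osd', 'unset', flag])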
diff --git a/ceph-proxy/actions/pause b/ceph-proxy/actions/pause new file mode 100755 index 00000000..207c4f65 --- /dev/null +++ b/ceph-proxy/actions/pause @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +ceph osd set nodown +ceph osd set noout \ No newline at end of file diff --git a/ceph-proxy/actions/resume b/ceph-proxy/actions/resume new file mode 100755 index 00000000..39d15a1f --- /dev/null +++ b/ceph-proxy/actions/resume @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +ceph osd unset nodown +ceph osd unset noout \ No newline at end of file From 744e1ba2e9a14f02eb319fd72183b54c7e0de44c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Sun, 31 Jan 2016 10:41:45 +0100 Subject: [PATCH 0990/2699] add actions from ceph charm --- ceph-mon/actions.yaml | 4 ++++ ceph-mon/actions/pause | 6 ++++++ ceph-mon/actions/resume | 6 ++++++ 3 files changed, 16 insertions(+) create mode 100644 ceph-mon/actions.yaml create mode 100755 ceph-mon/actions/pause create mode 100755 ceph-mon/actions/resume diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml new file mode 100644 index 00000000..d69cfb05 --- /dev/null +++ b/ceph-mon/actions.yaml @@ -0,0 +1,4 @@ +pause: + description: Pause ceph health operations +resume: + description: Resume ceph health operations \ No newline at end of file diff --git a/ceph-mon/actions/pause b/ceph-mon/actions/pause new file mode 100755 index 00000000..207c4f65 --- /dev/null +++ b/ceph-mon/actions/pause @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +ceph osd set nodown +ceph osd set noout \ No newline at end of file diff --git a/ceph-mon/actions/resume b/ceph-mon/actions/resume new file mode 100755 index 00000000..39d15a1f --- /dev/null +++ b/ceph-mon/actions/resume @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux + +ceph osd unset nodown +ceph osd unset noout \ No newline at end of file From e92350ee89f4f961e27f1c582a5904a682e1fd2a Mon Sep 17 00:00:00 2001 From: Bjorn Tillenius Date: Tue, 2 Feb 2016 16:54:17 +0200 Subject: [PATCH 0991/2699] Resolve symlinks in get_devices(). --- ceph-osd/hooks/ceph_hooks.py | 4 +- ceph-osd/unit_tests/test_config.py | 59 ++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/unit_tests/test_config.py diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 18d7141d..3a434815 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -218,7 +218,9 @@ def reformat_osd(): def get_devices(): if config('osd-devices'): - return config('osd-devices').split(' ') + return [ + os.path.realpath(path) + for path in config('osd-devices').split(' ')] else: return [] diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py new file mode 100644 index 00000000..2ee69d97 --- /dev/null +++ b/ceph-osd/unit_tests/test_config.py @@ -0,0 +1,59 @@ +import os.path +import shutil +import tempfile + +import test_utils + +import ceph_hooks as hooks + +TO_PATCH = [ + 'config', +] + + + +class GetDevicesTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(GetDevicesTestCase, self).setUp(hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp_dir) + + def test_get_devices_empty(self): + """ + If osd-devices is set to an empty string, get_devices() returns + an empty list. 
+ """ + self.test_config.set("osd-devices", "") + self.assertEqual([], hooks.get_devices()) + + def test_get_devices_non_existing_files(self): + """ + If osd-devices points to a file that doesn't exist, it's still + returned by get_devices(). + """ + non_existing = os.path.join(self.tmp_dir, "no-such-file") + self.test_config.set("osd-devices", non_existing) + self.assertEqual([non_existing], hooks.get_devices()) + + def test_get_devices_multiple(self): + """ + Multiple devices can be specified in osd-devices by separating + them with spaces. + """ + device1 = os.path.join(self.tmp_dir, "device1") + device2 = os.path.join(self.tmp_dir, "device2") + self.test_config.set("osd-devices", "{} {}".format(device1, device2)) + self.assertEqual([device1, device2], hooks.get_devices()) + + def test_get_devices_symlink(self): + """ + If a symlink is specified in osd-devices, get_devices() resolves + it and returns the link target. + """ + device = os.path.join(self.tmp_dir, "device") + link = os.path.join(self.tmp_dir, "link") + os.symlink(device, link) + self.test_config.set("osd-devices", link) + self.assertEqual([device], hooks.get_devices()) From a30902d362c302e19265c64d8c96b7999faf6065 Mon Sep 17 00:00:00 2001 From: Bjorn Tillenius Date: Tue, 2 Feb 2016 19:02:58 +0200 Subject: [PATCH 0992/2699] Lint. --- ceph-osd/unit_tests/test_config.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index 2ee69d97..4ad626ff 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -11,7 +11,6 @@ ] - class GetDevicesTestCase(test_utils.CharmTestCase): def setUp(self): From 26324cc9a8a3099996c37d7b2657d1b89710aa37 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 10 Feb 2016 15:21:47 +0000 Subject: [PATCH 0993/2699] [hopem,r=] Support multiple l3 segments. Closes-Bug: 1523871 --- ceph-osd/config.yaml | 6 ++++++ ceph-osd/hooks/ceph_hooks.py | 21 ++++++++++++++------- ceph-osd/hooks/utils.py | 13 ++++++++----- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 2dc2a586..e748671a 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -107,12 +107,18 @@ options: description: | The IP address and netmask of the public (front-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. ceph-cluster-network: type: string default: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. 
prefer-ipv6: type: boolean default: False diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 18d7141d..cb5c6cc6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -41,15 +41,16 @@ from charmhelpers.core.sysctl import create as create_sysctl from utils import ( - render_template, get_host_ip, - assert_charm_supports_ipv6 + get_networks, + assert_charm_supports_ipv6, + render_template, ) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, - format_ipv6_addr + format_ipv6_addr, ) from charmhelpers.contrib.charmsupport import nrpe @@ -76,6 +77,12 @@ def emit_cephconf(): mon_hosts = get_mon_hosts() log('Monitor hosts are ' + repr(mon_hosts)) + networks = get_networks('ceph-public-network') + public_network = ', '.join(networks) + + networks = get_networks('ceph-cluster-network') + cluster_network = ', '.join(networks) + cephcontext = { 'auth_supported': get_auth(), 'mon_hosts': ' '.join(mon_hosts), @@ -83,16 +90,16 @@ def emit_cephconf(): 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network'), + 'ceph_public_network': public_network, + 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), } if config('prefer-ipv6'): dynamic_ipv6_address = get_ipv6_addr()[0] - if not config('ceph-public-network'): + if not public_network: cephcontext['public_addr'] = dynamic_ipv6_address - if not config('ceph-cluster-network'): + if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address # Install ceph.conf as an alternative to support diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index ada3563b..1823790a 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -23,8 +23,8 @@ lsb_release ) -from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( + get_address_in_network, get_ipv6_addr ) @@ -87,10 +87,13 @@ def get_host_ip(hostname=None): return answers[0].address -@cached -def get_public_addr(): - return ip.get_address_in_network(config('ceph-public-network'), - fallback=get_host_ip()) +def get_networks(config_opt='ceph-public-network'): + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] def assert_charm_supports_ipv6(): From 9280ae7953459330f716553f6a6db7a3131b6455 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 08:22:48 -0500 Subject: [PATCH 0994/2699] rename actions --- ceph-proxy/actions.yaml | 4 ++-- ceph-proxy/actions/{pause => pause-health} | 0 ceph-proxy/actions/{resume => resume-health} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename ceph-proxy/actions/{pause => pause-health} (100%) rename ceph-proxy/actions/{resume => resume-health} (100%) diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml index d69cfb05..5f9310c5 100644 --- a/ceph-proxy/actions.yaml +++ b/ceph-proxy/actions.yaml @@ -1,4 +1,4 @@ -pause: +pause-health: description: Pause ceph health operations -resume: +resume-health: description: Resume ceph health operations \ No newline at end of file diff --git a/ceph-proxy/actions/pause b/ceph-proxy/actions/pause-health similarity index 100% rename from ceph-proxy/actions/pause rename to ceph-proxy/actions/pause-health 
diff --git a/ceph-proxy/actions/resume b/ceph-proxy/actions/resume-health similarity index 100% rename from ceph-proxy/actions/resume rename to ceph-proxy/actions/resume-health From 22d23d2c3dd82aa63e7ab90c8df11dde5e5885b2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 08:22:48 -0500 Subject: [PATCH 0995/2699] rename actions --- ceph-mon/actions.yaml | 4 ++-- ceph-mon/actions/{pause => pause-health} | 0 ceph-mon/actions/{resume => resume-health} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename ceph-mon/actions/{pause => pause-health} (100%) rename ceph-mon/actions/{resume => resume-health} (100%) diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index d69cfb05..5f9310c5 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -1,4 +1,4 @@ -pause: +pause-health: description: Pause ceph health operations -resume: +resume-health: description: Resume ceph health operations \ No newline at end of file diff --git a/ceph-mon/actions/pause b/ceph-mon/actions/pause-health similarity index 100% rename from ceph-mon/actions/pause rename to ceph-mon/actions/pause-health diff --git a/ceph-mon/actions/resume b/ceph-mon/actions/resume-health similarity index 100% rename from ceph-mon/actions/resume rename to ceph-mon/actions/resume-health From e802cdc4143ec2f3fe242dc472d37d3a9a988f9c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 08:26:41 -0500 Subject: [PATCH 0996/2699] add tests --- ceph-proxy/tests/basic_deployment.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index c009503c..66e48876 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -437,6 +437,27 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Pool list on all ceph units produced the ' 'same results (OK).') + def test_402_pause_resume_actions(self): + """Verify that pause/resume works""" + u.log.debug("Testing pause") + cmd = "ceph -s" + + sentry_unit = self.ceph0_sentry + action_id = u.run_action(sentry_unit, 'pause-health') + assert u.wait_on_action(action_id), "Pause health action failed." + + output, code = sentry_unit.run(cmd) + if 'nodown' not in output or 'noout' not in output: + amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown") + + u.log.debug("Testing resume") + action_id = u.run_action(sentry_unit, 'resume-health') + assert u.wait_on_action(action_id), "Resume health action failed."
+ + output, code = sentry_unit.run(cmd) + if 'nodown' in output or 'noout' in output: + amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") + def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect ceph cinder pool object count as the volume is created From 9f8f55d320fd1e6ab79eb6cb8d07e44b0170b2ec Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 08:26:41 -0500 Subject: [PATCH 0997/2699] add tests --- ceph-mon/tests/basic_deployment.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index c009503c..66e48876 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -437,6 +437,27 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Pool list on all ceph units produced the ' 'same results (OK).') + def test_402_pause_resume_actions(self): + """Verify that pause/resume works""" + u.log.debug("Testing pause") + cmd = "ceph -s" + + sentry_unit = self.ceph0_sentry + action_id = u.run_action(sentry_unit, 'pause-health') + assert u.wait_on_action(action_id), "Pause health action failed." + + output, code = sentry_unit.run(cmd) + if 'nodown' not in output or 'noout' not in output: + amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown") + + u.log.debug("Testing resume") + action_id = u.run_action(sentry_unit, 'resume-health') + assert u.wait_on_action(action_id), "Resume health action failed." + + output, code = sentry_unit.run(cmd) + if 'nodown' in output or 'noout' in output: + amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") + def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect ceph cinder pool object count as the volume is created From 519d8937f9765c85534f5839e9c19b82ba02f0da Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 12:29:36 -0500 Subject: [PATCH 0998/2699] update for lint --- ceph-proxy/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 66e48876..f6fd5418 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -26,7 +26,7 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._add_relations() self._configure_services() self._deploy() - + u.log.info('Waiting on extended status checks...') exclude_services = ['mysql'] self._auto_wait_for_status(exclude_services=exclude_services) From 9a11fd31fc03c8de46b6ddb7e131f984436f81cb Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Feb 2016 12:29:36 -0500 Subject: [PATCH 0999/2699] update for lint --- ceph-mon/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 66e48876..f6fd5418 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -26,7 +26,7 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._add_relations() self._configure_services() self._deploy() - + u.log.info('Waiting on extended status checks...') exclude_services = ['mysql'] self._auto_wait_for_status(exclude_services=exclude_services) From 6af53748f2823f55c4f120e391fdd312754f7cdc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 12 Feb 2016 21:23:26 +0000 Subject: [PATCH 1000/2699]
Update amulet test definitions; Wait for workload status before testing. --- ceph-osd/tests/019-basic-trusty-mitaka | 0 ceph-osd/tests/basic_deployment.py | 10 +++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) mode change 100644 => 100755 ceph-osd/tests/019-basic-trusty-mitaka diff --git a/ceph-osd/tests/019-basic-trusty-mitaka b/ceph-osd/tests/019-basic-trusty-mitaka old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 4aba7fc0..61a4fe0a 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -27,6 +27,13 @@ def __init__(self, series=None, openstack=None, source=None, self._add_relations() self._configure_services() self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['mysql'] + + # Wait for deployment ready msgs, except exclusions + self._auto_wait_for_status(exclude_services=exclude_services) + self._initialize_tests() def _add_services(self): @@ -114,9 +121,6 @@ def _initialize_tests(self): u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) - # Let things settle a bit original moving forward - time.sleep(30) - # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin', From 6eb10d5118bc0f12426bf2948ce2651df50c5e48 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 12 Feb 2016 21:43:28 +0000 Subject: [PATCH 1001/2699] Update amulet test definitions; Wait for workload status before testing. --- ceph-radosgw/tests/018-basic-trusty-liberty | 0 ceph-radosgw/tests/019-basic-trusty-mitaka | 0 ceph-radosgw/tests/020-basic-wily-liberty | 0 ceph-radosgw/tests/basic_deployment.py | 11 +++++++---- 4 files changed, 7 insertions(+), 4 deletions(-) mode change 100644 => 100755 ceph-radosgw/tests/018-basic-trusty-liberty mode change 100644 => 100755 ceph-radosgw/tests/019-basic-trusty-mitaka mode change 100644 => 100755 ceph-radosgw/tests/020-basic-wily-liberty diff --git a/ceph-radosgw/tests/018-basic-trusty-liberty b/ceph-radosgw/tests/018-basic-trusty-liberty old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/019-basic-trusty-mitaka b/ceph-radosgw/tests/019-basic-trusty-mitaka old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/020-basic-wily-liberty b/ceph-radosgw/tests/020-basic-wily-liberty old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 95fa404f..b879a608 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -1,7 +1,6 @@ #!/usr/bin/python import amulet -import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -26,6 +25,13 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._add_relations() self._configure_services() self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['mysql'] + + # Wait for deployment ready msgs, except exclusions + self._auto_wait_for_status(exclude_services=exclude_services) + self._initialize_tests() def _add_services(self): @@ -108,9 +114,6 @@ def _initialize_tests(self): u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) - # Let things settle a bit original moving forward - time.sleep(30) - # Authenticate admin with keystone self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin', From 
e5db15143cba664ee7e4abd198d6340065db2c66 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 16 Feb 2016 06:59:45 +0000 Subject: [PATCH 1002/2699] Tidy tox targets --- ceph-osd/tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 4e328e48..e8bf7cf9 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = lint,py27 +envlist = pep8,py27 skipsdist = True [testenv] @@ -14,7 +14,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:lint] +[testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 86ceb10dbb76188958512a41c91ab295804bc8f2 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 16 Feb 2016 07:00:26 +0000 Subject: [PATCH 1003/2699] Tidy tox targets --- ceph-radosgw/tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 4e328e48..e8bf7cf9 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = lint,py27 +envlist = pep8,py27 skipsdist = True [testenv] @@ -14,7 +14,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:lint] +[testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 5cf6e5deb7b243414027912b1664ca768d4a232c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 08:14:56 +0000 Subject: [PATCH 1004/2699] relation_get needs the remote unit if not running in the context of the relation that it is updating --- ceph-proxy/hooks/ceph_hooks.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 5a381ecb..c5461b60 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -231,7 +231,8 @@ def notify_osds(): def notify_radosgws(): for relid in relation_ids('radosgw'): - radosgw_relation(relid) + for unit in related_units(relid): + radosgw_relation(relid=relid, unit=unit) def notify_client(): @@ -270,19 +271,20 @@ def osd_relation(relid=None): @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') -def radosgw_relation(relid=None): +def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) - + if not unit: + unit=remote_unit() """Process broker request(s).""" if ceph.is_quorum(): - settings = relation_get(rid=relid) + settings = relation_get(rid=relid, unit=unit) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = remote_unit().replace('/', '-') + unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') data = { From 9129e262a5656e72ce887e9f3d672bf9ee4e81af Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 08:14:56 +0000 Subject: [PATCH 1005/2699] relation_get needs the remote unit if not running in the context of the relation that it is updating --- ceph-mon/hooks/ceph_hooks.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index c5461b60..6be4cf0b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++
b/ceph-mon/hooks/ceph_hooks.py @@ -231,7 +231,8 @@ def notify_osds(): def notify_radosgws(): for relid in relation_ids('radosgw'): - radosgw_relation(relid) + for unit in related_units(relid): + radosgw_relation(relid=relid, unit=unit) def notify_client(): @@ -270,19 +271,20 @@ def osd_relation(relid=None): @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') -def radosgw_relation(relid=None): +def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) - + if not unit: + unit=remote_unit() """Process broker request(s).""" if ceph.is_quorum(): - settings = relation_get(rid=relid) + settings = relation_get(rid=relid, unit=unit) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = remote_unit().replace('/', '-') + unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') data = { From 35ce386d4f1f2cb7716bd76a54d5c08e3c901b48 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 08:52:55 +0000 Subject: [PATCH 1006/2699] Added logging --- ceph-proxy/hooks/ceph_hooks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index c5461b60..6be4cf0b 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -230,6 +230,7 @@ def notify_osds(): def notify_radosgws(): + log('LY: notify_radosgws') for relid in relation_ids('radosgw'): for unit in related_units(relid): radosgw_relation(relid=relid, unit=unit) @@ -273,16 +274,20 @@ def osd_relation(relid=None): @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools + log('LY: radosgw_relation {} {}'.format(relid, unit)) apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: - unit=remote_unit() + unit = remote_unit() """Process broker request(s).""" if ceph.is_quorum(): + log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) + log('LY: {}'.format(settings)) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: + log('LY: Processing radosgw request') rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id @@ -294,6 +299,7 @@ def radosgw_relation(relid=None, unit=None): 'ceph-public-address': get_public_addr(), unit_response_key: rsp, } + log('LY: Setting radosgw with {}'.format(data)) relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From 794bc13379fba5d606db0b3c3c16fab74ed0c408 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 08:52:55 +0000 Subject: [PATCH 1007/2699] Added logging --- ceph-mon/hooks/ceph_hooks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index c5461b60..6be4cf0b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -230,6 +230,7 @@ def notify_osds(): def notify_radosgws(): + log('LY: notify_radosgws') for relid in relation_ids('radosgw'): for unit in related_units(relid): radosgw_relation(relid=relid, unit=unit) @@ -273,16 +274,20 @@ def osd_relation(relid=None): 
@hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools + log('LY: radosgw_relation {} {}'.format(relid, unit)) apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: - unit=remote_unit() + unit = remote_unit() """Process broker request(s).""" if ceph.is_quorum(): + log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) + log('LY: {}'.format(settings)) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: + log('LY: Processing radosgw request') rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id @@ -294,6 +299,7 @@ def radosgw_relation(relid=None, unit=None): 'ceph-public-address': get_public_addr(), unit_response_key: rsp, } + log('LY: Setting radosgw with {}'.format(data)) relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From d6ddd0bd1b9dc8ad17b6b9e45b0e9879a5d6a182 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 09:34:53 +0000 Subject: [PATCH 1008/2699] Check quorum --- ceph-proxy/hooks/ceph_hooks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 6be4cf0b..82fdbb59 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -279,6 +279,10 @@ def radosgw_relation(relid=None, unit=None): if not unit: unit = remote_unit() """Process broker request(s).""" + import time + print "LY: Quorum: {}".format(ceph.is_quorum()) + time.sleep(60) + print "LY: Quorum: {}".format(ceph.is_quorum()) if ceph.is_quorum(): log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) From 072bfb56fb78c64e4e808fd65d3d57dd76f3a874 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 09:34:53 +0000 Subject: [PATCH 1009/2699] Check quorum --- ceph-mon/hooks/ceph_hooks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 6be4cf0b..82fdbb59 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -279,6 +279,10 @@ def radosgw_relation(relid=None, unit=None): if not unit: unit = remote_unit() """Process broker request(s).""" + import time + print "LY: Quorum: {}".format(ceph.is_quorum()) + time.sleep(60) + print "LY: Quorum: {}".format(ceph.is_quorum()) if ceph.is_quorum(): log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) From 9d36ed752345673775bc971ebbd39f2b3828afc2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 09:46:25 +0000 Subject: [PATCH 1010/2699] Wait for quorum --- ceph-proxy/hooks/ceph.py | 1 + ceph-proxy/hooks/ceph_hooks.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index f3ce6910..5523b08a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -145,6 +145,7 @@ def is_leader(): def wait_for_quorum(): while not is_quorum(): + log("Waiting for quorum to be reached") time.sleep(3) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 82fdbb59..7775d3ab 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -216,6 +216,7 @@ def mon_relation(): status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() + ceph.wait_for_quorum()
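+        # Wait until the mon cluster is quorate before the notify_* calls
+        # below run: key provision and broker responses are deferred
+        # whenever the cluster is not yet in quorum (see radosgw_relation
+        # above).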
notify_osds() notify_radosgws() notify_client() From 6111dddf5ca485828f67ab3b4f6747ee7763a7c5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 09:46:25 +0000 Subject: [PATCH 1011/2699] Wait for quorum --- ceph-mon/hooks/ceph.py | 1 + ceph-mon/hooks/ceph_hooks.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index f3ce6910..5523b08a 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -145,6 +145,7 @@ def is_leader(): def wait_for_quorum(): while not is_quorum(): + log("Waiting for quorum to be reached") time.sleep(3) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 82fdbb59..7775d3ab 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -216,6 +216,7 @@ def mon_relation(): status_set('maintenance', 'Bootstrapping MON cluster') ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() + ceph.wait_for_quorum() notify_osds() notify_radosgws() notify_client() From 7a5ad1759553b1941b53cd7d7d6ffccd1f038939 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 10:05:11 +0000 Subject: [PATCH 1012/2699] Remove temp debug logging --- ceph-proxy/hooks/ceph_hooks.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 7775d3ab..2a0d1682 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -231,7 +231,6 @@ def notify_osds(): def notify_radosgws(): - log('LY: notify_radosgws') for relid in relation_ids('radosgw'): for unit in related_units(relid): radosgw_relation(relid=relid, unit=unit) @@ -275,24 +274,16 @@ def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools - log('LY: radosgw_relation {} {}'.format(relid, unit)) apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: unit = remote_unit() """Process broker request(s).""" - import time - print "LY: Quorum: {}".format(ceph.is_quorum()) - time.sleep(60) - print "LY: Quorum: {}".format(ceph.is_quorum()) if ceph.is_quorum(): - log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) - log('LY: {}'.format(settings)) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - log('LY: Processing radosgw request') rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id @@ -304,7 +295,6 @@ def radosgw_relation(relid=None, unit=None): 'ceph-public-address': get_public_addr(), unit_response_key: rsp, } - log('LY: Setting radosgw with {}'.format(data)) relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From 5bc07fe46850147d663ba4e8d94891af6be5bb1a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 17 Feb 2016 10:05:11 +0000 Subject: [PATCH 1013/2699] Remove temp debug logging --- ceph-mon/hooks/ceph_hooks.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 7775d3ab..2a0d1682 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -231,7 +231,6 @@ def notify_osds(): def notify_radosgws(): - log('LY: notify_radosgws') for relid in relation_ids('radosgw'): for unit in related_units(relid): radosgw_relation(relid=relid, unit=unit) @@ -275,24 +274,16 @@ def
osd_relation(relid=None): @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): # Install radosgw for admin tools - log('LY: radosgw_relation {} {}'.format(relid, unit)) apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: unit = remote_unit() """Process broker request(s).""" - import time - print "LY: Quorum: {}".format(ceph.is_quorum()) - time.sleep(60) - print "LY: Quorum: {}".format(ceph.is_quorum()) if ceph.is_quorum(): - log('LY: ceph is quorum') settings = relation_get(rid=relid, unit=unit) - log('LY: {}'.format(settings)) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: - log('LY: Processing radosgw request') rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id @@ -304,7 +295,6 @@ def radosgw_relation(relid=None, unit=None): 'ceph-public-address': get_public_addr(), unit_response_key: rsp, } - log('LY: Setting radosgw with {}'.format(data)) relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From aca7e00877314a2d394b4ff4e7c67876051e8c63 Mon Sep 17 00:00:00 2001 From: Ante Karamatic Date: Wed, 17 Feb 2016 12:27:44 +0100 Subject: [PATCH 1014/2699] Give up also when Keystone denies connection, seen in Liberty --- ceph-radosgw/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3b1313b4..39fa99c1 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -189,7 +189,7 @@ def setup_keystone_certs(unit=None, rid=None): import requests try: # Kilo and newer - from keystoneclient.exceptions import ConnectionRefused + from keystoneclient.exceptions import ConnectionRefused, Forbidden except ImportError: # Juno and older from keystoneclient.exceptions import ConnectionError as \ @@ -228,7 +228,7 @@ def setup_keystone_certs(unit=None, rid=None): # Juno and older ca_cert = requests.request('GET', auth_endpoint + '/certificates/ca').text - except (ConnectionRefused, requests.exceptions.ConnectionError): + except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden): log("Error connecting to keystone - skipping ca/signing cert setup", level=WARNING) return From 7a681fdb5ccec107a9363056a9f54486352f9c02 Mon Sep 17 00:00:00 2001 From: Ante Karamatic Date: Wed, 17 Feb 2016 15:32:06 +0100 Subject: [PATCH 1015/2699] Add Forbidden exception for Juno and older --- ceph-radosgw/hooks/hooks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 39fa99c1..9a4b1a98 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -189,11 +189,16 @@ def setup_keystone_certs(unit=None, rid=None): import requests try: # Kilo and newer - from keystoneclient.exceptions import ConnectionRefused, Forbidden + from keystoneclient.exceptions import ( + ConnectionRefused, + Forbidden + ) except ImportError: # Juno and older - from keystoneclient.exceptions import ConnectionError as \ - ConnectionRefused + from keystoneclient.exceptions import ( + ConnectionError as ConnectionRefused, + Forbidden + ) from keystoneclient.v2_0 import client From 294886d28aa14bf54157214e5cb5d7f6341e362b Mon Sep 17 00:00:00 2001 From: Ante Karamatic Date: Wed, 17 Feb 2016 17:02:22 +0100 Subject: [PATCH 1016/2699] Fix lint errors --- 
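The two patches above settle on a version-spanning import; shown standalone, the pattern aliases the old exception name to the new one (module paths as in the diff):

try:
    # Kilo and newer
    from keystoneclient.exceptions import (
        ConnectionRefused,
        Forbidden,
    )
except ImportError:
    # Juno and older only ship ConnectionError
    from keystoneclient.exceptions import (
        ConnectionError as ConnectionRefused,
        Forbidden,
    )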
ceph-radosgw/hooks/hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 9a4b1a98..e88c633e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -192,13 +192,13 @@ def setup_keystone_certs(unit=None, rid=None): from keystoneclient.exceptions import ( ConnectionRefused, Forbidden - ) + ) except ImportError: # Juno and older from keystoneclient.exceptions import ( ConnectionError as ConnectionRefused, Forbidden - ) + ) from keystoneclient.v2_0 import client From 399abf58c552bf28f589c27c7134f9b4d4f7ec07 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 21:45:37 +0000 Subject: [PATCH 1017/2699] Cherry-pick hopem amulet test update --- ceph-radosgw/tests/basic_deployment.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index b879a608..ed0e1c7d 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -231,22 +231,33 @@ def test_200_ceph_radosgw_ceph_relation(self): message = u.relation_error('ceph-radosgw to ceph', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_201_ceph0_ceph_radosgw_relation(self): - """Verify the ceph0 to ceph-radosgw relation data.""" + def test_201_ceph_radosgw_relation(self): + """Verify the ceph to ceph-radosgw relation data. + + At least one unit (the leader) must have all data provided by the ceph + charm. + """ u.log.debug('Checking ceph0:radosgw radosgw:mon relation data...') - unit = self.ceph0_sentry + s_entries = [ + self.ceph0_sentry, + self.ceph1_sentry, + self.ceph2_sentry + ] relation = ['radosgw', 'ceph-radosgw:mon'] expected = { 'private-address': u.valid_ip, 'radosgw_key': u.not_null, - 'auth': 'none', + 'auth': 'none', 'ceph-public-address': u.valid_ip, 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' } - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph0 to ceph-radosgw', ret) + ret = [] + for unit in s_entries: + ret.append(u.validate_relation_data(unit, relation, expected)) + + if not any(ret): + message = u.relation_error('ceph to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_202_ceph1_ceph_radosgw_relation(self): From 5c82def03b55ed4c1faac55fc51ffa3bec154908 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 21:46:00 +0000 Subject: [PATCH 1018/2699] Enable Xenial amulet test --- ceph-radosgw/tests/021-basic-xenial-mitaka | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ceph-radosgw/tests/021-basic-xenial-mitaka diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/021-basic-xenial-mitaka old mode 100644 new mode 100755 From 74ea5abd971e23034276171966b8df39c119106f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:17:25 +0000 Subject: [PATCH 1019/2699] Sync charm helpers for Mitaka awareness --- .../hooks/charmhelpers/contrib/network/ip.py | 57 ++- .../contrib/openstack/amulet/deployment.py | 13 +- .../charmhelpers/contrib/openstack/context.py | 52 ++- .../contrib/openstack/files/check_haproxy.sh | 12 +- .../charmhelpers/contrib/openstack/neutron.py | 24 +- .../contrib/openstack/templates/haproxy.cfg | 5 +- .../templates/section-keystone-authtoken | 11 + .../charmhelpers/contrib/openstack/utils.py | 287 ++++++++++--- .../charmhelpers/contrib/python/packages.py | 46 +- 
.../contrib/storage/linux/ceph.py | 400 +++++++++++++++++- .../hooks/charmhelpers/core/hookenv.py | 48 ++- ceph-radosgw/hooks/charmhelpers/core/host.py | 138 ++++-- .../charmhelpers/core/services/helpers.py | 16 +- .../hooks/charmhelpers/core/templating.py | 20 +- .../hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 2 +- .../hooks/charmhelpers/fetch/bzrurl.py | 54 +-- .../hooks/charmhelpers/fetch/giturl.py | 41 +- .../contrib/openstack/amulet/deployment.py | 13 +- 19 files changed, 985 insertions(+), 264 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 7f3b66b1..4efe7993 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -53,7 +53,7 @@ def _validate_cidr(network): def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) + errmsg = ("No IP address found in network(s): %s" % network) raise ValueError(errmsg) @@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). @@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False): else: return None - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -454,3 +456,18 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def port_has_listener(address, port): + """ + Returns True if the address:port is open and being listened to, + else False. 
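The reworked get_address_in_network() above accepts a space-delimited list of CIDRs; a quick, hypothetical call (the CIDRs and fallback are example values, and the result depends on the host's interfaces):

addr = get_address_in_network('192.168.1.0/24 10.5.0.0/16',
                              fallback='127.0.0.1')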
+ + @param address: an IP address or hostname + @param port: integer port + + Note calls 'nc' via a subprocess shell + """ + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not(bool(result)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 0506491b..d2ede320 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -121,11 +121,12 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: @@ -225,7 +226,8 @@ def _get_openstack_release(self): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +239,11 @@ def _get_openstack_release(self): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +260,7 @@ def _get_openstack_release_string(self): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 61073cd3..a8c6ab0c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -57,6 +57,7 @@ get_nic_hwaddr, mkdir, write_file, + pwgen, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -87,6 +88,14 @@ is_bridge_member, ) from charmhelpers.contrib.openstack.utils import get_host_ip +from charmhelpers.core.unitdata import kv + +try: + import psutil +except ImportError: + apt_install('python-psutil', fatal=True) + import psutil + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] @@ -401,6 +410,7 @@ def __call__(self): auth_host = format_ipv6_addr(auth_host) or auth_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, @@ -409,7 +419,8 @@ def __call__(self): 'admin_user': rdata.get('service_username'), 'admin_password':
rdata.get('service_password'), 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol}) + 'auth_protocol': auth_protocol, + 'api_version': api_version}) if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse @@ -636,11 +647,18 @@ def __call__(self): ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' - ctxt['stat_port'] = ':::8888' else: ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' - ctxt['stat_port'] = ':8888' + + ctxt['stat_port'] = '8888' + + db = kv() + ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() for frontend in cluster_hosts: if (len(cluster_hosts[frontend]['backends']) > 1 or @@ -1094,6 +1112,20 @@ def __call__(self): config_flags_parser(config_flags)} +class LibvirtConfigFlagsContext(OSContextGenerator): + """ + This context provides support for extending + the libvirt section through user-defined flags. + """ + def __call__(self): + ctxt = {} + libvirt_flags = config('libvirt-flags') + if libvirt_flags: + ctxt['libvirt_flags'] = config_flags_parser( + libvirt_flags) + return ctxt + + class SubordinateConfigContext(OSContextGenerator): """ @@ -1234,13 +1266,11 @@ class WorkerConfigContext(OSContextGenerator): @property def num_cpus(self): - try: - from psutil import NUM_CPUS - except ImportError: - apt_install('python-psutil', fatal=True) - from psutil import NUM_CPUS - - return NUM_CPUS + # NOTE: use cpu_count if present (16.04 support) + if hasattr(psutil, 'cpu_count'): + return psutil.cpu_count() + else: + return psutil.NUM_CPUS def __call__(self): multiplier = config('worker-multiplier') or 0 @@ -1443,6 +1473,8 @@ def __call__(self): rdata.get('service_protocol') or 'http', 'auth_protocol': rdata.get('auth_protocol') or 'http', + 'api_version': + rdata.get('api_version') or '2.0', } if self.context_complete(ctxt): return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh index eb8527f5..0df07176 100755 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -9,15 +9,17 @@ CRITICAL=0 NOTACTIVE='' LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') +AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') -for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); +typeset -i N_INSTANCES=0 +for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) do - output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') + N_INSTANCES=N_INSTANCES+1 + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') if [ $? 
!= 0 ]; then date >> $LOGFILE echo $output >> $LOGFILE - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 CRITICAL=1 NOTACTIVE="${NOTACTIVE} $appserver" fi @@ -28,5 +30,5 @@ if [ $CRITICAL = 1 ]; then exit 2 fi -echo "OK: All haproxy instances looking good" +echo "OK: All haproxy instances ($N_INSTANCES) looking good" exit 0 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index d17c847e..dbc489ab 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -50,7 +50,7 @@ def determine_dkms_package(): if kernel_version() >= (3, 13): return [] else: - return ['openvswitch-datapath-dkms'] + return [headers_package(), 'openvswitch-datapath-dkms'] # legacy @@ -70,7 +70,7 @@ def quantum_plugins(): relation_prefix='neutron', ssl_dir=QUANTUM_CONF_DIR)], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], 'server_packages': ['quantum-server', 'quantum-plugin-openvswitch'], @@ -111,7 +111,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], 'server_packages': ['neutron-server', 'neutron-plugin-openvswitch'], @@ -155,7 +155,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], @@ -174,7 +174,7 @@ def neutron_plugins(): 'neutron-dhcp-agent', 'nova-api-metadata', 'etcd'], - 'packages': [[headers_package()] + determine_dkms_package(), + 'packages': [determine_dkms_package(), ['calico-compute', 'bird', 'neutron-dhcp-agent', @@ -219,7 +219,7 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [[headers_package()] + determine_dkms_package()], + 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', 'python-neutron-plugin-midonet'], 'server_services': ['neutron-server'] @@ -233,6 +233,18 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] + if release >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if release >= 'liberty': + midonet_origin = config('midonet-origin') + if midonet_origin is not None and midonet_origin[4:5] == '1': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') return plugins diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 8721d8a1..32b62767 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -33,13 +33,14 @@ defaults timeout server 30000 {%- endif %} -listen stats {{ stat_port }} +listen stats + bind {{ local_host }}:{{ stat_port }} mode http stats enable stats hide-version stats realm Haproxy\ Statistics stats uri / - stats auth admin:password + stats auth admin:{{ stat_password }} {% if frontends -%} {% for service, ports in service_ports.items() -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 2a37edd5..0b6da25c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -1,4 +1,14 @@ {% if auth_host -%} +{% if api_version == '3' -%} +[keystone_authtoken] +auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +project_domain_name = default +user_domain_name = default +auth_plugin = password +{% else -%} [keystone_authtoken] identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} @@ -7,3 +17,4 @@ admin_user = {{ admin_user }} admin_password = {{ admin_password }} signing_dir = {{ signing_dir }} {% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index fc479a30..80dd2e0d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,8 +23,10 @@ import os import sys import re +import itertools import six +import tempfile import traceback import uuid import yaml @@ -41,6 +43,7 @@ config, log as juju_log, charm_dir, + DEBUG, INFO, related_units, relation_ids, @@ -58,6 +61,7 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, is_ipv6, + port_has_listener, ) from charmhelpers.contrib.python.packages import ( @@ -65,7 +69,7 @@ pip_install, ) -from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.core.host import lsb_release, mounts, umount, service_running from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -86,6 +90,7 @@ ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) @@ -99,61 +104,70 @@ ('2014.2', 'juno'), ('2015.1', 'kilo'), ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), ]) -# The ugly duckling +# The ugly duckling - must list releases oldest to newest SWIFT_CODENAMES = OrderedDict([ - ('1.4.3', 'diablo'), - ('1.4.8', 'essex'), - ('1.7.4', 'folsom'), - ('1.8.0', 'grizzly'), - ('1.7.7', 'grizzly'), - ('1.7.6', 'grizzly'), - ('1.10.0', 'havana'), - ('1.9.1', 'havana'), - ('1.9.0', 'havana'), - ('1.13.1', 'icehouse'), - ('1.13.0', 'icehouse'), - ('1.12.0', 'icehouse'), - ('1.11.0', 'icehouse'), - ('2.0.0', 'juno'), - ('2.1.0', 'juno'), - ('2.2.0', 'juno'), - ('2.2.1', 'kilo'), - ('2.2.2', 'kilo'), - ('2.3.0', 'liberty'), - ('2.4.0', 'liberty'), - ('2.5.0', 'liberty'), + ('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + 
('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0']), ]) # >= Liberty version->codename mapping PACKAGE_CODENAMES = { 'nova-common': OrderedDict([ - ('12.0.0', 'liberty'), + ('12.0', 'liberty'), + ('13.0', 'mitaka'), ]), 'neutron-common': OrderedDict([ - ('7.0.0', 'liberty'), + ('7.0', 'liberty'), + ('8.0', 'mitaka'), ]), 'cinder-common': OrderedDict([ - ('7.0.0', 'liberty'), + ('7.0', 'liberty'), + ('8.0', 'mitaka'), ]), 'keystone': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), 'horizon-common': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), 'ceilometer-common': OrderedDict([ - ('5.0.0', 'liberty'), + ('5.0', 'liberty'), + ('6.0', 'mitaka'), ]), 'heat-common': OrderedDict([ - ('5.0.0', 'liberty'), + ('5.0', 'liberty'), + ('6.0', 'mitaka'), ]), 'glance-common': OrderedDict([ - ('11.0.0', 'liberty'), + ('11.0', 'liberty'), + ('12.0', 'mitaka'), ]), 'openstack-dashboard': OrderedDict([ - ('8.0.0', 'liberty'), + ('8.0', 'liberty'), + ('9.0', 'mitaka'), ]), } @@ -216,6 +230,33 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): error_out(e) +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. 
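With SWIFT_CODENAMES now keyed by codename, lookups run in both directions; a small illustration (assumes the mapping and six are in scope as in the module above):

version = '2.2.1'
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
assert codenames == ['kilo']
assert SWIFT_CODENAMES['kilo'][-1] == '2.2.2'
# '2.5.0' appears under both liberty and mitaka; that is exactly the
# ambiguity get_swift_codename() resolves against the install source.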
+ for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + return None + + def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' import apt_pkg as apt @@ -240,7 +281,14 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + if 'swift' in pkg.name: + # Fully x.y.z match for swift versions + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + else: + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + if match: vers = match.group(0) @@ -252,13 +300,8 @@ def get_os_codename_package(package, fatal=True): # < Liberty co-ordinated project versions try: if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] + return get_swift_codename(vers) else: - vers = vers[:6] return OPENSTACK_CODENAMES[vers] except KeyError: if not fatal: @@ -276,12 +319,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES + for cname, version in six.iteritems(vers_map): + if cname == codename: + return version[-1] else: vers_map = OPENSTACK_CODENAMES - - for version, cname in six.iteritems(vers_map): - if cname == codename: - return version + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version # e = "Could not determine OpenStack version for package: %s" % pkg # error_out(e) @@ -306,12 +351,42 @@ def os_release(package, base='essex'): def import_key(keyid): - cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ - "--recv-keys %s" % keyid - try: - subprocess.check_call(cmd.split(' ')) - except subprocess.CalledProcessError: - error_out("Error importing repo key %s" % keyid) + key = keyid.strip() + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + juju_log("Importing ASCII Armor PGP key", level=DEBUG) + with tempfile.NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + else: + juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) + juju_log("Importing PGP key from keyserver", level=DEBUG) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + + +def get_source_and_pgp_key(input): + """Look for a pgp key ID or ascii-armor key in the given input.""" + index = input.strip() + index = input.rfind('|') + if index < 0: + return input, None + + key = input[index + 1:].strip('|') + source = input[:index] + return source, key def configure_installation_source(rel): @@ -323,16 +398,16 @@ def configure_installation_source(rel): with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: 
f.write(DISTRO_PROPOSED % ubuntu_rel) elif rel[:4] == "ppa:": - src = rel + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + subprocess.check_call(["add-apt-repository", "-y", src]) elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) + src, key = get_source_and_pgp_key(rel) + if key: import_key(key) - elif l == 1: - src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(src) elif rel[:6] == 'cloud:': @@ -377,6 +452,9 @@ def configure_installation_source(rel): 'liberty': 'trusty-updates/liberty', 'liberty/updates': 'trusty-updates/liberty', 'liberty/proposed': 'trusty-proposed/liberty', + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', } try: @@ -444,11 +522,16 @@ def openstack_upgrade_available(package): cur_vers = get_os_version_package(package) if "swift" in package: codename = get_os_codename_install_source(src) - available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + avail_vers = get_os_version_codename_swift(codename) else: - available_vers = get_os_version_install_source(src) + avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(available_vers, cur_vers) == 1 + if "swift" in package: + major_cur_vers = cur_vers.split('.', 1)[0] + major_avail_vers = avail_vers.split('.', 1)[0] + major_diff = apt.version_compare(major_avail_vers, major_cur_vers) + return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) + return apt.version_compare(avail_vers, cur_vers) == 1 def ensure_block_device(block_device): @@ -577,7 +660,7 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) -def git_clone_and_install(projects_yaml, core_project, depth=1): +def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. @@ -627,6 +710,9 @@ def git_clone_and_install(projects_yaml, core_project, depth=1): for p in projects['repositories']: repo = p['repository'] branch = p['branch'] + depth = '1' + if 'depth' in p.keys(): + depth = p['depth'] if p['name'] == 'requirements': repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -671,19 +757,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, """ Clone and install a single git repository. """ - dest_dir = os.path.join(parent_dir, os.path.basename(repo)) - if not os.path.exists(parent_dir): juju_log('Directory already exists at {}. ' 'No need to create directory.'.format(parent_dir)) os.mkdir(parent_dir) - if not os.path.exists(dest_dir): - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, - depth=depth) - else: - repo_dir = dest_dir + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) venv = os.path.join(parent_dir, 'venv') @@ -782,13 +862,23 @@ def wrapped_f(*args, **kwargs): return wrap -def set_os_workload_status(configs, required_interfaces, charm_func=None): +def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None): """ Set workload status based on complete contexts. status-set missing or incomplete contexts and juju-log details of missing required data. 
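A couple of hypothetical inputs for the get_source_and_pgp_key() helper introduced above (the repository line and key id are examples only):

src, key = get_source_and_pgp_key(
    'deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
    'trusty-updates/mitaka main|EXAMPLEKEYID')
# src is everything before the '|'; key == 'EXAMPLEKEYID'
src, key = get_source_and_pgp_key('ppa:ceph/stable')
# no '|' delimiter present, so key is None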
charm_func is a charm specific function to run checking for charm specific requirements such as a VIP setting. + + This function also checks for whether the services defined are ACTUALLY + running and that the ports they advertise are open and being listened to. + + @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}] + The ports are optional. + If services is a [] then ports are ignored. + @param ports - OPTIONAL: an [<int>] representing ports that should be + open. + @returns None """ incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) state = 'active' @@ -867,6 +957,65 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): else: message = charm_message + # If the charm thinks the unit is active, check that the actual services + # really are active. + if services is not None and state == 'active': + # if we're passed the dict() then just grab the values as a list. + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = [] + for s in services: + if isinstance(s, dict) and 'service' in s: + _s.append(s['service']) + if isinstance(s, str): + _s.append(s) + services_running = [service_running(s) for s in _s] + if not all(services_running): + not_running = [s for s, running in zip(_s, services_running) + if not running] + message = ("Services not running that should be: {}" + .format(", ".join(not_running))) + state = 'blocked' + # also verify that the ports that should be open are open + # NB, that ServiceManager objects only OPTIONALLY have ports + port_map = OrderedDict([(s['service'], s['ports']) + for s in services if 'ports' in s]) + if state == 'active' and port_map: + all_ports = list(itertools.chain(*port_map.values())) + ports_open = [port_has_listener('0.0.0.0', p) + for p in all_ports] + if not all(ports_open): + not_opened = [p for p, opened in zip(all_ports, ports_open) + if not opened] + map_not_open = OrderedDict() + for service, ports in port_map.items(): + closed_ports = set(ports).intersection(not_opened) + if closed_ports: + map_not_open[service] = closed_ports + # find which service has missing ports. They are in service + # order which makes it a bit easier.
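The mixed-list handling above reduces both plain names and ServiceManager-style dicts to service names; condensed with example services:

services = ['haproxy', {'service': 'radosgw', 'ports': [80]}]
_s = []
for s in services:
    if isinstance(s, dict) and 'service' in s:
        _s.append(s['service'])
    if isinstance(s, str):
        _s.append(s)
assert _s == ['haproxy', 'radosgw']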
+ message = ( + "Services with ports not open that should be: {}" + .format( + ", ".join([ + "{}: [{}]".format( + service, + ", ".join([str(v) for v in ports])) + for service, ports in map_not_open.items()]))) + state = 'blocked' + + if ports is not None and state == 'active': + # and we can also check ports which we don't know the service for + ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + if not all(ports_open): + message = ( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in zip(ports, ports_open) + if not v]))) + state = 'blocked' + # Set to active if all requirements have been met if state == 'active': message = "Unit is ready" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index 10b32e33..a2411c37 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -19,20 +19,35 @@ import os import subprocess +import sys from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import charm_dir, log -try: - from pip import main as pip_execute -except ImportError: - apt_update() - apt_install('python-pip') - from pip import main as pip_execute - __author__ = "Jorge Niedbalski " +def pip_execute(*args, **kwargs): + """Overridden pip_execute() to stop sys.path being changed. + + The act of importing main from the pip module seems to add wheels + from /usr/share/python-wheels, which are installed by various tools. + This function ensures that sys.path remains the same after the call is + executed. + """ + try: + _path = sys.path + try: + from pip import main as _pip_execute + except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as _pip_execute + _pip_execute(*args, **kwargs) + finally: + sys.path = _path + + def parse_options(given, available): """Given a set of options, check if available""" for key, value in sorted(given.items()): @@ -42,8 +57,12 @@ def parse_options(given, available): yield "--{0}={1}".format(key, value) -def pip_install_requirements(requirements, **options): - """Install a requirements file """ +def pip_install_requirements(requirements, constraints=None, **options): + """Install a requirements file. + + :param constraints: Path to pip constraints file.
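A hypothetical call into the constraints support added above (file names and proxy are example values):

pip_install_requirements('requirements.txt',
                         constraints='constraints.txt',
                         proxy='http://proxy.example.com:3128')
# runs: pip install --proxy=... -r requirements.txt -c constraints.txt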
+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files + """ command = ["install"] available_options = ('proxy', 'src', 'log', ) @@ -51,8 +70,13 @@ def pip_install_requirements(requirements, **options): command.append(option) command.append("-r {0}".format(requirements)) - log("Installing from file: {} with options: {}".format(requirements, - command)) + if constraints: + command.append("-c {0}".format(constraints)) + log("Installing from file: {} with constraints {} " + "and options: {}".format(requirements, constraints, command)) + else: + log("Installing from file: {} with options: {}".format(requirements, + command)) pip_execute(command) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1235389e..60ae52b8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,10 +23,11 @@ # James Page # Adam Gandelman # +import bisect +import six import os import shutil -import six import json import time import uuid @@ -73,35 +74,372 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + def __init__(self, message): + super(PoolCreationError, self).__init__(message) - apt_install('ceph-common', fatal=True) +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). 
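validator() in action, mirroring its docstring (assumes six is imported as in the module above; the failing call is left commented out):

validator(value=1, valid_type=int, valid_range=[0, 2])    # passes
validator(value='readonly', valid_type=six.string_types,
          valid_range=['readonly', 'writeback'])          # passes
# validator(value=3, valid_type=int, valid_range=[0, 2])  # AssertionError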
+ """ + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + # Flush the cache and wait for it to return + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size): + """ + :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for + erasure coded pools + :return: int. The number of pgs to use. 
+ """ + validator(value=pool_size, valid_type=int) + osds = get_osds(self.service) + if not osds: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return 200 + + # Calculate based on Ceph best practices + if osds < 5: + return 128 + elif 5 < osds < 10: + return 512 + elif 10 < osds < 50: + return 4096 + else: + estimate = (osds * 100) / pool_size + # Return the next nearest power of 2 + index = bisect.bisect_right(powers_of_two, estimate) + return powers_of_two[index] + + +class ReplicatedPool(Pool): + def __init__(self, service, name, replicas=2): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + pgs = self.get_pgs(self.replicas) + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default"): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information so we can properly size the pgs + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), + level=ERROR) + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), + level=ERROR) + raise PoolCreationError( + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) + + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + 'erasure', self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param name: + :return: + """ try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + try: + check_call(cmd) except CalledProcessError: - return False + raise - return rbd_img in out + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. 
Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: @@ -126,6 +464,33 @@ def get_osds(service): return None +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + def update_pool(client, pool, settings): cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): @@ -414,6 +779,7 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + def __init__(self, api_version=1, request_id=None): self.api_version = api_version if request_id: diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 454b52ae..2dd70bc9 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -492,7 +492,7 @@ def relation_types(): @cached def peer_relation_id(): - '''Get a peer relation id if a peer relation has been joined, else None.''' + '''Get the peers relation id if a peers relation has been joined, else None.''' md = metadata() section = md.get('peers') if section: @@ -517,12 +517,12 @@ def relation_to_interface(relation_name): def relation_to_role_and_interface(relation_name): """ Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. """ _metadata = metadata() - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') if interface: return role, interface @@ -534,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name): """ Given a role and interface name, return a list of relation names for the current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peer``). + of ``provides``, ``requires``, or ``peers``). :returns: A list of relation names. """ @@ -555,7 +555,7 @@ def interface_to_relations(interface_name): :returns: A list of relation names. 
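Putting the erasure-code helpers above together, a hypothetical 3+2 profile and pool (the service, profile and pool names are examples only):

create_erasure_profile(service='admin', profile_name='jerasure-3-2',
                       data_chunks=3, coding_chunks=2,
                       failure_domain='host')
pool = ErasurePool(service='admin', name='objects',
                   erasure_code_profile='jerasure-3-2')
pool.create()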
""" results = [] - for role in ('provides', 'requires', 'peer'): + for role in ('provides', 'requires', 'peers'): results.extend(role_and_interface_to_relations(role, interface_name)) return results @@ -637,7 +637,7 @@ def unit_private_ip(): @cached -def storage_get(attribute="", storage_id=""): +def storage_get(attribute=None, storage_id=None): """Get storage attributes""" _args = ['storage-get', '--format=json'] if storage_id: @@ -651,7 +651,7 @@ def storage_get(attribute="", storage_id=""): @cached -def storage_list(storage_name=""): +def storage_list(storage_name=None): """List the storage IDs for the unit""" _args = ['storage-list', '--format=json'] if storage_name: @@ -878,6 +878,40 @@ def leader_set(settings=None, **kwargs): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 579871bc..a7720906 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -72,7 +72,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): stopped = service_stop(service_name) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: @@ -80,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "disable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) return stopped @@ -94,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init", Reenable starting again at boot. 
Start the service""" upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if os.path.exists(upstart_file): + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): @@ -102,9 +106,9 @@ def service_resume(service_name, init_dir="/etc/init", elif os.path.exists(sysv_file): subprocess.check_call(["update-rc.d", service_name, "enable"]) else: - # XXX: Support SystemD too raise ValueError( - "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( service_name, upstart_file, sysv_file)) started = service_running(service_name) @@ -115,23 +119,30 @@ def service_resume(service_name, init_dir="/etc/init", def service(action, service_name): """Control a system service""" - cmd = ['service', service_name, action] + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] return subprocess.call(cmd) == 0 -def service_running(service): +def service_running(service_name): """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False + if init_is_systemd(): + return service('is-active', service_name) else: - if ("start/running" in output or "is running" in output): - return True - else: + try: + output = subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: return False + else: + if ("start/running" in output or "is running" in output or + "up and running" in output): + return True + else: + return False def service_available(service_name): @@ -146,8 +157,29 @@ def service_available(service_name): return True -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', system_user=False, + primary_group=None, secondary_groups=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
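All of the systemd awareness threaded through these helpers hinges on a single filesystem probe, init_is_systemd(), which checks for /run/systemd/system. A rough sketch of the resulting behaviour (the service name is hypothetical):

    from charmhelpers.core.host import (
        init_is_systemd,
        service_pause,
        service_resume,
    )

    if init_is_systemd():
        # On systemd hosts these now run 'systemctl disable/enable' in
        # addition to stopping/starting the unit; on Upstart or SysV hosts
        # the older override-file/update-rc.d paths are used instead.
        service_pause('radosgw')
        service_resume('radosgw')
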
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) @@ -162,6 +194,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): '--shell', shell, '--password', password, ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) cmd.append(username) subprocess.check_call(cmd) user_info = pwd.getpwnam(username) @@ -259,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444): def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab - """ + """Remove the given mountpoint entry from /etc/fstab""" return Fstab.remove_by_mountpoint(mp) def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file - """ + """Adds the given device entry to the /etc/fstab file""" return Fstab.add(dev, mp, fs, options=options) @@ -322,8 +362,7 @@ def fstab_mount(mountpoint): def file_hash(path, hash_type='md5'): - """ - Generate a hash checksum of the contents of 'path' or None if not found. + """Generate a hash checksum of the contents of 'path' or None if not found. :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. @@ -338,10 +377,9 @@ def file_hash(path, hash_type='md5'): def path_hash(path): - """ - Generate a hash checksum of all files matching 'path'. Standard wildcards - like '*' and '?' are supported, see documentation for the 'glob' module for - more information. + """Generate a hash checksum of all files matching 'path'. Standard + wildcards like '*' and '?' are supported, see documentation for the 'glob' + module for more information. :return: dict: A { filename: hash } dictionary for all matched files. Empty if none found. @@ -353,8 +391,7 @@ def path_hash(path): def check_hash(path, checksum, hash_type='md5'): - """ - Validate a file using a cryptographic checksum. + """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. 
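These hashing helpers are typically used to decide whether a change to a rendered file warrants a service restart. A short sketch (the path and checksum value are hypothetical):

    from charmhelpers.core.host import ChecksumError, check_hash, file_hash

    path = '/etc/ceph/ceph.conf'                 # hypothetical target file

    before = file_hash(path, hash_type='md5')    # None if path is missing
    # ... re-render the file here ...
    changed = file_hash(path, hash_type='md5') != before

    try:
        check_hash(path, 'd41d8cd98f00b204e9800998ecf8427e')  # hypothetical sum
    except ChecksumError:
        pass  # checksum did not match
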
@@ -369,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'): class ChecksumError(ValueError): + """A class derived from Value error to indicate the checksum failed.""" pass @@ -474,7 +512,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): - '''Return a list of nics of given type(s)''' + """Return a list of nics of given type(s)""" if isinstance(nic_type, six.string_types): int_types = [nic_type] else: @@ -516,12 +554,13 @@ def list_nics(nic_type=None): def set_nic_mtu(nic, mtu): - '''Set MTU on a network interface''' + """Set the Maximum Transmission Unit (MTU) on a network interface.""" cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] subprocess.check_call(cmd) def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" @@ -533,6 +572,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" @@ -543,7 +583,7 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): - '''Compare supplied revno with the revno of the installed package + """Compare supplied revno with the revno of the installed package * 1 => Installed revno is greater than supplied arg * 0 => Installed revno is the same as supplied arg @@ -552,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): This function imports apt_cache function from charmhelpers.fetch if the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. - ''' + """ import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache @@ -562,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None): @contextmanager -def chdir(d): +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ cur = os.getcwd() try: - yield os.chdir(d) + yield os.chdir(directory) finally: os.chdir(cur) def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """ - Recursively change user and group ownership of files and directories + """Recursively change user and group ownership of files and directories in given path. Doesn't chown path itself by default, only its children. + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. :param bool follow_links: Also Chown links if True :param bool chowntopdir: Also chown path itself if True """ @@ -598,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ """ chownr(path, owner, group, follow_links=False) def get_total_ram(): - '''The total amount of system RAM in bytes. + """The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine. - ''' + """ with open('/proc/meminfo', 'r') as f: for line in f.readlines(): if line: diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 12d768e6..24237042 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -243,13 +243,15 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` - :param str target: The target to write the rendered template to + :param str target: The target to write the rendered template to (or None) :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file :param partial on_change_action: functools partial to be executed when rendered file changes :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template """ def __init__(self, source, target, owner='root', group='root', perms=0o444, @@ -267,12 +269,14 @@ def __call__(self, manager, service_name, event_name): if self.on_change_action and os.path.isfile(self.target): pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) - context = {} + context = {'ctx': {}} for ctx in service.get('required_data', []): context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) if self.on_change_action: if pre_checksum == host.file_hash(self.target): hookenv.log( @@ -281,6 +285,8 @@ def __call__(self, manager, service_name, event_name): else: self.on_change_action() + return result + # Convenience aliases for templates render_template = template = TemplateCallback diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 239719d4..d2d8eafe 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -27,7 +27,8 @@ def render(source, target, context, owner='root', group='root', The `source` path, if not absolute, is relative to the `templates_dir`. - The `target` path should be absolute. + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. The context should be a dict containing the values to be replaced in the template. @@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root', If omitted, `templates_dir` defaults to the `templates` folder in the charm. + The rendered template will be written to the file as well as being returned + as a string. + Note: Using this requires python-jinja2; if it is not installed, calling this will attempt to use charmhelpers.fetch.apt_install to install it. 
""" @@ -67,9 +71,11 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 5f831c35..db0d86a2 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -98,6 +98,14 @@ 'liberty/proposed': 'trusty-proposed/liberty', 'trusty-liberty/proposed': 'trusty-proposed/liberty', 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', } # The order of this list is very important. Handlers should be listed in from @@ -411,7 +419,7 @@ def plugins(fetch_handlers=None): importlib.import_module(package), classname) plugin_list.append(handler_class()) - except (ImportError, AttributeError): + except NotImplementedError: # Skip missing plugins so that they can be ommitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index efd7f9f0..b8e0943d 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -108,7 +108,7 @@ def download(self, source, dest): install_opener(opener) response = urlopen(source) try: - with open(dest, 'w') as dest_file: + with open(dest, 'wb') as dest_file: dest_file.write(response.read()) except Exception as e: if os.path.isfile(dest): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index 3531315a..cafd27f7 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -15,60 +15,50 @@ # along with charm-helpers. If not, see . 
import os +from subprocess import check_call from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors +if filter_installed_packages(['bzr']) != []: + apt_install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): """Handler for bazaar branches via generic and lp URLs""" def can_handle(self, source): url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) else: return True def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e + if os.path.exists(dest): + check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + else: + check_call(['bzr', 'branch', source, dest]) - def install(self, source): + def install(self, source, dest=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index f023b26d..65ed5319 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -15,24 +15,18 @@ # along with charm-helpers. If not, see . 
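The rewrite above drops the bzrlib import (which blocked Python 3) in favour of shelling out to the bzr CLI, installing the package on demand. A rough usage sketch (the branch URL and destination are hypothetical); the git handler reworked below follows the same subprocess pattern:

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    source = 'lp:charm-helpers'                  # hypothetical branch
    if handler.can_handle(source):
        # The first call branches; later calls 'bzr pull --overwrite' into dest.
        handler.branch(source, '/tmp/charm-helpers')
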
import os +from subprocess import check_call, CalledProcessError from charmhelpers.fetch import ( BaseFetchHandler, - UnhandledSource + UnhandledSource, + filter_installed_packages, + apt_install, ) -from charmhelpers.core.host import mkdir -import six -if six.PY3: - raise ImportError('GitPython does not support Python 3') - -try: - from git import Repo -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-git") - from git import Repo - -from git.exc import GitCommandError # noqa E402 +if filter_installed_packages(['git']) != []: + apt_install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): @@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git'): + if url_parts.scheme not in ('http', 'https', 'git', ''): return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) else: return True - def clone(self, source, dest, branch, depth=None): + def clone(self, source, dest, branch="master", depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - if depth: - Repo.clone_from(source, dest, branch=branch, depth=depth) + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] else: - Repo.clone_from(source, dest, branch=branch) + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', depth]) + check_call(cmd) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) @@ -62,11 +61,9 @@ def install(self, source, branch="master", dest=None, depth=None): else: dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch, depth) - except GitCommandError as e: + except CalledProcessError as e: raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 0506491b..d2ede320 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -121,11 +121,12 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup'] if self.openstack: for svc in services: @@ -225,7 +226,8 @@ def _get_openstack_release(self): self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty) = range(12) + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) releases = { ('precise', None): self.precise_essex, @@ -237,9 +239,11 @@ def _get_openstack_release(self): ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty} + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -256,6 +260,7 @@ def _get_openstack_release_string(self): ('utopic', 'juno'), ('vivid', 'kilo'), ('wily', 'liberty'), + ('xenial', 'mitaka'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] From c531536f82770cba579ba7451613907a3383776b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:18:48 +0000 Subject: [PATCH 1020/2699] Tidy lint --- ceph-radosgw/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index ed0e1c7d..71ff4996 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -247,7 +247,7 @@ def test_201_ceph_radosgw_relation(self): expected = { 'private-address': u.valid_ip, 'radosgw_key': u.not_null, - 'auth': 'none', + 'auth': 'none', 'ceph-public-address': u.valid_ip, 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' } From 9ebcc3dcbeade3344fac5058f6c7db33642bc876 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:39:08 +0000 Subject: [PATCH 1021/2699] Update amulet test --- ceph-radosgw/tests/basic_deployment.py | 36 -------------------------- 1 file changed, 36 deletions(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 71ff4996..907dc051 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -260,42 +260,6 @@ def test_201_ceph_radosgw_relation(self): message = u.relation_error('ceph to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_202_ceph1_ceph_radosgw_relation(self): - """Verify the ceph1 to ceph-radosgw relation data.""" - u.log.debug('Checking ceph1:radosgw ceph-radosgw:mon relation data...') - unit = self.ceph1_sentry - relation = ['radosgw', 'ceph-radosgw:mon'] - expected = { - 'private-address': u.valid_ip, - 'radosgw_key': u.not_null, - 'auth': 'none', - 'ceph-public-address': u.valid_ip, - 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph1 to ceph-radosgw', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_203_ceph2_ceph_radosgw_relation(self): - """Verify the ceph2 to ceph-radosgw relation data.""" - u.log.debug('Checking ceph2:radosgw 
ceph-radosgw:mon relation data...') - unit = self.ceph2_sentry - relation = ['radosgw', 'ceph-radosgw:mon'] - expected = { - 'private-address': u.valid_ip, - 'radosgw_key': u.not_null, - 'auth': 'none', - 'ceph-public-address': u.valid_ip, - 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph2 to ceph-radosgw', ret) - amulet.raise_status(amulet.FAIL, msg=message) - def test_204_ceph_radosgw_keystone_relation(self): """Verify the ceph-radosgw to keystone relation data.""" u.log.debug('Checking ceph-radosgw to keystone id service '
From 74539c33ff6c516fd70566695563313772e83d8b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:46:07 +0000 Subject: [PATCH 1022/2699] Fix unit test re: c-h sync --- ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 1 + 1 file changed, 1 insertion(+)
diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index a2501daf..7c049afe 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -87,6 +87,7 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'admin_tenant_name': 'ten', 'admin_token': 'ubuntutesting', 'admin_user': 'admin', + 'api_version': '2.0', 'auth_host': '127.0.0.5', 'auth_port': 5432, 'auth_protocol': 'http',
From a3319932cb39c7f5f1ef9ab4c9c1ec508551673b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 17 Feb 2016 22:52:17 +0000 Subject: [PATCH 1023/2699] Disable Xenial test re: swift api fail --- ceph-radosgw/tests/021-basic-xenial-mitaka | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 ceph-radosgw/tests/021-basic-xenial-mitaka
diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/021-basic-xenial-mitaka old mode 100755 new mode 100644
From 0e82276603b4a86292868d3a0ac8c85ad5f8c6b7 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 18 Feb 2016 11:05:18 +0000 Subject: [PATCH 1024/2699] Resync actions bits from ceph --- ceph-proxy/README.md | 6 ++++++ ceph-proxy/actions.yaml | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 9235b3df..103e57f7 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -41,6 +41,12 @@ By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices. +## Actions + +This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine.
To pause or resume, call: + +`juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` + ## Scale Out Usage You can use the Ceph OSD and Ceph Radosgw charms:
diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml index 5f9310c5..9cb421a7 100644 --- a/ceph-proxy/actions.yaml +++ b/ceph-proxy/actions.yaml @@ -1,4 +1,4 @@ pause-health: - description: Pause ceph health operations + description: Pause ceph health operations across the entire ceph cluster resume-health: - description: Resume ceph health operations \ No newline at end of file + description: Resume ceph health operations across the entire ceph cluster
From f01c0d0e7daf2141e233e7da07cf9c655dd7a08d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 18 Feb 2016 11:05:18 +0000 Subject: [PATCH 1025/2699] Resync actions bits from ceph --- ceph-mon/README.md | 6 ++++++ ceph-mon/actions.yaml | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 9235b3df..103e57f7 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -41,6 +41,12 @@ By default the ceph cluster will not bootstrap until 3 service units have been deployed and started; this is to ensure that a quorum is achieved prior to adding storage devices. +## Actions + +This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. To pause or resume, call: + +`juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` + ## Scale Out Usage You can use the Ceph OSD and Ceph Radosgw charms:
diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 5f9310c5..9cb421a7 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -1,4 +1,4 @@ pause-health: - description: Pause ceph health operations + description: Pause ceph health operations across the entire ceph cluster resume-health: - description: Resume ceph health operations \ No newline at end of file + description: Resume ceph health operations across the entire ceph cluster
From 9f89f1968f4d99002395b2663d803d2bfceaac65 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 18 Feb 2016 11:39:47 +0000 Subject: [PATCH 1026/2699] post-review fixes --- ceph-osd/hooks/utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+)
diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 1823790a..f0c98df0 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -88,6 +88,11 @@ def get_host_ip(hostname=None): def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. + """ networks = config(config_opt) if networks: networks = networks.split() @@ -96,6 +101,16 @@ def get_networks(config_opt='ceph-public-network'): return [] + +def get_public_addr(fallback=None): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the first + address we have configured on any of those networks.
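A sketch of what these two helpers return for a unit that only holds an address on the first of two configured networks (all values hypothetical):

    from utils import get_networks, get_public_addr  # charm hooks directory

    # config('ceph-public-network') == '10.0.0.0/24 10.10.0.0/24'
    get_networks('ceph-public-network')   # -> ['10.0.0.0/24']
    get_public_addr()                     # -> '10.0.0.1'
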
+ """ + return get_address_in_network(config('ceph-public-network'), + fallback=fallback) + + def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": From cb0e93f9b09293ed70b867b9976919a5b9cce549 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 18 Feb 2016 17:10:53 +0000 Subject: [PATCH 1027/2699] remove unused function --- ceph-osd/hooks/utils.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index f0c98df0..0071ecbd 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -101,16 +101,6 @@ def get_networks(config_opt='ceph-public-network'): return [] -def get_public_addr(fallback=None): - """Get all configured public networks addresses. - - If public network(s) are provided, go through them and return the first - address we have configured on any of those networks. - """ - return get_address_in_network(config('ceph-public-network'), - fallback=fallback) - - def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": From 8589666cc1c419095f10490114b38e4143c64194 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Feb 2016 15:20:53 -0500 Subject: [PATCH 1028/2699] fix amulet --- ceph-radosgw/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 907dc051..9c47b49f 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -397,7 +397,6 @@ def test_306_nova_rbd_config(self): conf = '/etc/nova/nova.conf' expected = { 'libvirt': { - 'rbd_pool': 'nova', 'rbd_user': 'nova-compute', 'rbd_secret_uuid': u.not_null } From 1a796ae314e50184b881da6b8fa96eb628db1acb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Feb 2016 15:21:12 -0500 Subject: [PATCH 1029/2699] fix amulet --- ceph-osd/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 61a4fe0a..42fc1b91 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -384,7 +384,6 @@ def test_306_nova_rbd_config(self): conf = '/etc/nova/nova.conf' expected = { 'libvirt': { - 'rbd_pool': 'nova', 'rbd_user': 'nova-compute', 'rbd_secret_uuid': u.not_null } From 9b54820a4fb803b5ca1975596b8e3fc5e1f6d7d5 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Feb 2016 22:32:13 -0500 Subject: [PATCH 1030/2699] fix amulet --- ceph-proxy/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index f6fd5418..4825031d 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -387,7 +387,6 @@ def test_306_nova_rbd_config(self): conf = '/etc/nova/nova.conf' expected = { 'libvirt': { - 'rbd_pool': 'nova', 'rbd_user': 'nova-compute', 'rbd_secret_uuid': u.not_null } From 2c7ab38a7c8c37f26ff73598fcfd98291713f162 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Feb 2016 22:32:13 -0500 Subject: [PATCH 1031/2699] fix amulet --- ceph-mon/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index f6fd5418..4825031d 100644 --- a/ceph-mon/tests/basic_deployment.py +++ 
b/ceph-mon/tests/basic_deployment.py @@ -387,7 +387,6 @@ def test_306_nova_rbd_config(self): conf = '/etc/nova/nova.conf' expected = { 'libvirt': { - 'rbd_pool': 'nova', 'rbd_user': 'nova-compute', 'rbd_secret_uuid': u.not_null } From b7058e68f1bb97ad4fa8f344f60ba9c3eee20754 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:43:27 -0500 Subject: [PATCH 1032/2699] [hopem,r=] Support multiple l3 segments. Closes-Bug: 1523871 --- ceph-proxy/hooks/ceph_hooks.py | 27 ++++++++++++++++------- ceph-proxy/hooks/utils.py | 39 ++++++++++++++++++++++++++++++---- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 2a0d1682..cdf4c5b2 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -54,6 +54,7 @@ from charmhelpers.core.templating import render from utils import ( + get_networks, get_public_addr, assert_charm_supports_ipv6 ) @@ -88,6 +89,12 @@ def install(): def emit_cephconf(): + networks = get_networks('ceph-public-network') + public_network = ', '.join(networks) + + networks = get_networks('ceph-cluster-network') + cluster_network = ', '.join(networks) + cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -95,16 +102,16 @@ def emit_cephconf(): 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network'), + 'ceph_public_network': public_network, + 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), } if config('prefer-ipv6'): dynamic_ipv6_address = get_ipv6_addr()[0] - if not config('ceph-public-network'): + if not public_network: cephcontext['public_addr'] = dynamic_ipv6_address - if not config('ceph-cluster-network'): + if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address # Install ceph.conf as an alternative to support @@ -196,10 +203,11 @@ def get_peer_units(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): + public_addr = get_public_addr() for relid in relation_ids('mon'): relation_set(relation_id=relid, relation_settings={'ceph-public-address': - get_public_addr()}) + public_addr}) @hooks.hook('mon-relation-departed', @@ -258,11 +266,12 @@ def upgrade_keys(): def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') + public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), + 'ceph-public-address': public_addr, } relation_set(relation_id=relid, relation_settings=data) @@ -288,11 +297,12 @@ def radosgw_relation(relid=None, unit=None): unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') + public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), + 'ceph-public-address': public_addr, unit_response_key: rsp, } relation_set(relation_id=relid, relation_settings=data) @@ -314,9 +324,10 @@ def client_relation_joined(relid=None): service_name = units[0].split('/')[0] if service_name is not None: + public_addr = get_public_addr() data = {'key': ceph.get_named_key(service_name), 
'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr()} + 'ceph-public-address': public_addr} relation_set(relation_id=relid, relation_settings=data) else: diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 5a196d47..fec7d9c1 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -23,8 +23,8 @@ lsb_release ) -from charmhelpers.contrib.network import ip from charmhelpers.contrib.network.ip import ( + get_address_in_network, get_ipv6_addr ) @@ -71,10 +71,41 @@ def get_host_ip(hostname=None): return answers[0].address -@cached +def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. + """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] + + def get_public_addr(): - return ip.get_address_in_network(config('ceph-public-network'), - fallback=get_host_ip()) + return get_network_addrs('ceph-public-network', fallback=get_host_ip())[0] + + +def get_network_addrs(config_opt, fallback=None): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the + addresses we have configured on any of those networks. + """ + addrs = [] + networks = config(config_opt) + if networks: + networks = networks.split() + addrs = [get_address_in_network(n) for n in networks] + addrs = [a for a in addrs if a] + + if not addrs and fallback: + return [fallback] + + return addrs def assert_charm_supports_ipv6(): From 72c73391ea62bbfe6828cae63a27dfc12a0b5a30 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:43:27 -0500 Subject: [PATCH 1033/2699] [hopem,r=] Support multiple l3 segments. 
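The net effect of the change in emit_cephconf() and the relation hooks, sketched with hypothetical values:

    from utils import get_network_addrs, get_networks  # charm hooks directory

    # With ceph-public-network set to '10.0.0.0/24 10.10.0.0/24':
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)
    # ceph.conf then carries: public network = 10.0.0.0/24, 10.10.0.0/24

    # get_network_addrs() resolves one local address per matching network,
    # e.g. ['10.0.0.1', '10.10.0.1'], so a mon can serve several segments.
    addrs = get_network_addrs('ceph-public-network')
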
Closes-Bug: 1523871 --- ceph-mon/hooks/ceph_hooks.py | 27 +++++++++++++++++-------- ceph-mon/hooks/utils.py | 39 ++++++++++++++++++++++++++++++++---- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 2a0d1682..cdf4c5b2 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -54,6 +54,7 @@ from charmhelpers.core.templating import render from utils import ( + get_networks, get_public_addr, assert_charm_supports_ipv6 ) @@ -88,6 +89,12 @@ def install(): def emit_cephconf(): + networks = get_networks('ceph-public-network') + public_network = ', '.join(networks) + + networks = get_networks('ceph-cluster-network') + cluster_network = ', '.join(networks) + cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': ' '.join(get_mon_hosts()), @@ -95,16 +102,16 @@ def emit_cephconf(): 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': config('ceph-public-network'), - 'ceph_cluster_network': config('ceph-cluster-network'), + 'ceph_public_network': public_network, + 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), } if config('prefer-ipv6'): dynamic_ipv6_address = get_ipv6_addr()[0] - if not config('ceph-public-network'): + if not public_network: cephcontext['public_addr'] = dynamic_ipv6_address - if not config('ceph-cluster-network'): + if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address # Install ceph.conf as an alternative to support @@ -196,10 +203,11 @@ def get_peer_units(): @hooks.hook('mon-relation-joined') def mon_relation_joined(): + public_addr = get_public_addr() for relid in relation_ids('mon'): relation_set(relation_id=relid, relation_settings={'ceph-public-address': - get_public_addr()}) + public_addr}) @hooks.hook('mon-relation-departed', @@ -258,11 +266,12 @@ def upgrade_keys(): def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') + public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), + 'ceph-public-address': public_addr, } relation_set(relation_id=relid, relation_settings=data) @@ -288,11 +297,12 @@ def radosgw_relation(relid=None, unit=None): unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id log('mon cluster in quorum - providing radosgw with keys') + public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr(), + 'ceph-public-address': public_addr, unit_response_key: rsp, } relation_set(relation_id=relid, relation_settings=data) @@ -314,9 +324,10 @@ def client_relation_joined(relid=None): service_name = units[0].split('/')[0] if service_name is not None: + public_addr = get_public_addr() data = {'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), - 'ceph-public-address': get_public_addr()} + 'ceph-public-address': public_addr} relation_set(relation_id=relid, relation_settings=data) else: diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 5a196d47..fec7d9c1 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -23,8 +23,8 @@ lsb_release ) -from charmhelpers.contrib.network import ip from 
charmhelpers.contrib.network.ip import ( + get_address_in_network, get_ipv6_addr ) @@ -71,10 +71,41 @@ def get_host_ip(hostname=None): return answers[0].address -@cached +def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. + """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] + + def get_public_addr(): - return ip.get_address_in_network(config('ceph-public-network'), - fallback=get_host_ip()) + return get_network_addrs('ceph-public-network', fallback=get_host_ip())[0] + + +def get_network_addrs(config_opt, fallback=None): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the + addresses we have configured on any of those networks. + """ + addrs = [] + networks = config(config_opt) + if networks: + networks = networks.split() + addrs = [get_address_in_network(n) for n in networks] + addrs = [a for a in addrs if a] + + if not addrs and fallback: + return [fallback] + + return addrs def assert_charm_supports_ipv6(): From 0db2f5df7d0914fc70f9154794d60b10803f70d2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:56:04 -0500 Subject: [PATCH 1034/2699] only use fallback for get_public_addr() if networks not provided in config --- ceph-proxy/hooks/utils.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index fec7d9c1..9b42159a 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -12,7 +12,8 @@ from charmhelpers.core.hookenv import ( unit_get, cached, - config + config, + status_set, ) from charmhelpers.fetch import ( apt_install, @@ -86,10 +87,10 @@ def get_networks(config_opt='ceph-public-network'): def get_public_addr(): - return get_network_addrs('ceph-public-network', fallback=get_host_ip())[0] + return get_network_addrs('ceph-public-network')[0] -def get_network_addrs(config_opt, fallback=None): +def get_network_addrs(config_opt): """Get all configured public networks addresses. 
If public network(s) are provided, go through them and return the @@ -102,8 +103,14 @@ def get_network_addrs(config_opt, fallback=None): addrs = [get_address_in_network(n) for n in networks] addrs = [a for a in addrs if a] - if not addrs and fallback: - return [fallback] + if not addrs: + if networks: + msg = ("Could not find an address on any of '%s' - resolve this " + "error to retry" % (networks)) + status_set('blocked', msg) + raise Exception(msg) + else: + return [get_host_ip()] return addrs From 0feb05732399246cf1583441da5a7df80a292ee0 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:56:04 -0500 Subject: [PATCH 1035/2699] only use fallback for get_public_addr() if networks not provided in config --- ceph-mon/hooks/utils.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index fec7d9c1..9b42159a 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -12,7 +12,8 @@ from charmhelpers.core.hookenv import ( unit_get, cached, - config + config, + status_set, ) from charmhelpers.fetch import ( apt_install, @@ -86,10 +87,10 @@ def get_networks(config_opt='ceph-public-network'): def get_public_addr(): - return get_network_addrs('ceph-public-network', fallback=get_host_ip())[0] + return get_network_addrs('ceph-public-network')[0] -def get_network_addrs(config_opt, fallback=None): +def get_network_addrs(config_opt): """Get all configured public networks addresses. If public network(s) are provided, go through them and return the @@ -102,8 +103,14 @@ def get_network_addrs(config_opt, fallback=None): addrs = [get_address_in_network(n) for n in networks] addrs = [a for a in addrs if a] - if not addrs and fallback: - return [fallback] + if not addrs: + if networks: + msg = ("Could not find an address on any of '%s' - resolve this " + "error to retry" % (networks)) + status_set('blocked', msg) + raise Exception(msg) + else: + return [get_host_ip()] return addrs From 81367cf1b174b96719aeb9b203bbc8a958ff2909 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:59:01 -0500 Subject: [PATCH 1036/2699] update config.yaml --- ceph-proxy/config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 6a5f8c3d..30abb8a6 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -70,12 +70,18 @@ options: description: | The IP address and netmask of the public (front-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. ceph-cluster-network: type: string default: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. prefer-ipv6: type: boolean default: False From 9a99bdb467ae32cc3a72800ab1bdca75bf5e635e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 24 Feb 2016 15:59:01 -0500 Subject: [PATCH 1037/2699] update config.yaml --- ceph-mon/config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 6a5f8c3d..30abb8a6 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -70,12 +70,18 @@ options: description: | The IP address and netmask of the public (front-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. 
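Operationally, the multi-network support is driven entirely from charm config; a hypothetical deployment using the juju 1.x syntax quoted in the READMEs above:

    juju set ceph-mon ceph-public-network="10.0.0.0/24 10.10.0.0/24"
    juju set ceph-mon ceph-cluster-network="192.168.0.0/24 192.168.1.0/24"

Note that after the preceding change, a unit that cannot find a local address on any listed network is marked 'blocked' with a resolve-to-retry message rather than silently falling back to the unit-get address.
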
ceph-cluster-network: type: string default: description: | The IP address and netmask of the cluster (back-side) network (e.g., 192.168.0.0/24) + . + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. prefer-ipv6: type: boolean default: False From 60ae21d88f445ee1bfca2ad39d77f0bf7cba5233 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 24 Feb 2016 21:53:28 +0000 Subject: [PATCH 1038/2699] Add gitreview prior to migration to openstack --- ceph-osd/.gitreview | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-osd/.gitreview diff --git a/ceph-osd/.gitreview b/ceph-osd/.gitreview new file mode 100644 index 00000000..c365f65e --- /dev/null +++ b/ceph-osd/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-osd.git From aa3783f8f65b74ffaeef96b9521319b56867776f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 24 Feb 2016 21:53:28 +0000 Subject: [PATCH 1039/2699] Add gitreview prior to migration to openstack --- ceph-proxy/.gitreview | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-proxy/.gitreview diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview new file mode 100644 index 00000000..f13dc9dd --- /dev/null +++ b/ceph-proxy/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-mon.git From b352b6d198f0e4258e7607f0e7922d1331745c0f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 24 Feb 2016 21:53:28 +0000 Subject: [PATCH 1040/2699] Add gitreview prior to migration to openstack --- ceph-mon/.gitreview | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-mon/.gitreview diff --git a/ceph-mon/.gitreview b/ceph-mon/.gitreview new file mode 100644 index 00000000..f13dc9dd --- /dev/null +++ b/ceph-mon/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-mon.git From 0b33bc31791a4bf8efeba20cda926d3b4be1d040 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 24 Feb 2016 21:53:29 +0000 Subject: [PATCH 1041/2699] Add gitreview prior to migration to openstack --- ceph-radosgw/.gitreview | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-radosgw/.gitreview diff --git a/ceph-radosgw/.gitreview b/ceph-radosgw/.gitreview new file mode 100644 index 00000000..b6cf22ea --- /dev/null +++ b/ceph-radosgw/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-radosgw.git From 5450ddd5e2559bafa1e1ea4a913b0915a6b7d590 Mon Sep 17 00:00:00 2001 From: uoscibot Date: Mon, 29 Feb 2016 10:45:55 +0000 Subject: [PATCH 1042/2699] Adapt imports and metadata for github move --- ceph-osd/{.bzrignore => .gitignore} | 1 + 1 file changed, 1 insertion(+) rename ceph-osd/{.bzrignore => .gitignore} (81%) diff --git a/ceph-osd/.bzrignore b/ceph-osd/.gitignore similarity index 81% rename from ceph-osd/.bzrignore rename to ceph-osd/.gitignore index ed5e46cf..d089d14d 100644 --- a/ceph-osd/.bzrignore +++ b/ceph-osd/.gitignore @@ -3,3 +3,4 @@ .tox .testrepository bin +*.sw[nop] From 0d062bad0144dbe950470ef2439aafd50d4ae38e Mon Sep 17 00:00:00 2001 From: uoscibot Date: Mon, 29 Feb 2016 10:46:04 +0000 Subject: [PATCH 1043/2699] Adapt imports and metadata for github move --- ceph-proxy/{.bzrignore => .gitignore} | 1 + 1 file changed, 1 insertion(+) rename ceph-proxy/{.bzrignore => .gitignore} (77%) diff --git a/ceph-proxy/.bzrignore b/ceph-proxy/.gitignore similarity index 77% rename from ceph-proxy/.bzrignore rename to 
ceph-proxy/.gitignore index 2f108ef8..d9f5c5f2 100644 --- a/ceph-proxy/.bzrignore +++ b/ceph-proxy/.gitignore @@ -2,3 +2,4 @@ bin .coverage .testrepository .tox +*.sw[nop] From fa3347bb462650691d4a861e2497f555fd2f4465 Mon Sep 17 00:00:00 2001 From: uoscibot Date: Mon, 29 Feb 2016 10:46:04 +0000 Subject: [PATCH 1044/2699] Adapt imports and metadata for github move --- ceph-mon/{.bzrignore => .gitignore} | 1 + 1 file changed, 1 insertion(+) rename ceph-mon/{.bzrignore => .gitignore} (77%) diff --git a/ceph-mon/.bzrignore b/ceph-mon/.gitignore similarity index 77% rename from ceph-mon/.bzrignore rename to ceph-mon/.gitignore index 2f108ef8..d9f5c5f2 100644 --- a/ceph-mon/.bzrignore +++ b/ceph-mon/.gitignore @@ -2,3 +2,4 @@ bin .coverage .testrepository .tox +*.sw[nop] From 68fb134f579770a3dca8925b43d7533891f979bf Mon Sep 17 00:00:00 2001 From: uoscibot Date: Mon, 29 Feb 2016 10:46:11 +0000 Subject: [PATCH 1045/2699] Adapt imports and metadata for github move --- ceph-radosgw/{.bzrignore => .gitignore} | 1 + 1 file changed, 1 insertion(+) rename ceph-radosgw/{.bzrignore => .gitignore} (80%) diff --git a/ceph-radosgw/.bzrignore b/ceph-radosgw/.gitignore similarity index 80% rename from ceph-radosgw/.bzrignore rename to ceph-radosgw/.gitignore index 9ad1b67f..a16ea07e 100644 --- a/ceph-radosgw/.bzrignore +++ b/ceph-radosgw/.gitignore @@ -3,3 +3,4 @@ bin .testrepository .tox tags +*.sw[nop] From c54e5a3d1013489f3241a3a25cc6c0628f7377f0 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:27:34 +0000 Subject: [PATCH 1046/2699] Fix tox configuration Rename lint->pep8, ensure that sitepackages are used with newer versions of tox/virtualenv. Change-Id: Ia6b6e27692e581439e6e5a74ecaeb6b3a1e0742f --- ceph-proxy/tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 4e328e48..d0c290fe 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = lint,py27 +envlist = pep8,py27 skipsdist = True [testenv] @@ -8,13 +8,14 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} +sitepackages = True [testenv:py27] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:lint] +[testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 121e5ba72ba36d2e4813f9ee459012f9c45f53e1 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:27:34 +0000 Subject: [PATCH 1047/2699] Fix tox configuration Rename lint->pep8, ensure that sitepackages are used with newer versions of tox/virtualenv. 
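After the rename, the common invocations become (a sketch; the environment contents are as defined in the tox.ini above):

    tox -e pep8   # style checks (previously 'tox -e lint')
    tox -e py27   # unit tests via ostestr
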
Change-Id: Ia6b6e27692e581439e6e5a74ecaeb6b3a1e0742f --- ceph-mon/tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 4e328e48..d0c290fe 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = lint,py27 +envlist = pep8,py27 skipsdist = True [testenv] @@ -8,13 +8,14 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} +sitepackages = True [testenv:py27] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:lint] +[testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 98279caedbca5e3d90812b589b0d656ddd8a28a5 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:58:17 +0000 Subject: [PATCH 1048/2699] Resync charm-helpers Change-Id: I71a1b15a6a9ec4588dee2855a852ae6186902c25 --- ceph-proxy/.gitignore | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 15 ++++++ .../contrib/storage/linux/ceph.py | 50 ++++++++++++++----- 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index d9f5c5f2..f5295367 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -3,3 +3,4 @@ bin .testrepository .tox *.sw[nop] +*.pyc diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 998f00c1..4efe7993 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -456,3 +456,18 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def port_has_listener(address, port): + """ + Returns True if the address:port is open and being listened to, + else False. + + @param address: an IP address or hostname + @param port: integer port + + Note calls 'zc' via a subprocess shell + """ + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not(bool(result)) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 60ae52b8..fb1bee34 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -120,6 +120,7 @@ class PoolCreationError(Exception): """ A custom error to inform the caller that a pool creation failed. Provides an error message """ + def __init__(self, message): super(PoolCreationError, self).__init__(message) @@ -129,6 +130,7 @@ class Pool(object): An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). """ + def __init__(self, service, name): self.service = service self.name = name @@ -180,36 +182,41 @@ def get_pgs(self, pool_size): :return: int. The number of pgs to use. 
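The freshly synced port_has_listener() gives charms a cheap reachability probe, implemented with 'nc -z' under the hood. A sketch (the address and port are hypothetical):

    from charmhelpers.contrib.network.ip import port_has_listener

    if port_has_listener('10.0.0.1', 6789):
        # something is accepting connections on the mon port
        pass
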
""" validator(value=pool_size, valid_type=int) - osds = get_osds(self.service) - if not osds: + osd_list = get_osds(self.service) + if not osd_list: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli return 200 + osd_list_length = len(osd_list) # Calculate based on Ceph best practices - if osds < 5: + if osd_list_length < 5: return 128 - elif 5 < osds < 10: + elif 5 < osd_list_length < 10: return 512 - elif 10 < osds < 50: + elif 10 < osd_list_length < 50: return 4096 else: - estimate = (osds * 100) / pool_size + estimate = (osd_list_length * 100) / pool_size # Return the next nearest power of 2 index = bisect.bisect_right(powers_of_two, estimate) return powers_of_two[index] class ReplicatedPool(Pool): - def __init__(self, service, name, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + if pg_num is None: + self.pg_num = self.get_pgs(self.replicas) + else: + self.pg_num = pg_num def create(self): if not pool_exists(self.service, self.name): # Create it - pgs = self.get_pgs(self.replicas) - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] try: check_call(cmd) except CalledProcessError: @@ -241,7 +248,7 @@ def create(self): pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -322,7 +329,8 @@ def set_pool_quota(service, pool_name, max_bytes): :return: None. Can raise CalledProcessError """ # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] try: check_call(cmd) except CalledProcessError: @@ -343,7 +351,25 @@ def remove_pool_quota(service, pool_name): raise -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None. 
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None): """ From ab463f44d11c1406773b06fed2af4a50c1f9dab9 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:58:17 +0000 Subject: [PATCH 1049/2699] Resync charm-helpers Change-Id: I71a1b15a6a9ec4588dee2855a852ae6186902c25 --- ceph-mon/.gitignore | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 15 ++++++ .../contrib/storage/linux/ceph.py | 50 ++++++++++++++----- 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index d9f5c5f2..f5295367 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -3,3 +3,4 @@ bin .testrepository .tox *.sw[nop] +*.pyc diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 998f00c1..4efe7993 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -456,3 +456,18 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def port_has_listener(address, port): + """ + Returns True if the address:port is open and being listened to, + else False. + + @param address: an IP address or hostname + @param port: integer port + + Note calls 'zc' via a subprocess shell + """ + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not(bool(result)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 60ae52b8..fb1bee34 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -120,6 +120,7 @@ class PoolCreationError(Exception): """ A custom error to inform the caller that a pool creation failed. Provides an error message """ + def __init__(self, message): super(PoolCreationError, self).__init__(message) @@ -129,6 +130,7 @@ class Pool(object): An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). """ + def __init__(self, service, name): self.service = service self.name = name @@ -180,36 +182,41 @@ def get_pgs(self, pool_size): :return: int. The number of pgs to use. 
""" validator(value=pool_size, valid_type=int) - osds = get_osds(self.service) - if not osds: + osd_list = get_osds(self.service) + if not osd_list: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli return 200 + osd_list_length = len(osd_list) # Calculate based on Ceph best practices - if osds < 5: + if osd_list_length < 5: return 128 - elif 5 < osds < 10: + elif 5 < osd_list_length < 10: return 512 - elif 10 < osds < 50: + elif 10 < osd_list_length < 50: return 4096 else: - estimate = (osds * 100) / pool_size + estimate = (osd_list_length * 100) / pool_size # Return the next nearest power of 2 index = bisect.bisect_right(powers_of_two, estimate) return powers_of_two[index] class ReplicatedPool(Pool): - def __init__(self, service, name, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + if pg_num is None: + self.pg_num = self.get_pgs(self.replicas) + else: + self.pg_num = pg_num def create(self): if not pool_exists(self.service, self.name): # Create it - pgs = self.get_pgs(self.replicas) - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] try: check_call(cmd) except CalledProcessError: @@ -241,7 +248,7 @@ def create(self): pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -322,7 +329,8 @@ def set_pool_quota(service, pool_name, max_bytes): :return: None. Can raise CalledProcessError """ # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] try: check_call(cmd) except CalledProcessError: @@ -343,7 +351,25 @@ def remove_pool_quota(service, pool_name): raise -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None. 
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None): """ From 97180679c99f153faff92bf61bf1d537577caf05 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:58:54 +0000 Subject: [PATCH 1050/2699] Resync charm-helpers Change-Id: Ibbfcc6d2f0086ee9baf347ccfdf0344ed9c0fb82 --- ceph-osd/.gitignore | 1 + ceph-osd/hooks/charmhelpers/contrib/network/ip.py | 15 +++++++++++++++ .../contrib/openstack/amulet/deployment.py | 2 +- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index d089d14d..d1b248ee 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -4,3 +4,4 @@ .testrepository bin *.sw[nop] +*.pyc diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 998f00c1..4efe7993 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -456,3 +456,18 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def port_has_listener(address, port): + """ + Returns True if the address:port is open and being listened to, + else False. + + @param address: an IP address or hostname + @param port: integer port + + Note calls 'zc' via a subprocess shell + """ + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not(bool(result)) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index cbaad10d..d2ede320 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -121,7 +121,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', From 2bb6bc2408a9fec2453c9405b12bdc2f3e650c16 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 2 Mar 2016 10:59:31 +0000 Subject: [PATCH 1051/2699] Resync charm-helpers Change-Id: I904c16d01184307170be1e2b458bad396aef8576 --- ceph-radosgw/.gitignore | 1 + .../charmhelpers/contrib/openstack/neutron.py | 18 ++++--- .../templates/openstack_https_frontend | 2 + .../templates/openstack_https_frontend.conf | 2 + .../contrib/storage/linux/ceph.py | 50 ++++++++++++++----- 5 files changed, 53 insertions(+), 20 deletions(-) diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index a16ea07e..25d8aecb 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -4,3 +4,4 @@ bin .tox tags *.sw[nop] +*.pyc diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index dbc489ab..d057ea6e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -237,14 +237,16 @@ def neutron_plugins(): plugins['midonet']['driver'] = ( 'neutron.plugins.midonet.plugin.MidonetPluginV2') if release >= 'liberty': - midonet_origin = config('midonet-origin') - if midonet_origin is not None and midonet_origin[4:5] == '1': - plugins['midonet']['driver'] = ( - 'midonet.neutron.plugin_v1.MidonetPluginV2') - plugins['midonet']['server_packages'].remove( - 'python-neutron-plugin-midonet') - plugins['midonet']['server_packages'].append( - 'python-networking-midonet') + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') return plugins diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index ce28fa3f..6a923804 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -6,6 +6,8 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on + SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 + SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index ce28fa3f..6a923804 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -6,6 +6,8 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on + SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 + SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} SSLCertificateKeyFile /etc/apache2/ssl/{{ 
namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 60ae52b8..fb1bee34 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -120,6 +120,7 @@ class PoolCreationError(Exception): """ A custom error to inform the caller that a pool creation failed. Provides an error message """ + def __init__(self, message): super(PoolCreationError, self).__init__(message) @@ -129,6 +130,7 @@ class Pool(object): An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). """ + def __init__(self, service, name): self.service = service self.name = name @@ -180,36 +182,41 @@ def get_pgs(self, pool_size): :return: int. The number of pgs to use. """ validator(value=pool_size, valid_type=int) - osds = get_osds(self.service) - if not osds: + osd_list = get_osds(self.service) + if not osd_list: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli return 200 + osd_list_length = len(osd_list) # Calculate based on Ceph best practices - if osds < 5: + if osd_list_length < 5: return 128 - elif 5 < osds < 10: + elif 5 < osd_list_length < 10: return 512 - elif 10 < osds < 50: + elif 10 < osd_list_length < 50: return 4096 else: - estimate = (osds * 100) / pool_size + estimate = (osd_list_length * 100) / pool_size # Return the next nearest power of 2 index = bisect.bisect_right(powers_of_two, estimate) return powers_of_two[index] class ReplicatedPool(Pool): - def __init__(self, service, name, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + if pg_num is None: + self.pg_num = self.get_pgs(self.replicas) + else: + self.pg_num = pg_num def create(self): if not pool_exists(self.service, self.name): # Create it - pgs = self.get_pgs(self.replicas) - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] try: check_call(cmd) except CalledProcessError: @@ -241,7 +248,7 @@ def create(self): pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -322,7 +329,8 @@ def set_pool_quota(service, pool_name, max_bytes): :return: None. Can raise CalledProcessError """ # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] try: check_call(cmd) except CalledProcessError: @@ -343,7 +351,25 @@ def remove_pool_quota(service, pool_name): raise -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. 
Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None): """ From 9a624c14b1f808f96a7949fae5647258bb0ca79d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 2 Mar 2016 16:24:02 -0500 Subject: [PATCH 1052/2699] support Ceph's --dmcrypt flag for OSD preparation Tests now verify that ceph osds are running to ensure they pass in either order Change-Id: Ia543f4b085d4e97976ba08db508761f8dde97c42 --- ceph-osd/config.yaml | 12 +++++- ceph-osd/hooks/ceph.py | 19 +++++++--- ceph-osd/hooks/ceph_hooks.py | 3 +- ceph-osd/tests/README | 4 +- ceph-osd/tests/basic_deployment.py | 60 +++++++++++++++++++++++++++++- 5 files changed, 88 insertions(+), 10 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index dfa4aec0..169a3331 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -54,6 +54,16 @@ options: Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. + osd-encrypt: + type: boolean + default: False + description: | + By default, the charm will not encrypt Ceph OSD devices; however, by + setting osd-encrypt to True, Ceph's dmcrypt support will be used to + encrypt OSD devices. + + Specifying this option on a running Ceph OSD node will have no effect + until new disks are added, at which point new disks will be encrypted. ignore-device-errors: type: boolean default: False @@ -137,7 +147,7 @@ options: kernel.threads-max: 2097152 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set - persistently. By default we set pid_max, max_map_count and + persistently. By default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) of OSDs recovering. very large clusters should set those values even higher (e.g. max for kernel.pid_max is 4194303). 
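As a rough sketch of how the new osd-encrypt option is threaded through to ceph-disk (simplified from the osdize_dev() change below, which also handles the fs-type, journal, reformat and error-handling options; cmp_pkgrevno is the helper already imported from charmhelpers in ceph.py):

    def build_prepare_cmd(dev, encrypt=False):
        # Sketch only: mirrors the --dmcrypt wiring added in this patch.
        cmd = ['ceph-disk', 'prepare']
        # --dmcrypt is only understood by ceph >= 0.60
        if encrypt and cmp_pkgrevno('ceph', '0.60') >= 0:
            cmd.append('--dmcrypt')
        cmd.append(dev)
        return cmd

Enabling it on a deployed service is then, for example (Juju 1.x syntax):

    juju set ceph-osd osd-encrypt=True

noting, per the option description above, that existing OSD devices are left untouched and only disks added afterwards are encrypted.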
diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 05f5fcf4..40e33597 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -172,6 +172,7 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal ] @@ -428,15 +429,16 @@ def find_least_used_journal(journal_devices): def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False): + ignore_errors=False, encrypt=False): if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors) + osdize_dev(dev, osd_format, osd_journal, + reformat_osd, ignore_errors, encrypt) else: - osdize_dir(dev) + osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False): + ignore_errors=False, encrypt=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -457,6 +459,9 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, status_set('maintenance', 'Initializing device {}'.format(dev)) cmd = ['ceph-disk', 'prepare'] # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') if cmp_pkgrevno('ceph', '0.48.3') >= 0: if osd_format: cmd.append('--fs-type') @@ -485,7 +490,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, raise e -def osdize_dir(path): +def osdize_dir(path, encrypt=False): if os.path.exists(os.path.join(path, 'upstart')): log('Path {} is already configured as an OSD - bailing'.format(path)) return @@ -504,6 +509,10 @@ def osdize_dir(path): '--data-dir', path ] + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 84e7a150..1c402796 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -181,7 +181,8 @@ def prepare_disks_and_activate(): for dev in get_devices(): ceph.osdize(dev, config('osd-format'), osd_journal, config('osd-reformat'), - config('ignore-device-errors')) + config('ignore-device-errors'), + config('osd-encrypt')) ceph.start_osds(get_devices()) diff --git a/ceph-osd/tests/README b/ceph-osd/tests/README index 79c5b063..31adf8ec 100644 --- a/ceph-osd/tests/README +++ b/ceph-osd/tests/README @@ -57,13 +57,13 @@ All examples are run from the charm's root directory. 
bzr branch lp:charms/trusty/foo cd foo - juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse + juju test -v -p AMULET_HTTP_PROXY --timeout 2700 015-basic-trusty-icehouse * To run tests and keep the juju environment deployed after a failure: bzr branch lp:charms/trusty/foo cd foo - juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse + juju test --set-e -v -p AMULET_HTTP_PROXY --timeout 2700 015-basic-trusty-icehouse * To re-run a test module against an already deployed environment (one that was deployed by a previous call to 'juju test --set-e'): diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 42fc1b91..c320cf1c 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -182,7 +182,7 @@ def test_100_ceph_processes(self): self.ceph0_sentry: ceph_processes, self.ceph1_sentry: ceph_processes, self.ceph2_sentry: ceph_processes, - self.ceph_osd_sentry: {'ceph-osd': 2} + self.ceph_osd_sentry: {'ceph-osd': True} } actual_pids = u.get_unit_process_ids(expected_processes) @@ -572,3 +572,61 @@ def test_499_ceph_cmds_exit_zero(self): # FYI: No restart check as ceph services do not restart # when charm config changes, unless monitor count increases. + + def test_900_ceph_encryption(self): + """Verify that the new disk is added with encryption by checking for + Ceph's encryption keys directory""" + sentry = self.ceph_osd_sentry + set_default = { + 'osd-encrypt': 'False', + 'osd-devices': '/dev/vdb /srv/ceph', + } + set_alternate = { + 'osd-encrypt': 'True', + 'osd-devices': '/dev/vdb /srv/ceph /srv/ceph_encrypted', + } + juju_service = 'ceph-osd' + u.log.debug('Making config change on {}...'.format(juju_service)) + mtime = u.get_sentry_time(sentry) + self.d.configure(juju_service, set_alternate) + unit_name = sentry.info['unit_name'] + + sleep_time = 30 + retry_count = 30 + file_mtime = None + time.sleep(sleep_time) + + filename = '/etc/ceph/dmcrypt-keys' + tries = 0 + retry_sleep_time = 10 + while tries <= retry_count and not file_mtime: + try: + stat = sentry.directory_stat(filename) + file_mtime = stat['mtime'] + self.log.debug('Attempt {} to get {} mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + self.d.configure(juju_service, set_default) + self.log.debug('Attempt {} to get {} mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + self.d.configure(juju_service, set_default) + + if not file_mtime: + self.log.warn('Could not determine mtime, assuming ' + 'folder does not exist') + return False + + if file_mtime >= mtime: + self.log.debug('Folder mtime is newer than provided mtime ' + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) + return True + else: + self.log.warn('Folder mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) + return False From 7e236651d507ace974e47c23fe838945140ffc76 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 2 Mar 2016 12:23:53 +0000 Subject: [PATCH 1053/2699] Add Ipv6 support Adds support for configuring the Rados Gateway to use IPv6 addresses and networks. This can be enabled by setting prefer-ipv6=True. 
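For example, assuming the units already carry non-temporary IPv6 addresses (Juju 1.x syntax):

    juju set ceph-radosgw prefer-ipv6=True

On Trusty with OpenStack releases earlier than Liberty this also pulls haproxy from trusty-backports, since IPv6 support requires haproxy >= 1.5.3 (see setup_ipv6() in the hunks below).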
Change-Id: I801fab14accd8c3498ea5468d135f34f159717cb Closes-Bug: 1513524 --- ceph-radosgw/config.yaml | 12 ++ ceph-radosgw/hooks/ceph_radosgw_context.py | 145 +++++++++++++--- ceph-radosgw/hooks/hooks.py | 156 ++++++++---------- ceph-radosgw/hooks/utils.py | 95 +++++++---- ceph-radosgw/templates/ceph.conf | 3 + ceph-radosgw/templates/rgw.conf | 25 +++ .../unit_tests/test_ceph_radosgw_context.py | 43 +++-- ceph-radosgw/unit_tests/test_hooks.py | 67 -------- 8 files changed, 324 insertions(+), 222 deletions(-) create mode 100644 ceph-radosgw/templates/rgw.conf diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 04afec7f..60b500c4 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -153,3 +153,15 @@ options: description: | Connect timeout configuration in ms for haproxy, used in HA configurations. If not provided, default value of 5000ms is used. + prefer-ipv6: + type: boolean + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy extension must be + disabled and a non-temporary address must be configured/available on + your network interface. diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 61276651..73558b17 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -1,3 +1,11 @@ +import os +import re +import socket +import tempfile +import glob +import shutil +import subprocess + from charmhelpers.contrib.openstack import context from charmhelpers.contrib.hahelpers.cluster import ( determine_api_port, @@ -5,17 +13,69 @@ ) from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.core.hookenv import ( + DEBUG, WARNING, config, log, relation_ids, related_units, relation_get, - unit_get, + status_set, ) -import os -import socket -import dns.resolver +from charmhelpers.contrib.network.ip import ( + format_ipv6_addr, + get_host_ip, + get_ipv6_addr, +) + + +def is_apache_24(): + if os.path.exists('/etc/apache2/conf-available'): + return True + else: + return False + + +class ApacheContext(context.OSContextGenerator): + interfaces = ['http'] + service_namespace = 'ceph-radosgw' + + def __call__(self): + ctxt = {} + if config('use-embedded-webserver'): + log("Skipping ApacheContext since we are using the embedded " + "webserver") + return {} + + status_set('maintenance', 'configuring apache') + + src = 'files/www/*' + dst = '/var/www/' + log("Installing www scripts", level=DEBUG) + try: + for x in glob.glob(src): + shutil.copy(x, dst) + except IOError as e: + log("Error copying files from '%s' to '%s': %s" % (src, dst, e), + level=WARNING) + + try: + subprocess.check_call(['a2enmod', 'fastcgi']) + subprocess.check_call(['a2enmod', 'rewrite']) + except subprocess.CalledProcessError as e: + log("Error enabling apache modules - %s" % e, level=WARNING) + + try: + if is_apache_24(): + subprocess.check_call(['a2dissite', '000-default']) + else: + subprocess.check_call(['a2dissite', 'default']) + except subprocess.CalledProcessError as e: + log("Error disabling apache sites - %s" % e, level=WARNING) + + ctxt['hostname'] = socket.gethostname() + ctxt['port'] = determine_api_port(config('port'), singlenode_mode=True) + return ctxt class HAProxyContext(context.HAProxyContext): @@ -66,24 
+126,60 @@ def __call__(self): return {} +def ensure_host_resolvable_v6(hostname): + """Ensure that we can resolve our hostname to an IPv6 address by adding it + to /etc/hosts if it is not already resolvable. + """ + try: + socket.getaddrinfo(hostname, None, socket.AF_INET6) + except socket.gaierror: + log("Host '%s' is not ipv6 resolvable - adding to /etc/hosts" % + hostname, level=DEBUG) + else: + log("Host '%s' appears to be ipv6 resolvable" % (hostname), + level=DEBUG) + return + + # This must be the backend address used by haproxy + host_addr = get_ipv6_addr(exc_list=[config('vip')])[0] + dtmp = tempfile.mkdtemp() + try: + tmp_hosts = os.path.join(dtmp, 'hosts') + shutil.copy('/etc/hosts', tmp_hosts) + with open(tmp_hosts, 'a+') as fd: + lines = fd.readlines() + for line in lines: + key = "^%s\s+" % (host_addr) + if re.search(key, line): + break + else: + fd.write("%s\t%s\n" % (host_addr, hostname)) + + os.rename(tmp_hosts, '/etc/hosts') + finally: + shutil.rmtree(dtmp) + + class MonContext(context.OSContextGenerator): interfaces = ['ceph-radosgw'] def __call__(self): if not relation_ids('mon'): return {} - hosts = [] + mon_hosts = [] auths = [] for relid in relation_ids('mon'): for unit in related_units(relid): ceph_public_addr = relation_get('ceph-public-address', unit, relid) if ceph_public_addr: - host_ip = self.get_host_ip(ceph_public_addr) - hosts.append('{}:6789'.format(host_ip)) + host_ip = format_ipv6_addr(ceph_public_addr) or \ + get_host_ip(ceph_public_addr) + mon_hosts.append('{}:6789'.format(host_ip)) _auth = relation_get('auth', unit, relid) if _auth: auths.append(_auth) + if len(set(auths)) != 1: e = ("Inconsistent or absent auth returned by mon units. Setting " "auth_supported to 'none'") @@ -91,17 +187,28 @@ def __call__(self): auth = 'none' else: auth = auths[0] - hosts.sort() + + # /etc/init.d/radosgw mandates that a dns name is used for this + # parameter so ensure that address is resolvable + host = socket.gethostname() + if config('prefer-ipv6'): + ensure_host_resolvable_v6(host) + + port = determine_apache_port(config('port'), singlenode_mode=True) + if config('prefer-ipv6'): + port = "[::]:%s" % (port) + + mon_hosts.sort() ctxt = { 'auth_supported': auth, - 'mon_hosts': ' '.join(hosts), - 'hostname': socket.gethostname(), + 'mon_hosts': ' '.join(mon_hosts), + 'hostname': host, 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), 'embedded_webserver': config('use-embedded-webserver'), 'loglevel': config('loglevel'), - 'port': determine_apache_port(config('port'), - singlenode_mode=True) + 'port': port, + 'ipv6': config('prefer-ipv6') } certs_path = '/var/lib/ceph/nss' @@ -121,17 +228,3 @@ def __call__(self): return ctxt return {} - - def get_host_ip(self, hostname=None): - try: - if not hostname: - hostname = unit_get('private-address') - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index e88c633e..54ecf6e2 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -1,17 +1,16 @@ #!/usr/bin/python - # -# Copyright 2012 Canonical Ltd. +# Copyright 2016 Canonical Ltd. 
# # Authors: # James Page +# Edward Hope-Morley # -import shutil +import os import subprocess import sys -import glob -import os + import ceph from charmhelpers.core.hookenv import ( @@ -39,27 +38,17 @@ lsb_release, restart_on_change, ) -from charmhelpers.contrib.hahelpers.cluster import ( - determine_apache_port, -) -from utils import ( - render_template, - enable_pocket, - is_apache_24, - CEPHRG_HA_RES, - register_configs, - REQUIRED_INTERFACES, - check_optional_relations, -) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import ( cmp_pkgrevno, mkdir, ) - from charmhelpers.contrib.network.ip import ( + format_ipv6_addr, + get_ipv6_addr, get_iface_for_address, get_netmask_for_address, + is_ipv6, ) from charmhelpers.contrib.openstack.ip import ( canonical_url, @@ -72,18 +61,17 @@ send_request_if_needed, is_request_complete, ) - -APACHE_PORTS_CONF = '/etc/apache2/ports.conf' +from utils import ( + enable_pocket, + CEPHRG_HA_RES, + register_configs, + REQUIRED_INTERFACES, + check_optional_relations, + setup_ipv6, +) hooks = Hooks() CONFIGS = register_configs() - - -def install_www_scripts(): - for x in glob.glob('files/www/*'): - shutil.copy(x, '/var/www/') - - NSS_DIR = '/var/lib/ceph/nss' @@ -145,43 +133,6 @@ def install(): os.makedirs('/etc/ceph') -def emit_apacheconf(): - apachecontext = { - "hostname": unit_get('private-address'), - "port": determine_apache_port(config('port'), singlenode_mode=True) - } - site_conf = '/etc/apache2/sites-available/rgw' - if is_apache_24(): - site_conf = '/etc/apache2/sites-available/rgw.conf' - with open(site_conf, 'w') as apacheconf: - apacheconf.write(render_template('rgw', apachecontext)) - - -def apache_sites(): - if is_apache_24(): - subprocess.check_call(['a2dissite', '000-default']) - else: - subprocess.check_call(['a2dissite', 'default']) - subprocess.check_call(['a2ensite', 'rgw']) - - -def apache_modules(): - subprocess.check_call(['a2enmod', 'fastcgi']) - subprocess.check_call(['a2enmod', 'rewrite']) - - -def apache_reload(): - subprocess.call(['service', 'apache2', 'reload']) - - -def apache_ports(): - portscontext = { - "port": determine_apache_port(config('port'), singlenode_mode=True) - } - with open(APACHE_PORTS_CONF, 'w') as portsconf: - portsconf.write(render_template('ports.conf', portscontext)) - - def setup_keystone_certs(unit=None, rid=None): """ Get CA and signing certs from Keystone used to decrypt revoked token list. 
@@ -213,6 +164,9 @@ def setup_keystone_certs(unit=None, rid=None): for key in required_keys: settings[key] = rdata.get(key) + if is_ipv6(settings.get('auth_host')): + settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) + if not all(settings.values()): log("Missing relation settings (%s) - skipping cert setup" % (', '.join([k for k in settings.keys() if not settings[k]])), @@ -288,19 +242,29 @@ def setup_keystone_certs(unit=None, rid=None): '/etc/haproxy/haproxy.cfg': ['haproxy']}) def config_changed(): install_packages() - CONFIGS.write_all() - if not config('use-embedded-webserver'): - status_set('maintenance', 'configuring apache') - emit_apacheconf() - install_www_scripts() - apache_sites() - apache_modules() - apache_ports() - apache_reload() + + if config('prefer-ipv6'): + status_set('maintenance', 'configuring ipv6') + setup_ipv6() for r_id in relation_ids('identity-service'): identity_changed(relid=r_id) + for r_id in relation_ids('cluster'): + cluster_joined(rid=r_id) + + CONFIGS.write_all() + + if not config('use-embedded-webserver'): + try: + subprocess.check_call(['a2ensite', 'rgw']) + except subprocess.CalledProcessError as e: + log("Error enabling apache module 'rgw' - %s" % e, level=WARNING) + + # Ensure started but do a soft reload + subprocess.call(['service', 'apache2', 'start']) + subprocess.call(['service', 'apache2', 'reload']) + @hooks.hook('mon-relation-departed', 'mon-relation-changed') @@ -373,8 +337,18 @@ def identity_changed(relid=None): restart() -@hooks.hook('cluster-relation-changed', - 'cluster-relation-joined') +@hooks.hook('cluster-relation-joined') +@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) +def cluster_joined(rid=None): + settings = {} + if config('prefer-ipv6'): + private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] + settings['private-address'] = private_addr + + relation_set(relation_id=rid, **settings) + + +@hooks.hook('cluster-relation-changed') @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_changed(): CONFIGS.write_all() @@ -384,17 +358,12 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') def ha_relation_joined(): - # Obtain the config values necessary for the cluster config. These - # include multicast port and interface to bind to. 
- corosync_bindiface = config('ha-bindiface') - corosync_mcastport = config('ha-mcastport') vip = config('vip') if not vip: - log('Unable to configure hacluster as vip not provided', - level=ERROR) + log('Unable to configure hacluster as vip not provided', level=ERROR) sys.exit(1) + # Obtain resources - # SWIFT_HA_RES = 'grp_swift_vips' resources = { 'res_cephrg_haproxy': 'lsb:haproxy' } @@ -404,15 +373,25 @@ def ha_relation_joined(): vip_group = [] for vip in vip.split(): + if is_ipv6(vip): + res_rgw_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_rgw_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + if iface is not None: vip_key = 'res_cephrg_{}_vip'.format(iface) - resources[vip_key] = 'ocf:heartbeat:IPaddr2' + resources[vip_key] = res_rgw_vip resource_params[vip_key] = ( - 'params ip="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(vip=vip, + 'params {ip}="{vip}" cidr_netmask="{netmask}"' + ' nic="{iface}"'.format(ip=vip_params, + vip=vip, iface=iface, - netmask=get_netmask_for_address(vip)) + netmask=netmask) ) vip_group.append(vip_key) @@ -426,6 +405,11 @@ def ha_relation_joined(): 'cl_cephrg_haproxy': 'res_cephrg_haproxy' } + # Obtain the config values necessary for the cluster config. These + # include multicast port and interface to bind to. + corosync_bindiface = config('ha-bindiface') + corosync_mcastport = config('ha-mcastport') + relation_set(init_services=init_services, corosync_bindiface=corosync_bindiface, corosync_mcastport=corosync_mcastport, diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index d14c6ecb..9d6413c9 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -1,27 +1,45 @@ # -# Copyright 2012 Canonical Ltd. +# Copyright 2016 Canonical Ltd. # # Authors: # James Page # Paul Collins +# Edward Hope-Morley # -import socket -import re import os -import dns.resolver +import re import jinja2 + from copy import deepcopy from collections import OrderedDict -from charmhelpers.core.hookenv import unit_get, relation_ids, status_get -from charmhelpers.contrib.openstack import context, templating -from charmhelpers.contrib.openstack.utils import set_os_workload_status -from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config -from charmhelpers.core.host import cmp_pkgrevno -from charmhelpers.fetch import filter_installed_packages import ceph_radosgw_context +from charmhelpers.core.hookenv import ( + relation_ids, + status_get, +) +from charmhelpers.contrib.openstack import ( + context, + templating, +) +from charmhelpers.contrib.openstack.utils import ( + os_release, + set_os_workload_status, +) +from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config +from charmhelpers.core.host import ( + cmp_pkgrevno, + lsb_release, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, + add_source, + filter_installed_packages, +) + # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. 
REQUIRED_INTERFACES = { @@ -32,6 +50,9 @@ TEMPLATES = 'templates/' HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' CEPH_CONF = '/etc/ceph/ceph.conf' +APACHE_CONF = '/etc/apache2/sites-available/rgw' +APACHE_24_CONF = '/etc/apache2/sites-available/rgw.conf' +APACHE_PORTS_CONF = '/etc/apache2/ports.conf' BASE_RESOURCE_MAP = OrderedDict([ (HAPROXY_CONF, { @@ -39,6 +60,18 @@ ceph_radosgw_context.HAProxyContext()], 'services': ['haproxy'], }), + (APACHE_CONF, { + 'contexts': [ceph_radosgw_context.ApacheContext()], + 'services': ['apache2'], + }), + (APACHE_24_CONF, { + 'contexts': [ceph_radosgw_context.ApacheContext()], + 'services': ['apache2'], + }), + (APACHE_PORTS_CONF, { + 'contexts': [ceph_radosgw_context.ApacheContext()], + 'services': ['apache2'], + }), (CEPH_CONF, { 'contexts': [ceph_radosgw_context.MonContext()], 'services': ['radosgw'], @@ -51,6 +84,11 @@ def resource_map(): Dynamically generate a map of resources that will be managed for a single hook execution. ''' + if os.path.exists('/etc/apache2/conf-available'): + BASE_RESOURCE_MAP.pop(APACHE_CONF) + else: + BASE_RESOURCE_MAP.pop(APACHE_24_CONF) + resource_map = deepcopy(BASE_RESOURCE_MAP) return resource_map @@ -92,28 +130,6 @@ def enable_pocket(pocket): sources.write(line) -def get_host_ip(hostname=None): - try: - if not hostname: - hostname = unit_get('private-address') - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address - - -def is_apache_24(): - if os.path.exists('/etc/apache2/conf-available'): - return True - else: - return False - - def check_optional_relations(configs): required_interfaces = {} if relation_ids('ha'): @@ -132,3 +148,18 @@ def check_optional_relations(configs): return status_get() else: return 'unknown', 'No optional relations' + + +def setup_ipv6(): + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower() + if ubuntu_rel < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to + # use trusty-backports otherwise we can use the UCA. 
+ if ubuntu_rel == 'trusty' and os_release('ceph-common') < 'liberty': + add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports ' + 'main') + apt_update(fatal=True) + apt_install('haproxy/trusty-backports', fatal=True) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index af0fc43b..28efdeba 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -11,6 +11,9 @@ log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} debug rgw = {{ loglevel }}/5 +{% if ipv6 -%} +ms bind ipv6 = true +{% endif %} [client.radosgw.gateway] host = {{ hostname }} diff --git a/ceph-radosgw/templates/rgw.conf b/ceph-radosgw/templates/rgw.conf new file mode 100644 index 00000000..7a3e4724 --- /dev/null +++ b/ceph-radosgw/templates/rgw.conf @@ -0,0 +1,25 @@ + + FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock + + + + ServerName {{ hostname }} + ServerAdmin ceph@ubuntu.com + DocumentRoot /var/www + RewriteEngine On + RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1¶ms=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] + + + Options +ExecCGI + AllowOverride All + SetHandler fastcgi-script + Order allow,deny + Allow from all + AuthBasicAuthoritative Off + + + AllowEncodedSlashes On + ErrorLog /var/log/apache2/error.log + CustomLog /var/log/apache2/access.log combined + ServerSignature Off + diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 7c049afe..5071b671 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -13,6 +13,7 @@ 'related_units', 'cmp_pkgrevno', 'socket', + 'is_apache_24', ] @@ -147,8 +148,9 @@ def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) self.config.side_effect = self.test_config.get - def test_ctxt(self): - self.socket.gethostname.return_value = '10.0.0.10' + @patch.object(context, 'ensure_host_resolvable_v6') + def test_ctxt(self, mock_ensure_rsv_v6): + self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] @@ -157,6 +159,7 @@ def _relation_get(attr, unit, rid): return addresses.pop() elif attr == 'auth': return 'cephx' + self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] @@ -164,17 +167,26 @@ def _relation_get(attr, unit, rid): 'auth_supported': 'cephx', 'embedded_webserver': False, 'disable_100_continue': True, - 'hostname': '10.0.0.10', + 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, - 'port': 70 + 'port': 70, + 'ipv6': False } self.assertEqual(expect, mon_ctxt()) + self.assertFalse(mock_ensure_rsv_v6.called) + + self.test_config.set('prefer-ipv6', True) + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + expect['ipv6'] = True + expect['port'] = "[::]:%s" % (70) + self.assertEqual(expect, mon_ctxt()) + self.assertTrue(mock_ensure_rsv_v6.called) def test_ctxt_missing_data(self): - self.socket.gethostname.return_value = '10.0.0.10' + self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() self.relation_get.return_value = None self.relation_ids.return_value = ['mon:6'] @@ -182,7 +194,7 @@ def test_ctxt_missing_data(self): self.assertEqual({}, mon_ctxt()) def 
test_ctxt_inconsistent_auths(self): - self.socket.gethostname.return_value = '10.0.0.10' + self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] auths = ['cephx', 'cephy', 'cephz'] @@ -199,17 +211,18 @@ def _relation_get(attr, unit, rid): 'auth_supported': 'none', 'embedded_webserver': False, 'disable_100_continue': True, - 'hostname': '10.0.0.10', + 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, - 'port': 70 + 'port': 70, + 'ipv6': False } self.assertEqual(expect, mon_ctxt()) def test_ctxt_consistent_auths(self): - self.socket.gethostname.return_value = '10.0.0.10' + self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] auths = ['cephx', 'cephx', 'cephx'] @@ -226,11 +239,19 @@ def _relation_get(attr, unit, rid): 'auth_supported': 'cephx', 'embedded_webserver': False, 'disable_100_continue': True, - 'hostname': '10.0.0.10', + 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, - 'port': 70 + 'port': 70, + 'ipv6': False } self.assertEqual(expect, mon_ctxt()) + + +class ApacheContextTest(CharmTestCase): + + def setUp(self): + super(ApacheContextTest, self).setUp(context, TO_PATCH) + self.config.side_effect = self.test_config.get diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 96cbf543..44fd7a87 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -6,7 +6,6 @@ from test_utils import ( CharmTestCase, - patch_open ) from charmhelpers.contrib.openstack.ip import PUBLIC @@ -34,8 +33,6 @@ 'enable_pocket', 'get_iface_for_address', 'get_netmask_for_address', - 'glob', - 'is_apache_24', 'log', 'lsb_release', 'open_port', @@ -44,8 +41,6 @@ 'relation_set', 'relation_get', 'related_units', - 'render_template', - 'shutil', 'status_set', 'subprocess', 'sys', @@ -62,11 +57,6 @@ def setUp(self): self.test_config.set('key', 'secretkey') self.test_config.set('use-syslog', False) - def test_install_www_scripts(self): - self.glob.glob.return_value = ['files/www/bob'] - ceph_hooks.install_www_scripts() - self.shutil.copy.assert_called_with('files/www/bob', '/var/www/') - def test_install_ceph_optimised_packages(self): self.lsb_release.return_value = {'DISTRIB_CODENAME': 'vivid'} fastcgi_source = ( @@ -122,69 +112,12 @@ def test_install(self): self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') - def test_emit_apacheconf(self): - self.is_apache_24.return_value = True - self.unit_get.return_value = '10.0.0.1' - apachecontext = { - "hostname": '10.0.0.1', - "port": 70, - } - vhost_file = '/etc/apache2/sites-available/rgw.conf' - with patch_open() as (_open, _file): - ceph_hooks.emit_apacheconf() - _open.assert_called_with(vhost_file, 'w') - self.render_template.assert_called_with('rgw', apachecontext) - - def test_apache_sites24(self): - self.is_apache_24.return_value = True - ceph_hooks.apache_sites() - calls = [ - call(['a2dissite', '000-default']), - call(['a2ensite', 'rgw']), - ] - self.subprocess.check_call.assert_has_calls(calls) - - def test_apache_sites22(self): - self.is_apache_24.return_value = False - ceph_hooks.apache_sites() - calls = [ - call(['a2dissite', 'default']), - call(['a2ensite', 'rgw']), - ] - 
self.subprocess.check_call.assert_has_calls(calls) - - def test_apache_modules(self): - ceph_hooks.apache_modules() - calls = [ - call(['a2enmod', 'fastcgi']), - call(['a2enmod', 'rewrite']), - ] - self.subprocess.check_call.assert_has_calls(calls) - - def test_apache_reload(self): - ceph_hooks.apache_reload() - calls = [ - call(['service', 'apache2', 'reload']), - ] - self.subprocess.call.assert_has_calls(calls) - - @patch.object(ceph_hooks, 'apache_ports', lambda *args: True) @patch.object(ceph_hooks, 'mkdir', lambda *args: None) def test_config_changed(self): _install_packages = self.patch('install_packages') - _emit_apacheconf = self.patch('emit_apacheconf') - _install_www_scripts = self.patch('install_www_scripts') - _apache_sites = self.patch('apache_sites') - _apache_modules = self.patch('apache_modules') - _apache_reload = self.patch('apache_reload') ceph_hooks.config_changed() self.assertTrue(_install_packages.called) self.CONFIGS.write_all.assert_called_with() - self.assertTrue(_emit_apacheconf.called) - self.assertTrue(_install_www_scripts.called) - self.assertTrue(_apache_sites.called) - self.assertTrue(_apache_modules.called) - self.assertTrue(_apache_reload.called) @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) From 951bbe0c508d08a67cd7640880715ed3e21755cc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 4 Mar 2016 13:55:57 +0000 Subject: [PATCH 1054/2699] Enable Xenial-Mitaka amulet test target. Change-Id: I5d45747b1c9500f8aea219a212d4bab9a7381526 --- ceph-radosgw/tests/021-basic-xenial-mitaka | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ceph-radosgw/tests/021-basic-xenial-mitaka diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/021-basic-xenial-mitaka old mode 100644 new mode 100755 From bffa9f5c699285d9e0b626d9ff5d57d3405b423d Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 4 Mar 2016 21:23:00 +0000 Subject: [PATCH 1055/2699] Fix issues when using embedded webserver Remove apache configuration files from the context map if embedded webserver is enabled; as this is the recommended way of deploying radosgw, switch the amulet test to exercise this option instead of apache. Change-Id: I83d87c088a264ebd556e5d3285f63c60d4b799d8 Close-Bug: 1553357 --- ceph-radosgw/hooks/utils.py | 10 ++++++++-- ceph-radosgw/tests/basic_deployment.py | 8 +++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 9d6413c9..08d53260 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -19,6 +19,7 @@ from charmhelpers.core.hookenv import ( relation_ids, status_get, + config, ) from charmhelpers.contrib.openstack import ( context, @@ -84,10 +85,15 @@ def resource_map(): Dynamically generate a map of resources that will be managed for a single hook execution. 
''' - if os.path.exists('/etc/apache2/conf-available'): - BASE_RESOURCE_MAP.pop(APACHE_CONF) + if not config('use-embedded-webserver'): + if os.path.exists('/etc/apache2/conf-available'): + BASE_RESOURCE_MAP.pop(APACHE_CONF) + else: + BASE_RESOURCE_MAP.pop(APACHE_24_CONF) else: + BASE_RESOURCE_MAP.pop(APACHE_CONF) BASE_RESOURCE_MAP.pop(APACHE_24_CONF) + BASE_RESOURCE_MAP.pop(APACHE_PORTS_CONF) resource_map = deepcopy(BASE_RESOURCE_MAP) return resource_map diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 9c47b49f..79ebb3eb 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -89,11 +89,13 @@ def _configure_services(self): 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph' } + radosgw_config = {"use-embedded-webserver": True} configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph': ceph_config} + 'ceph': ceph_config, + 'ceph-radosgw': radosgw_config} super(CephRadosGwBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): @@ -329,12 +331,12 @@ def test_300_ceph_radosgw_config(self): 'keyring': '/etc/ceph/keyring.rados.gateway', 'rgw socket path': '/tmp/radosgw.sock', 'log file': '/var/log/ceph/radosgw.log', - 'rgw print continue': 'false', 'rgw keystone url': 'http://{}:35357/'.format(keystone_ip), 'rgw keystone admin token': 'ubuntutesting', 'rgw keystone accepted roles': 'Member,Admin', 'rgw keystone token cache size': '500', - 'rgw keystone revocation interval': '600' + 'rgw keystone revocation interval': '600', + 'rgw frontends': 'civetweb port=70', }, } From b3a2b0692a2e6d187f3c3c5af137ed78292cab20 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Sat, 5 Mar 2016 19:12:53 -0800 Subject: [PATCH 1056/2699] Add support for cache tier management This change will add two new actions to the ceph-mon charm. These actions will allow the user to create and remove cache tiers from existing pools. Both writeback and read only mode are supported. Limitations of this patch include not having fine grain control over the cache tier properties. Things like hit_set_count, bloom filter control or cache sizing are not supported yet. 
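Once deployed, the new actions can be driven as, for example (Juju 1.x action syntax; unit and pool names illustrative):

    juju action do ceph-mon/0 create-cache-tier backer-pool=cold-pool \
        cache-pool=hot-pool cache-mode=writeback
    juju action do ceph-mon/0 remove-cache-tier backer-pool=cold-pool \
        cache-pool=hot-pool

Both pools must already exist: the action scripts below pre-flight-check with pool_exists() and call action_fail() rather than creating pools implicitly.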
Change-Id: I5a37e79d0d23d35295a8ae97177c940af66b0485 --- ceph-proxy/.gitignore | 1 + ceph-proxy/Makefile | 2 +- ceph-proxy/actions.yaml | 37 +++++++++ ceph-proxy/actions/__init__.py | 1 + ceph-proxy/actions/create-cache-tier | 1 + ceph-proxy/actions/create-cache-tier.py | 41 ++++++++++ ceph-proxy/actions/remove-cache-tier | 1 + ceph-proxy/actions/remove-cache-tier.py | 41 ++++++++++ .../contrib/storage/linux/ceph.py | 4 +- ceph-proxy/tests/basic_deployment.py | 76 ++++++++++++++++++- .../charmhelpers/contrib/amulet/utils.py | 6 +- ceph-proxy/tox.ini | 2 +- 12 files changed, 205 insertions(+), 8 deletions(-) create mode 100644 ceph-proxy/actions/__init__.py create mode 120000 ceph-proxy/actions/create-cache-tier create mode 100755 ceph-proxy/actions/create-cache-tier.py create mode 120000 ceph-proxy/actions/remove-cache-tier create mode 100755 ceph-proxy/actions/remove-cache-tier.py diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index f5295367..78324922 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -4,3 +4,4 @@ bin .tox *.sw[nop] *.pyc +.idea diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 42b6f4a7..ef306df2 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -3,7 +3,7 @@ PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - hooks tests unit_tests + actions hooks tests unit_tests @charm proof test: diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml index 9cb421a7..a93054bb 100644 --- a/ceph-proxy/actions.yaml +++ b/ceph-proxy/actions.yaml @@ -2,3 +2,40 @@ pause-health: description: Pause ceph health operations across the entire ceph cluster resume-health: description: Resume ceph health operations across the entire ceph cluster +create-cache-tier: + description: Create a new cache tier + params: + backer-pool: + type: string + description: | + The name of the pool that will back the cache tier. Also known as + the cold pool + cache-pool: + type: string + description: | + The name of the pool that will be the cache pool. Also known + as the hot pool + cache-mode: + type: string + default: writeback + enum: [writeback, readonly] + description: | + The mode of the caching tier. Please refer to the Ceph docs for more + information + required: [backer-pool, cache-pool] + additionalProperties: false +remove-cache-tier: + description: Remove an existing cache tier + params: + backer-pool: + type: string + description: | + The name of the pool that backs the cache tier. Also known as + the cold pool + cache-pool: + type: string + description: | + The name of the pool that is the cache pool. 
Also known + as the hot pool + required: [backer-pool, cache-pool] + additionalProperties: false diff --git a/ceph-proxy/actions/__init__.py b/ceph-proxy/actions/__init__.py new file mode 100644 index 00000000..9847ec9e --- /dev/null +++ b/ceph-proxy/actions/__init__.py @@ -0,0 +1 @@ +__author__ = 'chris' diff --git a/ceph-proxy/actions/create-cache-tier b/ceph-proxy/actions/create-cache-tier new file mode 120000 index 00000000..2a7e4346 --- /dev/null +++ b/ceph-proxy/actions/create-cache-tier @@ -0,0 +1 @@ +create-cache-tier.py \ No newline at end of file diff --git a/ceph-proxy/actions/create-cache-tier.py b/ceph-proxy/actions/create-cache-tier.py new file mode 100755 index 00000000..e8170cf2 --- /dev/null +++ b/ceph-proxy/actions/create-cache-tier.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +__author__ = 'chris' +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def make_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + cache_mode = action_get("cache-mode") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Please create {} pool before calling create-cache-tier".format( + backer_pool)) + action_fail("create-cache-tier failed. Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Please create {} pool before calling create-cache-tier".format( + cache_pool)) + action_fail("create-cache-tier failed. Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + except CalledProcessError as err: + log("Add cache tier failed with message: {}".format( + err.message)) + action_fail("create-cache-tier failed. Add cache tier failed with " + "message: {}".format(err.message)) + + +if __name__ == '__main__': + make_cache_tier() diff --git a/ceph-proxy/actions/remove-cache-tier b/ceph-proxy/actions/remove-cache-tier new file mode 120000 index 00000000..136c0f06 --- /dev/null +++ b/ceph-proxy/actions/remove-cache-tier @@ -0,0 +1 @@ +remove-cache-tier.py \ No newline at end of file diff --git a/ceph-proxy/actions/remove-cache-tier.py b/ceph-proxy/actions/remove-cache-tier.py new file mode 100755 index 00000000..79db9cf7 --- /dev/null +++ b/ceph-proxy/actions/remove-cache-tier.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + +__author__ = 'chris' + + +def delete_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Backer pool {} must exist before calling this".format( + backer_pool)) + action_fail("remove-cache-tier failed. Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Cache pool {} must exist before calling this".format( + cache_pool)) + action_fail("remove-cache-tier failed. 
Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.remove_cache_tier(cache_pool=cache_pool) + except CalledProcessError as err: + log("Removing the cache tier failed with message: {}".format( + err.message)) + action_fail("remove-cache-tier failed. Removing the cache tier failed " + "with message: {}".format(err.message)) + + +if __name__ == '__main__': + delete_cache_tier() diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index fb1bee34..826bf82a 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -163,7 +163,7 @@ def remove_cache_tier(self, cache_pool): :return: None """ # read-only is easy, writeback is much harder - mode = get_cache_mode(cache_pool) + mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -171,7 +171,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) # Flush the cache and wait for it to return - check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 4825031d..b8f21ee5 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -1,6 +1,7 @@ #!/usr/bin/python import amulet +import re import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -9,7 +10,7 @@ OpenStackAmuletUtils, DEBUG, # ERROR -) + ) # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) @@ -457,6 +458,75 @@ def test_402_pause_resume_actions(self): if 'nodown' in output or 'noout' in output: amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") + @staticmethod + def find_pool(sentry_unit, pool_name): + """ + This will do a ceph osd dump and search for pool you specify + :param sentry_unit: The unit to run this command from. + :param pool_name: str. The name of the Ceph pool to query + :return: str or None. 
The ceph pool or None if not found + """ + output, dump_code = sentry_unit.run("ceph osd dump") + if dump_code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd dump failed with output: {}".format( + output)) + for line in output.split('\n'): + match = re.search(r"pool\s+\d+\s+'(?P.*)'", line) + if match: + name = match.group('pool_name') + if name == pool_name: + return line + return None + + def test_403_cache_tier_actions(self): + """Verify that cache tier add/remove works""" + u.log.debug("Testing cache tiering") + + sentry_unit = self.ceph0_sentry + # Create our backer pool + output, code = sentry_unit.run("ceph osd pool create cold 128 128 ") + if code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd pool create cold failed with output: {}".format( + output)) + + # Create our cache pool + output, code = sentry_unit.run("ceph osd pool create hot 128 128 ") + if code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd pool create hot failed with output: {}".format( + output)) + + action_id = u.run_action(sentry_unit, + 'create-cache-tier', + params={ + 'backer-pool': 'cold', + 'cache-pool': 'hot', + 'cache-mode': 'writeback'}) + assert u.wait_on_action(action_id), \ + "Create cache tier action failed." + + pool_line = self.find_pool( + sentry_unit=sentry_unit, + pool_name='hot') + + assert "cache_mode writeback" in pool_line, \ + "cache_mode writeback not found in cache pool" + remove_action_id = u.run_action(sentry_unit, + 'remove-cache-tier', + params={ + 'backer-pool': 'cold', + 'cache-pool': 'hot'}) + assert u.wait_on_action(remove_action_id), \ + "Remove cache tier action failed" + pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot') + assert "cache_mode" not in pool_line, \ + "cache_mode is still enabled on cache pool" + def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect ceph cinder pool object count as the volume is created @@ -592,5 +662,5 @@ def test_499_ceph_cmds_exit_zero(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 2591a9b1..a967b4f8 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -781,16 +781,20 @@ def get_uuid_epoch_stamp(self): return '[{}-{}]'.format(uuid.uuid4(), time.time()) # amulet juju action helpers: - def run_action(self, unit_sentry, action, + def run_action(self, unit_sentry, action, params=None, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. + params a dict of parameters to use _check_output parameter is used for dependency injection. @return action_id. 
""" unit_id = unit_sentry.info["unit_name"] command = ["juju", "action", "do", "--format=json", unit_id, action] + if params is not None: + for key, value in params.iteritems(): + command.append("{}={}".format(key, value)) self.log.info("Running command: %s\n" % " ".join(command)) output = _check_output(command, universal_newlines=True) data = json.loads(output) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index d0c290fe..838990c1 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -19,7 +19,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests +commands = flake8 {posargs} actions hooks unit_tests tests charm proof [testenv:venv] From 2684327e88044fbe8d3f54bd88a451d0b793f140 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Sat, 5 Mar 2016 19:12:53 -0800 Subject: [PATCH 1057/2699] Add support for cache tier management This change will add two new actions to the ceph-mon charm. These actions will allow the user to create and remove cache tiers from existing pools. Both writeback and read only mode are supported. Limitations of this patch include not having fine grain control over the cache tier properties. Things like hit_set_count, bloom filter control or cache sizing are not supported yet. Change-Id: I5a37e79d0d23d35295a8ae97177c940af66b0485 --- ceph-mon/.gitignore | 1 + ceph-mon/Makefile | 2 +- ceph-mon/actions.yaml | 37 +++++++++ ceph-mon/actions/__init__.py | 1 + ceph-mon/actions/create-cache-tier | 1 + ceph-mon/actions/create-cache-tier.py | 41 ++++++++++ ceph-mon/actions/remove-cache-tier | 1 + ceph-mon/actions/remove-cache-tier.py | 41 ++++++++++ .../contrib/storage/linux/ceph.py | 4 +- ceph-mon/tests/basic_deployment.py | 76 ++++++++++++++++++- .../charmhelpers/contrib/amulet/utils.py | 6 +- ceph-mon/tox.ini | 2 +- 12 files changed, 205 insertions(+), 8 deletions(-) create mode 100644 ceph-mon/actions/__init__.py create mode 120000 ceph-mon/actions/create-cache-tier create mode 100755 ceph-mon/actions/create-cache-tier.py create mode 120000 ceph-mon/actions/remove-cache-tier create mode 100755 ceph-mon/actions/remove-cache-tier.py diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index f5295367..78324922 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -4,3 +4,4 @@ bin .tox *.sw[nop] *.pyc +.idea diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 42b6f4a7..ef306df2 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -3,7 +3,7 @@ PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - hooks tests unit_tests + actions hooks tests unit_tests @charm proof test: diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 9cb421a7..a93054bb 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -2,3 +2,40 @@ pause-health: description: Pause ceph health operations across the entire ceph cluster resume-health: description: Resume ceph health operations across the entire ceph cluster +create-cache-tier: + description: Create a new cache tier + params: + backer-pool: + type: string + description: | + The name of the pool that will back the cache tier. Also known as + the cold pool + cache-pool: + type: string + description: | + The name of the pool that will be the cache pool. Also known + as the hot pool + cache-mode: + type: string + default: writeback + enum: [writeback, readonly] + description: | + The mode of the caching tier. 
Please refer to the Ceph docs for more + information + required: [backer-pool, cache-pool] + additionalProperties: false +remove-cache-tier: + description: Remove an existing cache tier + params: + backer-pool: + type: string + description: | + The name of the pool that backs the cache tier. Also known as + the cold pool + cache-pool: + type: string + description: | + The name of the pool that is the cache pool. Also known + as the hot pool + required: [backer-pool, cache-pool] + additionalProperties: false diff --git a/ceph-mon/actions/__init__.py b/ceph-mon/actions/__init__.py new file mode 100644 index 00000000..9847ec9e --- /dev/null +++ b/ceph-mon/actions/__init__.py @@ -0,0 +1 @@ +__author__ = 'chris' diff --git a/ceph-mon/actions/create-cache-tier b/ceph-mon/actions/create-cache-tier new file mode 120000 index 00000000..2a7e4346 --- /dev/null +++ b/ceph-mon/actions/create-cache-tier @@ -0,0 +1 @@ +create-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/create-cache-tier.py b/ceph-mon/actions/create-cache-tier.py new file mode 100755 index 00000000..e8170cf2 --- /dev/null +++ b/ceph-mon/actions/create-cache-tier.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +__author__ = 'chris' +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def make_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + cache_mode = action_get("cache-mode") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Please create {} pool before calling create-cache-tier".format( + backer_pool)) + action_fail("create-cache-tier failed. Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Please create {} pool before calling create-cache-tier".format( + cache_pool)) + action_fail("create-cache-tier failed. Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + except CalledProcessError as err: + log("Add cache tier failed with message: {}".format( + err.message)) + action_fail("create-cache-tier failed. Add cache tier failed with " + "message: {}".format(err.message)) + + +if __name__ == '__main__': + make_cache_tier() diff --git a/ceph-mon/actions/remove-cache-tier b/ceph-mon/actions/remove-cache-tier new file mode 120000 index 00000000..136c0f06 --- /dev/null +++ b/ceph-mon/actions/remove-cache-tier @@ -0,0 +1 @@ +remove-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-cache-tier.py b/ceph-mon/actions/remove-cache-tier.py new file mode 100755 index 00000000..79db9cf7 --- /dev/null +++ b/ceph-mon/actions/remove-cache-tier.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + +__author__ = 'chris' + + +def delete_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Backer pool {} must exist before calling this".format( + backer_pool)) + action_fail("remove-cache-tier failed. 
Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Cache pool {} must exist before calling this".format( + cache_pool)) + action_fail("remove-cache-tier failed. Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.remove_cache_tier(cache_pool=cache_pool) + except CalledProcessError as err: + log("Removing the cache tier failed with message: {}".format( + err.message)) + action_fail("remove-cache-tier failed. Removing the cache tier failed " + "with message: {}".format(err.message)) + + +if __name__ == '__main__': + delete_cache_tier() diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index fb1bee34..826bf82a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -163,7 +163,7 @@ def remove_cache_tier(self, cache_pool): :return: None """ # read-only is easy, writeback is much harder - mode = get_cache_mode(cache_pool) + mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -171,7 +171,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) # Flush the cache and wait for it to return - check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 4825031d..b8f21ee5 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -1,6 +1,7 @@ #!/usr/bin/python import amulet +import re import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -9,7 +10,7 @@ OpenStackAmuletUtils, DEBUG, # ERROR -) + ) # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) @@ -457,6 +458,75 @@ def test_402_pause_resume_actions(self): if 'nodown' in output or 'noout' in output: amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") + @staticmethod + def find_pool(sentry_unit, pool_name): + """ + This will do a ceph osd dump and search for pool you specify + :param sentry_unit: The unit to run this command from. + :param pool_name: str. The name of the Ceph pool to query + :return: str or None. 
The ceph pool or None if not found + """ + output, dump_code = sentry_unit.run("ceph osd dump") + if dump_code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd dump failed with output: {}".format( + output)) + for line in output.split('\n'): + match = re.search(r"pool\s+\d+\s+'(?P.*)'", line) + if match: + name = match.group('pool_name') + if name == pool_name: + return line + return None + + def test_403_cache_tier_actions(self): + """Verify that cache tier add/remove works""" + u.log.debug("Testing cache tiering") + + sentry_unit = self.ceph0_sentry + # Create our backer pool + output, code = sentry_unit.run("ceph osd pool create cold 128 128 ") + if code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd pool create cold failed with output: {}".format( + output)) + + # Create our cache pool + output, code = sentry_unit.run("ceph osd pool create hot 128 128 ") + if code is not 0: + amulet.raise_status( + amulet.FAIL, + msg="ceph osd pool create hot failed with output: {}".format( + output)) + + action_id = u.run_action(sentry_unit, + 'create-cache-tier', + params={ + 'backer-pool': 'cold', + 'cache-pool': 'hot', + 'cache-mode': 'writeback'}) + assert u.wait_on_action(action_id), \ + "Create cache tier action failed." + + pool_line = self.find_pool( + sentry_unit=sentry_unit, + pool_name='hot') + + assert "cache_mode writeback" in pool_line, \ + "cache_mode writeback not found in cache pool" + remove_action_id = u.run_action(sentry_unit, + 'remove-cache-tier', + params={ + 'backer-pool': 'cold', + 'cache-pool': 'hot'}) + assert u.wait_on_action(remove_action_id), \ + "Remove cache tier action failed" + pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot') + assert "cache_mode" not in pool_line, \ + "cache_mode is still enabled on cache pool" + def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect ceph cinder pool object count as the volume is created @@ -592,5 +662,5 @@ def test_499_ceph_cmds_exit_zero(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 2591a9b1..a967b4f8 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -781,16 +781,20 @@ def get_uuid_epoch_stamp(self): return '[{}-{}]'.format(uuid.uuid4(), time.time()) # amulet juju action helpers: - def run_action(self, unit_sentry, action, + def run_action(self, unit_sentry, action, params=None, _check_output=subprocess.check_output): """Run the named action on a given unit sentry. + params a dict of parameters to use _check_output parameter is used for dependency injection. @return action_id. 
""" unit_id = unit_sentry.info["unit_name"] command = ["juju", "action", "do", "--format=json", unit_id, action] + if params is not None: + for key, value in params.iteritems(): + command.append("{}={}".format(key, value)) self.log.info("Running command: %s\n" % " ".join(command)) output = _check_output(command, universal_newlines=True) data = json.loads(output) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index d0c290fe..838990c1 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -19,7 +19,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests +commands = flake8 {posargs} actions hooks unit_tests tests charm proof [testenv:venv] From 4c1dc289e954a146d8530b8421c839becfec8a68 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 1 Mar 2016 11:03:18 -0800 Subject: [PATCH 1058/2699] Rolling upgrades for Ceph Monitor Cluster This change adds functionality to allow the ceph monitor cluster to upgrade in a serial rolled fashion. This will use the ceph monitor cluster itself as a locking mechanism and only allows 1 ceph monitor at a time to upgrade. If a monitor has been waiting on the previous server for more than 10 minutes and hasn't seen it finish it will assume it died during the upgrade and proceed with its own upgrade. Limitations of this patch: As long as the monitor cluster does not split brain this should work fine. Also this assumes that NTP among the ceph cluster is fairly accurate. Change-Id: I7254261b6206f0ec34a8aa1e94e7b06ae308d8f8 --- ceph-proxy/.gitignore | 1 + ceph-proxy/hooks/ceph.py | 81 +++++-- ceph-proxy/hooks/ceph_hooks.py | 221 +++++++++++++++++- .../contrib/storage/linux/ceph.py | 131 ++++++++++- ceph-proxy/unit_tests/test_upgrade_roll.py | 136 +++++++++++ 5 files changed, 538 insertions(+), 32 deletions(-) create mode 100644 ceph-proxy/unit_tests/test_upgrade_roll.py diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index f5295367..9ed8f1c5 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -3,4 +3,5 @@ bin .testrepository .tox *.sw[nop] +.idea *.pyc diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 5523b08a..01a2a569 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -1,4 +1,3 @@ - # # Copyright 2012 Canonical Ltd. # @@ -6,35 +5,32 @@ # James Page # Paul Collins # - import json import subprocess import time import os import re import sys + +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, + is_device_mounted) from charmhelpers.core.host import ( mkdir, chownr, service_restart, - cmp_pkgrevno, - lsb_release -) + lsb_release, + cmp_pkgrevno) from charmhelpers.core.hookenv import ( log, ERROR, - WARNING, cached, status_set, -) + WARNING) from charmhelpers.fetch import ( apt_cache ) -from charmhelpers.contrib.storage.linux.utils import ( - zap_disk, - is_block_device, - is_device_mounted, -) from utils import ( get_unit_hostname, ) @@ -53,8 +49,32 @@ def ceph_user(): return "root" +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of monitor identifiers :raise: OSError if + something goes wrong with listing the directory. 
+ """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + def get_version(): - '''Derive Ceph release from an installed package.''' + """Derive Ceph release from an installed package.""" import apt_pkg as apt cache = apt_cache() @@ -63,7 +83,7 @@ def get_version(): pkg = cache[package] except: # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ + e = 'Could not determine version of package with no installation ' \ 'candidate: %s' % package error_out(e) @@ -165,6 +185,7 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) + DISK_FORMATS = [ 'xfs', 'ext4', @@ -178,7 +199,7 @@ def is_osd_disk(dev): info = info.split("\n") # IGNORE:E1103 for line in info: if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -213,7 +234,7 @@ def is_bootstrapped(): def wait_for_bootstrap(): - while (not is_bootstrapped()): + while not is_bootstrapped(): time.sleep(3) @@ -243,7 +264,6 @@ def generate_monitor_secret(): return "{}==".format(res.split('=')[1].strip()) - # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -310,6 +330,9 @@ def import_radosgw_key(key): 'mon': ['allow rw'], 'osd': ['allow rwx'] } +_upgrade_caps = { + 'mon': ['allow rwx'] +} def get_radosgw_key(): @@ -321,6 +344,26 @@ def get_radosgw_key(): 'osd': ['allow rwx'] } +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + def get_named_key(name, caps=None): caps = caps or _default_caps @@ -346,7 +389,7 @@ def get_named_key(name, caps=None): def upgrade_key_caps(key, caps): - ''' Upgrade key to have capabilities caps ''' + """ Upgrade key to have capabilities caps """ if not is_leader(): # Not the MON leader OR not clustered return @@ -440,7 +483,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if (is_osd_disk(dev) and not reformat_osd): + if is_osd_disk(dev) and not reformat_osd: log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index cdf4c5b2..354c155c 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -10,10 +10,17 @@ import glob import os +import random import shutil +import socket +import subprocess import sys import uuid +import time + import ceph +from charmhelpers.core import host +from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, DEBUG, @@ -29,15 +36,14 @@ service_name, relations_of_type, status_set, - local_unit -) + local_unit) from charmhelpers.core.host import ( service_restart, mkdir, write_file, rsync, - cmp_pkgrevno -) + cmp_pkgrevno, + 
service_stop, service_start) from charmhelpers.fetch import ( apt_install, apt_update, @@ -52,7 +58,11 @@ ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render - +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get, + get_mon_map) from utils import ( get_networks, get_public_addr, @@ -61,7 +71,6 @@ from ceph_broker import ( process_requests ) - from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() @@ -71,6 +80,186 @@ STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' +# A dict of valid ceph upgrade paths. Mapping is old -> new +upgrade_paths = { + 'cloud:trusty-juno': 'cloud:trusty-kilo', + 'cloud:trusty-kilo': 'cloud:trusty-liberty', + 'cloud:trusty-liberty': 'cloud:trusty-mitaka', +} + + +def pretty_print_upgrade_paths(): + lines = [] + for key, value in upgrade_paths.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def check_for_upgrade(): + release_info = host.lsb_release() + if not release_info['DISTRIB_CODENAME'] == 'trusty': + log("Invalid upgrade path from {}. Only trusty is currently " + "supported".format(release_info['DISTRIB_CODENAME'])) + return + + c = hookenv.config() + old_version = c.previous('source') + log('old_version: {}'.format(old_version)) + # Strip all whitespace + new_version = hookenv.config('source') + if new_version: + # replace all whitespace + new_version = new_version.replace(' ', '') + log('new_version: {}'.format(new_version)) + + if old_version in upgrade_paths: + if new_version == upgrade_paths[old_version]: + log("{} to {} is a valid upgrade path. Proceeding.".format( + old_version, new_version)) + roll_monitor_cluster(new_version) + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. " + "Valid paths are: {}".format(old_version, + new_version, + pretty_print_upgrade_paths())) + + +def lock_and_roll(my_name): + start_timestamp = time.time() + + log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) + monitor_key_set('admin', "{}_start".format(my_name), start_timestamp) + log("Rolling") + # This should be quick + upgrade_monitor() + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) + monitor_key_set('admin', "{}_done".format(my_name), stop_timestamp) + + +def wait_on_previous_node(previous_node): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + 'admin', + "{}_done".format(previous_node)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + 'admin', + "{}_start".format(previous_node)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. 
current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + 'admin', + "{}_done".format(previous_node)) + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +def roll_monitor_cluster(new_version): + """ + This is tricky to get right so here's what we're going to do. + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous monitor is upgraded yet. + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(my_name=my_name) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(previous_node=mon_sorted_list[position - 1]) + lock_and_roll(my_name=my_name) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(): + current_version = ceph.get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if ceph.systemd(): + for mon_id in ceph.get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=ceph.PACKAGES, fatal=True) + if ceph.systemd(): + for mon_id in ceph.get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + status_set("active", "") + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -123,6 +312,7 @@ def emit_cephconf(): install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) + JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' @@ -131,6 +321,9 @@ def config_changed(): if config('prefer-ipv6'): assert_charm_supports_ipv6() + # Check if an upgrade was requested + check_for_upgrade() + log('Monitor 
hosts are ' + repr(get_mon_hosts())) sysctl_dict = config('sysctl') @@ -165,7 +358,7 @@ def config_changed(): emit_cephconf() # Support use of single node ceph - if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1: status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() @@ -188,10 +381,10 @@ def get_mon_hosts(): def get_peer_units(): - ''' + """ Returns a dictionary of unit names from the mon peer relation with a flag indicating whether the unit has presented its address - ''' + """ units = {} units[local_unit()] = True for relid in relation_ids('mon'): @@ -206,8 +399,7 @@ def mon_relation_joined(): public_addr = get_public_addr() for relid in relation_ids('mon'): relation_set(relation_id=relid, - relation_settings={'ceph-public-address': - public_addr}) + relation_settings={'ceph-public-address': public_addr}) @hooks.hook('mon-relation-departed', @@ -250,7 +442,7 @@ def notify_client(): def upgrade_keys(): - ''' Ceph now required mon allow rw for pool creation ''' + """ Ceph now required mon allow rw for pool creation """ if len(relation_ids('radosgw')) > 0: ceph.upgrade_key_caps('client.radosgw.gateway', ceph._radosgw_caps) @@ -272,6 +464,8 @@ def osd_relation(relid=None): 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), 'ceph-public-address': public_addr, + 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', + caps=ceph.osd_upgrade_caps), } relation_set(relation_id=relid, relation_settings=data) @@ -430,6 +624,9 @@ def assess_status(): # Unit should be running and clustered, but no quorum # TODO: should this be blocked or waiting? status_set('blocked', 'Unit not clustered (no quorum)') + # If there's a pending lock for this unit, + # can i get the lock? + # reboot the ceph-mon process if __name__ == '__main__': diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index fb1bee34..2903670c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -24,6 +24,8 @@ # Adam Gandelman # import bisect +import errno +import hashlib import six import os @@ -163,7 +165,7 @@ def remove_cache_tier(self, cache_pool): :return: None """ # read-only is easy, writeback is much harder - mode = get_cache_mode(cache_pool) + mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -259,6 +261,133 @@ def create(self): Returns json formatted output""" +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output( + ['ceph', '--id', service, + 'mon_status', '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. 
Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format( + e.message)) + raise + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + def get_erasure_profile(service, name): """ :param service: six.string_types. 
The Ceph user name to run the command under diff --git a/ceph-proxy/unit_tests/test_upgrade_roll.py b/ceph-proxy/unit_tests/test_upgrade_roll.py new file mode 100644 index 00000000..8af24ac5 --- /dev/null +++ b/ceph-proxy/unit_tests/test_upgrade_roll.py @@ -0,0 +1,136 @@ +__author__ = 'chris' +import time + +from mock import patch, call, MagicMock +import sys + +sys.path.append('/home/chris/repos/ceph-mon/hooks') + +import test_utils +import ceph_hooks + +TO_PATCH = [ + 'hookenv', + 'status_set', + 'config', + 'ceph', + 'log', + 'add_source', + 'apt_update', + 'apt_install', + 'service_stop', + 'service_start', + 'host', +] + + +def config_side_effect(*args): + if args[0] == 'source': + return 'cloud:trusty-kilo' + elif args[0] == 'key': + return 'key' + elif args[0] == 'release-version': + return 'cloud:trusty-kilo' + + +previous_node_start_time = time.time() - (9 * 60) + + +def monitor_key_side_effect(*args): + if args[1] == \ + 'ip-192-168-1-2_done': + return False + elif args[1] == \ + 'ip-192-168-1-2_start': + # Return that the previous node started 9 minutes ago + return previous_node_start_time + + +class UpgradeRollingTestCase(test_utils.CharmTestCase): + def setUp(self): + super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) + + @patch('ceph_hooks.roll_monitor_cluster') + def test_check_for_upgrade(self, roll_monitor_cluster): + self.host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + self.hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + ceph_hooks.check_for_upgrade() + + roll_monitor_cluster.assert_called_with('cloud:trusty-kilo') + + @patch('ceph_hooks.upgrade_monitor') + @patch('ceph_hooks.monitor_key_set') + def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): + monitor_key_set.monitor_key_set.return_value = None + ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') + upgrade_monitor.assert_called_once_with() + + def test_upgrade_monitor(self): + self.config.side_effect = config_side_effect + self.ceph.get_version.return_value = "0.80" + self.ceph.systemd.return_value = False + ceph_hooks.upgrade_monitor() + self.service_stop.assert_called_with('ceph-mon-all') + self.service_start.assert_called_with('ceph-mon-all') + self.status_set.assert_has_calls([ + call('maintenance', 'Upgrading monitor'), + call('active', '') + ]) + + @patch('ceph_hooks.lock_and_roll') + @patch('ceph_hooks.wait_on_previous_node') + @patch('ceph_hooks.get_mon_map') + @patch('ceph_hooks.socket') + def test_roll_monitor_cluster_second(self, + socket, + get_mon_map, + wait_on_previous_node, + lock_and_roll): + wait_on_previous_node.return_value = None + socket.gethostname.return_value = "ip-192-168-1-3" + get_mon_map.return_value = { + 'monmap': { + 'mons': [ + { + 'name': 'ip-192-168-1-2', + }, + { + 'name': 'ip-192-168-1-3', + }, + ] + } + } + ceph_hooks.roll_monitor_cluster('0.94.1') + self.status_set.assert_called_with( + 'blocked', + 'Waiting on ip-192-168-1-2 to finish upgrading') + lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + + @patch('ceph_hooks.monitor_key_get') + @patch('ceph_hooks.monitor_key_exists') + def test_wait_on_previous_node(self, + monitor_key_exists, + monitor_key_get): + monitor_key_get.side_effect = monitor_key_side_effect + monitor_key_exists.return_value = False + + ceph_hooks.wait_on_previous_node("ip-192-168-1-2") + + # Make sure we checked to see if the previous node started + 
monitor_key_get.assert_has_calls( + [call('admin', 'ip-192-168-1-2_start')] + ) + # Make sure we checked to see if the previous node was finished + monitor_key_exists.assert_has_calls( + [call('admin', 'ip-192-168-1-2_done')] + ) + # Make sure we waited at last once before proceeding + self.log.assert_has_calls( + [call('Previous node is: ip-192-168-1-2')], + [call('ip-192-168-1-2 is not finished. Waiting')], + ) From f28112c73be7cda821aeed8d593f3d9decc21276 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 1 Mar 2016 11:03:18 -0800 Subject: [PATCH 1059/2699] Rolling upgrades for Ceph Monitor Cluster This change adds functionality to allow the ceph monitor cluster to upgrade in a serial rolled fashion. This will use the ceph monitor cluster itself as a locking mechanism and only allows 1 ceph monitor at a time to upgrade. If a monitor has been waiting on the previous server for more than 10 minutes and hasn't seen it finish it will assume it died during the upgrade and proceed with its own upgrade. Limitations of this patch: As long as the monitor cluster does not split brain this should work fine. Also this assumes that NTP among the ceph cluster is fairly accurate. Change-Id: I7254261b6206f0ec34a8aa1e94e7b06ae308d8f8 --- ceph-mon/.gitignore | 1 + ceph-mon/hooks/ceph.py | 81 +++++-- ceph-mon/hooks/ceph_hooks.py | 221 +++++++++++++++++- .../contrib/storage/linux/ceph.py | 131 ++++++++++- ceph-mon/unit_tests/test_upgrade_roll.py | 136 +++++++++++ 5 files changed, 538 insertions(+), 32 deletions(-) create mode 100644 ceph-mon/unit_tests/test_upgrade_roll.py diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index f5295367..9ed8f1c5 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -3,4 +3,5 @@ bin .testrepository .tox *.sw[nop] +.idea *.pyc diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 5523b08a..01a2a569 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -1,4 +1,3 @@ - # # Copyright 2012 Canonical Ltd. # @@ -6,35 +5,32 @@ # James Page # Paul Collins # - import json import subprocess import time import os import re import sys + +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, + is_device_mounted) from charmhelpers.core.host import ( mkdir, chownr, service_restart, - cmp_pkgrevno, - lsb_release -) + lsb_release, + cmp_pkgrevno) from charmhelpers.core.hookenv import ( log, ERROR, - WARNING, cached, status_set, -) + WARNING) from charmhelpers.fetch import ( apt_cache ) -from charmhelpers.contrib.storage.linux.utils import ( - zap_disk, - is_block_device, - is_device_mounted, -) from utils import ( get_unit_hostname, ) @@ -53,8 +49,32 @@ def ceph_user(): return "root" +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of monitor identifiers :raise: OSError if + something goes wrong with listing the directory. 
+ """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + def get_version(): - '''Derive Ceph release from an installed package.''' + """Derive Ceph release from an installed package.""" import apt_pkg as apt cache = apt_cache() @@ -63,7 +83,7 @@ def get_version(): pkg = cache[package] except: # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ + e = 'Could not determine version of package with no installation ' \ 'candidate: %s' % package error_out(e) @@ -165,6 +185,7 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) + DISK_FORMATS = [ 'xfs', 'ext4', @@ -178,7 +199,7 @@ def is_osd_disk(dev): info = info.split("\n") # IGNORE:E1103 for line in info: if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -213,7 +234,7 @@ def is_bootstrapped(): def wait_for_bootstrap(): - while (not is_bootstrapped()): + while not is_bootstrapped(): time.sleep(3) @@ -243,7 +264,6 @@ def generate_monitor_secret(): return "{}==".format(res.split('=')[1].strip()) - # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -310,6 +330,9 @@ def import_radosgw_key(key): 'mon': ['allow rw'], 'osd': ['allow rwx'] } +_upgrade_caps = { + 'mon': ['allow rwx'] +} def get_radosgw_key(): @@ -321,6 +344,26 @@ def get_radosgw_key(): 'osd': ['allow rwx'] } +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + def get_named_key(name, caps=None): caps = caps or _default_caps @@ -346,7 +389,7 @@ def get_named_key(name, caps=None): def upgrade_key_caps(key, caps): - ''' Upgrade key to have capabilities caps ''' + """ Upgrade key to have capabilities caps """ if not is_leader(): # Not the MON leader OR not clustered return @@ -440,7 +483,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if (is_osd_disk(dev) and not reformat_osd): + if is_osd_disk(dev) and not reformat_osd: log('Looks like {} is already an OSD, skipping.'.format(dev)) return diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index cdf4c5b2..354c155c 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -10,10 +10,17 @@ import glob import os +import random import shutil +import socket +import subprocess import sys import uuid +import time + import ceph +from charmhelpers.core import host +from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, DEBUG, @@ -29,15 +36,14 @@ service_name, relations_of_type, status_set, - local_unit -) + local_unit) from charmhelpers.core.host import ( service_restart, mkdir, write_file, rsync, - cmp_pkgrevno -) + cmp_pkgrevno, + 
service_stop, service_start) from charmhelpers.fetch import ( apt_install, apt_update, @@ -52,7 +58,11 @@ ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render - +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get, + get_mon_map) from utils import ( get_networks, get_public_addr, @@ -61,7 +71,6 @@ from ceph_broker import ( process_requests ) - from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() @@ -71,6 +80,186 @@ STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' +# A dict of valid ceph upgrade paths. Mapping is old -> new +upgrade_paths = { + 'cloud:trusty-juno': 'cloud:trusty-kilo', + 'cloud:trusty-kilo': 'cloud:trusty-liberty', + 'cloud:trusty-liberty': 'cloud:trusty-mitaka', +} + + +def pretty_print_upgrade_paths(): + lines = [] + for key, value in upgrade_paths.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def check_for_upgrade(): + release_info = host.lsb_release() + if not release_info['DISTRIB_CODENAME'] == 'trusty': + log("Invalid upgrade path from {}. Only trusty is currently " + "supported".format(release_info['DISTRIB_CODENAME'])) + return + + c = hookenv.config() + old_version = c.previous('source') + log('old_version: {}'.format(old_version)) + # Strip all whitespace + new_version = hookenv.config('source') + if new_version: + # replace all whitespace + new_version = new_version.replace(' ', '') + log('new_version: {}'.format(new_version)) + + if old_version in upgrade_paths: + if new_version == upgrade_paths[old_version]: + log("{} to {} is a valid upgrade path. Proceeding.".format( + old_version, new_version)) + roll_monitor_cluster(new_version) + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. " + "Valid paths are: {}".format(old_version, + new_version, + pretty_print_upgrade_paths())) + + +def lock_and_roll(my_name): + start_timestamp = time.time() + + log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) + monitor_key_set('admin', "{}_start".format(my_name), start_timestamp) + log("Rolling") + # This should be quick + upgrade_monitor() + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) + monitor_key_set('admin', "{}_done".format(my_name), stop_timestamp) + + +def wait_on_previous_node(previous_node): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + 'admin', + "{}_done".format(previous_node)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + 'admin', + "{}_start".format(previous_node)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. 
current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + 'admin', + "{}_done".format(previous_node)) + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +def roll_monitor_cluster(new_version): + """ + This is tricky to get right so here's what we're going to do. + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous monitor is upgraded yet. + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(my_name=my_name) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(previous_node=mon_sorted_list[position - 1]) + lock_and_roll(my_name=my_name) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(): + current_version = ceph.get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if ceph.systemd(): + for mon_id in ceph.get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=ceph.PACKAGES, fatal=True) + if ceph.systemd(): + for mon_id in ceph.get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + status_set("active", "") + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -123,6 +312,7 @@ def emit_cephconf(): install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) + JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' @@ -131,6 +321,9 @@ def config_changed(): if config('prefer-ipv6'): assert_charm_supports_ipv6() + # Check if an upgrade was requested + check_for_upgrade() + log('Monitor 
hosts are ' + repr(get_mon_hosts())) sysctl_dict = config('sysctl') @@ -165,7 +358,7 @@ def config_changed(): emit_cephconf() # Support use of single node ceph - if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1): + if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1: status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() @@ -188,10 +381,10 @@ def get_mon_hosts(): def get_peer_units(): - ''' + """ Returns a dictionary of unit names from the mon peer relation with a flag indicating whether the unit has presented its address - ''' + """ units = {} units[local_unit()] = True for relid in relation_ids('mon'): @@ -206,8 +399,7 @@ def mon_relation_joined(): public_addr = get_public_addr() for relid in relation_ids('mon'): relation_set(relation_id=relid, - relation_settings={'ceph-public-address': - public_addr}) + relation_settings={'ceph-public-address': public_addr}) @hooks.hook('mon-relation-departed', @@ -250,7 +442,7 @@ def notify_client(): def upgrade_keys(): - ''' Ceph now required mon allow rw for pool creation ''' + """ Ceph now required mon allow rw for pool creation """ if len(relation_ids('radosgw')) > 0: ceph.upgrade_key_caps('client.radosgw.gateway', ceph._radosgw_caps) @@ -272,6 +464,8 @@ def osd_relation(relid=None): 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': config('auth-supported'), 'ceph-public-address': public_addr, + 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', + caps=ceph.osd_upgrade_caps), } relation_set(relation_id=relid, relation_settings=data) @@ -430,6 +624,9 @@ def assess_status(): # Unit should be running and clustered, but no quorum # TODO: should this be blocked or waiting? status_set('blocked', 'Unit not clustered (no quorum)') + # If there's a pending lock for this unit, + # can i get the lock? + # reboot the ceph-mon process if __name__ == '__main__': diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index fb1bee34..2903670c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -24,6 +24,8 @@ # Adam Gandelman # import bisect +import errno +import hashlib import six import os @@ -163,7 +165,7 @@ def remove_cache_tier(self, cache_pool): :return: None """ # read-only is easy, writeback is much harder - mode = get_cache_mode(cache_pool) + mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -259,6 +261,133 @@ def create(self): Returns json formatted output""" +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output( + ['ceph', '--id', service, + 'mon_status', '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. 
Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format( + e.message)) + raise + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + def get_erasure_profile(service, name): """ :param service: six.string_types. 
The Ceph user name to run the command under diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py new file mode 100644 index 00000000..8af24ac5 --- /dev/null +++ b/ceph-mon/unit_tests/test_upgrade_roll.py @@ -0,0 +1,136 @@ +__author__ = 'chris' +import time + +from mock import patch, call, MagicMock +import sys + +sys.path.append('/home/chris/repos/ceph-mon/hooks') + +import test_utils +import ceph_hooks + +TO_PATCH = [ + 'hookenv', + 'status_set', + 'config', + 'ceph', + 'log', + 'add_source', + 'apt_update', + 'apt_install', + 'service_stop', + 'service_start', + 'host', +] + + +def config_side_effect(*args): + if args[0] == 'source': + return 'cloud:trusty-kilo' + elif args[0] == 'key': + return 'key' + elif args[0] == 'release-version': + return 'cloud:trusty-kilo' + + +previous_node_start_time = time.time() - (9 * 60) + + +def monitor_key_side_effect(*args): + if args[1] == \ + 'ip-192-168-1-2_done': + return False + elif args[1] == \ + 'ip-192-168-1-2_start': + # Return that the previous node started 9 minutes ago + return previous_node_start_time + + +class UpgradeRollingTestCase(test_utils.CharmTestCase): + def setUp(self): + super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) + + @patch('ceph_hooks.roll_monitor_cluster') + def test_check_for_upgrade(self, roll_monitor_cluster): + self.host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + self.hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + ceph_hooks.check_for_upgrade() + + roll_monitor_cluster.assert_called_with('cloud:trusty-kilo') + + @patch('ceph_hooks.upgrade_monitor') + @patch('ceph_hooks.monitor_key_set') + def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): + monitor_key_set.monitor_key_set.return_value = None + ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') + upgrade_monitor.assert_called_once_with() + + def test_upgrade_monitor(self): + self.config.side_effect = config_side_effect + self.ceph.get_version.return_value = "0.80" + self.ceph.systemd.return_value = False + ceph_hooks.upgrade_monitor() + self.service_stop.assert_called_with('ceph-mon-all') + self.service_start.assert_called_with('ceph-mon-all') + self.status_set.assert_has_calls([ + call('maintenance', 'Upgrading monitor'), + call('active', '') + ]) + + @patch('ceph_hooks.lock_and_roll') + @patch('ceph_hooks.wait_on_previous_node') + @patch('ceph_hooks.get_mon_map') + @patch('ceph_hooks.socket') + def test_roll_monitor_cluster_second(self, + socket, + get_mon_map, + wait_on_previous_node, + lock_and_roll): + wait_on_previous_node.return_value = None + socket.gethostname.return_value = "ip-192-168-1-3" + get_mon_map.return_value = { + 'monmap': { + 'mons': [ + { + 'name': 'ip-192-168-1-2', + }, + { + 'name': 'ip-192-168-1-3', + }, + ] + } + } + ceph_hooks.roll_monitor_cluster('0.94.1') + self.status_set.assert_called_with( + 'blocked', + 'Waiting on ip-192-168-1-2 to finish upgrading') + lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + + @patch('ceph_hooks.monitor_key_get') + @patch('ceph_hooks.monitor_key_exists') + def test_wait_on_previous_node(self, + monitor_key_exists, + monitor_key_get): + monitor_key_get.side_effect = monitor_key_side_effect + monitor_key_exists.return_value = False + + ceph_hooks.wait_on_previous_node("ip-192-168-1-2") + + # Make sure we checked to see if the previous node started + 
monitor_key_get.assert_has_calls( + [call('admin', 'ip-192-168-1-2_start')] + ) + # Make sure we checked to see if the previous node was finished + monitor_key_exists.assert_has_calls( + [call('admin', 'ip-192-168-1-2_done')] + ) + # Make sure we waited at last once before proceeding + self.log.assert_has_calls( + [call('Previous node is: ip-192-168-1-2')], + [call('ip-192-168-1-2 is not finished. Waiting')], + ) From e798a6d3d893a3f1a2a2e5f5f2577dd3f83ace12 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 9 Mar 2016 17:17:40 -0800 Subject: [PATCH 1060/2699] Add multiple pool support This patch adds support for radosgw to be configured to talk to a pool other than the default. By setting the pool-prefix the patch will create those pools after being related to the ceph cluster. This code follows the general format laid out on ceph' s wiki for federated rados gateways. Change-Id: If5e873647be136cd374f4bc8755a05f26a7399dc --- ceph-radosgw/.gitignore | 1 + ceph-radosgw/config.yaml | 10 + ceph-radosgw/hooks/ceph.py | 14 +- .../contrib/openstack/amulet/utils.py | 46 +- .../charmhelpers/contrib/openstack/utils.py | 661 ++++++++++++++---- .../contrib/storage/linux/ceph.py | 132 +++- .../hooks/charmhelpers/core/hookenv.py | 31 + ceph-radosgw/hooks/charmhelpers/core/host.py | 50 +- ceph-radosgw/hooks/hooks.py | 4 +- .../contrib/openstack/amulet/utils.py | 46 +- ceph-radosgw/unit_tests/test_ceph.py | 73 +- 11 files changed, 907 insertions(+), 161 deletions(-) diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index 25d8aecb..8521ed00 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -5,3 +5,4 @@ bin tags *.sw[nop] *.pyc +.idea diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 60b500c4..077341fc 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -165,3 +165,13 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + pool-prefix: + type: string + default: + description: | + The rados gateway stores objects in many different pools. If you would + like to have multiple rados gateways each pointing to a separate set of + pools set this prefix. The charm will then set up a new set of pools. + If your prefix has a dash in it that will be used to split the prefix + into region and zone. Please read the documentation on federated rados + gateways for more information on region and zone. diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 335716c2..56c76186 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -1,4 +1,3 @@ - # # Copyright 2012 Canonical Ltd. # @@ -93,6 +92,7 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) + DISK_FORMATS = [ 'xfs', 'ext4', @@ -106,7 +106,7 @@ def is_osd_disk(dev): info = info.split("\n") # IGNORE:E1103 for line in info: if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' + 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' ): return True except subprocess.CalledProcessError: @@ -229,8 +229,9 @@ def get_named_key(name, caps=None): return key -def get_create_rgw_pools_rq(): - """Pre-create RGW pools so that they have the correct settings. +def get_create_rgw_pools_rq(prefix): + """Pre-create RGW pools so that they have the correct settings. 
This + will prepend a prefix onto the pools if specified in the config.yaml When RGW creates its own pools it will create them with non-optimal settings (LP: #1476749). @@ -267,6 +268,11 @@ def get_create_rgw_pools_rq(): '.users.uid'] pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: + if prefix: + pool = "{prefix}{pool}".format( + prefix=prefix, + pool=pool) + rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num) return rq diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 388b60e6..2995124d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,10 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + import novaclient.v1_1.client as nova_client import pika import swiftclient @@ -139,7 +143,7 @@ def validate_role_data(self, expected, actual): return "role {} does not exist".format(e['name']) return ret - def validate_user_data(self, expected, actual): + def validate_user_data(self, expected, actual, api_version=None): """Validate user data. Validate a list of actual user data vs a list of expected user @@ -150,10 +154,15 @@ def validate_user_data(self, expected, actual): for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 2: + a['tenantId'] = act.tenantId + else: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +197,30 @@ def authenticate_cinder_admin(self, keystone_sentry, username, return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" diff 
--git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 80dd2e0d..68eb27e1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -24,6 +24,7 @@ import sys import re import itertools +import functools import six import tempfile @@ -69,7 +70,15 @@ pip_install, ) -from charmhelpers.core.host import lsb_release, mounts, umount, service_running +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + restart_on_change_helper, +) from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -763,7 +772,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, os.mkdir(parent_dir) juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) + repo_dir = install_remote( + repo, dest=parent_dir, branch=branch, depth=depth) venv = os.path.join(parent_dir, 'venv') @@ -862,66 +872,155 @@ def wrapped_f(*args, **kwargs): return wrap -def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None): +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message """ - Set workload status based on complete contexts. - status-set missing or incomplete contexts - and juju-log details of missing required data. - charm_func is a charm specific function to run checking - for charm specific requirements such as a VIP setting. - - This function also checks for whether the services defined are ACTUALLY - running and that the ports they advertise are open and being listened to. - - @param services - OPTIONAL: a [{'service': , 'ports': []] - The ports are optional. - If services is a [] then ports are ignored. - @param ports - OPTIONAL: an [] representing ports that shoudl be - open. - @returns None + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. 
If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + if the unit isn't supposed to be paused, just return None, None + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None """ - incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) - state = 'active' - missing_relations = [] - incomplete_relations = [] + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. + + - Checks for missing or incomplete contexts + - juju log details of missing required data. + - determines the correct workload status + - creates an appropriate message for status_set(...) + + if there are no problems then the function returns None, None + + @param configs: a templating.OSConfigRenderer() object + @params required_interfaces: {generic_interface: [specific_interface], } + @returns state, message or None, None + """ + incomplete_rel_data = incomplete_relation_data(configs, + required_interfaces) + state = None message = None - charm_state = None - charm_message = None + missing_relations = set() + incomplete_relations = set() - for generic_interface in incomplete_rel_data.keys(): + for generic_interface, relations_states in incomplete_rel_data.items(): related_interface = None missing_data = {} # Related or not? 
-        for interface in incomplete_rel_data[generic_interface]:
-            if incomplete_rel_data[generic_interface][interface].get('related'):
+        for interface, relation_state in relations_states.items():
+            if relation_state.get('related'):
                 related_interface = interface
-                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
-        # No relation ID for the generic_interface
+                missing_data = relation_state.get('missing_data')
+                break
+        # No relation ID for the generic_interface?
         if not related_interface:
             juju_log("{} relation is missing and must be related for "
                      "functionality. ".format(generic_interface), 'WARN')
             state = 'blocked'
-            if generic_interface not in missing_relations:
-                missing_relations.append(generic_interface)
+            missing_relations.add(generic_interface)
         else:
-            # Relation ID exists but no related unit
+            # Relation ID exists but no related unit
             if not missing_data:
-                # Edge case relation ID exists but departing
-                if ('departed' in hook_name() or 'broken' in hook_name()) \
-                        and related_interface in hook_name():
+                # Edge case - relation ID exists but departing
+                _hook_name = hook_name()
+                if (('departed' in _hook_name or 'broken' in _hook_name) and
+                        related_interface in _hook_name):
                     state = 'blocked'
-                    if generic_interface not in missing_relations:
-                        missing_relations.append(generic_interface)
+                    missing_relations.add(generic_interface)
                     juju_log("{} relation's interface, {}, "
                              "relationship is departed or broken "
                              "and is required for functionality."
-                             "".format(generic_interface, related_interface), "WARN")
+                             "".format(generic_interface, related_interface),
+                             "WARN")
                 # Normal case relation ID exists but no related unit
                 # (joining)
                 else:
-                    juju_log("{} relations's interface, {}, is related but has "
-                             "no units in the relation."
-                             "".format(generic_interface, related_interface), "INFO")
+                    juju_log("{} relation's interface, {}, is related but has"
+                             " no units in the relation."
+                             "".format(generic_interface, related_interface),
+                             "INFO")
             # Related unit exists and data missing on the relation
             else:
                 juju_log("{} relation's interface, {}, is related awaiting "
@@ -930,9 +1029,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
                          ", ".join(missing_data)), "INFO")
                 if state != 'blocked':
                     state = 'waiting'
-                if generic_interface not in incomplete_relations \
-                        and generic_interface not in missing_relations:
-                    incomplete_relations.append(generic_interface)
+                if generic_interface not in missing_relations:
+                    incomplete_relations.add(generic_interface)

     if missing_relations:
         message = "Missing relations: {}".format(", ".join(missing_relations))
@@ -945,9 +1043,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
                      "".format(", ".join(incomplete_relations))
         state = 'waiting'

-    # Run charm specific checks
-    if charm_func:
-        charm_state, charm_message = charm_func(configs)
+    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+    """Run a custom check function for the charm to see if it wants to
+    change the state. This is only run if not in 'maintenance' and
+    tests to see if the new state is more important than the previous
+    one determined by the interfaces/relations check.
+
+    @param state: the previously determined state so far.
+    @param message: the user orientated message so far.
+    @param charm_func: a callable function that returns state, message
+    @returns state, message strings.
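+
+    For illustration only (the helper below is hypothetical, not part of
+    this patch), a charm-specific check handed in via charm_func might be:
+
+        def _check_vip(configs):
+            if config('vip') and not is_vip_configured():
+                return 'blocked', 'vip set but not configured'
+            return 'active', ''
+
+    which the caller wraps as: lambda: _check_vip(configs).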
+ """ + if charm_func_with_configs: + charm_state, charm_message = charm_func_with_configs() if charm_state != 'active' and charm_state != 'unknown': state = workload_state_compare(state, charm_state) if message: @@ -956,72 +1067,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic message = "{}, {}".format(message, charm_message) else: message = charm_message + return state, message + - # If the charm thinks the unit is active, check that the actual services - # really are active. - if services is not None and state == 'active': - # if we're passed the dict() then just grab the values as a list. - if isinstance(services, dict): - services = services.values() - # either extract the list of services from the dictionary, or if - # it is a simple string, use that. i.e. works with mixed lists. - _s = [] - for s in services: - if isinstance(s, dict) and 'service' in s: - _s.append(s['service']) - if isinstance(s, str): - _s.append(s) - services_running = [service_running(s) for s in _s] - if not all(services_running): - not_running = [s for s, running in zip(_s, services_running) - if not running] - message = ("Services not running that should be: {}" - .format(", ".join(not_running))) +def _ows_check_services_running(services, ports): + """Check that the services that should be running are actually running + and that any ports specified are being listened to. + + @param services: list of strings OR dictionary specifying services/ports + @param ports: list of ports + @returns state, message: strings or None, None + """ + messages = [] + state = None + if services is not None: + services = _extract_services_list_helper(services) + services_running, running = _check_running_services(services) + if not all(running): + messages.append( + "Services not running that should be: {}" + .format(", ".join(_filter_tuples(services_running, False)))) state = 'blocked' # also verify that the ports that should be open are open # NB, that ServiceManager objects only OPTIONALLY have ports - port_map = OrderedDict([(s['service'], s['ports']) - for s in services if 'ports' in s]) - if state == 'active' and port_map: - all_ports = list(itertools.chain(*port_map.values())) - ports_open = [port_has_listener('0.0.0.0', p) - for p in all_ports] - if not all(ports_open): - not_opened = [p for p, opened in zip(all_ports, ports_open) - if not opened] - map_not_open = OrderedDict() - for service, ports in port_map.items(): - closed_ports = set(ports).intersection(not_opened) - if closed_ports: - map_not_open[service] = closed_ports - # find which service has missing ports. They are in service - # order which makes it a bit easier. - message = ( - "Services with ports not open that should be: {}" - .format( - ", ".join([ - "{}: [{}]".format( - service, - ", ".join([str(v) for v in ports])) - for service, ports in map_not_open.items()]))) - state = 'blocked' - - if ports is not None and state == 'active': - # and we can also check ports which we don't know the service for - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + map_not_open, ports_open = ( + _check_listening_on_services_ports(services)) if not all(ports_open): - message = ( + # find which service has missing ports. They are in service + # order which makes it a bit easier. 
+ message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( "Ports which should be open, but are not: {}" - .format(", ".join([str(p) for p, v in zip(ports, ports_open) + .format(", ".join([str(p) for p, v in ports_open if not v]))) state = 'blocked' - # Set to active if all requirements have been met - if state == 'active': - message = "Unit is ready" - juju_log(message, "INFO") + if state is not None: + message = "; ".join(messages) + return state, message - status_set(state, message) + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. + + @param services: OrderedDict of strings: [ports], one for each service to + check. + @returns [(service, boolean), ...], : results for checks + [boolean] : just the result of the service checks + """ + services_running = [service_running(s) for s in services] + return list(zip(services, services_running)), services_running + + +def _check_listening_on_services_ports(services, test=False): + """Check that the unit is actually listening (has the port open) on the + ports that the service specifies are open. If test is True then the + function returns the services with ports that are open rather than + closed. + + Returns an OrderedDict of service: ports and a list of booleans + + @param services: OrderedDict(service: [port, ...], ...) + @param test: default=False, if False, test for closed, otherwise open. 
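+    (With test=True the function instead returns the services whose ports
+    ARE open; check_actually_paused() uses this to flag leftover
+    listeners on a paused unit.)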
+ @returns OrderedDict(service: [port-not-open, ...]...), [boolean] + """ + test = not(not(test)) # ensure test is True or False + all_ports = list(itertools.chain(*services.values())) + ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + map_ports = OrderedDict() + matched_ports = [p for p, opened in zip(all_ports, ports_states) + if opened == test] # essentially opened xor test + for service, ports in services.items(): + set_ports = set(ports).intersection(matched_ports) + if set_ports: + map_ports[service] = set_ports + return map_ports, ports_states + + +def _check_listening_on_ports_list(ports): + """Check that the ports list given are being listened to + + Returns a list of ports being listened to and a list of the + booleans. + + @param ports: LIST or port numbers. + @returns [(port_num, boolean), ...], [boolean] + """ + ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + return zip(ports, ports_open), ports_open + + +def _filter_tuples(services_states, state): + """Return a simple list from a list of tuples according to the condition + + @param services_states: LIST of (string, boolean): service and running + state. + @param state: Boolean to match the tuple against. + @returns [LIST of strings] that matched the tuple RHS. + """ + return [s for s, b in services_states if b == state] def workload_state_compare(current_workload_state, workload_state): @@ -1046,8 +1236,7 @@ def workload_state_compare(current_workload_state, workload_state): def incomplete_relation_data(configs, required_interfaces): - """ - Check complete contexts against required_interfaces + """Check complete contexts against required_interfaces Return dictionary of incomplete relation data. configs is an OSConfigRenderer object with configs registered @@ -1072,19 +1261,13 @@ def incomplete_relation_data(configs, required_interfaces): 'shared-db': {'related': True}}} """ complete_ctxts = configs.complete_contexts() - incomplete_relations = [] - for svc_type in required_interfaces.keys(): - # Avoid duplicates - found_ctxt = False - for interface in required_interfaces[svc_type]: - if interface in complete_ctxts: - found_ctxt = True - if not found_ctxt: - incomplete_relations.append(svc_type) - incomplete_context_data = {} - for i in incomplete_relations: - incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) - return incomplete_context_data + incomplete_relations = [ + svc_type + for svc_type, interfaces in required_interfaces.items() + if not set(interfaces).intersection(complete_ctxts)] + return { + i: configs.get_incomplete_context_data(required_interfaces[i]) + for i in incomplete_relations} def do_action_openstack_upgrade(package, upgrade_callback, configs): @@ -1145,3 +1328,245 @@ def remote_restart(rel_name, remote_service=None): relation_set(relation_id=rid, relation_settings=trigger, ) + + +def check_actually_paused(services=None, ports=None): + """Check that services listed in the services object and and ports + are actually closed (not listened to), to verify that the unit is + properly paused. 
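+    (This is the inverse of the _ows_check_services_running() check: any
+    service found running, or any port with a listener, is reported as a
+    problem here.)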
+ + @param services: See _extract_services_list_helper + @returns status, : string for status (None if okay) + message : string for problem for status_set + """ + state = None + message = None + messages = [] + if services is not None: + services = _extract_services_list_helper(services) + services_running, services_states = _check_running_services(services) + if any(services_states): + # there shouldn't be any running so this is a problem + messages.append("these services running: {}" + .format(", ".join( + _filter_tuples(services_running, True)))) + state = "blocked" + ports_open, ports_open_bools = ( + _check_listening_on_services_ports(services, True)) + if any(ports_open_bools): + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in ports_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "these service:ports are open: {}".format(message)) + state = 'blocked' + if ports is not None: + ports_open, bools = _check_listening_on_ports_list(ports) + if any(bools): + messages.append( + "these ports which should be closed, but are open: {}" + .format(", ".join([str(p) for p, v in ports_open if v]))) + state = 'blocked' + if messages: + message = ("Services should be paused but {}" + .format(", ".join(messages))) + return state, message + + +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-paused'))) + except: + return False + + +def pause_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Pause a unit by stopping the services and setting 'unit-paused' + in the local kv() store. + + Also checks that the services have stopped and ports are no longer + being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None, None to indicate that the unit + didn't pause cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm pausing. + @returns None + @raises Exception(message) on an error for action_fail(). 
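+
+    Illustrative call from a pause action (CONFIGS and REQUIRED_INTERFACES
+    are assumed charm-level names, not defined in this patch):
+
+        pause_unit(make_assess_status_func(CONFIGS, REQUIRED_INTERFACES),
+                   services=['radosgw'])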
+ """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + stopped = service_pause(service) + if not stopped: + messages.append("{} didn't stop cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + message.append(str(e)) + set_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't pause: {}".format("; ".join(messages))) + + +def resume_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Resume a unit by starting the services and clearning 'unit-paused' + in the local kv() store. + + Also checks that the services have started and ports are being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None to indicate that the unit + didn't resume cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are started, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm resuming. + @returns None + @raises Exception(message) on an error for action_fail(). + """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + started = service_resume(service) + if not started: + messages.append("{} didn't start cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + message.append(str(e)) + clear_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't resume: {}".format("; ".join(messages))) + + +def make_assess_status_func(*args, **kwargs): + """Creates an assess_status_func() suitable for handing to pause_unit() + and resume_unit(). + + This uses the _determine_os_workload_status(...) function to determine + what the workload_status should be for the unit. If the unit is + not in maintenance or active states, then the message is returned to + the caller. This is so an action that doesn't result in either a + complete pause or complete resume can signal failure with an action_fail() + """ + def _assess_status_func(): + state, message = _determine_os_workload_status(*args, **kwargs) + status_set(state, message) + if state not in ['maintenance', 'active']: + return message + return None + + return _assess_status_func + + +def pausable_restart_on_change(restart_map, stopstart=False): + """A restart_on_change decorator that checks to see if the unit is + paused. If it is paused then the decorated function doesn't fire. + + This is provided as a helper, as the @restart_on_change(...) decorator + is in core.host, yet the openstack specific helpers are in this file + (contrib.openstack.utils). Thus, this needs to be an optional feature + for openstack charms (or charms that wish to use the openstack + pause/resume type features). 
+ + It is used as follows: + + from contrib.openstack.utils import ( + pausable_restart_on_change as restart_on_change) + + @restart_on_change(restart_map, stopstart=) + def some_hook(...): + pass + + see core.utils.restart_on_change() for more details. + + @param f: the function to decorate + @param restart_map: the restart map {conf_file: [services]} + @param stopstart: DEFAULT false; whether to stop, start or just restart + @returns decorator to use a restart_on_change with pausability + """ + def wrap(f): + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + if is_unit_paused_set(): + return f(*args, **kwargs) + # otherwise, normal restart_on_change functionality + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart) + return wrapped_f + return wrap diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index fb1bee34..f4582545 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -24,6 +24,8 @@ # Adam Gandelman # import bisect +import errno +import hashlib import six import os @@ -163,7 +165,7 @@ def remove_cache_tier(self, cache_pool): :return: None """ # read-only is easy, writeback is much harder - mode = get_cache_mode(cache_pool) + mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -259,6 +261,134 @@ def create(self): Returns json formatted output""" +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output( + ['ceph', '--id', service, + 'mon_status', '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format( + e.message)) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. 
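+    (For reference, the body below shells out to the equivalent of:
+        ceph --id <service> config-key del <key>)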
+ """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + def get_erasure_profile(service, name): """ :param service: six.string_types. The Ceph user name to run the command under diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 2dd70bc9..01321296 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" @@ -976,3 +994,16 @@ def _run_atexit(): for callback, args, kwargs in reversed(_atexit): callback(*args, **kwargs) del _atexit[:] + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get_primary_address(binding): + ''' + Retrieve the primary network address for a named binding + + :param binding: string. The name of a relation of extra-binding + :return: string. 
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).strip() diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index a7720906..481087bb 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -30,6 +30,8 @@ import string import subprocess import hashlib +import functools +import itertools from contextlib import contextmanager from collections import OrderedDict @@ -428,27 +430,47 @@ def config_changed(): restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. + + @param restart_map: {path_file_name: [service_name, ...] + @param stopstart: DEFAULT false; whether to stop, start OR restart + @returns result from decorated function """ def wrap(f): + @functools.wraps(f) def wrapped_f(*args, **kwargs): - checksums = {path: path_hash(path) for path in restart_map} - f(*args, **kwargs) - restarts = [] - for path in restart_map: - if path_hash(path) != checksums[path]: - restarts += restart_map[path] - services_list = list(OrderedDict.fromkeys(restarts)) - if not stopstart: - for service_name in services_list: - service('restart', service_name) - else: - for action in ['stop', 'start']: - for service_name in services_list: - service(action, service_name) + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart) return wrapped_f return wrap +def restart_on_change_helper(lambda_f, restart_map, stopstart=False): + """Helper function to perform the restart_on_change function. + + This is provided for decorators to restart services if files described + in the restart_map have changed after an invocation of lambda_f(). + + @param lambda_f: function to call. + @param restart_map: {file: [service, ...]} + @param stopstart: whether to stop, start or restart a service + @returns result of lambda_f() + """ + checksums = {path: path_hash(path) for path in restart_map} + r = lambda_f() + # create a list of lists of the services to restart + restarts = [restart_map[path] + for path in restart_map + if path_hash(path) != checksums[path]] + # create a flat list of ordered services without duplicates from lists + services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) + if services_list: + actions = ('stop', 'start') if stopstart else ('restart',) + for action in actions: + for service_name in services_list: + service(action, service_name) + return r + + def lsb_release(): """Return /etc/lsb-release in a dict""" d = {} diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 54ecf6e2..2a0d558c 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -138,6 +138,7 @@ def setup_keystone_certs(unit=None, rid=None): Get CA and signing certs from Keystone used to decrypt revoked token list. 
""" import requests + try: # Kilo and newer from keystoneclient.exceptions import ( @@ -270,7 +271,8 @@ def config_changed(): 'mon-relation-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(): - rq = ceph.get_create_rgw_pools_rq() + rq = ceph.get_create_rgw_pools_rq( + prefix=config('pool-prefix')) if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all() diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 388b60e6..2995124d 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,10 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + import novaclient.v1_1.client as nova_client import pika import swiftclient @@ -139,7 +143,7 @@ def validate_role_data(self, expected, actual): return "role {} does not exist".format(e['name']) return ret - def validate_user_data(self, expected, actual): + def validate_user_data(self, expected, actual, api_version=None): """Validate user data. Validate a list of actual user data vs a list of expected user @@ -150,10 +154,15 @@ def validate_user_data(self, expected, actual): for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 2: + a['tenantId'] = act.tenantId + else: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +197,30 @@ def authenticate_cinder_admin(self, keystone_sentry, username, return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" diff --git 
a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 2e2d1bf1..c068d621 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -1,4 +1,5 @@ import ceph +from mock import patch, call from test_utils import CharmTestCase @@ -10,8 +11,14 @@ ] -class CephRadosGWCephTests(CharmTestCase): +def config_side_effect(*args): + if args[0] == 'ceph-osd-replication-count': + return 3 + elif args[0] == 'rgw-lightweight-pool-pg-num': + return 10 + +class CephRadosGWCephTests(CharmTestCase): def setUp(self): super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) @@ -65,6 +72,7 @@ def test_wait_for_quorum_yes(self): def quorum(): return results.pop() + _is_quorum = self.patch('is_quorum') _is_quorum.side_effect = quorum ceph.wait_for_quorum() @@ -81,6 +89,7 @@ def test_wait_for_bootstrap(self): def bootstrapped(): return results.pop() + _is_bootstrapped = self.patch('is_bootstrapped') _is_bootstrapped.side_effect = bootstrapped ceph.wait_for_bootstrap() @@ -195,3 +204,65 @@ def test_get_named_key_get(self): 'mon', 'allow r', 'osd', 'allow rwx' ] self.subprocess.check_output.assert_called_with(cmd) + + @patch('ceph.CephBrokerRq') + @patch('ceph.config') + def test_create_rgw_pools_rq_with_prefix(self, config, broker): + config.side_effect = config_side_effect + ceph.get_create_rgw_pools_rq(prefix='us-east') + broker.assert_has_calls([ + call().add_op_create_pool( + replica_count=3, name='.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.root'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.control'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.gc'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.buckets.index'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.log'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='us-east.intent-log.usage'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='us-east.users.users.email.users.swift.users.uid')] + ) + + @patch('ceph.CephBrokerRq') + @patch('ceph.config') + def test_create_rgw_pools_rq_without_prefix(self, config, broker): + config.side_effect = config_side_effect + ceph.get_create_rgw_pools_rq(prefix=None) + broker.assert_has_calls([ + call().add_op_create_pool( + replica_count=3, name='.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.root'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.control'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.gc'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.buckets.index'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.rgw.buckets.extra'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.log'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.intent-log.usage'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='.users.users.email.users.swift.users.uid')] + ) From 
d022a8a30434d85ff5f1be1219b128ae2ed56bef Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 15 Mar 2016 20:08:25 -0700 Subject: [PATCH 1061/2699] Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. Change-Id: I9fc18109335953f2354fd2797da11c9979824d4b --- ceph-proxy/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index ef306df2..6751aafc 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -2,14 +2,11 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - actions hooks tests unit_tests - @charm proof + @tox -e pep8 test: - @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests + @tox -e py27 functional_test: @echo Starting Amulet tests... From 292d8eaa6cdc2cc07e4ca3182d472db12c2c852c Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 15 Mar 2016 20:08:25 -0700 Subject: [PATCH 1062/2699] Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. Change-Id: I9fc18109335953f2354fd2797da11c9979824d4b --- ceph-mon/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index ef306df2..6751aafc 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -2,14 +2,11 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - actions hooks tests unit_tests - @charm proof + @tox -e pep8 test: - @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests + @tox -e py27 functional_test: @echo Starting Amulet tests... From 864db41cc431ff4c0b95b81535161a71b2fb83c1 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 15 Mar 2016 20:08:30 -0700 Subject: [PATCH 1063/2699] Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. Change-Id: I7216b8338ca3f548b6b373821d2bf9a4dca37286 --- ceph-osd/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 38a0db81..c6109cc8 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -2,14 +2,11 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - hooks tests unit_tests - @charm proof + @tox -e pep8 test: - @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + @tox -e py27 functional_test: @echo Starting Amulet tests... From 15faf44cb4f98e5c033f01f909c1161ecd4db34c Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 15 Mar 2016 20:08:35 -0700 Subject: [PATCH 1064/2699] Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. 
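Both Makefile targets are now thin wrappers, so the same checks can be driven without make at all; a minimal sketch (assuming tox is installed and the charm's tox.ini defines the pep8 and py27 environments, as these diffs do):

    import subprocess

    # Invoke the tox environments the Makefile targets now wrap:
    # 'pep8' backs `make lint` and 'py27' backs `make test`.
    for env in ('pep8', 'py27'):
        subprocess.check_call(['tox', '-e', env])
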
Change-Id: Iee67a6bf266c8a7ded52fe1bdfde4ce59e5bea8e --- ceph-radosgw/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 7ada5682..cfe78e3d 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -2,14 +2,11 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ - hooks tests unit_tests - @charm proof + @tox -e pep8 test: - @# Bundletester expects unit tests here. @echo Starting unit tests... - @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests + @tox -e py27 functional_test: @echo Starting Amulet tests... From 43df71247efbcb0ff925b0f4cbbff28e1f64293a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 16 Mar 2016 17:36:08 +0000 Subject: [PATCH 1065/2699] Update amulet test to include a non-existent osd-devices value The osd-devices charm config option is a whitelist, and the charm needs to gracefully handle items in that whitelist which may not exist. Change-Id: Ieac69a69b74a13db192e4f0fb4fc49a2f83b3a4a --- ceph-proxy/tests/basic_deployment.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index b8f21ee5..1b24e60b 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -86,10 +86,12 @@ def _configure_services(self): 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', } + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, From 9157c2e9733181b514880edc848cb404c704e03e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 16 Mar 2016 17:36:08 +0000 Subject: [PATCH 1066/2699] Update amulet test to include a non-existent osd-devices value The osd-devices charm config option is a whitelist, and the charm needs to gracefully handle items in that whitelist which may not exist. Change-Id: Ieac69a69b74a13db192e4f0fb4fc49a2f83b3a4a --- ceph-mon/tests/basic_deployment.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index b8f21ee5..1b24e60b 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -86,10 +86,12 @@ def _configure_services(self): 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', } + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, From d5fdeb769883ca417cc1cf0f9eaf08bde97fd841 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 16 Mar 2016 17:58:22 +0000 Subject: [PATCH 1067/2699] Update amulet test to include a non-existent osd-devices value The osd-devices charm config option is a whitelist, and the charm needs to gracefully handle items in that whitelist which may not exist. 
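Treating osd-devices as a whitelist means the charm must quietly skip entries that are absent on a given unit; a minimal sketch of that filtering (a hypothetical helper for illustration, not the charm's actual implementation):

    import os

    def existing_osd_devices(whitelist):
        """Return only the whitelisted devices present on this unit."""
        return [dev for dev in whitelist.split() if os.path.exists(dev)]

    # Yields ['/dev/vdb', '/srv/ceph'] when /dev/test-non-existent is absent.
    print(existing_osd_devices('/dev/vdb /srv/ceph /dev/test-non-existent'))
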
Change-Id: Iea212ef0e0987767e0e666ee2e30a59d4bef189a --- ceph-osd/tests/basic_deployment.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index c320cf1c..49a10b11 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -90,10 +90,13 @@ def _configure_services(self): 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph' } + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, From 1613c9642f701cbdd410c3b65cd1c4a51095e4c4 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 15 Mar 2016 17:05:41 +0000 Subject: [PATCH 1068/2699] Add NRPE support Add support for the nrpe subordinate. Check definitions are passed from this charm to the nrpe charm. Change-Id: Icfeddc6ccbec2869a6f0880b4be0f67289d3b745 Closes-Bug: 1557611 --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + ceph-radosgw/config.yaml | 16 + .../contrib/charmsupport/__init__.py | 15 + .../charmhelpers/contrib/charmsupport/nrpe.py | 398 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 175 ++++++++ .../contrib/openstack/amulet/utils.py | 13 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 2 +- ceph-radosgw/hooks/hooks.py | 17 + .../nrpe-external-master-relation-changed | 1 + .../nrpe-external-master-relation-joined | 1 + ceph-radosgw/hooks/utils.py | 8 + ceph-radosgw/metadata.yaml | 3 + .../contrib/openstack/amulet/utils.py | 13 +- ceph-radosgw/unit_tests/test_hooks.py | 4 +- 15 files changed, 656 insertions(+), 13 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 ceph-radosgw/hooks/nrpe-external-master-relation-changed create mode 120000 ceph-radosgw/hooks/nrpe-external-master-relation-joined diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index f8185cbb..9f626c62 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -15,3 +15,4 @@ include: - contrib.openstack.ip - contrib.storage.linux - contrib.python.packages + - contrib.charmsupport diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 077341fc..114998cb 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -175,3 +175,19 @@ options: If your prefix has a dash in it that will be used to split the prefix into region and zone. Please read the documentation on federated rados gateways for more information on region and zone. + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + nagios_servicegroups: + default: "" + type: string + description: | + A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..2f246429 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,398 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import glob +import shutil +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, + relations_of_type, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. 
Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + + def write(self, nagios_context, hostname, nagios_servicegroups): + nrpe_check_file = self._get_check_filename() + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) + + def write_service_config(self, nagios_context, 
hostname, + nagios_servicegroups): + self._remove_service_files() + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_servicegroups, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = self._get_service_filename(hostname) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self, hostname=None): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = self.nagios_context + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. 
+ if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_host_context' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + # Don't add a check for these services from neutron-gateway + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + 
os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..320961b9 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,175 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas:: + + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as specified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'false' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration.
+ +Usage:: + + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def pre_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) + +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Persistent storage was requested, but there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes.
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 2995124d..ef3bdccf 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -31,7 +31,7 @@ from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 -import novaclient.v1_1.client as nova_client +import novaclient.client as nova_client import pika import swiftclient @@ -42,6 +42,8 @@ DEBUG = logging.DEBUG ERROR = logging.ERROR +NOVA_CLIENT_VERSION = "2" + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -157,12 +159,12 @@ def validate_user_data(self, expected, actual, api_version=None): if e['name'] == act.name: a = {'enabled': act.enabled, 'name': act.name, 'email': act.email, 'id': act.id} - if api_version == 2: - a['tenantId'] = act.tenantId - else: + if api_version == 3: a['default_project_id'] = getattr(act, 'default_project_id', 'none') + else: + a['tenantId'] = act.tenantId found = True ret = self._validate_dict_data(e, a) if ret: @@ -249,7 +251,8 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 68eb27e1..3fb67b10 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -137,7 +137,7 @@ ('liberty', ['2.3.0', '2.4.0', '2.5.0']), ('mitaka', - ['2.5.0']), + ['2.5.0', '2.6.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index f4582545..1b4b1de7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -173,7 +173,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) # Flush the cache and wait for it to return - check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 
remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 2a0d558c..03aecfcb 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -68,7 +68,9 @@ REQUIRED_INTERFACES, check_optional_relations, setup_ipv6, + services, ) +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() CONFIGS = register_configs() @@ -265,6 +267,7 @@ def config_changed(): # Ensure started but do a soft reload subprocess.call(['service', 'apache2', 'start']) subprocess.call(['service', 'apache2', 'reload']) + update_nrpe_config() @hooks.hook('mon-relation-departed', @@ -432,6 +435,20 @@ def ha_relation_changed(): identity_joined(relid=r_id) +@hooks.hook('nrpe-external-master-relation-joined', + 'nrpe-external-master-relation-changed') +def update_nrpe_config(): + # python-dbus is used by check_upstart_job + apt_install('python-dbus') + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.copy_nrpe_checks() + nrpe.add_init_service_checks(nrpe_setup, services(), current_unit) + nrpe.add_haproxy_checks(nrpe_setup, current_unit) + nrpe_setup.write() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/nrpe-external-master-relation-changed b/ceph-radosgw/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/nrpe-external-master-relation-joined b/ceph-radosgw/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 08d53260..0e7f4c71 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -124,6 +124,14 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +def services(): + ''' Returns a list of services associated with this charm ''' + _services = [] + for v in BASE_RESOURCE_MAP.values(): + _services.extend(v.get('services', [])) + return list(set(_services)) + + def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a442d5d8..6399b759 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -21,6 +21,9 @@ requires: interface: hacluster scope: container provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container gateway: interface: http peers: diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 2995124d..ef3bdccf 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -31,7 +31,7 @@ from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 -import novaclient.v1_1.client as nova_client +import novaclient.client as nova_client import pika import swiftclient @@ -42,6 +42,8 @@ DEBUG = logging.DEBUG ERROR = logging.ERROR
+NOVA_CLIENT_VERSION = "2" + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -157,12 +159,12 @@ def validate_user_data(self, expected, actual, api_version=None): if e['name'] == act.name: a = {'enabled': act.enabled, 'name': act.name, 'email': act.email, 'id': act.id} - if api_version == 2: - a['tenantId'] = act.tenantId - else: + if api_version == 3: a['default_project_id'] = getattr(act, 'default_project_id', 'none') + else: + a['tenantId'] = act.tenantId found = True ret = self._validate_dict_data(e, a) if ret: @@ -249,7 +251,8 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 44fd7a87..737ee0d3 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -112,12 +112,14 @@ def test_install(self): self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') + @patch.object(ceph_hooks, 'update_nrpe_config') @patch.object(ceph_hooks, 'mkdir', lambda *args: None) - def test_config_changed(self): + def test_config_changed(self, update_nrpe_config): _install_packages = self.patch('install_packages') ceph_hooks.config_changed() self.assertTrue(_install_packages.called) self.CONFIGS.write_all.assert_called_with() + update_nrpe_config.assert_called_with() @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) From c66b67375b3dd23ff1ba18225c08abe3e56a7dc9 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 2 Mar 2016 15:12:31 -0800 Subject: [PATCH 1069/2699] Add support for replacing a failed OSD drive This patch adds an action to replace a hard drive for a particular osd server. The user executing the action will give the OSD number and also the device name of the replacement drive. The rest is taken care of by the action. The action will attempt to go through all the osd removal steps for the failed drive. It will force unmount the drive and, if that fails, it will lazy unmount the drive. This force and then lazy pattern comes from experience with dead hard drives not behaving nicely with umount.
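The removal steps the action performs mirror the standard manual OSD replacement procedure; a condensed sketch of the ceph CLI sequence involved (the forced/lazy unmount and the re-provisioning of the new disk appear in the diff below):

    import subprocess

    def drop_dead_osd(osd_number):
        """Remove a failed OSD from the cluster before re-provisioning."""
        osd = 'osd.{}'.format(osd_number)
        for cmd in (['ceph', 'osd', 'out', osd],            # start rebalancing
                    ['ceph', 'osd', 'crush', 'remove', osd],
                    ['ceph', 'auth', 'del', osd],           # revoke its keys
                    ['ceph', 'osd', 'rm', osd]):
            subprocess.check_output(cmd)
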
Change-Id: I914cd484280ac3f9b9f1fad8b35ee53e92438a0a --- ceph-osd/.gitignore | 1 + ceph-osd/actions.yaml | 12 +++ ceph-osd/actions/__init__.py | 3 + ceph-osd/actions/replace-osd | 1 + ceph-osd/actions/replace_osd.py | 84 ++++++++++++++++++ ceph-osd/hooks/ceph.py | 104 +++++++++++++++++++++- ceph-osd/tox.ini | 2 +- ceph-osd/unit_tests/__init__.py | 1 + ceph-osd/unit_tests/test_replace_osd.py | 113 ++++++++++++++++++++++++ 9 files changed, 316 insertions(+), 5 deletions(-) create mode 100644 ceph-osd/actions.yaml create mode 100644 ceph-osd/actions/__init__.py create mode 120000 ceph-osd/actions/replace-osd create mode 100755 ceph-osd/actions/replace_osd.py create mode 100644 ceph-osd/unit_tests/test_replace_osd.py diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index d1b248ee..31c3f033 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -4,4 +4,5 @@ .testrepository bin *.sw[nop] +.idea *.pyc diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml new file mode 100644 index 00000000..be586451 --- /dev/null +++ b/ceph-osd/actions.yaml @@ -0,0 +1,12 @@ +replace-osd: + description: Replace a failed osd with a fresh disk + params: + osd-number: + type: integer + description: The osd number to operate on, for example 99. You can get this information from `ceph osd tree`. + replacement-device: + type: string + description: The replacement device to use, for example /dev/sdb. + required: [osd-number, replacement-device] + additionalProperties: false + diff --git a/ceph-osd/actions/__init__.py b/ceph-osd/actions/__init__.py new file mode 100644 index 00000000..ff2381cc --- /dev/null +++ b/ceph-osd/actions/__init__.py @@ -0,0 +1,3 @@ +__author__ = 'chris' +import sys +sys.path.append('hooks') diff --git a/ceph-osd/actions/replace-osd b/ceph-osd/actions/replace-osd new file mode 120000 index 00000000..d9f1a694 --- /dev/null +++ b/ceph-osd/actions/replace-osd @@ -0,0 +1 @@ +replace_osd.py \ No newline at end of file diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py new file mode 100755 index 00000000..fd4264f4 --- /dev/null +++ b/ceph-osd/actions/replace_osd.py @@ -0,0 +1,84 @@ +#!/usr/bin/python + +from charmhelpers.core.hookenv import action_get, log, config, action_fail + +__author__ = 'chris' + +import os +import sys + +sys.path.append('hooks') + +import ceph + +""" +Given an OSD number this script will attempt to turn that back into a mount +point and then replace the OSD with a new one. +""" + + +def get_disk_stats(): + try: + # https://www.kernel.org/doc/Documentation/iostats.txt + with open('/proc/diskstats', 'r') as diskstats: + return diskstats.readlines() + except IOError as err: + log('Could not open /proc/diskstats. Error: {}'.format(err.message)) + action_fail('replace-osd failed because /proc/diskstats could not ' + 'be opened {}'.format(err.message)) + return None + + +def lookup_device_name(major_number, minor_number): + """ + + :param major_number: int. The major device number + :param minor_number: int. The minor device number + :return: string. The name of the device. Example: /dev/sda. + Returns None on error. + """ + diskstats = get_disk_stats() + for line in diskstats: + parts = line.split() + if not len(parts) > 3: + # Skip bogus lines + continue + try: + if int(parts[0]) == major_number and int(parts[1]) == \ + minor_number: + # Found our device. Return its name + return parts[2] + except ValueError as value_err: + log('Could not convert {} or {} into an integer.
Error: {}' + .format(parts[0], parts[1], value_err.message)) + continue + return None + + +def get_device_number(osd_number): + """ + This function will return a tuple of (major_number, minor_number) + device number for the given osd. + :param osd_number: int + :rtype : (major_number,minor_number) + """ + path = "/var/lib/ceph/osd/ceph-{}".format(osd_number) + info = os.lstat(path) + major_number = os.major(info.st_dev) + minor_number = os.minor(info.st_dev) + return major_number, minor_number + + +if __name__ == '__main__': + dead_osd_number = action_get("osd-number") + replacement_device = action_get("replacement-device") + major, minor = get_device_number(dead_osd_number) + device_name = lookup_device_name(major, minor) + osd_format = config('osd-format') + osd_journal = config('osd-journal') + + ceph.replace_osd(dead_osd_number=dead_osd_number, + dead_osd_device="/dev/{}".format(device_name), + new_osd_device=replacement_device, + osd_format=osd_format, + osd_journal=osd_journal) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 40e33597..51b06ac8 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -1,4 +1,3 @@ - # # Copyright 2012 Canonical Ltd. # @@ -6,19 +5,24 @@ # James Page # Paul Collins # - +import ctypes +import ctypes.util +import errno import json import subprocess import time import os import re import sys +import shutil +from charmhelpers.cli.host import mounts from charmhelpers.core.host import ( mkdir, chownr, service_restart, cmp_pkgrevno, - lsb_release + lsb_release, + service_stop ) from charmhelpers.core.hookenv import ( log, @@ -64,7 +68,7 @@ def get_version(): pkg = cache[package] except: # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ + e = 'Could not determine version of package with no installation ' \ 'candidate: %s' % package error_out(e) @@ -165,6 +169,7 @@ def add_bootstrap_hint(peer): # Ignore any errors for this call subprocess.call(cmd) + DISK_FORMATS = [ 'xfs', 'ext4', @@ -178,6 +183,97 @@ def add_bootstrap_hint(peer): ] +def umount(mount_point): + """ + This function unmounts a mounted directory forcibly. This will + be used for unmounting broken hard drive mounts which may hang. + If umount returns EBUSY this will lazy unmount. + :param mount_point: str. A String representing the filesystem mount point + :return: int. Returns 0 on success. errno otherwise. + """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """ + This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. 
Example: /dev/sda + :param new_osd_device: The replacement device. Example: /dev/sdb + :param osd_format: The filesystem to format the new osd with + :param osd_journal: The journal device or path for the new osd + :param reformat_osd: Whether to reformat a device that already has a filesystem + :param ignore_errors: Whether osdize should ignore provisioning errors + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output(['ceph', 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]) + # umount if still mounted + ret = umount(mount_point) + if ret != 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output(['ceph', 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output(['ceph', 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output(['ceph', 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + def is_osd_disk(dev): try: info = subprocess.check_output(['sgdisk', '-i', '1', dev]) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index e8bf7cf9..be053f44 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -18,7 +18,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests +commands = flake8 {posargs} actions hooks unit_tests tests charm proof [testenv:venv] diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index f80aab3d..466d7781 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -1,2 +1,3 @@ import sys sys.path.append('hooks') +sys.path.append('actions') diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py new file mode 100644 index 00000000..827a0ff8 --- /dev/null +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -0,0 +1,113 @@ +import errno +import posix + +from mock import call, Mock, patch + +import test_utils +import ceph +import replace_osd + +TO_PATCH = [ + 'ctypes', + 'status_set', +] + +proc_data = [ + ' 8 0 sda 2291336 263100 108136080 1186276 28844343 28798167 ' + '2145908072 49433216 0 7550032 50630100\n', + ' 8 1 sda1 1379 1636 8314 692 75 17 1656 0 0 496 692\n', + ' 8 2 sda2 1 0 2 0 0 0 0 0 0 0 0\n', +] + + +def umount_busy(*args): + # MNT_FORCE + if args[1] == 1: + return -1 + # MNT_DETACH + if args[1] == 2: + return 0 + + +class ReplaceOsdTestCase(test_utils.CharmTestCase): + def setUp(self): + super(ReplaceOsdTestCase, self).setUp(ceph, TO_PATCH) + + def test_umount_ebusy(self): + self.ctypes.util.find_library.return_value = 'libc.so.6' + umount_mock = Mock() + self.ctypes.CDLL.return_value = umount_mock + umount_mock.umount.side_effect = umount_busy + self.ctypes.get_errno.return_value = errno.EBUSY + + ret =
ceph.umount('/some/osd/mount') + umount_mock.assert_has_calls([ + call.umount('/some/osd/mount', 1), + call.umount('/some/osd/mount', 2), + ]) + assert ret == 0 + + def test_umount(self): + self.ctypes.util.find_library.return_value = 'libc.so.6' + umount_mock = Mock() + self.ctypes.CDLL.return_value = umount_mock + umount_mock.umount.return_value = 0 + + ret = ceph.umount('/some/osd/mount') + umount_mock.assert_has_calls([ + call.umount('/some/osd/mount', 1), + ]) + assert ret == 0 + + @patch('ceph.mounts') + @patch('ceph.subprocess') + @patch('ceph.umount') + @patch('ceph.osdize') + @patch('ceph.shutil') + @patch('ceph.systemd') + def test_replace_osd(self, + systemd, + shutil, + osdize, + umount, + subprocess, + mounts): + mounts.return_value = [['/var/lib/ceph/osd/ceph-a', '/dev/sda']] + subprocess.check_output.return_value = True + self.status_set.return_value = None + systemd.return_value = False + umount.return_value = 0 + osdize.return_value = None + shutil.rmtree.return_value = None + ceph.replace_osd(dead_osd_number=0, + dead_osd_device='/dev/sda', + new_osd_device='/dev/sdb', + osd_format=True, + osd_journal=None, + reformat_osd=False, + ignore_errors=False) + subprocess.check_output.assert_has_calls( + [ + call(['ceph', 'osd', 'out', 'osd.0']), + call(['stop', 'ceph-osd', 'id=0']), + call(['ceph', 'osd', 'crush', 'remove', 'osd.0']), + call(['ceph', 'auth', 'del', 'osd.0']), + call(['ceph', 'osd', 'rm', 'osd.0']) + ] + ) + + @patch('replace_osd.get_disk_stats') + def test_lookup_device_name(self, disk_stats): + disk_stats.return_value = proc_data + dev_name = replace_osd.lookup_device_name(major_number=8, + minor_number=0) + assert dev_name == 'sda', "dev_name: {}".format(dev_name) + + @patch('replace_osd.os.lstat') + def test_get_device_number(self, lstat): + lstat.return_value = posix.stat_result([ + 16877, 16, 51729L, 3, 0, 0, 217, 0, 1458086872, 1458086872 + ]) + major, minor = replace_osd.get_device_number(1) + assert major == 202 + assert minor == 17 From 205c30affd49ce086a47226318f845e437d1812a Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 16 Mar 2016 16:13:25 -0400 Subject: [PATCH 1070/2699] add juju availability zone to ceph osd location when present The approach here is to use the availability zone as an imaginary rack. All hosts that are in the same AZ will be in the same imaginary rack. From Ceph's perspective this doesn't matter as it's just a bucket after all. This will give users the ability to further customize their ceph deployment. 
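The rendered ceph.conf entry reduces to a single formatted string; a minimal sketch of how it is assembled (mirroring the hook change below, with an explicit guard for providers that expose no availability zone):

    import os
    import socket

    # Juju exports JUJU_AVAILABILITY_ZONE when the provider reports AZs.
    az = os.environ.get('JUJU_AVAILABILITY_ZONE')
    if az:
        # Rendered as: osd crush location = root=default rack=<az> host=<host>
        crush_location = 'root=default rack={} host={}'.format(
            az, socket.gethostname())
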
Change-Id: Ie25ac1b001db558d6a40fe3eaca014e8f4174241 --- ceph-osd/hooks/ceph_hooks.py | 11 +++++++++++ ceph-osd/templates/ceph.conf | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 1c402796..912abc11 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -12,6 +12,7 @@ import shutil import sys import tempfile +import socket import ceph from charmhelpers.core.hookenv import ( @@ -73,6 +74,12 @@ def install(): install_upstart_scripts() +def az_info(): + az_info = os.environ.get('JUJU_AVAILABILITY_ZONE') + log("AZ Info: " + az_info) + return az_info + + def emit_cephconf(): mon_hosts = get_mon_hosts() log('Monitor hosts are ' + repr(mon_hosts)) @@ -103,6 +110,10 @@ def emit_cephconf(): if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address + if az_info(): + cephcontext['crush_location'] = "root=default rack={} host={}" \ + .format(az_info(), socket.gethostname()) + # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index edd4f646..66da0aca 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -29,6 +29,10 @@ public addr = {{ public_addr }} cluster addr = {{ cluster_addr }} {%- endif %} +{% if crush_location %} +osd crush location = {{crush_location}} +{% endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 5a127db86287181e9280c5a5d77b0ce32acfe1ff Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Mar 2016 08:51:57 +0000 Subject: [PATCH 1071/2699] Update to charm-tools >= 2.0.0 The new release of charm-tools no longer ships the charm command; update minimum version requirement and switch to using charm-proof instead, unblocking current pep8 failures across all charms. Also pin the version of requests to 2.6.0 until theblues (indirect dependency of charm-tools) sort out its requirements versioning. Change-Id: I86b9094501dc1101bcad7038acd92f89ac71c95c --- ceph-osd/test-requirements.txt | 3 ++- ceph-osd/tox.ini | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 3af44d73..4faf2545 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -5,4 +5,5 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools +charm-tools>=2.0.0 +requests==2.6.0 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index be053f44..7f8650e8 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -19,7 +19,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} actions hooks unit_tests tests - charm proof + charm-proof [testenv:venv] commands = {posargs} From c75fb320c7cf3573d8a98eb91e6601968eb41c50 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Mar 2016 08:51:55 +0000 Subject: [PATCH 1072/2699] Update to charm-tools >= 2.0.0 The new release of charm-tools no longer ships the charm command; update minimum version requirement and switch to using charm-proof instead, unblocking current pep8 failures across all charms. Also pin the version of requests to 2.6.0 until theblues (indirect dependency of charm-tools) sort out its requirements versioning. 
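For local runs the practical difference is only the entry point; a minimal sketch (assuming charm-tools >= 2.0.0 is installed, which ships `charm-proof` in place of the old `charm proof` subcommand):

    import subprocess

    # charm-tools 2.x renamed the lint entry point used by tox.ini.
    subprocess.check_call(['charm-proof'])
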
Change-Id: I29210e210b90aa81bf6ee9a020bd6de656750a77 --- ceph-proxy/test-requirements.txt | 3 ++- ceph-proxy/tox.ini | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 3af44d73..4faf2545 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -5,4 +5,5 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools +charm-tools>=2.0.0 +requests==2.6.0 diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 838990c1..487dde23 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -20,7 +20,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} actions hooks unit_tests tests - charm proof + charm-proof [testenv:venv] commands = {posargs} From 3cd3be118b2d5393d5e9f9544d3f6c8c1a2bebdb Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Mar 2016 08:51:55 +0000 Subject: [PATCH 1073/2699] Update to charm-tools >= 2.0.0 The new release of charm-tools no longer ships the charm command; update minimum version requirement and switch to using charm-proof instead, unblocking current pep8 failures across all charms. Also pin the version of requests to 2.6.0 until theblues (indirect dependency of charm-tools) sort out its requirements versioning. Change-Id: I29210e210b90aa81bf6ee9a020bd6de656750a77 --- ceph-mon/test-requirements.txt | 3 ++- ceph-mon/tox.ini | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 3af44d73..4faf2545 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -5,4 +5,5 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools +charm-tools>=2.0.0 +requests==2.6.0 diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 838990c1..487dde23 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -20,7 +20,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} actions hooks unit_tests tests - charm proof + charm-proof [testenv:venv] commands = {posargs} From 1bf83538de9db1625ef35224f5c6797e9d4072e6 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 23 Mar 2016 08:51:58 +0000 Subject: [PATCH 1074/2699] Update to charm-tools >= 2.0.0 The new release of charm-tools no longer ships the charm command; update minimum version requirement and switch to using charm-proof instead, unblocking current pep8 failures across all charms. Also pin the version of requests to 2.6.0 until theblues (indirect dependency of charm-tools) sort out its requirements versioning. 
Change-Id: I10271bfa64674a4789d1cc623a2a7250a154ec43 --- ceph-radosgw/test-requirements.txt | 3 ++- ceph-radosgw/tox.ini | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 3af44d73..4faf2545 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -5,4 +5,5 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools +charm-tools>=2.0.0 +requests==2.6.0 diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index e8bf7cf9..a4d9c0d8 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -19,7 +19,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests - charm proof + charm-proof [testenv:venv] commands = {posargs} From cde030d1f8a43f224ccf2b1b0e1d3b307e47e9e3 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 3 Mar 2016 15:38:46 -0800 Subject: [PATCH 1075/2699] Rolling upgrades of ceph osd cluster This change adds functionality to allow the ceph osd cluster to upgrade in a serial, rolling fashion. It uses the ceph monitor cluster as a lock and allows only one ceph osd server at a time to upgrade. The upgrade is initiated by setting a config value for source for the service, which will prompt the osd cluster to upgrade to that new source and restart all osd processes server by server. If an osd server has been waiting on a previous server for more than 10 minutes and hasn't seen it finish, it will assume it died during the upgrade and proceed with its own upgrade. I had to modify the amulet test slightly to use the ceph-mon charm instead of the default ceph charm. I also changed the test so that it uses 3 ceph-osd servers instead of 1. Limitations of this patch: If the osd failure domain has been set to osd, then this patch will cause brief temporary outages while osd processes are being restarted. Future work will handle this case. 
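Condensed, the handshake implemented below looks roughly like this (a sketch only, assuming the monitor_key_* helpers this patch pulls in from charmhelpers.contrib.storage.linux.ceph; upgrade_osd is passed in to stand for the stop/install/start sequence in the real hook):

import time

from charmhelpers.contrib.storage.linux.ceph import (
    monitor_key_exists,
    monitor_key_get,
    monitor_key_set,
)

# The Ceph user (client.osd-upgrade) the config-keys are read/written as.
SERVICE = 'osd-upgrade'


def serial_upgrade(my_name, previous_node, upgrade_osd):
    # Wait on the unit ahead of us, but assume it died if it recorded a
    # start more than 10 minutes ago and never recorded finishing.
    if previous_node:
        while not monitor_key_exists(SERVICE, '{}_done'.format(previous_node)):
            started = monitor_key_get(SERVICE, '{}_start'.format(previous_node))
            if started and (time.time() - 10 * 60) > float(started):
                break  # previous node is probably dead; move on
            time.sleep(30)
    # Record our start, upgrade, then record completion for the next unit.
    monitor_key_set(SERVICE, '{}_start'.format(my_name), time.time())
    upgrade_osd()  # stop osds, add the new source, upgrade packages, start osds
    monitor_key_set(SERVICE, '{}_done'.format(my_name), time.time())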
Change-Id: Id9f89241f3aebe4886310e9b208bcb19f88e1e3e --- ceph-osd/charm-helpers-hooks.yaml | 1 + ceph-osd/hooks/ceph.py | 133 +- ceph-osd/hooks/ceph_hooks.py | 224 ++- .../contrib/storage/linux/ceph.py | 1195 +++++++++++++++++ ceph-osd/templates/ceph.conf | 2 + ceph-osd/tests/basic_deployment.py | 35 +- ceph-osd/unit_tests/test_upgrade_roll.py | 157 +++ 7 files changed, 1713 insertions(+), 34 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 100644 ceph-osd/unit_tests/test_upgrade_roll.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index c8c54766..cb5cbac0 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -5,6 +5,7 @@ include: - cli - fetch - contrib.storage.linux: + - ceph - utils - contrib.openstack.alternatives - contrib.network.ip diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 51b06ac8..0b23979b 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -19,11 +19,10 @@ from charmhelpers.core.host import ( mkdir, chownr, - service_restart, cmp_pkgrevno, lsb_release, - service_stop -) + service_stop, + service_restart) from charmhelpers.core.hookenv import ( log, ERROR, @@ -58,6 +57,112 @@ def ceph_user(): return "root" +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_tree(service): + """ + Returns the current osd map in JSON. + :return: List. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + child_ids = json_tree['nodes'][0]['children'] + for child in json_tree['nodes']: + if child['id'] in child_ids: + crush_list.append( + CrushLocation( + name=child.get('name'), + identifier=child['id'], + host=child.get('host'), + rack=child.get('rack'), + row=child.get('row'), + datacenter=child.get('datacenter'), + chassis=child.get('chassis'), + root=child.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_local_osd_ids(): + """ + This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. 
A list of osd identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + def get_version(): '''Derive Ceph release from an installed package.''' import apt_pkg as apt @@ -308,6 +413,7 @@ def rescan_osd_devices(): _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" def is_bootstrapped(): @@ -333,6 +439,21 @@ def import_osd_bootstrap_key(key): ] subprocess.check_call(cmd) + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -499,7 +620,7 @@ def update_monfs(): def maybe_zap_journal(journal_dev): - if (is_osd_disk(journal_dev)): + if is_osd_disk(journal_dev): log('Looks like {} is already an OSD data' ' or journal, skipping.'.format(journal_dev)) return @@ -543,7 +664,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if (is_osd_disk(dev) and not reformat_osd): + if is_osd_disk(dev) and not reformat_osd: log('Looks like {} is already an' ' OSD data or journal, skipping.'.format(dev)) return @@ -617,7 +738,7 @@ def filesystem_mounted(fs): def get_running_osds(): - '''Returns a list of the pids of the current running OSD daemons''' + """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: result = subprocess.check_output(cmd) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 912abc11..f31bbf52 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -9,12 +9,16 @@ import glob import os +import random import shutil +import subprocess import sys import tempfile import socket +import time import ceph +from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, ERROR, @@ -31,8 +35,8 @@ from charmhelpers.core.host import ( umount, mkdir, - cmp_pkgrevno -) + cmp_pkgrevno, + service_stop, service_start) from charmhelpers.fetch import ( add_source, apt_install, @@ -40,24 +44,216 @@ filter_installed_packages, ) from charmhelpers.core.sysctl import create as create_sysctl +from charmhelpers.core import host from utils import ( get_host_ip, get_networks, assert_charm_supports_ipv6, - render_template, -) + render_template) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, ) - +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get) from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() +# A dict of valid ceph upgrade paths. 
Mapping is old -> new +upgrade_paths = { + 'cloud:trusty-juno': 'cloud:trusty-kilo', + 'cloud:trusty-kilo': 'cloud:trusty-liberty', + 'cloud:trusty-liberty': 'cloud:trusty-mitaka', +} + + +def pretty_print_upgrade_paths(): + lines = [] + for key, value in upgrade_paths.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def check_for_upgrade(): + release_info = host.lsb_release() + if not release_info['DISTRIB_CODENAME'] == 'trusty': + log("Invalid upgrade path from {}. Only trusty is currently " + "supported".format(release_info['DISTRIB_CODENAME'])) + return + + c = hookenv.config() + old_version = c.previous('source') + log('old_version: {}'.format(old_version)) + # Strip all whitespace + new_version = hookenv.config('source') + if new_version: + # replace all whitespace + new_version = new_version.replace(' ', '') + log('new_version: {}'.format(new_version)) + + if old_version in upgrade_paths: + if new_version == upgrade_paths[old_version]: + log("{} to {} is a valid upgrade path. Proceeding.".format( + old_version, new_version)) + roll_osd_cluster(new_version) + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. " + "Valid paths are: {}".format(old_version, + new_version, + pretty_print_upgrade_paths())) + + +def lock_and_roll(my_name): + start_timestamp = time.time() + + log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) + monitor_key_set('osd-upgrade', "{}_start".format(my_name), start_timestamp) + log("Rolling") + # This should be quick + upgrade_osd() + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) + monitor_key_set('osd-upgrade', "{}_done".format(my_name), stop_timestamp) + + +def wait_on_previous_node(previous_node): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + 'osd-upgrade', + "{}_done".format(previous_node)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + 'osd-upgrade', + "{}_start".format(previous_node)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + 'osd-upgrade', + "{}_done".format(previous_node)) + + +def get_upgrade_position(osd_sorted_list, match_name): + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. 
+# It rolls an entire server at a time. +def roll_osd_cluster(new_version): + """ + This is tricky to get right so here's what we're going to do. + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = ceph.get_osd_tree(service='osd-upgrade') + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(my_name=my_name) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + previous_node=osd_sorted_list[position - 1].name) + lock_and_roll(my_name=my_name) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(): + current_version = ceph.get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_stop('ceph-osd@{}'.format(osd_id)) + else: + service_stop('ceph-osd-all') + apt_install(packages=ceph.PACKAGES, fatal=True) + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_start('ceph-osd@{}'.format(osd_id)) + else: + service_start('ceph-osd-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -124,6 +320,7 @@ def emit_cephconf(): install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 90) + JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' @@ -158,6 +355,9 @@ def check_overlap(journaldevs, datadevs): @hooks.hook('config-changed') def config_changed(): + # Check if an upgrade was requested + check_for_upgrade() + # Pre-flight checks if config('osd-format') not in ceph.DISK_FORMATS: log('Invalid OSD disk format configuration specified', level=ERROR) @@ -171,7 +371,7 @@ def config_changed(): create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf') e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): + if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) 
prepare_disks_and_activate() @@ -201,8 +401,14 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) or \ - get_host_ip(relation_get('private-address', unit, relid)) + addr = \ + relation_get('ceph-public-address', + unit, + relid) or get_host_ip( + relation_get( + 'private-address', + unit, + relid)) if addr: hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) @@ -258,10 +464,12 @@ def get_journal_devices(): 'mon-relation-departed') def mon_relation(): bootstrap_key = relation_get('osd_bootstrap_key') + upgrade_key = relation_get('osd_upgrade_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) + ceph.import_osd_upgrade_key(upgrade_key) prepare_disks_and_activate() else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..f4582545 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,1195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# +import bisect +import errno +import hashlib +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) +from charmhelpers.fetch import ( + apt_install, +) + +from charmhelpers.core.kernel import modprobe + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. 
+ :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + # Flush the cache and wait for it to return + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size): + """ + :param pool_size: int. 
pool_size is either the number of replicas for replicated pools or the K+M sum for + erasure coded pools + :return: int. The number of pgs to use. + """ + validator(value=pool_size, valid_type=int) + osd_list = get_osds(self.service) + if not osd_list: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return 200 + + osd_list_length = len(osd_list) + # Calculate based on Ceph best practices + if osd_list_length < 5: + return 128 + elif 5 < osd_list_length < 10: + return 512 + elif 10 < osd_list_length < 50: + return 4096 + else: + estimate = (osd_list_length * 100) / pool_size + # Return the next nearest power of 2 + index = bisect.bisect_right(powers_of_two, estimate) + return powers_of_two[index] + + +class ReplicatedPool(Pool): + def __init__(self, service, name, pg_num=None, replicas=2): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + if pg_num is None: + self.pg_num = self.get_pgs(self.replicas) + else: + self.pg_num = pg_num + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default"): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information so we can properly size the pgs + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), + level=ERROR) + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), + level=ERROR) + raise PoolCreationError( + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) + + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output( + ['ceph', '--id', service, + 'mon_status', '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format( + e.message)) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. 
+ Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param name: + :return: + """ + try: + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. 
+ """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + """Create a new Ceph keyring containing key.""" + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('Ceph keyring exists at %s.' % keyring, level=WARNING) + return + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. 
+ if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + """ + key = None + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3, pg_num=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + """ + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. 
+ """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op', 'pg_num']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. 
+ + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids(relation): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request, relation='ceph'): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request, relation=relation): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids(relation): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 66da0aca..7fec00e5 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -33,6 +33,8 @@ cluster addr = {{ cluster_addr }} osd crush location = {{crush_location}} {% endif %} +[client.osd-upgrade] +keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 49a10b11..7800a00d 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -43,8 +43,8 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). 
""" - this_service = {'name': 'ceph-osd'} - other_services = [{'name': 'ceph', 'units': 3}, + this_service = {'name': 'ceph-osd', 'units': 3} + other_services = [{'name': 'ceph-mon', 'units': 3}, {'name': 'mysql'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, @@ -60,18 +60,18 @@ def _add_relations(self): 'nova-compute:shared-db': 'mysql:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph:client', + 'nova-compute:ceph': 'ceph-mon:client', 'keystone:shared-db': 'mysql:shared-db', 'glance:shared-db': 'mysql:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph:client', + 'glance:ceph': 'ceph-mon:client', 'cinder:shared-db': 'mysql:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph:client', - 'ceph-osd:mon': 'ceph:osd' + 'cinder:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd' } super(CephOsdBasicDeployment, self)._add_relations(relations) @@ -86,9 +86,6 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', - 'osd-reformat': 'yes', - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' } # Include a non-existent device as osd-devices is a whitelist, @@ -102,7 +99,7 @@ def _configure_services(self): configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph': ceph_config, + 'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config} super(CephOsdBasicDeployment, self)._configure_services(configs) @@ -115,10 +112,12 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] + self.ceph_osd1_sentry = self.d.sentry.unit['ceph-osd/1'] + self.ceph_osd2_sentry = self.d.sentry.unit['ceph-osd/2'] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -177,7 +176,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { 'ceph-mon': 1, - 'ceph-osd': 2 } # Units with process names and PID quantities expected @@ -214,9 +212,6 @@ def test_102_services(self): ceph_services = [ 'ceph-mon-all', 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services @@ -233,16 +228,16 @@ def test_102_services(self): def test_200_ceph_osd_ceph_relation(self): """Verify the ceph-osd to ceph relation data.""" - u.log.debug('Checking ceph-osd:ceph mon relation data...') + u.log.debug('Checking ceph-osd:ceph-mon relation data...') unit = self.ceph_osd_sentry - relation = ['mon', 'ceph:osd'] + relation = ['mon', 'ceph-mon:osd'] expected = { 'private-address': 
u.valid_ip
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
-            message = u.relation_error('ceph-osd to ceph', ret)
+            message = u.relation_error('ceph-osd to ceph-mon', ret)
             amulet.raise_status(amulet.FAIL, msg=message)

     def test_201_ceph0_to_ceph_osd_relation(self):
diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py
new file mode 100644
index 00000000..840e247c
--- /dev/null
+++ b/ceph-osd/unit_tests/test_upgrade_roll.py
@@ -0,0 +1,157 @@
+import time
+
+__author__ = 'chris'
+from mock import patch, call, MagicMock
+import sys
+
+sys.path.append('/home/chris/repos/ceph-osd/hooks')
+
+from ceph import CrushLocation
+
+import test_utils
+import ceph_hooks
+
+TO_PATCH = [
+    'apt_install',
+    'apt_update',
+    'add_source',
+    'config',
+    'ceph',
+    'get_conf',
+    'hookenv',
+    'host',
+    'log',
+    'service_start',
+    'service_stop',
+    'socket',
+    'status_set',
+]
+
+
+def config_side_effect(*args):
+    if args[0] == 'source':
+        return 'cloud:trusty-kilo'
+    elif args[0] == 'key':
+        return 'key'
+    elif args[0] == 'release-version':
+        return 'cloud:trusty-kilo'
+
+
+previous_node_start_time = time.time() - (9 * 60)
+
+
+def monitor_key_side_effect(*args):
+    if args[1] == 'ip-192-168-1-2_done':
+        return False
+    elif args[1] == 'ip-192-168-1-2_start':
+        # Return that the previous node started 9 minutes ago
+        return previous_node_start_time
+
+
+class UpgradeRollingTestCase(test_utils.CharmTestCase):
+    def setUp(self):
+        super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH)
+
+    @patch('ceph_hooks.roll_osd_cluster')
+    def test_check_for_upgrade(self, roll_osd_cluster):
+        self.host.lsb_release.return_value = {
+            'DISTRIB_CODENAME': 'trusty',
+        }
+        previous_mock = MagicMock().return_value
+        previous_mock.previous.return_value = "cloud:trusty-juno"
+        self.hookenv.config.side_effect = [previous_mock,
+                                           config_side_effect('source')]
+        ceph_hooks.check_for_upgrade()
+
+        roll_osd_cluster.assert_called_with('cloud:trusty-kilo')
+
+    @patch('ceph_hooks.upgrade_osd')
+    @patch('ceph_hooks.monitor_key_set')
+    def test_lock_and_roll(self, monitor_key_set, upgrade_osd):
+        monitor_key_set.monitor_key_set.return_value = None
+        ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2')
+        upgrade_osd.assert_called_once_with()
+
+    def test_upgrade_osd(self):
+        self.config.side_effect = config_side_effect
+        self.ceph.get_version.return_value = "0.80"
+        self.ceph.systemd.return_value = False
+        ceph_hooks.upgrade_osd()
+        self.service_stop.assert_called_with('ceph-osd-all')
+        self.service_start.assert_called_with('ceph-osd-all')
+        self.status_set.assert_has_calls([
+            call('maintenance', 'Upgrading osd'),
+        ])
+
+    @patch('ceph_hooks.lock_and_roll')
+    @patch('ceph_hooks.get_upgrade_position')
+    def test_roll_osd_cluster_first(self,
+                                    get_upgrade_position,
+                                    lock_and_roll):
+        self.socket.gethostname.return_value = "ip-192-168-1-2"
+        self.ceph.get_osd_tree.return_value = ""
+        get_upgrade_position.return_value = 0
+        ceph_hooks.roll_osd_cluster('0.94.1')
+        lock_and_roll.assert_called_with(my_name="ip-192-168-1-2")
+
+    @patch('ceph_hooks.lock_and_roll')
+    @patch('ceph_hooks.get_upgrade_position')
+    @patch('ceph_hooks.wait_on_previous_node')
+    def test_roll_osd_cluster_second(self,
+                                     wait_on_previous_node,
+                                     get_upgrade_position,
+                                     lock_and_roll):
+        wait_on_previous_node.return_value = None
+        self.socket.gethostname.return_value = "ip-192-168-1-3"
+        self.ceph.get_osd_tree.return_value = [
+            CrushLocation(
+                name="ip-192-168-1-2",
+                identifier='a',
+                host='host-a',
+                rack='rack-a',
+                row='row-a',
+                datacenter='dc-1',
+                chassis='chassis-a',
+                root='ceph'),
+            CrushLocation(
+                name="ip-192-168-1-3",
+                identifier='a',
+                host='host-b',
+                rack='rack-a',
+                row='row-a',
+                datacenter='dc-1',
+                chassis='chassis-a',
+                root='ceph')
+        ]
+        get_upgrade_position.return_value = 1
+        ceph_hooks.roll_osd_cluster('0.94.1')
+        self.status_set.assert_called_with(
+            'blocked',
+            'Waiting on ip-192-168-1-2 to finish upgrading')
+        lock_and_roll.assert_called_with(my_name="ip-192-168-1-3")
+
+    @patch('ceph_hooks.monitor_key_get')
+    @patch('ceph_hooks.monitor_key_exists')
+    def test_wait_on_previous_node(self,
+                                   monitor_key_exists,
+                                   monitor_key_get):
+        monitor_key_get.side_effect = monitor_key_side_effect
+        monitor_key_exists.return_value = False
+
+        ceph_hooks.wait_on_previous_node("ip-192-168-1-2")
+
+        # Make sure we checked to see if the previous node started
+        monitor_key_get.assert_has_calls(
+            [call('osd-upgrade', 'ip-192-168-1-2_start')]
+        )
+        # Make sure we checked to see if the previous node was finished
+        monitor_key_exists.assert_has_calls(
+            [call('osd-upgrade', 'ip-192-168-1-2_done')]
+        )
+        # Make sure we waited at least once before proceeding
+        self.log.assert_has_calls([
+            call('Previous node is: ip-192-168-1-2'),
+            call('ip-192-168-1-2 is not finished. Waiting'),
+        ])
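The tests above encode a rolling-upgrade handshake: each OSD node records an 'osd-upgrade/<name>_start' key with the monitors, waits for its predecessor's '<name>_done' key, upgrades, then writes its own done key. The hook code itself is not part of this patch, so the following is only a standalone sketch of the wait loop the last test asserts on; the helper names mirror the mocks, and the monitor-key functions are injected as parameters rather than imported.

    import time

    def wait_on_previous_node(previous_node, monitor_key_get,
                              monitor_key_exists, log, max_wait=10 * 60):
        # Block until the node ahead of us reports its '<name>_done' key,
        # giving up if that node started more than max_wait seconds ago.
        log('Previous node is: %s' % previous_node)
        while not monitor_key_exists('osd-upgrade', '%s_done' % previous_node):
            log('%s is not finished. Waiting' % previous_node)
            start = monitor_key_get('osd-upgrade', '%s_start' % previous_node)
            if start and time.time() - start > max_wait:
                break  # assume the previous node died mid-upgrade
            time.sleep(30)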
From e7724f3f3eea8d6dac57aea9c7b6f85b840b16d9 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Tue, 22 Mar 2016 19:29:52 +0000
Subject: [PATCH 1076/2699] Add hardening support

Add charmhelpers.contrib.hardening and calls to install,
config-changed, upgrade-charm and update-status hooks. Also add new
config option to allow one or more hardening modules to be applied
at runtime.

Change-Id: Ic417d678d3b0f7bfda5b393628a67297d7e79107
---
 ceph-osd/charm-helpers-hooks.yaml             |   1 +
 ceph-osd/config.yaml                          |   6 +
 ceph-osd/hardening.yaml                       |   5 +
 ceph-osd/hooks/ceph_hooks.py                  |  10 +
 .../contrib/hardening/README.hardening.md     |  38 ++
 .../contrib/hardening/__init__.py             |  15 +
 .../contrib/hardening/apache/__init__.py      |  19 +
 .../hardening/apache/checks/__init__.py       |  31 +
 .../contrib/hardening/apache/checks/config.py | 100 ++++
 .../hardening/apache/templates/__init__.py    |   0
 .../hardening/apache/templates/alias.conf     |  31 +
 .../hardening/apache/templates/hardening.conf |  18 +
 .../contrib/hardening/audits/__init__.py      |  63 ++
 .../contrib/hardening/audits/apache.py        | 100 ++++
 .../contrib/hardening/audits/apt.py           | 105 ++++
 .../contrib/hardening/audits/file.py          | 552 ++++++++++++++++++
 .../contrib/hardening/defaults/__init__.py    |   0
 .../contrib/hardening/defaults/apache.yaml    |  13 +
 .../hardening/defaults/apache.yaml.schema     |   9 +
 .../contrib/hardening/defaults/mysql.yaml     |  38 ++
 .../hardening/defaults/mysql.yaml.schema      |  15 +
 .../contrib/hardening/defaults/os.yaml        |  67 +++
 .../contrib/hardening/defaults/os.yaml.schema |  42 ++
 .../contrib/hardening/defaults/ssh.yaml       |  49 ++
 .../hardening/defaults/ssh.yaml.schema        |  42 ++
 .../charmhelpers/contrib/hardening/harden.py  |  84 +++
 .../contrib/hardening/host/__init__.py        |  19 +
 .../contrib/hardening/host/checks/__init__.py |  50 ++
 .../contrib/hardening/host/checks/apt.py      |  39 ++
 .../contrib/hardening/host/checks/limits.py   |  55 ++
 .../contrib/hardening/host/checks/login.py    |  67 +++
 .../hardening/host/checks/minimize_access.py  |  52 ++
 .../contrib/hardening/host/checks/pam.py      | 134 +++++
 .../contrib/hardening/host/checks/profile.py  |  45 ++
 .../hardening/host/checks/securetty.py        |  39 ++
 .../hardening/host/checks/suid_sgid.py        | 131 +++++
 .../contrib/hardening/host/checks/sysctl.py   | 211 +++++++
 .../hardening/host/templates/10.hardcore.conf |   8 +
 .../host/templates/99-juju-hardening.conf     |   7 +
 .../hardening/host/templates/__init__.py      |   0
 .../hardening/host/templates/login.defs       | 349 +++++++++++
 .../contrib/hardening/host/templates/modules  | 117 ++++
 .../hardening/host/templates/passwdqc.conf    |  11 +
 .../host/templates/pinerolo_profile.sh        |   8 +
 .../hardening/host/templates/securetty        |  11 +
 .../contrib/hardening/host/templates/tally2   |  14 +
 .../contrib/hardening/mysql/__init__.py       |  19 +
 .../hardening/mysql/checks/__init__.py        |  31 +
 .../contrib/hardening/mysql/checks/config.py  |  89 +++
 .../hardening/mysql/templates/__init__.py     |   0
 .../hardening/mysql/templates/hardening.cnf   |  12 +
 .../contrib/hardening/ssh/__init__.py         |  19 +
 .../contrib/hardening/ssh/checks/__init__.py  |  31 +
 .../contrib/hardening/ssh/checks/config.py    | 394 +++++++++++++
 .../hardening/ssh/templates/__init__.py       |   0
 .../hardening/ssh/templates/ssh_config        |  70 +++
 .../hardening/ssh/templates/sshd_config       | 159 +++++
 .../contrib/hardening/templating.py           |  71 +++
 .../charmhelpers/contrib/hardening/utils.py   | 157 +++++
 ceph-osd/hooks/charmhelpers/core/hookenv.py   |  31 +
 ceph-osd/hooks/charmhelpers/core/host.py      |  50 +-
 .../charmhelpers/contrib/amulet/utils.py      |   7 +-
 .../contrib/openstack/amulet/utils.py         |  53 +-
 ceph-osd/unit_tests/test_config.py            |  16 +-
 ceph-osd/unit_tests/test_status.py            |   7 +-
 ceph-osd/unit_tests/test_utils.py             |   7 +
 66 files changed, 4012 insertions(+), 31 deletions(-)
 create mode 100644 ceph-osd/hardening.yaml
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/README.hardening.md
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/__init__.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py
 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py
 create mode 100644
ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/modules create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/securetty create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/tally2 create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index c8c54766..1c484af6 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -9,3 +9,4 @@ include: - contrib.openstack.alternatives - contrib.network.ip - contrib.charmsupport + - contrib.hardening|inc=* \ No newline at end of file diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 169a3331..b15628ac 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -172,4 +172,10 @@ options: default: True type: boolean description: Configure use of direct IO for OSD journals. + harden: + default: + type: string + description: | + Apply system hardening. 
Supports a space-delimited list of modules
+      to run. Supported modules currently include os, ssh, apache and mysql.
diff --git a/ceph-osd/hardening.yaml b/ceph-osd/hardening.yaml
new file mode 100644
index 00000000..314bb385
--- /dev/null
+++ b/ceph-osd/hardening.yaml
@@ -0,0 +1,5 @@
+# Overrides file for contrib.hardening. See README.hardening in
+# contrib.hardening for info on how to use this file.
+ssh:
+  server:
+    use_pam: 'yes'  # juju requires this
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 912abc11..c49b7db1 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -55,6 +55,7 @@
 )
 
 from charmhelpers.contrib.charmsupport import nrpe
+from charmhelpers.contrib.hardening.harden import harden
 
 hooks = Hooks()
@@ -67,6 +68,7 @@ def install_upstart_scripts():
 
 
 @hooks.hook('install.real')
+@harden()
 def install():
     add_source(config('source'), config('key'))
     apt_update(fatal=True)
@@ -157,6 +159,7 @@ def check_overlap(journaldevs, datadevs):
 
 
 @hooks.hook('config-changed')
+@harden()
 def config_changed():
     # Pre-flight checks
     if config('osd-format') not in ceph.DISK_FORMATS:
@@ -268,6 +271,7 @@ def mon_relation():
 
 
 @hooks.hook('upgrade-charm')
+@harden()
 def upgrade_charm():
     if get_fsid() and get_auth():
         emit_cephconf()
@@ -318,6 +322,12 @@ def assess_status():
                    'Unit is ready ({} OSD)'.format(len(running_osds)))
 
 
+@hooks.hook('update-status')
+@harden()
+def update_status():
+    log('Updating status.')
+
+
 if __name__ == '__main__':
     try:
         hooks.execute(sys.argv)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-osd/hooks/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 00000000..91280c03
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+   (from contrib.hardening.harden) to any functions or methods you want to use
+   to trigger hardening of your application/system.
+
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at root level and then any
+   settings with overrides e.g.
+
+   os:
+     general:
+       desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time
+   the hook runs.
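For reference, the wiring the README describes is exactly what the ceph_hooks.py hunks above do. A minimal standalone hook module would look something like the sketch below (the hook name and log text are illustrative):

    from charmhelpers.core.hookenv import Hooks, log
    from charmhelpers.contrib.hardening.harden import harden

    hooks = Hooks()

    @hooks.hook('config-changed')
    @harden()  # runs whichever modules the 'harden' config option lists
    def config_changed():
        log('config-changed complete')

With the Juju CLI of this era the modules would then be enabled with something like `juju set ceph-osd harden="os ssh"`.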
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 00000000..a1335320
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 00000000..277b8c77
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 00000000..d1304792
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.apache.checks import config
+
+
+def run_apache_checks():
+    log("Starting Apache hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("Apache hardening checks complete.", level=DEBUG)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
new file mode 100644
index 00000000..8249ca01
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+import subprocess
+
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    DirectoryPermissionAudit,
+    NoReadWriteForOther,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
+from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get Apache hardening config audits.
+
+    :returns: dictionary of audits
+    """
+    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
+        log("Apache server does not appear to be installed on this node - "
+            "skipping apache hardening", level=INFO)
+        return []
+
+    context = ApacheConfContext()
+    settings = utils.get_settings('apache')
+    audits = [
+        FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
+                            group='root', mode=0o0640),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'mods-available/alias.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0755,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'conf-enabled/hardening.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0640,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        DirectoryPermissionAudit(settings['common']['apache_dir'],
+                                 user='root',
+                                 group='root',
+                                 mode=0o640),
+
+        DisabledModuleAudit(settings['hardening']['modules_to_disable']),
+
+        NoReadWriteForOther(settings['common']['apache_dir']),
+    ]
+
+    return audits
+
+
+class ApacheConfContext(object):
+    """Defines the set of key/value pairs to set in an apache config file.
+
+    This context, when called, will return a dictionary containing the
+    key/value pairs of settings to specify in the
+    /etc/apache/conf-enabled/hardening.conf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('apache')
+        ctxt = settings['hardening']
+
+        out = subprocess.check_output(['apache2', '-v'])
+        ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
+                                           out).group(1)
+        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
+        ctxt['traceenable'] = settings['hardening']['traceenable']
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py
new file mode 100644
index 00000000..e69de29b
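The version probe in ApacheConfContext above parses the output of `apache2 -v` with a regex. A standalone worked example of that expression, with the output string assumed for illustration:

    import re

    out = "Server version: Apache/2.4.7 (Ubuntu)\nServer built: ..."
    version = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1)
    print(version)  # -> '2.4.7'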
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf
new file mode 100644
index 00000000..e46a58a3
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf
@@ -0,0 +1,31 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+<IfModule alias_module>
+    #
+    # Aliases: Add here as many aliases as you need (with no limit). The
+    # format is
+    #   Alias fakename realname
+    #
+    # Note that if you include a trailing / on fakename then the server will
+    # require it to be present in the URL. So "/icons" isn't aliased in this
+    # example, only "/icons/". If the fakename is slash-terminated, then the
+    # realname must also be slash terminated, and if the fakename omits the
+    # trailing slash, the realname must also omit it.
+    #
+    # We include the /icons/ alias for FancyIndexed directory listings. If
+    # you do not use FancyIndexing, you may comment this out.
+    #
+    Alias /icons/ "{{ apache_icondir }}/"
+
+    <Directory "{{ apache_icondir }}">
+        Options -Indexes -MultiViews -FollowSymLinks
+        AllowOverride None
+{% if apache_version == '2.4' -%}
+        Require all granted
+{% else -%}
+        Order allow,deny
+        Allow from all
+{% endif %}
+    </Directory>
+</IfModule>
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
new file mode 100644
index 00000000..07945418
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
@@ -0,0 +1,18 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+
+<Location / >
+  <LimitExcept {{ allowed_http_methods }} >
+    # http://httpd.apache.org/docs/2.4/upgrading.html
+    {% if apache_version > '2.2' -%}
+    Require all granted
+    {% else -%}
+    Order Allow,Deny
+    Deny from all
+    {% endif %}
+  </LimitExcept>
+</Location>
+
+TraceEnable {{ traceenable }}
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 00000000..6a7057b3
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class BaseAudit(object):  # NO-QA
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first check to see if the system
+    is in compliance for the specified check. If it is not in compliance, the
+    ensure_compliance implementation is expected to bring the system back
+    into compliance.
+    """
+    def __init__(self, *args, **kwargs):
+        self.unless = kwargs.get('unless', None)
+        super(BaseAudit, self).__init__()
+
+    def ensure_compliance(self):
+        """Checks to see if the current hardening check is in compliance or
+        not.
+
+        If the check that is performed is not in compliance, then an exception
+        should be raised.
+        """
+        pass
+
+    def _take_action(self):
+        """Determines whether to perform the action or not.
+
+        Checks whether or not an action should be taken. This is determined by
+        the truthy value for the unless parameter. If unless is a callback
+        method, it will be invoked with no parameters in order to determine
+        whether or not the action should be taken. Otherwise, the truthy value
+        of the unless attribute will determine if the action should be
+        performed.
+        """
+        # Do the action if there isn't an unless override.
+        if self.unless is None:
+            return True
+
+        # Invoke the callback if there is one.
+        if hasattr(self.unless, '__call__'):
+            results = self.unless()
+            if results:
+                return False
+            else:
+                return True
+
+        if self.unless:
+            return False
+        else:
+            return True
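A toy subclass makes the `unless` contract above concrete. This class is illustrative only and not part of charm-helpers; it assumes the BaseAudit definition just shown:

    class TouchFileAudit(BaseAudit):  # hypothetical example subclass
        def __init__(self, path, **kwargs):
            super(TouchFileAudit, self).__init__(**kwargs)
            self.path = path

        def ensure_compliance(self):
            if not self._take_action():
                return  # 'unless' evaluated truthy, so skip enforcement
            open(self.path, 'a').close()

    # Skips enforcement whenever the callback returns True:
    audit = TouchFileAudit('/tmp/example', unless=lambda: True)
    audit.ensure_compliance()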
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
new file mode 100644
index 00000000..cf3c987d
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import subprocess
+
+from six import string_types
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    ERROR,
+)
+
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class DisabledModuleAudit(BaseAudit):
+    """Audits Apache2 modules.
+
+    Determines if the apache2 modules are enabled. If the modules are
+    enabled then they are disabled in ensure_compliance.
+    """
+    def __init__(self, modules):
+        if modules is None:
+            self.modules = []
+        elif isinstance(modules, string_types):
+            self.modules = [modules]
+        else:
+            self.modules = modules
+
+    def ensure_compliance(self):
+        """Ensures that the modules are not loaded."""
+        if not self.modules:
+            return
+
+        try:
+            loaded_modules = self._get_loaded_modules()
+            non_compliant_modules = []
+            for module in self.modules:
+                if module in loaded_modules:
+                    log("Module '%s' is enabled but should not be." %
+                        (module), level=INFO)
+                    non_compliant_modules.append(module)
+
+            if len(non_compliant_modules) == 0:
+                return
+
+            for module in non_compliant_modules:
+                self._disable_module(module)
+            self._restart_apache()
+        except subprocess.CalledProcessError as e:
+            log('Error occurred auditing apache module compliance. '
+                'This may have been already reported. '
+                'Output is: %s' % e.output, level=ERROR)
+
+    @staticmethod
+    def _get_loaded_modules():
+        """Returns the modules which are enabled in Apache."""
+        output = subprocess.check_output(['apache2ctl', '-M'])
+        modules = []
+        for line in output.splitlines():
+            # Each line of the enabled module output looks like:
+            #  module_name (static|shared)
+            # Plus a header line at the top of the output which is stripped
+            # out by the regex.
+            matcher = re.search(r'^ (\S*)', line)
+            if matcher:
+                modules.append(matcher.group(1))
+        return modules
+
+    @staticmethod
+    def _disable_module(module):
+        """Disables the specified module in Apache."""
+        try:
+            subprocess.check_call(['a2dismod', module])
+        except subprocess.CalledProcessError as e:
+            # Note: catch error here to allow the attempt of disabling
+            # multiple modules in one go rather than failing after the
+            # first module fails.
+            log('Error occurred disabling module %s. '
+                'Output is: %s' % (module, e.output), level=ERROR)
+
+    @staticmethod
+    def _restart_apache():
+        """Restarts the apache process"""
+        subprocess.check_output(['service', 'apache2', 'restart'])
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py
new file mode 100644
index 00000000..e94af031
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py
@@ -0,0 +1,105 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import  # required for external apt import
+from apt import apt_pkg
+from six import string_types
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_purge
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class AptConfig(BaseAudit):
+
+    def __init__(self, config, **kwargs):
+        super(AptConfig, self).__init__(**kwargs)
+        self.config = config
+
+    def verify_config(self):
+        apt_pkg.init()
+        for cfg in self.config:
+            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
+            if value and value != cfg['expected']:
+                log("APT config '%s' has unexpected value '%s' "
+                    "(expected='%s')" %
+                    (cfg['key'], value, cfg['expected']), level=WARNING)
+
+    def ensure_compliance(self):
+        self.verify_config()
+
+
+class RestrictedPackages(BaseAudit):
+    """Class used to audit restricted packages on the system."""
+
+    def __init__(self, pkgs, **kwargs):
+        super(RestrictedPackages, self).__init__(**kwargs)
+        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+            self.pkgs = [pkgs]
+        else:
+            self.pkgs = pkgs
+
+    def ensure_compliance(self):
+        cache = apt_cache()
+
+        for p in self.pkgs:
+            if p not in cache:
+                continue
+
+            pkg = cache[p]
+            if not self.is_virtual_package(pkg):
+                if not pkg.current_ver:
+                    log("Package '%s' is not installed." % pkg.name,
+                        level=DEBUG)
+                    continue
+                else:
+                    log("Restricted package '%s' is installed" % pkg.name,
+                        level=WARNING)
+                    self.delete_package(cache, pkg)
+            else:
+                log("Checking restricted virtual package '%s' provides" %
+                    pkg.name, level=DEBUG)
+                self.delete_package(cache, pkg)
+
+    def delete_package(self, cache, pkg):
+        """Deletes the package from the system.
+
+        Deletes the package from the system, properly handling virtual
+        packages.
+
+        :param cache: the apt cache
+        :param pkg: the package to remove
+        """
+        if self.is_virtual_package(pkg):
+            log("Package '%s' appears to be virtual - purging provides" %
+                pkg.name, level=DEBUG)
+            for _p in pkg.provides_list:
+                self.delete_package(cache, _p[2].parent_pkg)
+        elif not pkg.current_ver:
+            log("Package '%s' not installed" % pkg.name, level=DEBUG)
+            return
+        else:
+            log("Purging package '%s'" % pkg.name, level=DEBUG)
+            apt_purge(pkg.name)
+
+    def is_virtual_package(self, pkg):
+        return pkg.has_provides and not pkg.has_versions
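A hedged usage sketch for the two apt audits above; the APT key and package names are examples chosen for illustration, not recommendations made by this patch:

    # Purge blacklisted packages and warn on unexpected APT settings.
    audits = [
        AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
                    'expected': 'false'}]),
        RestrictedPackages(pkgs=['telnet-server', 'rsh-server']),
    ]
    for audit in audits:
        audit.ensure_compliance()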
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 00000000..0fb545a9
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,552 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class inheriting from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files to which we want
+                      to apply the compliance criteria.
+        :param always_comply: if True, compliance criteria are always
+                              applied; otherwise compliance is skipped for
+                              non-existent paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with the registered
+        criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the POSIX stat information for the specified file path.
+
+        :param path: the path to get the stat information for.
+        :returns: an os.stat_result object for the path or None if the path
+                  doesn't exist.
+        """
+        return os.stat(path)
+ """ + raise NotImplementedError + + def comply(self, path): + """Enforces the compliance of a path. + + :param path: the path to the file that should be enforced. + """ + raise NotImplementedError + + @classmethod + def _get_stat(cls, path): + """Returns the Posix st_stat information for the specified file path. + + :param path: the path to get the st_stat information for. + :returns: an st_stat object for the path or None if the path doesn't + exist. + """ + return os.stat(path) + + +class FilePermissionAudit(BaseFileAudit): + """Implements an audit for file permissions and ownership for a user. + + This class implements functionality that ensures that a specific user/group + will own the file(s) specified and that the permissions specified are + applied properly to the file. + """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
class DirectoryPermissionAudit(FilePermissionAudit):
+    """Performs a permission check for the specified directory path."""
+
+    def __init__(self, paths, user, group=None, mode=0o600,
+                 recursive=True, **kwargs):
+        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
+                                                       mode, **kwargs)
+        self.recursive = recursive
+
+    def is_compliant(self, path):
+        """Checks if the directory is compliant.
+
+        Used to determine if the path specified and all of its children
+        directories are in compliance with the check itself.
+
+        :param path: the directory path to check
+        :returns: True if the directory tree is compliant, otherwise False.
+        """
+        if not os.path.isdir(path):
+            log('Path specified %s is not a directory.' % path, level=ERROR)
+            raise ValueError("%s is not a directory." % path)
+
+        if not self.recursive:
+            return super(DirectoryPermissionAudit, self).is_compliant(path)
+
+        compliant = True
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                continue
+
+            if not super(DirectoryPermissionAudit, self).is_compliant(root):
+                compliant = False
+                continue
+
+        return compliant
+
+    def comply(self, path):
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                super(DirectoryPermissionAudit, self).comply(root)
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred finding writable files for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode,
+                                           e.output, format_exc(e)),
+                level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writeable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode,
+                                           e.output, format_exc(e)),
+                level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are not readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above here will find any files which have read or
+            # write permissions for other, meaning there is too broad of
+            # access to read/write the file. As such, the path is compliant
+            # if there's no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output))
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s.'
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode,
+                                           e.output, format_exc(e)),
+                level=ERROR)
+
+
+class TemplatedFile(BaseFileAudit):
+    """Audits the contents of a templated file.
+
+    This audit renders a file from a template, sets the appropriate file
+    permissions, then records a hashsum so that later runs can detect
+    whether the content has changed.
+    """
+    def __init__(self, path, context, template_dir, mode, user='root',
+                 group='root', service_actions=None, **kwargs):
+        self.context = context
+        self.user = user
+        self.group = group
+        self.mode = mode
+        self.template_dir = template_dir
+        self.service_actions = service_actions
+        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
+                                            **kwargs)
+
+    def is_compliant(self, path):
+        """Determines if the templated file is compliant.
+
+        A templated file is only compliant if it has not changed (as
+        determined by its sha256 hashsum) AND its file permissions are set
+        appropriately.
+
+        :param path: the path to check compliance.
+        """
+        same_templates = self.templates_match(path)
+        same_content = self.contents_match(path)
+        same_permissions = self.permissions_match(path)
+
+        if same_content and same_permissions and same_templates:
+            return True
+
+        return False
+
+    def run_service_actions(self):
+        """Run any actions on services requested."""
+        if not self.service_actions:
+            return
+
+        for svc_action in self.service_actions:
+            name = svc_action['service']
+            actions = svc_action['actions']
+            log("Running service '%s' actions '%s'" % (name, actions),
+                level=DEBUG)
+            for action in actions:
+                cmd = ['service', name, action]
+                try:
+                    check_call(cmd)
+                except CalledProcessError as exc:
+                    log("Service name='%s' action='%s' failed - %s" %
+                        (name, action, exc), level=WARNING)
+
+    def comply(self, path):
+        """Ensures the contents and the permissions of the file.
+
+        :param path: the path to correct
+        """
+        dirname = os.path.dirname(path)
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+        self.pre_write()
+        render_and_write(self.template_dir, path, self.context())
+        utils.ensure_permissions(path, self.user, self.group, self.mode)
+        self.run_service_actions()
+        self.save_checksum(path)
+        self.post_write()
+
+    def pre_write(self):
+        """Invoked prior to writing the template."""
+        pass
+
+    def post_write(self):
+        """Invoked after writing the template."""
+        pass
+
+    def templates_match(self, path):
+        """Determines if the template files are the same.
+
+        The template file equality is determined by the hashsum of the
+        template files themselves. If there is no hashsum, then the content
+        cannot be sure to be the same so treat it as if they changed.
+        Otherwise, return whether or not the hashsums are the same.
+
+        :param path: the path to check
+        :returns: boolean
+        """
+        template_path = get_template_path(self.template_dir, path)
+        key = 'hardening:template:%s' % template_path
+        template_checksum = file_hash(template_path)
+        kv = unitdata.kv()
+        stored_tmplt_checksum = kv.get(key)
+        if not stored_tmplt_checksum:
+            kv.set(key, template_checksum)
+            kv.flush()
+            log('Saved template checksum for %s.' % template_path,
+                level=DEBUG)
+            # Since we don't have a template checksum, then assume it doesn't
+            # match and return that the template is different.
+            return False
+        elif stored_tmplt_checksum != template_checksum:
+            kv.set(key, template_checksum)
+            kv.flush()
+            log('Updated template checksum for %s.' % template_path,
+                level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing the hashsum of the file contents and
+        the saved hashsum. If there is no hashsum, then the content cannot
+        be sure to be the same so treat them as if they are not the same.
+        Otherwise, return True if the hashsums are the same, False if they
+        are not the same.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path,
+                level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases i.e. tuple(regex, bool) where
+        the bool value denotes whether or not the regex is expected to match,
+        check that all cases match as expected with the contents of the file.
+        Cases can be expected to pass or fail.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+        """
+        log("Not applying any compliance criteria, only checks.", level=INFO)
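A usage sketch for FileContentAudit above; the target path and the regex patterns are illustrative only:

    cases = {
        'pass': [r'^PermitRootLogin\s+no'],   # must match the file
        'fail': [r'^Protocol\s+1'],           # must not match the file
    }
    audit = FileContentAudit('/etc/ssh/sshd_config', cases)
    print(audit.is_compliant('/etc/ssh/sshd_config'))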
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 00000000..e5ada29f --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,13 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] \ No newline at end of file diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 00000000..227589b5 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,9 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 00000000..682d22bf --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 00000000..682d22bf
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'mysql' as the root key followed by any of the following with new
+#       values.
+
+hardening:
+  mysql-conf: /etc/mysql/my.cnf
+  hardening-conf: /etc/mysql/conf.d/hardening.cnf
+
+security:
+  # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
+  chroot: None
+
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
+  safe-user-create: 1
+
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
+  secure-auth: 1
+
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
+  skip-symbolic-links: 1
+
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
+  skip-show-database: True
+
+  # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
+  local-infile: 0
+
+  # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
+  allow-suspicious-udfs: 0
+
+  # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
+  automatic-sp-privileges: 0
+
+  # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
+  secure-file-priv: /tmp
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
new file mode 100644
index 00000000..2edf325c
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
@@ -0,0 +1,15 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+hardening:
+  mysql-conf:
+  hardening-conf:
+security:
+  chroot:
+  safe-user-create:
+  secure-auth:
+  skip-symbolic-links:
+  skip-show-database:
+  local-infile:
+  allow-suspicious-udfs:
+  automatic-sp-privileges:
+  secure-file-priv:
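These defaults/schema pairs drive a merge of the shipped defaults with user overrides from hardening.yaml. The following is a standalone re-implementation of that overlay, written only to illustrate the idea; it is not the charm-helpers code, and the file path and override values are assumptions:

    import yaml

    def merge(base, overrides):
        # Recursively overlay user overrides onto the shipped defaults.
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                merge(base[key], value)
            else:
                base[key] = value
        return base

    defaults = yaml.safe_load(open('defaults/mysql.yaml'))  # path illustrative
    user = {'security': {'local-infile': 1}}                # from hardening.yaml
    print(merge(defaults, user)['security']['local-infile'])  # -> 1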
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
new file mode 100644
index 00000000..ddd4286c
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
@@ -0,0 +1,67 @@
+# NOTE: this file contains the default configuration for the 'os' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'os' as the root key followed by any of the following with new
+#       values.
+
+general:
+  desktop_enable: False  # (type:boolean)
+
+environment:
+  extra_user_paths: []
+  umask: 027
+  root_path: /
+
+auth:
+  pw_max_age: 60
+  # discourage password cycling
+  pw_min_age: 7
+  retries: 5
+  lockout_time: 600
+  timeout: 60
+  allow_homeless: False  # (type:boolean)
+  pam_passwdqc_enable: True  # (type:boolean)
+  pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
+  root_ttys:
+      console
+      tty1
+      tty2
+      tty3
+      tty4
+      tty5
+      tty6
+  uid_min: 1000
+  gid_min: 1000
+  sys_uid_min: 100
+  sys_uid_max: 999
+  sys_gid_min: 100
+  sys_gid_max: 999
+  chfn_restrict:
+
+security:
+  users_allow: []
+  suid_sgid_enforce: True  # (type:boolean)
+  # user-defined blacklist and whitelist
+  suid_sgid_blacklist: []
+  suid_sgid_whitelist: []
+  # if this is True, remove any suid/sgid bits from files that were not in
+  # the whitelist
+  suid_sgid_dry_run_on_unknown: False  # (type:boolean)
+  suid_sgid_remove_from_unknown: False  # (type:boolean)
+  # remove packages with known issues
+  packages_clean: True  # (type:boolean)
+  packages_list:
+    xinetd
+    inetd
+    ypserv
+    telnet-server
+    rsh-server
+    rsync
+  kernel_enable_module_loading: True  # (type:boolean)
+  kernel_enable_core_dump: False  # (type:boolean)
+
+sysctl:
+  kernel_secure_sysrq: 244  # 4 + 16 + 32 + 64 + 128
+  kernel_enable_sysrq: False  # (type:boolean)
+  forwarding: False  # (type:boolean)
+  ipv6_enable: False  # (type:boolean)
+  arp_restricted: True  # (type:boolean)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
new file mode 100644
index 00000000..88b3966e
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+general:
+  desktop_enable:
+environment:
+  extra_user_paths:
+  umask:
+  root_path:
+auth:
+  pw_max_age:
+  pw_min_age:
+  retries:
+  lockout_time:
+  timeout:
+  allow_homeless:
+  pam_passwdqc_enable:
+  pam_passwdqc_options:
+  root_ttys:
+  uid_min:
+  gid_min:
+  sys_uid_min:
+  sys_uid_max:
+  sys_gid_min:
+  sys_gid_max:
+  chfn_restrict:
+security:
+  users_allow:
+  suid_sgid_enforce:
+  suid_sgid_blacklist:
+  suid_sgid_whitelist:
+  suid_sgid_dry_run_on_unknown:
+  suid_sgid_remove_from_unknown:
+  packages_clean:
+  packages_list:
+  kernel_enable_module_loading:
+  kernel_enable_core_dump:
+sysctl:
+  kernel_secure_sysrq:
+  kernel_enable_sysrq:
+  forwarding:
+  ipv6_enable:
+  arp_restricted:
+ +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 00000000..d05e054b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 00000000..ac7568d6 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,84 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
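To make the ssh server defaults above concrete, here is a hedged sketch of how a few of them could be expressed as sshd_config directives. The real hardening code renders a full template shipped with charm-helpers, so this ad-hoc mapping is illustrative only:

    # Sketch: a handful of the 'server' settings above as sshd_config lines.
    server = {
        'password_authentication': 'no',
        'allow_tcp_forwarding': 'no',
        'use_dns': 'no',
        'max_auth_tries': 2,
        'max_sessions': 10,
    }
    directives = {
        'PasswordAuthentication': server['password_authentication'],
        'AllowTcpForwarding': server['allow_tcp_forwarding'],
        'UseDNS': server['use_dns'],
        'MaxAuthTries': server['max_auth_tries'],
        'MaxSessions': server['max_sessions'],
    }
    print('\n'.join('%s %s' % kv for kv in directives.items()))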
+ +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + def _harden_inner1(f): + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + + def _harden_inner2(*args, **kwargs): + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
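The harden() decorator defined above is meant to wrap charm hooks; hardening runs first, then the wrapped hook body. A hypothetical hook module using it (hook names and the config value are illustrative):

    # With the charm config option 'harden' set to e.g. "os ssh", the os
    # and ssh stacks run every time these hooks fire.
    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def install():
        pass  # normal install steps run after the hardening modules

    @harden(overrides=['os'])  # run only the 'os' module, ignoring config
    def config_changed():
        pass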
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 00000000..c3bd5985 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 00000000..2c221cda --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,39 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. 
+
+    :returns: list of audits
+    """
+    audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
+                          'expected': 'false'}])]
+
+    settings = get_settings('os')
+    clean_packages = settings['security']['packages_clean']
+    if clean_packages:
+        security_packages = settings['security']['packages_list']
+        if security_packages:
+            audits.append(RestrictedPackages(security_packages))
+
+    return audits
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py
new file mode 100644
index 00000000..8ce9dc2b
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py
@@ -0,0 +1,55 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from charmhelpers.contrib.hardening.audits.file import (
+    DirectoryPermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening security limits audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Ensure that the /etc/security/limits.d directory is only writable
+    # by the root user, but others can execute and read.
+    audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
+                                           user='root', group='root',
+                                           mode=0o755))
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
+                                    SecurityLimitsContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    user='root', group='root', mode=0o0440))
+    return audits
+
+
+class SecurityLimitsContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'disable_core_dump':
+                not settings['security']['kernel_enable_core_dump']}
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py
new file mode 100644
index 00000000..d32c4f60
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py
@@ -0,0 +1,67 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
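SecurityLimitsContext above supplies the one flag consumed by the 10.hardcore.conf template that appears later in this patch. Rendering a trimmed copy of it standalone shows the effect; jinja2 is assumed to be the engine, as the template syntax suggests:

    # Render the core-dump limits template with the context class's output.
    from jinja2 import Template

    TMPL = ("{% if disable_core_dump -%}\n"
            "# Prevent core dumps for all users.\n"
            "* hard core 0\n"
            "{% endif %}")

    print(Template(TMPL).render(disable_core_dump=True))
    # -> "# Prevent core dumps for all users." followed by "* hard core 0"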
+
+from six import string_types
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening login.defs audits.
+
+    :returns: list of audits
+    """
+    audits = [TemplatedFile('/etc/login.defs', LoginContext(),
+                            template_dir=TEMPLATES_DIR,
+                            user='root', group='root', mode=0o0444)]
+    return audits
+
+
+class LoginContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+
+        # Octal numbers in yaml end up being turned into decimal,
+        # so check if the umask is entered as a string (e.g. '027')
+        # or as an octal umask as we know it (e.g. 002). If it's not
+        # a string, assume it to be octal and turn it into an octal
+        # string.
+        umask = settings['environment']['umask']
+        if not isinstance(umask, string_types):
+            umask = '%s' % oct(umask)
+
+        ctxt = {
+            'additional_user_paths':
+            settings['environment']['extra_user_paths'],
+            'umask': umask,
+            'pwd_max_age': settings['auth']['pw_max_age'],
+            'pwd_min_age': settings['auth']['pw_min_age'],
+            'uid_min': settings['auth']['uid_min'],
+            'sys_uid_min': settings['auth']['sys_uid_min'],
+            'sys_uid_max': settings['auth']['sys_uid_max'],
+            'gid_min': settings['auth']['gid_min'],
+            'sys_gid_min': settings['auth']['sys_gid_min'],
+            'sys_gid_max': settings['auth']['sys_gid_max'],
+            'login_retries': settings['auth']['retries'],
+            'login_timeout': settings['auth']['timeout'],
+            'chfn_restrict': settings['auth']['chfn_restrict'],
+            'allow_login_without_home': settings['auth']['allow_homeless']
+        }
+
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py
new file mode 100644
index 00000000..c471064b
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py
@@ -0,0 +1,52 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    ReadOnly,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening access audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Remove write permissions from $PATH folders for all regular users.
+    # This prevents changing system-wide commands from normal users.
+    path_folders = {'/usr/local/sbin',
+                    '/usr/local/bin',
+                    '/usr/sbin',
+                    '/usr/bin',
+                    '/bin'}
+    extra_user_paths = settings['environment']['extra_user_paths']
+    path_folders.update(extra_user_paths)
+    audits.append(ReadOnly(path_folders))
+
+    # Only allow the root user to have access to the shadow file.
+    audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
+
+    if 'change_user' not in settings['security']['users_allow']:
+        # su should only be accessible to user and group root, unless it is
+        # expressly defined to allow users to change to root via the
+        # security_users_allow config option.
+        audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
+
+    return audits
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py
new file mode 100644
index 00000000..383fe28e
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py
@@ -0,0 +1,134 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from subprocess import (
+    check_output,
+    CalledProcessError,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_purge,
+    apt_update,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    DeletedFile,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+def get_audits():
+    """Get OS hardening PAM authentication audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    if settings['auth']['pam_passwdqc_enable']:
+        audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
+
+    if settings['auth']['retries']:
+        audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
+    else:
+        audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
+
+    return audits
+
+
+class PasswdqcPAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_pam_passwdqc_options'] = \
+            settings['auth']['pam_passwdqc_options']
+
+        return ctxt
+
+
+class PasswdqcPAM(TemplatedFile):
+    """The PAM Audit verifies the Linux PAM settings."""
+    def __init__(self, path):
+        super(PasswdqcPAM, self).__init__(path=path,
+                                          template_dir=TEMPLATES_DIR,
+                                          context=PasswdqcPAMContext(),
+                                          user='root',
+                                          group='root',
+                                          mode=0o0640)
+
+    def pre_write(self):
+        # Always remove?
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the Linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Always remove?
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 00000000..f7443357
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
+                                    ProfileContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    mode=0o0755, user='root', group='root'))
+    return audits
+
+
+class ProfileContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
new file mode 100644
index 00000000..e33c73ca
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening Secure TTY audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
+                                template_dir=TEMPLATES_DIR,
+                                mode=0o0400, user='root', group='root'))
+    return audits
+
+
+class SecureTTYContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'ttys': settings['auth']['root_ttys']}
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
new file mode 100644
index 00000000..0534689b
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
@@ -0,0 +1,131 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
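The suid/sgid audits in the file that follows delegate to a NoSUIDSGIDAudit helper; at its core, such a check only has to inspect two mode bits per path. A self-contained sketch of that test (the helper's real interface is not shown in this patch):

    import os
    import stat

    def has_suid_sgid(path):
        """True if the file at path carries the setuid or setgid bit."""
        mode = os.stat(path).st_mode
        return bool(mode & (stat.S_ISUID | stat.S_ISGID))

    print(has_suid_sgid('/bin/su'))  # typically True: su is setuid root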
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid
+    # bit set and the whitelist is the set of paths which MAY have the
+    # suid/sgid bit set. The user whitelist/blacklist effectively override
+    # the system whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
+ + Starting with the root_path, this will recursively find all paths which + have an suid or sgid bit set. + """ + cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', + '-type', 'f', '!', '-path', '/proc/*', '-print'] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, _ = p.communicate() + return set(out.split('\n')) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py new file mode 100644 index 00000000..4a76d74e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -0,0 +1,211 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import platform +import re +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, + WARNING, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s +net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s +net.ipv4.conf.all.rp_filter=1 +net.ipv4.conf.default.rp_filter=1 +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_ratelimit=100 +net.ipv4.icmp_ratemask=88089 +net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s +net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s +net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s +net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s +net.ipv4.tcp_rfc1337=1 +net.ipv4.tcp_syncookies=1 +net.ipv4.conf.all.shared_media=1 +net.ipv4.conf.default.shared_media=1 +net.ipv4.conf.all.accept_source_route=0 +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv4.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv4.conf.all.secure_redirects=0 +net.ipv4.conf.default.secure_redirects=0 +net.ipv4.conf.all.send_redirects=0 +net.ipv4.conf.default.send_redirects=0 +net.ipv4.conf.all.log_martians=0 +net.ipv6.conf.default.router_solicitations=0 +net.ipv6.conf.default.accept_ra_rtr_pref=0 +net.ipv6.conf.default.accept_ra_pinfo=0 +net.ipv6.conf.default.accept_ra_defrtr=0 +net.ipv6.conf.default.autoconf=0 +net.ipv6.conf.default.dad_transmits=0 +net.ipv6.conf.default.max_addresses=1 +net.ipv6.conf.all.accept_ra=0 +net.ipv6.conf.default.accept_ra=0 +kernel.modules_disabled=%(kernel_modules_disabled)s +kernel.sysrq=%(kernel_sysrq)s +fs.suid_dumpable=%(fs_suid_dumpable)s +kernel.randomize_va_space=2 +""" + + +def get_audits(): + """Get OS hardening sysctl audits. 
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems
if sysctl cannot apply all settings it + # will return non-zero as well. + log("sysctl command returned an error (maybe some " + "keys could not be set) - %s" % (e), + level=WARNING) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf new file mode 100644 index 00000000..0014191f --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% if disable_core_dump -%} +# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. +* hard core 0 +{% endif %} \ No newline at end of file diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf new file mode 100644 index 00000000..101f1e1d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf @@ -0,0 +1,7 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% for key, value in sysctl_settings -%} +{{ key }}={{ value }} +{% endfor -%} diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs new file mode 100644 index 00000000..db137d6d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -0,0 +1,349 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# +# /etc/login.defs - Configuration control definitions for the login package. +# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. All other items are optional - if not specified then +# the described action or option will be inhibited. +# +# Comment lines (lines beginning with "#") and blank lines are ignored. +# +# Modified for Linux. --marekm + +# REQUIRED for useradd/userdel/usermod +# Directory where mailboxes reside, _or_ name of file, relative to the +# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, +# MAIL_DIR takes precedence. +# +# Essentially: +# - MAIL_DIR defines the location of users mail spool files +# (for mbox use) by appending the username to MAIL_DIR as defined +# below. 
+# - MAIL_FILE defines the location of the users mail spool files as the +# fully-qualified filename obtained by prepending the user home +# directory before $MAIL_FILE +# +# NOTE: This is no more used for setting up users MAIL environment variable +# which is, starting from shadow 4.0.12-1 in Debian, entirely the +# job of the pam_mail PAM modules +# See default PAM configuration files provided for +# login, su, etc. +# +# This is a temporary situation: setting these variables will soon +# move to /etc/default/useradd and the variables will then be +# no more supported +MAIL_DIR /var/mail +#MAIL_FILE .mail + +# +# Enable logging and display of /var/log/faillog login failure info. +# This option conflicts with the pam_tally PAM module. +# +FAILLOG_ENAB yes + +# +# Enable display of unknown usernames when login failures are recorded. +# +# WARNING: Unknown usernames may become world readable. +# See #290803 and #298773 for details about how this could become a security +# concern +LOG_UNKFAIL_ENAB no + +# +# Enable logging of successful logins +# +LOG_OK_LOGINS yes + +# +# Enable "syslog" logging of su activity - in addition to sulog file logging. +# SYSLOG_SG_ENAB does the same for newgrp and sg. +# +SYSLOG_SU_ENAB yes +SYSLOG_SG_ENAB yes + +# +# If defined, all su activity is logged to this file. +# +#SULOG_FILE /var/log/sulog + +# +# If defined, file which maps tty line to TERM environment parameter. +# Each line of the file is in a format something like "vt100 tty01". +# +#TTYTYPE_FILE /etc/ttytype + +# +# If defined, login failures will be logged here in a utmp format +# last, when invoked as lastb, will read /var/log/btmp, so... +# +FTMP_FILE /var/log/btmp + +# +# If defined, the command name to display when running "su -". For +# example, if this is defined as "su" then a "ps" will display the +# command is "-su". If not defined, then "ps" would display the +# name of the shell actually being run, e.g. something like "-sh". +# +SU_NAME su + +# +# If defined, file which inhibits all the usual chatter during the login +# sequence. If a full pathname, then hushed mode will be enabled if the +# user's name or shell are found in the file. If not a full pathname, then +# hushed mode will be enabled if the file exists in the user's home directory. +# +HUSHLOGIN_FILE .hushlogin +#HUSHLOGIN_FILE /etc/hushlogins + +# +# *REQUIRED* The default PATH settings, for superuser and normal users. +# +# (they are minimal, add the rest in the shell startup files) +ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} + +# +# Terminal permissions +# +# TTYGROUP Login tty will be assigned this group ownership. +# TTYPERM Login tty will be set to this permission. +# +# If you have a "write" program which is "setgid" to a special group +# which owns the terminals, define TTYGROUP to the group number and +# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign +# TTYPERM to either 622 or 600. +# +# In Debian /usr/bin/bsd-write or similar programs are setgid tty +# However, the default and recommended value for TTYPERM is still 0600 +# to not allow anyone to write to anyone else console or terminal + +# Users can still allow other people to write them by issuing +# the "mesg y" command. + +TTYGROUP tty +TTYPERM 0600 + +# +# Login configuration initializations: +# +# ERASECHAR Terminal ERASE character ('\010' = backspace). 
+# KILLCHAR Terminal KILL character ('\025' = CTRL/U). +# UMASK Default "umask" value. +# +# The ERASECHAR and KILLCHAR are used only on System V machines. +# +# UMASK is the default umask value for pam_umask and is used by +# useradd and newusers to set the mode of the new home directories. +# 022 is the "historical" value in Debian for UMASK +# 027, or even 077, could be considered better for privacy +# There is no One True Answer here : each sysadmin must make up his/her +# mind. +# +# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value +# for private user groups, i. e. the uid is the same as gid, and username is +# the same as the primary group name: for these, the user permissions will be +# used as group permissions, e. g. 022 will become 002. +# +# Prefix these values with "0" to get octal, "0x" to get hexadecimal. +# +ERASECHAR 0177 +KILLCHAR 025 +UMASK {{ umask }} + +# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name. +# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user. +USERGROUPS_ENAB yes + +# +# Password aging controls: +# +# PASS_MAX_DAYS Maximum number of days a password may be used. +# PASS_MIN_DAYS Minimum number of days allowed between password changes. +# PASS_WARN_AGE Number of days warning given before a password expires. +# +PASS_MAX_DAYS {{ pwd_max_age }} +PASS_MIN_DAYS {{ pwd_min_age }} +PASS_WARN_AGE 7 + +# +# Min/max values for automatic uid selection in useradd +# +UID_MIN {{ uid_min }} +UID_MAX 60000 +# System accounts +SYS_UID_MIN {{ sys_uid_min }} +SYS_UID_MAX {{ sys_uid_max }} + +# Min/max values for automatic gid selection in groupadd +GID_MIN {{ gid_min }} +GID_MAX 60000 +# System accounts +SYS_GID_MIN {{ sys_gid_min }} +SYS_GID_MAX {{ sys_gid_max }} + +# +# Max number of login retries if password is bad. This will most likely be +# overriden by PAM, since the default pam_unix module has it's own built +# in of 3 retries. However, this is a safe fallback in case you are using +# an authentication module that does not enforce PAM_MAXTRIES. +# +LOGIN_RETRIES {{ login_retries }} + +# +# Max time in seconds for login +# +LOGIN_TIMEOUT {{ login_timeout }} + +# +# Which fields may be changed by regular users using chfn - use +# any combination of letters "frwh" (full name, room number, work +# phone, home phone). If not defined, no changes are allowed. +# For backward compatibility, "yes" = "rwh" and "no" = "frwh". +# +{% if chfn_restrict %} +CHFN_RESTRICT {{ chfn_restrict }} +{% endif %} + +# +# Should login be allowed if we can't cd to the home directory? +# Default in no. +# +DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %} + +# +# If defined, this command is run when removing a user. +# It should remove any at/cron/print jobs etc. owned by +# the user to be removed (passed as the first argument). +# +#USERDEL_CMD /usr/sbin/userdel_local + +# +# Enable setting of the umask group bits to be the same as owner bits +# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is +# the same as gid, and username is the same as the primary group name. +# +# If set to yes, userdel will remove the user´s group if it contains no +# more members, and useradd will create by default a group with the name +# of the user. 
+# +USERGROUPS_ENAB yes + +# +# Instead of the real user shell, the program specified by this parameter +# will be launched, although its visible name (argv[0]) will be the shell's. +# The program may do whatever it wants (logging, additional authentification, +# banner, ...) before running the actual shell. +# +# FAKE_SHELL /bin/fakeshell + +# +# If defined, either full pathname of a file containing device names or +# a ":" delimited list of device names. Root logins will be allowed only +# upon these devices. +# +# This variable is used by login and su. +# +#CONSOLE /etc/consoles +#CONSOLE console:tty01:tty02:tty03:tty04 + +# +# List of groups to add to the user's supplementary group set +# when logging in on the console (as determined by the CONSOLE +# setting). Default is none. +# +# Use with caution - it is possible for users to gain permanent +# access to these groups, even when not logged in on the console. +# How to do it is left as an exercise for the reader... +# +# This variable is used by login and su. +# +#CONSOLE_GROUPS floppy:audio:cdrom + +# +# If set to "yes", new passwords will be encrypted using the MD5-based +# algorithm compatible with the one used by recent releases of FreeBSD. +# It supports passwords of unlimited length and longer salt strings. +# Set to "no" if you need to copy encrypted passwords to other systems +# which don't understand the new algorithm. Default is "no". +# +# This variable is deprecated. You should use ENCRYPT_METHOD. +# +MD5_CRYPT_ENAB no + +# +# If set to MD5 , MD5-based algorithm will be used for encrypting password +# If set to SHA256, SHA256-based algorithm will be used for encrypting password +# If set to SHA512, SHA512-based algorithm will be used for encrypting password +# If set to DES, DES-based algorithm will be used for encrypting password (default) +# Overrides the MD5_CRYPT_ENAB option +# +# Note: It is recommended to use a value consistent with +# the PAM modules configuration. +# +ENCRYPT_METHOD SHA512 + +# +# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. +# +# Define the number of SHA rounds. +# With a lot of rounds, it is more difficult to brute forcing the password. +# But note also that it more CPU resources will be needed to authenticate +# users. +# +# If not specified, the libc will choose the default number of rounds (5000). +# The values must be inside the 1000-999999999 range. +# If only one of the MIN or MAX values is set, then this value will be used. +# If MIN > MAX, the highest value will be used. +# +# SHA_CRYPT_MIN_ROUNDS 5000 +# SHA_CRYPT_MAX_ROUNDS 5000 + +################# OBSOLETED BY PAM ############## +# # +# These options are now handled by PAM. Please # +# edit the appropriate file in /etc/pam.d/ to # +# enable the equivelants of them. +# +############### + +#MOTD_FILE +#DIALUPS_CHECK_ENAB +#LASTLOG_ENAB +#MAIL_CHECK_ENAB +#OBSCURE_CHECKS_ENAB +#PORTTIME_CHECKS_ENAB +#SU_WHEEL_ONLY +#CRACKLIB_DICTPATH +#PASS_CHANGE_TRIES +#PASS_ALWAYS_WARN +#ENVIRON_FILE +#NOLOGINS_FILE +#ISSUE_FILE +#PASS_MIN_LEN +#PASS_MAX_LEN +#ULIMIT +#ENV_HZ +#CHFN_AUTH +#CHSH_AUTH +#FAIL_DELAY + +################# OBSOLETED ####################### +# # +# These options are no more handled by shadow. # +# # +# Shadow utilities will display a warning if they # +# still appear. 
# +# # +################################################### + +# CLOSE_SESSIONS +# LOGIN_STRING +# NO_PASSWORD_CONSOLE +# QMAIL_DIR + + + diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/modules new file mode 100644 index 00000000..ef0354ee --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/modules @@ -0,0 +1,117 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +# Arch +# ---- +# +# Modules for certains builds, contains support modules and some CPU-specific optimizations. + +{% if arch == "x86_64" -%} +# Optimize for x86_64 cryptographic features +twofish-x86_64-3way +twofish-x86_64 +aes-x86_64 +salsa20-x86_64 +blowfish-x86_64 +{% endif -%} + +{% if cpuVendor == "intel" -%} +# Intel-specific optimizations +ghash-clmulni-intel +aesni-intel +kvm-intel +{% endif -%} + +{% if cpuVendor == "amd" -%} +# AMD-specific optimizations +kvm-amd +{% endif -%} + +kvm + + +# Crypto +# ------ + +# Some core modules which comprise strong cryptography. +blowfish_common +blowfish_generic +ctr +cts +lrw +lzo +rmd160 +rmd256 +rmd320 +serpent +sha512_generic +twofish_common +twofish_generic +xts +zlib + + +# Drivers +# ------- + +# Basics +lp +rtc +loop + +# Filesystems +ext2 +btrfs + +{% if desktop_enable -%} +# Desktop +psmouse +snd +snd_ac97_codec +snd_intel8x0 +snd_page_alloc +snd_pcm +snd_timer +soundcore +usbhid +{% endif -%} + +# Lib +# --- +xz + + +# Net +# --- + +# All packets needed for netfilter rules (ie iptables, ebtables). +ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 00000000..f98d14e5 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 00000000..fd2de791 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 00000000..15b18d4e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 00000000..d9620299 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 00000000..d4f0ec19 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 00000000..3af8b89d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,89 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. 
+
+    :returns: list of audits
+    """
+    if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
+        log("MySQL does not appear to be installed on this node - "
+            "skipping mysql hardening", level=WARNING)
+        return []
+
+    settings = utils.get_settings('mysql')
+    hardening_settings = settings['hardening']
+    my_cnf = hardening_settings['mysql-conf']
+
+    audits = [
+        FilePermissionAudit(paths=[my_cnf], user='root',
+                            group='root', mode=0o0600),
+
+        TemplatedFile(hardening_settings['hardening-conf'],
+                      MySQLConfContext(),
+                      TEMPLATES_DIR,
+                      mode=0o0750,
+                      user='mysql',
+                      group='root',
+                      service_actions=[{'service': 'mysql',
+                                        'actions': ['restart']}]),
+
+        # MySQL and Percona charms do not allow configuration of the
+        # data directory, so use the default.
+        DirectoryPermissionAudit('/var/lib/mysql',
+                                 user='mysql',
+                                 group='mysql',
+                                 recursive=False,
+                                 mode=0o755),
+
+        DirectoryPermissionAudit('/etc/mysql',
+                                 user='root',
+                                 group='root',
+                                 recursive=False,
+                                 mode=0o700),
+    ]
+
+    return audits
+
+
+class MySQLConfContext(object):
+    """Defines the set of key/value pairs to set in a mysql config file.
+
+    This context, when called, will return a dictionary containing the
+    key/value pairs of settings to specify in the
+    /etc/mysql/conf.d/hardening.cnf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('mysql')
+        # Translate for python3
+        return {'mysql_settings':
+                [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
new file mode 100644
index 00000000..8242586c
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
@@ -0,0 +1,12 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+[mysqld]
+{% for setting, value in mysql_settings -%}
+{% if value == 'True' -%}
+{{ setting }}
+{% elif value != 'None' and value != None -%}
+{{ setting }} = {{ value }}
+{% endif -%}
+{% endfor -%}
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py
new file mode 100644
index 00000000..277b8c77
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py
new file mode 100644
index 00000000..b85150d5
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.ssh.checks import config
+
+
+def run_ssh_checks():
+    log("Starting SSH hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("SSH hardening checks complete.", level=DEBUG)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
new file mode 100644
index 00000000..3fb6ae8d
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -0,0 +1,394 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+from charmhelpers.core.host import lsb_release
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    FileContentAudit,
+)
+from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get SSH hardening config audits.
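+
+    The audits render /etc/ssh/ssh_config and /etc/ssh/sshd_config from the
+    bundled templates, then verify that the rendered files contain the
+    expected cipher, MAC and key exchange selections.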
+
+    :returns: list of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': settings['server']['listen_to'],
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot': settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+        if settings['client']['roaming']:
+            self.pass_cases.append(r'^UseRoaming yes$')
+        else:
+            self.fail_cases.append(r'^UseRoaming yes$')
+
+        return super(SSHConfigFileContentAudit, self).is_compliant(*args,
+                                                                   **kwargs)
+
+
+class SSHDConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/sshd_config'
+        super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 00000000..9742d8e2 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. 
command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. +{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 00000000..5f87298a --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication {{ password_authentication }} + +# Kerberos options +KerberosAuthentication no +KerberosGetAFSToken no +KerberosOrLocalPasswd no +KerberosTicketCleanup yes + +# GSSAPI options +GSSAPIAuthentication no +GSSAPICleanupCredentials yes + +X11Forwarding {{ allow_x11_forwarding }} +X11DisplayOffset 10 +X11UseLocalhost yes +GatewayPorts no +PrintMotd {{ print_motd }} +PrintLastLog {{ print_last_log }} +TCPKeepAlive no +UseLogin no + +ClientAliveInterval {{ client_alive_interval }} +ClientAliveCountMax {{ client_alive_count }} +AllowTcpForwarding {{ allow_tcp_forwarding }} +AllowAgentForwarding {{ allow_agent_forwarding }} + +MaxStartups 10:30:100 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 00000000..d2ab7dc9
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,71 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+
+try:
+    from jinja2 import FileSystemLoader, Environment
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    from charmhelpers.fetch import apt_update
+    apt_update(fatal=True)
+    apt_install('python-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, Environment
+
+
+# NOTE: function separated from main rendering code to facilitate easier
+# mocking in unit tests.
+def write(path, data):
+    with open(path, 'wb') as out:
+        out.write(data)
+
+
+def get_template_path(template_dir, path):
+    """Returns the path to the template file used to render the given path.
+
+    :param template_dir: the directory the templates are located in
+    :param path: the file path to be written to.
+    :returns: path to the template file
+    """
+    return os.path.join(template_dir, os.path.basename(path))
+
+
+def render_and_write(template_dir, path, context):
+    """Renders the specified template into the file.
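+
+    The template filename is derived from the basename of the target path;
+    the rendered content is UTF-8 encoded and stripped before being written.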
+
+    :param template_dir: the directory to load the template from
+    :param path: the path to write the templated contents to
+    :param context: the parameters to pass to the rendering engine
+    """
+    env = Environment(loader=FileSystemLoader(template_dir))
+    template_file = os.path.basename(path)
+    template = env.get_template(template_file)
+    log('Rendering from template: %s' % template.name, level=DEBUG)
+    rendered_content = template.render(context)
+    if not rendered_content:
+        log("Render returned empty content - skipping '%s'" % path,
+            level=WARNING)
+        return
+
+    write(path, rendered_content.encode('utf-8').strip())
+    log('Wrote template %s' % path, level=DEBUG)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py
new file mode 100644
index 00000000..a6743a4d
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py
@@ -0,0 +1,157 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import grp
+import os
+import pwd
+import six
+import yaml
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+)
+
+
+# Global settings cache. Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+    """Load the default config for the provided modules.
+
+    :param modules: stack modules config defaults to lookup.
+    :returns: modules default config dictionary.
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys in
+    the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+    """
+    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+                             'hardening.yaml')
+    if os.path.exists(overrides):
+        log("Found user-provided config overrides file '%s'" %
+            (overrides), level=DEBUG)
+        settings = yaml.safe_load(open(overrides))
+        if settings and settings.get(modules):
+            log("Applying '%s' overrides" % (modules), level=DEBUG)
+            return settings.get(modules)
+
+        log("No overrides found for '%s'" % (modules), level=DEBUG)
+    else:
+        log("No hardening config overrides file '%s' found in charm "
+            "root dir" % (overrides), level=DEBUG)
+
+    return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+    """Get overrides config overlayed onto modules defaults.
+
+    :param settings: modules default config.
+    :param overrides: user-provided config overrides.
+    :param schema: schema used to validate the provided overrides.
+    :returns: dictionary of modules config with user overrides applied.
+    """
+    if overrides:
+        for k, v in six.iteritems(overrides):
+            if k in schema:
+                if schema[k] is None:
+                    settings[k] = v
+                elif type(schema[k]) is dict:
+                    settings[k] = _apply_overrides(settings[k], overrides[k],
+                                                   schema[k])
+                else:
+                    raise Exception("Unexpected type found in schema '%s'" %
+                                    type(schema[k]))
+            else:
+                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+    return settings
+
+
+def get_settings(modules):
+    global __SETTINGS__
+    if modules in __SETTINGS__:
+        return __SETTINGS__[modules]
+
+    schema = _get_schema(modules)
+    settings = _get_defaults(modules)
+    overrides = _get_user_provided_overrides(modules)
+    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+    return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+    """Ensure permissions for path.
+
+    If path is a file, apply to file and return. If path is a directory,
+    apply recursively (if required) to directory contents and return.
+
+    :param path: path to set ownership and permissions on
+    :param user: user name
+    :param group: group name
+    :param permissions: octal permissions
+    :param maxdepth: maximum recursion depth. A negative maxdepth allows
+                     infinite recursion and maxdepth=0 means no recursion.
+    :returns: None
+    """
+    if not os.path.exists(path):
+        log("File '%s' does not exist - cannot set permissions" % (path),
+            level=WARNING)
+        return
+
+    _user = pwd.getpwnam(user)
+    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
+    os.chmod(path, permissions)
+
+    if maxdepth == 0:
+        log("Max recursion depth reached - skipping further recursion",
+            level=DEBUG)
+        return
+    elif maxdepth > 0:
+        maxdepth -= 1
+
+    if os.path.isdir(path):
+        contents = glob.glob("%s/*" % (path))
+        for c in contents:
+            ensure_permissions(c, user=user, group=group,
+                               permissions=permissions, maxdepth=maxdepth)
diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py
index 2dd70bc9..01321296 100644
--- a/ceph-osd/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py
@@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
     subprocess.check_call(cmd)
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """Fetch the resource path of the given name.
+
+    The name must match a resource defined in metadata.yaml.
+
+    Returns either a path or False if the resource is not available.
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
 @cached
 def juju_version():
     """Full version string (eg. '1.23.3.1-trusty-amd64')"""
@@ -976,3 +994,16 @@ def _run_atexit():
     for callback, args, kwargs in reversed(_atexit):
         callback(*args, **kwargs)
     del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    return subprocess.check_output(cmd).strip()
diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py
index a7720906..481087bb 100644
--- a/ceph-osd/hooks/charmhelpers/core/host.py
+++ b/ceph-osd/hooks/charmhelpers/core/host.py
@@ -30,6 +30,8 @@ import string
 import subprocess
 import hashlib
 
+import functools
+import itertools
 
 from contextlib import contextmanager
 from collections import OrderedDict
@@ -428,27 +430,47 @@ def config_changed():
     restarted if any file matching the pattern got changed, created
     or removed. Standard wildcards are supported, see documentation
     for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: default False; whether to stop and start the services
+                      instead of restarting them
+    @returns result from decorated function
     """
     def wrap(f):
+        @functools.wraps(f)
         def wrapped_f(*args, **kwargs):
-            checksums = {path: path_hash(path) for path in restart_map}
-            f(*args, **kwargs)
-            restarts = []
-            for path in restart_map:
-                if path_hash(path) != checksums[path]:
-                    restarts += restart_map[path]
-            services_list = list(OrderedDict.fromkeys(restarts))
-            if not stopstart:
-                for service_name in services_list:
-                    service('restart', service_name)
-            else:
-                for action in ['stop', 'start']:
-                    for service_name in services_list:
-                        service(action, service_name)
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart)
        return wrapped_f
    return wrap
 
 
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False):
+    """Helper function that implements the restart_on_change behaviour.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @returns result of lambda_f()
+    """
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for action in actions:
+            for service_name in services_list:
+                service(action, service_name)
+    return r
+
+
 def lsb_release():
     """Return /etc/lsb-release in a dict"""
     d = {}
diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
index 2591a9b1..3e159039 100644
--- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
+++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py
@@ -782,15 +782,20 @@ def get_uuid_epoch_stamp(self):
     # amulet juju action helpers:
     def run_action(self, unit_sentry, action,
-                   _check_output=subprocess.check_output):
+                   _check_output=subprocess.check_output,
+                   params=None):
         """Run the named action on a given unit sentry.
 
+        params is a dict of parameters to pass to the action;
         _check_output parameter is used for dependency injection.
 
         @return action_id.
         """
         unit_id = unit_sentry.info["unit_name"]
         command = ["juju", "action", "do", "--format=json", unit_id, action]
+        if params is not None:
+            for key, value in params.iteritems():
+                command.append("{}={}".format(key, value))
         self.log.info("Running command: %s\n" % " ".join(command))
         output = _check_output(command, universal_newlines=True)
         data = json.loads(output)
diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 388b60e6..ef3bdccf 100644
--- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -27,7 +27,11 @@ import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
 import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
+from keystoneclient.auth.identity import v3 as keystone_id_v3
+from keystoneclient import session as keystone_session
+from keystoneclient.v3 import client as keystone_client_v3
+
+import novaclient.client as nova_client
 import pika
 import swiftclient
@@ -38,6 +42,8 @@ DEBUG = logging.DEBUG
 ERROR = logging.ERROR
 
+NOVA_CLIENT_VERSION = "2"
+
 
 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -139,7 +145,7 @@ def validate_role_data(self, expected, actual):
             return "role {} does not exist".format(e['name'])
         return ret
 
-    def validate_user_data(self, expected, actual):
+    def validate_user_data(self, expected, actual, api_version=None):
         """Validate user data.
Validate a list of actual user data vs a list of expected user @@ -150,10 +156,15 @@ def validate_user_data(self, expected, actual): for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +199,30 @@ def authenticate_cinder_admin(self, keystone_sentry, username, return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" @@ -225,7 +251,8 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index 4ad626ff..e635a28c 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -1,10 +1,22 @@ import os.path import shutil import tempfile - +import sys import test_utils -import ceph_hooks as hooks +from mock import patch, MagicMock + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
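+# The mock is installed into sys.modules before ceph_hooks is imported below,
+# so any 'import apt' performed during that import resolves to the mock
+# rather than failing.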
+mock_apt = MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = MagicMock() + + +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks as hooks TO_PATCH = [ 'config', diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index f4342d28..f7e19b83 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -1,7 +1,12 @@ import mock import test_utils -import ceph_hooks as hooks +from mock import patch + +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks as hooks TO_PATCH = [ 'status_set', diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index 663a0488..33333ce7 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -1,11 +1,18 @@ import logging import unittest import os +import sys import yaml from contextlib import contextmanager from mock import patch, MagicMock +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. +mock_apt = MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = MagicMock() + def load_config(): ''' From 321d87513c17e773e53d1856c4cc9f0e3141aa8d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 22 Mar 2016 19:30:12 +0000 Subject: [PATCH 1077/2699] Add hardening support Add charmhelpers.contrib.hardening and calls to install, config-changed, upgrade-charm and update-status hooks. Also add new config option to allow one or more hardening modules to be applied at runtime. 
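For reference, the hooks gain the decorator in this pattern (a minimal
sketch; the hook function shown is illustrative):

    from charmhelpers.contrib.hardening.harden import harden

    @hooks.hook('config-changed')
    @harden()
    def config_changed():
        ...

With e.g. harden="os ssh" set in the charm config, each such hook
invocation also triggers the os and ssh hardening checks.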
Change-Id: I7f5b2f6f271829c696a496319d64f575d0da1e26 --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + ceph-radosgw/config.yaml | 7 + ceph-radosgw/hardening.yaml | 5 + .../contrib/hardening/README.hardening.md | 38 ++ .../contrib/hardening/__init__.py | 15 + .../contrib/hardening/apache/__init__.py | 19 + .../hardening/apache/checks/__init__.py | 31 + .../contrib/hardening/apache/checks/config.py | 100 ++++ .../hardening/apache/templates/__init__.py | 0 .../hardening/apache/templates/alias.conf | 31 + .../hardening/apache/templates/hardening.conf | 18 + .../contrib/hardening/audits/__init__.py | 63 ++ .../contrib/hardening/audits/apache.py | 100 ++++ .../contrib/hardening/audits/apt.py | 105 ++++ .../contrib/hardening/audits/file.py | 552 ++++++++++++++++++ .../contrib/hardening/defaults/__init__.py | 0 .../contrib/hardening/defaults/apache.yaml | 13 + .../hardening/defaults/apache.yaml.schema | 9 + .../contrib/hardening/defaults/mysql.yaml | 38 ++ .../hardening/defaults/mysql.yaml.schema | 15 + .../contrib/hardening/defaults/os.yaml | 67 +++ .../contrib/hardening/defaults/os.yaml.schema | 42 ++ .../contrib/hardening/defaults/ssh.yaml | 49 ++ .../hardening/defaults/ssh.yaml.schema | 42 ++ .../charmhelpers/contrib/hardening/harden.py | 84 +++ .../contrib/hardening/host/__init__.py | 19 + .../contrib/hardening/host/checks/__init__.py | 50 ++ .../contrib/hardening/host/checks/apt.py | 39 ++ .../contrib/hardening/host/checks/limits.py | 55 ++ .../contrib/hardening/host/checks/login.py | 67 +++ .../hardening/host/checks/minimize_access.py | 52 ++ .../contrib/hardening/host/checks/pam.py | 134 +++++ .../contrib/hardening/host/checks/profile.py | 45 ++ .../hardening/host/checks/securetty.py | 39 ++ .../hardening/host/checks/suid_sgid.py | 131 +++++ .../contrib/hardening/host/checks/sysctl.py | 211 +++++++ .../hardening/host/templates/10.hardcore.conf | 8 + .../host/templates/99-juju-hardening.conf | 7 + .../hardening/host/templates/__init__.py | 0 .../hardening/host/templates/login.defs | 349 +++++++++++ .../contrib/hardening/host/templates/modules | 117 ++++ .../hardening/host/templates/passwdqc.conf | 11 + .../host/templates/pinerolo_profile.sh | 8 + .../hardening/host/templates/securetty | 11 + .../contrib/hardening/host/templates/tally2 | 14 + .../contrib/hardening/mysql/__init__.py | 19 + .../hardening/mysql/checks/__init__.py | 31 + .../contrib/hardening/mysql/checks/config.py | 89 +++ .../hardening/mysql/templates/__init__.py | 0 .../hardening/mysql/templates/hardening.cnf | 12 + .../contrib/hardening/ssh/__init__.py | 19 + .../contrib/hardening/ssh/checks/__init__.py | 31 + .../contrib/hardening/ssh/checks/config.py | 394 +++++++++++++ .../hardening/ssh/templates/__init__.py | 0 .../hardening/ssh/templates/ssh_config | 70 +++ .../hardening/ssh/templates/sshd_config | 159 +++++ .../contrib/hardening/templating.py | 71 +++ .../charmhelpers/contrib/hardening/utils.py | 157 +++++ .../templates/section-keystone-authtoken | 18 +- .../section-keystone-authtoken-legacy | 10 + ceph-radosgw/hooks/hooks.py | 9 + ceph-radosgw/hooks/update-status | 1 + .../charmhelpers/contrib/amulet/utils.py | 7 +- ceph-radosgw/unit_tests/test_hooks.py | 18 +- 64 files changed, 3909 insertions(+), 17 deletions(-) create mode 100644 ceph-radosgw/hardening.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/README.hardening.md create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py create 
mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/modules create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/securetty create mode 100644 
ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/tally2 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy create mode 120000 ceph-radosgw/hooks/update-status diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 9f626c62..ed1b6154 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -16,3 +16,4 @@ include: - contrib.storage.linux - contrib.python.packages - contrib.charmsupport + - contrib.hardening|inc=* \ No newline at end of file diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 114998cb..85bf9e45 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -191,3 +191,10 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup + harden: + default: + type: string + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. + diff --git a/ceph-radosgw/hardening.yaml b/ceph-radosgw/hardening.yaml new file mode 100644 index 00000000..314bb385 --- /dev/null +++ b/ceph-radosgw/hardening.yaml @@ -0,0 +1,5 @@ +# Overrides file for contrib.hardening. See README.hardening in +# contrib.hardening for info on how to use this file. +ssh: + server: + use_pam: 'yes' # juju requires this diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/README.hardening.md new file mode 100644 index 00000000..91280c03 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/README.hardening.md @@ -0,0 +1,38 @@ +# Juju charm-helpers hardening library + +## Description + +This library provides multiple implementations of system and application +hardening that conform to the standards of http://hardening.io/. + +Current implementations include: + + * OS + * SSH + * MySQL + * Apache + +## Requirements + +* Juju Charms + +## Usage + +1. Synchronise this library into your charm and add the harden() decorator + (from contrib.hardening.harden) to any functions or methods you want to use + to trigger hardening of your application/system. + +2. 
Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at root level and then any
+   settings with overrides e.g.
+
+    os:
+      general:
+        desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time the
+   hook runs.
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 00000000..a1335320
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 00000000..277b8c77
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 00000000..d1304792
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
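+
+# NOTE: run_apache_checks() below is wired up via the harden decorator (see
+# harden.py): it runs when 'apache' appears in the charm's 'harden' config
+# option; the os, ssh and mysql modules follow the same pattern.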
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 00000000..8249ca01 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import re +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: + log("Apache server does not appear to be installed on this node - " + "skipping apache hardening", level=INFO) + return [] + + context = ApacheConfContext() + settings = utils.get_settings('apache') + audits = [ + FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', + group='root', mode=0o0640), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'mods-available/alias.conf'), + context, + TEMPLATES_DIR, + mode=0o0755, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'conf-enabled/hardening.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + DirectoryPermissionAudit(settings['common']['apache_dir'], + user='root', + group='root', + mode=0o640), + + DisabledModuleAudit(settings['hardening']['modules_to_disable']), + + NoReadWriteForOther(settings['common']['apache_dir']), + ] + + return audits + + +class ApacheConfContext(object): + """Defines the set of key/value pairs to set in a apache config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/apache/conf-enabled/hardening.conf file. 
+ """ + def __call__(self): + settings = utils.get_settings('apache') + ctxt = settings['hardening'] + + out = subprocess.check_output(['apache2', '-v']) + ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', + out).group(1) + ctxt['apache_icondir'] = '/usr/share/apache2/icons/' + ctxt['traceenable'] = settings['hardening']['traceenable'] + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf new file mode 100644 index 00000000..e46a58a3 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf @@ -0,0 +1,31 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + # + # Aliases: Add here as many aliases as you need (with no limit). The format is + # Alias fakename realname + # + # Note that if you include a trailing / on fakename then the server will + # require it to be present in the URL. So "/icons" isn't aliased in this + # example, only "/icons/". If the fakename is slash-terminated, then the + # realname must also be slash terminated, and if the fakename omits the + # trailing slash, the realname must also omit it. + # + # We include the /icons/ alias for FancyIndexed directory listings. If + # you do not use FancyIndexing, you may comment this out. + # + Alias /icons/ "{{ apache_icondir }}/" + + + Options -Indexes -MultiViews -FollowSymLinks + AllowOverride None +{% if apache_version == '2.4' -%} + Require all granted +{% else -%} + Order allow,deny + Allow from all +{% endif %} + + diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf new file mode 100644 index 00000000..07945418 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf @@ -0,0 +1,18 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + + + # http://httpd.apache.org/docs/2.4/upgrading.html + {% if apache_version > '2.2' -%} + Require all granted + {% else -%} + Order Allow,Deny + Deny from all + {% endif %} + + + +TraceEnable {{ traceenable }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py new file mode 100644 index 00000000..6a7057b3 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -0,0 +1,63 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 00000000..6a7057b3
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class BaseAudit(object):  # noqa
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first check to see if the system
+    is in compliance for the specified check. If it is not in compliance,
+    the check's comply method is invoked to bring the system into compliance.
+    """
+    def __init__(self, *args, **kwargs):
+        self.unless = kwargs.get('unless', None)
+        super(BaseAudit, self).__init__()
+
+    def ensure_compliance(self):
+        """Checks to see if the current hardening check is in compliance or
+        not.
+
+        If the check that is performed is not in compliance, then an exception
+        should be raised.
+        """
+        pass
+
+    def _take_action(self):
+        """Determines whether to perform the action or not.
+
+        Checks whether or not an action should be taken. This is determined by
+        the truthy value for the unless parameter. If unless is a callback
+        method, it will be invoked with no parameters in order to determine
+        whether or not the action should be taken. Otherwise, the truthy value
+        of the unless attribute will determine if the action should be
+        performed.
+        """
+        # Do the action if there isn't an unless override.
+        if self.unless is None:
+            return True
+
+        # Invoke the callback if there is one.
+        if hasattr(self.unless, '__call__'):
+            results = self.unless()
+            if results:
+                return False
+            else:
+                return True
+
+        if self.unless:
+            return False
+        else:
+            return True
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py
new file mode 100644
index 00000000..cf3c987d
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import subprocess
+
+from six import string_types
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    ERROR,
+)
+
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class DisabledModuleAudit(BaseAudit):
+    """Audits Apache2 modules.
+
+    Determines if the apache2 modules are enabled. If they are enabled,
+    they are disabled during ensure_compliance.
+ """ + def __init__(self, modules): + if modules is None: + self.modules = [] + elif isinstance(modules, string_types): + self.modules = [modules] + else: + self.modules = modules + + def ensure_compliance(self): + """Ensures that the modules are not loaded.""" + if not self.modules: + return + + try: + loaded_modules = self._get_loaded_modules() + non_compliant_modules = [] + for module in self.modules: + if module in loaded_modules: + log("Module '%s' is enabled but should not be." % + (module), level=INFO) + non_compliant_modules.append(module) + + if len(non_compliant_modules) == 0: + return + + for module in non_compliant_modules: + self._disable_module(module) + self._restart_apache() + except subprocess.CalledProcessError as e: + log('Error occurred auditing apache module compliance. ' + 'This may have been already reported. ' + 'Output is: %s' % e.output, level=ERROR) + + @staticmethod + def _get_loaded_modules(): + """Returns the modules which are enabled in Apache.""" + output = subprocess.check_output(['apache2ctl', '-M']) + modules = [] + for line in output.strip().split(): + # Each line of the enabled module output looks like: + # module_name (static|shared) + # Plus a header line at the top of the output which is stripped + # out by the regex. + matcher = re.search(r'^ (\S*)', line) + if matcher: + modules.append(matcher.group(1)) + return modules + + @staticmethod + def _disable_module(module): + """Disables the specified module in Apache.""" + try: + subprocess.check_call(['a2dismod', module]) + except subprocess.CalledProcessError as e: + # Note: catch error here to allow the attempt of disabling + # multiple modules in one go rather than failing after the + # first module fails. + log('Error occurred disabling module %s. ' + 'Output is: %s' % (module, e.output), level=ERROR) + + @staticmethod + def _restart_apache(): + """Restarts the apache process""" + subprocess.check_output(['service', 'apache2', 'restart']) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py new file mode 100644 index 00000000..e94af031 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -0,0 +1,105 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from __future__ import absolute_import # required for external apt import +from apt import apt_pkg +from six import string_types + +from charmhelpers.fetch import ( + apt_cache, + apt_purge +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class AptConfig(BaseAudit): + + def __init__(self, config, **kwargs): + self.config = config + + def verify_config(self): + apt_pkg.init() + for cfg in self.config: + value = apt_pkg.config.get(cfg['key'], cfg.get('default', '')) + if value and value != cfg['expected']: + log("APT config '%s' has unexpected value '%s' " + "(expected='%s')" % + (cfg['key'], value, cfg['expected']), level=WARNING) + + def ensure_compliance(self): + self.verify_config() + + +class RestrictedPackages(BaseAudit): + """Class used to audit restricted packages on the system.""" + + def __init__(self, pkgs, **kwargs): + super(RestrictedPackages, self).__init__(**kwargs) + if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + self.pkgs = [pkgs] + else: + self.pkgs = pkgs + + def ensure_compliance(self): + cache = apt_cache() + + for p in self.pkgs: + if p not in cache: + continue + + pkg = cache[p] + if not self.is_virtual_package(pkg): + if not pkg.current_ver: + log("Package '%s' is not installed." % pkg.name, + level=DEBUG) + continue + else: + log("Restricted package '%s' is installed" % pkg.name, + level=WARNING) + self.delete_package(cache, pkg) + else: + log("Checking restricted virtual package '%s' provides" % + pkg.name, level=DEBUG) + self.delete_package(cache, pkg) + + def delete_package(self, cache, pkg): + """Deletes the package from the system. + + Deletes the package form the system, properly handling virtual + packages. + + :param cache: the apt cache + :param pkg: the package to remove + """ + if self.is_virtual_package(pkg): + log("Package '%s' appears to be virtual - purging provides" % + pkg.name, level=DEBUG) + for _p in pkg.provides_list: + self.delete_package(cache, _p[2].parent_pkg) + elif not pkg.current_ver: + log("Package '%s' not installed" % pkg.name, level=DEBUG) + return + else: + log("Purging package '%s'" % pkg.name, level=DEBUG) + apt_purge(pkg.name) + + def is_virtual_package(self, pkg): + return pkg.has_provides and not pkg.has_versions diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py new file mode 100644 index 00000000..0fb545a9 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -0,0 +1,552 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
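Both audit classes in apt.py above follow the library's construct-then-ensure_compliance pattern. A sketch of how a caller would drive them (the package names are examples; actually running this needs a Debian/Ubuntu host with python-apt and the synced charm-helpers tree):

```python
from charmhelpers.contrib.hardening.audits.apt import (
    AptConfig,
    RestrictedPackages,
)

audits = [
    # Warn if apt is configured to accept unauthenticated packages.
    AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
                'expected': 'false'}]),
    # Purge known-risky services if they turn out to be installed.
    RestrictedPackages(['telnet-server', 'rsh-server']),
]

for audit in audits:
    audit.ensure_compliance()
```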
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class deriving from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files to which we want
+                      to apply compliance checks and criteria.
+        :param always_comply: if True, compliance criteria are always applied;
+                              otherwise compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with the registered
+        criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the POSIX stat information for the specified file path.
+
+        :param path: the path to get the stat information for.
+        :returns: the stat result for the path; note that os.stat raises
+                  OSError if the path does not exist.
+        """
+        return os.stat(path)
+
+
+class FilePermissionAudit(BaseFileAudit):
+    """Implements an audit for file permissions and ownership for a user.
+
+    This class implements functionality that ensures that a specific
+    user/group will own the file(s) specified and that the permissions
+    specified are applied properly to the file.
+ """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
% path)
+
+        if not self.recursive:
+            return super(DirectoryPermissionAudit, self).is_compliant(path)
+
+        compliant = True
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                continue
+
+            if not super(DirectoryPermissionAudit, self).is_compliant(root):
+                compliant = False
+                continue
+
+        return compliant
+
+    def comply(self, path):
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                super(DirectoryPermissionAudit, self).comply(root)
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred checking for writable files in %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are not readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above here will find any files which have read or
+            # write permissions for other, meaning there is too broad of
+            # access to read/write the file. As such, the path is compliant
+            # if there's no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output))
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s.'
+ 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFileAudit audits the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check the content + changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no hashsum, then the content + cannot be sure to be the same so treat it as if they changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.' 
% template_path, + level=DEBUG) + return False + + # Here the template hasn't changed based upon the calculated + # checksum of the template and what was previously stored. + return True + + def contents_match(self, path): + """Determines if the file content is the same. + + This is determined by comparing hashsum of the file contents and + the saved hashsum. If there is no hashsum, then the content cannot + be sure to be the same so treat them as if they are not the same. + Otherwise, return True if the hashsums are the same, False if they + are not the same. + + :param path: the file to check. + """ + checksum = file_hash(path) + + kv = unitdata.kv() + stored_checksum = kv.get('hardening:%s' % path) + if not stored_checksum: + # If the checksum hasn't been generated, return False to ensure + # the file is written and the checksum stored. + log('Checksum for %s has not been calculated.' % path, level=DEBUG) + return False + elif stored_checksum != checksum: + log('Checksum mismatch for %s.' % path, level=DEBUG) + return False + + return True + + def permissions_match(self, path): + """Determines if the file owner and permissions match. + + :param path: the path to check. + """ + audit = FilePermissionAudit(path, self.user, self.group, self.mode) + return audit.is_compliant(path) + + def save_checksum(self, path): + """Calculates and saves the checksum for the path specified. + + :param path: the path of the file to save the checksum. + """ + checksum = file_hash(path) + kv = unitdata.kv() + kv.set('hardening:%s' % path, checksum) + kv.flush() + + +class DeletedFile(BaseFileAudit): + """Audit to ensure that a file is deleted.""" + def __init__(self, paths): + super(DeletedFile, self).__init__(paths) + + def is_compliant(self, path): + return not os.path.exists(path) + + def comply(self, path): + os.remove(path) + + +class FileContentAudit(BaseFileAudit): + """Audit the contents of a file.""" + def __init__(self, paths, cases, **kwargs): + # Cases we expect to pass + self.pass_cases = cases.get('pass', []) + # Cases we expect to fail + self.fail_cases = cases.get('fail', []) + super(FileContentAudit, self).__init__(paths, **kwargs) + + def is_compliant(self, path): + """ + Given a set of content matching cases i.e. tuple(regex, bool) where + bool value denotes whether or not regex is expected to match, check that + all cases match as expected with the contents of the file. Cases can be + expected to pass of fail. + + :param path: Path of file to check. + :returns: Boolean value representing whether or not all cases are + found to be compliant. + """ + log("Auditing contents of file '%s'" % (path), level=DEBUG) + with open(path, 'r') as fd: + contents = fd.read() + + matches = 0 + for pattern in self.pass_cases: + key = re.compile(pattern, flags=re.MULTILINE) + results = re.search(key, contents) + if results: + matches += 1 + else: + log("Pattern '%s' was expected to pass but instead it failed" + % (pattern), level=WARNING) + + for pattern in self.fail_cases: + key = re.compile(pattern, flags=re.MULTILINE) + results = re.search(key, contents) + if not results: + matches += 1 + else: + log("Pattern '%s' was expected to fail but instead it passed" + % (pattern), level=WARNING) + + total = len(self.pass_cases) + len(self.fail_cases) + log("Checked %s cases and %s passed" % (total, matches), level=DEBUG) + return matches == total + + def comply(self, *args, **kwargs): + """NOOP since we just issue warnings. This is to avoid the + NotImplememtedError. 
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 00000000..e5ada29f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,13 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 00000000..227589b5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,9 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 00000000..682d22bf --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
+ +hardening: + mysql-conf: /etc/mysql/my.cnf + hardening-conf: /etc/mysql/conf.d/hardening.cnf + +security: + # @see http://www.symantec.com/connect/articles/securing-mysql-step-step + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot + chroot: None + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create + safe-user-create: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth + secure-auth: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links + skip-symbolic-links: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database + skip-show-database: True + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile + local-infile: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs + allow-suspicious-udfs: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges + automatic-sp-privileges: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv + secure-file-priv: /tmp diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema new file mode 100644 index 00000000..2edf325c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema @@ -0,0 +1,15 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +hardening: + mysql-conf: + hardening-conf: +security: + chroot: + safe-user-create: + secure-auth: + skip-symbolic-links: + skip-show-database: + local-infile: + allow-suspicious-udfs: + automatic-sp-privileges: + secure-file-priv: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml new file mode 100644 index 00000000..ddd4286c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -0,0 +1,67 @@ +# NOTE: this file contains the default configuration for the 'os' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'os' as the root key followed by any of the following with new +# values. 
+ +general: + desktop_enable: False # (type:boolean) + +environment: + extra_user_paths: [] + umask: 027 + root_path: / + +auth: + pw_max_age: 60 + # discourage password cycling + pw_min_age: 7 + retries: 5 + lockout_time: 600 + timeout: 60 + allow_homeless: False # (type:boolean) + pam_passwdqc_enable: True # (type:boolean) + pam_passwdqc_options: 'min=disabled,disabled,16,12,8' + root_ttys: + console + tty1 + tty2 + tty3 + tty4 + tty5 + tty6 + uid_min: 1000 + gid_min: 1000 + sys_uid_min: 100 + sys_uid_max: 999 + sys_gid_min: 100 + sys_gid_max: 999 + chfn_restrict: + +security: + users_allow: [] + suid_sgid_enforce: True # (type:boolean) + # user-defined blacklist and whitelist + suid_sgid_blacklist: [] + suid_sgid_whitelist: [] + # if this is True, remove any suid/sgid bits from files that were not in the whitelist + suid_sgid_dry_run_on_unknown: False # (type:boolean) + suid_sgid_remove_from_unknown: False # (type:boolean) + # remove packages with known issues + packages_clean: True # (type:boolean) + packages_list: + xinetd + inetd + ypserv + telnet-server + rsh-server + rsync + kernel_enable_module_loading: True # (type:boolean) + kernel_enable_core_dump: False # (type:boolean) + +sysctl: + kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 + kernel_enable_sysrq: False # (type:boolean) + forwarding: False # (type:boolean) + ipv6_enable: False # (type:boolean) + arp_restricted: True # (type:boolean) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema new file mode 100644 index 00000000..88b3966e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +general: + desktop_enable: +environment: + extra_user_paths: + umask: + root_path: +auth: + pw_max_age: + pw_min_age: + retries: + lockout_time: + timeout: + allow_homeless: + pam_passwdqc_enable: + pam_passwdqc_options: + root_ttys: + uid_min: + gid_min: + sys_uid_min: + sys_uid_max: + sys_gid_min: + sys_gid_max: + chfn_restrict: +security: + users_allow: + suid_sgid_enforce: + suid_sgid_blacklist: + suid_sgid_whitelist: + suid_sgid_dry_run_on_unknown: + suid_sgid_remove_from_unknown: + packages_clean: + packages_list: + kernel_enable_module_loading: + kernel_enable_core_dump: +sysctl: + kernel_secure_sysrq: + kernel_enable_sysrq: + forwarding: + ipv6_enable: + arp_restricted: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml new file mode 100644 index 00000000..cd529bca --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml @@ -0,0 +1,49 @@ +# NOTE: this file contains the default configuration for the 'ssh' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'ssh' as the root key followed by any of the following with new +# values. 
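One easily misread default in os.yaml above: kernel_secure_sysrq is a bitmask of allowed SysRq functions, not a boolean. A quick check of the arithmetic in its comment:

```python
# Bits per the kernel's sysrq documentation: 4=keyboard control,
# 16=sync, 32=remount read-only, 64=signal processes, 128=reboot/poweroff.
bits = (4, 16, 32, 64, 128)
assert sum(bits) == 244
print(sum(bits))  # 244, matching the kernel_secure_sysrq default
```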
+ +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 00000000..d05e054b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 00000000..ac7568d6 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,84 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
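harden.py below resolves the 'harden' config value against an ordered catalog, so modules always execute in a fixed order no matter how the operator lists them, and unrecognised names are logged. A standalone mock of just that selection logic:

```python
from collections import OrderedDict

catalog = OrderedDict([('os', 'run_os_checks'),
                       ('ssh', 'run_ssh_checks'),
                       ('mysql', 'run_mysql_checks'),
                       ('apache', 'run_apache_checks')])

enabled = 'ssh os bogus'.split()  # as read from config('harden')
to_run = []
for module, func in catalog.items():
    if module in enabled:
        enabled.remove(module)
        to_run.append(func)

print(to_run)   # ['run_os_checks', 'run_ssh_checks'] - catalog order wins
print(enabled)  # ['bogus'] - reported as an unknown hardening module
```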
+ +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + def _harden_inner1(f): + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + + def _harden_inner2(*args, **kwargs): + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
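Every checks submodule that follows exposes the same tiny contract: a get_audits() function returning BaseAudit instances for run_os_checks() to drive. A toy custom check written against that contract (MOTDPresent is hypothetical and exists only to show the shape):

```python
import os

from charmhelpers.contrib.hardening.audits import BaseAudit


class MOTDPresent(BaseAudit):
    """Toy audit: report when /etc/motd is missing."""
    def ensure_compliance(self):
        if not os.path.exists('/etc/motd'):
            print('non-compliant: /etc/motd is missing')


def get_audits():
    return [MOTDPresent()]
```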
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 00000000..c3bd5985 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 00000000..2c221cda --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,39 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. 
+ + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 00000000..8ce9dc2b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,55 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening security limits audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Ensure that the /etc/security/limits.d directory is only writable + # by the root user, but others can execute and read. + audits.append(DirectoryPermissionAudit('/etc/security/limits.d', + user='root', group='root', + mode=0o755)) + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', + SecurityLimitsContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0440)) + return audits + + +class SecurityLimitsContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'disable_core_dump': + not settings['security']['kernel_enable_core_dump']} + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py new file mode 100644 index 00000000..d32c4f60 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -0,0 +1,67 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
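LoginContext below has to undo a YAML quirk that is worth seeing in isolation: an unquoted leading-zero number such as 027 is parsed as octal, so the configured umask arrives as the integer 23 and must be turned back into an octal string (on the Python 2 runtime this code targets, oct(23) is '027'):

```python
import yaml

settings = yaml.safe_load('umask: 027')
print(settings['umask'])       # 23 - PyYAML read 027 as an octal literal
print(oct(settings['umask']))  # '027' on Python 2, '0o27' on Python 3
```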
+ +from six import string_types + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening login.defs audits. + + :returns: dictionary of audits + """ + audits = [TemplatedFile('/etc/login.defs', LoginContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0444)] + return audits + + +class LoginContext(object): + + def __call__(self): + settings = utils.get_settings('os') + + # Octal numbers in yaml end up being turned into decimal, + # so check if the umask is entered as a string (e.g. '027') + # or as an octal umask as we know it (e.g. 002). If its not + # a string assume it to be octal and turn it into an octal + # string. + umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 00000000..c471064b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,52 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. + # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. 
+ audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) + + if 'change_user' not in settings['security']['users_allow']: + # su should only be accessible to user and group root, unless it is + # expressly defined to allow users to change to root via the + # security_users_allow config option. + audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) + + return audits diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py new file mode 100644 index 00000000..383fe28e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -0,0 +1,134 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from subprocess import ( + check_output, + CalledProcessError, +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) +from charmhelpers.fetch import ( + apt_install, + apt_purge, + apt_update, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + DeletedFile, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +def get_audits(): + """Get OS hardening PAM authentication audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + if settings['auth']['pam_passwdqc_enable']: + audits.append(PasswdqcPAM('/etc/passwdqc.conf')) + + if settings['auth']['retries']: + audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) + else: + audits.append(DeletedFile('/usr/share/pam-configs/tally2')) + + return audits + + +class PasswdqcPAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_pam_passwdqc_options'] = \ + settings['auth']['pam_passwdqc_options'] + + return ctxt + + +class PasswdqcPAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(PasswdqcPAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=PasswdqcPAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? 
+ for pkg in ['libpam-ccreds', 'libpam-cracklib']: + log("Purging package '%s'" % pkg, level=DEBUG), + apt_purge(pkg) + + apt_update(fatal=True) + for pkg in ['libpam-passwdqc']: + log("Installing package '%s'" % pkg, level=DEBUG), + apt_install(pkg) + + def post_write(self): + """Updates the PAM configuration after the file has been written""" + try: + check_output(['pam-auth-update', '--package']) + except CalledProcessError as e: + log('Error calling pam-auth-update: %s' % e, level=ERROR) + + +class Tally2PAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_lockout_time'] = settings['auth']['lockout_time'] + ctxt['auth_retries'] = settings['auth']['retries'] + + return ctxt + + +class Tally2PAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(Tally2PAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=Tally2PAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? + apt_purge('libpam-ccreds') + apt_update(fatal=True) + apt_install('libpam-modules') + + def post_write(self): + """Updates the PAM configuration after the file has been written""" + try: + check_output(['pam-auth-update', '--package']) + except CalledProcessError as e: + log('Error calling pam-auth-update: %s' % e, level=ERROR) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py new file mode 100644 index 00000000..f7443357 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -0,0 +1,45 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening profile audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0755, user='root', group='root')) + return audits + + +class ProfileContext(object): + + def __call__(self): + ctxt = {} + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py new file mode 100644 index 00000000..e33c73ca --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -0,0 +1,39 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening Secure TTY audits. + + :returns: dictionary of audits + """ + audits = [] + audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), + template_dir=TEMPLATES_DIR, + mode=0o0400, user='root', group='root')) + return audits + + +class SecureTTYContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ttys': settings['auth']['root_ttys']} + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py new file mode 100644 index 00000000..0534689b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -0,0 +1,131 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
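+
+# (Added commentary, not upstream code.)  This module audits setuid/setgid
+# binaries.  The filesystem scan defined further down is essentially the
+# shell pipeline
+#
+#     find / -perm -4000 -o -perm -2000 -type f ! -path '/proc/*' -print
+#
+# with a configurable root path, and with whitelisted paths subtracted from
+# the results before any suid/sgid bits are removed.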
+ +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit +from charmhelpers.contrib.hardening import utils + + +BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh', + '/usr/libexec/openssh/ssh-keysign', + '/usr/lib/openssh/ssh-keysign', + '/sbin/netreport', + '/usr/sbin/usernetctl', + '/usr/sbin/userisdnctl', + '/usr/sbin/pppd', + '/usr/bin/lockfile', + '/usr/bin/mail-lock', + '/usr/bin/mail-unlock', + '/usr/bin/mail-touchlock', + '/usr/bin/dotlockfile', + '/usr/bin/arping', + '/usr/sbin/uuidd', + '/usr/bin/mtr', + '/usr/lib/evolution/camel-lock-helper-1.2', + '/usr/lib/pt_chown', + '/usr/lib/eject/dmcrypt-get-device', + '/usr/lib/mc/cons.saver'] + +WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount', + '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at', + '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp', + '/usr/bin/passwd', '/usr/bin/ssh-agent', + '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev', + '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry', + '/bin/ping6', '/usr/bin/traceroute6.iputils', + '/sbin/mount.nfs', '/sbin/umount.nfs', + '/sbin/mount.nfs4', '/sbin/umount.nfs4', + '/usr/bin/crontab', + '/usr/bin/wall', '/usr/bin/write', + '/usr/bin/screen', + '/usr/bin/mlocate', + '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh', + '/bin/fusermount', + '/usr/bin/pkexec', + '/usr/bin/sudo', '/usr/bin/sudoedit', + '/usr/sbin/postdrop', '/usr/sbin/postqueue', + '/usr/sbin/suexec', + '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth', + '/usr/kerberos/bin/ksu', + '/usr/sbin/ccreds_validate', + '/usr/bin/Xorg', + '/usr/bin/X', + '/usr/lib/dbus-1.0/dbus-daemon-launch-helper', + '/usr/lib/vte/gnome-pty-helper', + '/usr/lib/libvte9/gnome-pty-helper', + '/usr/lib/libvte-2.90-9/gnome-pty-helper'] + + +def get_audits(): + """Get OS hardening suid/sgid audits. + + :returns: dictionary of audits + """ + checks = [] + settings = utils.get_settings('os') + if not settings['security']['suid_sgid_enforce']: + log("Skipping suid/sgid hardening", level=INFO) + return checks + + # Build the blacklist and whitelist of files for suid/sgid checks. + # There are a total of 4 lists: + # 1. the system blacklist + # 2. the system whitelist + # 3. the user blacklist + # 4. the user whitelist + # + # The blacklist is the set of paths which should NOT have the suid/sgid bit + # set and the whitelist is the set of paths which MAY have the suid/sgid + # bit setl. The user whitelist/blacklist effectively override the system + # whitelist/blacklist. + u_b = settings['security']['suid_sgid_blacklist'] + u_w = settings['security']['suid_sgid_whitelist'] + + blacklist = set(BLACKLIST) - set(u_w + u_b) + whitelist = set(WHITELIST) - set(u_b + u_w) + + checks.append(NoSUIDSGIDAudit(blacklist)) + + dry_run = settings['security']['suid_sgid_dry_run_on_unknown'] + + if settings['security']['suid_sgid_remove_from_unknown'] or dry_run: + # If the policy is a dry_run (e.g. complain only) or remove unknown + # suid/sgid bits then find all of the paths which have the suid/sgid + # bit set and then remove the whitelisted paths. + root_path = settings['environment']['root_path'] + unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist) + checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run)) + + return checks + + +def find_paths_with_suid_sgid(root_path): + """Finds all paths/files which have an suid/sgid bit enabled. 
+ + Starting with the root_path, this will recursively find all paths which + have an suid or sgid bit set. + """ + cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', + '-type', 'f', '!', '-path', '/proc/*', '-print'] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, _ = p.communicate() + return set(out.split('\n')) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py new file mode 100644 index 00000000..4a76d74e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -0,0 +1,211 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import platform +import re +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, + WARNING, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s +net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s +net.ipv4.conf.all.rp_filter=1 +net.ipv4.conf.default.rp_filter=1 +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_ratelimit=100 +net.ipv4.icmp_ratemask=88089 +net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s +net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s +net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s +net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s +net.ipv4.tcp_rfc1337=1 +net.ipv4.tcp_syncookies=1 +net.ipv4.conf.all.shared_media=1 +net.ipv4.conf.default.shared_media=1 +net.ipv4.conf.all.accept_source_route=0 +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv4.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv4.conf.all.secure_redirects=0 +net.ipv4.conf.default.secure_redirects=0 +net.ipv4.conf.all.send_redirects=0 +net.ipv4.conf.default.send_redirects=0 +net.ipv4.conf.all.log_martians=0 +net.ipv6.conf.default.router_solicitations=0 +net.ipv6.conf.default.accept_ra_rtr_pref=0 +net.ipv6.conf.default.accept_ra_pinfo=0 +net.ipv6.conf.default.accept_ra_defrtr=0 +net.ipv6.conf.default.autoconf=0 +net.ipv6.conf.default.dad_transmits=0 +net.ipv6.conf.default.max_addresses=1 +net.ipv6.conf.all.accept_ra=0 +net.ipv6.conf.default.accept_ra=0 +kernel.modules_disabled=%(kernel_modules_disabled)s +kernel.sysrq=%(kernel_sysrq)s +fs.suid_dumpable=%(fs_suid_dumpable)s +kernel.randomize_va_space=2 +""" + + +def get_audits(): + """Get OS hardening sysctl audits. 
+
+    :returns: dictionary of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs.
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        # Not all architectures expose a vendor_id line, so default to an
+        # empty vendor rather than leaving the name unbound below.
+        vendor = ''
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems
if sysctl cannot apply all settings it + # will return non-zero as well. + log("sysctl command returned an error (maybe some " + "keys could not be set) - %s" % (e), + level=WARNING) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf new file mode 100644 index 00000000..0014191f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% if disable_core_dump -%} +# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. +* hard core 0 +{% endif %} \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf new file mode 100644 index 00000000..101f1e1d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf @@ -0,0 +1,7 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% for key, value in sysctl_settings -%} +{{ key }}={{ value }} +{% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs new file mode 100644 index 00000000..db137d6d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -0,0 +1,349 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# +# /etc/login.defs - Configuration control definitions for the login package. +# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. All other items are optional - if not specified then +# the described action or option will be inhibited. +# +# Comment lines (lines beginning with "#") and blank lines are ignored. +# +# Modified for Linux. --marekm + +# REQUIRED for useradd/userdel/usermod +# Directory where mailboxes reside, _or_ name of file, relative to the +# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, +# MAIL_DIR takes precedence. +# +# Essentially: +# - MAIL_DIR defines the location of users mail spool files +# (for mbox use) by appending the username to MAIL_DIR as defined +# below. 
+# - MAIL_FILE defines the location of the users mail spool files as the +# fully-qualified filename obtained by prepending the user home +# directory before $MAIL_FILE +# +# NOTE: This is no more used for setting up users MAIL environment variable +# which is, starting from shadow 4.0.12-1 in Debian, entirely the +# job of the pam_mail PAM modules +# See default PAM configuration files provided for +# login, su, etc. +# +# This is a temporary situation: setting these variables will soon +# move to /etc/default/useradd and the variables will then be +# no more supported +MAIL_DIR /var/mail +#MAIL_FILE .mail + +# +# Enable logging and display of /var/log/faillog login failure info. +# This option conflicts with the pam_tally PAM module. +# +FAILLOG_ENAB yes + +# +# Enable display of unknown usernames when login failures are recorded. +# +# WARNING: Unknown usernames may become world readable. +# See #290803 and #298773 for details about how this could become a security +# concern +LOG_UNKFAIL_ENAB no + +# +# Enable logging of successful logins +# +LOG_OK_LOGINS yes + +# +# Enable "syslog" logging of su activity - in addition to sulog file logging. +# SYSLOG_SG_ENAB does the same for newgrp and sg. +# +SYSLOG_SU_ENAB yes +SYSLOG_SG_ENAB yes + +# +# If defined, all su activity is logged to this file. +# +#SULOG_FILE /var/log/sulog + +# +# If defined, file which maps tty line to TERM environment parameter. +# Each line of the file is in a format something like "vt100 tty01". +# +#TTYTYPE_FILE /etc/ttytype + +# +# If defined, login failures will be logged here in a utmp format +# last, when invoked as lastb, will read /var/log/btmp, so... +# +FTMP_FILE /var/log/btmp + +# +# If defined, the command name to display when running "su -". For +# example, if this is defined as "su" then a "ps" will display the +# command is "-su". If not defined, then "ps" would display the +# name of the shell actually being run, e.g. something like "-sh". +# +SU_NAME su + +# +# If defined, file which inhibits all the usual chatter during the login +# sequence. If a full pathname, then hushed mode will be enabled if the +# user's name or shell are found in the file. If not a full pathname, then +# hushed mode will be enabled if the file exists in the user's home directory. +# +HUSHLOGIN_FILE .hushlogin +#HUSHLOGIN_FILE /etc/hushlogins + +# +# *REQUIRED* The default PATH settings, for superuser and normal users. +# +# (they are minimal, add the rest in the shell startup files) +ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} + +# +# Terminal permissions +# +# TTYGROUP Login tty will be assigned this group ownership. +# TTYPERM Login tty will be set to this permission. +# +# If you have a "write" program which is "setgid" to a special group +# which owns the terminals, define TTYGROUP to the group number and +# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign +# TTYPERM to either 622 or 600. +# +# In Debian /usr/bin/bsd-write or similar programs are setgid tty +# However, the default and recommended value for TTYPERM is still 0600 +# to not allow anyone to write to anyone else console or terminal + +# Users can still allow other people to write them by issuing +# the "mesg y" command. + +TTYGROUP tty +TTYPERM 0600 + +# +# Login configuration initializations: +# +# ERASECHAR Terminal ERASE character ('\010' = backspace). 
+#	KILLCHAR	Terminal KILL character ('\025' = CTRL/U).
+#	UMASK		Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here: each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR	0177
+KILLCHAR	025
+UMASK		{{ umask }}
+
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the
+# uid is the same as gid, and username is the same as the primary group
+# name.
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+USERGROUPS_ENAB yes
+
+#
+# Password aging controls:
+#
+#	PASS_MAX_DAYS	Maximum number of days a password may be used.
+#	PASS_MIN_DAYS	Minimum number of days allowed between password changes.
+#	PASS_WARN_AGE	Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS	{{ pwd_max_age }}
+PASS_MIN_DAYS	{{ pwd_min_age }}
+PASS_WARN_AGE	7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN			{{ uid_min }}
+UID_MAX			60000
+# System accounts
+SYS_UID_MIN		{{ sys_uid_min }}
+SYS_UID_MAX		{{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN			{{ gid_min }}
+GID_MAX			60000
+# System accounts
+SYS_GID_MIN		{{ sys_gid_min }}
+SYS_GID_MAX		{{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES		{{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT		{{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone). If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT		{{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME	{% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD	/usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+# +USERGROUPS_ENAB yes + +# +# Instead of the real user shell, the program specified by this parameter +# will be launched, although its visible name (argv[0]) will be the shell's. +# The program may do whatever it wants (logging, additional authentification, +# banner, ...) before running the actual shell. +# +# FAKE_SHELL /bin/fakeshell + +# +# If defined, either full pathname of a file containing device names or +# a ":" delimited list of device names. Root logins will be allowed only +# upon these devices. +# +# This variable is used by login and su. +# +#CONSOLE /etc/consoles +#CONSOLE console:tty01:tty02:tty03:tty04 + +# +# List of groups to add to the user's supplementary group set +# when logging in on the console (as determined by the CONSOLE +# setting). Default is none. +# +# Use with caution - it is possible for users to gain permanent +# access to these groups, even when not logged in on the console. +# How to do it is left as an exercise for the reader... +# +# This variable is used by login and su. +# +#CONSOLE_GROUPS floppy:audio:cdrom + +# +# If set to "yes", new passwords will be encrypted using the MD5-based +# algorithm compatible with the one used by recent releases of FreeBSD. +# It supports passwords of unlimited length and longer salt strings. +# Set to "no" if you need to copy encrypted passwords to other systems +# which don't understand the new algorithm. Default is "no". +# +# This variable is deprecated. You should use ENCRYPT_METHOD. +# +MD5_CRYPT_ENAB no + +# +# If set to MD5 , MD5-based algorithm will be used for encrypting password +# If set to SHA256, SHA256-based algorithm will be used for encrypting password +# If set to SHA512, SHA512-based algorithm will be used for encrypting password +# If set to DES, DES-based algorithm will be used for encrypting password (default) +# Overrides the MD5_CRYPT_ENAB option +# +# Note: It is recommended to use a value consistent with +# the PAM modules configuration. +# +ENCRYPT_METHOD SHA512 + +# +# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. +# +# Define the number of SHA rounds. +# With a lot of rounds, it is more difficult to brute forcing the password. +# But note also that it more CPU resources will be needed to authenticate +# users. +# +# If not specified, the libc will choose the default number of rounds (5000). +# The values must be inside the 1000-999999999 range. +# If only one of the MIN or MAX values is set, then this value will be used. +# If MIN > MAX, the highest value will be used. +# +# SHA_CRYPT_MIN_ROUNDS 5000 +# SHA_CRYPT_MAX_ROUNDS 5000 + +################# OBSOLETED BY PAM ############## +# # +# These options are now handled by PAM. Please # +# edit the appropriate file in /etc/pam.d/ to # +# enable the equivelants of them. +# +############### + +#MOTD_FILE +#DIALUPS_CHECK_ENAB +#LASTLOG_ENAB +#MAIL_CHECK_ENAB +#OBSCURE_CHECKS_ENAB +#PORTTIME_CHECKS_ENAB +#SU_WHEEL_ONLY +#CRACKLIB_DICTPATH +#PASS_CHANGE_TRIES +#PASS_ALWAYS_WARN +#ENVIRON_FILE +#NOLOGINS_FILE +#ISSUE_FILE +#PASS_MIN_LEN +#PASS_MAX_LEN +#ULIMIT +#ENV_HZ +#CHFN_AUTH +#CHSH_AUTH +#FAIL_DELAY + +################# OBSOLETED ####################### +# # +# These options are no more handled by shadow. # +# # +# Shadow utilities will display a warning if they # +# still appear. 
# +# # +################################################### + +# CLOSE_SESSIONS +# LOGIN_STRING +# NO_PASSWORD_CONSOLE +# QMAIL_DIR + + + diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/modules new file mode 100644 index 00000000..ef0354ee --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/modules @@ -0,0 +1,117 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +# Arch +# ---- +# +# Modules for certains builds, contains support modules and some CPU-specific optimizations. + +{% if arch == "x86_64" -%} +# Optimize for x86_64 cryptographic features +twofish-x86_64-3way +twofish-x86_64 +aes-x86_64 +salsa20-x86_64 +blowfish-x86_64 +{% endif -%} + +{% if cpuVendor == "intel" -%} +# Intel-specific optimizations +ghash-clmulni-intel +aesni-intel +kvm-intel +{% endif -%} + +{% if cpuVendor == "amd" -%} +# AMD-specific optimizations +kvm-amd +{% endif -%} + +kvm + + +# Crypto +# ------ + +# Some core modules which comprise strong cryptography. +blowfish_common +blowfish_generic +ctr +cts +lrw +lzo +rmd160 +rmd256 +rmd320 +serpent +sha512_generic +twofish_common +twofish_generic +xts +zlib + + +# Drivers +# ------- + +# Basics +lp +rtc +loop + +# Filesystems +ext2 +btrfs + +{% if desktop_enable -%} +# Desktop +psmouse +snd +snd_ac97_codec +snd_intel8x0 +snd_page_alloc +snd_pcm +snd_timer +soundcore +usbhid +{% endif -%} + +# Lib +# --- +xz + + +# Net +# --- + +# All packets needed for netfilter rules (ie iptables, ebtables). +ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 00000000..f98d14e5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 00000000..fd2de791 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 00000000..15b18d4e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 00000000..d9620299 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 00000000..d4f0ec19 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 00000000..3af8b89d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,89 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. 
+ + :returns: dictionary of audits + """ + if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: + log("MySQL does not appear to be installed on this node - " + "skipping mysql hardening", level=WARNING) + return [] + + settings = utils.get_settings('mysql') + hardening_settings = settings['hardening'] + my_cnf = hardening_settings['mysql-conf'] + + audits = [ + FilePermissionAudit(paths=[my_cnf], user='root', + group='root', mode=0o0600), + + TemplatedFile(hardening_settings['hardening-conf'], + MySQLConfContext(), + TEMPLATES_DIR, + mode=0o0750, + user='mysql', + group='root', + service_actions=[{'service': 'mysql', + 'actions': ['restart']}]), + + # MySQL and Percona charms do not allow configuration of the + # data directory, so use the default. + DirectoryPermissionAudit('/var/lib/mysql', + user='mysql', + group='mysql', + recursive=False, + mode=0o755), + + DirectoryPermissionAudit('/etc/mysql', + user='root', + group='root', + recursive=False, + mode=0o700), + ] + + return audits + + +class MySQLConfContext(object): + """Defines the set of key/value pairs to set in a mysql config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/mysql/conf.d/hardening.cnf file. + """ + def __call__(self): + settings = utils.get_settings('mysql') + # Translate for python3 + return {'mysql_settings': + [(k, v) for k, v in six.iteritems(settings['security'])]} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf new file mode 100644 index 00000000..8242586c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf @@ -0,0 +1,12 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +[mysqld] +{% for setting, value in mysql_settings -%} +{% if value == 'True' -%} +{{ setting }} +{% elif value != 'None' and value != None -%} +{{ setting }} = {{ value }} +{% endif -%} +{% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
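+
+# (Added commentary, not upstream code.)  TEMPLATES_DIR below resolves to
+# the 'templates' directory shipped alongside this package (for this charm,
+# hooks/charmhelpers/contrib/hardening/ssh/templates), which is where the
+# ssh_config and sshd_config templates rendered by the checks live.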
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 00000000..b85150d5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.ssh.checks import config + + +def run_ssh_checks(): + log("Starting SSH hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("SSH hardening checks complete.", level=DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py new file mode 100644 index 00000000..3fb6ae8d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -0,0 +1,394 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from charmhelpers.core.host import lsb_release +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get SSH hardening config audits. 
+
+    :returns: dictionary of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        # NOTE: the leading comma matters; without it the first CBC cipher
+        # would be fused onto 'aes128-ctr' in the weak list.
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+            # 
NOTE: don't recurse + utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, + maxdepth=0) + + def post_write(self): + # NOTE: don't recurse + utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, + maxdepth=0) + + +class SSHDConfigContext(SSHConfigContext): + + type = 'server' + + def __call__(self): + settings = utils.get_settings('ssh') + if settings['common']['network_ipv6_enable']: + addr_family = 'any' + else: + addr_family = 'inet' + + ctxt = { + 'ssh_ip': settings['server']['listen_to'], + 'password_auth_allowed': + settings['server']['password_authentication'], + 'ports': settings['common']['ports'], + 'addr_family': addr_family, + 'ciphers': self.get_ciphers(settings['server']['cbc_required']), + 'macs': self.get_macs(settings['server']['weak_hmac']), + 'kexs': self.get_kexs(settings['server']['weak_kex']), + 'host_key_files': settings['server']['host_key_files'], + 'allow_root_with_key': settings['server']['allow_root_with_key'], + 'password_authentication': + settings['server']['password_authentication'], + 'use_priv_sep': settings['server']['use_privilege_separation'], + 'use_pam': settings['server']['use_pam'], + 'allow_x11_forwarding': settings['server']['allow_x11_forwarding'], + 'print_motd': settings['server']['print_motd'], + 'print_last_log': settings['server']['print_last_log'], + 'client_alive_interval': + settings['server']['alive_interval'], + 'client_alive_count': settings['server']['alive_count'], + 'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'], + 'allow_agent_forwarding': + settings['server']['allow_agent_forwarding'], + 'deny_users': settings['server']['deny_users'], + 'allow_users': settings['server']['allow_users'], + 'deny_groups': settings['server']['deny_groups'], + 'allow_groups': settings['server']['allow_groups'], + 'use_dns': settings['server']['use_dns'], + 'sftp_enable': settings['server']['sftp_enable'], + 'sftp_group': settings['server']['sftp_group'], + 'sftp_chroot': settings['server']['sftp_chroot'], + 'max_auth_tries': settings['server']['max_auth_tries'], + 'max_sessions': settings['server']['max_sessions'], + } + return ctxt + + +class SSHDConfig(TemplatedFile): + def __init__(self): + path = '/etc/ssh/sshd_config' + super(SSHDConfig, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=SSHDConfigContext(), + user='root', + group='root', + mode=0o0600, + service_actions=[{'service': 'ssh', + 'actions': + ['restart']}]) + + def pre_write(self): + settings = utils.get_settings('ssh') + apt_update(fatal=True) + apt_install(settings['server']['package']) + if not os.path.exists('/etc/ssh'): + os.makedir('/etc/ssh') + # NOTE: don't recurse + utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, + maxdepth=0) + + def post_write(self): + # NOTE: don't recurse + utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, + maxdepth=0) + + +class SSHConfigFileContentAudit(FileContentAudit): + def __init__(self): + self.path = '/etc/ssh/ssh_config' + super(SSHConfigFileContentAudit, self).__init__(self.path, {}) + + def is_compliant(self, *args, **kwargs): + self.pass_cases = [] + self.fail_cases = [] + settings = utils.get_settings('ssh') + + if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + 
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['client']['weak_hmac']: + self.fail_cases.append(r'^MACs.+,hmac-sha1$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['client']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['client']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['client']['roaming']: + self.pass_cases.append(r'^UseRoaming yes$') + else: + self.fail_cases.append(r'^UseRoaming yes$') + + return super(SSHConfigFileContentAudit, self).is_compliant(*args, + **kwargs) + + +class SSHDConfigFileContentAudit(FileContentAudit): + def __init__(self): + self.path = '/etc/ssh/sshd_config' + super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) + + def is_compliant(self, *args, **kwargs): + self.pass_cases = [] + self.fail_cases = [] + settings = utils.get_settings('ssh') + + if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + 
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 00000000..9742d8e2 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. 
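+
+# Note (added commentary): this file is a Jinja2 template rendered by the
+# hardening helpers rather than a literal config; the placeholder
+# expressions below are filled in from the 'ssh' client settings before the
+# file is installed.  For example, with ports set to [22] the port loop
+# below renders a single "Port 22" line.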
+ +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. +{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 00000000..5f87298a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication {{ password_authentication }} + +# Kerberos options +KerberosAuthentication no +KerberosGetAFSToken no +KerberosOrLocalPasswd no +KerberosTicketCleanup yes + +# GSSAPI options +GSSAPIAuthentication no +GSSAPICleanupCredentials yes + +X11Forwarding {{ allow_x11_forwarding }} +X11DisplayOffset 10 +X11UseLocalhost yes +GatewayPorts no +PrintMotd {{ print_motd }} +PrintLastLog {{ print_last_log }} +TCPKeepAlive no +UseLogin no + +ClientAliveInterval {{ client_alive_interval }} +ClientAliveCountMax {{ client_alive_count }} +AllowTcpForwarding {{ allow_tcp_forwarding }} +AllowAgentForwarding {{ allow_agent_forwarding }} + +MaxStartups 10:30:100 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+UsePAM {{ use_pam }} + +{% if deny_users -%} +DenyUsers {{ deny_users }} +{% endif -%} +{% if allow_users -%} +AllowUsers {{ allow_users }} +{% endif -%} +{% if deny_groups -%} +DenyGroups {{ deny_groups }} +{% endif -%} +{% if allow_groups -%} +AllowGroups {{ allow_groups }} +{% endif -%} +UseDNS {{ use_dns }} +MaxAuthTries {{ max_auth_tries }} +MaxSessions {{ max_sessions }} + +{% if sftp_enable -%} +# Configuration, in case SFTP is used +## override default of no subsystems +## Subsystem sftp /opt/app/openssh5/libexec/sftp-server +Subsystem sftp internal-sftp -l VERBOSE + +## These lines must appear at the *end* of sshd_config +Match Group {{ sftp_group }} +ForceCommand internal-sftp -l VERBOSE +ChrootDirectory {{ sftp_chroot }} +{% else -%} +# Configuration, in case SFTP is used +## override default of no subsystems +## Subsystem sftp /opt/app/openssh5/libexec/sftp-server +## These lines must appear at the *end* of sshd_config +Match Group sftponly +ForceCommand internal-sftp -l VERBOSE +ChrootDirectory /sftpchroot/home/%u +{% endif %} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py new file mode 100644 index 00000000..d2ab7dc9 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py @@ -0,0 +1,71 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +import os + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) + +try: + from jinja2 import FileSystemLoader, Environment +except ImportError: + from charmhelpers.fetch import apt_install + from charmhelpers.fetch import apt_update + apt_update(fatal=True) + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment + + +# NOTE: function separated from main rendering code to facilitate easier +# mocking in unit tests. +def write(path, data): + with open(path, 'wb') as out: + out.write(data) + + +def get_template_path(template_dir, path): + """Returns the template file which would be used to render the path. + + The path to the template file is returned. + :param template_dir: the directory the templates are located in + :param path: the file path to be written to. + :returns: path to the template file + """ + return os.path.join(template_dir, os.path.basename(path)) + + +def render_and_write(template_dir, path, context): + """Renders the specified template into the file.
+ + :param template_dir: the directory to load the template from + :param path: the path to write the templated contents to + :param context: the parameters to pass to the rendering engine + """ + env = Environment(loader=FileSystemLoader(template_dir)) + template_file = os.path.basename(path) + template = env.get_template(template_file) + log('Rendering from template: %s' % template.name, level=DEBUG) + rendered_content = template.render(context) + if not rendered_content: + log("Render returned None - skipping '%s'" % path, + level=WARNING) + return + + write(path, rendered_content.encode('utf-8').strip()) + log('Wrote template %s' % path, level=DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py new file mode 100644 index 00000000..a6743a4d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py @@ -0,0 +1,157 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +import glob +import grp +import os
+import pwd +import six +import yaml + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + WARNING, + ERROR, +) + + +# Global settings cache. Since each hook fire entails a fresh module import it +# is safe to hold this in memory and not risk missing config changes (since +# they will result in a new hook fire and thus re-import). +__SETTINGS__ = {} + + +def _get_defaults(modules): + """Load the default config for the provided modules. + + :param modules: stack modules config defaults to lookup. + :returns: modules default config dictionary. + """ + default = os.path.join(os.path.dirname(__file__), + 'defaults/%s.yaml' % (modules)) + return yaml.safe_load(open(default)) + + +def _get_schema(modules): + """Load the config schema for the provided modules. + + NOTE: this schema is intended to have a 1-1 relationship with the keys in + the default config and is used as a means to verify valid overrides provided + by the user. + + :param modules: stack modules config schema to lookup. + :returns: modules default schema dictionary. + """ + schema = os.path.join(os.path.dirname(__file__), + 'defaults/%s.yaml.schema' % (modules)) + return yaml.safe_load(open(schema)) + + +def _get_user_provided_overrides(modules): + """Load user-provided config overrides. + + :param modules: stack modules to lookup in user overrides yaml file. + :returns: overrides dictionary.
+ """ + overrides = os.path.join(os.environ['JUJU_CHARM_DIR'], + 'hardening.yaml') + if os.path.exists(overrides): + log("Found user-provided config overrides file '%s'" % + (overrides), level=DEBUG) + settings = yaml.safe_load(open(overrides)) + if settings and settings.get(modules): + log("Applying '%s' overrides" % (modules), level=DEBUG) + return settings.get(modules) + + log("No overrides found for '%s'" % (modules), level=DEBUG) + else: + log("No hardening config overrides file '%s' found in charm " + "root dir" % (overrides), level=DEBUG) + + return {} + + +def _apply_overrides(settings, overrides, schema): + """Get overrides config overlayed onto modules defaults. + + :param modules: require stack modules config. + :returns: dictionary of modules config with user overrides applied. + """ + if overrides: + for k, v in six.iteritems(overrides): + if k in schema: + if schema[k] is None: + settings[k] = v + elif type(schema[k]) is dict: + settings[k] = _apply_overrides(settings[k], overrides[k], + schema[k]) + else: + raise Exception("Unexpected type found in schema '%s'" % + type(schema[k]), level=ERROR) + else: + log("Unknown override key '%s' - ignoring" % (k), level=INFO) + + return settings + + +def get_settings(modules): + global __SETTINGS__ + if modules in __SETTINGS__: + return __SETTINGS__[modules] + + schema = _get_schema(modules) + settings = _get_defaults(modules) + overrides = _get_user_provided_overrides(modules) + __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema) + return __SETTINGS__[modules] + + +def ensure_permissions(path, user, group, permissions, maxdepth=-1): + """Ensure permissions for path. + + If path is a file, apply to file and return. If path is a directory, + apply recursively (if required) to directory contents and return. + + :param user: user name + :param group: group name + :param permissions: octal permissions + :param maxdepth: maximum recursion depth. A negative maxdepth allows + infinite recursion and maxdepth=0 means no recursion. 
+ :returns: None + """ + if not os.path.exists(path): + log("File '%s' does not exist - cannot set permissions" % (path), + level=WARNING) + return + + _user = pwd.getpwnam(user) + os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) + os.chmod(path, permissions) + + if maxdepth == 0: + log("Max recursion depth reached - skipping further recursion", + level=DEBUG) + return + elif maxdepth > 0: + maxdepth -= 1 + + if os.path.isdir(path): + contents = glob.glob("%s/*" % (path)) + for c in contents: + ensure_permissions(c, user=user, group=group, + permissions=permissions, maxdepth=maxdepth) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 0b6da25c..5dcebe7c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -1,20 +1,12 @@ {% if auth_host -%} -{% if api_version == '3' -%} [keystone_authtoken] -auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_plugin = password +project_domain_id = default +user_domain_id = default project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} -project_domain_name = default -user_domain_name = default -auth_plugin = password -{% else -%} -[keystone_authtoken] -identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} -admin_tenant_name = {{ admin_tenant_name }} -admin_user = {{ admin_user }} -admin_password = {{ admin_password }} signing_dir = {{ signing_dir }} {% endif -%} -{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy new file mode 100644 index 00000000..9356b2be --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy @@ -0,0 +1,10 @@ +{% if auth_host -%} +[keystone_authtoken] +# Juno specific config (Bug #1557223) +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 03aecfcb..ab0bfcb0 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -71,6 +71,7 @@ services, ) from charmhelpers.contrib.charmsupport import nrpe +from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() CONFIGS = register_configs() @@ -125,6 +126,7 @@ def install_packages(): @hooks.hook('install.real') +@harden() def install(): status_set('maintenance', 'Executing pre-install') execd_preinstall() @@ -243,6 +245,7 @@ def setup_keystone_certs(unit=None, rid=None): 'config-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], '/etc/haproxy/haproxy.cfg': ['haproxy']}) +@harden() def config_changed(): install_packages() @@ -449,6 +452,12 @@ 
def update_nrpe_config(): nrpe_setup.write() +@hooks.hook('update-status') +@harden() +def update_status(): + log('Updating status.') + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/update-status b/ceph-radosgw/hooks/update-status new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/update-status @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 2591a9b1..3e159039 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -782,15 +782,20 @@ def get_uuid_epoch_stamp(self): # amulet juju action helpers: def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output): + _check_output=subprocess.check_output, + params=None): """Run the named action on a given unit sentry. + params a dict of parameters to use _check_output parameter is used for dependency injection. @return action_id. """ unit_id = unit_sentry.info["unit_name"] command = ["juju", "action", "do", "--format=json", unit_id, action] + if params is not None: + for key, value in params.iteritems(): + command.append("{}={}".format(key, value)) self.log.info("Running command: %s\n" % " ".join(command)) output = _check_output(command, universal_newlines=True) data = json.loads(output) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 737ee0d3..8e7607ec 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -1,3 +1,5 @@ +import sys + from mock import ( call, patch, @@ -9,6 +11,13 @@ ) from charmhelpers.contrib.openstack.ip import PUBLIC +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. +mock_apt = MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = MagicMock() + + dnsmock = MagicMock() modules = { 'dns': dnsmock, @@ -17,9 +26,12 @@ module_patcher = patch.dict('sys.modules', modules) module_patcher.start() -with patch('charmhelpers.fetch.apt_install'): - with patch('utils.register_configs'): - import hooks as ceph_hooks +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + with patch('charmhelpers.fetch.apt_install'): + with patch('utils.register_configs'): + import hooks as ceph_hooks TO_PATCH = [ 'CONFIGS', From 68232b66ab0ce475444978e45d9b26ff08b1924d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Mar 2016 16:51:09 -0400 Subject: [PATCH 1078/2699] Merge ceph charm into ceph-mon Squashed commit of the following: commit 9b832d9391f9fea2d1491d01da6101585930fc75 Merge: 43df712 7b36210 Author: Chris MacNaughton Date: Mon Mar 21 16:40:54 2016 -0400 Merge branch 'master' of github.com:openstack/charm-ceph into charm-ceph-mon Change-Id: I42cfe6f1e5887627981f8ce4beff164803cc3957 commit 7b36210bac5bef3bacae2614995e123ef926453f Author: Chris Holcombe Date: Fri Mar 18 15:37:06 2016 -0700 Add ceph-osd to ceph This change adds ceph-osd back into ceph for amulet testing. 
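The unit-test change above has to neutralize the @harden() decorator before hooks.py is imported, because decorators are applied at import time. A self-contained sketch of the same pass-through trick, using a hypothetical harden factory and the stdlib mock module:

    from unittest.mock import patch

    def harden():  # stand-in for the charmhelpers decorator factory
        def wrapper(f):
            def inner(*args, **kwargs):
                raise RuntimeError('would harden a real system')
            return inner
        return wrapper

    # side_effect swaps the factory for one returning an identity
    # decorator, so functions decorated under test run unchanged.
    with patch(__name__ + '.harden',
               side_effect=lambda *a, **kw: lambda f: f):
        @harden()
        def config_changed():
            return 'ran'

    print(config_changed())  # -> 'ran'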
Change-Id: Ice4aaf7739e8c839189313d3f6175a834cf64219 commit e87e0b7bd22fe5ccae2aafcf6bd30f145405e01b Author: Ryan Beisner Date: Wed Mar 16 17:33:48 2016 +0000 Update amulet test to include a non-existent osd-devices value The osd-devices charm config option is a whitelist, and the charm needs to gracefully handle items in that whitelist which may not exist. Change-Id: I5f9c6c1e4519fd671d6d36b415c9c8f763495dad commit ffce15d52333de4063d04b808cfbca5d890fb996 Merge: fe8bf6e 9614896 Author: Jenkins Date: Wed Mar 16 17:45:25 2016 +0000 Merge "Revert "Make 'blocked' status when node have no storage device"" commit 961489609d85851bd63c6825339a296bdf74e320 Author: Chris Holcombe Date: Wed Mar 16 16:55:02 2016 +0000 Revert "Make 'blocked' status when node have no storage device" This reverts commit fc04dd0fff33639b812627d04645134dd7d4d3de. Change-Id: I9efbf623fc9aa6096725a15e53df426739ac16ff commit fe8bf6e4a5cb466a5efc6403c215e7aece2c6b9c Author: Billy Olsen Date: Tue Mar 15 20:08:20 2016 -0700 Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. Change-Id: I99761d2fdf120bacff58d0aa5c2e584382c2e72b commit fc04dd0fff33639b812627d04645134dd7d4d3de Author: Seyeong Kim Date: Fri Mar 11 06:07:52 2016 +0000 Make 'blocked' status when node have no storage device Currently there is an msg for no storage status on ceph node. But it doesn't make this charm state 'blocked'. is_storage_fine function has been created to check storage devices on ceph_hooks.py and using it on assess_status. Change-Id: I790fde0280060fa220ee83de2ad2319ac2c77230 Closes-Bug: lp1424510 commit a7c5e85c408ab8446a18cc6761b1d0b292641ea7 Author: Ryan Beisner Date: Fri Mar 4 14:36:38 2016 +0000 Enable Xenial-Mitaka amulet test target. Change-Id: I0c386fc0c052cc1ac52c0a30f7a39fa914a61100 commit e80c5097c26ac4eb200a289daa272d5c7ac82539 Author: uoscibot Date: Mon Feb 29 10:45:49 2016 +0000 Adapt imports and metadata for github move commit 391ed288fc763b69f0cd92459f236e7581a5f244 Merge: 78250bd 6228ea2 Author: Edward Hope-Morley Date: Thu Feb 25 13:34:27 2016 -0500 [hopem,r=] Support multiple l3 segments. Closes-Bug: 1523871 commit 6228ea2a8fa578c3c6b24b59f621e6e1026a7668 Merge: 6159390 78250bd Author: Edward Hope-Morley Date: Thu Feb 25 09:29:46 2016 -0500 sync /next commit 78250bd65c861adcb321f1c634def29fcfdaa8a9 Author: James Page Date: Wed Feb 24 21:53:28 2016 +0000 Add gitreview prior to migration to openstack commit 61593905939359ba72768ccb8f1a450a571c1d24 Author: Edward Hope-Morley Date: Wed Feb 24 15:56:20 2016 -0500 only use fallback for get_public_addr() if networks not provided in config commit 34841b0aea85b3d5693a5336dbf956a406414474 Merge: 08d1cbc 092368d Author: James Page Date: Wed Feb 24 14:22:20 2016 +0000 Add actions to support configuration of erasure coded pools. commit 092368d646d4e02b2d2ac08026b6cbf2c94a4042 Merge: de98010 08d1cbc Author: Chris Holcombe Date: Tue Feb 23 08:19:56 2016 -0800 Merge upstream commit 08d1cbcdc943493a556e0187d2b3e6fbe83b69e3 Merge: 2d4ff89 414e519 Author: James Page Date: Tue Feb 23 09:49:50 2016 +0000 Fix amulet tests for nova-compute changes. 
commit 414e5195c939a99adcaf79e27eb057c07c7f4761 Author: Edward Hope-Morley Date: Mon Feb 22 15:21:00 2016 -0500 fix amulet commit e99e991be21c6d98fc670bcafa30684c0ba4d5e0 Author: Edward Hope-Morley Date: Mon Feb 22 12:56:00 2016 -0500 fixup commit de98010f6f8d81e63d47ac03d33aa40bd870c7ea Author: Chris Holcombe Date: Mon Feb 22 08:05:32 2016 -0800 charmhelpers sync commit 2d4ff89e4bba2e93e08a6dd00bc2367e90b708fe Merge: f16e3fa f98627c Author: Liam Young Date: Mon Feb 22 09:26:38 2016 +0000 [james-page, r=gnuoy] Add configuration option for toggling use of direct io for OSD journals commit f3803cb60d55154e35ac2294170b27fb348141b3 Author: Chris Holcombe Date: Fri Feb 19 08:11:18 2016 -0800 Change /usr/bin/python2.7 to /usr/bin/python commit 612ba454c4263d9bfc672fe168a55c2f01599d70 Merge: c3d20a0 f16e3fa Author: Chris Holcombe Date: Thu Feb 18 17:16:55 2016 -0800 Merge upstream and resolve conflicts with actions and actions.yaml commit c3d20a0eb67918d11585851a7b5df55ce0290392 Author: Chris Holcombe Date: Thu Feb 18 17:10:56 2016 -0800 Fix up the niggles and provide feedback to the action user as to why something failed commit ea5cc48ccbb5d6515703bd5c93c13b2147972cd1 Author: Edward Hope-Morley Date: Thu Feb 18 17:42:05 2016 +0000 more commit f58dd864eac130a6bc20b46c1495d7fa34a54894 Author: Edward Hope-Morley Date: Thu Feb 18 17:09:52 2016 +0000 restore sanity commit 32631ccde309040b92ba76ecc12b16bad953f486 Author: Edward Hope-Morley Date: Thu Feb 18 11:40:09 2016 +0000 post-review fixes commit 7ada8f0de65d397648d041fae20ed21b3f38bd15 Author: Edward Hope-Morley Date: Thu Feb 18 11:36:46 2016 +0000 post-review fixes commit f16e3fac5240133c1c7dfd406caacd21b364532a Merge: a0ffb8b 7709b7d Author: James Page Date: Thu Feb 18 11:02:17 2016 +0000 Add pause/resume cluster health actions Add actions to pause and resume cluster health monitoring within ceph for all osd devices. This will ensure that no rebalancing is done whilst maintenance actions are happening within the cluster. commit a0ffb8bf97c9cf3c19d17090c96f2ea60c89da65 Merge: 65439ba 531b40d Author: James Page Date: Thu Feb 18 10:38:53 2016 +0000 Wait for quorom and query the right unit remote_unit when not in radosgw context commit 65439ba7dc3acf494c9a8d11e2cdd274d144b485 Merge: 5e77170 afd390b Author: James Page Date: Wed Feb 17 11:28:44 2016 +0000 Update test target definitions; Wait for unit status. commit 531b40d9b2d216b467cca59d7649ab5bb4577b3d Author: Liam Young Date: Wed Feb 17 10:15:37 2016 +0000 Wait for quorom and query the right unit remote_unit when not in radosgw context commit 5e77170f378be92a3e2e8de3c06dad158b4a14ca Author: James Page Date: Tue Feb 16 06:59:17 2016 +0000 Tidy tox targets commit 732d8e11cd5058e680a5982bce77648952c8532f Author: Chris Holcombe Date: Fri Feb 12 14:17:34 2016 -0800 Used a list as an integer. I meant to use the size of the list commit afd390b3ed4212883a02ca971e5613246c3ae6a8 Author: Ryan Beisner Date: Fri Feb 12 21:24:20 2016 +0000 No need to not wait for nonexistent nrpe commit 9721ce8006720d24b8e4133fbbb8a01d989a71c8 Author: Ryan Beisner Date: Fri Feb 12 21:02:36 2016 +0000 Disable Xenial test re: pending lp1537155 commit d12e2658f5b5e6c38b98ae986134f83df2e0a380 Author: Ryan Beisner Date: Fri Feb 12 20:57:08 2016 +0000 Update test target definitions; Wait for unit status. 
commit 7709b7d5385757fc6d8fe48fa7646efcdb77564a Author: Chris MacNaughton Date: Fri Feb 12 08:26:13 2016 -0500 rename actions commit 2c945523486227dd1c58a1c1a76a779d9c131a71 Merge: 7271408 27d5d4b Author: James Page Date: Fri Feb 12 12:34:20 2016 +0000 Resolve symlinks in get_devices(). commit 7edce1dd489a4718a150f7f38ffd366855e49828 Author: Edward Hope-Morley Date: Wed Feb 10 15:20:52 2016 +0000 [hopem,r=] Support multiple l3 segments. Closes-Bug: 1523871 commit 27d5d4b8bb0fd61a3910dad1bdf46adc2b476649 Author: Bjorn Tillenius Date: Tue Feb 2 19:01:53 2016 +0200 Lint. commit 6980d3a3418ba512e65a79a62b140b238d54a17b Author: Bjorn Tillenius Date: Tue Feb 2 17:34:19 2016 +0200 Resolve symlinks in get_devices(). commit f98627c1c163d702ae1142a6153801073d57280c Merge: 4f0dc6d 7271408 Author: James Page Date: Sat Jan 30 15:45:01 2016 +0100 rebase commit eaa365a180e8eda88e6ef9f1a6c975a0b780dee5 Author: Chris Holcombe Date: Fri Jan 22 15:21:45 2016 -0800 Clean up another lint error commit 477cdc96fbe124509995a02c358c24c64451c9e4 Author: Chris Holcombe Date: Fri Jan 22 15:04:27 2016 -0800 Patching up the other unit tests to passing status commit faa7b3ad95ebed02718ff58b3e3203b7d59be709 Author: Chris MacNaughton Date: Fri Jan 22 16:42:58 2016 -0500 remove regex commit 1e3b2f5dd409a02399735aa2aeb5e78d18ea2240 Author: Chris MacNaughton Date: Fri Jan 22 16:10:15 2016 -0500 lint fix commit 620209aeb47900430f039eb2e65bfe00db672e32 Author: Chris MacNaughton Date: Fri Jan 22 16:05:15 2016 -0500 use search instead of match commit 2f47939fa84c43c485042325a925d72797df6480 Author: Chris MacNaughton Date: Fri Jan 22 15:16:22 2016 -0500 fix line length commit f203a5bdfc12a2a99e3695840f16182e037f1df1 Author: Chris MacNaughton Date: Fri Jan 22 15:02:10 2016 -0500 modify regex to not care about order commit 706b272fc91d432921750b3af09689361f4b8bb9 Author: Chris MacNaughton Date: Fri Jan 22 14:16:46 2016 -0500 try with sleeping commit 66d6952a65ceb5c8858f262daf127f96ed03ea81 Merge: e446a77 7271408 Author: Chris Holcombe Date: Fri Jan 22 10:46:50 2016 -0800 Merge upstream and resolve conflicts commit fc714c96f40bac9fb89108cd56962343472f63cf Author: Chris MacNaughton Date: Fri Jan 22 11:10:34 2016 -0500 fix variable name commit 8cb53237c6588a00d86dcc0a564d18eb7cd751ae Author: Chris MacNaughton Date: Fri Jan 22 10:47:26 2016 -0500 update to use correct(?) commands commit b762e9842ca335845fe3a442dfdde838e5246b3b Author: Chris MacNaughton Date: Fri Jan 22 08:01:03 2016 -0500 update tests.yaml commit e446a7731cbe377f30c88bb99083745ba95caa4e Author: Chris Holcombe Date: Thu Jan 21 14:19:53 2016 -0800 Clean up lint warnings. Also added a few more mock unit tests commit 32ff93e8d0166b2346c422cbb9cd53bc4f805256 Author: Chris Holcombe Date: Thu Jan 21 09:38:47 2016 -0800 Adding a unit test file for ceph_ops commit 4f0dc6d8b76b8545453293b2c69e2d6a164db10e Author: James Page Date: Mon Jan 18 16:39:49 2016 +0000 Add configuration option for toggling use of direct io for OSD journals commit 1977cdbde1d0fa7ad57baa07d97f477143d54787 Author: Chris Holcombe Date: Mon Jan 18 08:07:35 2016 -0800 Add actions to lint. Change actions.yaml to use enum and also change underscores to dashes. Log action_fail in addition to exiting -1. Merge v2 requests with v1 requests since this does not break backwards compatibility. Add unit tests. Modify tox.ini to include actions. . 
commit 3f0e16bcc483952e340fa89505011b7a115ff421 Author: Chris MacNaughton Date: Fri Jan 15 16:45:00 2016 -0500 fix version commit c665092be6f9d07f45a0b9baf2e0f128e4ecdc37 Author: Chris MacNaughton Date: Fri Jan 15 16:20:27 2016 -0500 updating tests commit 80de4d7256efbbc6c2ab7cdfcb1ab292668be607 Author: Chris MacNaughton Date: Thu Jan 14 13:19:10 2016 -0500 update readme commit 44365d58785e9ba63179d092b875c2029024aa8b Author: Chris MacNaughton Date: Thu Jan 14 13:17:19 2016 -0500 add pause/resume actions pause calls: `ceph osd set noout ceoh osd set nodown` resume calls: `ceph osd unset noout ceph osd unset nodown` commit bdd4e69e801e2178532e31216efe7e815b06f864 Author: Chris Holcombe Date: Tue Dec 15 04:54:21 2015 -0800 Missed a few typos commit 0158586bde1a1f878c0a046a97510b8b90a95ce9 Author: Chris Holcombe Date: Tue Dec 15 04:41:22 2015 -0800 lint errors commit 92ad78733279112bbba8e12d3fb19809ab9d0ff7 Author: Chris Holcombe Date: Mon Dec 14 17:44:22 2015 -0800 Actions are working and lightly tested. Need to create a more robust, automated test setup Change-Id: Ia18b19961dab66bb6c19ef7e9c421b2fec60fcc7 --- ceph-proxy/.gitignore | 4 +- ceph-proxy/.gitreview | 2 +- ceph-proxy/README.md | 8 +- ceph-proxy/actions.yaml | 175 ++++++++++++++ ceph-proxy/actions/__init__.py | 2 + ceph-proxy/actions/ceph_ops.py | 103 ++++++++ ceph-proxy/actions/create-erasure-profile | 89 +++++++ ceph-proxy/actions/create-pool | 38 +++ ceph-proxy/actions/delete-erasure-profile | 24 ++ ceph-proxy/actions/delete-pool | 28 +++ ceph-proxy/actions/get-erasure-profile | 18 ++ ceph-proxy/actions/list-erasure-profiles | 22 ++ ceph-proxy/actions/list-pools | 17 ++ ceph-proxy/actions/pool-get | 19 ++ ceph-proxy/actions/pool-set | 23 ++ ceph-proxy/actions/pool-statistics | 15 ++ ceph-proxy/actions/remove-pool-snapshot | 19 ++ ceph-proxy/actions/rename-pool | 16 ++ ceph-proxy/actions/set-pool-max-bytes | 16 ++ ceph-proxy/actions/snapshot-pool | 18 ++ ceph-proxy/config.yaml | 4 + ceph-proxy/hooks/ceph_broker.py | 280 ++++++++++++++++++---- ceph-proxy/hooks/ceph_hooks.py | 3 +- ceph-proxy/templates/ceph.conf | 1 - ceph-proxy/tests/018-basic-trusty-liberty | 0 ceph-proxy/tests/019-basic-trusty-mitaka | 0 ceph-proxy/tests/020-basic-wily-liberty | 0 ceph-proxy/tests/021-basic-xenial-mitaka | 0 ceph-proxy/tests/basic_deployment.py | 14 +- ceph-proxy/tests/tests.yaml | 1 + ceph-proxy/unit_tests/test_ceph_broker.py | 94 ++++---- ceph-proxy/unit_tests/test_ceph_ops.py | 217 +++++++++++++++++ ceph-proxy/unit_tests/test_status.py | 1 - 33 files changed, 1170 insertions(+), 101 deletions(-) create mode 100755 ceph-proxy/actions/ceph_ops.py create mode 100755 ceph-proxy/actions/create-erasure-profile create mode 100755 ceph-proxy/actions/create-pool create mode 100755 ceph-proxy/actions/delete-erasure-profile create mode 100755 ceph-proxy/actions/delete-pool create mode 100755 ceph-proxy/actions/get-erasure-profile create mode 100755 ceph-proxy/actions/list-erasure-profiles create mode 100755 ceph-proxy/actions/list-pools create mode 100755 ceph-proxy/actions/pool-get create mode 100755 ceph-proxy/actions/pool-set create mode 100755 ceph-proxy/actions/pool-statistics create mode 100755 ceph-proxy/actions/remove-pool-snapshot create mode 100755 ceph-proxy/actions/rename-pool create mode 100755 ceph-proxy/actions/set-pool-max-bytes create mode 100755 ceph-proxy/actions/snapshot-pool mode change 100644 => 100755 ceph-proxy/tests/018-basic-trusty-liberty mode change 100644 => 100755 ceph-proxy/tests/019-basic-trusty-mitaka mode change 100644 
=> 100755 ceph-proxy/tests/020-basic-wily-liberty mode change 100644 => 100755 ceph-proxy/tests/021-basic-xenial-mitaka create mode 100644 ceph-proxy/unit_tests/test_ceph_ops.py diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index 86e1f1b2..7d2fd1fb 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -1,8 +1,8 @@ bin +.idea .coverage .testrepository .tox *.sw[nop] .idea -*.pyc -.idea +*.pyc \ No newline at end of file diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview index f13dc9dd..47000658 100644 --- a/ceph-proxy/.gitreview +++ b/ceph-proxy/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.openstack.org port=29418 -project=openstack/charm-ceph-mon.git +project=openstack/charm-ceph-mon.git \ No newline at end of file diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 103e57f7..a66ca060 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -9,15 +9,15 @@ juju # Usage The ceph charm has two pieces of mandatory configuration for which no defaults -are provided. You _must_ set these configuration options before deployment or the charm will not work: +are provided. You _must_ set these configuration options before deployment or the charm will not work: fsid: uuid specific to a ceph cluster used to ensure that different clusters don't get mixed up - use `uuid` to generate one. - monitor-secret: + monitor-secret: a ceph generated key used by the daemons that manage to cluster - to control security. You can use the ceph-authtool command to + to control security. You can use the ceph-authtool command to generate one: ceph-authtool /dev/stdout --name=mon. --gen-key @@ -30,7 +30,7 @@ At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): ceph: - fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 + fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== Boot things up by using: diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml index a93054bb..3f8e5dfe 100644 --- a/ceph-proxy/actions.yaml +++ b/ceph-proxy/actions.yaml @@ -39,3 +39,178 @@ remove-cache-tier: as the hot pool required: [backer-pool, cache-pool] additionalProperties: false + +create-pool: + description: Creates a pool + params: + name: + type: string + description: The name of the pool + profile-name: + type: string + description: The crush profile to use for this pool. The ruleset must exist first. + pool-type: + type: string + default: "replicated" + enum: [replicated, erasure] + description: | + The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the + objects or erasure to get a kind of generalized RAID5 capability. + replicas: + type: integer + default: 3 + description: | + For the replicated pool this is the number of replicas to store of each object. + erasure-profile-name: + type: string + default: default + description: | + The name of the erasure coding profile to use for this pool. Note this profile must exist + before calling create-pool + required: [name] + additionalProperties: false +create-erasure-profile: + description: Create a new erasure code profile to use on a pool. 
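Assuming the schemas above, an operator would drive these with the pre-2.0 juju action syntax that the test helpers in this series also use; the unit name and parameter values here are purely illustrative:

    juju action do ceph-mon/0 create-pool name=mypool pool-type=replicated \
        replicas=3
    juju action do ceph-mon/0 create-erasure-profile name=myprofile \
        plugin=jerasure failure-domain=rack data-chunks=3 coding-chunks=2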
+ params: + name: + type: string + description: The name of the profile + failure-domain: + type: string + default: host + enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row] + description: | + The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host. + plugin: + type: string + default: "jerasure" + enum: [jerasure, isa, lrc, shec] + description: | + The erasure plugin to use for this profile. + See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details + data-chunks: + type: integer + default: 3 + description: | + The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance + if K = 2 a 10KB object will be divided into K objects of 5KB each. + coding-chunks: + type: integer + default: 2 + description: | + The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. + If there are 2 coding chunks, it means 2 OSDs can be out without losing data. + locality-chunks: + type: integer + description: | + Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 + two groups of three are created. Each set can be recovered without reading chunks from another set. + durability-estimator: + type: integer + description: | + The number of parity chunks each of which includes each data chunk in its calculation range. The number is used + as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data. + required: [name, data-chunks, coding-chunks] + additionalProperties: false +get-erasure-profile: + description: Display an erasure code profile. + params: + name: + type: string + description: The name of the profile + required: [name] + additionalProperties: false +delete-erasure-profile: + description: Deletes an erasure code profile. + params: + name: + type: string + description: The name of the profile + required: [name] + additionalProperties: false +list-erasure-profiles: + description: List the names of all erasure code profiles + additionalProperties: false +list-pools: + description: List your cluster’s pools + additionalProperties: false +set-pool-max-bytes: + description: Set pool quotas for the maximum number of bytes. 
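The trade-off between data-chunks (k) and coding-chunks (m) above is easiest to see with concrete numbers. For a 10 GB object under the defaults (k=3, m=2), compared with the replicated default of 3 copies:

    k, m = 3, 2                          # data-chunks, coding-chunks
    raw_gb = 10.0
    erasure_gb = raw_gb * (k + m) / k    # ~16.7 GB stored; survives m=2 lost OSDs
    replica_gb = raw_gb * 3              # 30.0 GB for three full replicas
    print(round(erasure_gb, 1), replica_gb)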
+ params: + max: + type: integer + description: The name of the pool + pool-name: + type: string + description: The name of the pool + required: [pool-name, max] + additionalProperties: false +delete-pool: + description: Deletes the named pool + params: + pool-name: + type: string + description: The name of the pool + required: [pool-name] + additionalProperties: false +rename-pool: + description: Rename a pool + params: + pool-name: + type: string + description: The name of the pool + new-name: + type: string + description: The new name of the pool + required: [pool-name, new-name] + additionalProperties: false +pool-statistics: + description: Show a pool’s utilization statistics + additionalProperties: false +snapshot-pool: + description: Snapshot a pool + params: + pool-name: + type: string + description: The name of the pool + snapshot-name: + type: string + description: The name of the snapshot + required: [snapshot-name, pool-name] + additionalProperties: false +remove-pool-snapshot: + description: Remove a pool snapshot + params: + pool-name: + type: string + description: The name of the pool + snapshot-name: + type: string + description: The name of the snapshot + required: [snapshot-name, pool-name] + additionalProperties: false +pool-set: + description: Set a value for the pool + params: + pool-name: + type: string + description: The pool to set this variable on. + key: + type: string + description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + value: + type: string + description: The value to set + required: [key, value, pool-name] + additionalProperties: false +pool-get: + description: Get a value for the pool + params: + pool-name: + type: string + description: The pool to get this variable from. 
+ key: + type: string + description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values + required: [key, pool-name] + additionalProperties: false diff --git a/ceph-proxy/actions/__init__.py b/ceph-proxy/actions/__init__.py index 9847ec9e..ff2381cc 100644 --- a/ceph-proxy/actions/__init__.py +++ b/ceph-proxy/actions/__init__.py @@ -1 +1,3 @@ __author__ = 'chris' +import sys +sys.path.append('hooks') diff --git a/ceph-proxy/actions/ceph_ops.py b/ceph-proxy/actions/ceph_ops.py new file mode 100755 index 00000000..e70ebc7e --- /dev/null +++ b/ceph-proxy/actions/ceph_ops.py @@ -0,0 +1,103 @@ +__author__ = 'chris' +from subprocess import CalledProcessError, check_output +import sys + +sys.path.append('hooks') + +import rados +from charmhelpers.core.hookenv import log, action_get, action_fail +from charmhelpers.contrib.storage.linux.ceph import pool_set, \ + set_pool_quota, snapshot_pool, remove_pool_snapshot + + +# Connect to Ceph via Librados and return a connection +def connect(): + try: + cluster = rados.Rados(conffile='/etc/ceph/ceph.conf') + cluster.connect() + return cluster + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as rados_error: + log("librados failed with error: {}".format(str(rados_error))) + + +def create_crush_rule(): + # Shell out + pass + + +def list_pools(): + try: + cluster = connect() + pool_list = cluster.list_pools() + cluster.shutdown() + return pool_list + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + action_fail(e.message) + + +def pool_get(): + key = action_get("key") + pool_name = action_get("pool_name") + try: + value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key]) + return value + except CalledProcessError as e: + action_fail(e.message) + + +def set_pool(): + key = action_get("key") + value = action_get("value") + pool_name = action_get("pool_name") + pool_set(service='ceph', pool_name=pool_name, key=key, value=value) + + +def pool_stats(): + try: + pool_name = action_get("pool-name") + cluster = connect() + ioctx = cluster.open_ioctx(pool_name) + stats = ioctx.get_stats() + ioctx.close() + cluster.shutdown() + return stats + except (rados.Error, + rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + action_fail(e.message) + + +def delete_pool_snapshot(): + pool_name = action_get("pool-name") + snapshot_name = action_get("snapshot-name") + remove_pool_snapshot(service='ceph', + pool_name=pool_name, + snapshot_name=snapshot_name) + + +# Note only one or the other can be set +def set_pool_max_bytes(): + pool_name = action_get("pool-name") + max_bytes = action_get("max") + set_pool_quota(service='ceph', + pool_name=pool_name, + max_bytes=max_bytes) + + +def snapshot_ceph_pool(): + pool_name = action_get("pool-name") + snapshot_name = action_get("snapshot-name") + snapshot_pool(service='ceph', + pool_name=pool_name, + snapshot_name=snapshot_name) diff --git a/ceph-proxy/actions/create-erasure-profile b/ceph-proxy/actions/create-erasure-profile new file mode 100755 index 00000000..2b00b588 --- /dev/null +++ b/ceph-proxy/actions/create-erasure-profile @@ -0,0 +1,89 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def 
make_erasure_profile(): + name = action_get("name") + plugin = action_get("plugin") + failure_domain = action_get("failure-domain") + + # jerasure requires k+m + # isa requires k+m + # local requires k+m+l + # shec requires k+m+c + + if plugin == "jerasure": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "isa": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "local": + k = action_get("data-chunks") + m = action_get("coding-chunks") + l = action_get("locality-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + locality=l, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "shec": + k = action_get("data-chunks") + m = action_get("coding-chunks") + c = action_get("durability-estimator") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + durability_estimator=c, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + else: + # Unknown erasure plugin + action_fail("Unknown erasure-plugin type of {}. " + "Only jerasure, isa, local or shec is " + "allowed".format(plugin)) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-proxy/actions/create-pool b/ceph-proxy/actions/create-pool new file mode 100755 index 00000000..4d1d2148 --- /dev/null +++ b/ceph-proxy/actions/create-pool @@ -0,0 +1,38 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool + + +def create_pool(): + pool_name = action_get("name") + pool_type = action_get("pool-type") + try: + if pool_type == "replicated": + replicas = action_get("replicas") + replicated_pool = ReplicatedPool(name=pool_name, + service='admin', + replicas=replicas) + replicated_pool.create() + + elif pool_type == "erasure": + crush_profile_name = action_get("erasure-profile-name") + erasure_pool = ErasurePool(name=pool_name, + erasure_code_profile=crush_profile_name, + service='admin') + erasure_pool.create() + else: + log("Unknown pool type of {}. Only erasure or replicated is " + "allowed".format(pool_type)) + action_fail("Unknown pool type of {}. Only erasure or replicated " + "is allowed".format(pool_type)) + except CalledProcessError as e: + action_fail("Pool creation failed because of a failed process. 
" + "Ret Code: {} Message: {}".format(e.returncode, e.message)) + + +if __name__ == '__main__': + create_pool() diff --git a/ceph-proxy/actions/delete-erasure-profile b/ceph-proxy/actions/delete-erasure-profile new file mode 100755 index 00000000..075c410e --- /dev/null +++ b/ceph-proxy/actions/delete-erasure-profile @@ -0,0 +1,24 @@ +#!/usr/bin/python +from subprocess import CalledProcessError + +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def delete_erasure_profile(): + name = action_get("name") + + try: + remove_erasure_profile(service='admin', profile_name=name) + except CalledProcessError as e: + action_fail("Remove erasure profile failed with error: {}".format( + e.message)) + + +if __name__ == '__main__': + delete_erasure_profile() diff --git a/ceph-proxy/actions/delete-pool b/ceph-proxy/actions/delete-pool new file mode 100755 index 00000000..3d655076 --- /dev/null +++ b/ceph-proxy/actions/delete-pool @@ -0,0 +1,28 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') + +import rados +from ceph_ops import connect +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def remove_pool(): + try: + pool_name = action_get("name") + cluster = connect() + log("Deleting pool: {}".format(pool_name)) + cluster.delete_pool(str(pool_name)) # Convert from unicode + cluster.shutdown() + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + log(e) + action_fail(e) + + +if __name__ == '__main__': + remove_pool() diff --git a/ceph-proxy/actions/get-erasure-profile b/ceph-proxy/actions/get-erasure-profile new file mode 100755 index 00000000..29ece59d --- /dev/null +++ b/ceph-proxy/actions/get-erasure-profile @@ -0,0 +1,18 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile +from charmhelpers.core.hookenv import action_get, action_set + + +def make_erasure_profile(): + name = action_get("name") + out = get_erasure_profile(service='admin', name=name) + action_set({'message': out}) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-proxy/actions/list-erasure-profiles b/ceph-proxy/actions/list-erasure-profiles new file mode 100755 index 00000000..cf6dfa09 --- /dev/null +++ b/ceph-proxy/actions/list-erasure-profiles @@ -0,0 +1,22 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_set, action_fail + +if __name__ == '__main__': + name = action_get("name") + try: + out = check_output(['ceph', + '--id', 'admin', + 'osd', + 'erasure-code-profile', + 'ls']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("Listing erasure profiles failed with error: {}".format( + e.message)) diff --git a/ceph-proxy/actions/list-pools b/ceph-proxy/actions/list-pools new file mode 100755 index 00000000..102667cf --- /dev/null +++ b/ceph-proxy/actions/list-pools @@ -0,0 +1,17 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = 
check_output(['ceph', '--id', 'admin', + 'osd', 'lspools']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("List pools failed with error: {}".format(e.message)) diff --git a/ceph-proxy/actions/pool-get b/ceph-proxy/actions/pool-get new file mode 100755 index 00000000..e4f924b9 --- /dev/null +++ b/ceph-proxy/actions/pool-get @@ -0,0 +1,19 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_get, action_fail + +if __name__ == '__main__': + name = action_get('pool-name') + key = action_get('key') + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 'pool', 'get', name, key]).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("Pool get failed with message: {}".format(e.message)) diff --git a/ceph-proxy/actions/pool-set b/ceph-proxy/actions/pool-set new file mode 100755 index 00000000..1f6e13b8 --- /dev/null +++ b/ceph-proxy/actions/pool-set @@ -0,0 +1,23 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_fail +from ceph_broker import handle_set_pool_value + +if __name__ == '__main__': + name = action_get("pool-name") + key = action_get("key") + value = action_get("value") + request = {'name': name, + 'key': key, + 'value': value} + + try: + handle_set_pool_value(service='admin', request=request) + except CalledProcessError as e: + log(e.message) + action_fail("Setting pool key: {} and value: {} failed with " + "message: {}".format(key, value, e.message)) diff --git a/ceph-proxy/actions/pool-statistics b/ceph-proxy/actions/pool-statistics new file mode 100755 index 00000000..536c889a --- /dev/null +++ b/ceph-proxy/actions/pool-statistics @@ -0,0 +1,15 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import check_output, CalledProcessError +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'df']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("ceph df failed with message: {}".format(e.message)) diff --git a/ceph-proxy/actions/remove-pool-snapshot b/ceph-proxy/actions/remove-pool-snapshot new file mode 100755 index 00000000..387849ea --- /dev/null +++ b/ceph-proxy/actions/remove-pool-snapshot @@ -0,0 +1,19 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + remove_pool_snapshot(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Remove pool snapshot failed with message: {}".format( + e.message)) diff --git a/ceph-proxy/actions/rename-pool b/ceph-proxy/actions/rename-pool new file mode 100755 index 00000000..6fe088ec --- /dev/null +++ b/ceph-proxy/actions/rename-pool @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from 
charmhelpers.contrib.storage.linux.ceph import rename_pool + +if __name__ == '__main__': + name = action_get("pool-name") + new_name = action_get("new-name") + try: + rename_pool(service='admin', old_name=name, new_name=new_name) + except CalledProcessError as e: + log(e) + action_fail("Renaming pool failed with message: {}".format(e.message)) diff --git a/ceph-proxy/actions/set-pool-max-bytes b/ceph-proxy/actions/set-pool-max-bytes new file mode 100755 index 00000000..86360885 --- /dev/null +++ b/ceph-proxy/actions/set-pool-max-bytes @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import set_pool_quota + +if __name__ == '__main__': + max_bytes = action_get("max") + name = action_get("pool-name") + try: + set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) + except CalledProcessError as e: + log(e) + action_fail("Set pool quota failed with message: {}".format(e.message)) diff --git a/ceph-proxy/actions/snapshot-pool b/ceph-proxy/actions/snapshot-pool new file mode 100755 index 00000000..a02619bf --- /dev/null +++ b/ceph-proxy/actions/snapshot-pool @@ -0,0 +1,18 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import snapshot_pool + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + snapshot_pool(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Snapshot pool failed with message: {}".format(e.message)) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 30abb8a6..c486a851 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -121,3 +121,7 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup + use-direct-io: + default: True + type: boolean + description: Configure use of direct IO for OSD journals. diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index bd23d435..d01d38ef 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -1,24 +1,71 @@ #!/usr/bin/python # -# Copyright 2014 Canonical Ltd. +# Copyright 2015 Canonical Ltd. # import json +from charmhelpers.contrib.storage.linux.ceph import validator, \ + erasure_profile_exists, ErasurePool, set_pool_quota, \ + pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \ + ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool + from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) -from charmhelpers.contrib.storage.linux.ceph import ( - create_pool, - get_osds, - pool_exists, -) + +# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ +# This should do a decent job of preventing people from passing in bad values. 
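The broker code that follows consumes JSON-encoded request envelopes. Judging from process_requests() below, a client-side request would be encoded roughly as in this sketch; the 'ops' payload shown is illustrative and the field names are inferred rather than taken verbatim from this patch:

    import json
    import uuid

    # Versioned envelope carrying a list of operations, matching what
    # process_requests() unpacks on the mon side.
    request = {
        'api-version': 1,
        'request-id': str(uuid.uuid4()),
        'ops': [{'op': 'create-pool', 'name': 'mypool', 'replicas': 3}],
    }
    wire = json.dumps(request)  # sent across the relation as a string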
+# It will give a useful error message +POOL_KEYS = { + # "Ceph Key Name": [Python type, [Valid Range]] + "size": [int], + "min_size": [int], + "crash_replay_interval": [int], + "pgp_num": [int], # = or < pg_num + "crush_ruleset": [int], + "hashpspool": [bool], + "nodelete": [bool], + "nopgchange": [bool], + "nosizechange": [bool], + "write_fadvise_dontneed": [bool], + "noscrub": [bool], + "nodeep-scrub": [bool], + "hit_set_type": [basestring, ["bloom", "explicit_hash", + "explicit_object"]], + "hit_set_count": [int, [1, 1]], + "hit_set_period": [int], + "hit_set_fpp": [float, [0.0, 1.0]], + "cache_target_dirty_ratio": [float], + "cache_target_dirty_high_ratio": [float], + "cache_target_full_ratio": [float], + "target_max_bytes": [int], + "target_max_objects": [int], + "cache_min_flush_age": [int], + "cache_min_evict_age": [int], + "fast_read": [bool], +} + +CEPH_BUCKET_TYPES = [ + 'osd', + 'host', + 'chassis', + 'rack', + 'row', + 'pdu', + 'pod', + 'room', + 'datacenter', + 'region', + 'root' +] def decode_req_encode_rsp(f): """Decorator to decode incoming requests and encode responses.""" + def decode_inner(req): return json.dumps(f(json.loads(req))) @@ -42,15 +89,14 @@ def process_requests(reqs): resp['request-id'] = request_id return resp - except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % - (reqs)) + reqs) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - msg = ("Missing or invalid api version (%s)" % (version)) + msg = ("Missing or invalid api version (%s)" % version) resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id @@ -58,6 +104,156 @@ def process_requests(reqs): return resp +def handle_create_erasure_profile(request, service): + # "local" | "shec" or it defaults to "jerasure" + erasure_type = request.get('erasure-type') + # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + failure_domain = request.get('failure-domain') + name = request.get('name') + k = request.get('k') + m = request.get('m') + l = request.get('l') + + if failure_domain not in CEPH_BUCKET_TYPES: + msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + create_erasure_profile(service=service, erasure_plugin_name=erasure_type, + profile_name=name, failure_domain=failure_domain, + data_chunks=k, coding_chunks=m, locality=l) + + +def handle_erasure_pool(request, service): + pool_name = request.get('name') + erasure_profile = request.get('erasure-profile') + quota = request.get('max-bytes') + + if erasure_profile is None: + erasure_profile = "default-canonical" + + # Check for missing params + if pool_name is None: + msg = "Missing parameter. name is required for the pool" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds + if not erasure_profile_exists(service=service, name=erasure_profile): + # TODO: Fail and tell them to create the profile or default + msg = "erasure-profile {} does not exist. 
Please create it with: " \ + "create-erasure-profile".format(erasure_profile) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + pass + pool = ErasurePool(service=service, name=pool_name, + erasure_code_profile=erasure_profile) + # Ok make the erasure pool + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (erasure_profile=%s)" % (pool, + erasure_profile), + level=INFO) + pool.create() + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_replicated_pool(request, service): + pool_name = request.get('name') + replicas = request.get('replicas') + quota = request.get('max-bytes') + + # Optional params + pg_num = request.get('pg_num') + if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(service) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + + # Check for missing params + if pool_name is None or replicas is None: + msg = "Missing parameter. name and replicas are required" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = ReplicatedPool(service=service, + name=pool_name, + replicas=replicas, + pg_num=pg_num) + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + level=INFO) + pool.create() + else: + log("Pool '%s' already exists - skipping create" % pool, + level=DEBUG) + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_create_cache_tier(request, service): + # mode = "writeback" | "readonly" + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + cache_mode = request.get('mode') + + if cache_mode is None: + cache_mode = "writeback" + + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \ + "them first".format(storage_pool, cache_pool) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + p = Pool(service=service, name=storage_pool) + p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + + +def handle_remove_cache_tier(request, service): + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = "cold-pool: {} or hot-pool: {} doesn't exist. 
Not " \ + "deleting cache tier".format(storage_pool, cache_pool) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = Pool(name=storage_pool, service=service) + pool.remove_cache_tier(cache_pool=cache_pool) + + +def handle_set_pool_value(request, service): + # Set arbitrary pool values + params = {'pool': request.get('name'), + 'key': request.get('key'), + 'value': request.get('value')} + if params['key'] not in POOL_KEYS: + msg = "Invalid key '%s'" % params['key'] + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Get the validation method + validator_params = POOL_KEYS[params['key']] + if len(validator_params) is 1: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0]) + else: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0], validator_params[1]) + # Set the value + pool_set(service=service, pool_name=params['pool'], key=params['key'], + value=params['value']) + + def process_requests_v1(reqs): """Process v1 requests. @@ -70,45 +266,45 @@ def process_requests_v1(reqs): log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % (op), level=DEBUG) + log("Processing op='%s'" % op, level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' if op == "create-pool": - params = {'pool': req.get('name'), - 'replicas': req.get('replicas')} - if not all(params.iteritems()): - msg = ("Missing parameter(s): %s" % - (' '.join([k for k in params.iterkeys() - if not params[k]]))) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Mandatory params - pool = params['pool'] - replicas = params['replicas'] - - # Optional params - pg_num = req.get('pg_num') - if pg_num: - # Cap pg_num to max allowed just in case. 
- osds = get_osds(svc) - if osds: - pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - # Ensure string - pg_num = str(pg_num) - - if not pool_exists(service=svc, name=pool): - log("Creating pool '%s' (replicas=%s)" % (pool, replicas), - level=INFO) - create_pool(service=svc, name=pool, replicas=replicas, - pg_num=pg_num) + pool_type = req.get('pool-type') # "replicated" | "erasure" + + # Default to replicated if pool_type isn't given + if pool_type == 'erasure': + handle_erasure_pool(request=req, service=svc) else: - log("Pool '%s' already exists - skipping create" % (pool), - level=DEBUG) + handle_replicated_pool(request=req, service=svc) + elif op == "create-cache-tier": + handle_create_cache_tier(request=req, service=svc) + elif op == "remove-cache-tier": + handle_remove_cache_tier(request=req, service=svc) + elif op == "create-erasure-profile": + handle_create_erasure_profile(request=req, service=svc) + elif op == "delete-pool": + pool = req.get('name') + delete_pool(service=svc, name=pool) + elif op == "rename-pool": + old_name = req.get('name') + new_name = req.get('new-name') + rename_pool(service=svc, old_name=old_name, new_name=new_name) + elif op == "snapshot-pool": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "remove-pool-snapshot": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "set-pool-value": + handle_set_pool_value(request=req, service=svc) else: - msg = "Unknown operation '%s'" % (op) + msg = "Unknown operation '%s'" % op log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 354c155c..385afdd7 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -54,7 +54,7 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, - format_ipv6_addr + format_ipv6_addr, ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render @@ -294,6 +294,7 @@ def emit_cephconf(): 'ceph_public_network': public_network, 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), + 'dio': str(config('use-direct-io')).lower(), } if config('prefer-ipv6'): diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index f64db7cb..631381bc 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -36,4 +36,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring - diff --git a/ceph-proxy/tests/018-basic-trusty-liberty b/ceph-proxy/tests/018-basic-trusty-liberty old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/019-basic-trusty-mitaka old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/020-basic-wily-liberty b/ceph-proxy/tests/020-basic-wily-liberty old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/021-basic-xenial-mitaka b/ceph-proxy/tests/021-basic-xenial-mitaka old mode 100644 new mode 100755 diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 1b24e60b..63ddca40 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -3,6 +3,7 @@ import amulet import re import 
time + from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -30,6 +31,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): u.log.info('Waiting on extended status checks...') exclude_services = ['mysql'] + + # Wait for deployment ready msgs, except exclusions self._auto_wait_for_status(exclude_services=exclude_services) self._initialize_tests() @@ -79,6 +82,9 @@ def _configure_services(self): 'admin-token': 'ubuntutesting'} mysql_config = {'dataset-size': '50%'} cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. ceph_config = { 'monitor-count': '3', 'auth-supported': 'none', @@ -198,7 +204,6 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph_osd_sentry: ['ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -212,6 +217,13 @@ def test_102_services(self): services[self.ceph1_sentry] = ceph_services services[self.ceph2_sentry] = ceph_services + ceph_osd_services = [ + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + + services[self.ceph_osd_sentry] = ceph_osd_services + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 4d17631b..49e721b3 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -19,3 +19,4 @@ packages: - python-novaclient - python-pika - python-swiftclient + - python-nose \ No newline at end of file diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index 8f08cdc7..b720d94a 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -1,12 +1,12 @@ import json -import mock import unittest +import mock + import ceph_broker class CephBrokerTestCase(unittest.TestCase): - def setUp(self): super(CephBrokerTestCase, self).setUp() @@ -20,15 +20,15 @@ def test_process_requests_noop(self, mock_log): def test_process_requests_missing_api_version(self, mock_log): req = json.dumps({'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit-code': 1, - 'stderr': - ('Missing or invalid api version ' - '(None)')}) + self.assertEqual(json.loads(rc), { + 'exit-code': 1, + 'stderr': 'Missing or invalid api version (None)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid_api_version(self, mock_log): req = json.dumps({'api-version': 2, 'ops': []}) rc = ceph_broker.process_requests(req) + print "Return: %s" % rc self.assertEqual(json.loads(rc), {'exit-code': 1, 'stderr': 'Missing or invalid api version (2)'}) @@ -41,90 +41,88 @@ def test_process_requests_invalid(self, mock_log): {'exit-code': 1, 'stderr': "Unknown operation 'invalid_op'"}) - @mock.patch('ceph_broker.create_pool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool(self, mock_log, mock_pool_exists, - mock_create_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', 
name='foo', - replicas=3, pg_num=None) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num(self, mock_log, mock_pool_exists, - mock_create_pool, + mock_replicated_pool, mock_get_osds): mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3, - 'pg_num': 100}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 100}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num='100') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, mock_pool_exists, - mock_create_pool, + mock_replicated_pool, mock_get_osds): mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3, - 'pg_num': 300}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 300}]}) rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num='100') + mock_pool_exists.assert_called_with(service='admin', + name='foo') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, - mock_create_pool): + mock_replicated_pool): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': 'foo', + 'ops': [{'op': 'create-pool', + 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - self.assertFalse(mock_create_pool.called) + mock_pool_exists.assert_called_with(service='admin', + name='foo') + self.assertFalse(mock_replicated_pool.create.called) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, - mock_create_pool): + def test_process_requests_create_pool_rid(self, mock_log, + mock_pool_exists, + mock_replicated_pool): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'request-id': '1ef5aede', - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3}]}) rc
= ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=None) + mock_replicated_pool.assert_called_with(service='admin', + name='foo', + pg_num=None, + replicas=3) self.assertEqual(json.loads(rc)['exit-code'], 0) self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-proxy/unit_tests/test_ceph_ops.py b/ceph-proxy/unit_tests/test_ceph_ops.py new file mode 100644 index 00000000..88e64c7d --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph_ops.py @@ -0,0 +1,217 @@ +__author__ = 'chris' + +import json +from hooks import ceph_broker + +import mock +import unittest + + +class TestCephOps(unittest.TestCase): + """ + @mock.patch('ceph_broker.log') + def test_connect(self, mock_broker): + self.fail() + """ + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.create_erasure_profile') + def test_create_erasure_profile(self, mock_create_erasure, mock_log): + req = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-erasure-profile', + 'name': 'foo', + 'erasure-type': 'jerasure', + 'failure-domain': 'rack', + 'k': 3, + 'm': 2, + }]}) + rc = ceph_broker.process_requests(req) + mock_create_erasure.assert_called_with(service='admin', + profile_name='foo', + coding_chunks=2, + data_chunks=3, + locality=None, + failure_domain='rack', + erasure_plugin_name='jerasure') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.ReplicatedPool.create') + def test_process_requests_create_replicated_pool(self, + mock_replicated_pool, + mock_pool_exists, + mock_log): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'pool-type': 'replicated', + 'name': 'foo', + 'replicas': 3 + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_replicated_pool.assert_called_with() + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.delete_pool') + def test_process_requests_delete_pool(self, + mock_delete_pool, + mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'delete-pool', + 'name': 'foo', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_delete_pool.assert_called_with(service='admin', name='foo') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.ErasurePool.create') + @mock.patch('hooks.ceph_broker.erasure_profile_exists') + def test_process_requests_create_erasure_pool(self, mock_profile_exists, + mock_erasure_pool, + mock_pool_exists, + mock_log): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'pool-type': 'erasure', + 'name': 'foo', + 'erasure-profile': 'default' + }]}) + rc = ceph_broker.process_requests(reqs) + mock_profile_exists.assert_called_with(service='admin', name='default') + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_erasure_pool.assert_called_with() + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.Pool.add_cache_tier') + def test_process_requests_create_cache_tier(self, 
mock_pool, + mock_pool_exists, mock_log): + mock_pool_exists.return_value = True + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-cache-tier', + 'cold-pool': 'foo', + 'hot-pool': 'foo-ssd', + 'mode': 'writeback', + 'erasure-profile': 'default' + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_any_call(service='admin', name='foo') + mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') + + mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.Pool.remove_cache_tier') + def test_process_requests_remove_cache_tier(self, mock_pool, + mock_pool_exists, mock_log): + mock_pool_exists.return_value = True + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'remove-cache-tier', + 'hot-pool': 'foo-ssd', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') + + mock_pool.assert_called_with(cache_pool='foo-ssd') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.snapshot_pool') + def test_snapshot_pool(self, mock_snapshot_pool, mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'snapshot-pool', + 'name': 'foo', + 'snapshot-name': 'foo-snap1', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_snapshot_pool.return_value = 1 + mock_snapshot_pool.assert_called_with(service='admin', + pool_name='foo', + snapshot_name='foo-snap1') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.rename_pool') + def test_rename_pool(self, mock_rename_pool, mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'rename-pool', + 'name': 'foo', + 'new-name': 'foo2', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_rename_pool.assert_called_with(service='admin', + old_name='foo', + new_name='foo2') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.remove_pool_snapshot') + def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'remove-pool-snapshot', + 'name': 'foo', + 'snapshot-name': 'foo-snap1', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_snapshot_pool.assert_called_with(service='admin', + pool_name='foo', + snapshot_name='foo-snap1') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_set') + def test_set_pool_value(self, mock_set_pool, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'set-pool-value', + 'name': 'foo', + 'key': 'size', + 'value': 3, + }]}) + rc = ceph_broker.process_requests(reqs) + mock_set_pool.assert_called_with(service='admin', + pool_name='foo', + key='size', + value=3) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + def test_set_invalid_pool_value(self, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'set-pool-value', + 'name': 'foo', + 'key': 'size', + 'value': 'abc', + }]}) + rc = ceph_broker.process_requests(reqs) + # self.assertRaises(AssertionError) + self.assertEqual(json.loads(rc)['exit-code'], 1) + + ''' + @mock.patch('ceph_broker.log') + def test_set_pool_max_bytes(self, mock_broker): + 
self.fail() + ''' + + +if __name__ == '__main__': + unittest.main() diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index c4330185..88625908 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ b/ceph-proxy/unit_tests/test_status.py @@ -31,7 +31,6 @@ class ServiceStatusTestCase(test_utils.CharmTestCase): - def setUp(self): super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get From 9bcfd09639c086c77f2ca29187b38051b740b3f5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Mar 2016 16:51:09 -0400 Subject: [PATCH 1079/2699] Merge ceph charm into ceph-mon Squashed commit of the following: commit 9b832d9391f9fea2d1491d01da6101585930fc75 Merge: 9157c2e 7b36210 Author: Chris MacNaughton Date: Mon Mar 21 16:40:54 2016 -0400 Merge branch 'master' of github.com:openstack/charm-ceph into charm-ceph-mon Change-Id: I42cfe6f1e5887627981f8ce4beff164803cc3957 commit 7b36210bac5bef3bacae2614995e123ef926453f Author: Chris Holcombe Date: Fri Mar 18 15:37:06 2016 -0700 Add ceph-osd to ceph This change adds ceph-osd back into ceph for amulet testing. Change-Id: Ice4aaf7739e8c839189313d3f6175a834cf64219 commit e87e0b7bd22fe5ccae2aafcf6bd30f145405e01b Author: Ryan Beisner Date: Wed Mar 16 17:33:48 2016 +0000 Update amulet test to include a non-existent osd-devices value The osd-devices charm config option is a whitelist, and the charm needs to gracefully handle items in that whitelist which may not exist. Change-Id: I5f9c6c1e4519fd671d6d36b415c9c8f763495dad commit ffce15d52333de4063d04b808cfbca5d890fb996 Merge: fe8bf6e 9614896 Author: Jenkins Date: Wed Mar 16 17:45:25 2016 +0000 Merge "Revert "Make 'blocked' status when node have no storage device"" commit 961489609d85851bd63c6825339a296bdf74e320 Author: Chris Holcombe Date: Wed Mar 16 16:55:02 2016 +0000 Revert "Make 'blocked' status when node have no storage device" This reverts commit fc04dd0fff33639b812627d04645134dd7d4d3de. Change-Id: I9efbf623fc9aa6096725a15e53df426739ac16ff commit fe8bf6e4a5cb466a5efc6403c215e7aece2c6b9c Author: Billy Olsen Date: Tue Mar 15 20:08:20 2016 -0700 Use tox in Makefile targets Modify the Makefile to point at the appropriate tox targets so that tox and Make output can be equivalent. This involves mapping the lint target to the pep8 target and the test target to the py27 target. Change-Id: I99761d2fdf120bacff58d0aa5c2e584382c2e72b commit fc04dd0fff33639b812627d04645134dd7d4d3de Author: Seyeong Kim Date: Fri Mar 11 06:07:52 2016 +0000 Make 'blocked' status when node have no storage device Currently there is an msg for no storage status on ceph node. But it doesn't make this charm state 'blocked'. is_storage_fine function has been created to check storage devices on ceph_hooks.py and using it on assess_status. Change-Id: I790fde0280060fa220ee83de2ad2319ac2c77230 Closes-Bug: lp1424510 commit a7c5e85c408ab8446a18cc6761b1d0b292641ea7 Author: Ryan Beisner Date: Fri Mar 4 14:36:38 2016 +0000 Enable Xenial-Mitaka amulet test target. Change-Id: I0c386fc0c052cc1ac52c0a30f7a39fa914a61100 commit e80c5097c26ac4eb200a289daa272d5c7ac82539 Author: uoscibot Date: Mon Feb 29 10:45:49 2016 +0000 Adapt imports and metadata for github move commit 391ed288fc763b69f0cd92459f236e7581a5f244 Merge: 78250bd 6228ea2 Author: Edward Hope-Morley Date: Thu Feb 25 13:34:27 2016 -0500 [hopem,r=] Support multiple l3 segments. 
Closes-Bug: 1523871 commit 6228ea2a8fa578c3c6b24b59f621e6e1026a7668 Merge: 6159390 78250bd Author: Edward Hope-Morley Date: Thu Feb 25 09:29:46 2016 -0500 sync /next commit 78250bd65c861adcb321f1c634def29fcfdaa8a9 Author: James Page Date: Wed Feb 24 21:53:28 2016 +0000 Add gitreview prior to migration to openstack commit 61593905939359ba72768ccb8f1a450a571c1d24 Author: Edward Hope-Morley Date: Wed Feb 24 15:56:20 2016 -0500 only use fallback for get_public_addr() if networks not provided in config commit 34841b0aea85b3d5693a5336dbf956a406414474 Merge: 08d1cbc 092368d Author: James Page Date: Wed Feb 24 14:22:20 2016 +0000 Add actions to support configuration of erasure coded pools. commit 092368d646d4e02b2d2ac08026b6cbf2c94a4042 Merge: de98010 08d1cbc Author: Chris Holcombe Date: Tue Feb 23 08:19:56 2016 -0800 Merge upstream commit 08d1cbcdc943493a556e0187d2b3e6fbe83b69e3 Merge: 2d4ff89 414e519 Author: James Page Date: Tue Feb 23 09:49:50 2016 +0000 Fix amulet tests for nova-compute changes. commit 414e5195c939a99adcaf79e27eb057c07c7f4761 Author: Edward Hope-Morley Date: Mon Feb 22 15:21:00 2016 -0500 fix amulet commit e99e991be21c6d98fc670bcafa30684c0ba4d5e0 Author: Edward Hope-Morley Date: Mon Feb 22 12:56:00 2016 -0500 fixup commit de98010f6f8d81e63d47ac03d33aa40bd870c7ea Author: Chris Holcombe Date: Mon Feb 22 08:05:32 2016 -0800 charmhelpers sync commit 2d4ff89e4bba2e93e08a6dd00bc2367e90b708fe Merge: f16e3fa f98627c Author: Liam Young Date: Mon Feb 22 09:26:38 2016 +0000 [james-page, r=gnuoy] Add configuration option for toggling use of direct io for OSD journals commit f3803cb60d55154e35ac2294170b27fb348141b3 Author: Chris Holcombe Date: Fri Feb 19 08:11:18 2016 -0800 Change /usr/bin/python2.7 to /usr/bin/python commit 612ba454c4263d9bfc672fe168a55c2f01599d70 Merge: c3d20a0 f16e3fa Author: Chris Holcombe Date: Thu Feb 18 17:16:55 2016 -0800 Merge upstream and resolve conflicts with actions and actions.yaml commit c3d20a0eb67918d11585851a7b5df55ce0290392 Author: Chris Holcombe Date: Thu Feb 18 17:10:56 2016 -0800 Fix up the niggles and provide feedback to the action user as to why something failed commit ea5cc48ccbb5d6515703bd5c93c13b2147972cd1 Author: Edward Hope-Morley Date: Thu Feb 18 17:42:05 2016 +0000 more commit f58dd864eac130a6bc20b46c1495d7fa34a54894 Author: Edward Hope-Morley Date: Thu Feb 18 17:09:52 2016 +0000 restore sanity commit 32631ccde309040b92ba76ecc12b16bad953f486 Author: Edward Hope-Morley Date: Thu Feb 18 11:40:09 2016 +0000 post-review fixes commit 7ada8f0de65d397648d041fae20ed21b3f38bd15 Author: Edward Hope-Morley Date: Thu Feb 18 11:36:46 2016 +0000 post-review fixes commit f16e3fac5240133c1c7dfd406caacd21b364532a Merge: a0ffb8b 7709b7d Author: James Page Date: Thu Feb 18 11:02:17 2016 +0000 Add pause/resume cluster health actions Add actions to pause and resume cluster health monitoring within ceph for all osd devices. This will ensure that no rebalancing is done whilst maintenance actions are happening within the cluster. commit a0ffb8bf97c9cf3c19d17090c96f2ea60c89da65 Merge: 65439ba 531b40d Author: James Page Date: Thu Feb 18 10:38:53 2016 +0000 Wait for quorom and query the right unit remote_unit when not in radosgw context commit 65439ba7dc3acf494c9a8d11e2cdd274d144b485 Merge: 5e77170 afd390b Author: James Page Date: Wed Feb 17 11:28:44 2016 +0000 Update test target definitions; Wait for unit status. 
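As an aside on the pause/resume commit above: the behaviour it describes reduces to toggling two cluster-wide flags. A minimal sketch, assuming only that the ceph CLI is available on the unit (the charm's actual action scripts may differ):

    from subprocess import check_call

    def pause_health():
        # 'noout' keeps CRUSH from marking OSDs out, so no rebalancing
        # starts; 'nodown' keeps OSDs from being marked down while
        # maintenance work is in progress.
        check_call(['ceph', 'osd', 'set', 'noout'])
        check_call(['ceph', 'osd', 'set', 'nodown'])

    def resume_health():
        check_call(['ceph', 'osd', 'unset', 'noout'])
        check_call(['ceph', 'osd', 'unset', 'nodown'])

Call pause_health() before taking OSDs offline for maintenance and resume_health() once they are back.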
commit 531b40d9b2d216b467cca59d7649ab5bb4577b3d Author: Liam Young Date: Wed Feb 17 10:15:37 2016 +0000 Wait for quorom and query the right unit remote_unit when not in radosgw context commit 5e77170f378be92a3e2e8de3c06dad158b4a14ca Author: James Page Date: Tue Feb 16 06:59:17 2016 +0000 Tidy tox targets commit 732d8e11cd5058e680a5982bce77648952c8532f Author: Chris Holcombe Date: Fri Feb 12 14:17:34 2016 -0800 Used a list as an integer. I meant to use the size of the list commit afd390b3ed4212883a02ca971e5613246c3ae6a8 Author: Ryan Beisner Date: Fri Feb 12 21:24:20 2016 +0000 No need to not wait for nonexistent nrpe commit 9721ce8006720d24b8e4133fbbb8a01d989a71c8 Author: Ryan Beisner Date: Fri Feb 12 21:02:36 2016 +0000 Disable Xenial test re: pending lp1537155 commit d12e2658f5b5e6c38b98ae986134f83df2e0a380 Author: Ryan Beisner Date: Fri Feb 12 20:57:08 2016 +0000 Update test target definitions; Wait for unit status. commit 7709b7d5385757fc6d8fe48fa7646efcdb77564a Author: Chris MacNaughton Date: Fri Feb 12 08:26:13 2016 -0500 rename actions commit 2c945523486227dd1c58a1c1a76a779d9c131a71 Merge: f112851 27d5d4b Author: James Page Date: Fri Feb 12 12:34:20 2016 +0000 Resolve symlinks in get_devices(). commit 7edce1dd489a4718a150f7f38ffd366855e49828 Author: Edward Hope-Morley Date: Wed Feb 10 15:20:52 2016 +0000 [hopem,r=] Support multiple l3 segments. Closes-Bug: 1523871 commit 27d5d4b8bb0fd61a3910dad1bdf46adc2b476649 Author: Bjorn Tillenius Date: Tue Feb 2 19:01:53 2016 +0200 Lint. commit 6980d3a3418ba512e65a79a62b140b238d54a17b Author: Bjorn Tillenius Date: Tue Feb 2 17:34:19 2016 +0200 Resolve symlinks in get_devices(). commit f98627c1c163d702ae1142a6153801073d57280c Merge: 4f0dc6d f112851 Author: James Page Date: Sat Jan 30 15:45:01 2016 +0100 rebase commit eaa365a180e8eda88e6ef9f1a6c975a0b780dee5 Author: Chris Holcombe Date: Fri Jan 22 15:21:45 2016 -0800 Clean up another lint error commit 477cdc96fbe124509995a02c358c24c64451c9e4 Author: Chris Holcombe Date: Fri Jan 22 15:04:27 2016 -0800 Patching up the other unit tests to passing status commit faa7b3ad95ebed02718ff58b3e3203b7d59be709 Author: Chris MacNaughton Date: Fri Jan 22 16:42:58 2016 -0500 remove regex commit 1e3b2f5dd409a02399735aa2aeb5e78d18ea2240 Author: Chris MacNaughton Date: Fri Jan 22 16:10:15 2016 -0500 lint fix commit 620209aeb47900430f039eb2e65bfe00db672e32 Author: Chris MacNaughton Date: Fri Jan 22 16:05:15 2016 -0500 use search instead of match commit 2f47939fa84c43c485042325a925d72797df6480 Author: Chris MacNaughton Date: Fri Jan 22 15:16:22 2016 -0500 fix line length commit f203a5bdfc12a2a99e3695840f16182e037f1df1 Author: Chris MacNaughton Date: Fri Jan 22 15:02:10 2016 -0500 modify regex to not care about order commit 706b272fc91d432921750b3af09689361f4b8bb9 Author: Chris MacNaughton Date: Fri Jan 22 14:16:46 2016 -0500 try with sleeping commit 66d6952a65ceb5c8858f262daf127f96ed03ea81 Merge: e446a77 f112851 Author: Chris Holcombe Date: Fri Jan 22 10:46:50 2016 -0800 Merge upstream and resolve conflicts commit fc714c96f40bac9fb89108cd56962343472f63cf Author: Chris MacNaughton Date: Fri Jan 22 11:10:34 2016 -0500 fix variable name commit 8cb53237c6588a00d86dcc0a564d18eb7cd751ae Author: Chris MacNaughton Date: Fri Jan 22 10:47:26 2016 -0500 update to use correct(?) 
commands commit b762e9842ca335845fe3a442dfdde838e5246b3b Author: Chris MacNaughton Date: Fri Jan 22 08:01:03 2016 -0500 update tests.yaml commit e446a7731cbe377f30c88bb99083745ba95caa4e Author: Chris Holcombe Date: Thu Jan 21 14:19:53 2016 -0800 Clean up lint warnings. Also added a few more mock unit tests commit 32ff93e8d0166b2346c422cbb9cd53bc4f805256 Author: Chris Holcombe Date: Thu Jan 21 09:38:47 2016 -0800 Adding a unit test file for ceph_ops commit 4f0dc6d8b76b8545453293b2c69e2d6a164db10e Author: James Page Date: Mon Jan 18 16:39:49 2016 +0000 Add configuration option for toggling use of direct io for OSD journals commit 1977cdbde1d0fa7ad57baa07d97f477143d54787 Author: Chris Holcombe Date: Mon Jan 18 08:07:35 2016 -0800 Add actions to lint. Change actions.yaml to use enum and also change underscores to dashes. Log action_fail in addition to exiting -1. Merge v2 requests with v1 requests since this does not break backwards compatibility. Add unit tests. Modify tox.ini to include actions. commit 3f0e16bcc483952e340fa89505011b7a115ff421 Author: Chris MacNaughton Date: Fri Jan 15 16:45:00 2016 -0500 fix version commit c665092be6f9d07f45a0b9baf2e0f128e4ecdc37 Author: Chris MacNaughton Date: Fri Jan 15 16:20:27 2016 -0500 updating tests commit 80de4d7256efbbc6c2ab7cdfcb1ab292668be607 Author: Chris MacNaughton Date: Thu Jan 14 13:19:10 2016 -0500 update readme commit 44365d58785e9ba63179d092b875c2029024aa8b Author: Chris MacNaughton Date: Thu Jan 14 13:17:19 2016 -0500 add pause/resume actions pause calls: `ceph osd set noout ceph osd set nodown` resume calls: `ceph osd unset noout ceph osd unset nodown` commit bdd4e69e801e2178532e31216efe7e815b06f864 Author: Chris Holcombe Date: Tue Dec 15 04:54:21 2015 -0800 Missed a few typos commit 0158586bde1a1f878c0a046a97510b8b90a95ce9 Author: Chris Holcombe Date: Tue Dec 15 04:41:22 2015 -0800 lint errors commit 92ad78733279112bbba8e12d3fb19809ab9d0ff7 Author: Chris Holcombe Date: Mon Dec 14 17:44:22 2015 -0800 Actions are working and lightly tested.
Need to create a more robust, automated test setup Change-Id: Ia18b19961dab66bb6c19ef7e9c421b2fec60fcc7 --- ceph-mon/.gitignore | 4 +- ceph-mon/.gitreview | 2 +- ceph-mon/README.md | 8 +- ceph-mon/actions.yaml | 175 +++++++++++++++ ceph-mon/actions/__init__.py | 2 + ceph-mon/actions/ceph_ops.py | 103 +++++++++ ceph-mon/actions/create-erasure-profile | 89 ++++++++ ceph-mon/actions/create-pool | 38 ++++ ceph-mon/actions/delete-erasure-profile | 24 ++ ceph-mon/actions/delete-pool | 28 +++ ceph-mon/actions/get-erasure-profile | 18 ++ ceph-mon/actions/list-erasure-profiles | 22 ++ ceph-mon/actions/list-pools | 17 ++ ceph-mon/actions/pool-get | 19 ++ ceph-mon/actions/pool-set | 23 ++ ceph-mon/actions/pool-statistics | 15 ++ ceph-mon/actions/remove-pool-snapshot | 19 ++ ceph-mon/actions/rename-pool | 16 ++ ceph-mon/actions/set-pool-max-bytes | 16 ++ ceph-mon/actions/snapshot-pool | 18 ++ ceph-mon/config.yaml | 4 + ceph-mon/hooks/ceph_broker.py | 280 ++++++++++++++++++++---- ceph-mon/hooks/ceph_hooks.py | 3 +- ceph-mon/templates/ceph.conf | 1 - ceph-mon/tests/018-basic-trusty-liberty | 0 ceph-mon/tests/019-basic-trusty-mitaka | 0 ceph-mon/tests/020-basic-wily-liberty | 0 ceph-mon/tests/021-basic-xenial-mitaka | 0 ceph-mon/tests/basic_deployment.py | 14 +- ceph-mon/tests/tests.yaml | 1 + ceph-mon/unit_tests/test_ceph_broker.py | 94 ++++---- ceph-mon/unit_tests/test_ceph_ops.py | 217 ++++++++++++++++++ ceph-mon/unit_tests/test_status.py | 1 - 33 files changed, 1170 insertions(+), 101 deletions(-) create mode 100755 ceph-mon/actions/ceph_ops.py create mode 100755 ceph-mon/actions/create-erasure-profile create mode 100755 ceph-mon/actions/create-pool create mode 100755 ceph-mon/actions/delete-erasure-profile create mode 100755 ceph-mon/actions/delete-pool create mode 100755 ceph-mon/actions/get-erasure-profile create mode 100755 ceph-mon/actions/list-erasure-profiles create mode 100755 ceph-mon/actions/list-pools create mode 100755 ceph-mon/actions/pool-get create mode 100755 ceph-mon/actions/pool-set create mode 100755 ceph-mon/actions/pool-statistics create mode 100755 ceph-mon/actions/remove-pool-snapshot create mode 100755 ceph-mon/actions/rename-pool create mode 100755 ceph-mon/actions/set-pool-max-bytes create mode 100755 ceph-mon/actions/snapshot-pool mode change 100644 => 100755 ceph-mon/tests/018-basic-trusty-liberty mode change 100644 => 100755 ceph-mon/tests/019-basic-trusty-mitaka mode change 100644 => 100755 ceph-mon/tests/020-basic-wily-liberty mode change 100644 => 100755 ceph-mon/tests/021-basic-xenial-mitaka create mode 100644 ceph-mon/unit_tests/test_ceph_ops.py diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index 86e1f1b2..7d2fd1fb 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -1,8 +1,8 @@ bin +.idea .coverage .testrepository .tox *.sw[nop] .idea -*.pyc -.idea +*.pyc \ No newline at end of file diff --git a/ceph-mon/.gitreview b/ceph-mon/.gitreview index f13dc9dd..47000658 100644 --- a/ceph-mon/.gitreview +++ b/ceph-mon/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.openstack.org port=29418 -project=openstack/charm-ceph-mon.git +project=openstack/charm-ceph-mon.git \ No newline at end of file diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 103e57f7..a66ca060 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -9,15 +9,15 @@ juju # Usage The ceph charm has two pieces of mandatory configuration for which no defaults -are provided. You _must_ set these configuration options before deployment or the charm will not work: +are provided. 
You _must_ set these configuration options before deployment or the charm will not work: fsid: uuid specific to a ceph cluster used to ensure that different clusters don't get mixed up - use `uuid` to generate one. - monitor-secret: + monitor-secret: a ceph generated key used by the daemons that manage to cluster - to control security. You can use the ceph-authtool command to + to control security. You can use the ceph-authtool command to generate one: ceph-authtool /dev/stdout --name=mon. --gen-key @@ -30,7 +30,7 @@ At a minimum you must provide a juju config file during initial deployment with the fsid and monitor-secret options (contents of cepy.yaml below): ceph: - fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 + fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== Boot things up by using: diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index a93054bb..3f8e5dfe 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -39,3 +39,178 @@ remove-cache-tier: as the hot pool required: [backer-pool, cache-pool] additionalProperties: false + +create-pool: + description: Creates a pool + params: + name: + type: string + description: The name of the pool + profile-name: + type: string + description: The crush profile to use for this pool. The ruleset must exist first. + pool-type: + type: string + default: "replicated" + enum: [replicated, erasure] + description: | + The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the + objects or erasure to get a kind of generalized RAID5 capability. + replicas: + type: integer + default: 3 + description: | + For the replicated pool this is the number of replicas to store of each object. + erasure-profile-name: + type: string + default: default + description: | + The name of the erasure coding profile to use for this pool. Note this profile must exist + before calling create-pool + required: [name] + additionalProperties: false +create-erasure-profile: + description: Create a new erasure code profile to use on a pool. + params: + name: + type: string + description: The name of the profile + failure-domain: + type: string + default: host + enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row] + description: | + The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host. + plugin: + type: string + default: "jerasure" + enum: [jerasure, isa, lrc, shec] + description: | + The erasure plugin to use for this profile. + See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details + data-chunks: + type: integer + default: 3 + description: | + The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance + if K = 2 a 10KB object will be divided into K objects of 5KB each. + coding-chunks: + type: integer + default: 2 + description: | + The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. + If there are 2 coding chunks, it means 2 OSDs can be out without losing data. + locality-chunks: + type: integer + description: | + Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 + two groups of three are created. Each set can be recovered without reading chunks from another set. 
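To ground the chunk parameters described above before the remaining profile options: with the default data-chunks k=3 and coding-chunks m=2, standard erasure-coding arithmetic (not charm-specific behaviour) works out as follows:

    k, m = 3, 2                        # data-chunks and coding-chunks defaults
    object_kb = 12
    chunk_kb = object_kb // k          # each data chunk holds 4KB
    total_chunks = k + m               # 5 chunks on 5 distinct failure domains
    raw_overhead = (k + m) / float(k)  # ~1.67x raw space, vs 3x for replicas=3
    tolerated_losses = m               # any 2 chunks can be lost and rebuilt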
+ durability-estimator: + type: integer + description: | + The number of parity chunks each of which includes each data chunk in its calculation range. The number is used + as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data. + required: [name, data-chunks, coding-chunks] + additionalProperties: false +get-erasure-profile: + description: Display an erasure code profile. + params: + name: + type: string + description: The name of the profile + required: [name] + additionalProperties: false +delete-erasure-profile: + description: Deletes an erasure code profile. + params: + name: + type: string + description: The name of the profile + required: [name] + additionalProperties: false +list-erasure-profiles: + description: List the names of all erasure code profiles + additionalProperties: false +list-pools: + description: List your cluster’s pools + additionalProperties: false +set-pool-max-bytes: + description: Set pool quotas for the maximum number of bytes. + params: + max: + type: integer + description: The name of the pool + pool-name: + type: string + description: The name of the pool + required: [pool-name, max] + additionalProperties: false +delete-pool: + description: Deletes the named pool + params: + pool-name: + type: string + description: The name of the pool + required: [pool-name] + additionalProperties: false +rename-pool: + description: Rename a pool + params: + pool-name: + type: string + description: The name of the pool + new-name: + type: string + description: The new name of the pool + required: [pool-name, new-name] + additionalProperties: false +pool-statistics: + description: Show a pool’s utilization statistics + additionalProperties: false +snapshot-pool: + description: Snapshot a pool + params: + pool-name: + type: string + description: The name of the pool + snapshot-name: + type: string + description: The name of the snapshot + required: [snapshot-name, pool-name] + additionalProperties: false +remove-pool-snapshot: + description: Remove a pool snapshot + params: + pool-name: + type: string + description: The name of the pool + snapshot-name: + type: string + description: The name of the snapshot + required: [snapshot-name, pool-name] + additionalProperties: false +pool-set: + description: Set a value for the pool + params: + pool-name: + type: string + description: The pool to set this variable on. + key: + type: string + description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + value: + type: string + description: The value to set + required: [key, value, pool-name] + additionalProperties: false +pool-get: + description: Get a value for the pool + params: + pool-name: + type: string + description: The pool to get this variable from. 
+ key: + type: string + description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values + required: [key, pool-name] + additionalProperties: false diff --git a/ceph-mon/actions/__init__.py b/ceph-mon/actions/__init__.py index 9847ec9e..ff2381cc 100644 --- a/ceph-mon/actions/__init__.py +++ b/ceph-mon/actions/__init__.py @@ -1 +1,3 @@ __author__ = 'chris' +import sys +sys.path.append('hooks') diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py new file mode 100755 index 00000000..e70ebc7e --- /dev/null +++ b/ceph-mon/actions/ceph_ops.py @@ -0,0 +1,103 @@ +__author__ = 'chris' +from subprocess import CalledProcessError, check_output +import sys + +sys.path.append('hooks') + +import rados +from charmhelpers.core.hookenv import log, action_get, action_fail +from charmhelpers.contrib.storage.linux.ceph import pool_set, \ + set_pool_quota, snapshot_pool, remove_pool_snapshot + + +# Connect to Ceph via Librados and return a connection +def connect(): + try: + cluster = rados.Rados(conffile='/etc/ceph/ceph.conf') + cluster.connect() + return cluster + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as rados_error: + log("librados failed with error: {}".format(str(rados_error))) + + +def create_crush_rule(): + # Shell out + pass + + +def list_pools(): + try: + cluster = connect() + pool_list = cluster.list_pools() + cluster.shutdown() + return pool_list + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + action_fail(e.message) + + +def pool_get(): + key = action_get("key") + pool_name = action_get("pool_name") + try: + value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key]) + return value + except CalledProcessError as e: + action_fail(e.message) + + +def set_pool(): + key = action_get("key") + value = action_get("value") + pool_name = action_get("pool_name") + pool_set(service='ceph', pool_name=pool_name, key=key, value=value) + + +def pool_stats(): + try: + pool_name = action_get("pool-name") + cluster = connect() + ioctx = cluster.open_ioctx(pool_name) + stats = ioctx.get_stats() + ioctx.close() + cluster.shutdown() + return stats + except (rados.Error, + rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + action_fail(e.message) + + +def delete_pool_snapshot(): + pool_name = action_get("pool-name") + snapshot_name = action_get("snapshot-name") + remove_pool_snapshot(service='ceph', + pool_name=pool_name, + snapshot_name=snapshot_name) + + +# Note only one or the other can be set +def set_pool_max_bytes(): + pool_name = action_get("pool-name") + max_bytes = action_get("max") + set_pool_quota(service='ceph', + pool_name=pool_name, + max_bytes=max_bytes) + + +def snapshot_ceph_pool(): + pool_name = action_get("pool-name") + snapshot_name = action_get("snapshot-name") + snapshot_pool(service='ceph', + pool_name=pool_name, + snapshot_name=snapshot_name) diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile new file mode 100755 index 00000000..2b00b588 --- /dev/null +++ b/ceph-mon/actions/create-erasure-profile @@ -0,0 +1,89 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def make_erasure_profile(): + name = 
action_get("name") + plugin = action_get("plugin") + failure_domain = action_get("failure-domain") + + # jerasure requires k+m + # isa requires k+m + # local requires k+m+l + # shec requires k+m+c + + if plugin == "jerasure": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "isa": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "local": + k = action_get("data-chunks") + m = action_get("coding-chunks") + l = action_get("locality-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + locality=l, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "shec": + k = action_get("data-chunks") + m = action_get("coding-chunks") + c = action_get("durability-estimator") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + durability_estimator=c, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + else: + # Unknown erasure plugin + action_fail("Unknown erasure-plugin type of {}. " + "Only jerasure, isa, local or shec is " + "allowed".format(plugin)) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-mon/actions/create-pool b/ceph-mon/actions/create-pool new file mode 100755 index 00000000..4d1d2148 --- /dev/null +++ b/ceph-mon/actions/create-pool @@ -0,0 +1,38 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool + + +def create_pool(): + pool_name = action_get("name") + pool_type = action_get("pool-type") + try: + if pool_type == "replicated": + replicas = action_get("replicas") + replicated_pool = ReplicatedPool(name=pool_name, + service='admin', + replicas=replicas) + replicated_pool.create() + + elif pool_type == "erasure": + crush_profile_name = action_get("erasure-profile-name") + erasure_pool = ErasurePool(name=pool_name, + erasure_code_profile=crush_profile_name, + service='admin') + erasure_pool.create() + else: + log("Unknown pool type of {}. Only erasure or replicated is " + "allowed".format(pool_type)) + action_fail("Unknown pool type of {}. Only erasure or replicated " + "is allowed".format(pool_type)) + except CalledProcessError as e: + action_fail("Pool creation failed because of a failed process. 
" + "Ret Code: {} Message: {}".format(e.returncode, e.message)) + + +if __name__ == '__main__': + create_pool() diff --git a/ceph-mon/actions/delete-erasure-profile b/ceph-mon/actions/delete-erasure-profile new file mode 100755 index 00000000..075c410e --- /dev/null +++ b/ceph-mon/actions/delete-erasure-profile @@ -0,0 +1,24 @@ +#!/usr/bin/python +from subprocess import CalledProcessError + +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def delete_erasure_profile(): + name = action_get("name") + + try: + remove_erasure_profile(service='admin', profile_name=name) + except CalledProcessError as e: + action_fail("Remove erasure profile failed with error: {}".format( + e.message)) + + +if __name__ == '__main__': + delete_erasure_profile() diff --git a/ceph-mon/actions/delete-pool b/ceph-mon/actions/delete-pool new file mode 100755 index 00000000..3d655076 --- /dev/null +++ b/ceph-mon/actions/delete-pool @@ -0,0 +1,28 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') + +import rados +from ceph_ops import connect +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def remove_pool(): + try: + pool_name = action_get("name") + cluster = connect() + log("Deleting pool: {}".format(pool_name)) + cluster.delete_pool(str(pool_name)) # Convert from unicode + cluster.shutdown() + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + log(e) + action_fail(e) + + +if __name__ == '__main__': + remove_pool() diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile new file mode 100755 index 00000000..29ece59d --- /dev/null +++ b/ceph-mon/actions/get-erasure-profile @@ -0,0 +1,18 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile +from charmhelpers.core.hookenv import action_get, action_set + + +def make_erasure_profile(): + name = action_get("name") + out = get_erasure_profile(service='admin', name=name) + action_set({'message': out}) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-mon/actions/list-erasure-profiles b/ceph-mon/actions/list-erasure-profiles new file mode 100755 index 00000000..cf6dfa09 --- /dev/null +++ b/ceph-mon/actions/list-erasure-profiles @@ -0,0 +1,22 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_set, action_fail + +if __name__ == '__main__': + name = action_get("name") + try: + out = check_output(['ceph', + '--id', 'admin', + 'osd', + 'erasure-code-profile', + 'ls']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("Listing erasure profiles failed with error: {}".format( + e.message)) diff --git a/ceph-mon/actions/list-pools b/ceph-mon/actions/list-pools new file mode 100755 index 00000000..102667cf --- /dev/null +++ b/ceph-mon/actions/list-pools @@ -0,0 +1,17 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 
'lspools']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("List pools failed with error: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-get b/ceph-mon/actions/pool-get new file mode 100755 index 00000000..e4f924b9 --- /dev/null +++ b/ceph-mon/actions/pool-get @@ -0,0 +1,19 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_get, action_fail + +if __name__ == '__main__': + name = action_get('pool-name') + key = action_get('key') + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 'pool', 'get', name, key]).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("Pool get failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-set b/ceph-mon/actions/pool-set new file mode 100755 index 00000000..1f6e13b8 --- /dev/null +++ b/ceph-mon/actions/pool-set @@ -0,0 +1,23 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_fail +from ceph_broker import handle_set_pool_value + +if __name__ == '__main__': + name = action_get("pool-name") + key = action_get("key") + value = action_get("value") + request = {'name': name, + 'key': key, + 'value': value} + + try: + handle_set_pool_value(service='admin', request=request) + except CalledProcessError as e: + log(e.message) + action_fail("Setting pool key: {} and value: {} failed with " + "message: {}".format(key, value, e.message)) diff --git a/ceph-mon/actions/pool-statistics b/ceph-mon/actions/pool-statistics new file mode 100755 index 00000000..536c889a --- /dev/null +++ b/ceph-mon/actions/pool-statistics @@ -0,0 +1,15 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import check_output, CalledProcessError +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'df']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("ceph df failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/remove-pool-snapshot b/ceph-mon/actions/remove-pool-snapshot new file mode 100755 index 00000000..387849ea --- /dev/null +++ b/ceph-mon/actions/remove-pool-snapshot @@ -0,0 +1,19 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + remove_pool_snapshot(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Remove pool snapshot failed with message: {}".format( + e.message)) diff --git a/ceph-mon/actions/rename-pool b/ceph-mon/actions/rename-pool new file mode 100755 index 00000000..6fe088ec --- /dev/null +++ b/ceph-mon/actions/rename-pool @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import rename_pool + +if __name__ == 
'__main__': + name = action_get("pool-name") + new_name = action_get("new-name") + try: + rename_pool(service='admin', old_name=name, new_name=new_name) + except CalledProcessError as e: + log(e) + action_fail("Renaming pool failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/set-pool-max-bytes b/ceph-mon/actions/set-pool-max-bytes new file mode 100755 index 00000000..86360885 --- /dev/null +++ b/ceph-mon/actions/set-pool-max-bytes @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import set_pool_quota + +if __name__ == '__main__': + max_bytes = action_get("max") + name = action_get("pool-name") + try: + set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) + except CalledProcessError as e: + log(e) + action_fail("Set pool quota failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/snapshot-pool b/ceph-mon/actions/snapshot-pool new file mode 100755 index 00000000..a02619bf --- /dev/null +++ b/ceph-mon/actions/snapshot-pool @@ -0,0 +1,18 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import snapshot_pool + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + snapshot_pool(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Snapshot pool failed with message: {}".format(e.message)) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 30abb8a6..c486a851 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -121,3 +121,7 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup + use-direct-io: + default: True + type: boolean + description: Configure use of direct IO for OSD journals. diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index bd23d435..d01d38ef 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -1,24 +1,71 @@ #!/usr/bin/python # -# Copyright 2014 Canonical Ltd. +# Copyright 2015 Canonical Ltd. # import json +from charmhelpers.contrib.storage.linux.ceph import validator, \ + erasure_profile_exists, ErasurePool, set_pool_quota, \ + pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \ + ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool + from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) -from charmhelpers.contrib.storage.linux.ceph import ( - create_pool, - get_osds, - pool_exists, -) + +# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ +# This should do a decent job of preventing people from passing in bad values. 
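As an aside, here is a minimal, self-contained sketch of how a [type, optional range] table like the POOL_KEYS map below can drive validation; the names POOL_KEYS_SAMPLE and validate_pool_value are invented purely for illustration and are not part of this patch:

    # Illustrative sketch only -- mirrors the POOL_KEYS idea below.
    POOL_KEYS_SAMPLE = {
        "size": [int],                        # type check only
        "hit_set_fpp": [float, [0.0, 1.0]],   # type plus numeric [min, max]
        "hit_set_type": [str, ["bloom", "explicit_hash", "explicit_object"]],
    }

    def validate_pool_value(key, value):
        spec = POOL_KEYS_SAMPLE.get(key)
        if spec is None:
            raise KeyError("unknown pool key: %s" % key)
        if not isinstance(value, spec[0]):
            raise TypeError("%s expects %s, got %r" % (key, spec[0], value))
        if len(spec) == 2:
            valid = spec[1]
            if spec[0] is str and value not in valid:
                raise ValueError("%s must be one of %s" % (key, valid))
            elif spec[0] is not str and not valid[0] <= value <= valid[1]:
                raise ValueError("%s must be within %s" % (key, valid))

    validate_pool_value("hit_set_fpp", 0.05)   # passes silently
    # validate_pool_value("hit_set_fpp", 2.0)  # would raise ValueError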
+# It will give a useful error message +POOL_KEYS = { + # "Ceph Key Name": [Python type, [Valid Range]] + "size": [int], + "min_size": [int], + "crash_replay_interval": [int], + "pgp_num": [int], # = or < pg_num + "crush_ruleset": [int], + "hashpspool": [bool], + "nodelete": [bool], + "nopgchange": [bool], + "nosizechange": [bool], + "write_fadvise_dontneed": [bool], + "noscrub": [bool], + "nodeep-scrub": [bool], + "hit_set_type": [basestring, ["bloom", "explicit_hash", + "explicit_object"]], + "hit_set_count": [int, [1, 1]], + "hit_set_period": [int], + "hit_set_fpp": [float, [0.0, 1.0]], + "cache_target_dirty_ratio": [float], + "cache_target_dirty_high_ratio": [float], + "cache_target_full_ratio": [float], + "target_max_bytes": [int], + "target_max_objects": [int], + "cache_min_flush_age": [int], + "cache_min_evict_age": [int], + "fast_read": [bool], +} + +CEPH_BUCKET_TYPES = [ + 'osd', + 'host', + 'chassis', + 'rack', + 'row', + 'pdu', + 'pod', + 'room', + 'datacenter', + 'region', + 'root' +] def decode_req_encode_rsp(f): """Decorator to decode incoming requests and encode responses.""" + def decode_inner(req): return json.dumps(f(json.loads(req))) @@ -42,15 +89,14 @@ def process_requests(reqs): resp['request-id'] = request_id return resp - except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % - (reqs)) + reqs) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - msg = ("Missing or invalid api version (%s)" % (version)) + msg = ("Missing or invalid api version (%s)" % version) resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id @@ -58,6 +104,156 @@ def process_requests(reqs): return resp +def handle_create_erasure_profile(request, service): + # "local" | "shec" or it defaults to "jerasure" + erasure_type = request.get('erasure-type') + # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + failure_domain = request.get('failure-domain') + name = request.get('name') + k = request.get('k') + m = request.get('m') + l = request.get('l') + + if failure_domain not in CEPH_BUCKET_TYPES: + msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + create_erasure_profile(service=service, erasure_plugin_name=erasure_type, + profile_name=name, failure_domain=failure_domain, + data_chunks=k, coding_chunks=m, locality=l) + + +def handle_erasure_pool(request, service): + pool_name = request.get('name') + erasure_profile = request.get('erasure-profile') + quota = request.get('max-bytes') + + if erasure_profile is None: + erasure_profile = "default-canonical" + + # Check for missing params + if pool_name is None: + msg = "Missing parameter. name is required for the pool" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds + if not erasure_profile_exists(service=service, name=erasure_profile): + # TODO: Fail and tell them to create the profile or default + msg = "erasure-profile {} does not exist. 
Please create it with: " \ + "create-erasure-profile".format(erasure_profile) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + pass + pool = ErasurePool(service=service, name=pool_name, + erasure_code_profile=erasure_profile) + # Ok make the erasure pool + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (erasure_profile=%s)" % (pool, + erasure_profile), + level=INFO) + pool.create() + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_replicated_pool(request, service): + pool_name = request.get('name') + replicas = request.get('replicas') + quota = request.get('max-bytes') + + # Optional params + pg_num = request.get('pg_num') + if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(service) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + + # Check for missing params + if pool_name is None or replicas is None: + msg = "Missing parameter. name and replicas are required" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = ReplicatedPool(service=service, + name=pool_name, + replicas=replicas, + pg_num=pg_num) + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + level=INFO) + pool.create() + else: + log("Pool '%s' already exists - skipping create" % pool, + level=DEBUG) + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_create_cache_tier(request, service): + # mode = "writeback" | "readonly" + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + cache_mode = request.get('mode') + + if cache_mode is None: + cache_mode = "writeback" + + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \ + "them first".format(storage_pool, cache_pool) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + p = Pool(service=service, name=storage_pool) + p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + + +def handle_remove_cache_tier(request, service): + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = "cold-pool: {} or hot-pool: {} doesn't exist. 
Not " \ + "deleting cache tier".format(storage_pool, cache_pool) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = Pool(name=storage_pool, service=service) + pool.remove_cache_tier(cache_pool=cache_pool) + + +def handle_set_pool_value(request, service): + # Set arbitrary pool values + params = {'pool': request.get('name'), + 'key': request.get('key'), + 'value': request.get('value')} + if params['key'] not in POOL_KEYS: + msg = "Invalid key '%s'" % params['key'] + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Get the validation method + validator_params = POOL_KEYS[params['key']] + if len(validator_params) is 1: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0]) + else: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0], validator_params[1]) + # Set the value + pool_set(service=service, pool_name=params['pool'], key=params['key'], + value=params['value']) + + def process_requests_v1(reqs): """Process v1 requests. @@ -70,45 +266,45 @@ def process_requests_v1(reqs): log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % (op), level=DEBUG) + log("Processing op='%s'" % op, level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' if op == "create-pool": - params = {'pool': req.get('name'), - 'replicas': req.get('replicas')} - if not all(params.iteritems()): - msg = ("Missing parameter(s): %s" % - (' '.join([k for k in params.iterkeys() - if not params[k]]))) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Mandatory params - pool = params['pool'] - replicas = params['replicas'] - - # Optional params - pg_num = req.get('pg_num') - if pg_num: - # Cap pg_num to max allowed just in case. 
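The cap retained in handle_replicated_pool above aims at roughly 100 placement groups per OSD, divided by the replica count. A quick worked check of min(pg_num, len(osds) * 100 // replicas), using the same numbers as the unit tests later in this patch (3 OSDs, 3 replicas):

    def cap_pg_num(requested, osd_count, replicas):
        # ~100 PGs per OSD, shared across the replicas
        return min(requested, osd_count * 100 // replicas)

    assert cap_pg_num(300, 3, 3) == 100  # capped (the pg_num=300 test case)
    assert cap_pg_num(100, 3, 3) == 100  # exactly at the cap
    assert cap_pg_num(64, 3, 3) == 64    # under the cap: left as requested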
- osds = get_osds(svc) - if osds: - pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - # Ensure string - pg_num = str(pg_num) - - if not pool_exists(service=svc, name=pool): - log("Creating pool '%s' (replicas=%s)" % (pool, replicas), - level=INFO) - create_pool(service=svc, name=pool, replicas=replicas, - pg_num=pg_num) + pool_type = req.get('pool-type') # "replicated" | "erasure" + + # Default to replicated if pool_type isn't given + if pool_type == 'erasure': + handle_erasure_pool(request=req, service=svc) else: - log("Pool '%s' already exists - skipping create" % (pool), - level=DEBUG) + handle_replicated_pool(request=req, service=svc) + elif op == "create-cache-tier": + handle_create_cache_tier(request=req, service=svc) + elif op == "remove-cache-tier": + handle_remove_cache_tier(request=req, service=svc) + elif op == "create-erasure-profile": + handle_create_erasure_profile(request=req, service=svc) + elif op == "delete-pool": + pool = req.get('name') + delete_pool(service=svc, name=pool) + elif op == "rename-pool": + old_name = req.get('name') + new_name = req.get('new-name') + rename_pool(service=svc, old_name=old_name, new_name=new_name) + elif op == "snapshot-pool": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "remove-pool-snapshot": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "set-pool-value": + handle_set_pool_value(request=req, service=svc) else: - msg = "Unknown operation '%s'" % (op) + msg = "Unknown operation '%s'" % op log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 354c155c..385afdd7 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -54,7 +54,7 @@ from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, - format_ipv6_addr + format_ipv6_addr, ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render @@ -294,6 +294,7 @@ def emit_cephconf(): 'ceph_public_network': public_network, 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), + 'dio': str(config('use-direct-io')).lower(), } if config('prefer-ipv6'): diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index f64db7cb..631381bc 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -36,4 +36,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring - diff --git a/ceph-mon/tests/018-basic-trusty-liberty b/ceph-mon/tests/018-basic-trusty-liberty old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/019-basic-trusty-mitaka b/ceph-mon/tests/019-basic-trusty-mitaka old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/020-basic-wily-liberty b/ceph-mon/tests/020-basic-wily-liberty old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/021-basic-xenial-mitaka b/ceph-mon/tests/021-basic-xenial-mitaka old mode 100644 new mode 100755 diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 1b24e60b..63ddca40 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -3,6 +3,7 @@ import amulet import re import time + from 
charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -30,6 +31,8 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): u.log.info('Waiting on extended status checks...') exclude_services = ['mysql'] + + # Wait for deployment ready msgs, except exclusions self._auto_wait_for_status(exclude_services=exclude_services) self._initialize_tests() @@ -79,6 +82,9 @@ def _configure_services(self): 'admin-token': 'ubuntutesting'} mysql_config = {'dataset-size': '50%'} cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. ceph_config = { 'monitor-count': '3', 'auth-supported': 'none', @@ -198,7 +204,6 @@ def test_102_services(self): self.cinder_sentry: ['cinder-api', 'cinder-scheduler', 'cinder-volume'], - self.ceph_osd_sentry: ['ceph-osd-all'], } if self._get_openstack_release() < self.vivid_kilo: @@ -212,6 +217,13 @@ def test_102_services(self): services[self.ceph1_sentry] = ceph_services services[self.ceph2_sentry] = ceph_services + ceph_osd_services = [ + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) + ] + + services[self.ceph_osd_sentry] = ceph_osd_services + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 4d17631b..49e721b3 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -19,3 +19,4 @@ packages: - python-novaclient - python-pika - python-swiftclient + - python-nose \ No newline at end of file diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 8f08cdc7..b720d94a 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -1,12 +1,12 @@ import json -import mock import unittest +import mock + import ceph_broker class CephBrokerTestCase(unittest.TestCase): - def setUp(self): super(CephBrokerTestCase, self).setUp() @@ -20,15 +20,15 @@ def test_process_requests_noop(self, mock_log): def test_process_requests_missing_api_version(self, mock_log): req = json.dumps({'ops': []}) rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit-code': 1, - 'stderr': - ('Missing or invalid api version ' - '(None)')}) + self.assertEqual(json.loads(rc), { + 'exit-code': 1, + 'stderr': 'Missing or invalid api version (None)'}) @mock.patch('ceph_broker.log') def test_process_requests_invalid_api_version(self, mock_log): req = json.dumps({'api-version': 2, 'ops': []}) rc = ceph_broker.process_requests(req) + print "Return: %s" % rc self.assertEqual(json.loads(rc), {'exit-code': 1, 'stderr': 'Missing or invalid api version (2)'}) @@ -41,90 +41,88 @@ def test_process_requests_invalid(self, mock_log): {'exit-code': 1, 'stderr': "Unknown operation 'invalid_op'"}) - @mock.patch('ceph_broker.create_pool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool(self, mock_log, mock_pool_exists, - mock_create_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, 
pg_num=None) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num(self, mock_log, mock_pool_exists, - mock_create_pool, + mock_replicated_pool, mock_get_osds): mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3, - 'pg_num': 100}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 100}]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num='100') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) self.assertEqual(json.loads(rc), {'exit-code': 0}) @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, mock_pool_exists, - mock_create_pool, + mock_replicated_pool, mock_get_osds): mock_get_osds.return_value = [0, 1, 2] mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3, - 'pg_num': 300}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 300}]}) rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num='100') + mock_pool_exists.assert_called_with(service='admin', + name='foo') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) + self.assertEqual(json.loads(rc), {'exit-code': 0}) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_exists(self, mock_log, mock_pool_exists, - mock_create_pool): + mock_replicated_pool): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', 'name': 'foo', + 'ops': [{'op': 'create-pool', + 'name': 'foo', 'replicas': 3}]}) rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - self.assertFalse(mock_create_pool.called) + mock_pool_exists.assert_called_with(service='admin', + name='foo') + self.assertFalse(mock_replicated_pool.create.called) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.create_pool') + @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists, - mock_create_pool): + def test_process_requests_create_pool_rid(self, mock_log, + mock_pool_exists, + mock_replicated_pool): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'request-id': '1ef5aede', - 'ops': [{'op': 'create-pool', 'name': - 'foo', 'replicas': 3}]}) + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3}]}) rc = 
ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_create_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=None) + mock_replicated_pool.assert_called_with(service='admin', + name='foo', + pg_num=None, + replicas=3) self.assertEqual(json.loads(rc)['exit-code'], 0) self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py new file mode 100644 index 00000000..88e64c7d --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -0,0 +1,217 @@ +__author__ = 'chris' + +import json +from hooks import ceph_broker + +import mock +import unittest + + +class TestCephOps(unittest.TestCase): + """ + @mock.patch('ceph_broker.log') + def test_connect(self, mock_broker): + self.fail() + """ + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.create_erasure_profile') + def test_create_erasure_profile(self, mock_create_erasure, mock_log): + req = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-erasure-profile', + 'name': 'foo', + 'erasure-type': 'jerasure', + 'failure-domain': 'rack', + 'k': 3, + 'm': 2, + }]}) + rc = ceph_broker.process_requests(req) + mock_create_erasure.assert_called_with(service='admin', + profile_name='foo', + coding_chunks=2, + data_chunks=3, + locality=None, + failure_domain='rack', + erasure_plugin_name='jerasure') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.ReplicatedPool.create') + def test_process_requests_create_replicated_pool(self, + mock_replicated_pool, + mock_pool_exists, + mock_log): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'pool-type': 'replicated', + 'name': 'foo', + 'replicas': 3 + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_replicated_pool.assert_called_with() + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.delete_pool') + def test_process_requests_delete_pool(self, + mock_delete_pool, + mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'delete-pool', + 'name': 'foo', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_delete_pool.assert_called_with(service='admin', name='foo') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.ErasurePool.create') + @mock.patch('hooks.ceph_broker.erasure_profile_exists') + def test_process_requests_create_erasure_pool(self, mock_profile_exists, + mock_erasure_pool, + mock_pool_exists, + mock_log): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'pool-type': 'erasure', + 'name': 'foo', + 'erasure-profile': 'default' + }]}) + rc = ceph_broker.process_requests(reqs) + mock_profile_exists.assert_called_with(service='admin', name='default') + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_erasure_pool.assert_called_with() + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.Pool.add_cache_tier') + def test_process_requests_create_cache_tier(self, mock_pool, + 
mock_pool_exists, mock_log): + mock_pool_exists.return_value = True + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-cache-tier', + 'cold-pool': 'foo', + 'hot-pool': 'foo-ssd', + 'mode': 'writeback', + 'erasure-profile': 'default' + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_any_call(service='admin', name='foo') + mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') + + mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_exists') + @mock.patch('hooks.ceph_broker.Pool.remove_cache_tier') + def test_process_requests_remove_cache_tier(self, mock_pool, + mock_pool_exists, mock_log): + mock_pool_exists.return_value = True + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'remove-cache-tier', + 'hot-pool': 'foo-ssd', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') + + mock_pool.assert_called_with(cache_pool='foo-ssd') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.snapshot_pool') + def test_snapshot_pool(self, mock_snapshot_pool, mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'snapshot-pool', + 'name': 'foo', + 'snapshot-name': 'foo-snap1', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_snapshot_pool.return_value = 1 + mock_snapshot_pool.assert_called_with(service='admin', + pool_name='foo', + snapshot_name='foo-snap1') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.rename_pool') + def test_rename_pool(self, mock_rename_pool, mock_log): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'rename-pool', + 'name': 'foo', + 'new-name': 'foo2', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_rename_pool.assert_called_with(service='admin', + old_name='foo', + new_name='foo2') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.remove_pool_snapshot') + def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'remove-pool-snapshot', + 'name': 'foo', + 'snapshot-name': 'foo-snap1', + }]}) + rc = ceph_broker.process_requests(reqs) + mock_snapshot_pool.assert_called_with(service='admin', + pool_name='foo', + snapshot_name='foo-snap1') + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + @mock.patch('hooks.ceph_broker.pool_set') + def test_set_pool_value(self, mock_set_pool, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'set-pool-value', + 'name': 'foo', + 'key': 'size', + 'value': 3, + }]}) + rc = ceph_broker.process_requests(reqs) + mock_set_pool.assert_called_with(service='admin', + pool_name='foo', + key='size', + value=3) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + def test_set_invalid_pool_value(self, mock_broker): + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'set-pool-value', + 'name': 'foo', + 'key': 'size', + 'value': 'abc', + }]}) + rc = ceph_broker.process_requests(reqs) + # self.assertRaises(AssertionError) + self.assertEqual(json.loads(rc)['exit-code'], 1) + + ''' + @mock.patch('ceph_broker.log') + def test_set_pool_max_bytes(self, mock_broker): + self.fail() + ''' 
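For reference, one plausible way to complete the quota test stubbed out above, following the mocking pattern of the other tests in this file. This is a sketch: it presumes a 'set-pool-max-bytes' op dispatched to set_pool_quota, which process_requests_v1 in this patch does not implement, so the op name and wiring are assumptions:

    # Sketch only -- 'set-pool-max-bytes' is an assumed op name here.
    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.set_pool_quota')
    def test_set_pool_max_bytes(self, mock_set_quota, mock_log):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{'op': 'set-pool-max-bytes',
                                    'name': 'foo',
                                    'max': 1024}]})
        ceph_broker.process_requests(reqs)
        mock_set_quota.assert_called_with(service='admin',
                                          pool_name='foo',
                                          max_bytes=1024)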
+ + +if __name__ == '__main__': + unittest.main() diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index c4330185..88625908 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -31,7 +31,6 @@ class ServiceStatusTestCase(test_utils.CharmTestCase): - def setUp(self): super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get From 9c236665c5bc2f3b2f3f5f426bca54c62a2b888d Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 25 Mar 2016 15:02:50 +0000 Subject: [PATCH 1080/2699] Revert "Rolling upgrades of ceph osd cluster" This reverts commit cde030d1f8a43f224ccf2b1b0e1d3b307e47e9e3. Change-Id: Ic6f371fcc2879886b705fdce4d59bc99e41eea89 --- ceph-osd/charm-helpers-hooks.yaml | 1 - ceph-osd/hooks/ceph.py | 133 +- ceph-osd/hooks/ceph_hooks.py | 224 +-- .../contrib/storage/linux/ceph.py | 1195 ----------------- ceph-osd/templates/ceph.conf | 2 - ceph-osd/tests/basic_deployment.py | 35 +- ceph-osd/unit_tests/test_upgrade_roll.py | 157 --- 7 files changed, 34 insertions(+), 1713 deletions(-) delete mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py delete mode 100644 ceph-osd/unit_tests/test_upgrade_roll.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index cb5cbac0..c8c54766 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -5,7 +5,6 @@ include: - cli - fetch - contrib.storage.linux: - - ceph - utils - contrib.openstack.alternatives - contrib.network.ip diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 0b23979b..51b06ac8 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -19,10 +19,11 @@ from charmhelpers.core.host import ( mkdir, chownr, + service_restart, cmp_pkgrevno, lsb_release, - service_stop, - service_restart) + service_stop +) from charmhelpers.core.hookenv import ( log, ERROR, @@ -57,112 +58,6 @@ def ceph_user(): return "root" -class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name - self.identifier = identifier - self.host = host - self.rack = rack - self.row = row - self.datacenter = datacenter - self.chassis = chassis - self.root = root - - def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_tree(service): - """ - Returns the current osd map in JSON. - :return: List. :raise: ValueError if the monmap fails to parse. 
- Also raises CalledProcessError if our ceph command fails - """ - try: - tree = subprocess.check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) - ) - return crush_list - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def get_local_osd_ids(): - """ - This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of osd identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - def get_version(): '''Derive Ceph release from an installed package.''' import apt_pkg as apt @@ -413,7 +308,6 @@ def rescan_osd_devices(): _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" def is_bootstrapped(): @@ -439,21 +333,6 @@ def import_osd_bootstrap_key(key): ] subprocess.check_call(cmd) - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -620,7 +499,7 @@ def update_monfs(): def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): + if (is_osd_disk(journal_dev)): log('Looks like {} is already an OSD data' ' or journal, skipping.'.format(journal_dev)) return @@ -664,7 +543,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if is_osd_disk(dev) and not reformat_osd: + if (is_osd_disk(dev) and not reformat_osd): log('Looks like {} is already an' ' OSD data or journal, skipping.'.format(dev)) return @@ -738,7 +617,7 @@ def filesystem_mounted(fs): def get_running_osds(): - """Returns a list of the pids of the current running OSD daemons""" + '''Returns a list of the pids of the current running OSD daemons''' cmd = ['pgrep', 'ceph-osd'] try: result = subprocess.check_output(cmd) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f31bbf52..912abc11 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -9,16 +9,12 @@ import glob import os -import random import shutil -import subprocess import sys import tempfile import socket -import time import ceph -from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, ERROR, @@ -35,8 +31,8 @@ from 
charmhelpers.core.host import ( umount, mkdir, - cmp_pkgrevno, - service_stop, service_start) + cmp_pkgrevno +) from charmhelpers.fetch import ( add_source, apt_install, @@ -44,216 +40,24 @@ filter_installed_packages, ) from charmhelpers.core.sysctl import create as create_sysctl -from charmhelpers.core import host from utils import ( get_host_ip, get_networks, assert_charm_supports_ipv6, - render_template) + render_template, +) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, ) -from charmhelpers.contrib.storage.linux.ceph import ( - monitor_key_set, - monitor_key_exists, - monitor_key_get) + from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() -# A dict of valid ceph upgrade paths. Mapping is old -> new -upgrade_paths = { - 'cloud:trusty-juno': 'cloud:trusty-kilo', - 'cloud:trusty-kilo': 'cloud:trusty-liberty', - 'cloud:trusty-liberty': 'cloud:trusty-mitaka', -} - - -def pretty_print_upgrade_paths(): - lines = [] - for key, value in upgrade_paths.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - - -def check_for_upgrade(): - release_info = host.lsb_release() - if not release_info['DISTRIB_CODENAME'] == 'trusty': - log("Invalid upgrade path from {}. Only trusty is currently " - "supported".format(release_info['DISTRIB_CODENAME'])) - return - - c = hookenv.config() - old_version = c.previous('source') - log('old_version: {}'.format(old_version)) - # Strip all whitespace - new_version = hookenv.config('source') - if new_version: - # replace all whitespace - new_version = new_version.replace(' ', '') - log('new_version: {}'.format(new_version)) - - if old_version in upgrade_paths: - if new_version == upgrade_paths[old_version]: - log("{} to {} is a valid upgrade path. Proceeding.".format( - old_version, new_version)) - roll_osd_cluster(new_version) - else: - # Log a helpful error message - log("Invalid upgrade path from {} to {}. " - "Valid paths are: {}".format(old_version, - new_version, - pretty_print_upgrade_paths())) - - -def lock_and_roll(my_name): - start_timestamp = time.time() - - log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) - monitor_key_set('osd-upgrade', "{}_start".format(my_name), start_timestamp) - log("Rolling") - # This should be quick - upgrade_osd() - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) - monitor_key_set('osd-upgrade', "{}_done".format(my_name), stop_timestamp) - - -def wait_on_previous_node(previous_node): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - 'osd-upgrade', - "{}_done".format(previous_node)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - 'osd-upgrade', - "{}_start".format(previous_node)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. 
current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - 'osd-upgrade', - "{}_done".format(previous_node)) - - -def get_upgrade_position(osd_sorted_list, match_name): - for index, item in enumerate(osd_sorted_list): - if item.name == match_name: - return index - return None - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. -# It rolls an entire server at a time. -def roll_osd_cluster(new_version): - """ - This is tricky to get right so here's what we're going to do. - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. - - TODO: If you're not in the same failure domain it's safe to upgrade - 1. Examine all pools and adopt the most strict failure domain policy - Example: Pool 1: Failure domain = rack - Pool 2: Failure domain = host - Pool 3: Failure domain = row - - outcome: Failure domain = host - """ - log('roll_osd_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - osd_tree = ceph.get_osd_tree(service='osd-upgrade') - # A sorted list of osd unit names - osd_sorted_list = sorted(osd_tree) - log("osd_sorted_list: {}".format(osd_sorted_list)) - - try: - position = get_upgrade_position(osd_sorted_list, my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - osd_sorted_list[position - 1].name)) - wait_on_previous_node( - previous_node=osd_sorted_list[position - 1].name) - lock_and_roll(my_name=my_name) - except ValueError: - log("Failed to find name {} in list {}".format( - my_name, osd_sorted_list)) - status_set('blocked', 'failed to upgrade osd') - - -def upgrade_osd(): - current_version = ceph.get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_stop('ceph-osd@{}'.format(osd_id)) - else: - service_stop('ceph-osd-all') - apt_install(packages=ceph.PACKAGES, fatal=True) - if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_start('ceph-osd@{}'.format(osd_id)) - else: - service_start('ceph-osd-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -320,7 +124,6 @@ def emit_cephconf(): install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 90) - JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' @@ -355,9 +158,6 @@ def check_overlap(journaldevs, datadevs): @hooks.hook('config-changed') def config_changed(): - # Check if an upgrade was requested - check_for_upgrade() - # Pre-flight checks if config('osd-format') not in ceph.DISK_FORMATS: log('Invalid OSD disk format configuration specified', level=ERROR) @@ -371,7 +171,7 @@ def config_changed(): create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf') e_mountpoint = config('ephemeral-unmount') - if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): + if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): umount(e_mountpoint) prepare_disks_and_activate() @@ -401,14 +201,8 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = \ - relation_get('ceph-public-address', - unit, - relid) or get_host_ip( - relation_get( - 'private-address', - unit, - relid)) + addr = relation_get('ceph-public-address', unit, relid) or \ + get_host_ip(relation_get('private-address', unit, relid)) if addr: hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) @@ -464,12 +258,10 @@ def get_journal_devices(): 'mon-relation-departed') def mon_relation(): bootstrap_key = relation_get('osd_bootstrap_key') - upgrade_key = relation_get('osd_upgrade_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) - ceph.import_osd_upgrade_key(upgrade_key) prepare_disks_and_activate() else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py 
b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py deleted file mode 100644 index f4582545..00000000 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ /dev/null @@ -1,1195 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# -import bisect -import errno -import hashlib -import six - -import os -import shutil -import json -import time -import uuid - -from subprocess import ( - check_call, - check_output, - CalledProcessError, -) -from charmhelpers.core.hookenv import ( - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core.host import ( - mount, - mounts, - service_start, - service_stop, - service_running, - umount, -) -from charmhelpers.fetch import ( - apt_install, -) - -from charmhelpers.core.kernel import modprobe - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" -# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) -powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] - - -def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate - :param valid_type: The type that value should be. - :param valid_range: A range of values that value can assume. - :return: - """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) - if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) - # If we're dealing with strings - if valid_type is six.string_types: - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError( - "Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) - - -class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. 
Provides an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). - """ - - def __init__(self, service, name): - self.service = service - self.name = name - - # Create the pool if it doesn't exist already - # To be implemented by subclasses - def create(self): - pass - - def add_cache_tier(self, cache_pool, mode): - """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None - """ - # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) - - def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. - :return: None - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - - elif mode == 'writeback': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) - # Flush the cache and wait for it to return - check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - - def get_pgs(self, pool_size): - """ - :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for - erasure coded pools - :return: int. The number of pgs to use. 
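In plain numbers, the sizing rules implemented just below behave like this sketch; note that OSD counts of exactly 5, 10 or 50 slip past the chained comparisons and fall through to the estimate branch in the original:

    import bisect

    POWERS_OF_TWO = [8192, 16384, 32768, 65536, 131072, 262144,
                     524288, 1048576, 2097152, 4194304, 8388608]

    def pgs_for(osd_count, pool_size):
        if osd_count < 5:
            return 128
        elif 5 < osd_count < 10:
            return 512
        elif 10 < osd_count < 50:
            return 4096
        estimate = osd_count * 100 // pool_size
        # round up to the next power of two in the table
        return POWERS_OF_TWO[bisect.bisect_right(POWERS_OF_TWO, estimate)]

    print(pgs_for(3, 3))    # 128 -- small-cluster floor
    print(pgs_for(120, 3))  # 8192 -- estimate 4000, rounded up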
- """ - validator(value=pool_size, valid_type=int) - osd_list = get_osds(self.service) - if not osd_list: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - return 200 - - osd_list_length = len(osd_list) - # Calculate based on Ceph best practices - if osd_list_length < 5: - return 128 - elif 5 < osd_list_length < 10: - return 512 - elif 10 < osd_list_length < 50: - return 4096 - else: - estimate = (osd_list_length * 100) / pool_size - # Return the next nearest power of 2 - index = bisect.bisect_right(powers_of_two, estimate) - return powers_of_two[index] - - -class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - if pg_num is None: - self.pg_num = self.get_pgs(self.replicas) - else: - self.pg_num = pg_num - - def create(self): - if not pool_exists(self.service, self.name): - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] - try: - check_call(cmd) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default"): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information so we can properly size the pgs - erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), - level=ERROR) - raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), - level=ERROR) - raise PoolCreationError( - message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) - - pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile] - try: - check_call(cmd) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" - - -def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) - try: - return json.loads(mon_status) - except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) - raise - except CalledProcessError as e: - log("mon_status command failed with message: {}".format( - e.message)) - raise - - -def hash_monitor_names(service): - """ - Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. 
The Ceph user name to run the command under - :rtype : dict. json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} - """ - try: - hash_list = [] - monitor_list = get_mon_map(service=service) - if monitor_list['monmap']['mons']: - for mon in monitor_list['monmap']['mons']: - hash_list.append( - hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) - return sorted(hash_list) - else: - return None - except (ValueError, CalledProcessError): - raise - - -def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command under - Deletes a key value pair on the monitor cluster. - :param key: six.string_types. The key to delete. - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'del', str(key)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) - raise - - -def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to set. - :param value: The value to set. This will be converted to a string - before setting - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'put', str(key), str(value)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) - raise - - -def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for. - :return: Returns the value of that key or None if not found. - """ - try: - output = check_output( - ['ceph', '--id', service, - 'config-key', 'get', str(key)]) - return output - except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format( - e.output)) - return None - - -def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs - """ - try: - check_call( - ['ceph', '--id', service, - 'config-key', 'exists', str(key)]) - # I can return true here regardless because Ceph returns - # ENOENT if the key wasn't found - return True - except CalledProcessError as e: - if e.returncode == errno.ENOENT: - return False - else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) - raise - - -def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: - """ - try: - out = check_output(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - return json.loads(out) - except (CalledProcessError, OSError, ValueError): - return None - - -def pool_set(service, pool_name, key, value): - """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. 
The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def snapshot_pool(service, pool_name, snapshot_name): - """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError - """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_quota(service, pool_name): - """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_erasure_profile(service, profile_name): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', - failure_domain='host', - data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', - 'room', 'root', 'row']) - :param data_chunks: int - :param coding_chunks: int - :param locality: int - :param durability_estimator: int - :return: None. Can raise CalledProcessError - """ - # Ensure this failure_domain is allowed by Ceph - validator(failure_domain, six.string_types, - ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) - - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] - if locality is not None and durability_estimator is not None: - raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") - - # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) - - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - - try: - check_call(cmd) - except CalledProcessError: - raise - - -def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None - """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] - check_call(cmd) - - -def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command under - :param name: six.string_types - :return: int or None - """ - validator(value=name, valid_type=six.string_types) - try: - check_call(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name]) - return True - except CalledProcessError: - return False - - -def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: int or None - """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) - try: - osd_json = json.loads(out) - for pool in osd_json['pools']: - if pool['pool_name'] == pool_name: - return pool['cache_mode'] - return None - except ValueError: - raise - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. 
- """ - version = ceph_version() - if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) - - return None - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" - keyring = _keyring_path(service) - if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' 
% keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - create_keyring(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = os.errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, - level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, - level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. - os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. - - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. 
- if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.' - .format(svc), level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - Returns False if no ceph key is available in relation state. - """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - create_keyring(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op_create_pool(self, name, replica_count=3, pg_num=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - """ - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num}) - - def set_ops(self, ops): - """Set request ops to provided value. - - Useful for injecting ops that come from a previous request - to allow comparisons to ensure validity. 
- """ - self.ops = ops - - @property - def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'request-id': self.request_id}) - - def _ops_equal(self, other): - if len(self.ops) == len(other.ops): - for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num']: - if self.ops[req_no].get(key) != other.ops[req_no].get(key): - return False - else: - return False - return True - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - if self.api_version == other.api_version and \ - self._ops_equal(other): - return True - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -class CephBrokerRsp(object): - """Ceph broker response. - - Response is json-decoded and contents provided as methods/properties. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, encoded_rsp): - self.api_version = None - self.rsp = json.loads(encoded_rsp) - - @property - def request_id(self): - return self.rsp.get('request-id') - - @property - def exit_code(self): - return self.rsp.get('exit-code') - - @property - def exit_msg(self): - return self.rsp.get('stderr') - - -# Ceph Broker Conversation: -# If a charm needs an action to be taken by ceph it can create a CephBrokerRq -# and send that request to ceph via the ceph relation. The CephBrokerRq has a -# unique id so that the client can identity which CephBrokerRsp is associated -# with the request. Ceph will also respond to each client unit individually -# creating a response key per client unit eg glance/0 will get a CephBrokerRsp -# via key broker-rsp-glance-0 -# -# To use this the charm can just do something like: -# -# from charmhelpers.contrib.storage.linux.ceph import ( -# send_request_if_needed, -# is_request_complete, -# CephBrokerRq, -# ) -# -# @hooks.hook('ceph-relation-changed') -# def ceph_changed(): -# rq = CephBrokerRq() -# rq.add_op_create_pool(name='poolname', replica_count=3) -# -# if is_request_complete(rq): -# -# else: -# send_request_if_needed(get_ceph_request()) -# -# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example -# of glance having sent a request to ceph which ceph has successfully processed -# 'ceph:8': { -# 'ceph/0': { -# 'auth': 'cephx', -# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', -# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', -# 'ceph-public-address': '10.5.44.103', -# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', -# 'private-address': '10.5.44.103', -# }, -# 'glance/0': { -# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' -# '"ops": [{"replicas": 3, "name": "glance", ' -# '"op": "create-pool"}]}'), -# 'private-address': '10.5.44.109', -# }, -# } - -def get_previous_request(rid): - """Return the last ceph broker request sent on a given relation - - @param rid: Relation id to query for request - """ - request = None - broker_req = relation_get(attribute='broker_req', rid=rid, - unit=local_unit()) - if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request - - -def get_request_states(request, relation='ceph'): - """Return a dict of requests per relation id with their corresponding - completion state. 
- - This allows a charm, which has a request for ceph, to see whether there is - an equivalent request already being processed and if so what state that - request is in. - - @param request: A CephBrokerRq object - """ - complete = [] - requests = {} - for rid in relation_ids(relation): - complete = False - previous_request = get_previous_request(rid) - if request == previous_request: - sent = True - complete = is_request_complete_for_rid(previous_request, rid) - else: - sent = False - complete = False - - requests[rid] = { - 'sent': sent, - 'complete': complete, - } - - return requests - - -def is_request_sent(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been sent - - Returns True if a similair request has been sent - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['sent']: - return False - - return True - - -def is_request_complete(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been - completed - - Returns True if a similair request has been completed - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['complete']: - return False - - return True - - -def is_request_complete_for_rid(request, rid): - """Check if a given request has been completed on the given relation - - @param request: A CephBrokerRq object - @param rid: Relation ID - """ - broker_key = get_broker_rsp_key() - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if rdata.get(broker_key): - rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == request.request_id: - if not rsp.exit_code: - return True - else: - # The remote unit sent no reply targeted at this unit so either the - # remote ceph cluster does not support unit targeted replies or it - # has not processed our request yet. 
- if rdata.get('broker_rsp'): - request_data = json.loads(rdata['broker_rsp']) - if request_data.get('request-id'): - log('Ignoring legacy broker_rsp without unit key as remote ' - 'service supports unit specific replies', level=DEBUG) - else: - log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies', level=DEBUG) - rsp = CephBrokerRsp(rdata['broker_rsp']) - if not rsp.exit_code: - return True - - return False - - -def get_broker_rsp_key(): - """Return broker response key for this unit - - This is the key that ceph is going to use to pass request status - information back to this unit - """ - return 'broker-rsp-' + local_unit().replace('/', '-') - - -def send_request_if_needed(request, relation='ceph'): - """Send broker request if an equivalent request has not already been sent - - @param request: A CephBrokerRq object - """ - if is_request_sent(request, relation=relation): - log('Request already sent but not complete, not sending new request', - level=DEBUG) - else: - for rid in relation_ids(relation): - log('Sending request {}'.format(request.request_id), level=DEBUG) - relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 7fec00e5..66da0aca 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -33,8 +33,6 @@ cluster addr = {{ cluster_addr }} osd crush location = {{crush_location}} {% endif %} -[client.osd-upgrade] -keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 7800a00d..49a10b11 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -43,8 +43,8 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). 
""" - this_service = {'name': 'ceph-osd', 'units': 3} - other_services = [{'name': 'ceph-mon', 'units': 3}, + this_service = {'name': 'ceph-osd'} + other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, @@ -60,18 +60,18 @@ def _add_relations(self): 'nova-compute:shared-db': 'mysql:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph-mon:client', + 'nova-compute:ceph': 'ceph:client', 'keystone:shared-db': 'mysql:shared-db', 'glance:shared-db': 'mysql:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph-mon:client', + 'glance:ceph': 'ceph:client', 'cinder:shared-db': 'mysql:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd' + 'cinder:ceph': 'ceph:client', + 'ceph-osd:mon': 'ceph:osd' } super(CephOsdBasicDeployment, self)._add_relations(relations) @@ -86,6 +86,9 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' } # Include a non-existent device as osd-devices is a whitelist, @@ -99,7 +102,7 @@ def _configure_services(self): configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph-mon': ceph_config, + 'ceph': ceph_config, 'ceph-osd': ceph_osd_config} super(CephOsdBasicDeployment, self)._configure_services(configs) @@ -112,12 +115,10 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] - self.ceph_osd1_sentry = self.d.sentry.unit['ceph-osd/1'] - self.ceph_osd2_sentry = self.d.sentry.unit['ceph-osd/2'] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -176,6 +177,7 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { 'ceph-mon': 1, + 'ceph-osd': 2 } # Units with process names and PID quantities expected @@ -212,6 +214,9 @@ def test_102_services(self): ceph_services = [ 'ceph-mon-all', 'ceph-mon id=`hostname`', + 'ceph-osd-all', + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services @@ -228,16 +233,16 @@ def test_102_services(self): def test_200_ceph_osd_ceph_relation(self): """Verify the ceph-osd to ceph relation data.""" - u.log.debug('Checking ceph-osd:ceph-mon relation data...') + u.log.debug('Checking ceph-osd:ceph mon relation data...') unit = self.ceph_osd_sentry - relation = ['mon', 'ceph-mon:osd'] + relation = ['mon', 'ceph:osd'] expected = { 'private-address': 
u.valid_ip } ret = u.validate_relation_data(unit, relation, expected) if ret: - message = u.relation_error('ceph-osd to ceph-mon', ret) + message = u.relation_error('ceph-osd to ceph', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_201_ceph0_to_ceph_osd_relation(self): diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py deleted file mode 100644 index 840e247c..00000000 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ /dev/null @@ -1,157 +0,0 @@ -import time - -__author__ = 'chris' -from mock import patch, call, MagicMock -import sys - -sys.path.append('/home/chris/repos/ceph-osd/hooks') - -from ceph import CrushLocation - -import test_utils -import ceph_hooks - -TO_PATCH = [ - 'apt_install', - 'apt_update', - 'add_source', - 'config', - 'ceph', - 'get_conf', - 'hookenv', - 'host', - 'log', - 'service_start', - 'service_stop', - 'socket', - 'status_set', -] - - -def config_side_effect(*args): - if args[0] == 'source': - return 'cloud:trusty-kilo' - elif args[0] == 'key': - return 'key' - elif args[0] == 'release-version': - return 'cloud:trusty-kilo' - - -previous_node_start_time = time.time() - (9 * 60) - - -def monitor_key_side_effect(*args): - if args[1] == \ - 'ip-192-168-1-2_done': - return False - elif args[1] == \ - 'ip-192-168-1-2_start': - # Return that the previous node started 9 minutes ago - return previous_node_start_time - - -class UpgradeRollingTestCase(test_utils.CharmTestCase): - def setUp(self): - super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) - - @patch('ceph_hooks.roll_osd_cluster') - def test_check_for_upgrade(self, roll_osd_cluster): - self.host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - self.hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] - ceph_hooks.check_for_upgrade() - - roll_osd_cluster.assert_called_with('cloud:trusty-kilo') - - @patch('ceph_hooks.upgrade_osd') - @patch('ceph_hooks.monitor_key_set') - def test_lock_and_roll(self, monitor_key_set, upgrade_osd): - monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') - upgrade_osd.assert_called_once_with() - - def test_upgrade_osd(self): - self.config.side_effect = config_side_effect - self.ceph.get_version.return_value = "0.80" - self.ceph.systemd.return_value = False - ceph_hooks.upgrade_osd() - self.service_stop.assert_called_with('ceph-osd-all') - self.service_start.assert_called_with('ceph-osd-all') - self.status_set.assert_has_calls([ - call('maintenance', 'Upgrading osd'), - ]) - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.get_upgrade_position') - def test_roll_osd_cluster_first(self, - get_upgrade_position, - lock_and_roll): - self.socket.gethostname.return_value = "ip-192-168-1-2" - self.ceph.get_osd_tree.return_value = "" - get_upgrade_position.return_value = 0 - ceph_hooks.roll_osd_cluster('0.94.1') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-2") - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.get_upgrade_position') - @patch('ceph_hooks.wait_on_previous_node') - def test_roll_osd_cluster_second(self, - wait_on_previous_node, - get_upgrade_position, - lock_and_roll): - wait_on_previous_node.return_value = None - self.socket.gethostname.return_value = "ip-192-168-1-3" - self.ceph.get_osd_tree.return_value = [ - CrushLocation( - name="ip-192-168-1-2", - identifier='a', - host='host-a', - 
rack='rack-a', - row='row-a', - datacenter='dc-1', - chassis='chassis-a', - root='ceph'), - CrushLocation( - name="ip-192-168-1-3", - identifier='a', - host='host-b', - rack='rack-a', - row='row-a', - datacenter='dc-1', - chassis='chassis-a', - root='ceph') - ] - get_upgrade_position.return_value = 1 - ceph_hooks.roll_osd_cluster('0.94.1') - self.status_set.assert_called_with( - 'blocked', - 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") - - @patch('ceph_hooks.monitor_key_get') - @patch('ceph_hooks.monitor_key_exists') - def test_wait_on_previous_node(self, - monitor_key_exists, - monitor_key_get): - monitor_key_get.side_effect = monitor_key_side_effect - monitor_key_exists.return_value = False - - ceph_hooks.wait_on_previous_node("ip-192-168-1-2") - - # Make sure we checked to see if the previous node started - monitor_key_get.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_start')] - ) - # Make sure we checked to see if the previous node was finished - monitor_key_exists.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_done')] - ) - # Make sure we waited at last once before proceeding - self.log.assert_has_calls( - [call('Previous node is: ip-192-168-1-2')], - [call('ip-192-168-1-2 is not finished. Waiting')], - ) From d585a8f1d76bfd836ba6b3c4cf197c6548db6c4b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 23 Mar 2016 13:43:48 -0400 Subject: [PATCH 1081/2699] Update ceph_encryption amulet test to raise on failure Currently, when this test should fail, it just returns False when it should call amulet.raise_status so that the test gets marked as failed. For Mitaka, we skip the encryption test as the Ceph charm cannot currently deploy encryption on Infernalis. Change-Id: I6a15b2d2560a5dffb9a77a8e5965613a8d3f6aac --- ceph-osd/tests/basic_deployment.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 49a10b11..04d8c4d9 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -579,6 +579,10 @@ def test_499_ceph_cmds_exit_zero(self): def test_900_ceph_encryption(self): """Verify that the new disk is added with encryption by checking for Ceph's encryption keys directory""" + + if self._get_openstack_release() >= self.trusty_mitaka: + u.log.warn("Skipping encryption test for Mitaka") + return sentry = self.ceph_osd_sentry set_default = { 'osd-encrypt': 'False', @@ -621,15 +625,14 @@ def test_900_ceph_encryption(self): if not file_mtime: self.log.warn('Could not determine mtime, assuming ' 'folder does not exist') - return False + amulet.raise_status(amulet.FAIL, 'folder does not exist') if file_mtime >= mtime: self.log.debug('Folder mtime is newer than provided mtime ' '(%s >= %s) on %s (OK)' % (file_mtime, mtime, unit_name)) - return True else: self.log.warn('Folder mtime is older than provided mtime' '(%s < on %s) on %s' % (file_mtime, mtime, unit_name)) - return False + amulet.raise_status(amulet.FAIL, 'Folder mtime is older than provided mtime') From 7c11669209fd3241fee811be46dcdea802ee06e5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 29 Mar 2016 10:30:56 +0000 Subject: [PATCH 1082/2699] Add pause/resume actions and sync charm-helpers Adds pause and resume actions to the charm so that the unit stays paused during maintenance operations.
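The new actions are exercised end-to-end by the amulet helpers added below (_run_action and _wait_on_action). As a rough standalone sketch of the same flow, where the unit name ceph-radosgw/0 and the juju 1.x action CLI are assumptions about the deployment rather than part of this change:

    import json
    import subprocess

    # Queue the pause action against the unit and record the action id
    # (mirrors _run_action below).
    out = subprocess.check_output(
        ["juju", "action", "do", "--format=json", "ceph-radosgw/0", "pause"])
    data = json.loads(out.decode(encoding="UTF-8"))
    action_id = data[u'Action queued with id']

    # Fetch the action result; a "completed" status means the unit paused
    # cleanly (mirrors _wait_on_action below, minus the retry loop).
    result = subprocess.check_output(
        ["juju", "action", "fetch", "--format=json", action_id])
    print(json.loads(result.decode(encoding="UTF-8"))[u"status"])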
Change-Id: Iba0d39f9c5ca482a12fdb9f91d2890a715d3f8b7 --- ceph-radosgw/actions.yaml | 4 ++ ceph-radosgw/actions/actions.py | 47 +++++++++++++ ceph-radosgw/actions/pause | 1 + ceph-radosgw/actions/resume | 1 + .../templates/section-keystone-authtoken | 18 ++--- .../section-keystone-authtoken-legacy | 10 +++ ceph-radosgw/hooks/hooks.py | 20 +++--- ceph-radosgw/hooks/utils.py | 70 +++++++++++++++++++ ceph-radosgw/tests/basic_deployment.py | 45 ++++++++++++ .../charmhelpers/contrib/amulet/utils.py | 7 +- ceph-radosgw/unit_tests/__init__.py | 1 + ceph-radosgw/unit_tests/test_actions.py | 64 +++++++++++++++++ .../unit_tests/test_ceph_radosgw_utils.py | 56 +++++++++++++++ 13 files changed, 320 insertions(+), 24 deletions(-) create mode 100644 ceph-radosgw/actions.yaml create mode 100755 ceph-radosgw/actions/actions.py create mode 120000 ceph-radosgw/actions/pause create mode 120000 ceph-radosgw/actions/resume create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy create mode 100644 ceph-radosgw/unit_tests/test_actions.py create mode 100644 ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml new file mode 100644 index 00000000..4aa9d8f5 --- /dev/null +++ b/ceph-radosgw/actions.yaml @@ -0,0 +1,4 @@ +pause: + description: Pause the ceph-radosgw unit. +resume: + description: Resume the ceph-radosgw unit. diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py new file mode 100755 index 00000000..e2e67471 --- /dev/null +++ b/ceph-radosgw/actions/actions.py @@ -0,0 +1,47 @@ +#!/usr/bin/python + +import os +import sys + +sys.path.append('hooks/') +from charmhelpers.core.hookenv import action_fail +from utils import ( + pause_unit_helper, + resume_unit_helper, + register_configs, +) + + +def pause(args): + """Pause the ceph-radosgw services. + @raises Exception should the service fail to stop. + """ + pause_unit_helper(register_configs()) + + +def resume(args): + """Resume the ceph-radosgw services. + @raises Exception should the service fail to start.""" + resume_unit_helper(register_configs()) + + +# A dictionary of all the defined actions to callables (which take +# parsed arguments).
+ACTIONS = {"pause": pause, "resume": resume} + + +def main(args): + action_name = os.path.basename(args[0]) + try: + action = ACTIONS[action_name] + except KeyError: + return "Action %s undefined" % action_name + else: + try: + action(args) + except Exception as e: + action_fail(str(e)) + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/ceph-radosgw/actions/pause b/ceph-radosgw/actions/pause new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/pause @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/resume b/ceph-radosgw/actions/resume new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/resume @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 0b6da25c..5dcebe7c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -1,20 +1,12 @@ {% if auth_host -%} -{% if api_version == '3' -%} [keystone_authtoken] -auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_plugin = password +project_domain_id = default +user_domain_id = default project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} -project_domain_name = default -user_domain_name = default -auth_plugin = password -{% else -%} -[keystone_authtoken] -identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} -admin_tenant_name = {{ admin_tenant_name }} -admin_user = {{ admin_user }} -admin_password = {{ admin_password }} signing_dir = {{ signing_dir }} {% endif -%} -{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy new file mode 100644 index 00000000..9356b2be --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy @@ -0,0 +1,10 @@ +{% if auth_host -%} +[keystone_authtoken] +# Juno specific config (Bug #1557223) +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 03aecfcb..a3be6f2a 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -36,7 +36,6 @@ ) from charmhelpers.core.host import ( lsb_release, - restart_on_change, ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import ( @@ -54,21 +53,21 @@ canonical_url, PUBLIC, INTERNAL, ADMIN, ) -from charmhelpers.contrib.openstack.utils import ( - set_os_workload_status, -) from charmhelpers.contrib.storage.linux.ceph import ( send_request_if_needed, 
is_request_complete, ) +from charmhelpers.contrib.openstack.utils import ( + is_unit_paused_set, + pausable_restart_on_change as restart_on_change, +) from utils import ( enable_pocket, CEPHRG_HA_RES, register_configs, - REQUIRED_INTERFACES, - check_optional_relations, setup_ipv6, services, + assess_status, ) from charmhelpers.contrib.charmsupport import nrpe @@ -282,7 +281,8 @@ def mon_relation(): key = relation_get('radosgw_key') if key: ceph.import_radosgw_key(key) - restart() # TODO figure out a better way todo this + if not is_unit_paused_set(): + restart() # TODO figure out a better way to do this else: send_request_if_needed(rq, relation='mon') @@ -339,7 +339,8 @@ def identity_joined(relid=None): def identity_changed(relid=None): identity_joined(relid) CONFIGS.write_all() - restart() + if not is_unit_paused_set(): + restart() @hooks.hook('cluster-relation-joined') @@ -454,5 +455,4 @@ def update_nrpe_config(): hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) - set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, - charm_func=check_optional_relations) + assess_status(CONFIGS) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 0e7f4c71..81d7b7c5 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -28,6 +28,9 @@ from charmhelpers.contrib.openstack.utils import ( os_release, set_os_workload_status, + make_assess_status_func, + pause_unit, + resume_unit, ) from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config from charmhelpers.core.host import ( @@ -177,3 +180,70 @@ def setup_ipv6(): 'main') apt_update(fatal=True) apt_install('haproxy/trusty-backports', fatal=True) + + +def assess_status(configs): + """Assess status of current unit. + Decides what the state of the unit should be based on the current + configuration. + SIDE EFFECT: calls set_os_workload_status(...) which sets the workload + status of the unit. + Also calls status_set(...) directly if paused state isn't complete. + @param configs: a templating.OSConfigRenderer() object + @returns None - this function is executed for its side-effect + """ + assess_status_func(configs)() + + +def assess_status_func(configs): + """Helper function to create the function that will assess_status() for + the unit. + Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to + create the appropriate status function and then returns it. + Used directly by assess_status() and also for pausing and resuming + the unit. + + NOTE(ajkavanagh) ports are not checked due to race hazards with services + that don't behave synchronously w.r.t. their service scripts. e.g. + apache2. + @param configs: a templating.OSConfigRenderer() object + @return f() -> None : a function that assesses the unit's workload status + """ + return make_assess_status_func( + configs, REQUIRED_INTERFACES, + charm_func=check_optional_relations, + services=services(), ports=None) + + +def pause_unit_helper(configs): + """Helper function to pause a unit, and then call assess_status(...) in + effect, so that the status is correctly updated. + Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work. + @param configs: a templating.OSConfigRenderer() object + @returns None - this function is executed for its side-effect + """ + _pause_resume_helper(pause_unit, configs) + + +def resume_unit_helper(configs): + """Helper function to resume a unit, and then call assess_status(...) in + effect, so that the status is correctly updated.
+ Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work. + @param configs: a templating.OSConfigRenderer() object + @returns None - this function is executed for its side-effect + """ + _pause_resume_helper(resume_unit, configs) + + +def _pause_resume_helper(f, configs): + """Helper function that uses the make_assess_status_func(...) from + charmhelpers.contrib.openstack.utils to create an assess_status(...) + function that can be used with the pause/resume of the unit + @param f: the function to be used with the assess_status(...) function + @returns None - this function is executed for its side-effect + """ + # TODO(ajkavanagh) - ports= has been left off because of the race hazard + # that exists due to service_start() + f(assess_status_func(configs), + services=services(), + ports=None) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 79ebb3eb..e45c1933 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -1,6 +1,9 @@ #!/usr/bin/python import amulet +import subprocess +import json +import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -98,6 +101,32 @@ def _configure_services(self): 'ceph-radosgw': radosgw_config} super(CephRadosGwBasicDeployment, self)._configure_services(configs) + def _run_action(self, unit_id, action, *args): + command = ["juju", "action", "do", "--format=json", unit_id, action] + command.extend(args) + print("Running command: %s\n" % " ".join(command)) + output = subprocess.check_output(command) + output_json = output.decode(encoding="UTF-8") + data = json.loads(output_json) + action_id = data[u'Action queued with id'] + return action_id + + def _wait_on_action(self, action_id): + command = ["juju", "action", "fetch", "--format=json", action_id] + while True: + try: + output = subprocess.check_output(command) + except Exception as e: + print(e) + return False + output_json = output.decode(encoding="UTF-8") + data = json.loads(output_json) + if data[u"status"] == "completed": + return True + elif data[u"status"] == "failed": + return False + time.sleep(2) + def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units @@ -491,6 +520,22 @@ def test_499_ceph_cmds_exit_zero(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) + def test_910_pause_and_resume(self): + """The services can be paused and resumed. """ + u.log.debug('Checking pause and resume actions...') + unit_name = "ceph-radosgw/0" + unit = self.d.sentry.unit[unit_name] + + assert u.status_get(unit)[0] == "active" + + action_id = self._run_action(unit_name, "pause") + assert self._wait_on_action(action_id), "Pause action failed." + assert u.status_get(unit)[0] == "maintenance" + + action_id = self._run_action(unit_name, "resume") + assert self._wait_on_action(action_id), "Resume action failed." + assert u.status_get(unit)[0] == "active" + u.log.debug('OK') # Note(beisner): need to add basic object store functional checks. 
# FYI: No restart check as ceph services do not restart diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 2591a9b1..3e159039 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -782,15 +782,20 @@ def get_uuid_epoch_stamp(self): # amulet juju action helpers: def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output): + _check_output=subprocess.check_output, + params=None): """Run the named action on a given unit sentry. + params a dict of parameters to use _check_output parameter is used for dependency injection. @return action_id. """ unit_id = unit_sentry.info["unit_name"] command = ["juju", "action", "do", "--format=json", unit_id, action] + if params is not None: + for key, value in params.iteritems(): + command.append("{}={}".format(key, value)) self.log.info("Running command: %s\n" % " ".join(command)) output = _check_output(command, universal_newlines=True) data = json.loads(output) diff --git a/ceph-radosgw/unit_tests/__init__.py b/ceph-radosgw/unit_tests/__init__.py index afaed60c..43aa3614 100644 --- a/ceph-radosgw/unit_tests/__init__.py +++ b/ceph-radosgw/unit_tests/__init__.py @@ -1,3 +1,4 @@ import sys +sys.path.append('actions/') sys.path.append('hooks/') diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py new file mode 100644 index 00000000..0597b9b6 --- /dev/null +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -0,0 +1,64 @@ +import mock +from mock import patch + +from test_utils import CharmTestCase + +with patch('utils.register_configs') as configs: + configs.return_value = 'test-config' + import actions + + +class PauseTestCase(CharmTestCase): + + def setUp(self): + super(PauseTestCase, self).setUp( + actions, ["pause_unit_helper"]) + + def test_pauses_services(self): + actions.pause([]) + self.pause_unit_helper.assert_called_once_with('test-config') + + +class ResumeTestCase(CharmTestCase): + + def setUp(self): + super(ResumeTestCase, self).setUp( + actions, ["resume_unit_helper"]) + + def test_pauses_services(self): + actions.resume([]) + self.resume_unit_helper.assert_called_once_with('test-config') + + +class MainTestCase(CharmTestCase): + + def setUp(self): + super(MainTestCase, self).setUp(actions, ["action_fail"]) + + def test_invokes_action(self): + dummy_calls = [] + + def dummy_action(args): + dummy_calls.append(True) + + with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): + actions.main(["foo"]) + self.assertEqual(dummy_calls, [True]) + + def test_unknown_action(self): + """Unknown actions aren't a traceback.""" + exit_string = actions.main(["foo"]) + self.assertEqual("Action foo undefined", exit_string) + + def test_failing_action(self): + """Actions which traceback trigger action_fail() calls.""" + dummy_calls = [] + + self.action_fail.side_effect = dummy_calls.append + + def dummy_action(args): + raise ValueError("uh oh") + + with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): + actions.main(["foo"]) + self.assertEqual(dummy_calls, ["uh oh"]) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py new file mode 100644 index 00000000..a9fae89d --- /dev/null +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -0,0 +1,56 @@ +import utils +from mock import patch, MagicMock + +from test_utils import CharmTestCase + +TO_PATCH = [ +] + + +class 
CephRadosGWUtilTests(CharmTestCase): + def setUp(self): + super(CephRadosGWUtilTests, self).setUp(utils, TO_PATCH) + + def test_assess_status(self): + with patch.object(utils, 'assess_status_func') as asf: + callee = MagicMock() + asf.return_value = callee + utils.assess_status('test-config') + asf.assert_called_once_with('test-config') + callee.assert_called_once_with() + + @patch.object(utils, 'check_optional_relations') + @patch.object(utils, 'REQUIRED_INTERFACES') + @patch.object(utils, 'services') + @patch.object(utils, 'make_assess_status_func') + def test_assess_status_func(self, + make_assess_status_func, + services, + REQUIRED_INTERFACES, + check_optional_relations): + services.return_value = 's1' + utils.assess_status_func('test-config') + # ports=None whilst port checks are disabled. + make_assess_status_func.assert_called_once_with( + 'test-config', REQUIRED_INTERFACES, + charm_func=check_optional_relations, + services='s1', ports=None) + + def test_pause_unit_helper(self): + with patch.object(utils, '_pause_resume_helper') as prh: + utils.pause_unit_helper('random-config') + prh.assert_called_once_with(utils.pause_unit, 'random-config') + with patch.object(utils, '_pause_resume_helper') as prh: + utils.resume_unit_helper('random-config') + prh.assert_called_once_with(utils.resume_unit, 'random-config') + + @patch.object(utils, 'services') + def test_pause_resume_helper(self, services): + f = MagicMock() + services.return_value = 's1' + with patch.object(utils, 'assess_status_func') as asf: + asf.return_value = 'assessor' + utils._pause_resume_helper(f, 'some-config') + asf.assert_called_once_with('some-config') + # ports=None whilst port checks are disabled. + f.assert_called_once_with('assessor', services='s1', ports=None) From 2b2d6d39cb76ef9022a88283f3e2daae18b874cc Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 22 Mar 2016 19:29:44 +0000 Subject: [PATCH 1083/2699] Add hardening support Add charmhelpers.contrib.hardening and calls to install, config-changed, upgrade-charm and update-status hooks. Also add new config option to allow one or more hardening modules to be applied at runtime. 
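The hook side of this change is a decorator wrapped around each of the hooks listed above; a minimal sketch of the expected wiring (assuming the harden() decorator interface from charmhelpers.contrib.hardening.harden, so treat it as illustrative rather than the exact hunk):

    from charmhelpers.core.hookenv import Hooks
    from charmhelpers.contrib.hardening.harden import harden

    hooks = Hooks()

    @hooks.hook('install')
    @harden()
    def install():
        # The hook body itself is unchanged; the decorator applies
        # whichever hardening modules the 'harden' config option selects
        # around the hook's execution.
        pass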
Change-Id: If3e20565b1917828cb9fa2cf00b93bd13c1db00f --- ceph-proxy/charm-helpers-hooks.yaml | 1 + ceph-proxy/config.yaml | 6 + ceph-proxy/hardening.yaml | 5 + ceph-proxy/hooks/ceph_hooks.py | 10 + .../contrib/hardening/README.hardening.md | 38 ++ .../contrib/hardening/__init__.py | 15 + .../contrib/hardening/apache/__init__.py | 19 + .../hardening/apache/checks/__init__.py | 31 + .../contrib/hardening/apache/checks/config.py | 100 ++++ .../hardening/apache/templates/__init__.py | 0 .../hardening/apache/templates/alias.conf | 31 + .../hardening/apache/templates/hardening.conf | 18 + .../contrib/hardening/audits/__init__.py | 63 ++ .../contrib/hardening/audits/apache.py | 100 ++++ .../contrib/hardening/audits/apt.py | 105 ++++ .../contrib/hardening/audits/file.py | 552 ++++++++++++++++++ .../contrib/hardening/defaults/__init__.py | 0 .../contrib/hardening/defaults/apache.yaml | 13 + .../hardening/defaults/apache.yaml.schema | 9 + .../contrib/hardening/defaults/mysql.yaml | 38 ++ .../hardening/defaults/mysql.yaml.schema | 15 + .../contrib/hardening/defaults/os.yaml | 67 +++ .../contrib/hardening/defaults/os.yaml.schema | 42 ++ .../contrib/hardening/defaults/ssh.yaml | 49 ++ .../hardening/defaults/ssh.yaml.schema | 42 ++ .../charmhelpers/contrib/hardening/harden.py | 84 +++ .../contrib/hardening/host/__init__.py | 19 + .../contrib/hardening/host/checks/__init__.py | 50 ++ .../contrib/hardening/host/checks/apt.py | 39 ++ .../contrib/hardening/host/checks/limits.py | 55 ++ .../contrib/hardening/host/checks/login.py | 67 +++ .../hardening/host/checks/minimize_access.py | 52 ++ .../contrib/hardening/host/checks/pam.py | 134 +++++ .../contrib/hardening/host/checks/profile.py | 45 ++ .../hardening/host/checks/securetty.py | 39 ++ .../hardening/host/checks/suid_sgid.py | 131 +++++ .../contrib/hardening/host/checks/sysctl.py | 211 +++++++ .../hardening/host/templates/10.hardcore.conf | 8 + .../host/templates/99-juju-hardening.conf | 7 + .../hardening/host/templates/__init__.py | 0 .../hardening/host/templates/login.defs | 349 +++++++++++ .../contrib/hardening/host/templates/modules | 117 ++++ .../hardening/host/templates/passwdqc.conf | 11 + .../host/templates/pinerolo_profile.sh | 8 + .../hardening/host/templates/securetty | 11 + .../contrib/hardening/host/templates/tally2 | 14 + .../contrib/hardening/mysql/__init__.py | 19 + .../hardening/mysql/checks/__init__.py | 31 + .../contrib/hardening/mysql/checks/config.py | 89 +++ .../hardening/mysql/templates/__init__.py | 0 .../hardening/mysql/templates/hardening.cnf | 12 + .../contrib/hardening/ssh/__init__.py | 19 + .../contrib/hardening/ssh/checks/__init__.py | 31 + .../contrib/hardening/ssh/checks/config.py | 394 +++++++++++++ .../hardening/ssh/templates/__init__.py | 0 .../hardening/ssh/templates/ssh_config | 70 +++ .../hardening/ssh/templates/sshd_config | 159 +++++ .../contrib/hardening/templating.py | 71 +++ .../charmhelpers/contrib/hardening/utils.py | 157 +++++ .../contrib/storage/linux/ceph.py | 1 + ceph-proxy/hooks/charmhelpers/core/hookenv.py | 31 + ceph-proxy/hooks/charmhelpers/core/host.py | 50 +- .../charmhelpers/contrib/amulet/utils.py | 5 +- .../contrib/openstack/amulet/utils.py | 53 +- ceph-proxy/unit_tests/test_ceph_ops.py | 105 ++-- ceph-proxy/unit_tests/test_status.py | 5 +- ceph-proxy/unit_tests/test_upgrade_roll.py | 20 +- 67 files changed, 4052 insertions(+), 90 deletions(-) create mode 100644 ceph-proxy/hardening.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md create mode 100644 
ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh create mode 100644 
ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index eeee6f8c..f4b2a26a 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -11,3 +11,4 @@ include: - contrib.openstack.alternatives - contrib.network.ip - contrib.charmsupport + - contrib.hardening|inc=* \ No newline at end of file diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index c486a851..5e340a3a 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -125,3 +125,9 @@ options: default: True type: boolean description: Configure use of direct IO for OSD journals. + harden: + default: + type: string + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. diff --git a/ceph-proxy/hardening.yaml b/ceph-proxy/hardening.yaml new file mode 100644 index 00000000..314bb385 --- /dev/null +++ b/ceph-proxy/hardening.yaml @@ -0,0 +1,5 @@ +# Overrides file for contrib.hardening. See README.hardening in +# contrib.hardening for info on how to use this file. 
+ssh: + server: + use_pam: 'yes' # juju requires this diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 385afdd7..f6ab44a9 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -72,6 +72,7 @@ process_requests ) from charmhelpers.contrib.charmsupport import nrpe +from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() @@ -269,6 +270,7 @@ def install_upstart_scripts(): @hooks.hook('install.real') +@harden() def install(): execd_preinstall() add_source(config('source'), config('key')) @@ -318,6 +320,7 @@ def emit_cephconf(): @hooks.hook('config-changed') +@harden() def config_changed(): if config('prefer-ipv6'): assert_charm_supports_ipv6() @@ -553,6 +556,7 @@ def client_relation_changed(): @hooks.hook('upgrade-charm') +@harden() def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) @@ -630,6 +634,12 @@ def assess_status(): # reboot the ceph-mon process +@hooks.hook('update-status') +@harden() +def update_status(): + log('Updating status.') + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md new file mode 100644 index 00000000..91280c03 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md @@ -0,0 +1,38 @@ +# Juju charm-helpers hardening library + +## Description + +This library provides multiple implementations of system and application +hardening that conform to the standards of http://hardening.io/. + +Current implementations include: + + * OS + * SSH + * MySQL + * Apache + +## Requirements + +* Juju Charms + +## Usage + +1. Synchronise this library into your charm and add the harden() decorator + (from contrib.hardening.harden) to any functions or methods you want to use + to trigger hardening of your application/system. + +2. Add a config option called 'harden' to your charm config.yaml and set it to + a space-delimited list of hardening modules you want to run e.g. "os ssh" + +3. Override any config defaults (contrib.hardening.defaults) by adding a file + called hardening.yaml to your charm root containing the name(s) of the + modules whose settings you want to override at root level and then any + settings with overrides e.g. + + os: + general: + desktop_enable: True + +4. Now just run your charm as usual and hardening will be applied each time the + hook runs. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py new file mode 100644 index 00000000..a1335320 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see .
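The README steps above map directly onto the ceph_hooks.py changes in this patch. For a consumer charm, the wiring amounts to roughly the following sketch; the file name and hook body are hypothetical, and only the decorator, the config() helper and the 'harden' option come from the patch itself.

    # hooks/hooks.py -- hypothetical charm hook script following the README.
    import sys

    from charmhelpers.core.hookenv import Hooks, config, log
    from charmhelpers.contrib.hardening.harden import harden

    hooks = Hooks()


    @hooks.hook('config-changed')
    @harden()  # runs the modules listed in the charm's 'harden' option
    def config_changed():
        log("hardening modules requested: %s" % (config('harden') or 'none'))


    if __name__ == '__main__':
        hooks.execute(sys.argv)

An operator would then enable hardening at runtime with something like juju set <service> harden="os ssh" (juju 1.x syntax, assumed here rather than taken from the patch), and tune individual defaults through a hardening.yaml at the charm root, exactly as the ssh use_pam override above does.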
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py new file mode 100644 index 00000000..d1304792 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 00000000..8249ca01 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import os +import re +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. + + :returns: list of audits + """ + if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: + log("Apache server does not appear to be installed on this node - " + "skipping apache hardening", level=INFO) + return [] + + context = ApacheConfContext() + settings = utils.get_settings('apache') + audits = [ + FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', + group='root', mode=0o0640), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'mods-available/alias.conf'), + context, + TEMPLATES_DIR, + mode=0o0755, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'conf-enabled/hardening.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + DirectoryPermissionAudit(settings['common']['apache_dir'], + user='root', + group='root', + mode=0o640), + + DisabledModuleAudit(settings['hardening']['modules_to_disable']), + + NoReadWriteForOther(settings['common']['apache_dir']), + ] + + return audits + + +class ApacheConfContext(object): + """Defines the set of key/value pairs to set in an Apache config file. + + This context, when called, will return a dictionary containing the + key/value pairs of settings to specify in the + /etc/apache2/conf-enabled/hardening.conf file. + """ + def __call__(self): + settings = utils.get_settings('apache') + ctxt = settings['hardening'] + + out = subprocess.check_output(['apache2', '-v']) + ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', + out).group(1) + ctxt['apache_icondir'] = '/usr/share/apache2/icons/' + ctxt['traceenable'] = settings['hardening']['traceenable'] + return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf new file mode 100644 index 00000000..e46a58a3 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf @@ -0,0 +1,31 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + # + # Aliases: Add here as many aliases as you need (with no limit). The format is + # Alias fakename realname + # + # Note that if you include a trailing / on fakename then the server will + # require it to be present in the URL. So "/icons" isn't aliased in this + # example, only "/icons/".
If the fakename is slash-terminated, then the + # realname must also be slash terminated, and if the fakename omits the + # trailing slash, the realname must also omit it. + # + # We include the /icons/ alias for FancyIndexed directory listings. If + # you do not use FancyIndexing, you may comment this out. + # + Alias /icons/ "{{ apache_icondir }}/" + + + Options -Indexes -MultiViews -FollowSymLinks + AllowOverride None +{% if apache_version == '2.4' -%} + Require all granted +{% else -%} + Order allow,deny + Allow from all +{% endif %} + + diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf new file mode 100644 index 00000000..07945418 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf @@ -0,0 +1,18 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + + + # http://httpd.apache.org/docs/2.4/upgrading.html + {% if apache_version > '2.2' -%} + Require all granted + {% else -%} + Order Allow,Deny + Deny from all + {% endif %} + + + +TraceEnable {{ traceenable }} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py new file mode 100644 index 00000000..6a7057b3 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -0,0 +1,63 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + + +class BaseAudit(object): # NO-QA + """Base class for hardening checks. + + The lifecycle of a hardening check is to first check to see if the system + is in compliance for the specified check. If it is not in compliance, the + check takes whatever action is necessary to bring the system into + compliance. + """ + def __init__(self, *args, **kwargs): + self.unless = kwargs.get('unless', None) + super(BaseAudit, self).__init__() + + def ensure_compliance(self): + """Checks to see if the current hardening check is in compliance or + not. + + If the check that is performed is not in compliance, then an exception + should be raised. + """ + pass + + def _take_action(self): + """Determines whether to perform the action or not. + + Checks whether or not an action should be taken. This is determined by + the truthy value for the unless parameter. If unless is a callback + method, it will be invoked with no parameters in order to determine + whether or not the action should be taken. Otherwise, the truthy value + of the unless attribute will determine if the action should be + performed. + """ + # Do the action if there isn't an unless override. + if self.unless is None: + return True + + # Invoke the callback if there is one.
+ if hasattr(self.unless, '__call__'): + results = self.unless() + if results: + return False + else: + return True + + if self.unless: + return False + else: + return True diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py new file mode 100644 index 00000000..cf3c987d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import re +import subprocess + +from six import string_types + +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR, +) + +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class DisabledModuleAudit(BaseAudit): + """Audits Apache2 modules. + + Determines if the apache2 modules are enabled. If the modules are enabled + then they are removed in the ensure_compliance. + """ + def __init__(self, modules): + if modules is None: + self.modules = [] + elif isinstance(modules, string_types): + self.modules = [modules] + else: + self.modules = modules + + def ensure_compliance(self): + """Ensures that the modules are not loaded.""" + if not self.modules: + return + + try: + loaded_modules = self._get_loaded_modules() + non_compliant_modules = [] + for module in self.modules: + if module in loaded_modules: + log("Module '%s' is enabled but should not be." % + (module), level=INFO) + non_compliant_modules.append(module) + + if len(non_compliant_modules) == 0: + return + + for module in non_compliant_modules: + self._disable_module(module) + self._restart_apache() + except subprocess.CalledProcessError as e: + log('Error occurred auditing apache module compliance. ' + 'This may have been already reported. ' + 'Output is: %s' % e.output, level=ERROR) + + @staticmethod + def _get_loaded_modules(): + """Returns the modules which are enabled in Apache.""" + output = subprocess.check_output(['apache2ctl', '-M']) + modules = [] + for line in output.strip().splitlines(): + # Each line of the enabled module output looks like: + # module_name (static|shared) + # Plus a header line at the top of the output which is stripped + # out by the regex. NOTE: split into lines (not on whitespace) so + # that the leading space the regex anchors on is preserved. + matcher = re.search(r'^ (\S*)', line) + if matcher: + modules.append(matcher.group(1)) + return modules + + @staticmethod + def _disable_module(module): + """Disables the specified module in Apache.""" + try: + subprocess.check_call(['a2dismod', module]) + except subprocess.CalledProcessError as e: + # Note: catch error here to allow the attempt of disabling + # multiple modules in one go rather than failing after the + # first module fails. + log('Error occurred disabling module %s. 
' + 'Output is: %s' % (module, e.output), level=ERROR) + + @staticmethod + def _restart_apache(): + """Restarts the apache process""" + subprocess.check_output(['service', 'apache2', 'restart']) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py new file mode 100644 index 00000000..e94af031 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -0,0 +1,105 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from __future__ import absolute_import # required for external apt import +from apt import apt_pkg +from six import string_types + +from charmhelpers.fetch import ( + apt_cache, + apt_purge +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class AptConfig(BaseAudit): + + def __init__(self, config, **kwargs): + self.config = config + + def verify_config(self): + apt_pkg.init() + for cfg in self.config: + value = apt_pkg.config.get(cfg['key'], cfg.get('default', '')) + if value and value != cfg['expected']: + log("APT config '%s' has unexpected value '%s' " + "(expected='%s')" % + (cfg['key'], value, cfg['expected']), level=WARNING) + + def ensure_compliance(self): + self.verify_config() + + +class RestrictedPackages(BaseAudit): + """Class used to audit restricted packages on the system.""" + + def __init__(self, pkgs, **kwargs): + super(RestrictedPackages, self).__init__(**kwargs) + if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + self.pkgs = [pkgs] + else: + self.pkgs = pkgs + + def ensure_compliance(self): + cache = apt_cache() + + for p in self.pkgs: + if p not in cache: + continue + + pkg = cache[p] + if not self.is_virtual_package(pkg): + if not pkg.current_ver: + log("Package '%s' is not installed." % pkg.name, + level=DEBUG) + continue + else: + log("Restricted package '%s' is installed" % pkg.name, + level=WARNING) + self.delete_package(cache, pkg) + else: + log("Checking restricted virtual package '%s' provides" % + pkg.name, level=DEBUG) + self.delete_package(cache, pkg) + + def delete_package(self, cache, pkg): + """Deletes the package from the system. + + Deletes the package from the system, properly handling virtual + packages.
+ + :param cache: the apt cache + :param pkg: the package to remove + """ + if self.is_virtual_package(pkg): + log("Package '%s' appears to be virtual - purging provides" % + pkg.name, level=DEBUG) + for _p in pkg.provides_list: + self.delete_package(cache, _p[2].parent_pkg) + elif not pkg.current_ver: + log("Package '%s' not installed" % pkg.name, level=DEBUG) + return + else: + log("Purging package '%s'" % pkg.name, level=DEBUG) + apt_purge(pkg.name) + + def is_virtual_package(self, pkg): + return pkg.has_provides and not pkg.has_versions diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py new file mode 100644 index 00000000..0fb545a9 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -0,0 +1,552 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import grp +import os +import pwd +import re + +from subprocess import ( + CalledProcessError, + check_output, + check_call, +) +from traceback import format_exc +from six import string_types +from stat import ( + S_ISGID, + S_ISUID +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core import unitdata +from charmhelpers.core.host import file_hash +from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.contrib.hardening.templating import ( + get_template_path, + render_and_write, +) +from charmhelpers.contrib.hardening import utils + + +class BaseFileAudit(BaseAudit): + """Base class for file audits. + + Provides API stubs for the compliance check flow that must be implemented + by any class that inherits from this one. + """ + + def __init__(self, paths, always_comply=False, *args, **kwargs): + """ + :param paths: string path or list of paths of files to which we want + to apply compliance checks and criteria. + :param always_comply: if True, compliance criteria are always applied; + otherwise compliance is skipped for + non-existent paths. + """ + super(BaseFileAudit, self).__init__(*args, **kwargs) + self.always_comply = always_comply + if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + self.paths = [paths] + else: + self.paths = paths + + def ensure_compliance(self): + """Ensure that all the registered files comply with the registered + criteria. + """ + for p in self.paths: + if os.path.exists(p): + if self.is_compliant(p): + continue + + log('File %s is not in compliance.' % p, level=INFO) + else: + if not self.always_comply: + log("Non-existent path '%s' - skipping compliance check" + % (p), level=INFO) + continue + + if self._take_action(): + log("Applying compliance criteria to '%s'" % (p), level=INFO) + self.comply(p) + + def is_compliant(self, path): + """Audits the path to see if it is in compliance. + + :param path: the path to the file that should be checked.
+ """ + raise NotImplementedError + + def comply(self, path): + """Enforces the compliance of a path. + + :param path: the path to the file that should be enforced. + """ + raise NotImplementedError + + @classmethod + def _get_stat(cls, path): + """Returns the Posix st_stat information for the specified file path. + + :param path: the path to get the st_stat information for. + :returns: an st_stat object for the path or None if the path doesn't + exist. + """ + return os.stat(path) + + +class FilePermissionAudit(BaseFileAudit): + """Implements an audit for file permissions and ownership for a user. + + This class implements functionality that ensures that a specific user/group + will own the file(s) specified and that the permissions specified are + applied properly to the file. + """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
% path) + + if not self.recursive: + return super(DirectoryPermissionAudit, self).is_compliant(path) + + compliant = True + for root, dirs, _ in os.walk(path): + if len(dirs) > 0: + continue + + if not super(DirectoryPermissionAudit, self).is_compliant(root): + compliant = False + continue + + return compliant + + def comply(self, path): + for root, dirs, _ in os.walk(path): + if len(dirs) > 0: + super(DirectoryPermissionAudit, self).comply(root) + + +class ReadOnly(BaseFileAudit): + """Audits that files and folders are read only.""" + def __init__(self, paths, *args, **kwargs): + super(ReadOnly, self).__init__(paths=paths, *args, **kwargs) + + def is_compliant(self, path): + try: + output = check_output(['find', path, '-perm', '-go+w', + '-type', 'f']).strip() + + # The find above will find any files which have permission sets + # which allow too broad of write access. As such, the path is + # compliant if there is no output. + if output: + return False + + return True + except CalledProcessError as e: + log('Error occurred while finding writable files for %s. ' + 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + return False + + def comply(self, path): + try: + check_output(['chmod', 'go-w', '-R', path]) + except CalledProcessError as e: + log('Error occurred removing writeable permissions for %s. ' + 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class NoReadWriteForOther(BaseFileAudit): + """Ensures that no files found under the base path are readable or + writable by anyone other than the owner or the group. + """ + def __init__(self, paths): + super(NoReadWriteForOther, self).__init__(paths) + + def is_compliant(self, path): + try: + cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o', + '-perm', '-o+w', '-type', 'f'] + output = check_output(cmd).strip() + + # The find above will find any files which have read or + # write permissions for other, meaning there is too broad of access + # to read/write the file. As such, the path is compliant if there's + # no output. + if output: + return False + + return True + except CalledProcessError as e: + log('Error occurred while finding files which are readable or ' + 'writable to the world in %s. ' + 'Command output is: %s.' % (path, e.output), level=ERROR) + + def comply(self, path): + try: + check_output(['chmod', '-R', 'o-rw', path]) + except CalledProcessError as e: + log('Error occurred attempting to change modes of files under ' + 'path %s. Output of command is: %s' % (path, e.output)) + + +class NoSUIDSGIDAudit(BaseFileAudit): + """Audits that specified files do not have SUID/SGID bits set.""" + def __init__(self, paths, *args, **kwargs): + super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs) + + def is_compliant(self, path): + stat = self._get_stat(path) + if (stat.st_mode & (S_ISGID | S_ISUID)) != 0: + return False + + return True + + def comply(self, path): + try: + log('Removing suid/sgid from %s.' % path, level=DEBUG) + check_output(['chmod', '-s', path]) + except CalledProcessError as e: + log('Error occurred removing suid/sgid from %s.'
' + 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFile audit checks the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check whether the + content has changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no stored hashsum, the content + cannot be guaranteed to be the same, so treat it as changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.'
% template_path, + level=DEBUG) + return False + + # Here the template hasn't changed based upon the calculated + # checksum of the template and what was previously stored. + return True + + def contents_match(self, path): + """Determines if the file content is the same. + + This is determined by comparing the hashsum of the file contents and + the saved hashsum. If there is no stored hashsum, the content cannot + be guaranteed to be the same, so treat it as not the same. + Otherwise, return True if the hashsums are the same, False if they + are not the same. + + :param path: the file to check. + """ + checksum = file_hash(path) + + kv = unitdata.kv() + stored_checksum = kv.get('hardening:%s' % path) + if not stored_checksum: + # If the checksum hasn't been generated, return False to ensure + # the file is written and the checksum stored. + log('Checksum for %s has not been calculated.' % path, level=DEBUG) + return False + elif stored_checksum != checksum: + log('Checksum mismatch for %s.' % path, level=DEBUG) + return False + + return True + + def permissions_match(self, path): + """Determines if the file owner and permissions match. + + :param path: the path to check. + """ + audit = FilePermissionAudit(path, self.user, self.group, self.mode) + return audit.is_compliant(path) + + def save_checksum(self, path): + """Calculates and saves the checksum for the path specified. + + :param path: the path of the file to save the checksum. + """ + checksum = file_hash(path) + kv = unitdata.kv() + kv.set('hardening:%s' % path, checksum) + kv.flush() + + +class DeletedFile(BaseFileAudit): + """Audit to ensure that a file is deleted.""" + def __init__(self, paths): + super(DeletedFile, self).__init__(paths) + + def is_compliant(self, path): + return not os.path.exists(path) + + def comply(self, path): + os.remove(path) + + +class FileContentAudit(BaseFileAudit): + """Audit the contents of a file.""" + def __init__(self, paths, cases, **kwargs): + # Cases we expect to pass + self.pass_cases = cases.get('pass', []) + # Cases we expect to fail + self.fail_cases = cases.get('fail', []) + super(FileContentAudit, self).__init__(paths, **kwargs) + + def is_compliant(self, path): + """ + Given a set of content matching cases i.e. tuple(regex, bool) where + bool value denotes whether or not regex is expected to match, check that + all cases match as expected with the contents of the file. Cases can be + expected to pass or fail. + + :param path: Path of file to check. + :returns: Boolean value representing whether or not all cases are + found to be compliant. + """ + log("Auditing contents of file '%s'" % (path), level=DEBUG) + with open(path, 'r') as fd: + contents = fd.read() + + matches = 0 + for pattern in self.pass_cases: + key = re.compile(pattern, flags=re.MULTILINE) + results = re.search(key, contents) + if results: + matches += 1 + else: + log("Pattern '%s' was expected to pass but instead it failed" + % (pattern), level=WARNING) + + for pattern in self.fail_cases: + key = re.compile(pattern, flags=re.MULTILINE) + results = re.search(key, contents) + if not results: + matches += 1 + else: + log("Pattern '%s' was expected to fail but instead it passed" + % (pattern), level=WARNING) + + total = len(self.pass_cases) + len(self.fail_cases) + log("Checked %s cases and %s passed" % (total, matches), level=DEBUG) + return matches == total + + def comply(self, *args, **kwargs): + """NOOP since we just issue warnings. This is to avoid the + NotImplementedError.
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 00000000..e5ada29f --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,13 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] \ No newline at end of file diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 00000000..227589b5 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,9 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 00000000..682d22bf --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
+ +hardening: + mysql-conf: /etc/mysql/my.cnf + hardening-conf: /etc/mysql/conf.d/hardening.cnf + +security: + # @see http://www.symantec.com/connect/articles/securing-mysql-step-step + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot + chroot: None + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create + safe-user-create: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth + secure-auth: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links + skip-symbolic-links: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database + skip-show-database: True + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile + local-infile: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs + allow-suspicious-udfs: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges + automatic-sp-privileges: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv + secure-file-priv: /tmp diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema new file mode 100644 index 00000000..2edf325c --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema @@ -0,0 +1,15 @@ +# NOTE: this schema must contain all valid keys from its associated defaults +# file. It is used to validate user-provided overrides. +hardening: + mysql-conf: + hardening-conf: +security: + chroot: + safe-user-create: + secure-auth: + skip-symbolic-links: + skip-show-database: + local-infile: + allow-suspicious-udfs: + automatic-sp-privileges: + secure-file-priv: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml new file mode 100644 index 00000000..ddd4286c --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -0,0 +1,67 @@ +# NOTE: this file contains the default configuration for the 'os' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'os' as the root key followed by any of the following with new +# values.
+ +general: + desktop_enable: False # (type:boolean) + +environment: + extra_user_paths: [] + umask: 027 + root_path: / + +auth: + pw_max_age: 60 + # discourage password cycling + pw_min_age: 7 + retries: 5 + lockout_time: 600 + timeout: 60 + allow_homeless: False # (type:boolean) + pam_passwdqc_enable: True # (type:boolean) + pam_passwdqc_options: 'min=disabled,disabled,16,12,8' + root_ttys: + console + tty1 + tty2 + tty3 + tty4 + tty5 + tty6 + uid_min: 1000 + gid_min: 1000 + sys_uid_min: 100 + sys_uid_max: 999 + sys_gid_min: 100 + sys_gid_max: 999 + chfn_restrict: + +security: + users_allow: [] + suid_sgid_enforce: True # (type:boolean) + # user-defined blacklist and whitelist + suid_sgid_blacklist: [] + suid_sgid_whitelist: [] + # if this is True, remove any suid/sgid bits from files that were not in the whitelist + suid_sgid_dry_run_on_unknown: False # (type:boolean) + suid_sgid_remove_from_unknown: False # (type:boolean) + # remove packages with known issues + packages_clean: True # (type:boolean) + packages_list: + xinetd + inetd + ypserv + telnet-server + rsh-server + rsync + kernel_enable_module_loading: True # (type:boolean) + kernel_enable_core_dump: False # (type:boolean) + +sysctl: + kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 + kernel_enable_sysrq: False # (type:boolean) + forwarding: False # (type:boolean) + ipv6_enable: False # (type:boolean) + arp_restricted: True # (type:boolean) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema new file mode 100644 index 00000000..88b3966e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from its associated defaults +# file. It is used to validate user-provided overrides. +general: + desktop_enable: +environment: + extra_user_paths: + umask: + root_path: +auth: + pw_max_age: + pw_min_age: + retries: + lockout_time: + timeout: + allow_homeless: + pam_passwdqc_enable: + pam_passwdqc_options: + root_ttys: + uid_min: + gid_min: + sys_uid_min: + sys_uid_max: + sys_gid_min: + sys_gid_max: + chfn_restrict: +security: + users_allow: + suid_sgid_enforce: + suid_sgid_blacklist: + suid_sgid_whitelist: + suid_sgid_dry_run_on_unknown: + suid_sgid_remove_from_unknown: + packages_clean: + packages_list: + kernel_enable_module_loading: + kernel_enable_core_dump: +sysctl: + kernel_secure_sysrq: + kernel_enable_sysrq: + forwarding: + ipv6_enable: + arp_restricted: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml new file mode 100644 index 00000000..cd529bca --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml @@ -0,0 +1,49 @@ +# NOTE: this file contains the default configuration for the 'ssh' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'ssh' as the root key followed by any of the following with new +# values.
+ +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 00000000..d05e054b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from its associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 00000000..ac7568d6 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,84 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see .
+ +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + def _harden_inner1(f): + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + + def _harden_inner2(*args, **kwargs): + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 00000000..c3bd5985 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 00000000..2c221cda --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,39 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. 
+ + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 00000000..8ce9dc2b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,55 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening security limits audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Ensure that the /etc/security/limits.d directory is only writable + # by the root user, but others can execute and read. + audits.append(DirectoryPermissionAudit('/etc/security/limits.d', + user='root', group='root', + mode=0o755)) + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', + SecurityLimitsContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0440)) + return audits + + +class SecurityLimitsContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'disable_core_dump': + not settings['security']['kernel_enable_core_dump']} + return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py new file mode 100644 index 00000000..d32c4f60 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -0,0 +1,67 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
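The limits.py audit above only templates a limits.d snippet when core dumps are disabled. A rough sketch of the rendering step it delegates to TemplatedFile (which in charm-helpers is Jinja2-based), using an inline template that mirrors the 10.hardcore.conf template added later in this patch; the context value is illustrative:

    from jinja2 import Template

    hardcore = Template("{% if disable_core_dump -%}\n"
                        "* hard core 0\n"
                        "{% endif %}")
    # disable_core_dump is True when kernel_enable_core_dump is False
    ctxt = {'disable_core_dump': True}
    print(hardcore.render(ctxt))  # -> "* hard core 0"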
+ +from six import string_types + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening login.defs audits. + + :returns: dictionary of audits + """ + audits = [TemplatedFile('/etc/login.defs', LoginContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0444)] + return audits + + +class LoginContext(object): + + def __call__(self): + settings = utils.get_settings('os') + + # Octal numbers in yaml end up being turned into decimal, + # so check if the umask is entered as a string (e.g. '027') + # or as an octal umask as we know it (e.g. 002). If its not + # a string assume it to be octal and turn it into an octal + # string. + umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 00000000..c471064b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,52 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. + # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. 
+ audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) + + if 'change_user' not in settings['security']['users_allow']: + # su should only be accessible to user and group root, unless it is + # expressly defined to allow users to change to root via the + # security_users_allow config option. + audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) + + return audits diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py new file mode 100644 index 00000000..383fe28e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -0,0 +1,134 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from subprocess import ( + check_output, + CalledProcessError, +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) +from charmhelpers.fetch import ( + apt_install, + apt_purge, + apt_update, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + DeletedFile, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +def get_audits(): + """Get OS hardening PAM authentication audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + if settings['auth']['pam_passwdqc_enable']: + audits.append(PasswdqcPAM('/etc/passwdqc.conf')) + + if settings['auth']['retries']: + audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) + else: + audits.append(DeletedFile('/usr/share/pam-configs/tally2')) + + return audits + + +class PasswdqcPAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_pam_passwdqc_options'] = \ + settings['auth']['pam_passwdqc_options'] + + return ctxt + + +class PasswdqcPAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(PasswdqcPAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=PasswdqcPAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? 
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the Linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Always remove?
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 00000000..f7443357
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns: dictionary of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
+                                    ProfileContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    mode=0o0755, user='root', group='root'))
+    return audits
+
+
+class ProfileContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        return ctxt
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
new file mode 100644
index 00000000..e33c73ca
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening Secure TTY audits. + + :returns: dictionary of audits + """ + audits = [] + audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), + template_dir=TEMPLATES_DIR, + mode=0o0400, user='root', group='root')) + return audits + + +class SecureTTYContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ttys': settings['auth']['root_ttys']} + return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py new file mode 100644 index 00000000..0534689b --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -0,0 +1,131 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
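The securetty audit above reduces to a very small data flow: auth:root_ttys from the 'os' settings becomes one TTY per line in /etc/securetty (written root-owned, mode 0400). A minimal sketch, with an illustrative settings dict standing in for utils.get_settings('os'):

    settings = {'auth': {'root_ttys': ['console', 'tty1', 'tty2']}}
    ctxt = {'ttys': settings['auth']['root_ttys']}

    # Mirrors the securetty template: one TTY per line; root logins are
    # then only permitted on the listed TTYs.
    securetty_content = '\n'.join(ctxt['ttys'])
    print(securetty_content)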
+ +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit +from charmhelpers.contrib.hardening import utils + + +BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh', + '/usr/libexec/openssh/ssh-keysign', + '/usr/lib/openssh/ssh-keysign', + '/sbin/netreport', + '/usr/sbin/usernetctl', + '/usr/sbin/userisdnctl', + '/usr/sbin/pppd', + '/usr/bin/lockfile', + '/usr/bin/mail-lock', + '/usr/bin/mail-unlock', + '/usr/bin/mail-touchlock', + '/usr/bin/dotlockfile', + '/usr/bin/arping', + '/usr/sbin/uuidd', + '/usr/bin/mtr', + '/usr/lib/evolution/camel-lock-helper-1.2', + '/usr/lib/pt_chown', + '/usr/lib/eject/dmcrypt-get-device', + '/usr/lib/mc/cons.saver'] + +WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount', + '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at', + '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp', + '/usr/bin/passwd', '/usr/bin/ssh-agent', + '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev', + '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry', + '/bin/ping6', '/usr/bin/traceroute6.iputils', + '/sbin/mount.nfs', '/sbin/umount.nfs', + '/sbin/mount.nfs4', '/sbin/umount.nfs4', + '/usr/bin/crontab', + '/usr/bin/wall', '/usr/bin/write', + '/usr/bin/screen', + '/usr/bin/mlocate', + '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh', + '/bin/fusermount', + '/usr/bin/pkexec', + '/usr/bin/sudo', '/usr/bin/sudoedit', + '/usr/sbin/postdrop', '/usr/sbin/postqueue', + '/usr/sbin/suexec', + '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth', + '/usr/kerberos/bin/ksu', + '/usr/sbin/ccreds_validate', + '/usr/bin/Xorg', + '/usr/bin/X', + '/usr/lib/dbus-1.0/dbus-daemon-launch-helper', + '/usr/lib/vte/gnome-pty-helper', + '/usr/lib/libvte9/gnome-pty-helper', + '/usr/lib/libvte-2.90-9/gnome-pty-helper'] + + +def get_audits(): + """Get OS hardening suid/sgid audits. + + :returns: dictionary of audits + """ + checks = [] + settings = utils.get_settings('os') + if not settings['security']['suid_sgid_enforce']: + log("Skipping suid/sgid hardening", level=INFO) + return checks + + # Build the blacklist and whitelist of files for suid/sgid checks. + # There are a total of 4 lists: + # 1. the system blacklist + # 2. the system whitelist + # 3. the user blacklist + # 4. the user whitelist + # + # The blacklist is the set of paths which should NOT have the suid/sgid bit + # set and the whitelist is the set of paths which MAY have the suid/sgid + # bit setl. The user whitelist/blacklist effectively override the system + # whitelist/blacklist. + u_b = settings['security']['suid_sgid_blacklist'] + u_w = settings['security']['suid_sgid_whitelist'] + + blacklist = set(BLACKLIST) - set(u_w + u_b) + whitelist = set(WHITELIST) - set(u_b + u_w) + + checks.append(NoSUIDSGIDAudit(blacklist)) + + dry_run = settings['security']['suid_sgid_dry_run_on_unknown'] + + if settings['security']['suid_sgid_remove_from_unknown'] or dry_run: + # If the policy is a dry_run (e.g. complain only) or remove unknown + # suid/sgid bits then find all of the paths which have the suid/sgid + # bit set and then remove the whitelisted paths. + root_path = settings['environment']['root_path'] + unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist) + checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run)) + + return checks + + +def find_paths_with_suid_sgid(root_path): + """Finds all paths/files which have an suid/sgid bit enabled. 
+ + Starting with the root_path, this will recursively find all paths which + have an suid or sgid bit set. + """ + cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', + '-type', 'f', '!', '-path', '/proc/*', '-print'] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, _ = p.communicate() + return set(out.split('\n')) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py new file mode 100644 index 00000000..4a76d74e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -0,0 +1,211 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import platform +import re +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, + WARNING, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s +net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s +net.ipv4.conf.all.rp_filter=1 +net.ipv4.conf.default.rp_filter=1 +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_ratelimit=100 +net.ipv4.icmp_ratemask=88089 +net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s +net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s +net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s +net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s +net.ipv4.tcp_rfc1337=1 +net.ipv4.tcp_syncookies=1 +net.ipv4.conf.all.shared_media=1 +net.ipv4.conf.default.shared_media=1 +net.ipv4.conf.all.accept_source_route=0 +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv4.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv4.conf.all.secure_redirects=0 +net.ipv4.conf.default.secure_redirects=0 +net.ipv4.conf.all.send_redirects=0 +net.ipv4.conf.default.send_redirects=0 +net.ipv4.conf.all.log_martians=0 +net.ipv6.conf.default.router_solicitations=0 +net.ipv6.conf.default.accept_ra_rtr_pref=0 +net.ipv6.conf.default.accept_ra_pinfo=0 +net.ipv6.conf.default.accept_ra_defrtr=0 +net.ipv6.conf.default.autoconf=0 +net.ipv6.conf.default.dad_transmits=0 +net.ipv6.conf.default.max_addresses=1 +net.ipv6.conf.all.accept_ra=0 +net.ipv6.conf.default.accept_ra=0 +kernel.modules_disabled=%(kernel_modules_disabled)s +kernel.sysrq=%(kernel_sysrq)s +fs.suid_dumpable=%(fs_suid_dumpable)s +kernel.randomize_va_space=2 +""" + + +def get_audits(): + """Get OS hardening sysctl audits. 
+
+    :returns: dictionary of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        vendor = ''  # default in case /proc/cpuinfo has no vendor_id
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+    # NOTE: must subclass TemplatedFile so that the super() call below
+    # reaches a constructor that accepts these arguments.
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems
if sysctl cannot apply all settings it + # will return non-zero as well. + log("sysctl command returned an error (maybe some " + "keys could not be set) - %s" % (e), + level=WARNING) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf new file mode 100644 index 00000000..0014191f --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% if disable_core_dump -%} +# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. +* hard core 0 +{% endif %} \ No newline at end of file diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf new file mode 100644 index 00000000..101f1e1d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf @@ -0,0 +1,7 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% for key, value in sysctl_settings -%} +{{ key }}={{ value }} +{% endfor -%} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs new file mode 100644 index 00000000..db137d6d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -0,0 +1,349 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# +# /etc/login.defs - Configuration control definitions for the login package. +# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. All other items are optional - if not specified then +# the described action or option will be inhibited. +# +# Comment lines (lines beginning with "#") and blank lines are ignored. +# +# Modified for Linux. --marekm + +# REQUIRED for useradd/userdel/usermod +# Directory where mailboxes reside, _or_ name of file, relative to the +# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, +# MAIL_DIR takes precedence. +# +# Essentially: +# - MAIL_DIR defines the location of users mail spool files +# (for mbox use) by appending the username to MAIL_DIR as defined +# below. 
+# - MAIL_FILE defines the location of the users mail spool files as the +# fully-qualified filename obtained by prepending the user home +# directory before $MAIL_FILE +# +# NOTE: This is no more used for setting up users MAIL environment variable +# which is, starting from shadow 4.0.12-1 in Debian, entirely the +# job of the pam_mail PAM modules +# See default PAM configuration files provided for +# login, su, etc. +# +# This is a temporary situation: setting these variables will soon +# move to /etc/default/useradd and the variables will then be +# no more supported +MAIL_DIR /var/mail +#MAIL_FILE .mail + +# +# Enable logging and display of /var/log/faillog login failure info. +# This option conflicts with the pam_tally PAM module. +# +FAILLOG_ENAB yes + +# +# Enable display of unknown usernames when login failures are recorded. +# +# WARNING: Unknown usernames may become world readable. +# See #290803 and #298773 for details about how this could become a security +# concern +LOG_UNKFAIL_ENAB no + +# +# Enable logging of successful logins +# +LOG_OK_LOGINS yes + +# +# Enable "syslog" logging of su activity - in addition to sulog file logging. +# SYSLOG_SG_ENAB does the same for newgrp and sg. +# +SYSLOG_SU_ENAB yes +SYSLOG_SG_ENAB yes + +# +# If defined, all su activity is logged to this file. +# +#SULOG_FILE /var/log/sulog + +# +# If defined, file which maps tty line to TERM environment parameter. +# Each line of the file is in a format something like "vt100 tty01". +# +#TTYTYPE_FILE /etc/ttytype + +# +# If defined, login failures will be logged here in a utmp format +# last, when invoked as lastb, will read /var/log/btmp, so... +# +FTMP_FILE /var/log/btmp + +# +# If defined, the command name to display when running "su -". For +# example, if this is defined as "su" then a "ps" will display the +# command is "-su". If not defined, then "ps" would display the +# name of the shell actually being run, e.g. something like "-sh". +# +SU_NAME su + +# +# If defined, file which inhibits all the usual chatter during the login +# sequence. If a full pathname, then hushed mode will be enabled if the +# user's name or shell are found in the file. If not a full pathname, then +# hushed mode will be enabled if the file exists in the user's home directory. +# +HUSHLOGIN_FILE .hushlogin +#HUSHLOGIN_FILE /etc/hushlogins + +# +# *REQUIRED* The default PATH settings, for superuser and normal users. +# +# (they are minimal, add the rest in the shell startup files) +ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} + +# +# Terminal permissions +# +# TTYGROUP Login tty will be assigned this group ownership. +# TTYPERM Login tty will be set to this permission. +# +# If you have a "write" program which is "setgid" to a special group +# which owns the terminals, define TTYGROUP to the group number and +# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign +# TTYPERM to either 622 or 600. +# +# In Debian /usr/bin/bsd-write or similar programs are setgid tty +# However, the default and recommended value for TTYPERM is still 0600 +# to not allow anyone to write to anyone else console or terminal + +# Users can still allow other people to write them by issuing +# the "mesg y" command. + +TTYGROUP tty +TTYPERM 0600 + +# +# Login configuration initializations: +# +# ERASECHAR Terminal ERASE character ('\010' = backspace). 
+# KILLCHAR Terminal KILL character ('\025' = CTRL/U). +# UMASK Default "umask" value. +# +# The ERASECHAR and KILLCHAR are used only on System V machines. +# +# UMASK is the default umask value for pam_umask and is used by +# useradd and newusers to set the mode of the new home directories. +# 022 is the "historical" value in Debian for UMASK +# 027, or even 077, could be considered better for privacy +# There is no One True Answer here : each sysadmin must make up his/her +# mind. +# +# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value +# for private user groups, i. e. the uid is the same as gid, and username is +# the same as the primary group name: for these, the user permissions will be +# used as group permissions, e. g. 022 will become 002. +# +# Prefix these values with "0" to get octal, "0x" to get hexadecimal. +# +ERASECHAR 0177 +KILLCHAR 025 +UMASK {{ umask }} + +# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name. +# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user. +USERGROUPS_ENAB yes + +# +# Password aging controls: +# +# PASS_MAX_DAYS Maximum number of days a password may be used. +# PASS_MIN_DAYS Minimum number of days allowed between password changes. +# PASS_WARN_AGE Number of days warning given before a password expires. +# +PASS_MAX_DAYS {{ pwd_max_age }} +PASS_MIN_DAYS {{ pwd_min_age }} +PASS_WARN_AGE 7 + +# +# Min/max values for automatic uid selection in useradd +# +UID_MIN {{ uid_min }} +UID_MAX 60000 +# System accounts +SYS_UID_MIN {{ sys_uid_min }} +SYS_UID_MAX {{ sys_uid_max }} + +# Min/max values for automatic gid selection in groupadd +GID_MIN {{ gid_min }} +GID_MAX 60000 +# System accounts +SYS_GID_MIN {{ sys_gid_min }} +SYS_GID_MAX {{ sys_gid_max }} + +# +# Max number of login retries if password is bad. This will most likely be +# overriden by PAM, since the default pam_unix module has it's own built +# in of 3 retries. However, this is a safe fallback in case you are using +# an authentication module that does not enforce PAM_MAXTRIES. +# +LOGIN_RETRIES {{ login_retries }} + +# +# Max time in seconds for login +# +LOGIN_TIMEOUT {{ login_timeout }} + +# +# Which fields may be changed by regular users using chfn - use +# any combination of letters "frwh" (full name, room number, work +# phone, home phone). If not defined, no changes are allowed. +# For backward compatibility, "yes" = "rwh" and "no" = "frwh". +# +{% if chfn_restrict %} +CHFN_RESTRICT {{ chfn_restrict }} +{% endif %} + +# +# Should login be allowed if we can't cd to the home directory? +# Default in no. +# +DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %} + +# +# If defined, this command is run when removing a user. +# It should remove any at/cron/print jobs etc. owned by +# the user to be removed (passed as the first argument). +# +#USERDEL_CMD /usr/sbin/userdel_local + +# +# Enable setting of the umask group bits to be the same as owner bits +# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is +# the same as gid, and username is the same as the primary group name. +# +# If set to yes, userdel will remove the user´s group if it contains no +# more members, and useradd will create by default a group with the name +# of the user. 
+# +USERGROUPS_ENAB yes + +# +# Instead of the real user shell, the program specified by this parameter +# will be launched, although its visible name (argv[0]) will be the shell's. +# The program may do whatever it wants (logging, additional authentification, +# banner, ...) before running the actual shell. +# +# FAKE_SHELL /bin/fakeshell + +# +# If defined, either full pathname of a file containing device names or +# a ":" delimited list of device names. Root logins will be allowed only +# upon these devices. +# +# This variable is used by login and su. +# +#CONSOLE /etc/consoles +#CONSOLE console:tty01:tty02:tty03:tty04 + +# +# List of groups to add to the user's supplementary group set +# when logging in on the console (as determined by the CONSOLE +# setting). Default is none. +# +# Use with caution - it is possible for users to gain permanent +# access to these groups, even when not logged in on the console. +# How to do it is left as an exercise for the reader... +# +# This variable is used by login and su. +# +#CONSOLE_GROUPS floppy:audio:cdrom + +# +# If set to "yes", new passwords will be encrypted using the MD5-based +# algorithm compatible with the one used by recent releases of FreeBSD. +# It supports passwords of unlimited length and longer salt strings. +# Set to "no" if you need to copy encrypted passwords to other systems +# which don't understand the new algorithm. Default is "no". +# +# This variable is deprecated. You should use ENCRYPT_METHOD. +# +MD5_CRYPT_ENAB no + +# +# If set to MD5 , MD5-based algorithm will be used for encrypting password +# If set to SHA256, SHA256-based algorithm will be used for encrypting password +# If set to SHA512, SHA512-based algorithm will be used for encrypting password +# If set to DES, DES-based algorithm will be used for encrypting password (default) +# Overrides the MD5_CRYPT_ENAB option +# +# Note: It is recommended to use a value consistent with +# the PAM modules configuration. +# +ENCRYPT_METHOD SHA512 + +# +# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. +# +# Define the number of SHA rounds. +# With a lot of rounds, it is more difficult to brute forcing the password. +# But note also that it more CPU resources will be needed to authenticate +# users. +# +# If not specified, the libc will choose the default number of rounds (5000). +# The values must be inside the 1000-999999999 range. +# If only one of the MIN or MAX values is set, then this value will be used. +# If MIN > MAX, the highest value will be used. +# +# SHA_CRYPT_MIN_ROUNDS 5000 +# SHA_CRYPT_MAX_ROUNDS 5000 + +################# OBSOLETED BY PAM ############## +# # +# These options are now handled by PAM. Please # +# edit the appropriate file in /etc/pam.d/ to # +# enable the equivelants of them. +# +############### + +#MOTD_FILE +#DIALUPS_CHECK_ENAB +#LASTLOG_ENAB +#MAIL_CHECK_ENAB +#OBSCURE_CHECKS_ENAB +#PORTTIME_CHECKS_ENAB +#SU_WHEEL_ONLY +#CRACKLIB_DICTPATH +#PASS_CHANGE_TRIES +#PASS_ALWAYS_WARN +#ENVIRON_FILE +#NOLOGINS_FILE +#ISSUE_FILE +#PASS_MIN_LEN +#PASS_MAX_LEN +#ULIMIT +#ENV_HZ +#CHFN_AUTH +#CHSH_AUTH +#FAIL_DELAY + +################# OBSOLETED ####################### +# # +# These options are no more handled by shadow. # +# # +# Shadow utilities will display a warning if they # +# still appear. 
# +# # +################################################### + +# CLOSE_SESSIONS +# LOGIN_STRING +# NO_PASSWORD_CONSOLE +# QMAIL_DIR + + + diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules new file mode 100644 index 00000000..ef0354ee --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules @@ -0,0 +1,117 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +# Arch +# ---- +# +# Modules for certains builds, contains support modules and some CPU-specific optimizations. + +{% if arch == "x86_64" -%} +# Optimize for x86_64 cryptographic features +twofish-x86_64-3way +twofish-x86_64 +aes-x86_64 +salsa20-x86_64 +blowfish-x86_64 +{% endif -%} + +{% if cpuVendor == "intel" -%} +# Intel-specific optimizations +ghash-clmulni-intel +aesni-intel +kvm-intel +{% endif -%} + +{% if cpuVendor == "amd" -%} +# AMD-specific optimizations +kvm-amd +{% endif -%} + +kvm + + +# Crypto +# ------ + +# Some core modules which comprise strong cryptography. +blowfish_common +blowfish_generic +ctr +cts +lrw +lzo +rmd160 +rmd256 +rmd320 +serpent +sha512_generic +twofish_common +twofish_generic +xts +zlib + + +# Drivers +# ------- + +# Basics +lp +rtc +loop + +# Filesystems +ext2 +btrfs + +{% if desktop_enable -%} +# Desktop +psmouse +snd +snd_ac97_codec +snd_intel8x0 +snd_page_alloc +snd_pcm +snd_timer +soundcore +usbhid +{% endif -%} + +# Lib +# --- +xz + + +# Net +# --- + +# All packets needed for netfilter rules (ie iptables, ebtables). +ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 00000000..f98d14e5 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 00000000..fd2de791 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 00000000..15b18d4e --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 00000000..d9620299 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 00000000..d4f0ec19 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 00000000..3af8b89d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,89 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. 
+ + :returns: dictionary of audits + """ + if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: + log("MySQL does not appear to be installed on this node - " + "skipping mysql hardening", level=WARNING) + return [] + + settings = utils.get_settings('mysql') + hardening_settings = settings['hardening'] + my_cnf = hardening_settings['mysql-conf'] + + audits = [ + FilePermissionAudit(paths=[my_cnf], user='root', + group='root', mode=0o0600), + + TemplatedFile(hardening_settings['hardening-conf'], + MySQLConfContext(), + TEMPLATES_DIR, + mode=0o0750, + user='mysql', + group='root', + service_actions=[{'service': 'mysql', + 'actions': ['restart']}]), + + # MySQL and Percona charms do not allow configuration of the + # data directory, so use the default. + DirectoryPermissionAudit('/var/lib/mysql', + user='mysql', + group='mysql', + recursive=False, + mode=0o755), + + DirectoryPermissionAudit('/etc/mysql', + user='root', + group='root', + recursive=False, + mode=0o700), + ] + + return audits + + +class MySQLConfContext(object): + """Defines the set of key/value pairs to set in a mysql config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/mysql/conf.d/hardening.cnf file. + """ + def __call__(self): + settings = utils.get_settings('mysql') + # Translate for python3 + return {'mysql_settings': + [(k, v) for k, v in six.iteritems(settings['security'])]} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf new file mode 100644 index 00000000..8242586c --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf @@ -0,0 +1,12 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +[mysqld] +{% for setting, value in mysql_settings -%} +{% if value == 'True' -%} +{{ setting }} +{% elif value != 'None' and value != None -%} +{{ setting }} = {{ value }} +{% endif -%} +{% endfor -%} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
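The hardening.cnf template above encodes three rendering rules for the MySQL security settings: boolean-true values become bare [mysqld] options, None values are dropped, and everything else becomes 'key = value'. A small sketch of the same logic in plain Python (the example settings are illustrative):

    mysql_settings = [('local-infile', '0'),
                      ('skip-show-database', 'True'),
                      ('secure-auth', None)]

    lines = ['[mysqld]']
    for setting, value in mysql_settings:
        if value == 'True':
            lines.append(setting)               # bare option
        elif value not in (None, 'None'):
            lines.append('%s = %s' % (setting, value))
        # None/'None' values are skipped entirely
    print('\n'.join(lines))
    # [mysqld]
    # local-infile = 0
    # skip-show-database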
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 00000000..b85150d5 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.ssh.checks import config + + +def run_ssh_checks(): + log("Starting SSH hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("SSH hardening checks complete.", level=DEBUG) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py new file mode 100644 index 00000000..3fb6ae8d --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -0,0 +1,394 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from charmhelpers.core.host import lsb_release +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get SSH hardening config audits. 
+
+    :returns: dictionary of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer macs on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.mkdir('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': settings['server']['listen_to'],
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot': settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.mkdir('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+        if settings['client']['roaming']:
+            self.pass_cases.append(r'^UseRoaming yes$')
+        else:
+            self.fail_cases.append(r'^UseRoaming yes$')
+
+        return super(SSHConfigFileContentAudit, self).is_compliant(*args,
+                                                                   **kwargs)
+
+
+class SSHDConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/sshd_config'
+        super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 00000000..9742d8e2 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. 
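Both audits above follow the same convention: is_compliant() assembles two lists of regexes and defers to FileContentAudit for the matching. Assuming the natural semantics (every pass case must match the rendered file, no fail case may match - the actual matching lives in audits/file.py), the idea reduces to a sketch like:

    import re

    def compliant(content, pass_cases, fail_cases):
        # every pass-case regex must match somewhere in the file...
        passed = all(re.search(p, content, re.MULTILINE) for p in pass_cases)
        # ...and no fail-case regex may match anywhere
        failed = any(re.search(f, content, re.MULTILINE) for f in fail_cases)
        return passed and not failed

    rendered = ("KexAlgorithms curve25519-sha256@libssh.org,"
                "diffie-hellman-group-exchange-sha256\n")
    print(compliant(
        rendered,
        [r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$'],
        [r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?']))  # True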
+ +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. +{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 00000000..5f87298a --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication {{ password_authentication }} + +# Kerberos options +KerberosAuthentication no +KerberosGetAFSToken no +KerberosOrLocalPasswd no +KerberosTicketCleanup yes + +# GSSAPI options +GSSAPIAuthentication no +GSSAPICleanupCredentials yes + +X11Forwarding {{ allow_x11_forwarding }} +X11DisplayOffset 10 +X11UseLocalhost yes +GatewayPorts no +PrintMotd {{ print_motd }} +PrintLastLog {{ print_last_log }} +TCPKeepAlive no +UseLogin no + +ClientAliveInterval {{ client_alive_interval }} +ClientAliveCountMax {{ client_alive_count }} +AllowTcpForwarding {{ allow_tcp_forwarding }} +AllowAgentForwarding {{ allow_agent_forwarding }} + +MaxStartups 10:30:100 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 00000000..d2ab7dc9
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,71 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+import os
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+
+try:
+    from jinja2 import FileSystemLoader, Environment
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    from charmhelpers.fetch import apt_update
+    apt_update(fatal=True)
+    apt_install('python-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, Environment
+
+
+# NOTE: function separated from main rendering code to facilitate easier
+#       mocking in unit tests.
+def write(path, data):
+    with open(path, 'wb') as out:
+        out.write(data)
+
+
+def get_template_path(template_dir, path):
+    """Return the path of the template used to render the given file.
+
+    The template is looked up by the basename of the target path.
+    :param template_dir: the directory the templates are located in
+    :param path: the file path to be written to.
+    :returns: path to the template file
+    """
+    return os.path.join(template_dir, os.path.basename(path))
+
+
+def render_and_write(template_dir, path, context):
+    """Renders the specified template into the file.
+
+    :param template_dir: the directory to load the template from
+    :param path: the path to write the templated contents to
+    :param context: the parameters to pass to the rendering engine
+    """
+    env = Environment(loader=FileSystemLoader(template_dir))
+    template_file = os.path.basename(path)
+    template = env.get_template(template_file)
+    log('Rendering from template: %s' % template.name, level=DEBUG)
+    rendered_content = template.render(context)
+    if not rendered_content:
+        log("Render returned no content - skipping '%s'" % path,
+            level=WARNING)
+        return
+
+    write(path, rendered_content.encode('utf-8').strip())
+    log('Wrote template %s' % path, level=DEBUG)
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py
new file mode 100644
index 00000000..a6743a4d
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py
@@ -0,0 +1,157 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+import glob
+import grp
+import os
+import pwd
+import six
+import yaml
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+
+
+# Global settings cache. Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+    """Load the default config for the provided modules.
+
+    :param modules: stack modules config defaults to lookup.
+    :returns: modules default config dictionary.
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys in
+    the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+    """
+    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+                             'hardening.yaml')
+    if os.path.exists(overrides):
+        log("Found user-provided config overrides file '%s'" %
+            (overrides), level=DEBUG)
+        settings = yaml.safe_load(open(overrides))
+        if settings and settings.get(modules):
+            log("Applying '%s' overrides" % (modules), level=DEBUG)
+            return settings.get(modules)
+
+        log("No overrides found for '%s'" % (modules), level=DEBUG)
+    else:
+        log("No hardening config overrides file '%s' found in charm "
+            "root dir" % (overrides), level=DEBUG)
+
+    return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+    """Get the user-provided overrides overlaid onto the module defaults.
+
+    :param settings: module default config to apply overrides to.
+    :returns: dictionary of modules config with user overrides applied.
+    """
+    if overrides:
+        for k, v in six.iteritems(overrides):
+            if k in schema:
+                if schema[k] is None:
+                    settings[k] = v
+                elif type(schema[k]) is dict:
+                    settings[k] = _apply_overrides(settings[k], overrides[k],
+                                                   schema[k])
+                else:
+                    raise Exception("Unexpected type found in schema '%s'" %
+                                    type(schema[k]))
+            else:
+                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+    return settings
+
+
+def get_settings(modules):
+    global __SETTINGS__
+    if modules in __SETTINGS__:
+        return __SETTINGS__[modules]
+
+    schema = _get_schema(modules)
+    settings = _get_defaults(modules)
+    overrides = _get_user_provided_overrides(modules)
+    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+    return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+    """Ensure permissions for path.
+
+    If path is a file, apply to file and return. If path is a directory,
+    apply recursively (if required) to directory contents and return.
+
+    :param user: user name
+    :param group: group name
+    :param permissions: octal permissions
+    :param maxdepth: maximum recursion depth. A negative maxdepth allows
+                     infinite recursion and maxdepth=0 means no recursion.
+    :returns: None
+    """
+    if not os.path.exists(path):
+        log("File '%s' does not exist - cannot set permissions" % (path),
+            level=WARNING)
+        return
+
+    _user = pwd.getpwnam(user)
+    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
+    os.chmod(path, permissions)
+
+    if maxdepth == 0:
+        log("Max recursion depth reached - skipping further recursion",
+            level=DEBUG)
+        return
+    elif maxdepth > 0:
+        maxdepth -= 1
+
+    if os.path.isdir(path):
+        contents = glob.glob("%s/*" % (path))
+        for c in contents:
+            ensure_permissions(c, user=user, group=group,
+                               permissions=permissions, maxdepth=maxdepth)
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 14549174..1b4b1de7 100644
--- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -283,6 +283,7 @@ def get_mon_map(service):
                 e.message))
         raise
 
+
 def hash_monitor_names(service):
     """
     Uses the get_mon_map() function to get information about the monitor
diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py
index 2dd70bc9..01321296 100644
--- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py
@@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
     subprocess.check_call(cmd)
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """Used to fetch the resource path of the given name.
+
+    Must match the name of a resource defined in metadata.yaml.
+
+    :returns: either a path or False if the resource is not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
 @cached
 def juju_version():
     """Full version string (eg. '1.23.3.1-trusty-amd64')"""
@@ -976,3 +994,16 @@ def _run_atexit():
     for callback, args, kwargs in reversed(_atexit):
         callback(*args, **kwargs)
     del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    return subprocess.check_output(cmd).strip()
diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py
index a7720906..481087bb 100644
--- a/ceph-proxy/hooks/charmhelpers/core/host.py
+++ b/ceph-proxy/hooks/charmhelpers/core/host.py
@@ -30,6 +30,8 @@ import string
 import subprocess
 import hashlib
 
+import functools
+import itertools
 from contextlib import contextmanager
 from collections import OrderedDict
 
@@ -428,27 +430,47 @@ def config_changed():
     restarted if any file matching the pattern got changed, created
     or removed. Standard wildcards are supported, see documentation
     for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+ @param stopstart: DEFAULT false; whether to stop, start OR restart + @returns result from decorated function """ def wrap(f): + @functools.wraps(f) def wrapped_f(*args, **kwargs): - checksums = {path: path_hash(path) for path in restart_map} - f(*args, **kwargs) - restarts = [] - for path in restart_map: - if path_hash(path) != checksums[path]: - restarts += restart_map[path] - services_list = list(OrderedDict.fromkeys(restarts)) - if not stopstart: - for service_name in services_list: - service('restart', service_name) - else: - for action in ['stop', 'start']: - for service_name in services_list: - service(action, service_name) + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart) return wrapped_f return wrap +def restart_on_change_helper(lambda_f, restart_map, stopstart=False): + """Helper function to perform the restart_on_change function. + + This is provided for decorators to restart services if files described + in the restart_map have changed after an invocation of lambda_f(). + + @param lambda_f: function to call. + @param restart_map: {file: [service, ...]} + @param stopstart: whether to stop, start or restart a service + @returns result of lambda_f() + """ + checksums = {path: path_hash(path) for path in restart_map} + r = lambda_f() + # create a list of lists of the services to restart + restarts = [restart_map[path] + for path in restart_map + if path_hash(path) != checksums[path]] + # create a flat list of ordered services without duplicates from lists + services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) + if services_list: + actions = ('stop', 'start') if stopstart else ('restart',) + for action in actions: + for service_name in services_list: + service(action, service_name) + return r + + def lsb_release(): """Return /etc/lsb-release in a dict""" d = {} diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index a967b4f8..3e159039 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -781,8 +781,9 @@ def get_uuid_epoch_stamp(self): return '[{}-{}]'.format(uuid.uuid4(), time.time()) # amulet juju action helpers: - def run_action(self, unit_sentry, action, params=None, - _check_output=subprocess.check_output): + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output, + params=None): """Run the named action on a given unit sentry. params a dict of parameters to use diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 388b60e6..ef3bdccf 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,7 +27,11 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client -import novaclient.v1_1.client as nova_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + +import novaclient.client as nova_client import pika import swiftclient @@ -38,6 +42,8 @@ DEBUG = logging.DEBUG ERROR = logging.ERROR +NOVA_CLIENT_VERSION = "2" + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. 
@@ -139,7 +145,7 @@ def validate_role_data(self, expected, actual): return "role {} does not exist".format(e['name']) return ret - def validate_user_data(self, expected, actual): + def validate_user_data(self, expected, actual, api_version=None): """Validate user data. Validate a list of actual user data vs a list of expected user @@ -150,10 +156,15 @@ def validate_user_data(self, expected, actual): for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +199,30 @@ def authenticate_cinder_admin(self, keystone_sentry, username, return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" @@ -225,7 +251,8 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): diff --git a/ceph-proxy/unit_tests/test_ceph_ops.py b/ceph-proxy/unit_tests/test_ceph_ops.py index 88e64c7d..5e82fa8b 100644 --- a/ceph-proxy/unit_tests/test_ceph_ops.py +++ b/ceph-proxy/unit_tests/test_ceph_ops.py @@ -1,22 +1,21 @@ __author__ = 'chris' import json -from hooks import ceph_broker - -import mock import unittest +from mock import ( + call, + patch, +) + +from hooks import ceph_broker + class TestCephOps(unittest.TestCase): - """ - @mock.patch('ceph_broker.log') - def test_connect(self, mock_broker): - self.fail() - """ - - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.create_erasure_profile') - def test_create_erasure_profile(self, mock_create_erasure, mock_log): + + 
@patch.object(ceph_broker, 'create_erasure_profile') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_create_erasure_profile(self, mock_create_erasure): req = json.dumps({'api-version': 1, 'ops': [{ 'op': 'create-erasure-profile', @@ -36,13 +35,15 @@ def test_create_erasure_profile(self, mock_create_erasure, mock_log): erasure_plugin_name='jerasure') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.ReplicatedPool.create') + @patch.object(ceph_broker, 'get_osds') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker, 'ReplicatedPool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_replicated_pool(self, mock_replicated_pool, mock_pool_exists, - mock_log): + mock_get_osds): + mock_get_osds.return_value = 0 mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -53,14 +54,14 @@ def test_process_requests_create_replicated_pool(self, }]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with() + calls = [call(pg_num=None, name=u'foo', service='admin', replicas=3)] + mock_replicated_pool.assert_has_calls(calls) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.delete_pool') + @patch.object(ceph_broker, 'delete_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_delete_pool(self, - mock_delete_pool, - mock_log): + mock_delete_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'delete-pool', @@ -70,14 +71,13 @@ def test_process_requests_delete_pool(self, mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.ErasurePool.create') - @mock.patch('hooks.ceph_broker.erasure_profile_exists') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.ErasurePool, 'create') + @patch.object(ceph_broker, 'erasure_profile_exists') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool, - mock_pool_exists, - mock_log): + mock_pool_exists): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -92,11 +92,11 @@ def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool.assert_called_with() self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.Pool.add_cache_tier') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.Pool, 'add_cache_tier') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_cache_tier(self, mock_pool, - mock_pool_exists, mock_log): + mock_pool_exists): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -113,11 +113,11 @@ def test_process_requests_create_cache_tier(self, mock_pool, mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - 
@mock.patch('hooks.ceph_broker.Pool.remove_cache_tier') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.Pool, 'remove_cache_tier') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_remove_cache_tier(self, mock_pool, - mock_pool_exists, mock_log): + mock_pool_exists): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -130,9 +130,9 @@ def test_process_requests_remove_cache_tier(self, mock_pool, mock_pool.assert_called_with(cache_pool='foo-ssd') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.snapshot_pool') - def test_snapshot_pool(self, mock_snapshot_pool, mock_log): + @patch.object(ceph_broker, 'snapshot_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_snapshot_pool(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'snapshot-pool', @@ -146,9 +146,9 @@ def test_snapshot_pool(self, mock_snapshot_pool, mock_log): snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.rename_pool') - def test_rename_pool(self, mock_rename_pool, mock_log): + @patch.object(ceph_broker, 'rename_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_rename_pool(self, mock_rename_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'rename-pool', @@ -161,9 +161,9 @@ def test_rename_pool(self, mock_rename_pool, mock_log): new_name='foo2') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.remove_pool_snapshot') - def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): + @patch.object(ceph_broker, 'remove_pool_snapshot') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_remove_pool_snapshot(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'remove-pool-snapshot', @@ -176,9 +176,9 @@ def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_set') - def test_set_pool_value(self, mock_set_pool, mock_broker): + @patch.object(ceph_broker, 'pool_set') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_set_pool_value(self, mock_set_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'set-pool-value', @@ -193,8 +193,8 @@ def test_set_pool_value(self, mock_set_pool, mock_broker): value=3) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - def test_set_invalid_pool_value(self, mock_broker): + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_set_invalid_pool_value(self): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'set-pool-value', @@ -203,15 +203,8 @@ def test_set_invalid_pool_value(self, mock_broker): 'value': 'abc', }]}) rc = ceph_broker.process_requests(reqs) - # self.assertRaises(AssertionError) self.assertEqual(json.loads(rc)['exit-code'], 1) - ''' - @mock.patch('ceph_broker.log') - def test_set_pool_max_bytes(self, mock_broker): - self.fail() - ''' - if __name__ == '__main__': unittest.main() diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index 88625908..46cc0178 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ 
b/ceph-proxy/unit_tests/test_status.py @@ -1,7 +1,10 @@ import mock import test_utils -import ceph_hooks as hooks +with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks as hooks TO_PATCH = [ 'status_set', diff --git a/ceph-proxy/unit_tests/test_upgrade_roll.py b/ceph-proxy/unit_tests/test_upgrade_roll.py index 8af24ac5..dd0ae231 100644 --- a/ceph-proxy/unit_tests/test_upgrade_roll.py +++ b/ceph-proxy/unit_tests/test_upgrade_roll.py @@ -7,7 +7,11 @@ sys.path.append('/home/chris/repos/ceph-mon/hooks') import test_utils -import ceph_hooks + +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks TO_PATCH = [ 'hookenv', @@ -111,11 +115,18 @@ def test_roll_monitor_cluster_second(self, 'Waiting on ip-192-168-1-2 to finish upgrading') lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + @patch.object(ceph_hooks, 'time') @patch('ceph_hooks.monitor_key_get') @patch('ceph_hooks.monitor_key_exists') - def test_wait_on_previous_node(self, - monitor_key_exists, - monitor_key_get): + def test_wait_on_previous_node(self, monitor_key_exists, monitor_key_get, + mock_time): + tval = [previous_node_start_time] + + def fake_time(): + tval[0] += 100 + return tval[0] + + mock_time.time.side_effect = fake_time monitor_key_get.side_effect = monitor_key_side_effect monitor_key_exists.return_value = False @@ -134,3 +145,4 @@ def test_wait_on_previous_node(self, [call('Previous node is: ip-192-168-1-2')], [call('ip-192-168-1-2 is not finished. Waiting')], ) + self.assertEqual(tval[0], previous_node_start_time + 700) From d345c1a17b66c39935756a4142748a89c0cb5dcc Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 22 Mar 2016 19:29:44 +0000 Subject: [PATCH 1084/2699] Add hardening support Add charmhelpers.contrib.hardening and calls to install, config-changed, upgrade-charm and update-status hooks. Also add new config option to allow one or more hardening modules to be applied at runtime. 
Change-Id: If3e20565b1917828cb9fa2cf00b93bd13c1db00f --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/config.yaml | 6 + ceph-mon/hardening.yaml | 5 + ceph-mon/hooks/ceph_hooks.py | 10 + .../contrib/hardening/README.hardening.md | 38 ++ .../contrib/hardening/__init__.py | 15 + .../contrib/hardening/apache/__init__.py | 19 + .../hardening/apache/checks/__init__.py | 31 + .../contrib/hardening/apache/checks/config.py | 100 ++++ .../hardening/apache/templates/__init__.py | 0 .../hardening/apache/templates/alias.conf | 31 + .../hardening/apache/templates/hardening.conf | 18 + .../contrib/hardening/audits/__init__.py | 63 ++ .../contrib/hardening/audits/apache.py | 100 ++++ .../contrib/hardening/audits/apt.py | 105 ++++ .../contrib/hardening/audits/file.py | 552 ++++++++++++++++++ .../contrib/hardening/defaults/__init__.py | 0 .../contrib/hardening/defaults/apache.yaml | 13 + .../hardening/defaults/apache.yaml.schema | 9 + .../contrib/hardening/defaults/mysql.yaml | 38 ++ .../hardening/defaults/mysql.yaml.schema | 15 + .../contrib/hardening/defaults/os.yaml | 67 +++ .../contrib/hardening/defaults/os.yaml.schema | 42 ++ .../contrib/hardening/defaults/ssh.yaml | 49 ++ .../hardening/defaults/ssh.yaml.schema | 42 ++ .../charmhelpers/contrib/hardening/harden.py | 84 +++ .../contrib/hardening/host/__init__.py | 19 + .../contrib/hardening/host/checks/__init__.py | 50 ++ .../contrib/hardening/host/checks/apt.py | 39 ++ .../contrib/hardening/host/checks/limits.py | 55 ++ .../contrib/hardening/host/checks/login.py | 67 +++ .../hardening/host/checks/minimize_access.py | 52 ++ .../contrib/hardening/host/checks/pam.py | 134 +++++ .../contrib/hardening/host/checks/profile.py | 45 ++ .../hardening/host/checks/securetty.py | 39 ++ .../hardening/host/checks/suid_sgid.py | 131 +++++ .../contrib/hardening/host/checks/sysctl.py | 211 +++++++ .../hardening/host/templates/10.hardcore.conf | 8 + .../host/templates/99-juju-hardening.conf | 7 + .../hardening/host/templates/__init__.py | 0 .../hardening/host/templates/login.defs | 349 +++++++++++ .../contrib/hardening/host/templates/modules | 117 ++++ .../hardening/host/templates/passwdqc.conf | 11 + .../host/templates/pinerolo_profile.sh | 8 + .../hardening/host/templates/securetty | 11 + .../contrib/hardening/host/templates/tally2 | 14 + .../contrib/hardening/mysql/__init__.py | 19 + .../hardening/mysql/checks/__init__.py | 31 + .../contrib/hardening/mysql/checks/config.py | 89 +++ .../hardening/mysql/templates/__init__.py | 0 .../hardening/mysql/templates/hardening.cnf | 12 + .../contrib/hardening/ssh/__init__.py | 19 + .../contrib/hardening/ssh/checks/__init__.py | 31 + .../contrib/hardening/ssh/checks/config.py | 394 +++++++++++++ .../hardening/ssh/templates/__init__.py | 0 .../hardening/ssh/templates/ssh_config | 70 +++ .../hardening/ssh/templates/sshd_config | 159 +++++ .../contrib/hardening/templating.py | 71 +++ .../charmhelpers/contrib/hardening/utils.py | 157 +++++ .../contrib/storage/linux/ceph.py | 1 + ceph-mon/hooks/charmhelpers/core/hookenv.py | 31 + ceph-mon/hooks/charmhelpers/core/host.py | 50 +- .../charmhelpers/contrib/amulet/utils.py | 5 +- .../contrib/openstack/amulet/utils.py | 53 +- ceph-mon/unit_tests/test_ceph_ops.py | 105 ++-- ceph-mon/unit_tests/test_status.py | 5 +- ceph-mon/unit_tests/test_upgrade_roll.py | 20 +- 67 files changed, 4052 insertions(+), 90 deletions(-) create mode 100644 ceph-mon/hardening.yaml create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md create mode 100644 
ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty create mode 100644 
ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index eeee6f8c..f4b2a26a 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -11,3 +11,4 @@ include: - contrib.openstack.alternatives - contrib.network.ip - contrib.charmsupport + - contrib.hardening|inc=* \ No newline at end of file diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index c486a851..5e340a3a 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -125,3 +125,9 @@ options: default: True type: boolean description: Configure use of direct IO for OSD journals. + harden: + default: + type: string + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. diff --git a/ceph-mon/hardening.yaml b/ceph-mon/hardening.yaml new file mode 100644 index 00000000..314bb385 --- /dev/null +++ b/ceph-mon/hardening.yaml @@ -0,0 +1,5 @@ +# Overrides file for contrib.hardening. See README.hardening in +# contrib.hardening for info on how to use this file. 
+ssh:
+  server:
+    use_pam: 'yes'  # juju requires this
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 385afdd7..f6ab44a9 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -72,6 +72,7 @@
     process_requests
 )
 from charmhelpers.contrib.charmsupport import nrpe
+from charmhelpers.contrib.hardening.harden import harden
 
 hooks = Hooks()
@@ -269,6 +270,7 @@ def install_upstart_scripts():
 
 @hooks.hook('install.real')
+@harden()
 def install():
     execd_preinstall()
     add_source(config('source'), config('key'))
@@ -318,6 +320,7 @@ def emit_cephconf():
 
 @hooks.hook('config-changed')
+@harden()
 def config_changed():
     if config('prefer-ipv6'):
         assert_charm_supports_ipv6()
@@ -553,6 +556,7 @@ def client_relation_changed():
 
 @hooks.hook('upgrade-charm')
+@harden()
 def upgrade_charm():
     emit_cephconf()
     apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
@@ -630,6 +634,12 @@ def assess_status():
     # reboot the ceph-mon process
 
 
+@hooks.hook('update-status')
+@harden()
+def update_status():
+    log('Updating status.')
+
+
 if __name__ == '__main__':
     try:
         hooks.execute(sys.argv)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 00000000..91280c03
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+   (from contrib.hardening.harden) to any hook functions or methods that
+   should trigger hardening of your application/system.
+
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at the root level, with the
+   settings to override nested beneath them e.g.
+
+   os:
+     general:
+       desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time the
+   hook runs.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 00000000..a1335320
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
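The hardening.yaml added by this patch exercises step 3 of the README above: it overrides one key of the ssh module's defaults. A short sketch of how that override surfaces at runtime through contrib.hardening.utils (this assumes it runs inside a hook, where JUJU_CHARM_DIR points at the charm root so the overrides file can be found):

    from charmhelpers.contrib.hardening import utils

    # Merged view: defaults/ssh.yaml overlaid with the charm's hardening.yaml
    settings = utils.get_settings('ssh')
    assert settings['server']['use_pam'] == 'yes'  # from hardening.yaml above

    # Repeat lookups within the same hook hit the module-level cache
    assert utils.get_settings('ssh') is settings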
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py new file mode 100644 index 00000000..d1304792 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 00000000..8249ca01 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import os +import re +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: + log("Apache server does not appear to be installed on this node - " + "skipping apache hardening", level=INFO) + return [] + + context = ApacheConfContext() + settings = utils.get_settings('apache') + audits = [ + FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', + group='root', mode=0o0640), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'mods-available/alias.conf'), + context, + TEMPLATES_DIR, + mode=0o0755, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'conf-enabled/hardening.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + DirectoryPermissionAudit(settings['common']['apache_dir'], + user='root', + group='root', + mode=0o640), + + DisabledModuleAudit(settings['hardening']['modules_to_disable']), + + NoReadWriteForOther(settings['common']['apache_dir']), + ] + + return audits + + +class ApacheConfContext(object): + """Defines the set of key/value pairs to set in a apache config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/apache/conf-enabled/hardening.conf file. + """ + def __call__(self): + settings = utils.get_settings('apache') + ctxt = settings['hardening'] + + out = subprocess.check_output(['apache2', '-v']) + ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', + out).group(1) + ctxt['apache_icondir'] = '/usr/share/apache2/icons/' + ctxt['traceenable'] = settings['hardening']['traceenable'] + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf new file mode 100644 index 00000000..e46a58a3 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf @@ -0,0 +1,31 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + # + # Aliases: Add here as many aliases as you need (with no limit). The format is + # Alias fakename realname + # + # Note that if you include a trailing / on fakename then the server will + # require it to be present in the URL. So "/icons" isn't aliased in this + # example, only "/icons/". 
If the fakename is slash-terminated, then the
+  # realname must also be slash terminated, and if the fakename omits the
+  # trailing slash, the realname must also omit it.
+  #
+  # We include the /icons/ alias for FancyIndexed directory listings. If
+  # you do not use FancyIndexing, you may comment this out.
+  #
+  Alias /icons/ "{{ apache_icondir }}/"
+
+
+      Options -Indexes -MultiViews -FollowSymLinks
+      AllowOverride None
+{% if apache_version == '2.4' -%}
+      Require all granted
+{% else -%}
+      Order allow,deny
+      Allow from all
+{% endif %}
+
+
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
new file mode 100644
index 00000000..07945418
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
@@ -0,0 +1,18 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+
+
+
+    # http://httpd.apache.org/docs/2.4/upgrading.html
+    {% if apache_version > '2.2' -%}
+    Require all granted
+    {% else -%}
+    Order Allow,Deny
+    Deny from all
+    {% endif %}
+
+
+
+TraceEnable {{ traceenable }}
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 00000000..6a7057b3
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class BaseAudit(object):  # NO-QA
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first check to see if the system
+    is in compliance for the specified check. If it is not in compliance,
+    corrective action is taken, subject to the 'unless' override handled by
+    _take_action below.
+    """
+    def __init__(self, *args, **kwargs):
+        self.unless = kwargs.get('unless', None)
+        super(BaseAudit, self).__init__()
+
+    def ensure_compliance(self):
+        """Checks to see if the current hardening check is in compliance or
+        not.
+
+        If the check that is performed is not in compliance, then an exception
+        should be raised.
+        """
+        pass
+
+    def _take_action(self):
+        """Determines whether to perform the action or not.
+
+        Checks whether or not an action should be taken. This is determined by
+        the truthy value for the unless parameter. If unless is a callback
+        method, it will be invoked with no parameters in order to determine
+        whether or not the action should be taken. Otherwise, the truthy value
+        of the unless attribute will determine if the action should be
+        performed.
+        """
+        # Do the action if there isn't an unless override.
+        if self.unless is None:
+            return True
+
+        # Invoke the callback if there is one.
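+        # (Illustrative examples: unless=True skips enforcement
+        # unconditionally, while unless=lambda: <condition> skips it only
+        # when the callable returns a truthy value.)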
+ if hasattr(self.unless, '__call__'): + results = self.unless() + if results: + return False + else: + return True + + if self.unless: + return False + else: + return True diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py new file mode 100644 index 00000000..cf3c987d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import re +import subprocess + +from six import string_types + +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR, +) + +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class DisabledModuleAudit(BaseAudit): + """Audits Apache2 modules. + + Determines if the apache2 modules are enabled. If the modules are enabled + then they are removed in the ensure_compliance. + """ + def __init__(self, modules): + if modules is None: + self.modules = [] + elif isinstance(modules, string_types): + self.modules = [modules] + else: + self.modules = modules + + def ensure_compliance(self): + """Ensures that the modules are not loaded.""" + if not self.modules: + return + + try: + loaded_modules = self._get_loaded_modules() + non_compliant_modules = [] + for module in self.modules: + if module in loaded_modules: + log("Module '%s' is enabled but should not be." % + (module), level=INFO) + non_compliant_modules.append(module) + + if len(non_compliant_modules) == 0: + return + + for module in non_compliant_modules: + self._disable_module(module) + self._restart_apache() + except subprocess.CalledProcessError as e: + log('Error occurred auditing apache module compliance. ' + 'This may have been already reported. ' + 'Output is: %s' % e.output, level=ERROR) + + @staticmethod + def _get_loaded_modules(): + """Returns the modules which are enabled in Apache.""" + output = subprocess.check_output(['apache2ctl', '-M']) + modules = [] + for line in output.strip().split(): + # Each line of the enabled module output looks like: + # module_name (static|shared) + # Plus a header line at the top of the output which is stripped + # out by the regex. + matcher = re.search(r'^ (\S*)', line) + if matcher: + modules.append(matcher.group(1)) + return modules + + @staticmethod + def _disable_module(module): + """Disables the specified module in Apache.""" + try: + subprocess.check_call(['a2dismod', module]) + except subprocess.CalledProcessError as e: + # Note: catch error here to allow the attempt of disabling + # multiple modules in one go rather than failing after the + # first module fails. + log('Error occurred disabling module %s. 
'
+                'Output is: %s' % (module, e.output), level=ERROR)
+
+    @staticmethod
+    def _restart_apache():
+        """Restarts the apache process"""
+        subprocess.check_output(['service', 'apache2', 'restart'])
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py
new file mode 100644
index 00000000..e94af031
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py
@@ -0,0 +1,105 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import  # required for external apt import
+from apt import apt_pkg
+from six import string_types
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_purge
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class AptConfig(BaseAudit):
+
+    def __init__(self, config, **kwargs):
+        # Pass kwargs through so the 'unless' override is honoured.
+        super(AptConfig, self).__init__(**kwargs)
+        self.config = config
+
+    def verify_config(self):
+        apt_pkg.init()
+        for cfg in self.config:
+            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
+            if value and value != cfg['expected']:
+                log("APT config '%s' has unexpected value '%s' "
+                    "(expected='%s')" %
+                    (cfg['key'], value, cfg['expected']), level=WARNING)
+
+    def ensure_compliance(self):
+        self.verify_config()
+
+
+class RestrictedPackages(BaseAudit):
+    """Class used to audit restricted packages on the system."""
+
+    def __init__(self, pkgs, **kwargs):
+        super(RestrictedPackages, self).__init__(**kwargs)
+        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+            self.pkgs = [pkgs]
+        else:
+            self.pkgs = pkgs
+
+    def ensure_compliance(self):
+        cache = apt_cache()
+
+        for p in self.pkgs:
+            if p not in cache:
+                continue
+
+            pkg = cache[p]
+            if not self.is_virtual_package(pkg):
+                if not pkg.current_ver:
+                    log("Package '%s' is not installed." % pkg.name,
+                        level=DEBUG)
+                    continue
+                else:
+                    log("Restricted package '%s' is installed" % pkg.name,
+                        level=WARNING)
+                    self.delete_package(cache, pkg)
+            else:
+                log("Checking restricted virtual package '%s' provides" %
+                    pkg.name, level=DEBUG)
+                self.delete_package(cache, pkg)
+
+    def delete_package(self, cache, pkg):
+        """Deletes the package from the system.
+
+        Deletes the package from the system, properly handling virtual
+        packages.
+
+        :param cache: the apt cache
+        :param pkg: the package to remove
+        """
+        if self.is_virtual_package(pkg):
+            log("Package '%s' appears to be virtual - purging provides" %
+                pkg.name, level=DEBUG)
+            for _p in pkg.provides_list:
+                self.delete_package(cache, _p[2].parent_pkg)
+        elif not pkg.current_ver:
+            log("Package '%s' not installed" % pkg.name, level=DEBUG)
+            return
+        else:
+            log("Purging package '%s'" % pkg.name, level=DEBUG)
+            apt_purge(pkg.name)
+
+    def is_virtual_package(self, pkg):
+        return pkg.has_provides and not pkg.has_versions
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 00000000..0fb545a9
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,552 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class that extends this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files to which we want
+                      to apply compliance checks and criteria.
+        :param always_comply: if true compliance criteria is always applied
+                              else compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with the registered
+        criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
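+        :returns: True if the path is compliant, False otherwise. Subclasses
+                  must override this stub.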
+ """ + raise NotImplementedError + + def comply(self, path): + """Enforces the compliance of a path. + + :param path: the path to the file that should be enforced. + """ + raise NotImplementedError + + @classmethod + def _get_stat(cls, path): + """Returns the Posix st_stat information for the specified file path. + + :param path: the path to get the st_stat information for. + :returns: an st_stat object for the path or None if the path doesn't + exist. + """ + return os.stat(path) + + +class FilePermissionAudit(BaseFileAudit): + """Implements an audit for file permissions and ownership for a user. + + This class implements functionality that ensures that a specific user/group + will own the file(s) specified and that the permissions specified are + applied properly to the file. + """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
% path) + + if not self.recursive: + return super(DirectoryPermissionAudit, self).is_compliant(path) + + compliant = True + for root, dirs, _ in os.walk(path): + if len(dirs) > 0: + continue + + if not super(DirectoryPermissionAudit, self).is_compliant(root): + compliant = False + continue + + return compliant + + def comply(self, path): + for root, dirs, _ in os.walk(path): + if len(dirs) > 0: + super(DirectoryPermissionAudit, self).comply(root) + + +class ReadOnly(BaseFileAudit): + """Audits that files and folders are read only.""" + def __init__(self, paths, *args, **kwargs): + super(ReadOnly, self).__init__(paths=paths, *args, **kwargs) + + def is_compliant(self, path): + try: + output = check_output(['find', path, '-perm', '-go+w', + '-type', 'f']).strip() + + # The find above will find any files which have permission sets + # which allow too broad of write access. As such, the path is + # compliant if there is no output. + if output: + return False + + return True + except CalledProcessError as e: + log('Error occurred checking finding writable files for %s. ' + 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + return False + + def comply(self, path): + try: + check_output(['chmod', 'go-w', '-R', path]) + except CalledProcessError as e: + log('Error occurred removing writeable permissions for %s. ' + 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class NoReadWriteForOther(BaseFileAudit): + """Ensures that the files found under the base path are readable or + writable by anyone other than the owner or the group. + """ + def __init__(self, paths): + super(NoReadWriteForOther, self).__init__(paths) + + def is_compliant(self, path): + try: + cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o', + '-perm', '-o+w', '-type', 'f'] + output = check_output(cmd).strip() + + # The find above here will find any files which have read or + # write permissions for other, meaning there is too broad of access + # to read/write the file. As such, the path is compliant if there's + # no output. + if output: + return False + + return True + except CalledProcessError as e: + log('Error occurred while finding files which are readable or ' + 'writable to the world in %s. ' + 'Command output is: %s.' % (path, e.output), level=ERROR) + + def comply(self, path): + try: + check_output(['chmod', '-R', 'o-rw', path]) + except CalledProcessError as e: + log('Error occurred attempting to change modes of files under ' + 'path %s. Output of command is: %s' % (path, e.output)) + + +class NoSUIDSGIDAudit(BaseFileAudit): + """Audits that specified files do not have SUID/SGID bits set.""" + def __init__(self, paths, *args, **kwargs): + super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs) + + def is_compliant(self, path): + stat = self._get_stat(path) + if (stat.st_mode & (S_ISGID | S_ISUID)) != 0: + return False + + return True + + def comply(self, path): + try: + log('Removing suid/sgid from %s.' % path, level=DEBUG) + check_output(['chmod', '-s', path]) + except CalledProcessError as e: + log('Error occurred removing suid/sgid from %s.' 
+ 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFileAudit audits the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check the content + changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no hashsum, then the content + cannot be sure to be the same so treat it as if they changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.' 
% template_path,
+                level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing hashsum of the file contents and
+        the saved hashsum. If there is no hashsum, then the content cannot
+        be sure to be the same so treat them as if they are not the same.
+        Otherwise, return True if the hashsums are the same, False if they
+        are not the same.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases i.e. tuple(regex, bool) where
+        bool value denotes whether or not regex is expected to match, check
+        that all cases match as expected with the contents of the file. Cases
+        can be expected to pass or fail.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+        """
+        log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
new file mode 100644
index 00000000..e5ada29f
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
@@ -0,0 +1,13 @@
+# NOTE: this file contains the default configuration for the 'apache'
+#       hardening code. If you want to override any settings you must add
+#       them to a file called hardening.yaml in the root directory of your
+#       charm using the name 'apache' as the root key followed by any of the
+#       following with new values.
+
+common:
+  apache_dir: '/etc/apache2'
+
+hardening:
+  traceenable: 'off'
+  allowed_http_methods: "GET POST"
+  modules_to_disable: [ cgi, cgid ]
\ No newline at end of file
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
new file mode 100644
index 00000000..227589b5
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
@@ -0,0 +1,9 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+  apache_dir:
+  traceenable:
+
+hardening:
+  allowed_http_methods:
+  modules_to_disable:
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 00000000..682d22bf
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'mysql' as the root key followed by any of the following with new
+#       values.
+ +hardening: + mysql-conf: /etc/mysql/my.cnf + hardening-conf: /etc/mysql/conf.d/hardening.cnf + +security: + # @see http://www.symantec.com/connect/articles/securing-mysql-step-step + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot + chroot: None + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create + safe-user-create: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth + secure-auth: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links + skip-symbolic-links: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database + skip-show-database: True + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile + local-infile: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs + allow-suspicious-udfs: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges + automatic-sp-privileges: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv + secure-file-priv: /tmp diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema new file mode 100644 index 00000000..2edf325c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema @@ -0,0 +1,15 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +hardening: + mysql-conf: + hardening-conf: +security: + chroot: + safe-user-create: + secure-auth: + skip-symbolic-links: + skip-show-database: + local-infile: + allow-suspicious-udfs: + automatic-sp-privileges: + secure-file-priv: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml new file mode 100644 index 00000000..ddd4286c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -0,0 +1,67 @@ +# NOTE: this file contains the default configuration for the 'os' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'os' as the root key followed by any of the following with new +# values. 
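+#
+#       e.g. the override shown in README.hardening.md:
+#
+#           os:
+#             general:
+#               desktop_enable: True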
+ +general: + desktop_enable: False # (type:boolean) + +environment: + extra_user_paths: [] + umask: 027 + root_path: / + +auth: + pw_max_age: 60 + # discourage password cycling + pw_min_age: 7 + retries: 5 + lockout_time: 600 + timeout: 60 + allow_homeless: False # (type:boolean) + pam_passwdqc_enable: True # (type:boolean) + pam_passwdqc_options: 'min=disabled,disabled,16,12,8' + root_ttys: + console + tty1 + tty2 + tty3 + tty4 + tty5 + tty6 + uid_min: 1000 + gid_min: 1000 + sys_uid_min: 100 + sys_uid_max: 999 + sys_gid_min: 100 + sys_gid_max: 999 + chfn_restrict: + +security: + users_allow: [] + suid_sgid_enforce: True # (type:boolean) + # user-defined blacklist and whitelist + suid_sgid_blacklist: [] + suid_sgid_whitelist: [] + # if this is True, remove any suid/sgid bits from files that were not in the whitelist + suid_sgid_dry_run_on_unknown: False # (type:boolean) + suid_sgid_remove_from_unknown: False # (type:boolean) + # remove packages with known issues + packages_clean: True # (type:boolean) + packages_list: + xinetd + inetd + ypserv + telnet-server + rsh-server + rsync + kernel_enable_module_loading: True # (type:boolean) + kernel_enable_core_dump: False # (type:boolean) + +sysctl: + kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 + kernel_enable_sysrq: False # (type:boolean) + forwarding: False # (type:boolean) + ipv6_enable: False # (type:boolean) + arp_restricted: True # (type:boolean) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema new file mode 100644 index 00000000..88b3966e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +general: + desktop_enable: +environment: + extra_user_paths: + umask: + root_path: +auth: + pw_max_age: + pw_min_age: + retries: + lockout_time: + timeout: + allow_homeless: + pam_passwdqc_enable: + pam_passwdqc_options: + root_ttys: + uid_min: + gid_min: + sys_uid_min: + sys_uid_max: + sys_gid_min: + sys_gid_max: + chfn_restrict: +security: + users_allow: + suid_sgid_enforce: + suid_sgid_blacklist: + suid_sgid_whitelist: + suid_sgid_dry_run_on_unknown: + suid_sgid_remove_from_unknown: + packages_clean: + packages_list: + kernel_enable_module_loading: + kernel_enable_core_dump: +sysctl: + kernel_secure_sysrq: + kernel_enable_sysrq: + forwarding: + ipv6_enable: + arp_restricted: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml new file mode 100644 index 00000000..cd529bca --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml @@ -0,0 +1,49 @@ +# NOTE: this file contains the default configuration for the 'ssh' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'ssh' as the root key followed by any of the following with new +# values. 
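+#
+#       e.g. this charm's own hardening.yaml overrides one server setting:
+#
+#           ssh:
+#             server:
+#               use_pam: 'yes'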
+ +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 00000000..d05e054b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 00000000..ac7568d6 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,84 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
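+
+# Example usage (illustrative): @harden() runs whichever modules the charm's
+# 'harden' config option names, while @harden(overrides=['os']) runs only the
+# OS checks regardless of config.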
+ +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + def _harden_inner1(f): + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + + def _harden_inner2(*args, **kwargs): + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 00000000..c3bd5985 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 00000000..2c221cda --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,39 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. 
+ + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 00000000..8ce9dc2b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,55 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening security limits audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Ensure that the /etc/security/limits.d directory is only writable + # by the root user, but others can execute and read. + audits.append(DirectoryPermissionAudit('/etc/security/limits.d', + user='root', group='root', + mode=0o755)) + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', + SecurityLimitsContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0440)) + return audits + + +class SecurityLimitsContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'disable_core_dump': + not settings['security']['kernel_enable_core_dump']} + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py new file mode 100644 index 00000000..d32c4f60 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -0,0 +1,67 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from six import string_types + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening login.defs audits. + + :returns: dictionary of audits + """ + audits = [TemplatedFile('/etc/login.defs', LoginContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0444)] + return audits + + +class LoginContext(object): + + def __call__(self): + settings = utils.get_settings('os') + + # Octal numbers in yaml end up being turned into decimal, + # so check if the umask is entered as a string (e.g. '027') + # or as an octal umask as we know it (e.g. 002). If its not + # a string assume it to be octal and turn it into an octal + # string. + umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 00000000..c471064b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,52 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. + # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. 
+    audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
+
+    if 'change_user' not in settings['security']['users_allow']:
+        # su should only be accessible to user and group root, unless it is
+        # expressly defined to allow users to change to root via the
+        # security_users_allow config option.
+        audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
+
+    return audits
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py
new file mode 100644
index 00000000..383fe28e
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py
@@ -0,0 +1,134 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import (
+    check_output,
+    CalledProcessError,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_purge,
+    apt_update,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    DeletedFile,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+def get_audits():
+    """Get OS hardening PAM authentication audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    if settings['auth']['pam_passwdqc_enable']:
+        audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
+
+    if settings['auth']['retries']:
+        audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
+    else:
+        audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
+
+    return audits
+
+
+class PasswdqcPAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_pam_passwdqc_options'] = \
+            settings['auth']['pam_passwdqc_options']
+
+        return ctxt
+
+
+class PasswdqcPAM(TemplatedFile):
+    """The PAM Audit verifies the linux PAM settings."""
+    def __init__(self, path):
+        super(PasswdqcPAM, self).__init__(path=path,
+                                          template_dir=TEMPLATES_DIR,
+                                          context=PasswdqcPAMContext(),
+                                          user='root',
+                                          group='root',
+                                          mode=0o0640)
+
+    def pre_write(self):
+        # Remove conflicting PAM packages before installing pam_passwdqc.
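+        # (Assumed rationale: libpam-cracklib provides a competing
+        # password-strength module -- the passwdqc profile shipped below
+        # declares "Conflicts: cracklib" -- and libpam-ccreds caches
+        # credentials, which would weaken the policy enforced here.)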
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Always remove libpam-ccreds: cached credentials would let logins
+        # bypass the lockout policy enforced by pam_tally2.
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 00000000..f7443357
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
+                                    ProfileContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    mode=0o0755, user='root', group='root'))
+    return audits
+
+
+class ProfileContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        return ctxt
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
new file mode 100644
index 00000000..e33c73ca
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening Secure TTY audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
+                                template_dir=TEMPLATES_DIR,
+                                mode=0o0400, user='root', group='root'))
+    return audits
+
+
+class SecureTTYContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'ttys': settings['auth']['root_ttys']}
+        return ctxt
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
new file mode 100644
index 00000000..0534689b
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
@@ -0,0 +1,131 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
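+
+# NOTE: get_audits() below merges the system BLACKLIST/WHITELIST with the
+# user-supplied suid_sgid_blacklist/suid_sgid_whitelist settings, and the
+# user lists take precedence; e.g. (illustrative) a path present in both
+# the system BLACKLIST and the user whitelist drops out of the effective
+# blacklist entirely.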
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid
+    # bit set and the whitelist is the set of paths which MAY have the
+    # suid/sgid bit set. The user whitelist/blacklist effectively override
+    # the system whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
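+
+
+# (Illustrative: the find invocation below is the programmatic form of
+# running something like
+#     find / \( -perm -4000 -o -perm -2000 \) -type f \
+#         ! -path '/proc/*' -print
+# i.e. list every regular file outside /proc with the setuid or setgid
+# bit set.)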
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
+
+    Starting with the root_path, this will recursively find all paths
+    which have an suid or sgid bit set.
+    """
+    # Group the two -perm tests so that -type and -path apply to both;
+    # without the parentheses, find would only print the sgid matches.
+    cmd = ['find', root_path, '(', '-perm', '-4000', '-o', '-perm', '-2000',
+           ')', '-type', 'f', '!', '-path', '/proc/*', '-print']
+
+    # universal_newlines so the output is text under Python 3 as well.
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         universal_newlines=True)
+    out, _ = p.communicate()
+    return set(out.split('\n'))
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py
new file mode 100644
index 00000000..4a76d74e
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py
@@ -0,0 +1,211 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import platform
+import re
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    WARNING,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
+net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
+net.ipv4.conf.all.rp_filter=1
+net.ipv4.conf.default.rp_filter=1
+net.ipv4.icmp_echo_ignore_broadcasts=1
+net.ipv4.icmp_ignore_bogus_error_responses=1
+net.ipv4.icmp_ratelimit=100
+net.ipv4.icmp_ratemask=88089
+net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
+net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
+net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
+net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
+net.ipv4.tcp_rfc1337=1
+net.ipv4.tcp_syncookies=1
+net.ipv4.conf.all.shared_media=1
+net.ipv4.conf.default.shared_media=1
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.conf.default.accept_redirects=0
+net.ipv6.conf.all.accept_redirects=0
+net.ipv6.conf.default.accept_redirects=0
+net.ipv4.conf.all.secure_redirects=0
+net.ipv4.conf.default.secure_redirects=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.log_martians=0
+net.ipv6.conf.default.router_solicitations=0
+net.ipv6.conf.default.accept_ra_rtr_pref=0
+net.ipv6.conf.default.accept_ra_pinfo=0
+net.ipv6.conf.default.accept_ra_defrtr=0
+net.ipv6.conf.default.autoconf=0
+net.ipv6.conf.default.dad_transmits=0
+net.ipv6.conf.default.max_addresses=1
+net.ipv6.conf.all.accept_ra=0
+net.ipv6.conf.default.accept_ra=0
+kernel.modules_disabled=%(kernel_modules_disabled)s
+kernel.sysrq=%(kernel_sysrq)s
+fs.suid_dumpable=%(fs_suid_dumpable)s
+kernel.randomize_va_space=2
+"""
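+
+# NOTE: only the %(...)s placeholders above are tunable -- they are filled
+# in from the 'os' settings by SysCtlHardeningContext below; every other
+# line is a fixed hardening default. For example (illustrative), with
+# kernel_enable_module_loading set to false the rendered file pins
+# kernel.modules_disabled=1.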
+
+
+def get_audits():
+    """Get OS hardening sysctl audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        # Fall back to a neutral value in case /proc/cpuinfo carries no
+        # vendor_id line (e.g. on non-x86 hardware).
+        vendor = 'unknown'
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        # NOTE: must derive from TemplatedFile (not object) for this
+        # super() call to do anything useful.
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems
if sysctl cannot apply all settings it + # will return non-zero as well. + log("sysctl command returned an error (maybe some " + "keys could not be set) - %s" % (e), + level=WARNING) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf new file mode 100644 index 00000000..0014191f --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% if disable_core_dump -%} +# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. +* hard core 0 +{% endif %} \ No newline at end of file diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf new file mode 100644 index 00000000..101f1e1d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf @@ -0,0 +1,7 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% for key, value in sysctl_settings -%} +{{ key }}={{ value }} +{% endfor -%} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs new file mode 100644 index 00000000..db137d6d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -0,0 +1,349 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# +# /etc/login.defs - Configuration control definitions for the login package. +# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. All other items are optional - if not specified then +# the described action or option will be inhibited. +# +# Comment lines (lines beginning with "#") and blank lines are ignored. +# +# Modified for Linux. --marekm + +# REQUIRED for useradd/userdel/usermod +# Directory where mailboxes reside, _or_ name of file, relative to the +# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, +# MAIL_DIR takes precedence. +# +# Essentially: +# - MAIL_DIR defines the location of users mail spool files +# (for mbox use) by appending the username to MAIL_DIR as defined +# below. 
+# - MAIL_FILE defines the location of the users mail spool files as the +# fully-qualified filename obtained by prepending the user home +# directory before $MAIL_FILE +# +# NOTE: This is no more used for setting up users MAIL environment variable +# which is, starting from shadow 4.0.12-1 in Debian, entirely the +# job of the pam_mail PAM modules +# See default PAM configuration files provided for +# login, su, etc. +# +# This is a temporary situation: setting these variables will soon +# move to /etc/default/useradd and the variables will then be +# no more supported +MAIL_DIR /var/mail +#MAIL_FILE .mail + +# +# Enable logging and display of /var/log/faillog login failure info. +# This option conflicts with the pam_tally PAM module. +# +FAILLOG_ENAB yes + +# +# Enable display of unknown usernames when login failures are recorded. +# +# WARNING: Unknown usernames may become world readable. +# See #290803 and #298773 for details about how this could become a security +# concern +LOG_UNKFAIL_ENAB no + +# +# Enable logging of successful logins +# +LOG_OK_LOGINS yes + +# +# Enable "syslog" logging of su activity - in addition to sulog file logging. +# SYSLOG_SG_ENAB does the same for newgrp and sg. +# +SYSLOG_SU_ENAB yes +SYSLOG_SG_ENAB yes + +# +# If defined, all su activity is logged to this file. +# +#SULOG_FILE /var/log/sulog + +# +# If defined, file which maps tty line to TERM environment parameter. +# Each line of the file is in a format something like "vt100 tty01". +# +#TTYTYPE_FILE /etc/ttytype + +# +# If defined, login failures will be logged here in a utmp format +# last, when invoked as lastb, will read /var/log/btmp, so... +# +FTMP_FILE /var/log/btmp + +# +# If defined, the command name to display when running "su -". For +# example, if this is defined as "su" then a "ps" will display the +# command is "-su". If not defined, then "ps" would display the +# name of the shell actually being run, e.g. something like "-sh". +# +SU_NAME su + +# +# If defined, file which inhibits all the usual chatter during the login +# sequence. If a full pathname, then hushed mode will be enabled if the +# user's name or shell are found in the file. If not a full pathname, then +# hushed mode will be enabled if the file exists in the user's home directory. +# +HUSHLOGIN_FILE .hushlogin +#HUSHLOGIN_FILE /etc/hushlogins + +# +# *REQUIRED* The default PATH settings, for superuser and normal users. +# +# (they are minimal, add the rest in the shell startup files) +ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} + +# +# Terminal permissions +# +# TTYGROUP Login tty will be assigned this group ownership. +# TTYPERM Login tty will be set to this permission. +# +# If you have a "write" program which is "setgid" to a special group +# which owns the terminals, define TTYGROUP to the group number and +# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign +# TTYPERM to either 622 or 600. +# +# In Debian /usr/bin/bsd-write or similar programs are setgid tty +# However, the default and recommended value for TTYPERM is still 0600 +# to not allow anyone to write to anyone else console or terminal + +# Users can still allow other people to write them by issuing +# the "mesg y" command. + +TTYGROUP tty +TTYPERM 0600 + +# +# Login configuration initializations: +# +# ERASECHAR Terminal ERASE character ('\010' = backspace). 
+# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
+# UMASK Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here: each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR 0177
+KILLCHAR 025
+UMASK {{ umask }}
+
+# (USERGROUPS_ENAB, which also affects how this UMASK default is applied,
+# is set further down in this file.)
+
+#
+# Password aging controls:
+#
+# PASS_MAX_DAYS Maximum number of days a password may be used.
+# PASS_MIN_DAYS Minimum number of days allowed between password changes.
+# PASS_WARN_AGE Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS {{ pwd_max_age }}
+PASS_MIN_DAYS {{ pwd_min_age }}
+PASS_WARN_AGE 7
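+
+# (Illustrative: with the hardening defaults pw_max_age=60 and
+# pw_min_age=7, the rendered values force a password change at least
+# every 60 days and permit at most one change per week.)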
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN {{ uid_min }}
+UID_MAX 60000
+# System accounts
+SYS_UID_MIN {{ sys_uid_min }}
+SYS_UID_MAX {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN {{ gid_min }}
+GID_MAX 60000
+# System accounts
+SYS_GID_MIN {{ sys_gid_min }}
+SYS_GID_MAX {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES {{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT {{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone). If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT {{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD /usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names. Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE /etc/consoles
+#CONSOLE console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting). Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm. Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB no
+
+#
+# If set to MD5, MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+#                                               #
+# These options are now handled by PAM. Please  #
+# edit the appropriate file in /etc/pam.d/ to   #
+# enable the equivalents of them.               #
+#                                               #
+#################################################
+
+#MOTD_FILE
+#DIALUPS_CHECK_ENAB
+#LASTLOG_ENAB
+#MAIL_CHECK_ENAB
+#OBSCURE_CHECKS_ENAB
+#PORTTIME_CHECKS_ENAB
+#SU_WHEEL_ONLY
+#CRACKLIB_DICTPATH
+#PASS_CHANGE_TRIES
+#PASS_ALWAYS_WARN
+#ENVIRON_FILE
+#NOLOGINS_FILE
+#ISSUE_FILE
+#PASS_MIN_LEN
+#PASS_MAX_LEN
+#ULIMIT
+#ENV_HZ
+#CHFN_AUTH
+#CHSH_AUTH
+#FAIL_DELAY
+
+################# OBSOLETED #######################
+#                                                 #
+# These options are no longer handled by shadow.  #
+#                                                 #
+# Shadow utilities will display a warning if they
+# still appear.
# +# # +################################################### + +# CLOSE_SESSIONS +# LOGIN_STRING +# NO_PASSWORD_CONSOLE +# QMAIL_DIR + + + diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules new file mode 100644 index 00000000..ef0354ee --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules @@ -0,0 +1,117 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +# Arch +# ---- +# +# Modules for certains builds, contains support modules and some CPU-specific optimizations. + +{% if arch == "x86_64" -%} +# Optimize for x86_64 cryptographic features +twofish-x86_64-3way +twofish-x86_64 +aes-x86_64 +salsa20-x86_64 +blowfish-x86_64 +{% endif -%} + +{% if cpuVendor == "intel" -%} +# Intel-specific optimizations +ghash-clmulni-intel +aesni-intel +kvm-intel +{% endif -%} + +{% if cpuVendor == "amd" -%} +# AMD-specific optimizations +kvm-amd +{% endif -%} + +kvm + + +# Crypto +# ------ + +# Some core modules which comprise strong cryptography. +blowfish_common +blowfish_generic +ctr +cts +lrw +lzo +rmd160 +rmd256 +rmd320 +serpent +sha512_generic +twofish_common +twofish_generic +xts +zlib + + +# Drivers +# ------- + +# Basics +lp +rtc +loop + +# Filesystems +ext2 +btrfs + +{% if desktop_enable -%} +# Desktop +psmouse +snd +snd_ac97_codec +snd_intel8x0 +snd_page_alloc +snd_pcm +snd_timer +soundcore +usbhid +{% endif -%} + +# Lib +# --- +xz + + +# Net +# --- + +# All packets needed for netfilter rules (ie iptables, ebtables). +ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 00000000..f98d14e5 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 00000000..fd2de791 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 00000000..15b18d4e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 00000000..d9620299 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 00000000..277b8c77 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py
new file mode 100644
index 00000000..d4f0ec19
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.mysql.checks import config
+
+
+def run_mysql_checks():
+    log("Starting MySQL hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("MySQL hardening checks complete.", level=DEBUG)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py
new file mode 100644
index 00000000..3af8b89d
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py
@@ -0,0 +1,89 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    DirectoryPermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get MySQL hardening config audits.
+
+    :returns: list of audits
+    """
+    if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
+        log("MySQL does not appear to be installed on this node - "
+            "skipping mysql hardening", level=WARNING)
+        return []
+
+    settings = utils.get_settings('mysql')
+    hardening_settings = settings['hardening']
+    my_cnf = hardening_settings['mysql-conf']
+
+    audits = [
+        FilePermissionAudit(paths=[my_cnf], user='root',
+                            group='root', mode=0o0600),
+
+        TemplatedFile(hardening_settings['hardening-conf'],
+                      MySQLConfContext(),
+                      TEMPLATES_DIR,
+                      mode=0o0750,
+                      user='mysql',
+                      group='root',
+                      service_actions=[{'service': 'mysql',
+                                        'actions': ['restart']}]),
+
+        # MySQL and Percona charms do not allow configuration of the
+        # data directory, so use the default.
+        DirectoryPermissionAudit('/var/lib/mysql',
+                                 user='mysql',
+                                 group='mysql',
+                                 recursive=False,
+                                 mode=0o755),
+
+        DirectoryPermissionAudit('/etc/mysql',
+                                 user='root',
+                                 group='root',
+                                 recursive=False,
+                                 mode=0o700),
+    ]
+
+    return audits
+
+
+class MySQLConfContext(object):
+    """Defines the set of key/value pairs to set in a mysql config file.
+
+    This context, when called, will return a dictionary containing the
+    key/value pairs of settings to specify in the
+    /etc/mysql/conf.d/hardening.cnf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('mysql')
+        # Translate for python3
+        return {'mysql_settings':
+                [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
new file mode 100644
index 00000000..8242586c
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
@@ -0,0 +1,12 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+[mysqld]
+{% for setting, value in mysql_settings -%}
+{% if value == 'True' -%}
+{{ setting }}
+{% elif value != 'None' and value != None -%}
+{{ setting }} = {{ value }}
+{% endif -%}
+{% endfor -%}
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py
new file mode 100644
index 00000000..277b8c77
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py
new file mode 100644
index 00000000..b85150d5
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.ssh.checks import config
+
+
+def run_ssh_checks():
+    log("Starting SSH hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("SSH hardening checks complete.", level=DEBUG)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
new file mode 100644
index 00000000..3fb6ae8d
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -0,0 +1,394 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+from charmhelpers.core.host import lsb_release
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    FileContentAudit,
+)
+from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get SSH hardening config audits.
+
+    :returns: list of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above (the string comparison
+        # works because Ubuntu codenames of this era sort alphabetically).
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        cipher = {'default': default,
+                  # NOTE: the leading comma is required so the CBC suffix
+                  # does not fuse with aes128-ctr.
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
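+
+
+# (A rough sketch of the write path, for orientation: TemplatedFile's
+# ensure_compliance() triggers pre_write(), renders the template with the
+# context above, writes the result with the requested owner/mode, then
+# calls post_write(). The pre/post hooks here only make sure the openssh
+# package is installed and /etc/ssh stays 0755 root:root.)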
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': settings['server']['listen_to'],
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot': settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
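+
+
+# (The two FileContentAudit subclasses below only report on the rendered
+# files: run_ssh_checks() in checks/__init__.py calls ensure_compliance()
+# on each audit returned by get_audits(), which for these checks means
+# matching /etc/ssh/ssh_config and sshd_config against the pass/fail
+# regexes assembled in is_compliant().)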
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            # NOTE: this audits the *client* config, so use the client
+            # settings (the server config is checked separately below).
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+        if settings['client']['roaming']:
+            self.pass_cases.append(r'^UseRoaming yes$')
+        else:
+            self.fail_cases.append(r'^UseRoaming yes$')
+
+        return super(SSHConfigFileContentAudit, self).is_compliant(*args,
+                                                                   **kwargs)
+
+
+class SSHDConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/sshd_config'
+        super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 00000000..9742d8e2 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. 
command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. +{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 00000000..5f87298a --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication {{ password_authentication }} + +# Kerberos options +KerberosAuthentication no +KerberosGetAFSToken no +KerberosOrLocalPasswd no +KerberosTicketCleanup yes + +# GSSAPI options +GSSAPIAuthentication no +GSSAPICleanupCredentials yes + +X11Forwarding {{ allow_x11_forwarding }} +X11DisplayOffset 10 +X11UseLocalhost yes +GatewayPorts no +PrintMotd {{ print_motd }} +PrintLastLog {{ print_last_log }} +TCPKeepAlive no +UseLogin no + +ClientAliveInterval {{ client_alive_interval }} +ClientAliveCountMax {{ client_alive_count }} +AllowTcpForwarding {{ allow_tcp_forwarding }} +AllowAgentForwarding {{ allow_agent_forwarding }} + +MaxStartups 10:30:100 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 00000000..d2ab7dc9
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,71 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+
+try:
+    from jinja2 import FileSystemLoader, Environment
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    from charmhelpers.fetch import apt_update
+    apt_update(fatal=True)
+    apt_install('python-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, Environment
+
+
+# NOTE: function separated from main rendering code to facilitate easier
+# mocking in unit tests.
+def write(path, data):
+    with open(path, 'wb') as out:
+        out.write(data)
+
+
+def get_template_path(template_dir, path):
+    """Returns the template file which would be used to render the path.
+
+    The path to the template file is returned.
+    :param template_dir: the directory the templates are located in
+    :param path: the file path to be written to.
+    :returns: path to the template file
+    """
+    return os.path.join(template_dir, os.path.basename(path))
+
+
+def render_and_write(template_dir, path, context):
+    """Renders the specified template into the file.
+
+    :param template_dir: the directory to load the template from
+    :param path: the path to write the templated contents to
+    :param context: the parameters to pass to the rendering engine
+    """
+    env = Environment(loader=FileSystemLoader(template_dir))
+    template_file = os.path.basename(path)
+    template = env.get_template(template_file)
+    log('Rendering from template: %s' % template.name, level=DEBUG)
+    rendered_content = template.render(context)
+    if not rendered_content:
+        log("Render returned None - skipping '%s'" % path,
+            level=WARNING)
+        return
+
+    write(path, rendered_content.encode('utf-8').strip())
+    log('Wrote template %s' % path, level=DEBUG)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py
new file mode 100644
index 00000000..a6743a4d
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py
@@ -0,0 +1,157 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import grp
+import os
+import pwd
+import six
+import yaml
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+
+
+# Global settings cache. Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+    """Load the default config for the provided modules.
+
+    :param modules: stack modules config defaults to lookup.
+    :returns: modules default config dictionary.
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys
+    in the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+ """ + overrides = os.path.join(os.environ['JUJU_CHARM_DIR'], + 'hardening.yaml') + if os.path.exists(overrides): + log("Found user-provided config overrides file '%s'" % + (overrides), level=DEBUG) + settings = yaml.safe_load(open(overrides)) + if settings and settings.get(modules): + log("Applying '%s' overrides" % (modules), level=DEBUG) + return settings.get(modules) + + log("No overrides found for '%s'" % (modules), level=DEBUG) + else: + log("No hardening config overrides file '%s' found in charm " + "root dir" % (overrides), level=DEBUG) + + return {} + + +def _apply_overrides(settings, overrides, schema): + """Get overrides config overlayed onto modules defaults. + + :param modules: require stack modules config. + :returns: dictionary of modules config with user overrides applied. + """ + if overrides: + for k, v in six.iteritems(overrides): + if k in schema: + if schema[k] is None: + settings[k] = v + elif type(schema[k]) is dict: + settings[k] = _apply_overrides(settings[k], overrides[k], + schema[k]) + else: + raise Exception("Unexpected type found in schema '%s'" % + type(schema[k]), level=ERROR) + else: + log("Unknown override key '%s' - ignoring" % (k), level=INFO) + + return settings + + +def get_settings(modules): + global __SETTINGS__ + if modules in __SETTINGS__: + return __SETTINGS__[modules] + + schema = _get_schema(modules) + settings = _get_defaults(modules) + overrides = _get_user_provided_overrides(modules) + __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema) + return __SETTINGS__[modules] + + +def ensure_permissions(path, user, group, permissions, maxdepth=-1): + """Ensure permissions for path. + + If path is a file, apply to file and return. If path is a directory, + apply recursively (if required) to directory contents and return. + + :param user: user name + :param group: group name + :param permissions: octal permissions + :param maxdepth: maximum recursion depth. A negative maxdepth allows + infinite recursion and maxdepth=0 means no recursion. 
+ :returns: None + """ + if not os.path.exists(path): + log("File '%s' does not exist - cannot set permissions" % (path), + level=WARNING) + return + + _user = pwd.getpwnam(user) + os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) + os.chmod(path, permissions) + + if maxdepth == 0: + log("Max recursion depth reached - skipping further recursion", + level=DEBUG) + return + elif maxdepth > 0: + maxdepth -= 1 + + if os.path.isdir(path): + contents = glob.glob("%s/*" % (path)) + for c in contents: + ensure_permissions(c, user=user, group=group, + permissions=permissions, maxdepth=maxdepth) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 14549174..1b4b1de7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -283,6 +283,7 @@ def get_mon_map(service): e.message)) raise + def hash_monitor_names(service): """ Uses the get_mon_map() function to get information about the monitor diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 2dd70bc9..01321296 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status): subprocess.check_call(cmd) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + @cached def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" @@ -976,3 +994,16 @@ def _run_atexit(): for callback, args, kwargs in reversed(_atexit): callback(*args, **kwargs) del _atexit[:] + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get_primary_address(binding): + ''' + Retrieve the primary network address for a named binding + + :param binding: string. The name of a relation of extra-binding + :return: string. The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).strip() diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index a7720906..481087bb 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -30,6 +30,8 @@ import string import subprocess import hashlib +import functools +import itertools from contextlib import contextmanager from collections import OrderedDict @@ -428,27 +430,47 @@ def config_changed(): restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. + + @param restart_map: {path_file_name: [service_name, ...] 
+ @param stopstart: DEFAULT false; whether to stop, start OR restart + @returns result from decorated function """ def wrap(f): + @functools.wraps(f) def wrapped_f(*args, **kwargs): - checksums = {path: path_hash(path) for path in restart_map} - f(*args, **kwargs) - restarts = [] - for path in restart_map: - if path_hash(path) != checksums[path]: - restarts += restart_map[path] - services_list = list(OrderedDict.fromkeys(restarts)) - if not stopstart: - for service_name in services_list: - service('restart', service_name) - else: - for action in ['stop', 'start']: - for service_name in services_list: - service(action, service_name) + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart) return wrapped_f return wrap +def restart_on_change_helper(lambda_f, restart_map, stopstart=False): + """Helper function to perform the restart_on_change function. + + This is provided for decorators to restart services if files described + in the restart_map have changed after an invocation of lambda_f(). + + @param lambda_f: function to call. + @param restart_map: {file: [service, ...]} + @param stopstart: whether to stop, start or restart a service + @returns result of lambda_f() + """ + checksums = {path: path_hash(path) for path in restart_map} + r = lambda_f() + # create a list of lists of the services to restart + restarts = [restart_map[path] + for path in restart_map + if path_hash(path) != checksums[path]] + # create a flat list of ordered services without duplicates from lists + services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) + if services_list: + actions = ('stop', 'start') if stopstart else ('restart',) + for action in actions: + for service_name in services_list: + service(action, service_name) + return r + + def lsb_release(): """Return /etc/lsb-release in a dict""" d = {} diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index a967b4f8..3e159039 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -781,8 +781,9 @@ def get_uuid_epoch_stamp(self): return '[{}-{}]'.format(uuid.uuid4(), time.time()) # amulet juju action helpers: - def run_action(self, unit_sentry, action, params=None, - _check_output=subprocess.check_output): + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output, + params=None): """Run the named action on a given unit sentry. params a dict of parameters to use diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 388b60e6..ef3bdccf 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,7 +27,11 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client -import novaclient.v1_1.client as nova_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + +import novaclient.client as nova_client import pika import swiftclient @@ -38,6 +42,8 @@ DEBUG = logging.DEBUG ERROR = logging.ERROR +NOVA_CLIENT_VERSION = "2" + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. 
@@ -139,7 +145,7 @@ def validate_role_data(self, expected, actual): return "role {} does not exist".format(e['name']) return ret - def validate_user_data(self, expected, actual): + def validate_user_data(self, expected, actual, api_version=None): """Validate user data. Validate a list of actual user data vs a list of expected user @@ -150,10 +156,15 @@ def validate_user_data(self, expected, actual): for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +199,30 @@ def authenticate_cinder_admin(self, keystone_sentry, username, return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" @@ -225,7 +251,8 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 88e64c7d..5e82fa8b 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -1,22 +1,21 @@ __author__ = 'chris' import json -from hooks import ceph_broker - -import mock import unittest +from mock import ( + call, + patch, +) + +from hooks import ceph_broker + class TestCephOps(unittest.TestCase): - """ - @mock.patch('ceph_broker.log') - def test_connect(self, mock_broker): - self.fail() - """ - - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.create_erasure_profile') - def test_create_erasure_profile(self, mock_create_erasure, mock_log): + + 
@patch.object(ceph_broker, 'create_erasure_profile') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_create_erasure_profile(self, mock_create_erasure): req = json.dumps({'api-version': 1, 'ops': [{ 'op': 'create-erasure-profile', @@ -36,13 +35,15 @@ def test_create_erasure_profile(self, mock_create_erasure, mock_log): erasure_plugin_name='jerasure') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.ReplicatedPool.create') + @patch.object(ceph_broker, 'get_osds') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker, 'ReplicatedPool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_replicated_pool(self, mock_replicated_pool, mock_pool_exists, - mock_log): + mock_get_osds): + mock_get_osds.return_value = 0 mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -53,14 +54,14 @@ def test_process_requests_create_replicated_pool(self, }]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with() + calls = [call(pg_num=None, name=u'foo', service='admin', replicas=3)] + mock_replicated_pool.assert_has_calls(calls) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.delete_pool') + @patch.object(ceph_broker, 'delete_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_delete_pool(self, - mock_delete_pool, - mock_log): + mock_delete_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'delete-pool', @@ -70,14 +71,13 @@ def test_process_requests_delete_pool(self, mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.ErasurePool.create') - @mock.patch('hooks.ceph_broker.erasure_profile_exists') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.ErasurePool, 'create') + @patch.object(ceph_broker, 'erasure_profile_exists') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool, - mock_pool_exists, - mock_log): + mock_pool_exists): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -92,11 +92,11 @@ def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool.assert_called_with() self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - @mock.patch('hooks.ceph_broker.Pool.add_cache_tier') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.Pool, 'add_cache_tier') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_cache_tier(self, mock_pool, - mock_pool_exists, mock_log): + mock_pool_exists): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -113,11 +113,11 @@ def test_process_requests_create_cache_tier(self, mock_pool, mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_exists') - 
@mock.patch('hooks.ceph_broker.Pool.remove_cache_tier') + @patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker.Pool, 'remove_cache_tier') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_remove_cache_tier(self, mock_pool, - mock_pool_exists, mock_log): + mock_pool_exists): mock_pool_exists.return_value = True reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -130,9 +130,9 @@ def test_process_requests_remove_cache_tier(self, mock_pool, mock_pool.assert_called_with(cache_pool='foo-ssd') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.snapshot_pool') - def test_snapshot_pool(self, mock_snapshot_pool, mock_log): + @patch.object(ceph_broker, 'snapshot_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_snapshot_pool(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'snapshot-pool', @@ -146,9 +146,9 @@ def test_snapshot_pool(self, mock_snapshot_pool, mock_log): snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.rename_pool') - def test_rename_pool(self, mock_rename_pool, mock_log): + @patch.object(ceph_broker, 'rename_pool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_rename_pool(self, mock_rename_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'rename-pool', @@ -161,9 +161,9 @@ def test_rename_pool(self, mock_rename_pool, mock_log): new_name='foo2') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.remove_pool_snapshot') - def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): + @patch.object(ceph_broker, 'remove_pool_snapshot') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_remove_pool_snapshot(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'remove-pool-snapshot', @@ -176,9 +176,9 @@ def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker): snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - @mock.patch('hooks.ceph_broker.pool_set') - def test_set_pool_value(self, mock_set_pool, mock_broker): + @patch.object(ceph_broker, 'pool_set') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_set_pool_value(self, mock_set_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'set-pool-value', @@ -193,8 +193,8 @@ def test_set_pool_value(self, mock_set_pool, mock_broker): value=3) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.log') - def test_set_invalid_pool_value(self, mock_broker): + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_set_invalid_pool_value(self): reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'set-pool-value', @@ -203,15 +203,8 @@ def test_set_invalid_pool_value(self, mock_broker): 'value': 'abc', }]}) rc = ceph_broker.process_requests(reqs) - # self.assertRaises(AssertionError) self.assertEqual(json.loads(rc)['exit-code'], 1) - ''' - @mock.patch('ceph_broker.log') - def test_set_pool_max_bytes(self, mock_broker): - self.fail() - ''' - if __name__ == '__main__': unittest.main() diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 88625908..46cc0178 100644 --- a/ceph-mon/unit_tests/test_status.py +++ 
b/ceph-mon/unit_tests/test_status.py
@@ -1,7 +1,10 @@
 import mock
 import test_utils
-import ceph_hooks as hooks
+with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    import ceph_hooks as hooks

 TO_PATCH = [
     'status_set',
diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py
index 8af24ac5..dd0ae231 100644
--- a/ceph-mon/unit_tests/test_upgrade_roll.py
+++ b/ceph-mon/unit_tests/test_upgrade_roll.py
@@ -7,7 +7,11 @@ sys.path.append('/home/chris/repos/ceph-mon/hooks')
 import test_utils
-import ceph_hooks
+
+with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    import ceph_hooks

 TO_PATCH = [
     'hookenv',
@@ -111,11 +115,18 @@ def test_roll_monitor_cluster_second(self,
                    'Waiting on ip-192-168-1-2 to finish upgrading')
         lock_and_roll.assert_called_with(my_name="ip-192-168-1-3")

+    @patch.object(ceph_hooks, 'time')
     @patch('ceph_hooks.monitor_key_get')
     @patch('ceph_hooks.monitor_key_exists')
-    def test_wait_on_previous_node(self,
-                                   monitor_key_exists,
-                                   monitor_key_get):
+    def test_wait_on_previous_node(self, monitor_key_exists, monitor_key_get,
+                                   mock_time):
+        tval = [previous_node_start_time]
+
+        def fake_time():
+            tval[0] += 100
+            return tval[0]
+
+        mock_time.time.side_effect = fake_time
         monitor_key_get.side_effect = monitor_key_side_effect
         monitor_key_exists.return_value = False
@@ -134,3 +145,4 @@ def test_wait_on_previous_node(self,
             [call('Previous node is: ip-192-168-1-2')],
             [call('ip-192-168-1-2 is not finished. Waiting')],
         )
+        self.assertEqual(tval[0], previous_node_start_time + 700)

From 003da4074fb4d636ec2abfed17923afe9a5f0803 Mon Sep 17 00:00:00 2001
From: Trent Lloyd
Date: Wed, 30 Mar 2016 13:11:58 +0800
Subject: [PATCH 1085/2699] Check if /var/lib/ceph/nss exists before creation

mkdir() throws an exception if the directory already exists, so we
should check for its existence before creation, as hooks should be
idempotent.

This occurred in practice when the install hook had to be re-run,
following a previous failure due to juju API timeout. It would also occur
if the keystone relation was re-built, so that case is also fixed.
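The fix below is the standard idempotency guard around directory creation.
As a minimal sketch (ensure_dir() is a hypothetical helper, not the charm
code itself; path can be any directory a hook may try to create twice):

    import os

    def ensure_dir(path):
        # Hooks can be re-run at any time, so creation must be a no-op
        # when the directory already exists; calling os.makedirs()
        # unconditionally would raise OSError on the second run.
        if not os.path.exists(path):
            os.makedirs(path)

On the Python 2 runtime these charms target, os.makedirs() has no
exist_ok flag (that arrived in Python 3.2), hence the explicit check.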
Change-Id: I4052cda5bb20f76ab592ed7817bdc1e5b5b2138d Closes-Bug: #1563667 --- ceph-radosgw/hooks/hooks.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 03aecfcb..52bca94a 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -130,7 +130,8 @@ def install(): execd_preinstall() enable_pocket('multiverse') install_packages() - os.makedirs(NSS_DIR) + if not os.path.exists(NSS_DIR): + os.makedirs(NSS_DIR) if not os.path.exists('/etc/ceph'): os.makedirs('/etc/ceph') @@ -157,7 +158,8 @@ def setup_keystone_certs(unit=None, rid=None): from keystoneclient.v2_0 import client certs_path = '/var/lib/ceph/nss' - mkdir(certs_path) + if not os.path.exists(certs_path): + mkdir(certs_path) rdata = relation_get(unit=unit, rid=rid) auth_protocol = rdata.get('auth_protocol', 'http') From d8d661f950f57ef95d69e20ad7ade2da0c4f014e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 23 Mar 2016 14:06:00 +0000 Subject: [PATCH 1086/2699] Add multinetwork support The identity relation supports admin/internal/public networks so this patch allows these networks to be configured by adding os-*-network to config.yaml. Closes-Bug: 1483984 Change-Id: If9d3d3281f189a0e14af59fdadd97cddc4786848 --- ceph-radosgw/config.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 85bf9e45..f53870d2 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -96,6 +96,32 @@ options: description: | Default multicast port number that will be used to communicate between HA Cluster nodes. + # Network configuration options + # by default all access is over 'private-address' + os-admin-network: + type: string + default: + description: | + The IP address and netmask of the OpenStack Admin network (e.g., + 192.168.0.0/24) + . + This network will be used for admin endpoints. + os-internal-network: + type: string + default: + description: | + The IP address and netmask of the OpenStack Internal network (e.g., + 192.168.0.0/24) + . + This network will be used for internal endpoints. + os-public-network: + type: string + default: + description: | + The IP address and netmask of the OpenStack Public network (e.g., + 192.168.0.0/24) + . + This network will be used for public endpoints. os-public-hostname: type: string default: From e1f08f67505079dbea0d1bc2d4b8d79bc96e5de1 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 31 Mar 2016 13:05:19 +0100 Subject: [PATCH 1087/2699] Add Juju Network Space support Juju 2.0 provides support for network spaces, allowing charm authors to support direct binding of relations and extra-bindings onto underlying network spaces. Resync charm-helpers to pickup support in API endpoint resolution code and add API extra-bindings to the charm metadata. 
Change-Id: Iada9f4d29cca9963900d4ec722c7681fa554b16c --- ceph-radosgw/.gitignore | 1 + ceph-radosgw/README.md | 25 +++++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 9 ++++ .../contrib/openstack/amulet/deployment.py | 4 +- .../charmhelpers/contrib/openstack/ip.py | 42 +++++++++++++++---- ceph-radosgw/metadata.yaml | 4 ++ .../contrib/openstack/amulet/deployment.py | 4 +- 7 files changed, 80 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index 8521ed00..9fca5d06 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -6,3 +6,4 @@ tags *.sw[nop] *.pyc .idea +.unit-state.db diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index e405d017..ea88272e 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -66,6 +66,31 @@ and then stick a HA loadbalancer on the front:: Should give you a bit more bang on the front end if you really need it. +Network Space support +===================== + +This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. + +API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints. + +To use this feature, use the --bind option when deploying the charm: + + juju deploy ceph-radosgw --bind "public=public-space internal=internal-space admin=admin-space" + +alternatively these can also be provided as part of a juju native bundle configuration: + + ceph-radosgw: + charm: cs:xenial/ceph-radosgw + num_units: 1 + bindings: + public: public-space + admin: admin-space + internal: internal-space + +NOTE: Spaces must be configured in the underlying provider prior to attempting to use them. + +NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. + Contact Information =================== diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 4efe7993..b9c79000 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -191,6 +191,15 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + def format_ipv6_addr(address): """If address is IPv6, wrap it in '[]' otherwise return None. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 3dca6dc1..532a1dc1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -14,16 +14,19 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . + from charmhelpers.core.hookenv import ( config, unit_get, service_name, + network_get_primary_address, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, is_address_in_network, is_ipv6, get_ipv6_addr, + resolve_network_cidr, ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -33,16 +36,19 @@ ADDRESS_MAP = { PUBLIC: { + 'binding': 'public', 'config': 'os-public-network', 'fallback': 'public-address', 'override': 'os-public-hostname', }, INTERNAL: { + 'binding': 'internal', 'config': 'os-internal-network', 'fallback': 'private-address', 'override': 'os-internal-hostname', }, ADMIN: { + 'binding': 'admin', 'config': 'os-admin-network', 'fallback': 'private-address', 'override': 'os-admin-hostname', @@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC): correct network. If clustered with no nets defined, return primary vip. If not clustered, return unit address ensuring address is on configured net - split if one is configured. + split if one is configured, or a Juju 2.0 extra-binding has been used. 
:param endpoint_type: Network endpoing type """ @@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC): net_type = ADDRESS_MAP[endpoint_type]['config'] net_addr = config(net_type) net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] clustered = is_clustered() - if clustered: - if not net_addr: - # If no net-splits defined, we expect a single vip - resolved_address = vips[0] - else: + + if clustered and vips: + if net_addr: for vip in vips: if is_address_in_network(net_addr, vip): resolved_address = vip break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except NotImplementedError: + # If no net-splits configured and no support for extra + # bindings/network spaces so we expect a single vip + resolved_address = vips[0] else: if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: fallback_addr = unit_get(net_fallback) - resolved_address = get_address_in_network(net_addr, fallback_addr) + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except NotImplementedError: + resolved_address = fallback_addr if resolved_address is None: raise ValueError("Unable to resolve a suitable IP address based on " diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 6399b759..a7f215b5 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -12,6 +12,10 @@ tags: - storage - file-servers - misc +extra-bindings: + public: + admin: + internal: requires: mon: interface: ceph-radosgw diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: From 177aa43bcb0d14046bfd255c03621e782f06bfd2 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 31 Mar 2016 11:36:27 -0700 Subject: [PATCH 1088/2699] Rolling upgrades of ceph osd cluster This change adds functionality to allow the ceph osd cluster to upgrade in a serial rolled fashion. This will use the ceph monitor cluster to lock and allows only 1 ceph osd server at a time to upgrade. The upgrade is initiated setting a config value for source for the service which will prompt the osd cluster to upgrade to that new source and restart all osds processes server by server. If an osd server has been waiting on a previous server for more than 10 minutes and hasn't seen it finish it will assume it died during the upgrade and proceed with its own upgrade. 
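Condensed, the locking scheme described above amounts to the following
sketch (illustrative only, assuming the monitor_key_* helpers this patch
imports from charm-helpers; serialised_upgrade() and the upgrade_osd()
stub are hypothetical stand-ins for the real stop-osds, upgrade-packages,
start-osds logic in ceph_hooks.py):

    import time

    from charmhelpers.contrib.storage.linux.ceph import (
        monitor_key_set,
        monitor_key_exists,
        monitor_key_get,
    )

    def upgrade_osd():
        pass  # stand-in: stop osds, install new packages, start osds

    def serialised_upgrade(my_name, osd_sorted_list, timeout=10 * 60):
        position = osd_sorted_list.index(my_name)
        if position > 0:
            previous = osd_sorted_list[position - 1]
            # Wait for the predecessor to publish its "done" key, but give
            # up once ten minutes have passed since its recorded start time
            # and assume that node died mid-upgrade.
            while not monitor_key_exists('osd-upgrade',
                                         '{}_done'.format(previous)):
                started = monitor_key_get('osd-upgrade',
                                          '{}_start'.format(previous))
                # Timestamps round-trip through the monitor as strings,
                # so convert before comparing.
                if started and time.time() - float(started) > timeout:
                    break
                time.sleep(30)
        # Publish my own start/done markers so the next host can watch them.
        monitor_key_set('osd-upgrade', '{}_start'.format(my_name),
                        time.time())
        upgrade_osd()
        monitor_key_set('osd-upgrade', '{}_done'.format(my_name),
                        time.time())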
I had to modify the amulet test slightly to use the ceph-mon charm instead of the default ceph charm. I also changed the test so that it uses 3 ceph-osd servers instead of 1. Limtations of this patch: If the osd failure domain has been set to osd than this patch will cause brief temporary outages while osd processes are being restarted. Future work will handle this case. This reverts commit 9c236665c5bc2f3b2f3f5f426bca54c62a2b888d. Change-Id: Ied010278085611b6d552e050a9d2bfdad7f3d35d --- ceph-osd/charm-helpers-hooks.yaml | 3 +- ceph-osd/hooks/ceph.py | 133 +- ceph-osd/hooks/ceph_hooks.py | 224 ++- .../contrib/storage/linux/ceph.py | 1195 +++++++++++++++++ ceph-osd/templates/ceph.conf | 2 + ceph-osd/tests/basic_deployment.py | 35 +- ceph-osd/unit_tests/test_upgrade_roll.py | 157 +++ 7 files changed, 1714 insertions(+), 35 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py create mode 100644 ceph-osd/unit_tests/test_upgrade_roll.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 1c484af6..b727a82a 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -5,8 +5,9 @@ include: - cli - fetch - contrib.storage.linux: + - ceph - utils - contrib.openstack.alternatives - contrib.network.ip - contrib.charmsupport - - contrib.hardening|inc=* \ No newline at end of file + - contrib.hardening|inc=* diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 51b06ac8..0b23979b 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -19,11 +19,10 @@ from charmhelpers.core.host import ( mkdir, chownr, - service_restart, cmp_pkgrevno, lsb_release, - service_stop -) + service_stop, + service_restart) from charmhelpers.core.hookenv import ( log, ERROR, @@ -58,6 +57,112 @@ def ceph_user(): return "root" +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_tree(service): + """ + Returns the current osd map in JSON. + :return: List. :raise: ValueError if the monmap fails to parse. 
+ Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + child_ids = json_tree['nodes'][0]['children'] + for child in json_tree['nodes']: + if child['id'] in child_ids: + crush_list.append( + CrushLocation( + name=child.get('name'), + identifier=child['id'], + host=child.get('host'), + rack=child.get('rack'), + row=child.get('row'), + datacenter=child.get('datacenter'), + chassis=child.get('chassis'), + root=child.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_local_osd_ids(): + """ + This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of osd identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + def get_version(): '''Derive Ceph release from an installed package.''' import apt_pkg as apt @@ -308,6 +413,7 @@ def rescan_osd_devices(): _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" def is_bootstrapped(): @@ -333,6 +439,21 @@ def import_osd_bootstrap_key(key): ] subprocess.check_call(cmd) + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -499,7 +620,7 @@ def update_monfs(): def maybe_zap_journal(journal_dev): - if (is_osd_disk(journal_dev)): + if is_osd_disk(journal_dev): log('Looks like {} is already an OSD data' ' or journal, skipping.'.format(journal_dev)) return @@ -543,7 +664,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if (is_osd_disk(dev) and not reformat_osd): + if is_osd_disk(dev) and not reformat_osd: log('Looks like {} is already an' ' OSD data or journal, skipping.'.format(dev)) return @@ -617,7 +738,7 @@ def filesystem_mounted(fs): def get_running_osds(): - '''Returns a list of the pids of the current running OSD daemons''' + """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: result = subprocess.check_output(cmd) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c49b7db1..e508e0e6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -9,12 +9,16 @@ import glob import os +import random import shutil +import subprocess import sys import tempfile import socket +import time import ceph +from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, ERROR, @@ -31,8 +35,8 @@ from 
charmhelpers.core.host import ( umount, mkdir, - cmp_pkgrevno -) + cmp_pkgrevno, + service_stop, service_start) from charmhelpers.fetch import ( add_source, apt_install, @@ -40,25 +44,217 @@ filter_installed_packages, ) from charmhelpers.core.sysctl import create as create_sysctl +from charmhelpers.core import host from utils import ( get_host_ip, get_networks, assert_charm_supports_ipv6, - render_template, -) + render_template) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, ) - +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() +# A dict of valid ceph upgrade paths. Mapping is old -> new +upgrade_paths = { + 'cloud:trusty-juno': 'cloud:trusty-kilo', + 'cloud:trusty-kilo': 'cloud:trusty-liberty', + 'cloud:trusty-liberty': 'cloud:trusty-mitaka', +} + + +def pretty_print_upgrade_paths(): + lines = [] + for key, value in upgrade_paths.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def check_for_upgrade(): + release_info = host.lsb_release() + if not release_info['DISTRIB_CODENAME'] == 'trusty': + log("Invalid upgrade path from {}. Only trusty is currently " + "supported".format(release_info['DISTRIB_CODENAME'])) + return + + c = hookenv.config() + old_version = c.previous('source') + log('old_version: {}'.format(old_version)) + # Strip all whitespace + new_version = hookenv.config('source') + if new_version: + # replace all whitespace + new_version = new_version.replace(' ', '') + log('new_version: {}'.format(new_version)) + + if old_version in upgrade_paths: + if new_version == upgrade_paths[old_version]: + log("{} to {} is a valid upgrade path. Proceeding.".format( + old_version, new_version)) + roll_osd_cluster(new_version) + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. " + "Valid paths are: {}".format(old_version, + new_version, + pretty_print_upgrade_paths())) + + +def lock_and_roll(my_name): + start_timestamp = time.time() + + log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) + monitor_key_set('osd-upgrade', "{}_start".format(my_name), start_timestamp) + log("Rolling") + # This should be quick + upgrade_osd() + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) + monitor_key_set('osd-upgrade', "{}_done".format(my_name), stop_timestamp) + + +def wait_on_previous_node(previous_node): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + 'osd-upgrade', + "{}_done".format(previous_node)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + 'osd-upgrade', + "{}_start".format(previous_node)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. 
Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + 'osd-upgrade', + "{}_done".format(previous_node)) + + +def get_upgrade_position(osd_sorted_list, match_name): + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version): + """ + This is tricky to get right so here's what we're going to do. + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = ceph.get_osd_tree(service='osd-upgrade') + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! 
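Every unit computes the same sorted queue from the CRUSH tree, so exactly one unit sees position 0 and rolls first. A toy check of that invariant, where Node stands in for the CrushLocation entries returned by ceph.get_osd_tree():

    import collections

    Node = collections.namedtuple('Node', ['name'])
    queue = sorted([Node('ip-192-168-1-3'), Node('ip-192-168-1-2')])
    assert get_upgrade_position(queue, 'ip-192-168-1-2') == 0
    assert get_upgrade_position(queue, 'ip-192-168-1-3') == 1

Note that get_upgrade_position() returns None for an unknown name rather than raising, so a missing hostname would surface further down as a TypeError rather than the ValueError the handler expects.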
Roll + # First set a key to inform others I'm about to roll + lock_and_roll(my_name=my_name) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + previous_node=osd_sorted_list[position - 1].name) + lock_and_roll(my_name=my_name) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(): + current_version = ceph.get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_stop('ceph-osd@{}'.format(osd_id)) + else: + service_stop('ceph-osd-all') + apt_install(packages=ceph.PACKAGES, fatal=True) + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_start('ceph-osd@{}'.format(osd_id)) + else: + service_start('ceph-osd-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -126,6 +322,7 @@ def emit_cephconf(): install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 90) + JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' @@ -161,6 +358,9 @@ def check_overlap(journaldevs, datadevs): @hooks.hook('config-changed') @harden() def config_changed(): + # Check if an upgrade was requested + check_for_upgrade() + # Pre-flight checks if config('osd-format') not in ceph.DISK_FORMATS: log('Invalid OSD disk format configuration specified', level=ERROR) @@ -174,7 +374,7 @@ def config_changed(): create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf') e_mountpoint = config('ephemeral-unmount') - if (e_mountpoint and ceph.filesystem_mounted(e_mountpoint)): + if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) prepare_disks_and_activate() @@ -204,8 +404,14 @@ def get_mon_hosts(): hosts = [] for relid in relation_ids('mon'): for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) or \ - get_host_ip(relation_get('private-address', unit, relid)) + addr = \ + relation_get('ceph-public-address', + unit, + relid) or get_host_ip( + relation_get( + 'private-address', + unit, + relid)) if addr: hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) @@ -261,10 +467,12 @@ def get_journal_devices(): 'mon-relation-departed') def mon_relation(): bootstrap_key = relation_get('osd_bootstrap_key') + upgrade_key = relation_get('osd_upgrade_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') emit_cephconf() ceph.import_osd_bootstrap_key(bootstrap_key) + ceph.import_osd_upgrade_key(upgrade_key) prepare_disks_and_activate() else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py 
b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..f4582545 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,1195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# +import bisect +import errno +import hashlib +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) +from charmhelpers.fetch import ( + apt_install, +) + +from charmhelpers.core.kernel import modprobe + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" +# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) +powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. 
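validator() above is assertion-based, so bad input fails loudly at the call site instead of propagating a malformed command to ceph. A short usage sketch:

    import six

    validator(value=1, valid_type=int, valid_range=[0, 2])    # passes
    validator(value='writeback', valid_type=six.string_types,
              valid_range=['readonly', 'writeback'])          # passes
    # validator(value=5, valid_type=int, valid_range=[0, 2])
    #   -> AssertionError: 5 is greater than maximum allowed value of 2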
Provides an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + # Flush the cache and wait for it to return + check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size): + """ + :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for + erasure coded pools + :return: int. The number of pgs to use. 
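Concrete numbers help here. get_pgs' bracketed rule (its body follows) returns a flat figure for small clusters and only computes for larger ones: 40 OSDs fall in the 10-50 bracket and get 4096 placement groups, while 120 OSDs with 3 replicas give an estimate of 4000 that is rounded up via the powers_of_two table:

    import bisect

    powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144,
                     524288, 1048576, 2097152, 4194304, 8388608]
    estimate = (120 * 100) / 3  # 4000
    print(powers_of_two[bisect.bisect_right(powers_of_two, estimate)])  # 8192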
+ """ + validator(value=pool_size, valid_type=int) + osd_list = get_osds(self.service) + if not osd_list: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return 200 + + osd_list_length = len(osd_list) + # Calculate based on Ceph best practices + if osd_list_length < 5: + return 128 + elif 5 < osd_list_length < 10: + return 512 + elif 10 < osd_list_length < 50: + return 4096 + else: + estimate = (osd_list_length * 100) / pool_size + # Return the next nearest power of 2 + index = bisect.bisect_right(powers_of_two, estimate) + return powers_of_two[index] + + +class ReplicatedPool(Pool): + def __init__(self, service, name, pg_num=None, replicas=2): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + if pg_num is None: + self.pg_num = self.get_pgs(self.replicas) + else: + self.pg_num = pg_num + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default"): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information so we can properly size the pgs + erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), + level=ERROR) + raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), + level=ERROR) + raise PoolCreationError( + message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) + + pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output( + ['ceph', '--id', service, + 'mon_status', '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format( + e.message)) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. 
The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param name: + :return: + """ + try: + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. 
The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. 
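All of these pool helpers shell out to the ceph CLI as a named client, so they presuppose a matching keyring under /etc/ceph. A hypothetical sequence tying a few of them together (the 'admin' id and the pool and snapshot names are illustrative):

    # assumes /etc/ceph/ceph.client.admin.keyring exists
    if not pool_exists('admin', 'images'):
        ReplicatedPool(service='admin', name='images', replicas=3).create()
    set_pool_quota('admin', 'images', max_bytes=10 * 1024 ** 3)  # 10 GiB cap
    snapshot_pool('admin', 'images', 'pre-upgrade')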
+ """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + """Create a new Ceph keyring containing key.""" + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('Ceph keyring exists at %s.' % keyring, level=WARNING) + return + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. 
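ensure_ceph_storage() chains most of the primitives above into one guarded flow. A hypothetical single-unit caller (all names illustrative) might look like:

    ensure_ceph_storage(service='mysql', pool='mysql',
                        rbd_img='mysql-data', sizemb=1024,
                        mount_point='/var/lib/mysql',
                        blk_device='/dev/rbd/mysql/mysql-data',
                        fstype='ext4', system_services=['mysql'])

The docstring's warning is load-bearing: two units calling this for the same rbd_img can both format the device, hence the data loss it describes.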
+ if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + """ + key = None + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3, pg_num=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + """ + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. 
+ """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op', 'pg_num']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. 
+ + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids(relation): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
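The conversation documented in the comment block above compresses to a small hook-side pattern; a condensed version (pool name illustrative):

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3)
    if is_request_complete(rq):
        log('ceph broker request complete', level=DEBUG)
        # safe to start using the pool here
    else:
        send_request_if_needed(rq)

Because __eq__ compares the api-version and ops rather than the uuid, a hook re-run builds an "equal" request and is_request_sent() keeps it from being re-broadcast.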
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request, relation='ceph'): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request, relation=relation): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids(relation): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 66da0aca..7fec00e5 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -33,6 +33,8 @@ cluster addr = {{ cluster_addr }} osd crush location = {{crush_location}} {% endif %} +[client.osd-upgrade] +keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 04d8c4d9..630452a5 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -43,8 +43,8 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). 
""" - this_service = {'name': 'ceph-osd'} - other_services = [{'name': 'ceph', 'units': 3}, + this_service = {'name': 'ceph-osd', 'units': 3} + other_services = [{'name': 'ceph-mon', 'units': 3}, {'name': 'mysql'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, @@ -60,18 +60,18 @@ def _add_relations(self): 'nova-compute:shared-db': 'mysql:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph:client', + 'nova-compute:ceph': 'ceph-mon:client', 'keystone:shared-db': 'mysql:shared-db', 'glance:shared-db': 'mysql:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph:client', + 'glance:ceph': 'ceph-mon:client', 'cinder:shared-db': 'mysql:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph:client', - 'ceph-osd:mon': 'ceph:osd' + 'cinder:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd' } super(CephOsdBasicDeployment, self)._add_relations(relations) @@ -86,9 +86,6 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', - 'osd-reformat': 'yes', - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' } # Include a non-existent device as osd-devices is a whitelist, @@ -102,7 +99,7 @@ def _configure_services(self): configs = {'keystone': keystone_config, 'mysql': mysql_config, 'cinder': cinder_config, - 'ceph': ceph_config, + 'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config} super(CephOsdBasicDeployment, self)._configure_services(configs) @@ -115,10 +112,12 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry.unit['nova-compute/0'] self.glance_sentry = self.d.sentry.unit['glance/0'] self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] + self.ceph_osd1_sentry = self.d.sentry.unit['ceph-osd/1'] + self.ceph_osd2_sentry = self.d.sentry.unit['ceph-osd/2'] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -177,7 +176,6 @@ def test_100_ceph_processes(self): # Process name and quantity of processes to expect on each unit ceph_processes = { 'ceph-mon': 1, - 'ceph-osd': 2 } # Units with process names and PID quantities expected @@ -214,9 +212,6 @@ def test_102_services(self): ceph_services = [ 'ceph-mon-all', 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] services[self.ceph0_sentry] = ceph_services services[self.ceph1_sentry] = ceph_services @@ -233,16 +228,16 @@ def test_102_services(self): def test_200_ceph_osd_ceph_relation(self): """Verify the ceph-osd to ceph relation data.""" - u.log.debug('Checking ceph-osd:ceph mon relation data...') + u.log.debug('Checking ceph-osd:ceph-mon relation data...') unit = self.ceph_osd_sentry - relation = ['mon', 'ceph:osd'] + relation = ['mon', 'ceph-mon:osd'] expected = { 'private-address': 
u.valid_ip } ret = u.validate_relation_data(unit, relation, expected) if ret: - message = u.relation_error('ceph-osd to ceph', ret) + message = u.relation_error('ceph-osd to ceph-mon', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_201_ceph0_to_ceph_osd_relation(self): diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py new file mode 100644 index 00000000..840e247c --- /dev/null +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -0,0 +1,157 @@ +import time + +__author__ = 'chris' +from mock import patch, call, MagicMock +import sys + +sys.path.append('/home/chris/repos/ceph-osd/hooks') + +from ceph import CrushLocation + +import test_utils +import ceph_hooks + +TO_PATCH = [ + 'apt_install', + 'apt_update', + 'add_source', + 'config', + 'ceph', + 'get_conf', + 'hookenv', + 'host', + 'log', + 'service_start', + 'service_stop', + 'socket', + 'status_set', +] + + +def config_side_effect(*args): + if args[0] == 'source': + return 'cloud:trusty-kilo' + elif args[0] == 'key': + return 'key' + elif args[0] == 'release-version': + return 'cloud:trusty-kilo' + + +previous_node_start_time = time.time() - (9 * 60) + + +def monitor_key_side_effect(*args): + if args[1] == \ + 'ip-192-168-1-2_done': + return False + elif args[1] == \ + 'ip-192-168-1-2_start': + # Return that the previous node started 9 minutes ago + return previous_node_start_time + + +class UpgradeRollingTestCase(test_utils.CharmTestCase): + def setUp(self): + super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) + + @patch('ceph_hooks.roll_osd_cluster') + def test_check_for_upgrade(self, roll_osd_cluster): + self.host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + self.hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + ceph_hooks.check_for_upgrade() + + roll_osd_cluster.assert_called_with('cloud:trusty-kilo') + + @patch('ceph_hooks.upgrade_osd') + @patch('ceph_hooks.monitor_key_set') + def test_lock_and_roll(self, monitor_key_set, upgrade_osd): + monitor_key_set.monitor_key_set.return_value = None + ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') + upgrade_osd.assert_called_once_with() + + def test_upgrade_osd(self): + self.config.side_effect = config_side_effect + self.ceph.get_version.return_value = "0.80" + self.ceph.systemd.return_value = False + ceph_hooks.upgrade_osd() + self.service_stop.assert_called_with('ceph-osd-all') + self.service_start.assert_called_with('ceph-osd-all') + self.status_set.assert_has_calls([ + call('maintenance', 'Upgrading osd'), + ]) + + @patch('ceph_hooks.lock_and_roll') + @patch('ceph_hooks.get_upgrade_position') + def test_roll_osd_cluster_first(self, + get_upgrade_position, + lock_and_roll): + self.socket.gethostname.return_value = "ip-192-168-1-2" + self.ceph.get_osd_tree.return_value = "" + get_upgrade_position.return_value = 0 + ceph_hooks.roll_osd_cluster('0.94.1') + lock_and_roll.assert_called_with(my_name="ip-192-168-1-2") + + @patch('ceph_hooks.lock_and_roll') + @patch('ceph_hooks.get_upgrade_position') + @patch('ceph_hooks.wait_on_previous_node') + def test_roll_osd_cluster_second(self, + wait_on_previous_node, + get_upgrade_position, + lock_and_roll): + wait_on_previous_node.return_value = None + self.socket.gethostname.return_value = "ip-192-168-1-3" + self.ceph.get_osd_tree.return_value = [ + CrushLocation( + name="ip-192-168-1-2", + identifier='a', + host='host-a', + 
rack='rack-a', + row='row-a', + datacenter='dc-1', + chassis='chassis-a', + root='ceph'), + CrushLocation( + name="ip-192-168-1-3", + identifier='a', + host='host-b', + rack='rack-a', + row='row-a', + datacenter='dc-1', + chassis='chassis-a', + root='ceph') + ] + get_upgrade_position.return_value = 1 + ceph_hooks.roll_osd_cluster('0.94.1') + self.status_set.assert_called_with( + 'blocked', + 'Waiting on ip-192-168-1-2 to finish upgrading') + lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + + @patch('ceph_hooks.monitor_key_get') + @patch('ceph_hooks.monitor_key_exists') + def test_wait_on_previous_node(self, + monitor_key_exists, + monitor_key_get): + monitor_key_get.side_effect = monitor_key_side_effect + monitor_key_exists.return_value = False + + ceph_hooks.wait_on_previous_node("ip-192-168-1-2") + + # Make sure we checked to see if the previous node started + monitor_key_get.assert_has_calls( + [call('osd-upgrade', 'ip-192-168-1-2_start')] + ) + # Make sure we checked to see if the previous node was finished + monitor_key_exists.assert_has_calls( + [call('osd-upgrade', 'ip-192-168-1-2_done')] + ) + # Make sure we waited at last once before proceeding + self.log.assert_has_calls( + [call('Previous node is: ip-192-168-1-2')], + [call('ip-192-168-1-2 is not finished. Waiting')], + ) From 7f5a74b6eae3232e157a082ed958ce8f2b9c01f5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 17 Mar 2016 08:21:03 -0400 Subject: [PATCH 1089/2699] Add support for availability zones to the crush map If the deployemnt environment supports it, and the user would like to, we can use Juju's Availability Zone information to setup the Ceph cluster to use Availability Zones as the failure domain instead of the host. Change-Id: I4566696750b388918761ded0ed5beb0bf82ff501 Depends-On: Ie25ac1b001db558d6a40fe3eaca014e8f4174241 --- ceph-proxy/config.yaml | 6 ++++++ ceph-proxy/hooks/ceph_hooks.py | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 30abb8a6..57dd1913 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -104,6 +104,12 @@ options: threads-max to a high value to avoid problems with large numbers (>20) of OSDs recovering. very large clusters should set those values even higher (e.g. max for kernel.pid_max is 4194303). + customize-failure-domain: + type: boolean + default: false + description: | + Setting this to true will tell Ceph to replicate across Juju's + Availability Zone instead of specifically by host. 
nagios_context: type: string default: "juju" diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 354c155c..4ee5907c 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -417,6 +417,33 @@ def mon_relation(): ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() ceph.wait_for_quorum() + # If we can and want to + if is_leader() and config('customize-failure-domain'): + # But only if the environment supports it + if os.environ.get('JUJU_AVAILABILITY_ZONE'): + cmds = [ + "ceph osd getcrushmap -o /tmp/crush.map", + "crushtool -d /tmp/crush.map| " + "sed 's/step chooseleaf firstn 0 type host/step " + "chooseleaf firstn 0 type rack/' > " + "/tmp/crush.decompiled", + "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", + "crushtool -i /tmp/crush.map --test", + "ceph osd setcrushmap -i /tmp/crush.map" + ] + for cmd in cmds: + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + log("Failed to modify crush map:", level='error') + log("Cmd: {}".format(cmd), level='error') + log("Error: {}".format(e.output), level='error') + break + else: + log( + "Your Juju environment doesn't" + "have support for Availability Zones" + ) notify_osds() notify_radosgws() notify_client() From a1d33b5dadbf18c00487672d685fb86a27de8d9d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 17 Mar 2016 08:21:03 -0400 Subject: [PATCH 1090/2699] Add support for availability zones to the crush map If the deployemnt environment supports it, and the user would like to, we can use Juju's Availability Zone information to setup the Ceph cluster to use Availability Zones as the failure domain instead of the host. Change-Id: I4566696750b388918761ded0ed5beb0bf82ff501 Depends-On: Ie25ac1b001db558d6a40fe3eaca014e8f4174241 --- ceph-mon/config.yaml | 6 ++++++ ceph-mon/hooks/ceph_hooks.py | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 30abb8a6..57dd1913 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -104,6 +104,12 @@ options: threads-max to a high value to avoid problems with large numbers (>20) of OSDs recovering. very large clusters should set those values even higher (e.g. max for kernel.pid_max is 4194303). + customize-failure-domain: + type: boolean + default: false + description: | + Setting this to true will tell Ceph to replicate across Juju's + Availability Zone instead of specifically by host. 
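The crush-map rewrite in the hook above is a fixed five-step pipeline: dump the map, decompile-and-sed it, recompile, sanity-test, and inject it back. Condensed into one helper (a sketch; it assumes ceph and crushtool are on PATH and should run on the lead unit only):

    import subprocess

    def replicate_across(bucket_type='rack'):
        """Spread replicas across `bucket_type` buckets instead of hosts."""
        for cmd in [
            "ceph osd getcrushmap -o /tmp/crush.map",
            "crushtool -d /tmp/crush.map | "
            "sed 's/step chooseleaf firstn 0 type host/"
            "step chooseleaf firstn 0 type {}/' "
            "> /tmp/crush.decompiled".format(bucket_type),
            "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
            "crushtool -i /tmp/crush.map --test",
            "ceph osd setcrushmap -i /tmp/crush.map",
        ]:
            subprocess.check_call(cmd, shell=True)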
nagios_context: type: string default: "juju" diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 354c155c..4ee5907c 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -417,6 +417,33 @@ def mon_relation(): ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() ceph.wait_for_quorum() + # If we can and want to + if is_leader() and config('customize-failure-domain'): + # But only if the environment supports it + if os.environ.get('JUJU_AVAILABILITY_ZONE'): + cmds = [ + "ceph osd getcrushmap -o /tmp/crush.map", + "crushtool -d /tmp/crush.map| " + "sed 's/step chooseleaf firstn 0 type host/step " + "chooseleaf firstn 0 type rack/' > " + "/tmp/crush.decompiled", + "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", + "crushtool -i /tmp/crush.map --test", + "ceph osd setcrushmap -i /tmp/crush.map" + ] + for cmd in cmds: + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + log("Failed to modify crush map:", level='error') + log("Cmd: {}".format(cmd), level='error') + log("Error: {}".format(e.output), level='error') + break + else: + log( + "Your Juju environment doesn't" + "have support for Availability Zones" + ) notify_osds() notify_radosgws() notify_client() From bdd9b7e9b2da7a4d880f3ea3fffb43ad5fca37d5 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 15:03:51 +0100 Subject: [PATCH 1091/2699] Check for Keystone apache2 process for liberty+ The keystone charm recently changed to run keystone as a wsgi process under Apache2; refactor amulet test to ensure that apache2 is checked instead of keystone for >= liberty. Change-Id: Ie61cc1ecae4b55e6e12b249b7765b01644126c7c --- ceph-proxy/tests/basic_deployment.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 63ddca40..c890f210 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -224,6 +224,9 @@ def test_102_services(self): services[self.ceph_osd_sentry] = ceph_osd_services + if self._get_openstack_release() >= self.trusty_liberty: + services[self.keystone_sentry] = ['apache2'] + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) From c6822697534a34fb5dea991d8e77654d555f4ffd Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 15:03:51 +0100 Subject: [PATCH 1092/2699] Check for Keystone apache2 process for liberty+ The keystone charm recently changed to run keystone as a wsgi process under Apache2; refactor amulet test to ensure that apache2 is checked instead of keystone for >= liberty. 
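The refactor lands as a one-line, release-gated expectation in each amulet test (the hunks follow below); in isolation the idiom is:

    if self._get_openstack_release() >= self.trusty_liberty:
        # from liberty onwards keystone runs as a wsgi app under
        # apache2, so expect that process instead of a keystone daemon
        services[self.keystone_sentry] = ['apache2']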
Change-Id: Ie61cc1ecae4b55e6e12b249b7765b01644126c7c --- ceph-mon/tests/basic_deployment.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 63ddca40..c890f210 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -224,6 +224,9 @@ def test_102_services(self): services[self.ceph_osd_sentry] = ceph_osd_services + if self._get_openstack_release() >= self.trusty_liberty: + services[self.keystone_sentry] = ['apache2'] + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) From 0b1159a91232f5e5cb8bdbb85d83a8dd264c7a46 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 15:08:32 +0100 Subject: [PATCH 1093/2699] Check for Keystone apache2 process for liberty+ The keystone charm recently changed to run keystone as a wsgi process under Apache2; refactor amulet test to ensure that apache2 is checked instead of keystone for >= liberty. Change-Id: Ide7c6e6349b80662677c6d9f3ef3e84b09b18b9b --- ceph-osd/tests/basic_deployment.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 630452a5..87e236cb 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -222,6 +222,9 @@ def test_102_services(self): 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) ] + if self._get_openstack_release() >= self.trusty_liberty: + services[self.keystone_sentry] = ['apache2'] + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) From fe3b9cfd5cb3812eff82da867a2a356e66bec526 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 11:44:46 +0100 Subject: [PATCH 1094/2699] Add support for Juju network spaces Juju 2.0 provides support for network spaces, allowing charm authors to support direct binding of relations and extra-bindings onto underlying network spaces. Add public and cluster extra bindings to this charm to support separation of client facing and cluster network traffic using Juju network spaces. Existing network configuration options will still be preferred over any Juju provided network bindings, ensuring that upgrades to existing deployments don't break. 
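The fallback chain described above is simple; as a self-contained illustration (stand-in values, not charmhelpers code):

    def pick_addr(network_cfg_addr, space_addr, host_ip):
        # An explicit ceph-*-network option wins, then the Juju network
        # space binding, then the unit's default address (the pre-2.0
        # behaviour, which keeps upgraded deployments working).
        return network_cfg_addr or space_addr or host_ip

    # Upgraded deployment that still sets ceph-public-network:
    assert pick_addr('10.5.0.11', '10.20.40.2', '192.168.2.1') == '10.5.0.11'
    # Fresh Juju 2.0 deployment relying purely on a binding:
    assert pick_addr(None, '10.20.40.2', '192.168.2.1') == '10.20.40.2'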
Change-Id: If4ce1ef545638130cb7e5f0d77b949d9b2e28090 --- ceph-proxy/.project | 2 +- ceph-proxy/.pydevproject | 5 +- ceph-proxy/README.md | 23 +++++++++ ceph-proxy/hooks/ceph_hooks.py | 4 ++ ceph-proxy/hooks/utils.py | 32 ++++++++++-- ceph-proxy/metadata.yaml | 3 ++ ceph-proxy/unit_tests/test_ceph_networking.py | 51 +++++++++++++++++++ 7 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 ceph-proxy/unit_tests/test_ceph_networking.py diff --git a/ceph-proxy/.project b/ceph-proxy/.project index be5d4201..17434fc2 100644 --- a/ceph-proxy/.project +++ b/ceph-proxy/.project @@ -1,6 +1,6 @@ - ceph + ceph-mon diff --git a/ceph-proxy/.pydevproject b/ceph-proxy/.pydevproject index 998e0aa1..683d89d8 100644 --- a/ceph-proxy/.pydevproject +++ b/ceph-proxy/.pydevproject @@ -3,6 +3,9 @@ python 2.7 Default -/ceph/hooks +/ceph-mon/hooks +/ceph-mon/unit_tests +/ceph-mon/tests +/ceph-mon/actions diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index a66ca060..5d66b597 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -54,6 +54,29 @@ You can use the Ceph OSD and Ceph Radosgw charms: - [Ceph OSD](https://jujucharms.com/precise/ceph-osd) - [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) +## Network Space support + +This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. + +Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: + + juju deploy ceph-mon --bind "public=data-space cluster=cluster-space" + +Alternatively, these can also be provided as part of a Juju native bundle configuration: + + ceph-mon: + charm: cs:xenial/ceph-mon + num_units: 1 + bindings: + public: data-space + cluster: cluster-space + +Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options affects network traffic within a Ceph deployment. + +**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. + +**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
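To confirm which address a binding resolved to on a deployed unit, the hook tool that the charm's helpers wrap can be invoked directly; a hedged example (requires Juju 2.0+, and the exact CLI varies between Juju releases):

    juju run --unit ceph-mon/0 'network-get --primary-address public'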
+ # Contact Information ## Authors diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 0f168ead..3769f524 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -66,6 +66,7 @@ from utils import ( get_networks, get_public_addr, + get_cluster_addr, assert_charm_supports_ipv6 ) from ceph_broker import ( @@ -305,6 +306,9 @@ def emit_cephconf(): cephcontext['public_addr'] = dynamic_ipv6_address if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address + else: + cephcontext['public_addr'] = get_public_addr() + cephcontext['cluster_addr'] = get_cluster_addr() # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 9b42159a..b61912a9 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -14,6 +14,8 @@ cached, config, status_set, + network_get_primary_address, + log, DEBUG, ) from charmhelpers.fetch import ( apt_install, @@ -72,6 +74,32 @@ def get_host_ip(hostname=None): return answers[0].address +@cached +def get_public_addr(): + if config('ceph-public-network'): + return get_network_addrs('ceph-public-network')[0] + + try: + return network_get_primary_address('public') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + +@cached +def get_cluster_addr(): + if config('ceph-cluster-network'): + return get_network_addrs('ceph-cluster-network')[0] + + try: + return network_get_primary_address('cluster') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + def get_networks(config_opt='ceph-public-network'): """Get all configured networks from provided config option. @@ -86,10 +114,6 @@ def get_networks(config_opt='ceph-public-network'): return [] -def get_public_addr(): - return get_network_addrs('ceph-public-network')[0] - - def get_network_addrs(config_opt): """Get all configured public networks addresses. 
diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 238ef07b..9c3969dd 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,6 +12,9 @@ tags: peers: mon: interface: ceph +extra-bindings: + public: + cluster: provides: nrpe-external-master: interface: nrpe-external-master diff --git a/ceph-proxy/unit_tests/test_ceph_networking.py b/ceph-proxy/unit_tests/test_ceph_networking.py new file mode 100644 index 00000000..ae3a7ff5 --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph_networking.py @@ -0,0 +1,51 @@ +import test_utils +import charmhelpers.core.hookenv as hookenv +import utils as ceph_utils + +TO_PATCH_SPACES = [ + 'network_get_primary_address', + 'log', + 'get_host_ip', + 'config', + 'get_network_addrs', + 'cached', +] + + +class CephNetworkSpaceTestCase(test_utils.CharmTestCase): + def setUp(self): + super(CephNetworkSpaceTestCase, self).setUp(ceph_utils, + TO_PATCH_SPACES) + self.config.side_effect = self.test_config.get + + def tearDown(self): + # Reset @cached cache + hookenv.cache = {} + + def test_no_network_space_support(self): + self.get_host_ip.return_value = '192.168.2.1' + self.network_get_primary_address.side_effect = NotImplementedError + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.168.2.1') + self.assertEqual(ceph_utils.get_public_addr(), + '192.168.2.1') + + def test_public_network_space(self): + self.network_get_primary_address.return_value = '10.20.40.2' + self.assertEqual(ceph_utils.get_public_addr(), + '10.20.40.2') + self.network_get_primary_address.assert_called_with('public') + self.config.assert_called_with('ceph-public-network') + + def test_cluster_network_space(self): + self.network_get_primary_address.return_value = '10.20.50.2' + self.assertEqual(ceph_utils.get_cluster_addr(), + '10.20.50.2') + self.network_get_primary_address.assert_called_with('cluster') + self.config.assert_called_with('ceph-cluster-network') + + def test_config_options_in_use(self): + self.get_network_addrs.return_value = ['192.122.20.2'] + self.test_config.set('ceph-cluster-network', '192.122.20.0/24') + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.122.20.2') From 552ca0b44aa761031161a51f1d1af8cfba4bf995 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 11:44:46 +0100 Subject: [PATCH 1095/2699] Add support for Juju network spaces Juju 2.0 provides support for network spaces, allowing charm authors to support direct binding of relations and extra-bindings onto underlying network spaces. Add public and cluster extra bindings to this charm to support separation of client facing and cluster network traffic using Juju network spaces. Existing network configuration options will still be preferred over any Juju provided network bindings, ensuring that upgrades to existing deployments don't break. 
Change-Id: If4ce1ef545638130cb7e5f0d77b949d9b2e28090 --- ceph-mon/.project | 2 +- ceph-mon/.pydevproject | 5 +- ceph-mon/README.md | 23 ++++++++++ ceph-mon/hooks/ceph_hooks.py | 4 ++ ceph-mon/hooks/utils.py | 32 +++++++++++-- ceph-mon/metadata.yaml | 3 ++ ceph-mon/unit_tests/test_ceph_networking.py | 51 +++++++++++++++++++++ 7 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 ceph-mon/unit_tests/test_ceph_networking.py diff --git a/ceph-mon/.project b/ceph-mon/.project index be5d4201..17434fc2 100644 --- a/ceph-mon/.project +++ b/ceph-mon/.project @@ -1,6 +1,6 @@ - ceph + ceph-mon diff --git a/ceph-mon/.pydevproject b/ceph-mon/.pydevproject index 998e0aa1..683d89d8 100644 --- a/ceph-mon/.pydevproject +++ b/ceph-mon/.pydevproject @@ -3,6 +3,9 @@ python 2.7 Default -/ceph/hooks +/ceph-mon/hooks +/ceph-mon/unit_tests +/ceph-mon/tests +/ceph-mon/actions diff --git a/ceph-mon/README.md b/ceph-mon/README.md index a66ca060..5d66b597 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -54,6 +54,29 @@ You can use the Ceph OSD and Ceph Radosgw charms: - [Ceph OSD](https://jujucharms.com/precise/ceph-osd) - [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) +## Network Space support + +This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. + +Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: + + juju deploy ceph-mon --bind "public=data-space cluster=cluster-space" + +Alternatively, these can also be provided as part of a Juju native bundle configuration: + + ceph-mon: + charm: cs:xenial/ceph-mon + num_units: 1 + bindings: + public: data-space + cluster: cluster-space + +Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options affects network traffic within a Ceph deployment. + +**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. + +**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
+ # Contact Information ## Authors diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 0f168ead..3769f524 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -66,6 +66,7 @@ from utils import ( get_networks, get_public_addr, + get_cluster_addr, assert_charm_supports_ipv6 ) from ceph_broker import ( @@ -305,6 +306,9 @@ def emit_cephconf(): cephcontext['public_addr'] = dynamic_ipv6_address if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address + else: + cephcontext['public_addr'] = get_public_addr() + cephcontext['cluster_addr'] = get_cluster_addr() # Install ceph.conf as an alternative to support # co-existence with other charms that write this file diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 9b42159a..b61912a9 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -14,6 +14,8 @@ cached, config, status_set, + network_get_primary_address, + log, DEBUG, ) from charmhelpers.fetch import ( apt_install, @@ -72,6 +74,32 @@ def get_host_ip(hostname=None): return answers[0].address +@cached +def get_public_addr(): + if config('ceph-public-network'): + return get_network_addrs('ceph-public-network')[0] + + try: + return network_get_primary_address('public') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + +@cached +def get_cluster_addr(): + if config('ceph-cluster-network'): + return get_network_addrs('ceph-cluster-network')[0] + + try: + return network_get_primary_address('cluster') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + def get_networks(config_opt='ceph-public-network'): """Get all configured networks from provided config option. @@ -86,10 +114,6 @@ def get_networks(config_opt='ceph-public-network'): return [] -def get_public_addr(): - return get_network_addrs('ceph-public-network')[0] - - def get_network_addrs(config_opt): """Get all configured public networks addresses. 
diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 238ef07b..9c3969dd 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,6 +12,9 @@ tags: peers: mon: interface: ceph +extra-bindings: + public: + cluster: provides: nrpe-external-master: interface: nrpe-external-master diff --git a/ceph-mon/unit_tests/test_ceph_networking.py b/ceph-mon/unit_tests/test_ceph_networking.py new file mode 100644 index 00000000..ae3a7ff5 --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_networking.py @@ -0,0 +1,51 @@ +import test_utils +import charmhelpers.core.hookenv as hookenv +import utils as ceph_utils + +TO_PATCH_SPACES = [ + 'network_get_primary_address', + 'log', + 'get_host_ip', + 'config', + 'get_network_addrs', + 'cached', +] + + +class CephNetworkSpaceTestCase(test_utils.CharmTestCase): + def setUp(self): + super(CephNetworkSpaceTestCase, self).setUp(ceph_utils, + TO_PATCH_SPACES) + self.config.side_effect = self.test_config.get + + def tearDown(self): + # Reset @cached cache + hookenv.cache = {} + + def test_no_network_space_support(self): + self.get_host_ip.return_value = '192.168.2.1' + self.network_get_primary_address.side_effect = NotImplementedError + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.168.2.1') + self.assertEqual(ceph_utils.get_public_addr(), + '192.168.2.1') + + def test_public_network_space(self): + self.network_get_primary_address.return_value = '10.20.40.2' + self.assertEqual(ceph_utils.get_public_addr(), + '10.20.40.2') + self.network_get_primary_address.assert_called_with('public') + self.config.assert_called_with('ceph-public-network') + + def test_cluster_network_space(self): + self.network_get_primary_address.return_value = '10.20.50.2' + self.assertEqual(ceph_utils.get_cluster_addr(), + '10.20.50.2') + self.network_get_primary_address.assert_called_with('cluster') + self.config.assert_called_with('ceph-cluster-network') + + def test_config_options_in_use(self): + self.get_network_addrs.return_value = ['192.122.20.2'] + self.test_config.set('ceph-cluster-network', '192.122.20.0/24') + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.122.20.2') From b04c1089ef9f72b18d2ca3afca3f7ff633bd4699 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 8 Apr 2016 11:25:18 +0000 Subject: [PATCH 1096/2699] Pause/resume for ceph-osd charm This changeset provides pause and resume actions to the ceph-osd charm. The pause action issues a 'ceph osd out ' for each of the ceph osd ids that are on the unit. The action does not stop the ceph osd processes. Note that if the pause-health action is NOT used on the ceph-mon charm then the cluster will start trying to rebalance the PGs across the remaining OSDs. If the cluster might reach its 'full ratio' then this will be a breaking action. The charm does NOT check for this eventuality. The resume action issues a 'ceph osd in ' for each of the local ceph osd processes on the unit. The charm 'remembers' that a pause action was issued, and if successful, it shows a 'maintenance' workload status as a reminder.
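Combined with pause-health on the ceph-mon charm, a typical maintenance flow looks roughly like this sketch (action invocation syntax differs between Juju versions, and resume-health is assumed here to be the counterpart of pause-health):

    juju action do ceph-mon/0 pause-health    # set noout so PGs stay put
    juju action do ceph-osd/1 pause           # mark this unit's osds 'out'
    # ... perform maintenance on the machine ...
    juju action do ceph-osd/1 resume          # mark the osds 'in' again
    juju action do ceph-mon/0 resume-health   # clear noout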
Change-Id: I9f53c9c6c4bb737670ffcd542acec0b320cc7f6a --- ceph-osd/actions.yaml | 26 ++++++ ceph-osd/actions/pause | 1 + ceph-osd/actions/pause_resume.py | 74 +++++++++++++++++ ceph-osd/actions/resume | 1 + ceph-osd/hooks/ceph.py | 12 ++- ceph-osd/hooks/ceph_hooks.py | 15 +++- ceph-osd/hooks/utils.py | 41 +++++++++- ceph-osd/tests/basic_deployment.py | 16 ++++ .../unit_tests/test_actions_pause_resume.py | 79 +++++++++++++++++++ 9 files changed, 260 insertions(+), 5 deletions(-) create mode 120000 ceph-osd/actions/pause create mode 100755 ceph-osd/actions/pause_resume.py create mode 120000 ceph-osd/actions/resume create mode 100644 ceph-osd/unit_tests/test_actions_pause_resume.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index be586451..65ff76ab 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -1,3 +1,29 @@ +pause: + description: | + CAUTION - Sets the local osd units in the charm to 'out' but does not stop + the osds. Unless the osd cluster is set to noout (see below), this removes + them from the ceph cluster and forces ceph to migrate the PGs to other OSDs + in the cluster. See the following: + + http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd + "Do not let your cluster reach its full ratio when removing an OSD. + Removing OSDs could cause the cluster to reach or exceed its full ratio." + Also note that for small clusters you may encounter the corner case where + some PGs remain stuck in the active+remapped state. Refer to the above link + on how to resolve this. + + pause-health (on a ceph-mon unit) can be used before pausing a ceph-osd + unit to stop the cluster rebalancing the data off this ceph-osd unit. + pause-health sets 'noout' on the cluster such that it will not try to + rebalance the data across the remaining units. + + It is up to the user of the charm to determine whether pause-health should + be used as it depends on whether the osd is being paused for maintenance or + to remove it from the cluster completely. +resume: + description: | + Sets the local osd units in the charm to 'in'. Note that the pause option + does NOT stop the osd processes. replace-osd: description: Replace a failed osd with a fresh disk params: diff --git a/ceph-osd/actions/pause b/ceph-osd/actions/pause new file mode 120000 index 00000000..bd4c0e00 --- /dev/null +++ b/ceph-osd/actions/pause @@ -0,0 +1 @@ +pause_resume.py \ No newline at end of file diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py new file mode 100755 index 00000000..68149f34 --- /dev/null +++ b/ceph-osd/actions/pause_resume.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# pause/resume actions file. + +import os +import sys +from subprocess import check_call + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import ( + action_fail, +) + +from ceph import get_local_osd_ids +from ceph_hooks import assess_status + +from utils import ( + set_unit_paused, + clear_unit_paused, +) + + +def pause(args): + """Pause the ceph-osd units on the local machine only. + + Marks each osd on this unit 'out'. If any osd cannot be marked out then + the paused status is not set. + + @raises CalledProcessError if the ceph commands fail. + @raises OSError if it can't get the local osd ids.
+ """ + for local_id in get_local_osd_ids(): + cmd = ['ceph', 'osd', 'out', str(local_id)] + check_call(cmd) + set_unit_paused() + assess_status() + + +def resume(args): + """Resume the ceph-osd units on this local machine only. + + @raises subprocess.CalledProcessError should the osd units fail to resume. + @raises OSError if the unit can't get the local osd ids + """ + for local_id in get_local_osd_ids(): + cmd = ['ceph', 'osd', 'in', str(local_id)] + check_call(cmd) + clear_unit_paused() + assess_status() + + +# A dictionary of all the defined actions to callables (which take +# parsed arguments). +ACTIONS = {"pause": pause, "resume": resume} + + +def main(args): + action_name = os.path.basename(args[0]) + try: + action = ACTIONS[action_name] + except KeyError: + s = "Action {} undefined".format(action_name) + action_fail(s) + return s + else: + try: + action(args) + except Exception as e: + action_fail("Action {} failed: {}".format(action_name, str(e))) + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/ceph-osd/actions/resume b/ceph-osd/actions/resume new file mode 120000 index 00000000..bd4c0e00 --- /dev/null +++ b/ceph-osd/actions/resume @@ -0,0 +1 @@ +pause_resume.py \ No newline at end of file diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 0b23979b..d51ea400 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -157,12 +157,22 @@ def get_local_osd_ids(): dirs = os.listdir(osd_path) for osd_dir in dirs: osd_id = osd_dir.split('-')[1] - osd_ids.append(osd_id) + if _is_int(osd_id): + osd_ids.append(osd_id) except OSError: raise return osd_ids +def _is_int(v): + """Return True if the object v can be turned into an integer.""" + try: + int(v) + return True + except ValueError: + return False + + def get_version(): '''Derive Ceph release from an installed package.''' import apt_pkg as apt diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index e508e0e6..5ba176cf 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -36,7 +36,9 @@ umount, mkdir, cmp_pkgrevno, - service_stop, service_start) + service_stop, + service_start +) from charmhelpers.fetch import ( add_source, apt_install, @@ -50,7 +52,9 @@ get_host_ip, get_networks, assert_charm_supports_ipv6, - render_template) + render_template, + is_unit_paused_set, +) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( @@ -506,7 +510,12 @@ def update_nrpe_config(): def assess_status(): - '''Assess status of current unit''' + """Assess status of current unit""" + # check to see if the unit is paused. + if is_unit_paused_set(): + status_set('maintenance', + "Paused. 
Use 'resume' action to resume normal service.") + return # Check for mon relation if len(relation_ids('mon')) < 1: status_set('blocked', 'Missing relation: monitor') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 0071ecbd..b6fa3744 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -12,8 +12,9 @@ from charmhelpers.core.hookenv import ( unit_get, cached, - config + config, ) +from charmhelpers.core import unitdata from charmhelpers.fetch import ( apt_install, filter_installed_packages @@ -106,3 +107,41 @@ def assert_charm_supports_ipv6(): if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") + + +# copied from charmhelpers.contrib.openstack.utils so that the charm does not +# need the entire set of dependencies that that module would otherwise bring +# in from charmhelpers. +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing), + if this raises an exception, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-paused'))) + except Exception: + return False diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 87e236cb..9e522391 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -634,3 +634,19 @@ def test_900_ceph_encryption(self): '(%s < on %s) on %s' % (file_mtime, mtime, unit_name)) amulet.raise_status('Folder mtime is older than provided mtime') + + def test_910_pause_and_resume(self): + """The services can be paused and resumed. """ + u.log.debug('Checking pause and resume actions...') + sentry_unit = self.ceph_osd_sentry + + assert u.status_get(sentry_unit)[0] == "active" + + action_id = u.run_action(sentry_unit, "pause") + assert u.wait_on_action(action_id), "Pause action failed." + assert u.status_get(sentry_unit)[0] == "maintenance" + + action_id = u.run_action(sentry_unit, "resume") + assert u.wait_on_action(action_id), "Resume action failed."
+ assert u.status_get(sentry_unit)[0] == "active" + u.log.debug('OK') diff --git a/ceph-osd/unit_tests/test_actions_pause_resume.py b/ceph-osd/unit_tests/test_actions_pause_resume.py new file mode 100644 index 00000000..43c3aafc --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_pause_resume.py @@ -0,0 +1,79 @@ +import mock + +import sys + +from test_utils import CharmTestCase + +sys.path.append('hooks') + +import pause_resume as actions + + +class PauseTestCase(CharmTestCase): + + def setUp(self): + super(PauseTestCase, self).setUp( + actions, ["check_call", + "get_local_osd_ids", + "set_unit_paused", + "assess_status"]) + + def test_pauses_services(self): + self.get_local_osd_ids.return_value = [5] + actions.pause([]) + cmd = ['ceph', 'osd', 'out', '5'] + self.check_call.assert_called_once_with(cmd) + self.set_unit_paused.assert_called_once_with() + self.assess_status.assert_called_once_with() + + +class ResumeTestCase(CharmTestCase): + + def setUp(self): + super(ResumeTestCase, self).setUp( + actions, ["check_call", + "get_local_osd_ids", + "clear_unit_paused", + "assess_status"]) + + def test_pauses_services(self): + self.get_local_osd_ids.return_value = [5] + actions.resume([]) + cmd = ['ceph', 'osd', 'in', '5'] + self.check_call.assert_called_once_with(cmd) + self.clear_unit_paused.assert_called_once_with() + self.assess_status.assert_called_once_with() + + +class MainTestCase(CharmTestCase): + + def setUp(self): + super(MainTestCase, self).setUp(actions, ["action_fail"]) + + def test_invokes_action(self): + dummy_calls = [] + + def dummy_action(args): + dummy_calls.append(True) + + with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): + actions.main(["foo"]) + self.assertEqual(dummy_calls, [True]) + + def test_unknown_action(self): + """Unknown actions aren't a traceback.""" + exit_string = actions.main(["foo"]) + self.assertEqual("Action foo undefined", exit_string) + + def test_failing_action(self): + """Actions which traceback trigger action_fail() calls.""" + dummy_calls = [] + + self.action_fail.side_effect = dummy_calls.append + + def dummy_action(args): + raise ValueError("uh oh") + + with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): + actions.main(["foo"]) + self.assertEqual(dummy_calls, ["Action foo failed: uh oh"]) From 9593cf0a1aa7433cc394b29c6cc1a7feccbace64 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Apr 2016 14:29:15 +0100 Subject: [PATCH 1097/2699] Add support for Juju network spaces Juju 2.0 provides support for network spaces, allowing charm authors to support direct binding of relations and extra-bindings onto underlying network spaces. Add public and cluster extra bindings to this charm to support separation of client facing and cluster network traffic using Juju network spaces. Existing network configuration options will still be preferred over any Juju provided network bindings, ensuring that upgrades to existing deployments don't break. 
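In the ceph-osd hunks that follow, the network-space addresses land in the same template context as the availability-zone crush location the charm already emits; a rough sketch of how that value is derived (az_info() and the context key are the charm's, the rest is illustrative):

    import os

    def crush_location(hostname):
        # Put the OSD under a per-AZ rack bucket when the provider
        # reports a zone; otherwise ceph falls back to host placement.
        az = os.environ.get('JUJU_AVAILABILITY_ZONE')
        if az:
            return 'root=default rack={} host={}'.format(az, hostname)
        return None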
Change-Id: I78ab6993ad5bd324ea52e279c6ca2630f965544c --- ceph-osd/README.md | 27 ++++++++++- ceph-osd/hooks/ceph_hooks.py | 5 ++ ceph-osd/hooks/utils.py | 54 +++++++++++++++++++++ ceph-osd/metadata.yaml | 3 ++ ceph-osd/unit_tests/test_ceph_networking.py | 51 +++++++++++++++++++ 5 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/unit_tests/test_ceph_networking.py diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 0f3173af..7b599a7d 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -40,9 +40,34 @@ Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd charm which will scan for the configured storage devices and add them to the pool of available storage. +Network Space support +===================== + +This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. + +Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: + + juju deploy ceph-osd --bind "public=data-space cluster=cluster-space" + +Alternatively, these can also be provided as part of a Juju native bundle configuration: + + ceph-osd: + charm: cs:xenial/ceph-osd + num_units: 1 + bindings: + public: data-space + cluster: cluster-space + +Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options affects network traffic within a Ceph deployment. + +**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. + +**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
+ + Contact Information =================== Author: James Page Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug -Location: http://jujucharms.com/charms/ceph-osd \ No newline at end of file +Location: http://jujucharms.com/charms/ceph-osd diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 5ba176cf..5c47350b 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -54,6 +54,8 @@ assert_charm_supports_ipv6, render_template, is_unit_paused_set, + get_public_addr, + get_cluster_addr, ) from charmhelpers.contrib.openstack.alternatives import install_alternative @@ -311,6 +313,9 @@ def emit_cephconf(): cephcontext['public_addr'] = dynamic_ipv6_address if not cluster_network: cephcontext['cluster_addr'] = dynamic_ipv6_address + else: + cephcontext['public_addr'] = get_public_addr() + cephcontext['cluster_addr'] = get_cluster_addr() if az_info(): cephcontext['crush_location'] = "root=default rack={} host={}" \ diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index b6fa3744..f4069bef 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -13,6 +13,9 @@ unit_get, cached, config, + network_get_primary_address, + log, DEBUG, + status_set, ) from charmhelpers.core import unitdata from charmhelpers.fetch import ( @@ -88,6 +91,32 @@ def get_host_ip(hostname=None): return answers[0].address +@cached +def get_public_addr(): + if config('ceph-public-network'): + return get_network_addrs('ceph-public-network')[0] + + try: + return network_get_primary_address('public') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + +@cached +def get_cluster_addr(): + if config('ceph-cluster-network'): + return get_network_addrs('ceph-cluster-network')[0] + + try: + return network_get_primary_address('cluster') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + def get_networks(config_opt='ceph-public-network'): """Get all configured networks from provided config option. @@ -102,6 +131,31 @@ def get_networks(config_opt='ceph-public-network'): return [] +def get_network_addrs(config_opt): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the + addresses we have configured on any of those networks. + """ + addrs = [] + networks = config(config_opt) + if networks: + networks = networks.split() + addrs = [get_address_in_network(n) for n in networks] + addrs = [a for a in addrs if a] + + if not addrs: + if networks: + msg = ("Could not find an address on any of '%s' - resolve this " + "error to retry" % (networks)) + status_set('blocked', msg) + raise Exception(msg) + else: + return [get_host_ip()] + + return addrs + + def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index dad778db..a48b5d31 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -16,6 +16,9 @@ description: | . This charm provides the Ceph OSD personality for expanding storage capacity within a ceph deployment. 
+extra-bindings: + public: + cluster: requires: mon: interface: ceph-osd diff --git a/ceph-osd/unit_tests/test_ceph_networking.py b/ceph-osd/unit_tests/test_ceph_networking.py new file mode 100644 index 00000000..ae3a7ff5 --- /dev/null +++ b/ceph-osd/unit_tests/test_ceph_networking.py @@ -0,0 +1,51 @@ +import test_utils +import charmhelpers.core.hookenv as hookenv +import utils as ceph_utils + +TO_PATCH_SPACES = [ + 'network_get_primary_address', + 'log', + 'get_host_ip', + 'config', + 'get_network_addrs', + 'cached', +] + + +class CephNetworkSpaceTestCase(test_utils.CharmTestCase): + def setUp(self): + super(CephNetworkSpaceTestCase, self).setUp(ceph_utils, + TO_PATCH_SPACES) + self.config.side_effect = self.test_config.get + + def tearDown(self): + # Reset @cached cache + hookenv.cache = {} + + def test_no_network_space_support(self): + self.get_host_ip.return_value = '192.168.2.1' + self.network_get_primary_address.side_effect = NotImplementedError + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.168.2.1') + self.assertEqual(ceph_utils.get_public_addr(), + '192.168.2.1') + + def test_public_network_space(self): + self.network_get_primary_address.return_value = '10.20.40.2' + self.assertEqual(ceph_utils.get_public_addr(), + '10.20.40.2') + self.network_get_primary_address.assert_called_with('public') + self.config.assert_called_with('ceph-public-network') + + def test_cluster_network_space(self): + self.network_get_primary_address.return_value = '10.20.50.2' + self.assertEqual(ceph_utils.get_cluster_addr(), + '10.20.50.2') + self.network_get_primary_address.assert_called_with('cluster') + self.config.assert_called_with('ceph-cluster-network') + + def test_config_options_in_use(self): + self.get_network_addrs.return_value = ['192.122.20.2'] + self.test_config.set('ceph-cluster-network', '192.122.20.0/24') + self.assertEqual(ceph_utils.get_cluster_addr(), + '192.122.20.2') From d648475ca9aa532a0ee8dc11f3c5cb3f476cdef2 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 11 Apr 2016 18:52:43 +0000 Subject: [PATCH 1098/2699] Enable Xenial-Mitaka amulet test target Also remove irrelevant service checks from amulet test. 
Change-Id: I66bb1ecfa85980f7c1ea340a225f40163028e56c --- ceph-osd/tests/021-basic-xenial-mitaka | 0 ceph-osd/tests/basic_deployment.py | 4 ---- 2 files changed, 4 deletions(-) mode change 100644 => 100755 ceph-osd/tests/021-basic-xenial-mitaka diff --git a/ceph-osd/tests/021-basic-xenial-mitaka b/ceph-osd/tests/021-basic-xenial-mitaka old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 9e522391..31eb3bcf 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -195,10 +195,6 @@ def test_102_services(self): """Verify the expected services are running on the service units.""" services = { - self.mysql_sentry: ['mysql'], - self.rabbitmq_sentry: ['rabbitmq-server'], - self.nova_sentry: ['nova-compute'], - self.keystone_sentry: ['keystone'], self.glance_sentry: ['glance-registry', 'glance-api'], self.cinder_sentry: ['cinder-api', From 3f84505026201be3e3a3191282c68cb8d4efa54b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 12 Apr 2016 14:06:45 +0000 Subject: [PATCH 1099/2699] Charmhelper sync before 1604 testing Change-Id: I688e367fa758cf9f052e85de13b3c549876807a7 --- .../hooks/charmhelpers/contrib/network/ip.py | 9 +++++ .../contrib/storage/linux/ceph.py | 2 +- ceph-osd/hooks/charmhelpers/core/host.py | 38 +++++++++++++++---- .../charmhelpers/contrib/amulet/utils.py | 26 ++++++++----- .../contrib/openstack/amulet/deployment.py | 4 +- 5 files changed, 59 insertions(+), 20 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 4efe7993..b9c79000 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -191,6 +191,15 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + def format_ipv6_addr(address): """If address is IPv6, wrap it in '[]' otherwise return None. 
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index f4582545..1b4b1de7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -173,7 +173,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) # Flush the cache and wait for it to return - check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 481087bb..bfea6a15 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -128,6 +128,13 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def systemv_services_running(): + output = subprocess.check_output( + ['service', '--status-all'], + stderr=subprocess.STDOUT).decode('UTF-8') + return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] + + def service_running(service_name): """Determine whether a system service is running""" if init_is_systemd(): @@ -140,11 +147,15 @@ def service_running(service_name): except subprocess.CalledProcessError: return False else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' if ("start/running" in output or "is running" in output or "up and running" in output): return True - else: - return False + # Check System V scripts init script return codes + if service_name in systemv_services_running(): + return True + return False def service_available(service_name): @@ -412,7 +423,7 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False): +def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @@ -433,18 +444,22 @@ def config_changed(): @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) return wrapped_f return wrap -def restart_on_change_helper(lambda_f, restart_map, stopstart=False): +def restart_on_change_helper(lambda_f, restart_map, stopstart=False, + restart_functions=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described @@ -453,8 +468,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): @param lambda_f: function to call. 
@param restart_map: {file: [service, ...]} @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result of lambda_f() """ + if restart_functions is None: + restart_functions = {} checksums = {path: path_hash(path) for path in restart_map} r = lambda_f() # create a list of lists of the services to restart @@ -465,9 +484,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: actions = ('stop', 'start') if stopstart else ('restart',) - for action in actions: - for service_name in services_list: - service(action, service_name) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) return r diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 3e159039..7e5c25a9 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -601,7 +601,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) - for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' @@ -610,25 +610,31 @@ def validate_unit_process_ids(self, expected, actual): a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids_length, + e_pids, a_pids_length, a_pids)) - # If expected is not bool, ensure PID quantities match - if not isinstance(e_pids_length, bool) and \ - a_pids_length != e_pids_length: + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is True and a_pids_length < 1: + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is False and a_pids_length != 0: + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids)) + e_pids, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: From 657b90dfc5cb5277fb74248ded00c30df68d7939 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 12 Apr 2016 14:10:11 +0000 Subject: [PATCH 1100/2699] Charmhelper sync before 1604 testing Change-Id: Ia8423df6ab1c80a2cb142c7cef47be00467db187 --- .../hooks/charmhelpers/contrib/network/ip.py | 9 +++++ ceph-proxy/hooks/charmhelpers/core/host.py | 38 +++++++++++++++---- .../charmhelpers/contrib/amulet/utils.py | 26 ++++++++----- .../contrib/openstack/amulet/deployment.py | 4 +- 4 files changed, 58 insertions(+), 19 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 4efe7993..b9c79000 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -191,6 +191,15 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + def format_ipv6_addr(address): """If address is IPv6, wrap it in '[]' otherwise return None. diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 481087bb..bfea6a15 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -128,6 +128,13 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def systemv_services_running(): + output = subprocess.check_output( + ['service', '--status-all'], + stderr=subprocess.STDOUT).decode('UTF-8') + return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] + + def service_running(service_name): """Determine whether a system service is running""" if init_is_systemd(): @@ -140,11 +147,15 @@ def service_running(service_name): except subprocess.CalledProcessError: return False else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' if ("start/running" in output or "is running" in output or "up and running" in output): return True - else: - return False + # Check System V scripts init script return codes + if service_name in systemv_services_running(): + return True + return False def service_available(service_name): @@ -412,7 +423,7 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False): +def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @@ -433,18 +444,22 @@ def config_changed(): @param restart_map: {path_file_name: [service_name, ...] 
@param stopstart: DEFAULT false; whether to stop, start OR restart + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) return wrapped_f return wrap -def restart_on_change_helper(lambda_f, restart_map, stopstart=False): +def restart_on_change_helper(lambda_f, restart_map, stopstart=False, + restart_functions=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described @@ -453,8 +468,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): @param lambda_f: function to call. @param restart_map: {file: [service, ...]} @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result of lambda_f() """ + if restart_functions is None: + restart_functions = {} checksums = {path: path_hash(path) for path in restart_map} r = lambda_f() # create a list of lists of the services to restart @@ -465,9 +484,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: actions = ('stop', 'start') if stopstart else ('restart',) - for action in actions: - for service_name in services_list: - service(action, service_name) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) return r diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 3e159039..7e5c25a9 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -601,7 +601,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) - for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' @@ -610,25 +610,31 @@ def validate_unit_process_ids(self, expected, actual): a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids_length, + e_pids, a_pids_length, a_pids)) - # If expected is not bool, ensure PID quantities match - if not isinstance(e_pids_length, bool) and \ - a_pids_length != e_pids_length: + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is True and a_pids_length < 1: + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is False and a_pids_length != 0: + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids)) + e_pids, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: From 7a32d2b946cae24f7096a98b56722da6354281be Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 12 Apr 2016 14:10:11 +0000 Subject: [PATCH 1101/2699] Charmhelper sync before 1604 testing Change-Id: Ia8423df6ab1c80a2cb142c7cef47be00467db187 --- .../hooks/charmhelpers/contrib/network/ip.py | 9 +++++ ceph-mon/hooks/charmhelpers/core/host.py | 38 +++++++++++++++---- .../charmhelpers/contrib/amulet/utils.py | 26 ++++++++----- .../contrib/openstack/amulet/deployment.py | 4 +- 4 files changed, 58 insertions(+), 19 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 4efe7993..b9c79000 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -191,6 +191,15 @@ def _get_for_address(address, key): get_netmask_for_address = partial(_get_for_address, key='netmask') +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + def format_ipv6_addr(address): """If address is IPv6, wrap it in '[]' otherwise return None. 
diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 481087bb..bfea6a15 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -128,6 +128,13 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def systemv_services_running(): + output = subprocess.check_output( + ['service', '--status-all'], + stderr=subprocess.STDOUT).decode('UTF-8') + return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] + + def service_running(service_name): """Determine whether a system service is running""" if init_is_systemd(): @@ -140,11 +147,15 @@ def service_running(service_name): except subprocess.CalledProcessError: return False else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' if ("start/running" in output or "is running" in output or "up and running" in output): return True - else: - return False + # Check System V scripts init script return codes + if service_name in systemv_services_running(): + return True + return False def service_available(service_name): @@ -412,7 +423,7 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False): +def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @@ -433,18 +444,22 @@ def config_changed(): @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) return wrapped_f return wrap -def restart_on_change_helper(lambda_f, restart_map, stopstart=False): +def restart_on_change_helper(lambda_f, restart_map, stopstart=False, + restart_functions=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described @@ -453,8 +468,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): @param lambda_f: function to call. 
@param restart_map: {file: [service, ...]} @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result of lambda_f() """ + if restart_functions is None: + restart_functions = {} checksums = {path: path_hash(path) for path in restart_map} r = lambda_f() # create a list of lists of the services to restart @@ -465,9 +484,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: actions = ('stop', 'start') if stopstart else ('restart',) - for action in actions: - for service_name in services_list: - service(action, service_name) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) return r diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 3e159039..7e5c25a9 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -601,7 +601,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) - for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' @@ -610,25 +610,31 @@ def validate_unit_process_ids(self, expected, actual): a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids_length, + e_pids, a_pids_length, a_pids)) - # If expected is not bool, ensure PID quantities match - if not isinstance(e_pids_length, bool) and \ - a_pids_length != e_pids_length: + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is True and a_pids_length < 1: + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is False and a_pids_length != 0: + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids)) + e_pids, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: From cfd0cc454932a731ae66e8dbe59e6740bc308910 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 12 Apr 2016 14:13:03 +0000 Subject: [PATCH 1102/2699] Charmhelper sync before 1604 testing Change-Id: I78e5d82fb957adb0c78bad79a9f3cd7ca0718457 --- .../charmhelpers/contrib/openstack/context.py | 104 +++++++++++++++++- .../charmhelpers/contrib/openstack/utils.py | 9 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 38 +++++-- ceph-radosgw/tests/basic_deployment.py | 3 + .../charmhelpers/contrib/amulet/utils.py | 26 +++-- 5 files changed, 158 insertions(+), 22 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index a8c6ab0c..c07b33dd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -20,7 +20,7 @@ import re import time from base64 import b64decode -from subprocess import check_call +from subprocess import check_call, CalledProcessError import six import yaml @@ -45,6 +45,7 @@ INFO, WARNING, ERROR, + status_set, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -1479,3 +1480,104 @@ def __call__(self): if self.context_complete(ctxt): return ctxt return {} + + +class InternalEndpointContext(OSContextGenerator): + """Internal endpoint context. + + This context provides the endpoint type used for communication between + services e.g. between Nova and Cinder internally. Openstack uses Public + endpoints by default so this allows admins to optionally use internal + endpoints. + """ + def __call__(self): + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class AppArmorContext(OSContextGenerator): + """Base class for apparmor contexts.""" + + def __init__(self): + self._ctxt = None + self.aa_profile = None + self.aa_utils_packages = ['apparmor-utils'] + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """ + Validate aa-profile-mode settings is disable, enforce, or complain. + + :return ctxt: Dictionary of the apparmor profile or None + """ + if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: + ctxt = {'aa-profile-mode': config('aa-profile-mode')} + else: + ctxt = None + return ctxt + + def __call__(self): + return self.ctxt + + def install_aa_utils(self): + """ + Install packages required for apparmor configuration. + """ + log("Installing apparmor utils.") + ensure_packages(self.aa_utils_packages) + + def manually_disable_aa_profile(self): + """ + Manually disable an apparmor profile. + + If aa-profile-mode is set to disabled (default) this is required as the + template has been written but apparmor is yet unaware of the profile + and aa-disable aa-profile fails. Without this the profile would kick + into enforce mode on the next service restart. 
+ + """ + profile_path = '/etc/apparmor.d' + disable_path = '/etc/apparmor.d/disable' + if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): + os.symlink(os.path.join(profile_path, self.aa_profile), + os.path.join(disable_path, self.aa_profile)) + + def setup_aa_profile(self): + """ + Setup an apparmor profile. + The ctxt dictionary will contain the apparmor profile mode and + the apparmor profile name. + Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])] + cmd.append(self.ctxt['aa-profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa-profile-mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa-profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa-profile'], + self.ctxt['aa-profile-mode'])) + raise e diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 3fb67b10..61d58793 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -137,7 +137,7 @@ ('liberty', ['2.3.0', '2.4.0', '2.5.0']), ('mitaka', - ['2.5.0', '2.6.0']), + ['2.5.0', '2.6.0', '2.7.0']), ]) # >= Liberty version->codename mapping @@ -156,6 +156,7 @@ ]), 'keystone': OrderedDict([ ('8.0', 'liberty'), + ('8.1', 'liberty'), ('9.0', 'mitaka'), ]), 'horizon-common': OrderedDict([ @@ -1534,7 +1535,8 @@ def _assess_status_func(): return _assess_status_func -def pausable_restart_on_change(restart_map, stopstart=False): +def pausable_restart_on_change(restart_map, stopstart=False, + restart_functions=None): """A restart_on_change decorator that checks to see if the unit is paused. If it is paused then the decorated function doesn't fire. 
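The restart_functions mapping added in this sync flows from restart_on_change and pausable_restart_on_change down into restart_on_change_helper, which calls the supplied function instead of service('restart', ...) for any service it has an entry for. A hedged usage sketch follows; the hook, service name, and reload-style restarter are invented for illustration:

    def graceful_restart(service_name):
        # Hypothetical custom restarter; restart_on_change_helper passes
        # the service name in. Assumes the init script supports 'reload'.
        service('reload', service_name)

    @pausable_restart_on_change(
        restart_map={'/etc/ceph/ceph.conf': ['radosgw']},
        restart_functions={'radosgw': graceful_restart})
    def config_changed():
        pass  # render config files; services restart only if they changed
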
@@ -1567,6 +1569,7 @@ def wrapped_f(*args, **kwargs): return f(*args, **kwargs) # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) return wrapped_f return wrap diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 481087bb..bfea6a15 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -128,6 +128,13 @@ def service(action, service_name): return subprocess.call(cmd) == 0 +def systemv_services_running(): + output = subprocess.check_output( + ['service', '--status-all'], + stderr=subprocess.STDOUT).decode('UTF-8') + return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] + + def service_running(service_name): """Determine whether a system service is running""" if init_is_systemd(): @@ -140,11 +147,15 @@ def service_running(service_name): except subprocess.CalledProcessError: return False else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' if ("start/running" in output or "is running" in output or "up and running" in output): return True - else: - return False + # Check System V scripts init script return codes + if service_name in systemv_services_running(): + return True + return False def service_available(service_name): @@ -412,7 +423,7 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False): +def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @@ -433,18 +444,22 @@ def config_changed(): @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) return wrapped_f return wrap -def restart_on_change_helper(lambda_f, restart_map, stopstart=False): +def restart_on_change_helper(lambda_f, restart_map, stopstart=False, + restart_functions=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described @@ -453,8 +468,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): @param lambda_f: function to call. 
@param restart_map: {file: [service, ...]} @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} @returns result of lambda_f() """ + if restart_functions is None: + restart_functions = {} checksums = {path: path_hash(path) for path in restart_map} r = lambda_f() # create a list of lists of the services to restart @@ -465,9 +484,12 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False): services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: actions = ('stop', 'start') if stopstart else ('restart',) - for action in actions: - for service_name in services_list: - service(action, service_name) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) return r diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index e45c1933..76c428cb 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -243,6 +243,9 @@ def test_102_services(self): services[self.ceph2_sentry] = ceph_services services[self.ceph_radosgw_sentry] = ['radosgw-all'] + if self._get_openstack_release() >= self.trusty_liberty: + services[self.keystone_sentry] = ['apache2'] + ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 3e159039..7e5c25a9 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -601,7 +601,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) - for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' @@ -610,25 +610,31 @@ def validate_unit_process_ids(self, expected, actual): a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids_length, + e_pids, a_pids_length, a_pids)) - # If expected is not bool, ensure PID quantities match - if not isinstance(e_pids_length, bool) and \ - a_pids_length != e_pids_length: + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is True and a_pids_length < 1: + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is False and a_pids_length != 0: + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids)) + e_pids, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): From a987856472ce6a8a1383bfa0e8472dffd10481e6 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 13 Apr 2016 10:24:28 +0000 Subject: [PATCH 1103/2699] Fix pbr requirement Change-Id: I86dc381922bff2b2ca3c6c7b0f326e6d388fa32d --- ceph-osd/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 426002dc..6a3271b0 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -1,6 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +pbr>=1.8.0,<1.9.0 PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 From 8b3d058a5aea7a79bd35ba86aeb634df7c87309e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 13 Apr 2016 10:25:08 +0000 Subject: [PATCH 1104/2699] Fix pbr requirement Change-Id: I3de4fcbbafbdce16e6eaabb33f15d7e2418924fe --- ceph-proxy/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 426002dc..6a3271b0 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -1,6 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +pbr>=1.8.0,<1.9.0 PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 From 96013a1882eb65ffdc15523202cdce3732af5d61 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 13 Apr 2016 10:25:08 +0000 Subject: [PATCH 1105/2699] Fix pbr requirement Change-Id: I3de4fcbbafbdce16e6eaabb33f15d7e2418924fe --- ceph-mon/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 426002dc..6a3271b0 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -1,6 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
+pbr>=1.8.0,<1.9.0 PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 From 4646fdb40232edb352c5a3c3a6727a9db4b3e14b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 13 Apr 2016 10:25:32 +0000 Subject: [PATCH 1106/2699] Fix pbr requirement Change-Id: I98d30e17d7d1f96d378f47844536b244918b0571 --- ceph-radosgw/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 426002dc..6a3271b0 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -1,6 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +pbr>=1.8.0,<1.9.0 PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 From 9aca4084f204a0239a4996503d354fb19dc39429 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 14 Apr 2016 07:51:00 -0400 Subject: [PATCH 1107/2699] Update test to validate processes better This test is being updated to use the new functionality being made available in the charmhelpers sync Also, update tests/charmhelpers Change-Id: I3109be2ee6069101c72ca1cfd786bcfc66fa27e0 --- ceph-osd/tests/basic_deployment.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 26 ++++++++++++------- .../contrib/openstack/amulet/deployment.py | 4 ++- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 87e236cb..bd9eb80d 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -183,7 +183,7 @@ def test_100_ceph_processes(self): self.ceph0_sentry: ceph_processes, self.ceph1_sentry: ceph_processes, self.ceph2_sentry: ceph_processes, - self.ceph_osd_sentry: {'ceph-osd': True} + self.ceph_osd_sentry: {'ceph-osd': [2, 3]} } actual_pids = u.get_unit_process_ids(expected_processes) diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 3e159039..7e5c25a9 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -601,7 +601,7 @@ def validate_unit_process_ids(self, expected, actual): return ('Process name count mismatch. expected, actual: {}, ' '{}'.format(len(expected), len(actual))) - for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ('Process name mismatch. expected, actual: {}, ' @@ -610,25 +610,31 @@ def validate_unit_process_ids(self, expected, actual): a_pids_length = len(a_pids) fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids_length, + e_pids, a_pids_length, a_pids)) - # If expected is not bool, ensure PID quantities match - if not isinstance(e_pids_length, bool) and \ - a_pids_length != e_pids_length: + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: return fail_msg # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is True and a_pids_length < 1: + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: return fail_msg # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids_length, bool) and \ - e_pids_length is False and a_pids_length != 0: + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, - e_pids_length, a_pids)) + e_pids, a_pids)) return None def validate_list_of_identical_dicts(self, list_of_dicts): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d2ede320..d21c9c78 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -126,7 +126,9 @@ def _add_services(self, this_service, other_services): # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup'] + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] if self.openstack: for svc in services: From bad3231c8d83544a0e34111f9fd0908fb54e8bf0 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Apr 2016 08:24:54 -0700 Subject: [PATCH 1108/2699] Charmhelpers Resync The fix to bug https://bugs.launchpad.net/charms/+source/glance/+bug/1565120 requires a resync of all charms that use the ceph charmhelpers code. Change-Id: Iaf4971f9588467ad25312e36ecbbe676596d9b3c --- ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b4b1de7..b9e9edec 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -221,6 +221,10 @@ def create(self): self.name, str(self.pg_num)] try: check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) except CalledProcessError: raise From d928891fbac625a9fa9f8eb6770314db7109ccb6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 15 Apr 2016 13:29:52 +0100 Subject: [PATCH 1109/2699] Ensure all units provide ceph-public-addr radosgw relation was only providing information when executed by a leader unit. This patch ensures that the minimum info is provided regardless. 
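For clarity, the reordered hook reduces to the following shape (a condensed sketch using the helpers visible in the diff below; package install and unit defaulting elided):

    def radosgw_relation(relid=None, unit=None):
        if not ceph.is_quorum():
            log('mon cluster not in quorum - deferring key provision')
            return
        # Every unit in quorum now publishes the base payload.
        data = {'fsid': leader_get('fsid'),
                'radosgw_key': ceph.get_radosgw_key(),
                'auth': config('auth-supported'),
                'ceph-public-address': get_public_addr()}
        settings = relation_get(rid=relid, unit=unit)
        if 'broker_req' in settings:
            if ceph.is_leader():
                # Only the leader answers broker requests.
                rsp = process_requests(settings['broker_req'])
                data['broker-rsp-' + unit.replace('/', '-')] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)
        # relation_set now runs whenever the cluster is in quorum.
        relation_set(relation_id=relid, relation_settings=data)
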
Change-Id: I2f841933f70847fb1bddc94f5ae525f5588021c2 Closes-Bug: 1570823 --- ceph-proxy/hooks/ceph_hooks.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 3769f524..aec816aa 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -515,26 +515,29 @@ def radosgw_relation(relid=None, unit=None): apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: unit = remote_unit() - """Process broker request(s).""" + if ceph.is_quorum(): + log('mon cluster in quorum - providing radosgw with keys') + public_addr = get_public_addr() + data = { + 'fsid': leader_get('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr, + } + settings = relation_get(rid=relid, unit=unit) + """Process broker request(s).""" if 'broker_req' in settings: - if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=DEBUG) - else: + if ceph.is_leader(): rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id - log('mon cluster in quorum - providing radosgw with keys') - public_addr = get_public_addr() - data = { - 'fsid': leader_get('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr, - unit_response_key: rsp, - } - relation_set(relation_id=relid, relation_settings=data) + data[unit_response_key] = rsp + else: + log("Not leader - ignoring broker request", level=DEBUG) + + relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From 5a3da34fa69f2c052c2ce1501e1150368b0b81e7 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 15 Apr 2016 13:29:52 +0100 Subject: [PATCH 1110/2699] Ensure all units provide ceph-public-addr radosgw relation was only providing information when executed by a leader unit. This patch ensures that the minimum info is provided regardless. 
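To illustrate the effect, relation data for a three-mon deployment might now look like this (all values invented):

    # Leader unit: base payload plus the broker response.
    {'fsid': 'b9790e34-1111-2222-3333-deadbeef0000',
     'radosgw_key': 'AQBexample==',
     'auth': 'cephx',
     'ceph-public-address': '10.5.0.11',
     'broker-rsp-ceph-radosgw-0': '{"exit-code": 0}'}

    # Non-leader units: previously published nothing while a broker_req
    # was pending; now they still provide the minimum payload.
    {'fsid': 'b9790e34-1111-2222-3333-deadbeef0000',
     'radosgw_key': 'AQBexample==',
     'auth': 'cephx',
     'ceph-public-address': '10.5.0.12'}
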
Change-Id: I2f841933f70847fb1bddc94f5ae525f5588021c2 Closes-Bug: 1570823 --- ceph-mon/hooks/ceph_hooks.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 3769f524..aec816aa 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -515,26 +515,29 @@ def radosgw_relation(relid=None, unit=None): apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: unit = remote_unit() - """Process broker request(s).""" + if ceph.is_quorum(): + log('mon cluster in quorum - providing radosgw with keys') + public_addr = get_public_addr() + data = { + 'fsid': leader_get('fsid'), + 'radosgw_key': ceph.get_radosgw_key(), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr, + } + settings = relation_get(rid=relid, unit=unit) + """Process broker request(s).""" if 'broker_req' in settings: - if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=DEBUG) - else: + if ceph.is_leader(): rsp = process_requests(settings['broker_req']) unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id - log('mon cluster in quorum - providing radosgw with keys') - public_addr = get_public_addr() - data = { - 'fsid': leader_get('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr, - unit_response_key: rsp, - } - relation_set(relation_id=relid, relation_settings=data) + data[unit_response_key] = rsp + else: + log("Not leader - ignoring broker request", level=DEBUG) + + relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') From f8cfbb8a29f6764ca7bdb3a11e6231cfc13c6af7 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Apr 2016 08:31:07 -0700 Subject: [PATCH 1111/2699] Charmhelpers resync The fix to bug https://bugs.launchpad.net/charms/+source/glance/+bug/1565120 requires a resync of all charms that use the ceph charmhelpers code. 
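The synced helper sets the replica count explicitly because 'ceph osd pool create' only takes a PG count, leaving new pools at the cluster default size, which is presumably what the glance bug above tracks. Roughly the CLI equivalent of the new create path, sketched with an assumed client id and pool name:

    from subprocess import check_call

    def create_replicated_pool(service, name, pg_num, replicas):
        # Create the pool, then apply the desired size; without the
        # second call the pool keeps the cluster-wide default.
        check_call(['ceph', '--id', service, 'osd', 'pool', 'create',
                    name, str(pg_num)])
        check_call(['ceph', '--id', service, 'osd', 'pool', 'set',
                    name, 'size', str(replicas)])

    create_replicated_pool('admin', 'glance', 64, 3)
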
Change-Id: I61c073b8625d9ad52fddb411bcb3bf7b1cc8f45e --- .../charmhelpers/contrib/storage/linux/ceph.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b4b1de7..a3e8bb98 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -166,12 +166,19 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) elif mode == 'writeback': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) # Flush the cache and wait for it to return check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) @@ -221,6 +228,10 @@ def create(self): self.name, str(self.pg_num)] try: check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) except CalledProcessError: raise From fc5093f142121bdd33b1bcbe6da367b642497789 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Apr 2016 08:31:07 -0700 Subject: [PATCH 1112/2699] Charmhelpers resync The fix to bug https://bugs.launchpad.net/charms/+source/glance/+bug/1565120 requires a resync of all charms that use the ceph charmhelpers code. 
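One caveat on the version gate in this sync (the version >= '10.1' check visible in the hunk below): it compares strings lexically, which ranks '9.2.1' above '10.1' since '9' > '1', so an Infernalis 9.x cluster would also take the Jewel branch if ceph_version() can return such a value here. A numeric comparison is safer; a minimal sketch, assuming plain dotted numeric versions:

    def _as_tuple(v):
        return tuple(int(p) for p in v.split('.'))

    def version_at_least(version, minimum):
        # Compare dotted versions numerically rather than lexically.
        return _as_tuple(version) >= _as_tuple(minimum)

    assert version_at_least('10.2.0', '10.1')
    assert not version_at_least('9.2.1', '10.1')  # lexical compare gets this wrong
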
Change-Id: I61c073b8625d9ad52fddb411bcb3bf7b1cc8f45e --- .../charmhelpers/contrib/storage/linux/ceph.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b4b1de7..a3e8bb98 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -166,12 +166,19 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) elif mode == 'writeback': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) # Flush the cache and wait for it to return check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) @@ -221,6 +228,10 @@ def create(self): self.name, str(self.pg_num)] try: check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) except CalledProcessError: raise From 80d26400fb084e5f1fc77d77d443885a951d49e7 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 19 Apr 2016 22:22:04 +0000 Subject: [PATCH 1113/2699] Revert "add juju availability zone to ceph osd location when present" This reverts commit 205c30affd49ce086a47226318f845e437d1812a. Support for juju provided zones was broken on older Ceph releases where MAAS zones are not configured (i.e. nothing other than the default zone). Backing this change out until we can provide a more complete and backwards compatible solution. 
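For reference, the reverted code derived an 'osd crush location' of root=default rack=<az> host=<hostname> from the JUJU_AVAILABILITY_ZONE environment variable. The removed az_info() would also have raised a TypeError if that variable was ever absent, since it concatenated a possible None into the log message. A reintroduction might want a guarded shape along these lines (sketch only, log as in the hook module):

    import os
    import socket

    def az_info():
        # Only log and return when the variable is actually set.
        az = os.environ.get('JUJU_AVAILABILITY_ZONE')
        if az:
            log("AZ Info: " + az)
        return az

    def crush_location():
        az = az_info()
        if az:
            return "root=default rack={} host={}".format(
                az, socket.gethostname())
        return None
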
Closes-Bug: 1570960 Change-Id: I889d556d180d47b54af2991a65efcca09d685332 --- ceph-osd/hooks/ceph_hooks.py | 10 ---------- ceph-osd/templates/ceph.conf | 4 ---- 2 files changed, 14 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 5c47350b..be383eed 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -278,12 +278,6 @@ def install(): install_upstart_scripts() -def az_info(): - az_info = os.environ.get('JUJU_AVAILABILITY_ZONE') - log("AZ Info: " + az_info) - return az_info - - def emit_cephconf(): mon_hosts = get_mon_hosts() log('Monitor hosts are ' + repr(mon_hosts)) @@ -317,10 +311,6 @@ def emit_cephconf(): cephcontext['public_addr'] = get_public_addr() cephcontext['cluster_addr'] = get_cluster_addr() - if az_info(): - cephcontext['crush_location'] = "root=default rack={} host={}" \ - .format(az_info(), socket.gethostname()) - # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 7fec00e5..67690449 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -29,10 +29,6 @@ public addr = {{ public_addr }} cluster addr = {{ cluster_addr }} {%- endif %} -{% if crush_location %} -osd crush location = {{crush_location}} -{% endif %} - [client.osd-upgrade] keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring From df7581795afb908bb4ce44b0dec0f437e16550f5 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Apr 2016 11:19:30 +0100 Subject: [PATCH 1114/2699] Fix ceph-broker logging Also ensure that ceph broker actions return their status correctly. Change-Id: Id42612e44acda3326196795f0685878b5d2a2753 Closes-Bug: 1572491 --- ceph-proxy/hooks/ceph_broker.py | 74 ++++++++++++++++---------- ceph-proxy/unit_tests/test_ceph_ops.py | 6 ++- 2 files changed, 52 insertions(+), 28 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index d01d38ef..329da8a8 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -4,17 +4,28 @@ # import json -from charmhelpers.contrib.storage.linux.ceph import validator, \ - erasure_profile_exists, ErasurePool, set_pool_quota, \ - pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \ - ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool - from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) +from charmhelpers.contrib.storage.linux.ceph import ( + create_erasure_profile, + delete_pool, + erasure_profile_exists, + get_osds, + pool_exists, + pool_set, + remove_pool_snapshot, + rename_pool, + set_pool_quota, + snapshot_pool, + validator, + ErasurePool, + Pool, + ReplicatedPool, +) # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. @@ -89,6 +100,7 @@ def process_requests(reqs): resp['request-id'] = request_id return resp + except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % @@ -141,16 +153,16 @@ def handle_erasure_pool(request, service): # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds if not erasure_profile_exists(service=service, name=erasure_profile): # TODO: Fail and tell them to create the profile or default - msg = "erasure-profile {} does not exist. 
Please create it with: " \ - "create-erasure-profile".format(erasure_profile) + msg = ("erasure-profile {} does not exist. Please create it with: " + "create-erasure-profile".format(erasure_profile)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pass + pool = ErasurePool(service=service, name=pool_name, erasure_code_profile=erasure_profile) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (erasure_profile=%s)" % (pool, + log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, erasure_profile), level=INFO) pool.create() @@ -184,11 +196,11 @@ def handle_replicated_pool(request, service): replicas=replicas, pg_num=pg_num) if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), level=INFO) pool.create() else: - log("Pool '%s' already exists - skipping create" % pool, + log("Pool '%s' already exists - skipping create" % pool.name, level=DEBUG) # Set a quota if requested @@ -208,10 +220,11 @@ def handle_create_cache_tier(request, service): # cache and storage pool must exist first if not pool_exists(service=service, name=storage_pool) or not pool_exists( service=service, name=cache_pool): - msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \ - "them first".format(storage_pool, cache_pool) + msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " + "them first".format(storage_pool, cache_pool)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + p = Pool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) @@ -222,8 +235,8 @@ def handle_remove_cache_tier(request, service): # cache and storage pool must exist first if not pool_exists(service=service, name=storage_pool) or not pool_exists( service=service, name=cache_pool): - msg = "cold-pool: {} or hot-pool: {} doesn't exist. Not " \ - "deleting cache tier".format(storage_pool, cache_pool) + msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not " + "deleting cache tier".format(storage_pool, cache_pool)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} @@ -249,6 +262,7 @@ def handle_set_pool_value(request, service): else: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0], validator_params[1]) + # Set the value pool_set(service=service, pool_name=params['pool'], key=params['key'], value=params['value']) @@ -263,6 +277,7 @@ def process_requests_v1(reqs): Returns a response dict containing the exit code (non-zero if any operation failed along with an explanation). 
""" + ret = None log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') @@ -275,37 +290,42 @@ def process_requests_v1(reqs): # Default to replicated if pool_type isn't given if pool_type == 'erasure': - handle_erasure_pool(request=req, service=svc) + ret = handle_erasure_pool(request=req, service=svc) else: - handle_replicated_pool(request=req, service=svc) + ret = handle_replicated_pool(request=req, service=svc) + elif op == "create-cache-tier": - handle_create_cache_tier(request=req, service=svc) + ret = handle_create_cache_tier(request=req, service=svc) elif op == "remove-cache-tier": - handle_remove_cache_tier(request=req, service=svc) + ret = handle_remove_cache_tier(request=req, service=svc) elif op == "create-erasure-profile": - handle_create_erasure_profile(request=req, service=svc) + ret = handle_create_erasure_profile(request=req, service=svc) elif op == "delete-pool": pool = req.get('name') - delete_pool(service=svc, name=pool) + ret = delete_pool(service=svc, name=pool) elif op == "rename-pool": old_name = req.get('name') new_name = req.get('new-name') - rename_pool(service=svc, old_name=old_name, new_name=new_name) + ret = rename_pool(service=svc, old_name=old_name, + new_name=new_name) elif op == "snapshot-pool": pool = req.get('name') snapshot_name = req.get('snapshot-name') - snapshot_pool(service=svc, pool_name=pool, - snapshot_name=snapshot_name) + ret = snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) elif op == "remove-pool-snapshot": pool = req.get('name') snapshot_name = req.get('snapshot-name') - remove_pool_snapshot(service=svc, pool_name=pool, - snapshot_name=snapshot_name) + ret = remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) elif op == "set-pool-value": - handle_set_pool_value(request=req, service=svc) + ret = handle_set_pool_value(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if type(ret) == dict and 'exit-code' in ret: + return ret + return {'exit-code': 0} diff --git a/ceph-proxy/unit_tests/test_ceph_ops.py b/ceph-proxy/unit_tests/test_ceph_ops.py index 5e82fa8b..fba81769 100644 --- a/ceph-proxy/unit_tests/test_ceph_ops.py +++ b/ceph-proxy/unit_tests/test_ceph_ops.py @@ -67,6 +67,7 @@ def test_process_requests_delete_pool(self, 'op': 'delete-pool', 'name': 'foo', }]}) + mock_delete_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) @@ -139,8 +140,8 @@ def test_snapshot_pool(self, mock_snapshot_pool): 'name': 'foo', 'snapshot-name': 'foo-snap1', }]}) + mock_snapshot_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) - mock_snapshot_pool.return_value = 1 mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', snapshot_name='foo-snap1') @@ -155,6 +156,7 @@ def test_rename_pool(self, mock_rename_pool): 'name': 'foo', 'new-name': 'foo2', }]}) + mock_rename_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_rename_pool.assert_called_with(service='admin', old_name='foo', @@ -170,6 +172,7 @@ def test_remove_pool_snapshot(self, mock_snapshot_pool): 'name': 'foo', 'snapshot-name': 'foo-snap1', }]}) + mock_snapshot_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', @@ 
-186,6 +189,7 @@ def test_set_pool_value(self, mock_set_pool): 'key': 'size', 'value': 3, }]}) + mock_set_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_set_pool.assert_called_with(service='admin', pool_name='foo', From 8a9fffcf824eece8af941703fe84a1321b9d5b8f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Apr 2016 11:19:30 +0100 Subject: [PATCH 1115/2699] Fix ceph-broker logging Also ensure that ceph broker actions return their status correctly. Change-Id: Id42612e44acda3326196795f0685878b5d2a2753 Closes-Bug: 1572491 --- ceph-mon/hooks/ceph_broker.py | 74 ++++++++++++++++++---------- ceph-mon/unit_tests/test_ceph_ops.py | 6 ++- 2 files changed, 52 insertions(+), 28 deletions(-) diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index d01d38ef..329da8a8 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -4,17 +4,28 @@ # import json -from charmhelpers.contrib.storage.linux.ceph import validator, \ - erasure_profile_exists, ErasurePool, set_pool_quota, \ - pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \ - ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool - from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) +from charmhelpers.contrib.storage.linux.ceph import ( + create_erasure_profile, + delete_pool, + erasure_profile_exists, + get_osds, + pool_exists, + pool_set, + remove_pool_snapshot, + rename_pool, + set_pool_quota, + snapshot_pool, + validator, + ErasurePool, + Pool, + ReplicatedPool, +) # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. @@ -89,6 +100,7 @@ def process_requests(reqs): resp['request-id'] = request_id return resp + except Exception as exc: log(str(exc), level=ERROR) msg = ("Unexpected error occurred while processing requests: %s" % @@ -141,16 +153,16 @@ def handle_erasure_pool(request, service): # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds if not erasure_profile_exists(service=service, name=erasure_profile): # TODO: Fail and tell them to create the profile or default - msg = "erasure-profile {} does not exist. Please create it with: " \ - "create-erasure-profile".format(erasure_profile) + msg = ("erasure-profile {} does not exist. 
Please create it with: " + "create-erasure-profile".format(erasure_profile)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pass + pool = ErasurePool(service=service, name=pool_name, erasure_code_profile=erasure_profile) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (erasure_profile=%s)" % (pool, + log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, erasure_profile), level=INFO) pool.create() @@ -184,11 +196,11 @@ def handle_replicated_pool(request, service): replicas=replicas, pg_num=pg_num) if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (replicas=%s)" % (pool, replicas), + log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), level=INFO) pool.create() else: - log("Pool '%s' already exists - skipping create" % pool, + log("Pool '%s' already exists - skipping create" % pool.name, level=DEBUG) # Set a quota if requested @@ -208,10 +220,11 @@ def handle_create_cache_tier(request, service): # cache and storage pool must exist first if not pool_exists(service=service, name=storage_pool) or not pool_exists( service=service, name=cache_pool): - msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \ - "them first".format(storage_pool, cache_pool) + msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " + "them first".format(storage_pool, cache_pool)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + p = Pool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) @@ -222,8 +235,8 @@ def handle_remove_cache_tier(request, service): # cache and storage pool must exist first if not pool_exists(service=service, name=storage_pool) or not pool_exists( service=service, name=cache_pool): - msg = "cold-pool: {} or hot-pool: {} doesn't exist. Not " \ - "deleting cache tier".format(storage_pool, cache_pool) + msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not " + "deleting cache tier".format(storage_pool, cache_pool)) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} @@ -249,6 +262,7 @@ def handle_set_pool_value(request, service): else: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0], validator_params[1]) + # Set the value pool_set(service=service, pool_name=params['pool'], key=params['key'], value=params['value']) @@ -263,6 +277,7 @@ def process_requests_v1(reqs): Returns a response dict containing the exit code (non-zero if any operation failed along with an explanation). 
""" + ret = None log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) for req in reqs: op = req.get('op') @@ -275,37 +290,42 @@ def process_requests_v1(reqs): # Default to replicated if pool_type isn't given if pool_type == 'erasure': - handle_erasure_pool(request=req, service=svc) + ret = handle_erasure_pool(request=req, service=svc) else: - handle_replicated_pool(request=req, service=svc) + ret = handle_replicated_pool(request=req, service=svc) + elif op == "create-cache-tier": - handle_create_cache_tier(request=req, service=svc) + ret = handle_create_cache_tier(request=req, service=svc) elif op == "remove-cache-tier": - handle_remove_cache_tier(request=req, service=svc) + ret = handle_remove_cache_tier(request=req, service=svc) elif op == "create-erasure-profile": - handle_create_erasure_profile(request=req, service=svc) + ret = handle_create_erasure_profile(request=req, service=svc) elif op == "delete-pool": pool = req.get('name') - delete_pool(service=svc, name=pool) + ret = delete_pool(service=svc, name=pool) elif op == "rename-pool": old_name = req.get('name') new_name = req.get('new-name') - rename_pool(service=svc, old_name=old_name, new_name=new_name) + ret = rename_pool(service=svc, old_name=old_name, + new_name=new_name) elif op == "snapshot-pool": pool = req.get('name') snapshot_name = req.get('snapshot-name') - snapshot_pool(service=svc, pool_name=pool, - snapshot_name=snapshot_name) + ret = snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) elif op == "remove-pool-snapshot": pool = req.get('name') snapshot_name = req.get('snapshot-name') - remove_pool_snapshot(service=svc, pool_name=pool, - snapshot_name=snapshot_name) + ret = remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) elif op == "set-pool-value": - handle_set_pool_value(request=req, service=svc) + ret = handle_set_pool_value(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if type(ret) == dict and 'exit-code' in ret: + return ret + return {'exit-code': 0} diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 5e82fa8b..fba81769 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -67,6 +67,7 @@ def test_process_requests_delete_pool(self, 'op': 'delete-pool', 'name': 'foo', }]}) + mock_delete_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) @@ -139,8 +140,8 @@ def test_snapshot_pool(self, mock_snapshot_pool): 'name': 'foo', 'snapshot-name': 'foo-snap1', }]}) + mock_snapshot_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) - mock_snapshot_pool.return_value = 1 mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', snapshot_name='foo-snap1') @@ -155,6 +156,7 @@ def test_rename_pool(self, mock_rename_pool): 'name': 'foo', 'new-name': 'foo2', }]}) + mock_rename_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_rename_pool.assert_called_with(service='admin', old_name='foo', @@ -170,6 +172,7 @@ def test_remove_pool_snapshot(self, mock_snapshot_pool): 'name': 'foo', 'snapshot-name': 'foo-snap1', }]}) + mock_snapshot_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', @@ -186,6 +189,7 
@@ def test_set_pool_value(self, mock_set_pool): 'key': 'size', 'value': 3, }]}) + mock_set_pool.return_value = {'exit-code': 0} rc = ceph_broker.process_requests(reqs) mock_set_pool.assert_called_with(service='admin', pool_name='foo', From 32df68763402b67f09729f7fb564eadb2ccb0246 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Apr 2016 11:58:32 +0100 Subject: [PATCH 1116/2699] Sync charm-helpers to get fix for pool_exists() Change-Id: I87449b1cda34b5c69faf101a0cc3a749160ff543 Closes-Bug: 1572506 --- ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index a3e8bb98..d008081f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -615,7 +615,7 @@ def pool_exists(service, name): except CalledProcessError: return False - return name in out + return name in out.split() def get_osds(service): From 6d3d7ad7288e6bf966538ba6f2b16aaf58a33878 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Apr 2016 11:58:32 +0100 Subject: [PATCH 1117/2699] Sync charm-helpers to get fix for pool_exists() Change-Id: I87449b1cda34b5c69faf101a0cc3a749160ff543 Closes-Bug: 1572506 --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index a3e8bb98..d008081f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -615,7 +615,7 @@ def pool_exists(service, name): except CalledProcessError: return False - return name in out + return name in out.split() def get_osds(service): From 171d56a6f39bb0aca993d01be01ac7953562081b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 20 Apr 2016 08:12:27 -0400 Subject: [PATCH 1118/2699] Charmhelpers Sync, fixes mount detection This charmhelpers change corrects how we detect if a device is mounted Closes-Bug: #1513009 Closes-Bug: #1571840 Change-Id: I5c42fba38d11dd1cc02723dbe8e1b6bae7d5d35d --- .../hooks/charmhelpers/contrib/storage/linux/utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 1e57941a..4e35c297 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,8 +64,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. 
''' - is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']).decode('UTF-8') - if is_partition: - return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]*\b", out)) + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) From dfd83041d445e74b73b10f87e4b497ec414b4df6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 20 Apr 2016 14:28:21 +0100 Subject: [PATCH 1119/2699] Fix rgw list of pools to create on install Closes-Bug: 1572572 Change-Id: I8dcdb1085afbea015f39124d917667ea1dd4014a --- ceph-radosgw/hooks/ceph.py | 8 +++---- ceph-radosgw/tests/basic_deployment.py | 4 ++-- ceph-radosgw/unit_tests/test_ceph.py | 29 +++++++++++++++++++++----- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 56c76186..98370f4d 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -260,11 +260,11 @@ def get_create_rgw_pools_rq(prefix): '.rgw.buckets.index', '.rgw.buckets.extra', '.log', - '.intent-log' + '.intent-log', '.usage', - '.users' - '.users.email' - '.users.swift' + '.users', + '.users.email', + '.users.swift', '.users.uid'] pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 76c428cb..57a94638 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -290,7 +290,7 @@ def test_201_ceph_radosgw_relation(self): for unit in s_entries: ret.append(u.validate_relation_data(unit, relation, expected)) - if not any(ret): + if any(ret): message = u.relation_error('ceph to ceph-radosgw', ret) amulet.raise_status(amulet.FAIL, msg=message) @@ -493,12 +493,12 @@ def test_498_radosgw_cmds_exit_zero(self): the ceph_radosgw unit.""" sentry_units = [self.ceph_radosgw_sentry] commands = [ - 'sudo radosgw-admin regions list', 'sudo radosgw-admin bucket list', 'sudo radosgw-admin zone list', 'sudo radosgw-admin metadata list', 'sudo radosgw-admin gc list' ] + ret = u.check_commands_on_units(commands, sentry_units) if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index c068d621..c993d4f3 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -230,10 +230,22 @@ def test_create_rgw_pools_rq_with_prefix(self, config, broker): call().add_op_create_pool( pg_num=10, replica_count=3, name='us-east.log'), call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.intent-log.usage'), + pg_num=10, replica_count=3, name='us-east.intent-log'), call().add_op_create_pool( pg_num=10, replica_count=3, - name='us-east.users.users.email.users.swift.users.uid')] + name='us-east.usage'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='us-east.users'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='us-east.users.email'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='us-east.users.swift'), + call().add_op_create_pool( + pg_num=10, replica_count=3, + name='us-east.users.uid')] ) @patch('ceph.CephBrokerRq') @@ -261,8 +273,15 @@ def test_create_rgw_pools_rq_without_prefix(self, config, broker): call().add_op_create_pool( pg_num=10, replica_count=3, name='.log'), call().add_op_create_pool( - pg_num=10, replica_count=3, name='.intent-log.usage'), 
+ pg_num=10, replica_count=3, name='.intent-log'), call().add_op_create_pool( - pg_num=10, replica_count=3, - name='.users.users.email.users.swift.users.uid')] + pg_num=10, replica_count=3, name='.usage'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.users'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.users.email'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.users.swift'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='.users.uid')] ) From 60d642f9796ac818aa6b4e615d55b0f963e4d135 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 22 Apr 2016 14:30:51 +0100 Subject: [PATCH 1120/2699] RGW >= Jewel requires pool names prefixed with zone When deploying Jewel RGW (10.*) or above, pool names are expected to be prefixed by the RGW client zone name. Closes-Bug: 1573549 Change-Id: I01fa558b2f259deb243267ba7d714f7b4df5df75 --- ceph-radosgw/hooks/ceph.py | 39 +++++++++---- ceph-radosgw/tests/basic_deployment.py | 11 ++++ ceph-radosgw/unit_tests/test_ceph.py | 76 +++++++++++++++++++++----- ceph-radosgw/unit_tests/test_hooks.py | 9 --- 4 files changed, 102 insertions(+), 33 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 98370f4d..9b4a22a6 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -7,19 +7,20 @@ # import json +import os import subprocess import time -import os + from socket import gethostname as get_unit_hostname from charmhelpers.core.hookenv import ( config, ) - from charmhelpers.contrib.storage.linux.ceph import ( CephBrokerRq, ) +from charmhelpers.fetch import apt_cache LEADER = 'leader' PEON = 'peon' @@ -229,9 +230,17 @@ def get_named_key(name, caps=None): return key -def get_create_rgw_pools_rq(prefix): - """Pre-create RGW pools so that they have the correct settings. This - will prepend a prefix onto the pools if specified in the config.yaml +def get_rgw_version(): + from apt import apt_pkg + pkg = apt_cache()['radosgw'] + version = apt_pkg.upstream_version(pkg.current_ver.ver_str) + return version + + +def get_create_rgw_pools_rq(prefix=None): + """Pre-create RGW pools so that they have the correct settings. + + If a prefix is provided it will be prepended to each pool name. When RGW creates its own pools it will create them with non-optimal settings (LP: #1476749). @@ -240,14 +249,26 @@ def get_create_rgw_pools_rq(prefix): http://docs.ceph.com/docs/master/radosgw/config/#create-pools for list of supported/required pools. """ + from apt import apt_pkg + + apt_pkg.init() rq = CephBrokerRq() replicas = config('ceph-osd-replication-count') + # Jewel and above automatically always prefix pool names with zone when + # creating them (see LP: 1573549). 
+ if prefix is None: + vc = apt_pkg.version_compare(get_rgw_version(), '10.0.0') + if vc >= 0: + prefix = 'default' + else: + prefix = '' + # Buckets likely to contain the most data and therefore requiring the most # PGs heavy = ['.rgw.buckets'] - for pool in heavy: + pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) rq.add_op_create_pool(name=pool, replica_count=replicas) # NOTE: we want these pools to have a smaller pg_num/pgp_num than the @@ -268,11 +289,7 @@ def get_create_rgw_pools_rq(prefix): '.users.uid'] pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: - if prefix: - pool = "{prefix}{pool}".format( - prefix=prefix, - pool=pool) - + pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num) return rq diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 57a94638..5b473058 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -447,6 +447,17 @@ def test_400_ceph_check_osd_pools(self): u.log.debug('Checking pools on ceph units...') expected_pools = self.get_ceph_expected_pools(radosgw=True) + + if self._get_openstack_release() >= self.trusty_mitaka: + non_rgw_pools = self.get_ceph_expected_pools() + _expected_pools = [] + for pool in expected_pools: + if pool not in non_rgw_pools: + # prepend zone name + _expected_pools.append('default%s' % (pool)) + + expected_pools = _expected_pools + results = [] sentries = [ self.ceph_radosgw_sentry, diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index c993d4f3..3256387e 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -1,5 +1,15 @@ +import sys + +from mock import patch, call, MagicMock + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
+mock_apt = MagicMock() +mock_apt.apt_pkg = MagicMock() +sys.modules['apt'] = mock_apt +sys.modules['apt_pkg'] = mock_apt.apt_pkg + import ceph -from mock import patch, call from test_utils import CharmTestCase @@ -205,14 +215,12 @@ def test_get_named_key_get(self): ] self.subprocess.check_output.assert_called_with(cmd) - @patch('ceph.CephBrokerRq') - @patch('ceph.config') - def test_create_rgw_pools_rq_with_prefix(self, config, broker): - config.side_effect = config_side_effect + @patch.object(ceph, 'CephBrokerRq') + @patch.object(ceph, 'config') + def test_create_rgw_pools_rq_with_prefix(self, mock_config, mock_broker): + mock_config.side_effect = config_side_effect ceph.get_create_rgw_pools_rq(prefix='us-east') - broker.assert_has_calls([ - call().add_op_create_pool( - replica_count=3, name='.rgw.buckets'), + mock_broker.assert_has_calls([ call().add_op_create_pool( pg_num=10, replica_count=3, name='us-east.rgw'), call().add_op_create_pool( @@ -248,12 +256,14 @@ def test_create_rgw_pools_rq_with_prefix(self, config, broker): name='us-east.users.uid')] ) - @patch('ceph.CephBrokerRq') - @patch('ceph.config') - def test_create_rgw_pools_rq_without_prefix(self, config, broker): - config.side_effect = config_side_effect + @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) + @patch.object(ceph, 'CephBrokerRq') + @patch.object(ceph, 'config') + def test_create_rgw_pools_rq_no_prefix_pre_jewel(self, mock_config, + mock_broker): + mock_config.side_effect = config_side_effect ceph.get_create_rgw_pools_rq(prefix=None) - broker.assert_has_calls([ + mock_broker.assert_has_calls([ call().add_op_create_pool( replica_count=3, name='.rgw.buckets'), call().add_op_create_pool( @@ -285,3 +295,43 @@ def test_create_rgw_pools_rq_without_prefix(self, config, broker): call().add_op_create_pool( pg_num=10, replica_count=3, name='.users.uid')] ) + + @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) + @patch.object(ceph, 'CephBrokerRq') + @patch.object(ceph, 'config') + def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_config, + mock_broker): + mock_config.side_effect = config_side_effect + ceph.get_create_rgw_pools_rq(prefix=None) + mock_broker.assert_has_calls([ + call().add_op_create_pool( + replica_count=3, name='default.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.root'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.control'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.gc'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.buckets'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.buckets.index'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.rgw.buckets.extra'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.log'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.intent-log'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.usage'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.users'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.users.email'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.users.swift'), + call().add_op_create_pool( + pg_num=10, replica_count=3, name='default.users.uid')] + ) diff --git 
a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 8e7607ec..818c0614 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -1,5 +1,3 @@ -import sys - from mock import ( call, patch, @@ -11,13 +9,6 @@ ) from charmhelpers.contrib.openstack.ip import PUBLIC -# python-apt is not installed as part of test-requirements but is imported by -# some charmhelpers modules so create a fake import. -mock_apt = MagicMock() -sys.modules['apt'] = mock_apt -mock_apt.apt_pkg = MagicMock() - - dnsmock = MagicMock() modules = { 'dns': dnsmock, From 341449e8276af7658d61a80fa2c48115db2b77bc Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 2 May 2016 07:51:42 -0500 Subject: [PATCH 1121/2699] Stop using trusty-backports haproxy with >= L For < L we use backport haproxy when prefer-ipv6=True but should not use it with >= L. Change-Id: I61d641e62945ce86d1ebe33724e1c207c2b0cfe0 Closes-Bug: 1577243 --- ceph-radosgw/hooks/ceph.py | 12 +++--------- ceph-radosgw/hooks/utils.py | 15 +++++++++++++-- ceph-radosgw/unit_tests/test_ceph.py | 27 +++++++++++++++++++++++++++ ceph-radosgw/unit_tests/test_hooks.py | 9 --------- ceph-radosgw/unit_tests/test_utils.py | 17 ++++++++++------- 5 files changed, 53 insertions(+), 27 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 9b4a22a6..0f357231 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -14,13 +14,14 @@ from socket import gethostname as get_unit_hostname +from utils import get_pkg_version + from charmhelpers.core.hookenv import ( config, ) from charmhelpers.contrib.storage.linux.ceph import ( CephBrokerRq, ) -from charmhelpers.fetch import apt_cache LEADER = 'leader' PEON = 'peon' @@ -230,13 +231,6 @@ def get_named_key(name, caps=None): return key -def get_rgw_version(): - from apt import apt_pkg - pkg = apt_cache()['radosgw'] - version = apt_pkg.upstream_version(pkg.current_ver.ver_str) - return version - - def get_create_rgw_pools_rq(prefix=None): """Pre-create RGW pools so that they have the correct settings. @@ -258,7 +252,7 @@ def get_create_rgw_pools_rq(prefix=None): # Jewel and above automatically always prefix pool names with zone when # creating them (see LP: 1573549). if prefix is None: - vc = apt_pkg.version_compare(get_rgw_version(), '10.0.0') + vc = apt_pkg.version_compare(get_pkg_version('radosgw'), '10.0.0') if vc >= 0: prefix = 'default' else: diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 81d7b7c5..aedd9eb0 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -26,7 +26,6 @@ templating, ) from charmhelpers.contrib.openstack.utils import ( - os_release, set_os_workload_status, make_assess_status_func, pause_unit, @@ -43,6 +42,7 @@ add_source, filter_installed_packages, ) +from charmhelpers.fetch import apt_cache # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. @@ -173,9 +173,13 @@ def setup_ipv6(): raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") + from apt import apt_pkg + apt_pkg.init() + # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to # use trusty-backports otherwise we can use the UCA. 
- if ubuntu_rel == 'trusty' and os_release('ceph-common') < 'liberty': + vc = apt_pkg.version_compare(get_pkg_version('haproxy'), '1.5.3') + if ubuntu_rel == 'trusty' and vc == -1: add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports ' 'main') apt_update(fatal=True) @@ -247,3 +251,10 @@ def _pause_resume_helper(f, configs): f(assess_status_func(configs), services=services(), ports=None) + + +def get_pkg_version(name): + from apt import apt_pkg + pkg = apt_cache()[name] + version = apt_pkg.upstream_version(pkg.current_ver.ver_str) + return version diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 3256387e..7e9bdde4 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -10,6 +10,7 @@ sys.modules['apt_pkg'] = mock_apt.apt_pkg import ceph +import utils from test_utils import CharmTestCase @@ -335,3 +336,29 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_config, call().add_op_create_pool( pg_num=10, replica_count=3, name='default.users.uid')] ) + + @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) + @patch.object(utils, 'lsb_release', + lambda: {'DISTRIB_CODENAME': 'trusty'}) + @patch.object(utils, 'add_source') + @patch.object(utils, 'apt_update') + @patch.object(utils, 'apt_install') + def test_setup_ipv6_install_backports(self, mock_add_source, + mock_apt_update, + mock_apt_install): + utils.setup_ipv6() + self.assertTrue(mock_apt_update.called) + self.assertTrue(mock_apt_install.called) + + @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) + @patch.object(utils, 'lsb_release', + lambda: {'DISTRIB_CODENAME': 'trusty'}) + @patch.object(utils, 'add_source') + @patch.object(utils, 'apt_update') + @patch.object(utils, 'apt_install') + def test_setup_ipv6_not_install_backports(self, mock_add_source, + mock_apt_update, + mock_apt_install): + utils.setup_ipv6() + self.assertFalse(mock_apt_update.called) + self.assertFalse(mock_apt_install.called) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 818c0614..82989f54 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -1,7 +1,6 @@ from mock import ( call, patch, - MagicMock ) from test_utils import ( @@ -9,14 +8,6 @@ ) from charmhelpers.contrib.openstack.ip import PUBLIC -dnsmock = MagicMock() -modules = { - 'dns': dnsmock, - 'dns.resolver': dnsmock, -} -module_patcher = patch.dict('sys.modules', modules) -module_patcher.start() - with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: lambda *args, **kwargs: f(*args, **kwargs)) diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index 060fba97..613f888d 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -11,9 +11,10 @@ def load_config(): - '''Walk backwords from __file__ looking for config.yaml, - load and return the 'options' section' - ''' + """Walk backwards from __file__ looking for config.yaml. + + Load and return the 'options' section. + """ config = None f = __file__ while config is None: @@ -32,9 +33,10 @@ def load_config(): def get_default_config(): - '''Load default charm config from config.yaml return as a dict. + """Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. 
-    '''
+    """
     default_config = {}
     config = load_config()
     for k, v in config.iteritems():
@@ -106,10 +108,11 @@ def get(self, attr=None, unit=None, rid=None):
 
 @contextmanager
 def patch_open():
-    '''Patch open() to allow mocking both open() itself and the file that is
+    """Patch open() to allow mocking both open() itself and the file that is
     yielded.
+
     Yields the mock for "open" and "file", respectively.
-    '''
+    """
     mock_open = MagicMock(spec=open)
     mock_file = MagicMock(spec=file)

From b1a85b3b75e4a07f994ce120a98362e52301c040 Mon Sep 17 00:00:00 2001
From: James Page
Date: Tue, 17 May 2016 10:08:18 +0100
Subject: [PATCH 1122/2699] Limit OSD object name lengths for Jewel + ext4

As of the Ceph Jewel release, certain limitations apply to OSD object
name lengths: specifically if ext4 is in use for block devices or a
directory based OSD is configured, OSDs must be configured to limit
object name length:

  osd max object name len = 256
  osd max object namespace len = 64

This may cause problems storing objects with long names via the
ceph-radosgw charm or for direct users of RADOS.

Also ensure that ceph.conf has a final newline, as ceph requires this.

Change-Id: I26f1d8a6f9560b307929f294d2d637c92986cf41
Closes-Bug: 1580320
Closes-Bug: 1578403
---
 ceph-osd/hooks/ceph_hooks.py | 19 +++++++++++++++++++
 ceph-osd/templates/ceph.conf | 5 +++++
 2 files changed, 24 insertions(+)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index be383eed..57e74b05 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -278,6 +278,24 @@ def install():
     install_upstart_scripts()
 
 
+def use_short_objects():
+    '''
+    Determine whether OSDs should be configured with
+    limited object name lengths.
+
+    @return: boolean indicating whether OSDs should be limited
+    '''
+    if cmp_pkgrevno('ceph', "10.2.0") >= 0:
+        if config('osd-format') in ('ext4',):
+            return True
+        for device in config('osd-devices').split():
+            if not device.startswith('/dev'):
+                # TODO: determine format of directory based
+                #       OSD location
+                return True
+    return False
+
+
 def emit_cephconf():
     mon_hosts = get_mon_hosts()
     log('Monitor hosts are ' + repr(mon_hosts))
@@ -299,6 +317,7 @@ def emit_cephconf():
         'ceph_cluster_network': cluster_network,
         'loglevel': config('loglevel'),
         'dio': str(config('use-direct-io')).lower(),
+        'short_object_len': use_short_objects(),
     }
 
     if config('prefer-ipv6'):
diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf
index 67690449..c67ab4f5 100644
--- a/ceph-osd/templates/ceph.conf
+++ b/ceph-osd/templates/ceph.conf
@@ -44,3 +44,8 @@ osd journal size = {{ osd_journal_size }}
 filestore xattr use omap = true
 journal dio = {{ dio }}
 
+{%- if short_object_len %}
+osd max object name len = 256
+osd max object namespace len = 64
+{% endif %}
+

From 741f42e98b2ea9a09663b736be09338a85489666 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 7 Apr 2016 08:43:19 -0400
Subject: [PATCH 1123/2699] Add support for Storage hooks

This adds support for Juju's storage hooks by merging the config
provided osd-devices with Juju storage provided osd-devices, in the
same way that the existing Ceph charm handles them.

In addition to providing support for ceph-osds via Juju storage, we
provide support for multiple journal devices through Juju storage as
well.
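As a concrete illustration of that merge (a minimal standalone sketch only, using the charmhelpers hookenv helpers that appear in the diff below; the function name merged_osd_devices is hypothetical):

    import os

    from charmhelpers.core.hookenv import config, storage_get, storage_list

    def merged_osd_devices():
        # Start with any devices named in the charm's osd-devices config
        # option, resolving symlinks to real paths.
        if config('osd-devices'):
            devices = [os.path.realpath(path)
                       for path in config('osd-devices').split(' ')]
        else:
            devices = []
        # Then append the location reported for each block device Juju
        # attached via the 'osd-devices' storage binding (e.g. the
        # --storage flag at deploy time).
        for storage_id in storage_list('osd-devices'):
            devices.append(storage_get('location', storage_id))
        return devices

Journal devices are merged the same way through a separate 'osd-journals' storage binding, as the diff below shows.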
We have to add a shim hook to ensure that Ceph is installed prior to storage hook invocation because storage attached at deploy time will execute hooks before the install hook Change-Id: Idad46e8f4cc32e09fbd64d29cd93745662e9f542 --- ceph-osd/config.yaml | 3 +- ceph-osd/hooks/add-storage | 7 +++++ ceph-osd/hooks/ceph_hooks.py | 30 ++++++++++++++------ ceph-osd/hooks/osd-devices-storage-attached | 1 + ceph-osd/hooks/osd-devices-storage-detaching | 1 + ceph-osd/hooks/storage.real | 1 + ceph-osd/metadata.yaml | 9 ++++++ 7 files changed, 43 insertions(+), 9 deletions(-) create mode 100755 ceph-osd/hooks/add-storage create mode 120000 ceph-osd/hooks/osd-devices-storage-attached create mode 120000 ceph-osd/hooks/osd-devices-storage-detaching create mode 120000 ceph-osd/hooks/storage.real diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b15628ac..2506d9c7 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -10,7 +10,8 @@ options: The devices to format and set up as osd volumes. These devices are the range of devices that will be checked for and - used across all service units. + used across all service units, in addition to any volumes attached + via the --storage flag during deployment. For ceph >= 0.56.6 these can also be directories instead of devices - the charm assumes anything not starting with /dev is a directory instead. diff --git a/ceph-osd/hooks/add-storage b/ceph-osd/hooks/add-storage new file mode 100755 index 00000000..5b02c170 --- /dev/null +++ b/ceph-osd/hooks/add-storage @@ -0,0 +1,7 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +dpkg -l|grep 'python-apt ' || exit 0 + +exec ./hooks/storage.real \ No newline at end of file diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index e508e0e6..6b846109 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -31,6 +31,8 @@ UnregisteredHookError, service_name, status_set, + storage_get, + storage_list, ) from charmhelpers.core.host import ( umount, @@ -379,6 +381,7 @@ def config_changed(): prepare_disks_and_activate() +@hooks.hook('storage.real') def prepare_disks_and_activate(): osd_journal = get_journal_devices() check_overlap(osd_journal, set(get_devices())) @@ -447,20 +450,31 @@ def reformat_osd(): def get_devices(): if config('osd-devices'): - return [ + devices = [ os.path.realpath(path) for path in config('osd-devices').split(' ')] else: - return [] + devices = [] + + # List storage instances for the 'osd-devices' + # store declared for this charm too, and add + # their block device paths to the list. 
+    storage_ids = storage_list('osd-devices')
+    devices.extend((storage_get('location', s) for s in storage_ids))
+    return devices
 
 
 def get_journal_devices():
-    osd_journal = config('osd-journal')
-    if not osd_journal:
-        return set()
-    osd_journal = [l.strip() for l in config('osd-journal').split(' ')]
-    osd_journal = set(filter(os.path.exists, osd_journal))
-    return osd_journal
+    if config('osd-journal'):
+        devices = [l.strip() for l in config('osd-journal').split(' ')]
+    else:
+        devices = []
+    storage_ids = storage_list('osd-journals')
+    devices.extend((storage_get('location', s) for s in storage_ids))
+    devices = filter(os.path.exists, devices)
+
+    return set(devices)
 
 
 @hooks.hook('mon-relation-changed',
diff --git a/ceph-osd/hooks/osd-devices-storage-attached b/ceph-osd/hooks/osd-devices-storage-attached
new file mode 120000
index 00000000..68134a91
--- /dev/null
+++ b/ceph-osd/hooks/osd-devices-storage-attached
@@ -0,0 +1 @@
+add-storage
\ No newline at end of file
diff --git a/ceph-osd/hooks/osd-devices-storage-detaching b/ceph-osd/hooks/osd-devices-storage-detaching
new file mode 120000
index 00000000..68134a91
--- /dev/null
+++ b/ceph-osd/hooks/osd-devices-storage-detaching
@@ -0,0 +1 @@
+add-storage
\ No newline at end of file
diff --git a/ceph-osd/hooks/storage.real b/ceph-osd/hooks/storage.real
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-osd/hooks/storage.real
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml
index dad778db..01a2aa64 100644
--- a/ceph-osd/metadata.yaml
+++ b/ceph-osd/metadata.yaml
@@ -19,3 +19,12 @@ description: |
 requires:
   mon:
     interface: ceph-osd
+storage:
+  osd-devices:
+    type: block
+    multiple:
+      range: 0-
+  osd-journals:
+    type: block
+    multiple:
+      range: 0-
\ No newline at end of file

From c710859ccc3eea199888e1ff616ab4103250957a Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Mon, 16 May 2016 09:25:12 -0400
Subject: [PATCH 1124/2699] Fix Availability Zone support to not break when
 not set

In addition to ensuring that we have AZ set, we need to ensure that the
user has asked to have the crush map customized, ensuring that use of
the availability zone feature is entirely opt-in.

Change-Id: Ie13f50d4d084317199813d417a8de6dab25d340d
Closes-Bug: 1582274
---
 ceph-osd/config.yaml | 6 ++++++
 ceph-osd/hooks/ceph_hooks.py | 16 ++++++++++++++++
 ceph-osd/templates/ceph.conf | 4 ++++
 3 files changed, 26 insertions(+)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index b15628ac..fccc44eb 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -151,6 +151,12 @@ options:
       threads-max to a high value to avoid problems with large numbers (>20)
       of OSDs recovering. very large clusters should set those values even
       higher (e.g. max for kernel.pid_max is 4194303).
+  customize-failure-domain:
+    type: boolean
+    default: false
+    description: |
+      Setting this to true will tell Ceph to replicate across Juju's
+      Availability Zone instead of specifically by host.
   nagios_context:
     type: string
     default: "juju"
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 57e74b05..b5799632 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -278,6 +278,12 @@ def install():
     install_upstart_scripts()
 
 
+def az_info():
+    az = os.environ.get('JUJU_AVAILABILITY_ZONE')
+    log("AZ Info: " + (az or 'not set'))
+    return az
+
+
 def use_short_objects():
     '''
     Determine whether OSDs should be configured with
@@ -330,6 +336,16 @@ def emit_cephconf():
     cephcontext['public_addr'] = get_public_addr()
     cephcontext['cluster_addr'] = get_cluster_addr()
 
+    if config('customize-failure-domain'):
+        if az_info():
+            cephcontext['crush_location'] = "root=default rack={} host={}" \
+                .format(az_info(), socket.gethostname())
+        else:
+            log(
+                "Your Juju environment doesn't "
+                "have support for Availability Zones"
+            )
+
     # Install ceph.conf as an alternative to support
     # co-existence with other charms that write this file
     charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf
index c67ab4f5..adbb565e 100644
--- a/ceph-osd/templates/ceph.conf
+++ b/ceph-osd/templates/ceph.conf
@@ -29,6 +29,10 @@ public addr = {{ public_addr }}
 cluster addr = {{ cluster_addr }}
 {%- endif %}
 
+{% if crush_location %}
+osd crush location = {{crush_location}}
+{% endif %}
+
 [client.osd-upgrade]
 keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring
 
From 9d0394b5715ed68062f2efc5b774679ce6046a4d Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 18 May 2016 14:02:36 +0100
Subject: [PATCH 1125/2699] Resync charm-helpers

Avoid use of 'service --status-all' which is currently broken on trusty
for upstart managed daemons; the change moves to detecting how the
daemon is managed, and then using upstart status XXX or the return code
of service XXX status to determine whether a process is running.

Fixes for IPv6 network address detection under Ubuntu 16.04 which
changes the output format of the ip commands slightly.

Update the version map to include 8.1.x as a Neutron version for
Mitaka.

Change-Id: I8aa5c59441362100ff48be0181f4cfdb98f2e6d7
Closes-Bug: 1581171
Closes-Bug: 1581598
Closes-Bug: 1580674
---
 .../hooks/charmhelpers/contrib/network/ip.py | 21 ++++++++++-
 .../contrib/storage/linux/ceph.py | 15 +++++++-
 ceph-osd/hooks/charmhelpers/core/host.py | 37 +++++++++----------
 3 files changed, 49 insertions(+), 24 deletions(-)

diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
index b9c79000..6bba07b6 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
@@ -214,7 +214,16 @@ def format_ipv6_addr(address):
 
 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                    fatal=True, exc_list=None):
-    """Return the assigned IP address for a given interface, if any."""
+    """Return the assigned IP address for a given interface, if any.
+
+    :param iface: network interface on which address(es) are expected to
+                  be found.
+ :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] @@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, @@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, cmd = ['ip', 'addr', 'show', iface] out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: key = re.compile("inet6 (.+)/[0-9]+ scope global.*") diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b4b1de7..d008081f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -166,12 +166,19 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) elif mode == 'writeback': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) # Flush the cache and wait for it to return check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) @@ -221,6 +228,10 @@ def create(self): self.name, str(self.pg_num)] try: check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) except CalledProcessError: raise @@ -604,7 +615,7 @@ def pool_exists(service, name): except CalledProcessError: return False - return name in out + return name in out.split() def get_osds(service): diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index bfea6a15..64b2df55 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -128,11 +128,8 @@ def service(action, service_name): return subprocess.call(cmd) == 0 -def systemv_services_running(): - output = subprocess.check_output( - ['service', '--status-all'], - 
stderr=subprocess.STDOUT).decode('UTF-8') - return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" def service_running(service_name): @@ -140,22 +137,22 @@ def service_running(service_name): if init_is_systemd(): return service('is-active', service_name) else: - try: - output = subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if ("start/running" in output or "is running" in output or - "up and running" in output): - return True + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + output = subprocess.check_output( + ['status', service_name], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' + if "start/running" in output: + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes - if service_name in systemv_services_running(): - return True - return False + return service('status', service_name) + return False def service_available(service_name): From d66a3ed22e262d335f818bdb50c71e21f18195e8 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 May 2016 14:03:04 +0100 Subject: [PATCH 1126/2699] Resync charm-helpers Avoid use of 'service --status-all' which is currently broken on trusty for upstart managed daemons; the change moves to detecting how the daemon is managed, and then using upstart status XXX or the return code of service XXX status to determine whether a process is running. Fixes for IPv6 network address detection under Ubuntu 16.04 which changes the output format of the ip commands slightly. Update the version map to include 8.1.x as a Neutron version for Mitaka. Change-Id: I8fd67b8c4d1a44af2dff8b26d7a8ccc40dd3e409 Closes-Bug: 1581171 Closes-Bug: 1581598 Closes-Bug: 1580674 --- .../hooks/charmhelpers/contrib/network/ip.py | 21 ++++++++++- .../section-keystone-authtoken-mitaka | 12 ++++++ .../charmhelpers/contrib/openstack/utils.py | 1 + .../contrib/storage/linux/ceph.py | 11 +++++- .../contrib/storage/linux/utils.py | 10 ++--- ceph-radosgw/hooks/charmhelpers/core/host.py | 37 +++++++++---------- 6 files changed, 63 insertions(+), 29 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index b9c79000..6bba07b6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -214,7 +214,16 @@ def format_ipv6_addr(address): def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any.""" + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. 
+ :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] @@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, @@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, cmd = ['ip', 'addr', 'show', iface] out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: key = re.compile("inet6 (.+)/[0-9]+ scope global.*") diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka new file mode 100644 index 00000000..dd6f3641 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -0,0 +1,12 @@ +{% if auth_host -%} +[keystone_authtoken] +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = password +project_domain_name = default +user_domain_name = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 61d58793..e64a106c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -149,6 +149,7 @@ 'neutron-common': OrderedDict([ ('7.0', 'liberty'), ('8.0', 'mitaka'), + ('8.1', 'mitaka'), ]), 'cinder-common': OrderedDict([ ('7.0', 'liberty'), diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index b9e9edec..d008081f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -166,12 +166,19 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) elif mode == 'writeback': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) + pool_forward_cmd = ['ceph', '--id', 
self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) # Flush the cache and wait for it to return check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) @@ -608,7 +615,7 @@ def pool_exists(service, name): except CalledProcessError: return False - return name in out + return name in out.split() def get_osds(service): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 1e57941a..4e35c297 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,8 +64,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' - is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']).decode('UTF-8') - if is_partition: - return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]*\b", out)) + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index bfea6a15..64b2df55 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -128,11 +128,8 @@ def service(action, service_name): return subprocess.call(cmd) == 0 -def systemv_services_running(): - output = subprocess.check_output( - ['service', '--status-all'], - stderr=subprocess.STDOUT).decode('UTF-8') - return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" def service_running(service_name): @@ -140,22 +137,22 @@ def service_running(service_name): if init_is_systemd(): return service('is-active', service_name) else: - try: - output = subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if ("start/running" in output or "is running" in output or - "up and running" in output): - return True + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + output = subprocess.check_output( + ['status', service_name], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' + if "start/running" in output: + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes - if service_name in systemv_services_running(): - return True - return False + return service('status', service_name) + return False def service_available(service_name): From 425d036fd8bc935a139b6f8a38233f43f1e4e43f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 May 2016 14:02:05 +0100 Subject: [PATCH 1127/2699] Resync charm-helpers Avoid use of 'service --status-all' which is currently broken on trusty 
for upstart managed daemons; the change moves to detecting how the daemon is managed, and then using upstart status XXX or the return code of service XXX status to determine whether a process is running. Fixes for IPv6 network address detection under Ubuntu 16.04 which changes the output format of the ip commands slightly. Update the version map to include 8.1.x as a Neutron version for Mitaka. Fixup mocking of apt during load of unit tests and ensure that tox does not use any site packages, ensuring that test execution is isolated from installed OS Change-Id: I0f8d7cb2689f5e4c94390b324850e4e9b6e10eb5 Closes-Bug: 1581171 Closes-Bug: 1581598 Closes-Bug: 1580674 --- .../hooks/charmhelpers/contrib/network/ip.py | 21 ++++++++++- .../contrib/storage/linux/utils.py | 10 ++--- ceph-proxy/hooks/charmhelpers/core/host.py | 37 +++++++++---------- ceph-proxy/tox.ini | 2 +- ceph-proxy/unit_tests/test_status.py | 7 ++++ ceph-proxy/unit_tests/test_upgrade_roll.py | 6 +++ 6 files changed, 55 insertions(+), 28 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index b9c79000..6bba07b6 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -214,7 +214,16 @@ def format_ipv6_addr(address): def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any.""" + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. + :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] @@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, @@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, cmd = ['ip', 'addr', 'show', iface] out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: key = re.compile("inet6 (.+)/[0-9]+ scope global.*") diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 1e57941a..4e35c297 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,8 +64,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. 
''' - is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']).decode('UTF-8') - if is_partition: - return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]*\b", out)) + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index bfea6a15..64b2df55 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -128,11 +128,8 @@ def service(action, service_name): return subprocess.call(cmd) == 0 -def systemv_services_running(): - output = subprocess.check_output( - ['service', '--status-all'], - stderr=subprocess.STDOUT).decode('UTF-8') - return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" def service_running(service_name): @@ -140,22 +137,22 @@ def service_running(service_name): if init_is_systemd(): return service('is-active', service_name) else: - try: - output = subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if ("start/running" in output or "is running" in output or - "up and running" in output): - return True + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + output = subprocess.check_output( + ['status', service_name], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' + if "start/running" in output: + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes - if service_name in systemv_services_running(): - return True - return False + return service('status', service_name) + return False def service_available(service_name): diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 487dde23..9c02ada3 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -8,7 +8,7 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -sitepackages = True +sitepackages = False [testenv:py27] basepython = python2.7 diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py index 46cc0178..0900b2e5 100644 --- a/ceph-proxy/unit_tests/test_status.py +++ b/ceph-proxy/unit_tests/test_status.py @@ -1,5 +1,12 @@ import mock import test_utils +import sys + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
+mock_apt = mock.MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = mock.MagicMock() with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: diff --git a/ceph-proxy/unit_tests/test_upgrade_roll.py b/ceph-proxy/unit_tests/test_upgrade_roll.py index dd0ae231..82e9c55a 100644 --- a/ceph-proxy/unit_tests/test_upgrade_roll.py +++ b/ceph-proxy/unit_tests/test_upgrade_roll.py @@ -8,6 +8,12 @@ import test_utils +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. +mock_apt = MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = MagicMock() + with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: lambda *args, **kwargs: f(*args, **kwargs)) From 14794487f4e3c12d57d8a3c87ae1c88623e9f656 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 May 2016 14:02:05 +0100 Subject: [PATCH 1128/2699] Resync charm-helpers Avoid use of 'service --status-all' which is currently broken on trusty for upstart managed daemons; the change moves to detecting how the daemon is managed, and then using upstart status XXX or the return code of service XXX status to determine whether a process is running. Fixes for IPv6 network address detection under Ubuntu 16.04 which changes the output format of the ip commands slightly. Update the version map to include 8.1.x as a Neutron version for Mitaka. Fixup mocking of apt during load of unit tests and ensure that tox does not use any site packages, ensuring that test execution is isolated from installed OS Change-Id: I0f8d7cb2689f5e4c94390b324850e4e9b6e10eb5 Closes-Bug: 1581171 Closes-Bug: 1581598 Closes-Bug: 1580674 --- .../hooks/charmhelpers/contrib/network/ip.py | 21 ++++++++++- .../contrib/storage/linux/utils.py | 10 ++--- ceph-mon/hooks/charmhelpers/core/host.py | 37 +++++++++---------- ceph-mon/tox.ini | 2 +- ceph-mon/unit_tests/test_status.py | 7 ++++ ceph-mon/unit_tests/test_upgrade_roll.py | 6 +++ 6 files changed, 55 insertions(+), 28 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index b9c79000..6bba07b6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -214,7 +214,16 @@ def format_ipv6_addr(address): def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any.""" + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. + :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] @@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. 
+ :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, @@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, cmd = ['ip', 'addr', 'show', iface] out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: key = re.compile("inet6 (.+)/[0-9]+ scope global.*") diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 1e57941a..4e35c297 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,8 +64,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' - is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']).decode('UTF-8') - if is_partition: - return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]*\b", out)) + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index bfea6a15..64b2df55 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -128,11 +128,8 @@ def service(action, service_name): return subprocess.call(cmd) == 0 -def systemv_services_running(): - output = subprocess.check_output( - ['service', '--status-all'], - stderr=subprocess.STDOUT).decode('UTF-8') - return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row] +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" def service_running(service_name): @@ -140,22 +137,22 @@ def service_running(service_name): if init_is_systemd(): return service('is-active', service_name) else: - try: - output = subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if ("start/running" in output or "is running" in output or - "up and running" in output): - return True + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + output = subprocess.check_output( + ['status', service_name], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running 'start/running' + if "start/running" in output: + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes - if service_name in systemv_services_running(): - return True - return False + return service('status', service_name) + return False def service_available(service_name): diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 487dde23..9c02ada3 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini 
@@ -8,7 +8,7 @@ setenv = VIRTUAL_ENV={envdir}
 install_command =
   pip install --allow-unverified python-apt {opts} {packages}
 commands = ostestr {posargs}
-sitepackages = True
+sitepackages = False
 
 [testenv:py27]
 basepython = python2.7
diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py
index 46cc0178..0900b2e5 100644
--- a/ceph-mon/unit_tests/test_status.py
+++ b/ceph-mon/unit_tests/test_status.py
@@ -1,5 +1,12 @@
 import mock
 import test_utils
+import sys
+
+# python-apt is not installed as part of test-requirements but is imported by
+# some charmhelpers modules so create a fake import.
+mock_apt = mock.MagicMock()
+sys.modules['apt'] = mock_apt
+mock_apt.apt_pkg = mock.MagicMock()
 
 with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
     mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py
index dd0ae231..82e9c55a 100644
--- a/ceph-mon/unit_tests/test_upgrade_roll.py
+++ b/ceph-mon/unit_tests/test_upgrade_roll.py
@@ -8,6 +8,12 @@
 
 import test_utils
 
+# python-apt is not installed as part of test-requirements but is imported by
+# some charmhelpers modules so create a fake import.
+mock_apt = MagicMock()
+sys.modules['apt'] = mock_apt
+mock_apt.apt_pkg = MagicMock()
+
 with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
     mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
                             lambda *args, **kwargs: f(*args, **kwargs))

From 445843669f04be7531dab53bcedcfc26c88e5fc5 Mon Sep 17 00:00:00 2001
From: James Page
Date: Tue, 17 May 2016 09:27:56 +0100
Subject: [PATCH 1129/2699] Defer radosgw key provision until OSDs detected

The RADOS gateway on startup will try to initialize a number of pools
in the Ceph cluster; if no OSDs are present at the point of startup,
this operation is re-tried for a period of 5 minutes.

As ceph-radosgw and ceph-mon are typically deployed in LXC containers,
they are up and running before ceph-osd units are fully operational,
resulting in ceph-radosgw units with no running radosgw process.

By deferring the provision of keys to related ceph-radosgw units until
ceph-osd units are detected, the chance of this race happening is
greatly reduced.

Also add a trailing newline to ceph.conf as ceph requires that
configuration files end with a newline.

Change-Id: I2a21f021502bde5c688bd3ac4b84ef24a8bdef68
Closes-Bug: 1577519
Closes-Bug: 1578403
---
 ceph-proxy/hooks/ceph_hooks.py | 26 +++++++++++++++++++++++---
 ceph-proxy/templates/ceph.conf | 1 +
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py
index aec816aa..8b2bf5ef 100755
--- a/ceph-proxy/hooks/ceph_hooks.py
+++ b/ceph-proxy/hooks/ceph_hooks.py
@@ -504,10 +504,27 @@ def osd_relation(relid=None):
         }
         relation_set(relation_id=relid,
                      relation_settings=data)
+        # NOTE: radosgw key provision is gated on presence of OSD
+        #       units so ensure that any deferred hooks are processed
+        notify_radosgws()
     else:
         log('mon cluster not in quorum - deferring fsid provision')
 
 
+def related_osds(num_units=3):
+    '''
+    Determine whether there are OSD units currently related
+
+    @param num_units: The minimum number of units required
+    @return: boolean indicating whether the required number of
+             units were detected.
+    '''
+    for r_id in relation_ids('osd'):
+        if len(related_units(r_id)) >= num_units:
+            return True
+    return False
+
+
 @hooks.hook('radosgw-relation-changed')
 @hooks.hook('radosgw-relation-joined')
 def radosgw_relation(relid=None, unit=None):
@@ -516,8 +533,11 @@ def radosgw_relation(relid=None, unit=None):
     if not unit:
         unit = remote_unit()
 
-    if ceph.is_quorum():
-        log('mon cluster in quorum - providing radosgw with keys')
+    # NOTE: radosgw needs to use some OSD storage, so defer key
+    #       provision until OSD units are detected.
+    if ceph.is_quorum() and related_osds():
+        log('mon cluster in quorum and osds related '
+            '- providing radosgw with keys')
         public_addr = get_public_addr()
         data = {
             'fsid': leader_get('fsid'),
@@ -539,7 +559,7 @@ def radosgw_relation(relid=None, unit=None):
         relation_set(relation_id=relid,
                      relation_settings=data)
     else:
-        log('mon cluster not in quorum - deferring key provision')
+        log('mon cluster not in quorum or no osds - deferring key provision')
 
 
 @hooks.hook('client-relation-joined')
diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf
index 631381bc..f64db7cb 100644
--- a/ceph-proxy/templates/ceph.conf
+++ b/ceph-proxy/templates/ceph.conf
@@ -36,3 +36,4 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring
 
 [mds]
 keyring = /var/lib/ceph/mds/$cluster-$id/keyring
+

From bd40f61418fe0a55d28532b17ce091a5aa5ecfa2 Mon Sep 17 00:00:00 2001
From: James Page
Date: Tue, 17 May 2016 09:27:56 +0100
Subject: [PATCH 1130/2699] Defer radosgw key provision until OSDs detected

The RADOS gateway on startup will try to initialize a number of pools
in the Ceph cluster; if no OSDs are present at the point of startup,
this operation is re-tried for a period of 5 minutes.

As ceph-radosgw and ceph-mon are typically deployed in LXC containers,
they are up and running before ceph-osd units are fully operational,
resulting in ceph-radosgw units with no running radosgw process.

By deferring the provision of keys to related ceph-radosgw units until
ceph-osd units are detected, the chance of this race happening is
greatly reduced.

Also add a trailing newline to ceph.conf as ceph requires that
configuration files end with a newline.

Change-Id: I2a21f021502bde5c688bd3ac4b84ef24a8bdef68
Closes-Bug: 1577519
Closes-Bug: 1578403
---
 ceph-mon/hooks/ceph_hooks.py | 26 +++++++++++++++++++++++---
 ceph-mon/templates/ceph.conf | 1 +
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index aec816aa..8b2bf5ef 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -504,10 +504,27 @@ def osd_relation(relid=None):
         }
         relation_set(relation_id=relid,
                      relation_settings=data)
+        # NOTE: radosgw key provision is gated on presence of OSD
+        #       units so ensure that any deferred hooks are processed
+        notify_radosgws()
     else:
         log('mon cluster not in quorum - deferring fsid provision')
 
 
+def related_osds(num_units=3):
+    '''
+    Determine whether there are OSD units currently related
+
+    @param num_units: The minimum number of units required
+    @return: boolean indicating whether the required number of
+             units were detected.
+    '''
+    for r_id in relation_ids('osd'):
+        if len(related_units(r_id)) >= num_units:
+            return True
+    return False
+
+
 @hooks.hook('radosgw-relation-changed')
 @hooks.hook('radosgw-relation-joined')
 def radosgw_relation(relid=None, unit=None):
@@ -516,8 +533,11 @@ def radosgw_relation(relid=None, unit=None):
     if not unit:
         unit = remote_unit()
 
-    if ceph.is_quorum():
-        log('mon cluster in quorum - providing radosgw with keys')
+    # NOTE: radosgw needs to use some OSD storage, so defer key
+    #       provision until OSD units are detected.
+    if ceph.is_quorum() and related_osds():
+        log('mon cluster in quorum and osds related '
+            '- providing radosgw with keys')
         public_addr = get_public_addr()
         data = {
             'fsid': leader_get('fsid'),
@@ -539,7 +559,7 @@ def radosgw_relation(relid=None, unit=None):
         relation_set(relation_id=relid,
                      relation_settings=data)
     else:
-        log('mon cluster not in quorum - deferring key provision')
+        log('mon cluster not in quorum or no osds - deferring key provision')
 
 
 @hooks.hook('client-relation-joined')
diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf
index 631381bc..f64db7cb 100644
--- a/ceph-mon/templates/ceph.conf
+++ b/ceph-mon/templates/ceph.conf
@@ -36,3 +36,4 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring
 
 [mds]
 keyring = /var/lib/ceph/mds/$cluster-$id/keyring
+

From 76d96add1cc854a0ce1b759287e9281dffd9a419 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Mon, 9 May 2016 12:41:21 +0100
Subject: [PATCH 1131/2699] Guard against invalid cert returned from keystone

Sometimes keystone returns invalid string data when doing a GET for SSL
certs during the setup phases, so we now check the returned data and
ignore it if invalid.

Also perform some code cleanup around cert management and add unit
tests.

Closes-Bug: 1542074
Change-Id: I6c28f540d902296cfd83a99f4e052ba8b2771e9e
---
 ceph-radosgw/hooks/hooks.py | 120 +---------
 ceph-radosgw/hooks/utils.py | 224 +++++++++++++++++-
 .../unit_tests/test_ceph_radosgw_utils.py | 110 ++++++++-
 ceph-radosgw/unit_tests/test_hooks.py | 1 -
 ceph-radosgw/unit_tests/test_utils.py | 25 +-
 5 files changed, 327 insertions(+), 153 deletions(-)

diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index 2becbc3a..48013a39 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -34,16 +34,10 @@
     apt_purge,
     add_source,
 )
-from charmhelpers.core.host import (
-    lsb_release,
-)
+from charmhelpers.core.host import lsb_release
 from charmhelpers.payload.execd import execd_preinstall
-from charmhelpers.core.host import (
-    cmp_pkgrevno,
-    mkdir,
-)
+from charmhelpers.core.host import cmp_pkgrevno
 from charmhelpers.contrib.network.ip import (
-    format_ipv6_addr,
     get_ipv6_addr,
     get_iface_for_address,
     get_netmask_for_address,
@@ -68,6 +62,7 @@
     setup_ipv6,
     services,
     assess_status,
+    setup_keystone_certs,
 )
 from charmhelpers.contrib.charmsupport import nrpe
 from charmhelpers.contrib.hardening.harden import harden
@@ -93,14 +88,14 @@ def install_ceph_optimised_packages():
 
 PACKAGES = [
-    'radosgw',
-    'ntp',
     'haproxy',
     'libnss3-tools',
+    'ntp',
     'python-keystoneclient',
     'python-six',  # Ensures correct version is installed for precise
                    # since python-keystoneclient does not pull in icehouse
                    # version
+    'radosgw',
 ]
 
 APACHE_PACKAGES = [
@@ -137,111 +132,6 @@ def install():
         os.makedirs('/etc/ceph')
 
 
-def setup_keystone_certs(unit=None, rid=None):
-    """
-    Get CA and signing certs from Keystone used to decrypt revoked token list.
- """ - import requests - - try: - # Kilo and newer - from keystoneclient.exceptions import ( - ConnectionRefused, - Forbidden - ) - except ImportError: - # Juno and older - from keystoneclient.exceptions import ( - ConnectionError as ConnectionRefused, - Forbidden - ) - - from keystoneclient.v2_0 import client - - certs_path = '/var/lib/ceph/nss' - if not os.path.exists(certs_path): - mkdir(certs_path) - - rdata = relation_get(unit=unit, rid=rid) - auth_protocol = rdata.get('auth_protocol', 'http') - - required_keys = ['admin_token', 'auth_host', 'auth_port'] - settings = {} - for key in required_keys: - settings[key] = rdata.get(key) - - if is_ipv6(settings.get('auth_host')): - settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) - - if not all(settings.values()): - log("Missing relation settings (%s) - skipping cert setup" % - (', '.join([k for k in settings.keys() if not settings[k]])), - level=DEBUG) - return - - auth_endpoint = "%s://%s:%s/v2.0" % (auth_protocol, settings['auth_host'], - settings['auth_port']) - keystone = client.Client(token=settings['admin_token'], - endpoint=auth_endpoint) - - # CA - try: - try: - # Kilo and newer - ca_cert = keystone.certificates.get_ca_certificate() - except AttributeError: - # Juno and older - ca_cert = requests.request('GET', auth_endpoint + - '/certificates/ca').text - except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden): - log("Error connecting to keystone - skipping ca/signing cert setup", - level=WARNING) - return - - if ca_cert: - log("Updating ca cert from keystone", level=DEBUG) - ca = os.path.join(certs_path, 'ca.pem') - with open(ca, 'w') as fd: - fd.write(ca_cert) - - out = subprocess.check_output(['openssl', 'x509', '-in', ca, - '-pubkey']) - p = subprocess.Popen(['certutil', '-d', certs_path, '-A', '-n', 'ca', - '-t', 'TCu,Cu,Tuw'], stdin=subprocess.PIPE) - p.communicate(out) - else: - log("No ca cert available from keystone", level=DEBUG) - - # Signing cert - try: - try: - # Kilo and newer - signing_cert = keystone.certificates.get_signing_certificate() - except AttributeError: - # Juno and older - signing_cert = requests.request('GET', auth_endpoint + - '/certificates/signing').text - except (ConnectionRefused, requests.exceptions.ConnectionError): - log("Error connecting to keystone - skipping ca/signing cert setup", - level=WARNING) - return - - if signing_cert: - log("Updating signing cert from keystone", level=DEBUG) - signing_cert_path = os.path.join(certs_path, 'signing_certificate.pem') - with open(signing_cert_path, 'w') as fd: - fd.write(signing_cert) - - out = subprocess.check_output(['openssl', 'x509', '-in', - signing_cert_path, '-pubkey']) - p = subprocess.Popen(['certutil', '-A', '-d', certs_path, '-n', - 'signing_cert', '-t', 'P,P,P'], - stdin=subprocess.PIPE) - p.communicate(out) - else: - log("No signing cert available from keystone", level=DEBUG) - - @hooks.hook('upgrade-charm', 'config-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index aedd9eb0..33966ca5 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -9,40 +9,78 @@ import os import re -import jinja2 +import subprocess +import sys -from copy import deepcopy from collections import OrderedDict +from copy import deepcopy +import jinja2 import ceph_radosgw_context from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + INFO, + relation_get, relation_ids, status_get, - config, +) +from 
charmhelpers.contrib.network.ip import ( + format_ipv6_addr, + is_ipv6, ) from charmhelpers.contrib.openstack import ( context, templating, ) from charmhelpers.contrib.openstack.utils import ( - set_os_workload_status, make_assess_status_func, pause_unit, resume_unit, + set_os_workload_status, ) from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config from charmhelpers.core.host import ( cmp_pkgrevno, lsb_release, + mkdir, ) from charmhelpers.fetch import ( + apt_cache, apt_install, apt_update, add_source, filter_installed_packages, ) -from charmhelpers.fetch import apt_cache + +# NOTE: some packages are installed by the charm so may not be available +# yet. Calls that depend on them should be aware of this (and use the +# defer_if_unavailable() decorator). +try: + import keystoneclient + from keystoneclient.v2_0 import client + try: + # Kilo and newer + from keystoneclient.exceptions import ( + ConnectionRefused, + Forbidden, + ) + except ImportError: + # Juno and older + from keystoneclient.exceptions import ( + ConnectionError as ConnectionRefused, + Forbidden, + ) +except ImportError: + keystoneclient = None + +# This is installed as a dep of python-keystoneclient +try: + import requests +except ImportError: + requests = None # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. @@ -83,11 +121,19 @@ ]) +class KSCertSetupException(BaseException): + """Keystone SSL Certificate Setup Exception. + + This exception should be raised if any part of cert setup fails. + """ + pass + + def resource_map(): - ''' - Dynamically generate a map of resources that will be managed for a single - hook execution. - ''' + """Dynamically generate a map of resources. + + These will be managed for a single hook execution. + """ if not config('use-embedded-webserver'): if os.path.exists('/etc/apache2/conf-available'): BASE_RESOURCE_MAP.pop(APACHE_CONF) @@ -128,7 +174,7 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): def services(): - ''' Returns a list of services associate with this charm ''' + """Returns a list of services associated with this charm.""" _services = [] for v in BASE_RESOURCE_MAP.values(): _services.extend(v.get('services', [])) @@ -187,7 +233,8 @@ def setup_ipv6(): def assess_status(configs): - """Assess status of current unit + """Assess status of current unit. + Decides what the state of the unit should be based on the current configuration. SIDE EFFECT: calls set_os_workload_status(...) which sets the workload @@ -258,3 +305,158 @@ def get_pkg_version(name): pkg = apt_cache()[name] version = apt_pkg.upstream_version(pkg.current_ver.ver_str) return version + + +def defer_if_unavailable(modules): + """If a function depends on a package/module that is installed by the charm + but may not yet have been installed, it can be deferred using this + decorator. + + :param modules: list of modules that must be importable. + """ + def _inner1_defer_if_unavailable(f): + def _inner2_defer_if_unavailable(*args, **kwargs): + for m in modules: + if m not in sys.modules: + log("Module '{}' does not appear to be available " + "yet - deferring call to '{}' until it " + "is.".format(m, f.__name__), level=INFO) + return + + return f(*args, **kwargs) + + return _inner2_defer_if_unavailable + + return _inner1_defer_if_unavailable + + +@defer_if_unavailable(['keystoneclient']) +def get_ks_cert(ksclient, auth_endpoint, cert_type): + """Get certificate from keystone.
+ + :param ksclient: Authenticated keystoneclient instance + :param auth_endpoint: Keystone auth endpoint url + :param cert_type: Certificate type to request, 'ca' or 'signing' + :returns: certificate + """ + try: + try: + # Kilo and newer + if cert_type == 'ca': + cert = ksclient.certificates.get_ca_certificate() + elif cert_type == 'signing': + cert = ksclient.certificates.get_signing_certificate() + else: + raise KSCertSetupException("Invalid cert type " + "'{}'".format(cert_type)) + except AttributeError: + # Juno and older + cert = requests.request('GET', "{}/certificates/{}". + format(auth_endpoint, cert_type)).text + except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden): + raise KSCertSetupException("Error connecting to keystone") + + return cert + + +@defer_if_unavailable(['keystoneclient']) +def get_ks_ca_cert(admin_token, auth_endpoint, certs_path): + """Get and store keystone CA certificate. + + :param admin_token: Keystone admin token + :param auth_endpoint: Keystone auth endpoint url + :param certs_path: Path to local certs store + :returns: None + """ + ksclient = client.Client(token=admin_token, endpoint=auth_endpoint) + ca_cert = get_ks_cert(ksclient, auth_endpoint, 'ca') + if ca_cert: + try: + # Cert should not contain unicode chars. + str(ca_cert) + except UnicodeEncodeError: + raise KSCertSetupException("Did not get a valid ca cert from " + "keystone - cert setup incomplete") + + log("Updating ca cert from keystone", level=DEBUG) + ca = os.path.join(certs_path, 'ca.pem') + with open(ca, 'w') as fd: + fd.write(ca_cert) + + out = subprocess.check_output(['openssl', 'x509', '-in', ca, + '-pubkey']) + p = subprocess.Popen(['certutil', '-d', certs_path, '-A', '-n', 'ca', + '-t', 'TCu,Cu,Tuw'], stdin=subprocess.PIPE) + p.communicate(out) + else: + raise KSCertSetupException("No ca cert available from keystone") + + +@defer_if_unavailable(['keystoneclient']) +def get_ks_signing_cert(admin_token, auth_endpoint, certs_path): + """Get and store keystone signing certificate. + + :param admin_token: Keystone admin token + :param auth_endpoint: Keystone auth endpoint url + :param certs_path: Path to local certs store + :returns: None + """ + ksclient = client.Client(token=admin_token, endpoint=auth_endpoint) + signing_cert = get_ks_cert(ksclient, auth_endpoint, 'signing') + if signing_cert: + try: + # Cert should not contain unicode chars. + str(signing_cert) + except UnicodeEncodeError: + raise KSCertSetupException("Invalid signing cert from keystone") + + log("Updating signing cert from keystone", level=DEBUG) + signing_cert_path = os.path.join(certs_path, 'signing_certificate.pem') + with open(signing_cert_path, 'w') as fd: + fd.write(signing_cert) + + out = subprocess.check_output(['openssl', 'x509', '-in', + signing_cert_path, '-pubkey']) + p = subprocess.Popen(['certutil', '-A', '-d', certs_path, '-n', + 'signing_cert', '-t', 'P,P,P'], + stdin=subprocess.PIPE) + p.communicate(out) + else: + raise KSCertSetupException("No signing cert available from keystone") + + +@defer_if_unavailable(['keystoneclient']) +def setup_keystone_certs(unit=None, rid=None): + """ + Get CA and signing certs from Keystone used to decrypt revoked token list.
+ + :param unit: context unit id + :param rid: context relation id + :returns: None + """ + certs_path = '/var/lib/ceph/nss' + if not os.path.exists(certs_path): + mkdir(certs_path) + + rdata = relation_get(unit=unit, rid=rid) + required = ['admin_token', 'auth_host', 'auth_port'] + settings = {key: rdata.get(key) for key in required} + if not all(settings.values()): + log("Missing relation settings ({}) - deferring cert setup".format( + ', '.join([k for k in settings if not settings[k]])), + level=DEBUG) + return + + auth_protocol = rdata.get('auth_protocol', 'http') + if is_ipv6(settings.get('auth_host')): + settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) + + auth_endpoint = "{}://{}:{}/v2.0".format(auth_protocol, + settings['auth_host'], + settings['auth_port']) + + try: + get_ks_ca_cert(settings['admin_token'], auth_endpoint, certs_path) + get_ks_signing_cert(settings['admin_token'], auth_endpoint, certs_path) + except KSCertSetupException as e: + log("Keystone certs setup incomplete - {}".format(e), level=INFO) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index a9fae89d..2dae2970 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -1,5 +1,13 @@ +import sys + +from mock import ( + call, + patch, + mock_open, + MagicMock, +) + import utils -from mock import patch, MagicMock from test_utils import CharmTestCase @@ -9,7 +17,7 @@ class CephRadosGWUtilTests(CharmTestCase): def setUp(self): - super(CephRadosGWUtilTests, self).setUp(utils, TO_PATCH) + super(CephRadosGWUtilTests, self).setUp(None, TO_PATCH) def test_assess_status(self): with patch.object(utils, 'assess_status_func') as asf: @@ -54,3 +62,101 @@ def test_pause_resume_helper(self, services): asf.assert_called_once_with('some-config') # ports=None whilst port checks are disabled. 
f.assert_called_once_with('assessor', services='s1', ports=None) + + @patch.dict('sys.modules', {'requests': MagicMock(), + 'keystoneclient': MagicMock()}) + @patch.object(utils, 'is_ipv6', lambda addr: False) + @patch.object(utils, 'get_ks_signing_cert') + @patch.object(utils, 'get_ks_ca_cert') + @patch.object(utils, 'relation_get') + @patch.object(utils, 'mkdir') + def test_setup_keystone_certs(self, mock_mkdir, mock_relation_get, + mock_get_ks_ca_cert, + mock_get_ks_signing_cert): + auth_host = 'foo/bar' + auth_port = 80 + admin_token = '666' + auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) + mock_relation_get.return_value = {'auth_host': auth_host, + 'auth_port': auth_port, + 'admin_token': admin_token} + utils.setup_keystone_certs() + mock_get_ks_signing_cert.assert_has_calls([call(admin_token, auth_url, + '/var/lib/ceph/nss')]) + mock_get_ks_ca_cert.assert_has_calls([call(admin_token, auth_url, + '/var/lib/ceph/nss')]) + + def test_get_ks_signing_cert(self): + auth_host = 'foo/bar' + auth_port = 80 + admin_token = '666' + auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) + + mock_ksclient = MagicMock + m = mock_open() + with patch.dict('sys.modules', + {'requests': MagicMock(), + 'keystoneclient': mock_ksclient, + 'keystoneclient.exceptions': MagicMock(), + 'keystoneclient.exceptions.ConnectionRefused': + MagicMock(), + 'keystoneclient.exceptions.Forbidden': MagicMock(), + 'keystoneclient.v2_0': MagicMock(), + 'keystoneclient.v2_0.client': MagicMock()}): + # Reimport + del sys.modules['utils'] + import utils + with patch.object(utils, 'subprocess') as mock_subprocess: + with patch.object(utils, 'open', m, create=True): + mock_certificates = MagicMock() + mock_ksclient.certificates = mock_certificates + mock_certificates.get_signing_certificate.return_value = \ + 'signing_cert_data' + utils.get_ks_signing_cert(admin_token, auth_url, + '/foo/bar') + mock_certificates.get_signing_certificate.return_value = \ + None + self.assertRaises(utils.KSCertSetupException, + utils.get_ks_signing_cert, admin_token, + auth_url, '/foo/bar') + + c = ['openssl', 'x509', '-in', + '/foo/bar/signing_certificate.pem', + '-pubkey'] + mock_subprocess.check_output.assert_called_with(c) + + def test_get_ks_ca_cert(self): + auth_host = 'foo/bar' + auth_port = 80 + admin_token = '666' + auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) + + mock_ksclient = MagicMock + m = mock_open() + with patch.dict('sys.modules', + {'requests': MagicMock(), + 'keystoneclient': mock_ksclient, + 'keystoneclient.exceptions': MagicMock(), + 'keystoneclient.exceptions.ConnectionRefused': + MagicMock(), + 'keystoneclient.exceptions.Forbidden': MagicMock(), + 'keystoneclient.v2_0': MagicMock(), + 'keystoneclient.v2_0.client': MagicMock()}): + # Reimport + del sys.modules['utils'] + import utils + with patch.object(utils, 'subprocess') as mock_subprocess: + with patch.object(utils, 'open', m, create=True): + mock_certificates = MagicMock() + mock_ksclient.certificates = mock_certificates + mock_certificates.get_ca_certificate.return_value = \ + 'ca_cert_data' + utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') + mock_certificates.get_ca_certificate.return_value = None + self.assertRaises(utils.KSCertSetupException, + utils.get_ks_ca_cert, admin_token, + auth_url, '/foo/bar') + + c = ['openssl', 'x509', '-in', '/foo/bar/ca.pem', + '-pubkey'] + mock_subprocess.check_output.assert_called_with(c) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 82989f54..b698a72d 
100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -107,7 +107,6 @@ def test_install(self): self.os.makedirs.called_with('/var/lib/ceph/nss') @patch.object(ceph_hooks, 'update_nrpe_config') - @patch.object(ceph_hooks, 'mkdir', lambda *args: None) def test_config_changed(self, update_nrpe_config): _install_packages = self.patch('install_packages') ceph_hooks.config_changed() diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index 613f888d..9c3cb38a 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -3,11 +3,7 @@ import unittest import yaml -from contextlib import contextmanager -from mock import ( - patch, - MagicMock, -) +from mock import patch def load_config(): @@ -104,22 +100,3 @@ def get(self, attr=None, unit=None, rid=None): elif attr in self.relation_data: return self.relation_data[attr] return None - - -@contextmanager -def patch_open(): - """Patch open() to allow mocking both open() itself and the file that is - yielded. - - Yields the mock for "open" and "file", respectively. - """ - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file From dbe9e5cb94dc7b3aa9920d9b9e4099bcdb9e6f8e Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 May 2016 11:09:35 +0100 Subject: [PATCH 1132/2699] Resync charm helpers Add support for OpenStack Newton and Ocata. Rework version detection code to just match on major version for OpenStack projects using semantic versioning. Provide fallback version detection based on major.minor versions for swift packages. Rework config-flags support helpers. Fix is_ip function to correctly detect both IPv4 and IPv6 addresses. Change-Id: I3546fbeb847a6d8793b4bb75048d7a68d1236d0b --- .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../charmhelpers/contrib/openstack/context.py | 88 +------- .../contrib/openstack/exceptions.py | 6 + .../charmhelpers/contrib/openstack/utils.py | 206 ++++++++++++++++-- .../contrib/storage/linux/ceph.py | 41 ++++ .../hooks/charmhelpers/fetch/__init__.py | 8 + 6 files changed, 247 insertions(+), 108 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 6bba07b6..99d78f2f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -405,10 +405,10 @@ def is_ip(address): Returns True if address is a valid IP address. 
""" try: - # Test to see if already an IPv4 address - socket.inet_aton(address) + # Test to see if already an IPv4/IPv6 address + address = netaddr.IPAddress(address) return True - except socket.error: + except netaddr.AddrFormatError: return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index c07b33dd..5faa7eda 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -23,7 +23,6 @@ from subprocess import check_call, CalledProcessError import six -import yaml from charmhelpers.fetch import ( apt_install, @@ -50,6 +49,7 @@ from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError from charmhelpers.core.host import ( get_bond_master, @@ -88,7 +88,10 @@ is_address_in_network, is_bridge_member, ) -from charmhelpers.contrib.openstack.utils import get_host_ip +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_host_ip, +) from charmhelpers.core.unitdata import kv try: @@ -101,10 +104,6 @@ ADDRESS_TYPES = ['admin', 'internal', 'public'] -class OSContextError(Exception): - pass - - def ensure_packages(packages): """Install but do not upgrade required plugin packages.""" required = filter_installed_packages(packages) @@ -125,83 +124,6 @@ def context_complete(ctxt): return True -def config_flags_parser(config_flags): - """Parses config flags string into dict. - - This parsing method supports a few different formats for the config - flag values to be parsed: - - 1. A string in the simple format of key=value pairs, with the possibility - of specifying multiple key value pairs within the same string. For - example, a string in the format of 'key1=value1, key2=value2' will - return a dict of: - - {'key1': 'value1', - 'key2': 'value2'}. - - 2. A string in the above format, but supporting a comma-delimited list - of values for the same key. For example, a string in the format of - 'key1=value1, key2=value3,value4,value5' will return a dict of: - - {'key1', 'value1', - 'key2', 'value2,value3,value4'} - - 3. A string containing a colon character (:) prior to an equal - character (=) will be treated as yaml and parsed as such. This can be - used to specify more complex key value pairs. For example, - a string in the format of 'key1: subkey1=value1, subkey2=value2' will - return a dict of: - - {'key1', 'subkey1=value1, subkey2=value2'} - - The provided config_flags string may be a list of comma-separated values - which themselves may be comma-separated list of values. - """ - # If we find a colon before an equals sign then treat it as yaml. - # Note: limit it to finding the colon first since this indicates assignment - # for inline yaml. - colon = config_flags.find(':') - equals = config_flags.find('=') - if colon > 0: - if colon < equals or equals < 0: - return yaml.safe_load(config_flags) - - if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", level=ERROR) - raise OSContextError - - # strip the following from each value. - post_strippers = ' ,' - # we strip any leading/trailing '=' or ' ' from the string then - # split on '='. 
- split = config_flags.strip(' =').split('=') - limit = len(split) - flags = {} - for i in range(0, limit - 1): - current = split[i] - next = split[i + 1] - vindex = next.rfind(',') - if (i == limit - 2) or (vindex < 0): - value = next - else: - value = next[:vindex] - - if i == 0: - key = current - else: - # if this not the first entry, expect an embedded key. - index = current.rfind(',') - if index < 0: - log("Invalid config value(s) at index %s" % (i), level=ERROR) - raise OSContextError - key = current[index + 1:] - - # Add to collection. - flags[key.strip(post_strippers)] = value.rstrip(post_strippers) - - return flags - - class OSContextGenerator(object): """Base class for all context generators.""" interfaces = [] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 00000000..ea4eb68e --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,6 @@ +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index e64a106c..bd6efc48 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -25,6 +25,7 @@ import re import itertools import functools +import shutil import six import tempfile @@ -46,6 +47,7 @@ charm_dir, DEBUG, INFO, + ERROR, related_units, relation_ids, relation_set, @@ -82,6 +84,7 @@ from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -100,6 +103,8 @@ ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zebra', 'ocata'), # TODO: upload with real Z name ]) @@ -114,6 +119,8 @@ ('2015.1', 'kilo'), ('2015.2', 'liberty'), ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), ]) # The ugly duckling - must list releases oldest to newest @@ -138,47 +145,65 @@ ['2.3.0', '2.4.0', '2.5.0']), ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0']), ]) # >= Liberty version->codename mapping PACKAGE_CODENAMES = { 'nova-common': OrderedDict([ - ('12.0', 'liberty'), - ('13.0', 'mitaka'), + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), ]), 'neutron-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), - ('8.1', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'cinder-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'keystone': OrderedDict([ - ('8.0', 'liberty'), - ('8.1', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'horizon-common': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'ceilometer-common': OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 
'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'heat-common': OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'glance-common': OrderedDict([ - ('11.0', 'liberty'), - ('12.0', 'mitaka'), + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), ]), 'openstack-dashboard': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), } @@ -254,6 +279,7 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. @@ -265,6 +291,16 @@ def get_swift_codename(version): return codename elif len(codenames) == 1: return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match('^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + return None @@ -303,10 +339,13 @@ def get_os_codename_package(package, fatal=True): if match: vers = match.group(0) + # Generate a major version number for newer semantic + # versions of openstack projects + major_vers = vers.split('.')[0] # >= Liberty independent project versions if (package in PACKAGE_CODENAMES and - vers in PACKAGE_CODENAMES[package]): - return PACKAGE_CODENAMES[package][vers] + major_vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][major_vers] else: # < Liberty co-ordinated project versions try: @@ -466,6 +505,9 @@ def configure_installation_source(rel): 'mitaka': 'trusty-updates/mitaka', 'mitaka/updates': 'trusty-updates/mitaka', 'mitaka/proposed': 'trusty-proposed/mitaka', + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', } try: @@ -858,6 +900,47 @@ def git_yaml_value(projects_yaml, key): return None +def git_generate_systemd_init_files(templates_dir): + """ + Generate systemd init files. + + Generates and installs systemd init units and script files based on the + *.init.in files contained in the templates_dir directory. + + This code is based on the openstack-pkg-tools package and its init + script generation, which is used by the OpenStack packages. 
+ """ + for f in os.listdir(templates_dir): + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + init_source = os.path.join(templates_dir, init_file) + service_source = os.path.join(templates_dir, service_file) + + init_dest = os.path.join('/etc/init.d', init_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(init_in_source, init_source) + with open(init_source, 'a') as outfile: + template = '/usr/share/openstack-pkg-tools/init-script-template' + with open(template) as infile: + outfile.write('\n\n{}'.format(infile.read())) + + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(init_dest): + os.remove(init_dest) + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.move(init_source, init_dest) + shutil.move(service_source, service_dest) + os.chmod(init_dest, 0o755) + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1574,3 +1657,82 @@ def wrapped_f(*args, **kwargs): restart_functions) return wrapped_f return wrap + + +def config_flags_parser(config_flags): + """Parses config flags string into dict. + + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + + {'key1': 'value1', + 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + + {'key1', 'value1', + 'key2', 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + + {'key1', 'subkey1=value1, subkey2=value2'} + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. + colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return yaml.safe_load(config_flags) + + if config_flags.find('==') >= 0: + juju_log("config_flags is not in expected format (key=value)", + level=ERROR) + raise OSContextError + + # strip the following from each value. + post_strippers = ' ,' + # we strip any leading/trailing '=' or ' ' from the string then + # split on '='. + split = config_flags.strip(' =').split('=') + limit = len(split) + flags = {} + for i in range(0, limit - 1): + current = split[i] + next = split[i + 1] + vindex = next.rfind(',') + if (i == limit - 2) or (vindex < 0): + value = next + else: + value = next[:vindex] + + if i == 0: + key = current + else: + # if this not the first entry, expect an embedded key. 
+ index = current.rfind(',') + if index < 0: + juju_log("Invalid config value(s) at index %s" % (i), + level=ERROR) + raise OSContextError + key = current[index + 1:] + + # Add to collection. + flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + + return flags diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index d008081f..2528f5cf 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ CalledProcessError, ) from charmhelpers.core.hookenv import ( + config, local_unit, relation_get, relation_ids, @@ -64,6 +65,7 @@ ) from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -1204,3 +1206,42 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + + +class CephConfContext(object): + """Ceph config (ceph.conf) context. + + Supports user-provided Ceph configuration settings. Users can provide a + dictionary as the value for the config-flags charm option containing + Ceph configuration settings keyed by their section in ceph.conf. + """ + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if type(conf) != dict: + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).symmetric_difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + + return ceph_conf diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index db0d86a2..ad485ec8 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -106,6 +106,14 @@ 'mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', } # The order of this list is very important. Handlers should be listed in from
From 1c24b8cedef25e6b1f38eccf92a01eb91a57e161 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 26 May 2016 14:48:16 +0100 Subject: [PATCH 1133/2699] Add support for user-provided ceph config
Adds a new config-flags option to the charm that supports setting a dictionary of ceph configuration settings that will be applied to ceph.conf.
This implementation supports config sections so that settings can be applied to any section supported by the ceph.conf template in the charm.
Change-Id: I306fd138820746c565f8c7cd83d3ffcc388b9735 Closes-Bug: 1522375
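To illustrate the flow (with assumed option values, not taken from the charm): the config-flags string is parsed into a dict by config_flags_parser, and CephConfContext then filters it against the permitted ceph.conf sections before rendering. A standalone sketch of that filtering step:

    # Assumed user input, already parsed from the config-flags string.
    conf = {'global': {'debug osd': '1/5'},
            'osd': {'osd max backfills': '2'},
            'mds': {'mds cache size': '250000'}}   # not in permitted list

    permitted = ['global', 'osd']   # sections the ceph.conf template supports
    ceph_conf = {k: v for k, v in conf.items() if k in permitted}
    print(ceph_conf)   # the 'mds' key is dropped (warned about in the real code)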
--- ceph-osd/charm-helpers-hooks.yaml | 9 +- ceph-osd/config.yaml | 19 + ceph-osd/hooks/ceph_hooks.py | 13 +- .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../contrib/openstack/exceptions.py | 6 + .../charmhelpers/contrib/openstack/utils.py | 1738 +++++++++++++++++ .../charmhelpers/contrib/python/__init__.py | 15 + .../charmhelpers/contrib/python/packages.py | 145 ++ .../contrib/storage/linux/ceph.py | 41 + .../contrib/storage/linux/loopback.py | 88 + .../charmhelpers/contrib/storage/linux/lvm.py | 105 + ceph-osd/hooks/charmhelpers/fetch/__init__.py | 8 + ceph-osd/templates/ceph.conf | 33 +- ceph-osd/unit_tests/test_ceph_hooks.py | 118 ++ ceph-osd/unit_tests/test_upgrade_roll.py | 1 + 15 files changed, 2325 insertions(+), 20 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/python/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/python/packages.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py create mode 100644 ceph-osd/unit_tests/test_ceph_hooks.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index b727a82a..24c4f22a 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -4,10 +4,13 @@ include: - core - cli - fetch - - contrib.storage.linux: - - ceph - - utils + - contrib.python.packages + - contrib.storage.linux - contrib.openstack.alternatives - contrib.network.ip + - contrib.openstack: + - alternatives + - exceptions + - utils - contrib.charmsupport - contrib.hardening|inc=* diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b658bb40..6253bcf2 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -3,6 +3,25 @@ options: default: 1 type: int description: OSD debug level. Max is 20. + config-flags: + type: string + default: + description: | + User provided Ceph configuration. Supports a string representation of + a python dictionary where each top-level key represents a section in + the ceph.conf template. You may only use sections supported in the + template. + . + WARNING: this is not the recommended way to configure the underlying + services that this charm installs and is used at the user's own risk. + This option is mainly provided as a stop-gap for users that either + want to test the effect of modifying some config or who have found + a critical bug in the way the charm has configured their services + and need it fixed immediately. We ask that whenever this is used, + the user consider opening a bug on this charm at + http://bugs.launchpad.net/charms providing an explanation of why the + config was needed so that we may consider it for inclusion as a + natively supported config in the charm.
osd-devices: type: string default: /dev/vdb diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f38108ec..4ab41bc0 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -66,6 +66,7 @@ format_ipv6_addr, ) from charmhelpers.contrib.storage.linux.ceph import ( + CephConfContext, monitor_key_set, monitor_key_exists, monitor_key_get) @@ -304,7 +305,7 @@ def use_short_objects(): return False -def emit_cephconf(): +def get_ceph_context(): mon_hosts = get_mon_hosts() log('Monitor hosts are ' + repr(mon_hosts)) @@ -348,13 +349,21 @@ def emit_cephconf(): "have support for Availability Zones" ) + # NOTE(dosaboy): these sections must correspond to what is supported in the + # config template. + sections = ['global', 'osd'] + cephcontext.update(CephConfContext(permitted_sections=sections)()) + return cephcontext + + +def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) with open(charm_ceph_conf, 'w') as cephconf: - cephconf.write(render_template('ceph.conf', cephcontext)) + cephconf.write(render_template('ceph.conf', get_ceph_context())) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 90) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 6bba07b6..99d78f2f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -405,10 +405,10 @@ def is_ip(address): Returns True if address is a valid IP address. """ try: - # Test to see if already an IPv4 address - socket.inet_aton(address) + # Test to see if already an IPv4/IPv6 address + address = netaddr.IPAddress(address) return True - except socket.error: + except netaddr.AddrFormatError: return False diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 00000000..ea4eb68e --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,6 @@ +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..bd6efc48 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,1738 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict +from functools import wraps + +import subprocess +import json +import os +import sys +import re +import itertools +import functools +import shutil + +import six +import tempfile +import traceback +import uuid +import yaml + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import ( + unitdata, +) + +from charmhelpers.core.hookenv import ( + action_fail, + action_set, + config, + log as juju_log, + charm_dir, + DEBUG, + INFO, + ERROR, + related_units, + relation_ids, + relation_set, + status_set, + hook_name +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, + port_has_listener, +) + +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + restart_on_change_helper, +) +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zebra', 'ocata'), # TODO: upload with real Z name +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), +]) + +# The ugly duckling - must list releases oldest to newest +SWIFT_CODENAMES = OrderedDict([ + ('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0']), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), + ]), + 'neutron-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ]), + 'cinder-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ]), + 'keystone': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ]), + 'horizon-common': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ]), + 'ceilometer-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), 
+ ('7', 'newton'), + ('8', 'ocata'), + ]), + 'heat-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ]), + 'glance-common': OrderedDict([ + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. 
+ for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match('^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + if 'swift' in pkg.name: + # Fully x.y.z match for swift versions + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + else: + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + + # Generate a major version number for newer semantic + # versions of openstack projects + major_vers = vers.split('.')[0] + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + major_vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][major_vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + return get_swift_codename(vers) + else: + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + for cname, version in six.iteritems(vers_map): + if cname == codename: + return version[-1] + else: + vers_map = OPENSTACK_CODENAMES + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. 
+ ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + key = keyid.strip() + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + juju_log("Importing ASCII Armor PGP key", level=DEBUG) + with tempfile.NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + else: + juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) + juju_log("Importing PGP key from keyserver", level=DEBUG) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + + +def get_source_and_pgp_key(input): + """Look for a pgp key ID or ascii-armor key in the given input.""" + index = input.strip() + index = input.rfind('|') + if index < 0: + return input, None + + key = input[index + 1:].strip('|') + source = input[:index] + return source, key + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. 
+ + """ + + import apt_pkg as apt + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + if "swift" in package: + codename = get_os_codename_install_source(src) + avail_vers = get_os_version_codename_swift(codename) + else: + avail_vers = get_os_version_install_source(src) + apt.init() + if "swift" in package: + major_cur_vers = cur_vers.split('.', 1)[0] + major_avail_vers = avail_vers.split('.', 1)[0] + major_diff = apt.version_compare(major_avail_vers, major_cur_vers) + return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) + return apt.version_compare(avail_vers, cur_vers) == 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. + ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """ + Returns true if openstack-origin-git is specified. + """ + return config('openstack-origin-git') is not None + + +requirements_dir = None + + +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a dictionary. 
+ """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project): + """ + Clone/install all specified OpenStack repositories. + + The expected format of projects_yaml is: + + repositories: + - {name: keystone, + repository: 'git://git.openstack.org/openstack/keystone.git', + branch: 'stable/icehouse'} + - {name: requirements, + repository: 'git://git.openstack.org/openstack/requirements.git', + branch: 'stable/icehouse'} + + directory: /mnt/openstack-git + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url + + The directory, http_proxy, and https_proxy keys are optional. + + """ + global requirements_dir + parent_dir = '/mnt/openstack-git' + http_proxy = None + + projects = _git_yaml_load(projects_yaml) + _git_validate_projects_yaml(projects, core_project) + + old_environ = dict(os.environ) + + if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] + os.environ['http_proxy'] = projects['http_proxy'] + if 'https_proxy' in projects.keys(): + os.environ['https_proxy'] = projects['https_proxy'] + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + + # Upgrade setuptools and pip from default virtualenv versions. The default + # versions in trusty break master OpenStack branch deployments. + for p in ['pip', 'setuptools']: + pip_install(p, upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + + for p in projects['repositories']: + repo = p['repository'] + branch = p['branch'] + depth = '1' + if 'depth' in p.keys(): + depth = p['depth'] + if p['name'] == 'requirements': + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=False) + requirements_dir = repo_dir + else: + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, + update_requirements=True) + + os.environ = old_environ + + +def _git_validate_projects_yaml(projects, core_project): + """ + Validate the projects yaml. + """ + _git_ensure_key_exists('repositories', projects) + + for project in projects['repositories']: + _git_ensure_key_exists('name', project.keys()) + _git_ensure_key_exists('repository', project.keys()) + _git_ensure_key_exists('branch', project.keys()) + + if projects['repositories'][0]['name'] != 'requirements': + error_out('{} git repo must be specified first'.format('requirements')) + + if projects['repositories'][-1]['name'] != core_project: + error_out('{} git repo must be specified last'.format(core_project)) + + +def _git_ensure_key_exists(key, keys): + """ + Ensure that key exists in keys. + """ + if key not in keys: + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) + + +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, + update_requirements): + """ + Clone and install a single git repository. + """ + if not os.path.exists(parent_dir): + juju_log('Directory already exists at {}. 
' + 'No need to create directory.'.format(parent_dir)) + os.mkdir(parent_dir) + + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote( + repo, dest=parent_dir, branch=branch, depth=depth) + + venv = os.path.join(parent_dir, 'venv') + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(venv, repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + if http_proxy: + pip_install(repo_dir, proxy=http_proxy, venv=venv) + else: + pip_install(repo_dir, venv=venv) + + return repo_dir + + +def _git_update_requirements(venv, package_dir, reqs_dir): + """ + Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt. + """ + orig_dir = os.getcwd() + os.chdir(reqs_dir) + python = os.path.join(venv, 'bin/python') + cmd = [python, 'update.py', package_dir] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from " + "global-requirements.txt".format(package)) + os.chdir(orig_dir) + + +def git_pip_venv_dir(projects_yaml): + """ + Return the pip virtualenv path. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + return os.path.join(parent_dir, 'venv') + + +def git_src_dir(projects_yaml, project): + """ + Return the directory where the specified project's source is located. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + if p['name'] == project: + return os.path.join(parent_dir, os.path.basename(p['repository'])) + + return None + + +def git_yaml_value(projects_yaml, key): + """ + Return the value in projects_yaml for the specified key. + """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None + + +def git_generate_systemd_init_files(templates_dir): + """ + Generate systemd init files. + + Generates and installs systemd init units and script files based on the + *.init.in files contained in the templates_dir directory. + + This code is based on the openstack-pkg-tools package and its init + script generation, which is used by the OpenStack packages. 
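+
+    As a sketch of the flow: a template named 'foo.init.in' (name
+    illustrative) is rendered to 'foo' plus a generated 'foo.service',
+    which are installed as /etc/init.d/foo and
+    /lib/systemd/system/foo.service respectively.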
+ """ + for f in os.listdir(templates_dir): + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + init_source = os.path.join(templates_dir, init_file) + service_source = os.path.join(templates_dir, service_file) + + init_dest = os.path.join('/etc/init.d', init_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(init_in_source, init_source) + with open(init_source, 'a') as outfile: + template = '/usr/share/openstack-pkg-tools/init-script-template' + with open(template) as infile: + outfile.write('\n\n{}'.format(infile.read())) + + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(init_dest): + os.remove(init_dest) + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.move(init_source, init_dest) + shutil.move(service_source, service_dest) + os.chmod(init_dest, 0o755) + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. 
+        The signature is charm_func(configs) -> (state, message)
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: OPTIONAL list of port numbers.
+    @returns state, message: the new workload status, user message
+    """
+    state, message = _ows_check_if_paused(services, ports)
+
+    if state is None:
+        state, message = _ows_check_generic_interfaces(
+            configs, required_interfaces)
+
+    if state != 'maintenance' and charm_func:
+        # _ows_check_charm_func() may modify the state, message
+        state, message = _ows_check_charm_func(
+            state, message, lambda: charm_func(configs))
+
+    if state is None:
+        state, message = _ows_check_services_running(services, ports)
+
+    if state is None:
+        state = 'active'
+        message = "Unit is ready"
+        juju_log(message, 'INFO')
+
+    return state, message
+
+
+def _ows_check_if_paused(services=None, ports=None):
+    """Check if the unit is supposed to be paused, and if so check that the
+    services/ports (if passed) are actually stopped/not being listened to.
+
+    if the unit isn't supposed to be paused, just return None, None
+
+    @param services: OPTIONAL services spec or list of service names.
+    @param ports: OPTIONAL list of port numbers.
+    @returns state, message or None, None
+    """
+    if is_unit_paused_set():
+        state, message = check_actually_paused(services=services,
+                                               ports=ports)
+        if state is None:
+            # we're paused okay, so set maintenance and return
+            state = "maintenance"
+            message = "Paused. Use 'resume' action to resume normal service."
+        return state, message
+    return None, None
+
+
+def _ows_check_generic_interfaces(configs, required_interfaces):
+    """Check the complete contexts to determine the workload status.
+
+    - Checks for missing or incomplete contexts
+    - logs details of missing required data to the juju log
+    - determines the correct workload status
+    - creates an appropriate message for status_set(...)
+
+    if there are no problems then the function returns None, None
+
+    @param configs: a templating.OSConfigRenderer() object
+    @params required_interfaces: {generic_interface: [specific_interface], }
+    @returns state, message or None, None
+    """
+    incomplete_rel_data = incomplete_relation_data(configs,
+                                                   required_interfaces)
+    state = None
+    message = None
+    missing_relations = set()
+    incomplete_relations = set()
+
+    for generic_interface, relations_states in incomplete_rel_data.items():
+        related_interface = None
+        missing_data = {}
+        # Related or not?
+        for interface, relation_state in relations_states.items():
+            if relation_state.get('related'):
+                related_interface = interface
+                missing_data = relation_state.get('missing_data')
+                break
+        # No relation ID for the generic_interface?
+        if not related_interface:
+            juju_log("{} relation is missing and must be related for "
+                     "functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            missing_relations.add(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case - relation ID exists but departing
+                _hook_name = hook_name()
+                if (('departed' in _hook_name or 'broken' in _hook_name) and
+                        related_interface in _hook_name):
+                    state = 'blocked'
+                    missing_relations.add(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface),
+                             "WARN")
+                # Normal case relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relation's interface, {}, is related but has"
+                             " no units in the relation."
+                             "".format(generic_interface, related_interface),
+                             "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in missing_relations:
+                incomplete_relations.add(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+    """Run a custom check function for the charm to see if it wants to
+    change the state.  This is only run if not in 'maintenance' and
+    tests to see if the new state is more important than the previous
+    one determined by the interfaces/relations check.
+
+    @param state: the previously determined state so far.
+    @param message: the user orientated message so far.
+    @param charm_func: a callable function that returns state, message
+    @returns state, message strings.
+    """
+    if charm_func_with_configs:
+        charm_state, charm_message = charm_func_with_configs()
+        if charm_state != 'active' and charm_state != 'unknown':
+            state = workload_state_compare(state, charm_state)
+            if message:
+                charm_message = charm_message.replace("Incomplete relations: ",
+                                                      "")
+                message = "{}, {}".format(message, charm_message)
+            else:
+                message = charm_message
+    return state, message
+
+
+def _ows_check_services_running(services, ports):
+    """Check that the services that should be running are actually running
+    and that any ports specified are being listened to.
+
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: list of ports
+    @returns state, message: strings or None, None
+    """
+    messages = []
+    state = None
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, running = _check_running_services(services)
+        if not all(running):
+            messages.append(
+                "Services not running that should be: {}"
+                .format(", ".join(_filter_tuples(services_running, False))))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        map_not_open, ports_open = (
+            _check_listening_on_services_ports(services))
+        if not all(ports_open):
+            # find which service has missing ports. They are in service
+            # order which makes it a bit easier.
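+            # e.g. map_not_open = {'apache2': {80, 443}} would render as
+            # "apache2: [80, 443]" in the status message (illustrative
+            # values only).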
+ message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in ports_open + if not v]))) + state = 'blocked' + + if state is not None: + message = "; ".join(messages) + return state, message + + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. + + @param services: OrderedDict of strings: [ports], one for each service to + check. + @returns [(service, boolean), ...], : results for checks + [boolean] : just the result of the service checks + """ + services_running = [service_running(s) for s in services] + return list(zip(services, services_running)), services_running + + +def _check_listening_on_services_ports(services, test=False): + """Check that the unit is actually listening (has the port open) on the + ports that the service specifies are open. If test is True then the + function returns the services with ports that are open rather than + closed. + + Returns an OrderedDict of service: ports and a list of booleans + + @param services: OrderedDict(service: [port, ...], ...) + @param test: default=False, if False, test for closed, otherwise open. + @returns OrderedDict(service: [port-not-open, ...]...), [boolean] + """ + test = not(not(test)) # ensure test is True or False + all_ports = list(itertools.chain(*services.values())) + ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + map_ports = OrderedDict() + matched_ports = [p for p, opened in zip(all_ports, ports_states) + if opened == test] # essentially opened xor test + for service, ports in services.items(): + set_ports = set(ports).intersection(matched_ports) + if set_ports: + map_ports[service] = set_ports + return map_ports, ports_states + + +def _check_listening_on_ports_list(ports): + """Check that the ports list given are being listened to + + Returns a list of ports being listened to and a list of the + booleans. 
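+
+    Illustrative sketch: _check_listening_on_ports_list([80, 8080]) might
+    return ([(80, True), (8080, False)], [True, False]) if only port 80
+    has a listener.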
+
+    @param ports: LIST of port numbers.
+    @returns [(port_num, boolean), ...], [boolean]
+    """
+    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+    return zip(ports, ports_open), ports_open
+
+
+def _filter_tuples(services_states, state):
+    """Return a simple list from a list of tuples according to the condition
+
+    @param services_states: LIST of (string, boolean): service and running
+                            state.
+    @param state: Boolean to match the tuple against.
+    @returns [LIST of strings] that matched the tuple RHS.
+    """
+    return [s for s, b in services_states if b == state]
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
+
+
+def incomplete_relation_data(configs, required_interfaces):
+    """Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+        required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in
+    the list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points. Example:
+        {'message':
+             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+              'zeromq-configuration': {'related': False}},
+         'identity':
+             {'identity-service': {'related': False}},
+         'database':
+             {'pgsql-db': {'related': False},
+              'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = [
+        svc_type
+        for svc_type, interfaces in required_interfaces.items()
+        if not set(interfaces).intersection(complete_ctxts)]
+    return {
+        i: configs.get_incomplete_context_data(required_interfaces[i])
+        for i in incomplete_relations}
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
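+
+    A typical wiring from an action script might look like this (sketch;
+    'upgrade_callback_func' and 'CONFIGS' are illustrative names, not part
+    of this module):
+
+        do_action_openstack_upgrade('ceph-common', upgrade_callback_func,
+                                    CONFIGS)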
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if git_install_requested():
+        action_set({'outcome': 'installed from source, skipped upgrade.'})
+    else:
+        if openstack_upgrade_available(package):
+            if config('action-managed-upgrade'):
+                juju_log('Upgrading OpenStack release')
+
+                try:
+                    upgrade_callback(configs=configs)
+                    action_set({'outcome': 'success, upgrade completed.'})
+                    ret = True
+                except:
+                    action_set({'outcome': 'upgrade failed, see traceback.'})
+                    action_set({'traceback': traceback.format_exc()})
+                    action_fail('do_openstack_upgrade resulted in an '
+                                'unexpected error')
+            else:
+                action_set({'outcome': 'action-managed-upgrade config is '
+                                       'False, skipped upgrade.'})
+        else:
+            action_set({'outcome': 'no upgrade available.'})
+
+    return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
+
+
+def check_actually_paused(services=None, ports=None):
+    """Check that services listed in the services object and ports
+    are actually closed (not listened to), to verify that the unit is
+    properly paused.
+
+    @param services: See _extract_services_list_helper
+    @returns status, : string for status (None if okay)
+             message : string for problem for status_set
+    """
+    state = None
+    message = None
+    messages = []
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, services_states = _check_running_services(services)
+        if any(services_states):
+            # there shouldn't be any running so this is a problem
+            messages.append("these services running: {}"
+                            .format(", ".join(
+                                _filter_tuples(services_running, True))))
+            state = "blocked"
+        ports_open, ports_open_bools = (
+            _check_listening_on_services_ports(services, True))
+        if any(ports_open_bools):
+            message_parts = {service: ", ".join([str(v) for v in open_ports])
+                             for service, open_ports in ports_open.items()}
+            message = ", ".join(
+                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+            messages.append(
+                "these service:ports are open: {}".format(message))
+            state = 'blocked'
+    if ports is not None:
+        ports_open, bools = _check_listening_on_ports_list(ports)
+        if any(bools):
+            messages.append(
+                "these ports which should be closed, but are open: {}"
+                .format(", ".join([str(p) for p, v in ports_open if v])))
+            state = 'blocked'
+    if messages:
+        message = ("Services should be paused but {}"
+                   .format(", ".join(messages)))
+    return state, message
+
+
+def set_unit_paused():
+    """Set the unit to a paused state in the local kv() store.
+    This does NOT actually pause the unit
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', True)
+
+
+def clear_unit_paused():
+    """Clear the unit from a paused state in the local kv() store
+    This does NOT actually restart any services - it only clears the
+    local state.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', False)
+
+
+def is_unit_paused_set():
+    """Return the state of the kv().get('unit-paused').
+    This does NOT verify that the unit really is paused.
+
+    To help with units that don't have HookData() (testing)
+    if it excepts, return False
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return not(not(kv.get('unit-paused')))
+    except:
+        return False
+
+
+def pause_unit(assess_status_func, services=None, ports=None,
+               charm_func=None):
+    """Pause a unit by stopping the services and setting 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have stopped and ports are no longer
+    being listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+    didn't pause cleanly.
+
+    The signature for charm_func is:
+    charm_func() -> message: string
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of port
+    @param charm_func: function to run for custom charm pausing.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
+    """
+    services = _extract_services_list_helper(services)
+    messages = []
+    if services:
+        for service in services.keys():
+            stopped = service_pause(service)
+            if not stopped:
+                messages.append("{} didn't stop cleanly.".format(service))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            messages.append(str(e))
+    set_unit_paused()
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages:
+        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
+def resume_unit(assess_status_func, services=None, ports=None,
+                charm_func=None):
+    """Resume a unit by starting the services and clearing 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have started and ports are being
+    listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+    didn't resume cleanly.
+
+    The signature for charm_func is:
+    charm_func() -> message: string
+
+    charm_func() is executed after any services are started, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of port
+    @param charm_func: function to run for custom charm resuming.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
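+
+    Example (sketch; the service name and the CONFIGS/REQUIRED_INTERFACES
+    objects are illustrative, defined by the calling charm):
+
+        resume_unit(make_assess_status_func(CONFIGS, REQUIRED_INTERFACES),
+                    services=['radosgw'])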
+    """
+    services = _extract_services_list_helper(services)
+    messages = []
+    if services:
+        for service in services.keys():
+            started = service_resume(service)
+            if not started:
+                messages.append("{} didn't start cleanly.".format(service))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            messages.append(str(e))
+    clear_unit_paused()
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages:
+        raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
+def make_assess_status_func(*args, **kwargs):
+    """Creates an assess_status_func() suitable for handing to pause_unit()
+    and resume_unit().
+
+    This uses the _determine_os_workload_status(...) function to determine
+    what the workload_status should be for the unit.  If the unit is
+    not in maintenance or active states, then the message is returned to
+    the caller.  This is so an action that doesn't result in either a
+    complete pause or complete resume can signal failure with an action_fail()
+    """
+    def _assess_status_func():
+        state, message = _determine_os_workload_status(*args, **kwargs)
+        status_set(state, message)
+        if state not in ['maintenance', 'active']:
+            return message
+        return None
+
+    return _assess_status_func
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+                               restart_functions=None):
+    """A restart_on_change decorator that checks to see if the unit is
+    paused.  If it is paused then the decorated function doesn't fire.
+
+    This is provided as a helper, as the @restart_on_change(...) decorator
+    is in core.host, yet the openstack specific helpers are in this file
+    (contrib.openstack.utils).  Thus, this needs to be an optional feature
+    for openstack charms (or charms that wish to use the openstack
+    pause/resume type features).
+
+    It is used as follows:
+
+        from contrib.openstack.utils import (
+            pausable_restart_on_change as restart_on_change)
+
+        @restart_on_change(restart_map, stopstart=<boolean>)
+        def some_hook(...):
+            pass
+
+    see core.utils.restart_on_change() for more details.
+
+    @param f: the function to decorate
+    @param restart_map: the restart map {conf_file: [services]}
+    @param stopstart: DEFAULT false; whether to stop, start or just restart
+    @returns decorator to use a restart_on_change with pausability
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            if is_unit_paused_set():
+                return f(*args, **kwargs)
+            # otherwise, normal restart_on_change functionality
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def config_flags_parser(config_flags):
+    """Parses config flags string into dict.
+
+    This parsing method supports a few different formats for the config
+    flag values to be parsed:
+
+    1. A string in the simple format of key=value pairs, with the possibility
+       of specifying multiple key value pairs within the same string. For
+       example, a string in the format of 'key1=value1, key2=value2' will
+       return a dict of:
+
+           {'key1': 'value1',
+            'key2': 'value2'}.
+
+    2. A string in the above format, but supporting a comma-delimited list
+       of values for the same key. For example, a string in the format of
+       'key1=value1, key2=value3,value4,value5' will return a dict of:
+
+           {'key1': 'value1',
+            'key2': 'value3,value4,value5'}
+
+    3. A string containing a colon character (:) prior to an equal
+       character (=) will be treated as yaml and parsed as such. This can be
+       used to specify more complex key value pairs. For example,
+       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
+       return a dict of:
+
+           {'key1': 'subkey1=value1, subkey2=value2'}
+
+    The provided config_flags string may be a list of comma-separated values
+    which may themselves be comma-separated lists of values.
+    """
+    # If we find a colon before an equals sign then treat it as yaml.
+    # Note: limit it to finding the colon first since this indicates
+    # assignment for inline yaml.
+    colon = config_flags.find(':')
+    equals = config_flags.find('=')
+    if colon > 0:
+        if colon < equals or equals < 0:
+            return yaml.safe_load(config_flags)
+
+    if config_flags.find('==') >= 0:
+        juju_log("config_flags is not in expected format (key=value)",
+                 level=ERROR)
+        raise OSContextError
+
+    # strip the following from each value.
+    post_strippers = ' ,'
+    # we strip any leading/trailing '=' or ' ' from the string then
+    # split on '='.
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = {}
+    for i in range(0, limit - 1):
+        current = split[i]
+        next = split[i + 1]
+        vindex = next.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next
+        else:
+            value = next[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this is not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                juju_log("Invalid config value(s) at index %s" % (i),
+                         level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py
new file mode 100644
index 00000000..d1400a02
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py
new file mode 100644
index 00000000..a2411c37
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski "
+
+
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause wheels
+    from /usr/share/python-wheels, which are installed by various tools,
+    to be added to sys.path.  This function ensures that sys.path remains
+    the same after the call is executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if not value:
+            continue
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
+    """Install a python package"""
+    if venv:
+        venv_python = os.path.join(venv, 'bin/pip')
+        command = [venv_python, "install"]
+    else:
+        command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', 'index-url', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if upgrade:
+        command.append('--upgrade')
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    if venv:
+        subprocess.check_call(command)
+    else:
+        pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Returns the list of current python installed packages
+    """
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+    """Create an isolated Python environment."""
+    apt_install('python-virtualenv')
+
+    if path:
+        venv_path = path
+    else:
+        venv_path = os.path.join(charm_dir(), 'venv')
+
+    if not os.path.exists(venv_path):
+        subprocess.check_call(['virtualenv', venv_path])
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
index d008081f..b2484e78 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -40,6 +40,7 @@
     CalledProcessError,
 )
 from charmhelpers.core.hookenv import (
+    config,
     local_unit,
     relation_get,
     relation_ids,
@@ -64,6 +65,7 @@
 )
 from charmhelpers.core.kernel import modprobe
+from charmhelpers.contrib.openstack.utils import config_flags_parser

 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'
@@ -1204,3 +1206,42 @@ def send_request_if_needed(request, relation='ceph'):
     for rid in relation_ids(relation):
         log('Sending request {}'.format(request.request_id), level=DEBUG)
         relation_set(relation_id=rid, broker_req=request.request)
+
+
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings. Users can provide a
+    dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
+    """
+    def __init__(self, permitted_sections=None):
+        self.permitted_sections = permitted_sections or []
+
+    def __call__(self):
+        conf = config('config-flags')
+        if not conf:
+            return {}
+
+        conf = config_flags_parser(conf)
+        if type(conf) != dict:
+            log("Provided config-flags is not a dictionary - ignoring",
+                level=WARNING)
+            return {}
+
+        permitted = self.permitted_sections
+        if permitted:
+            diff = set(conf.keys()).difference(set(permitted))
+            if diff:
+                log("Config-flags contains invalid keys '%s' - they will be "
+                    "ignored" % (', '.join(diff)), level=WARNING)
+
+        ceph_conf = {}
+        for key in conf:
+            if permitted and key not in permitted:
+                log("Ignoring key '%s'" % key, level=WARNING)
+                continue
+
+            ceph_conf[key] = conf[key]
+
+        return ceph_conf
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 00000000..3a3f5146
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,88 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from subprocess import (
+    check_call,
+    check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
+def loopback_devices():
+    '''
+    Parse through 'losetup -a' output to determine currently mapped
+    loopback devices. Output is expected to look like:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img)
+
+    :returns: dict: a dict mapping {loopback_dev: backing_file}
+    '''
+    loopbacks = {}
+    cmd = ['losetup', '-a']
+    devs = [d.strip().split(' ') for d in
+            check_output(cmd).splitlines() if d != '']
+    for dev, _, f in devs:
+        loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
+    return loopbacks
+
+
+def create_loopback(file_path):
+    '''
+    Create a loopback device for a given backing file.
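+
+    For example (sketch), create_loopback('/srv/loop0.img') maps the file
+    and returns something like '/dev/loop0'.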
+
+    :returns: str: Full path to new loopback device (eg, /dev/loop0)
+    '''
+    file_path = os.path.abspath(file_path)
+    check_call(['losetup', '--find', file_path])
+    for d, f in six.iteritems(loopback_devices()):
+        if f == file_path:
+            return d
+
+
+def ensure_loopback_device(path, size):
+    '''
+    Ensure a loopback device exists for a given backing file path and size.
+    If a loopback device is not mapped to the file, a new one will be created.
+
+    TODO: Confirm size of found loopback device.
+
+    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+    '''
+    for d, f in six.iteritems(loopback_devices()):
+        if f == path:
+            return d
+
+    if not os.path.exists(path):
+        cmd = ['truncate', '--size', size, path]
+        check_call(cmd)
+
+    return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if it is a loopback device,
+    empty string otherwise
+    """
+    return loopback_devices().get(device, "")
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 00000000..34b5f71a
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,105 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    Popen,
+    PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+    '''
+    Deactivate any volume group associated with an LVM physical volume.
+
+    :param block_device: str: Full path to LVM physical volume
+    '''
+    vg = list_lvm_volume_group(block_device)
+    if vg:
+        cmd = ['vgchange', '-an', vg]
+        check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+    '''
+    Determine whether a block device is initialized as an LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: boolean: True if block device is a PV, False if not.
+    '''
+    try:
+        check_output(['pvdisplay', block_device])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def remove_lvm_physical_volume(block_device):
+    '''
+    Remove LVM PV signatures from a given block device.
+
+    :param block_device: str: Full path of block device to scrub.
+    '''
+    p = Popen(['pvremove', '-ff', block_device],
+              stdin=PIPE)
+    p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+    '''
+    List LVM volume group associated with a given block device.
+
+    Assumes block device is a valid LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+ + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for l in pvd: + l = l.decode('UTF-8') + if l.strip().startswith('VG Name'): + vg = ' '.join(l.strip().split()[2:]) + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. + ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index db0d86a2..ad485ec8 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -106,6 +106,14 @@ 'mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', } # The order of this list is very important. Handlers should be listed in from diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index adbb565e..4491baef 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -1,11 +1,11 @@ [global] -{% if old_auth %} +{%- if old_auth %} auth supported = {{ auth_supported }} -{% else %} +{%- else %} auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} -{% endif %} +{%- endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} @@ -15,22 +15,27 @@ err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} debug osd = {{ loglevel }}/5 -{%- if ceph_public_network is string %} +{% if ceph_public_network is string %} public network = {{ ceph_public_network }} {%- endif %} {%- if ceph_cluster_network is string %} cluster network = {{ ceph_cluster_network }} {%- endif %} - -{% if public_addr %} +{%- if public_addr %} public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} +{%- endif %} +{%- if cluster_addr %} cluster addr = {{ cluster_addr }} {%- endif %} - -{% if crush_location %} +{%- if crush_location %} osd crush location = {{crush_location}} +{%- endif %} +{% if global -%} +# The following are user-provided options provided via the config-flags charm option. 
+# User-provided [global] section config +{% for key in global -%} +{{ key }} = {{ global[key] }} +{% endfor %} {% endif %} [client.osd-upgrade] @@ -47,9 +52,13 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring osd journal size = {{ osd_journal_size }} filestore xattr use omap = true journal dio = {{ dio }} - {%- if short_object_len %} osd max object name len = 256 osd max object namespace len = 64 {% endif %} - +{% if osd -%} +# The following are user-provided options provided via the config-flags charm option. +{% for key in osd -%} +{{ key }} = {{ osd[key] }} +{% endfor %} +{% endif %} diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py new file mode 100644 index 00000000..f0608128 --- /dev/null +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -0,0 +1,118 @@ +import copy +import unittest + +from mock import patch + +import charmhelpers.contrib.storage.linux.ceph as ceph +import ceph_hooks + + +CHARM_CONFIG = {'config-flags': '', + 'loglevel': 1, + 'use-syslog': True, + 'osd-journal-size': 1024, + 'use-direct-io': True, + 'osd-format': 'ext4', + 'prefer-ipv6': False, + 'customize-failure-domain': False} + + +class CephHooksTestCase(unittest.TestCase): + def setUp(self): + super(CephHooksTestCase, self).setUp() + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['config-flags'] = '{"osd": {"osd max write size": 1024}}' + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 
'old_auth': False, + 'osd': {'osd max write size': 1024}, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_w_config_flags_invalid(self, mock_config, + mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['config-flags'] = ('{"osd": {"osd max write size": 1024},' + '"foo": "bar"}') + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'osd': {'osd max write size': 1024}, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index 840e247c..e76543cc 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -132,6 +132,7 @@ def test_roll_osd_cluster_second(self, 'Waiting on ip-192-168-1-2 to finish upgrading') lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + @patch('time.time', lambda *args: previous_node_start_time + 10 * 60 + 1) @patch('ceph_hooks.monitor_key_get') @patch('ceph_hooks.monitor_key_exists') def test_wait_on_previous_node(self, From f1e0a04db886a4ea39c0bdae090088f31f89e880 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 26 May 2016 15:18:19 +0100 Subject: [PATCH 1134/2699] Add support for user-provided ceph config Adds a new config-flags option to the charm that supports setting a dictionary of ceph configuration settings that will be applied to ceph.conf. This implementation supports config sections so that settings can be applied to any section supported by the ceph.conf template in the charm. 
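
For example (illustrative value only), a deployment could set:

    juju set ceph-radosgw config-flags='{"global": {"debug rgw": "10/5"}}'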
Change-Id: I8a447209b9040890e7c10585321b71da08a26b11
Closes-Bug: 1522375
---
 ceph-radosgw/charm-helpers-hooks.yaml          | 11 ++++-------
 ceph-radosgw/config.yaml                       | 19 +++++++++++++++++++
 ceph-radosgw/hooks/ceph_radosgw_context.py     |  9 +++++++++
 .../contrib/storage/linux/ceph.py              |  2 +-
 ceph-radosgw/hooks/utils.py                    |  9 ---------
 ceph-radosgw/templates/ceph.conf               | 14 ++++++++++++++
 .../unit_tests/test_ceph_radosgw_context.py    | 14 +++++++++++++-
 7 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml
index ed1b6154..25fcc42b 100644
--- a/ceph-radosgw/charm-helpers-hooks.yaml
+++ b/ceph-radosgw/charm-helpers-hooks.yaml
@@ -4,16 +4,13 @@ include:
     - core
     - cli
     - fetch
-    - contrib.storage.linux:
-        - utils
+    - contrib.python.packages
+    - contrib.storage.linux
     - contrib.hahelpers:
        - apache
        - cluster
    - payload.execd
-    - contrib.openstack|inc=*
    - contrib.network.ip
-    - contrib.openstack.ip
-    - contrib.storage.linux
-    - contrib.python.packages
+    - contrib.openstack|inc=*
    - contrib.charmsupport
-    - contrib.hardening|inc=*
\ No newline at end of file
+    - contrib.hardening|inc=*
diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index f53870d2..81aa5eb3 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -25,6 +25,25 @@ options:
     description: |
       Key ID to import to the apt keyring to support use with arbitrary source
       configuration from outside of Launchpad archives or PPA's.
+  config-flags:
+    type: string
+    default:
+    description: |
+      User-provided Ceph configuration. Supports a string representation of
+      a python dictionary where each top-level key represents a section in
+      the ceph.conf template. You may only use sections supported in the
+      template.
+      .
+      WARNING: this is not the recommended way to configure the underlying
+      services that this charm installs and is used at the user's own risk.
+      This option is mainly provided as a stop-gap for users that either
+      want to test the effect of modifying some config or who have found
+      a critical bug in the way the charm has configured their services
+      and need it fixed immediately. We ask that, whenever this option is
+      used, the user consider opening a bug on this charm at
+      http://bugs.launchpad.net/charms providing an explanation of why the
+      config was needed so that we may consider it for inclusion as a
+      natively supported config in the charm.
   port:
     type: int
     default: 80
diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py
index 73558b17..179e615e 100644
--- a/ceph-radosgw/hooks/ceph_radosgw_context.py
+++ b/ceph-radosgw/hooks/ceph_radosgw_context.py
@@ -27,6 +27,7 @@
     get_host_ip,
     get_ipv6_addr,
 )
+from charmhelpers.contrib.storage.linux.ceph import CephConfContext


 def is_apache_24():
@@ -224,6 +225,14 @@ def __call__(self):
             # NOTE: currently only applied if NOT using embedded webserver
             ctxt['disable_100_continue'] = True

+        # NOTE(dosaboy): these sections must correspond to what is supported
+        # in the config template.
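+        # For example (illustrative), a config-flags value of
+        #   {"client.radosgw.gateway": {"rgw init timeout": 60}}
+        # surfaces in the template context as
+        #   {'client_radosgw_gateway': {'rgw init timeout': 60}}
+        # since '.' is replaced with '_' below.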
+ sections = ['global', 'client.radosgw.gateway'] + user_provided = CephConfContext(permitted_sections=sections)() + user_provided = {k.replace('.', '_'): user_provided[k] + for k in user_provided} + ctxt.update(user_provided) + if self.context_complete(ctxt): return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2528f5cf..b2484e78 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1231,7 +1231,7 @@ def __call__(self): permitted = self.permitted_sections if permitted: - diff = set(conf.keys()).symmetric_difference(set(permitted)) + diff = set(conf.keys()).difference(set(permitted)) if diff: log("Config-flags contains invalid keys '%s' - they will be " "ignored" % (', '.join(diff)), level=WARNING) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 33966ca5..914bcec7 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -14,7 +14,6 @@ from collections import OrderedDict from copy import deepcopy -import jinja2 import ceph_radosgw_context @@ -165,14 +164,6 @@ def register_configs(release='icehouse'): return configs -def render_template(template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) - template = templates.get_template(template_name) - return template.render(context) - - def services(): """Returns a list of services associate with this charm.""" _services = [] diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 28efdeba..c37d3da6 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -14,6 +14,13 @@ debug rgw = {{ loglevel }}/5 {% if ipv6 -%} ms bind ipv6 = true {% endif %} +{% if global -%} +# The following are user-provided options provided via the config-flags charm option. +# User-provided [global] section config +{% for key in global -%} +{{ key }} = {{ global[key] }} +{% endfor %} +{% endif %} [client.radosgw.gateway] host = {{ hostname }} @@ -38,3 +45,10 @@ rgw s3 auth use keystone = true nss db path = /var/lib/ceph/nss {% endif %} {% endif %} +{% if client_radosgw_gateway -%} +# The following are user-provided options provided via the config-flags charm option. 
+# User-provided [client.radosgw.gateway] section config +{% for key in client_radosgw_gateway -%} +{{ key }} = {{ client_radosgw_gateway[key] }} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 5071b671..2942a0ed 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -1,9 +1,10 @@ from mock import patch import ceph_radosgw_context as context +import charmhelpers +import charmhelpers.contrib.storage.linux.ceph as ceph from test_utils import CharmTestCase -import charmhelpers TO_PATCH = [ 'config', @@ -148,6 +149,8 @@ def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) self.config.side_effect = self.test_config.get + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') def test_ctxt(self, mock_ensure_rsv_v6): self.socket.gethostname.return_value = 'testhost' @@ -173,6 +176,7 @@ def _relation_get(attr, unit, rid): 'use_syslog': 'false', 'loglevel': 1, 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False } self.assertEqual(expect, mon_ctxt()) @@ -185,6 +189,8 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') def test_ctxt_missing_data(self): self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() @@ -193,6 +199,8 @@ def test_ctxt_missing_data(self): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] self.assertEqual({}, mon_ctxt()) + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') def test_ctxt_inconsistent_auths(self): self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() @@ -217,10 +225,13 @@ def _relation_get(attr, unit, rid): 'use_syslog': 'false', 'loglevel': 1, 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False } self.assertEqual(expect, mon_ctxt()) + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') def test_ctxt_consistent_auths(self): self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() @@ -245,6 +256,7 @@ def _relation_get(attr, unit, rid): 'use_syslog': 'false', 'loglevel': 1, 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False } self.assertEqual(expect, mon_ctxt()) From c79c0653d02d1f8b6297da2dbe7fa56366191b37 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 14 Jun 2016 14:42:56 -0400 Subject: [PATCH 1135/2699] migrating to proxy charm --- ceph-proxy/config.yaml | 112 +- ceph-proxy/hooks/ceph_hooks.py | 539 +-------- ceph-proxy/hooks/mon-relation-changed | 1 - ceph-proxy/hooks/mon-relation-departed | 1 - ceph-proxy/hooks/mon-relation-joined | 1 - .../nrpe-external-master-relation-changed | 1 - .../nrpe-external-master-relation-joined | 1 - ceph-proxy/hooks/osd-relation-joined | 1 - ceph-proxy/hooks/start | 1 - ceph-proxy/hooks/stop | 1 - ceph-proxy/hooks/upgrade-charm | 1 - ceph-proxy/metadata.yaml | 18 +- .../templates/ceph.client.admin.keyring | 2 + ceph-proxy/templates/ceph.conf | 32 +- ceph-proxy/templates/mon.keyring | 3 + ceph-proxy/tests/014-basic-precise-icehouse | 11 - 
ceph-proxy/tests/015-basic-trusty-icehouse | 9 - ceph-proxy/tests/016-basic-trusty-juno | 11 - ceph-proxy/tests/017-basic-trusty-kilo | 11 - ceph-proxy/tests/018-basic-trusty-liberty | 11 - ceph-proxy/tests/019-basic-trusty-mitaka | 11 - ceph-proxy/tests/020-basic-wily-liberty | 9 - ceph-proxy/tests/021-basic-xenial-mitaka | 9 - ceph-proxy/tests/README | 113 -- ceph-proxy/tests/basic_deployment.py | 683 ----------- ceph-proxy/tests/charmhelpers/__init__.py | 38 - .../tests/charmhelpers/contrib/__init__.py | 15 - .../charmhelpers/contrib/amulet/__init__.py | 15 - .../charmhelpers/contrib/amulet/deployment.py | 95 -- .../charmhelpers/contrib/amulet/utils.py | 829 -------------- .../contrib/openstack/__init__.py | 15 - .../contrib/openstack/amulet/__init__.py | 15 - .../contrib/openstack/amulet/deployment.py | 304 ----- .../contrib/openstack/amulet/utils.py | 1012 ----------------- ceph-proxy/tests/setup/00-setup | 17 - ceph-proxy/tests/tests.yaml | 22 - ceph-proxy/unit_tests/__init__.py | 2 - ceph-proxy/unit_tests/test_ceph_broker.py | 137 --- ceph-proxy/unit_tests/test_ceph_networking.py | 51 - ceph-proxy/unit_tests/test_ceph_ops.py | 214 ---- ceph-proxy/unit_tests/test_status.py | 103 -- ceph-proxy/unit_tests/test_upgrade_roll.py | 154 --- ceph-proxy/unit_tests/test_utils.py | 121 -- 43 files changed, 78 insertions(+), 4674 deletions(-) delete mode 120000 ceph-proxy/hooks/mon-relation-changed delete mode 120000 ceph-proxy/hooks/mon-relation-departed delete mode 120000 ceph-proxy/hooks/mon-relation-joined delete mode 120000 ceph-proxy/hooks/nrpe-external-master-relation-changed delete mode 120000 ceph-proxy/hooks/nrpe-external-master-relation-joined delete mode 120000 ceph-proxy/hooks/osd-relation-joined delete mode 120000 ceph-proxy/hooks/start delete mode 120000 ceph-proxy/hooks/stop delete mode 120000 ceph-proxy/hooks/upgrade-charm create mode 100644 ceph-proxy/templates/ceph.client.admin.keyring create mode 100644 ceph-proxy/templates/mon.keyring delete mode 100755 ceph-proxy/tests/014-basic-precise-icehouse delete mode 100755 ceph-proxy/tests/015-basic-trusty-icehouse delete mode 100755 ceph-proxy/tests/016-basic-trusty-juno delete mode 100755 ceph-proxy/tests/017-basic-trusty-kilo delete mode 100755 ceph-proxy/tests/018-basic-trusty-liberty delete mode 100755 ceph-proxy/tests/019-basic-trusty-mitaka delete mode 100755 ceph-proxy/tests/020-basic-wily-liberty delete mode 100755 ceph-proxy/tests/021-basic-xenial-mitaka delete mode 100644 ceph-proxy/tests/README delete mode 100644 ceph-proxy/tests/basic_deployment.py delete mode 100644 ceph-proxy/tests/charmhelpers/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py delete mode 100755 ceph-proxy/tests/setup/00-setup delete mode 100644 ceph-proxy/tests/tests.yaml delete mode 100644 ceph-proxy/unit_tests/__init__.py delete mode 100644 ceph-proxy/unit_tests/test_ceph_broker.py delete mode 100644 ceph-proxy/unit_tests/test_ceph_networking.py delete mode 100644 
ceph-proxy/unit_tests/test_ceph_ops.py delete mode 100644 ceph-proxy/unit_tests/test_status.py delete mode 100644 ceph-proxy/unit_tests/test_upgrade_roll.py delete mode 100644 ceph-proxy/unit_tests/test_utils.py diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 55bd761a..2d7957d1 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -1,8 +1,4 @@ options: - loglevel: - default: 1 - type: int - description: Mon and OSD debug level. Max is 20. fsid: type: string default: @@ -11,32 +7,22 @@ options: . This configuration element is mandatory and the service will fail on install if it is not provided. - auth-supported: + monitor-hosts: type: string - default: cephx + default: description: | - Which authentication flavour to use. - . - Valid options are "cephx" and "none". If "none" is specified, - keys will still be created and deployed so that it can be - enabled later. - monitor-secret: + Space-separated list of existing monitor hosts, in the format + {IP / Hostname}:{port} {IP / Hostname}:{port} + admin-key: type: string default: description: | - This value will become the mon. key. To generate a suitable value use: - . - ceph-authtool /dev/stdout --name=mon. --gen-key - . - This configuration element is mandatory and the service will fail on - install if it is not provided. - monitor-count: - type: int - default: 3 + Admin cephx key for an existing Ceph cluster + mon-key: + type: string + default: description: | - How many nodes to wait for before trying to create the monitor cluster - this number needs to be odd, and more than three is a waste except for - very large clusters. + Monitor cephx key source: type: string default: @@ -59,81 +45,3 @@ description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPAs. - use-syslog: - type: boolean - default: False - description: | - If set to True, supporting services will log to syslog. - ceph-public-network: - type: string - default: - description: | - The IP address and netmask of the public (front-side) network (e.g., - 192.168.0.0/24) - . - If multiple networks are to be used, a space-delimited list of a.b.c.d/x - can be provided. - ceph-cluster-network: - type: string - default: - description: | - The IP address and netmask of the cluster (back-side) network (e.g., - 192.168.0.0/24) - . - If multiple networks are to be used, a space-delimited list of a.b.c.d/x - can be provided. - prefer-ipv6: - type: boolean - default: False - description: | - If True enables IPv6 support. The charm will expect network interfaces - to be configured with an IPv6 address. If set to False (default) IPv4 - is expected. - - NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy extension must be - disabled and a non-temporary address must be configured/available on - your network interface. - sysctl: - type: string - default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, - kernel.threads-max: 2097152 }' - description: | - YAML-formatted associative array of sysctl key/value pairs to be set - persistently. By default we set pid_max, max_map_count and - threads-max to a high value to avoid problems with large numbers (>20) - of OSDs recovering. very large clusters should set those values even - higher (e.g. max for kernel.pid_max is 4194303).
- customize-failure-domain: - type: boolean - default: false - description: | - Setting this to true will tell Ceph to replicate across Juju's - Availability Zone instead of specifically by host. - nagios_context: - type: string - default: "juju" - type: string - description: | - Used by the nrpe-external-master subordinate charm. - A string that will be prepended to instance name to set the host name - in nagios. So for instance the hostname would be something like: - juju-myservice-0 - If you're running multiple environments with the same services in them - this allows you to differentiate between them. - nagios_servicegroups: - default: "" - type: string - description: | - A comma-separated list of nagios servicegroups. - If left empty, the nagios_context will be used as the servicegroup - use-direct-io: - default: True - type: boolean - description: Configure use of direct IO for OSD journals. - harden: - default: - type: string - description: | - Apply system hardening. Supports a space-delimited list of modules - to run. Supported modules currently include os, ssh, apache and mysql. diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 8b2bf5ef..aa3431f3 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -63,205 +63,21 @@ monitor_key_exists, monitor_key_get, get_mon_map) -from utils import ( - get_networks, - get_public_addr, - get_cluster_addr, - assert_charm_supports_ipv6 -) + from ceph_broker import ( process_requests ) + +from utils import ( + get_public_addr, + get_unit_hostname, +) + from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() -NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' -SCRIPTS_DIR = '/usr/local/bin' -STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' -STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' - -# A dict of valid ceph upgrade paths. Mapping is old -> new -upgrade_paths = { - 'cloud:trusty-juno': 'cloud:trusty-kilo', - 'cloud:trusty-kilo': 'cloud:trusty-liberty', - 'cloud:trusty-liberty': 'cloud:trusty-mitaka', -} - - -def pretty_print_upgrade_paths(): - lines = [] - for key, value in upgrade_paths.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - - -def check_for_upgrade(): - release_info = host.lsb_release() - if not release_info['DISTRIB_CODENAME'] == 'trusty': - log("Invalid upgrade path from {}. Only trusty is currently " - "supported".format(release_info['DISTRIB_CODENAME'])) - return - - c = hookenv.config() - old_version = c.previous('source') - log('old_version: {}'.format(old_version)) - # Strip all whitespace - new_version = hookenv.config('source') - if new_version: - # replace all whitespace - new_version = new_version.replace(' ', '') - log('new_version: {}'.format(new_version)) - - if old_version in upgrade_paths: - if new_version == upgrade_paths[old_version]: - log("{} to {} is a valid upgrade path. Proceeding.".format( - old_version, new_version)) - roll_monitor_cluster(new_version) - else: - # Log a helpful error message - log("Invalid upgrade path from {} to {}. 
" - "Valid paths are: {}".format(old_version, - new_version, - pretty_print_upgrade_paths())) - - -def lock_and_roll(my_name): - start_timestamp = time.time() - - log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) - monitor_key_set('admin', "{}_start".format(my_name), start_timestamp) - log("Rolling") - # This should be quick - upgrade_monitor() - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) - monitor_key_set('admin', "{}_done".format(my_name), stop_timestamp) - - -def wait_on_previous_node(previous_node): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - 'admin', - "{}_done".format(previous_node)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - 'admin', - "{}_start".format(previous_node)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - 'admin', - "{}_done".format(previous_node)) - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -def roll_monitor_cluster(new_version): - """ - This is tricky to get right so here's what we're going to do. - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous monitor is upgraded yet. - """ - log('roll_monitor_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - monitor_list = [] - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - else: - status_set('blocked', 'Unable to get monitor cluster information') - sys.exit(1) - log('monitor_list: {}'.format(monitor_list)) - - # A sorted list of osd unit names - mon_sorted_list = sorted(monitor_list) - - try: - position = mon_sorted_list.index(my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - mon_sorted_list[position - 1])) - wait_on_previous_node(previous_node=mon_sorted_list[position - 1]) - lock_and_roll(my_name=my_name) - except ValueError: - log("Failed to find {} in list {}.".format( - my_name, mon_sorted_list)) - status_set('blocked', 'failed to upgrade monitor') - - -def upgrade_monitor(): - current_version = ceph.get_version() - status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if ceph.systemd(): - for mon_id in ceph.get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) - else: - service_stop('ceph-mon-all') - apt_install(packages=ceph.PACKAGES, fatal=True) - if ceph.systemd(): - for mon_id in ceph.get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) - else: - service_start('ceph-mon-all') - status_set("active", "") - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - def install_upstart_scripts(): # Only install upstart configurations for older versions @@ -281,35 +97,14 @@ def install(): def emit_cephconf(): - networks = get_networks('ceph-public-network') - public_network = ', '.join(networks) - - networks = get_networks('ceph-cluster-network') - cluster_network = ', '.join(networks) cephcontext = { - 'auth_supported': config('auth-supported'), - 'mon_hosts': ' '.join(get_mon_hosts()), - 'fsid': leader_get('fsid'), - 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, - 'osd_journal_size': config('osd-journal-size'), + 'mon_hosts': config('monitor-hosts'), + 'fsid': config('fsid'), 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': public_network, - 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), - 'dio': str(config('use-direct-io')).lower(), } - if config('prefer-ipv6'): - dynamic_ipv6_address = get_ipv6_addr()[0] - if not public_network: - cephcontext['public_addr'] = dynamic_ipv6_address - if not cluster_network: - cephcontext['cluster_addr'] = dynamic_ipv6_address - else: - cephcontext['public_addr'] = get_public_addr() - cephcontext['cluster_addr'] = get_cluster_addr() - # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) @@ -318,152 +113,22 @@ def emit_cephconf(): render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) + keyring = 'ceph.client.admin.keyring' + keyring_path = '/etc/ceph/' + keyring + render(keyring, keyring_path, {'admin_key': config('admin-key')}, perms=0o600) + keyring = 'keyring' + keyring_path = '/var/lib/ceph/mon/ceph-' + get_unit_hostname()+ '/' + keyring + render('mon.keyring', keyring_path, {'mon_key': config('mon-key')}, perms=0o600) -JOURNAL_ZAPPED 
= '/var/lib/ceph/journal_zapped' - + notify_radosgws() + notify_client() @hooks.hook('config-changed') @harden() def config_changed(): - if config('prefer-ipv6'): - assert_charm_supports_ipv6() - - # Check if an upgrade was requested - check_for_upgrade() - - log('Monitor hosts are ' + repr(get_mon_hosts())) - - sysctl_dict = config('sysctl') - if sysctl_dict: - create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf') - if relations_of_type('nrpe-external-master'): - update_nrpe_config() - - if is_leader(): - if not leader_get('fsid') or not leader_get('monitor-secret'): - if config('fsid'): - fsid = config('fsid') - else: - fsid = "{}".format(uuid.uuid1()) - if config('monitor-secret'): - mon_secret = config('monitor-secret') - else: - mon_secret = "{}".format(ceph.generate_monitor_secret()) - status_set('maintenance', 'Creating FSID and Monitor Secret') - opts = { - 'fsid': fsid, - 'monitor-secret': mon_secret, - } - log("Settings for the cluster are: {}".format(opts)) - leader_set(opts) - else: - if leader_get('fsid') is None or leader_get('monitor-secret') is None: - log('still waiting for leader to setup keys') - status_set('waiting', 'Waiting for leader to setup keys') - sys.exit(0) - emit_cephconf() - # Support use of single node ceph - if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1: - status_set('maintenance', 'Bootstrapping single Ceph MON') - ceph.bootstrap_monitor_cluster(config('monitor-secret')) - ceph.wait_for_bootstrap() - - -def get_mon_hosts(): - hosts = [] - addr = get_public_addr() - hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) - - for relid in relation_ids('mon'): - for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) - if addr is not None: - hosts.append('{}:6789'.format( - format_ipv6_addr(addr) or addr)) - - hosts.sort() - return hosts - - -def get_peer_units(): - """ - Returns a dictionary of unit names from the mon peer relation with - a flag indicating whether the unit has presented its address - """ - units = {} - units[local_unit()] = True - for relid in relation_ids('mon'): - for unit in related_units(relid): - addr = relation_get('ceph-public-address', unit, relid) - units[unit] = addr is not None - return units - - -@hooks.hook('mon-relation-joined') -def mon_relation_joined(): - public_addr = get_public_addr() - for relid in relation_ids('mon'): - relation_set(relation_id=relid, - relation_settings={'ceph-public-address': public_addr}) - - -@hooks.hook('mon-relation-departed', - 'mon-relation-changed') -def mon_relation(): - if leader_get('monitor-secret') is None: - log('still waiting for leader to setup keys') - status_set('waiting', 'Waiting for leader to setup keys') - return - emit_cephconf() - - moncount = int(config('monitor-count')) - if len(get_mon_hosts()) >= moncount: - status_set('maintenance', 'Bootstrapping MON cluster') - ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) - ceph.wait_for_bootstrap() - ceph.wait_for_quorum() - # If we can and want to - if is_leader() and config('customize-failure-domain'): - # But only if the environment supports it - if os.environ.get('JUJU_AVAILABILITY_ZONE'): - cmds = [ - "ceph osd getcrushmap -o /tmp/crush.map", - "crushtool -d /tmp/crush.map| " - "sed 's/step chooseleaf firstn 0 type host/step " - "chooseleaf firstn 0 type rack/' > " - "/tmp/crush.decompiled", - "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", - "crushtool -i /tmp/crush.map --test", - "ceph osd setcrushmap -i /tmp/crush.map" - ] - for cmd 
in cmds: - try: - subprocess.check_call(cmd, shell=True) - except subprocess.CalledProcessError as e: - log("Failed to modify crush map:", level='error') - log("Cmd: {}".format(cmd), level='error') - log("Error: {}".format(e.output), level='error') - break - else: - log( - "Your Juju environment doesn't" - "have support for Availability Zones" - ) - notify_osds() - notify_radosgws() - notify_client() - else: - log('Not enough mons ({}), punting.' - .format(len(get_mon_hosts()))) - - -def notify_osds(): - for relid in relation_ids('osd'): - osd_relation(relid) - def notify_radosgws(): for relid in relation_ids('radosgw'): @@ -476,55 +141,6 @@ def notify_client(): client_relation_joined(relid) -def upgrade_keys(): - """ Ceph now required mon allow rw for pool creation """ - if len(relation_ids('radosgw')) > 0: - ceph.upgrade_key_caps('client.radosgw.gateway', - ceph._radosgw_caps) - for relid in relation_ids('client'): - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] - ceph.upgrade_key_caps('client.{}'.format(service_name), - ceph._default_caps) - - -@hooks.hook('osd-relation-joined') -def osd_relation(relid=None): - if ceph.is_quorum(): - log('mon cluster in quorum - providing fsid & keys') - public_addr = get_public_addr() - data = { - 'fsid': leader_get('fsid'), - 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr, - 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', - caps=ceph.osd_upgrade_caps), - } - relation_set(relation_id=relid, - relation_settings=data) - # NOTE: radosgw key provision is gated on presence of OSD - # units so ensure that any deferred hooks are processed - notify_radosgws() - else: - log('mon cluster not in quorum - deferring fsid provision') - - -def related_osds(num_units=3): - ''' - Determine whether there are OSD units currently related - - @param num_units: The minimum number of units required - @return: boolean indicating whether the required number of - units where detected. - ''' - for r_id in relation_ids('osd'): - if len(related_units(r_id)) >= num_units: - return True - return False - - @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): @@ -535,14 +151,14 @@ def radosgw_relation(relid=None, unit=None): # NOTE: radosgw needs some usage OSD storage, so defer key # provision until OSD units are detected. 
- if ceph.is_quorum() and related_osds(): + if ready(): log('mon cluster in quorum and osds related ' '- providing radosgw with keys') public_addr = get_public_addr() data = { - 'fsid': leader_get('fsid'), + 'fsid': config('fsid'), 'radosgw_key': ceph.get_radosgw_key(), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr, } @@ -559,13 +175,12 @@ def radosgw_relation(relid=None, unit=None): relation_set(relation_id=relid, relation_settings=data) else: - log('mon cluster not in quorum or no osds - deferring key provision') + log('FSID or admin key not provided, please configure them') @hooks.hook('client-relation-joined') def client_relation_joined(relid=None): - if ceph.is_quorum(): - log('mon cluster in quorum - providing client with keys') + if ready(): service_name = None if relid is None: units = [remote_unit()] @@ -578,18 +193,18 @@ def client_relation_joined(relid=None): if service_name is not None: public_addr = get_public_addr() data = {'key': ceph.get_named_key(service_name), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr} relation_set(relation_id=relid, relation_settings=data) else: - log('mon cluster not in quorum - deferring key provision') + log('FSID or admin key not provided, please configure them') @hooks.hook('client-relation-changed') def client_relation_changed(): """Process broker requests from ceph client relations.""" - if ceph.is_quorum(): + if ready(): settings = relation_get() if 'broker_req' in settings: if not ceph.is_leader(): @@ -606,86 +221,44 @@ def client_relation_changed(): } relation_set(relation_settings=data) else: - log('mon cluster not in quorum', level=DEBUG) - - -@hooks.hook('upgrade-charm') -@harden() -def upgrade_charm(): - emit_cephconf() - apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) - install_upstart_scripts() - ceph.update_monfs() - upgrade_keys() - mon_relation_joined() + log('FSID or admin key not provided, please configure them') -@hooks.hook('start') -def start(): - # In case we're being redeployed to the same machines, try - # to make sure everything is running as soon as possible. 
- if ceph.systemd(): - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - - -@hooks.hook('nrpe-external-master-relation-joined') -@hooks.hook('nrpe-external-master-relation-changed') -def update_nrpe_config(): - # python-dbus is used by check_upstart_job - apt_install('python-dbus') - log('Refreshing nagios checks') - if os.path.isdir(NAGIOS_PLUGINS): - rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', - 'check_ceph_status.py'), - os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py')) - - script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh') - rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', - 'nagios', 'collect_ceph_status.sh'), - script) - cronjob = "{} root {}\n".format('*/5 * * * *', script) - write_file(STATUS_CRONFILE, cronjob) - - # Find out if nrpe set nagios_hostname - hostname = nrpe.get_nagios_hostname() - current_unit = nrpe.get_nagios_unit_name() - nrpe_setup = nrpe.NRPE(hostname=hostname) - nrpe_setup.add_check( - shortname="ceph", - description='Check Ceph health {%s}' % current_unit, - check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) - ) - nrpe_setup.write() +def ready(): + return config('fsid') and config('admin-key') def assess_status(): '''Assess status of current unit''' - moncount = int(config('monitor-count')) - units = get_peer_units() - # not enough peers and mon_count > 1 - if len(units.keys()) < moncount: - status_set('blocked', 'Insufficient peer units to bootstrap' - ' cluster (require {})'.format(moncount)) - return - - # mon_count > 1, peers, but no ceph-public-address - ready = sum(1 for unit_ready in units.itervalues() if unit_ready) - if ready < moncount: - status_set('waiting', 'Peer units detected, waiting for addresses') - return - - # active - bootstrapped + quorum status check - if ceph.is_bootstrapped() and ceph.is_quorum(): - status_set('active', 'Unit is ready and clustered') + if ready(): + status_set('active', 'Ready to proxy settings') else: - # Unit should be running and clustered, but no quorum - # TODO: should this be blocked or waiting? - status_set('blocked', 'Unit not clustered (no quorum)') - # If there's a pending lock for this unit, - # can i get the lock? - # reboot the ceph-mon process + status_set('blocked', 'Ensure FSID and admin-key are set') + # moncount = int(config('monitor-count')) + # units = get_peer_units() + # # not enough peers and mon_count > 1 + # if len(units.keys()) < moncount: + # status_set('blocked', 'Insufficient peer units to bootstrap' + # ' cluster (require {})'.format(moncount)) + # return + + # # mon_count > 1, peers, but no ceph-public-address + # ready = sum(1 for unit_ready in units.itervalues() if unit_ready) + # if ready < moncount: + # status_set('waiting', 'Peer units detected, waiting for addresses') + # return + + # # active - bootstrapped + quorum status check + # if ceph.is_bootstrapped() and ceph.is_quorum(): + # status_set('active', 'Unit is ready and clustered') + # else: + # # Unit should be running and clustered, but no quorum + # # TODO: should this be blocked or waiting? + # status_set('blocked', 'Unit not clustered (no quorum)') + # # If there's a pending lock for this unit, + # # can i get the lock? 
+ # # reboot the ceph-mon process + # status_set('active', 'unit state uncertain') @hooks.hook('update-status') diff --git a/ceph-proxy/hooks/mon-relation-changed b/ceph-proxy/hooks/mon-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/mon-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mon-relation-departed b/ceph-proxy/hooks/mon-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/mon-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mon-relation-joined b/ceph-proxy/hooks/mon-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/mon-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/nrpe-external-master-relation-changed b/ceph-proxy/hooks/nrpe-external-master-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/nrpe-external-master-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/nrpe-external-master-relation-joined b/ceph-proxy/hooks/nrpe-external-master-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/nrpe-external-master-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/osd-relation-joined b/ceph-proxy/hooks/osd-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/osd-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/start b/ceph-proxy/hooks/start deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/start +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/stop b/ceph-proxy/hooks/stop deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/upgrade-charm b/ceph-proxy/hooks/upgrade-charm deleted file mode 120000 index 52d96630..00000000 --- a/ceph-proxy/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 9c3969dd..0ff33664 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -1,5 +1,5 @@ -name: ceph-mon -summary: Highly scalable distributed storage +name: ceph-proxy +summary: Proxy to Juju external Ceph cluster maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide @@ -9,22 +9,8 @@ tags: - storage - file-servers - misc -peers: - mon: - interface: ceph -extra-bindings: - public: - cluster: provides: - nrpe-external-master: - interface: nrpe-external-master - scope: container client: interface: ceph-client - osd: - interface: ceph-osd radosgw: interface: ceph-radosgw - nrpe-external-master: - interface: nrpe-external-master - scope: container diff --git a/ceph-proxy/templates/ceph.client.admin.keyring b/ceph-proxy/templates/ceph.client.admin.keyring new file mode 100644 index 00000000..89758812 --- /dev/null +++ b/ceph-proxy/templates/ceph.client.admin.keyring @@ -0,0 +1,2 @@ +[client.admin] + key = {{admin_key}} \ No newline at end of file diff --git a/ceph-proxy/templates/ceph.conf
b/ceph-proxy/templates/ceph.conf index f64db7cb..74febcc6 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,11 +1,9 @@ [global] -{% if old_auth %} -auth supported = {{ auth_supported }} -{% else %} -auth cluster required = {{ auth_supported }} -auth service required = {{ auth_supported }} -auth client required = {{ auth_supported }} -{% endif %} + +auth cluster required = cephx +auth service required = cephx +auth client required = cephx + keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} @@ -17,23 +15,3 @@ mon cluster log to syslog = {{ use_syslog }} debug mon = {{ loglevel }}/5 debug osd = {{ loglevel }}/5 -{%- if ceph_public_network is string %} -public network = {{ ceph_public_network }} -{%- endif %} -{%- if ceph_cluster_network is string %} -cluster network = {{ ceph_cluster_network }} -{%- endif %} - -{% if public_addr %} -public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} -cluster addr = {{ cluster_addr }} -{%- endif %} - -[mon] -keyring = /var/lib/ceph/mon/$cluster-$id/keyring - -[mds] -keyring = /var/lib/ceph/mds/$cluster-$id/keyring - diff --git a/ceph-proxy/templates/mon.keyring b/ceph-proxy/templates/mon.keyring new file mode 100644 index 00000000..2b9d542f --- /dev/null +++ b/ceph-proxy/templates/mon.keyring @@ -0,0 +1,3 @@ +[mon.] + key = {{mon_key}} + caps mon = "allow *" diff --git a/ceph-proxy/tests/014-basic-precise-icehouse b/ceph-proxy/tests/014-basic-precise-icehouse deleted file mode 100755 index 020cd751..00000000 --- a/ceph-proxy/tests/014-basic-precise-icehouse +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-icehouse.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-icehouse', - source='cloud:precise-updates/icehouse') - deployment.run_tests() diff --git a/ceph-proxy/tests/015-basic-trusty-icehouse b/ceph-proxy/tests/015-basic-trusty-icehouse deleted file mode 100755 index f67fea91..00000000 --- a/ceph-proxy/tests/015-basic-trusty-icehouse +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty') - deployment.run_tests() diff --git a/ceph-proxy/tests/016-basic-trusty-juno b/ceph-proxy/tests/016-basic-trusty-juno deleted file mode 100755 index 28c7684e..00000000 --- a/ceph-proxy/tests/016-basic-trusty-juno +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-juno.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-juno', - source='cloud:trusty-updates/juno') - deployment.run_tests() diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/017-basic-trusty-kilo deleted file mode 100755 index 0a787b22..00000000 --- a/ceph-proxy/tests/017-basic-trusty-kilo +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-kilo.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-kilo', - source='cloud:trusty-updates/kilo') - deployment.run_tests() diff --git 
a/ceph-proxy/tests/018-basic-trusty-liberty b/ceph-proxy/tests/018-basic-trusty-liberty deleted file mode 100755 index f339371b..00000000 --- a/ceph-proxy/tests/018-basic-trusty-liberty +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') - deployment.run_tests() diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/019-basic-trusty-mitaka deleted file mode 100755 index 2eca19d6..00000000 --- a/ceph-proxy/tests/019-basic-trusty-mitaka +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/ceph-proxy/tests/020-basic-wily-liberty b/ceph-proxy/tests/020-basic-wily-liberty deleted file mode 100755 index b0d8096b..00000000 --- a/ceph-proxy/tests/020-basic-wily-liberty +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on wily-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='wily') - deployment.run_tests() diff --git a/ceph-proxy/tests/021-basic-xenial-mitaka b/ceph-proxy/tests/021-basic-xenial-mitaka deleted file mode 100755 index ae3d3350..00000000 --- a/ceph-proxy/tests/021-basic-xenial-mitaka +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-proxy/tests/README b/ceph-proxy/tests/README deleted file mode 100644 index 79c5b063..00000000 --- a/ceph-proxy/tests/README +++ /dev/null @@ -1,113 +0,0 @@ -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -Reference: lp:openstack-charm-testing for full test bundles. - -A single topology and configuration is defined and deployed, once for each of -the defined Ubuntu:OpenStack release combos. The ongoing goal is for this -charm to always possess tests and combo definitions for all currently-supported -release combinations of U:OS. - -test_* methods are called in lexical sort order, as with most runners. However, -each individual test method should be idempotent and expected to pass regardless -of run order or Ubuntu:OpenStack combo. When writing or modifying tests, -ensure that every individual test is not dependent on another test_ method. 
- -Test naming convention, purely for code organization purposes: - 1xx service and endpoint checks - 2xx relation checks - 3xx config checks - 4xx functional checks - 9xx restarts, config changes, actions and other final checks - -In order to run tests, charm-tools and juju must be installed: - sudo add-apt-repository ppa:juju/stable - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer amulet - -Alternatively, tests may be exercised with proposed or development versions -of juju and related tools: - - # juju proposed version - sudo add-apt-repository ppa:juju/proposed - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - - # juju development version - sudo add-apt-repository ppa:juju/devel - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - -Some tests may need to download files. If a web proxy server is required in -the environment, the AMULET_HTTP_PROXY environment variable must be set and -passed into the juju test command. This is unrelated to juju's http proxy -settings or behavior. - -The following examples demonstrate different ways that tests can be executed. -All examples are run from the charm's root directory. - - * To run all +x tests in the tests directory: - - bzr branch lp:charms/trusty/foo - cd foo - make functional_test - - * To run the tests against a specific release combo as defined in tests/: - - bzr branch lp:charms/trusty/foo - cd foo - juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To run tests and keep the juju environment deployed after a failure: - - bzr branch lp:charms/trusty/foo - cd foo - juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To re-run a test module against an already deployed environment (one - that was deployed by a previous call to 'juju test --set-e'): - - ./tests/015-basic-trusty-icehouse - - * Even with --set-e, `juju test` will tear down the deployment when all - tests pass. The following work flow may be more effective when - iterating on test writing. - - bzr branch lp:charms/trusty/foo - cd foo - ./tests/setup/00-setup - juju bootstrap - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - - * There may be test definitions in the tests/ dir which are not set +x - executable. This is generally true for deprecated releases, or for - upcoming releases which are not yet validated and enabled. To enable - and run these tests: - bzr branch lp:charms/trusty/foo - cd foo - ls tests - chmod +x tests/017-basic-trusty-kilo - ./tests/setup/00-setup - juju bootstrap - ./tests/017-basic-trusty-kilo - - -Additional notes: - - * Use DEBUG to turn on debug logging, use ERROR otherwise. 
- u = OpenStackAmuletUtils(ERROR) - u = OpenStackAmuletUtils(DEBUG) - - * To interact with the deployed environment: - export OS_USERNAME=admin - export OS_PASSWORD=openstack - export OS_TENANT_NAME=admin - export OS_REGION_NAME=RegionOne - export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 - keystone user-list - glance image-list diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py deleted file mode 100644 index c890f210..00000000 --- a/ceph-proxy/tests/basic_deployment.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python - -import amulet -import re -import time - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa - OpenStackAmuletUtils, - DEBUG, - # ERROR - ) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph deployment.""" - - def __init__(self, series=None, openstack=None, source=None, stable=False): - """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source, - stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = ['mysql'] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self._initialize_tests() - - def _add_services(self): - """Add services - - Add the services that we're testing, where ceph is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). - """ - this_service = {'name': 'ceph-mon', 'units': 3} - other_services = [{'name': 'mysql'}, - {'name': 'keystone'}, - {'name': 'ceph-osd', 'units': 3}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}] - super(CephBasicDeployment, self)._add_services(this_service, - other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'nova-compute:shared-db': 'mysql:shared-db', - 'nova-compute:amqp': 'rabbitmq-server:amqp', - 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph-mon:client', - 'keystone:shared-db': 'mysql:shared-db', - 'glance:shared-db': 'mysql:shared-db', - 'glance:identity-service': 'keystone:identity-service', - 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'mysql:shared-db', - 'cinder:identity-service': 'keystone:identity-service', - 'cinder:amqp': 'rabbitmq-server:amqp', - 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd' - } - super(CephBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - keystone_config = {'admin-password': 'openstack', - 'admin-token': 'ubuntutesting'} - mysql_config = {'dataset-size': '50%'} - cinder_config = {'block-device': 'None', 'glance-api-version': '2'} - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. 
- ceph_config = { - 'monitor-count': '3', - 'auth-supported': 'none', - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', - 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', - } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. - ceph_osd_config = { - 'osd-reformat': 'yes', - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' - } - - configs = {'keystone': keystone_config, - 'mysql': mysql_config, - 'cinder': cinder_config, - 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config} - super(CephBasicDeployment, self)._configure_services(configs) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry.unit['mysql/0'] - self.keystone_sentry = self.d.sentry.unit['keystone/0'] - self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_sentry = self.d.sentry.unit['nova-compute/0'] - self.glance_sentry = self.d.sentry.unit['glance/0'] - self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - - # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') - # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, - username='admin', - password='openstack', - tenant='admin') - # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin(self.keystone) - - # Authenticate admin with nova endpoint - self.nova = u.authenticate_nova_user(self.keystone, - user='admin', - password='openstack', - tenant='admin') - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Authenticate demo user with keystone - self.keystone_demo = u.authenticate_keystone_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1 - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph0_sentry: ceph_processes, - self.ceph1_sentry: ceph_processes, - self.ceph2_sentry: ceph_processes - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - 
amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_services(self): - """Verify the expected services are running on the service units.""" - - services = { - self.mysql_sentry: ['mysql'], - self.rabbitmq_sentry: ['rabbitmq-server'], - self.nova_sentry: ['nova-compute'], - self.keystone_sentry: ['keystone'], - self.glance_sentry: ['glance-registry', - 'glance-api'], - self.cinder_sentry: ['cinder-api', - 'cinder-scheduler', - 'cinder-volume'], - } - - if self._get_openstack_release() < self.vivid_kilo: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`' - ] - services[self.ceph0_sentry] = ceph_services - services[self.ceph1_sentry] = ceph_services - services[self.ceph2_sentry] = ceph_services - - ceph_osd_services = [ - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] - - services[self.ceph_osd_sentry] = ceph_osd_services - - if self._get_openstack_release() >= self.trusty_liberty: - services[self.keystone_sentry] = ['apache2'] - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_200_ceph_nova_client_relation(self): - """Verify the ceph to nova ceph-client relation data.""" - u.log.debug('Checking ceph:nova-compute ceph-mon relation data...') - unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph-mon to nova ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_201_nova_ceph_client_relation(self): - """Verify the nova to ceph client relation data.""" - u.log.debug('Checking nova-compute:ceph ceph-client relation data...') - unit = self.nova_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('nova to ceph ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_202_ceph_glance_client_relation(self): - """Verify the ceph to glance ceph-client relation data.""" - u.log.debug('Checking ceph:glance client relation data...') - unit = self.ceph1_sentry - relation = ['client', 'glance:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph to glance ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_203_glance_ceph_client_relation(self): - """Verify the glance to ceph client relation data.""" - u.log.debug('Checking glance:ceph client relation data...') - unit = self.glance_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('glance to ceph ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_204_ceph_cinder_client_relation(self): - """Verify the ceph to cinder ceph-client relation data.""" - u.log.debug('Checking ceph:cinder ceph relation data...') - unit = self.ceph2_sentry - relation = ['client', 'cinder:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = 
u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph to cinder ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_205_cinder_ceph_client_relation(self): - """Verify the cinder to ceph ceph-client relation data.""" - u.log.debug('Checking cinder:ceph ceph relation data...') - unit = self.cinder_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('cinder to ceph ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_300_ceph_config(self): - """Verify the data in the ceph config file.""" - u.log.debug('Checking ceph config file data...') - unit = self.ceph0_sentry - conf = '/etc/ceph/ceph.conf' - expected = { - 'global': { - 'keyring': '/etc/ceph/$cluster.$name.keyring', - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', - 'log to syslog': 'false', - 'err to syslog': 'false', - 'clog to syslog': 'false', - 'mon cluster log to syslog': 'false', - 'auth cluster required': 'none', - 'auth service required': 'none', - 'auth client required': 'none' - }, - 'mon': { - 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' - }, - 'mds': { - 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' - }, - } - - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "ceph config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_302_cinder_rbd_config(self): - """Verify the cinder config file data regarding ceph.""" - u.log.debug('Checking cinder (rbd) config file data...') - unit = self.cinder_sentry - conf = '/etc/cinder/cinder.conf' - expected = { - 'DEFAULT': { - 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' - } - } - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "cinder (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_304_glance_rbd_config(self): - """Verify the glance config file data regarding ceph.""" - u.log.debug('Checking glance (rbd) config file data...') - unit = self.glance_sentry - conf = '/etc/glance/glance-api.conf' - config = { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - config['stores'] = ('glance.store.filesystem.Store,' - 'glance.store.http.Store,' - 'glance.store.rbd.Store') - section = 'glance_store' - else: - # Juno or earlier - section = 'DEFAULT' - - expected = {section: config} - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "glance (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_306_nova_rbd_config(self): - """Verify the nova config file data regarding ceph.""" - u.log.debug('Checking nova (rbd) config file data...') - unit = self.nova_sentry - conf = '/etc/nova/nova.conf' - expected = { - 'libvirt': { - 'rbd_user': 'nova-compute', - 'rbd_secret_uuid': u.not_null - } - } - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "nova (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - 
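The config checks in test_300 through test_306 all follow one pattern: an expected mapping of section names to {option: value} pairs is compared against an INI-style file on the unit, where a value may be a literal or a predicate such as u.not_null for per-deployment data. A rough standalone sketch of that pattern follows (Python 3 stdlib only; validate_config and not_null are illustrative names for this sketch, not charm-helpers API):

import io
import configparser  # the module is named ConfigParser on Python 2


def not_null(value):
    # Predicate: accept any non-None value (mirrors u.not_null semantics).
    return value is not None


def validate_config(file_contents, section, expected):
    # Return None if all expected options match, else an error string.
    config = configparser.ConfigParser(allow_no_value=True)
    config.read_file(io.StringIO(file_contents))
    if section != 'DEFAULT' and not config.has_section(section):
        return 'section [{}] does not exist'.format(section)
    for key, want in expected.items():
        if not config.has_option(section, key):
            return 'section [{}] is missing option {}'.format(section, key)
        actual = config.get(section, key)
        # Literal values compare directly; callables act as predicates.
        ok = want(actual) if callable(want) else actual == want
        if not ok:
            return 'section [{}] {}:{} unexpected'.format(section, key,
                                                          actual)
    return None


sample = "[libvirt]\nrbd_user = nova-compute\nrbd_secret_uuid = 1234\n"
print(validate_config(sample, 'libvirt',
                      {'rbd_user': 'nova-compute',
                       'rbd_secret_uuid': not_null}))  # -> None, all OK

Mixing literals and predicates in one expected dict is what lets a single helper validate both fixed settings (rbd_user) and values that differ per deployment (rbd_secret_uuid).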
def test_400_ceph_check_osd_pools(self):
-        """Check osd pools on all ceph units, expect them to be
-        identical, and expect specific pools to be present."""
-        u.log.debug('Checking pools on ceph units...')
-
-        expected_pools = self.get_ceph_expected_pools()
-        results = []
-        sentries = [
-            self.ceph0_sentry,
-            self.ceph1_sentry,
-            self.ceph2_sentry
-        ]
-
-        # Check for presence of expected pools on each unit
-        u.log.debug('Expected pools: {}'.format(expected_pools))
-        for sentry_unit in sentries:
-            pools = u.get_ceph_pools(sentry_unit)
-            results.append(pools)
-
-            for expected_pool in expected_pools:
-                if expected_pool not in pools:
-                    msg = ('{} does not have pool: '
-                           '{}'.format(sentry_unit.info['unit_name'],
-                                       expected_pool))
-                    amulet.raise_status(amulet.FAIL, msg=msg)
-            u.log.debug('{} has (at least) the expected '
-                        'pools.'.format(sentry_unit.info['unit_name']))
-
-        # Check that all units returned the same pool name:id data
-        ret = u.validate_list_of_identical_dicts(results)
-        if ret:
-            u.log.debug('Pool list results: {}'.format(results))
-            msg = ('{}; Pool list results are not identical on all '
-                   'ceph units.'.format(ret))
-            amulet.raise_status(amulet.FAIL, msg=msg)
-        else:
-            u.log.debug('Pool list on all ceph units produced the '
-                        'same results (OK).')
-
-    def test_402_pause_resume_actions(self):
-        """Verify that pause/resume works"""
-        u.log.debug("Testing pause")
-        cmd = "ceph -s"
-
-        sentry_unit = self.ceph0_sentry
-        action_id = u.run_action(sentry_unit, 'pause-health')
-        assert u.wait_on_action(action_id), "Pause health action failed."
-
-        output, code = sentry_unit.run(cmd)
-        if 'nodown' not in output or 'noout' not in output:
-            amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown")
-
-        u.log.debug("Testing resume")
-        action_id = u.run_action(sentry_unit, 'resume-health')
-        assert u.wait_on_action(action_id), "Resume health action failed."
-
-        output, code = sentry_unit.run(cmd)
-        if 'nodown' in output or 'noout' in output:
-            amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown")
-
-    @staticmethod
-    def find_pool(sentry_unit, pool_name):
-        """
-        This will do a ceph osd dump and search for the pool you specify
-        :param sentry_unit: The unit to run this command from.
-        :param pool_name: str. The name of the Ceph pool to query
-        :return: str or None. The ceph pool or None if not found
-        """
-        output, dump_code = sentry_unit.run("ceph osd dump")
-        if dump_code != 0:
-            amulet.raise_status(
-                amulet.FAIL,
-                msg="ceph osd dump failed with output: {}".format(
-                    output))
-        for line in output.split('\n'):
-            match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line)
-            if match:
-                name = match.group('pool_name')
-                if name == pool_name:
-                    return line
-        return None
-
-    def test_403_cache_tier_actions(self):
-        """Verify that cache tier add/remove works"""
-        u.log.debug("Testing cache tiering")
-
-        sentry_unit = self.ceph0_sentry
-        # Create our backer pool
-        output, code = sentry_unit.run("ceph osd pool create cold 128 128 ")
-        if code != 0:
-            amulet.raise_status(
-                amulet.FAIL,
-                msg="ceph osd pool create cold failed with output: {}".format(
-                    output))
-
-        # Create our cache pool
-        output, code = sentry_unit.run("ceph osd pool create hot 128 128 ")
-        if code != 0:
-            amulet.raise_status(
-                amulet.FAIL,
-                msg="ceph osd pool create hot failed with output: {}".format(
-                    output))
-
-        action_id = u.run_action(sentry_unit,
-                                 'create-cache-tier',
-                                 params={
-                                     'backer-pool': 'cold',
-                                     'cache-pool': 'hot',
-                                     'cache-mode': 'writeback'})
-        assert u.wait_on_action(action_id), \
-            "Create cache tier action failed."
-
-        pool_line = self.find_pool(
-            sentry_unit=sentry_unit,
-            pool_name='hot')
-
-        assert "cache_mode writeback" in pool_line, \
-            "cache_mode writeback not found in cache pool"
-        remove_action_id = u.run_action(sentry_unit,
-                                        'remove-cache-tier',
-                                        params={
-                                            'backer-pool': 'cold',
-                                            'cache-pool': 'hot'})
-        assert u.wait_on_action(remove_action_id), \
-            "Remove cache tier action failed"
-        pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot')
-        assert "cache_mode" not in pool_line, \
-            "cache_mode is still enabled on cache pool"
-
-    def test_410_ceph_cinder_vol_create(self):
-        """Create and confirm a ceph-backed cinder volume, and inspect
-        ceph cinder pool object count as the volume is created
-        and deleted."""
-        sentry_unit = self.ceph0_sentry
-        obj_count_samples = []
-        pool_size_samples = []
-        pools = u.get_ceph_pools(self.ceph0_sentry)
-        cinder_pool = pools['cinder']
-
-        # Check ceph cinder pool object count, disk space usage and pool name
-        u.log.debug('Checking ceph cinder pool original samples...')
-        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
-                                                               cinder_pool)
-        obj_count_samples.append(obj_count)
-        pool_size_samples.append(kb_used)
-
-        expected = 'cinder'
-        if pool_name != expected:
-            msg = ('Ceph pool {} unexpected name (actual, expected): '
-                   '{}. 
{}'.format(cinder_pool, pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed cinder volume - cinder_vol = u.create_cinder_volume(self.cinder) - - # Re-check ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool samples after volume create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed cinder volume - u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") - - # Final check, ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool after volume delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph cinder pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "cinder pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Validate ceph cinder pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "cinder pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_412_ceph_glance_image_create_delete(self): - """Create and confirm a ceph-backed glance image, and inspect - ceph glance pool object count as the image is created - and deleted.""" - sentry_unit = self.ceph0_sentry - obj_count_samples = [] - pool_size_samples = [] - pools = u.get_ceph_pools(self.ceph0_sentry) - glance_pool = pools['glance'] - - # Check ceph glance pool object count, disk space usage and pool name - u.log.debug('Checking ceph glance pool original samples...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - expected = 'glance' - if pool_name != expected: - msg = ('Ceph glance pool {} unexpected name (actual, ' - 'expected): {}. 
{}'.format(glance_pool, - pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, "cirros-image-1") - - # Re-check ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed glance image - u.delete_resource(self.glance.images, - glance_img, msg="glance image") - - # Final check, ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph glance pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "glance pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Validate ceph glance pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "glance pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - all ceph units.""" - sentry_units = [ - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py deleted file mode 100644 index f72e7f84..00000000 --- a/ceph-proxy/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
-import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/ceph-proxy/tests/charmhelpers/contrib/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d1400a02..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d1400a02..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index d451698d..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. 
- """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. - """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - try: - self.d.setup(timeout=900) - self.d.sentry.wait(timeout=900) - except amulet.helpers.TimeoutError: - amulet.raise_status(amulet.FAIL, msg="Deployment timed out") - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 7e5c25a9..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,829 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030
-        self.log.warn('DEPRECATION WARNING: use '
-                      'validate_service_config_changed instead of '
-                      'service_restarted due to known races.')
-
-        time.sleep(sleep_time)
-        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
-                self._get_file_mtime(sentry_unit, filename)):
-            return True
-        else:
-            return False
-
-    def service_restarted_since(self, sentry_unit, mtime, service,
-                                pgrep_full=None, sleep_time=20,
-                                retry_count=30, retry_sleep_time=10):
-        """Check if service was started after a given time.
-
-        Args:
-          sentry_unit (sentry): The sentry unit to check for the service on
-          mtime (float): The epoch time to check against
-          service (string): service name to look for in process table
-          pgrep_full: [Deprecated] Use full command line search mode with pgrep
-          sleep_time (int): Initial sleep time (s) before looking for file
-          retry_sleep_time (int): Time (s) to sleep between retries
-          retry_count (int): If file is not found, how many times to retry
-
-        Returns:
-          bool: True if service was found and its start time is newer than
-                mtime, False if service is older than mtime or if service was
-                not found.
-        """
-        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
-        # used instead of pgrep. pgrep_full is still passed through to ensure
-        # deprecation WARNS. lp1474030
-
-        unit_name = sentry_unit.info['unit_name']
-        self.log.debug('Checking that %s service restarted since %s on '
-                       '%s' % (service, mtime, unit_name))
-        time.sleep(sleep_time)
-        proc_start_time = None
-        tries = 0
-        while tries <= retry_count and not proc_start_time:
-            try:
-                proc_start_time = self._get_proc_start_time(sentry_unit,
-                                                            service,
-                                                            pgrep_full)
-                self.log.debug('Attempt {} to get {} proc start time on {} '
-                               'OK'.format(tries, service, unit_name))
-            except IOError as e:
-                # NOTE(beisner) - race avoidance, proc may not exist yet.
-                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
-                self.log.debug('Attempt {} to get {} proc start time on {} '
-                               'failed\n{}'.format(tries, service,
-                                                   unit_name, e))
-                time.sleep(retry_sleep_time)
-                tries += 1
-
-        if not proc_start_time:
-            self.log.warn('No proc start time found, assuming service did '
-                          'not start')
-            return False
-        if proc_start_time >= mtime:
-            self.log.debug('Proc start time is newer than provided mtime '
-                           '(%s >= %s) on %s (OK)' % (proc_start_time,
-                                                      mtime, unit_name))
-            return True
-        else:
-            self.log.warn('Proc start time (%s) is older than provided mtime '
-                          '(%s) on %s, service did not '
-                          'restart' % (proc_start_time, mtime, unit_name))
-            return False
-
-    def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20, retry_count=30,
-                             retry_sleep_time=10):
-        """Check if file was modified after a given time.
-
-        Args:
-          sentry_unit (sentry): The sentry unit to check the file mtime on
-          filename (string): The file to check mtime of
-          mtime (float): The epoch time to check against
-          sleep_time (int): Initial sleep time (s) before looking for file
-          retry_sleep_time (int): Time (s) to sleep between retries
-          retry_count (int): If file is not found, how many times to retry
-
-        Returns:
-          bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime, or if file not found.
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x {}'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
- """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. 
Expected, actual: {}, '
-                               '{}'.format(expected[key], value))
-                        amulet.raise_status(amulet.FAIL, msg=msg)
-
-    def get_unit_hostnames(self, units):
-        """Return a dict of juju unit names to hostnames."""
-        host_names = {}
-        for unit in units:
-            host_names[unit.info['unit_name']] = \
-                str(unit.file_contents('/etc/hostname').strip())
-        self.log.debug('Unit host names: {}'.format(host_names))
-        return host_names
-
-    def run_cmd_unit(self, sentry_unit, cmd):
-        """Run a command on a unit, return the output and exit code."""
-        output, code = sentry_unit.run(cmd)
-        if code == 0:
-            self.log.debug('{} `{}` command returned {} '
-                           '(OK)'.format(sentry_unit.info['unit_name'],
-                                         cmd, code))
-        else:
-            msg = ('{} `{}` command returned {} '
-                   '{}'.format(sentry_unit.info['unit_name'],
-                               cmd, code, output))
-            amulet.raise_status(amulet.FAIL, msg=msg)
-        return str(output), code
-
-    def file_exists_on_unit(self, sentry_unit, file_name):
-        """Check if a file exists on a unit."""
-        try:
-            sentry_unit.file_stat(file_name)
-            return True
-        except IOError:
-            return False
-        except Exception as e:
-            msg = 'Error checking file {}: {}'.format(file_name, e)
-            amulet.raise_status(amulet.FAIL, msg=msg)
-
-    def file_contents_safe(self, sentry_unit, file_name,
-                           max_wait=60, fatal=False):
-        """Get file contents from a sentry unit. Wrap amulet file_contents
-        with retry logic to address races where a file checks as existing,
-        but no longer exists by the time file_contents is called.
-        Return None if file not found. Optionally raise if fatal is True."""
-        unit_name = sentry_unit.info['unit_name']
-        file_contents = False
-        tries = 0
-        while not file_contents and tries < (max_wait / 4):
-            try:
-                file_contents = sentry_unit.file_contents(file_name)
-            except IOError:
-                self.log.debug('Attempt {} to open file {} from {} '
-                               'failed'.format(tries, file_name,
-                                               unit_name))
-                time.sleep(4)
-                tries += 1
-
-        if file_contents:
-            return file_contents
-        elif not fatal:
-            return None
-        elif fatal:
-            msg = 'Failed to get file contents from unit.'
-            amulet.raise_status(amulet.FAIL, msg)
-
-    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
-        """Open a TCP socket to check for a listening service on a host.
-
-        :param host: host name or IP address, default to localhost
-        :param port: TCP port number, default to 22
-        :param timeout: Connect timeout, default to 15 seconds
-        :returns: True if successful, False if connect failed
-        """
-
-        # Resolve host name if possible
-        try:
-            connect_host = socket.gethostbyname(host)
-            host_human = "{} ({})".format(connect_host, host)
-        except socket.error as e:
-            self.log.warn('Unable to resolve address: '
-                          '{} ({}) Trying anyway!'.format(host, e))
-            connect_host = host
-            host_human = connect_host
-
-        # Attempt socket connection
-        try:
-            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            knock.settimeout(timeout)
-            knock.connect((connect_host, port))
-            knock.close()
-            self.log.debug('Socket connect OK for host '
-                           '{} on port {}.'.format(host_human, port))
-            return True
-        except socket.error as e:
-            self.log.debug('Socket connect FAIL for'
-                           ' {} port {} ({})'.format(host_human, port, e))
-            return False
-
-    def port_knock_units(self, sentry_units, port=22,
-                         timeout=15, expect_success=True):
-        """Open a TCP socket to check for a listening service on each
-        listed juju unit.
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - -# amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is used for dependency injection. - - @return action_id. - """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - _check_output parameter is used for dependency injection. - """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d1400a02..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d1400a02..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index d21c9c78..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import logging -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. 
Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. - force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next - - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) - - return other_services - - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. 
- :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - self.log.info('Waiting for extended status on units...') - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) - - releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - pools = [ - 'rbd', - 'cinder', - 'glance' - ] - else: - # Juno or earlier - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index ef3bdccf..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1012 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib - -import cinderclient.v1.client as cinder_client -import glanceclient.v1.client as glance_client -import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 - -import novaclient.client as nova_client -import pika -import swiftclient - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
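As a worked example of the origin parsing above, assuming self.series is 'trusty' and self.openstack is 'cloud:trusty-mitaka':

    os_origin = 'cloud:trusty-mitaka'.split(':')[1]        # 'trusty-mitaka'
    release = os_origin.split('trusty-')[1].split('/')[0]  # 'mitaka'
    # the final split('/') also strips pocket suffixes, so
    # 'cloud:trusty-mitaka/proposed' still yields 'mitaka'

With no origin set, the series alone indexes the OrderedDict, e.g. releases['trusty'] is 'icehouse'.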
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. 
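These validators share one pattern: flatten the client object into a plain dict and delegate to _validate_dict_data. A hedged sketch of typical consumption, where the expected role id is an assumption the test already holds:

    u = OpenStackAmuletUtils(DEBUG)
    expected = [{'name': 'Member', 'id': member_role_id}]
    ret = u.validate_role_data(expected, keystone.roles.list())
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)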
- """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): - """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry - if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - 
auth_version='2.0') - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION 
WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. - - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. 
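A short sketch of the count-based delete check above; the image id is hypothetical:

    # deletes, then re-polls resource.list() every 4s until the count
    # drops by exactly one or max_wait expires
    ok = u.delete_resource(glance.images, image_id,
                           msg='glance image', max_wait=120)
    if not ok:
        amulet.raise_status(amulet.FAIL, msg='image delete timed out')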
- - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. 
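The three-sample contract described above looks like this in a test; pool id 0 and a glance image as the workload are assumptions:

    samples = []
    # sample once before creating an image, once after creating it,
    # and once after deleting it
    for _ in range(3):
        name, obj_count, kb_used = u.get_ceph_pool_sample(sentry, pool_id=0)
        samples.append(obj_count)
        # ... create / delete a glance image between iterations ...
    ret = u.validate_ceph_pool_samples(samples, 'pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)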
- - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. - """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. 
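A small sketch of the rmq user helpers above, with credentials left at their defaults:

    u.add_rmq_test_user(sentry_units)     # creates testuser1, then verifies
                                          # an amqp connect on every unit
    # ... exercise the broker ...
    u.delete_rmq_test_user(sentry_units)  # returns quietly if already gone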
- - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. - - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. 
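As a worked example of the string-chop in get_rmq_cluster_running_nodes(), given representative rabbitmqctl output (host names hypothetical):

    import json

    str_stat = "... {running_nodes,['rabbit@host-a','rabbit@host-b']}, ..."
    pos_start = str_stat.find("{running_nodes,") + 15
    pos_end = str_stat.find("]},", pos_start) + 1
    json.loads(str_stat[pos_start:pos_end].replace("'", '"'))
    # -> [u'rabbit@host-a', u'rabbit@host-b']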
- - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. 
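Together these helpers form a publish/consume smoke test; a minimal sketch with default credentials and queue:

    u.publish_amqp_message_by_unit(sentry_unit, 'ping', queue='test')
    # the get raises amulet.FAIL if no message was retrieved
    body = u.get_amqp_message_by_unit(sentry_unit, queue='test')
    assert body == 'ping'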
- """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) diff --git a/ceph-proxy/tests/setup/00-setup b/ceph-proxy/tests/setup/00-setup deleted file mode 100755 index 94e5611f..00000000 --- a/ceph-proxy/tests/setup/00-setup +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -ex - -sudo add-apt-repository --yes ppa:juju/stable -sudo apt-get update --yes -sudo apt-get install --yes amulet \ - distro-info-data \ - python-cinderclient \ - python-distro-info \ - python-glanceclient \ - python-heatclient \ - python-keystoneclient \ - python-neutronclient \ - python-novaclient \ - python-pika \ - python-swiftclient diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml deleted file mode 100644 index 49e721b3..00000000 --- a/ceph-proxy/tests/tests.yaml +++ /dev/null @@ -1,22 +0,0 @@ -bootstrap: true -reset: false -virtualenv: true -makefile: - - lint - - test -sources: - - ppa:juju/stable -packages: - - amulet - - distro-info-data - - python-ceilometerclient - - python-cinderclient - - python-distro-info - - python-glanceclient - - python-heatclient - - python-keystoneclient - - python-neutronclient - - python-novaclient - - python-pika - - python-swiftclient - - python-nose \ No newline at end of file diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py deleted file mode 100644 index f80aab3d..00000000 --- a/ceph-proxy/unit_tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -import sys -sys.path.append('hooks') diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py deleted file mode 100644 index b720d94a..00000000 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ /dev/null @@ -1,137 +0,0 @@ -import json -import unittest - -import mock - -import ceph_broker - - -class CephBrokerTestCase(unittest.TestCase): - def setUp(self): - super(CephBrokerTestCase, self).setUp() - - @mock.patch('ceph_broker.log') - def test_process_requests_noop(self, mock_log): - req = json.dumps({'api-version': 1, 'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.log') - def test_process_requests_missing_api_version(self, mock_log): - req = json.dumps({'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), { - 'exit-code': 1, - 'stderr': 'Missing or invalid api version (None)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_version(self, mock_log): - req = json.dumps({'api-version': 2, 'ops': []}) - rc = ceph_broker.process_requests(req) - print "Return: %s" % rc - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': 'Missing or invalid api version (2)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid(self, mock_log): - reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': "Unknown operation 'invalid_op'"}) - - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.ReplicatedPool') - 
@mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num(self, mock_log, - mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 100}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, - mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 300}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_exists(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = True - reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - self.assertFalse(mock_replicated_pool.create.called) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_rid(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'request-id': '1ef5aede', - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', - name='foo', - pg_num=None, - replicas=3) - self.assertEqual(json.loads(rc)['exit-code'], 0) - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_rid(self, mock_log): - reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', - 'ops': [{'op': 'create-pool'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc)['exit-code'], 1) - self.assertEqual(json.loads(rc)['stderr'], - "Missing or invalid api version (0)") - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-proxy/unit_tests/test_ceph_networking.py b/ceph-proxy/unit_tests/test_ceph_networking.py deleted file mode 100644 index ae3a7ff5..00000000 --- a/ceph-proxy/unit_tests/test_ceph_networking.py +++ /dev/null @@ -1,51 +0,0 @@ -import test_utils -import charmhelpers.core.hookenv as hookenv -import 
utils as ceph_utils - -TO_PATCH_SPACES = [ - 'network_get_primary_address', - 'log', - 'get_host_ip', - 'config', - 'get_network_addrs', - 'cached', -] - - -class CephNetworkSpaceTestCase(test_utils.CharmTestCase): - def setUp(self): - super(CephNetworkSpaceTestCase, self).setUp(ceph_utils, - TO_PATCH_SPACES) - self.config.side_effect = self.test_config.get - - def tearDown(self): - # Reset @cached cache - hookenv.cache = {} - - def test_no_network_space_support(self): - self.get_host_ip.return_value = '192.168.2.1' - self.network_get_primary_address.side_effect = NotImplementedError - self.assertEqual(ceph_utils.get_cluster_addr(), - '192.168.2.1') - self.assertEqual(ceph_utils.get_public_addr(), - '192.168.2.1') - - def test_public_network_space(self): - self.network_get_primary_address.return_value = '10.20.40.2' - self.assertEqual(ceph_utils.get_public_addr(), - '10.20.40.2') - self.network_get_primary_address.assert_called_with('public') - self.config.assert_called_with('ceph-public-network') - - def test_cluster_network_space(self): - self.network_get_primary_address.return_value = '10.20.50.2' - self.assertEqual(ceph_utils.get_cluster_addr(), - '10.20.50.2') - self.network_get_primary_address.assert_called_with('cluster') - self.config.assert_called_with('ceph-cluster-network') - - def test_config_options_in_use(self): - self.get_network_addrs.return_value = ['192.122.20.2'] - self.test_config.set('ceph-cluster-network', '192.122.20.0/24') - self.assertEqual(ceph_utils.get_cluster_addr(), - '192.122.20.2') diff --git a/ceph-proxy/unit_tests/test_ceph_ops.py b/ceph-proxy/unit_tests/test_ceph_ops.py deleted file mode 100644 index fba81769..00000000 --- a/ceph-proxy/unit_tests/test_ceph_ops.py +++ /dev/null @@ -1,214 +0,0 @@ -__author__ = 'chris' - -import json -import unittest - -from mock import ( - call, - patch, -) - -from hooks import ceph_broker - - -class TestCephOps(unittest.TestCase): - - @patch.object(ceph_broker, 'create_erasure_profile') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_create_erasure_profile(self, mock_create_erasure): - req = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-erasure-profile', - 'name': 'foo', - 'erasure-type': 'jerasure', - 'failure-domain': 'rack', - 'k': 3, - 'm': 2, - }]}) - rc = ceph_broker.process_requests(req) - mock_create_erasure.assert_called_with(service='admin', - profile_name='foo', - coding_chunks=2, - data_chunks=3, - locality=None, - failure_domain='rack', - erasure_plugin_name='jerasure') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'get_osds') - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker, 'ReplicatedPool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_create_replicated_pool(self, - mock_replicated_pool, - mock_pool_exists, - mock_get_osds): - mock_get_osds.return_value = 0 - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'pool-type': 'replicated', - 'name': 'foo', - 'replicas': 3 - }]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - calls = [call(pg_num=None, name=u'foo', service='admin', replicas=3)] - mock_replicated_pool.assert_has_calls(calls) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'delete_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def 
test_process_requests_delete_pool(self, - mock_delete_pool): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'delete-pool', - 'name': 'foo', - }]}) - mock_delete_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) - mock_delete_pool.assert_called_with(service='admin', name='foo') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.ErasurePool, 'create') - @patch.object(ceph_broker, 'erasure_profile_exists') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_create_erasure_pool(self, mock_profile_exists, - mock_erasure_pool, - mock_pool_exists): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'pool-type': 'erasure', - 'name': 'foo', - 'erasure-profile': 'default' - }]}) - rc = ceph_broker.process_requests(reqs) - mock_profile_exists.assert_called_with(service='admin', name='default') - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_erasure_pool.assert_called_with() - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.Pool, 'add_cache_tier') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_create_cache_tier(self, mock_pool, - mock_pool_exists): - mock_pool_exists.return_value = True - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-cache-tier', - 'cold-pool': 'foo', - 'hot-pool': 'foo-ssd', - 'mode': 'writeback', - 'erasure-profile': 'default' - }]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_any_call(service='admin', name='foo') - mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') - - mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.Pool, 'remove_cache_tier') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_remove_cache_tier(self, mock_pool, - mock_pool_exists): - mock_pool_exists.return_value = True - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'remove-cache-tier', - 'hot-pool': 'foo-ssd', - }]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') - - mock_pool.assert_called_with(cache_pool='foo-ssd') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'snapshot_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_snapshot_pool(self, mock_snapshot_pool): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'snapshot-pool', - 'name': 'foo', - 'snapshot-name': 'foo-snap1', - }]}) - mock_snapshot_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) - mock_snapshot_pool.assert_called_with(service='admin', - pool_name='foo', - snapshot_name='foo-snap1') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'rename_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_rename_pool(self, mock_rename_pool): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'rename-pool', - 'name': 'foo', - 'new-name': 'foo2', - }]}) - mock_rename_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) - mock_rename_pool.assert_called_with(service='admin', - 
old_name='foo', - new_name='foo2') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'remove_pool_snapshot') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_remove_pool_snapshot(self, mock_snapshot_pool): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'remove-pool-snapshot', - 'name': 'foo', - 'snapshot-name': 'foo-snap1', - }]}) - mock_snapshot_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) - mock_snapshot_pool.assert_called_with(service='admin', - pool_name='foo', - snapshot_name='foo-snap1') - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'pool_set') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_set_pool_value(self, mock_set_pool): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'set-pool-value', - 'name': 'foo', - 'key': 'size', - 'value': 3, - }]}) - mock_set_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) - mock_set_pool.assert_called_with(service='admin', - pool_name='foo', - key='size', - value=3) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) - def test_set_invalid_pool_value(self): - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'set-pool-value', - 'name': 'foo', - 'key': 'size', - 'value': 'abc', - }]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc)['exit-code'], 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/ceph-proxy/unit_tests/test_status.py b/ceph-proxy/unit_tests/test_status.py deleted file mode 100644 index 0900b2e5..00000000 --- a/ceph-proxy/unit_tests/test_status.py +++ /dev/null @@ -1,103 +0,0 @@ -import mock -import test_utils -import sys - -# python-apt is not installed as part of test-requirements but is imported by -# some charmhelpers modules so create a fake import. 
-mock_apt = mock.MagicMock() -sys.modules['apt'] = mock_apt -mock_apt.apt_pkg = mock.MagicMock() - -with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: - lambda *args, **kwargs: f(*args, **kwargs)) - import ceph_hooks as hooks - -TO_PATCH = [ - 'status_set', - 'config', - 'ceph', - 'relation_ids', - 'relation_get', - 'related_units', - 'local_unit', -] - -NO_PEERS = { - 'ceph-mon1': True -} - -ENOUGH_PEERS_INCOMPLETE = { - 'ceph-mon1': True, - 'ceph-mon2': False, - 'ceph-mon3': False, -} - -ENOUGH_PEERS_COMPLETE = { - 'ceph-mon1': True, - 'ceph-mon2': True, - 'ceph-mon3': True, -} - - -class ServiceStatusTestCase(test_utils.CharmTestCase): - def setUp(self): - super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) - self.config.side_effect = self.test_config.get - self.test_config.set('monitor-count', 3) - self.local_unit.return_value = 'ceph-mon1' - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_no_peers(self, _peer_units): - _peer_units.return_value = NO_PEERS - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_incomplete(self, _peer_units): - _peer_units.return_value = ENOUGH_PEERS_INCOMPLETE - hooks.assess_status() - self.status_set.assert_called_with('waiting', mock.ANY) - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_complete_active(self, _peer_units): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - hooks.assess_status() - self.status_set.assert_called_with('active', mock.ANY) - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_complete_down(self, _peer_units): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - self.ceph.is_bootstrapped.return_value = False - self.ceph.is_quorum.return_value = False - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - - def test_get_peer_units_no_peers(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = [] - self.assertEquals({'ceph-mon1': True}, - hooks.get_peer_units()) - - def test_get_peer_units_peers_incomplete(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = ['ceph-mon2', - 'ceph-mon3'] - self.relation_get.return_value = None - self.assertEquals({'ceph-mon1': True, - 'ceph-mon2': False, - 'ceph-mon3': False}, - hooks.get_peer_units()) - - def test_get_peer_units_peers_complete(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = ['ceph-mon2', - 'ceph-mon3'] - self.relation_get.side_effect = ['ceph-mon2', - 'ceph-mon3'] - self.assertEquals({'ceph-mon1': True, - 'ceph-mon2': True, - 'ceph-mon3': True}, - hooks.get_peer_units()) diff --git a/ceph-proxy/unit_tests/test_upgrade_roll.py b/ceph-proxy/unit_tests/test_upgrade_roll.py deleted file mode 100644 index 82e9c55a..00000000 --- a/ceph-proxy/unit_tests/test_upgrade_roll.py +++ /dev/null @@ -1,154 +0,0 @@ -__author__ = 'chris' -import time - -from mock import patch, call, MagicMock -import sys - -sys.path.append('/home/chris/repos/ceph-mon/hooks') - -import test_utils - -# python-apt is not installed as part of test-requirements but is imported by -# some charmhelpers modules so create a fake import. 
-mock_apt = MagicMock() -sys.modules['apt'] = mock_apt -mock_apt.apt_pkg = MagicMock() - -with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: - lambda *args, **kwargs: f(*args, **kwargs)) - import ceph_hooks - -TO_PATCH = [ - 'hookenv', - 'status_set', - 'config', - 'ceph', - 'log', - 'add_source', - 'apt_update', - 'apt_install', - 'service_stop', - 'service_start', - 'host', -] - - -def config_side_effect(*args): - if args[0] == 'source': - return 'cloud:trusty-kilo' - elif args[0] == 'key': - return 'key' - elif args[0] == 'release-version': - return 'cloud:trusty-kilo' - - -previous_node_start_time = time.time() - (9 * 60) - - -def monitor_key_side_effect(*args): - if args[1] == \ - 'ip-192-168-1-2_done': - return False - elif args[1] == \ - 'ip-192-168-1-2_start': - # Return that the previous node started 9 minutes ago - return previous_node_start_time - - -class UpgradeRollingTestCase(test_utils.CharmTestCase): - def setUp(self): - super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) - - @patch('ceph_hooks.roll_monitor_cluster') - def test_check_for_upgrade(self, roll_monitor_cluster): - self.host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - self.hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] - ceph_hooks.check_for_upgrade() - - roll_monitor_cluster.assert_called_with('cloud:trusty-kilo') - - @patch('ceph_hooks.upgrade_monitor') - @patch('ceph_hooks.monitor_key_set') - def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): - monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') - upgrade_monitor.assert_called_once_with() - - def test_upgrade_monitor(self): - self.config.side_effect = config_side_effect - self.ceph.get_version.return_value = "0.80" - self.ceph.systemd.return_value = False - ceph_hooks.upgrade_monitor() - self.service_stop.assert_called_with('ceph-mon-all') - self.service_start.assert_called_with('ceph-mon-all') - self.status_set.assert_has_calls([ - call('maintenance', 'Upgrading monitor'), - call('active', '') - ]) - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.wait_on_previous_node') - @patch('ceph_hooks.get_mon_map') - @patch('ceph_hooks.socket') - def test_roll_monitor_cluster_second(self, - socket, - get_mon_map, - wait_on_previous_node, - lock_and_roll): - wait_on_previous_node.return_value = None - socket.gethostname.return_value = "ip-192-168-1-3" - get_mon_map.return_value = { - 'monmap': { - 'mons': [ - { - 'name': 'ip-192-168-1-2', - }, - { - 'name': 'ip-192-168-1-3', - }, - ] - } - } - ceph_hooks.roll_monitor_cluster('0.94.1') - self.status_set.assert_called_with( - 'blocked', - 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") - - @patch.object(ceph_hooks, 'time') - @patch('ceph_hooks.monitor_key_get') - @patch('ceph_hooks.monitor_key_exists') - def test_wait_on_previous_node(self, monitor_key_exists, monitor_key_get, - mock_time): - tval = [previous_node_start_time] - - def fake_time(): - tval[0] += 100 - return tval[0] - - mock_time.time.side_effect = fake_time - monitor_key_get.side_effect = monitor_key_side_effect - monitor_key_exists.return_value = False - - ceph_hooks.wait_on_previous_node("ip-192-168-1-2") - - # Make sure we checked to see if the previous node started - 
monitor_key_get.assert_has_calls( - [call('admin', 'ip-192-168-1-2_start')] - ) - # Make sure we checked to see if the previous node was finished - monitor_key_exists.assert_has_calls( - [call('admin', 'ip-192-168-1-2_done')] - ) - # Make sure we waited at last once before proceeding - self.log.assert_has_calls( - [call('Previous node is: ip-192-168-1-2')], - [call('ip-192-168-1-2 is not finished. Waiting')], - ) - self.assertEqual(tval[0], previous_node_start_time + 700) diff --git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py deleted file mode 100644 index 663a0488..00000000 --- a/ceph-proxy/unit_tests/test_utils.py +++ /dev/null @@ -1,121 +0,0 @@ -import logging -import unittest -import os -import yaml - -from contextlib import contextmanager -from mock import patch, MagicMock - - -def load_config(): - ''' - Walk backwords from __file__ looking for config.yaml, load and return the - 'options' section' - ''' - config = None - f = __file__ - while config is None: - d = os.path.dirname(f) - if os.path.isfile(os.path.join(d, 'config.yaml')): - config = os.path.join(d, 'config.yaml') - break - f = d - - if not config: - logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % f) - raise Exception - - return yaml.safe_load(open(config).read())['options'] - - -def get_default_config(): - ''' - Load default charm config from config.yaml return as a dict. - If no default is set in config.yaml, its value is None. - ''' - default_config = {} - config = load_config() - for k, v in config.iteritems(): - if 'default' in v: - default_config[k] = v['default'] - else: - default_config[k] = None - return default_config - - -class CharmTestCase(unittest.TestCase): - - def setUp(self, obj, patches): - super(CharmTestCase, self).setUp() - self.patches = patches - self.obj = obj - self.test_config = TestConfig() - self.test_relation = TestRelation() - self.patch_all() - - def patch(self, method): - _m = patch.object(self.obj, method) - mock = _m.start() - self.addCleanup(_m.stop) - return mock - - def patch_all(self): - for method in self.patches: - setattr(self, method, self.patch(method)) - - -class TestConfig(object): - - def __init__(self): - self.config = get_default_config() - - def get(self, attr=None): - if not attr: - return self.get_all() - try: - return self.config[attr] - except KeyError: - return None - - def get_all(self): - return self.config - - def set(self, attr, value): - if attr not in self.config: - raise KeyError - self.config[attr] = value - - -class TestRelation(object): - - def __init__(self, relation_data={}): - self.relation_data = relation_data - - def set(self, relation_data): - self.relation_data = relation_data - - def get(self, attr=None, unit=None, rid=None): - if attr is None: - return self.relation_data - elif attr in self.relation_data: - return self.relation_data[attr] - return None - - -@contextmanager -def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. 
-
-    Yields the mock for "open" and "file", respectively.'''
-    mock_open = MagicMock(spec=open)
-    mock_file = MagicMock(spec=file)
-
-    @contextmanager
-    def stub_open(*args, **kwargs):
-        mock_open(*args, **kwargs)
-        yield mock_file
-
-    with patch('__builtin__.open', stub_open):
-        yield mock_open, mock_file

From de59dc5f024b92547b166400e882cf3c687a8ce6 Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Wed, 15 Jun 2016 11:27:31 +0000
Subject: [PATCH 1136/2699] Fix for multiple status-set - related to bug
 1588462

This change fixes the obvious race for a status_set() between
check_optional_relations() and assess_status(): the latter calls the
former, which calls status_set() and returns the status, which is then
potentially set again by the assess_status() function.  This cleans up
the code so that only a single status_set() is performed when calling
assess_status().

Change-Id: Idb11019cec20061622b5f36911e49adfb9bac14e
Related-Bug: #1588462
---
 ceph-radosgw/hooks/utils.py                    | 46 +++++++++++++------
 .../unit_tests/test_ceph_radosgw_utils.py      |  9 +++-
 2 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index 914bcec7..69f39634 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -24,7 +24,6 @@
     INFO,
     relation_get,
     relation_ids,
-    status_get,
 )
 from charmhelpers.contrib.network.ip import (
     format_ipv6_addr,
@@ -38,7 +37,6 @@
     make_assess_status_func,
     pause_unit,
     resume_unit,
-    set_os_workload_status,
 )
 from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config
 from charmhelpers.core.host import (
@@ -184,24 +182,40 @@ def enable_pocket(pocket):
             sources.write(line)
 
 
+def get_optional_interfaces():
+    """Return the optional interfaces that should be checked if the relevant
+    relations have appeared.
+
+    :returns: {general_interface: [specific_int1, specific_int2, ...], ...}
+    """
+    optional_interfaces = {}
+    if relation_ids('ha'):
+        optional_interfaces['ha'] = ['cluster']
+    if (cmp_pkgrevno('radosgw', '0.55') >= 0 and
+            relation_ids('identity-service')):
+        optional_interfaces['identity'] = ['identity-service']
+    return optional_interfaces
+
+
 def check_optional_relations(configs):
-    required_interfaces = {}
+    """Check that, if we have a relation_id for high availability, we can
+    get the hacluster config.  If we can't, then we are blocked.  This
+    function is called from assess_status/set_os_workload_status as the
+    charm_func and needs to return either ('unknown', '') if there is no
+    problem, or (status, message) if there is a problem.
+
+    :param configs: an OSConfigRender() instance.
+    :return 2-tuple: (string, string) = (status, message)
+    """
     if relation_ids('ha'):
-        required_interfaces['ha'] = ['cluster']
         try:
             get_hacluster_config()
         except:
             return ('blocked',
                     'hacluster missing configuration: '
                     'vip, vip_iface, vip_cidr')
 
-    if cmp_pkgrevno('radosgw', '0.55') >= 0 and \
-            relation_ids('identity-service'):
-        required_interfaces['identity'] = ['identity-service']
-    if required_interfaces:
-        set_os_workload_status(configs, required_interfaces)
-        return status_get()
-    else:
-        return 'unknown', 'No optional relations'
+    # return 'unknown' as the lowest priority to not clobber an existing
+    # status.
+    return 'unknown', ''
 
 
 def setup_ipv6():
@@ -245,14 +259,20 @@ def assess_status_func(configs):
     Used directly by assess_status() and also for pausing and resuming
    the unit.
+    NOTE: REQUIRED_INTERFACES is augmented with the optional interfaces
+    depending on the current config before being passed to the
+    make_assess_status_func() function.
+
     NOTE(ajkavanagh) ports are not checked due to race hazards with services
     that don't behave synchronously w.r.t. their service scripts.  e.g.
     apache2.

     @param configs: a templating.OSConfigRenderer() object
     @return f() -> None : a function that assesses the unit's workload status
     """
+    required_interfaces = REQUIRED_INTERFACES.copy()
+    required_interfaces.update(get_optional_interfaces())
     return make_assess_status_func(
-        configs, REQUIRED_INTERFACES,
+        configs, required_interfaces,
         charm_func=check_optional_relations,
         services=services(), ports=None)
diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py
index 2dae2970..1e1daf72 100644
--- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py
+++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py
@@ -27,6 +27,7 @@ def test_assess_status(self):
             asf.assert_called_once_with('test-config')
             callee.assert_called_once_with()
 
+    @patch.object(utils, 'get_optional_interfaces')
     @patch.object(utils, 'check_optional_relations')
     @patch.object(utils, 'REQUIRED_INTERFACES')
     @patch.object(utils, 'services')
@@ -35,12 +36,16 @@ def test_assess_status_func(self,
                                 make_assess_status_func,
                                 services,
                                 REQUIRED_INTERFACES,
-                                check_optional_relations):
+                                check_optional_relations,
+                                get_optional_interfaces):
         services.return_value = 's1'
+        REQUIRED_INTERFACES.copy.return_value = {'int': ['test 1']}
+        get_optional_interfaces.return_value = {'opt': ['test 2']}
         utils.assess_status_func('test-config')
         # ports=None whilst port checks are disabled.
         make_assess_status_func.assert_called_once_with(
-            'test-config', REQUIRED_INTERFACES,
+            'test-config',
+            {'int': ['test 1'], 'opt': ['test 2']},
             charm_func=check_optional_relations,
             services='s1', ports=None)

From d9f760fffcc68e94dc4cd94df5dbfe3b8f420a78 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Wed, 15 Jun 2016 13:44:40 -0400
Subject: [PATCH 1137/2699] ceph clients can relate through the proxy

---
 ceph-proxy/config.yaml                          | 10 +++++-----
 ceph-proxy/hooks/ceph.py                        |  2 +-
 ceph-proxy/hooks/ceph_hooks.py                  |  4 ++--
 ceph-proxy/metadata.yaml                        |  3 +++
 ceph-proxy/requirements.txt                     |  1 +
 ceph-proxy/templates/ceph.client.admin.keyring  |  3 ++-
 ceph-proxy/templates/mon.keyring                |  6 +++---
 7 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml
index 2d7957d1..97ae7ce7 100644
--- a/ceph-proxy/config.yaml
+++ b/ceph-proxy/config.yaml
@@ -18,11 +18,11 @@ options:
     default:
     description: |
       Admin cephx key for existing Ceph cluster
-  mon-key:
-    type: string
-    default:
-    description: |
-      Monitor cephx key
+  # mon-key:
+  #   type: string
+  #   default:
+  #   description: |
+  #     Monitor cephx key
   source:
     type: string
     default:
diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py
index 01a2a569..a3bc52d6 100644
--- a/ceph-proxy/hooks/ceph.py
+++ b/ceph-proxy/hooks/ceph.py
@@ -372,7 +372,7 @@ def get_named_key(name, caps=None):
         "-u",
         ceph_user(),
         'ceph',
-        '--name', 'mon.',
+        '--name', 'client.admin',
         '--keyring',
         '/var/lib/ceph/mon/ceph-{}/keyring'.format(
             get_unit_hostname()
diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py
index aa3431f3..4d141cc0 100755
--- a/ceph-proxy/hooks/ceph_hooks.py
+++ b/ceph-proxy/hooks/ceph_hooks.py
@@ -115,11 +115,11 @@ def emit_cephconf():
                           charm_ceph_conf, 100)
 
     keyring = 'ceph.client.admin.keyring'
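    # A sketch of the render() call changed on the next lines, assuming the
    # charmhelpers.core.templating.render(source, target, context, owner,
    # group, perms) signature:
    #
    #     render('ceph.client.admin.keyring',
    #            '/etc/ceph/ceph.client.admin.keyring',
    #            {'admin_key': config('admin-key')},
    #            owner=ceph.ceph_user(), perms=0o600)
    #
    # Passing owner=ceph.ceph_user() keeps the rendered cephx keys readable
    # by the ceph daemons, while perms=0o600 hides them from other users.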
keyring_path = '/etc/ceph/' + keyring - render(keyring, keyring_path, {'admin_key': config('admin-key')}, perms=0o600) + render(keyring, keyring_path, {'admin_key': config('admin-key')}, owner=ceph.ceph_user(), perms=0o600) keyring = 'keyring' keyring_path = '/var/lib/ceph/mon/ceph-' + get_unit_hostname()+ '/' + keyring - render('mon.keyring', keyring_path, {'mon_key': config('mon-key')}, perms=0o600) + render('mon.keyring', keyring_path, {'admin_key': config('admin-key')}, owner=ceph.ceph_user(), perms=0o600) notify_radosgws() notify_client() diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 0ff33664..72acc5fc 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -9,6 +9,9 @@ tags: - storage - file-servers - misc +extra-bindings: + public: + cluster: provides: client: interface: ceph-client diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 6a3271b0..a72939e8 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -10,3 +10,4 @@ Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 +charm-tools>=2.0.0 \ No newline at end of file diff --git a/ceph-proxy/templates/ceph.client.admin.keyring b/ceph-proxy/templates/ceph.client.admin.keyring index 89758812..ce0a4da8 100644 --- a/ceph-proxy/templates/ceph.client.admin.keyring +++ b/ceph-proxy/templates/ceph.client.admin.keyring @@ -1,2 +1,3 @@ [client.admin] - key = {{admin_key}} \ No newline at end of file + key = {{admin_key}} + diff --git a/ceph-proxy/templates/mon.keyring b/ceph-proxy/templates/mon.keyring index 2b9d542f..567c2ead 100644 --- a/ceph-proxy/templates/mon.keyring +++ b/ceph-proxy/templates/mon.keyring @@ -1,3 +1,3 @@ -[mon.] - key = {{mon_key}} - caps mon = "allow *" +[client.admin] + key = {{admin_key}} + From 5a9d3e4bd920807d79435d129c3fa014f7ca367f Mon Sep 17 00:00:00 2001 From: David Ames Date: Wed, 15 Jun 2016 14:12:24 -0700 Subject: [PATCH 1138/2699] DNS HA Implement DNS high availability. Pass the correct information to hacluster to register a DNS entry with MAAS 2.0 or greater rather than using a virtual IP. Charm-helpers sync to bring in DNS HA helpers Change-Id: I08d62bf1076137c0b9afbd3c851bfa8d09d1b847 --- ceph-radosgw/README.md | 46 ++++++-- ceph-radosgw/config.yaml | 30 +++++ .../charmhelpers/contrib/hahelpers/cluster.py | 69 +++++++++-- .../contrib/openstack/amulet/deployment.py | 51 ++++---- .../contrib/openstack/ha/__init__.py | 0 .../contrib/openstack/ha/utils.py | 111 ++++++++++++++++++ .../charmhelpers/contrib/openstack/ip.py | 11 +- .../charmhelpers/contrib/openstack/utils.py | 69 +++++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 56 ++++++++- ceph-radosgw/hooks/hooks.py | 84 ++++++------- ceph-radosgw/unit_tests/test_hooks.py | 48 +++++++- 11 files changed, 470 insertions(+), 105 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index ea88272e..a6729749 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -52,19 +52,39 @@ This is enabled by relating the ceph-radosgw service with keystone:: If you try to relate the radosgw to keystone with an earlier version of ceph the hook will error out to let you know. 
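
For example, assuming a keystone service named 'keystone' has already been
deployed, the relation is added with::

    juju add-relation ceph-radosgw keystone
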
-Scale-out
-=========
-
-Its possible to scale-out the RADOS Gateway itself::
-
-    juju add-unit -n 2 ceph-radosgw
-
-and then stick a HA loadbalancer on the front::
-
-    juju deploy haproxy
-    juju add-relation haproxy ceph-radosgw
-
-Should give you a bit more bang on the front end if you really need it.
+HA/Clustering
+=============
+
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required, which
+provides the corosync back-end HA functionality.
+
+To use virtual IP(s), the clustered nodes must be on the same subnet, such
+that the VIP is a valid IP on the subnet for one of the node's interfaces and
+each node has an interface in said subnet. The VIP becomes a highly-available
+API endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, vip_iface or vip_cidr may be
+specified.
+
+To use DNS high availability there are several prerequisites; however, DNS HA
+does not require the clustered nodes to be on the same subnet.
+Currently the DNS HA feature is only available for MAAS 2.0 or greater
+environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
+have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
+must be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA. One or more of the above hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster.
+If both 'vip' and 'dns-ha' are set, as they are mutually exclusive.
+If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
+set.
 
 Network Space support
 =====================
diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index 81aa5eb3..ad17b9ae 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -95,6 +95,12 @@ options:
       Enable this option to disable use of Apache and enable the embedded web
       container feature.
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
+      settings below.
   vip:
     type: string
     default:
@@ -153,6 +159,30 @@ options:
       the following public endpoint for the ceph-radosgw:
 
       https://files.example.com:80/swift/v1
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints created for ceph-radosgw
+      in the keystone identity provider.
+
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'files.internal.example.com' will create
+      the following internal endpoint for the ceph-radosgw:
+
+      https://files.internal.example.com:80/swift/v1
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints created for ceph-radosgw
+      in the keystone identity provider.
+
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'files.admin.example.com' will create
+      the following admin endpoint for the ceph-radosgw:
+
+      https://files.admin.example.com:80/swift/v1
   ceph-osd-replication-count:
     type: int
     default: 3
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py
index aa0b515d..92325a96 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -41,10 +41,11 @@
     relation_get,
     config as config_get,
     INFO,
-    ERROR,
+    DEBUG,
     WARNING,
     unit_get,
-    is_leader as juju_is_leader
+    is_leader as juju_is_leader,
+    status_set,
 )
 from charmhelpers.core.decorators import (
     retry_on_exception,
@@ -60,6 +61,10 @@ class HAIncompleteConfig(Exception):
     pass
 
 
+class HAIncorrectConfig(Exception):
+    pass
+
+
 class CRMResourceNotFound(Exception):
     pass
 
@@ -274,27 +279,71 @@ def get_hacluster_config(exclude_keys=None):
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
 
-        ha-bindiface, ha-mcastport, vip
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname
 
     param: exclude_keys: list of setting key(s) to be excluded.
     returns: dict: A dict containing settings keyed by setting name.
-    raises: HAIncompleteConfig if settings are missing.
+    raises: HAIncorrectConfig if settings are missing or incorrect.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname']
     conf = {}
     for setting in settings:
         if exclude_keys and setting in exclude_keys:
             continue
         conf[setting] = config_get(setting)
-    missing = []
-    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
-    if missing:
-        log('Insufficient config data to configure hacluster.', level=ERROR)
-        raise HAIncompleteConfig
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
     return conf
 
 
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha, then one of the
+    os-*-hostname settings must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+        raises: HAIncorrectConfig if settings conflict.
+        raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
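        # A worked example of this validation, assuming the charm config
        # {'vip': None, 'dns-ha': True, 'os-public-hostname': 'rgw.example.com'}:
        # bool(vip) ^ bool(dns) is True, so the XOR guard above passes, and
        # the loop below returns True as soon as it finds any one of the
        # three os-*-hostname options set.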
+ for setting in dns_settings: + if config_get(setting): + log('DNS HA: At least one hostname is set {}: {}' + ''.format(setting, config_get(setting)), + level=DEBUG) + return True + + msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' + 'DNS HA') + status_set('blocked', msg) + raise HAIncompleteConfig(msg) + + log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) + return True + + def canonical_url(configs, vip_setting='vip'): ''' Returns the correct HTTP URL to this host given the state of HTTPS diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d21c9c78..6b917d0c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -43,9 +43,6 @@ def __init__(self, series=None, openstack=None, source=None, self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" def get_logger(self, name="deployment-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -72,38 +69,34 @@ def _determine_branch_locations(self, other_services): self.log.info('OpenStackAmuletDeployment: determine branch locations') - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. - force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next # If a location has been explicitly set, use it if svc.get('location'): continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) return other_services diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..34064237 --- /dev/null +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,111 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import re + +from charmhelpers.core.hookenv import ( + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, +) + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Check for os-*-hostname settings and update resource dictionaries for + the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + networkspace = m.group(1) + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine network space name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + resources[hostname_key] = crm_ocf + resource_params[hostname_key] = ( + 'params fqdn="{}" ip_address="{}" ' + ''.format(hostname, resolve_address(endpoint_type=networkspace, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_set(relation_id=relation_id, groups={ + 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + else: + msg = 'DNS HA: Hostname group has no members.' 
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
index 532a1dc1..7875b997 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
@@ -109,7 +109,7 @@ def _get_address_override(endpoint_type=PUBLIC):
     return addr_override.format(service_name=service_name())
 
 
-def resolve_address(endpoint_type=PUBLIC):
+def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.
 
     If unit is clustered with vip(s) and has net splits defined, return vip on
@@ -119,10 +119,13 @@ def resolve_address(endpoint_type=PUBLIC):
     split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoint type
+    :param override: Accept hostname overrides or not
     """
-    resolved_address = _get_address_override(endpoint_type)
-    if resolved_address:
-        return resolved_address
+    resolved_address = None
+    if override:
+        resolved_address = _get_address_override(endpoint_type)
+        if resolved_address:
+            return resolved_address
 
     vips = config('vip')
     if vips:
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
index bd6efc48..53e58424 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
@@ -51,6 +51,7 @@
     related_units,
     relation_ids,
     relation_set,
+    service_name,
     status_set,
     hook_name
 )
@@ -207,6 +208,27 @@
     ]),
 }
 
+GIT_DEFAULT_REPOS = {
+    'requirements': 'git://github.com/openstack/requirements',
+    'cinder': 'git://github.com/openstack/cinder',
+    'glance': 'git://github.com/openstack/glance',
+    'horizon': 'git://github.com/openstack/horizon',
+    'keystone': 'git://github.com/openstack/keystone',
+    'neutron': 'git://github.com/openstack/neutron',
+    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
+    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
+    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
+    'nova': 'git://github.com/openstack/nova',
+}
+
+GIT_DEFAULT_BRANCHES = {
+    'icehouse': 'icehouse-eol',
+    'kilo': 'stable/kilo',
+    'liberty': 'stable/liberty',
+    'mitaka': 'stable/mitaka',
+    'master': 'master',
+}
+
 DEFAULT_LOOPBACK_SIZE = '5G'
 
@@ -703,6 +725,53 @@ def git_install_requested():
     requirements_dir = None
 
 
+def git_default_repos(projects):
+    """
+    Returns default repos if a default openstack-origin-git value is specified.
+ """ + service = service_name() + + for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + if projects == default: + + # add the requirements repo first + repo = { + 'name': 'requirements', + 'repository': GIT_DEFAULT_REPOS['requirements'], + 'branch': branch, + } + repos = [repo] + + # neutron and nova charms require some additional repos + if service == 'neutron': + for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + repo = { + 'name': svc, + 'repository': GIT_DEFAULT_REPOS[svc], + 'branch': branch, + } + repos.append(repo) + elif service == 'nova': + repo = { + 'name': 'neutron', + 'repository': GIT_DEFAULT_REPOS['neutron'], + 'branch': branch, + } + repos.append(repo) + + # finally add the current service's repo + repo = { + 'name': service, + 'repository': GIT_DEFAULT_REPOS[service], + 'branch': branch, + } + repos.append(repo) + + return yaml.dump(dict(repositories=repos)) + + return projects + + def _git_yaml_load(projects_yaml): """ Load the specified yaml into a dictionary. diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 64b2df55..e367e450 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -176,7 +176,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): + primary_group=None, secondary_groups=None, uid=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. @@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param bool system_user: Whether to create a login or system user :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created :returns: The password database entry struct, as returned by `pwd.getpwnam` """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) except KeyError: log('creating user {0}'.format(username)) cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) if system_user or password is None: cmd.append('--system') else: @@ -230,14 +236,58 @@ def user_exists(username): return user_exists -def add_group(group_name, system_group=False): - """Add a group to the system""" +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ try: group_info = grp.getgrnam(group_name) log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) if system_group: cmd.append('--system') else: diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 48013a39..e3775f77 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -24,7 +24,6 @@ log, DEBUG, WARNING, - ERROR, Hooks, UnregisteredHookError, status_set, ) @@ -55,6 +54,12 @@ is_unit_paused_set, pausable_restart_on_change as restart_on_change, ) +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config, +) +from charmhelpers.contrib.openstack.ha.utils import ( + update_dns_ha_resource_params, +) from utils import ( enable_pocket, CEPHRG_HA_RES, @@ -258,12 +263,8 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') -def ha_relation_joined(): - vip = config('vip') - if not vip: - log('Unable to configure hacluster as vip not provided', level=ERROR) - sys.exit(1) - +def ha_relation_joined(relation_id=None): + cluster_config = get_hacluster_config() # Obtain resources resources = { 'res_cephrg_haproxy': 'lsb:haproxy' @@ -272,32 +273,37 @@ def ha_relation_joined(): 'res_cephrg_haproxy': 'op monitor interval="5s"' } - vip_group = [] - for vip in vip.split(): - if is_ipv6(vip): - res_rgw_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_rgw_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface = get_iface_for_address(vip) - netmask = get_netmask_for_address(vip) - - if iface is not None: - vip_key = 'res_cephrg_{}_vip'.format(iface) - resources[vip_key] = res_rgw_vip - resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - vip_group.append(vip_key) - - if len(vip_group) >= 1: - relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)}) + if config('dns-ha'): + update_dns_ha_resource_params(relation_id=relation_id, + resources=resources, + resource_params=resource_params) + else: + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_rgw_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_rgw_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + + if iface is not None: + vip_key = 'res_cephrg_{}_vip'.format(iface) + resources[vip_key] = res_rgw_vip + resource_params[vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}"' + ' nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + vip_group.append(vip_key) + + if len(vip_group) >= 1: + relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)}) init_services = { 'res_cephrg_haproxy': 'haproxy' @@ -306,14 +312,10 @@ def ha_relation_joined(): 'cl_cephrg_haproxy': 'res_cephrg_haproxy' } - # Obtain the config values necessary for the cluster config. These - # include multicast port and interface to bind to. 
- corosync_bindiface = config('ha-bindiface') - corosync_mcastport = config('ha-mcastport') - - relation_set(init_services=init_services, - corosync_bindiface=corosync_bindiface, - corosync_mcastport=corosync_mcastport, + relation_set(relation_id=relation_id, + init_services=init_services, + corosync_bindiface=cluster_config['ha-bindiface'], + corosync_mcastport=cluster_config['ha-mcastport'], resources=resources, resource_params=resource_params, clones=clones) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index b698a72d..7f23d5d8 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -39,6 +39,8 @@ 'subprocess', 'sys', 'unit_get', + 'get_hacluster_config', + 'update_dns_ha_resource_params', ] @@ -251,15 +253,15 @@ def test_cluster_changed(self): self.CONFIGS.write_all.assert_called_with() _id_joined.assert_called_with(relid='rid') - def test_ha_relation_joined_no_vip(self): - self.test_config.set('vip', '') - ceph_hooks.ha_relation_joined() - self.sys.exit.assert_called_with(1) - def test_ha_relation_joined_vip(self): self.test_config.set('ha-bindiface', 'eth8') self.test_config.set('ha-mcastport', '5000') self.test_config.set('vip', '10.0.0.10') + self.get_hacluster_config.return_value = { + 'vip': '10.0.0.10', + 'ha-bindiface': 'eth8', + 'ha-mcastport': '5000', + } self.get_iface_for_address.return_value = 'eth7' self.get_netmask_for_address.return_value = '255.255.0.0' ceph_hooks.ha_relation_joined() @@ -270,6 +272,7 @@ def test_ha_relation_joined_vip(self): resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"', 'res_cephrg_eth7_vip': eth_params} self.relation_set.assert_called_with( + relation_id=None, init_services={'res_cephrg_haproxy': 'haproxy'}, corosync_bindiface='eth8', corosync_mcastport='5000', @@ -277,6 +280,41 @@ def test_ha_relation_joined_vip(self): resources=resources, clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'}) + def test_ha_joined_dns_ha(self): + def _fake_update(resources, resource_params, relation_id=None): + resources.update({'res_cephrg_public_hostname': 'ocf:maas:dns'}) + resource_params.update({'res_cephrg_public_hostname': + 'params fqdn="keystone.maas" ' + 'ip_address="10.0.0.1"'}) + + self.test_config.set('dns-ha', True) + self.get_hacluster_config.return_value = { + 'vip': None, + 'ha-bindiface': 'em0', + 'ha-mcastport': '8080', + 'os-admin-hostname': None, + 'os-internal-hostname': None, + 'os-public-hostname': 'keystone.maas', + } + args = { + 'relation_id': None, + 'corosync_bindiface': 'em0', + 'corosync_mcastport': '8080', + 'init_services': {'res_cephrg_haproxy': 'haproxy'}, + 'resources': {'res_cephrg_public_hostname': 'ocf:maas:dns', + 'res_cephrg_haproxy': 'lsb:haproxy'}, + 'resource_params': { + 'res_cephrg_public_hostname': 'params fqdn="keystone.maas" ' + 'ip_address="10.0.0.1"', + 'res_cephrg_haproxy': 'op monitor interval="5s"'}, + 'clones': {'cl_cephrg_haproxy': 'res_cephrg_haproxy'} + } + self.update_dns_ha_resource_params.side_effect = _fake_update + + ceph_hooks.ha_relation_joined() + self.assertTrue(self.update_dns_ha_resource_params.called) + self.relation_set.assert_called_with(**args) + def test_ha_relation_changed(self): _id_joined = self.patch('identity_joined') self.relation_get.return_value = True From 7fa07c5c64e5c05f65be8547849ca3d8a3e700df Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 17 Jun 2016 11:35:58 +0100 Subject: [PATCH 1139/2699] Switch to using charm-store for amulet tests All OpenStack charms are now 
directly published to the charm store on landing; switch the Amulet helper
to resolve charms using the charm store rather than bzr branches, removing
the lag between charm changes landing and being available for other charms
to use for testing.

This is also important for new layered charms, where the charm must be
built and published prior to being consumable.

Change-Id: I395ee0e0b33c2df98783718fa45829ea572f24cc
---
 ceph-mon/charm-helpers-hooks.yaml             |    9 +-
 .../hooks/charmhelpers/contrib/network/ip.py  |    6 +-
 .../contrib/openstack/amulet/__init__.py      |   15 +
 .../contrib/openstack/amulet/deployment.py    |  297 ++++
 .../contrib/openstack/amulet/utils.py         | 1012 +++++++++++
 .../charmhelpers/contrib/openstack/context.py | 1505 +++++++++++++++++
 .../contrib/openstack/exceptions.py           |    6 +
 .../contrib/openstack/files/__init__.py       |   18 +
 .../contrib/openstack/ha/__init__.py          |    0
 .../contrib/openstack/ha/utils.py             |  111 ++
 .../charmhelpers/contrib/openstack/ip.py      |  182 ++
 .../charmhelpers/contrib/openstack/neutron.py |  384 +++++
 .../contrib/openstack/templates/__init__.py   |   18 +
 .../contrib/openstack/templating.py           |  323 ++++
 .../charmhelpers/contrib/openstack/utils.py   | 1036 ++++++++++--
 .../charmhelpers/contrib/python/__init__.py   |   15 +
 .../charmhelpers/contrib/python/packages.py   |  145 ++
 .../contrib/storage/linux/ceph.py             |   41 +
 .../contrib/storage/linux/loopback.py         |   88 +
 .../charmhelpers/contrib/storage/linux/lvm.py |  105 ++
 ceph-mon/hooks/charmhelpers/core/host.py      |   56 +-
 ceph-mon/hooks/charmhelpers/fetch/__init__.py |   15 +-
 ceph-mon/hooks/charmhelpers/fetch/bzrurl.py   |   23 +-
 .../contrib/openstack/amulet/deployment.py    |   51 +-
 24 files changed, 5289 insertions(+), 172 deletions(-)
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/context.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/python/__init__.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/python/packages.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py
 create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py
diff --git 
a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 6bba07b6..99d78f2f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -405,10 +405,10 @@ def is_ip(address): Returns True if address is a valid IP address. """ try: - # Test to see if already an IPv4 address - socket.inet_aton(address) + # Test to see if already an IPv4/IPv6 address + address = netaddr.IPAddress(address) return True - except socket.error: + except netaddr.AddrFormatError: return False diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..6b917d0c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,297 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import logging +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
+ """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } + + for svc in other_services: + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) + else: + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) + + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + # Charms which should use the source config option + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + + # Charms which can not use openstack-origin, ie. 
many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. 
+ """ + # Must be ordered by OpenStack release (not by Ubuntu release): + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka) = range(14) + + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka} + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. + """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..ef3bdccf --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1012 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import amulet +import json +import logging +import os +import re +import six +import time +import urllib + +import cinderclient.v1.client as cinder_client +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +import keystoneclient.v2_0 as keystone_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + +import novaclient.client as nova_client +import pika +import swiftclient + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. 
+ """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. 
+ service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + unit = keystone_sentry + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
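+
+ Example (illustrative; ``glance`` as returned by
+ authenticate_glance_admin above)::
+
+     image = u.create_cirros_image(glance, 'cirros-test-image')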
+
+ :param glance: pointer to authenticated glance connection
+ :param image_name: display name for new image
+ :returns: glance image pointer
+ """
+ self.log.debug('Creating glance cirros image '
+ '({})...'.format(image_name))
+
+ # Download cirros image
+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+ if http_proxy:
+ proxies = {'http': http_proxy}
+ opener = urllib.FancyURLopener(proxies)
+ else:
+ opener = urllib.FancyURLopener()
+
+ f = opener.open('http://download.cirros-cloud.net/version/released')
+ version = f.read().strip()
+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+ local_path = os.path.join('tests', cirros_img)
+
+ if not os.path.exists(local_path):
+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+ version, cirros_img)
+ opener.retrieve(cirros_url, local_path)
+ f.close()
+
+ # Create glance image
+ with open(local_path) as f:
+ image = glance.images.create(name=image_name, is_public=True,
+ disk_format='qcow2',
+ container_format='bare', data=f)
+
+ # Wait for image to reach active status
+ img_id = image.id
+ ret = self.resource_reaches_status(glance.images, img_id,
+ expected_stat='active',
+ msg='Image status wait')
+ if not ret:
+ msg = 'Glance image failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new image
+ self.log.debug('Validating image attributes...')
+ val_img_name = glance.images.get(img_id).name
+ val_img_stat = glance.images.get(img_id).status
+ val_img_pub = glance.images.get(img_id).is_public
+ val_img_cfmt = glance.images.get(img_id).container_format
+ val_img_dfmt = glance.images.get(img_id).disk_format
+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+ 'container fmt:{} disk fmt:{}'.format(
+ val_img_name, val_img_pub, img_id,
+ val_img_stat, val_img_cfmt, val_img_dfmt))
+
+ if val_img_name == image_name and val_img_stat == 'active' \
+ and val_img_pub is True and val_img_cfmt == 'bare' \
+ and val_img_dfmt == 'qcow2':
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Image validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return image
+
+ def delete_image(self, glance, image):
+ """Delete the specified image."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_image.')
+ self.log.debug('Deleting glance image ({})...'.format(image))
+ return self.delete_resource(glance.images, image, msg='glance image')
+
+ def create_instance(self, nova, image_name, instance_name, flavor):
+ """Create the specified instance."""
+ self.log.debug('Creating instance '
+ '({}|{}|{})'.format(instance_name, image_name, flavor))
+ image = nova.images.find(name=image_name)
+ flavor = nova.flavors.find(name=flavor)
+ instance = nova.servers.create(name=instance_name, image=image,
+ flavor=flavor)
+
+ count = 1
+ status = instance.status
+ while status != 'ACTIVE' and count < 60:
+ time.sleep(3)
+ instance = nova.servers.get(instance.id)
+ status = instance.status
+ self.log.debug('instance status: {}'.format(status))
+ count += 1
+
+ if status != 'ACTIVE':
+ self.log.error('instance creation timed out')
+ return None
+
+ return instance
+
+ def delete_instance(self, nova, instance):
+ """Delete the specified instance."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_instance.')
+ self.log.debug('Deleting instance ({})...'.format(instance))
+ return
self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
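+
+ Example (illustrative; mirrors the call already made in
+ create_cinder_volume above)::
+
+     ok = u.resource_reaches_status(cinder.volumes, vol_id,
+                                    expected_stat='available',
+                                    msg='Volume status wait')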
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. 
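+
+ Example (illustrative; hypothetical object counts sampled before
+ create, after create and after delete)::
+
+     ret = u.validate_ceph_pool_samples([10, 12, 10],
+                                        'cinder pool object count')
+     assert ret is None, ret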
+ + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
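+
+ Example (illustrative; output shape is approximate)::
+
+     str_stat = u.get_rmq_cluster_status(sentry_unit)
+     # ... {running_nodes,['rabbit@host0','rabbit@host1']} ...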
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
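+
+ Example (illustrative; 5671 is the conventional amqps port)::
+
+     ret = u.validate_rmq_ssl_enabled_units(sentry_units, port=5671)
+     assert ret is None, ret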
+
+ :param sentry_units: list of all rmq sentry units
+ :param port: optional ssl port override to validate
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+ return ('Unexpected condition: ssl is disabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def validate_rmq_ssl_disabled_units(self, sentry_units):
+ """Check that ssl is disabled on listed rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+ return ('Unexpected condition: ssl is enabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def configure_rmq_ssl_on(self, sentry_units, deployment,
+ port=None, max_wait=60):
+ """Turn ssl charm config option on, with optional non-default
+ ssl port specification. Confirm that it is enabled on every
+ unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param port: amqp port, use defaults if None
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: on')
+
+ # Enable RMQ SSL
+ config = {'ssl': 'on'}
+ if port:
+ config['ssl_port'] = port
+
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+ """Turn ssl charm config option off, confirm that it is disabled
+ on every unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: off')
+
+ # Disable RMQ SSL
+ config = {'ssl': 'off'}
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+ port=None, fatal=True,
+ username="testuser1", password="changeme"):
+ """Establish and return a pika amqp connection to the rabbitmq service
+ running on a rmq juju unit.
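+
+ Example (illustrative; defaults resolve to port 5672 when ssl is
+ False, 5671 when ssl is True)::
+
+     conn = u.connect_amqp_by_unit(sentry_unit, ssl=False)
+     conn.close()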
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
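+
+ Example (illustrative round trip with publish_amqp_message_by_unit
+ above; the queue name is arbitrary)::
+
+     u.publish_amqp_message_by_unit(sentry_unit, 'ping', queue='test')
+     body = u.get_amqp_message_by_unit(sentry_unit, queue='test')
+     assert body == 'ping'
+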
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 00000000..5faa7eda --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,1505 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import glob +import json +import os +import re +import time +from base64 import b64decode +from subprocess import check_call, CalledProcessError + +import six + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + WARNING, + ERROR, + status_set, +) + +from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError + +from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, + list_nics, + get_nic_hwaddr, + mkdir, + write_file, + pwgen, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv4_addr, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_address_in_network, + is_bridge_member, +) +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_host_ip, +) +from charmhelpers.core.unitdata import kv + +try: + import psutil +except ImportError: + apt_install('python-psutil', fatal=True) + import psutil + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == 
'': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +class OSContextGenerator(object): + """Base class for all context generators.""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. + """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __init__(self, + database=None, user=None, relation_prefix=None, ssl_dir=None): + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ + self.relation_prefix = relation_prefix + self.database = database + self.user = user + self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network(access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... 
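+
+ # NOTE (illustrative, editorial; not upstream code): once the relation
+ # data is complete, the loop below yields a template context shaped
+ # roughly like (values hypothetical):
+ # {'database_host': '10.0.0.10', 'database': 'nova',
+ #  'database_user': 'nova', 'database_password': 'secret',
+ #  'database_type': 'mysql'}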
+ + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql' + } + if self.context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. (database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if self.context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, service=None, service_user=None, rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. 
+ cachedir = '/var/cache/%s' % (self.service)
+ if not os.path.isdir(cachedir):
+ log("Creating service cache dir %s" % (cachedir), level=DEBUG)
+ mkdir(path=cachedir, owner=self.service_user,
+ group=self.service_user, perms=0o700)
+
+ ctxt['signing_dir'] = cachedir
+
+ for rid in relation_ids(self.rel_name):
+ self.related = True
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ serv_host = rdata.get('service_host')
+ serv_host = format_ipv6_addr(serv_host) or serv_host
+ auth_host = rdata.get('auth_host')
+ auth_host = format_ipv6_addr(auth_host) or auth_host
+ svc_protocol = rdata.get('service_protocol') or 'http'
+ auth_protocol = rdata.get('auth_protocol') or 'http'
+ api_version = rdata.get('api_version') or '2.0'
+ ctxt.update({'service_port': rdata.get('service_port'),
+ 'service_host': serv_host,
+ 'auth_host': auth_host,
+ 'auth_port': rdata.get('auth_port'),
+ 'admin_tenant_name': rdata.get('service_tenant'),
+ 'admin_user': rdata.get('service_username'),
+ 'admin_password': rdata.get('service_password'),
+ 'service_protocol': svc_protocol,
+ 'auth_protocol': auth_protocol,
+ 'api_version': api_version})
+
+ if self.context_complete(ctxt):
+ # NOTE(jamespage) this is required for >= icehouse
+ # so a missing value just indicates keystone needs
+ # upgrading
+ ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+ return ctxt
+
+ return {}
+
+
+class AMQPContext(OSContextGenerator):
+
+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+ self.ssl_dir = ssl_dir
+ self.rel_name = rel_name
+ self.relation_prefix = relation_prefix
+ self.interfaces = [rel_name]
+
+ def __call__(self):
+ log('Generating template context for amqp', level=DEBUG)
+ conf = config()
+ if self.relation_prefix:
+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
+ else:
+ user_setting = 'rabbit-user'
+ vhost_setting = 'rabbit-vhost'
+
+ try:
+ username = conf[user_setting]
+ vhost = conf[vhost_setting]
+ except KeyError as e:
+ log('Could not generate amqp context. Missing required charm '
+ 'config options: %s.'
% e, level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.rel_name): + ha_vip_only = False + self.related = True + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if self.context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'w') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! + break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + if not self.complete: + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } + for rid in relation_ids('ceph'): + for unit in related_units(rid): + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr + mon_hosts.append(ceph_addr) + + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not self.context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
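+
+ A consuming charm supplies the other half; illustratively, its own
+ generator might return something like::
+
+     {'service_ports': {'cinder_api': [8776, 8766]}}
+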
+ """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + if config('prefer-ipv6'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] + else: + addr = get_host_ip(unit_get('private-address')) + + l_unit = local_unit().replace('/', '-') + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + + if config('prefer-ipv6'): + ctxt['ipv6'] = True + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + + ctxt['stat_port'] = '8888' + + db = kv() + ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. 
Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. + external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self, cn=None): + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): + ca_cert = get_ca_cert() + if ca_cert: + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + self.configure_ca() + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + cn = resolve_address(endpoint_type=INTERNAL) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + + with open(_file, 'wb') as out: + out.write(self.plugin + '\n') + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 
'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + + def __call__(self): + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + self._save_flag_file() + return ctxt + + +class NeutronPortContext(OSContextGenerator): + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. 
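+
+ Example (illustrative; hypothetical NIC name and MAC)::
+
+     resolve_ports(['eth2', 'a0:36:9f:12:34:56'])
+     # -> ['eth2', 'eth3'] if the MAC maps to an unconfigured eth3
+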
+ """ + if not ports: + return None + + hwaddr_to_nic = {} + hwaddr_to_ip = {} + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + + hwaddr = get_nic_hwaddr(nic) + hwaddr_to_nic[hwaddr] = nic + addresses = get_ipv4_addr(nic, fatal=False) + addresses += get_ipv6_addr(iface=nic, fatal=False) + hwaddr_to_ip[hwaddr] = addresses + + resolved = [] + mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) + for entry in ports: + if re.match(mac_regex, entry): + # NIC is in known NICs and does NOT hace an IP address + if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: + # If the nic is part of a bridge then don't use it + if is_bridge_member(hwaddr_to_nic[entry]): + continue + + # Entry is a MAC address for a valid interface that doesn't + # have an IP address assigned yet. + resolved.append(hwaddr_to_nic[entry]) + else: + # If the passed entry is not a MAC address, assume it's a valid + # interface, and that the user put it there on purpose (we can + # trust it to be the real external network). + resolved.append(entry) + + # Ensure no duplicates + return list(set(resolved)) + + +class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. + + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. + + NOTE: the value of config-flags may be a comma-separated list of + key=value pairs and some Openstack config files support + comma-separated lists as values. + """ + + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. + :param template_flag: insert point for user-defined flags in template + file. + """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + + def __call__(self): + config_flags = config(self._charm_flag) + if not config_flags: + return {} + + return {self._template_flag: + config_flags_parser(config_flags)} + + +class LibvirtConfigFlagsContext(OSContextGenerator): + """ + This context provides support for extending + the libvirt section through user-defined flags. + """ + def __call__(self): + ctxt = {} + libvirt_flags = config('libvirt-flags') + if libvirt_flags: + ctxt['libvirt_flags'] = config_flags_parser( + libvirt_flags) + return ctxt + + +class SubordinateConfigContext(OSContextGenerator): + + """ + Responsible for inspecting relations to subordinates that + may be exporting required config via a json blob. + + The subordinate interface allows subordinates to export their + configuration requirements to the principle for multiple config + files and multiple serivces. 
I.e. a subordinate that has interfaces + to both glance and nova may export the following yaml blob as json:: + + glance: + /etc/glance/glance-api.conf: + sections: + DEFAULT: + - [key1, value1] + /etc/glance/glance-registry.conf: + MYSECTION: + - [key2, value2] + nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [key3, value3] + + + It is then up to the principal charms to subscribe this context to + the service+config file it is interested in. Configuration data will + be available in the template context, in glance's case, as:: + + ctxt = { + ... other context ... + 'subordinate_configuration': { + 'DEFAULT': { + 'key1': 'value1', + }, + 'MYSECTION': { + 'key2': 'value2', + }, + } + } + """ + + def __init__(self, service, config_file, interface): + """ + :param service : Service name key to query in any subordinate + data found + :param config_file : Service's config file to query sections + :param interface : Subordinate interface to inspect + """ + self.config_file = config_file + if isinstance(service, list): + self.services = service + else: + self.services = [service] + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] + + def __call__(self): + ctxt = {'sections': {}} + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: + for unit in related_units(rid): + sub_config = relation_get('subordinate_configuration', + rid=rid, unit=unit) + if sub_config and sub_config != '': + try: + sub_config = json.loads(sub_config) + except: + log('Could not parse JSON from ' + 'subordinate_configuration setting from %s' + % rid, level=ERROR) + continue + + for service in self.services: + if service not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s service' + % (rid, service), level=INFO) + continue + + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s' + % (rid, self.config_file), level=INFO) + continue + + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + + +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + + return ctxt + + +class SyslogContext(OSContextGenerator): + + def __call__(self): + ctxt = {'use_syslog': config('use-syslog')} + return ctxt + + +class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return {'bind_host': '::'} + else: + return {'bind_host': '0.0.0.0'} + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + # NOTE: use cpu_count if present (16.04 support) + if hasattr(psutil, 'cpu_count'): + return psutil.cpu_count() + else: + return psutil.NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): +
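        # Editorial note, not in the original patch: this context simply
        # mirrors the peer-published 'nonce', 'host' and 'zmq_redis_address'
        # relation settings into the template context once the
        # 'zeromq-configuration' relation has published a 'host' value.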
ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + :param amqp_relation: Name of AMQP relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + + return ctxt + + +class SysctlContext(OSContextGenerator): + """This context checks if the 'sysctl' option exists in configuration + and then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Returns + defaults if it is not present. + ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + # Map of {port/mac:bridge} + portmap = parse_data_port_mappings(ports) + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. + resolved = self.resolve_ports(ports) + # FIXME: is this necessary?
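            # Editorial note, not in the original patch: the two statements
            # below build a lookup from the user's original identifiers
            # (MAC address or NIC name) to the resolved NIC name, so the
            # final comprehension can translate portmap into
            # {nic_name: bridge} regardless of how each port was specified.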
+ normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + if port in ports}) + if resolved: + return {normalized[port]: bridge for port, bridge in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) + if mtu: + ctxt["devs"] = '\\n'.join(all_ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + 'api_version': + rdata.get('api_version') or '2.0', + } + if self.context_complete(ctxt): + return ctxt + return {} + + +class InternalEndpointContext(OSContextGenerator): + """Internal endpoint context. + + This context provides the endpoint type used for communication between + services e.g. between Nova and Cinder internally. Openstack uses Public + endpoints by default so this allows admins to optionally use internal + endpoints. + """ + def __call__(self): + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class AppArmorContext(OSContextGenerator): + """Base class for apparmor contexts.""" + + def __init__(self): + self._ctxt = None + self.aa_profile = None + self.aa_utils_packages = ['apparmor-utils'] + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """ + Validate aa-profile-mode settings is disable, enforce, or complain. + + :return ctxt: Dictionary of the apparmor profile or None + """ + if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: + ctxt = {'aa-profile-mode': config('aa-profile-mode')} + else: + ctxt = None + return ctxt + + def __call__(self): + return self.ctxt + + def install_aa_utils(self): + """ + Install packages required for apparmor configuration. + """ + log("Installing apparmor utils.") + ensure_packages(self.aa_utils_packages) + + def manually_disable_aa_profile(self): + """ + Manually disable an apparmor profile. 
+ + If aa-profile-mode is set to disabled (default) this is required as the + template has been written but apparmor is yet unaware of the profile + and aa-disable aa-profile fails. Without this the profile would kick + into enforce mode on the next service restart. + + """ + profile_path = '/etc/apparmor.d' + disable_path = '/etc/apparmor.d/disable' + if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): + os.symlink(os.path.join(profile_path, self.aa_profile), + os.path.join(disable_path, self.aa_profile)) + + def setup_aa_profile(self): + """ + Setup an apparmor profile. + The ctxt dictionary will contain the apparmor profile mode and + the apparmor profile name. + Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])] + cmd.append(self.ctxt['aa-profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa-profile-mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa-profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa-profile'], + self.ctxt['aa-profile-mode'])) + raise e diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 00000000..ea4eb68e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,6 @@ +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py new file mode 100644 index 00000000..75876796 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..34064237 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,111 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import re + +from charmhelpers.core.hookenv import ( + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, +) + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Check for os-*-hostname settings and update resource dictionaries for + the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + networkspace = m.group(1) + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine network space name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + resources[hostname_key] = crm_ocf + resource_params[hostname_key] = ( + 'params fqdn="{}" ip_address="{}" ' + ''.format(hostname, resolve_address(endpoint_type=networkspace, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. 
' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_set(relation_id=relation_id, groups={ + 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..7875b997 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,182 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + + +from charmhelpers.core.hookenv import ( + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + } +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. + """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. 
+ :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address, ensuring the address is on the + configured net split if one is configured, or on a Juju 2.0 extra-binding + if one has been used. + + :param endpoint_type: Network endpoint type + :param override: Accept hostname overrides or not + """ + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] + clustered = is_clustered() + + if clustered and vips: + if net_addr: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except NotImplementedError: + # If no net-splits are configured and there is no support + # for extra bindings/network spaces, we expect a single vip + resolved_address = vips[0] + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except NotImplementedError: + resolved_address = fallback_addr + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. (net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 00000000..d057ea6e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,384 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +# Various utilities for dealing with Neutron and the renaming from Quantum.
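# Editorial sketch, not part of the original patch: the usual entry point
# into this module is neutron_plugin_attribute(), defined further down, e.g.
#
#     pkgs = neutron_plugin_attribute('ovs', 'packages', 'neutron')
#     conf = neutron_plugin_attribute('ovs', 'config', 'neutron')
#
# where the attribute names ('packages', 'config', 'driver', 'services',
# etc.) are simply the keys of the per-plugin dictionaries defined below.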
+ +import six +from subprocess import check_output + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import os_release + + +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + return 'linux-headers-%s' % kver + +QUANTUM_CONF_DIR = '/etc/quantum' + + +def kernel_version(): + """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return [headers_package(), 'openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd'], + 'packages': [determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] + } + } + if release >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 
'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + if release >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if release >= 'liberty': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatibility (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ + parsed = {} + if mappings: + mappings = mappings.split() + for m in mappings: + p = m.partition(':') + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue + else: + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port. + + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. + """ + + # NOTE(dosaboy): we key on the rvalue so that multiple values can be + # proposed for the same bridge, since a port may be a mac address which + # will differ across units, allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) + if not _mappings or list(_mappings.values()) == ['']: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config.
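            # Editorial note, not in the original patch: e.g. a legacy value
            # of "eth0" reaches the fallback below and parses to
            # {'eth0': 'br-data'}, while "br-ex:eth0 br-data:52:54:00:ab:cd:ef"
            # is handled above and yields
            # {'eth0': 'br-ex', '52:54:00:ab:cd:ef': 'br-data'}.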
+ _mappings = {mappings.split()[0]: default_bridge} + + ports = _mappings.keys() + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + The start:end range is optional and may be omitted. + + Returns dict of the form {provider: (start, end)}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + return {} + + mappings = {} + for p, r in six.iteritems(_mappings): + mappings[p] = tuple(r.split(':')) + + return mappings diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 00000000..75876796 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..e5e3cb1b --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,323 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +import os + +import six + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions +except ImportError: + apt_update(fatal=True) + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If a release's template directory is + missing under templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir.
+ + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg:: + + hooks/charmhelpers/contrib/openstack/templates + + :param templates_dir (str): Base template directory containing release + sub-directories. + :param os_release (str): OpenStack release codename to construct template + loader. + :returns: jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in six.itervalues(OPENSTACK_CODENAMES)] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' % templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains templates_dir and possibly a common templates dir + # shipped with the helper. + loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have satisfied contexts. + ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. + + Basic usage:: + + # import some common context generators from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. + configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + **OpenStack Releases and template loading** + + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed.
+ + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + + For the example above, '/tmp/templates' contains the following structure:: + + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first searches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. + + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release. + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + **Context generators** + + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generators are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering.
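        Illustrative only (editorial sketch; the path and context list are
        hypothetical)::

            configs.register('/etc/glance/glance-api.conf',
                             [context.SharedDBContext(),
                              context.IdentityServiceContext()])

        Registering the same config_file again replaces the earlier entry,
        since registered templates are keyed by path.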
+ """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + + log('Rendering from template: %s' % _tmpl, level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in six.iterkeys(self.templates)] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in six.itervalues(self.templates)] + return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. 
Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 2af4476d..53e58424 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,8 +23,12 @@ import os import sys import re +import itertools +import functools +import shutil import six +import tempfile import traceback import uuid import yaml @@ -41,10 +45,13 @@ config, log as juju_log, charm_dir, + DEBUG, INFO, + ERROR, related_units, relation_ids, relation_set, + service_name, status_set, hook_name ) @@ -58,6 +65,7 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, is_ipv6, + port_has_listener, ) from charmhelpers.contrib.python.packages import ( @@ -65,10 +73,19 @@ pip_install, ) -from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + restart_on_change_helper, +) from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -87,6 +104,8 @@ ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zebra', 'ocata'), # TODO: upload with real Z name ]) @@ -101,73 +120,115 @@ ('2015.1', 'kilo'), ('2015.2', 'liberty'), ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), ]) -# The ugly duckling +# The ugly duckling - must list releases oldest to newest SWIFT_CODENAMES = OrderedDict([ - ('1.4.3', 'diablo'), - ('1.4.8', 'essex'), - ('1.7.4', 'folsom'), - ('1.8.0', 'grizzly'), - ('1.7.7', 'grizzly'), - ('1.7.6', 'grizzly'), - ('1.10.0', 'havana'), - ('1.9.1', 'havana'), - ('1.9.0', 'havana'), - ('1.13.1', 'icehouse'), - ('1.13.0', 'icehouse'), - ('1.12.0', 'icehouse'), - ('1.11.0', 'icehouse'), - ('2.0.0', 'juno'), - ('2.1.0', 'juno'), - ('2.2.0', 'juno'), - ('2.2.1', 'kilo'), - ('2.2.2', 'kilo'), - ('2.3.0', 'liberty'), - ('2.4.0', 'liberty'), - ('2.5.0', 'liberty'), + ('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0']), ]) # >= 
Liberty version->codename mapping PACKAGE_CODENAMES = { 'nova-common': OrderedDict([ - ('12.0', 'liberty'), - ('13.0', 'mitaka'), + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), ]), 'neutron-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'cinder-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'keystone': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'horizon-common': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'ceilometer-common': OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'heat-common': OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'glance-common': OrderedDict([ - ('11.0', 'liberty'), - ('12.0', 'mitaka'), + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), ]), 'openstack-dashboard': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), } +GIT_DEFAULT_REPOS = { + 'requirements': 'git://github.com/openstack/requirements', + 'cinder': 'git://github.com/openstack/cinder', + 'glance': 'git://github.com/openstack/glance', + 'horizon': 'git://github.com/openstack/horizon', + 'keystone': 'git://github.com/openstack/keystone', + 'neutron': 'git://github.com/openstack/neutron', + 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', + 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', + 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', + 'nova': 'git://github.com/openstack/nova', +} + +GIT_DEFAULT_BRANCHES = { + 'icehouse': 'icehouse-eol', + 'kilo': 'stable/kilo', + 'liberty': 'stable/liberty', + 'mitaka': 'stable/mitaka', + 'master': 'master', +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -227,6 +288,44 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): error_out(e) +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. 
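            # Editorial note, not in the original patch: swift '2.5.0' above
            # is listed under both 'liberty' and 'mitaka'; this newest-first
            # scan of the apt policy output is what resolves that ambiguity.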
+ for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match('^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' import apt_pkg as apt @@ -262,15 +361,18 @@ def get_os_codename_package(package, fatal=True): if match: vers = match.group(0) + # Generate a major version number for newer semantic + # versions of openstack projects + major_vers = vers.split('.')[0] # >= Liberty independent project versions if (package in PACKAGE_CODENAMES and - vers in PACKAGE_CODENAMES[package]): - return PACKAGE_CODENAMES[package][vers] + major_vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][major_vers] else: # < Liberty co-ordinated project versions try: if 'swift' in pkg.name: - return SWIFT_CODENAMES[vers] + return get_swift_codename(vers) else: return OPENSTACK_CODENAMES[vers] except KeyError: @@ -289,12 +391,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES + for cname, version in six.iteritems(vers_map): + if cname == codename: + return version[-1] else: vers_map = OPENSTACK_CODENAMES - - for version, cname in six.iteritems(vers_map): - if cname == codename: - return version + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version # e = "Could not determine OpenStack version for package: %s" % pkg # error_out(e) @@ -319,12 +423,42 @@ def os_release(package, base='essex'): def import_key(keyid): - cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ - "--recv-keys %s" % keyid - try: - subprocess.check_call(cmd.split(' ')) - except subprocess.CalledProcessError: - error_out("Error importing repo key %s" % keyid) + key = keyid.strip() + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + juju_log("Importing ASCII Armor PGP key", level=DEBUG) + with tempfile.NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + else: + juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) + juju_log("Importing PGP key from keyserver", level=DEBUG) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + + +def get_source_and_pgp_key(input): + """Look for a pgp key ID or ascii-armor key in the given input.""" + index = input.strip() + index = input.rfind('|') + if index < 0: + return input, None + + key = input[index + 1:].strip('|') + source = input[:index] + return source, key def 
configure_installation_source(rel): @@ -336,16 +470,16 @@ def configure_installation_source(rel): with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(DISTRO_PROPOSED % ubuntu_rel) elif rel[:4] == "ppa:": - src = rel + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + subprocess.check_call(["add-apt-repository", "-y", src]) elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) + src, key = get_source_and_pgp_key(rel) + if key: import_key(key) - elif l == 1: - src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(src) elif rel[:6] == 'cloud:': @@ -393,6 +527,9 @@ def configure_installation_source(rel): 'mitaka': 'trusty-updates/mitaka', 'mitaka/updates': 'trusty-updates/mitaka', 'mitaka/proposed': 'trusty-proposed/mitaka', + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', } try: @@ -460,11 +597,16 @@ def openstack_upgrade_available(package): cur_vers = get_os_version_package(package) if "swift" in package: codename = get_os_codename_install_source(src) - available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + avail_vers = get_os_version_codename_swift(codename) else: - available_vers = get_os_version_install_source(src) + avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(available_vers, cur_vers) == 1 + if "swift" in package: + major_cur_vers = cur_vers.split('.', 1)[0] + major_avail_vers = avail_vers.split('.', 1)[0] + major_diff = apt.version_compare(major_avail_vers, major_cur_vers) + return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) + return apt.version_compare(avail_vers, cur_vers) == 1 def ensure_block_device(block_device): @@ -583,6 +725,53 @@ def git_install_requested(): requirements_dir = None +def git_default_repos(projects): + """ + Returns default repos if a default openstack-origin-git value is specified. + """ + service = service_name() + + for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + if projects == default: + + # add the requirements repo first + repo = { + 'name': 'requirements', + 'repository': GIT_DEFAULT_REPOS['requirements'], + 'branch': branch, + } + repos = [repo] + + # neutron and nova charms require some additional repos + if service == 'neutron': + for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + repo = { + 'name': svc, + 'repository': GIT_DEFAULT_REPOS[svc], + 'branch': branch, + } + repos.append(repo) + elif service == 'nova': + repo = { + 'name': 'neutron', + 'repository': GIT_DEFAULT_REPOS['neutron'], + 'branch': branch, + } + repos.append(repo) + + # finally add the current service's repo + repo = { + 'name': service, + 'repository': GIT_DEFAULT_REPOS[service], + 'branch': branch, + } + repos.append(repo) + + return yaml.dump(dict(repositories=repos)) + + return projects + + def _git_yaml_load(projects_yaml): """ Load the specified yaml into a dictionary. 
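The YAML emitted by git_default_repos() is easiest to see by example. A minimal sketch, assuming a hypothetical keystone unit (service_name() == 'keystone') and openstack-origin-git=mitaka; the exact flow style and key ordering of yaml.dump may differ:

    >>> print(git_default_repos('mitaka'))
    repositories:
    - {branch: stable/mitaka, name: requirements,
       repository: 'git://github.com/openstack/requirements'}
    - {branch: stable/mitaka, name: keystone,
       repository: 'git://github.com/openstack/keystone'}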
@@ -696,7 +885,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, os.mkdir(parent_dir) juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) + repo_dir = install_remote( + repo, dest=parent_dir, branch=branch, depth=depth) venv = os.path.join(parent_dir, 'venv') @@ -779,6 +969,47 @@ def git_yaml_value(projects_yaml, key): return None +def git_generate_systemd_init_files(templates_dir): + """ + Generate systemd init files. + + Generates and installs systemd init units and script files based on the + *.init.in files contained in the templates_dir directory. + + This code is based on the openstack-pkg-tools package and its init + script generation, which is used by the OpenStack packages. + """ + for f in os.listdir(templates_dir): + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + init_source = os.path.join(templates_dir, init_file) + service_source = os.path.join(templates_dir, service_file) + + init_dest = os.path.join('/etc/init.d', init_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(init_in_source, init_source) + with open(init_source, 'a') as outfile: + template = '/usr/share/openstack-pkg-tools/init-script-template' + with open(template) as infile: + outfile.write('\n\n{}'.format(infile.read())) + + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(init_dest): + os.remove(init_dest) + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.move(init_source, init_dest) + shutil.move(service_source, service_dest) + os.chmod(init_dest, 0o755) + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -795,56 +1026,155 @@ def wrapped_f(*args, **kwargs): return wrap -def set_os_workload_status(configs, required_interfaces, charm_func=None): +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. 
that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + if the unit isn't supposed to be paused, just return None, None + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None """ - Set workload status based on complete contexts. - status-set missing or incomplete contexts - and juju-log details of missing required data. - charm_func is a charm specific function to run checking - for charm specific requirements such as a VIP setting. + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. + + - Checks for missing or incomplete contexts + - juju log details of missing required data. + - determines the correct workload status + - creates an appropriate message for status_set(...) + + if there are no problems then the function returns None, None + + @param configs: a templating.OSConfigRenderer() object + @params required_interfaces: {generic_interface: [specific_interface], } + @returns state, message or None, None """ - incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) - state = 'active' - missing_relations = [] - incomplete_relations = [] + incomplete_rel_data = incomplete_relation_data(configs, + required_interfaces) + state = None message = None - charm_state = None - charm_message = None + missing_relations = set() + incomplete_relations = set() - for generic_interface in incomplete_rel_data.keys(): + for generic_interface, relations_states in incomplete_rel_data.items(): related_interface = None missing_data = {} # Related or not? 
- for interface in incomplete_rel_data[generic_interface]: - if incomplete_rel_data[generic_interface][interface].get('related'): + for interface, relation_state in relations_states.items(): + if relation_state.get('related'): related_interface = interface - missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') - # No relation ID for the generic_interface + missing_data = relation_state.get('missing_data') + break + # No relation ID for the generic_interface? if not related_interface: juju_log("{} relation is missing and must be related for " "functionality. ".format(generic_interface), 'WARN') state = 'blocked' - if generic_interface not in missing_relations: - missing_relations.append(generic_interface) + missing_relations.add(generic_interface) else: - # Relation ID exists but no related unit + # Relation ID exists but no related unit if not missing_data: - # Edge case relation ID exists but departing - if ('departed' in hook_name() or 'broken' in hook_name()) \ - and related_interface in hook_name(): + # Edge case - relation ID exists but departing + _hook_name = hook_name() + if (('departed' in _hook_name or 'broken' in _hook_name) and + related_interface in _hook_name): state = 'blocked' - if generic_interface not in missing_relations: - missing_relations.append(generic_interface) + missing_relations.add(generic_interface) juju_log("{} relation's interface, {}, " "relationship is departed or broken " "and is required for functionality." - "".format(generic_interface, related_interface), "WARN") + "".format(generic_interface, related_interface), + "WARN") # Normal case relation ID exists but no related unit # (joining) else: - juju_log("{} relations's interface, {}, is related but has " - "no units in the relation." - "".format(generic_interface, related_interface), "INFO") + juju_log("{} relation's interface, {}, is related but has" + " no units in the relation." + "".format(generic_interface, related_interface), + "INFO") # Related unit exists and data missing on the relation else: juju_log("{} relation's interface, {}, is related awaiting " @@ -853,9 +1183,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): ", ".join(missing_data)), "INFO") if state != 'blocked': state = 'waiting' - if generic_interface not in incomplete_relations \ - and generic_interface not in missing_relations: - incomplete_relations.append(generic_interface) + if generic_interface not in missing_relations: + incomplete_relations.add(generic_interface) if missing_relations: message = "Missing relations: {}".format(", ".join(missing_relations)) @@ -868,9 +1197,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): "".format(", ".join(incomplete_relations)) state = 'waiting' - # Run charm specific checks - if charm_func: - charm_state, charm_message = charm_func(configs) + return state, message
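A minimal sketch of how a charm hook would drive these checks; CONFIGS, the interface map and the service/port lists are illustrative assumptions, not part of this patch:

    REQUIRED_INTERFACES = {'mon': ['ceph-mon']}  # hypothetical mapping

    def update_status():
        # Relation completeness picks blocked/waiting; the optional
        # charm_func and the service/port checks can then refine it.
        set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                               services=['ceph-mon'], ports=[6789])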
+ """ + if charm_func_with_configs: + charm_state, charm_message = charm_func_with_configs() if charm_state != 'active' and charm_state != 'unknown': state = workload_state_compare(state, charm_state) if message: @@ -879,13 +1221,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): message = "{}, {}".format(message, charm_message) else: message = charm_message + return state, message - # Set to active if all requirements have been met - if state == 'active': - message = "Unit is ready" - juju_log(message, "INFO") - status_set(state, message) +def _ows_check_services_running(services, ports): + """Check that the services that should be running are actually running + and that any ports specified are being listened to. + + @param services: list of strings OR dictionary specifying services/ports + @param ports: list of ports + @returns state, message: strings or None, None + """ + messages = [] + state = None + if services is not None: + services = _extract_services_list_helper(services) + services_running, running = _check_running_services(services) + if not all(running): + messages.append( + "Services not running that should be: {}" + .format(", ".join(_filter_tuples(services_running, False)))) + state = 'blocked' + # also verify that the ports that should be open are open + # NB, that ServiceManager objects only OPTIONALLY have ports + map_not_open, ports_open = ( + _check_listening_on_services_ports(services)) + if not all(ports_open): + # find which service has missing ports. They are in service + # order which makes it a bit easier. + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in ports_open + if not v]))) + state = 'blocked' + + if state is not None: + message = "; ".join(messages) + return state, message + + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. 
+ + @param services: OrderedDict of strings: [ports], one for each service to + check. + @returns [(service, boolean), ...], : results for checks + [boolean] : just the result of the service checks + """ + services_running = [service_running(s) for s in services] + return list(zip(services, services_running)), services_running + + +def _check_listening_on_services_ports(services, test=False): + """Check that the unit is actually listening (has the port open) on the + ports that the service specifies are open. If test is True then the + function returns the services with ports that are open rather than + closed. + + Returns an OrderedDict of service: ports and a list of booleans + + @param services: OrderedDict(service: [port, ...], ...) + @param test: default=False, if False, test for closed, otherwise open. + @returns OrderedDict(service: [port-not-open, ...]...), [boolean] + """ + test = not(not(test)) # ensure test is True or False + all_ports = list(itertools.chain(*services.values())) + ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + map_ports = OrderedDict() + matched_ports = [p for p, opened in zip(all_ports, ports_states) + if opened == test] # i.e. ports whose state matches the test flag + for service, ports in services.items(): + set_ports = set(ports).intersection(matched_ports) + if set_ports: + map_ports[service] = set_ports + return map_ports, ports_states + + +def _check_listening_on_ports_list(ports): + """Check that the ports list given are being listened to + + Returns a list of ports being listened to and a list of the + booleans. + + @param ports: LIST of port numbers. + @returns [(port_num, boolean), ...], [boolean] + """ + ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + return zip(ports, ports_open), ports_open + + +def _filter_tuples(services_states, state): + """Return a simple list from a list of tuples according to the condition + + @param services_states: LIST of (string, boolean): service and running + state. + @param state: Boolean to match the tuple against. + @returns [LIST of strings] that matched the tuple RHS. + """ + return [s for s, b in services_states if b == state] def workload_state_compare(current_workload_state, workload_state): @@ -910,8 +1390,7 @@ def workload_state_compare(current_workload_state, workload_state): def incomplete_relation_data(configs, required_interfaces): - """ - Check complete contexts against required_interfaces + """Check complete contexts against required_interfaces Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered @@ -936,19 +1415,13 @@ def incomplete_relation_data(configs, required_interfaces): 'shared-db': {'related': True}}} """ complete_ctxts = configs.complete_contexts() - incomplete_relations = [] - for svc_type in required_interfaces.keys(): - # Avoid duplicates - found_ctxt = False - for interface in required_interfaces[svc_type]: - if interface in complete_ctxts: - found_ctxt = True - if not found_ctxt: - incomplete_relations.append(svc_type) - incomplete_context_data = {} - for i in incomplete_relations: - incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) - return incomplete_context_data + incomplete_relations = [ + svc_type + for svc_type, interfaces in required_interfaces.items() + if not set(interfaces).intersection(complete_ctxts)] + return { + i: configs.get_incomplete_context_data(required_interfaces[i]) + for i in incomplete_relations} def do_action_openstack_upgrade(package, upgrade_callback, configs): @@ -1009,3 +1482,326 @@ def remote_restart(rel_name, remote_service=None): relation_set(relation_id=rid, relation_settings=trigger, ) + + +def check_actually_paused(services=None, ports=None): + """Check that services listed in the services object and ports + are actually closed (not listened to), to verify that the unit is + properly paused. + + @param services: See _extract_services_list_helper + @returns status, : string for status (None if okay) + message : string for problem for status_set + """ + state = None + message = None + messages = [] + if services is not None: + services = _extract_services_list_helper(services) + services_running, services_states = _check_running_services(services) + if any(services_states): + # there shouldn't be any running so this is a problem + messages.append("these services running: {}" + .format(", ".join( + _filter_tuples(services_running, True)))) + state = "blocked" + ports_open, ports_open_bools = ( + _check_listening_on_services_ports(services, True)) + if any(ports_open_bools): + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in ports_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "these service:ports are open: {}".format(message)) + state = 'blocked' + if ports is not None: + ports_open, bools = _check_listening_on_ports_list(ports) + if any(bools): + messages.append( + "these ports which should be closed, but are open: {}" + .format(", ".join([str(p) for p, v in ports_open if v]))) + state = 'blocked' + if messages: + message = ("Services should be paused but {}" + .format(", ".join(messages))) + return state, message + + +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing) + if it raises an exception, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean.
+ return not(not(kv.get('unit-paused'))) + except: + return False + + +def pause_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Pause a unit by stopping the services and setting 'unit-paused' + in the local kv() store. + + Also checks that the services have stopped and ports are no longer + being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return a non-None message to indicate that the unit + didn't pause cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of ports + @param charm_func: function to run for custom charm pausing. + @returns None + @raises Exception(message) on an error for action_fail(). + """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + stopped = service_pause(service) + if not stopped: + messages.append("{} didn't stop cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + messages.append(str(e)) + set_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't pause: {}".format("; ".join(messages)))
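A hedged sketch of a pause action built from these helpers (make_assess_status_func() is defined further down in this patch; the other names here are illustrative assumptions):

    def pause(args):
        # assess_status_func re-evaluates the workload status after the
        # services stop and returns a message only if something is wrong.
        assess = make_assess_status_func(
            CONFIGS, REQUIRED_INTERFACES, services=['ceph-mon'])
        # raises Exception(message), suitable for action_fail(), on failure
        pause_unit(assess, services=['ceph-mon'])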
+ """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + started = service_resume(service) + if not started: + messages.append("{} didn't start cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + message.append(str(e)) + clear_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't resume: {}".format("; ".join(messages))) + + +def make_assess_status_func(*args, **kwargs): + """Creates an assess_status_func() suitable for handing to pause_unit() + and resume_unit(). + + This uses the _determine_os_workload_status(...) function to determine + what the workload_status should be for the unit. If the unit is + not in maintenance or active states, then the message is returned to + the caller. This is so an action that doesn't result in either a + complete pause or complete resume can signal failure with an action_fail() + """ + def _assess_status_func(): + state, message = _determine_os_workload_status(*args, **kwargs) + status_set(state, message) + if state not in ['maintenance', 'active']: + return message + return None + + return _assess_status_func + + +def pausable_restart_on_change(restart_map, stopstart=False, + restart_functions=None): + """A restart_on_change decorator that checks to see if the unit is + paused. If it is paused then the decorated function doesn't fire. + + This is provided as a helper, as the @restart_on_change(...) decorator + is in core.host, yet the openstack specific helpers are in this file + (contrib.openstack.utils). Thus, this needs to be an optional feature + for openstack charms (or charms that wish to use the openstack + pause/resume type features). + + It is used as follows: + + from contrib.openstack.utils import ( + pausable_restart_on_change as restart_on_change) + + @restart_on_change(restart_map, stopstart=) + def some_hook(...): + pass + + see core.utils.restart_on_change() for more details. + + @param f: the function to decorate + @param restart_map: the restart map {conf_file: [services]} + @param stopstart: DEFAULT false; whether to stop, start or just restart + @returns decorator to use a restart_on_change with pausability + """ + def wrap(f): + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + if is_unit_paused_set(): + return f(*args, **kwargs) + # otherwise, normal restart_on_change functionality + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), restart_map, stopstart, + restart_functions) + return wrapped_f + return wrap + + +def config_flags_parser(config_flags): + """Parses config flags string into dict. + + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + + {'key1': 'value1', + 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + + {'key1', 'value1', + 'key2', 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. 
This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + + {'key1': 'subkey1=value1, subkey2=value2'} + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. + colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return yaml.safe_load(config_flags) + + if config_flags.find('==') >= 0: + juju_log("config_flags is not in expected format (key=value)", + level=ERROR) + raise OSContextError + + # strip the following from each value. + post_strippers = ' ,' + # we strip any leading/trailing '=' or ' ' from the string then + # split on '='. + split = config_flags.strip(' =').split('=') + limit = len(split) + flags = {} + for i in range(0, limit - 1): + current = split[i] + next = split[i + 1] + vindex = next.rfind(',') + if (i == limit - 2) or (vindex < 0): + value = next + else: + value = next[:vindex] + + if i == 0: + key = current + else: + # if this is not the first entry, expect an embedded key. + index = current.rfind(',') + if index < 0: + juju_log("Invalid config value(s) at index %s" % (i), + level=ERROR) + raise OSContextError + key = current[index + 1:] + + # Add to collection. + flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + + return flags
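A few worked examples of config_flags_parser(), traced through the split('=') walk above; treat the exact outputs as a sketch:

    config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}

    config_flags_parser('key1=value1, key2=value3,value4,value5')
    # -> {'key1': 'value1', 'key2': 'value3,value4,value5'}

    # A colon before any '=' switches to yaml parsing:
    config_flags_parser('global: {osd_pool_default_size: 3}')
    # -> {'global': {'osd_pool_default_size': 3}}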
diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py new file mode 100644 index 00000000..a2411c37 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +import os +import subprocess +import sys + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import charm_dir, log + +__author__ = "Jorge Niedbalski " + + +def pip_execute(*args, **kwargs): + """Overridden pip_execute() to stop sys.path being changed. + + The act of importing main from the pip module seems to add wheels + from the /usr/share/python-wheels directory, which are installed by + various tools. This function ensures that sys.path remains the same + after the call is executed. + """ + try: + _path = sys.path + try: + from pip import main as _pip_execute + except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as _pip_execute + _pip_execute(*args, **kwargs) + finally: + sys.path = _path + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if not value: + continue + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, constraints=None, **options): + """Install a requirements file. + + :param constraints: Path to pip constraints file. + http://pip.readthedocs.org/en/stable/user_guide/#constraints-files + """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + if constraints: + command.append("-c {0}".format(constraints)) + log("Installing from file: {} with constraints {} " + "and options: {}".format(requirements, constraints, command)) + else: + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): + """Install a python package""" + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] + + available_options = ('proxy', 'src', 'log', 'index-url', ) + for option in parse_options(options, available_options): + command.append(option) + + if upgrade: + command.append('--upgrade') + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) + + +def pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + apt_install('python-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path])
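A short usage sketch for the pip helpers above; the package names and proxy URL are illustrative only:

    # Install into the system python, upgrading if already present:
    pip_install('six', upgrade=True, proxy='http://proxy.example.com:3128')

    # Create a virtualenv inside the charm dir and install into it:
    venv = os.path.join(charm_dir(), 'venv')
    pip_create_virtualenv(venv)
    pip_install(['pbr', 'PyYAML'], venv=venv)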
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index d008081f..b2484e78 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ CalledProcessError, ) from charmhelpers.core.hookenv import ( + config, local_unit, relation_get, relation_ids, @@ -64,6 +65,7 @@ ) from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -1204,3 +1206,42 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + + +class CephConfContext(object): + """Ceph config (ceph.conf) context. + + Supports user-provided Ceph configuration settings. Users can provide a + dictionary as the value for the config-flags charm option containing + Ceph configuration settings keyed by their section in ceph.conf. + """ + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if type(conf) != dict: + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + + return ceph_conf
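A sketch of CephConfContext in use; the config-flags value shown is an illustrative assumption:

    # With the charm option config-flags set to
    #   'global: {osd heartbeat interval: 5}'
    # a context restricted to the [global] section would be:
    ctxt = CephConfContext(permitted_sections=['global'])()
    # -> {'global': {'osd heartbeat interval': 5}}
    # Keys outside permitted_sections are logged and dropped.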
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 00000000..3a3f5146 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,88 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If a loopback device is not mapped to the file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in six.iteritems(loopback_devices()): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) + + +def is_mapped_loopback_device(device): + """ + Checks if a given device name is an existing/mapped loopback device. + :param device: str: Full path to the device (eg, /dev/loop1). + :returns: str: Path to the backing file if it is a loopback device, + empty string otherwise + """ + return loopback_devices().get(device, "") diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 00000000..34b5f71a --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,105 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume group associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. + ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. + ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect.
+ + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for l in pvd: + l = l.decode('UTF-8') + if l.strip().startswith('VG Name'): + vg = ' '.join(l.strip().split()[2:]) + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. + ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 64b2df55..e367e450 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -176,7 +176,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): + primary_group=None, secondary_groups=None, uid=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. @@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param bool system_user: Whether to create a login or system user :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created :returns: The password database entry struct, as returned by `pwd.getpwnam` """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) except KeyError: log('creating user {0}'.format(username)) cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) if system_user or password is None: cmd.append('--system') else: @@ -230,14 +236,58 @@ def user_exists(username): return user_exists -def add_group(group_name, system_group=False): - """Add a group to the system""" +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ try: group_info = grp.getgrnam(group_name) log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) if system_group: cmd.append('--system') else: diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index db0d86a2..68b0f94d 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -106,6 +106,14 @@ 'mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', } # The order of this list is very important. Handlers should be listed in from @@ -390,16 +398,13 @@ def install_remote(source, *args, **kwargs): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None for handler in handlers: try: - installed_to = handler.install(source, *args, **kwargs) + return handler.install(source, *args, **kwargs) except UnhandledSource as e: log('Install source attempt unsuccessful: {}'.format(e), level='WARNING') - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to + raise UnhandledSource("No handler found for source {}".format(source)) def install_from_config(config_var_name): diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index cafd27f7..b743753e 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -42,15 +42,23 @@ def can_handle(self, source): else: return True - def branch(self, source, dest): + def branch(self, source, dest, revno=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] if os.path.exists(dest): - check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] else: - check_call(['bzr', 'branch', source, dest]) + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_call(cmd) - def install(self, source, dest=None): + def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -59,10 +67,11 @@ def install(self, source, dest=None): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + try: - self.branch(source, dest_dir) + self.branch(source, 
dest_dir, revno) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d21c9c78..6b917d0c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -43,9 +43,6 @@ def __init__(self, series=None, openstack=None, source=None, self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" def get_logger(self, name="deployment-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -72,38 +69,34 @@ def _determine_branch_locations(self, other_services): self.log.info('OpenStackAmuletDeployment: determine branch locations') - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. - force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next # If a location has been explicitly set, use it if svc.get('location'): continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) return other_services From 5c1f2d7316dff7cf3b8c9b80ad88b4d36518d8fd Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 17 Jun 2016 11:37:15 +0100 Subject: [PATCH 1140/2699] Switch to using charm-store for amulet tests All OpenStack charms are now directly published to the charm store on landing; switch Amulet helper to resolve charms using the charm store rather than bzr branches, removing the lag between charm changes landing and being available for other charms to use for testing. This is also important for new layered charms where the charm must be build and published prior to being consumable. 
Change-Id: Ib0c1da50750027f343bd3b77dcd6b85a059435dd --- .../charmhelpers/contrib/openstack/utils.py | 69 +++++++++++++++++++ ceph-osd/hooks/charmhelpers/core/host.py | 56 ++++++++++++++- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 7 +- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 23 +++++-- .../contrib/openstack/amulet/deployment.py | 51 ++++++-------- 5 files changed, 162 insertions(+), 44 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index bd6efc48..53e58424 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,6 +51,7 @@ related_units, relation_ids, relation_set, + service_name, status_set, hook_name ) @@ -207,6 +208,27 @@ ]), } +GIT_DEFAULT_REPOS = { + 'requirements': 'git://github.com/openstack/requirements', + 'cinder': 'git://github.com/openstack/cinder', + 'glance': 'git://github.com/openstack/glance', + 'horizon': 'git://github.com/openstack/horizon', + 'keystone': 'git://github.com/openstack/keystone', + 'neutron': 'git://github.com/openstack/neutron', + 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', + 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', + 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', + 'nova': 'git://github.com/openstack/nova', +} + +GIT_DEFAULT_BRANCHES = { + 'icehouse': 'icehouse-eol', + 'kilo': 'stable/kilo', + 'liberty': 'stable/liberty', + 'mitaka': 'stable/mitaka', + 'master': 'master', +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -703,6 +725,53 @@ def git_install_requested(): requirements_dir = None +def git_default_repos(projects): + """ + Returns default repos if a default openstack-origin-git value is specified. + """ + service = service_name() + + for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + if projects == default: + + # add the requirements repo first + repo = { + 'name': 'requirements', + 'repository': GIT_DEFAULT_REPOS['requirements'], + 'branch': branch, + } + repos = [repo] + + # neutron and nova charms require some additional repos + if service == 'neutron': + for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + repo = { + 'name': svc, + 'repository': GIT_DEFAULT_REPOS[svc], + 'branch': branch, + } + repos.append(repo) + elif service == 'nova': + repo = { + 'name': 'neutron', + 'repository': GIT_DEFAULT_REPOS['neutron'], + 'branch': branch, + } + repos.append(repo) + + # finally add the current service's repo + repo = { + 'name': service, + 'repository': GIT_DEFAULT_REPOS[service], + 'branch': branch, + } + repos.append(repo) + + return yaml.dump(dict(repositories=repos)) + + return projects + + def _git_yaml_load(projects_yaml): """ Load the specified yaml into a dictionary. diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 64b2df55..e367e450 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -176,7 +176,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): + primary_group=None, secondary_groups=None, uid=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param bool system_user: Whether to create a login or system user :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created :returns: The password database entry struct, as returned by `pwd.getpwnam` """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) except KeyError: log('creating user {0}'.format(username)) cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) if system_user or password is None: cmd.append('--system') else: @@ -230,14 +236,58 @@ def user_exists(username): return user_exists -def add_group(group_name, system_group=False): - """Add a group to the system""" +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. + + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ try: group_info = grp.getgrnam(group_name) log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) if system_group: cmd.append('--system') else: diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index ad485ec8..68b0f94d 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -398,16 +398,13 @@ def install_remote(source, *args, **kwargs): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None for handler in handlers: try: - installed_to = handler.install(source, *args, **kwargs) + return handler.install(source, *args, **kwargs) except UnhandledSource as e: log('Install source attempt unsuccessful: {}'.format(e), level='WARNING') - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to + raise UnhandledSource("No handler found for source {}".format(source)) def install_from_config(config_var_name): diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index cafd27f7..b743753e 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -42,15 +42,23 @@ def can_handle(self, source): else: return True - def branch(self, source, dest): + def branch(self, source, dest, revno=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] if os.path.exists(dest): - check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] else: - check_call(['bzr', 'branch', source, dest]) + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_call(cmd) - def install(self, source, dest=None): + def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -59,10 +67,11 @@ def install(self, source, dest=None): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + try: - self.branch(source, dest_dir) + self.branch(source, dest_dir, revno) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d21c9c78..6b917d0c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -43,9 +43,6 @@ def __init__(self, series=None, openstack=None, source=None, self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" def get_logger(self, name="deployment-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -72,38 +69,34 @@ def _determine_branch_locations(self, other_services): self.log.info('OpenStackAmuletDeployment: determine branch locations') - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. 
- force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next # If a location has been explicitly set, use it if svc.get('location'): continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) return other_services From a1a98b78125fdb3945eb09ef0fe053b2a3276f0a Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 17 Jun 2016 11:38:33 +0100 Subject: [PATCH 1141/2699] Switch to using charm-store for amulet tests All OpenStack charms are now directly published to the charm store on landing; switch the Amulet helper to resolve charms using the charm store rather than bzr branches, removing the lag between charm changes landing and being available for other charms to use for testing. This is also important for new layered charms where the charm must be built and published prior to being consumable.
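For illustration, the charm store location scheme adopted here can be
sketched as follows. This is a minimal standalone sketch, not an excerpt:
the charm_store_location name is hypothetical, while the cs: URL formats
are taken from _determine_branch_locations above.

    # Sketch of how test charm locations resolve after this change.
    # charm_store_location is an illustrative name, not a real helper.
    def charm_store_location(name, series, stable=True):
        if stable:
            # Stable charms resolve from the series-qualified store path.
            return 'cs:{}/{}'.format(series, name)
        # Development charms resolve from the openstack-charmers-next
        # namespace instead of lp: bzr branches.
        return 'cs:~openstack-charmers-next/{}/{}'.format(series, name)

    # Example: charm_store_location('keystone', 'xenial', stable=False)
    # returns 'cs:~openstack-charmers-next/xenial/keystone'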
Change-Id: Ibbbd4148e2fc8c8a8b2126f9ca7be5c36187d75f --- .../charmhelpers/contrib/hahelpers/cluster.py | 69 +++++++++-- .../contrib/openstack/amulet/deployment.py | 51 ++++---- .../contrib/openstack/ha/__init__.py | 0 .../contrib/openstack/ha/utils.py | 111 ++++++++++++++++++ .../charmhelpers/contrib/openstack/ip.py | 11 +- .../charmhelpers/contrib/openstack/utils.py | 69 +++++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 56 ++++++++- .../hooks/charmhelpers/fetch/__init__.py | 7 +- .../hooks/charmhelpers/fetch/bzrurl.py | 23 ++-- .../contrib/openstack/amulet/deployment.py | 51 ++++---- 10 files changed, 361 insertions(+), 87 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index aa0b515d..92325a96 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -41,10 +41,11 @@ relation_get, config as config_get, INFO, - ERROR, + DEBUG, WARNING, unit_get, - is_leader as juju_is_leader + is_leader as juju_is_leader, + status_set, ) from charmhelpers.core.decorators import ( retry_on_exception, @@ -60,6 +61,10 @@ class HAIncompleteConfig(Exception): pass +class HAIncorrectConfig(Exception): + pass + + class CRMResourceNotFound(Exception): pass @@ -274,27 +279,71 @@ def get_hacluster_config(exclude_keys=None): Obtains all relevant configuration from charm configuration required for initiating a relation to hacluster: - ha-bindiface, ha-mcastport, vip + ha-bindiface, ha-mcastport, vip, os-internal-hostname, + os-admin-hostname, os-public-hostname param: exclude_keys: list of setting key(s) to be excluded. returns: dict: A dict containing settings keyed by setting name. - raises: HAIncompleteConfig if settings are missing. + raises: HAIncompleteConfig if settings are missing or incorrect. ''' - settings = ['ha-bindiface', 'ha-mcastport', 'vip'] + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', + 'os-admin-hostname', 'os-public-hostname'] conf = {} for setting in settings: if exclude_keys and setting in exclude_keys: continue conf[setting] = config_get(setting) - missing = [] - [missing.append(s) for s, v in six.iteritems(conf) if v is None] - if missing: - log('Insufficient config data to configure hacluster.', level=ERROR) - raise HAIncompleteConfig + + if not valid_hacluster_config(): + raise HAIncorrectConfig('Insufficient or incorrect config data to ' + 'configure hacluster.') return conf +def valid_hacluster_config(): + ''' + Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname + must be set. + + Note: ha-bindiface and ha-macastport both have defaults and will always + be set. We only care that either vip or dns-ha is set. + + :returns: boolean: valid config returns true. + raises: HAIncompatibileConfig if settings conflict. + raises: HAIncompleteConfig if settings are missing. 
+ ''' + vip = config_get('vip') + dns = config_get('dns-ha') + if not(bool(vip) ^ bool(dns)): + msg = ('HA: Either vip or dns-ha must be set but not both in order to ' + 'use high availability') + status_set('blocked', msg) + raise HAIncorrectConfig(msg) + + # If dns-ha then one of os-*-hostname must be set + if dns: + dns_settings = ['os-internal-hostname', 'os-admin-hostname', + 'os-public-hostname'] + # At this point it is unknown if one or all of the possible + # network spaces are in HA. Validate at least one is set which is + # the minimum required. + for setting in dns_settings: + if config_get(setting): + log('DNS HA: At least one hostname is set {}: {}' + ''.format(setting, config_get(setting)), + level=DEBUG) + return True + + msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' + 'DNS HA') + status_set('blocked', msg) + raise HAIncompleteConfig(msg) + + log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) + return True + + def canonical_url(configs, vip_setting='vip'): ''' Returns the correct HTTP URL to this host given the state of HTTPS diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d21c9c78..6b917d0c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -43,9 +43,6 @@ def __init__(self, series=None, openstack=None, source=None, self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" def get_logger(self, name="deployment-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -72,38 +69,34 @@ def _determine_branch_locations(self, other_services): self.log.info('OpenStackAmuletDeployment: determine branch locations') - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. 
- force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next # If a location has been explicitly set, use it if svc.get('location'): continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) return other_services diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..34064237 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,111 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import re + +from charmhelpers.core.hookenv import ( + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, +) + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Check for os-*-hostname settings and update resource dictionaries for + the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. 
+ Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + networkspace = m.group(1) + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine network space name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + resources[hostname_key] = crm_ocf + resource_params[hostname_key] = ( + 'params fqdn="{}" ip_address="{}" ' + ''.format(hostname, resolve_address(endpoint_type=networkspace, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_set(relation_id=relation_id, groups={ + 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 532a1dc1..7875b997 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -109,7 +109,7 @@ def _get_address_override(endpoint_type=PUBLIC): return addr_override.format(service_name=service_name()) -def resolve_address(endpoint_type=PUBLIC): +def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. If unit is clustered with vip(s) and has net splits defined, return vip on @@ -119,10 +119,13 @@ def resolve_address(endpoint_type=PUBLIC): split if one is configured, or a Juju 2.0 extra-binding has been used. 
:param endpoint_type: Network endpoing type + :param override: Accept hostname overrides or not """ - resolved_address = _get_address_override(endpoint_type) - if resolved_address: - return resolved_address + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address vips = config('vip') if vips: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index bd6efc48..53e58424 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,6 +51,7 @@ related_units, relation_ids, relation_set, + service_name, status_set, hook_name ) @@ -207,6 +208,27 @@ ]), } +GIT_DEFAULT_REPOS = { + 'requirements': 'git://github.com/openstack/requirements', + 'cinder': 'git://github.com/openstack/cinder', + 'glance': 'git://github.com/openstack/glance', + 'horizon': 'git://github.com/openstack/horizon', + 'keystone': 'git://github.com/openstack/keystone', + 'neutron': 'git://github.com/openstack/neutron', + 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', + 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', + 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', + 'nova': 'git://github.com/openstack/nova', +} + +GIT_DEFAULT_BRANCHES = { + 'icehouse': 'icehouse-eol', + 'kilo': 'stable/kilo', + 'liberty': 'stable/liberty', + 'mitaka': 'stable/mitaka', + 'master': 'master', +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -703,6 +725,53 @@ def git_install_requested(): requirements_dir = None +def git_default_repos(projects): + """ + Returns default repos if a default openstack-origin-git value is specified. + """ + service = service_name() + + for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + if projects == default: + + # add the requirements repo first + repo = { + 'name': 'requirements', + 'repository': GIT_DEFAULT_REPOS['requirements'], + 'branch': branch, + } + repos = [repo] + + # neutron and nova charms require some additional repos + if service == 'neutron': + for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + repo = { + 'name': svc, + 'repository': GIT_DEFAULT_REPOS[svc], + 'branch': branch, + } + repos.append(repo) + elif service == 'nova': + repo = { + 'name': 'neutron', + 'repository': GIT_DEFAULT_REPOS['neutron'], + 'branch': branch, + } + repos.append(repo) + + # finally add the current service's repo + repo = { + 'name': service, + 'repository': GIT_DEFAULT_REPOS[service], + 'branch': branch, + } + repos.append(repo) + + return yaml.dump(dict(repositories=repos)) + + return projects + + def _git_yaml_load(projects_yaml): """ Load the specified yaml into a dictionary. diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 64b2df55..e367e450 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -176,7 +176,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): + primary_group=None, secondary_groups=None, uid=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param bool system_user: Whether to create a login or system user :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created :returns: The password database entry struct, as returned by `pwd.getpwnam` """ try: user_info = pwd.getpwnam(username) log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) except KeyError: log('creating user {0}'.format(username)) cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) if system_user or password is None: cmd.append('--system') else: @@ -230,14 +236,58 @@ def user_exists(username): return user_exists -def add_group(group_name, system_group=False): - """Add a group to the system""" +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. + + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ try: group_info = grp.getgrnam(group_name) log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) if system_group: cmd.append('--system') else: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index ad485ec8..68b0f94d 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -398,16 +398,13 @@ def install_remote(source, *args, **kwargs): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. 
handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None for handler in handlers: try: - installed_to = handler.install(source, *args, **kwargs) + return handler.install(source, *args, **kwargs) except UnhandledSource as e: log('Install source attempt unsuccessful: {}'.format(e), level='WARNING') - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to + raise UnhandledSource("No handler found for source {}".format(source)) def install_from_config(config_var_name): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index cafd27f7..b743753e 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -42,15 +42,23 @@ def can_handle(self, source): else: return True - def branch(self, source, dest): + def branch(self, source, dest, revno=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] if os.path.exists(dest): - check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] else: - check_call(['bzr', 'branch', source, dest]) + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_call(cmd) - def install(self, source, dest=None): + def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -59,10 +67,11 @@ def install(self, source, dest=None): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + try: - self.branch(source, dest_dir) + self.branch(source, dest_dir, revno) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d21c9c78..6b917d0c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -43,9 +43,6 @@ def __init__(self, series=None, openstack=None, source=None, self.openstack = openstack self.source = source self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" def get_logger(self, name="deployment-logger", level=logging.DEBUG): """Get a logger object that will log to stdout.""" @@ -72,38 +69,34 @@ def _determine_branch_locations(self, other_services): self.log.info('OpenStackAmuletDeployment: determine branch locations') - # Charms outside the lp:~openstack-charmers namespace - base_charms = ['mysql', 'mongodb', 'nrpe'] - - # Force these charms to current series even when using an older series. - # ie. Use trusty/nrpe even when series is precise, as the P charm - # does not possess the necessary external master config and hooks. 
- force_series_current = ['nrpe'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty'], + } for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next # If a location has been explicitly set, use it if svc.get('location'): continue - if self.stable: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) else: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) return other_services From 3b76b4564948c02cce0713dc3305e24403f9b9c1 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 10 Jun 2016 09:26:33 -0700 Subject: [PATCH 1142/2699] Increase rgw init timeout to 1200 seconds This changeset increases the initialization timeout for rgw instances from 300 -> 1200 seconds. This change decreases the chance that the rgw instance will time out prior to OSDs actually joining the Ceph storage cluster, as usable storage is required for this operation. Change-Id: I6c5442edc2fb25ff37d7a4bd0bc49aabd6f2d24c Closes-Bug: 1577519 --- ceph-radosgw/templates/ceph.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index c37d3da6..e35fb2d0 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -24,6 +24,7 @@ ms bind ipv6 = true [client.radosgw.gateway] host = {{ hostname }} +rgw init timeout = 1200 keyring = /etc/ceph/keyring.rados.gateway rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log @@ -51,4 +52,4 @@ nss db path = /var/lib/ceph/nss {% for key in client_radosgw_gateway -%} {{ key }} = {{ client_radosgw_gateway[key] }} {% endfor %} -{% endif %} \ No newline at end of file +{% endif %} From 956b30a0d90c5ea6846b710e809cc717e558a331 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 22 Jun 2016 15:42:29 +0100 Subject: [PATCH 1143/2699] Re-license charm as Apache-2.0 All contributions to this charm were made under Canonical copyright; switch to the Apache-2.0 license as agreed so we can move forward with official project status. In order to make this change, this commit also drops the inclusion of upstart configurations for very early versions of Ceph (argonaut), as they are no longer required.
Change-Id: I9609dd79855b545a2c5adc12b7ac573c6f246d48 --- ceph-osd/.gitignore | 1 + ceph-osd/LICENSE | 202 ++++++++++++++++++ ceph-osd/actions/__init__.py | 15 +- ceph-osd/actions/pause_resume.py | 15 ++ ceph-osd/actions/replace_osd.py | 16 +- ceph-osd/copyright | 19 +- ceph-osd/files/upstart/ceph-create-keys.conf | 8 - ceph-osd/files/upstart/ceph-hotplug.conf | 11 - .../files/upstart/ceph-mon-all-starter.conf | 20 -- ceph-osd/files/upstart/ceph-mon-all.conf | 4 - ceph-osd/files/upstart/ceph-mon.conf | 24 --- ceph-osd/files/upstart/ceph-osd.conf | 37 ---- ceph-osd/hooks/ceph.py | 15 +- ceph-osd/hooks/ceph_hooks.py | 26 +-- ceph-osd/hooks/utils.py | 15 +- ceph-osd/tests/014-basic-precise-icehouse | 14 ++ ceph-osd/tests/015-basic-trusty-icehouse | 14 ++ ceph-osd/tests/016-basic-trusty-juno | 14 ++ ceph-osd/tests/017-basic-trusty-kilo | 14 ++ ceph-osd/tests/018-basic-trusty-liberty | 14 ++ ceph-osd/tests/019-basic-trusty-mitaka | 14 ++ ceph-osd/tests/020-basic-wily-liberty | 14 ++ ceph-osd/tests/021-basic-xenial-mitaka | 14 ++ ceph-osd/tests/basic_deployment.py | 14 ++ ceph-osd/unit_tests/__init__.py | 14 ++ .../unit_tests/test_actions_pause_resume.py | 14 ++ ceph-osd/unit_tests/test_ceph_hooks.py | 14 ++ ceph-osd/unit_tests/test_ceph_networking.py | 14 ++ ceph-osd/unit_tests/test_config.py | 14 ++ ceph-osd/unit_tests/test_replace_osd.py | 14 ++ ceph-osd/unit_tests/test_status.py | 14 ++ ceph-osd/unit_tests/test_upgrade_roll.py | 18 +- ceph-osd/unit_tests/test_utils.py | 14 ++ 33 files changed, 540 insertions(+), 144 deletions(-) create mode 100644 ceph-osd/LICENSE delete mode 100644 ceph-osd/files/upstart/ceph-create-keys.conf delete mode 100644 ceph-osd/files/upstart/ceph-hotplug.conf delete mode 100644 ceph-osd/files/upstart/ceph-mon-all-starter.conf delete mode 100644 ceph-osd/files/upstart/ceph-mon-all.conf delete mode 100644 ceph-osd/files/upstart/ceph-mon.conf delete mode 100644 ceph-osd/files/upstart/ceph-osd.conf diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index 31c3f033..32a80896 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -6,3 +6,4 @@ bin *.sw[nop] .idea *.pyc +.unit-state.db diff --git a/ceph-osd/LICENSE b/ceph-osd/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-osd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ceph-osd/actions/__init__.py b/ceph-osd/actions/__init__.py index ff2381cc..b7fe4e1b 100644 --- a/ceph-osd/actions/__init__.py +++ b/ceph-osd/actions/__init__.py @@ -1,3 +1,16 @@ -__author__ = 'chris' +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import sys sys.path.append('hooks') diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index 68149f34..f918994a 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # pause/resume actions file. import os diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index fd4264f4..df9c7583 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -1,9 +1,21 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import action_get, log, config, action_fail -__author__ = 'chris' - import os import sys diff --git a/ceph-osd/copyright b/ceph-osd/copyright index bdfae0e0..c801b143 100644 --- a/ceph-osd/copyright +++ b/ceph-osd/copyright @@ -1,15 +1,16 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 -Comment: The licensing of this charm is aligned to upstream ceph - as the ceph upstart integration is distributed as part of the charm. Files: * Copyright: 2012, Canonical Ltd. -License: LGPL-2.1 +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at -Files: files/upstart/* -Copyright: 2004-2010 by Sage Weil -License: LGPL-2.1 + http://www.apache.org/licenses/LICENSE-2.0 -License: LGPL-2.1 - On Debian GNU/Linux system you can find the complete text of the - LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
diff --git a/ceph-osd/files/upstart/ceph-create-keys.conf b/ceph-osd/files/upstart/ceph-create-keys.conf deleted file mode 100644 index 6fb45818..00000000 --- a/ceph-osd/files/upstart/ceph-create-keys.conf +++ /dev/null @@ -1,8 +0,0 @@ -description "Create Ceph client.admin key when possible" - -start on started ceph-mon -stop on runlevel [!2345] - -task - -exec /usr/sbin/ceph-create-keys --cluster="${cluster:-ceph}" -i "${id:-$(hostname)}" diff --git a/ceph-osd/files/upstart/ceph-hotplug.conf b/ceph-osd/files/upstart/ceph-hotplug.conf deleted file mode 100644 index 70204529..00000000 --- a/ceph-osd/files/upstart/ceph-hotplug.conf +++ /dev/null @@ -1,11 +0,0 @@ -description "Ceph hotplug" - -start on block-device-added \ - DEVTYPE=partition \ - ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d -stop on runlevel [!2345] - -task -instance $DEVNAME - -exec /usr/sbin/ceph-disk-activate --mount -- "$DEVNAME" diff --git a/ceph-osd/files/upstart/ceph-mon-all-starter.conf b/ceph-osd/files/upstart/ceph-mon-all-starter.conf deleted file mode 100644 index f7188cb7..00000000 --- a/ceph-osd/files/upstart/ceph-mon-all-starter.conf +++ /dev/null @@ -1,20 +0,0 @@ -description "Ceph MON (start all instances)" - -start on starting ceph-mon-all -stop on runlevel [!2345] - -task - -script - set -e - # TODO what's the valid charset for cluster names and mon ids? - find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \ - | while read f; do - if [ -e "/var/lib/ceph/mon/$f/done" ]; then - cluster="${f%%-*}" - id="${f#*-}" - - initctl emit ceph-mon cluster="$cluster" id="$id" - fi - done -end script diff --git a/ceph-osd/files/upstart/ceph-mon-all.conf b/ceph-osd/files/upstart/ceph-mon-all.conf deleted file mode 100644 index 006f2f20..00000000 --- a/ceph-osd/files/upstart/ceph-mon-all.conf +++ /dev/null @@ -1,4 +0,0 @@ -description "Ceph monitor (all instances)" - -start on (local-filesystems and net-device-up IFACE!=lo) -stop on runlevel [!2345] diff --git a/ceph-osd/files/upstart/ceph-mon.conf b/ceph-osd/files/upstart/ceph-mon.conf deleted file mode 100644 index 2cf7bfa5..00000000 --- a/ceph-osd/files/upstart/ceph-mon.conf +++ /dev/null @@ -1,24 +0,0 @@ -description "Ceph MON" - -start on ceph-mon -stop on runlevel [!2345] or stopping ceph-mon-all - -respawn -respawn limit 5 30 - -pre-start script - set -e - test -x /usr/bin/ceph-mon || { stop; exit 0; } - test -d "/var/lib/ceph/mon/${cluster:-ceph}-$id" || { stop; exit 0; } - - install -d -m0755 /var/run/ceph -end script - -instance ${cluster:-ceph}/$id -export cluster -export id - -# this breaks oneiric -#usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" - -exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-osd/files/upstart/ceph-osd.conf b/ceph-osd/files/upstart/ceph-osd.conf deleted file mode 100644 index 119ad000..00000000 --- a/ceph-osd/files/upstart/ceph-osd.conf +++ /dev/null @@ -1,37 +0,0 @@ -description "Ceph OSD" - -start on ceph-osd -stop on runlevel [!2345] - -respawn -respawn limit 5 30 - -pre-start script - set -e - test -x /usr/bin/ceph-osd || { stop; exit 0; } - test -d "/var/lib/ceph/osd/${cluster:-ceph}-$id" || { stop; exit 0; } - - install -d -m0755 /var/run/ceph - - # update location in crush; put in some suitable defaults on the - # command line, ceph.conf can override what it wants - location="$(ceph-conf --cluster="${cluster:-ceph}" --name="osd.$id" --lookup osd_crush_location || :)" - weight="$(ceph-conf 
--cluster="$cluster" --name="osd.$id" --lookup osd_crush_weight || :)" - ceph \ - --cluster="${cluster:-ceph}" \ - --name="osd.$id" \ - --keyring="/var/lib/ceph/osd/${cluster:-ceph}-$id/keyring" \ - osd crush set \ - -- \ - "$id" "osd.$id" "${weight:-1}" \ - pool=default \ - host="$(hostname -s)" \ - $location \ - || : -end script - -instance ${cluster:-ceph}/$id -export cluster -export id - -exec /usr/bin/ceph-osd --cluster="${cluster:-ceph}" -i "$id" -f diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index d51ea400..22e6c9af 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -1,10 +1,17 @@ +# Copyright 2016 Canonical Ltd # -# Copyright 2012 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# James Page -# Paul Collins +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import ctypes import ctypes.util import errno diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 4ab41bc0..e7f1bc50 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -1,16 +1,21 @@ #!/usr/bin/python - # -# Copyright 2012 Canonical Ltd. +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# James Page +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -import glob import os import random -import shutil import subprocess import sys import tempfile @@ -265,20 +270,12 @@ def upgrade_osd(): sys.exit(1) -def install_upstart_scripts(): - # Only install upstart configurations for older versions - if cmp_pkgrevno('ceph', "0.55.1") < 0: - for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') - - @hooks.hook('install.real') @harden() def install(): add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) - install_upstart_scripts() def az_info(): @@ -540,7 +537,6 @@ def mon_relation(): def upgrade_charm(): if get_fsid() and get_auth(): emit_cephconf() - install_upstart_scripts() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index f4069bef..2acad66d 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -1,11 +1,16 @@ - +# Copyright 2016 Canonical Ltd # -# Copyright 2012 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# Authors: -# James Page -# Paul Collins +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import socket import re diff --git a/ceph-osd/tests/014-basic-precise-icehouse b/ceph-osd/tests/014-basic-precise-icehouse index 13aee613..7f921234 100755 --- a/ceph-osd/tests/014-basic-precise-icehouse +++ b/ceph-osd/tests/014-basic-precise-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on precise-icehouse.""" diff --git a/ceph-osd/tests/015-basic-trusty-icehouse b/ceph-osd/tests/015-basic-trusty-icehouse index 9079f5e3..d857e215 100755 --- a/ceph-osd/tests/015-basic-trusty-icehouse +++ b/ceph-osd/tests/015-basic-trusty-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on trusty-icehouse.""" diff --git a/ceph-osd/tests/016-basic-trusty-juno b/ceph-osd/tests/016-basic-trusty-juno index 5606a174..6245f584 100755 --- a/ceph-osd/tests/016-basic-trusty-juno +++ b/ceph-osd/tests/016-basic-trusty-juno @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Amulet tests on a basic ceph-osd deployment on trusty-juno.""" diff --git a/ceph-osd/tests/017-basic-trusty-kilo b/ceph-osd/tests/017-basic-trusty-kilo index 5bb258e8..a2657fe4 100755 --- a/ceph-osd/tests/017-basic-trusty-kilo +++ b/ceph-osd/tests/017-basic-trusty-kilo @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on trusty-kilo.""" diff --git a/ceph-osd/tests/018-basic-trusty-liberty b/ceph-osd/tests/018-basic-trusty-liberty index 98eb440f..004a1bbb 100755 --- a/ceph-osd/tests/018-basic-trusty-liberty +++ b/ceph-osd/tests/018-basic-trusty-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on trusty-liberty.""" diff --git a/ceph-osd/tests/019-basic-trusty-mitaka b/ceph-osd/tests/019-basic-trusty-mitaka index 537bfa42..836a8253 100755 --- a/ceph-osd/tests/019-basic-trusty-mitaka +++ b/ceph-osd/tests/019-basic-trusty-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on trusty-mitaka.""" diff --git a/ceph-osd/tests/020-basic-wily-liberty b/ceph-osd/tests/020-basic-wily-liberty index 5e943af5..f84097c3 100755 --- a/ceph-osd/tests/020-basic-wily-liberty +++ b/ceph-osd/tests/020-basic-wily-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on wily-liberty.""" diff --git a/ceph-osd/tests/021-basic-xenial-mitaka b/ceph-osd/tests/021-basic-xenial-mitaka index 07741bdd..4f9e3b41 100755 --- a/ceph-osd/tests/021-basic-xenial-mitaka +++ b/ceph-osd/tests/021-basic-xenial-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-osd deployment on xenial-mitaka.""" diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index dbbf3db5..8f6ac398 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import time diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index 466d7781..d7a4ace1 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') sys.path.append('actions') diff --git a/ceph-osd/unit_tests/test_actions_pause_resume.py b/ceph-osd/unit_tests/test_actions_pause_resume.py index 43c3aafc..1d2fe072 100644 --- a/ceph-osd/unit_tests/test_actions_pause_resume.py +++ b/ceph-osd/unit_tests/test_actions_pause_resume.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import mock import sys diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index f0608128..ecc12a7a 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import copy import unittest diff --git a/ceph-osd/unit_tests/test_ceph_networking.py b/ceph-osd/unit_tests/test_ceph_networking.py index ae3a7ff5..168e82fc 100644 --- a/ceph-osd/unit_tests/test_ceph_networking.py +++ b/ceph-osd/unit_tests/test_ceph_networking.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import test_utils import charmhelpers.core.hookenv as hookenv import utils as ceph_utils diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index e635a28c..b4a85722 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os.path import shutil import tempfile diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index 827a0ff8..dd3e9c11 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import errno import posix diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index f7e19b83..c5669a10 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import mock import test_utils diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index e76543cc..a3535b5c 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -1,10 +1,20 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import time -__author__ = 'chris' from mock import patch, call, MagicMock -import sys - -sys.path.append('/home/chris/repos/ceph-osd/hooks') from ceph import CrushLocation diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index 33333ce7..ceb34cab 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import logging import unittest import os From 91e2ea87af134859a1e0bb26f376f4efd165f37c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 22 Jun 2016 15:43:45 +0100 Subject: [PATCH 1144/2699] Re-license charm as Apache-2.0 All contributions to this charm were made under Canonical copyright; switch to Apache-2.0 license as agreed so we can move forward with official project status.
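A bulk re-license like this is usually applied by a small script rather than by hand. The sketch below shows one way the header could be prepended to every Python file while preserving shebang lines; the abbreviated header text, the file matching, and the in-place rewrite are illustrative assumptions, not tooling shipped with this patch.

```python
#!/usr/bin/python
# Illustrative sketch (assumed tooling, not from this patch): prepend an
# abbreviated Apache-2.0 header to each .py file, keeping any shebang first.
import fnmatch
import os

HEADER = ("# Copyright 2016 Canonical Ltd\n"
          "#\n"
          "# Licensed under the Apache License, Version 2.0 (the \"License\");\n"
          "# http://www.apache.org/licenses/LICENSE-2.0\n")

for root, _, files in os.walk('.'):
    for name in fnmatch.filter(files, '*.py'):
        path = os.path.join(root, name)
        with open(path) as f:
            lines = f.readlines()
        if any('Apache License' in line for line in lines[:20]):
            continue  # header already present, skip this file
        if lines and lines[0].startswith('#!'):
            body = [lines[0], HEADER] + lines[1:]  # keep the shebang on top
        else:
            body = [HEADER] + lines
        with open(path, 'w') as f:
            f.writelines(body)
```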
Change-Id: I2fdcac3d1b45cf48dec52bc15acfee13107a9bae --- ceph-radosgw/LICENSE | 202 ++++++++++++++++++ ceph-radosgw/actions/actions.py | 14 ++ ceph-radosgw/copyright | 17 +- ceph-radosgw/hooks/ceph.py | 15 +- ceph-radosgw/hooks/ceph_radosgw_context.py | 14 ++ ceph-radosgw/hooks/hooks.py | 15 +- ceph-radosgw/hooks/utils.py | 15 +- ceph-radosgw/tests/014-basic-precise-icehouse | 14 ++ ceph-radosgw/tests/015-basic-trusty-icehouse | 14 ++ ceph-radosgw/tests/016-basic-trusty-juno | 14 ++ ceph-radosgw/tests/017-basic-trusty-kilo | 14 ++ ceph-radosgw/tests/018-basic-trusty-liberty | 14 ++ ceph-radosgw/tests/019-basic-trusty-mitaka | 14 ++ ceph-radosgw/tests/020-basic-wily-liberty | 14 ++ ceph-radosgw/tests/021-basic-xenial-mitaka | 14 ++ ceph-radosgw/tests/basic_deployment.py | 14 ++ ceph-radosgw/unit_tests/__init__.py | 14 ++ ceph-radosgw/unit_tests/test_actions.py | 14 ++ ceph-radosgw/unit_tests/test_ceph.py | 14 ++ .../unit_tests/test_ceph_radosgw_context.py | 14 ++ .../unit_tests/test_ceph_radosgw_utils.py | 14 ++ ceph-radosgw/unit_tests/test_hooks.py | 14 ++ ceph-radosgw/unit_tests/test_utils.py | 14 ++ 23 files changed, 498 insertions(+), 18 deletions(-) create mode 100644 ceph-radosgw/LICENSE diff --git a/ceph-radosgw/LICENSE b/ceph-radosgw/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-radosgw/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py index e2e67471..7a477b78 100755 --- a/ceph-radosgw/actions/actions.py +++ b/ceph-radosgw/actions/actions.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os import sys diff --git a/ceph-radosgw/copyright b/ceph-radosgw/copyright index f35c8617..e0dee89e 100644 --- a/ceph-radosgw/copyright +++ b/ceph-radosgw/copyright @@ -1,9 +1,16 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 Files: * -Copyright: 2012, Canonical Ltd. -License: LGPL-2.1 +Copyright: Copyright 2011, Canonical Ltd., All Rights Reserved. +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at -License: LGPL-2.1 - On Debian GNU/Linux system you can find the complete text of the - LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 0f357231..2f095a3d 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -1,10 +1,17 @@ # -# Copyright 2012 Canonical Ltd. +# Copyright 2016 Canonical Ltd # -# Authors: -# James Page -# Paul Collins +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import json import os diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 179e615e..389538cc 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import re import socket diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 48013a39..940c7625 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -1,11 +1,18 @@ #!/usr/bin/python # -# Copyright 2016 Canonical Ltd. +# Copyright 2016 Canonical Ltd # -# Authors: -# James Page -# Edward Hope-Morley +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import subprocess diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 69f39634..b87dd0ca 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -1,11 +1,16 @@ +# Copyright 2016 Canonical Ltd # -# Copyright 2016 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# James Page -# Paul Collins -# Edward Hope-Morley +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-radosgw/tests/014-basic-precise-icehouse b/ceph-radosgw/tests/014-basic-precise-icehouse index 8a3ab031..4acef0d8 100755 --- a/ceph-radosgw/tests/014-basic-precise-icehouse +++ b/ceph-radosgw/tests/014-basic-precise-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on precise-icehouse.""" diff --git a/ceph-radosgw/tests/015-basic-trusty-icehouse b/ceph-radosgw/tests/015-basic-trusty-icehouse index 34588d2f..5829eef8 100755 --- a/ceph-radosgw/tests/015-basic-trusty-icehouse +++ b/ceph-radosgw/tests/015-basic-trusty-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Amulet tests on a basic ceph-radosgw deployment on trusty-icehouse.""" diff --git a/ceph-radosgw/tests/016-basic-trusty-juno b/ceph-radosgw/tests/016-basic-trusty-juno index d5de9b14..d95bebc6 100755 --- a/ceph-radosgw/tests/016-basic-trusty-juno +++ b/ceph-radosgw/tests/016-basic-trusty-juno @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on trusty-juno.""" diff --git a/ceph-radosgw/tests/017-basic-trusty-kilo b/ceph-radosgw/tests/017-basic-trusty-kilo index 3335b188..30c18d19 100755 --- a/ceph-radosgw/tests/017-basic-trusty-kilo +++ b/ceph-radosgw/tests/017-basic-trusty-kilo @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on trusty-kilo.""" diff --git a/ceph-radosgw/tests/018-basic-trusty-liberty b/ceph-radosgw/tests/018-basic-trusty-liberty index 8093f583..9a4dae35 100755 --- a/ceph-radosgw/tests/018-basic-trusty-liberty +++ b/ceph-radosgw/tests/018-basic-trusty-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on trusty-liberty.""" diff --git a/ceph-radosgw/tests/019-basic-trusty-mitaka b/ceph-radosgw/tests/019-basic-trusty-mitaka index 7f4eca48..b53a0fe1 100755 --- a/ceph-radosgw/tests/019-basic-trusty-mitaka +++ b/ceph-radosgw/tests/019-basic-trusty-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on trusty-mitaka.""" diff --git a/ceph-radosgw/tests/020-basic-wily-liberty b/ceph-radosgw/tests/020-basic-wily-liberty index 05c7bf23..089140dc 100755 --- a/ceph-radosgw/tests/020-basic-wily-liberty +++ b/ceph-radosgw/tests/020-basic-wily-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on wily-liberty.""" diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/021-basic-xenial-mitaka index 44130bb5..4de2f125 100755 --- a/ceph-radosgw/tests/021-basic-xenial-mitaka +++ b/ceph-radosgw/tests/021-basic-xenial-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph-radosgw deployment on xenial-mitaka.""" diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 5b473058..0c45e6ac 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import amulet import subprocess diff --git a/ceph-radosgw/unit_tests/__init__.py b/ceph-radosgw/unit_tests/__init__.py index 43aa3614..184cf3d8 100644 --- a/ceph-radosgw/unit_tests/__init__.py +++ b/ceph-radosgw/unit_tests/__init__.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('actions/') diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index 0597b9b6..7d02cb07 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import mock from mock import patch diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 7e9bdde4..e8e608bf 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys from mock import patch, call, MagicMock diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 2942a0ed..bdbdc965 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from mock import patch import ceph_radosgw_context as context diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 1e1daf72..205b648f 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys from mock import ( diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index b698a72d..99b6cfd3 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from mock import ( call, patch, diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index 9c3cb38a..e80722d4 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import logging import os import unittest From 11d113970a60ac473ae1c1e3b6dec67fe5f0c919 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 28 Jun 2016 08:02:36 -0400 Subject: [PATCH 1145/2699] document this thing --- ceph-proxy/README.md | 94 ++++++-------------------------------------- 1 file changed, 12 insertions(+), 82 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 5d66b597..9421e43c 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -3,103 +3,33 @@ Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -This charm deploys a Ceph cluster. -juju +This charm allows connecting an existing Ceph deployment with a Juju environment. # Usage -The ceph charm has two pieces of mandatory configuration for which no defaults -are provided. 
You _must_ set these configuration options before deployment or the charm will not work: +Your config.yaml needs to provide the monitor-hosts and fsid options like below: - fsid: - uuid specific to a ceph cluster used to ensure that different - clusters don't get mixed up - use `uuid` to generate one. +`config.yaml`: +```yaml +ceph-proxy: + monitor-hosts: IP_ADDRESS:PORT IP ADDRESS:PORT + fsid: FSID +``` - monitor-secret: - a ceph generated key used by the daemons that manage to cluster - to control security. You can use the ceph-authtool command to - generate one: +You must then provide this configuration to the new deployment: `juju deploy ceph-proxy -c config.yaml`. - ceph-authtool /dev/stdout --name=mon. --gen-key - -These two pieces of configuration must NOT be changed post bootstrap; attempting -to do this will cause a reconfiguration error and new service units will not join -the existing ceph cluster. - -At a minimum you must provide a juju config file during initial deployment -with the fsid and monitor-secret options (contents of cepy.yaml below): - - ceph: - fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 - monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== - -Boot things up by using: - - juju deploy -n 3 --config ceph.yaml ceph - -By default the ceph cluster will not bootstrap until 3 service units have been -deployed and started; this is to ensure that a quorum is achieved prior to adding -storage devices. - -## Actions - -This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintainance on a machine. to pause or resume, call: - -`juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` - -## Scale Out Usage - -You can use the Ceph OSD and Ceph Radosgw charms: - -- [Ceph OSD](https://jujucharms.com/precise/ceph-osd) -- [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) - -## Network Space support - -This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. - -Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: - - juju deploy ceph-mon --bind "public=data-space cluster=cluster-space" - -alternatively these can also be provided as part of a Juju native bundle configuration: - - ceph-mon: - charm: cs:xenial/ceph-mon - num_units: 1 - bindings: - public: data-space - cluster: cluster-space - -Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options effects network traffic within a Ceph deployment. - -**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. - -**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +This charm noes NOT insert itself between the clusters, but merely makes the external cluster available through Juju's environment by exposing the same relations that the existing ceph charms do. 
# Contact Information ## Authors -- Paul Collins , -- James Page +- Chris MacNaughton -Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph/+filebug) +Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph-proxy/+filebug) ## Ceph - [Ceph website](http://ceph.com) - [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) - [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) - -# Technical Footnotes - -This charm uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected -a different strategy to form the monitor cluster. Since we don't know the -names *or* addresses of the machines in advance, we use the _relation-joined_ -hook to wait for all three nodes to come up, and then write their addresses -to ceph.conf in the "mon host" parameter. After we initialize the monitor -cluster a quorum forms quickly, and OSD bringup proceeds. - -See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. From abe01c01dfa1aacd9e20689b9b3f7a32baed029d Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 29 Jun 2016 16:01:38 -0700 Subject: [PATCH 1146/2699] Symlink Actions This change symlinks the python actions files so that tox can properly test the code. Change-Id: Ib6d070555535dc202735124afdafa8dd401350df --- ceph-mon/actions/create-cache-tier | 2 +- ceph-mon/actions/create-erasure-profile | 90 +--------------------- ceph-mon/actions/create-erasure-profile.py | 89 +++++++++++++++++++++ ceph-mon/actions/create-pool | 39 +--------- ceph-mon/actions/create-pool.py | 38 +++++++++ ceph-mon/actions/delete-erasure-profile | 25 +----- ceph-mon/actions/delete-erasure-profile.py | 26 +++++++ ceph-mon/actions/delete-pool | 29 +------ ceph-mon/actions/delete-pool.py | 28 +++++++ ceph-mon/actions/get-erasure-profile | 19 +---- ceph-mon/actions/get-erasure-profile.py | 18 +++++ ceph-mon/actions/list-erasure-profiles | 23 +----- ceph-mon/actions/list-erasure-profiles.py | 22 ++++++ ceph-mon/actions/list-pools | 18 +---- ceph-mon/actions/list-pools.py | 17 ++++ ceph-mon/actions/pool-get | 20 +---- ceph-mon/actions/pool-get.py | 19 +++++ ceph-mon/actions/pool-set | 24 +----- ceph-mon/actions/pool-set.py | 23 ++++++ ceph-mon/actions/pool-statistics | 16 +--- ceph-mon/actions/pool-statistics.py | 15 ++++ ceph-mon/actions/remove-cache-tier | 2 +- ceph-mon/actions/remove-pool-snapshot | 20 +---- ceph-mon/actions/remove-pool-snapshot.py | 19 +++++ ceph-mon/actions/rename-pool | 17 +--- ceph-mon/actions/rename-pool.py | 16 ++++ ceph-mon/actions/set-pool-max-bytes | 17 +--- ceph-mon/actions/set-pool-max-bytes.py | 16 ++++ ceph-mon/actions/snapshot-pool | 19 +---- ceph-mon/actions/snapshot-pool.py | 18 +++++ 30 files changed, 380 insertions(+), 364 deletions(-) mode change 100755 => 120000 ceph-mon/actions/create-erasure-profile create mode 100755 ceph-mon/actions/create-erasure-profile.py mode change 100755 => 120000 ceph-mon/actions/create-pool create mode 100755 ceph-mon/actions/create-pool.py mode change 100755 => 120000 ceph-mon/actions/delete-erasure-profile create mode 100755 ceph-mon/actions/delete-erasure-profile.py mode change 100755 => 120000 ceph-mon/actions/delete-pool create mode 100755 ceph-mon/actions/delete-pool.py mode change 100755 => 120000 ceph-mon/actions/get-erasure-profile create mode 100755 ceph-mon/actions/get-erasure-profile.py mode change 100755 => 120000 
ceph-mon/actions/list-erasure-profiles create mode 100755 ceph-mon/actions/list-erasure-profiles.py mode change 100755 => 120000 ceph-mon/actions/list-pools create mode 100755 ceph-mon/actions/list-pools.py mode change 100755 => 120000 ceph-mon/actions/pool-get create mode 100755 ceph-mon/actions/pool-get.py mode change 100755 => 120000 ceph-mon/actions/pool-set create mode 100755 ceph-mon/actions/pool-set.py mode change 100755 => 120000 ceph-mon/actions/pool-statistics create mode 100755 ceph-mon/actions/pool-statistics.py mode change 100755 => 120000 ceph-mon/actions/remove-pool-snapshot create mode 100755 ceph-mon/actions/remove-pool-snapshot.py mode change 100755 => 120000 ceph-mon/actions/rename-pool create mode 100755 ceph-mon/actions/rename-pool.py mode change 100755 => 120000 ceph-mon/actions/set-pool-max-bytes create mode 100755 ceph-mon/actions/set-pool-max-bytes.py mode change 100755 => 120000 ceph-mon/actions/snapshot-pool create mode 100755 ceph-mon/actions/snapshot-pool.py diff --git a/ceph-mon/actions/create-cache-tier b/ceph-mon/actions/create-cache-tier index 2a7e4346..5b049bef 120000 --- a/ceph-mon/actions/create-cache-tier +++ b/ceph-mon/actions/create-cache-tier @@ -1 +1 @@ -create-cache-tier.py \ No newline at end of file +./create-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile deleted file mode 100755 index 2b00b588..00000000 --- a/ceph-mon/actions/create-erasure-profile +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/python -from subprocess import CalledProcessError -import sys - -sys.path.append('hooks') - -from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile -from charmhelpers.core.hookenv import action_get, log, action_fail - - -def make_erasure_profile(): - name = action_get("name") - plugin = action_get("plugin") - failure_domain = action_get("failure-domain") - - # jerasure requires k+m - # isa requires k+m - # local requires k+m+l - # shec requires k+m+c - - if plugin == "jerasure": - k = action_get("data-chunks") - m = action_get("coding-chunks") - try: - create_erasure_profile(service='admin', - erasure_plugin_name=plugin, - profile_name=name, - data_chunks=k, - coding_chunks=m, - failure_domain=failure_domain) - except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) - elif plugin == "isa": - k = action_get("data-chunks") - m = action_get("coding-chunks") - try: - create_erasure_profile(service='admin', - erasure_plugin_name=plugin, - profile_name=name, - data_chunks=k, - coding_chunks=m, - failure_domain=failure_domain) - except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) - elif plugin == "local": - k = action_get("data-chunks") - m = action_get("coding-chunks") - l = action_get("locality-chunks") - try: - create_erasure_profile(service='admin', - erasure_plugin_name=plugin, - profile_name=name, - data_chunks=k, - coding_chunks=m, - locality=l, - failure_domain=failure_domain) - except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) - elif plugin == "shec": - k = action_get("data-chunks") - m = action_get("coding-chunks") - c = action_get("durability-estimator") - try: - create_erasure_profile(service='admin', - erasure_plugin_name=plugin, - profile_name=name, - data_chunks=k, - coding_chunks=m, - durability_estimator=c, - 
failure_domain=failure_domain) - except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) - else: - # Unknown erasure plugin - action_fail("Unknown erasure-plugin type of {}. " - "Only jerasure, isa, local or shec is " - "allowed".format(plugin)) - - -if __name__ == '__main__': - make_erasure_profile() diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile new file mode 120000 index 00000000..2e8dba41 --- /dev/null +++ b/ceph-mon/actions/create-erasure-profile @@ -0,0 +1 @@ +./create-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/create-erasure-profile.py b/ceph-mon/actions/create-erasure-profile.py new file mode 100755 index 00000000..2b00b588 --- /dev/null +++ b/ceph-mon/actions/create-erasure-profile.py @@ -0,0 +1,89 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def make_erasure_profile(): + name = action_get("name") + plugin = action_get("plugin") + failure_domain = action_get("failure-domain") + + # jerasure requires k+m + # isa requires k+m + # local requires k+m+l + # shec requires k+m+c + + if plugin == "jerasure": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "isa": + k = action_get("data-chunks") + m = action_get("coding-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "local": + k = action_get("data-chunks") + m = action_get("coding-chunks") + l = action_get("locality-chunks") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + locality=l, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + elif plugin == "shec": + k = action_get("data-chunks") + m = action_get("coding-chunks") + c = action_get("durability-estimator") + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + durability_estimator=c, + failure_domain=failure_domain) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(e.message)) + else: + # Unknown erasure plugin + action_fail("Unknown erasure-plugin type of {}. 
" + "Only jerasure, isa, local or shec is " + "allowed".format(plugin)) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-mon/actions/create-pool b/ceph-mon/actions/create-pool deleted file mode 100755 index 4d1d2148..00000000 --- a/ceph-mon/actions/create-pool +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail -from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool - - -def create_pool(): - pool_name = action_get("name") - pool_type = action_get("pool-type") - try: - if pool_type == "replicated": - replicas = action_get("replicas") - replicated_pool = ReplicatedPool(name=pool_name, - service='admin', - replicas=replicas) - replicated_pool.create() - - elif pool_type == "erasure": - crush_profile_name = action_get("erasure-profile-name") - erasure_pool = ErasurePool(name=pool_name, - erasure_code_profile=crush_profile_name, - service='admin') - erasure_pool.create() - else: - log("Unknown pool type of {}. Only erasure or replicated is " - "allowed".format(pool_type)) - action_fail("Unknown pool type of {}. Only erasure or replicated " - "is allowed".format(pool_type)) - except CalledProcessError as e: - action_fail("Pool creation failed because of a failed process. " - "Ret Code: {} Message: {}".format(e.returncode, e.message)) - - -if __name__ == '__main__': - create_pool() diff --git a/ceph-mon/actions/create-pool b/ceph-mon/actions/create-pool new file mode 120000 index 00000000..4956f568 --- /dev/null +++ b/ceph-mon/actions/create-pool @@ -0,0 +1 @@ +./create-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create-pool.py new file mode 100755 index 00000000..4d1d2148 --- /dev/null +++ b/ceph-mon/actions/create-pool.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool + + +def create_pool(): + pool_name = action_get("name") + pool_type = action_get("pool-type") + try: + if pool_type == "replicated": + replicas = action_get("replicas") + replicated_pool = ReplicatedPool(name=pool_name, + service='admin', + replicas=replicas) + replicated_pool.create() + + elif pool_type == "erasure": + crush_profile_name = action_get("erasure-profile-name") + erasure_pool = ErasurePool(name=pool_name, + erasure_code_profile=crush_profile_name, + service='admin') + erasure_pool.create() + else: + log("Unknown pool type of {}. Only erasure or replicated is " + "allowed".format(pool_type)) + action_fail("Unknown pool type of {}. Only erasure or replicated " + "is allowed".format(pool_type)) + except CalledProcessError as e: + action_fail("Pool creation failed because of a failed process. 
" + "Ret Code: {} Message: {}".format(e.returncode, e.message)) + + +if __name__ == '__main__': + create_pool() diff --git a/ceph-mon/actions/delete-erasure-profile b/ceph-mon/actions/delete-erasure-profile deleted file mode 100755 index 075c410e..00000000 --- a/ceph-mon/actions/delete-erasure-profile +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/python -from subprocess import CalledProcessError - -__author__ = 'chris' -import sys - -sys.path.append('hooks') - -from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile -from charmhelpers.core.hookenv import action_get, log, action_fail - - -def delete_erasure_profile(): - name = action_get("name") - - try: - remove_erasure_profile(service='admin', profile_name=name) - except CalledProcessError as e: - action_fail("Remove erasure profile failed with error: {}".format( - e.message)) - - -if __name__ == '__main__': - delete_erasure_profile() diff --git a/ceph-mon/actions/delete-erasure-profile b/ceph-mon/actions/delete-erasure-profile new file mode 120000 index 00000000..a31aaba5 --- /dev/null +++ b/ceph-mon/actions/delete-erasure-profile @@ -0,0 +1 @@ +./delete-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-erasure-profile.py b/ceph-mon/actions/delete-erasure-profile.py new file mode 100755 index 00000000..1773eb82 --- /dev/null +++ b/ceph-mon/actions/delete-erasure-profile.py @@ -0,0 +1,26 @@ +#!/usr/bin/python +from subprocess import CalledProcessError + +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def delete_erasure_profile(): + name = action_get("name") + + try: + remove_erasure_profile(service='admin', profile_name=name) + except CalledProcessError as e: + log("Remove erasure profile failed with error {}".format(e.message), + level="ERROR") + action_fail("Remove erasure profile failed with error: {}".format( + e.message)) + + +if __name__ == '__main__': + delete_erasure_profile() diff --git a/ceph-mon/actions/delete-pool b/ceph-mon/actions/delete-pool deleted file mode 100755 index 3d655076..00000000 --- a/ceph-mon/actions/delete-pool +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') - -import rados -from ceph_ops import connect -from charmhelpers.core.hookenv import action_get, log, action_fail - - -def remove_pool(): - try: - pool_name = action_get("name") - cluster = connect() - log("Deleting pool: {}".format(pool_name)) - cluster.delete_pool(str(pool_name)) # Convert from unicode - cluster.shutdown() - except (rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as e: - log(e) - action_fail(e) - - -if __name__ == '__main__': - remove_pool() diff --git a/ceph-mon/actions/delete-pool b/ceph-mon/actions/delete-pool new file mode 120000 index 00000000..7b239cb5 --- /dev/null +++ b/ceph-mon/actions/delete-pool @@ -0,0 +1 @@ +./delete-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-pool.py b/ceph-mon/actions/delete-pool.py new file mode 100755 index 00000000..3d655076 --- /dev/null +++ b/ceph-mon/actions/delete-pool.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') + +import rados +from ceph_ops import connect +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def remove_pool(): + try: + pool_name = action_get("name") + cluster = connect() + log("Deleting pool: 
{}".format(pool_name)) + cluster.delete_pool(str(pool_name)) # Convert from unicode + cluster.shutdown() + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as e: + log(e) + action_fail(e) + + +if __name__ == '__main__': + remove_pool() diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile deleted file mode 100755 index 29ece59d..00000000 --- a/ceph-mon/actions/get-erasure-profile +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/python -__author__ = 'chris' -import sys - -sys.path.append('hooks') - -from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile -from charmhelpers.core.hookenv import action_get, action_set - - -def make_erasure_profile(): - name = action_get("name") - out = get_erasure_profile(service='admin', name=name) - action_set({'message': out}) - - -if __name__ == '__main__': - make_erasure_profile() diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile new file mode 120000 index 00000000..a604d843 --- /dev/null +++ b/ceph-mon/actions/get-erasure-profile @@ -0,0 +1 @@ +./get-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/get-erasure-profile.py b/ceph-mon/actions/get-erasure-profile.py new file mode 100755 index 00000000..29ece59d --- /dev/null +++ b/ceph-mon/actions/get-erasure-profile.py @@ -0,0 +1,18 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile +from charmhelpers.core.hookenv import action_get, action_set + + +def make_erasure_profile(): + name = action_get("name") + out = get_erasure_profile(service='admin', name=name) + action_set({'message': out}) + + +if __name__ == '__main__': + make_erasure_profile() diff --git a/ceph-mon/actions/list-erasure-profiles b/ceph-mon/actions/list-erasure-profiles deleted file mode 100755 index cf6dfa09..00000000 --- a/ceph-mon/actions/list-erasure-profiles +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/python -__author__ = 'chris' -import sys -from subprocess import check_output, CalledProcessError - -sys.path.append('hooks') - -from charmhelpers.core.hookenv import action_get, log, action_set, action_fail - -if __name__ == '__main__': - name = action_get("name") - try: - out = check_output(['ceph', - '--id', 'admin', - 'osd', - 'erasure-code-profile', - 'ls']).decode('UTF-8') - action_set({'message': out}) - except CalledProcessError as e: - log(e) - action_fail("Listing erasure profiles failed with error: {}".format( - e.message)) diff --git a/ceph-mon/actions/list-erasure-profiles b/ceph-mon/actions/list-erasure-profiles new file mode 120000 index 00000000..e8ad6605 --- /dev/null +++ b/ceph-mon/actions/list-erasure-profiles @@ -0,0 +1 @@ +./list-erasure-profiles.py \ No newline at end of file diff --git a/ceph-mon/actions/list-erasure-profiles.py b/ceph-mon/actions/list-erasure-profiles.py new file mode 100755 index 00000000..cf6dfa09 --- /dev/null +++ b/ceph-mon/actions/list-erasure-profiles.py @@ -0,0 +1,22 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_set, action_fail + +if __name__ == '__main__': + name = action_get("name") + try: + out = check_output(['ceph', + '--id', 'admin', + 'osd', + 'erasure-code-profile', + 'ls']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + 
log(e) + action_fail("Listing erasure profiles failed with error: {}".format( + e.message)) diff --git a/ceph-mon/actions/list-pools b/ceph-mon/actions/list-pools deleted file mode 100755 index 102667cf..00000000 --- a/ceph-mon/actions/list-pools +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/python -__author__ = 'chris' -import sys -from subprocess import check_output, CalledProcessError - -sys.path.append('hooks') - -from charmhelpers.core.hookenv import log, action_set, action_fail - -if __name__ == '__main__': - try: - out = check_output(['ceph', '--id', 'admin', - 'osd', 'lspools']).decode('UTF-8') - action_set({'message': out}) - except CalledProcessError as e: - log(e) - action_fail("List pools failed with error: {}".format(e.message)) diff --git a/ceph-mon/actions/list-pools b/ceph-mon/actions/list-pools new file mode 120000 index 00000000..ac972184 --- /dev/null +++ b/ceph-mon/actions/list-pools @@ -0,0 +1 @@ +./list-pools.py \ No newline at end of file diff --git a/ceph-mon/actions/list-pools.py b/ceph-mon/actions/list-pools.py new file mode 100755 index 00000000..102667cf --- /dev/null +++ b/ceph-mon/actions/list-pools.py @@ -0,0 +1,17 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 'lspools']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("List pools failed with error: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-get b/ceph-mon/actions/pool-get deleted file mode 100755 index e4f924b9..00000000 --- a/ceph-mon/actions/pool-get +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/python -__author__ = 'chris' -import sys -from subprocess import check_output, CalledProcessError - -sys.path.append('hooks') - -from charmhelpers.core.hookenv import log, action_set, action_get, action_fail - -if __name__ == '__main__': - name = action_get('pool-name') - key = action_get('key') - try: - out = check_output(['ceph', '--id', 'admin', - 'osd', 'pool', 'get', name, key]).decode('UTF-8') - action_set({'message': out}) - except CalledProcessError as e: - log(e) - action_fail("Pool get failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-get b/ceph-mon/actions/pool-get new file mode 120000 index 00000000..57a93cc7 --- /dev/null +++ b/ceph-mon/actions/pool-get @@ -0,0 +1 @@ +./pool-get.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-get.py b/ceph-mon/actions/pool-get.py new file mode 100755 index 00000000..e4f924b9 --- /dev/null +++ b/ceph-mon/actions/pool-get.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +__author__ = 'chris' +import sys +from subprocess import check_output, CalledProcessError + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import log, action_set, action_get, action_fail + +if __name__ == '__main__': + name = action_get('pool-name') + key = action_get('key') + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 'pool', 'get', name, key]).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("Pool get failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-set b/ceph-mon/actions/pool-set deleted file mode 100755 index 1f6e13b8..00000000 --- a/ceph-mon/actions/pool-set +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/python -from subprocess import 
CalledProcessError -import sys - -sys.path.append('hooks') - -from charmhelpers.core.hookenv import action_get, log, action_fail -from ceph_broker import handle_set_pool_value - -if __name__ == '__main__': - name = action_get("pool-name") - key = action_get("key") - value = action_get("value") - request = {'name': name, - 'key': key, - 'value': value} - - try: - handle_set_pool_value(service='admin', request=request) - except CalledProcessError as e: - log(e.message) - action_fail("Setting pool key: {} and value: {} failed with " - "message: {}".format(key, value, e.message)) diff --git a/ceph-mon/actions/pool-set b/ceph-mon/actions/pool-set new file mode 120000 index 00000000..6dcb8466 --- /dev/null +++ b/ceph-mon/actions/pool-set @@ -0,0 +1 @@ +./pool-set.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-set.py b/ceph-mon/actions/pool-set.py new file mode 100755 index 00000000..1f6e13b8 --- /dev/null +++ b/ceph-mon/actions/pool-set.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import action_get, log, action_fail +from ceph_broker import handle_set_pool_value + +if __name__ == '__main__': + name = action_get("pool-name") + key = action_get("key") + value = action_get("value") + request = {'name': name, + 'key': key, + 'value': value} + + try: + handle_set_pool_value(service='admin', request=request) + except CalledProcessError as e: + log(e.message) + action_fail("Setting pool key: {} and value: {} failed with " + "message: {}".format(key, value, e.message)) diff --git a/ceph-mon/actions/pool-statistics b/ceph-mon/actions/pool-statistics deleted file mode 100755 index 536c889a..00000000 --- a/ceph-mon/actions/pool-statistics +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import check_output, CalledProcessError -from charmhelpers.core.hookenv import log, action_set, action_fail - -if __name__ == '__main__': - try: - out = check_output(['ceph', '--id', 'admin', - 'df']).decode('UTF-8') - action_set({'message': out}) - except CalledProcessError as e: - log(e) - action_fail("ceph df failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/pool-statistics b/ceph-mon/actions/pool-statistics new file mode 120000 index 00000000..2e9a80e0 --- /dev/null +++ b/ceph-mon/actions/pool-statistics @@ -0,0 +1 @@ +./pool-statistics.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-statistics.py b/ceph-mon/actions/pool-statistics.py new file mode 100755 index 00000000..536c889a --- /dev/null +++ b/ceph-mon/actions/pool-statistics.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import check_output, CalledProcessError +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'df']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail("ceph df failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/remove-cache-tier b/ceph-mon/actions/remove-cache-tier index 136c0f06..efbda4fa 120000 --- a/ceph-mon/actions/remove-cache-tier +++ b/ceph-mon/actions/remove-cache-tier @@ -1 +1 @@ -remove-cache-tier.py \ No newline at end of file +./remove-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-pool-snapshot b/ceph-mon/actions/remove-pool-snapshot deleted file mode 
100755 index 387849ea..00000000 --- a/ceph-mon/actions/remove-pool-snapshot +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail -from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot - -if __name__ == '__main__': - name = action_get("pool-name") - snapname = action_get("snapshot-name") - try: - remove_pool_snapshot(service='admin', - pool_name=name, - snapshot_name=snapname) - except CalledProcessError as e: - log(e) - action_fail("Remove pool snapshot failed with message: {}".format( - e.message)) diff --git a/ceph-mon/actions/remove-pool-snapshot b/ceph-mon/actions/remove-pool-snapshot new file mode 120000 index 00000000..ea226fff --- /dev/null +++ b/ceph-mon/actions/remove-pool-snapshot @@ -0,0 +1 @@ +./remove-pool-snapshot.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-pool-snapshot.py b/ceph-mon/actions/remove-pool-snapshot.py new file mode 100755 index 00000000..387849ea --- /dev/null +++ b/ceph-mon/actions/remove-pool-snapshot.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + remove_pool_snapshot(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Remove pool snapshot failed with message: {}".format( + e.message)) diff --git a/ceph-mon/actions/rename-pool b/ceph-mon/actions/rename-pool deleted file mode 100755 index 6fe088ec..00000000 --- a/ceph-mon/actions/rename-pool +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail -from charmhelpers.contrib.storage.linux.ceph import rename_pool - -if __name__ == '__main__': - name = action_get("pool-name") - new_name = action_get("new-name") - try: - rename_pool(service='admin', old_name=name, new_name=new_name) - except CalledProcessError as e: - log(e) - action_fail("Renaming pool failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/rename-pool b/ceph-mon/actions/rename-pool new file mode 120000 index 00000000..c7e4e7a6 --- /dev/null +++ b/ceph-mon/actions/rename-pool @@ -0,0 +1 @@ +./rename-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/rename-pool.py b/ceph-mon/actions/rename-pool.py new file mode 100755 index 00000000..6fe088ec --- /dev/null +++ b/ceph-mon/actions/rename-pool.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import rename_pool + +if __name__ == '__main__': + name = action_get("pool-name") + new_name = action_get("new-name") + try: + rename_pool(service='admin', old_name=name, new_name=new_name) + except CalledProcessError as e: + log(e) + action_fail("Renaming pool failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/set-pool-max-bytes b/ceph-mon/actions/set-pool-max-bytes deleted file mode 100755 index 86360885..00000000 --- 
a/ceph-mon/actions/set-pool-max-bytes +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail -from charmhelpers.contrib.storage.linux.ceph import set_pool_quota - -if __name__ == '__main__': - max_bytes = action_get("max") - name = action_get("pool-name") - try: - set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) - except CalledProcessError as e: - log(e) - action_fail("Set pool quota failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/set-pool-max-bytes b/ceph-mon/actions/set-pool-max-bytes new file mode 120000 index 00000000..34d4d9b5 --- /dev/null +++ b/ceph-mon/actions/set-pool-max-bytes @@ -0,0 +1 @@ +./set-pool-max-bytes.py \ No newline at end of file diff --git a/ceph-mon/actions/set-pool-max-bytes.py b/ceph-mon/actions/set-pool-max-bytes.py new file mode 100755 index 00000000..86360885 --- /dev/null +++ b/ceph-mon/actions/set-pool-max-bytes.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import set_pool_quota + +if __name__ == '__main__': + max_bytes = action_get("max") + name = action_get("pool-name") + try: + set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) + except CalledProcessError as e: + log(e) + action_fail("Set pool quota failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/snapshot-pool b/ceph-mon/actions/snapshot-pool deleted file mode 100755 index a02619bf..00000000 --- a/ceph-mon/actions/snapshot-pool +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/python -import sys - -sys.path.append('hooks') -from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail -from charmhelpers.contrib.storage.linux.ceph import snapshot_pool - -if __name__ == '__main__': - name = action_get("pool-name") - snapname = action_get("snapshot-name") - try: - snapshot_pool(service='admin', - pool_name=name, - snapshot_name=snapname) - except CalledProcessError as e: - log(e) - action_fail("Snapshot pool failed with message: {}".format(e.message)) diff --git a/ceph-mon/actions/snapshot-pool b/ceph-mon/actions/snapshot-pool new file mode 120000 index 00000000..6e3eccc9 --- /dev/null +++ b/ceph-mon/actions/snapshot-pool @@ -0,0 +1 @@ +./snapshot-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/snapshot-pool.py b/ceph-mon/actions/snapshot-pool.py new file mode 100755 index 00000000..a02619bf --- /dev/null +++ b/ceph-mon/actions/snapshot-pool.py @@ -0,0 +1,18 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks') +from subprocess import CalledProcessError +from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.contrib.storage.linux.ceph import snapshot_pool + +if __name__ == '__main__': + name = action_get("pool-name") + snapname = action_get("snapshot-name") + try: + snapshot_pool(service='admin', + pool_name=name, + snapshot_name=snapname) + except CalledProcessError as e: + log(e) + action_fail("Snapshot pool failed with message: {}".format(e.message)) From 712d99e54eecb33f5bfec1bfd16431606dd53b10 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 30 Jun 2016 13:06:08 -0400 Subject: [PATCH 1147/2699] initial commit --- ceph-fs/src/README.ex | 65 ++++++ ceph-fs/src/config.yaml | 28 +++ 
ceph-fs/src/icon.svg | 279 ++++++++++++++++++++++++++ ceph-fs/src/layer.yaml | 1 + ceph-fs/src/metadata.yaml | 19 ++ ceph-fs/src/reactive/charm_ceph_fs.py | 46 +++++ ceph-fs/src/templates/ceph.conf | 33 +++ ceph-fs/src/tests/00-setup | 5 + ceph-fs/src/tests/10-deploy | 31 +++ ceph-fs/tox.ini | 0 10 files changed, 507 insertions(+) create mode 100644 ceph-fs/src/README.ex create mode 100644 ceph-fs/src/config.yaml create mode 100644 ceph-fs/src/icon.svg create mode 100644 ceph-fs/src/layer.yaml create mode 100644 ceph-fs/src/metadata.yaml create mode 100644 ceph-fs/src/reactive/charm_ceph_fs.py create mode 100644 ceph-fs/src/templates/ceph.conf create mode 100755 ceph-fs/src/tests/00-setup create mode 100755 ceph-fs/src/tests/10-deploy create mode 100644 ceph-fs/tox.ini diff --git a/ceph-fs/src/README.ex b/ceph-fs/src/README.ex new file mode 100644 index 00000000..b6816b22 --- /dev/null +++ b/ceph-fs/src/README.ex @@ -0,0 +1,65 @@ +# Overview + +Describe the intended usage of this charm and anything unique about how this +charm relates to others here. + +This README will be displayed in the Charm Store, it should be either Markdown +or RST. Ideal READMEs include instructions on how to use the charm, expected +usage, and charm features that your audience might be interested in. For an +example of a well written README check out Hadoop: +http://jujucharms.com/charms/precise/hadoop + +Use this as a Markdown reference if you need help with the formatting of this +README: http://askubuntu.com/editing-help + +This charm provides [service][]. Add a description here of what the service +itself actually does. + +Also remember to check the [icon guidelines][] so that your charm looks good +in the Juju GUI. + +# Usage + +Step by step instructions on using the charm: + +juju deploy servicename + +and so on. If you're providing a web service or something that the end user +needs to go to, tell them here, especially if you're deploying a service that +might listen to a non-default port. + +You can then browse to http://ip-address to configure the service. + +## Scale out Usage + +If the charm has any recommendations for running at scale, outline them in +examples here. For example if you have a memcached relation that improves +performance, mention it here. + +## Known Limitations and Issues + +This not only helps users but gives people a place to start if they want to help +you add features to your charm. + +# Configuration + +The configuration options will be listed on the charm store, however If you're +making assumptions or opinionated decisions in the charm (like setting a default +administrator password), you should detail that here so the user knows how to +change it immediately, etc. + +# Contact Information + +Though this will be listed in the charm store itself don't assume a user will +know that, so include that information here: + +## Upstream Project Name + + - Upstream website + - Upstream bug tracker + - Upstream mailing list or contact information + - Feel free to add things if it's useful for users + + +[service]: http://example.com +[icon guidelines]: https://jujucharms.com/docs/stable/authors-charm-icon diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml new file mode 100644 index 00000000..49d6f9ea --- /dev/null +++ b/ceph-fs/src/config.yaml @@ -0,0 +1,28 @@ +options: + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. 
+  ceph-public-network:
+    type: string
+    default:
+    description: |
+      The IP address and netmask of the public (front-side) network (e.g.,
+      192.168.0.0/24)
+      .
+      If multiple networks are to be used, a space-delimited list of a.b.c.d/x
+      can be provided.
+  ceph-cluster-network:
+    type: string
+    default:
+    description: |
+      The IP address and netmask of the cluster (back-side) network (e.g.,
+      192.168.0.0/24)
+      .
+      If multiple networks are to be used, a space-delimited list of a.b.c.d/x
+      can be provided.
\ No newline at end of file
diff --git a/ceph-fs/src/icon.svg b/ceph-fs/src/icon.svg
new file mode 100644
index 00000000..e092eef7
--- /dev/null
+++ b/ceph-fs/src/icon.svg
@@ -0,0 +1,279 @@
+[279 lines of SVG icon markup; the vector data was stripped at extraction and only an image/svg+xml media-type string survives]
diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml
new file mode 100644
index 00000000..c58e1127
--- /dev/null
+++ b/ceph-fs/src/layer.yaml
@@ -0,0 +1 @@
+includes: ['layer:ceph-base']  # if you use any interfaces, add them here
diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml
new file mode 100644
index 00000000..03614784
--- /dev/null
+++ b/ceph-fs/src/metadata.yaml
@@ -0,0 +1,19 @@
+name: charm-ceph-fs
+summary:
+maintainer: chris
+description: |
+
+tags:
+  # Replace "misc" with one or more whitelisted tags from this list:
+  # https://jujucharms.com/docs/stable/authors-charm-metadata
+  - misc
+subordinate: false
+provides:
+  provides-relation:
+    interface: interface-name
+requires:
+  requires-relation:
+    interface: interface-name
+peers:
+  peer-relation:
+    interface: interface-name
diff --git a/ceph-fs/src/reactive/charm_ceph_fs.py b/ceph-fs/src/reactive/charm_ceph_fs.py
new file mode 100644
index 00000000..83c463fe
--- /dev/null
+++ b/ceph-fs/src/reactive/charm_ceph_fs.py
@@ -0,0 +1,46 @@
+from charms.reactive import when, when_not, set_state
+
+from charmhelpers.core.hookenv import (
+    config,
+)
+
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network
+)
+
+@when('ceph.installed')
+@when('ceph-mon.available')
+def setup_mds(mon):
+    pass  # placeholder; MDS setup not yet implemented
+
+@when('config.changed', 'ceph-mon.available')
+def config_changed():
+    networks = get_networks('ceph-public-network')
+    public_network = ', '.join(networks)
+
+    networks = get_networks('ceph-cluster-network')
+    cluster_network = ', '.join(networks)
+
+    cephcontext = {
+        # 'mon_hosts': ' '.join(get_mon_hosts()),
+        # 'fsid': leader_get('fsid'),
+        'osd_journal_size': config('osd-journal-size'),
+        'use_syslog': str(config('use-syslog')).lower(),
+        'ceph_public_network': public_network,
+        'ceph_cluster_network': cluster_network,
+        'loglevel': config('loglevel'),
+    }
+
+
+def get_networks(config_opt='ceph-public-network'):
+    """Get all configured networks from provided config option.
+
+    If public network(s) are provided, go through them and return those for
+    which we have an address configured.
+    """
+    networks = config(config_opt)
+    if networks:
+        networks = networks.split()
+        return [n for n in networks if get_address_in_network(n)]
+
+    return []
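The config_changed() handler above assembles cephcontext but does not yet consume it; the dict only becomes meaningful once it is rendered into the ceph.conf template introduced in the next diff. A minimal sketch of that wiring, assuming charmhelpers' stock templating helper (charmhelpers.core.templating.render resolves its source argument against the charm's templates/ directory) and the conventional /etc/ceph/ceph.conf target; the helper name emit_ceph_conf is illustrative, not part of this commit:

    from charmhelpers.core.templating import render

    def emit_ceph_conf(cephcontext):
        # Expand templates/ceph.conf with the assembled context and write
        # the result to the target path with the given owner and mode.
        render(source='ceph.conf',
               target='/etc/ceph/ceph.conf',
               context=cephcontext,
               owner='root', group='root', perms=0o644)

Presumably a later revision of the charm calls something like this from its config-changed path.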
+ """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf new file mode 100644 index 00000000..ea9b7338 --- /dev/null +++ b/ceph-fs/src/templates/ceph.conf @@ -0,0 +1,33 @@ +[global] +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} +{% endif %} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +fsid = {{ fsid }} + +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +mon cluster log to syslog = {{ use_syslog }} +debug mon = {{ loglevel }}/5 +debug osd = {{ loglevel }}/5 + +{%- if ceph_public_network is string %} +public network = {{ ceph_public_network }} +{%- endif %} +{%- if ceph_cluster_network is string %} +cluster network = {{ ceph_cluster_network }} +{%- endif %} + +{% if public_addr %} +public addr = {{ public_addr }} +{% endif %} +{% if cluster_addr %} +cluster addr = {{ cluster_addr }} +{%- endif %} + +[mds] +keyring = /var/lib/ceph/mds/$cluster-$id/keyring + diff --git a/ceph-fs/src/tests/00-setup b/ceph-fs/src/tests/00-setup new file mode 100755 index 00000000..f0616a56 --- /dev/null +++ b/ceph-fs/src/tests/00-setup @@ -0,0 +1,5 @@ +#!/bin/bash + +sudo add-apt-repository ppa:juju/stable -y +sudo apt-get update +sudo apt-get install amulet python-requests -y diff --git a/ceph-fs/src/tests/10-deploy b/ceph-fs/src/tests/10-deploy new file mode 100755 index 00000000..d5778b04 --- /dev/null +++ b/ceph-fs/src/tests/10-deploy @@ -0,0 +1,31 @@ +#!/usr/bin/python3 + +import amulet +import requests +import unittest + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.d = amulet.Deployment() + + self.d.add('charm-ceph-fs') + self.d.expose('charm-ceph-fs') + + self.d.setup(timeout=900) + self.d.sentry.wait() + + self.unit = self.d.sentry['charm-ceph-fs'][0] + + def test_service(self): + # test we can access over http + page = requests.get('http://{}'.format(self.unit.info['public-address'])) + self.assertEqual(page.status_code, 200) + # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform + # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods: + # - .info - An array of the information of that unit from Juju + # - .file(PATH) - Get the details of a file on that unit + # - .file_contents(PATH) - Get plain text output of PATH file from that unit + # - .directory(PATH) - Get details of directory + # - .directory_contents(PATH) - List files and folders in PATH on that unit + # - .relation(relation, service:rel) - Get relation data from return service diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini new file mode 100644 index 00000000..e69de29b From 6053dea2e75be3d71dedee57823f912475e657eb Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 22 Jun 2016 15:34:29 +0100 Subject: [PATCH 1148/2699] Re-license charm as Apache-2.0 All contributions to this charm where made under Canonical copyright; switch to Apache-2.0 license as agreed so we can move forward with official project status. In order to make this change, this commit also drops the inclusion of upstart configurations for very early versions of Ceph (argonaut), as they are no longer required. 
Change-Id: I3d943dfd78f406ba29f86c51e22a13eab448452e --- ceph-mon/LICENSE | 202 ++++++++++++++++++ ceph-mon/actions/__init__.py | 15 +- ceph-mon/actions/ceph_ops.py | 15 +- ceph-mon/actions/create-cache-tier | 2 +- ceph-mon/actions/create-cache-tier.py | 16 +- ceph-mon/actions/create-erasure-profile | 2 +- ceph-mon/actions/create-erasure-profile.py | 15 ++ ceph-mon/actions/create-pool | 2 +- ceph-mon/actions/create-pool.py | 15 ++ ceph-mon/actions/delete-erasure-profile | 2 +- ceph-mon/actions/delete-erasure-profile.py | 15 ++ ceph-mon/actions/delete-pool | 2 +- ceph-mon/actions/delete-pool.py | 15 ++ ceph-mon/actions/get-erasure-profile | 2 +- ceph-mon/actions/get-erasure-profile.py | 16 +- ceph-mon/actions/list-erasure-profiles | 2 +- ceph-mon/actions/list-erasure-profiles.py | 16 +- ceph-mon/actions/list-pools | 2 +- ceph-mon/actions/list-pools.py | 16 +- ceph-mon/actions/pause-health | 16 +- ceph-mon/actions/pool-get | 2 +- ceph-mon/actions/pool-get.py | 16 +- ceph-mon/actions/pool-set | 2 +- ceph-mon/actions/pool-set.py | 15 ++ ceph-mon/actions/pool-statistics | 2 +- ceph-mon/actions/pool-statistics.py | 15 ++ ceph-mon/actions/remove-cache-tier | 2 +- ceph-mon/actions/remove-cache-tier.py | 15 ++ ceph-mon/actions/remove-pool-snapshot | 2 +- ceph-mon/actions/remove-pool-snapshot.py | 15 ++ ceph-mon/actions/rename-pool | 2 +- ceph-mon/actions/rename-pool.py | 15 ++ ceph-mon/actions/resume-health | 16 +- ceph-mon/actions/set-pool-max-bytes | 2 +- ceph-mon/actions/set-pool-max-bytes.py | 15 ++ ceph-mon/actions/snapshot-pool | 2 +- ceph-mon/actions/snapshot-pool.py | 15 ++ ceph-mon/copyright | 19 +- ceph-mon/files/upstart/ceph-create-keys.conf | 8 - ceph-mon/files/upstart/ceph-hotplug.conf | 11 - .../files/upstart/ceph-mon-all-starter.conf | 20 -- ceph-mon/files/upstart/ceph-mon-all.conf | 4 - ceph-mon/files/upstart/ceph-mon.conf | 25 --- ceph-mon/hooks/__init__.py | 13 ++ ceph-mon/hooks/ceph.py | 15 +- ceph-mon/hooks/ceph_broker.py | 14 +- ceph-mon/hooks/ceph_hooks.py | 27 +-- ceph-mon/hooks/utils.py | 15 +- ceph-mon/tests/014-basic-precise-icehouse | 14 ++ ceph-mon/tests/015-basic-trusty-icehouse | 14 ++ ceph-mon/tests/016-basic-trusty-juno | 14 ++ ceph-mon/tests/017-basic-trusty-kilo | 14 ++ ceph-mon/tests/018-basic-trusty-liberty | 14 ++ ceph-mon/tests/019-basic-trusty-mitaka | 14 ++ ceph-mon/tests/020-basic-wily-liberty | 14 ++ ceph-mon/tests/021-basic-xenial-mitaka | 14 ++ ceph-mon/tests/basic_deployment.py | 14 ++ ceph-mon/unit_tests/__init__.py | 14 ++ ceph-mon/unit_tests/test_ceph_broker.py | 14 ++ ceph-mon/unit_tests/test_ceph_networking.py | 14 ++ ceph-mon/unit_tests/test_ceph_ops.py | 14 +- ceph-mon/unit_tests/test_status.py | 14 ++ ceph-mon/unit_tests/test_upgrade_roll.py | 17 +- ceph-mon/unit_tests/test_utils.py | 14 ++ 64 files changed, 807 insertions(+), 132 deletions(-) create mode 100644 ceph-mon/LICENSE delete mode 100644 ceph-mon/files/upstart/ceph-create-keys.conf delete mode 100644 ceph-mon/files/upstart/ceph-hotplug.conf delete mode 100644 ceph-mon/files/upstart/ceph-mon-all-starter.conf delete mode 100644 ceph-mon/files/upstart/ceph-mon-all.conf delete mode 100644 ceph-mon/files/upstart/ceph-mon.conf diff --git a/ceph-mon/LICENSE b/ceph-mon/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-mon/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
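The diffs that follow stamp the 15-line boilerplate header from the appendix above into every action and hook. Beyond that, the commit rewrites each action symlink target from ./name.py to name.py (and flips remove-cache-tier back, undoing the ./ prefix it gained earlier in the series). The two spellings are interchangeable, because a relative symlink target is resolved against the directory containing the link itself; a quick demonstration in Python, using one of the links above as an illustrative path:

    import os

    # './create-pool.py' and 'create-pool.py' name the same file, since a
    # relative symlink target is resolved from the symlink's own directory.
    link = 'ceph-mon/actions/create-pool'
    target = os.readlink(link)                 # 'create-pool.py' after this commit
    resolved = os.path.join(os.path.dirname(link), target)
    print(os.path.realpath(resolved))          # .../ceph-mon/actions/create-pool.py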
diff --git a/ceph-mon/actions/__init__.py b/ceph-mon/actions/__init__.py index ff2381cc..b7fe4e1b 100644 --- a/ceph-mon/actions/__init__.py +++ b/ceph-mon/actions/__init__.py @@ -1,3 +1,16 @@ -__author__ = 'chris' +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index e70ebc7e..c4df90f3 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -1,4 +1,17 @@ -__author__ = 'chris' +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from subprocess import CalledProcessError, check_output import sys diff --git a/ceph-mon/actions/create-cache-tier b/ceph-mon/actions/create-cache-tier index 5b049bef..2a7e4346 120000 --- a/ceph-mon/actions/create-cache-tier +++ b/ceph-mon/actions/create-cache-tier @@ -1 +1 @@ -./create-cache-tier.py \ No newline at end of file +create-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/create-cache-tier.py b/ceph-mon/actions/create-cache-tier.py index e8170cf2..614bdb05 100755 --- a/ceph-mon/actions/create-cache-tier.py +++ b/ceph-mon/actions/create-cache-tier.py @@ -1,5 +1,19 @@ #!/usr/bin/python -__author__ = 'chris' +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from subprocess import CalledProcessError import sys diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile index 2e8dba41..58eef8ed 120000 --- a/ceph-mon/actions/create-erasure-profile +++ b/ceph-mon/actions/create-erasure-profile @@ -1 +1 @@ -./create-erasure-profile.py \ No newline at end of file +create-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/create-erasure-profile.py b/ceph-mon/actions/create-erasure-profile.py index 2b00b588..a468058b 100755 --- a/ceph-mon/actions/create-erasure-profile.py +++ b/ceph-mon/actions/create-erasure-profile.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from subprocess import CalledProcessError import sys diff --git a/ceph-mon/actions/create-pool b/ceph-mon/actions/create-pool index 4956f568..bf2f130c 120000 --- a/ceph-mon/actions/create-pool +++ b/ceph-mon/actions/create-pool @@ -1 +1 @@ -./create-pool.py \ No newline at end of file +create-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create-pool.py index 4d1d2148..a0123bf7 100755 --- a/ceph-mon/actions/create-pool.py +++ b/ceph-mon/actions/create-pool.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/delete-erasure-profile b/ceph-mon/actions/delete-erasure-profile index a31aaba5..719025e0 120000 --- a/ceph-mon/actions/delete-erasure-profile +++ b/ceph-mon/actions/delete-erasure-profile @@ -1 +1 @@ -./delete-erasure-profile.py \ No newline at end of file +delete-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-erasure-profile.py b/ceph-mon/actions/delete-erasure-profile.py index 1773eb82..49d27114 100755 --- a/ceph-mon/actions/delete-erasure-profile.py +++ b/ceph-mon/actions/delete-erasure-profile.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + from subprocess import CalledProcessError __author__ = 'chris' diff --git a/ceph-mon/actions/delete-pool b/ceph-mon/actions/delete-pool index 7b239cb5..8deb7a08 120000 --- a/ceph-mon/actions/delete-pool +++ b/ceph-mon/actions/delete-pool @@ -1 +1 @@ -./delete-pool.py \ No newline at end of file +delete-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-pool.py b/ceph-mon/actions/delete-pool.py index 3d655076..a7881596 100755 --- a/ceph-mon/actions/delete-pool.py +++ b/ceph-mon/actions/delete-pool.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile index a604d843..ec29f9e9 120000 --- a/ceph-mon/actions/get-erasure-profile +++ b/ceph-mon/actions/get-erasure-profile @@ -1 +1 @@ -./get-erasure-profile.py \ No newline at end of file +get-erasure-profile.py \ No newline at end of file diff --git a/ceph-mon/actions/get-erasure-profile.py b/ceph-mon/actions/get-erasure-profile.py index 29ece59d..92307119 100755 --- a/ceph-mon/actions/get-erasure-profile.py +++ b/ceph-mon/actions/get-erasure-profile.py @@ -1,5 +1,19 @@ #!/usr/bin/python -__author__ = 'chris' +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/list-erasure-profiles b/ceph-mon/actions/list-erasure-profiles index e8ad6605..bcd2eca3 120000 --- a/ceph-mon/actions/list-erasure-profiles +++ b/ceph-mon/actions/list-erasure-profiles @@ -1 +1 @@ -./list-erasure-profiles.py \ No newline at end of file +list-erasure-profiles.py \ No newline at end of file diff --git a/ceph-mon/actions/list-erasure-profiles.py b/ceph-mon/actions/list-erasure-profiles.py index cf6dfa09..038db2c0 100755 --- a/ceph-mon/actions/list-erasure-profiles.py +++ b/ceph-mon/actions/list-erasure-profiles.py @@ -1,5 +1,19 @@ #!/usr/bin/python -__author__ = 'chris' +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys from subprocess import check_output, CalledProcessError diff --git a/ceph-mon/actions/list-pools b/ceph-mon/actions/list-pools index ac972184..e2fa46dd 120000 --- a/ceph-mon/actions/list-pools +++ b/ceph-mon/actions/list-pools @@ -1 +1 @@ -./list-pools.py \ No newline at end of file +list-pools.py \ No newline at end of file diff --git a/ceph-mon/actions/list-pools.py b/ceph-mon/actions/list-pools.py index 102667cf..1784a42a 100755 --- a/ceph-mon/actions/list-pools.py +++ b/ceph-mon/actions/list-pools.py @@ -1,5 +1,19 @@ #!/usr/bin/python -__author__ = 'chris' +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys from subprocess import check_output, CalledProcessError diff --git a/ceph-mon/actions/pause-health b/ceph-mon/actions/pause-health index 207c4f65..e00afd15 100755 --- a/ceph-mon/actions/pause-health +++ b/ceph-mon/actions/pause-health @@ -1,6 +1,20 @@ #!/bin/bash +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. set -eux ceph osd set nodown -ceph osd set noout \ No newline at end of file +ceph osd set noout diff --git a/ceph-mon/actions/pool-get b/ceph-mon/actions/pool-get index 57a93cc7..129b906d 120000 --- a/ceph-mon/actions/pool-get +++ b/ceph-mon/actions/pool-get @@ -1 +1 @@ -./pool-get.py \ No newline at end of file +pool-get.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-get.py b/ceph-mon/actions/pool-get.py index e4f924b9..aa5faa29 100755 --- a/ceph-mon/actions/pool-get.py +++ b/ceph-mon/actions/pool-get.py @@ -1,5 +1,19 @@ #!/usr/bin/python -__author__ = 'chris' +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import sys from subprocess import check_output, CalledProcessError diff --git a/ceph-mon/actions/pool-set b/ceph-mon/actions/pool-set index 6dcb8466..8327dcdf 120000 --- a/ceph-mon/actions/pool-set +++ b/ceph-mon/actions/pool-set @@ -1 +1 @@ -./pool-set.py \ No newline at end of file +pool-set.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-set.py b/ceph-mon/actions/pool-set.py index 1f6e13b8..850071e7 100755 --- a/ceph-mon/actions/pool-set.py +++ b/ceph-mon/actions/pool-set.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from subprocess import CalledProcessError import sys diff --git a/ceph-mon/actions/pool-statistics b/ceph-mon/actions/pool-statistics index 2e9a80e0..9d775f8f 120000 --- a/ceph-mon/actions/pool-statistics +++ b/ceph-mon/actions/pool-statistics @@ -1 +1 @@ -./pool-statistics.py \ No newline at end of file +pool-statistics.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-statistics.py b/ceph-mon/actions/pool-statistics.py index 536c889a..54358d58 100755 --- a/ceph-mon/actions/pool-statistics.py +++ b/ceph-mon/actions/pool-statistics.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/remove-cache-tier b/ceph-mon/actions/remove-cache-tier index efbda4fa..136c0f06 120000 --- a/ceph-mon/actions/remove-cache-tier +++ b/ceph-mon/actions/remove-cache-tier @@ -1 +1 @@ -./remove-cache-tier.py \ No newline at end of file +remove-cache-tier.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-cache-tier.py b/ceph-mon/actions/remove-cache-tier.py index 79db9cf7..2572515a 100755 --- a/ceph-mon/actions/remove-cache-tier.py +++ b/ceph-mon/actions/remove-cache-tier.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + from subprocess import CalledProcessError import sys diff --git a/ceph-mon/actions/remove-pool-snapshot b/ceph-mon/actions/remove-pool-snapshot index ea226fff..c4cc17b6 120000 --- a/ceph-mon/actions/remove-pool-snapshot +++ b/ceph-mon/actions/remove-pool-snapshot @@ -1 +1 @@ -./remove-pool-snapshot.py \ No newline at end of file +remove-pool-snapshot.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-pool-snapshot.py b/ceph-mon/actions/remove-pool-snapshot.py index 387849ea..ea2eaf8e 100755 --- a/ceph-mon/actions/remove-pool-snapshot.py +++ b/ceph-mon/actions/remove-pool-snapshot.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/rename-pool b/ceph-mon/actions/rename-pool index c7e4e7a6..ce3ff8f5 120000 --- a/ceph-mon/actions/rename-pool +++ b/ceph-mon/actions/rename-pool @@ -1 +1 @@ -./rename-pool.py \ No newline at end of file +rename-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/rename-pool.py b/ceph-mon/actions/rename-pool.py index 6fe088ec..d6da3db9 100755 --- a/ceph-mon/actions/rename-pool.py +++ b/ceph-mon/actions/rename-pool.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/resume-health b/ceph-mon/actions/resume-health index 39d15a1f..f42397ed 100755 --- a/ceph-mon/actions/resume-health +++ b/ceph-mon/actions/resume-health @@ -1,6 +1,20 @@ #!/bin/bash +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
set -eux ceph osd unset nodown -ceph osd unset noout \ No newline at end of file +ceph osd unset noout diff --git a/ceph-mon/actions/set-pool-max-bytes b/ceph-mon/actions/set-pool-max-bytes index 34d4d9b5..d633c0c2 120000 --- a/ceph-mon/actions/set-pool-max-bytes +++ b/ceph-mon/actions/set-pool-max-bytes @@ -1 +1 @@ -./set-pool-max-bytes.py \ No newline at end of file +set-pool-max-bytes.py \ No newline at end of file diff --git a/ceph-mon/actions/set-pool-max-bytes.py b/ceph-mon/actions/set-pool-max-bytes.py index 86360885..cd44af69 100755 --- a/ceph-mon/actions/set-pool-max-bytes.py +++ b/ceph-mon/actions/set-pool-max-bytes.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/actions/snapshot-pool b/ceph-mon/actions/snapshot-pool index 6e3eccc9..549e3459 120000 --- a/ceph-mon/actions/snapshot-pool +++ b/ceph-mon/actions/snapshot-pool @@ -1 +1 @@ -./snapshot-pool.py \ No newline at end of file +snapshot-pool.py \ No newline at end of file diff --git a/ceph-mon/actions/snapshot-pool.py b/ceph-mon/actions/snapshot-pool.py index a02619bf..5f03e0c2 100755 --- a/ceph-mon/actions/snapshot-pool.py +++ b/ceph-mon/actions/snapshot-pool.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/copyright b/ceph-mon/copyright index bdfae0e0..c801b143 100644 --- a/ceph-mon/copyright +++ b/ceph-mon/copyright @@ -1,15 +1,16 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 -Comment: The licensing of this charm is aligned to upstream ceph - as the ceph upstart integration is distributed as part of the charm. Files: * Copyright: 2012, Canonical Ltd. -License: LGPL-2.1 +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at -Files: files/upstart/* -Copyright: 2004-2010 by Sage Weil -License: LGPL-2.1 + http://www.apache.org/licenses/LICENSE-2.0 -License: LGPL-2.1 - On Debian GNU/Linux system you can find the complete text of the - LGPL-2.1 license in '/usr/share/common-licenses/LGPL-2.1' + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + License for the specific language governing permissions and limitations + under the License. diff --git a/ceph-mon/files/upstart/ceph-create-keys.conf b/ceph-mon/files/upstart/ceph-create-keys.conf deleted file mode 100644 index 6fb45818..00000000 --- a/ceph-mon/files/upstart/ceph-create-keys.conf +++ /dev/null @@ -1,8 +0,0 @@ -description "Create Ceph client.admin key when possible" - -start on started ceph-mon -stop on runlevel [!2345] - -task - -exec /usr/sbin/ceph-create-keys --cluster="${cluster:-ceph}" -i "${id:-$(hostname)}" diff --git a/ceph-mon/files/upstart/ceph-hotplug.conf b/ceph-mon/files/upstart/ceph-hotplug.conf deleted file mode 100644 index d82e7c84..00000000 --- a/ceph-mon/files/upstart/ceph-hotplug.conf +++ /dev/null @@ -1,11 +0,0 @@ -description "Ceph hotplug" - -start on block-device-added \ - DEVTYPE=partition \ - ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d -stop on runlevel [!2345] - -task -instance $DEVNAME - -exec /usr/sbin/ceph-disk activate --mount -- "$DEVNAME" diff --git a/ceph-mon/files/upstart/ceph-mon-all-starter.conf b/ceph-mon/files/upstart/ceph-mon-all-starter.conf deleted file mode 100644 index f7188cb7..00000000 --- a/ceph-mon/files/upstart/ceph-mon-all-starter.conf +++ /dev/null @@ -1,20 +0,0 @@ -description "Ceph MON (start all instances)" - -start on starting ceph-mon-all -stop on runlevel [!2345] - -task - -script - set -e - # TODO what's the valid charset for cluster names and mon ids? - find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \ - | while read f; do - if [ -e "/var/lib/ceph/mon/$f/done" ]; then - cluster="${f%%-*}" - id="${f#*-}" - - initctl emit ceph-mon cluster="$cluster" id="$id" - fi - done -end script diff --git a/ceph-mon/files/upstart/ceph-mon-all.conf b/ceph-mon/files/upstart/ceph-mon-all.conf deleted file mode 100644 index 006f2f20..00000000 --- a/ceph-mon/files/upstart/ceph-mon-all.conf +++ /dev/null @@ -1,4 +0,0 @@ -description "Ceph monitor (all instances)" - -start on (local-filesystems and net-device-up IFACE!=lo) -stop on runlevel [!2345] diff --git a/ceph-mon/files/upstart/ceph-mon.conf b/ceph-mon/files/upstart/ceph-mon.conf deleted file mode 100644 index 74a4b643..00000000 --- a/ceph-mon/files/upstart/ceph-mon.conf +++ /dev/null @@ -1,25 +0,0 @@ -description "Ceph MON" - -start on ceph-mon -stop on runlevel [!2345] or stopping ceph-mon-all - -respawn -respawn limit 5 30 - -pre-start script - set -e - test -x /usr/bin/ceph-mon || { stop; exit 0; } - test -d "/var/lib/ceph/mon/${cluster:-ceph}-$id" || { stop; exit 0; } - - install -d -m0755 /var/run/ceph -end script - -instance ${cluster:-ceph}/$id -export cluster -export id - -# this breaks oneiric -#usage "cluster = name of cluster (defaults to 'ceph'); id = monitor instance id" - -exec /usr/bin/ceph-mon --cluster="${cluster:-ceph}" -i "$id" -f - diff --git a/ceph-mon/hooks/__init__.py b/ceph-mon/hooks/__init__.py index e69de29b..9b088de8 100644 --- a/ceph-mon/hooks/__init__.py +++ b/ceph-mon/hooks/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 01a2a569..026e783d 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -1,10 +1,17 @@ +# Copyright 2016 Canonical Ltd # -# Copyright 2012 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# James Page -# Paul Collins +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import json import subprocess import time diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index 329da8a8..adaa4674 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -1,7 +1,19 @@ #!/usr/bin/python # -# Copyright 2015 Canonical Ltd. +# Copyright 2016 Canonical Ltd # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import json from charmhelpers.core.hookenv import ( diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8b2bf5ef..b83de3bb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -1,17 +1,21 @@ #!/usr/bin/python - # -# Copyright 2012 Canonical Ltd. +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# Paul Collins -# James Page +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
-import glob import os import random -import shutil import socket import subprocess import sys @@ -263,13 +267,6 @@ def upgrade_monitor(): sys.exit(1) -def install_upstart_scripts(): - # Only install upstart configurations for older versions - if cmp_pkgrevno('ceph', "0.55.1") < 0: - for x in glob.glob('files/upstart/*.conf'): - shutil.copy(x, '/etc/init/') - - @hooks.hook('install.real') @harden() def install(): @@ -277,7 +274,6 @@ def install(): add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) - install_upstart_scripts() def emit_cephconf(): @@ -614,7 +610,6 @@ def client_relation_changed(): def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) - install_upstart_scripts() ceph.update_monfs() upgrade_keys() mon_relation_joined() diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index b61912a9..33b02fb7 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -1,11 +1,16 @@ - +# Copyright 2016 Canonical Ltd # -# Copyright 2012 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Authors: -# James Page -# Paul Collins +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import socket import re diff --git a/ceph-mon/tests/014-basic-precise-icehouse b/ceph-mon/tests/014-basic-precise-icehouse index 020cd751..5957305e 100755 --- a/ceph-mon/tests/014-basic-precise-icehouse +++ b/ceph-mon/tests/014-basic-precise-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on precise-icehouse.""" diff --git a/ceph-mon/tests/015-basic-trusty-icehouse b/ceph-mon/tests/015-basic-trusty-icehouse index f67fea91..a8639fe4 100755 --- a/ceph-mon/tests/015-basic-trusty-icehouse +++ b/ceph-mon/tests/015-basic-trusty-icehouse @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" diff --git a/ceph-mon/tests/016-basic-trusty-juno b/ceph-mon/tests/016-basic-trusty-juno index 28c7684e..54f3670c 100755 --- a/ceph-mon/tests/016-basic-trusty-juno +++ b/ceph-mon/tests/016-basic-trusty-juno @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on trusty-juno.""" diff --git a/ceph-mon/tests/017-basic-trusty-kilo b/ceph-mon/tests/017-basic-trusty-kilo index 0a787b22..c3315591 100755 --- a/ceph-mon/tests/017-basic-trusty-kilo +++ b/ceph-mon/tests/017-basic-trusty-kilo @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on trusty-kilo.""" diff --git a/ceph-mon/tests/018-basic-trusty-liberty b/ceph-mon/tests/018-basic-trusty-liberty index f339371b..d6542657 100755 --- a/ceph-mon/tests/018-basic-trusty-liberty +++ b/ceph-mon/tests/018-basic-trusty-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on trusty-liberty.""" diff --git a/ceph-mon/tests/019-basic-trusty-mitaka b/ceph-mon/tests/019-basic-trusty-mitaka index 2eca19d6..e18197be 100755 --- a/ceph-mon/tests/019-basic-trusty-mitaka +++ b/ceph-mon/tests/019-basic-trusty-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on trusty-mitaka.""" diff --git a/ceph-mon/tests/020-basic-wily-liberty b/ceph-mon/tests/020-basic-wily-liberty index b0d8096b..fd4ebc24 100755 --- a/ceph-mon/tests/020-basic-wily-liberty +++ b/ceph-mon/tests/020-basic-wily-liberty @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on wily-liberty.""" diff --git a/ceph-mon/tests/021-basic-xenial-mitaka b/ceph-mon/tests/021-basic-xenial-mitaka index ae3d3350..8d93f7b5 100755 --- a/ceph-mon/tests/021-basic-xenial-mitaka +++ b/ceph-mon/tests/021-basic-xenial-mitaka @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Amulet tests on a basic ceph deployment on xenial-mitaka.""" diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index c890f210..55dd0eec 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import re diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py index f80aab3d..b7fe4e1b 100644 --- a/ceph-mon/unit_tests/__init__.py +++ b/ceph-mon/unit_tests/__init__.py @@ -1,2 +1,16 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import sys sys.path.append('hooks') diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index b720d94a..38153d2a 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import json import unittest diff --git a/ceph-mon/unit_tests/test_ceph_networking.py b/ceph-mon/unit_tests/test_ceph_networking.py index ae3a7ff5..168e82fc 100644 --- a/ceph-mon/unit_tests/test_ceph_networking.py +++ b/ceph-mon/unit_tests/test_ceph_networking.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import test_utils import charmhelpers.core.hookenv as hookenv import utils as ceph_utils diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index fba81769..4a6713c7 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -1,4 +1,16 @@ -__author__ = 'chris' +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import json import unittest diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 0900b2e5..ba287f9d 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import mock import test_utils import sys diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py index 82e9c55a..6ff5a961 100644 --- a/ceph-mon/unit_tests/test_upgrade_roll.py +++ b/ceph-mon/unit_tests/test_upgrade_roll.py @@ -1,11 +1,22 @@ -__author__ = 'chris' +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import time from mock import patch, call, MagicMock import sys -sys.path.append('/home/chris/repos/ceph-mon/hooks') - import test_utils # python-apt is not installed as part of test-requirements but is imported by diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 663a0488..97d3ee84 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -1,3 +1,17 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import logging import unittest import os From ca2589a0c7b3518d80d32d574e9908d38565520b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 26 May 2016 14:14:22 +0100 Subject: [PATCH 1149/2699] Add support for user-provided ceph config Adds a new config-flags option to the charm that supports setting a dictionary of ceph configuration settings that will be applied to ceph.conf. This implementation supports config sections so that settings can be applied to any section supported by the ceph.conf template in the charm. Includes some unit test code cleanup. 
Closes-Bug: 1522375 Change-Id: I5fa0890d87425499dbd48af6f2bc1f196713a975 --- ceph-mon/charm-helpers-hooks.yaml | 4 + ceph-mon/config.yaml | 19 +++ ceph-mon/hooks/ceph_hooks.py | 14 ++- .../contrib/openstack/ha/utils.py | 21 +++- .../charmhelpers/contrib/openstack/utils.py | 32 +++-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 2 +- ceph-mon/templates/ceph.conf | 36 ++++-- ceph-mon/unit_tests/test_ceph_broker.py | 3 +- ceph-mon/unit_tests/test_ceph_hooks.py | 117 ++++++++++++++++++ ceph-mon/unit_tests/test_ceph_networking.py | 2 +- ceph-mon/unit_tests/test_ceph_ops.py | 4 - ceph-mon/unit_tests/test_status.py | 3 +- ceph-mon/unit_tests/test_upgrade_roll.py | 2 +- 13 files changed, 225 insertions(+), 34 deletions(-) create mode 100644 ceph-mon/unit_tests/test_ceph_hooks.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 5cc0852a..aa5351d9 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -8,6 +8,10 @@ include: - payload.execd - contrib.openstack - contrib.network.ip + - contrib.openstack: + - alternatives + - exceptions + - utils - contrib.charmsupport - contrib.hardening|inc=* - contrib.python.packages diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 55bd761a..00804cac 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -11,6 +11,25 @@ options: . This configuration element is mandatory and the service will fail on install if it is not provided. + config-flags: + type: string + default: + description: | + User-provided Ceph configuration. Supports a string representation of + a Python dictionary where each top-level key represents a section in + the ceph.conf template. You may only use sections supported in the + template. + . + WARNING: this is not the recommended way to configure the underlying + services that this charm installs and is used at the user's own risk. + This option is mainly provided as a stop-gap for users who either + want to test the effect of modifying some config or have found + a critical bug in the way the charm has configured their services + and need it fixed immediately. We ask that whenever this is used, + that the user consider opening a bug on this charm at + http://bugs.launchpad.net/charms providing an explanation of why the + config was needed so that we may consider it for inclusion as a + natively supported config in the charm.
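To make the config-flags format concrete: given the option described above and the CephConfContext wiring in the hook changes below, a setting such as the following (using the same value the new unit tests exercise; Juju 2.x client syntax assumed) lands in the [mon] section of the rendered ceph.conf:

    juju config ceph-mon config-flags='{"mon": {"mon sync max retries": 10}}'

Keys under sections other than the permitted 'global', 'mds' and 'mon' are silently dropped, as the test_get_ceph_context_w_config_flags_invalid test below demonstrates.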
auth-supported: type: string default: cephx diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index b83de3bb..d6a43405 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -63,6 +63,7 @@ from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render from charmhelpers.contrib.storage.linux.ceph import ( + CephConfContext, monitor_key_set, monitor_key_exists, monitor_key_get, @@ -276,7 +277,7 @@ def install(): apt_install(packages=ceph.PACKAGES, fatal=True) -def emit_cephconf(): +def get_ceph_context(): networks = get_networks('ceph-public-network') public_network = ', '.join(networks) @@ -288,7 +289,6 @@ def emit_cephconf(): 'mon_hosts': ' '.join(get_mon_hosts()), 'fsid': leader_get('fsid'), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, - 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': public_network, 'ceph_cluster_network': cluster_network, @@ -306,12 +306,20 @@ def emit_cephconf(): cephcontext['public_addr'] = get_public_addr() cephcontext['cluster_addr'] = get_cluster_addr() + # NOTE(dosaboy): these sections must correspond to what is supported in the + # config template. + sections = ['global', 'mds', 'mon'] + cephcontext.update(CephConfContext(permitted_sections=sections)()) + return cephcontext + + +def emit_cephconf(): # Install ceph.conf as an alternative to support # co-existence with other charms that write this file charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) - render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) + render('ceph.conf', charm_ceph_conf, get_ceph_context(), perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 34064237..2a8a1291 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -36,6 +36,10 @@ DEBUG, ) +from charmhelpers.core.host import ( + lsb_release +) + from charmhelpers.contrib.openstack.ip import ( resolve_address, ) @@ -63,8 +67,11 @@ def update_dns_ha_resource_params(resources, resource_params, DNS HA """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + settings = ['os-admin-hostname', 'os-internal-hostname', - 'os-public-hostname'] + 'os-public-hostname', 'os-access-hostname'] # Check which DNS settings are set and update dictionaries hostname_group = [] @@ -109,3 +116,15 @@ def update_dns_ha_resource_params(resources, resource_params, msg = 'DNS HA: Hostname group has no members.' 
status_set('blocked', msg) raise DNSHAException(msg) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 53e58424..8da5c5ed 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -725,14 +725,15 @@ def git_install_requested(): requirements_dir = None -def git_default_repos(projects): +def git_default_repos(projects_yaml): """ Returns default repos if a default openstack-origin-git value is specified. """ service = service_name() + core_project = service for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): - if projects == default: + if projects_yaml == default: # add the requirements repo first repo = { @@ -742,34 +743,41 @@ def git_default_repos(projects): } repos = [repo] - # neutron and nova charms require some additional repos - if service == 'neutron': - for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + # neutron-* and nova-* charms require some additional repos + if service in ['neutron-api', 'neutron-gateway', + 'neutron-openvswitch']: + core_project = 'neutron' + for project in ['neutron-fwaas', 'neutron-lbaas', + 'neutron-vpnaas']: repo = { - 'name': svc, - 'repository': GIT_DEFAULT_REPOS[svc], + 'name': project, + 'repository': GIT_DEFAULT_REPOS[project], 'branch': branch, } repos.append(repo) - elif service == 'nova': + + elif service in ['nova-cloud-controller', 'nova-compute']: + core_project = 'nova' repo = { 'name': 'neutron', 'repository': GIT_DEFAULT_REPOS['neutron'], 'branch': branch, } repos.append(repo) + elif service == 'openstack-dashboard': + core_project = 'horizon' - # finally add the current service's repo + # finally add the current service's core project repo repo = { - 'name': service, - 'repository': GIT_DEFAULT_REPOS[service], + 'name': core_project, + 'repository': GIT_DEFAULT_REPOS[core_project], 'branch': branch, } repos.append(repo) return yaml.dump(dict(repositories=repos)) - return projects + return projects_yaml def _git_yaml_load(projects_yaml): diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 01321296..db117f9e 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -1006,4 +1006,4 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index f64db7cb..b48270fe 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -1,11 +1,11 @@ [global] -{% if old_auth %} +{%- if old_auth %} auth supported = {{ auth_supported }} -{% else %} +{%- else %} auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} -{% endif %} +{%- endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} @@ -17,23 +17,43 @@ mon cluster log to syslog 
= {{ use_syslog }} debug mon = {{ loglevel }}/5 debug osd = {{ loglevel }}/5 -{%- if ceph_public_network is string %} +{% if ceph_public_network is string %} public network = {{ ceph_public_network }} {%- endif %} {%- if ceph_cluster_network is string %} cluster network = {{ ceph_cluster_network }} {%- endif %} - -{% if public_addr %} +{%- if public_addr %} public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} +{%- endif %} +{%- if cluster_addr %} cluster addr = {{ cluster_addr }} {%- endif %} +{% if global -%} +# The following are user-provided options provided via the config-flags charm option. +# User-provided [global] section config +{% for key in global -%} +{{ key }} = {{ global[key] }} +{% endfor %} +{% endif %} [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring +{% if mon -%} +# The following are user-provided options provided via the config-flags charm option. +# User-provided [mon] section config +{% for key in mon -%} +{{ key }} = {{ mon[key] }} +{% endfor %} +{% endif %} [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring +{% if mds -%} +# The following are user-provided options provided via the config-flags charm option. +# User-provided [mds] section config +{% for key in mds -%} +{{ key }} = {{ mds[key] }} +{% endfor %} +{% endif %} diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 38153d2a..45bdd3a9 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -13,9 +13,8 @@ # limitations under the License. import json -import unittest - import mock +import unittest import ceph_broker diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py new file mode 100644 index 00000000..a9702177 --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -0,0 +1,117 @@ +import copy +import unittest +import sys + +from mock import patch, MagicMock + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
+mock_apt = MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = MagicMock() + +import charmhelpers.contrib.storage.linux.ceph as ceph +import ceph_hooks + + +CHARM_CONFIG = {'config-flags': '', + 'auth-supported': False, + 'fsid': '1234', + 'loglevel': 1, + 'use-syslog': True, + 'osd-journal-size': 1024, + 'use-direct-io': True, + 'osd-format': 'ext4', + 'prefer-ipv6': False} + + +class CephHooksTestCase(unittest.TestCase): + def setUp(self): + super(CephHooksTestCase, self).setUp() + + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'public_addr': '10.0.0.1', + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['config-flags'] = '{"mon": {"mon sync max retries": 10}}' + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'mon': {'mon sync max retries': 10}, + 'public_addr': '10.0.0.1', + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_w_config_flags_invalid(self, mock_config, + mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['config-flags'] = ('{"mon": {"mon sync max retries": 10},' + '"foo": "bar"}') + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: 
config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'mon': {'mon sync max retries': 10}, + 'public_addr': '10.0.0.1', + 'use_syslog': 'true'} + self.assertEqual(ctxt, expected) diff --git a/ceph-mon/unit_tests/test_ceph_networking.py b/ceph-mon/unit_tests/test_ceph_networking.py index 168e82fc..9d16573c 100644 --- a/ceph-mon/unit_tests/test_ceph_networking.py +++ b/ceph-mon/unit_tests/test_ceph_networking.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import test_utils import charmhelpers.core.hookenv as hookenv +import test_utils import utils as ceph_utils TO_PATCH_SPACES = [ diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 4a6713c7..062a5926 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -220,7 +220,3 @@ def test_set_invalid_pool_value(self): }]}) rc = ceph_broker.process_requests(reqs) self.assertEqual(json.loads(rc)['exit-code'], 1) - - -if __name__ == '__main__': - unittest.main() diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 0900b2e5..e2792e68 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -13,9 +13,10 @@ # limitations under the License. import mock -import test_utils import sys +import test_utils + # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. mock_apt = mock.MagicMock() diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py index 82e9c55a..bb219bfa 100644 --- a/ceph-mon/unit_tests/test_upgrade_roll.py +++ b/ceph-mon/unit_tests/test_upgrade_roll.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import time from mock import patch, call, MagicMock -import sys import test_utils From c36dfd52a4db40d649e0557bdcc2ce6880df5055 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 28 Mar 2016 12:54:24 -0700 Subject: [PATCH 1150/2699] AppArmor Profile This change adds an AppArmor profile for ceph-mon. It defaults to complain mode, which logs all AppArmor failures but does not enforce them. Change-Id: I8b98580cda84191dff46105f8ce01d4a7a7d414f --- ceph-mon/config.yaml | 6 +++++ ceph-mon/files/apparmor/usr.bin.ceph-mon | 28 +++++++++++++++++++ ceph-mon/hooks/ceph.py | 3 ++- ceph-mon/hooks/ceph_hooks.py | 34 ++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/files/apparmor/usr.bin.ceph-mon diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 00804cac..931631b0 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -156,3 +156,9 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. + aa-profile-mode: + type: string + default: 'complain' + description: | + Enable the AppArmor profile. Valid settings: 'complain', 'enforce' or 'disabled'. + Defaults to complain mode.
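For illustration, the install_apparmor_profile hook below copies the profile into /etc/apparmor.d/local/ and applies the configured mode with the matching utility from apparmor-utils, so switching a deployed application to enforce mode (Juju 2.x client syntax assumed) is:

    juju config ceph-mon aa-profile-mode=enforce

which on each unit is roughly equivalent to:

    aa-enforce /etc/apparmor.d/local/usr.bin.ceph-mon

Any unrecognised mode is logged as an error and the charm falls back to complain mode.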
diff --git a/ceph-mon/files/apparmor/usr.bin.ceph-mon b/ceph-mon/files/apparmor/usr.bin.ceph-mon new file mode 100644 index 00000000..9c685916 --- /dev/null +++ b/ceph-mon/files/apparmor/usr.bin.ceph-mon @@ -0,0 +1,28 @@ +# vim:syntax=apparmor +# Author: Chris Holcombe +#include <tunables/global> + +/usr/bin/ceph-mon { + #include <abstractions/base> + + /usr/bin/ceph-mon mr, + + network inet stream, + network inet6 stream, + + owner /etc/ceph/* rw, + /etc/passwd r, + + /proc/@{pid}/auxv r, + /proc/@{pid}/net/dev r, + + /run/ceph/* rw, + /tmp/ r, + /var/lib/ceph/** rwk, + /var/lib/charm/ceph-*/ceph.conf r, + /var/log/ceph/* rwk, + /var/run/ceph/* rwk, + /var/tmp/ r, + # Site-specific additions and overrides. + #include <local/usr.bin.ceph-mon> +} diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 026e783d..6bbb1074 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -46,7 +46,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', + 'xfsprogs', 'apparmor-utils'] def ceph_user(): diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index d6a43405..72de1bc0 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import glob import os import random @@ -21,6 +22,7 @@ import sys import uuid import time +import shutil import ceph from charmhelpers.core import host @@ -82,6 +84,12 @@ hooks = Hooks() +app_armor_modes = { + 'complain': 'aa-complain', + 'disabled': 'aa-disable', + 'enforce': 'aa-enforce', +} + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' SCRIPTS_DIR = '/usr/local/bin' STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' @@ -268,6 +276,31 @@ def upgrade_monitor(): sys.exit(1) +def install_apparmor_profile(): + log('Installing app-armor-profiles') + aa_mode = config('aa-profile-mode') + if aa_mode not in app_armor_modes: + log('Invalid apparmor mode: {}. Defaulting to complain'.format( + aa_mode), level='error') + aa_mode = 'complain' + apparmor_dir = os.path.join(os.sep, + 'etc', + 'apparmor.d', + 'local') + + for x in glob.glob('files/apparmor/*'): + shutil.copy(x, apparmor_dir) + try: + cmd = [ + app_armor_modes[aa_mode], + os.path.join(apparmor_dir, os.path.split(x)[-1]) + ] + subprocess.check_output(cmd) + except subprocess.CalledProcessError as err: + log('{} failed with error {}'.format( + app_armor_modes[aa_mode], err.output), level='error') + + @hooks.hook('install.real') @harden() def install(): @@ -374,6 +407,7 @@ def config_changed(): status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() + install_apparmor_profile() def get_mon_hosts(): From 7fe5c4747a15718aa70642b1e5402e10ee5cde20 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 6 Jul 2016 15:55:47 +0100 Subject: [PATCH 1151/2699] Resync charmhelpers for licensing change The charm-helpers project has re-licensed to Apache 2.0, in line with the agreed licensing approach to interfaces, layers and charms generally. Resync helpers to bring charmhelpers in line with the charm codebase.
Change-Id: I9ab9a5d8801536e68e3ee672be0cc9accdb5fd9a --- ceph-osd/hooks/charmhelpers/__init__.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/__init__.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/benchmark.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/commands.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/hookenv.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/host.py | 20 ++- ceph-osd/hooks/charmhelpers/cli/unitdata.py | 20 ++- .../hooks/charmhelpers/contrib/__init__.py | 20 ++- .../contrib/charmsupport/__init__.py | 20 ++- .../charmhelpers/contrib/charmsupport/nrpe.py | 20 ++- .../contrib/charmsupport/volumes.py | 20 ++- .../contrib/hardening/__init__.py | 20 ++- .../contrib/hardening/apache/__init__.py | 20 ++- .../hardening/apache/checks/__init__.py | 20 ++- .../contrib/hardening/apache/checks/config.py | 20 ++- .../contrib/hardening/audits/__init__.py | 20 ++- .../contrib/hardening/audits/apache.py | 20 ++- .../contrib/hardening/audits/apt.py | 20 ++- .../contrib/hardening/audits/file.py | 20 ++- .../charmhelpers/contrib/hardening/harden.py | 20 ++- .../contrib/hardening/host/__init__.py | 20 ++- .../contrib/hardening/host/checks/__init__.py | 20 ++- .../contrib/hardening/host/checks/apt.py | 20 ++- .../contrib/hardening/host/checks/limits.py | 20 ++- .../contrib/hardening/host/checks/login.py | 20 ++- .../hardening/host/checks/minimize_access.py | 20 ++- .../contrib/hardening/host/checks/pam.py | 20 ++- .../contrib/hardening/host/checks/profile.py | 20 ++- .../hardening/host/checks/securetty.py | 20 ++- .../hardening/host/checks/suid_sgid.py | 20 ++- .../contrib/hardening/host/checks/sysctl.py | 20 ++- .../contrib/hardening/mysql/__init__.py | 20 ++- .../hardening/mysql/checks/__init__.py | 20 ++- .../contrib/hardening/mysql/checks/config.py | 20 ++- .../contrib/hardening/ssh/__init__.py | 20 ++- .../contrib/hardening/ssh/checks/__init__.py | 20 ++- .../contrib/hardening/ssh/checks/config.py | 20 ++- .../contrib/hardening/templating.py | 20 ++- .../charmhelpers/contrib/hardening/utils.py | 20 ++- .../charmhelpers/contrib/network/__init__.py | 20 ++- .../hooks/charmhelpers/contrib/network/ip.py | 20 ++- .../contrib/openstack/__init__.py | 20 ++- .../contrib/openstack/alternatives.py | 20 ++- .../contrib/openstack/exceptions.py | 15 +++ .../charmhelpers/contrib/openstack/utils.py | 116 +++++++++++++----- .../charmhelpers/contrib/python/__init__.py | 20 ++- .../charmhelpers/contrib/python/packages.py | 26 ++-- .../charmhelpers/contrib/storage/__init__.py | 20 ++- .../contrib/storage/linux/__init__.py | 20 ++- .../contrib/storage/linux/ceph.py | 20 ++- .../contrib/storage/linux/loopback.py | 20 ++- .../charmhelpers/contrib/storage/linux/lvm.py | 20 ++- .../contrib/storage/linux/utils.py | 20 ++- ceph-osd/hooks/charmhelpers/core/__init__.py | 20 ++- .../hooks/charmhelpers/core/decorators.py | 20 ++- ceph-osd/hooks/charmhelpers/core/files.py | 20 ++- ceph-osd/hooks/charmhelpers/core/fstab.py | 20 ++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 22 ++-- ceph-osd/hooks/charmhelpers/core/host.py | 20 ++- ceph-osd/hooks/charmhelpers/core/hugepage.py | 20 ++- ceph-osd/hooks/charmhelpers/core/kernel.py | 20 ++- .../charmhelpers/core/services/__init__.py | 20 ++- .../hooks/charmhelpers/core/services/base.py | 20 ++- .../charmhelpers/core/services/helpers.py | 20 ++- ceph-osd/hooks/charmhelpers/core/strutils.py | 20 ++- ceph-osd/hooks/charmhelpers/core/sysctl.py | 20 ++- .../hooks/charmhelpers/core/templating.py | 20 ++- ceph-osd/hooks/charmhelpers/core/unitdata.py | 21 ++-- 
ceph-osd/hooks/charmhelpers/fetch/__init__.py | 20 ++- .../hooks/charmhelpers/fetch/archiveurl.py | 20 ++- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 20 ++- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 20 ++- ceph-osd/tests/charmhelpers/__init__.py | 20 ++- .../tests/charmhelpers/contrib/__init__.py | 20 ++- .../charmhelpers/contrib/amulet/__init__.py | 20 ++- .../charmhelpers/contrib/amulet/deployment.py | 20 ++- .../charmhelpers/contrib/amulet/utils.py | 20 ++- .../contrib/openstack/__init__.py | 20 ++- .../contrib/openstack/amulet/__init__.py | 20 ++- .../contrib/openstack/amulet/deployment.py | 20 ++- .../contrib/openstack/amulet/utils.py | 20 ++- 81 files changed, 818 insertions(+), 902 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py index 2d37ab31..389b490f 100644 --- a/ceph-osd/hooks/charmhelpers/cli/__init__.py +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import inspect import argparse diff --git a/ceph-osd/hooks/charmhelpers/cli/benchmark.py b/ceph-osd/hooks/charmhelpers/cli/benchmark.py index b23c16ce..303af14b 100644 --- a/ceph-osd/hooks/charmhelpers/cli/benchmark.py +++ b/ceph-osd/hooks/charmhelpers/cli/benchmark.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.contrib.benchmark import Benchmark diff --git a/ceph-osd/hooks/charmhelpers/cli/commands.py b/ceph-osd/hooks/charmhelpers/cli/commands.py index 7e91db00..b9310565 100644 --- a/ceph-osd/hooks/charmhelpers/cli/commands.py +++ b/ceph-osd/hooks/charmhelpers/cli/commands.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
""" This module loads sub-modules into the python runtime so they can be diff --git a/ceph-osd/hooks/charmhelpers/cli/hookenv.py b/ceph-osd/hooks/charmhelpers/cli/hookenv.py index 265c816e..bd72f448 100644 --- a/ceph-osd/hooks/charmhelpers/cli/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/cli/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import hookenv diff --git a/ceph-osd/hooks/charmhelpers/cli/host.py b/ceph-osd/hooks/charmhelpers/cli/host.py index 58e78d6b..40396849 100644 --- a/ceph-osd/hooks/charmhelpers/cli/host.py +++ b/ceph-osd/hooks/charmhelpers/cli/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import host diff --git a/ceph-osd/hooks/charmhelpers/cli/unitdata.py b/ceph-osd/hooks/charmhelpers/cli/unitdata.py index d1cd95bf..c5728582 100644 --- a/ceph-osd/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/cli/unitdata.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import unitdata diff --git a/ceph-osd/hooks/charmhelpers/contrib/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 2f246429..17976fb5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py index 320961b9..7ea43f08 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. ''' Functions for managing volumes in juju units. One volume is supported per unit. diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py index a1335320..30a3e943 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/__init__.py @@ -1,15 +1,13 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from os import path diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py index d1304792..3bc2ebd4 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 8249ca01..51b636f7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os import re diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 6a7057b3..9bf9c3c6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. class BaseAudit(object): # NO-QA diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py index cf3c987d..d812948a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import re import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py index e94af031..3dc14e3c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import absolute_import # required for external apt import from apt import apt_pkg diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py index 0fb545a9..257c6351 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import grp import os diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py index ac7568d6..b55764cd 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py index c3bd5985..0e7e409f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py index 2c221cda..7ce41b00 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.utils import get_settings from charmhelpers.contrib.hardening.audits.apt import ( diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py index 8ce9dc2b..e94f5ebe 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( DirectoryPermissionAudit, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py index d32c4f60..fe2bc6ef 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from six import string_types diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py index c471064b..6e64be00 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.contrib.hardening.audits.file import ( FilePermissionAudit, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py index 383fe28e..9b38d5f0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( check_output, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index f7443357..56d65263 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py index e33c73ca..34cd0217 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py index 0534689b..bcbe3fde 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index 4a76d74e..f1ea5813 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import platform diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py index d4f0ec19..1990d851 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index 3af8b89d..a79f33b7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import subprocess diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py index b85150d5..edaf484b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 3fb6ae8d..94e524e2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py index d2ab7dc9..2174c645 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py index a6743a4d..ff7485c2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import grp diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 99d78f2f..b5f457c7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import glob import re diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py index ef77caf3..1501641e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
''' Helper for managing alternatives for file conflict resolution ''' diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py index ea4eb68e..f85ae4f4 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -1,3 +1,18 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + class OSContextError(Exception): """Raised when an error occurs during context generation. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 53e58424..f4401913 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Common python helper functions used for OpenStack charms. from collections import OrderedDict @@ -222,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'icehouse': 'icehouse-eol', 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', @@ -725,14 +722,15 @@ def git_install_requested(): requirements_dir = None -def git_default_repos(projects): +def git_default_repos(projects_yaml): """ Returns default repos if a default openstack-origin-git value is specified. 
""" service = service_name() + core_project = service for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): - if projects == default: + if projects_yaml == default: # add the requirements repo first repo = { @@ -742,34 +740,47 @@ def git_default_repos(projects): } repos = [repo] - # neutron and nova charms require some additional repos - if service == 'neutron': - for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + # NOTE(coreycb): This is a temp work-around until the requirements + # repo moves from stable/kilo branch to kilo-eol tag. The core + # repos have already done this. + if default == 'kilo': + branch = 'kilo-eol' + + # neutron-* and nova-* charms require some additional repos + if service in ['neutron-api', 'neutron-gateway', + 'neutron-openvswitch']: + core_project = 'neutron' + for project in ['neutron-fwaas', 'neutron-lbaas', + 'neutron-vpnaas', 'nova']: repo = { - 'name': svc, - 'repository': GIT_DEFAULT_REPOS[svc], + 'name': project, + 'repository': GIT_DEFAULT_REPOS[project], 'branch': branch, } repos.append(repo) - elif service == 'nova': + + elif service in ['nova-cloud-controller', 'nova-compute']: + core_project = 'nova' repo = { 'name': 'neutron', 'repository': GIT_DEFAULT_REPOS['neutron'], 'branch': branch, } repos.append(repo) + elif service == 'openstack-dashboard': + core_project = 'horizon' - # finally add the current service's repo + # finally add the current service's core project repo repo = { - 'name': service, - 'repository': GIT_DEFAULT_REPOS[service], + 'name': core_project, + 'repository': GIT_DEFAULT_REPOS[core_project], 'branch': branch, } repos.append(repo) return yaml.dump(dict(repositories=repos)) - return projects + return projects_yaml def _git_yaml_load(projects_yaml): @@ -829,6 +840,7 @@ def git_clone_and_install(projects_yaml, core_project): pip_install(p, upgrade=True, proxy=http_proxy, venv=os.path.join(parent_dir, 'venv')) + constraints = None for p in projects['repositories']: repo = p['repository'] branch = p['branch'] @@ -840,10 +852,15 @@ def git_clone_and_install(projects_yaml, core_project): parent_dir, http_proxy, update_requirements=False) requirements_dir = repo_dir + constraints = os.path.join(repo_dir, "upper-constraints.txt") + # upper-constraints didn't exist until after icehouse + if not os.path.isfile(constraints): + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements=True) + update_requirements=True, + constraints=constraints) os.environ = old_environ @@ -875,7 +892,7 @@ def _git_ensure_key_exists(key, keys): def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements): + update_requirements, constraints=None): """ Clone and install a single git repository. """ @@ -898,9 +915,10 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, juju_log('Installing git repo from dir: {}'.format(repo_dir)) if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv) + pip_install(repo_dir, proxy=http_proxy, venv=venv, + constraints=constraints) else: - pip_install(repo_dir, venv=venv) + pip_install(repo_dir, venv=venv, constraints=constraints) return repo_dir @@ -980,6 +998,7 @@ def git_generate_systemd_init_files(templates_dir): script generation, which is used by the OpenStack packages. 
""" for f in os.listdir(templates_dir): + # Create the init script and systemd unit file from the template if f.endswith(".init.in"): init_in_file = f init_file = f[:-8] @@ -1005,10 +1024,47 @@ def git_generate_systemd_init_files(templates_dir): os.remove(init_dest) if os.path.exists(service_dest): os.remove(service_dest) - shutil.move(init_source, init_dest) - shutil.move(service_source, service_dest) + shutil.copyfile(init_source, init_dest) + shutil.copyfile(service_source, service_dest) os.chmod(init_dest, 0o755) + for f in os.listdir(templates_dir): + # If there's a service.in file, use it instead of the generated one + if f.endswith(".service.in"): + service_in_file = f + service_file = f[:-3] + + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(service_in_source, service_source) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + + for f in os.listdir(templates_dir): + # Generate the systemd unit if there's no existing .service.in + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_in_file = "{}.service.in".format(init_file) + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + if not os.path.exists(service_in_source): + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + def os_workload_status(configs, required_interfaces, charm_func=None): """ diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py index a2411c37..e29bd1bb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import subprocess @@ -80,7 +78,8 @@ def pip_install_requirements(requirements, constraints=None, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, venv=None, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, + constraints=None, **options): """Install a python package""" if venv: venv_python = os.path.join(venv, 'bin/pip') @@ -95,6 +94,9 @@ def pip_install(package, fatal=False, upgrade=False, venv=None, **options): if upgrade: command.append('--upgrade') + if constraints: + command.extend(['-c', constraints]) + if isinstance(package, list): command.extend(package) else: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index b2484e78..8a9b9486 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2012 Canonical Ltd. diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py index 3a3f5146..1d6ae6f0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index 34b5f71a..4719f53c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( CalledProcessError, diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 4e35c297..3dc0df68 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-osd/hooks/charmhelpers/core/__init__.py b/ceph-osd/hooks/charmhelpers/core/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/hooks/charmhelpers/core/__init__.py +++ b/ceph-osd/hooks/charmhelpers/core/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/core/decorators.py b/ceph-osd/hooks/charmhelpers/core/decorators.py index bb05620b..6ad41ee4 100644 --- a/ceph-osd/hooks/charmhelpers/core/decorators.py +++ b/ceph-osd/hooks/charmhelpers/core/decorators.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2014 Canonical Ltd. diff --git a/ceph-osd/hooks/charmhelpers/core/files.py b/ceph-osd/hooks/charmhelpers/core/files.py index 0f12d321..fdd82b75 100644 --- a/ceph-osd/hooks/charmhelpers/core/files.py +++ b/ceph-osd/hooks/charmhelpers/core/files.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = 'Jorge Niedbalski ' diff --git a/ceph-osd/hooks/charmhelpers/core/fstab.py b/ceph-osd/hooks/charmhelpers/core/fstab.py index 3056fbac..d9fa9152 100644 --- a/ceph-osd/hooks/charmhelpers/core/fstab.py +++ b/ceph-osd/hooks/charmhelpers/core/fstab.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import io import os diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 01321296..48b2b9dc 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. @@ -1006,4 +1004,4 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index e367e450..35817b06 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. 
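[Editor's note] The one functional change in core/hookenv.py above is a Python 3 compatibility fix: subprocess.check_output() returns bytes on Python 3, so callers comparing the primary address against str values would silently fail; decoding makes network_get_primary_address() return text on both interpreters. A quick demonstration:

    # Illustrative only: why the explicit decode matters on Python 3.
    import subprocess

    out = subprocess.check_output(['echo', '10.0.0.1'])
    print(out.strip() == '10.0.0.1')                  # False on Python 3: bytes != str
    print(out.decode('UTF-8').strip() == '10.0.0.1')  # True on both 2 and 3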
diff --git a/ceph-osd/hooks/charmhelpers/core/hugepage.py b/ceph-osd/hooks/charmhelpers/core/hugepage.py index a783ad94..54b5b5e2 100644 --- a/ceph-osd/hooks/charmhelpers/core/hugepage.py +++ b/ceph-osd/hooks/charmhelpers/core/hugepage.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml from charmhelpers.core import fstab diff --git a/ceph-osd/hooks/charmhelpers/core/kernel.py b/ceph-osd/hooks/charmhelpers/core/kernel.py index 5dc64952..b166efec 100644 --- a/ceph-osd/hooks/charmhelpers/core/kernel.py +++ b/ceph-osd/hooks/charmhelpers/core/kernel.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = "Jorge Niedbalski " diff --git a/ceph-osd/hooks/charmhelpers/core/services/__init__.py b/ceph-osd/hooks/charmhelpers/core/services/__init__.py index 0928158b..61fd074e 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-osd/hooks/charmhelpers/core/services/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index a42660ca..ca9dc996 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import json diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 24237042..3e6e30d2 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import yaml diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index 7e3f9693..dd9b9717 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import re diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index 21cc8ab2..6e413e31 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import yaml diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index d2d8eafe..0a7560ff 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 338104e0..54ec969f 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -3,20 +3,17 @@ # # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Authors: # Kapil Thangavelu diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 68b0f94d..8f39f2fe 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import importlib from tempfile import NamedTemporaryFile diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index b8e0943d..dd24f9ec 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import hashlib diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index b743753e..b3404d85 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 65ed5319..f708d1ee 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call, CalledProcessError diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-osd/tests/charmhelpers/__init__.py +++ b/ceph-osd/tests/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. 
diff --git a/ceph-osd/tests/charmhelpers/contrib/__init__.py b/ceph-osd/tests/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/tests/charmhelpers/contrib/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index d451698d..0146236d 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import os diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 7e5c25a9..a39ed4c8 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import io import json diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6b917d0c..f7220f35 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. import logging import re diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef3bdccf..8040b570 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import json From dafa5686050a2166a130f8a107fe1734b814e76f Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 6 Jul 2016 15:56:07 +0100 Subject: [PATCH 1152/2699] Resync charmhelpers for licensing change The charm-helpers project has re-licensed to Apache 2.0 in line with the agreed licensing approach to interfaces, layers and charms generally. Resync helpers to bring charmhelpers in line with the charm codebase.
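For reference, a resync like this is normally generated with the charm-helpers sync tool rather than by editing the vendored files by hand: the tool reads a sync config at the charm root and copies the requested modules out of a charm-helpers checkout, which is why the upstream relicensing rewrites every copied header in one commit. A minimal sketch, assuming a charm-helpers-hooks.yaml at the charm root (the branch, destination and include values below are illustrative, not the exact config used for this commit):

    # charm-helpers-hooks.yaml -- sync config (illustrative values)
    branch: lp:charm-helpers          # source branch to sync from
    destination: hooks/charmhelpers   # where the copied modules land
    include:
        - core
        - fetch
        - contrib.storage.linux

    # regenerate hooks/charmhelpers from the branch above
    $ ./bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml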
Change-Id: I2ec6e5f8246687c7de77b5ff0c3f68fb6851b7b9 --- ceph-mon/hooks/charmhelpers/__init__.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/__init__.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/benchmark.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/commands.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/hookenv.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/host.py | 20 ++--- ceph-mon/hooks/charmhelpers/cli/unitdata.py | 20 ++--- .../hooks/charmhelpers/contrib/__init__.py | 20 ++--- .../contrib/charmsupport/__init__.py | 20 ++--- .../charmhelpers/contrib/charmsupport/nrpe.py | 20 ++--- .../contrib/charmsupport/volumes.py | 20 ++--- .../contrib/hardening/__init__.py | 20 ++--- .../contrib/hardening/apache/__init__.py | 20 ++--- .../hardening/apache/checks/__init__.py | 20 ++--- .../contrib/hardening/apache/checks/config.py | 20 ++--- .../contrib/hardening/audits/__init__.py | 20 ++--- .../contrib/hardening/audits/apache.py | 20 ++--- .../contrib/hardening/audits/apt.py | 20 ++--- .../contrib/hardening/audits/file.py | 20 ++--- .../charmhelpers/contrib/hardening/harden.py | 20 ++--- .../contrib/hardening/host/__init__.py | 20 ++--- .../contrib/hardening/host/checks/__init__.py | 20 ++--- .../contrib/hardening/host/checks/apt.py | 20 ++--- .../contrib/hardening/host/checks/limits.py | 20 ++--- .../contrib/hardening/host/checks/login.py | 20 ++--- .../hardening/host/checks/minimize_access.py | 20 ++--- .../contrib/hardening/host/checks/pam.py | 20 ++--- .../contrib/hardening/host/checks/profile.py | 20 ++--- .../hardening/host/checks/securetty.py | 20 ++--- .../hardening/host/checks/suid_sgid.py | 20 ++--- .../contrib/hardening/host/checks/sysctl.py | 20 ++--- .../contrib/hardening/mysql/__init__.py | 20 ++--- .../hardening/mysql/checks/__init__.py | 20 ++--- .../contrib/hardening/mysql/checks/config.py | 20 ++--- .../contrib/hardening/ssh/__init__.py | 20 ++--- .../contrib/hardening/ssh/checks/__init__.py | 20 ++--- .../contrib/hardening/ssh/checks/config.py | 20 ++--- .../contrib/hardening/templating.py | 20 ++--- .../charmhelpers/contrib/hardening/utils.py | 20 ++--- .../charmhelpers/contrib/network/__init__.py | 20 ++--- .../hooks/charmhelpers/contrib/network/ip.py | 20 ++--- .../contrib/openstack/__init__.py | 20 ++--- .../contrib/openstack/alternatives.py | 20 ++--- .../contrib/openstack/amulet/__init__.py | 20 ++--- .../contrib/openstack/amulet/deployment.py | 20 ++--- .../contrib/openstack/amulet/utils.py | 20 ++--- .../charmhelpers/contrib/openstack/context.py | 36 ++++---- .../contrib/openstack/exceptions.py | 15 ++++ .../contrib/openstack/files/__init__.py | 20 ++--- .../contrib/openstack/ha/__init__.py | 13 +++ .../contrib/openstack/ha/utils.py | 20 ++--- .../charmhelpers/contrib/openstack/ip.py | 21 ++--- .../charmhelpers/contrib/openstack/neutron.py | 20 ++--- .../contrib/openstack/templates/__init__.py | 20 ++--- .../contrib/openstack/templating.py | 20 ++--- .../charmhelpers/contrib/openstack/utils.py | 86 +++++++++++++++---- .../charmhelpers/contrib/python/__init__.py | 20 ++--- .../charmhelpers/contrib/python/packages.py | 26 +++--- .../charmhelpers/contrib/storage/__init__.py | 20 ++--- .../contrib/storage/linux/__init__.py | 20 ++--- .../contrib/storage/linux/ceph.py | 20 ++--- .../contrib/storage/linux/loopback.py | 20 ++--- .../charmhelpers/contrib/storage/linux/lvm.py | 20 ++--- .../contrib/storage/linux/utils.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/__init__.py | 20 ++--- .../hooks/charmhelpers/core/decorators.py | 20 ++--- 
ceph-mon/hooks/charmhelpers/core/files.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/fstab.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/hookenv.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/host.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/hugepage.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/kernel.py | 20 ++--- .../charmhelpers/core/services/__init__.py | 20 ++--- .../hooks/charmhelpers/core/services/base.py | 20 ++--- .../charmhelpers/core/services/helpers.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/strutils.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/sysctl.py | 20 ++--- .../hooks/charmhelpers/core/templating.py | 20 ++--- ceph-mon/hooks/charmhelpers/core/unitdata.py | 21 ++--- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 20 ++--- .../hooks/charmhelpers/fetch/archiveurl.py | 20 ++--- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 20 ++--- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 20 ++--- .../hooks/charmhelpers/payload/__init__.py | 20 ++--- ceph-mon/hooks/charmhelpers/payload/execd.py | 20 ++--- ceph-mon/tests/charmhelpers/__init__.py | 20 ++--- .../tests/charmhelpers/contrib/__init__.py | 20 ++--- .../charmhelpers/contrib/amulet/__init__.py | 20 ++--- .../charmhelpers/contrib/amulet/deployment.py | 20 ++--- .../charmhelpers/contrib/amulet/utils.py | 20 ++--- .../contrib/openstack/__init__.py | 20 ++--- .../contrib/openstack/amulet/__init__.py | 20 ++--- .../contrib/openstack/amulet/deployment.py | 20 ++--- .../contrib/openstack/amulet/utils.py | 20 ++--- 94 files changed, 927 insertions(+), 1031 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 2d37ab31..389b490f 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import inspect import argparse diff --git a/ceph-mon/hooks/charmhelpers/cli/benchmark.py b/ceph-mon/hooks/charmhelpers/cli/benchmark.py index b23c16ce..303af14b 100644 --- a/ceph-mon/hooks/charmhelpers/cli/benchmark.py +++ b/ceph-mon/hooks/charmhelpers/cli/benchmark.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.contrib.benchmark import Benchmark diff --git a/ceph-mon/hooks/charmhelpers/cli/commands.py b/ceph-mon/hooks/charmhelpers/cli/commands.py index 7e91db00..b9310565 100644 --- a/ceph-mon/hooks/charmhelpers/cli/commands.py +++ b/ceph-mon/hooks/charmhelpers/cli/commands.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ This module loads sub-modules into the python runtime so they can be diff --git a/ceph-mon/hooks/charmhelpers/cli/hookenv.py b/ceph-mon/hooks/charmhelpers/cli/hookenv.py index 265c816e..bd72f448 100644 --- a/ceph-mon/hooks/charmhelpers/cli/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/cli/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import hookenv diff --git a/ceph-mon/hooks/charmhelpers/cli/host.py b/ceph-mon/hooks/charmhelpers/cli/host.py index 58e78d6b..40396849 100644 --- a/ceph-mon/hooks/charmhelpers/cli/host.py +++ b/ceph-mon/hooks/charmhelpers/cli/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import host diff --git a/ceph-mon/hooks/charmhelpers/cli/unitdata.py b/ceph-mon/hooks/charmhelpers/cli/unitdata.py index d1cd95bf..c5728582 100644 --- a/ceph-mon/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/cli/unitdata.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import unitdata diff --git a/ceph-mon/hooks/charmhelpers/contrib/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 2f246429..17976fb5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py index 320961b9..7ea43f08 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Functions for managing volumes in juju units. One volume is supported per unit. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py index a1335320..30a3e943 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py @@ -1,15 +1,13 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py index d1304792..3bc2ebd4 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 8249ca01..51b636f7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 6a7057b3..9bf9c3c6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. class BaseAudit(object): # NO-QA diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py index cf3c987d..d812948a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import re import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py index e94af031..3dc14e3c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import absolute_import # required for external apt import from apt import apt_pkg diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py index 0fb545a9..257c6351 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import grp import os diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py index ac7568d6..b55764cd 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py index c3bd5985..0e7e409f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py index 2c221cda..7ce41b00 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.utils import get_settings from charmhelpers.contrib.hardening.audits.apt import ( diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py index 8ce9dc2b..e94f5ebe 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( DirectoryPermissionAudit, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py index d32c4f60..fe2bc6ef 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from six import string_types diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py index c471064b..6e64be00 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( FilePermissionAudit, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py index 383fe28e..9b38d5f0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( check_output, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index f7443357..56d65263 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py index e33c73ca..34cd0217 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py index 0534689b..bcbe3fde 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index 4a76d74e..f1ea5813 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import platform diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py index d4f0ec19..1990d851 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index 3af8b89d..a79f33b7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import subprocess diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py index b85150d5..edaf484b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 3fb6ae8d..94e524e2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py index d2ab7dc9..2174c645 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py index a6743a4d..ff7485c2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import grp diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 99d78f2f..b5f457c7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import glob import re diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py index ef77caf3..1501641e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Helper for managing alternatives for file conflict resolution ''' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6b917d0c..f7220f35 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import logging import re diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ef3bdccf..8040b570 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import amulet
 import json
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py
index 5faa7eda..7cbdc03d 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import glob
 import json
@@ -1438,7 +1436,7 @@ def _determine_ctxt(self):
         :return ctxt: Dictionary of the apparmor profile or None
         """
         if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
-            ctxt = {'aa-profile-mode': config('aa-profile-mode')}
+            ctxt = {'aa_profile_mode': config('aa-profile-mode')}
         else:
             ctxt = None
         return ctxt
@@ -1482,10 +1480,10 @@ def setup_aa_profile(self):
             log("Not enabling apparmor Profile")
             return
         self.install_aa_utils()
-        cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
-        cmd.append(self.ctxt['aa-profile'])
+        cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
+        cmd.append(self.ctxt['aa_profile'])
         log("Setting up the apparmor profile for {} in {} mode."
-            "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
+            "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
         try:
             check_call(cmd)
         except CalledProcessError as e:
@@ -1494,12 +1492,12 @@ def setup_aa_profile(self):
             # apparmor is yet unaware of the profile and aa-disable aa-profile
             # fails. If aa-disable learns to read profile files first this can
             # be removed.
-            if self.ctxt['aa-profile-mode'] == 'disable':
+            if self.ctxt['aa_profile_mode'] == 'disable':
                 log("Manually disabling the apparmor profile for {}."
-                    "".format(self.ctxt['aa-profile']))
+                    "".format(self.ctxt['aa_profile']))
                 self.manually_disable_aa_profile()
                 return
             status_set('blocked', "Apparmor profile {} failed to be set to {}."
-                       "".format(self.ctxt['aa-profile'],
-                                 self.ctxt['aa-profile-mode']))
+                       "".format(self.ctxt['aa_profile'],
+                                 self.ctxt['aa_profile_mode']))
             raise e
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
index ea4eb68e..f85ae4f4 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
@@ -1,3 +1,18 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
 class OSContextError(Exception):
     """Raised when an error occurs during context generation.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
index 75876796..9df5f746 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 # dummy __init__.py to fool syncer into thinking this is a syncable python
 # module
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
index e69de29b..9b088de8 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
index 2a8a1291..1f5310bb 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2016 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 # Copyright 2016 Canonical Ltd.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
index 7875b997..0fd3ac25 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
@@ -1,19 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
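The context.py hunks above rename the AppArmor keys published into the template
context from 'aa-profile-mode'/'aa-profile' to 'aa_profile_mode'/'aa_profile';
note the charm config option itself keeps its hyphen, only the context keys
change. A plausible motivation, not spelled out in the patch: charm-helpers
exposes context keys as top-level Jinja2 template variables, and a hyphen in a
variable name is parsed as subtraction. A minimal illustration, assuming the
jinja2 package is available; the profile name is made up:

    from jinja2 import Template

    # Underscored keys are usable as ordinary template variables.
    ctxt = {'aa_profile': 'usr.bin.example', 'aa_profile_mode': 'enforce'}
    tmpl = Template("{{ aa_profile }} in {{ aa_profile_mode }} mode")
    print(tmpl.render(ctxt))  # usr.bin.example in enforce mode

    # With the old hyphenated keys, "{{ aa-profile-mode }}" parses as the
    # subtraction (aa - profile - mode) of three undefined names rather
    # than as a lookup of the 'aa-profile-mode' key.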
 
 from charmhelpers.core.hookenv import (
     config,
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py
index d057ea6e..03427b49 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 # Various utilities for dealing with Neutron and the renaming from Quantum.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py
index 75876796..9df5f746 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 # dummy __init__.py to fool syncer into thinking this is a syncable python
 # module
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py
index e5e3cb1b..89588951 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import os
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
index 8da5c5ed..f4401913 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 # Common python helper functions used for OpenStack charms.
 from collections import OrderedDict
@@ -222,7 +220,6 @@
 }
 
 GIT_DEFAULT_BRANCHES = {
-    'icehouse': 'icehouse-eol',
     'kilo': 'stable/kilo',
     'liberty': 'stable/liberty',
     'mitaka': 'stable/mitaka',
@@ -743,12 +740,18 @@ def git_default_repos(projects_yaml):
         }
         repos = [repo]
 
+        # NOTE(coreycb): This is a temp work-around until the requirements
+        # repo moves from stable/kilo branch to kilo-eol tag. The core
+        # repos have already done this.
+        if default == 'kilo':
+            branch = 'kilo-eol'
+
         # neutron-* and nova-* charms require some additional repos
         if service in ['neutron-api', 'neutron-gateway',
                        'neutron-openvswitch']:
             core_project = 'neutron'
             for project in ['neutron-fwaas', 'neutron-lbaas',
-                            'neutron-vpnaas']:
+                            'neutron-vpnaas', 'nova']:
                 repo = {
                     'name': project,
                     'repository': GIT_DEFAULT_REPOS[project],
@@ -837,6 +840,7 @@ def git_clone_and_install(projects_yaml, core_project):
             pip_install(p, upgrade=True, proxy=http_proxy,
                         venv=os.path.join(parent_dir, 'venv'))
 
+    constraints = None
     for p in projects['repositories']:
         repo = p['repository']
         branch = p['branch']
@@ -848,10 +852,15 @@ def git_clone_and_install(projects_yaml, core_project):
                                                 parent_dir, http_proxy,
                                                 update_requirements=False)
             requirements_dir = repo_dir
+            constraints = os.path.join(repo_dir, "upper-constraints.txt")
+            # upper-constraints didn't exist until after icehouse
+            if not os.path.isfile(constraints):
+                constraints = None
         else:
             repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                      parent_dir, http_proxy,
-                                                     update_requirements=True)
+                                                     update_requirements=True,
+                                                     constraints=constraints)
 
     os.environ = old_environ
@@ -883,7 +892,7 @@ def _git_ensure_key_exists(key, keys):
 
 
 def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
-                                  update_requirements):
+                                  update_requirements, constraints=None):
     """
     Clone and install a single git repository.
     """
@@ -906,9 +915,10 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
     juju_log('Installing git repo from dir: {}'.format(repo_dir))
     if http_proxy:
-        pip_install(repo_dir, proxy=http_proxy, venv=venv)
+        pip_install(repo_dir, proxy=http_proxy, venv=venv,
+                    constraints=constraints)
     else:
-        pip_install(repo_dir, venv=venv)
+        pip_install(repo_dir, venv=venv, constraints=constraints)
 
     return repo_dir
 
@@ -988,6 +998,7 @@ def git_generate_systemd_init_files(templates_dir):
     script generation, which is used by the OpenStack packages.
     """
     for f in os.listdir(templates_dir):
+        # Create the init script and systemd unit file from the template
         if f.endswith(".init.in"):
            init_in_file = f
            init_file = f[:-8]
@@ -1013,10 +1024,47 @@ def git_generate_systemd_init_files(templates_dir):
             os.remove(init_dest)
         if os.path.exists(service_dest):
             os.remove(service_dest)
-        shutil.move(init_source, init_dest)
-        shutil.move(service_source, service_dest)
+        shutil.copyfile(init_source, init_dest)
+        shutil.copyfile(service_source, service_dest)
         os.chmod(init_dest, 0o755)
 
+    for f in os.listdir(templates_dir):
+        # If there's a service.in file, use it instead of the generated one
+        if f.endswith(".service.in"):
+            service_in_file = f
+            service_file = f[:-3]
+
+            service_in_source = os.path.join(templates_dir, service_in_file)
+            service_source = os.path.join(templates_dir, service_file)
+            service_dest = os.path.join('/lib/systemd/system', service_file)
+
+            shutil.copyfile(service_in_source, service_source)
+
+            if os.path.exists(service_dest):
+                os.remove(service_dest)
+            shutil.copyfile(service_source, service_dest)
+
+    for f in os.listdir(templates_dir):
+        # Generate the systemd unit if there's no existing .service.in
+        if f.endswith(".init.in"):
+            init_in_file = f
+            init_file = f[:-8]
+            service_in_file = "{}.service.in".format(init_file)
+            service_file = "{}.service".format(init_file)
+
+            init_in_source = os.path.join(templates_dir, init_in_file)
+            service_in_source = os.path.join(templates_dir, service_in_file)
+            service_source = os.path.join(templates_dir, service_file)
+            service_dest = os.path.join('/lib/systemd/system', service_file)
+
+            if not os.path.exists(service_in_source):
+                cmd = ['pkgos-gen-systemd-unit', init_in_source]
+                subprocess.check_call(cmd)
+
+                if os.path.exists(service_dest):
+                    os.remove(service_dest)
+                shutil.copyfile(service_source, service_dest)
+
 
 def os_workload_status(configs, required_interfaces, charm_func=None):
     """
diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py
index d1400a02..d7567b86 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py
@@ -1,15 +1,13 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py index a2411c37..e29bd1bb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import subprocess @@ -80,7 +78,8 @@ def pip_install_requirements(requirements, constraints=None, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, venv=None, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, + constraints=None, **options): """Install a python package""" if venv: venv_python = os.path.join(venv, 'bin/pip') @@ -95,6 +94,9 @@ def pip_install(package, fatal=False, upgrade=False, venv=None, **options): if upgrade: command.append('--upgrade') + if constraints: + command.extend(['-c', constraints]) + if isinstance(package, list): command.extend(package) else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index b2484e78..8a9b9486 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2012 Canonical Ltd. diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py index 3a3f5146..1d6ae6f0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py index 34b5f71a..4719f53c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( CalledProcessError, diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 4e35c297..3dc0df68 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-mon/hooks/charmhelpers/core/__init__.py b/ceph-mon/hooks/charmhelpers/core/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/hooks/charmhelpers/core/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/core/decorators.py b/ceph-mon/hooks/charmhelpers/core/decorators.py index bb05620b..6ad41ee4 100644 --- a/ceph-mon/hooks/charmhelpers/core/decorators.py +++ b/ceph-mon/hooks/charmhelpers/core/decorators.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2014 Canonical Ltd. diff --git a/ceph-mon/hooks/charmhelpers/core/files.py b/ceph-mon/hooks/charmhelpers/core/files.py index 0f12d321..fdd82b75 100644 --- a/ceph-mon/hooks/charmhelpers/core/files.py +++ b/ceph-mon/hooks/charmhelpers/core/files.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = 'Jorge Niedbalski ' diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py index 3056fbac..d9fa9152 100644 --- a/ceph-mon/hooks/charmhelpers/core/fstab.py +++ b/ceph-mon/hooks/charmhelpers/core/fstab.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import io import os diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index db117f9e..48b2b9dc 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index e367e450..35817b06 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-mon/hooks/charmhelpers/core/hugepage.py b/ceph-mon/hooks/charmhelpers/core/hugepage.py index a783ad94..54b5b5e2 100644 --- a/ceph-mon/hooks/charmhelpers/core/hugepage.py +++ b/ceph-mon/hooks/charmhelpers/core/hugepage.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml from charmhelpers.core import fstab diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py index 5dc64952..b166efec 100644 --- a/ceph-mon/hooks/charmhelpers/core/kernel.py +++ b/ceph-mon/hooks/charmhelpers/core/kernel.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = "Jorge Niedbalski " diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py index 0928158b..61fd074e 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-mon/hooks/charmhelpers/core/services/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index a42660ca..ca9dc996 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import json diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 24237042..3e6e30d2 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os import yaml diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index 7e3f9693..dd9b9717 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import re diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index 21cc8ab2..6e413e31 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index d2d8eafe..0a7560ff 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 338104e0..54ec969f 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -3,20 +3,17 @@ # # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Authors: # Kapil Thangavelu diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 68b0f94d..8f39f2fe 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import importlib from tempfile import NamedTemporaryFile diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index b8e0943d..dd24f9ec 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import hashlib diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index b743753e..b3404d85 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os from subprocess import check_call diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 65ed5319..f708d1ee 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call, CalledProcessError diff --git a/ceph-mon/hooks/charmhelpers/payload/__init__.py b/ceph-mon/hooks/charmhelpers/payload/__init__.py index e6f42497..ee55cb3d 100644 --- a/ceph-mon/hooks/charmhelpers/payload/__init__.py +++ b/ceph-mon/hooks/charmhelpers/payload/__init__.py @@ -1,17 +1,15 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Tools for working with files injected into a charm just before deployment." diff --git a/ceph-mon/hooks/charmhelpers/payload/execd.py b/ceph-mon/hooks/charmhelpers/payload/execd.py index 4d4d81a6..0c42090f 100644 --- a/ceph-mon/hooks/charmhelpers/payload/execd.py +++ b/ceph-mon/hooks/charmhelpers/payload/execd.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import sys diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-mon/tests/charmhelpers/__init__.py +++ b/ceph-mon/tests/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-mon/tests/charmhelpers/contrib/__init__.py b/ceph-mon/tests/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/tests/charmhelpers/contrib/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index d451698d..0146236d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import os diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 7e5c25a9..a39ed4c8 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import io import json diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
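The constraints plumbing added to git_clone_and_install(), _git_clone_and_install_single() and pip_install() earlier in this patch series threads the requirements repository's upper-constraints.txt through to pip's -c option, so each project installed from git is capped at the versions pinned for the release without adding new requirements. A minimal sketch of the resulting pip invocation, assuming the conventional /mnt/openstack-git checkout layout (paths are examples only; the real pip_install() builds its command via pip_execute()):

    import os
    import subprocess

    def pip_install_sketch(package, venv=None, proxy=None, constraints=None):
        # Illustrative only; mirrors the command assembled by pip_install().
        pip = os.path.join(venv, 'bin/pip') if venv else 'pip'
        command = [pip, 'install']
        if proxy:
            command.append('--proxy={}'.format(proxy))
        if constraints:
            # pip -c caps resolved versions; it never installs anything extra.
            command.extend(['-c', constraints])
        command.append(package)
        subprocess.check_call(command)

    # e.g. after the requirements repo has been cloned:
    # pip_install_sketch('/mnt/openstack-git/nova',
    #                    venv='/mnt/openstack-git/venv',
    #                    constraints='/mnt/openstack-git/requirements/upper-constraints.txt')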
diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6b917d0c..f7220f35 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import logging import re diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef3bdccf..8040b570 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import json From cfd242d7916868674f44efa576a5c8691c27565a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 6 Jul 2016 15:56:26 +0100 Subject: [PATCH 1153/2699] Resync charmhelpers for licensing change The charm-helpers project has re-licensed to Apache 2.0, in line with the agreed licensing approach to interfaces, layers and charms generally. Resync helpers to bring charmhelpers in line with the charm codebase. Change-Id: I52581e9996fa2ef21672148ca527621e3b4f149f --- ceph-radosgw/hooks/charmhelpers/__init__.py | 20 ++- .../hooks/charmhelpers/cli/__init__.py | 20 ++- .../hooks/charmhelpers/cli/benchmark.py | 20 ++- .../hooks/charmhelpers/cli/commands.py | 20 ++- .../hooks/charmhelpers/cli/hookenv.py | 20 ++- ceph-radosgw/hooks/charmhelpers/cli/host.py | 20 ++- .../hooks/charmhelpers/cli/unitdata.py | 20 ++- .../hooks/charmhelpers/contrib/__init__.py | 20 ++- .../contrib/charmsupport/__init__.py | 20 ++- .../charmhelpers/contrib/charmsupport/nrpe.py | 20 ++- .../contrib/charmsupport/volumes.py | 20 ++- .../contrib/hahelpers/__init__.py | 20 ++- .../charmhelpers/contrib/hahelpers/apache.py | 20 ++- .../charmhelpers/contrib/hahelpers/cluster.py | 26 ++-- .../contrib/hardening/__init__.py | 20 ++- .../contrib/hardening/apache/__init__.py | 20 ++- .../hardening/apache/checks/__init__.py | 20 ++- .../contrib/hardening/apache/checks/config.py | 20 ++- .../contrib/hardening/audits/__init__.py | 20 ++- .../contrib/hardening/audits/apache.py | 20 ++- .../contrib/hardening/audits/apt.py | 20 ++- .../contrib/hardening/audits/file.py | 20 ++- .../charmhelpers/contrib/hardening/harden.py | 20 ++- .../contrib/hardening/host/__init__.py | 20 ++- .../contrib/hardening/host/checks/__init__.py | 20 ++- .../contrib/hardening/host/checks/apt.py | 20 ++- .../contrib/hardening/host/checks/limits.py | 20 ++- .../contrib/hardening/host/checks/login.py | 20 ++- .../hardening/host/checks/minimize_access.py | 20 ++- .../contrib/hardening/host/checks/pam.py | 20 ++- .../contrib/hardening/host/checks/profile.py | 20 ++- .../hardening/host/checks/securetty.py | 20 ++- .../hardening/host/checks/suid_sgid.py | 20 ++- .../contrib/hardening/host/checks/sysctl.py | 20 ++- .../contrib/hardening/mysql/__init__.py | 20 ++- .../hardening/mysql/checks/__init__.py | 20 ++- .../contrib/hardening/mysql/checks/config.py | 20 ++- .../contrib/hardening/ssh/__init__.py | 20 ++- .../contrib/hardening/ssh/checks/__init__.py | 20
++- .../contrib/hardening/ssh/checks/config.py | 20 ++- .../contrib/hardening/templating.py | 20 ++- .../charmhelpers/contrib/hardening/utils.py | 20 ++- .../charmhelpers/contrib/network/__init__.py | 20 ++- .../hooks/charmhelpers/contrib/network/ip.py | 20 ++- .../contrib/openstack/__init__.py | 20 ++- .../contrib/openstack/alternatives.py | 20 ++- .../contrib/openstack/amulet/__init__.py | 20 ++- .../contrib/openstack/amulet/deployment.py | 20 ++- .../contrib/openstack/amulet/utils.py | 20 ++- .../charmhelpers/contrib/openstack/context.py | 36 +++--- .../contrib/openstack/exceptions.py | 15 +++ .../contrib/openstack/files/__init__.py | 20 ++- .../contrib/openstack/ha/__init__.py | 13 ++ .../contrib/openstack/ha/utils.py | 41 +++++-- .../charmhelpers/contrib/openstack/ip.py | 21 ++-- .../charmhelpers/contrib/openstack/neutron.py | 20 ++- .../contrib/openstack/templates/__init__.py | 20 ++- .../contrib/openstack/templating.py | 20 ++- .../charmhelpers/contrib/openstack/utils.py | 116 +++++++++++++----- .../charmhelpers/contrib/python/__init__.py | 20 ++- .../charmhelpers/contrib/python/packages.py | 26 ++-- .../charmhelpers/contrib/storage/__init__.py | 20 ++- .../contrib/storage/linux/__init__.py | 20 ++- .../contrib/storage/linux/ceph.py | 20 ++- .../contrib/storage/linux/loopback.py | 20 ++- .../charmhelpers/contrib/storage/linux/lvm.py | 20 ++- .../contrib/storage/linux/utils.py | 20 ++- .../hooks/charmhelpers/core/__init__.py | 20 ++- .../hooks/charmhelpers/core/decorators.py | 20 ++- ceph-radosgw/hooks/charmhelpers/core/files.py | 20 ++- ceph-radosgw/hooks/charmhelpers/core/fstab.py | 20 ++- .../hooks/charmhelpers/core/hookenv.py | 22 ++-- ceph-radosgw/hooks/charmhelpers/core/host.py | 20 ++- .../hooks/charmhelpers/core/hugepage.py | 20 ++- .../hooks/charmhelpers/core/kernel.py | 20 ++- .../charmhelpers/core/services/__init__.py | 20 ++- .../hooks/charmhelpers/core/services/base.py | 20 ++- .../charmhelpers/core/services/helpers.py | 20 ++- .../hooks/charmhelpers/core/strutils.py | 20 ++- .../hooks/charmhelpers/core/sysctl.py | 20 ++- .../hooks/charmhelpers/core/templating.py | 20 ++- .../hooks/charmhelpers/core/unitdata.py | 21 ++-- .../hooks/charmhelpers/fetch/__init__.py | 20 ++- .../hooks/charmhelpers/fetch/archiveurl.py | 20 ++- .../hooks/charmhelpers/fetch/bzrurl.py | 20 ++- .../hooks/charmhelpers/fetch/giturl.py | 20 ++- .../hooks/charmhelpers/payload/__init__.py | 20 ++- .../hooks/charmhelpers/payload/execd.py | 20 ++- ceph-radosgw/tests/charmhelpers/__init__.py | 20 ++- .../tests/charmhelpers/contrib/__init__.py | 20 ++- .../charmhelpers/contrib/amulet/__init__.py | 20 ++- .../charmhelpers/contrib/amulet/deployment.py | 20 ++- .../charmhelpers/contrib/amulet/utils.py | 20 ++- .../contrib/openstack/__init__.py | 20 ++- .../contrib/openstack/amulet/__init__.py | 20 ++- .../contrib/openstack/amulet/deployment.py | 20 ++- .../contrib/openstack/amulet/utils.py | 20 ++- 97 files changed, 997 insertions(+), 1080 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py index 2d37ab31..389b490f 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import inspect import argparse diff --git a/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py b/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py index b23c16ce..303af14b 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/benchmark.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.contrib.benchmark import Benchmark diff --git a/ceph-radosgw/hooks/charmhelpers/cli/commands.py b/ceph-radosgw/hooks/charmhelpers/cli/commands.py index 7e91db00..b9310565 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/commands.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/commands.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ This module loads sub-modules into the python runtime so they can be diff --git a/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py b/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py index 265c816e..bd72f448 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import hookenv diff --git a/ceph-radosgw/hooks/charmhelpers/cli/host.py b/ceph-radosgw/hooks/charmhelpers/cli/host.py index 58e78d6b..40396849 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/host.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import host diff --git a/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py index d1cd95bf..c5728582 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import unitdata diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 2f246429..17976fb5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py index 320961b9..7ea43f08 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Functions for managing volumes in juju units. One volume is supported per unit. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 00917195..3313abac 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2012 Canonical Ltd. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 92325a96..e02350e0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2012 Canonical Ltd. 
@@ -280,14 +278,14 @@ def get_hacluster_config(exclude_keys=None): for initiating a relation to hacluster: ha-bindiface, ha-mcastport, vip, os-internal-hostname, - os-admin-hostname, os-public-hostname + os-admin-hostname, os-public-hostname, os-access-hostname param: exclude_keys: list of setting key(s) to be excluded. returns: dict: A dict containing settings keyed by setting name. raises: HAIncompleteConfig if settings are missing or incorrect. ''' settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', - 'os-admin-hostname', 'os-public-hostname'] + 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] conf = {} for setting in settings: if exclude_keys and setting in exclude_keys: @@ -324,7 +322,7 @@ def valid_hacluster_config(): # If dns-ha then one of os-*-hostname must be set if dns: dns_settings = ['os-internal-hostname', 'os-admin-hostname', - 'os-public-hostname'] + 'os-public-hostname', 'os-access-hostname'] # At this point it is unknown if one or all of the possible # network spaces are in HA. Validate at least one is set which is # the minimum required. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py index a1335320..30a3e943 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/__init__.py @@ -1,15 +1,13 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py index d1304792..3bc2ebd4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 8249ca01..51b636f7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 6a7057b3..9bf9c3c6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. class BaseAudit(object): # NO-QA diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py index cf3c987d..d812948a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. import re import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py index e94af031..3dc14e3c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import absolute_import # required for external apt import from apt import apt_pkg diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py index 0fb545a9..257c6351 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import grp import os diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py index ac7568d6..b55764cd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py index c3bd5985..0e7e409f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py index 2c221cda..7ce41b00 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.utils import get_settings from charmhelpers.contrib.hardening.audits.apt import ( diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py index 8ce9dc2b..e94f5ebe 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( DirectoryPermissionAudit, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py index d32c4f60..fe2bc6ef 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from six import string_types diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py index c471064b..6e64be00 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( FilePermissionAudit, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py index 383fe28e..9b38d5f0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( check_output, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index f7443357..56d65263 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py index e33c73ca..34cd0217 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py index 0534689b..bcbe3fde 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index 4a76d74e..f1ea5813 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import platform diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from os import path diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py index d4f0ec19..1990d851 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index 3af8b89d..a79f33b7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import six import subprocess diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py index b85150d5..edaf484b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.core.hookenv import ( log, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 3fb6ae8d..94e524e2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py index d2ab7dc9..2174c645 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py index a6743a4d..ff7485c2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import grp diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 99d78f2f..b5f457c7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import re diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py index ef77caf3..1501641e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Helper for managing alternatives for file conflict resolution ''' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6b917d0c..f7220f35 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import logging import re diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ef3bdccf..8040b570 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import json diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 5faa7eda..7cbdc03d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import glob import json @@ -1438,7 +1436,7 @@ def _determine_ctxt(self): :return ctxt: Dictionary of the apparmor profile or None """ if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: - ctxt = {'aa-profile-mode': config('aa-profile-mode')} + ctxt = {'aa_profile_mode': config('aa-profile-mode')} else: ctxt = None return ctxt @@ -1482,10 +1480,10 @@ def setup_aa_profile(self): log("Not enabling apparmor Profile") return self.install_aa_utils() - cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])] - cmd.append(self.ctxt['aa-profile']) + cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] + cmd.append(self.ctxt['aa_profile']) log("Setting up the apparmor profile for {} in {} mode." - "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode'])) + "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) try: check_call(cmd) except CalledProcessError as e: @@ -1494,12 +1492,12 @@ def setup_aa_profile(self): # apparmor is yet unaware of the profile and aa-disable aa-profile # fails. If aa-disable learns to read profile files first this can # be removed. - if self.ctxt['aa-profile-mode'] == 'disable': + if self.ctxt['aa_profile_mode'] == 'disable': log("Manually disabling the apparmor profile for {}." - "".format(self.ctxt['aa-profile'])) + "".format(self.ctxt['aa_profile'])) self.manually_disable_aa_profile() return status_set('blocked', "Apparmor profile {} failed to be set to {}." - "".format(self.ctxt['aa-profile'], - self.ctxt['aa-profile-mode'])) + "".format(self.ctxt['aa_profile'], + self.ctxt['aa_profile_mode'])) raise e diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py index ea4eb68e..f85ae4f4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -1,3 +1,18 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + class OSContextError(Exception): """Raised when an error occurs during context generation. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py index 75876796..9df5f746 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
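The context.py hunks above rename the AppArmor context keys from hyphenated ('aa-profile-mode', 'aa-profile') to underscored ('aa_profile_mode', 'aa_profile') form. The patch does not state why, but these ctxt dicts are typically fed to Jinja2 templates, where a hyphenated name cannot be referenced as a variable. A minimal sketch of that failure mode, assuming jinja2 is installed (the example itself is not part of the patch):

    from jinja2 import Template, UndefinedError

    hyphen_ctxt = {'aa-profile-mode': 'enforce'}
    underscore_ctxt = {'aa_profile_mode': 'enforce'}

    try:
        # Jinja2 parses this as (aa - profile - mode): three undefined
        # names and two subtractions, not a lookup of the hyphenated key.
        Template('{{ aa-profile-mode }}').render(**hyphen_ctxt)
    except UndefinedError as err:
        print('hyphenated key is unreachable: %s' % err)

    # An underscored key is a valid identifier, so the lookup works.
    print(Template('{{ aa_profile_mode }}').render(**underscore_ctxt))
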
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # dummy __init__.py to fool syncer into thinking this is a syncable python # module diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py index e69de29b..9b088de8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 34064237..1f5310bb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2016 Canonical Ltd. 
@@ -36,6 +34,10 @@ DEBUG, ) +from charmhelpers.core.host import ( + lsb_release +) + from charmhelpers.contrib.openstack.ip import ( resolve_address, ) @@ -63,8 +65,11 @@ def update_dns_ha_resource_params(resources, resource_params, DNS HA """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + settings = ['os-admin-hostname', 'os-internal-hostname', - 'os-public-hostname'] + 'os-public-hostname', 'os-access-hostname'] # Check which DNS settings are set and update dictionaries hostname_group = [] @@ -109,3 +114,15 @@ def update_dns_ha_resource_params(resources, resource_params, msg = 'DNS HA: Hostname group has no members.' status_set('blocked', msg) raise DNSHAException(msg) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 7875b997..0fd3ac25 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -1,19 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( config, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index d057ea6e..03427b49 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
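The ha/utils.py hunk above adds 'os-access-hostname' to the recognised settings and gates DNS HA on assert_charm_supports_dns_ha(), which compares lsb_release()'s DISTRIB_RELEASE against '16.04' as a plain string. String comparison happens to order recent Ubuntu releases correctly, but the general pitfall is worth noting; a numeric comparison is safer. A small sketch (the helper below is hypothetical, not part of charm-helpers):

    def release_at_least(release, minimum):
        """Compare Ubuntu DISTRIB_RELEASE values numerically."""
        as_tuple = lambda ver: tuple(int(part) for part in ver.split('.'))
        return as_tuple(release) >= as_tuple(minimum)

    assert release_at_least('16.04', '16.04')
    assert not release_at_least('14.04', '16.04')
    # Lexicographic comparison gets this one wrong: '8.04' > '16.04'
    # as strings, so a purely string-based check would let it through.
    assert not release_at_least('8.04', '16.04')
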
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Various utilies for dealing with Neutron and the renaming from Quantum. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py index 75876796..9df5f746 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # dummy __init__.py to fool syncer into thinking this is a syncable python # module diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index e5e3cb1b..89588951 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 53e58424..f4401913 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Common python helper functions used for OpenStack charms. from collections import OrderedDict @@ -222,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'icehouse': 'icehouse-eol', 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', @@ -725,14 +722,15 @@ def git_install_requested(): requirements_dir = None -def git_default_repos(projects): +def git_default_repos(projects_yaml): """ Returns default repos if a default openstack-origin-git value is specified. """ service = service_name() + core_project = service for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): - if projects == default: + if projects_yaml == default: # add the requirements repo first repo = { @@ -742,34 +740,47 @@ def git_default_repos(projects): } repos = [repo] - # neutron and nova charms require some additional repos - if service == 'neutron': - for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: + # NOTE(coreycb): This is a temp work-around until the requirements + # repo moves from stable/kilo branch to kilo-eol tag. The core + # repos have already done this. 
+ if default == 'kilo': + branch = 'kilo-eol' + + # neutron-* and nova-* charms require some additional repos + if service in ['neutron-api', 'neutron-gateway', + 'neutron-openvswitch']: + core_project = 'neutron' + for project in ['neutron-fwaas', 'neutron-lbaas', + 'neutron-vpnaas', 'nova']: repo = { - 'name': svc, - 'repository': GIT_DEFAULT_REPOS[svc], + 'name': project, + 'repository': GIT_DEFAULT_REPOS[project], 'branch': branch, } repos.append(repo) - elif service == 'nova': + + elif service in ['nova-cloud-controller', 'nova-compute']: + core_project = 'nova' repo = { 'name': 'neutron', 'repository': GIT_DEFAULT_REPOS['neutron'], 'branch': branch, } repos.append(repo) + elif service == 'openstack-dashboard': + core_project = 'horizon' - # finally add the current service's repo + # finally add the current service's core project repo repo = { - 'name': service, - 'repository': GIT_DEFAULT_REPOS[service], + 'name': core_project, + 'repository': GIT_DEFAULT_REPOS[core_project], 'branch': branch, } repos.append(repo) return yaml.dump(dict(repositories=repos)) - return projects + return projects_yaml def _git_yaml_load(projects_yaml): @@ -829,6 +840,7 @@ def git_clone_and_install(projects_yaml, core_project): pip_install(p, upgrade=True, proxy=http_proxy, venv=os.path.join(parent_dir, 'venv')) + constraints = None for p in projects['repositories']: repo = p['repository'] branch = p['branch'] @@ -840,10 +852,15 @@ def git_clone_and_install(projects_yaml, core_project): parent_dir, http_proxy, update_requirements=False) requirements_dir = repo_dir + constraints = os.path.join(repo_dir, "upper-constraints.txt") + # upper-constraints didn't exist until after icehouse + if not os.path.isfile(constraints): + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements=True) + update_requirements=True, + constraints=constraints) os.environ = old_environ @@ -875,7 +892,7 @@ def _git_ensure_key_exists(key, keys): def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements): + update_requirements, constraints=None): """ Clone and install a single git repository. """ @@ -898,9 +915,10 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, juju_log('Installing git repo from dir: {}'.format(repo_dir)) if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv) + pip_install(repo_dir, proxy=http_proxy, venv=venv, + constraints=constraints) else: - pip_install(repo_dir, venv=venv) + pip_install(repo_dir, venv=venv, constraints=constraints) return repo_dir @@ -980,6 +998,7 @@ def git_generate_systemd_init_files(templates_dir): script generation, which is used by the OpenStack packages. 
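The git_default_repos rework above does two things: it maps each charm onto its core OpenStack project (neutron-* charms to neutron, nova-* charms to nova, openstack-dashboard to horizon) before consulting GIT_DEFAULT_REPOS, and it pins kilo deployments to the kilo-eol tag now that stable/kilo has moved on. A condensed sketch of the branch selection, using names taken from the patch (the wrapper function itself is illustrative):

    GIT_DEFAULT_BRANCHES = {
        'kilo': 'stable/kilo',
        'liberty': 'stable/liberty',
        'mitaka': 'stable/mitaka',
    }

    def branch_for(release):
        branch = GIT_DEFAULT_BRANCHES[release]
        # Temp work-around carried in the patch: requirements still
        # lives on stable/kilo, so kilo is redirected to its EOL tag.
        if release == 'kilo':
            branch = 'kilo-eol'
        return branch

    assert branch_for('kilo') == 'kilo-eol'
    assert branch_for('mitaka') == 'stable/mitaka'
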
""" for f in os.listdir(templates_dir): + # Create the init script and systemd unit file from the template if f.endswith(".init.in"): init_in_file = f init_file = f[:-8] @@ -1005,10 +1024,47 @@ def git_generate_systemd_init_files(templates_dir): os.remove(init_dest) if os.path.exists(service_dest): os.remove(service_dest) - shutil.move(init_source, init_dest) - shutil.move(service_source, service_dest) + shutil.copyfile(init_source, init_dest) + shutil.copyfile(service_source, service_dest) os.chmod(init_dest, 0o755) + for f in os.listdir(templates_dir): + # If there's a service.in file, use it instead of the generated one + if f.endswith(".service.in"): + service_in_file = f + service_file = f[:-3] + + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(service_in_source, service_source) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + + for f in os.listdir(templates_dir): + # Generate the systemd unit if there's no existing .service.in + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_in_file = "{}.service.in".format(init_file) + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + if not os.path.exists(service_in_source): + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + def os_workload_status(configs, required_interfaces, charm_func=None): """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index a2411c37..e29bd1bb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import subprocess @@ -80,7 +78,8 @@ def pip_install_requirements(requirements, constraints=None, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, venv=None, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, + constraints=None, **options): """Install a python package""" if venv: venv_python = os.path.join(venv, 'bin/pip') @@ -95,6 +94,9 @@ def pip_install(package, fatal=False, upgrade=False, venv=None, **options): if upgrade: command.append('--upgrade') + if constraints: + command.extend(['-c', constraints]) + if isinstance(package, list): command.extend(package) else: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
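With the packages.py change above, pip_install grows a constraints keyword that is passed straight through as pip's -c flag; combined with the utils.py plumbing earlier in this patch, every repo installed after the requirements repo is capped by that repo's upper-constraints.txt, pinning transitive dependencies. A hedged usage sketch (the paths are illustrative, not from the patch):

    from charmhelpers.contrib.python.packages import pip_install

    # Equivalent to: pip install -c <constraints> /mnt/openstack-git/nova
    pip_install('/mnt/openstack-git/nova',
                venv='/mnt/openstack-git/venv',
                constraints='/mnt/openstack-git/requirements/'
                            'upper-constraints.txt')
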
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index b2484e78..8a9b9486 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2012 Canonical Ltd. 
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index 3a3f5146..1d6ae6f0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index 34b5f71a..4719f53c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( CalledProcessError, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 4e35c297..3dc0df68 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import re diff --git a/ceph-radosgw/hooks/charmhelpers/core/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/core/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/core/decorators.py b/ceph-radosgw/hooks/charmhelpers/core/decorators.py index bb05620b..6ad41ee4 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/decorators.py +++ b/ceph-radosgw/hooks/charmhelpers/core/decorators.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2014 Canonical Ltd. diff --git a/ceph-radosgw/hooks/charmhelpers/core/files.py b/ceph-radosgw/hooks/charmhelpers/core/files.py index 0f12d321..fdd82b75 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/files.py +++ b/ceph-radosgw/hooks/charmhelpers/core/files.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = 'Jorge Niedbalski ' diff --git a/ceph-radosgw/hooks/charmhelpers/core/fstab.py b/ceph-radosgw/hooks/charmhelpers/core/fstab.py index 3056fbac..d9fa9152 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/fstab.py +++ b/ceph-radosgw/hooks/charmhelpers/core/fstab.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import io import os diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 01321296..48b2b9dc 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. @@ -1006,4 +1004,4 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index e367e450..35817b06 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-radosgw/hooks/charmhelpers/core/hugepage.py b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py index a783ad94..54b5b5e2 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hugepage.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hugepage.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml from charmhelpers.core import fstab diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel.py b/ceph-radosgw/hooks/charmhelpers/core/kernel.py index 5dc64952..b166efec 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/kernel.py +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = "Jorge Niedbalski " diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py index 0928158b..61fd074e 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index a42660ca..ca9dc996 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import json diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 24237042..3e6e30d2 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import yaml diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index 7e3f9693..dd9b9717 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import re diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py index 21cc8ab2..6e413e31 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index d2d8eafe..0a7560ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 338104e0..54ec969f 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -3,20 +3,17 @@ # # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# # Authors: # Kapil Thangavelu diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 68b0f94d..8f39f2fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import importlib from tempfile import NamedTemporaryFile diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index b8e0943d..dd24f9ec 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import hashlib diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index b743753e..b3404d85 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index 65ed5319..f708d1ee 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call, CalledProcessError diff --git a/ceph-radosgw/hooks/charmhelpers/payload/__init__.py b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py index e6f42497..ee55cb3d 100644 --- a/ceph-radosgw/hooks/charmhelpers/payload/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/payload/__init__.py @@ -1,17 +1,15 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Tools for working with files injected into a charm just before deployment." diff --git a/ceph-radosgw/hooks/charmhelpers/payload/execd.py b/ceph-radosgw/hooks/charmhelpers/payload/execd.py index 4d4d81a6..0c42090f 100644 --- a/ceph-radosgw/hooks/charmhelpers/payload/execd.py +++ b/ceph-radosgw/hooks/charmhelpers/payload/execd.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import sys diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-radosgw/tests/charmhelpers/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
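
The hunks in this patch apply the same mechanical LGPLv3-to-Apache-2.0 header swap to every synced charm-helpers file. A quick way to confirm the swap landed everywhere is to scan the synced tree for any Python file that still carries the old header; what follows is a minimal sketch, illustrative only — this script is not part of charm-helpers or of this patch series, and the root path passed at the bottom is just an example.

# relicense_check.py -- illustrative sketch, not part of this series.
# Walks a tree and reports any .py file whose header still mentions the
# old LGPL text without the new Apache notice.
import os

OLD_MARKER = 'GNU Lesser General Public License'
NEW_MARKER = 'Licensed under the Apache License, Version 2.0'


def unmigrated(root):
    """Yield paths of Python files that still carry the LGPL header."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith('.py'):
                continue
            path = os.path.join(dirpath, name)
            with open(path) as f:
                head = f.read(2048)  # license headers sit at the top
            if OLD_MARKER in head and NEW_MARKER not in head:
                yield path


if __name__ == '__main__':
    # Example root; point this at whichever charm's synced tree you checked out.
    for path in unmigrated('ceph-radosgw/hooks/charmhelpers'):
        print(path)

Reading only the first couple of kilobytes keeps the check cheap and avoids false positives from incidental LGPL mentions deeper in a file.
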
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py index d451698d..0146236d 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import amulet import os diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 7e5c25a9..a39ed4c8 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import io import json diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6b917d0c..f7220f35 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import logging import re diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef3bdccf..8040b570 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import amulet import json From 6430f8177557cdfbd21abc04b984ae288ab710da Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 6 Jul 2016 15:28:48 +0000 Subject: [PATCH 1154/2699] Update .gitreview, fix duplicate requirements Also fixes tox Change-Id: If8fe82973557e5aaa17ce42c300d1c74c700baa6 --- ceph-proxy/.gitreview | 2 +- ceph-proxy/hooks/ceph_hooks.py | 69 ++++++----------------------- ceph-proxy/requirements.txt | 1 - ceph-proxy/tests/.gitkeep | 0 ceph-proxy/unit_tests/__init__.py | 0 ceph-proxy/unit_tests/test_dummy.py | 6 +++ 6 files changed, 20 insertions(+), 58 deletions(-) create mode 100644 ceph-proxy/tests/.gitkeep create mode 100644 ceph-proxy/unit_tests/__init__.py create mode 100644 ceph-proxy/unit_tests/test_dummy.py diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview index 47000658..0eac5a3b 100644 --- a/ceph-proxy/.gitreview +++ b/ceph-proxy/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.openstack.org port=29418 -project=openstack/charm-ceph-mon.git \ No newline at end of file +project=openstack/charm-ceph-proxy diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 4d141cc0..ebe3a621 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -10,17 +10,10 @@ import glob import os -import random import shutil -import socket -import subprocess import sys -import uuid -import time import ceph -from charmhelpers.core import host -from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, DEBUG, @@ -29,21 +22,13 @@ related_units, relation_get, relation_set, - leader_set, leader_get, - is_leader, remote_unit, Hooks, UnregisteredHookError, service_name, - relations_of_type, - status_set, - local_unit) + status_set,) from charmhelpers.core.host import ( - service_restart, mkdir, - write_file, - rsync, - cmp_pkgrevno, - service_stop, service_start) + cmp_pkgrevno,) from charmhelpers.fetch import ( apt_install, apt_update, @@ -52,17 +37,8 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative -from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, - format_ipv6_addr, -) -from charmhelpers.core.sysctl import create as create_sysctl + from charmhelpers.core.templating import render -from charmhelpers.contrib.storage.linux.ceph import ( - monitor_key_set, - monitor_key_exists, - monitor_key_get, - get_mon_map) from ceph_broker import ( process_requests @@ -73,7 +49,6 @@ get_unit_hostname, ) -from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() @@ -115,15 +90,22 @@ def emit_cephconf(): charm_ceph_conf, 100) keyring = 'ceph.client.admin.keyring' keyring_path = '/etc/ceph/' + keyring - render(keyring, keyring_path, {'admin_key': config('admin-key')}, owner=ceph.ceph_user(), perms=0o600) + ctx = {'admin_key': config('admin-key')} + user = ceph.ceph_user() + render(keyring, keyring_path, ctx, owner=user, perms=0o600) keyring = 'keyring' - keyring_path = '/var/lib/ceph/mon/ceph-' + get_unit_hostname()+ '/' + keyring - render('mon.keyring', keyring_path, {'admin_key': config('admin-key')}, owner=ceph.ceph_user(), perms=0o600) + keyring_path = ( + '/var/lib/ceph/mon/ceph-' + + get_unit_hostname() + + '/' + + keyring) + render('mon.keyring', keyring_path, ctx, owner=user, perms=0o600) notify_radosgws() notify_client() + @hooks.hook('config-changed') @harden() def config_changed(): @@ -234,31 +216,6 @@ def assess_status(): status_set('active', 'Ready 
to proxy settings') else: status_set('blocked', 'Ensure FSID and admin-key are set') - # moncount = int(config('monitor-count')) - # units = get_peer_units() - # # not enough peers and mon_count > 1 - # if len(units.keys()) < moncount: - # status_set('blocked', 'Insufficient peer units to bootstrap' - # ' cluster (require {})'.format(moncount)) - # return - - # # mon_count > 1, peers, but no ceph-public-address - # ready = sum(1 for unit_ready in units.itervalues() if unit_ready) - # if ready < moncount: - # status_set('waiting', 'Peer units detected, waiting for addresses') - # return - - # # active - bootstrapped + quorum status check - # if ceph.is_bootstrapped() and ceph.is_quorum(): - # status_set('active', 'Unit is ready and clustered') - # else: - # # Unit should be running and clustered, but no quorum - # # TODO: should this be blocked or waiting? - # status_set('blocked', 'Unit not clustered (no quorum)') - # # If there's a pending lock for this unit, - # # can i get the lock? - # # reboot the ceph-mon process - # status_set('active', 'doing some shit maybe?') @hooks.hook('update-status') diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index a72939e8..6a3271b0 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -10,4 +10,3 @@ Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 -charm-tools>=2.0.0 \ No newline at end of file diff --git a/ceph-proxy/tests/.gitkeep b/ceph-proxy/tests/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/unit_tests/test_dummy.py b/ceph-proxy/unit_tests/test_dummy.py new file mode 100644 index 00000000..93f686e4 --- /dev/null +++ b/ceph-proxy/unit_tests/test_dummy.py @@ -0,0 +1,6 @@ +import unittest + + +class CharmTestCase(unittest.TestCase): + def test_it_works(self): + assert True From 801497e14e87536c969f8874ccc73ef649bc4af3 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 8 Jul 2016 09:28:33 -0700 Subject: [PATCH 1155/2699] Tweak AppArmor profile After some testing with aa-complain it was discovered that one of the apparmor rules was causing aa-complain to fail. This patch also fixes an indentation typo. Change-Id: I7a0e7e64f236136cd0f15fed22233cea533cad0c --- ceph-mon/files/apparmor/usr.bin.ceph-mon | 6 ++---- ceph-mon/hooks/ceph_hooks.py | 9 +++++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ceph-mon/files/apparmor/usr.bin.ceph-mon b/ceph-mon/files/apparmor/usr.bin.ceph-mon index 9c685916..e1028773 100644 --- a/ceph-mon/files/apparmor/usr.bin.ceph-mon +++ b/ceph-mon/files/apparmor/usr.bin.ceph-mon @@ -13,8 +13,8 @@ owner /etc/ceph/* rw, /etc/passwd r, - /proc/@{pid}/auxv r, - /proc/@{pid}/net/dev r, + @{PROC}/@{pid}/auxv r, + @{PROC}/@{pid}/net/dev r, /run/ceph/* rw, /tmp/ r, @@ -23,6 +23,4 @@ /var/log/ceph/* rwk, /var/run/ceph/* rwk, /var/tmp/ r, - # Site-specific additions and overrides. - #include } diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 72de1bc0..00abb5a4 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -42,7 +42,7 @@ service_name, relations_of_type, status_set, - local_unit) + local_unit, ERROR) from charmhelpers.core.host import ( service_restart, mkdir, @@ -281,8 +281,8 @@ def install_apparmor_profile(): aa_mode = config('aa-profile-mode') if aa_mode not in app_armor_modes: log('Invalid apparmor mode: {}. 
Defaulting to complain'.format( - aa_mode), level='error') - aa_mode = 'complain' + aa_mode), level=ERROR) + aa_mode = 'complain' apparmor_dir = os.path.join(os.sep, 'etc', 'apparmor.d', @@ -298,7 +298,8 @@ def install_apparmor_profile(): subprocess.check_output(cmd) except subprocess.CalledProcessError as err: log('{} failed with error {}'.format( - app_armor_modes[aa_mode], err.output), level='error') + app_armor_modes[aa_mode], err.output), level=ERROR) + raise @hooks.hook('install.real') From bf6d7b26aa472d4e90d46ab818c2834c3b32e716 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 7 Jul 2016 11:15:06 -0400 Subject: [PATCH 1156/2699] Add testing This commit adds some unit testing to the helper libraries as well as amulet testing for the charm itself Change-Id: I13ff38655773521592c81bb5a6847a516abd943a --- ceph-proxy/Makefile | 8 +- ceph-proxy/charm-helpers-hooks.yaml | 10 +- ceph-proxy/config.yaml | 18 + ceph-proxy/hooks/ceph_hooks.py | 12 +- ceph-proxy/hooks/charmhelpers/__init__.py | 20 +- ceph-proxy/hooks/charmhelpers/cli/__init__.py | 20 +- .../hooks/charmhelpers/cli/benchmark.py | 20 +- ceph-proxy/hooks/charmhelpers/cli/commands.py | 20 +- ceph-proxy/hooks/charmhelpers/cli/hookenv.py | 20 +- ceph-proxy/hooks/charmhelpers/cli/host.py | 20 +- ceph-proxy/hooks/charmhelpers/cli/unitdata.py | 20 +- .../hooks/charmhelpers/contrib/__init__.py | 20 +- .../contrib/charmsupport/__init__.py | 20 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 20 +- .../contrib/charmsupport/volumes.py | 20 +- .../contrib/hardening/__init__.py | 20 +- .../contrib/hardening/apache/__init__.py | 20 +- .../hardening/apache/checks/__init__.py | 20 +- .../contrib/hardening/apache/checks/config.py | 20 +- .../contrib/hardening/audits/__init__.py | 20 +- .../contrib/hardening/audits/apache.py | 20 +- .../contrib/hardening/audits/apt.py | 20 +- .../contrib/hardening/audits/file.py | 20 +- .../charmhelpers/contrib/hardening/harden.py | 20 +- .../contrib/hardening/host/__init__.py | 20 +- .../contrib/hardening/host/checks/__init__.py | 20 +- .../contrib/hardening/host/checks/apt.py | 20 +- .../contrib/hardening/host/checks/limits.py | 20 +- .../contrib/hardening/host/checks/login.py | 20 +- .../hardening/host/checks/minimize_access.py | 20 +- .../contrib/hardening/host/checks/pam.py | 20 +- .../contrib/hardening/host/checks/profile.py | 20 +- .../hardening/host/checks/securetty.py | 20 +- .../hardening/host/checks/suid_sgid.py | 20 +- .../contrib/hardening/host/checks/sysctl.py | 20 +- .../contrib/hardening/mysql/__init__.py | 20 +- .../hardening/mysql/checks/__init__.py | 20 +- .../contrib/hardening/mysql/checks/config.py | 20 +- .../contrib/hardening/ssh/__init__.py | 20 +- .../contrib/hardening/ssh/checks/__init__.py | 20 +- .../contrib/hardening/ssh/checks/config.py | 20 +- .../contrib/hardening/templating.py | 20 +- .../charmhelpers/contrib/hardening/utils.py | 20 +- .../charmhelpers/contrib/network/__init__.py | 20 +- .../hooks/charmhelpers/contrib/network/ip.py | 26 +- .../contrib/openstack/__init__.py | 20 +- .../contrib/openstack/alternatives.py | 20 +- .../contrib/openstack/exceptions.py | 21 + .../charmhelpers/contrib/openstack/utils.py | 1122 +++++++++++++++-- .../charmhelpers/contrib/python/__init__.py | 13 + .../charmhelpers/contrib/python/debug.py | 54 + .../charmhelpers/contrib/python/packages.py | 147 +++ .../hooks/charmhelpers/contrib/python/rpdb.py | 56 + .../charmhelpers/contrib/python/version.py | 32 + .../charmhelpers/contrib/storage/__init__.py | 20 +- 
.../contrib/storage/linux/__init__.py | 20 +- .../contrib/storage/linux/ceph.py | 61 +- .../contrib/storage/linux/loopback.py | 86 ++ .../charmhelpers/contrib/storage/linux/lvm.py | 103 ++ .../contrib/storage/linux/utils.py | 20 +- .../hooks/charmhelpers/core/__init__.py | 20 +- .../hooks/charmhelpers/core/decorators.py | 20 +- ceph-proxy/hooks/charmhelpers/core/files.py | 20 +- ceph-proxy/hooks/charmhelpers/core/fstab.py | 20 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 22 +- ceph-proxy/hooks/charmhelpers/core/host.py | 79 +- .../hooks/charmhelpers/core/hugepage.py | 20 +- ceph-proxy/hooks/charmhelpers/core/kernel.py | 20 +- .../charmhelpers/core/services/__init__.py | 20 +- .../hooks/charmhelpers/core/services/base.py | 20 +- .../charmhelpers/core/services/helpers.py | 20 +- .../hooks/charmhelpers/core/strutils.py | 20 +- ceph-proxy/hooks/charmhelpers/core/sysctl.py | 20 +- .../hooks/charmhelpers/core/templating.py | 20 +- .../hooks/charmhelpers/core/unitdata.py | 21 +- .../hooks/charmhelpers/fetch/__init__.py | 35 +- .../hooks/charmhelpers/fetch/archiveurl.py | 20 +- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 43 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 20 +- .../hooks/charmhelpers/payload/__init__.py | 20 +- .../hooks/charmhelpers/payload/execd.py | 20 +- ceph-proxy/templates/ceph.conf | 6 +- ceph-proxy/tests/014-basic-precise-icehouse | 11 + ceph-proxy/tests/015-basic-trusty-icehouse | 9 + ceph-proxy/tests/016-basic-trusty-juno | 11 + ceph-proxy/tests/017-basic-trusty-kilo | 11 + ceph-proxy/tests/018-basic-trusty-liberty | 11 + ceph-proxy/tests/019-basic-trusty-mitaka | 11 + ceph-proxy/tests/020-basic-wily-liberty | 9 + ceph-proxy/tests/021-basic-xenial-mitaka | 9 + ceph-proxy/tests/basic_deployment.py | 159 +++ ceph-proxy/tests/charmhelpers/__init__.py | 36 + .../tests/charmhelpers/contrib/__init__.py | 13 + .../charmhelpers/contrib/amulet/__init__.py | 13 + .../charmhelpers/contrib/amulet/deployment.py | 93 ++ .../charmhelpers/contrib/amulet/utils.py | 827 ++++++++++++ .../contrib/openstack/__init__.py | 13 + .../contrib/openstack/amulet/__init__.py | 13 + .../contrib/openstack/amulet/deployment.py | 295 +++++ .../contrib/openstack/amulet/utils.py | 1010 +++++++++++++++ ceph-proxy/tests/setup/00-setup | 17 + ceph-proxy/tests/tests.yaml | 22 + ceph-proxy/unit_tests/__init__.py | 2 + ceph-proxy/unit_tests/test_ceph_broker.py | 137 ++ ceph-proxy/unit_tests/test_dummy.py | 6 - ceph-proxy/unit_tests/test_utils.py | 121 ++ 106 files changed, 5129 insertions(+), 925 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python/debug.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python/packages.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python/version.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py create mode 100755 ceph-proxy/tests/014-basic-precise-icehouse create mode 100755 ceph-proxy/tests/015-basic-trusty-icehouse create mode 100755 ceph-proxy/tests/016-basic-trusty-juno create mode 100755 ceph-proxy/tests/017-basic-trusty-kilo create mode 100755 ceph-proxy/tests/018-basic-trusty-liberty create mode 100755 ceph-proxy/tests/019-basic-trusty-mitaka create mode 100755 
ceph-proxy/tests/020-basic-wily-liberty create mode 100755 ceph-proxy/tests/021-basic-xenial-mitaka create mode 100644 ceph-proxy/tests/basic_deployment.py create mode 100644 ceph-proxy/tests/charmhelpers/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py create mode 100755 ceph-proxy/tests/setup/00-setup create mode 100644 ceph-proxy/tests/tests.yaml create mode 100644 ceph-proxy/unit_tests/test_ceph_broker.py delete mode 100644 ceph-proxy/unit_tests/test_dummy.py create mode 100644 ceph-proxy/unit_tests/test_utils.py diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 6751aafc..c6109cc8 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -16,12 +16,12 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint test - bzr push lp:charms/ceph - bzr push lp:charms/trusty/ceph +publish: lint + bzr push lp:charms/ceph-osd + bzr push lp:charms/trusty/ceph-osd diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index f4b2a26a..d12f6968 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -7,8 +7,14 @@ include: - contrib.storage.linux: - utils - ceph + - loopback + - lvm - payload.execd - - contrib.openstack.alternatives + - contrib.openstack: + - utils + - exceptions + - alternatives - contrib.network.ip - contrib.charmsupport - - contrib.hardening|inc=* \ No newline at end of file + - contrib.hardening|inc=* + - contrib.python \ No newline at end of file diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 97ae7ce7..b77b3b25 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -18,11 +18,29 @@ options: default: description: | Admin cephx key for existing Ceph cluster + auth-supported: + type: string + default: cephx + description: | + Which authentication flavour to use. + . + Valid options are "cephx" and "none". If "none" is specified, + keys will still be created and deployed so that it can be + enabled later. # mon-key: # type: string # default: # description: | # Monitor cephx key + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. 
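A minimal sketch of how a hook can consume the new auth-supported, use-syslog, and loglevel options through charmhelpers' config() accessor; the first two keys mirror the emit_cephconf() change below, while loglevel is included purely as an illustration:

from charmhelpers.core.hookenv import config


def build_ceph_context():
    return {
        'auth_supported': config('auth-supported'),       # "cephx" or "none"
        'use_syslog': str(config('use-syslog')).lower(),  # template expects a string
        'loglevel': config('loglevel'),                   # 1..20
    }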
source: type: string default: diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index ebe3a621..74588875 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -74,6 +74,7 @@ def install(): def emit_cephconf(): cephcontext = { + 'auth_supported': config('auth-supported'), 'mon_hosts': config('monitor-hosts'), 'fsid': config('fsid'), 'use_syslog': str(config('use-syslog')).lower(), @@ -147,13 +148,10 @@ def radosgw_relation(relid=None, unit=None): settings = relation_get(rid=relid, unit=unit) """Process broker request(s).""" if 'broker_req' in settings: - if ceph.is_leader(): - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - data[unit_response_key] = rsp - else: - log("Not leader - ignoring broker request", level=DEBUG) + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data[unit_response_key] = rsp relation_set(relation_id=relid, relation_settings=data) else: diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index f72e7f84..48867880 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/hooks/charmhelpers/cli/__init__.py index 2d37ab31..389b490f 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/cli/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
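The radosgw_relation() change above drops the leader gate: the proxy answers every broker_req itself and keys the response by the requesting unit's name. A self-contained sketch of that round-trip, with process_requests passed in as a stand-in for the charm's broker handler:

def answer_broker_request(settings, unit, process_requests):
    data = {}
    if 'broker_req' in settings:
        rsp = process_requests(settings['broker_req'])
        # e.g. unit "radosgw/0" -> key "broker-rsp-radosgw-0"
        data['broker-rsp-' + unit.replace('/', '-')] = rsp
    return data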
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import inspect import argparse diff --git a/ceph-proxy/hooks/charmhelpers/cli/benchmark.py b/ceph-proxy/hooks/charmhelpers/cli/benchmark.py index b23c16ce..303af14b 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/benchmark.py +++ b/ceph-proxy/hooks/charmhelpers/cli/benchmark.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.contrib.benchmark import Benchmark diff --git a/ceph-proxy/hooks/charmhelpers/cli/commands.py b/ceph-proxy/hooks/charmhelpers/cli/commands.py index 7e91db00..b9310565 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/commands.py +++ b/ceph-proxy/hooks/charmhelpers/cli/commands.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ This module loads sub-modules into the python runtime so they can be diff --git a/ceph-proxy/hooks/charmhelpers/cli/hookenv.py b/ceph-proxy/hooks/charmhelpers/cli/hookenv.py index 265c816e..bd72f448 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/cli/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import hookenv diff --git a/ceph-proxy/hooks/charmhelpers/cli/host.py b/ceph-proxy/hooks/charmhelpers/cli/host.py index 58e78d6b..40396849 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/host.py +++ b/ceph-proxy/hooks/charmhelpers/cli/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . 
import cmdline from charmhelpers.core import host diff --git a/ceph-proxy/hooks/charmhelpers/cli/unitdata.py b/ceph-proxy/hooks/charmhelpers/cli/unitdata.py index d1cd95bf..c5728582 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/cli/unitdata.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from . import cmdline from charmhelpers.core import unitdata diff --git a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 2f246429..17976fb5 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py index 320961b9..7ea43f08 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Functions for managing volumes in juju units. One volume is supported per unit. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py index a1335320..30a3e943 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py @@ -1,15 +1,13 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. 
If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py index d1304792..3bc2ebd4 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 8249ca01..51b636f7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os import re diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 6a7057b3..9bf9c3c6 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. class BaseAudit(object): # NO-QA diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py index cf3c987d..d812948a 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import re import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py index e94af031..3dc14e3c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import absolute_import # required for external apt import from apt import apt_pkg diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py index 0fb545a9..257c6351 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import grp import os diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py index ac7568d6..b55764cd 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py index c3bd5985..0e7e409f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py index 2c221cda..7ce41b00 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.utils import get_settings from charmhelpers.contrib.hardening.audits.apt import ( diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py index 8ce9dc2b..e94f5ebe 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( DirectoryPermissionAudit, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py index d32c4f60..fe2bc6ef 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from six import string_types diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py index c471064b..6e64be00 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import ( FilePermissionAudit, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py index 383fe28e..9b38d5f0 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from subprocess import ( check_output, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index f7443357..56d65263 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py index e33c73ca..34cd0217 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py index 0534689b..bcbe3fde 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index 4a76d74e..f1ea5813 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import platform diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from os import path diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py index d4f0ec19..1990d851 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from charmhelpers.core.hookenv import ( log, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index 3af8b89d..a79f33b7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import six import subprocess diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py index 277b8c77..58bebd84 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from os import path diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py index b85150d5..edaf484b 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from charmhelpers.core.hookenv import ( log, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 3fb6ae8d..94e524e2 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py index d2ab7dc9..2174c645 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py index a6743a4d..ff7485c2 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,18 +1,16 @@ # Copyright 2016 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import grp diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 6bba07b6..b5f457c7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import glob import re @@ -405,10 +403,10 @@ def is_ip(address): Returns True if address is a valid IP address. """ try: - # Test to see if already an IPv4 address - socket.inet_aton(address) + # Test to see if already an IPv4/IPv6 address + address = netaddr.IPAddress(address) return True - except socket.error: + except netaddr.AddrFormatError: return False diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py index ef77caf3..1501641e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
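The is_ip() hunk above replaces socket.inet_aton(), which only understands IPv4 (and accepts shorthand such as '10.1'), with netaddr.IPAddress, which validates IPv4 and IPv6 alike. A minimal sketch of the new behaviour, assuming the netaddr package is available:

    import netaddr

    def is_ip(address):
        # True for any well-formed IPv4 or IPv6 address
        try:
            netaddr.IPAddress(address)
            return True
        except netaddr.AddrFormatError:
            return False

    # is_ip('192.168.1.1') -> True
    # is_ip('fe80::1')     -> True   (inet_aton would have rejected this)
    # is_ip('not-an-ip')   -> False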
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. ''' Helper for managing alternatives for file conflict resolution ''' diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 00000000..f85ae4f4 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,21 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 2af4476d..f4401913 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Common python helper functions used for OpenStack charms. 
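The new contrib/openstack/exceptions.py gives context generators a dedicated exception type instead of a generic error. A hypothetical caller (the function below is illustrative, not part of the patch):

    from charmhelpers.contrib.openstack.exceptions import OSContextError

    def parse_flags_or_fail(config_flags):
        # surface a malformed operator-supplied string as a
        # context-generation failure rather than a bare ValueError
        if '==' in config_flags:
            raise OSContextError("config flags not in key=value format")
        return dict(kv.strip().split('=', 1)
                    for kv in config_flags.split(','))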
from collections import OrderedDict @@ -23,8 +21,12 @@ import os import sys import re +import itertools +import functools +import shutil import six +import tempfile import traceback import uuid import yaml @@ -41,10 +43,13 @@ config, log as juju_log, charm_dir, + DEBUG, INFO, + ERROR, related_units, relation_ids, relation_set, + service_name, status_set, hook_name ) @@ -58,6 +63,7 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, is_ipv6, + port_has_listener, ) from charmhelpers.contrib.python.packages import ( @@ -65,10 +71,19 @@ pip_install, ) -from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + restart_on_change_helper, +) from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -87,6 +102,8 @@ ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zebra', 'ocata'), # TODO: upload with real Z name ]) @@ -101,73 +118,114 @@ ('2015.1', 'kilo'), ('2015.2', 'liberty'), ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), ]) -# The ugly duckling +# The ugly duckling - must list releases oldest to newest SWIFT_CODENAMES = OrderedDict([ - ('1.4.3', 'diablo'), - ('1.4.8', 'essex'), - ('1.7.4', 'folsom'), - ('1.8.0', 'grizzly'), - ('1.7.7', 'grizzly'), - ('1.7.6', 'grizzly'), - ('1.10.0', 'havana'), - ('1.9.1', 'havana'), - ('1.9.0', 'havana'), - ('1.13.1', 'icehouse'), - ('1.13.0', 'icehouse'), - ('1.12.0', 'icehouse'), - ('1.11.0', 'icehouse'), - ('2.0.0', 'juno'), - ('2.1.0', 'juno'), - ('2.2.0', 'juno'), - ('2.2.1', 'kilo'), - ('2.2.2', 'kilo'), - ('2.3.0', 'liberty'), - ('2.4.0', 'liberty'), - ('2.5.0', 'liberty'), + ('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0']), ]) # >= Liberty version->codename mapping PACKAGE_CODENAMES = { 'nova-common': OrderedDict([ - ('12.0', 'liberty'), - ('13.0', 'mitaka'), + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), ]), 'neutron-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'cinder-common': OrderedDict([ - ('7.0', 'liberty'), - ('8.0', 'mitaka'), + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), ]), 'keystone': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'horizon-common': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), 'ceilometer-common': OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'heat-common': 
OrderedDict([ - ('5.0', 'liberty'), - ('6.0', 'mitaka'), + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), ]), 'glance-common': OrderedDict([ - ('11.0', 'liberty'), - ('12.0', 'mitaka'), + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), ]), 'openstack-dashboard': OrderedDict([ - ('8.0', 'liberty'), - ('9.0', 'mitaka'), + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), ]), } +GIT_DEFAULT_REPOS = { + 'requirements': 'git://github.com/openstack/requirements', + 'cinder': 'git://github.com/openstack/cinder', + 'glance': 'git://github.com/openstack/glance', + 'horizon': 'git://github.com/openstack/horizon', + 'keystone': 'git://github.com/openstack/keystone', + 'neutron': 'git://github.com/openstack/neutron', + 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', + 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', + 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', + 'nova': 'git://github.com/openstack/nova', +} + +GIT_DEFAULT_BRANCHES = { + 'kilo': 'stable/kilo', + 'liberty': 'stable/liberty', + 'mitaka': 'stable/mitaka', + 'master': 'master', +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -227,6 +285,44 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): error_out(e) +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. 
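SWIFT_CODENAMES is inverted here from version -> codename to codename -> [versions] (oldest to newest) because one swift version can ship in two releases ('2.5.0' appears under both liberty and mitaka). Both lookup directions then reduce to simple scans; a sketch against a trimmed copy of the table:

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('liberty', ['2.3.0', '2.4.0', '2.5.0']),
        ('mitaka', ['2.5.0', '2.6.0', '2.7.0']),
    ])

    def version_for(codename):
        # newest version for a codename, as in get_os_version_codename_swift()
        return SWIFT_CODENAMES[codename][-1]

    def codenames_for(version):
        # every codename containing a version, as in get_swift_codename()
        return [c for c, vers in SWIFT_CODENAMES.items() if version in vers]

    version_for('mitaka')     # '2.7.0'
    codenames_for('2.5.0')    # ['liberty', 'mitaka'] -> needs the apt-cache tiebreak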
+ for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match('^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' import apt_pkg as apt @@ -262,15 +358,18 @@ def get_os_codename_package(package, fatal=True): if match: vers = match.group(0) + # Generate a major version number for newer semantic + # versions of openstack projects + major_vers = vers.split('.')[0] # >= Liberty independent project versions if (package in PACKAGE_CODENAMES and - vers in PACKAGE_CODENAMES[package]): - return PACKAGE_CODENAMES[package][vers] + major_vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][major_vers] else: # < Liberty co-ordinated project versions try: if 'swift' in pkg.name: - return SWIFT_CODENAMES[vers] + return get_swift_codename(vers) else: return OPENSTACK_CODENAMES[vers] except KeyError: @@ -289,12 +388,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES + for cname, version in six.iteritems(vers_map): + if cname == codename: + return version[-1] else: vers_map = OPENSTACK_CODENAMES - - for version, cname in six.iteritems(vers_map): - if cname == codename: - return version + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version # e = "Could not determine OpenStack version for package: %s" % pkg # error_out(e) @@ -319,12 +420,42 @@ def os_release(package, base='essex'): def import_key(keyid): - cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ - "--recv-keys %s" % keyid - try: - subprocess.check_call(cmd.split(' ')) - except subprocess.CalledProcessError: - error_out("Error importing repo key %s" % keyid) + key = keyid.strip() + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + juju_log("Importing ASCII Armor PGP key", level=DEBUG) + with tempfile.NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + else: + juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) + juju_log("Importing PGP key from keyserver", level=DEBUG) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error_out("Error importing PGP key '%s'" % key) + + +def get_source_and_pgp_key(input): + """Look for a pgp key ID or ascii-armor key in the given input.""" + index = input.strip() + index = input.rfind('|') + if index < 0: + return input, None + + key = input[index + 1:].strip('|') + source = input[:index] + return source, key def 
configure_installation_source(rel): @@ -336,16 +467,16 @@ def configure_installation_source(rel): with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(DISTRO_PROPOSED % ubuntu_rel) elif rel[:4] == "ppa:": - src = rel + src, key = get_source_and_pgp_key(rel) + if key: + import_key(key) + subprocess.check_call(["add-apt-repository", "-y", src]) elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) + src, key = get_source_and_pgp_key(rel) + if key: import_key(key) - elif l == 1: - src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: f.write(src) elif rel[:6] == 'cloud:': @@ -393,6 +524,9 @@ def configure_installation_source(rel): 'mitaka': 'trusty-updates/mitaka', 'mitaka/updates': 'trusty-updates/mitaka', 'mitaka/proposed': 'trusty-proposed/mitaka', + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', } try: @@ -460,11 +594,16 @@ def openstack_upgrade_available(package): cur_vers = get_os_version_package(package) if "swift" in package: codename = get_os_codename_install_source(src) - available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + avail_vers = get_os_version_codename_swift(codename) else: - available_vers = get_os_version_install_source(src) + avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(available_vers, cur_vers) == 1 + if "swift" in package: + major_cur_vers = cur_vers.split('.', 1)[0] + major_avail_vers = avail_vers.split('.', 1)[0] + major_diff = apt.version_compare(major_avail_vers, major_cur_vers) + return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) + return apt.version_compare(avail_vers, cur_vers) == 1 def ensure_block_device(block_device): @@ -583,6 +722,67 @@ def git_install_requested(): requirements_dir = None +def git_default_repos(projects_yaml): + """ + Returns default repos if a default openstack-origin-git value is specified. + """ + service = service_name() + core_project = service + + for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + if projects_yaml == default: + + # add the requirements repo first + repo = { + 'name': 'requirements', + 'repository': GIT_DEFAULT_REPOS['requirements'], + 'branch': branch, + } + repos = [repo] + + # NOTE(coreycb): This is a temp work-around until the requirements + # repo moves from stable/kilo branch to kilo-eol tag. The core + # repos have already done this. 
+ if default == 'kilo': + branch = 'kilo-eol' + + # neutron-* and nova-* charms require some additional repos + if service in ['neutron-api', 'neutron-gateway', + 'neutron-openvswitch']: + core_project = 'neutron' + for project in ['neutron-fwaas', 'neutron-lbaas', + 'neutron-vpnaas', 'nova']: + repo = { + 'name': project, + 'repository': GIT_DEFAULT_REPOS[project], + 'branch': branch, + } + repos.append(repo) + + elif service in ['nova-cloud-controller', 'nova-compute']: + core_project = 'nova' + repo = { + 'name': 'neutron', + 'repository': GIT_DEFAULT_REPOS['neutron'], + 'branch': branch, + } + repos.append(repo) + elif service == 'openstack-dashboard': + core_project = 'horizon' + + # finally add the current service's core project repo + repo = { + 'name': core_project, + 'repository': GIT_DEFAULT_REPOS[core_project], + 'branch': branch, + } + repos.append(repo) + + return yaml.dump(dict(repositories=repos)) + + return projects_yaml + + def _git_yaml_load(projects_yaml): """ Load the specified yaml into a dictionary. @@ -640,6 +840,7 @@ def git_clone_and_install(projects_yaml, core_project): pip_install(p, upgrade=True, proxy=http_proxy, venv=os.path.join(parent_dir, 'venv')) + constraints = None for p in projects['repositories']: repo = p['repository'] branch = p['branch'] @@ -651,10 +852,15 @@ def git_clone_and_install(projects_yaml, core_project): parent_dir, http_proxy, update_requirements=False) requirements_dir = repo_dir + constraints = os.path.join(repo_dir, "upper-constraints.txt") + # upper-constraints didn't exist until after icehouse + if not os.path.isfile(constraints): + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements=True) + update_requirements=True, + constraints=constraints) os.environ = old_environ @@ -686,7 +892,7 @@ def _git_ensure_key_exists(key, keys): def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements): + update_requirements, constraints=None): """ Clone and install a single git repository. """ @@ -696,7 +902,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, os.mkdir(parent_dir) juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) + repo_dir = install_remote( + repo, dest=parent_dir, branch=branch, depth=depth) venv = os.path.join(parent_dir, 'venv') @@ -708,9 +915,10 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, juju_log('Installing git repo from dir: {}'.format(repo_dir)) if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv) + pip_install(repo_dir, proxy=http_proxy, venv=venv, + constraints=constraints) else: - pip_install(repo_dir, venv=venv) + pip_install(repo_dir, venv=venv, constraints=constraints) return repo_dir @@ -779,6 +987,85 @@ def git_yaml_value(projects_yaml, key): return None +def git_generate_systemd_init_files(templates_dir): + """ + Generate systemd init files. + + Generates and installs systemd init units and script files based on the + *.init.in files contained in the templates_dir directory. + + This code is based on the openstack-pkg-tools package and its init + script generation, which is used by the OpenStack packages. 
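git_clone_and_install() now remembers upper-constraints.txt from the requirements repo (where present; the file only exists after icehouse) and passes it to every subsequent pip_install() call. In plain pip terms the effect is roughly the following (paths illustrative; venv and proxy handling omitted):

    import os
    import subprocess

    def pip_install_with_constraints(repo_dir, constraints=None):
        cmd = ['pip', 'install', repo_dir]
        if constraints and os.path.isfile(constraints):
            # cap transitive dependency versions to the tested set
            cmd.extend(['-c', constraints])
        subprocess.check_call(cmd)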
+ """ + for f in os.listdir(templates_dir): + # Create the init script and systemd unit file from the template + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + init_source = os.path.join(templates_dir, init_file) + service_source = os.path.join(templates_dir, service_file) + + init_dest = os.path.join('/etc/init.d', init_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(init_in_source, init_source) + with open(init_source, 'a') as outfile: + template = '/usr/share/openstack-pkg-tools/init-script-template' + with open(template) as infile: + outfile.write('\n\n{}'.format(infile.read())) + + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(init_dest): + os.remove(init_dest) + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(init_source, init_dest) + shutil.copyfile(service_source, service_dest) + os.chmod(init_dest, 0o755) + + for f in os.listdir(templates_dir): + # If there's a service.in file, use it instead of the generated one + if f.endswith(".service.in"): + service_in_file = f + service_file = f[:-3] + + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + shutil.copyfile(service_in_source, service_source) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + + for f in os.listdir(templates_dir): + # Generate the systemd unit if there's no existing .service.in + if f.endswith(".init.in"): + init_in_file = f + init_file = f[:-8] + service_in_file = "{}.service.in".format(init_file) + service_file = "{}.service".format(init_file) + + init_in_source = os.path.join(templates_dir, init_in_file) + service_in_source = os.path.join(templates_dir, service_in_file) + service_source = os.path.join(templates_dir, service_file) + service_dest = os.path.join('/lib/systemd/system', service_file) + + if not os.path.exists(service_in_source): + cmd = ['pkgos-gen-systemd-unit', init_in_source] + subprocess.check_call(cmd) + + if os.path.exists(service_dest): + os.remove(service_dest) + shutil.copyfile(service_source, service_dest) + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -795,56 +1082,155 @@ def wrapped_f(*args, **kwargs): return wrap -def set_os_workload_status(configs, required_interfaces, charm_func=None): +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message """ - Set workload status based on complete contexts. 
- status-set missing or incomplete contexts - and juju-log details of missing required data. - charm_func is a charm specific function to run checking - for charm specific requirements such as a VIP setting. + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + if the unit isn't supposed to be paused, just return None, None + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None """ - incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) - state = 'active' - missing_relations = [] - incomplete_relations = [] + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. + + - Checks for missing or incomplete contexts + - juju log details of missing required data. + - determines the correct workload status + - creates an appropriate message for status_set(...) 
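The refactor splits the old monolithic status check into small _ows_check_* helpers that _determine_os_workload_status() chains until one of them produces a state. The simplified control flow is a sketch like this (the real function additionally lets charm_func amend a non-maintenance state):

    def determine_status(checks):
        # each check returns (state, message) or (None, None)
        for check in checks:
            state, message = check()
            if state is not None:
                return state, message
        return 'active', 'Unit is ready'

    # e.g. checks = [paused_check, interface_check, services_check]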
+ + if there are no problems then the function returns None, None + + @param configs: a templating.OSConfigRenderer() object + @params required_interfaces: {generic_interface: [specific_interface], } + @returns state, message or None, None + """ + incomplete_rel_data = incomplete_relation_data(configs, + required_interfaces) + state = None message = None - charm_state = None - charm_message = None + missing_relations = set() + incomplete_relations = set() - for generic_interface in incomplete_rel_data.keys(): + for generic_interface, relations_states in incomplete_rel_data.items(): related_interface = None missing_data = {} # Related or not? - for interface in incomplete_rel_data[generic_interface]: - if incomplete_rel_data[generic_interface][interface].get('related'): + for interface, relation_state in relations_states.items(): + if relation_state.get('related'): related_interface = interface - missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') - # No relation ID for the generic_interface + missing_data = relation_state.get('missing_data') + break + # No relation ID for the generic_interface? if not related_interface: juju_log("{} relation is missing and must be related for " "functionality. ".format(generic_interface), 'WARN') state = 'blocked' - if generic_interface not in missing_relations: - missing_relations.append(generic_interface) + missing_relations.add(generic_interface) else: - # Relation ID exists but no related unit + # Relation ID exists but no related unit if not missing_data: - # Edge case relation ID exists but departing - if ('departed' in hook_name() or 'broken' in hook_name()) \ - and related_interface in hook_name(): + # Edge case - relation ID exists but departing + _hook_name = hook_name() + if (('departed' in _hook_name or 'broken' in _hook_name) and + related_interface in _hook_name): state = 'blocked' - if generic_interface not in missing_relations: - missing_relations.append(generic_interface) + missing_relations.add(generic_interface) juju_log("{} relation's interface, {}, " "relationship is departed or broken " "and is required for functionality." - "".format(generic_interface, related_interface), "WARN") + "".format(generic_interface, related_interface), + "WARN") # Normal case relation ID exists but no related unit # (joining) else: - juju_log("{} relations's interface, {}, is related but has " - "no units in the relation." - "".format(generic_interface, related_interface), "INFO") + juju_log("{} relation's interface, {}, is related but has" + " no units in the relation."
+ "".format(generic_interface, related_interface), + "INFO") # Related unit exists and data missing on the relation else: juju_log("{} relation's interface, {}, is related awaiting " @@ -853,9 +1239,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): ", ".join(missing_data)), "INFO") if state != 'blocked': state = 'waiting' - if generic_interface not in incomplete_relations \ - and generic_interface not in missing_relations: - incomplete_relations.append(generic_interface) + if generic_interface not in missing_relations: + incomplete_relations.add(generic_interface) if missing_relations: message = "Missing relations: {}".format(", ".join(missing_relations)) @@ -868,9 +1253,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): "".format(", ".join(incomplete_relations)) state = 'waiting' - # Run charm specific checks - if charm_func: - charm_state, charm_message = charm_func(configs) + return state, message + + +def _ows_check_charm_func(state, message, charm_func_with_configs): + """Run a custom check function for the charm to see if it wants to + change the state. This is only run if not in 'maintenance' and + tests to see if the new state is more important that the previous + one determined by the interfaces/relations check. + + @param state: the previously determined state so far. + @param message: the user orientated message so far. + @param charm_func: a callable function that returns state, message + @returns state, message strings. + """ + if charm_func_with_configs: + charm_state, charm_message = charm_func_with_configs() if charm_state != 'active' and charm_state != 'unknown': state = workload_state_compare(state, charm_state) if message: @@ -879,13 +1277,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None): message = "{}, {}".format(message, charm_message) else: message = charm_message + return state, message - # Set to active if all requirements have been met - if state == 'active': - message = "Unit is ready" - juju_log(message, "INFO") - status_set(state, message) +def _ows_check_services_running(services, ports): + """Check that the services that should be running are actually running + and that any ports specified are being listened to. + + @param services: list of strings OR dictionary specifying services/ports + @param ports: list of ports + @returns state, message: strings or None, None + """ + messages = [] + state = None + if services is not None: + services = _extract_services_list_helper(services) + services_running, running = _check_running_services(services) + if not all(running): + messages.append( + "Services not running that should be: {}" + .format(", ".join(_filter_tuples(services_running, False)))) + state = 'blocked' + # also verify that the ports that should be open are open + # NB, that ServiceManager objects only OPTIONALLY have ports + map_not_open, ports_open = ( + _check_listening_on_services_ports(services)) + if not all(ports_open): + # find which service has missing ports. They are in service + # order which makes it a bit easier. 
+ message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in ports_open + if not v]))) + state = 'blocked' + + if state is not None: + message = "; ".join(messages) + return state, message + + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. + + @param services: OrderedDict of strings: [ports], one for each service to + check. + @returns [(service, boolean), ...], : results for checks + [boolean] : just the result of the service checks + """ + services_running = [service_running(s) for s in services] + return list(zip(services, services_running)), services_running + + +def _check_listening_on_services_ports(services, test=False): + """Check that the unit is actually listening (has the port open) on the + ports that the service specifies are open. If test is True then the + function returns the services with ports that are open rather than + closed. + + Returns an OrderedDict of service: ports and a list of booleans + + @param services: OrderedDict(service: [port, ...], ...) + @param test: default=False, if False, test for closed, otherwise open. + @returns OrderedDict(service: [port-not-open, ...]...), [boolean] + """ + test = not(not(test)) # ensure test is True or False + all_ports = list(itertools.chain(*services.values())) + ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + map_ports = OrderedDict() + matched_ports = [p for p, opened in zip(all_ports, ports_states) + if opened == test] # essentially opened xor test + for service, ports in services.items(): + set_ports = set(ports).intersection(matched_ports) + if set_ports: + map_ports[service] = set_ports + return map_ports, ports_states + + +def _check_listening_on_ports_list(ports): + """Check that the ports list given are being listened to + + Returns a list of ports being listened to and a list of the + booleans. 
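_extract_services_list_helper() is what lets all of these checks take services in any of the documented shapes. The equivalent normalisation logic in isolation:

    from collections import OrderedDict

    def normalize_services(services):
        # None, ['svc', ...], {'svc': {'service': ...}}, or
        # [{'service': 'svc', 'ports': [...]}, ...] all become
        # OrderedDict(service_name: [ports])
        if services is None:
            return OrderedDict()
        if isinstance(services, dict):
            services = services.values()
        out = OrderedDict()
        for s in services:
            if isinstance(s, dict) and 'service' in s:
                out[s['service']] = s.get('ports', [])
            elif isinstance(s, str):
                out[s] = []
        return out

    normalize_services(['ceph-mon'])                             # {'ceph-mon': []}
    normalize_services([{'service': 'radosgw', 'ports': [80]}])  # {'radosgw': [80]}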
+ + @param ports: LIST or port numbers. + @returns [(port_num, boolean), ...], [boolean] + """ + ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + return zip(ports, ports_open), ports_open + + +def _filter_tuples(services_states, state): + """Return a simple list from a list of tuples according to the condition + + @param services_states: LIST of (string, boolean): service and running + state. + @param state: Boolean to match the tuple against. + @returns [LIST of strings] that matched the tuple RHS. + """ + return [s for s, b in services_states if b == state] def workload_state_compare(current_workload_state, workload_state): @@ -910,8 +1446,7 @@ def workload_state_compare(current_workload_state, workload_state): def incomplete_relation_data(configs, required_interfaces): - """ - Check complete contexts against required_interfaces + """Check complete contexts against required_interfaces Return dictionary of incomplete relation data. configs is an OSConfigRenderer object with configs registered @@ -936,19 +1471,13 @@ def incomplete_relation_data(configs, required_interfaces): 'shared-db': {'related': True}}} """ complete_ctxts = configs.complete_contexts() - incomplete_relations = [] - for svc_type in required_interfaces.keys(): - # Avoid duplicates - found_ctxt = False - for interface in required_interfaces[svc_type]: - if interface in complete_ctxts: - found_ctxt = True - if not found_ctxt: - incomplete_relations.append(svc_type) - incomplete_context_data = {} - for i in incomplete_relations: - incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) - return incomplete_context_data + incomplete_relations = [ + svc_type + for svc_type, interfaces in required_interfaces.items() + if not set(interfaces).intersection(complete_ctxts)] + return { + i: configs.get_incomplete_context_data(required_interfaces[i]) + for i in incomplete_relations} def do_action_openstack_upgrade(package, upgrade_callback, configs): @@ -1009,3 +1538,326 @@ def remote_restart(rel_name, remote_service=None): relation_set(relation_id=rid, relation_settings=trigger, ) + + +def check_actually_paused(services=None, ports=None): + """Check that services listed in the services object and and ports + are actually closed (not listened to), to verify that the unit is + properly paused. 
+ + @param services: See _extract_services_list_helper + @returns status, : string for status (None if okay) + message : string for problem for status_set + """ + state = None + message = None + messages = [] + if services is not None: + services = _extract_services_list_helper(services) + services_running, services_states = _check_running_services(services) + if any(services_states): + # there shouldn't be any running so this is a problem + messages.append("these services running: {}" + .format(", ".join( + _filter_tuples(services_running, True)))) + state = "blocked" + ports_open, ports_open_bools = ( + _check_listening_on_services_ports(services, True)) + if any(ports_open_bools): + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in ports_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "these service:ports are open: {}".format(message)) + state = 'blocked' + if ports is not None: + ports_open, bools = _check_listening_on_ports_list(ports) + if any(bools): + messages.append( + "these ports which should be closed, but are open: {}" + .format(", ".join([str(p) for p, v in ports_open if v]))) + state = 'blocked' + if messages: + message = ("Services should be paused but {}" + .format(", ".join(messages))) + return state, message + + +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-paused'))) + except: + return False + + +def pause_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Pause a unit by stopping the services and setting 'unit-paused' + in the local kv() store. + + Also checks that the services have stopped and ports are no longer + being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None, None to indicate that the unit + didn't pause cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm pausing. + @returns None + @raises Exception(message) on an error for action_fail(). 
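set_unit_paused(), clear_unit_paused() and is_unit_paused_set() just round-trip one flag through the charm's local key/value store. The same pattern, condensed:

    from charmhelpers.core import unitdata

    def set_pause_flag(paused):
        with unitdata.HookData()() as t:
            kv = t[0]
            kv.set('unit-paused', paused)

    def pause_flag_set():
        # coerce to a plain bool; default to False if kv is unavailable
        try:
            with unitdata.HookData()() as t:
                return bool(t[0].get('unit-paused'))
        except Exception:
            return False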
+ """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + stopped = service_pause(service) + if not stopped: + messages.append("{} didn't stop cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + message.append(str(e)) + set_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't pause: {}".format("; ".join(messages))) + + +def resume_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Resume a unit by starting the services and clearning 'unit-paused' + in the local kv() store. + + Also checks that the services have started and ports are being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None to indicate that the unit + didn't resume cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are started, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm resuming. + @returns None + @raises Exception(message) on an error for action_fail(). + """ + services = _extract_services_list_helper(services) + messages = [] + if services: + for service in services.keys(): + started = service_resume(service) + if not started: + messages.append("{} didn't start cleanly.".format(service)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + message.append(str(e)) + clear_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't resume: {}".format("; ".join(messages))) + + +def make_assess_status_func(*args, **kwargs): + """Creates an assess_status_func() suitable for handing to pause_unit() + and resume_unit(). + + This uses the _determine_os_workload_status(...) function to determine + what the workload_status should be for the unit. If the unit is + not in maintenance or active states, then the message is returned to + the caller. This is so an action that doesn't result in either a + complete pause or complete resume can signal failure with an action_fail() + """ + def _assess_status_func(): + state, message = _determine_os_workload_status(*args, **kwargs) + status_set(state, message) + if state not in ['maintenance', 'active']: + return message + return None + + return _assess_status_func + + +def pausable_restart_on_change(restart_map, stopstart=False, + restart_functions=None): + """A restart_on_change decorator that checks to see if the unit is + paused. If it is paused then the decorated function doesn't fire. + + This is provided as a helper, as the @restart_on_change(...) decorator + is in core.host, yet the openstack specific helpers are in this file + (contrib.openstack.utils). Thus, this needs to be an optional feature + for openstack charms (or charms that wish to use the openstack + pause/resume type features). 
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+                               restart_functions=None):
+    """A restart_on_change decorator that checks to see if the unit is
+    paused.  If it is paused, the decorated function still runs, but no
+    services are restarted afterwards.
+
+    This is provided as a helper, as the @restart_on_change(...) decorator
+    is in core.host, yet the openstack specific helpers are in this file
+    (contrib.openstack.utils).  Thus, this needs to be an optional feature
+    for openstack charms (or charms that wish to use the openstack
+    pause/resume type features).
+
+    It is used as follows:
+
+        from contrib.openstack.utils import (
+            pausable_restart_on_change as restart_on_change)
+
+        @restart_on_change(restart_map, stopstart=<boolean>)
+        def some_hook(...):
+            pass
+
+    see core.host.restart_on_change() for more details.
+
+    @param restart_map: the restart map {conf_file: [services]}
+    @param stopstart: DEFAULT false; whether to stop and start, or just
+                      restart, the services
+    @param restart_functions: OPTIONAL {service: function} map of
+                              non-standard functions to restart services
+    @returns decorator to use a restart_on_change with pausability
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            if is_unit_paused_set():
+                return f(*args, **kwargs)
+            # otherwise, normal restart_on_change functionality
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def config_flags_parser(config_flags):
+    """Parses config flags string into dict.
+
+    This parsing method supports a few different formats for the config
+    flag values to be parsed:
+
+      1. A string in the simple format of key=value pairs, with the
+         possibility of specifying multiple key value pairs within the same
+         string.  For example, a string in the format of
+         'key1=value1, key2=value2' will return a dict of:
+
+             {'key1': 'value1', 'key2': 'value2'}
+
+      2. A string in the above format, but supporting a comma-delimited list
+         of values for the same key.  For example, a string in the format
+         of 'key1=value1, key2=value2,value3,value4' will return a dict of:
+
+             {'key1': 'value1', 'key2': 'value2,value3,value4'}
+
+      3. A string containing a colon character (:) prior to an equal
+         character (=) will be treated as yaml and parsed as such.  This
+         can be used to specify more complex key value pairs.  For example,
+         a string in the format of 'key1: subkey1=value1, subkey2=value2'
+         will return a dict of:
+
+             {'key1': 'subkey1=value1, subkey2=value2'}
+
+    The provided config_flags string may be a comma-separated list of
+    values, each of which may itself be a comma-separated list of values.
+    """
+    # If we find a colon before an equals sign then treat it as yaml.
+    # Note: limit it to finding the colon first since this indicates
+    # assignment for inline yaml.
+    colon = config_flags.find(':')
+    equals = config_flags.find('=')
+    if colon > 0:
+        if colon < equals or equals < 0:
+            return yaml.safe_load(config_flags)
+
+    if config_flags.find('==') >= 0:
+        juju_log("config_flags is not in expected format (key=value)",
+                 level=ERROR)
+        raise OSContextError
+
+    # strip the following from each value.
+    post_strippers = ' ,'
+    # we strip any leading/trailing '=' or ' ' from the string then
+    # split on '='.
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = {}
+    for i in range(0, limit - 1):
+        current = split[i]
+        next = split[i + 1]
+        vindex = next.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next
+        else:
+            value = next[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this is not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                juju_log("Invalid config value(s) at index %s" % (i),
+                         level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
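A quick illustration of the three input styles the parser accepts; the keys and values here are made up for the example:

    from charmhelpers.contrib.openstack.utils import config_flags_parser

    # 1. simple key=value pairs
    config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}

    # 2. comma-delimited value lists for a single key
    config_flags_parser('key1=value1, key2=value2,value3,value4')
    # -> {'key1': 'value1', 'key2': 'value2,value3,value4'}

    # 3. a colon before any '=' switches to yaml parsing
    config_flags_parser('key1: subkey1=value1, subkey2=value2')
    # -> {'key1': 'subkey1=value1, subkey2=value2'}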
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py b/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py
new file mode 100644
index 00000000..7d04dfa5
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import atexit
+import sys
+
+from charmhelpers.contrib.python.rpdb import Rpdb
+from charmhelpers.core.hookenv import (
+    open_port,
+    close_port,
+    ERROR,
+    log
+)
+
+__author__ = "Jorge Niedbalski "
+
+DEFAULT_ADDR = "0.0.0.0"
+DEFAULT_PORT = 4444
+
+
+def _error(message):
+    log(message, level=ERROR)
+
+
+def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
+    """
+    Set a trace point using the remote debugger
+    """
+    atexit.register(close_port, port)
+    try:
+        log("Starting a remote python debugger session on %s:%s" % (addr,
+                                                                    port))
+        open_port(port)
+        debugger = Rpdb(addr=addr, port=port)
+        debugger.set_trace(sys._getframe().f_back)
+    except Exception:
+        _error("Cannot start a remote debug session on %s:%s" % (addr,
+                                                                 port))
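The debug helper above opens a listening socket from inside a hook and blocks until a client attaches, which lets a charm author poke at live hook state. A hedged sketch of a debugging session; the unit address is hypothetical:

    # Inside hook code, at the point of interest; this blocks the hook
    # until a debugger client connects on the opened port:
    from charmhelpers.contrib.python.debug import set_trace

    set_trace(port=4444)

    # Then, from a machine that can reach the unit:
    #   telnet 10.0.0.5 4444   # attaches to the waiting pdb session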
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py b/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py
new file mode 100644
index 00000000..e29bd1bb
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski "
+
+
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause wheels
+    from /usr/share/python-wheels (installed by various tools) to be added
+    to sys.path.  This function ensures that sys.path remains the same
+    after the call is executed.
+    """
+    try:
+        # take a copy so that in-place additions can be undone below
+        _path = sys.path[:]
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if not value:
+            continue
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param requirements: Path to pip requirements file.
+    :param constraints: Path to pip constraints file.
+        http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None,
+                constraints=None, **options):
+    """Install a python package"""
+    if venv:
+        venv_python = os.path.join(venv, 'bin/pip')
+        command = [venv_python, "install"]
+    else:
+        command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', 'index-url', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if upgrade:
+        command.append('--upgrade')
+
+    if constraints:
+        command.extend(['-c', constraints])
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    if venv:
+        subprocess.check_call(command)
+    else:
+        pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """List the currently installed python packages; note that pip
+    prints the list rather than returning it.
+    """
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+    """Create an isolated Python environment."""
+    apt_install('python-virtualenv')
+
+    if path:
+        venv_path = path
+    else:
+        venv_path = os.path.join(charm_dir(), 'venv')
+
+    if not os.path.exists(venv_path):
+        subprocess.check_call(['virtualenv', venv_path])
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py b/ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py
new file mode 100644
index 00000000..9b31610c
--- /dev/null
+++ 
b/ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/version.py b/ceph-proxy/hooks/charmhelpers/contrib/python/version.py new file mode 100644 index 00000000..3eb42103 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +__author__ = "Jorge Niedbalski " + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py index d1400a02..d7567b86 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -1,15 +1,13 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index d008081f..8a9b9486 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers.  If not, see .
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 # Copyright 2012 Canonical Ltd.
 
@@ -40,6 +38,7 @@
     CalledProcessError,
 )
 from charmhelpers.core.hookenv import (
+    config,
     local_unit,
     relation_get,
     relation_ids,
@@ -64,6 +63,7 @@
 )
 
 from charmhelpers.core.kernel import modprobe
+from charmhelpers.contrib.openstack.utils import config_flags_parser
 
 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'
 
@@ -1204,3 +1204,42 @@ def send_request_if_needed(request, relation='ceph'):
     for rid in relation_ids(relation):
         log('Sending request {}'.format(request.request_id), level=DEBUG)
         relation_set(relation_id=rid, broker_req=request.request)
+
+
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings.  Users can provide
+    a dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
+    """
+    def __init__(self, permitted_sections=None):
+        self.permitted_sections = permitted_sections or []
+
+    def __call__(self):
+        conf = config('config-flags')
+        if not conf:
+            return {}
+
+        conf = config_flags_parser(conf)
+        if not isinstance(conf, dict):
+            log("Provided config-flags is not a dictionary - ignoring",
+                level=WARNING)
+            return {}
+
+        permitted = self.permitted_sections
+        if permitted:
+            diff = set(conf.keys()).difference(set(permitted))
+            if diff:
+                log("Config-flags contains invalid keys '%s' - they will be "
+                    "ignored" % (', '.join(diff)), level=WARNING)
+
+        ceph_conf = {}
+        for key in conf:
+            if permitted and key not in permitted:
+                log("Ignoring key '%s'" % key, level=WARNING)
+                continue
+
+            ceph_conf[key] = conf[key]
+
+        return ceph_conf
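The context simply filters the parsed config-flags value down to the permitted ceph.conf sections. A small sketch; the option value and section names shown are illustrative:

    # Suppose the charm's config-flags option is set to the yaml string:
    #   juju config ceph-mon config-flags='{global: {debug osd: 5/5}}'
    from charmhelpers.contrib.storage.linux.ceph import CephConfContext

    ctxt = CephConfContext(permitted_sections=['global', 'mon', 'osd'])()
    # -> {'global': {'debug osd': '5/5'}}; any section not listed in
    #    permitted_sections is dropped with a WARNING in the unit log.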
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 00000000..1d6ae6f0
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,86 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from subprocess import (
+    check_call,
+    check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
+def loopback_devices():
+    '''
+    Parse through 'losetup -a' output to determine currently mapped
+    loopback devices.  Output is expected to look like:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img)
+
+    :returns: dict: a dict mapping {loopback_dev: backing_file}
+    '''
+    loopbacks = {}
+    cmd = ['losetup', '-a']
+    devs = [d.strip().split(' ') for d in
+            check_output(cmd).splitlines() if d != '']
+    for dev, _, f in devs:
+        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
+    return loopbacks
+
+
+def create_loopback(file_path):
+    '''
+    Create a loopback device for a given backing file.
+
+    :returns: str: Full path to new loopback device (eg, /dev/loop0)
+    '''
+    file_path = os.path.abspath(file_path)
+    check_call(['losetup', '--find', file_path])
+    for d, f in six.iteritems(loopback_devices()):
+        if f == file_path:
+            return d
+
+
+def ensure_loopback_device(path, size):
+    '''
+    Ensure a loopback device exists for a given backing file path and size.
+    If a loopback device is not already mapped to the file, a new one will
+    be created.
+
+    TODO: Confirm size of found loopback device.
+
+    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+    '''
+    for d, f in six.iteritems(loopback_devices()):
+        if f == path:
+            return d
+
+    if not os.path.exists(path):
+        cmd = ['truncate', '--size', size, path]
+        check_call(cmd)
+
+    return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if it is a loopback device;
+              empty string otherwise
+    """
+    return loopback_devices().get(device, "")
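A short sketch of how the loopback helpers chain together; the path and size are illustrative:

    from charmhelpers.contrib.storage.linux.loopback import (
        ensure_loopback_device,
        is_mapped_loopback_device,
    )

    # Create (or find) a 5G sparse backing file and map it to a free
    # /dev/loopN device; re-running returns the existing mapping.
    dev = ensure_loopback_device('/srv/test.img', '5G')

    # Later, check whether a device is loopback-backed:
    backing = is_mapped_loopback_device(dev)   # -> '/srv/test.img'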
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 00000000..4719f53c
--- /dev/null
+++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,103 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    Popen,
+    PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+    '''
+    Deactivate any volume group associated with an LVM physical volume.
+
+    :param block_device: str: Full path to LVM physical volume
+    '''
+    vg = list_lvm_volume_group(block_device)
+    if vg:
+        cmd = ['vgchange', '-an', vg]
+        check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+    '''
+    Determine whether a block device is initialized as an LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: boolean: True if block device is a PV, False if not.
+    '''
+    try:
+        check_output(['pvdisplay', block_device])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def remove_lvm_physical_volume(block_device):
+    '''
+    Remove LVM PV signatures from a given block device.
+
+    :param block_device: str: Full path of block device to scrub.
+    '''
+    p = Popen(['pvremove', '-ff', block_device],
+              stdin=PIPE)
+    p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+    '''
+    List LVM volume group associated with a given block device.
+
+    Assumes block device is a valid LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: str: Name of volume group associated with block device or None
+    '''
+    vg = None
+    pvd = check_output(['pvdisplay', block_device]).splitlines()
+    for l in pvd:
+        l = l.decode('UTF-8')
+        if l.strip().startswith('VG Name'):
+            vg = ' '.join(l.strip().split()[2:])
+    return vg
+
+
+def create_lvm_physical_volume(block_device):
+    '''
+    Initialize a block device as an LVM physical volume.
+
+    :param block_device: str: Full path of block device to initialize.
+    '''
+    check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+    '''
+    Create an LVM volume group backed by a given block device.
+
+    Assumes block device has already been initialized as an LVM PV.
+
+    :param volume_group: str: Name of volume group to create.
+    :param block_device: str: Full path of PV-initialized block device.
+    '''
+    check_call(['vgcreate', volume_group, block_device])
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py
index 4e35c297..3dc0df68 100644
--- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -1,18 +1,16 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers.  If not, see .
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import os
 import re
diff --git a/ceph-proxy/hooks/charmhelpers/core/__init__.py b/ceph-proxy/hooks/charmhelpers/core/__init__.py
index d1400a02..d7567b86 100644
--- a/ceph-proxy/hooks/charmhelpers/core/__init__.py
+++ b/ceph-proxy/hooks/charmhelpers/core/__init__.py
@@ -1,15 +1,13 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/hooks/charmhelpers/core/decorators.py b/ceph-proxy/hooks/charmhelpers/core/decorators.py index bb05620b..6ad41ee4 100644 --- a/ceph-proxy/hooks/charmhelpers/core/decorators.py +++ b/ceph-proxy/hooks/charmhelpers/core/decorators.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Copyright 2014 Canonical Ltd. diff --git a/ceph-proxy/hooks/charmhelpers/core/files.py b/ceph-proxy/hooks/charmhelpers/core/files.py index 0f12d321..fdd82b75 100644 --- a/ceph-proxy/hooks/charmhelpers/core/files.py +++ b/ceph-proxy/hooks/charmhelpers/core/files.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = 'Jorge Niedbalski ' diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/hooks/charmhelpers/core/fstab.py index 3056fbac..d9fa9152 100644 --- a/ceph-proxy/hooks/charmhelpers/core/fstab.py +++ b/ceph-proxy/hooks/charmhelpers/core/fstab.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import io import os diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 01321296..48b2b9dc 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. 
@@ -1006,4 +1004,4 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 64b2df55..53068599 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. @@ -176,7 +174,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): + primary_group=None, secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -187,15 +185,24 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
     :param bool system_user: Whether to create a login or system user
     :param str primary_group: Primary group for user; defaults to username
     :param list secondary_groups: Optional list of additional groups
+    :param int uid: UID for user being created
+    :param str home_dir: Home directory for user
 
     :returns: The password database entry struct, as returned by `pwd.getpwnam`
     """
     try:
         user_info = pwd.getpwnam(username)
         log('user {0} already exists!'.format(username))
+        if uid:
+            user_info = pwd.getpwuid(int(uid))
+            log('user with uid {0} already exists!'.format(uid))
     except KeyError:
         log('creating user {0}'.format(username))
         cmd = ['useradd']
+        if uid:
+            cmd.extend(['--uid', str(uid)])
+        if home_dir:
+            cmd.extend(['--home', str(home_dir)])
         if system_user or password is None:
             cmd.append('--system')
         else:
@@ -230,14 +237,58 @@ def user_exists(username):
     return user_exists
 
 
-def add_group(group_name, system_group=False):
-    """Add a group to the system"""
+def uid_exists(uid):
+    """Check if a uid exists"""
+    try:
+        pwd.getpwuid(uid)
+        uid_exists = True
+    except KeyError:
+        uid_exists = False
+    return uid_exists
+
+
+def group_exists(groupname):
+    """Check if a group exists"""
+    try:
+        grp.getgrnam(groupname)
+        group_exists = True
+    except KeyError:
+        group_exists = False
+    return group_exists
+
+
+def gid_exists(gid):
+    """Check if a gid exists"""
+    try:
+        grp.getgrgid(gid)
+        gid_exists = True
+    except KeyError:
+        gid_exists = False
+    return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+    """Add a group to the system
+
+    Will log but otherwise succeed if the group already exists.
+
+    :param str group_name: group to create
+    :param bool system_group: Create system group
+    :param int gid: GID for group being created
+
+    :returns: The group database entry struct, as returned by `grp.getgrnam`
+    """
     try:
         group_info = grp.getgrnam(group_name)
         log('group {0} already exists!'.format(group_name))
+        if gid:
+            group_info = grp.getgrgid(gid)
+            log('group with gid {0} already exists!'.format(gid))
     except KeyError:
         log('creating group {0}'.format(group_name))
         cmd = ['addgroup']
+        if gid:
+            cmd.extend(['--gid', str(gid)])
         if system_group:
             cmd.append('--system')
         else:
diff --git a/ceph-proxy/hooks/charmhelpers/core/hugepage.py b/ceph-proxy/hooks/charmhelpers/core/hugepage.py
index a783ad94..54b5b5e2 100644
--- a/ceph-proxy/hooks/charmhelpers/core/hugepage.py
+++ b/ceph-proxy/hooks/charmhelpers/core/hugepage.py
@@ -2,19 +2,17 @@
 # Copyright 2014-2015 Canonical Limited.
 #
-# This file is part of charm-helpers.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers.  If not, see .
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml from charmhelpers.core import fstab diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/hooks/charmhelpers/core/kernel.py index 5dc64952..b166efec 100644 --- a/ceph-proxy/hooks/charmhelpers/core/kernel.py +++ b/ceph-proxy/hooks/charmhelpers/core/kernel.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. __author__ = "Jorge Niedbalski " diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py index 0928158b..61fd074e 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
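The uid/gid additions to adduser() and add_group() a few hunks above exist so charms can pin stable numeric ids across units, which matters when files live on shared storage. A hedged sketch of the intended call pattern; the names and ids are illustrative:

    from charmhelpers.core.host import (
        add_group,
        adduser,
        gid_exists,
        user_exists,
    )

    # Pin the same uid/gid on every unit so files on shared storage keep
    # consistent ownership (64045 is only an example value here).
    if not gid_exists(64045):
        add_group('ceph', system_group=True, gid=64045)
    if not user_exists('ceph'):
        adduser('ceph', system_user=True, uid=64045,
                home_dir='/var/lib/ceph')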
from .base import * # NOQA from .helpers import * # NOQA diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index a42660ca..ca9dc996 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import json diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py index 24237042..3e6e30d2 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/helpers.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import yaml diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py index 7e3f9693..dd9b9717 100644 --- a/ceph-proxy/hooks/charmhelpers/core/strutils.py +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import six import re diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py index 21cc8ab2..6e413e31 100644 --- a/ceph-proxy/hooks/charmhelpers/core/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -3,19 +3,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import yaml diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index d2d8eafe..0a7560ff 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 338104e0..54ec969f 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -3,20 +3,17 @@ # # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # # Authors: # Kapil Thangavelu diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index db0d86a2..8f39f2fe 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import importlib from tempfile import NamedTemporaryFile @@ -106,6 +104,14 @@ 'mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', } # The order of this list is very important. Handlers should be listed in from @@ -390,16 +396,13 @@ def install_remote(source, *args, **kwargs): # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None for handler in handlers: try: - installed_to = handler.install(source, *args, **kwargs) + return handler.install(source, *args, **kwargs) except UnhandledSource as e: log('Install source attempt unsuccessful: {}'.format(e), level='WARNING') - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to + raise UnhandledSource("No handler found for source {}".format(source)) def install_from_config(config_var_name): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index b8e0943d..dd24f9ec 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import hashlib diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index cafd27f7..b3404d85 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. 
# -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call @@ -42,15 +40,23 @@ def can_handle(self, source): else: return True - def branch(self, source, dest): + def branch(self, source, dest, revno=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] if os.path.exists(dest): - check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] else: - check_call(['bzr', 'branch', source, dest]) + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_call(cmd) - def install(self, source, dest=None): + def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -59,10 +65,11 @@ def install(self, source, dest=None): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + try: - self.branch(source, dest_dir) + self.branch(source, dest_dir, revno) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 65ed5319..f708d1ee 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -1,18 +1,16 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. 
If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os from subprocess import check_call, CalledProcessError diff --git a/ceph-proxy/hooks/charmhelpers/payload/__init__.py b/ceph-proxy/hooks/charmhelpers/payload/__init__.py index e6f42497..ee55cb3d 100644 --- a/ceph-proxy/hooks/charmhelpers/payload/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/payload/__init__.py @@ -1,17 +1,15 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. "Tools for working with files injected into a charm just before deployment." diff --git a/ceph-proxy/hooks/charmhelpers/payload/execd.py b/ceph-proxy/hooks/charmhelpers/payload/execd.py index 4d4d81a6..0c42090f 100644 --- a/ceph-proxy/hooks/charmhelpers/payload/execd.py +++ b/ceph-proxy/hooks/charmhelpers/payload/execd.py @@ -2,19 +2,17 @@ # Copyright 2014-2015 Canonical Limited. # -# This file is part of charm-helpers. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. +# http://www.apache.org/licenses/LICENSE-2.0 # -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
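The ceph.conf template change just below swaps the three hard-coded cephx lines for a single auth_supported variable, which is what later lets the Amulet tests deploy with auth-supported: none. A sketch of the rendering, assuming Jinja2 (the engine charm-helpers templating uses) and made-up monitor addresses:

    from jinja2 import Template

    # Two representative lines; the real template also sets the keyring
    # path and the other [global] options.
    tmpl = Template('auth cluster required = {{ auth_supported }}\n'
                    'mon host = {{ mon_hosts }}\n')
    print(tmpl.render(auth_supported='none',
                      mon_hosts='10.0.0.1:6789 10.0.0.2:6789 10.0.0.3:6789'))
    # auth cluster required = none
    # mon host = 10.0.0.1:6789 10.0.0.2:6789 10.0.0.3:6789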
import os import sys diff --git a/ceph-proxy/templates/ceph.conf b/ceph-proxy/templates/ceph.conf index 74febcc6..3795912f 100644 --- a/ceph-proxy/templates/ceph.conf +++ b/ceph-proxy/templates/ceph.conf @@ -1,8 +1,8 @@ [global] -auth cluster required = cephx -auth service required = cephx -auth client required = cephx +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} diff --git a/ceph-proxy/tests/014-basic-precise-icehouse b/ceph-proxy/tests/014-basic-precise-icehouse new file mode 100755 index 00000000..020cd751 --- /dev/null +++ b/ceph-proxy/tests/014-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on precise-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/ceph-proxy/tests/015-basic-trusty-icehouse b/ceph-proxy/tests/015-basic-trusty-icehouse new file mode 100755 index 00000000..f67fea91 --- /dev/null +++ b/ceph-proxy/tests/015-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/ceph-proxy/tests/016-basic-trusty-juno b/ceph-proxy/tests/016-basic-trusty-juno new file mode 100755 index 00000000..28c7684e --- /dev/null +++ b/ceph-proxy/tests/016-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-juno.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/017-basic-trusty-kilo new file mode 100755 index 00000000..0a787b22 --- /dev/null +++ b/ceph-proxy/tests/017-basic-trusty-kilo @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-kilo.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-kilo', + source='cloud:trusty-updates/kilo') + deployment.run_tests() diff --git a/ceph-proxy/tests/018-basic-trusty-liberty b/ceph-proxy/tests/018-basic-trusty-liberty new file mode 100755 index 00000000..f339371b --- /dev/null +++ b/ceph-proxy/tests/018-basic-trusty-liberty @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/019-basic-trusty-mitaka new file mode 100755 index 00000000..2eca19d6 --- /dev/null +++ b/ceph-proxy/tests/019-basic-trusty-mitaka @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': 
+ deployment = CephBasicDeployment(series='trusty', + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') + deployment.run_tests() diff --git a/ceph-proxy/tests/020-basic-wily-liberty b/ceph-proxy/tests/020-basic-wily-liberty new file mode 100755 index 00000000..b0d8096b --- /dev/null +++ b/ceph-proxy/tests/020-basic-wily-liberty @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on wily-liberty.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='wily') + deployment.run_tests() diff --git a/ceph-proxy/tests/021-basic-xenial-mitaka b/ceph-proxy/tests/021-basic-xenial-mitaka new file mode 100755 index 00000000..ae3d3350 --- /dev/null +++ b/ceph-proxy/tests/021-basic-xenial-mitaka @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial') + deployment.run_tests() diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py new file mode 100644 index 00000000..4cddccf2 --- /dev/null +++ b/ceph-proxy/tests/basic_deployment.py @@ -0,0 +1,159 @@ +#!/usr/bin/python + +import amulet + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + # ERROR + ) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(DEBUG) + + +class CephBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph deployment.""" + + def __init__(self, series=None, openstack=None, source=None, stable=False): + """Deploy the entire test environment.""" + super(CephBasicDeployment, self).__init__(series, openstack, source, + stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['ceph-proxy', 'ceph-radosgw'] + + # Wait for deployment ready msgs, except exclusions + self._auto_wait_for_status(exclude_services=exclude_services) + + self._configure_proxy() + self._initialize_tests() + self._auto_wait_for_status() + + def _add_services(self): + """Add services + + Add the services that we're testing, where ceph is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). + """ + this_service = {'name': 'ceph-proxy'} + other_services = [{'name': 'ceph-mon', 'units': 3}, + {'name': 'ceph-osd', 'units': 3}, + {'name': 'ceph-radosgw'}] + super(CephBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'ceph-osd:mon': 'ceph-mon:osd', + 'ceph-radosgw:mon': 'ceph-proxy:radosgw', + } + super(CephBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + } + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. 
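The comment above matters because osd-devices is a whitelist: paths that do not exist on a unit are skipped rather than treated as errors, so the deliberately bogus /dev/test-non-existent entry in the osd-devices value below only breaks the test if a proposal changes that behaviour. A simplified sketch of the filtering (the real logic lives in the ceph-osd charm, not here):

    import os

    def usable_devices(osd_devices):
        # Only paths that actually exist are acted on; unknown entries
        # in the whitelist are silently ignored.
        return [d for d in osd_devices.split() if os.path.exists(d)]

    print(usable_devices('/dev/vdb /srv/ceph /dev/test-non-existent'))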
+ ceph_osd_config = { + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + } + + radosgw_config = {"use-embedded-webserver": True} + proxy_config = { + 'source': self.source + } + configs = {'ceph-mon': ceph_config, + 'ceph-osd': ceph_osd_config, + 'ceph-radosgw': radosgw_config, + 'ceph-proxy': proxy_config} + super(CephBasicDeployment, self)._configure_services(configs) + + def _configure_proxy(self): + """Setup CephProxy with Ceph configuration + from running Ceph cluster + """ + mon_key = u.file_contents_safe( + self.d.sentry['ceph-mon'][0], + '/etc/ceph/ceph.client.admin.keyring' + ).split(' = ')[-1].rstrip() + + ceph_ips = [] + for x in self.d.sentry['ceph-mon']: + output, code = x.run("unit-get private-address") + ceph_ips.append(output + ':6789') + + proxy_config = { + 'auth-supported': 'none', + 'admin-key': mon_key, + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-hosts': ' '.join(ceph_ips) + } + u.log.debug('Config: {}'.format(proxy_config)) + self.d.configure('ceph-proxy', proxy_config) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] + self.ceph0_sentry = self.d.sentry['ceph-mon'][0] + self.radosgw_sentry = self.d.sentry['ceph-radosgw'][0] + self.proxy_sentry = self.d.sentry['ceph-proxy'][0] + + u.log.debug('openstack release val: {}'.format( + self._get_openstack_release())) + u.log.debug('openstack release str: {}'.format( + self._get_openstack_release_string())) + + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_processes = { + 'ceph-mon': 1, + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph0_sentry: ceph_processes + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_499_ceph_cmds_exit_zero(self): + """Check basic functionality of ceph cli commands against + ceph proxy units.""" + sentry_units = [ + self.proxy_sentry, + self.ceph0_sentry + ] + commands = [ + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..48867880 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
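_configure_proxy above recovers the admin key by splitting the mon unit's whole keyring file on ' = ' and keeping the last field; with a minimal keyring that carries only the key entry, that field is the key itself. A sketch with a fabricated key value:

    # Shape of /etc/ceph/ceph.client.admin.keyring assumed here
    # (the key value is made up):
    keyring = ('[client.admin]\n'
               '\tkey = AQExampleExampleExampleExampleFakeKey==\n')
    # Same expression as in _configure_proxy():
    mon_key = keyring.split(' = ')[-1].rstrip()
    assert mon_key == 'AQExampleExampleExampleExampleFakeKey=='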
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-proxy/tests/charmhelpers/contrib/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..0146236d --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,93 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import os +import six + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. 
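tests/charmhelpers/__init__.py above bootstraps its own dependencies with only the standard library: attempt the import, apt-install the matching python-/python3- package on ImportError, then import again. The pattern generalizes; a sketch with a hypothetical requests dependency, assuming its packages follow the same naming convention:

    import subprocess
    import sys

    try:
        import requests  # noqa (hypothetical extra dependency)
    except ImportError:
        pkg = ('python-requests' if sys.version_info.major == 2
               else 'python3-requests')
        # Needs root and apt, exactly like the six/yaml bootstrap above.
        subprocess.check_call(['apt-get', 'install', '-y', pkg])
        import requests  # noqa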
+ """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. + """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in six.iteritems(relations): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..a39ed4c8 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,827 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. 
+ """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. + + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=None, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=None, sleep_time=20, + retry_count=30, retry_sleep_time=10): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. + """ + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) + time.sleep(sleep_time) + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed\n{}'.format(tries, service, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) + return True + else: + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20, retry_count=30, + retry_sleep_time=10): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, or if file not found. 
+ """ + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s updated since %s on ' + '%s' % (filename, mtime, unit_name)) + time.sleep(sleep_time) + file_mtime = None + tries = 0 + while tries <= retry_count and not file_mtime: + try: + file_mtime = self._get_file_mtime(sentry_unit, filename) + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'OK'.format(tries, filename, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, file may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} file mtime on {} ' + 'failed\n{}'.format(tries, filename, + unit_name, e)) + time.sleep(retry_sleep_time) + tries += 1 + + if not file_mtime: + self.log.warn('Could not determine file mtime, assuming ' + 'file does not exist') + return False + + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) + return True + else: + self.log.warn('File mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=None, + sleep_time=20, retry_count=30, + retry_sleep_time=10): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers + retry_count (int): If service is not found, how many times to retry + retry_sleep_time (int): Time in seconds to wait between retries + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. + """ + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. 
lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. + + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return None + + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. + + :param sentry_unit: Amulet sentry instance (juju unit) + :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. + :returns: List of process IDs + """ + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes, expect_success=True): + """Construct a dict containing unit sentries, process names, and + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. 
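validate_unit_process_ids (just below) accepts several shapes for each expected PID count. A sketch of the inner per-unit mapping, with hypothetical process names:

    expected = {
        'ceph-mon': 1,        # int: exactly this many PIDs
        'ceph-osd': [2, 3],   # list: any listed PID count is acceptable
        'radosgw': True,      # bool True: one or more PIDs must exist
        'ceph-mds': False,    # bool False: no PIDs may exist
    }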
+ """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids, a_pids_length, + a_pids)) + + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. 
Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. + + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. 
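A sketch of driving port_knock_tcp (above) from a test; the address is fabricated, 6789 is the standard Ceph monitor port, and u/amulet are the names already in scope throughout these test modules:

    if not u.port_knock_tcp('10.245.162.10', port=6789, timeout=5):
        amulet.raise_status(amulet.FAIL,
                            msg='ceph-mon not listening on 6789')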
+ + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output, + params=None): + """Run the named action on a given unit sentry. + + params a dict of parameters to use + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + if params is not None: + for key, value in params.iteritems(): + command.append("{}={}".format(key, value)) + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. + """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..6ce91dbe --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,295 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. 
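The resolver just below caps charms that live outside ~openstack-charmers at the newest series they support, and otherwise picks the stable or next branch. A sketch of the outcomes for a hypothetical xenial run:

    #   mysql    -> cs:trusty/mysql    (base charm; xenial unsupported,
    #                                   so capped at trusty, its newest)
    #   keystone -> cs:xenial/keystone                       (stable=True)
    #   keystone -> cs:~openstack-charmers-next/xenial/keystone  (stable=False)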
+ + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['precise', 'trusty'], + 'mongodb': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + } + + for svc in other_services: + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) + else: + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) + + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + # Charms which should use the source config option + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', + 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', + 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. 
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjunction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjunction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None. Raises if timeout is hit.
+        """
+        self.log.info('Waiting for extended status on units...')
+
+        all_services = self.d.services.keys()
+
+        if exclude_services and include_only:
+            raise ValueError('exclude_services can not be used '
+                             'with include_only')
+
+        if message:
+            if isinstance(message, re._pattern_type):
+                match = message.pattern
+            else:
+                match = message
+
+            self.log.debug('Custom extended status wait match: '
+                           '{}'.format(match))
+        else:
+            self.log.debug('Default extended status wait match: contains '
+                           'READY (case-insensitive)')
+            message = re.compile('.*ready.*', re.IGNORECASE)
+
+        if exclude_services:
+            self.log.debug('Excluding services from extended status match: '
+                           '{}'.format(exclude_services))
+        else:
+            exclude_services = []
+
+        if include_only:
+            services = include_only
+        else:
+            services = list(set(all_services) - set(exclude_services))
+
+        self.log.debug('Waiting up to {}s for extended status on services: '
+                       '{}'.format(timeout, services))
+        service_messages = {service: message for service in services}
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+        self.log.info('OK')
+
+    def _get_openstack_release(self):
+        """Get openstack release.
+
+        Return an integer representing the enum value of the openstack
+        release.
+        """
+        # Must be ordered by OpenStack release (not by Ubuntu release):
+        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+         self.precise_havana, self.precise_icehouse,
+         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
+         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
+         self.wily_liberty, self.trusty_mitaka,
+         self.xenial_mitaka) = range(14)
+
+        releases = {
+            ('precise', None): self.precise_essex,
+            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
+            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
+            ('precise', 'cloud:precise-havana'): self.precise_havana,
+            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
+            ('trusty', None): self.trusty_icehouse,
+            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
+            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
+            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
+            ('utopic', None): self.utopic_juno,
+            ('vivid', None): self.vivid_kilo,
+            ('wily', None): self.wily_liberty,
+            ('xenial', None): self.xenial_mitaka}
+        return releases[(self.series, self.openstack)]
+
+    def _get_openstack_release_string(self):
+        """Get openstack release string.
+
+        Return a string representing the openstack release.
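+
+        For example, under the mapping below a plain 'trusty' series
+        yields 'icehouse', while an openstack source of
+        'cloud:trusty-mitaka' yields 'mitaka' (illustrative values
+        drawn from the table itself).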
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..8040b570 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1010 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import json +import logging +import os +import re +import six +import time +import urllib + +import cinderclient.v1.client as cinder_client +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +import keystoneclient.v2_0 as keystone_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + +import novaclient.client as nova_client +import pika +import swiftclient + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + unit = keystone_sentry + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + 
                                      auth_version='2.0')
+
+    def create_cirros_image(self, glance, image_name):
+        """Download the latest cirros image and upload it to glance,
+        validate and return a resource pointer.
+
+        :param glance: pointer to authenticated glance connection
+        :param image_name: display name for new image
+        :returns: glance image pointer
+        """
+        self.log.debug('Creating glance cirros image '
+                       '({})...'.format(image_name))
+
+        # Download cirros image
+        http_proxy = os.getenv('AMULET_HTTP_PROXY')
+        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+        if http_proxy:
+            proxies = {'http': http_proxy}
+            opener = urllib.FancyURLopener(proxies)
+        else:
+            opener = urllib.FancyURLopener()
+
+        f = opener.open('http://download.cirros-cloud.net/version/released')
+        version = f.read().strip()
+        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+        local_path = os.path.join('tests', cirros_img)
+
+        if not os.path.exists(local_path):
+            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+                                                  version, cirros_img)
+            opener.retrieve(cirros_url, local_path)
+        f.close()
+
+        # Create glance image
+        with open(local_path) as f:
+            image = glance.images.create(name=image_name, is_public=True,
+                                         disk_format='qcow2',
+                                         container_format='bare', data=f)
+
+        # Wait for image to reach active status
+        img_id = image.id
+        ret = self.resource_reaches_status(glance.images, img_id,
+                                           expected_stat='active',
+                                           msg='Image status wait')
+        if not ret:
+            msg = 'Glance image failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Image validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return image
+
+    def delete_image(self, glance, image):
+        """Delete the specified image."""
+
+        # /!\ DEPRECATION WARNING
+        self.log.warn('/!\\ DEPRECATION WARNING: use '
+                      'delete_resource instead of delete_image.')
+        self.log.debug('Deleting glance image ({})...'.format(image))
+        return self.delete_resource(glance.images, image, msg='glance image')
+
+    def create_instance(self, nova, image_name, instance_name, flavor):
+        """Create the specified instance."""
+        self.log.debug('Creating instance '
+                       '({}|{}|{})'.format(instance_name, image_name, flavor))
+        image = nova.images.find(name=image_name)
+        flavor = nova.flavors.find(name=flavor)
+        instance = nova.servers.create(name=instance_name, image=image,
+                                       flavor=flavor)
+
+        count = 1
+        status = instance.status
+        while status != 'ACTIVE' and count < 60:
+            time.sleep(3)
+            instance = nova.servers.get(instance.id)
+            status = instance.status
+            self.log.debug('instance status: {}'.format(status))
+            count += 1
+
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
+            return None
+
+        return instance
+
+    def delete_instance(self, nova, instance):
+        """Delete the specified instance."""
+
+        # /!\ DEPRECATION
WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
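+
+        An illustrative call (names assumed, not part of this module):
+            resource_reaches_status(nova.servers, instance.id,
+                                    expected_stat='ACTIVE',
+                                    msg='instance status wait')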
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. 
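+        For example, hypothetical object counts of [10, 12, 11] would
+        pass, while [10, 10, 11] would fail the growth check.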
+ + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
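+        The output typically includes a section such as
+        {running_nodes,['rabbit@host1','rabbit@host2']} (hostnames
+        illustrative), which get_rmq_cluster_running_nodes() parses.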
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification. Confirm that it is enabled on every
+        unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param port: amqp port, use defaults if None
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: on')
+
+        # Enable RMQ SSL
+        config = {'ssl': 'on'}
+        if port:
+            config['ssl_port'] = port
+
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+        """Turn ssl charm config option off, confirm that it is disabled
+        on every unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: off')
+
+        # Disable RMQ SSL
+        config = {'ssl': 'off'}
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+                             port=None, fatal=True,
+                             username="testuser1", password="changeme"):
+        """Establish and return a pika amqp connection to the rabbitmq service
+        running on a rmq juju unit.
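+        For example, connect_amqp_by_unit(unit, ssl=True) connects on
+        the default ssl port of 5671, per the port defaulting logic
+        below.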
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/ceph-proxy/tests/setup/00-setup b/ceph-proxy/tests/setup/00-setup new file mode 100755 index 00000000..94e5611f --- /dev/null +++ b/ceph-proxy/tests/setup/00-setup @@ -0,0 +1,17 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes amulet \ + distro-info-data \ + python-cinderclient \ + python-distro-info \ + python-glanceclient \ + python-heatclient \ + python-keystoneclient \ + python-neutronclient \ + python-novaclient \ + python-pika \ + python-swiftclient diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml new file mode 100644 index 00000000..49e721b3 --- /dev/null +++ b/ceph-proxy/tests/tests.yaml @@ -0,0 +1,22 @@ +bootstrap: true +reset: false +virtualenv: true +makefile: + - lint + - test +sources: + - ppa:juju/stable +packages: + - amulet + - distro-info-data + - python-ceilometerclient + - python-cinderclient + - python-distro-info + - python-glanceclient + - python-heatclient + - python-keystoneclient + - python-neutronclient + - python-novaclient + - python-pika + - python-swiftclient + - python-nose \ No newline at end of file diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py index e69de29b..f80aab3d 100644 --- a/ceph-proxy/unit_tests/__init__.py +++ b/ceph-proxy/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import sys +sys.path.append('hooks') diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py new file mode 100644 index 00000000..b720d94a --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -0,0 +1,137 @@ +import json +import unittest + +import mock + +import ceph_broker + + +class CephBrokerTestCase(unittest.TestCase): + def setUp(self): + super(CephBrokerTestCase, self).setUp() + + @mock.patch('ceph_broker.log') + def test_process_requests_noop(self, mock_log): + req = json.dumps({'api-version': 1, 'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.log') + def test_process_requests_missing_api_version(self, mock_log): + req = json.dumps({'ops': []}) + rc = ceph_broker.process_requests(req) + self.assertEqual(json.loads(rc), { + 'exit-code': 1, + 'stderr': 'Missing or invalid api version (None)'}) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_version(self, mock_log): + req = json.dumps({'api-version': 2, 'ops': []}) + rc = ceph_broker.process_requests(req) + print "Return: %s" % rc + self.assertEqual(json.loads(rc), + {'exit-code': 1, + 'stderr': 'Missing or invalid api version (2)'}) + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid(self, mock_log): + reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc), + {'exit-code': 1, + 'stderr': "Unknown operation 'invalid_op'"}) + + @mock.patch('ceph_broker.get_osds') + @mock.patch('ceph_broker.ReplicatedPool') + 
@mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num(self, mock_log, + mock_pool_exists, + mock_replicated_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 100}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.get_osds') + @mock.patch('ceph_broker.ReplicatedPool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, + mock_pool_exists, + mock_replicated_pool, + mock_get_osds): + mock_get_osds.return_value = [0, 1, 2] + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3, + 'pg_num': 300}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', + name='foo') + mock_replicated_pool.assert_called_with(service='admin', name='foo', + replicas=3, pg_num=100) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.ReplicatedPool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_exists(self, mock_log, + mock_pool_exists, + mock_replicated_pool): + mock_pool_exists.return_value = True + reqs = json.dumps({'api-version': 1, + 'ops': [{'op': 'create-pool', + 'name': 'foo', + 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', + name='foo') + self.assertFalse(mock_replicated_pool.create.called) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + @mock.patch('ceph_broker.ReplicatedPool') + @mock.patch('ceph_broker.pool_exists') + @mock.patch('ceph_broker.log') + def test_process_requests_create_pool_rid(self, mock_log, + mock_pool_exists, + mock_replicated_pool): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'request-id': '1ef5aede', + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3}]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_replicated_pool.assert_called_with(service='admin', + name='foo', + pg_num=None, + replicas=3) + self.assertEqual(json.loads(rc)['exit-code'], 0) + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') + + @mock.patch('ceph_broker.log') + def test_process_requests_invalid_api_rid(self, mock_log): + reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', + 'ops': [{'op': 'create-pool'}]}) + rc = ceph_broker.process_requests(reqs) + self.assertEqual(json.loads(rc)['exit-code'], 1) + self.assertEqual(json.loads(rc)['stderr'], + "Missing or invalid api version (0)") + self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-proxy/unit_tests/test_dummy.py b/ceph-proxy/unit_tests/test_dummy.py deleted file mode 100644 index 93f686e4..00000000 --- a/ceph-proxy/unit_tests/test_dummy.py +++ /dev/null @@ -1,6 +0,0 @@ -import unittest - - -class CharmTestCase(unittest.TestCase): - def test_it_works(self): - assert True diff 
--git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py
new file mode 100644
index 00000000..663a0488
--- /dev/null
+++ b/ceph-proxy/unit_tests/test_utils.py
@@ -0,0 +1,121 @@
+import logging
+import unittest
+import os
+import yaml
+
+from contextlib import contextmanager
+from mock import patch, MagicMock
+
+
+def load_config():
+    '''
+    Walk backwards from __file__ looking for config.yaml, load and return
+    the 'options' section.
+    '''
+    config = None
+    f = __file__
+    while config is None:
+        d = os.path.dirname(f)
+        if os.path.isfile(os.path.join(d, 'config.yaml')):
+            config = os.path.join(d, 'config.yaml')
+            break
+        f = d
+
+    if not config:
+        logging.error('Could not find config.yaml in any parent directory '
+                      'of %s.' % f)
+        raise Exception
+
+    return yaml.safe_load(open(config).read())['options']
+
+
+def get_default_config():
+    '''
+    Load default charm config from config.yaml and return as a dict.
+    If no default is set in config.yaml, its value is None.
+    '''
+    default_config = {}
+    config = load_config()
+    for k, v in config.iteritems():
+        if 'default' in v:
+            default_config[k] = v['default']
+        else:
+            default_config[k] = None
+    return default_config
+
+
+class CharmTestCase(unittest.TestCase):
+
+    def setUp(self, obj, patches):
+        super(CharmTestCase, self).setUp()
+        self.patches = patches
+        self.obj = obj
+        self.test_config = TestConfig()
+        self.test_relation = TestRelation()
+        self.patch_all()
+
+    def patch(self, method):
+        _m = patch.object(self.obj, method)
+        mock = _m.start()
+        self.addCleanup(_m.stop)
+        return mock
+
+    def patch_all(self):
+        for method in self.patches:
+            setattr(self, method, self.patch(method))
+
+
+class TestConfig(object):
+
+    def __init__(self):
+        self.config = get_default_config()
+
+    def get(self, attr=None):
+        if not attr:
+            return self.get_all()
+        try:
+            return self.config[attr]
+        except KeyError:
+            return None
+
+    def get_all(self):
+        return self.config
+
+    def set(self, attr, value):
+        if attr not in self.config:
+            raise KeyError
+        self.config[attr] = value
+
+
+class TestRelation(object):
+
+    def __init__(self, relation_data={}):
+        self.relation_data = relation_data
+
+    def set(self, relation_data):
+        self.relation_data = relation_data
+
+    def get(self, attr=None, unit=None, rid=None):
+        if attr is None:
+            return self.relation_data
+        elif attr in self.relation_data:
+            return self.relation_data[attr]
+        return None
+
+
+@contextmanager
+def patch_open():
+    '''Patch open() to allow mocking both open() itself and the file that is
+    yielded.
+
+    Yields the mock for "open" and "file", respectively.'''
+    mock_open = MagicMock(spec=open)
+    mock_file = MagicMock(spec=file)
+
+    @contextmanager
+    def stub_open(*args, **kwargs):
+        mock_open(*args, **kwargs)
+        yield mock_file
+
+    with patch('__builtin__.open', stub_open):
+        yield mock_open, mock_file

From aa661ef27227e220636f503dcdb8b2eea49661ca Mon Sep 17 00:00:00 2001
From: Billy Olsen
Date: Tue, 28 Jun 2016 11:22:06 -0700
Subject: [PATCH 1157/2699] Add weight to ceph broker for pool creation

Adds a weight parameter to the Ceph Broker API in order to allow
the proper number of placement groups to be calculated based upon
the relative amount of data that a pool will consume from the
Ceph storage cluster.

This commit adds two new config options:

1. 'expected-osd-count', an integer, which allows users deploying a
bundle in an all-in-one deployment to specify the number of OSDs that
are expected in the configuration in order to prevent faulty
calculations due to races (e.g. pool creation requested prior to
having all of the OSDs join the Ceph cluster).

2. 'pgs-per-osd', an integer, which allows users to tweak the number
of placement groups per OSD based upon expected growth of the Ceph
cluster itself.

Partial-Bug: #1492742
Change-Id: Ia67a8a9d62a20182d440490b084551d89b8292e9
Signed-off-by: Billy Olsen
---
 ceph-mon/config.yaml                          | 27 +++
 ceph-mon/hooks/ceph_broker.py                 | 25 +--
 .../contrib/openstack/amulet/deployment.py    |  4 +-
 .../charmhelpers/contrib/openstack/context.py |  9 +-
 .../charmhelpers/contrib/openstack/utils.py   | 40 +++-
 .../contrib/storage/linux/ceph.py             | 172 +++++++++++++-----
 ceph-mon/hooks/charmhelpers/core/host.py      |  5 +-
 .../contrib/openstack/amulet/deployment.py    |  4 +-
 ceph-mon/unit_tests/test_ceph_broker.py       | 30 +--
 ceph-mon/unit_tests/test_ceph_ops.py          | 29 ++-
 10 files changed, 240 insertions(+), 105 deletions(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 931631b0..b6e7452b 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -56,6 +56,33 @@ options:
       How many nodes to wait for before trying to create the monitor cluster
       this number needs to be odd, and more than three is a waste except for
       very large clusters.
+  expected-osd-count:
+    type: int
+    default: 0
+    description: |
+      Provides an expected number of OSDs for the cluster. This value is used
+      when calculating the number of placement groups for a pool creation.
+      The number of placement groups for new pools is based upon the actual
+      number of OSDs in the cluster or the expected-osd-count, whichever is
+      greater. A value of 0 will cause the charm to only consider the OSDs
+      which are in the cluster.
+  pgs-per-osd:
+    type: int
+    default: 100
+    description: |
+      The number of placement groups per OSD to target. It is important to
+      properly size the number of placement groups per OSD as too many
+      or too few placement groups per OSD may cause resource constraints and
+      performance degradation. This value comes from the recommendation of
+      the Ceph placement group calculator (http://ceph.com/pgcalc/) and
+      recommended values are:
+      .
+      100 - If the cluster OSD count is not expected to increase in the
+            foreseeable future.
+      200 - If the cluster OSD count is expected to increase (up to 2x) in the
+            foreseeable future.
+      300 - If the cluster OSD count is expected to increase between 2x and 3x
+            in the foreseeable future.
source: type: string default: diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/hooks/ceph_broker.py index adaa4674..a3b9451d 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/hooks/ceph_broker.py @@ -26,7 +26,6 @@ create_erasure_profile, delete_pool, erasure_profile_exists, - get_osds, pool_exists, pool_set, remove_pool_snapshot, @@ -152,6 +151,7 @@ def handle_erasure_pool(request, service): pool_name = request.get('name') erasure_profile = request.get('erasure-profile') quota = request.get('max-bytes') + weight = request.get('weight') if erasure_profile is None: erasure_profile = "default-canonical" @@ -171,7 +171,8 @@ def handle_erasure_pool(request, service): return {'exit-code': 1, 'stderr': msg} pool = ErasurePool(service=service, name=pool_name, - erasure_code_profile=erasure_profile) + erasure_code_profile=erasure_profile, + percent_data=weight) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, @@ -188,14 +189,8 @@ def handle_replicated_pool(request, service): pool_name = request.get('name') replicas = request.get('replicas') quota = request.get('max-bytes') - - # Optional params + weight = request.get('weight') pg_num = request.get('pg_num') - if pg_num: - # Cap pg_num to max allowed just in case. - osds = get_osds(service) - if osds: - pg_num = min(pg_num, (len(osds) * 100 // replicas)) # Check for missing params if pool_name is None or replicas is None: @@ -203,10 +198,16 @@ def handle_replicated_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + kwargs = {} + if pg_num: + kwargs['pg_num'] = pg_num + if weight: + kwargs['percent_data'] = weight + if replicas: + kwargs['replicas'] = replicas + pool = ReplicatedPool(service=service, - name=pool_name, - replicas=replicas, - pg_num=pg_num) + name=pool_name, **kwargs) if not pool_exists(service=service, name=pool_name): log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), level=INFO) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index f7220f35..6ce91dbe 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,7 +71,7 @@ def _determine_branch_locations(self, other_services): base_charms = { 'mysql': ['precise', 'trusty'], 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], } for svc in other_services: @@ -112,7 +112,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 7cbdc03d..76737f22 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -57,6 +57,7 @@ mkdir, write_file, pwgen, + lsb_release, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -1195,7 +1196,10 @@ def num_cpus(self): def __call__(self): multiplier = config('worker-multiplier') or 0 - ctxt = {"workers": self.num_cpus * multiplier} + count = int(self.num_cpus * multiplier) + if multiplier > 0 and count == 0: + count = 1 + ctxt = {"workers": count} return ctxt @@ -1436,7 +1440,8 @@ def _determine_ctxt(self): :return ctxt: Dictionary of the apparmor profile or None """ if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: - ctxt = {'aa_profile_mode': config('aa-profile-mode')} + ctxt = {'aa_profile_mode': config('aa-profile-mode'), + 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} else: ctxt = None return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index f4401913..519eae95 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -220,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', 'master': 'master', @@ -413,7 +412,8 @@ def os_release(package, base='essex'): global os_rel if os_rel: return os_rel - os_rel = (get_os_codename_package(package, fatal=False) or + os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) return os_rel @@ -719,7 +719,24 @@ def git_install_requested(): return config('openstack-origin-git') is not None -requirements_dir = None +def git_os_codename_install_source(projects_yaml): + """ + Returns OpenStack codename of release being installed from source. + """ + if git_install_requested(): + projects = _git_yaml_load(projects_yaml) + + if projects in GIT_DEFAULT_BRANCHES.keys(): + if projects == 'master': + return 'newton' + return projects + + if 'release' in projects: + if projects['release'] == 'master': + return 'newton' + return projects['release'] + + return None def git_default_repos(projects_yaml): @@ -740,12 +757,6 @@ def git_default_repos(projects_yaml): } repos = [repo] - # NOTE(coreycb): This is a temp work-around until the requirements - # repo moves from stable/kilo branch to kilo-eol tag. The core - # repos have already done this. - if default == 'kilo': - branch = 'kilo-eol' - # neutron-* and nova-* charms require some additional repos if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: @@ -778,7 +789,7 @@ def git_default_repos(projects_yaml): } repos.append(repo) - return yaml.dump(dict(repositories=repos)) + return yaml.dump(dict(repositories=repos, release=default)) return projects_yaml @@ -793,6 +804,9 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) +requirements_dir = None + + def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. 
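# A sketch of the release detection added above, assuming a hypothetical
# openstack-origin-git value (not part of this patch):
#
#     projects_yaml = "repositories: [...]\nrelease: mitaka"
#     git_os_codename_install_source(projects_yaml)  # -> 'mitaka'
#     git_os_codename_install_source('master')       # -> 'newton'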
@@ -856,6 +870,10 @@ def git_clone_and_install(projects_yaml, core_project): # upper-constraints didn't exist until after icehouse if not os.path.isfile(constraints): constraints = None + # use constraints unless project yaml sets use_constraints to false + if 'use_constraints' in projects.keys(): + if not projects['use_constraints']: + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -882,6 +900,8 @@ def _git_validate_projects_yaml(projects, core_project): if projects['repositories'][-1]['name'] != core_project: error_out('{} git repo must be specified last'.format(core_project)) + _git_ensure_key_exists('release', projects) + def _git_ensure_key_exists(key, keys): """ diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 8a9b9486..beff2703 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -21,9 +21,10 @@ # James Page # Adam Gandelman # -import bisect + import errno import hashlib +import math import six import os @@ -76,8 +77,16 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ -# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) -powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 def validator(value, valid_type, valid_range=None): @@ -184,42 +193,106 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size): - """ - :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for - erasure coded pools + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. 
pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. :return: int. The number of pgs to use. """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count osd_list = get_osds(self.service) - if not osd_list: + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli - return 200 - - osd_list_length = len(osd_list) - # Calculate based on Ceph best practices - if osd_list_length < 5: - return 128 - elif 5 < osd_list_length < 10: - return 512 - elif 10 < osd_list_length < 50: - return 4096 + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) else: - estimate = (osd_list_length * 100) / pool_size - # Return the next nearest power of 2 - index = bisect.bisect_right(powers_of_two, estimate) - return powers_of_two[index] + return int(nearest) class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas - if pg_num is None: - self.pg_num = self.get_pgs(self.replicas) + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. 
+ max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) else: - self.pg_num = pg_num + self.pg_num = self.get_pgs(self.replicas, percent_data) def create(self): if not pool_exists(self.service, self.name): @@ -238,30 +311,39 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default"): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data def create(self): if not pool_exists(self.service, self.name): - # Try to find the erasure profile information so we can properly size the pgs - erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) # Check for errors if erasure_profile is None: - log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), - level=ERROR) - raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) if 'k' not in erasure_profile or 'm' not in erasure_profile: # Error - log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), - level=ERROR) - raise PoolCreationError( - message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) - - pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -955,16 +1037,22 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3, pg_num=None): + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value will be calculated by the broker based on how many OSDs are in the cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num}) + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight}) def set_ops(self, ops): """Set request ops to provided value. 
@@ -982,7 +1070,7 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num']: + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 35817b06..53068599 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -174,7 +174,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None): + primary_group=None, secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. @@ -186,6 +186,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups :param int uid: UID for user being created + :param str home_dir: Home directory for user :returns: The password database entry struct, as returned by `pwd.getpwnam` """ @@ -200,6 +201,8 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, cmd = ['useradd'] if uid: cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) if system_user or password is None: cmd.append('--system') else: diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f7220f35..6ce91dbe 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,7 +71,7 @@ def _determine_branch_locations(self, other_services): base_charms = { 'mysql': ['precise', 'trusty'], 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], } for svc in other_services: @@ -112,7 +112,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py index 45bdd3a9..81ce8f79 100644 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ b/ceph-mon/unit_tests/test_ceph_broker.py @@ -54,15 +54,12 @@ def test_process_requests_invalid(self, mock_log): {'exit-code': 1, 'stderr': "Unknown operation 'invalid_op'"}) - @mock.patch('ceph_broker.get_osds') @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') def test_process_requests_create_pool_w_pg_num(self, mock_log, mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] + mock_replicated_pool): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -76,30 +73,6 @@ def test_process_requests_create_pool_w_pg_num(self, mock_log, replicas=3, pg_num=100) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, - mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 300}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - @mock.patch('ceph_broker.ReplicatedPool') @mock.patch('ceph_broker.pool_exists') @mock.patch('ceph_broker.log') @@ -134,7 +107,6 @@ def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists.assert_called_with(service='admin', name='foo') mock_replicated_pool.assert_called_with(service='admin', name='foo', - pg_num=None, replicas=3) self.assertEqual(json.loads(rc)['exit-code'], 0) self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 062a5926..cf62d5ec 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -47,15 +47,12 @@ def test_create_erasure_profile(self, mock_create_erasure): erasure_plugin_name='jerasure') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'get_osds') @patch.object(ceph_broker, 'pool_exists') @patch.object(ceph_broker, 'ReplicatedPool') @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_replicated_pool(self, mock_replicated_pool, - mock_pool_exists, - mock_get_osds): - mock_get_osds.return_value = 0 + mock_pool_exists): mock_pool_exists.return_value = False reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -66,7 +63,29 @@ def test_process_requests_create_replicated_pool(self, }]}) rc = ceph_broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') - calls = [call(pg_num=None, name=u'foo', service='admin', replicas=3)] + calls = [call(name=u'foo', service='admin', replicas=3)] + mock_replicated_pool.assert_has_calls(calls) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + + 
@patch.object(ceph_broker, 'pool_exists') + @patch.object(ceph_broker, 'ReplicatedPool') + @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + def test_process_requests_replicated_pool_weight(self, + mock_replicated_pool, + mock_pool_exists): + mock_pool_exists.return_value = False + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'pool-type': 'replicated', + 'name': 'foo', + 'weight': 40.0, + 'replicas': 3 + }]}) + rc = ceph_broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + calls = [call(name=u'foo', service='admin', replicas=3, + percent_data=40.0)] mock_replicated_pool.assert_has_calls(calls) self.assertEqual(json.loads(rc), {'exit-code': 0}) From a176b8ef7f67568382c5e6f08edcb2708919adff Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 28 Jun 2016 13:43:42 -0700 Subject: [PATCH 1158/2699] Add rgw-buckets-pool-weight option for calculating pgs Provide the weight option to the Ceph broker request API for requesting the creation of a new Ceph storage pool. The weight is used to indicate the percentage of the data that the pool is expected to consume. Each environment may have slightly different needs based on the type of workload, so a config option labelled rgw-buckets-pool-weight is provided to allow the operator to tune this value. Closes-Bug: #1492742 Change-Id: I15ae3b853fa3379a9de2ddde3e55dc242a4d4ab2 --- ceph-radosgw/config.yaml | 16 +- ceph-radosgw/hooks/ceph.py | 18 +- .../contrib/openstack/amulet/deployment.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 9 +- .../charmhelpers/contrib/openstack/utils.py | 40 +++- .../contrib/storage/linux/ceph.py | 172 ++++++++++++---- ceph-radosgw/hooks/charmhelpers/core/host.py | 5 +- .../contrib/openstack/amulet/deployment.py | 4 +- ceph-radosgw/unit_tests/test_ceph.py | 184 +++++++----------- 9 files changed, 271 insertions(+), 181 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index ad17b9ae..9abae24b 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -191,9 +191,20 @@ options: it stores within RGW pools. Note that once the RGW pools have been created, changing this value will not have any effect (although it can be changed in ceph by manually configuring your ceph cluster). + rgw-buckets-pool-weight: + type: int + default: 20 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the amount of data loaded + into the RADOS Gateway/S3 interface is expected to be reserved for or + consume 20% of the data in the Ceph cluster, then this value would be + specified as 20. rgw-lightweight-pool-pg-num: type: int - default: 64 + default: -1 description: | When the Rados Gateway is installed it, by default, creates pools with pg_num 8 which, in the majority of cases, is suboptimal. A few rgw pools @@ -203,7 +214,8 @@ options: up+in the cluster at the time the pool is created. For others it will use this value which can be altered depending on how big your cluster is. Note that once a pool has been created, changes to this setting will be - ignored. + ignored. Setting this value to -1 enables the number of placement + groups to be calculated based on the Ceph placement group calculator.
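The sizing behind both new options reduces to the pgcalc formula quoted in the charmhelpers docstrings synced by these patches. The following standalone sketch mirrors that get_pgs() logic (the constants and the 25% power-of-two rounding rule are taken from the diff; the cluster in the example is hypothetical):

import math

def weighted_pg_count(osd_count, percent_data, pool_size,
                      pgs_per_osd_target=100):
    # (Target PGs per OSD) * (OSD #) * (%Data) / (Pool size)
    num_pg = (pgs_per_osd_target * osd_count *
              (percent_data / 100.0)) // pool_size
    # CRUSH slightly prefers powers of two, so round to one; if the
    # nearest power of two is more than 25% below the raw value, take
    # the next power of two up instead.
    nearest = 2 ** int(math.floor(math.log(num_pg, 2)))
    if (num_pg - nearest) > (num_pg * 0.25):
        return int(nearest * 2)
    return int(nearest)

# 60 OSDs, rgw-buckets-pool-weight=20, 3 replicas:
# raw = (100 * 60 * 0.20) // 3 = 400; 256 is more than 25% below, so 512
print(weighted_pg_count(osd_count=60, percent_data=20, pool_size=3))  # 512

Requests that still pass an explicit pg_num are capped at get_pgs(replicas, 100.0), the most the pool could justify if it held all of the cluster's data, per the ReplicatedPool change above.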
haproxy-server-timeout: type: int default: diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 2f095a3d..b55dd2c8 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -268,9 +268,11 @@ def get_create_rgw_pools_rq(prefix=None): # Buckets likely to contain the most data and therefore requiring the most # PGs heavy = ['.rgw.buckets'] + bucket_weight = config('rgw-buckets-pool-weight') for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - rq.add_op_create_pool(name=pool, replica_count=replicas) + rq.add_op_create_pool(name=pool, replica_count=replicas, + weight=bucket_weight) # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data @@ -278,7 +280,6 @@ def get_create_rgw_pools_rq(prefix=None): '.rgw.root', '.rgw.control', '.rgw.gc', - '.rgw.buckets', '.rgw.buckets.index', '.rgw.buckets.extra', '.log', @@ -288,9 +289,20 @@ def get_create_rgw_pools_rq(prefix=None): '.users.email', '.users.swift', '.users.uid'] + weights = { + '.rgw.buckets.index': 1.00, + '.rgw.buckets.extra': 1.00 + } pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: + # Per the Ceph PG Calculator, all of the lightweight pools get 0.10% + # of the data by default and only the .rgw.buckets.* get higher values + w = weights.get(pool, 0.10) pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num) + if pg_num > 0: + rq.add_op_create_pool(name=pool, replica_count=replicas, + pg_num=pg_num) + else: + rq.add_op_create_pool(name=pool, replica_count=replicas, weight=w) return rq diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index f7220f35..6ce91dbe 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,7 +71,7 @@ def _determine_branch_locations(self, other_services): base_charms = { 'mysql': ['precise', 'trusty'], 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], } for svc in other_services: @@ -112,7 +112,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 7cbdc03d..76737f22 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -57,6 +57,7 @@ mkdir, write_file, pwgen, + lsb_release, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -1195,7 +1196,10 @@ def num_cpus(self): def __call__(self): multiplier = config('worker-multiplier') or 0 - ctxt = {"workers": self.num_cpus * multiplier} + count = int(self.num_cpus * multiplier) + if multiplier > 0 and count == 0: + count = 1 + ctxt = {"workers": count} return ctxt @@ -1436,7 +1440,8 @@ def _determine_ctxt(self): :return ctxt: Dictionary of the apparmor profile or None """ if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: - ctxt = {'aa_profile_mode': config('aa-profile-mode')} + ctxt = {'aa_profile_mode': config('aa-profile-mode'), + 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} else: ctxt = None return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f4401913..519eae95 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -220,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', 'master': 'master', @@ -413,7 +412,8 @@ def os_release(package, base='essex'): global os_rel if os_rel: return os_rel - os_rel = (get_os_codename_package(package, fatal=False) or + os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) return os_rel @@ -719,7 +719,24 @@ def git_install_requested(): return config('openstack-origin-git') is not None -requirements_dir = None +def git_os_codename_install_source(projects_yaml): + """ + Returns OpenStack codename of release being installed from source. + """ + if git_install_requested(): + projects = _git_yaml_load(projects_yaml) + + if projects in GIT_DEFAULT_BRANCHES.keys(): + if projects == 'master': + return 'newton' + return projects + + if 'release' in projects: + if projects['release'] == 'master': + return 'newton' + return projects['release'] + + return None def git_default_repos(projects_yaml): @@ -740,12 +757,6 @@ def git_default_repos(projects_yaml): } repos = [repo] - # NOTE(coreycb): This is a temp work-around until the requirements - # repo moves from stable/kilo branch to kilo-eol tag. The core - # repos have already done this. - if default == 'kilo': - branch = 'kilo-eol' - # neutron-* and nova-* charms require some additional repos if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: @@ -778,7 +789,7 @@ def git_default_repos(projects_yaml): } repos.append(repo) - return yaml.dump(dict(repositories=repos)) + return yaml.dump(dict(repositories=repos, release=default)) return projects_yaml @@ -793,6 +804,9 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) +requirements_dir = None + + def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. 
@@ -856,6 +870,10 @@ def git_clone_and_install(projects_yaml, core_project): # upper-constraints didn't exist until after icehouse if not os.path.isfile(constraints): constraints = None + # use constraints unless project yaml sets use_constraints to false + if 'use_constraints' in projects.keys(): + if not projects['use_constraints']: + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -882,6 +900,8 @@ def _git_validate_projects_yaml(projects, core_project): if projects['repositories'][-1]['name'] != core_project: error_out('{} git repo must be specified last'.format(core_project)) + _git_ensure_key_exists('release', projects) + def _git_ensure_key_exists(key, keys): """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 8a9b9486..beff2703 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -21,9 +21,10 @@ # James Page # Adam Gandelman # -import bisect + import errno import hashlib +import math import six import os @@ -76,8 +77,16 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ -# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) -powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 def validator(value, valid_type, valid_range=None): @@ -184,42 +193,106 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size): - """ - :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for - erasure coded pools + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. 
pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. :return: int. The number of pgs to use. """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count osd_list = get_osds(self.service) - if not osd_list: + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli - return 200 - - osd_list_length = len(osd_list) - # Calculate based on Ceph best practices - if osd_list_length < 5: - return 128 - elif 5 < osd_list_length < 10: - return 512 - elif 10 < osd_list_length < 50: - return 4096 + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) else: - estimate = (osd_list_length * 100) / pool_size - # Return the next nearest power of 2 - index = bisect.bisect_right(powers_of_two, estimate) - return powers_of_two[index] + return int(nearest) class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas - if pg_num is None: - self.pg_num = self.get_pgs(self.replicas) + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. 
+ max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) else: - self.pg_num = pg_num + self.pg_num = self.get_pgs(self.replicas, percent_data) def create(self): if not pool_exists(self.service, self.name): @@ -238,30 +311,39 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default"): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data def create(self): if not pool_exists(self.service, self.name): - # Try to find the erasure profile information so we can properly size the pgs - erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) # Check for errors if erasure_profile is None: - log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), - level=ERROR) - raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) if 'k' not in erasure_profile or 'm' not in erasure_profile: # Error - log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), - level=ERROR) - raise PoolCreationError( - message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) - - pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -955,16 +1037,22 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3, pg_num=None): + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value will be calculated by the broker based on how many OSDs are in the cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num}) + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight}) def set_ops(self, ops): """Set request ops to provided value. 
@@ -982,7 +1070,7 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num']: + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 35817b06..53068599 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -174,7 +174,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None): + primary_group=None, secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. @@ -186,6 +186,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups :param int uid: UID for user being created + :param str home_dir: Home directory for user :returns: The password database entry struct, as returned by `pwd.getpwnam` """ @@ -200,6 +201,8 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, cmd = ['useradd'] if uid: cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) if system_user or password is None: cmd.append('--system') else: diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f7220f35..6ce91dbe 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,7 +71,7 @@ def _determine_branch_locations(self, other_services): base_charms = { 'mysql': ['precise', 'trusty'], 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], } for svc in other_services: @@ -112,7 +112,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index e8e608bf..7f8df64d 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -23,12 +23,13 @@ sys.modules['apt'] = mock_apt sys.modules['apt_pkg'] = mock_apt.apt_pkg -import ceph -import utils +import ceph # noqa +import utils # noqa -from test_utils import CharmTestCase +from test_utils import CharmTestCase # noqa TO_PATCH = [ + 'config', 'get_unit_hostname', 'os', 'subprocess', @@ -36,16 +37,10 @@ ] -def config_side_effect(*args): - if args[0] == 'ceph-osd-replication-count': - return 3 - elif args[0] == 'rgw-lightweight-pool-pg-num': - return 10 - - class CephRadosGWCephTests(CharmTestCase): def setUp(self): super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) + self.config.side_effect = self.test_config.get def test_is_quorum_leader(self): self.os.path.exists.return_value = True @@ -230,125 +225,80 @@ def test_get_named_key_get(self): ] self.subprocess.check_output.assert_called_with(cmd) - @patch.object(ceph, 'CephBrokerRq') - @patch.object(ceph, 'config') - def test_create_rgw_pools_rq_with_prefix(self, mock_config, mock_broker): - mock_config.side_effect = config_side_effect + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_pool') + def test_create_rgw_pools_rq_with_prefix(self, mock_broker): + self.test_config.set('rgw-lightweight-pool-pg-num', 10) + self.test_config.set('ceph-osd-replication-count', 3) + self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix='us-east') mock_broker.assert_has_calls([ - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.root'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.control'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.gc'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.buckets'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.buckets.index'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='us-east.intent-log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, - name='us-east.usage'), - call().add_op_create_pool( - pg_num=10, replica_count=3, - name='us-east.users'), - call().add_op_create_pool( - pg_num=10, replica_count=3, - name='us-east.users.email'), - call().add_op_create_pool( - pg_num=10, replica_count=3, - name='us-east.users.swift'), - call().add_op_create_pool( - pg_num=10, replica_count=3, - name='us-east.users.uid')] + call(replica_count=3, weight=19, name='us-east.rgw.buckets'), + call(pg_num=10, replica_count=3, name='us-east.rgw'), + call(pg_num=10, replica_count=3, name='us-east.rgw.root'), + call(pg_num=10, replica_count=3, name='us-east.rgw.control'), + call(pg_num=10, replica_count=3, name='us-east.rgw.gc'), + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index'), + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra'), + call(pg_num=10, replica_count=3, name='us-east.log'), + call(pg_num=10, replica_count=3, name='us-east.intent-log'), + call(pg_num=10, replica_count=3, 
name='us-east.usage'), + call(pg_num=10, replica_count=3, name='us-east.users'), + call(pg_num=10, replica_count=3, name='us-east.users.email'), + call(pg_num=10, replica_count=3, name='us-east.users.swift'), + call(pg_num=10, replica_count=3, name='us-east.users.uid')] ) @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) - @patch.object(ceph, 'CephBrokerRq') - @patch.object(ceph, 'config') - def test_create_rgw_pools_rq_no_prefix_pre_jewel(self, mock_config, - mock_broker): - mock_config.side_effect = config_side_effect + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_pool') + def test_create_rgw_pools_rq_no_prefix_pre_jewel(self, mock_broker): + self.test_config.set('rgw-lightweight-pool-pg-num', -1) + self.test_config.set('ceph-osd-replication-count', 3) + self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call().add_op_create_pool( - replica_count=3, name='.rgw.buckets'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.root'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.control'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.gc'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.buckets'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.buckets.index'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.rgw.buckets.extra'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.intent-log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.usage'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.users'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.users.email'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.users.swift'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='.users.uid')] + call(weight=19, replica_count=3, name='.rgw.buckets'), + call(weight=0.10, replica_count=3, name='.rgw'), + call(weight=0.10, replica_count=3, name='.rgw.root'), + call(weight=0.10, replica_count=3, name='.rgw.control'), + call(weight=0.10, replica_count=3, name='.rgw.gc'), + call(weight=1.00, replica_count=3, name='.rgw.buckets.index'), + call(weight=1.00, replica_count=3, name='.rgw.buckets.extra'), + call(weight=0.10, replica_count=3, name='.log'), + call(weight=0.10, replica_count=3, name='.intent-log'), + call(weight=0.10, replica_count=3, name='.usage'), + call(weight=0.10, replica_count=3, name='.users'), + call(weight=0.10, replica_count=3, name='.users.email'), + call(weight=0.10, replica_count=3, name='.users.swift'), + call(weight=0.10, replica_count=3, name='.users.uid')] ) @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) - @patch.object(ceph, 'CephBrokerRq') - @patch.object(ceph, 'config') - def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_config, - mock_broker): - mock_config.side_effect = config_side_effect + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_pool') + def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker): + self.test_config.set('rgw-lightweight-pool-pg-num', -1) + self.test_config.set('ceph-osd-replication-count', 3) + self.test_config.set('rgw-buckets-pool-weight', 19) 
ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call().add_op_create_pool( - replica_count=3, name='default.rgw.buckets'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.root'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.control'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.gc'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.buckets'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.buckets.index'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.rgw.buckets.extra'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.intent-log'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.usage'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.users'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.users.email'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.users.swift'), - call().add_op_create_pool( - pg_num=10, replica_count=3, name='default.users.uid')] + call(weight=19, replica_count=3, name='default.rgw.buckets'), + call(weight=0.10, replica_count=3, name='default.rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.root'), + call(weight=0.10, replica_count=3, name='default.rgw.control'), + call(weight=0.10, replica_count=3, name='default.rgw.gc'), + call(weight=1.00, replica_count=3, + name='default.rgw.buckets.index'), + call(weight=1.00, replica_count=3, + name='default.rgw.buckets.extra'), + call(weight=0.10, replica_count=3, name='default.log'), + call(weight=0.10, replica_count=3, name='default.intent-log'), + call(weight=0.10, replica_count=3, name='default.usage'), + call(weight=0.10, replica_count=3, name='default.users'), + call(weight=0.10, replica_count=3, name='default.users.email'), + call(weight=0.10, replica_count=3, name='default.users.swift'), + call(weight=0.10, replica_count=3, name='default.users.uid')] ) @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) From fe52c74c17204aa2fabb8706d88057b865548bb8 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 8 Mar 2016 08:19:54 -0800 Subject: [PATCH 1159/2699] Perf Optimizations This patch starts down the road to automated performance tuning. It attempts to identify optimal settings for hard drives and network cards and then persist them for reboots. It is conservative but configurable via config.yaml settings. 
Change-Id: Id4e72ae13ec3cb594e667f57e8cc70b7e18af15b --- ceph-osd/.gitignore | 2 +- ceph-osd/config.yaml | 20 +- ceph-osd/hooks/ceph.py | 330 ++++++++++++++++++++++++++++- ceph-osd/hooks/ceph_hooks.py | 17 +- ceph-osd/templates/hdparm.conf | 7 + ceph-osd/unit_tests/test_tuning.py | 125 +++++++++++ 6 files changed, 497 insertions(+), 4 deletions(-) create mode 100644 ceph-osd/templates/hdparm.conf create mode 100644 ceph-osd/unit_tests/test_tuning.py diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index 32a80896..4219e517 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -4,6 +4,6 @@ .testrepository bin *.sw[nop] -.idea *.pyc .unit-state.db +.idea diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 6253bcf2..b54e82a0 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -164,7 +164,8 @@ options: sysctl: type: string default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, - kernel.threads-max: 2097152 }' + kernel.threads-max: 2097152, vm.vfs_cache_pressure: 1, + vm.swappiness: 1 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set persistently. By default we set pid_max, max_map_count and @@ -177,6 +178,14 @@ options: description: | Setting this to true will tell Ceph to replicate across Juju's Availability Zone instead of specifically by host. + max-sectors-kb: + default: 1048576 + type: int + description: | + This parameter will adjust every block device in your server to allow + greater IO operation sizes. If you have a RAID card with cache on it + consider tuning this much higher than the 1MB default. 1MB is a safe + default for spinning HDDs that don't have much cache. nagios_context: type: string default: "juju" @@ -204,4 +213,13 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. + autotune: + default: False + type: boolean + description: | + Enabling this option will attempt to tune your network card sysctls and + hard drive settings. This changes hard drive read ahead settings and + max_sectors_kb. For the network card this will detect the link speed + and make appropriate sysctl changes. Enabling this option should + generally be safe. 
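To make the autotune behaviour concrete: for each non-loopback NIC whose reported link speed matches a known profile, the hooks added below write a sysctl drop-in and apply it immediately with sysctl -p. As an illustration only (the interface name eth0 is assumed), a 10Gb adapter would get /etc/sysctl.d/51-ceph-osd-charm-eth0.conf containing one key=value line per entry of the GBASE_10 profile, in no particular order:

net.core.rmem_default=524287
net.core.wmem_default=524287
net.core.rmem_max=524287
net.core.wmem_max=524287
net.core.optmem_max=524287
net.core.netdev_max_backlog=300000
net.ipv4.tcp_rmem=10000000 10000000 10000000
net.ipv4.tcp_wmem=10000000 10000000 10000000
net.ipv4.tcp_mem=10000000 10000000 10000000

The tuning is opt-in; on the Juju of this era it is enabled with juju set ceph-osd autotune=true.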
diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 22e6c9af..edb4f2e6 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -23,6 +23,7 @@ import sys import shutil from charmhelpers.cli.host import mounts +from charmhelpers.core import hookenv from charmhelpers.core.host import ( mkdir, chownr, @@ -48,7 +49,7 @@ ) from utils import ( get_unit_hostname, -) + render_template) LEADER = 'leader' PEON = 'peon' @@ -56,6 +57,333 @@ PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +def save_sysctls(sysctl_dict, save_location): + """ + Persist the sysctls to the hard drive. + :param sysctl_dict: dict + :param save_location: path to save the settings to + :raise: IOError if anything goes wrong with writing. + """ + try: + # Persist the settings for reboots + with open(save_location, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + except IOError as e: + log("Unable to persist sysctl settings to {}. Error {}".format( + save_location, e.message), level=ERROR) + raise + + +def tune_nic(network_interface): + """ + This will set optimal sysctls for the particular network adapter. + :param network_interface: string The network adapter name. + """ + speed = get_link_speed(network_interface) + if speed in NETWORK_ADAPTER_SYSCTLS: + status_set('maintenance', 'Tuning device {}'.format( + network_interface)) + sysctl_file = os.path.join( + os.sep, + 'etc', + 'sysctl.d', + '51-ceph-osd-charm-{}.conf'.format(network_interface)) + try: + log("Saving sysctl_file: {} values: {}".format( + sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), + level=DEBUG) + save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], + save_location=sysctl_file) + except IOError as e: + log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " + "failed. {}".format(network_interface, e.message), + level=ERROR) + + try: + # Apply the settings + log("Applying sysctl settings", level=DEBUG) + subprocess.check_output(["sysctl", "-p", sysctl_file]) + except subprocess.CalledProcessError as err: + log('sysctl -p {} failed with error {}'.format(sysctl_file, + err.output), + level=ERROR) + else: + log("No settings found for network adapter: {}".format( + network_interface), level=DEBUG) + + +def get_link_speed(network_interface): + """ + This will find the link speed for a given network device. Returns None + if an error occurs. 
:param network_interface: string The network adapter interface. + :return: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e.message), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ + This will persist the hard drive settings to the /etc/hdparm.conf file + The settings_dict should be in the form of {"uuid": {"key":"value"}} + :param settings_dict: dict of settings to save + """ + hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + try: + with open(hdparm_path, 'w') as hdparm: + hdparm.write(render_template('hdparm.conf', settings_dict)) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=hdparm_path, + error=err.message), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """ + This function sets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to set + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, + 'queue', 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(str(max_sectors_size)) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """ + This function gets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_sectors_kb or 0 on error. + """ + max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, + 'queue', 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb from {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """ + This function gets the max_hw_sectors_kb for a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_hw_sectors_kb or 0 on error. + """ + max_hw_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, + 'queue', 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb from {}.
Error: {}'.format( + max_hw_sectors_kb_path, e.message), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """ + This function sets the hard drive read ahead. + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """ + This queries blkid to get the uuid for a block device. + :param block_dev: Name of the block device to query. + :return: The UUID of the device or None on Error. + """ + try: + block_info = subprocess.check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """ + Tune the max_hw_sectors if needed. + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. + :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """ + Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + This function will change the read ahead sectors and the max write + sectors for each block device. + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. 
Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + def ceph_user(): if get_version() > 1: diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index e7f1bc50..d6240fbb 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import os import random import subprocess @@ -21,6 +20,7 @@ import tempfile import socket import time +import netifaces import ceph from charmhelpers.core import hookenv @@ -270,12 +270,24 @@ def upgrade_osd(): sys.exit(1) +def tune_network_adapters(): + interfaces = netifaces.interfaces() + for interface in interfaces: + if interface == "lo": + # Skip the loopback + continue + log("Looking up {} for possible sysctl tuning.".format(interface)) + ceph.tune_nic(interface) + + @hooks.hook('install.real') @harden() def install(): add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) + if config('autotune'): + tune_network_adapters() def az_info(): @@ -440,6 +452,9 @@ def prepare_disks_and_activate(): osd_journal, config('osd-reformat'), config('ignore-device-errors'), config('osd-encrypt')) + # Make it fast! 
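    # When the boolean 'autotune' charm option is set, each freshly
    # prepared device also gets read-ahead and max_sectors_kb tuning
    # via ceph.tune_dev() before the OSDs are started: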
+ if config('autotune'): + ceph.tune_dev(dev) ceph.start_osds(get_devices()) diff --git a/ceph-osd/templates/hdparm.conf b/ceph-osd/templates/hdparm.conf new file mode 100644 index 00000000..f0a4d59b --- /dev/null +++ b/ceph-osd/templates/hdparm.conf @@ -0,0 +1,7 @@ +{% for uuid,settings in drive_settings.items() %} + /dev/disk/by-uuid/{{ uuid }} { + {% for key, value in settings.items() %} + {{ key }} = {{ value }} + {% endfor %} + } +{% endfor %} \ No newline at end of file diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py new file mode 100644 index 00000000..61a69443 --- /dev/null +++ b/ceph-osd/unit_tests/test_tuning.py @@ -0,0 +1,125 @@ +__author__ = 'Chris Holcombe ' +from mock import patch, call +import test_utils +import ceph + +TO_PATCH = [ + 'hookenv', + 'status_set', + 'subprocess', + 'log', +] + + +class PerformanceTestCase(test_utils.CharmTestCase): + def setUp(self): + super(PerformanceTestCase, self).setUp(ceph, TO_PATCH) + + def test_tune_nic(self): + with patch('ceph.get_link_speed', return_value=10000): + with patch('ceph.save_sysctls') as save_sysctls: + ceph.tune_nic('eth0') + save_sysctls.assert_has_calls( + [ + call( + save_location='/etc/sysctl.d/' + '51-ceph-osd-charm-eth0.conf', + sysctl_dict={ + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.rmem_default': 524287, + 'net.ipv4.tcp_wmem': + '10000000 10000000 10000000', + 'net.core.netdev_max_backlog': 300000, + 'net.core.optmem_max': 524287, + 'net.ipv4.tcp_mem': + '10000000 10000000 10000000', + 'net.ipv4.tcp_rmem': + '10000000 10000000 10000000', + 'net.core.wmem_default': 524287}) + ]) + self.status_set.assert_has_calls( + [ + call('maintenance', 'Tuning device eth0'), + ]) + + def test_get_block_uuid(self): + self.subprocess.check_output.return_value = \ + 'UUID=378f3c86-b21a-4172-832d-e2b3d4bc7511\nTYPE=ext2\n' + uuid = ceph.get_block_uuid('/dev/sda1') + self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') + + @patch('ceph.persist_settings') + @patch('ceph.set_hdd_read_ahead') + @patch('ceph.get_max_sectors_kb') + @patch('ceph.get_max_hw_sectors_kb') + @patch('ceph.set_max_sectors_kb') + @patch('ceph.get_block_uuid') + def test_tune_dev(self, + block_uuid, + set_max_sectors_kb, + get_max_hw_sectors_kb, + get_max_sectors_kb, + set_hdd_read_ahead, + persist_settings): + self.hookenv.config.return_value = 712 + block_uuid.return_value = '378f3c86-b21a-4172-832d-e2b3d4bc7511' + set_hdd_read_ahead.return_value = None + get_max_sectors_kb.return_value = 512 + get_max_hw_sectors_kb.return_value = 1024 + ceph.tune_dev('/dev/sda') + # The config value was lower than the hardware value. + # We use the lower value. 
The user wants 712 but the hw supports + # 1K + set_max_sectors_kb.assert_called_with( + dev_name='sda', max_sectors_size=712 + ) + persist_settings.assert_called_with( + settings_dict={'drive_settings': { + '378f3c86-b21a-4172-832d-e2b3d4bc7511': { + 'read_ahead_sect': 712}}} + ) + self.status_set.assert_has_calls([ + call('maintenance', 'Tuning device /dev/sda'), + call('maintenance', 'Finished tuning device /dev/sda') + ]) + + @patch('ceph.persist_settings') + @patch('ceph.set_hdd_read_ahead') + @patch('ceph.get_max_sectors_kb') + @patch('ceph.get_max_hw_sectors_kb') + @patch('ceph.set_max_sectors_kb') + @patch('ceph.get_block_uuid') + def test_tune_dev_2(self, + block_uuid, + set_max_sectors_kb, + get_max_hw_sectors_kb, + get_max_sectors_kb, + set_hdd_read_ahead, + persist_settings): + self.hookenv.config.return_value = 2048 + block_uuid.return_value = '378f3c86-b21a-4172-832d-e2b3d4bc7511' + set_hdd_read_ahead.return_value = None + get_max_sectors_kb.return_value = 512 + get_max_hw_sectors_kb.return_value = 1024 + ceph.tune_dev('/dev/sda') + # The config value was higher than the hardware value. + # We use the lower value. The user wants 2K but the hw only support 1K + set_max_sectors_kb.assert_called_with( + dev_name='sda', max_sectors_size=1024 + ) + persist_settings.assert_called_with( + settings_dict={'drive_settings': { + '378f3c86-b21a-4172-832d-e2b3d4bc7511': { + 'read_ahead_sect': 1024}}} + ) + self.status_set.assert_has_calls([ + call('maintenance', 'Tuning device /dev/sda'), + call('maintenance', 'Finished tuning device /dev/sda') + ]) + + def test_set_hdd_read_ahead(self): + ceph.set_hdd_read_ahead(dev_name='/dev/sda') + self.subprocess.check_output.assert_called_with( + ['hdparm', '-a256', '/dev/sda'] + ) From 387694da742962b1056f06b2c5f2c2d6819390ef Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 15 Jul 2016 10:28:32 -0700 Subject: [PATCH 1160/2699] Pre-release charm-helpers sync To begin release testing get each charm up to date with lp:charm-helpers Change-Id: Id67f4e334c11e97cc61e755033653601b047a10b --- ceph-radosgw/hooks/charmhelpers/core/templating.py | 11 ++++++++--- ceph-radosgw/hooks/charmhelpers/fetch/__init__.py | 4 ++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 0a7560ff..7b801a34 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -38,8 +39,9 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. 
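    An illustrative call (template name and target path are hypothetical):

        render('ceph.conf', '/etc/ceph/ceph.conf', context={'fsid': fsid})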
""" try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -51,7 +53,10 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - apt_install('python-jinja2', fatal=True) + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 8f39f2fe..52eaf824 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -178,14 +178,14 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True): +def apt_cache(in_memory=True, progress=None): """Build and return an apt cache""" from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() + return apt_pkg.Cache(progress) def apt_install(packages, options=None, fatal=False): From f76696b3ed4d97568e00be55a5e99eb3d9c38da8 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 15 Jul 2016 10:48:32 -0700 Subject: [PATCH 1161/2699] Pre-release charm-helpers sync To begin release testing get each charm up to date with lp:charm-helpers Change-Id: I671fa63c665337295c1c448709fcc38123d4508d --- .../charmhelpers/contrib/openstack/utils.py | 40 +++- .../contrib/storage/linux/ceph.py | 172 +++++++++++++----- ceph-osd/hooks/charmhelpers/core/host.py | 5 +- .../hooks/charmhelpers/core/templating.py | 11 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 4 +- .../contrib/openstack/amulet/deployment.py | 4 +- 6 files changed, 176 insertions(+), 60 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index f4401913..519eae95 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -220,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', 'master': 'master', @@ -413,7 +412,8 @@ def os_release(package, base='essex'): global os_rel if os_rel: return os_rel - os_rel = (get_os_codename_package(package, fatal=False) or + os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) return os_rel @@ -719,7 +719,24 @@ def git_install_requested(): return config('openstack-origin-git') is not None -requirements_dir = None +def git_os_codename_install_source(projects_yaml): + """ + Returns OpenStack codename of release being installed from source. + """ + if git_install_requested(): + projects = _git_yaml_load(projects_yaml) + + if projects in GIT_DEFAULT_BRANCHES.keys(): + if projects == 'master': + return 'newton' + return projects + + if 'release' in projects: + if projects['release'] == 'master': + return 'newton' + return projects['release'] + + return None def git_default_repos(projects_yaml): @@ -740,12 +757,6 @@ def git_default_repos(projects_yaml): } repos = [repo] - # NOTE(coreycb): This is a temp work-around until the requirements - # repo moves from stable/kilo branch to kilo-eol tag. The core - # repos have already done this. 
- if default == 'kilo': - branch = 'kilo-eol' - # neutron-* and nova-* charms require some additional repos if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: @@ -778,7 +789,7 @@ def git_default_repos(projects_yaml): } repos.append(repo) - return yaml.dump(dict(repositories=repos)) + return yaml.dump(dict(repositories=repos, release=default)) return projects_yaml @@ -793,6 +804,9 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) +requirements_dir = None + + def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. @@ -856,6 +870,10 @@ def git_clone_and_install(projects_yaml, core_project): # upper-constraints didn't exist until after icehouse if not os.path.isfile(constraints): constraints = None + # use constraints unless project yaml sets use_constraints to false + if 'use_constraints' in projects.keys(): + if not projects['use_constraints']: + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -882,6 +900,8 @@ def _git_validate_projects_yaml(projects, core_project): if projects['repositories'][-1]['name'] != core_project: error_out('{} git repo must be specified last'.format(core_project)) + _git_ensure_key_exists('release', projects) + def _git_ensure_key_exists(key, keys): """ diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 8a9b9486..beff2703 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -21,9 +21,10 @@ # James Page # Adam Gandelman # -import bisect + import errno import hashlib +import math import six import os @@ -76,8 +77,16 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ -# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) -powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 def validator(value, valid_type, valid_range=None): @@ -184,42 +193,106 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size): - """ - :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for - erasure coded pools + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). 
+ + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. :return: int. The number of pgs to use. """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count osd_list = get_osds(self.service) - if not osd_list: + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli - return 200 - - osd_list_length = len(osd_list) - # Calculate based on Ceph best practices - if osd_list_length < 5: - return 128 - elif 5 < osd_list_length < 10: - return 512 - elif 10 < osd_list_length < 50: - return 4096 + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. 
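            # Worked example (hypothetical cluster: 12 OSDs, 3 replicas,
            # 10% weight, default 100 PGs per OSD):
            #   num_pg  = (100 * 12 * 0.10) // 3 = 40
            #   nearest = 2 ** floor(log2(40))   = 32
            #   40 - 32 = 8, within 25% of 40, so 32 PGs are used.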
+ exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) else: - estimate = (osd_list_length * 100) / pool_size - # Return the next nearest power of 2 - index = bisect.bisect_right(powers_of_two, estimate) - return powers_of_two[index] + return int(nearest) class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas - if pg_num is None: - self.pg_num = self.get_pgs(self.replicas) + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. + max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) else: - self.pg_num = pg_num + self.pg_num = self.get_pgs(self.replicas, percent_data) def create(self): if not pool_exists(self.service, self.name): @@ -238,30 +311,39 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default"): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data def create(self): if not pool_exists(self.service, self.name): - # Try to find the erasure profile information so we can properly size the pgs - erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. 
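            # e.g. a jerasure profile with k=4 data chunks and m=2 coding
            # chunks (values illustrative) yields an effective pool size
            # of 6 for the get_pgs() calculation below: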
+ erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) # Check for errors if erasure_profile is None: - log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), - level=ERROR) - raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) if 'k' not in erasure_profile or 'm' not in erasure_profile: # Error - log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), - level=ERROR) - raise PoolCreationError( - message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) - - pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -955,16 +1037,22 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3, pg_num=None): + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value will be calculated by the broker based on how many OSDs are in the cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num}) + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight}) def set_ops(self, ops): """Set request ops to provided value. @@ -982,7 +1070,7 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num']: + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 35817b06..53068599 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -174,7 +174,7 @@ def init_is_systemd(): def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None): + primary_group=None, secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
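    Example (values illustrative):

        adduser('ceph', system_user=True, home_dir='/var/lib/ceph')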
@@ -186,6 +186,7 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, :param str primary_group: Primary group for user; defaults to username :param list secondary_groups: Optional list of additional groups :param int uid: UID for user being created + :param str home_dir: Home directory for user :returns: The password database entry struct, as returned by `pwd.getpwnam` """ @@ -200,6 +201,8 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False, cmd = ['useradd'] if uid: cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) if system_user or password is None: cmd.append('--system') else: diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 0a7560ff..7b801a34 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -38,8 +39,9 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -51,7 +53,10 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - apt_install('python-jinja2', fatal=True) + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 8f39f2fe..52eaf824 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -178,14 +178,14 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True): +def apt_cache(in_memory=True, progress=None): """Build and return an apt cache""" from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() + return apt_pkg.Cache(progress) def apt_install(packages, options=None, fatal=False): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index f7220f35..6ce91dbe 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,7 +71,7 @@ def _determine_branch_locations(self, other_services): base_charms = { 'mysql': ['precise', 'trusty'], 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty'], + 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], } for svc in other_services: @@ -112,7 +112,7 @@ def _add_services(self, this_service, other_services): # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon'] + 
'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] # Charms which can not use openstack-origin, ie. many subordinates no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', From 8bfb8a4a3207bd08bb4ef6e6a3642520ba62bdcb Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 15 Jul 2016 10:49:34 -0700 Subject: [PATCH 1162/2699] Pre-release charm-helpers sync To begin release testing get each charm up to date with lp:charm-helpers Change-Id: I4f8d2b4f18b9c4d447a0e003de5b3188cfa01529 --- ceph-mon/hooks/charmhelpers/core/templating.py | 11 ++++++++--- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 4 ++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 0a7560ff..7b801a34 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -38,8 +39,9 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -51,7 +53,10 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - apt_install('python-jinja2', fatal=True) + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 8f39f2fe..52eaf824 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -178,14 +178,14 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True): +def apt_cache(in_memory=True, progress=None): """Build and return an apt cache""" from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() + return apt_pkg.Cache(progress) def apt_install(packages, options=None, fatal=False): From 3111e65acec9f8e68e478b0152a03cf864cb3133 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 15 Jul 2016 10:52:57 -0700 Subject: [PATCH 1163/2699] Pre-release charm-helpers sync To begin release testing get each charm up to date with lp:charm-helpers Change-Id: I62c0e93b0b14cecf47bde1ced5871c0eb57750bd --- .../charmhelpers/contrib/openstack/utils.py | 40 +++- .../contrib/storage/linux/ceph.py | 172 +++++++++++++----- .../hooks/charmhelpers/core/templating.py | 11 +- .../hooks/charmhelpers/fetch/__init__.py | 4 +- 4 files changed, 170 insertions(+), 57 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index f4401913..519eae95 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ 
b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -220,7 +220,6 @@ } GIT_DEFAULT_BRANCHES = { - 'kilo': 'stable/kilo', 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', 'master': 'master', @@ -413,7 +412,8 @@ def os_release(package, base='essex'): global os_rel if os_rel: return os_rel - os_rel = (get_os_codename_package(package, fatal=False) or + os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) return os_rel @@ -719,7 +719,24 @@ def git_install_requested(): return config('openstack-origin-git') is not None -requirements_dir = None +def git_os_codename_install_source(projects_yaml): + """ + Returns OpenStack codename of release being installed from source. + """ + if git_install_requested(): + projects = _git_yaml_load(projects_yaml) + + if projects in GIT_DEFAULT_BRANCHES.keys(): + if projects == 'master': + return 'newton' + return projects + + if 'release' in projects: + if projects['release'] == 'master': + return 'newton' + return projects['release'] + + return None def git_default_repos(projects_yaml): @@ -740,12 +757,6 @@ def git_default_repos(projects_yaml): } repos = [repo] - # NOTE(coreycb): This is a temp work-around until the requirements - # repo moves from stable/kilo branch to kilo-eol tag. The core - # repos have already done this. - if default == 'kilo': - branch = 'kilo-eol' - # neutron-* and nova-* charms require some additional repos if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: @@ -778,7 +789,7 @@ def git_default_repos(projects_yaml): } repos.append(repo) - return yaml.dump(dict(repositories=repos)) + return yaml.dump(dict(repositories=repos, release=default)) return projects_yaml @@ -793,6 +804,9 @@ def _git_yaml_load(projects_yaml): return yaml.load(projects_yaml) +requirements_dir = None + + def git_clone_and_install(projects_yaml, core_project): """ Clone/install all specified OpenStack repositories. 
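    The projects_yaml handled here is expected to carry a top-level
    'release' key alongside 'repositories', e.g. (repository URL and
    branch are illustrative):

        repositories:
          - {name: requirements,
             repository: 'git://github.com/openstack/requirements',
             branch: stable/mitaka}
        release: mitaka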
@@ -856,6 +870,10 @@ def git_clone_and_install(projects_yaml, core_project): # upper-constraints didn't exist until after icehouse if not os.path.isfile(constraints): constraints = None + # use constraints unless project yaml sets use_constraints to false + if 'use_constraints' in projects.keys(): + if not projects['use_constraints']: + constraints = None else: repo_dir = _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, @@ -882,6 +900,8 @@ def _git_validate_projects_yaml(projects, core_project): if projects['repositories'][-1]['name'] != core_project: error_out('{} git repo must be specified last'.format(core_project)) + _git_ensure_key_exists('release', projects) + def _git_ensure_key_exists(key, keys): """ diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 8a9b9486..beff2703 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -21,9 +21,10 @@ # James Page # Adam Gandelman # -import bisect + import errno import hashlib +import math import six import os @@ -76,8 +77,16 @@ err to syslog = {use_syslog} clog to syslog = {use_syslog} """ -# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) -powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 def validator(value, valid_type, valid_range=None): @@ -184,42 +193,106 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size): - """ - :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for - erasure coded pools + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. 
pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. :return: int. The number of pgs to use. """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count osd_list = get_osds(self.service) - if not osd_list: + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli - return 200 - - osd_list_length = len(osd_list) - # Calculate based on Ceph best practices - if osd_list_length < 5: - return 128 - elif 5 < osd_list_length < 10: - return 512 - elif 10 < osd_list_length < 50: - return 4096 + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) else: - estimate = (osd_list_length * 100) / pool_size - # Return the next nearest power of 2 - index = bisect.bisect_right(powers_of_two, estimate) - return powers_of_two[index] + return int(nearest) class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas - if pg_num is None: - self.pg_num = self.get_pgs(self.replicas) + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. 
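            # e.g. on a hypothetical 3-OSD cluster with replicas=2,
            # get_pgs(2, 100.0) caps pg_num at 128, so a requested
            # pg_num of 512 would be clamped to 128: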
+ max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) else: - self.pg_num = pg_num + self.pg_num = self.get_pgs(self.replicas, percent_data) def create(self): if not pool_exists(self.service, self.name): @@ -238,30 +311,39 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default"): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data def create(self): if not pool_exists(self.service, self.name): - # Try to find the erasure profile information so we can properly size the pgs - erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) # Check for errors if erasure_profile is None: - log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), - level=ERROR) - raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) if 'k' not in erasure_profile or 'm' not in erasure_profile: # Error - log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), - level=ERROR) - raise PoolCreationError( - message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) - - pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', self.erasure_code_profile] try: check_call(cmd) @@ -955,16 +1037,22 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] - def add_op_create_pool(self, name, replica_count=3, pg_num=None): + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value will be calculated by the broker based on how many OSDs are in the cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num}) + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight}) def set_ops(self, ops): """Set request ops to provided value. 
@@ -982,7 +1070,7 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num']: + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 0a7560ff..7b801a34 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -38,8 +39,9 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -51,7 +53,10 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - apt_install('python-jinja2', fatal=True) + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 8f39f2fe..52eaf824 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -178,14 +178,14 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True): +def apt_cache(in_memory=True, progress=None): """Build and return an apt cache""" from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() + return apt_pkg.Cache(progress) def apt_install(packages, options=None, fatal=False): From 59f4a2c884a8eb6f5d7dddce2b97f3a5a7025b31 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 18 Jul 2016 16:22:40 +0100 Subject: [PATCH 1164/2699] Revert "Add AppArmor profile" ceph-mon is typically deployed under LXC or LXD, where apparmor is not supported; revert addition of apparmor profile feature as its currently breaking MAAS+LXD deployments. This reverts commit 801497e14e87536c969f8874ccc73ef649bc4af3. This reverts commit c36dfd52a4db40d649e0557bdcc2ce6880df5055. Change-Id: I94b7c7f5dc0245d273394aeb352731f7bffb1c91 --- ceph-mon/config.yaml | 6 ---- ceph-mon/files/apparmor/usr.bin.ceph-mon | 26 ----------------- ceph-mon/hooks/ceph.py | 3 +- ceph-mon/hooks/ceph_hooks.py | 37 +----------------------- 4 files changed, 2 insertions(+), 70 deletions(-) delete mode 100644 ceph-mon/files/apparmor/usr.bin.ceph-mon diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index b6e7452b..1d76b2f5 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -183,9 +183,3 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. 
- aa-profile-mode: - type: string - default: 'complain' - description: | - Enable apparmor profile. Valid settings: 'complain', 'enforce' or 'disable'. - AA complain by default. diff --git a/ceph-mon/files/apparmor/usr.bin.ceph-mon b/ceph-mon/files/apparmor/usr.bin.ceph-mon deleted file mode 100644 index e1028773..00000000 --- a/ceph-mon/files/apparmor/usr.bin.ceph-mon +++ /dev/null @@ -1,26 +0,0 @@ -# vim:syntax=apparmor -# Author: Chris Holcombe -#include - -/usr/bin/ceph-mon { - #include - - /usr/bin/ceph-mon mr, - - network inet stream, - network inet6 stream, - - owner /etc/ceph/* rw, - /etc/passwd r, - - @{PROC}/@{pid}/auxv r, - @{PROC}/@{pid}/net/dev r, - - /run/ceph/* rw, - /tmp/ r, - /var/lib/ceph/** rwk, - /var/lib/charm/ceph-*/ceph.conf r, - /var/log/ceph/* rwk, - /var/run/ceph/* rwk, - /var/tmp/ r, -} diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 6bbb1074..026e783d 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -46,8 +46,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'xfsprogs', 'apparmor-utils'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] def ceph_user(): diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 00abb5a4..d6a43405 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import glob import os import random @@ -22,7 +21,6 @@ import sys import uuid import time -import shutil import ceph from charmhelpers.core import host @@ -42,7 +40,7 @@ service_name, relations_of_type, status_set, - local_unit, ERROR) + local_unit) from charmhelpers.core.host import ( service_restart, mkdir, @@ -84,12 +82,6 @@ hooks = Hooks() -app_armor_modes = { - 'complain': 'aa-complain', - 'disabled': 'aa-disable', - 'enforce': 'aa-enforce', -} - NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' SCRIPTS_DIR = '/usr/local/bin' STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' @@ -276,32 +268,6 @@ def upgrade_monitor(): sys.exit(1) -def install_apparmor_profile(): - log('Installing app-armor-profiles') - aa_mode = config('aa-profile-mode') - if aa_mode not in app_armor_modes: - log('Invalid apparmor mode: {}. Defaulting to complain'.format( - aa_mode), level=ERROR) - aa_mode = 'complain' - apparmor_dir = os.path.join(os.sep, - 'etc', - 'apparmor.d', - 'local') - - for x in glob.glob('files/apparmor/*'): - shutil.copy(x, apparmor_dir) - try: - cmd = [ - app_armor_modes[aa_mode], - os.path.join(apparmor_dir, os.path.split(x)[-1]) - ] - subprocess.check_output(cmd) - except subprocess.CalledProcessError as err: - log('{} failed with error {}'.format( - app_armor_modes[aa_mode], err.output), level=ERROR) - raise - - @hooks.hook('install.real') @harden() def install(): @@ -408,7 +374,6 @@ def config_changed(): status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(config('monitor-secret')) ceph.wait_for_bootstrap() - install_apparmor_profile() def get_mon_hosts(): From b6c14c5ceeebcc66fa9e631982e2414cda7e4a1a Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 18 Jul 2016 10:06:20 -0700 Subject: [PATCH 1165/2699] Install python dependencies early for CH ip.py When using charmhelpers.contrib.network.ip apt install was quietly failing. 
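(A minimal sketch of the behavioral difference in
charmhelpers.fetch.apt_install:

    apt_install('python-dnspython')              # failure passes silently
    apt_install('python-dnspython', fatal=True)  # failure raises and aborts the hook
)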
Install python dependencies in the early install hook (before install.real). Charm-helpers sync to bring in apt_install with fatal=True Change-Id: Iced4e7be79c5d99a9f47a12085db51012218bdca Partial-Bug: 1601972 --- ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py | 4 ++-- ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index b5f457c7..d6dee17c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -414,7 +414,7 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython') + apt_install('python-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -458,7 +458,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython") + apt_install("python-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 519eae95..889ac044 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -145,7 +145,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0']), + ['2.8.0', '2.9.0']), ]) # >= Liberty version->codename mapping From 59998d1576febd14264b1e7203dbe4f5c86cd375 Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 18 Jul 2016 10:09:34 -0700 Subject: [PATCH 1166/2699] Install python dependencies early for CH ip.py When using charmhelpers.contrib.network.ip apt install was quietly failing. Install python dependencies in the early install hook (before install.real). 
Charm-helpers sync to bring in apt_install with fatal=True Change-Id: Idc7aef438e804124e1c8af08db1695ba7ed1872f Partial-Bug: 1601972 --- ceph-mon/hooks/charmhelpers/contrib/network/ip.py | 4 ++-- ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py | 2 +- ceph-mon/hooks/install | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index b5f457c7..d6dee17c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -414,7 +414,7 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython') + apt_install('python-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -458,7 +458,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython") + apt_install("python-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 519eae95..889ac044 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -145,7 +145,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0']), + ['2.8.0', '2.9.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install index 83a9d3ce..29ff6894 100755 --- a/ceph-mon/hooks/install +++ b/ceph-mon/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') check_and_install() { pkg="${1}-${2}" From da901da584420dbe89e2fc5268f7d9d4c8217b9f Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 18 Jul 2016 10:11:32 -0700 Subject: [PATCH 1167/2699] Install python dependencies early for CH ip.py When using charmhelpers.contrib.network.ip apt install was quietly failing. Install python dependencies in the early install hook (before install.real). 
Charm-helpers sync to bring in apt_install with fatal=True Change-Id: I0ed76694c452c70313ab26f8c2cf5f6103be9e31 Partial-Bug: 1601972 --- ceph-osd/hooks/charmhelpers/contrib/network/ip.py | 4 ++-- ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py | 2 +- ceph-osd/hooks/install | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index b5f457c7..d6dee17c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -414,7 +414,7 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython') + apt_install('python-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -458,7 +458,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython") + apt_install("python-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 519eae95..889ac044 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -145,7 +145,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0']), + ['2.8.0', '2.9.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index 83a9d3ce..29ff6894 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') check_and_install() { pkg="${1}-${2}" From 7d9c07558f1bcc190b5ecb812cfc990609d0679d Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 18 Jul 2016 10:13:54 -0700 Subject: [PATCH 1168/2699] Install python dependencies early for CH ip.py When using charmhelpers.contrib.network.ip apt install was quietly failing. Install python dependencies in the early install hook (before install.real). 
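Each of these syncs also carries the same one-line extension to the version-to-codename table in contrib/openstack/utils.py, so packages versioned 2.9.0 resolve to newton instead of falling through release detection. A condensed sketch of that lookup, reconstructed only from the visible hunk (the identifiers below are assumptions, not the helper's actual names):

    from collections import OrderedDict

    PACKAGE_CODENAMES = OrderedDict([
        ('mitaka', ['2.5.0', '2.6.0', '2.7.0']),
        ('newton', ['2.8.0', '2.9.0']),
    ])

    def codename_for(version):
        # Walk the table oldest-first; return the first matching release.
        for codename, versions in PACKAGE_CODENAMES.items():
            if version in versions:
                return codename
        raise ValueError('unmapped package version: %s' % version)

    assert codename_for('2.9.0') == 'newton'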
Charm-helpers sync to bring in apt_install with fatal=True Change-Id: Icfe46c617a21ac97eae54752aad5b0ed53aa706a Partial-Bug: 1601972 --- ceph-proxy/hooks/charmhelpers/contrib/network/ip.py | 4 ++-- ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py | 2 +- ceph-proxy/hooks/install | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index b5f457c7..d6dee17c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -414,7 +414,7 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython') + apt_install('python-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -458,7 +458,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython") + apt_install("python-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 519eae95..889ac044 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -145,7 +145,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0']), + ['2.8.0', '2.9.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install index 83a9d3ce..29ff6894 100755 --- a/ceph-proxy/hooks/install +++ b/ceph-proxy/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') check_and_install() { pkg="${1}-${2}" From 309e2e2be5ee4cc9099021fac54e74bd20ddd399 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Jul 2016 14:01:30 -0700 Subject: [PATCH 1169/2699] Update cephx permissions for OSD actions This change adds more permissions to the ceph osd-upgrade key, specifically the ability to mark an OSD as in/out and actively remove an OSD from the cluster. Some of the ceph-osd charm maintenance actions are failing because of this lack of permission. Closes-Bug: 1602826 Change-Id: I6af43b61149c6eeeeb5c77950701194beda2da71 --- ceph-mon/hooks/ceph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py index 6bbb1074..25299879 100644 --- a/ceph-mon/hooks/ceph.py +++ b/ceph-mon/hooks/ceph.py @@ -365,6 +365,10 @@ def get_radosgw_key(): 'allow command "config-key put"', 'allow command "config-key get"', 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', ] } From 9f11bd6bf7eaaa7713028fb60be66d4370c0c8eb Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Jul 2016 14:07:01 -0700 Subject: [PATCH 1170/2699] Fix OSD replacement Use the osd-upgrade key when replacing OSD's as this key has the correct cephx permissions to perform the operation. 
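The mechanics behind both changes: every ceph CLI call carries a cephx identity via --id, and the monitor checks that identity's capability list before executing the command. That is why PATCH 1169 had to grow the osd-upgrade key's mon caps ('allow command "osd out"' and friends) before this change could switch callers over. A minimal sketch of the call pattern (helper name hypothetical):

    import subprocess

    def ceph_cmd(args, user='osd-upgrade'):
        # Runs as client.<user>; the monitor rejects the command unless
        # that key's mon caps allow it, e.g. allow command "osd out".
        return subprocess.check_output(['ceph', '--id', user] + list(args))

    # As used by replace_osd(): take the dead OSD out, then remove it.
    # ceph_cmd(['osd', 'out', 'osd.0'])
    # ceph_cmd(['osd', 'rm', 'osd.0'])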
Closes-Bug: 1602826 Depends-On: I6af43b61149c6eeeeb5c77950701194beda2da71 Change-Id: I32d2f1a4036e09d5d1fd13009c95ab1514e7304c --- ceph-osd/hooks/ceph.py | 32 ++++++++++++++++++------- ceph-osd/unit_tests/test_replace_osd.py | 15 ++++++++---- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/hooks/ceph.py index 22e6c9af..09f70960 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/hooks/ceph.py @@ -362,8 +362,12 @@ def replace_osd(dead_osd_number, # Drop this osd out of the cluster. This will begin a # rebalance operation status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output(['ceph', 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) # Kill the osd process if it's not already dead if systemd(): @@ -378,13 +382,25 @@ def replace_osd(dead_osd_number, mount_point, os.strerror(ret))) # Clean up the old mount point shutil.rmtree(mount_point) - subprocess.check_output(['ceph', 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) # Revoke the OSDs access keys - subprocess.check_output(['ceph', 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output(['ceph', 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) status_set('maintenance', 'Setting up replacement osd {}'.format( new_osd_device)) osdize(new_osd_device, diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index dd3e9c11..ce919382 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -79,13 +79,16 @@ def test_umount(self): @patch('ceph.osdize') @patch('ceph.shutil') @patch('ceph.systemd') + @patch('ceph.ceph_user') def test_replace_osd(self, + ceph_user, systemd, shutil, osdize, umount, subprocess, mounts): + ceph_user.return_value = "ceph" mounts.return_value = [['/var/lib/ceph/osd/ceph-a', '/dev/sda']] subprocess.check_output.return_value = True self.status_set.return_value = None @@ -102,11 +105,15 @@ def test_replace_osd(self, ignore_errors=False) subprocess.check_output.assert_has_calls( [ - call(['ceph', 'osd', 'out', 'osd.0']), + call(['ceph', '--id', 'osd-upgrade', + 'osd', 'out', 'osd.0']), call(['stop', 'ceph-osd', 'id=0']), - call(['ceph', 'osd', 'crush', 'remove', 'osd.0']), - call(['ceph', 'auth', 'del', 'osd.0']), - call(['ceph', 'osd', 'rm', 'osd.0']) + call(['ceph', '--id', + 'osd-upgrade', 'osd', 'crush', 'remove', 'osd.0']), + call(['ceph', '--id', + 'osd-upgrade', 'auth', 'del', 'osd.0']), + call(['ceph', '--id', + 'osd-upgrade', 'osd', 'rm', 'osd.0']) ] ) From e760bfac5da1bfa0e9e1877bb3bacdb26b0ea513 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 14 Jul 2016 13:49:18 -0700 Subject: [PATCH 1171/2699] Use osd-upgrade user for pause/resume The pause and resume actions shell out to the ceph command to run OSD operations (in/out). Because the default cephx key given out by the monitor cluster does not contain the correct permissions, these commands fail. Use the osd-upgrade user which has the correct permissions. 
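The unit-test updates in this change and the previous one assert the new argv shape rather than any live cluster behaviour: subprocess is patched at the module under test and the exact command lists are compared. A self-contained sketch of that pattern (function names hypothetical; on python3 use unittest.mock):

    import subprocess

    from mock import call, patch

    def pause_osd(osd_id):
        subprocess.check_call(
            ['ceph', '--id', 'osd-upgrade', 'osd', 'out', str(osd_id)])

    @patch.object(subprocess, 'check_call')
    def test_pause_osd(check_call):
        pause_osd(5)
        # The assertion pins the full command list, --id flag included.
        check_call.assert_has_calls(
            [call(['ceph', '--id', 'osd-upgrade', 'osd', 'out', '5'])])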
Closes-Bug: 1602826 Depends-On: I6af43b61149c6eeeeb5c77950701194beda2da71 Change-Id: Ie31bc9048972dbb0986ac8deb5b821a4db5d585f --- ceph-osd/actions/pause_resume.py | 11 ++++++++--- ceph-osd/unit_tests/test_actions_pause_resume.py | 9 ++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index f918994a..4d10b759 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -46,7 +46,10 @@ def pause(args): @raises OSError if it can't get the local osd ids. """ for local_id in get_local_osd_ids(): - cmd = ['ceph', 'osd', 'out', str(local_id)] + cmd = [ + 'ceph', + '--id', 'osd-upgrade', + 'osd', 'out', str(local_id)] check_call(cmd) set_unit_paused() assess_status() @@ -59,12 +62,14 @@ def resume(args): @raises OSError if the unit can't get the local osd ids """ for local_id in get_local_osd_ids(): - cmd = ['ceph', 'osd', 'in', str(local_id)] + cmd = [ + 'ceph', + '--id', 'osd-upgrade', + 'osd', 'in', str(local_id)] check_call(cmd) clear_unit_paused() assess_status() - # A dictionary of all the defined actions to callables (which take # parsed arguments). ACTIONS = {"pause": pause, "resume": resume} diff --git a/ceph-osd/unit_tests/test_actions_pause_resume.py b/ceph-osd/unit_tests/test_actions_pause_resume.py index 1d2fe072..b0ac4418 100644 --- a/ceph-osd/unit_tests/test_actions_pause_resume.py +++ b/ceph-osd/unit_tests/test_actions_pause_resume.py @@ -24,7 +24,6 @@ class PauseTestCase(CharmTestCase): - def setUp(self): super(PauseTestCase, self).setUp( actions, ["check_call", @@ -35,14 +34,14 @@ def setUp(self): def test_pauses_services(self): self.get_local_osd_ids.return_value = [5] actions.pause([]) - cmd = ['ceph', 'osd', 'out', '5'] + cmd = ['ceph', '--id', + 'osd-upgrade', 'osd', 'out', '5'] self.check_call.assert_called_once_with(cmd) self.set_unit_paused.assert_called_once_with() self.assess_status.assert_called_once_with() class ResumeTestCase(CharmTestCase): - def setUp(self): super(ResumeTestCase, self).setUp( actions, ["check_call", @@ -53,14 +52,14 @@ def setUp(self): def test_pauses_services(self): self.get_local_osd_ids.return_value = [5] actions.resume([]) - cmd = ['ceph', 'osd', 'in', '5'] + cmd = ['ceph', '--id', + 'osd-upgrade', 'osd', 'in', '5'] self.check_call.assert_called_once_with(cmd) self.clear_unit_paused.assert_called_once_with() self.assess_status.assert_called_once_with() class MainTestCase(CharmTestCase): - def setUp(self): super(MainTestCase, self).setUp(actions, ["action_fail"]) From cbc8e897e89fd4501296fe24061294063e9e2253 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 20 Jul 2016 14:23:35 +0100 Subject: [PATCH 1172/2699] Use bundletester for amulet test execution Switch to using bundletester for execution of functional tests, leveraging tox to build out test virtualenvs. Rename amulet tests inline with gate-* and dfs-* naming standards. Update README to refer to functional testing section of the charm guide. 
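In practice the entry points after this change are tox environments rather than `juju test`; the targets defined in the tox.ini below map directly onto test-name patterns:

    tox -e func27-noop    # dry run of the gate-* targets, for debugging
    tox -e func27-smoke   # single smoke bundle (gate-basic-xenial-mitaka)
    tox -e func27         # every executable gate-* target

bundletester reads tests/tests.yaml for bootstrap/reset behaviour and writes its results to func-results.json.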
Change-Id: I0d378cdffdc2eaa5c34578e85aba6acb7a47a41d --- ceph-mon/Makefile | 3 +- ceph-mon/test-requirements.txt | 14 +++ ceph-mon/tests/016-basic-trusty-juno | 25 ---- ceph-mon/tests/020-basic-wily-liberty | 23 ---- ceph-mon/tests/README | 113 ------------------ ceph-mon/tests/README.md | 9 ++ ceph-mon/tests/basic_deployment.py | 20 ++-- ...e-icehouse => gate-basic-precise-icehouse} | 0 ...ty-icehouse => gate-basic-trusty-icehouse} | 0 ...sic-trusty-kilo => gate-basic-trusty-kilo} | 0 ...usty-liberty => gate-basic-trusty-liberty} | 0 ...trusty-mitaka => gate-basic-trusty-mitaka} | 0 ...xenial-mitaka => gate-basic-xenial-mitaka} | 0 ceph-mon/tests/setup/00-setup | 17 --- ceph-mon/tests/tests.yaml | 39 +++--- ceph-mon/tox.ini | 37 ++++++ 16 files changed, 88 insertions(+), 212 deletions(-) delete mode 100755 ceph-mon/tests/016-basic-trusty-juno delete mode 100755 ceph-mon/tests/020-basic-wily-liberty delete mode 100644 ceph-mon/tests/README create mode 100644 ceph-mon/tests/README.md rename ceph-mon/tests/{014-basic-precise-icehouse => gate-basic-precise-icehouse} (100%) rename ceph-mon/tests/{015-basic-trusty-icehouse => gate-basic-trusty-icehouse} (100%) rename ceph-mon/tests/{017-basic-trusty-kilo => gate-basic-trusty-kilo} (100%) rename ceph-mon/tests/{018-basic-trusty-liberty => gate-basic-trusty-liberty} (100%) rename ceph-mon/tests/{019-basic-trusty-mitaka => gate-basic-trusty-mitaka} (100%) rename ceph-mon/tests/{021-basic-xenial-mitaka => gate-basic-xenial-mitaka} (100%) delete mode 100755 ceph-mon/tests/setup/00-setup diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 6751aafc..fe31123d 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -10,8 +10,7 @@ test: functional_test: @echo Starting Amulet tests... - @tests/setup/00-setup - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 + @tox -e func27 bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 4faf2545..06972943 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -7,3 +7,17 @@ flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 charm-tools>=2.0.0 requests==2.6.0 +# BEGIN: Amulet OpenStack Charm Helper Requirements +# Liberty client lower constraints +amulet>=1.14.3,<2.0 +bundletester>=0.6.1,<1.0 +python-ceilometerclient>=1.5.0,<2.0 +python-cinderclient>=1.4.0,<2.0 +python-glanceclient>=1.1.0,<2.0 +python-heatclient>=0.8.0,<1.0 +python-novaclient>=2.30.1,<3.0 +python-openstackclient>=1.7.0,<2.0 +python-swiftclient>=2.6.0,<3.0 +pika>=0.10.0,<1.0 +distro-info +# END: Amulet OpenStack Charm Helper Requirements diff --git a/ceph-mon/tests/016-basic-trusty-juno b/ceph-mon/tests/016-basic-trusty-juno deleted file mode 100755 index 54f3670c..00000000 --- a/ceph-mon/tests/016-basic-trusty-juno +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph deployment on trusty-juno.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-juno', - source='cloud:trusty-updates/juno') - deployment.run_tests() diff --git a/ceph-mon/tests/020-basic-wily-liberty b/ceph-mon/tests/020-basic-wily-liberty deleted file mode 100755 index fd4ebc24..00000000 --- a/ceph-mon/tests/020-basic-wily-liberty +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on wily-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='wily') - deployment.run_tests() diff --git a/ceph-mon/tests/README b/ceph-mon/tests/README deleted file mode 100644 index 79c5b063..00000000 --- a/ceph-mon/tests/README +++ /dev/null @@ -1,113 +0,0 @@ -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -Reference: lp:openstack-charm-testing for full test bundles. - -A single topology and configuration is defined and deployed, once for each of -the defined Ubuntu:OpenStack release combos. The ongoing goal is for this -charm to always possess tests and combo definitions for all currently-supported -release combinations of U:OS. - -test_* methods are called in lexical sort order, as with most runners. However, -each individual test method should be idempotent and expected to pass regardless -of run order or Ubuntu:OpenStack combo. When writing or modifying tests, -ensure that every individual test is not dependent on another test_ method. - -Test naming convention, purely for code organization purposes: - 1xx service and endpoint checks - 2xx relation checks - 3xx config checks - 4xx functional checks - 9xx restarts, config changes, actions and other final checks - -In order to run tests, charm-tools and juju must be installed: - sudo add-apt-repository ppa:juju/stable - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer amulet - -Alternatively, tests may be exercised with proposed or development versions -of juju and related tools: - - # juju proposed version - sudo add-apt-repository ppa:juju/proposed - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - - # juju development version - sudo add-apt-repository ppa:juju/devel - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - -Some tests may need to download files. If a web proxy server is required in -the environment, the AMULET_HTTP_PROXY environment variable must be set and -passed into the juju test command. This is unrelated to juju's http proxy -settings or behavior. - -The following examples demonstrate different ways that tests can be executed. 
-All examples are run from the charm's root directory. - - * To run all +x tests in the tests directory: - - bzr branch lp:charms/trusty/foo - cd foo - make functional_test - - * To run the tests against a specific release combo as defined in tests/: - - bzr branch lp:charms/trusty/foo - cd foo - juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To run tests and keep the juju environment deployed after a failure: - - bzr branch lp:charms/trusty/foo - cd foo - juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To re-run a test module against an already deployed environment (one - that was deployed by a previous call to 'juju test --set-e'): - - ./tests/015-basic-trusty-icehouse - - * Even with --set-e, `juju test` will tear down the deployment when all - tests pass. The following work flow may be more effective when - iterating on test writing. - - bzr branch lp:charms/trusty/foo - cd foo - ./tests/setup/00-setup - juju bootstrap - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - - * There may be test definitions in the tests/ dir which are not set +x - executable. This is generally true for deprecated releases, or for - upcoming releases which are not yet validated and enabled. To enable - and run these tests: - bzr branch lp:charms/trusty/foo - cd foo - ls tests - chmod +x tests/017-basic-trusty-kilo - ./tests/setup/00-setup - juju bootstrap - ./tests/017-basic-trusty-kilo - - -Additional notes: - - * Use DEBUG to turn on debug logging, use ERROR otherwise. - u = OpenStackAmuletUtils(ERROR) - u = OpenStackAmuletUtils(DEBUG) - - * To interact with the deployed environment: - export OS_USERNAME=admin - export OS_PASSWORD=openstack - export OS_TENANT_NAME=admin - export OS_REGION_NAME=RegionOne - export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 - keystone user-list - glance image-list diff --git a/ceph-mon/tests/README.md b/ceph-mon/tests/README.md new file mode 100644 index 00000000..046be7fb --- /dev/null +++ b/ceph-mon/tests/README.md @@ -0,0 +1,9 @@ +# Overview + +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) +section of the OpenStack Charm Guide. 
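The basic_deployment.py hunk that follows is an amulet API update rather than a behaviour change: sentries are no longer a flat map keyed by 'service/N' but are keyed by service name, each entry yielding a list of unit sentries.

    # Old amulet lookup, one flat dict keyed by unit name:
    #     self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0']
    # Newer amulet, keyed by service and indexed by unit position:
    #     self.ceph0_sentry = self.d.sentry['ceph-mon'][0]

    def first_unit(deployment, service):
        # Hypothetical convenience wrapper over the newer sentry API;
        # not part of the charm code, shown only for the shape of it.
        return deployment.sentry[service][0]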
diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 55dd0eec..f98622c3 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -124,16 +124,16 @@ def _configure_services(self): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry.unit['mysql/0'] - self.keystone_sentry = self.d.sentry.unit['keystone/0'] - self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_sentry = self.d.sentry.unit['nova-compute/0'] - self.glance_sentry = self.d.sentry.unit['glance/0'] - self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] + self.mysql_sentry = self.d.sentry['mysql'][0] + self.keystone_sentry = self.d.sentry['keystone'][0] + self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] + self.nova_sentry = self.d.sentry['nova-compute'][0] + self.glance_sentry = self.d.sentry['glance'][0] + self.cinder_sentry = self.d.sentry['cinder'][0] + self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] + self.ceph0_sentry = self.d.sentry['ceph-mon'][0] + self.ceph1_sentry = self.d.sentry['ceph-mon'][1] + self.ceph2_sentry = self.d.sentry['ceph-mon'][2] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( diff --git a/ceph-mon/tests/014-basic-precise-icehouse b/ceph-mon/tests/gate-basic-precise-icehouse similarity index 100% rename from ceph-mon/tests/014-basic-precise-icehouse rename to ceph-mon/tests/gate-basic-precise-icehouse diff --git a/ceph-mon/tests/015-basic-trusty-icehouse b/ceph-mon/tests/gate-basic-trusty-icehouse similarity index 100% rename from ceph-mon/tests/015-basic-trusty-icehouse rename to ceph-mon/tests/gate-basic-trusty-icehouse diff --git a/ceph-mon/tests/017-basic-trusty-kilo b/ceph-mon/tests/gate-basic-trusty-kilo similarity index 100% rename from ceph-mon/tests/017-basic-trusty-kilo rename to ceph-mon/tests/gate-basic-trusty-kilo diff --git a/ceph-mon/tests/018-basic-trusty-liberty b/ceph-mon/tests/gate-basic-trusty-liberty similarity index 100% rename from ceph-mon/tests/018-basic-trusty-liberty rename to ceph-mon/tests/gate-basic-trusty-liberty diff --git a/ceph-mon/tests/019-basic-trusty-mitaka b/ceph-mon/tests/gate-basic-trusty-mitaka similarity index 100% rename from ceph-mon/tests/019-basic-trusty-mitaka rename to ceph-mon/tests/gate-basic-trusty-mitaka diff --git a/ceph-mon/tests/021-basic-xenial-mitaka b/ceph-mon/tests/gate-basic-xenial-mitaka similarity index 100% rename from ceph-mon/tests/021-basic-xenial-mitaka rename to ceph-mon/tests/gate-basic-xenial-mitaka diff --git a/ceph-mon/tests/setup/00-setup b/ceph-mon/tests/setup/00-setup deleted file mode 100755 index 94e5611f..00000000 --- a/ceph-mon/tests/setup/00-setup +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -ex - -sudo add-apt-repository --yes ppa:juju/stable -sudo apt-get update --yes -sudo apt-get install --yes amulet \ - distro-info-data \ - python-cinderclient \ - python-distro-info \ - python-glanceclient \ - python-heatclient \ - python-keystoneclient \ - python-neutronclient \ - python-novaclient \ - python-pika \ - python-swiftclient diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 
49e721b3..e3185c6d 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,22 +1,17 @@ -bootstrap: true -reset: false -virtualenv: true -makefile: - - lint - - test -sources: - - ppa:juju/stable -packages: - - amulet - - distro-info-data - - python-ceilometerclient - - python-cinderclient - - python-distro-info - - python-glanceclient - - python-heatclient - - python-keystoneclient - - python-neutronclient - - python-novaclient - - python-pika - - python-swiftclient - - python-nose \ No newline at end of file +# Bootstrap the model if necessary. +bootstrap: True +# Re-use bootstrap node instead of destroying/re-bootstrapping. +reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. +#python-packages: diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 9c02ada3..16d4fe94 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -5,6 +5,8 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + AMULET_SETUP_TIMEOUT=2700 +passenv = HOME TERM AMULET_* install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} @@ -25,6 +27,41 @@ commands = flake8 {posargs} actions hooks unit_tests tests [testenv:venv] commands = {posargs} +[testenv:func27-noop] +# DRY RUN - For Debug +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + +[testenv:func27] +# Charm Functional Test +# Run all gate tests which are +x (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + +[testenv:func27-smoke] +# Charm Functional Test +# Run a specific test as an Amulet smoke test (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + +[testenv:func27-dev] +# Charm Functional Test +# Run all development test targets which are +x (may not always pass!) 
+basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + [flake8] ignore = E402,E226 exclude = hooks/charmhelpers From 151bc086a9adaf0aa2402bf3a2d17d5784341352 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 21 Jul 2016 14:25:45 +0000 Subject: [PATCH 1173/2699] Pre 1607 release charm-helpers sync Sync charmhelpers to pick up bug fixes for 1607 release Change-Id: I3bfa157e548f5a60ade8ae943178c0de8ccfcf58 --- ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py | 8 ++++++++ .../tests/charmhelpers/contrib/amulet/deployment.py | 10 +++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 889ac044..9d3e3d89 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -212,6 +212,7 @@ 'glance': 'git://github.com/openstack/glance', 'horizon': 'git://github.com/openstack/horizon', 'keystone': 'git://github.com/openstack/keystone', + 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', 'neutron': 'git://github.com/openstack/neutron', 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', @@ -761,6 +762,13 @@ def git_default_repos(projects_yaml): if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: core_project = 'neutron' + if service == 'neutron-api': + repo = { + 'name': 'networking-hyperv', + 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], + 'branch': branch, + } + repos.append(repo) for project in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas', 'nova']: repo = { diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index 0146236d..9c65518e 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -78,11 +78,15 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: - self.d.setup(timeout=900) - self.d.sentry.wait(timeout=900) + self.d.setup(timeout=timeout) + self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: - amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + amulet.raise_status( + amulet.FAIL, + msg="Deployment timed out ({}s)".format(timeout) + ) except Exception: raise From dc553fb121d35d3d3e6432fa4b94012e6a4f0314 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 21 Jul 2016 14:26:07 +0000 Subject: [PATCH 1174/2699] Pre 1607 release charm-helpers sync Sync charmhelpers to pick up bug fixes for 1607 release Change-Id: I1fab5136b8320da6cb8d1efc269efa655e9fcb4c --- ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py | 8 ++++++++ .../tests/charmhelpers/contrib/amulet/deployment.py | 10 +++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 889ac044..9d3e3d89 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -212,6 +212,7 @@ 'glance': 'git://github.com/openstack/glance', 'horizon': 
'git://github.com/openstack/horizon', 'keystone': 'git://github.com/openstack/keystone', + 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', 'neutron': 'git://github.com/openstack/neutron', 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', @@ -761,6 +762,13 @@ def git_default_repos(projects_yaml): if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: core_project = 'neutron' + if service == 'neutron-api': + repo = { + 'name': 'networking-hyperv', + 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], + 'branch': branch, + } + repos.append(repo) for project in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas', 'nova']: repo = { diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 0146236d..9c65518e 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -78,11 +78,15 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: - self.d.setup(timeout=900) - self.d.sentry.wait(timeout=900) + self.d.setup(timeout=timeout) + self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: - amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + amulet.raise_status( + amulet.FAIL, + msg="Deployment timed out ({}s)".format(timeout) + ) except Exception: raise From a92080b6c5e512945144f9016d233bdaee5b5569 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 21 Jul 2016 14:26:30 +0000 Subject: [PATCH 1175/2699] Pre 1607 release charm-helpers sync Sync charmhelpers to pick up bug fixes for 1607 release Change-Id: I28d0bd8622a67a675b53696adb4730d4b3a52c73 --- .../charmhelpers/contrib/hahelpers/apache.py | 23 +++++++++++++++---- .../charmhelpers/contrib/openstack/utils.py | 8 +++++++ .../charmhelpers/contrib/amulet/deployment.py | 10 +++++--- 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 3313abac..d0c69942 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -22,6 +22,7 @@ # Adam Gandelman # +import os import subprocess from charmhelpers.core.hookenv import ( @@ -72,9 +73,23 @@ def get_ca_cert(): return ca_cert +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'r') as crt: + cert = crt.read() + return cert + + def install_ca_cert(ca_cert): if ca_cert: - with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', - 'w') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) + cert_file = ('/usr/local/share/ca-certificates/' + 'keystone_juju_ca_cert.crt') + old_cert = retrieve_ca_cert(cert_file) + if old_cert and old_cert == ca_cert: + log("CA cert is the same as installed version", level=INFO) + else: + log("Installing new CA cert", level=INFO) + with open(cert_file, 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 889ac044..9d3e3d89 100644 --- 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -212,6 +212,7 @@ 'glance': 'git://github.com/openstack/glance', 'horizon': 'git://github.com/openstack/horizon', 'keystone': 'git://github.com/openstack/keystone', + 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', 'neutron': 'git://github.com/openstack/neutron', 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', @@ -761,6 +762,13 @@ def git_default_repos(projects_yaml): if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: core_project = 'neutron' + if service == 'neutron-api': + repo = { + 'name': 'networking-hyperv', + 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], + 'branch': branch, + } + repos.append(repo) for project in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas', 'nova']: repo = { diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py index 0146236d..9c65518e 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -78,11 +78,15 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: - self.d.setup(timeout=900) - self.d.sentry.wait(timeout=900) + self.d.setup(timeout=timeout) + self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: - amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + amulet.raise_status( + amulet.FAIL, + msg="Deployment timed out ({}s)".format(timeout) + ) except Exception: raise From b04f2cd8e58374a5f8f5455aa925c725a105885a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 21 Jul 2016 18:06:20 +0000 Subject: [PATCH 1176/2699] Use bundletester for amulet test execution Switch to using bundletester for execution of functional tests, leveraging tox to build out test virtualenvs. Rename amulet tests inline with gate-*, dev-* and dfs-* naming standards. Update README to refer to functional testing section of the charm guide. 
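One hunk in the radosgw sync above is worth a note: install_ca_cert() now compares the incoming CA certificate against what is already on disk and only rewrites the file and reruns update-ca-certificates when they differ. The same read-compare-write idempotency pattern in isolation (function name hypothetical):

    import os

    def write_if_changed(path, content):
        # Returns True only when the file was actually (re)written, so
        # the caller can gate an expensive side effect such as
        # 'update-ca-certificates --fresh' on it.
        if os.path.isfile(path):
            with open(path) as f:
                if f.read() == content:
                    return False
        with open(path, 'w') as f:
            f.write(content)
        return True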
Change-Id: I618219551fc116c151cda4790c01aa9e86fa462a --- ceph-osd/Makefile | 3 +- ceph-osd/test-requirements.txt | 16 +++ ceph-osd/tests/README | 113 ------------------ ceph-osd/tests/README.md | 9 ++ ceph-osd/tests/basic_deployment.py | 26 ++-- ...ic-trusty-juno => dev-basic-xenial-newton} | 10 +- ...-wily-liberty => dev-basic-yakkety-newton} | 6 +- ...e-icehouse => gate-basic-precise-icehouse} | 2 +- ...ty-icehouse => gate-basic-trusty-icehouse} | 2 +- ...sic-trusty-kilo => gate-basic-trusty-kilo} | 2 +- ...usty-liberty => gate-basic-trusty-liberty} | 2 +- ...trusty-mitaka => gate-basic-trusty-mitaka} | 2 +- ...xenial-mitaka => gate-basic-xenial-mitaka} | 2 +- ceph-osd/tests/setup/00-setup | 17 --- ceph-osd/tests/tests.yaml | 38 +++--- ceph-osd/tox.ini | 48 +++++++- 16 files changed, 117 insertions(+), 181 deletions(-) delete mode 100644 ceph-osd/tests/README create mode 100644 ceph-osd/tests/README.md rename ceph-osd/tests/{016-basic-trusty-juno => dev-basic-xenial-newton} (69%) rename ceph-osd/tests/{020-basic-wily-liberty => dev-basic-yakkety-newton} (82%) rename ceph-osd/tests/{014-basic-precise-icehouse => gate-basic-precise-icehouse} (97%) rename ceph-osd/tests/{015-basic-trusty-icehouse => gate-basic-trusty-icehouse} (97%) rename ceph-osd/tests/{017-basic-trusty-kilo => gate-basic-trusty-kilo} (97%) rename ceph-osd/tests/{018-basic-trusty-liberty => gate-basic-trusty-liberty} (97%) rename ceph-osd/tests/{019-basic-trusty-mitaka => gate-basic-trusty-mitaka} (97%) rename ceph-osd/tests/{021-basic-xenial-mitaka => gate-basic-xenial-mitaka} (97%) delete mode 100755 ceph-osd/tests/setup/00-setup diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index c6109cc8..46b94807 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -10,8 +10,7 @@ test: functional_test: @echo Starting Amulet tests... - @tests/setup/00-setup - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 + @tox -e func27 bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 4faf2545..74baa120 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -7,3 +7,19 @@ flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 charm-tools>=2.0.0 requests==2.6.0 +# BEGIN: Amulet OpenStack Charm Helper Requirements +# Liberty client lower constraints +amulet>=1.14.3,<2.0 +bundletester>=0.6.1,<1.0 +python-ceilometerclient>=1.5.0,<2.0 +python-cinderclient>=1.4.0,<2.0 +python-glanceclient>=1.1.0,<2.0 +python-heatclient>=0.8.0,<1.0 +python-keystoneclient>=1.7.1,<2.0 +python-neutronclient>=3.1.0,<4.0 +python-novaclient>=2.30.1,<3.0 +python-openstackclient>=1.7.0,<2.0 +python-swiftclient>=2.6.0,<3.0 +pika>=0.10.0,<1.0 +distro-info +# END: Amulet OpenStack Charm Helper Requirements diff --git a/ceph-osd/tests/README b/ceph-osd/tests/README deleted file mode 100644 index 31adf8ec..00000000 --- a/ceph-osd/tests/README +++ /dev/null @@ -1,113 +0,0 @@ -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -Reference: lp:openstack-charm-testing for full test bundles. - -A single topology and configuration is defined and deployed, once for each of -the defined Ubuntu:OpenStack release combos. The ongoing goal is for this -charm to always possess tests and combo definitions for all currently-supported -release combinations of U:OS. 
- -test_* methods are called in lexical sort order, as with most runners. However, -each individual test method should be idempotent and expected to pass regardless -of run order or Ubuntu:OpenStack combo. When writing or modifying tests, -ensure that every individual test is not dependent on another test_ method. - -Test naming convention, purely for code organization purposes: - 1xx service and endpoint checks - 2xx relation checks - 3xx config checks - 4xx functional checks - 9xx restarts, config changes, actions and other final checks - -In order to run tests, charm-tools and juju must be installed: - sudo add-apt-repository ppa:juju/stable - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer amulet - -Alternatively, tests may be exercised with proposed or development versions -of juju and related tools: - - # juju proposed version - sudo add-apt-repository ppa:juju/proposed - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - - # juju development version - sudo add-apt-repository ppa:juju/devel - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - -Some tests may need to download files. If a web proxy server is required in -the environment, the AMULET_HTTP_PROXY environment variable must be set and -passed into the juju test command. This is unrelated to juju's http proxy -settings or behavior. - -The following examples demonstrate different ways that tests can be executed. -All examples are run from the charm's root directory. - - * To run all +x tests in the tests directory: - - bzr branch lp:charms/trusty/foo - cd foo - make functional_test - - * To run the tests against a specific release combo as defined in tests/: - - bzr branch lp:charms/trusty/foo - cd foo - juju test -v -p AMULET_HTTP_PROXY --timeout 2700 015-basic-trusty-icehouse - - * To run tests and keep the juju environment deployed after a failure: - - bzr branch lp:charms/trusty/foo - cd foo - juju test --set-e -v -p AMULET_HTTP_PROXY --timeout 2700 015-basic-trusty-icehouse - - * To re-run a test module against an already deployed environment (one - that was deployed by a previous call to 'juju test --set-e'): - - ./tests/015-basic-trusty-icehouse - - * Even with --set-e, `juju test` will tear down the deployment when all - tests pass. The following work flow may be more effective when - iterating on test writing. - - bzr branch lp:charms/trusty/foo - cd foo - ./tests/setup/00-setup - juju bootstrap - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - - * There may be test definitions in the tests/ dir which are not set +x - executable. This is generally true for deprecated releases, or for - upcoming releases which are not yet validated and enabled. To enable - and run these tests: - bzr branch lp:charms/trusty/foo - cd foo - ls tests - chmod +x tests/017-basic-trusty-kilo - ./tests/setup/00-setup - juju bootstrap - ./tests/017-basic-trusty-kilo - - -Additional notes: - - * Use DEBUG to turn on debug logging, use ERROR otherwise. 
- u = OpenStackAmuletUtils(ERROR) - u = OpenStackAmuletUtils(DEBUG) - - * To interact with the deployed environment: - export OS_USERNAME=admin - export OS_PASSWORD=openstack - export OS_TENANT_NAME=admin - export OS_REGION_NAME=RegionOne - export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 - keystone user-list - glance image-list diff --git a/ceph-osd/tests/README.md b/ceph-osd/tests/README.md new file mode 100644 index 00000000..046be7fb --- /dev/null +++ b/ceph-osd/tests/README.md @@ -0,0 +1,9 @@ +# Overview + +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) +section of the OpenStack Charm Guide. diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 8f6ac398..7b1154af 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -120,18 +120,18 @@ def _configure_services(self): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry.unit['mysql/0'] - self.keystone_sentry = self.d.sentry.unit['keystone/0'] - self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_sentry = self.d.sentry.unit['nova-compute/0'] - self.glance_sentry = self.d.sentry.unit['glance/0'] - self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2'] - self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] - self.ceph_osd1_sentry = self.d.sentry.unit['ceph-osd/1'] - self.ceph_osd2_sentry = self.d.sentry.unit['ceph-osd/2'] + self.mysql_sentry = self.d.sentry['mysql'][0] + self.keystone_sentry = self.d.sentry['keystone'][0] + self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] + self.nova_sentry = self.d.sentry['nova-compute'][0] + self.glance_sentry = self.d.sentry['glance'][0] + self.cinder_sentry = self.d.sentry['cinder'][0] + self.ceph0_sentry = self.d.sentry['ceph-mon'][0] + self.ceph1_sentry = self.d.sentry['ceph-mon'][1] + self.ceph2_sentry = self.d.sentry['ceph-mon'][2] + self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] + self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] + self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( diff --git a/ceph-osd/tests/016-basic-trusty-juno b/ceph-osd/tests/dev-basic-xenial-newton similarity index 69% rename from ceph-osd/tests/016-basic-trusty-juno rename to ceph-osd/tests/dev-basic-xenial-newton index 6245f584..4dd60b96 100755 --- a/ceph-osd/tests/016-basic-trusty-juno +++ b/ceph-osd/tests/dev-basic-xenial-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Amulet tests on a basic ceph-osd deployment on trusty-juno.""" +"""Amulet tests on a basic ceph-osd deployment on xenial-newton.""" from basic_deployment import CephOsdBasicDeployment if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='trusty', - openstack='cloud:trusty-juno', - source='cloud:trusty-updates/juno') + deployment = CephOsdBasicDeployment(series='xenial', + openstack='cloud:xenial-newton', + source='cloud:xenial-updates/newton') deployment.run_tests() diff --git a/ceph-osd/tests/020-basic-wily-liberty b/ceph-osd/tests/dev-basic-yakkety-newton similarity index 82% rename from ceph-osd/tests/020-basic-wily-liberty rename to ceph-osd/tests/dev-basic-yakkety-newton index f84097c3..aac31445 100755 --- a/ceph-osd/tests/020-basic-wily-liberty +++ b/ceph-osd/tests/dev-basic-yakkety-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph-osd deployment on wily-liberty.""" +"""Amulet tests on a basic ceph-osd deployment on yakkety-newton.""" from basic_deployment import CephOsdBasicDeployment if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='wily') + deployment = CephOsdBasicDeployment(series='yakkety') deployment.run_tests() diff --git a/ceph-osd/tests/014-basic-precise-icehouse b/ceph-osd/tests/gate-basic-precise-icehouse similarity index 97% rename from ceph-osd/tests/014-basic-precise-icehouse rename to ceph-osd/tests/gate-basic-precise-icehouse index 7f921234..65c2abad 100755 --- a/ceph-osd/tests/014-basic-precise-icehouse +++ b/ceph-osd/tests/gate-basic-precise-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/015-basic-trusty-icehouse b/ceph-osd/tests/gate-basic-trusty-icehouse similarity index 97% rename from ceph-osd/tests/015-basic-trusty-icehouse rename to ceph-osd/tests/gate-basic-trusty-icehouse index d857e215..d0bb7793 100755 --- a/ceph-osd/tests/015-basic-trusty-icehouse +++ b/ceph-osd/tests/gate-basic-trusty-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/017-basic-trusty-kilo b/ceph-osd/tests/gate-basic-trusty-kilo similarity index 97% rename from ceph-osd/tests/017-basic-trusty-kilo rename to ceph-osd/tests/gate-basic-trusty-kilo index a2657fe4..5d30a670 100755 --- a/ceph-osd/tests/017-basic-trusty-kilo +++ b/ceph-osd/tests/gate-basic-trusty-kilo @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/018-basic-trusty-liberty b/ceph-osd/tests/gate-basic-trusty-liberty similarity index 97% rename from ceph-osd/tests/018-basic-trusty-liberty rename to ceph-osd/tests/gate-basic-trusty-liberty index 004a1bbb..41f1996a 100755 --- a/ceph-osd/tests/018-basic-trusty-liberty +++ b/ceph-osd/tests/gate-basic-trusty-liberty @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/019-basic-trusty-mitaka b/ceph-osd/tests/gate-basic-trusty-mitaka similarity index 97% rename from ceph-osd/tests/019-basic-trusty-mitaka rename to ceph-osd/tests/gate-basic-trusty-mitaka index 836a8253..0fd67940 100755 --- a/ceph-osd/tests/019-basic-trusty-mitaka +++ b/ceph-osd/tests/gate-basic-trusty-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # 
Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/021-basic-xenial-mitaka b/ceph-osd/tests/gate-basic-xenial-mitaka similarity index 97% rename from ceph-osd/tests/021-basic-xenial-mitaka rename to ceph-osd/tests/gate-basic-xenial-mitaka index 4f9e3b41..bbf91dd0 100755 --- a/ceph-osd/tests/021-basic-xenial-mitaka +++ b/ceph-osd/tests/gate-basic-xenial-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/tests/setup/00-setup b/ceph-osd/tests/setup/00-setup deleted file mode 100755 index 94e5611f..00000000 --- a/ceph-osd/tests/setup/00-setup +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -ex - -sudo add-apt-repository --yes ppa:juju/stable -sudo apt-get update --yes -sudo apt-get install --yes amulet \ - distro-info-data \ - python-cinderclient \ - python-distro-info \ - python-glanceclient \ - python-heatclient \ - python-keystoneclient \ - python-neutronclient \ - python-novaclient \ - python-pika \ - python-swiftclient diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 4d17631b..e3185c6d 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,21 +1,17 @@ -bootstrap: true -reset: false -virtualenv: true -makefile: - - lint - - test -sources: - - ppa:juju/stable -packages: - - amulet - - distro-info-data - - python-ceilometerclient - - python-cinderclient - - python-distro-info - - python-glanceclient - - python-heatclient - - python-keystoneclient - - python-neutronclient - - python-novaclient - - python-pika - - python-swiftclient +# Bootstrap the model if necessary. +bootstrap: True +# Re-use bootstrap node instead of destroying/re-bootstrapping. +reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. 
+#python-packages: diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 7f8650e8..b73c6444 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -5,6 +5,8 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + AMULET_SETUP_TIMEOUT=2700 +passenv = HOME TERM AMULET_HTTP_PROXY AMULET_OS_VIP install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} @@ -18,12 +20,56 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} actions hooks unit_tests tests +commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests actions charm-proof [testenv:venv] commands = {posargs} +[testenv:func27-noop] +# DRY RUN - For Debug +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + +[testenv:func27] +# Charm Functional Test +# Run all gate tests which are +x (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + +[testenv:func27-smoke] +# Charm Functional Test +# Run a specific test as an Amulet smoke test (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + +[testenv:func27-dfs] +# Charm Functional Test +# Run all deploy-from-source tests which are +x (may not always pass!) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + +[testenv:func27-dev] +# Charm Functional Test +# Run all development test targets which are +x (may not always pass!) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + [flake8] ignore = E402,E226 exclude = hooks/charmhelpers From 8381911b38e99e64740f95912897bbad46ad3498 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 21 Jul 2016 18:13:40 +0000 Subject: [PATCH 1177/2699] Use bundletester for amulet test execution Switch to using bundletester for execution of functional tests, leveraging tox to build out test virtualenvs. Rename amulet tests inline with gate-*, dev-* and dfs-* naming standards. Update README to refer to functional testing section of the charm guide. 
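The tox setenv addition (AMULET_SETUP_TIMEOUT=2700) pairs with the deployment-helper change from the charm-helpers syncs earlier in this series, which reads that variable instead of hard-coding 900 seconds. The two pieces together, condensed from the hunks above:

    import os

    import amulet

    def deploy(d):
        # tox exports AMULET_SETUP_TIMEOUT=2700 for these charms; the
        # 900s default preserves the old behaviour when it is unset.
        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
        try:
            d.setup(timeout=timeout)
            d.sentry.wait(timeout=timeout)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(
                amulet.FAIL,
                msg='Deployment timed out ({}s)'.format(timeout))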
Change-Id: Id91a7281d98be3bb69d42257ea9213471bdefa69 --- ceph-radosgw/Makefile | 3 +- ceph-radosgw/test-requirements.txt | 16 +++ ceph-radosgw/tests/README | 113 ------------------ ceph-radosgw/tests/README.md | 9 ++ ceph-radosgw/tests/basic_deployment.py | 26 ++-- ...ic-trusty-juno => dev-basic-xenial-newton} | 10 +- ...-wily-liberty => dev-basic-yakkety-newton} | 6 +- ...e-icehouse => gate-basic-precise-icehouse} | 2 +- ...ty-icehouse => gate-basic-trusty-icehouse} | 2 +- ...sic-trusty-kilo => gate-basic-trusty-kilo} | 2 +- ...usty-liberty => gate-basic-trusty-liberty} | 2 +- ...trusty-mitaka => gate-basic-trusty-mitaka} | 2 +- ...xenial-mitaka => gate-basic-xenial-mitaka} | 2 +- ceph-radosgw/tests/setup/00-setup | 17 --- ceph-radosgw/tests/tests.yaml | 38 +++--- ceph-radosgw/tox.ini | 48 +++++++- 16 files changed, 117 insertions(+), 181 deletions(-) delete mode 100644 ceph-radosgw/tests/README create mode 100644 ceph-radosgw/tests/README.md rename ceph-radosgw/tests/{016-basic-trusty-juno => dev-basic-xenial-newton} (68%) rename ceph-radosgw/tests/{020-basic-wily-liberty => dev-basic-yakkety-newton} (81%) rename ceph-radosgw/tests/{014-basic-precise-icehouse => gate-basic-precise-icehouse} (97%) rename ceph-radosgw/tests/{015-basic-trusty-icehouse => gate-basic-trusty-icehouse} (97%) rename ceph-radosgw/tests/{017-basic-trusty-kilo => gate-basic-trusty-kilo} (97%) rename ceph-radosgw/tests/{018-basic-trusty-liberty => gate-basic-trusty-liberty} (97%) rename ceph-radosgw/tests/{019-basic-trusty-mitaka => gate-basic-trusty-mitaka} (97%) rename ceph-radosgw/tests/{021-basic-xenial-mitaka => gate-basic-xenial-mitaka} (97%) delete mode 100755 ceph-radosgw/tests/setup/00-setup diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index cfe78e3d..ec26c512 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -10,8 +10,7 @@ test: functional_test: @echo Starting Amulet tests... - @tests/setup/00-setup - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 + @tox -e func27 bin/charm_helpers_sync.py: @mkdir -p bin diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 4faf2545..74baa120 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -7,3 +7,19 @@ flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 charm-tools>=2.0.0 requests==2.6.0 +# BEGIN: Amulet OpenStack Charm Helper Requirements +# Liberty client lower constraints +amulet>=1.14.3,<2.0 +bundletester>=0.6.1,<1.0 +python-ceilometerclient>=1.5.0,<2.0 +python-cinderclient>=1.4.0,<2.0 +python-glanceclient>=1.1.0,<2.0 +python-heatclient>=0.8.0,<1.0 +python-keystoneclient>=1.7.1,<2.0 +python-neutronclient>=3.1.0,<4.0 +python-novaclient>=2.30.1,<3.0 +python-openstackclient>=1.7.0,<2.0 +python-swiftclient>=2.6.0,<3.0 +pika>=0.10.0,<1.0 +distro-info +# END: Amulet OpenStack Charm Helper Requirements diff --git a/ceph-radosgw/tests/README b/ceph-radosgw/tests/README deleted file mode 100644 index 79c5b063..00000000 --- a/ceph-radosgw/tests/README +++ /dev/null @@ -1,113 +0,0 @@ -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -Reference: lp:openstack-charm-testing for full test bundles. - -A single topology and configuration is defined and deployed, once for each of -the defined Ubuntu:OpenStack release combos. 
The ongoing goal is for this -charm to always possess tests and combo definitions for all currently-supported -release combinations of U:OS. - -test_* methods are called in lexical sort order, as with most runners. However, -each individual test method should be idempotent and expected to pass regardless -of run order or Ubuntu:OpenStack combo. When writing or modifying tests, -ensure that every individual test is not dependent on another test_ method. - -Test naming convention, purely for code organization purposes: - 1xx service and endpoint checks - 2xx relation checks - 3xx config checks - 4xx functional checks - 9xx restarts, config changes, actions and other final checks - -In order to run tests, charm-tools and juju must be installed: - sudo add-apt-repository ppa:juju/stable - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer amulet - -Alternatively, tests may be exercised with proposed or development versions -of juju and related tools: - - # juju proposed version - sudo add-apt-repository ppa:juju/proposed - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - - # juju development version - sudo add-apt-repository ppa:juju/devel - sudo apt-get update - sudo apt-get install charm-tools juju juju-deployer - -Some tests may need to download files. If a web proxy server is required in -the environment, the AMULET_HTTP_PROXY environment variable must be set and -passed into the juju test command. This is unrelated to juju's http proxy -settings or behavior. - -The following examples demonstrate different ways that tests can be executed. -All examples are run from the charm's root directory. - - * To run all +x tests in the tests directory: - - bzr branch lp:charms/trusty/foo - cd foo - make functional_test - - * To run the tests against a specific release combo as defined in tests/: - - bzr branch lp:charms/trusty/foo - cd foo - juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To run tests and keep the juju environment deployed after a failure: - - bzr branch lp:charms/trusty/foo - cd foo - juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse - - * To re-run a test module against an already deployed environment (one - that was deployed by a previous call to 'juju test --set-e'): - - ./tests/015-basic-trusty-icehouse - - * Even with --set-e, `juju test` will tear down the deployment when all - tests pass. The following work flow may be more effective when - iterating on test writing. - - bzr branch lp:charms/trusty/foo - cd foo - ./tests/setup/00-setup - juju bootstrap - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - # make some changes, run tests again - ./tests/015-basic-trusty-icehouse - - * There may be test definitions in the tests/ dir which are not set +x - executable. This is generally true for deprecated releases, or for - upcoming releases which are not yet validated and enabled. To enable - and run these tests: - bzr branch lp:charms/trusty/foo - cd foo - ls tests - chmod +x tests/017-basic-trusty-kilo - ./tests/setup/00-setup - juju bootstrap - ./tests/017-basic-trusty-kilo - - -Additional notes: - - * Use DEBUG to turn on debug logging, use ERROR otherwise. 
- u = OpenStackAmuletUtils(ERROR) - u = OpenStackAmuletUtils(DEBUG) - - * To interact with the deployed environment: - export OS_USERNAME=admin - export OS_PASSWORD=openstack - export OS_TENANT_NAME=admin - export OS_REGION_NAME=RegionOne - export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0 - keystone user-list - glance image-list diff --git a/ceph-radosgw/tests/README.md b/ceph-radosgw/tests/README.md new file mode 100644 index 00000000..046be7fb --- /dev/null +++ b/ceph-radosgw/tests/README.md @@ -0,0 +1,9 @@ +# Overview + +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) +section of the OpenStack Charm Guide. diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 0c45e6ac..a926b13a 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -144,16 +144,16 @@ def _wait_on_action(self, action_id): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry.unit['mysql/0'] - self.keystone_sentry = self.d.sentry.unit['keystone/0'] - self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] - self.nova_sentry = self.d.sentry.unit['nova-compute/0'] - self.glance_sentry = self.d.sentry.unit['glance/0'] - self.cinder_sentry = self.d.sentry.unit['cinder/0'] - self.ceph0_sentry = self.d.sentry.unit['ceph/0'] - self.ceph1_sentry = self.d.sentry.unit['ceph/1'] - self.ceph2_sentry = self.d.sentry.unit['ceph/2'] - self.ceph_radosgw_sentry = self.d.sentry.unit['ceph-radosgw/0'] + self.mysql_sentry = self.d.sentry['mysql'][0] + self.keystone_sentry = self.d.sentry['keystone'][0] + self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] + self.nova_sentry = self.d.sentry['nova-compute'][0] + self.glance_sentry = self.d.sentry['glance'][0] + self.cinder_sentry = self.d.sentry['cinder'][0] + self.ceph0_sentry = self.d.sentry['ceph'][0] + self.ceph1_sentry = self.d.sentry['ceph'][1] + self.ceph2_sentry = self.d.sentry['ceph'][2] + self.ceph_radosgw_sentry = self.d.sentry['ceph-radosgw'][0] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( @@ -551,8 +551,8 @@ def test_499_ceph_cmds_exit_zero(self): def test_910_pause_and_resume(self): """The services can be paused and resumed. 
""" u.log.debug('Checking pause and resume actions...') - unit_name = "ceph-radosgw/0" - unit = self.d.sentry.unit[unit_name] + unit = self.ceph_radosgw_sentry + unit_name = unit.info['unit_name'] assert u.status_get(unit)[0] == "active" diff --git a/ceph-radosgw/tests/016-basic-trusty-juno b/ceph-radosgw/tests/dev-basic-xenial-newton similarity index 68% rename from ceph-radosgw/tests/016-basic-trusty-juno rename to ceph-radosgw/tests/dev-basic-xenial-newton index d95bebc6..f53a3391 100755 --- a/ceph-radosgw/tests/016-basic-trusty-juno +++ b/ceph-radosgw/tests/dev-basic-xenial-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph-radosgw deployment on trusty-juno.""" +"""Amulet tests on a basic ceph-radosgw deployment on xenial-newton.""" from basic_deployment import CephRadosGwBasicDeployment if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty', - openstack='cloud:trusty-juno', - source='cloud:trusty-updates/juno') + deployment = CephRadosGwBasicDeployment(series='xenial', + openstack='cloud:xenial-newton', + source='cloud:xenial-updates/newton') deployment.run_tests() diff --git a/ceph-radosgw/tests/020-basic-wily-liberty b/ceph-radosgw/tests/dev-basic-yakkety-newton similarity index 81% rename from ceph-radosgw/tests/020-basic-wily-liberty rename to ceph-radosgw/tests/dev-basic-yakkety-newton index 089140dc..0b32576e 100755 --- a/ceph-radosgw/tests/020-basic-wily-liberty +++ b/ceph-radosgw/tests/dev-basic-yakkety-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Amulet tests on a basic ceph-radosgw deployment on wily-liberty.""" +"""Amulet tests on a basic ceph-radosgw deployment on yakkety-newton.""" from basic_deployment import CephRadosGwBasicDeployment if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='wily') + deployment = CephRadosGwBasicDeployment(series='yakkety') deployment.run_tests() diff --git a/ceph-radosgw/tests/014-basic-precise-icehouse b/ceph-radosgw/tests/gate-basic-precise-icehouse similarity index 97% rename from ceph-radosgw/tests/014-basic-precise-icehouse rename to ceph-radosgw/tests/gate-basic-precise-icehouse index 4acef0d8..925dfbda 100755 --- a/ceph-radosgw/tests/014-basic-precise-icehouse +++ b/ceph-radosgw/tests/gate-basic-precise-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/015-basic-trusty-icehouse b/ceph-radosgw/tests/gate-basic-trusty-icehouse similarity index 97% rename from ceph-radosgw/tests/015-basic-trusty-icehouse rename to ceph-radosgw/tests/gate-basic-trusty-icehouse index 5829eef8..46066a7d 100755 --- a/ceph-radosgw/tests/015-basic-trusty-icehouse +++ b/ceph-radosgw/tests/gate-basic-trusty-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/017-basic-trusty-kilo b/ceph-radosgw/tests/gate-basic-trusty-kilo similarity index 97% rename from ceph-radosgw/tests/017-basic-trusty-kilo rename to ceph-radosgw/tests/gate-basic-trusty-kilo index 30c18d19..ebbad248 100755 --- a/ceph-radosgw/tests/017-basic-trusty-kilo +++ b/ceph-radosgw/tests/gate-basic-trusty-kilo @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/018-basic-trusty-liberty b/ceph-radosgw/tests/gate-basic-trusty-liberty similarity index 97% rename from ceph-radosgw/tests/018-basic-trusty-liberty rename to ceph-radosgw/tests/gate-basic-trusty-liberty index 9a4dae35..f5579f5f 100755 --- a/ceph-radosgw/tests/018-basic-trusty-liberty +++ b/ceph-radosgw/tests/gate-basic-trusty-liberty @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/019-basic-trusty-mitaka b/ceph-radosgw/tests/gate-basic-trusty-mitaka similarity index 97% rename from ceph-radosgw/tests/019-basic-trusty-mitaka rename to ceph-radosgw/tests/gate-basic-trusty-mitaka index b53a0fe1..6f8e1484 100755 --- a/ceph-radosgw/tests/019-basic-trusty-mitaka +++ b/ceph-radosgw/tests/gate-basic-trusty-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/021-basic-xenial-mitaka b/ceph-radosgw/tests/gate-basic-xenial-mitaka similarity index 97% rename from ceph-radosgw/tests/021-basic-xenial-mitaka rename to ceph-radosgw/tests/gate-basic-xenial-mitaka index 4de2f125..aa5d2db2 100755 --- a/ceph-radosgw/tests/021-basic-xenial-mitaka +++ b/ceph-radosgw/tests/gate-basic-xenial-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/tests/setup/00-setup b/ceph-radosgw/tests/setup/00-setup deleted file mode 100755 index 94e5611f..00000000 --- a/ceph-radosgw/tests/setup/00-setup +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -ex - -sudo add-apt-repository --yes ppa:juju/stable -sudo apt-get update --yes -sudo apt-get install --yes amulet \ - distro-info-data \ - python-cinderclient \ - python-distro-info \ - 
python-glanceclient \ - python-heatclient \ - python-keystoneclient \ - python-neutronclient \ - python-novaclient \ - python-pika \ - python-swiftclient diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 4d17631b..e3185c6d 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,21 +1,17 @@ -bootstrap: true -reset: false -virtualenv: true -makefile: - - lint - - test -sources: - - ppa:juju/stable -packages: - - amulet - - distro-info-data - - python-ceilometerclient - - python-cinderclient - - python-distro-info - - python-glanceclient - - python-heatclient - - python-keystoneclient - - python-neutronclient - - python-novaclient - - python-pika - - python-swiftclient +# Bootstrap the model if necessary. +bootstrap: True +# Re-use bootstrap node instead of destroying/re-bootstrapping. +reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. +#python-packages: diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index a4d9c0d8..b73c6444 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -5,6 +5,8 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + AMULET_SETUP_TIMEOUT=2700 +passenv = HOME TERM AMULET_HTTP_PROXY AMULET_OS_VIP install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} @@ -18,12 +20,56 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests +commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests actions charm-proof [testenv:venv] commands = {posargs} +[testenv:func27-noop] +# DRY RUN - For Debug +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + +[testenv:func27] +# Charm Functional Test +# Run all gate tests which are +x (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + +[testenv:func27-smoke] +# Charm Functional Test +# Run a specific test as an Amulet smoke test (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + +[testenv:func27-dfs] +# Charm Functional Test +# Run all deploy-from-source tests which are +x (may not always pass!) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + +[testenv:func27-dev] +# Charm Functional Test +# Run all development test targets which are +x (may not always pass!) 
+basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + [flake8] ignore = E402,E226 exclude = hooks/charmhelpers From 1764bb3872fc2aa0bc2bce2bd06e7ed4f2698075 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 21 Jul 2016 13:10:14 -0700 Subject: [PATCH 1178/2699] Fix directory ownership as part of upgrade This change ensures that when ceph is upgraded from an older version that uses root to a newer version that uses ceph as the process owner, all directories are chowned. Closes-Bug: 1600338 Change-Id: Ifac8cde6e6ea6f3a366fb40b9ffd261036720310 --- ceph-osd/hooks/ceph_hooks.py | 9 +++++++-- ceph-osd/unit_tests/test_upgrade_roll.py | 7 +++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index d6240fbb..f2b18bd6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -44,8 +44,8 @@ mkdir, cmp_pkgrevno, service_stop, - service_start -) + service_start, + chownr) from charmhelpers.fetch import ( add_source, apt_install, @@ -258,6 +258,11 @@ def upgrade_osd(): else: service_stop('ceph-osd-all') apt_install(packages=ceph.PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph.ceph_user(), + group=ceph.ceph_user()) if ceph.systemd(): for osd_id in ceph.get_local_osd_ids(): service_start('ceph-osd@{}'.format(osd_id)) diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index a3535b5c..a3a6f260 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -35,6 +35,7 @@ 'service_stop', 'socket', 'status_set', + 'chownr', ] @@ -87,6 +88,7 @@ def test_lock_and_roll(self, monitor_key_set, upgrade_osd): def test_upgrade_osd(self): self.config.side_effect = config_side_effect self.ceph.get_version.return_value = "0.80" + self.ceph.ceph_user.return_value = "ceph" self.ceph.systemd.return_value = False ceph_hooks.upgrade_osd() self.service_stop.assert_called_with('ceph-osd-all') @@ -94,6 +96,11 @@ def test_upgrade_osd(self): self.status_set.assert_has_calls([ call('maintenance', 'Upgrading osd'), ]) + self.chownr.assert_has_calls( + [ + call(group='ceph', owner='ceph', path='/var/lib/ceph') + ] + ) @patch('ceph_hooks.lock_and_roll') @patch('ceph_hooks.get_upgrade_position') From fcfbd25b353d44b12ea8a953ac688b943d087262 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 21 Jul 2016 13:08:14 -0700 Subject: [PATCH 1179/2699] Fix directory ownership as part of upgrade This change ensures that when ceph is upgraded from an older version that uses root to a newer version that uses ceph as the process owner, all directories are chowned.
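Concretely, the upgrade hook now re-chowns Ceph's state directory after apt_install() and before the daemons are restarted, as the diff below shows. A standalone sketch of the added call, equivalent in effect to chown -R ceph:ceph /var/lib/ceph (the real hook resolves owner and group through ceph.ceph_user(), which only returns 'ceph' on releases new enough to run as non-root; 'ceph' is hardcoded here for brevity):

    import os

    from charmhelpers.core.host import chownr

    # Recursively reset ownership of /var/lib/ceph so daemons that now run
    # as the 'ceph' user can still read and write their data directories.
    chownr(path=os.path.join(os.sep, 'var', 'lib', 'ceph'),
           owner='ceph',
           group='ceph')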
Closes-Bug: 1600338 Change-Id: I162ebdbeee0d7bae4866b6462f98f11e3892b7ba --- ceph-mon/hooks/ceph_hooks.py | 9 ++++++++- ceph-mon/unit_tests/test_upgrade_roll.py | 7 +++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index d6a43405..6dd61061 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -47,7 +47,9 @@ write_file, rsync, cmp_pkgrevno, - service_stop, service_start) + service_stop, + service_start, + chownr) from charmhelpers.fetch import ( apt_install, apt_update, @@ -255,6 +257,11 @@ def upgrade_monitor(): else: service_stop('ceph-mon-all') apt_install(packages=ceph.PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph.ceph_user(), + group=ceph.ceph_user()) if ceph.systemd(): for mon_id in ceph.get_local_mon_ids(): service_start('ceph-mon@{}'.format(mon_id)) diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py index bb219bfa..b8edcbd7 100644 --- a/ceph-mon/unit_tests/test_upgrade_roll.py +++ b/ceph-mon/unit_tests/test_upgrade_roll.py @@ -42,6 +42,7 @@ 'service_stop', 'service_start', 'host', + 'chownr', ] @@ -94,6 +95,7 @@ def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): def test_upgrade_monitor(self): self.config.side_effect = config_side_effect self.ceph.get_version.return_value = "0.80" + self.ceph.ceph_user.return_value = "ceph" self.ceph.systemd.return_value = False ceph_hooks.upgrade_monitor() self.service_stop.assert_called_with('ceph-mon-all') @@ -102,6 +104,11 @@ def test_upgrade_monitor(self): call('maintenance', 'Upgrading monitor'), call('active', '') ]) + self.chownr.assert_has_calls( + [ + call(group='ceph', owner='ceph', path='/var/lib/ceph') + ] + ) @patch('ceph_hooks.lock_and_roll') @patch('ceph_hooks.wait_on_previous_node') From 9c45cd2ff9fde76bf8165ce19e05b1e7637fa16e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 1 Aug 2016 14:52:17 -0400 Subject: [PATCH 1180/2699] Migrate to shared lib This change moves our ceph.py and ceph_broker.py into a separate repository that we can share between various ceph-related Juju projects, along with a Makefile change to use a new git_sync file to partially sync a git repository into a specified path. Change-Id: I2a6907890965b57d56167e1c73add6532792746d --- ceph-mon/Makefile | 10 +- ceph-mon/hooks/ceph.py | 557 -------- ceph-mon/hooks/ceph_hooks.py | 11 +- ceph-mon/lib/ceph/__init__.py | 0 ceph-mon/lib/ceph/ceph/__init__.py | 1 + ceph-mon/lib/ceph/ceph/ceph.py | 1183 +++++++++++++++++ .../{hooks => lib/ceph/ceph}/ceph_broker.py | 2 +- ceph-mon/unit_tests/__init__.py | 1 + ceph-mon/unit_tests/test_ceph_broker.py | 122 -- ceph-mon/unit_tests/test_ceph_ops.py | 2 +- 10 files changed, 1203 insertions(+), 686 deletions(-) delete mode 100644 ceph-mon/hooks/ceph.py create mode 100644 ceph-mon/lib/ceph/__init__.py create mode 100644 ceph-mon/lib/ceph/ceph/__init__.py create mode 100644 ceph-mon/lib/ceph/ceph/ceph.py rename ceph-mon/{hooks => lib/ceph/ceph}/ceph_broker.py (99%) delete mode 100644 ceph-mon/unit_tests/test_ceph_broker.py diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index fe31123d..eb6a2867 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -17,9 +17,17 @@ bin/charm_helpers_sync.py: @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ > bin/charm_helpers_sync.py -sync: bin/charm_helpers_sync.py +bin/git_sync.py: + @mkdir
-p bin + @wget -O bin/git_sync.py https://raw.githubusercontent.com/ChrisMacNaughton/git-sync/master/git_sync.py + +ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml +git-sync: bin/git_sync.py + $(PYTHON) bin/git_sync.py -d lib/ceph -s https://github.com/CanonicalLtd/charms_ceph.git + +sync: git-sync ch-sync publish: lint test bzr push lp:charms/ceph diff --git a/ceph-mon/hooks/ceph.py b/ceph-mon/hooks/ceph.py deleted file mode 100644 index f097f7fb..00000000 --- a/ceph-mon/hooks/ceph.py +++ /dev/null @@ -1,557 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import subprocess -import time -import os -import re -import sys - -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - zap_disk, - is_device_mounted) -from charmhelpers.core.host import ( - mkdir, - chownr, - service_restart, - lsb_release, - cmp_pkgrevno) -from charmhelpers.core.hookenv import ( - log, - ERROR, - cached, - status_set, - WARNING) -from charmhelpers.fetch import ( - apt_cache -) -from utils import ( - get_unit_hostname, -) - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] - - -def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" - - -def get_local_mon_ids(): - """ - This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of monitor identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search('ceph-(?P.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - cache = apt_cache() - package = "ceph" - try: - pkg = cache[package] - except: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - # package is known, but no version is currently installed. 
- e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: %s" % msg, - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for quorum to be reached") - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - - -def is_osd_disk(dev): - try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): - return True - except subprocess.CalledProcessError: - pass - return False - - -def start_osds(devices): - # Scan for ceph block devices - rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = subprocess.check_output(cmd) - - return "{}==".format(res.split('=')[1].strip()) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow 
command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) - - -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} - -admin_caps = { - 'mds': ['allow'], - 'mon': ['allow *'], - 'osd': ['allow *'] -} - -osd_upgrade_caps = { - 'mon': ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ] -} - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps): - """ Upgrade key to have capabilities caps """ - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.iteritems(): - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') - - -def bootstrap_monitor_cluster(secret): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), 
perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - except: - raise - finally: - os.unlink(keyring) - - -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False): - if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors) - else: - osdize_dir(dev) - - -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev) and not reformat_osd: - log('Looks like {} is already an OSD, skipping.'.format(dev)) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - if reformat_osd: - cmd.append('--zap-disk') - cmd.append(dev) - if osd_journal and os.path.exists(osd_journal): - cmd.append(osd_journal) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) - - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - raise e - - -def osdize_dir(path): - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return - - if cmp_pkgrevno('ceph', "0.56.6") < 0: - log('Unable to use directories for OSDs with ceph < 0.56.6', - level=ERROR) - raise - - mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - chownr('/var/lib/ceph', ceph_user(), ceph_user()) - cmd = [ - 'sudo', '-u', ceph_user(), - 'ceph-disk', - 'prepare', - '--data-dir', - path - ] - subprocess.check_call(cmd) - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 6dd61061..45df5bdb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -22,7 +22,12 @@ import uuid import time -import ceph +sys.path.append('lib') +from ceph.ceph import ceph +from ceph.ceph.ceph_broker import ( + process_requests +) + from 
charmhelpers.core import host from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( @@ -76,9 +81,7 @@ get_cluster_addr, assert_charm_supports_ipv6 ) -from ceph_broker import ( - process_requests -) + from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/lib/ceph/ceph/__init__.py b/ceph-mon/lib/ceph/ceph/__init__.py new file mode 100644 index 00000000..9847ec9e --- /dev/null +++ b/ceph-mon/lib/ceph/ceph/__init__.py @@ -0,0 +1 @@ +__author__ = 'chris' diff --git a/ceph-mon/lib/ceph/ceph/ceph.py b/ceph-mon/lib/ceph/ceph/ceph.py new file mode 100644 index 00000000..4b68e039 --- /dev/null +++ b/ceph-mon/lib/ceph/ceph/ceph.py @@ -0,0 +1,1183 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +import json +import subprocess +import time +import os +import re +import sys +import errno +import shutil + +from charmhelpers.core import hookenv + +from charmhelpers.core.host import ( + mkdir, + chownr, + service_restart, + lsb_release, + cmp_pkgrevno, service_stop, mounts) +from charmhelpers.core.hookenv import ( + log, + ERROR, + cached, + status_set, + WARNING, DEBUG) +from charmhelpers.core.services import render_template +from charmhelpers.fetch import ( + apt_cache +) + +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, + is_device_mounted) +from utils import ( + get_unit_hostname, +) + + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +def save_sysctls(sysctl_dict, save_location): + """ + Persist the sysctls to the hard drive. 
+ :param sysctl_dict: dict + :param save_location: path to save the settings to + :raise: IOError if anything goes wrong with writing. + """ + try: + # Persist the settings for reboots + with open(save_location, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + except IOError as e: + log("Unable to persist sysctl settings to {}. Error {}".format( + save_location, e.message), level=ERROR) + raise + + +def tune_nic(network_interface): + """ + This will set optimal sysctls for the particular network adapter. + :param network_interface: string The network adapter name. + """ + speed = get_link_speed(network_interface) + if speed in NETWORK_ADAPTER_SYSCTLS: + status_set('maintenance', 'Tuning device {}'.format( + network_interface)) + sysctl_file = os.path.join( + os.sep, + 'etc', + 'sysctl.d', + '51-ceph-osd-charm-{}.conf'.format(network_interface)) + try: + log("Saving sysctl_file: {} values: {}".format( + sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), + level=DEBUG) + save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], + save_location=sysctl_file) + except IOError as e: + log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " + "failed. {}".format(network_interface, e.message), + level=ERROR) + + try: + # Apply the settings + log("Applying sysctl settings", level=DEBUG) + subprocess.check_output(["sysctl", "-p", sysctl_file]) + except subprocess.CalledProcessError as err: + log('sysctl -p {} failed with error {}'.format(sysctl_file, + err.output), + level=ERROR) + else: + log("No settings found for network adapter: {}".format( + network_interface), level=DEBUG) + + +def get_link_speed(network_interface): + """ + This will find the link speed for a given network device. Returns None + if an error occurs. + :param network_interface: string The network adapter interface. + :return: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e.message), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ + This will persist the hard drive settings to the /etc/hdparm.conf file + The settings_dict should be in the form of {"uuid": {"key":"value"}} + :param settings_dict: dict of settings to save + """ + hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + try: + with open(hdparm_path, 'w') as hdparm: + hdparm.write(render_template('hdparm.conf', settings_dict)) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=hdparm_path, + error=err.message), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """ + This function sets the max_sectors_kb size of a given block device. 
+ :param dev_name: Name of the block device to query + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(max_sectors_size) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """ + This function gets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_sectors_kb or 0 on error. + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """ + This function gets the max_hw_sectors_kb for a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_hw_sectors_kb or 0 on error. + """ + max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( + max_hw_sectors_kb_path, e.message), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """ + This function sets the hard drive read ahead. + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """ + This queries blkid to get the uuid for a block device. + :param block_dev: Name of the block device to query. + :return: The UUID of the device or None on Error. + """ + try: + block_info = subprocess.check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """ + Tune the max_hw_sectors if needed. + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. 
+ :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """ + Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + This function will change the read ahead sectors and the max write + sectors for each block device. + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. 
Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_tree(service): + """ + Returns the current osd map in JSON. + :return: List. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + child_ids = json_tree['nodes'][0]['children'] + for child in json_tree['nodes']: + if child['id'] in child_ids: + crush_list.append( + CrushLocation( + name=child.get('name'), + identifier=child['id'], + host=child.get('host'), + rack=child.get('rack'), + row=child.get('row'), + datacenter=child.get('datacenter'), + chassis=child.get('chassis'), + root=child.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_local_osd_ids(): + """ + This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of osd identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + if _is_int(osd_id): + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. 
A list of monitor identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + +def _is_int(v): + """Return True if the object v can be turned into an integer.""" + try: + int(v) + return True + except ValueError: + return False + + +def get_version(): + """Derive Ceph release from an installed package.""" + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation ' \ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: %s" % msg, + level=ERROR) + sys.exit(1) + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + log("Waiting for quorum to be reached") + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' +] + +CEPH_PARTITIONS = [ + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal +] + + +def umount(mount_point): + """ + This function unmounts a mounted directory forcibly. This will + be used for unmounting broken hard drive mounts which may hang. + If umount returns EBUSY this will lazy unmount. + :param mount_point: str. A String representing the filesystem mount point + :return: int. Returns 0 on success. errno otherwise. 
+ """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """ + This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. Example: /dev/sda + :param osd_format: + :param osd_journal: + :param reformat_osd: + :param ignore_errors: + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]), + # umount if still mounted + ret = umount(mount_point) + if ret < 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError: + pass + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = 
"/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} + +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.{}'.format(name), + ] + # Add capabilities + for subsystem, subcaps in 
+
+
+def get_named_key(name, caps=None):
+    caps = caps or _default_caps
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        'ceph',
+        '--name', 'mon.',
+        '--keyring',
+        '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+            get_unit_hostname()
+        ),
+        'auth', 'get-or-create', 'client.{}'.format(name),
+    ]
+    # Add capabilities
+    for subsystem, subcaps in caps.iteritems():
+        cmd.extend([
+            subsystem,
+            '; '.join(subcaps),
+        ])
+    return parse_key(subprocess.check_output(cmd).strip())  # IGNORE:E1103
+
+
+def upgrade_key_caps(key, caps):
+    """ Upgrade key to have capabilities caps """
+    if not is_leader():
+        # Not the MON leader OR not clustered
+        return
+    cmd = [
+        "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key
+    ]
+    for subsystem, subcaps in caps.iteritems():
+        cmd.extend([subsystem, '; '.join(subcaps)])
+    subprocess.check_call(cmd)
+
+
+@cached
+def systemd():
+    return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid')
+
+
+def bootstrap_monitor_cluster(secret):
+    hostname = get_unit_hostname()
+    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+    done = '{}/done'.format(path)
+    if systemd():
+        init_marker = '{}/systemd'.format(path)
+    else:
+        init_marker = '{}/upstart'.format(path)
+
+    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
+
+    if os.path.exists(done):
+        log('bootstrap_monitor_cluster: mon already initialized.')
+    else:
+        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
+        mkdir('/var/run/ceph', owner=ceph_user(),
+              group=ceph_user(), perms=0o755)
+        mkdir(path, owner=ceph_user(), group=ceph_user())
+        # end changes for Ceph >= 0.61.3
+        try:
+            subprocess.check_call(['ceph-authtool', keyring,
+                                   '--create-keyring', '--name=mon.',
+                                   '--add-key={}'.format(secret),
+                                   '--cap', 'mon', 'allow *'])
+
+            subprocess.check_call(['ceph-mon', '--mkfs',
+                                   '-i', hostname,
+                                   '--keyring', keyring])
+            chownr(path, ceph_user(), ceph_user())
+            with open(done, 'w'):
+                pass
+            with open(init_marker, 'w'):
+                pass
+
+            if systemd():
+                subprocess.check_call(['systemctl', 'enable', 'ceph-mon'])
+                service_restart('ceph-mon')
+            else:
+                service_restart('ceph-mon-all')
+        finally:
+            os.unlink(keyring)
+
+
+def update_monfs():
+    hostname = get_unit_hostname()
+    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+    if systemd():
+        init_marker = '{}/systemd'.format(monfs)
+    else:
+        init_marker = '{}/upstart'.format(monfs)
+    if os.path.exists(monfs) and not os.path.exists(init_marker):
+        # Mark mon as managed by upstart/systemd so that
+        # it gets started correctly on reboots
+        with open(init_marker, 'w'):
+            pass
+
+
+def maybe_zap_journal(journal_dev):
+    if is_osd_disk(journal_dev):
+        log('Looks like {} is already an OSD data'
+            ' or journal, skipping.'.format(journal_dev))
+        return
+    zap_disk(journal_dev)
+    log("Zapped journal device {}".format(journal_dev))
+
+
+def get_partitions(dev):
+    cmd = ['partx', '--raw', '--noheadings', dev]
+    try:
+        out = subprocess.check_output(cmd).splitlines()
+        log("get partitions: {}".format(out), level=DEBUG)
+        return out
+    except subprocess.CalledProcessError as e:
+        log("Can't get info for {0}: {1}".format(dev, e.output))
+        return []
+
+
+def find_least_used_journal(journal_devices):
+    usages = map(lambda a: (len(get_partitions(a)), a), journal_devices)
+    least = min(usages, key=lambda t: t[0])
+    return least[1]
+
+
+def osdize(dev, osd_format, osd_journal, reformat_osd=False,
+           ignore_errors=False, encrypt=False):
+    if dev.startswith('/dev'):
+        osdize_dev(dev, osd_format, osd_journal,
+                   reformat_osd, ignore_errors, encrypt)
+    else:
+        osdize_dir(dev, encrypt)
+
+
+def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
+               ignore_errors=False, encrypt=False):
+    if not os.path.exists(dev):
+        log('Path {} does not exist - bailing'.format(dev))
+        return
+
+    if not is_block_device(dev):
+        log('Path {} is not a block device - bailing'.format(dev))
+        return
+
+    if is_osd_disk(dev) and not 
reformat_osd: + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + status_set('maintenance', 'Initializing device {}'.format(dev)) + cmd = ['ceph-disk', 'prepare'] + # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + if cmp_pkgrevno('ceph', '0.48.3') >= 0: + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') + cmd.append(dev) + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + if reformat_osd: + zap_disk(dev) + + try: + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + raise e + + +def osdize_dir(path, encrypt=False): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if cmp_pkgrevno('ceph', "0.56.6") < 0: + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) + cmd = [ + 'sudo', '-u', ceph_user(), + 'ceph-disk', + 'prepare', + '--data-dir', + path + ] + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + log("osdize dir cmd: {}".format(cmd)) + subprocess.check_call(cmd) + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + +def get_running_osds(): + """Returns a list of the pids of the current running OSD daemons""" + cmd = ['pgrep', 'ceph-osd'] + try: + result = subprocess.check_output(cmd) + return result.split() + except subprocess.CalledProcessError: + return [] diff --git a/ceph-mon/hooks/ceph_broker.py b/ceph-mon/lib/ceph/ceph/ceph_broker.py similarity index 99% rename from ceph-mon/hooks/ceph_broker.py rename to ceph-mon/lib/ceph/ceph/ceph_broker.py index a3b9451d..ab4d8022 100644 --- a/ceph-mon/hooks/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph/ceph_broker.py @@ -22,7 +22,7 @@ INFO, ERROR, ) -from charmhelpers.contrib.storage.linux.ceph import ( +from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, erasure_profile_exists, diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py index b7fe4e1b..8d6182b7 100644 --- a/ceph-mon/unit_tests/__init__.py +++ b/ceph-mon/unit_tests/__init__.py @@ -14,3 +14,4 @@ import sys sys.path.append('hooks') +sys.path.append('lib') diff --git a/ceph-mon/unit_tests/test_ceph_broker.py b/ceph-mon/unit_tests/test_ceph_broker.py deleted file mode 100644 index 81ce8f79..00000000 --- a/ceph-mon/unit_tests/test_ceph_broker.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import mock -import unittest - -import ceph_broker - - -class CephBrokerTestCase(unittest.TestCase): - def setUp(self): - super(CephBrokerTestCase, self).setUp() - - @mock.patch('ceph_broker.log') - def test_process_requests_noop(self, mock_log): - req = json.dumps({'api-version': 1, 'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.log') - def test_process_requests_missing_api_version(self, mock_log): - req = json.dumps({'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), { - 'exit-code': 1, - 'stderr': 'Missing or invalid api version (None)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_version(self, mock_log): - req = json.dumps({'api-version': 2, 'ops': []}) - rc = ceph_broker.process_requests(req) - print "Return: %s" % rc - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': 'Missing or invalid api version (2)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid(self, mock_log): - reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': "Unknown operation 'invalid_op'"}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 100}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_exists(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = True - reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - self.assertFalse(mock_replicated_pool.create.called) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_rid(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'request-id': '1ef5aede', - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', - 
name='foo', - replicas=3) - self.assertEqual(json.loads(rc)['exit-code'], 0) - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_rid(self, mock_log): - reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', - 'ops': [{'op': 'create-pool'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc)['exit-code'], 1) - self.assertEqual(json.loads(rc)['stderr'], - "Missing or invalid api version (0)") - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index cf62d5ec..8d24186d 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -20,7 +20,7 @@ patch, ) -from hooks import ceph_broker +from ceph.ceph import ceph_broker class TestCephOps(unittest.TestCase): From ffe5b591e1520b6568ee4041639cf28ba663c68f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 2 Aug 2016 10:59:04 +0100 Subject: [PATCH 1181/2699] Resync charm helpers for PG calc fixes When the Ceph RADOS Gateway is used with small OSD configurations (3 OSDs), the light pools consume 0.1% of storage by default; this results in PG counts below or equal to 0; enforce a minimum PG count of 2 as specified in the official Ceph PG calculator. Change-Id: I33df3ade8ce85f1928f44d402b8a2f445c479efb Closes-Bug: 1607961 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 26 ++-- .../contrib/openstack/amulet/utils.py | 116 ++++++++++++++++++ .../contrib/storage/linux/ceph.py | 6 + .../contrib/openstack/amulet/utils.py | 116 ++++++++++++++++++ 4 files changed, 256 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 17976fb5..0c39bd91 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -38,6 +38,7 @@ ) from charmhelpers.core.host import service +from charmhelpers.core import host # This module adds compatibility with the nrpe-external-master and plain nrpe # subordinate charms. 
To use it in your charm: @@ -332,16 +333,25 @@ def add_init_service_checks(nrpe, services, unit_name): :param str unit_name: Unit name to use in check description """ for svc in services: + # Don't add a check for these services from neutron-gateway + if svc in ['ext-port', 'os-charm-phy-nic-mtu']: + next + upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if os.path.exists(upstart_init): - # Don't add a check for these services from neutron-gateway - if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + + if host.init_is_systemd(): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_systemd.py %s' % svc + ) + elif os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..355c8cbb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. 
+ + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index beff2703..edb536c7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -87,6 +87,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100 DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 def validator(value, valid_type, valid_range=None): @@ -266,6 +267,11 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + # The CRUSH algorithm has a slight optimization for placement groups # with powers of 2 so find the nearest power of 2. 
If the nearest # power of 2 is more than 25% below the original value, the next diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..355c8cbb 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. 
+ + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. From 69986c4d2f3ff77c4abcb2e44198e37b03776822 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 1 Aug 2016 16:12:00 -0400 Subject: [PATCH 1182/2699] Migrate to shared lib This change moves our ceph.py into a seperate repository that we can share between various ceph related Juju projects, along with a Makefile change to use a new git_sync file to partially sync a git repository into a specified path Change-Id: Iaf3ea38b6e5268c517d53b36105b70f23de891bb --- ceph-osd/Makefile | 13 +- ceph-osd/actions/__init__.py | 1 + ceph-osd/actions/pause_resume.py | 3 +- ceph-osd/hooks/ceph_hooks.py | 3 +- ceph-osd/lib/ceph/__init__.py | 0 ceph-osd/lib/ceph/ceph/__init__.py | 1 + ceph-osd/{hooks => lib/ceph/ceph}/ceph.py | 113 +++++-- ceph-osd/lib/ceph/ceph/ceph_broker.py | 352 ++++++++++++++++++++++ ceph-osd/unit_tests/__init__.py | 1 + ceph-osd/unit_tests/test_replace_osd.py | 16 +- ceph-osd/unit_tests/test_tuning.py | 30 +- ceph-osd/unit_tests/test_upgrade_roll.py | 2 +- 12 files changed, 488 insertions(+), 47 deletions(-) create mode 100644 ceph-osd/lib/ceph/__init__.py create mode 100644 ceph-osd/lib/ceph/ceph/__init__.py rename ceph-osd/{hooks => lib/ceph/ceph}/ceph.py (93%) create mode 100644 ceph-osd/lib/ceph/ceph/ceph_broker.py diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 46b94807..8255f93b 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -15,12 +15,21 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + > bin/charm_helpers_sync.py -sync: bin/charm_helpers_sync.py +bin/git_sync.py: + @mkdir -p bin + @wget -O bin/git_sync.py https://raw.githubusercontent.com/ChrisMacNaughton/git-sync/master/git_sync.py + +ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml +git-sync: bin/git_sync.py + $(PYTHON) bin/git_sync.py -d lib/ceph -s https://github.com/CanonicalLtd/charms_ceph.git + +sync: git-sync ch-sync + publish: lint bzr push lp:charms/ceph-osd bzr push lp:charms/trusty/ceph-osd diff --git a/ceph-osd/actions/__init__.py b/ceph-osd/actions/__init__.py index b7fe4e1b..8d6182b7 100644 --- a/ceph-osd/actions/__init__.py +++ b/ceph-osd/actions/__init__.py @@ -14,3 +14,4 @@ import sys sys.path.append('hooks') +sys.path.append('lib') diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index 4d10b759..800c8814 100755 --- a/ceph-osd/actions/pause_resume.py +++ 
b/ceph-osd/actions/pause_resume.py @@ -20,13 +20,14 @@ import sys from subprocess import check_call +sys.path.append('lib') sys.path.append('hooks') from charmhelpers.core.hookenv import ( action_fail, ) -from ceph import get_local_osd_ids +from ceph.ceph.ceph import get_local_osd_ids from ceph_hooks import assess_status from utils import ( diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f2b18bd6..81ae5f6b 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -22,7 +22,8 @@ import time import netifaces -import ceph +sys.path.append('lib') +from ceph.ceph import ceph from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/lib/ceph/ceph/__init__.py b/ceph-osd/lib/ceph/ceph/__init__.py new file mode 100644 index 00000000..9847ec9e --- /dev/null +++ b/ceph-osd/lib/ceph/ceph/__init__.py @@ -0,0 +1 @@ +__author__ = 'chris' diff --git a/ceph-osd/hooks/ceph.py b/ceph-osd/lib/ceph/ceph/ceph.py similarity index 93% rename from ceph-osd/hooks/ceph.py rename to ceph-osd/lib/ceph/ceph/ceph.py index 712ef3f5..4b68e039 100644 --- a/ceph-osd/hooks/ceph.py +++ b/ceph-osd/lib/ceph/ceph/ceph.py @@ -4,52 +4,50 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import ctypes -import ctypes.util -import errno import json import subprocess import time import os import re import sys +import errno import shutil -from charmhelpers.cli.host import mounts + from charmhelpers.core import hookenv + from charmhelpers.core.host import ( mkdir, chownr, - cmp_pkgrevno, + service_restart, lsb_release, - service_stop, - service_restart) + cmp_pkgrevno, service_stop, mounts) from charmhelpers.core.hookenv import ( log, ERROR, - WARNING, - DEBUG, cached, status_set, -) + WARNING, DEBUG) +from charmhelpers.core.services import render_template from charmhelpers.fetch import ( apt_cache ) + from charmhelpers.contrib.storage.linux.utils import ( - zap_disk, is_block_device, - is_device_mounted, -) + zap_disk, + is_device_mounted) from utils import ( get_unit_hostname, - render_template) +) + LEADER = 'leader' PEON = 'peon' @@ -499,6 +497,30 @@ def get_local_osd_ids(): return osd_ids +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of monitor identifiers :raise: OSError if + something goes wrong with listing the directory. 
+ """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + def _is_int(v): """Return True if the object v can be turned into an integer.""" try: @@ -509,7 +531,7 @@ def _is_int(v): def get_version(): - '''Derive Ceph release from an installed package.''' + """Derive Ceph release from an installed package.""" import apt_pkg as apt cache = apt_cache() @@ -600,6 +622,7 @@ def is_leader(): def wait_for_quorum(): while not is_quorum(): + log("Waiting for quorum to be reached") time.sleep(3) @@ -782,7 +805,7 @@ def is_bootstrapped(): def wait_for_bootstrap(): - while (not is_bootstrapped()): + while not is_bootstrapped(): time.sleep(3) @@ -815,6 +838,18 @@ def import_osd_upgrade_key(key): ] subprocess.check_call(cmd) + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -878,9 +913,12 @@ def import_radosgw_key(key): # OSD caps taken from ceph-create-keys _radosgw_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } +_upgrade_caps = { + 'mon': ['allow rwx'] +} def get_radosgw_key(): @@ -888,10 +926,34 @@ def get_radosgw_key(): _default_caps = { - 'mon': ['allow r'], + 'mon': ['allow rw'], 'osd': ['allow rwx'] } +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + def get_named_key(name, caps=None): caps = caps or _default_caps @@ -916,6 +978,19 @@ def get_named_key(name, caps=None): return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 +def upgrade_key_caps(key, caps): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.iteritems(): + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + @cached def systemd(): return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') diff --git a/ceph-osd/lib/ceph/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph/ceph_broker.py new file mode 100644 index 00000000..da6c3424 --- /dev/null +++ b/ceph-osd/lib/ceph/ceph/ceph_broker.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + ERROR, +) +from charmhelpers.contrib.storage.linux.ceph import ( + create_erasure_profile, + delete_pool, + erasure_profile_exists, + get_osds, + pool_exists, + pool_set, + remove_pool_snapshot, + rename_pool, + set_pool_quota, + snapshot_pool, + validator, + ErasurePool, + Pool, + ReplicatedPool, +) + +# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ +# This should do a decent job of preventing people from passing in bad values. +# It will give a useful error message +POOL_KEYS = { + # "Ceph Key Name": [Python type, [Valid Range]] + "size": [int], + "min_size": [int], + "crash_replay_interval": [int], + "pgp_num": [int], # = or < pg_num + "crush_ruleset": [int], + "hashpspool": [bool], + "nodelete": [bool], + "nopgchange": [bool], + "nosizechange": [bool], + "write_fadvise_dontneed": [bool], + "noscrub": [bool], + "nodeep-scrub": [bool], + "hit_set_type": [basestring, ["bloom", "explicit_hash", + "explicit_object"]], + "hit_set_count": [int, [1, 1]], + "hit_set_period": [int], + "hit_set_fpp": [float, [0.0, 1.0]], + "cache_target_dirty_ratio": [float], + "cache_target_dirty_high_ratio": [float], + "cache_target_full_ratio": [float], + "target_max_bytes": [int], + "target_max_objects": [int], + "cache_min_flush_age": [int], + "cache_min_evict_age": [int], + "fast_read": [bool], +} + +CEPH_BUCKET_TYPES = [ + 'osd', + 'host', + 'chassis', + 'rack', + 'row', + 'pdu', + 'pod', + 'room', + 'datacenter', + 'region', + 'root' +] + + +def decode_req_encode_rsp(f): + """Decorator to decode incoming requests and encode responses.""" + + def decode_inner(req): + return json.dumps(f(json.loads(req))) + + return decode_inner + + +@decode_req_encode_rsp +def process_requests(reqs): + """Process Ceph broker request(s). + + This is a versioned api. API version must be supplied by the client making + the request. 
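+
+    An example request, for illustration (values taken from this charm's
+    unit tests):
+
+        {"api-version": 1,
+         "request-id": "1ef5aede",
+         "ops": [{"op": "create-pool", "name": "foo", "replicas": 3}]}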
+ """ + request_id = reqs.get('request-id') + try: + version = reqs.get('api-version') + if version == 1: + log('Processing request {}'.format(request_id), level=DEBUG) + resp = process_requests_v1(reqs['ops']) + if request_id: + resp['request-id'] = request_id + + return resp + + except Exception as exc: + log(str(exc), level=ERROR) + msg = ("Unexpected error occurred while processing requests: %s" % + reqs) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + msg = ("Missing or invalid api version (%s)" % version) + resp = {'exit-code': 1, 'stderr': msg} + if request_id: + resp['request-id'] = request_id + + return resp + + +def handle_create_erasure_profile(request, service): + # "local" | "shec" or it defaults to "jerasure" + erasure_type = request.get('erasure-type') + # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + failure_domain = request.get('failure-domain') + name = request.get('name') + k = request.get('k') + m = request.get('m') + l = request.get('l') + + if failure_domain not in CEPH_BUCKET_TYPES: + msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + create_erasure_profile(service=service, erasure_plugin_name=erasure_type, + profile_name=name, failure_domain=failure_domain, + data_chunks=k, coding_chunks=m, locality=l) + + +def handle_erasure_pool(request, service): + pool_name = request.get('name') + erasure_profile = request.get('erasure-profile') + quota = request.get('max-bytes') + weight = request.get('weight') + + if erasure_profile is None: + erasure_profile = "default-canonical" + + # Check for missing params + if pool_name is None: + msg = "Missing parameter. name is required for the pool" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds + if not erasure_profile_exists(service=service, name=erasure_profile): + # TODO: Fail and tell them to create the profile or default + msg = ("erasure-profile {} does not exist. Please create it with: " + "create-erasure-profile".format(erasure_profile)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = ErasurePool(service=service, name=pool_name, + erasure_code_profile=erasure_profile, + percent_data=weight) + # Ok make the erasure pool + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, + erasure_profile), + level=INFO) + pool.create() + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_replicated_pool(request, service): + pool_name = request.get('name') + replicas = request.get('replicas') + quota = request.get('max-bytes') + weight = request.get('weight') + + # Optional params + pg_num = request.get('pg_num') + if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(service) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + + # Check for missing params + if pool_name is None or replicas is None: + msg = "Missing parameter. 
name and replicas are required" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + kwargs = {} + if pg_num: + kwargs['pg_num'] = pg_num + if weight: + kwargs['percent_data'] = weight + if replicas: + kwargs['replicas'] = replicas + + pool = ReplicatedPool(service=service, + name=pool_name, **kwargs) + if not pool_exists(service=service, name=pool_name): + log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), + level=INFO) + pool.create() + else: + log("Pool '%s' already exists - skipping create" % pool.name, + level=DEBUG) + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_create_cache_tier(request, service): + # mode = "writeback" | "readonly" + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + cache_mode = request.get('mode') + + if cache_mode is None: + cache_mode = "writeback" + + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " + "them first".format(storage_pool, cache_pool)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + p = Pool(service=service, name=storage_pool) + p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + + +def handle_remove_cache_tier(request, service): + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not " + "deleting cache tier".format(storage_pool, cache_pool)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = Pool(name=storage_pool, service=service) + pool.remove_cache_tier(cache_pool=cache_pool) + + +def handle_set_pool_value(request, service): + # Set arbitrary pool values + params = {'pool': request.get('name'), + 'key': request.get('key'), + 'value': request.get('value')} + if params['key'] not in POOL_KEYS: + msg = "Invalid key '%s'" % params['key'] + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Get the validation method + validator_params = POOL_KEYS[params['key']] + if len(validator_params) is 1: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0]) + else: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0], validator_params[1]) + + # Set the value + pool_set(service=service, pool_name=params['pool'], key=params['key'], + value=params['value']) + + +def process_requests_v1(reqs): + """Process v1 requests. + + Takes a list of requests (dicts) and processes each one. If an error is + found, processing stops and the client is notified in the response. + + Returns a response dict containing the exit code (non-zero if any + operation failed along with an explanation). + """ + ret = None + log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + for req in reqs: + op = req.get('op') + log("Processing op='%s'" % op, level=DEBUG) + # Use admin client since we do not have other client key locations + # setup to use them for these operations. 
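+        # Note, for illustration: each handler below returns None on
+        # success or a dict like {'exit-code': 1, 'stderr': msg} on
+        # failure, and the first failing op is returned to the client.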
+ svc = 'admin' + if op == "create-pool": + pool_type = req.get('pool-type') # "replicated" | "erasure" + + # Default to replicated if pool_type isn't given + if pool_type == 'erasure': + ret = handle_erasure_pool(request=req, service=svc) + else: + ret = handle_replicated_pool(request=req, service=svc) + + elif op == "create-cache-tier": + ret = handle_create_cache_tier(request=req, service=svc) + elif op == "remove-cache-tier": + ret = handle_remove_cache_tier(request=req, service=svc) + elif op == "create-erasure-profile": + ret = handle_create_erasure_profile(request=req, service=svc) + elif op == "delete-pool": + pool = req.get('name') + ret = delete_pool(service=svc, name=pool) + elif op == "rename-pool": + old_name = req.get('name') + new_name = req.get('new-name') + ret = rename_pool(service=svc, old_name=old_name, + new_name=new_name) + elif op == "snapshot-pool": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + ret = snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "remove-pool-snapshot": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + ret = remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "set-pool-value": + ret = handle_set_pool_value(request=req, service=svc) + else: + msg = "Unknown operation '%s'" % op + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + if type(ret) == dict and 'exit-code' in ret: + return ret + + return {'exit-code': 0} diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index d7a4ace1..84f643d0 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -14,4 +14,5 @@ import sys sys.path.append('hooks') +sys.path.append('lib') sys.path.append('actions') diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index ce919382..53109403 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -18,7 +18,7 @@ from mock import call, Mock, patch import test_utils -import ceph +from ceph.ceph import ceph import replace_osd TO_PATCH = [ @@ -73,13 +73,13 @@ def test_umount(self): ]) assert ret == 0 - @patch('ceph.mounts') - @patch('ceph.subprocess') - @patch('ceph.umount') - @patch('ceph.osdize') - @patch('ceph.shutil') - @patch('ceph.systemd') - @patch('ceph.ceph_user') + @patch('ceph.ceph.ceph.mounts') + @patch('ceph.ceph.ceph.subprocess') + @patch('ceph.ceph.ceph.umount') + @patch('ceph.ceph.ceph.osdize') + @patch('ceph.ceph.ceph.shutil') + @patch('ceph.ceph.ceph.systemd') + @patch('ceph.ceph.ceph.ceph_user') def test_replace_osd(self, ceph_user, systemd, diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index 61a69443..84358e53 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -1,7 +1,7 @@ __author__ = 'Chris Holcombe ' from mock import patch, call import test_utils -import ceph +from ceph.ceph import ceph TO_PATCH = [ 'hookenv', @@ -16,8 +16,8 @@ def setUp(self): super(PerformanceTestCase, self).setUp(ceph, TO_PATCH) def test_tune_nic(self): - with patch('ceph.get_link_speed', return_value=10000): - with patch('ceph.save_sysctls') as save_sysctls: + with patch('ceph.ceph.ceph.get_link_speed', return_value=10000): + with patch('ceph.ceph.ceph.save_sysctls') as save_sysctls: ceph.tune_nic('eth0') save_sysctls.assert_has_calls( [ @@ -49,12 +49,12 @@ def test_get_block_uuid(self): uuid = 
ceph.get_block_uuid('/dev/sda1') self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') - @patch('ceph.persist_settings') - @patch('ceph.set_hdd_read_ahead') - @patch('ceph.get_max_sectors_kb') - @patch('ceph.get_max_hw_sectors_kb') - @patch('ceph.set_max_sectors_kb') - @patch('ceph.get_block_uuid') + @patch('ceph.ceph.ceph.persist_settings') + @patch('ceph.ceph.ceph.set_hdd_read_ahead') + @patch('ceph.ceph.ceph.get_max_sectors_kb') + @patch('ceph.ceph.ceph.get_max_hw_sectors_kb') + @patch('ceph.ceph.ceph.set_max_sectors_kb') + @patch('ceph.ceph.ceph.get_block_uuid') def test_tune_dev(self, block_uuid, set_max_sectors_kb, @@ -84,12 +84,12 @@ def test_tune_dev(self, call('maintenance', 'Finished tuning device /dev/sda') ]) - @patch('ceph.persist_settings') - @patch('ceph.set_hdd_read_ahead') - @patch('ceph.get_max_sectors_kb') - @patch('ceph.get_max_hw_sectors_kb') - @patch('ceph.set_max_sectors_kb') - @patch('ceph.get_block_uuid') + @patch('ceph.ceph.ceph.persist_settings') + @patch('ceph.ceph.ceph.set_hdd_read_ahead') + @patch('ceph.ceph.ceph.get_max_sectors_kb') + @patch('ceph.ceph.ceph.get_max_hw_sectors_kb') + @patch('ceph.ceph.ceph.set_max_sectors_kb') + @patch('ceph.ceph.ceph.get_block_uuid') def test_tune_dev_2(self, block_uuid, set_max_sectors_kb, diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index a3a6f260..7fca5918 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -16,7 +16,7 @@ from mock import patch, call, MagicMock -from ceph import CrushLocation +from ceph.ceph.ceph import CrushLocation import test_utils import ceph_hooks From 1d665753ddc07ec0b7c323cbec6c24bc9eccc5ac Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 3 Aug 2016 09:15:07 -0700 Subject: [PATCH 1183/2699] Add ceph_api --- ceph-fs/.gitignore | 1 + ceph-fs/src/README.ex | 65 --------------------------- ceph-fs/src/README.md | 33 ++++++++++++++ ceph-fs/src/actions.yaml | 57 +++++++++++++++++++++++ ceph-fs/src/config.yaml | 33 +++++++++----- ceph-fs/src/metadata.yaml | 25 +++++------ ceph-fs/src/reactive/charm_ceph_fs.py | 33 ++++++++++++-- ceph-fs/src/wheelhouse.txt | 1 + ceph-fs/tox.ini | 41 +++++++++++++++++ 9 files changed, 196 insertions(+), 93 deletions(-) create mode 100644 ceph-fs/.gitignore delete mode 100644 ceph-fs/src/README.ex create mode 100644 ceph-fs/src/README.md create mode 100644 ceph-fs/src/actions.yaml create mode 100644 ceph-fs/src/wheelhouse.txt diff --git a/ceph-fs/.gitignore b/ceph-fs/.gitignore new file mode 100644 index 00000000..485dee64 --- /dev/null +++ b/ceph-fs/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/ceph-fs/src/README.ex b/ceph-fs/src/README.ex deleted file mode 100644 index b6816b22..00000000 --- a/ceph-fs/src/README.ex +++ /dev/null @@ -1,65 +0,0 @@ -# Overview - -Describe the intended usage of this charm and anything unique about how this -charm relates to others here. - -This README will be displayed in the Charm Store, it should be either Markdown -or RST. Ideal READMEs include instructions on how to use the charm, expected -usage, and charm features that your audience might be interested in. For an -example of a well written README check out Hadoop: -http://jujucharms.com/charms/precise/hadoop - -Use this as a Markdown reference if you need help with the formatting of this -README: http://askubuntu.com/editing-help - -This charm provides [service][]. Add a description here of what the service -itself actually does. 
- -Also remember to check the [icon guidelines][] so that your charm looks good -in the Juju GUI. - -# Usage - -Step by step instructions on using the charm: - -juju deploy servicename - -and so on. If you're providing a web service or something that the end user -needs to go to, tell them here, especially if you're deploying a service that -might listen to a non-default port. - -You can then browse to http://ip-address to configure the service. - -## Scale out Usage - -If the charm has any recommendations for running at scale, outline them in -examples here. For example if you have a memcached relation that improves -performance, mention it here. - -## Known Limitations and Issues - -This not only helps users but gives people a place to start if they want to help -you add features to your charm. - -# Configuration - -The configuration options will be listed on the charm store, however If you're -making assumptions or opinionated decisions in the charm (like setting a default -administrator password), you should detail that here so the user knows how to -change it immediately, etc. - -# Contact Information - -Though this will be listed in the charm store itself don't assume a user will -know that, so include that information here: - -## Upstream Project Name - - - Upstream website - - Upstream bug tracker - - Upstream mailing list or contact information - - Feel free to add things if it's useful for users - - -[service]: http://example.com -[icon guidelines]: https://jujucharms.com/docs/stable/authors-charm-icon diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md new file mode 100644 index 00000000..dcbbee51 --- /dev/null +++ b/ceph-fs/src/README.md @@ -0,0 +1,33 @@ +# CephFS Charm + +# Overview + +Ceph is a distributed storage and network file system designed to provide +excellent performance, reliability, and scalability. + +This charm deploys a Ceph MDS cluster. +juju + +Usage +===== + +Boot things up by using:: + + juju deploy -n 3 --config ceph.yaml ceph-mon + juju deploy -n 3 --config ceph.yaml ceph-osd + +You can then deploy this charm by simply doing:: + + juju deploy -n 3 --config ceph.yaml ceph-fs + juju add-relation ceph-fs ceph-mon + +Once the ceph-mon and osd charms have bootstrapped the cluster, it will notify the ceph-fs charm. + +Contact Information +=================== + +## Ceph + +- [Ceph website](http://ceph.com) +- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) +- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) \ No newline at end of file diff --git a/ceph-fs/src/actions.yaml b/ceph-fs/src/actions.yaml new file mode 100644 index 00000000..77651dc7 --- /dev/null +++ b/ceph-fs/src/actions.yaml @@ -0,0 +1,57 @@ +get-quota: + description: View quota settings on a directory + params: + max-files: + type: boolean + description: | + The limit of how many files can be written. Use either this or + max-bytes but not both. + max-bytes: + type: integer + description: | + The maximum number of bytes that are allowed to be written. Use + either this or max-files but not both. + directory: + type: string + description: | + The directory to query for quota information. + required: [directory] + additionalProperties: false +remove-quota: + description: Remove a quota on a directory + params: + max-files: + type: boolean + description: | + The limit of how many files can be written. Use either this or + max-bytes but not both. + max-bytes: + type: integer + description: | + The maximum number of bytes that are allowed to be written. 
Use + either this or max-files but not both. + directory: + type: string + description: | + The directory to remove the quota from. + required: [directory] + additionalProperties: false +set-quota: + description: Create a new quota + params: + max-files: + type: integer + description: | + The limit of how many files can be written. Use either this or + max-bytes but not both. + max-bytes: + type: integer + description: | + The maximum number of bytes that are allowed to be written. Use + either this or max-files but not both. + directory: + type: string + description: | + The directory to apply this quota to. + required: [directory] + additionalProperties: false diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 49d6f9ea..f0b9fa46 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -1,13 +1,4 @@ options: - loglevel: - default: 1 - type: int - description: Mon and OSD debug level. Max is 20. - use-syslog: - type: boolean - default: False - description: | - If set to True, supporting services will log to syslog. ceph-public-network: type: string default: @@ -25,4 +16,26 @@ options: 192.168.0.0/24) . If multiple networks are to be used, a space-delimited list of a.b.c.d/x - can be provided. \ No newline at end of file + can be provided. + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. + source: + type: string + default: + description: | + Optional configuration to support use of additional sources such as: + - ppa:myteam/ppa + - cloud:trusty-proposed/kilo + - http://my.archive.com/ubuntu main + The last option should be used in conjunction with the key configuration + option. + Note that a minimum ceph version of 0.48.2 is required for use with this + charm which is NOT provided by the packages in the main Ubuntu archive + for precise but is provided in the Ubuntu cloud archive. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 03614784..bbe5656f 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -1,19 +1,16 @@ name: charm-ceph-fs -summary: -maintainer: chris +summary: Highly scalable distributed storage +maintainer: OpenStack Charmers description: | - + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. 
tags: - # Replace "misc" with one or more whitelisted tags from this list: - # https://jujucharms.com/docs/stable/authors-charm-metadata + - openstack + - storage + - file-servers - misc subordinate: false -provides: - provides-relation: - interface: interface-name -requires: - requires-relation: - interface: interface-name -peers: - peer-relation: - interface: interface-name +#provides: +#requires: + #ceph-mon-cephfs: + #interface: ceph-mon-cephfs diff --git a/ceph-fs/src/reactive/charm_ceph_fs.py b/ceph-fs/src/reactive/charm_ceph_fs.py index 83c463fe..b661fe75 100644 --- a/ceph-fs/src/reactive/charm_ceph_fs.py +++ b/ceph-fs/src/reactive/charm_ceph_fs.py @@ -1,17 +1,42 @@ -from charms.reactive import when, when_not, set_state +from charms.reactive import when from charmhelpers.core.hookenv import ( config, -) + log, INFO, ERROR) from charmhelpers.contrib.network.ip import ( get_address_in_network ) @when('ceph.installed') -@when('ceph-mon.available') +# @when('ceph-mon.available') def setup_mds(mon): - + log("I'm in setup_mds()") + try: + from rados import Error as RadosError + from ceph_api import ceph_command + except ImportError as err: + log("rados is not installed yet: {}".format(err)) + return + # TODO: Monitor needs a new CephFS relation + # TODO: Update with the conf file location + osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf') + mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf') + + try: + log("Creating cephfs_data pool", level=INFO) + # TODO: Update with better pg values + osd.osd_pool_create('cephfs_data', 256) + + log("Creating cephfs_metadata pool", level=INFO) + # TODO: Update with better pg values + osd.osd_pool_create('cephfs_metadata', 256) + + log("Creating ceph fs", level=INFO) + mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) + except RadosError as err: + log(message='Error: {}'.format(err.message), level=ERROR) + @when('config.changed', 'ceph-mon.available') def config_changed(): diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt new file mode 100644 index 00000000..f34b89e7 --- /dev/null +++ b/ceph-fs/src/wheelhouse.txt @@ -0,0 +1 @@ +ceph_api \ No newline at end of file diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index e69de29b..1e21b23c 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -0,0 +1,41 @@ +[tox] +envlist = pep8,py34 +skipsdist = True +skip_missing_interpreters = True + +[testenv] +basepython = python2.7 +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + TERM=linux + INTERFACE_PATH={toxinidir}/interfaces + LAYER_PATH={toxinidir}/layers + JUJU_REPOSITORY={toxinidir}/build +passenv = http_proxy https_proxy +install_command = + pip install {opts} {packages} +deps = + -r{toxinidir}/requirements.txt + +[testenv:build] +commands = + charm-build --log-level DEBUG -o {toxinidir}/build --name ceph-mon src + +[testenv:venv] +commands = {posargs} + +[testenv:pep8] +commands = flake8 {posargs} reactive lib unit_tests + +[testenv:py34] +basepython = python3.4 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr -p {toxinidir}/build/trusty/ceph-mon {posargs} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr -p {toxinidir}/build/trusty/ceph-mon {posargs} + +[flake8] +ignore = E402,E226 \ No newline at end of file From 27c42106b4a9f295577dcf2fdf4cb01e53fdca0f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 11 Aug 2016 08:31:28 -0400 Subject: [PATCH 1184/2699] Clean up dependency chain This includes a resync of 
charms_ceph to raise the directory one level The charms_ceph change that we're syncing in changes the name of the ceph.py file into the __init__.py file to remove the second level of namespacing Change-Id: I0122752fa2882d98d47e14ac50bfa6d14a08a50c --- ceph-mon/.gitignore | 3 +- ceph-mon/Makefile | 2 +- ceph-mon/hooks/ceph_hooks.py | 4 +- ceph-mon/lib/__init__.py | 0 ceph-mon/lib/ceph/__init__.py | 1183 +++++++++++++++++++ ceph-mon/lib/ceph/ceph/__init__.py | 1 - ceph-mon/lib/ceph/ceph/ceph.py | 1183 ------------------- ceph-mon/lib/ceph/{ceph => }/ceph_broker.py | 10 +- ceph-mon/unit_tests/test_ceph_ops.py | 2 +- 9 files changed, 1198 insertions(+), 1190 deletions(-) create mode 100644 ceph-mon/lib/__init__.py delete mode 100644 ceph-mon/lib/ceph/ceph/__init__.py delete mode 100644 ceph-mon/lib/ceph/ceph/ceph.py rename ceph-mon/lib/ceph/{ceph => }/ceph_broker.py (97%) diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index 7d2fd1fb..db5910ca 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -5,4 +5,5 @@ bin .tox *.sw[nop] .idea -*.pyc \ No newline at end of file +*.pyc +func-results.json diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index eb6a2867..21d1af05 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -25,7 +25,7 @@ ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml git-sync: bin/git_sync.py - $(PYTHON) bin/git_sync.py -d lib/ceph -s https://github.com/CanonicalLtd/charms_ceph.git + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/CanonicalLtd/charms_ceph.git sync: git-sync ch-sync diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 45df5bdb..d78d93b8 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -23,8 +23,8 @@ import time sys.path.append('lib') -from ceph.ceph import ceph -from ceph.ceph.ceph_broker import ( +import ceph +from ceph.ceph_broker import ( process_requests ) diff --git a/ceph-mon/lib/__init__.py b/ceph-mon/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index e69de29b..4b68e039 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -0,0 +1,1183 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
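+# A short sketch of the import change this resync enables (taken from the
+# ceph_hooks.py hunk in this same commit): flattening ceph/ceph.py into
+# ceph/__init__.py lets the charm drop one level of namespacing:
+#
+#     import ceph
+#     from ceph.ceph_broker import process_requests
+#
+# where the previous form was 'from ceph.ceph import ceph'.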
+import ctypes +import json +import subprocess +import time +import os +import re +import sys +import errno +import shutil + +from charmhelpers.core import hookenv + +from charmhelpers.core.host import ( + mkdir, + chownr, + service_restart, + lsb_release, + cmp_pkgrevno, service_stop, mounts) +from charmhelpers.core.hookenv import ( + log, + ERROR, + cached, + status_set, + WARNING, DEBUG) +from charmhelpers.core.services import render_template +from charmhelpers.fetch import ( + apt_cache +) + +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, + is_device_mounted) +from utils import ( + get_unit_hostname, +) + + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +def save_sysctls(sysctl_dict, save_location): + """ + Persist the sysctls to the hard drive. + :param sysctl_dict: dict + :param save_location: path to save the settings to + :raise: IOError if anything goes wrong with writing. + """ + try: + # Persist the settings for reboots + with open(save_location, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + except IOError as e: + log("Unable to persist sysctl settings to {}. Error {}".format( + save_location, e.message), level=ERROR) + raise + + +def tune_nic(network_interface): + """ + This will set optimal sysctls for the particular network adapter. + :param network_interface: string The network adapter name. + """ + speed = get_link_speed(network_interface) + if speed in NETWORK_ADAPTER_SYSCTLS: + status_set('maintenance', 'Tuning device {}'.format( + network_interface)) + sysctl_file = os.path.join( + os.sep, + 'etc', + 'sysctl.d', + '51-ceph-osd-charm-{}.conf'.format(network_interface)) + try: + log("Saving sysctl_file: {} values: {}".format( + sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), + level=DEBUG) + save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], + save_location=sysctl_file) + except IOError as e: + log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " + "failed. 
{}".format(network_interface, e.message), + level=ERROR) + + try: + # Apply the settings + log("Applying sysctl settings", level=DEBUG) + subprocess.check_output(["sysctl", "-p", sysctl_file]) + except subprocess.CalledProcessError as err: + log('sysctl -p {} failed with error {}'.format(sysctl_file, + err.output), + level=ERROR) + else: + log("No settings found for network adapter: {}".format( + network_interface), level=DEBUG) + + +def get_link_speed(network_interface): + """ + This will find the link speed for a given network device. Returns None + if an error occurs. + :param network_interface: string The network adapter interface. + :return: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e.message), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ + This will persist the hard drive settings to the /etc/hdparm.conf file + The settings_dict should be in the form of {"uuid": {"key":"value"}} + :param settings_dict: dict of settings to save + """ + hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + try: + with open(hdparm_path, 'w') as hdparm: + hdparm.write(render_template('hdparm.conf', settings_dict)) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=hdparm_path, + error=err.message), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """ + This function sets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(max_sectors_size) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """ + This function gets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_sectors_kb or 0 on error. + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """ + This function gets the max_hw_sectors_kb for a given block device. 
+ :param dev_name: Name of the block device to query + :return: int which is either the max_hw_sectors_kb or 0 on error. + """ + max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( + max_hw_sectors_kb_path, e.message), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """ + This function sets the hard drive read ahead. + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """ + This queries blkid to get the uuid for a block device. + :param block_dev: Name of the block device to query. + :return: The UUID of the device or None on Error. + """ + try: + block_info = subprocess.check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """ + Tune the max_hw_sectors if needed. + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. + :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. 
No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """ + Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + This function will change the read ahead sectors and the max write + sectors for each block device. + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_tree(service): + """ + Returns the current osd map in JSON. + :return: List. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + child_ids = json_tree['nodes'][0]['children'] + for child in json_tree['nodes']: + if child['id'] in child_ids: + crush_list.append( + CrushLocation( + name=child.get('name'), + identifier=child['id'], + host=child.get('host'), + rack=child.get('rack'), + row=child.get('row'), + datacenter=child.get('datacenter'), + chassis=child.get('chassis'), + root=child.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_local_osd_ids(): + """ + This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of osd identifiers :raise: OSError if + something goes wrong with listing the directory. 
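+    Example (illustrative): with /var/lib/ceph/osd/ceph-0 and
+    /var/lib/ceph/osd/ceph-3 present, this returns ['0', '3'].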
+ """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + if _is_int(osd_id): + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of monitor identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + +def _is_int(v): + """Return True if the object v can be turned into an integer.""" + try: + int(v) + return True + except ValueError: + return False + + +def get_version(): + """Derive Ceph release from an installed package.""" + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation ' \ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: %s" % msg, + level=ERROR) + sys.exit(1) + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + log("Waiting for quorum to be reached") + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' +] + +CEPH_PARTITIONS = [ + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data + 
'4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal +] + + +def umount(mount_point): + """ + This function unmounts a mounted directory forcibly. This will + be used for unmounting broken hard drive mounts which may hang. + If umount returns EBUSY this will lazy unmount. + :param mount_point: str. A String representing the filesystem mount point + :return: int. Returns 0 on success. errno otherwise. + """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """ + This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. Example: /dev/sda + :param osd_format: + :param osd_journal: + :param reformat_osd: + :param ignore_errors: + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]), + # umount if still mounted + ret = umount(mount_point) + if ret < 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError: + pass + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if 
cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} + +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow 
command "osd rm"', + 'allow command "auth del"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.{}'.format(name), + ] + # Add capabilities + for subsystem, subcaps in caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.iteritems(): + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + + +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def maybe_zap_journal(journal_dev): + if is_osd_disk(journal_dev): + log('Looks like {} is already an OSD data' + ' or journal, skipping.'.format(journal_dev)) + return + zap_disk(journal_dev) + log("Zapped journal device {}".format(journal_dev)) + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = subprocess.check_output(cmd).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def find_least_used_journal(journal_devices): + usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False): + if 
dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, + reformat_osd, ignore_errors, encrypt) + else: + osdize_dir(dev, encrypt) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev) and not reformat_osd: + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + status_set('maintenance', 'Initializing device {}'.format(dev)) + cmd = ['ceph-disk', 'prepare'] + # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + if cmp_pkgrevno('ceph', '0.48.3') >= 0: + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') + cmd.append(dev) + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + if reformat_osd: + zap_disk(dev) + + try: + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + raise e + + +def osdize_dir(path, encrypt=False): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if cmp_pkgrevno('ceph', "0.56.6") < 0: + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) + cmd = [ + 'sudo', '-u', ceph_user(), + 'ceph-disk', + 'prepare', + '--data-dir', + path + ] + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + log("osdize dir cmd: {}".format(cmd)) + subprocess.check_call(cmd) + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + +def get_running_osds(): + """Returns a list of the pids of the current running OSD daemons""" + cmd = ['pgrep', 'ceph-osd'] + try: + result = subprocess.check_output(cmd) + return result.split() + except subprocess.CalledProcessError: + return [] diff --git a/ceph-mon/lib/ceph/ceph/__init__.py b/ceph-mon/lib/ceph/ceph/__init__.py deleted file mode 100644 index 9847ec9e..00000000 --- a/ceph-mon/lib/ceph/ceph/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'chris' diff --git a/ceph-mon/lib/ceph/ceph/ceph.py b/ceph-mon/lib/ceph/ceph/ceph.py deleted file mode 100644 index 4b68e039..00000000 --- a/ceph-mon/lib/ceph/ceph/ceph.py +++ /dev/null @@ -1,1183 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import ctypes -import json -import subprocess -import time -import os -import re -import sys -import errno -import shutil - -from charmhelpers.core import hookenv - -from charmhelpers.core.host import ( - mkdir, - chownr, - service_restart, - lsb_release, - cmp_pkgrevno, service_stop, mounts) -from charmhelpers.core.hookenv import ( - log, - ERROR, - cached, - status_set, - WARNING, DEBUG) -from charmhelpers.core.services import render_template -from charmhelpers.fetch import ( - apt_cache -) - -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - zap_disk, - is_device_mounted) -from utils import ( - get_unit_hostname, -) - - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -def save_sysctls(sysctl_dict, save_location): - """ - Persist the sysctls to the hard drive. - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raise: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) - raise - - -def tune_nic(network_interface): - """ - This will set optimal sysctls for the particular network adapter. - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. 
{}".format(network_interface, e.message), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """ - This will find the link speed for a given network device. Returns None - if an error occurs. - :param network_interface: string The network adapter interface. - :return: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? - if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e.message), level='error') - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """ - This will persist the hard drive settings to the /etc/hdparm.conf file - The settings_dict should be in the form of {"uuid": {"key":"value"}} - :param settings_dict: dict of settings to save - """ - hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') - try: - with open(hdparm_path, 'w') as hdparm: - hdparm.write(render_template('hdparm.conf', settings_dict)) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=hdparm_path, - error=err.message), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """ - This function sets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(max_sectors_size) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """ - This function gets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """ - This function gets the max_hw_sectors_kb for a given block device. 
- :param dev_name: Name of the block device to query - :return: int which is either the max_hw_sectors_kb or 0 on error. - """ - max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """ - This function sets the hard drive read ahead. - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. - """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - subprocess.check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """ - This queries blkid to get the uuid for a block device. - :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. - """ - try: - block_info = subprocess.check_output( - ['blkid', '-o', 'export', block_dev]) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """ - Tune the max_hw_sectors if needed. - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. 
No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """ - Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - This function will change the read ahead sectors and the max write - sectors for each block device. - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" - - -class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name - self.identifier = identifier - self.host = host - self.rack = rack - self.row = row - self.datacenter = datacenter - self.chassis = chassis - self.root = root - - def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_tree(service): - """ - Returns the current osd map in JSON. - :return: List. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = subprocess.check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) - ) - return crush_list - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def get_local_osd_ids(): - """ - This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of osd identifiers :raise: OSError if - something goes wrong with listing the directory. 
- """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """ - This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of monitor identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search('ceph-(?P.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - cache = apt_cache() - package = "ceph" - try: - pkg = cache[package] - except: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: %s" % msg, - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for quorum to be reached") - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - 
'4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal -] - - -def umount(mount_point): - """ - This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - If umount returns EBUSY this will lazy unmount. - :param mount_point: str. A String representing the filesystem mount point - :return: int. Returns 0 on success. errno otherwise. - """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """ - This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]), - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - -def is_osd_disk(dev): - try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError: - pass - return False - - -def start_osds(devices): - # Scan for ceph block devices - rescan_osd_devices() - if 
cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = subprocess.check_output(cmd) - - return "{}==".format(res.split('=')[1].strip()) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) - - -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} - -admin_caps = { - 'mds': ['allow'], - 'mon': ['allow *'], - 'osd': ['allow *'] -} - -osd_upgrade_caps = { - 'mon': ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow 
command "osd rm"', - 'allow command "auth del"', - ] -} - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps): - """ Upgrade key to have capabilities caps """ - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.iteritems(): - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') - - -def bootstrap_monitor_cluster(secret): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - except: - raise - finally: - os.unlink(keyring) - - -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): - log('Looks like {} is already an OSD data' - ' or journal, skipping.'.format(journal_dev)) - return - zap_disk(journal_dev) - log("Zapped journal device {}".format(journal_dev)) - - -def get_partitions(dev): - cmd = ['partx', '--raw', '--noheadings', dev] - try: - out = subprocess.check_output(cmd).splitlines() - log("get partitions: {}".format(out), level=DEBUG) - return out - except subprocess.CalledProcessError as e: - log("Can't get info for {0}: {1}".format(dev, e.output)) - return [] - - -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): - if 
dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt) - else: - osdize_dir(dev, encrypt) - - -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev) and not reformat_osd: - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - if reformat_osd: - cmd.append('--zap-disk') - cmd.append(dev) - if osd_journal: - least_used = find_least_used_journal(osd_journal) - cmd.append(least_used) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) - - try: - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - raise e - - -def osdize_dir(path, encrypt=False): - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return - - if cmp_pkgrevno('ceph', "0.56.6") < 0: - log('Unable to use directories for OSDs with ceph < 0.56.6', - level=ERROR) - raise - - mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - chownr('/var/lib/ceph', ceph_user(), ceph_user()) - cmd = [ - 'sudo', '-u', ceph_user(), - 'ceph-disk', - 'prepare', - '--data-dir', - path - ] - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - log("osdize dir cmd: {}".format(cmd)) - subprocess.check_call(cmd) - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - -def get_running_osds(): - """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] - try: - result = subprocess.check_output(cmd) - return result.split() - except subprocess.CalledProcessError: - return [] diff --git a/ceph-mon/lib/ceph/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py similarity index 97% rename from ceph-mon/lib/ceph/ceph/ceph_broker.py rename to ceph-mon/lib/ceph/ceph_broker.py index ab4d8022..d55e570b 100644 --- a/ceph-mon/lib/ceph/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -22,10 +22,11 @@ INFO, ERROR, ) -from charmhelpers.contrib.storage.linux.ceph import ( +from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, erasure_profile_exists, + get_osds, pool_exists, pool_set, remove_pool_snapshot, @@ -190,7 +191,14 @@ def handle_replicated_pool(request, service): replicas = request.get('replicas') quota = request.get('max-bytes') weight = request.get('weight') + + # Optional params pg_num = request.get('pg_num') + if pg_num: + # Cap pg_num to max allowed just in case. 
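+        # Illustrative arithmetic (hypothetical cluster): with 6 OSDs
+        # and replicas=3 the ceiling is 6 * 100 // 3 == 200, so a
+        # requested pg_num of 512 is clamped to 200, while a request
+        # for 128 passes through unchanged.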
+ osds = get_osds(service) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) # Check for missing params if pool_name is None or replicas is None: diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 8d24186d..3b821960 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -20,7 +20,7 @@ patch, ) -from ceph.ceph import ceph_broker +from ceph import ceph_broker class TestCephOps(unittest.TestCase): From 3b47d9afa0a281e8084b2470a6c885b5676df587 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 11 Aug 2016 08:21:32 -0400 Subject: [PATCH 1185/2699] Clean up dependency chain This includes a resync of charms_ceph to raise the directory one level The charms_ceph change that we're syncing in changes the name of the ceph.py file into the __init__.py file to remove the second level of namespacing Change-Id: I4eabbd313de2e9420667dc4acca177b2dbbf9581 --- ceph-osd/.gitignore | 1 + ceph-osd/Makefile | 2 +- ceph-osd/actions/pause_resume.py | 2 +- ceph-osd/hooks/ceph_hooks.py | 2 +- ceph-osd/lib/__init__.py | 0 ceph-osd/lib/ceph/__init__.py | 1183 +++++++++++++++++++ ceph-osd/lib/ceph/ceph/__init__.py | 1 - ceph-osd/lib/ceph/ceph/ceph.py | 1183 ------------------- ceph-osd/lib/ceph/{ceph => }/ceph_broker.py | 4 +- ceph-osd/unit_tests/test_replace_osd.py | 16 +- ceph-osd/unit_tests/test_tuning.py | 30 +- ceph-osd/unit_tests/test_upgrade_roll.py | 2 +- 12 files changed, 1213 insertions(+), 1213 deletions(-) create mode 100644 ceph-osd/lib/__init__.py delete mode 100644 ceph-osd/lib/ceph/ceph/__init__.py delete mode 100644 ceph-osd/lib/ceph/ceph/ceph.py rename ceph-osd/lib/ceph/{ceph => }/ceph_broker.py (99%) diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index 4219e517..660b48a2 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -7,3 +7,4 @@ bin *.pyc .unit-state.db .idea +func-results.json diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 8255f93b..10d0ea8a 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -26,7 +26,7 @@ ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml git-sync: bin/git_sync.py - $(PYTHON) bin/git_sync.py -d lib/ceph -s https://github.com/CanonicalLtd/charms_ceph.git + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/CanonicalLtd/charms_ceph.git sync: git-sync ch-sync diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index 800c8814..f66bf74d 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -27,7 +27,7 @@ action_fail, ) -from ceph.ceph.ceph import get_local_osd_ids +from ceph import get_local_osd_ids from ceph_hooks import assess_status from utils import ( diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 81ae5f6b..28b83cb9 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -23,7 +23,7 @@ import netifaces sys.path.append('lib') -from ceph.ceph import ceph +import ceph from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/lib/__init__.py b/ceph-osd/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index e69de29b..4b68e039 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -0,0 +1,1183 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +import json +import subprocess +import time +import os +import re +import sys +import errno +import shutil + +from charmhelpers.core import hookenv + +from charmhelpers.core.host import ( + mkdir, + chownr, + service_restart, + lsb_release, + cmp_pkgrevno, service_stop, mounts) +from charmhelpers.core.hookenv import ( + log, + ERROR, + cached, + status_set, + WARNING, DEBUG) +from charmhelpers.core.services import render_template +from charmhelpers.fetch import ( + apt_cache +) + +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + zap_disk, + is_device_mounted) +from utils import ( + get_unit_hostname, +) + + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +def save_sysctls(sysctl_dict, save_location): + """ + Persist the sysctls to the hard drive. + :param sysctl_dict: dict + :param save_location: path to save the settings to + :raise: IOError if anything goes wrong with writing. + """ + try: + # Persist the settings for reboots + with open(save_location, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + except IOError as e: + log("Unable to persist sysctl settings to {}. Error {}".format( + save_location, e.message), level=ERROR) + raise + + +def tune_nic(network_interface): + """ + This will set optimal sysctls for the particular network adapter. + :param network_interface: string The network adapter name. 
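+
+    Illustrative effect, assuming a NIC whose sysfs speed file reports
+    10000 (i.e. LinkSpeed["GBASE_10"]): tune_nic('eth0') writes the
+    GBASE_10 table above to /etc/sysctl.d/51-ceph-osd-charm-eth0.conf,
+    one key=value line per setting, e.g.
+
+        net.core.rmem_max=524287
+        net.core.netdev_max_backlog=300000
+
+    and then applies the file with "sysctl -p <file>".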
+ """ + speed = get_link_speed(network_interface) + if speed in NETWORK_ADAPTER_SYSCTLS: + status_set('maintenance', 'Tuning device {}'.format( + network_interface)) + sysctl_file = os.path.join( + os.sep, + 'etc', + 'sysctl.d', + '51-ceph-osd-charm-{}.conf'.format(network_interface)) + try: + log("Saving sysctl_file: {} values: {}".format( + sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), + level=DEBUG) + save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], + save_location=sysctl_file) + except IOError as e: + log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " + "failed. {}".format(network_interface, e.message), + level=ERROR) + + try: + # Apply the settings + log("Applying sysctl settings", level=DEBUG) + subprocess.check_output(["sysctl", "-p", sysctl_file]) + except subprocess.CalledProcessError as err: + log('sysctl -p {} failed with error {}'.format(sysctl_file, + err.output), + level=ERROR) + else: + log("No settings found for network adapter: {}".format( + network_interface), level=DEBUG) + + +def get_link_speed(network_interface): + """ + This will find the link speed for a given network device. Returns None + if an error occurs. + :param network_interface: string The network adapter interface. + :return: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e.message), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ + This will persist the hard drive settings to the /etc/hdparm.conf file + The settings_dict should be in the form of {"uuid": {"key":"value"}} + :param settings_dict: dict of settings to save + """ + hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + try: + with open(hdparm_path, 'w') as hdparm: + hdparm.write(render_template('hdparm.conf', settings_dict)) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=hdparm_path, + error=err.message), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """ + This function sets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(max_sectors_size) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """ + This function gets the max_sectors_kb size of a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_sectors_kb or 0 on error. 
+ """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """ + This function gets the max_hw_sectors_kb for a given block device. + :param dev_name: Name of the block device to query + :return: int which is either the max_hw_sectors_kb or 0 on error. + """ + max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( + max_hw_sectors_kb_path, e.message), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """ + This function sets the hard drive read ahead. + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """ + This queries blkid to get the uuid for a block device. + :param block_dev: Name of the block device to query. + :return: The UUID of the device or None on Error. + """ + try: + block_info = subprocess.check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """ + Tune the max_hw_sectors if needed. + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. 
+ :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """ + Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + This function will change the read ahead sectors and the max write + sectors for each block device. + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. 
Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_tree(service): + """ + Returns the current osd map in JSON. + :return: List. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + child_ids = json_tree['nodes'][0]['children'] + for child in json_tree['nodes']: + if child['id'] in child_ids: + crush_list.append( + CrushLocation( + name=child.get('name'), + identifier=child['id'], + host=child.get('host'), + rack=child.get('rack'), + row=child.get('row'), + datacenter=child.get('datacenter'), + chassis=child.get('chassis'), + root=child.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_local_osd_ids(): + """ + This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. A list of osd identifiers :raise: OSError if + something goes wrong with listing the directory. + """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + if _is_int(osd_id): + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + +def get_local_mon_ids(): + """ + This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list + + :return: list. 
A list of monitor identifiers :raise: OSError if
+    something goes wrong with listing the directory.
+    """
+    mon_ids = []
+    mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon')
+    if os.path.exists(mon_path):
+        try:
+            dirs = os.listdir(mon_path)
+            for mon_dir in dirs:
+                # Basically this takes everything after ceph- as the monitor ID
+                match = re.search('ceph-(?P<mon_id>.*)', mon_dir)
+                if match:
+                    mon_ids.append(match.group('mon_id'))
+        except OSError:
+            raise
+    return mon_ids
+
+
+def _is_int(v):
+    """Return True if the object v can be turned into an integer."""
+    try:
+        int(v)
+        return True
+    except ValueError:
+        return False
+
+
+def get_version():
+    """Derive Ceph release from an installed package."""
+    import apt_pkg as apt
+
+    cache = apt_cache()
+    package = "ceph"
+    try:
+        pkg = cache[package]
+    except:
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation ' \
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+
+    # x.y match only for 20XX.X
+    # and ignore patch level for other packages
+    match = re.match('^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+    return float(vers)
+
+
+def error_out(msg):
+    log("FATAL ERROR: %s" % msg,
+        level=ERROR)
+    sys.exit(1)
+
+
+def is_quorum():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(subprocess.check_output(cmd))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        if result['state'] in QUORUM:
+            return True
+        else:
+            return False
+    else:
+        return False
+
+
+def is_leader():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(subprocess.check_output(cmd))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        if result['state'] == LEADER:
+            return True
+        else:
+            return False
+    else:
+        return False
+
+
+def wait_for_quorum():
+    while not is_quorum():
+        log("Waiting for quorum to be reached")
+        time.sleep(3)
+
+
+def add_bootstrap_hint(peer):
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "add_bootstrap_peer_hint",
+        peer
+    ]
+    if os.path.exists(asok):
+        # Ignore any errors for this call
+        subprocess.call(cmd)
+
+
+DISK_FORMATS = [
+    'xfs',
+    'ext4',
+    'btrfs'
+]
+
+CEPH_PARTITIONS = [
+    '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D',  # ceph encrypted osd data
+    '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D',  # ceph osd data
+    '45B0969E-9B03-4F30-B4C6-B4B80CEFF106',  # ceph osd journal
+]
+
+
+def umount(mount_point):
+    """
+    This function unmounts a mounted directory forcibly. This will
+    be used for unmounting broken hard drive mounts which may hang.
+    If umount returns EBUSY this will lazy unmount.
+    :param mount_point: str. A String representing the filesystem mount point
+    :return: int. Returns 0 on success. errno otherwise.
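+
+    The flag values passed to libc below are MNT_FORCE (1) and
+    MNT_DETACH (2) from <sys/mount.h>. A standalone sketch of the same
+    idea, under two assumptions: Linux/glibc (which also exports the
+    flags-taking umount2() entry point) and Python 3, where the path
+    must be handed to libc as bytes:
+
+        import ctypes
+        import ctypes.util  # not pulled in by "import ctypes" alone
+
+        MNT_FORCE, MNT_DETACH = 1, 2
+        libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
+
+        def lazy_umount(mount_point):
+            # Try a lazy detach; 0 on success, errno otherwise.
+            if libc.umount2(mount_point.encode(), MNT_DETACH) < 0:
+                return ctypes.get_errno()
+            return 0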
+ """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """ + This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. Example: /dev/sda + :param osd_format: + :param osd_journal: + :param reformat_osd: + :param ignore_errors: + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]), + # umount if still mounted + ret = umount(mount_point) + if ret < 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + +def is_osd_disk(dev): + try: + info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError: + pass + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = 
"/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + key = element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(): + return get_named_key('radosgw.gateway', _radosgw_caps) + + +_default_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} + +admin_caps = { + 'mds': ['allow'], + 'mon': ['allow *'], + 'osd': ['allow *'] +} + +osd_upgrade_caps = { + 'mon': ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ] +} + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', 'get-or-create', 'client.{}'.format(name), + ] + # Add capabilities + for subsystem, subcaps in 
caps.iteritems(): + cmd.extend([ + subsystem, + '; '.join(subcaps), + ]) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.iteritems(): + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + + +def bootstrap_monitor_cluster(secret): + hostname = get_unit_hostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + except: + raise + finally: + os.unlink(keyring) + + +def update_monfs(): + hostname = get_unit_hostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def maybe_zap_journal(journal_dev): + if is_osd_disk(journal_dev): + log('Looks like {} is already an OSD data' + ' or journal, skipping.'.format(journal_dev)) + return + zap_disk(journal_dev) + log("Zapped journal device {}".format(journal_dev)) + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = subprocess.check_output(cmd).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def find_least_used_journal(journal_devices): + usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, + reformat_osd, ignore_errors, encrypt) + else: + osdize_dir(dev, encrypt) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev) and not 
reformat_osd: + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + status_set('maintenance', 'Initializing device {}'.format(dev)) + cmd = ['ceph-disk', 'prepare'] + # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + if cmp_pkgrevno('ceph', '0.48.3') >= 0: + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + if reformat_osd: + cmd.append('--zap-disk') + cmd.append(dev) + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + if reformat_osd: + zap_disk(dev) + + try: + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + raise e + + +def osdize_dir(path, encrypt=False): + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if cmp_pkgrevno('ceph', "0.56.6") < 0: + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + raise + + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) + cmd = [ + 'sudo', '-u', ceph_user(), + 'ceph-disk', + 'prepare', + '--data-dir', + path + ] + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + log("osdize dir cmd: {}".format(cmd)) + subprocess.check_call(cmd) + + +def filesystem_mounted(fs): + return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 + + +def get_running_osds(): + """Returns a list of the pids of the current running OSD daemons""" + cmd = ['pgrep', 'ceph-osd'] + try: + result = subprocess.check_output(cmd) + return result.split() + except subprocess.CalledProcessError: + return [] diff --git a/ceph-osd/lib/ceph/ceph/__init__.py b/ceph-osd/lib/ceph/ceph/__init__.py deleted file mode 100644 index 9847ec9e..00000000 --- a/ceph-osd/lib/ceph/ceph/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'chris' diff --git a/ceph-osd/lib/ceph/ceph/ceph.py b/ceph-osd/lib/ceph/ceph/ceph.py deleted file mode 100644 index 4b68e039..00000000 --- a/ceph-osd/lib/ceph/ceph/ceph.py +++ /dev/null @@ -1,1183 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import ctypes -import json -import subprocess -import time -import os -import re -import sys -import errno -import shutil - -from charmhelpers.core import hookenv - -from charmhelpers.core.host import ( - mkdir, - chownr, - service_restart, - lsb_release, - cmp_pkgrevno, service_stop, mounts) -from charmhelpers.core.hookenv import ( - log, - ERROR, - cached, - status_set, - WARNING, DEBUG) -from charmhelpers.core.services import render_template -from charmhelpers.fetch import ( - apt_cache -) - -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - zap_disk, - is_device_mounted) -from utils import ( - get_unit_hostname, -) - - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -def save_sysctls(sysctl_dict, save_location): - """ - Persist the sysctls to the hard drive. - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raise: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) - raise - - -def tune_nic(network_interface): - """ - This will set optimal sysctls for the particular network adapter. - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. 
{}".format(network_interface, e.message), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """ - This will find the link speed for a given network device. Returns None - if an error occurs. - :param network_interface: string The network adapter interface. - :return: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? - if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e.message), level='error') - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """ - This will persist the hard drive settings to the /etc/hdparm.conf file - The settings_dict should be in the form of {"uuid": {"key":"value"}} - :param settings_dict: dict of settings to save - """ - hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') - try: - with open(hdparm_path, 'w') as hdparm: - hdparm.write(render_template('hdparm.conf', settings_dict)) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=hdparm_path, - error=err.message), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """ - This function sets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(max_sectors_size) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """ - This function gets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """ - This function gets the max_hw_sectors_kb for a given block device. 
- :param dev_name: Name of the block device to query - :return: int which is either the max_hw_sectors_kb or 0 on error. - """ - max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """ - This function sets the hard drive read ahead. - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. - """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - subprocess.check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """ - This queries blkid to get the uuid for a block device. - :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. - """ - try: - block_info = subprocess.check_output( - ['blkid', '-o', 'export', block_dev]) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """ - Tune the max_hw_sectors if needed. - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. 
No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """ - Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - This function will change the read ahead sectors and the max write - sectors for each block device. - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" - - -class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name - self.identifier = identifier - self.host = host - self.rack = rack - self.row = row - self.datacenter = datacenter - self.chassis = chassis - self.root = root - - def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_tree(service): - """ - Returns the current osd map in JSON. - :return: List. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = subprocess.check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) - ) - return crush_list - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def get_local_osd_ids(): - """ - This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of osd identifiers :raise: OSError if - something goes wrong with listing the directory. 
- """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """ - This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of monitor identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search('ceph-(?P.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - cache = apt_cache() - package = "ceph" - try: - pkg = cache[package] - except: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: %s" % msg, - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for quorum to be reached") - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - 
'4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal -] - - -def umount(mount_point): - """ - This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - If umount returns EBUSY this will lazy unmount. - :param mount_point: str. A String representing the filesystem mount point - :return: int. Returns 0 on success. errno otherwise. - """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """ - This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]), - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - -def is_osd_disk(dev): - try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError: - pass - return False - - -def start_osds(devices): - # Scan for ceph block devices - rescan_osd_devices() - if 
cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = subprocess.check_output(cmd) - - return "{}==".format(res.split('=')[1].strip()) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) - - -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} - -admin_caps = { - 'mds': ['allow'], - 'mon': ['allow *'], - 'osd': ['allow *'] -} - -osd_upgrade_caps = { - 'mon': ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow 
command "osd rm"', - 'allow command "auth del"', - ] -} - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps): - """ Upgrade key to have capabilities caps """ - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.iteritems(): - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') - - -def bootstrap_monitor_cluster(secret): - hostname = get_unit_hostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - except: - raise - finally: - os.unlink(keyring) - - -def update_monfs(): - hostname = get_unit_hostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): - log('Looks like {} is already an OSD data' - ' or journal, skipping.'.format(journal_dev)) - return - zap_disk(journal_dev) - log("Zapped journal device {}".format(journal_dev)) - - -def get_partitions(dev): - cmd = ['partx', '--raw', '--noheadings', dev] - try: - out = subprocess.check_output(cmd).splitlines() - log("get partitions: {}".format(out), level=DEBUG) - return out - except subprocess.CalledProcessError as e: - log("Can't get info for {0}: {1}".format(dev, e.output)) - return [] - - -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): - if 
dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt) - else: - osdize_dir(dev, encrypt) - - -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev) and not reformat_osd: - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - if reformat_osd: - cmd.append('--zap-disk') - cmd.append(dev) - if osd_journal: - least_used = find_least_used_journal(osd_journal) - cmd.append(least_used) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) - - try: - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - raise e - - -def osdize_dir(path, encrypt=False): - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return - - if cmp_pkgrevno('ceph', "0.56.6") < 0: - log('Unable to use directories for OSDs with ceph < 0.56.6', - level=ERROR) - raise - - mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - chownr('/var/lib/ceph', ceph_user(), ceph_user()) - cmd = [ - 'sudo', '-u', ceph_user(), - 'ceph-disk', - 'prepare', - '--data-dir', - path - ] - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - log("osdize dir cmd: {}".format(cmd)) - subprocess.check_call(cmd) - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - -def get_running_osds(): - """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] - try: - result = subprocess.check_output(cmd) - return result.split() - except subprocess.CalledProcessError: - return [] diff --git a/ceph-osd/lib/ceph/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py similarity index 99% rename from ceph-osd/lib/ceph/ceph/ceph_broker.py rename to ceph-osd/lib/ceph/ceph_broker.py index da6c3424..d55e570b 100644 --- a/ceph-osd/lib/ceph/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -22,7 +22,7 @@ INFO, ERROR, ) -from charmhelpers.contrib.storage.linux.ceph import ( +from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, erasure_profile_exists, @@ -191,7 +191,7 @@ def handle_replicated_pool(request, service): replicas = request.get('replicas') quota = request.get('max-bytes') weight = request.get('weight') - + # Optional params pg_num = request.get('pg_num') if pg_num: diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index 53109403..ce919382 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ 
b/ceph-osd/unit_tests/test_replace_osd.py @@ -18,7 +18,7 @@ from mock import call, Mock, patch import test_utils -from ceph.ceph import ceph +import ceph import replace_osd TO_PATCH = [ @@ -73,13 +73,13 @@ def test_umount(self): ]) assert ret == 0 - @patch('ceph.ceph.ceph.mounts') - @patch('ceph.ceph.ceph.subprocess') - @patch('ceph.ceph.ceph.umount') - @patch('ceph.ceph.ceph.osdize') - @patch('ceph.ceph.ceph.shutil') - @patch('ceph.ceph.ceph.systemd') - @patch('ceph.ceph.ceph.ceph_user') + @patch('ceph.mounts') + @patch('ceph.subprocess') + @patch('ceph.umount') + @patch('ceph.osdize') + @patch('ceph.shutil') + @patch('ceph.systemd') + @patch('ceph.ceph_user') def test_replace_osd(self, ceph_user, systemd, diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index 84358e53..61a69443 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -1,7 +1,7 @@ __author__ = 'Chris Holcombe ' from mock import patch, call import test_utils -from ceph.ceph import ceph +import ceph TO_PATCH = [ 'hookenv', @@ -16,8 +16,8 @@ def setUp(self): super(PerformanceTestCase, self).setUp(ceph, TO_PATCH) def test_tune_nic(self): - with patch('ceph.ceph.ceph.get_link_speed', return_value=10000): - with patch('ceph.ceph.ceph.save_sysctls') as save_sysctls: + with patch('ceph.get_link_speed', return_value=10000): + with patch('ceph.save_sysctls') as save_sysctls: ceph.tune_nic('eth0') save_sysctls.assert_has_calls( [ @@ -49,12 +49,12 @@ def test_get_block_uuid(self): uuid = ceph.get_block_uuid('/dev/sda1') self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') - @patch('ceph.ceph.ceph.persist_settings') - @patch('ceph.ceph.ceph.set_hdd_read_ahead') - @patch('ceph.ceph.ceph.get_max_sectors_kb') - @patch('ceph.ceph.ceph.get_max_hw_sectors_kb') - @patch('ceph.ceph.ceph.set_max_sectors_kb') - @patch('ceph.ceph.ceph.get_block_uuid') + @patch('ceph.persist_settings') + @patch('ceph.set_hdd_read_ahead') + @patch('ceph.get_max_sectors_kb') + @patch('ceph.get_max_hw_sectors_kb') + @patch('ceph.set_max_sectors_kb') + @patch('ceph.get_block_uuid') def test_tune_dev(self, block_uuid, set_max_sectors_kb, @@ -84,12 +84,12 @@ def test_tune_dev(self, call('maintenance', 'Finished tuning device /dev/sda') ]) - @patch('ceph.ceph.ceph.persist_settings') - @patch('ceph.ceph.ceph.set_hdd_read_ahead') - @patch('ceph.ceph.ceph.get_max_sectors_kb') - @patch('ceph.ceph.ceph.get_max_hw_sectors_kb') - @patch('ceph.ceph.ceph.set_max_sectors_kb') - @patch('ceph.ceph.ceph.get_block_uuid') + @patch('ceph.persist_settings') + @patch('ceph.set_hdd_read_ahead') + @patch('ceph.get_max_sectors_kb') + @patch('ceph.get_max_hw_sectors_kb') + @patch('ceph.set_max_sectors_kb') + @patch('ceph.get_block_uuid') def test_tune_dev_2(self, block_uuid, set_max_sectors_kb, diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index 7fca5918..a3a6f260 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -16,7 +16,7 @@ from mock import patch, call, MagicMock -from ceph.ceph.ceph import CrushLocation +from ceph import CrushLocation import test_utils import ceph_hooks From bd49b765562114a82e1ce71d53ad410aefab1c22 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 10 Aug 2016 10:58:21 -0700 Subject: [PATCH 1186/2699] Allow multiple rolling upgrades The rolling upgrade code sets keys in the ceph monitor cluster to discover whether it can upgrade itself. 
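In rough terms the gate works like this (an illustrative sketch only, not the exact charm code; it reuses the monitor_key_set/monitor_key_exists helpers and upgrade_monitor() visible in the diff below): each unit records a start marker before it upgrades and a done marker once it finishes, and every unit except the first blocks until its predecessor's done marker appears.

    # Sketch of the serialized roll, gated on keys stored in the mon cluster.
    def rolling_upgrade(my_name, previous_node=None):
        if previous_node is not None:
            # Block until the node ahead of us reports completion.
            while not monitor_key_exists('admin', "{}_done".format(previous_node)):
                time.sleep(30)  # the charm also times out on dead peers
        monitor_key_set('admin', "{}_start".format(my_name), time.time())
        upgrade_monitor()  # the actual package upgrade and daemon restart
        monitor_key_set('admin', "{}_done".format(my_name), time.time())
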
This patch addresses an issue where the upgrade code was not taking into account multiple upgrades to newer ceph versions in a row. Closes-Bug: 1611719 Change-Id: Icc7c2f48517b9b4bc7d6526e2c89756341296054 --- ceph-mon/hooks/ceph_hooks.py | 31 +++++++++++++++--------- ceph-mon/unit_tests/test_upgrade_roll.py | 25 +++++++++++-------- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 45df5bdb..112edda3 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -137,11 +137,14 @@ def check_for_upgrade(): pretty_print_upgrade_paths())) -def lock_and_roll(my_name): +def lock_and_roll(my_name, version): start_timestamp = time.time() - log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) - monitor_key_set('admin', "{}_start".format(my_name), start_timestamp) + log('monitor_key_set {}_{}_start {}'.format(my_name, + version, + start_timestamp)) + monitor_key_set('admin', "{}_{}_start".format(my_name, version), + start_timestamp) log("Rolling") # This should be quick upgrade_monitor() @@ -149,16 +152,19 @@ def lock_and_roll(my_name): stop_timestamp = time.time() # Set a key to inform others I am finished - log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) - monitor_key_set('admin', "{}_done".format(my_name), stop_timestamp) + log('monitor_key_set {}_{}_done {}'.format(my_name, + version, + stop_timestamp)) + monitor_key_set('admin', "{}_{}_done".format(my_name, version), + stop_timestamp) -def wait_on_previous_node(previous_node): +def wait_on_previous_node(previous_node, version): log("Previous node is: {}".format(previous_node)) previous_node_finished = monitor_key_exists( 'admin', - "{}_done".format(previous_node)) + "{}_{}_done".format(previous_node, version)) while previous_node_finished is False: log("{} is not finished. Waiting".format(previous_node)) @@ -172,7 +178,7 @@ def wait_on_previous_node(previous_node): current_timestamp = time.time() previous_node_start_time = monitor_key_get( 'admin', - "{}_start".format(previous_node)) + "{}_{}_start".format(previous_node, version)) if (current_timestamp - (10 * 60)) > previous_node_start_time: # Previous node is probably dead. Lets move on if previous_node_start_time is not None: @@ -191,7 +197,7 @@ def wait_on_previous_node(previous_node): time.sleep(wait_time) previous_node_finished = monitor_key_exists( 'admin', - "{}_done".format(previous_node)) + "{}_{}_done".format(previous_node, version)) # Edge cases: @@ -224,14 +230,15 @@ def roll_monitor_cluster(new_version): if position == 0: # I'm first! 
Roll # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name) + lock_and_roll(my_name=my_name, version=new_version) else: # Check if the previous node has finished status_set('blocked', 'Waiting on {} to finish upgrading'.format( mon_sorted_list[position - 1])) - wait_on_previous_node(previous_node=mon_sorted_list[position - 1]) - lock_and_roll(my_name=my_name) + wait_on_previous_node(previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(my_name=my_name, version=new_version) except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py index b8edcbd7..3960a07b 100644 --- a/ceph-mon/unit_tests/test_upgrade_roll.py +++ b/ceph-mon/unit_tests/test_upgrade_roll.py @@ -26,8 +26,10 @@ mock_apt.apt_pkg = MagicMock() with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: - lambda *args, **kwargs: f(*args, **kwargs)) + mock_dec.side_effect = ( + lambda *dargs, + **dkwargs: lambda f: lambda *args, + **kwargs: f(*args, **kwargs)) import ceph_hooks TO_PATCH = [ @@ -60,10 +62,10 @@ def config_side_effect(*args): def monitor_key_side_effect(*args): if args[1] == \ - 'ip-192-168-1-2_done': + 'ip-192-168-1-2_0.94.1_done': return False elif args[1] == \ - 'ip-192-168-1-2_start': + 'ip-192-168-1-2_0.94.1_start': # Return that the previous node started 9 minutes ago return previous_node_start_time @@ -89,7 +91,8 @@ def test_check_for_upgrade(self, roll_monitor_cluster): @patch('ceph_hooks.monitor_key_set') def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') + ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2', + version='0.94.1') upgrade_monitor.assert_called_once_with() def test_upgrade_monitor(self): @@ -119,6 +122,7 @@ def test_roll_monitor_cluster_second(self, get_mon_map, wait_on_previous_node, lock_and_roll): + self.ceph.get_version.return_value = "0.94.1" wait_on_previous_node.return_value = None socket.gethostname.return_value = "ip-192-168-1-3" get_mon_map.return_value = { @@ -133,11 +137,12 @@ def test_roll_monitor_cluster_second(self, ] } } - ceph_hooks.roll_monitor_cluster('0.94.1') + ceph_hooks.roll_monitor_cluster(new_version='0.94.1') self.status_set.assert_called_with( 'blocked', 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + lock_and_roll.assert_called_with(my_name="ip-192-168-1-3", + version='0.94.1') @patch.object(ceph_hooks, 'time') @patch('ceph_hooks.monitor_key_get') @@ -154,15 +159,15 @@ def fake_time(): monitor_key_get.side_effect = monitor_key_side_effect monitor_key_exists.return_value = False - ceph_hooks.wait_on_previous_node("ip-192-168-1-2") + ceph_hooks.wait_on_previous_node("ip-192-168-1-2", version='0.94.1') # Make sure we checked to see if the previous node started monitor_key_get.assert_has_calls( - [call('admin', 'ip-192-168-1-2_start')] + [call('admin', 'ip-192-168-1-2_0.94.1_start')] ) # Make sure we checked to see if the previous node was finished monitor_key_exists.assert_has_calls( - [call('admin', 'ip-192-168-1-2_done')] + [call('admin', 'ip-192-168-1-2_0.94.1_done')] ) # Make sure we waited at last once before proceeding self.log.assert_has_calls( From e2dfc88e00df79e69da00e7e99077d46fddedc3d Mon Sep 17 00:00:00 2001 From: Chris 
Holcombe Date: Wed, 24 Aug 2016 11:42:01 -0700 Subject: [PATCH 1187/2699] Add Admin Relation OpenAttic is a manager for Ceph and requires access to the cephx admin keyring. This relation can be used by any Ceph management software that needs the admin key. Change-Id: Ied73e775bdf58f226f9b7ffcc6353ed1be3ec245 --- ceph-mon/hooks/admin-relation-changed | 1 + ceph-mon/hooks/admin-relation-joined | 1 + ceph-mon/hooks/ceph_hooks.py | 18 ++++++++++++++++++ ceph-mon/lib/ceph/__init__.py | 2 +- ceph-mon/metadata.yaml | 2 ++ 5 files changed, 23 insertions(+), 1 deletion(-) create mode 120000 ceph-mon/hooks/admin-relation-changed create mode 120000 ceph-mon/hooks/admin-relation-joined diff --git a/ceph-mon/hooks/admin-relation-changed b/ceph-mon/hooks/admin-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/admin-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/admin-relation-joined b/ceph-mon/hooks/admin-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/admin-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index d78d93b8..b2fe15f5 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -488,6 +488,8 @@ def notify_radosgws(): def notify_client(): for relid in relation_ids('client'): client_relation_joined(relid) + for relid in relation_ids('admin'): + admin_relation_joined(relid) def upgrade_keys(): @@ -576,6 +578,22 @@ def radosgw_relation(relid=None, unit=None): log('mon cluster not in quorum or no osds - deferring key provision') +@hooks.hook('admin-relation-changed') +@hooks.hook('admin-relation-joined') +def admin_relation_joined(relid=None): + if ceph.is_quorum(): + log('mon cluster in quorum - providing client with keys') + data = {'key': ceph.get_named_key(name='admin', caps=ceph.admin_caps), + 'fsid': leader_get('fsid'), + 'auth': config('auth-supported'), + 'mon_hosts': " ".join(get_mon_hosts()) + } + relation_set(relation_id=relid, + relation_settings=data) + else: + log('mon cluster not in quorum - deferring key provision') + + @hooks.hook('client-relation-joined') def client_relation_joined(relid=None): if ceph.is_quorum(): diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 4b68e039..0b31ddaf 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -931,7 +931,7 @@ def get_radosgw_key(): } admin_caps = { - 'mds': ['allow'], + 'mds': ['allow *'], 'mon': ['allow *'], 'osd': ['allow *'] } diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 9c3969dd..3e7bd1e1 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -19,6 +19,8 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container + admin: + interface: ceph-admin client: interface: ceph-client osd: From b0f0c5ab748bacb7d26440c66270ff461cf7a344 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 1 Sep 2016 08:34:21 -0700 Subject: [PATCH 1188/2699] Allow multiple rolling upgrades The rolling upgrade code sets keys in the ceph osd cluster to discover whether it can upgrade itself. This patch addresses an issue where the upgrade code was not taking into account multiple upgrades to newer ceph versions in a row. 
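Concretely, the marker keys are now namespaced by the target version as well as the unit name, so markers left behind by an earlier upgrade can no longer satisfy the wait for a later one. A minimal sketch of the new key derivation (the hostname matches the unit tests below; the second version string is made up for illustration):

    def upgrade_key(host, version, suffix):
        # One marker per (host, version) pair instead of per host.
        return "{}_{}_{}".format(host, version, suffix)

    upgrade_key('ip-192-168-1-2', '0.94.1', 'done')  # 'ip-192-168-1-2_0.94.1_done'
    upgrade_key('ip-192-168-1-2', '10.2.0', 'done')  # a later upgrade waits on its own key
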
Closes-Bug: 1611719 Change-Id: I467d95f3619b9ad2a9f4f46abee4e02b5d9703da --- ceph-osd/hooks/ceph_hooks.py | 32 +++++++++++++++--------- ceph-osd/unit_tests/test_upgrade_roll.py | 20 +++++++++------ 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 28b83cb9..88cdf7c5 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -126,11 +126,15 @@ def check_for_upgrade(): pretty_print_upgrade_paths())) -def lock_and_roll(my_name): +def lock_and_roll(my_name, version): start_timestamp = time.time() - log('monitor_key_set {}_start {}'.format(my_name, start_timestamp)) - monitor_key_set('osd-upgrade', "{}_start".format(my_name), start_timestamp) + log('monitor_key_set {}_start {}'.format( + my_name, + version, + start_timestamp)) + monitor_key_set('osd-upgrade', "{}_{}_start".format(my_name, version), + start_timestamp) log("Rolling") # This should be quick upgrade_osd() @@ -138,16 +142,19 @@ def lock_and_roll(my_name): stop_timestamp = time.time() # Set a key to inform others I am finished - log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp)) - monitor_key_set('osd-upgrade', "{}_done".format(my_name), stop_timestamp) + log('monitor_key_set {}_{}_done {}'.format(my_name, + version, + stop_timestamp)) + monitor_key_set('osd-upgrade', "{}_{}_done".format(my_name, version), + stop_timestamp) -def wait_on_previous_node(previous_node): +def wait_on_previous_node(previous_node, version): log("Previous node is: {}".format(previous_node)) previous_node_finished = monitor_key_exists( 'osd-upgrade', - "{}_done".format(previous_node)) + "{}_{}_done".format(previous_node, version)) while previous_node_finished is False: log("{} is not finished. Waiting".format(previous_node)) @@ -161,7 +168,7 @@ def wait_on_previous_node(previous_node): current_timestamp = time.time() previous_node_start_time = monitor_key_get( 'osd-upgrade', - "{}_start".format(previous_node)) + "{}_{}_start".format(previous_node, version)) if (current_timestamp - (10 * 60)) > previous_node_start_time: # Previous node is probably dead. Lets move on if previous_node_start_time is not None: @@ -180,7 +187,7 @@ def wait_on_previous_node(previous_node): time.sleep(wait_time) previous_node_finished = monitor_key_exists( 'osd-upgrade', - "{}_done".format(previous_node)) + "{}_{}_done".format(previous_node, version)) def get_upgrade_position(osd_sorted_list, match_name): @@ -222,15 +229,16 @@ def roll_osd_cluster(new_version): if position == 0: # I'm first! 
Roll # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name) + lock_and_roll(my_name=my_name, version=new_version) else: # Check if the previous node has finished status_set('blocked', 'Waiting on {} to finish upgrading'.format( osd_sorted_list[position - 1].name)) wait_on_previous_node( - previous_node=osd_sorted_list[position - 1].name) - lock_and_roll(my_name=my_name) + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(my_name=my_name, version=new_version) except ValueError: log("Failed to find name {} in list {}".format( my_name, osd_sorted_list)) diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py index a3a6f260..c88ea6e1 100644 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ b/ceph-osd/unit_tests/test_upgrade_roll.py @@ -53,10 +53,10 @@ def config_side_effect(*args): def monitor_key_side_effect(*args): if args[1] == \ - 'ip-192-168-1-2_done': + 'ip-192-168-1-2_0.94.1_done': return False elif args[1] == \ - 'ip-192-168-1-2_start': + 'ip-192-168-1-2_0.94.1_start': # Return that the previous node started 9 minutes ago return previous_node_start_time @@ -82,7 +82,8 @@ def test_check_for_upgrade(self, roll_osd_cluster): @patch('ceph_hooks.monitor_key_set') def test_lock_and_roll(self, monitor_key_set, upgrade_osd): monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2') + ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2', + version='0.94.1') upgrade_osd.assert_called_once_with() def test_upgrade_osd(self): @@ -111,7 +112,8 @@ def test_roll_osd_cluster_first(self, self.ceph.get_osd_tree.return_value = "" get_upgrade_position.return_value = 0 ceph_hooks.roll_osd_cluster('0.94.1') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-2") + lock_and_roll.assert_called_with(my_name="ip-192-168-1-2", + version="0.94.1") @patch('ceph_hooks.lock_and_roll') @patch('ceph_hooks.get_upgrade_position') @@ -147,7 +149,8 @@ def test_roll_osd_cluster_second(self, self.status_set.assert_called_with( 'blocked', 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3") + lock_and_roll.assert_called_with(my_name="ip-192-168-1-3", + version="0.94.1") @patch('time.time', lambda *args: previous_node_start_time + 10 * 60 + 1) @patch('ceph_hooks.monitor_key_get') @@ -158,15 +161,16 @@ def test_wait_on_previous_node(self, monitor_key_get.side_effect = monitor_key_side_effect monitor_key_exists.return_value = False - ceph_hooks.wait_on_previous_node("ip-192-168-1-2") + ceph_hooks.wait_on_previous_node(previous_node="ip-192-168-1-2", + version='0.94.1') # Make sure we checked to see if the previous node started monitor_key_get.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_start')] + [call('osd-upgrade', 'ip-192-168-1-2_0.94.1_start')] ) # Make sure we checked to see if the previous node was finished monitor_key_exists.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_done')] + [call('osd-upgrade', 'ip-192-168-1-2_0.94.1_done')] ) # Make sure we waited at last once before proceeding self.log.assert_has_calls( From c8e3601d06459a9f1cc6dd8a808367f0ae80180e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 6 Sep 2016 22:07:23 +0000 Subject: [PATCH 1189/2699] Update tox.ini files from release-tools gold copy All OpenStack Charms now contain identical tox.ini files, not to be modified or made unique within each charm repo. 
This is to ensure consistency across charm repos in tox target naming, approach and purpose, also giving the charm dev and test experience additional consistency. Change-Id: I2b5bb5a0b101609ca3c1e26abf3ecbe5b774d57f --- ceph-osd/tox.ini | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index b73c6444..491ccf15 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -1,3 +1,6 @@ +# Classic charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. [tox] envlist = pep8,py27 skipsdist = True @@ -6,10 +9,11 @@ skipsdist = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 AMULET_SETUP_TIMEOUT=2700 -passenv = HOME TERM AMULET_HTTP_PROXY AMULET_OS_VIP install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} +whitelist_externals = juju +passenv = HOME TERM AMULET_* [testenv:py27] basepython = python2.7 @@ -20,7 +24,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests actions +commands = flake8 {posargs} hooks unit_tests tests actions charm-proof [testenv:venv] @@ -72,4 +76,4 @@ commands = [flake8] ignore = E402,E226 -exclude = hooks/charmhelpers +exclude = */charmhelpers From 7d0ca12d95bff3eca0b4663e841b9c0d0c457aa2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 1 Sep 2016 09:52:21 -0400 Subject: [PATCH 1190/2699] Fix issue with partition boundaries. os.rename does not support moving files over partition boundaries. The charm fails to deploy when /var is on a separate partition. Using shutil instead. Closes-Bug: 1618034 Change-Id: Ifd45425a623fe55fff33299134af5741f587604d --- ceph-osd/hooks/ceph_hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 28b83cb9..40fd78ed 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +import shutil import random import subprocess import sys @@ -403,7 +404,7 @@ def write_zapped_journals(journal_devs): log("write zapped: {}".format(journal_devs), level=DEBUG) zapfile.write('\n'.join(sorted(list(journal_devs)))) - os.rename(tmpfile, JOURNAL_ZAPPED) + shutil.move(tmpfile, JOURNAL_ZAPPED) def check_overlap(journaldevs, datadevs): From 37fb5b640c1b58325d364039cf5853de780598fe Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 8 Sep 2016 22:09:24 +0000 Subject: [PATCH 1191/2699] Update tox.ini files from release-tools gold copy All OpenStack Charms now contain identical tox.ini files, not to be modified or made unique within each charm repo. This is to ensure consistency across charm repos in tox target naming, approach and purpose, also giving the charm dev and test experience additional consistency. Also create empty dirs with .keep files where necessary. Some classic charms have actions and/or lib dirs, and some do not. In all classic charms, flake will now check those dirs to ensure lint coverage of existing or future content. 
Change-Id: I345e0b5f365bde8b7331a9dab53deb6984ec96cd --- ceph-mon/tox.ini | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 16d4fe94..d8d8d038 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -1,3 +1,6 @@ +# Classic charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. [tox] envlist = pep8,py27 skipsdist = True @@ -5,12 +8,13 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=2700 -passenv = HOME TERM AMULET_* install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -sitepackages = False +whitelist_externals = juju +passenv = HOME TERM AMULET_* [testenv:py27] basepython = python2.7 @@ -21,7 +25,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} actions hooks unit_tests tests +commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] @@ -53,6 +57,15 @@ deps = -r{toxinidir}/requirements.txt commands = bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy +[testenv:func27-dfs] +# Charm Functional Test +# Run all deploy-from-source tests which are +x (may not always pass!) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + [testenv:func27-dev] # Charm Functional Test # Run all development test targets which are +x (may not always pass!) @@ -64,4 +77,4 @@ commands = [flake8] ignore = E402,E226 -exclude = hooks/charmhelpers +exclude = */charmhelpers From 8ba26478eafda524e7923d7bad7f2368d69ad091 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 8 Sep 2016 22:09:26 +0000 Subject: [PATCH 1192/2699] Update tox.ini files from release-tools gold copy All OpenStack Charms now contain identical tox.ini files, not to be modified or made unique within each charm repo. This is to ensure consistency across charm repos in tox target naming, approach and purpose, also giving the charm dev and test experience additional consistency. Also create empty dirs with .keep files where necessary. Some classic charms have actions and/or lib dirs, and some do not. In all classic charms, flake will now check those dirs to ensure lint coverage of existing or future content. Manually fix up test names, Makefile and other legacy artifacts remaining from basic ceph-proxy on an old copy of ceph-mon. 
Change-Id: I54eb35d0e449f203237bdc9f065551a1fbc887a0 --- ceph-proxy/Makefile | 7 +-- ceph-proxy/lib/.keep | 3 + ceph-proxy/test-requirements.txt | 16 ++++++ ceph-proxy/tests/.gitkeep | 0 ...e-icehouse => gate-basic-precise-icehouse} | 0 ...ty-icehouse => gate-basic-trusty-icehouse} | 0 ...sic-trusty-juno => gate-basic-trusty-juno} | 0 ...sic-trusty-kilo => gate-basic-trusty-kilo} | 0 ...usty-liberty => gate-basic-trusty-liberty} | 0 ...trusty-mitaka => gate-basic-trusty-mitaka} | 0 ...c-wily-liberty => gate-basic-wily-liberty} | 0 ...xenial-mitaka => gate-basic-xenial-mitaka} | 0 ceph-proxy/tests/setup/00-setup | 17 ------ ceph-proxy/tests/tests.yaml | 39 ++++++------- ceph-proxy/tox.ini | 56 ++++++++++++++++++- 15 files changed, 90 insertions(+), 48 deletions(-) create mode 100644 ceph-proxy/lib/.keep delete mode 100644 ceph-proxy/tests/.gitkeep rename ceph-proxy/tests/{014-basic-precise-icehouse => gate-basic-precise-icehouse} (100%) rename ceph-proxy/tests/{015-basic-trusty-icehouse => gate-basic-trusty-icehouse} (100%) rename ceph-proxy/tests/{016-basic-trusty-juno => gate-basic-trusty-juno} (100%) rename ceph-proxy/tests/{017-basic-trusty-kilo => gate-basic-trusty-kilo} (100%) rename ceph-proxy/tests/{018-basic-trusty-liberty => gate-basic-trusty-liberty} (100%) rename ceph-proxy/tests/{019-basic-trusty-mitaka => gate-basic-trusty-mitaka} (100%) rename ceph-proxy/tests/{020-basic-wily-liberty => gate-basic-wily-liberty} (100%) rename ceph-proxy/tests/{021-basic-xenial-mitaka => gate-basic-xenial-mitaka} (100%) delete mode 100755 ceph-proxy/tests/setup/00-setup diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index c6109cc8..f63dfd0d 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -10,8 +10,7 @@ test: functional_test: @echo Starting Amulet tests... - @tests/setup/00-setup - @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 + @tox -e func27 bin/charm_helpers_sync.py: @mkdir -p bin @@ -21,7 +20,3 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml - -publish: lint - bzr push lp:charms/ceph-osd - bzr push lp:charms/trusty/ceph-osd diff --git a/ceph-proxy/lib/.keep b/ceph-proxy/lib/.keep new file mode 100644 index 00000000..f49b91ae --- /dev/null +++ b/ceph-proxy/lib/.keep @@ -0,0 +1,3 @@ + This file was created by release-tools to ensure that this empty + directory is preserved in vcs re: lint check definitions in global + tox.ini files. This file can be removed if/when this dir is actually in use. 
diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 4faf2545..74baa120 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -7,3 +7,19 @@ flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 charm-tools>=2.0.0 requests==2.6.0 +# BEGIN: Amulet OpenStack Charm Helper Requirements +# Liberty client lower constraints +amulet>=1.14.3,<2.0 +bundletester>=0.6.1,<1.0 +python-ceilometerclient>=1.5.0,<2.0 +python-cinderclient>=1.4.0,<2.0 +python-glanceclient>=1.1.0,<2.0 +python-heatclient>=0.8.0,<1.0 +python-keystoneclient>=1.7.1,<2.0 +python-neutronclient>=3.1.0,<4.0 +python-novaclient>=2.30.1,<3.0 +python-openstackclient>=1.7.0,<2.0 +python-swiftclient>=2.6.0,<3.0 +pika>=0.10.0,<1.0 +distro-info +# END: Amulet OpenStack Charm Helper Requirements diff --git a/ceph-proxy/tests/.gitkeep b/ceph-proxy/tests/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-proxy/tests/014-basic-precise-icehouse b/ceph-proxy/tests/gate-basic-precise-icehouse similarity index 100% rename from ceph-proxy/tests/014-basic-precise-icehouse rename to ceph-proxy/tests/gate-basic-precise-icehouse diff --git a/ceph-proxy/tests/015-basic-trusty-icehouse b/ceph-proxy/tests/gate-basic-trusty-icehouse similarity index 100% rename from ceph-proxy/tests/015-basic-trusty-icehouse rename to ceph-proxy/tests/gate-basic-trusty-icehouse diff --git a/ceph-proxy/tests/016-basic-trusty-juno b/ceph-proxy/tests/gate-basic-trusty-juno similarity index 100% rename from ceph-proxy/tests/016-basic-trusty-juno rename to ceph-proxy/tests/gate-basic-trusty-juno diff --git a/ceph-proxy/tests/017-basic-trusty-kilo b/ceph-proxy/tests/gate-basic-trusty-kilo similarity index 100% rename from ceph-proxy/tests/017-basic-trusty-kilo rename to ceph-proxy/tests/gate-basic-trusty-kilo diff --git a/ceph-proxy/tests/018-basic-trusty-liberty b/ceph-proxy/tests/gate-basic-trusty-liberty similarity index 100% rename from ceph-proxy/tests/018-basic-trusty-liberty rename to ceph-proxy/tests/gate-basic-trusty-liberty diff --git a/ceph-proxy/tests/019-basic-trusty-mitaka b/ceph-proxy/tests/gate-basic-trusty-mitaka similarity index 100% rename from ceph-proxy/tests/019-basic-trusty-mitaka rename to ceph-proxy/tests/gate-basic-trusty-mitaka diff --git a/ceph-proxy/tests/020-basic-wily-liberty b/ceph-proxy/tests/gate-basic-wily-liberty similarity index 100% rename from ceph-proxy/tests/020-basic-wily-liberty rename to ceph-proxy/tests/gate-basic-wily-liberty diff --git a/ceph-proxy/tests/021-basic-xenial-mitaka b/ceph-proxy/tests/gate-basic-xenial-mitaka similarity index 100% rename from ceph-proxy/tests/021-basic-xenial-mitaka rename to ceph-proxy/tests/gate-basic-xenial-mitaka diff --git a/ceph-proxy/tests/setup/00-setup b/ceph-proxy/tests/setup/00-setup deleted file mode 100755 index 94e5611f..00000000 --- a/ceph-proxy/tests/setup/00-setup +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -ex - -sudo add-apt-repository --yes ppa:juju/stable -sudo apt-get update --yes -sudo apt-get install --yes amulet \ - distro-info-data \ - python-cinderclient \ - python-distro-info \ - python-glanceclient \ - python-heatclient \ - python-keystoneclient \ - python-neutronclient \ - python-novaclient \ - python-pika \ - python-swiftclient diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 49e721b3..e3185c6d 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,22 +1,17 @@ -bootstrap: true -reset: false -virtualenv: true -makefile: - - lint - - test 
-sources: - - ppa:juju/stable -packages: - - amulet - - distro-info-data - - python-ceilometerclient - - python-cinderclient - - python-distro-info - - python-glanceclient - - python-heatclient - - python-keystoneclient - - python-neutronclient - - python-novaclient - - python-pika - - python-swiftclient - - python-nose \ No newline at end of file +# Bootstrap the model if necessary. +bootstrap: True +# Re-use bootstrap node instead of destroying/re-bootstrapping. +reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. +#python-packages: diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 9c02ada3..d8d8d038 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -1,3 +1,6 @@ +# Classic charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. [tox] envlist = pep8,py27 skipsdist = True @@ -5,10 +8,13 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} + AMULET_SETUP_TIMEOUT=2700 install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} -sitepackages = False +whitelist_externals = juju +passenv = HOME TERM AMULET_* [testenv:py27] basepython = python2.7 @@ -19,12 +25,56 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} actions hooks unit_tests tests +commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] commands = {posargs} +[testenv:func27-noop] +# DRY RUN - For Debug +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + +[testenv:func27] +# Charm Functional Test +# Run all gate tests which are +x (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + +[testenv:func27-smoke] +# Charm Functional Test +# Run a specific test as an Amulet smoke test (expected to always pass) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + +[testenv:func27-dfs] +# Charm Functional Test +# Run all deploy-from-source tests which are +x (may not always pass!) +basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + +[testenv:func27-dev] +# Charm Functional Test +# Run all development test targets which are +x (may not always pass!) 
+basepython = python2.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + [flake8] ignore = E402,E226 -exclude = hooks/charmhelpers +exclude = */charmhelpers From 6c371ce4ba896e0151b6d836206ceecc57f6df65 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 8 Sep 2016 22:09:27 +0000 Subject: [PATCH 1193/2699] Update tox.ini files from release-tools gold copy All OpenStack Charms now contain identical tox.ini files, not to be modified or made unique within each charm repo. This is to ensure consistency across charm repos in tox target naming, approach and purpose, also giving the charm dev and test experience additional consistency. Also create empty dirs with .keep files where necessary. Some classic charms have actions and/or lib dirs, and some do not. In all classic charms, flake will now check those dirs to ensure lint coverage of existing or future content. Change-Id: I23cfd47fcee90678b4ba818ddc1d9ace4edd54f3 --- ceph-radosgw/lib/.keep | 3 +++ ceph-radosgw/tox.ini | 11 ++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 ceph-radosgw/lib/.keep diff --git a/ceph-radosgw/lib/.keep b/ceph-radosgw/lib/.keep new file mode 100644 index 00000000..f49b91ae --- /dev/null +++ b/ceph-radosgw/lib/.keep @@ -0,0 +1,3 @@ + This file was created by release-tools to ensure that this empty + directory is preserved in vcs re: lint check definitions in global + tox.ini files. This file can be removed if/when this dir is actually in use. diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index b73c6444..d8d8d038 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -1,3 +1,6 @@ +# Classic charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. [tox] envlist = pep8,py27 skipsdist = True @@ -5,11 +8,13 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=2700 -passenv = HOME TERM AMULET_HTTP_PROXY AMULET_OS_VIP install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} +whitelist_externals = juju +passenv = HOME TERM AMULET_* [testenv:py27] basepython = python2.7 @@ -20,7 +25,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests actions +commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] @@ -72,4 +77,4 @@ commands = [flake8] ignore = E402,E226 -exclude = hooks/charmhelpers +exclude = */charmhelpers From 985e4b97b9a11bcf8ccd55d7bafaf82a6a904dff Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 9 Sep 2016 19:42:53 +0000 Subject: [PATCH 1194/2699] Update tox.ini files from release-tools gold copy All OpenStack Charms now contain identical tox.ini files, not to be modified or made unique within each charm repo. This is to ensure consistency across charm repos in tox target naming, approach and purpose, also giving the charm dev and test experience additional consistency. Also create empty dirs with .keep files where necessary. Some classic charms have actions and/or lib dirs, and some do not. In all classic charms, flake will now check those dirs to ensure lint coverage of existing or future content. 
Change-Id: I657e3c0c0bdb372cb4e005045d7469b806fd5e3d --- ceph-osd/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 491ccf15..d8d8d038 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -8,6 +8,7 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=2700 install_command = pip install --allow-unverified python-apt {opts} {packages} @@ -24,7 +25,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions +commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] From 9cec559dc7889748f92989467aa45516cecbeb81 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 14 Sep 2016 09:19:09 +0100 Subject: [PATCH 1195/2699] Add charm series data to metadata.yaml This optimizes the publication process to the charm store, and ensures that the charm code and version is consistent across all supported Ubuntu series. Change-Id: I4dfce514ccad4ee98ee724e4365eef846a98267b --- ceph-mon/metadata.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 3e7bd1e1..a22e2b0a 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -9,6 +9,11 @@ tags: - storage - file-servers - misc +series: + - xenial + - trusty + - precise + - yakkety peers: mon: interface: ceph From 555fb3ac10cd074a019fd1f487f6578bfe0cc70c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 14 Sep 2016 09:19:53 +0100 Subject: [PATCH 1196/2699] Add charm series data to metadata.yaml This optimizes the publication process to the charm store, and ensures that the charm code and version is consistent across all supported Ubuntu series. Change-Id: Ifff786758eab3b5b06d68afb6b07d842a0377219 --- ceph-osd/metadata.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 01815ea0..75447355 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -10,6 +10,11 @@ tags: - storage - file-servers - misc +series: + - xenial + - trusty + - precise + - yakkety description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. @@ -30,4 +35,4 @@ storage: osd-journals: type: block multiple: - range: 0- \ No newline at end of file + range: 0- From f1e2f132d749ea73ca95620b8484b6a65e73c5bd Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 14 Sep 2016 09:20:40 +0100 Subject: [PATCH 1197/2699] Add charm series data to metadata.yaml This optimizes the publication process to the charm store, and ensures that the charm code and version is consistent across all supported Ubuntu series. 
Change-Id: I0421eb6a4093793d7bda69b06ba4b976b2573e10 --- ceph-radosgw/metadata.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a7f215b5..06ed8e62 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -12,6 +12,11 @@ tags: - storage - file-servers - misc +series: + - xenial + - trusty + - precise + - yakkety extra-bindings: public: admin: From 692a0d7fc1b865af5cf8fade34b4521257ea3d65 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Sep 2016 09:33:40 -0700 Subject: [PATCH 1198/2699] nearly complete --- ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/metadata.yaml | 11 +- ceph-fs/src/reactive/ceph_fs.py | 140 ++++++++++++++++++++++++++ ceph-fs/src/reactive/charm_ceph_fs.py | 71 ------------- ceph-fs/src/templates/ceph.conf | 19 +--- 5 files changed, 151 insertions(+), 92 deletions(-) create mode 100644 ceph-fs/src/reactive/ceph_fs.py delete mode 100644 ceph-fs/src/reactive/charm_ceph_fs.py diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index c58e1127..07b04172 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1 +1 @@ -includes: ['layer:ceph-base'] # if you use any interfaces, add them here +includes: ['layer:ceph-base', 'interface:ceph-admin'] # if you use any interfaces, add them here diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index bbe5656f..5f12fda8 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -1,4 +1,4 @@ -name: charm-ceph-fs +name: ceph-fs summary: Highly scalable distributed storage maintainer: OpenStack Charmers description: | @@ -9,8 +9,9 @@ tags: - storage - file-servers - misc +series: + - xenial subordinate: false -#provides: -#requires: - #ceph-mon-cephfs: - #interface: ceph-mon-cephfs +requires: + ceph-admin: + interface: ceph-admin diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py new file mode 100644 index 00000000..a4b62834 --- /dev/null +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -0,0 +1,140 @@ +import os +import subprocess +import json + +from charms.reactive import when, when_not, set_state + +from charms.apt import queue_install + +from charmhelpers.core.hookenv import ( + config, charm_name, + log, INFO, ERROR) + +from charmhelpers.core.host import service_restart + +from charmhelpers.contrib.network.ip import ( + get_address_in_network +) + +import jinja2 + +TEMPLATES_DIR = 'templates' + + +def render_template(template_name, context, template_dir=TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) + +@when_not('apt.installed.ceph-mds') +def install_cephfs(): + queue_install(['ceph-mds']) + + +@when('apt.installed.ceph-mds', 'ceph.installed') +@when_not('cephfs.started') +def setup_mds(): + log("I'm in setup_mds()") + # try: + # from rados import Error as RadosError + # from ceph_api import ceph_command + # except ImportError as err: + # log("rados is not installed yet: {}".format(err)) + # return + # TODO: Monitor needs a new CephFS relation + # TODO: Update with the conf file location + # osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf') + # mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf') + + try: + name = charm_name() + log("Creating cephfs_data pool", level=INFO) + data_pool = "{}_data".format(name) + # TODO: Update with better pg values + try: + subprocess.check_call(["ceph", "osd", "pool", "create", data_pool, "256"]) 
+ except subprocess.CalledProcessError as err: + log("Creating data pool failed!") + raise(err) + # osd.osd_pool_create('cephfs_data', 256) + + log("Creating cephfs_metadata pool", level=INFO) + metadata_pool = "{}_metadata".format(name) + # TODO: Update with better pg values + try: + subprocess.check_call(["ceph", "osd", "pool", "create", metadata_pool, "256"]) + except subprocess.CalledProcessError as err: + log("Creating metadata pool failed!") + raise(err) + # osd.osd_pool_create('cephfs_metadata', 256) + + log("Creating ceph fs", level=INFO) + try: + subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool]) + except subprocess.CalledProcessError as err: + log("Creating metadata pool failed!") + raise(err) + service_restart('ceph-mds') + set_state('cephfs.started') + # mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) + except subprocess.CalledProcessError as err: + log(message='Error: {}'.format(err), level=ERROR) + + +@when('ceph-admin.available') +# @when_not('cephfs.configured') +def config_changed(ceph_client): + charm_ceph_conf = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.conf') + cephx_key = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.client.admin.keyring') + + networks = get_networks('ceph-public-network') + public_network = ', '.join(networks) + + networks = get_networks('ceph-cluster-network') + cluster_network = ', '.join(networks) + + ceph_context = { + 'mon_hosts': ceph_client.mon_hosts(), + 'fsid': ceph_client.fsid(), + 'auth_supported': ceph_client.auth(), + 'use_syslog': str(config('use-syslog')).lower(), + 'ceph_public_network': public_network, + 'ceph_cluster_network': cluster_network, + 'loglevel': config('loglevel'), + } + + try: + with open(charm_ceph_conf, 'w') as ceph_conf: + ceph_conf.write(render_template('ceph.conf', ceph_context)) + except IOError as err: + log("IOError writing ceph.conf: {}".format(err)) + + try: + with open(cephx_key, 'w') as key_file: + key_file.write("[client.admin]\n\tkey = {}\n".format( + ceph_client.key() + )) + except IOError as err: + log("IOError writing ceph.client.admin.keyring: {}".format(err)) + set_state('cephfs.configured') + + +def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. 
+ """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] diff --git a/ceph-fs/src/reactive/charm_ceph_fs.py b/ceph-fs/src/reactive/charm_ceph_fs.py deleted file mode 100644 index b661fe75..00000000 --- a/ceph-fs/src/reactive/charm_ceph_fs.py +++ /dev/null @@ -1,71 +0,0 @@ -from charms.reactive import when - -from charmhelpers.core.hookenv import ( - config, - log, INFO, ERROR) - -from charmhelpers.contrib.network.ip import ( - get_address_in_network -) - -@when('ceph.installed') -# @when('ceph-mon.available') -def setup_mds(mon): - log("I'm in setup_mds()") - try: - from rados import Error as RadosError - from ceph_api import ceph_command - except ImportError as err: - log("rados is not installed yet: {}".format(err)) - return - # TODO: Monitor needs a new CephFS relation - # TODO: Update with the conf file location - osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf') - mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf') - - try: - log("Creating cephfs_data pool", level=INFO) - # TODO: Update with better pg values - osd.osd_pool_create('cephfs_data', 256) - - log("Creating cephfs_metadata pool", level=INFO) - # TODO: Update with better pg values - osd.osd_pool_create('cephfs_metadata', 256) - - log("Creating ceph fs", level=INFO) - mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) - except RadosError as err: - log(message='Error: {}'.format(err.message), level=ERROR) - - -@when('config.changed', 'ceph-mon.available') -def config_changed(): - networks = get_networks('ceph-public-network') - public_network = ', '.join(networks) - - networks = get_networks('ceph-cluster-network') - cluster_network = ', '.join(networks) - - cephcontext = { - # 'mon_hosts': ' '.join(get_mon_hosts()), - # 'fsid': leader_get('fsid'), - 'osd_journal_size': config('osd-journal-size'), - 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': public_network, - 'ceph_cluster_network': cluster_network, - 'loglevel': config('loglevel'), - } - - -def get_networks(config_opt='ceph-public-network'): - """Get all configured networks from provided config option. - - If public network(s) are provided, go through them and return those for - which we have an address configured. 
- """ - networks = config(config_opt) - if networks: - networks = networks.split() - return [n for n in networks if get_address_in_network(n)] - - return [] diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf index ea9b7338..cd62c5a3 100644 --- a/ceph-fs/src/templates/ceph.conf +++ b/ceph-fs/src/templates/ceph.conf @@ -1,8 +1,8 @@ + [global] auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} -{% endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} @@ -14,20 +14,9 @@ mon cluster log to syslog = {{ use_syslog }} debug mon = {{ loglevel }}/5 debug osd = {{ loglevel }}/5 -{%- if ceph_public_network is string %} -public network = {{ ceph_public_network }} -{%- endif %} -{%- if ceph_cluster_network is string %} -cluster network = {{ ceph_cluster_network }} -{%- endif %} - -{% if public_addr %} -public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} -cluster addr = {{ cluster_addr }} -{%- endif %} +[client] +log file = /var/log/ceph.log [mds] -keyring = /var/lib/ceph/mds/$cluster-$id/keyring +keyring = /etc/ceph/ceph.client.admin.keyring From f4ab931e0a56b9696f2447ff42c990453bd67cff Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Sep 2016 09:54:00 -0700 Subject: [PATCH 1199/2699] Make the admin client keyring name configurable While this changes the keyring name to be configurable, it still defaults to 'admin', maintaining backwards compatability with any existing clients. Change-Id: Ida36fc28c6b2c1cecb5e96a07f22080a24f934a0 --- ceph-mon/hooks/ceph_hooks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 0defa481..c87be4a6 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -589,8 +589,11 @@ def radosgw_relation(relid=None, unit=None): @hooks.hook('admin-relation-joined') def admin_relation_joined(relid=None): if ceph.is_quorum(): + name = relation_get('keyring-name') + if name is None: + name = 'admin' log('mon cluster in quorum - providing client with keys') - data = {'key': ceph.get_named_key(name='admin', caps=ceph.admin_caps), + data = {'key': ceph.get_named_key(name=name, caps=ceph.admin_caps), 'fsid': leader_get('fsid'), 'auth': config('auth-supported'), 'mon_hosts': " ".join(get_mon_hosts()) From 5da3c670cf357f22eb3982327ea18af97fc36b41 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 19 Sep 2016 15:28:16 -0700 Subject: [PATCH 1200/2699] Service crashes with wrong mds name but everything else works --- ceph-fs/src/layer.yaml | 3 +- ceph-fs/src/metadata.yaml | 2 + ceph-fs/src/reactive/ceph_fs.py | 74 ++++++++++++++++----------------- ceph-fs/src/templates/ceph.conf | 6 ++- 4 files changed, 46 insertions(+), 39 deletions(-) diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index 07b04172..ad554c4d 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1 +1,2 @@ -includes: ['layer:ceph-base', 'interface:ceph-admin'] # if you use any interfaces, add them here +includes: ['layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds', 'interface:/home/chris/repos/juju-interface-ceph-admin'] # if you use any interfaces, add them here +repo: git@github.com:cholcombe973/charm-ceph-fs.git diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 5f12fda8..200a4384 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -13,5 +13,7 
@@ series: - xenial subordinate: false requires: + ceph-mds: + interface: ceph-mds ceph-admin: interface: ceph-admin diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index a4b62834..e428f551 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -1,23 +1,20 @@ import os +import socket import subprocess -import json from charms.reactive import when, when_not, set_state - -from charms.apt import queue_install - from charmhelpers.core.hookenv import ( config, charm_name, log, INFO, ERROR) - from charmhelpers.core.host import service_restart - +from charmhelpers.contrib.storage.linux import ceph from charmhelpers.contrib.network.ip import ( get_address_in_network ) - import jinja2 +from charms.apt import queue_install + TEMPLATES_DIR = 'templates' @@ -27,72 +24,73 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): template = templates.get_template(template_name) return template.render(context) + @when_not('apt.installed.ceph-mds') def install_cephfs(): queue_install(['ceph-mds']) -@when('apt.installed.ceph-mds', 'ceph.installed') +@when('cephfs.configured') +@when('admin_key.saved') @when_not('cephfs.started') def setup_mds(): - log("I'm in setup_mds()") - # try: - # from rados import Error as RadosError - # from ceph_api import ceph_command - # except ImportError as err: - # log("rados is not installed yet: {}".format(err)) - # return - # TODO: Monitor needs a new CephFS relation - # TODO: Update with the conf file location - # osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf') - # mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf') - try: name = charm_name() log("Creating cephfs_data pool", level=INFO) data_pool = "{}_data".format(name) - # TODO: Update with better pg values try: - subprocess.check_call(["ceph", "osd", "pool", "create", data_pool, "256"]) + ceph.ReplicatedPool(name=data_pool, service='admin').create() except subprocess.CalledProcessError as err: log("Creating data pool failed!") - raise(err) - # osd.osd_pool_create('cephfs_data', 256) + raise err log("Creating cephfs_metadata pool", level=INFO) metadata_pool = "{}_metadata".format(name) - # TODO: Update with better pg values try: - subprocess.check_call(["ceph", "osd", "pool", "create", metadata_pool, "256"]) + ceph.ReplicatedPool(name=metadata_pool, service='admin').create() except subprocess.CalledProcessError as err: log("Creating metadata pool failed!") - raise(err) - # osd.osd_pool_create('cephfs_metadata', 256) + raise err log("Creating ceph fs", level=INFO) try: subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool]) except subprocess.CalledProcessError as err: log("Creating metadata pool failed!") - raise(err) + raise err service_restart('ceph-mds') set_state('cephfs.started') - # mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) except subprocess.CalledProcessError as err: log(message='Error: {}'.format(err), level=ERROR) @when('ceph-admin.available') -# @when_not('cephfs.configured') +def handle_admin_key(ceph_client): + cephx_key = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.client.admin.keyring') + try: + with open(cephx_key, 'w') as key_file: + key_file.write("[client.admin]\n\tkey = {}\n".format( + ceph_client.key() + )) + except IOError as err: + log("IOError writing mds-a.keyring: {}".format(err)) + set_state('admin_key.saved') + + +@when('ceph-mds.available') def config_changed(ceph_client): charm_ceph_conf = os.path.join(os.sep, 'etc', 'ceph', 
'ceph.conf') - cephx_key = os.path.join(os.sep, - 'etc', - 'ceph', - 'ceph.client.admin.keyring') + key_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mds', 'ceph-a') + if not os.path.exists(key_path): + os.makedirs(key_path) + cephx_key = os.path.join(key_path, + 'keyring') networks = get_networks('ceph-public-network') public_network = ', '.join(networks) @@ -108,6 +106,8 @@ def config_changed(ceph_client): 'ceph_public_network': public_network, 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), + 'hostname': socket.gethostname(), + 'mds_name': 'a', } try: @@ -118,11 +118,11 @@ def config_changed(ceph_client): try: with open(cephx_key, 'w') as key_file: - key_file.write("[client.admin]\n\tkey = {}\n".format( + key_file.write("[mds.a]\n\tkey = {}\n".format( ceph_client.key() )) except IOError as err: - log("IOError writing ceph.client.admin.keyring: {}".format(err)) + log("IOError writing mds-a.keyring: {}".format(err)) set_state('cephfs.configured') diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf index cd62c5a3..df795b09 100644 --- a/ceph-fs/src/templates/ceph.conf +++ b/ceph-fs/src/templates/ceph.conf @@ -18,5 +18,9 @@ debug osd = {{ loglevel }}/5 log file = /var/log/ceph.log [mds] -keyring = /etc/ceph/ceph.client.admin.keyring +keyring = /var/lib/ceph/mds/$cluster-$id/keyring + +[mds.{{ mds_name }}] +host = {{ hostname }} + From 1bd94a3c1872b2b5ddda70f7114121643e758598 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 20 Sep 2016 09:41:01 -0700 Subject: [PATCH 1201/2699] Use socket.gethostname instead of letters for mds names --- ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/metadata.yaml | 2 -- ceph-fs/src/reactive/ceph_fs.py | 47 +++++++++++++++++---------------- 3 files changed, 25 insertions(+), 26 deletions(-) diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index ad554c4d..77414bb5 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,2 +1,2 @@ -includes: ['layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds', 'interface:/home/chris/repos/juju-interface-ceph-admin'] # if you use any interfaces, add them here +includes: ['layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds'] # if you use any interfaces, add them here repo: git@github.com:cholcombe973/charm-ceph-fs.git diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 200a4384..291ff86f 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -15,5 +15,3 @@ subordinate: false requires: ceph-mds: interface: ceph-mds - ceph-admin: - interface: ceph-admin diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index e428f551..b7e5b6ed 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -4,8 +4,7 @@ from charms.reactive import when, when_not, set_state from charmhelpers.core.hookenv import ( - config, charm_name, - log, INFO, ERROR) + config, log, INFO, ERROR) from charmhelpers.core.host import service_restart from charmhelpers.contrib.storage.linux import ceph from charmhelpers.contrib.network.ip import ( @@ -31,11 +30,10 @@ def install_cephfs(): @when('cephfs.configured') -@when('admin_key.saved') @when_not('cephfs.started') def setup_mds(): try: - name = charm_name() + name = socket.gethostname() log("Creating cephfs_data pool", level=INFO) data_pool = "{}_data".format(name) try: @@ -64,33 +62,27 @@ def setup_mds(): log(message='Error: {}'.format(err), level=ERROR) -@when('ceph-admin.available') -def 
handle_admin_key(ceph_client): - cephx_key = os.path.join(os.sep, - 'etc', - 'ceph', - 'ceph.client.admin.keyring') - try: - with open(cephx_key, 'w') as key_file: - key_file.write("[client.admin]\n\tkey = {}\n".format( - ceph_client.key() - )) - except IOError as err: - log("IOError writing mds-a.keyring: {}".format(err)) - set_state('admin_key.saved') - - @when('ceph-mds.available') def config_changed(ceph_client): charm_ceph_conf = os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf') - key_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mds', 'ceph-a') + key_path = os.path.join(os.sep, + 'var', + 'lib', + 'ceph', + 'mds', + 'ceph-{}'.format(socket.gethostname()) + ) if not os.path.exists(key_path): os.makedirs(key_path) cephx_key = os.path.join(key_path, 'keyring') + admin_key = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.client.admin.keyring') networks = get_networks('ceph-public-network') public_network = ', '.join(networks) @@ -107,7 +99,7 @@ def config_changed(ceph_client): 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), 'hostname': socket.gethostname(), - 'mds_name': 'a', + 'mds_name': socket.gethostname(), } try: @@ -116,9 +108,18 @@ def config_changed(ceph_client): except IOError as err: log("IOError writing ceph.conf: {}".format(err)) + try: + with open(admin_key, 'w') as key_file: + key_file.write("[client.admin]\n\tkey = {}\n".format( + ceph_client.admin_key() + )) + except IOError as err: + log("IOError writing admin.keyring: {}".format(err)) + try: with open(cephx_key, 'w') as key_file: - key_file.write("[mds.a]\n\tkey = {}\n".format( + key_file.write("[mds.{}]\n\tkey = {}\n".format( + socket.gethostname(), ceph_client.key() )) except IOError as err: From 76aac331017ed48f815c6cc98915675d18958e17 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 20 Sep 2016 10:05:54 -0700 Subject: [PATCH 1202/2699] Add status update messages --- ceph-fs/src/reactive/ceph_fs.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index b7e5b6ed..20fc3bba 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -4,7 +4,7 @@ from charms.reactive import when, when_not, set_state from charmhelpers.core.hookenv import ( - config, log, INFO, ERROR) + config, log, INFO, ERROR, status_set) from charmhelpers.core.host import service_restart from charmhelpers.contrib.storage.linux import ceph from charmhelpers.contrib.network.ip import ( @@ -34,6 +34,7 @@ def install_cephfs(): def setup_mds(): try: name = socket.gethostname() + status_set('maintenance', "Creating cephfs data pool") log("Creating cephfs_data pool", level=INFO) data_pool = "{}_data".format(name) try: @@ -42,6 +43,7 @@ def setup_mds(): log("Creating data pool failed!") raise err + status_set('maintenance', "Creating cephfs metadata pool") log("Creating cephfs_metadata pool", level=INFO) metadata_pool = "{}_metadata".format(name) try: @@ -50,6 +52,7 @@ def setup_mds(): log("Creating metadata pool failed!") raise err + status_set('maintenance', "Creating cephfs") log("Creating ceph fs", level=INFO) try: subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool]) From dd03819aed3cb8f337df10a75547cbdf91e9e480 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 20 Sep 2016 14:03:05 +0100 Subject: [PATCH 1203/2699] Add support for application version Juju 2.0 provides support for display of the version of an application deployed by a charm in juju status. 
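Stripped to its essentials, the change is the sketch below; the names (VERSION_PACKAGE, assess_status, get_upstream_version) match the ceph-mon diff that follows, with the rest of the function body elided:

    from charmhelpers.core.hookenv import application_version_set
    from charmhelpers.fetch import get_upstream_version

    VERSION_PACKAGE = 'ceph-common'


    def assess_status():
        '''Assess status of current unit'''
        # Report the installed ceph-common version to Juju each time
        # status is assessed, so routine package updates show up too.
        application_version_set(get_upstream_version(VERSION_PACKAGE))
        # ... existing monitor-count and peer checks follow ...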
Insert the application_version_set function into the existing assess_status function - this gets called after all hook executions, and periodically after that, so any changes in package versions due to normal system updates will also be reflected in the status output. This review also includes a resync of charm-helpers to pickup hookenv support for this feature. Change-Id: Ic4c1231741383ef3cb864c3144ead211345f84ba --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/hooks/ceph_hooks.py | 10 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +- .../contrib/openstack/amulet/deployment.py | 9 +- .../contrib/openstack/amulet/utils.py | 3 +- .../charmhelpers/contrib/openstack/neutron.py | 4 + .../charmhelpers/contrib/openstack/utils.py | 21 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 14 + ceph-mon/hooks/charmhelpers/core/host.py | 87 ++--- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 +++ .../charmhelpers/core/host_factory/ubuntu.py | 56 +++ ceph-mon/hooks/charmhelpers/core/kernel.py | 36 +- .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + ceph-mon/hooks/charmhelpers/fetch/__init__.py | 324 ++--------------- ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 7 +- ceph-mon/hooks/charmhelpers/fetch/centos.py | 171 +++++++++ ceph-mon/hooks/charmhelpers/fetch/giturl.py | 7 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 336 ++++++++++++++++++ ceph-mon/hooks/charmhelpers/osplatform.py | 19 + .../contrib/openstack/amulet/deployment.py | 9 +- .../contrib/openstack/amulet/utils.py | 3 +- ceph-mon/unit_tests/test_status.py | 7 + 25 files changed, 838 insertions(+), 388 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/centos.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/ubuntu.py create mode 100644 ceph-mon/hooks/charmhelpers/osplatform.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index aa5351d9..e6d709ac 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - osplatform - cli - fetch - contrib.storage.linux diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index c87be4a6..1f30b3e1 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -45,7 +45,8 @@ service_name, relations_of_type, status_set, - local_unit) + local_unit, + application_version_set) from charmhelpers.core.host import ( service_restart, mkdir, @@ -59,7 +60,8 @@ apt_install, apt_update, filter_installed_packages, - add_source + add_source, + get_upstream_version, ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative @@ -701,8 +703,12 @@ def update_nrpe_config(): nrpe_setup.write() +VERSION_PACKAGE = 'ceph-common' + + def assess_status(): '''Assess status of current unit''' + application_version_set(get_upstream_version(VERSION_PACKAGE)) moncount = 
int(config('monitor-count')) units = get_peer_units() # not enough peers and mon_count > 1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0c39bd91..1410512a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -109,6 +109,13 @@ # def local_monitors_relation_changed(): # update_nrpe_config() # +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# # 5. ln -s hooks.py nrpe-external-master-relation-changed # ln -s hooks.py local-monitors-relation-changed @@ -221,9 +228,10 @@ class NRPE(object): nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' - def __init__(self, hostname=None): + def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() self.config = config() + self.primary = primary self.nagios_context = self.config['nagios_context'] if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] @@ -239,6 +247,12 @@ def __init__(self, hostname=None): else: self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation_ids('nrpe-external-master'): + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..d1d52137 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -220,7 +220,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +237,10 @@ def _get_openstack_release(self): ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +258,7 @@ def _get_openstack_release_string(self): ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 355c8cbb..24b353ee 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1044,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = 
pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index 03427b49..d1510dd3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -245,6 +245,10 @@ def neutron_plugins(): 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') + if release >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = '/etc/neutron/nsx.ini' return plugins diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 9d3e3d89..e1cc7687 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,7 +51,8 @@ relation_set, service_name, status_set, - hook_name + hook_name, + application_version_set, ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -80,7 +81,12 @@ service_resume, restart_on_change_helper, ) -from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.fetch import ( + apt_install, + apt_cache, + install_remote, + get_upstream_version +) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -1889,3 +1895,14 @@ def config_flags_parser(config_flags): flags[key.strip(post_strippers)] = value.rstrip(post_strippers) return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. + if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 48b2b9dc..996e81cc 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -843,6 +843,20 @@ def inner_translate_exc2(*args, **kwargs): return inner_translate_exc1 +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 53068599..0f1b2f35 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -30,13 +30,29 @@ import hashlib import functools import itertools -from contextlib import contextmanager -from collections import OrderedDict - import six +from contextlib import contextmanager +from collections import OrderedDict from .hookenv import log from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import def service_start(service_name): @@ -144,8 +160,11 @@ def service_running(service_name): return False else: # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if "start/running" in output: + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes @@ -153,18 +172,6 @@ def service_running(service_name): return False -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - SYSTEMD_SYSTEM = '/run/systemd/system' @@ -173,8 +180,9 @@ def init_is_systemd(): return os.path.isdir(SYSTEMD_SYSTEM) -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None, home_dir=None): +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None): log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) + add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info @@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, return r -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - def pwgen(length=None): """Generate a random pasword.""" if length is None: @@ -674,25 +662,6 @@ def get_nic_hwaddr(nic): return hwaddr -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py index b166efec..2d404528 100644 --- a/ceph-mon/hooks/charmhelpers/core/kernel.py +++ b/ceph-mon/hooks/charmhelpers/core/kernel.py @@ -15,15 +15,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Jorge Niedbalski " +import re +import subprocess +from charmhelpers.osplatform import get_platform from charmhelpers.core.hookenv import ( log, INFO ) -from subprocess import check_call, check_output -import re +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " def modprobe(module, persist=True): @@ -32,11 +45,9 @@ def modprobe(module, persist=True): log('Loading kernel module %s' % module, level=INFO) - check_call(cmd) + subprocess.check_call(cmd) if persist: - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) + persistent_modprobe(module) def rmmod(module, force=False): @@ -46,21 +57,16 @@ def rmmod(module, force=False): cmd.append('-f') cmd.append(module) log('Removing kernel module %s' % module, level=INFO) - return check_call(cmd) + return subprocess.check_call(cmd) def lsmod(): """Shows what kernel modules are currently loaded""" - return check_output(['lsmod'], - universal_newlines=True) + return subprocess.check_output(['lsmod'], + universal_newlines=True) def is_module_loaded(module): """Checks if a kernel module is already loaded""" matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) return len(matches) > 0 - - -def update_initramfs(version='all'): - """Updates an initramfs image""" - return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..21559642 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 52eaf824..ec5e0fe9 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ 
b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -13,18 +13,12 @@ # limitations under the License. import importlib -from tempfile import NamedTemporaryFile -import time +from charmhelpers.osplatform import get_platform from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess from charmhelpers.core.hookenv import ( config, log, ) -import os import six if six.PY3: @@ -33,87 +27,6 @@ from urlparse import urlparse, urlunparse -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', -} - # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. FETCH_HANDLERS = ( @@ -122,10 +35,6 @@ 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - class SourceConfigError(Exception): pass @@ -163,180 +72,38 @@ def base_url(self, url): return urlunparse(parts) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = 
importlib.import_module(module) -def add_source(source, key=None): - """Add a package source to this system. +filter_installed_packages = fetch.filter_installed_packages +install = fetch.install +upgrade = fetch.upgrade +update = fetch.update +purge = fetch.purge +add_source = fetch.add_source - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.install + apt_update = fetch.update + apt_upgrade = fetch.upgrade + apt_purge = fetch.purge + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + get_upstream_version = fetch.get_upstream_version +elif __platform__ == "centos": + yum_search = fetch.yum_search def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. + """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their + The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). 
Example config: @@ -368,12 +135,11 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - apt_update(fatal=True) + fetch.update(fatal=True) def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source + """Install a file tree from a remote source. The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] @@ -406,6 +172,7 @@ def install_remote(source, *args, **kwargs): def install_from_config(config_var_name): + """Install a file from config.""" charm_config = config() source = charm_config[config_var_name] return install_remote(source) @@ -428,40 +195,3 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index b3404d85..07cd0293 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -18,19 +18,20 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) from charmhelpers.core.host import mkdir if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) + install(['bzr']) if filter_installed_packages(['bzr']) != []: raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" + """Handler for bazaar branches via generic and lp URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('bzr+ssh', 'lp', ''): diff --git a/ceph-mon/hooks/charmhelpers/fetch/centos.py b/ceph-mon/hooks/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..604bbfb5 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import subprocess
+import os
+import time
+import six
+import yum
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.hookenv import log
+
+YUM_NO_LOCK = 1  # The return code for "couldn't acquire lock" in YUM.
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between yum lock checks.
+YUM_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    yb = yum.YumBase()
+    package_list = yb.doPackageLists()
+    temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+
+    _pkgs = [p for p in packages if not temp_cache.get(p, False)]
+    return _pkgs
+
+
+def install(packages, options=None, fatal=False):
+    """Install one or more packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_yum_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_yum_command(cmd, fatal)
+
+
+def update(fatal=False):
+    """Update local yum cache."""
+    cmd = ['yum', '--assumeyes', 'update']
+    log("Update with fatal: {}".format(fatal))
+    _run_yum_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+    """Purge one or more packages."""
+    cmd = ['yum', '--assumeyes', 'remove']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_yum_command(cmd, fatal)
+
+
+def yum_search(packages):
+    """Search for a package."""
+    output = {}
+    cmd = ['yum', 'search']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Searching for {}".format(packages))
+    result = subprocess.check_output(cmd)
+    for package in list(packages):
+        output[package] = package in result
+    return output
+
+
+def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL with an rpm package
+
+    @param key: A key to be added to the system's keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk.
+    """
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
+    if source.startswith('http'):
+        directory = '/etc/yum.repos.d/'
+        for filename in os.listdir(directory):
+            with open(directory + filename, 'r') as rpm_file:
+                if source in rpm_file.read():
+                    break
+        else:
+            log("Add source: {!r}".format(source))
+            # write in the charms.repo
+            with open(directory + 'Charms.repo', 'a') as rpm_file:
+                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
+                rpm_file.write('name=%s\n' % source[7:])
+                rpm_file.write('baseurl=%s\n\n' % source)
+    else:
+        log("Unknown source: {!r}".format(source))
+
+    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile('w+') as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['rpm', '--import', key_file.name])
+        else:
+            subprocess.check_call(['rpm', '--import', key])
+
+
+def _run_yum_command(cmd, fatal=False):
+    """Run a YUM command.
+
+    Checks the output and retries if the fatal flag is set to True.
+
+    :param: cmd: str: The yum command to run.
+    :param: fatal: bool: Whether the command's output should be checked and
+            retried.
+    """
+    env = os.environ.copy()
+
+    if fatal:
+        retry_count = 0
+        result = None
+
+        # If the command is considered "fatal", we need to retry if the yum
+        # lock was not acquired.
+
+        while result is None or result == YUM_NO_LOCK:
+            try:
+                result = subprocess.check_call(cmd, env=env)
+            except subprocess.CalledProcessError as e:
+                retry_count = retry_count + 1
+                if retry_count > YUM_NO_LOCK_RETRY_COUNT:
+                    raise
+                result = e.returncode
+                log("Couldn't acquire YUM lock. Will retry in {} seconds."
+                    "".format(YUM_NO_LOCK_RETRY_DELAY))
+                time.sleep(YUM_NO_LOCK_RETRY_DELAY)
+
+    else:
+        subprocess.call(cmd, env=env)
diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py
index f708d1ee..4cf21bc2 100644
--- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py
+++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py
@@ -18,17 +18,18 @@
     BaseFetchHandler,
     UnhandledSource,
     filter_installed_packages,
-    apt_install,
+    install,
 )

 if filter_installed_packages(['git']) != []:
-    apt_install(['git'])
+    install(['git'])
     if filter_installed_packages(['git']) != []:
         raise NotImplementedError('Unable to install git')


 class GitUrlFetchHandler(BaseFetchHandler):
-    """Handler for git branches via generic and github URLs"""
+    """Handler for git branches via generic and github URLs."""
+
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py
new file mode 100644
index 00000000..fce496b2
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py
@@ -0,0 +1,336 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import os +import six +import time +import subprocess + +from tempfile import NamedTemporaryFile +from charmhelpers.core.host import ( + lsb_release +) +from charmhelpers.core.hookenv import log +from charmhelpers.fetch import SourceConfigError + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 
'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', +} + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + cache = apt_cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_cache(in_memory=True, progress=None): + """Build and return an apt cache.""" + from apt import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache(progress) + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_apt_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
+    if (source.startswith('ppa:') or
+            source.startswith('http') or
+            source.startswith('deb ') or
+            source.startswith('cloud-archive:')):
+        subprocess.check_call(['add-apt-repository', '--yes', source])
+    elif source.startswith('cloud:'):
+        install(filter_installed_packages(['ubuntu-cloud-keyring']),
+                fatal=True)
+        pocket = source.split(':')[-1]
+        if pocket not in CLOUD_ARCHIVE_POCKETS:
+            raise SourceConfigError(
+                'Unsupported cloud: source option %s' %
+                pocket)
+        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+    elif source == 'proposed':
+        release = lsb_release()['DISTRIB_CODENAME']
+        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+            apt.write(PROPOSED_POCKET.format(release))
+    elif source == 'distro':
+        pass
+    else:
+        log("Unknown source: {!r}".format(source))
+
+    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile('w+') as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])
+
+
+def _run_apt_command(cmd, fatal=False):
+    """Run an APT command.
+
+    Checks the output and retries if the fatal flag is set
+    to True.
+
+    :param: cmd: str: The apt command to run.
+    :param: fatal: bool: Whether the command's output should be checked and
+            retried.
+    """
+    env = os.environ.copy()
+
+    if 'DEBIAN_FRONTEND' not in env:
+        env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+    if fatal:
+        retry_count = 0
+        result = None
+
+        # If the command is considered "fatal", we need to retry if the apt
+        # lock was not acquired.
+
+        while result is None or result == APT_NO_LOCK:
+            try:
+                result = subprocess.check_call(cmd, env=env)
+            except subprocess.CalledProcessError as e:
+                retry_count = retry_count + 1
+                if retry_count > APT_NO_LOCK_RETRY_COUNT:
+                    raise
+                result = e.returncode
+                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
+                    "".format(APT_NO_LOCK_RETRY_DELAY))
+                time.sleep(APT_NO_LOCK_RETRY_DELAY)
+
+    else:
+        subprocess.call(cmd, env=env)
+
+
+def get_upstream_version(package):
+    """Determine upstream version based on installed package
+
+    @returns None (if not installed) or the upstream version
+    """
+    import apt_pkg
+    cache = apt_cache()
+    try:
+        pkg = cache[package]
+    except KeyError:
+        # the package is unknown to the current apt cache.
+        return None
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+ return None + + return apt_pkg.upstream_version(pkg.current_ver.ver_str) diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py new file mode 100644 index 00000000..ea490bbd --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/osplatform.py @@ -0,0 +1,19 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..d1d52137 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -220,7 +220,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +237,10 @@ def _get_openstack_release(self): ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +258,7 @@ def _get_openstack_release_string(self): ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 355c8cbb..24b353ee 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1044,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index e2792e68..eaab94bf 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -36,6 +36,8 @@ 'relation_get', 'related_units', 'local_unit', + 'application_version_set', + 'get_upstream_version', ] NO_PEERS = { @@ -61,18 +63,21 @@ def setUp(self): self.config.side_effect = self.test_config.get self.test_config.set('monitor-count', 3) self.local_unit.return_value = 'ceph-mon1' + self.get_upstream_version.return_value = '10.2.2' @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_no_peers(self, _peer_units): 
        _peer_units.return_value = NO_PEERS
        hooks.assess_status()
        self.status_set.assert_called_with('blocked', mock.ANY)
+        self.application_version_set.assert_called_with('10.2.2')

     @mock.patch.object(hooks, 'get_peer_units')
     def test_assess_status_peers_incomplete(self, _peer_units):
         _peer_units.return_value = ENOUGH_PEERS_INCOMPLETE
         hooks.assess_status()
         self.status_set.assert_called_with('waiting', mock.ANY)
+        self.application_version_set.assert_called_with('10.2.2')

     @mock.patch.object(hooks, 'get_peer_units')
     def test_assess_status_peers_complete_active(self, _peer_units):
@@ -81,6 +86,7 @@ def test_assess_status_peers_complete_active(self, _peer_units):
         self.ceph.is_quorum.return_value = True
         hooks.assess_status()
         self.status_set.assert_called_with('active', mock.ANY)
+        self.application_version_set.assert_called_with('10.2.2')

     @mock.patch.object(hooks, 'get_peer_units')
     def test_assess_status_peers_complete_down(self, _peer_units):
@@ -89,6 +95,7 @@ def test_assess_status_peers_complete_down(self, _peer_units):
         self.ceph.is_quorum.return_value = False
         hooks.assess_status()
         self.status_set.assert_called_with('blocked', mock.ANY)
+        self.application_version_set.assert_called_with('10.2.2')

     def test_get_peer_units_no_peers(self):
         self.relation_ids.return_value = ['mon:1']

From 6b5c12df6319a6004597bd47040adf9aa7ed0f0c Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 21 Sep 2016 09:47:09 +0100
Subject: [PATCH 1204/2699] Add support for application version

Juju 2.0 provides support for display of the version of an
application deployed by a charm in juju status.

Insert the application_version_set function into the existing
assess_status function - this gets called after all hook executions,
and periodically after that, so any changes in package versions due
to normal system updates will also be reflected in the status output.

This review also includes a resync of charm-helpers to pick up
hookenv support for this feature.
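The pattern the message describes is compact enough to sketch in isolation. A minimal, hypothetical hook module wiring the feature up could look like the following; application_version_set, get_upstream_version and status_set are the real helpers named above (get_upstream_version is exported by charmhelpers.fetch on the apt path of this sync), while VERSION_PACKAGE and the status message are illustrative placeholders:

# Hypothetical charm hooks module, assuming this charm-helpers sync
# has been applied. VERSION_PACKAGE and the message are examples only.
from charmhelpers.core.hookenv import application_version_set, status_set
from charmhelpers.fetch import get_upstream_version

VERSION_PACKAGE = 'ceph-common'  # the package whose version we surface


def assess_status():
    """Report workload status and the deployed application version."""
    # get_upstream_version() returns None when the package is not yet
    # installed, so guard before calling application-version-set.
    version = get_upstream_version(VERSION_PACKAGE)
    if version:
        application_version_set(version)
    status_set('active', 'Unit is ready')

Because assess_status() runs after every hook and again on the periodic status update, a package upgraded by routine system updates is re-reported in juju status without any extra plumbing.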
Change-Id: If1ec3dcc5025d1a1f7e64f21481412ad630050ea --- ceph-osd/charm-helpers-hooks.yaml | 1 + ceph-osd/hooks/ceph_hooks.py | 6 + .../charmhelpers/contrib/charmsupport/nrpe.py | 42 ++- .../charmhelpers/contrib/openstack/utils.py | 21 +- .../contrib/storage/linux/ceph.py | 6 + ceph-osd/hooks/charmhelpers/core/hookenv.py | 14 + ceph-osd/hooks/charmhelpers/core/host.py | 87 ++--- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 +++ .../charmhelpers/core/host_factory/ubuntu.py | 56 +++ ceph-osd/hooks/charmhelpers/core/kernel.py | 36 +- .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + ceph-osd/hooks/charmhelpers/fetch/__init__.py | 324 ++--------------- ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 7 +- ceph-osd/hooks/charmhelpers/fetch/centos.py | 171 +++++++++ ceph-osd/hooks/charmhelpers/fetch/giturl.py | 7 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 336 ++++++++++++++++++ ceph-osd/hooks/charmhelpers/osplatform.py | 19 + .../contrib/openstack/amulet/deployment.py | 9 +- .../contrib/openstack/amulet/utils.py | 119 ++++++- ceph-osd/unit_tests/test_status.py | 7 + 23 files changed, 963 insertions(+), 391 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-osd/hooks/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/centos.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/ubuntu.py create mode 100644 ceph-osd/hooks/charmhelpers/osplatform.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 24c4f22a..b94ba72c 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -3,6 +3,7 @@ destination: hooks/charmhelpers include: - core - cli + - osplatform - fetch - contrib.python.packages - contrib.storage.linux diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 1d52e373..24c2b4e5 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -40,6 +40,7 @@ status_set, storage_get, storage_list, + application_version_set, ) from charmhelpers.core.host import ( umount, @@ -53,6 +54,7 @@ apt_install, apt_update, filter_installed_packages, + get_upstream_version, ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core import host @@ -588,9 +590,13 @@ def update_nrpe_config(): nrpe_setup.write() +VERSION_PACKAGE = 'ceph-common' + + def assess_status(): """Assess status of current unit""" # check to see if the unit is paused. + application_version_set(get_upstream_version(VERSION_PACKAGE)) if is_unit_paused_set(): status_set('maintenance', "Paused. 
Use 'resume' action to resume normal service.")
diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 17976fb5..1410512a 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -38,6 +38,7 @@
 )

 from charmhelpers.core.host import service
+from charmhelpers.core import host

 # This module adds compatibility with the nrpe-external-master and plain nrpe
 # subordinate charms. To use it in your charm:
@@ -108,6 +109,13 @@
 #    def local_monitors_relation_changed():
 #        update_nrpe_config()
 #
+# 4.a If your charm is a subordinate charm set primary=False
+#
+#    from charmsupport.nrpe import NRPE
+#    (...)
+#    def update_nrpe_config():
+#        nrpe_compat = NRPE(primary=False)
+#
 # 5. ln -s hooks.py nrpe-external-master-relation-changed
 #    ln -s hooks.py local-monitors-relation-changed

@@ -220,9 +228,10 @@ class NRPE(object):
     nagios_exportdir = '/var/lib/nagios/export'
     nrpe_confdir = '/etc/nagios/nrpe.d'

-    def __init__(self, hostname=None):
+    def __init__(self, hostname=None, primary=True):
         super(NRPE, self).__init__()
         self.config = config()
+        self.primary = primary
         self.nagios_context = self.config['nagios_context']
         if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
             self.nagios_servicegroups = self.config['nagios_servicegroups']
@@ -238,6 +247,12 @@ def __init__(self, hostname=None):
         else:
             self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
         self.checks = []
+        # Iff in an nrpe-external-master relation hook, set primary status
+        relation = relation_ids('nrpe-external-master')
+        if relation:
+            log("Setting charm primary status {}".format(primary))
+            for rid in relation_ids('nrpe-external-master'):
+                relation_set(relation_id=rid, relation_settings={'primary': self.primary})

     def add_check(self, *args, **kwargs):
         self.checks.append(Check(*args, **kwargs))
@@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name):
     :param str unit_name: Unit name to use in check description
     """
     for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
         upstart_init = '/etc/init/%s.conf' % svc
         sysv_init = '/etc/init.d/%s' % svc

-        if os.path.exists(upstart_init):
-            # Don't add a check for these services from neutron-gateway
-            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
-                nrpe.add_check(
-                    shortname=svc,
-                    description='process check {%s}' % unit_name,
-                    check_cmd='check_upstart_job %s' % svc
-                )
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
         elif os.path.exists(sysv_init):
             cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
             cron_file = ('*/5 * * * * root '
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index 9d3e3d89..e1cc7687 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -51,7 +51,8 @@
     relation_set,
     service_name,
     status_set,
-    hook_name
+    hook_name,
+    application_version_set,
 )

 from charmhelpers.contrib.storage.linux.lvm import (
@@ -80,7 +81,12 @@
     service_resume,
     restart_on_change_helper,
 )
-from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.fetch import ( + apt_install, + apt_cache, + install_remote, + get_upstream_version +) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -1889,3 +1895,14 @@ def config_flags_parser(config_flags): flags[key.strip(post_strippers)] = value.rstrip(post_strippers) return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. + if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index beff2703..edb536c7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -87,6 +87,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100 DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 def validator(value, valid_type, valid_range=None): @@ -266,6 +267,11 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + # The CRUSH algorithm has a slight optimization for placement groups # with powers of 2 so find the nearest power of 2. If the nearest # power of 2 is more than 25% below the original value, the next diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 48b2b9dc..996e81cc 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -843,6 +843,20 @@ def inner_translate_exc2(*args, **kwargs): return inner_translate_exc1 +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 53068599..0f1b2f35 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -30,13 +30,29 @@ import hashlib import functools import itertools -from contextlib import contextmanager -from collections import OrderedDict - import six +from contextlib import contextmanager +from collections import OrderedDict from .hookenv import log from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import def service_start(service_name): @@ -144,8 +160,11 @@ def service_running(service_name): return False else: # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if "start/running" in output: + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes @@ -153,18 +172,6 @@ def service_running(service_name): return False -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - SYSTEMD_SYSTEM = '/run/systemd/system' @@ -173,8 +180,9 @@ def init_is_systemd(): return os.path.isdir(SYSTEMD_SYSTEM) -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None, home_dir=None): +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None): log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) + add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info @@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, return r -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - def pwgen(length=None): """Generate a random pasword.""" if length is None: @@ -674,25 +662,6 @@ def get_nic_hwaddr(nic): return hwaddr -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-osd/hooks/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py b/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/hooks/charmhelpers/core/kernel.py b/ceph-osd/hooks/charmhelpers/core/kernel.py index b166efec..2d404528 100644 --- a/ceph-osd/hooks/charmhelpers/core/kernel.py +++ b/ceph-osd/hooks/charmhelpers/core/kernel.py @@ -15,15 +15,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Jorge Niedbalski "
+import re
+import subprocess
+from charmhelpers.osplatform import get_platform
 from charmhelpers.core.hookenv import (
     log,
     INFO
 )

-from subprocess import check_call, check_output
-import re
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski "


 def modprobe(module, persist=True):
@@ -32,11 +45,9 @@ def modprobe(module, persist=True):

     log('Loading kernel module %s' % module, level=INFO)

-    check_call(cmd)
+    subprocess.check_call(cmd)
     if persist:
-        with open('/etc/modules', 'r+') as modules:
-            if module not in modules.read():
-                modules.write(module)
+        persistent_modprobe(module)


 def rmmod(module, force=False):
@@ -46,21 +57,16 @@ def rmmod(module, force=False):
         cmd.append('-f')
     cmd.append(module)
     log('Removing kernel module %s' % module, level=INFO)
-    return check_call(cmd)
+    return subprocess.check_call(cmd)


 def lsmod():
     """Shows what kernel modules are currently loaded"""
-    return check_output(['lsmod'],
-                        universal_newlines=True)
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)


 def is_module_loaded(module):
     """Checks if a kernel module is already loaded"""
     matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
     return len(matches) > 0
-
-
-def update_initramfs(version='all'):
-    """Updates an initramfs image"""
-    return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/ceph-osd/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-osd/hooks/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-osd/hooks/charmhelpers/core/kernel_factory/centos.py b/ceph-osd/hooks/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 00000000..1c402c11
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 00000000..21559642
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py
index 52eaf824..ec5e0fe9 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py
+++ 
b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -13,18 +13,12 @@ # limitations under the License. import importlib -from tempfile import NamedTemporaryFile -import time +from charmhelpers.osplatform import get_platform from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess from charmhelpers.core.hookenv import ( config, log, ) -import os import six if six.PY3: @@ -33,87 +27,6 @@ from urlparse import urlparse, urlunparse -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', -} - # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. FETCH_HANDLERS = ( @@ -122,10 +35,6 @@ 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - class SourceConfigError(Exception): pass @@ -163,180 +72,38 @@ def base_url(self, url): return urlunparse(parts) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = 
importlib.import_module(module) -def add_source(source, key=None): - """Add a package source to this system. +filter_installed_packages = fetch.filter_installed_packages +install = fetch.install +upgrade = fetch.upgrade +update = fetch.update +purge = fetch.purge +add_source = fetch.add_source - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.install + apt_update = fetch.update + apt_upgrade = fetch.upgrade + apt_purge = fetch.purge + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + get_upstream_version = fetch.get_upstream_version +elif __platform__ == "centos": + yum_search = fetch.yum_search def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. + """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their + The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). 
Example config: @@ -368,12 +135,11 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - apt_update(fatal=True) + fetch.update(fatal=True) def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source + """Install a file tree from a remote source. The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] @@ -406,6 +172,7 @@ def install_remote(source, *args, **kwargs): def install_from_config(config_var_name): + """Install a file from config.""" charm_config = config() source = charm_config[config_var_name] return install_remote(source) @@ -428,40 +195,3 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index b3404d85..07cd0293 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -18,19 +18,20 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) from charmhelpers.core.host import mkdir if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) + install(['bzr']) if filter_installed_packages(['bzr']) != []: raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" + """Handler for bazaar branches via generic and lp URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('bzr+ssh', 'lp', ''): diff --git a/ceph-osd/hooks/charmhelpers/fetch/centos.py b/ceph-osd/hooks/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..604bbfb5 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between yum lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + yb = yum.YumBase() + package_list = yb.doPackageLists() + temp_cache = {p.base_package_name: 1 for p in package_list['installed']} + + _pkgs = [p for p in packages if not temp_cache.get(p, False)] + return _pkgs + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_yum_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_yum_command(cmd, fatal) + + +def update(fatal=False): + """Update local yum cache.""" + cmd = ['yum', '--assumeyes', 'update'] + log("Update with fatal: {}".format(fatal)) + _run_yum_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['yum', '--assumeyes', 'remove'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_yum_command(cmd, fatal) + + +def yum_search(packages): + """Search for a package.""" + output = {} + cmd = ['yum', 'search'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Searching for {}".format(packages)) + result = subprocess.check_output(cmd) + for package in list(packages): + output[package] = package in result + return output + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL with an rpm package + + @param key: A key to be added to the system's keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. + """ + if source is None: + log('Source is not present.
Skipping') + return + + if source.startswith('http'): + directory = '/etc/yum.repos.d/' + for filename in os.listdir(directory): + with open(directory + filename, 'r') as rpm_file: + if source in rpm_file.read(): + break + else: + log("Add source: {!r}".format(source)) + # append the new source stanza to Charms.repo + with open(directory + 'Charms.repo', 'a') as rpm_file: + rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) + rpm_file.write('name=%s\n' % source[7:]) + rpm_file.write('baseurl=%s\n\n' % source) + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['rpm', '--import', key_file.name]) + else: + subprocess.check_call(['rpm', '--import', key]) + + +def _run_yum_command(cmd, fatal=False): + """Run a YUM command. + + Checks the output and retries if the fatal flag is set to True. + + :param: cmd: list: The yum command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index f708d1ee..4cf21bc2 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -18,17 +18,18 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) if filter_installed_packages(['git']) != []: - apt_install(['git']) + install(['git']) if filter_installed_packages(['git']) != []: raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" + """Handler for git branches via generic and github URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py new file mode 100644 index 00000000..fce496b2 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,336 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
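For the CentOS add_source above, a hedged illustration of its effect (the URL is made up; the stanza layout follows the rpm_file.write calls):

    from charmhelpers.fetch import add_source

    # On CentOS this appends a stanza to /etc/yum.repos.d/Charms.repo:
    #   [example.com_charms]
    #   name=example.com/charms
    #   baseurl=http://example.com/charms
    # and would import a GPG key with 'rpm --import' if one were given.
    add_source('http://example.com/charms')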
+ +import os +import six +import time +import subprocess + +from tempfile import NamedTemporaryFile +from charmhelpers.core.host import ( + lsb_release +) +from charmhelpers.core.hookenv import log +from charmhelpers.fetch import SourceConfigError + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 
'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', +} + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + cache = apt_cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_cache(in_memory=True, progress=None): + """Build and return an apt cache.""" + from apt import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache(progress) + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_apt_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automatically, so should not be provided. + """ + if source is None: + log('Source is not present. Skipping') + return + + if (source.startswith('ppa:') or + source.startswith('http') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) + + +def _run_apt_command(cmd, fatal=False): + """Run an APT command. + + Checks the output and retries if the fatal flag is set + to True. + + :param: cmd: list: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) + + +def get_upstream_version(package): + """Determine upstream version based on installed package. + + @returns None (if not installed) or the upstream version + """ + import apt_pkg + cache = apt_cache() + try: + pkg = cache[package] + except KeyError: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed.
+ + return None + + return apt_pkg.upstream_version(pkg.current_ver.ver_str) diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py new file mode 100644 index 00000000..ea490bbd --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/osplatform.py @@ -0,0 +1,19 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..d1d52137 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -220,7 +220,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +237,10 @@ def _get_openstack_release(self): ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +258,7 @@ def _get_openstack_release_string(self): ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..24b353ee 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + [<Endpoint ..., + links={u'self': u'...'}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + <Endpoint ..., + links={u'self': u'...'}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ...
] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. 
@@ -928,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index c5669a10..be13b420 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -30,6 +30,8 @@ 'relation_get', 'related_units', 'get_conf', + 'application_version_set', + 'get_upstream_version', ] CEPH_MONS = [ @@ -44,11 +46,13 @@ class ServiceStatusTestCase(test_utils.CharmTestCase): def setUp(self): super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get + self.get_upstream_version.return_value = '10.2.2' def test_assess_status_no_monitor_relation(self): self.relation_ids.return_value = [] hooks.assess_status() self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') def test_assess_status_monitor_relation_incomplete(self): self.relation_ids.return_value = ['mon:1'] @@ -56,6 +60,7 @@ def test_assess_status_monitor_relation_incomplete(self): self.get_conf.return_value = None hooks.assess_status() self.status_set.assert_called_with('waiting', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') def test_assess_status_monitor_complete_no_disks(self): self.relation_ids.return_value = ['mon:1'] @@ -64,6 +69,7 @@ def test_assess_status_monitor_complete_no_disks(self): self.get_conf.return_value = 'something' self.ceph.get_running_osds.return_value = [] hooks.assess_status() self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') def test_assess_status_monitor_complete_disks(self): self.relation_ids.return_value = ['mon:1'] @@ -73,3 +79,4 @@ '67890'] hooks.assess_status() self.status_set.assert_called_with('active', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') From f1c8bd40930b0c8ca3b98ab26f2492ba90e352eb Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 21 Sep 2016 10:15:24 +0100 Subject: [PATCH 1205/2699] Add support for application version Juju 2.0 provides support for display of the version of an application deployed by a charm in juju status. Insert the application_version_set function into the existing assess_status function - this gets called after all hook executions, and periodically after that, so any changes in package versions due to normal system updates will also be reflected in the status output. This review also includes a resync of charm-helpers to pick up hookenv support for this feature.
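As a rough sketch of the wiring this commit message describes (simplified, not the charm's actual hook code; the 'ceph' package name follows the ceph-osd unit tests above, which expect version 10.2.2):

    from charmhelpers.core.hookenv import application_version_set
    from charmhelpers.fetch import get_upstream_version

    def assess_status():
        # Runs after every hook and on each update-status tick, so the
        # version shown in 'juju status' tracks normal package updates.
        application_version_set(get_upstream_version('ceph'))  # e.g. '10.2.2'
        # ... existing workload status checks continue here ...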
Change-Id: Ia68d71fa7847faf18b36c68efff9955399a38a45 --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + .../charmhelpers/contrib/charmsupport/nrpe.py | 42 ++- .../contrib/openstack/amulet/deployment.py | 9 +- .../contrib/openstack/amulet/utils.py | 119 ++++++- .../charmhelpers/contrib/openstack/neutron.py | 4 + .../charmhelpers/contrib/openstack/utils.py | 21 +- .../contrib/storage/linux/ceph.py | 6 + .../hooks/charmhelpers/core/hookenv.py | 14 + ceph-radosgw/hooks/charmhelpers/core/host.py | 87 ++--- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 +++ .../charmhelpers/core/host_factory/ubuntu.py | 56 +++ .../hooks/charmhelpers/core/kernel.py | 36 +- .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../hooks/charmhelpers/fetch/__init__.py | 324 ++--------------- .../hooks/charmhelpers/fetch/bzrurl.py | 7 +- .../hooks/charmhelpers/fetch/centos.py | 171 +++++++++ .../hooks/charmhelpers/fetch/giturl.py | 7 +- .../hooks/charmhelpers/fetch/ubuntu.py | 336 ++++++++++++++++++ ceph-radosgw/hooks/charmhelpers/osplatform.py | 19 + ceph-radosgw/hooks/utils.py | 5 + .../contrib/openstack/amulet/deployment.py | 9 +- .../contrib/openstack/amulet/utils.py | 119 ++++++- .../unit_tests/test_ceph_radosgw_utils.py | 9 +- 26 files changed, 1092 insertions(+), 395 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/centos.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py create mode 100644 ceph-radosgw/hooks/charmhelpers/osplatform.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 25fcc42b..0f259d6a 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -3,6 +3,7 @@ destination: hooks/charmhelpers include: - core - cli + - osplatform - fetch - contrib.python.packages - contrib.storage.linux diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 17976fb5..1410512a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -38,6 +38,7 @@ ) from charmhelpers.core.host import service +from charmhelpers.core import host # This module adds compatibility with the nrpe-external-master and plain nrpe # subordinate charms. To use it in your charm: @@ -108,6 +109,13 @@ # def local_monitors_relation_changed(): # update_nrpe_config() # +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# # 5. 
ln -s hooks.py nrpe-external-master-relation-changed # ln -s hooks.py local-monitors-relation-changed @@ -220,9 +228,10 @@ class NRPE(object): nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' - def __init__(self, hostname=None): + def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() self.config = config() + self.primary = primary self.nagios_context = self.config['nagios_context'] if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] @@ -238,6 +247,12 @@ def __init__(self, hostname=None): else: self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation: + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) @@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name): :param str unit_name: Unit name to use in check description """ for svc in services: + # Don't add a check for these services from neutron-gateway + if svc in ['ext-port', 'os-charm-phy-nic-mtu']: + continue + upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if os.path.exists(upstart_init): - # Don't add a check for these services from neutron-gateway - if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + + if host.init_is_systemd(): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_systemd.py %s' % svc + ) + elif os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..d1d52137 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -220,7 +220,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +237,10 @@ def _get_openstack_release(self): ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +258,7 @@ def _get_openstack_release_string(self): ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin =
self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..24b353ee 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + [<Endpoint ..., + links={u'self': u'...'}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + <Endpoint ..., + links={u'self': u'...'}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictionaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note that an added complication is that the order of admin, public and + internal endpoints (by 'interface') may differ in each region.
+ + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. @@ -928,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 03427b49..d1510dd3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -245,6 +245,10 @@ def neutron_plugins(): 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') + if release >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = '/etc/neutron/nsx.ini' return plugins diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 9d3e3d89..e1cc7687 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,7 +51,8 @@ relation_set, service_name, status_set, - hook_name + hook_name, + application_version_set, ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -80,7 +81,12 @@ service_resume, restart_on_change_helper, ) -from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.fetch import ( + apt_install, + apt_cache, + install_remote, + get_upstream_version +) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -1889,3 +1895,14 @@ def config_flags_parser(config_flags): flags[key.strip(post_strippers)] = value.rstrip(post_strippers) return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. 
+ if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index beff2703..edb536c7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -87,6 +87,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100 DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 def validator(value, valid_type, valid_range=None): @@ -266,6 +267,11 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + # The CRUSH algorithm has a slight optimization for placement groups # with powers of 2 so find the nearest power of 2. If the nearest # power of 2 is more than 25% below the original value, the next diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 48b2b9dc..996e81cc 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -843,6 +843,20 @@ def inner_translate_exc2(*args, **kwargs): return inner_translate_exc1 +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 53068599..0f1b2f35 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -30,13 +30,29 @@ import hashlib import functools import itertools -from contextlib import contextmanager -from collections import OrderedDict - import six +from contextlib import contextmanager +from collections import OrderedDict from .hookenv import log from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import def service_start(service_name): @@ -144,8 +160,11 @@ def service_running(service_name): return False else: # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if "start/running" in output: + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes @@ -153,18 +172,6 @@ def service_running(service_name): return False -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - SYSTEMD_SYSTEM = '/run/systemd/system' @@ -173,8 +180,9 @@ def init_is_systemd(): return os.path.isdir(SYSTEMD_SYSTEM) -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None, home_dir=None): +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None): log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) + add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info @@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, return r -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - def pwgen(length=None): """Generate a random pasword.""" if length is None: @@ -674,25 +662,6 @@ def get_nic_hwaddr(nic): return hwaddr -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function builds the cache from yum's installed package list + if the pkgcache argument is None.
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel.py b/ceph-radosgw/hooks/charmhelpers/core/kernel.py index b166efec..2d404528 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/kernel.py +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel.py @@ -15,15 +15,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Jorge Niedbalski " +import re +import subprocess +from charmhelpers.osplatform import get_platform from charmhelpers.core.hookenv import ( log, INFO ) -from subprocess import check_call, check_output -import re +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " def modprobe(module, persist=True): @@ -32,11 +45,9 @@ def modprobe(module, persist=True): log('Loading kernel module %s' % module, level=INFO) - check_call(cmd) + subprocess.check_call(cmd) if persist: - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) + persistent_modprobe(module) def rmmod(module, force=False): @@ -46,21 +57,16 @@ def rmmod(module, force=False): cmd.append('-f') cmd.append(module) log('Removing kernel module %s' % module, level=INFO) - return check_call(cmd) + return subprocess.check_call(cmd) def lsmod(): """Shows what kernel modules are currently loaded""" - return check_output(['lsmod'], - universal_newlines=True) + return subprocess.check_output(['lsmod'], + universal_newlines=True) def is_module_loaded(module): """Checks if a kernel module is already loaded""" matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) return len(matches) > 0 - - -def update_initramfs(version='all'): - """Updates an initramfs image""" - return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/centos.py b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..21559642 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 52eaf824..ec5e0fe9 100644 --- 
a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -13,18 +13,12 @@ # limitations under the License. import importlib -from tempfile import NamedTemporaryFile -import time +from charmhelpers.osplatform import get_platform from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess from charmhelpers.core.hookenv import ( config, log, ) -import os import six if six.PY3: @@ -33,87 +27,6 @@ from urlparse import urlparse, urlunparse -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', -} - # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. FETCH_HANDLERS = ( @@ -122,10 +35,6 @@ 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - class SourceConfigError(Exception): pass @@ -163,180 +72,38 @@ def base_url(self, url): return urlunparse(parts) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - +__platform__ = get_platform() +module = 
"charmhelpers.fetch.%s" % __platform__ +fetch = importlib.import_module(module) -def add_source(source, key=None): - """Add a package source to this system. +filter_installed_packages = fetch.filter_installed_packages +install = fetch.install +upgrade = fetch.upgrade +update = fetch.update +purge = fetch.purge +add_source = fetch.add_source - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.install + apt_update = fetch.update + apt_upgrade = fetch.upgrade + apt_purge = fetch.purge + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + get_upstream_version = fetch.get_upstream_version +elif __platform__ == "centos": + yum_search = fetch.yum_search def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. + """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their + The fragment needs to be included as a string. 
Sources and their corresponding keys are of the types supported by add_source(). Example config: @@ -368,12 +135,11 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - apt_update(fatal=True) + fetch.update(fatal=True) def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source + """Install a file tree from a remote source. The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] @@ -406,6 +172,7 @@ def install_remote(source, *args, **kwargs): def install_from_config(config_var_name): + """Install a file from config.""" charm_config = config() source = charm_config[config_var_name] return install_remote(source) @@ -428,40 +195,3 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index b3404d85..07cd0293 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -18,19 +18,20 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) from charmhelpers.core.host import mkdir if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) + install(['bzr']) if filter_installed_packages(['bzr']) != []: raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" + """Handler for bazaar branches via generic and lp URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('bzr+ssh', 'lp', ''): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/centos.py b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..604bbfb5 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between yum lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + yb = yum.YumBase() + package_list = yb.doPackageLists() + temp_cache = {p.base_package_name: 1 for p in package_list['installed']} + + _pkgs = [p for p in packages if not temp_cache.get(p, False)] + return _pkgs + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_yum_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_yum_command(cmd, fatal) + + +def update(fatal=False): + """Update local yum cache.""" + cmd = ['yum', '--assumeyes', 'update'] + log("Update with fatal: {}".format(fatal)) + _run_yum_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['yum', '--assumeyes', 'remove'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_yum_command(cmd, fatal) + + +def yum_search(packages): + """Search for a package.""" + output = {} + cmd = ['yum', 'search'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Searching for {}".format(packages)) + result = subprocess.check_output(cmd) + for package in list(packages): + output[package] = package in result + return output + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL with an rpm package + + @param key: A key to be added to the system's keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. + """ + if source is None: + log('Source is not present. 
Skipping') + return + + if source.startswith('http'): + directory = '/etc/yum.repos.d/' + for filename in os.listdir(directory): + with open(directory + filename, 'r') as rpm_file: + if source in rpm_file.read(): + break + else: + log("Add source: {!r}".format(source)) + # write in the charms.repo + with open(directory + 'Charms.repo', 'a') as rpm_file: + rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) + rpm_file.write('name=%s\n' % source[7:]) + rpm_file.write('baseurl=%s\n\n' % source) + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['rpm', '--import', key_file.name]) + else: + subprocess.check_call(['rpm', '--import', key]) + + +def _run_yum_command(cmd, fatal=False): + """Run a YUM command. + + Checks the output and retries if the fatal flag is set to True. + + :param: cmd: str: The yum command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py index f708d1ee..4cf21bc2 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py @@ -18,17 +18,18 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) if filter_installed_packages(['git']) != []: - apt_install(['git']) + install(['git']) if filter_installed_packages(['git']) != []: raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" + """Handler for git branches via generic and github URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py new file mode 100644 index 00000000..fce496b2 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,336 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
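For an http source, the CentOS add_source() above derives the repo section name by stripping the scheme and replacing slashes, then appends a stanza to /etc/yum.repos.d/Charms.repo. A small sketch of just that string handling (the mirror URL is hypothetical); note that source[7:] assumes a seven-character 'http://' prefix, so an 'https://' source would keep a stray leading 's':

    source = 'http://mirror.example.com/ceph/el7'  # hypothetical URL

    # Section name: scheme stripped, slashes replaced with underscores.
    section = source[7:].replace('/', '_')  # 'mirror.example.com_ceph_el7'
    stanza = ('[%s]\n' % section +
              'name=%s\n' % source[7:] +
              'baseurl=%s\n\n' % source)
    # stanza is what add_source() appends to /etc/yum.repos.d/Charms.repo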
+ +import os +import six +import time +import subprocess + +from tempfile import NamedTemporaryFile +from charmhelpers.core.host import ( + lsb_release +) +from charmhelpers.core.hookenv import log +from charmhelpers.fetch import SourceConfigError + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 
'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', +} + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + cache = apt_cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_cache(in_memory=True, progress=None): + """Build and return an apt cache.""" + from apt import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache(progress) + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_apt_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
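The CLOUD_ARCHIVE_POCKETS table above is what lets the 'cloud:' branch of add_source() expand a short alias into a full Ubuntu Cloud Archive pocket before writing the apt source file. A small sketch of that lookup, using a two-entry excerpt of the mapping:

    # Excerpt of the CLOUD_ARCHIVE_POCKETS mapping above.
    CLOUD_ARCHIVE_POCKETS = {
        'mitaka': 'trusty-updates/mitaka',
        'trusty-mitaka': 'trusty-updates/mitaka',
    }

    CLOUD_ARCHIVE = ("# Ubuntu Cloud Archive\n"
                     "deb http://ubuntu-cloud.archive.canonical.com/ubuntu"
                     " {} main\n")

    source = 'cloud:trusty-mitaka'
    pocket = source.split(':')[-1]                 # 'trusty-mitaka'
    entry = CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])
    # entry -> "deb http://ubuntu-cloud.archive.canonical.com/ubuntu
    #           trusty-updates/mitaka main"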
Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automatically, so should not be provided. + """ + if source is None: + log('Source is not present. Skipping') + return + + if (source.startswith('ppa:') or + source.startswith('http') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) + + +def _run_apt_command(cmd, fatal=False): + """Run an APT command. + + Checks the output and retries if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) + + +def get_upstream_version(package): + """Determine upstream version based on installed package + + @returns None (if not installed) or the upstream version + """ + import apt_pkg + cache = apt_cache() + try: + pkg = cache[package] + except: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed. 
+ return None + + return apt_pkg.upstream_version(pkg.current_ver.ver_str) diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py new file mode 100644 index 00000000..ea490bbd --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -0,0 +1,19 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b87dd0ca..b1b3e5b3 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -29,6 +29,7 @@ INFO, relation_get, relation_ids, + application_version_set, ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, @@ -55,6 +56,7 @@ apt_update, add_source, filter_installed_packages, + get_upstream_version, ) # NOTE: some packages are installed by the charm so may not be available @@ -98,6 +100,8 @@ APACHE_24_CONF = '/etc/apache2/sites-available/rgw.conf' APACHE_PORTS_CONF = '/etc/apache2/ports.conf' +VERSION_PACKAGE = 'radosgw' + BASE_RESOURCE_MAP = OrderedDict([ (HAPROXY_CONF, { 'contexts': [context.HAProxyContext(singlenode_mode=True), @@ -254,6 +258,7 @@ def assess_status(configs): @returns None - this function is executed for its side-effect """ assess_status_func(configs)() + application_version_set(get_upstream_version(VERSION_PACKAGE)) def assess_status_func(configs): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..d1d52137 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -220,7 +220,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +237,10 @@ def _get_openstack_release(self): ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +258,7 @@ def _get_openstack_release_string(self): ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..24b353ee 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ 
b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data is a list of endpoint objects; the repr of + each object includes, among other attributes: + + [..., + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312, + ..., + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictionaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note that an added complication is that the order of the admin, + public and internal endpoints ('interface') within each region is + not guaranteed. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. 
+ """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. @@ -928,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 205b648f..f2a5b8ff 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -26,12 +26,15 @@ from test_utils import CharmTestCase TO_PATCH = [ + 'application_version_set', + 'get_upstream_version', ] class CephRadosGWUtilTests(CharmTestCase): def setUp(self): - super(CephRadosGWUtilTests, self).setUp(None, TO_PATCH) + super(CephRadosGWUtilTests, self).setUp(utils, TO_PATCH) + self.get_upstream_version.return_value = '10.2.2' def test_assess_status(self): with patch.object(utils, 'assess_status_func') as asf: @@ -40,6 +43,10 @@ def test_assess_status(self): utils.assess_status('test-config') asf.assert_called_once_with('test-config') callee.assert_called_once_with() + self.get_upstream_version.assert_called_with( + utils.VERSION_PACKAGE + ) + self.application_version_set.assert_called_with('10.2.2') @patch.object(utils, 'get_optional_interfaces') @patch.object(utils, 'check_optional_relations') From 888ceeb62298f305cf529e64f9dc2b4740cfc362 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 6 Sep 2016 15:17:46 -0700 Subject: [PATCH 1206/2699] Move upgrade code to shared lib Moving the ceph mon upgrade code over to the ceph shared library. This will make it easier to make patches and have them be applied to all 3 charms at once. Change-Id: I203bc9859745a3a6c5c61b13d9ac18afaba17bc1 --- ceph-mon/hooks/ceph_hooks.py | 165 +-------- ceph-mon/lib/ceph/__init__.py | 450 ++++++++++++++++++++--- ceph-mon/lib/ceph/ceph_broker.py | 148 +++++++- ceph-mon/unit_tests/test_upgrade.py | 34 ++ ceph-mon/unit_tests/test_upgrade_roll.py | 177 --------- 5 files changed, 586 insertions(+), 388 deletions(-) create mode 100644 ceph-mon/unit_tests/test_upgrade.py delete mode 100644 ceph-mon/unit_tests/test_upgrade_roll.py diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 1f30b3e1..1925e634 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -15,12 +15,9 @@ # limitations under the License. 
import os -import random -import socket import subprocess import sys import uuid -import time sys.path.append('lib') import ceph @@ -52,10 +49,7 @@ mkdir, write_file, rsync, - cmp_pkgrevno, - service_stop, - service_start, - chownr) + cmp_pkgrevno) from charmhelpers.fetch import ( apt_install, apt_update, @@ -72,11 +66,7 @@ from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render from charmhelpers.contrib.storage.linux.ceph import ( - CephConfContext, - monitor_key_set, - monitor_key_exists, - monitor_key_get, - get_mon_map) + CephConfContext) from utils import ( get_networks, get_public_addr, @@ -130,7 +120,8 @@ def check_for_upgrade(): if new_version == upgrade_paths[old_version]: log("{} to {} is a valid upgrade path. Proceeding.".format( old_version, new_version)) - roll_monitor_cluster(new_version) + ceph.roll_monitor_cluster(new_version=new_version, + upgrade_key='admin') else: # Log a helpful error message log("Invalid upgrade path from {} to {}. " @@ -139,154 +130,6 @@ def check_for_upgrade(): pretty_print_upgrade_paths())) -def lock_and_roll(my_name, version): - start_timestamp = time.time() - - log('monitor_key_set {}_{}_start {}'.format(my_name, - version, - start_timestamp)) - monitor_key_set('admin', "{}_{}_start".format(my_name, version), - start_timestamp) - log("Rolling") - # This should be quick - upgrade_monitor() - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_done {}'.format(my_name, - version, - stop_timestamp)) - monitor_key_set('admin', "{}_{}_done".format(my_name, version), - stop_timestamp) - - -def wait_on_previous_node(previous_node, version): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - 'admin', - "{}_{}_done".format(previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - 'admin', - "{}_{}_start".format(previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - 'admin', - "{}_{}_done".format(previous_node, version)) - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -def roll_monitor_cluster(new_version): - """ - This is tricky to get right so here's what we're going to do. - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous monitor is upgraded yet. 
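check_for_upgrade() above only proceeds when the configured source maps to the single supported next release for the version currently installed; anything else is logged together with pretty_print_upgrade_paths(). A minimal sketch of that gate; the mapping contents here are illustrative, not the charm's actual table:

    # Hypothetical one-hop-per-release table, keyed old -> new.
    upgrade_paths = {
        'firefly': 'hammer',
        'hammer': 'jewel',
    }


    def valid_upgrade(old_version, new_version):
        # Only the single supported next release is a legal hop.
        return upgrade_paths.get(old_version) == new_version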
- """ - log('roll_monitor_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - monitor_list = [] - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - else: - status_set('blocked', 'Unable to get monitor cluster information') - sys.exit(1) - log('monitor_list: {}'.format(monitor_list)) - - # A sorted list of osd unit names - mon_sorted_list = sorted(monitor_list) - - try: - position = mon_sorted_list.index(my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! Roll - # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name, version=new_version) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - mon_sorted_list[position - 1])) - wait_on_previous_node(previous_node=mon_sorted_list[position - 1], - version=new_version) - lock_and_roll(my_name=my_name, version=new_version) - except ValueError: - log("Failed to find {} in list {}.".format( - my_name, mon_sorted_list)) - status_set('blocked', 'failed to upgrade monitor') - - -def upgrade_monitor(): - current_version = ceph.get_version() - status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if ceph.systemd(): - for mon_id in ceph.get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) - else: - service_stop('ceph-mon-all') - apt_install(packages=ceph.PACKAGES, fatal=True) - - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph.ceph_user(), - group=ceph.ceph_user()) - if ceph.systemd(): - for mon_id in ceph.get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) - else: - service_start('ceph-mon-all') - status_set("active", "") - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - @hooks.hook('install.real') @harden() def install(): diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 0b31ddaf..522e0876 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import ctypes +import collections import json +import random +import socket import subprocess import time import os @@ -22,38 +25,38 @@ import shutil from charmhelpers.core import hookenv - from charmhelpers.core.host import ( mkdir, chownr, service_restart, lsb_release, - cmp_pkgrevno, service_stop, mounts) + cmp_pkgrevno, service_stop, mounts, service_start) from charmhelpers.core.hookenv import ( log, ERROR, cached, status_set, - WARNING, DEBUG) + WARNING, DEBUG, config) from charmhelpers.core.services import render_template from charmhelpers.fetch import ( - apt_cache -) - + apt_cache, + add_source, apt_install, apt_update) +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get, + get_mon_map) from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, is_device_mounted) -from utils import ( - get_unit_hostname, -) - LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', + 'radosgw', 'xfsprogs'] LinkSpeed = { "BASE_10": 10, @@ -567,7 +570,7 @@ def error_out(msg): def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -594,7 +597,7 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -627,7 +630,7 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -921,41 +924,83 @@ def import_radosgw_key(key): } -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) +def get_radosgw_key(pool_list): + return get_named_key(name='radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) -admin_caps = { - 'mds': ['allow *'], - 'mon': ['allow *'], - 'osd': ['allow *'] -} -osd_upgrade_caps = { - 'mon': ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ] -} +_default_caps = collections.OrderedDict([ + ('mon', ['allow rw']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + 
ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) -def get_named_key(name, caps=None): +def get_named_key(name, caps=None, pool_list=None): + """ + Retrieve a specific named cephx key + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :return: Returns a cephx key + """ caps = caps or _default_caps cmd = [ "sudo", @@ -965,16 +1010,20 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() + socket.gethostname() ), 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 @@ -986,7 +1035,7 @@ def upgrade_key_caps(key, caps): cmd = [ "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] - for subsystem, subcaps in caps.iteritems(): + for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) @@ -997,7 +1046,7 @@ def systemd(): def bootstrap_monitor_cluster(secret): - hostname = get_unit_hostname() + hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) if systemd(): @@ -1042,7 +1091,7 @@ def bootstrap_monitor_cluster(secret): def update_monfs(): - hostname = get_unit_hostname() + hostname = socket.gethostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) if systemd(): init_marker = '{}/systemd'.format(monfs) @@ -1181,3 +1230,308 @@ def get_running_osds(): return result.split() except subprocess.CalledProcessError: return [] + + +def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): + """ + Fairly self explanatory name. This function will wait + for all monitors in the cluster to upgrade or it will + return after a timeout period has expired. + :param new_version: str of the version to watch + :param upgrade_key: the cephx key name to use + """ + done = False + start_time = time.time() + monitor_list = [] + + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + while not done: + try: + done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format( + "mon", mon, new_version + )) for mon in monitor_list) + current_time = time.time() + if current_time > (start_time + 10 * 60): + raise Exception + else: + # Wait 30 seconds and test again if all monitors are upgraded + time.sleep(30) + except subprocess.CalledProcessError: + raise + + +# Edge cases: +# 1. 
Previous node dies on upgrade, can we retry? +def roll_monitor_cluster(new_version, upgrade_key): + """ + This is tricky to get right so here's what we're going to do. + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous monitor is upgraded yet. + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(upgrade_key=upgrade_key, + service='mon', + previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(): + current_version = get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for mon_id in get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph_user(), + group=ceph_user()) + if systemd(): + for mon_id in get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + status_set("active", "") + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def lock_and_roll(upgrade_key, service, my_name, version): + start_timestamp = time.time() + + log('monitor_key_set {}_{}_{}_start {}'.format( + service, + my_name, + version, + start_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_start".format( + service, my_name, version), start_timestamp) + log("Rolling") + + # This should be quick + if service == 'osd': + upgrade_osd() + elif service == 'mon': + upgrade_monitor() + else: + log("Unknown service {}. 
Unable to upgrade".format(service), + level=ERROR) + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_{}_{}_done {}'.format(service, + my_name, + version, + stop_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, + my_name, + version), + stop_timestamp) + + +def wait_on_previous_node(upgrade_key, service, previous_node, version): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + upgrade_key, + "{}_{}_{}_start".format(service, previous_node, version)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + +def get_upgrade_position(osd_sorted_list, match_name): + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version, upgrade_key): + """ + This is tricky to get right so here's what we're going to do. + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! 
Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(): + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for osd_id in get_local_osd_ids(): + service_stop('ceph-osd@{}'.format(osd_id)) + else: + service_stop('ceph-osd-all') + apt_install(packages=PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph_user(), + group=ceph_user()) + if systemd(): + for osd_id in get_local_osd_ids(): + service_start('ceph-osd@{}'.format(osd_id)) + else: + service_start('ceph-osd-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index d55e570b..0ed9833e 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -15,6 +15,8 @@ # limitations under the License. import json +import os +from tempfile import NamedTemporaryFile from charmhelpers.core.hookenv import ( log, @@ -42,6 +44,8 @@ # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. 
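The serialization scheme used by roll_monitor_cluster, lock_and_roll and wait_on_previous_node above is easier to see with the charm plumbing stripped away. A minimal sketch, assuming the monitor_key_* helpers behave like a plain string key/value store (the real ones persist keys via the monitor cluster):

    import time

    class FakeMonitorStore:
        # Stand-in for the monitor-backed key/value store, illustration only.
        def __init__(self):
            self._kv = {}

        def set(self, key, value):
            self._kv[key] = value

        def exists(self, key):
            return key in self._kv

    def upgrade_keys(service, host, version):
        # Key layout used above: {service}_{hostname}_{version}_{start|done}
        base = "{}_{}_{}".format(service, host, version)
        return base + "_start", base + "_done"

    store = FakeMonitorStore()
    start_key, done_key = upgrade_keys('mon', 'ip-192-168-1-2', '0.94.1')

    # The first node in the sorted list publishes its start timestamp,
    # upgrades, then publishes done; every later node polls its
    # predecessor's done key, with a 10-minute dead-node escape hatch
    # based on the published start timestamp.
    store.set(start_key, time.time())
    store.set(done_key, time.time())
    assert store.exists(done_key)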
# It will give a useful error message +from subprocess import check_output, CalledProcessError + POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] "size": [int], @@ -56,8 +60,8 @@ "write_fadvise_dontneed": [bool], "noscrub": [bool], "nodeep-scrub": [bool], - "hit_set_type": [basestring, ["bloom", "explicit_hash", - "explicit_object"]], + "hit_set_type": [str, ["bloom", "explicit_hash", + "explicit_object"]], "hit_set_count": [int, [1, 1]], "hit_set_period": [int], "hit_set_fpp": [float, [0.0, 1.0]], @@ -289,6 +293,136 @@ def handle_set_pool_value(request, service): value=params['value']) +def handle_rgw_regionmap_update(request, service): + name = request.get('client-name') + if not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output(['radosgw-admin', + '--id', service, + 'regionmap', 'update', '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_regionmap_default(request, service): + region = request.get('rgw-region') + name = request.get('client-name') + if not region or not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'regionmap', + 'default', + '--rgw-region', region, + '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_zone_set(request, service): + json_file = request.get('zone-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'zone', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + +def handle_rgw_create_user(request, service): + user_id = request.get('rgw-uid') + display_name = request.get('display-name') + name = request.get('client-name') + if not name or not display_name or not user_id: + msg = "Missing client-name, display-name or rgw-uid" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + create_output = check_output( + [ + 'radosgw-admin', + '--id', service, + 'user', + 'create', + '--uid', user_id, + '--display-name', display_name, + '--name', name, + '--system' + ] + ) + try: + user_json = json.loads(create_output) + return {'exit-code': 0, 'user': user_json} + except ValueError as err: + log(err, level=ERROR) + return {'exit-code': 1, 'stderr': err} + + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_region_set(request, service): + # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + json_file = request.get('region-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not 
region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'region', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + def process_requests_v1(reqs): """Process v1 requests. @@ -341,6 +475,16 @@ def process_requests_v1(reqs): snapshot_name=snapshot_name) elif op == "set-pool-value": ret = handle_set_pool_value(request=req, service=svc) + elif op == "rgw-region-set": + ret = handle_rgw_region_set(request=req, service=svc) + elif op == "rgw-zone-set": + ret = handle_rgw_zone_set(request=req, service=svc) + elif op == "rgw-regionmap-update": + ret = handle_rgw_regionmap_update(request=req, service=svc) + elif op == "rgw-regionmap-default": + ret = handle_rgw_regionmap_default(request=req, service=svc) + elif op == "rgw-create-user": + ret = handle_rgw_create_user(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py new file mode 100644 index 00000000..1a74a4fa --- /dev/null +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -0,0 +1,34 @@ +import unittest + +__author__ = 'Chris Holcombe ' + +from mock import patch, MagicMock +from ceph_hooks import check_for_upgrade + + +def config_side_effect(*args): + if args[0] == 'source': + return 'cloud:trusty-kilo' + elif args[0] == 'key': + return 'key' + elif args[0] == 'release-version': + return 'cloud:trusty-kilo' + + +class UpgradeRollingTestCase(unittest.TestCase): + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.host') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv): + host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + check_for_upgrade() + + roll_monitor_cluster.assert_called_with( + new_version='cloud:trusty-kilo', + upgrade_key='admin') diff --git a/ceph-mon/unit_tests/test_upgrade_roll.py b/ceph-mon/unit_tests/test_upgrade_roll.py deleted file mode 100644 index 3960a07b..00000000 --- a/ceph-mon/unit_tests/test_upgrade_roll.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import time - -from mock import patch, call, MagicMock - -import test_utils - -# python-apt is not installed as part of test-requirements but is imported by -# some charmhelpers modules so create a fake import. 
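The rgw handlers above are all driven by broker requests arriving over a relation. A hypothetical payload for the new rgw-create-user op, assuming the v1 envelope ({"api-version": 1, "ops": [...]}) that is unpacked before process_requests_v1 runs; the uid and names here are made up for illustration:

    import json

    request = json.dumps({
        "api-version": 1,
        "ops": [{
            "op": "rgw-create-user",
            "rgw-uid": "rgw-user-1",                  # hypothetical uid
            "display-name": "RGW user one",
            "client-name": "client.radosgw.gateway",
        }],
    })
    print(request)

    # handle_rgw_create_user() then shells out to, roughly:
    #   radosgw-admin --id <service> user create --uid rgw-user-1 \
    #       --display-name "RGW user one" --name client.radosgw.gateway --system
    # and replies {'exit-code': 0, 'user': <parsed JSON>} on success.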
-mock_apt = MagicMock() -sys.modules['apt'] = mock_apt -mock_apt.apt_pkg = MagicMock() - -with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = ( - lambda *dargs, - **dkwargs: lambda f: lambda *args, - **kwargs: f(*args, **kwargs)) - import ceph_hooks - -TO_PATCH = [ - 'hookenv', - 'status_set', - 'config', - 'ceph', - 'log', - 'add_source', - 'apt_update', - 'apt_install', - 'service_stop', - 'service_start', - 'host', - 'chownr', -] - - -def config_side_effect(*args): - if args[0] == 'source': - return 'cloud:trusty-kilo' - elif args[0] == 'key': - return 'key' - elif args[0] == 'release-version': - return 'cloud:trusty-kilo' - - -previous_node_start_time = time.time() - (9 * 60) - - -def monitor_key_side_effect(*args): - if args[1] == \ - 'ip-192-168-1-2_0.94.1_done': - return False - elif args[1] == \ - 'ip-192-168-1-2_0.94.1_start': - # Return that the previous node started 9 minutes ago - return previous_node_start_time - - -class UpgradeRollingTestCase(test_utils.CharmTestCase): - def setUp(self): - super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) - - @patch('ceph_hooks.roll_monitor_cluster') - def test_check_for_upgrade(self, roll_monitor_cluster): - self.host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - self.hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] - ceph_hooks.check_for_upgrade() - - roll_monitor_cluster.assert_called_with('cloud:trusty-kilo') - - @patch('ceph_hooks.upgrade_monitor') - @patch('ceph_hooks.monitor_key_set') - def test_lock_and_roll(self, monitor_key_set, upgrade_monitor): - monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2', - version='0.94.1') - upgrade_monitor.assert_called_once_with() - - def test_upgrade_monitor(self): - self.config.side_effect = config_side_effect - self.ceph.get_version.return_value = "0.80" - self.ceph.ceph_user.return_value = "ceph" - self.ceph.systemd.return_value = False - ceph_hooks.upgrade_monitor() - self.service_stop.assert_called_with('ceph-mon-all') - self.service_start.assert_called_with('ceph-mon-all') - self.status_set.assert_has_calls([ - call('maintenance', 'Upgrading monitor'), - call('active', '') - ]) - self.chownr.assert_has_calls( - [ - call(group='ceph', owner='ceph', path='/var/lib/ceph') - ] - ) - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.wait_on_previous_node') - @patch('ceph_hooks.get_mon_map') - @patch('ceph_hooks.socket') - def test_roll_monitor_cluster_second(self, - socket, - get_mon_map, - wait_on_previous_node, - lock_and_roll): - self.ceph.get_version.return_value = "0.94.1" - wait_on_previous_node.return_value = None - socket.gethostname.return_value = "ip-192-168-1-3" - get_mon_map.return_value = { - 'monmap': { - 'mons': [ - { - 'name': 'ip-192-168-1-2', - }, - { - 'name': 'ip-192-168-1-3', - }, - ] - } - } - ceph_hooks.roll_monitor_cluster(new_version='0.94.1') - self.status_set.assert_called_with( - 'blocked', - 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3", - version='0.94.1') - - @patch.object(ceph_hooks, 'time') - @patch('ceph_hooks.monitor_key_get') - @patch('ceph_hooks.monitor_key_exists') - def test_wait_on_previous_node(self, monitor_key_exists, monitor_key_get, - mock_time): - tval = [previous_node_start_time] - - def fake_time(): - tval[0] += 100 - 
return tval[0] - - mock_time.time.side_effect = fake_time - monitor_key_get.side_effect = monitor_key_side_effect - monitor_key_exists.return_value = False - - ceph_hooks.wait_on_previous_node("ip-192-168-1-2", version='0.94.1') - - # Make sure we checked to see if the previous node started - monitor_key_get.assert_has_calls( - [call('admin', 'ip-192-168-1-2_0.94.1_start')] - ) - # Make sure we checked to see if the previous node was finished - monitor_key_exists.assert_has_calls( - [call('admin', 'ip-192-168-1-2_0.94.1_done')] - ) - # Make sure we waited at last once before proceeding - self.log.assert_has_calls( - [call('Previous node is: ip-192-168-1-2')], - [call('ip-192-168-1-2 is not finished. Waiting')], - ) - self.assertEqual(tval[0], previous_node_start_time + 700) From 6adc932f9018adfffc807501a6f154b8834484f0 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 6 Sep 2016 15:31:04 -0700 Subject: [PATCH 1207/2699] Move upgrade code to shared lib Moving the ceph mon upgrade code over to the ceph shared library. This will make it easier to make patches and have them be applied to all 3 charms at once. Change-Id: I541269d05e6ff8883233a21c78ebe9df89b9e797 --- ceph-osd/hooks/ceph_hooks.py | 174 +-------- ceph-osd/lib/ceph/__init__.py | 450 ++++++++++++++++++++--- ceph-osd/lib/ceph/ceph_broker.py | 148 +++++++- ceph-osd/unit_tests/test_upgrade.py | 35 ++ ceph-osd/unit_tests/test_upgrade_roll.py | 179 --------- 5 files changed, 587 insertions(+), 399 deletions(-) create mode 100644 ceph-osd/unit_tests/test_upgrade.py delete mode 100644 ceph-osd/unit_tests/test_upgrade_roll.py diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 24c2b4e5..3d04ee81 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -15,12 +15,9 @@ # limitations under the License. import os import shutil -import random -import subprocess import sys import tempfile import socket -import time import netifaces sys.path.append('lib') @@ -45,10 +42,7 @@ from charmhelpers.core.host import ( umount, mkdir, - cmp_pkgrevno, - service_stop, - service_start, - chownr) + cmp_pkgrevno) from charmhelpers.fetch import ( add_source, apt_install, @@ -75,10 +69,7 @@ format_ipv6_addr, ) from charmhelpers.contrib.storage.linux.ceph import ( - CephConfContext, - monitor_key_set, - monitor_key_exists, - monitor_key_get) + CephConfContext) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -120,7 +111,8 @@ def check_for_upgrade(): if new_version == upgrade_paths[old_version]: log("{} to {} is a valid upgrade path. Proceeding.".format( old_version, new_version)) - roll_osd_cluster(new_version) + ceph.roll_osd_cluster(new_version=new_version, + upgrade_key='osd-upgrade') else: # Log a helpful error message log("Invalid upgrade path from {} to {}. 
" @@ -129,164 +121,6 @@ def check_for_upgrade(): pretty_print_upgrade_paths())) -def lock_and_roll(my_name, version): - start_timestamp = time.time() - - log('monitor_key_set {}_start {}'.format( - my_name, - version, - start_timestamp)) - monitor_key_set('osd-upgrade', "{}_{}_start".format(my_name, version), - start_timestamp) - log("Rolling") - # This should be quick - upgrade_osd() - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_done {}'.format(my_name, - version, - stop_timestamp)) - monitor_key_set('osd-upgrade', "{}_{}_done".format(my_name, version), - stop_timestamp) - - -def wait_on_previous_node(previous_node, version): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - 'osd-upgrade', - "{}_{}_done".format(previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - 'osd-upgrade', - "{}_{}_start".format(previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - 'osd-upgrade', - "{}_{}_done".format(previous_node, version)) - - -def get_upgrade_position(osd_sorted_list, match_name): - for index, item in enumerate(osd_sorted_list): - if item.name == match_name: - return index - return None - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. -# It rolls an entire server at a time. -def roll_osd_cluster(new_version): - """ - This is tricky to get right so here's what we're going to do. - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. - - TODO: If you're not in the same failure domain it's safe to upgrade - 1. Examine all pools and adopt the most strict failure domain policy - Example: Pool 1: Failure domain = rack - Pool 2: Failure domain = host - Pool 3: Failure domain = row - - outcome: Failure domain = host - """ - log('roll_osd_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - osd_tree = ceph.get_osd_tree(service='osd-upgrade') - # A sorted list of osd unit names - osd_sorted_list = sorted(osd_tree) - log("osd_sorted_list: {}".format(osd_sorted_list)) - - try: - position = get_upgrade_position(osd_sorted_list, my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(my_name=my_name, version=new_version) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - osd_sorted_list[position - 1].name)) - wait_on_previous_node( - previous_node=osd_sorted_list[position - 1].name, - version=new_version) - lock_and_roll(my_name=my_name, version=new_version) - except ValueError: - log("Failed to find name {} in list {}".format( - my_name, osd_sorted_list)) - status_set('blocked', 'failed to upgrade osd') - - -def upgrade_osd(): - current_version = ceph.get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_stop('ceph-osd@{}'.format(osd_id)) - else: - service_stop('ceph-osd-all') - apt_install(packages=ceph.PACKAGES, fatal=True) - - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph.ceph_user(), - group=ceph.ceph_user()) - if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_start('ceph-osd@{}'.format(osd_id)) - else: - service_start('ceph-osd-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - def tune_network_adapters(): interfaces = netifaces.interfaces() for interface in interfaces: diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 4b68e039..522e0876 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
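After this move the hook keeps only the policy decision; the mechanics deleted above now live in lib/ceph. A condensed sketch of the surviving call path, with lib/ceph stubbed out so the snippet stands alone:

    class CephLibStub:
        # Stand-in for the charm's lib/ceph package, illustration only.
        def roll_osd_cluster(self, new_version, upgrade_key):
            print("rolling to {} via {}".format(new_version, upgrade_key))

    ceph = CephLibStub()
    upgrade_paths = {'cloud:trusty-juno': 'cloud:trusty-kilo'}

    def check_for_upgrade_sketch(old_version, new_version):
        # Mirrors check_for_upgrade(): validate the path, then delegate.
        if upgrade_paths.get(old_version) == new_version:
            ceph.roll_osd_cluster(new_version=new_version,
                                  upgrade_key='osd-upgrade')

    check_for_upgrade_sketch('cloud:trusty-juno', 'cloud:trusty-kilo')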
import ctypes +import collections import json +import random +import socket import subprocess import time import os @@ -22,38 +25,38 @@ import shutil from charmhelpers.core import hookenv - from charmhelpers.core.host import ( mkdir, chownr, service_restart, lsb_release, - cmp_pkgrevno, service_stop, mounts) + cmp_pkgrevno, service_stop, mounts, service_start) from charmhelpers.core.hookenv import ( log, ERROR, cached, status_set, - WARNING, DEBUG) + WARNING, DEBUG, config) from charmhelpers.core.services import render_template from charmhelpers.fetch import ( - apt_cache -) - + apt_cache, + add_source, apt_install, apt_update) +from charmhelpers.contrib.storage.linux.ceph import ( + monitor_key_set, + monitor_key_exists, + monitor_key_get, + get_mon_map) from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, is_device_mounted) -from utils import ( - get_unit_hostname, -) - LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', + 'radosgw', 'xfsprogs'] LinkSpeed = { "BASE_10": 10, @@ -567,7 +570,7 @@ def error_out(msg): def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -594,7 +597,7 @@ def is_quorum(): def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -627,7 +630,7 @@ def wait_for_quorum(): def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ "sudo", "-u", @@ -921,41 +924,83 @@ def import_radosgw_key(key): } -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) +def get_radosgw_key(pool_list): + return get_named_key(name='radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) -admin_caps = { - 'mds': ['allow'], - 'mon': ['allow *'], - 'osd': ['allow *'] -} -osd_upgrade_caps = { - 'mon': ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ] -} +_default_caps = collections.OrderedDict([ + ('mon', ['allow rw']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + 
ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) -def get_named_key(name, caps=None): +def get_named_key(name, caps=None, pool_list=None): + """ + Retrieve a specific named cephx key + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :return: Returns a cephx key + """ caps = caps or _default_caps cmd = [ "sudo", @@ -965,16 +1010,20 @@ def get_named_key(name, caps=None): '--name', 'mon.', '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() + socket.gethostname() ), 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 @@ -986,7 +1035,7 @@ def upgrade_key_caps(key, caps): cmd = [ "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] - for subsystem, subcaps in caps.iteritems(): + for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) @@ -997,7 +1046,7 @@ def systemd(): def bootstrap_monitor_cluster(secret): - hostname = get_unit_hostname() + hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) if systemd(): @@ -1042,7 +1091,7 @@ def bootstrap_monitor_cluster(secret): def update_monfs(): - hostname = get_unit_hostname() + hostname = socket.gethostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) if systemd(): init_marker = '{}/systemd'.format(monfs) @@ -1181,3 +1230,308 @@ def get_running_osds(): return result.split() except subprocess.CalledProcessError: return [] + + +def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): + """ + Fairly self explanatory name. This function will wait + for all monitors in the cluster to upgrade or it will + return after a timeout period has expired. + :param new_version: str of the version to watch + :param upgrade_key: the cephx key name to use + """ + done = False + start_time = time.time() + monitor_list = [] + + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + while not done: + try: + done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format( + "mon", mon, new_version + )) for mon in monitor_list) + current_time = time.time() + if current_time > (start_time + 10 * 60): + raise Exception + else: + # Wait 30 seconds and test again if all monitors are upgraded + time.sleep(30) + except subprocess.CalledProcessError: + raise + + +# Edge cases: +# 1. 
Previous node dies on upgrade, can we retry? +def roll_monitor_cluster(new_version, upgrade_key): + """ + This is tricky to get right so here's what we're going to do. + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous monitor is upgraded yet. + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(upgrade_key=upgrade_key, + service='mon', + previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(): + current_version = get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for mon_id in get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph_user(), + group=ceph_user()) + if systemd(): + for mon_id in get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + status_set("active", "") + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def lock_and_roll(upgrade_key, service, my_name, version): + start_timestamp = time.time() + + log('monitor_key_set {}_{}_{}_start {}'.format( + service, + my_name, + version, + start_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_start".format( + service, my_name, version), start_timestamp) + log("Rolling") + + # This should be quick + if service == 'osd': + upgrade_osd() + elif service == 'mon': + upgrade_monitor() + else: + log("Unknown service {}. 
Unable to upgrade".format(service), + level=ERROR) + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_{}_{}_done {}'.format(service, + my_name, + version, + stop_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, + my_name, + version), + stop_timestamp) + + +def wait_on_previous_node(upgrade_key, service, previous_node, version): + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + upgrade_key, + "{}_{}_{}_start".format(service, previous_node, version)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + +def get_upgrade_position(osd_sorted_list, match_name): + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version, upgrade_key): + """ + This is tricky to get right so here's what we're going to do. + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! 
Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(): + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + new_version = config('release-version') + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for osd_id in get_local_osd_ids(): + service_stop('ceph-osd@{}'.format(osd_id)) + else: + service_stop('ceph-osd-all') + apt_install(packages=PACKAGES, fatal=True) + + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=ceph_user(), + group=ceph_user()) + if systemd(): + for osd_id in get_local_osd_ids(): + service_start('ceph-osd@{}'.format(osd_id)) + else: + service_start('ceph-osd-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index d55e570b..0ed9833e 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -15,6 +15,8 @@ # limitations under the License. import json +import os +from tempfile import NamedTemporaryFile from charmhelpers.core.hookenv import ( log, @@ -42,6 +44,8 @@ # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. 
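One subtlety worth noting in the relocated code: get_upgrade_position() returns None when the host is absent from the CRUSH tree, while roll_osd_cluster() only catches ValueError, so a missing host would surface later as a TypeError on osd_sorted_list[position - 1]. A tightened variant that funnels both lookups through the existing except branch (a sketch, not the shipped code):

    import collections

    Osd = collections.namedtuple('Osd', ['name'])

    def get_upgrade_position(osd_sorted_list, match_name):
        # Raising ValueError instead of returning None lets the caller's
        # existing `except ValueError` handler cover the not-found case,
        # matching the mon path where list.index() raises ValueError.
        for index, item in enumerate(osd_sorted_list):
            if item.name == match_name:
                return index
        raise ValueError("{} not found in osd list".format(match_name))

    assert get_upgrade_position([Osd('a'), Osd('b')], 'b') == 1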
# It will give a useful error message +from subprocess import check_output, CalledProcessError + POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] "size": [int], @@ -56,8 +60,8 @@ "write_fadvise_dontneed": [bool], "noscrub": [bool], "nodeep-scrub": [bool], - "hit_set_type": [basestring, ["bloom", "explicit_hash", - "explicit_object"]], + "hit_set_type": [str, ["bloom", "explicit_hash", + "explicit_object"]], "hit_set_count": [int, [1, 1]], "hit_set_period": [int], "hit_set_fpp": [float, [0.0, 1.0]], @@ -289,6 +293,136 @@ def handle_set_pool_value(request, service): value=params['value']) +def handle_rgw_regionmap_update(request, service): + name = request.get('client-name') + if not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output(['radosgw-admin', + '--id', service, + 'regionmap', 'update', '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_regionmap_default(request, service): + region = request.get('rgw-region') + name = request.get('client-name') + if not region or not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'regionmap', + 'default', + '--rgw-region', region, + '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_zone_set(request, service): + json_file = request.get('zone-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'zone', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + +def handle_rgw_create_user(request, service): + user_id = request.get('rgw-uid') + display_name = request.get('display-name') + name = request.get('client-name') + if not name or not display_name or not user_id: + msg = "Missing client-name, display-name or rgw-uid" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + create_output = check_output( + [ + 'radosgw-admin', + '--id', service, + 'user', + 'create', + '--uid', user_id, + '--display-name', display_name, + '--name', name, + '--system' + ] + ) + try: + user_json = json.loads(create_output) + return {'exit-code': 0, 'user': user_json} + except ValueError as err: + log(err, level=ERROR) + return {'exit-code': 1, 'stderr': err} + + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_region_set(request, service): + # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + json_file = request.get('region-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not 
region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'region', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + def process_requests_v1(reqs): """Process v1 requests. @@ -341,6 +475,16 @@ def process_requests_v1(reqs): snapshot_name=snapshot_name) elif op == "set-pool-value": ret = handle_set_pool_value(request=req, service=svc) + elif op == "rgw-region-set": + ret = handle_rgw_region_set(request=req, service=svc) + elif op == "rgw-zone-set": + ret = handle_rgw_zone_set(request=req, service=svc) + elif op == "rgw-regionmap-update": + ret = handle_rgw_regionmap_update(request=req, service=svc) + elif op == "rgw-regionmap-default": + ret = handle_rgw_regionmap_default(request=req, service=svc) + elif op == "rgw-create-user": + ret = handle_rgw_create_user(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py new file mode 100644 index 00000000..7b213ca4 --- /dev/null +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -0,0 +1,35 @@ +import unittest + +__author__ = 'Chris Holcombe ' + +from mock import patch, MagicMock + +from ceph_hooks import check_for_upgrade + + +def config_side_effect(*args): + if args[0] == 'source': + return 'cloud:trusty-kilo' + elif args[0] == 'key': + return 'key' + elif args[0] == 'release-version': + return 'cloud:trusty-kilo' + + +class UpgradeRollingTestCase(unittest.TestCase): + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.host') + @patch('ceph_hooks.ceph.roll_osd_cluster') + def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv): + host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + check_for_upgrade() + + roll_osd_cluster.assert_called_with( + new_version='cloud:trusty-kilo', + upgrade_key='osd-upgrade') diff --git a/ceph-osd/unit_tests/test_upgrade_roll.py b/ceph-osd/unit_tests/test_upgrade_roll.py deleted file mode 100644 index c88ea6e1..00000000 --- a/ceph-osd/unit_tests/test_upgrade_roll.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
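With the behaviour centralized, the per-charm unit tests shrink to the delegation check above, and deeper coverage can live once against the shared library. A sketch of such a test, assuming lib/ is on sys.path so the ceph package imports cleanly and mock is available as in the existing tests:

    import unittest
    from mock import patch

    class LockAndRollTestCase(unittest.TestCase):
        @patch('ceph.upgrade_osd')
        @patch('ceph.monitor_key_set')
        def test_sets_start_then_done(self, key_set, upgrade_osd):
            import ceph
            ceph.lock_and_roll(upgrade_key='osd-upgrade', service='osd',
                               my_name='ip-192-168-1-2', version='0.94.1')
            upgrade_osd.assert_called_once_with()
            # Verify the start key is published before the done key.
            keys = [c[0][1] for c in key_set.call_args_list]
            self.assertEqual(keys, ['osd_ip-192-168-1-2_0.94.1_start',
                                    'osd_ip-192-168-1-2_0.94.1_done'])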
- -import time - -from mock import patch, call, MagicMock - -from ceph import CrushLocation - -import test_utils -import ceph_hooks - -TO_PATCH = [ - 'apt_install', - 'apt_update', - 'add_source', - 'config', - 'ceph', - 'get_conf', - 'hookenv', - 'host', - 'log', - 'service_start', - 'service_stop', - 'socket', - 'status_set', - 'chownr', -] - - -def config_side_effect(*args): - if args[0] == 'source': - return 'cloud:trusty-kilo' - elif args[0] == 'key': - return 'key' - elif args[0] == 'release-version': - return 'cloud:trusty-kilo' - - -previous_node_start_time = time.time() - (9 * 60) - - -def monitor_key_side_effect(*args): - if args[1] == \ - 'ip-192-168-1-2_0.94.1_done': - return False - elif args[1] == \ - 'ip-192-168-1-2_0.94.1_start': - # Return that the previous node started 9 minutes ago - return previous_node_start_time - - -class UpgradeRollingTestCase(test_utils.CharmTestCase): - def setUp(self): - super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH) - - @patch('ceph_hooks.roll_osd_cluster') - def test_check_for_upgrade(self, roll_osd_cluster): - self.host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - self.hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] - ceph_hooks.check_for_upgrade() - - roll_osd_cluster.assert_called_with('cloud:trusty-kilo') - - @patch('ceph_hooks.upgrade_osd') - @patch('ceph_hooks.monitor_key_set') - def test_lock_and_roll(self, monitor_key_set, upgrade_osd): - monitor_key_set.monitor_key_set.return_value = None - ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2', - version='0.94.1') - upgrade_osd.assert_called_once_with() - - def test_upgrade_osd(self): - self.config.side_effect = config_side_effect - self.ceph.get_version.return_value = "0.80" - self.ceph.ceph_user.return_value = "ceph" - self.ceph.systemd.return_value = False - ceph_hooks.upgrade_osd() - self.service_stop.assert_called_with('ceph-osd-all') - self.service_start.assert_called_with('ceph-osd-all') - self.status_set.assert_has_calls([ - call('maintenance', 'Upgrading osd'), - ]) - self.chownr.assert_has_calls( - [ - call(group='ceph', owner='ceph', path='/var/lib/ceph') - ] - ) - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.get_upgrade_position') - def test_roll_osd_cluster_first(self, - get_upgrade_position, - lock_and_roll): - self.socket.gethostname.return_value = "ip-192-168-1-2" - self.ceph.get_osd_tree.return_value = "" - get_upgrade_position.return_value = 0 - ceph_hooks.roll_osd_cluster('0.94.1') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-2", - version="0.94.1") - - @patch('ceph_hooks.lock_and_roll') - @patch('ceph_hooks.get_upgrade_position') - @patch('ceph_hooks.wait_on_previous_node') - def test_roll_osd_cluster_second(self, - wait_on_previous_node, - get_upgrade_position, - lock_and_roll): - wait_on_previous_node.return_value = None - self.socket.gethostname.return_value = "ip-192-168-1-3" - self.ceph.get_osd_tree.return_value = [ - CrushLocation( - name="ip-192-168-1-2", - identifier='a', - host='host-a', - rack='rack-a', - row='row-a', - datacenter='dc-1', - chassis='chassis-a', - root='ceph'), - CrushLocation( - name="ip-192-168-1-3", - identifier='a', - host='host-b', - rack='rack-a', - row='row-a', - datacenter='dc-1', - chassis='chassis-a', - root='ceph') - ] - get_upgrade_position.return_value = 1 - ceph_hooks.roll_osd_cluster('0.94.1') - self.status_set.assert_called_with( - 
'blocked', - 'Waiting on ip-192-168-1-2 to finish upgrading') - lock_and_roll.assert_called_with(my_name="ip-192-168-1-3", - version="0.94.1") - - @patch('time.time', lambda *args: previous_node_start_time + 10 * 60 + 1) - @patch('ceph_hooks.monitor_key_get') - @patch('ceph_hooks.monitor_key_exists') - def test_wait_on_previous_node(self, - monitor_key_exists, - monitor_key_get): - monitor_key_get.side_effect = monitor_key_side_effect - monitor_key_exists.return_value = False - - ceph_hooks.wait_on_previous_node(previous_node="ip-192-168-1-2", - version='0.94.1') - - # Make sure we checked to see if the previous node started - monitor_key_get.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_0.94.1_start')] - ) - # Make sure we checked to see if the previous node was finished - monitor_key_exists.assert_has_calls( - [call('osd-upgrade', 'ip-192-168-1-2_0.94.1_done')] - ) - # Make sure we waited at last once before proceeding - self.log.assert_has_calls( - [call('Previous node is: ip-192-168-1-2')], - [call('ip-192-168-1-2 is not finished. Waiting')], - ) From 148904d1f81a50bc0f05b51d380c73b7723926b8 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 23 Sep 2016 10:27:54 -0700 Subject: [PATCH 1208/2699] Update charms.ceph location Change-Id: Id5e45cf7ebd5836a05b856f1bc25706b487eca7c --- ceph-mon/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 21d1af05..c4c67c1e 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -25,7 +25,7 @@ ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml git-sync: bin/git_sync.py - $(PYTHON) bin/git_sync.py -d lib -s https://github.com/CanonicalLtd/charms_ceph.git + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git sync: git-sync ch-sync From a3067f3cfab80a48bc9bcb36257f60f0b7407925 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 23 Sep 2016 10:29:12 -0700 Subject: [PATCH 1209/2699] Update charms.ceph location Change-Id: I5a86593fd1e81255e1accacfccf5da5984578413 --- ceph-osd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 10d0ea8a..b6b9b665 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -26,7 +26,7 @@ ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml git-sync: bin/git_sync.py - $(PYTHON) bin/git_sync.py -d lib -s https://github.com/CanonicalLtd/charms_ceph.git + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git sync: git-sync ch-sync From fbfc1d7e8a49a8957cacfa798dc8bf9d19fbed5e Mon Sep 17 00:00:00 2001 From: Ante Karamatic Date: Mon, 26 Sep 2016 12:19:36 +0200 Subject: [PATCH 1210/2699] Reorder importing python modules Path for python modules needs to be appended before the module is imported Closes-Bug: #1627685 Change-Id: I6001a7b01f3ba6c9a5179466a3165f2eace2e092 --- ceph-osd/actions/replace_osd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index df9c7583..69ba4a6f 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
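The bug fixed in this patch is purely about ordering: sys.path has to gain the hooks/ directory before anything that lives under it (the charm's ceph module, and the vendored charmhelpers) is imported. A minimal illustration:

    import sys

    # Wrong order: the import runs before hooks/ is searchable.
    #     import ceph              # raises ImportError
    #     sys.path.append('hooks/')

    # Right order: extend the search path first, then import.
    sys.path.append('hooks/')
    # import ceph                  # now resolves to hooks/ceph.py
    print(sys.path[-1])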
-from charmhelpers.core.hookenv import action_get, log, config, action_fail - import os import sys -sys.path.append('hooks') +sys.path.append('hooks/') + +from charmhelpers.core.hookenv import action_get, log, config, action_fail import ceph From 2bd302b7cb3c86c966dcfa271df7b2deb1e2c8a4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 26 Sep 2016 14:01:54 +0100 Subject: [PATCH 1211/2699] Fix radosgw relation broken by commit 888ceeb Change-Id: Ia21a5eb9238e0d731a603911f77c69e13af35c8a Closes-Bug: 1627718 --- ceph-mon/lib/ceph/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 522e0876..8928efe2 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -924,7 +924,7 @@ def import_radosgw_key(key): } -def get_radosgw_key(pool_list): +def get_radosgw_key(pool_list=None): return get_named_key(name='radosgw.gateway', caps=_radosgw_caps, pool_list=pool_list) From bc139789557c6eb10954f33f41b917df903ac4ef Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 19 Sep 2016 10:30:28 -0700 Subject: [PATCH 1212/2699] Add MDS Relation Adding an mds relation to allow ceph-fs to come up properly. CephFS requires a few things that none of the other relations implemented. Change-Id: I7e8ab321a733a778c4ed1c52db3ea091dd993556 --- ceph-mon/hooks/ceph_hooks.py | 41 +++++++++++++++++++++++++++++ ceph-mon/hooks/mds-relation-changed | 1 + ceph-mon/hooks/mds-relation-joined | 1 + ceph-mon/metadata.yaml | 2 ++ 4 files changed, 45 insertions(+) create mode 120000 ceph-mon/hooks/mds-relation-changed create mode 120000 ceph-mon/hooks/mds-relation-joined diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 1925e634..090dc996 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -342,6 +342,9 @@ def notify_client(): client_relation_joined(relid) for relid in relation_ids('admin'): admin_relation_joined(relid) + for relid in relation_ids('mds'): + for unit in related_units(relid): + mds_relation_joined(relid=relid, unit=unit) def upgrade_keys(): @@ -430,6 +433,44 @@ def radosgw_relation(relid=None, unit=None): log('mon cluster not in quorum or no osds - deferring key provision') +@hooks.hook('mds-relation-changed') +@hooks.hook('mds-relation-joined') +def mds_relation_joined(relid=None, unit=None): + if ceph.is_quorum() and related_osds(): + log('mon cluster in quorum and OSDs related' + '- providing client with keys') + service_name = None + if not unit: + unit = remote_unit() + if relid is None: + units = [remote_unit()] + service_name = units[0].split('/')[0] + else: + units = related_units(relid) + if len(units) > 0: + service_name = units[0].split('/')[0] + + if service_name is not None: + public_addr = get_public_addr() + data = {'key': ceph.get_mds_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr} + settings = relation_get(rid=relid, unit=unit) + """Process broker request(s).""" + if 'broker_req' in settings: + if ceph.is_leader(): + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data[unit_response_key] = rsp + else: + log("Not leader - ignoring broker request", level=DEBUG) + + relation_set(relation_id=relid, relation_settings=data) + else: + log('mon cluster not in quorum - deferring key provision') + + @hooks.hook('admin-relation-changed') @hooks.hook('admin-relation-joined') def 
admin_relation_joined(relid=None): diff --git a/ceph-mon/hooks/mds-relation-changed b/ceph-mon/hooks/mds-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/mds-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mds-relation-joined b/ceph-mon/hooks/mds-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/mds-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index a22e2b0a..54b50348 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -24,6 +24,8 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container + mds: + interface: ceph-mds admin: interface: ceph-admin client: From 400ef6e2359471b6edb8131361abd233b886d57e Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 28 Mar 2016 12:57:56 -0700 Subject: [PATCH 1213/2699] Add support for apparmor security profiles Install apparmor profile for ceph-osd processes, and provide associated configuration option to place any ceph-osd processes into enforce, complain, or disable apparmor profile mode. As this is the first release of this feature, default to disabled and allow charm users to test and provide feedback for this release. Change-Id: I4524c587ac70de13aa3a0cb912033e6eb44b0403 --- ceph-osd/charm-helpers-hooks.yaml | 6 + ceph-osd/config.yaml | 10 +- ceph-osd/files/apparmor/usr.bin.ceph-osd | 47 + ceph-osd/hooks/ceph_hooks.py | 55 +- .../contrib/hahelpers/__init__.py | 13 + .../charmhelpers/contrib/hahelpers/apache.py | 95 ++ .../charmhelpers/contrib/hahelpers/cluster.py | 363 ++++ .../contrib/hardening/ssh/checks/config.py | 37 +- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 1508 +++++++++++++++++ .../charmhelpers/contrib/openstack/ip.py | 186 ++ .../charmhelpers/contrib/openstack/neutron.py | 388 +++++ ceph-osd/lib/ceph/__init__.py | 2 +- ceph-osd/unit_tests/test_ceph_hooks.py | 63 +- 14 files changed, 2767 insertions(+), 8 deletions(-) create mode 100644 ceph-osd/files/apparmor/usr.bin.ceph-osd create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/context.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index b94ba72c..63321ed2 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -5,13 +5,19 @@ include: - cli - osplatform - fetch + - contrib.hahelpers: + - apache + - cluster - contrib.python.packages - contrib.storage.linux - contrib.openstack.alternatives - contrib.network.ip - contrib.openstack: - alternatives + - context - exceptions + - ip + - neutron - utils - contrib.charmsupport - contrib.hardening|inc=* diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b54e82a0..778bc645 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -222,4 +222,12 @@ options: max_sectors_kb. For the network card this will detect the link speed and make appropriate sysctl changes. Enabling this option should generally be safe. 
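The three accepted values of the new aa-profile-mode option correspond one-to-one to AppArmor's enforcement states. What the charm ultimately arranges is roughly equivalent to the standard apparmor-utils tools shown below (a sketch; the actual mode switching happens inside charmhelpers' AppArmorContext):

    import subprocess

    AA_TOOLS = {
        'complain': 'aa-complain',   # profile loaded; violations logged only
        'enforce': 'aa-enforce',     # profile loaded; violations denied
        'disable': 'aa-disable',     # profile unloaded
    }

    def set_profile_mode(profile, mode):
        tool = AA_TOOLS.get(mode)
        if tool is None:
            raise ValueError('invalid aa-profile-mode: {}'.format(mode))
        subprocess.check_call([tool, profile])

    # e.g. set_profile_mode('usr.bin.ceph-osd', 'complain')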
- + aa-profile-mode: + type: string + default: 'disable' + description: | + Enable apparmor profile. Valid settings: 'complain', 'enforce' or 'disable'. + . + NOTE: changing the value of this option is disruptive to a running Ceph + cluster as all ceph-osd processes must be restarted as part of changing the + apparmor profile enforcement mode. diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd new file mode 100644 index 00000000..b8856e6f --- /dev/null +++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd @@ -0,0 +1,47 @@ +# vim:syntax=apparmor +# Author: Chris Holcombe +#include + +/usr/bin/ceph-osd { + #include + #include + #include + + /usr/bin/ceph-osd mr, + + capability setgid, + capability setuid, + capability dac_override, + capability dac_read_search, + + network inet stream, + network inet6 stream, + + /etc/ceph/* r, + + @{PROC}/@{pids}/auxv r, + @{PROC}/@{pids}/net/dev r, + @{PROC}/loadavg r, + + /run/ceph/* rw, + /srv/ceph/** rwk, + /tmp/ r, + /var/lib/ceph/** rwk, + /var/lib/charm/*/ceph.conf r, + /var/log/ceph/* rwk, + /var/run/ceph/* rwk, + /var/tmp/ r, + + /dev/ r, + /dev/** rw, + /sys/devices/** r, + /proc/partitions r, + /run/blkid/blkid.tab r, + + /bin/dash rix, + + /usr/bin/lsb_release rix, + /usr/share/distro-info/** r, + /etc/lsb-release r, + /etc/debian_version r, +} diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3d04ee81..faa780da 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import glob import os import shutil import sys @@ -42,7 +43,9 @@ from charmhelpers.core.host import ( umount, mkdir, - cmp_pkgrevno) + cmp_pkgrevno, + service_reload, + service_restart) from charmhelpers.fetch import ( add_source, apt_install, @@ -52,7 +55,7 @@ ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core import host - +from charmhelpers.contrib.openstack.context import AppArmorContext from utils import ( get_host_ip, get_networks, @@ -62,7 +65,6 @@ get_public_addr, get_cluster_addr, ) - from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( get_ipv6_addr, @@ -131,6 +133,52 @@ def tune_network_adapters(): ceph.tune_nic(interface) +def copy_profile_into_place(): + """ + Copy the apparmor profiles included with the charm + into the /etc/apparmor.d directory. + """ + apparmor_dir = os.path.join(os.sep, + 'etc', + 'apparmor.d') + + for x in glob.glob('files/apparmor/*'): + shutil.copy(x, apparmor_dir) + + +class CephOsdAppArmorContext(AppArmorContext): + """"Apparmor context for ceph-osd binary""" + def __init__(self): + super(CephOsdAppArmorContext, self).__init__() + self.aa_profile = 'usr.bin.ceph-osd' + + def __call__(self): + super(CephOsdAppArmorContext, self).__call__() + if not self.ctxt: + return self.ctxt + self._ctxt.update({'aa_profile': self.aa_profile}) + return self.ctxt + + +def install_apparmor_profile(): + """ + Install ceph apparmor profiles and configure + based on current setting of 'aa-profile-mode' + configuration option. 
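+
+    Sketch of the resulting flow when 'aa-profile-mode' has changed
+    (all names below are defined in this file):
+
+        copy_profile_into_place()
+        aa_context = CephOsdAppArmorContext()
+        aa_context.setup_aa_profile()
+        service_reload('apparmor')
+        # then restart ceph-osd@<id> per local OSD under systemd,
+        # or ceph-osd-all under upstart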
+ """ + log('Installing apparmor profile for ceph-osd') + copy_profile_into_place() + if config().changed('aa-profile-mode'): + aa_context = CephOsdAppArmorContext() + aa_context.setup_aa_profile() + service_reload('apparmor') + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_restart('ceph-osd@{}'.format(osd_id)) + else: + service_restart('ceph-osd-all') + + @hooks.hook('install.real') @harden() def install(): @@ -282,6 +330,7 @@ def config_changed(): if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) prepare_disks_and_activate() + install_apparmor_profile() @hooks.hook('storage.real') diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..d0c69942 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,95 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'r') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + if ca_cert: + cert_file = ('/usr/local/share/ca-certificates/' + 'keystone_juju_ca_cert.crt') + old_cert = retrieve_ca_cert(cert_file) + if old_cert and old_cert == ca_cert: + log("CA cert is the same as installed version", level=INFO) + else: + log("Installing new CA cert", level=INFO) + with open(cert_file, 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..e02350e0 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,363 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
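+
+A typical (illustrative) hook-side use, assuming a corosync resource
+named 'res_ceph_vip' is managed for this service:
+
+    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader
+
+    if is_elected_leader('res_ceph_vip'):
+        pass  # the one action exactly a single unit should perform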
+""" + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + DEBUG, + WARNING, + unit_get, + is_leader as juju_is_leader, + status_set, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + +DC_RESOURCE_NAME = 'DC' + + +class HAIncompleteConfig(Exception): + pass + + +class HAIncorrectConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +class CRMDCNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 3. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the Apache service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    return public_port - (i * 10)
+
+
+def get_hacluster_config(exclude_keys=None):
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname, os-access-hostname
+
+    param: exclude_keys: list of setting key(s) to be excluded.
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncorrectConfig if settings are missing or incorrect.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
+    conf = {}
+    for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
+        conf[setting] = config_get(setting)
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
+    return conf
+
+
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname', 'os-access-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSTemplateRenderer: A config templating object to inspect
+                  for a complete https context.
+
+    :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
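+
+    Example results (illustrative): 'https://10.5.100.1' when a complete
+    https context exists and the clustered vip is 10.5.100.1, otherwise
+    'http://' followed by this unit's private-address.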
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    if is_clustered():
+        addr = config_get(vip_setting)
+    else:
+        addr = unit_get('private-address')
+    return '%s://%s' % (scheme, addr)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
index 94e524e2..f3cac6d9 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -14,6 +14,11 @@
 
 import os
 
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_iface_addr,
+    is_ip,
+)
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
@@ -121,6 +126,36 @@ def get_ciphers(self, cbc_required):
 
         return cipher[weak_ciphers]
 
+    def get_listening(self, listen=['0.0.0.0']):
+        """Returns a list of addresses SSH can listen on
+
+        Turns input into a sensible list of IPs SSH can listen on. Input
+        must be a python list of interface names, IPs and/or CIDRs.
+
+        :param listen: list of IPs, CIDRs, interface names
+
+        :returns: list of IPs available on the host
+        """
+        if listen == ['0.0.0.0']:
+            return listen
+
+        value = []
+        for network in listen:
+            try:
+                ip = get_address_in_network(network=network, fatal=True)
+            except ValueError:
+                if is_ip(network):
+                    ip = network
+                else:
+                    try:
+                        ip = get_iface_addr(iface=network, fatal=False)[0]
+                    except IndexError:
+                        continue
+            value.append(ip)
+        if value == []:
+            return ['0.0.0.0']
+        return value
+
     def __call__(self):
         settings = utils.get_settings('ssh')
         if settings['common']['network_ipv6_enable']:
@@ -180,7 +215,7 @@ def __call__(self):
             addr_family = 'inet'
 
         ctxt = {
-            'ssh_ip': settings['server']['listen_to'],
+            'ssh_ip': self.get_listening(settings['server']['listen_to']),
             'password_auth_allowed':
             settings['server']['password_authentication'],
             'ports': settings['common']['ports'],
diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
index d6dee17c..2d2026e4 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py
@@ -406,7 +406,7 @@ def is_ip(address):
         # Test to see if already an IPv4/IPv6 address
         address = netaddr.IPAddress(address)
         return True
-    except netaddr.AddrFormatError:
+    except (netaddr.AddrFormatError, ValueError):
         return False
 
 
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py
new file mode 100644
index 00000000..76737f22
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py
@@ -0,0 +1,1508 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
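+
+# NOTE: this module is synced into the charm with this change chiefly
+# for AppArmorContext, which ceph_hooks.py subclasses as
+# CephOsdAppArmorContext to manage the ceph-osd profile mode.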
+ +import glob +import json +import os +import re +import time +from base64 import b64decode +from subprocess import check_call, CalledProcessError + +import six + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + WARNING, + ERROR, + status_set, +) + +from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError + +from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, + list_nics, + get_nic_hwaddr, + mkdir, + write_file, + pwgen, + lsb_release, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv4_addr, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_address_in_network, + is_bridge_member, +) +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_host_ip, +) +from charmhelpers.core.unitdata import kv + +try: + import psutil +except ImportError: + apt_install('python-psutil', fatal=True) + import psutil + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +class OSContextGenerator(object): + """Base class for all context generators.""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. + """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. 
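+
+        For example (illustrative), a generator with
+        interfaces=['ceph'] reports related=True as soon as any 'ceph'
+        relation id exists, even before the relation data is complete.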
+ """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __init__(self, + database=None, user=None, relation_prefix=None, ssl_dir=None): + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ + self.relation_prefix = relation_prefix + self.database = database + self.user = user + self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network(access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql' + } + if self.context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. 
(database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if self.context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, service=None, service_user=None, rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. 
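+            # (Illustrative: with service='cinder' this yields
+            # /var/cache/cinder, created with mode 0o700 and owned by
+            # the service user so signing material stays private.)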
+ cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version}) + + if self.context_complete(ctxt): + # NOTE(jamespage) this is required for >= icehouse + # so a missing value just indicates keystone needs + # upgrading + ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + return ctxt + + return {} + + +class AMQPContext(OSContextGenerator): + + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): + self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] + + def __call__(self): + log('Generating template context for amqp', level=DEBUG) + conf = config() + if self.relation_prefix: + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + + try: + username = conf[user_setting] + vhost = conf[vhost_setting] + except KeyError as e: + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' 
% e, level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.rel_name): + ha_vip_only = False + self.related = True + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if self.context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'w') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! + break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + if not self.complete: + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } + for rid in relation_ids('ceph'): + for unit in related_units(rid): + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr + mon_hosts.append(ceph_addr) + + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not self.context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
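+
+    A minimal (illustrative) port-mapping companion might look like:
+
+        class MyServicePortContext(OSContextGenerator):
+            def __call__(self):
+                # hypothetical API: public port 9876, backend on 9866
+                return {'service_ports': {'my-api': [9876, 9866]}}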
+ """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + if config('prefer-ipv6'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] + else: + addr = get_host_ip(unit_get('private-address')) + + l_unit = local_unit().replace('/', '-') + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + + if config('prefer-ipv6'): + ctxt['ipv6'] = True + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + + ctxt['stat_port'] = '8888' + + db = kv() + ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. 
Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. + external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self, cn=None): + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): + ca_cert = get_ca_cert() + if ca_cert: + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + self.configure_ca() + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + cn = resolve_address(endpoint_type=INTERNAL) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + + with open(_file, 'wb') as out: + out.write(self.plugin + '\n') + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 
'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + + def __call__(self): + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + self._save_flag_file() + return ctxt + + +class NeutronPortContext(OSContextGenerator): + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. 
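+
+        Illustrative example: given ports=['eth0', '52:54:00:ab:cd:ef'],
+        'eth0' is passed through unchanged, while the MAC address is
+        mapped back to its NIC name only if that NIC carries no IP
+        address and is not already a bridge member.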
+ """ + if not ports: + return None + + hwaddr_to_nic = {} + hwaddr_to_ip = {} + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + + hwaddr = get_nic_hwaddr(nic) + hwaddr_to_nic[hwaddr] = nic + addresses = get_ipv4_addr(nic, fatal=False) + addresses += get_ipv6_addr(iface=nic, fatal=False) + hwaddr_to_ip[hwaddr] = addresses + + resolved = [] + mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) + for entry in ports: + if re.match(mac_regex, entry): + # NIC is in known NICs and does NOT hace an IP address + if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: + # If the nic is part of a bridge then don't use it + if is_bridge_member(hwaddr_to_nic[entry]): + continue + + # Entry is a MAC address for a valid interface that doesn't + # have an IP address assigned yet. + resolved.append(hwaddr_to_nic[entry]) + else: + # If the passed entry is not a MAC address, assume it's a valid + # interface, and that the user put it there on purpose (we can + # trust it to be the real external network). + resolved.append(entry) + + # Ensure no duplicates + return list(set(resolved)) + + +class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. + + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. + + NOTE: the value of config-flags may be a comma-separated list of + key=value pairs and some Openstack config files support + comma-separated lists as values. + """ + + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. + :param template_flag: insert point for user-defined flags in template + file. + """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + + def __call__(self): + config_flags = config(self._charm_flag) + if not config_flags: + return {} + + return {self._template_flag: + config_flags_parser(config_flags)} + + +class LibvirtConfigFlagsContext(OSContextGenerator): + """ + This context provides support for extending + the libvirt section through user-defined flags. + """ + def __call__(self): + ctxt = {} + libvirt_flags = config('libvirt-flags') + if libvirt_flags: + ctxt['libvirt_flags'] = config_flags_parser( + libvirt_flags) + return ctxt + + +class SubordinateConfigContext(OSContextGenerator): + + """ + Responsible for inspecting relations to subordinates that + may be exporting required config via a json blob. + + The subordinate interface allows subordinates to export their + configuration requirements to the principle for multiple config + files and multiple serivces. 
Ie, a subordinate that has interfaces + to both glance and nova may export to following yaml blob as json:: + + glance: + /etc/glance/glance-api.conf: + sections: + DEFAULT: + - [key1, value1] + /etc/glance/glance-registry.conf: + MYSECTION: + - [key2, value2] + nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [key3, value3] + + + It is then up to the principle charms to subscribe this context to + the service+config file it is interestd in. Configuration data will + be available in the template context, in glance's case, as:: + + ctxt = { + ... other context ... + 'subordinate_configuration': { + 'DEFAULT': { + 'key1': 'value1', + }, + 'MYSECTION': { + 'key2': 'value2', + }, + } + } + """ + + def __init__(self, service, config_file, interface): + """ + :param service : Service name key to query in any subordinate + data found + :param config_file : Service's config file to query sections + :param interface : Subordinate interface to inspect + """ + self.config_file = config_file + if isinstance(service, list): + self.services = service + else: + self.services = [service] + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] + + def __call__(self): + ctxt = {'sections': {}} + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: + for unit in related_units(rid): + sub_config = relation_get('subordinate_configuration', + rid=rid, unit=unit) + if sub_config and sub_config != '': + try: + sub_config = json.loads(sub_config) + except: + log('Could not parse JSON from ' + 'subordinate_configuration setting from %s' + % rid, level=ERROR) + continue + + for service in self.services: + if service not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s service' + % (rid, service), level=INFO) + continue + + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_configuration on %s but it ' + 'contained nothing for %s' + % (rid, self.config_file), level=INFO) + continue + + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + + +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + + return ctxt + + +class SyslogContext(OSContextGenerator): + + def __call__(self): + ctxt = {'use_syslog': config('use-syslog')} + return ctxt + + +class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return {'bind_host': '::'} + else: + return {'bind_host': '0.0.0.0'} + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + # NOTE: use cpu_count if present (16.04 support) + if hasattr(psutil, 'cpu_count'): + return psutil.cpu_count() + else: + return psutil.NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 0 + count = int(self.num_cpus * multiplier) + if multiplier > 0 and count == 0: + count = 1 + ctxt = {"workers": count} + return ctxt + + +class 
ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + + return ctxt + + +class SysctlContext(OSContextGenerator): + """This context check if the 'sysctl' option exists on configuration + then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Return + defaults if it is not present. + ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + # Map of {port/mac:bridge} + portmap = parse_data_port_mappings(ports) + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. + resolved = self.resolve_ports(ports) + # FIXME: is this necessary? 
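+            # (It keys each resolved NIC by whatever form the user put
+            # in 'data-port', MAC address or device name, so the final
+            # {nic: bridge} lookup against portmap still matches.)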
+ normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + if port in ports}) + if resolved: + return {normalized[port]: bridge for port, bridge in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) + if mtu: + ctxt["devs"] = '\\n'.join(all_ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + 'api_version': + rdata.get('api_version') or '2.0', + } + if self.context_complete(ctxt): + return ctxt + return {} + + +class InternalEndpointContext(OSContextGenerator): + """Internal endpoint context. + + This context provides the endpoint type used for communication between + services e.g. between Nova and Cinder internally. Openstack uses Public + endpoints by default so this allows admins to optionally use internal + endpoints. + """ + def __call__(self): + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class AppArmorContext(OSContextGenerator): + """Base class for apparmor contexts.""" + + def __init__(self): + self._ctxt = None + self.aa_profile = None + self.aa_utils_packages = ['apparmor-utils'] + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """ + Validate aa-profile-mode settings is disable, enforce, or complain. + + :return ctxt: Dictionary of the apparmor profile or None + """ + if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: + ctxt = {'aa_profile_mode': config('aa-profile-mode'), + 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + else: + ctxt = None + return ctxt + + def __call__(self): + return self.ctxt + + def install_aa_utils(self): + """ + Install packages required for apparmor configuration. + """ + log("Installing apparmor utils.") + ensure_packages(self.aa_utils_packages) + + def manually_disable_aa_profile(self): + """ + Manually disable an apparmor profile. 
+ + If aa-profile-mode is set to disabled (default) this is required as the + template has been written but apparmor is yet unaware of the profile + and aa-disable aa-profile fails. Without this the profile would kick + into enforce mode on the next service restart. + + """ + profile_path = '/etc/apparmor.d' + disable_path = '/etc/apparmor.d/disable' + if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): + os.symlink(os.path.join(profile_path, self.aa_profile), + os.path.join(disable_path, self.aa_profile)) + + def setup_aa_profile(self): + """ + Setup an apparmor profile. + The ctxt dictionary will contain the apparmor profile mode and + the apparmor profile name. + Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] + cmd.append(self.ctxt['aa_profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa_profile_mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa_profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa_profile'], + self.ctxt['aa_profile_mode'])) + raise e diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..d1476b1a --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,186 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
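The AppArmorContext above, at the end of context.py, is intended to be
subclassed by individual charms, which supply a profile name and then drive
setup_aa_profile() from their hooks. A minimal sketch, assuming the profile
template has already been rendered into /etc/apparmor.d (the subclass name
matches the one mocked by the unit tests later in this series, but the
profile filename and the _determine_ctxt override shown here are
illustrative)::

    from charmhelpers.contrib.openstack.context import AppArmorContext

    class CephOsdAppArmorContext(AppArmorContext):

        def __init__(self):
            super(CephOsdAppArmorContext, self).__init__()
            self.aa_profile = 'usr.bin.ceph-osd'

        def _determine_ctxt(self):
            # Add the profile name alongside aa_profile_mode so that
            # setup_aa_profile() can build the aa-* command line.
            ctxt = super(CephOsdAppArmorContext, self)._determine_ctxt()
            if ctxt:
                ctxt['aa_profile'] = self.aa_profile
            return ctxt

    # From a hook, once the profile file is in place:
    CephOsdAppArmorContext().setup_aa_profile()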
+ +from charmhelpers.core.hookenv import ( + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' +ACCESS = 'access' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. + """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured, or a Juju 2.0 extra-binding has been used. 
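+
+    A hypothetical illustration (the address is invented): on a
+    non-clustered unit with os-public-network set to 10.5.0.0/24, the
+    public endpoint resolves to the unit's address inside that CIDR::
+
+        >>> resolve_address(endpoint_type=PUBLIC)
+        '10.5.0.20'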
+
+    :param endpoint_type: Network endpoint type
+    :param override: Whether to accept hostname overrides
+    """
+    resolved_address = None
+    if override:
+        resolved_address = _get_address_override(endpoint_type)
+        if resolved_address:
+            return resolved_address
+
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
+    clustered = is_clustered()
+
+    if clustered and vips:
+        if net_addr:
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
+                    resolved_address = vip
+                    break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits are configured and there is no support
+                # for extra bindings/network spaces, expect a single vip.
+                resolved_address = vips[0]
+    else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
+        else:
+            fallback_addr = unit_get(net_fallback)
+
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
+
+    if resolved_address is None:
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. (net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py
new file mode 100644
index 00000000..08c86fa7
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -0,0 +1,388 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Various utilities for dealing with Neutron and the renaming from Quantum.
+
+import six
+from subprocess import check_output
+
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+    ERROR,
+)
+
+from charmhelpers.contrib.openstack.utils import os_release
+
+
+def headers_package():
+    """Ensures correct linux-headers for running kernel are installed,
+    for building DKMS package"""
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+    return 'linux-headers-%s' % kver
+
+QUANTUM_CONF_DIR = '/etc/quantum'
+
+
+def kernel_version():
+    """ Retrieve the current major kernel version as a tuple e.g.
(3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return [headers_package(), 'openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd'], + 'packages': [determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] + } + } + if release >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 
'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + if release >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if release >= 'liberty': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') + if release >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = '/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ + parsed = {} + if mappings: + mappings = mappings.split() + for m in mappings: + p = m.partition(':') + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue + else: + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port. + + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. 
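+
+    For example (an editorial sketch; the names and the MAC are invented),
+    one mapping keyed by interface name and one by MAC address::
+
+        >>> parse_data_port_mappings('br-ex:eth1 br-data:aa:bb:cc:dd:ee:ff')
+        {'eth1': 'br-ex', 'aa:bb:cc:dd:ee:ff': 'br-data'}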
+ """ + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) + if not _mappings or list(_mappings.values()) == ['']: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. + _mappings = {mappings.split()[0]: default_bridge} + + ports = _mappings.keys() + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + The start:end range is optional and may be omitted. + + Returns dict of the form {provider: (start, end)}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + return {} + + mappings = {} + for p, r in six.iteritems(_mappings): + mappings[p] = tuple(r.split(':')) + + return mappings diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 522e0876..8928efe2 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -924,7 +924,7 @@ def import_radosgw_key(key): } -def get_radosgw_key(pool_list): +def get_radosgw_key(pool_list=None): return get_named_key(name='radosgw.gateway', caps=_radosgw_caps, pool_list=pool_list) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index ecc12a7a..d4878e2d 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -15,7 +15,7 @@ import copy import unittest -from mock import patch +from mock import patch, MagicMock, call import charmhelpers.contrib.storage.linux.ceph as ceph import ceph_hooks @@ -130,3 +130,64 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'short_object_len': True, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'ceph') + @patch.object(ceph_hooks, 'service_restart') + @patch.object(ceph_hooks, 'service_reload') + @patch.object(ceph_hooks, 'copy_profile_into_place') + @patch.object(ceph_hooks, 'CephOsdAppArmorContext') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile(self, mock_config, + mock_apparmor_context, + mock_copy_profile_into_place, + mock_service_reload, + mock_service_restart, + mock_ceph): + '''Apparmor profile reloaded when config changes (upstart)''' + m_config = MagicMock() + m_config.changed.return_value = True + mock_config.return_value = m_config + m_aa_context = MagicMock() + mock_apparmor_context.return_value = m_aa_context + mock_ceph.systemd.return_value = False + + ceph_hooks.install_apparmor_profile() + + m_aa_context.setup_aa_profile.assert_called() + mock_copy_profile_into_place.assert_called() + m_config.changed.assert_called_with('aa-profile-mode') + mock_service_restart.assert_called_with('ceph-osd-all') + mock_service_reload.assert_called_with('apparmor') + + @patch.object(ceph_hooks, 'ceph') + @patch.object(ceph_hooks, 'service_restart') + @patch.object(ceph_hooks, 'service_reload') + @patch.object(ceph_hooks, 'copy_profile_into_place') + @patch.object(ceph_hooks, 'CephOsdAppArmorContext') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile_systemd(self, mock_config, + mock_apparmor_context, + mock_copy_profile_into_place, + mock_service_reload, 
+ mock_service_restart, + mock_ceph): + '''Apparmor profile reloaded when config changes (systemd)''' + m_config = MagicMock() + m_config.changed.return_value = True + mock_config.return_value = m_config + m_aa_context = MagicMock() + mock_apparmor_context.return_value = m_aa_context + mock_ceph.systemd.return_value = True + mock_ceph.get_local_osd_ids.return_value = [0, 1, 2] + + ceph_hooks.install_apparmor_profile() + + m_aa_context.setup_aa_profile.assert_called() + mock_copy_profile_into_place.assert_called() + m_config.changed.assert_called_with('aa-profile-mode') + mock_service_reload.assert_called_with('apparmor') + mock_service_restart.assert_has_calls([ + call('ceph-osd@0'), + call('ceph-osd@1'), + call('ceph-osd@2'), + ]) From 6b14f6301491b4e59baa5c8b3e550b9d9b4717e3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 28 Sep 2016 11:39:21 -0400 Subject: [PATCH 1214/2699] Fix mds relation THis commit re-adds the fsid into the relation Change-Id: I5394317272c458a02cfd9c6d9768987f8e3bee16 --- ceph-mon/hooks/ceph_hooks.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 090dc996..7c07b6de 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -452,9 +452,11 @@ def mds_relation_joined(relid=None, unit=None): if service_name is not None: public_addr = get_public_addr() - data = {'key': ceph.get_mds_key(service_name), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr} + data = { + 'fsid': leader_get('fsid'), + 'key': ceph.get_mds_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr} settings = relation_get(rid=relid, unit=unit) """Process broker request(s).""" if 'broker_req' in settings: From f7721f59f3d85c9fbcf08cddce1ff3d960290b69 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 28 Sep 2016 12:12:48 -0400 Subject: [PATCH 1215/2699] Remove the compiled python bytecode upgrade-charm could previously fail because of compiled bytecode remaining behind. 
This change makes the upgrade-charm hook remove any such compiled bytecode files Closes-Bug: 1628322 Change-Id: I37152749e553d1894430d2074bc7f82703dcc0cd --- ceph-mon/hooks/ceph_hooks.py | 2 +- ceph-mon/hooks/upgrade-charm | 7 ++++++- ceph-mon/hooks/upgrade-charm.real | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-mon/hooks/upgrade-charm create mode 120000 ceph-mon/hooks/upgrade-charm.real diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 090dc996..ea9fcfbb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -537,7 +537,7 @@ def client_relation_changed(): log('mon cluster not in quorum', level=DEBUG) -@hooks.hook('upgrade-charm') +@hooks.hook('upgrade-charm.real') @harden() def upgrade_charm(): emit_cephconf() diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm new file mode 100755 index 00000000..440473d7 --- /dev/null +++ b/ceph-mon/hooks/upgrade-charm @@ -0,0 +1,6 @@ +#!/bin/bash +# Wrapper to ensure that old python bytecode isn't hanging around +# after we upgrade the charm with newer libraries +rm -rf **/*.pyc + +exec ./hooks/upgrade-charm.real diff --git a/ceph-mon/hooks/upgrade-charm.real b/ceph-mon/hooks/upgrade-charm.real new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/upgrade-charm.real @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file From eddb7b5d3025394d600eefa84541e43fcc38ba0b Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 29 Sep 2016 21:25:20 +0100 Subject: [PATCH 1216/2699] Provide mds bootstrap key over ceph-mds relation Rather than creating keys for individual mds units, just pass a key suitable for each unit to bootstrap its own mds key into the Ceph cluster, inline with the behaviour for bootstrapping osd's into the Ceph cluster. 
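On the consuming mds unit this implies roughly the following flow; a sketch
only, assuming the key handed over the relation is written to the
conventional bootstrap-mds keyring path and then used to register the unit's
own mds key, mirroring the OSD bootstrap flow (the path, the
hostname-derived id and the capability strings are illustrative)::

    import socket
    import subprocess

    def bootstrap_mds(mds_bootstrap_key):
        # Stash the bootstrap key received over the ceph-mds relation.
        keyring = '/var/lib/ceph/bootstrap-mds/ceph.keyring'
        subprocess.check_call(
            ['ceph-authtool', keyring, '--create-keyring',
             '--name=client.bootstrap-mds', '--add-key', mds_bootstrap_key])
        # Use the bootstrap identity to create this unit's own mds key.
        mds_id = socket.gethostname()
        subprocess.check_call(
            ['ceph', '--name', 'client.bootstrap-mds',
             '--keyring', keyring,
             'auth', 'get-or-create', 'mds.{}'.format(mds_id),
             'mon', 'allow profile mds',
             'osd', 'allow rwx', 'mds', 'allow'])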
Change-Id: I1fa50d64728c6fa68089a30079954b49a3714034 Depends-On: I981aa7fa52c43eee1c6bce7f04d281d762e9b59a --- ceph-mon/hooks/ceph_hooks.py | 2 +- ceph-mon/lib/ceph/__init__.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 7c07b6de..64d2f19e 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -454,7 +454,7 @@ def mds_relation_joined(relid=None, unit=None): public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), - 'key': ceph.get_mds_key(service_name), + 'mds_bootstrap_key': ceph.get_mds_bootstrap_key(), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} settings = relation_get(rid=relid, unit=unit) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 8928efe2..328512b8 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -936,6 +936,18 @@ def get_mds_key(name): caps=mds_caps) +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + _default_caps = collections.OrderedDict([ ('mon', ['allow rw']), ('osd', ['allow rwx']), From 1d4a04f36751f7ef4460ead8c48be3d2c26963cd Mon Sep 17 00:00:00 2001 From: Gabor Meszaros Date: Fri, 30 Sep 2016 16:48:49 +0200 Subject: [PATCH 1217/2699] Remove extraneous variable assignment Change-Id: I1dda547003158798cb4d8cd2d0ba57ae726df1a5 --- ceph-osd/hooks/ceph_hooks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index faa780da..68867027 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -421,7 +421,6 @@ def get_devices(): def get_journal_devices(): if config('osd-journal'): - devices = config('osd-journal') devices = [l.strip() for l in config('osd-journal').split(' ')] else: devices = [] From 8eb3a4834bb445e63253283d401560a6135ee7e9 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 30 Sep 2016 08:48:10 -0700 Subject: [PATCH 1218/2699] Pre-release charm-helpers sync 16.10 Get each charm up to date with lp:charm-helpers for release testing. 
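One nuance in the get_journal_devices() cleanup from patch 1217 above: the
surviving list comprehension splits on a single space, so a config value
containing runs of whitespace still yields empty entries, which strip()
does not remove from the list; an argument-free split() would collapse
them. A quick illustration::

    >>> [l.strip() for l in '/dev/sdb  /dev/sdc'.split(' ')]
    ['/dev/sdb', '', '/dev/sdc']
    >>> '/dev/sdb  /dev/sdc'.split()
    ['/dev/sdb', '/dev/sdc']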
Change-Id: I582ec6a9aeacc8418e4178f5cdfee3a6ddf28b24 --- .../contrib/hardening/ssh/checks/config.py | 37 ++++++++++- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../contrib/openstack/amulet/deployment.py | 63 ++++++++++++++++--- .../charmhelpers/contrib/openstack/context.py | 6 +- .../charmhelpers/contrib/openstack/ip.py | 9 ++- .../charmhelpers/contrib/openstack/neutron.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 2 +- ceph-mon/hooks/charmhelpers/payload/execd.py | 5 +- ceph-mon/lib/ceph/ceph_broker.py | 41 +++++++++++- .../contrib/openstack/amulet/deployment.py | 63 ++++++++++++++++--- 10 files changed, 203 insertions(+), 27 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 94e524e2..f3cac6d9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -14,6 +14,11 @@ import os +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) from charmhelpers.core.hookenv import ( log, DEBUG, @@ -121,6 +126,36 @@ def get_ciphers(self, cbc_required): return cipher[weak_ciphers] + def get_listening(self, listen=['0.0.0.0']): + """Returns a list of addresses SSH can list on + + Turns input into a sensible list of IPs SSH can listen on. Input + must be a python list of interface names, IPs and/or CIDRs. + + :param listen: list of IPs, CIDRs, interface names + + :returns: list of IPs available on the host + """ + if listen == ['0.0.0.0']: + return listen + + value = [] + for network in listen: + try: + ip = get_address_in_network(network=network, fatal=True) + except ValueError: + if is_ip(network): + ip = network + else: + try: + ip = get_iface_addr(iface=network, fatal=False)[0] + except IndexError: + continue + value.append(ip) + if value == []: + return ['0.0.0.0'] + return value + def __call__(self): settings = utils.get_settings('ssh') if settings['common']['network_ipv6_enable']: @@ -180,7 +215,7 @@ def __call__(self): addr_family = 'inet' ctxt = { - 'ssh_ip': settings['server']['listen_to'], + 'ssh_ip': self.get_listening(settings['server']['listen_to']), 'password_auth_allowed': settings['server']['password_authentication'], 'ports': settings['common']['ports'], diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index d6dee17c..2d2026e4 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -406,7 +406,7 @@ def is_ip(address): # Test to see if already an IPv4/IPv6 address address = netaddr.IPAddress(address) return True - except netaddr.AddrFormatError: + except (netaddr.AddrFormatError, ValueError): return False diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d1d52137..6fe8cf88 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and 
optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. + Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 76737f22..b601a226 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1421,9 +1421,9 @@ def __call__(self): class AppArmorContext(OSContextGenerator): """Base class for apparmor contexts.""" - def __init__(self): + def __init__(self, profile_name=None): self._ctxt = None - self.aa_profile = None + self.aa_profile = profile_name self.aa_utils_packages = ['apparmor-utils'] @property @@ -1442,6 +1442,8 @@ def _determine_ctxt(self): if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: ctxt = {'aa_profile_mode': config('aa-profile-mode'), 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + if self.aa_profile: + ctxt['aa_profile'] = self.aa_profile else: ctxt = None return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index 0fd3ac25..d1476b1a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -30,6 +30,7 @@ PUBLIC = 'public' 
INTERNAL = 'int' ADMIN = 'admin' +ACCESS = 'access' ADDRESS_MAP = { PUBLIC: { @@ -49,7 +50,13 @@ 'config': 'os-admin-network', 'fallback': 'private-address', 'override': 'os-admin-hostname', - } + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, } diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index d1510dd3..08c86fa7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -249,6 +249,8 @@ def neutron_plugins(): plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') return plugins diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e1cc7687..9abd4c31 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -151,7 +151,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0', '2.9.0']), + ['2.8.0', '2.9.0', '2.10.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/charmhelpers/payload/execd.py b/ceph-mon/hooks/charmhelpers/payload/execd.py index 0c42090f..1502aa0b 100644 --- a/ceph-mon/hooks/charmhelpers/payload/execd.py +++ b/ceph-mon/hooks/charmhelpers/payload/execd.py @@ -47,11 +47,12 @@ def execd_submodule_paths(command, execd_dir=None): yield path -def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): """Run command for each module within execd_dir which defines it.""" for submodule_path in execd_submodule_paths(command, execd_dir): try: - subprocess.check_call(submodule_path, shell=True, stderr=stderr) + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) except subprocess.CalledProcessError as e: hookenv.log("Error ({}) running {}. Output: {}".format( e.returncode, e.cmd, e.output)) diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 0ed9833e..77806b4d 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -392,6 +392,44 @@ def handle_rgw_create_user(request, service): return {'exit-code': 1, 'stderr': err.output} +def handle_create_cephfs(request, service): + """ + Create a new cephfs. + :param request: The broker request + :param service: The cephx user to run this command under + :return: + """ + cephfs_name = request.get('mds_name') + data_pool = request.get('data_pool') + metadata_pool = request.get('metadata_pool') + # Check if the user params were provided + if not cephfs_name or not data_pool or not metadata_pool: + msg = "Missing mds_name, data_pool or metadata_pool params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Sanity check that the required pools exist + if not pool_exists(service=service, name=data_pool): + msg = "CephFS data pool does not exist. Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=metadata_pool): + msg = "CephFS metadata pool does not exist. 
Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Finally create CephFS + try: + check_output(["ceph", + '--id', service, + "fs", "new", cephfs_name, + metadata_pool, + data_pool]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + def handle_rgw_region_set(request, service): # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 json_file = request.get('region-json') @@ -448,7 +486,8 @@ def process_requests_v1(reqs): ret = handle_erasure_pool(request=req, service=svc) else: ret = handle_replicated_pool(request=req, service=svc) - + elif op == "create-cephfs": + ret = handle_create_cephfs(request=req, service=svc) elif op == "create-cache-tier": ret = handle_create_cache_tier(request=req, service=svc) elif op == "remove-cache-tier": diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d1d52137..6fe8cf88 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. + Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. 
many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: From 28daf0ccb8f9dab9f2ac344f6e2dc1e779a0fb1c Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 30 Sep 2016 08:49:39 -0700 Subject: [PATCH 1219/2699] Pre-release charm-helpers sync 16.10 Get each charm up to date with lp:charm-helpers for release testing. Change-Id: I93569f02dad733e75869df21dc125429b17ec2e5 --- .../charmhelpers/contrib/openstack/context.py | 6 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- ceph-osd/lib/ceph/__init__.py | 12 ++++ ceph-osd/lib/ceph/ceph_broker.py | 41 +++++++++++- .../contrib/openstack/amulet/deployment.py | 63 ++++++++++++++++--- 5 files changed, 111 insertions(+), 13 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 76737f22..b601a226 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1421,9 +1421,9 @@ def __call__(self): class AppArmorContext(OSContextGenerator): """Base class for apparmor contexts.""" - def __init__(self): + def __init__(self, profile_name=None): self._ctxt = None - self.aa_profile = None + self.aa_profile = profile_name self.aa_utils_packages = ['apparmor-utils'] @property @@ -1442,6 +1442,8 @@ def _determine_ctxt(self): if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: ctxt = {'aa_profile_mode': config('aa-profile-mode'), 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + if self.aa_profile: + ctxt['aa_profile'] = self.aa_profile else: ctxt = None return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index e1cc7687..9abd4c31 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -151,7 +151,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0', '2.9.0']), + ['2.8.0', '2.9.0', '2.10.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 8928efe2..328512b8 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -936,6 +936,18 @@ def get_mds_key(name): caps=mds_caps) +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + _default_caps = collections.OrderedDict([ ('mon', ['allow rw']), ('osd', ['allow rwx']), diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index 0ed9833e..77806b4d 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -392,6 +392,44 @@ def handle_rgw_create_user(request, service): return {'exit-code': 1, 'stderr': err.output} +def handle_create_cephfs(request, service): + """ + Create a new cephfs. 
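+
+    The expected request is roughly of this shape (field values here are
+    illustrative; the op name matches the "create-cephfs" dispatch in
+    process_requests_v1)::
+
+        {"op": "create-cephfs",
+         "mds_name": "ceph-fs",
+         "data_pool": "cephfs_data",
+         "metadata_pool": "cephfs_metadata"}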
+ :param request: The broker request + :param service: The cephx user to run this command under + :return: + """ + cephfs_name = request.get('mds_name') + data_pool = request.get('data_pool') + metadata_pool = request.get('metadata_pool') + # Check if the user params were provided + if not cephfs_name or not data_pool or not metadata_pool: + msg = "Missing mds_name, data_pool or metadata_pool params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Sanity check that the required pools exist + if not pool_exists(service=service, name=data_pool): + msg = "CephFS data pool does not exist. Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=metadata_pool): + msg = "CephFS metadata pool does not exist. Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Finally create CephFS + try: + check_output(["ceph", + '--id', service, + "fs", "new", cephfs_name, + metadata_pool, + data_pool]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + def handle_rgw_region_set(request, service): # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 json_file = request.get('region-json') @@ -448,7 +486,8 @@ def process_requests_v1(reqs): ret = handle_erasure_pool(request=req, service=svc) else: ret = handle_replicated_pool(request=req, service=svc) - + elif op == "create-cephfs": + ret = handle_create_cephfs(request=req, service=svc) elif op == "create-cache-tier": ret = handle_create_cache_tier(request=req, service=svc) elif op == "remove-cache-tier": diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d1d52137..6fe8cf88 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. 
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: From 4b182aa1f8f58c0119c3f64f0507fbd1417798c8 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 30 Sep 2016 08:53:07 -0700 Subject: [PATCH 1220/2699] Pre-release charm-helpers sync 16.10 Get each charm up to date with lp:charm-helpers for release testing. Change-Id: I88e80453a9649096d913fefaef8c875b4a9ae174 --- .../contrib/hardening/ssh/checks/config.py | 37 ++++++++++- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../contrib/openstack/amulet/deployment.py | 63 ++++++++++++++++--- .../charmhelpers/contrib/openstack/context.py | 6 +- .../charmhelpers/contrib/openstack/ip.py | 9 ++- .../charmhelpers/contrib/openstack/neutron.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 2 +- .../hooks/charmhelpers/payload/execd.py | 5 +- .../contrib/openstack/amulet/deployment.py | 63 ++++++++++++++++--- 9 files changed, 163 insertions(+), 26 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 94e524e2..f3cac6d9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -14,6 +14,11 @@ import os +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) from charmhelpers.core.hookenv import ( log, DEBUG, @@ -121,6 +126,36 @@ def get_ciphers(self, cbc_required): return cipher[weak_ciphers] + def get_listening(self, listen=['0.0.0.0']): + """Returns a list of addresses SSH can list on + + Turns input into a sensible list of IPs SSH can listen on. Input + must be a python list of interface names, IPs and/or CIDRs. 
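+
+        For instance (an editorial sketch; the addresses are invented),
+        ['eth0', '10.5.0.0/24'] might resolve to::
+
+            ['192.168.1.10', '10.5.0.12']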
+ + :param listen: list of IPs, CIDRs, interface names + + :returns: list of IPs available on the host + """ + if listen == ['0.0.0.0']: + return listen + + value = [] + for network in listen: + try: + ip = get_address_in_network(network=network, fatal=True) + except ValueError: + if is_ip(network): + ip = network + else: + try: + ip = get_iface_addr(iface=network, fatal=False)[0] + except IndexError: + continue + value.append(ip) + if value == []: + return ['0.0.0.0'] + return value + def __call__(self): settings = utils.get_settings('ssh') if settings['common']['network_ipv6_enable']: @@ -180,7 +215,7 @@ def __call__(self): addr_family = 'inet' ctxt = { - 'ssh_ip': settings['server']['listen_to'], + 'ssh_ip': self.get_listening(settings['server']['listen_to']), 'password_auth_allowed': settings['server']['password_authentication'], 'ports': settings['common']['ports'], diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index d6dee17c..2d2026e4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -406,7 +406,7 @@ def is_ip(address): # Test to see if already an IPv4/IPv6 address address = netaddr.IPAddress(address) return True - except netaddr.AddrFormatError: + except (netaddr.AddrFormatError, ValueError): return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d1d52137..6fe8cf88 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. 
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 76737f22..b601a226 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1421,9 +1421,9 @@ def __call__(self): class AppArmorContext(OSContextGenerator): """Base class for apparmor contexts.""" - def __init__(self): + def __init__(self, profile_name=None): self._ctxt = None - self.aa_profile = None + self.aa_profile = profile_name self.aa_utils_packages = ['apparmor-utils'] @property @@ -1442,6 +1442,8 @@ def _determine_ctxt(self): if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: ctxt = {'aa_profile_mode': config('aa-profile-mode'), 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + if self.aa_profile: + ctxt['aa_profile'] = self.aa_profile else: ctxt = None return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 0fd3ac25..d1476b1a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -30,6 +30,7 @@ PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' +ACCESS = 'access' ADDRESS_MAP = { PUBLIC: { @@ -49,7 +50,13 @@ 'config': 'os-admin-network', 'fallback': 'private-address', 'override': 'os-admin-hostname', - } + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, } diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index d1510dd3..08c86fa7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -249,6 +249,8 @@ def neutron_plugins(): plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') return plugins diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index e1cc7687..9abd4c31 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -151,7 +151,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0', '2.9.0']), + ['2.8.0', '2.9.0', '2.10.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/payload/execd.py b/ceph-radosgw/hooks/charmhelpers/payload/execd.py index 0c42090f..1502aa0b 100644 --- a/ceph-radosgw/hooks/charmhelpers/payload/execd.py +++ b/ceph-radosgw/hooks/charmhelpers/payload/execd.py @@ -47,11 +47,12 @@ def execd_submodule_paths(command, execd_dir=None): yield path -def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): """Run command for each module within execd_dir which defines it.""" for submodule_path in execd_submodule_paths(command, execd_dir): try: - subprocess.check_call(submodule_path, shell=True, stderr=stderr) + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) except subprocess.CalledProcessError as e: hookenv.log("Error ({}) running {}. Output: {}".format( e.returncode, e.cmd, e.output)) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index d1d52137..6fe8cf88 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. 
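#     Hedged summary of how the two lists are consumed (the dispatch loop is
#     part of this method, below the docstring, and is truncated in this
#     hunk): services in use_source get the 'source' config key, services in
#     no_origin get neither key, and everything else is assumed to take
#     'openstack-origin'. An illustrative sketch of that dispatch:
#
#         for name in ['ceph-mon', 'mysql', 'neutron-api-odl']:
#             if name in no_origin:
#                 continue                       # e.g. subordinates
#             key = 'source' if name in use_source else 'openstack-origin'
#             self._configure_services({name: {key: 'cloud:trusty-mitaka'}})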
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: From ce50784ad4416ce1bae8d35a62c9c1d4b05ad7b0 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 30 Sep 2016 08:51:13 -0700 Subject: [PATCH 1221/2699] Pre-release charm-helpers sync 16.10 Get each charm up to date with lp:charm-helpers for release testing. 
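The list handling in the _add_services hunk above merges caller-supplied
names into the built-in defaults and de-duplicates via set(); ordering is
not preserved, which is harmless because the lists are only used for
membership tests. A minimal illustration (names abbreviated):

    >>> caller = ['percona-cluster', 'ceph']      # 'ceph' repeats a default
    >>> defaults = ['mysql', 'ceph', 'ceph-osd']
    >>> sorted(set(caller + defaults))
    ['ceph', 'ceph-osd', 'mysql', 'percona-cluster']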
Change-Id: I99a70e75891b3b89b0698227f90dea7bc17c421d --- ceph-proxy/charm-helpers-hooks.yaml | 3 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 42 ++- .../contrib/hardening/ssh/checks/config.py | 37 +- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 31 +- .../contrib/storage/linux/ceph.py | 6 + ceph-proxy/hooks/charmhelpers/core/hookenv.py | 14 + ceph-proxy/hooks/charmhelpers/core/host.py | 87 ++--- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 +++ .../charmhelpers/core/host_factory/ubuntu.py | 56 +++ ceph-proxy/hooks/charmhelpers/core/kernel.py | 36 +- .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../hooks/charmhelpers/fetch/__init__.py | 324 ++--------------- ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 7 +- ceph-proxy/hooks/charmhelpers/fetch/centos.py | 171 +++++++++ ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 7 +- ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 336 ++++++++++++++++++ ceph-proxy/hooks/charmhelpers/osplatform.py | 19 + .../hooks/charmhelpers/payload/execd.py | 5 +- .../charmhelpers/contrib/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/deployment.py | 72 +++- .../contrib/openstack/amulet/utils.py | 119 ++++++- 25 files changed, 1061 insertions(+), 409 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/centos.py create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py create mode 100644 ceph-proxy/hooks/charmhelpers/osplatform.py diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index d12f6968..14aa3e0d 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -4,6 +4,7 @@ include: - core - cli - fetch + - osplatform - contrib.storage.linux: - utils - ceph @@ -17,4 +18,4 @@ include: - contrib.network.ip - contrib.charmsupport - contrib.hardening|inc=* - - contrib.python \ No newline at end of file + - contrib.python diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 17976fb5..1410512a 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -38,6 +38,7 @@ ) from charmhelpers.core.host import service +from charmhelpers.core import host # This module adds compatibility with the nrpe-external-master and plain nrpe # subordinate charms. To use it in your charm: @@ -108,6 +109,13 @@ # def local_monitors_relation_changed(): # update_nrpe_config() # +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# # 5. 
ln -s hooks.py nrpe-external-master-relation-changed # ln -s hooks.py local-monitors-relation-changed @@ -220,9 +228,10 @@ class NRPE(object): nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' - def __init__(self, hostname=None): + def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() self.config = config() + self.primary = primary self.nagios_context = self.config['nagios_context'] if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] @@ -238,6 +247,12 @@ def __init__(self, hostname=None): else: self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation_ids('nrpe-external-master'): + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) @@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name): :param str unit_name: Unit name to use in check description """ for svc in services: + # Don't add a check for these services from neutron-gateway + if svc in ['ext-port', 'os-charm-phy-nic-mtu']: + next + upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if os.path.exists(upstart_init): - # Don't add a check for these services from neutron-gateway - if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) + + if host.init_is_systemd(): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_systemd.py %s' % svc + ) + elif os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc cron_file = ('*/5 * * * * root ' diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index 94e524e2..f3cac6d9 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -14,6 +14,11 @@ import os +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) from charmhelpers.core.hookenv import ( log, DEBUG, @@ -121,6 +126,36 @@ def get_ciphers(self, cbc_required): return cipher[weak_ciphers] + def get_listening(self, listen=['0.0.0.0']): + """Returns a list of addresses SSH can list on + + Turns input into a sensible list of IPs SSH can listen on. Input + must be a python list of interface names, IPs and/or CIDRs. 
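#     Looking back at the charmsupport/nrpe.py changes earlier in this patch:
#     a hedged sketch of the subordinate-charm pattern they enable (service
#     and unit names illustrative; write() is NRPE's publish step):
#
#         from charmhelpers.contrib.charmsupport.nrpe import (
#             NRPE, add_init_service_checks)
#
#         nrpe_compat = NRPE(primary=False)  # subordinate: advertises primary=False
#         add_init_service_checks(nrpe_compat, ['radosgw'], 'ceph-radosgw/0')
#         nrpe_compat.write()
#
#     (Note: the bare 'next' statement in add_init_service_checks evaluates
#     the builtin and does nothing; 'continue' was presumably intended, so
#     the ext-port/os-charm-phy-nic-mtu filter is effectively a no-op as
#     written.)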
+ + :param listen: list of IPs, CIDRs, interface names + + :returns: list of IPs available on the host + """ + if listen == ['0.0.0.0']: + return listen + + value = [] + for network in listen: + try: + ip = get_address_in_network(network=network, fatal=True) + except ValueError: + if is_ip(network): + ip = network + else: + try: + ip = get_iface_addr(iface=network, fatal=False)[0] + except IndexError: + continue + value.append(ip) + if value == []: + return ['0.0.0.0'] + return value + def __call__(self): settings = utils.get_settings('ssh') if settings['common']['network_ipv6_enable']: @@ -180,7 +215,7 @@ def __call__(self): addr_family = 'inet' ctxt = { - 'ssh_ip': settings['server']['listen_to'], + 'ssh_ip': self.get_listening(settings['server']['listen_to']), 'password_auth_allowed': settings['server']['password_authentication'], 'ports': settings['common']['ports'], diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index d6dee17c..2d2026e4 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -406,7 +406,7 @@ def is_ip(address): # Test to see if already an IPv4/IPv6 address address = netaddr.IPAddress(address) return True - except netaddr.AddrFormatError: + except (netaddr.AddrFormatError, ValueError): return False diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 889ac044..9abd4c31 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,7 +51,8 @@ relation_set, service_name, status_set, - hook_name + hook_name, + application_version_set, ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -80,7 +81,12 @@ service_resume, restart_on_change_helper, ) -from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.fetch import ( + apt_install, + apt_cache, + install_remote, + get_upstream_version +) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -145,7 +151,7 @@ ('mitaka', ['2.5.0', '2.6.0', '2.7.0']), ('newton', - ['2.8.0', '2.9.0']), + ['2.8.0', '2.9.0', '2.10.0']), ]) # >= Liberty version->codename mapping @@ -212,6 +218,7 @@ 'glance': 'git://github.com/openstack/glance', 'horizon': 'git://github.com/openstack/horizon', 'keystone': 'git://github.com/openstack/keystone', + 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', 'neutron': 'git://github.com/openstack/neutron', 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', @@ -761,6 +768,13 @@ def git_default_repos(projects_yaml): if service in ['neutron-api', 'neutron-gateway', 'neutron-openvswitch']: core_project = 'neutron' + if service == 'neutron-api': + repo = { + 'name': 'networking-hyperv', + 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], + 'branch': branch, + } + repos.append(repo) for project in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas', 'nova']: repo = { @@ -1881,3 +1895,14 @@ def config_flags_parser(config_flags): flags[key.strip(post_strippers)] = value.rstrip(post_strippers) return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + 
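+    # A hedged usage sketch for this helper (package name illustrative): a
+    # charm calls it once per hook run so 'juju status' can show the payload
+    # version rather than only the charm revision:
+    #
+    #     from charmhelpers.contrib.openstack.utils import (
+    #         os_application_version_set)
+    #     os_application_version_set('radosgw')  # surfaces e.g. '10.2.2',
+    #                                            # or a codename as fallback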
application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. + if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index beff2703..edb536c7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -87,6 +87,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100 DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 def validator(value, valid_type, valid_range=None): @@ -266,6 +267,11 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + # The CRUSH algorithm has a slight optimization for placement groups # with powers of 2 so find the nearest power of 2. If the nearest # power of 2 is more than 25% below the original value, the next diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 48b2b9dc..996e81cc 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -843,6 +843,20 @@ def inner_translate_exc2(*args, **kwargs): return inner_translate_exc1 +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 53068599..0f1b2f35 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -30,13 +30,29 @@ import hashlib import functools import itertools -from contextlib import contextmanager -from collections import OrderedDict - import six +from contextlib import contextmanager +from collections import OrderedDict from .hookenv import log from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import def service_start(service_name): @@ -144,8 +160,11 @@ def service_running(service_name): return False else: # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if "start/running" in output: + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes @@ -153,18 +172,6 @@ def service_running(service_name): return False -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - SYSTEMD_SYSTEM = '/run/systemd/system' @@ -173,8 +180,9 @@ def init_is_systemd(): return os.path.isdir(SYSTEMD_SYSTEM) -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None, home_dir=None): +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None): log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) + add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info @@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, return r -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - def pwgen(length=None): """Generate a random pasword.""" if length is None: @@ -674,25 +662,6 @@ def get_nic_hwaddr(nic): return hwaddr -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/hooks/charmhelpers/core/kernel.py index b166efec..2d404528 100644 --- a/ceph-proxy/hooks/charmhelpers/core/kernel.py +++ b/ceph-proxy/hooks/charmhelpers/core/kernel.py @@ -15,15 +15,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Jorge Niedbalski " +import re +import subprocess +from charmhelpers.osplatform import get_platform from charmhelpers.core.hookenv import ( log, INFO ) -from subprocess import check_call, check_output -import re +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " def modprobe(module, persist=True): @@ -32,11 +45,9 @@ def modprobe(module, persist=True): log('Loading kernel module %s' % module, level=INFO) - check_call(cmd) + subprocess.check_call(cmd) if persist: - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) + persistent_modprobe(module) def rmmod(module, force=False): @@ -46,21 +57,16 @@ def rmmod(module, force=False): cmd.append('-f') cmd.append(module) log('Removing kernel module %s' % module, level=INFO) - return check_call(cmd) + return subprocess.check_call(cmd) def lsmod(): """Shows what kernel modules are currently loaded""" - return check_output(['lsmod'], - universal_newlines=True) + return subprocess.check_output(['lsmod'], + universal_newlines=True) def is_module_loaded(module): """Checks if a kernel module is already loaded""" matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) return len(matches) > 0 - - -def update_initramfs(version='all'): - """Updates an initramfs image""" - return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..21559642 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 52eaf824..ec5e0fe9 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ 
b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -13,18 +13,12 @@ # limitations under the License. import importlib -from tempfile import NamedTemporaryFile -import time +from charmhelpers.osplatform import get_platform from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess from charmhelpers.core.hookenv import ( config, log, ) -import os import six if six.PY3: @@ -33,87 +27,6 @@ from urlparse import urlparse, urlunparse -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', -} - # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. FETCH_HANDLERS = ( @@ -122,10 +35,6 @@ 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - class SourceConfigError(Exception): pass @@ -163,180 +72,38 @@ def base_url(self, url): return urlunparse(parts) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = 
importlib.import_module(module) -def add_source(source, key=None): - """Add a package source to this system. +filter_installed_packages = fetch.filter_installed_packages +install = fetch.install +upgrade = fetch.upgrade +update = fetch.update +purge = fetch.purge +add_source = fetch.add_source - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.install + apt_update = fetch.update + apt_upgrade = fetch.upgrade + apt_purge = fetch.purge + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + get_upstream_version = fetch.get_upstream_version +elif __platform__ == "centos": + yum_search = fetch.yum_search def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. + """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their + The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). 
Example config: @@ -368,12 +135,11 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - apt_update(fatal=True) + fetch.update(fatal=True) def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source + """Install a file tree from a remote source. The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] @@ -406,6 +172,7 @@ def install_remote(source, *args, **kwargs): def install_from_config(config_var_name): + """Install a file from config.""" charm_config = config() source = charm_config[config_var_name] return install_remote(source) @@ -428,40 +195,3 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index b3404d85..07cd0293 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -18,19 +18,20 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) from charmhelpers.core.host import mkdir if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) + install(['bzr']) if filter_installed_packages(['bzr']) != []: raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" + """Handler for bazaar branches via generic and lp URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('bzr+ssh', 'lp', ''): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/centos.py b/ceph-proxy/hooks/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..604bbfb5 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
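# With the aliasing in fetch/__init__.py above, charm code can stay
# platform-neutral by importing the generic names; a hedged sketch
# (source URL and package names illustrative):
#
#     from charmhelpers.fetch import add_source, update, install
#
#     add_source('http://mirror.example.com/repo', key=None)
#     update(fatal=True)                 # apt-get update / yum update
#     install(['ceph'], fatal=True)      # apt-get install / yum install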
+ +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + yb = yum.YumBase() + package_list = yb.doPackageLists() + temp_cache = {p.base_package_name: 1 for p in package_list['installed']} + + _pkgs = [p for p in packages if not temp_cache.get(p, False)] + return _pkgs + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_yum_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_yum_command(cmd, fatal) + + +def update(fatal=False): + """Update local yum cache.""" + cmd = ['yum', '--assumeyes', 'update'] + log("Update with fatal: {}".format(fatal)) + _run_yum_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['yum', '--assumeyes', 'remove'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_yum_command(cmd, fatal) + + +def yum_search(packages): + """Search for a package.""" + output = {} + cmd = ['yum', 'search'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Searching for {}".format(packages)) + result = subprocess.check_output(cmd) + for package in list(packages): + output[package] = package in result + return output + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL with a rpm package + + @param key: A key to be added to the system's keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. + """ + if source is None: + log('Source is not present. 
Skipping') + return + + if source.startswith('http'): + directory = '/etc/yum.repos.d/' + for filename in os.listdir(directory): + with open(directory + filename, 'r') as rpm_file: + if source in rpm_file.read(): + break + else: + log("Add source: {!r}".format(source)) + # write in the charms.repo + with open(directory + 'Charms.repo', 'a') as rpm_file: + rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) + rpm_file.write('name=%s\n' % source[7:]) + rpm_file.write('baseurl=%s\n\n' % source) + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['rpm', '--import', key_file]) + else: + subprocess.check_call(['rpm', '--import', key]) + + +def _run_yum_command(cmd, fatal=False): + """Run an YUM command. + + Checks the output and retry if the fatal flag is set to True. + + :param: cmd: str: The yum command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index f708d1ee..4cf21bc2 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -18,17 +18,18 @@ BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) if filter_installed_packages(['git']) != []: - apt_install(['git']) + install(['git']) if filter_installed_packages(['git']) != []: raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" + """Handler for git branches via generic and github URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py new file mode 100644 index 00000000..fce496b2 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,336 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
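# The CentOS module above also grows yum_search(), which has no apt twin;
# it returns a package-to-bool mapping derived from 'yum search' output.
# Hedged use on a CentOS host (package names illustrative; the name is only
# exported from charmhelpers.fetch on that platform):
#
#     from charmhelpers.fetch import yum_search, install
#
#     found = yum_search(['ceph', 'ceph-radosgw'])
#     install([pkg for pkg, ok in found.items() if ok], fatal=True)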
+ +import os +import six +import time +import subprocess + +from tempfile import NamedTemporaryFile +from charmhelpers.core.host import ( + lsb_release +) +from charmhelpers.core.hookenv import log +from charmhelpers.fetch import SourceConfigError + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 
'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', +} + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + cache = apt_cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_cache(in_memory=True, progress=None): + """Build and return an apt cache.""" + from apt import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache(progress) + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_apt_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. PPA and cloud archive keys + are securely added automatically, so should not be provided. + """ + if source is None: + log('Source is not present. Skipping') + return + + if (source.startswith('ppa:') or + source.startswith('http') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) + + +def _run_apt_command(cmd, fatal=False): + """Run an APT command. + + If the fatal flag is set to True, checks the result and retries + while the apt lock cannot be acquired. + + :param cmd: str: The apt command to run. + :param fatal: bool: Whether the command's result should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) + + +def get_upstream_version(package): + """Determine upstream version based on installed package + + @returns None (if not installed) or the upstream version + """ + import apt_pkg + cache = apt_cache() + try: + pkg = cache[package] + except KeyError: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed. + return None + + return apt_pkg.upstream_version(pkg.current_ver.ver_str)
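A minimal usage sketch of the apt helpers above (the package name is illustrative only, not something these charms require):

    missing = filter_installed_packages(['ceph-common'])
    if missing:
        # fatal=True retries while another process holds the apt lock.
        install(missing, fatal=True)
    # Prints e.g. '10.2.3' if installed, or None otherwise.
    print(get_upstream_version('ceph-common'))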
diff --git a/ceph-proxy/hooks/charmhelpers/osplatform.py b/ceph-proxy/hooks/charmhelpers/osplatform.py new file mode 100644 index 00000000..ea490bbd --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/osplatform.py @@ -0,0 +1,19 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if the current OS platform is Ubuntu then the string + "ubuntu" will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-proxy/hooks/charmhelpers/payload/execd.py b/ceph-proxy/hooks/charmhelpers/payload/execd.py index 0c42090f..1502aa0b 100644 --- a/ceph-proxy/hooks/charmhelpers/payload/execd.py +++ b/ceph-proxy/hooks/charmhelpers/payload/execd.py @@ -47,11 +47,12 @@ def execd_submodule_paths(command, execd_dir=None): yield path -def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): """Run command for each module within execd_dir which defines it.""" for submodule_path in execd_submodule_paths(command, execd_dir): try: - subprocess.check_call(submodule_path, shell=True, stderr=stderr) + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) except subprocess.CalledProcessError as e: hookenv.log("Error ({}) running {}. Output: {}".format( e.returncode, e.cmd, e.output)) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 0146236d..9c65518e 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -78,11 +78,15 @@ def _configure_services(self, configs): def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: - self.d.setup(timeout=900) - self.d.sentry.wait(timeout=900) + self.d.setup(timeout=timeout) + self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: - amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + amulet.raise_status( + amulet.FAIL, + msg="Deployment timed out ({}s)".format(timeout) + ) except Exception: raise diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6ce91dbe..6fe8cf88 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -98,8 +98,47 @@ def _determine_branch_locations(self, other_services): return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. 
+ + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. + Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + e.g. + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': 'cs:~bob/xenial/nova-compute' + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'name': 'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ self.log.info('OpenStackAmuletDeployment: adding services') other_services = self._determine_branch_locations(other_services) @@ -110,16 +149,22 @@ def _add_services(self, this_service, other_services): services = other_services services.append(this_service) + use_source = use_source or [] + no_origin = no_origin or [] + # Charms which should use the source config option - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy'] + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy'])) # Charms which can not use openstack-origin, ie. many subordinates - no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', - 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', - 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'] + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) if self.openstack: for svc in services: @@ -220,7 +265,8 @@ def _get_openstack_release(self): self.trusty_icehouse, self.trusty_juno, self.utopic_juno, self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka) = range(14) + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) releases = { ('precise', None): self.precise_essex, @@ -236,7 +282,10 @@ ('utopic', None): self.utopic_juno, ('vivid', None): self.vivid_kilo, ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka} + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -254,6 +303,7 @@ ('vivid', 'kilo'), ('wily', 'liberty'), ('xenial', 'mitaka'), + ('yakkety', 'newton'), ]) if self.openstack: os_origin = self.openstack.split(':')[1]
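As a usage sketch of the extended signature above (inside a deployment subclass; 'my-subordinate' is a hypothetical charm name), a test can now widen the lists per deployment instead of relying only on the hard-coded defaults:

    this_service = {'name': 'ceph-mon', 'units': 3}
    other_services = [{'name': 'percona-cluster'},
                      {'name': 'my-subordinate'}]  # hypothetical subordinate
    self._add_services(this_service, other_services,
                       use_source=['percona-cluster'],
                       no_origin=['my-subordinate'])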
diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..24b353ee 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + [<Endpoint id=..., + interface=admin, + links={u'self': u'<RESTful URL of this endpoint>'}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + <Endpoint id=..., + interface=admin, + links={u'self': u'<RESTful URL of this endpoint>'}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictionaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note that an added complication is that the order of the admin, + public and internal endpoints (by 'interface') is not fixed within + each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. 
+ """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. @@ -928,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False, retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: From b10f93df9cf914b48cdb7137cfa7f4bea22f0171 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 3 Oct 2016 13:16:30 -0400 Subject: [PATCH 1222/2699] Remove the compiled python bytecode upgrade-charm could previously fail because of compiled bytecode remaining behind. This change makes the upgrade-charm hook remove any such compiled bytecode files. Closes-Bug: 1628322 Change-Id: I3886b514b47d6fc9fdfb350e2b3a2296472e40f6 --- ceph-osd/hooks/ceph_hooks.py | 2 +- ceph-osd/hooks/upgrade-charm | 8 +++++++- ceph-osd/hooks/upgrade-charm.real | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) mode change 120000 => 100755 ceph-osd/hooks/upgrade-charm create mode 120000 ceph-osd/hooks/upgrade-charm.real diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 68867027..04db92b4 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -446,7 +446,7 @@ def mon_relation(): log('mon cluster has not yet provided conf') -@hooks.hook('upgrade-charm') +@hooks.hook('upgrade-charm.real') @harden() def upgrade_charm(): if get_fsid() and get_auth(): diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm deleted file mode 120000 index 52d96630..00000000 --- a/ceph-osd/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm new file mode 100755 index 00000000..440473d7 --- /dev/null +++ b/ceph-osd/hooks/upgrade-charm @@ -0,0 +1,7 @@ +#!/bin/bash +# Wrapper to ensure that old python bytecode isn't hanging around +# after we upgrade the charm with newer libraries +shopt -s globstar # '**' only recurses when globstar is enabled +rm -rf **/*.pyc + +exec ./hooks/upgrade-charm.real diff --git a/ceph-osd/hooks/upgrade-charm.real b/ceph-osd/hooks/upgrade-charm.real new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-osd/hooks/upgrade-charm.real @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file
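For illustration only, a rough Python equivalent of the wrapper's cleanup step (a sketch; the charm itself uses the bash wrapper above): stale .pyc files compiled from a previous charm revision can shadow modules that the upgrade renamed or removed, so they are deleted before the real hook runs.

    import pathlib

    def purge_bytecode(root='.'):
        # Recursively delete compiled bytecode left over from the
        # previous charm revision.
        for pyc in pathlib.Path(root).rglob('*.pyc'):
            pyc.unlink()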
From 3079f7747deb877bd786fc470cd574131d6b246d Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 29 Sep 2016 10:52:42 -0700 Subject: [PATCH 1223/2699] WIP to use ceph_broker --- ceph-fs/src/reactive/ceph_fs.py | 57 ++++----------------------- 1 file changed, 6 insertions(+), 51 deletions(-) diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 20fc3bba..b16426bf 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -4,9 +4,8 @@ from charms.reactive import when, when_not, set_state from charmhelpers.core.hookenv import ( - config, log, INFO, ERROR, status_set) + config, log, ERROR, service_name) from charmhelpers.core.host import service_restart -from charmhelpers.contrib.storage.linux import ceph from charmhelpers.contrib.network.ip import ( get_address_in_network ) @@ -30,35 +29,10 @@ def install_cephfs(): @when('cephfs.configured') +@when('cephfs.pools.created') @when_not('cephfs.started') def setup_mds(): try: - name = socket.gethostname() - status_set('maintenance', "Creating cephfs data pool") - log("Creating cephfs_data pool", level=INFO) - data_pool = "{}_data".format(name) - try: - ceph.ReplicatedPool(name=data_pool, service='admin').create() - except subprocess.CalledProcessError as err: - log("Creating data pool failed!") - raise err - - status_set('maintenance', "Creating cephfs metadata pool") - log("Creating cephfs_metadata pool", level=INFO) - metadata_pool = "{}_metadata".format(name) - try: - ceph.ReplicatedPool(name=metadata_pool, service='admin').create() - except subprocess.CalledProcessError as err: - log("Creating metadata pool failed!") - raise err - - status_set('maintenance', "Creating cephfs") - log("Creating ceph fs", level=INFO) - try: - subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool]) - except subprocess.CalledProcessError as err: - log("Creating metadata pool failed!") - raise err service_restart('ceph-mds') set_state('cephfs.started') except subprocess.CalledProcessError as err: @@ -82,24 +56,11 @@ def config_changed(ceph_client): os.makedirs(key_path) cephx_key = os.path.join(key_path, 'keyring') - admin_key = os.path.join(os.sep, - 'etc', - 'ceph', - 'ceph.client.admin.keyring') - - networks = get_networks('ceph-public-network') - public_network = ', '.join(networks) - - networks = get_networks('ceph-cluster-network') - cluster_network = ', '.join(networks) - ceph_context = { - 'mon_hosts': ceph_client.mon_hosts(), 'fsid': ceph_client.fsid(), 'auth_supported': ceph_client.auth(), 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': public_network, - 'ceph_cluster_network': cluster_network, + 'mon_hosts': ' '.join(ceph_client.mon_hosts()), 'loglevel': config('loglevel'), 'hostname': socket.gethostname(), 'mds_name': socket.gethostname(), @@ -111,21 +72,15 @@ def config_changed(ceph_client): except IOError as err: log("IOError writing ceph.conf: {}".format(err)) - try: - with open(admin_key, 'w') as key_file: - key_file.write("[client.admin]\n\tkey = {}\n".format( - ceph_client.admin_key() - )) - except IOError as err: - log("IOError writing admin.keyring: {}".format(err)) - try: with open(cephx_key, 'w') as key_file: key_file.write("[mds.{}]\n\tkey = {}\n".format( socket.gethostname(), - ceph_client.key() + ceph_client.mds_key() + # ceph_client.mds_bootstrap_key() )) except IOError as err: + log("IOError writing mds-a.keyring: {}".format(err)) set_state('cephfs.configured') From e1d906b55c1d2c07c5ca602232814ae8f27bbe24 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Oct 2016 11:04:32 -0400 Subject: [PATCH 1224/2699] update to the Canonical owned repo Change-Id: Id423ea2762fe6b356913e800e9b25dd35223dde2 --- ceph-mon/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index c4c67c1e..902ed078 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ 
-19,7 +19,7 @@ bin/charm_helpers_sync.py: bin/git_sync.py: @mkdir -p bin - @wget -O bin/git_sync.py https://raw.githubusercontent.com/ChrisMacNaughton/git-sync/master/git_sync.py + @wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml From fa1590dea12ae2f95f141346fd61f20b653eaa18 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Oct 2016 11:06:01 -0400 Subject: [PATCH 1225/2699] update to the Canonical owned repo Change-Id: I0fd9a3fe971d354d4faf4d03cebce66dfe222cd5 --- ceph-osd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index b6b9b665..4cb90c86 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -19,7 +19,7 @@ bin/charm_helpers_sync.py: bin/git_sync.py: @mkdir -p bin - @wget -O bin/git_sync.py https://raw.githubusercontent.com/ChrisMacNaughton/git-sync/master/git_sync.py + @wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml From 7d517f40d030c4e0e5fb5ae6284c0dff5042814f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 22 Sep 2016 17:57:26 +0000 Subject: [PATCH 1226/2699] Update amulet test definitions for Newton - Remove Precise-Icehouse Amulet test definitions if they exist. - Add Xenial-Newton Amulet test definitions. - Add Yakkety-Newton Amulet test definitions. - Use the percona-cluster charm in tests instead of the mysql charm. Change-Id: Ie730fe3f882c91c91c84ff99fa89aab9a10d3a4c --- .../contrib/openstack/amulet/deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 11 ++--- .../charmhelpers/contrib/openstack/utils.py | 5 ++- ceph-mon/lib/ceph/__init__.py | 32 +++++++++++++++ ceph-mon/lib/ceph/ceph_broker.py | 16 ++++++-- ceph-mon/tests/basic_deployment.py | 40 +++++++++++-------- .../contrib/openstack/amulet/deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 11 ++--- ...cise-icehouse => gate-basic-xenial-newton} | 8 ++-- ceph-mon/tests/gate-basic-yakkety-newton | 23 +++++++++++ ceph-mon/tests/tests.yaml | 2 +- 11 files changed, 110 insertions(+), 42 deletions(-) rename ceph-mon/tests/{gate-basic-precise-icehouse => gate-basic-xenial-newton} (71%) create mode 100644 ceph-mon/tests/gate-basic-yakkety-newton diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = list(set( diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 9abd4c31..8c89c3a3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -229,6 +229,7 @@ GIT_DEFAULT_BRANCHES = { 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', + 'newton': 'stable/newton', 'master': 'master', } @@ -735,12 +736,12 @@ def git_os_codename_install_source(projects_yaml): if projects in GIT_DEFAULT_BRANCHES.keys(): if projects == 'master': - return 'newton' + return 'ocata' return projects if 'release' in projects: if projects['release'] == 'master': - return 'newton' + return 'ocata' return projects['release'] return None diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 328512b8..4afe5eb9 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -1244,6 +1244,38 @@ def get_running_osds(): return [] +def get_cephfs(service): + """ + List the Ceph Filesystems that exist + :rtype: list. Returns a list of the ceph filesystems + :param service: The service name to run the ceph command under + """ + if get_version() < 0.86: + # This command wasn't introduced until 0.86 ceph + return [] + try: + output = subprocess.check_output(["ceph", + '--id', service, + "fs", "ls"]) + if not output: + return [] + """ + Example subprocess output: + 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata, + data pools: [ip-172-31-23-165_data ]\n' + output: filesystems: ['ip-172-31-23-165'] + """ + filesystems = [] + for line in output.splitlines(): + parts = line.split(',') + for part in parts: + if "name" in part: + filesystems.append(part.split(' ')[1]) + return filesystems + except subprocess.CalledProcessError: + return [] + + +def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): + """ + Fairly self explanatory name. This function will wait
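To see what the parsing in get_cephfs() above does, here is a standalone sketch run against the sample output quoted in its docstring (no running cluster required):

    output = ('name: ip-172-31-23-165, metadata pool: '
              'ip-172-31-23-165_metadata, data pools: '
              '[ip-172-31-23-165_data ]\n')
    filesystems = []
    for line in output.splitlines():
        for part in line.split(','):
            if 'name' in part:
                filesystems.append(part.split(' ')[1])
    print(filesystems)  # ['ip-172-31-23-165']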
diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 77806b4d..0892961e 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -24,6 +24,7 @@ INFO, ERROR, ) +from ceph import get_cephfs from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -418,7 +419,12 @@ def handle_create_cephfs(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - # Finally create CephFS + if get_cephfs(service=service): + # 'ceph fs new' has already been called + log("CephFS already created") + return + + # Finally create CephFS try: check_output(["ceph", '--id', service, @@ -426,8 +432,12 @@ metadata_pool, data_pool]) except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} + if err.returncode == 22:  # EINVAL: the filesystem already exists + log("CephFS already created") + return + else: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index f98622c3..94c7baba 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -44,11 +44,12 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._deploy() u.log.info('Waiting on extended status checks...') - exclude_services = ['mysql'] + exclude_services = [] # Wait for deployment ready msgs, except exclusions self._auto_wait_for_status(exclude_services=exclude_services) + self.d.sentry.wait() self._initialize_tests() def _add_services(self): @@ -59,29 +60,31 @@ def _add_services(self): compatible with the local charm (e.g. stable or next). 
""" this_service = {'name': 'ceph-mon', 'units': 3} - other_services = [{'name': 'mysql'}, - {'name': 'keystone'}, - {'name': 'ceph-osd', 'units': 3}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}] + other_services = [ + {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'keystone'}, + {'name': 'ceph-osd', 'units': 3}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, + {'name': 'cinder'} + ] super(CephBasicDeployment, self)._add_services(this_service, other_services) def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', - 'keystone:shared-db': 'mysql:shared-db', - 'glance:shared-db': 'mysql:shared-db', + 'keystone:shared-db': 'percona-cluster:shared-db', + 'glance:shared-db': 'percona-cluster:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'mysql:shared-db', + 'cinder:shared-db': 'percona-cluster:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', @@ -94,9 +97,15 @@ def _configure_services(self): """Configure all of the services.""" keystone_config = {'admin-password': 'openstack', 'admin-token': 'ubuntutesting'} - mysql_config = {'dataset-size': '50%'} cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + pxc_config = { + 'dataset-size': '25%', + 'max-connections': 1000, + 'root-password': 'ChangeMe123', + 'sst-password': 'ChangeMe123', + } + # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. 
ceph_config = { @@ -115,7 +124,7 @@ def _configure_services(self): } configs = {'keystone': keystone_config, - 'mysql': mysql_config, + 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config} @@ -124,7 +133,7 @@ def _configure_services(self): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry['mysql'][0] + self.pxc_sentry = self.d.sentry['percona-cluster'][0] self.keystone_sentry = self.d.sentry['keystone'][0] self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] self.nova_sentry = self.d.sentry['nova-compute'][0] @@ -209,7 +218,6 @@ def test_102_services(self): """Verify the expected services are running on the service units.""" services = { - self.mysql_sentry: ['mysql'], self.rabbitmq_sentry: ['rabbitmq-server'], self.nova_sentry: ['nova-compute'], self.keystone_sentry: ['keystone'], diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms which can not use openstack-origin, ie. many subordinates no_origin = list(set( diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. 
- service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-mon/tests/gate-basic-precise-icehouse b/ceph-mon/tests/gate-basic-xenial-newton similarity index 71% rename from ceph-mon/tests/gate-basic-precise-icehouse rename to ceph-mon/tests/gate-basic-xenial-newton index 5957305e..40fc35e4 100755 --- a/ceph-mon/tests/gate-basic-precise-icehouse +++ b/ceph-mon/tests/gate-basic-xenial-newton @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph deployment on precise-icehouse.""" +"""Amulet tests on a basic ceph deployment on xenial-newton.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-icehouse', - source='cloud:precise-updates/icehouse') + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-newton', + source='cloud:xenial-updates/newton') deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-yakkety-newton b/ceph-mon/tests/gate-basic-yakkety-newton new file mode 100644 index 00000000..f2939866 --- /dev/null +++ b/ceph-mon/tests/gate-basic-yakkety-newton @@ -0,0 +1,23 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on yakkety-newton.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='yakkety') + deployment.run_tests() diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index e3185c6d..4cf93d01 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,6 +1,6 @@ # Bootstrap the model if necessary. bootstrap: True -# Re-use bootstrap node instead of destroying/re-bootstrapping. +# Re-use bootstrap node. reset: True # Use tox/requirements to drive the venv instead of bundletester's venv feature. 
virtualenv: False From ed29d8d6f9cd6ea891d590663bf75a0190053bb1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 22 Sep 2016 17:57:32 +0000 Subject: [PATCH 1227/2699] Update amulet test definitions for Newton - Remove Precise-Icehouse Amulet test definitions if they exist. - Add Xenial-Newton Amulet test definitions. - Add Yakkety-Newton Amulet test definitions. - Use the percona-cluster charm in tests instead of the mysql charm. Change-Id: Icb3adfbe08c9db339499f69523f7b223dda94c0c --- .../charmhelpers/contrib/openstack/utils.py | 5 ++- ceph-osd/lib/ceph/__init__.py | 32 ++++++++++++++++ ceph-osd/lib/ceph/ceph_broker.py | 16 ++++++-- ceph-osd/tests/basic_deployment.py | 39 ++++++++++++------- .../contrib/openstack/amulet/deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 11 ++---- ceph-osd/tests/gate-basic-precise-icehouse | 25 ------------ ...xenial-newton => gate-basic-xenial-newton} | 0 ...kkety-newton => gate-basic-yakkety-newton} | 0 ceph-osd/tests/tests.yaml | 2 +- 10 files changed, 78 insertions(+), 54 deletions(-) delete mode 100755 ceph-osd/tests/gate-basic-precise-icehouse rename ceph-osd/tests/{dev-basic-xenial-newton => gate-basic-xenial-newton} (100%) rename ceph-osd/tests/{dev-basic-yakkety-newton => gate-basic-yakkety-newton} (100%) mode change 100755 => 100644 diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 9abd4c31..8c89c3a3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -229,6 +229,7 @@ GIT_DEFAULT_BRANCHES = { 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', + 'newton': 'stable/newton', 'master': 'master', } @@ -735,12 +736,12 @@ def git_os_codename_install_source(projects_yaml): if projects in GIT_DEFAULT_BRANCHES.keys(): if projects == 'master': - return 'newton' + return 'ocata' return projects if 'release' in projects: if projects['release'] == 'master': - return 'newton' + return 'ocata' return projects['release'] return None diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 328512b8..4afe5eb9 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -1244,6 +1244,38 @@ def get_running_osds(): return [] +def get_cephfs(service): + """ + List the Ceph Filesystems that exist + :rtype: list. Returns a list of the ceph filesystems + :param service: The service name to run the ceph command under + """ + if get_version() < 0.86: + # This command wasn't introduced until 0.86 ceph + return [] + try: + output = subprocess.check_output(["ceph", + '--id', service, + "fs", "ls"]) + if not output: + return [] + """ + Example subprocess output: + 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata, + data pools: [ip-172-31-23-165_data ]\n' + output: filesystems: ['ip-172-31-23-165'] + """ + filesystems = [] + for line in output.splitlines(): + parts = line.split(',') + for part in parts: + if "name" in part: + filesystems.append(part.split(' ')[1]) + return filesystems + except subprocess.CalledProcessError: + return [] + + +def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): + """ + Fairly self explanatory name. 
This function will wait diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index 77806b4d..0892961e 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -24,6 +24,7 @@ INFO, ERROR, ) +from ceph import get_cephfs from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -418,7 +419,12 @@ def handle_create_cephfs(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - # Finally create CephFS + if get_cephfs(service=service): + # 'ceph fs new' has already been called + log("CephFS already created") + return + + # Finally create CephFS try: check_output(["ceph", '--id', service, @@ -426,8 +432,12 @@ metadata_pool, data_pool]) except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} + if err.returncode == 22:  # EINVAL: the filesystem already exists + log("CephFS already created") + return + else: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 7b1154af..15d7e016 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -43,11 +43,12 @@ def __init__(self, series=None, openstack=None, source=None, self._deploy() u.log.info('Waiting on extended status checks...') - exclude_services = ['mysql'] + exclude_services = [] # Wait for deployment ready msgs, except exclusions self._auto_wait_for_status(exclude_services=exclude_services) + self.d.sentry.wait() self._initialize_tests() def _add_services(self): @@ -58,29 +59,31 @@ def _add_services(self): compatible with the local charm (e.g. stable or next). 
""" this_service = {'name': 'ceph-osd', 'units': 3} - other_services = [{'name': 'ceph-mon', 'units': 3}, - {'name': 'mysql'}, - {'name': 'keystone'}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}] + other_services = [ + {'name': 'ceph-mon', 'units': 3}, + {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, + {'name': 'cinder'} + ] super(CephOsdBasicDeployment, self)._add_services(this_service, other_services) def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', - 'keystone:shared-db': 'mysql:shared-db', - 'glance:shared-db': 'mysql:shared-db', + 'keystone:shared-db': 'percona-cluster:shared-db', + 'glance:shared-db': 'percona-cluster:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'mysql:shared-db', + 'cinder:shared-db': 'percona-cluster:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', @@ -93,7 +96,13 @@ def _configure_services(self): """Configure all of the services.""" keystone_config = {'admin-password': 'openstack', 'admin-token': 'ubuntutesting'} - mysql_config = {'dataset-size': '50%'} + pxc_config = { + 'dataset-size': '25%', + 'max-connections': 1000, + 'root-password': 'ChangeMe123', + 'sst-password': 'ChangeMe123', + } + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} ceph_config = { 'monitor-count': '3', @@ -111,7 +120,7 @@ def _configure_services(self): } configs = {'keystone': keystone_config, - 'mysql': mysql_config, + 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config} @@ -120,7 +129,7 @@ def _configure_services(self): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry['mysql'][0] + self.pxc_sentry = self.d.sentry['percona-cluster'][0] self.keystone_sentry = self.d.sentry['keystone'][0] self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] self.nova_sentry = self.d.sentry['nova-compute'][0] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = list(set( diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-osd/tests/gate-basic-precise-icehouse b/ceph-osd/tests/gate-basic-precise-icehouse deleted file mode 100755 index 65c2abad..00000000 --- a/ceph-osd/tests/gate-basic-precise-icehouse +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on precise-icehouse.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='precise', - openstack='cloud:precise-icehouse', - source='cloud:precise-updates/icehouse') - deployment.run_tests() diff --git a/ceph-osd/tests/dev-basic-xenial-newton b/ceph-osd/tests/gate-basic-xenial-newton similarity index 100% rename from ceph-osd/tests/dev-basic-xenial-newton rename to ceph-osd/tests/gate-basic-xenial-newton diff --git a/ceph-osd/tests/dev-basic-yakkety-newton b/ceph-osd/tests/gate-basic-yakkety-newton old mode 100755 new mode 100644 similarity index 100% rename from ceph-osd/tests/dev-basic-yakkety-newton rename to ceph-osd/tests/gate-basic-yakkety-newton diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index e3185c6d..4cf93d01 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,6 +1,6 @@ # Bootstrap the model if necessary. bootstrap: True -# Re-use bootstrap node instead of destroying/re-bootstrapping. +# Re-use bootstrap node. 
reset: True # Use tox/requirements to drive the venv instead of bundletester's venv feature. virtualenv: False From 77f1662ad1dbcf30f55339b30940f0f92fa726de Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 22 Sep 2016 17:57:39 +0000 Subject: [PATCH 1228/2699] Update amulet test definitions for Newton - Remove Precise-Icehouse Amulet test definitions if they exist. - Add Xenial-Newton Amulet test definitions. - Add Yakkety-Newton Amulet test definitions. - Use the percona-cluster charm in tests instead of the mysql charm. Change-Id: Ie29ee6af6b255ac1a1ac15f666b9d1b3ee9f3ee9 --- .../hooks/charmhelpers/contrib/openstack/utils.py | 5 +++-- ceph-proxy/tests/basic_deployment.py | 1 + .../contrib/openstack/amulet/deployment.py | 2 +- .../charmhelpers/contrib/openstack/amulet/utils.py | 11 ++++------- ceph-proxy/tests/gate-basic-precise-icehouse | 11 ----------- ceph-proxy/tests/gate-basic-trusty-juno | 11 ----------- ceph-proxy/tests/gate-basic-xenial-newton | 11 +++++++++++ ceph-proxy/tests/gate-basic-yakkety-newton | 9 +++++++++ ceph-proxy/tests/tests.yaml | 2 +- 9 files changed, 30 insertions(+), 33 deletions(-) delete mode 100755 ceph-proxy/tests/gate-basic-precise-icehouse delete mode 100755 ceph-proxy/tests/gate-basic-trusty-juno create mode 100755 ceph-proxy/tests/gate-basic-xenial-newton create mode 100644 ceph-proxy/tests/gate-basic-yakkety-newton diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 9abd4c31..8c89c3a3 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -229,6 +229,7 @@ GIT_DEFAULT_BRANCHES = { 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', + 'newton': 'stable/newton', 'master': 'master', } @@ -735,12 +736,12 @@ def git_os_codename_install_source(projects_yaml): if projects in GIT_DEFAULT_BRANCHES.keys(): if projects == 'master': - return 'newton' + return 'ocata' return projects if 'release' in projects: if projects['release'] == 'master': - return 'newton' + return 'ocata' return projects['release'] return None diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 4cddccf2..5b8f5ddd 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -34,6 +34,7 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._auto_wait_for_status(exclude_services=exclude_services) self._configure_proxy() + self.d.sentry.wait() self._initialize_tests() self._auto_wait_for_status() diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms which can not use openstack-origin, ie. 
many subordinates no_origin = list(set( diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-proxy/tests/gate-basic-precise-icehouse b/ceph-proxy/tests/gate-basic-precise-icehouse deleted file mode 100755 index 020cd751..00000000 --- a/ceph-proxy/tests/gate-basic-precise-icehouse +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on precise-icehouse.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='precise', - openstack='cloud:precise-icehouse', - source='cloud:precise-updates/icehouse') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-trusty-juno b/ceph-proxy/tests/gate-basic-trusty-juno deleted file mode 100755 index 28c7684e..00000000 --- a/ceph-proxy/tests/gate-basic-trusty-juno +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic ceph deployment on trusty-juno.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-juno', - source='cloud:trusty-updates/juno') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-newton b/ceph-proxy/tests/gate-basic-xenial-newton new file mode 100755 index 00000000..f87e375e --- /dev/null +++ b/ceph-proxy/tests/gate-basic-xenial-newton @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on xenial-newton.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-newton', + source='cloud:xenial-updates/newton') + deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-yakkety-newton b/ceph-proxy/tests/gate-basic-yakkety-newton new file mode 100644 index 00000000..b5ee9de4 --- /dev/null +++ b/ceph-proxy/tests/gate-basic-yakkety-newton @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph deployment on yakkety-newton.""" + +from basic_deployment import CephBasicDeployment + 
+if __name__ == '__main__': + deployment = CephBasicDeployment(series='yakkety') + deployment.run_tests() diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index e3185c6d..4cf93d01 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,6 +1,6 @@ # Bootstrap the model if necessary. bootstrap: True -# Re-use bootstrap node instead of destroying/re-bootstrapping. +# Re-use bootstrap node. reset: True # Use tox/requirements to drive the venv instead of bundletester's venv feature. virtualenv: False From 7bffdc836c3bce36e76171aaf27715673d09cd96 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 22 Sep 2016 17:57:45 +0000 Subject: [PATCH 1229/2699] Update amulet test definitions for Newton - Remove Precise-Icehouse Amulet test definitions if they exist. - Add Xenial-Newton Amulet test definitions. - Add Yakkety-Newton Amulet test definitions. - Use the percona-cluster charm in tests instead of the mysql charm. Change-Id: Ia018e05d4030c72ffb6a94c840fb7fa4768a423a --- .../contrib/openstack/amulet/deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 11 ++--- .../charmhelpers/contrib/openstack/utils.py | 5 ++- ceph-radosgw/tests/basic_deployment.py | 40 +++++++++++-------- .../contrib/openstack/amulet/deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 11 ++--- .../tests/gate-basic-precise-icehouse | 25 ------------ ...xenial-newton => gate-basic-xenial-newton} | 0 ...kkety-newton => gate-basic-yakkety-newton} | 0 ceph-radosgw/tests/tests.yaml | 2 +- 10 files changed, 38 insertions(+), 60 deletions(-) delete mode 100755 ceph-radosgw/tests/gate-basic-precise-icehouse rename ceph-radosgw/tests/{dev-basic-xenial-newton => gate-basic-xenial-newton} (100%) rename ceph-radosgw/tests/{dev-basic-yakkety-newton => gate-basic-yakkety-newton} (100%) mode change 100755 => 100644 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms which can not use openstack-origin, ie. many subordinates no_origin = list(set( diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. 
- service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 9abd4c31..8c89c3a3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -229,6 +229,7 @@ GIT_DEFAULT_BRANCHES = { 'liberty': 'stable/liberty', 'mitaka': 'stable/mitaka', + 'newton': 'stable/newton', 'master': 'master', } @@ -735,12 +736,12 @@ def git_os_codename_install_source(projects_yaml): if projects in GIT_DEFAULT_BRANCHES.keys(): if projects == 'master': - return 'newton' + return 'ocata' return projects if 'release' in projects: if projects['release'] == 'master': - return 'newton' + return 'ocata' return projects['release'] return None diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index a926b13a..d202ce85 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -44,11 +44,12 @@ def __init__(self, series=None, openstack=None, source=None, stable=False): self._deploy() u.log.info('Waiting on extended status checks...') - exclude_services = ['mysql'] + exclude_services = [] # Wait for deployment ready msgs, except exclusions self._auto_wait_for_status(exclude_services=exclude_services) + self.d.sentry.wait() self._initialize_tests() def _add_services(self): @@ -59,29 +60,31 @@ def _add_services(self): compatible with the local charm (e.g. stable or next). 
""" this_service = {'name': 'ceph-radosgw'} - other_services = [{'name': 'ceph', 'units': 3}, - {'name': 'mysql'}, - {'name': 'keystone'}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}] + other_services = [ + {'name': 'ceph', 'units': 3}, + {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'keystone'}, + {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, + {'name': 'glance'}, + {'name': 'cinder'} + ] super(CephRadosGwBasicDeployment, self)._add_services(this_service, other_services) def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph:client', - 'keystone:shared-db': 'mysql:shared-db', - 'glance:shared-db': 'mysql:shared-db', + 'keystone:shared-db': 'percona-cluster:shared-db', + 'glance:shared-db': 'percona-cluster:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', 'glance:ceph': 'ceph:client', - 'cinder:shared-db': 'mysql:shared-db', + 'cinder:shared-db': 'percona-cluster:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', @@ -95,7 +98,13 @@ def _configure_services(self): """Configure all of the services.""" keystone_config = {'admin-password': 'openstack', 'admin-token': 'ubuntutesting'} - mysql_config = {'dataset-size': '50%'} + pxc_config = { + 'dataset-size': '25%', + 'max-connections': 1000, + 'root-password': 'ChangeMe123', + 'sst-password': 'ChangeMe123', + } + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} ceph_config = { 'monitor-count': '3', @@ -109,7 +118,7 @@ def _configure_services(self): radosgw_config = {"use-embedded-webserver": True} configs = {'keystone': keystone_config, - 'mysql': mysql_config, + 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph': ceph_config, 'ceph-radosgw': radosgw_config} @@ -144,7 +153,7 @@ def _wait_on_action(self, action_id): def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units - self.mysql_sentry = self.d.sentry['mysql'][0] + self.pxc_sentry = self.d.sentry['percona-cluster'][0] self.keystone_sentry = self.d.sentry['keystone'][0] self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] self.nova_sentry = self.d.sentry['nova-compute'][0] @@ -231,7 +240,6 @@ def test_102_services(self): """Verify the expected services are running on the service units.""" services = { - self.mysql_sentry: ['mysql'], self.rabbitmq_sentry: ['rabbitmq-server'], self.nova_sentry: ['nova-compute'], self.keystone_sentry: ['keystone'], diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 6fe8cf88..9e0b07fb 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -156,7 +156,7 @@ def _add_services(self, this_service, other_services, use_source=None, use_source = list(set( use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy'])) + 'ceph-proxy', 'percona-cluster', 'lxd'])) # Charms 
which can not use openstack-origin, ie. many subordinates no_origin = list(set( diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 24b353ee..e4546c8c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -306,10 +306,8 @@ def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] - ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, @@ -317,10 +315,9 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') - unit = keystone_sentry if not keystone_ip: - keystone_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] + keystone_ip = keystone_sentry.info['public-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) if not api_version or api_version == 2: ep = base_ep + "/v2.0" diff --git a/ceph-radosgw/tests/gate-basic-precise-icehouse b/ceph-radosgw/tests/gate-basic-precise-icehouse deleted file mode 100755 index 925dfbda..00000000 --- a/ceph-radosgw/tests/gate-basic-precise-icehouse +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on precise-icehouse.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='precise', - openstack='cloud:precise-icehouse', - source='cloud:precise-updates/icehouse') - deployment.run_tests() diff --git a/ceph-radosgw/tests/dev-basic-xenial-newton b/ceph-radosgw/tests/gate-basic-xenial-newton similarity index 100% rename from ceph-radosgw/tests/dev-basic-xenial-newton rename to ceph-radosgw/tests/gate-basic-xenial-newton diff --git a/ceph-radosgw/tests/dev-basic-yakkety-newton b/ceph-radosgw/tests/gate-basic-yakkety-newton old mode 100755 new mode 100644 similarity index 100% rename from ceph-radosgw/tests/dev-basic-yakkety-newton rename to ceph-radosgw/tests/gate-basic-yakkety-newton diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index e3185c6d..4cf93d01 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,6 +1,6 @@ # Bootstrap the model if necessary. 
bootstrap: True -# Re-use bootstrap node instead of destroying/re-bootstrapping. +# Re-use bootstrap node. reset: True # Use tox/requirements to drive the venv instead of bundletester's venv feature. virtualenv: False From 06b508d73188e81495387a379b68e4018c874580 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 17 Oct 2016 09:22:15 +0100 Subject: [PATCH 1230/2699] Downgrade default key mon capabilities The 'w' capability for mon is no longer required by default, as the ceph broker in the ceph{-mon} charm is responsible for pool creation, not clients. Drop this permission (keys are automatically upgraded). Change-Id: I85ba55b7b929eb852046db354a745eb3beed2c51 Depends-On: Iefffe047214555a15c4201fca605f07ac39c8f5c Partial-Bug: 1424771 --- ceph-mon/lib/ceph/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 4afe5eb9..b95a8260 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -949,7 +949,7 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ - ('mon', ['allow rw']), + ('mon', ['allow r']), ('osd', ['allow rwx']), ]) From d5302a165721ce0190453d37e9f30d83229e8f4e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 17 Oct 2016 16:24:07 -0400 Subject: [PATCH 1231/2699] Add minimum-size to osd-devices This stops an error that happens when size is not specified when adding storage via Juju storage hooks. Without a set minimum, Juju will give 1G to a disk, which will cause ceph-disk to fail when connecting the new disk. Change-Id: Ib57314945b1f0bf8995029f5506543bc1b53c89b --- ceph-osd/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 75447355..5150c171 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -32,6 +32,7 @@ storage: type: block multiple: range: 0- + minimum-size: 1G osd-journals: type: block multiple: From a41c38720de72fb14dca84d89653f13591fb0662 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 7 Oct 2016 14:39:38 -0700 Subject: [PATCH 1232/2699] Mds Relation Small changes needed to make the mds relation function properly. The mds_relation now uses the remote unit name to generate the cephx key. lib/ceph was also synced to the latest. Ceph_broker CephFS creation is now idempotent.
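As a minimal sketch of the key handling described above (the unit name 'ceph-fs/0' is hypothetical; the expressions mirror the hook code in the diff below), the per-unit broker response key is derived from the remote unit name:

    unit = 'ceph-fs/0'                            # hypothetical remote unit
    unit_id = unit.replace('/', '-')              # -> 'ceph-fs-0'
    unit_response_key = 'broker-rsp-' + unit_id   # -> 'broker-rsp-ceph-fs-0'

The mds cephx key itself is now requested per mds-name via ceph.get_mds_key(name=mds_name) rather than per service name, as the diff below shows.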
Change-Id: I8eb022030d833af6d1daa9b61265bde68f3e79b2 --- ceph-mon/hooks/ceph_hooks.py | 45 +++++++++++++------------------- ceph-mon/lib/ceph/ceph_broker.py | 2 +- 2 files changed, 19 insertions(+), 28 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 1048c1bf..ec78c305 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -439,36 +439,27 @@ def mds_relation_joined(relid=None, unit=None): if ceph.is_quorum() and related_osds(): log('mon cluster in quorum and OSDs related' '- providing client with keys') - service_name = None + mds_name = relation_get('mds-name') if not unit: unit = remote_unit() - if relid is None: - units = [remote_unit()] - service_name = units[0].split('/')[0] - else: - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] + public_addr = get_public_addr() + data = { + 'fsid': leader_get('fsid'), + 'mds_key': ceph.get_mds_key(name=mds_name), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr} + settings = relation_get(rid=relid, unit=unit) + """Process broker request(s).""" + if 'broker_req' in settings: + if ceph.is_leader(): + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data[unit_response_key] = rsp + else: + log("Not leader - ignoring broker request", level=DEBUG) - if service_name is not None: - public_addr = get_public_addr() - data = { - 'fsid': leader_get('fsid'), - 'mds_bootstrap_key': ceph.get_mds_bootstrap_key(), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr} - settings = relation_get(rid=relid, unit=unit) - """Process broker request(s).""" - if 'broker_req' in settings: - if ceph.is_leader(): - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - data[unit_response_key] = rsp - else: - log("Not leader - ignoring broker request", level=DEBUG) - - relation_set(relation_id=relid, relation_settings=data) + relation_set(relation_id=relid, relation_settings=data) else: log('mon cluster not in quorum - deferring key provision') diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 0892961e..188f2001 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -424,7 +424,7 @@ def handle_create_cephfs(request, service): log("CephFS already created") return - # Finally create CephFS + # Finally create CephFS try: check_output(["ceph", '--id', service, From d0f420f5cc03f747204088ce4398ea7553e8e699 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Fri, 21 Oct 2016 10:47:14 -0700 Subject: [PATCH 1233/2699] Add relation param to setup_mds --- ceph-fs/src/reactive/ceph_fs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index b16426bf..e9415292 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -29,9 +29,9 @@ def install_cephfs(): @when('cephfs.configured') -@when('cephfs.pools.created') +@when('ceph-mds.pools.available') @when_not('cephfs.started') -def setup_mds(): +def setup_mds(relation): try: service_restart('ceph-mds') set_state('cephfs.started') From 71d82bd6e60f330448b5329d3d57b89e8b345099 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 24 Oct 2016 14:34:08 -0700 Subject: [PATCH 1234/2699] Add actions to set,get and remove quotas --- ceph-fs/src/README.md | 28 
+++++++++++------ ceph-fs/src/actions.yaml | 18 +++++++---- ceph-fs/src/actions/get-quota | 1 + ceph-fs/src/actions/get-quota.py | 45 +++++++++++++++++++++++++++ ceph-fs/src/actions/remove-quota | 1 + ceph-fs/src/actions/remove-quota.py | 44 +++++++++++++++++++++++++++ ceph-fs/src/actions/set-quota | 1 + ceph-fs/src/actions/set-quota.py | 47 +++++++++++++++++++++++++++++ ceph-fs/src/config.yaml | 12 ++------ ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/wheelhouse.txt | 2 +- 11 files changed, 174 insertions(+), 27 deletions(-) create mode 120000 ceph-fs/src/actions/get-quota create mode 100755 ceph-fs/src/actions/get-quota.py create mode 120000 ceph-fs/src/actions/remove-quota create mode 100755 ceph-fs/src/actions/remove-quota.py create mode 120000 ceph-fs/src/actions/set-quota create mode 100755 ceph-fs/src/actions/set-quota.py diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index dcbbee51..c01cee68 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -6,22 +6,30 @@ Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. This charm deploys a Ceph MDS cluster. -juju Usage ===== - -Boot things up by using:: + +Boot things up by using: juju deploy -n 3 --config ceph.yaml ceph-mon juju deploy -n 3 --config ceph.yaml ceph-osd - -You can then deploy this charm by simply doing:: - - juju deploy -n 3 --config ceph.yaml ceph-fs +In my example deployments on EC2 the following ceph.yaml will work: +``` +ceph-mon: + source: cloud:trusty-mitaka +ceph-osd: + osd-devices: /dev/xvdb + ephemeral-unmount: "/mnt" + source: cloud:trusty-mitaka +``` +You can then deploy this charm by simply doing: + + juju deploy --config ceph.yaml ceph-fs juju add-relation ceph-fs ceph-mon - -Once the ceph-mon and osd charms have bootstrapped the cluster, it will notify the ceph-fs charm. + +Once the ceph-mon and osd charms have bootstrapped the cluster, the ceph-mon +charm will notify the ceph-fs charm. Contact Information =================== @@ -30,4 +38,4 @@ Contact Information - [Ceph website](http://ceph.com) - [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) -- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) \ No newline at end of file +- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) diff --git a/ceph-fs/src/actions.yaml b/ceph-fs/src/actions.yaml index 77651dc7..8620d24b 100644 --- a/ceph-fs/src/actions.yaml +++ b/ceph-fs/src/actions.yaml @@ -5,12 +5,14 @@ get-quota: type: boolean description: | The limit of how many files can be written. Use either this or - max-bytes but not both. + max-bytes but not both. The action tries max-files first and then + falls back on max-bytes if both are set max-bytes: type: integer description: | The maximum number of bytes that are allowed to be written. Use - either this or max-files but not both. + either this or max-files but not both. The action tries max-files + first and then falls back on max-bytes if both are set directory: type: string description: | @@ -24,12 +26,14 @@ remove-quota: type: boolean description: | The limit of how many files can be written. Use either this or - max-bytes but not both. + max-bytes but not both. The action tries max-files first and then + falls back on max-bytes if both are set max-bytes: type: integer description: | The maximum number of bytes that are allowed to be written. Use - either this or max-files but not both. + either this or max-files but not both. 
The action tries max-files + first and then falls back on max-bytes if both are set directory: type: string description: | @@ -43,12 +47,14 @@ set-quota: type: integer description: | The limit of how many files can be written. Use either this or - max-bytes but not both. + max-bytes but not both. The action tries max-files + first and then falls back on max-bytes if both are set max-bytes: type: integer description: | The maximum number of bytes that are allowed to be written. Use - either this or max-files but not both. + either this or max-files but not both. The action tries max-files + first and then falls back on max-bytes if both are set directory: type: string description: | diff --git a/ceph-fs/src/actions/get-quota b/ceph-fs/src/actions/get-quota new file mode 120000 index 00000000..a1d07b46 --- /dev/null +++ b/ceph-fs/src/actions/get-quota @@ -0,0 +1 @@ +get-quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/get-quota.py b/ceph-fs/src/actions/get-quota.py new file mode 100755 index 00000000..c9c17f66 --- /dev/null +++ b/ceph-fs/src/actions/get-quota.py @@ -0,0 +1,45 @@ +#!/usr/bin/python3 +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +from charmhelpers.core.hookenv import action_get, action_fail, action_set +import xattr + +__author__ = 'Chris Holcombe ' + + +def get_quota(): + max_files = action_get('max-files') + max_bytes = action_get('max-bytes') + directory = action_get('directory') + + if not os.path.exists(directory): + action_fail("Directory must exist before setting quota") + attr = "ceph.quota.{}" + if max_files: + attr = attr.format("max_files") + elif max_bytes: + attr = attr.format("max_bytes") + + try: + quota_value = xattr.getxattr(directory, attr) + action_set({'{} quota'.format(directory): quota_value}) + except IOError as err: + action_fail( + "Unable to get xattr on {}. Error: {}".format(directory, err)) + + +if __name__ == '__main__': + get_quota() diff --git a/ceph-fs/src/actions/remove-quota b/ceph-fs/src/actions/remove-quota new file mode 120000 index 00000000..dee30392 --- /dev/null +++ b/ceph-fs/src/actions/remove-quota @@ -0,0 +1 @@ +remove-quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/remove-quota.py b/ceph-fs/src/actions/remove-quota.py new file mode 100755 index 00000000..c068cc2d --- /dev/null +++ b/ceph-fs/src/actions/remove-quota.py @@ -0,0 +1,44 @@ +#!/usr/bin/python3 +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+import os +from charmhelpers.core.hookenv import action_get, action_fail, action_set +import xattr + +__author__ = 'Chris Holcombe ' + + +def remove_quota(): + max_files = action_get('max-files') + max_bytes = action_get('max-bytes') + directory = action_get('directory') + + if not os.path.exists(directory): + action_fail("Directory must exist before setting quota") + attr = "ceph.quota.{}" + if max_files: + attr = attr.format("max_files") + elif max_bytes: + attr = attr.format("max_bytes") + + try: + xattr.setxattr(directory, attr, str(0)) + except IOError as err: + action_fail( + "Unable to set xattr on {}. Error: {}".format(directory, err)) + + +if __name__ == '__main__': + remove_quota() diff --git a/ceph-fs/src/actions/set-quota b/ceph-fs/src/actions/set-quota new file mode 120000 index 00000000..79af9e29 --- /dev/null +++ b/ceph-fs/src/actions/set-quota @@ -0,0 +1 @@ +set-quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/set-quota.py b/ceph-fs/src/actions/set-quota.py new file mode 100755 index 00000000..bba5a502 --- /dev/null +++ b/ceph-fs/src/actions/set-quota.py @@ -0,0 +1,47 @@ +#!/usr/bin/python3 +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Chris Holcombe ' +import os +from charmhelpers.core.hookenv import action_get, action_fail +import xattr + + +def set_quota(): + max_files = action_get('max-files') + max_bytes = action_get('max-bytes') + directory = action_get('directory') + + if not os.path.exists(directory): + action_fail("Directory must exist before setting quota") + attr = "ceph.quota.{}" + value = None + if max_files: + attr = attr.format("max_files") + value = str(max_files) + elif max_bytes: + attr = attr.format("max_bytes") + value = str(max_bytes) + + try: + xattr.setxattr(directory, attr, value) + except IOError as err: + action_fail( + "Unable to set xattr on {}. Error: {}".format(directory, err)) + + +if __name__ == '__main__': + set_quota() diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index f0b9fa46..3b86e9ef 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -1,4 +1,7 @@ options: + apt: + packages: + - python3-pyxattr ceph-public-network: type: string default: @@ -8,15 +11,6 @@ options: . If multiple networks are to be used, a space-delimited list of a.b.c.d/x can be provided. - ceph-cluster-network: - type: string - default: - description: | - The IP address and netmask of the cluster (back-side) network (e.g., - 192.168.0.0/24) - . - If multiple networks are to be used, a space-delimited list of a.b.c.d/x - can be provided.
loglevel: default: 1 type: int diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index 77414bb5..9f2a750b 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,2 +1,2 @@ -includes: ['layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds'] # if you use any interfaces, add them here +includes: ['layer:apt', 'layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds'] # if you use any interfaces, add them here repo: git@github.com:cholcombe973/charm-ceph-fs.git diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index f34b89e7..8501e11b 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -1 +1 @@ -ceph_api \ No newline at end of file +ceph_api From b2bad0a1b28a2c6b72a6eda1f5b0a040d0b085e9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 2 Nov 2016 17:14:03 +0000 Subject: [PATCH 1235/2699] Add series metadata Change-Id: Ib5d31353b0d472512edfc96f5f3c1671f03f34f2 --- ceph-proxy/metadata.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 72acc5fc..4de85d73 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -9,6 +9,11 @@ tags: - storage - file-servers - misc +series: + - xenial + - trusty + - precise + - yakkety extra-bindings: public: cluster: From 53cbc87ca962ca7ab29a0eb45717a88f6363750c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 15 Nov 2016 08:53:05 +0000 Subject: [PATCH 1236/2699] Ensure public-address provided on peer relation Change-Id: Idf3e688323e3cf7d994893f1e1fe6365ce81d571 Closes-Bug: 1641870 --- ceph-radosgw/hooks/hooks.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cea979c8..8d586e82 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -44,11 +44,13 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.contrib.network.ip import ( + get_address_in_network, get_ipv6_addr, get_iface_for_address, get_netmask_for_address, is_ipv6, ) +from charmhelpers.contrib.openstack.context import ADDRESS_TYPES from charmhelpers.contrib.openstack.ip import ( canonical_url, PUBLIC, INTERNAL, ADMIN, @@ -254,6 +256,17 @@ def identity_changed(relid=None): @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_joined(rid=None): settings = {} + + for addr_type in ADDRESS_TYPES: + address = get_address_in_network( + config('os-{}-network'.format(addr_type)) + ) + if address: + relation_set( + relation_id=rid, + settings={'{}-address'.format(addr_type): address} + ) + if config('prefer-ipv6'): private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] settings['private-address'] = private_addr From b34535a38c544eb17cc04d8d5cc102b384067b4f Mon Sep 17 00:00:00 2001 From: Nuno Santos Date: Tue, 15 Nov 2016 14:05:15 -0500 Subject: [PATCH 1237/2699] Add detail to ephemeral-unmount config option doc Add additional detail on usage of the ephemeral-unmount configuration option, making clear that the value should be the mountpoint path. 
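For example (a sketch only; the device and mountpoint below are assumptions for a typical EC2 instance, mirroring the ceph-fs README example elsewhere in this series), the option names the mountpoint rather than the device:

    ceph-osd:
      osd-devices: /dev/xvdb
      ephemeral-unmount: /mnt

The charm then unmounts /mnt so that /dev/xvdb can be handed to ceph-disk as an OSD volume.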
Change-Id: Ifd0345d0bb80625978476222445bf9875d33793b --- ceph-osd/config.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 778bc645..0c47ca34 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -98,12 +98,13 @@ options: type: string default: description: | - Cloud instances provider ephermeral storage which is normally mounted + Cloud instances provide ephemeral storage which is normally mounted on /mnt. - Providing this option will force an unmount of the ephemeral device - so that it can be used as a OSD storage device. This is useful for - testing purposes (cloud deployment is not a typical use case). + Setting this option to the path of the ephemeral mountpoint will force + an unmount of the corresponding device so that it can be used as an OSD + storage device. This is useful for testing purposes (cloud deployment + is not a typical use case). source: type: string default: From 286fef9877d8c5f5385c954924eb798aee223d2d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Nov 2016 15:18:27 -0600 Subject: [PATCH 1238/2699] Update Amulet defs, series metadata and c-h sync - Sync charm helpers if applicable. - Fix test executable hashbangs for virtualenv prep. - Add Yakkety-Newton Amulet test definitions. - Prep Xenial-Ocata Amulet test definitions (not yet enabled). - Prep Zesty-Ocata Amulet test definitions (not yet enabled). - Add Zesty charm series metadata. - Remove Precise charm series metadata if present. - Remove Precise Amulet test definitions if present. Change-Id: I37ffb25e72998f4e128222c314423d204282aded --- .../charmhelpers/contrib/openstack/utils.py | 22 ++++++++++-- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 2 ++ ceph-proxy/hooks/charmhelpers/core/host.py | 17 +++++++++ .../core/kernel_factory/ubuntu.py | 2 +- ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 8 +++++ ceph-proxy/metadata.yaml | 2 +- ceph-proxy/tests/basic_deployment.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 2 +- .../contrib/openstack/amulet/deployment.py | 35 +++++-------------- .../contrib/openstack/amulet/utils.py | 11 ++++++ ceph-proxy/tests/gate-basic-trusty-icehouse | 2 +- ceph-proxy/tests/gate-basic-trusty-kilo | 2 +- ceph-proxy/tests/gate-basic-trusty-liberty | 2 +- ceph-proxy/tests/gate-basic-trusty-mitaka | 2 +- ceph-proxy/tests/gate-basic-wily-liberty | 2 +- ceph-proxy/tests/gate-basic-xenial-mitaka | 2 +- ceph-proxy/tests/gate-basic-xenial-newton | 2 +- ceph-proxy/tests/gate-basic-xenial-ocata | 11 ++++++ ceph-proxy/tests/gate-basic-yakkety-newton | 2 +- ceph-proxy/tests/gate-basic-zesty-ocata | 9 +++++ 20 files changed, 99 insertions(+), 40 deletions(-) create mode 100644 ceph-proxy/tests/gate-basic-xenial-ocata mode change 100644 => 100755 ceph-proxy/tests/gate-basic-yakkety-newton create mode 100644 ceph-proxy/tests/gate-basic-zesty-ocata diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 8c89c3a3..6d544e75 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -109,7 +109,7 @@ ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), - ('zebra', 'ocata'), # TODO: upload with real Z name + ('zesty', 'ocata'), ]) @@ -152,6 +152,8 @@ ['2.5.0', '2.6.0', '2.7.0']), ('newton', ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0']), ]) # >= Liberty version->codename mapping @@ -410,14 +412,26 @@ def
get_os_version_package(pkg, fatal=True): os_rel = None -def os_release(package, base='essex'): +def reset_os_release(): + '''Unset the cached os_release version''' + global os_rel + os_rel = None + + +def os_release(package, base='essex', reset_cache=False): ''' Returns OpenStack release codename from a cached global. + + If reset_cache then unset the cached os_release version and return the + freshly determined version. + If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. ''' global os_rel + if reset_cache: + reset_os_release() if os_rel: return os_rel os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or @@ -535,6 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', + 'zesty': 'zesty-updates/ocata', + 'zesty/updates': 'xenial-updates/ocata', + 'zesty/proposed': 'xenial-proposed/ocata', } try: @@ -668,6 +685,7 @@ def clean_storage(block_device): else: zap_disk(block_device) + is_ip = ip.is_ip ns_query = ip.ns_query get_host_ip = ip.get_host_ip diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 996e81cc..94fc996c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -332,6 +332,8 @@ def config(scope=None): config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') config_cmd_line.append('--format=json') try: config_data = json.loads( diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 0f1b2f35..04cadb3a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -732,3 +732,20 @@ def get_total_ram(): assert unit == 'kB', 'Unknown unit' return int(value) * 1024 # Classic, not KiB. 
raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py index 21559642..3de372fd 100644 --- a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -5,7 +5,7 @@ def persistent_modprobe(module): """Load a kernel module and configure for auto-load on reboot.""" with open('/etc/modules', 'r+') as modules: if module not in modules.read(): - modules.write(module) + modules.write(module + "\n") def update_initramfs(version='all'): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index fce496b2..39b9b801 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -105,6 +105,14 @@ 'newton/proposed': 'xenial-proposed/newton', 'xenial-newton/proposed': 'xenial-proposed/newton', 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/newton': 'xenial-proposed/ocata', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 4de85d73..7462ead1 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,8 +11,8 @@ tags: - misc series: - xenial + - zesty - trusty - - precise - yakkety extra-bindings: public: diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 5b8f5ddd..66cb8ec5 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python import amulet diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index a39ed4c8..8e13ab14 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -546,7 +546,7 @@ def get_process_id_list(self, sentry_unit, process_name, raise if it is present. 
:returns: List of process IDs """ - cmd = 'pidof -x {}'.format(process_name) + cmd = 'pidof -x "{}"'.format(process_name) if not expect_success: cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. 
""" releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. diff --git a/ceph-proxy/tests/gate-basic-trusty-icehouse b/ceph-proxy/tests/gate-basic-trusty-icehouse index f67fea91..c36fd00c 100755 --- a/ceph-proxy/tests/gate-basic-trusty-icehouse +++ b/ceph-proxy/tests/gate-basic-trusty-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on trusty-icehouse.""" diff --git a/ceph-proxy/tests/gate-basic-trusty-kilo b/ceph-proxy/tests/gate-basic-trusty-kilo index 0a787b22..47c3296f 100755 --- a/ceph-proxy/tests/gate-basic-trusty-kilo +++ b/ceph-proxy/tests/gate-basic-trusty-kilo @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on trusty-kilo.""" diff --git a/ceph-proxy/tests/gate-basic-trusty-liberty b/ceph-proxy/tests/gate-basic-trusty-liberty index f339371b..cdd020b6 100755 --- a/ceph-proxy/tests/gate-basic-trusty-liberty +++ b/ceph-proxy/tests/gate-basic-trusty-liberty @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on trusty-liberty.""" diff --git a/ceph-proxy/tests/gate-basic-trusty-mitaka b/ceph-proxy/tests/gate-basic-trusty-mitaka index 2eca19d6..6157b03c 100755 --- a/ceph-proxy/tests/gate-basic-trusty-mitaka +++ b/ceph-proxy/tests/gate-basic-trusty-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on trusty-mitaka.""" diff --git a/ceph-proxy/tests/gate-basic-wily-liberty b/ceph-proxy/tests/gate-basic-wily-liberty index b0d8096b..8e91324b 100755 --- a/ceph-proxy/tests/gate-basic-wily-liberty +++ b/ceph-proxy/tests/gate-basic-wily-liberty @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on wily-liberty.""" diff --git a/ceph-proxy/tests/gate-basic-xenial-mitaka b/ceph-proxy/tests/gate-basic-xenial-mitaka index ae3d3350..603c8c05 100755 --- 
a/ceph-proxy/tests/gate-basic-xenial-mitaka +++ b/ceph-proxy/tests/gate-basic-xenial-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on xenial-mitaka.""" diff --git a/ceph-proxy/tests/gate-basic-xenial-newton b/ceph-proxy/tests/gate-basic-xenial-newton index f87e375e..26e06f74 100755 --- a/ceph-proxy/tests/gate-basic-xenial-newton +++ b/ceph-proxy/tests/gate-basic-xenial-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on xenial-newton.""" diff --git a/ceph-proxy/tests/gate-basic-xenial-ocata b/ceph-proxy/tests/gate-basic-xenial-ocata new file mode 100644 index 00000000..1c3a430f --- /dev/null +++ b/ceph-proxy/tests/gate-basic-xenial-ocata @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on xenial-ocata.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-ocata', + source='cloud:xenial-updates/ocata') + deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-yakkety-newton b/ceph-proxy/tests/gate-basic-yakkety-newton old mode 100644 new mode 100755 index b5ee9de4..cb5db9d3 --- a/ceph-proxy/tests/gate-basic-yakkety-newton +++ b/ceph-proxy/tests/gate-basic-yakkety-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python """Amulet tests on a basic ceph deployment on yakkety-newton.""" diff --git a/ceph-proxy/tests/gate-basic-zesty-ocata b/ceph-proxy/tests/gate-basic-zesty-ocata new file mode 100644 index 00000000..4d540576 --- /dev/null +++ b/ceph-proxy/tests/gate-basic-zesty-ocata @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on zesty-ocata.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='zesty') + deployment.run_tests() From e0c305e22b990386555745862fad19b9150f8a08 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Nov 2016 15:19:00 -0600 Subject: [PATCH 1239/2699] Update Amulet defs, series metadata and c-h sync - Sync charm helpers if applicable. - Fix test executable hashbangs for virtualenv prep. - Add Yakkety-Newton Amulet test definitions. - Prep Xenial-Ocata Amulet test definitions (not yet enabled). - Prep Zesty-Ocata Amulet test definitions (not yet enabled). - Add Zesty charm series metadata. - Remove Precise charm series metadata if present. - Remove Precise Amulet test definitions if present.
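The hashbang fix is what makes the venv-driven runs work: a test beginning with

    #!/usr/bin/python

always executes the system interpreter, whereas

    #!/usr/bin/env python

resolves python from $PATH, so an activated tox/bundletester virtualenv supplies its own interpreter and site-packages.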
Change-Id: Ib402df72adf294dac1c565b96c1432bf5e65bf80 --- .../contrib/openstack/amulet/deployment.py | 35 +++++-------------- .../contrib/openstack/amulet/utils.py | 11 ++++++ .../charmhelpers/contrib/openstack/context.py | 6 +++- .../charmhelpers/contrib/openstack/neutron.py | 2 ++ .../templates/openstack_https_frontend | 3 ++ .../templates/openstack_https_frontend.conf | 3 ++ .../section-keystone-authtoken-mitaka | 5 +++ .../charmhelpers/contrib/openstack/utils.py | 22 ++++++++++-- .../hooks/charmhelpers/core/hookenv.py | 2 ++ ceph-radosgw/hooks/charmhelpers/core/host.py | 17 +++++++++ .../core/kernel_factory/ubuntu.py | 2 +- .../hooks/charmhelpers/fetch/ubuntu.py | 8 +++++ ceph-radosgw/metadata.yaml | 2 +- ceph-radosgw/tests/basic_deployment.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 2 +- .../contrib/openstack/amulet/deployment.py | 35 +++++-------------- .../contrib/openstack/amulet/utils.py | 11 ++++++ ceph-radosgw/tests/gate-basic-xenial-ocata | 25 +++++++++++++ ceph-radosgw/tests/gate-basic-yakkety-newton | 0 ceph-radosgw/tests/gate-basic-zesty-ocata | 23 ++++++++++++ 20 files changed, 157 insertions(+), 59 deletions(-) create mode 100644 ceph-radosgw/tests/gate-basic-xenial-ocata mode change 100644 => 100755 ceph-radosgw/tests/gate-basic-yakkety-newton create mode 100644 ceph-radosgw/tests/gate-basic-zesty-ocata diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. """ releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. 
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index b601a226..d5b3a33b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -344,6 +344,10 @@ def __call__(self): 'auth_protocol': auth_protocol, 'api_version': api_version}) + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('service_domain')}) + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs @@ -644,7 +648,7 @@ class ApacheSSLContext(OSContextGenerator): service_namespace = None def enable_modules(self): - cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] check_call(cmd) def configure_cert(self, cn=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 08c86fa7..a8f1ed72 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -32,6 +32,7 @@ def headers_package(): kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver + QUANTUM_CONF_DIR = '/etc/quantum' @@ -91,6 +92,7 @@ def quantum_plugins(): } } + NEUTRON_CONF_DIR = '/etc/neutron' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index 6a923804..f614b3fa 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -9,10 +9,13 @@ Listen {{ ext_port }} SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 + SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on + RequestHeader set X-Forwarded-Proto "https" {% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index 6a923804..f614b3fa 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -9,10 +9,13 @@ Listen {{ ext_port }} SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 + SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on + RequestHeader set X-Forwarded-Proto "https" {% endfor -%} diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index dd6f3641..7c6f0c35 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -3,8 +3,13 @@ auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} auth_type = password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} project_domain_name = default user_domain_name = default +{% endif -%} project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 8c89c3a3..6d544e75 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -109,7 +109,7 @@ ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), - ('zebra', 'ocata'), # TODO: upload with real Z name + ('zesty', 'ocata'), ]) @@ -152,6 +152,8 @@ ['2.5.0', '2.6.0', '2.7.0']), ('newton', ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0']), ]) # >= Liberty version->codename mapping @@ -410,14 +412,26 @@ def get_os_version_package(pkg, fatal=True): os_rel = None -def os_release(package, base='essex'): +def reset_os_release(): + '''Unset the cached os_release version''' + global os_rel + os_rel = None + + +def os_release(package, base='essex', reset_cache=False): ''' Returns OpenStack release codename from a cached global. + + If reset_cache then unset the cached os_release version and return the + freshly determined version. + If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. 
''' global os_rel + if reset_cache: + reset_os_release() if os_rel: return os_rel os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or @@ -535,6 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', + 'zesty': 'zesty-updates/ocata', + 'zesty/updates': 'xenial-updates/ocata', + 'zesty/proposed': 'xenial-proposed/ocata', } try: @@ -668,6 +685,7 @@ def clean_storage(block_device): else: zap_disk(block_device) + is_ip = ip.is_ip ns_query = ip.ns_query get_host_ip = ip.get_host_ip diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 996e81cc..94fc996c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -332,6 +332,8 @@ def config(scope=None): config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') config_cmd_line.append('--format=json') try: config_data = json.loads( diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 0f1b2f35..04cadb3a 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -732,3 +732,20 @@ def get_total_ram(): assert unit == 'kB', 'Unknown unit' return int(value) * 1024 # Classic, not KiB. raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py index 21559642..3de372fd 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -5,7 +5,7 @@ def persistent_modprobe(module): """Load a kernel module and configure for auto-load on reboot.""" with open('/etc/modules', 'r+') as modules: if module not in modules.read(): - modules.write(module) + modules.write(module + "\n") def update_initramfs(version='all'): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index fce496b2..39b9b801 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -105,6 +105,14 @@ 'newton/proposed': 'xenial-proposed/newton', 'xenial-newton/proposed': 'xenial-proposed/newton', 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/newton': 'xenial-proposed/ocata', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
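[Editor's note] The os_release() change above introduces a module-level cache with an explicit reset path. A minimal sketch of that cache-with-reset pattern follows; the names here are illustrative stand-ins, not the charmhelpers API, and determine_release stands in for the package/source probing the real helper performs.

    # Sketch of the cache-with-reset pattern used by os_release() above.
    # The global holds the codename once determined; reset_cache forces a
    # fresh lookup, e.g. after an upgrade changes the installed packages.

    _cached_release = None


    def reset_release_cache():
        """Unset the cached release so the next lookup re-probes."""
        global _cached_release
        _cached_release = None


    def release(determine_release, base='essex', reset_cache=False):
        """Return the cached release codename, re-probing when asked.

        determine_release is a callable standing in for the real probing
        logic; base is returned when nothing can be determined.
        """
        global _cached_release
        if reset_cache:
            reset_release_cache()
        if _cached_release:
            return _cached_release
        _cached_release = determine_release() or base
        return _cached_release

Without the reset hook, a charm that upgrades OpenStack mid-hook would keep seeing the pre-upgrade codename for the rest of the hook execution, which is exactly what reset_os_release()/reset_cache in the patch above addresses.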
diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 06ed8e62..34f29f41 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,8 +14,8 @@ tags: - misc series: - xenial + - zesty - trusty - - precise - yakkety extra-bindings: public: diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index d202ce85..e3ff55d7 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -250,7 +250,7 @@ def test_102_services(self): 'cinder-volume'], } - if self._get_openstack_release() < self.vivid_kilo: + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead. ceph_services = [ diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index a39ed4c8..8e13ab14 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -546,7 +546,7 @@ def get_process_id_list(self, sentry_unit, process_name, raise if it is present. :returns: List of process IDs """ - cmd = 'pidof -x {}'.format(process_name) + cmd = 'pidof -x "{}"'.format(process_name) if not expect_success: cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. """ releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. 
diff --git a/ceph-radosgw/tests/gate-basic-xenial-ocata b/ceph-radosgw/tests/gate-basic-xenial-ocata new file mode 100644 index 00000000..9007db12 --- /dev/null +++ b/ceph-radosgw/tests/gate-basic-xenial-ocata @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on xenial-ocata.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='xenial', + openstack='cloud:xenial-ocata', + source='cloud:xenial-updates/ocata') + deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-yakkety-newton b/ceph-radosgw/tests/gate-basic-yakkety-newton old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tests/gate-basic-zesty-ocata b/ceph-radosgw/tests/gate-basic-zesty-ocata new file mode 100644 index 00000000..fd101f68 --- /dev/null +++ b/ceph-radosgw/tests/gate-basic-zesty-ocata @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on zesty-ocata.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='zesty') + deployment.run_tests() From a26a7c5339fb66d3fb51b58870f57e8ca63af35b Mon Sep 17 00:00:00 2001 From: chris Date: Mon, 28 Nov 2016 16:50:23 -0500 Subject: [PATCH 1240/2699] Update to install python-udev This is required as it is not a dependency of charms.ceph Change-Id: I9dc06c0e3b5fec2c6ed87f61798ed71db03c40fe Closes-Bug: 1645481 --- ceph-mon/hooks/install | 3 ++- ceph-mon/hooks/install_deps | 18 ++++++++++++++++++ ceph-mon/hooks/upgrade-charm | 1 + 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100755 ceph-mon/hooks/install_deps diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install index 29ff6894..0bdbf8d5 100755 --- a/ceph-mon/hooks/install +++ b/ceph-mon/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. 
-declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') check_and_install() { pkg="${1}-${2}" @@ -17,4 +17,5 @@ for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} done +./hooks/install_deps exec ./hooks/install.real diff --git a/ceph-mon/hooks/install_deps b/ceph-mon/hooks/install_deps new file mode 100755 index 00000000..da4ba5d8 --- /dev/null +++ b/ceph-mon/hooks/install_deps @@ -0,0 +1,18 @@ +#!/bin/bash +# Wrapper to ensure that python dependencies are installed before we get into +# the python part of the hook execution + +declare -a DEPS=('dnspython' 'pyudev') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm index 440473d7..6f3d75b4 100755 --- a/ceph-mon/hooks/upgrade-charm +++ b/ceph-mon/hooks/upgrade-charm @@ -3,4 +3,5 @@ # after we upgrade the charm with newer libraries rm -rf **/*.pyc +./hooks/install_deps exec ./hooks/upgrade-charm.real From fa3487dac282c0aa6781fad9c40dcaf4b42e8f2d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 9 Nov 2016 08:29:37 -0500 Subject: [PATCH 1241/2699] Add action to list unmounted disks This action is fairly simple in that it returns a list of unmounted disks This also includes a git-sync to pull in charms.ceph changes. Change-Id: I0daa514958799cf5899375335e8e9e684df27704 Closes-Bug: 1645481 --- ceph-osd/actions.yaml | 3 +- ceph-osd/actions/list-disks | 1 + ceph-osd/actions/list_disks.py | 35 + ceph-osd/hooks/install | 3 +- ceph-osd/hooks/install_deps | 18 + ceph-osd/hooks/upgrade-charm | 2 +- ceph-osd/lib/ceph/__init__.py | 92 +- ceph-osd/lib/ceph/ceph_helpers.py | 1381 +++++++++++++++++++++++++++++ ceph-osd/requirements.txt | 1 + 9 files changed, 1523 insertions(+), 13 deletions(-) create mode 120000 ceph-osd/actions/list-disks create mode 100755 ceph-osd/actions/list_disks.py create mode 100755 ceph-osd/hooks/install_deps create mode 100644 ceph-osd/lib/ceph/ceph_helpers.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 65ff76ab..945b065e 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -35,4 +35,5 @@ replace-osd: description: The replacement device to use. Example /dev/sdb. required: [osd-number, replacement-device] additionalProperties: false - +list-disks: + description: List the unmounted disk on the specified unit \ No newline at end of file diff --git a/ceph-osd/actions/list-disks b/ceph-osd/actions/list-disks new file mode 120000 index 00000000..ebe3b65f --- /dev/null +++ b/ceph-osd/actions/list-disks @@ -0,0 +1 @@ +list_disks.py \ No newline at end of file diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py new file mode 100755 index 00000000..f92a3b69 --- /dev/null +++ b/ceph-osd/actions/list_disks.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +List unmounted devices. + +This script will get all block devices known by udev and check if they +are mounted so that we can give unmounted devices to the administrator. +""" + +import sys + +sys.path.append('hooks/') +sys.path.append('lib/') + +from charmhelpers.core.hookenv import action_set + +from ceph import unmounted_disks + +if __name__ == '__main__': + action_set({ + 'disks': unmounted_disks()}) diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index 29ff6894..0bdbf8d5 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') check_and_install() { pkg="${1}-${2}" @@ -17,4 +17,5 @@ for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} done +./hooks/install_deps exec ./hooks/install.real diff --git a/ceph-osd/hooks/install_deps b/ceph-osd/hooks/install_deps new file mode 100755 index 00000000..da4ba5d8 --- /dev/null +++ b/ceph-osd/hooks/install_deps @@ -0,0 +1,18 @@ +#!/bin/bash +# Wrapper to ensure that python dependencies are installed before we get into +# the python part of the hook execution + +declare -a DEPS=('dnspython' 'pyudev') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm index 440473d7..43cae864 100755 --- a/ceph-osd/hooks/upgrade-charm +++ b/ceph-osd/hooks/upgrade-charm @@ -2,5 +2,5 @@ # Wrapper to ensure that old python bytecode isn't hanging around # after we upgrade the charm with newer libraries rm -rf **/*.pyc - +./hooks/install_deps exec ./hooks/upgrade-charm.real diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 4afe5eb9..db3772b5 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -23,6 +23,7 @@ import sys import errno import shutil +import pyudev from charmhelpers.core import hookenv from charmhelpers.core.host import ( @@ -50,13 +51,15 @@ is_block_device, zap_disk, is_device_mounted) +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_install_source) LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs'] + 'radosgw', 'xfsprogs', 'python-pyudev'] LinkSpeed = { "BASE_10": 10, @@ -100,6 +103,23 @@ } +def unmounted_disks(): + """List of unmounted block devices on the current host.""" + disks = [] + context = pyudev.Context() + for device in context.list_devices(DEVTYPE='disk'): + if device['SUBSYSTEM'] == 'block': + matched = False + for block_type in [u'dm', u'loop', u'ram', u'nbd']: + if block_type in device.device_node: + matched = True + if matched: + continue + disks.append(device.device_node) + log("Found disks: {}".format(disks)) + return [disk for disk in disks if not is_device_mounted(disk)] + + def save_sysctls(sysctl_dict, save_location): """ Persist the sysctls to the hard drive. 
@@ -949,7 +969,7 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ - ('mon', ['allow rw']), + ('mon', ['allow r']), ('osd', ['allow rwx']), ]) @@ -1344,7 +1364,7 @@ def roll_monitor_cluster(new_version, upgrade_key): version=new_version) else: # Check if the previous node has finished - status_set('blocked', + status_set('waiting', 'Waiting on {} to finish upgrading'.format( mon_sorted_list[position - 1])) wait_on_previous_node(upgrade_key=upgrade_key, @@ -1361,11 +1381,10 @@ def roll_monitor_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade monitor') -def upgrade_monitor(): +def upgrade_monitor(new_version): current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') log("Upgrading to: {}".format(new_version)) try: @@ -1393,7 +1412,6 @@ def upgrade_monitor(): service_start('ceph-mon@{}'.format(mon_id)) else: service_start('ceph-mon-all') - status_set("active", "") except subprocess.CalledProcessError as err: log("Stopping ceph and upgrading packages failed " "with message: {}".format(err.message)) @@ -1415,9 +1433,9 @@ def lock_and_roll(upgrade_key, service, my_name, version): # This should be quick if service == 'osd': - upgrade_osd() + upgrade_osd(version) elif service == 'mon': - upgrade_monitor() + upgrade_monitor(version) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -1541,11 +1559,10 @@ def roll_osd_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade osd') -def upgrade_osd(): +def upgrade_osd(new_version): current_version = get_version() status_set("maintenance", "Upgrading osd") log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') log("Upgrading to: {}".format(new_version)) try: @@ -1578,3 +1595,58 @@ def upgrade_osd(): "with message: {}".format(err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + + +def list_pools(service): + """ + This will list the current pools that Ceph has + + :param service: String service id to run under + :return: list. Returns a list of the ceph pools. Raises CalledProcessError + if the subprocess fails to run. + """ + try: + pool_list = [] + pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + for pool in pools.splitlines(): + pool_list.append(pool) + return pool_list + except subprocess.CalledProcessError as err: + log("rados lspools failed with error: {}".format(err.output)) + raise + + +# A dict of valid ceph upgrade paths. Mapping is old -> new +UPGRADE_PATHS = { + 'firefly': 'hammer', + 'hammer': 'jewel', +} + +# Map UCA codenames to ceph codenames +UCA_CODENAME_MAP = { + 'icehouse': 'firefly', + 'juno': 'firefly', + 'kilo': 'hammer', + 'liberty': 'hammer', + 'mitaka': 'jewel', +} + + +def pretty_print_upgrade_paths(): + '''Pretty print supported upgrade paths for ceph''' + lines = [] + for key, value in UPGRADE_PATHS.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def resolve_ceph_version(source): + ''' + Resolves a version of ceph based on source configuration + based on Ubuntu Cloud Archive pockets. 
+ + @param: source: source configuration option of charm + @returns: ceph release codename or None if not resolvable + ''' + os_release = get_os_codename_install_source(source) + return UCA_CODENAME_MAP.get(os_release) diff --git a/ceph-osd/lib/ceph/ceph_helpers.py b/ceph-osd/lib/ceph/ceph_helpers.py new file mode 100644 index 00000000..124dbf87 --- /dev/null +++ b/ceph-osd/lib/ceph/ceph_helpers.py @@ -0,0 +1,1381 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import errno +import hashlib +import math +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import (check_call, check_output, CalledProcessError, ) +from charmhelpers.core.hookenv import (config, + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, ) +from charmhelpers.core.host import (mount, + mounts, + service_start, + service_stop, + service_running, + umount, ) +from charmhelpers.fetch import (apt_install, ) + +from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ + pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. 
+ :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format(value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError("Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, + value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides + an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is + inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. + Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. + valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, + valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', + self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', + cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', + self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', + cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from + writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to + remove. 
+ :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', + self.name, cache_pool]) + + elif mode == 'writeback': + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) + # Flush the cache and wait for it to return + check_call(['rados', '--id', self.service, '-p', cache_pool, + 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', + 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', + self.name, cache_pool]) + + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. + :return: int. The number of pgs to use. + """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. + validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count + osd_list = get_osds(self.service) + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. 
+ if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config( + 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2**exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) + else: + return int(nearest) + + +class ReplicatedPool(Pool): + def __init__(self, + service, + name, + pg_num=None, + replicas=2, + percent_data=10.0): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. + max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) + else: + self.pg_num = self.get_pgs(self.replicas, percent_data) + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] + try: + check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, + service, + name, + erasure_code_profile="default", + percent_data=10.0): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', + self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. 
+ Returns json formatted output""" + + +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command + under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output(['ceph', '--id', service, 'mon_status', + '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format(e.message)) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command + under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append(hashlib.sha224(mon['name'].encode( + 'utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to delete. + """ + try: + check_output(['ceph', '--id', service, 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format(e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output(['ceph', '--id', service, 'config-key', 'put', str(key), + str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format(e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output(['ceph', '--id', service, 'config-key', 'get', + str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format(e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. 
:raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call(['ceph', '--id', service, 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command + under + :param name: + :return: + """ + try: + out = check_output( + ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value + ] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, + snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, + snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. + Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/ + rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. 
The Ceph user name to run the command + under + :param profile_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, + profile_name, + erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, + coding_chunks=1, + locality=None, + durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. + Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/ + rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command + under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', + 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', + 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', + profile_name, 'plugin=' + erasure_plugin_name, + 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError( + "create_erasure_profile should be called with k, m and one of l " + "or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command + under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name + ] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command + under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', + 'get', name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. 
The Ceph user name to run the command + under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json' + ]) + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, 'lspools']).decode( + 'UTF-8') + except CalledProcessError: + return False + + return name in out.split() + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', service, '--pool', pool + ]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + """Create a new Ceph keyring containing key.""" + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('Ceph keyring exists at %s.' % keyring, level=WARNING) + return + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' 
% keyring, level=DEBUG) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), + level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. 
+ os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, + pool, + rbd_img, + sizemb, + mount_point, + blk_device, + fstype, + system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), + level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.'.format(svc), + level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + """ + key = None + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. 
+ """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op_create_pool( + self, name, replica_count=3, + pg_num=None, weight=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up + """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + + self.ops.append({'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + 'weight': weight}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, + 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
+# Below is an example of glance having sent a request to ceph which ceph has
+# successfully processed:
+#  'ceph:8': {
+#      'ceph/0': {
+#          'auth': 'cephx',
+#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+#          'ceph-public-address': '10.5.44.103',
+#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+#          'private-address': '10.5.44.103',
+#      },
+#      'glance/0': {
+#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+#                         '"ops": [{"replicas": 3, "name": "glance", '
+#                         '"op": "create-pool"}]}'),
+#          'private-address': '10.5.44.109',
+#      },
+#  }
+
+
+def get_previous_request(rid):
+    """Return the last ceph broker request sent on a given relation.
+
+    @param rid: Relation id to query for request
+    """
+    request = None
+    broker_req = relation_get(attribute='broker_req',
+                              rid=rid,
+                              unit=local_unit())
+    if broker_req:
+        request_data = json.loads(broker_req)
+        request = CephBrokerRq(api_version=request_data['api-version'],
+                               request_id=request_data['request-id'])
+        request.set_ops(request_data['ops'])
+
+    return request
+
+
+def get_request_states(request, relation='ceph'):
+    """Return a dict of requests per relation id with their corresponding
+    completion state.
+
+    This allows a charm, which has a request for ceph, to see whether there
+    is an equivalent request already being processed and, if so, what state
+    that request is in.
+
+    @param request: A CephBrokerRq object
+    """
+    requests = {}
+    for rid in relation_ids(relation):
+        previous_request = get_previous_request(rid)
+        if request == previous_request:
+            sent = True
+            complete = is_request_complete_for_rid(previous_request, rid)
+        else:
+            sent = False
+            complete = False
+
+        requests[rid] = {'sent': sent, 'complete': complete, }
+
+    return requests
+
+
+def is_request_sent(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been
+    sent.
+
+    Returns True if a similar request has been sent.
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['sent']:
+            return False
+
+    return True
+
+
+def is_request_complete(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been
+    completed.
+
+    Returns True if a similar request has been completed.
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['complete']:
+            return False
+
+    return True
+
+
+def is_request_complete_for_rid(request, rid):
+    """Check if a given request has been completed on the given relation.
+
+    @param request: A CephBrokerRq object
+    @param rid: Relation ID
+    """
+    broker_key = get_broker_rsp_key()
+    for unit in related_units(rid):
+        rdata = relation_get(rid=rid, unit=unit)
+        if rdata.get(broker_key):
+            rsp = CephBrokerRsp(rdata.get(broker_key))
+            if rsp.request_id == request.request_id:
+                if not rsp.exit_code:
+                    return True
+        else:
+            # The remote unit sent no reply targeted at this unit so either
+            # the remote ceph cluster does not support unit targeted replies
+            # or it has not processed our request yet.
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as '
+                        'remote service supports unit specific replies',
+                        level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'support unit specific replies',
+                        level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return the broker response key for this unit.
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit.
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send a broker request if an equivalent request has not already been
+    sent.
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
+
+
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings. Users can provide a
+    dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
+    """
+
+    def __init__(self, permitted_sections=None):
+        self.permitted_sections = permitted_sections or []
+
+    def __call__(self):
+        conf = config('config-flags')
+        if not conf:
+            return {}
+
+        conf = config_flags_parser(conf)
+        if type(conf) != dict:
+            log("Provided config-flags is not a dictionary - ignoring",
+                level=WARNING)
+            return {}
+
+        permitted = self.permitted_sections
+        if permitted:
+            diff = set(conf.keys()).difference(set(permitted))
+            if diff:
+                log("Config-flags contains invalid keys '%s' - they will be "
+                    "ignored" % (', '.join(diff)),
+                    level=WARNING)
+
+        ceph_conf = {}
+        for key in conf:
+            if permitted and key not in permitted:
+                log("Ignoring key '%s'" % key, level=WARNING)
+                continue
+
+            ceph_conf[key] = conf[key]
+
+        return ceph_conf
diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt
index 6a3271b0..db0af4d0 100644
--- a/ceph-osd/requirements.txt
+++ b/ceph-osd/requirements.txt
@@ -10,3 +10,4 @@ Jinja2>=2.6  # BSD License (3 clause)
 six>=1.9.0
 dnspython>=1.12.0
 psutil>=1.1.1,<2.0.0
+pyudev

From 9c2ea56a31cede83f98e7f64175d804ea1211803 Mon Sep 17 00:00:00 2001
From: Adam Collard
Date: Tue, 29 Nov 2016 16:04:50 +0000
Subject: [PATCH 1242/2699] Install lockfile-progs when related to nagios

The nrpe ceph status script relies on lockfile-create, but
lockfile-progs (the package containing lockfile-create) was missing
from the install. Install it when related to nagios, and on
upgrade-charm when related to nagios.
Change-Id: I87b68795c87135c879e0ac93ed3bfae3d01807fd Closes-Bug: #1629104 --- ceph-mon/hooks/ceph_hooks.py | 5 ++++- ceph-mon/unit_tests/test_ceph_hooks.py | 31 +++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ec78c305..38b822c8 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -33,6 +33,7 @@ config, relation_ids, related_units, + is_relation_made, relation_get, relation_set, leader_set, leader_get, @@ -538,6 +539,8 @@ def upgrade_charm(): ceph.update_monfs() upgrade_keys() mon_relation_joined() + if is_relation_made("nrpe-external-master"): + update_nrpe_config() @hooks.hook('start') @@ -554,7 +557,7 @@ def start(): @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): # python-dbus is used by check_upstart_job - apt_install('python-dbus') + apt_install(['python-dbus', 'lockfile-progs']) log('Refreshing nagios checks') if os.path.isdir(NAGIOS_PLUGINS): rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index a9702177..92786043 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -2,7 +2,7 @@ import unittest import sys -from mock import patch, MagicMock +from mock import patch, MagicMock, DEFAULT # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. @@ -115,3 +115,32 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'public_addr': '10.0.0.1', 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + + def test_nrpe_dependency_installed(self): + with patch.multiple(ceph_hooks, + apt_install=DEFAULT, + rsync=DEFAULT, + log=DEFAULT, + write_file=DEFAULT, + nrpe=DEFAULT) as mocks: + ceph_hooks.update_nrpe_config() + mocks["apt_install"].assert_called_once_with( + ["python-dbus", "lockfile-progs"]) + + def test_upgrade_charm_with_nrpe_relation_installs_dependencies(self): + with patch.multiple( + ceph_hooks, + apt_install=DEFAULT, + rsync=DEFAULT, + log=DEFAULT, + write_file=DEFAULT, + nrpe=DEFAULT, + emit_cephconf=DEFAULT, + upgrade_keys=DEFAULT, + mon_relation_joined=DEFAULT, + is_relation_made=DEFAULT) as mocks, patch( + "charmhelpers.contrib.hardening.harden.config"): + mocks["is_relation_made"].return_value = True + ceph_hooks.upgrade_charm() + mocks["apt_install"].assert_called_with( + ["python-dbus", "lockfile-progs"]) From 12e4aab0e0d72dad3bfb2f4eaf9ae3e893a8d8f8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 1 Dec 2016 09:19:17 -0500 Subject: [PATCH 1243/2699] Update Make jobs to separate out ceph lib sync Change-Id: Id7026e0109684d312eb36376f6f5b072a5282b37 --- ceph-osd/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 4cb90c86..7f6d3fd0 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -25,10 +25,10 @@ ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -git-sync: bin/git_sync.py +ceph-sync: bin/git_sync.py $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git -sync: git-sync ch-sync +sync: ch-sync publish: lint bzr push lp:charms/ceph-osd From 5c93b3e2c138338a2088f9b8efcb65c061f51153 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 1 Dec 2016 
10:05:29 -0500 Subject: [PATCH 1244/2699] add pyudev to requirements as well Change-Id: I0ecdaa31527df5ae00f9b50fa5910c4d81b3b3ac --- ceph-mon/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 6a3271b0..db0af4d0 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -10,3 +10,4 @@ Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 +pyudev From 7c1d08278deaf7c90cc9d7f84ee0c97d77000004 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 1 Dec 2016 09:19:38 -0500 Subject: [PATCH 1245/2699] Update Make jobs to separate out ceph lib sync Change-Id: I5e1fe4a913f1baaa22ea73e34f3d1ba8edf82c5a --- ceph-mon/Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 902ed078..d4b8b8d1 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -24,11 +24,12 @@ bin/git_sync.py: ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -git-sync: bin/git_sync.py + +ceph-sync: bin/git_sync.py $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git -sync: git-sync ch-sync +sync: ch-sync publish: lint test - bzr push lp:charms/ceph - bzr push lp:charms/trusty/ceph + bzr push lp:charms/ceph-mon + bzr push lp:charms/trusty/ceph-mon From ed39022a58594c5ac3969e7525aee6f28f9272b9 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Sat, 3 Dec 2016 09:46:34 -0600 Subject: [PATCH 1246/2699] Remove zesty series metadata The charm store does not yet recognize this series, and errors when it exists, thereby preventing push/release of dev charms into the charm store. Revisit after the following is resolved: - https://github.com/juju/charmstore/issues/695 Change-Id: Ibbb4cfe533b607f394fbffab6310a18bc667886d --- ceph-proxy/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 7462ead1..faf20ec2 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,7 +11,6 @@ tags: - misc series: - xenial - - zesty - trusty - yakkety extra-bindings: From c26ed353f22a35a86708f53f46ee3b0806b0ffdb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Sat, 3 Dec 2016 09:46:39 -0600 Subject: [PATCH 1247/2699] Remove zesty series metadata The charm store does not yet recognize this series, and errors when it exists, thereby preventing push/release of dev charms into the charm store. Revisit after the following is resolved: - https://github.com/juju/charmstore/issues/695 Change-Id: I2ed40eefcd2e19cd2c0587129ed4d0b430e805b0 --- ceph-radosgw/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 34f29f41..bf1fad5e 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,7 +14,6 @@ tags: - misc series: - xenial - - zesty - trusty - yakkety extra-bindings: From 87534aaa2e12912db250f5ec5880d4b1922f3f45 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Nov 2016 15:16:18 -0600 Subject: [PATCH 1248/2699] Update Amulet defs, series metadata and c-h sync - Sync charm helpers if applicable. - Fix test executable hashbags for virtualenv prep. - Add Yakkety-Newton Amulet test definitions. - Prep Xenial-Ocata Amulet test definitions (not yet enabled). - Prep Zesty-Ocata Amulet test definitions (not yet enabled). 
- Remove Precise charm series metadata if present. - Remove Precise Amulet test definitions if present. Change-Id: I00a4c855d4da01b22110f06bf2820bca0bcde7b9 --- .../contrib/openstack/amulet/deployment.py | 35 +- .../contrib/openstack/amulet/utils.py | 11 + .../charmhelpers/contrib/openstack/context.py | 6 +- .../charmhelpers/contrib/openstack/neutron.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 22 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 2 + ceph-mon/hooks/charmhelpers/core/host.py | 17 + .../core/kernel_factory/ubuntu.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 8 + ceph-mon/lib/ceph/__init__.py | 90 +- ceph-mon/lib/ceph/ceph_broker.py | 2 +- ceph-mon/lib/ceph/ceph_helpers.py | 1514 +++++++++++++++++ ceph-mon/metadata.yaml | 1 - ceph-mon/tests/basic_deployment.py | 4 +- .../charmhelpers/contrib/amulet/utils.py | 2 +- .../contrib/openstack/amulet/deployment.py | 35 +- .../contrib/openstack/amulet/utils.py | 11 + ceph-mon/tests/gate-basic-trusty-icehouse | 2 +- ceph-mon/tests/gate-basic-trusty-kilo | 2 +- ceph-mon/tests/gate-basic-trusty-liberty | 2 +- ceph-mon/tests/gate-basic-trusty-mitaka | 2 +- ceph-mon/tests/gate-basic-xenial-mitaka | 2 +- ceph-mon/tests/gate-basic-xenial-newton | 2 +- ceph-mon/tests/gate-basic-xenial-ocata | 25 + ceph-mon/tests/gate-basic-yakkety-newton | 2 +- ceph-mon/tests/gate-basic-zesty-ocata | 23 + 26 files changed, 1749 insertions(+), 77 deletions(-) create mode 100644 ceph-mon/lib/ceph/ceph_helpers.py create mode 100644 ceph-mon/tests/gate-basic-xenial-ocata mode change 100644 => 100755 ceph-mon/tests/gate-basic-yakkety-newton create mode 100644 ceph-mon/tests/gate-basic-zesty-ocata diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. """ releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. 
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index b601a226..d5b3a33b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -344,6 +344,10 @@ def __call__(self): 'auth_protocol': auth_protocol, 'api_version': api_version}) + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('service_domain')}) + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs @@ -644,7 +648,7 @@ class ApacheSSLContext(OSContextGenerator): service_namespace = None def enable_modules(self): - cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] check_call(cmd) def configure_cert(self, cn=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index 08c86fa7..a8f1ed72 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -32,6 +32,7 @@ def headers_package(): kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver + QUANTUM_CONF_DIR = '/etc/quantum' @@ -91,6 +92,7 @@ def quantum_plugins(): } } + NEUTRON_CONF_DIR = '/etc/neutron' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 8c89c3a3..6d544e75 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -109,7 +109,7 @@ ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), - ('zebra', 'ocata'), # TODO: upload with real Z name + ('zesty', 'ocata'), ]) @@ -152,6 +152,8 @@ ['2.5.0', '2.6.0', '2.7.0']), ('newton', ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0']), ]) # >= Liberty version->codename mapping @@ -410,14 +412,26 @@ def get_os_version_package(pkg, fatal=True): os_rel = None -def os_release(package, base='essex'): +def reset_os_release(): + '''Unset the cached os_release version''' + global os_rel + os_rel = None + + +def os_release(package, base='essex', reset_cache=False): ''' Returns OpenStack release codename from a cached global. + + If reset_cache then unset the cached os_release version and return the + freshly determined version. + If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. 
''' global os_rel + if reset_cache: + reset_os_release() if os_rel: return os_rel os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or @@ -535,6 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', + 'zesty': 'zesty-updates/ocata', + 'zesty/updates': 'xenial-updates/ocata', + 'zesty/proposed': 'xenial-proposed/ocata', } try: @@ -668,6 +685,7 @@ def clean_storage(block_device): else: zap_disk(block_device) + is_ip = ip.is_ip ns_query = ip.ns_query get_host_ip = ip.get_host_ip diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 996e81cc..94fc996c 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -332,6 +332,8 @@ def config(scope=None): config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') config_cmd_line.append('--format=json') try: config_data = json.loads( diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 0f1b2f35..04cadb3a 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -732,3 +732,20 @@ def get_total_ram(): assert unit == 'kB', 'Unknown unit' return int(value) * 1024 # Classic, not KiB. raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py index 21559642..3de372fd 100644 --- a/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -5,7 +5,7 @@ def persistent_modprobe(module): """Load a kernel module and configure for auto-load on reboot.""" with open('/etc/modules', 'r+') as modules: if module not in modules.read(): - modules.write(module) + modules.write(module + "\n") def update_initramfs(version='all'): diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index fce496b2..39b9b801 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -105,6 +105,14 @@ 'newton/proposed': 'xenial-proposed/newton', 'xenial-newton/proposed': 'xenial-proposed/newton', 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/newton': 'xenial-proposed/ocata', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
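
The Ocata entries above extend the pocket map consulted when a charm's
source/openstack-origin option names a cloud archive alias. Assuming the
dict being patched is CLOUD_ARCHIVE_POCKETS from charmhelpers.fetch.ubuntu
(the hunk context suggests this, but it is an assumption), resolution is a
plain lookup (illustrative):

    from charmhelpers.fetch.ubuntu import CLOUD_ARCHIVE_POCKETS

    # a 'cloud:xenial-ocata' source is stripped to 'xenial-ocata' first
    CLOUD_ARCHIVE_POCKETS['xenial-ocata']  # -> 'xenial-updates/ocata'
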
diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index b95a8260..db3772b5 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -23,6 +23,7 @@ import sys import errno import shutil +import pyudev from charmhelpers.core import hookenv from charmhelpers.core.host import ( @@ -50,13 +51,15 @@ is_block_device, zap_disk, is_device_mounted) +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_install_source) LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs'] + 'radosgw', 'xfsprogs', 'python-pyudev'] LinkSpeed = { "BASE_10": 10, @@ -100,6 +103,23 @@ } +def unmounted_disks(): + """List of unmounted block devices on the current host.""" + disks = [] + context = pyudev.Context() + for device in context.list_devices(DEVTYPE='disk'): + if device['SUBSYSTEM'] == 'block': + matched = False + for block_type in [u'dm', u'loop', u'ram', u'nbd']: + if block_type in device.device_node: + matched = True + if matched: + continue + disks.append(device.device_node) + log("Found disks: {}".format(disks)) + return [disk for disk in disks if not is_device_mounted(disk)] + + def save_sysctls(sysctl_dict, save_location): """ Persist the sysctls to the hard drive. @@ -1344,7 +1364,7 @@ def roll_monitor_cluster(new_version, upgrade_key): version=new_version) else: # Check if the previous node has finished - status_set('blocked', + status_set('waiting', 'Waiting on {} to finish upgrading'.format( mon_sorted_list[position - 1])) wait_on_previous_node(upgrade_key=upgrade_key, @@ -1361,11 +1381,10 @@ def roll_monitor_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade monitor') -def upgrade_monitor(): +def upgrade_monitor(new_version): current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') log("Upgrading to: {}".format(new_version)) try: @@ -1393,7 +1412,6 @@ def upgrade_monitor(): service_start('ceph-mon@{}'.format(mon_id)) else: service_start('ceph-mon-all') - status_set("active", "") except subprocess.CalledProcessError as err: log("Stopping ceph and upgrading packages failed " "with message: {}".format(err.message)) @@ -1415,9 +1433,9 @@ def lock_and_roll(upgrade_key, service, my_name, version): # This should be quick if service == 'osd': - upgrade_osd() + upgrade_osd(version) elif service == 'mon': - upgrade_monitor() + upgrade_monitor(version) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -1541,11 +1559,10 @@ def roll_osd_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade osd') -def upgrade_osd(): +def upgrade_osd(new_version): current_version = get_version() status_set("maintenance", "Upgrading osd") log("Current ceph version is {}".format(current_version)) - new_version = config('release-version') log("Upgrading to: {}".format(new_version)) try: @@ -1578,3 +1595,58 @@ def upgrade_osd(): "with message: {}".format(err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + + +def list_pools(service): + """ + This will list the current pools that Ceph has + + :param service: String service id to run under + :return: list. Returns a list of the ceph pools. Raises CalledProcessError + if the subprocess fails to run. 
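+
+    Illustrative (pool names are examples only):
+        list_pools('admin')  # -> ['rbd', 'glance', 'cinder']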
+ """ + try: + pool_list = [] + pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + for pool in pools.splitlines(): + pool_list.append(pool) + return pool_list + except subprocess.CalledProcessError as err: + log("rados lspools failed with error: {}".format(err.output)) + raise + + +# A dict of valid ceph upgrade paths. Mapping is old -> new +UPGRADE_PATHS = { + 'firefly': 'hammer', + 'hammer': 'jewel', +} + +# Map UCA codenames to ceph codenames +UCA_CODENAME_MAP = { + 'icehouse': 'firefly', + 'juno': 'firefly', + 'kilo': 'hammer', + 'liberty': 'hammer', + 'mitaka': 'jewel', +} + + +def pretty_print_upgrade_paths(): + '''Pretty print supported upgrade paths for ceph''' + lines = [] + for key, value in UPGRADE_PATHS.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def resolve_ceph_version(source): + ''' + Resolves a version of ceph based on source configuration + based on Ubuntu Cloud Archive pockets. + + @param: source: source configuration option of charm + @returns: ceph release codename or None if not resolvable + ''' + os_release = get_os_codename_install_source(source) + return UCA_CODENAME_MAP.get(os_release) diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 188f2001..0892961e 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -424,7 +424,7 @@ def handle_create_cephfs(request, service): log("CephFS already created") return - # Finally create CephFS + # Finally create CephFS try: check_output(["ceph", '--id', service, diff --git a/ceph-mon/lib/ceph/ceph_helpers.py b/ceph-mon/lib/ceph/ceph_helpers.py new file mode 100644 index 00000000..31b19569 --- /dev/null +++ b/ceph-mon/lib/ceph/ceph_helpers.py @@ -0,0 +1,1514 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import errno +import hashlib +import math +import six + +import os +import shutil +import json +import time +import uuid +import re + +import subprocess +from subprocess import (check_call, check_output, CalledProcessError, ) +from charmhelpers.core.hookenv import (config, + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, ) +from charmhelpers.core.host import (mount, + mounts, + service_start, + service_stop, + service_running, + umount, ) +from charmhelpers.fetch import (apt_install, ) + +from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" + +CRUSH_BUCKET = """root {name} {{ + id {id} # do not change unnecessarily + # weight 0.000 + alg straw + hash 0 # rjenkins1 +}} + +rule {name} {{ + ruleset 0 + type replicated + min_size 1 + max_size 10 + step take {name} + step chooseleaf firstn 0 type host + step emit +}}""" + +# This regular expression looks for a string like: +# root NAME { +# id NUMBER +# so that we can extract NAME and ID from the crushmap +CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") + +# This regular expression looks for ID strings in the crushmap like: +# id NUMBER +# so that we can extract the IDs from a crushmap +CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ + pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format(value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if valid_type is six.string_types: + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError("Invalid valid_range list of {} for {}. 
" + "List must be [min,max]".format(valid_range, + value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides + an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Crushmap(object): + """An object oriented approach to Ceph crushmap management.""" + + def __init__(self): + """Iiitialize the Crushmap from Ceph""" + self._crushmap = self.load_crushmap() + roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) + buckets = [] + ids = list(map( + lambda x: int(x), + re.findall(CRUSHMAP_ID_RE, self._crushmap))) + ids.sort() + if roots != []: + for root in roots: + buckets.append(Crushmap.Bucket(root[0], root[1], True)) + + self._buckets = buckets + if ids != []: + self._ids = ids + else: + self._ids = [0] + + def load_crushmap(self): + try: + crush = subprocess.Popen( + ('ceph', 'osd', 'getcrushmap'), + stdout=subprocess.PIPE) + return subprocess.check_output( + ('crushtool', '-d', '-'), + stdin=crush.stdout).decode('utf-8') + except Exception as e: + log("load_crushmap error: {}".format(e)) + raise "Failed to read Crushmap" + + def buckets(self): + """Return a list of buckets that are in the Crushmap.""" + return self._buckets + + def add_bucket(self, bucket_name): + """Add a named bucket to Ceph""" + new_id = min(self._ids) - 1 + self._ids.append(new_id) + self._buckets.append(Crushmap.Bucket(bucket_name, new_id)) + + def save(self): + """Persist Crushmap to Ceph""" + try: + crushmap = self.build_crushmap() + compiled = subprocess.Popen( + ('crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'), + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + output = compiled.communicate(crushmap)[0] + ceph = subprocess.Popen( + ('ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'), + stdin=subprocess.PIPE) + ceph_output = ceph.communicate(input=output) + return ceph_output + except Exception as e: + log("save error: {}".format(e)) + raise "Failed to save crushmap" + + def build_crushmap(self): + """Modifies the curent crushmap to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + class Bucket(object): + """An object that describes a Crush bucket.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is + inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. 
+ Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. + valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, + valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', + self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', + cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', + self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', + cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from + writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to + remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + version = ceph_version() + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', + self.name, cache_pool]) + + elif mode == 'writeback': + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if version >= '10.1': + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) + # Flush the cache and wait for it to return + check_call(['rados', '--id', self.service, '-p', cache_pool, + 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', + 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', + self.name, cache_pool]) + + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. 
pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. + :return: int. The number of pgs to use. + """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. + validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count + osd_list = get_osds(self.service) + expected = config('expected-osd-count') or 0 + + if osd_list: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config( + 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2**exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) + else: + return int(nearest) + + +class ReplicatedPool(Pool): + + def __init__(self, + service, + name, + pg_num=None, + replicas=2, + percent_data=10.0): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. 
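+            # (Illustrative: passing percent_data=100.0 yields the PG count
+            # the cluster could support if this pool held all of the data,
+            # which serves as the cap on the user-supplied pg_num.)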
+ max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) + else: + self.pg_num = self.get_pgs(self.replicas, percent_data) + + def create(self): + if not pool_exists(self.service, self.name): + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num)] + try: + check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + + def __init__(self, + service, + name, + erasure_code_profile="default", + percent_data=10.0): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) + # Create it + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), 'erasure', + self.erasure_code_profile] + try: + check_call(cmd) + except CalledProcessError: + raise + + """Get an existing erasure code profile if it already exists. + Returns json formatted output""" + + +def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command + under + :return: json string. :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output(['ceph', '--id', service, 'mon_status', + '--format=json']) + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}".format( + mon_status, v.message)) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}".format(e.message)) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command + under + :rtype : dict. 
json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append(hashlib.sha224(mon['name'].encode( + 'utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to delete. + """ + try: + check_output(['ceph', '--id', service, 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format(e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output(['ceph', '--id', service, 'config-key', 'put', str(key), + str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format(e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output(['ceph', '--id', service, 'config-key', 'get', + str(key)]) + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format(e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command + under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call(['ceph', '--id', service, 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. The Ceph user name to run the command + under + :param name: + :return: + """ + try: + out = check_output( + ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + +def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param key: six.string_types + :param value: + :return: None. 
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value + ] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, + snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, + snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +# max_bytes should be an int or long +def set_pool_quota(service, pool_name, max_bytes): + """ + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :param max_bytes: int or long + :return: None. Can raise CalledProcessError + """ + # Set a byte quota on a RADOS pool in ceph. + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', str(max_bytes)] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_pool_quota(service, pool_name): + """ + Set a byte quota on a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, + 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def remove_erasure_profile(service, profile_name): + """ + Create a new erasure code profile if one does not already exist for it. + Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/ + rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command + under + :param profile_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, + profile_name, + erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, + coding_chunks=1, + locality=None, + durability_estimator=None): + """ + Create a new erasure code profile if one does not already exist for it. + Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/ + rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command + under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', + 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :return: None. 
Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', + 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', + profile_name, 'plugin=' + erasure_plugin_name, + 'k=' + str(data_chunks), 'm=' + str(coding_chunks), + 'ruleset_failure_domain=' + failure_domain] + if locality is not None and durability_estimator is not None: + raise ValueError( + "create_erasure_profile should be called with k, m and one of l " + "or c but not both.") + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command + under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name + ] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. The Ceph user name to run the command + under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', + 'get', name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. The Ceph user name to run the command + under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json' + ]) + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, 'lspools']).decode( + 'UTF-8') + except CalledProcessError: + return False + + return name in out.split() + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. 
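+
+    Illustrative: on a three-OSD cluster this returns [0, 1, 2]; None is
+    returned for ceph releases older than 0.56, which cannot be queried
+    from the CLI.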
+ """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', service, '--pool', pool + ]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + """Create a new Ceph keyring containing key.""" + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('Ceph keyring exists at %s.' % keyring, level=WARNING) + return + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), + level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, + pool, + rbd_img, + sizemb, + mount_point, + blk_device, + fstype, + system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. 
+ if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), + level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.'.format(svc), + level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. + """ + key = None + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op_create_pool( + self, name, replica_count=3, + pg_num=None, weight=None): + """Adds an operation to create a pool. + + @param pg_num setting: optional setting. If not provided, this value + will be calculated by the broker based on how many OSDs are in the + cluster at the time of creation. Note that, if provided, this value + will be capped at the current available maximum. + @param weight: the percentage of data the pool makes up + """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + + self.ops.append({'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + 'weight': weight}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. 
+ """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, + 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', + rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. 
+ + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids(relation): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = {'sent': sent, 'complete': complete, } + + return requests + + +def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. + if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote' + ' service supports unit specific replies', + level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', + level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request, relation='ceph'): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request, relation=relation): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids(relation): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) + + +class CephConfContext(object): + """Ceph config (ceph.conf) context. + + Supports user-provided Ceph configuration settings. Use can provide a + dictionary as the value for the config-flags charm option containing + Ceph configuration settings keyede by their section in ceph.conf. 
+ """ + + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if type(conf) != dict: + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), + level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + + return ceph_conf diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 54b50348..42810738 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - trusty - - precise - yakkety peers: mon: diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 94c7baba..47e24767 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # @@ -228,7 +228,7 @@ def test_102_services(self): 'cinder-volume'], } - if self._get_openstack_release() < self.vivid_kilo: + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead. ceph_services = [ diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index a39ed4c8..8e13ab14 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -546,7 +546,7 @@ def get_process_id_list(self, sentry_unit, process_name, raise if it is present. :returns: List of process IDs """ - cmd = 'pidof -x {}'.format(process_name) + cmd = 'pidof -x "{}"'.format(process_name) if not expect_success: cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. """ releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. 
diff --git a/ceph-mon/tests/gate-basic-trusty-icehouse b/ceph-mon/tests/gate-basic-trusty-icehouse index a8639fe4..8a987930 100755 --- a/ceph-mon/tests/gate-basic-trusty-icehouse +++ b/ceph-mon/tests/gate-basic-trusty-icehouse @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-trusty-kilo b/ceph-mon/tests/gate-basic-trusty-kilo index c3315591..86e772a7 100755 --- a/ceph-mon/tests/gate-basic-trusty-kilo +++ b/ceph-mon/tests/gate-basic-trusty-kilo @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-trusty-liberty b/ceph-mon/tests/gate-basic-trusty-liberty index d6542657..3dfa8b60 100755 --- a/ceph-mon/tests/gate-basic-trusty-liberty +++ b/ceph-mon/tests/gate-basic-trusty-liberty @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-trusty-mitaka b/ceph-mon/tests/gate-basic-trusty-mitaka index e18197be..52b688fa 100755 --- a/ceph-mon/tests/gate-basic-trusty-mitaka +++ b/ceph-mon/tests/gate-basic-trusty-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-xenial-mitaka b/ceph-mon/tests/gate-basic-xenial-mitaka index 8d93f7b5..f8977000 100755 --- a/ceph-mon/tests/gate-basic-xenial-mitaka +++ b/ceph-mon/tests/gate-basic-xenial-mitaka @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-xenial-newton b/ceph-mon/tests/gate-basic-xenial-newton index 40fc35e4..69bf0a5c 100755 --- a/ceph-mon/tests/gate-basic-xenial-newton +++ b/ceph-mon/tests/gate-basic-xenial-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-xenial-ocata b/ceph-mon/tests/gate-basic-xenial-ocata new file mode 100644 index 00000000..ec2713ce --- /dev/null +++ b/ceph-mon/tests/gate-basic-xenial-ocata @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph deployment on xenial-ocata.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-ocata', + source='cloud:xenial-updates/ocata') + deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-yakkety-newton b/ceph-mon/tests/gate-basic-yakkety-newton old mode 100644 new mode 100755 index f2939866..9ae44008 --- a/ceph-mon/tests/gate-basic-yakkety-newton +++ b/ceph-mon/tests/gate-basic-yakkety-newton @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/tests/gate-basic-zesty-ocata b/ceph-mon/tests/gate-basic-zesty-ocata new file mode 100644 index 00000000..a6421933 --- /dev/null +++ b/ceph-mon/tests/gate-basic-zesty-ocata @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on zesty-ocata.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='zesty') + deployment.run_tests() From 5299d18b5727bb34e3b6396c873836ae2f85cc12 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Nov 2016 15:17:55 -0600 Subject: [PATCH 1249/2699] Update Amulet defs, series metadata and c-h sync - Sync charm helpers if applicable. - Fix test executable hashbags for virtualenv prep. - Add Yakkety-Newton Amulet test definitions. - Prep Xenial-Ocata Amulet test definitions (not yet enabled). - Prep Zesty-Ocata Amulet test definitions (not yet enabled). - Remove Precise charm series metadata if present. - Remove Precise Amulet test definitions if present. 
Change-Id: I367b6e9e22464c0b85e71ad330234477699835e0 --- .../charmhelpers/contrib/openstack/context.py | 6 +- .../charmhelpers/contrib/openstack/neutron.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 22 ++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 2 + ceph-osd/hooks/charmhelpers/core/host.py | 17 ++ .../core/kernel_factory/ubuntu.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 8 + ceph-osd/lib/ceph/ceph_helpers.py | 162 +++++++++++++++++- ceph-osd/metadata.yaml | 1 - ceph-osd/tests/basic_deployment.py | 2 +- .../charmhelpers/contrib/amulet/utils.py | 2 +- .../contrib/openstack/amulet/deployment.py | 35 +--- .../contrib/openstack/amulet/utils.py | 11 ++ ceph-osd/tests/gate-basic-xenial-ocata | 25 +++ ceph-osd/tests/gate-basic-yakkety-newton | 0 ceph-osd/tests/gate-basic-zesty-ocata | 23 +++ 16 files changed, 285 insertions(+), 35 deletions(-) create mode 100644 ceph-osd/tests/gate-basic-xenial-ocata mode change 100644 => 100755 ceph-osd/tests/gate-basic-yakkety-newton create mode 100644 ceph-osd/tests/gate-basic-zesty-ocata diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index b601a226..d5b3a33b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -344,6 +344,10 @@ def __call__(self): 'auth_protocol': auth_protocol, 'api_version': api_version}) + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('service_domain')}) + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs @@ -644,7 +648,7 @@ class ApacheSSLContext(OSContextGenerator): service_namespace = None def enable_modules(self): - cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] check_call(cmd) def configure_cert(self, cn=None): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index 08c86fa7..a8f1ed72 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -32,6 +32,7 @@ def headers_package(): kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver + QUANTUM_CONF_DIR = '/etc/quantum' @@ -91,6 +92,7 @@ def quantum_plugins(): } } + NEUTRON_CONF_DIR = '/etc/neutron' diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 8c89c3a3..6d544e75 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -109,7 +109,7 @@ ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), - ('zebra', 'ocata'), # TODO: upload with real Z name + ('zesty', 'ocata'), ]) @@ -152,6 +152,8 @@ ['2.5.0', '2.6.0', '2.7.0']), ('newton', ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0']), ]) # >= Liberty version->codename mapping @@ -410,14 +412,26 @@ def get_os_version_package(pkg, fatal=True): os_rel = None -def os_release(package, base='essex'): +def reset_os_release(): + '''Unset the cached os_release version''' + global os_rel + os_rel = None + + +def os_release(package, base='essex', reset_cache=False): ''' Returns OpenStack release codename from a cached global. + + If reset_cache then unset the cached os_release version and return the + freshly determined version. 
+ If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. ''' global os_rel + if reset_cache: + reset_os_release() if os_rel: return os_rel os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or @@ -535,6 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', + 'zesty': 'zesty-updates/ocata', + 'zesty/updates': 'xenial-updates/ocata', + 'zesty/proposed': 'xenial-proposed/ocata', } try: @@ -668,6 +685,7 @@ def clean_storage(block_device): else: zap_disk(block_device) + is_ip = ip.is_ip ns_query = ip.ns_query get_host_ip = ip.get_host_ip diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 996e81cc..94fc996c 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -332,6 +332,8 @@ def config(scope=None): config_cmd_line = ['config-get'] if scope is not None: config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') config_cmd_line.append('--format=json') try: config_data = json.loads( diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 0f1b2f35..04cadb3a 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -732,3 +732,20 @@ def get_total_ram(): assert unit == 'kB', 'Unknown unit' return int(value) * 1024 # Classic, not KiB. raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) diff --git a/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py index 21559642..3de372fd 100644 --- a/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -5,7 +5,7 @@ def persistent_modprobe(module): """Load a kernel module and configure for auto-load on reboot.""" with open('/etc/modules', 'r+') as modules: if module not in modules.read(): - modules.write(module) + modules.write(module + "\n") def update_initramfs(version='all'): diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index fce496b2..39b9b801 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -105,6 +105,14 @@ 'newton/proposed': 'xenial-proposed/newton', 'xenial-newton/proposed': 'xenial-proposed/newton', 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/newton': 'xenial-proposed/ocata', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
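The new reset_cache flag on os_release() matters most right after a charm switches installation sources, because the module-level cache would otherwise keep returning the pre-upgrade codename. A sketch of that flow, illustrative only and not part of the patch (the package name and fatal flags are assumptions):

# Illustrative sketch, not part of the patch: re-deriving the OpenStack
# codename after moving to the Ocata cloud archive pocket added above.
from charmhelpers.contrib.openstack.utils import (
    configure_installation_source,
    os_release,
)
from charmhelpers.fetch import apt_install, apt_update

configure_installation_source('cloud:xenial-ocata')
apt_update(fatal=True)
apt_install('ceph-common', fatal=True)

# Without reset_cache=True a previously cached codename (e.g. 'newton')
# would still be returned; with it, the freshly installed package
# version determines the result.
release = os_release('ceph-common', reset_cache=True)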
diff --git a/ceph-osd/lib/ceph/ceph_helpers.py b/ceph-osd/lib/ceph/ceph_helpers.py index 124dbf87..60cc73fa 100644 --- a/ceph-osd/lib/ceph/ceph_helpers.py +++ b/ceph-osd/lib/ceph/ceph_helpers.py @@ -25,6 +25,7 @@ import errno import hashlib import math +from charmhelpers.contrib.network.ip import format_ipv6_addr import six import os @@ -32,7 +33,9 @@ import json import time import uuid +import re +import subprocess from subprocess import (check_call, check_output, CalledProcessError, ) from charmhelpers.core.hookenv import (config, local_unit, @@ -54,7 +57,8 @@ from charmhelpers.fetch import (apt_install, ) from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser +from charmhelpers.contrib.openstack.utils import config_flags_parser, \ + get_host_ip KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -68,6 +72,34 @@ clog to syslog = {use_syslog} """ +CRUSH_BUCKET = """root {name} {{ + id {id} # do not change unnecessarily + # weight 0.000 + alg straw + hash 0 # rjenkins1 +}} + +rule {name} {{ + ruleset 0 + type replicated + min_size 1 + max_size 10 + step take {name} + step chooseleaf firstn 0 type host + step emit +}}""" + +# This regular expression looks for a string like: +# root NAME { +# id NUMBER +# so that we can extract NAME and ID from the crushmap +CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") + +# This regular expression looks for ID strings in the crushmap like: +# id NUMBER +# so that we can extract the IDs from a crushmap +CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") + # The number of placement groups per OSD to target for placement group # calculations. This number is chosen as 100 due to the ceph PG Calc # documentation recommending to choose 100 for clusters which are not @@ -127,6 +159,107 @@ def __init__(self, message): super(PoolCreationError, self).__init__(message) +class Crushmap(object): + """An object oriented approach to Ceph crushmap management.""" + + def __init__(self): + """Initialize the Crushmap from Ceph""" + self._crushmap = self.load_crushmap() + roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) + buckets = [] + ids = list(map( + lambda x: int(x), + re.findall(CRUSHMAP_ID_RE, self._crushmap))) + ids.sort() + if roots != []: + for root in roots: + buckets.append(Crushmap.Bucket(root[0], root[1], True)) + + self._buckets = buckets + if ids != []: + self._ids = ids + else: + self._ids = [0] + + def load_crushmap(self): + try: + crush = subprocess.Popen( + ('ceph', 'osd', 'getcrushmap'), + stdout=subprocess.PIPE) + return subprocess.check_output( + ('crushtool', '-d', '-'), + stdin=crush.stdout).decode('utf-8') + except Exception as e: + log("load_crushmap error: {}".format(e)) + raise RuntimeError("Failed to read Crushmap") + + def buckets(self): + """Return a list of buckets that are in the Crushmap.""" + return self._buckets + + def add_bucket(self, bucket_name): + """Add a named bucket to Ceph""" + new_id = min(self._ids) - 1 + self._ids.append(new_id) + self._buckets.append(Crushmap.Bucket(bucket_name, new_id)) + + def save(self): + """Persist Crushmap to Ceph""" + try: + crushmap = self.build_crushmap() + compiled = subprocess.Popen( + ('crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'), + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + output = compiled.communicate(crushmap)[0] + ceph = subprocess.Popen( + ('ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'), + stdin=subprocess.PIPE) + ceph_output = ceph.communicate(input=output) + return
ceph_output + except Exception as e: + log("save error: {}".format(e)) + raise RuntimeError("Failed to save crushmap") + + def build_crushmap(self): + """Modifies the current crushmap to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + class Bucket(object): + """An object that describes a Crush bucket.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented + + class Pool(object): + """ An object oriented approach to Ceph pool creation. This base class is @@ -280,7 +413,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # highest value is used. To do this, find the nearest power of 2 such # that 2^n <= num_pg, check to see if its within the 25% tolerance. exponent = math.floor(math.log(num_pg, 2)) - nearest = 2**exponent + nearest = 2 ** exponent if (num_pg - nearest) > (num_pg * 0.25): # Choose the next highest power of 2 since the nearest is more # than 25% below the original value. @@ -1046,6 +1179,30 @@ def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): return True +def get_mon_hosts(): + """ + Helper function to gather up the ceph monitor host public addresses + :return: list. Returns a list of ip_address:port + """ + hosts = [] + for relid in relation_ids('mon'): + for unit in related_units(relid): + addr = \ + relation_get('ceph-public-address', + unit, + relid) or get_host_ip( + relation_get( + 'private-address', + unit, + relid)) + + if addr: + hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) + + hosts.sort() + return hosts + + def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): @@ -1160,6 +1317,7 @@ def exit_code(self): def exit_msg(self): return self.rsp.get('stderr') + # Ceph Broker Conversation: # If a charm needs an action to be taken by ceph it can create a CephBrokerRq # and send that request to ceph via the ceph relation. The CephBrokerRq has a diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 5150c171..62711158 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,7 +13,6 @@ tags: series: - xenial - trusty - - precise - yakkety description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 15d7e016..32e38c8b 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -225,7 +225,7 @@ def test_102_services(self): 'cinder-volume'], } - if self._get_openstack_release() < self.vivid_kilo: + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead.
ceph_services = [ diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index a39ed4c8..8e13ab14 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -546,7 +546,7 @@ def get_process_id_list(self, sentry_unit, process_name, raise if it is present. :returns: List of process IDs """ - cmd = 'pidof -x {}'.format(process_name) + cmd = 'pidof -x "{}"'.format(process_name) if not expect_success: cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 9e0b07fb..5c1ce457 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -69,9 +69,9 @@ def _determine_branch_locations(self, other_services): # Charms outside the ~openstack-charmers base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], } for svc in other_services: @@ -260,31 +260,20 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) + (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, + self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, } return releases[(self.series, self.openstack)] @@ -294,16 +283,10 @@ def _get_openstack_release_string(self): Return a string representing the openstack release. 
""" releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), ('xenial', 'mitaka'), ('yakkety', 'newton'), + ('zesty', 'ocata'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index e4546c8c..6a0ba837 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -28,6 +28,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3 from keystoneclient import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions import novaclient.client as nova_client import pika @@ -377,6 +378,16 @@ def authenticate_swift_user(self, keystone, user, password, tenant): tenant_name=tenant, auth_version='2.0') + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance, validate and return a resource pointer. diff --git a/ceph-osd/tests/gate-basic-xenial-ocata b/ceph-osd/tests/gate-basic-xenial-ocata new file mode 100644 index 00000000..2908c9b1 --- /dev/null +++ b/ceph-osd/tests/gate-basic-xenial-ocata @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-osd deployment on xenial-ocata.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='xenial', + openstack='cloud:xenial-ocata', + source='cloud:xenial-updates/ocata') + deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-yakkety-newton b/ceph-osd/tests/gate-basic-yakkety-newton old mode 100644 new mode 100755 diff --git a/ceph-osd/tests/gate-basic-zesty-ocata b/ceph-osd/tests/gate-basic-zesty-ocata new file mode 100644 index 00000000..d2d61e1b --- /dev/null +++ b/ceph-osd/tests/gate-basic-zesty-ocata @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-osd deployment on zesty-ocata.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='zesty') + deployment.run_tests() From 7a62504c2c5f58206e46a67232ae453162ba8025 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 7 Dec 2016 07:48:11 -0500 Subject: [PATCH 1250/2699] Add support for ceph-osd broker requests This is to support ceph-osd requesting movement of OSD devices into various buckets Change-Id: Ief548201e43860c88591c2ac814984a421c023e9 --- ceph-mon/hooks/ceph_hooks.py | 14 +++++++++++ ceph-mon/hooks/osd-relation-changed | 1 + ceph-mon/lib/ceph/__init__.py | 31 +++++++++++++++++++++-- ceph-mon/lib/ceph/ceph_broker.py | 38 ++++++++++++++++++++++++++++- ceph-mon/lib/ceph/ceph_helpers.py | 38 ++++++++++++++++++++++++++--- 5 files changed, 115 insertions(+), 7 deletions(-) create mode 120000 ceph-mon/hooks/osd-relation-changed diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 38b822c8..08c89d8d 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -362,6 +362,7 @@ def upgrade_keys(): @hooks.hook('osd-relation-joined') +@hooks.hook('osd-relation-changed') def osd_relation(relid=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') @@ -374,6 +375,19 @@ def osd_relation(relid=None): 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', caps=ceph.osd_upgrade_caps), } + + unit = remote_unit() + settings = relation_get(rid=relid, unit=unit) + """Process broker request(s).""" + if 'broker_req' in settings: + if ceph.is_leader(): + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data[unit_response_key] = rsp + else: + log("Not leader - ignoring broker request", level=DEBUG) + relation_set(relation_id=relid, relation_settings=data) # NOTE: radosgw key provision is gated on presence of OSD diff --git a/ceph-mon/hooks/osd-relation-changed b/ceph-mon/hooks/osd-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/osd-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index db3772b5..7f80b2c5 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -455,6 +455,33 @@ def __le__(self, other): return self.name < other.name +def get_osd_weight(osd_id): + """ + Returns the weight of the specified OSD + :return: Float :raise: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = subprocess.check_output( + ['ceph', 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['nodes']: + return None + for device in json_tree['nodes']: + if device['type'] == 'osd' and device['name'] == osd_id: + return device['crush_weight'] + except ValueError as v: + log("Unable to parse ceph tree json: {}. 
Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + def get_osd_tree(service): """ Returns the current osd map in JSON. @@ -1216,12 +1243,12 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, try: log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: if ignore_errors: log('Unable to initialize device: {}'.format(dev), WARNING) else: log('Unable to initialize device: {}'.format(dev), ERROR) - raise e + raise def osdize_dir(path, encrypt=False): diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 0892961e..33d0df8d 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -24,7 +24,11 @@ INFO, ERROR, ) -from ceph import get_cephfs +from ceph import ( + get_cephfs, + get_osd_weight +) +from ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -360,6 +364,36 @@ def handle_rgw_zone_set(request, service): os.unlink(infile.name) +def handle_put_osd_in_bucket(request, service): + osd_id = request.get('osd') + target_bucket = request.get('bucket') + if not osd_id or not target_bucket: + msg = "Missing OSD ID or Bucket" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + crushmap = Crushmap() + try: + crushmap.ensure_bucket_is_present(target_bucket) + check_output( + [ + 'ceph', + '--id', service, + 'osd', + 'crush', + 'set', + str(osd_id), + str(get_osd_weight(osd_id)), + "root={}".format(target_bucket) + ] + ) + + except Exception as exc: + msg = "Failed to move OSD " \ + "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + def handle_rgw_create_user(request, service): user_id = request.get('rgw-uid') display_name = request.get('display-name') @@ -534,6 +568,8 @@ def process_requests_v1(reqs): ret = handle_rgw_regionmap_default(request=req, service=svc) elif op == "rgw-create-user": ret = handle_rgw_create_user(request=req, service=svc) + elif op == "move-osd-to-bucket": + ret = handle_put_osd_in_bucket(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-mon/lib/ceph/ceph_helpers.py b/ceph-mon/lib/ceph/ceph_helpers.py index 31b19569..8e5c807f 100644 --- a/ceph-mon/lib/ceph/ceph_helpers.py +++ b/ceph-mon/lib/ceph/ceph_helpers.py @@ -25,6 +25,7 @@ import errno import hashlib import math +from charmhelpers.contrib.network.ip import format_ipv6_addr import six import os @@ -56,7 +57,8 @@ from charmhelpers.fetch import (apt_install, ) from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser +from charmhelpers.contrib.openstack.utils import config_flags_parser, \ + get_host_ip KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -191,6 +193,11 @@ def load_crushmap(self): log("load_crushmap error: {}".format(e)) raise "Failed to read Crushmap" + def ensure_bucket_is_present(self, bucket_name): + if bucket_name not in [bucket.name for bucket in self.buckets()]: + self.add_bucket(bucket_name) + self.save() + def buckets(self): """Return a list of buckets that are in the Crushmap.""" return self._buckets @@ -411,7 +418,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # highest value 
is used. To do this, find the nearest power of 2 such # that 2^n <= num_pg, check to see if its within the 25% tolerance. exponent = math.floor(math.log(num_pg, 2)) - nearest = 2**exponent + nearest = 2 ** exponent if (num_pg - nearest) > (num_pg * 0.25): # Choose the next highest power of 2 since the nearest is more # than 25% below the original value. @@ -421,7 +428,6 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): class ReplicatedPool(Pool): - def __init__(self, service, name, @@ -455,7 +461,6 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): - def __init__(self, service, name, @@ -1179,6 +1184,30 @@ def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): return True +def get_mon_hosts(): + """ + Helper function to gather up the ceph monitor host public addresses + :return: list. Returns a list of ip_address:port + """ + hosts = [] + for relid in relation_ids('mon'): + for unit in related_units(relid): + addr = \ + relation_get('ceph-public-address', + unit, + relid) or get_host_ip( + relation_get( + 'private-address', + unit, + relid)) + + if addr: + hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) + + hosts.sort() + return hosts + + def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): @@ -1293,6 +1322,7 @@ def exit_code(self): def exit_msg(self): return self.rsp.get('stderr') + # Ceph Broker Conversation: # If a charm needs an action to be taken by ceph it can create a CephBrokerRq # and send that request to ceph via the ceph relation. The CephBrokerRq has a From a6b36e8d972fe6308e98ed5e8f1ee711ad81868f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 9 Dec 2016 15:00:52 -0500 Subject: [PATCH 1251/2699] Add availability_zone to the OSD configuration Addition of configurable availability_zone allows the administrator to deploy Ceph with two dimensions of crush locations, one from config and one from Juju's availability zone Change-Id: Ic4410a94171b1d77f2a7c2bc56ed4c0dabb2b2d8 --- ceph-osd/config.yaml | 5 +++++ ceph-osd/hooks/ceph_hooks.py | 20 ++++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 0c47ca34..c4ebda54 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -179,6 +179,11 @@ options: description: | Setting this to true will tell Ceph to replicate across Juju's Availability Zone instead of specifically by host. 
+  availability_zone:
+    type: string
+    default:
+    description: |
+      Custom availability zone to provide to Ceph for the OSD placement
   max-sectors-kb:
     default: 1048576
     type: int
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 04db92b4..0b1b1718 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -190,9 +190,16 @@ def install():
 
 
 def az_info():
-    az_info = os.environ.get('JUJU_AVAILABILITY_ZONE')
-    log("AZ Info: " + az_info)
-    return az_info
+    az_info = ""
+    juju_az_info = os.environ.get('JUJU_AVAILABILITY_ZONE')
+    if juju_az_info:
+        az_info = "{} juju_availability_zone={}".format(az_info, juju_az_info)
+    config_az = config("availability_zone")
+    if config_az:
+        az_info = "{} config_availability_zone={}".format(az_info, config_az)
+    if az_info != "":
+        log("AZ Info: " + az_info)
+    return az_info
 
 
 def use_short_objects():
@@ -248,9 +255,10 @@ def get_ceph_context():
         cephcontext['cluster_addr'] = get_cluster_addr()
 
     if config('customize-failure-domain'):
-        if az_info():
-            cephcontext['crush_location'] = "root=default rack={} host={}" \
-                .format(az_info(), socket.gethostname())
+        az = az_info()
+        if az:
+            cephcontext['crush_location'] = "root=default {} host={}" \
+                .format(az, socket.gethostname())
         else:
             log(
                 "Your Juju environment doesn't"

From 55ab5dae71632d8fb911dfa3dcaeb63a902d75fe Mon Sep 17 00:00:00 2001
From: Sandor Zeestraten
Date: Sat, 10 Dec 2016 07:46:35 +0100
Subject: [PATCH 1252/2699] Update code block indentation in readme

Code blocks in the readme are indented with 3 spaces and not 4, which
makes Markdown in GitHub and the Charm store not render them correctly.
This updates the indentation to 4 spaces for all code blocks.

Change-Id: I2b3e60233ca9c4b8230e18f5c253593815d7fe09
---
 ceph-radosgw/README.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md
index a6729749..65643ad0 100644
--- a/ceph-radosgw/README.md
+++ b/ceph-radosgw/README.md
@@ -13,16 +13,16 @@ Usage
 In order to use this charm, it is assumed that you have already deployed a ceph
 storage cluster using the 'ceph' charm with something like this::
 
-   juju deploy -n 3 --config ceph.yaml ceph
+    juju deploy -n 3 --config ceph.yaml ceph
 
 To deploy the RADOS gateway simply do::
 
-   juju deploy ceph-radosgw
-   juju add-relation ceph-radosgw ceph
+    juju deploy ceph-radosgw
+    juju add-relation ceph-radosgw ceph
 
 You can then directly access the RADOS gateway by exposing the service::
 
-   juju expose ceph-radosgw
+    juju expose ceph-radosgw
 
 The gateway can be accessed over port 80 (as shown in juju status exposed
 ports).
@@ -33,7 +33,7 @@ Access
 Note that you will need to login to one of the service units supporting the
 ceph charm to generate some access credentials::
 
-   juju ssh ceph/0 \
+    juju ssh ceph/0 \
      'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
 
 For security reasons the ceph-radosgw charm is not set up with appropriate
@@ -46,8 +46,8 @@ Ceph >= 0.55 integrates with Openstack Keystone for authentication of Swift requ
 
 This is enabled by relating the ceph-radosgw service with keystone::
 
-   juju deploy keystone
-   juju add-relation keystone ceph-radosgw
+    juju deploy keystone
+    juju add-relation keystone ceph-radosgw
 
 If you try to relate the radosgw to keystone with an earlier version of ceph
 the hook will error out to let you know.
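To make the two-dimensional crush location from PATCH 1251 concrete (the zone,
rack and host names here are hypothetical): with JUJU_AVAILABILITY_ZONE=zone1
in the hook environment and availability_zone=rack3 set in charm config,
az_info() returns " juju_availability_zone=zone1 config_availability_zone=rack3",
and get_ceph_context() fills its template to produce roughly

    cephcontext['crush_location'] = \
        "root=default  juju_availability_zone=zone1 config_availability_zone=rack3 host=ceph-osd-0"

The leading space built up by the first format() call leaves doubled
whitespace after root=default, which should be harmless since the crush
location is parsed as whitespace-separated key=value pairs.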
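The next patch (1253) adds an add-disk action to ceph-osd. A usage sketch
(the unit, device and bucket names are hypothetical, and the exact CLI
depends on the Juju version):

    juju action do ceph-osd/0 add-disk osd-devices=/dev/sdd bucket=fast-ssds
    # or, with Juju 2.x:
    juju run-action ceph-osd/0 add-disk osd-devices=/dev/sdd bucket=fast-ssds

The action osdizes the device locally, then queues a move-osd-to-bucket
broker request over the mon relation; on the mon side,
handle_put_osd_in_bucket() ensures the bucket exists in the crushmap and
runs 'ceph osd crush set' with the OSD's current weight.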
From 09d4f329521b363d977fda885d7fc079bd1dfee2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 7 Dec 2016 15:32:03 -0500 Subject: [PATCH 1253/2699] Move a disk with an action A new action is implemented to allow an administrator to move an OSD into a different Ceph bucket Change-Id: I6f9a2bfa12e97b4437cfac67747b62741de81e53 --- ceph-osd/actions.yaml | 13 +++++- ceph-osd/actions/add-disk | 1 + ceph-osd/actions/add_disk.py | 73 +++++++++++++++++++++++++++++++ ceph-osd/lib/ceph/ceph_broker.py | 38 +++++++++++++++- ceph-osd/lib/ceph/ceph_helpers.py | 5 +++ 5 files changed, 128 insertions(+), 2 deletions(-) create mode 120000 ceph-osd/actions/add-disk create mode 100755 ceph-osd/actions/add_disk.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 945b065e..edb16684 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -36,4 +36,15 @@ replace-osd: required: [osd-number, replacement-device] additionalProperties: false list-disks: - description: List the unmounted disk on the specified unit \ No newline at end of file + description: List the unmounted disk on the specified unit +add-disk: + description: Add disk(s) to Ceph + params: + osd-devices: + type: string + description: The devices to format and set up as osd volumes. + bucket: + type: string + description: The name of the bucket in Ceph to add these devices into + required: + - osd-devices diff --git a/ceph-osd/actions/add-disk b/ceph-osd/actions/add-disk new file mode 120000 index 00000000..4379d79b --- /dev/null +++ b/ceph-osd/actions/add-disk @@ -0,0 +1 @@ +add_disk.py \ No newline at end of file diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py new file mode 100755 index 00000000..bbe79148 --- /dev/null +++ b/ceph-osd/actions/add_disk.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import psutil +import sys + +sys.path.append('lib') +sys.path.append('hooks') + +from charmhelpers.core.hookenv import ( + config, + action_get, +) + +from charmhelpers.contrib.storage.linux.ceph import ( + CephBrokerRq, + send_request_if_needed, +) + +import ceph + +from ceph_hooks import ( + get_journal_devices, +) + + +def add_device(request, device_path, bucket=None): + ceph.osdize(dev, config('osd-format'), + get_journal_devices(), config('osd-reformat'), + config('ignore-device-errors'), + config('osd-encrypt')) + # Make it fast! 
+ if config('autotune'): + ceph.tune_dev(dev) + mounts = filter(lambda disk: device_path + in disk.device, psutil.disk_partitions()) + if mounts: + osd = mounts[0] + osd_id = osd.mountpoint.split('/')[-1].split('-')[-1] + request.ops.append({ + 'op': 'move-osd-to-bucket', + 'osd': "osd.{}".format(osd_id), + 'bucket': bucket}) + return request + + +def get_devices(): + return [ + os.path.realpath(path) + for path in action_get('osd-devices').split(' ')] + + +if __name__ == "__main__": + request = CephBrokerRq() + for dev in get_devices(): + request = add_device(request=request, + device_path=dev, + bucket=action_get("bucket")) + send_request_if_needed(request, relation='mon') diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index 0892961e..33d0df8d 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -24,7 +24,11 @@ INFO, ERROR, ) -from ceph import get_cephfs +from ceph import ( + get_cephfs, + get_osd_weight +) +from ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -360,6 +364,36 @@ def handle_rgw_zone_set(request, service): os.unlink(infile.name) +def handle_put_osd_in_bucket(request, service): + osd_id = request.get('osd') + target_bucket = request.get('bucket') + if not osd_id or not target_bucket: + msg = "Missing OSD ID or Bucket" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + crushmap = Crushmap() + try: + crushmap.ensure_bucket_is_present(target_bucket) + check_output( + [ + 'ceph', + '--id', service, + 'osd', + 'crush', + 'set', + str(osd_id), + str(get_osd_weight(osd_id)), + "root={}".format(target_bucket) + ] + ) + + except Exception as exc: + msg = "Failed to move OSD " \ + "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + def handle_rgw_create_user(request, service): user_id = request.get('rgw-uid') display_name = request.get('display-name') @@ -534,6 +568,8 @@ def process_requests_v1(reqs): ret = handle_rgw_regionmap_default(request=req, service=svc) elif op == "rgw-create-user": ret = handle_rgw_create_user(request=req, service=svc) + elif op == "move-osd-to-bucket": + ret = handle_put_osd_in_bucket(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-osd/lib/ceph/ceph_helpers.py b/ceph-osd/lib/ceph/ceph_helpers.py index 60cc73fa..8e5c807f 100644 --- a/ceph-osd/lib/ceph/ceph_helpers.py +++ b/ceph-osd/lib/ceph/ceph_helpers.py @@ -193,6 +193,11 @@ def load_crushmap(self): log("load_crushmap error: {}".format(e)) raise "Failed to read Crushmap" + def ensure_bucket_is_present(self, bucket_name): + if bucket_name not in [bucket.name for bucket in self.buckets()]: + self.add_bucket(bucket_name) + self.save() + def buckets(self): """Return a list of buckets that are in the Crushmap.""" return self._buckets From bb0748b29f868861616cccba3051bd9e9f757c46 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Thu, 22 Dec 2016 16:09:56 -0700 Subject: [PATCH 1254/2699] Skip osd-devices not absolute paths This change skips over any devices which does not start with a leading folder separator ('/'). Allowing such entries causes an OSD to be created out of the charm directory. This can be caused by something as innocuous as 2 spaces between devices. The result is that the root device is also running an OSD, which is undesirable. 
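Concretely (the paths and charm directory below are illustrative): splitting
on a single space turns a doubled space into an empty entry, and
os.path.realpath('') resolves to the hook's working directory, which is the
charm directory:

    >>> "/dev/sdb  /dev/sdc".split(' ')
    ['/dev/sdb', '', '/dev/sdc']
    >>> os.path.realpath('')
    '/var/lib/juju/agents/unit-ceph-osd-0/charm'

Requiring os.path.isabs() on each stripped entry drops the empty string
(and any other relative path) before osdize can touch it.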
Change-Id: I0b5530dc4ec4306a9efedb090e583fb4e2089749 Closes-Bug: 1652175 --- ceph-osd/actions/add_disk.py | 10 +++++++--- ceph-osd/hooks/ceph_hooks.py | 15 +++++++++------ ceph-osd/unit_tests/test_config.py | 19 +++++++++++++++++++ 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index bbe79148..b1b36202 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -59,9 +59,13 @@ def add_device(request, device_path, bucket=None): def get_devices(): - return [ - os.path.realpath(path) - for path in action_get('osd-devices').split(' ')] + devices = [] + for path in action_get('osd-devices').split(' '): + path = path.strip() + if os.path.isabs(path): + devices.append(path) + + return devices if __name__ == "__main__": diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 0b1b1718..e2bcaeb5 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -213,7 +213,7 @@ def use_short_objects(): if config('osd-format') in ('ext4'): return True for device in config('osd-devices'): - if not device.startswith('/dev'): + if device and not device.startswith('/dev'): # TODO: determine format of directory based # OSD location return True @@ -412,12 +412,15 @@ def reformat_osd(): def get_devices(): + devices = [] if config('osd-devices'): - devices = [ - os.path.realpath(path) - for path in config('osd-devices').split(' ')] - else: - devices = [] + for path in config('osd-devices').split(' '): + path = path.strip() + # Make sure its a device which is specified using an + # absolute path so that the current working directory + # or any relative path under this directory is not used + if os.path.isabs(path): + devices.append(os.path.realpath(path)) # List storage instances for the 'osd-devices' # store declared for this charm too, and add diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index b4a85722..c3ae347e 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -72,6 +72,25 @@ def test_get_devices_multiple(self): self.test_config.set("osd-devices", "{} {}".format(device1, device2)) self.assertEqual([device1, device2], hooks.get_devices()) + def test_get_devices_extra_spaces(self): + """ + Multiple spaces do not result in additional devices. + """ + device1 = os.path.join(self.tmp_dir, "device1") + device2 = os.path.join(self.tmp_dir, "device2") + self.test_config.set("osd-devices", "{} {}".format(device1, device2)) + self.assertEqual([device1, device2], hooks.get_devices()) + + def test_get_devices_non_absolute_path(self): + """ + Charm does not allow relative paths as this may result in a path + on the root device/within the charm directory. + """ + device1 = os.path.join(self.tmp_dir, "device1") + device2 = "foo" + self.test_config.set("osd-devices", "{} {}".format(device1, device2)) + self.assertEqual([device1], hooks.get_devices()) + def test_get_devices_symlink(self): """ If a symlink is specified in osd-devices, get_devices() resolves From fa5aa0b5b445c9ade74b98463012662a946ed335 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 5 Jan 2017 09:53:47 -0800 Subject: [PATCH 1255/2699] Notify Clients on OSD Creation The MDS client has a race condition where if the osds don't come up fast enough the rpc requests sent to ceph_broker will never complete. 
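The fix is for osd_relation() on the mon to call notify_client() alongside
notify_radosgws(), so that any broker request deferred while no OSDs were
present (for example the MDS key and pool setup) is re-processed as soon as
OSD units join and the mons are in quorum. Roughly:

    osd relation hooks
        osd_relation()
            notify_radosgws()   # existing behaviour
            notify_client()     # new: re-drives deferred client broker requests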
Change-Id: I3f78a5a3231377f5542b81cec27dd4531144cfd1 --- ceph-mon/hooks/ceph_hooks.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 08c89d8d..6be26359 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -393,6 +393,7 @@ def osd_relation(relid=None): # NOTE: radosgw key provision is gated on presence of OSD # units so ensure that any deferred hooks are processed notify_radosgws() + notify_client() else: log('mon cluster not in quorum - deferring fsid provision') @@ -453,7 +454,7 @@ def radosgw_relation(relid=None, unit=None): def mds_relation_joined(relid=None, unit=None): if ceph.is_quorum() and related_osds(): log('mon cluster in quorum and OSDs related' - '- providing client with keys') + '- providing mds client with keys') mds_name = relation_get('mds-name') if not unit: unit = remote_unit() @@ -472,11 +473,11 @@ def mds_relation_joined(relid=None, unit=None): unit_response_key = 'broker-rsp-' + unit_id data[unit_response_key] = rsp else: - log("Not leader - ignoring broker request", level=DEBUG) + log("Not leader - ignoring mds broker request", level=DEBUG) relation_set(relation_id=relid, relation_settings=data) else: - log('mon cluster not in quorum - deferring key provision') + log('Waiting on mon quorum or min osds before provisioning mds keys') @hooks.hook('admin-relation-changed') From f0e979f08fbe6a745583b7b7f2d755920cd5e258 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 10 Nov 2016 12:59:28 -0600 Subject: [PATCH 1256/2699] Add .gitreview and clean up repo Several updates to make this consistent with other OpenStack charms repos, and to enable Gerrit + OSCI test gates. - Update .gitignore. - Add .gitreview for gerrit. - Add copyright file, and copyright headers. - Add tox.ini and src/tox.ini - Add tests README - Fix layer.yaml - Add unit tests. - Add amulet tests. - fixed lint. 
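One of the fixes below is worth calling out: str.format() returns a new
string rather than modifying the receiver, so the quota actions' original

    attr = "ceph.quota.{}"
    if max_files:
        attr.format("max_files")

left attr as the literal "ceph.quota.{}", and xattr was then invoked against
a nonexistent attribute name. The renamed get_quota.py, remove_quota.py and
set_quota.py now assign the result back, e.g. attr = attr.format("max_files").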
See running bug/TODO list: - https://bugs.launchpad.net/charm-ceph-fs Change-Id: Iaec5f25706387f9b59b36e307e954318b23c9417 Depends-On: I3f78a5a3231377f5542b81cec27dd4531144cfd1 Closes-Bug: 1640895 Closes-Bug: 1640896 Closes-Bug: 1640898 Closes-Bug: 1640900 Closes-Bug: 1640902 Closes-Bug: 1653767 --- ceph-fs/.gitignore | 7 + ceph-fs/.gitreview | 4 + ceph-fs/.testr.conf | 8 + ceph-fs/requirements.txt | 4 + ceph-fs/src/actions.yaml | 4 +- ceph-fs/src/actions/__init__.py | 1 + ceph-fs/src/actions/get-quota | 2 +- .../actions/{get-quota.py => get_quota.py} | 4 +- ceph-fs/src/actions/remove-quota | 2 +- .../{remove-quota.py => remove_quota.py} | 7 +- ceph-fs/src/actions/set-quota | 2 +- .../actions/{set-quota.py => set_quota.py} | 4 +- ceph-fs/src/config.yaml | 3 - ceph-fs/src/copyright | 6 + ceph-fs/src/layer.yaml | 8 +- ceph-fs/src/metadata.yaml | 1 + ceph-fs/src/reactive/ceph_fs.py | 24 +- ceph-fs/src/test-requirements.txt | 23 + ceph-fs/src/tests/00-setup | 5 - ceph-fs/src/tests/10-deploy | 31 - ceph-fs/src/tests/README.md | 9 + ceph-fs/src/tests/basic_deployment.py | 264 ++++ ceph-fs/src/tests/charmhelpers/__init__.py | 36 + .../tests/charmhelpers/contrib/__init__.py | 13 + .../charmhelpers/contrib/amulet/__init__.py | 13 + .../charmhelpers/contrib/amulet/deployment.py | 97 ++ .../charmhelpers/contrib/amulet/utils.py | 827 ++++++++++++ .../contrib/openstack/__init__.py | 13 + .../contrib/openstack/amulet/__init__.py | 13 + .../contrib/openstack/amulet/deployment.py | 345 +++++ .../contrib/openstack/amulet/utils.py | 1124 +++++++++++++++++ ceph-fs/src/tests/gate-basic-xenial-mitaka | 23 + ceph-fs/src/tests/tests.yaml | 17 + ceph-fs/src/tox.ini | 53 + ceph-fs/test-requirements.txt | 7 + ceph-fs/tox.ini | 40 +- ceph-fs/unit_tests/__init__.py | 42 + ceph-fs/unit_tests/test_actions.py | 79 ++ ceph-fs/unit_tests/test_utils.py | 116 ++ 39 files changed, 3212 insertions(+), 69 deletions(-) create mode 100644 ceph-fs/.gitreview create mode 100644 ceph-fs/.testr.conf create mode 100644 ceph-fs/requirements.txt create mode 100644 ceph-fs/src/actions/__init__.py rename ceph-fs/src/actions/{get-quota.py => get_quota.py} (94%) rename ceph-fs/src/actions/{remove-quota.py => remove_quota.py} (89%) rename ceph-fs/src/actions/{set-quota.py => set_quota.py} (94%) create mode 100644 ceph-fs/src/copyright create mode 100644 ceph-fs/src/test-requirements.txt delete mode 100755 ceph-fs/src/tests/00-setup delete mode 100755 ceph-fs/src/tests/10-deploy create mode 100644 ceph-fs/src/tests/README.md create mode 100644 ceph-fs/src/tests/basic_deployment.py create mode 100644 ceph-fs/src/tests/charmhelpers/__init__.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/__init__.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py create mode 100755 ceph-fs/src/tests/gate-basic-xenial-mitaka create mode 100644 ceph-fs/src/tests/tests.yaml create mode 100644 ceph-fs/src/tox.ini create mode 100644 ceph-fs/test-requirements.txt create mode 100644 ceph-fs/unit_tests/__init__.py create mode 100644 
ceph-fs/unit_tests/test_actions.py create mode 100644 ceph-fs/unit_tests/test_utils.py diff --git a/ceph-fs/.gitignore b/ceph-fs/.gitignore index 485dee64..a759aa3f 100644 --- a/ceph-fs/.gitignore +++ b/ceph-fs/.gitignore @@ -1 +1,8 @@ +build +.tox +layers +interfaces +.testrepository +__pycache__ +*.pyc .idea diff --git a/ceph-fs/.gitreview b/ceph-fs/.gitreview new file mode 100644 index 00000000..c1131ca5 --- /dev/null +++ b/ceph-fs/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-fs.git diff --git a/ceph-fs/.testr.conf b/ceph-fs/.testr.conf new file mode 100644 index 00000000..801646bb --- /dev/null +++ b/ceph-fs/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt new file mode 100644 index 00000000..6fe30907 --- /dev/null +++ b/ceph-fs/requirements.txt @@ -0,0 +1,4 @@ +# Requirements to build the charm +charm-tools +simplejson +flake8 diff --git a/ceph-fs/src/actions.yaml b/ceph-fs/src/actions.yaml index 8620d24b..c3dd0f57 100644 --- a/ceph-fs/src/actions.yaml +++ b/ceph-fs/src/actions.yaml @@ -2,7 +2,7 @@ get-quota: description: View quota settings on a directory params: max-files: - type: boolean + type: integer description: | The limit of how many files can be written. Use either this or max-bytes but not both. The action tries max-files first and then @@ -23,7 +23,7 @@ remove-quota: description: Remove a quota on a directory params: max-files: - type: boolean + type: integer description: | The limit of how many files can be written. Use either this or max-bytes but not both. 
The action tries max-files first and then diff --git a/ceph-fs/src/actions/__init__.py b/ceph-fs/src/actions/__init__.py new file mode 100644 index 00000000..bd8bf091 --- /dev/null +++ b/ceph-fs/src/actions/__init__.py @@ -0,0 +1 @@ +__author__ = 'Chris Holcombe ' diff --git a/ceph-fs/src/actions/get-quota b/ceph-fs/src/actions/get-quota index a1d07b46..075a7983 120000 --- a/ceph-fs/src/actions/get-quota +++ b/ceph-fs/src/actions/get-quota @@ -1 +1 @@ -get-quota.py \ No newline at end of file +get_quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/get-quota.py b/ceph-fs/src/actions/get_quota.py similarity index 94% rename from ceph-fs/src/actions/get-quota.py rename to ceph-fs/src/actions/get_quota.py index c9c17f66..683395ad 100755 --- a/ceph-fs/src/actions/get-quota.py +++ b/ceph-fs/src/actions/get_quota.py @@ -29,9 +29,9 @@ def get_quota(): action_fail("Directory must exist before setting quota") attr = "ceph.quota.{}" if max_files: - attr.format("max_files") + attr = attr.format("max_files") elif max_bytes: - attr.format("max_bytes") + attr = attr.format("max_bytes") try: quota_value = xattr.getxattr(directory, attr) diff --git a/ceph-fs/src/actions/remove-quota b/ceph-fs/src/actions/remove-quota index dee30392..0e3dad46 120000 --- a/ceph-fs/src/actions/remove-quota +++ b/ceph-fs/src/actions/remove-quota @@ -1 +1 @@ -remove-quota.py \ No newline at end of file +remove_quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/remove-quota.py b/ceph-fs/src/actions/remove_quota.py similarity index 89% rename from ceph-fs/src/actions/remove-quota.py rename to ceph-fs/src/actions/remove_quota.py index c068cc2d..c647bdb1 100755 --- a/ceph-fs/src/actions/remove-quota.py +++ b/ceph-fs/src/actions/remove_quota.py @@ -14,7 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os -from charmhelpers.core.hookenv import action_get, action_fail, action_set + +from charmhelpers.core.hookenv import action_get, action_fail import xattr __author__ = 'Chris Holcombe ' @@ -29,9 +30,9 @@ def remove_quota(): action_fail("Directory must exist before setting quota") attr = "ceph.quota.{}" if max_files: - attr.format("max_files") + attr = attr.format("max_files") elif max_bytes: - attr.format("max_bytes") + attr = attr.format("max_bytes") try: xattr.setxattr(directory, attr, str(0)) diff --git a/ceph-fs/src/actions/set-quota b/ceph-fs/src/actions/set-quota index 79af9e29..9ab54db1 120000 --- a/ceph-fs/src/actions/set-quota +++ b/ceph-fs/src/actions/set-quota @@ -1 +1 @@ -set-quota.py \ No newline at end of file +set_quota.py \ No newline at end of file diff --git a/ceph-fs/src/actions/set-quota.py b/ceph-fs/src/actions/set_quota.py similarity index 94% rename from ceph-fs/src/actions/set-quota.py rename to ceph-fs/src/actions/set_quota.py index bba5a502..83b4429e 100755 --- a/ceph-fs/src/actions/set-quota.py +++ b/ceph-fs/src/actions/set_quota.py @@ -30,10 +30,10 @@ def set_quota(): attr = "ceph.quota.{}" value = None if max_files: - attr.format("max_files") + attr = attr.format("max_files") value = str(max_files) elif max_bytes: - attr.format("max_bytes") + attr = attr.format("max_bytes") value = str(max_bytes) try: diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 3b86e9ef..bc40876c 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -1,7 +1,4 @@ options: - apt: - packages: - - python3-pyxattr ceph-public-network: type: string default: diff --git a/ceph-fs/src/copyright b/ceph-fs/src/copyright new file mode 100644 index 00000000..17795a5e --- /dev/null +++ b/ceph-fs/src/copyright @@ -0,0 +1,6 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2016, Canonical Ltd +License: Apache-2.0 + diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index 9f2a750b..c691470b 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,2 +1,6 @@ -includes: ['layer:apt', 'layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds'] # if you use any interfaces, add them here -repo: git@github.com:cholcombe973/charm-ceph-fs.git +includes: ['layer:apt', 'layer:ceph-base', 'interface:ceph-mds'] # if you use any interfaces, add them here +options: + apt: + packages: + - python3-pyxattr +repo: https://git.openstack.org/openstack/charm-ceph-fs diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 291ff86f..257aed4c 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - yakkety subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index e9415292..ca6ace6c 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -1,19 +1,38 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import os import socket import subprocess from charms.reactive import when, when_not, set_state from charmhelpers.core.hookenv import ( - config, log, ERROR, service_name) + application_version_set, config, log, ERROR) from charmhelpers.core.host import service_restart from charmhelpers.contrib.network.ip import ( get_address_in_network ) +from charmhelpers.fetch import ( + get_upstream_version, +) + import jinja2 from charms.apt import queue_install TEMPLATES_DIR = 'templates' +VERSION_PACKAGE = 'ceph-common' def render_template(template_name, context, template_dir=TEMPLATES_DIR): @@ -35,6 +54,7 @@ def setup_mds(relation): try: service_restart('ceph-mds') set_state('cephfs.started') + application_version_set(get_upstream_version(VERSION_PACKAGE)) except subprocess.CalledProcessError as err: log(message='Error: {}'.format(err), level=ERROR) @@ -77,10 +97,8 @@ def config_changed(ceph_client): key_file.write("[mds.{}]\n\tkey = {}\n".format( socket.gethostname(), ceph_client.mds_key() - # ceph_client.mds_bootstrap_key() )) except IOError as err: - log("IOError writing mds-a.keyring: {}".format(err)) set_state('cephfs.configured') diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt new file mode 100644 index 00000000..613b0812 --- /dev/null +++ b/ceph-fs/src/test-requirements.txt @@ -0,0 +1,23 @@ +# charm-proof +charm-tools>=2.0.0 +# amulet deployment helpers +bzr+lp:charm-helpers#egg=charmhelpers +# BEGIN: Amulet OpenStack Charm Helper Requirements +# Liberty client lower constraints +amulet>=1.14.3,<2.0 +bundletester>=0.6.1,<1.0 +aodhclient>=0.1.0 +python-ceilometerclient>=1.5.0,<2.0 +python-cinderclient>=1.4.0,<2.0 +python-glanceclient>=1.1.0,<2.0 +python-heatclient>=0.8.0,<1.0 +python-keystoneclient>=1.7.1,<2.0 +python-neutronclient>=3.1.0,<4.0 +python-novaclient>=2.30.1,<3.0 +python-openstackclient>=1.7.0,<2.0 +python-swiftclient>=2.6.0,<3.0 +pika>=0.10.0,<1.0 +distro-info +# END: Amulet OpenStack Charm Helper Requirements +# NOTE: workaround for 14.04 pip/tox +pytz diff --git a/ceph-fs/src/tests/00-setup b/ceph-fs/src/tests/00-setup deleted file mode 100755 index f0616a56..00000000 --- a/ceph-fs/src/tests/00-setup +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -sudo add-apt-repository ppa:juju/stable -y -sudo apt-get update -sudo apt-get install amulet python-requests -y diff --git a/ceph-fs/src/tests/10-deploy b/ceph-fs/src/tests/10-deploy deleted file mode 100755 index d5778b04..00000000 --- a/ceph-fs/src/tests/10-deploy +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/python3 - -import amulet -import requests -import unittest - - -class TestCharm(unittest.TestCase): - def setUp(self): - self.d = amulet.Deployment() - - self.d.add('charm-ceph-fs') - self.d.expose('charm-ceph-fs') - - self.d.setup(timeout=900) - self.d.sentry.wait() - - self.unit = self.d.sentry['charm-ceph-fs'][0] - - def test_service(self): - # test we can access over http - page = requests.get('http://{}'.format(self.unit.info['public-address'])) - self.assertEqual(page.status_code, 200) - # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform - # more in-depth steps. 
Each self.d.sentry[SERVICE][UNIT] has the following methods: - # - .info - An array of the information of that unit from Juju - # - .file(PATH) - Get the details of a file on that unit - # - .file_contents(PATH) - Get plain text output of PATH file from that unit - # - .directory(PATH) - Get details of directory - # - .directory_contents(PATH) - List files and folders in PATH on that unit - # - .relation(relation, service:rel) - Get relation data from return service diff --git a/ceph-fs/src/tests/README.md b/ceph-fs/src/tests/README.md new file mode 100644 index 00000000..046be7fb --- /dev/null +++ b/ceph-fs/src/tests/README.md @@ -0,0 +1,9 @@ +# Overview + +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) +section of the OpenStack Charm Guide. diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py new file mode 100644 index 00000000..28683bb5 --- /dev/null +++ b/ceph-fs/src/tests/basic_deployment.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(DEBUG) + + +class CephFsBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph deployment.""" + + def __init__(self, series=None, openstack=None, source=None, stable=False): + """Deploy the entire test environment.""" + super(CephFsBasicDeployment, self).__init__(series, + openstack, + source, + stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = [] + + # Wait for deployment ready msgs, except exclusions + self._auto_wait_for_status(exclude_services=exclude_services) + + self.d.sentry.wait() + self._initialize_tests() + + def _add_services(self, **kwargs): + """Add services + + Add the services that we're testing, where cephfs is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). + :param **kwargs: + """ + this_service = {'name': 'ceph-fs', 'units': 1} + other_services = [ + {'name': 'ceph-mon', 'units': 3}, + {'name': 'ceph-osd', 'units': 3}, + ] + super(CephFsBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self, **kwargs): + """Add all of the relations for the services. 
+ :param **kwargs: + """ + relations = { + 'ceph-osd:mon': 'ceph-mon:osd', + 'ceph-fs:ceph-mds': 'ceph-mon:mds', + } + super(CephFsBasicDeployment, self)._add_relations(relations) + + def _configure_services(self, **kwargs): + """Configure all of the services. + :param **kwargs: + """ + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. + ceph_mon_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + } + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. + ceph_osd_config = { + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + } + + configs = { + 'ceph-mon': ceph_mon_config, + 'ceph-osd': ceph_osd_config} + super(CephFsBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0] + self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] + self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] + + self.ceph_mon0_sentry = self.d.sentry['ceph-mon'][0] + self.ceph_mon1_sentry = self.d.sentry['ceph-mon'][1] + self.ceph_mon2_sentry = self.d.sentry['ceph-mon'][2] + + self.ceph_mds_sentry = self.d.sentry['ceph-fs'][0] + + def test_100_ceph_processes(self): + """Verify that the expected service processes are running + on each ceph unit.""" + + # Process name and quantity of processes to expect on each unit + ceph_mon_processes = { + 'ceph-mon': 1 + } + ceph_osd_processes = { + 'ceph-osd': 2 + } + ceph_mds_processes = { + 'ceph-mds': 1 + } + + # Units with process names and PID quantities expected + expected_processes = { + self.ceph_mon0_sentry: ceph_mon_processes, + self.ceph_mon1_sentry: ceph_mon_processes, + self.ceph_mon2_sentry: ceph_mon_processes, + self.ceph_osd0_sentry: ceph_osd_processes, + self.ceph_osd1_sentry: ceph_osd_processes, + self.ceph_osd2_sentry: ceph_osd_processes, + self.ceph_mds_sentry: ceph_mds_processes + } + + actual_pids = u.get_unit_process_ids(expected_processes) + ret = u.validate_unit_process_ids(expected_processes, actual_pids) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_102_services(self): + """Verify the expected services are running on the service units.""" + services = {} + + if self._get_openstack_release() < self.xenial_mitaka: + # For upstart systems only. Ceph services under systemd + # are checked by process name instead. 
+ ceph_services = [ + 'ceph-mon-all', + 'ceph-mon id=`hostname`' + ] + services[self.ceph_mon0_sentry] = ceph_services + services[self.ceph_mon1_sentry] = ceph_services + services[self.ceph_mon2_sentry] = ceph_services + + ceph_osd_services = [ + 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), + ] + + services[self.ceph_osd0_sentry] = ceph_osd_services + + ret = u.validate_services_by_name(services) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_300_ceph_config(self): + """Verify the data in the ceph config file.""" + u.log.debug('Checking ceph config file data...') + unit = self.ceph_mon0_sentry + conf = '/etc/ceph/ceph.conf' + expected = { + 'global': { + 'keyring': '/etc/ceph/$cluster.$name.keyring', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false', + 'mon cluster log to syslog': 'false', + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none' + }, + 'mon': { + 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' + }, + 'mds': { + 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' + }, + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_400_ceph_check_osd_pools(self): + """Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present.""" + u.log.debug('Checking pools on ceph units...') + expected_pools = ['rbd', 'ceph-fs_data', 'ceph-fs_metadata'] + results = [] + sentries = [ + self.ceph_mon0_sentry, + self.ceph_mon1_sentry, + self.ceph_mon2_sentry + ] + + # Check for presence of expected pools on each unit + u.log.debug('Expected pools: {}'.format(expected_pools)) + for sentry_unit in sentries: + pools = u.get_ceph_pools(sentry_unit) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: + msg = ('{} does not have pool: ' + '{}'.format(sentry_unit.info['unit_name'], + expected_pool)) + amulet.raise_status(amulet.FAIL, msg=msg) + u.log.debug('{} has (at least) the expected ' + 'pools.'.format(sentry_unit.info['unit_name'])) + + # Check that all units returned the same pool name:id data + ret = u.validate_list_of_identical_dicts(results) + if ret: + u.log.debug('Pool list results: {}'.format(results)) + msg = ('{}; Pool list results are not identical on all ' + 'ceph units.'.format(ret)) + amulet.raise_status(amulet.FAIL, msg=msg) + else: + u.log.debug('Pool list on all ceph units produced the ' + 'same results (OK).') + + def test_499_ceph_cmds_exit_zero(self): + """Check basic functionality of ceph cli commands against + all ceph units.""" + sentry_units = [ + self.ceph_mon0_sentry, + self.ceph_mon1_sentry, + self.ceph_mon2_sentry + ] + commands = [ + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + 'sudo ceph fs ls', + ] + ret = u.check_commands_on_units(commands, sentry_units) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + # FYI: No restart check as ceph services do not restart + # when charm config changes, unless monitor count increases. 
diff --git a/ceph-fs/src/tests/charmhelpers/__init__.py b/ceph-fs/src/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..48867880 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/ceph-fs/src/tests/charmhelpers/contrib/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..9c65518e --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,97 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import os +import six + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. + """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in six.iteritems(relations): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) + try: + self.d.setup(timeout=timeout) + self.d.sentry.wait(timeout=timeout) + except amulet.helpers.TimeoutError: + amulet.raise_status( + amulet.FAIL, + msg="Deployment timed out ({}s)".format(timeout) + ) + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..a39ed4c8 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,827 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. 
+ self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. + + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. + + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. 
+ # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=None, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=None, sleep_time=20, + retry_count=30, retry_sleep_time=10): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep time (s) before looking for file + retry_sleep_time (int): Time (s) to sleep between retries + retry_count (int): If file is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. + """ + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) + time.sleep(sleep_time) + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError as e: + # NOTE(beisner) - race avoidance, proc may not exist yet. 
+    def service_restarted_since(self, sentry_unit, mtime, service,
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
+        """Check if service was started after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if service found and its start time is newer than mtime,
+                False if service is older than mtime or if service was
+                not found.
+        """
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
+        time.sleep(sleep_time)
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not proc_start_time:
+            self.log.warn('No proc start time found, assuming service did '
+                          'not start')
+            return False
+        if proc_start_time >= mtime:
+            self.log.debug('Proc start time is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
+            return False
+
+    def config_updated_since(self, sentry_unit, filename, mtime,
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
+        """Check if file was modified after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check the file mtime on
+          filename (string): The file to check mtime of
+          mtime (float): The epoch time to check against
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if file was modified more recently than mtime, False if
+                file was modified before mtime, or if file not found.
+        """
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
+        time.sleep(sleep_time)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
+        if file_mtime >= mtime:
+            self.log.debug('File mtime is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('File mtime is older than provided mtime '
+                          '(%s < %s) on %s' % (file_mtime,
+                                               mtime, unit_name))
+            return False
+
+    def validate_service_config_changed(self, sentry_unit, mtime, service,
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
+        """Check that a service and its config file were updated after mtime.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          filename (string): The file to check mtime of
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
+          retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
+
+        Typical Usage:
+            u = OpenStackAmuletUtils(ERROR)
+            ...
+            mtime = u.get_sentry_time(self.cinder_sentry)
+            self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+            if not u.validate_service_config_changed(self.cinder_sentry,
+                                                     mtime,
+                                                     'cinder-api',
+                                                     '/etc/cinder/cinder.conf'):
+                amulet.raise_status(amulet.FAIL, msg='update failed')
+
+        Returns:
+          bool: True if both service and file were updated/restarted after
+                mtime, False if service is older than mtime or if service was
+                not found or if filename was modified before mtime.
+        """
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        return service_restart and config_update
+
+    def get_sentry_time(self, sentry_unit):
+        """Return current epoch time on a sentry"""
+        cmd = "date +'%s'"
+        return float(sentry_unit.run(cmd)[0])
+
+    def relation_error(self, name, data):
+        return 'unexpected relation data in {} - {}'.format(name, data)
+
+    def endpoint_error(self, name, data):
+        return 'unexpected endpoint data in {} - {}'.format(name, data)
+
+    def get_ubuntu_releases(self):
+        """Return a list of all Ubuntu releases in order of release."""
+        _d = distro_info.UbuntuDistroInfo()
+        _release_list = _d.all
+        return _release_list
+
+    def file_to_url(self, file_rel_path):
+        """Convert a relative file path to a file URL."""
+        _abs_path = os.path.abspath(file_rel_path)
+        return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+    def check_commands_on_units(self, commands, sentry_units):
+        """Check that all commands in a list exit zero on all
+        sentry units in a list.
+
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        self.log.debug('Checking exit codes for {} commands on {} '
+                       'sentry units...'.format(len(commands),
+                                                len(sentry_units)))
+        for sentry_unit in sentry_units:
+            for cmd in commands:
+                output, code = sentry_unit.run(cmd)
+                if code == 0:
+                    self.log.debug('{} `{}` returned {} '
+                                   '(OK)'.format(sentry_unit.info['unit_name'],
+                                                 cmd, code))
+                else:
+                    return ('{} `{}` returned {} '
+                            '{}'.format(sentry_unit.info['unit_name'],
+                                        cmd, code, output))
+        return None
+
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True):
+        """Get a list of process ID(s) from a single sentry juju unit
+        for a single process name.
+
+        :param sentry_unit: Amulet sentry instance (juju unit)
+        :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
+        :returns: List of process IDs
+        """
+        cmd = 'pidof -x {}'.format(process_name)
+        if not expect_success:
+            cmd += " || exit 0 && exit 1"
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output).split()
+
+    def get_unit_process_ids(self, unit_processes, expect_success=True):
+        """Construct a dict containing unit sentries, process names, and
+        process IDs.
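The `|| exit 0 && exit 1` suffix in `get_process_id_list` inverts the exit status, so an absent process counts as success when `expect_success=False`. The PID lists it collects feed `validate_unit_process_ids` (continued below), whose expected values may be an int, a bool, or a list. An illustrative expectation dict, with placeholder stand-ins for the Amulet sentries:

    osd_sentry = object()  # placeholder for an Amulet sentry instance
    api_sentry = object()  # placeholder for an Amulet sentry instance

    expected_pids = {
        osd_sentry: {
            'ceph-osd': [2, 3],     # list: actual PID count must be one of these
            'ceph-mon': 1,          # int: exactly this many PIDs
        },
        api_sentry: {
            'apache2': True,        # True: one or more PIDs must exist
            'stale-daemon': False,  # False: no PIDs may exist
        },
    }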
+ + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids, a_pids_length, + a_pids)) + + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). 
Such as openstack-dashboard or rabbitmq confs."""
+        for line in file_contents.split('\n'):
+            if '=' in line:
+                # split on the first '=' only, so values which themselves
+                # contain '=' are preserved intact
+                args = line.split('=', 1)
+                if len(args) <= 1:
+                    continue
+                key = args[0].strip()
+                value = args[1].strip()
+                if key in expected.keys():
+                    if expected[key] != value:
+                        msg = ('Config mismatch. Expected, actual: {}, '
+                               '{}'.format(expected[key], value))
+                        amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def get_unit_hostnames(self, units):
+        """Return a dict of juju unit names to hostnames."""
+        host_names = {}
+        for unit in units:
+            host_names[unit.info['unit_name']] = \
+                str(unit.file_contents('/etc/hostname').strip())
+        self.log.debug('Unit host names: {}'.format(host_names))
+        return host_names
+
+    def run_cmd_unit(self, sentry_unit, cmd):
+        """Run a command on a unit, return the output and exit code."""
+        output, code = sentry_unit.run(cmd)
+        if code == 0:
+            self.log.debug('{} `{}` command returned {} '
+                           '(OK)'.format(sentry_unit.info['unit_name'],
+                                         cmd, code))
+        else:
+            msg = ('{} `{}` command returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output), code
+
+    def file_exists_on_unit(self, sentry_unit, file_name):
+        """Check if a file exists on a unit."""
+        try:
+            sentry_unit.file_stat(file_name)
+            return True
+        except IOError:
+            return False
+        except Exception as e:
+            msg = 'Error checking file {}: {}'.format(file_name, e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def file_contents_safe(self, sentry_unit, file_name,
+                           max_wait=60, fatal=False):
+        """Get file contents from a sentry unit. Wrap amulet file_contents
+        with retry logic to address races where a file checks as existing,
+        but no longer exists by the time file_contents is called.
+        Return None if file not found. Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
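The port checks defined next return None on success and a message string on failure, matching the other validators in this class. A hedged usage sketch, assuming `u` is the AmuletUtils instance and `sentry_units` a list of rabbitmq sentries with ssl turned off:

    # Every unit should accept amqp connections on 5672; with ssl off,
    # nothing should be listening on 5671, so invert the expectation.
    assert u.port_knock_units(sentry_units, port=5672) is None
    assert u.port_knock_units(sentry_units, port=5671,
                              expect_success=False) is None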
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time. Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+    # amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is used for dependency injection.
+
+        @return action_id.
+        """
+        unit_id = unit_sentry.info["unit_name"]
+        command = ["juju", "action", "do", "--format=json", unit_id, action]
+        if params is not None:
+            for key, value in six.iteritems(params):
+                command.append("{}={}".format(key, value))
+        self.log.info("Running command: %s\n" % " ".join(command))
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        action_id = data[u'Action queued with id']
+        return action_id
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+
+        _check_output parameter is used for dependency injection.
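Taken together, `run_action` and `wait_on_action` wrap the `juju action do` / `juju action fetch` CLI of that era. A hedged usage sketch, assuming `u` is the utils instance, `unit` an Amulet sentry, and a charm that exposes a `pause` action:

    action_id = u.run_action(unit, 'pause')
    assert u.wait_on_action(action_id), 'pause action did not complete'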
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..9e0b07fb --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,345 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. 
+
+    This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms.
+    """
+
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment: init')
+        self.openstack = openstack
+        self.source = source
+        self.stable = stable
+
+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
+    def _determine_branch_locations(self, other_services):
+        """Determine the branch locations for the other services.
+
+        Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+        stable or next branches for the other_services."""
+
+        self.log.info('OpenStackAmuletDeployment: determine branch locations')
+
+        # Charms outside the ~openstack-charmers namespace
+        base_charms = {
+            'mysql': ['precise', 'trusty'],
+            'mongodb': ['precise', 'trusty'],
+            'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
+        }
+
+        for svc in other_services:
+            # If a location has been explicitly set, use it
+            if svc.get('location'):
+                continue
+            if svc['name'] in base_charms:
+                # NOTE: not all charms have support for all series we
+                #       want/need to test against, so fix to most recent
+                #       that each base charm supports
+                target_series = self.series
+                if self.series not in base_charms[svc['name']]:
+                    target_series = base_charms[svc['name']][-1]
+                svc['location'] = 'cs:{}/{}'.format(target_series,
+                                                    svc['name'])
+            elif self.stable:
+                svc['location'] = 'cs:{}/{}'.format(self.series,
+                                                    svc['name'])
+            else:
+                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+                    self.series,
+                    svc['name']
+                )
+
+        return other_services
+
+    def _add_services(self, this_service, other_services, use_source=None,
+                      no_origin=None):
+        """Add services to the deployment and optionally set
+        openstack-origin/source.
+
+        :param this_service dict: Service dictionary describing the service
+                                  whose amulet tests are being run
+        :param other_services dict: List of service dictionaries describing
+                                    the services needed to support the target
+                                    service
+        :param use_source list: List of services which use the 'source' config
+                                option rather than 'openstack-origin'
+        :param no_origin list: List of services which do not support setting
+                               the Cloud Archive.
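The location rules in `_determine_branch_locations` reduce to three cases. A minimal re-statement with worked assertions (the `charm_location` function and the xenial series are illustrative assumptions, not part of the helpers):

    def charm_location(name, series, stable, base_charms):
        """Sketch of the branch-location rules above."""
        if name in base_charms:
            if series not in base_charms[name]:
                series = base_charms[name][-1]  # newest supported series
            return 'cs:{}/{}'.format(series, name)
        if stable:
            return 'cs:{}/{}'.format(series, name)
        return 'cs:~openstack-charmers-next/{}/{}'.format(series, name)

    base = {'mysql': ['precise', 'trusty']}
    assert charm_location('mysql', 'xenial', True, base) == 'cs:trusty/mysql'
    assert charm_location('glance', 'xenial', True, base) == 'cs:xenial/glance'
    assert (charm_location('glance', 'xenial', False, base) ==
            'cs:~openstack-charmers-next/xenial/glance')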
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + use_source = use_source or [] + no_origin = no_origin or [] + + # Charms which should use the source config option + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy', 'percona-cluster', 'lxd'])) + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=1800): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. 
+ """ + self.log.info('Waiting for extended status on units...') + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty, self.trusty_mitaka, + self.xenial_mitaka, self.xenial_newton, + self.yakkety_newton) = range(16) + + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('yakkety', None): self.yakkety_newton, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
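For deployments that set an openstack origin, the method below recovers the release name with a chain of splits. A worked example, under the assumptions `series='trusty'` and `openstack='cloud:trusty-kilo/updates'`:

    origin = 'cloud:trusty-kilo/updates'
    os_origin = origin.split(':')[1]                 # 'trusty-kilo/updates'
    release = os_origin.split('trusty-')[1].split('/')[0]
    assert release == 'kilo'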
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..e4546c8c --- /dev/null +++ b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1124 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import json +import logging +import os +import re +import six +import time +import urllib + +import cinderclient.v1.client as cinder_client +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +import keystoneclient.v2_0 as keystone_client +from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + +import novaclient.client as nova_client +import pika +import swiftclient + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. 
+ + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. 
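Because keystone v3 returns the admin, internal, and public endpoints of a service in no guaranteed order, `validate_v3_svc_catalog_endpoint_data` sorts both sides on the `interface` key before zipping them together. A standalone illustration of that normalization, reusing the placeholder addresses from the docstring above:

    expected = [{'interface': 'public', 'url': 'http://10.5.5.224:5000/v3'},
                {'interface': 'admin', 'url': 'http://10.5.5.224:35357/v3'}]
    actual = [{'interface': 'admin', 'url': 'http://10.5.5.224:35357/v3'},
              {'interface': 'public', 'url': 'http://10.5.5.224:5000/v3'}]

    l_expected = sorted(expected, key=lambda x: x['interface'])
    l_actual = sorted(actual, key=lambda x: x['interface'])
    for want, got in zip(l_expected, l_actual):
        assert want['interface'] == got['interface']
        assert want['url'] == got['url']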
+ """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. + keystone_ip = keystone_sentry.info['public-address'] + ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + 
self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + # Create glance image + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Image validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return image
+
+    def delete_image(self, glance, image):
+        """Delete the specified image."""
+
+        # /!\ DEPRECATION WARNING
+        self.log.warn('/!\\ DEPRECATION WARNING: use '
+                      'delete_resource instead of delete_image.')
+        self.log.debug('Deleting glance image ({})...'.format(image))
+        return self.delete_resource(glance.images, image, msg='glance image')
+
+    def create_instance(self, nova, image_name, instance_name, flavor):
+        """Create the specified instance."""
+        self.log.debug('Creating instance '
+                       '({}|{}|{})'.format(instance_name, image_name, flavor))
+        image = nova.images.find(name=image_name)
+        flavor = nova.flavors.find(name=flavor)
+        instance = nova.servers.create(name=instance_name, image=image,
+                                       flavor=flavor)
+
+        count = 1
+        status = instance.status
+        while status != 'ACTIVE' and count < 60:
+            time.sleep(3)
+            instance = nova.servers.get(instance.id)
+            status = instance.status
+            self.log.debug('instance status: {}'.format(status))
+            count += 1
+
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
+            return None
+
+        return instance
+
+    def delete_instance(self, nova, instance):
+        """Delete the specified instance."""
+
+        # /!\ DEPRECATION WARNING
+        self.log.warn('/!\\ DEPRECATION WARNING: use '
+                      'delete_resource instead of delete_instance.')
+        self.log.debug('Deleting instance ({})...'.format(instance))
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')
+
+    def create_or_get_keypair(self, nova, keypair_name="testkey"):
+        """Create a new keypair, or return pointer if it already exists."""
+        try:
+            _keypair = nova.keypairs.get(keypair_name)
+            self.log.debug('Keypair ({}) already exists, '
+                           'using it.'.format(keypair_name))
+            return _keypair
+        except Exception:
+            self.log.debug('Keypair ({}) does not exist, '
+                           'creating it.'.format(keypair_name))
+
+        _keypair = nova.keypairs.create(name=keypair_name)
+        return _keypair
+
+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot. Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
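As the parameter handling in this method makes explicit, `img_id`, `src_vol_id`, and `snap_id` are mutually exclusive. Illustrative call patterns, assuming `u` is the utils instance and `cinder`, `image`, `vol`, and `snap` are client objects from earlier test steps:

    u.create_cinder_volume(cinder)                     # blank volume
    u.create_cinder_volume(cinder, img_id=image.id)    # bootable, from image
    u.create_cinder_volume(cinder, src_vol_id=vol.id)  # clone of a volume
    u.create_cinder_volume(cinder, snap_id=snap.id)    # from a snapshot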
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. 
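`delete_resource` and `resource_reaches_status` above both poll in 4-second increments up to `max_wait`. A hedged usage sketch combining them, assuming `u`, an authenticated `cinder` client, a volume `vol` created earlier, and `amulet` in scope:

    if not u.resource_reaches_status(cinder.volumes, vol.id,
                                     expected_stat='available',
                                     msg='volume status wait'):
        amulet.raise_status(amulet.FAIL, msg='volume never became available')

    if not u.delete_resource(cinder.volumes, vol.id, msg='cinder volume'):
        amulet.raise_status(amulet.FAIL, msg='volume delete timed out')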
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
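`get_rmq_cluster_running_nodes` above turns Erlang-term output into JSON by chopping out the running_nodes list and swapping quote characters. A worked example of that string chop on a trimmed `cluster_status` string (host names are placeholders):

    import json

    str_stat = ("[{nodes,[{disc,['rabbit@host1','rabbit@host2']}]},"
                "{running_nodes,['rabbit@host1','rabbit@host2']},"
                "{partitions,[]}]")
    pos_start = str_stat.find("{running_nodes,") + 15
    pos_end = str_stat.find("]},", pos_start) + 1
    str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
    assert json.loads(str_run_nodes) == ['rabbit@host1', 'rabbit@host2']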
+
+        :param sentry_units: list of sentry unit pointers (all rmq units)
+        :returns: None if successful, otherwise return error message
+        """
+        host_names = self.get_unit_hostnames(sentry_units)
+        errors = []
+
+        # Query every unit for cluster_status running nodes
+        for query_unit in sentry_units:
+            query_unit_name = query_unit.info['unit_name']
+            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+            # Confirm that every unit is represented in the queried unit's
+            # cluster_status running nodes output.
+            for validate_unit in sentry_units:
+                val_host_name = host_names[validate_unit.info['unit_name']]
+                val_node_name = 'rabbit@{}'.format(val_host_name)
+
+                if val_node_name not in running_nodes:
+                    errors.append('Cluster member check failed on {}: {} not '
+                                  'in {}\n'.format(query_unit_name,
+                                                   val_node_name,
+                                                   running_nodes))
+        if errors:
+            return ''.join(errors)
+
+    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+        """Check a single juju rmq unit for ssl and port in the config file."""
+        host = sentry_unit.info['public-address']
+        unit_name = sentry_unit.info['unit_name']
+
+        conf_file = '/etc/rabbitmq/rabbitmq.config'
+        conf_contents = str(self.file_contents_safe(sentry_unit,
+                                                    conf_file, max_wait=16))
+        # Checks
+        conf_ssl = 'ssl' in conf_contents
+        conf_port = str(port) in conf_contents
+
+        # Port explicitly checked in config
+        if port and conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif port and not conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{} but not on port {} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        # Port not checked (useful when checking that ssl is disabled)
+        elif not port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif not conf_ssl:
+            self.log.debug('SSL not enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        else:
+            msg = ('Unknown condition when checking SSL status @{}:{} '
+                   '({})'.format(host, port, unit_name))
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+        """Check that ssl is enabled on rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on the listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification. Confirm that it is enabled on every
+        unit.
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+
+        :param sentry_unit: sentry unit pointer
+        :param message: amqp message string
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: None. Raises exception if publish failed.
+        """
+        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+                                                                    message))
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+
+        # NOTE(beisner): extra debug here re: pika hang potential:
+        # https://github.com/pika/pika/issues/297
+        # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+        self.log.debug('Defining channel...')
+        channel = connection.channel()
+        self.log.debug('Declaring queue...')
+        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+        self.log.debug('Publishing message...')
+        channel.basic_publish(exchange='', routing_key=queue, body=message)
+        self.log.debug('Closing channel...')
+        channel.close()
+        self.log.debug('Closing connection...')
+        connection.close()
+
+    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+                                 username="testuser1",
+                                 password="changeme",
+                                 ssl=False, port=None):
+        """Get an amqp message from a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: amqp message body as string. Raise if get fails.
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
+            amulet.raise_status(amulet.FAIL, msg)
diff --git a/ceph-fs/src/tests/gate-basic-xenial-mitaka b/ceph-fs/src/tests/gate-basic-xenial-mitaka
new file mode 100755
index 00000000..734d993a
--- /dev/null
+++ b/ceph-fs/src/tests/gate-basic-xenial-mitaka
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic ceph deployment on xenial-mitaka."""
+
+from basic_deployment import CephFsBasicDeployment
+
+if __name__ == '__main__':
+    deployment = CephFsBasicDeployment(series='xenial')
+    deployment.run_tests()
diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml
new file mode 100644
index 00000000..e3185c6d
--- /dev/null
+++ b/ceph-fs/src/tests/tests.yaml
@@ -0,0 +1,17 @@
+# Bootstrap the model if necessary.
+bootstrap: True
+# Re-use bootstrap node instead of destroying/re-bootstrapping.
+reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. +#python-packages: diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini new file mode 100644 index 00000000..479d7bb3 --- /dev/null +++ b/ceph-fs/src/tox.ini @@ -0,0 +1,53 @@ +# Source charm: ./src/tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. +[tox] +envlist = pep8 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + AMULET_SETUP_TIMEOUT=2700 +whitelist_externals = juju +passenv = HOME TERM AMULET_* +deps = -r{toxinidir}/test-requirements.txt +install_command = + pip install --allow-unverified python-apt {opts} {packages} + +[testenv:pep8] +basepython = python2.7 +commands = charm-proof + +[testenv:func27-noop] +# DRY RUN - For Debug +basepython = python2.7 +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + +[testenv:func27] +# Run all gate tests which are +x (expected to always pass) +basepython = python2.7 +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + +[testenv:func27-smoke] +# Run a specific test as an Amulet smoke test (expected to always pass) +basepython = python2.7 +commands = + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + +[testenv:func27-dfs] +# Run all deploy-from-source tests which are +x (may not always pass!) +basepython = python2.7 +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + +[testenv:func27-dev] +# Run all development test targets which are +x (may not always pass!) +basepython = python2.7 +commands = + bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + +[testenv:venv] +commands = {posargs} diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt new file mode 100644 index 00000000..9a0bed81 --- /dev/null +++ b/ceph-fs/test-requirements.txt @@ -0,0 +1,7 @@ +# Unit test requirements +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 +charms.reactive +mock>=1.2 +coverage>=3.6 +git+https://github.com/openstack/charms.openstack#egg=charms.openstack diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 1e21b23c..3ba2b233 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -1,41 +1,55 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
[tox] -envlist = pep8,py34 skipsdist = True +envlist = pep8,py34,py35 skip_missing_interpreters = True [testenv] -basepython = python2.7 setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux - INTERFACE_PATH={toxinidir}/interfaces LAYER_PATH={toxinidir}/layers + INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build passenv = http_proxy https_proxy install_command = pip install {opts} {packages} deps = - -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements.txt [testenv:build] +basepython = python2.7 commands = - charm-build --log-level DEBUG -o {toxinidir}/build --name ceph-mon src + charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} -[testenv:venv] -commands = {posargs} - -[testenv:pep8] -commands = flake8 {posargs} reactive lib unit_tests +[testenv:py27] +basepython = python2.7 +# Reactive source charms are Python3-only, but a py27 unit test target +# is required by OpenStack Governance. Remove this shim as soon as +# permitted. http://governance.openstack.org/reference/cti/python_cti.html +whitelist_externals = true +commands = true [testenv:py34] basepython = python3.4 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr -p {toxinidir}/build/trusty/ceph-mon {posargs} +commands = ostestr {posargs} [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr -p {toxinidir}/build/trusty/ceph-mon {posargs} +commands = ostestr {posargs} + +[testenv:pep8] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:venv] +commands = {posargs} [flake8] -ignore = E402,E226 \ No newline at end of file +# E402 ignore necessary for path append before sys module import in actions +ignore = E402 diff --git a/ceph-fs/unit_tests/__init__.py b/ceph-fs/unit_tests/__init__.py new file mode 100644 index 00000000..3e9250c6 --- /dev/null +++ b/ceph-fs/unit_tests/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
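+
+# NOTE: the module-level assignments below replace apt_pkg and the
+# charmhelpers module tree with MagicMock objects before the charm code
+# under src/ is imported, so the unit tests can run without those
+# packages installed.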
+ +import sys +import mock + +sys.path.append('src') + +apt_pkg = mock.MagicMock() +charmhelpers = mock.MagicMock() +sys.modules['apt_pkg'] = apt_pkg +sys.modules['charmhelpers'] = charmhelpers +sys.modules['charmhelpers.core'] = charmhelpers.core +sys.modules['charmhelpers.core.hookenv'] = charmhelpers.core.hookenv +sys.modules['charmhelpers.core.host'] = charmhelpers.core.host +sys.modules['charmhelpers.core.unitdata'] = charmhelpers.core.unitdata +sys.modules['charmhelpers.core.templating'] = charmhelpers.core.templating +sys.modules['charmhelpers.contrib'] = charmhelpers.contrib +sys.modules['charmhelpers.contrib.openstack'] = charmhelpers.contrib.openstack +sys.modules['charmhelpers.contrib.openstack.utils'] = ( + charmhelpers.contrib.openstack.utils) +sys.modules['charmhelpers.contrib.openstack.templating'] = ( + charmhelpers.contrib.openstack.templating) +sys.modules['charmhelpers.contrib.network'] = charmhelpers.contrib.network +sys.modules['charmhelpers.contrib.network.ip'] = ( + charmhelpers.contrib.network.ip) +sys.modules['charmhelpers.fetch'] = charmhelpers.fetch +sys.modules['charmhelpers.cli'] = charmhelpers.cli +sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers +sys.modules['charmhelpers.contrib.hahelpers.cluster'] = ( + charmhelpers.contrib.hahelpers.cluster) diff --git a/ceph-fs/unit_tests/test_actions.py b/ceph-fs/unit_tests/test_actions.py new file mode 100644 index 00000000..347556e6 --- /dev/null +++ b/ceph-fs/unit_tests/test_actions.py @@ -0,0 +1,79 @@ +import sys + +sys.path.append('src/actions') +import unittest +from mock import patch, call, Mock + +__author__ = 'Chris Holcombe ' + +sys.modules['action_set'] = Mock() +sys.modules['action_get'] = Mock() +sys.modules['action_fail'] = Mock() +sys.modules['xattr'] = Mock() +from get_quota import get_quota +from remove_quota import remove_quota +from set_quota import set_quota + + +def action_get_side_effect(*args): + if args[0] == 'max-files': + return 1024 + elif args[0] == 'max-bytes': + return 1024 + elif args[0] == 'directory': + return 'foo' + + +class CephActionsTestCase(unittest.TestCase): + @patch('get_quota.action_fail') + @patch('get_quota.action_set') + @patch('get_quota.action_get') + @patch('get_quota.os') + @patch('get_quota.xattr') + def test_get_quota(self, xattr, os, action_get, action_set, action_fail): + action_get.side_effect = action_get_side_effect + os.path.exists.return_value = True + xattr.getxattr.return_value = "1024" + get_quota() + action_get.assert_has_calls( + [call('max-files'), + call('max-bytes'), + call('directory')]) + action_fail.assert_not_called() + xattr.getxattr.assert_called_with('foo', + 'ceph.quota.max_files') + action_set.assert_called_with({'foo quota': "1024"}) + + @patch('set_quota.action_fail') + @patch('set_quota.action_get') + @patch('set_quota.os') + @patch('set_quota.xattr') + def test_set_quota(self, xattr, os, action_get, action_fail): + action_get.side_effect = action_get_side_effect + os.path.exists.return_value = True + set_quota() + xattr.setxattr.assert_called_with('foo', + 'ceph.quota.max_files', + '1024') + action_get.assert_has_calls( + [call('max-files'), + call('max-bytes'), + call('directory')]) + action_fail.assert_not_called() + + @patch('remove_quota.action_fail') + @patch('remove_quota.action_get') + @patch('remove_quota.os') + @patch('remove_quota.xattr') + def test_remove_quota(self, xattr, os, action_get, action_fail): + action_get.side_effect = action_get_side_effect + os.path.exists.return_value = True + 
remove_quota()
+        xattr.setxattr.assert_called_with('foo',
+                                          'ceph.quota.max_files',
+                                          '0')
+        action_get.assert_has_calls(
+            [call('max-files'),
+             call('max-bytes'),
+             call('directory')])
+        action_fail.assert_not_called()
diff --git a/ceph-fs/unit_tests/test_utils.py b/ceph-fs/unit_tests/test_utils.py
new file mode 100644
index 00000000..781901e4
--- /dev/null
+++ b/ceph-fs/unit_tests/test_utils.py
@@ -0,0 +1,116 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import unittest
+import os
+import yaml
+
+from mock import patch
+
+
+def load_config():
+    '''
+    Walk backwards from __file__ looking for config.yaml, load and return the
+    'options' section.
+    '''
+    config = None
+    f = __file__
+    while config is None:
+        d = os.path.dirname(f)
+        if os.path.isfile(os.path.join(d, 'config.yaml')):
+            config = os.path.join(d, 'config.yaml')
+            break
+        f = d
+
+    if not config:
+        logging.error('Could not find config.yaml in any parent directory '
+                      'of %s. ' % f)
+        raise Exception
+
+    return yaml.safe_load(open(config).read())['options']
+
+
+def get_default_config():
+    '''
+    Load default charm config from config.yaml return as a dict.
+    If no default is set in config.yaml, its value is None.
+    '''
+    default_config = {}
+    config = load_config()
+    for k, v in config.iteritems():
+        if 'default' in v:
+            default_config[k] = v['default']
+        else:
+            default_config[k] = None
+    return default_config
+
+
+class CharmTestCase(unittest.TestCase):
+
+    def setUp(self, obj, patches):
+        super(CharmTestCase, self).setUp()
+        self.patches = patches
+        self.obj = obj
+        self.test_config = TestConfig()
+        self.test_relation = TestRelation()
+        self.patch_all()
+
+    def patch(self, method):
+        _m = patch.object(self.obj, method)
+        mock = _m.start()
+        self.addCleanup(_m.stop)
+        return mock
+
+    def patch_all(self):
+        for method in self.patches:
+            setattr(self, method, self.patch(method))
+
+
+class TestConfig(object):
+
+    def __init__(self):
+        self.config = get_default_config()
+
+    def get(self, attr=None):
+        if not attr:
+            return self.get_all()
+        try:
+            return self.config[attr]
+        except KeyError:
+            return None
+
+    def get_all(self):
+        return self.config
+
+    def set(self, attr, value):
+        if attr not in self.config:
+            raise KeyError
+        self.config[attr] = value
+
+
+class TestRelation(object):
+
+    def __init__(self, relation_data={}):
+        self.relation_data = relation_data
+
+    def set(self, relation_data):
+        self.relation_data = relation_data
+
+    def get(self, attr=None, unit=None, rid=None):
+        if attr is None:
+            return self.relation_data
+        elif attr in self.relation_data:
+            return self.relation_data[attr]
+        return None

From fa01a746e5e6ca0b8cc55a4bcb93d86f57b192bf Mon Sep 17 00:00:00 2001
From: James Page
Date: Sat, 7 Jan 2017 16:07:36 +0000
Subject: [PATCH 1257/2699] Generalize upgrade paths for osds

Make use of new charms.ceph utils to generalize the upgrade paths
for OSD upgrades, ensuring that only supported upgrade paths are
undertaken for Ubuntu 16.04 UCA pockets.
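For orientation, install sources are now first resolved to Ceph release
codenames, and the upgrade check consults a codename-to-codename table
in charms.ceph rather than the charm-local UCA pocket mapping removed
below. A rough sketch of the shape of those helpers follows; the
firefly -> hammer pair is the one exercised by the updated unit test,
while the pocket entries are illustrative assumptions, not the
library's actual tables:

    # Hypothetical sketch; the real tables live in the charms.ceph library.
    UPGRADE_PATHS = {
        'firefly': 'hammer',
        'hammer': 'jewel',
    }

    UCA_POCKET_MAP = {
        'distro': 'firefly',              # assumed: trusty archive ships firefly
        'cloud:trusty-kilo': 'hammer',    # assumed pocket/codename pairing
        'cloud:trusty-mitaka': 'jewel',
    }

    def resolve_ceph_version(source):
        """Map an install source ('distro' or a UCA pocket) to a codename."""
        return UCA_POCKET_MAP.get(source or 'distro')

    # An upgrade proceeds only when the resolved pair is a supported step:
    old = resolve_ceph_version('distro')             # 'firefly'
    new = resolve_ceph_version('cloud:trusty-kilo')  # 'hammer'
    assert UPGRADE_PATHS.get(old) == new
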
Partial-Bug: 1611082 Change-Id: Ifbf3a7ffbb5ab17e839099658c7a474784ab4083 --- ceph-osd/hooks/ceph_hooks.py | 52 +++++++++++------------------ ceph-osd/unit_tests/test_upgrade.py | 7 ++-- 2 files changed, 25 insertions(+), 34 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 0b1b1718..235395bd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -77,20 +77,6 @@ hooks = Hooks() -# A dict of valid ceph upgrade paths. Mapping is old -> new -upgrade_paths = { - 'cloud:trusty-juno': 'cloud:trusty-kilo', - 'cloud:trusty-kilo': 'cloud:trusty-liberty', - 'cloud:trusty-liberty': 'cloud:trusty-mitaka', -} - - -def pretty_print_upgrade_paths(): - lines = [] - for key, value in upgrade_paths.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - def check_for_upgrade(): release_info = host.lsb_release() @@ -100,27 +86,29 @@ def check_for_upgrade(): return c = hookenv.config() - old_version = c.previous('source') + old_version = ceph.resolve_ceph_version(c.previous('source') or + 'distro') log('old_version: {}'.format(old_version)) - # Strip all whitespace - new_version = hookenv.config('source') - if new_version: - # replace all whitespace - new_version = new_version.replace(' ', '') + new_version = ceph.resolve_ceph_version(hookenv.config('source') or + 'distro') log('new_version: {}'.format(new_version)) - if old_version in upgrade_paths: - if new_version == upgrade_paths[old_version]: - log("{} to {} is a valid upgrade path. Proceeding.".format( - old_version, new_version)) - ceph.roll_osd_cluster(new_version=new_version, - upgrade_key='osd-upgrade') - else: - # Log a helpful error message - log("Invalid upgrade path from {} to {}. " - "Valid paths are: {}".format(old_version, - new_version, - pretty_print_upgrade_paths())) + if old_version == new_version: + log("No new ceph version detected, skipping upgrade.", DEBUG) + return + + if (old_version in ceph.UPGRADE_PATHS and + new_version == ceph.UPGRADE_PATHS[old_version]): + log("{} to {} is a valid upgrade path. Proceeding.".format( + old_version, new_version)) + ceph.roll_osd_cluster(new_version=new_version, + upgrade_key='osd-upgrade') + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. 
" + "Valid paths are: {}".format(old_version, + new_version, + ceph.pretty_print_upgrade_paths())) def tune_network_adapters(): diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index 7b213ca4..c1c99da6 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -17,10 +17,13 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): + @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_osd_cluster') - def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv): + def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, + version): + version.side_effect = ['firefly', 'hammer'] host.lsb_release.return_value = { 'DISTRIB_CODENAME': 'trusty', } @@ -31,5 +34,5 @@ def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv): check_for_upgrade() roll_osd_cluster.assert_called_with( - new_version='cloud:trusty-kilo', + new_version='hammer', upgrade_key='osd-upgrade') From be66459b2fdeaf12e3d2527dc8390f9f0894b502 Mon Sep 17 00:00:00 2001 From: James Page Date: Sat, 7 Jan 2017 16:03:18 +0000 Subject: [PATCH 1258/2699] Generalize upgrade paths for mons Make use of the new utils provided in charms.ceph to map UCA pockets onto Ceph versions, and only perform upgrades when an upgrade path is detected between Ceph versions. Closes-Bug: 1611082 Change-Id: I002a79d97464f130d26cbde91db7478b5d443b70 --- ceph-mon/hooks/ceph_hooks.py | 46 ++++++++++------------------- ceph-mon/unit_tests/test_upgrade.py | 7 +++-- 2 files changed, 20 insertions(+), 33 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 6be26359..e2f79f15 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -85,20 +85,6 @@ STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' -# A dict of valid ceph upgrade paths. Mapping is old -> new -upgrade_paths = { - 'cloud:trusty-juno': 'cloud:trusty-kilo', - 'cloud:trusty-kilo': 'cloud:trusty-liberty', - 'cloud:trusty-liberty': 'cloud:trusty-mitaka', -} - - -def pretty_print_upgrade_paths(): - lines = [] - for key, value in upgrade_paths.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - def check_for_upgrade(): release_info = host.lsb_release() @@ -108,27 +94,25 @@ def check_for_upgrade(): return c = hookenv.config() - old_version = c.previous('source') + old_version = ceph.resolve_ceph_version(c.previous('source') or + 'distro') log('old_version: {}'.format(old_version)) # Strip all whitespace - new_version = hookenv.config('source') - if new_version: - # replace all whitespace - new_version = new_version.replace(' ', '') + new_version = ceph.resolve_ceph_version(hookenv.config('source')) log('new_version: {}'.format(new_version)) - if old_version in upgrade_paths: - if new_version == upgrade_paths[old_version]: - log("{} to {} is a valid upgrade path. Proceeding.".format( - old_version, new_version)) - ceph.roll_monitor_cluster(new_version=new_version, - upgrade_key='admin') - else: - # Log a helpful error message - log("Invalid upgrade path from {} to {}. " - "Valid paths are: {}".format(old_version, - new_version, - pretty_print_upgrade_paths())) + if (old_version in ceph.UPGRADE_PATHS and + new_version == ceph.UPGRADE_PATHS[old_version]): + log("{} to {} is a valid upgrade path. 
Proceeding.".format( + old_version, new_version)) + ceph.roll_monitor_cluster(new_version=new_version, + upgrade_key='admin') + else: + # Log a helpful error message + log("Invalid upgrade path from {} to {}. " + "Valid paths are: {}".format(old_version, + new_version, + ceph.pretty_print_upgrade_paths())) @hooks.hook('install.real') diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index 1a74a4fa..7ecf228a 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -16,10 +16,13 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): + @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_monitor_cluster') - def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv): + def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv, + version): + version.side_effect = ['firefly', 'hammer'] host.lsb_release.return_value = { 'DISTRIB_CODENAME': 'trusty', } @@ -30,5 +33,5 @@ def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv): check_for_upgrade() roll_monitor_cluster.assert_called_with( - new_version='cloud:trusty-kilo', + new_version='hammer', upgrade_key='admin') From 1a0354ef6d64658cef63ddea221ab9d3c8e90ff1 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 12 Jan 2017 16:54:48 +0200 Subject: [PATCH 1259/2699] Remove /var/lib/ceph from updatedb This stops updatedb from indexing the storage locations Change-Id: Idd77f0fc087a06af874d2865bfec8c319a0b15dd Closes-bug: 1520226 --- ceph-osd/files/updatedb.conf | 6 ++++++ ceph-osd/hooks/ceph_hooks.py | 5 +++++ 2 files changed, 11 insertions(+) create mode 100644 ceph-osd/files/updatedb.conf diff --git a/ceph-osd/files/updatedb.conf b/ceph-osd/files/updatedb.conf new file mode 100644 index 00000000..890bd017 --- /dev/null +++ b/ceph-osd/files/updatedb.conf @@ -0,0 +1,6 @@ +# This file is owned by the Ceph charm and could be replaced at any time + +PRUNE_BIND_MOUNTS="yes" +# PRUNENAMES=".git .bzr .hg .svn" +PRUNEPATHS="/tmp /var/spool /media /home/.ecryptfs /var/lib/schroot /var/lib/ceph" +PRUNEFS="NFS nfs nfs4 rpc_pipefs afs binfmt_misc proc smbfs autofs iso9660 ncpfs coda devpts ftpfs devfs mfs shfs sysfs cifs lustre tmpfs usbfs udf fuse.glusterfs fuse.sshfs curlftpfs ecryptfs fusesmb devtmpfs" diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 235395bd..3aa98822 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -44,6 +44,7 @@ umount, mkdir, cmp_pkgrevno, + rsync, service_reload, service_restart) from charmhelpers.fetch import ( @@ -327,6 +328,10 @@ def config_changed(): umount(e_mountpoint) prepare_disks_and_activate() install_apparmor_profile() + script = os.path.join('/etc', 'updatedb.conf') + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', + 'updatedb.conf'), + script) @hooks.hook('storage.real') From ffd6a4a9f9d4818c1ba0d241336e1e201452888a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 Jan 2017 12:17:19 +0000 Subject: [PATCH 1260/2699] Drop apache+mod-fastcgi support The ceph project has focussed on running the RADOS Gateway using the embedded civetweb engine for many cycles now; mod-fastcgi is buggy and no longer provided in Ubuntu as of 17.04, so switch to always running in embedded mode. Existing installs will be upgraded to run in this mode, purging apache related packaging and configuration. 
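With the Apache path removed, the rendered client section of ceph.conf
always selects the embedded engine; based on the template change below,
a representative render looks roughly like this (the port value is
illustrative, whatever the charm's API port resolves to):

    [client.radosgw.gateway]
    keyring = /etc/ceph/keyring.rados.gateway
    rgw socket path = /tmp/radosgw.sock
    log file = /var/log/ceph/radosgw.log
    rgw frontends = civetweb port=70
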
Change-Id: I90e6c047d78de2b0ebf8c24bd2f2d6d1bfbd5c5d Closes-Bug: 1657370 --- ceph-radosgw/config.yaml | 20 ------- ceph-radosgw/hooks/ceph_radosgw_context.py | 60 ------------------- ceph-radosgw/hooks/hooks.py | 42 ++----------- ceph-radosgw/hooks/utils.py | 26 -------- ceph-radosgw/templates/ceph.conf | 6 -- ceph-radosgw/templates/ports.conf | 11 ---- ceph-radosgw/templates/rgw | 25 -------- ceph-radosgw/templates/rgw.conf | 25 -------- ceph-radosgw/tests/basic_deployment.py | 4 +- .../unit_tests/test_ceph_radosgw_context.py | 7 --- ceph-radosgw/unit_tests/test_hooks.py | 42 ------------- 11 files changed, 7 insertions(+), 261 deletions(-) delete mode 100644 ceph-radosgw/templates/ports.conf delete mode 100644 ceph-radosgw/templates/rgw delete mode 100644 ceph-radosgw/templates/rgw.conf diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 9abae24b..77768f00 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -75,26 +75,6 @@ options: default: False description: | If set to True, supporting services will log to syslog. - use-ceph-optimised-packages: - type: boolean - default: false - description: | - By default apache2 and libapache2-mod-fastcgi will be installed from the - Ubuntu archives. This option allows for an alternate ceph.com install - source which contains patched versions with added support for HTTP - 100-continue. See the following page for more info: - - http://ceph.com/docs/dumpling/radosgw/manual-install/#continue-support - use-embedded-webserver: - type: boolean - default: false - description: | - Newer versions of the Ceph RADOS Gateway support use of an embedded web - container instead of Apache + mod-fastcgi, avoiding some of the nuances - of using the stock mod-fastcgi packages from Ubuntu. - - Enable this option to disable use of Apache and enable the embedded - web container feature. 
dns-ha: type: boolean default: False diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 389538cc..3b225afa 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -16,9 +16,7 @@ import re import socket import tempfile -import glob import shutil -import subprocess from charmhelpers.contrib.openstack import context from charmhelpers.contrib.hahelpers.cluster import ( @@ -34,7 +32,6 @@ relation_ids, related_units, relation_get, - status_set, ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, @@ -44,55 +41,6 @@ from charmhelpers.contrib.storage.linux.ceph import CephConfContext -def is_apache_24(): - if os.path.exists('/etc/apache2/conf-available'): - return True - else: - return False - - -class ApacheContext(context.OSContextGenerator): - interfaces = ['http'] - service_namespace = 'ceph-radosgw' - - def __call__(self): - ctxt = {} - if config('use-embedded-webserver'): - log("Skipping ApacheContext since we are using the embedded " - "webserver") - return {} - - status_set('maintenance', 'configuring apache') - - src = 'files/www/*' - dst = '/var/www/' - log("Installing www scripts", level=DEBUG) - try: - for x in glob.glob(src): - shutil.copy(x, dst) - except IOError as e: - log("Error copying files from '%s' to '%s': %s" % (src, dst, e), - level=WARNING) - - try: - subprocess.check_call(['a2enmod', 'fastcgi']) - subprocess.check_call(['a2enmod', 'rewrite']) - except subprocess.CalledProcessError as e: - log("Error enabling apache modules - %s" % e, level=WARNING) - - try: - if is_apache_24(): - subprocess.check_call(['a2dissite', '000-default']) - else: - subprocess.check_call(['a2dissite', 'default']) - except subprocess.CalledProcessError as e: - log("Error disabling apache sites - %s" % e, level=WARNING) - - ctxt['hostname'] = socket.gethostname() - ctxt['port'] = determine_api_port(config('port'), singlenode_mode=True) - return ctxt - - class HAProxyContext(context.HAProxyContext): def __call__(self): @@ -220,7 +168,6 @@ def __call__(self): 'hostname': host, 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), - 'embedded_webserver': config('use-embedded-webserver'), 'loglevel': config('loglevel'), 'port': port, 'ipv6': config('prefer-ipv6') @@ -232,13 +179,6 @@ def __call__(self): if all([os.path.isfile(p) for p in paths]): ctxt['cms'] = True - if (config('use-ceph-optimised-packages') and - not config('use-embedded-webserver')): - ctxt['disable_100_continue'] = False - else: - # NOTE: currently only applied if NOT using embedded webserver - ctxt['disable_100_continue'] = True - # NOTE(dosaboy): these sections must correspond to what is supported in # the config template. 
sections = ['global', 'client.radosgw.gateway'] diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8d586e82..5a22e964 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -30,7 +30,6 @@ relation_set, log, DEBUG, - WARNING, Hooks, UnregisteredHookError, status_set, ) @@ -39,8 +38,8 @@ apt_install, apt_purge, add_source, + filter_installed_packages, ) -from charmhelpers.core.host import lsb_release from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.contrib.network.ip import ( @@ -86,21 +85,6 @@ NSS_DIR = '/var/lib/ceph/nss' -def install_ceph_optimised_packages(): - """Inktank provides patched/optimised packages for HTTP 100-continue - support that does has not yet been ported to upstream. These can - optionally be installed from ceph.com archives. - """ - prolog = "http://gitbuilder.ceph.com/" - epilog = "-x86_64-basic/ref/master" - rel = lsb_release()['DISTRIB_CODENAME'] - fastcgi_source = "%slibapache-mod-fastcgi-deb-%s%s" % (prolog, rel, epilog) - apache_source = "%sapache2-deb-%s%s" % (prolog, rel, epilog) - - for source in [fastcgi_source, apache_source]: - add_source(source, key='6EAEAE2203C3951A') - - PACKAGES = [ 'haproxy', 'libnss3-tools', @@ -119,18 +103,13 @@ def install_ceph_optimised_packages(): def install_packages(): - status_set('maintenance', 'Installing apt packages') add_source(config('source'), config('key')) - if (config('use-ceph-optimised-packages') and - not config('use-embedded-webserver')): - install_ceph_optimised_packages() - apt_update(fatal=True) - apt_install(PACKAGES, fatal=True) - if config('use-embedded-webserver'): - apt_purge(APACHE_PACKAGES) - else: - apt_install(APACHE_PACKAGES, fatal=True) + pkgs = filter_installed_packages(PACKAGES) + if pkgs: + status_set('maintenance', 'Installing radosgw packages') + apt_install(PACKAGES, fatal=True) + apt_purge(APACHE_PACKAGES) @hooks.hook('install.real') @@ -166,15 +145,6 @@ def config_changed(): CONFIGS.write_all() - if not config('use-embedded-webserver'): - try: - subprocess.check_call(['a2ensite', 'rgw']) - except subprocess.CalledProcessError as e: - log("Error enabling apache module 'rgw' - %s" % e, level=WARNING) - - # Ensure started but do a soft reload - subprocess.call(['service', 'apache2', 'start']) - subprocess.call(['service', 'apache2', 'reload']) update_nrpe_config() diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b1b3e5b3..6d02a5d4 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -23,7 +23,6 @@ import ceph_radosgw_context from charmhelpers.core.hookenv import ( - config, log, DEBUG, INFO, @@ -96,9 +95,6 @@ TEMPLATES = 'templates/' HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' CEPH_CONF = '/etc/ceph/ceph.conf' -APACHE_CONF = '/etc/apache2/sites-available/rgw' -APACHE_24_CONF = '/etc/apache2/sites-available/rgw.conf' -APACHE_PORTS_CONF = '/etc/apache2/ports.conf' VERSION_PACKAGE = 'radosgw' @@ -108,18 +104,6 @@ ceph_radosgw_context.HAProxyContext()], 'services': ['haproxy'], }), - (APACHE_CONF, { - 'contexts': [ceph_radosgw_context.ApacheContext()], - 'services': ['apache2'], - }), - (APACHE_24_CONF, { - 'contexts': [ceph_radosgw_context.ApacheContext()], - 'services': ['apache2'], - }), - (APACHE_PORTS_CONF, { - 'contexts': [ceph_radosgw_context.ApacheContext()], - 'services': ['apache2'], - }), (CEPH_CONF, { 'contexts': [ceph_radosgw_context.MonContext()], 'services': ['radosgw'], @@ -140,16 +124,6 @@ def resource_map(): 
 These will be managed for a single hook execution.
     """
-    if not config('use-embedded-webserver'):
-        if os.path.exists('/etc/apache2/conf-available'):
-            BASE_RESOURCE_MAP.pop(APACHE_CONF)
-        else:
-            BASE_RESOURCE_MAP.pop(APACHE_24_CONF)
-    else:
-        BASE_RESOURCE_MAP.pop(APACHE_CONF)
-        BASE_RESOURCE_MAP.pop(APACHE_24_CONF)
-    BASE_RESOURCE_MAP.pop(APACHE_PORTS_CONF)
-
     resource_map = deepcopy(BASE_RESOURCE_MAP)
     return resource_map
diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf
index e35fb2d0..d2674e6c 100644
--- a/ceph-radosgw/templates/ceph.conf
+++ b/ceph-radosgw/templates/ceph.conf
@@ -28,13 +28,7 @@ rgw init timeout = 1200
 keyring = /etc/ceph/keyring.rados.gateway
 rgw socket path = /tmp/radosgw.sock
 log file = /var/log/ceph/radosgw.log
-{% if embedded_webserver %}
 rgw frontends = civetweb port={{ port }}
-{% elif disable_100_continue %}
-# Turn off 100-continue optimization as stock mod_fastcgi
-# does not support it
-rgw print continue = false
-{% endif %}
 {% if auth_type == 'keystone' %}
 rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
 rgw keystone admin token = {{ admin_token }}
diff --git a/ceph-radosgw/templates/ports.conf b/ceph-radosgw/templates/ports.conf
deleted file mode 100644
index 9b011ec1..00000000
--- a/ceph-radosgw/templates/ports.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-Listen {{ port }}
-
-<IfModule ssl_module>
-    Listen 443
-</IfModule>
-
-<IfModule mod_gnutls.c>
-    Listen 443
-</IfModule>
-
-# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/ceph-radosgw/templates/rgw b/ceph-radosgw/templates/rgw
deleted file mode 100644
index 7a3e4724..00000000
--- a/ceph-radosgw/templates/rgw
+++ /dev/null
@@ -1,25 +0,0 @@
-<IfModule mod_fastcgi.c>
-  FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
-</IfModule>
-
-<VirtualHost *:{{ port }}>
-  ServerName {{ hostname }}
-  ServerAdmin ceph@ubuntu.com
-  DocumentRoot /var/www
-  RewriteEngine On
-  RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
-  <Directory /var/www>
-    Options +ExecCGI
-    AllowOverride All
-    SetHandler fastcgi-script
-    Order allow,deny
-    Allow from all
-    AuthBasicAuthoritative Off
-  </Directory>
-
-  AllowEncodedSlashes On
-  ErrorLog /var/log/apache2/error.log
-  CustomLog /var/log/apache2/access.log combined
-  ServerSignature Off
-</VirtualHost>
diff --git a/ceph-radosgw/templates/rgw.conf b/ceph-radosgw/templates/rgw.conf
deleted file mode 100644
index 7a3e4724..00000000
--- a/ceph-radosgw/templates/rgw.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-<IfModule mod_fastcgi.c>
-  FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
-</IfModule>
-
-<VirtualHost *:{{ port }}>
-  ServerName {{ hostname }}
-  ServerAdmin ceph@ubuntu.com
-  DocumentRoot /var/www
-  RewriteEngine On
-  RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
-  <Directory /var/www>
-    Options +ExecCGI
-    AllowOverride All
-    SetHandler fastcgi-script
-    Order allow,deny
-    Allow from all
-    AuthBasicAuthoritative Off
-  </Directory>
-
-  AllowEncodedSlashes On
-  ErrorLog /var/log/apache2/error.log
-  CustomLog /var/log/apache2/access.log combined
-  ServerSignature Off
-</VirtualHost>
diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py
index e3ff55d7..155d5aba 100644
--- a/ceph-radosgw/tests/basic_deployment.py
+++ b/ceph-radosgw/tests/basic_deployment.py
@@ -115,13 +115,11 @@ def _configure_services(self):
             'ephemeral-unmount': '/mnt',
             'osd-devices': '/dev/vdb /srv/ceph'
         }
-        radosgw_config = {"use-embedded-webserver": True}
 
         configs = {'keystone': keystone_config,
                    'percona-cluster': pxc_config,
                    'cinder': cinder_config,
-                   'ceph': ceph_config,
-                   'ceph-radosgw': radosgw_config}
+
'ceph': ceph_config} super(CephRadosGwBasicDeployment, self)._configure_services(configs) def _run_action(self, unit_id, action, *args): diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index bdbdc965..79f5310a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -28,7 +28,6 @@ 'related_units', 'cmp_pkgrevno', 'socket', - 'is_apache_24', ] @@ -182,8 +181,6 @@ def _relation_get(attr, unit, rid): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] expect = { 'auth_supported': 'cephx', - 'embedded_webserver': False, - 'disable_100_continue': True, 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, @@ -231,8 +228,6 @@ def _relation_get(attr, unit, rid): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] expect = { 'auth_supported': 'none', - 'embedded_webserver': False, - 'disable_100_continue': True, 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, @@ -262,8 +257,6 @@ def _relation_get(attr, unit, rid): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] expect = { 'auth_supported': 'cephx', - 'embedded_webserver': False, - 'disable_100_continue': True, 'hostname': 'testhost', 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', 'old_auth': False, diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 905a408d..4687b040 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -13,7 +13,6 @@ # limitations under the License. from mock import ( - call, patch, ) @@ -42,7 +41,6 @@ 'get_iface_for_address', 'get_netmask_for_address', 'log', - 'lsb_release', 'open_port', 'os', 'relation_ids', @@ -67,50 +65,10 @@ def setUp(self): self.test_config.set('key', 'secretkey') self.test_config.set('use-syslog', False) - def test_install_ceph_optimised_packages(self): - self.lsb_release.return_value = {'DISTRIB_CODENAME': 'vivid'} - fastcgi_source = ( - 'http://gitbuilder.ceph.com/' - 'libapache-mod-fastcgi-deb-vivid-x86_64-basic/ref/master') - apache_source = ( - 'http://gitbuilder.ceph.com/' - 'apache2-deb-vivid-x86_64-basic/ref/master') - calls = [ - call(fastcgi_source, key='6EAEAE2203C3951A'), - call(apache_source, key='6EAEAE2203C3951A'), - ] - ceph_hooks.install_ceph_optimised_packages() - self.add_source.assert_has_calls(calls) - def test_install_packages(self): - self.test_config.set('use-ceph-optimised-packages', '') - ceph_hooks.install_packages() - self.add_source.assert_called_with('distro', 'secretkey') - self.assertTrue(self.apt_update.called) - self.apt_install.assert_called_with(['libapache2-mod-fastcgi', - 'apache2'], fatal=True) - - def test_install_optimised_packages_no_embedded(self): - self.test_config.set('use-ceph-optimised-packages', True) - self.test_config.set('use-embedded-webserver', False) - _install_packages = self.patch('install_ceph_optimised_packages') - ceph_hooks.install_packages() - self.add_source.assert_called_with('distro', 'secretkey') - self.assertTrue(self.apt_update.called) - self.assertTrue(_install_packages.called) - self.apt_install.assert_called_with(['libapache2-mod-fastcgi', - 'apache2'], fatal=True) - - def test_install_optimised_packages_embedded(self): - self.test_config.set('use-ceph-optimised-packages', True) - self.test_config.set('use-embedded-webserver', True) - _install_packages = 
self.patch('install_ceph_optimised_packages') ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') self.assertTrue(self.apt_update.called) - self.assertFalse(_install_packages.called) - self.apt_install.assert_called_with(ceph_hooks.PACKAGES, - fatal=True) self.apt_purge.assert_called_with(['libapache2-mod-fastcgi', 'apache2']) From 161158bd98b9247e43d0eed7af76b596be941d03 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Jan 2017 08:38:40 +0000 Subject: [PATCH 1261/2699] Revert "Remove /var/lib/ceph from updatedb" ceph and swift-storage apps may end up on the same unit so a different approach is needed. This reverts commit 1a0354ef6d64658cef63ddea221ab9d3c8e90ff1. Change-Id: Id74e014d856718fbc5e4d714578b233145c9c047 --- ceph-osd/files/updatedb.conf | 6 ------ ceph-osd/hooks/ceph_hooks.py | 5 ----- 2 files changed, 11 deletions(-) delete mode 100644 ceph-osd/files/updatedb.conf diff --git a/ceph-osd/files/updatedb.conf b/ceph-osd/files/updatedb.conf deleted file mode 100644 index 890bd017..00000000 --- a/ceph-osd/files/updatedb.conf +++ /dev/null @@ -1,6 +0,0 @@ -# This file is owned by the Ceph charm and could be replaced at any time - -PRUNE_BIND_MOUNTS="yes" -# PRUNENAMES=".git .bzr .hg .svn" -PRUNEPATHS="/tmp /var/spool /media /home/.ecryptfs /var/lib/schroot /var/lib/ceph" -PRUNEFS="NFS nfs nfs4 rpc_pipefs afs binfmt_misc proc smbfs autofs iso9660 ncpfs coda devpts ftpfs devfs mfs shfs sysfs cifs lustre tmpfs usbfs udf fuse.glusterfs fuse.sshfs curlftpfs ecryptfs fusesmb devtmpfs" diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3aa98822..235395bd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -44,7 +44,6 @@ umount, mkdir, cmp_pkgrevno, - rsync, service_reload, service_restart) from charmhelpers.fetch import ( @@ -328,10 +327,6 @@ def config_changed(): umount(e_mountpoint) prepare_disks_and_activate() install_apparmor_profile() - script = os.path.join('/etc', 'updatedb.conf') - rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', - 'updatedb.conf'), - script) @hooks.hook('storage.real') From 8448155d3c089f836854a8b7e645cb596d0ef643 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 25 Jan 2017 08:33:06 -0500 Subject: [PATCH 1262/2699] Remove /var/lib/ceph from updatedb This stops updatedb from indexing the storage locations Closes-bug: 1520226 Change-Id: I3370af6c31689392b915f402e53a328f061ef4b0 --- ceph-osd/hooks/ceph_hooks.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../charmhelpers/contrib/openstack/context.py | 74 +++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 70 +++++++++++++++++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 14 ++++ ceph-osd/hooks/charmhelpers/core/host.py | 33 ++++++++- .../charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 67 +++++++++++++++++ 8 files changed, 262 insertions(+), 11 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 235395bd..38e38dc0 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -45,7 +45,9 @@ mkdir, cmp_pkgrevno, service_reload, - service_restart) + service_restart, + add_to_updatedb_prunepath, +) from charmhelpers.fetch import ( add_source, apt_install, @@ -76,6 +78,7 @@ from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() +STORAGE_MOUNT_PATH = '/var/lib/ceph' def check_for_upgrade(): @@ -327,6 +330,7 @@ def config_changed(): umount(e_mountpoint) 
prepare_disks_and_activate() install_apparmor_profile() + add_to_updatedb_prunepath(STORAGE_MOUNT_PATH) @hooks.hook('storage.real') diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 2d2026e4..f2f7dfbc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN as e: + return None + if answers: return str(answers[0]) return None diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index d5b3a33b..42316331 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ import glob import json +import math import os import re import time @@ -90,6 +91,9 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, get_host_ip, + git_determine_usr_bin, + git_determine_python_path, + enable_memcache, ) from charmhelpers.core.unitdata import kv @@ -1207,6 +1211,43 @@ def __call__(self): return ctxt +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, process_weight=1.00, + admin_process_weight=0.75, public_process_weight=0.25): + self.service_name = name + self.user = name + self.group = name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight = public_process_weight + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + total_processes = self.num_cpus * multiplier + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + "usr_bin": git_determine_usr_bin(), + "python_path": git_determine_python_path(), + } + return ctxt + + class ZeroMQContext(OSContextGenerator): interfaces = ['zeromq-configuration'] @@ -1512,3 +1553,36 @@ def setup_aa_profile(self): "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. 
+ Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e75..80219d66 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index. 
+ return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 94fc996c..d1cb68db 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 04cadb3a..3638e65e 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -54,6 +54,7 @@ cmp_pkgrevno, ) # flake8: noqa -- ignore F401 for this import +UPDATEDB_PATH = '/etc/updatedb.conf' def service_start(service_name): """Start a system service""" @@ -306,15 +307,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +687,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. 
- :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +701,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -749,3 +752,25 @@ def is_container(): else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab14..f9e4c3af 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..2b0a562e 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -1133,3 +1133,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. 
{} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) From 702ed91d22c7c091d6cabdb6d80030ef9ed62b74 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 1 Feb 2017 12:18:14 -0600 Subject: [PATCH 1263/2699] Remove old revision file This file is likely an artifact of old charm store revision tracking and should not be in the code tree. 
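A side note on the charm-helpers sync above: the updatedb() helper added
to core/host.py is a pure string transform, so its behaviour is easy to
check in isolation. A minimal sketch, assuming a stock updatedb.conf and
that the synced charmhelpers tree is importable:

    from charmhelpers.core.host import updatedb

    conf = 'PRUNE_BIND_MOUNTS="yes"\nPRUNEPATHS="/tmp /var/spool"\n'
    print(updatedb(conf, '/var/lib/ceph'))
    # PRUNE_BIND_MOUNTS="yes"
    # PRUNEPATHS="/tmp /var/spool /var/lib/ceph"

Paths already present are left alone, which is what makes
add_to_updatedb_prunepath() safe to run on every hook invocation.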
Change-Id: I44378d301c68f8ee771bdd0ba0c67c559c1b9200 --- ceph-mon/revision | 1 - 1 file changed, 1 deletion(-) delete mode 100644 ceph-mon/revision diff --git a/ceph-mon/revision b/ceph-mon/revision deleted file mode 100644 index ffda4e73..00000000 --- a/ceph-mon/revision +++ /dev/null @@ -1 +0,0 @@ -105 \ No newline at end of file From 89247f9f7b9288867a84ffe7832cf58f3f8b4cc4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 2 Feb 2017 08:21:08 -0500 Subject: [PATCH 1264/2699] Remove precise in readme links Change-Id: Ie3270770de3ca3764a057686b2d4d01a8f821cdb Closes-bug: 1629706 --- ceph-mon/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 5d66b597..4dc3f41e 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -51,8 +51,8 @@ This charm supports pausing and resuming ceph's health functions on a cluster, f You can use the Ceph OSD and Ceph Radosgw charms: -- [Ceph OSD](https://jujucharms.com/precise/ceph-osd) -- [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) +- [Ceph OSD](https://jujucharms.com/ceph-osd) +- [Ceph Rados Gateway](https://jujucharms.com/ceph-radosgw) ## Network Space support @@ -79,7 +79,7 @@ Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/ra # Contact Information -## Authors +## Authors - Paul Collins , - James Page From 0ece2f198334c8ef71351828dd497735a1632f7b Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Tue, 17 Jan 2017 11:18:00 -0800 Subject: [PATCH 1265/2699] Rolling Upgrade Rolling upgrade documentation Change-Id: Idcc4e478f68210752daefd5c9cbb76b334fc1413 --- ceph-mon/README.md | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 5d66b597..8a2cc3f5 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -54,6 +54,43 @@ You can use the Ceph OSD and Ceph Radosgw charms: - [Ceph OSD](https://jujucharms.com/precise/ceph-osd) - [Ceph Rados Gateway](https://jujucharms.com/precise/ceph-radosgw) +## Rolling Upgrades + +ceph-mon and ceph-osd charms have the ability to initiate a rolling upgrade. +This is initiated by setting the config value for `source`. To perform a +rolling upgrade first set the source for ceph-mon. Watch `juju status`. +Once the monitor cluster is upgraded proceed to setting the ceph-osd source +setting. Again watch `juju status` for output. The monitors and osds will +sort themselves into a known order and upgrade one by one. As each server is +upgrading the upgrade code will down all the monitor or osd processes on that +server, apply the update and then restart them. You will notice in the +`juju status` output that the servers will tell you which previous server they +are waiting on. + +#### Supported Upgrade Paths +Currently the following upgrade paths are supported using +the [Ubuntu Cloud Archive](https://wiki.ubuntu.com/OpenStack/CloudArchive): +- trusty-firefly -> trusty-hammer +- trusty-hammer -> trusty-jewel + +Firefly is available in Trusty, Hammer is in Trusty-Juno (end of life), +Trusty-Kilo, Trusty-Liberty, and Jewel is available in Trusty-Mitaka. + +For example if the current config source setting is: `cloud:trusty-liberty` +changing that to `cloud:trusty-mitaka` will initiate a rolling upgrade of +the monitor cluster from hammer to jewel. 
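Behind the scenes the charms refuse hops that are not in their
supported-path table. A simplified sketch of that check; the mapping
mirrors the UPGRADE_PATHS dict shipped in charms.ceph:

    # A dict of valid ceph upgrade paths, mapped old -> new.
    UPGRADE_PATHS = {
        'firefly': 'hammer',
        'hammer': 'jewel',
    }

    def upgrade_available(current, requested):
        # One supported hop at a time: firefly -> hammer or
        # hammer -> jewel; firefly -> jewel takes two passes.
        return UPGRADE_PATHS.get(current) == requested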
+ +#### Edge cases +There's an edge case in the upgrade code where if the previous node never +starts upgrading itself then the rolling upgrade can hang forever. If you +notice this has happened it can be fixed by setting the appropriate key in the +ceph monitor cluster. The monitor cluster will have +keys that look like `ceph-mon_ip-ceph-mon-0_1484680239.573482_start` and +`ceph-mon_ip-ceph-mon-0_1484680274.181742_stop`. What each server is looking for +is that stop key to indicate that the previous server upgraded successfully and +it's safe to take itself down. If the stop key is not present it will wait +10 minutes, then consider that server dead and move on. + ## Network Space support This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. @@ -79,7 +116,7 @@ Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/ra # Contact Information -## Authors +## Authors - Paul Collins , - James Page From f65eef8d14102733d5d0558699030d3a352829d9 Mon Sep 17 00:00:00 2001 From: Anh Tran Date: Mon, 6 Feb 2017 10:51:13 +0700 Subject: [PATCH 1266/2699] Typo fix: maintainance => maintenance Change-Id: I5e6970d6c281cc4eba5247f1871030ab0677d56c --- ceph-mon/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 920ace94..d17c44b6 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -43,7 +43,7 @@ storage devices. ## Actions -This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintainance on a machine. to pause or resume, call: +This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. to pause or resume, call: `juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` From 8fd19cbaa2fe0c5bfadc9b1255733f49b2b965d3 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 2 Nov 2016 13:52:21 +0200 Subject: [PATCH 1267/2699] Collect Metrics Adding metrics collection to Ceph. Change-Id: I582f7e778fbcb01c518e66d34340f330f61c695a --- ceph-mon/hooks/ceph_hooks.py | 45 ++++++++++++++++++++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 45 +++++++++++++++++++++ ceph-mon/hooks/collect-metrics | 1 + ceph-mon/metrics.yaml | 4 ++ 4 files changed, 94 insertions(+), 1 deletion(-) create mode 120000 ceph-mon/hooks/collect-metrics create mode 100644 ceph-mon/metrics.yaml diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e2f79f15..96dbcb8a 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
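# NOTE: the collect_metrics hook below divides by decimal.Decimal(2 ** 20)
# when converting KB to GB; under Python 2 a plain kb_used / (2 ** 20)
# would floor-divide two ints, so e.g. 1572864 KB would be reported as
# '1' instead of '1.5'.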
+import decimal import os import subprocess import sys @@ -44,7 +45,7 @@ relations_of_type, status_set, local_unit, - application_version_set) + application_version_set, INFO, ERROR, add_metric) from charmhelpers.core.host import ( service_restart, mkdir, @@ -115,6 +116,48 @@ def check_for_upgrade(): ceph.pretty_print_upgrade_paths())) +@hooks.hook('collect-metrics') +def collect_metrics(): + if not is_leader(): + # Only collect metrics on the leader + return + log("Collecting metrics") + if not ceph.is_quorum(): + log("Waiting on Ceph monitor quorum before collecting metrics") + return + + try: + import rados + + cluster = rados.Rados( + conffile=os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf')) + cluster.connect(timeout=60) # 1 minute timeout + log("Gathering pool stats", level=INFO) + pools = cluster.list_pools() + kb_used = 0 + for pool in pools: + with cluster.open_ioctx(pool) as ioctx: + pool_stats = ioctx.get_stats() + kb_used += pool_stats['num_kb'] + gb_used = str(kb_used / decimal.Decimal(2 ** 20)) + log("gb_used: {}".format(gb_used), level=INFO) + try: + add_metric('gb-used={}'.format(gb_used)) + except EnvironmentError as call_error: + log("add-metric failed with error: {}," + "skipping metrics collection".format(str(call_error)), + level=ERROR) + log("Disconnecting from Ceph", level=DEBUG) + cluster.shutdown() + except (rados.IOError, + rados.ObjectNotFound, + rados.NoData, + rados.NoSpace, + rados.PermissionError) as rados_error: + log("librados failed with error: {}, skipping metrics " + "collection".format(str(rados_error)), level=ERROR) + + @hooks.hook('install.real') @harden() def install(): diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 94fc996c..e44e22bf 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. 
May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-mon/hooks/collect-metrics b/ceph-mon/hooks/collect-metrics new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/collect-metrics @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metrics.yaml b/ceph-mon/metrics.yaml new file mode 100644 index 00000000..e75f3640 --- /dev/null +++ b/ceph-mon/metrics.yaml @@ -0,0 +1,4 @@ +metrics: + gb-used: + type: gauge + description: Total number of gb bytes used From 50c7f90dbe66b029a468de95bb4563eee864fe66 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 6 Feb 2017 10:44:34 -0800 Subject: [PATCH 1268/2699] Public Network Support Add public network support to CephFS. This patch also adds in support for ipv6 and juju network spaces. Closes-Bug: 1659338 Change-Id: Ibfb6377f5831f26da302308818c093c9d7e715dc --- ceph-fs/src/config.yaml | 20 ++++++++ ceph-fs/src/metadata.yaml | 2 + ceph-fs/src/reactive/ceph_fs.py | 84 +++++++++++++++++++++++++++++++-- ceph-fs/src/templates/ceph.conf | 7 +++ 4 files changed, 108 insertions(+), 5 deletions(-) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index bc40876c..20dd015f 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -30,3 +30,23 @@ options: default: False description: | If set to True, supporting services will log to syslog. + ceph-public-network: + type: string + default: + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24). + If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. + prefer-ipv6: + type: boolean + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy extension must be + disabled and a non-temporary address must be configured/available on + your network interface. 
\ No newline at end of file diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 257aed4c..5f0b5117 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -16,3 +16,5 @@ subordinate: false requires: ceph-mds: interface: ceph-mds +extra-bindings: + public: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index ca6ace6c..72158b64 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -18,19 +18,27 @@ from charms.reactive import when, when_not, set_state from charmhelpers.core.hookenv import ( - application_version_set, config, log, ERROR) + application_version_set, config, log, ERROR, cached, DEBUG, unit_get, + network_get_primary_address, + status_set) from charmhelpers.core.host import service_restart from charmhelpers.contrib.network.ip import ( - get_address_in_network -) + get_address_in_network, + get_ipv6_addr) + from charmhelpers.fetch import ( get_upstream_version, -) - + apt_install, filter_installed_packages) import jinja2 from charms.apt import queue_install +try: + import dns.resolver +except ImportError: + apt_install(filter_installed_packages(['python-dnspython']), + fatal=True) + import dns.resolver TEMPLATES_DIR = 'templates' VERSION_PACKAGE = 'ceph-common' @@ -76,6 +84,7 @@ def config_changed(ceph_client): os.makedirs(key_path) cephx_key = os.path.join(key_path, 'keyring') + ceph_context = { 'fsid': ceph_client.fsid(), 'auth_supported': ceph_client.auth(), @@ -86,6 +95,15 @@ def config_changed(ceph_client): 'mds_name': socket.gethostname(), } + networks = get_networks('ceph-public-network') + if networks: + ceph_context['ceph_public_network'] = ', '.join(networks) + elif config('prefer-ipv6'): + dynamic_ipv6_address = get_ipv6_addr()[0] + ceph_context['public_addr'] = dynamic_ipv6_address + else: + ceph_context['public_addr'] = get_public_addr() + try: with open(charm_ceph_conf, 'w') as ceph_conf: ceph_conf.write(render_template('ceph.conf', ceph_context)) @@ -115,3 +133,59 @@ def get_networks(config_opt='ceph-public-network'): return [n for n in networks if get_address_in_network(n)] return [] + + +@cached +def get_public_addr(): + if config('ceph-public-network'): + return get_network_addrs('ceph-public-network')[0] + + try: + return network_get_primary_address('public') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return get_host_ip() + + +@cached +def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return get_ipv6_addr()[0] + + hostname = hostname or unit_get('private-address') + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + + +def get_network_addrs(config_opt): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the + addresses we have configured on any of those networks. 
+ """ + addrs = [] + networks = config(config_opt) + if networks: + networks = networks.split() + addrs = [get_address_in_network(n) for n in networks] + addrs = [a for a in addrs if a] + + if not addrs: + if networks: + msg = ("Could not find an address on any of '%s' - resolve this " + "error to retry" % networks) + status_set('blocked', msg) + raise Exception(msg) + else: + return [get_host_ip()] + + return addrs diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf index df795b09..9490e8c0 100644 --- a/ceph-fs/src/templates/ceph.conf +++ b/ceph-fs/src/templates/ceph.conf @@ -14,6 +14,13 @@ mon cluster log to syslog = {{ use_syslog }} debug mon = {{ loglevel }}/5 debug osd = {{ loglevel }}/5 +{% if ceph_public_network %} +public network = {{ ceph_public_network }} +{%- endif %} +{%- if public_addr %} +public addr = {{ public_addr }} +{%- endif %} + [client] log file = /var/log/ceph.log From e65484b3d1d49b29b85b22829356fc2265b024b6 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 10 Feb 2017 09:06:17 +0000 Subject: [PATCH 1269/2699] mds: ensure relation_get is scoped for mds-name mds_relation_joined can be called outside of the scope of the mds-relation-changed hook execution; ensure that the relation id and unit name parameters are used so that MDS keys with a name of 'None' are not created. Change-Id: Ied8d5306ab77b8e68efd0a8e5df4087a067f0367 --- ceph-mon/hooks/ceph_hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 96dbcb8a..4df16aad 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -482,7 +482,8 @@ def mds_relation_joined(relid=None, unit=None): if ceph.is_quorum() and related_osds(): log('mon cluster in quorum and OSDs related' '- providing mds client with keys') - mds_name = relation_get('mds-name') + mds_name = relation_get(attribute='mds-name', + rid=relid, unit=unit) if not unit: unit = remote_unit() public_addr = get_public_addr() From 6e51f790e2d83d0e48bb77703710eed987ce7f61 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 10 Feb 2017 07:56:15 -0500 Subject: [PATCH 1270/2699] Sync in charms.ceph This brings in the new broker change to restrict key access by groups Change-Id: I9c3a973f996feec5383b174ef5a6a454ed4572c5 Partial-Bug: 1424771 --- ceph-mon/lib/ceph/__init__.py | 86 +++++++++++---- ceph-mon/lib/ceph/ceph_broker.py | 179 ++++++++++++++++++++++++++++++- 2 files changed, 244 insertions(+), 21 deletions(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 7f80b2c5..e87aef9f 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from _ctypes import POINTER, byref import ctypes import collections import json @@ -309,22 +310,52 @@ def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): def get_block_uuid(block_dev): """ - This queries blkid to get the uuid for a block device. + This queries blkid to get the uuid for a block device. Note: This function + needs to be called with root priv. It will raise an error otherwise. :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. + :return: The UUID of the device or None on Error. 
Raises OSError """ try: - block_info = subprocess.check_output( - ['blkid', '-o', 'export', block_dev]) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), + blkid = ctypes.cdll.LoadLibrary("libblkid.so") + # Header signature + # extern int blkid_probe_lookup_value(blkid_probe pr, const char *name, + # const char **data, size_t *len); + blkid.blkid_new_probe_from_filename.argtypes = [ctypes.c_char_p] + blkid.blkid_probe_lookup_value.argtypes = [ctypes.c_void_p, + ctypes.c_char_p, + POINTER(ctypes.c_char_p), + POINTER(ctypes.c_ulong)] + except OSError as err: + log('get_block_uuid loading libblkid.so failed with error: {}'.format( + os.strerror(err.errno)), level=ERROR) + raise err + if not os.path.exists(block_dev): return None + probe = blkid.blkid_new_probe_from_filename(ctypes.c_char_p(block_dev)) + if probe < 0: + log('get_block_uuid new_probe_from_filename failed: {}'.format( + os.strerror(probe)), + level=ERROR) + raise OSError(probe, os.strerror(probe)) + result = blkid.blkid_do_probe(probe) + if result != 0: + log('get_block_uuid do_probe failed with error: {}'.format( + os.strerror(result)), + level=ERROR) + raise OSError(result, os.strerror(result)) + uuid = ctypes.c_char_p() + result = blkid.blkid_probe_lookup_value(probe, + ctypes.c_char_p( + 'UUID'.encode('ascii')), + byref(uuid), None) + if result < 0: + log('get_block_uuid lookup_value failed with error: {}'.format( + os.strerror(result)), + level=ERROR) + raise OSError(result, os.strerror(result)) + blkid.blkid_free_probe(probe) + return ctypes.string_at(uuid).decode('ascii') def check_max_sectors(save_settings_dict, @@ -390,6 +421,7 @@ def tune_dev(block_dev): if uuid is None: log('block device {} uuid is None. Unable to save to ' 'hdparm.conf'.format(block_dev), level=DEBUG) + return save_settings_dict = {} log('Tuning device {}'.format(block_dev)) status_set('maintenance', 'Tuning device {}'.format(block_dev)) @@ -1430,10 +1462,17 @@ def upgrade_monitor(new_version): service_stop('ceph-mon-all') apt_install(packages=PACKAGES, fatal=True) - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph_user(), - group=ceph_user()) + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. + if new_version == 'jewel': + # Ensure the ownership of Ceph's directories is correct + owner = ceph_user() + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + if systemd(): for mon_id in get_local_mon_ids(): service_start('ceph-mon@{}'.format(mon_id)) @@ -1608,10 +1647,18 @@ def upgrade_osd(new_version): service_stop('ceph-osd-all') apt_install(packages=PACKAGES, fatal=True) - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph_user(), - group=ceph_user()) + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. Only do + # it when necessary as this is an expensive operation to run. 
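# NOTE: follow_links=True leans on the chownr()/os.walk change from the
# charm-helpers sync earlier in this series (os.walk(path,
# followlinks=follow_links)); previously the walk never descended through
# symlinked directories, so anything under /var/lib/ceph reachable only
# via a symlink could stay root-owned after the move to running the
# daemons as ceph:ceph.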
+ if new_version == 'jewel': + owner = ceph_user() + status_set('maintenance', 'Updating file ownership for OSDs') + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + if systemd(): for osd_id in get_local_osd_ids(): service_start('ceph-osd@{}'.format(osd_id)) @@ -1642,7 +1689,6 @@ def list_pools(service): log("rados lspools failed with error: {}".format(err.output)) raise - # A dict of valid ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = { 'firefly': 'hammer', diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 33d0df8d..f15b9bd4 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -34,6 +34,8 @@ delete_pool, erasure_profile_exists, get_osds, + monitor_key_get, + monitor_key_set, pool_exists, pool_set, remove_pool_snapshot, @@ -49,7 +51,7 @@ # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message -from subprocess import check_output, CalledProcessError +from subprocess import check_call, check_output, CalledProcessError POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] @@ -157,11 +159,169 @@ def handle_create_erasure_profile(request, service): data_chunks=k, coding_chunks=m, locality=l) +def handle_add_permissions_to_key(request, service): + """ + Groups are defined by the key cephx.groups.(namespace-)?-(name). This key + will contain a dict serialized to JSON with data about the group, including + pools and members. + + A group can optionally have a namespace defined that will be used to + further restrict pool access. + """ + service_name = request.get('name') + group_name = request.get('group') + group_namespace = request.get('group-namespace') + if group_namespace: + group_name = "{}-{}".format(group_namespace, group_name) + group = get_group(group_name=group_name) + service_obj = get_service_groups(service=service_name, + namespace=group_namespace) + format("Service object: {}".format(service_obj)) + permission = request.get('group-permission') or "rwx" + if service_name not in group['services']: + group['services'].append(service_name) + save_group(group=group, group_name=group_name) + if permission not in service_obj['group_names']: + service_obj['group_names'][permission] = [] + if group_name not in service_obj['group_names'][permission]: + service_obj['group_names'][permission].append(group_name) + save_service(service=service_obj, service_name=service_name) + service_obj['groups'][group_name] = group + update_service_permissions(service_name, service_obj, group_namespace) + + +def update_service_permissions(service, service_obj=None, namespace=None): + """Update the key permissions for the named client in Ceph""" + if not service_obj: + service_obj = get_service_groups(service=service, namespace=namespace) + permissions = pool_permission_list_for_service(service_obj) + call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e)) + + +def add_pool_to_group(pool, group, namespace=None): + """Add a named pool to a named group""" + group_name = group + if namespace: + group_name = "{}-{}".format(namespace, group_name) + group = get_group(group_name=group_name) + group["pools"].append(pool) + save_group(group, group_name=group_name) + for service in group['services']: + 
update_service_permissions(service, namespace=namespace) + + +def pool_permission_list_for_service(service): + """Build the permission string for Ceph for a given service""" + permissions = "" + permission_types = {} + for permission, group in service["group_names"].items(): + if permission not in permission_types: + permission_types[permission] = [] + for item in group: + permission_types[permission].append(item) + for permission, groups in permission_types.items(): + permission = " allow {}".format(permission) + for group in groups: + for pool in service['groups'][group]['pools']: + permission = "{} pool={}".format(permission, pool) + permissions += permission + return ["mon", "allow r", "osd", permissions.strip()] + + +def get_service_groups(service, namespace=None): + """ + Services are objects stored with some metadata, they look like (for a + service named "nova"): + { + group_names: {'rwx': ['images']}, + groups: {} + } + After populating the group, it looks like: + { + group_names: {'rwx': ['images']}, + groups: { + 1 'images': { + pools: ['glance'], + services: ['nova'] + } + } + } + """ + service_json = monitor_key_get(service='admin', + key="cephx.services.{}".format(service)) + try: + service = json.loads(service_json) + except TypeError: + service = None + except ValueError: + service = None + if service: + for permission, groups in service['group_names'].items(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + service['groups'][group] = get_group(group_name=name) + else: + service = {'group_names': {}, 'groups': {}} + return service + + +def get_group(group_name): + """ + A group is a structure to hold data about a named group, structured as: + { + pools: ['glance'], + services: ['nova'] + } + """ + group_key = get_group_key(group_name=group_name) + group_json = monitor_key_get(service='admin', key=group_key) + try: + group = json.loads(group_json) + except TypeError: + group = None + except ValueError: + group = None + if not group: + group = { + 'pools': [], + 'services': [] + } + return group + + +def save_service(service_name, service): + """Persist a service in the monitor cluster""" + service['groups'] = {} + return monitor_key_set(service='admin', + key="cephx.services.{}".format(service_name), + value=json.dumps(service)) + + +def save_group(group, group_name): + """Persist a group in the monitor cluster""" + group_key = get_group_key(group_name=group_name) + return monitor_key_set(service='admin', + key=group_key, + value=json.dumps(group)) + + +def get_group_key(group_name): + """Build group key""" + return 'cephx.groups.{}'.format(group_name) + + def handle_erasure_pool(request, service): pool_name = request.get('name') erasure_profile = request.get('erasure-profile') quota = request.get('max-bytes') weight = request.get('weight') + group_name = request.get('group') if erasure_profile is None: erasure_profile = "default-canonical" @@ -172,6 +332,13 @@ def handle_erasure_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + # TODO: Default to 3/2 erasure coding. 
I believe this requires min 5 osds if not erasure_profile_exists(service=service, name=erasure_profile): # TODO: Fail and tell them to create the profile or default @@ -200,6 +367,7 @@ def handle_replicated_pool(request, service): replicas = request.get('replicas') quota = request.get('max-bytes') weight = request.get('weight') + group_name = request.get('group') # Optional params pg_num = request.get('pg_num') @@ -215,6 +383,13 @@ def handle_replicated_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + kwargs = {} if pg_num: kwargs['pg_num'] = pg_num @@ -570,6 +745,8 @@ def process_requests_v1(reqs): ret = handle_rgw_create_user(request=req, service=svc) elif op == "move-osd-to-bucket": ret = handle_put_osd_in_bucket(request=req, service=svc) + elif op == "add-permissions-to-key": + ret = handle_add_permissions_to_key(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) From ad8b11f91076497b744138d26876d4e615ff3849 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 10 Feb 2017 16:12:04 +0000 Subject: [PATCH 1271/2699] Revert "Collect Metrics" This reverts commit 8fd19cbaa2fe0c5bfadc9b1255733f49b2b965d3. Change-Id: I27e83a54767801862e16539a565816c9621def7c --- ceph-mon/hooks/ceph_hooks.py | 45 +-------------------- ceph-mon/hooks/charmhelpers/core/hookenv.py | 45 --------------------- ceph-mon/hooks/collect-metrics | 1 - ceph-mon/metrics.yaml | 4 -- 4 files changed, 1 insertion(+), 94 deletions(-) delete mode 120000 ceph-mon/hooks/collect-metrics delete mode 100644 ceph-mon/metrics.yaml diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 96dbcb8a..e2f79f15 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import decimal import os import subprocess import sys @@ -45,7 +44,7 @@ relations_of_type, status_set, local_unit, - application_version_set, INFO, ERROR, add_metric) + application_version_set) from charmhelpers.core.host import ( service_restart, mkdir, @@ -116,48 +115,6 @@ def check_for_upgrade(): ceph.pretty_print_upgrade_paths())) -@hooks.hook('collect-metrics') -def collect_metrics(): - if not is_leader(): - # Only collect metrics on the leader - return - log("Collecting metrics") - if not ceph.is_quorum(): - log("Waiting on Ceph monitor quorum before collecting metrics") - return - - try: - import rados - - cluster = rados.Rados( - conffile=os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf')) - cluster.connect(timeout=60) # 1 minute timeout - log("Gathering pool stats", level=INFO) - pools = cluster.list_pools() - kb_used = 0 - for pool in pools: - with cluster.open_ioctx(pool) as ioctx: - pool_stats = ioctx.get_stats() - kb_used += pool_stats['num_kb'] - gb_used = str(kb_used / decimal.Decimal(2 ** 20)) - log("gb_used: {}".format(gb_used), level=INFO) - try: - add_metric('gb-used={}'.format(gb_used)) - except EnvironmentError as call_error: - log("add-metric failed with error: {}," - "skipping metrics collection".format(str(call_error)), - level=ERROR) - log("Disconnecting from Ceph", level=DEBUG) - cluster.shutdown() - except (rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as rados_error: - log("librados failed with error: {}, skipping metrics " - "collection".format(str(rados_error)), level=ERROR) - - @hooks.hook('install.real') @harden() def install(): diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e44e22bf..94fc996c 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -616,20 +616,6 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1035,34 +1021,3 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. 
May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-mon/hooks/collect-metrics b/ceph-mon/hooks/collect-metrics deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/collect-metrics +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metrics.yaml b/ceph-mon/metrics.yaml deleted file mode 100644 index e75f3640..00000000 --- a/ceph-mon/metrics.yaml +++ /dev/null @@ -1,4 +0,0 @@ -metrics: - gb-used: - type: gauge - description: Total number of gb bytes used From a0af736aed7c021ec40d0e260b6255331b5f41b6 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 9 Feb 2017 14:33:21 +0000 Subject: [PATCH 1272/2699] General tidy of charm ready for release Switch functional tests to use cephx; Tests should really align to default configuration, which is to have cephx enabled for a deployment. Updated README to be a little simpler, and default to Xenial rather than Trusty. Added proper Ceph icon. Drop layer dependency on ceph-base - it brings in loads of stuff we don't need. Tidy config.yaml to drop duplicate ceph-public-network. Depends-On: Ied8d5306ab77b8e68efd0a8e5df4087a067f0367 Change-Id: I4ab5438db93ae613e9232ca42df1c23614e212e6 --- ceph-fs/src/README.md | 16 +- ceph-fs/src/config.yaml | 11 +- ceph-fs/src/icon.svg | 693 +++++++++++++++----------- ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/tests/basic_deployment.py | 12 +- ceph-fs/src/wheelhouse.txt | 2 + 6 files changed, 427 insertions(+), 309 deletions(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index c01cee68..6f85c5ed 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -12,20 +12,14 @@ Usage Boot things up by using: - juju deploy -n 3 --config ceph.yaml ceph-mon - juju deploy -n 3 --config ceph.yaml ceph-osd + juju deploy -n 3 ceph-mon + juju deploy -n 3 ceph-osd + In my example deployments on EC2 the following ceph.yaml will work: -``` -ceph-mon: - source: cloud:trusty-mitaka -ceph-osd: - osd-devices: /dev/xvdb - ephemeral-unmount: "/mnt" - source: cloud:trusty-mitaka -``` + You can then deploy this charm by simply doing: - juju deploy --config ceph.yaml ceph-fs + juju deploy ceph-fs juju add-relation ceph-fs ceph-mon Once the ceph-mon and osd charms have bootstrapped the cluster, the ceph-mon diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 20dd015f..124b2fe2 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -1,13 +1,4 @@ options: - ceph-public-network: - type: string - default: - description: | - The IP address and netmask of the public (front-side) network (e.g., - 192.168.0.0/24) - . - If multiple networks are to be used, a space-delimited list of a.b.c.d/x - can be provided. loglevel: default: 1 type: int @@ -49,4 +40,4 @@ options: NOTE: these charms do not currently support IPv6 privacy extension. 
In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on - your network interface. \ No newline at end of file + your network interface. diff --git a/ceph-fs/src/icon.svg b/ceph-fs/src/icon.svg index e092eef7..de53ab2e 100644 --- a/ceph-fs/src/icon.svg +++ b/ceph-fs/src/icon.svg @@ -1,279 +1,414 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index c691470b..d22dc1e9 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,4 +1,4 @@ -includes: ['layer:apt', 'layer:ceph-base', 'interface:ceph-mds'] # if you use any interfaces, add them here +includes: ['layer:apt', 'layer:ceph-base', 'interface:ceph-mds'] options: apt: packages: diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 28683bb5..689a750a 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -81,14 +81,10 @@ def _configure_services(self, **kwargs): """Configure all of the services. :param **kwargs: """ - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. + # NOTE(jamespage): fix fsid to allow later validation ceph_mon_config = { - 'monitor-count': '3', - 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', } - # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { @@ -184,9 +180,9 @@ def test_300_ceph_config(self): 'err to syslog': 'false', 'clog to syslog': 'false', 'mon cluster log to syslog': 'false', - 'auth cluster required': 'none', - 'auth service required': 'none', - 'auth client required': 'none' + 'auth cluster required': 'cephx', + 'auth service required': 'cephx', + 'auth client required': 'cephx' }, 'mon': { 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index 8501e11b..bbb9970c 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -1 +1,3 @@ +netifaces +dnspython3 ceph_api From ba45873c54990b59e81e37d14bc377311bf889e7 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 14 Feb 2017 18:08:13 +0000 Subject: [PATCH 1273/2699] Add support for cephx pool grouping and permissions Sync charmhelpers and add configuration option to allow access to ceph pools to be limited based on grouping. The radosgw will only require access to pools in the 'objects' group - which probably won't be shared with other services, but does ensure that compromise on a radosgw unit does not allow access to volumes and instances on the same underlying ceph backend. Added creation of .rgw.root pool to broker requests to ensure that all pools are added to the 'objects' group, avoiding the need for rw permissionson the radosgw.gateway key. 
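Concretely, the gateway ends up building a broker request along these
lines; a sketch of the calls this patch wires up in hooks/ceph.py, with
illustrative pool names and weights, and gated on the new
restrict-ceph-pools option:

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    # Pools are created into a shared 'objects' group...
    rq.add_op_create_pool(name='.rgw.buckets', replica_count=3,
                          weight=19, group='objects')
    # ...and the key is then granted rwx on that group only, instead of
    # blanket rwx across every pool in the cluster.
    rq.add_op_request_access_to_group(name='objects', permission='rwx',
                                      key_name='radosgw.gateway')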
Change-Id: I88953b44e9994c1d0235f617faaeb892b7abed9c Partial-Bug: 1424771 --- ceph-radosgw/charm-helpers-tests.yaml | 1 + ceph-radosgw/config.yaml | 5 + ceph-radosgw/hooks/ceph.py | 43 +- .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../charmhelpers/contrib/openstack/context.py | 74 ++ .../openstack/templates/memcached.conf | 53 + .../section-keystone-authtoken-mitaka | 3 + .../templates/wsgi-openstack-api.conf | 100 ++ .../charmhelpers/contrib/openstack/utils.py | 70 +- .../contrib/storage/linux/ceph.py | 16 +- .../hooks/charmhelpers/core/hookenv.py | 45 + ceph-radosgw/hooks/charmhelpers/core/host.py | 227 +++- ceph-radosgw/hooks/charmhelpers/osplatform.py | 6 + ceph-radosgw/hooks/hooks.py | 11 +- .../charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../tests/charmhelpers/core/__init__.py | 13 + .../tests/charmhelpers/core/decorators.py | 55 + ceph-radosgw/tests/charmhelpers/core/files.py | 43 + ceph-radosgw/tests/charmhelpers/core/fstab.py | 132 ++ .../tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ ceph-radosgw/tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + .../tests/charmhelpers/core/hugepage.py | 69 ++ .../tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../charmhelpers/core/services/__init__.py | 16 + .../tests/charmhelpers/core/services/base.py | 351 ++++++ .../charmhelpers/core/services/helpers.py | 290 +++++ .../tests/charmhelpers/core/strutils.py | 70 ++ .../tests/charmhelpers/core/sysctl.py | 54 + .../tests/charmhelpers/core/templating.py | 84 ++ .../tests/charmhelpers/core/unitdata.py | 518 ++++++++ ceph-radosgw/unit_tests/test_ceph.py | 139 ++- 39 files changed, 4912 insertions(+), 133 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf create mode 100644 ceph-radosgw/tests/charmhelpers/core/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/decorators.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/files.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/fstab.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/hookenv.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/hugepage.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/base.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/helpers.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/strutils.py create mode 100644 
ceph-radosgw/tests/charmhelpers/core/sysctl.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/templating.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/unitdata.py diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml index 48b12f6f..883089de 100644 --- a/ceph-radosgw/charm-helpers-tests.yaml +++ b/ceph-radosgw/charm-helpers-tests.yaml @@ -1,5 +1,6 @@ branch: lp:charm-helpers destination: tests/charmhelpers include: + - core - contrib.amulet - contrib.openstack.amulet diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 77768f00..d0f1f9e6 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -264,4 +264,9 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. + restrict-ceph-pools: + default: False + type: boolean + description: | + Optionally restrict Ceph key permissions to access pools as required. diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index b55dd2c8..2a87962a 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -250,6 +250,23 @@ def get_create_rgw_pools_rq(prefix=None): http://docs.ceph.com/docs/master/radosgw/config/#create-pools for list of supported/required pools. """ + def _add_light_pool(rq, pool, pg_num, prefix=None): + # Per the Ceph PG Calculator, all of the lightweight pools get 0.10% + # of the data by default and only the .rgw.buckets.* get higher values + weights = { + '.rgw.buckets.index': 1.00, + '.rgw.buckets.extra': 1.00 + } + w = weights.get(pool, 0.10) + if prefix: + pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) + if pg_num > 0: + rq.add_op_create_pool(name=pool, replica_count=replicas, + pg_num=pg_num, group='objects') + else: + rq.add_op_create_pool(name=pool, replica_count=replicas, + weight=w, group='objects') + from apt import apt_pkg apt_pkg.init() @@ -272,7 +289,7 @@ def get_create_rgw_pools_rq(prefix=None): for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) rq.add_op_create_pool(name=pool, replica_count=replicas, - weight=bucket_weight) + weight=bucket_weight, group='objects') # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data @@ -289,20 +306,18 @@ def get_create_rgw_pools_rq(prefix=None): '.users.email', '.users.swift', '.users.uid'] - weights = { - '.rgw.buckets.index': 1.00, - '.rgw.buckets.extra': 1.00 - } pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: - # Per the Ceph PG Calculator, all of the lightweight pools get 0.10% - # of the data by default and only the .rgw.buckets.* get higher values - w = weights.get(pool, 0.10) - pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - if pg_num > 0: - rq.add_op_create_pool(name=pool, replica_count=replicas, - pg_num=pg_num) - else: - rq.add_op_create_pool(name=pool, replica_count=replicas, weight=w) + _add_light_pool(rq, pool, pg_num, prefix) + + if prefix: + light_unprefixed = ['.rgw.root'] + for pool in light_unprefixed: + _add_light_pool(rq, pool, pg_num) + + if config('restrict-ceph-pools'): + rq.add_op_request_access_to_group(name="objects", + permission='rwx', + key_name='radosgw.gateway') return rq diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 2d2026e4..e141fc12 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN: + return None + if answers: return str(answers[0]) return None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' 
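[A distilled view of how the new authenticate_keystone() helper picks its endpoint before constructing a v2 or v3 client; the helper name here is hypothetical, but the port/path rules come from the diff:

    def keystone_endpoint(ip, api_version=None, admin_port=False):
        port = 35357 if admin_port else 5000
        path = '/v2.0' if not api_version or api_version == 2 else '/v3'
        return 'http://{}:{}{}'.format(ip, port, path)

    assert keystone_endpoint('10.5.0.10') == 'http://10.5.0.10:5000/v2.0'
    assert keystone_endpoint('10.5.0.10', api_version=3,
                             admin_port=True) == 'http://10.5.0.10:35357/v3'
]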
amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index d5b3a33b..42316331 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ import glob import json +import math import os import re import time @@ -90,6 +91,9 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, get_host_ip, + git_determine_usr_bin, + git_determine_python_path, + enable_memcache, ) from charmhelpers.core.unitdata import kv @@ -1207,6 +1211,43 @@ def __call__(self): return ctxt +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, process_weight=1.00, + admin_process_weight=0.75, public_process_weight=0.25): + self.service_name = name + self.user = name + self.group = name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight 
= public_process_weight + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + total_processes = self.num_cpus * multiplier + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + "usr_bin": git_determine_usr_bin(), + "python_path": git_determine_python_path(), + } + return ctxt + + class ZeroMQContext(OSContextGenerator): interfaces = ['zeromq-configuration'] @@ -1512,3 +1553,36 @@ def setup_aa_profile(self): "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf new file mode 100644 index 00000000..26cb037c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf @@ -0,0 +1,53 @@ +############################################################################### +# [ WARNING ] +# memcached configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### + +# memcached default config file +# 2003 - Jay Bonci +# This configuration file is read by the start-memcached script provided as +# part of the Debian GNU/Linux distribution. + +# Run memcached as a daemon. This command is implied, and is not needed for the +# daemon to run. See the README.Debian that comes with this package for more +# information. +-d + +# Log memcached's output to /var/log/memcached +logfile /var/log/memcached.log + +# Be verbose +# -v + +# Be even more verbose (print client commands as well) +# -vv + +# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default +# Note that the daemon will grow to this size, but does not start out holding this much +# memory +-m 64 + +# Default connection port is 11211 +-p {{ memcache_port }} + +# Run the daemon as root. The start-memcached will default to running as root if no +# -u command is present in this config file +-u memcache + +# Specify which IP address to listen on. 
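[WSGIWorkerConfigContext above sizes the three Apache process groups from the CPU count, the worker-multiplier option, and the per-group weights (1.00 main, 0.75 admin, 0.25 public). A self-contained sketch of the arithmetic:

    import math

    def wsgi_processes(num_cpus, multiplier=1, weights=(1.00, 0.75, 0.25)):
        # returns (processes, admin_processes, public_processes)
        total = num_cpus * multiplier
        return tuple(int(math.ceil(w * total)) for w in weights)

    assert wsgi_processes(4) == (4, 3, 1)
    assert wsgi_processes(2, multiplier=2) == (4, 3, 1)
]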
The default is to listen on all IP addresses +# This parameter is one of the only security measures that memcached has, so make sure +# it's listening on a firewalled interface. +-l {{ memcache_server }} + +# Limit the number of simultaneous incoming connections. The daemon default is 1024 +# -c 1024 + +# Lock down all paged memory. Consult with the README and homepage before you do this +# -k + +# Return error when memory is exhausted (rather than removing items) +# -M + +# Maximize core file limit +# -r diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 7c6f0c35..8e6889e0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -14,4 +14,7 @@ project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} signing_dir = {{ signing_dir }} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf new file mode 100644 index 00000000..315b2a3f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -0,0 +1,100 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. + +{% if port -%} +Listen {{ port }} +{% endif -%} + +{% if admin_port -%} +Listen {{ admin_port }} +{% endif -%} + +{% if public_port -%} +Listen {{ public_port }} +{% endif -%} + +{% if port -%} + + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }} + WSGIScriptAlias / {{ script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + +{% endif -%} + +{% if admin_port -%} + + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-admin + WSGIScriptAlias / {{ admin_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + +{% endif -%} + +{% if public_port -%} + + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-public + WSGIScriptAlias / {{ public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + 
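[MemcacheContext above selects the listen address by Ubuntu release, because trusty's memcached cannot bind '::1' directly and needs the ip6-localhost hosts entry instead. A sketch of that branch, with the release codename passed in as an assumption (it leans on the same alphabetic comparison as the charm code):

    def memcache_settings(codename):
        server = '::1' if codename > 'trusty' else 'ip6-localhost'
        return {'memcache_server': server,
                'memcache_port': '11211',
                'memcache_url': 'inet6:[::1]:11211'}

    assert memcache_settings('xenial')['memcache_server'] == '::1'
    assert memcache_settings('trusty')['memcache_server'] == 'ip6-localhost'
]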
ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e75..80219d66 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index. 
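[On the TODO above: the string comparison `_release >= 'mitaka'` only works because the mitaka-and-later codenames happen to sort alphabetically after the earlier ones. A sketch of the index-based comparison the TODO proposes (release list abbreviated for illustration):

    OS_RELEASES = ['juno', 'kilo', 'liberty', 'mitaka', 'newton', 'ocata']

    def release_at_least(release, baseline='mitaka'):
        return OS_RELEASES.index(release) >= OS_RELEASES.index(baseline)

    assert release_at_least('newton')
    assert not release_at_least('kilo')
]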
+ return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c7..ae7f3f93 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 94fc996c..e44e22bf 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. 
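[With the two CephBrokerRq additions above, the gateway's broker request serializes ops shaped like the following (dict keys from the diff; pool name and weight illustrative):

    ops = [
        {'op': 'create-pool', 'name': '.rgw.buckets', 'replicas': 3,
         'pg_num': None, 'weight': 19.00, 'group': 'objects',
         'group-namespace': None},
        {'op': 'add-permissions-to-key', 'group': 'objects',
         'namespace': None, 'name': 'radosgw.gateway',
         'group-permission': 'rwx'},
    ]

The second op is only appended when restrict-ceph-pools is True, at which point the mons restrict the radosgw.gateway key to 'rwx' on pools in the 'objects' group.]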
May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 04cadb3a..edbb72ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -54,38 +54,138 @@ cmp_pkgrevno, ) # flake8: noqa -- ignore F401 for this import +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_start(service_name): - """Start a system service""" - return service('start', service_name) + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + return service('stop', service_name, **kwargs) -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) +def service_restart(service_name, **kwargs): + """Restart a system service. + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_restart(service_name): - """Restart a system service""" + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ return service('restart', service_name) -def service_reload(service_name, restart_on_failure=False): +def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: - service_result = service('restart', service_name) + service_result = service('restart', service_name, **kwargs) return service_result -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): """Pause a system service. - Stop it, and prevent it from starting again at boot.""" + Stop it, and prevent it from starting again at boot. 
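[On non-systemd systems, service() (extended later in this hunk) appends each kwarg as a key=value token, which is how upstart addresses a single instance of a multi-instance job; systemd invocations ignore the kwargs. A self-contained sketch of the command construction:

    import six

    def build_service_cmd(action, service_name, **kwargs):
        # mirrors the non-systemd branch of service()
        cmd = ['service', service_name, action]
        for key, value in six.iteritems(kwargs):
            cmd.append('%s=%s' % (key, value))
        return cmd

    assert build_service_cmd('restart', 'ceph-osd', id=4) == \
        ['service', 'ceph-osd', 'restart', 'id=4']

One wrinkle visible in the diff: service_restart() documents the kwargs but its body still calls service('restart', service_name) without forwarding them, so the id=4 example only takes effect via service_stop()/service_start().]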
+ + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ stopped = True - if service_running(service_name): - stopped = service_stop(service_name) + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): + initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service""" + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init", "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) - started = service_running(service_name) if not started: - started = service_start(service_name) + started = service_start(service_name, **kwargs) return started -def service(action, service_name): - """Control a system service""" +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ if init_is_systemd(): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -146,15 +264,26 @@ def service(action, service_name): _INIT_D_CONF = "/etc/init.d/{}" -def service_running(service_name): - """Determine whether a system service is running""" +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. 
+ """ if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: - output = subprocess.check_output( - ['status', service_name], + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False @@ -306,15 +435,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -718,6 +849,20 @@ def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + def get_total_ram(): """The total amount of system RAM in bytes. 
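[Two smaller host.py additions in this hunk: rsync() gains a timeout so a stalled transfer fails instead of wedging the hook (it simply prefixes the command with timeout(1)), and owner() maps a path to its owning user and group names. Usage sketch (paths illustrative):

    from charmhelpers.core.host import owner, rsync

    # runs: timeout 120 /usr/bin/rsync -r --delete --executability /src/ /dst/
    rsync('/src/', '/dst/', timeout=120)

    user, group = owner('/etc/ceph')   # e.g. ('root', 'root')
]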
@@ -749,3 +894,25 @@ def is_container(): else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index ea490bbd..d9a4d5c0 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -8,12 +8,18 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. tuple_platform = platform.linux_distribution() current_platform = tuple_platform[0] if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 5a22e964..6b613ea3 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -143,6 +143,12 @@ def config_changed(): for r_id in relation_ids('cluster'): cluster_joined(rid=r_id) + # NOTE(jamespage): Re-exec mon relation for any changes to + # enable ceph pool permissions restrictions + for r_id in relation_ids('mon'): + for unit in related_units(r_id): + mon_relation(r_id, unit) + CONFIGS.write_all() update_nrpe_config() @@ -151,13 +157,14 @@ def config_changed(): @hooks.hook('mon-relation-departed', 'mon-relation-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) -def mon_relation(): +def mon_relation(rid=None, unit=None): rq = ceph.get_create_rgw_pools_rq( prefix=config('pool-prefix')) if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all() - key = relation_get('radosgw_key') + key = relation_get(attribute='radosgw_key', + rid=rid, unit=unit) if key: ceph.import_radosgw_key(key) if not is_unit_paused_set(): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab14..f9e4c3af 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = 
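[updatedb() above is a pure string transform, so the PRUNEPATHS handling can be checked without touching /etc/updatedb.conf:

    from charmhelpers.core.host import updatedb

    text = 'PRUNE_BIND_MOUNTS="yes"\nPRUNEPATHS="/tmp /var/spool"\n'
    out = updatedb(text, '/srv/ceph')
    assert 'PRUNEPATHS="/tmp /var/spool /srv/ceph"' in out
    # idempotent: an already-listed path is not appended twice
    assert updatedb(out, '/srv/ceph') == out
]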
sentry_unit.run(cmd) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' 
amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-radosgw/tests/charmhelpers/core/__init__.py b/ceph-radosgw/tests/charmhelpers/core/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/core/decorators.py b/ceph-radosgw/tests/charmhelpers/core/decorators.py new file mode 100644 index 00000000..6ad41ee4 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/ceph-radosgw/tests/charmhelpers/core/files.py b/ceph-radosgw/tests/charmhelpers/core/files.py new file mode 100644 index 00000000..fdd82b75 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-radosgw/tests/charmhelpers/core/fstab.py b/ceph-radosgw/tests/charmhelpers/core/fstab.py new file mode 100644 index 00000000..d9fa9152 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. 
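[retry_on_exception above allows num_retries further attempts after the first failure, sleeping base_delay * 1, * 2, ... between them. A usage sketch with a deliberately flaky function:

    from charmhelpers.core.decorators import retry_on_exception

    attempts = []

    @retry_on_exception(2, base_delay=0, exc_type=ValueError)
    def flaky():
        attempts.append(1)
        if len(attempts) < 3:
            raise ValueError('not yet')
        return 'ok'

    assert flaky() == 'ok'
    assert len(attempts) == 3    # one call plus two retries
]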
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. ' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e44e22bf --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. 
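[The Fstab class keeps /etc/fstab edits entry-based, and every helper accepts an alternate path, which makes it easy to exercise against a scratch file:

    import tempfile
    from charmhelpers.core.fstab import Fstab

    path = tempfile.mkstemp()[1]
    Fstab.add('/dev/sdb1', '/srv/ceph', 'xfs', options='noatime', path=path)

    entry = Fstab(path=path).get_entry_by_attr('mountpoint', '/srv/ceph')
    assert str(entry) == '/dev/sdb1 /srv/ceph xfs noatime 0 0'
    assert Fstab.remove_by_mountpoint('/srv/ceph', path=path)
]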
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
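[cached() keys its module-level cache on str((func, args, kwargs)) and flush() evicts every entry whose key contains the given substring. A short demonstration:

    from charmhelpers.core.hookenv import cached, flush

    calls = []

    @cached
    def double(x):
        calls.append(x)
        return x * 2

    assert double(21) == 42 and double(21) == 42
    assert calls == [21]       # second call served from the cache
    flush('double')            # evict keys mentioning 'double'
    double(21)
    assert calls == [21, 21]   # recomputed after the flush
]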
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
+    # Flush cache of any relation-gets for local unit
+    flush(local_unit())
+
+
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+    """A list of relation_ids"""
+    reltype = reltype or relation_type()
+    relid_cmd_line = ['relation-ids', '--format=json']
+    if reltype is not None:
+        relid_cmd_line.append(reltype)
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+    return []
+
+
+@cached
+def related_units(relid=None):
+    """A list of related units"""
+    relid = relid or relation_id()
+    units_cmd_line = ['relation-list', '--format=json']
+    if relid is not None:
+        units_cmd_line.extend(('-r', relid))
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+    """Get the json representation of a unit's relation"""
+    unit = unit or remote_unit()
+    relation = relation_get(unit=unit, rid=rid)
+    for key in relation:
+        if key.endswith('-list'):
+            relation[key] = relation[key].split()
+    relation['__unit__'] = unit
+    return relation
+
+
+@cached
+def relations_for_id(relid=None):
+    """Get relations of a specific relation ID"""
+    relation_data = []
+    relid = relid or relation_ids()
+    for unit in related_units(relid):
+        unit_data = relation_for_unit(unit, relid)
+        unit_data['__relid__'] = relid
+        relation_data.append(unit_data)
+    return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+    """Get relations of a specific type"""
+    relation_data = []
+    reltype = reltype or relation_type()
+    for relid in relation_ids(reltype):
+        for relation in relations_for_id(relid):
+            relation['__relid__'] = relid
+            relation_data.append(relation)
+    return relation_data
+
+
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+    """Get a list of relation types supported by this charm"""
+    rel_types = []
+    md = metadata()
+    for key in ('provides', 'requires', 'peers'):
+        section = md.get(key)
+        if section:
+            rel_types.extend(section.keys())
+    return rel_types
+
+
+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
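+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows how relation_ids(), related_units() and relation_get() compose to
+# walk every unit on a relation; the 'mon' relation name is hypothetical.
+def _example_collect_addresses():
+    """Collect the private-address of every unit on a 'mon' relation."""
+    addresses = []
+    for rid in relation_ids('mon'):
+        for unit in related_units(rid):
+            addr = relation_get('private-address', unit=unit, rid=rid)
+            if addr:
+                addresses.append(addr)
+    return sorted(addresses)
+
+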
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they
+    must all be present for the relation to be identified as made
+    '''
+    if isinstance(keys, str):
+        keys = [keys]
+    for r_id in relation_ids(relation):
+        for unit in related_units(r_id):
+            context = {}
+            for k in keys:
+                context[k] = relation_get(k, rid=r_id,
+                                          unit=unit)
+            if None not in context.values():
+                return True
+    return False
+
+
+def open_port(port, protocol="TCP"):
+    """Open a service network port"""
+    _args = ['open-port']
+    _args.append('{}/{}'.format(port, protocol))
+    subprocess.check_call(_args)
+
+
+def close_port(port, protocol="TCP"):
+    """Close a service network port"""
+    _args = ['close-port']
+    _args.append('{}/{}'.format(port, protocol))
+    subprocess.check_call(_args)
+
+
+def open_ports(start, end, protocol="TCP"):
+    """Opens a range of service network ports"""
+    _args = ['open-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+    """Close a range of service network ports"""
+    _args = ['close-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+@cached
+def unit_get(attribute):
+    """Get an attribute (e.g. 'private-address') of the local unit"""
+    _args = ['unit-get', '--format=json', attribute]
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+def unit_public_ip():
+    """Get this unit's public IP address"""
+    return unit_get('public-address')
+
+
+def unit_private_ip():
+    """Get this unit's private IP address"""
+    return unit_get('private-address')
+
+
+@cached
+def storage_get(attribute=None, storage_id=None):
+    """Get storage attributes"""
+    _args = ['storage-get', '--format=json']
+    if storage_id:
+        _args.extend(('-s', storage_id))
+    if attribute:
+        _args.append(attribute)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+@cached
+def storage_list(storage_name=None):
+    """List the storage IDs for the unit"""
+    _args = ['storage-list', '--format=json']
+    if storage_name:
+        _args.append(storage_name)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except OSError as e:
+        import errno
+        if e.errno == errno.ENOENT:
+            # storage-list does not exist
+            return []
+        raise
+
+
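+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows how storage_list() and storage_get() combine to resolve the mount
+# location of attached storage; the 'osd-devices' pool name is hypothetical.
+def _example_storage_locations(pool='osd-devices'):
+    """Return the filesystem location of each attached storage instance."""
+    locations = []
+    for sid in storage_list(pool):
+        location = storage_get('location', sid)
+        if location:
+            locations.append(location)
+    return locations
+
+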
+class UnregisteredHookError(Exception):
+    """Raised when an undefined hook is called"""
+    pass
+
+
+class Hooks(object):
+    """A convenient handler for hook functions.
+
+    Example::
+
+        hooks = Hooks()
+
+        # register a hook, taking its name from the function name
+        @hooks.hook()
+        def install():
+            pass  # your code here
+
+        # register a hook, providing a custom hook name
+        @hooks.hook("config-changed")
+        def config_changed():
+            pass  # your code here
+
+        if __name__ == "__main__":
+            # execute a hook based on the name the program is called by
+            hooks.execute(sys.argv)
+    """
+
+    def __init__(self, config_save=None):
+        super(Hooks, self).__init__()
+        self._hooks = {}
+
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0]"""
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering the decorated function as one or more hooks"""
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+def charm_dir():
+    """Return the root directory of the current charm"""
+    return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+def action_set(values):
+    """Sets the values to be returned after the action finishes"""
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def action_fail(message):
+    """Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved."""
+    subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
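+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows the usual action_get/action_set/action_fail flow inside an action
+# script; the 'pool-name' parameter is a hypothetical example.
+def _example_action():
+    try:
+        pool_name = action_get('pool-name')
+        # ... perform the real work here ...
+        action_set({'outcome': 'created {}'.format(pool_name)})
+    except Exception as e:
+        action_fail('action failed: {}'.format(e))
+
+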
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+    message -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state and message
+
+    If the status-get command is not found then assume this is juju < 1.23
+    and return 'unknown', ""
+
+    """
+    cmd = ['status-get', "--format=json", "--include-data"]
+    try:
+        raw_status = subprocess.check_output(cmd)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return ('unknown', "")
+        else:
+            raise
+    else:
+        status = json.loads(raw_status.decode("UTF-8"))
+        return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        @wraps(f)
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
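+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows the usual leadership pattern: only the leader mints shared data and
+# every unit reads it back; the 'shared-secret' key is hypothetical.
+def _example_ensure_shared_secret():
+    import uuid
+    secret = leader_get('shared-secret')
+    if secret is None and is_leader():
+        secret = uuid.uuid4().hex
+        leader_set({'shared-secret': secret})
+    return secret
+
+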
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """payload-register is used while a hook is running to let Juju know that
+    a payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """payload-unregister is used while a hook is running to let Juju know
+    that a payload has been manually stopped. The payload class and id
+    provided must match a payload that has been previously registered with
+    juju using payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """payload-status-set is used to update the current status of a registered
+    payload. The class and id provided must match a payload that has been
+    previously registered with juju using payload-register. The status must be
+    one of the following: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """Used to fetch the resource path of the given name.
+
+    The name must match a name of a defined resource in metadata.yaml.
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+    - Run common code before all of your hooks, such as logging
+      the hook name or interesting relation data.
+    - Defer object or module initialization that requires a hook
+      context until we know there actually is a hook context,
+      making testing easier.
+    - Rather than requiring charm authors to include boilerplate to
+      invoke your helper's behavior, have it run automatically if
+      your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string.
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py new file mode 100644 index 00000000..edbb72ff --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -0,0 +1,918 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import glob +import grp +import random +import string +import subprocess +import hashlib +import functools +import itertools +import six + +from contextlib import contextmanager +from collections import OrderedDict +from .hookenv import log +from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import + +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The follow-
+    ing example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The follow-
+    ing example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The follow-
+    ing example restarts the ceph-osd service for instance id=4:
+
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them.
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. + """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+                          Can be any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': ['cinder-api', 'cinder-volume'],
+            '/etc/apache/sites-enabled/*': ['apache2'],
+        })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop, start or restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
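+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows restart_on_change() combined with a custom restart function; the
+# haproxy path and service name are hypothetical examples.
+def _example_reload_haproxy(service_name):
+    service('reload', service_name)
+
+
+@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']},
+                   restart_functions={'haproxy': _example_reload_haproxy})
+def _example_write_haproxy_config():
+    pass  # render the new configuration file here
+
+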
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A weak PRNG is fine for picking a random length
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
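+# NOTE: illustrative sketch only, not part of upstream charm-helpers. It
+# shows is_phy_iface() applied across /sys/class/net to enumerate the
+# physical (non-virtual) interfaces on a host.
+def _example_physical_nics():
+    return [os.path.basename(path)
+            for path in glob.glob('/sys/class/net/*')
+            if is_phy_iface(os.path.basename(path))]
+
+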
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
+        int_types = [nic_type]
+    else:
+        int_types = nic_type
+
+    interfaces = []
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile(r'^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
+    return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
+    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+    subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
+    cmd = ['ip', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    mtu = ""
+    for line in ip_output:
+        words = line.split()
+        if 'mtu' in words:
+            mtu = words[words.index("mtu") + 1]
+    return mtu
+
+
+def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
+    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    hwaddr = ""
+    words = ip_output.split()
+    if 'link/ether' in words:
+        hwaddr = words[words.index('link/ether') + 1]
+    return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        yield os.chdir(directory)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
+ raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/tests/charmhelpers/core/hugepage.py b/ceph-radosgw/tests/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..54b5b5e2 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel.py b/ceph-radosgw/tests/charmhelpers/core/kernel.py new file mode 100644 index 00000000..2d404528 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
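The shmmax handling in hugepage_support() above is simple arithmetic: the shared-memory ceiling must cover the entire hugepage reservation. With the function's defaults the floor works out as follows:

    PAGESIZE = 2 * 1024 ** 2        # bytes_from_string('2MB')
    NR_HUGEPAGES = 256
    shmmax_minsize = PAGESIZE * NR_HUGEPAGES
    assert shmmax_minsize == 536870912  # 512MB floor for kernel.shmmax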
+ +import re +import subprocess + +from charmhelpers.osplatform import get_platform +from charmhelpers.core.hookenv import ( + log, + INFO +) + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + subprocess.check_call(cmd) + if persist: + persistent_modprobe(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return subprocess.check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return subprocess.check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..3de372fd --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module + "\n") + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-radosgw/tests/charmhelpers/core/services/__init__.py b/ceph-radosgw/tests/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..61fd074e --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/services/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
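is_module_loaded() above anchors its pattern at the start of each lsmod line, which avoids matching modules that merely contain the name as a substring. A quick check against canned output (sample text, not a live lsmod call):

    import re

    sample = 'Module  Size  Used by\nrbd  83733  1\nlibceph  287066  1 rbd\n'
    assert re.findall('^%s[ ]+' % 'rbd', sample, re.M)       # loaded
    assert not re.findall('^%s[ ]+' % 'ceph', sample, re.M)  # substring only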
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/ceph-radosgw/tests/charmhelpers/core/services/base.py b/ceph-radosgw/tests/charmhelpers/core/services/base.py new file mode 100644 index 00000000..ca9dc996 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/services/base.py @@ -0,0 +1,351 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +from inspect import getargspec +from collections import Iterable, OrderedDict + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. 
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
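A provider, as described above, is any object with a `name` attribute naming the relation and a provide_data() method returning the data to publish; the optional two-argument form lets it tailor output per remote service. A minimal sketch (relation name and payload are illustrative):

    class WebsiteProvider(object):
        name = 'website'  # relation to publish data on

        def provide_data(self, remote_service, service_ready):
            # Only advertise once our own required_data was satisfied.
            if not service_ready:
                return {}
            return {'hostname': 'rgw.example.com', 'port': 80}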
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
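Note that is_ready() above reduces every 'required_data' item to bool(item), so plain dicts double as readiness flags next to RelationContext instances. For example, inside a hook environment where CHARM_DIR is set (service name illustrative):

    manager = ServiceManager([{
        'service': 'radosgw',
        'required_data': [{'have_mon': True}, {}],  # empty dict => not ready
    }])
    assert not manager.is_ready('radosgw')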
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-radosgw/tests/charmhelpers/core/services/helpers.py b/ceph-radosgw/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. 
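The ports file written by PortManagerCallback above is what lets a later hook close ports that were removed from the service definition. The diff step in isolation (port numbers illustrative):

    old_ports = [int(p) for p in '80,443'.split(',') if p]  # from .<service>.ports
    new_ports = [80, 8080]                                  # current definition
    stale = [p for p in old_ports if p not in new_ports]
    assert stale == [443]  # closed before the new set is opened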
+ + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. 
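Concrete contexts follow a small recipe: set name, interface, and required_keys, then let the base class collect the data, as the built-in examples below do. A charm-side sketch (relation and key names are illustrative):

    class CephMonRelation(RelationContext):
        name = 'mon'              # relation name from metadata.yaml
        interface = 'ceph-mon'

        def __init__(self, *args, **kwargs):
            self.required_keys = ['auth', 'key', 'ceph-public-address']
            RelationContext.__init__(self, *args, **kwargs)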
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
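StoredContext above suits generate-once secrets: the first hook run persists whatever candidate value it was given, and every later run reads the stored copy back instead of regenerating. A sketch, assuming it runs inside a hook so charm_dir() resolves (file name and value illustrative):

    import uuid

    ctx = StoredContext('rgw-secret.yaml', {'admin_password': uuid.uuid4().hex})
    password = ctx['admin_password']  # stable across repeated hook executions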
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-radosgw/tests/charmhelpers/core/strutils.py b/ceph-radosgw/tests/charmhelpers/core/strutils.py new file mode 100644 index 00000000..dd9b9717 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/strutils.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
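bool_from_string() above is deliberately strict: it normalises case and whitespace but refuses non-string input rather than guessing at truthiness. Expected behaviour, for reference:

    assert bool_from_string(' Yes ') is True
    assert bool_from_string('off') is False
    try:
        bool_from_string(1)     # truthy, but not a string
    except ValueError:
        pass                    # rejected by design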
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/ceph-radosgw/tests/charmhelpers/core/sysctl.py b/ceph-radosgw/tests/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..6e413e31 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/sysctl.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) + +__author__ = 'Jorge Niedbalski R. ' + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict_parsed.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-radosgw/tests/charmhelpers/core/templating.py b/ceph-radosgw/tests/charmhelpers/core/templating.py new file mode 100644 index 00000000..7b801a34 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/templating.py @@ -0,0 +1,84 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + """ + Render a template. 
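Despite the SI-looking suffixes, bytes_from_string() above works in powers of 1024, and a bare number without a unit is rejected by the regex. For reference:

    assert bytes_from_string('512K') == 512 * 1024
    assert bytes_from_string('2MB') == 2 * 1024 ** 2
    try:
        bytes_from_string('1024')   # no unit suffix
    except ValueError:
        pass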
+ + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..54ec969f --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/unitdata.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import itertools +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are not persisted unless :meth:`flush` is called. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. 
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
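The prefix convention shared by update() and getrange() above is what gives the store its cheap namespacing. A quick round-trip against a throwaway in-memory database (keys illustrative):

    db = Storage(path=':memory:')
    db.update({'fsid': 'abc123', 'auth': 'cephx'}, prefix='config.')
    assert db.getrange('config.', strip=True) == {'fsid': 'abc123',
                                                  'auth': 'cephx'}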
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
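delta() above only computes the changes; pairing it with update() is what persists them, as the module docstring notes. A worked example with the in-memory store (keys illustrative):

    db = Storage(path=':memory:')
    db.update({'debug': False}, prefix='config.')
    d = db.delta({'debug': True, 'verbose': True}, 'config.')
    assert d.debug == (False, True)    # changed value
    assert d.verbose == (None, True)   # newly added key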
+ + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', dict(data['env'])) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 7f8df64d..33c23fda 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -233,20 +233,36 @@ def test_create_rgw_pools_rq_with_prefix(self, mock_broker): self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix='us-east') mock_broker.assert_has_calls([ - call(replica_count=3, weight=19, name='us-east.rgw.buckets'), - call(pg_num=10, replica_count=3, name='us-east.rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.root'), - call(pg_num=10, replica_count=3, name='us-east.rgw.control'), - call(pg_num=10, replica_count=3, name='us-east.rgw.gc'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra'), - call(pg_num=10, replica_count=3, name='us-east.log'), - call(pg_num=10, replica_count=3, name='us-east.intent-log'), - call(pg_num=10, replica_count=3, name='us-east.usage'), - call(pg_num=10, replica_count=3, name='us-east.users'), - call(pg_num=10, replica_count=3, name='us-east.users.email'), - call(pg_num=10, replica_count=3, name='us-east.users.swift'), - call(pg_num=10, 
replica_count=3, name='us-east.users.uid')] + call(replica_count=3, weight=19, name='us-east.rgw.buckets', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw.root', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw.control', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw.gc', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.log', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.intent-log', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.usage', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.users', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.users.email', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.users.swift', + group='objects'), + call(pg_num=10, replica_count=3, name='us-east.users.uid', + group='objects'), + call(pg_num=10, replica_count=3, name='.rgw.root', + group='objects')] ) @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) @@ -258,48 +274,85 @@ def test_create_rgw_pools_rq_no_prefix_pre_jewel(self, mock_broker): self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call(weight=19, replica_count=3, name='.rgw.buckets'), - call(weight=0.10, replica_count=3, name='.rgw'), - call(weight=0.10, replica_count=3, name='.rgw.root'), - call(weight=0.10, replica_count=3, name='.rgw.control'), - call(weight=0.10, replica_count=3, name='.rgw.gc'), - call(weight=1.00, replica_count=3, name='.rgw.buckets.index'), - call(weight=1.00, replica_count=3, name='.rgw.buckets.extra'), - call(weight=0.10, replica_count=3, name='.log'), - call(weight=0.10, replica_count=3, name='.intent-log'), - call(weight=0.10, replica_count=3, name='.usage'), - call(weight=0.10, replica_count=3, name='.users'), - call(weight=0.10, replica_count=3, name='.users.email'), - call(weight=0.10, replica_count=3, name='.users.swift'), - call(weight=0.10, replica_count=3, name='.users.uid')] + call(weight=19, replica_count=3, name='.rgw.buckets', + group='objects'), + call(weight=0.10, replica_count=3, name='.rgw', + group='objects'), + call(weight=0.10, replica_count=3, name='.rgw.root', + group='objects'), + call(weight=0.10, replica_count=3, name='.rgw.control', + group='objects'), + call(weight=0.10, replica_count=3, name='.rgw.gc', + group='objects'), + call(weight=1.00, replica_count=3, name='.rgw.buckets.index', + group='objects'), + call(weight=1.00, replica_count=3, name='.rgw.buckets.extra', + group='objects'), + call(weight=0.10, replica_count=3, name='.log', + group='objects'), + call(weight=0.10, replica_count=3, name='.intent-log', + group='objects'), + call(weight=0.10, replica_count=3, name='.usage', + group='objects'), + call(weight=0.10, replica_count=3, name='.users', + group='objects'), + call(weight=0.10, replica_count=3, name='.users.email', + group='objects'), + call(weight=0.10, replica_count=3, name='.users.swift', + group='objects'), + call(weight=0.10, replica_count=3, name='.users.uid', + group='objects')] ) @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + 
'.add_op_request_access_to_group') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_create_pool') - def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker): + def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, + mock_request_access): self.test_config.set('rgw-lightweight-pool-pg-num', -1) self.test_config.set('ceph-osd-replication-count', 3) self.test_config.set('rgw-buckets-pool-weight', 19) + self.test_config.set('restrict-ceph-pools', True) ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call(weight=19, replica_count=3, name='default.rgw.buckets'), - call(weight=0.10, replica_count=3, name='default.rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.root'), - call(weight=0.10, replica_count=3, name='default.rgw.control'), - call(weight=0.10, replica_count=3, name='default.rgw.gc'), + call(weight=19, replica_count=3, name='default.rgw.buckets', + group='objects'), + call(weight=0.10, replica_count=3, name='default.rgw', + group='objects'), + call(weight=0.10, replica_count=3, name='default.rgw.root', + group='objects'), + call(weight=0.10, replica_count=3, name='default.rgw.control', + group='objects'), + call(weight=0.10, replica_count=3, name='default.rgw.gc', + group='objects'), call(weight=1.00, replica_count=3, - name='default.rgw.buckets.index'), + name='default.rgw.buckets.index', + group='objects'), call(weight=1.00, replica_count=3, - name='default.rgw.buckets.extra'), - call(weight=0.10, replica_count=3, name='default.log'), - call(weight=0.10, replica_count=3, name='default.intent-log'), - call(weight=0.10, replica_count=3, name='default.usage'), - call(weight=0.10, replica_count=3, name='default.users'), - call(weight=0.10, replica_count=3, name='default.users.email'), - call(weight=0.10, replica_count=3, name='default.users.swift'), - call(weight=0.10, replica_count=3, name='default.users.uid')] + name='default.rgw.buckets.extra', + group='objects'), + call(weight=0.10, replica_count=3, name='default.log', + group='objects'), + call(weight=0.10, replica_count=3, name='default.intent-log', + group='objects'), + call(weight=0.10, replica_count=3, name='default.usage', + group='objects'), + call(weight=0.10, replica_count=3, name='default.users', + group='objects'), + call(weight=0.10, replica_count=3, name='default.users.email', + group='objects'), + call(weight=0.10, replica_count=3, name='default.users.swift', + group='objects'), + call(weight=0.10, replica_count=3, name='default.users.uid', + group='objects'), + call(weight=0.10, replica_count=3, name='.rgw.root', + group='objects')] ) + mock_request_access.assert_called_with(key_name='radosgw.gateway', + name='objects', + permission='rwx') @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) @patch.object(utils, 'lsb_release', From 8698acce76c579fb64a4fa73f1fc154f40e49652 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 14 Feb 2017 13:06:17 -0600 Subject: [PATCH 1274/2699] remove upgrade_keys This function is no longer necessary as we do not need to ensure that the remote units can create their own pools Partial-Bug: 1424771 Change-Id: Id94c983b9631ac5a5c0a43813b2157724b148a87 --- ceph-mon/hooks/ceph_hooks.py | 14 -------------- ceph-mon/unit_tests/test_ceph_hooks.py | 1 - 2 files changed, 15 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e2f79f15..35ef57a4 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -332,19 +332,6 @@ def 
notify_client(): mds_relation_joined(relid=relid, unit=unit) -def upgrade_keys(): - """ Ceph now required mon allow rw for pool creation """ - if len(relation_ids('radosgw')) > 0: - ceph.upgrade_key_caps('client.radosgw.gateway', - ceph._radosgw_caps) - for relid in relation_ids('client'): - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] - ceph.upgrade_key_caps('client.{}'.format(service_name), - ceph._default_caps) - - @hooks.hook('osd-relation-joined') @hooks.hook('osd-relation-changed') def osd_relation(relid=None): @@ -536,7 +523,6 @@ def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) ceph.update_monfs() - upgrade_keys() mon_relation_joined() if is_relation_made("nrpe-external-master"): update_nrpe_config() diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 92786043..095ed1dd 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -136,7 +136,6 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies(self): write_file=DEFAULT, nrpe=DEFAULT, emit_cephconf=DEFAULT, - upgrade_keys=DEFAULT, mon_relation_joined=DEFAULT, is_relation_made=DEFAULT) as mocks, patch( "charmhelpers.contrib.hardening.harden.config"): From 74e46165a072e0f8ff1f7bd4ae30907bd3c16503 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 14 Feb 2017 13:23:21 -0600 Subject: [PATCH 1275/2699] Sync back in charms.ceph Change-Id: I5d8956792a2de53d9d0f34b241206cb62295dcac Partial-Bug: 1424771 --- ceph-mon/lib/ceph/__init__.py | 20 +++++++++++++ ceph-mon/lib/ceph/ceph_broker.py | 51 +++++++++++++++++++++++--------- 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index e87aef9f..26f1b91f 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -1092,6 +1092,26 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :return: Returns a cephx key """ + try: + # Does the key already exist? + output = subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + 'client.{}'.format(name), + ]).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! 
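A minimal sketch (editorial, not part of any patch in this series) of the group-aware broker API that the unit tests above exercise: a consuming charm creates its pools inside a named group and then asks for its key to be limited to that group. The pool, group, and key names below are illustrative only, borrowed from the test expectations.

    # Sketch: compose a broker request against the group-aware API.
    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='default.rgw.buckets', replica_count=3,
                          weight=19, group='objects')
    rq.add_op_request_access_to_group(name='objects',
                                      key_name='radosgw.gateway',
                                      permission='rwx')
    # rq.ops now holds a 'create-pool' op and an 'add-permissions-to-key'
    # op for the mon's handle_add_permissions_to_key() to process.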
+ log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps cmd = [ "sudo", diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index f15b9bd4..8a998058 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -186,7 +186,8 @@ def handle_add_permissions_to_key(request, service): if group_name not in service_obj['group_names'][permission]: service_obj['group_names'][permission].append(group_name) save_service(service=service_obj, service_name=service_name) - service_obj['groups'][group_name] = group + service_obj['groups'] = _build_service_groups(service_obj, + group_namespace) update_service_permissions(service_name, service_obj, group_namespace) @@ -208,7 +209,8 @@ def add_pool_to_group(pool, group, namespace=None): if namespace: group_name = "{}-{}".format(namespace, group_name) group = get_group(group_name=group_name) - group["pools"].append(pool) + if pool not in group['pools']: + group["pools"].append(pool) save_group(group, group_name=group_name) for service in group['services']: update_service_permissions(service, namespace=namespace) @@ -216,7 +218,7 @@ def pool_permission_list_for_service(service): """Build the permission string for Ceph for a given service""" - permissions = "" + permissions = [] permission_types = {} for permission, group in service["group_names"].items(): if permission not in permission_types: @@ -224,12 +226,11 @@ for item in group: permission_types[permission].append(item) for permission, groups in permission_types.items(): - permission = " allow {}".format(permission) + permission = "allow {}".format(permission) for group in groups: for pool in service['groups'][group]['pools']: - permission = "{} pool={}".format(permission, pool) - permissions += permission - return ["mon", "allow r", "osd", permissions.strip()] + permissions.append("{} pool={}".format(permission, pool)) + return ["mon", "allow r", "osd", ', '.join(permissions)] def get_service_groups(service, namespace=None): @@ -244,7 +245,7 @@ { group_names: {'rwx': ['images']}, groups: { - 1 'images': { + 'images': { pools: ['glance'], services: ['nova'] } @@ -260,17 +261,39 @@ except ValueError: service = None if service: - for permission, groups in service['group_names'].items(): - for group in groups: - name = group - if namespace: - name = "{}-{}".format(namespace, name) - service['groups'][group] = get_group(group_name=name) + service['groups'] = _build_service_groups(service, namespace) else: service = {'group_names': {}, 'groups': {}} return service +def _build_service_groups(service, namespace=None): + '''Rebuild the 'groups' dict for a service group + + :returns: dict: dictionary keyed by group name of the following + format: + + { + 'images': { + pools: ['glance'], + services: ['nova', 'glance'] + }, + 'vms':{ + pools: ['nova'], + services: ['nova'] + } + } + ''' + all_groups = {} + for _, groups in service['group_names'].items(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + all_groups[group] = get_group(group_name=name) + return all_groups + + def get_group(group_name): """ A group is a structure to hold data about a named group, structured as: From 79ff9dd3111138f04a7d72212b01ac84d0342698 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 14 Feb 2017 11:46:06
-0800 Subject: [PATCH 1276/2699] Pre-release charm-helpers sync 17.02 Get each charm up to date with lp:charm-helpers for release testing. Change-Id: I3fd359025a7be468fcc856cab7f42894a318e90f --- ceph-radosgw/charm-helpers-tests.yaml | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../charmhelpers/contrib/openstack/context.py | 74 ++ .../openstack/templates/memcached.conf | 53 + .../section-keystone-authtoken-mitaka | 3 + .../templates/wsgi-openstack-api.conf | 100 ++ .../charmhelpers/contrib/openstack/utils.py | 70 +- .../contrib/storage/linux/ceph.py | 16 +- .../hooks/charmhelpers/core/hookenv.py | 45 + ceph-radosgw/hooks/charmhelpers/core/host.py | 227 +++- ceph-radosgw/hooks/charmhelpers/osplatform.py | 6 + .../charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../tests/charmhelpers/core/__init__.py | 13 + .../tests/charmhelpers/core/decorators.py | 55 + ceph-radosgw/tests/charmhelpers/core/files.py | 43 + ceph-radosgw/tests/charmhelpers/core/fstab.py | 132 ++ .../tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ ceph-radosgw/tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + .../tests/charmhelpers/core/hugepage.py | 69 ++ .../tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../charmhelpers/core/services/__init__.py | 16 + .../tests/charmhelpers/core/services/base.py | 351 ++++++ .../charmhelpers/core/services/helpers.py | 290 +++++ .../tests/charmhelpers/core/strutils.py | 70 ++ .../tests/charmhelpers/core/sysctl.py | 54 + .../tests/charmhelpers/core/templating.py | 84 ++ .../tests/charmhelpers/core/unitdata.py | 518 ++++++++ 35 files changed, 4773 insertions(+), 74 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf create mode 100644 ceph-radosgw/tests/charmhelpers/core/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/decorators.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/files.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/fstab.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/hookenv.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/hugepage.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/__init__.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/base.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/services/helpers.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/strutils.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/sysctl.py 
create mode 100644 ceph-radosgw/tests/charmhelpers/core/templating.py create mode 100644 ceph-radosgw/tests/charmhelpers/core/unitdata.py diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml index 48b12f6f..e5063253 100644 --- a/ceph-radosgw/charm-helpers-tests.yaml +++ b/ceph-radosgw/charm-helpers-tests.yaml @@ -3,3 +3,4 @@ destination: tests/charmhelpers include: - contrib.amulet - contrib.openstack.amulet + - core diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 2d2026e4..e141fc12 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN: + return None + if answers: return str(answers[0]) return None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' 
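The v2/v3 handling above all funnels through the new consolidated authenticate_keystone() helper; authenticate_keystone_admin() and authenticate_keystone_user() just derive the right domain and port arguments. A hedged sketch of calling the admin path directly (address and credentials are placeholders; this targets the Python 2 amulet environment these helpers assume):

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils, DEBUG)

    u = OpenStackAmuletUtils(DEBUG)

    def admin_client(keystone_ip, api_version=3):
        # keystone_sentry may be None when keystone_ip is given explicitly;
        # for v3 the helper fills in the 'admin_domain' domain names itself.
        return u.authenticate_keystone_admin(
            None, 'admin', 'openstack', tenant='admin',
            api_version=api_version, keystone_ip=keystone_ip)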
amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index d5b3a33b..42316331 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ import glob import json +import math import os import re import time @@ -90,6 +91,9 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, get_host_ip, + git_determine_usr_bin, + git_determine_python_path, + enable_memcache, ) from charmhelpers.core.unitdata import kv @@ -1207,6 +1211,43 @@ def __call__(self): return ctxt +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, process_weight=1.00, + admin_process_weight=0.75, public_process_weight=0.25): + self.service_name = name + self.user = name + self.group = name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight 
= public_process_weight + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + total_processes = self.num_cpus * multiplier + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + "usr_bin": git_determine_usr_bin(), + "python_path": git_determine_python_path(), + } + return ctxt + + class ZeroMQContext(OSContextGenerator): interfaces = ['zeromq-configuration'] @@ -1512,3 +1553,36 @@ def setup_aa_profile(self): "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf new file mode 100644 index 00000000..26cb037c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/memcached.conf @@ -0,0 +1,53 @@ +############################################################################### +# [ WARNING ] +# memcached configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### + +# memcached default config file +# 2003 - Jay Bonci +# This configuration file is read by the start-memcached script provided as +# part of the Debian GNU/Linux distribution. + +# Run memcached as a daemon. This command is implied, and is not needed for the +# daemon to run. See the README.Debian that comes with this package for more +# information. +-d + +# Log memcached's output to /var/log/memcached +logfile /var/log/memcached.log + +# Be verbose +# -v + +# Be even more verbose (print client commands as well) +# -vv + +# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default +# Note that the daemon will grow to this size, but does not start out holding this much +# memory +-m 64 + +# Default connection port is 11211 +-p {{ memcache_port }} + +# Run the daemon as root. The start-memcached will default to running as root if no +# -u command is present in this config file +-u memcache + +# Specify which IP address to listen on. 
The default is to listen on all IP addresses +# This parameter is one of the only security measures that memcached has, so make sure +# it's listening on a firewalled interface. +-l {{ memcache_server }} + +# Limit the number of simultaneous incoming connections. The daemon default is 1024 +# -c 1024 + +# Lock down all paged memory. Consult with the README and homepage before you do this +# -k + +# Return error when memory is exhausted (rather than removing items) +# -M + +# Maximize core file limit +# -r diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 7c6f0c35..8e6889e0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -14,4 +14,7 @@ project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} signing_dir = {{ signing_dir }} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf new file mode 100644 index 00000000..315b2a3f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -0,0 +1,100 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. + +{% if port -%} +Listen {{ port }} +{% endif -%} + +{% if admin_port -%} +Listen {{ admin_port }} +{% endif -%} + +{% if public_port -%} +Listen {{ public_port }} +{% endif -%} + +{% if port -%} +<VirtualHost *:{{ port }}> + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }} + WSGIScriptAlias / {{ script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory {{ usr_bin }}> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if admin_port -%} +<VirtualHost *:{{ admin_port }}> + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-admin + WSGIScriptAlias / {{ admin_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory {{ usr_bin }}> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if public_port -%} +<VirtualHost *:{{ public_port }}> + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ +{% if python_path -%} + python-path={{ python_path }} \ +{% endif -%} + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-public + WSGIScriptAlias / {{ public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory {{ usr_bin }}> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e75..80219d66 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index.
+ return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c7..ae7f3f93 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 94fc996c..e44e22bf 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. 
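Taken together, the memcache pieces in this sync form a single path: token_cache_pkgs() names the packages to install, enable_memcache() gates everything on a mitaka-or-later release (see the TODO above about the string comparison), and MemcacheContext feeds the memcached.conf template added earlier. A hedged sketch of how a charm might wire these up, assuming the template has been copied into the charm's own templates directory:

    from charmhelpers.contrib.openstack import context, templating
    from charmhelpers.contrib.openstack.utils import (
        enable_memcache, token_cache_pkgs)
    from charmhelpers.fetch import apt_install, filter_installed_packages

    def setup_token_caching(release):
        pkgs = token_cache_pkgs(release=release)  # empty before mitaka
        if pkgs:
            apt_install(filter_installed_packages(pkgs), fatal=True)
        if enable_memcache(release=release):
            configs = templating.OSConfigRenderer(
                templates_dir='templates', openstack_release=release)
            configs.register('/etc/memcached.conf',
                             [context.MemcacheContext()])
            configs.write('/etc/memcached.conf')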
May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 04cadb3a..edbb72ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -54,38 +54,138 @@ cmp_pkgrevno, ) # flake8: noqa -- ignore F401 for this import +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_start(service_name): - """Start a system service""" - return service('start', service_name) + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + return service('stop', service_name, **kwargs) -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) +def service_restart(service_name, **kwargs): + """Restart a system service. + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_restart(service_name): - """Restart a system service""" + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ return service('restart', service_name) -def service_reload(service_name, restart_on_failure=False): +def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: - service_result = service('restart', service_name) + service_result = service('restart', service_name, **kwargs) return service_result -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): """Pause a system service. - Stop it, and prevent it from starting again at boot.""" + Stop it, and prevent it from starting again at boot. 
+ + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ stopped = True - if service_running(service_name): - stopped = service_stop(service_name) + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): + initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service""" + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init", "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) - started = service_running(service_name) if not started: - started = service_start(service_name) + started = service_start(service_name, **kwargs) return started -def service(action, service_name): - """Control a system service""" +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ if init_is_systemd(): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -146,15 +264,26 @@ def service(action, service_name): _INIT_D_CONF = "/etc/init.d/{}" -def service_running(service_name): - """Determine whether a system service is running""" +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. 
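One wrinkle worth flagging in the kwargs plumbing documented above: as synced, service_restart() advertises the extra parameters but delegates with service('restart', service_name) and silently drops them, whereas service_start(), service_stop() and service_reload() do forward them. A short sketch of the intended use against an upstart multi-instance job such as ceph-osd:

    # Address a single instance of the ceph-osd upstart job. Under
    # upstart this becomes roughly `service ceph-osd stop id=4`; on
    # systemd hosts the extra key=value arguments are ignored.
    from charmhelpers.core.host import (
        service_running, service_start, service_stop)

    if service_running('ceph-osd', id=4):
        service_stop('ceph-osd', id=4)
        service_start('ceph-osd', id=4)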
+ """ if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: - output = subprocess.check_output( - ['status', service_name], + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False @@ -306,15 +435,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -718,6 +849,20 @@ def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + def get_total_ram(): """The total amount of system RAM in bytes. 
@@ -749,3 +894,25 @@ def is_container(): else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index ea490bbd..d9a4d5c0 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -8,12 +8,18 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. tuple_platform = platform.linux_distribution() current_platform = tuple_platform[0] if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab14..f9e4c3af 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has
the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. + """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None 
+ if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. 
{} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-radosgw/tests/charmhelpers/core/__init__.py b/ceph-radosgw/tests/charmhelpers/core/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/core/decorators.py b/ceph-radosgw/tests/charmhelpers/core/decorators.py new file mode 100644 index 00000000..6ad41ee4 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
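The retry_on_exception decorator whose definition follows is what keystone_wait_for_propagation() above leans on; the sleep between attempts grows linearly (base_delay, then 2 * base_delay, and so on) before the exception finally propagates. A small usage sketch:

    from charmhelpers.core.decorators import retry_on_exception

    @retry_on_exception(3, base_delay=5, exc_type=IOError)
    def read_flaky_file(path):
        # Retried up to three more times on IOError, sleeping 5s, 10s,
        # then 15s between attempts before the error is re-raised.
        with open(path) as f:
            return f.read()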
+    """
+    def _retry_on_exception_inner_1(f):
+        def _retry_on_exception_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exc_type:
+                    if not retries:
+                        raise
+
+                delay = base_delay * multiplier
+                multiplier += 1
+                log("Retrying '%s' %d more times (delay=%s)" %
+                    (f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_exception_inner_2
+
+    return _retry_on_exception_inner_1
diff --git a/ceph-radosgw/tests/charmhelpers/core/files.py b/ceph-radosgw/tests/charmhelpers/core/files.py
new file mode 100644
index 00000000..fdd82b75
--- /dev/null
+++ b/ceph-radosgw/tests/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski '
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+    """
+    Search for and replace the given pattern in filename.
+
+    :param filename: relative or absolute file path.
+    :param before: expression to be replaced (see 'man sed')
+    :param after: expression to replace with (see 'man sed')
+    :param flags: sed-compatible regex flags; for example, to make
+        the search and replace case insensitive, specify ``flags="i"``.
+        The ``g`` flag is always specified regardless, so you do not
+        need to remember to include it when overriding this parameter.
+    :returns: If the sed command exit code was zero then return,
+        otherwise raise CalledProcessError.
+    """
+    expression = r's/{0}/{1}/{2}'.format(before,
+                                         after, flags)
+
+    return subprocess.check_call(["sed", "-i", "-r", "-e",
+                                  expression,
+                                  os.path.expanduser(filename)])
diff --git a/ceph-radosgw/tests/charmhelpers/core/fstab.py b/ceph-radosgw/tests/charmhelpers/core/fstab.py
new file mode 100644
index 00000000..d9fa9152
--- /dev/null
+++ b/ceph-radosgw/tests/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. 
' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e44e22bf --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. 
+# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
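+        # Together with __getstate__ above, this lets a Serializable
+        # round-trip through pickle as its plain wrapped dict.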
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
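+        # (A None value is deliberately not stringified: the command-line
+        # branch below emits 'key=' for it, which unsets that key on the
+        # relation.)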
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
+    # Flush cache of any relation-gets for local unit
+    flush(local_unit())
+
+
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+    """A list of relation_ids"""
+    reltype = reltype or relation_type()
+    relid_cmd_line = ['relation-ids', '--format=json']
+    if reltype is not None:
+        relid_cmd_line.append(reltype)
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+    return []
+
+
+@cached
+def related_units(relid=None):
+    """A list of related units"""
+    relid = relid or relation_id()
+    units_cmd_line = ['relation-list', '--format=json']
+    if relid is not None:
+        units_cmd_line.extend(('-r', relid))
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+    """Get the json representation of a unit's relation"""
+    unit = unit or remote_unit()
+    relation = relation_get(unit=unit, rid=rid)
+    for key in relation:
+        if key.endswith('-list'):
+            relation[key] = relation[key].split()
+    relation['__unit__'] = unit
+    return relation
+
+
+@cached
+def relations_for_id(relid=None):
+    """Get relations of a specific relation ID"""
+    relation_data = []
+    relid = relid or relation_ids()
+    for unit in related_units(relid):
+        unit_data = relation_for_unit(unit, relid)
+        unit_data['__relid__'] = relid
+        relation_data.append(unit_data)
+    return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+    """Get relations of a specific type"""
+    relation_data = []
+    reltype = reltype or relation_type()
+    for relid in relation_ids(reltype):
+        for relation in relations_for_id(relid):
+            relation['__relid__'] = relid
+            relation_data.append(relation)
+    return relation_data
+
+
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+    """Get a list of relation types supported by this charm"""
+    rel_types = []
+    md = metadata()
+    for key in ('provides', 'requires', 'peers'):
+        section = md.get(key)
+        if section:
+            rel_types.extend(section.keys())
+    return rel_types
+
+
+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
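+# A short usage sketch for the relation helpers above (the relation name
+# 'mon' and the attribute key are illustrative only, not part of this
+# module):
+#
+#     for rid in relation_ids('mon'):
+#         for unit in related_units(rid):
+#             addr = relation_get('private-address', unit=unit, rid=rid)
+
+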
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
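+        # Passing config_save=False therefore disables the automatic
+        # save-to-disk of the Config instance when the hook exits.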
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0]"""
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering them as hooks"""
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+def charm_dir():
+    """Return the root directory of the current charm"""
+    return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+def action_set(values):
+    """Sets the values to be returned after the action finishes"""
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def action_fail(message):
+    """Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved."""
+    subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+    message -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state and message
+
+    If the status-get command is not found then assume this is juju < 1.23 and
+    return 'unknown', ""
+
+    """
+    cmd = ['status-get', "--format=json", "--include-data"]
+    try:
+        raw_status = subprocess.check_output(cmd)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return ('unknown', "")
+        else:
+            raise
+    else:
+        status = json.loads(raw_status.decode("UTF-8"))
+        return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        @wraps(f)
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """payload-register is used while a hook is running to let Juju know that
+    a payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """payload-unregister is used while a hook is running to let Juju know
+    that a payload has been manually stopped. The <class> and <id> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """payload-status-set is used to update the current status of a registered
+    payload. The <class> and <id> provided must match a payload that has been
+    previously registered with juju using payload-register. The <status> must
+    be one of the following: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """used to fetch the resource path of the given name.
+
+    <name> must match a name of defined resource in metadata.yaml
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+    - Run common code before all of your hooks, such as logging
+      the hook name or interesting relation data.
+    - Defer object or module initialization that requires a hook
+      context until we know there actually is a hook context,
+      making testing easier.
+    - Rather than requiring charm authors to include boilerplate to
+      invoke your helper's behavior, have it run automatically if
+      your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation of extra-binding
+    :return: string. 
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py new file mode 100644 index 00000000..edbb72ff --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -0,0 +1,918 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import glob +import grp +import random +import string +import subprocess +import hashlib +import functools +import itertools +import six + +from contextlib import contextmanager +from collections import OrderedDict +from .hookenv import log +from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import + +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('stop', service_name, **kwargs) + + +def service_restart(service_name, **kwargs): + """Restart a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False, **kwargs): + """Reload a system service, optionally falling back to restart if + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
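+
+    A minimal call (the service name is illustrative only):
+
+        service_resume('apache2')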
+    """
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
+
+    if not started:
+        started = service_start(service_name, **kwargs)
+    return started
+
+
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                     the form of key=value.
+    """
+    if init_is_systemd():
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
+    return subprocess.call(cmd) == 0
+
+
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
+    if init_is_systemd():
+        return service('is-active', service_name)
+    else:
+        if os.path.exists(_UPSTART_CONF.format(service_name)):
+            try:
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(
+                    cmd, stderr=subprocess.STDOUT).decode('UTF-8')
+            except subprocess.CalledProcessError:
+                return False
+            else:
+                # This works for upstart scripts where the 'service' command
+                # returns a consistent string to represent running
+                # 'start/running'
+                if ("start/running" in output or
+                        "is running" in output or
+                        "up and running" in output):
+                    return True
+        elif os.path.exists(_INIT_D_CONF.format(service_name)):
+            # Check System V scripts init script return codes
+            return service('status', service_name)
+        return False
+
+
+SYSTEMD_SYSTEM = '/run/systemd/system'
+
+
+def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
+    return os.path.isdir(SYSTEMD_SYSTEM)
+
+
+def adduser(username, password=None, shell='/bin/bash',
+            system_user=False, primary_group=None,
+            secondary_groups=None, uid=None, home_dir=None):
+    """Add a user to the system.
+
+    Will log but otherwise succeed if the user already exists.
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+            })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop, start OR restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A weak PRNG is fine for picking a random length
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): + """Return a list of nics of given type(s)""" + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + + interfaces = [] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') + for line in ip_output: + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + """Set the Maximum Transmission Unit (MTU) on a network interface.""" + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +@contextmanager +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ + cur = os.getcwd() + try: + yield os.chdir(directory) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
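+            # A typical /proc/meminfo line looks like
+            #   "MemTotal:       16384256 kB"
+            # (key, value and unit separated by whitespace), which is what
+            # the line.split() above relies on; the value shown here is
+            # illustrative. If no MemTotal entry is found, we fall through
+            # to the raise below.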
+ raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
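+
+    A hypothetical caller (package name and version are illustrative)
+    might use it as::
+
+        if cmp_pkgrevno('ceph-common', '10.2.0') >= 0:
+            pass  # installed version is at least 10.2.0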
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/tests/charmhelpers/core/hugepage.py b/ceph-radosgw/tests/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..54b5b5e2 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel.py b/ceph-radosgw/tests/charmhelpers/core/kernel.py new file mode 100644 index 00000000..2d404528 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
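+
+# Illustrative usage of the helpers defined below (the module name is an
+# example only, not a recommendation):
+#
+#     modprobe('bonding')           # load now and persist across reboots
+#     is_module_loaded('bonding')   # => True
+#     rmmod('bonding')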
+ +import re +import subprocess + +from charmhelpers.osplatform import get_platform +from charmhelpers.core.hookenv import ( + log, + INFO +) + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + subprocess.check_call(cmd) + if persist: + persistent_modprobe(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return subprocess.check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return subprocess.check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..3de372fd --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module + "\n") + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-radosgw/tests/charmhelpers/core/services/__init__.py b/ceph-radosgw/tests/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..61fd074e --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/services/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/ceph-radosgw/tests/charmhelpers/core/services/base.py b/ceph-radosgw/tests/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..ca9dc996
--- /dev/null
+++ b/ceph-radosgw/tests/charmhelpers/core/services/base.py
@@ -0,0 +1,351 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the
+        'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`.  Each callback will be called with the service name as the
+        only parameter.  After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete.  Each callback will be called with the service
+        name as the only parameter.  This defaults to
+        `[host.service_start, services.open_ports]`.
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
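+
+        As an illustration, a minimal provider could look like the
+        following sketch (the class, relation name and values here are
+        hypothetical)::
+
+            class WebsiteProvider(object):
+                name = 'website'
+
+                def provide_data(self, remote_service, service_ready):
+                    return {'host': hookenv.unit_get('private-address'),
+                            'port': 80}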
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
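+
+        For example, with the `bingod` service registered as in the
+        class docstring above::
+
+            manager.was_ready('bingod')  # => True or False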
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-radosgw/tests/charmhelpers/core/services/helpers.py b/ceph-radosgw/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. 
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete.  The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`.  This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID.  Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-radosgw/tests/charmhelpers/core/strutils.py b/ceph-radosgw/tests/charmhelpers/core/strutils.py new file mode 100644 index 00000000..dd9b9717 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/strutils.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/ceph-radosgw/tests/charmhelpers/core/sysctl.py b/ceph-radosgw/tests/charmhelpers/core/sysctl.py
new file mode 100644
index 00000000..6e413e31
--- /dev/null
+++ b/ceph-radosgw/tests/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. '
+
+
+def create(sysctl_dict, sysctl_file):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a YAML-formatted string of sysctl options, e.g. "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :returns: None
+    """
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+        level=DEBUG)
+
+    check_call(["sysctl", "-p", sysctl_file])
diff --git a/ceph-radosgw/tests/charmhelpers/core/templating.py b/ceph-radosgw/tests/charmhelpers/core/templating.py
new file mode 100644
index 00000000..7b801a34
--- /dev/null
+++ b/ceph-radosgw/tests/charmhelpers/core/templating.py
@@ -0,0 +1,84 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+    """
+    Render a template.
+ + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..54ec969f --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/core/unitdata.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook
+execution environment (including relation data, config data, etc.),
+set up a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+    from charmhelpers.core import hookenv, unitdata
+
+    hook_data = unitdata.HookData()
+    db = unitdata.kv()
+    hooks = hookenv.Hooks()
+
+    @hooks.hook
+    def config_changed():
+        # Print all changes to configuration from previously seen
+        # values.
+        for changed, (prev, cur) in hook_data.conf.items():
+            print('config changed', changed,
+                  'previous value', prev,
+                  'current value', cur)
+
+        # Get some unit specific bookkeeping
+        if not db.get('pkg_key'):
+            key = urllib.urlopen('https://example.com/pkg_key').read()
+            db.set('pkg_key', key)
+
+        # Directly access all charm config as a mapping.
+        conf = db.getrange('config', True)
+
+        # Directly access all relation data as a mapping
+        rels = db.getrange('rels', True)
+
+    if __name__ == '__main__':
+        with hook_data():
+            hooks.execute(sys.argv)
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+    >>> from unitdata import kv
+    >>> db = kv()
+    >>> with db.hook_scope('install'):
+    ...    # do work, in transactional scope.
+    ...    db.set('x', 1)
+    >>> db.get('x')
+    1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+    >>> kv.set('y', True)
+    >>> kv.get('y')
+    True
+
+    # We can set complex values (dicts, lists) as a single key.
+    >>> kv.set('config', {'a': 1, 'b': True})
+
+    # Also supports returning dictionaries as a record which
+    # provides attribute access.
+    >>> config = kv.get('config', record=True)
+    >>> config.b
+    True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+    >>> kv.getrange('gui.', strip=True)
+    {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+    >>> data = {'debug': True, 'option': 2}
+    >>> delta = kv.delta(data, 'config.')
+    >>> delta.debug.previous
+    None
+    >>> delta.debug.current
+    True
+    >>> delta
+    {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change; it needs to
+be explicitly saved via the 'update' method::
+
+    >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name.
+
+    >>> with db.hook_scope('config-changed'):
+    ...    db.set('x', 42)
+    >>> db.gethistory('x')
+    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu '
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically json encoded/decoded.
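+
+    An illustrative session (the path and values are examples only)::
+
+        db = Storage('/tmp/example.db')
+        db.set('config', {'debug': True})
+        db.get('config')   # => {'debug': True}
+        db.flush()         # commit to the sqlite store
+        db.close()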
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute(sys.argv)
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however it is useful
+        # for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV

From 610ab142a838006c6a55c6decc725a0ac25c6c37 Mon Sep 17 00:00:00 2001
From: David Ames
Date: Tue, 14 Feb 2017 11:44:28 -0800
Subject: [PATCH 1277/2699] Pre-release charm-helpers sync 17.02

Get each charm up to date with lp:charm-helpers for release testing.
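For reviewers new to it, the unitdata API carried by this sync composes
roughly as follows (an illustrative sketch only, not part of the generated
sync; it assumes it runs inside a Juju hook, so CHARM_DIR is set and the
unit state database is writable):

    from charmhelpers.core import unitdata

    db = unitdata.kv()
    with db.hook_scope('config-changed'):
        # Values are JSON-serialized; the transaction commits when the
        # scope exits cleanly and rolls back if the hook raises.
        db.set('osd-count', 3)
        delta = db.delta({'fsid': 'some-uuid'}, prefix='config')
        if 'fsid' in delta:
            print(delta.fsid.previous, delta.fsid.current)
    db.close()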
Change-Id: Ia20542863ece59ee93d9883af83250b0d3208b60 --- ceph-osd/charm-helpers-tests.yaml | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../contrib/storage/linux/ceph.py | 16 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 31 + ceph-osd/hooks/charmhelpers/core/host.py | 194 ++- ceph-osd/hooks/charmhelpers/osplatform.py | 6 + .../contrib/openstack/amulet/utils.py | 107 +- ceph-osd/tests/charmhelpers/core/__init__.py | 13 + .../tests/charmhelpers/core/decorators.py | 55 + ceph-osd/tests/charmhelpers/core/files.py | 43 + ceph-osd/tests/charmhelpers/core/fstab.py | 132 ++ ceph-osd/tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ ceph-osd/tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + ceph-osd/tests/charmhelpers/core/hugepage.py | 69 ++ ceph-osd/tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../charmhelpers/core/services/__init__.py | 16 + .../tests/charmhelpers/core/services/base.py | 351 ++++++ .../charmhelpers/core/services/helpers.py | 290 +++++ ceph-osd/tests/charmhelpers/core/strutils.py | 70 ++ ceph-osd/tests/charmhelpers/core/sysctl.py | 54 + .../tests/charmhelpers/core/templating.py | 84 ++ ceph-osd/tests/charmhelpers/core/unitdata.py | 518 ++++++++ 28 files changed, 4205 insertions(+), 47 deletions(-) create mode 100644 ceph-osd/tests/charmhelpers/core/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/core/decorators.py create mode 100644 ceph-osd/tests/charmhelpers/core/files.py create mode 100644 ceph-osd/tests/charmhelpers/core/fstab.py create mode 100644 ceph-osd/tests/charmhelpers/core/hookenv.py create mode 100644 ceph-osd/tests/charmhelpers/core/host.py create mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-osd/tests/charmhelpers/core/hugepage.py create mode 100644 ceph-osd/tests/charmhelpers/core/kernel.py create mode 100644 ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-osd/tests/charmhelpers/core/services/__init__.py create mode 100644 ceph-osd/tests/charmhelpers/core/services/base.py create mode 100644 ceph-osd/tests/charmhelpers/core/services/helpers.py create mode 100644 ceph-osd/tests/charmhelpers/core/strutils.py create mode 100644 ceph-osd/tests/charmhelpers/core/sysctl.py create mode 100644 ceph-osd/tests/charmhelpers/core/templating.py create mode 100644 ceph-osd/tests/charmhelpers/core/unitdata.py diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml index 48b12f6f..e5063253 100644 --- a/ceph-osd/charm-helpers-tests.yaml +++ b/ceph-osd/charm-helpers-tests.yaml @@ -3,3 +3,4 @@ destination: tests/charmhelpers include: - contrib.amulet - contrib.openstack.amulet + - core diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index f2f7dfbc..e141fc12 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -426,7 +426,7 @@ def ns_query(address): 
try: answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN as e: + except dns.resolver.NXDOMAIN: return None if answers: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c7..ae7f3f93 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index d1cb68db..e44e22bf 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -1035,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 3638e65e..edbb72ff 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -56,37 +56,136 @@ UPDATEDB_PATH = '/etc/updatedb.conf' -def service_start(service_name): - """Start a system service""" - return service('start', service_name) +def service_start(service_name, **kwargs): + """Start a system service. 
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The follow-
+    ing example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The follow-
+    ing example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
 
-def service_stop(service_name):
-    """Stop a system service"""
-    return service('stop', service_name)
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
 
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The follow-
+    ing example restarts the ceph-osd service for instance id=4:
 
-def service_restart(service_name):
-    """Restart a system service"""
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+ """ return service('restart', service_name) -def service_reload(service_name, restart_on_failure=False): +def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: - service_result = service('restart', service_name) + service_result = service('restart', service_name, **kwargs) return service_result -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): """Pause a system service. - Stop it, and prevent it from starting again at boot.""" + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ stopped = True - if service_running(service_name): - stopped = service_stop(service_name) + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -107,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): + initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service""" + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. 
kwargs
+                     are ignored for systemd enabled systems.
+    """
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
@@ -127,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init",
             "Unable to detect {0} as SystemD, Upstart {1} or"
             " SysV {2}".format(
                 service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
 
-    started = service_running(service_name)
     if not started:
-        started = service_start(service_name)
+        started = service_start(service_name, **kwargs)
     return started
 
 
-def service(action, service_name):
-    """Control a system service"""
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                    the form of key=value.
+    """
     if init_is_systemd():
         cmd = ['systemctl', action, service_name]
     else:
         cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
     return subprocess.call(cmd) == 0
 
 
@@ -147,15 +264,26 @@ def service(action, service_name):
 _INIT_D_CONF = "/etc/init.d/{}"
 
 
-def service_running(service_name):
-    """Determine whether a system service is running"""
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
     if init_is_systemd():
         return service('is-active', service_name)
     else:
         if os.path.exists(_UPSTART_CONF.format(service_name)):
             try:
-                output = subprocess.check_output(
-                    ['status', service_name],
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(cmd,
                     stderr=subprocess.STDOUT).decode('UTF-8')
             except subprocess.CalledProcessError:
                 return False
@@ -721,6 +849,20 @@ def lchownr(path, owner, group):
     chownr(path, owner, group, follow_links=False)
 
 
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
 def get_total_ram():
     """The total amount of system RAM in bytes.
 
diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py
index ea490bbd..d9a4d5c0 100644
--- a/ceph-osd/hooks/charmhelpers/osplatform.py
+++ b/ceph-osd/hooks/charmhelpers/osplatform.py
@@ -8,12 +8,18 @@ def get_platform():
     will be returned (which is the name of the module). This string is used to
     decide which platform module should be imported.
     """
+    # linux_distribution is deprecated and will be removed in Python 3.7
+    # Warnings *not* disabled, as we certainly need to fix this.
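+    # As an aside (an illustrative sketch only, not part of this sync): a
+    # deprecation-proof variant could parse /etc/os-release instead, e.g.
+    #
+    #     with open('/etc/os-release') as fh:
+    #         release_info = dict(
+    #             line.rstrip().split('=', 1) for line in fh if '=' in line)
+    #     current_platform = release_info.get('NAME', '').strip('"')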
tuple_platform = platform.linux_distribution() current_platform = tuple_platform[0] if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 2b0a562e..401c0328 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
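+
+        Example (an illustrative sketch; ``deployment`` and
+        ``keystone_sentry`` are assumed to be supplied by the calling
+        amulet test)::
+
+            pairs = [(keystone_sentry, 'identity-service')]
+            self.keystone_configure_api_version(pairs, deployment, 3)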
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" diff --git a/ceph-osd/tests/charmhelpers/core/__init__.py b/ceph-osd/tests/charmhelpers/core/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/__init__.py @@ -0,0 
+1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/ceph-osd/tests/charmhelpers/core/decorators.py b/ceph-osd/tests/charmhelpers/core/decorators.py
new file mode 100644
index 00000000..6ad41ee4
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+# Edward Hope-Morley
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+    """If the decorated function raises exception exc_type, allow num_retries
+    retry attempts before raising the exception.
+    """
+    def _retry_on_exception_inner_1(f):
+        def _retry_on_exception_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exc_type:
+                    if not retries:
+                        raise
+
+                delay = base_delay * multiplier
+                multiplier += 1
+                log("Retrying '%s' %d more times (delay=%s)" %
+                    (f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_exception_inner_2
+
+    return _retry_on_exception_inner_1
diff --git a/ceph-osd/tests/charmhelpers/core/files.py b/ceph-osd/tests/charmhelpers/core/files.py
new file mode 100644
index 00000000..fdd82b75
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski '
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+    """
+    Searches for and replaces the given pattern in filename.
+
+    :param filename: relative or absolute file path.
+ :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-osd/tests/charmhelpers/core/fstab.py b/ceph-osd/tests/charmhelpers/core/fstab.py new file mode 100644 index 00000000..d9fa9152 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. ' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) 
== entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e44e22bf --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. 
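+        # Cache miss: compute the result, cache it, and return it.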
+ res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = 
related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
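+
+        Example (an illustrative sketch; assumes a hook context where
+        ``hookenv.config()`` is available)::
+
+            config = hookenv.config()
+            config.implicit_save = False  # opt out of the automatic save
+            config['mykey'] = 'myval'
+            config.save()                 # persist explicitly instead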
+ + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
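+        # Serialize the settings to a temporary YAML file and hand its
+        # name to relation-set; the file is removed once the call returns.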
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
+    # Flush cache of any relation-gets for local unit
+    flush(local_unit())
+
+
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+    """A list of relation_ids"""
+    reltype = reltype or relation_type()
+    relid_cmd_line = ['relation-ids', '--format=json']
+    if reltype is not None:
+        relid_cmd_line.append(reltype)
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+    return []
+
+
+@cached
+def related_units(relid=None):
+    """A list of related units"""
+    relid = relid or relation_id()
+    units_cmd_line = ['relation-list', '--format=json']
+    if relid is not None:
+        units_cmd_line.extend(('-r', relid))
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+    """Get the json representation of a unit's relation"""
+    unit = unit or remote_unit()
+    relation = relation_get(unit=unit, rid=rid)
+    for key in relation:
+        if key.endswith('-list'):
+            relation[key] = relation[key].split()
+    relation['__unit__'] = unit
+    return relation
+
+
+@cached
+def relations_for_id(relid=None):
+    """Get relations of a specific relation ID"""
+    relation_data = []
+    relid = relid or relation_ids()
+    for unit in related_units(relid):
+        unit_data = relation_for_unit(unit, relid)
+        unit_data['__relid__'] = relid
+        relation_data.append(unit_data)
+    return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+    """Get relations of a specific type"""
+    relation_data = []
+    reltype = reltype or relation_type()
+    for relid in relation_ids(reltype):
+        for relation in relations_for_id(relid):
+            relation['__relid__'] = relid
+            relation_data.append(relation)
+    return relation_data
+
+
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+    """Get a list of relation types supported by this charm"""
+    rel_types = []
+    md = metadata()
+    for key in ('provides', 'requires', 'peers'):
+        section = md.get(key)
+        if section:
+            rel_types.extend(section.keys())
+    return rel_types
+
+
+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
+@cached
+def relation_to_interface(relation_name):
+    """
+    Given the name of a relation, return the interface that relation uses.
+
+    :returns: The interface name, or ``None``.
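+
+    Example (an illustrative sketch; real relation and interface names
+    come from the charm's metadata.yaml)::
+
+        >>> relation_to_interface('mon')
+        'ceph'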
+ """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0]"""
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering the wrapped function under the given hook names"""
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+def charm_dir():
+    """Return the root directory of the current charm"""
+    return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+def action_set(values):
+    """Sets the values to be returned after the action finishes"""
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def action_fail(message):
+    """Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved."""
+    subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+    message -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state and message
+
+    If the status-get command is not found then assume this is juju < 1.23 and
+    return 'unknown', ""
+
+    """
+    cmd = ['status-get', "--format=json", "--include-data"]
+    try:
+        raw_status = subprocess.check_output(cmd)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return ('unknown', "")
+        else:
+            raise
+    else:
+        status = json.loads(raw_status.decode("UTF-8"))
+        return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        @wraps(f)
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """payload-register is used while a hook is running to let Juju know that
+    a payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """payload-unregister is used while a hook is running to let Juju know
+    that a payload has been manually stopped.
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """Used while a hook is running to let Juju know that a
+    payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """Used while a hook is running to let Juju know
+    that a payload has been manually stopped. The <class> and <id> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """Used to update the current status of a registered payload.
+    The <class> and <id> provided must match a payload that has been
+    previously registered with juju using payload-register. The <status>
+    must be one of the following: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """Used to fetch the resource path of the given name.
+
+    <name> must match the name of a resource defined in metadata.yaml
+
+    returns either a path or False if the resource is not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
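+
+
+# Illustrative sketch (not part of the original charm-helpers source): a
+# helper module can use atstart()/atexit() so its setup and teardown run
+# around whichever hook fires. The _log_hook_entry function is hypothetical;
+# hook_name() is defined earlier in this module.
+#
+#     def _log_hook_entry():
+#         log('entering hook {}'.format(hook_name()))
+#
+#     atstart(_log_hook_entry)
+#     atexit(log, 'hook completed cleanly')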
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
+
+
+def add_metric(*args, **kwargs):
+    """Add metric values. Values may be expressed with keyword arguments. For
+    metric names containing dashes, these may be expressed as one or more
+    'key=value' positional arguments. May only be called from the
+    collect-metrics hook."""
+    _args = ['add-metric']
+    _kvpairs = []
+    _kvpairs.extend(args)
+    _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+    _args.extend(sorted(_kvpairs))
+    try:
+        subprocess.check_call(_args)
+        return
+    except EnvironmentError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+    log(log_message, level='INFO')
+
+
+def meter_status():
+    """Get the meter status, if running in the meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+    """Get the meter status information, if running in the
+    meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_INFO')
diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py
new file mode 100644
index 00000000..edbb72ff
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/host.py
@@ -0,0 +1,918 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Nick Moffitt
+#  Matthew Wedgwood
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+    )  # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+        service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
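+
+
+# Illustrative sketch (not part of the original charm-helpers source): per
+# the service() implementation below, on an upstart system the kwargs become
+# extra key=value arguments, e.g.
+#
+#     service_start('ceph-osd', id=4)   # runs: service ceph-osd start id=4
+#
+# while on a systemd system the same call runs: systemctl start ceph-osd
+# (the id kwarg is ignored, as documented above).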
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+        service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+        service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them.
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
+ """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. + """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+        Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+        })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: default False; whether to stop and start services
+                      instead of restarting them
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
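+
+
+# Illustrative sketch (not part of the original charm-helpers source): using
+# restart_on_change with a custom restart function for one service. The
+# haproxy paths and the render_config() helper are hypothetical.
+#
+#     def _reload_haproxy(service_name):
+#         service('reload', service_name)
+#
+#     @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']},
+#                        restart_functions={'haproxy': _reload_haproxy})
+#     def write_haproxy_config():
+#         # render_config() is assumed to return the new contents as bytes
+#         write_file('/etc/haproxy/haproxy.cfg', render_config())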
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function implementing the restart_on_change decorator.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A weak PRNG is fine for picking a random length
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Return the bond master if interface is a bond slave, otherwise None.
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): + """Return a list of nics of given type(s)""" + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + + interfaces = [] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') + for line in ip_output: + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + """Set the Maximum Transmission Unit (MTU) on a network interface.""" + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +@contextmanager +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ + cur = os.getcwd() + try: + yield os.chdir(directory) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
+ raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/__init__.py b/ceph-osd/tests/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/centos.py b/ceph-osd/tests/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/tests/charmhelpers/core/hugepage.py b/ceph-osd/tests/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..54b5b5e2 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+
+    Args:
+    user (str) -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to mount hugepages
+    set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the
+                         reserved pages
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize),
+                         0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
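+
+
+# Illustrative sketch (not part of the original charm-helpers source): a charm
+# reserving 2048 2MB hugepages for a hypothetical 'libvirt-qemu' user, letting
+# the helper mount the hugetlbfs and raise kernel.shmmax as needed.
+#
+#     hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=2048,
+#                      mnt_point='/run/hugepages/kvm', pagesize='2MB',
+#                      mount=True, set_shmmax=True)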
diff --git a/ceph-osd/tests/charmhelpers/core/kernel.py b/ceph-osd/tests/charmhelpers/core/kernel.py
new file mode 100644
index 00000000..2d404528
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski "
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 00000000..1c402c11
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a').close()
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 00000000..3de372fd
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
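+
+
+# Illustrative sketch (not part of the original charm-helpers source): loading
+# a kernel module persistently and checking it first, using the helpers from
+# charmhelpers.core.kernel above. The 'bonding' module is just an example.
+#
+#     from charmhelpers.core.kernel import modprobe, is_module_loaded
+#
+#     if not is_module_loaded('bonding'):
+#         # persist=True also records the module for load at boot
+#         modprobe('bonding', persist=True)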
diff --git a/ceph-osd/tests/charmhelpers/core/services/__init__.py b/ceph-osd/tests/charmhelpers/core/services/__init__.py
new file mode 100644
index 00000000..61fd074e
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/ceph-osd/tests/charmhelpers/core/services/base.py b/ceph-osd/tests/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..ca9dc996
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/services/base.py
@@ -0,0 +1,351 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-osd/tests/charmhelpers/core/services/helpers.py b/ceph-osd/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. 
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
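+
+    As a rough sketch, once a `db` relation is complete this context holds
+    a list of unit data dicts namespaced under 'db' (the values here are
+    illustrative)::
+
+        {'db': [{'host': '10.0.0.2', 'user': 'wp',
+                 'password': 'secret', 'database': 'wordpress'}]}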
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
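+
+    A minimal sketch of wiring this callback into a service definition,
+    using the `template` alias defined at the end of this module (service
+    and file names are illustrative)::
+
+        ServiceManager([{
+            'service': 'myservice',
+            'required_data': [MysqlRelation()],
+            'data_ready': [
+                template(source='myservice.conf.j2',
+                         target='/etc/myservice/myservice.conf'),
+            ],
+        }]).manage()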
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-osd/tests/charmhelpers/core/strutils.py b/ceph-osd/tests/charmhelpers/core/strutils.py new file mode 100644 index 00000000..dd9b9717 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/strutils.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
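+
+    For example (doctest-style, values follow the BYTE_POWER table below)::
+
+        >>> bytes_from_string('10KB')
+        10240
+        >>> bytes_from_string('1G')
+        1073741824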
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/ceph-osd/tests/charmhelpers/core/sysctl.py b/ceph-osd/tests/charmhelpers/core/sysctl.py
new file mode 100644
index 00000000..6e413e31
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. '
+
+
+def create(sysctl_dict, sysctl_file):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :returns: None
+    """
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+        level=DEBUG)
+
+    check_call(["sysctl", "-p", sysctl_file])
diff --git a/ceph-osd/tests/charmhelpers/core/templating.py b/ceph-osd/tests/charmhelpers/core/templating.py
new file mode 100644
index 00000000..7b801a34
--- /dev/null
+++ b/ceph-osd/tests/charmhelpers/core/templating.py
@@ -0,0 +1,84 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+    """
+    Render a template.
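+
+    A minimal usage sketch (template and target names are illustrative)::
+
+        render('ceph.conf.j2', '/etc/ceph/ceph.conf',
+               context={'fsid': 'demo-fsid'}, perms=0o644)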
+ + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-osd/tests/charmhelpers/core/unitdata.py b/ceph-osd/tests/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..54ec969f --- /dev/null +++ b/ceph-osd/tests/charmhelpers/core/unitdata.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook
+execution environment (including relation data, config data, etc.),
+set up a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+    from charmhelpers.core import hookenv, unitdata
+
+    hook_data = unitdata.HookData()
+    db = unitdata.kv()
+    hooks = hookenv.Hooks()
+
+    @hooks.hook
+    def config_changed():
+        # Print all changes to configuration from previously seen
+        # values.
+        for changed, (prev, cur) in hook_data.conf.items():
+            print('config changed', changed,
+                  'previous value', prev,
+                  'current value', cur)
+
+        # Get some unit specific bookkeeping
+        if not db.get('pkg_key'):
+            key = urllib.urlopen('https://example.com/pkg_key').read()
+            db.set('pkg_key', key)
+
+        # Directly access all charm config as a mapping.
+        conf = db.getrange('config', True)
+
+        # Directly access all relation data as a mapping
+        rels = db.getrange('rels', True)
+
+    if __name__ == '__main__':
+        with hook_data():
+            hooks.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+    >>> from unitdata import kv
+    >>> db = kv()
+    >>> with db.hook_scope('install'):
+    ...    # do work, in transactional scope.
+    ...    db.set('x', 1)
+    >>> db.get('x')
+    1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+    >>> kv.set('y', True)
+    >>> kv.get('y')
+    True
+
+    # We can set complex values (dicts, lists) as a single key.
+    >>> kv.set('config', {'a': 1, 'b': True})
+
+    # Also supports returning dictionaries as a record which
+    # provides attribute access.
+    >>> config = kv.get('config', record=True)
+    >>> config.b
+    True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+    >>> kv.getrange('gui.', strip=True)
+    {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+    >>> data = {'debug': True, 'option': 2}
+    >>> delta = kv.delta(data, 'config.')
+    >>> delta.debug.previous
+    None
+    >>> delta.debug.current
+    True
+    >>> delta
+    {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change; it needs to
+be explicitly saved via the 'update' method::
+
+    >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name.
+
+    >>> with db.hook_scope('config-changed'):
+    ...    db.set('x', 42)
+    >>> db.gethistory('x')
+    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu '
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically json encoded/decoded.
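+
+    A minimal sketch of typical use (the path is illustrative; by default
+    the path comes from UNIT_STATE_DB or CHARM_DIR)::
+
+        db = Storage('/tmp/demo.db')
+        db.set('seen', True)        # json-serialized on disk
+        db.get('seen')              # True
+        db.flush()                  # commit; nothing persists until flush()
+        db.close()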
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
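+
+        For example (illustrative)::
+
+            db.set('config', {'a': 1, 'b': True})   # stored as JSON text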
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute()
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however, it is useful
+        # for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV

From 8fd90cc898488a80f544d68d5b71a9710b49c1d0 Mon Sep 17 00:00:00 2001
From: David Ames
Date: Tue, 14 Feb 2017 11:43:42 -0800
Subject: [PATCH 1278/2699] Pre-release charm-helpers sync 17.02

Get each charm up to date with lp:charm-helpers for release testing.
Change-Id: I3871bacf94f7f85589a01588839e1c3be78dc157 --- ceph-mon/charm-helpers-tests.yaml | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../charmhelpers/contrib/openstack/context.py | 74 ++ .../charmhelpers/contrib/openstack/utils.py | 70 +- .../contrib/storage/linux/ceph.py | 16 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 45 + ceph-mon/hooks/charmhelpers/core/host.py | 227 +++- ceph-mon/hooks/charmhelpers/osplatform.py | 6 + .../charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 174 ++- ceph-mon/tests/charmhelpers/core/__init__.py | 13 + .../tests/charmhelpers/core/decorators.py | 55 + ceph-mon/tests/charmhelpers/core/files.py | 43 + ceph-mon/tests/charmhelpers/core/fstab.py | 132 ++ ceph-mon/tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ ceph-mon/tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + ceph-mon/tests/charmhelpers/core/hugepage.py | 69 ++ ceph-mon/tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../charmhelpers/core/services/__init__.py | 16 + .../tests/charmhelpers/core/services/base.py | 351 ++++++ .../charmhelpers/core/services/helpers.py | 290 +++++ ceph-mon/tests/charmhelpers/core/strutils.py | 70 ++ ceph-mon/tests/charmhelpers/core/sysctl.py | 54 + .../tests/charmhelpers/core/templating.py | 84 ++ ceph-mon/tests/charmhelpers/core/unitdata.py | 518 ++++++++ 32 files changed, 4617 insertions(+), 74 deletions(-) create mode 100644 ceph-mon/tests/charmhelpers/core/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/core/decorators.py create mode 100644 ceph-mon/tests/charmhelpers/core/files.py create mode 100644 ceph-mon/tests/charmhelpers/core/fstab.py create mode 100644 ceph-mon/tests/charmhelpers/core/hookenv.py create mode 100644 ceph-mon/tests/charmhelpers/core/host.py create mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-mon/tests/charmhelpers/core/hugepage.py create mode 100644 ceph-mon/tests/charmhelpers/core/kernel.py create mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-mon/tests/charmhelpers/core/services/__init__.py create mode 100644 ceph-mon/tests/charmhelpers/core/services/base.py create mode 100644 ceph-mon/tests/charmhelpers/core/services/helpers.py create mode 100644 ceph-mon/tests/charmhelpers/core/strutils.py create mode 100644 ceph-mon/tests/charmhelpers/core/sysctl.py create mode 100644 ceph-mon/tests/charmhelpers/core/templating.py create mode 100644 ceph-mon/tests/charmhelpers/core/unitdata.py diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index 48b12f6f..e5063253 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -3,3 +3,4 @@ destination: tests/charmhelpers include: - contrib.amulet - contrib.openstack.amulet + - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py 
b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 2d2026e4..e141fc12 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN: + return None + if answers: return str(answers[0]) return None diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
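+
+        Example call (names are illustrative; `deployment` stands for
+        whatever object carries the amulet `.d` deployment)::
+
+            pairs = [(self.cinder_sentry, 'cinder:identity-service')]
+            u.keystone_configure_api_version(pairs, deployment, 3)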
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' 
amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index d5b3a33b..42316331 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -14,6 +14,7 @@ import glob import json +import math import os import re import time @@ -90,6 +91,9 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, get_host_ip, + git_determine_usr_bin, + git_determine_python_path, + enable_memcache, ) from charmhelpers.core.unitdata import kv @@ -1207,6 +1211,43 @@ def __call__(self): return ctxt +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, process_weight=1.00, + admin_process_weight=0.75, public_process_weight=0.25): + self.service_name = name + self.user = name + self.group = name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight = 
public_process_weight + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + total_processes = self.num_cpus * multiplier + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + "usr_bin": git_determine_usr_bin(), + "python_path": git_determine_python_path(), + } + return ctxt + + class ZeroMQContext(OSContextGenerator): interfaces = ['zeromq-configuration'] @@ -1512,3 +1553,36 @@ def setup_aa_profile(self): "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e75..80219d66 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. 
+ """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index. + return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c7..ae7f3f93 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. 
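+
+        As a broader sketch, a charm typically builds a request with the
+        ops defined above (pool and group names, and the permission
+        string, are illustrative)::
+
+            rq = CephBrokerRq()
+            rq.add_op_create_pool(name='glance', replica_count=3,
+                                  group='images')
+            rq.add_op_request_access_to_group(name='images',
+                                              permission='rwx')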
diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 94fc996c..e44e22bf 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 04cadb3a..edbb72ff 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -54,38 +54,138 @@ cmp_pkgrevno, ) # flake8: noqa -- ignore F401 for this import +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. 
+ + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_start(service_name): - """Start a system service""" - return service('start', service_name) + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('stop', service_name, **kwargs) -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) +def service_restart(service_name, **kwargs): + """Restart a system service. + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). -def service_restart(service_name): - """Restart a system service""" + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ return service('restart', service_name) -def service_reload(service_name, restart_on_failure=False): +def service_reload(service_name, restart_on_failure=False, **kwargs): """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. 
The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) if not service_result and restart_on_failure: - service_result = service('restart', service_name) + service_result = service('restart', service_name, **kwargs) return service_result -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): """Pause a system service. - Stop it, and prevent it from starting again at boot.""" + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ stopped = True - if service_running(service_name): - stopped = service_stop(service_name) + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): + initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service""" + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): @@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init", "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}".format( service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) - started = service_running(service_name) if not started: - started = service_start(service_name) + started = service_start(service_name, **kwargs) return started -def service(action, service_name): - """Control a system service""" +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. 
+ """ if init_is_systemd(): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -146,15 +264,26 @@ def service(action, service_name): _INIT_D_CONF = "/etc/init.d/{}" -def service_running(service_name): - """Determine whether a system service is running""" +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. + """ if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: - output = subprocess.check_output( - ['status', service_name], + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False @@ -306,15 +435,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -718,6 +849,20 @@ def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. 
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
 def get_total_ram():
     """The total amount of system RAM in bytes.
 
@@ -749,3 +894,25 @@ def is_container():
     else:
         # Detect using upstart container file marker
         return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    lines = [line for line in updatedb_text.split("\n")]
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py
index ea490bbd..d9a4d5c0 100644
--- a/ceph-mon/hooks/charmhelpers/osplatform.py
+++ b/ceph-mon/hooks/charmhelpers/osplatform.py
@@ -8,12 +8,18 @@ def get_platform():
     will be returned (which is the name of the module). This string is used
     to decide which platform module should be imported.
     """
+    # linux_distribution is deprecated and will be removed in Python 3.7.
+    # Warnings are *not* disabled, as we certainly need to fix this.
    tuple_platform = platform.linux_distribution()
    current_platform = tuple_platform[0]
    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
+    elif "debian" in current_platform:
+        # Stock Python does not detect Ubuntu and instead returns debian,
+        # or at least it does in some build environments like Travis CI.
+        return "ubuntu"
    else:
        raise RuntimeError("This module is not supported on {}."
.format(current_platform)) diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab14..f9e4c3af 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ -37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
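+
+        A brief, illustrative example (sentry and relation names are
+        deployment specific):
+
+            pairs = [(cinder_sentry, 'cinder:identity-service')]
+            self.keystone_configure_api_version(pairs, deployment, 3)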
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' 
amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if ubuntu_release <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-mon/tests/charmhelpers/core/__init__.py b/ceph-mon/tests/charmhelpers/core/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/core/decorators.py b/ceph-mon/tests/charmhelpers/core/decorators.py new file mode 100644 index 00000000..6ad41ee4 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/ceph-mon/tests/charmhelpers/core/files.py b/ceph-mon/tests/charmhelpers/core/files.py new file mode 100644 index 00000000..fdd82b75 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-mon/tests/charmhelpers/core/fstab.py b/ceph-mon/tests/charmhelpers/core/fstab.py new file mode 100644 index 00000000..d9fa9152 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. ' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e44e22bf --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
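A quick sketch of how the Config class above is meant to be used from a
config-changed hook; the 'source' and 'render-count' keys are hypothetical:

    from charmhelpers.core import hookenv

    def config_changed():
        config = hookenv.config()
        if config.changed('source'):
            hookenv.log('source: {!r} -> {!r}'.format(
                config.previous('source'), config.get('source')))
        # Keys we store are persisted across hooks by the implicit save,
        # which writes the dict back to .juju-persistent-config on exit.
        config['render-count'] = (config.previous('render-count') or 0) + 1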
+ if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + 
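The accessors above compose into the usual pattern for walking a relation;
a minimal sketch, assuming a peer relation named 'mon':

    from charmhelpers.core.hookenv import (
        related_units,
        relation_get,
        relation_ids,
    )

    def peer_addresses():
        # Collect the private-address of every remote unit on each
        # 'mon' relation id.
        addresses = []
        for rid in relation_ids('mon'):
            for unit in related_units(rid):
                address = relation_get('private-address', unit=unit, rid=rid)
                if address:
                    addresses.append(address)
        return addresses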
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
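is_relation_made() above is commonly used to defer work until a relation is
complete; a minimal sketch (the relation name and key list are illustrative):

    from charmhelpers.core.hookenv import is_relation_made, log

    def config_changed():
        if not is_relation_made('mon', keys=['private-address']):
            log('mon relation not yet complete; deferring configuration')
            return
        # ... safe to render configuration that needs the relation data ...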
+ if config_save is not None: + config().implicit_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + _run_atstart() + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + """Return the root directory of the current charm""" + return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. 
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. """ + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. 
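Because translate_exc above converts the OSError raised by a missing hook
tool into NotImplementedError, the leadership helpers degrade cleanly on
older Jujus; a hedged sketch (the settings key is illustrative):

    from charmhelpers.core.hookenv import is_leader, leader_set

    def maybe_publish(settings):
        """Publish leader settings when possible; settings is a plain dict."""
        try:
            if is_leader():
                leader_set(settings=settings)
        except NotImplementedError:
            pass  # older Juju without leadership tools; fall back elsewhere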
The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. + - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework as been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get_primary_address(binding): + ''' + Retrieve the primary network address for a named binding + + :param binding: string. The name of a relation of extra-binding + :return: string. 
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py new file mode 100644 index 00000000..edbb72ff --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -0,0 +1,918 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import glob +import grp +import random +import string +import subprocess +import hashlib +import functools +import itertools +import six + +from contextlib import contextmanager +from collections import OrderedDict +from .hookenv import log +from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import + +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them.
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
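+
+    A minimal usage sketch (the service name here is illustrative):
+
+        service_resume('ceph-osd')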
+
+    """
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
+
+    if not started:
+        started = service_start(service_name, **kwargs)
+    return started
+
+
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                     the form of key=value.
+    """
+    if init_is_systemd():
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
+    return subprocess.call(cmd) == 0
+
+
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
+    if init_is_systemd():
+        return service('is-active', service_name)
+    else:
+        if os.path.exists(_UPSTART_CONF.format(service_name)):
+            try:
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(
+                    cmd, stderr=subprocess.STDOUT).decode('UTF-8')
+            except subprocess.CalledProcessError:
+                return False
+            else:
+                # This works for upstart scripts where the 'service' command
+                # returns a consistent string to represent running
+                # 'start/running'
+                if ("start/running" in output or
+                        "is running" in output or
+                        "up and running" in output):
+                    return True
+        elif os.path.exists(_INIT_D_CONF.format(service_name)):
+            # Check System V init script return codes
+            return service('status', service_name)
+        return False
+
+
+SYSTEMD_SYSTEM = '/run/systemd/system'
+
+
+def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
+    return os.path.isdir(SYSTEMD_SYSTEM)
+
+
+def adduser(username, password=None, shell='/bin/bash',
+            system_user=False, primary_group=None,
+            secondary_groups=None, uid=None, home_dir=None):
+    """Add a user to the system.
+
+    Will log but otherwise succeed if the user already exists.
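+
+    A minimal usage sketch (the username here is illustrative):
+
+        adduser('ceph-metrics', system_user=True)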
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                         for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+                          Can be any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+            })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    decorated config_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop, start OR restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
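+# A sketch of calling the helper below directly rather than via the
+# decorator (the paths, service names and config writer are illustrative,
+# not part of this module):
+#
+#     result = restart_on_change_helper(
+#         lambda: write_ceph_conf(),   # hypothetical config writer
+#         {'/etc/ceph/ceph.conf': ['ceph-mon']})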
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A random length is fine to pick with a weak PRNG
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return ''.join(random_chars)
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
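+# Example check combining the two helpers (the interface name and the
+# configure_standalone_nic helper are illustrative only):
+#
+#     if is_phy_iface('eth0') and get_bond_master('eth0') is None:
+#         configure_standalone_nic('eth0')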
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
+        int_types = [nic_type]
+    else:
+        int_types = nic_type
+
+    interfaces = []
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile(r'^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
+    return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
+    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+    subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
+    cmd = ['ip', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    mtu = ""
+    for line in ip_output:
+        words = line.split()
+        if 'mtu' in words:
+            mtu = words[words.index("mtu") + 1]
+    return mtu
+
+
+def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
+    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    hwaddr = ""
+    words = ip_output.split()
+    if 'link/ether' in words:
+        hwaddr = words[words.index('link/ether') + 1]
+    return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        yield os.chdir(directory)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
+ raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/__init__.py b/ceph-mon/tests/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/tests/charmhelpers/core/hugepage.py b/ceph-mon/tests/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..54b5b5e2 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
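+
+    A minimal usage sketch (user and sizes here are illustrative)::
+
+        hugepage_support('nova', nr_hugepages=512,
+                         mnt_point='/run/hugepages/kvm')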
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/ceph-mon/tests/charmhelpers/core/kernel.py b/ceph-mon/tests/charmhelpers/core/kernel.py new file mode 100644 index 00000000..2d404528 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/kernel.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
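+#
+# Example usage (a sketch; the module name is illustrative):
+#
+#     modprobe('bonding')   # load now and persist across reboots via the
+#                           # platform-specific kernel_factory backend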
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski "
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the Linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 00000000..1c402c11
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 00000000..3de372fd
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/ceph-mon/tests/charmhelpers/core/services/__init__.py b/ceph-mon/tests/charmhelpers/core/services/__init__.py
new file mode 100644
index 00000000..61fd074e
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/ceph-mon/tests/charmhelpers/core/services/base.py b/ceph-mon/tests/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..ca9dc996
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/services/base.py
@@ -0,0 +1,351 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following format (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+ + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. 
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-mon/tests/charmhelpers/core/services/helpers.py b/ceph-mon/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. 
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready

+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
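+
+    A sketch of typical use inside a 'required_data' list (the relation data
+    keys follow `required_keys`):
+
+        db = MysqlRelation()
+        if db.is_ready():
+            first_unit = db['db'][0]  # dict with host/user/password/database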
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/ceph-mon/tests/charmhelpers/core/strutils.py b/ceph-mon/tests/charmhelpers/core/strutils.py new file mode 100644 index 00000000..dd9b9717 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/strutils.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
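+    For example, '512M' -> 512 * 1024 ** 2 = 536870912, and
+    '2GB' -> 2 * 1024 ** 3 = 2147483648.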
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/ceph-mon/tests/charmhelpers/core/sysctl.py b/ceph-mon/tests/charmhelpers/core/sysctl.py
new file mode 100644
index 00000000..6e413e31
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+import yaml

+from subprocess import check_call

+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)

+__author__ = 'Jorge Niedbalski R. '


+def create(sysctl_dict, sysctl_file):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a YAML-formatted string of sysctl options, e.g. "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :returns: None
+    """
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+        level=DEBUG)
+
+    check_call(["sysctl", "-p", sysctl_file])
diff --git a/ceph-mon/tests/charmhelpers/core/templating.py b/ceph-mon/tests/charmhelpers/core/templating.py
new file mode 100644
index 00000000..7b801a34
--- /dev/null
+++ b/ceph-mon/tests/charmhelpers/core/templating.py
@@ -0,0 +1,84 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+import os
+import sys

+from charmhelpers.core import host
+from charmhelpers.core import hookenv


+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+    """
+    Render a template.
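+
+    A minimal usage sketch (the template name and context are illustrative,
+    not part of any particular charm)::
+
+        render('haproxy.cfg', '/etc/haproxy/haproxy.cfg', {'port': 80})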
+ + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/ceph-mon/tests/charmhelpers/core/unitdata.py b/ceph-mon/tests/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..54ec969f --- /dev/null +++ b/ceph-mon/tests/charmhelpers/core/unitdata.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. 
It will record the current hook
+execution environment (including relation data, config data, etc.),
+set up a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+   from charmhelpers.core import hookenv, unitdata
+
+   hook_data = unitdata.HookData()
+   db = unitdata.kv()
+   hooks = hookenv.Hooks()
+
+   @hooks.hook('config-changed')
+   def config_changed():
+       # Print all changes to configuration from previously seen
+       # values.
+       for changed, (prev, cur) in hook_data.conf.items():
+           print('config changed', changed,
+                 'previous value', prev,
+                 'current value', cur)
+
+       # Get some unit specific bookkeeping
+       if not db.get('pkg_key'):
+           key = urllib.urlopen('https://example.com/pkg_key').read()
+           db.set('pkg_key', key)
+
+       # Directly access all charm config as a mapping.
+       conf = db.getrange('config', True)
+
+       # Directly access all relation data as a mapping.
+       rels = db.getrange('rels', True)
+
+   if __name__ == '__main__':
+       with hook_data():
+           hooks.execute(sys.argv)
+
+
+A more basic integration is via the hook_scope context manager, which simply
+manages transaction scope (and records the hook name and timestamp)::
+
+  >>> from unitdata import kv
+  >>> db = kv()
+  >>> with db.hook_scope('install'):
+  ...    # do work, in transactional scope.
+  ...    db.set('x', 1)
+  >>> db.get('x')
+  1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data structure capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+   >>> kv.set('y', True)
+   >>> kv.get('y')
+   True
+
+   # We can set complex values (dicts, lists) as a single key.
+   >>> kv.set('config', {'a': 1, 'b': True})
+
+   # Also supports returning dictionaries as a record which
+   # provides attribute access.
+   >>> config = kv.get('config', record=True)
+   >>> config.b
+   True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+   >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+   >>> kv.getrange('gui.', strip=True)
+   {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+   >>> data = {'debug': True, 'option': 2}
+   >>> delta = kv.delta(data, 'config.')
+   >>> delta.debug.previous
+   None
+   >>> delta.debug.current
+   True
+   >>> delta
+   {'debug': (None, True), 'option': (None, 2)}
+
+Note that the delta method does not persist the actual change; it needs to
+be explicitly saved via the 'update' method::
+
+   >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name::
+
+   >>> with db.hook_scope('config-changed'):
+   ...      db.set('x', 42)
+   >>> db.gethistory('x')
+   [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+    (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu '
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically json encoded/decoded.
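+
+    A short sketch (the database path is illustrative)::
+
+        db = Storage('/tmp/demo.db')
+        db.set('answer', 42)
+        db.flush()
+        db.close()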
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
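+
+        For example (the key and value are illustrative)::
+
+            kv().set('build', {'rev': 3, 'ok': True})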
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook('config-changed')
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute(sys.argv)
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however, it is useful
+        # for debugging and analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV

From de5573085ba8728b53d11d28ca96bc853a1a1965 Mon Sep 17 00:00:00 2001
From: David Ames
Date: Tue, 14 Feb 2017 11:45:17 -0800
Subject: [PATCH 1279/2699] Pre-release charm-helpers sync 17.02

Get each charm up to date with lp:charm-helpers for release testing.
Change-Id: I2b3e5889be3f509ac8b0f317387feba12314cc11 --- ceph-proxy/charm-helpers-tests.yaml | 1 + .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../charmhelpers/contrib/openstack/utils.py | 70 +- .../contrib/storage/linux/ceph.py | 16 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 45 + ceph-proxy/hooks/charmhelpers/core/host.py | 227 +++- ceph-proxy/hooks/charmhelpers/osplatform.py | 6 + ceph-proxy/tests/basic_deployment.py | 2 - .../charmhelpers/contrib/amulet/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 174 ++- .../tests/charmhelpers/core/__init__.py | 13 + .../tests/charmhelpers/core/decorators.py | 55 + ceph-proxy/tests/charmhelpers/core/files.py | 43 + ceph-proxy/tests/charmhelpers/core/fstab.py | 132 ++ ceph-proxy/tests/charmhelpers/core/hookenv.py | 1068 +++++++++++++++++ ceph-proxy/tests/charmhelpers/core/host.py | 918 ++++++++++++++ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 56 + .../charmhelpers/core/host_factory/ubuntu.py | 56 + .../tests/charmhelpers/core/hugepage.py | 69 ++ ceph-proxy/tests/charmhelpers/core/kernel.py | 72 ++ .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 + .../core/kernel_factory/ubuntu.py | 13 + .../charmhelpers/core/services/__init__.py | 16 + .../tests/charmhelpers/core/services/base.py | 351 ++++++ .../charmhelpers/core/services/helpers.py | 290 +++++ .../tests/charmhelpers/core/strutils.py | 70 ++ ceph-proxy/tests/charmhelpers/core/sysctl.py | 54 + .../tests/charmhelpers/core/templating.py | 84 ++ .../tests/charmhelpers/core/unitdata.py | 518 ++++++++ 31 files changed, 4387 insertions(+), 58 deletions(-) create mode 100644 ceph-proxy/tests/charmhelpers/core/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/core/decorators.py create mode 100644 ceph-proxy/tests/charmhelpers/core/files.py create mode 100644 ceph-proxy/tests/charmhelpers/core/fstab.py create mode 100644 ceph-proxy/tests/charmhelpers/core/hookenv.py create mode 100644 ceph-proxy/tests/charmhelpers/core/host.py create mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/centos.py create mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py create mode 100644 ceph-proxy/tests/charmhelpers/core/hugepage.py create mode 100644 ceph-proxy/tests/charmhelpers/core/kernel.py create mode 100644 ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py create mode 100644 ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py create mode 100644 ceph-proxy/tests/charmhelpers/core/services/__init__.py create mode 100644 ceph-proxy/tests/charmhelpers/core/services/base.py create mode 100644 ceph-proxy/tests/charmhelpers/core/services/helpers.py create mode 100644 ceph-proxy/tests/charmhelpers/core/strutils.py create mode 100644 ceph-proxy/tests/charmhelpers/core/sysctl.py create mode 100644 ceph-proxy/tests/charmhelpers/core/templating.py create mode 100644 ceph-proxy/tests/charmhelpers/core/unitdata.py diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index 48b12f6f..e5063253 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -3,3 +3,4 @@ destination: tests/charmhelpers include: - contrib.amulet - contrib.openstack.amulet + - core diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py 
b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 2d2026e4..e141fc12 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -424,7 +424,11 @@ def ns_query(address): else: return None - answers = dns.resolver.query(address, rtype) + try: + answers = dns.resolver.query(address, rtype) + except dns.resolver.NXDOMAIN: + return None + if answers: return str(answers[0]) return None diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 6d544e75..80219d66 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0']), + ['2.11.0', '2.12.0']), ]) # >= Liberty version->codename mapping @@ -549,9 +549,9 @@ def configure_installation_source(rel): 'newton': 'xenial-updates/newton', 'newton/updates': 'xenial-updates/newton', 'newton/proposed': 'xenial-proposed/newton', - 'zesty': 'zesty-updates/ocata', - 'zesty/updates': 'xenial-updates/ocata', - 'zesty/proposed': 'xenial-proposed/ocata', + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', } try: @@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(service_source, service_dest) +def git_determine_usr_bin(): + """Return the /usr/bin path for Apache2 config. + + The /usr/bin path will be located in the virtualenv if the charm + is configured to deploy from source. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') + else: + return '/usr/bin' + + +def git_determine_python_path(): + """Return the python-path for Apache2 config. + + Returns 'None' unless the charm is configured to deploy from source, + in which case the path of the virtualenv's site-packages is returned. + """ + if git_install_requested(): + projects_yaml = config('openstack-origin-git') + projects_yaml = git_default_repos(projects_yaml) + return os.path.join(git_pip_venv_dir(projects_yaml), + 'lib/python2.7/site-packages') + else: + return None + + def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1925,3 +1954,36 @@ def os_application_version_set(package): application_version_set(os_release(package)) else: application_version_set(application_version) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package, base='icehouse') + if not _release: + _release = get_os_codename_install_source(source) + + # TODO: this should be changed to a numeric comparison using a known list + # of releases and comparing by index. 
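+    # NOTE: the plain string comparison below works for now only because
+    # these OpenStack codenames happen to sort alphabetically in release
+    # order (liberty < mitaka < newton < ocata); that is an observation,
+    # not a guarantee, hence the TODO above.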
+ return _release >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index edb536c7..ae7f3f93 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -40,6 +40,7 @@ ) from charmhelpers.core.hookenv import ( config, + service_name, local_unit, relation_get, relation_ids, @@ -1043,8 +1044,18 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools + """ + self.ops.append({'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, 'name': key_name or service_name(), + 'group-permission': permission}) + def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None): + weight=None, group=None, namespace=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1058,7 +1069,8 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight}) + 'weight': weight, 'group': group, + 'group-namespace': namespace}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 94fc996c..e44e22bf 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"): subprocess.check_call(_args) +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1021,3 +1035,34 @@ def network_get_primary_address(binding): ''' cmd = ['network-get', '--primary-address', binding] return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. 
May only be called from the collect-metrics
+    hook."""
+    _args = ['add-metric']
+    _kvpairs = []
+    _kvpairs.extend(args)
+    _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+    _args.extend(sorted(_kvpairs))
+    try:
+        subprocess.check_call(_args)
+        return
+    except EnvironmentError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+    log(log_message, level='INFO')
+
+
+def meter_status():
+    """Get the meter status, if running in the meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+    """Get the meter status information, if running in the meter-status-changed
+    hook."""
+    return os.environ.get('JUJU_METER_INFO')
diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py
index 04cadb3a..edbb72ff 100644
--- a/ceph-proxy/hooks/charmhelpers/core/host.py
+++ b/ceph-proxy/hooks/charmhelpers/core/host.py
@@ -54,38 +54,138 @@
     cmp_pkgrevno,
 )  # flake8: noqa -- ignore F401 for this import
 
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
 
-def service_start(service_name):
-    """Start a system service"""
-    return service('start', service_name)
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
 
 
-def service_stop(service_name):
-    """Stop a system service"""
-    return service('stop', service_name)
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
 
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
 
-def service_restart(service_name):
-    """Restart a system service"""
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
-    return service('restart', service_name)
+    return service('restart', service_name, **kwargs)
 
 
-def service_reload(service_name, restart_on_failure=False):
+def service_reload(service_name, restart_on_failure=False, **kwargs):
     """Reload a system service, optionally falling back to restart if
-    reload fails"""
-    service_result = service('reload', service_name)
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The
+    following example reloads the ceph-osd service for instance id=4:
+
+    service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fall back to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
     if not service_result and restart_on_failure:
-        service_result = service('restart', service_name)
+        service_result = service('restart', service_name, **kwargs)
     return service_result
 
 
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
     """Pause a system service.
 
-    Stop it, and prevent it from starting again at boot."""
+    Stop it, and prevent it from starting again at boot.
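+
+    For example, to pause a single ceph-osd instance (a sketch, reusing the
+    instance id convention described above)::
+
+        service_pause('ceph-osd', id=4)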
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
+    """
     stopped = True
-    if service_running(service_name):
-        stopped = service_stop(service_name)
+    if service_running(service_name, **kwargs):
+        stopped = service_stop(service_name, **kwargs)
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
@@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
 
 def service_resume(service_name, init_dir="/etc/init",
-                   initd_dir="/etc/init.d"):
+                   initd_dir="/etc/init.d", **kwargs):
     """Resume a system service.
 
-    Reenable starting again at boot. Start the service"""
+    Re-enable starting again at boot. Start the service.
+
+    :param service_name: the name of the service to resume
+    :param init_dir: the path to the init dir
+    :param initd_dir: the path to the initd dir
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
@@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init",
                 "Unable to detect {0} as SystemD, Upstart {1} or"
                 " SysV {2}".format(
                     service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
 
-    started = service_running(service_name)
     if not started:
-        started = service_start(service_name)
+        started = service_start(service_name, **kwargs)
     return started
 
 
-def service(action, service_name):
-    """Control a system service"""
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                     the form of key=value.
+    """
     if init_is_systemd():
         cmd = ['systemctl', action, service_name]
     else:
         cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
     return subprocess.call(cmd) == 0
 
 
@@ -146,15 +264,26 @@ def service(action, service_name):
 _INIT_D_CONF = "/etc/init.d/{}"
 
 
-def service_running(service_name):
-    """Determine whether a system service is running"""
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
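+
+    For example, reusing the instance unit above::
+
+        service_running('ceph-osd', id=2)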
+ """ if init_is_systemd(): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): try: - output = subprocess.check_output( - ['status', service_name], + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False @@ -306,15 +435,17 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) -def rsync(from_path, to_path, flags='-r', options=None): +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd cmd.extend(options) cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() def symlink(source, destination): @@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True + :param bool follow_links: Also follow and chown links if True :param bool chowntopdir: Also chown path itself if True """ uid = pwd.getpwnam(owner).pw_uid @@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): broken_symlink = os.path.lexists(path) and not os.path.exists(path) if not broken_symlink: chown(path, uid, gid) - for root, dirs, files in os.walk(path): + for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) @@ -718,6 +849,20 @@ def lchownr(path, owner, group): chownr(path, owner, group, follow_links=False) +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + def get_total_ram(): """The total amount of system RAM in bytes. 
@@ -749,3 +894,25 @@ def is_container(): else: # Detect using upstart container file marker return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output diff --git a/ceph-proxy/hooks/charmhelpers/osplatform.py b/ceph-proxy/hooks/charmhelpers/osplatform.py index ea490bbd..d9a4d5c0 100644 --- a/ceph-proxy/hooks/charmhelpers/osplatform.py +++ b/ceph-proxy/hooks/charmhelpers/osplatform.py @@ -8,12 +8,18 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. tuple_platform = platform.linux_distribution() current_platform = tuple_platform[0] if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 66cb8ec5..a1e96f12 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -76,13 +76,11 @@ def _configure_services(self): 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } - radosgw_config = {"use-embedded-webserver": True} proxy_config = { 'source': self.source } configs = {'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config, - 'ceph-radosgw': radosgw_config, 'ceph-proxy': proxy_config} super(CephBasicDeployment, self)._configure_services(configs) diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index 8e13ab14..f9e4c3af 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -148,7 +148,8 @@ def validate_services_by_name(self, sentry_services): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 6a0ba837..401c0328 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -20,6 +20,7 @@ import six import time import urllib +import urlparse import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client @@ 
-37,6 +38,7 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) +from charmhelpers.core.decorators import retry_on_exception DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -303,6 +305,46 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(5, base_delay=10) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel['api_version'] != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel['api_version'], api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. + """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + def authenticate_cinder_admin(self, keystone_sentry, username, password, tenant): """Authenticates admin user with cinder.""" @@ -311,6 +353,37 @@ def authenticate_cinder_admin(self, keystone_sentry, username, ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=username, password=password, + tenant_name=project_name, + auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + return keystone_client_v3.Client( + session=keystone_session.Session(auth=auth) + ) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None): @@ -319,30 +392,28 @@ def authenticate_keystone_admin(self, keystone_sentry, user, password, if not keystone_ip: keystone_ip = 
keystone_sentry.info['public-address'] - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) + user_domain_name = None + domain_name = None + if api_version == 3: + user_domain_name = 'admin_domain' + domain_name = user_domain_name + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant, + api_version=api_version, + user_domain_name=user_domain_name, + domain_name=domain_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" @@ -1133,3 +1204,70 @@ def get_amqp_message_by_unit(self, sentry_unit, queue="test", else: msg = 'No message retrieved.' amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. 
{} < mitaka'.format(os_release))
+            return
+        _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+        self.log.debug('Checking memcached is running')
+        ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg='Memcache running check '
+                                'failed {}'.format(ret))
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache url is configured in {}'.format(
+            conf))
+        if self.validate_config_data(sentry_unit, conf, section, _kvs):
+            message = "Memcache config error in: {}".format(conf)
+            amulet.raise_status(amulet.FAIL, msg=message)
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache configuration in '
+                       '/etc/memcached.conf')
+        contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+                                           fatal=True)
+        ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+        if ubuntu_release <= 'trusty':
+            memcache_listen_addr = 'ip6-localhost'
+        else:
+            memcache_listen_addr = '::1'
+        expected = {
+            '-p': '11211',
+            '-l': memcache_listen_addr}
+        found = []
+        for key, value in expected.items():
+            for line in contents.split('\n'):
+                if line.startswith(key):
+                    self.log.debug('Checking {} is set to {}'.format(
+                        key,
+                        value))
+                    assert value == line.split()[-1]
+                    self.log.debug(line.split()[-1])
+                    found.append(key)
+        if sorted(found) == sorted(expected.keys()):
+            self.log.debug('OK')
+        else:
+            message = "Memcache config error in: /etc/memcached.conf"
+            amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/ceph-proxy/tests/charmhelpers/core/__init__.py b/ceph-proxy/tests/charmhelpers/core/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/ceph-proxy/tests/charmhelpers/core/decorators.py b/ceph-proxy/tests/charmhelpers/core/decorators.py
new file mode 100644
index 00000000..6ad41ee4
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+#  Edward Hope-Morley
+#

+import time

+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)


+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+    """If the decorated function raises exception exc_type, allow num_retries
+    retry attempts before raising the exception.
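+
+    A sketch of typical use (the decorated function is illustrative)::
+
+        @retry_on_exception(3, base_delay=5, exc_type=IOError)
+        def fetch_key():
+            ...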
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/ceph-proxy/tests/charmhelpers/core/files.py b/ceph-proxy/tests/charmhelpers/core/files.py new file mode 100644 index 00000000..fdd82b75 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/ceph-proxy/tests/charmhelpers/core/fstab.py b/ceph-proxy/tests/charmhelpers/core/fstab.py new file mode 100644 index 00000000..d9fa9152 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. 
' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..e44e22bf --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,1068 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. 
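As an aside, a hedged sketch of how the Fstab helper above might be exercised; the scratch path is hypothetical, and the file must already exist because the class opens it in 'rb+' mode:

from charmhelpers.core.fstab import Fstab

# Work against a scratch copy rather than the live /etc/fstab.
fstab = Fstab(path='/tmp/fstab.scratch')

# add_entry() returns the entry on success, or False when an entry
# for the same device is already present.
entry = Fstab.Entry('/dev/vdb1', '/srv/data', 'ext4', 'defaults', 0, 2)
fstab.add_entry(entry)

# Entries can be looked up by any attribute and removed again.
found = fstab.get_entry_by_attr('mountpoint', '/srv/data')
if found:
    fstab.remove_entry(found)
fstab.close()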
+# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
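+        # 'state' is the plain dict that __getstate__ above produced.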
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
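+        >>> # (the previous value is read from the snapshot that was
+        >>> # saved to disk at the end of the last hook run)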
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + else: + config_cmd_line.append('--all') + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
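+        # Only non-None values are coerced; None is preserved so that the
+        # key is unset (e.g. sent as 'key=' on the command-line path below).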
+ if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + 
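To show how the relation helpers above compose inside a hook, here is a hedged sketch; the 'cluster' peer relation name is an assumption made for this example, not something this patch defines:

from charmhelpers.core import hookenv


def peer_private_addresses():
    # Walk every relation id of the (hypothetical) 'cluster' peer
    # relation, then every remote unit on it, collecting addresses.
    addresses = []
    for rid in hookenv.relation_ids('cluster'):
        for unit in hookenv.related_units(rid):
            addr = hookenv.relation_get('private-address',
                                        unit=unit, rid=rid)
            if addr:
                addresses.append(addr)
    return addresses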
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
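+        # (Passing config_save=False disables the automatic config().save()
+        # that otherwise runs at successful hook exit.)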
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0]"""
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering the wrapped function as one or more hooks.
+
+        The function is always also registered under its own name (and,
+        if that name contains underscores, under the dashed variant too).
+        """
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+def charm_dir():
+    """Return the root directory of the current charm"""
+    return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+def action_set(values):
+    """Sets the values to be returned after the action finishes"""
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def action_fail(message):
+    """Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved."""
+    subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. """ + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. 
The <klass> and <pid> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """is used to update the current status of a registered payload.
+    The <klass> and <pid> provided must match a payload that has been
+    previously registered with juju using payload-register. The <status>
+    must be one of the following: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """used to fetch the resource path of the given name.
+
+    <name> must match a name of defined resource in metadata.yaml
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (e.g. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string.
The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py new file mode 100644 index 00000000..edbb72ff --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -0,0 +1,918 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import glob +import grp +import random +import string +import subprocess +import hashlib +import functools +import itertools +import six + +from contextlib import contextmanager +from collections import OrderedDict +from .hookenv import log +from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import + +UPDATEDB_PATH = '/etc/updatedb.conf' + +def service_start(service_name, **kwargs): + """Start a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('start', service_name, **kwargs) + + +def service_stop(service_name, **kwargs): + """Stop a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example stops the ceph-osd service for instance id=4: + + service_stop('ceph-osd', id=4) + + :param service_name: the name of the service to stop + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + return service('stop', service_name, **kwargs) + + +def service_restart(service_name, **kwargs): + """Restart a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_restart('ceph-osd', id=4) + + :param service_name: the name of the service to restart + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False, **kwargs): + """Reload a system service, optionally falling back to restart if + reload fails. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. 
For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be reloaded. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_reload('ceph-osd', id=4) + + :param service_name: the name of the service to reload + :param restart_on_failure: boolean indicating whether to fallback to a + restart if the reload fails. + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + service_result = service('reload', service_name, **kwargs) + if not service_result and restart_on_failure: + service_result = service('restart', service_name, **kwargs) + return service_result + + +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", + **kwargs): + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + :param service_name: the name of the service to pause + :param init_dir: path to the upstart init directory + :param initd_dir: path to the sysv init directory + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems which do not support + key=value arguments via the commandline. + """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. 
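+    :returns: True if the service is running after the resume attempt.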
+ """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. + """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. 
+ + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + """Add a group to the system + + Will log but otherwise succeed if the group already exists. 
+ + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab""" + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file""" + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting 
{}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+                          Can be any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+        })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    decorated function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop/start services instead
+                      of restarting them
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # Picking a random length is fine to do with a weak PRNG; only
+        # the password characters themselves need a strong source.
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return ''.join(random_chars)
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
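+    (Resolved by following the /sys/class/net/<iface>/master symlink.)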
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): + """Return a list of nics of given type(s)""" + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + + interfaces = [] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') + for line in ip_output: + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + """Set the Maximum Transmission Unit (MTU) on a network interface.""" + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +@contextmanager +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ + cur = os.getcwd() + try: + yield os.chdir(directory) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. 
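+        # /proc/meminfo should always contain a MemTotal line; if we get
+        # here the kernel interface did not look as expected.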
+        raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    """Add path to the PRUNEPATHS entry in the updatedb config file."""
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    """Return updatedb config text with new_path added to PRUNEPATHS."""
+    lines = [line for line in updatedb_text.split("\n")]
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py b/ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py b/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 00000000..902d469f
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,56 @@
+import subprocess
+import yum
+import os
+
+
+def service_available(service_name):
+    """Determine whether a system service is available."""
+    if os.path.isdir('/run/systemd/system'):
+        cmd = ['systemctl', 'is-enabled', service_name]
+    else:
+        cmd = ['service', service_name, 'is-enabled']
+    return subprocess.call(cmd) == 0
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['groupadd']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('-r')
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    with open('/etc/os-release', 'r') as lsb:
+        for l in lsb:
+            s = l.split('=')
+            if len(s) != 2:
+                continue
+            d[s[0].strip()] = s[1].strip()
+    return d
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    * 1 => Installed revno is greater than supplied arg
+    * 0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function imports YumBase function if the pkgcache argument
+    is None.
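+
+    A hedged doctest sketch (the return value assumes the installed
+    ceph package reports version 10.2.0 exactly)::
+
+        >>> cmp_pkgrevno('ceph', '10.2.0')  # doctest: +SKIP
+        0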
+    """
+    if not pkgcache:
+        y = yum.YumBase()
+        packages = y.doPackageLists()
+        pkgcache = {i.name: i.version for i in packages['installed']}
+    pkg = pkgcache[package]
+    if pkg > revno:
+        return 1
+    if pkg < revno:
+        return -1
+    return 0
diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py
new file mode 100644
index 00000000..8c66af55
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -0,0 +1,56 @@
+import subprocess
+
+
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        return b'unrecognized service' not in e.output
+    else:
+        return True
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['addgroup']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('--system')
+    else:
+        cmd.extend([
+            '--group',
+        ])
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/lsb-release in a dict"""
+    d = {}
+    with open('/etc/lsb-release', 'r') as lsb:
+        for l in lsb:
+            k, v = l.split('=')
+            d[k.strip()] = v.strip()
+    return d
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    * 1 => Installed revno is greater than supplied arg
+    * 0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function imports the apt_cache function from charmhelpers.fetch
+    if the pkgcache argument is None. Be sure charmhelpers.fetch is
+    importable if you call this function without a pkgcache, or pass an
+    apt_pkg.Cache() instance.
+    """
+    import apt_pkg
+    if not pkgcache:
+        from charmhelpers.fetch import apt_cache
+        pkgcache = apt_cache()
+    pkg = pkgcache[package]
+    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
diff --git a/ceph-proxy/tests/charmhelpers/core/hugepage.py b/ceph-proxy/tests/charmhelpers/core/hugepage.py
new file mode 100644
index 00000000..54b5b5e2
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+    add_group,
+    add_user_to_group,
+    fstab_mount,
+    mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
+                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
+                     pagesize='2MB', mount=True, set_shmmax=False):
+    """Enable hugepages on system.
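+
+    Hypothetical call (the username and page count are illustrative)::
+
+        hugepage_support('libvirt-qemu', nr_hugepages=512)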
+
+    Args:
+    user (str) -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to mount hugepages
+    set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the reservation
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
diff --git a/ceph-proxy/tests/charmhelpers/core/kernel.py b/ceph-proxy/tests/charmhelpers/core/kernel.py
new file mode 100644
index 00000000..2d404528
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski "
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the Linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 00000000..1c402c11
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 00000000..3de372fd
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/ceph-proxy/tests/charmhelpers/core/services/__init__.py b/ceph-proxy/tests/charmhelpers/core/services/__init__.py
new file mode 100644
index 00000000..61fd074e
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/ceph-proxy/tests/charmhelpers/core/services/base.py b/ceph-proxy/tests/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..ca9dc996
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/services/base.py
@@ -0,0 +1,351 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following format (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0o400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0o555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = OrderedDict()
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+
+        The `provide_data()` method can optionally accept two parameters:
+
+        * ``remote_service`` The name of the remote service that the data will
+          be provided to. The `provide_data()` method will be called once
+          for each connected service (not unit). This allows the method to
+          tailor its data to the given service.
+        * ``service_ready`` Whether or not the service definition had all of
+          its requirements met, and thus the ``data_ready`` callbacks run.
+
+        Note that the ``provided_data`` methods are now called **after** the
+        ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
+        a chance to generate any data necessary for providing to the remote
+        services.
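+
+        A minimal provider sketch (class name and data keys are
+        illustrative)::
+
+            class WebsiteProvider(object):
+                name = 'website'
+
+                def provide_data(self, remote_service, service_ready):
+                    return {'host': hookenv.unit_get('private-address'),
+                            'port': 80}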
+ """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. 
+ """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-proxy/tests/charmhelpers/core/services/helpers.py b/ceph-proxy/tests/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. 
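+
+    A minimal subclass sketch (relation and key names are illustrative;
+    the required attributes are described below)::
+
+        class DatabaseRelation(RelationContext):
+            name = 'db'
+            interface = 'mysql'
+            required_keys = ['host', 'user', 'password', 'database']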
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will be used to populate the
+    dictionary values (see `get_data`, below).
+
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
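+
+    A hedged usage sketch (key access assumes a complete, joined
+    relation)::
+
+        db = MysqlRelation()
+        if db.is_ready():
+            host = db['db'][0]['host']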
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
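+
+    Hypothetical service definition entry (file names are illustrative)::
+
+        'data_ready': TemplateCallback(source='app.conf.j2',
+                                       target='/etc/app/app.conf'),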
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
+
+    :param str target: The target to write the rendered template to (or None)
+    :param str owner: The owner of the rendered file
+    :param str group: The group of the rendered file
+    :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
+    :param template_loader: A jinja2 template loader
+
+    :return str: The rendered template
+    """
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader
+
+    def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
+        service = manager.get_service(service_name)
+        context = {'ctx': {}}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/ceph-proxy/tests/charmhelpers/core/strutils.py b/ceph-proxy/tests/charmhelpers/core/strutils.py
new file mode 100644
index 00000000..dd9b9717
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/strutils.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import re
+
+
+def bool_from_string(value):
+    """Interpret string value as boolean.
+
+    Returns True if value translates to True otherwise False.
+    """
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+
+    value = value.strip().lower()
+
+    if value in ['y', 'yes', 'true', 't', 'on']:
+        return True
+    elif value in ['n', 'no', 'false', 'f', 'off']:
+        return False
+
+    msg = "Unable to interpret string value '%s' as boolean" % (value)
+    raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
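+
+    For example, using the unit table below::
+
+        >>> bytes_from_string('2MB')
+        2097152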
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/ceph-proxy/tests/charmhelpers/core/sysctl.py b/ceph-proxy/tests/charmhelpers/core/sysctl.py
new file mode 100644
index 00000000..6e413e31
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/sysctl.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. '
+
+
+def create(sysctl_dict, sysctl_file):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :returns: None
+    """
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+        level=DEBUG)
+
+    check_call(["sysctl", "-p", sysctl_file])
diff --git a/ceph-proxy/tests/charmhelpers/core/templating.py b/ceph-proxy/tests/charmhelpers/core/templating.py
new file mode 100644
index 00000000..7b801a34
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/templating.py
@@ -0,0 +1,84 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+    """
+    Render a template.
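+
+    A hedged example (the template name and context value are
+    illustrative)::
+
+        render('ceph.conf', '/etc/ceph/ceph.conf',
+               {'fsid': 'some-uuid'}, perms=0o644)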
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute. It can also be `None`, in which
+    case no file will be written.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    The rendered template will be written to the file as well as being returned
+    as a string.
+
+    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
+    installed, calling this will attempt to use charmhelpers.fetch.apt_install
+    to install it.
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        if sys.version_info.major == 2:
+            apt_install('python-jinja2', fatal=True)
+        else:
+            apt_install('python3-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        template = template_env.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
diff --git a/ceph-proxy/tests/charmhelpers/core/unitdata.py b/ceph-proxy/tests/charmhelpers/core/unitdata.py
new file mode 100644
index 00000000..54ec969f
--- /dev/null
+++ b/ceph-proxy/tests/charmhelpers/core/unitdata.py
@@ -0,0 +1,518 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+#  Kapil Thangavelu
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic; one simple integration is
+via the HookData contextmanager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+set up a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+    from charmhelpers.core import hookenv, unitdata
+
+    hook_data = unitdata.HookData()
+    db = unitdata.kv()
+    hooks = hookenv.Hooks()
+
+    @hooks.hook
+    def config_changed():
+        # Print all changes to configuration from previously seen
+        # values.
+        for changed, (prev, cur) in hook_data.conf.items():
+            print('config changed', changed,
+                  'previous value', prev,
+                  'current value', cur)
+
+        # Get some unit specific bookkeeping
+        if not db.get('pkg_key'):
+            key = urllib.urlopen('https://example.com/pkg_key').read()
+            db.set('pkg_key', key)
+
+        # Directly access all charm config as a mapping.
+        conf = db.getrange('config', True)
+
+        # Directly access all relation data as a mapping
+        rels = db.getrange('rels', True)
+
+    if __name__ == '__main__':
+        with hook_data():
+            hooks.execute(sys.argv)
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name and timestamp)::
+
+    >>> from unitdata import kv
+    >>> db = kv()
+    >>> with db.hook_scope('install'):
+    ...    # do work, in transactional scope.
+    ...    db.set('x', 1)
+    >>> db.get('x')
+    1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data structure capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+    >>> kv.set('y', True)
+    >>> kv.get('y')
+    True
+
+    # We can set complex values (dicts, lists) as a single key.
+    >>> kv.set('config', {'a': 1, 'b': True})
+
+    # Also supports returning dictionaries as a record which
+    # provides attribute access.
+    >>> config = kv.get('config', record=True)
+    >>> config.b
+    True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+    >>> kv.getrange('gui.', strip=True)
+    {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+    >>> data = {'debug': True, 'option': 2}
+    >>> delta = kv.delta(data, 'config.')
+    >>> delta.debug.previous
+    None
+    >>> delta.debug.current
+    True
+    >>> delta
+    {'debug': (None, True), 'option': (None, 2)}
+
+Note that the delta method does not persist the actual change; it needs
+to be explicitly saved via the 'update' method::
+
+    >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name::
+
+    >>> with db.hook_scope('config-changed'):
+    ...    db.set('x', 42)
+    >>> db.gethistory('x')
+    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu'
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically JSON encoded/decoded.
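+
+    A hedged sketch (the database path is illustrative)::
+
+        >>> db = Storage('/tmp/example.db')  # doctest: +SKIP
+        >>> db.set('x', 1)
+        1
+        >>> db.get('x')
+        1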
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute(sys.argv)
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however, it is useful
+        # for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
From 02e0d081c12c37457dbacc61547cbe1ab46b266d Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Tue, 7 Feb 2017 10:20:19 +0100
Subject: [PATCH 1280/2699] Add Keystone v3 support

Change-Id: I5bb974064f0980a3f599eae3e2ba86b405f917ac
Closes-Bug: 1585708
---
 ceph-radosgw/hooks/ceph_radosgw_context.py  |   2 +
 ceph-radosgw/templates/ceph.conf            |   8 +
 ceph-radosgw/tests/basic_deployment.py      | 156 +++++++++++++++++-
 .../unit_tests/test_ceph_radosgw_context.py |  10 +-
 4 files changed, 170 insertions(+), 6 deletions(-)

diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py
index 3b225afa..2fa1c05d 100644
--- a/ceph-radosgw/hooks/ceph_radosgw_context.py
+++ b/ceph-radosgw/hooks/ceph_radosgw_context.py
@@ -79,6 +79,8 @@ def __call__(self):
                 ctxt['admin_token'] = \
                     relation_get('admin_token',
                                  unit, relid)
+        if cmp_pkgrevno('radosgw', "10.2.0") >= 0:
+            ctxt['auth_keystone_v3_supported'] = True
         ctxt['auth_type'] = 'keystone'
         ctxt['user_roles'] = config('operator-roles')
         ctxt['cache_size'] = config('cache-size')
diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf
index d2674e6c..392ec031 100644
--- a/ceph-radosgw/templates/ceph.conf
+++ b/ceph-radosgw/templates/ceph.conf
@@ -31,7 +31,15 @@ log file = /var/log/ceph/radosgw.log
 rgw frontends = civetweb port={{ port }}
 {% if auth_type == 'keystone' %}
 rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
+{% if auth_keystone_v3_supported and api_version == '3' -%}
+rgw keystone api version = 3
+rgw keystone admin user = {{ admin_user }}
+rgw keystone admin password = {{ admin_password }}
+rgw keystone admin domain = {{ admin_domain_name }}
+rgw keystone admin project = {{ admin_tenant_name }}
+{% else -%}
 rgw keystone admin token = {{ admin_token }}
+{% endif -%}
 rgw keystone accepted roles = {{ user_roles }}
 rgw keystone token cache size = {{ cache_size }}
 rgw keystone revocation interval = {{ revocation_check_interval }}
diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py
index 155d5aba..f59c4ea7 100644
--- a/ceph-radosgw/tests/basic_deployment.py
+++ b/ceph-radosgw/tests/basic_deployment.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import amulet
+import keystoneclient
 import subprocess
+import swiftclient
 import json
 import time
 from charmhelpers.contrib.openstack.amulet.deployment import (
@@ -184,10 +186,21 @@ def _initialize_tests(self):
                                            description='demo tenant',
                                            enabled=True)
         self.keystone.roles.create(name=self.demo_role)
-        self.keystone.users.create(name=self.demo_user,
-                                   password='password',
-                                   tenant_id=tenant.id,
-                                   email='demo@demo.com')
+        user = self.keystone.users.create(name=self.demo_user,
+                                          password='password',
+                                          tenant_id=tenant.id,
+                                          email='demo@demo.com')
+
+        # Grant Member role to demo_user
+        roles = [r._info for r in self.keystone.roles.list()]
+        for r in roles:
+            if r['name'].lower() == 'member':
+                self.keystone_member_role_id = r['id']
+
+        self.keystone.roles.add_user_role(
+            user=user.id,
+            role=self.keystone_member_role_id,
+            tenant=tenant.id)
 
         # Authenticate demo user with keystone
         self.keystone_demo = u.authenticate_keystone_user(self.keystone,
@@ -211,6 +224,77 @@ def _initialize_tests(self):
                                               password=ks_obj_rel['service_password'],
                                               tenant=ks_obj_rel['service_tenant'])
 
+        self.keystone_v3 = None
+
+    def _initialize_keystone_v3(self):
+        u.log.debug('Initializing Keystone v3 tests...')
+        if self.keystone_v3 is not None:
+            u.log.debug('...already initialized.')
+            return
+
+        se_rels = [(self.keystone_sentry, 'ceph-radosgw:identity-service')]
+        u.keystone_configure_api_version(se_rels, self, 3)
+
+        # Prepare Keystone Client with a domain scoped token for admin user
+        self.keystone_v3 = u.authenticate_keystone(
+            self.keystone_sentry.info['public-address'],
+            'admin', 'openstack', api_version=3,
+            user_domain_name='admin_domain', domain_name='admin_domain'
+        )
+
+        # Create a demo domain, project and user
+        self.demo_domain = 'demoDomain'
+        self.demo_project = 'demoProject'
+
+        try:
+            domain = self.keystone_v3.domains.create(
+                self.demo_domain,
+                description='Demo Domain',
+                enabled=True,
+            )
+        except keystoneclient.exceptions.Conflict:
+            u.log.debug('Domain {} already exists, proceeding.'
+                        ''.format(self.demo_domain))
+
+        try:
+            project = self.keystone_v3.projects.create(
+                self.demo_project,
+                domain,
+                description='Demo Project',
+                enabled=True,
+            )
+        except keystoneclient.exceptions.Conflict:
+            u.log.debug('Project {} already exists in domain {}, proceeding.'
+ ''.format(self.demo_project, domain.name)) + + try: + user = self.keystone_v3.users.create( + self.demo_user, + domain=domain.id, + project=self.demo_project, + password='password', + email='demov3@demo.com', + description='Demo v3', + enabled=True, + ) + except keystoneclient.exceptions.Conflict: + u.log.debug('User {} already exists in domain {}, proceeding.' + ''.format(self.demo_user, domain.name)) + self.keystone_v3.roles.grant(self.keystone_member_role_id, + user=user.id, + project=project.id) + + # Prepare Keystone Client with a project scoped token for demo user + self.keystone_demo_v3 = u.authenticate_keystone( + self.keystone_sentry.info['public-address'], + self.demo_user, 'password', api_version=3, + user_domain_name=self.demo_domain, + project_domain_name=self.demo_domain, + project_name=self.demo_project, + ) + + u.log.debug('OK') + def test_100_ceph_processes(self): """Verify that the expected service processes are running on each ceph unit.""" @@ -519,6 +603,69 @@ def test_402_swift_api_connection(self): assert('content-type' in headers.keys()) assert(containers == []) + def test_403_swift_keystone_auth(self, api_version=2): + """Check Swift Object Storage functionlaity""" + u.log.debug('Check Swift Object Storage functionality (api_version={})' + ''.format(api_version)) + keystone_ip = self.keystone_sentry.info['public-address'] + base_ep = "http://{}:5000".format(keystone_ip.strip().decode('utf-8')) + if api_version == 3: + self._initialize_keystone_v3() + ep = base_ep + '/v3' + os_options = { + 'user_domain_name': self.demo_domain, + 'project_domain_name': self.demo_domain, + 'project_name': self.demo_project, + } + conn = swiftclient.client.Connection( + authurl=ep, + user=self.demo_user, + key='password', + os_options=os_options, + auth_version=api_version, + ) + else: + ep = base_ep + '/v2.0' + conn = swiftclient.client.Connection( + authurl=ep, + user=self.demo_user, + key='password', + tenant_name=self.demo_tenant, + auth_version=api_version, + ) + u.log.debug('Create container') + container = 'demo-container' + try: + conn.put_container(container) + except swiftclient.exceptions.ClientException as e: + if api_version == 3 and e.http_status == 409: + # Ceph RadosGW is currently configured with a global namespace + # for container names. Make use of this to verify that we + # cannot create a container with a name already taken by a + # same username authenticated in different domain in the + # previous run of this function. If / when we support per + # tenant namespace this logic must be replaced. + u.log.debug('v3 user not allowed to overwrite previously ' + 'created container created by v2 user...OK') + container = 'demo-container-v3' + conn.put_container(container) + else: + raise(e) + + resp_headers, containers = conn.get_account() + if (len(containers) and 'name' in containers[0] and + containers[0]['name'] == container): + u.log.debug('OK') + else: + amulet.raise_status(amulet.FAIL, 'container not created {} {}' + ''.format(resp_headers, containers)) + + def test_403_swift_keystone_auth_v3(self): + if self._get_openstack_release() >= self.trusty_liberty: + self.test_403_swift_keystone_auth(api_version=3) + else: + u.log.debug('Skipping test for openstack_release < trusty_liberty') + def test_498_radosgw_cmds_exit_zero(self): """Check basic functionality of radosgw cli commands against the ceph_radosgw unit.""" @@ -570,7 +717,6 @@ def test_910_pause_and_resume(self): assert self._wait_on_action(action_id), "Resume action failed." 
assert u.status_get(unit)[0] == "active" u.log.debug('OK') - # Note(beisner): need to add basic object store functional checks. # FYI: No restart check as ceph services do not restart # when charm config changes, unless monitor count increases. diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 79f5310a..e55249c3 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -73,7 +73,7 @@ def setUp(self): @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, - _format_ipv6_addr): + _format_ipv6_addr, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') self.test_config.set('revocation-check-interval', '7500000') @@ -84,6 +84,9 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, _rids.return_value = 'rid1' _runits.return_value = 'runit' _ctxt_comp.return_value = True + self.cmp_pkgrevno.return_value = -1 + if jewel_installed: + self.cmp_pkgrevno.return_value = 0 id_data = { 'service_port': 9876, 'service_host': '127.0.0.4', @@ -114,8 +117,13 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'service_protocol': 'http', 'user_roles': 'Babel', } + if jewel_installed: + expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) + def test_ids_ctxt_jewel(self): + self.test_ids_ctxt(jewel_installed=True) + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') From 19ccb86bb7fa0f453487da6e1dfe973b0e64a224 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 16 Feb 2017 15:00:09 -0500 Subject: [PATCH 1281/2699] Only check for upgrades if bootstrapped Only check for upgrade requests if the local unit is installed and bootstrapped, avoiding attempts to upgrade on initial execution of config-changed for trusty UCA pockets. Note that the upgrade process relies on a running ceph cluster. Change-Id: Ia3cfcedc6bdc4aff3f50f1bba8f524ca850bbf12 Closes-Bug: 1662943 --- ceph-mon/hooks/ceph_hooks.py | 5 ++++- ceph-mon/unit_tests/test_upgrade.py | 26 +++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index f3180771..8ed5e719 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -87,6 +87,10 @@ def check_for_upgrade(): + if not ceph.is_bootstrapped(): + log("Ceph is not bootstrapped, skipping upgrade checks.") + return + release_info = host.lsb_release() if not release_info['DISTRIB_CODENAME'] == 'trusty': log("Invalid upgrade path from {}. 
Only trusty is currently " @@ -180,7 +184,6 @@ def config_changed(): if config('prefer-ipv6'): assert_charm_supports_ipv6() - # Check if an upgrade was requested check_for_upgrade() log('Monitor hosts are ' + repr(get_mon_hosts())) diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index 7ecf228a..75be7196 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -16,12 +16,15 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): + + @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv, - version): + version, is_bootstrapped): + is_bootstrapped.return_value = True version.side_effect = ['firefly', 'hammer'] host.lsb_release.return_value = { 'DISTRIB_CODENAME': 'trusty', @@ -35,3 +38,24 @@ def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv, roll_monitor_cluster.assert_called_with( new_version='hammer', upgrade_key='admin') + + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.host') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, + host, hookenv, + version, is_bootstrapped): + is_bootstrapped.return_value = False + version.side_effect = ['firefly', 'hammer'] + host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + check_for_upgrade() + + roll_monitor_cluster.assert_not_called() From 9479a3a8490d005615c1271c163c505fee23815c Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Thu, 16 Feb 2017 11:47:01 -0800 Subject: [PATCH 1282/2699] Fix parse_key parse_key returns incorrect data when the cephx key contains [caps]. Change-Id: I61aa5fd888e19f778151239436fb7654d7cc48b5 Closes-Bug: 1665149 --- ceph-mon/lib/ceph/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 26f1b91f..a453b44a 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -960,7 +960,7 @@ def parse_key(raw_key): else: for element in raw_key.splitlines(): if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 + return element.split(' = ')[1].strip() # IGNORE:E1103 return key From 4f61b349eb82a4f150532f71ee73d1e5aba87445 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 16 Feb 2017 15:00:20 -0500 Subject: [PATCH 1283/2699] Only check for upgrades if bootstrapped Only check for upgrade requests if the local unit is installed and bootstrapped, avoiding attempts to upgrade on initial execution of config-changed for trusty UCA pockets. Note that the upgrade process relies on a running ceph cluster. 
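The guard itself is small; condensed, the top of check_for_upgrade() now
reads as follows (a sketch of the hunk below, using the hook module's
existing ceph/log imports):

    def check_for_upgrade():
        # Rolling upgrades coordinate through the monitor cluster, so a
        # unit that has not bootstrapped yet has nothing to upgrade.
        if not ceph.is_bootstrapped():
            log("Ceph is not bootstrapped, skipping upgrade checks.")
            return
        # ... existing source/version comparison continues unchanged ...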
Change-Id: Ic7e427368a373ed853111d837a0223a75b46ce8e Closes-Bug: 1662943 --- ceph-osd/hooks/ceph_hooks.py | 4 ++++ ceph-osd/unit_tests/test_upgrade.py | 26 +++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 38e38dc0..d99db1e1 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -82,6 +82,10 @@ def check_for_upgrade(): + if not ceph.is_bootstrapped(): + log("Ceph is not bootstrapped, skipping upgrade checks.") + return + release_info = host.lsb_release() if not release_info['DISTRIB_CODENAME'] == 'trusty': log("Invalid upgrade path from {}. Only trusty is currently " diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index c1c99da6..080073c1 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -17,12 +17,15 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): + + @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_osd_cluster') def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, - version): + version, is_bootstrapped): + is_bootstrapped.return_value = True version.side_effect = ['firefly', 'hammer'] host.lsb_release.return_value = { 'DISTRIB_CODENAME': 'trusty', @@ -36,3 +39,24 @@ def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, roll_osd_cluster.assert_called_with( new_version='hammer', upgrade_key='osd-upgrade') + + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.host') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, + host, hookenv, + version, is_bootstrapped): + is_bootstrapped.return_value = False + version.side_effect = ['firefly', 'hammer'] + host.lsb_release.return_value = { + 'DISTRIB_CODENAME': 'trusty', + } + previous_mock = MagicMock().return_value + previous_mock.previous.return_value = "cloud:trusty-juno" + hookenv.config.side_effect = [previous_mock, + config_side_effect('source')] + check_for_upgrade() + + roll_monitor_cluster.assert_not_called() From aadf0f37e34084f716422cb6d7e445701a43eef5 Mon Sep 17 00:00:00 2001 From: Ante Karamatic Date: Tue, 15 Nov 2016 17:50:17 +0100 Subject: [PATCH 1284/2699] Check cumulative number of units in all osd relations Instead of comparing minimal number of OSD units with number of units in each osd relation, we should check the total number of OSD units, supporting deployments whether OSD's reside in multiple instances of the ceph-osd application. Closes-Bug: 1642487 Change-Id: Ie6c503b5a03a22367b27f1c851d29f947d193388 --- ceph-mon/hooks/ceph_hooks.py | 6 ++-- ceph-mon/unit_tests/test_ceph_hooks.py | 45 +++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 480f750b..4b517253 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -377,9 +377,11 @@ def related_osds(num_units=3): @return: boolean indicating whether the required number of units where detected. 
''' + units = 0 for r_id in relation_ids('osd'): - if len(related_units(r_id)) >= num_units: - return True + units += len(related_units(r_id)) + if units >= num_units: + return True return False diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 095ed1dd..2d5a4b33 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -2,7 +2,7 @@ import unittest import sys -from mock import patch, MagicMock, DEFAULT +from mock import patch, MagicMock, DEFAULT, call # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. @@ -143,3 +143,46 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies(self): ceph_hooks.upgrade_charm() mocks["apt_install"].assert_called_with( ["python-dbus", "lockfile-progs"]) + + +class RelatedUnitsTestCase(unittest.TestCase): + + _units = { + 'osd:0': ['ceph-osd-a/0', + 'ceph-osd-a/1', + 'ceph-osd-a/2'], + 'osd:23': ['ceph-osd-b/1', + 'ceph-osd-b/2', + 'ceph-osd-b/3'], + } + + def setUp(self): + super(RelatedUnitsTestCase, self).setUp() + + @patch.object(ceph_hooks, 'relation_ids') + @patch.object(ceph_hooks, 'related_units') + def test_related_ods_single_relation(self, + related_units, + relation_ids): + relation_ids.return_value = ['osd:0'] + related_units.side_effect = lambda x: self._units.get(x) + self.assertTrue(ceph_hooks.related_osds()) + self.assertFalse(ceph_hooks.related_osds(6)) + relation_ids.assert_called_with('osd') + related_units.assert_called_with('osd:0') + + @patch.object(ceph_hooks, 'relation_ids') + @patch.object(ceph_hooks, 'related_units') + def test_related_ods_multi_relation(self, + related_units, + relation_ids): + relation_ids.return_value = ['osd:0', 'osd:23'] + related_units.side_effect = lambda x: self._units.get(x) + self.assertTrue(ceph_hooks.related_osds()) + self.assertTrue(ceph_hooks.related_osds(6)) + self.assertFalse(ceph_hooks.related_osds(9)) + relation_ids.assert_called_with('osd') + related_units.assert_has_calls([ + call('osd:0'), + call('osd:23') + ]) From 282dc677ce4dac53795db791b0aec14c6db418c3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 21 Feb 2017 14:59:07 -0500 Subject: [PATCH 1285/2699] Remove ceph-base layer This migrates the required functionality from the Ceph base layer into this one to remove the requirement on ceph-base Change-Id: I136aeeb24a5dddadf4c9306ffe324653ed5b1a66 --- ceph-fs/src/config.yaml | 6 +++ ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/reactive/ceph_fs.py | 68 +++++++++++++++++++++++++++++++-- 3 files changed, 72 insertions(+), 4 deletions(-) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 124b2fe2..aa944462 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -16,6 +16,12 @@ options: Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive for precise but is provided in the Ubuntu cloud archive. + key: + type: string + default: + description: | + Key ID to import to the apt keyring to support use with arbitary source + configuration from outside of Launchpad archives or PPA's. 
use-syslog: type: boolean default: False diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index d22dc1e9..5a6786a2 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,4 +1,4 @@ -includes: ['layer:apt', 'layer:ceph-base', 'interface:ceph-mds'] +includes: ['layer:basic', 'layer:apt', 'interface:ceph-mds'] options: apt: packages: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 72158b64..e8a6b3ed 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -16,10 +16,12 @@ import socket import subprocess -from charms.reactive import when, when_not, set_state +from charms import reactive +from charms.reactive import when, when_not, set_state, is_state +from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( application_version_set, config, log, ERROR, cached, DEBUG, unit_get, - network_get_primary_address, + network_get_primary_address, relation_ids, status_set) from charmhelpers.core.host import service_restart from charmhelpers.contrib.network.ip import ( @@ -31,7 +33,9 @@ apt_install, filter_installed_packages) import jinja2 -from charms.apt import queue_install +from charms.apt import queue_install, add_source + +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] try: import dns.resolver @@ -50,6 +54,12 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): return template.render(context) +@when_not('apt.installed.ceph') +def install_ceph_base(): + add_source(config('source'), key=config('key')) + queue_install(PACKAGES) + + @when_not('apt.installed.ceph-mds') def install_cephfs(): queue_install(['ceph-mds']) @@ -189,3 +199,55 @@ def get_network_addrs(config_opt): return [get_host_ip()] return addrs + + +def assess_status(): + """Assess status of current unit""" + statuses = set([]) + messages = set([]) + if is_state('cephfs.started'): + (status, message) = log_mds() + statuses.add(status) + messages.add(message) + if 'blocked' in statuses: + status = 'blocked' + elif 'waiting' in statuses: + status = 'waiting' + else: + status = 'active' + message = '; '.join(messages) + status_set(status, message) + + +def get_running_mds(): + """Returns a list of the pids of the current running MDS daemons""" + cmd = ['pgrep', 'ceph-mds'] + try: + result = subprocess.check_output(cmd).decode('utf-8') + return result.split() + except subprocess.CalledProcessError: + return [] + + +def log_mds(): + if len(relation_ids('ceph-mds')) < 1: + return 'blocked', 'Missing relation: monitor' + running_mds = get_running_mds() + if not running_mds: + return 'blocked', 'No MDS detected using current configuration' + else: + return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds)) + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. +if not hasattr(reactive, '_ceph_log_registered'): + # We need to register this to run every hook, not just during install + # and config-changed, to protect against race conditions. If we don't + # do this, then the config in the hook environment may show updates + # to running hooks well before the config-changed hook has been invoked + # and the intialization provided an opertunity to be run. 
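+    # hookenv.atexit() runs the callback once, after the hook body has
+    # completed successfully, so the reported status reflects the
+    # settled state of the unit.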
+ hookenv.atexit(assess_status) + reactive._ceph_log_registered = True From 4f693064dd1a35f31a038912d9ad2c4ce79016b5 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 3 Mar 2017 11:30:35 -0300 Subject: [PATCH 1286/2699] Update url to file bugs in README.md file Change-Id: I5d0ce5bf5449edb98b1e2e92a301334ec73793a5 Closes-Bug: 1669803 --- ceph-proxy/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 9421e43c..7951addc 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -12,7 +12,7 @@ Your config.yaml needs to provide the monitor-hosts and fsid options like below `config.yaml`: ```yaml ceph-proxy: - monitor-hosts: IP_ADDRESS:PORT IP ADDRESS:PORT + monitor-hosts: IP_ADDRESS:PORT IP ADDRESS:PORT fsid: FSID ``` @@ -26,7 +26,7 @@ This charm noes NOT insert itself between the clusters, but merely makes the ext - Chris MacNaughton -Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph-proxy/+filebug) +Report bugs on [Launchpad](http://bugs.launchpad.net/charm-ceph-proxy/+filebug) ## Ceph From 1a726d05e325359d52ea654b3a0c99fdea77d8ab Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 3 Mar 2017 14:21:07 -0300 Subject: [PATCH 1287/2699] charm-helpers sync to pickup changes to CephContext CephContext now is capable of dealing with a space separated list of IPs in the ceph-public-address field of the relation with ceph. This allows ceph-proxy to pass a list of monitor hosts instead of a single one. Change-Id: Iff04f93911bd8fcecf5afeb71feda853b30d74d6 Partial-Bug: 1642430 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 32 +++- .../contrib/hardening/templating.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 65 ++++++- .../contrib/openstack/amulet/utils.py | 12 +- .../charmhelpers/contrib/openstack/context.py | 39 +++- .../contrib/openstack/ha/utils.py | 11 ++ .../contrib/openstack/keystone.py | 178 ++++++++++++++++++ .../contrib/openstack/templating.py | 10 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/python/packages.py | 11 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 2 + ceph-radosgw/hooks/charmhelpers/fetch/snap.py | 122 ++++++++++++ .../hooks/charmhelpers/fetch/ubuntu.py | 80 +++++--- .../charmhelpers/contrib/amulet/utils.py | 29 ++- .../contrib/openstack/amulet/utils.py | 12 +- ceph-radosgw/tests/charmhelpers/core/host.py | 2 + 16 files changed, 531 insertions(+), 82 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/snap.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1410512a..9646b838 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -227,6 +227,7 @@ class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() @@ -338,13 +339,14 @@ def get_nagios_unit_name(relation_name='nrpe-external-master'): return unit -def add_init_service_checks(nrpe, services, unit_name): +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): """ Add checks for each service in list :param NRPE nrpe: NRPE object to add check to 
:param list services: List of services to check :param str unit_name: Unit name to use in check description + :param bool immediate_check: For sysv init, run the service check immediately """ for svc in services: # Don't add a check for these services from neutron-gateway @@ -368,21 +370,31 @@ def add_init_service_checks(nrpe, services, unit_name): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) + checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) + croncmd = ( + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status' % svc + ) + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, + description='service check {%s}' % unit_name, + check_cmd='check_status_file.py -f %s' % checkpath, ) + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail + # (LP: #1670223). + if immediate_check and os.path.isdir(nrpe.homedir): + f = open(checkpath, 'w') + subprocess.call( + croncmd.split(), + stdout=f, + stderr=subprocess.STDOUT + ) + f.close() + os.chmod(checkpath, 0o644) def copy_nrpe_checks(): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py index 2174c645..5b6765f7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os +import six from charmhelpers.core.hookenv import ( log, @@ -26,7 +27,10 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index e141fc12..54c76a72 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -20,25 +20,37 @@ from functools import partial -from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( + config, log, + network_get_primary_address, + unit_get, WARNING, ) +from charmhelpers.core.host import ( + lsb_release, +) + try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -414,7 +426,10 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -462,7 +477,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython", fatal=True) + if six.PY2: + apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) @@ -499,3 +517,40 @@ def port_has_listener(address, port): cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result)) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + +def get_relation_ip(interface, config_override=None): + """Return this unit's IP for the given relation. + + Allow for an arbitrary interface to use with network-get to select an IP. + Handle all address selection options including configuration parameter + override and IPv6. + + Usage: get_relation_ip('amqp', config_override='access-network') + + @param interface: string name of the relation. + @param config_override: string name of the config option for network + override. Supports legacy network override configuration parameters. + @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. 
+ @returns IPv6 or IPv4 address + """ + + fallback = get_host_ip(unit_get('private-address')) + if config('prefer-ipv6'): + assert_charm_supports_ipv6() + return get_ipv6_addr()[0] + elif config_override and config(config_override): + return get_address_in_network(config(config_override), + fallback) + else: + try: + return network_get_primary_address(interface) + except NotImplementedError: + return fallback diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 42316331..6cdbbbbf 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -100,7 +100,10 @@ try: import psutil except ImportError: - apt_install('python-psutil', fatal=True) + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -392,16 +395,20 @@ def __call__(self): for rid in relation_ids(self.rel_name): ha_vip_only = False self.related = True + transport_hosts = None + rabbitmq_port = '5672' for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True vip = relation_get('vip', rid=rid, unit=unit) vip = format_ipv6_addr(vip) or vip ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] else: host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + transport_hosts = [host] ctxt.update({ 'rabbitmq_user': username, @@ -413,6 +420,7 @@ def __call__(self): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: @@ -450,6 +458,20 @@ def __call__(self): rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = '' + for host in transport_hosts: + if transport_url_hosts: + format_string = ",{}:{}@{}:{}" + else: + format_string = "{}:{}@{}:{}" + transport_url_hosts += format_string.format( + ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], + host, 
rabbitmq_port) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) oslo_messaging_flags = conf.get('oslo-messaging-flags', None) if oslo_messaging_flags: @@ -481,13 +503,16 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) - ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, unit=unit) - unit_priv_addr = relation_get('private-address', rid=rid, - unit=unit) - ceph_addr = ceph_pub_addr or unit_priv_addr - ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr - mon_hosts.append(ceph_addr) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 1f5310bb..254a90e7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -126,3 +126,14 @@ def assert_charm_supports_dns_ha(): status_set('blocked', msg) raise DNSHAException(msg) return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 00000000..a15a03fa --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
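+#
+# These helpers wrap python-keystoneclient so charm code can locate
+# services in the keystone catalog without caring whether the cloud
+# exposes the v2.0 or v3 identity API.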
+ +import six +from charmhelpers.fetch import apt_install +from charmhelpers.contrib.openstack.context import IdentityServiceContext +from charmhelpers.core.hookenv import ( + log, + ERROR, +) + + +def get_api_suffix(api_version): + """Return the formatted api suffix for the given version + @param api_version: version of the keystone endpoint + @returns the api suffix formatted according to the given api + version + """ + return 'v2.0' if api_version in (2, "2.0") else 'v3' + + +def format_endpoint(schema, addr, port, api_version): + """Return a formatted keystone endpoint + @param schema: http or https + @param addr: ipv4/ipv6 host of the keystone service + @param port: port of the keystone service + @param api_version: 2 or 3 + @returns a fully formatted keystone endpoint + """ + return '{}://{}:{}/{}/'.format(schema, addr, port, + get_api_suffix(api_version)) + + +def get_keystone_manager(endpoint, api_version, **kwargs): + """Return a keystonemanager for the correct API version + + @param endpoint: the keystone endpoint to point client at + @param api_version: version of the keystone api the client should use + @param kwargs: token or username/tenant/password information + @returns keystonemanager class used for interrogating keystone + """ + if api_version == 2: + return KeystoneManager2(endpoint, **kwargs) + if api_version == 3: + return KeystoneManager3(endpoint, **kwargs) + raise ValueError('No manager found for api version {}'.format(api_version)) + + +def get_keystone_manager_from_identity_service_context(): + """Return a keystonmanager generated from a + instance of charmhelpers.contrib.openstack.context.IdentityServiceContext + @returns keystonamenager instance + """ + context = IdentityServiceContext()() + if not context: + msg = "Identity service context cannot be generated" + log(msg, level=ERROR) + raise ValueError(msg) + + endpoint = format_endpoint(context['service_protocol'], + context['service_host'], + context['service_port'], + context['api_version']) + + if context['api_version'] in (2, "2.0"): + api_version = 2 + else: + api_version = 3 + + return get_keystone_manager(endpoint, api_version, + username=context['admin_user'], + password=context['admin_password'], + tenant_name=context['admin_tenant_name']) + + +class KeystoneManager(object): + + def resolve_service_id(self, service_name=None, service_type=None): + """Find the service_id of a given service""" + services = [s._info for s in self.api.services.list()] + + service_name = service_name.lower() + for s in services: + name = s['name'].lower() + if service_type and service_name: + if (service_name == name and service_type == s['type']): + return s['id'] + elif service_name and service_name == name: + return s['id'] + elif service_type and service_type == s['type']: + return s['id'] + return None + + def service_exists(self, service_name=None, service_type=None): + """Determine if the given service exists on the service list""" + return self.resolve_service_id(service_name, service_type) is not None + + +class KeystoneManager2(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + + self.api_version = 2 
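+        # A pre-issued admin token takes precedence; otherwise build a
+        # password-authenticated session from the supplied credentials.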
+ + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 89588951..934baf5d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -28,7 +28,10 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -207,7 +210,10 @@ def __init__(self, templates_dir, openstack_release): # if this code is running, the object is created pre-install hook. # jinja2 shouldn't get touched until the module is reloaded on next # hook execution, with proper jinja2 bits successfully imported. - apt_install('python-jinja2') + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') def register(self, config_file, contexts): """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 80219d66..7e8ecff4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0', '2.12.0']), + ['2.11.0', '2.12.0', '2.13.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index e29bd1bb..6e95028b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -16,6 +16,7 @@ # limitations under the License. 
import os +import six import subprocess import sys @@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - apt_install('python-pip') + if six.PY2: + apt_install('python-pip') + else: + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -136,7 +140,10 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - apt_install('python-virtualenv') + if six.PY2: + apt_install('python-virtualenv') + else: + apt_install('python3-virtualenv') if path: venv_path = path diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py new file mode 100644 index 00000000..23c707b0 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -0,0 +1,122 @@ +# Copyright 2014-2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Charm helpers snap for classic charms. + +If writing reactive charms, use the snap layer: +https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html +""" +import subprocess +from os import environ +from time import sleep +from charmhelpers.core.hookenv import log + +__author__ = 'Joseph Borg ' + +SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. +SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class CouldNotAcquireLockException(Exception): + pass + + +def _snap_exec(commands): + """ + Execute snap commands. + + :param commands: List commands + :return: Integer exit code + """ + assert type(commands) == list + + retry_count = 0 + return_code = None + + while return_code is None or return_code == SNAP_NO_LOCK: + try: + return_code = subprocess.check_call(['snap'] + commands, env=environ) + except subprocess.CalledProcessError as e: + retry_count += + 1 + if retry_count > SNAP_NO_LOCK_RETRY_COUNT: + raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) + return_code = e.returncode + log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') + sleep(SNAP_NO_LOCK_RETRY_DELAY) + + return return_code + + +def snap_install(packages, *flags): + """ + Install a snap package. 
+ + :param packages: String or List String package name + :param flags: List String flags to pass to install command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Installing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with option(s) "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['install'] + flags + packages) + + +def snap_remove(packages, *flags): + """ + Remove a snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 39b9b801..82ac80ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -116,8 +116,8 @@ } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. +CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -249,7 +249,8 @@ def add_source(source, key=None): source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) + cmd = ['add-apt-repository', '--yes', source] + _run_with_retries(cmd) elif source.startswith('cloud:'): install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -286,41 +287,60 @@ def add_source(source, key=None): key]) -def _run_apt_command(cmd, fatal=False): - """Run an APT command. - - Checks the output and retries if the fatal flag is set - to True. +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param: max_retries: int: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :param: retry_message: str: Optional log prefix emitted during retries. + :param: cmd_env: dict: Environment variables to add to the command run. 
""" + env = os.environ.copy() + if cmd_env: + env.update(cmd_env) - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. + cmd_env = { + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} + + if fatal: + _run_with_retries( + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") else: + env = os.environ.copy() + env.update(cmd_env) subprocess.call(cmd, env=env) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index f9e4c3af..8a6b7644 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -785,37 +785,30 @@ def get_uuid_epoch_stamp(self): generating test messages which need to be unique-ish.""" return '[{}-{}]'.format(uuid.uuid4(), time.time()) -# amulet juju action helpers: + # amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): - """Run the named action on a given unit sentry. + """Translate to amulet's built in run_action(). Deprecated. + + Run the named action on a given unit sentry. params a dict of parameters to use - _check_output parameter is used for dependency injection. + _check_output parameter is no longer used @return action_id. """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id + self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' + 'deprecated for amulet.run_action') + return unit_sentry.run_action(action, action_args=params) def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. - _check_output parameter is used for dependency injection. 
+ action_id a string action uuid + _check_output parameter is no longer used """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) + data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed" def status_get(self, unit): diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) From 3ca2d956daad5f63b6bd329b3fbbf7e7fabf94f2 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 7 Mar 2017 11:07:47 -0800 Subject: [PATCH 1288/2699] Enable Ocata Amulet Tests - Add Zesty as a supported series to metadata.yaml. - Turn on Xenial-Ocata Amulet test definitions. - Sync charm helpers to get Juju 2.x amulet compatibility. - Keeping Zesty-Ocata Amulet test definitions turned off until the metadata.yaml changes propagate to the charm store. 
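The synced amulet helpers route actions through amulet's own API instead
of shelling out to the juju CLI, which is what restores Juju 2.x
compatibility. Calling tests keep the same shape; a minimal sketch
(assuming `u` is the AmuletUtils instance and `sentry` a unit sentry, as
in the deployment tests above):

    # run_action() now delegates to sentry.run_action(action, action_args=params)
    action_id = u.run_action(sentry, 'pause')
    # wait_on_action() now polls amulet.actions.get_action_output(action_id)
    assert u.wait_on_action(action_id), "Pause action failed."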
Change-Id: Ia63b663255f5dc52a2a755e8c309b05ba8662a6a --- .../charmhelpers/contrib/charmsupport/nrpe.py | 32 +++-- .../contrib/hardening/templating.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 65 +++++++++- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/python/packages.py | 11 +- ceph-proxy/hooks/charmhelpers/core/host.py | 2 + ceph-proxy/hooks/charmhelpers/fetch/snap.py | 122 ++++++++++++++++++ ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 80 +++++++----- ceph-proxy/metadata.yaml | 1 + .../charmhelpers/contrib/amulet/utils.py | 29 ++--- .../contrib/openstack/amulet/utils.py | 12 +- ceph-proxy/tests/charmhelpers/core/host.py | 2 + ceph-proxy/tests/gate-basic-xenial-ocata | 0 13 files changed, 294 insertions(+), 70 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/fetch/snap.py mode change 100644 => 100755 ceph-proxy/tests/gate-basic-xenial-ocata diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1410512a..9646b838 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -227,6 +227,7 @@ class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() @@ -338,13 +339,14 @@ def get_nagios_unit_name(relation_name='nrpe-external-master'): return unit -def add_init_service_checks(nrpe, services, unit_name): +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): """ Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param list services: List of services to check :param str unit_name: Unit name to use in check description + :param bool immediate_check: For sysv init, run the service check immediately """ for svc in services: # Don't add a check for these services from neutron-gateway @@ -368,21 +370,31 @@ def add_init_service_checks(nrpe, services, unit_name): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) + checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) + croncmd = ( + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status' % svc + ) + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, + description='service check {%s}' % unit_name, + check_cmd='check_status_file.py -f %s' % checkpath, ) + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail + # (LP: #1670223). 
+ if immediate_check and os.path.isdir(nrpe.homedir): + f = open(checkpath, 'w') + subprocess.call( + croncmd.split(), + stdout=f, + stderr=subprocess.STDOUT + ) + f.close() + os.chmod(checkpath, 0o644) def copy_nrpe_checks(): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py index 2174c645..5b6765f7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import six from charmhelpers.core.hookenv import ( log, @@ -26,7 +27,10 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index e141fc12..54c76a72 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -20,25 +20,37 @@ from functools import partial -from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( + config, log, + network_get_primary_address, + unit_get, WARNING, ) +from charmhelpers.core.host import ( + lsb_release, +) + try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -414,7 +426,10 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -462,7 +477,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython", fatal=True) + if six.PY2: + apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) @@ -499,3 +517,40 @@ def port_has_listener(address, port): cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result)) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + +def get_relation_ip(interface, config_override=None): + """Return this unit's IP for the given relation. + + Allow for an arbitrary interface to use with network-get to select an IP. + Handle all address selection options including configuration parameter + override and IPv6. + + Usage: get_relation_ip('amqp', config_override='access-network') + + @param interface: string name of the relation. 
+ @param config_override: string name of the config option for network + override. Supports legacy network override configuration parameters. + @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. + @returns IPv6 or IPv4 address + """ + + fallback = get_host_ip(unit_get('private-address')) + if config('prefer-ipv6'): + assert_charm_supports_ipv6() + return get_ipv6_addr()[0] + elif config_override and config(config_override): + return get_address_in_network(config(config_override), + fallback) + else: + try: + return network_get_primary_address(interface) + except NotImplementedError: + return fallback diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 80219d66..7e8ecff4 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0', '2.12.0']), + ['2.11.0', '2.12.0', '2.13.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py b/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py index e29bd1bb..6e95028b 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py @@ -16,6 +16,7 @@ # limitations under the License. import os +import six import subprocess import sys @@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - apt_install('python-pip') + if six.PY2: + apt_install('python-pip') + else: + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -136,7 +140,10 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - apt_install('python-virtualenv') + if six.PY2: + apt_install('python-virtualenv') + else: + apt_install('python3-virtualenv') if path: venv_path = path diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/snap.py b/ceph-proxy/hooks/charmhelpers/fetch/snap.py new file mode 100644 index 00000000..23c707b0 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/fetch/snap.py @@ -0,0 +1,122 @@ +# Copyright 2014-2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Charm helpers snap for classic charms. 
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+from os import environ
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+SNAP_NO_LOCK = 1  # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to remove command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Removing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['remove'] + flags + packages)
+
+
+def snap_refresh(packages, *flags):
+    """
+    Refresh / Update snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to refresh command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['refresh'] + flags + packages)
diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py
index 39b9b801..82ac80ff 100644
--- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py
@@ -116,8 +116,8 @@
 }
 
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
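Editor's note: the snap helpers added above are thin wrappers over the snap
CLI with the same lock-retry pattern as apt. A hypothetical charm call site
(package names and flags are placeholders, not taken from the diff):

    from charmhelpers.fetch.snap import snap_install, snap_refresh

    snap_install('core')                     # single package
    snap_install(['lxd'], '--classic')       # list form, plus a snap CLI flag
    snap_refresh('lxd', '--channel=stable')  # flags pass straight through

If another process holds the snap lock, each call retries every
SNAP_NO_LOCK_RETRY_DELAY seconds up to SNAP_NO_LOCK_RETRY_COUNT times, i.e.
for up to 30 * 10 = 300 seconds, before CouldNotAcquireLockException is
raised.
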
def filter_installed_packages(packages): @@ -249,7 +249,8 @@ def add_source(source, key=None): source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) + cmd = ['add-apt-repository', '--yes', source] + _run_with_retries(cmd) elif source.startswith('cloud:'): install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -286,41 +287,60 @@ def add_source(source, key=None): key]) -def _run_apt_command(cmd, fatal=False): - """Run an APT command. - - Checks the output and retries if the fatal flag is set - to True. +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param: max_retries: int: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :param: retry_message: str: Optional log prefix emitted during retries. + :param: cmd_env: dict: Environment variables to add to the command run. """ + env = os.environ.copy() + if cmd_env: + env.update(cmd_env) - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. 
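+    # (Editor's note, not part of the upstream change: because this uses
+    # os.environ.get() with a default, an operator-exported DEBIAN_FRONTEND,
+    # e.g. 'dialog', still takes precedence over 'noninteractive'.)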
+ cmd_env = { + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} + + if fatal: + _run_with_retries( + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") else: + env = os.environ.copy() + env.update(cmd_env) subprocess.call(cmd, env=env) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index faf20ec2..7462ead1 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - zesty - trusty - yakkety extra-bindings: diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py index f9e4c3af..8a6b7644 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py @@ -785,37 +785,30 @@ def get_uuid_epoch_stamp(self): generating test messages which need to be unique-ish.""" return '[{}-{}]'.format(uuid.uuid4(), time.time()) -# amulet juju action helpers: + # amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): - """Run the named action on a given unit sentry. + """Translate to amulet's built in run_action(). Deprecated. + + Run the named action on a given unit sentry. params a dict of parameters to use - _check_output parameter is used for dependency injection. + _check_output parameter is no longer used @return action_id. """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id + self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' + 'deprecated for amulet.run_action') + return unit_sentry.run_action(action, action_args=params) def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. - _check_output parameter is used for dependency injection. 
+ action_id a string action uuid + _check_output parameter is no longer used """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) + data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed" def status_get(self, unit): diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-proxy/tests/gate-basic-xenial-ocata b/ceph-proxy/tests/gate-basic-xenial-ocata old mode 100644 new mode 100755 From de6d8b6b18608ca97fab9007c889701bd1a51e7f Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Thu, 16 Mar 2017 17:07:41 -0300 Subject: [PATCH 1289/2699] Force a charmhelpers sync to 0.14.0. This is a partial fix required for fixing LP: #1671861, this change is intended to retrigger the build using the latest charmhelpers release available at https://pypi.python.org/pypi/charmhelpers/0.14.0 that includes the changes introduced on the public bug. Change-Id: Icf36ff6efe60313c305f292c9a7dadaf079eb8ae Closes-Bug: #1671861 Signed-off-by: Jorge Niedbalski --- ceph-fs/rebuild | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 ceph-fs/rebuild diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild new file mode 100644 index 00000000..3eb2fae7 --- /dev/null +++ b/ceph-fs/rebuild @@ -0,0 +1,5 @@ +# This file is used to trigger rebuilds +# when dependencies of the charm change, +# but nothing in the charm needs to. +# simply change the uuid to something new +265df5c7-af0d-45ee-9dfd-194054186a37 From 9833a594bc084baae751d800d81869f0ec3e224f Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 7 Mar 2017 11:05:55 -0800 Subject: [PATCH 1290/2699] Enable Ocata Amulet Tests - Add Zesty as a supported series to metadata.yaml. - Turn on Xenial-Ocata Amulet test definitions. 
- Sync charm helpers to get Juju 2.x amulet compatibility. - Keeping Zesty-Ocata Amulet test definitions turned off until the metadata.yaml changes propagate to the charm store. Change-Id: I6f59db20a955a00fb3497095de683f93680537ec --- ceph-fs/src/metadata.yaml | 1 + ceph-fs/src/tests/gate-basic-xenial-ocata | 23 +++++++++++++++++++++++ ceph-fs/src/tests/gate-basic-zesty-ocata | 23 +++++++++++++++++++++++ ceph-fs/src/tox.ini | 2 +- 4 files changed, 48 insertions(+), 1 deletion(-) create mode 100755 ceph-fs/src/tests/gate-basic-xenial-ocata create mode 100644 ceph-fs/src/tests/gate-basic-zesty-ocata diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 5f0b5117..591b1b2e 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - zesty - yakkety subordinate: false requires: diff --git a/ceph-fs/src/tests/gate-basic-xenial-ocata b/ceph-fs/src/tests/gate-basic-xenial-ocata new file mode 100755 index 00000000..141a3d80 --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-xenial-ocata @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-ocata.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='xenial') + deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-zesty-ocata b/ceph-fs/src/tests/gate-basic-zesty-ocata new file mode 100644 index 00000000..dce0829c --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-zesty-ocata @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph deployment on zesty-ocata.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='zesty') + deployment.run_tests() diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 479d7bb3..3bc4d0bb 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 AMULET_SETUP_TIMEOUT=2700 whitelist_externals = juju -passenv = HOME TERM AMULET_* +passenv = HOME TERM AMULET_* CS_API_URL deps = -r{toxinidir}/test-requirements.txt install_command = pip install --allow-unverified python-apt {opts} {packages} From 63e6e3010befdaaaea5a28132d66b8987e7b7c9b Mon Sep 17 00:00:00 2001 From: Jorge Niedbalski Date: Tue, 21 Mar 2017 12:59:57 -0300 Subject: [PATCH 1291/2699] Force a rebuild after interface-ceph-mds change. Force a charm rebuild after fixing LP: #1659645. Closes-Bug: #1659645 Change-Id: Id38734bc1a18d03711d02d8e47e4aeefd6e828ee Signed-off-by: Jorge Niedbalski --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 3eb2fae7..a04b6798 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -265df5c7-af0d-45ee-9dfd-194054186a37 +d6e45297-5d9a-4123-b1ec-81e3fee1a4b6 From e9d9717dcf125675ca9d9817f4637bcbab3fa6ca Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 7 Mar 2017 11:06:34 -0800 Subject: [PATCH 1292/2699] Enable Ocata Amulet Tests - Add Zesty as a supported series to metadata.yaml. - Turn on Xenial-Ocata Amulet test definitions. - Sync charm helpers to get Juju 2.x amulet compatibility. - Keeping Zesty-Ocata Amulet test definitions turned off until the metadata.yaml changes propagate to the charm store. - Resync tox.ini to resolve amulet test failures. 
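Editor's note: the ceph-fs/rebuild file created in PATCH 1289 and bumped in
PATCH 1291 above is a pure build trigger; nothing parses the UUID, so any
fresh value forces the charm build pipeline to re-pull updated layers and
interfaces. One way to generate a replacement value (illustrative, not
mandated by the charm):

    python -c "import uuid; print(uuid.uuid4())"
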
Change-Id: I75d0b5e0ff5ba3619e5f24567deb77c404b822cb --- .../charmhelpers/contrib/charmsupport/nrpe.py | 32 +++- .../contrib/hardening/templating.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 65 ++++++- .../contrib/openstack/amulet/utils.py | 12 +- .../charmhelpers/contrib/openstack/context.py | 39 +++- .../contrib/openstack/ha/utils.py | 11 ++ .../contrib/openstack/keystone.py | 178 ++++++++++++++++++ .../contrib/openstack/templating.py | 10 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/python/packages.py | 11 +- ceph-mon/hooks/charmhelpers/core/host.py | 2 + ceph-mon/hooks/charmhelpers/fetch/snap.py | 122 ++++++++++++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 80 +++++--- ceph-mon/metadata.yaml | 1 + ceph-mon/tests/basic_deployment.py | 19 +- .../charmhelpers/contrib/amulet/utils.py | 29 ++- .../contrib/openstack/amulet/utils.py | 12 +- ceph-mon/tests/charmhelpers/core/host.py | 2 + ceph-mon/tests/gate-basic-xenial-ocata | 0 ceph-mon/tox.ini | 7 +- 20 files changed, 550 insertions(+), 90 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/snap.py mode change 100644 => 100755 ceph-mon/tests/gate-basic-xenial-ocata diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1410512a..9646b838 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -227,6 +227,7 @@ class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() @@ -338,13 +339,14 @@ def get_nagios_unit_name(relation_name='nrpe-external-master'): return unit -def add_init_service_checks(nrpe, services, unit_name): +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): """ Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param list services: List of services to check :param str unit_name: Unit name to use in check description + :param bool immediate_check: For sysv init, run the service check immediately """ for svc in services: # Don't add a check for these services from neutron-gateway @@ -368,21 +370,31 @@ def add_init_service_checks(nrpe, services, unit_name): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) + checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) + croncmd = ( + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status' % svc + ) + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, + description='service check {%s}' % unit_name, + check_cmd='check_status_file.py -f %s' % checkpath, ) + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail + # (LP: #1670223). 
+ if immediate_check and os.path.isdir(nrpe.homedir): + f = open(checkpath, 'w') + subprocess.call( + croncmd.split(), + stdout=f, + stderr=subprocess.STDOUT + ) + f.close() + os.chmod(checkpath, 0o644) def copy_nrpe_checks(): diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py index 2174c645..5b6765f7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import six from charmhelpers.core.hookenv import ( log, @@ -26,7 +27,10 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index e141fc12..54c76a72 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -20,25 +20,37 @@ from functools import partial -from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( + config, log, + network_get_primary_address, + unit_get, WARNING, ) +from charmhelpers.core.host import ( + lsb_release, +) + try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -414,7 +426,10 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -462,7 +477,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython", fatal=True) + if six.PY2: + apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) @@ -499,3 +517,40 @@ def port_has_listener(address, port): cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result)) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + +def get_relation_ip(interface, config_override=None): + """Return this unit's IP for the given relation. + + Allow for an arbitrary interface to use with network-get to select an IP. + Handle all address selection options including configuration parameter + override and IPv6. + + Usage: get_relation_ip('amqp', config_override='access-network') + + @param interface: string name of the relation. 
+ @param config_override: string name of the config option for network + override. Supports legacy network override configuration parameters. + @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. + @returns IPv6 or IPv4 address + """ + + fallback = get_host_ip(unit_get('private-address')) + if config('prefer-ipv6'): + assert_charm_supports_ipv6() + return get_ipv6_addr()[0] + elif config_override and config(config_override): + return get_address_in_network(config(config_override), + fallback) + else: + try: + return network_get_primary_address(interface) + except NotImplementedError: + return fallback diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 42316331..6cdbbbbf 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -100,7 +100,10 @@ try: import psutil except ImportError: - apt_install('python-psutil', fatal=True) + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -392,16 +395,20 @@ def __call__(self): for rid in relation_ids(self.rel_name): ha_vip_only = False self.related = True + transport_hosts = None + rabbitmq_port = '5672' for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True vip = relation_get('vip', rid=rid, unit=unit) vip = format_ipv6_addr(vip) or vip ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] else: host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + transport_hosts = [host] ctxt.update({ 'rabbitmq_user': username, @@ -413,6 +420,7 @@ def __call__(self): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: @@ -450,6 +458,20 @@ def __call__(self): rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = '' + for host in transport_hosts: + if transport_url_hosts: + 
format_string = ",{}:{}@{}:{}" + else: + format_string = "{}:{}@{}:{}" + transport_url_hosts += format_string.format( + ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], + host, rabbitmq_port) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) oslo_messaging_flags = conf.get('oslo-messaging-flags', None) if oslo_messaging_flags: @@ -481,13 +503,16 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) - ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, unit=unit) - unit_priv_addr = relation_get('private-address', rid=rid, - unit=unit) - ceph_addr = ceph_pub_addr or unit_priv_addr - ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr - mon_hosts.append(ceph_addr) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 1f5310bb..254a90e7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -126,3 +126,14 @@ def assert_charm_supports_dns_ha(): status_set('blocked', msg) raise DNSHAException(msg) return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 00000000..a15a03fa --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
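+# (Editor's note, not part of the upstream file: this module wraps
+# keystoneclient v2.0/v3 session setup so charm code can interrogate the
+# identity service without caring which API version the cloud exposes.
+# Typical use, sketched under that assumption:
+#     manager = get_keystone_manager(endpoint, 3, username='admin',
+#                                    password='secret', tenant_name='admin')
+#     manager.service_exists(service_type='identity')
+# where endpoint comes from format_endpoint() below.)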
+
+import six
+from charmhelpers.fetch import apt_install
+from charmhelpers.contrib.openstack.context import IdentityServiceContext
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+
+def get_api_suffix(api_version):
+    """Return the formatted api suffix for the given version
+    @param api_version: version of the keystone endpoint
+    @returns the api suffix formatted according to the given api
+    version
+    """
+    return 'v2.0' if api_version in (2, "2.0") else 'v3'
+
+
+def format_endpoint(schema, addr, port, api_version):
+    """Return a formatted keystone endpoint
+    @param schema: http or https
+    @param addr: ipv4/ipv6 host of the keystone service
+    @param port: port of the keystone service
+    @param api_version: 2 or 3
+    @returns a fully formatted keystone endpoint
+    """
+    return '{}://{}:{}/{}/'.format(schema, addr, port,
+                                   get_api_suffix(api_version))
+
+
+def get_keystone_manager(endpoint, api_version, **kwargs):
+    """Return a keystonemanager for the correct API version
+
+    @param endpoint: the keystone endpoint to point client at
+    @param api_version: version of the keystone api the client should use
+    @param kwargs: token or username/tenant/password information
+    @returns keystonemanager class used for interrogating keystone
+    """
+    if api_version == 2:
+        return KeystoneManager2(endpoint, **kwargs)
+    if api_version == 3:
+        return KeystoneManager3(endpoint, **kwargs)
+    raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager_from_identity_service_context():
+    """Return a keystone manager generated from an
+    instance of charmhelpers.contrib.openstack.context.IdentityServiceContext
+    @returns keystonemanager instance
+    """
+    context = IdentityServiceContext()()
+    if not context:
+        msg = "Identity service context cannot be generated"
+        log(msg, level=ERROR)
+        raise ValueError(msg)
+
+    endpoint = format_endpoint(context['service_protocol'],
+                               context['service_host'],
+                               context['service_port'],
+                               context['api_version'])
+
+    if context['api_version'] in (2, "2.0"):
+        api_version = 2
+    else:
+        api_version = 3
+
+    return get_keystone_manager(endpoint, api_version,
+                                username=context['admin_user'],
+                                password=context['admin_password'],
+                                tenant_name=context['admin_tenant_name'])
+
+
+class KeystoneManager(object):
+
+    def resolve_service_id(self, service_name=None, service_type=None):
+        """Find the service_id of a given service"""
+        services = [s._info for s in self.api.services.list()]
+
+        if service_name:
+            service_name = service_name.lower()
+        for s in services:
+            name = s['name'].lower()
+            if service_type and service_name:
+                if (service_name == name and service_type == s['type']):
+                    return s['id']
+            elif service_name and service_name == name:
+                return s['id']
+            elif service_type and service_type == s['type']:
+                return s['id']
+        return None
+
+    def service_exists(self, service_name=None, service_type=None):
+        """Determine if the given service exists on the service list"""
+        return self.resolve_service_id(service_name, service_type) is not None
+
+
+class KeystoneManager2(KeystoneManager):
+
+    def __init__(self, endpoint, **kwargs):
+        try:
+            from keystoneclient.v2_0 import client
+            from keystoneclient.auth.identity import v2
+            from keystoneclient import session
+        except ImportError:
+            if six.PY2:
+                apt_install(["python-keystoneclient"], fatal=True)
+            else:
+                apt_install(["python3-keystoneclient"], fatal=True)
+
+            from keystoneclient.v2_0 import client
+            from keystoneclient.auth.identity import v2
+            from keystoneclient import session
+
+        self.api_version = 2
+ + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index 89588951..934baf5d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -28,7 +28,10 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -207,7 +210,10 @@ def __init__(self, templates_dir, openstack_release): # if this code is running, the object is created pre-install hook. # jinja2 shouldn't get touched until the module is reloaded on next # hook execution, with proper jinja2 bits successfully imported. - apt_install('python-jinja2') + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') def register(self, config_file, contexts): """ diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 80219d66..7e8ecff4 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0', '2.12.0']), + ['2.11.0', '2.12.0', '2.13.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py index e29bd1bb..6e95028b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-mon/hooks/charmhelpers/contrib/python/packages.py @@ -16,6 +16,7 @@ # limitations under the License. 
 import os
+import six
 import subprocess
 import sys
 
@@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs):
         from pip import main as _pip_execute
     except ImportError:
         apt_update()
-        apt_install('python-pip')
+        if six.PY2:
+            apt_install('python-pip')
+        else:
+            apt_install('python3-pip')
         from pip import main as _pip_execute
     _pip_execute(*args, **kwargs)
 finally:
@@ -136,7 +140,10 @@ def pip_list():
 
 def pip_create_virtualenv(path=None):
     """Create an isolated Python environment."""
-    apt_install('python-virtualenv')
+    if six.PY2:
+        apt_install('python-virtualenv')
+    else:
+        apt_install('python3-virtualenv')
 
     if path:
         venv_path = path
diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py
index edbb72ff..05edfa50 100644
--- a/ceph-mon/hooks/charmhelpers/core/host.py
+++ b/ceph-mon/hooks/charmhelpers/core/host.py
@@ -306,6 +306,8 @@ def service_running(service_name, **kwargs):
 
 def init_is_systemd():
     """Return True if the host system uses systemd, False otherwise."""
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
     return os.path.isdir(SYSTEMD_SYSTEM)
 
diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py
new file mode 100644
index 00000000..23c707b0
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/fetch/snap.py
@@ -0,0 +1,122 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+from os import environ
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+SNAP_NO_LOCK = 1  # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+ + :param packages: String or List String package name + :param flags: List String flags to pass to install command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Installing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with option(s) "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['install'] + flags + packages) + + +def snap_remove(packages, *flags): + """ + Remove a snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 39b9b801..82ac80ff 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -116,8 +116,8 @@ } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. +CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -249,7 +249,8 @@ def add_source(source, key=None): source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) + cmd = ['add-apt-repository', '--yes', source] + _run_with_retries(cmd) elif source.startswith('cloud:'): install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -286,41 +287,60 @@ def add_source(source, key=None): key]) -def _run_apt_command(cmd, fatal=False): - """Run an APT command. - - Checks the output and retries if the fatal flag is set - to True. +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param: max_retries: int: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :param: retry_message: str: Optional log prefix emitted during retries. + :param: cmd_env: dict: Environment variables to add to the command run. 
""" + env = os.environ.copy() + if cmd_env: + env.update(cmd_env) - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. + cmd_env = { + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} + + if fatal: + _run_with_retries( + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") else: + env = os.environ.copy() + env.update(cmd_env) subprocess.call(cmd, env=env) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 42810738..b47aeef5 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - zesty - trusty - yakkety peers: diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 47e24767..d8ff9bb6 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -61,7 +61,7 @@ def _add_services(self): """ this_service = {'name': 'ceph-mon', 'units': 3} other_services = [ - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'percona-cluster'}, {'name': 'keystone'}, {'name': 'ceph-osd', 'units': 3}, {'name': 'rabbitmq-server'}, @@ -100,10 +100,7 @@ def _configure_services(self): cinder_config = {'block-device': 'None', 'glance-api-version': '2'} pxc_config = { - 'dataset-size': '25%', 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', } # Include a non-existent device as osd-devices is a whitelist, @@ -223,11 +220,13 @@ def test_102_services(self): self.keystone_sentry: ['keystone'], self.glance_sentry: ['glance-registry', 'glance-api'], - self.cinder_sentry: ['cinder-api', - 'cinder-scheduler', + self.cinder_sentry: ['cinder-scheduler', 'cinder-volume'], } + if self._get_openstack_release() < self.xenial_ocata: + services[self.cinder_sentry].append('cinder-api') + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead. 
@@ -379,8 +378,14 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' + # NOTE(jamespage): Deal with section config for >= ocata. + if self._get_openstack_release() >= self.xenial_ocata: + section_key = 'CEPH' + else: + section_key = 'DEFAULT' + expected = { - 'DEFAULT': { + section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' } } diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py index f9e4c3af..8a6b7644 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py @@ -785,37 +785,30 @@ def get_uuid_epoch_stamp(self): generating test messages which need to be unique-ish.""" return '[{}-{}]'.format(uuid.uuid4(), time.time()) -# amulet juju action helpers: + # amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): - """Run the named action on a given unit sentry. + """Translate to amulet's built in run_action(). Deprecated. + + Run the named action on a given unit sentry. params a dict of parameters to use - _check_output parameter is used for dependency injection. + _check_output parameter is no longer used @return action_id. """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id + self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' + 'deprecated for amulet.run_action') + return unit_sentry.run_action(action, action_args=params) def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. - _check_output parameter is used for dependency injection. 
+ action_id a string action uuid + _check_output parameter is no longer used """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) + data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed" def status_get(self, unit): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-mon/tests/gate-basic-xenial-ocata b/ceph-mon/tests/gate-basic-xenial-ocata old mode 100644 new mode 100755 diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index d8d8d038..6f1aeace 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -14,13 +14,18 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* +passenv = HOME TERM AMULET_* CS_API_URL [testenv:py27] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From bce3acee2b718c8fa69627a4398934158e4a8193 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 7 Mar 2017 11:07:10 -0800 Subject: [PATCH 1293/2699] Enable Ocata Amulet Tests - Add Zesty as a supported series to metadata.yaml. - Turn on Xenial-Ocata Amulet test definitions. - Sync charm helpers to get Juju 2.x amulet compatibility. - Keeping Zesty-Ocata Amulet test definitions turned off until the metadata.yaml changes propagate to the charm store. - Resync tox.ini to resolve amulet unit test failures. 
Change-Id: Ia14904339099d84d09a2fce171740459a76a8367 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 32 +++-- .../contrib/hardening/templating.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 65 +++++++++- .../charmhelpers/contrib/openstack/context.py | 39 +++++- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/python/packages.py | 11 +- ceph-osd/hooks/charmhelpers/core/host.py | 2 + ceph-osd/hooks/charmhelpers/fetch/snap.py | 122 ++++++++++++++++++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 80 +++++++----- ceph-osd/metadata.yaml | 1 + ceph-osd/tests/basic_deployment.py | 21 ++- .../charmhelpers/contrib/amulet/utils.py | 29 ++--- .../contrib/openstack/amulet/utils.py | 12 +- ceph-osd/tests/charmhelpers/core/host.py | 2 + ceph-osd/tests/gate-basic-xenial-ocata | 0 ceph-osd/tox.ini | 7 +- 16 files changed, 346 insertions(+), 85 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/fetch/snap.py mode change 100644 => 100755 ceph-osd/tests/gate-basic-xenial-ocata diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1410512a..9646b838 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -227,6 +227,7 @@ class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() @@ -338,13 +339,14 @@ def get_nagios_unit_name(relation_name='nrpe-external-master'): return unit -def add_init_service_checks(nrpe, services, unit_name): +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): """ Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param list services: List of services to check :param str unit_name: Unit name to use in check description + :param bool immediate_check: For sysv init, run the service check immediately """ for svc in services: # Don't add a check for these services from neutron-gateway @@ -368,21 +370,31 @@ def add_init_service_checks(nrpe, services, unit_name): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) + checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) + croncmd = ( + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status' % svc + ) + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, + description='service check {%s}' % unit_name, + check_cmd='check_status_file.py -f %s' % checkpath, ) + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail + # (LP: #1670223). 
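+            # (For illustration, with svc='ceph-osd' the cron_file above
+            # expands to the single line:
+            #   */5 * * * * root /usr/local/lib/nagios/plugins/check_exit_status.pl
+            #     -s /etc/init.d/ceph-osd status > /var/lib/nagios/service-check-ceph-osd.txt
+            # and the same croncmd is re-run once below to seed the status
+            # file before cron's first pass.)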
+ if immediate_check and os.path.isdir(nrpe.homedir): + f = open(checkpath, 'w') + subprocess.call( + croncmd.split(), + stdout=f, + stderr=subprocess.STDOUT + ) + f.close() + os.chmod(checkpath, 0o644) def copy_nrpe_checks(): diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py index 2174c645..5b6765f7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import six from charmhelpers.core.hookenv import ( log, @@ -26,7 +27,10 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index e141fc12..54c76a72 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -20,25 +20,37 @@ from functools import partial -from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( + config, log, + network_get_primary_address, + unit_get, WARNING, ) +from charmhelpers.core.host import ( + lsb_release, +) + try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -414,7 +426,10 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -462,7 +477,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython", fatal=True) + if six.PY2: + apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) @@ -499,3 +517,40 @@ def port_has_listener(address, port): cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result)) + + +def assert_charm_supports_ipv6(): + """Check whether we are able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + +def get_relation_ip(interface, config_override=None): + """Return this unit's IP for the given relation. + + Allow for an arbitrary interface to use with network-get to select an IP. + Handle all address selection options including configuration parameter + override and IPv6. + + Usage: get_relation_ip('amqp', config_override='access-network') + + @param interface: string name of the relation. 
+ @param config_override: string name of the config option for network + override. Supports legacy network override configuration parameters. + @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. + @returns IPv6 or IPv4 address + """ + + fallback = get_host_ip(unit_get('private-address')) + if config('prefer-ipv6'): + assert_charm_supports_ipv6() + return get_ipv6_addr()[0] + elif config_override and config(config_override): + return get_address_in_network(config(config_override), + fallback) + else: + try: + return network_get_primary_address(interface) + except NotImplementedError: + return fallback diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 42316331..6cdbbbbf 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -100,7 +100,10 @@ try: import psutil except ImportError: - apt_install('python-psutil', fatal=True) + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -392,16 +395,20 @@ def __call__(self): for rid in relation_ids(self.rel_name): ha_vip_only = False self.related = True + transport_hosts = None + rabbitmq_port = '5672' for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True vip = relation_get('vip', rid=rid, unit=unit) vip = format_ipv6_addr(vip) or vip ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] else: host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + transport_hosts = [host] ctxt.update({ 'rabbitmq_user': username, @@ -413,6 +420,7 @@ def __call__(self): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: @@ -450,6 +458,20 @@ def __call__(self): rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = '' + for host in transport_hosts: + if transport_url_hosts: + format_string = ",{}:{}@{}:{}" + else: + format_string = "{}:{}@{}:{}" + transport_url_hosts += format_string.format( + ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], + host, rabbitmq_port) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) oslo_messaging_flags = conf.get('oslo-messaging-flags', None) if oslo_messaging_flags: @@ -481,13 +503,16 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) - ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, unit=unit) - unit_priv_addr = relation_get('private-address', rid=rid, - unit=unit) - ceph_addr = ceph_pub_addr or unit_priv_addr - ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr - mon_hosts.append(ceph_addr) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) diff --git 
a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 80219d66..7e8ecff4 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0', '2.12.0']), + ['2.11.0', '2.12.0', '2.13.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py index e29bd1bb..6e95028b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-osd/hooks/charmhelpers/contrib/python/packages.py @@ -16,6 +16,7 @@ # limitations under the License. import os +import six import subprocess import sys @@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - apt_install('python-pip') + if six.PY2: + apt_install('python-pip') + else: + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -136,7 +140,10 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - apt_install('python-virtualenv') + if six.PY2: + apt_install('python-virtualenv') + else: + apt_install('python3-virtualenv') if path: venv_path = path diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py new file mode 100644 index 00000000..23c707b0 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -0,0 +1,122 @@ +# Copyright 2014-2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Charm helpers snap for classic charms. + +If writing reactive charms, use the snap layer: +https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html +""" +import subprocess +from os import environ +from time import sleep +from charmhelpers.core.hookenv import log + +__author__ = 'Joseph Borg ' + +SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. +SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +class CouldNotAcquireLockException(Exception): + pass + + +def _snap_exec(commands): + """ + Execute snap commands. 
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to remove command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Removing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['remove'] + flags + packages)
+
+
+def snap_refresh(packages, *flags):
+    """
+    Refresh / Update snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to refresh command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['refresh'] + flags + packages)
diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
index 39b9b801..82ac80ff 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
@@ -116,8 +116,8 @@
 }
 
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
 
 
 def filter_installed_packages(packages):
@@ -249,7 +249,8 @@ def add_source(source, key=None):
           source.startswith('http') or
           source.startswith('deb ') or
           source.startswith('cloud-archive:')):
-        subprocess.check_call(['add-apt-repository', '--yes', source])
+        cmd = ['add-apt-repository', '--yes', source]
+        _run_with_retries(cmd)
     elif source.startswith('cloud:'):
         install(filter_installed_packages(['ubuntu-cloud-keyring']),
                 fatal=True)
@@ -286,41 +287,60 @@ def add_source(source, key=None):
                               key])
 
 
-def _run_apt_command(cmd, fatal=False):
-    """Run an APT command.
-
-    Checks the output and retries if the fatal flag is set
-    to True.
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param: max_retries: int: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :param: retry_message: str: Optional log prefix emitted during retries. + :param: cmd_env: dict: Environment variables to add to the command run. """ + env = os.environ.copy() + if cmd_env: + env.update(cmd_env) - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. 
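+    # (Note: via os.environ.get(), any DEBIAN_FRONTEND already set by the
+    # caller wins; 'noninteractive' is only the fallback value.)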
+ cmd_env = { + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} + + if fatal: + _run_with_retries( + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") else: + env = os.environ.copy() + env.update(cmd_env) subprocess.call(cmd, env=env) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 62711158..f8a0ef09 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -12,6 +12,7 @@ tags: - misc series: - xenial + - zesty - trusty - yakkety description: | diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 32e38c8b..8a90cf18 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -61,7 +61,7 @@ def _add_services(self): this_service = {'name': 'ceph-osd', 'units': 3} other_services = [ {'name': 'ceph-mon', 'units': 3}, - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'percona-cluster'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, @@ -97,10 +97,7 @@ def _configure_services(self): keystone_config = {'admin-password': 'openstack', 'admin-token': 'ubuntutesting'} pxc_config = { - 'dataset-size': '25%', 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', } cinder_config = {'block-device': 'None', 'glance-api-version': '2'} @@ -220,11 +217,15 @@ def test_102_services(self): services = { self.glance_sentry: ['glance-registry', 'glance-api'], - self.cinder_sentry: ['cinder-api', - 'cinder-scheduler', + self.cinder_sentry: ['cinder-scheduler', 'cinder-volume'], } + if self._get_openstack_release() < self.xenial_ocata: + services[self.cinder_sentry].append('cinder-api') + else: + services[self.cinder_sentry].append('apache2') + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead. @@ -356,8 +357,14 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' + # NOTE(jamespage): Deal with section config for >= ocata. + if self._get_openstack_release() >= self.xenial_ocata: + section_key = 'CEPH' + else: + section_key = 'DEFAULT' + expected = { - 'DEFAULT': { + section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' } } diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py index f9e4c3af..8a6b7644 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py @@ -785,37 +785,30 @@ def get_uuid_epoch_stamp(self): generating test messages which need to be unique-ish.""" return '[{}-{}]'.format(uuid.uuid4(), time.time()) -# amulet juju action helpers: + # amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): - """Run the named action on a given unit sentry. + """Translate to amulet's built in run_action(). Deprecated. + + Run the named action on a given unit sentry. params a dict of parameters to use - _check_output parameter is used for dependency injection. + _check_output parameter is no longer used @return action_id. 
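+
+        A sketch of the retained call shape (illustrative; 'pause' is just
+        an example action name)::
+
+            action_id = u.run_action(sentry_unit, 'pause')
+            completed = u.wait_on_action(action_id)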
""" - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id + self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' + 'deprecated for amulet.run_action') + return unit_sentry.run_action(action, action_args=params) def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. - _check_output parameter is used for dependency injection. + action_id a string action uuid + _check_output parameter is no longer used """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) + data = amulet.actions.get_action_output(action_id, full_output=True) return data.get(u"status") == "completed" def status_get(self, unit): diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index edbb72ff..05edfa50 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -306,6 +306,8 @@ def service_running(service_name, **kwargs): def init_is_systemd(): """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-osd/tests/gate-basic-xenial-ocata b/ceph-osd/tests/gate-basic-xenial-ocata old mode 100644 new mode 100755 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index d8d8d038..6f1aeace 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -14,13 +14,18 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* +passenv = HOME TERM AMULET_* CS_API_URL [testenv:py27] basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py35] +basepython = python3.5 +deps = 
-r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From c10e9ad1c913f34e6888c072b8b08dd965938417 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 7 Mar 2017 11:08:22 -0800 Subject: [PATCH 1294/2699] Enable Ocata Amulet Tests - Add Zesty as a supported series to metadata.yaml. - Turn on Xenial-Ocata Amulet test definitions. - Sync charm helpers to get Juju 2.x amulet compatibility. - Keeping Zesty-Ocata Amulet test definitions turned off until the metadata.yaml changes propagate to the charm store. - Resync tox.ini resolving amulet full recheck failures. Change-Id: I1864ee41be6815f9779669cab8ad8691bd78ffa5 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 32 +++- .../contrib/hardening/templating.py | 6 +- .../hooks/charmhelpers/contrib/network/ip.py | 65 ++++++- .../contrib/openstack/amulet/utils.py | 12 +- .../charmhelpers/contrib/openstack/context.py | 39 +++- .../contrib/openstack/ha/utils.py | 11 ++ .../contrib/openstack/keystone.py | 178 ++++++++++++++++++ .../contrib/openstack/templating.py | 10 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/python/packages.py | 11 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 2 + ceph-radosgw/hooks/charmhelpers/fetch/snap.py | 122 ++++++++++++ .../hooks/charmhelpers/fetch/ubuntu.py | 80 +++++--- ceph-radosgw/metadata.yaml | 1 + ceph-radosgw/tests/basic_deployment.py | 16 +- .../charmhelpers/contrib/amulet/utils.py | 29 ++- .../contrib/openstack/amulet/utils.py | 12 +- ceph-radosgw/tests/charmhelpers/core/host.py | 2 + ceph-radosgw/tests/gate-basic-xenial-ocata | 0 ceph-radosgw/tox.ini | 7 +- 20 files changed, 551 insertions(+), 86 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/snap.py mode change 100644 => 100755 ceph-radosgw/tests/gate-basic-xenial-ocata diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1410512a..9646b838 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -227,6 +227,7 @@ class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() @@ -338,13 +339,14 @@ def get_nagios_unit_name(relation_name='nrpe-external-master'): return unit -def add_init_service_checks(nrpe, services, unit_name): +def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): """ Add checks for each service in list :param NRPE nrpe: NRPE object to add check to :param list services: List of services to check :param str unit_name: Unit name to use in check description + :param bool immediate_check: For sysv init, run the service check immediately """ for svc in services: # Don't add a check for these services from neutron-gateway @@ -368,21 +370,31 @@ def add_init_service_checks(nrpe, services, unit_name): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) + checkpath = 
'%s/service-check-%s.txt' % (nrpe.homedir, svc) + croncmd = ( + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status' % svc + ) + cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, + description='service check {%s}' % unit_name, + check_cmd='check_status_file.py -f %s' % checkpath, ) + # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail + # (LP: #1670223). + if immediate_check and os.path.isdir(nrpe.homedir): + f = open(checkpath, 'w') + subprocess.call( + croncmd.split(), + stdout=f, + stderr=subprocess.STDOUT + ) + f.close() + os.chmod(checkpath, 0o644) def copy_nrpe_checks(): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py index 2174c645..5b6765f7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import six from charmhelpers.core.hookenv import ( log, @@ -26,7 +27,10 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index e141fc12..54c76a72 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -20,25 +20,37 @@ from functools import partial -from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( + config, log, + network_get_primary_address, + unit_get, WARNING, ) +from charmhelpers.core.host import ( + lsb_release, +) + try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -414,7 +426,10 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): @@ -462,7 +477,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python-dnspython", fatal=True) + if six.PY2: + apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) @@ -499,3 +517,40 @@ def port_has_listener(address, port): cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result)) + + +def assert_charm_supports_ipv6(): + """Check whether we are 
able to support charms ipv6.""" + if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " + "versions less than Trusty 14.04") + + +def get_relation_ip(interface, config_override=None): + """Return this unit's IP for the given relation. + + Allow for an arbitrary interface to use with network-get to select an IP. + Handle all address selection options including configuration parameter + override and IPv6. + + Usage: get_relation_ip('amqp', config_override='access-network') + + @param interface: string name of the relation. + @param config_override: string name of the config option for network + override. Supports legacy network override configuration parameters. + @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. + @returns IPv6 or IPv4 address + """ + + fallback = get_host_ip(unit_get('private-address')) + if config('prefer-ipv6'): + assert_charm_supports_ipv6() + return get_ipv6_addr()[0] + elif config_override and config(config_override): + return get_address_in_network(config(config_override), + fallback) + else: + try: + return network_get_primary_address(interface) + except NotImplementedError: + return fallback diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 401c0328..1f4cf42e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -32,6 +32,7 @@ from novaclient import exceptions import novaclient.client as nova_client +import novaclient import pika import swiftclient @@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant): self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) + if novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 42316331..6cdbbbbf 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -100,7 +100,10 @@ try: import psutil except ImportError: - apt_install('python-psutil', fatal=True) + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -392,16 +395,20 @@ def __call__(self): for rid in relation_ids(self.rel_name): ha_vip_only = False self.related = True + transport_hosts = None + rabbitmq_port = '5672' for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True vip = relation_get('vip', rid=rid, unit=unit) vip = format_ipv6_addr(vip) or vip ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] else: host = relation_get('private-address', rid=rid, unit=unit) host = 
format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + transport_hosts = [host] ctxt.update({ 'rabbitmq_user': username, @@ -413,6 +420,7 @@ def __call__(self): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: @@ -450,6 +458,20 @@ def __call__(self): rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = '' + for host in transport_hosts: + if transport_url_hosts: + format_string = ",{}:{}@{}:{}" + else: + format_string = "{}:{}@{}:{}" + transport_url_hosts += format_string.format( + ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], + host, rabbitmq_port) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) oslo_messaging_flags = conf.get('oslo-messaging-flags', None) if oslo_messaging_flags: @@ -481,13 +503,16 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) - ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, unit=unit) - unit_priv_addr = relation_get('private-address', rid=rid, - unit=unit) - ceph_addr = ceph_pub_addr or unit_priv_addr - ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr - mon_hosts.append(ceph_addr) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 1f5310bb..254a90e7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -126,3 +126,14 @@ def assert_charm_supports_dns_ha(): status_set('blocked', msg) raise DNSHAException(msg) return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 00000000..a15a03fa --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
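+
+"""Minimal helpers for driving the keystone v2.0/v3 APIs from charm code.
+
+Usage sketch (illustrative; the endpoint, credentials and service names
+below are placeholders, not values used by the charm)::
+
+    mgr = get_keystone_manager('http://10.0.0.1:5000/v3/', 3,
+                               username='admin', password='secret',
+                               tenant_name='admin')
+    mgr.service_exists(service_name='swift', service_type='object-store')
+"""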
+ +import six +from charmhelpers.fetch import apt_install +from charmhelpers.contrib.openstack.context import IdentityServiceContext +from charmhelpers.core.hookenv import ( + log, + ERROR, +) + + +def get_api_suffix(api_version): + """Return the formatted api suffix for the given version + @param api_version: version of the keystone endpoint + @returns the api suffix formatted according to the given api + version + """ + return 'v2.0' if api_version in (2, "2.0") else 'v3' + + +def format_endpoint(schema, addr, port, api_version): + """Return a formatted keystone endpoint + @param schema: http or https + @param addr: ipv4/ipv6 host of the keystone service + @param port: port of the keystone service + @param api_version: 2 or 3 + @returns a fully formatted keystone endpoint + """ + return '{}://{}:{}/{}/'.format(schema, addr, port, + get_api_suffix(api_version)) + + +def get_keystone_manager(endpoint, api_version, **kwargs): + """Return a keystonemanager for the correct API version + + @param endpoint: the keystone endpoint to point client at + @param api_version: version of the keystone api the client should use + @param kwargs: token or username/tenant/password information + @returns keystonemanager class used for interrogating keystone + """ + if api_version == 2: + return KeystoneManager2(endpoint, **kwargs) + if api_version == 3: + return KeystoneManager3(endpoint, **kwargs) + raise ValueError('No manager found for api version {}'.format(api_version)) + + +def get_keystone_manager_from_identity_service_context(): + """Return a keystonmanager generated from a + instance of charmhelpers.contrib.openstack.context.IdentityServiceContext + @returns keystonamenager instance + """ + context = IdentityServiceContext()() + if not context: + msg = "Identity service context cannot be generated" + log(msg, level=ERROR) + raise ValueError(msg) + + endpoint = format_endpoint(context['service_protocol'], + context['service_host'], + context['service_port'], + context['api_version']) + + if context['api_version'] in (2, "2.0"): + api_version = 2 + else: + api_version = 3 + + return get_keystone_manager(endpoint, api_version, + username=context['admin_user'], + password=context['admin_password'], + tenant_name=context['admin_tenant_name']) + + +class KeystoneManager(object): + + def resolve_service_id(self, service_name=None, service_type=None): + """Find the service_id of a given service""" + services = [s._info for s in self.api.services.list()] + + service_name = service_name.lower() + for s in services: + name = s['name'].lower() + if service_type and service_name: + if (service_name == name and service_type == s['type']): + return s['id'] + elif service_name and service_name == name: + return s['id'] + elif service_type and service_type == s['type']: + return s['id'] + return None + + def service_exists(self, service_name=None, service_type=None): + """Determine if the given service exists on the service list""" + return self.resolve_service_id(service_name, service_type) is not None + + +class KeystoneManager2(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + + self.api_version = 2 
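+        # (Gloss: the caller supplies either a pre-issued token or a
+        # username/password/tenant set; the token form below instantiates
+        # the client directly, the password form goes through a v2 session.)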
+ + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 89588951..934baf5d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -28,7 +28,10 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - apt_install('python-jinja2', fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -207,7 +210,10 @@ def __init__(self, templates_dir, openstack_release): # if this code is running, the object is created pre-install hook. # jinja2 shouldn't get touched until the module is reloaded on next # hook execution, with proper jinja2 bits successfully imported. - apt_install('python-jinja2') + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') def register(self, config_file, contexts): """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 80219d66..7e8ecff4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -153,7 +153,7 @@ ('newton', ['2.8.0', '2.9.0', '2.10.0']), ('ocata', - ['2.11.0', '2.12.0']), + ['2.11.0', '2.12.0', '2.13.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py index e29bd1bb..6e95028b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py @@ -16,6 +16,7 @@ # limitations under the License. 
 
 import os
+import six
 import subprocess
 import sys
 
@@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs):
             from pip import main as _pip_execute
         except ImportError:
             apt_update()
-            apt_install('python-pip')
+            if six.PY2:
+                apt_install('python-pip')
+            else:
+                apt_install('python3-pip')
             from pip import main as _pip_execute
         _pip_execute(*args, **kwargs)
     finally:
@@ -136,7 +140,10 @@ def pip_list():
 
 def pip_create_virtualenv(path=None):
     """Create an isolated Python environment."""
-    apt_install('python-virtualenv')
+    if six.PY2:
+        apt_install('python-virtualenv')
+    else:
+        apt_install('python3-virtualenv')
 
     if path:
         venv_path = path
diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py
index edbb72ff..05edfa50 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/host.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/host.py
@@ -306,6 +306,8 @@ def service_running(service_name, **kwargs):
 
 def init_is_systemd():
     """Return True if the host system uses systemd, False otherwise."""
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
     return os.path.isdir(SYSTEMD_SYSTEM)
 
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py
new file mode 100644
index 00000000..23c707b0
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py
@@ -0,0 +1,122 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+from os import environ
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+SNAP_NO_LOCK = 1  # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
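+
+    (Note: flags are passed through verbatim to 'snap install', so standard
+    snap CLI options such as '--classic' or '--channel=edge' can be used.)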
+ + :param packages: String or List String package name + :param flags: List String flags to pass to install command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Installing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with option(s) "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['install'] + flags + packages) + + +def snap_remove(packages, *flags): + """ + Remove a snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 39b9b801..82ac80ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -116,8 +116,8 @@ } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. +CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -249,7 +249,8 @@ def add_source(source, key=None): source.startswith('http') or source.startswith('deb ') or source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) + cmd = ['add-apt-repository', '--yes', source] + _run_with_retries(cmd) elif source.startswith('cloud:'): install(filter_installed_packages(['ubuntu-cloud-keyring']), fatal=True) @@ -286,41 +287,60 @@ def add_source(source, key=None): key]) -def _run_apt_command(cmd, fatal=False): - """Run an APT command. - - Checks the output and retries if the fatal flag is set - to True. +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param: max_retries: int: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :param: retry_exitcodes: tuple: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :param: retry_message: str: Optional log prefix emitted during retries. + :param: cmd_env: dict: Environment variables to add to the command run. 
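+
+    For instance, add_source() above now routes 'add-apt-repository'
+    through this helper; a sketch with a placeholder source::
+
+        _run_with_retries(['add-apt-repository', '--yes', 'ppa:ceph/stable'])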
""" + env = os.environ.copy() + if cmd_env: + env.update(cmd_env) - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. + cmd_env = { + 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} + + if fatal: + _run_with_retries( + cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") else: + env = os.environ.copy() + env.update(cmd_env) subprocess.call(cmd, env=env) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index bf1fad5e..34f29f41 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,6 +14,7 @@ tags: - misc series: - xenial + - zesty - trusty - yakkety extra-bindings: diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index f59c4ea7..3bc3366e 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -327,11 +327,15 @@ def test_102_services(self): self.keystone_sentry: ['keystone'], self.glance_sentry: ['glance-registry', 'glance-api'], - self.cinder_sentry: ['cinder-api', - 'cinder-scheduler', + self.cinder_sentry: ['cinder-scheduler', 'cinder-volume'], } + if self._get_openstack_release() < self.xenial_mitaka: + services[self.cinder_sentry].append('cinder-api') + else: + services[self.cinder_sentry].append('apache2') + if self._get_openstack_release() < self.xenial_mitaka: # For upstart systems only. Ceph services under systemd # are checked by process name instead. @@ -487,8 +491,14 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' + # NOTE(jamespage): Deal with section config for >= ocata. 
+ if self._get_openstack_release() >= self.xenial_ocata: + section_key = 'CEPH' + else: + section_key = 'DEFAULT' + expected = { - 'DEFAULT': { + section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' } } diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py index f9e4c3af..8a6b7644 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py @@ -785,37 +785,30 @@ def get_uuid_epoch_stamp(self): generating test messages which need to be unique-ish.""" return '[{}-{}]'.format(uuid.uuid4(), time.time()) -# amulet juju action helpers: + # amulet juju action helpers: def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): - """Run the named action on a given unit sentry. + """Translate to amulet's built in run_action(). Deprecated. + + Run the named action on a given unit sentry. params a dict of parameters to use - _check_output parameter is used for dependency injection. + _check_output parameter is no longer used @return action_id. """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id + self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' + 'deprecated for amulet.run_action') + return unit_sentry.run_action(action, action_args=params) def wait_on_action(self, action_id, _check_output=subprocess.check_output): """Wait for a given action, returning if it completed or not. - _check_output parameter is used for dependency injection. 
+        action_id a string action uuid
+        _check_output parameter is no longer used
         """
-        command = ["juju", "action", "fetch", "--format=json", "--wait=0",
-                   action_id]
-        output = _check_output(command, universal_newlines=True)
-        data = json.loads(output)
+        data = amulet.actions.get_action_output(action_id, full_output=True)
         return data.get(u"status") == "completed"
 
     def status_get(self, unit):
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 401c0328..1f4cf42e 100644
--- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -32,6 +32,7 @@
 
 from novaclient import exceptions
 import novaclient.client as nova_client
+import novaclient
 import pika
 import swiftclient
 
@@ -434,9 +435,14 @@ def authenticate_nova_user(self, keystone, user, password, tenant):
         self.log.debug('Authenticating nova user ({})...'.format(user))
         ep = keystone.service_catalog.url_for(service_type='identity',
                                               endpoint_type='publicURL')
-        return nova_client.Client(NOVA_CLIENT_VERSION,
-                                  username=user, api_key=password,
-                                  project_id=tenant, auth_url=ep)
+        if novaclient.__version__[0] >= "7":
+            return nova_client.Client(NOVA_CLIENT_VERSION,
+                                      username=user, password=password,
+                                      project_name=tenant, auth_url=ep)
+        else:
+            return nova_client.Client(NOVA_CLIENT_VERSION,
+                                      username=user, api_key=password,
+                                      project_id=tenant, auth_url=ep)
 
     def authenticate_swift_user(self, keystone, user, password, tenant):
         """Authenticates a regular user with swift api."""
diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py
index edbb72ff..05edfa50 100644
--- a/ceph-radosgw/tests/charmhelpers/core/host.py
+++ b/ceph-radosgw/tests/charmhelpers/core/host.py
@@ -306,6 +306,8 @@ def service_running(service_name, **kwargs):
 
 def init_is_systemd():
     """Return True if the host system uses systemd, False otherwise."""
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
     return os.path.isdir(SYSTEMD_SYSTEM)
 
 
diff --git a/ceph-radosgw/tests/gate-basic-xenial-ocata b/ceph-radosgw/tests/gate-basic-xenial-ocata
old mode 100644
new mode 100755
diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini
index d8d8d038..6f1aeace 100644
--- a/ceph-radosgw/tox.ini
+++ b/ceph-radosgw/tox.ini
@@ -14,13 +14,18 @@ install_command =
   pip install --allow-unverified python-apt {opts} {packages}
 commands = ostestr {posargs}
 whitelist_externals = juju
-passenv = HOME TERM AMULET_*
+passenv = HOME TERM AMULET_* CS_API_URL
 
 [testenv:py27]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
+[testenv:py35]
+basepython = python3.5
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:pep8]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt

From c49afde1eed1296951015439bcc6975d19a1ab4b Mon Sep 17 00:00:00 2001
From: Felipe Reyes
Date: Mon, 27 Mar 2017 18:47:51 -0300
Subject: [PATCH 1295/2699] Handle list of addresses in ceph-public-address

charmhelpers' CephContext class is capable of handling a list of IP
addresses in the ceph-public-address attribute; this allows ceph-proxy
to hand over the list of monitor hosts.
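
(As an aside, a minimal sketch of the behaviour relied on here; the helper
name is illustrative and not part of the charm. A single relation value
carrying several space-separated addresses flattens to the same monitor
list as one address per unit.)

    # Minimal sketch, not charm code: collapse relation values into a
    # flat list of monitor addresses.
    def build_mon_hosts(relation_values):
        hosts = []
        for value in relation_values:
            if value:
                hosts.extend(str(value).split())
        return hosts

    # One ceph-proxy unit publishing three monitors...
    assert build_mon_hosts(['10.5.4.1 10.5.4.2 10.5.4.3']) == \
        ['10.5.4.1', '10.5.4.2', '10.5.4.3']
    # ...yields the same result as three mon units, one address each.
    assert build_mon_hosts(['10.5.4.1', '10.5.4.2', '10.5.4.3']) == \
        ['10.5.4.1', '10.5.4.2', '10.5.4.3']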
Closes-Bug: 1642430 Change-Id: I2bbff167fce2c75a3a619b658f0c569c6d5be3d5 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 29 ++++++------ .../unit_tests/test_ceph_radosgw_context.py | 44 +++++++++++++++++-- 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 2fa1c05d..3ba6b781 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -35,7 +35,6 @@ ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, - get_host_ip, get_ipv6_addr, ) from charmhelpers.contrib.storage.linux.ceph import CephConfContext @@ -125,7 +124,7 @@ def ensure_host_resolvable_v6(hostname): shutil.rmtree(dtmp) -class MonContext(context.OSContextGenerator): +class MonContext(context.CephContext): interfaces = ['ceph-radosgw'] def __call__(self): @@ -133,17 +132,21 @@ def __call__(self): return {} mon_hosts = [] auths = [] - for relid in relation_ids('mon'): - for unit in related_units(relid): - ceph_public_addr = relation_get('ceph-public-address', unit, - relid) - if ceph_public_addr: - host_ip = format_ipv6_addr(ceph_public_addr) or \ - get_host_ip(ceph_public_addr) - mon_hosts.append('{}:6789'.format(host_ip)) - _auth = relation_get('auth', unit, relid) - if _auth: - auths.append(_auth) + + for rid in relation_ids('mon'): + for unit in related_units(rid): + _auth = relation_get('auth', rid=rid, unit=unit) + if _auth: + auths.append(_auth) + + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr + if ceph_addr: + mon_hosts.append(ceph_addr) if len(set(auths)) != 1: e = ("Inconsistent or absent auth returned by mon units. 
Setting " diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index e55249c3..e0f91367 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -190,7 +190,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', - 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, @@ -208,6 +208,44 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') + @patch.object(context, 'ensure_host_resolvable_v6') + def test_list_of_addresses_from_ceph_proxy(self, mock_ensure_rsv_v6): + self.socket.gethostname.return_value = 'testhost' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1 10.5.4.2 10.5.4.3'] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return 'cephx' + + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph-proxy/0'] + expect = { + 'auth_supported': 'cephx', + 'hostname': 'testhost', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', + 'old_auth': False, + 'use_syslog': 'false', + 'loglevel': 1, + 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, + 'ipv6': False + } + self.assertEqual(expect, mon_ctxt()) + self.assertFalse(mock_ensure_rsv_v6.called) + + self.test_config.set('prefer-ipv6', True) + addresses = ['10.5.4.1 10.5.4.2 10.5.4.3'] + expect['ipv6'] = True + expect['port'] = "[::]:%s" % (70) + self.assertEqual(expect, mon_ctxt()) + self.assertTrue(mock_ensure_rsv_v6.called) + @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') def test_ctxt_missing_data(self): @@ -237,7 +275,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'none', 'hostname': 'testhost', - 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, @@ -266,7 +304,7 @@ def _relation_get(attr, unit, rid): expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', - 'mon_hosts': '10.5.4.1:6789 10.5.4.2:6789 10.5.4.3:6789', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, 'use_syslog': 'false', 'loglevel': 1, From 67144644e1f0124843120906249594d7e5d229e8 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Mon, 13 Mar 2017 16:54:18 -0700 Subject: [PATCH 1296/2699] Upgrade OSDs one at a time when changing ownership Some upgrade scenarios (hammer->jewel) require that the ownership of the ceph osd directories are changed from root:root to ceph:ceph. This patch improves the upgrade experience by upgrading one OSD at a time as opposed to stopping all services, changing file ownership, and then restarting all services at once. This patch makes use of the `setuser match path` directive in the ceph.conf, which causes the ceph daemon to start as the owner of the OSD's root directory. This allows the ceph OSDs to continue running should an unforeseen incident occur as part of this upgrade. 
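
(As an aside, the sequencing this enables, as a minimal sketch with
illustrative names. The charm's template change is only visible as a
diffstat entry for ceph-osd/templates/ceph.conf below; the directive is
assumed here to take its commonly documented upstream form,
"setuser match path = /var/lib/ceph/$type/$cluster-$id".)

    # Minimal sketch, not charm code: with 'setuser match path' in
    # effect, a restarted daemon runs as the owner of its data
    # directory, so each OSD can be cycled independently while the
    # rest of the cluster keeps serving I/O.
    def upgrade_osds_one_by_one(osd_dirs, stop_osd, chown_tree, start_osd):
        for osd_dir in osd_dirs:
            stop_osd(osd_dir)    # only this OSD goes down
            chown_tree(osd_dir)  # root:root -> ceph:ceph
            start_osd(osd_dir)   # comes back as the new owner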
Change-Id: I00fdbe0fd113c56209429341f0a10797e5baee5a Closes-Bug: #1662591 --- ceph-osd/charm-helpers-tests.yaml | 1 + ceph-osd/hooks/ceph_hooks.py | 39 +- .../contrib/hardening/apache/checks/config.py | 10 +- .../{hardening.conf => 99-hardening.conf} | 20 +- .../contrib/hardening/audits/__init__.py | 13 +- .../contrib/hardening/defaults/apache.yaml | 5 +- .../hardening/defaults/apache.yaml.schema | 3 + .../contrib/hardening/defaults/os.yaml | 1 + .../contrib/hardening/defaults/os.yaml.schema | 1 + .../contrib/hardening/host/checks/profile.py | 10 +- .../hardening/host/templates/99-hardening.sh | 5 + .../contrib/hardening/ssh/checks/config.py | 20 +- .../hooks/charmhelpers/contrib/network/ip.py | 54 ++- .../charmhelpers/contrib/openstack/context.py | 35 +- .../charmhelpers/contrib/openstack/neutron.py | 19 +- .../charmhelpers/contrib/openstack/utils.py | 40 +- ceph-osd/hooks/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 + .../charmhelpers/core/host_factory/ubuntu.py | 32 ++ ceph-osd/hooks/charmhelpers/core/strutils.py | 53 +++ ceph-osd/lib/ceph/__init__.py | 430 +++++++++++++++--- ceph-osd/lib/ceph/ceph_broker.py | 204 ++++++++- ceph-osd/lib/ceph/ceph_helpers.py | 31 +- ceph-osd/lib/setup.py | 85 ++++ ceph-osd/templates/ceph.conf | 3 + .../contrib/openstack/amulet/utils.py | 3 +- ceph-osd/tests/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 + .../charmhelpers/core/host_factory/ubuntu.py | 32 ++ ceph-osd/tests/charmhelpers/core/strutils.py | 53 +++ ceph-osd/tests/charmhelpers/osplatform.py | 25 + ceph-osd/unit_tests/test_ceph_hooks.py | 3 + ceph-osd/unit_tests/test_replace_osd.py | 16 +- ceph-osd/unit_tests/test_tuning.py | 65 +-- ceph-osd/unit_tests/test_upgrade.py | 38 +- 35 files changed, 1182 insertions(+), 203 deletions(-) rename ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/{hardening.conf => 99-hardening.conf} (56%) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh create mode 100644 ceph-osd/lib/setup.py create mode 100644 ceph-osd/tests/charmhelpers/osplatform.py diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml index e5063253..b0de9df6 100644 --- a/ceph-osd/charm-helpers-tests.yaml +++ b/ceph-osd/charm-helpers-tests.yaml @@ -4,3 +4,4 @@ include: - contrib.amulet - contrib.openstack.amulet - core + - osplatform diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f15d6241..06372057 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -26,8 +26,9 @@ from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, - ERROR, DEBUG, + ERROR, + INFO, config, relation_ids, related_units, @@ -100,16 +101,27 @@ def check_for_upgrade(): 'distro') log('new_version: {}'.format(new_version)) - if old_version == new_version: + # May be in a previous upgrade that was failed if the directories + # still need an ownership update. Check this condition. + resuming_upgrade = ceph.dirs_need_ownership_update('osd') + + if old_version == new_version and not resuming_upgrade: log("No new ceph version detected, skipping upgrade.", DEBUG) return - if (old_version in ceph.UPGRADE_PATHS and - new_version == ceph.UPGRADE_PATHS[old_version]): - log("{} to {} is a valid upgrade path. 
Proceeding.".format(
-            old_version, new_version))
+    if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or\
+            resuming_upgrade:
+        if old_version == new_version:
+            log('Attempting to resume possibly failed upgrade.',
+                INFO)
+        else:
+            log("{} to {} is a valid upgrade path. Proceeding.".format(
+                old_version, new_version))
+
+        emit_cephconf(upgrading=True)
         ceph.roll_osd_cluster(new_version=new_version,
                               upgrade_key='osd-upgrade')
+        emit_cephconf(upgrading=False)
     else:
         # Log a helpful error message
         log("Invalid upgrade path from {} to {}. "
@@ -215,7 +227,14 @@ def use_short_objects():
     return False
 
 
-def get_ceph_context():
+def get_ceph_context(upgrading=False):
+    """Returns the current context dictionary for generating ceph.conf
+
+    :param upgrading: bool - determines if the context is invoked as
+                      part of an upgrade procedure. Setting this to true
+                      causes settings useful during an upgrade to be
+                      defined in the ceph.conf file
+    """
     mon_hosts = get_mon_hosts()
     log('Monitor hosts are ' + repr(mon_hosts))
 
@@ -237,6 +256,7 @@ def get_ceph_context():
         'loglevel': config('loglevel'),
         'dio': str(config('use-direct-io')).lower(),
         'short_object_len': use_short_objects(),
+        'upgrade_in_progress': upgrading,
     }
 
     if config('prefer-ipv6'):
@@ -267,14 +287,15 @@ def get_ceph_context():
     return cephcontext
 
 
-def emit_cephconf():
+def emit_cephconf(upgrading=False):
     # Install ceph.conf as an alternative to support
     # co-existence with other charms that write this file
     charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
     mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
           group=ceph.ceph_user())
     with open(charm_ceph_conf, 'w') as cephconf:
-        cephconf.write(render_template('ceph.conf', get_ceph_context()))
+        context = get_ceph_context(upgrading)
+        cephconf.write(render_template('ceph.conf', context))
     install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                         charm_ceph_conf, 90)
 
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
index 51b636f7..b18b263d 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
@@ -26,6 +26,7 @@
     DirectoryPermissionAudit,
     NoReadWriteForOther,
     TemplatedFile,
+    DeletedFile
 )
 from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
 from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
@@ -52,13 +53,13 @@ def get_audits():
                       'mods-available/alias.conf'),
                       context,
                       TEMPLATES_DIR,
-                      mode=0o0755,
+                      mode=0o0640,
                       user='root',
                       service_actions=[{'service': 'apache2',
                                         'actions': ['restart']}]),
 
         TemplatedFile(os.path.join(settings['common']['apache_dir'],
-                      'conf-enabled/hardening.conf'),
+                      'conf-enabled/99-hardening.conf'),
                       context,
                       TEMPLATES_DIR,
                       mode=0o0640,
@@ -69,11 +70,13 @@ def get_audits():
         DirectoryPermissionAudit(settings['common']['apache_dir'],
                                  user='root',
                                  group='root',
-                                 mode=0o640),
+                                 mode=0o0750),
 
         DisabledModuleAudit(settings['hardening']['modules_to_disable']),
 
         NoReadWriteForOther(settings['common']['apache_dir']),
+
+        DeletedFile(['/var/www/html/index.html'])
     ]
 
     return audits
@@ -94,5 +97,4 @@ def __call__(self):
             ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1)
             ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
-            ctxt['traceenable'] = settings['hardening']['traceenable']
         return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 56% rename from ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf rename to ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf index 07945418..22b68041 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -4,15 +4,29 @@ ############################################################################### - + # http://httpd.apache.org/docs/2.4/upgrading.html {% if apache_version > '2.2' -%} Require all granted {% else -%} - Order Allow,Deny - Deny from all + Order Allow,Deny + Deny from all {% endif %} + + Options -Indexes -FollowSymLinks + AllowOverride None + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 9bf9c3c6..6dd5b05f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -49,13 +49,6 @@ def _take_action(self): # Invoke the callback if there is one. if hasattr(self.unless, '__call__'): - results = self.unless() - if results: - return False - else: - return True - - if self.unless: - return False - else: - return True + return not self.unless() + + return not self.unless diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml index e5ada29f..0f940d4c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -10,4 +10,7 @@ common: hardening: traceenable: 'off' allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] \ No newline at end of file + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema index 227589b5..c112137c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -7,3 +7,6 @@ common: hardening: allowed_http_methods: modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml index ddd4286c..9a8627b5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -58,6 +58,7 @@ security: rsync kernel_enable_module_loading: True # (type:boolean) kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 sysctl: kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema index 88b3966e..cc3b9c20 100644 --- 
a/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -34,6 +34,7 @@ security: packages_list: kernel_enable_module_loading: kernel_enable_core_dump: + ssh_tmout: sysctl: kernel_secure_sysrq: kernel_enable_sysrq: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index 56d65263..2727428d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -25,7 +25,6 @@ def get_audits(): audits = [] settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be # created as they may contain sensitive information. if not settings['security']['kernel_enable_core_dump']: @@ -33,11 +32,18 @@ def get_audits(): ProfileContext(), template_dir=TEMPLATES_DIR, mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) return audits class ProfileContext(object): def __call__(self): - ctxt = {} + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 00000000..616cef46 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index f3cac6d9..41bed2d1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -27,7 +27,10 @@ apt_install, apt_update, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) from charmhelpers.contrib.hardening.audits.file import ( TemplatedFile, FileContentAudit, @@ -68,7 +71,8 @@ def get_macs(self, allow_weak_mac): 'weak': default + ',hmac-sha1'} # Use newer ciphers on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) macs = macs_66 @@ -96,7 +100,8 @@ def get_kexs(self, allow_weak_kex): 'weak': weak} # Use newer kex on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new key exchange ' 'algorithms', level=DEBUG) kex = kex_66 @@ -119,7 +124,8 @@ def get_ciphers(self, cbc_required): 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} # Use newer ciphers on ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new ciphers', level=DEBUG) 
cipher = ciphers_66 @@ -291,7 +297,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: @@ -364,7 +371,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 54c76a72..7451af9c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -31,6 +31,7 @@ from charmhelpers.core.host import ( lsb_release, + CompareHostReleases, ) try: @@ -67,6 +68,24 @@ def no_ip_found_error_out(network): raise ValueError(errmsg) +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. 
@@ -100,11 +119,9 @@ def get_address_in_network(network, fallback=None, fatal=False): if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -180,18 +197,18 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] return None @@ -521,7 +538,8 @@ def port_has_listener(address, port): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 6cdbbbbf..7876145d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -59,6 +59,7 @@ write_file, pwgen, lsb_release, + CompareHostReleases, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -155,7 +156,8 @@ def context_complete(self, ctxt): if self.missing_data: self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) else: self.complete = True return self.complete @@ -213,8 +215,9 @@ def __call__(self): hostname_key = "{}_hostname".format(self.relation_prefix) else: hostname_key = "hostname" - access_hostname = get_address_in_network(access_network, - unit_get('private-address')) + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -308,7 +311,10 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): - def __init__(self, service=None, service_user=None, rel_name='identity-service'): + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): self.service = service self.service_user = service_user self.rel_name = rel_name @@ -457,19 +463,17 @@ def __call__(self): host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) transport_hosts = rabbitmq_hosts if transport_hosts: - transport_url_hosts = '' - for host in transport_hosts: - if transport_url_hosts: - format_string = 
",{}:{}@{}:{}" - else: - format_string = "{}:{}@{}:{}" - transport_url_hosts += format_string.format( - ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], - host, rabbitmq_port) + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) ctxt['transport_url'] = "rabbit://{}/{}".format( transport_url_hosts, vhost) @@ -1601,7 +1605,8 @@ def __call__(self): if ctxt['use_memcache']: # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead - if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) > 'trusty': ctxt['memcache_server'] = '::1' else: ctxt['memcache_server'] = 'ip6-localhost' diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index a8f1ed72..37fa0eb0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,7 +23,10 @@ ERROR, ) -from charmhelpers.contrib.openstack.utils import os_release +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) def headers_package(): @@ -198,7 +201,8 @@ def neutron_plugins(): }, 'plumgrid': { 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), 'contexts': [ context.SharedDBContext(user=config('database-user'), database=config('database'), @@ -225,7 +229,7 @@ def neutron_plugins(): 'server_services': ['neutron-server'] } } - if release >= 'icehouse': + if CompareOpenStackReleases(release) >= 'icehouse': # NOTE: patch in ml2 plugin for icehouse onwards plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' @@ -233,10 +237,10 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] - if release >= 'kilo': + if CompareOpenStackReleases(release) >= 'kilo': plugins['midonet']['driver'] = ( 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if release >= 'liberty': + if CompareOpenStackReleases(release) >= 'liberty': plugins['midonet']['driver'] = ( 'midonet.neutron.plugin_v1.MidonetPluginV2') plugins['midonet']['server_packages'].remove( @@ -244,10 +248,11 @@ def neutron_plugins(): plugins['midonet']['server_packages'].append( 'python-networking-midonet') plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') - if release >= 'mitaka': + if CompareOpenStackReleases(release) >= 'mitaka': plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 7e8ecff4..e13450c1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -33,9 +33,7 @@ from 
charmhelpers.contrib.network import ip -from charmhelpers.core import ( - unitdata, -) +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( action_fail, @@ -55,6 +53,8 @@ application_version_set, ) +from charmhelpers.core.strutils import BasicStringComparator + from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, is_lvm_physical_volume, @@ -97,6 +97,22 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', +) + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -238,6 +254,17 @@ DEFAULT_LOOPBACK_SIZE = '5G' +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. + + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + def error_out(msg): juju_log("FATAL ERROR: %s" % msg, level='ERROR') sys.exit(1) @@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(init_in_source, init_source) with open(init_source, 'a') as outfile: - template = '/usr/share/openstack-pkg-tools/init-script-template' + template = ('/usr/share/openstack-pkg-tools/' + 'init-script-template') with open(template) as infile: outfile.write('\n\n{}'.format(infile.read())) @@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None): if not _release: _release = get_os_codename_install_source(source) - # TODO: this should be changed to a numeric comparison using a known list - # of releases and comparing by index. - return _release >= 'mitaka' + return CompareOpenStackReleases(_release) >= 'mitaka' def token_cache_pkgs(source=None, release=None): diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py b/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. 
+ + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index db3772b5..7acbc521 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -25,20 +25,28 @@ import shutil import pyudev +from datetime import datetime + from charmhelpers.core import hookenv +from charmhelpers.core import templating from charmhelpers.core.host import ( - mkdir, chownr, - service_restart, + cmp_pkgrevno, lsb_release, - cmp_pkgrevno, service_stop, mounts, service_start) + mkdir, + mounts, + owner, + service_restart, + service_start, + service_stop) from charmhelpers.core.hookenv import ( - log, - ERROR, cached, + config, + log, status_set, - WARNING, DEBUG, config) -from charmhelpers.core.services import render_template + DEBUG, + ERROR, + WARNING) from charmhelpers.fetch import ( apt_cache, add_source, apt_install, apt_update) @@ -54,6 +62,12 @@ from charmhelpers.contrib.openstack.utils import ( get_os_codename_install_source) +from ceph.ceph_helpers import check_output + +CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') +OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') +HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') + LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] @@ -167,7 +181,7 @@ def tune_nic(network_interface): try: # Apply the settings log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) + check_output(["sysctl", "-p", sysctl_file]) except subprocess.CalledProcessError as err: log('sysctl -p {} failed with error {}'.format(sysctl_file, err.output), @@ -218,14 +232,21 @@ def persist_settings(settings_dict): The settings_dict should be in the form of {"uuid": {"key":"value"}} :param settings_dict: dict of settings to save """ - hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + if not settings_dict: + return + try: - with open(hdparm_path, 'w') as hdparm: - hdparm.write(render_template('hdparm.conf', settings_dict)) + templating.render(source='hdparm.conf', target=HDPARM_FILE, + context=settings_dict) except IOError as err: log("Unable to open {path} because of error: {error}".format( - path=hdparm_path, - error=err.message), level=ERROR) + path=HDPARM_FILE, error=err.message), level=ERROR) + except Exception as e: + # The templating.render can raise a jinja2 exception if the + # template is not found. 
Rather than polluting the import
+        # space of this charm, simply catch Exception
+        log('Unable to render {path} due to error: {error}'.format(
+            path=HDPARM_FILE, error=e.message), level=ERROR)
 
 
 def set_max_sectors_kb(dev_name, max_sectors_size):
@@ -299,9 +320,9 @@ def set_hdd_read_ahead(dev_name, read_ahead_sectors=256):
         log('Setting read ahead to {} for device {}'.format(
             read_ahead_sectors,
             dev_name))
-        subprocess.check_output(['hdparm',
-                                 '-a{}'.format(read_ahead_sectors),
-                                 dev_name])
+        check_output(['hdparm',
+                      '-a{}'.format(read_ahead_sectors),
+                      dev_name])
     except subprocess.CalledProcessError as e:
         log('hdparm failed with error: {}'.format(e.output),
             level=ERROR)
@@ -314,7 +335,7 @@ def get_block_uuid(block_dev):
     :return: The UUID of the device or None on Error.
     """
     try:
-        block_info = subprocess.check_output(
+        block_info = check_output(
             ['blkid', '-o', 'export', block_dev])
         for tag in block_info.split('\n'):
             parts = tag.split('=')
@@ -390,6 +411,7 @@ def tune_dev(block_dev):
     if uuid is None:
         log('block device {} uuid is None. Unable to save to '
             'hdparm.conf'.format(block_dev), level=DEBUG)
+        return
     save_settings_dict = {}
     log('Tuning device {}'.format(block_dev))
     status_set('maintenance', 'Tuning device {}'.format(block_dev))
@@ -455,6 +477,33 @@ def __le__(self, other):
         return self.name < other.name
 
 
+def get_osd_weight(osd_id):
+    """
+    Returns the weight of the specified OSD
+    :return: Float :raise: ValueError if the monmap fails to parse.
+    Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        tree = check_output(
+            ['ceph', 'osd', 'tree', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['nodes']:
+                return None
+            for device in json_tree['nodes']:
+                if device['type'] == 'osd' and device['name'] == osd_id:
+                    return device['crush_weight']
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v.message))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e.message))
+        raise
+
+
 def get_osd_tree(service):
     """
     Returns the current osd map in JSON.
@@ -462,7 +511,7 @@ def get_osd_tree(service):
     Also raises CalledProcessError if our ceph command fails
     """
     try:
-        tree = subprocess.check_output(
+        tree = check_output(
             ['ceph', '--id', service,
              'osd', 'tree', '--format=json'])
         try:
@@ -497,6 +546,43 @@ def get_osd_tree(service):
         raise
 
 
+def _get_child_dirs(path):
+    """Returns a list of directory names in the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :return: list. A list of child directories under the parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    files_in_dir = [os.path.join(path, f) for f in os.listdir(path)]
+    return list(filter(os.path.isdir, files_in_dir))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return int: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
+    """
+    match = re.search('ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: %s" % dirname)
+
+    return match.group('osd_id')
+
+
 def get_local_osd_ids():
     """
     This will list the /var/lib/ceph/osd/* directories and try
@@ -602,7 +688,7 @@ def is_quorum():
     ]
     if os.path.exists(asok):
         try:
-            result = json.loads(subprocess.check_output(cmd))
+            result = json.loads(check_output(cmd))
         except subprocess.CalledProcessError:
             return False
         except ValueError:
@@ -629,7 +715,7 @@ def is_leader():
     ]
     if os.path.exists(asok):
         try:
-            result = json.loads(subprocess.check_output(cmd))
+            result = json.loads(check_output(cmd))
         except subprocess.CalledProcessError:
             return False
         except ValueError:
@@ -736,7 +822,7 @@ def replace_osd(dead_osd_number,
         # Drop this osd out of the cluster. This will begin a
         # rebalance operation
         status_set('maintenance', 'Removing osd {}'.format(dead_osd_number))
-        subprocess.check_output([
+        check_output([
             'ceph',
             '--id',
             'osd-upgrade',
@@ -747,8 +833,8 @@ def replace_osd(dead_osd_number,
         if systemd():
             service_stop('ceph-osd@{}'.format(dead_osd_number))
         else:
-            subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format(
-                dead_osd_number)]),
+            check_output(['stop', 'ceph-osd', 'id={}'.format(
+                dead_osd_number)])
         # umount if still mounted
         ret = umount(mount_point)
         if ret < 0:
@@ -756,20 +842,20 @@ def replace_osd(dead_osd_number,
                 mount_point, os.strerror(ret)))
         # Clean up the old mount point
         shutil.rmtree(mount_point)
-        subprocess.check_output([
+        check_output([
             'ceph',
             '--id',
             'osd-upgrade',
             'osd', 'crush', 'remove',
             'osd.{}'.format(dead_osd_number)])
         # Revoke the OSDs access keys
-        subprocess.check_output([
+        check_output([
             'ceph',
             '--id',
             'osd-upgrade',
             'auth', 'del',
             'osd.{}'.format(dead_osd_number)])
-        subprocess.check_output([
+        check_output([
             'ceph',
             '--id',
             'osd-upgrade',
@@ -788,7 +874,7 @@ def replace_osd(dead_osd_number,
 
 def is_osd_disk(dev):
     try:
-        info = subprocess.check_output(['sgdisk', '-i', '1', dev])
+        info = check_output(['sgdisk', '-i', '1', dev])
         info = info.split("\n")  # IGNORE:E1103
         for line in info:
             for ptype in CEPH_PARTITIONS:
@@ -869,7 +955,7 @@ def generate_monitor_secret():
         '--name=mon.',
         '--gen-key'
     ]
-    res = subprocess.check_output(cmd)
+    res = check_output(cmd)
 
     return "{}==".format(res.split('=')[1].strip())
 
@@ -901,7 +987,7 @@ def parse_key(raw_key):
     else:
         for element in raw_key.splitlines():
             if 'key' in element:
-                key = element.split(' = ')[1].strip()  # IGNORE:E1103
+                return element.split(' = ')[1].strip()  # IGNORE:E1103
     return key
 
@@ -1017,8 +1103,8 @@ def create_named_keyring(entity, name, caps=None):
     ]
     for subsystem, subcaps in caps.items():
         cmd.extend([subsystem, '; '.join(subcaps)])
-    log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG)
-    return parse_key(subprocess.check_output(cmd).strip())  # IGNORE:E1103
+    log("Calling check_output: {}".format(cmd), level=DEBUG)
+    return parse_key(check_output(cmd).strip())  # IGNORE:E1103
 
 
 def get_upgrade_key():
@@ -1033,6 +1119,26 @@ def get_named_key(name, caps=None, pool_list=None):
     :param caps: dict of cephx capabilities
     :return: Returns a cephx key
     """
+    try:
+        # Does the key already exist?
+ output = check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + 'client.{}'.format(name), + ]).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! + log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps cmd = [ "sudo", @@ -1055,8 +1161,8 @@ def get_named_key(name, caps=None, pool_list=None): pools = " ".join(['pool={0}'.format(i) for i in pool_list]) subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(check_output(cmd).strip()) # IGNORE:E1103 def upgrade_key_caps(key, caps): @@ -1148,7 +1254,7 @@ def maybe_zap_journal(journal_dev): def get_partitions(dev): cmd = ['partx', '--raw', '--noheadings', dev] try: - out = subprocess.check_output(cmd).splitlines() + out = check_output(cmd).splitlines() log("get partitions: {}".format(out), level=DEBUG) return out except subprocess.CalledProcessError as e: @@ -1216,12 +1322,12 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, try: log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: if ignore_errors: log('Unable to initialize device: {}'.format(dev), WARNING) else: log('Unable to initialize device: {}'.format(dev), ERROR) - raise e + raise def osdize_dir(path, encrypt=False): @@ -1258,7 +1364,7 @@ def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: - result = subprocess.check_output(cmd) + result = check_output(cmd) return result.split() except subprocess.CalledProcessError: return [] @@ -1274,9 +1380,9 @@ def get_cephfs(service): # This command wasn't introduced until 0.86 ceph return [] try: - output = subprocess.check_output(["ceph", - '--id', service, - "fs", "ls"]) + output = check_output(["ceph", + '--id', service, + "fs", "ls"]) if not output: return [] """ @@ -1403,10 +1509,17 @@ def upgrade_monitor(new_version): service_stop('ceph-mon-all') apt_install(packages=PACKAGES, fatal=True) - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph_user(), - group=ceph_user()) + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. 
+ if new_version == 'jewel': + # Ensure the ownership of Ceph's directories is correct + owner = ceph_user() + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + if systemd(): for mon_id in get_local_mon_ids(): service_start('ceph-mon@{}'.format(mon_id)) @@ -1447,6 +1560,7 @@ def lock_and_roll(upgrade_key, service, my_name, version): my_name, version, stop_timestamp)) + status_set('maintenance', 'Finishing upgrade') monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, my_name, version), @@ -1569,34 +1683,198 @@ def upgrade_osd(new_version): add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the ceph sources failed with message: {}".format( err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + try: - if systemd(): - for osd_id in get_local_osd_ids(): - service_stop('ceph-osd@{}'.format(osd_id)) - else: - service_stop('ceph-osd-all') + # Upgrade the packages before restarting the daemons. + status_set('maintenance', 'Upgrading packages to %s' % new_version) apt_install(packages=PACKAGES, fatal=True) - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=ceph_user(), - group=ceph_user()) - if systemd(): - for osd_id in get_local_osd_ids(): - service_start('ceph-osd@{}'.format(osd_id)) - else: - service_start('ceph-osd-all') - except subprocess.CalledProcessError as err: + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for path in non_osd_dirs: + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. + for osd_dir in _get_child_dirs(OSD_BASE_DIR): + try: + osd_num = _get_osd_num_from_dirname(osd_dir) + _upgrade_single_osd(osd_num, osd_dir) + except ValueError as ex: + # Directory could not be parsed - junk directory? + log('Could not parse osd directory %s: %s' % (osd_dir, ex), + WARNING) + continue + + except (subprocess.CalledProcessError, IOError) as err: log("Stopping ceph and upgrading packages failed " "with message: {}".format(err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) +def _upgrade_single_osd(osd_num, osd_dir): + """Upgrades the single OSD directory. 
+ + :param osd_num: the num of the OSD + :param osd_dir: the directory of the OSD to upgrade + :raises CalledProcessError: if an error occurs in a command issued as part + of the upgrade process + :raises IOError: if an error occurs reading/writing to a file as part + of the upgrade process + """ + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) + + +def stop_osd(osd_num): + """Stops the specified OSD number. + + :param osd_num: the osd number to stop + """ + if systemd(): + service_stop('ceph-osd@{}'.format(osd_num)) + else: + service_stop('ceph-osd', id=osd_num) + + +def start_osd(osd_num): + """Starts the specified OSD number. + + :param osd_num: the osd number to start. + """ + if systemd(): + service_start('ceph-osd@{}'.format(osd_num)) + else: + service_start('ceph-osd', id=osd_num) + + +def disable_osd(osd_num): + """Disables the specified OSD number. + + Ensures that the specified osd will not be automatically started at the + next reboot of the system. Due to differences between init systems, + this method cannot make any guarantees that the specified osd cannot be + started manually. + + :param osd_num: the osd id which should be disabled. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + to disable the OSD + :raises IOError, OSError: if the attempt to read/remove the ready file in + an upstart enabled system fails + """ + if systemd(): + # When running under systemd, the individual ceph-osd daemons run as + # templated units and can be directly addressed by referring to the + # templated service name ceph-osd@. Additionally, systemd + # allows one to disable a specific templated unit by running the + # 'systemctl disable ceph-osd@' command. When disabled, the + # OSD should remain disabled until re-enabled via systemd. + # Note: disabling an already disabled service in systemd returns 0, so + # no need to check whether it is enabled or not. + cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # Neither upstart nor the ceph-osd upstart script provides for + # disabling the starting of an OSD automatically. The specific OSD + # cannot be prevented from running manually, however it can be + # prevented from running automatically on reboot by removing the + # 'ready' file in the OSD's root directory. This is due to the + # ceph-osd-all upstart script checking for the presence of this file + # before starting the OSD. + ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + if os.path.exists(ready_file): + os.unlink(ready_file) + + +def enable_osd(osd_num): + """Enables the specified OSD number. + + Ensures that the specified osd_num will be enabled and ready to start + automatically in the event of a reboot. + + :param osd_num: the osd id which should be enabled. + :raises CalledProcessError: if the call to the systemd command issued + fails when enabling the service + :raises IOError: if the attempt to write the ready file in an usptart + enabled system fails + """ + if systemd(): + cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # When running on upstart, the OSDs are started via the ceph-osd-all + # upstart script which will only start the osd if it has a 'ready' + # file. Make sure that file exists. 
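# For reference, "enabled" on an upstart host means nothing more than this
# marker file existing, so a toy probe of the same contract would be
# (illustrative sketch, not charm code; the base path mirrors what
# OSD_BASE_DIR is used for here):
#
#     import os
#
#     def osd_is_autostart(osd_num, base='/var/lib/ceph/osd'):
#         marker = os.path.join(base, 'ceph-{}'.format(osd_num), 'ready')
#         return os.path.exists(marker)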
+ ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + with open(ready_file, 'w') as f: + f.write('ready') + + # Make sure the correct user owns the file. It shouldn't be necessary + # as the upstart script should run with root privileges, but its better + # to have all the files matching ownership. + update_owner(ready_file) + + +def update_owner(path, recurse_dirs=True): + """Changes the ownership of the specified path. + + Changes the ownership of the specified path to the new ceph daemon user + using the system's native chown functionality. This may take awhile, + so this method will issue a set_status for any changes of ownership which + recurses into directory structures. + + :param path: the path to recursively change ownership for + :param recurse_dirs: boolean indicating whether to recursively change the + ownership of all the files in a path's subtree or to + simply change the ownership of the path. + :raises CalledProcessError: if an error occurs issuing the chown system + command + """ + user = ceph_user() + user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) + cmd = ['chown', user_group, path] + if os.path.isdir(path) and recurse_dirs: + status_set('maintenance', ('Updating ownership of %s to %s' % + (path, user))) + cmd.insert(1, '-R') + + log('Changing ownership of {path} to {user}'.format( + path=path, user=user_group), DEBUG) + start = datetime.now() + subprocess.check_call(cmd) + elapsed_time = (datetime.now() - start) + + log('Took {secs} seconds to change the ownership of path: {path}'.format( + secs=elapsed_time.total_seconds(), path=path), DEBUG) + + def list_pools(service): """ This will list the current pools that Ceph has @@ -1607,7 +1885,7 @@ def list_pools(service): """ try: pool_list = [] - pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + pools = check_output(['rados', '--id', service, 'lspools']) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -1616,6 +1894,36 @@ def list_pools(service): raise +def dirs_need_ownership_update(service): + """Determines if directories still need change of ownership. + + Examines the set of directories under the /var/lib/ceph/{service} directory + and determines if they have the correct ownership or not. This is + necessary due to the upgrade from Hammer to Jewel where the daemon user + changes from root: to ceph:. + + :param service: the name of the service folder to check (e.g. osd, mon) + :return: boolean. True if the directories need a change of ownership, + False otherwise. + :raises IOError: if an error occurs reading the file stats from one of + the child directories. + :raises OSError: if the specified path does not exist or some other error + """ + expected_owner = expected_group = ceph_user() + path = os.path.join(CEPH_BASE_DIR, service) + for child in _get_child_dirs(path): + curr_owner, curr_group = owner(child) + + if (curr_owner == expected_owner) and (curr_group == expected_group): + continue + + log('Directory "%s" needs its ownership updated' % child, DEBUG) + return True + + # All child directories had the expected ownership + return False + + # A dict of valid ceph upgrade paths. 
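# (For the ownership probe above: an owner() helper consistent with how it
# is called here needs only os.stat plus name lookups, e.g. this
# standalone sketch:
#
#     import grp
#     import os
#     import pwd
#
#     def owner(path):
#         st = os.stat(path)
#         return (pwd.getpwuid(st.st_uid).pw_name,
#                 grp.getgrgid(st.st_gid).gr_name)
#
# so the loop costs one stat per child directory, which is cheap next to
# the recursive chown it lets the caller skip.)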
Mapping is old -> new UPGRADE_PATHS = { 'firefly': 'hammer', diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index 33d0df8d..f1e59daf 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -28,12 +28,14 @@ get_cephfs, get_osd_weight ) -from ceph_helpers import Crushmap +from ceph.ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, erasure_profile_exists, get_osds, + monitor_key_get, + monitor_key_set, pool_exists, pool_set, remove_pool_snapshot, @@ -49,7 +51,7 @@ # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message -from subprocess import check_output, CalledProcessError +from subprocess import check_call, check_output, CalledProcessError POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] @@ -157,11 +159,192 @@ def handle_create_erasure_profile(request, service): data_chunks=k, coding_chunks=m, locality=l) +def handle_add_permissions_to_key(request, service): + """ + Groups are defined by the key cephx.groups.(namespace-)?-(name). This key + will contain a dict serialized to JSON with data about the group, including + pools and members. + + A group can optionally have a namespace defined that will be used to + further restrict pool access. + """ + service_name = request.get('name') + group_name = request.get('group') + group_namespace = request.get('group-namespace') + if group_namespace: + group_name = "{}-{}".format(group_namespace, group_name) + group = get_group(group_name=group_name) + service_obj = get_service_groups(service=service_name, + namespace=group_namespace) + format("Service object: {}".format(service_obj)) + permission = request.get('group-permission') or "rwx" + if service_name not in group['services']: + group['services'].append(service_name) + save_group(group=group, group_name=group_name) + if permission not in service_obj['group_names']: + service_obj['group_names'][permission] = [] + if group_name not in service_obj['group_names'][permission]: + service_obj['group_names'][permission].append(group_name) + save_service(service=service_obj, service_name=service_name) + service_obj['groups'] = _build_service_groups(service_obj, + group_namespace) + update_service_permissions(service_name, service_obj, group_namespace) + + +def update_service_permissions(service, service_obj=None, namespace=None): + """Update the key permissions for the named client in Ceph""" + if not service_obj: + service_obj = get_service_groups(service=service, namespace=namespace) + permissions = pool_permission_list_for_service(service_obj) + call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e)) + + +def add_pool_to_group(pool, group, namespace=None): + """Add a named pool to a named group""" + group_name = group + if namespace: + group_name = "{}-{}".format(namespace, group_name) + group = get_group(group_name=group_name) + if pool not in group['pools']: + group["pools"].append(pool) + save_group(group, group_name=group_name) + for service in group['services']: + update_service_permissions(service, namespace=namespace) + + +def pool_permission_list_for_service(service): + """Build the permission string for Ceph for a given service""" + permissions = [] + permission_types = {} + for 
permission, group in service["group_names"].items(): + if permission not in permission_types: + permission_types[permission] = [] + for item in group: + permission_types[permission].append(item) + for permission, groups in permission_types.items(): + permission = "allow {}".format(permission) + for group in groups: + for pool in service['groups'][group]['pools']: + permissions.append("{} pool={}".format(permission, pool)) + return ["mon", "allow r", "osd", ', '.join(permissions)] + + +def get_service_groups(service, namespace=None): + """ + Services are objects stored with some metadata, they look like (for a + service named "nova"): + { + group_names: {'rwx': ['images']}, + groups: {} + } + After populating the group, it looks like: + { + group_names: {'rwx': ['images']}, + groups: { + 'images': { + pools: ['glance'], + services: ['nova'] + } + } + } + """ + service_json = monitor_key_get(service='admin', + key="cephx.services.{}".format(service)) + try: + service = json.loads(service_json) + except TypeError: + service = None + except ValueError: + service = None + if service: + service['groups'] = _build_service_groups(service, namespace) + else: + service = {'group_names': {}, 'groups': {}} + return service + + +def _build_service_groups(service, namespace=None): + '''Rebuild the 'groups' dict for a service group + + :returns: dict: dictionary keyed by group name of the following + format: + + { + 'images': { + pools: ['glance'], + services: ['nova', 'glance] + }, + 'vms':{ + pools: ['nova'], + services: ['nova'] + } + } + ''' + all_groups = {} + for _, groups in service['group_names'].items(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + all_groups[group] = get_group(group_name=name) + return all_groups + + +def get_group(group_name): + """ + A group is a structure to hold data about a named group, structured as: + { + pools: ['glance'], + services: ['nova'] + } + """ + group_key = get_group_key(group_name=group_name) + group_json = monitor_key_get(service='admin', key=group_key) + try: + group = json.loads(group_json) + except TypeError: + group = None + except ValueError: + group = None + if not group: + group = { + 'pools': [], + 'services': [] + } + return group + + +def save_service(service_name, service): + """Persist a service in the monitor cluster""" + service['groups'] = {} + return monitor_key_set(service='admin', + key="cephx.services.{}".format(service_name), + value=json.dumps(service)) + + +def save_group(group, group_name): + """Persist a group in the monitor cluster""" + group_key = get_group_key(group_name=group_name) + return monitor_key_set(service='admin', + key=group_key, + value=json.dumps(group)) + + +def get_group_key(group_name): + """Build group key""" + return 'cephx.groups.{}'.format(group_name) + + def handle_erasure_pool(request, service): pool_name = request.get('name') erasure_profile = request.get('erasure-profile') quota = request.get('max-bytes') weight = request.get('weight') + group_name = request.get('group') if erasure_profile is None: erasure_profile = "default-canonical" @@ -172,6 +355,13 @@ def handle_erasure_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + # TODO: Default to 3/2 erasure coding. 
I believe this requires min 5 osds if not erasure_profile_exists(service=service, name=erasure_profile): # TODO: Fail and tell them to create the profile or default @@ -200,6 +390,7 @@ def handle_replicated_pool(request, service): replicas = request.get('replicas') quota = request.get('max-bytes') weight = request.get('weight') + group_name = request.get('group') # Optional params pg_num = request.get('pg_num') @@ -215,6 +406,13 @@ def handle_replicated_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + kwargs = {} if pg_num: kwargs['pg_num'] = pg_num @@ -570,6 +768,8 @@ def process_requests_v1(reqs): ret = handle_rgw_create_user(request=req, service=svc) elif op == "move-osd-to-bucket": ret = handle_put_osd_in_bucket(request=req, service=svc) + elif op == "add-permissions-to-key": + ret = handle_add_permissions_to_key(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-osd/lib/ceph/ceph_helpers.py b/ceph-osd/lib/ceph/ceph_helpers.py index 8e5c807f..11f5dd8c 100644 --- a/ceph-osd/lib/ceph/ceph_helpers.py +++ b/ceph-osd/lib/ceph/ceph_helpers.py @@ -36,7 +36,11 @@ import re import subprocess -from subprocess import (check_call, check_output, CalledProcessError, ) +from subprocess import ( + check_call, + check_output as s_check_output, + CalledProcessError, +) from charmhelpers.core.hookenv import (config, local_unit, relation_get, @@ -111,6 +115,15 @@ LEGACY_PG_COUNT = 200 +def check_output(*args, **kwargs): + ''' + Helper wrapper for py2/3 compat with subprocess.check_output + + @returns str: UTF-8 decoded representation of output + ''' + return s_check_output(*args, **kwargs).decode('UTF-8') + + def validator(value, valid_type, valid_range=None): """ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ @@ -188,7 +201,7 @@ def load_crushmap(self): stdout=subprocess.PIPE) return subprocess.check_output( ('crushtool', '-d', '-'), - stdin=crush.stdout).decode('utf-8') + stdin=crush.stdout) except Exception as e: log("load_crushmap error: {}".format(e)) raise "Failed to read Crushmap" @@ -565,7 +578,8 @@ def monitor_key_delete(service, key): :param key: six.string_types. The key to delete. 
""" try: - check_output(['ceph', '--id', service, 'config-key', 'del', str(key)]) + check_output(['ceph', '--id', service, + 'config-key', 'del', str(key)]) except CalledProcessError as e: log("Monitor config-key put failed with message: {}".format(e.output)) raise @@ -867,8 +881,7 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']).decode( - 'UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) except CalledProcessError: return False @@ -882,7 +895,7 @@ def get_osds(service): version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']).decode('UTF-8')) + '--format=json'])) return None @@ -900,7 +913,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', service, '--pool', pool - ]).decode('UTF-8') + ]) except CalledProcessError: return False @@ -1025,7 +1038,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) except CalledProcessError: return False @@ -1212,7 +1225,7 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-osd/lib/setup.py b/ceph-osd/lib/setup.py new file mode 100644 index 00000000..139c80d6 --- /dev/null +++ b/ceph-osd/lib/setup.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function + +import os +import sys +from setuptools import setup, find_packages +from setuptools.command.test import test as TestCommand + +version = "0.0.1.dev1" +install_require = [ +] + +tests_require = [ + 'tox >= 2.3.1', +] + + +class Tox(TestCommand): + + user_options = [('tox-args=', 'a', "Arguments to pass to tox")] + + def initialize_options(self): + TestCommand.initialize_options(self) + self.tox_args = None + + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + # import here, cause outside the eggs aren't loaded + import tox + import shlex + args = self.tox_args + # remove the 'test' arg from argv as tox passes it to ostestr which + # breaks it. 
+ sys.argv.pop() + if args: + args = shlex.split(self.tox_args) + errno = tox.cmdline(args=args) + sys.exit(errno) + + +if sys.argv[-1] == 'publish': + os.system("python setup.py sdist upload") + os.system("python setup.py bdist_wheel upload") + sys.exit() + + +if sys.argv[-1] == 'tag': + os.system("git tag -a %s -m 'version %s'" % (version, version)) + os.system("git push --tags") + sys.exit() + + +setup( + name='charms.ceph', + version=version, + description='Provide base module for ceph charms.', + classifiers=[ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "Topic :: System", + "Topic :: System :: Installation/Setup", + "Topic :: System :: Software Distribution", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "License :: OSI Approved :: Apache Software License", + ], + url='https://github.com/openstack/charms.ceph', + author='OpenStack Charmers', + author_email='openstack-dev@lists.openstack.org', + license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0', + packages=find_packages(exclude=["unit_tests"]), + zip_safe=False, + cmdclass={'test': Tox}, + install_requires=install_require, + extras_require={ + 'testing': tests_require, + }, + tests_require=tests_require, +) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 4491baef..fb1993f6 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -30,6 +30,9 @@ cluster addr = {{ cluster_addr }} {%- if crush_location %} osd crush location = {{crush_location}} {%- endif %} +{%- if upgrade_in_progress %} +setuser match path = /var/lib/ceph/$type/$cluster-$id +{%- endif %} {% if global -%} # The following are user-provided options provided via the config-flags charm option. 
# User-provided [global] section config diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..346e6fea 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/centos.py b/ceph-osd/tests/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-osd/tests/charmhelpers/core/host_factory/centos.py +++ b/ceph-osd/tests/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. 
+ + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-osd/tests/charmhelpers/core/strutils.py b/ceph-osd/tests/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-osd/tests/charmhelpers/core/strutils.py +++ b/ceph-osd/tests/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. + """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-osd/tests/charmhelpers/osplatform.py b/ceph-osd/tests/charmhelpers/osplatform.py new file mode 100644 index 00000000..d9a4d5c0 --- /dev/null +++ b/ceph-osd/tests/charmhelpers/osplatform.py @@ -0,0 +1,25 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." 
+ .format(current_platform)) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index d4878e2d..99dd4d2d 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -62,6 +62,7 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, + 'upgrade_in_progress': False, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) @@ -94,6 +95,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, + 'upgrade_in_progress': False, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) @@ -128,6 +130,7 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, + 'upgrade_in_progress': False, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index ce919382..c74d10e1 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -74,23 +74,17 @@ def test_umount(self): assert ret == 0 @patch('ceph.mounts') - @patch('ceph.subprocess') + @patch('ceph.check_output') @patch('ceph.umount') @patch('ceph.osdize') @patch('ceph.shutil') @patch('ceph.systemd') @patch('ceph.ceph_user') - def test_replace_osd(self, - ceph_user, - systemd, - shutil, - osdize, - umount, - subprocess, - mounts): + def test_replace_osd(self, ceph_user, systemd, shutil, osdize, umount, + check_output, mounts): ceph_user.return_value = "ceph" mounts.return_value = [['/var/lib/ceph/osd/ceph-a', '/dev/sda']] - subprocess.check_output.return_value = True + check_output.return_value = True self.status_set.return_value = None systemd.return_value = False umount.return_value = 0 @@ -103,7 +97,7 @@ def test_replace_osd(self, osd_journal=None, reformat_osd=False, ignore_errors=False) - subprocess.check_output.assert_has_calls( + check_output.assert_has_calls( [ call(['ceph', '--id', 'osd-upgrade', 'osd', 'out', 'osd.0']), diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index 61a69443..1a403448 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -6,7 +6,6 @@ TO_PATCH = [ 'hookenv', 'status_set', - 'subprocess', 'log', ] @@ -15,36 +14,37 @@ class PerformanceTestCase(test_utils.CharmTestCase): def setUp(self): super(PerformanceTestCase, self).setUp(ceph, TO_PATCH) - def test_tune_nic(self): - with patch('ceph.get_link_speed', return_value=10000): - with patch('ceph.save_sysctls') as save_sysctls: - ceph.tune_nic('eth0') - save_sysctls.assert_has_calls( - [ - call( - save_location='/etc/sysctl.d/' - '51-ceph-osd-charm-eth0.conf', - sysctl_dict={ - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.rmem_default': 524287, - 'net.ipv4.tcp_wmem': - '10000000 10000000 10000000', - 'net.core.netdev_max_backlog': 300000, - 'net.core.optmem_max': 524287, - 'net.ipv4.tcp_mem': - '10000000 10000000 10000000', - 'net.ipv4.tcp_rmem': - '10000000 10000000 10000000', - 'net.core.wmem_default': 524287}) - ]) - self.status_set.assert_has_calls( - [ - call('maintenance', 'Tuning device eth0'), - ]) + @patch.object(ceph, 'check_output') + @patch.object(ceph, 'get_link_speed') + @patch.object(ceph, 'save_sysctls') + def test_tune_nic(self, save_sysctls, get_link_speed, check_output): 
+ get_link_speed.return_value = 10000 + ceph.tune_nic('eth0') + save_sysctls.assert_has_calls([ + call( + save_location='/etc/sysctl.d/51-ceph-osd-charm-eth0.conf', + sysctl_dict={ + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.rmem_default': 524287, + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.core.netdev_max_backlog': 300000, + 'net.core.optmem_max': 524287, + 'net.ipv4.tcp_mem': '10000000 10000000 10000000', + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.core.wmem_default': 524287 + }) + ]) + check_output.assert_called_with(['sysctl', '-p', + '/etc/sysctl.d/' + '51-ceph-osd-charm-eth0.conf']) + self.status_set.assert_has_calls([ + call('maintenance', 'Tuning device eth0'), + ]) - def test_get_block_uuid(self): - self.subprocess.check_output.return_value = \ + @patch('ceph.check_output') + def test_get_block_uuid(self, check_output): + check_output.return_value = \ 'UUID=378f3c86-b21a-4172-832d-e2b3d4bc7511\nTYPE=ext2\n' uuid = ceph.get_block_uuid('/dev/sda1') self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') @@ -118,8 +118,9 @@ def test_tune_dev_2(self, call('maintenance', 'Finished tuning device /dev/sda') ]) - def test_set_hdd_read_ahead(self): + @patch('ceph.check_output') + def test_set_hdd_read_ahead(self, check_output): ceph.set_hdd_read_ahead(dev_name='/dev/sda') - self.subprocess.check_output.assert_called_with( + check_output.assert_called_with( ['hdparm', '-a256', '/dev/sda'] ) diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index 080073c1..4d3bf0a3 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -2,7 +2,7 @@ __author__ = 'Chris Holcombe ' -from mock import patch, MagicMock +from mock import call, patch, MagicMock from ceph_hooks import check_for_upgrade @@ -18,13 +18,17 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): + @patch('ceph_hooks.ceph.dirs_need_ownership_update') @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_osd_cluster') def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, - version, is_bootstrapped): + emit_cephconf, version, is_bootstrapped, + dirs_need_ownership_update): + dirs_need_ownership_update.return_value = False is_bootstrapped.return_value = True version.side_effect = ['firefly', 'hammer'] host.lsb_release.return_value = { @@ -36,9 +40,33 @@ def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, config_side_effect('source')] check_for_upgrade() - roll_osd_cluster.assert_called_with( - new_version='hammer', - upgrade_key='osd-upgrade') + roll_osd_cluster.assert_called_with(new_version='hammer', + upgrade_key='osd-upgrade') + emit_cephconf.assert_has_calls([call(upgrading=True), + call(upgrading=False)]) + + @patch('ceph_hooks.ceph.dirs_need_ownership_update') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.emit_cephconf') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.host.lsb_release') + @patch('ceph_hooks.ceph.roll_osd_cluster') + def test_resume_failed_upgrade(self, roll_osd_cluster, lsb_release, + hookenv, emit_cephconf, version, + is_bootstrapped, + dirs_need_ownership_update): + dirs_need_ownership_update.return_value = True + is_bootstrapped.return_value = True + version.side_effect = ['jewel', 'jewel'] + 
lsb_release.return_value = {'DISTRIB_CODENAME': 'trusty'} + + check_for_upgrade() + + roll_osd_cluster.assert_called_with(new_version='jewel', + upgrade_key='osd-upgrade') + emit_cephconf.assert_has_calls([call(upgrading=True), + call(upgrading=False)]) @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') From 61754dc9f2fdba7e8163e54ae5b1f051591b2604 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 29 Mar 2017 15:06:34 +0100 Subject: [PATCH 1297/2699] Fix alphanumeric comparisons for openstack and ubuntu releases - sync charmhelpers with fix-alpha helpers - fix up code where the alpha comparisons are done Change-Id: I10e3e689aba490118b84b6b9e58df1ba5632d07d Related-Bug: #1659575 --- ceph-proxy/charm-helpers-tests.yaml | 1 + ceph-proxy/hooks/ceph.py | 12 +++-- .../contrib/hardening/apache/checks/config.py | 10 ++-- .../{hardening.conf => 99-hardening.conf} | 20 +++++-- .../contrib/hardening/audits/__init__.py | 13 ++--- .../contrib/hardening/defaults/apache.yaml | 5 +- .../hardening/defaults/apache.yaml.schema | 3 ++ .../contrib/hardening/defaults/os.yaml | 1 + .../contrib/hardening/defaults/os.yaml.schema | 1 + .../contrib/hardening/host/checks/profile.py | 10 +++- .../hardening/host/templates/99-hardening.sh | 5 ++ .../contrib/hardening/ssh/checks/config.py | 20 ++++--- .../hooks/charmhelpers/contrib/network/ip.py | 54 ++++++++++++------- .../charmhelpers/contrib/openstack/utils.py | 40 +++++++++++--- .../contrib/storage/linux/ceph.py | 18 ++++--- ceph-proxy/hooks/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 ++++++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++++++++ .../hooks/charmhelpers/core/strutils.py | 53 ++++++++++++++++++ ceph-proxy/hooks/utils.py | 13 +++-- .../contrib/openstack/amulet/utils.py | 3 +- ceph-proxy/tests/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 ++++++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++++++++ .../tests/charmhelpers/core/strutils.py | 53 ++++++++++++++++++ ceph-proxy/tests/charmhelpers/osplatform.py | 25 +++++++++ ceph-proxy/tox.ini | 2 +- 27 files changed, 392 insertions(+), 70 deletions(-) rename ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/{hardening.conf => 99-hardening.conf} (56%) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh create mode 100644 ceph-proxy/tests/charmhelpers/osplatform.py diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index e5063253..b0de9df6 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -4,3 +4,4 @@ include: - contrib.amulet - contrib.openstack.amulet - core + - osplatform diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index a3bc52d6..44b28497 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -15,19 +15,23 @@ from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, - is_device_mounted) + is_device_mounted, +) from charmhelpers.core.host import ( mkdir, chownr, service_restart, lsb_release, - cmp_pkgrevno) + cmp_pkgrevno, + CompareHostReleases, +) from charmhelpers.core.hookenv import ( log, ERROR, cached, status_set, - WARNING) + WARNING, +) from charmhelpers.fetch import ( apt_cache ) @@ -403,7 +407,7 @@ def upgrade_key_caps(key, caps): @cached def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) 
>= 'vivid' def bootstrap_monitor_cluster(secret): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 51b636f7..b18b263d 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -26,6 +26,7 @@ DirectoryPermissionAudit, NoReadWriteForOther, TemplatedFile, + DeletedFile ) from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR @@ -52,13 +53,13 @@ def get_audits(): 'mods-available/alias.conf'), context, TEMPLATES_DIR, - mode=0o0755, + mode=0o0640, user='root', service_actions=[{'service': 'apache2', 'actions': ['restart']}]), TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/hardening.conf'), + 'conf-enabled/99-hardening.conf'), context, TEMPLATES_DIR, mode=0o0640, @@ -69,11 +70,13 @@ def get_audits(): DirectoryPermissionAudit(settings['common']['apache_dir'], user='root', group='root', - mode=0o640), + mode=0o0750), DisabledModuleAudit(settings['hardening']['modules_to_disable']), NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) ] return audits @@ -94,5 +97,4 @@ def __call__(self): ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - ctxt['traceenable'] = settings['hardening']['traceenable'] return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 56% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf rename to ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf index 07945418..22b68041 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -4,15 +4,29 @@ ############################################################################### - + # http://httpd.apache.org/docs/2.4/upgrading.html {% if apache_version > '2.2' -%} Require all granted {% else -%} - Order Allow,Deny - Deny from all + Order Allow,Deny + Deny from all {% endif %} + + Options -Indexes -FollowSymLinks + AllowOverride None + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 9bf9c3c6..6dd5b05f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -49,13 +49,6 @@ def _take_action(self): # Invoke the callback if there is one. 
if hasattr(self.unless, '__call__'): - results = self.unless() - if results: - return False - else: - return True - - if self.unless: - return False - else: - return True + return not self.unless() + + return not self.unless diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml index e5ada29f..0f940d4c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -10,4 +10,7 @@ common: hardening: traceenable: 'off' allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] \ No newline at end of file + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema index 227589b5..c112137c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -7,3 +7,6 @@ common: hardening: allowed_http_methods: modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml index ddd4286c..9a8627b5 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -58,6 +58,7 @@ security: rsync kernel_enable_module_loading: True # (type:boolean) kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 sysctl: kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema index 88b3966e..cc3b9c20 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -34,6 +34,7 @@ security: packages_list: kernel_enable_module_loading: kernel_enable_core_dump: + ssh_tmout: sysctl: kernel_secure_sysrq: kernel_enable_sysrq: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index 56d65263..2727428d 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -25,7 +25,6 @@ def get_audits(): audits = [] settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be # created as they may contain sensitive information. 
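# A second audit is appended just below when ssh_tmout is set: it renders
# /etc/profile.d/99-hardening.sh from the template added later in this
# patch, whose whole payload is
#
#     TMOUT={{ tmout }}
#     readonly TMOUT
#     export TMOUT
#
#     readonly HISTFILE
#
# i.e. an idle-shell timeout plus a read-only history variable. (As
# written, the template reads {{ tmout }} while ProfileContext below
# supplies the key 'ssh_tmout', so the two names do not match.)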
if not settings['security']['kernel_enable_core_dump']: @@ -33,11 +32,18 @@ def get_audits(): ProfileContext(), template_dir=TEMPLATES_DIR, mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) return audits class ProfileContext(object): def __call__(self): - ctxt = {} + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} return ctxt diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 00000000..616cef46 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index f3cac6d9..41bed2d1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -27,7 +27,10 @@ apt_install, apt_update, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) from charmhelpers.contrib.hardening.audits.file import ( TemplatedFile, FileContentAudit, @@ -68,7 +71,8 @@ def get_macs(self, allow_weak_mac): 'weak': default + ',hmac-sha1'} # Use newer ciphers on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) macs = macs_66 @@ -96,7 +100,8 @@ def get_kexs(self, allow_weak_kex): 'weak': weak} # Use newer kex on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new key exchange ' 'algorithms', level=DEBUG) kex = kex_66 @@ -119,7 +124,8 @@ def get_ciphers(self, cbc_required): 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} # Use newer ciphers on ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new ciphers', level=DEBUG) cipher = ciphers_66 @@ -291,7 +297,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: @@ -364,7 +371,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: diff --git 
a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 54c76a72..7451af9c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -31,6 +31,7 @@ from charmhelpers.core.host import ( lsb_release, + CompareHostReleases, ) try: @@ -67,6 +68,24 @@ def no_ip_found_error_out(network): raise ValueError(errmsg) +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. @@ -100,11 +119,9 @@ def get_address_in_network(network, fallback=None, fatal=False): if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -180,18 +197,18 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] return None @@ -521,7 +538,8 @@ def port_has_listener(address, port): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 7e8ecff4..e13450c1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -33,9 +33,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import ( - unitdata, -) +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( action_fail, @@ -55,6 +53,8 @@ application_version_set, ) +from charmhelpers.core.strutils import BasicStringComparator + from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, is_lvm_physical_volume, @@ -97,6 +97,22 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') 
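# The pattern this change introduces everywhere, comparing release names by
# their position in a canonical tuple instead of alphabetically, fits in a
# few lines. A standalone toy (not the charmhelpers class; 'artful', the
# Ubuntu codename that followed zesty, is included only to show where plain
# string comparison falls over after the z-wrap):

UBUNTU_CYCLE = ('trusty', 'utopic', 'vivid', 'wily', 'xenial',
                'yakkety', 'zesty', 'artful')

def newer_or_equal(a, b, releases=UBUNTU_CYCLE):
    """True when release a is the same as b or newer, judged by position."""
    return releases.index(a) >= releases.index(b)

assert newer_or_equal('xenial', 'trusty')    # agrees with string order
assert newer_or_equal('artful', 'zesty')     # string order gets this wrong
assert not newer_or_equal('vivid', 'xenial')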
+OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', +) + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -238,6 +254,17 @@ DEFAULT_LOOPBACK_SIZE = '5G' +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. + + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + def error_out(msg): juju_log("FATAL ERROR: %s" % msg, level='ERROR') sys.exit(1) @@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(init_in_source, init_source) with open(init_source, 'a') as outfile: - template = '/usr/share/openstack-pkg-tools/init-script-template' + template = ('/usr/share/openstack-pkg-tools/' + 'init-script-template') with open(template) as infile: outfile.write('\n\n{}'.format(infile.read())) @@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None): if not _release: _release = get_os_codename_install_source(source) - # TODO: this should be changed to a numeric comparison using a known list - # of releases and comparing by index. - return _release >= 'mitaka' + return CompareOpenStackReleases(_release) >= 'mitaka' def token_cache_pkgs(source=None, release=None): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index ae7f3f93..9417d684 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. - Returns False if no ceph key is available in relation state. 
+ @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break if not key: return False diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-proxy/hooks/charmhelpers/core/strutils.py +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index b61912a9..5b68a1e7 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -15,20 +15,22 @@ config, status_set, network_get_primary_address, - log, DEBUG, + log, + DEBUG, ) from charmhelpers.fetch import ( apt_install, - filter_installed_packages + filter_installed_packages, ) from charmhelpers.core.host import ( - lsb_release + lsb_release, + CompareHostReleases, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, - get_ipv6_addr + get_ipv6_addr, ) try: @@ -141,6 +143,7 @@ def get_network_addrs(config_opt): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..346e6fea 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, 
) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py b/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py +++ b/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-proxy/tests/charmhelpers/core/strutils.py b/ceph-proxy/tests/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-proxy/tests/charmhelpers/core/strutils.py +++ b/ceph-proxy/tests/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-proxy/tests/charmhelpers/osplatform.py b/ceph-proxy/tests/charmhelpers/osplatform.py new file mode 100644 index 00000000..d9a4d5c0 --- /dev/null +++ b/ceph-proxy/tests/charmhelpers/osplatform.py @@ -0,0 +1,25 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." 
+ .format(current_platform)) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index d8d8d038..1610be31 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -14,7 +14,7 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* +passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] basepython = python2.7 From 82931d844f43fc8b9c91be59896690506deef375 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 29 Mar 2017 15:20:40 +0100 Subject: [PATCH 1298/2699] Fix alphanumeric comparisons for openstack and ubuntu releases - sync charmhelpers with fix-alpha helpers - fix up code where the alpha comparisons are done Change-Id: Iacbb082b9db0cff48ddd595cc1eaa8440aa16892 Related-Bug: #1659575 --- ceph-radosgw/charm-helpers-tests.yaml | 2 +- .../contrib/hardening/apache/checks/config.py | 10 ++-- .../{hardening.conf => 99-hardening.conf} | 20 +++++-- .../contrib/hardening/audits/__init__.py | 13 ++--- .../contrib/hardening/defaults/apache.yaml | 5 +- .../hardening/defaults/apache.yaml.schema | 3 ++ .../contrib/hardening/defaults/os.yaml | 1 + .../contrib/hardening/defaults/os.yaml.schema | 1 + .../contrib/hardening/host/checks/profile.py | 10 +++- .../hardening/host/templates/99-hardening.sh | 5 ++ .../contrib/hardening/ssh/checks/config.py | 20 ++++--- .../hooks/charmhelpers/contrib/network/ip.py | 54 ++++++++++++------- .../contrib/openstack/amulet/utils.py | 3 +- .../charmhelpers/contrib/openstack/context.py | 35 ++++++------ .../charmhelpers/contrib/openstack/neutron.py | 19 ++++--- .../charmhelpers/contrib/openstack/utils.py | 40 +++++++++++--- .../contrib/storage/linux/ceph.py | 18 ++++--- ceph-radosgw/hooks/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 ++++++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++++++++ .../hooks/charmhelpers/core/strutils.py | 53 ++++++++++++++++++ ceph-radosgw/hooks/utils.py | 3 +- .../contrib/openstack/amulet/utils.py | 3 +- ceph-radosgw/tests/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/centos.py | 16 ++++++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++++++++ .../tests/charmhelpers/core/strutils.py | 53 ++++++++++++++++++ ceph-radosgw/tests/charmhelpers/osplatform.py | 25 +++++++++ ceph-radosgw/tox.ini | 2 +- 29 files changed, 412 insertions(+), 86 deletions(-) rename ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/{hardening.conf => 99-hardening.conf} (56%) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh create mode 100644 ceph-radosgw/tests/charmhelpers/osplatform.py diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml index 87c812fc..7e150c19 100644 --- a/ceph-radosgw/charm-helpers-tests.yaml +++ b/ceph-radosgw/charm-helpers-tests.yaml @@ -4,4 +4,4 @@ include: - core - contrib.amulet - contrib.openstack.amulet - - core + - osplatform diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 51b636f7..b18b263d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -26,6 +26,7 @@ DirectoryPermissionAudit, NoReadWriteForOther, TemplatedFile, + DeletedFile ) from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit from 
charmhelpers.contrib.hardening.apache import TEMPLATES_DIR @@ -52,13 +53,13 @@ def get_audits(): 'mods-available/alias.conf'), context, TEMPLATES_DIR, - mode=0o0755, + mode=0o0640, user='root', service_actions=[{'service': 'apache2', 'actions': ['restart']}]), TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/hardening.conf'), + 'conf-enabled/99-hardening.conf'), context, TEMPLATES_DIR, mode=0o0640, @@ -69,11 +70,13 @@ def get_audits(): DirectoryPermissionAudit(settings['common']['apache_dir'], user='root', group='root', - mode=0o640), + mode=0o0750), DisabledModuleAudit(settings['hardening']['modules_to_disable']), NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) ] return audits @@ -94,5 +97,4 @@ def __call__(self): ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - ctxt['traceenable'] = settings['hardening']['traceenable'] return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 56% rename from ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf rename to ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf index 07945418..22b68041 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -4,15 +4,29 @@ ############################################################################### - + # http://httpd.apache.org/docs/2.4/upgrading.html {% if apache_version > '2.2' -%} Require all granted {% else -%} - Order Allow,Deny - Deny from all + Order Allow,Deny + Deny from all {% endif %} + + Options -Indexes -FollowSymLinks + AllowOverride None + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 9bf9c3c6..6dd5b05f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -49,13 +49,6 @@ def _take_action(self): # Invoke the callback if there is one. 
if hasattr(self.unless, '__call__'): - results = self.unless() - if results: - return False - else: - return True - - if self.unless: - return False - else: - return True + return not self.unless() + + return not self.unless diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml index e5ada29f..0f940d4c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -10,4 +10,7 @@ common: hardening: traceenable: 'off' allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] \ No newline at end of file + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema index 227589b5..c112137c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -7,3 +7,6 @@ common: hardening: allowed_http_methods: modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml index ddd4286c..9a8627b5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -58,6 +58,7 @@ security: rsync kernel_enable_module_loading: True # (type:boolean) kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 sysctl: kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema index 88b3966e..cc3b9c20 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -34,6 +34,7 @@ security: packages_list: kernel_enable_module_loading: kernel_enable_core_dump: + ssh_tmout: sysctl: kernel_secure_sysrq: kernel_enable_sysrq: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index 56d65263..2727428d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -25,7 +25,6 @@ def get_audits(): audits = [] settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be # created as they may contain sensitive information. 
if not settings['security']['kernel_enable_core_dump']: @@ -33,11 +32,18 @@ def get_audits(): ProfileContext(), template_dir=TEMPLATES_DIR, mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) return audits class ProfileContext(object): def __call__(self): - ctxt = {} + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 00000000..616cef46 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index f3cac6d9..41bed2d1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -27,7 +27,10 @@ apt_install, apt_update, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) from charmhelpers.contrib.hardening.audits.file import ( TemplatedFile, FileContentAudit, @@ -68,7 +71,8 @@ def get_macs(self, allow_weak_mac): 'weak': default + ',hmac-sha1'} # Use newer ciphers on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) macs = macs_66 @@ -96,7 +100,8 @@ def get_kexs(self, allow_weak_kex): 'weak': weak} # Use newer kex on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new key exchange ' 'algorithms', level=DEBUG) kex = kex_66 @@ -119,7 +124,8 @@ def get_ciphers(self, cbc_required): 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} # Use newer ciphers on ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new ciphers', level=DEBUG) cipher = ciphers_66 @@ -291,7 +297,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: @@ -364,7 +371,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 54c76a72..7451af9c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -31,6 +31,7 @@ from charmhelpers.core.host import ( lsb_release, + CompareHostReleases, ) try: @@ -67,6 +68,24 @@ def no_ip_found_error_out(network): raise ValueError(errmsg) +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. @@ -100,11 +119,9 @@ def get_address_in_network(network, fallback=None, fatal=False): if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -180,18 +197,18 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] return None @@ -521,7 +538,8 @@ def port_has_listener(address, port): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..346e6fea 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr 
= 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 6cdbbbbf..7876145d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -59,6 +59,7 @@ write_file, pwgen, lsb_release, + CompareHostReleases, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -155,7 +156,8 @@ def context_complete(self, ctxt): if self.missing_data: self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) else: self.complete = True return self.complete @@ -213,8 +215,9 @@ def __call__(self): hostname_key = "{}_hostname".format(self.relation_prefix) else: hostname_key = "hostname" - access_hostname = get_address_in_network(access_network, - unit_get('private-address')) + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -308,7 +311,10 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): - def __init__(self, service=None, service_user=None, rel_name='identity-service'): + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): self.service = service self.service_user = service_user self.rel_name = rel_name @@ -457,19 +463,17 @@ def __call__(self): host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) transport_hosts = rabbitmq_hosts if transport_hosts: - transport_url_hosts = '' - for host in transport_hosts: - if transport_url_hosts: - format_string = ",{}:{}@{}:{}" - else: - format_string = "{}:{}@{}:{}" - transport_url_hosts += format_string.format( - ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], - host, rabbitmq_port) + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) ctxt['transport_url'] = "rabbit://{}/{}".format( transport_url_hosts, vhost) @@ -1601,7 +1605,8 @@ def __call__(self): if ctxt['use_memcache']: # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead - if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) > 'trusty': ctxt['memcache_server'] = '::1' else: ctxt['memcache_server'] = 'ip6-localhost' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index a8f1ed72..37fa0eb0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,7 +23,10 @@ ERROR, ) -from charmhelpers.contrib.openstack.utils import os_release +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) def headers_package(): @@ -198,7 +201,8 @@ def neutron_plugins(): }, 'plumgrid': { 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': 
'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), 'contexts': [ context.SharedDBContext(user=config('database-user'), database=config('database'), @@ -225,7 +229,7 @@ def neutron_plugins(): 'server_services': ['neutron-server'] } } - if release >= 'icehouse': + if CompareOpenStackReleases(release) >= 'icehouse': # NOTE: patch in ml2 plugin for icehouse onwards plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' @@ -233,10 +237,10 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] - if release >= 'kilo': + if CompareOpenStackReleases(release) >= 'kilo': plugins['midonet']['driver'] = ( 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if release >= 'liberty': + if CompareOpenStackReleases(release) >= 'liberty': plugins['midonet']['driver'] = ( 'midonet.neutron.plugin_v1.MidonetPluginV2') plugins['midonet']['server_packages'].remove( @@ -244,10 +248,11 @@ def neutron_plugins(): plugins['midonet']['server_packages'].append( 'python-networking-midonet') plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') - if release >= 'mitaka': + if CompareOpenStackReleases(release) >= 'mitaka': plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 7e8ecff4..e13450c1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -33,9 +33,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import ( - unitdata, -) +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( action_fail, @@ -55,6 +53,8 @@ application_version_set, ) +from charmhelpers.core.strutils import BasicStringComparator + from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, is_lvm_physical_volume, @@ -97,6 +97,22 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', +) + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -238,6 +254,17 @@ DEFAULT_LOOPBACK_SIZE = '5G' +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. 
+ + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + def error_out(msg): juju_log("FATAL ERROR: %s" % msg, level='ERROR') sys.exit(1) @@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(init_in_source, init_source) with open(init_source, 'a') as outfile: - template = '/usr/share/openstack-pkg-tools/init-script-template' + template = ('/usr/share/openstack-pkg-tools/' + 'init-script-template') with open(template) as infile: outfile.write('\n\n{}'.format(infile.read())) @@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None): if not _release: _release = get_os_codename_install_source(source) - # TODO: this should be changed to a numeric comparison using a known list - # of releases and comparing by index. - return _release >= 'mitaka' + return CompareOpenStackReleases(_release) >= 'mitaka' def token_cache_pkgs(source=None, release=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index ae7f3f93..9417d684 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. - Returns False if no ceph key is available in relation state. + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break if not key: return False diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. 
+ + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 6d02a5d4..afddf8c7 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -48,6 +48,7 @@ cmp_pkgrevno, lsb_release, mkdir, + CompareHostReleases, ) from charmhelpers.fetch import ( apt_cache, @@ -203,7 +204,7 @@ def check_optional_relations(configs): def setup_ipv6(): ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower() - if ubuntu_rel < "trusty": + if CompareHostReleases(ubuntu_rel) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..346e6fea 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 05edfa50..0ee5cb9f 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py +++ 
@@ -2,6 +2,22 @@
 import yum
 import os
 
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Host releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with mitaka
+    """
+
+    def __init__(self, item):
+        raise NotImplementedError(
+            "CompareHostReleases() is not implemented for CentOS")
+
 
 def service_available(service_name):
     # """Determine whether a system service is available."""
diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py
index 8c66af55..0448288c 100644
--- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,37 @@
 import subprocess
 
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+    'lucid',
+    'maverick',
+    'natty',
+    'oneiric',
+    'precise',
+    'quantal',
+    'raring',
+    'saucy',
+    'trusty',
+    'utopic',
+    'vivid',
+    'wily',
+    'xenial',
+    'yakkety',
+    'zesty',
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Ubuntu releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something with mitaka
+    """
+    _list = UBUNTU_RELEASES
+
 
 def service_available(service_name):
     """Determine whether a system service is available"""
diff --git a/ceph-radosgw/tests/charmhelpers/core/strutils.py b/ceph-radosgw/tests/charmhelpers/core/strutils.py
index dd9b9717..685dabde 100644
--- a/ceph-radosgw/tests/charmhelpers/core/strutils.py
+++ b/ceph-radosgw/tests/charmhelpers/core/strutils.py
@@ -68,3 +68,56 @@ def bytes_from_string(value):
         msg = "Unable to interpret string value '%s' as bytes" % (value)
         raise ValueError(msg)
     return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-radosgw/tests/charmhelpers/osplatform.py b/ceph-radosgw/tests/charmhelpers/osplatform.py new file mode 100644 index 00000000..d9a4d5c0 --- /dev/null +++ b/ceph-radosgw/tests/charmhelpers/osplatform.py @@ -0,0 +1,25 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 6f1aeace..7c2936e3 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -14,7 +14,7 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_URL +passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] basepython = python2.7 From 9924a597d5f92be0484694076a4a23cae3078dfa Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Mon, 6 Mar 2017 13:59:50 -0800 Subject: [PATCH 1299/2699] Import Existing Clusters Adding a config.yaml variable to allow existing ceph monitor clusters to be imported. This patch will use the config variable first and the get_mon_hosts function second in order to determine which mons are in the cluster. 
Change-Id: Ia06c509edb18615a9582f0184b03196a04ce9a0f
---
 ceph-mon/README.md                     | 2 ++
 ceph-mon/config.yaml                   | 7 +++++++
 ceph-mon/hooks/ceph_hooks.py           | 4 ++--
 ceph-mon/unit_tests/test_ceph_hooks.py | 1 +
 4 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index d17c44b6..f92a039d 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -114,6 +114,8 @@ Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/ra
 **NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
 
+**NOTE**: The mon-hosts field is only used to migrate existing clusters to a juju managed solution and should be left blank otherwise.
+
 # Contact Information
 
 ## Authors
diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 1d76b2f5..473aa50f 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -56,6 +56,13 @@ options:
       How many nodes to wait for before trying to create the monitor cluster
       this number needs to be odd, and more than three is a waste except for
       very large clusters.
+  mon-hosts:
+    type: string
+    default:
+    description: |
+      A comma separated list of ceph mon hosts to use. This field is only
+      used to migrate an existing cluster to a juju managed solution
+      and should be left blank otherwise.
   expected-osd-count:
     type: int
     default: 0
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 2c975429..321b0ffc 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -137,7 +137,7 @@ def get_ceph_context():
     cephcontext = {
         'auth_supported': config('auth-supported'),
-        'mon_hosts': ' '.join(get_mon_hosts()),
+        'mon_hosts': config('mon-hosts') or ' '.join(get_mon_hosts()),
         'fsid': leader_get('fsid'),
         'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
         'use_syslog': str(config('use-syslog')).lower(),
@@ -468,7 +468,7 @@ def admin_relation_joined(relid=None):
     data = {'key': ceph.get_named_key(name=name, caps=ceph.admin_caps),
             'fsid': leader_get('fsid'),
             'auth': config('auth-supported'),
-            'mon_hosts': " ".join(get_mon_hosts())
+            'mon_hosts': config('mon-hosts') or " ".join(get_mon_hosts())
             }
     relation_set(relation_id=relid,
                  relation_settings=data)
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index 2d5a4b33..5decb5ac 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -22,6 +22,7 @@
     'osd-journal-size': 1024,
     'use-direct-io': True,
     'osd-format': 'ext4',
+    'mon-hosts': '',
     'prefer-ipv6': False}

From bd4b72e49237d5b534da244307b65a6a422564c6 Mon Sep 17 00:00:00 2001
From: Felipe Reyes
Date: Fri, 17 Feb 2017 14:25:47 -0300
Subject: [PATCH 1300/2699] Set ceph-public-address with monitor-hosts config

Charms capable of relating to ceph build their list of monitor hosts
from the ceph-public-address field in the relation data, or simply
from the private-address field provided by juju. This patch populates
ceph-public-address in the relations with the list of monitor hosts
provided in the configuration, so that clients connect directly to the
non-juju-managed ceph cluster and not to the ceph-proxy unit's IP.
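The intended effect on the relation data can be sketched as follows
(illustrative only; ceph.get_named_key(), config() and relation_set()
are the charm's existing helpers):

    data = {'key': ceph.get_named_key(service_name),
            'auth': config('auth-supported'),
            'ceph-public-address': config('monitor-hosts')}
    relation_set(relation_id=relid, relation_settings=data)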
Closes-Bug: #1642430 Change-Id: Ib2e3c6a8b0d9fc0b6d729d845a802945b64fc74c --- ceph-proxy/hooks/ceph_hooks.py | 26 ++--- ceph-proxy/unit_tests/test_ceph_hooks.py | 132 +++++++++++++++++++++++ 2 files changed, 146 insertions(+), 12 deletions(-) create mode 100644 ceph-proxy/unit_tests/test_ceph_hooks.py diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 74588875..960eeba2 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -18,6 +18,7 @@ log, DEBUG, config, + is_leader, relation_ids, related_units, relation_get, @@ -44,11 +45,7 @@ process_requests ) -from utils import ( - get_public_addr, - get_unit_hostname, -) - +from utils import get_unit_hostname from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() @@ -137,12 +134,12 @@ def radosgw_relation(relid=None, unit=None): if ready(): log('mon cluster in quorum and osds related ' '- providing radosgw with keys') - public_addr = get_public_addr() + ceph_addrs = config('monitor-hosts') data = { 'fsid': config('fsid'), 'radosgw_key': ceph.get_radosgw_key(), - 'auth': 'cephx', - 'ceph-public-address': public_addr, + 'auth': config('auth-supported'), + 'ceph-public-address': ceph_addrs, } settings = relation_get(rid=relid, unit=unit) @@ -153,6 +150,7 @@ def radosgw_relation(relid=None, unit=None): unit_response_key = 'broker-rsp-' + unit_id data[unit_response_key] = rsp + log('relation_set (%s): %s' % (relid, str(data)), level=DEBUG) relation_set(relation_id=relid, relation_settings=data) else: log('FSID or admin key not provided, please configure them') @@ -171,10 +169,12 @@ def client_relation_joined(relid=None): service_name = units[0].split('/')[0] if service_name is not None: - public_addr = get_public_addr() + ceph_addrs = config('monitor-hosts') data = {'key': ceph.get_named_key(service_name), - 'auth': 'cephx', - 'ceph-public-address': public_addr} + 'auth': config('auth-supported'), + 'ceph-public-address': ceph_addrs} + + log('relation_set (%s): %s' % (relid, str(data)), level=DEBUG) relation_set(relation_id=relid, relation_settings=data) else: @@ -187,7 +187,8 @@ def client_relation_changed(): if ready(): settings = relation_get() if 'broker_req' in settings: - if not ceph.is_leader(): + # the request is processed only by the leader as reported by juju + if not is_leader(): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) @@ -199,6 +200,7 @@ def client_relation_changed(): 'broker_rsp': rsp, unit_response_key: rsp, } + log('relation_set: %s' % str(data), level=DEBUG) relation_set(relation_settings=data) else: log('FSID or admin key not provided, please configure them') diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py new file mode 100644 index 00000000..802fce98 --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -0,0 +1,132 @@ +import mock +import sys + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
+mock_apt = mock.MagicMock()
+sys.modules['apt'] = mock_apt
+mock_apt.apt_pkg = mock.MagicMock()
+
+mock_apt_pkg = mock.MagicMock()
+sys.modules['apt_pkg'] = mock_apt_pkg
+mock_apt_pkg.upstream_version = mock.MagicMock()
+mock_apt_pkg.upstream_version.return_value = '10.1.2-0ubuntu1'
+
+import test_utils
+import ceph_hooks as hooks
+
+CEPH_KEY = 'AQDmP6dYWto6AhAAPKMkuvdFZYPRaiboU27IsA=='
+CEPH_GET_KEY = """[client.admin]
+        key = %s
+        caps mds = "allow *"
+        caps mon = "allow *"
+        caps osd = "allow *"
+""" % CEPH_KEY
+
+TO_PATCH = [
+    'config',
+    'install_alternative',
+    'mkdir',
+    'related_units',
+    'relation_get',
+    'relation_ids',
+    'relation_set',
+    'remote_unit',
+    'render',
+    'service_name',
+    'log'
+]
+
+
+def fake_log(message, level=None):
+    print("juju-log %s: %s" % (level, message))
+
+
+class TestHooks(test_utils.CharmTestCase):
+    def setUp(self):
+        super(TestHooks, self).setUp(hooks, TO_PATCH)
+        self.service_name.return_value = 'ceph-service'
+        self.config.side_effect = lambda x: self.test_config.get(x)
+        self.remote_unit.return_value = 'client/0'
+        self.log.side_effect = fake_log
+
+    @mock.patch('subprocess.check_output')
+    def test_radosgw_relation(self, mock_check_output):
+
+        settings = {'ceph-public-address': '127.0.0.1:1234 [::1]:4321',
+                    'radosgw_key': CEPH_KEY,
+                    'auth': 'cephx',
+                    'fsid': 'some-fsid'}
+
+        mock_check_output.return_value = CEPH_GET_KEY
+        self.relation_get.return_value = {}
+        self.test_config.set('monitor-hosts', settings['ceph-public-address'])
+        self.test_config.set('fsid', settings['fsid'])
+        self.test_config.set('admin-key', 'some-admin-key')
+        hooks.radosgw_relation()
+        self.relation_set.assert_called_with(relation_id=None,
+                                             relation_settings=settings)
+
+    @mock.patch('ceph.ceph_user')
+    @mock.patch.object(hooks, 'radosgw_relation')
+    @mock.patch.object(hooks, 'client_relation_joined')
+    def test_emit_cephconf(self, mock_client_rel, mock_rgw_rel,
+                           mock_ceph_user):
+        mock_ceph_user.return_value = 'ceph-user'
+        self.test_config.set('monitor-hosts', '127.0.0.1:1234')
+        self.test_config.set('fsid', 'abc123')
+        self.test_config.set('admin-key', 'key123')
+
+        def c(k):
+            x = {'radosgw': ['rados:1'],
+                 'client': ['client:1'],
+                 'rados:1': ['rados/1']}
+            return x[k]
+
+        self.relation_ids.side_effect = c
+        self.related_units.side_effect = c
+
+        hooks.emit_cephconf()
+
+        context = {'auth_supported': self.test_config.get('auth-supported'),
+                   'mon_hosts': self.test_config.get('monitor-hosts'),
+                   'fsid': self.test_config.get('fsid'),
+                   'use_syslog': str(self.test_config.get(
+                       'use-syslog')).lower(),
+                   'loglevel': self.test_config.get('loglevel')}
+
+        dirname = '/var/lib/charm/ceph-service'
+        self.mkdir.assert_called_with(dirname, owner='ceph-user',
+                                      group='ceph-user')
+        self.render.assert_any_call('ceph.conf',
+                                    '%s/ceph.conf' % dirname,
+                                    context, perms=0o644)
+        self.install_alternative.assert_called_with('ceph.conf',
+                                                    '/etc/ceph/ceph.conf',
+                                                    '%s/ceph.conf' % dirname,
+                                                    100)
+        keyring = 'ceph.client.admin.keyring'
+        context = {'admin_key': self.test_config.get('admin-key')}
+        self.render.assert_any_call(keyring,
+                                    '/etc/ceph/' + keyring,
+                                    context, owner='ceph-user', perms=0o600)
+
+        mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1')
+        mock_client_rel.assert_called_with('client:1')
+
+    @mock.patch('subprocess.check_output')
+    def test_client_relation_joined(self, mock_check_output):
+        mock_check_output.return_value = CEPH_GET_KEY
+        self.test_config.set('monitor-hosts', '127.0.0.1:1234')
+        self.test_config.set('fsid', 'abc123')
+        self.test_config.set('admin-key', 'some-admin-key')
+        self.related_units.return_value = ['client/0']
+
+        hooks.client_relation_joined('client:1')
+
+        data = {'key': CEPH_KEY,
+                'auth': 'cephx',
+                'ceph-public-address': self.test_config.get('monitor-hosts')}
+
+        self.relation_set.assert_called_with(relation_id='client:1',
+                                             relation_settings=data)

From 6dc9f3d29589200a37d7646a8f05eb34e9d24277 Mon Sep 17 00:00:00 2001
From: Ante Karamatic
Date: Thu, 30 Mar 2017 14:40:26 +0800
Subject: [PATCH 1301/2699] Allow ceph-osd to create temporary links within
 OSD's filesystem

The AppArmor profile prevents link operations within
/var/lib/ceph/osd/*. This leads to a daemon coredump. This patch
ensures ceph-osd is able to create links.

Change-Id: Ia03baac0fec7f134f53254b18e5498a87656817f
Closes-Bug: #1677470
---
 ceph-osd/files/apparmor/usr.bin.ceph-osd | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd
index b8856e6f..8edab6ed 100644
--- a/ceph-osd/files/apparmor/usr.bin.ceph-osd
+++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd
@@ -27,6 +27,7 @@
   /srv/ceph/** rwk,
   /tmp/ r,
   /var/lib/ceph/** rwk,
+  /var/lib/ceph/osd/** l,
   /var/lib/charm/*/ceph.conf r,
   /var/log/ceph/* rwk,
   /var/run/ceph/* rwk,

From 7d9a9a1ec37dbb3770882c9ca1be40b98a27e019 Mon Sep 17 00:00:00 2001
From: Billy Olsen
Date: Fri, 31 Mar 2017 10:12:38 -0700
Subject: [PATCH 1302/2699] Change mon-host config to monitor-hosts for
 consistency

The ceph-proxy charm and this charm both allow the specification of
monitor hosts for different purposes. This change brings the ceph-mon
charm in agreement with the ceph-proxy charm regarding what the name
of the config option should be.

Change-Id: I0defbbdf09710109bd7edf25987e3aef266117ac
---
 ceph-mon/README.md                     | 2 +-
 ceph-mon/config.yaml                   | 4 ++--
 ceph-mon/hooks/ceph_hooks.py           | 5 +++--
 ceph-mon/unit_tests/test_ceph_hooks.py | 2 +-
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index f92a039d..c7bc789f 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -114,7 +114,7 @@ Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/ra
 **NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
 
-**NOTE**: The mon-hosts field is only used to migrate existing clusters to a juju managed solution and should be left blank otherwise.
+**NOTE**: The monitor-hosts field is only used to migrate existing clusters to a juju managed solution and should be left blank otherwise.
 
 # Contact Information
 
diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 473aa50f..15be5780 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -56,11 +56,11 @@ options:
       How many nodes to wait for before trying to create the monitor cluster
       this number needs to be odd, and more than three is a waste except for
       very large clusters.
-  mon-hosts:
+  monitor-hosts:
     type: string
     default:
     description: |
-      A comma separated list of ceph mon hosts to use. This field is only
+      A space separated list of ceph mon hosts to use. This field is only
       used to migrate an existing cluster to a juju managed solution
       and should be left blank otherwise.
expected-osd-count: diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 321b0ffc..3074c2d2 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -137,7 +137,7 @@ def get_ceph_context(): cephcontext = { 'auth_supported': config('auth-supported'), - 'mon_hosts': config('mon-hosts') or ' '.join(get_mon_hosts()), + 'mon_hosts': config('monitor-hosts') or ' '.join(get_mon_hosts()), 'fsid': leader_get('fsid'), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'use_syslog': str(config('use-syslog')).lower(), @@ -465,10 +465,11 @@ def admin_relation_joined(relid=None): if name is None: name = 'admin' log('mon cluster in quorum - providing client with keys') + mon_hosts = config('monitor-hosts') or ' '.join(get_mon_hosts()) data = {'key': ceph.get_named_key(name=name, caps=ceph.admin_caps), 'fsid': leader_get('fsid'), 'auth': config('auth-supported'), - 'mon_hosts': config('mon-hosts') or " ".join(get_mon_hosts()) + 'mon_hosts': mon_hosts, } relation_set(relation_id=relid, relation_settings=data) diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 5decb5ac..96f5b7b5 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -22,7 +22,7 @@ 'osd-journal-size': 1024, 'use-direct-io': True, 'osd-format': 'ext4', - 'mon-hosts': '', + 'monitor-hosts': '', 'prefer-ipv6': False} From 4443bcbce8d4dcf16639161dd21a1890e27da915 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 5 Apr 2017 11:42:33 +0100 Subject: [PATCH 1303/2699] Fix incorrectly applied cluster relation settings This is a second pass at a fix for bug 1641870. Change-Id: Id691e9746bad6ff7c59fde1349755fe3dd2c7f43 Closes-Bug: 1641870 --- ceph-radosgw/hooks/hooks.py | 5 +---- ceph-radosgw/unit_tests/test_hooks.py | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 6b613ea3..38de5edf 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -239,10 +239,7 @@ def cluster_joined(rid=None): config('os-{}-network'.format(addr_type)) ) if address: - relation_set( - relation_id=rid, - settings={'{}-address'.format(addr_type): address} - ) + settings['{}-address'.format(addr_type)] = address if config('prefer-ipv6'): private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 4687b040..007cc35d 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -13,7 +13,7 @@ # limitations under the License. 
from mock import ( - patch, + patch, call ) from test_utils import ( @@ -218,6 +218,28 @@ def test_canonical_url_ipv6(self, _config, _unit_get, _is_clustered): self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), 'http://[%s]' % ipv6_addr) + @patch.object(ceph_hooks, 'get_address_in_network') + def test_cluster_joined(self, mock_get_addr): + addrs = {'10.0.0.0/24': '10.0.0.1', + '10.0.1.0/24': '10.0.1.1', + '10.0.2.0/24': '10.0.2.1'} + + def fake_get_address_in_network(network): + return addrs.get(network) + + mock_get_addr.side_effect = fake_get_address_in_network + + self.test_config.set('os-public-network', '10.0.0.0/24') + self.test_config.set('os-admin-network', '10.0.1.0/24') + self.test_config.set('os-internal-network', '10.0.2.0/24') + + ceph_hooks.cluster_joined() + self.relation_set.assert_has_calls( + [call(relation_id=None, + **{'admin-address': '10.0.1.1', + 'internal-address': '10.0.2.1', + 'public-address': '10.0.0.1'})]) + def test_cluster_changed(self): _id_joined = self.patch('identity_joined') self.relation_ids.return_value = ['rid'] From 6217f6d7d9a4db8008b45efe7a4b938e01179351 Mon Sep 17 00:00:00 2001 From: Sandor Zeestraten Date: Sun, 9 Apr 2017 12:07:58 +0200 Subject: [PATCH 1304/2699] Update docs for no-longer mandatory config options Specifying the `fsid` and `monitor-secret` config options is no longer mandatory when deploying ceph-mon as of [1]. This updates the README and the config option descriptions to reflect this. [1]: https://github.com/openstack/charm-ceph-mon/commit/52c345d4 Change-Id: I0d2e0be05ff5845e3c287e131a553810faecb038 Closes-Bug: 1661737 --- ceph-mon/README.md | 36 +++++------------------------------- ceph-mon/config.yaml | 24 +++++++++++++++--------- 2 files changed, 20 insertions(+), 40 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index f92a039d..a38d5e34 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -3,43 +3,17 @@ Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. -This charm deploys a Ceph cluster. -juju +This charm deploys a Ceph monitor cluster. # Usage -The ceph charm has two pieces of mandatory configuration for which no defaults -are provided. You _must_ set these configuration options before deployment or the charm will not work: - - fsid: - uuid specific to a ceph cluster used to ensure that different - clusters don't get mixed up - use `uuid` to generate one. - - monitor-secret: - a ceph generated key used by the daemons that manage to cluster - to control security. You can use the ceph-authtool command to - generate one: - - ceph-authtool /dev/stdout --name=mon. --gen-key - -These two pieces of configuration must NOT be changed post bootstrap; attempting -to do this will cause a reconfiguration error and new service units will not join -the existing ceph cluster. - -At a minimum you must provide a juju config file during initial deployment -with the fsid and monitor-secret options (contents of cepy.yaml below): - - ceph: - fsid: ecbb8960-0e21-11e2-b495-83a88f44db01 - monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg== - Boot things up by using: - juju deploy -n 3 --config ceph.yaml ceph + juju deploy -n 3 ceph-mon -By default the ceph cluster will not bootstrap until 3 service units have been -deployed and started; this is to ensure that a quorum is achieved prior to adding -storage devices.
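Operators who prefer to pin these values rather than rely on generation can still create them up front with the same tools the old text names; a minimal sketch using the stdlib plus ceph-authtool (the keyring output format is an assumption worth checking against your Ceph version):

    import subprocess
    import uuid

    fsid = str(uuid.uuid4())  # any UUID is a valid cluster fsid

    # ceph-authtool writes a small keyring to stdout; the secret sits on
    # its 'key = ...' line (assumed format for contemporary Ceph).
    keyring = subprocess.check_output(
        ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])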
+By default the ceph-mon cluster will not bootstrap until 3 service units have +been deployed and started; this is to ensure that a quorum is achieved prior to +adding storage devices. ## Actions diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 473aa50f..7e6d8ed3 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -7,10 +7,13 @@ options: type: string default: description: | - fsid of the ceph cluster. To generate a suitable value use `uuid` - . - This configuration element is mandatory and the service will fail on - install if it is not provided. + The unique identifier (fsid) of the Ceph cluster. + + To generate a suitable value use `uuid`. + If left empty, an fsid will be generated. + + NOTE: Changing this configuration after deployment is not supported and + new service units will not be able to join the cluster. config-flags: type: string default: @@ -43,12 +46,15 @@ options: type: string default: description: | - This value will become the mon. key. To generate a suitable value use: - . + The Ceph secret key used by Ceph monitors. This value will become the + mon.key. To generate a suitable value use: + ceph-authtool /dev/stdout --name=mon. --gen-key - . - This configuration element is mandatory and the service will fail on - install if it is not provided. + + If left empty, a secret key will be generated. + + NOTE: Changing this configuration after deployment is not supported and + new service units will not be able to join the cluster. monitor-count: type: int default: 3 From f367b33d3f94b724d408e3ba72fe5ceab53d1188 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 29 Mar 2017 14:27:51 +0100 Subject: [PATCH 1305/2699] Fix alphanumeric comparisons for openstack and ubuntu releases - sync charmhelpers with fix-alpha helpers - fix up code where the alpha comparisons are done Change-Id: I904f5faf2bd8fc28204d5e26c94877708cb98d7e Related-Bug: #1659575 --- ceph-osd/hooks/utils.py | 9 ++++++--- ceph-osd/lib/ceph/__init__.py | 23 ++++++++++++++++------- ceph-osd/tox.ini | 2 +- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 2acad66d..ea218860 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -19,7 +19,8 @@ cached, config, network_get_primary_address, - log, DEBUG, + log, + DEBUG, status_set, ) from charmhelpers.core import unitdata @@ -29,7 +30,8 @@ ) from charmhelpers.core.host import ( - lsb_release + lsb_release, + CompareHostReleases, ) from charmhelpers.contrib.network.ip import ( @@ -163,7 +165,8 @@ def get_network_addrs(config_opt): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index 7acbc521..32c7ce6c 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -38,7 +38,9 @@ owner, service_restart, service_start, - service_stop) + service_stop, + CompareHostReleases, +) from charmhelpers.core.hookenv import ( cached, config, @@ -46,21 +48,28 @@ status_set, DEBUG, ERROR, - WARNING) + WARNING, +) from charmhelpers.fetch import ( apt_cache, - add_source, apt_install, apt_update) + add_source, + apt_install, + apt_update, +) from charmhelpers.contrib.storage.linux.ceph 
import ( monitor_key_set, monitor_key_exists, monitor_key_get, - get_mon_map) + get_mon_map, +) from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, - is_device_mounted) + is_device_mounted, +) from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source) + get_os_codename_install_source, +) from ceph.ceph_helpers import check_output @@ -1180,7 +1189,7 @@ def upgrade_key_caps(key, caps): @cached def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' def bootstrap_monitor_cluster(secret): diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 6f1aeace..7c2936e3 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -14,7 +14,7 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_URL +passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] basepython = python2.7 From 64dae2cbbed6ce29d1c635c99d856b9aef1b03d1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 29 Mar 2017 13:28:45 +0100 Subject: [PATCH 1306/2699] Fix alphanumeric comparisons for openstack and ubuntu releases - sync charmhelpers with fix-alpha helpers - fix up code where the alpha comparisons are done Change-Id: I6efa2096237d8cbc6e869fc0f43dc622d4bea8e7 Related-Bug: #1659575 --- ceph-mon/charm-helpers-tests.yaml | 1 + .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../contrib/hardening/apache/checks/config.py | 10 +- .../{hardening.conf => 99-hardening.conf} | 20 ++- .../contrib/hardening/audits/__init__.py | 13 +- .../contrib/hardening/defaults/apache.yaml | 5 +- .../hardening/defaults/apache.yaml.schema | 3 + .../contrib/hardening/defaults/os.yaml | 1 + .../contrib/hardening/defaults/os.yaml.schema | 1 + .../contrib/hardening/host/checks/profile.py | 10 +- .../hardening/host/templates/99-hardening.sh | 5 + .../contrib/hardening/ssh/checks/config.py | 20 ++- .../hooks/charmhelpers/contrib/network/ip.py | 113 +++++++++------ .../contrib/openstack/amulet/utils.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 132 ++++++++++++------ .../charmhelpers/contrib/openstack/neutron.py | 19 ++- .../charmhelpers/contrib/openstack/utils.py | 40 +++++- .../contrib/storage/linux/ceph.py | 18 +-- ceph-mon/hooks/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/host_factory/centos.py | 16 +++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++ ceph-mon/hooks/charmhelpers/core/strutils.py | 53 +++++++ ceph-mon/hooks/utils.py | 9 +- ceph-mon/lib/ceph/__init__.py | 28 +++- .../contrib/openstack/amulet/utils.py | 5 +- ceph-mon/tests/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/host_factory/centos.py | 16 +++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++ ceph-mon/tests/charmhelpers/core/strutils.py | 53 +++++++ ceph-mon/tests/charmhelpers/osplatform.py | 25 ++++ ceph-mon/tox.ini | 2 +- 31 files changed, 555 insertions(+), 146 deletions(-) rename ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/{hardening.conf => 99-hardening.conf} (56%) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh create mode 100644 ceph-mon/tests/charmhelpers/osplatform.py diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index e5063253..b0de9df6 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -4,3 +4,4 @@ include: - contrib.amulet - 
contrib.openstack.amulet - core + - osplatform diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9646b838..8240249e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status' % svc + '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 51b636f7..b18b263d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -26,6 +26,7 @@ DirectoryPermissionAudit, NoReadWriteForOther, TemplatedFile, + DeletedFile ) from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR @@ -52,13 +53,13 @@ def get_audits(): 'mods-available/alias.conf'), context, TEMPLATES_DIR, - mode=0o0755, + mode=0o0640, user='root', service_actions=[{'service': 'apache2', 'actions': ['restart']}]), TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/hardening.conf'), + 'conf-enabled/99-hardening.conf'), context, TEMPLATES_DIR, mode=0o0640, @@ -69,11 +70,13 @@ def get_audits(): DirectoryPermissionAudit(settings['common']['apache_dir'], user='root', group='root', - mode=0o640), + mode=0o0750), DisabledModuleAudit(settings['hardening']['modules_to_disable']), NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) ] return audits @@ -94,5 +97,4 @@ def __call__(self): ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - ctxt['traceenable'] = settings['hardening']['traceenable'] return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 56% rename from ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf rename to ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf index 07945418..22b68041 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -4,15 +4,29 @@ ############################################################################### - + # http://httpd.apache.org/docs/2.4/upgrading.html {% if apache_version > '2.2' -%} Require all granted {% else -%} - Order Allow,Deny - Deny from all + Order Allow,Deny + Deny from all {% endif %} + + Options -Indexes -FollowSymLinks + AllowOverride None + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 
9bf9c3c6..6dd5b05f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -49,13 +49,6 @@ def _take_action(self): # Invoke the callback if there is one. if hasattr(self.unless, '__call__'): - results = self.unless() - if results: - return False - else: - return True - - if self.unless: - return False - else: - return True + return not self.unless() + + return not self.unless diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml index e5ada29f..0f940d4c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -10,4 +10,7 @@ common: hardening: traceenable: 'off' allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] \ No newline at end of file + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema index 227589b5..c112137c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -7,3 +7,6 @@ common: hardening: allowed_http_methods: modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml index ddd4286c..9a8627b5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -58,6 +58,7 @@ security: rsync kernel_enable_module_loading: True # (type:boolean) kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 sysctl: kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema index 88b3966e..cc3b9c20 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -34,6 +34,7 @@ security: packages_list: kernel_enable_module_loading: kernel_enable_core_dump: + ssh_tmout: sysctl: kernel_secure_sysrq: kernel_enable_sysrq: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index 56d65263..2727428d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -25,7 +25,6 @@ def get_audits(): audits = [] settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be # created as they may contain sensitive information. 
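The audits/__init__.py hunk earlier in this sync is a pure simplification: _take_action still invokes self.unless when it is callable and otherwise treats it as a flag, inverting the result either way. A self-contained sketch of the preserved behaviour (the Audit class name here is illustrative):

    class Audit(object):
        def __init__(self, unless=False):
            self.unless = unless

        def _take_action(self):
            # Invoke the callback if there is one; otherwise read the flag.
            if hasattr(self.unless, '__call__'):
                return not self.unless()
            return not self.unless

    assert Audit(unless=lambda: False)._take_action() is True
    assert Audit(unless=True)._take_action() is False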
if not settings['security']['kernel_enable_core_dump']: @@ -33,11 +32,18 @@ def get_audits(): ProfileContext(), template_dir=TEMPLATES_DIR, mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) return audits class ProfileContext(object): def __call__(self): - ctxt = {} + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 00000000..616cef46 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index f3cac6d9..41bed2d1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -27,7 +27,10 @@ apt_install, apt_update, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) from charmhelpers.contrib.hardening.audits.file import ( TemplatedFile, FileContentAudit, @@ -68,7 +71,8 @@ def get_macs(self, allow_weak_mac): 'weak': default + ',hmac-sha1'} # Use newer ciphers on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) macs = macs_66 @@ -96,7 +100,8 @@ def get_kexs(self, allow_weak_kex): 'weak': weak} # Use newer kex on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new key exchange ' 'algorithms', level=DEBUG) kex = kex_66 @@ -119,7 +124,8 @@ def get_ciphers(self, cbc_required): 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} # Use newer ciphers on ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new ciphers', level=DEBUG) cipher = ciphers_66 @@ -291,7 +297,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: @@ -364,7 +371,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py 
b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 54c76a72..fc3f5e3e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -31,6 +31,7 @@ from charmhelpers.core.host import ( lsb_release, + CompareHostReleases, ) try: @@ -67,6 +68,24 @@ def no_ip_found_error_out(network): raise ValueError(errmsg) +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. @@ -92,19 +111,17 @@ def get_address_in_network(network, fallback=None, fatal=False): for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -180,18 +197,18 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] return None @@ -222,6 +239,16 @@ def format_ipv6_addr(address): return None +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT) + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + except subprocess.CalledProcessError: + return True + + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): """Return the assigned IP address for a given interface, if any. 
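One caveat on the new is_ipv6_disabled helper above: subprocess.check_output returns bytes on Python 3, so the substring test as written relies on the Python 2 runtime these charms still target (the tox hunks in this series keep a py27 environment). A version-agnostic sketch of the same check:

    import subprocess

    def is_ipv6_disabled():
        # sysctl prints 'net.ipv6.conf.all.disable_ipv6 = 1' when IPv6 is
        # disabled; a failed lookup is treated as disabled, as above.
        try:
            result = subprocess.check_output(
                ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
                stderr=subprocess.STDOUT).decode('utf-8')
        except subprocess.CalledProcessError:
            return True
        return 'net.ipv6.conf.all.disable_ipv6 = 1' in result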
@@ -521,36 +548,44 @@ def port_has_listener(address, port): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") -def get_relation_ip(interface, config_override=None): - """Return this unit's IP for the given relation. +def get_relation_ip(interface, cidr_network=None): + """Return this unit's IP for the given interface. Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including configuration parameter - override and IPv6. + Handle all address selection options including passed cidr network and + IPv6. - Usage: get_relation_ip('amqp', config_override='access-network') + Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') @param interface: string name of the relation. - @param config_override: string name of the config option for network - override. Supports legacy network override configuration parameters. + @param cidr_network: string CIDR Network to select an address from. @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. @returns IPv6 or IPv4 address """ + # Select the interface address first + # For possible use as a fallback bellow with get_address_in_network + try: + # Get the interface specific IP + address = network_get_primary_address(interface) + except NotImplementedError: + # If network-get is not available + address = get_host_ip(unit_get('private-address')) - fallback = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): + # Currently IPv6 has priority, eventually we want IPv6 to just be + # another network space. assert_charm_supports_ipv6() return get_ipv6_addr()[0] - elif config_override and config(config_override): - return get_address_in_network(config(config_override), - fallback) - else: - try: - return network_get_primary_address(interface) - except NotImplementedError: - return fallback + elif cidr_network: + # If a specific CIDR network is passed get the address from that + # network. 
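+        # get_address_in_network() returns its fallback argument (the
+        # interface address selected above) when nothing matches the CIDR.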
+ return get_address_in_network(cidr_network, address) + + # Return the interface address + return address diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..bcef4cd0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -546,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 6cdbbbbf..2adf2cb8 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -59,6 +59,8 @@ write_file, pwgen, lsb_release, + CompareHostReleases, + is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -87,6 +89,7 @@ format_ipv6_addr, is_address_in_network, is_bridge_member, + is_ipv6_disabled, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, @@ -108,6 +111,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' def ensure_packages(packages): @@ -155,7 +159,8 @@ def context_complete(self, ctxt): if self.missing_data: self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) else: self.complete = True return self.complete @@ -213,8 +218,9 @@ def __call__(self): hostname_key = "{}_hostname".format(self.relation_prefix) else: hostname_key = "hostname" - access_hostname = get_address_in_network(access_network, - unit_get('private-address')) + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -308,7 +314,10 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): - def __init__(self, service=None, service_user=None, rel_name='identity-service'): + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): self.service = service self.service_user = service_user self.rel_name = rel_name @@ -457,19 +466,17 @@ def __call__(self): host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = 
','.join(rabbitmq_hosts) transport_hosts = rabbitmq_hosts if transport_hosts: - transport_url_hosts = '' - for host in transport_hosts: - if transport_url_hosts: - format_string = ",{}:{}@{}:{}" - else: - format_string = "{}:{}@{}:{}" - transport_url_hosts += format_string.format( - ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], - host, rabbitmq_port) + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) ctxt['transport_url'] = "rabbit://{}/{}".format( transport_url_hosts, vhost) @@ -530,6 +537,8 @@ class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes all peers to be included in the cluster. Each charm needs to include its own context generator that describes the port mapping. + + :side effect: mkdir is called on HAPROXY_RUN_DIR """ interfaces = ['cluster'] @@ -537,6 +546,8 @@ def __init__(self, singlenode_mode=False): self.singlenode_mode = singlenode_mode def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) if not relation_ids('cluster') and not self.singlenode_mode: return {} @@ -1217,22 +1228,54 @@ def __call__(self): return {'bind_host': '0.0.0.0'} -class WorkerConfigContext(OSContextGenerator): +MAX_DEFAULT_WORKERS = 4 +DEFAULT_MULTIPLIER = 2 - @property - def num_cpus(self): - # NOTE: use cpu_count if present (16.04 support) - if hasattr(psutil, 'cpu_count'): - return psutil.cpu_count() - else: - return psutil.NUM_CPUS + +def _calculate_workers(): + ''' + Determine the number of worker processes based on the CPU + count of the unit containing the application. + + Workers will be limited to MAX_DEFAULT_WORKERS in + container environments where no worker-multipler configuration + option been set. + + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPU's + a unit has. 
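+    Uses psutil.cpu_count() where the installed psutil provides it, and
+    falls back to the legacy psutil.NUM_CPUS constant otherwise.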
+ + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): def __call__(self): - multiplier = config('worker-multiplier') or 0 - count = int(self.num_cpus * multiplier) - if multiplier > 0 and count == 0: - count = 1 - ctxt = {"workers": count} + ctxt = {"workers": _calculate_workers()} return ctxt @@ -1240,7 +1283,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, public_script=None, process_weight=1.00, - admin_process_weight=0.75, public_process_weight=0.25): + admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name self.user = name self.group = name @@ -1252,8 +1295,7 @@ def __init__(self, name=None, script=None, admin_script=None, self.public_process_weight = public_process_weight def __call__(self): - multiplier = config('worker-multiplier') or 1 - total_processes = self.num_cpus * multiplier + total_processes = _calculate_workers() ctxt = { "service_name": self.service_name, "user": self.user, @@ -1584,7 +1626,7 @@ class MemcacheContext(OSContextGenerator): """Memcache context This context provides options for configuring a local memcache client and - server + server for both IPv4 and IPv6 """ def __init__(self, package=None): @@ -1601,13 +1643,25 @@ def __call__(self): if ctxt['use_memcache']: # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead - if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': - ctxt['memcache_server'] = '::1' + release = lsb_release()['DISTRIB_CODENAME'].lower() + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = '{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index a8f1ed72..37fa0eb0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,7 +23,10 @@ ERROR, ) -from charmhelpers.contrib.openstack.utils import os_release +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) def headers_package(): @@ -198,7 +201,8 @@ def neutron_plugins(): }, 'plumgrid': { 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), 'contexts': [ context.SharedDBContext(user=config('database-user'), database=config('database'), @@ -225,7 +229,7 @@ def 
neutron_plugins(): 'server_services': ['neutron-server'] } } - if release >= 'icehouse': + if CompareOpenStackReleases(release) >= 'icehouse': # NOTE: patch in ml2 plugin for icehouse onwards plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' @@ -233,10 +237,10 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] - if release >= 'kilo': + if CompareOpenStackReleases(release) >= 'kilo': plugins['midonet']['driver'] = ( 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if release >= 'liberty': + if CompareOpenStackReleases(release) >= 'liberty': plugins['midonet']['driver'] = ( 'midonet.neutron.plugin_v1.MidonetPluginV2') plugins['midonet']['server_packages'].remove( @@ -244,10 +248,11 @@ def neutron_plugins(): plugins['midonet']['server_packages'].append( 'python-networking-midonet') plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') - if release >= 'mitaka': + if CompareOpenStackReleases(release) >= 'mitaka': plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 7e8ecff4..e13450c1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -33,9 +33,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import ( - unitdata, -) +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( action_fail, @@ -55,6 +53,8 @@ application_version_set, ) +from charmhelpers.core.strutils import BasicStringComparator + from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, is_lvm_physical_volume, @@ -97,6 +97,22 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', +) + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -238,6 +254,17 @@ DEFAULT_LOOPBACK_SIZE = '5G' +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. 
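+    Ordering comes from the index into the OPENSTACK_RELEASES tuple
+    above, not from alphabetical comparison of the release names.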
+ + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + def error_out(msg): juju_log("FATAL ERROR: %s" % msg, level='ERROR') sys.exit(1) @@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(init_in_source, init_source) with open(init_source, 'a') as outfile: - template = '/usr/share/openstack-pkg-tools/init-script-template' + template = ('/usr/share/openstack-pkg-tools/' + 'init-script-template') with open(template) as infile: outfile.write('\n\n{}'.format(infile.read())) @@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None): if not _release: _release = get_os_codename_install_source(source) - # TODO: this should be changed to a numeric comparison using a known list - # of releases and comparing by index. - return _release >= 'mitaka' + return CompareOpenStackReleases(_release) >= 'mitaka' def token_cache_pkgs(source=None, release=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index ae7f3f93..9417d684 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. - Returns False if no ceph key is available in relation state. + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break if not key: return False diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 05edfa50..88e80a49 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' @@ -189,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -222,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): 
override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
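+    (Not usable on CentOS: no release ordering is defined, so __init__
+    below raises NotImplementedError.)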
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 33b02fb7..f4bc81e2 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -20,7 +20,8 @@ config, status_set, network_get_primary_address, - log, DEBUG, + log, + DEBUG, ) from charmhelpers.fetch import ( apt_install, @@ -28,7 +29,8 @@ ) from charmhelpers.core.host import ( - lsb_release + lsb_release, + CompareHostReleases, ) from charmhelpers.contrib.network.ip import ( @@ -146,6 +148,7 @@ def get_network_addrs(config_opt): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index a453b44a..db205d5a 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -32,28 +32,42 @@ chownr, service_restart, lsb_release, - cmp_pkgrevno, service_stop, mounts, service_start) + cmp_pkgrevno, + service_stop, + mounts, + service_start, + CompareHostReleases, +) from charmhelpers.core.hookenv import ( log, ERROR, cached, status_set, - WARNING, DEBUG, config) + WARNING, + DEBUG, + config +) from charmhelpers.core.services import render_template from charmhelpers.fetch import ( apt_cache, - add_source, apt_install, apt_update) + add_source, + apt_install, + apt_update +) from charmhelpers.contrib.storage.linux.ceph import ( monitor_key_set, monitor_key_exists, monitor_key_get, - get_mon_map) + get_mon_map +) from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, - is_device_mounted) + is_device_mounted +) from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source) + get_os_codename_install_source +) LEADER = 'leader' PEON = 'peon' @@ -1153,7 +1167,7 @@ def upgrade_key_caps(key, caps): @cached def systemd(): - return (lsb_release()['DISTRIB_CODENAME'] >= 'vivid') + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' def bootstrap_monitor_cluster(secret): diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 
1f4cf42e..bcef4cd0 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -546,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 05edfa50..88e80a49 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' @@ -189,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -222,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. 
+ + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-mon/tests/charmhelpers/core/strutils.py b/ceph-mon/tests/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-mon/tests/charmhelpers/core/strutils.py +++ b/ceph-mon/tests/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. + """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-mon/tests/charmhelpers/osplatform.py b/ceph-mon/tests/charmhelpers/osplatform.py new file mode 100644 index 00000000..d9a4d5c0 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/osplatform.py @@ -0,0 +1,25 @@ +import platform + + +def get_platform(): + """Return the current OS platform. 
+ + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warings *not* disabled, as we certainly need to fix this. + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 6f1aeace..7c2936e3 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -14,7 +14,7 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_URL +passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] basepython = python2.7 From 2477bc97e4bc9fe0eddff924d6bac5f853e0fafa Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 27 Apr 2017 11:06:47 -0700 Subject: [PATCH 1307/2699] Enable Zesty-Ocata Amulet Tests - Turn on Zesty-Ocata Amulet test definitions. - Standardize test-requirements.txt - Sync charm helpers for various fixes Change-Id: Iae05c0767b09f455a479dd781bdb7baf9a645c7b --- ceph-mon/charm-helpers-tests.yaml | 1 + .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../contrib/hardening/apache/checks/config.py | 10 +- .../{hardening.conf => 99-hardening.conf} | 20 ++- .../contrib/hardening/audits/__init__.py | 13 +- .../contrib/hardening/defaults/apache.yaml | 5 +- .../hardening/defaults/apache.yaml.schema | 3 + .../contrib/hardening/defaults/os.yaml | 1 + .../contrib/hardening/defaults/os.yaml.schema | 1 + .../contrib/hardening/host/checks/profile.py | 10 +- .../hardening/host/templates/99-hardening.sh | 5 + .../contrib/hardening/ssh/checks/config.py | 20 ++- .../hooks/charmhelpers/contrib/network/ip.py | 113 +++++++++------ .../contrib/openstack/amulet/utils.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 132 ++++++++++++------ .../charmhelpers/contrib/openstack/neutron.py | 19 ++- .../charmhelpers/contrib/openstack/utils.py | 40 +++++- .../contrib/storage/linux/ceph.py | 18 +-- ceph-mon/hooks/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/host_factory/centos.py | 16 +++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++ ceph-mon/hooks/charmhelpers/core/strutils.py | 53 +++++++ ceph-mon/test-requirements.txt | 18 ++- .../contrib/openstack/amulet/utils.py | 5 +- ceph-mon/tests/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/host_factory/centos.py | 16 +++ .../charmhelpers/core/host_factory/ubuntu.py | 32 +++++ ceph-mon/tests/charmhelpers/core/strutils.py | 53 +++++++ ceph-mon/tests/charmhelpers/osplatform.py | 25 ++++ ceph-mon/tests/gate-basic-zesty-ocata | 0 ceph-mon/tox.ini | 2 +- 31 files changed, 539 insertions(+), 143 deletions(-) rename ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/{hardening.conf => 99-hardening.conf} (56%) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh create mode 100644 ceph-mon/tests/charmhelpers/osplatform.py mode change 100644 => 100755 ceph-mon/tests/gate-basic-zesty-ocata diff --git 
a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index e5063253..b0de9df6 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -4,3 +4,4 @@ include: - contrib.amulet - contrib.openstack.amulet - core + - osplatform diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9646b838..8240249e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status' % svc + '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 51b636f7..b18b263d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -26,6 +26,7 @@ DirectoryPermissionAudit, NoReadWriteForOther, TemplatedFile, + DeletedFile ) from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR @@ -52,13 +53,13 @@ def get_audits(): 'mods-available/alias.conf'), context, TEMPLATES_DIR, - mode=0o0755, + mode=0o0640, user='root', service_actions=[{'service': 'apache2', 'actions': ['restart']}]), TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/hardening.conf'), + 'conf-enabled/99-hardening.conf'), context, TEMPLATES_DIR, mode=0o0640, @@ -69,11 +70,13 @@ def get_audits(): DirectoryPermissionAudit(settings['common']['apache_dir'], user='root', group='root', - mode=0o640), + mode=0o0750), DisabledModuleAudit(settings['hardening']['modules_to_disable']), NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) ] return audits @@ -94,5 +97,4 @@ def __call__(self): ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - ctxt['traceenable'] = settings['hardening']['traceenable'] return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 56% rename from ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf rename to ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf index 07945418..22b68041 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -4,15 +4,29 @@ ############################################################################### <Location / > - + # http://httpd.apache.org/docs/2.4/upgrading.html {% if apache_version > '2.2' -%} Require all granted {% else -%} - Order Allow,Deny - Deny from all {% endif %} </Location> + +<Directory /> + Options -Indexes -FollowSymLinks + AllowOverride None +</Directory> + +<Directory /var/www/> + Options -Indexes -FollowSymLinks + AllowOverride None +</Directory> + TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + 
+SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py index 9bf9c3c6..6dd5b05f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py @@ -49,13 +49,6 @@ def _take_action(self): # Invoke the callback if there is one. if hasattr(self.unless, '__call__'): - results = self.unless() - if results: - return False - else: - return True - - if self.unless: - return False - else: - return True + return not self.unless() + + return not self.unless diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml index e5ada29f..0f940d4c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -10,4 +10,7 @@ common: hardening: traceenable: 'off' allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] \ No newline at end of file + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema index 227589b5..c112137c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -7,3 +7,6 @@ common: hardening: allowed_http_methods: modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml index ddd4286c..9a8627b5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml @@ -58,6 +58,7 @@ security: rsync kernel_enable_module_loading: True # (type:boolean) kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 sysctl: kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema index 88b3966e..cc3b9c20 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -34,6 +34,7 @@ security: packages_list: kernel_enable_module_loading: kernel_enable_core_dump: + ssh_tmout: sysctl: kernel_secure_sysrq: kernel_enable_sysrq: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py index 56d65263..2727428d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py @@ -25,7 +25,6 @@ def get_audits(): audits = [] settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be # created as they may contain sensitive information. 
if not settings['security']['kernel_enable_core_dump']: @@ -33,11 +32,18 @@ def get_audits(): ProfileContext(), template_dir=TEMPLATES_DIR, mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) return audits class ProfileContext(object): def __call__(self): - ctxt = {} + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 00000000..616cef46 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py index f3cac6d9..41bed2d1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -27,7 +27,10 @@ apt_install, apt_update, ) -from charmhelpers.core.host import lsb_release +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) from charmhelpers.contrib.hardening.audits.file import ( TemplatedFile, FileContentAudit, @@ -68,7 +71,8 @@ def get_macs(self, allow_weak_mac): 'weak': default + ',hmac-sha1'} # Use newer ciphers on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) macs = macs_66 @@ -96,7 +100,8 @@ def get_kexs(self, allow_weak_kex): 'weak': weak} # Use newer kex on Ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new key exchange ' 'algorithms', level=DEBUG) kex = kex_66 @@ -119,7 +124,8 @@ def get_ciphers(self, cbc_required): 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} # Use newer ciphers on ubuntu Trusty and above - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': log('Detected Ubuntu 14.04 or newer, using new ciphers', level=DEBUG) cipher = ciphers_66 @@ -291,7 +297,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: @@ -364,7 +371,8 @@ def is_compliant(self, *args, **kwargs): self.fail_cases = [] settings = utils.get_settings('ssh') - if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty': + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': if not settings['server']['weak_hmac']: self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py 
b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 54c76a72..fc3f5e3e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -31,6 +31,7 @@ from charmhelpers.core.host import ( lsb_release, + CompareHostReleases, ) try: @@ -67,6 +68,24 @@ def no_ip_found_error_out(network): raise ValueError(errmsg) +def _get_ipv6_network_from_address(address): + """Get a netaddr.IPNetwork for the given IPv6 address :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + def get_address_in_network(network, fallback=None, fatal=False): """Get an IPv4 or IPv6 address within the network from the host. @@ -92,19 +111,17 @@ def get_address_in_network(network, fallback=None, fatal=False): for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) if fallback is not None: return fallback @@ -180,18 +197,18 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] return None @@ -222,6 +239,16 @@ def format_ipv6_addr(address): return None +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT) + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + except subprocess.CalledProcessError: + return True + + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): """Return the assigned IP address for a given interface, if any. 
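As a reading aid (not part of the synced patch), here is a minimal sketch of how the new _get_ipv6_network_from_address() helper treats the two address shapes netifaces can report; the sample dicts are hypothetical:

    # Hypothetical netifaces-style address entries, for illustration only.
    global_unicast = {'addr': '2001:db8::10',
                      'netmask': 'ffff:ffff:ffff:ffff::/64'}
    link_local = {'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::'}

    # The combined "netmask/prefix" form is split on "/" and only the
    # prefix length is kept, yielding a usable netaddr.IPNetwork:
    _get_ipv6_network_from_address(global_unicast)
    # -> netaddr.IPNetwork('2001:db8::10/64')

    # Link-local (fe80...) and loopback (::1) entries yield None, so
    # callers such as get_address_in_network() simply skip them:
    _get_ipv6_network_from_address(link_local)
    # -> None

This is why the refactored get_address_in_network() above can walk every AF_INET6 entry with a plain "if cidr and cidr in network" check instead of special-casing fe80 addresses inline.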
@@ -521,36 +548,44 @@ def port_has_listener(address, port): def assert_charm_supports_ipv6(): """Check whether we are able to support charms ipv6.""" - if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty": + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") -def get_relation_ip(interface, config_override=None): - """Return this unit's IP for the given relation. +def get_relation_ip(interface, cidr_network=None): + """Return this unit's IP for the given interface. Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including configuration parameter - override and IPv6. + Handle all address selection options including passed cidr network and + IPv6. - Usage: get_relation_ip('amqp', config_override='access-network') + Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') @param interface: string name of the relation. - @param config_override: string name of the config option for network - override. Supports legacy network override configuration parameters. + @param cidr_network: string CIDR Network to select an address from. @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. @returns IPv6 or IPv4 address """ + # Select the interface address first + # For possible use as a fallback below with get_address_in_network + try: + # Get the interface specific IP + address = network_get_primary_address(interface) + except NotImplementedError: + # If network-get is not available + address = get_host_ip(unit_get('private-address')) - fallback = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): + # Currently IPv6 has priority, eventually we want IPv6 to just be + # another network space. assert_charm_supports_ipv6() return get_ipv6_addr()[0] - elif config_override and config(config_override): - return get_address_in_network(config(config_override), - fallback) - else: - try: - return network_get_primary_address(interface) - except NotImplementedError: - return fallback + elif cidr_network: + # If a specific CIDR network is passed get the address from that + # network. 
+ return get_address_in_network(cidr_network, address) + + # Return the interface address + return address diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..bcef4cd0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -546,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 6cdbbbbf..2adf2cb8 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -59,6 +59,8 @@ write_file, pwgen, lsb_release, + CompareHostReleases, + is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -87,6 +89,7 @@ format_ipv6_addr, is_address_in_network, is_bridge_member, + is_ipv6_disabled, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, @@ -108,6 +111,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' def ensure_packages(packages): @@ -155,7 +159,8 @@ def context_complete(self, ctxt): if self.missing_data: self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) else: self.complete = True return self.complete @@ -213,8 +218,9 @@ def __call__(self): hostname_key = "{}_hostname".format(self.relation_prefix) else: hostname_key = "hostname" - access_hostname = get_address_in_network(access_network, - unit_get('private-address')) + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -308,7 +314,10 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): - def __init__(self, service=None, service_user=None, rel_name='identity-service'): + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): self.service = service self.service_user = service_user self.rel_name = rel_name @@ -457,19 +466,17 @@ def __call__(self): host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = 
','.join(rabbitmq_hosts) transport_hosts = rabbitmq_hosts if transport_hosts: - transport_url_hosts = '' - for host in transport_hosts: - if transport_url_hosts: - format_string = ",{}:{}@{}:{}" - else: - format_string = "{}:{}@{}:{}" - transport_url_hosts += format_string.format( - ctxt['rabbitmq_user'], ctxt['rabbitmq_password'], - host, rabbitmq_port) + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) ctxt['transport_url'] = "rabbit://{}/{}".format( transport_url_hosts, vhost) @@ -530,6 +537,8 @@ class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes all peers to be included in the cluster. Each charm needs to include its own context generator that describes the port mapping. + + :side effect: mkdir is called on HAPROXY_RUN_DIR """ interfaces = ['cluster'] @@ -537,6 +546,8 @@ def __init__(self, singlenode_mode=False): self.singlenode_mode = singlenode_mode def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) if not relation_ids('cluster') and not self.singlenode_mode: return {} @@ -1217,22 +1228,54 @@ def __call__(self): return {'bind_host': '0.0.0.0'} -class WorkerConfigContext(OSContextGenerator): +MAX_DEFAULT_WORKERS = 4 +DEFAULT_MULTIPLIER = 2 - @property - def num_cpus(self): - # NOTE: use cpu_count if present (16.04 support) - if hasattr(psutil, 'cpu_count'): - return psutil.cpu_count() - else: - return psutil.NUM_CPUS + +def _calculate_workers(): + ''' + Determine the number of worker processes based on the CPU + count of the unit containing the application. + + Workers will be limited to MAX_DEFAULT_WORKERS in + container environments where no worker-multiplier configuration + option has been set. + + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPUs + a unit has. 
+ + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): def __call__(self): - multiplier = config('worker-multiplier') or 0 - count = int(self.num_cpus * multiplier) - if multiplier > 0 and count == 0: - count = 1 - ctxt = {"workers": count} + ctxt = {"workers": _calculate_workers()} return ctxt @@ -1240,7 +1283,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, public_script=None, process_weight=1.00, - admin_process_weight=0.75, public_process_weight=0.25): + admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name self.user = name self.group = name @@ -1252,8 +1295,7 @@ def __init__(self, name=None, script=None, admin_script=None, self.public_process_weight = public_process_weight def __call__(self): - multiplier = config('worker-multiplier') or 1 - total_processes = self.num_cpus * multiplier + total_processes = _calculate_workers() ctxt = { "service_name": self.service_name, "user": self.user, @@ -1584,7 +1626,7 @@ class MemcacheContext(OSContextGenerator): """Memcache context This context provides options for configuring a local memcache client and - server + server for both IPv4 and IPv6 """ def __init__(self, package=None): @@ -1601,13 +1643,25 @@ def __call__(self): if ctxt['use_memcache']: # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead - if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty': - ctxt['memcache_server'] = '::1' + release = lsb_release()['DISTRIB_CODENAME'].lower() + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = '{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index a8f1ed72..37fa0eb0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,7 +23,10 @@ ERROR, ) -from charmhelpers.contrib.openstack.utils import os_release +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) def headers_package(): @@ -198,7 +201,8 @@ def neutron_plugins(): }, 'plumgrid': { 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), 'contexts': [ context.SharedDBContext(user=config('database-user'), database=config('database'), @@ -225,7 +229,7 @@ def 
neutron_plugins(): 'server_services': ['neutron-server'] } } - if release >= 'icehouse': + if CompareOpenStackReleases(release) >= 'icehouse': # NOTE: patch in ml2 plugin for icehouse onwards plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' @@ -233,10 +237,10 @@ def neutron_plugins(): 'neutron-plugin-ml2'] # NOTE: patch in vmware renames nvp->nsx for icehouse onwards plugins['nvp'] = plugins['nsx'] - if release >= 'kilo': + if CompareOpenStackReleases(release) >= 'kilo': plugins['midonet']['driver'] = ( 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if release >= 'liberty': + if CompareOpenStackReleases(release) >= 'liberty': plugins['midonet']['driver'] = ( 'midonet.neutron.plugin_v1.MidonetPluginV2') plugins['midonet']['server_packages'].remove( @@ -244,10 +248,11 @@ def neutron_plugins(): plugins['midonet']['server_packages'].append( 'python-networking-midonet') plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') - if release >= 'mitaka': + if CompareOpenStackReleases(release) >= 'mitaka': plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') plugins['nsx']['server_packages'].append('python-vmware-nsx') plugins['nsx']['config'] = '/etc/neutron/nsx.ini' diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 7e8ecff4..e13450c1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -33,9 +33,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import ( - unitdata, -) +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( action_fail, @@ -55,6 +53,8 @@ application_version_set, ) +from charmhelpers.core.strutils import BasicStringComparator + from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, is_lvm_physical_volume, @@ -97,6 +97,22 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', +) + UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -238,6 +254,17 @@ DEFAULT_LOOPBACK_SIZE = '5G' +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. 
+ + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + def error_out(msg): juju_log("FATAL ERROR: %s" % msg, level='ERROR') sys.exit(1) @@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir): shutil.copyfile(init_in_source, init_source) with open(init_source, 'a') as outfile: - template = '/usr/share/openstack-pkg-tools/init-script-template' + template = ('/usr/share/openstack-pkg-tools/' + 'init-script-template') with open(template) as infile: outfile.write('\n\n{}'.format(infile.read())) @@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None): if not _release: _release = get_os_codename_install_source(source) - # TODO: this should be changed to a numeric comparison using a known list - # of releases and comparing by index. - return _release >= 'mitaka' + return CompareOpenStackReleases(_release) >= 'mitaka' def token_cache_pkgs(source=None, release=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index ae7f3f93..9417d684 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. - Returns False if no ceph key is available in relation state. + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break if not key: return False diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 05edfa50..88e80a49 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' @@ -189,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -222,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): 
override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with trusty + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with trusty + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. 
+ """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 06972943..9edd4bbf 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -11,13 +11,17 @@ requests==2.6.0 # Liberty client lower constraints amulet>=1.14.3,<2.0 bundletester>=0.6.1,<1.0 -python-ceilometerclient>=1.5.0,<2.0 -python-cinderclient>=1.4.0,<2.0 -python-glanceclient>=1.1.0,<2.0 -python-heatclient>=0.8.0,<1.0 -python-novaclient>=2.30.1,<3.0 -python-openstackclient>=1.7.0,<2.0 -python-swiftclient>=2.6.0,<3.0 +python-ceilometerclient>=1.5.0 +python-cinderclient>=1.4.0 +python-glanceclient>=1.1.0 +python-heatclient>=0.8.0 +python-keystoneclient>=1.7.1 +python-neutronclient>=3.1.0 +python-novaclient>=2.30.1 +python-openstackclient>=1.7.0 +python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info # END: Amulet OpenStack Charm Helper Requirements +# NOTE: workaround for 14.04 pip/tox +pytz diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 1f4cf42e..bcef4cd0 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ AmuletUtils ) from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -546,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) @@ -1255,7 +1256,7 @@ def test_110_memcache(self): contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', fatal=True) ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if ubuntu_release <= 'trusty': + if CompareHostReleases(ubuntu_release) <= 'trusty': memcache_listen_addr = 'ip6-localhost' else: memcache_listen_addr = '::1' diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 05edfa50..88e80a49 100644 --- 
a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -45,6 +45,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( @@ -52,6 +53,7 @@ add_new_group, lsb_release, cmp_pkgrevno, + CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' @@ -189,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -222,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py index 902d469f..7781a396 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py @@ -2,6 +2,22 @@ import yum import os +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with trusty + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + def service_available(service_name): # """Determine whether a system service is available.""" diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py index 8c66af55..0448288c 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,37 @@ import subprocess +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. 
+ + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with trusty + """ + _list = UBUNTU_RELEASES + def service_available(service_name): """Determine whether a system service is available""" diff --git a/ceph-mon/tests/charmhelpers/core/strutils.py b/ceph-mon/tests/charmhelpers/core/strutils.py index dd9b9717..685dabde 100644 --- a/ceph-mon/tests/charmhelpers/core/strutils.py +++ b/ceph-mon/tests/charmhelpers/core/strutils.py @@ -68,3 +68,56 @@ def bytes_from_string(value): msg = "Unable to interpret string value '%s' as bytes" % (value) raise ValueError(msg) return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. + """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenStack('newton') + + assert s_newton > s_mitaka + + @returns: the item at self.index + """ + return self._list[self.index] diff --git a/ceph-mon/tests/charmhelpers/osplatform.py b/ceph-mon/tests/charmhelpers/osplatform.py new file mode 100644 index 00000000..d9a4d5c0 --- /dev/null +++ b/ceph-mon/tests/charmhelpers/osplatform.py @@ -0,0 +1,25 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current OS platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI. + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." 
+ .format(current_platform)) diff --git a/ceph-mon/tests/gate-basic-zesty-ocata b/ceph-mon/tests/gate-basic-zesty-ocata old mode 100644 new mode 100755 diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 6f1aeace..7c2936e3 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -14,7 +14,7 @@ install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_URL +passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] basepython = python2.7 From cb84c34bb9d56933b112de973be934087a37334a Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 27 Apr 2017 11:07:50 -0700 Subject: [PATCH 1308/2699] Enable Zesty-Ocata Amulet Tests - Turn on Zesty-Ocata Amulet test definitions. - Standardize test-requirements.txt - Sync charm helpers for various fixes Change-Id: I20bd84a97d741c4303a60ef40568e59f6fc54f1b --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 59 +++++++---- .../charmhelpers/contrib/openstack/context.py | 99 ++++++++++++++----- .../contrib/storage/linux/ceph.py | 18 ++-- ceph-osd/hooks/charmhelpers/core/host.py | 4 +- ceph-osd/test-requirements.txt | 20 ++-- .../contrib/openstack/amulet/utils.py | 2 +- ceph-osd/tests/charmhelpers/core/host.py | 4 +- ceph-osd/tests/gate-basic-zesty-ocata | 0 9 files changed, 139 insertions(+), 69 deletions(-) mode change 100644 => 100755 ceph-osd/tests/gate-basic-zesty-ocata diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9646b838..8240249e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status' % svc + '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 7451af9c..fc3f5e3e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -111,11 +111,11 @@ def get_address_in_network(network, fallback=None, fatal=False): for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: @@ -239,6 +239,16 @@ def format_ipv6_addr(address): return None +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT) + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + except subprocess.CalledProcessError: + return True + + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): """Return the 
assigned IP address for a given interface, if any. @@ -544,31 +554,38 @@ def assert_charm_supports_ipv6(): "versions less than Trusty 14.04") -def get_relation_ip(interface, config_override=None): - """Return this unit's IP for the given relation. +def get_relation_ip(interface, cidr_network=None): + """Return this unit's IP for the given interface. Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including configuration parameter - override and IPv6. + Handle all address selection options including passed cidr network and + IPv6. - Usage: get_relation_ip('amqp', config_override='access-network') + Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') @param interface: string name of the relation. - @param config_override: string name of the config option for network - override. Supports legacy network override configuration parameters. + @param cidr_network: string CIDR Network to select an address from. @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. @returns IPv6 or IPv4 address """ + # Select the interface address first + # For possible use as a fallback below with get_address_in_network + try: + # Get the interface specific IP + address = network_get_primary_address(interface) + except NotImplementedError: + # If network-get is not available + address = get_host_ip(unit_get('private-address')) - fallback = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): + # Currently IPv6 has priority, eventually we want IPv6 to just be + # another network space. assert_charm_supports_ipv6() return get_ipv6_addr()[0] - elif config_override and config(config_override): - return get_address_in_network(config(config_override), - fallback) - else: - try: - return network_get_primary_address(interface) - except NotImplementedError: - return fallback + elif cidr_network: + # If a specific CIDR network is passed get the address from that + # network. + return get_address_in_network(cidr_network, address) + + # Return the interface address + return address diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 7876145d..2adf2cb8 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -60,6 +60,7 @@ pwgen, lsb_release, CompareHostReleases, + is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -88,6 +89,7 @@ format_ipv6_addr, is_address_in_network, is_bridge_member, + is_ipv6_disabled, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, @@ -109,6 +111,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' def ensure_packages(packages): @@ -534,6 +537,8 @@ class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes all peers to be included in the cluster. Each charm needs to include its own context generator that describes the port mapping. 
+ + :side effect: mkdir is called on HAPROXY_RUN_DIR """ interfaces = ['cluster'] @@ -541,6 +546,8 @@ def __init__(self, singlenode_mode=False): self.singlenode_mode = singlenode_mode def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) if not relation_ids('cluster') and not self.singlenode_mode: return {} @@ -1221,22 +1228,54 @@ def __call__(self): return {'bind_host': '0.0.0.0'} -class WorkerConfigContext(OSContextGenerator): +MAX_DEFAULT_WORKERS = 4 +DEFAULT_MULTIPLIER = 2 - @property - def num_cpus(self): - # NOTE: use cpu_count if present (16.04 support) - if hasattr(psutil, 'cpu_count'): - return psutil.cpu_count() - else: - return psutil.NUM_CPUS + +def _calculate_workers(): + ''' + Determine the number of worker processes based on the CPU + count of the unit containing the application. + + Workers will be limited to MAX_DEFAULT_WORKERS in + container environments where no worker-multiplier configuration + option has been set. + + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPUs + a unit has. + + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): def __call__(self): - multiplier = config('worker-multiplier') or 0 - count = int(self.num_cpus * multiplier) - if multiplier > 0 and count == 0: - count = 1 - ctxt = {"workers": count} + ctxt = {"workers": _calculate_workers()} return ctxt @@ -1244,7 +1283,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, public_script=None, process_weight=1.00, - admin_process_weight=0.75, public_process_weight=0.25): + admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name self.user = name self.group = name @@ -1256,8 +1295,7 @@ def __init__(self, name=None, script=None, admin_script=None, self.public_process_weight = public_process_weight def __call__(self): - multiplier = config('worker-multiplier') or 1 - total_processes = self.num_cpus * multiplier + total_processes = _calculate_workers() ctxt = { "service_name": self.service_name, "user": self.user, @@ -1588,7 +1626,7 @@ class MemcacheContext(OSContextGenerator): """Memcache context This context provides options for configuring a local memcache client and - server + server for both IPv4 and IPv6 """ def __init__(self, package=None): @@ -1606,13 +1644,24 @@ def __call__(self): # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '::1' + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 
'{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index ae7f3f93..9417d684 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, service_start(svc) -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): """Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership. - Returns False if no ceph key is available in relation state. + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break if not key: return False diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 74baa120..9edd4bbf 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -11,15 +11,17 @@ requests==2.6.0 # Liberty client lower constraints amulet>=1.14.3,<2.0 bundletester>=0.6.1,<1.0 -python-ceilometerclient>=1.5.0,<2.0 -python-cinderclient>=1.4.0,<2.0 -python-glanceclient>=1.1.0,<2.0 -python-heatclient>=0.8.0,<1.0 -python-keystoneclient>=1.7.1,<2.0 -python-neutronclient>=3.1.0,<4.0 -python-novaclient>=2.30.1,<3.0 -python-openstackclient>=1.7.0,<2.0 -python-swiftclient>=2.6.0,<3.0 +python-ceilometerclient>=1.5.0 +python-cinderclient>=1.4.0 
+python-glanceclient>=1.1.0 +python-heatclient>=0.8.0 +python-keystoneclient>=1.7.1 +python-neutronclient>=3.1.0 +python-novaclient>=2.30.1 +python-openstackclient>=1.7.0 +python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info # END: Amulet OpenStack Charm Helper Requirements +# NOTE: workaround for 14.04 pip/tox +pytz diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 346e6fea..bcef4cd0 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -547,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-osd/tests/gate-basic-zesty-ocata b/ceph-osd/tests/gate-basic-zesty-ocata old mode 100644 new mode 100755 From 2534089992d7bc762f537dc56077e99ce1275047 Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 27 Apr 2017 11:09:28 -0700 Subject: [PATCH 1309/2699] Enable Zesty-Ocata Amulet Tests - Turn on Zesty-Ocata Amulet test definitions. 
- Standardize test-requirements.txt - Sync charm helpers for various fixes Change-Id: Ia9a42cd4a76a6fa8a70e7092b9d61657816d2d65 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 59 +++++++---- .../contrib/openstack/amulet/utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 99 ++++++++++++++----- .../contrib/openstack/templates/haproxy.cfg | 11 +++ ceph-radosgw/hooks/charmhelpers/core/host.py | 4 +- ceph-radosgw/test-requirements.txt | 20 ++-- ceph-radosgw/tests/basic_deployment.py | 38 +------ .../contrib/openstack/amulet/utils.py | 2 +- ceph-radosgw/tests/charmhelpers/core/host.py | 4 +- ceph-radosgw/tests/gate-basic-zesty-ocata | 0 .../unit_tests/test_ceph_radosgw_context.py | 3 +- 12 files changed, 147 insertions(+), 97 deletions(-) mode change 100644 => 100755 ceph-radosgw/tests/gate-basic-zesty-ocata diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9646b838..8240249e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status' % svc + '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 7451af9c..fc3f5e3e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -111,11 +111,11 @@ def get_address_in_network(network, fallback=None, fatal=False): for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: @@ -239,6 +239,16 @@ def format_ipv6_addr(address): return None +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT) + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + except subprocess.CalledProcessError: + return True + + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): """Return the assigned IP address for a given interface, if any. @@ -544,31 +554,38 @@ def assert_charm_supports_ipv6(): "versions less than Trusty 14.04") -def get_relation_ip(interface, config_override=None): - """Return this unit's IP for the given relation. +def get_relation_ip(interface, cidr_network=None): + """Return this unit's IP for the given interface. Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including configuration parameter - override and IPv6. 
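
A note on the is_ipv6_disabled() helper added above: it probes the kernel via sysctl and treats a failed lookup as "IPv6 unavailable"; the MemcacheContext change in the previous patch keys off it to choose between 127.0.0.1 and [::1]. One wrinkle worth remembering is that on Python 3 check_output() returns bytes, so the membership test needs a bytes literal (or a decode). A minimal standalone sketch of the same probe:

    import subprocess

    def ipv6_disabled():
        # "net.ipv6.conf.all.disable_ipv6 = 1" means IPv6 is
        # administratively switched off on this host.
        try:
            out = subprocess.check_output(
                ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
                stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            # Key missing entirely (e.g. kernel built without IPv6);
            # treat as disabled, matching the helper's behaviour.
            return True
        return b'net.ipv6.conf.all.disable_ipv6 = 1' in out
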
+ Handle all address selection options including passed cidr network and + IPv6. - Usage: get_relation_ip('amqp', config_override='access-network') + Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') @param interface: string name of the relation. - @param config_override: string name of the config option for network - override. Supports legacy network override configuration parameters. + @param cidr_network: string CIDR Network to select an address from. @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. @returns IPv6 or IPv4 address """ + # Select the interface address first + # For possible use as a fallback bellow with get_address_in_network + try: + # Get the interface specific IP + address = network_get_primary_address(interface) + except NotImplementedError: + # If network-get is not available + address = get_host_ip(unit_get('private-address')) - fallback = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): + # Currently IPv6 has priority, eventually we want IPv6 to just be + # another network space. assert_charm_supports_ipv6() return get_ipv6_addr()[0] - elif config_override and config(config_override): - return get_address_in_network(config(config_override), - fallback) - else: - try: - return network_get_primary_address(interface) - except NotImplementedError: - return fallback + elif cidr_network: + # If a specific CIDR network is passed get the address from that + # network. + return get_address_in_network(cidr_network, address) + + # Return the interface address + return address diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 346e6fea..bcef4cd0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -547,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 7876145d..2adf2cb8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -60,6 +60,7 @@ pwgen, lsb_release, CompareHostReleases, + is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -88,6 +89,7 @@ format_ipv6_addr, is_address_in_network, is_bridge_member, + is_ipv6_disabled, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, @@ -109,6 +111,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' def ensure_packages(packages): @@ -534,6 +537,8 @@ class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes all peers to be included in the cluster. Each charm needs to include its own context generator that describes the port mapping. 
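
For charm authors, the practical effect of the get_relation_ip() rewrite above is the changed second argument: callers now pass a CIDR directly instead of naming a config option for the helper to resolve. A hedged migration sketch, reusing the 'access-network' option name from the old docstring:

    from charmhelpers.contrib.network.ip import get_relation_ip
    from charmhelpers.core.hookenv import config

    # Before: the helper looked the option up itself.
    #   addr = get_relation_ip('amqp', config_override='access-network')
    # After: the caller resolves the option and passes the CIDR through,
    # with network-get (or private-address) as the fallback address.
    addr = get_relation_ip('amqp', cidr_network=config('access-network'))
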
+ + :side effect: mkdir is called on HAPROXY_RUN_DIR """ interfaces = ['cluster'] @@ -541,6 +546,8 @@ def __init__(self, singlenode_mode=False): self.singlenode_mode = singlenode_mode def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) if not relation_ids('cluster') and not self.singlenode_mode: return {} @@ -1221,22 +1228,54 @@ def __call__(self): return {'bind_host': '0.0.0.0'} -class WorkerConfigContext(OSContextGenerator): +MAX_DEFAULT_WORKERS = 4 +DEFAULT_MULTIPLIER = 2 - @property - def num_cpus(self): - # NOTE: use cpu_count if present (16.04 support) - if hasattr(psutil, 'cpu_count'): - return psutil.cpu_count() - else: - return psutil.NUM_CPUS + +def _calculate_workers(): + ''' + Determine the number of worker processes based on the CPU + count of the unit containing the application. + + Workers will be limited to MAX_DEFAULT_WORKERS in + container environments where no worker-multipler configuration + option been set. + + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPU's + a unit has. + + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): def __call__(self): - multiplier = config('worker-multiplier') or 0 - count = int(self.num_cpus * multiplier) - if multiplier > 0 and count == 0: - count = 1 - ctxt = {"workers": count} + ctxt = {"workers": _calculate_workers()} return ctxt @@ -1244,7 +1283,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, public_script=None, process_weight=1.00, - admin_process_weight=0.75, public_process_weight=0.25): + admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name self.user = name self.group = name @@ -1256,8 +1295,7 @@ def __init__(self, name=None, script=None, admin_script=None, self.public_process_weight = public_process_weight def __call__(self): - multiplier = config('worker-multiplier') or 1 - total_processes = self.num_cpus * multiplier + total_processes = _calculate_workers() ctxt = { "service_name": self.service_name, "user": self.user, @@ -1588,7 +1626,7 @@ class MemcacheContext(OSContextGenerator): """Memcache context This context provides options for configuring a local memcache client and - server + server for both IPv4 and IPv6 """ def __init__(self, package=None): @@ -1606,13 +1644,24 @@ def __call__(self): # Trusty version of memcached does not support ::1 as a listen # address so use host file entry instead release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '::1' + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 
'{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 32b62767..54fba39d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -5,6 +5,8 @@ global user haproxy group haproxy spread-checks 0 + stats socket /var/run/haproxy/admin.sock mode 600 level admin + stats timeout 2m defaults log global @@ -58,6 +60,15 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn + {% if backend_options -%} + {% if backend_options[service] -%} + {% for option in backend_options[service] -%} + {% for key, value in option.items() -%} + {{ key }} {{ value }} + {% endfor -%} + {% endfor -%} + {% endif -%} + {% endif -%} {% for unit, address in frontends[frontend]['backends'].items() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 74baa120..9edd4bbf 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -11,15 +11,17 @@ requests==2.6.0 # Liberty client lower constraints amulet>=1.14.3,<2.0 bundletester>=0.6.1,<1.0 -python-ceilometerclient>=1.5.0,<2.0 -python-cinderclient>=1.4.0,<2.0 -python-glanceclient>=1.1.0,<2.0 -python-heatclient>=0.8.0,<1.0 -python-keystoneclient>=1.7.1,<2.0 -python-neutronclient>=3.1.0,<4.0 -python-novaclient>=2.30.1,<3.0 -python-openstackclient>=1.7.0,<2.0 -python-swiftclient>=2.6.0,<3.0 +python-ceilometerclient>=1.5.0 +python-cinderclient>=1.4.0 +python-glanceclient>=1.1.0 +python-heatclient>=0.8.0 +python-keystoneclient>=1.7.1 +python-neutronclient>=3.1.0 +python-novaclient>=2.30.1 
+python-openstackclient>=1.7.0 +python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info # END: Amulet OpenStack Charm Helper Requirements +# NOTE: workaround for 14.04 pip/tox +pytz diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 3bc3366e..ac1fba18 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -16,10 +16,7 @@ import amulet import keystoneclient -import subprocess import swiftclient -import json -import time from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -124,32 +121,6 @@ def _configure_services(self): 'ceph': ceph_config} super(CephRadosGwBasicDeployment, self)._configure_services(configs) - def _run_action(self, unit_id, action, *args): - command = ["juju", "action", "do", "--format=json", unit_id, action] - command.extend(args) - print("Running command: %s\n" % " ".join(command)) - output = subprocess.check_output(command) - output_json = output.decode(encoding="UTF-8") - data = json.loads(output_json) - action_id = data[u'Action queued with id'] - return action_id - - def _wait_on_action(self, action_id): - command = ["juju", "action", "fetch", "--format=json", action_id] - while True: - try: - output = subprocess.check_output(command) - except Exception as e: - print(e) - return False - output_json = output.decode(encoding="UTF-8") - data = json.loads(output_json) - if data[u"status"] == "completed": - return True - elif data[u"status"] == "failed": - return False - time.sleep(2) - def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units @@ -715,16 +686,15 @@ def test_910_pause_and_resume(self): """The services can be paused and resumed. """ u.log.debug('Checking pause and resume actions...') unit = self.ceph_radosgw_sentry - unit_name = unit.info['unit_name'] assert u.status_get(unit)[0] == "active" - action_id = self._run_action(unit_name, "pause") - assert self._wait_on_action(action_id), "Pause action failed." + action_id = u.run_action(unit, "pause") + assert u.wait_on_action(action_id), "Pause action failed." assert u.status_get(unit)[0] == "maintenance" - action_id = self._run_action(unit_name, "resume") - assert self._wait_on_action(action_id), "Resume action failed." + action_id = u.run_action(unit, "resume") + assert u.wait_on_action(action_id), "Resume action failed." 
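
The hunk above replaces the hand-rolled _run_action/_wait_on_action subprocess wrappers with the equivalents on the shared amulet utils object, which operate on a sentry unit rather than a unit name. Condensed, the new pause/resume flow is (u being the AmuletUtils instance, as in the surrounding test):

    def check_pause_resume(u, unit):
        # Queue each action via the utils helper, then block until Juju
        # reports it completed; no raw `juju action` subprocess calls.
        assert u.status_get(unit)[0] == "active"
        action_id = u.run_action(unit, "pause")
        assert u.wait_on_action(action_id), "Pause action failed."
        assert u.status_get(unit)[0] == "maintenance"
        action_id = u.run_action(unit, "resume")
        assert u.wait_on_action(action_id), "Resume action failed."
        assert u.status_get(unit)[0] == "active"
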
assert u.status_get(unit)[0] == "active" u.log.debug('OK') diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 346e6fea..bcef4cd0 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -547,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-radosgw/tests/gate-basic-zesty-ocata b/ceph-radosgw/tests/gate-basic-zesty-ocata old mode 100644 new mode 100755 diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index e0f91367..ec81ad31 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -37,6 +37,7 @@ def setUp(self): self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get + @patch('charmhelpers.contrib.openstack.context.mkdir') @patch('charmhelpers.contrib.openstack.context.unit_get') @patch('charmhelpers.contrib.openstack.context.local_unit') @patch('charmhelpers.contrib.openstack.context.get_host_ip') @@ -45,7 +46,7 @@ def setUp(self): @patch('charmhelpers.contrib.openstack.context.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, - _ctxtconfig, _get_host_ip, _local_unit, _unit_get): + _ctxtconfig, _get_host_ip, _local_unit, _unit_get, _mkdir): _get_host_ip.return_value = '10.0.0.10' _unit_get.return_value = '10.0.0.10' _ctxtconfig.side_effect = self.test_config.get From bb749e82ece2bec38e1591854789e14d595718af Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 27 Apr 2017 11:08:36 -0700 Subject: [PATCH 1310/2699] Enable Zesty-Ocata Amulet Tests - Turn on Zesty-Ocata Amulet test definitions. 
- Standardize test-requirements.txt - Sync charm helpers for various fixes Change-Id: I1f161137ef00fe835bce8944285341713c2eadfa --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 59 ++++++++++++------- ceph-proxy/hooks/charmhelpers/core/host.py | 4 +- ceph-proxy/test-requirements.txt | 20 ++++--- .../contrib/openstack/amulet/utils.py | 2 +- ceph-proxy/tests/charmhelpers/core/host.py | 4 +- ceph-proxy/tests/gate-basic-wily-liberty | 9 --- ceph-proxy/tests/gate-basic-zesty-ocata | 0 ceph-proxy/tox.ini | 5 ++ 9 files changed, 60 insertions(+), 45 deletions(-) delete mode 100755 ceph-proxy/tests/gate-basic-wily-liberty mode change 100644 => 100755 ceph-proxy/tests/gate-basic-zesty-ocata diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 9646b838..8240249e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status' % svc + '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index 7451af9c..fc3f5e3e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -111,11 +111,11 @@ def get_address_in_network(network, fallback=None, fatal=False): for iface in netifaces.interfaces(): addresses = netifaces.ifaddresses(iface) if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: @@ -239,6 +239,16 @@ def format_ipv6_addr(address): return None +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT) + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + except subprocess.CalledProcessError: + return True + + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): """Return the assigned IP address for a given interface, if any. @@ -544,31 +554,38 @@ def assert_charm_supports_ipv6(): "versions less than Trusty 14.04") -def get_relation_ip(interface, config_override=None): - """Return this unit's IP for the given relation. +def get_relation_ip(interface, cidr_network=None): + """Return this unit's IP for the given interface. Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including configuration parameter - override and IPv6. + Handle all address selection options including passed cidr network and + IPv6. 
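
The get_address_in_network() fix synced above deserves a call-out: the old code inspected only the first IPv4 address on each interface, so a matching secondary address or alias was never found; the new loop considers every address, mirroring the existing IPv6 branch. A self-contained sketch of the corrected selection, assuming netifaces and netaddr as the helper does:

    import netaddr
    import netifaces

    def first_address_in_network(network_cidr):
        """Return the first local IPv4 address inside network_cidr, else None."""
        network = netaddr.IPNetwork(network_cidr)
        for iface in netifaces.interfaces():
            addresses = netifaces.ifaddresses(iface)
            # Walk *every* address bound to the interface, not just [0].
            for addr in addresses.get(netifaces.AF_INET, []):
                cidr = netaddr.IPNetwork(
                    '%s/%s' % (addr['addr'], addr['netmask']))
                if cidr in network:
                    return str(cidr.ip)
        return None
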
- Usage: get_relation_ip('amqp', config_override='access-network') + Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') @param interface: string name of the relation. - @param config_override: string name of the config option for network - override. Supports legacy network override configuration parameters. + @param cidr_network: string CIDR Network to select an address from. @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. @returns IPv6 or IPv4 address """ + # Select the interface address first + # For possible use as a fallback bellow with get_address_in_network + try: + # Get the interface specific IP + address = network_get_primary_address(interface) + except NotImplementedError: + # If network-get is not available + address = get_host_ip(unit_get('private-address')) - fallback = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): + # Currently IPv6 has priority, eventually we want IPv6 to just be + # another network space. assert_charm_supports_ipv6() return get_ipv6_addr()[0] - elif config_override and config(config_override): - return get_address_in_network(config(config_override), - fallback) - else: - try: - return network_get_primary_address(interface) - except NotImplementedError: - return fallback + elif cidr_network: + # If a specific CIDR network is passed get the address from that + # network. + return get_address_in_network(cidr_network, address) + + # Return the interface address + return address diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 74baa120..9edd4bbf 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -11,15 +11,17 @@ requests==2.6.0 # Liberty client lower constraints amulet>=1.14.3,<2.0 bundletester>=0.6.1,<1.0 -python-ceilometerclient>=1.5.0,<2.0 -python-cinderclient>=1.4.0,<2.0 -python-glanceclient>=1.1.0,<2.0 -python-heatclient>=0.8.0,<1.0 -python-keystoneclient>=1.7.1,<2.0 -python-neutronclient>=3.1.0,<4.0 -python-novaclient>=2.30.1,<3.0 -python-openstackclient>=1.7.0,<2.0 -python-swiftclient>=2.6.0,<3.0 +python-ceilometerclient>=1.5.0 +python-cinderclient>=1.4.0 +python-glanceclient>=1.1.0 +python-heatclient>=0.8.0 +python-keystoneclient>=1.7.1 +python-neutronclient>=3.1.0 +python-novaclient>=2.30.1 +python-openstackclient>=1.7.0 +python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info # END: Amulet OpenStack Charm Helper Requirements +# NOTE: workaround for 14.04 pip/tox +pytz diff --git 
a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 346e6fea..bcef4cd0 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -547,7 +547,7 @@ def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" self.log.debug('Creating instance ' '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) + image = nova.glance.find_image(image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, flavor=flavor) diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index 0ee5cb9f..88e80a49 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -191,7 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('disable', service_name) + service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -224,7 +224,7 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): - service('enable', service_name) + service('unmask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-proxy/tests/gate-basic-wily-liberty b/ceph-proxy/tests/gate-basic-wily-liberty deleted file mode 100755 index 8e91324b..00000000 --- a/ceph-proxy/tests/gate-basic-wily-liberty +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on wily-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='wily') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-zesty-ocata b/ceph-proxy/tests/gate-basic-zesty-ocata old mode 100644 new mode 100755 diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 1610be31..7c2936e3 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -21,6 +21,11 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From e337ff8b660ac2907b8be20b04a6611837315147 Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 27 Apr 2017 11:04:52 -0700 Subject: [PATCH 1311/2699] Enable Zesty-Ocata Amulet Tests - Turn on Zesty-Ocata Amulet test definitions. 
- Standardize test-requirements.txt - Sync charm helpers for various fixes Change-Id: Icecfbedb4d567a10ef48ec095442204cc3de35de --- ceph-fs/src/test-requirements.txt | 30 +- ceph-fs/src/tests/basic_deployment.py | 15 +- ceph-fs/src/tests/charmhelpers/__init__.py | 36 - .../tests/charmhelpers/contrib/__init__.py | 13 - .../charmhelpers/contrib/amulet/__init__.py | 13 - .../charmhelpers/contrib/amulet/deployment.py | 97 -- .../charmhelpers/contrib/amulet/utils.py | 827 ------------ .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 345 ----- .../contrib/openstack/amulet/utils.py | 1124 ----------------- ceph-fs/src/tests/gate-basic-xenial-ocata | 4 +- .../__init__.py => gate-basic-yakkety-newton} | 12 +- ceph-fs/src/tests/gate-basic-zesty-ocata | 0 ceph-fs/src/tox.ini | 2 +- 14 files changed, 47 insertions(+), 2484 deletions(-) delete mode 100644 ceph-fs/src/tests/charmhelpers/__init__.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py rename ceph-fs/src/tests/{charmhelpers/contrib/openstack/amulet/__init__.py => gate-basic-yakkety-newton} (65%) mode change 100644 => 100755 mode change 100644 => 100755 ceph-fs/src/tests/gate-basic-zesty-ocata diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 613b0812..3cb4ae76 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -1,5 +1,12 @@ -# charm-proof +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 charm-tools>=2.0.0 +requests==2.6.0 # amulet deployment helpers bzr+lp:charm-helpers#egg=charmhelpers # BEGIN: Amulet OpenStack Charm Helper Requirements @@ -7,15 +14,18 @@ bzr+lp:charm-helpers#egg=charmhelpers amulet>=1.14.3,<2.0 bundletester>=0.6.1,<1.0 aodhclient>=0.1.0 -python-ceilometerclient>=1.5.0,<2.0 -python-cinderclient>=1.4.0,<2.0 -python-glanceclient>=1.1.0,<2.0 -python-heatclient>=0.8.0,<1.0 -python-keystoneclient>=1.7.1,<2.0 -python-neutronclient>=3.1.0,<4.0 -python-novaclient>=2.30.1,<3.0 -python-openstackclient>=1.7.0,<2.0 -python-swiftclient>=2.6.0,<3.0 +python-barbicanclient>=4.0.1 +python-ceilometerclient>=1.5.0 +python-cinderclient>=1.4.0 +python-designateclient>=1.5 +python-glanceclient>=1.1.0 +python-heatclient>=0.8.0 +python-keystoneclient>=1.7.1 +python-manilaclient>=1.8.1 +python-neutronclient>=3.1.0 +python-novaclient>=2.30.1 +python-openstackclient>=1.7.0 +python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info # END: Amulet OpenStack Charm Helper Requirements diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 689a750a..ec2b1ce0 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -59,13 +59,15 @@ def _add_services(self, **kwargs): compatible with the local charm (e.g. stable or next). 
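
Stepping back to the context.py sync in the two patches above: the per-context worker math was consolidated into a module-level _calculate_workers() helper, with an unset worker-multiplier defaulting to 2 and the result capped at MAX_DEFAULT_WORKERS (4) when running in a container (https://pad.lv/1665270). The arithmetic is easy to sanity-check in isolation; a sketch with the charm plumbing stubbed out (worker_multiplier and in_container stand in for the config() and is_container() lookups):

    import multiprocessing

    MAX_DEFAULT_WORKERS = 4
    DEFAULT_MULTIPLIER = 2

    def calculate_workers(worker_multiplier=None, in_container=False):
        # None means the operator never set worker-multiplier; remember
        # that, since only unconfigured deployments get the container cap.
        multiplier = worker_multiplier or DEFAULT_MULTIPLIER
        count = int(multiprocessing.cpu_count() * multiplier)
        if multiplier > 0 and count == 0:
            # A small fractional multiplier still yields one worker.
            count = 1
        if worker_multiplier is None and in_container:
            # Avoid runaway worker counts in LXD containers on big hosts.
            count = min(count, MAX_DEFAULT_WORKERS)
        return count

    # e.g. a 24-core host: 48 workers on metal, but only 4 inside an
    # unconfigured container.
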
:param **kwargs: """ + no_origin = ['ceph-fs'] this_service = {'name': 'ceph-fs', 'units': 1} other_services = [ {'name': 'ceph-mon', 'units': 3}, {'name': 'ceph-osd', 'units': 3}, ] super(CephFsBasicDeployment, self)._add_services(this_service, - other_services) + other_services, + no_origin=no_origin) def _add_relations(self, **kwargs): """Add all of the relations for the services. @@ -81,21 +83,28 @@ def _configure_services(self, **kwargs): """Configure all of the services. :param **kwargs: """ + ceph_fs_config = { + 'source': self.source, + } # NOTE(jamespage): fix fsid to allow later validation ceph_mon_config = { 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'source': self.source, } # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { 'osd-reformat': 'yes', 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent', + 'source': self.source, } configs = { 'ceph-mon': ceph_mon_config, - 'ceph-osd': ceph_osd_config} + 'ceph-osd': ceph_osd_config, + 'ceph-fs': ceph_fs_config, + } super(CephFsBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): diff --git a/ceph-fs/src/tests/charmhelpers/__init__.py b/ceph-fs/src/tests/charmhelpers/__init__.py deleted file mode 100644 index 48867880..00000000 --- a/ceph-fs/src/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/ceph-fs/src/tests/charmhelpers/contrib/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index 9c65518e..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. 
- """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py b/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index a39ed4c8..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,827 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. 
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x {}'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
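# The mtime helpers above share one retry shape: poll, swallow the
# not-there-yet IOError, back off, give up after retry_count tries.  A
# hedged sketch of that pattern on its own (retry_until is an invented name):
import time

def retry_until(poll, retry_count=30, retry_sleep_time=10):
    """Return poll()'s first truthy result, or None after retry_count tries."""
    for _ in range(retry_count):
        try:
            result = poll()
            if result:
                return result
        except IOError:
            pass  # race avoidance: /proc/<pid> or the file may not exist yet
        time.sleep(retry_sleep_time)
    return None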
- """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. 
Expected, actual: {}, ' - '{}'.format(expected[key], value)) - amulet.raise_status(amulet.FAIL, msg=msg) - - def get_unit_hostnames(self, units): - """Return a dict of juju unit names to hostnames.""" - host_names = {} - for unit in units: - host_names[unit.info['unit_name']] = \ - str(unit.file_contents('/etc/hostname').strip()) - self.log.debug('Unit host names: {}'.format(host_names)) - return host_names - - def run_cmd_unit(self, sentry_unit, cmd): - """Run a command on a unit, return the output and exit code.""" - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` command returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - msg = ('{} `{}` command returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output), code - - def file_exists_on_unit(self, sentry_unit, file_name): - """Check if a file exists on a unit.""" - try: - sentry_unit.file_stat(file_name) - return True - except IOError: - return False - except Exception as e: - msg = 'Error checking file {}: {}'.format(file_name, e) - amulet.raise_status(amulet.FAIL, msg=msg) - - def file_contents_safe(self, sentry_unit, file_name, - max_wait=60, fatal=False): - """Get file contents from a sentry unit. Wrap amulet file_contents - with retry logic to address races where a file checks as existing, - but no longer exists by the time file_contents is called. - Return None if file not found. Optionally raise if fatal is True.""" - unit_name = sentry_unit.info['unit_name'] - file_contents = False - tries = 0 - while not file_contents and tries < (max_wait / 4): - try: - file_contents = sentry_unit.file_contents(file_name) - except IOError: - self.log.debug('Attempt {} to open file {} from {} ' - 'failed'.format(tries, file_name, - unit_name)) - time.sleep(4) - tries += 1 - - if file_contents: - return file_contents - elif not fatal: - return None - elif fatal: - msg = 'Failed to get file contents from unit.' - amulet.raise_status(amulet.FAIL, msg) - - def port_knock_tcp(self, host="localhost", port=22, timeout=15): - """Open a TCP socket to check for a listening sevice on a host. - - :param host: host name or IP address, default to localhost - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :returns: True if successful, False if connect failed - """ - - # Resolve host name if possible - try: - connect_host = socket.gethostbyname(host) - host_human = "{} ({})".format(connect_host, host) - except socket.error as e: - self.log.warn('Unable to resolve address: ' - '{} ({}) Trying anyway!'.format(host, e)) - connect_host = host - host_human = connect_host - - # Attempt socket connection - try: - knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - knock.settimeout(timeout) - knock.connect((connect_host, port)) - knock.close() - self.log.debug('Socket connect OK for host ' - '{} on port {}.'.format(host_human, port)) - return True - except socket.error as e: - self.log.debug('Socket connect FAIL for' - ' {} port {} ({})'.format(host_human, port, e)) - return False - - def port_knock_units(self, sentry_units, port=22, - timeout=15, expect_success=True): - """Open a TCP socket to check for a listening sevice on each - listed juju unit. 
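# port_knock_tcp above is a plain TCP connect with a timeout; the same
# check via the stdlib convenience wrapper:
import socket

def port_open(host, port=22, timeout=15):
    """Return True if a TCP connect to host:port succeeds within timeout."""
    try:
        socket.create_connection((host, port), timeout=timeout).close()
        return True
    except socket.error:
        return False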
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - -# amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is used for dependency injection. - - @return action_id. - """ - unit_id = unit_sentry.info["unit_name"] - command = ["juju", "action", "do", "--format=json", unit_id, action] - if params is not None: - for key, value in params.iteritems(): - command.append("{}={}".format(key, value)) - self.log.info("Running command: %s\n" % " ".join(command)) - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - action_id = data[u'Action queued with id'] - return action_id - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - _check_output parameter is used for dependency injection. - """ - command = ["juju", "action", "fetch", "--format=json", "--wait=0", - action_id] - output = _check_output(command, universal_newlines=True) - data = json.loads(output) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 9e0b07fb..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
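# For reference, the run_action/wait_on_action helpers removed above
# drive the juju 1.x CLI; the same round trip, condensed (unit and
# action values are illustrative):
import json
import subprocess

def run_and_wait(unit, action):
    out = subprocess.check_output(
        ['juju', 'action', 'do', '--format=json', unit, action],
        universal_newlines=True)
    action_id = json.loads(out)['Action queued with id']
    out = subprocess.check_output(
        ['juju', 'action', 'fetch', '--format=json', '--wait=0', action_id],
        universal_newlines=True)
    return json.loads(out).get('status') == 'completed'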
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['precise', 'trusty'], - 'mongodb': ['precise', 'trusty'], - 'nrpe': ['precise', 'trusty', 'wily', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. 
- - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. - Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. 
- https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - self.log.info('Waiting for extended status on units...') - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, - self.wily_liberty, self.trusty_mitaka, - self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton) = range(16) - - releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo, - ('wily', None): self.wily_liberty, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('yakkety', None): self.yakkety_newton, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
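# _get_openstack_release above numbers releases with range(); the same
# ordering can be kept as a list and looked up by index (a condensed
# sketch, trimmed to a few entries):
RELEASE_ORDER = [
    ('precise', None),                  # essex
    ('trusty', None),                   # icehouse
    ('trusty', 'cloud:trusty-kilo'),    # kilo
    ('xenial', None),                   # mitaka
    ('xenial', 'cloud:xenial-newton'),  # newton
]

def release_index(series, openstack=None):
    return RELEASE_ORDER.index((series, openstack))

assert release_index('xenial', None) > release_index('trusty', None)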
- """ - releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - pools = [ - 'rbd', - 'cinder', - 'glance' - ] - else: - # Juno or earlier - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index e4546c8c..00000000 --- a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1124 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib - -import cinderclient.v1.client as cinder_client -import glanceclient.v1.client as glance_client -import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 - -import novaclient.client as nova_client -import pika -import swiftclient - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != 3: - return 'Unexpected number of endpoints found' - - def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. 
- - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. - """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. 
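# The v3 catalogue check above sorts both endpoint lists on 'interface'
# before comparing pairwise; the core of that idea with toy data:
expected = [{'interface': 'public'}, {'interface': 'admin'}]
actual = [{'interface': 'admin'}, {'interface': 'public'}]
pairs = zip(sorted(expected, key=lambda x: x['interface']),
            sorted(actual, key=lambda x: x['interface']))
assert all(e['interface'] == a['interface'] for e, a in pairs)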
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): - """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - else: - ep = base_ep + "/v3" - auth = keystone_id_v3.Password( - user_domain_name='admin_domain', - username=user, - password=password, - domain_name='admin_domain', - auth_url=ep, - ) - sess = keystone_session.Session(auth=auth) - return keystone_client_v3.Client(session=sess) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - 
self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.images.find(name=image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. 
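# create_instance above polls nova until the server goes ACTIVE (or
# ~3 minutes pass); the wait loop on its own, given an authenticated
# novaclient handle:
import time

def wait_active(nova, instance, tries=60, delay=3):
    for _ in range(tries):
        instance = nova.servers.get(instance.id)
        if instance.status == 'ACTIVE':
            return instance
        time.sleep(delay)
    return None  # creation timed out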
- - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
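# create_cinder_volume above accepts at most one of image, source volume
# or snapshot; the parameter guard can be reduced to a count (a sketch,
# not the method's exact branching):
def check_volume_sources(img_id=None, src_vol_id=None, snap_id=None):
    given = [x for x in (img_id, src_vol_id, snap_id) if x]
    if len(given) > 1:
        raise ValueError('img_id, src_vol_id and snap_id are exclusive')

check_volume_sources(img_id='deadbeef')  # fine; two or more would raise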
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
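# get_ceph_pools above splits "0 data,1 metadata,2 rbd" style output from
# `ceph osd lspools`; the parse step by itself:
def parse_lspools(output):
    pools = {}
    for pool in str(output).split(','):
        pool_id_name = pool.split(' ')
        if len(pool_id_name) == 2:
            pools[pool_id_name[1]] = int(pool_id_name[0])
    return pools

assert parse_lspools('0 data,1 metadata,2 rbd,') == {
    'data': 0, 'metadata': 1, 'rbd': 2}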
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
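# validate_ceph_pool_samples above expects grow-then-shrink across the
# three samples; the comparison in isolation:
def samples_ok(samples):
    original, created, deleted = samples
    return created > original and deleted < created

assert samples_ok([10, 25, 12])
assert not samples_ok([10, 10, 12])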
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
- - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. 
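# rmq_ssl_is_enabled_on_unit above keys off two substring checks against
# rabbitmq.config; its decision table collapses to this condensed sketch
# (the logging and the unreachable "unknown" branch are dropped):
def ssl_enabled(conf_contents, port=None):
    conf_ssl = 'ssl' in conf_contents
    if port:
        return conf_ssl and str(port) in conf_contents
    return conf_ssl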
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) diff --git a/ceph-fs/src/tests/gate-basic-xenial-ocata b/ceph-fs/src/tests/gate-basic-xenial-ocata index 141a3d80..86f6a645 100755 --- a/ceph-fs/src/tests/gate-basic-xenial-ocata +++ b/ceph-fs/src/tests/gate-basic-xenial-ocata @@ -19,5 +19,7 @@ from basic_deployment import CephFsBasicDeployment if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='xenial') + deployment = CephFsBasicDeployment(series='xenial', + openstack='cloud:xenial-ocata', + source='cloud:xenial-updates/ocata') deployment.run_tests() diff --git a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-fs/src/tests/gate-basic-yakkety-newton old mode 100644 new mode 100755 similarity index 65% rename from ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py rename to ceph-fs/src/tests/gate-basic-yakkety-newton index d7567b86..f1846705 --- a/ceph-fs/src/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/ceph-fs/src/tests/gate-basic-yakkety-newton @@ -1,4 +1,6 @@ -# Copyright 2014-2015 Canonical Limited. +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,3 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + +"""Amulet tests on a basic ceph deployment on yakkety-newton.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='yakkety') + deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-zesty-ocata b/ceph-fs/src/tests/gate-basic-zesty-ocata old mode 100644 new mode 100755 diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 3bc4d0bb..276db2fc 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 AMULET_SETUP_TIMEOUT=2700 whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_URL +passenv = HOME TERM AMULET_* CS_API_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install --allow-unverified python-apt {opts} {packages} From 7e90290471a90c57c4f859f1ffdb3bbf7de411c1 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 5 May 2017 10:15:04 -0700 Subject: [PATCH 1312/2699] Network space aware address for cluster relation Use the get_relation_ip function for selecting addresses for the cluster relationship. Including overrides for the admin, internal, and public config settings or extra bindings. Partial-Bug: #1687439 Change-Id: Ie1280cd2068b97075f1257c2ca95042dc2aa689f --- ceph-radosgw/hooks/hooks.py | 18 ++++++---------- ceph-radosgw/unit_tests/test_hooks.py | 31 +++++++++++---------------- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 38de5edf..cb3a01b5 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -25,7 +25,6 @@ relation_ids, related_units, config, - unit_get, open_port, relation_set, log, @@ -43,8 +42,7 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv6_addr, + get_relation_ip, get_iface_for_address, get_netmask_for_address, is_ipv6, @@ -175,7 +173,7 @@ def mon_relation(rid=None, unit=None): @hooks.hook('gateway-relation-joined') def gateway_relation(): - relation_set(hostname=unit_get('private-address'), + relation_set(hostname=get_relation_ip('gateway-relation'), port=config('port')) @@ -235,17 +233,15 @@ def cluster_joined(rid=None): settings = {} for addr_type in ADDRESS_TYPES: - address = get_address_in_network( - config('os-{}-network'.format(addr_type)) - ) + address = get_relation_ip( + addr_type, + cidr_network=config('os-{}-network'.format(addr_type))) if address: settings['{}-address'.format(addr_type)] = address - if config('prefer-ipv6'): - private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] - settings['private-address'] = private_addr + settings['private-address'] = get_relation_ip('cluster') - relation_set(relation_id=rid, **settings) + relation_set(relation_id=rid, relation_settings=settings) @hooks.hook('cluster-relation-changed') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 007cc35d..a379b4e5 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -50,9 +50,9 @@ 'status_set', 'subprocess', 'sys', - 'unit_get', 'get_hacluster_config', 'update_dns_ha_resource_params', + 'get_relation_ip', ] @@ -125,9 +125,9 @@ def test_mon_relation_send_broker_request(self, self.assertTrue(mock_send_request_if_needed.called) def 
test_gateway_relation(self): - self.unit_get.return_value = 'myserver' + self.get_relation_ip.return_value = '10.0.0.1' ceph_hooks.gateway_relation() - self.relation_set.assert_called_with(hostname='myserver', port=80) + self.relation_set.assert_called_with(hostname='10.0.0.1', port=80) def test_start(self): ceph_hooks.start() @@ -166,7 +166,6 @@ def test_identity_joined(self, _config, _resolve_address): _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') self.test_config.set('operator-roles', 'admin') - self.unit_get.return_value = 'myserv' ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', @@ -218,17 +217,11 @@ def test_canonical_url_ipv6(self, _config, _unit_get, _is_clustered): self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), 'http://[%s]' % ipv6_addr) - @patch.object(ceph_hooks, 'get_address_in_network') - def test_cluster_joined(self, mock_get_addr): - addrs = {'10.0.0.0/24': '10.0.0.1', - '10.0.1.0/24': '10.0.1.1', - '10.0.2.0/24': '10.0.2.1'} - - def fake_get_address_in_network(network): - return addrs.get(network) - - mock_get_addr.side_effect = fake_get_address_in_network - + def test_cluster_joined(self): + self.get_relation_ip.side_effect = ['10.0.0.1', + '10.0.1.1', + '10.0.2.1', + '10.0.3.1'] self.test_config.set('os-public-network', '10.0.0.0/24') self.test_config.set('os-admin-network', '10.0.1.0/24') self.test_config.set('os-internal-network', '10.0.2.0/24') @@ -236,9 +229,11 @@ def fake_get_address_in_network(network): ceph_hooks.cluster_joined() self.relation_set.assert_has_calls( [call(relation_id=None, - **{'admin-address': '10.0.1.1', - 'internal-address': '10.0.2.1', - 'public-address': '10.0.0.1'})]) + relation_settings={ + 'admin-address': '10.0.0.1', + 'public-address': '10.0.2.1', + 'internal-address': '10.0.1.1', + 'private-address': '10.0.3.1'})]) def test_cluster_changed(self): _id_joined = self.patch('identity_joined') From 84e0bcb2a7e293fa39c04590cf2970bca5d992b0 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 12 May 2017 07:04:44 +0200 Subject: [PATCH 1313/2699] Updates for pike b1 Resync charm helpers for pike version support. Add pike tests, but leave disabled until all charms support pike. 
Change-Id: Ie5fd7cf71beac5b1b88522c8beb42118d0e177e9 --- .../contrib/openstack/amulet/deployment.py | 6 ++- .../charmhelpers/contrib/openstack/context.py | 4 ++ .../charmhelpers/contrib/openstack/utils.py | 39 +++++++++++++++++++ .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 16 ++++++++ .../contrib/openstack/amulet/deployment.py | 6 ++- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/tests/gate-basic-artful-pike | 23 +++++++++++ ceph-mon/tests/gate-basic-xenial-pike | 25 ++++++++++++ 9 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 ceph-mon/tests/gate-basic-artful-pike create mode 100644 ceph-mon/tests/gate-basic-xenial-pike diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 2adf2cb8..ea93159d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1393,6 +1393,10 @@ def __call__(self): 'rel_key': 'enable-l3ha', 'default': False, }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e13450c1..161c786b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -111,6 +111,8 @@ 'newton', 'ocata', 'pike', + 'queens', + 'rocky', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -126,6 +128,7 @@ ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) @@ -142,6 +145,7 @@ ('2016.1', 'mitaka'), ('2016.2', 'newton'), ('2017.1', 'ocata'), + ('2017.2', 'pike'), ]) # The ugly duckling - must list releases oldest to newest @@ -170,6 +174,8 @@ ['2.8.0', '2.9.0', '2.10.0']), ('ocata', ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0']), ]) # >= Liberty version->codename mapping @@ -179,54 +185,81 @@ ('13', 'mitaka'), ('14', 
'newton'), ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), ('12', 'mitaka'), ('13', 'newton'), ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), } @@ -579,6 +612,12 @@ def configure_installation_source(rel): 'ocata': 'xenial-updates/ocata', 'ocata/updates': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', + 'pike': 'xenial-updates/pike', + 'pike/updates': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'queens': 'xenial-updates/queens', + 'queens/updates': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', } try: diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 82ac80ff..7bc6cc7e 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -113,6 +113,22 @@ 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/newton': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/newton': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/newton': 'xenial-proposed/queens', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
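For context on how the Pike and Queens pocket entries above are consumed: charmhelpers' add_source() resolves a 'cloud:' style source against CLOUD_ARCHIVE_POCKETS and writes the matching Ubuntu Cloud Archive apt line (after installing ubuntu-cloud-keyring). A minimal, self-contained sketch of that lookup follows; the real helper also handles PPAs, raw deb lines, GPG keys, and error reporting, so treat this as illustrative only.

    # Trimmed copy of the pocket map extended in the diff above.
    CLOUD_ARCHIVE_POCKETS = {
        'pike': 'xenial-updates/pike',
        'xenial-pike': 'xenial-updates/pike',
        'pike/proposed': 'xenial-proposed/pike',
        'queens': 'xenial-updates/queens',
    }

    CLOUD_ARCHIVE = ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
                     '{} main\n')

    def cloud_archive_line(source):
        """Resolve e.g. 'cloud:xenial-pike' to an apt sources.list line."""
        pocket = source.split(':', 1)[1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise ValueError('Unknown cloud archive pocket: {}'.format(pocket))
        return CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])

    # cloud_archive_line('cloud:xenial-pike')
    # -> 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/pike main\n'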
diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-mon/tests/gate-basic-artful-pike b/ceph-mon/tests/gate-basic-artful-pike new file mode 100644 index 00000000..58c1b549 --- /dev/null +++ b/ceph-mon/tests/gate-basic-artful-pike @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on artful-pike.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='artful') + deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-pike b/ceph-mon/tests/gate-basic-xenial-pike new file mode 100644 index 00000000..2fafe3b0 --- /dev/null +++ b/ceph-mon/tests/gate-basic-xenial-pike @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-pike.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-pike', + source='cloud:xenial-updates/pike') + deployment.run_tests() From f4c9ee70a23cc1d279ccb0b9152f52a58cb4819e Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 12 May 2017 07:07:35 +0200 Subject: [PATCH 1314/2699] Updates for pike b1 Resync charmhelpers for pike version support. Add pike tests but leave disabled until all charms support pike. Change-Id: Iafcbca139ca18afd3243ba05cf5170dd1ca57214 --- .../charmhelpers/contrib/openstack/context.py | 4 ++ .../charmhelpers/contrib/openstack/utils.py | 39 +++++++++++++++++++ .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 16 ++++++++ .../contrib/openstack/amulet/deployment.py | 6 ++- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/tests/gate-basic-artful-pike | 23 +++++++++++ ceph-osd/tests/gate-basic-xenial-pike | 25 ++++++++++++ 8 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/tests/gate-basic-artful-pike create mode 100644 ceph-osd/tests/gate-basic-xenial-pike diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 2adf2cb8..ea93159d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1393,6 +1393,10 @@ def __call__(self): 'rel_key': 'enable-l3ha', 'default': False, }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index e13450c1..161c786b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -111,6 +111,8 @@ 'newton', 'ocata', 'pike', + 'queens', + 'rocky', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -126,6 +128,7 @@ ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) @@ -142,6 +145,7 @@ ('2016.1', 'mitaka'), ('2016.2', 'newton'), ('2017.1', 'ocata'), + ('2017.2', 'pike'), ]) # The ugly duckling - must list releases oldest to newest @@ -170,6 +174,8 @@ ['2.8.0', '2.9.0', '2.10.0']), ('ocata', ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0']), ]) # >= Liberty version->codename mapping @@ -179,54 +185,81 @@ ('13', 'mitaka'), ('14', 'newton'), ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), ('6', 
'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), ('12', 'mitaka'), ('13', 'newton'), ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), } @@ -579,6 +612,12 @@ def configure_installation_source(rel): 'ocata': 'xenial-updates/ocata', 'ocata/updates': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', + 'pike': 'xenial-updates/pike', + 'pike/updates': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'queens': 'xenial-updates/queens', + 'queens/updates': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', } try: diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 82ac80ff..7bc6cc7e 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -113,6 +113,22 @@ 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/newton': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/newton': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/newton': 'xenial-proposed/queens', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
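The version-to-codename entries added above feed charmhelpers' release detection (get_os_codename_package and friends), which maps an installed package's major version onto an OpenStack release name. A minimal sketch, assuming the tables live in a per-package dict as in charmhelpers' PACKAGE_CODENAMES; the real helper also reads the installed version from the apt cache and special-cases Swift's per-release version lists (the "ugly duckling" map in this same diff):

    from collections import OrderedDict

    # Trimmed copy of the mapping extended above; only keystone shown.
    PACKAGE_CODENAMES = {
        'keystone': OrderedDict([
            ('8', 'liberty'),
            ('9', 'mitaka'),
            ('10', 'newton'),
            ('11', 'ocata'),
            ('12', 'pike'),
        ]),
    }

    def codename_from_version(package, version):
        """Map a package major version (e.g. '12.0.0') to a codename."""
        major = version.split('.')[0]
        try:
            return PACKAGE_CODENAMES[package][major]
        except KeyError:
            raise ValueError('Unknown release for {} {}'.format(package,
                                                                version))

    assert codename_from_version('keystone', '12.0.0') == 'pike'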
diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-osd/tests/gate-basic-artful-pike b/ceph-osd/tests/gate-basic-artful-pike new file mode 100644 index 00000000..d15bfcdb --- /dev/null +++ b/ceph-osd/tests/gate-basic-artful-pike @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-osd deployment on artful-pike.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='artful') + deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-pike b/ceph-osd/tests/gate-basic-xenial-pike new file mode 100644 index 00000000..cc4a9cd2 --- /dev/null +++ b/ceph-osd/tests/gate-basic-xenial-pike @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-osd deployment on xenial-pike.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='xenial', + openstack='cloud:xenial-pike', + source='cloud:xenial-updates/pike') + deployment.run_tests() From c84d14efe3001bfaa53e81c76b203fdd82a967d4 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 12 May 2017 07:10:23 +0200 Subject: [PATCH 1315/2699] Updates for pike b1 Resync charmhelpers for pike version support. Add pike tests but leave disabled until all charms support pike. Change-Id: I721710f0e4353340bc0e970304580bb21fc59321 --- .../contrib/openstack/amulet/deployment.py | 6 ++- .../charmhelpers/contrib/openstack/context.py | 4 ++ .../charmhelpers/contrib/openstack/utils.py | 39 +++++++++++++++++++ .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/fetch/ubuntu.py | 16 ++++++++ .../contrib/openstack/amulet/deployment.py | 6 ++- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-radosgw/tests/gate-basic-artful-pike | 23 +++++++++++ ceph-radosgw/tests/gate-basic-xenial-pike | 25 ++++++++++++ 9 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 ceph-radosgw/tests/gate-basic-artful-pike create mode 100644 ceph-radosgw/tests/gate-basic-xenial-pike diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 2adf2cb8..ea93159d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1393,6 +1393,10 @@ def __call__(self): 'rel_key': 'enable-l3ha', 'default': False, }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 
e13450c1..161c786b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -111,6 +111,8 @@ 'newton', 'ocata', 'pike', + 'queens', + 'rocky', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -126,6 +128,7 @@ ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) @@ -142,6 +145,7 @@ ('2016.1', 'mitaka'), ('2016.2', 'newton'), ('2017.1', 'ocata'), + ('2017.2', 'pike'), ]) # The ugly duckling - must list releases oldest to newest @@ -170,6 +174,8 @@ ['2.8.0', '2.9.0', '2.10.0']), ('ocata', ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0']), ]) # >= Liberty version->codename mapping @@ -179,54 +185,81 @@ ('13', 'mitaka'), ('14', 'newton'), ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), ('12', 'mitaka'), ('13', 'newton'), ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), } @@ -579,6 +612,12 @@ def configure_installation_source(rel): 'ocata': 'xenial-updates/ocata', 'ocata/updates': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', + 'pike': 'xenial-updates/pike', + 'pike/updates': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'queens': 'xenial-updates/queens', + 'queens/updates': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', } try: diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 82ac80ff..7bc6cc7e 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -113,6 +113,22 @@ 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/newton': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 
'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/newton': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/newton': 'xenial-proposed/queens', } APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-radosgw/tests/gate-basic-artful-pike b/ceph-radosgw/tests/gate-basic-artful-pike new file mode 100644 index 00000000..6ac2a443 --- /dev/null +++ b/ceph-radosgw/tests/gate-basic-artful-pike @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph-radosgw deployment on artful-pike.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='artful') + deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-pike b/ceph-radosgw/tests/gate-basic-xenial-pike new file mode 100644 index 00000000..dbf86912 --- /dev/null +++ b/ceph-radosgw/tests/gate-basic-xenial-pike @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on xenial-pike.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='xenial', + openstack='cloud:xenial-pike', + source='cloud:xenial-updates/pike') + deployment.run_tests() From 9cd582b264af16ed35041fade77e2b17e7911022 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 16 May 2017 11:03:55 -0700 Subject: [PATCH 1316/2699] Do not install NTP when installed in a container Use determine_packages() to avoid installing NTP when in a container. Sync charms.ceph to get ceph.determine_packages(). Change-Id: I7a9e8a95b066fc330c1443b87b88f8b7f6b0baee Partial-Bug: #169051 --- ceph-mon/hooks/ceph_hooks.py | 5 +- ceph-mon/lib/ceph/__init__.py | 610 ++++++++++++++++++++++++------ ceph-mon/lib/ceph/ceph_broker.py | 6 +- ceph-mon/lib/ceph/ceph_helpers.py | 31 +- ceph-mon/lib/setup.py | 85 +++++ 5 files changed, 600 insertions(+), 137 deletions(-) create mode 100644 ceph-mon/lib/setup.py diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 321b0ffc..c1476b78 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -125,7 +125,7 @@ def install(): execd_preinstall() add_source(config('source'), config('key')) apt_update(fatal=True) - apt_install(packages=ceph.PACKAGES, fatal=True) + apt_install(packages=ceph.determine_packages(), fatal=True) def get_ceph_context(): @@ -527,7 +527,8 @@ def client_relation_changed(): @harden() def upgrade_charm(): emit_cephconf() - apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True) + apt_install(packages=filter_installed_packages( + ceph.determine_packages()), fatal=True) ceph.update_monfs() mon_relation_joined() if is_relation_made("nrpe-external-master"): diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index db205d5a..ad67965a 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from _ctypes import POINTER, byref import ctypes import collections import json @@ -26,49 +25,56 @@ import shutil import pyudev +from datetime import datetime + from charmhelpers.core import hookenv +from charmhelpers.core import templating from charmhelpers.core.host import ( - mkdir, chownr, - service_restart, - lsb_release, cmp_pkgrevno, - service_stop, + lsb_release, + mkdir, mounts, + owner, + service_restart, service_start, + service_stop, CompareHostReleases, + is_container, ) from charmhelpers.core.hookenv import ( - log, - ERROR, cached, + config, + log, status_set, - WARNING, DEBUG, - config + ERROR, + WARNING, ) -from charmhelpers.core.services import render_template from charmhelpers.fetch import ( apt_cache, - add_source, - apt_install, - apt_update -) + add_source, apt_install, apt_update) from charmhelpers.contrib.storage.linux.ceph import ( monitor_key_set, monitor_key_exists, monitor_key_get, - get_mon_map + get_mon_map, ) from charmhelpers.contrib.storage.linux.utils import ( is_block_device, zap_disk, - is_device_mounted + is_device_mounted, ) from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source + get_os_codename_install_source, ) +from ceph.ceph_helpers import check_output + +CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') +OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') +HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') + LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] @@ -118,6 +124,42 @@ } +class Partition(object): + def __init__(self, name, number, size, start, end, sectors, uuid): + """ + A block device partition + :param name: Name of block device + :param number: Partition number + :param size: Capacity of the device + :param start: Starting block + :param end: Ending block + :param sectors: Number of blocks + :param uuid: UUID of the partition + """ + self.name = name, + self.number = number + self.size = size + self.start = start + self.end = end + self.sectors = sectors + self.uuid = uuid + + def __str__(self): + return "number: {} start: {} end: {} sectors: {} size: {} " \ + "name: {} uuid: {}".format(self.number, self.start, + self.end, + self.sectors, self.size, + self.name, self.uuid) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + return not self.__eq__(other) + + def unmounted_disks(): """List of unmounted block devices on the current host.""" disks = [] @@ -182,7 +224,7 @@ def tune_nic(network_interface): try: # Apply the settings log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) + check_output(["sysctl", "-p", sysctl_file]) except subprocess.CalledProcessError as err: log('sysctl -p {} failed with error {}'.format(sysctl_file, err.output), @@ -233,14 +275,21 @@ def persist_settings(settings_dict): The settings_dict should be in the form of {"uuid": {"key":"value"}} :param settings_dict: dict of settings to save """ - hdparm_path = os.path.join(os.sep, 'etc', 'hdparm.conf') + if not settings_dict: + return + try: - with open(hdparm_path, 'w') as hdparm: - hdparm.write(render_template('hdparm.conf', settings_dict)) + templating.render(source='hdparm.conf', target=HDPARM_FILE, + context=settings_dict) except IOError as err: log("Unable to open {path} because of error: {error}".format( - path=hdparm_path, - error=err.message), level=ERROR) + path=HDPARM_FILE, error=err.message), level=ERROR) + except Exception as e: + # The 
templating.render can raise a jinja2 exception if the + # template is not found. Rather than polluting the import + # space of this charm, simply catch Exception + log('Unable to render {path} due to error: {error}'.format( + path=HDPARM_FILE, error=e.message), level=ERROR) def set_max_sectors_kb(dev_name, max_sectors_size): @@ -314,9 +363,9 @@ def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): log('Setting read ahead to {} for device {}'.format( read_ahead_sectors, dev_name)) - subprocess.check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) + check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) except subprocess.CalledProcessError as e: log('hdparm failed with error: {}'.format(e.output), level=ERROR) @@ -324,52 +373,22 @@ def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): def get_block_uuid(block_dev): """ - This queries blkid to get the uuid for a block device. Note: This function - needs to be called with root priv. It will raise an error otherwise. + This queries blkid to get the uuid for a block device. :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. Raises OSError + :return: The UUID of the device or None on Error. """ try: - blkid = ctypes.cdll.LoadLibrary("libblkid.so") - # Header signature - # extern int blkid_probe_lookup_value(blkid_probe pr, const char *name, - # const char **data, size_t *len); - blkid.blkid_new_probe_from_filename.argtypes = [ctypes.c_char_p] - blkid.blkid_probe_lookup_value.argtypes = [ctypes.c_void_p, - ctypes.c_char_p, - POINTER(ctypes.c_char_p), - POINTER(ctypes.c_ulong)] - except OSError as err: - log('get_block_uuid loading libblkid.so failed with error: {}'.format( - os.strerror(err.errno)), - level=ERROR) - raise err - if not os.path.exists(block_dev): + block_info = check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] return None - probe = blkid.blkid_new_probe_from_filename(ctypes.c_char_p(block_dev)) - if probe < 0: - log('get_block_uuid new_probe_from_filename failed: {}'.format( - os.strerror(probe)), - level=ERROR) - raise OSError(probe, os.strerror(probe)) - result = blkid.blkid_do_probe(probe) - if result != 0: - log('get_block_uuid do_probe failed with error: {}'.format( - os.strerror(result)), - level=ERROR) - raise OSError(result, os.strerror(result)) - uuid = ctypes.c_char_p() - result = blkid.blkid_probe_lookup_value(probe, - ctypes.c_char_p( - 'UUID'.encode('ascii')), - byref(uuid), None) - if result < 0: - log('get_block_uuid lookup_value failed with error: {}'.format( - os.strerror(result)), + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), level=ERROR) - raise OSError(result, os.strerror(result)) - blkid.blkid_free_probe(probe) - return ctypes.string_at(uuid).decode('ascii') + return None def check_max_sectors(save_settings_dict, @@ -508,7 +527,7 @@ def get_osd_weight(osd_id): Also raises CalledProcessError if our ceph command fails """ try: - tree = subprocess.check_output( + tree = check_output( ['ceph', 'osd', 'tree', '--format=json']) try: json_tree = json.loads(tree) @@ -535,7 +554,7 @@ def get_osd_tree(service): Also raises CalledProcessError if our ceph command fails """ try: - tree = subprocess.check_output( + tree = check_output( ['ceph', '--id', service, 'osd', 'tree', '--format=json']) try: @@ -570,6 +589,43 @@ def get_osd_tree(service): raise 
+def _get_child_dirs(path): + """Returns a list of directory names in the specified path. + + :param path: a full path listing of the parent directory to return child + directory names + :return: list. A list of child directories under the parent directory + :raises: ValueError if the specified path does not exist or is not a + directory, + OSError if an error occurs reading the directory listing + """ + if not os.path.exists(path): + raise ValueError('Specfied path "%s" does not exist' % path) + if not os.path.isdir(path): + raise ValueError('Specified path "%s" is not a directory' % path) + + files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] + return list(filter(os.path.isdir, files_in_dir)) + + +def _get_osd_num_from_dirname(dirname): + """Parses the dirname and returns the OSD id. + + Parses a string in the form of 'ceph-{osd#}' and returns the osd number + from the directory name. + + :param dirname: the directory name to return the OSD number from + :return int: the osd number the directory name corresponds to + :raises ValueError: if the osd number cannot be parsed from the provided + directory name. + """ + match = re.search('ceph-(?P\d+)', dirname) + if not match: + raise ValueError("dirname not in correct format: %s" % dirname) + + return match.group('osd_id') + + def get_local_osd_ids(): """ This will list the /var/lib/ceph/osd/* directories and try @@ -675,7 +731,7 @@ def is_quorum(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(check_output(cmd)) except subprocess.CalledProcessError: return False except ValueError: @@ -702,7 +758,7 @@ def is_leader(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(check_output(cmd)) except subprocess.CalledProcessError: return False except ValueError: @@ -746,9 +802,12 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation ] @@ -809,7 +868,7 @@ def replace_osd(dead_osd_number, # Drop this osd out of the cluster. 
This will begin a # rebalance operation status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output([ + check_output([ 'ceph', '--id', 'osd-upgrade', @@ -820,8 +879,8 @@ def replace_osd(dead_osd_number, if systemd(): service_stop('ceph-osd@{}'.format(dead_osd_number)) else: - subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]), + check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]) # umount if still mounted ret = umount(mount_point) if ret < 0: @@ -829,20 +888,20 @@ def replace_osd(dead_osd_number, mount_point, os.strerror(ret))) # Clean up the old mount point shutil.rmtree(mount_point) - subprocess.check_output([ + check_output([ 'ceph', '--id', 'osd-upgrade', 'osd', 'crush', 'remove', 'osd.{}'.format(dead_osd_number)]) # Revoke the OSDs access keys - subprocess.check_output([ + check_output([ 'ceph', '--id', 'osd-upgrade', 'auth', 'del', 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output([ + check_output([ 'ceph', '--id', 'osd-upgrade', @@ -859,17 +918,48 @@ def replace_osd(dead_osd_number, log('replace_osd failed with error: ' + e.output) -def is_osd_disk(dev): +def get_partition_list(dev): + """ + Lists the partitions of a block device + :param dev: Path to a block device. ex: /dev/sda + :return: :raise: Returns a list of Partition objects. + Raises CalledProcessException if lsblk fails + """ + partitions_list = [] try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True + partitions = get_partitions(dev) + # For each line of output + for partition in partitions: + parts = partition.split() + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name=parts[5], + uuid=parts[6]) + ) + return partitions_list except subprocess.CalledProcessError: - pass + raise + + +def is_osd_disk(dev): + partitions = get_partition_list(dev) + for partition in partitions: + try: + info = check_output(['sgdisk', '-i', partition.number, dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError as e: + log("sgdisk inspection of partition {} on {} failed with " + "error: {}. Skipping".format(partition.minor, dev, e.message), + level=ERROR) return False @@ -942,7 +1032,7 @@ def generate_monitor_secret(): '--name=mon.', '--gen-key' ] - res = subprocess.check_output(cmd) + res = check_output(cmd) return "{}==".format(res.split('=')[1].strip()) @@ -1090,8 +1180,8 @@ def create_named_keyring(entity, name, caps=None): ] for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(check_output(cmd).strip()) # IGNORE:E1103 def get_upgrade_key(): @@ -1108,7 +1198,7 @@ def get_named_key(name, caps=None, pool_list=None): """ try: # Does the key already exist? 
- output = subprocess.check_output( + output = check_output( [ 'sudo', '-u', ceph_user(), @@ -1148,8 +1238,8 @@ def get_named_key(name, caps=None, pool_list=None): pools = " ".join(['pool={0}'.format(i) for i in pool_list]) subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling subprocess.check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(check_output(cmd).strip()) # IGNORE:E1103 def upgrade_key_caps(key, caps): @@ -1241,7 +1331,7 @@ def maybe_zap_journal(journal_dev): def get_partitions(dev): cmd = ['partx', '--raw', '--noheadings', dev] try: - out = subprocess.check_output(cmd).splitlines() + out = check_output(cmd).splitlines() log("get partitions: {}".format(out), level=DEBUG) return out except subprocess.CalledProcessError as e: @@ -1325,7 +1415,7 @@ def osdize_dir(path, encrypt=False): if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', level=ERROR) - raise + return mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) chownr('/var/lib/ceph', ceph_user(), ceph_user()) @@ -1351,7 +1441,7 @@ def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: - result = subprocess.check_output(cmd) + result = check_output(cmd) return result.split() except subprocess.CalledProcessError: return [] @@ -1367,9 +1457,9 @@ def get_cephfs(service): # This command wasn't introduced until 0.86 ceph return [] try: - output = subprocess.check_output(["ceph", - '--id', service, - "fs", "ls"]) + output = check_output(["ceph", + '--id', service, + "fs", "ls"]) if not output: return [] """ @@ -1494,7 +1584,7 @@ def upgrade_monitor(new_version): service_stop('ceph-mon@{}'.format(mon_id)) else: service_stop('ceph-mon-all') - apt_install(packages=PACKAGES, fatal=True) + apt_install(packages=determine_packages(), fatal=True) # Ensure the files and directories under /var/lib/ceph is chowned # properly as part of the move to the Jewel release, which moved the @@ -1547,6 +1637,7 @@ def lock_and_roll(upgrade_key, service, my_name, version): my_name, version, stop_timestamp)) + status_set('maintenance', 'Finishing upgrade') monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, my_name, version), @@ -1669,42 +1760,198 @@ def upgrade_osd(new_version): add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the ceph sources failed with message: {}".format( err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) - try: - if systemd(): - for osd_id in get_local_osd_ids(): - service_stop('ceph-osd@{}'.format(osd_id)) - else: - service_stop('ceph-osd-all') - apt_install(packages=PACKAGES, fatal=True) - # Ensure the files and directories under /var/lib/ceph is chowned - # properly as part of the move to the Jewel release, which moved the - # ceph daemons to running as ceph:ceph instead of root:root. Only do - # it when necessary as this is an expensive operation to run. - if new_version == 'jewel': - owner = ceph_user() - status_set('maintenance', 'Updating file ownership for OSDs') - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=owner, - group=owner, - follow_links=True) + try: + # Upgrade the packages before restarting the daemons. 
+ status_set('maintenance', 'Upgrading packages to %s' % new_version) + apt_install(packages=determine_packages(), fatal=True) + + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for path in non_osd_dirs: + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. + for osd_dir in _get_child_dirs(OSD_BASE_DIR): + try: + osd_num = _get_osd_num_from_dirname(osd_dir) + _upgrade_single_osd(osd_num, osd_dir) + except ValueError as ex: + # Directory could not be parsed - junk directory? + log('Could not parse osd directory %s: %s' % (osd_dir, ex), + WARNING) + continue - if systemd(): - for osd_id in get_local_osd_ids(): - service_start('ceph-osd@{}'.format(osd_id)) - else: - service_start('ceph-osd-all') - except subprocess.CalledProcessError as err: + except (subprocess.CalledProcessError, IOError) as err: log("Stopping ceph and upgrading packages failed " "with message: {}".format(err.message)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) +def _upgrade_single_osd(osd_num, osd_dir): + """Upgrades the single OSD directory. + + :param osd_num: the num of the OSD + :param osd_dir: the directory of the OSD to upgrade + :raises CalledProcessError: if an error occurs in a command issued as part + of the upgrade process + :raises IOError: if an error occurs reading/writing to a file as part + of the upgrade process + """ + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) + + +def stop_osd(osd_num): + """Stops the specified OSD number. + + :param osd_num: the osd number to stop + """ + if systemd(): + service_stop('ceph-osd@{}'.format(osd_num)) + else: + service_stop('ceph-osd', id=osd_num) + + +def start_osd(osd_num): + """Starts the specified OSD number. + + :param osd_num: the osd number to start. + """ + if systemd(): + service_start('ceph-osd@{}'.format(osd_num)) + else: + service_start('ceph-osd', id=osd_num) + + +def disable_osd(osd_num): + """Disables the specified OSD number. + + Ensures that the specified osd will not be automatically started at the + next reboot of the system. Due to differences between init systems, + this method cannot make any guarantees that the specified osd cannot be + started manually. + + :param osd_num: the osd id which should be disabled. 
+    :raises CalledProcessError: if an error occurs invoking the systemd cmd
+        to disable the OSD
+    :raises IOError, OSError: if the attempt to read/remove the ready file in
+        an upstart enabled system fails
+    """
+    if systemd():
+        # When running under systemd, the individual ceph-osd daemons run as
+        # templated units and can be directly addressed by referring to the
+        # templated service name ceph-osd@<osd_num>. Additionally, systemd
+        # allows one to disable a specific templated unit by running the
+        # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the
+        # OSD should remain disabled until re-enabled via systemd.
+        # Note: disabling an already disabled service in systemd returns 0, so
+        # there is no need to check whether it is enabled or not.
+        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # Neither upstart itself nor the ceph-osd upstart script provides a
+        # way to disable the automatic starting of an OSD. The specific OSD
+        # cannot be prevented from running manually, however it can be
+        # prevented from running automatically on reboot by removing the
+        # 'ready' file in the OSD's root directory. This is due to the
+        # ceph-osd-all upstart script checking for the presence of this file
+        # before starting the OSD.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        if os.path.exists(ready_file):
+            os.unlink(ready_file)
+
+
+def enable_osd(osd_num):
+    """Enables the specified OSD number.
+
+    Ensures that the specified osd_num will be enabled and ready to start
+    automatically in the event of a reboot.
+
+    :param osd_num: the osd id which should be enabled.
+    :raises CalledProcessError: if the call to the systemd command issued
+        fails when enabling the service
+    :raises IOError: if the attempt to write the ready file in an upstart
+        enabled system fails
+    """
+    if systemd():
+        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # When running on upstart, the OSDs are started via the ceph-osd-all
+        # upstart script which will only start the osd if it has a 'ready'
+        # file. Make sure that file exists.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        with open(ready_file, 'w') as f:
+            f.write('ready')
+
+        # Make sure the correct user owns the file. It shouldn't be necessary
+        # as the upstart script should run with root privileges, but it's
+        # better to have all the files matching ownership.
+        update_owner(ready_file)
+
+
+def update_owner(path, recurse_dirs=True):
+    """Changes the ownership of the specified path.
+
+    Changes the ownership of the specified path to the new ceph daemon user
+    using the system's native chown functionality. This may take a while,
+    so this method will issue a set_status for any changes of ownership which
+    recurse into directory structures.
+
+    :param path: the path to recursively change ownership for
+    :param recurse_dirs: boolean indicating whether to recursively change the
+                         ownership of all the files in a path's subtree or to
+                         simply change the ownership of the path.
+ :raises CalledProcessError: if an error occurs issuing the chown system + command + """ + user = ceph_user() + user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) + cmd = ['chown', user_group, path] + if os.path.isdir(path) and recurse_dirs: + status_set('maintenance', ('Updating ownership of %s to %s' % + (path, user))) + cmd.insert(1, '-R') + + log('Changing ownership of {path} to {user}'.format( + path=path, user=user_group), DEBUG) + start = datetime.now() + subprocess.check_call(cmd) + elapsed_time = (datetime.now() - start) + + log('Took {secs} seconds to change the ownership of path: {path}'.format( + secs=elapsed_time.total_seconds(), path=path), DEBUG) + + def list_pools(service): """ This will list the current pools that Ceph has @@ -1715,7 +1962,7 @@ def list_pools(service): """ try: pool_list = [] - pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + pools = check_output(['rados', '--id', service, 'lspools']) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -1723,6 +1970,36 @@ def list_pools(service): log("rados lspools failed with error: {}".format(err.output)) raise + +def dirs_need_ownership_update(service): + """Determines if directories still need change of ownership. + + Examines the set of directories under the /var/lib/ceph/{service} directory + and determines if they have the correct ownership or not. This is + necessary due to the upgrade from Hammer to Jewel where the daemon user + changes from root: to ceph:. + + :param service: the name of the service folder to check (e.g. osd, mon) + :return: boolean. True if the directories need a change of ownership, + False otherwise. + :raises IOError: if an error occurs reading the file stats from one of + the child directories. + :raises OSError: if the specified path does not exist or some other error + """ + expected_owner = expected_group = ceph_user() + path = os.path.join(CEPH_BASE_DIR, service) + for child in _get_child_dirs(path): + curr_owner, curr_group = owner(child) + + if (curr_owner == expected_owner) and (curr_group == expected_group): + continue + + log('Directory "%s" needs its ownership updated' % child, DEBUG) + return True + + # All child directories had the expected ownership + return False + # A dict of valid ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = { 'firefly': 'hammer', @@ -1757,3 +2034,86 @@ def resolve_ceph_version(source): ''' os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) + + +def get_ceph_pg_stat(): + """ + Returns the result of ceph pg stat + :return: dict + """ + try: + tree = check_output(['ceph', 'pg', 'stat', '--format=json']) + try: + json_tree = json.loads(tree) + if not json_tree['num_pg_by_state']: + return None + return json_tree + except ValueError as v: + log("Unable to parse ceph pg stat json: {}. 
Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph pg stat command failed with message: {}".format( + e.message)) + raise + + +def get_ceph_health(): + """ + Returns the health of the cluster from a 'ceph health' + :return: dict + Also raises CalledProcessError if our ceph command fails + To get the overall status, use get_ceph_health()['overall_status'] + """ + try: + tree = check_output( + ['ceph', 'health', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['overall_status']: + return None + return json_tree + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def reweight_osd(osd_num, new_weight): + """ + Changes the crush weight of an OSD to the value specified. + :param osd_num: the osd id which should be changed + :param new_weight: the new weight for the OSD + :returns: bool. True if output looks right, else false. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + try: + cmd_result = subprocess.check_output( + ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), + new_weight], stderr=subprocess.STDOUT) + expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( + ID=osd_num) + " to {}".format(new_weight) + log(cmd_result) + if expected_result in cmd_result: + return True + return False + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def determine_packages(): + ''' + Determines packages for installation. + + @returns: list of ceph packages + ''' + if is_container(): + PACKAGES.remove('ntp') + return PACKAGES diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/ceph_broker.py index 8a998058..1f6db8c8 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/ceph_broker.py @@ -28,7 +28,7 @@ get_cephfs, get_osd_weight ) -from ceph_helpers import Crushmap +from ceph.ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -168,6 +168,8 @@ def handle_add_permissions_to_key(request, service): A group can optionally have a namespace defined that will be used to further restrict pool access. 
""" + resp = {'exit-code': 0} + service_name = request.get('name') group_name = request.get('group') group_namespace = request.get('group-namespace') @@ -190,6 +192,8 @@ def handle_add_permissions_to_key(request, service): group_namespace) update_service_permissions(service_name, service_obj, group_namespace) + return resp + def update_service_permissions(service, service_obj=None, namespace=None): """Update the key permissions for the named client in Ceph""" diff --git a/ceph-mon/lib/ceph/ceph_helpers.py b/ceph-mon/lib/ceph/ceph_helpers.py index 8e5c807f..11f5dd8c 100644 --- a/ceph-mon/lib/ceph/ceph_helpers.py +++ b/ceph-mon/lib/ceph/ceph_helpers.py @@ -36,7 +36,11 @@ import re import subprocess -from subprocess import (check_call, check_output, CalledProcessError, ) +from subprocess import ( + check_call, + check_output as s_check_output, + CalledProcessError, +) from charmhelpers.core.hookenv import (config, local_unit, relation_get, @@ -111,6 +115,15 @@ LEGACY_PG_COUNT = 200 +def check_output(*args, **kwargs): + ''' + Helper wrapper for py2/3 compat with subprocess.check_output + + @returns str: UTF-8 decoded representation of output + ''' + return s_check_output(*args, **kwargs).decode('UTF-8') + + def validator(value, valid_type, valid_range=None): """ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ @@ -188,7 +201,7 @@ def load_crushmap(self): stdout=subprocess.PIPE) return subprocess.check_output( ('crushtool', '-d', '-'), - stdin=crush.stdout).decode('utf-8') + stdin=crush.stdout) except Exception as e: log("load_crushmap error: {}".format(e)) raise "Failed to read Crushmap" @@ -565,7 +578,8 @@ def monitor_key_delete(service, key): :param key: six.string_types. The key to delete. """ try: - check_output(['ceph', '--id', service, 'config-key', 'del', str(key)]) + check_output(['ceph', '--id', service, + 'config-key', 'del', str(key)]) except CalledProcessError as e: log("Monitor config-key put failed with message: {}".format(e.output)) raise @@ -867,8 +881,7 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']).decode( - 'UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) except CalledProcessError: return False @@ -882,7 +895,7 @@ def get_osds(service): version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']).decode('UTF-8')) + '--format=json'])) return None @@ -900,7 +913,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', service, '--pool', pool - ]).decode('UTF-8') + ]) except CalledProcessError: return False @@ -1025,7 +1038,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) except CalledProcessError: return False @@ -1212,7 +1225,7 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-mon/lib/setup.py b/ceph-mon/lib/setup.py new file mode 100644 index 00000000..139c80d6 --- /dev/null +++ 
b/ceph-mon/lib/setup.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function + +import os +import sys +from setuptools import setup, find_packages +from setuptools.command.test import test as TestCommand + +version = "0.0.1.dev1" +install_require = [ +] + +tests_require = [ + 'tox >= 2.3.1', +] + + +class Tox(TestCommand): + + user_options = [('tox-args=', 'a', "Arguments to pass to tox")] + + def initialize_options(self): + TestCommand.initialize_options(self) + self.tox_args = None + + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + # import here, cause outside the eggs aren't loaded + import tox + import shlex + args = self.tox_args + # remove the 'test' arg from argv as tox passes it to ostestr which + # breaks it. + sys.argv.pop() + if args: + args = shlex.split(self.tox_args) + errno = tox.cmdline(args=args) + sys.exit(errno) + + +if sys.argv[-1] == 'publish': + os.system("python setup.py sdist upload") + os.system("python setup.py bdist_wheel upload") + sys.exit() + + +if sys.argv[-1] == 'tag': + os.system("git tag -a %s -m 'version %s'" % (version, version)) + os.system("git push --tags") + sys.exit() + + +setup( + name='charms.ceph', + version=version, + description='Provide base module for ceph charms.', + classifiers=[ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "Topic :: System", + "Topic :: System :: Installation/Setup", + "Topic :: System :: Software Distribution", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "License :: OSI Approved :: Apache Software License", + ], + url='https://github.com/openstack/charms.ceph', + author='OpenStack Charmers', + author_email='openstack-dev@lists.openstack.org', + license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0', + packages=find_packages(exclude=["unit_tests"]), + zip_safe=False, + cmdclass={'test': Tox}, + install_requires=install_require, + extras_require={ + 'testing': tests_require, + }, + tests_require=tests_require, +) From a3b7a2e4cf64bf042fbc695d3bb98cd3ef48cf4f Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 16 May 2017 09:31:51 -0700 Subject: [PATCH 1317/2699] Do not install NTP when installed in a container Use determine_packages() to avoid installing NTP when in a container. Sync charms.ceph to get ceph.determine_packages(). 
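For illustration, a minimal sketch of the helper this change starts
consuming. The package list is an illustrative stand-in for the charm's
PACKAGES constant; note the synced helper mutates the module-level list in
place, whereas this sketch copies it first so repeated calls stay idempotent:

    from charmhelpers.core.host import is_container

    PACKAGES = ['ceph', 'gdisk', 'ntp', 'xfsprogs']  # illustrative subset

    def determine_packages():
        # Containers share the host's clock, so ntp is not useful there.
        packages = list(PACKAGES)
        if is_container():
            packages.remove('ntp')
        return packages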
Partial-Bug: #1690513
Change-Id: I274f2bad0edece04eb7782f94803d991cb2973dd
---
 ceph-osd/hooks/ceph_hooks.py     |   4 +-
 ceph-osd/lib/ceph/__init__.py    | 184 ++++++++++++++++++++++++++++---
 ceph-osd/lib/ceph/ceph_broker.py |   4 +
 3 files changed, 173 insertions(+), 19 deletions(-)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 06372057..20a18dca 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -191,7 +191,7 @@ def install_apparmor_profile():
 def install():
     add_source(config('source'), config('key'))
     apt_update(fatal=True)
-    apt_install(packages=ceph.PACKAGES, fatal=True)
+    apt_install(packages=ceph.determine_packages(), fatal=True)
     if config('autotune'):
         tune_network_adapters()
@@ -479,7 +479,7 @@ def mon_relation():
 def upgrade_charm():
     if get_fsid() and get_auth():
         emit_cephconf()
-    apt_install(packages=filter_installed_packages(ceph.PACKAGES),
+    apt_install(packages=filter_installed_packages(ceph.determine_packages()),
                 fatal=True)

diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py
index 32c7ce6c..ad67965a 100644
--- a/ceph-osd/lib/ceph/__init__.py
+++ b/ceph-osd/lib/ceph/__init__.py
@@ -40,6 +40,7 @@
     service_start,
     service_stop,
     CompareHostReleases,
+    is_container,
 )
 from charmhelpers.core.hookenv import (
     cached,
@@ -52,10 +53,7 @@
 )
 from charmhelpers.fetch import (
     apt_cache,
-    add_source,
-    apt_install,
-    apt_update,
-)
+    add_source, apt_install, apt_update)
 from charmhelpers.contrib.storage.linux.ceph import (
     monitor_key_set,
     monitor_key_exists,
@@ -126,6 +124,42 @@
 }
 
 
+class Partition(object):
+    def __init__(self, name, number, size, start, end, sectors, uuid):
+        """
+        A block device partition
+        :param name: Name of block device
+        :param number: Partition number
+        :param size: Capacity of the device
+        :param start: Starting block
+        :param end: Ending block
+        :param sectors: Number of blocks
+        :param uuid: UUID of the partition
+        """
+        self.name = name
+        self.number = number
+        self.size = size
+        self.start = start
+        self.end = end
+        self.sectors = sectors
+        self.uuid = uuid
+
+    def __str__(self):
+        return "number: {} start: {} end: {} sectors: {} size: {} " \
+               "name: {} uuid: {}".format(self.number, self.start,
+                                          self.end,
+                                          self.sectors, self.size,
+                                          self.name, self.uuid)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
 def unmounted_disks():
     """List of unmounted block devices on the current host."""
     disks = []
@@ -768,9 +802,12 @@ def add_bootstrap_hint(peer):
 ]
 
 CEPH_PARTITIONS = [
+    '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE',  # ceph encrypted disk in creation
+    '45B0969E-9B03-4F30-B4C6-5EC00CEFF106',  # ceph encrypted journal
     '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D',  # ceph encrypted osd data
     '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D',  # ceph osd data
     '45B0969E-9B03-4F30-B4C6-B4B80CEFF106',  # ceph osd journal
+    '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE',  # ceph disk in creation
 ]
 
 
@@ -881,17 +918,48 @@ def replace_osd(dead_osd_number,
         log('replace_osd failed with error: ' + e.output)
 
 
-def is_osd_disk(dev):
+def get_partition_list(dev):
+    """
+    Lists the partitions of a block device
+    :param dev: Path to a block device. ex: /dev/sda
+    :returns: A list of Partition objects.
+    :raises: subprocess.CalledProcessError if reading the partition
+        table fails
+    """
+    partitions_list = []
     try:
-        info = check_output(['sgdisk', '-i', '1', dev])
-        info = info.split("\n")  # IGNORE:E1103
-        for line in info:
-            for ptype in CEPH_PARTITIONS:
-                sig = 'Partition GUID code: {}'.format(ptype)
-                if line.startswith(sig):
-                    return True
+        partitions = get_partitions(dev)
+        # For each line of output
+        for partition in partitions:
+            parts = partition.split()
+            partitions_list.append(
+                Partition(number=parts[0],
+                          start=parts[1],
+                          end=parts[2],
+                          sectors=parts[3],
+                          size=parts[4],
+                          name=parts[5],
+                          uuid=parts[6])
+            )
+        return partitions_list
     except subprocess.CalledProcessError:
-        pass
+        raise
+
+
+def is_osd_disk(dev):
+    partitions = get_partition_list(dev)
+    for partition in partitions:
+        try:
+            info = check_output(['sgdisk', '-i', partition.number, dev])
+            info = info.split("\n")  # IGNORE:E1103
+            for line in info:
+                for ptype in CEPH_PARTITIONS:
+                    sig = 'Partition GUID code: {}'.format(ptype)
+                    if line.startswith(sig):
+                        return True
+        except subprocess.CalledProcessError as e:
+            log("sgdisk inspection of partition {} on {} failed with "
+                "error: {}. Skipping".format(partition.number, dev, e.message),
+                level=ERROR)
     return False
 
 
@@ -1347,7 +1415,7 @@ def osdize_dir(path, encrypt=False):
     if cmp_pkgrevno('ceph', "0.56.6") < 0:
         log('Unable to use directories for OSDs with ceph < 0.56.6',
             level=ERROR)
-        raise
+        return
 
     mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
     chownr('/var/lib/ceph', ceph_user(), ceph_user())
@@ -1516,7 +1584,7 @@ def upgrade_monitor(new_version):
             service_stop('ceph-mon@{}'.format(mon_id))
     else:
         service_stop('ceph-mon-all')
-    apt_install(packages=PACKAGES, fatal=True)
+    apt_install(packages=determine_packages(), fatal=True)
 
     # Ensure the files and directories under /var/lib/ceph is chowned
     # properly as part of the move to the Jewel release, which moved the
@@ -1700,7 +1768,7 @@ def upgrade_osd(new_version):
     try:
         # Upgrade the packages before restarting the daemons.
         status_set('maintenance', 'Upgrading packages to %s' % new_version)
-        apt_install(packages=PACKAGES, fatal=True)
+        apt_install(packages=determine_packages(), fatal=True)
 
         # If the upgrade does not need an ownership update of any of the
         # directories in the osd service directory, then simply restart
@@ -1932,7 +2000,6 @@ def dirs_need_ownership_update(service):
     # All child directories had the expected ownership
     return False
 
-
 # A dict of valid ceph upgrade paths. Mapping is old -> new
 UPGRADE_PATHS = {
     'firefly': 'hammer',
@@ -1967,3 +2034,86 @@ def resolve_ceph_version(source):
     '''
     os_release = get_os_codename_install_source(source)
     return UCA_CODENAME_MAP.get(os_release)
+
+
+def get_ceph_pg_stat():
+    """
+    Returns the result of ceph pg stat
+    :return: dict
+    """
+    try:
+        tree = check_output(['ceph', 'pg', 'stat', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            if not json_tree['num_pg_by_state']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph pg stat json: {}.
Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph pg stat command failed with message: {}".format( + e.message)) + raise + + +def get_ceph_health(): + """ + Returns the health of the cluster from a 'ceph health' + :return: dict + Also raises CalledProcessError if our ceph command fails + To get the overall status, use get_ceph_health()['overall_status'] + """ + try: + tree = check_output( + ['ceph', 'health', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['overall_status']: + return None + return json_tree + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def reweight_osd(osd_num, new_weight): + """ + Changes the crush weight of an OSD to the value specified. + :param osd_num: the osd id which should be changed + :param new_weight: the new weight for the OSD + :returns: bool. True if output looks right, else false. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + try: + cmd_result = subprocess.check_output( + ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), + new_weight], stderr=subprocess.STDOUT) + expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( + ID=osd_num) + " to {}".format(new_weight) + log(cmd_result) + if expected_result in cmd_result: + return True + return False + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def determine_packages(): + ''' + Determines packages for installation. + + @returns: list of ceph packages + ''' + if is_container(): + PACKAGES.remove('ntp') + return PACKAGES diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/ceph_broker.py index f1e59daf..1f6db8c8 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/ceph_broker.py @@ -168,6 +168,8 @@ def handle_add_permissions_to_key(request, service): A group can optionally have a namespace defined that will be used to further restrict pool access. """ + resp = {'exit-code': 0} + service_name = request.get('name') group_name = request.get('group') group_namespace = request.get('group-namespace') @@ -190,6 +192,8 @@ def handle_add_permissions_to_key(request, service): group_namespace) update_service_permissions(service_name, service_obj, group_namespace) + return resp + def update_service_permissions(service, service_obj=None, namespace=None): """Update the key permissions for the named client in Ceph""" From 98b92fdbe60fbe06f29491e60fb6b8152b3313ac Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 16 May 2017 11:11:53 -0700 Subject: [PATCH 1318/2699] Do not install NTP when installed in a container Check if the charm is installed inside a container and do not install NTP if that is the case. 
Partial-Bug: #1690513 Change-Id: Ib290733a175197dee1261043a1e8ff2edc0b53e5 --- ceph-radosgw/hooks/hooks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cb3a01b5..fccad357 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -40,7 +40,10 @@ filter_installed_packages, ) from charmhelpers.payload.execd import execd_preinstall -from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.core.host import ( + cmp_pkgrevno, + is_container, +) from charmhelpers.contrib.network.ip import ( get_relation_ip, get_iface_for_address, @@ -103,6 +106,8 @@ def install_packages(): add_source(config('source'), config('key')) apt_update(fatal=True) + if is_container(): + PACKAGES.remove('ntp') pkgs = filter_installed_packages(PACKAGES) if pkgs: status_set('maintenance', 'Installing radosgw packages') From e063d3fa378d2be1c4d3da77881c4810f2613a05 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 30 May 2017 15:37:15 +0200 Subject: [PATCH 1319/2699] Fix single-monitor deployment Commit 52c345d48593930e5c446e58130fadd8c3171608 introduced auto-generation of 'monitor-secret' and 'fsid'. However, the auto-generated 'monitor-secret' is not used when doing single-monitor deployments. This commit fixes that. Change-Id: Iae41b3296c2dfd86823cac91f70d582951206261 Closes-Bug: #1694435 --- ceph-mon/hooks/ceph_hooks.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 29bc567b..c25138bb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -220,9 +220,10 @@ def config_changed(): emit_cephconf() # Support use of single node ceph - if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1: + if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1 and + is_leader()): status_set('maintenance', 'Bootstrapping single Ceph MON') - ceph.bootstrap_monitor_cluster(config('monitor-secret')) + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() From b431998d26f0f86dd9cf0359472797efb4cb07dc Mon Sep 17 00:00:00 2001 From: David Ames Date: Wed, 31 May 2017 15:48:42 -0700 Subject: [PATCH 1320/2699] Handle SSL and Keystone v3 When using SSL and Keystone v3 the charm was hardcoding v2.0 in the URL. This change adapts based on what is passed from keystone. 
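The first hunk below widens get_api_suffix() so the string "2" is also
treated as v2. Under the patched helper, the mapping works out as follows (a
sketch of expected return values, not captured test output):

    from charmhelpers.contrib.openstack.keystone import get_api_suffix

    get_api_suffix(2)      # 'v2.0'
    get_api_suffix("2")    # 'v2.0'  (the string form this patch adds)
    get_api_suffix("2.0")  # 'v2.0'
    get_api_suffix(3)      # 'v3'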
Change-Id: Ib08730e6480b1b1706c450075ac2f7312e3e5f59 Closes-Bug: #1691875 --- .../hooks/charmhelpers/contrib/openstack/keystone.py | 2 +- ceph-radosgw/hooks/utils.py | 12 ++++++++---- ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py | 5 ++++- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py index a15a03fa..d7e02ccd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py @@ -29,7 +29,7 @@ def get_api_suffix(api_version): @returns the api suffix formatted according to the given api version """ - return 'v2.0' if api_version in (2, "2.0") else 'v3' + return 'v2.0' if api_version in (2, "2", "2.0") else 'v3' def format_endpoint(schema, addr, port, api_version): diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index afddf8c7..cd25d007 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -43,6 +43,9 @@ pause_unit, resume_unit, ) +from charmhelpers.contrib.openstack.keystone import ( + format_endpoint, +) from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config from charmhelpers.core.host import ( cmp_pkgrevno, @@ -435,7 +438,7 @@ def setup_keystone_certs(unit=None, rid=None): mkdir(certs_path) rdata = relation_get(unit=unit, rid=rid) - required = ['admin_token', 'auth_host', 'auth_port'] + required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] settings = {key: rdata.get(key) for key in required} if not all(settings.values()): log("Missing relation settings ({}) - deferring cert setup".format( @@ -447,9 +450,10 @@ def setup_keystone_certs(unit=None, rid=None): if is_ipv6(settings.get('auth_host')): settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) - auth_endpoint = "{}://{}:{}/v2.0".format(auth_protocol, - settings['auth_host'], - settings['auth_port']) + auth_endpoint = format_endpoint(auth_protocol, + settings['auth_host'], + settings['auth_port'], + settings['api_version']) try: get_ks_ca_cert(settings['admin_token'], auth_endpoint, certs_path) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index f2a5b8ff..37ed1d6a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -28,6 +28,7 @@ TO_PATCH = [ 'application_version_set', 'get_upstream_version', + 'format_endpoint', ] @@ -103,9 +104,11 @@ def test_setup_keystone_certs(self, mock_mkdir, mock_relation_get, auth_port = 80 admin_token = '666' auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) + self.format_endpoint.return_value = auth_url mock_relation_get.return_value = {'auth_host': auth_host, 'auth_port': auth_port, - 'admin_token': admin_token} + 'admin_token': admin_token, + 'api_version': '2'} utils.setup_keystone_certs() mock_get_ks_signing_cert.assert_has_calls([call(admin_token, auth_url, '/var/lib/ceph/nss')]) From b81f7565558163a96076b61f8e3026604750e389 Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Wed, 31 May 2017 10:17:35 +0000 Subject: [PATCH 1321/2699] Allow the simple Swift auth to work Incase we do *not* use keystone as an authentication mechanism, let the built-in authentication work with this charm. 
Without this change, the Swift authentication itself will work, but the X-Storage-URL header will point to the port the storage daemon listens on - which is not open in the firewall (70). This change instead forces the URL to be "the unit's public IP" with the default port (80), on which haproxy is listening, and will do the right thing. Change-Id: Ia2b12153eca3074392aad6dea6ee995990f15633 Signed-off-by: Christopher Glass --- ceph-radosgw/hooks/ceph_radosgw_context.py | 11 +++++++++-- ceph-radosgw/templates/ceph.conf | 4 +++- ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 6 ++++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 3ba6b781..ae6879e0 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -29,9 +29,10 @@ WARNING, config, log, - relation_ids, related_units, relation_get, + relation_ids, + unit_public_ip, ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, @@ -175,7 +176,13 @@ def __call__(self): 'use_syslog': str(config('use-syslog')).lower(), 'loglevel': config('loglevel'), 'port': port, - 'ipv6': config('prefer-ipv6') + 'ipv6': config('prefer-ipv6'), + # The public unit IP is only used in case the authentication is + # *Not* keystone - in which case it is used to make sure the + # storage endpoint returned by the built-in auth is the HAproxy + # (since it defaults to the port the service runs on, and that is + # not available externally). ~tribaal + 'unit_public_ip': unit_public_ip(), } certs_path = '/var/lib/ceph/nss' diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 392ec031..d89902e4 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -47,7 +47,9 @@ rgw s3 auth use keystone = true {% if cms -%} nss db path = /var/lib/ceph/nss {% endif %} -{% endif %} +{% else -%} +rgw swift url = http://{{ unit_public_ip }} +{% endif -%} {% if client_radosgw_gateway -%} # The following are user-provided options provided via the config-flags charm option. 
# User-provided [client.radosgw.gateway] section config diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index ec81ad31..f84fc692 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -28,6 +28,7 @@ 'related_units', 'cmp_pkgrevno', 'socket', + 'unit_public_ip', ] @@ -170,6 +171,7 @@ class MonContextTest(CharmTestCase): def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) self.config.side_effect = self.test_config.get + self.unit_public_ip.return_value = '10.255.255.255' @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @@ -193,6 +195,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, 'port': 70, @@ -231,6 +234,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, 'port': 70, @@ -278,6 +282,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, 'port': 70, @@ -307,6 +312,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, 'port': 70, From 137c6c7d3267e4d3e00ec32499abb9086e61059f Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 9 Jun 2017 11:38:48 -0700 Subject: [PATCH 1322/2699] Add lib folder to system path for replace_osd action The replace_osd action fails to run because the ceph import cannot be resolved without the lib folder existing in the system path. Change-Id: If85a3ac7edbb22abf11cff5429e6820322ded898 Closes-Bug: #1696958 --- ceph-osd/actions/replace_osd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index 69ba4a6f..edfa1f7b 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -18,6 +18,7 @@ import sys sys.path.append('hooks/') +sys.path.append('lib/') from charmhelpers.core.hookenv import action_get, log, config, action_fail From 4da93f3f7fff5cc54970a3b4615dcd4c0b61f7ce Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Wed, 28 Jun 2017 10:02:37 +1200 Subject: [PATCH 1323/2699] Update ceph and charmhelpers Straight update using the Makefile to update helper libraries. 
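Among the synced additions is a charmhelpers.deprecate decorator (defined in
the first hunk below). A minimal usage sketch, mirroring how the synced
openstack utils apply it and assuming hookenv.log as the logger:

    from charmhelpers import deprecate
    from charmhelpers.core.hookenv import log

    @deprecate('use charmhelpers.fetch.add_source() instead', '2017-07',
               log=log)
    def configure_installation_source(source_plus_key):
        ...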
Change-Id: I820118b6cc8a0482e609ccbc7ed3c312b2277fdd
---
 ceph-osd/hooks/charmhelpers/__init__.py       |  61 ++++
 .../charmhelpers/contrib/charmsupport/nrpe.py |   7 +
 .../hooks/charmhelpers/contrib/network/ip.py  |   6 +-
 .../charmhelpers/contrib/openstack/utils.py   | 226 +++++--------
 .../contrib/storage/linux/ceph.py             |  42 +++
 ceph-osd/hooks/charmhelpers/core/host.py      |   2 +
 ceph-osd/hooks/charmhelpers/fetch/__init__.py |  26 +-
 ceph-osd/hooks/charmhelpers/fetch/centos.py   |   2 +-
 ceph-osd/hooks/charmhelpers/fetch/ubuntu.py   | 304 ++++++++++++++----
 ceph-osd/lib/ceph/__init__.py                 |   8 +-
 ceph-osd/tests/charmhelpers/__init__.py       |  61 ++++
 .../contrib/openstack/amulet/utils.py         | 103 ++++--
 ceph-osd/tests/charmhelpers/core/host.py      |   2 +
 13 files changed, 599 insertions(+), 251 deletions(-)

diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py
index 48867880..e7aa4715 100644
--- a/ceph-osd/hooks/charmhelpers/__init__.py
+++ b/ceph-osd/hooks/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
@@ -34,3 +39,59 @@
 else:
     subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a map of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO 8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call to log.
If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8240249e..424b7f76 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -193,6 +193,13 @@ def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index fc3f5e3e..d7e6debf 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -243,11 +243,13 @@ def is_ipv6_disabled(): try: result = subprocess.check_output( ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT) - return "net.ipv6.conf.all.disable_ipv6 = 1" in result + stderr=subprocess.STDOUT, + universal_newlines=True) except subprocess.CalledProcessError: return True + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 161c786b..cfdd829d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -26,11 +26,12 @@ import shutil import six -import tempfile import traceback import uuid import yaml +from charmhelpers import deprecate + from charmhelpers.contrib.network import ip from charmhelpers.core import unitdata @@ -41,7 +42,6 @@ config, log as juju_log, charm_dir, - DEBUG, INFO, ERROR, related_units, @@ -82,9 +82,12 @@ restart_on_change_helper, ) from charmhelpers.fetch import ( - apt_install, apt_cache, install_remote, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, get_upstream_version ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -469,13 +472,14 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) -os_rel = None +# Module local cache variable for the os_release. 
+_os_rel = None def reset_os_release(): '''Unset the cached os_release version''' - global os_rel - os_rel = None + global _os_rel + _os_rel = None def os_release(package, base='essex', reset_cache=False): @@ -489,150 +493,77 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' - global os_rel + global _os_rel if reset_cache: reset_os_release() - if os_rel: - return os_rel - os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return os_rel + if _os_rel: + return _os_rel + _os_rel = ( + git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return _os_rel +@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) def import_key(keyid): - key = keyid.strip() - if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and - key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): - juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - juju_log("Importing ASCII Armor PGP key", level=DEBUG) - with tempfile.NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - else: - juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) - juju_log("Importing PGP key from keyserver", level=DEBUG) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - - -def get_source_and_pgp_key(input): - """Look for a pgp key ID or ascii-armor key in the given input.""" - index = input.strip() - index = input.rfind('|') - if index < 0: - return input, None - - key = input[index + 1:].strip('|') - source = input[:index] - return source, key - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - if rel == 'distro': - return - elif rel == 'distro-proposed': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(DISTRO_PROPOSED % ubuntu_rel) - elif rel[:4] == "ppa:": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) + """Import a key, either ASCII armored, or a GPG key id. - if 'staging' in ca_rel: - # staging is just a regular PPA. 
- os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. - pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'pike': 'xenial-updates/pike', - 'pike/updates': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'queens': 'xenial-updates/queens', - 'queens/updates': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - } + @param keyid: the key in ASCII armor format, or a GPG key id. + @raises SystemExit() via sys.exit() on failure. + """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - apt_install('ubuntu-cloud-keyring', fatal=True) +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) + :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. + + The functionality is provided by charmhelpers.fetch.add_source() + The difference between the two functions is that add_source() signature + requires the key to be passed directly, whereas this function passes an + optional key by appending '|' to the end of the source specificiation + 'source'. + + Another difference from add_source() is that the function calls sys.exit(1) + if the configuration fails, whereas add_source() raises + SourceConfigurationError(). 
Another difference, is that add_source() + silently fails (with a juju_log command) if there is no matching source to + configure, whereas this function fails with a sys.exit(1) + + :param source: String_plus_key -- see above for details. + + Note that the behaviour on error is to log the error to the juju log and + then call sys.exit(1). + """ + # extract the key if there is one, denoted by a '|' in the rel + source, key = get_source_and_pgp_key(source_plus_key) + + # handle the ordinary sources via add_source + try: + fetch_add_source(source, key, fail_invalid=True) + except SourceConfigError as se: + error_out(str(se)) def config_value_changed(option): @@ -677,7 +608,6 @@ def openstack_upgrade_available(package): :returns: bool: : Returns True if configured installation source offers a newer version of package. - """ import apt_pkg as apt @@ -2052,3 +1982,15 @@ def token_cache_pkgs(source=None, release=None): if enable_memcache(source=source, release=release): packages.extend(['memcached', 'python-memcache']) return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. + :param filename: json filename (i.e.: /etc/glance/policy.json) + :param items: dict of items to update + """ + with open(filename) as fd: + policy = json.load(fd) + policy.update(items) + with open(filename, "w") as fd: + fd.write(json.dumps(policy, indent=4)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9417d684..1f0540a1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -63,6 +63,7 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.unitdata import kv from charmhelpers.core.kernel import modprobe from charmhelpers.contrib.openstack.utils import config_flags_parser @@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def is_broker_action_done(action, rid=None, unit=None): + """Check whether broker action has completed yet. + + @param action: name of action to be performed + @returns True if action complete otherwise False + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return False + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + val = kvstore.get(key=key) + if val and val == rsp.request_id: + return True + + return False + + +def mark_broker_action_done(action, rid=None, unit=None): + """Mark action as having been completed. + + @param action: name of action to be performed + @returns None + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + kvstore.set(key=key, value=rsp.request_id) + kvstore.flush() + + class CephConfContext(object): """Ceph config (ceph.conf) context. 
diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 88e80a49..b0043cbe 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index ec5e0fe9..480a6276 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -48,6 +48,13 @@ class AptLockError(Exception): pass +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + class BaseFetchHandler(object): """Base class for FetchHandler implementations in fetch plugins""" @@ -77,21 +84,22 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages -install = fetch.install -upgrade = fetch.upgrade -update = fetch.update -purge = fetch.purge +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge add_source = fetch.add_source if __platform__ == "ubuntu": apt_cache = fetch.apt_cache - apt_install = fetch.install - apt_update = fetch.update - apt_upgrade = fetch.upgrade - apt_purge = fetch.purge + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold + import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version elif __platform__ == "centos": yum_search = fetch.yum_search @@ -135,7 +143,7 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - fetch.update(fatal=True) + _fetch_update(fatal=True) def install_remote(source, *args, **kwargs): diff --git a/ceph-osd/hooks/charmhelpers/fetch/centos.py b/ceph-osd/hooks/charmhelpers/fetch/centos.py index 604bbfb5..a91dcff0 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/centos.py +++ b/ceph-osd/hooks/charmhelpers/fetch/centos.py @@ -132,7 +132,7 @@ def add_source(source, key=None): key_file.write(key) key_file.flush() key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file]) + subprocess.check_call(['rpm', '--import', key_file.name]) else: subprocess.check_call(['rpm', '--import', key]) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 7bc6cc7e..57b5fb61 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -12,29 +12,47 @@ # See the License for the specific language governing permissions and # limitations under the License. 
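Note: with the renaming above, the Ubuntu fetch backend now exposes apt_-prefixed canonical names (apt_install, apt_update, apt_upgrade, apt_purge, import_key) while the old module-level aliases (install, update, upgrade, purge) keep working. A minimal sketch of charm code driving the public entry points; the source string and package list are illustrative:

    from charmhelpers.fetch import add_source, apt_update, apt_install

    add_source('cloud:xenial-ocata')   # any spec from CLOUD_ARCHIVE_POCKETS
    apt_update(fatal=True)             # refresh the apt cache
    apt_install(['ceph', 'ceph-common'], fatal=True)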
+from collections import OrderedDict import os +import platform +import re import six import time import subprocess - from tempfile import NamedTemporaryFile + from charmhelpers.core.host import ( lsb_release ) -from charmhelpers.core.hookenv import log -from charmhelpers.fetch import SourceConfigError - +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. +ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ - -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" - CLOUD_ARCHIVE_POCKETS = { # Folsom 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', 'precise-folsom': 'precise-updates/folsom', 'precise-folsom/updates': 'precise-updates/folsom', 'precise-updates/folsom': 'precise-updates/folsom', @@ -43,6 +61,7 @@ 'precise-proposed/folsom': 'precise-proposed/folsom', # Grizzly 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', 'precise-grizzly': 'precise-updates/grizzly', 'precise-grizzly/updates': 'precise-updates/grizzly', 'precise-updates/grizzly': 'precise-updates/grizzly', @@ -51,6 +70,7 @@ 'precise-proposed/grizzly': 'precise-proposed/grizzly', # Havana 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', 'precise-havana': 'precise-updates/havana', 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', @@ -59,6 +79,7 @@ 'precise-proposed/havana': 'precise-proposed/havana', # Icehouse 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', 'precise-icehouse': 'precise-updates/icehouse', 'precise-icehouse/updates': 'precise-updates/icehouse', 'precise-updates/icehouse': 'precise-updates/icehouse', @@ -67,6 +88,7 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', # Juno 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', 'trusty-juno': 'trusty-updates/juno', 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', @@ -75,6 +97,7 @@ 'trusty-proposed/juno': 'trusty-proposed/juno', # Kilo 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', 'trusty-kilo': 'trusty-updates/kilo', 'trusty-kilo/updates': 'trusty-updates/kilo', 'trusty-updates/kilo': 'trusty-updates/kilo', @@ -83,6 +106,7 @@ 'trusty-proposed/kilo': 'trusty-proposed/kilo', # Liberty 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', 'trusty-liberty': 'trusty-updates/liberty', 'trusty-liberty/updates': 'trusty-updates/liberty', 'trusty-updates/liberty': 'trusty-updates/liberty', @@ -91,6 +115,7 @@ 'trusty-proposed/liberty': 'trusty-proposed/liberty', # Mitaka 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', 'trusty-mitaka': 'trusty-updates/mitaka', 
'trusty-mitaka/updates': 'trusty-updates/mitaka', 'trusty-updates/mitaka': 'trusty-updates/mitaka', @@ -99,6 +124,7 @@ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', # Newton 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', 'xenial-newton': 'xenial-updates/newton', 'xenial-newton/updates': 'xenial-updates/newton', 'xenial-updates/newton': 'xenial-updates/newton', @@ -107,6 +133,7 @@ 'xenial-proposed/newton': 'xenial-proposed/newton', # Ocata 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', 'xenial-ocata': 'xenial-updates/ocata', 'xenial-ocata/updates': 'xenial-updates/ocata', 'xenial-updates/ocata': 'xenial-updates/ocata', @@ -131,6 +158,7 @@ 'xenial-queens/newton': 'xenial-proposed/queens', } + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. @@ -161,7 +189,7 @@ def apt_cache(in_memory=True, progress=None): return apt_pkg.Cache(progress) -def install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False): """Install one or more packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -178,7 +206,7 @@ def install(packages, options=None, fatal=False): _run_apt_command(cmd, fatal) -def upgrade(options=None, fatal=False, dist=False): +def apt_upgrade(options=None, fatal=False, dist=False): """Upgrade all packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -193,13 +221,13 @@ def upgrade(options=None, fatal=False, dist=False): _run_apt_command(cmd, fatal) -def update(fatal=False): +def apt_update(fatal=False): """Update local apt cache.""" cmd = ['apt-get', 'update'] _run_apt_command(cmd, fatal) -def purge(packages, fatal=False): +def apt_purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): @@ -233,7 +261,45 @@ def apt_unhold(packages, fatal=False): return apt_mark(packages, 'unhold', fatal=fatal) -def add_source(source, key=None): +def import_key(keyid): + """Import a key in either ASCII Armor or Radix64 format. + + `keyid` is either the keyid to fetch from a PGP server, or + the key in ASCII armor foramt. + + :param keyid: String of key (or key id). + :raises: GPGKeyError if the key could not be imported + """ + key = keyid.strip() + if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and + key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + log("Importing ASCII Armor PGP key", level=DEBUG) + with NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + else: + log("PGP key found (looks like Radix64 format)", level=DEBUG) + log("Importing PGP key from keyserver", level=DEBUG) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + + +def add_source(source, key=None, fail_invalid=False): """Add a package source to this system. 
@param source: a URL or sources.list entry, as supported by
@@ -249,6 +315,33 @@ def add_source(source, key=None):
 such as 'cloud:icehouse'
 'distro' may be used as a noop
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list/proposed
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa-name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<ca-name>': add-apt-repository --yes cloud-archive:<ca-name>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
+      optional staging version.  If staging is used then the staging PPA [2]
+      will be used.  If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+      where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+      where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu
+        to /etc/apt/sources.list.d/cloud-archive-list
+
 @param key: A key to be added to the system's APT keyring and used
 to verify the signatures on packages. Ideally, this should be an
 ASCII format GPG public key including the block headers. A GPG key
@@ -256,51 +349,141 @@
 available to retrieve the actual public key from a public keyserver
 placing your Juju environment at risk. ppa and cloud archive keys
 are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
+
+    @raises SourceConfigError() if, for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
 """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+    ])
 if source is None:
-        log('Source is not present.
Skipping')
-        return
-
-    if (source.startswith('ppa:') or
-            source.startswith('http') or
-            source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        cmd = ['add-apt-repository', '--yes', source]
-        _run_with_retries(cmd)
-    elif source.startswith('cloud:'):
-        install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    elif source == 'distro':
-        pass
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            if key:
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            break
 else:
-        log("Unknown source: {!r}".format(source))
-
-    if key:
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile('w+') as key_file:
-                key_file.write(key)
-                key_file.flush()
-                key_file.seek(0)
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
-        else:
-            # Note that hkp: is in no way a secure protocol. Using a
-            # GPG key id is pointless from a security POV unless you
-            # absolutely trust your network and DNS.
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
-                                   key])
+        # nothing matched.  log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = lsb_release()['DISTRIB_CODENAME']
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    """
+    _run_with_retries(['add-apt-repository', '--yes', spec])
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket into the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
+ """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release:String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is in the same as the current ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. + """ + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + if release != ubuntu_rel: + raise SourceConfigError( + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' + 'version ({})'.format(release, os_release, ubuntu_rel)) def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), @@ -316,9 +499,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), :param: cmd_env: dict: Environment variables to add to the command run. 
""" - env = os.environ.copy() + env = None + kwargs = {} if cmd_env: + env = os.environ.copy() env.update(cmd_env) + kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -330,7 +516,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - result = subprocess.check_call(cmd, env=env) + # result = subprocess.check_call(cmd, env=env) + result = subprocess.check_call(cmd, **kwargs) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -343,6 +530,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. + :param: cmd: str: The apt command to run. :param: fatal: bool: Whether the command's output should be checked and retried. """ diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index ad67965a..ae5618fe 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -2060,14 +2060,14 @@ def get_ceph_pg_stat(): def get_ceph_health(): """ - Returns the health of the cluster from a 'ceph health' + Returns the health of the cluster from a 'ceph status' :return: dict Also raises CalledProcessError if our ceph command fails To get the overall status, use get_ceph_health()['overall_status'] """ try: tree = check_output( - ['ceph', 'health', '--format=json']) + ['ceph', 'status', '--format=json']) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -2079,7 +2079,7 @@ def get_ceph_health(): tree, v.message)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( + log("ceph status command failed with message: {}".format( e.message)) raise @@ -2103,7 +2103,7 @@ def reweight_osd(osd_num, new_weight): return True return False except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( + log("ceph osd crush reweight command failed with message: {}".format( e.message)) raise diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py index 48867880..e7aa4715 100644 --- a/ceph-osd/tests/charmhelpers/__init__.py +++ b/ceph-osd/tests/charmhelpers/__init__.py @@ -14,6 +14,11 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. +from __future__ import print_function +from __future__ import absolute_import + +import functools +import inspect import subprocess import sys @@ -34,3 +39,59 @@ else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # flake8: noqa + + +# Holds a list of mapping of mangled function names that have been deprecated +# using the @deprecate decorator below. This is so that the warning is only +# printed once for each usage of the function. +__deprecated_functions = {} + + +def deprecate(warning, date=None, log=None): + """Add a deprecation warning the first time the function is used. + The date, which is a string in semi-ISO8660 format indicate the year-month + that the function is officially going to be removed. + + usage: + + @deprecate('use core/fetch/add_source() instead', '2017-04') + def contributed_add_source_thing(...): + ... + + And it then prints to the log ONCE that the function is deprecated. 
+ The reason for passing the logging function (log) is so that hookenv.log + can be used for a charm if needed. + + :param warning: String to indicat where it has moved ot. + :param date: optional sting, in YYYY-MM format to indicate when the + function will definitely (probably) be removed. + :param log: The log function to call to log. If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip = 
keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + 
tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 88e80a49..b0043cbe 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) From 12d20a1cef8d3c6129a00a5fbefb18b8cf961360 Mon Sep 17 00:00:00 2001 From: Mario Splivalo Date: Mon, 12 Jun 2017 13:42:01 +0200 Subject: [PATCH 1324/2699] Cleanup config.yaml Change-Id: Iff544d471e99400529ff1c98ccce348276e6786b --- ceph-radosgw/config.yaml | 285 ++++++++++++++++++++------------------- 1 file changed, 145 insertions(+), 140 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index d0f1f9e6..3a8443dd 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -1,21 +1,24 @@ options: loglevel: - default: 1 type: int + default: 1 description: RadosGW debug level. Max is 20. source: type: string default: description: | - Optional configuration to support use of additional sources such as: - - - ppa:myteam/ppa - - cloud:trusty-proposed/kilo - - http://my.archive.com/ubuntu main - - The last option should be used in conjunction with the key configuration - option. - + Optional repository from which to install. May be one of the following: + distro (default), ppa:somecustom/ppa, a deb url sources entry, + or a supported Ubuntu Cloud Archive e.g. + . + cloud:- + cloud:-/updates + cloud:-/staging + cloud:-/proposed + . + See https://wiki.ubuntu.com/OpenStack/CloudArchive for info on which + cloud archives are available and supported. + . Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive for precise but is provided in the Ubuntu cloud archive. @@ -25,51 +28,118 @@ options: description: | Key ID to import to the apt keyring to support use with arbitary source configuration from outside of Launchpad archives or PPA's. + harden: + type: string + default: + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. config-flags: type: string default: description: | - User provided Ceph configuration. Supports a string representation of - a python dictionary where each top-level key represents a section in - the ceph.conf template. You may only use sections supported in the - template. - . - WARNING: this is not the recommended way to configure the underlying - services that this charm installs and is used at the user's own risk. 
- This option is mainly provided as a stop-gap for users that either - want to test the effect of modifying some config or who have found - a critical bug in the way the charm has configured their services - and need it fixed immediately. We ask that whenever this is used, - that the user consider opening a bug on this charm at - http://bugs.launchpad.net/charms providing an explanation of why the - config was needed so that we may consider it for inclusion as a - natively supported config in the the charm. + User provided Ceph configuration. Supports a string representation of + a python dictionary where each top-level key represents a section in + the ceph.conf template. You may only use sections supported in the + template. + . + WARNING: this is not the recommended way to configure the underlying + services that this charm installs and is used at the user's own risk. + This option is mainly provided as a stop-gap for users that either + want to test the effect of modifying some config or who have found + a critical bug in the way the charm has configured their services + and need it fixed immediately. We ask that whenever this is used, + that the user consider opening a bug on this charm at + http://bugs.launchpad.net/charms providing an explanation of why the + config was needed so that we may consider it for inclusion as a + natively supported config in the the charm. port: type: int default: 80 description: | The port that the RADOS Gateway will listen on. + prefer-ipv6: + type: boolean + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy extension must be + disabled and a non-temporary address must be configured/available on + your network interface. + pool-prefix: + type: string + default: + description: | + The rados gateway stores objects in many different pools. If you would + like to have multiple rados gateways each pointing to a separate set of + pools set this prefix. The charm will then set up a new set of pools. + If your prefix has a dash in it that will be used to split the prefix + into region and zone. Please read the documentation on federated rados + gateways for more information on region and zone. + restrict-ceph-pools: + type: boolean + default: False + description: | + Optionally restrict Ceph key permissions to access pools as required. + ceph-osd-replication-count: + type: int + default: 3 + description: | + This value dictates the number of replicas ceph must make of any object + it stores within RGW pools. Note that once the RGW pools have been + created, changing this value will not have any effect (although it can be + changed in ceph by manually configuring your ceph cluster). + rgw-buckets-pool-weight: + type: int + default: 20 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the amount of data loaded + into the RADOS Gateway/S3 interface is expected to be reserved for or + consume 20% of the data in the Ceph cluster, then this value would be + specified as 20. 
+  rgw-lightweight-pool-pg-num:
+    type: int
+    default: -1
+    description: |
+      When the Rados Gateway is installed it, by default, creates pools with
+      pg_num 8 which, in the majority of cases, is suboptimal. A few rgw pools
+      tend to carry more data than others e.g. .rgw.buckets tends to be larger
+      than most. So, for pools with greater requirements than others the charm
+      will apply the optimal value i.e. corresponding to the number of OSDs
+      up+in the cluster at the time the pool is created. For others it will use
+      this value which can be altered depending on how big your cluster is. Note
+      that once a pool has been created, changes to this setting will be
+      ignored. Setting this value to -1 enables the number of placement
+      groups to be calculated based on the Ceph placement group calculator.
   # Keystone integration
   operator-roles:
-    default: "Member,Admin"
     type: string
+    default: "Member,Admin"
     description: |
       Comma-separated list of Swift operator roles; used when integrating with
       OpenStack Keystone.
   region:
-    default: RegionOne
     type: string
+    default: RegionOne
     description: |
       OpenStack region that the RADOS gateway supports; used when integrating
       with OpenStack Keystone.
   cache-size:
-    default: 500
     type: int
+    default: 500
     description: Number of keystone tokens to hold in local cache.
   revocation-check-interval:
-    default: 600
     type: int
+    default: 600
     description: Interval between revocation checks to keystone.
+  # HA config
   use-syslog:
     type: boolean
     default: False
@@ -79,14 +149,14 @@
     type: boolean
     default: False
     description: |
-      Use DNS HA with MAAS 2.0. Note if this is set do not set vip
-      settings below.
+      Use DNS HA with MAAS 2.0. Note if this is set do not set vip
+      settings below.
   vip:
     type: string
     default:
     description: |
       Virtual IP(s) to use to front API services in HA configuration.
-
+      .
       If multiple networks are being used, a VIP should be provided for
       each network, separated by spaces.
   ha-bindiface:
@@ -101,13 +171,36 @@
     description: |
       Default multicast port number that will be used to communicate between
       HA Cluster nodes.
-  # Network configuration options
-  # by default all access is over 'private-address'
+  haproxy-server-timeout:
+    type: int
+    default:
+    description: |
+      Server timeout configuration in ms for haproxy, used in HA
+      configurations. If not provided, default value of 30000ms is used.
+  haproxy-client-timeout:
+    type: int
+    default:
+    description: |
+      Client timeout configuration in ms for haproxy, used in HA
+      configurations. If not provided, default value of 30000ms is used.
+  haproxy-queue-timeout:
+    type: int
+    default:
+    description: |
+      Queue timeout configuration in ms for haproxy, used in HA
+      configurations. If not provided, default value of 5000ms is used.
+  haproxy-connect-timeout:
+    type: int
+    default:
+    description: |
+      Connect timeout configuration in ms for haproxy, used in HA
+      configurations. If not provided, default value of 5000ms is used.
+  # Network config (by default all access is over 'private-address')
   os-admin-network:
     type: string
     default:
     description: |
-      The IP address and netmask of the OpenStack Admin network (e.g.,
+      The IP address and netmask of the OpenStack Admin network (e.g.
       192.168.0.0/24)
       .
       This network will be used for admin endpoints.
@@ -115,7 +208,7 @@
     type: string
     default:
     description: |
-      The IP address and netmask of the OpenStack Internal network (e.g.,
+      The IP address and netmask of the OpenStack Internal network (e.g.
       192.168.0.0/24)
       .
       This network will be used for internal endpoints.
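Note: the rgw-buckets-pool-weight and rgw-lightweight-pool-pg-num descriptions above amount to a proportional placement-group calculation. A rough illustrative sketch of that arithmetic follows; the 100-PGs-per-OSD target and the power-of-two rounding are assumptions for the example, not the charm's exact algorithm:

    def weighted_pg_num(osd_count, pool_weight_pct, pgs_per_osd=100):
        # Portion of the cluster-wide PG budget granted to this pool.
        target = int(osd_count * pgs_per_osd * pool_weight_pct / 100.0)
        # Ceph prefers power-of-two pg_num values; round down.
        return 2 ** (target.bit_length() - 1) if target > 1 else 1

    # e.g. a pool expected to hold 20% of a 30-OSD cluster:
    # weighted_pg_num(30, 20) -> 512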
@@ -123,7 +216,7 @@ options: type: string default: description: | - The IP address and netmask of the OpenStack Public network (e.g., + The IP address and netmask of the OpenStack Public network (e.g. 192.168.0.0/24) . This network will be used for public endpoints. @@ -133,23 +226,23 @@ options: description: | The hostname or address of the public endpoints created for ceph-radosgw in the keystone identity provider. - + . This value will be used for public endpoints. For example, an os-public-hostname set to 'files.example.com' with will create the following public endpoint for the ceph-radosgw: - + . https://files.example.com:80/swift/v1 os-internal-hostname: type: string default: description: | - The hostname or address of the internal endpoints created for ceph-radosgw - in the keystone identity provider. - + The hostname or address of the internal endpoints created for + ceph-radosgw in the keystone identity provider. + . This value will be used for internal endpoints. For example, an os-internal-hostname set to 'files.internal.example.com' with will create the following internal endpoint for the ceph-radosgw: - + . https://files.internal.example.com:80/swift/v1 os-admin-hostname: type: string @@ -157,116 +250,28 @@ options: description: | The hostname or address of the admin endpoints created for ceph-radosgw in the keystone identity provider. - + . This value will be used for admin endpoints. For example, an os-admin-hostname set to 'files.admin.example.com' with will create the following admin endpoint for the ceph-radosgw: - - https://files.admin.example.com:80/swift/v1 - ceph-osd-replication-count: - type: int - default: 3 - description: | - This value dictates the number of replicas ceph must make of any object - it stores within RGW pools. Note that once the RGW pools have been - created, changing this value will not have any effect (although it can be - changed in ceph by manually configuring your ceph cluster). - rgw-buckets-pool-weight: - type: int - default: 20 - description: | - Defines a relative weighting of the pool as a percentage of the total - amount of data in the Ceph cluster. This effectively weights the number - of placement groups for the pool created to be appropriately portioned - to the amount of data expected. For example, if the amount of data loaded - into the RADOS Gateway/S3 interface is expected to be reserved for or - consume 20% of the data in the Ceph cluster, then this value would be - specified as 20. - rgw-lightweight-pool-pg-num: - type: int - default: -1 - description: | - When the Rados Gatway is installed it, by default, creates pools with - pg_num 8 which, in the majority of cases is suboptimal. A few rgw pools - tend to carry more data than others e.g. .rgw.buckets tends to be larger - than most. So, for pools with greater requirements than others the charm - will apply the optimal value i.e. corresponding to the number of OSDs - up+in the cluster at the time the pool is created. For others it will use - this value which can be altered depending on how big you cluster is. Note - that once a pool has been created, changes to this setting will be - ignored. Setting this value to -1, enables the number of placement - groups to be calculated based on the Ceph placement group calculator. - haproxy-server-timeout: - type: int - default: - description: | - Server timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. 
- haproxy-client-timeout: - type: int - default: - description: | - Client timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. - haproxy-queue-timeout: - type: int - default: - description: | - Queue timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. - haproxy-connect-timeout: - type: int - default: - description: | - Connect timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. - prefer-ipv6: - type: boolean - default: False - description: | - If True enables IPv6 support. The charm will expect network interfaces - to be configured with an IPv6 address. If set to False (default) IPv4 - is expected. . - NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy extension must be - disabled and a non-temporary address must be configured/available on - your network interface. - pool-prefix: - type: string - default: - description: | - The rados gateway stores objects in many different pools. If you would - like to have multiple rados gateways each pointing to a separate set of - pools set this prefix. The charm will then set up a new set of pools. - If your prefix has a dash in it that will be used to split the prefix - into region and zone. Please read the documentation on federated rados - gateways for more information on region and zone. + https://files.admin.example.com:80/swift/v1 + # Monitoring config nagios_context: - default: "juju" type: string + default: "juju" description: | Used by the nrpe-external-master subordinate charm. A string that will be prepended to instance name to set the host name in nagios. So for instance the hostname would be something like: + . juju-myservice-0 + . If you're running multiple environments with the same services in them this allows you to differentiate between them. nagios_servicegroups: - default: "" type: string + default: "" description: | - A comma-separated list of nagios servicegroups. - If left empty, the nagios_context will be used as the servicegroup - harden: - default: - type: string - description: | - Apply system hardening. Supports a space-delimited list of modules - to run. Supported modules currently include os, ssh, apache and mysql. - restrict-ceph-pools: - default: False - type: boolean - description: | - Optionally restrict Ceph key permissions to access pools as required. - + A comma-separated list of nagios servicegroups. 
If left empty, + the nagios_context will be used as the servicegroup From fd773cbfd359aaa33c11d89da0cc1c78477ddb00 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 7 Jul 2017 08:41:25 +0100 Subject: [PATCH 1325/2699] Resync charms.ceph for Luminous support Change-Id: If7b46cc7c122fe8e05e3564d53da0fba83d6cf0a --- ceph-mon/lib/ceph/__init__.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index ad67965a..51b717d2 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -1299,6 +1299,15 @@ def bootstrap_monitor_cluster(secret): service_restart('ceph-mon') else: service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) except: raise finally: @@ -2060,14 +2069,14 @@ def get_ceph_pg_stat(): def get_ceph_health(): """ - Returns the health of the cluster from a 'ceph health' + Returns the health of the cluster from a 'ceph status' :return: dict Also raises CalledProcessError if our ceph command fails To get the overall status, use get_ceph_health()['overall_status'] """ try: tree = check_output( - ['ceph', 'health', '--format=json']) + ['ceph', 'status', '--format=json']) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -2079,7 +2088,7 @@ def get_ceph_health(): tree, v.message)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( + log("ceph status command failed with message: {}".format( e.message)) raise @@ -2103,7 +2112,7 @@ def reweight_osd(osd_num, new_weight): return True return False except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( + log("ceph osd crush reweight command failed with message: {}".format( e.message)) raise From ba01b9ccc4d2fad6dca4c625034a74af81ebbde8 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 7 Jul 2017 09:48:49 +0100 Subject: [PATCH 1326/2699] Add bluestore support for OSD's Add highly experimental support for bluestore storage format for OSD devices; this is disabled by default and should only be enabled in deployments where loss of data does not present a problem! Change-Id: I21beff9ce535f1b5c16d7f6f51c35126cc7da43e Depends-On: I36f7aa9d7b96ec5c9eaa7a3a970593f9ca14cb34 --- ceph-osd/actions/add_disk.py | 3 +- ceph-osd/config.yaml | 6 ++++ ceph-osd/hooks/ceph_hooks.py | 4 ++- ceph-osd/lib/ceph/__init__.py | 23 +++++++++++-- ceph-osd/templates/ceph.conf | 4 +++ ceph-osd/unit_tests/test_ceph_hooks.py | 45 +++++++++++++++++++++++--- 6 files changed, 76 insertions(+), 9 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index b1b36202..9e303494 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -42,7 +42,8 @@ def add_device(request, device_path, bucket=None): ceph.osdize(dev, config('osd-format'), get_journal_devices(), config('osd-reformat'), config('ignore-device-errors'), - config('osd-encrypt')) + config('osd-encrypt'), + config('bluestore')) # Make it fast! 
if config('autotune'): ceph.tune_dev(dev) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index c4ebda54..3ccb9ef0 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -64,6 +64,12 @@ options: btrfs (experimental and not recommended) Only supported with ceph >= 0.48.3. + bluestore: + type: boolean + default: false + description: | + Use experimental bluestore storage format for OSD devices; only supported + in Ceph Jewel (10.2.0) or later. osd-reformat: type: string default: diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 20a18dca..a22da6e6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -257,6 +257,7 @@ def get_ceph_context(upgrading=False): 'dio': str(config('use-direct-io')).lower(), 'short_object_len': use_short_objects(), 'upgrade_in_progress': upgrading, + 'bluestore': config('bluestore'), } if config('prefer-ipv6'): @@ -376,7 +377,8 @@ def prepare_disks_and_activate(): ceph.osdize(dev, config('osd-format'), osd_journal, config('osd-reformat'), config('ignore-device-errors'), - config('osd-encrypt')) + config('osd-encrypt'), + config('bluestore')) # Make it fast! if config('autotune'): ceph.tune_dev(dev) diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/ceph/__init__.py index ae5618fe..7f8d3a4b 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -1299,6 +1299,15 @@ def bootstrap_monitor_cluster(secret): service_restart('ceph-mon') else: service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) except: raise finally: @@ -1346,16 +1355,17 @@ def find_least_used_journal(journal_devices): def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): + ignore_errors=False, encrypt=False, bluestore=False): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt) + reformat_osd, ignore_errors, encrypt, + bluestore) else: osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): + ignore_errors=False, encrypt=False, bluestore=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -1383,9 +1393,16 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if osd_format: cmd.append('--fs-type') cmd.append(osd_format) + if reformat_osd: cmd.append('--zap-disk') + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + cmd.append(dev) + if osd_journal: least_used = find_least_used_journal(osd_journal) cmd.append(least_used) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index fb1993f6..f7b530c9 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -41,6 +41,10 @@ setuser match path = /var/lib/ceph/$type/$cluster-$id {% endfor %} {% endif %} +{% if bluestore -%} +enable experimental unrecoverable data corrupting features = bluestore rocksdb +{%- endif %} + [client.osd-upgrade] keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 
99dd4d2d..8594afbc 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -28,7 +28,8 @@ 'use-direct-io': True, 'osd-format': 'ext4', 'prefer-ipv6': False, - 'customize-failure-domain': False} + 'customize-failure-domain': False, + 'bluestore': False} class CephHooksTestCase(unittest.TestCase): @@ -63,7 +64,41 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, - 'use_syslog': 'true'} + 'use_syslog': 'true', + 'bluestore': False} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_bluestore(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['bluestore'] = True + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bluestore': True} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -96,7 +131,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, - 'use_syslog': 'true'} + 'use_syslog': 'true', + 'bluestore': False} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -131,7 +167,8 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, - 'use_syslog': 'true'} + 'use_syslog': 'true', + 'bluestore': False} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'ceph') From 42a1cc4b2549aef453a05e89adfd9f15d7e93345 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Wed, 12 Jul 2017 16:47:19 -0700 Subject: [PATCH 1327/2699] Check existence of ceph binaries in add-storage hook Add a check to make sure that the ceph package is installed in the add-storage shim. The add-storage hook is run prior to the install hook in order to provide storage for the install phase of the charm in cases where it is needed. The ceph-osd charm converts Juju storage into OSDs and doesn't need the storage for the install hook, so just skip the hook if its run early. 
Change-Id: I7b7518f52d0b5ad947b0809af8ad67d342211779
Closes-Bug: #1675186
---
 ceph-osd/hooks/add-storage | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/ceph-osd/hooks/add-storage b/ceph-osd/hooks/add-storage
index 5b02c170..cec4d4cf 100755
--- a/ceph-osd/hooks/add-storage
+++ b/ceph-osd/hooks/add-storage
@@ -1,7 +1,17 @@
 #!/bin/bash
-# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
-# by default.
+# Shim used to determine that the ceph packages have been installed
+# before running hook execution. The add-storage hook fires before
+# the install hook in order to provide storage for charms which need
+# it at install time; however, the storage added for the ceph-osd
+# application will be used to create OSDs, which require the ceph
+# binaries, bootstrapping the node, etc.
+#
+# Note: this doesn't wait to ensure that ceph is bootstrapped because
+# that logic already exists in the charm's hook.
-dpkg -l|grep 'python-apt ' || exit 0
+if ! dpkg -s ceph > /dev/null 2>&1; then
+    juju-log "Ceph not yet installed."
+    exit 0
+fi
-exec ./hooks/storage.real
\ No newline at end of file
+exec ./hooks/storage.real
From aeac8acb880de1ef490715fcc30c870349d4c8b6 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Fri, 7 Jul 2017 17:16:55 +0200
Subject: [PATCH 1328/2699] add default features support
Change-Id: I0f0491180561aa75c43aac4a64acc9170622a6e6
---
 ceph-mon/config.yaml         | 12 ++++++++++++
 ceph-mon/hooks/ceph_hooks.py |  2 ++
 ceph-mon/templates/ceph.conf |  4 ++++
 3 files changed, 18 insertions(+)
diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 26ba1a95..52be3af8 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -196,3 +196,15 @@ options:
     description: |
       Apply system hardening. Supports a space-delimited list of modules
       to run. Supported modules currently include os, ssh, apache and mysql.
+  default-rbd-features:
+    default:
+    type: int
+    description: |
+      Restrict the rbd features used to the specified level. If set, this will
+      inform clients that they should set the config value `rbd default
+      features`, for example:
+
+        rbd default features = 1
+
+      This needs to be set to 1 when deploying a cloud with the nova-lxd
+      hypervisor.
\ No newline at end of file diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index c25138bb..0b203894 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -496,6 +496,8 @@ def client_relation_joined(relid=None): data = {'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} + if config('rbd-features'): + data['rbd_features'] = config('rbd-features') relation_set(relation_id=relid, relation_settings=data) else: diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index b48270fe..25be0167 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -37,6 +37,10 @@ cluster addr = {{ cluster_addr }} {% endfor %} {% endif %} +{% if rbd_features %} +rbd default features = {{ rbd_features }} +{% endif %} + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring {% if mon -%} From b2f8999af4063172e7508a6403aa1eaf950a8aee Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 12 Jun 2017 11:40:49 +0200 Subject: [PATCH 1329/2699] Cleanup config.yaml Change-Id: Iac96aa23468eb5fd860e7ad92975f506d1b805a4 --- ceph-mon/config.yaml | 163 ++++++++++++++++++++++--------------------- 1 file changed, 83 insertions(+), 80 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 52be3af8..8156bf1c 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -1,45 +1,74 @@ options: loglevel: - default: 1 type: int + default: 1 description: Mon and OSD debug level. Max is 20. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. + source: + type: string + default: + description: | + Optional configuration to support use of additional sources such as: + . + - ppa:myteam/ppa + - cloud:xenial-proposed/ocata + - http://my.archive.com/ubuntu main + . + The last option should be used in conjunction with the key configuration + option. + key: + type: string + default: + description: | + Key ID to import to the apt keyring to support use with arbitary source + configuration from outside of Launchpad archives or PPA's. + harden: + type: string + default: + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. fsid: type: string default: description: | The unique identifier (fsid) of the Ceph cluster. - - To generate a suitable value use `uuid`. + . + To generate a suitable value use `uuidgen`. If left empty, an fsid will be generated. - + . NOTE: Changing this configuration after deployment is not supported and new service units will not be able to join the cluster. config-flags: type: string default: description: | - User provided Ceph configuration. Supports a string representation of - a python dictionary where each top-level key represents a section in - the ceph.conf template. You may only use sections supported in the - template. - . - WARNING: this is not the recommended way to configure the underlying - services that this charm installs and is used at the user's own risk. - This option is mainly provided as a stop-gap for users that either - want to test the effect of modifying some config or who have found - a critical bug in the way the charm has configured their services - and need it fixed immediately. 
We ask that whenever this is used, - that the user consider opening a bug on this charm at - http://bugs.launchpad.net/charms providing an explanation of why the - config was needed so that we may consider it for inclusion as a - natively supported config in the the charm. + User provided Ceph configuration. Supports a string representation of + a python dictionary where each top-level key represents a section in + the ceph.conf template. You may only use sections supported in the + template. + . + WARNING: this is not the recommended way to configure the underlying + services that this charm installs and is used at the user's own risk. + This option is mainly provided as a stop-gap for users that either + want to test the effect of modifying some config or who have found + a critical bug in the way the charm has configured their services + and need it fixed immediately. We ask that whenever this is used, + that the user consider opening a bug on this charm at + http://bugs.launchpad.net/charms providing an explanation of why the + config was needed so that we may consider it for inclusion as a + natively supported config in the charm. auth-supported: type: string default: cephx description: | Which authentication flavour to use. . - Valid options are "cephx" and "none". If "none" is specified, + Valid options are "cephx" and "none". If "none" is specified, keys will still be created and deployed so that it can be enabled later. monitor-secret: @@ -48,44 +77,50 @@ options: description: | The Ceph secret key used by Ceph monitors. This value will become the mon.key. To generate a suitable value use: - + . ceph-authtool /dev/stdout --name=mon. --gen-key - + . If left empty, a secret key will be generated. - + . NOTE: Changing this configuration after deployment is not supported and new service units will not be able to join the cluster. monitor-count: type: int default: 3 description: | - How many nodes to wait for before trying to create the monitor cluster - this number needs to be odd, and more than three is a waste except for - very large clusters. + Number of ceph-mon units to wait for before attempting to bootstrap the + monitor cluster. For production clusters the default value of 3 ceph-mon + units is normally a good choice. + . + For test and development environments you can enable single-unit + deployment by setting this to 1. + . + NOTE: To establish quorum and enable partition tolerance an odd number of + ceph-mon units is required. monitor-hosts: type: string default: description: | - A space separated list of ceph mon hosts to use. This field is only - used to migrate an existing cluster to a juju managed solution - and should be left blank otherwise. + A space-separated list of ceph mon hosts to use. This field is only used + to migrate an existing cluster to a juju-managed solution and should + otherwise be left unset. expected-osd-count: type: int default: 0 description: | - Provides an expected number of OSDs for the cluster. This value is used - when calculating the number of placement groups for a pool creation. - The number of placement groups for new pools are based upon the actual - number of OSDs in the cluster or the expected-osd-count, whichever is - greater. A value of 0 will cause the charm to only consider the OSDs - which are in the cluster. + Number of OSDs expected to be deployed in the cluster. This value is used + for calculating the number of placement groups on pool creation.
The + number of placement groups for new pools is based on the actual number + of OSDs in the cluster or the expected-osd-count, whichever is greater. + A value of 0 will cause the charm to only consider the actual number of + OSDs in the cluster. pgs-per-osd: type: int default: 100 description: | The number of placement groups per OSD to target. It is important to properly size the number of placement groups per OSD as too many - or too few placement groups oer OSD may cause resource constraints and + or too few placement groups per OSD may cause resource constraints and performance degradation. This value comes from the recommendation of the Ceph placement group calculator (http://ceph.com/pgcalc/) and recommended values are: @@ -96,33 +131,6 @@ options: foreseeable future. 300 - If the cluster OSD count is expected to increase between 2x and 3x in the foreseeable future. - source: - type: string - default: - description: | - Optional configuration to support use of additional sources such as: - - - ppa:myteam/ppa - - cloud:trusty-proposed/kilo - - http://my.archive.com/ubuntu main - - The last option should be used in conjunction with the key configuration - option. - - Note that a minimum ceph version of 0.48.2 is required for use with this - charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Ubuntu cloud archive. - key: - type: string - default: - description: | - Key ID to import to the apt keyring to support use with arbitary source - configuration from outside of Launchpad archives or PPA's. - use-syslog: - type: boolean - default: False - description: | - If set to True, supporting services will log to syslog. ceph-public-network: type: string default: @@ -148,7 +156,7 @@ options: If True enables IPv6 support. The charm will expect network interfaces to be configured with an IPv6 address. If set to False (default) IPv4 is expected. - + . NOTE: these charms do not currently support IPv6 privacy extension. In order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on @@ -172,39 +180,34 @@ options: nagios_context: type: string default: "juju" - type: string description: | Used by the nrpe-external-master subordinate charm. - A string that will be prepended to instance name to set the host name + A string that will be prepended to instance name to set the hostname in nagios. So for instance the hostname would be something like: + . juju-myservice-0 + . If you're running multiple environments with the same services in them this allows you to differentiate between them. nagios_servicegroups: - default: "" type: string + default: "" description: | - A comma-separated list of nagios servicegroups. - If left empty, the nagios_context will be used as the servicegroup + A comma-separated list of nagios servicegroups. If left empty, the + nagios_context will be used as the servicegroup. use-direct-io: - default: True type: boolean + default: True description: Configure use of direct IO for OSD journals. - harden: - default: - type: string - description: | - Apply system hardening. Supports a space-delimited list of modules - to run. Supported modules currently include os, ssh, apache and mysql. default-rbd-features: - default: type: int + default: description: | Restrict the rbd features used to the specified level. If set, this will inform clients that they should set the config value `rbd default features`, for example: - + .
rbd default features = 1 - + . This needs to be set to 1 when deploying a cloud with the nova-lxd - hypervisor. \ No newline at end of file + hypervisor. From d92cf97e10aa8ef2c809a3ca886e26df1525955a Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 14 Jun 2017 13:15:32 +0200 Subject: [PATCH 1330/2699] config.yaml: Cleanup Change-Id: I86e1695b0b08dd275b3f198835288d1d4a3c95d7 --- ceph-osd/config.yaml | 154 +++++++++++++++++++++---------------------- 1 file changed, 77 insertions(+), 77 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 3ccb9ef0..1cfbf642 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -1,68 +1,98 @@ options: loglevel: - default: 1 type: int + default: 1 description: OSD debug level. Max is 20. + source: + type: string + default: + description: | + Optional configuration to support use of additional sources such as: + . + - ppa:myteam/ppa + - cloud:xenial-proposed/ocata + - http://my.archive.com/ubuntu main + . + The last option should be used in conjunction with the key configuration + option. + key: + type: string + default: + description: | + Key ID to import to the apt keyring to support use with arbitrary source + configuration from outside of Launchpad archives or PPA's. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. + harden: + type: string + default: + description: | + Apply system hardening. Supports a space-delimited list of modules + to run. Supported modules currently include os, ssh, apache and mysql. config-flags: type: string default: description: | - User provided Ceph configuration. Supports a string representation of - a python dictionary where each top-level key represents a section in - the ceph.conf template. You may only use sections supported in the - template. - . - WARNING: this is not the recommended way to configure the underlying - services that this charm installs and is used at the user's own risk. - This option is mainly provided as a stop-gap for users that either - want to test the effect of modifying some config or who have found - a critical bug in the way the charm has configured their services - and need it fixed immediately. We ask that whenever this is used, - that the user consider opening a bug on this charm at - http://bugs.launchpad.net/charms providing an explanation of why the - config was needed so that we may consider it for inclusion as a - natively supported config in the the charm. + User provided Ceph configuration. Supports a string representation of + a python dictionary where each top-level key represents a section in + the ceph.conf template. You may only use sections supported in the + template. + . + WARNING: this is not the recommended way to configure the underlying + services that this charm installs and is used at the user's own risk. + This option is mainly provided as a stop-gap for users that either + want to test the effect of modifying some config or who have found + a critical bug in the way the charm has configured their services + and need it fixed immediately. We ask that whenever this is used, + that the user consider opening a bug on this charm at + http://bugs.launchpad.net/charms providing an explanation of why the + config was needed so that we may consider it for inclusion as a + natively supported config in the charm. osd-devices: type: string default: /dev/vdb description: | - The devices to format and set up as osd volumes.
- + The devices to format and set up as OSD volumes. + . These devices are the range of devices that will be checked for and used across all service units, in addition to any volumes attached via the --storage flag during deployment. - + . For ceph >= 0.56.6 these can also be directories instead of devices - the charm assumes anything not starting with /dev is a directory instead. osd-journal: type: string default: description: | - The device to use as a shared journal drive for all OSD's. By default - no journal device will be used. - + The device to use as a shared journal drive for all OSDs. By default + a journal partition will be created on each OSD volume device for use by + that OSD. + . Only supported with ceph >= 0.48.3. osd-journal-size: type: int default: 1024 description: | - Ceph osd journal size. The journal size should be at least twice the + Ceph OSD journal size. The journal size should be at least twice the product of the expected drive speed multiplied by filestore max sync interval. However, the most common practice is to partition the journal drive (often an SSD), and mount it such that Ceph uses the entire partition for the journal. - + . Only supported with ceph >= 0.48.3. osd-format: type: string default: xfs description: | Format of filesystem to use for OSD devices; supported formats include: - + . xfs (Default >= 0.48.3) ext4 (Only option < 0.48.3) btrfs (experimental and not recommended) - + . Only supported with ceph >= 0.48.3. bluestore: type: boolean @@ -75,9 +105,9 @@ default: description: | By default, the charm will not re-format a device that already looks - as if it might be an OSD device. This is a safeguard to try to + as if it might be an OSD device. This is a safeguard to try to prevent data loss. - + . Specifying this option (any value) forces a reformat of any OSD devices found which are not already mounted. osd-encrypt: @@ -87,7 +117,7 @@ By default, the charm will not encrypt Ceph OSD devices; however, by setting osd-encrypt to True, Ceph's dmcrypt support will be used to encrypt OSD devices. - + . Specifying this option on a running Ceph OSD node will have no effect until new disks are added, at which point new disks will be encrypted. ignore-device-errors: @@ -97,7 +127,7 @@ By default, the charm will raise errors if a whitelisted device is found, but for some reason the charm is unable to initialize the device for use by Ceph. - + . Setting this option to 'True' will result in the charm classifying such problems as warnings only and will not result in a hook error. ephemeral-unmount: @@ -106,38 +136,11 @@ description: | Cloud instances provide ephemeral storage which is normally mounted on /mnt. - + . Setting this option to the path of the ephemeral mountpoint will force an unmount of the corresponding device so that it can be used as an OSD - storage device. This is useful for testing purposes (cloud deployment + storage device. This is useful for testing purposes (cloud deployment is not a typical use case). - source: - type: string - default: - description: | - Optional configuration to support use of additional sources such as: - - - ppa:myteam/ppa - - cloud:trusty-proposed/kilo - - http://my.archive.com/ubuntu main - - The last option should be used in conjunction with the key configuration - option.
- - Note that a minimum ceph version of 0.48.2 is required for use with this - charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Ubuntu cloud archive. - key: - type: string - default: - description: | - Key ID to import to the apt keyring to support use with arbitary source - configuration from outside of Launchpad archives or PPA's. - use-syslog: - type: boolean - default: False - description: | - If set to True, supporting services will log to syslog. ceph-public-network: type: string default: @@ -189,14 +192,14 @@ options: type: string default: description: | - Custom availablility zone to provide to Ceph for the OSD placement + Custom availability zone to provide to Ceph for the OSD placement max-sectors-kb: - default: 1048576 type: int + default: 1048576 description: | This parameter will adjust every block device in your server to allow - greater IO operation sizes. If you have a RAID card with cache on it - consider tuning this much higher than the 1MB default. 1MB is a safe + greater IO operation sizes. If you have a RAID card with cache on it + consider tuning this much higher than the 1MB default. 1MB is a safe default for spinning HDDs that don't have much cache. nagios_context: type: string @@ -204,42 +207,39 @@ options: type: string description: | Used by the nrpe-external-master subordinate charm. - A string that will be prepended to instance name to set the host name + A string that will be prepended to instance name to set the hostname in nagios. So for instance the hostname would be something like: + . juju-myservice-0 + . If you're running multiple environments with the same services in them this allows you to differentiate between them. nagios_servicegroups: - default: "" type: string + default: "" description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup use-direct-io: - default: True type: boolean + default: True description: Configure use of direct IO for OSD journals. - harden: - default: - type: string - description: | - Apply system hardening. Supports a space-delimited list of modules - to run. Supported modules currently include os, ssh, apache and mysql. autotune: - default: False type: boolean + default: False description: | Enabling this option will attempt to tune your network card sysctls and hard drive settings. This changes hard drive read ahead settings and - max_sectors_kb. For the network card this will detect the link speed + max_sectors_kb. For the network card this will detect the link speed and make appropriate sysctl changes. Enabling this option should generally be safe. aa-profile-mode: type: string default: 'disable' description: | - Enable apparmor profile. Valid settings: 'complain', 'enforce' or 'disable'. + Enable apparmor profile. Valid settings: 'complain', 'enforce' or + 'disable'. . NOTE: changing the value of this option is disruptive to a running Ceph - cluster as all ceph-osd processes must be restarted as part of changing the - apparmor profile enforcement mode. + cluster as all ceph-osd processes must be restarted as part of changing + the apparmor profile enforcement mode. 
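As an aside on the config-flags format described above: a minimal sketch, not the charm's actual parsing code, of how a python-dict string whose top-level keys name ceph.conf sections maps onto rendered stanzas; the option values are invented:

    import ast

    # Example config-flags value: top-level keys name ceph.conf sections.
    raw = "{'osd': {'osd max backfills': 1, 'osd recovery max active': 2}}"

    for section, options in ast.literal_eval(raw).items():
        print('[{}]'.format(section))
        for key, value in options.items():
            print('{} = {}'.format(key, value))

Run directly, this prints an [osd] header followed by the two settings, which is the shape the rendered ceph.conf section takes.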
From 632a442a2a1ef1c495a6e91b8c51ae7e3452b3ec Mon Sep 17 00:00:00 2001 From: Mario Splivalo Date: Mon, 12 Jun 2017 14:56:58 +0200 Subject: [PATCH 1331/2699] Cleanup config.yaml Change-Id: I62ac2f5656b63586e42b1c90cf71d30f35da39be --- ceph-proxy/config.yaml | 86 ++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 45 deletions(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index b77b3b25..d8780fe9 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -1,7 +1,41 @@ options: + loglevel: + type: int + default: 1 + description: Mon and OSD debug level. Max is 20. + use-syslog: + type: boolean + default: False + description: | + Setting this to True will allow supporting services to log to syslog. + source: + type: string + default: "" + description: | + Repository from which to install. May be one of the following: + distro (default), ppa:somecustom/ppa, a deb url sources entry, + or a supported Ubuntu Cloud Archive e.g. + . + cloud:<series>-<openstack-release> + cloud:<series>-<openstack-release>/updates + cloud:<series>-<openstack-release>/staging + cloud:<series>-<openstack-release>/proposed + . + See https://wiki.ubuntu.com/OpenStack/CloudArchive for info on which + cloud archives are available and supported. + . + NOTE: updating this setting to a source that is known to provide + a later version of OpenStack will trigger a software upgrade unless + action-managed-upgrade is set to True. + key: + type: string + default: "" + description: | + Key ID to import to the apt keyring to support use with arbitrary source + configuration from outside of Launchpad archives or PPA's. fsid: type: string - default: + default: "" description: | fsid of the ceph cluster. To generate a suitable value use `uuid` . @@ -9,57 +43,19 @@ options: install if it is not provided. monitor-hosts: type: string - default: + default: "" description: | - Space separated list of existing monitor hosts, in the format + Space-delimited list of existing monitor hosts, in the format {IP / Hostname}:{port} {IP / Hostname}:{port} admin-key: type: string - default: - description: | - Admin cephx key for existing Ceph cluster + default: "" + description: Admin cephx key for existing Ceph cluster auth-supported: type: string default: cephx description: | Which authentication flavour to use. . - Valid options are "cephx" and "none". If "none" is specified, - keys will still be created and deployed so that it can be - enabled later. - # mon-key: - # type: string - # default: - # description: | - # Monitor cephx key - use-syslog: - type: boolean - default: False - description: | - If set to True, supporting services will log to syslog. - loglevel: - default: 1 - type: int - description: Mon and OSD debug level. Max is 20. - source: - type: string - default: - description: | - Optional configuration to support use of additional sources such as: - - - ppa:myteam/ppa - - cloud:trusty-proposed/kilo - - http://my.archive.com/ubuntu main - - The last option should be used in conjunction with the key configuration - option. - - Note that a minimum ceph version of 0.48.2 is required for use with this - charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Ubuntu cloud archive. - key: - type: string - default: - description: | - Key ID to import to the apt keyring to support use with arbitary source - configuration from outside of Launchpad archives or PPA's. + Valid options are "cephx" and "none". If "none" is specified, keys will + still be created and deployed so that it can be enabled later.
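To make the monitor-hosts format above concrete, a small illustrative snippet, not taken from the charm, with invented addresses:

    raw = '192.168.100.10:6789 192.168.100.11:6789 192.168.100.12:6789'

    # Each entry is {IP / Hostname}:{port}; rsplit keeps hostnames
    # containing dots intact and splits only on the final colon.
    mon_hosts = []
    for entry in raw.split():
        host, port = entry.rsplit(':', 1)
        mon_hosts.append((host, int(port)))

    print(mon_hosts)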
From bce3bed41a9121ca34ad9bc9ec0076d08fc02964 Mon Sep 17 00:00:00 2001 From: Chris Holcombe Date: Wed, 19 Oct 2016 11:27:20 -0700 Subject: [PATCH 1332/2699] Crushmap Update Action This action takes a base64-encoded json definition of a crushmap and applies it. Change-Id: Icaabc89016be22aa16d2f813a746842fd30fd989 --- ceph-mon/actions.yaml | 12 ++++++++ ceph-mon/actions/crushmap-update | 1 + ceph-mon/actions/crushmap-update.py | 47 +++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 120000 ceph-mon/actions/crushmap-update create mode 100755 ceph-mon/actions/crushmap-update.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 3f8e5dfe..67fe4c88 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -214,3 +214,15 @@ pool-get: description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values required: [key, pool-name] additionalProperties: false +crushmap-update: + description: | + Apply a json crushmap definition. This will throw away the existing + ceph crushmap and apply the new definition. Use with extreme caution. + WARNING - This function is extremely dangerous if misused. It can very + easily break your cluster in unexpected ways. + params: + map: + type: string + description: The json crushmap blob + required: [map] + additionalProperties: false diff --git a/ceph-mon/actions/crushmap-update b/ceph-mon/actions/crushmap-update new file mode 120000 index 00000000..af530e0d --- /dev/null +++ b/ceph-mon/actions/crushmap-update @@ -0,0 +1 @@ +crushmap-update.py \ No newline at end of file diff --git a/ceph-mon/actions/crushmap-update.py b/ceph-mon/actions/crushmap-update.py new file mode 100755 index 00000000..d90e50bf --- /dev/null +++ b/ceph-mon/actions/crushmap-update.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +from charmhelpers.core.hookenv import action_get, action_fail +from subprocess import check_output, CalledProcessError, PIPE, Popen + + +def update_crushmap(): + try: + encoded_text = action_get("map") + json_map = base64.b64decode(encoded_text) + try: + # This needs json_map passed to it from stdin; stderr is + # captured so a compile failure can be reported. + crushtool = Popen( + ["crushtool", "-o", "compiled_crushmap", "-m", "compile"], + stdin=PIPE, stderr=PIPE) + crushtool_stdout, crushtool_stderr = crushtool.communicate( + input=json_map) + if crushtool.returncode != 0: + action_fail( + "Failed to compile json: {}".format(crushtool_stderr)) + return + check_output( + ["ceph", "osd", "setcrushmap", "-i", "compiled_crushmap"]) + except (CalledProcessError, OSError) as err2: + action_fail("Crush compile or load failed with error: {}".format( + err2)) + except TypeError as err: + action_fail( + "Unable to base64 decode: {}.
Error: {}".format(encoded_text, err)) + + +if __name__ == '__main__': + update_crushmap() From 6b0fb773fadec383079b250d52613ac7eb7e9e92 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 13:44:03 -0500 Subject: [PATCH 1333/2699] Remove reference to old config yaml Closes-Bug: #1707701 Change-Id: Idcf84a118302f495ca2b148719a412d3c093164e --- ceph-fs/src/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 6f85c5ed..6eacef60 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -15,8 +15,6 @@ Boot things up by using: juju deploy -n 3 ceph-mon juju deploy -n 3 ceph-osd -In my example deployments on EC2 the following ceph.yaml will work: - You can then deploy this charm by simply doing: juju deploy ceph-fs From 2a3efe61c2ec00bd0a1e5adf0435feb434c620d7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 13:48:41 -0500 Subject: [PATCH 1334/2699] Update charm icon Change-Id: I12de6b87c7b2a89d63f022834a52ff2da0b2899d Closes-bug: #1686739 --- ceph-fs/src/icon.svg | 177 +++++++++---------------------------------- 1 file changed, 37 insertions(+), 140 deletions(-) diff --git a/ceph-fs/src/icon.svg b/ceph-fs/src/icon.svg index de53ab2e..e9383990 100644 --- a/ceph-fs/src/icon.svg +++ b/ceph-fs/src/icon.svg @@ -14,8 +14,9 @@ height="96" id="svg6517" version="1.1" - inkscape:version="0.48+devel r12304" - sodipodi:docname="ceph01.svg.2013_04_25_16_07_37.0.svg"> + inkscape:version="0.91+devel r" + sodipodi:docname="ceph.svg" + viewBox="0 0 96 96"> + style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"> - - - - + d="M -9,-9 H 605 V 222 H -9 Z" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 
0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> - - - - @@ -233,7 +207,7 @@ clipPathUnits="userSpaceOnUse"> + inkscape:snap-center="true" + inkscape:snap-page="true"> + id="guide823" + inkscape:locked="false" /> + id="guide825" + inkscape:locked="false" /> + id="guide827" + inkscape:locked="false" /> + id="guide829" + inkscape:locked="false" /> @@ -308,7 +287,7 @@ image/svg+xml - + @@ -319,96 +298,14 @@ transform="translate(268,-635.29076)" style="display:inline"> - + id="path6455" /> - - - From 450cbc8d6447199f3bc9500e56dbb76833701113 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 14:13:33 -0500 Subject: [PATCH 1335/2699] Update charm icon Change-Id: Ic9e4cde4a498f572d7373dda5ed105ccefe0756a Closes-bug: #1686739 --- ceph-proxy/icon.svg | 177 +++++++++----------------------------------- 1 file changed, 37 insertions(+), 140 deletions(-) diff --git a/ceph-proxy/icon.svg b/ceph-proxy/icon.svg index de53ab2e..e9383990 100644 --- a/ceph-proxy/icon.svg +++ b/ceph-proxy/icon.svg @@ -14,8 +14,9 @@ height="96" id="svg6517" version="1.1" - inkscape:version="0.48+devel r12304" - sodipodi:docname="ceph01.svg.2013_04_25_16_07_37.0.svg"> + inkscape:version="0.91+devel r" + sodipodi:docname="ceph.svg" + viewBox="0 0 96 96"> + style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"> - - - - + d="M -9,-9 H 605 V 222 H -9 Z" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 
1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> - - - - @@ -233,7 +207,7 @@ clipPathUnits="userSpaceOnUse"> + inkscape:snap-center="true" + inkscape:snap-page="true"> + id="guide823" + inkscape:locked="false" /> + id="guide825" + inkscape:locked="false" /> + id="guide827" + inkscape:locked="false" /> + id="guide829" + inkscape:locked="false" /> @@ -308,7 +287,7 @@ image/svg+xml - + @@ -319,96 +298,14 @@ transform="translate(268,-635.29076)" style="display:inline"> - + id="path6455" /> - - - From 6ac8fa2f1258e34f7f34bcb1ba16db2e93e30afc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 14:15:31 -0500 Subject: [PATCH 1336/2699] Update charm icon Change-Id: I2e0018caf6463ad5ba5ad7a7eaa4e3ffeae3f0a3 Closes-bug: #1686739 --- ceph-mon/icon.svg | 177 ++++++++++------------------------------------ 1 file changed, 37 insertions(+), 140 deletions(-) diff --git a/ceph-mon/icon.svg b/ceph-mon/icon.svg index de53ab2e..e9383990 100644 --- a/ceph-mon/icon.svg +++ b/ceph-mon/icon.svg @@ -14,8 +14,9 @@ height="96" id="svg6517" version="1.1" - inkscape:version="0.48+devel r12304" - sodipodi:docname="ceph01.svg.2013_04_25_16_07_37.0.svg"> + inkscape:version="0.91+devel r" + sodipodi:docname="ceph.svg" + viewBox="0 0 96 96"> + style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"> - - - - + d="M -9,-9 H 605 V 222 H -9 Z" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 
-4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> - - - - @@ -233,7 +207,7 @@ clipPathUnits="userSpaceOnUse"> + inkscape:snap-center="true" + inkscape:snap-page="true"> + id="guide823" + inkscape:locked="false" /> + id="guide825" + inkscape:locked="false" /> + id="guide827" + inkscape:locked="false" /> + id="guide829" + inkscape:locked="false" /> @@ -308,7 +287,7 @@ image/svg+xml - + @@ -319,96 +298,14 @@ transform="translate(268,-635.29076)" style="display:inline"> - + id="path6455" /> - - - From 51b73b3e866abf5cf71fa9e22a03c2dc8492590f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 14:16:25 -0500 Subject: [PATCH 1337/2699] Update charm icon Change-Id: I542e169004123635683b0e24a015885d43007a7e Closes-bug: #1686739 --- ceph-osd/icon.svg | 177 ++++++++++------------------------------------ 1 file changed, 37 insertions(+), 140 deletions(-) diff --git a/ceph-osd/icon.svg b/ceph-osd/icon.svg index de53ab2e..e9383990 100644 --- a/ceph-osd/icon.svg +++ b/ceph-osd/icon.svg @@ -14,8 +14,9 @@ height="96" id="svg6517" version="1.1" - inkscape:version="0.48+devel r12304" - sodipodi:docname="ceph01.svg.2013_04_25_16_07_37.0.svg"> + inkscape:version="0.91+devel r" + sodipodi:docname="ceph.svg" + viewBox="0 0 96 96"> + style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"> - - - - + d="M -9,-9 H 605 V 222 H -9 Z" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 
1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> - - - - @@ -233,7 +207,7 @@ clipPathUnits="userSpaceOnUse"> + inkscape:snap-center="true" + inkscape:snap-page="true"> + id="guide823" + inkscape:locked="false" /> + id="guide825" + inkscape:locked="false" /> + id="guide827" + inkscape:locked="false" /> + id="guide829" + inkscape:locked="false" /> @@ -308,7 +287,7 @@ image/svg+xml - + @@ -319,96 +298,14 @@ transform="translate(268,-635.29076)" style="display:inline"> - + id="path6455" /> - - - From f0ec8ce0f631b67e02d61941ad4439d135ea8c81 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 31 Jul 2017 14:19:30 -0500 Subject: [PATCH 1338/2699] Update charm icon Change-Id: I8021cae2f2f5dbe0ca61bf6602097b990a88ea4a Closes-bug: #1686739 --- ceph-radosgw/icon.svg | 177 +++++++++--------------------------------- 1 file changed, 37 insertions(+), 140 deletions(-) diff --git a/ceph-radosgw/icon.svg b/ceph-radosgw/icon.svg index de53ab2e..e9383990 100644 --- a/ceph-radosgw/icon.svg +++ b/ceph-radosgw/icon.svg @@ -14,8 +14,9 @@ height="96" id="svg6517" version="1.1" - inkscape:version="0.48+devel r12304" - sodipodi:docname="ceph01.svg.2013_04_25_16_07_37.0.svg"> + inkscape:version="0.91+devel r" + sodipodi:docname="ceph.svg" + viewBox="0 0 96 96"> + style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"> - - - - + d="M -9,-9 H 605 V 222 H -9 Z" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 
1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> + d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129" + inkscape:connector-curvature="0" /> - - - - @@ -233,7 +207,7 @@ clipPathUnits="userSpaceOnUse"> + inkscape:snap-center="true" + inkscape:snap-page="true"> + id="guide823" + inkscape:locked="false" /> + id="guide825" + inkscape:locked="false" /> + id="guide827" + inkscape:locked="false" /> + id="guide829" + inkscape:locked="false" /> @@ -308,7 +287,7 @@ image/svg+xml - + @@ -319,96 +298,14 @@ transform="translate(268,-635.29076)" style="display:inline"> - + id="path6455" /> - - - From 72c0b9c36c7775439710f8de2fc43b1ca195ed9a Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Wed, 9 Aug 2017 14:20:55 -0600 Subject: [PATCH 1339/2699] Modify tests.yaml which specifies bundletester config params with the following key:value pairs: - reset_timeout: 600 Change-Id: I418febadc043abf21995916bb9ce26dfc3c9d32e --- ceph-fs/src/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index e3185c6d..af79ff11 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -15,3 +15,4 @@ makefile: [] # and tox instead. ie. The venv is constructed before bundletester # is invoked. 
#python-packages: +reset_timeout: 600 From e3629479ad4b9671c75cd67c1ac6897c2d6ff3c1 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Wed, 9 Aug 2017 14:23:22 -0600 Subject: [PATCH 1340/2699] Modify tests.yaml which specifies bundletester config params with the following key:value pairs: - reset_timeout: 600 Change-Id: I09e41945bcfed90395447a0197398749e2dc0026 --- ceph-mon/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 4cf93d01..a03e7bad 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -15,3 +15,4 @@ makefile: [] # and tox instead. ie. The venv is constructed before bundletester # is invoked. #python-packages: +reset_timeout: 600 From a9e7738547f0047d3511c26d733a39a78ad1fbe4 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Wed, 9 Aug 2017 14:23:35 -0600 Subject: [PATCH 1341/2699] Modify tests.yaml which specifies bundletester config params with the following key:value pairs: - reset_timeout: 600 Change-Id: I8a35d5ec734663bc48932a9ee142fb433d540bc9 --- ceph-osd/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 4cf93d01..a03e7bad 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -15,3 +15,4 @@ makefile: [] # and tox instead. ie. The venv is constructed before bundletester # is invoked. #python-packages: +reset_timeout: 600 From 6550c9668e51336ed8443e5b92bd37c184a6e38d Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Wed, 9 Aug 2017 14:23:47 -0600 Subject: [PATCH 1342/2699] Modify tests.yaml which specifies bundletester config params with the following key:value pairs: - reset_timeout: 600 Change-Id: I64a207feaf1fc377b118d2ac7b29d12436f9834f --- ceph-proxy/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 4cf93d01..a03e7bad 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -15,3 +15,4 @@ makefile: [] # and tox instead. ie. The venv is constructed before bundletester # is invoked. #python-packages: +reset_timeout: 600 From a310025d4fa8b41f68f4173578fc10c502d949ae Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Wed, 9 Aug 2017 14:24:02 -0600 Subject: [PATCH 1343/2699] Modify tests.yaml which specifies bundletester config params with the following key:value pairs: - reset_timeout: 600 Change-Id: Ie7d1709380c3d8a02052d0c07a6e1ba731df3b3f --- ceph-radosgw/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 4cf93d01..a03e7bad 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -15,3 +15,4 @@ makefile: [] # and tox instead. ie. The venv is constructed before bundletester # is invoked. #python-packages: +reset_timeout: 600 From 0f8cf2460d1fa1d007ede82db5ad3ec703afa863 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 10 Aug 2017 15:39:02 +0100 Subject: [PATCH 1344/2699] keystone: PKI token format removal As of Pike, the OpenStack charms no longer generate the certificates and CA used to sign token revocation lists as this is associated with the PKI token format, which has been removed from OpenStack in favor of UUID or Fernet formats. Soft-fail on cert retrieval if an InternalServerError is thrown; this is most likely due to the fact that the keystone WSGI server cannot find the relevant files on the underlying filesystem. 
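The soft-fail described here amounts to collapsing server-side failures into the charm's own KSCertSetupException rather than letting the hook crash. A rough sketch of the pattern, assuming only the requests API; the endpoint shape mirrors the code below, and everything else is illustrative:

    import requests

    class KSCertSetupException(Exception):
        # Callers treat this as "certs not available yet", not as a
        # hard hook failure.
        pass

    def get_ks_cert(auth_endpoint, cert_type):
        url = '{}/certificates/{}'.format(auth_endpoint, cert_type)
        try:
            resp = requests.get(url)
            resp.raise_for_status()  # turns a 500 into an exception
        except requests.exceptions.RequestException:
            raise KSCertSetupException("Error connecting to keystone")
        return resp.text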
Change-Id: Ib592e7e47e10bed2d59c9136a3267f9c7ce8da83 Closes-Bug: 1709189 --- ceph-radosgw/hooks/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index cd25d007..8c5f55d5 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -73,12 +73,14 @@ from keystoneclient.exceptions import ( ConnectionRefused, Forbidden, + InternalServerError, ) except ImportError: # Juno and older from keystoneclient.exceptions import ( ConnectionError as ConnectionRefused, Forbidden, + InternalServerError, ) except ImportError: keystoneclient = None @@ -352,7 +354,8 @@ def get_ks_cert(ksclient, auth_endpoint, cert_type): # Juno and older cert = requests.request('GET', "{}/certificates/{}". format(auth_endpoint, cert_type)).text - except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden): + except (ConnectionRefused, requests.exceptions.ConnectionError, + Forbidden, InternalServerError): raise KSCertSetupException("Error connecting to keystone") return cert From 4a5703b7128d5ef1191a1362c81deb41ad9560de Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 8 Aug 2017 20:42:01 +0000 Subject: [PATCH 1345/2699] Bootstrap ceph-mgr daemon As of the Luminous release, Ceph requires ceph-mgr daemons be run to have a fully functional cluster. [0] [0] http://docs.ceph.com/docs/master/mgr/administrator/ Change-Id: Ie8c13f3e7ada000e074d2c4f7d996872cd3ceeef Closes-bug: #1709061 --- ceph-mon/hooks/ceph_hooks.py | 9 +++++++++ ceph-mon/lib/ceph/__init__.py | 35 ++++++++++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 0b203894..bbc45e8b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -16,6 +16,7 @@ import os import subprocess +import socket import sys import uuid @@ -225,6 +226,9 @@ def config_changed(): status_set('maintenance', 'Bootstrapping single Ceph MON') ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + status_set('maintenance', 'Bootstrapping single Ceph MGR') + ceph.bootstrap_manager() def get_mon_hosts(): @@ -280,6 +284,9 @@ def mon_relation(): ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) ceph.wait_for_bootstrap() ceph.wait_for_quorum() + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + status_set('maintenance', 'Bootstrapping Ceph MGR') + ceph.bootstrap_manager() # If we can and want to if is_leader() and config('customize-failure-domain'): # But only if the environment supports it @@ -547,6 +554,8 @@ def start(): service_restart('ceph-mon') else: service_restart('ceph-mon-all') + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + service_restart('ceph-mgr@{}'.format(socket.gethostname())) @hooks.hook('nrpe-external-master-relation-joined') diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 51b717d2..6aaf4fae 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -1314,6 +1314,27 @@ def bootstrap_monitor_cluster(secret): os.unlink(keyring) +def bootstrap_manager(): + hostname = socket.gethostname() + path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) + keyring = os.path.join(path, 'keyring') + + if os.path.exists(keyring): + log('bootstrap_manager: mgr already initialized.') + else: + mkdir(path, owner=ceph_user(), group=ceph_user()) + subprocess.check_call(['ceph', 'auth', 'get-or-create', + 'mgr.{}'.format(hostname), 
'mon', + 'allow profile mgr', 'osd', 'allow *', + 'mds', 'allow *', '--out-file', + keyring]) + chownr(path, ceph_user(), ceph_user()) + + unit = 'ceph-mgr@{}'.format(hostname) + subprocess.check_call(['systemctl', 'enable', unit]) + service_restart(unit) + + def update_monfs(): hostname = socket.gethostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1355,16 +1376,17 @@ def find_least_used_journal(journal_devices): def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): + ignore_errors=False, encrypt=False, bluestore=False): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt) + reformat_osd, ignore_errors, encrypt, + bluestore) else: osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False): + ignore_errors=False, encrypt=False, bluestore=False): if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -1392,9 +1414,16 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if osd_format: cmd.append('--fs-type') cmd.append(osd_format) + if reformat_osd: cmd.append('--zap-disk') + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + cmd.append(dev) + if osd_journal: least_used = find_least_used_journal(osd_journal) cmd.append(least_used) From acd1fd96bfd812997612322590de9a6e88413bfb Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 14 Aug 2017 14:46:38 +0100 Subject: [PATCH 1346/2699] Sync charms.ceph to get code cleanup changes Also had to fix some imports due to changes implemented as part of the cleanup. Change-Id: Ie232828056a7f15525f820e8e106264b22697168 --- ceph-osd/actions/pause_resume.py | 2 +- ceph-osd/hooks/ceph_hooks.py | 2 +- ceph-osd/lib/__init__.py | 0 ceph-osd/lib/ceph/__init__.py | 2136 ---------------- .../lib/ceph/{ceph_broker.py => broker.py} | 115 +- ceph-osd/lib/ceph/ceph_helpers.py | 1557 ------------ ceph-osd/lib/ceph/crush_utils.py | 149 ++ ceph-osd/lib/ceph/utils.py | 2199 +++++++++++++++++ ceph-osd/lib/setup.py | 85 - ceph-osd/unit_tests/test_replace_osd.py | 16 +- ceph-osd/unit_tests/test_tuning.py | 32 +- 11 files changed, 2468 insertions(+), 3825 deletions(-) delete mode 100644 ceph-osd/lib/__init__.py rename ceph-osd/lib/ceph/{ceph_broker.py => broker.py} (88%) delete mode 100644 ceph-osd/lib/ceph/ceph_helpers.py create mode 100644 ceph-osd/lib/ceph/crush_utils.py create mode 100644 ceph-osd/lib/ceph/utils.py delete mode 100644 ceph-osd/lib/setup.py diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index f66bf74d..a8ae4d41 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -27,7 +27,7 @@ action_fail, ) -from ceph import get_local_osd_ids +from ceph.utils import get_local_osd_ids from ceph_hooks import assess_status from utils import ( diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index a22da6e6..6104a2c0 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -22,7 +22,7 @@ import netifaces sys.path.append('lib') -import ceph +import ceph.utils as ceph from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/lib/__init__.py b/ceph-osd/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-osd/lib/ceph/__init__.py 
b/ceph-osd/lib/ceph/__init__.py index 7f8d3a4b..e69de29b 100644 --- a/ceph-osd/lib/ceph/__init__.py +++ b/ceph-osd/lib/ceph/__init__.py @@ -1,2136 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import ctypes -import collections -import json -import random -import socket -import subprocess -import time -import os -import re -import sys -import errno -import shutil -import pyudev - -from datetime import datetime - -from charmhelpers.core import hookenv -from charmhelpers.core import templating -from charmhelpers.core.host import ( - chownr, - cmp_pkgrevno, - lsb_release, - mkdir, - mounts, - owner, - service_restart, - service_start, - service_stop, - CompareHostReleases, - is_container, -) -from charmhelpers.core.hookenv import ( - cached, - config, - log, - status_set, - DEBUG, - ERROR, - WARNING, -) -from charmhelpers.fetch import ( - apt_cache, - add_source, apt_install, apt_update) -from charmhelpers.contrib.storage.linux.ceph import ( - monitor_key_set, - monitor_key_exists, - monitor_key_get, - get_mon_map, -) -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - zap_disk, - is_device_mounted, -) -from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source, -) - -from ceph.ceph_helpers import check_output - -CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') -OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') -HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev'] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -class Partition(object): - def __init__(self, name, number, size, start, end, sectors, uuid): - """ - A block device partition - :param name: Name of block device - :param number: Partition number - :param size: Capacity of the device - :param start: 
Starting block - :param end: Ending block - :param sectors: Number of blocks - :param uuid: UUID of the partition - """ - self.name = name, - self.number = number - self.size = size - self.start = start - self.end = end - self.sectors = sectors - self.uuid = uuid - - def __str__(self): - return "number: {} start: {} end: {} sectors: {} size: {} " \ - "name: {} uuid: {}".format(self.number, self.start, - self.end, - self.sectors, self.size, - self.name, self.uuid) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -def unmounted_disks(): - """List of unmounted block devices on the current host.""" - disks = [] - context = pyudev.Context() - for device in context.list_devices(DEVTYPE='disk'): - if device['SUBSYSTEM'] == 'block': - matched = False - for block_type in [u'dm', u'loop', u'ram', u'nbd']: - if block_type in device.device_node: - matched = True - if matched: - continue - disks.append(device.device_node) - log("Found disks: {}".format(disks)) - return [disk for disk in disks if not is_device_mounted(disk)] - - -def save_sysctls(sysctl_dict, save_location): - """ - Persist the sysctls to the hard drive. - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raise: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) - raise - - -def tune_nic(network_interface): - """ - This will set optimal sysctls for the particular network adapter. - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e.message), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """ - This will find the link speed for a given network device. Returns None - if an error occurs. - :param network_interface: string The network adapter interface. - :return: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? 
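# A minimal standalone sketch of the lookup that tune_nic() and
# get_link_speed() perform together, assuming the NETWORK_ADAPTER_SYSCTLS
# table above; the interface name is illustrative only:
#
#     import os
#
#     def sysctls_for(interface):
#         speed_path = os.path.join(os.sep, 'sys', 'class', 'net',
#                                   interface, 'speed')
#         try:
#             with open(speed_path) as sysfs:
#                 speed = int(sysfs.read().strip())   # reported in Mb/s
#         except (IOError, OSError, ValueError):
#             return None                             # LinkSpeed["UNKNOWN"]
#         # 10000 -> the 10GbE profile, 40000 -> the Mellanox profile
#         return NETWORK_ADAPTER_SYSCTLS.get(speed)
#
#     sysctls_for('eth0')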
- if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e.message), level='error') - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """ - This will persist the hard drive settings to the /etc/hdparm.conf file - The settings_dict should be in the form of {"uuid": {"key":"value"}} - :param settings_dict: dict of settings to save - """ - if not settings_dict: - return - - try: - templating.render(source='hdparm.conf', target=HDPARM_FILE, - context=settings_dict) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err.message), level=ERROR) - except Exception as e: - # The templating.render can raise a jinja2 exception if the - # template is not found. Rather than polluting the import - # space of this charm, simply catch Exception - log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e.message), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """ - This function sets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(max_sectors_size) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """ - This function gets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """ - This function gets the max_hw_sectors_kb for a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_hw_sectors_kb or 0 on error. - """ - max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """ - This function sets the hard drive read ahead. - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. 
- """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """ - This queries blkid to get the uuid for a block device. - :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. - """ - try: - block_info = check_output( - ['blkid', '-o', 'export', block_dev]) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """ - Tune the max_hw_sectors if needed. - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """ - Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - This function will change the read ahead sectors and the max write - sectors for each block device. - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. 
Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - return - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" - - -class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name - self.identifier = identifier - self.host = host - self.rack = rack - self.row = row - self.datacenter = datacenter - self.chassis = chassis - self.root = root - - def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_weight(osd_id): - """ - Returns the weight of the specified OSD - :return: Float :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = check_output( - ['ceph', 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - # Make sure children are present in the json - if not json_tree['nodes']: - return None - for device in json_tree['nodes']: - if device['type'] == 'osd' and device['name'] == osd_id: - return device['crush_weight'] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def get_osd_tree(service): - """ - Returns the current osd map in JSON. - :return: List. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) - ) - return crush_list - except ValueError as v: - log("Unable to parse ceph tree json: {}. 
Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def _get_child_dirs(path): - """Returns a list of directory names in the specified path. - - :param path: a full path listing of the parent directory to return child - directory names - :return: list. A list of child directories under the parent directory - :raises: ValueError if the specified path does not exist or is not a - directory, - OSError if an error occurs reading the directory listing - """ - if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) - if not os.path.isdir(path): - raise ValueError('Specified path "%s" is not a directory' % path) - - files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] - return list(filter(os.path.isdir, files_in_dir)) - - -def _get_osd_num_from_dirname(dirname): - """Parses the dirname and returns the OSD id. - - Parses a string in the form of 'ceph-{osd#}' and returns the osd number - from the directory name. - - :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided - directory name. - """ - match = re.search('ceph-(?P\d+)', dirname) - if not match: - raise ValueError("dirname not in correct format: %s" % dirname) - - return match.group('osd_id') - - -def get_local_osd_ids(): - """ - This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of osd identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """ - This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of monitor identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search('ceph-(?P.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - cache = apt_cache() - package = "ceph" - try: - pkg = cache[package] - except: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - # package is known, but no version is currently installed. 
- e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: %s" % msg, - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for quorum to be reached") - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation -] - - -def umount(mount_point): - """ - This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - If umount returns EBUSY this will lazy unmount. - :param mount_point: str. A String representing the filesystem mount point - :return: int. Returns 0 on success. errno otherwise. - """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """ - This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. 
Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]) - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - -def get_partition_list(dev): - """ - Lists the partitions of a block device - :param dev: Path to a block device. ex: /dev/sda - :return: :raise: Returns a list of Partition objects. - Raises CalledProcessException if lsblk fails - """ - partitions_list = [] - try: - partitions = get_partitions(dev) - # For each line of output - for partition in partitions: - parts = partition.split() - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) - return partitions_list - except subprocess.CalledProcessError: - raise - - -def is_osd_disk(dev): - partitions = get_partition_list(dev) - for partition in partitions: - try: - info = check_output(['sgdisk', '-i', partition.number, dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError as e: - log("sgdisk inspection of partition {} on {} failed with " - "error: {}. 
Skipping".format(partition.minor, dev, e.message), - level=ERROR) - return False - - -def start_osds(devices): - # Scan for ceph block devices - rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = check_output(cmd) - - return "{}==".format(res.split('=')[1].strip()) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - return element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(pool_list=None): - return get_named_key(name='radosgw.gateway', - caps=_radosgw_caps, - pool_list=pool_list) - - -def get_mds_key(name): - return create_named_keyring(entity='mds', - name=name, - caps=mds_caps) - - -_mds_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-mds' - ] -} - - -def get_mds_bootstrap_key(): - return 
get_named_key('bootstrap-mds', - _mds_bootstrap_caps_profile) - - -_default_caps = collections.OrderedDict([ - ('mon', ['allow r']), - ('osd', ['allow rwx']), -]) - -admin_caps = collections.OrderedDict([ - ('mds', ['allow *']), - ('mon', ['allow *']), - ('osd', ['allow *']) -]) - -mds_caps = collections.OrderedDict([ - ('osd', ['allow *']), - ('mds', ['allow']), - ('mon', ['allow rwx']), -]) - -osd_upgrade_caps = collections.OrderedDict([ - ('mon', ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ]) -]) - - -def create_named_keyring(entity, name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, - name=name), - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(check_output(cmd).strip()) # IGNORE:E1103 - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None, pool_list=None): - """ - Retrieve a specific named cephx key - :param name: String Name of key to get. - :param pool_list: The list of pools to give access to - :param caps: dict of cephx capabilities - :return: Returns a cephx key - """ - try: - # Does the key already exist? - output = check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - 'client.{}'.format(name), - ]).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! 
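# For illustration, a sketch of how the caps dict below becomes CLI
# arguments, assuming _default_caps and a hypothetical pool_list; the
# argument ordering relies on the OrderedDicts defined above:
#
#     from collections import OrderedDict
#     caps = OrderedDict([('mon', ['allow r']), ('osd', ['allow rwx'])])
#     cmd = ['ceph', 'auth', 'get-or-create', 'client.glance']
#     for subsystem, subcaps in caps.items():
#         if subsystem == 'osd':
#             # pool_list=['glance'] -> "allow rwx pool=glance"
#             subcaps = [subcaps[0] + " pool=glance"]
#         cmd.extend([subsystem, '; '.join(subcaps)])
#     # cmd == ['ceph', 'auth', 'get-or-create', 'client.glance',
#     #         'mon', 'allow r', 'osd', 'allow rwx pool=glance']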
- log("Creating new key for {}".format(name), level=DEBUG) - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(check_output(cmd).strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps): - """ Upgrade key to have capabilities caps """ - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' - - -def bootstrap_monitor_cluster(secret): - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. 
- cmd = ['ceph-create-keys', '--id', hostname] - subprocess.check_call(cmd) - except: - raise - finally: - os.unlink(keyring) - - -def update_monfs(): - hostname = socket.gethostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): - log('Looks like {} is already an OSD data' - ' or journal, skipping.'.format(journal_dev)) - return - zap_disk(journal_dev) - log("Zapped journal device {}".format(journal_dev)) - - -def get_partitions(dev): - cmd = ['partx', '--raw', '--noheadings', dev] - try: - out = check_output(cmd).splitlines() - log("get partitions: {}".format(out), level=DEBUG) - return out - except subprocess.CalledProcessError as e: - log("Can't get info for {0}: {1}".format(dev, e.output)) - return [] - - -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): - if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt, - bluestore) - else: - osdize_dir(dev, encrypt) - - -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev) and not reformat_osd: - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - - if reformat_osd: - cmd.append('--zap-disk') - - # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - - cmd.append(dev) - - if osd_journal: - least_used = find_least_used_journal(osd_journal) - cmd.append(least_used) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) - - try: - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - raise - - -def osdize_dir(path, encrypt=False): - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return - - if cmp_pkgrevno('ceph', "0.56.6") < 0: - log('Unable to use directories for OSDs with ceph < 0.56.6', - level=ERROR) - return - - mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - 
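# For reference, the ceph-disk invocation assembled by osdize_dev() above
# on a Luminous node with osd_format='xfs', reformat_osd=True,
# bluestore=True and no journal comes out as (device name illustrative):
#
#     ceph-disk prepare --fs-type xfs --zap-disk --bluestore /dev/sdb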
chownr('/var/lib/ceph', ceph_user(), ceph_user()) - cmd = [ - 'sudo', '-u', ceph_user(), - 'ceph-disk', - 'prepare', - '--data-dir', - path - ] - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - log("osdize dir cmd: {}".format(cmd)) - subprocess.check_call(cmd) - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - -def get_running_osds(): - """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] - try: - result = check_output(cmd) - return result.split() - except subprocess.CalledProcessError: - return [] - - -def get_cephfs(service): - """ - List the Ceph Filesystems that exist - :rtype : list. Returns a list of the ceph filesystems - :param service: The service name to run the ceph command under - """ - if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph - return [] - try: - output = check_output(["ceph", - '--id', service, - "fs", "ls"]) - if not output: - return [] - """ - Example subprocess output: - 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata, - data pools: [ip-172-31-23-165_data ]\n' - output: filesystems: ['ip-172-31-23-165'] - """ - filesystems = [] - for line in output.splitlines(): - parts = line.split(',') - for part in parts: - if "name" in part: - filesystems.append(part.split(' ')[1]) - except subprocess.CalledProcessError: - return [] - - -def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): - """ - Fairly self explanatory name. This function will wait - for all monitors in the cluster to upgrade or it will - return after a timeout period has expired. - :param new_version: str of the version to watch - :param upgrade_key: the cephx key name to use - """ - done = False - start_time = time.time() - monitor_list = [] - - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - while not done: - try: - done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format( - "mon", mon, new_version - )) for mon in monitor_list) - current_time = time.time() - if current_time > (start_time + 10 * 60): - raise Exception - else: - # Wait 30 seconds and test again if all monitors are upgraded - time.sleep(30) - except subprocess.CalledProcessError: - raise - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -def roll_monitor_cluster(new_version, upgrade_key): - """ - This is tricky to get right so here's what we're going to do. - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous monitor is upgraded yet. - """ - log('roll_monitor_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - monitor_list = [] - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - else: - status_set('blocked', 'Unable to get monitor cluster information') - sys.exit(1) - log('monitor_list: {}'.format(monitor_list)) - - # A sorted list of osd unit names - mon_sorted_list = sorted(monitor_list) - - try: - position = mon_sorted_list.index(my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('waiting', - 'Waiting on {} to finish upgrading'.format( - mon_sorted_list[position - 1])) - wait_on_previous_node(upgrade_key=upgrade_key, - service='mon', - previous_node=mon_sorted_list[position - 1], - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - except ValueError: - log("Failed to find {} in list {}.".format( - my_name, mon_sorted_list)) - status_set('blocked', 'failed to upgrade monitor') - - -def upgrade_monitor(new_version): - current_version = get_version() - status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if systemd(): - for mon_id in get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) - else: - service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - - # Ensure the files and directories under /var/lib/ceph is chowned - # properly as part of the move to the Jewel release, which moved the - # ceph daemons to running as ceph:ceph instead of root:root. - if new_version == 'jewel': - # Ensure the ownership of Ceph's directories is correct - owner = ceph_user() - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=owner, - group=owner, - follow_links=True) - - if systemd(): - for mon_id in get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) - else: - service_start('ceph-mon-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def lock_and_roll(upgrade_key, service, my_name, version): - start_timestamp = time.time() - - log('monitor_key_set {}_{}_{}_start {}'.format( - service, - my_name, - version, - start_timestamp)) - monitor_key_set(upgrade_key, "{}_{}_{}_start".format( - service, my_name, version), start_timestamp) - log("Rolling") - - # This should be quick - if service == 'osd': - upgrade_osd(version) - elif service == 'mon': - upgrade_monitor(version) - else: - log("Unknown service {}. Unable to upgrade".format(service), - level=ERROR) - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_{}_done {}'.format(service, - my_name, - version, - stop_timestamp)) - status_set('maintenance', 'Finishing upgrade') - monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, - my_name, - version), - stop_timestamp) - - -def wait_on_previous_node(upgrade_key, service, previous_node, version): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? 
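# The coordination keys set via monitor_key_set() above follow the
# pattern "{service}_{name}_{version}_start" / "..._done"; e.g. a mon
# host "ceph-mon-0" upgrading to "jewel" publishes
# "mon_ceph-mon-0_jewel_start". The staleness test below reduces to:
#
#     current_timestamp - (10 * 60) > previous_node_start_time
#
# i.e. a previous node that started over ten minutes ago without writing
# its _done key is treated as dead and skipped.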
- # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - -def get_upgrade_position(osd_sorted_list, match_name): - for index, item in enumerate(osd_sorted_list): - if item.name == match_name: - return index - return None - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. -# It rolls an entire server at a time. -def roll_osd_cluster(new_version, upgrade_key): - """ - This is tricky to get right so here's what we're going to do. - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. - - TODO: If you're not in the same failure domain it's safe to upgrade - 1. Examine all pools and adopt the most strict failure domain policy - Example: Pool 1: Failure domain = rack - Pool 2: Failure domain = host - Pool 3: Failure domain = row - - outcome: Failure domain = host - """ - log('roll_osd_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names - osd_sorted_list = sorted(osd_tree) - log("osd_sorted_list: {}".format(osd_sorted_list)) - - try: - position = get_upgrade_position(osd_sorted_list, my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - osd_sorted_list[position - 1].name)) - wait_on_previous_node( - upgrade_key=upgrade_key, - service='osd', - previous_node=osd_sorted_list[position - 1].name, - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - except ValueError: - log("Failed to find name {} in list {}".format( - my_name, osd_sorted_list)) - status_set('blocked', 'failed to upgrade osd') - - -def upgrade_osd(new_version): - current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - try: - # Upgrade the packages before restarting the daemons. - status_set('maintenance', 'Upgrading packages to %s' % new_version) - apt_install(packages=determine_packages(), fatal=True) - - # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart - # all of the OSDs at the same time as this will be the fastest - # way to update the code on the node. - if not dirs_need_ownership_update('osd'): - log('Restarting all OSDs to load new binaries', DEBUG) - service_restart('ceph-osd-all') - return - - # Need to change the ownership of all directories which are not OSD - # directories as well. - # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. - update_owner(CEPH_BASE_DIR, recurse_dirs=False) - non_osd_dirs = filter(lambda x: not x == 'osd', - os.listdir(CEPH_BASE_DIR)) - non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), - non_osd_dirs) - for path in non_osd_dirs: - update_owner(path) - - # Fast service restart wasn't an option because each of the OSD - # directories need the ownership updated for all the files on - # the OSD. Walk through the OSDs one-by-one upgrading the OSD. - for osd_dir in _get_child_dirs(OSD_BASE_DIR): - try: - osd_num = _get_osd_num_from_dirname(osd_dir) - _upgrade_single_osd(osd_num, osd_dir) - except ValueError as ex: - # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), - WARNING) - continue - - except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def _upgrade_single_osd(osd_num, osd_dir): - """Upgrades the single OSD directory. - - :param osd_num: the num of the OSD - :param osd_dir: the directory of the OSD to upgrade - :raises CalledProcessError: if an error occurs in a command issued as part - of the upgrade process - :raises IOError: if an error occurs reading/writing to a file as part - of the upgrade process - """ - stop_osd(osd_num) - disable_osd(osd_num) - update_owner(osd_dir) - enable_osd(osd_num) - start_osd(osd_num) - - -def stop_osd(osd_num): - """Stops the specified OSD number. 
- - :param osd_num: the osd number to stop - """ - if systemd(): - service_stop('ceph-osd@{}'.format(osd_num)) - else: - service_stop('ceph-osd', id=osd_num) - - -def start_osd(osd_num): - """Starts the specified OSD number. - - :param osd_num: the osd number to start. - """ - if systemd(): - service_start('ceph-osd@{}'.format(osd_num)) - else: - service_start('ceph-osd', id=osd_num) - - -def disable_osd(osd_num): - """Disables the specified OSD number. - - Ensures that the specified osd will not be automatically started at the - next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be - started manually. - - :param osd_num: the osd id which should be disabled. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - to disable the OSD - :raises IOError, OSError: if the attempt to read/remove the ready file in - an upstart enabled system fails - """ - if systemd(): - # When running under systemd, the individual ceph-osd daemons run as - # templated units and can be directly addressed by referring to the - # templated service name ceph-osd@<osd_num>. Additionally, systemd - # allows one to disable a specific templated unit by running the - # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the - # OSD should remain disabled until re-enabled via systemd. - # Note: disabling an already disabled service in systemd returns 0, so - # no need to check whether it is enabled or not. - cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] - subprocess.check_call(cmd) - else: - # Neither upstart nor the ceph-osd upstart script provides for - # disabling the starting of an OSD automatically. The specific OSD - # cannot be prevented from running manually, however it can be - # prevented from running automatically on reboot by removing the - # 'ready' file in the OSD's root directory. This is due to the - # ceph-osd-all upstart script checking for the presence of this file - # before starting the OSD. - ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), - 'ready') - if os.path.exists(ready_file): - os.unlink(ready_file) - - -def enable_osd(osd_num): - """Enables the specified OSD number. - - Ensures that the specified osd_num will be enabled and ready to start - automatically in the event of a reboot. - - :param osd_num: the osd id which should be enabled. - :raises CalledProcessError: if the call to the systemd command issued - fails when enabling the service - :raises IOError: if the attempt to write the ready file in an upstart - enabled system fails - """ - if systemd(): - cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)] - subprocess.check_call(cmd) - else: - # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' - # file. Make sure that file exists. - ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), - 'ready') - with open(ready_file, 'w') as f: - f.write('ready') - - # Make sure the correct user owns the file. It shouldn't be necessary - # as the upstart script should run with root privileges, but it's better - # to have all the files matching ownership. - update_owner(ready_file) - - -def update_owner(path, recurse_dirs=True): - """Changes the ownership of the specified path. - - Changes the ownership of the specified path to the new ceph daemon user - using the system's native chown functionality.
This may take awhile, - so this method will issue a set_status for any changes of ownership which - recurses into directory structures. - - :param path: the path to recursively change ownership for - :param recurse_dirs: boolean indicating whether to recursively change the - ownership of all the files in a path's subtree or to - simply change the ownership of the path. - :raises CalledProcessError: if an error occurs issuing the chown system - command - """ - user = ceph_user() - user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) - cmd = ['chown', user_group, path] - if os.path.isdir(path) and recurse_dirs: - status_set('maintenance', ('Updating ownership of %s to %s' % - (path, user))) - cmd.insert(1, '-R') - - log('Changing ownership of {path} to {user}'.format( - path=path, user=user_group), DEBUG) - start = datetime.now() - subprocess.check_call(cmd) - elapsed_time = (datetime.now() - start) - - log('Took {secs} seconds to change the ownership of path: {path}'.format( - secs=elapsed_time.total_seconds(), path=path), DEBUG) - - -def list_pools(service): - """ - This will list the current pools that Ceph has - - :param service: String service id to run under - :return: list. Returns a list of the ceph pools. Raises CalledProcessError - if the subprocess fails to run. - """ - try: - pool_list = [] - pools = check_output(['rados', '--id', service, 'lspools']) - for pool in pools.splitlines(): - pool_list.append(pool) - return pool_list - except subprocess.CalledProcessError as err: - log("rados lspools failed with error: {}".format(err.output)) - raise - - -def dirs_need_ownership_update(service): - """Determines if directories still need change of ownership. - - Examines the set of directories under the /var/lib/ceph/{service} directory - and determines if they have the correct ownership or not. This is - necessary due to the upgrade from Hammer to Jewel where the daemon user - changes from root: to ceph:. - - :param service: the name of the service folder to check (e.g. osd, mon) - :return: boolean. True if the directories need a change of ownership, - False otherwise. - :raises IOError: if an error occurs reading the file stats from one of - the child directories. - :raises OSError: if the specified path does not exist or some other error - """ - expected_owner = expected_group = ceph_user() - path = os.path.join(CEPH_BASE_DIR, service) - for child in _get_child_dirs(path): - curr_owner, curr_group = owner(child) - - if (curr_owner == expected_owner) and (curr_group == expected_group): - continue - - log('Directory "%s" needs its ownership updated' % child, DEBUG) - return True - - # All child directories had the expected ownership - return False - -# A dict of valid ceph upgrade paths. Mapping is old -> new -UPGRADE_PATHS = { - 'firefly': 'hammer', - 'hammer': 'jewel', -} - -# Map UCA codenames to ceph codenames -UCA_CODENAME_MAP = { - 'icehouse': 'firefly', - 'juno': 'firefly', - 'kilo': 'hammer', - 'liberty': 'hammer', - 'mitaka': 'jewel', -} - - -def pretty_print_upgrade_paths(): - '''Pretty print supported upgrade paths for ceph''' - lines = [] - for key, value in UPGRADE_PATHS.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - - -def resolve_ceph_version(source): - ''' - Resolves a version of ceph based on source configuration - based on Ubuntu Cloud Archive pockets. 
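# Worked example of the release mapping above (pocket names assumed):
#
#     resolve_ceph_version('cloud:trusty-mitaka')   # -> 'jewel'
#     resolve_ceph_version('cloud:xenial-ocata')    # -> None (unmapped)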
- - @param: source: source configuration option of charm - @returns: ceph release codename or None if not resolvable - ''' - os_release = get_os_codename_install_source(source) - return UCA_CODENAME_MAP.get(os_release) - - -def get_ceph_pg_stat(): - """ - Returns the result of ceph pg stat - :return: dict - """ - try: - tree = check_output(['ceph', 'pg', 'stat', '--format=json']) - try: - json_tree = json.loads(tree) - if not json_tree['num_pg_by_state']: - return None - return json_tree - except ValueError as v: - log("Unable to parse ceph pg stat json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph pg stat command failed with message: {}".format( - e.message)) - raise - - -def get_ceph_health(): - """ - Returns the health of the cluster from a 'ceph status' - :return: dict - Also raises CalledProcessError if our ceph command fails - To get the overall status, use get_ceph_health()['overall_status'] - """ - try: - tree = check_output( - ['ceph', 'status', '--format=json']) - try: - json_tree = json.loads(tree) - # Make sure children are present in the json - if not json_tree['overall_status']: - return None - return json_tree - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph status command failed with message: {}".format( - e.message)) - raise - - -def reweight_osd(osd_num, new_weight): - """ - Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed - :param new_weight: the new weight for the OSD - :returns: bool. True if output looks right, else false. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - """ - try: - cmd_result = subprocess.check_output( - ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), - new_weight], stderr=subprocess.STDOUT) - expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( - ID=osd_num) + " to {}".format(new_weight) - log(cmd_result) - if expected_result in cmd_result: - return True - return False - except subprocess.CalledProcessError as e: - log("ceph osd crush reweight command failed with message: {}".format( - e.message)) - raise - - -def determine_packages(): - ''' - Determines packages for installation. - - @returns: list of ceph packages - ''' - if is_container(): - PACKAGES.remove('ntp') - return PACKAGES diff --git a/ceph-osd/lib/ceph/ceph_broker.py b/ceph-osd/lib/ceph/broker.py similarity index 88% rename from ceph-osd/lib/ceph/ceph_broker.py rename to ceph-osd/lib/ceph/broker.py index 1f6db8c8..b071b91e 100644 --- a/ceph-osd/lib/ceph/ceph_broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -1,5 +1,3 @@ -#!/usr/bin/python -# # Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,19 +14,21 @@ import json import os + from tempfile import NamedTemporaryFile +from ceph.utils import ( + get_cephfs, + get_osd_weight +) +from ceph.crush_utils import Crushmap + from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) -from ceph import ( - get_cephfs, - get_osd_weight -) -from ceph.ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -112,6 +112,9 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. + + :param reqs: dict of request parameters. + :returns: dict. 
exit-code and reason if not 0 """ request_id = reqs.get('request-id') try: @@ -140,6 +143,12 @@ def process_requests(reqs): def handle_create_erasure_profile(request, service): + """Create an erasure profile. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ # "local" | "shec" or it defaults to "jerasure" erasure_type = request.get('erasure-type') # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket @@ -160,10 +169,9 @@ def handle_create_erasure_profile(request, service): def handle_add_permissions_to_key(request, service): - """ - Groups are defined by the key cephx.groups.(namespace-)?-(name). This key - will contain a dict serialized to JSON with data about the group, including - pools and members. + """Groups are defined by the key cephx.groups.(namespace-)?-(name). This + key will contain a dict serialized to JSON with data about the group, + including pools and members. A group can optionally have a namespace defined that will be used to further restrict pool access. @@ -238,8 +246,7 @@ def pool_permission_list_for_service(service): def get_service_groups(service, namespace=None): - """ - Services are objects stored with some metadata, they look like (for a + """Services are objects stored with some metadata, they look like (for a service named "nova"): { group_names: {'rwx': ['images']}, @@ -272,7 +279,7 @@ def get_service_groups(service, namespace=None): def _build_service_groups(service, namespace=None): - '''Rebuild the 'groups' dict for a service group + """Rebuild the 'groups' dict for a service group :returns: dict: dictionary keyed by group name of the following format: @@ -287,7 +294,7 @@ def _build_service_groups(service, namespace=None): services: ['nova'] } } - ''' + """ all_groups = {} for _, groups in service['group_names'].items(): for group in groups: @@ -299,8 +306,7 @@ def _build_service_groups(service, namespace=None): def get_group(group_name): - """ - A group is a structure to hold data about a named group, structured as: + """A group is a structure to hold data about a named group, structured as: { pools: ['glance'], services: ['nova'] @@ -344,6 +350,12 @@ def get_group_key(group_name): def handle_erasure_pool(request, service): + """Create a new erasure coded pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') quota = request.get('max-bytes') @@ -390,6 +402,12 @@ def handle_erasure_pool(request, service): def handle_replicated_pool(request, service): + """Create a new replicated pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') replicas = request.get('replicas') quota = request.get('max-bytes') @@ -441,6 +459,13 @@ def handle_replicated_pool(request, service): def handle_create_cache_tier(request, service): + """Create a cache tier on a cold pool. Modes supported are + "writeback" and "readonly". + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 + """ # mode = "writeback" | "readonly" storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') @@ -462,6 +487,12 @@ def handle_create_cache_tier(request, service): def handle_remove_cache_tier(request, service): + """Remove a cache tier from the cold pool. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') # cache and storage pool must exist first @@ -477,6 +508,12 @@ def handle_remove_cache_tier(request, service): def handle_set_pool_value(request, service): + """Sets an arbitrary pool value. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ # Set arbitrary pool values params = {'pool': request.get('name'), 'key': request.get('key'), @@ -501,6 +538,12 @@ def handle_set_pool_value(request, service): def handle_rgw_regionmap_update(request, service): + """Change the radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ name = request.get('client-name') if not name: msg = "Missing rgw-region or client-name params" @@ -516,6 +559,12 @@ def handle_rgw_regionmap_update(request, service): def handle_rgw_regionmap_default(request, service): + """Create a radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ region = request.get('rgw-region') name = request.get('client-name') if not region or not name: @@ -537,6 +586,12 @@ def handle_rgw_regionmap_default(request, service): def handle_rgw_zone_set(request, service): + """Create a radosgw zone. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ json_file = request.get('zone-json') name = request.get('client-name') region_name = request.get('region-name') @@ -567,6 +622,12 @@ def handle_rgw_zone_set(request, service): def handle_put_osd_in_bucket(request, service): + """Move an osd into a specified crush bucket. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ osd_id = request.get('osd') target_bucket = request.get('bucket') if not osd_id or not target_bucket: @@ -597,6 +658,12 @@ def handle_put_osd_in_bucket(request, service): def handle_rgw_create_user(request, service): + """Create a new rados gateway user. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ user_id = request.get('rgw-uid') display_name = request.get('display-name') name = request.get('client-name') @@ -630,11 +697,11 @@ def handle_rgw_create_user(request, service): def handle_create_cephfs(request, service): - """ - Create a new cephfs. + """Create a new cephfs. + :param request: The broker request - :param service: The cephx user to run this command under - :return: + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') @@ -678,6 +745,12 @@ def handle_create_cephfs(request, service): def handle_rgw_region_set(request, service): # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + """Set the rados gateway region. + + :param request: dict. The broker request. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ json_file = request.get('region-json') name = request.get('client-name') region_name = request.get('region-name') diff --git a/ceph-osd/lib/ceph/ceph_helpers.py b/ceph-osd/lib/ceph/ceph_helpers.py deleted file mode 100644 index 11f5dd8c..00000000 --- a/ceph-osd/lib/ceph/ceph_helpers.py +++ /dev/null @@ -1,1557 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import errno -import hashlib -import math -from charmhelpers.contrib.network.ip import format_ipv6_addr -import six - -import os -import shutil -import json -import time -import uuid -import re - -import subprocess -from subprocess import ( - check_call, - check_output as s_check_output, - CalledProcessError, -) -from charmhelpers.core.hookenv import (config, - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, ) -from charmhelpers.core.host import (mount, - mounts, - service_start, - service_stop, - service_running, - umount, ) -from charmhelpers.fetch import (apt_install, ) - -from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser, \ - get_host_ip - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" - -CRUSH_BUCKET = """root {name} {{ - id {id} # do not change unnecessarily - # weight 0.000 - alg straw - hash 0 # rjenkins1 -}} - -rule {name} {{ - ruleset 0 - type replicated - min_size 1 - max_size 10 - step take {name} - step chooseleaf firstn 0 type host - step emit -}}""" - -# This regular expression looks for a string like: -# root NAME { -# id NUMBER -# so that we can extract NAME and ID from the crushmap -CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") - -# This regular expression looks for ID strings in the crushmap like: -# id NUMBER -# so that we can extract the IDs from a crushmap -CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") - -# The number of placement groups per OSD to target for placement group -# calculations. 
This number is chosen as 100 due to the ceph PG Calc -# documentation recommending to choose 100 for clusters which are not -# expected to increase in the foreseeable future. Since the majority of the -# calculations are done on deployment, target the case of non-expanding -# clusters as the default. -DEFAULT_PGS_PER_OSD_TARGET = 100 -DEFAULT_POOL_WEIGHT = 10.0 -LEGACY_PG_COUNT = 200 - - -def check_output(*args, **kwargs): - ''' - Helper wrapper for py2/3 compat with subprocess.check_output - - @returns str: UTF-8 decoded representation of output - ''' - return s_check_output(*args, **kwargs).decode('UTF-8') - - -def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ - pools/#set-pool-values - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate - :param valid_type: The type that value should be. - :param valid_range: A range of values that value can assume. - :return: - """ - assert isinstance(value, valid_type), "{} is not a {}".format(value, - valid_type) - if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) - # If we're dealing with strings - if valid_type is six.string_types: - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError("Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, - value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) - - -class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. 
Provides - an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class Crushmap(object): - """An object oriented approach to Ceph crushmap management.""" - - def __init__(self): - """Iiitialize the Crushmap from Ceph""" - self._crushmap = self.load_crushmap() - roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) - buckets = [] - ids = list(map( - lambda x: int(x), - re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids.sort() - if roots != []: - for root in roots: - buckets.append(Crushmap.Bucket(root[0], root[1], True)) - - self._buckets = buckets - if ids != []: - self._ids = ids - else: - self._ids = [0] - - def load_crushmap(self): - try: - crush = subprocess.Popen( - ('ceph', 'osd', 'getcrushmap'), - stdout=subprocess.PIPE) - return subprocess.check_output( - ('crushtool', '-d', '-'), - stdin=crush.stdout) - except Exception as e: - log("load_crushmap error: {}".format(e)) - raise "Failed to read Crushmap" - - def ensure_bucket_is_present(self, bucket_name): - if bucket_name not in [bucket.name for bucket in self.buckets()]: - self.add_bucket(bucket_name) - self.save() - - def buckets(self): - """Return a list of buckets that are in the Crushmap.""" - return self._buckets - - def add_bucket(self, bucket_name): - """Add a named bucket to Ceph""" - new_id = min(self._ids) - 1 - self._ids.append(new_id) - self._buckets.append(Crushmap.Bucket(bucket_name, new_id)) - - def save(self): - """Persist Crushmap to Ceph""" - try: - crushmap = self.build_crushmap() - compiled = subprocess.Popen( - ('crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'), - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - output = compiled.communicate(crushmap)[0] - ceph = subprocess.Popen( - ('ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'), - stdin=subprocess.PIPE) - ceph_output = ceph.communicate(input=output) - return ceph_output - except Exception as e: - log("save error: {}".format(e)) - raise "Failed to save crushmap" - - def build_crushmap(self): - """Modifies the curent crushmap to include the new buckets""" - tmp_crushmap = self._crushmap - for bucket in self._buckets: - if not bucket.default: - tmp_crushmap = "{}\n\n{}".format( - tmp_crushmap, - Crushmap.bucket_string(bucket.name, bucket.id)) - return tmp_crushmap - - @staticmethod - def bucket_string(name, id): - return CRUSH_BUCKET.format(name=name, id=id) - - class Bucket(object): - """An object that describes a Crush bucket.""" - - def __init__(self, name, id, default=False): - self.name = name - self.id = int(id) - self.default = default - - def __repr__(self): - return "Bucket {{Name: {name}, ID: {id}}}".format( - name=self.name, id=self.id) - - def __eq__(self, other): - """Override the default Equals behavior""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __ne__(self, other): - """Define a non-equality test""" - if isinstance(other, self.__class__): - return not self.__eq__(other) - return NotImplemented - - -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is - inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. - Instantiate a child class and call create(). 
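As the docstring above says, only the subclasses are meant to be
instantiated. A minimal usage sketch, assuming the classes above are in
scope, a live cluster, and a cephx client named 'admin' (pool names are made
up for illustration):

    # Replicated pool; PG count comes from the calculation in get_pgs().
    ReplicatedPool(service='admin', name='glance',
                   replicas=3, percent_data=20.0).create()

    # Erasure coded pool; PG count is sized from the profile's k+m.
    ErasurePool(service='admin', name='backups',
                erasure_code_profile='default').create()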
- """ - - def __init__(self, service, name): - self.service = service - self.name = name - - # Create the pool if it doesn't exist already - # To be implemented by subclasses - def create(self): - pass - - def add_cache_tier(self, cache_pool, mode): - """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. The caching mode to use for this pool. - valid range = ["readonly", "writeback"] - :return: None - """ - # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, - valid_type=six.string_types, - valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', - self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', - cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', - self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', - cache_pool, 'hit_set_type', 'bloom']) - - def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from - writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to - remove. - :return: None - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() - if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', - self.name, cache_pool]) - - elif mode == 'writeback': - pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': - # Jewel added a mandatory flag - pool_forward_cmd.append('--yes-i-really-mean-it') - - check_call(pool_forward_cmd) - # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, - 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', - 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', - self.name, cache_pool]) - - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): - """Return the number of placement groups to use when creating the pool. - - Returns the number of placement groups which should be specified when - creating the pool. This is based upon the calculation guidelines - provided by the Ceph Placement Group Calculator (located online at - http://ceph.com/pgcalc/). - - The number of placement groups are calculated using the following: - - (Target PGs per OSD) * (OSD #) * (%Data) - ---------------------------------------- - (Pool size) - - Per the upstream guidelines, the OSD # should really be considered - based on the number of OSDs which are eligible to be selected by the - pool. Since the pool creation doesn't specify any of CRUSH set rules, - the default rule will be dependent upon the type of pool being - created (replicated or erasure). - - This code makes no attempt to determine the number of OSDs which can be - selected for the specific rule, rather it is left to the user to tune - in the form of 'expected-osd-count' config option. - - :param pool_size: int. pool_size is either the number of replicas for - replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. 
the percentage of data that is expected to - be contained in the pool for the specific OSD set. Default value - is to assume 10% of the data is for this pool, which is a - relatively low % of the data but allows for the pg_num to be - increased. NOTE: the default is primarily to handle the scenario - where related charms requiring pools has not been upgraded to - include an update to indicate their relative usage of the pools. - :return: int. The number of pgs to use. - """ - - # Note: This calculation follows the approach that is provided - # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. - validator(value=pool_size, valid_type=int) - - # Ensure that percent data is set to something - even with a default - # it can be set to None, which would wreak havoc below. - if percent_data is None: - percent_data = DEFAULT_POOL_WEIGHT - - # If the expected-osd-count is specified, then use the max between - # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) - expected = config('expected-osd-count') or 0 - - if osd_list: - osd_count = max(expected, len(osd_list)) - - # Log a message to provide some insight if the calculations claim - # to be off because someone is setting the expected count and - # there are more OSDs in reality. Try to make a proper guess - # based upon the cluster itself. - if expected and osd_count != expected: - log("Found more OSDs than provided expected count. " - "Using the actual count instead", INFO) - elif expected: - # Use the expected-osd-count in older ceph versions to allow for - # a more accurate pg calculations - osd_count = expected - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - return LEGACY_PG_COUNT - - percent_data /= 100.0 - target_pgs_per_osd = config( - 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET - num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size - - # The CRUSH algorithm has a slight optimization for placement groups - # with powers of 2 so find the nearest power of 2. If the nearest - # power of 2 is more than 25% below the original value, the next - # highest value is used. To do this, find the nearest power of 2 such - # that 2^n <= num_pg, check to see if its within the 25% tolerance. - exponent = math.floor(math.log(num_pg, 2)) - nearest = 2 ** exponent - if (num_pg - nearest) > (num_pg * 0.25): - # Choose the next highest power of 2 since the nearest is more - # than 25% below the original value. - return int(nearest * 2) - else: - return int(nearest) - - -class ReplicatedPool(Pool): - def __init__(self, - service, - name, - pg_num=None, - replicas=2, - percent_data=10.0): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - if pg_num: - # Since the number of placement groups were specified, ensure - # that there aren't too many created. 
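Worked through with concrete numbers: 10 eligible OSDs, a replicated pool of
size 3 and the default 10% weight give (100 * 10 * 0.10) / 3, i.e. 33 PGs,
which the power-of-two step above rounds down to 32 because 32 is within 25%
of 33. That rounding in isolation (a hypothetical standalone copy of the
logic in get_pgs):

    import math

    def round_pgs_to_power_of_two(num_pg):
        # Round down to 2^n unless that loses more than 25% of num_pg,
        # in which case take the next power of two up.
        nearest = 2 ** int(math.floor(math.log(num_pg, 2)))
        if (num_pg - nearest) > (num_pg * 0.25):
            return nearest * 2
        return nearest

    assert round_pgs_to_power_of_two(33) == 32
    assert round_pgs_to_power_of_two(60) == 64  # 60 - 32 > 25% of 60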
- max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) - else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - - def create(self): - if not pool_exists(self.service, self.name): - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, - service, - name, - erasure_code_profile="default", - percent_data=10.0): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. - erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), 'erasure', - self.erasure_code_profile] - try: - check_call(cmd) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" - - -def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command - under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - mon_status = check_output(['ceph', '--id', service, 'mon_status', - '--format=json']) - try: - return json.loads(mon_status) - except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) - raise - except CalledProcessError as e: - log("mon_status command failed with message: {}".format(e.message)) - raise - - -def hash_monitor_names(service): - """ - Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. The Ceph user name to run the command - under - :rtype : dict. 
json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} - """ - try: - hash_list = [] - monitor_list = get_mon_map(service=service) - if monitor_list['monmap']['mons']: - for mon in monitor_list['monmap']['mons']: - hash_list.append(hashlib.sha224(mon['name'].encode( - 'utf-8')).hexdigest()) - return sorted(hash_list) - else: - return None - except (ValueError, CalledProcessError): - raise - - -def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to delete. - """ - try: - check_output(['ceph', '--id', service, - 'config-key', 'del', str(key)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format(e.output)) - raise - - -def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to set. - :param value: The value to set. This will be converted to a string - before setting - """ - try: - check_output(['ceph', '--id', service, 'config-key', 'put', str(key), - str(value)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format(e.output)) - raise - - -def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to search for. - :return: Returns the value of that key or None if not found. - """ - try: - output = check_output(['ceph', '--id', service, 'config-key', 'get', - str(key)]) - return output - except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format(e.output)) - return None - - -def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs - """ - try: - check_call(['ceph', '--id', service, 'config-key', 'exists', str(key)]) - # I can return true here regardless because Ceph returns - # ENOENT if the key wasn't found - return True - except CalledProcessError as e: - if e.returncode == errno.ENOENT: - return False - else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) - raise - - -def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command - under - :param name: - :return: - """ - try: - out = check_output( - ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - return json.loads(out) - except (CalledProcessError, OSError, ValueError): - return None - - -def pool_set(service, pool_name, key, value): - """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value - ] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def snapshot_pool(service, pool_name, snapshot_name): - """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, - snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, - snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): - """ - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError - """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_quota(service, pool_name): - """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_erasure_profile(service, profile_name): - """ - Create a new erasure code profile if one does not already exist for it. - Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/ - rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command - under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def create_erasure_profile(service, - profile_name, - erasure_plugin_name='jerasure', - failure_domain='host', - data_chunks=2, - coding_chunks=1, - locality=None, - durability_estimator=None): - """ - Create a new erasure code profile if one does not already exist for it. - Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/ - rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command - under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. One of ['chassis', 'datacenter', - 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) - :param data_chunks: int - :param coding_chunks: int - :param locality: int - :param durability_estimator: int - :return: None. 
Can raise CalledProcessError - """ - # Ensure this failure_domain is allowed by Ceph - validator(failure_domain, six.string_types, - ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', - 'region', 'room', 'root', 'row']) - - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', - profile_name, 'plugin=' + erasure_plugin_name, - 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] - if locality is not None and durability_estimator is not None: - raise ValueError( - "create_erasure_profile should be called with k, m and one of l " - "or c but not both.") - - # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) - - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - - try: - check_call(cmd) - except CalledProcessError: - raise - - -def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command - under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None - """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name - ] - check_call(cmd) - - -def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command - under - :param name: six.string_types - :return: int or None - """ - validator(value=name, valid_type=six.string_types) - try: - check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', - 'get', name]) - return True - except CalledProcessError: - return False - - -def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :return: int or None - """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json' - ]) - try: - osd_json = json.loads(out) - for pool in osd_json['pools']: - if pool['pool_name'] == pool_name: - return pool['cache_mode'] - return None - except ValueError: - raise - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, 'lspools']) - except CalledProcessError: - return False - - return name in out.split() - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. 
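The erasure-profile helpers above compose in the obvious way; a short
sketch, assuming a cephx client named 'admin' and a hypothetical profile
name 'ec-42':

    if not erasure_profile_exists('admin', 'ec-42'):
        # jerasure profile with k=4 data chunks and m=2 coding chunks,
        # failing over at host granularity.
        create_erasure_profile('admin', 'ec-42',
                               data_chunks=4, coding_chunks=2,
                               failure_domain='host')
    profile = get_erasure_profile('admin', 'ec-42')  # dict, or None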
- """ - version = ceph_version() - if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json'])) - - return None - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', service, '--pool', pool - ]) - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" - keyring = _keyring_path(service) - if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' 
% keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - create_keyring(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']) - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = os.errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), - level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. - os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, - pool, - rbd_img, - sizemb, - mount_point, - blk_device, - fstype, - system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. - - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. 
- if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), - level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.'.format(svc), - level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - Returns False if no ceph key is available in relation state. - """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - create_keyring(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -def get_mon_hosts(): - """ - Helper function to gather up the ceph monitor host public addresses - :return: list. Returns a list of ip_address:port - """ - hosts = [] - for relid in relation_ids('mon'): - for unit in related_units(relid): - addr = \ - relation_get('ceph-public-address', - unit, - relid) or get_host_ip( - relation_get( - 'private-address', - unit, - relid)) - - if addr: - hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) - - hosts.sort() - return hosts - - -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op_create_pool( - self, name, replica_count=3, - pg_num=None, weight=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. 
- @param weight: the percentage of data the pool makes up - """ - if pg_num and weight: - raise ValueError('pg_num and weight are mutually exclusive') - - self.ops.append({'op': 'create-pool', - 'name': name, - 'replicas': replica_count, - 'pg_num': pg_num, - 'weight': weight}) - - def set_ops(self, ops): - """Set request ops to provided value. - - Useful for injecting ops that come from a previous request - to allow comparisons to ensure validity. - """ - self.ops = ops - - @property - def request(self): - return json.dumps({'api-version': self.api_version, - 'ops': self.ops, - 'request-id': self.request_id}) - - def _ops_equal(self, other): - if len(self.ops) == len(other.ops): - for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: - if self.ops[req_no].get(key) != other.ops[req_no].get(key): - return False - else: - return False - return True - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - if self.api_version == other.api_version and \ - self._ops_equal(other): - return True - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -class CephBrokerRsp(object): - """Ceph broker response. - - Response is json-decoded and contents provided as methods/properties. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, encoded_rsp): - self.api_version = None - self.rsp = json.loads(encoded_rsp) - - @property - def request_id(self): - return self.rsp.get('request-id') - - @property - def exit_code(self): - return self.rsp.get('exit-code') - - @property - def exit_msg(self): - return self.rsp.get('stderr') - - -# Ceph Broker Conversation: -# If a charm needs an action to be taken by ceph it can create a CephBrokerRq -# and send that request to ceph via the ceph relation. The CephBrokerRq has a -# unique id so that the client can identity which CephBrokerRsp is associated -# with the request. Ceph will also respond to each client unit individually -# creating a response key per client unit eg glance/0 will get a CephBrokerRsp -# via key broker-rsp-glance-0 -# -# To use this the charm can just do something like: -# -# from charmhelpers.contrib.storage.linux.ceph import ( -# send_request_if_needed, -# is_request_complete, -# CephBrokerRq, -# ) -# -# @hooks.hook('ceph-relation-changed') -# def ceph_changed(): -# rq = CephBrokerRq() -# rq.add_op_create_pool(name='poolname', replica_count=3) -# -# if is_request_complete(rq): -# -# else: -# send_request_if_needed(get_ceph_request()) -# -# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example -# of glance having sent a request to ceph which ceph has successfully processed -# 'ceph:8': { -# 'ceph/0': { -# 'auth': 'cephx', -# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', -# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', -# 'ceph-public-address': '10.5.44.103', -# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', -# 'private-address': '10.5.44.103', -# }, -# 'glance/0': { -# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' -# '"ops": [{"replicas": 3, "name": "glance", ' -# '"op": "create-pool"}]}'), -# 'private-address': '10.5.44.109', -# }, -# } - - -def get_previous_request(rid): - """Return the last ceph broker request sent on a given relation - - @param rid: Relation id to query for request - """ - request = None - broker_req = relation_get(attribute='broker_req', - rid=rid, - unit=local_unit()) - if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request - - -def get_request_states(request, relation='ceph'): - """Return a dict of requests per relation id with their corresponding - completion state. - - This allows a charm, which has a request for ceph, to see whether there is - an equivalent request already being processed and if so what state that - request is in. - - @param request: A CephBrokerRq object - """ - complete = [] - requests = {} - for rid in relation_ids(relation): - complete = False - previous_request = get_previous_request(rid) - if request == previous_request: - sent = True - complete = is_request_complete_for_rid(previous_request, rid) - else: - sent = False - complete = False - - requests[rid] = {'sent': sent, 'complete': complete, } - - return requests - - -def is_request_sent(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been sent - - Returns True if a similair request has been sent - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['sent']: - return False - - return True - - -def is_request_complete(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been - completed - - Returns True if a similair request has been completed - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['complete']: - return False - - return True - - -def is_request_complete_for_rid(request, rid): - """Check if a given request has been completed on the given relation - - @param request: A CephBrokerRq object - @param rid: Relation ID - """ - broker_key = get_broker_rsp_key() - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if rdata.get(broker_key): - rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == request.request_id: - if not rsp.exit_code: - return True - else: - # The remote unit sent no reply targeted at this unit so either the - # remote ceph cluster does not support unit targeted replies or it - # has not processed our request yet. 
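The request/response halves described above can be exercised together; a
minimal sketch, assuming CephBrokerRq and CephBrokerRsp are in scope:

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3)
    wire = rq.request  # JSON with api-version, ops and request-id

    # A broker reply is matched back to its request by request-id.
    rsp = CephBrokerRsp('{"request-id": "%s", "exit-code": 0}'
                        % rq.request_id)
    assert rsp.request_id == rq.request_id
    assert rsp.exit_code == 0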
- if rdata.get('broker_rsp'): - request_data = json.loads(rdata['broker_rsp']) - if request_data.get('request-id'): - log('Ignoring legacy broker_rsp without unit key as remote' - ' service supports unit specific replies', - level=DEBUG) - else: - log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies', - level=DEBUG) - rsp = CephBrokerRsp(rdata['broker_rsp']) - if not rsp.exit_code: - return True - - return False - - -def get_broker_rsp_key(): - """Return broker response key for this unit - - This is the key that ceph is going to use to pass request status - information back to this unit - """ - return 'broker-rsp-' + local_unit().replace('/', '-') - - -def send_request_if_needed(request, relation='ceph'): - """Send broker request if an equivalent request has not already been sent - - @param request: A CephBrokerRq object - """ - if is_request_sent(request, relation=relation): - log('Request already sent but not complete, not sending new request', - level=DEBUG) - else: - for rid in relation_ids(relation): - log('Sending request {}'.format(request.request_id), level=DEBUG) - relation_set(relation_id=rid, broker_req=request.request) - - -class CephConfContext(object): - """Ceph config (ceph.conf) context. - - Supports user-provided Ceph configuration settings. Use can provide a - dictionary as the value for the config-flags charm option containing - Ceph configuration settings keyede by their section in ceph.conf. - """ - - def __init__(self, permitted_sections=None): - self.permitted_sections = permitted_sections or [] - - def __call__(self): - conf = config('config-flags') - if not conf: - return {} - - conf = config_flags_parser(conf) - if type(conf) != dict: - log("Provided config-flags is not a dictionary - ignoring", - level=WARNING) - return {} - - permitted = self.permitted_sections - if permitted: - diff = set(conf.keys()).difference(set(permitted)) - if diff: - log("Config-flags contains invalid keys '%s' - they will be " - "ignored" % (', '.join(diff)), - level=WARNING) - - ceph_conf = {} - for key in conf: - if permitted and key not in permitted: - log("Ignoring key '%s'" % key, level=WARNING) - continue - - ceph_conf[key] = conf[key] - - return ceph_conf diff --git a/ceph-osd/lib/ceph/crush_utils.py b/ceph-osd/lib/ceph/crush_utils.py new file mode 100644 index 00000000..1c777f34 --- /dev/null +++ b/ceph-osd/lib/ceph/crush_utils.py @@ -0,0 +1,149 @@ +# Copyright 2014 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
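The new ceph/crush_utils.py below extracts the crushmap handling that
previously lived in ceph_helpers.py. Its CRUSH_BUCKET template expands to a
root bucket plus a matching replicated rule; for example (the bucket name
'fast-ssd' and id -5 are made up for illustration, and the import assumes
the lib directory is on sys.path):

    from ceph.crush_utils import Crushmap

    # Renders a "root fast-ssd { id -5 ... }" stanza followed by a
    # "rule fast-ssd { ... step take fast-ssd ... }" stanza.
    print(Crushmap.bucket_string('fast-ssd', -5))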
+
+import re
+
+from subprocess import check_output, CalledProcessError, Popen, PIPE
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+CRUSH_BUCKET = """root {name} {{
+    id {id}    # do not change unnecessarily
+    # weight 0.000
+    alg straw
+    hash 0  # rjenkins1
+}}
+
+rule {name} {{
+    ruleset 0
+    type replicated
+    min_size 1
+    max_size 10
+    step take {name}
+    step chooseleaf firstn 0 type host
+    step emit
+}}"""
+
+# This regular expression looks for a string like:
+# root NAME {
+# id NUMBER
+# so that we can extract NAME and ID from the crushmap
+CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")
+
+# This regular expression looks for ID strings in the crushmap like:
+# id NUMBER
+# so that we can extract the IDs from a crushmap
+CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
+
+
+class Crushmap(object):
+    """An object oriented approach to Ceph crushmap management."""
+
+    def __init__(self):
+        self._crushmap = self.load_crushmap()
+        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
+        buckets = []
+        ids = sorted(
+            int(x) for x in re.findall(CRUSHMAP_ID_RE, self._crushmap))
+        if roots:
+            for root in roots:
+                buckets.append(CRUSHBucket(root[0], root[1], True))
+
+        self._buckets = buckets
+        if ids:
+            self._ids = ids
+        else:
+            self._ids = [0]
+
+    def load_crushmap(self):
+        try:
+            crush = check_output(['ceph', 'osd', 'getcrushmap'])
+        except CalledProcessError as e:
+            log("Error occurred while loading CRUSH map: "
+                "{}".format(e), ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+        # crushtool reads the compiled map on stdin, so feed it the raw
+        # map through a pipe rather than passing the bytes as a handle.
+        crushtool = Popen(['crushtool', '-d', '-'],
+                          stdin=PIPE, stdout=PIPE)
+        decompiled, _ = crushtool.communicate(input=crush)
+        if crushtool.returncode != 0:
+            log("Error occurred while decompiling CRUSH map", ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+        return decompiled
+
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
+    def buckets(self):
+        """Return a list of buckets that are in the Crushmap."""
+        return self._buckets
+
+    def add_bucket(self, bucket_name):
+        """Add a named bucket to Ceph"""
+        new_id = min(self._ids) - 1
+        self._ids.append(new_id)
+        self._buckets.append(CRUSHBucket(bucket_name, new_id))
+
+    def save(self):
+        """Persist Crushmap to Ceph"""
+        crushmap = self.build_crushmap()
+        # Compile the textual map and inject the result, feeding each
+        # stage its input over stdin.
+        compile_cmd = Popen(['crushtool', '-c', '/dev/stdin', '-o',
+                             '/dev/stdout'], stdin=PIPE, stdout=PIPE)
+        compiled, _ = compile_cmd.communicate(input=crushmap)
+        inject_cmd = Popen(['ceph', 'osd', 'setcrushmap', '-i',
+                            '/dev/stdin'], stdin=PIPE, stdout=PIPE)
+        ceph_output, _ = inject_cmd.communicate(input=compiled)
+        if compile_cmd.returncode != 0 or inject_cmd.returncode != 0:
+            log("save error: compiling or setting the CRUSH map failed",
+                ERROR)
+            raise RuntimeError("Failed to save CRUSH map.")
+        return ceph_output
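+
+    # Illustrative usage (a sketch, not part of the charm API; assumes the
+    # ceph CLI and crushtool are installed, the cluster is reachable, and
+    # 'fast-ssd' is a made-up bucket name):
+    #
+    #   crushmap = Crushmap()
+    #   crushmap.ensure_bucket_is_present('fast-ssd')
+    #
+    # This decompiles the live map, appends a "root fast-ssd {...}" bucket
+    # and matching rule with the next unused negative id, then recompiles
+    # and injects the result via 'ceph osd setcrushmap'.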
+ + def build_crushmap(self): + """Modifies the current CRUSH map to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + +class CRUSHBucket(object): + """CRUSH bucket description object.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py new file mode 100644 index 00000000..b96dabbf --- /dev/null +++ b/ceph-osd/lib/ceph/utils.py @@ -0,0 +1,2199 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import collections
+import ctypes
+import ctypes.util  # for find_library() used in umount()
+import errno
+import json
+import os
+import pyudev
+import random
+import re
+import socket
+import subprocess
+import sys
+import time
+import shutil
+
+from datetime import datetime
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+from charmhelpers.core.host import (
+    chownr,
+    cmp_pkgrevno,
+    lsb_release,
+    mkdir,
+    mounts,
+    owner,
+    service_restart,
+    service_start,
+    service_stop,
+    CompareHostReleases,
+    is_container,
+)
+from charmhelpers.core.hookenv import (
+    cached,
+    config,
+    log,
+    status_set,
+    DEBUG,
+    ERROR,
+    WARNING,
+)
+from charmhelpers.fetch import (
+    apt_cache,
+    add_source, apt_install, apt_update
+)
+from charmhelpers.contrib.storage.linux.ceph import (
+    get_mon_map,
+    monitor_key_set,
+    monitor_key_exists,
+    monitor_key_get,
+)
+from charmhelpers.contrib.storage.linux.utils import (
+    is_block_device,
+    is_device_mounted,
+    zap_disk,
+)
+from charmhelpers.contrib.openstack.utils import (
+    get_os_codename_install_source,
+)
+
+CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph')
+OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd')
+HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf')
+
+LEADER = 'leader'
+PEON = 'peon'
+QUORUM = [LEADER, PEON]
+
+PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph',
+            'radosgw', 'xfsprogs', 'python-pyudev']
+
+LinkSpeed = {
+    "BASE_10": 10,
+    "BASE_100": 100,
+    "BASE_1000": 1000,
+    "GBASE_10": 10000,
+    "GBASE_40": 40000,
+    "GBASE_100": 100000,
+    "UNKNOWN": None
+}
+
+# Mapping of adapter speed to sysctl settings
+NETWORK_ADAPTER_SYSCTLS = {
+    # 10Gb
+    LinkSpeed["GBASE_10"]: {
+        'net.core.rmem_default': 524287,
+        'net.core.wmem_default': 524287,
+        'net.core.rmem_max': 524287,
+        'net.core.wmem_max': 524287,
+        'net.core.optmem_max': 524287,
+        'net.core.netdev_max_backlog': 300000,
+        'net.ipv4.tcp_rmem': '10000000 10000000 10000000',
+        'net.ipv4.tcp_wmem': '10000000 10000000 10000000',
+        'net.ipv4.tcp_mem': '10000000 10000000 10000000'
+    },
+    # Mellanox 10/40Gb
+    LinkSpeed["GBASE_40"]: {
+        'net.ipv4.tcp_timestamps': 0,
+        'net.ipv4.tcp_sack': 1,
+        'net.core.netdev_max_backlog': 250000,
+        'net.core.rmem_max': 4194304,
+        'net.core.wmem_max': 4194304,
+        'net.core.rmem_default': 4194304,
+        'net.core.wmem_default': 4194304,
+        'net.core.optmem_max': 4194304,
+        'net.ipv4.tcp_rmem': '4096 87380 4194304',
+        'net.ipv4.tcp_wmem': '4096 65536 4194304',
+        'net.ipv4.tcp_low_latency': 1,
+        'net.ipv4.tcp_adv_win_scale': 1
+    }
+}
+
+
+class Partition(object):
+    def __init__(self, name, number, size, start, end, sectors, uuid):
+        """A block device partition.
+
+        :param name: Name of block device
+        :param number: Partition number
+        :param size: Capacity of the device
+        :param start: Starting block
+        :param end: Ending block
+        :param sectors: Number of blocks
+        :param uuid: UUID of the partition
+        """
+        self.name = name
+        self.number = number
+        self.size = size
+        self.start = start
+        self.end = end
+        self.sectors = sectors
+        self.uuid = uuid
+
+    def __str__(self):
+        return "number: {} start: {} end: {} sectors: {} size: {} " \
+               "name: {} uuid: {}".format(self.number, self.start,
+                                          self.end,
+                                          self.sectors, self.size,
+                                          self.name, self.uuid)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+def unmounted_disks():
+    """List of unmounted block devices on the current host."""
+    disks = []
+    context = pyudev.Context()
+    for device in context.list_devices(DEVTYPE='disk'):
+        if device['SUBSYSTEM'] == 'block':
+            matched = False
+            for block_type in [u'dm', u'loop', u'ram', u'nbd']:
+                if block_type in device.device_node:
+                    matched = True
+            if matched:
+                continue
+            disks.append(device.device_node)
+    log("Found disks: {}".format(disks))
+    return [disk for disk in disks if not is_device_mounted(disk)]
+
+
+def save_sysctls(sysctl_dict, save_location):
+    """Persist the sysctls to the hard drive.
+
+    :param sysctl_dict: dict
+    :param save_location: path to save the settings to
+    :raises: IOError if anything goes wrong with writing.
+    """
+    try:
+        # Persist the settings for reboots
+        with open(save_location, "w") as fd:
+            for key, value in sysctl_dict.items():
+                fd.write("{}={}\n".format(key, value))
+
+    except IOError as e:
+        log("Unable to persist sysctl settings to {}. Error {}".format(
+            save_location, e.message), level=ERROR)
+        raise
+
+
+def tune_nic(network_interface):
+    """This will set optimal sysctls for the particular network adapter.
+
+    :param network_interface: string The network adapter name.
+    """
+    speed = get_link_speed(network_interface)
+    if speed in NETWORK_ADAPTER_SYSCTLS:
+        status_set('maintenance', 'Tuning device {}'.format(
+            network_interface))
+        sysctl_file = os.path.join(
+            os.sep,
+            'etc',
+            'sysctl.d',
+            '51-ceph-osd-charm-{}.conf'.format(network_interface))
+        try:
+            log("Saving sysctl_file: {} values: {}".format(
+                sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]),
+                level=DEBUG)
+            save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed],
+                         save_location=sysctl_file)
+        except IOError as e:
+            log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} "
+                "failed. {}".format(network_interface, e.message),
+                level=ERROR)
+
+        try:
+            # Apply the settings
+            log("Applying sysctl settings", level=DEBUG)
+            subprocess.check_output(["sysctl", "-p", sysctl_file])
+        except subprocess.CalledProcessError as err:
+            log('sysctl -p {} failed with error {}'.format(sysctl_file,
+                                                           err.output),
+                level=ERROR)
+    else:
+        log("No settings found for network adapter: {}".format(
+            network_interface), level=DEBUG)
+
+
+def get_link_speed(network_interface):
+    """This will find the link speed for a given network device.
+
+    Returns LinkSpeed["UNKNOWN"] (None) if the speed cannot be determined.
+
+    :param network_interface: string The network adapter interface.
+    :returns: LinkSpeed
+    """
+    speed_path = os.path.join(os.sep, 'sys', 'class', 'net',
+                              network_interface, 'speed')
+    # I'm not sure where else we'd check if this doesn't exist
+    if not os.path.exists(speed_path):
+        return LinkSpeed["UNKNOWN"]
+
+    try:
+        with open(speed_path, 'r') as sysfs:
+            nic_speed = sysfs.readlines()
+
+        # Did we actually read anything?
+        if not nic_speed:
+            return LinkSpeed["UNKNOWN"]
+
+        # Try to find a sysctl match for this particular speed
+        for name, speed in LinkSpeed.items():
+            if speed == int(nic_speed[0].strip()):
+                return speed
+        # Default to UNKNOWN if we can't find a match
+        return LinkSpeed["UNKNOWN"]
+    except IOError as e:
+        log("Unable to open {path} because of error: {error}".format(
+            path=speed_path,
+            error=e.message), level=ERROR)
+        return LinkSpeed["UNKNOWN"]
+
+
+def persist_settings(settings_dict):
+    """Persist the hard drive settings to the /etc/hdparm.conf file.
+
+    The settings_dict should be in the form of {"uuid": {"key":"value"}}
+
+    :param settings_dict: dict of settings to save
+    """
+    if not settings_dict:
+        return
+
+    try:
+        templating.render(source='hdparm.conf', target=HDPARM_FILE,
+                          context=settings_dict)
+    except IOError as err:
+        log("Unable to open {path} because of error: {error}".format(
+            path=HDPARM_FILE, error=err.message), level=ERROR)
+    except Exception as e:
+        # The templating.render can raise a jinja2 exception if the
+        # template is not found. Rather than polluting the import
+        # space of this charm, simply catch Exception
+        log('Unable to render {path} due to error: {error}'.format(
+            path=HDPARM_FILE, error=e.message), level=ERROR)
+
+
+def set_max_sectors_kb(dev_name, max_sectors_size):
+    """This function sets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :param max_sectors_size: int of the max_sectors_size to save
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+    try:
+        with open(max_sectors_kb_path, 'w') as f:
+            f.write(str(max_sectors_size))
+    except IOError as e:
+        log('Failed to write max_sectors_kb to {}. Error: {}'.format(
+            max_sectors_kb_path, e.message), level=ERROR)
+
+
+def get_max_sectors_kb(dev_name):
+    """This function gets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_sectors_kb or 0 on error.
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+
+    # Read in what Linux has set by default
+    if os.path.exists(max_sectors_kb_path):
+        try:
+            with open(max_sectors_kb_path, 'r') as f:
+                max_sectors_kb = f.read().strip()
+                return int(max_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_sectors_kb from {}. Error: {}'.format(
+                max_sectors_kb_path, e.message), level=ERROR)
+            # Bail.
+            return 0
+    return 0
+
+
+def get_max_hw_sectors_kb(dev_name):
+    """This function gets the max_hw_sectors_kb for a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_hw_sectors_kb or 0 on error.
+    """
+    max_hw_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                          'queue', 'max_hw_sectors_kb')
+    # Read in what the hardware supports
+    if os.path.exists(max_hw_sectors_kb_path):
+        try:
+            with open(max_hw_sectors_kb_path, 'r') as f:
+                max_hw_sectors_kb = f.read().strip()
+                return int(max_hw_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_hw_sectors_kb from {}. Error: {}'.format(
+                max_hw_sectors_kb_path, e.message), level=ERROR)
+            return 0
+    return 0
+
+
+def set_hdd_read_ahead(dev_name, read_ahead_sectors=256):
+    """This function sets the hard drive read ahead.
+
+    :param dev_name: Name of the block device to set read ahead on.
+    :param read_ahead_sectors: int How many sectors to read ahead.
+    """
+    try:
+        # Set the read ahead sectors to 256
+        log('Setting read ahead to {} for device {}'.format(
+            read_ahead_sectors,
+            dev_name))
+        subprocess.check_output(['hdparm',
+                                 '-a{}'.format(read_ahead_sectors),
+                                 dev_name])
+    except subprocess.CalledProcessError as e:
+        log('hdparm failed with error: {}'.format(e.output),
+            level=ERROR)
+
+
+def get_block_uuid(block_dev):
+    """This queries blkid to get the uuid for a block device.
+
+    :param block_dev: Name of the block device to query.
+    :returns: The UUID of the device or None on Error.
+    """
+    try:
+        block_info = subprocess.check_output(
+            ['blkid', '-o', 'export', block_dev])
+        for tag in block_info.split('\n'):
+            parts = tag.split('=')
+            if parts[0] == 'UUID':
+                return parts[1]
+        return None
+    except subprocess.CalledProcessError as err:
+        log('get_block_uuid failed with error: {}'.format(err.output),
+            level=ERROR)
+        return None
+
+
+def check_max_sectors(save_settings_dict,
+                      block_dev,
+                      uuid):
+    """Tune the max_hw_sectors if needed.
+
+    Make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb, or is
+    at least 1MB for spinning disks. If the box has a RAID card with cache
+    this could go much bigger.
+
+    :param save_settings_dict: The dict used to persist settings
+    :param block_dev: A block device name: Example: /dev/sda
+    :param uuid: The uuid of the block device
+    """
+    dev_name = None
+    path_parts = os.path.split(block_dev)
+    if len(path_parts) == 2:
+        dev_name = path_parts[1]
+    else:
+        log('Unable to determine the block device name from path: {}'.format(
+            block_dev))
+        # Play it safe and bail
+        return
+    max_sectors_kb = get_max_sectors_kb(dev_name=dev_name)
+    max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name)
+
+    if max_sectors_kb < max_hw_sectors_kb:
+        # OK, the hardware supports more than Linux is currently requesting
+        config_max_sectors_kb = hookenv.config('max-sectors-kb')
+        if config_max_sectors_kb < max_hw_sectors_kb:
+            # Set the max_sectors_kb to the config.yaml value if it is less
+            # than the max_hw_sectors_kb
+            log('Setting max_sectors_kb for device {} to {}'.format(
+                dev_name, config_max_sectors_kb))
+            save_settings_dict[
+                "drive_settings"][uuid][
+                "read_ahead_sect"] = config_max_sectors_kb
+            set_max_sectors_kb(dev_name=dev_name,
+                               max_sectors_size=config_max_sectors_kb)
+        else:
+            # Set to the max_hw_sectors_kb
+            log('Setting max_sectors_kb for device {} to {}'.format(
+                dev_name, max_hw_sectors_kb))
+            save_settings_dict[
+                "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb
+            set_max_sectors_kb(dev_name=dev_name,
+                               max_sectors_size=max_hw_sectors_kb)
+    else:
+        log('max_sectors_kb matches max_hw_sectors_kb.
No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + + This function will change the read ahead sectors and the max write + sectors for each block device. + + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + return + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_weight(osd_id): + """Returns the weight of the specified OSD. + + :returns: Float + :raises: ValueError if the monmap fails to parse. + :raises: CalledProcessError if our ceph command fails. + """ + try: + tree = subprocess.check_output( + ['ceph', 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['nodes']: + return None + for device in json_tree['nodes']: + if device['type'] == 'osd' and device['name'] == osd_id: + return device['crush_weight'] + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_osd_tree(service): + """Returns the current osd map in JSON. + + :returns: List. + :raises: ValueError if the monmap fails to parse. 
+
+    Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        tree = subprocess.check_output(
+            ['ceph', '--id', service,
+             'osd', 'tree', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            crush_list = []
+            # Make sure children are present in the json
+            if not json_tree['nodes']:
+                return None
+            child_ids = json_tree['nodes'][0]['children']
+            for child in json_tree['nodes']:
+                if child['id'] in child_ids:
+                    crush_list.append(
+                        CrushLocation(
+                            name=child.get('name'),
+                            identifier=child['id'],
+                            host=child.get('host'),
+                            rack=child.get('rack'),
+                            row=child.get('row'),
+                            datacenter=child.get('datacenter'),
+                            chassis=child.get('chassis'),
+                            root=child.get('root')
+                        )
+                    )
+            return crush_list
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v.message))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e.message))
+        raise
+
+
+def _get_child_dirs(path):
+    """Returns a list of child directories of the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :returns: list. A list of child directory paths under the parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    files_in_dir = [os.path.join(path, f) for f in os.listdir(path)]
+    return list(filter(os.path.isdir, files_in_dir))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
+    """
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: %s" % dirname)
+
+    return match.group('osd_id')
+
+
+def get_local_osd_ids():
+    """This will list the /var/lib/ceph/osd/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of osd identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    osd_ids = []
+    osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
+    if os.path.exists(osd_path):
+        try:
+            dirs = os.listdir(osd_path)
+            for osd_dir in dirs:
+                osd_id = osd_dir.split('-')[1]
+                if _is_int(osd_id):
+                    osd_ids.append(osd_id)
+        except OSError:
+            raise
+    return osd_ids
+
+
+def get_local_mon_ids():
+    """This will list the /var/lib/ceph/mon/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of monitor identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    mon_ids = []
+    mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon')
+    if os.path.exists(mon_path):
+        try:
+            dirs = os.listdir(mon_path)
+            for mon_dir in dirs:
+                # Basically this takes everything after ceph- as the monitor ID
+                match = re.search(r'ceph-(?P<mon_id>.*)', mon_dir)
+                if match:
+                    mon_ids.append(match.group('mon_id'))
+        except OSError:
+            raise
+    return mon_ids
+
+
+def _is_int(v):
+    """Return True if the object v can be turned into an integer."""
+    try:
+        int(v)
+        return True
+    except ValueError:
+        return False
+
+
+def get_version():
+    """Derive Ceph release from an installed package."""
+    import apt_pkg as apt
+
+    cache = apt_cache()
+    package = "ceph"
+    try:
+        pkg = cache[package]
+    except KeyError:
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation ' \
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+
+    # x.y match only for 20XX.X
+    # and ignore patch level for other packages
+    match = re.match(r'^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+    return float(vers)
+
+
+def error_out(msg):
+    log("FATAL ERROR: %s" % msg,
+        level=ERROR)
+    sys.exit(1)
+
+
+def is_quorum():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(subprocess.check_output(cmd))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        if result['state'] in QUORUM:
+            return True
+        else:
+            return False
+    else:
+        return False
+
+
+def is_leader():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(subprocess.check_output(cmd))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        if result['state'] == LEADER:
+            return True
+        else:
+            return False
+    else:
+        return False
+
+
+def wait_for_quorum():
+    while not is_quorum():
+        log("Waiting for quorum to be reached")
+        time.sleep(3)
+
+
+def add_bootstrap_hint(peer):
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "add_bootstrap_peer_hint",
+        peer
+    ]
+    if os.path.exists(asok):
+        # Ignore any errors for this call
+        subprocess.call(cmd)
+
+
+DISK_FORMATS = [
+    'xfs',
+    'ext4',
+    'btrfs'
+]
+
+CEPH_PARTITIONS = [
+    '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE',  # ceph encrypted disk in creation
+    '45B0969E-9B03-4F30-B4C6-5EC00CEFF106',  # ceph encrypted journal
+    '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D',  # ceph encrypted osd data
+    '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D',  # ceph osd data
+    '45B0969E-9B03-4F30-B4C6-B4B80CEFF106',  # ceph osd journal
+    '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE',  # ceph disk in creation
+]
+
+
+def umount(mount_point):
+    """This function unmounts a mounted directory forcibly. This will
+    be used for unmounting broken hard drive mounts which may hang.
+
+    If umount returns EBUSY this will lazy unmount.
+
+    :param mount_point: str.
A String representing the filesystem mount point + :returns: int. Returns 0 on success. errno otherwise. + """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. Example: /dev/sda + :param osd_format: + :param osd_journal: + :param reformat_osd: + :param ignore_errors: + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]) + # umount if still mounted + ret = umount(mount_point) + if ret < 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + +def get_partition_list(dev): + """Lists the partitions of a block device. + + :param dev: Path to a block device. ex: /dev/sda + :returns: Returns a list of Partition objects. 
+
+    :raises: CalledProcessError if partx fails
+    """
+    partitions_list = []
+    try:
+        partitions = get_partitions(dev)
+        # For each line of output
+        for partition in partitions:
+            parts = partition.split()
+            partitions_list.append(
+                Partition(number=parts[0],
+                          start=parts[1],
+                          end=parts[2],
+                          sectors=parts[3],
+                          size=parts[4],
+                          name=parts[5],
+                          uuid=parts[6])
+            )
+        return partitions_list
+    except subprocess.CalledProcessError:
+        raise
+
+
+def is_osd_disk(dev):
+    partitions = get_partition_list(dev)
+    for partition in partitions:
+        try:
+            info = subprocess.check_output(['sgdisk', '-i', partition.number,
+                                            dev])
+            info = info.split("\n")  # IGNORE:E1103
+            for line in info:
+                for ptype in CEPH_PARTITIONS:
+                    sig = 'Partition GUID code: {}'.format(ptype)
+                    if line.startswith(sig):
+                        return True
+        except subprocess.CalledProcessError as e:
+            log("sgdisk inspection of partition {} on {} failed with "
+                "error: {}. Skipping".format(partition.number, dev, e.message),
+                level=ERROR)
+    return False
+
+
+def start_osds(devices):
+    # Scan for ceph block devices
+    rescan_osd_devices()
+    if cmp_pkgrevno('ceph', "0.56.6") >= 0:
+        # Use ceph-disk activate for directory based OSD's
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(['ceph-disk', 'activate', dev_or_path])
+
+
+def rescan_osd_devices():
+    cmd = [
+        'udevadm', 'trigger',
+        '--subsystem-match=block', '--action=add'
+    ]
+
+    subprocess.call(cmd)
+
+
+_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring"
+_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring"
+
+
+def is_bootstrapped():
+    return os.path.exists(_bootstrap_keyring)
+
+
+def wait_for_bootstrap():
+    while not is_bootstrapped():
+        time.sleep(3)
+
+
+def import_osd_bootstrap_key(key):
+    if not os.path.exists(_bootstrap_keyring):
+        cmd = [
+            "sudo",
+            "-u",
+            ceph_user(),
+            'ceph-authtool',
+            _bootstrap_keyring,
+            '--create-keyring',
+            '--name=client.bootstrap-osd',
+            '--add-key={}'.format(key)
+        ]
+        subprocess.check_call(cmd)
+
+
+def import_osd_upgrade_key(key):
+    if not os.path.exists(_upgrade_keyring):
+        cmd = [
+            "sudo",
+            "-u",
+            ceph_user(),
+            'ceph-authtool',
+            _upgrade_keyring,
+            '--create-keyring',
+            '--name=client.osd-upgrade',
+            '--add-key={}'.format(key)
+        ]
+        subprocess.check_call(cmd)
+
+
+def generate_monitor_secret():
+    cmd = [
+        'ceph-authtool',
+        '/dev/stdout',
+        '--name=mon.',
+        '--gen-key'
+    ]
+    res = subprocess.check_output(cmd)
+
+    return "{}==".format(res.split('=')[1].strip())
+
+# OSD caps taken from ceph-create-keys
+_osd_bootstrap_caps = {
+    'mon': [
+        'allow command osd create ...',
+        'allow command osd crush set ...',
+        r'allow command auth add * osd allow\ * mon allow\ rwx',
+        'allow command mon getmap'
+    ]
+}
+
+_osd_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-osd'
+    ]
+}
+
+
+def parse_key(raw_key):
+    # get-or-create appears to have different output depending
+    # on whether it's 'get' or 'create'
+    # 'create' just returns the key, 'get' is more verbose and
+    # needs parsing
+    key = None
+    if len(raw_key.splitlines()) == 1:
+        key = raw_key
+    else:
+        for element in raw_key.splitlines():
+            if 'key' in element:
+                return element.split(' = ')[1].strip()  # IGNORE:E1103
+    return key
+
+
+def get_osd_bootstrap_key():
+    try:
+        # Attempt to get/create a key using the OSD bootstrap profile first
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps_profile)
+    except subprocess.CalledProcessError:
+        # If that fails try with the older style permissions
+        key =
get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(pool_list=None): + return get_named_key(name='radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) + + +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) + + +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + +_default_caps = collections.OrderedDict([ + ('mon', ['allow r']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :returns: Returns a cephx key + """ + try: + # Does the key already exist? + output = subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + 'client.{}'.format(name), + ]).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! 
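+        # For illustration only (the key name and pool are made up), the
+        # fallback below ends up invoking something like:
+        #   ceph --name mon. \
+        #       --keyring /var/lib/ceph/mon/ceph-$HOSTNAME/keyring \
+        #       auth get-or-create client.glance mon 'allow r' \
+        #       osd 'allow rwx pool=glance'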
+ log("Creating new key for {}".format(name), level=DEBUG) + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', 'client.{}'.format(name), + ] + # Add capabilities + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' + + +def bootstrap_monitor_cluster(secret): + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. 
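+                # With a hostname of e.g. "ceph-mon-0" (example only),
+                # this amounts to running: ceph-create-keys --id ceph-mon-0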
+ cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) + except: + raise + finally: + os.unlink(keyring) + + +def update_monfs(): + hostname = socket.gethostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def maybe_zap_journal(journal_dev): + if is_osd_disk(journal_dev): + log('Looks like {} is already an OSD data' + ' or journal, skipping.'.format(journal_dev)) + return + zap_disk(journal_dev) + log("Zapped journal device {}".format(journal_dev)) + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = subprocess.check_output(cmd).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def find_least_used_journal(journal_devices): + usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False, bluestore=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, + reformat_osd, ignore_errors, encrypt, + bluestore) + else: + osdize_dir(dev, encrypt) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False, bluestore=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev) and not reformat_osd: + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + status_set('maintenance', 'Initializing device {}'.format(dev)) + cmd = ['ceph-disk', 'prepare'] + # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + if cmp_pkgrevno('ceph', '0.48.3') >= 0: + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + + if reformat_osd: + cmd.append('--zap-disk') + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + + cmd.append(dev) + + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + if reformat_osd: + zap_disk(dev) + + try: + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + raise + + +def osdize_dir(path, encrypt=False): + """Ask ceph-disk to prepare a directory to become an osd. + + :param path: str. The directory to osdize + :param encrypt: bool. 
Should the OSD directory be encrypted at rest
+    :returns: None
+    """
+    if os.path.exists(os.path.join(path, 'upstart')):
+        log('Path {} is already configured as an OSD - bailing'.format(path))
+        return
+
+    if cmp_pkgrevno('ceph', "0.56.6") < 0:
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        return
+
+    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
+    cmd = [
+        'sudo', '-u', ceph_user(),
+        'ceph-disk',
+        'prepare',
+        '--data-dir',
+        path
+    ]
+    if cmp_pkgrevno('ceph', '0.60') >= 0:
+        if encrypt:
+            cmd.append('--dmcrypt')
+    log("osdize dir cmd: {}".format(cmd))
+    subprocess.check_call(cmd)
+
+
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+
+def get_running_osds():
+    """Returns a list of the pids of the current running OSD daemons"""
+    cmd = ['pgrep', 'ceph-osd']
+    try:
+        result = subprocess.check_output(cmd)
+        return result.split()
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_cephfs(service):
+    """List the Ceph Filesystems that exist.
+
+    :param service: The service name to run the ceph command under
+    :returns: list. Returns a list of the ceph filesystems
+    """
+    if get_version() < 0.86:
+        # This command wasn't introduced until 0.86 ceph
+        return []
+    try:
+        output = subprocess.check_output(["ceph", '--id', service, "fs", "ls"])
+        if not output:
+            return []
+        """
+        Example subprocess output:
+        'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
+        data pools: [ip-172-31-23-165_data ]\n'
+        output: filesystems: ['ip-172-31-23-165']
+        """
+        filesystems = []
+        for line in output.splitlines():
+            parts = line.split(',')
+            for part in parts:
+                if "name" in part:
+                    filesystems.append(part.split(' ')[1])
+        return filesystems
+    except subprocess.CalledProcessError:
+        return []
+
+
+def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
+    """Wait for all monitors in the cluster to upgrade, or return
+    after a timeout period has expired.
+
+    :param new_version: str of the version to watch
+    :param upgrade_key: the cephx key name to use
+    """
+    done = False
+    start_time = time.time()
+    monitor_list = []
+
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    while not done:
+        try:
+            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
+                "mon", mon, new_version
+            )) for mon in monitor_list)
+            current_time = time.time()
+            if current_time > (start_time + 10 * 60):
+                raise Exception(
+                    "Timed out waiting for monitors to upgrade")
+            else:
+                # Wait 30 seconds and test again if all monitors are upgraded
+                time.sleep(30)
+        except subprocess.CalledProcessError:
+            raise
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+def roll_monitor_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There are two possible cases: Either I'm first in line or not.
+    If I'm not first in line I'll wait a random time between 5-30 seconds
+    and test to see if the previous monitor is upgraded yet.
+ + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('waiting', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(upgrade_key=upgrade_key, + service='mon', + previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(new_version): + """Upgrade the current ceph monitor to the new version + + :param new_version: String version to upgrade to. + """ + current_version = get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for mon_id in get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=determine_packages(), fatal=True) + + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. + if new_version == 'jewel': + # Ensure the ownership of Ceph's directories is correct + owner = ceph_user() + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + + if systemd(): + for mon_id in get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def lock_and_roll(upgrade_key, service, my_name, version): + """Create a lock on the ceph monitor cluster and upgrade. + + :param upgrade_key: str. The cephx key to use + :param service: str. The cephx id to use + :param my_name: str. The current hostname + :param version: str. 
The version we are upgrading to + """ + start_timestamp = time.time() + + log('monitor_key_set {}_{}_{}_start {}'.format( + service, + my_name, + version, + start_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_start".format( + service, my_name, version), start_timestamp) + log("Rolling") + + # This should be quick + if service == 'osd': + upgrade_osd(version) + elif service == 'mon': + upgrade_monitor(version) + else: + log("Unknown service {}. Unable to upgrade".format(service), + level=ERROR) + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_{}_{}_done {}'.format(service, + my_name, + version, + stop_timestamp)) + status_set('maintenance', 'Finishing upgrade') + monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, + my_name, + version), + stop_timestamp) + + +def wait_on_previous_node(upgrade_key, service, previous_node, version): + """A lock that sleeps the current thread while waiting for the previous + node to finish upgrading. + + :param upgrade_key: + :param service: str. the cephx id to use + :param previous_node: str. The name of the previous node to wait on + :param version: str. The version we are upgrading to + :returns: None + """ + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + upgrade_key, + "{}_{}_{}_start".format(service, previous_node, version)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + +def get_upgrade_position(osd_sorted_list, match_name): + """Return the upgrade position for the given osd. + + :param osd_sorted_list: list. Osds sorted + :param match_name: str. The osd name to match + :returns: int. The position or None if not found + """ + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version, upgrade_key): + """This is tricky to get right so here's what we're going to do. + + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. 
+ + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(new_version): + """Upgrades the current osd + + :param new_version: str. The new version to upgrade to + """ + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph sources failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + try: + # Upgrade the packages before restarting the daemons. + status_set('maintenance', 'Upgrading packages to %s' % new_version) + apt_install(packages=determine_packages(), fatal=True) + + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for path in non_osd_dirs: + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. 
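+        # Sketch of the per-OSD flow below: stop the OSD, disable it,
+        # chown its directory tree to ceph:ceph, then re-enable and
+        # start it again (see _upgrade_single_osd).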
+        for osd_dir in _get_child_dirs(OSD_BASE_DIR):
+            try:
+                osd_num = _get_osd_num_from_dirname(osd_dir)
+                _upgrade_single_osd(osd_num, osd_dir)
+            except ValueError as ex:
+                # Directory could not be parsed - junk directory?
+                log('Could not parse osd directory %s: %s' % (osd_dir, ex),
+                    WARNING)
+                continue
+
+    except (subprocess.CalledProcessError, IOError) as err:
+        log("Stopping ceph and upgrading packages failed "
+            "with message: {}".format(err.message))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+
+def _upgrade_single_osd(osd_num, osd_dir):
+    """Upgrades the single OSD directory.
+
+    :param osd_num: the num of the OSD
+    :param osd_dir: the directory of the OSD to upgrade
+    :raises CalledProcessError: if an error occurs in a command issued as part
+                                of the upgrade process
+    :raises IOError: if an error occurs reading/writing to a file as part
+                     of the upgrade process
+    """
+    stop_osd(osd_num)
+    disable_osd(osd_num)
+    update_owner(osd_dir)
+    enable_osd(osd_num)
+    start_osd(osd_num)
+
+
+def stop_osd(osd_num):
+    """Stops the specified OSD number.
+
+    :param osd_num: the osd number to stop
+    """
+    if systemd():
+        service_stop('ceph-osd@{}'.format(osd_num))
+    else:
+        service_stop('ceph-osd', id=osd_num)
+
+
+def start_osd(osd_num):
+    """Starts the specified OSD number.
+
+    :param osd_num: the osd number to start.
+    """
+    if systemd():
+        service_start('ceph-osd@{}'.format(osd_num))
+    else:
+        service_start('ceph-osd', id=osd_num)
+
+
+def disable_osd(osd_num):
+    """Disables the specified OSD number.
+
+    Ensures that the specified osd will not be automatically started at the
+    next reboot of the system. Due to differences between init systems,
+    this method cannot make any guarantees that the specified osd cannot be
+    started manually.
+
+    :param osd_num: the osd id which should be disabled.
+    :raises CalledProcessError: if an error occurs invoking the systemd cmd
+                                to disable the OSD
+    :raises IOError, OSError: if the attempt to read/remove the ready file in
+                              an upstart enabled system fails
+    """
+    if systemd():
+        # When running under systemd, the individual ceph-osd daemons run as
+        # templated units and can be directly addressed by referring to the
+        # templated service name ceph-osd@<osd_id>. Additionally, systemd
+        # allows one to disable a specific templated unit by running the
+        # 'systemctl disable ceph-osd@<osd_id>' command. When disabled, the
+        # OSD should remain disabled until re-enabled via systemd.
+        # Note: disabling an already disabled service in systemd returns 0, so
+        # no need to check whether it is enabled or not.
+        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # Neither upstart nor the ceph-osd upstart script provides for
+        # disabling the starting of an OSD automatically. The specific OSD
+        # cannot be prevented from running manually, however it can be
+        # prevented from running automatically on reboot by removing the
+        # 'ready' file in the OSD's root directory. This is due to the
+        # ceph-osd-all upstart script checking for the presence of this file
+        # before starting the OSD.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        if os.path.exists(ready_file):
+            os.unlink(ready_file)
+
+
+def enable_osd(osd_num):
+    """Enables the specified OSD number.
+
+    Ensures that the specified osd_num will be enabled and ready to start
+    automatically in the event of a reboot.
+
+    :param osd_num: the osd id which should be enabled.
+    :raises CalledProcessError: if the systemd command issued to enable the
+                                service fails
+    :raises IOError: if the attempt to write the ready file in an upstart
+                     enabled system fails
+    """
+    if systemd():
+        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # When running on upstart, the OSDs are started via the ceph-osd-all
+        # upstart script which will only start the osd if it has a 'ready'
+        # file. Make sure that file exists.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        with open(ready_file, 'w') as f:
+            f.write('ready')
+
+        # Make sure the correct user owns the file. It shouldn't be necessary
+        # as the upstart script should run with root privileges, but it's
+        # better to keep the ownership of all the files consistent.
+        update_owner(ready_file)
+
+
+def update_owner(path, recurse_dirs=True):
+    """Changes the ownership of the specified path.
+
+    Changes the ownership of the specified path to the new ceph daemon user
+    using the system's native chown functionality. This may take a while,
+    so this method will issue a status_set for any change of ownership which
+    recurses into a directory structure.
+
+    :param path: the path to recursively change ownership for
+    :param recurse_dirs: boolean indicating whether to recursively change the
+                         ownership of all the files in a path's subtree or to
+                         simply change the ownership of the path.
+    :raises CalledProcessError: if an error occurs issuing the chown system
+                                command
+    """
+    user = ceph_user()
+    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
+    cmd = ['chown', user_group, path]
+    if os.path.isdir(path) and recurse_dirs:
+        status_set('maintenance', ('Updating ownership of %s to %s' %
+                                   (path, user)))
+        cmd.insert(1, '-R')
+
+    log('Changing ownership of {path} to {user}'.format(
+        path=path, user=user_group), DEBUG)
+    start = datetime.now()
+    subprocess.check_call(cmd)
+    elapsed_time = (datetime.now() - start)
+
+    log('Took {secs} seconds to change the ownership of path: {path}'.format(
+        secs=elapsed_time.total_seconds(), path=path), DEBUG)
+
+
+def list_pools(service):
+    """Lists the current pools that Ceph has
+
+    :param service: String service id to run under
+    :returns: list. Returns a list of the ceph pools.
+    :raises: CalledProcessError if the subprocess fails to run.
+    """
+    try:
+        pool_list = []
+        pools = subprocess.check_output(['rados', '--id', service, 'lspools'])
+        for pool in pools.splitlines():
+            pool_list.append(pool)
+        return pool_list
+    except subprocess.CalledProcessError as err:
+        log("rados lspools failed with error: {}".format(err.output))
+        raise
+
+
+def dirs_need_ownership_update(service):
+    """Determines if directories still need change of ownership.
+
+    Examines the set of directories under the /var/lib/ceph/{service}
+    directory and determines if they have the correct ownership or not. This
+    is necessary due to the upgrade from Hammer to Jewel, where the daemon
+    user changes from root:root to ceph:ceph.
+
+    :param service: the name of the service folder to check (e.g. osd, mon)
+    :returns: boolean. True if the directories need a change of ownership,
+              False otherwise.
+    :raises IOError: if an error occurs reading the file stats from one of
+                     the child directories.
+    :raises OSError: if the specified path does not exist or another error
+                     occurs while listing it
+    """
+    expected_owner = expected_group = ceph_user()
+    path = os.path.join(CEPH_BASE_DIR, service)
+    for child in _get_child_dirs(path):
+        curr_owner, curr_group = owner(child)
+
+        if (curr_owner == expected_owner) and (curr_group == expected_group):
+            continue
+
+        log('Directory "%s" needs its ownership updated' % child, DEBUG)
+        return True
+
+    # All child directories had the expected ownership
+    return False
+
+# A dict of valid ceph upgrade paths. Mapping is old -> new
+UPGRADE_PATHS = {
+    'firefly': 'hammer',
+    'hammer': 'jewel',
+}
+
+# Map UCA codenames to ceph codenames
+UCA_CODENAME_MAP = {
+    'icehouse': 'firefly',
+    'juno': 'firefly',
+    'kilo': 'hammer',
+    'liberty': 'hammer',
+    'mitaka': 'jewel',
+    'newton': 'jewel',
+    'ocata': 'jewel',
+}
+
+
+def pretty_print_upgrade_paths():
+    """Pretty print supported upgrade paths for ceph.
+
+    :returns: list of strings, one per supported upgrade path
+    """
+    lines = []
+    for key, value in UPGRADE_PATHS.items():
+        lines.append("{} -> {}".format(key, value))
+    return lines
+
+
+def resolve_ceph_version(source):
+    """Resolves a version of ceph based on the Ubuntu Cloud Archive
+    pocket found in the source configuration.
+
+    :param source: source configuration option of charm
+    :returns: ceph release codename or None if not resolvable
+    """
+    os_release = get_os_codename_install_source(source)
+    return UCA_CODENAME_MAP.get(os_release)
+
+
+def get_ceph_pg_stat():
+    """Returns the result of 'ceph pg stat'.
+
+    :returns: dict of the parsed JSON output, or None if no placement
+              group state was reported
+    """
+    try:
+        tree = subprocess.check_output(['ceph', 'pg', 'stat', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            if not json_tree['num_pg_by_state']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph pg stat json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph pg stat command failed with message: {}".format(e))
+        raise
+
+
+def get_ceph_health():
+    """Returns the health of the cluster from a 'ceph status'.
+
+    :returns: dict tree of ceph status. To get the overall status, use
+              get_ceph_health()['overall_status'].
+    :raises: CalledProcessError if our ceph command fails.
+    """
+    try:
+        tree = subprocess.check_output(
+            ['ceph', 'status', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['overall_status']:
+                return None
+
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph status command failed with message: {}".format(e))
+        raise
+
+
+def reweight_osd(osd_num, new_weight):
+    """Changes the crush weight of an OSD to the value specified.
+
+    :param osd_num: the osd id which should be changed
+    :param new_weight: the new weight for the OSD
+    :returns: bool. True if output looks right, else False.
+    :raises CalledProcessError: if an error occurs invoking the ceph command
+    """
+    try:
+        cmd_result = subprocess.check_output(
+            ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num),
+             str(new_weight)], stderr=subprocess.STDOUT)
+        expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format(
+            ID=osd_num) + " to {}".format(new_weight)
+        log(cmd_result)
+        if expected_result in cmd_result:
+            return True
+        return False
+    except subprocess.CalledProcessError as e:
+        log("ceph osd crush reweight command failed with message: {}".format(
+            e))
+        raise
+
+
+def determine_packages():
+    """Determines packages for installation.
+
+    :returns: list of ceph packages
+    """
+    if is_container():
+        PACKAGES.remove('ntp')
+
+    return PACKAGES
+
+
+def bootstrap_manager():
+    """Creates the keyring for, enables and starts ceph-mgr on this host."""
+    hostname = socket.gethostname()
+    path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname)
+    keyring = os.path.join(path, 'keyring')
+
+    if os.path.exists(keyring):
+        log('bootstrap_manager: mgr already initialized.')
+    else:
+        mkdir(path, owner=ceph_user(), group=ceph_user())
+        subprocess.check_call(['ceph', 'auth', 'get-or-create',
+                               'mgr.{}'.format(hostname), 'mon',
+                               'allow profile mgr', 'osd', 'allow *',
+                               'mds', 'allow *', '--out-file',
+                               keyring])
+        chownr(path, ceph_user(), ceph_user())
+
+        unit = 'ceph-mgr@{}'.format(hostname)
+        subprocess.check_call(['systemctl', 'enable', unit])
+        service_restart(unit)
diff --git a/ceph-osd/lib/setup.py b/ceph-osd/lib/setup.py
deleted file mode 100644
index 139c80d6..00000000
--- a/ceph-osd/lib/setup.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import print_function
-
-import os
-import sys
-from setuptools import setup, find_packages
-from setuptools.command.test import test as TestCommand
-
-version = "0.0.1.dev1"
-install_require = [
-]
-
-tests_require = [
-    'tox >= 2.3.1',
-]
-
-
-class Tox(TestCommand):
-
-    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
-
-    def initialize_options(self):
-        TestCommand.initialize_options(self)
-        self.tox_args = None
-
-    def finalize_options(self):
-        TestCommand.finalize_options(self)
-        self.test_args = []
-        self.test_suite = True
-
-    def run_tests(self):
-        # import here, cause outside the eggs aren't loaded
-        import tox
-        import shlex
-        args = self.tox_args
-        # remove the 'test' arg from argv as tox passes it to ostestr which
-        # breaks it.
- sys.argv.pop() - if args: - args = shlex.split(self.tox_args) - errno = tox.cmdline(args=args) - sys.exit(errno) - - -if sys.argv[-1] == 'publish': - os.system("python setup.py sdist upload") - os.system("python setup.py bdist_wheel upload") - sys.exit() - - -if sys.argv[-1] == 'tag': - os.system("git tag -a %s -m 'version %s'" % (version, version)) - os.system("git push --tags") - sys.exit() - - -setup( - name='charms.ceph', - version=version, - description='Provide base module for ceph charms.', - classifiers=[ - "Development Status :: 2 - Pre-Alpha", - "Intended Audience :: Developers", - "Topic :: System", - "Topic :: System :: Installation/Setup", - "Topic :: System :: Software Distribution", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "License :: OSI Approved :: Apache Software License", - ], - url='https://github.com/openstack/charms.ceph', - author='OpenStack Charmers', - author_email='openstack-dev@lists.openstack.org', - license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0', - packages=find_packages(exclude=["unit_tests"]), - zip_safe=False, - cmdclass={'test': Tox}, - install_requires=install_require, - extras_require={ - 'testing': tests_require, - }, - tests_require=tests_require, -) diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index c74d10e1..d8494a01 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -18,7 +18,7 @@ from mock import call, Mock, patch import test_utils -import ceph +import ceph.utils as ceph import replace_osd TO_PATCH = [ @@ -73,13 +73,13 @@ def test_umount(self): ]) assert ret == 0 - @patch('ceph.mounts') - @patch('ceph.check_output') - @patch('ceph.umount') - @patch('ceph.osdize') - @patch('ceph.shutil') - @patch('ceph.systemd') - @patch('ceph.ceph_user') + @patch.object(ceph, 'mounts') + @patch.object(ceph.subprocess, 'check_output') + @patch.object(ceph, 'umount') + @patch.object(ceph, 'osdize') + @patch.object(ceph, 'shutil') + @patch.object(ceph, 'systemd') + @patch.object(ceph, 'ceph_user') def test_replace_osd(self, ceph_user, systemd, shutil, osdize, umount, check_output, mounts): ceph_user.return_value = "ceph" diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index 1a403448..b7d62c6c 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -1,7 +1,7 @@ __author__ = 'Chris Holcombe ' from mock import patch, call import test_utils -import ceph +import ceph.utils as ceph TO_PATCH = [ 'hookenv', @@ -14,7 +14,7 @@ class PerformanceTestCase(test_utils.CharmTestCase): def setUp(self): super(PerformanceTestCase, self).setUp(ceph, TO_PATCH) - @patch.object(ceph, 'check_output') + @patch.object(ceph.subprocess, 'check_output') @patch.object(ceph, 'get_link_speed') @patch.object(ceph, 'save_sysctls') def test_tune_nic(self, save_sysctls, get_link_speed, check_output): @@ -42,19 +42,19 @@ def test_tune_nic(self, save_sysctls, get_link_speed, check_output): call('maintenance', 'Tuning device eth0'), ]) - @patch('ceph.check_output') + @patch.object(ceph.subprocess, 'check_output') def test_get_block_uuid(self, check_output): check_output.return_value = \ 'UUID=378f3c86-b21a-4172-832d-e2b3d4bc7511\nTYPE=ext2\n' uuid = ceph.get_block_uuid('/dev/sda1') self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') - @patch('ceph.persist_settings') - 
@patch('ceph.set_hdd_read_ahead') - @patch('ceph.get_max_sectors_kb') - @patch('ceph.get_max_hw_sectors_kb') - @patch('ceph.set_max_sectors_kb') - @patch('ceph.get_block_uuid') + @patch.object(ceph, 'persist_settings') + @patch.object(ceph, 'set_hdd_read_ahead') + @patch.object(ceph, 'get_max_sectors_kb') + @patch.object(ceph, 'get_max_hw_sectors_kb') + @patch.object(ceph, 'set_max_sectors_kb') + @patch.object(ceph, 'get_block_uuid') def test_tune_dev(self, block_uuid, set_max_sectors_kb, @@ -84,12 +84,12 @@ def test_tune_dev(self, call('maintenance', 'Finished tuning device /dev/sda') ]) - @patch('ceph.persist_settings') - @patch('ceph.set_hdd_read_ahead') - @patch('ceph.get_max_sectors_kb') - @patch('ceph.get_max_hw_sectors_kb') - @patch('ceph.set_max_sectors_kb') - @patch('ceph.get_block_uuid') + @patch.object(ceph, 'persist_settings') + @patch.object(ceph, 'set_hdd_read_ahead') + @patch.object(ceph, 'get_max_sectors_kb') + @patch.object(ceph, 'get_max_hw_sectors_kb') + @patch.object(ceph, 'set_max_sectors_kb') + @patch.object(ceph, 'get_block_uuid') def test_tune_dev_2(self, block_uuid, set_max_sectors_kb, @@ -118,7 +118,7 @@ def test_tune_dev_2(self, call('maintenance', 'Finished tuning device /dev/sda') ]) - @patch('ceph.check_output') + @patch.object(ceph.subprocess, 'check_output') def test_set_hdd_read_ahead(self, check_output): ceph.set_hdd_read_ahead(dev_name='/dev/sda') check_output.assert_called_with( From fc964836a104c09b3b5197fb3257d01d013bff52 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 14 Aug 2017 14:50:39 +0100 Subject: [PATCH 1347/2699] Sync charms.ceph to get code cleanup changes Also had to fix some imports due to changes implemented as part of the cleanup. Change-Id: Ibe20386e4c8a19949c00c45d332936ed32a971fa --- ceph-mon/hooks/ceph_hooks.py | 4 +- ceph-mon/lib/__init__.py | 0 ceph-mon/lib/ceph/__init__.py | 2157 ---------------- .../lib/ceph/{ceph_broker.py => broker.py} | 115 +- ceph-mon/lib/ceph/ceph_helpers.py | 1557 ------------ ceph-mon/lib/ceph/crush_utils.py | 149 ++ ceph-mon/lib/ceph/utils.py | 2199 +++++++++++++++++ ceph-mon/lib/setup.py | 85 - ceph-mon/unit_tests/test_ceph_ops.py | 84 +- 9 files changed, 2486 insertions(+), 3864 deletions(-) delete mode 100644 ceph-mon/lib/__init__.py rename ceph-mon/lib/ceph/{ceph_broker.py => broker.py} (88%) delete mode 100644 ceph-mon/lib/ceph/ceph_helpers.py create mode 100644 ceph-mon/lib/ceph/crush_utils.py create mode 100644 ceph-mon/lib/ceph/utils.py delete mode 100644 ceph-mon/lib/setup.py diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index bbc45e8b..8b7a9c97 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -21,8 +21,8 @@ import uuid sys.path.append('lib') -import ceph -from ceph.ceph_broker import ( +import ceph.utils as ceph +from ceph.broker import ( process_requests ) diff --git a/ceph-mon/lib/__init__.py b/ceph-mon/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/ceph/__init__.py index 6aaf4fae..e69de29b 100644 --- a/ceph-mon/lib/ceph/__init__.py +++ b/ceph-mon/lib/ceph/__init__.py @@ -1,2157 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
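The ceph.utils module move is also what drives the mechanical test rewrites above: once a test module imports the library as `import ceph.utils as ceph`, a string target such as `@patch('ceph.check_output')` would resolve against the now-empty ceph/__init__.py package rather than the module under test, so each patch becomes a `patch.object` on the aliased module. A minimal sketch of the resulting pattern (stub values illustrative, simplified from the test classes above):

    import ceph.utils as ceph
    from mock import patch

    @patch.object(ceph.subprocess, 'check_output')
    def test_get_block_uuid(check_output):
        # blkid output is stubbed; get_block_uuid() should parse the UUID tag
        check_output.return_value = 'UUID=378f3c86\nTYPE=ext2\n'
        assert ceph.get_block_uuid('/dev/sda1') == '378f3c86'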
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import ctypes -import collections -import json -import random -import socket -import subprocess -import time -import os -import re -import sys -import errno -import shutil -import pyudev - -from datetime import datetime - -from charmhelpers.core import hookenv -from charmhelpers.core import templating -from charmhelpers.core.host import ( - chownr, - cmp_pkgrevno, - lsb_release, - mkdir, - mounts, - owner, - service_restart, - service_start, - service_stop, - CompareHostReleases, - is_container, -) -from charmhelpers.core.hookenv import ( - cached, - config, - log, - status_set, - DEBUG, - ERROR, - WARNING, -) -from charmhelpers.fetch import ( - apt_cache, - add_source, apt_install, apt_update) -from charmhelpers.contrib.storage.linux.ceph import ( - monitor_key_set, - monitor_key_exists, - monitor_key_get, - get_mon_map, -) -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - zap_disk, - is_device_mounted, -) -from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source, -) - -from ceph.ceph_helpers import check_output - -CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') -OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') -HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev'] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -class Partition(object): - def __init__(self, name, number, size, start, end, sectors, uuid): - """ - A block device partition - :param name: Name of block device - :param number: Partition number - :param size: Capacity of the device - :param start: Starting block - :param end: Ending block - :param sectors: Number of blocks - :param uuid: UUID of the partition - """ - self.name = name, - self.number = number - self.size = size - self.start = start - self.end = end - self.sectors = sectors - self.uuid = uuid - - def __str__(self): - return "number: {} start: {} end: {} 
sectors: {} size: {} " \ - "name: {} uuid: {}".format(self.number, self.start, - self.end, - self.sectors, self.size, - self.name, self.uuid) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -def unmounted_disks(): - """List of unmounted block devices on the current host.""" - disks = [] - context = pyudev.Context() - for device in context.list_devices(DEVTYPE='disk'): - if device['SUBSYSTEM'] == 'block': - matched = False - for block_type in [u'dm', u'loop', u'ram', u'nbd']: - if block_type in device.device_node: - matched = True - if matched: - continue - disks.append(device.device_node) - log("Found disks: {}".format(disks)) - return [disk for disk in disks if not is_device_mounted(disk)] - - -def save_sysctls(sysctl_dict, save_location): - """ - Persist the sysctls to the hard drive. - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raise: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) - raise - - -def tune_nic(network_interface): - """ - This will set optimal sysctls for the particular network adapter. - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e.message), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """ - This will find the link speed for a given network device. Returns None - if an error occurs. - :param network_interface: string The network adapter interface. - :return: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? 
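As a concrete example of the mapping this function feeds: a 10GbE NIC reports 10000 in /sys/class/net/<iface>/speed, which equals LinkSpeed["GBASE_10"] and selects the first profile in NETWORK_ADAPTER_SYSCTLS above. A sketch of that lookup (interface name illustrative):

    # read the kernel's idea of the link speed in Mbit/s
    with open('/sys/class/net/eth0/speed') as sysfs:
        nic_speed = int(sysfs.read().strip())      # e.g. 10000
    # pick the sysctl profile keyed by that speed, if one exists
    profile = NETWORK_ADAPTER_SYSCTLS.get(nic_speed, {})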
- if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e.message), level='error') - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """ - This will persist the hard drive settings to the /etc/hdparm.conf file - The settings_dict should be in the form of {"uuid": {"key":"value"}} - :param settings_dict: dict of settings to save - """ - if not settings_dict: - return - - try: - templating.render(source='hdparm.conf', target=HDPARM_FILE, - context=settings_dict) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err.message), level=ERROR) - except Exception as e: - # The templating.render can raise a jinja2 exception if the - # template is not found. Rather than polluting the import - # space of this charm, simply catch Exception - log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e.message), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """ - This function sets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(max_sectors_size) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """ - This function gets the max_sectors_kb size of a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """ - This function gets the max_hw_sectors_kb for a given block device. - :param dev_name: Name of the block device to query - :return: int which is either the max_hw_sectors_kb or 0 on error. - """ - max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """ - This function sets the hard drive read ahead. - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. 
- """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """ - This queries blkid to get the uuid for a block device. - :param block_dev: Name of the block device to query. - :return: The UUID of the device or None on Error. - """ - try: - block_info = check_output( - ['blkid', '-o', 'export', block_dev]) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """ - Tune the max_hw_sectors if needed. - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """ - Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - This function will change the read ahead sectors and the max write - sectors for each block device. - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. 
Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - return - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" - - -class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name - self.identifier = identifier - self.host = host - self.rack = rack - self.row = row - self.datacenter = datacenter - self.chassis = chassis - self.root = root - - def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_weight(osd_id): - """ - Returns the weight of the specified OSD - :return: Float :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = check_output( - ['ceph', 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - # Make sure children are present in the json - if not json_tree['nodes']: - return None - for device in json_tree['nodes']: - if device['type'] == 'osd' and device['name'] == osd_id: - return device['crush_weight'] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def get_osd_tree(service): - """ - Returns the current osd map in JSON. - :return: List. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - tree = check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - try: - json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) - ) - return crush_list - except ValueError as v: - log("Unable to parse ceph tree json: {}. 
Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e.message)) - raise - - -def _get_child_dirs(path): - """Returns a list of directory names in the specified path. - - :param path: a full path listing of the parent directory to return child - directory names - :return: list. A list of child directories under the parent directory - :raises: ValueError if the specified path does not exist or is not a - directory, - OSError if an error occurs reading the directory listing - """ - if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) - if not os.path.isdir(path): - raise ValueError('Specified path "%s" is not a directory' % path) - - files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] - return list(filter(os.path.isdir, files_in_dir)) - - -def _get_osd_num_from_dirname(dirname): - """Parses the dirname and returns the OSD id. - - Parses a string in the form of 'ceph-{osd#}' and returns the osd number - from the directory name. - - :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided - directory name. - """ - match = re.search('ceph-(?P\d+)', dirname) - if not match: - raise ValueError("dirname not in correct format: %s" % dirname) - - return match.group('osd_id') - - -def get_local_osd_ids(): - """ - This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of osd identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """ - This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list - - :return: list. A list of monitor identifiers :raise: OSError if - something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search('ceph-(?P.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - cache = apt_cache() - package = "ceph" - try: - pkg = cache[package] - except: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - # package is known, but no version is currently installed. 
- e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: %s" % msg, - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for quorum to be reached") - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation -] - - -def umount(mount_point): - """ - This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - If umount returns EBUSY this will lazy unmount. - :param mount_point: str. A String representing the filesystem mount point - :return: int. Returns 0 on success. errno otherwise. - """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """ - This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. 
Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]) - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - -def get_partition_list(dev): - """ - Lists the partitions of a block device - :param dev: Path to a block device. ex: /dev/sda - :return: :raise: Returns a list of Partition objects. - Raises CalledProcessException if lsblk fails - """ - partitions_list = [] - try: - partitions = get_partitions(dev) - # For each line of output - for partition in partitions: - parts = partition.split() - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) - return partitions_list - except subprocess.CalledProcessError: - raise - - -def is_osd_disk(dev): - partitions = get_partition_list(dev) - for partition in partitions: - try: - info = check_output(['sgdisk', '-i', partition.number, dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError as e: - log("sgdisk inspection of partition {} on {} failed with " - "error: {}. 
Skipping".format(partition.minor, dev, e.message), - level=ERROR) - return False - - -def start_osds(devices): - # Scan for ceph block devices - rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = check_output(cmd) - - return "{}==".format(res.split('=')[1].strip()) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - return element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(pool_list=None): - return get_named_key(name='radosgw.gateway', - caps=_radosgw_caps, - pool_list=pool_list) - - -def get_mds_key(name): - return create_named_keyring(entity='mds', - name=name, - caps=mds_caps) - - -_mds_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-mds' - ] -} - - -def get_mds_bootstrap_key(): - return 
get_named_key('bootstrap-mds', - _mds_bootstrap_caps_profile) - - -_default_caps = collections.OrderedDict([ - ('mon', ['allow r']), - ('osd', ['allow rwx']), -]) - -admin_caps = collections.OrderedDict([ - ('mds', ['allow *']), - ('mon', ['allow *']), - ('osd', ['allow *']) -]) - -mds_caps = collections.OrderedDict([ - ('osd', ['allow *']), - ('mds', ['allow']), - ('mon', ['allow rwx']), -]) - -osd_upgrade_caps = collections.OrderedDict([ - ('mon', ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ]) -]) - - -def create_named_keyring(entity, name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, - name=name), - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(check_output(cmd).strip()) # IGNORE:E1103 - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None, pool_list=None): - """ - Retrieve a specific named cephx key - :param name: String Name of key to get. - :param pool_list: The list of pools to give access to - :param caps: dict of cephx capabilities - :return: Returns a cephx key - """ - try: - # Does the key already exist? - output = check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - 'client.{}'.format(name), - ]).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! 
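When the key does not exist yet, the code below extends the base command with one (subsystem, caps) pair per entry in the caps dict; for the _radosgw_caps shown earlier, the composed command comes out as (hostname illustrative):

    cmd = ['sudo', '-u', 'ceph', 'ceph', '--name', 'mon.',
           '--keyring', '/var/lib/ceph/mon/ceph-myhost/keyring',
           'auth', 'get-or-create', 'client.radosgw.gateway',
           'mon', 'allow rw', 'osd', 'allow rwx']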
- log("Creating new key for {}".format(name), level=DEBUG) - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(check_output(cmd).strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps): - """ Upgrade key to have capabilities caps """ - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' - - -def bootstrap_monitor_cluster(secret): - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) - # end changes for Ceph >= 0.61.3 - try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. 
- cmd = ['ceph-create-keys', '--id', hostname] - subprocess.check_call(cmd) - except: - raise - finally: - os.unlink(keyring) - - -def bootstrap_manager(): - hostname = socket.gethostname() - path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) - keyring = os.path.join(path, 'keyring') - - if os.path.exists(keyring): - log('bootstrap_manager: mgr already initialized.') - else: - mkdir(path, owner=ceph_user(), group=ceph_user()) - subprocess.check_call(['ceph', 'auth', 'get-or-create', - 'mgr.{}'.format(hostname), 'mon', - 'allow profile mgr', 'osd', 'allow *', - 'mds', 'allow *', '--out-file', - keyring]) - chownr(path, ceph_user(), ceph_user()) - - unit = 'ceph-mgr@{}'.format(hostname) - subprocess.check_call(['systemctl', 'enable', unit]) - service_restart(unit) - - -def update_monfs(): - hostname = socket.gethostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): - log('Looks like {} is already an OSD data' - ' or journal, skipping.'.format(journal_dev)) - return - zap_disk(journal_dev) - log("Zapped journal device {}".format(journal_dev)) - - -def get_partitions(dev): - cmd = ['partx', '--raw', '--noheadings', dev] - try: - out = check_output(cmd).splitlines() - log("get partitions: {}".format(out), level=DEBUG) - return out - except subprocess.CalledProcessError as e: - log("Can't get info for {0}: {1}".format(dev, e.output)) - return [] - - -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): - if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt, - bluestore) - else: - osdize_dir(dev, encrypt) - - -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev) and not reformat_osd: - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: - cmd.append('--fs-type') - cmd.append(osd_format) - - if reformat_osd: - cmd.append('--zap-disk') - - # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - - cmd.append(dev) - - if osd_journal: - least_used = find_least_used_journal(osd_journal) - cmd.append(least_used) - else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) - - 
try: - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - raise - - -def osdize_dir(path, encrypt=False): - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return - - if cmp_pkgrevno('ceph', "0.56.6") < 0: - log('Unable to use directories for OSDs with ceph < 0.56.6', - level=ERROR) - return - - mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - chownr('/var/lib/ceph', ceph_user(), ceph_user()) - cmd = [ - 'sudo', '-u', ceph_user(), - 'ceph-disk', - 'prepare', - '--data-dir', - path - ] - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - log("osdize dir cmd: {}".format(cmd)) - subprocess.check_call(cmd) - - -def filesystem_mounted(fs): - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 - - -def get_running_osds(): - """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] - try: - result = check_output(cmd) - return result.split() - except subprocess.CalledProcessError: - return [] - - -def get_cephfs(service): - """ - List the Ceph Filesystems that exist - :rtype : list. Returns a list of the ceph filesystems - :param service: The service name to run the ceph command under - """ - if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph - return [] - try: - output = check_output(["ceph", - '--id', service, - "fs", "ls"]) - if not output: - return [] - """ - Example subprocess output: - 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata, - data pools: [ip-172-31-23-165_data ]\n' - output: filesystems: ['ip-172-31-23-165'] - """ - filesystems = [] - for line in output.splitlines(): - parts = line.split(',') - for part in parts: - if "name" in part: - filesystems.append(part.split(' ')[1]) - except subprocess.CalledProcessError: - return [] - - -def wait_for_all_monitors_to_upgrade(new_version, upgrade_key): - """ - Fairly self explanatory name. This function will wait - for all monitors in the cluster to upgrade or it will - return after a timeout period has expired. - :param new_version: str of the version to watch - :param upgrade_key: the cephx key name to use - """ - done = False - start_time = time.time() - monitor_list = [] - - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - while not done: - try: - done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format( - "mon", mon, new_version - )) for mon in monitor_list) - current_time = time.time() - if current_time > (start_time + 10 * 60): - raise Exception - else: - # Wait 30 seconds and test again if all monitors are upgraded - time.sleep(30) - except subprocess.CalledProcessError: - raise - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -def roll_monitor_cluster(new_version, upgrade_key): - """ - This is tricky to get right so here's what we're going to do. - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous monitor is upgraded yet. 
- """ - log('roll_monitor_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - monitor_list = [] - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - else: - status_set('blocked', 'Unable to get monitor cluster information') - sys.exit(1) - log('monitor_list: {}'.format(monitor_list)) - - # A sorted list of osd unit names - mon_sorted_list = sorted(monitor_list) - - try: - position = mon_sorted_list.index(my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('waiting', - 'Waiting on {} to finish upgrading'.format( - mon_sorted_list[position - 1])) - wait_on_previous_node(upgrade_key=upgrade_key, - service='mon', - previous_node=mon_sorted_list[position - 1], - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - except ValueError: - log("Failed to find {} in list {}.".format( - my_name, mon_sorted_list)) - status_set('blocked', 'failed to upgrade monitor') - - -def upgrade_monitor(new_version): - current_version = get_version() - status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - try: - if systemd(): - for mon_id in get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) - else: - service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - - # Ensure the files and directories under /var/lib/ceph is chowned - # properly as part of the move to the Jewel release, which moved the - # ceph daemons to running as ceph:ceph instead of root:root. - if new_version == 'jewel': - # Ensure the ownership of Ceph's directories is correct - owner = ceph_user() - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=owner, - group=owner, - follow_links=True) - - if systemd(): - for mon_id in get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) - else: - service_start('ceph-mon-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def lock_and_roll(upgrade_key, service, my_name, version): - start_timestamp = time.time() - - log('monitor_key_set {}_{}_{}_start {}'.format( - service, - my_name, - version, - start_timestamp)) - monitor_key_set(upgrade_key, "{}_{}_{}_start".format( - service, my_name, version), start_timestamp) - log("Rolling") - - # This should be quick - if service == 'osd': - upgrade_osd(version) - elif service == 'mon': - upgrade_monitor(version) - else: - log("Unknown service {}. 
Unable to upgrade".format(service), - level=ERROR) - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_{}_done {}'.format(service, - my_name, - version, - stop_timestamp)) - status_set('maintenance', 'Finishing upgrade') - monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, - my_name, - version), - stop_timestamp) - - -def wait_on_previous_node(upgrade_key, service, previous_node, version): - log("Previous node is: {}".format(previous_node)) - - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( - upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - -def get_upgrade_position(osd_sorted_list, match_name): - for index, item in enumerate(osd_sorted_list): - if item.name == match_name: - return index - return None - - -# Edge cases: -# 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. -# It rolls an entire server at a time. -def roll_osd_cluster(new_version, upgrade_key): - """ - This is tricky to get right so here's what we're going to do. - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. - - TODO: If you're not in the same failure domain it's safe to upgrade - 1. Examine all pools and adopt the most strict failure domain policy - Example: Pool 1: Failure domain = rack - Pool 2: Failure domain = host - Pool 3: Failure domain = row - - outcome: Failure domain = host - """ - log('roll_osd_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names - osd_sorted_list = sorted(osd_tree) - log("osd_sorted_list: {}".format(osd_sorted_list)) - - try: - position = get_upgrade_position(osd_sorted_list, my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! 
Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('blocked', - 'Waiting on {} to finish upgrading'.format( - osd_sorted_list[position - 1].name)) - wait_on_previous_node( - upgrade_key=upgrade_key, - service='osd', - previous_node=osd_sorted_list[position - 1].name, - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - except ValueError: - log("Failed to find name {} in list {}".format( - my_name, osd_sorted_list)) - status_set('blocked', 'failed to upgrade osd') - - -def upgrade_osd(new_version): - current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( - err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - try: - # Upgrade the packages before restarting the daemons. - status_set('maintenance', 'Upgrading packages to %s' % new_version) - apt_install(packages=determine_packages(), fatal=True) - - # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart - # all of the OSDs at the same time as this will be the fastest - # way to update the code on the node. - if not dirs_need_ownership_update('osd'): - log('Restarting all OSDs to load new binaries', DEBUG) - service_restart('ceph-osd-all') - return - - # Need to change the ownership of all directories which are not OSD - # directories as well. - # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. - update_owner(CEPH_BASE_DIR, recurse_dirs=False) - non_osd_dirs = filter(lambda x: not x == 'osd', - os.listdir(CEPH_BASE_DIR)) - non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), - non_osd_dirs) - for path in non_osd_dirs: - update_owner(path) - - # Fast service restart wasn't an option because each of the OSD - # directories need the ownership updated for all the files on - # the OSD. Walk through the OSDs one-by-one upgrading the OSD. - for osd_dir in _get_child_dirs(OSD_BASE_DIR): - try: - osd_num = _get_osd_num_from_dirname(osd_dir) - _upgrade_single_osd(osd_num, osd_dir) - except ValueError as ex: - # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), - WARNING) - continue - - except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def _upgrade_single_osd(osd_num, osd_dir): - """Upgrades the single OSD directory. - - :param osd_num: the num of the OSD - :param osd_dir: the directory of the OSD to upgrade - :raises CalledProcessError: if an error occurs in a command issued as part - of the upgrade process - :raises IOError: if an error occurs reading/writing to a file as part - of the upgrade process - """ - stop_osd(osd_num) - disable_osd(osd_num) - update_owner(osd_dir) - enable_osd(osd_num) - start_osd(osd_num) - - -def stop_osd(osd_num): - """Stops the specified OSD number. 
- - :param osd_num: the osd number to stop - """ - if systemd(): - service_stop('ceph-osd@{}'.format(osd_num)) - else: - service_stop('ceph-osd', id=osd_num) - - -def start_osd(osd_num): - """Starts the specified OSD number. - - :param osd_num: the osd number to start. - """ - if systemd(): - service_start('ceph-osd@{}'.format(osd_num)) - else: - service_start('ceph-osd', id=osd_num) - - -def disable_osd(osd_num): - """Disables the specified OSD number. - - Ensures that the specified osd will not be automatically started at the - next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be - started manually. - - :param osd_num: the osd id which should be disabled. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - to disable the OSD - :raises IOError, OSError: if the attempt to read/remove the ready file in - an upstart enabled system fails - """ - if systemd(): - # When running under systemd, the individual ceph-osd daemons run as - # templated units and can be directly addressed by referring to the - # templated service name ceph-osd@. Additionally, systemd - # allows one to disable a specific templated unit by running the - # 'systemctl disable ceph-osd@' command. When disabled, the - # OSD should remain disabled until re-enabled via systemd. - # Note: disabling an already disabled service in systemd returns 0, so - # no need to check whether it is enabled or not. - cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] - subprocess.check_call(cmd) - else: - # Neither upstart nor the ceph-osd upstart script provides for - # disabling the starting of an OSD automatically. The specific OSD - # cannot be prevented from running manually, however it can be - # prevented from running automatically on reboot by removing the - # 'ready' file in the OSD's root directory. This is due to the - # ceph-osd-all upstart script checking for the presence of this file - # before starting the OSD. - ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), - 'ready') - if os.path.exists(ready_file): - os.unlink(ready_file) - - -def enable_osd(osd_num): - """Enables the specified OSD number. - - Ensures that the specified osd_num will be enabled and ready to start - automatically in the event of a reboot. - - :param osd_num: the osd id which should be enabled. - :raises CalledProcessError: if the call to the systemd command issued - fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart - enabled system fails - """ - if systemd(): - cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)] - subprocess.check_call(cmd) - else: - # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' - # file. Make sure that file exists. - ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), - 'ready') - with open(ready_file, 'w') as f: - f.write('ready') - - # Make sure the correct user owns the file. It shouldn't be necessary - # as the upstart script should run with root privileges, but its better - # to have all the files matching ownership. - update_owner(ready_file) - - -def update_owner(path, recurse_dirs=True): - """Changes the ownership of the specified path. - - Changes the ownership of the specified path to the new ceph daemon user - using the system's native chown functionality. 
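The disable_osd/enable_osd pair above leans on ceph's systemd templated
units. A minimal standalone sketch of the same calls (the osd id is
illustrative; ceph-osd@<id> is the unit naming that ceph's packaging
installs):

    import subprocess

    def set_osd_autostart(osd_num, enabled):
        # enable/disable persists across reboots, but neither stops a
        # running daemon nor blocks a manual start.
        action = 'enable' if enabled else 'disable'
        subprocess.check_call(
            ['systemctl', action, 'ceph-osd@{}'.format(osd_num)])

    set_osd_autostart(3, False)   # keep osd.3 from starting on reboot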
This may take awhile, - so this method will issue a set_status for any changes of ownership which - recurses into directory structures. - - :param path: the path to recursively change ownership for - :param recurse_dirs: boolean indicating whether to recursively change the - ownership of all the files in a path's subtree or to - simply change the ownership of the path. - :raises CalledProcessError: if an error occurs issuing the chown system - command - """ - user = ceph_user() - user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) - cmd = ['chown', user_group, path] - if os.path.isdir(path) and recurse_dirs: - status_set('maintenance', ('Updating ownership of %s to %s' % - (path, user))) - cmd.insert(1, '-R') - - log('Changing ownership of {path} to {user}'.format( - path=path, user=user_group), DEBUG) - start = datetime.now() - subprocess.check_call(cmd) - elapsed_time = (datetime.now() - start) - - log('Took {secs} seconds to change the ownership of path: {path}'.format( - secs=elapsed_time.total_seconds(), path=path), DEBUG) - - -def list_pools(service): - """ - This will list the current pools that Ceph has - - :param service: String service id to run under - :return: list. Returns a list of the ceph pools. Raises CalledProcessError - if the subprocess fails to run. - """ - try: - pool_list = [] - pools = check_output(['rados', '--id', service, 'lspools']) - for pool in pools.splitlines(): - pool_list.append(pool) - return pool_list - except subprocess.CalledProcessError as err: - log("rados lspools failed with error: {}".format(err.output)) - raise - - -def dirs_need_ownership_update(service): - """Determines if directories still need change of ownership. - - Examines the set of directories under the /var/lib/ceph/{service} directory - and determines if they have the correct ownership or not. This is - necessary due to the upgrade from Hammer to Jewel where the daemon user - changes from root: to ceph:. - - :param service: the name of the service folder to check (e.g. osd, mon) - :return: boolean. True if the directories need a change of ownership, - False otherwise. - :raises IOError: if an error occurs reading the file stats from one of - the child directories. - :raises OSError: if the specified path does not exist or some other error - """ - expected_owner = expected_group = ceph_user() - path = os.path.join(CEPH_BASE_DIR, service) - for child in _get_child_dirs(path): - curr_owner, curr_group = owner(child) - - if (curr_owner == expected_owner) and (curr_group == expected_group): - continue - - log('Directory "%s" needs its ownership updated' % child, DEBUG) - return True - - # All child directories had the expected ownership - return False - -# A dict of valid ceph upgrade paths. Mapping is old -> new -UPGRADE_PATHS = { - 'firefly': 'hammer', - 'hammer': 'jewel', -} - -# Map UCA codenames to ceph codenames -UCA_CODENAME_MAP = { - 'icehouse': 'firefly', - 'juno': 'firefly', - 'kilo': 'hammer', - 'liberty': 'hammer', - 'mitaka': 'jewel', -} - - -def pretty_print_upgrade_paths(): - '''Pretty print supported upgrade paths for ceph''' - lines = [] - for key, value in UPGRADE_PATHS.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines - - -def resolve_ceph_version(source): - ''' - Resolves a version of ceph based on source configuration - based on Ubuntu Cloud Archive pockets. 
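A hedged example of the lookup this function performs (the source string
is illustrative of the charm's 'source' option format):

    # source='cloud:trusty-mitaka' resolves to the UCA pocket 'mitaka', so:
    UCA_CODENAME_MAP.get('mitaka')   # -> 'jewel'
    UCA_CODENAME_MAP.get('newton')   # -> None, i.e. not resolvable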
- - @param: source: source configuration option of charm - @returns: ceph release codename or None if not resolvable - ''' - os_release = get_os_codename_install_source(source) - return UCA_CODENAME_MAP.get(os_release) - - -def get_ceph_pg_stat(): - """ - Returns the result of ceph pg stat - :return: dict - """ - try: - tree = check_output(['ceph', 'pg', 'stat', '--format=json']) - try: - json_tree = json.loads(tree) - if not json_tree['num_pg_by_state']: - return None - return json_tree - except ValueError as v: - log("Unable to parse ceph pg stat json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph pg stat command failed with message: {}".format( - e.message)) - raise - - -def get_ceph_health(): - """ - Returns the health of the cluster from a 'ceph status' - :return: dict - Also raises CalledProcessError if our ceph command fails - To get the overall status, use get_ceph_health()['overall_status'] - """ - try: - tree = check_output( - ['ceph', 'status', '--format=json']) - try: - json_tree = json.loads(tree) - # Make sure children are present in the json - if not json_tree['overall_status']: - return None - return json_tree - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) - raise - except subprocess.CalledProcessError as e: - log("ceph status command failed with message: {}".format( - e.message)) - raise - - -def reweight_osd(osd_num, new_weight): - """ - Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed - :param new_weight: the new weight for the OSD - :returns: bool. True if output looks right, else false. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - """ - try: - cmd_result = subprocess.check_output( - ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), - new_weight], stderr=subprocess.STDOUT) - expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( - ID=osd_num) + " to {}".format(new_weight) - log(cmd_result) - if expected_result in cmd_result: - return True - return False - except subprocess.CalledProcessError as e: - log("ceph osd crush reweight command failed with message: {}".format( - e.message)) - raise - - -def determine_packages(): - ''' - Determines packages for installation. - - @returns: list of ceph packages - ''' - if is_container(): - PACKAGES.remove('ntp') - return PACKAGES diff --git a/ceph-mon/lib/ceph/ceph_broker.py b/ceph-mon/lib/ceph/broker.py similarity index 88% rename from ceph-mon/lib/ceph/ceph_broker.py rename to ceph-mon/lib/ceph/broker.py index 1f6db8c8..b071b91e 100644 --- a/ceph-mon/lib/ceph/ceph_broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -1,5 +1,3 @@ -#!/usr/bin/python -# # Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,19 +14,21 @@ import json import os + from tempfile import NamedTemporaryFile +from ceph.utils import ( + get_cephfs, + get_osd_weight +) +from ceph.crush_utils import Crushmap + from charmhelpers.core.hookenv import ( log, DEBUG, INFO, ERROR, ) -from ceph import ( - get_cephfs, - get_osd_weight -) -from ceph.ceph_helpers import Crushmap from charmhelpers.contrib.storage.linux.ceph import ( create_erasure_profile, delete_pool, @@ -112,6 +112,9 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. + + :param reqs: dict of request parameters. + :returns: dict. 
exit-code and reason if not 0 """ request_id = reqs.get('request-id') try: @@ -140,6 +143,12 @@ def process_requests(reqs): def handle_create_erasure_profile(request, service): + """Create an erasure profile. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ # "local" | "shec" or it defaults to "jerasure" erasure_type = request.get('erasure-type') # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket @@ -160,10 +169,9 @@ def handle_create_erasure_profile(request, service): def handle_add_permissions_to_key(request, service): - """ - Groups are defined by the key cephx.groups.(namespace-)?-(name). This key - will contain a dict serialized to JSON with data about the group, including - pools and members. + """Groups are defined by the key cephx.groups.(namespace-)?-(name). This + key will contain a dict serialized to JSON with data about the group, + including pools and members. A group can optionally have a namespace defined that will be used to further restrict pool access. @@ -238,8 +246,7 @@ def pool_permission_list_for_service(service): def get_service_groups(service, namespace=None): - """ - Services are objects stored with some metadata, they look like (for a + """Services are objects stored with some metadata, they look like (for a service named "nova"): { group_names: {'rwx': ['images']}, @@ -272,7 +279,7 @@ def get_service_groups(service, namespace=None): def _build_service_groups(service, namespace=None): - '''Rebuild the 'groups' dict for a service group + """Rebuild the 'groups' dict for a service group :returns: dict: dictionary keyed by group name of the following format: @@ -287,7 +294,7 @@ def _build_service_groups(service, namespace=None): services: ['nova'] } } - ''' + """ all_groups = {} for _, groups in service['group_names'].items(): for group in groups: @@ -299,8 +306,7 @@ def _build_service_groups(service, namespace=None): def get_group(group_name): - """ - A group is a structure to hold data about a named group, structured as: + """A group is a structure to hold data about a named group, structured as: { pools: ['glance'], services: ['nova'] @@ -344,6 +350,12 @@ def get_group_key(group_name): def handle_erasure_pool(request, service): + """Create a new erasure coded pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') quota = request.get('max-bytes') @@ -390,6 +402,12 @@ def handle_erasure_pool(request, service): def handle_replicated_pool(request, service): + """Create a new replicated pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') replicas = request.get('replicas') quota = request.get('max-bytes') @@ -441,6 +459,13 @@ def handle_replicated_pool(request, service): def handle_create_cache_tier(request, service): + """Create a cache tier on a cold pool. Modes supported are + "writeback" and "readonly". + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 + """ # mode = "writeback" | "readonly" storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') @@ -462,6 +487,12 @@ def handle_create_cache_tier(request, service): def handle_remove_cache_tier(request, service): + """Remove a cache tier from the cold pool. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') # cache and storage pool must exist first @@ -477,6 +508,12 @@ def handle_remove_cache_tier(request, service): def handle_set_pool_value(request, service): + """Sets an arbitrary pool value. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ # Set arbitrary pool values params = {'pool': request.get('name'), 'key': request.get('key'), @@ -501,6 +538,12 @@ def handle_set_pool_value(request, service): def handle_rgw_regionmap_update(request, service): + """Change the radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ name = request.get('client-name') if not name: msg = "Missing rgw-region or client-name params" @@ -516,6 +559,12 @@ def handle_rgw_regionmap_update(request, service): def handle_rgw_regionmap_default(request, service): + """Create a radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ region = request.get('rgw-region') name = request.get('client-name') if not region or not name: @@ -537,6 +586,12 @@ def handle_rgw_regionmap_default(request, service): def handle_rgw_zone_set(request, service): + """Create a radosgw zone. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ json_file = request.get('zone-json') name = request.get('client-name') region_name = request.get('region-name') @@ -567,6 +622,12 @@ def handle_rgw_zone_set(request, service): def handle_put_osd_in_bucket(request, service): + """Move an osd into a specified crush bucket. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ osd_id = request.get('osd') target_bucket = request.get('bucket') if not osd_id or not target_bucket: @@ -597,6 +658,12 @@ def handle_put_osd_in_bucket(request, service): def handle_rgw_create_user(request, service): + """Create a new rados gateway user. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ user_id = request.get('rgw-uid') display_name = request.get('display-name') name = request.get('client-name') @@ -630,11 +697,11 @@ def handle_rgw_create_user(request, service): def handle_create_cephfs(request, service): - """ - Create a new cephfs. + """Create a new cephfs. + :param request: The broker request - :param service: The cephx user to run this command under - :return: + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') @@ -678,6 +745,12 @@ def handle_create_cephfs(request, service): def handle_rgw_region_set(request, service): # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + """Set the rados gateway region. + + :param request: dict. The broker request. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ json_file = request.get('region-json') name = request.get('client-name') region_name = request.get('region-name') diff --git a/ceph-mon/lib/ceph/ceph_helpers.py b/ceph-mon/lib/ceph/ceph_helpers.py deleted file mode 100644 index 11f5dd8c..00000000 --- a/ceph-mon/lib/ceph/ceph_helpers.py +++ /dev/null @@ -1,1557 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import errno -import hashlib -import math -from charmhelpers.contrib.network.ip import format_ipv6_addr -import six - -import os -import shutil -import json -import time -import uuid -import re - -import subprocess -from subprocess import ( - check_call, - check_output as s_check_output, - CalledProcessError, -) -from charmhelpers.core.hookenv import (config, - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, ) -from charmhelpers.core.host import (mount, - mounts, - service_start, - service_stop, - service_running, - umount, ) -from charmhelpers.fetch import (apt_install, ) - -from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser, \ - get_host_ip - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" - -CRUSH_BUCKET = """root {name} {{ - id {id} # do not change unnecessarily - # weight 0.000 - alg straw - hash 0 # rjenkins1 -}} - -rule {name} {{ - ruleset 0 - type replicated - min_size 1 - max_size 10 - step take {name} - step chooseleaf firstn 0 type host - step emit -}}""" - -# This regular expression looks for a string like: -# root NAME { -# id NUMBER -# so that we can extract NAME and ID from the crushmap -CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") - -# This regular expression looks for ID strings in the crushmap like: -# id NUMBER -# so that we can extract the IDs from a crushmap -CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") - -# The number of placement groups per OSD to target for placement group -# calculations. 
This number is chosen as 100 due to the ceph PG Calc -# documentation recommending to choose 100 for clusters which are not -# expected to increase in the foreseeable future. Since the majority of the -# calculations are done on deployment, target the case of non-expanding -# clusters as the default. -DEFAULT_PGS_PER_OSD_TARGET = 100 -DEFAULT_POOL_WEIGHT = 10.0 -LEGACY_PG_COUNT = 200 - - -def check_output(*args, **kwargs): - ''' - Helper wrapper for py2/3 compat with subprocess.check_output - - @returns str: UTF-8 decoded representation of output - ''' - return s_check_output(*args, **kwargs).decode('UTF-8') - - -def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/ - pools/#set-pool-values - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate - :param valid_type: The type that value should be. - :param valid_range: A range of values that value can assume. - :return: - """ - assert isinstance(value, valid_type), "{} is not a {}".format(value, - valid_type) - if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) - # If we're dealing with strings - if valid_type is six.string_types: - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError("Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, - value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) - - -class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. 
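A couple of illustrative calls against the validator helper above (the
values are made up; six is already imported in this module):

    validator(value=1, valid_type=int, valid_range=[0, 2])       # passes
    validator(value='writeback', valid_type=six.string_types,
              valid_range=['readonly', 'writeback'])             # passes
    # validator(value=3, valid_type=int, valid_range=[0, 2])
    # raises AssertionError: 3 is greater than maximum allowed value of 2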
Provides - an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class Crushmap(object): - """An object oriented approach to Ceph crushmap management.""" - - def __init__(self): - """Iiitialize the Crushmap from Ceph""" - self._crushmap = self.load_crushmap() - roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) - buckets = [] - ids = list(map( - lambda x: int(x), - re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids.sort() - if roots != []: - for root in roots: - buckets.append(Crushmap.Bucket(root[0], root[1], True)) - - self._buckets = buckets - if ids != []: - self._ids = ids - else: - self._ids = [0] - - def load_crushmap(self): - try: - crush = subprocess.Popen( - ('ceph', 'osd', 'getcrushmap'), - stdout=subprocess.PIPE) - return subprocess.check_output( - ('crushtool', '-d', '-'), - stdin=crush.stdout) - except Exception as e: - log("load_crushmap error: {}".format(e)) - raise "Failed to read Crushmap" - - def ensure_bucket_is_present(self, bucket_name): - if bucket_name not in [bucket.name for bucket in self.buckets()]: - self.add_bucket(bucket_name) - self.save() - - def buckets(self): - """Return a list of buckets that are in the Crushmap.""" - return self._buckets - - def add_bucket(self, bucket_name): - """Add a named bucket to Ceph""" - new_id = min(self._ids) - 1 - self._ids.append(new_id) - self._buckets.append(Crushmap.Bucket(bucket_name, new_id)) - - def save(self): - """Persist Crushmap to Ceph""" - try: - crushmap = self.build_crushmap() - compiled = subprocess.Popen( - ('crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'), - stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - output = compiled.communicate(crushmap)[0] - ceph = subprocess.Popen( - ('ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'), - stdin=subprocess.PIPE) - ceph_output = ceph.communicate(input=output) - return ceph_output - except Exception as e: - log("save error: {}".format(e)) - raise "Failed to save crushmap" - - def build_crushmap(self): - """Modifies the curent crushmap to include the new buckets""" - tmp_crushmap = self._crushmap - for bucket in self._buckets: - if not bucket.default: - tmp_crushmap = "{}\n\n{}".format( - tmp_crushmap, - Crushmap.bucket_string(bucket.name, bucket.id)) - return tmp_crushmap - - @staticmethod - def bucket_string(name, id): - return CRUSH_BUCKET.format(name=name, id=id) - - class Bucket(object): - """An object that describes a Crush bucket.""" - - def __init__(self, name, id, default=False): - self.name = name - self.id = int(id) - self.default = default - - def __repr__(self): - return "Bucket {{Name: {name}, ID: {id}}}".format( - name=self.name, id=self.id) - - def __eq__(self, other): - """Override the default Equals behavior""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __ne__(self, other): - """Define a non-equality test""" - if isinstance(other, self.__class__): - return not self.__eq__(other) - return NotImplemented - - -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is - inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. - Instantiate a child class and call create(). 
- """ - - def __init__(self, service, name): - self.service = service - self.name = name - - # Create the pool if it doesn't exist already - # To be implemented by subclasses - def create(self): - pass - - def add_cache_tier(self, cache_pool, mode): - """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. The caching mode to use for this pool. - valid range = ["readonly", "writeback"] - :return: None - """ - # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, - valid_type=six.string_types, - valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', - self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', - cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', - self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', - cache_pool, 'hit_set_type', 'bloom']) - - def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from - writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to - remove. - :return: None - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() - if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', - self.name, cache_pool]) - - elif mode == 'writeback': - pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': - # Jewel added a mandatory flag - pool_forward_cmd.append('--yes-i-really-mean-it') - - check_call(pool_forward_cmd) - # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, - 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', - 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', - self.name, cache_pool]) - - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): - """Return the number of placement groups to use when creating the pool. - - Returns the number of placement groups which should be specified when - creating the pool. This is based upon the calculation guidelines - provided by the Ceph Placement Group Calculator (located online at - http://ceph.com/pgcalc/). - - The number of placement groups are calculated using the following: - - (Target PGs per OSD) * (OSD #) * (%Data) - ---------------------------------------- - (Pool size) - - Per the upstream guidelines, the OSD # should really be considered - based on the number of OSDs which are eligible to be selected by the - pool. Since the pool creation doesn't specify any of CRUSH set rules, - the default rule will be dependent upon the type of pool being - created (replicated or erasure). - - This code makes no attempt to determine the number of OSDs which can be - selected for the specific rule, rather it is left to the user to tune - in the form of 'expected-osd-count' config option. - - :param pool_size: int. pool_size is either the number of replicas for - replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. 
the percentage of data that is expected to - be contained in the pool for the specific OSD set. Default value - is to assume 10% of the data is for this pool, which is a - relatively low % of the data but allows for the pg_num to be - increased. NOTE: the default is primarily to handle the scenario - where related charms requiring pools has not been upgraded to - include an update to indicate their relative usage of the pools. - :return: int. The number of pgs to use. - """ - - # Note: This calculation follows the approach that is provided - # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. - validator(value=pool_size, valid_type=int) - - # Ensure that percent data is set to something - even with a default - # it can be set to None, which would wreak havoc below. - if percent_data is None: - percent_data = DEFAULT_POOL_WEIGHT - - # If the expected-osd-count is specified, then use the max between - # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) - expected = config('expected-osd-count') or 0 - - if osd_list: - osd_count = max(expected, len(osd_list)) - - # Log a message to provide some insight if the calculations claim - # to be off because someone is setting the expected count and - # there are more OSDs in reality. Try to make a proper guess - # based upon the cluster itself. - if expected and osd_count != expected: - log("Found more OSDs than provided expected count. " - "Using the actual count instead", INFO) - elif expected: - # Use the expected-osd-count in older ceph versions to allow for - # a more accurate pg calculations - osd_count = expected - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - return LEGACY_PG_COUNT - - percent_data /= 100.0 - target_pgs_per_osd = config( - 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET - num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size - - # The CRUSH algorithm has a slight optimization for placement groups - # with powers of 2 so find the nearest power of 2. If the nearest - # power of 2 is more than 25% below the original value, the next - # highest value is used. To do this, find the nearest power of 2 such - # that 2^n <= num_pg, check to see if its within the 25% tolerance. - exponent = math.floor(math.log(num_pg, 2)) - nearest = 2 ** exponent - if (num_pg - nearest) > (num_pg * 0.25): - # Choose the next highest power of 2 since the nearest is more - # than 25% below the original value. - return int(nearest * 2) - else: - return int(nearest) - - -class ReplicatedPool(Pool): - def __init__(self, - service, - name, - pg_num=None, - replicas=2, - percent_data=10.0): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - if pg_num: - # Since the number of placement groups were specified, ensure - # that there aren't too many created. 
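# Worked example with illustrative numbers: pgs-per-osd left at its
# default target of 100, 12 eligible OSDs, percent_data=10.0 and
# pool_size=3 gives num_pg = (100 * 12 * 0.10) // 3 = 40; the nearest
# power of two is 2**floor(log2(40)) = 32, and since 40 - 32 = 8 is
# within 25% of 40, get_pgs() returns 32.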
- max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) - else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - - def create(self): - if not pool_exists(self.service, self.name): - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, - service, - name, - erasure_code_profile="default", - percent_data=10.0): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. - erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), 'erasure', - self.erasure_code_profile] - try: - check_call(cmd) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" - - -def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command - under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - mon_status = check_output(['ceph', '--id', service, 'mon_status', - '--format=json']) - try: - return json.loads(mon_status) - except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) - raise - except CalledProcessError as e: - log("mon_status command failed with message: {}".format(e.message)) - raise - - -def hash_monitor_names(service): - """ - Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. The Ceph user name to run the command - under - :rtype : dict. 
json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} - """ - try: - hash_list = [] - monitor_list = get_mon_map(service=service) - if monitor_list['monmap']['mons']: - for mon in monitor_list['monmap']['mons']: - hash_list.append(hashlib.sha224(mon['name'].encode( - 'utf-8')).hexdigest()) - return sorted(hash_list) - else: - return None - except (ValueError, CalledProcessError): - raise - - -def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to delete. - """ - try: - check_output(['ceph', '--id', service, - 'config-key', 'del', str(key)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format(e.output)) - raise - - -def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to set. - :param value: The value to set. This will be converted to a string - before setting - """ - try: - check_output(['ceph', '--id', service, 'config-key', 'put', str(key), - str(value)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format(e.output)) - raise - - -def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to search for. - :return: Returns the value of that key or None if not found. - """ - try: - output = check_output(['ceph', '--id', service, 'config-key', 'get', - str(key)]) - return output - except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format(e.output)) - return None - - -def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command - under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs - """ - try: - check_call(['ceph', '--id', service, 'config-key', 'exists', str(key)]) - # I can return true here regardless because Ceph returns - # ENOENT if the key wasn't found - return True - except CalledProcessError as e: - if e.returncode == errno.ENOENT: - return False - else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) - raise - - -def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command - under - :param name: - :return: - """ - try: - out = check_output( - ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - return json.loads(out) - except (CalledProcessError, OSError, ValueError): - return None - - -def pool_set(service, pool_name, key, value): - """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value - ] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def snapshot_pool(service, pool_name, snapshot_name): - """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, - snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, - snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): - """ - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError - """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_quota(service, pool_name): - """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_erasure_profile(service, profile_name): - """ - Create a new erasure code profile if one does not already exist for it. - Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/ - rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command - under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def create_erasure_profile(service, - profile_name, - erasure_plugin_name='jerasure', - failure_domain='host', - data_chunks=2, - coding_chunks=1, - locality=None, - durability_estimator=None): - """ - Create a new erasure code profile if one does not already exist for it. - Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/ - rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command - under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. One of ['chassis', 'datacenter', - 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) - :param data_chunks: int - :param coding_chunks: int - :param locality: int - :param durability_estimator: int - :return: None. 
Can raise CalledProcessError - """ - # Ensure this failure_domain is allowed by Ceph - validator(failure_domain, six.string_types, - ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', - 'region', 'room', 'root', 'row']) - - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', - profile_name, 'plugin=' + erasure_plugin_name, - 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] - if locality is not None and durability_estimator is not None: - raise ValueError( - "create_erasure_profile should be called with k, m and one of l " - "or c but not both.") - - # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) - - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - - try: - check_call(cmd) - except CalledProcessError: - raise - - -def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command - under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None - """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name - ] - check_call(cmd) - - -def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command - under - :param name: six.string_types - :return: int or None - """ - validator(value=name, valid_type=six.string_types) - try: - check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', - 'get', name]) - return True - except CalledProcessError: - return False - - -def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command - under - :param pool_name: six.string_types - :return: int or None - """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json' - ]) - try: - osd_json = json.loads(out) - for pool in osd_json['pools']: - if pool['pool_name'] == pool_name: - return pool['cache_mode'] - return None - except ValueError: - raise - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, 'lspools']) - except CalledProcessError: - return False - - return name in out.split() - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. 
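A hedged usage sketch for create_erasure_profile above (the profile name
and chunk counts are illustrative; 'admin' is a typical client id):

    create_erasure_profile(service='admin',
                           profile_name='shec-4-3-2',
                           erasure_plugin_name='shec',
                           failure_domain='rack',
                           data_chunks=4,
                           coding_chunks=3,
                           durability_estimator=2)
    # runs, as client.admin: ceph osd erasure-code-profile set shec-4-3-2
    #   plugin=shec k=4 m=3 ruleset_failure_domain=rack c=2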
- """ - version = ceph_version() - if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json'])) - - return None - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', service, '--pool', pool - ]) - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" - keyring = _keyring_path(service) - if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' 
% keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - create_keyring(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']) - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = os.errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), - level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. - os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, - pool, - rbd_img, - sizemb, - mount_point, - blk_device, - fstype, - system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. - - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. 
- if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), - level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.'.format(svc), - level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - Returns False if no ceph key is available in relation state. - """ - key = None - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - create_keyring(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -def get_mon_hosts(): - """ - Helper function to gather up the ceph monitor host public addresses - :return: list. Returns a list of ip_address:port - """ - hosts = [] - for relid in relation_ids('mon'): - for unit in related_units(relid): - addr = \ - relation_get('ceph-public-address', - unit, - relid) or get_host_ip( - relation_get( - 'private-address', - unit, - relid)) - - if addr: - hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) - - hosts.sort() - return hosts - - -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op_create_pool( - self, name, replica_count=3, - pg_num=None, weight=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. 
- @param weight: the percentage of data the pool makes up - """ - if pg_num and weight: - raise ValueError('pg_num and weight are mutually exclusive') - - self.ops.append({'op': 'create-pool', - 'name': name, - 'replicas': replica_count, - 'pg_num': pg_num, - 'weight': weight}) - - def set_ops(self, ops): - """Set request ops to provided value. - - Useful for injecting ops that come from a previous request - to allow comparisons to ensure validity. - """ - self.ops = ops - - @property - def request(self): - return json.dumps({'api-version': self.api_version, - 'ops': self.ops, - 'request-id': self.request_id}) - - def _ops_equal(self, other): - if len(self.ops) == len(other.ops): - for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: - if self.ops[req_no].get(key) != other.ops[req_no].get(key): - return False - else: - return False - return True - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - if self.api_version == other.api_version and \ - self._ops_equal(other): - return True - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -class CephBrokerRsp(object): - """Ceph broker response. - - Response is json-decoded and contents provided as methods/properties. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, encoded_rsp): - self.api_version = None - self.rsp = json.loads(encoded_rsp) - - @property - def request_id(self): - return self.rsp.get('request-id') - - @property - def exit_code(self): - return self.rsp.get('exit-code') - - @property - def exit_msg(self): - return self.rsp.get('stderr') - - -# Ceph Broker Conversation: -# If a charm needs an action to be taken by ceph it can create a CephBrokerRq -# and send that request to ceph via the ceph relation. The CephBrokerRq has a -# unique id so that the client can identity which CephBrokerRsp is associated -# with the request. Ceph will also respond to each client unit individually -# creating a response key per client unit eg glance/0 will get a CephBrokerRsp -# via key broker-rsp-glance-0 -# -# To use this the charm can just do something like: -# -# from charmhelpers.contrib.storage.linux.ceph import ( -# send_request_if_needed, -# is_request_complete, -# CephBrokerRq, -# ) -# -# @hooks.hook('ceph-relation-changed') -# def ceph_changed(): -# rq = CephBrokerRq() -# rq.add_op_create_pool(name='poolname', replica_count=3) -# -# if is_request_complete(rq): -# -# else: -# send_request_if_needed(get_ceph_request()) -# -# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example -# of glance having sent a request to ceph which ceph has successfully processed -# 'ceph:8': { -# 'ceph/0': { -# 'auth': 'cephx', -# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', -# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', -# 'ceph-public-address': '10.5.44.103', -# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', -# 'private-address': '10.5.44.103', -# }, -# 'glance/0': { -# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' -# '"ops": [{"replicas": 3, "name": "glance", ' -# '"op": "create-pool"}]}'), -# 'private-address': '10.5.44.109', -# }, -# } - - -def get_previous_request(rid): - """Return the last ceph broker request sent on a given relation - - @param rid: Relation id to query for request - """ - request = None - broker_req = relation_get(attribute='broker_req', - rid=rid, - unit=local_unit()) - if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request - - -def get_request_states(request, relation='ceph'): - """Return a dict of requests per relation id with their corresponding - completion state. - - This allows a charm, which has a request for ceph, to see whether there is - an equivalent request already being processed and if so what state that - request is in. - - @param request: A CephBrokerRq object - """ - complete = [] - requests = {} - for rid in relation_ids(relation): - complete = False - previous_request = get_previous_request(rid) - if request == previous_request: - sent = True - complete = is_request_complete_for_rid(previous_request, rid) - else: - sent = False - complete = False - - requests[rid] = {'sent': sent, 'complete': complete, } - - return requests - - -def is_request_sent(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been sent - - Returns True if a similair request has been sent - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['sent']: - return False - - return True - - -def is_request_complete(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been - completed - - Returns True if a similair request has been completed - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['complete']: - return False - - return True - - -def is_request_complete_for_rid(request, rid): - """Check if a given request has been completed on the given relation - - @param request: A CephBrokerRq object - @param rid: Relation ID - """ - broker_key = get_broker_rsp_key() - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if rdata.get(broker_key): - rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == request.request_id: - if not rsp.exit_code: - return True - else: - # The remote unit sent no reply targeted at this unit so either the - # remote ceph cluster does not support unit targeted replies or it - # has not processed our request yet. 
- if rdata.get('broker_rsp'): - request_data = json.loads(rdata['broker_rsp']) - if request_data.get('request-id'): - log('Ignoring legacy broker_rsp without unit key as remote' - ' service supports unit specific replies', - level=DEBUG) - else: - log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies', - level=DEBUG) - rsp = CephBrokerRsp(rdata['broker_rsp']) - if not rsp.exit_code: - return True - - return False - - -def get_broker_rsp_key(): - """Return broker response key for this unit - - This is the key that ceph is going to use to pass request status - information back to this unit - """ - return 'broker-rsp-' + local_unit().replace('/', '-') - - -def send_request_if_needed(request, relation='ceph'): - """Send broker request if an equivalent request has not already been sent - - @param request: A CephBrokerRq object - """ - if is_request_sent(request, relation=relation): - log('Request already sent but not complete, not sending new request', - level=DEBUG) - else: - for rid in relation_ids(relation): - log('Sending request {}'.format(request.request_id), level=DEBUG) - relation_set(relation_id=rid, broker_req=request.request) - - -class CephConfContext(object): - """Ceph config (ceph.conf) context. - - Supports user-provided Ceph configuration settings. Use can provide a - dictionary as the value for the config-flags charm option containing - Ceph configuration settings keyede by their section in ceph.conf. - """ - - def __init__(self, permitted_sections=None): - self.permitted_sections = permitted_sections or [] - - def __call__(self): - conf = config('config-flags') - if not conf: - return {} - - conf = config_flags_parser(conf) - if type(conf) != dict: - log("Provided config-flags is not a dictionary - ignoring", - level=WARNING) - return {} - - permitted = self.permitted_sections - if permitted: - diff = set(conf.keys()).difference(set(permitted)) - if diff: - log("Config-flags contains invalid keys '%s' - they will be " - "ignored" % (', '.join(diff)), - level=WARNING) - - ceph_conf = {} - for key in conf: - if permitted and key not in permitted: - log("Ignoring key '%s'" % key, level=WARNING) - continue - - ceph_conf[key] = conf[key] - - return ceph_conf diff --git a/ceph-mon/lib/ceph/crush_utils.py b/ceph-mon/lib/ceph/crush_utils.py new file mode 100644 index 00000000..1c777f34 --- /dev/null +++ b/ceph-mon/lib/ceph/crush_utils.py @@ -0,0 +1,149 @@ +# Copyright 2014 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
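+
+# Illustrative usage sketch (nothing below is part of the module API;
+# the bucket name 'fast-storage' and the import path, which assumes the
+# charm's lib directory is on sys.path, are examples only).  The
+# Crushmap class round-trips the cluster CRUSH map through crushtool:
+# decompile, patch in any new root buckets, recompile.  A charm would
+# typically just do:
+#
+#     from ceph.crush_utils import Crushmap
+#
+#     crushmap = Crushmap()  # decompiles the live map on init
+#     crushmap.ensure_bucket_is_present('fast-storage')
+#     # adds a 'root fast-storage' bucket plus a matching replicated
+#     # rule, then saves the recompiled map back to the cluster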
+
+import re
+
+from subprocess import check_output, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+CRUSH_BUCKET = """root {name} {{
+    id {id}    # do not change unnecessarily
+    # weight 0.000
+    alg straw
+    hash 0  # rjenkins1
+}}
+
+rule {name} {{
+    ruleset 0
+    type replicated
+    min_size 1
+    max_size 10
+    step take {name}
+    step chooseleaf firstn 0 type host
+    step emit
+}}"""
+
+# This regular expression looks for a string like:
+# root NAME {
+#     id NUMBER
+# so that we can extract NAME and ID from the crushmap
+CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")
+
+# This regular expression looks for ID strings in the crushmap like:
+#     id NUMBER
+# so that we can extract the IDs from a crushmap
+CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
+
+
+class Crushmap(object):
+    """An object oriented approach to Ceph crushmap management."""
+
+    def __init__(self):
+        self._crushmap = self.load_crushmap()
+        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
+        buckets = []
+        ids = list(map(
+            lambda x: int(x),
+            re.findall(CRUSHMAP_ID_RE, self._crushmap)))
+        ids.sort()
+        if roots != []:
+            for root in roots:
+                buckets.append(CRUSHBucket(root[0], root[1], True))
+
+        self._buckets = buckets
+        if ids != []:
+            self._ids = ids
+        else:
+            self._ids = [0]
+
+    def load_crushmap(self):
+        try:
+            # Fetch the compiled map, then feed it to crushtool on
+            # stdin to decompile it into editable text.
+            crush = check_output(['ceph', 'osd', 'getcrushmap'])
+            return check_output(['crushtool', '-d', '-'],
+                                input=crush).decode('UTF-8')
+        except CalledProcessError as e:
+            log("Error occurred while loading and decompiling CRUSH map:"
+                "{}".format(e), ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
+    def buckets(self):
+        """Return a list of buckets that are in the Crushmap."""
+        return self._buckets
+
+    def add_bucket(self, bucket_name):
+        """Add a named bucket to Ceph"""
+        new_id = min(self._ids) - 1
+        self._ids.append(new_id)
+        self._buckets.append(CRUSHBucket(bucket_name, new_id))
+
+    def save(self):
+        """Persist Crushmap to Ceph"""
+        try:
+            crushmap = self.build_crushmap()
+            # Recompile the textual map and inject it back into the
+            # cluster, feeding each stage via stdin.
+            compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o',
+                                     '/dev/stdout'],
+                                    input=crushmap.encode('UTF-8'))
+            ceph_output = check_output(['ceph', 'osd', 'setcrushmap', '-i',
+                                        '/dev/stdin'],
+                                       input=compiled)
+            return ceph_output
+        except CalledProcessError as e:
+            log("save error: {}".format(e))
+            raise RuntimeError("Failed to save CRUSH map.")
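+
+    # NOTE: add_bucket() allocates new bucket IDs counting down from
+    # min(self._ids) - 1, keeping them negative as CRUSH expects for
+    # buckets (OSDs take the positive IDs); build_crushmap() below then
+    # appends one rendered CRUSH_BUCKET stanza per non-default bucket
+    # to the decompiled map text.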
+ + def build_crushmap(self): + """Modifies the current CRUSH map to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + +class CRUSHBucket(object): + """CRUSH bucket description object.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py new file mode 100644 index 00000000..b96dabbf --- /dev/null +++ b/ceph-mon/lib/ceph/utils.py @@ -0,0 +1,2199 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
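+
+# Illustrative usage sketch; nothing below is executed.  The interface
+# and device names are examples, and the import path assumes the
+# charm's lib directory is on sys.path.  The tuning helpers persist
+# what they change so the settings survive a reboot:
+#
+#     from ceph.utils import tune_nic, tune_dev
+#
+#     tune_nic('eth0')      # sysctls written to
+#                           # /etc/sysctl.d/51-ceph-osd-charm-eth0.conf
+#     tune_dev('/dev/sda')  # hdparm settings rendered into
+#                           # /etc/hdparm.conf, keyed by blkid UUID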
+ +import collections +import ctypes +import errno +import json +import os +import pyudev +import random +import re +import socket +import subprocess +import sys +import time +import shutil + +from datetime import datetime + +from charmhelpers.core import hookenv +from charmhelpers.core import templating +from charmhelpers.core.host import ( + chownr, + cmp_pkgrevno, + lsb_release, + mkdir, + mounts, + owner, + service_restart, + service_start, + service_stop, + CompareHostReleases, + is_container, +) +from charmhelpers.core.hookenv import ( + cached, + config, + log, + status_set, + DEBUG, + ERROR, + WARNING, +) +from charmhelpers.fetch import ( + apt_cache, + add_source, apt_install, apt_update +) +from charmhelpers.contrib.storage.linux.ceph import ( + get_mon_map, + monitor_key_set, + monitor_key_exists, + monitor_key_get, +) +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + is_device_mounted, + zap_disk, +) +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_install_source, +) + +CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') +OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') +HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', + 'radosgw', 'xfsprogs', 'python-pyudev'] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +class Partition(object): + def __init__(self, name, number, size, start, end, sectors, uuid): + """A block device partition. 
+ + :param name: Name of block device + :param number: Partition number + :param size: Capacity of the device + :param start: Starting block + :param end: Ending block + :param sectors: Number of blocks + :param uuid: UUID of the partition + """ + self.name = name, + self.number = number + self.size = size + self.start = start + self.end = end + self.sectors = sectors + self.uuid = uuid + + def __str__(self): + return "number: {} start: {} end: {} sectors: {} size: {} " \ + "name: {} uuid: {}".format(self.number, self.start, + self.end, + self.sectors, self.size, + self.name, self.uuid) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +def unmounted_disks(): + """List of unmounted block devices on the current host.""" + disks = [] + context = pyudev.Context() + for device in context.list_devices(DEVTYPE='disk'): + if device['SUBSYSTEM'] == 'block': + matched = False + for block_type in [u'dm', u'loop', u'ram', u'nbd']: + if block_type in device.device_node: + matched = True + if matched: + continue + disks.append(device.device_node) + log("Found disks: {}".format(disks)) + return [disk for disk in disks if not is_device_mounted(disk)] + + +def save_sysctls(sysctl_dict, save_location): + """Persist the sysctls to the hard drive. + + :param sysctl_dict: dict + :param save_location: path to save the settings to + :raises: IOError if anything goes wrong with writing. + """ + try: + # Persist the settings for reboots + with open(save_location, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + except IOError as e: + log("Unable to persist sysctl settings to {}. Error {}".format( + save_location, e.message), level=ERROR) + raise + + +def tune_nic(network_interface): + """This will set optimal sysctls for the particular network adapter. + + :param network_interface: string The network adapter name. + """ + speed = get_link_speed(network_interface) + if speed in NETWORK_ADAPTER_SYSCTLS: + status_set('maintenance', 'Tuning device {}'.format( + network_interface)) + sysctl_file = os.path.join( + os.sep, + 'etc', + 'sysctl.d', + '51-ceph-osd-charm-{}.conf'.format(network_interface)) + try: + log("Saving sysctl_file: {} values: {}".format( + sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), + level=DEBUG) + save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], + save_location=sysctl_file) + except IOError as e: + log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " + "failed. {}".format(network_interface, e.message), + level=ERROR) + + try: + # Apply the settings + log("Applying sysctl settings", level=DEBUG) + subprocess.check_output(["sysctl", "-p", sysctl_file]) + except subprocess.CalledProcessError as err: + log('sysctl -p {} failed with error {}'.format(sysctl_file, + err.output), + level=ERROR) + else: + log("No settings found for network adapter: {}".format( + network_interface), level=DEBUG) + + +def get_link_speed(network_interface): + """This will find the link speed for a given network device. Returns None + if an error occurs. + :param network_interface: string The network adapter interface. 
+ :returns: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e.message), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ This will persist the hard drive settings to the /etc/hdparm.conf file + + The settings_dict should be in the form of {"uuid": {"key":"value"}} + + :param settings_dict: dict of settings to save + """ + if not settings_dict: + return + + try: + templating.render(source='hdparm.conf', target=HDPARM_FILE, + context=settings_dict) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=HDPARM_FILE, error=err.message), level=ERROR) + except Exception as e: + # The templating.render can raise a jinja2 exception if the + # template is not found. Rather than polluting the import + # space of this charm, simply catch Exception + log('Unable to render {path} due to error: {error}'.format( + path=HDPARM_FILE, error=e.message), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """This function sets the max_sectors_kb size of a given block device. + + :param dev_name: Name of the block device to query + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(max_sectors_size) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """This function gets the max_sectors_kb size of a given block device. + + :param dev_name: Name of the block device to query + :returns: int which is either the max_sectors_kb or 0 on error. + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e.message), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """This function gets the max_hw_sectors_kb for a given block device. + + :param dev_name: Name of the block device to query + :returns: int which is either the max_hw_sectors_kb or 0 on error. 
+ """ + max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( + max_hw_sectors_kb_path, e.message), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """This function sets the hard drive read ahead. + + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """This queries blkid to get the uuid for a block device. + + :param block_dev: Name of the block device to query. + :returns: The UUID of the device or None on Error. + """ + try: + block_info = subprocess.check_output( + ['blkid', '-o', 'export', block_dev]) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """Tune the max_hw_sectors if needed. + + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. + + :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. 
No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + + This function will change the read ahead sectors and the max write + sectors for each block device. + + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + return + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_weight(osd_id): + """Returns the weight of the specified OSD. + + :returns: Float + :raises: ValueError if the monmap fails to parse. + :raises: CalledProcessError if our ceph command fails. + """ + try: + tree = subprocess.check_output( + ['ceph', 'osd', 'tree', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['nodes']: + return None + for device in json_tree['nodes']: + if device['type'] == 'osd' and device['name'] == osd_id: + return device['crush_weight'] + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e.message)) + raise + + +def get_osd_tree(service): + """Returns the current osd map in JSON. + + :returns: List. + :raises: ValueError if the monmap fails to parse. 
+    Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        tree = subprocess.check_output(
+            ['ceph', '--id', service,
+             'osd', 'tree', '--format=json'])
+        try:
+            json_tree = json.loads(tree)
+            crush_list = []
+            # Make sure children are present in the json
+            if not json_tree['nodes']:
+                return None
+            child_ids = json_tree['nodes'][0]['children']
+            for child in json_tree['nodes']:
+                if child['id'] in child_ids:
+                    crush_list.append(
+                        CrushLocation(
+                            name=child.get('name'),
+                            identifier=child['id'],
+                            host=child.get('host'),
+                            rack=child.get('rack'),
+                            row=child.get('row'),
+                            datacenter=child.get('datacenter'),
+                            chassis=child.get('chassis'),
+                            root=child.get('root')
+                        )
+                    )
+            return crush_list
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e))
+        raise
+
+
+def _get_child_dirs(path):
+    """Returns a list of directory names in the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :returns: list. A list of child directories under the parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    files_in_dir = [os.path.join(path, f) for f in os.listdir(path)]
+    return list(filter(os.path.isdir, files_in_dir))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return int: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
+    """
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: %s" % dirname)
+
+    return match.group('osd_id')
+
+
+def get_local_osd_ids():
+    """This will list the /var/lib/ceph/osd/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of osd identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    osd_ids = []
+    osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
+    if os.path.exists(osd_path):
+        try:
+            dirs = os.listdir(osd_path)
+            for osd_dir in dirs:
+                osd_id = osd_dir.split('-')[1]
+                if _is_int(osd_id):
+                    osd_ids.append(osd_id)
+        except OSError:
+            raise
+    return osd_ids
+
+
+def get_local_mon_ids():
+    """This will list the /var/lib/ceph/mon/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of monitor identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+ """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + +def _is_int(v): + """Return True if the object v can be turned into an integer.""" + try: + int(v) + return True + except ValueError: + return False + + +def get_version(): + """Derive Ceph release from an installed package.""" + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except: + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation ' \ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: %s" % msg, + level=ERROR) + sys.exit(1) + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(subprocess.check_output(cmd)) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + log("Waiting for quorum to be reached") + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' +] + +CEPH_PARTITIONS = [ + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation +] + + +def umount(mount_point): + """This function unmounts a mounted directory forcibly. This will + be used for unmounting broken hard drive mounts which may hang. + + If umount returns EBUSY this will lazy unmount. + + :param mount_point: str. 
A String representing the filesystem mount point + :returns: int. Returns 0 on success. errno otherwise. + """ + libc_path = ctypes.util.find_library("c") + libc = ctypes.CDLL(libc_path, use_errno=True) + + # First try to umount with MNT_FORCE + ret = libc.umount(mount_point, 1) + if ret < 0: + err = ctypes.get_errno() + if err == errno.EBUSY: + # Detach from try. IE lazy umount + ret = libc.umount(mount_point, 2) + if ret < 0: + err = ctypes.get_errno() + return err + return 0 + else: + return err + return 0 + + +def replace_osd(dead_osd_number, + dead_osd_device, + new_osd_device, + osd_format, + osd_journal, + reformat_osd=False, + ignore_errors=False): + """This function will automate the replacement of a failed osd disk as much + as possible. It will revoke the keys for the old osd, remove it from the + crush map and then add a new osd into the cluster. + + :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 + :param dead_osd_device: The physical device. Example: /dev/sda + :param osd_format: + :param osd_journal: + :param reformat_osd: + :param ignore_errors: + """ + host_mounts = mounts() + mount_point = None + for mount in host_mounts: + if mount[1] == dead_osd_device: + mount_point = mount[0] + # need to convert dev to osd number + # also need to get the mounted drive so we can tell the admin to + # replace it + try: + # Drop this osd out of the cluster. This will begin a + # rebalance operation + status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'out', + 'osd.{}'.format(dead_osd_number)]) + + # Kill the osd process if it's not already dead + if systemd(): + service_stop('ceph-osd@{}'.format(dead_osd_number)) + else: + subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( + dead_osd_number)]) + # umount if still mounted + ret = umount(mount_point) + if ret < 0: + raise RuntimeError('umount {} failed with error: {}'.format( + mount_point, os.strerror(ret))) + # Clean up the old mount point + shutil.rmtree(mount_point) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'crush', 'remove', + 'osd.{}'.format(dead_osd_number)]) + # Revoke the OSDs access keys + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'auth', 'del', + 'osd.{}'.format(dead_osd_number)]) + subprocess.check_output([ + 'ceph', + '--id', + 'osd-upgrade', + 'osd', 'rm', + 'osd.{}'.format(dead_osd_number)]) + status_set('maintenance', 'Setting up replacement osd {}'.format( + new_osd_device)) + osdize(new_osd_device, + osd_format, + osd_journal, + reformat_osd, + ignore_errors) + except subprocess.CalledProcessError as e: + log('replace_osd failed with error: ' + e.output) + + +def get_partition_list(dev): + """Lists the partitions of a block device. + + :param dev: Path to a block device. ex: /dev/sda + :returns: Returns a list of Partition objects. 
+ :raises: CalledProcessException if lsblk fails + """ + partitions_list = [] + try: + partitions = get_partitions(dev) + # For each line of output + for partition in partitions: + parts = partition.split() + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name=parts[5], + uuid=parts[6]) + ) + return partitions_list + except subprocess.CalledProcessError: + raise + + +def is_osd_disk(dev): + partitions = get_partition_list(dev) + for partition in partitions: + try: + info = subprocess.check_output(['sgdisk', '-i', partition.number, + dev]) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError as e: + log("sgdisk inspection of partition {} on {} failed with " + "error: {}. Skipping".format(partition.minor, dev, e.message), + level=ERROR) + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = subprocess.check_output(cmd) + + return "{}==".format(res.split('=')[1].strip()) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + return element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = 
get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(pool_list=None): + return get_named_key(name='radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) + + +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) + + +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + +_default_caps = collections.OrderedDict([ + ('mon', ['allow r']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :returns: Returns a cephx key + """ + try: + # Does the key already exist? + output = subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + 'client.{}'.format(name), + ]).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! 
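+        # With the default caps and no pool_list, the command assembled
+        # below is equivalent to running (illustrative only):
+        #
+        #   ceph --name mon. \
+        #       --keyring /var/lib/ceph/mon/ceph-$HOSTNAME/keyring \
+        #       auth get-or-create client.<name> mon 'allow r' osd 'allow rwx'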
+ log("Creating new key for {}".format(name), level=DEBUG) + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', 'client.{}'.format(name), + ] + # Add capabilities + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' + + +def bootstrap_monitor_cluster(secret): + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user()) + # end changes for Ceph >= 0.61.3 + try: + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. 
+ cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) + except: + raise + finally: + os.unlink(keyring) + + +def update_monfs(): + hostname = socket.gethostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def maybe_zap_journal(journal_dev): + if is_osd_disk(journal_dev): + log('Looks like {} is already an OSD data' + ' or journal, skipping.'.format(journal_dev)) + return + zap_disk(journal_dev) + log("Zapped journal device {}".format(journal_dev)) + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = subprocess.check_output(cmd).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def find_least_used_journal(journal_devices): + usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] + + +def osdize(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False, bluestore=False): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, + reformat_osd, ignore_errors, encrypt, + bluestore) + else: + osdize_dir(dev, encrypt) + + +def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, + ignore_errors=False, encrypt=False, bluestore=False): + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev) and not reformat_osd: + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + status_set('maintenance', 'Initializing device {}'.format(dev)) + cmd = ['ceph-disk', 'prepare'] + # Later versions of ceph support more options + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + if cmp_pkgrevno('ceph', '0.48.3') >= 0: + if osd_format: + cmd.append('--fs-type') + cmd.append(osd_format) + + if reformat_osd: + cmd.append('--zap-disk') + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + + cmd.append(dev) + + if osd_journal: + least_used = find_least_used_journal(osd_journal) + cmd.append(least_used) + else: + # Just provide the device - no other options + # for older versions of ceph + cmd.append(dev) + if reformat_osd: + zap_disk(dev) + + try: + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + raise + + +def osdize_dir(path, encrypt=False): + """Ask ceph-disk to prepare a directory to become an osd. + + :param path: str. The directory to osdize + :param encrypt: bool. 
Should the OSD directory be encrypted at rest
+    :returns: None
+    """
+    if os.path.exists(os.path.join(path, 'upstart')):
+        log('Path {} is already configured as an OSD - bailing'.format(path))
+        return
+
+    if cmp_pkgrevno('ceph', "0.56.6") < 0:
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        return
+
+    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
+    cmd = [
+        'sudo', '-u', ceph_user(),
+        'ceph-disk',
+        'prepare',
+        '--data-dir',
+        path
+    ]
+    if cmp_pkgrevno('ceph', '0.60') >= 0:
+        if encrypt:
+            cmd.append('--dmcrypt')
+    log("osdize dir cmd: {}".format(cmd))
+    subprocess.check_call(cmd)
+
+
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+
+def get_running_osds():
+    """Returns a list of the pids of the currently running OSD daemons"""
+    cmd = ['pgrep', 'ceph-osd']
+    try:
+        result = subprocess.check_output(cmd)
+        return result.split()
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_cephfs(service):
+    """List the Ceph Filesystems that exist.
+
+    :param service: The service name to run the ceph command under
+    :returns: list. Returns a list of the ceph filesystems
+    """
+    if get_version() < 0.86:
+        # This command wasn't introduced until 0.86 ceph
+        return []
+    try:
+        output = subprocess.check_output(["ceph", '--id', service, "fs", "ls"])
+        if not output:
+            return []
+        # Example subprocess output:
+        # 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
+        # data pools: [ip-172-31-23-165_data ]\n'
+        # output: filesystems: ['ip-172-31-23-165']
+        filesystems = []
+        for line in output.splitlines():
+            parts = line.split(',')
+            for part in parts:
+                if "name" in part:
+                    filesystems.append(part.split(' ')[1])
+        return filesystems
+    except subprocess.CalledProcessError:
+        return []
+
+
+def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
+    """Wait for all monitors in the cluster to finish upgrading, or
+    raise an exception once a ten-minute timeout has expired.
+
+    :param new_version: str of the version to watch
+    :param upgrade_key: the cephx key name to use
+    """
+    done = False
+    start_time = time.time()
+    monitor_list = []
+
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    while not done:
+        try:
+            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
+                "mon", mon, new_version
+            )) for mon in monitor_list)
+            current_time = time.time()
+            if current_time > (start_time + 10 * 60):
+                raise Exception(
+                    "Timed out waiting for monitors to upgrade")
+            else:
+                # Wait 30 seconds and test again if all monitors are upgraded
+                time.sleep(30)
+        except subprocess.CalledProcessError:
+            raise
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+def roll_monitor_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There's 2 possible cases: Either I'm first in line or not.
+    If I'm not first in line I'll wait a random time between 5-30 seconds
+    and test to see if the previous monitor is upgraded yet.
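+
+    For reference, the coordination keys stored via the monitor cluster
+    look like this (hostname and version shown are hypothetical)::
+
+        mon_juju-machine-3_10.2.9_start  -> epoch time the unit began
+        mon_juju-machine-3_10.2.9_done   -> epoch time the unit finished
+
+    lock_and_roll() writes the _start key, performs the upgrade, then
+    writes the _done key that the next unit in the sorted list polls for.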
+ + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('waiting', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(upgrade_key=upgrade_key, + service='mon', + previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +def upgrade_monitor(new_version): + """Upgrade the current ceph monitor to the new version + + :param new_version: String version to upgrade to. + """ + current_version = get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + try: + if systemd(): + for mon_id in get_local_mon_ids(): + service_stop('ceph-mon@{}'.format(mon_id)) + else: + service_stop('ceph-mon-all') + apt_install(packages=determine_packages(), fatal=True) + + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. + if new_version == 'jewel': + # Ensure the ownership of Ceph's directories is correct + owner = ceph_user() + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + + if systemd(): + for mon_id in get_local_mon_ids(): + service_start('ceph-mon@{}'.format(mon_id)) + else: + service_start('ceph-mon-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def lock_and_roll(upgrade_key, service, my_name, version): + """Create a lock on the ceph monitor cluster and upgrade. + + :param upgrade_key: str. The cephx key to use + :param service: str. The cephx id to use + :param my_name: str. The current hostname + :param version: str. 
The version we are upgrading to + """ + start_timestamp = time.time() + + log('monitor_key_set {}_{}_{}_start {}'.format( + service, + my_name, + version, + start_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_start".format( + service, my_name, version), start_timestamp) + log("Rolling") + + # This should be quick + if service == 'osd': + upgrade_osd(version) + elif service == 'mon': + upgrade_monitor(version) + else: + log("Unknown service {}. Unable to upgrade".format(service), + level=ERROR) + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_{}_{}_done {}'.format(service, + my_name, + version, + stop_timestamp)) + status_set('maintenance', 'Finishing upgrade') + monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, + my_name, + version), + stop_timestamp) + + +def wait_on_previous_node(upgrade_key, service, previous_node, version): + """A lock that sleeps the current thread while waiting for the previous + node to finish upgrading. + + :param upgrade_key: + :param service: str. the cephx id to use + :param previous_node: str. The name of the previous node to wait on + :param version: str. The version we are upgrading to + :returns: None + """ + log("Previous node is: {}".format(previous_node)) + + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + while previous_node_finished is False: + log("{} is not finished. Waiting".format(previous_node)) + # Has this node been trying to upgrade for longer than + # 10 minutes? + # If so then move on and consider that node dead. + + # NOTE: This assumes the clusters clocks are somewhat accurate + # If the hosts clock is really far off it may cause it to skip + # the previous node even though it shouldn't. + current_timestamp = time.time() + previous_node_start_time = monitor_key_get( + upgrade_key, + "{}_{}_{}_start".format(service, previous_node, version)) + if (current_timestamp - (10 * 60)) > previous_node_start_time: + # Previous node is probably dead. Lets move on + if previous_node_start_time is not None: + log( + "Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + else: + # I have to wait. Sleep a random amount of time and then + # check if I can lock,upgrade and roll. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) + + +def get_upgrade_position(osd_sorted_list, match_name): + """Return the upgrade position for the given osd. + + :param osd_sorted_list: list. Osds sorted + :param match_name: str. The osd name to match + :returns: int. The position or None if not found + """ + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + return None + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version, upgrade_key): + """This is tricky to get right so here's what we're going to do. + + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. 
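+
+    A sketch of the resulting order for a hypothetical three-host
+    cluster (names invented)::
+
+        osd_sorted_list = ['ceph-a', 'ceph-b', 'ceph-c']
+        # ceph-a: position 0, calls lock_and_roll() immediately
+        # ceph-b: waits for the osd_ceph-a_<version>_done key, then rolls
+        # ceph-c: waits for the osd_ceph-b_<version>_done key, then rolls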
+ + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('blocked', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(new_version): + """Upgrades the current osd + + :param new_version: str. The new version to upgrade to + """ + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph sources failed with message: {}".format( + err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + try: + # Upgrade the packages before restarting the daemons. + status_set('maintenance', 'Upgrading packages to %s' % new_version) + apt_install(packages=determine_packages(), fatal=True) + + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for path in non_osd_dirs: + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. 
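+        # (Per-OSD flow, for reference: stop_osd() -> disable_osd() ->
+        #  update_owner() -> enable_osd() -> start_osd(), as implemented
+        #  by _upgrade_single_osd() below.)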
+ for osd_dir in _get_child_dirs(OSD_BASE_DIR): + try: + osd_num = _get_osd_num_from_dirname(osd_dir) + _upgrade_single_osd(osd_num, osd_dir) + except ValueError as ex: + # Directory could not be parsed - junk directory? + log('Could not parse osd directory %s: %s' % (osd_dir, ex), + WARNING) + continue + + except (subprocess.CalledProcessError, IOError) as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err.message)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def _upgrade_single_osd(osd_num, osd_dir): + """Upgrades the single OSD directory. + + :param osd_num: the num of the OSD + :param osd_dir: the directory of the OSD to upgrade + :raises CalledProcessError: if an error occurs in a command issued as part + of the upgrade process + :raises IOError: if an error occurs reading/writing to a file as part + of the upgrade process + """ + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) + + +def stop_osd(osd_num): + """Stops the specified OSD number. + + :param osd_num: the osd number to stop + """ + if systemd(): + service_stop('ceph-osd@{}'.format(osd_num)) + else: + service_stop('ceph-osd', id=osd_num) + + +def start_osd(osd_num): + """Starts the specified OSD number. + + :param osd_num: the osd number to start. + """ + if systemd(): + service_start('ceph-osd@{}'.format(osd_num)) + else: + service_start('ceph-osd', id=osd_num) + + +def disable_osd(osd_num): + """Disables the specified OSD number. + + Ensures that the specified osd will not be automatically started at the + next reboot of the system. Due to differences between init systems, + this method cannot make any guarantees that the specified osd cannot be + started manually. + + :param osd_num: the osd id which should be disabled. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + to disable the OSD + :raises IOError, OSError: if the attempt to read/remove the ready file in + an upstart enabled system fails + """ + if systemd(): + # When running under systemd, the individual ceph-osd daemons run as + # templated units and can be directly addressed by referring to the + # templated service name ceph-osd@. Additionally, systemd + # allows one to disable a specific templated unit by running the + # 'systemctl disable ceph-osd@' command. When disabled, the + # OSD should remain disabled until re-enabled via systemd. + # Note: disabling an already disabled service in systemd returns 0, so + # no need to check whether it is enabled or not. + cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # Neither upstart nor the ceph-osd upstart script provides for + # disabling the starting of an OSD automatically. The specific OSD + # cannot be prevented from running manually, however it can be + # prevented from running automatically on reboot by removing the + # 'ready' file in the OSD's root directory. This is due to the + # ceph-osd-all upstart script checking for the presence of this file + # before starting the OSD. + ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + if os.path.exists(ready_file): + os.unlink(ready_file) + + +def enable_osd(osd_num): + """Enables the specified OSD number. + + Ensures that the specified osd_num will be enabled and ready to start + automatically in the event of a reboot. + + :param osd_num: the osd id which should be enabled. 
+ :raises CalledProcessError: if the call to the systemd command issued + fails when enabling the service + :raises IOError: if the attempt to write the ready file in an usptart + enabled system fails + """ + if systemd(): + cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # When running on upstart, the OSDs are started via the ceph-osd-all + # upstart script which will only start the osd if it has a 'ready' + # file. Make sure that file exists. + ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + with open(ready_file, 'w') as f: + f.write('ready') + + # Make sure the correct user owns the file. It shouldn't be necessary + # as the upstart script should run with root privileges, but its better + # to have all the files matching ownership. + update_owner(ready_file) + + +def update_owner(path, recurse_dirs=True): + """Changes the ownership of the specified path. + + Changes the ownership of the specified path to the new ceph daemon user + using the system's native chown functionality. This may take awhile, + so this method will issue a set_status for any changes of ownership which + recurses into directory structures. + + :param path: the path to recursively change ownership for + :param recurse_dirs: boolean indicating whether to recursively change the + ownership of all the files in a path's subtree or to + simply change the ownership of the path. + :raises CalledProcessError: if an error occurs issuing the chown system + command + """ + user = ceph_user() + user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) + cmd = ['chown', user_group, path] + if os.path.isdir(path) and recurse_dirs: + status_set('maintenance', ('Updating ownership of %s to %s' % + (path, user))) + cmd.insert(1, '-R') + + log('Changing ownership of {path} to {user}'.format( + path=path, user=user_group), DEBUG) + start = datetime.now() + subprocess.check_call(cmd) + elapsed_time = (datetime.now() - start) + + log('Took {secs} seconds to change the ownership of path: {path}'.format( + secs=elapsed_time.total_seconds(), path=path), DEBUG) + + +def list_pools(service): + """This will list the current pools that Ceph has + + :param service: String service id to run under + :returns: list. Returns a list of the ceph pools. + :raises: CalledProcessError if the subprocess fails to run. + """ + try: + pool_list = [] + pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + for pool in pools.splitlines(): + pool_list.append(pool) + return pool_list + except subprocess.CalledProcessError as err: + log("rados lspools failed with error: {}".format(err.output)) + raise + + +def dirs_need_ownership_update(service): + """Determines if directories still need change of ownership. + + Examines the set of directories under the /var/lib/ceph/{service} directory + and determines if they have the correct ownership or not. This is + necessary due to the upgrade from Hammer to Jewel where the daemon user + changes from root: to ceph:. + + :param service: the name of the service folder to check (e.g. osd, mon) + :returns: boolean. True if the directories need a change of ownership, + False otherwise. + :raises IOError: if an error occurs reading the file stats from one of + the child directories. 
+ :raises OSError: if the specified path does not exist or some other error + """ + expected_owner = expected_group = ceph_user() + path = os.path.join(CEPH_BASE_DIR, service) + for child in _get_child_dirs(path): + curr_owner, curr_group = owner(child) + + if (curr_owner == expected_owner) and (curr_group == expected_group): + continue + + log('Directory "%s" needs its ownership updated' % child, DEBUG) + return True + + # All child directories had the expected ownership + return False + +# A dict of valid ceph upgrade paths. Mapping is old -> new +UPGRADE_PATHS = { + 'firefly': 'hammer', + 'hammer': 'jewel', +} + +# Map UCA codenames to ceph codenames +UCA_CODENAME_MAP = { + 'icehouse': 'firefly', + 'juno': 'firefly', + 'kilo': 'hammer', + 'liberty': 'hammer', + 'mitaka': 'jewel', + 'newton': 'jewel', + 'ocata': 'jewel', +} + + +def pretty_print_upgrade_paths(): + """Pretty print supported upgrade paths for ceph""" + lines = [] + for key, value in UPGRADE_PATHS.iteritems(): + lines.append("{} -> {}".format(key, value)) + return lines + + +def resolve_ceph_version(source): + """Resolves a version of ceph based on source configuration + based on Ubuntu Cloud Archive pockets. + + @param: source: source configuration option of charm + :returns: ceph release codename or None if not resolvable + """ + os_release = get_os_codename_install_source(source) + return UCA_CODENAME_MAP.get(os_release) + + +def get_ceph_pg_stat(): + """Returns the result of ceph pg stat. + + :returns: dict + """ + try: + tree = subprocess.check_output(['ceph', 'pg', 'stat', '--format=json']) + try: + json_tree = json.loads(tree) + if not json_tree['num_pg_by_state']: + return None + return json_tree + except ValueError as v: + log("Unable to parse ceph pg stat json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph pg stat command failed with message: {}".format( + e.message)) + raise + + +def get_ceph_health(): + """Returns the health of the cluster from a 'ceph status' + + :returns: dict tree of ceph status + :raises: CalledProcessError if our ceph command fails to get the overall + status, use get_ceph_health()['overall_status']. + """ + try: + tree = subprocess.check_output( + ['ceph', 'status', '--format=json']) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['overall_status']: + return None + + return json_tree + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v.message)) + raise + except subprocess.CalledProcessError as e: + log("ceph status command failed with message: {}".format( + e.message)) + raise + + +def reweight_osd(osd_num, new_weight): + """Changes the crush weight of an OSD to the value specified. + + :param osd_num: the osd id which should be changed + :param new_weight: the new weight for the OSD + :returns: bool. True if output looks right, else false. 
+ :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + try: + cmd_result = subprocess.check_output( + ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), + new_weight], stderr=subprocess.STDOUT) + expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( + ID=osd_num) + " to {}".format(new_weight) + log(cmd_result) + if expected_result in cmd_result: + return True + return False + except subprocess.CalledProcessError as e: + log("ceph osd crush reweight command failed with message: {}".format( + e.message)) + raise + + +def determine_packages(): + """Determines packages for installation. + + :returns: list of ceph packages + """ + if is_container(): + PACKAGES.remove('ntp') + + return PACKAGES + + +def bootstrap_manager(): + hostname = socket.gethostname() + path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) + keyring = os.path.join(path, 'keyring') + + if os.path.exists(keyring): + log('bootstrap_manager: mgr already initialized.') + else: + mkdir(path, owner=ceph_user(), group=ceph_user()) + subprocess.check_call(['ceph', 'auth', 'get-or-create', + 'mgr.{}'.format(hostname), 'mon', + 'allow profile mgr', 'osd', 'allow *', + 'mds', 'allow *', '--out-file', + keyring]) + chownr(path, ceph_user(), ceph_user()) + + unit = 'ceph-mgr@{}'.format(hostname) + subprocess.check_call(['systemctl', 'enable', unit]) + service_restart(unit) diff --git a/ceph-mon/lib/setup.py b/ceph-mon/lib/setup.py deleted file mode 100644 index 139c80d6..00000000 --- a/ceph-mon/lib/setup.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import print_function - -import os -import sys -from setuptools import setup, find_packages -from setuptools.command.test import test as TestCommand - -version = "0.0.1.dev1" -install_require = [ -] - -tests_require = [ - 'tox >= 2.3.1', -] - - -class Tox(TestCommand): - - user_options = [('tox-args=', 'a', "Arguments to pass to tox")] - - def initialize_options(self): - TestCommand.initialize_options(self) - self.tox_args = None - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = [] - self.test_suite = True - - def run_tests(self): - # import here, cause outside the eggs aren't loaded - import tox - import shlex - args = self.tox_args - # remove the 'test' arg from argv as tox passes it to ostestr which - # breaks it. 
- sys.argv.pop() - if args: - args = shlex.split(self.tox_args) - errno = tox.cmdline(args=args) - sys.exit(errno) - - -if sys.argv[-1] == 'publish': - os.system("python setup.py sdist upload") - os.system("python setup.py bdist_wheel upload") - sys.exit() - - -if sys.argv[-1] == 'tag': - os.system("git tag -a %s -m 'version %s'" % (version, version)) - os.system("git push --tags") - sys.exit() - - -setup( - name='charms.ceph', - version=version, - description='Provide base module for ceph charms.', - classifiers=[ - "Development Status :: 2 - Pre-Alpha", - "Intended Audience :: Developers", - "Topic :: System", - "Topic :: System :: Installation/Setup", - "Topic :: System :: Software Distribution", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "License :: OSI Approved :: Apache Software License", - ], - url='https://github.com/openstack/charms.ceph', - author='OpenStack Charmers', - author_email='openstack-dev@lists.openstack.org', - license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0', - packages=find_packages(exclude=["unit_tests"]), - zip_safe=False, - cmdclass={'test': Tox}, - install_requires=install_require, - extras_require={ - 'testing': tests_require, - }, - tests_require=tests_require, -) diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 3b821960..5f17e03e 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -20,13 +20,13 @@ patch, ) -from ceph import ceph_broker +from ceph import broker class TestCephOps(unittest.TestCase): - @patch.object(ceph_broker, 'create_erasure_profile') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'create_erasure_profile') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_create_erasure_profile(self, mock_create_erasure): req = json.dumps({'api-version': 1, 'ops': [{ @@ -37,7 +37,7 @@ def test_create_erasure_profile(self, mock_create_erasure): 'k': 3, 'm': 2, }]}) - rc = ceph_broker.process_requests(req) + rc = broker.process_requests(req) mock_create_erasure.assert_called_with(service='admin', profile_name='foo', coding_chunks=2, @@ -47,9 +47,9 @@ def test_create_erasure_profile(self, mock_create_erasure): erasure_plugin_name='jerasure') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker, 'ReplicatedPool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_exists') + @patch.object(broker, 'ReplicatedPool') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_replicated_pool(self, mock_replicated_pool, mock_pool_exists): @@ -61,15 +61,15 @@ def test_process_requests_create_replicated_pool(self, 'name': 'foo', 'replicas': 3 }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') calls = [call(name=u'foo', service='admin', replicas=3)] mock_replicated_pool.assert_has_calls(calls) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker, 'ReplicatedPool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_exists') + @patch.object(broker, 'ReplicatedPool') + @patch.object(broker, 'log', lambda *args, **kwargs: None) 
def test_process_requests_replicated_pool_weight(self, mock_replicated_pool, mock_pool_exists): @@ -82,15 +82,15 @@ def test_process_requests_replicated_pool_weight(self, 'weight': 40.0, 'replicas': 3 }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_pool_exists.assert_called_with(service='admin', name='foo') calls = [call(name=u'foo', service='admin', replicas=3, percent_data=40.0)] mock_replicated_pool.assert_has_calls(calls) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'delete_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'delete_pool') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_delete_pool(self, mock_delete_pool): reqs = json.dumps({'api-version': 1, @@ -99,14 +99,14 @@ def test_process_requests_delete_pool(self, 'name': 'foo', }]}) mock_delete_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.ErasurePool, 'create') - @patch.object(ceph_broker, 'erasure_profile_exists') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_exists') + @patch.object(broker.ErasurePool, 'create') + @patch.object(broker, 'erasure_profile_exists') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool, mock_pool_exists): @@ -118,15 +118,15 @@ def test_process_requests_create_erasure_pool(self, mock_profile_exists, 'name': 'foo', 'erasure-profile': 'default' }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_profile_exists.assert_called_with(service='admin', name='default') mock_pool_exists.assert_called_with(service='admin', name='foo') mock_erasure_pool.assert_called_with() self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.Pool, 'add_cache_tier') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_exists') + @patch.object(broker.Pool, 'add_cache_tier') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_cache_tier(self, mock_pool, mock_pool_exists): mock_pool_exists.return_value = True @@ -138,16 +138,16 @@ def test_process_requests_create_cache_tier(self, mock_pool, 'mode': 'writeback', 'erasure-profile': 'default' }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_pool_exists.assert_any_call(service='admin', name='foo') mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_exists') - @patch.object(ceph_broker.Pool, 'remove_cache_tier') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_exists') + @patch.object(broker.Pool, 'remove_cache_tier') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_remove_cache_tier(self, mock_pool, mock_pool_exists): mock_pool_exists.return_value = True @@ -156,14 +156,14 @@ def test_process_requests_remove_cache_tier(self, mock_pool, 'op': 
'remove-cache-tier', 'hot-pool': 'foo-ssd', }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') mock_pool.assert_called_with(cache_pool='foo-ssd') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'snapshot_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'snapshot_pool') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_snapshot_pool(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -172,14 +172,14 @@ def test_snapshot_pool(self, mock_snapshot_pool): 'snapshot-name': 'foo-snap1', }]}) mock_snapshot_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'rename_pool') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'rename_pool') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_rename_pool(self, mock_rename_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -188,14 +188,14 @@ def test_rename_pool(self, mock_rename_pool): 'new-name': 'foo2', }]}) mock_rename_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_rename_pool.assert_called_with(service='admin', old_name='foo', new_name='foo2') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'remove_pool_snapshot') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'remove_pool_snapshot') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_remove_pool_snapshot(self, mock_snapshot_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -204,14 +204,14 @@ def test_remove_pool_snapshot(self, mock_snapshot_pool): 'snapshot-name': 'foo-snap1', }]}) mock_snapshot_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', snapshot_name='foo-snap1') self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'pool_set') - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'pool_set') + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_set_pool_value(self, mock_set_pool): reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -221,14 +221,14 @@ def test_set_pool_value(self, mock_set_pool): 'value': 3, }]}) mock_set_pool.return_value = {'exit-code': 0} - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) mock_set_pool.assert_called_with(service='admin', pool_name='foo', key='size', value=3) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(ceph_broker, 'log', lambda *args, **kwargs: None) + @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_set_invalid_pool_value(self): reqs = json.dumps({'api-version': 1, 'ops': [{ @@ -237,5 +237,5 @@ def test_set_invalid_pool_value(self): 'key': 'size', 'value': 'abc', }]}) - rc = ceph_broker.process_requests(reqs) + rc = broker.process_requests(reqs) self.assertEqual(json.loads(rc)['exit-code'], 1) From e557935219119eff8efcd1c630e2d76afe607fc4 Mon Sep 17 00:00:00 2001 From: David Ames 
Date: Tue, 15 Aug 2017 10:11:30 -0700
Subject: [PATCH 1348/2699] Dual Stack VIPs

Enable dual stack IPv4 and IPv6 VIPs on the same interface. HAProxy
always listens on both IPv4 and IPv6 allowing connectivity on either
protocol.

charm-helpers sync for HAProxy template changes.

Change-Id: Ibc95f322df29857df1c16f1ac1ebe04b5a2bc748
---
 ceph-radosgw/hooks/charmhelpers/__init__.py   |  61 +++
 .../charmhelpers/contrib/charmsupport/nrpe.py |   9 +-
 .../hooks/charmhelpers/contrib/network/ip.py  |   6 +-
 .../contrib/openstack/amulet/utils.py         | 103 +++--
 .../charmhelpers/contrib/openstack/context.py |  96 +++--
 .../contrib/openstack/templates/ceph.conf     |   5 +-
 .../contrib/openstack/templates/haproxy.cfg   |   6 +-
 .../templates/section-oslo-notifications     |   8 +
 .../contrib/openstack/templating.py           |   7 +-
 .../charmhelpers/contrib/openstack/utils.py   | 361 ++++++++++--------
 .../contrib/storage/linux/bcache.py           |  74 ++++
 .../contrib/storage/linux/ceph.py             |  44 ++-
 .../hooks/charmhelpers/core/hookenv.py        |  40 ++
 ceph-radosgw/hooks/charmhelpers/core/host.py  |  38 +-
 .../hooks/charmhelpers/fetch/__init__.py      |  26 +-
 .../hooks/charmhelpers/fetch/centos.py        |   2 +-
 ceph-radosgw/hooks/charmhelpers/fetch/snap.py |  22 +-
 .../hooks/charmhelpers/fetch/ubuntu.py        | 327 +++++++++++++---
 ceph-radosgw/hooks/hooks.py                   |   9 +
 ceph-radosgw/tests/charmhelpers/__init__.py   |  61 +++
 .../contrib/openstack/amulet/utils.py         | 103 +++--
 .../tests/charmhelpers/core/hookenv.py        |  40 ++
 ceph-radosgw/tests/charmhelpers/core/host.py  |  38 +-
 23 files changed, 1135 insertions(+), 351 deletions(-)
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
 create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/bcache.py

diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py
index 48867880..e7aa4715 100644
--- a/ceph-radosgw/hooks/charmhelpers/__init__.py
+++ b/ceph-radosgw/hooks/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
@@ -34,3 +39,59 @@
 else:
     subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format to indicate when the
+                 function will definitely (probably) be removed.
+    :param log: The log function to call to log.
If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8240249e..80d574dc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -125,7 +125,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_]+$' + shortname_re = '[A-Za-z0-9-_.]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed @@ -193,6 +193,13 @@ def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index fc3f5e3e..d7e6debf 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -243,11 +243,13 @@ def is_ipv6_disabled(): try: result = subprocess.check_output( ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT) - return "net.ipv6.conf.all.disable_ipv6 = 1" in result + stderr=subprocess.STDOUT, + universal_newlines=True) except subprocess.CalledProcessError: return True + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import 
client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep 
= keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index ea93159d..f67f3265 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -41,9 +41,9 @@ charm_name, DEBUG, INFO, - WARNING, ERROR, status_set, + network_get_primary_address ) from charmhelpers.core.sysctl import create as sysctl_create @@ -80,6 +80,9 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -87,7 +90,6 @@ get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network, is_bridge_member, is_ipv6_disabled, ) @@ -97,6 +99,7 @@ git_determine_usr_bin, git_determine_python_path, enable_memcache, + snap_install_requested, ) from charmhelpers.core.unitdata import kv @@ -244,6 +247,11 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } + # Note(coreycb): We can drop mysql+pymysql if we want when the + # following review lands, though it seems mysql+pymysql would + # be preferred. 
https://review.openstack.org/#/c/462190/ + if snap_install_requested(): + ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt @@ -510,6 +518,10 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features ceph_addrs = relation_get('ceph-public-address', rid=rid, unit=unit) @@ -610,7 +622,6 @@ def __call__(self): ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') if config('prefer-ipv6'): - ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' else: @@ -726,11 +737,17 @@ def canonical_names(self): return sorted(list(set(cns))) def get_network_addresses(self): - """For each network configured, return corresponding address and vip - (if available). + """For each network configured, return corresponding address and + hostnamr or vip (if available). Returns a list of tuples of the form: + [(address_in_net_a, hostname_in_net_a), + (address_in_net_b, hostname_in_net_b), + ...] + + or, if no hostnames(s) available: + [(address_in_net_a, vip_in_net_a), (address_in_net_b, vip_in_net_b), ...] @@ -742,32 +759,27 @@ def get_network_addresses(self): ...] """ addresses = [] - if config('vip'): - vips = config('vip').split() - else: - vips = [] - - for net_type in ['os-internal-network', 'os-admin-network', - 'os-public-network']: - addr = get_address_in_network(config(net_type), - unit_get('private-address')) - if len(vips) > 1 and is_clustered(): - if not config(net_type): - log("Multiple networks configured but net_type " - "is None (%s)." % net_type, level=WARNING) - continue - - for vip in vips: - if is_address_in_network(config(net_type), vip): - addresses.append((addr, vip)) - break - - elif is_clustered() and config('vip'): - addresses.append((addr, config('vip'))) + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit. 
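+            # (Illustration with invented values: a unit whose
+            #  private-address is 10.5.0.11, with os-public-network set
+            #  to 10.5.0.0/24, binds on 10.5.0.11 and pairs it with
+            #  whatever resolve_address(PUBLIC) returns, e.g. a VIP of
+            #  10.5.0.100 when clustered.)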
+ fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) else: - addresses.append((addr, addr)) + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except NotImplementedError: + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) - return sorted(addresses) + return sorted(set(addresses)) def __call__(self): if isinstance(self.external_ports, six.string_types): @@ -794,7 +806,7 @@ def __call__(self): self.configure_cert(cn) addresses = self.get_network_addresses() - for address, endpoint in sorted(set(addresses)): + for address, endpoint in addresses: for api_port in self.external_ports: ext_port = determine_apache_port(api_port, singlenode_mode=True) @@ -1397,14 +1409,38 @@ def __call__(self): 'rel_key': 'dns-domain', 'default': None, }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + if ctxt['enable_qos']: + ctxt['extension_drivers'] = 'qos' + else: + ctxt['extension_drivers'] = '' + return ctxt def get_neutron_options(self, rdata): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index 33ceee25..ed5c4f10 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -1,6 +1,6 @@ ############################################################################### # [ WARNING ] -# cinder configuration file maintained by Juju +# ceph configuration file maintained by Juju # local changes may be overwritten. 
############################################################################### [global] @@ -12,6 +12,9 @@ mon host = {{ mon_hosts }} log to syslog = {{ use_syslog }} err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} +{% if rbd_features %} +rbd default features = {{ rbd_features }} +{% endif %} [client] {% if rbd_client_cache_settings -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 54fba39d..2e660450 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -1,6 +1,6 @@ global - log {{ local_host }} local0 - log {{ local_host }} local1 notice + log /var/lib/haproxy/dev/log local0 + log /var/lib/haproxy/dev/log local1 notice maxconn 20000 user haproxy group haproxy @@ -48,9 +48,7 @@ listen stats {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} - {% if ipv6 -%} bind :::{{ ports[0] }} - {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications new file mode 100644 index 00000000..5dccd4bb --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -0,0 +1,8 @@ +{% if transport_url -%} +[oslo_messaging_notifications] +driver = messagingv2 +transport_url = {{ transport_url }} +{% if notification_topics -%} +topics = {{ notification_topics }} +{% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 934baf5d..d8c1fc7f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -20,7 +20,8 @@ from charmhelpers.core.hookenv import ( log, ERROR, - INFO + INFO, + TRACE ) from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES @@ -80,8 +81,10 @@ def get_loader(templates_dir, os_release): loaders.insert(0, FileSystemLoader(tmpl_dir)) if rel == os_release: break + # demote this log to the lowest level; we don't really need to see these + # lots in production even when debugging. 
log('Creating choice loader with dirs: %s' % - [l.searchpath for l in loaders], level=INFO) + [l.searchpath for l in loaders], level=TRACE) return ChoiceLoader(loaders) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 161c786b..837a1674 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -26,11 +26,12 @@ import shutil import six -import tempfile import traceback import uuid import yaml +from charmhelpers import deprecate + from charmhelpers.contrib.network import ip from charmhelpers.core import unitdata @@ -41,7 +42,6 @@ config, log as juju_log, charm_dir, - DEBUG, INFO, ERROR, related_units, @@ -51,6 +51,7 @@ status_set, hook_name, application_version_set, + cached, ) from charmhelpers.core.strutils import BasicStringComparator @@ -82,11 +83,21 @@ restart_on_change_helper, ) from charmhelpers.fetch import ( - apt_install, apt_cache, install_remote, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, get_upstream_version ) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + SNAP_CHANNELS, +) + from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -175,7 +186,7 @@ ('ocata', ['2.11.0', '2.12.0', '2.13.0']), ('pike', - ['2.13.0']), + ['2.13.0', '2.15.0']), ]) # >= Liberty version->codename mapping @@ -324,8 +335,10 @@ def get_os_codename_install_source(src): return ca_rel # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in six.iteritems(OPENSTACK_CODENAMES): + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): if v in src: return v @@ -394,6 +407,19 @@ def get_swift_codename(version): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + import apt_pkg as apt cache = apt_cache() @@ -469,13 +495,14 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) -os_rel = None +# Module local cache variable for the os_release. +_os_rel = None def reset_os_release(): '''Unset the cached os_release version''' - global os_rel - os_rel = None + global _os_rel + _os_rel = None def os_release(package, base='essex', reset_cache=False): @@ -489,150 +516,77 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. 
''' - global os_rel + global _os_rel if reset_cache: reset_os_release() - if os_rel: - return os_rel - os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return os_rel + if _os_rel: + return _os_rel + _os_rel = ( + git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return _os_rel +@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) def import_key(keyid): - key = keyid.strip() - if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and - key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): - juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - juju_log("Importing ASCII Armor PGP key", level=DEBUG) - with tempfile.NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - else: - juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) - juju_log("Importing PGP key from keyserver", level=DEBUG) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - - -def get_source_and_pgp_key(input): - """Look for a pgp key ID or ascii-armor key in the given input.""" - index = input.strip() - index = input.rfind('|') - if index < 0: - return input, None - - key = input[index + 1:].strip('|') - source = input[:index] - return source, key - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - if rel == 'distro': - return - elif rel == 'distro-proposed': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(DISTRO_PROPOSED % ubuntu_rel) - elif rel[:4] == "ppa:": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) + """Import a key, either ASCII armored, or a GPG key id. - if 'staging' in ca_rel: - # staging is just a regular PPA. - os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. 
- pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'pike': 'xenial-updates/pike', - 'pike/updates': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'queens': 'xenial-updates/queens', - 'queens/updates': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - } + @param keyid: the key in ASCII armor format, or a GPG key id. + @raises SystemExit() via sys.exit() on failure. + """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - apt_install('ubuntu-cloud-keyring', fatal=True) +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) + :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. + + The functionality is provided by charmhelpers.fetch.add_source() + The difference between the two functions is that add_source() signature + requires the key to be passed directly, whereas this function passes an + optional key by appending '|' to the end of the source specificiation + 'source'. + + Another difference from add_source() is that the function calls sys.exit(1) + if the configuration fails, whereas add_source() raises + SourceConfigurationError(). Another difference, is that add_source() + silently fails (with a juju_log command) if there is no matching source to + configure, whereas this function fails with a sys.exit(1) + + :param source: String_plus_key -- see above for details. 
+ + Note that the behaviour on error is to log the error to the juju log and + then call sys.exit(1). + """ + # extract the key if there is one, denoted by a '|' in the rel + source, key = get_source_and_pgp_key(source_plus_key) + + # handle the ordinary sources via add_source + try: + fetch_add_source(source, key, fail_invalid=True) + except SourceConfigError as se: + error_out(str(se)) def config_value_changed(option): @@ -677,12 +631,14 @@ def openstack_upgrade_available(package): :returns: bool: : Returns True if configured installation source offers a newer version of package. - """ import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) + if not cur_vers: + # The package has not been installed yet do not attempt upgrade + return False if "swift" in package: codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) @@ -1933,6 +1889,30 @@ def wrapped_f(*args, **kwargs): return wrap +def ordered(orderme): + """Converts the provided dictionary into a collections.OrderedDict. + + The items in the returned OrderedDict will be inserted based on the + natural sort order of the keys. Nested dictionaries will also be sorted + in order to ensure fully predictable ordering. + + :param orderme: the dict to order + :return: collections.OrderedDict + :raises: ValueError: if `orderme` isn't a dict instance. + """ + if not isinstance(orderme, dict): + raise ValueError('argument must be a dict type') + + result = OrderedDict() + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + if isinstance(v, dict): + result[k] = ordered(v) + else: + result[k] = v + + return result + + def config_flags_parser(config_flags): """Parses config flags string into dict. @@ -1944,15 +1924,13 @@ def config_flags_parser(config_flags): example, a string in the format of 'key1=value1, key2=value2' will return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. + {'key1': 'value1', 'key2': 'value2'}. 2. A string in the above format, but supporting a comma-delimited list of values for the same key. For example, a string in the format of 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} + {'key1': 'value1', 'key2': 'value2,value3,value4'} 3. A string containing a colon character (:) prior to an equal character (=) will be treated as yaml and parsed as such. This can be @@ -1972,7 +1950,7 @@ def config_flags_parser(config_flags): equals = config_flags.find('=') if colon > 0: if colon < equals or equals < 0: - return yaml.safe_load(config_flags) + return ordered(yaml.safe_load(config_flags)) if config_flags.find('==') >= 0: juju_log("config_flags is not in expected format (key=value)", @@ -1985,7 +1963,7 @@ def config_flags_parser(config_flags): # split on '='. split = config_flags.strip(' =').split('=') limit = len(split) - flags = {} + flags = OrderedDict() for i in range(0, limit - 1): current = split[i] next = split[i + 1] @@ -2052,3 +2030,84 @@ def token_cache_pkgs(source=None, release=None): if enable_memcache(source=source, release=release): packages.extend(['memcached', 'python-memcache']) return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. 
+ :param filename: json filename (i.e.: /etc/glance/policy.json) + :param items: dict of items to update + """ + with open(filename) as fd: + policy = json.load(fd) + policy.update(items) + with open(filename, "w") as fd: + fd.write(json.dumps(policy, indent=4)) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:channel-series-release + and channel is in SNAPS_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + channel, series, release = _src.split('-') + if channel.lower() in SNAP_CHANNELS: + return True + return False + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:channel-series-track + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + _channel, _series, _release = _src.split('-') + channel = '--channel={}/{}'.format(_release, _channel) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel a snapstore channel and mode is --classic, --devmode or + --jailmode. + @param post_snap_install: Callback function to run after snaps have been + installed + """ + + def _ensure_flag(flag): + if flag.startswith('--'): + return flag + return '--{}'.format(flag) + + if refresh: + for snap in snaps.keys(): + snap_refresh(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + else: + for snap in snaps.keys(): + snap_install(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/bcache.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 00000000..605991e1 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
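Taken together, the snap helpers above turn a snap origin into concrete install arguments. A hedged usage sketch; the snap name and origin string are illustrative values, not taken from the charm:

from charmhelpers.contrib.openstack.utils import (
    get_snaps_install_info_from_origin,
    install_os_snaps,
)

origin = 'snap:beta-xenial-ocata'   # snap:channel-series-release
info = get_snaps_install_info_from_origin(['keystone'], origin)
# info == {'keystone': {'channel': '--channel=ocata/beta', 'mode': 'classic'}}
install_os_snaps(info)              # effectively: snap install --channel=ocata/beta --classic keystone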
+import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9417d684..e5a01b1b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -63,6 +63,7 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.unitdata import kv from charmhelpers.core.kernel import modprobe from charmhelpers.contrib.openstack.utils import config_flags_parser @@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def is_broker_action_done(action, rid=None, unit=None): + """Check whether broker action has completed yet. + + @param action: name of action to be performed + @returns True if action complete otherwise False + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return False + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + val = kvstore.get(key=key) + if val and val == rsp.request_id: + return True + + return False + + +def mark_broker_action_done(action, rid=None, unit=None): + """Mark action as having been completed. + + @param action: name of action to be performed + @returns None + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + kvstore.set(key=key, value=rsp.request_id) + kvstore.flush() + + class CephConfContext(object): """Ceph config (ceph.conf) context. 
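The pair is_broker_action_done()/mark_broker_action_done() gives a unit a local, idempotent record of which broker request it has already acted on, keyed by the response's request_id. A hedged sketch of the intended hook-side pattern; 'restart_services' is an illustrative action name and restart_rgw() a hypothetical helper:

from charmhelpers.contrib.storage.linux.ceph import (
    is_broker_action_done,
    mark_broker_action_done,
)

def ceph_relation_changed(rid=None, unit=None):
    # Run the follow-up work at most once per broker request-id.
    if is_broker_action_done('restart_services', rid, unit):
        return
    restart_rgw()
    mark_broker_action_done('restart_services', rid, unit)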
@@ -1330,7 +1372,7 @@ def __call__(self): return {} conf = config_flags_parser(conf) - if type(conf) != dict: + if not isinstance(conf, dict): log("Provided config-flags is not a dictionary - ignoring", level=WARNING) return {} diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. 
+ + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 88e80a49..5656e2f5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. 
+ if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index ec5e0fe9..480a6276 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -48,6 +48,13 @@ class AptLockError(Exception): pass +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + class BaseFetchHandler(object): """Base class for FetchHandler implementations in fetch plugins""" @@ -77,21 +84,22 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages -install = fetch.install -upgrade = fetch.upgrade -update = fetch.update -purge = fetch.purge +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge add_source = fetch.add_source if __platform__ == "ubuntu": apt_cache = fetch.apt_cache - apt_install = fetch.install - apt_update = fetch.update - apt_upgrade = fetch.upgrade - apt_purge = fetch.purge + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold + import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version elif __platform__ == "centos": yum_search = fetch.yum_search @@ -135,7 +143,7 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - fetch.update(fatal=True) + _fetch_update(fatal=True) def install_remote(source, *args, **kwargs): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/centos.py b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py index 604bbfb5..a91dcff0 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/centos.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py @@ -132,7 +132,7 @@ def add_source(source, key=None): key_file.write(key) key_file.flush() key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file]) + subprocess.check_call(['rpm', '--import', key_file.name]) else: subprocess.check_call(['rpm', '--import', key]) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py index 23c707b0..112a54c3 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -18,15 +18,23 @@ https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html """ import subprocess -from os import environ +import os from time import sleep from charmhelpers.core.hookenv import log __author__ = 'Joseph Borg ' -SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +# The return code for "couldn't acquire lock" in Snap +# (hopefully this will be improved). +SNAP_NO_LOCK = 1 SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
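The fetch/__init__.py hunk above renames the Ubuntu backend's functions to apt_-prefixed names while keeping the historical bare names importable. A condensed sketch of that dispatch-and-alias pattern, assuming an Ubuntu platform:

import importlib

from charmhelpers.osplatform import get_platform

__platform__ = get_platform()              # e.g. 'ubuntu'
fetch = importlib.import_module('charmhelpers.fetch.%s' % __platform__)

# new canonical names on the backend...
apt_install = fetch.apt_install
apt_update = fetch.apt_update
# ...with the old bare names kept as aliases for existing callers
install = fetch.apt_install
update = _fetch_update = fetch.apt_update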
+SNAP_CHANNELS = [ + 'edge', + 'beta', + 'candidate', + 'stable', +] class CouldNotAcquireLockException(Exception): @@ -47,13 +55,17 @@ def _snap_exec(commands): while return_code is None or return_code == SNAP_NO_LOCK: try: - return_code = subprocess.check_call(['snap'] + commands, env=environ) + return_code = subprocess.check_call(['snap'] + commands, + env=os.environ) except subprocess.CalledProcessError as e: retry_count += 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: - raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) + raise CouldNotAcquireLockException( + 'Could not acquire lock after {} attempts' + .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode - log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') + log('Snap failed to acquire lock, trying again in {} seconds.' + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 7bc6cc7e..40e1cb5b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -12,29 +12,48 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import OrderedDict import os +import platform +import re import six import time import subprocess - from tempfile import NamedTemporaryFile + from charmhelpers.core.host import ( lsb_release ) -from charmhelpers.core.hookenv import log -from charmhelpers.fetch import SourceConfigError - +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment.
+ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ - -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" - CLOUD_ARCHIVE_POCKETS = { # Folsom 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', 'precise-folsom': 'precise-updates/folsom', 'precise-folsom/updates': 'precise-updates/folsom', 'precise-updates/folsom': 'precise-updates/folsom', @@ -43,6 +62,7 @@ 'precise-proposed/folsom': 'precise-proposed/folsom', # Grizzly 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', 'precise-grizzly': 'precise-updates/grizzly', 'precise-grizzly/updates': 'precise-updates/grizzly', 'precise-updates/grizzly': 'precise-updates/grizzly', @@ -51,6 +71,7 @@ 'precise-proposed/grizzly': 'precise-proposed/grizzly', # Havana 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', 'precise-havana': 'precise-updates/havana', 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', @@ -59,6 +80,7 @@ 'precise-proposed/havana': 'precise-proposed/havana', # Icehouse 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', 'precise-icehouse': 'precise-updates/icehouse', 'precise-icehouse/updates': 'precise-updates/icehouse', 'precise-updates/icehouse': 'precise-updates/icehouse', @@ -67,6 +89,7 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', # Juno 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', 'trusty-juno': 'trusty-updates/juno', 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', @@ -75,6 +98,7 @@ 'trusty-proposed/juno': 'trusty-proposed/juno', # Kilo 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', 'trusty-kilo': 'trusty-updates/kilo', 'trusty-kilo/updates': 'trusty-updates/kilo', 'trusty-updates/kilo': 'trusty-updates/kilo', @@ -83,6 +107,7 @@ 'trusty-proposed/kilo': 'trusty-proposed/kilo', # Liberty 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', 'trusty-liberty': 'trusty-updates/liberty', 'trusty-liberty/updates': 'trusty-updates/liberty', 'trusty-updates/liberty': 'trusty-updates/liberty', @@ -91,6 +116,7 @@ 'trusty-proposed/liberty': 'trusty-proposed/liberty', # Mitaka 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', 'trusty-mitaka': 'trusty-updates/mitaka', 'trusty-mitaka/updates': 'trusty-updates/mitaka', 'trusty-updates/mitaka': 'trusty-updates/mitaka', @@ -99,6 +125,7 @@ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', # Newton 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', 'xenial-newton': 'xenial-updates/newton', 'xenial-newton/updates': 'xenial-updates/newton', 'xenial-updates/newton': 'xenial-updates/newton', @@ -107,12 +134,13 @@ 'xenial-proposed/newton': 'xenial-proposed/newton', # Ocata 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', 'xenial-ocata': 'xenial-updates/ocata', 'xenial-ocata/updates': 'xenial-updates/ocata', 'xenial-updates/ocata': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 
'xenial-proposed/ocata', - 'xenial-ocata/newton': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', # Pike 'pike': 'xenial-updates/pike', 'xenial-pike': 'xenial-updates/pike', @@ -120,7 +148,7 @@ 'xenial-updates/pike': 'xenial-updates/pike', 'pike/proposed': 'xenial-proposed/pike', 'xenial-pike/proposed': 'xenial-proposed/pike', - 'xenial-pike/newton': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', # Queens 'queens': 'xenial-updates/queens', 'xenial-queens': 'xenial-updates/queens', @@ -128,12 +156,13 @@ 'xenial-updates/queens': 'xenial-updates/queens', 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', - 'xenial-queens/newton': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', } + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -161,7 +190,7 @@ def apt_cache(in_memory=True, progress=None): return apt_pkg.Cache(progress) -def install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False): """Install one or more packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -178,7 +207,7 @@ def install(packages, options=None, fatal=False): _run_apt_command(cmd, fatal) -def upgrade(options=None, fatal=False, dist=False): +def apt_upgrade(options=None, fatal=False, dist=False): """Upgrade all packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -193,13 +222,13 @@ def upgrade(options=None, fatal=False, dist=False): _run_apt_command(cmd, fatal) -def update(fatal=False): +def apt_update(fatal=False): """Update local apt cache.""" cmd = ['apt-get', 'update'] _run_apt_command(cmd, fatal) -def purge(packages, fatal=False): +def apt_purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): @@ -233,7 +262,58 @@ def apt_unhold(packages, fatal=False): return apt_mark(packages, 'unhold', fatal=fatal) -def add_source(source, key=None): +def import_key(key): + """Import an ASCII Armor key. + + /!\ A Radix64 format keyid is also supported for backwards + compatibility, but should never be used; the key retrieval + mechanism is insecure and subject to man-in-the-middle attacks + voiding all signature checks using that key. + + :param keyid: The key in ASCII armor format, + including BEGIN and END markers. + :raises: GPGKeyError if the key could not be imported + """ + key = key.strip() + if '-' in key or '\n' in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. 
+ log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and + '-----END PGP PUBLIC KEY BLOCK-----' in key): + log("Importing ASCII Armor PGP key", level=DEBUG) + with NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + # We should only send things obviously not a keyid offsite + # via this unsecured protocol, as it may be a secret or part + # of one. + log("PGP key found (looks like Radix64 format)", level=WARNING) + log("INSECURLY importing PGP key from keyserver; " + "full key not provided.", level=WARNING) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + + +def add_source(source, key=None, fail_invalid=False): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by @@ -249,6 +329,33 @@ def add_source(source, key=None): such as 'cloud:icehouse' 'distro' may be used as a noop + Full list of source specifications supported by the function are: + + 'distro': A NOP; i.e. it has no effect. + 'proposed': the proposed deb spec [2] is wrtten to + /etc/apt/sources.list/proposed + 'distro-proposed': adds -proposed to the debs [2] + 'ppa:': add-apt-repository --yes + 'deb ': add-apt-repository --yes deb + 'http://....': add-apt-repository --yes http://... + 'cloud-archive:': add-apt-repository -yes cloud-archive: + 'cloud:[-staging]': specify a Cloud Archive pocket with + optional staging version. If staging is used then the staging PPA [2] + with be used. If staging is NOT used then the cloud archive [3] will be + added, and the 'ubuntu-cloud-keyring' package will be added for the + current distro. + + Otherwise the source is not recognised and this is logged to the juju log. + However, no error is raised, unless sys_error_on_exit is True. + + [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main + where {} is replaced with the derived pocket name. + [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ + main universe multiverse restricted + where {} is replaced with the lsb_release codename (e.g. xenial) + [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu + to /etc/apt/sources.list.d/cloud-archive-list + @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. Ideally, this should be an ASCII format GPG public key including the block headers. A GPG key @@ -256,51 +363,142 @@ def add_source(source, key=None): available to retrieve the actual public key from a public keyserver placing your Juju environment at risk. ppa and cloud archive keys are securely added automtically, so sould not be provided. + + @param fail_invalid: (boolean) if True, then the function raises a + SourceConfigError is there is no matching installation source. 
+ + @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a + valid pocket in CLOUD_ARCHIVE_POCKETS + """ + _mapping = OrderedDict([ + (r"^distro$", lambda: None), # This is a NOP + (r"^(?:proposed|distro-proposed)$", _add_proposed), + (r"^cloud-archive:(.*)$", _add_apt_repository), + (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), + (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), + (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), + (r"^cloud:(.*)$", _add_cloud_pocket), + (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), + ]) if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - cmd = ['add-apt-repository', '--yes', source] - _run_with_retries(cmd) - elif source.startswith('cloud:'): - install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass + source = '' + for r, fn in six.iteritems(_mapping): + m = re.match(r, source) + if m: + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) + if key: + try: + import_key(key) + except GPGKeyError as e: + raise SourceConfigError(str(e)) + break else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + # nothing matched. log an error and maybe sys.exit + err = "Unknown source: {!r}".format(source) + log(err) + if fail_invalid: + raise SourceConfigError(err)
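The ordered regex table makes the new dispatch easy to trace. A hedged sketch of how a few source strings resolve through a table like the one above (handler names shown as strings purely for illustration):

import re
from collections import OrderedDict

mapping = OrderedDict([
    (r"^distro$", 'noop'),
    (r"^(?:proposed|distro-proposed)$", '_add_proposed'),
    (r"^cloud:(.*)-(.*)$", '_add_cloud_distro_check'),
    (r"^cloud:(.*)$", '_add_cloud_pocket'),
])

for spec in ('distro', 'cloud:xenial-ocata', 'cloud:xenial-updates/ocata'):
    for pattern, handler in mapping.items():
        m = re.match(pattern, spec)
        if m:
            print(spec, '->', handler, m.groups())
            break
# distro -> noop ()
# cloud:xenial-ocata -> _add_cloud_distro_check ('xenial', 'ocata')
# cloud:xenial-updates/ocata -> _add_cloud_distro_check ('xenial', 'updates/ocata')

+ + +def _add_proposed(): + """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list + + Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for + the deb line. + + For Intel architectures PROPOSED_POCKET is used for the release, but for + other architectures PROPOSED_PORTS_POCKET is used for the release.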
+ """ + release = lsb_release()['DISTRIB_CODENAME'] + arch = platform.machine() + if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + raise SourceConfigError("Arch {} not supported for (distro-)proposed" + .format(arch)) + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) + + +def _add_apt_repository(spec): + """Add the spec using add_apt_repository + + :param spec: the parameter to pass to add_apt_repository + """ + _run_with_retries(['add-apt-repository', '--yes', spec]) + + +def _add_cloud_pocket(pocket): + """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list + + Note that this overwrites the existing file if there is one. + + This function also converts the simple pocket in to the actual pocket using + the CLOUD_ARCHIVE_POCKETS mapping. + + :param pocket: string representing the pocket to add a deb spec for. + :raises: SourceConfigError if the cloud pocket doesn't exist or the + requested release doesn't match the current distro version. + """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release:String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is in the same as the current ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. 
+ """ + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + if release != ubuntu_rel: + raise SourceConfigError( + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' + 'version ({})'.format(release, os_release, ubuntu_rel)) def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), @@ -316,9 +514,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), :param: cmd_env: dict: Environment variables to add to the command run. """ - env = os.environ.copy() + env = None + kwargs = {} if cmd_env: + env = os.environ.copy() env.update(cmd_env) + kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -330,7 +531,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - result = subprocess.check_call(cmd, env=env) + # result = subprocess.check_call(cmd, env=env) + result = subprocess.check_call(cmd, **kwargs) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -343,6 +545,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. + :param: cmd: str: The apt command to run. :param: fatal: bool: Whether the command's output should be checked and retried. """ diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index fccad357..5aca8df1 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -29,6 +29,7 @@ relation_set, log, DEBUG, + WARNING, Hooks, UnregisteredHookError, status_set, ) @@ -287,6 +288,14 @@ def ha_relation_joined(relation_id=None): if iface is not None: vip_key = 'res_cephrg_{}_vip'.format(iface) + if vip_key in vip_group: + if vip not in resource_params[vip_key]: + vip_key = '{}_{}'.format(vip_key, vip_params) + else: + log("Resource '%s' (vip='%s') already exists in " + "vip group - skipping" % (vip_key, vip), WARNING) + continue + resources[vip_key] = res_rgw_vip resource_params[vip_key] = ( 'params {ip}="{vip}" cidr_netmask="{netmask}"' diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py index 48867880..e7aa4715 100644 --- a/ceph-radosgw/tests/charmhelpers/__init__.py +++ b/ceph-radosgw/tests/charmhelpers/__init__.py @@ -14,6 +14,11 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. +from __future__ import print_function +from __future__ import absolute_import + +import functools +import inspect import subprocess import sys @@ -34,3 +39,59 @@ else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # flake8: noqa + + +# Holds a list of mapping of mangled function names that have been deprecated +# using the @deprecate decorator below. This is so that the warning is only +# printed once for each usage of the function. +__deprecated_functions = {} + + +def deprecate(warning, date=None, log=None): + """Add a deprecation warning the first time the function is used. + The date, which is a string in semi-ISO8660 format indicate the year-month + that the function is officially going to be removed. + + usage: + + @deprecate('use core/fetch/add_source() instead', '2017-04') + def contributed_add_source_thing(...): + ... + + And it then prints to the log ONCE that the function is deprecated. 
The reason for passing the logging function (log) is so that hookenv.log + can be used for a charm if needed. + + :param warning: String to indicate where it has moved to. + :param date: optional string, in YYYY-MM format to indicate when the + function will definitely (probably) be removed. + :param log: The log function to call to log. If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip =
keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + 
tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. 
+ + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 88e80a49..5656e2f5 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. 
+ if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): From a9a78bef47ec2706a31548e9d1552d29a68f0f0d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 16 Aug 2017 14:40:14 +0200 Subject: [PATCH 1349/2699] Sync charms.ceph changes - ensure filestore is used by default - allow upgrades to luminous Change-Id: I85722ce38d6c4be6f6afe4ac15a74151a3cd003d Depends-on: I8205f7c7c63ec30900c4afdc76df174d3d9a8466 --- ceph-mon/lib/ceph/utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index b96dabbf..e0fd6be4 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1405,6 +1405,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, # NOTE(jamespage): enable experimental bluestore support if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: cmd.append('--bluestore') + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') cmd.append(dev) @@ -2061,6 +2063,7 @@ def dirs_need_ownership_update(service): UPGRADE_PATHS = { 'firefly': 'hammer', 'hammer': 'jewel', + 'jewel': 'luminous', } # Map UCA codenames to ceph codenames @@ -2072,6 +2075,8 @@ def dirs_need_ownership_update(service): 'mitaka': 'jewel', 'newton': 'jewel', 'ocata': 'jewel', + 'pike': 'luminous', + 'queens': 'luminous', } From 2cea9ad5116f0cf7e5008763141e432c2075fc0d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 16 Aug 2017 14:40:19 +0200 Subject: [PATCH 1350/2699] Sync charms.ceph changes - ensure filestore is used by default - allow upgrades to luminous Change-Id: I00aa4fcba53d9bd28592b9ba1b89f74d869148c2 Depends-on: I8205f7c7c63ec30900c4afdc76df174d3d9a8466 --- ceph-osd/lib/ceph/utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index b96dabbf..e0fd6be4 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1405,6 +1405,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, # NOTE(jamespage): enable experimental bluestore support if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: cmd.append('--bluestore') + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') cmd.append(dev) @@ -2061,6 +2063,7 @@ def dirs_need_ownership_update(service): UPGRADE_PATHS = { 'firefly': 'hammer', 'hammer': 'jewel', + 'jewel': 'luminous', } # Map UCA codenames to ceph codenames @@ -2072,6 +2075,8 @@ def dirs_need_ownership_update(service): 'mitaka': 'jewel', 'newton': 'jewel', 'ocata': 'jewel', + 'pike': 'luminous', + 'queens': 'luminous', } From 3723d66d08e0a44167d97ef4cfa0492764d52049 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Aug 2017 16:12:21 +0200 Subject: [PATCH 1351/2699] remove trusty requirement to allow xenial upgrades Closes-Bug: #1709962 Closes-Bug: #1710645 Change-Id: I442072e0c10d0df95c40232f0191f5cc8b63ee8c --- ceph-mon/hooks/ceph_hooks.py | 7 ------- ceph-mon/unit_tests/test_upgrade.py | 12 ++---------- 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8b7a9c97..93eed209 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -26,7 +26,6 @@ process_requests 
) -from charmhelpers.core import host from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, @@ -92,12 +91,6 @@ def check_for_upgrade(): log("Ceph is not bootstrapped, skipping upgrade checks.") return - release_info = host.lsb_release() - if not release_info['DISTRIB_CODENAME'] == 'trusty': - log("Invalid upgrade path from {}. Only trusty is currently " - "supported".format(release_info['DISTRIB_CODENAME'])) - return - c = hookenv.config() old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro') diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index 75be7196..f860f61e 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -20,15 +20,11 @@ class UpgradeRollingTestCase(unittest.TestCase): @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') - @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_monitor_cluster') - def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv, + def test_check_for_upgrade(self, roll_monitor_cluster, hookenv, version, is_bootstrapped): is_bootstrapped.return_value = True version.side_effect = ['firefly', 'hammer'] - host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } previous_mock = MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" hookenv.config.side_effect = [previous_mock, @@ -42,16 +38,12 @@ def test_check_for_upgrade(self, roll_monitor_cluster, host, hookenv, @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') - @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, - host, hookenv, + hookenv, version, is_bootstrapped): is_bootstrapped.return_value = False version.side_effect = ['firefly', 'hammer'] - host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } previous_mock = MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" hookenv.config.side_effect = [previous_mock, From fc52dd76caf1a90d9242f9bf874ff4d0e16a5fd7 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Aug 2017 16:12:05 +0200 Subject: [PATCH 1352/2699] remove trusty requirement to allow xenial upgrades Closes-Bug: #1709962 Closes-Bug: #1710645 Change-Id: I1b6d91f0f09f0142f4470d8ae3eea650165a0575 --- ceph-osd/hooks/ceph_hooks.py | 7 ------- ceph-osd/unit_tests/test_upgrade.py | 16 +++------------- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 6104a2c0..658ce1c6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -57,7 +57,6 @@ get_upstream_version, ) from charmhelpers.core.sysctl import create as create_sysctl -from charmhelpers.core import host from charmhelpers.contrib.openstack.context import AppArmorContext from utils import ( get_host_ip, @@ -87,12 +86,6 @@ def check_for_upgrade(): log("Ceph is not bootstrapped, skipping upgrade checks.") return - release_info = host.lsb_release() - if not release_info['DISTRIB_CODENAME'] == 'trusty': - log("Invalid upgrade path from {}. 
Only trusty is currently " - "supported".format(release_info['DISTRIB_CODENAME'])) - return - c = hookenv.config() old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro') diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index 4d3bf0a3..383519c8 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -23,17 +23,13 @@ class UpgradeRollingTestCase(unittest.TestCase): @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') - @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_osd_cluster') - def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, + def test_check_for_upgrade(self, roll_osd_cluster, hookenv, emit_cephconf, version, is_bootstrapped, dirs_need_ownership_update): dirs_need_ownership_update.return_value = False is_bootstrapped.return_value = True version.side_effect = ['firefly', 'hammer'] - host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } previous_mock = MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" hookenv.config.side_effect = [previous_mock, @@ -50,16 +46,14 @@ def test_check_for_upgrade(self, roll_osd_cluster, host, hookenv, @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') - @patch('ceph_hooks.host.lsb_release') @patch('ceph_hooks.ceph.roll_osd_cluster') - def test_resume_failed_upgrade(self, roll_osd_cluster, lsb_release, + def test_resume_failed_upgrade(self, roll_osd_cluster, hookenv, emit_cephconf, version, is_bootstrapped, dirs_need_ownership_update): dirs_need_ownership_update.return_value = True is_bootstrapped.return_value = True version.side_effect = ['jewel', 'jewel'] - lsb_release.return_value = {'DISTRIB_CODENAME': 'trusty'} check_for_upgrade() @@ -71,16 +65,12 @@ def test_resume_failed_upgrade(self, roll_osd_cluster, lsb_release, @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') - @patch('ceph_hooks.host') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, - host, hookenv, + hookenv, version, is_bootstrapped): is_bootstrapped.return_value = False version.side_effect = ['firefly', 'hammer'] - host.lsb_release.return_value = { - 'DISTRIB_CODENAME': 'trusty', - } previous_mock = MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" hookenv.config.side_effect = [previous_mock, From b248df28bbf8406d2e501362f7de9266bcb125e8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Aug 2017 09:51:25 -0500 Subject: [PATCH 1353/2699] Remove deprecated series metadata and tests Change-Id: I96b2e848babc60a7930966152d5d2585ee1c238f --- ceph-fs/src/metadata.yaml | 1 - ceph-fs/src/tests/gate-basic-yakkety-newton | 23 --------------------- 2 files changed, 24 deletions(-) delete mode 100755 ceph-fs/src/tests/gate-basic-yakkety-newton diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 591b1b2e..bb9ed281 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - zesty - - yakkety subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/gate-basic-yakkety-newton b/ceph-fs/src/tests/gate-basic-yakkety-newton deleted file mode 100755 index f1846705..00000000 --- a/ceph-fs/src/tests/gate-basic-yakkety-newton +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env 
python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on yakkety-newton.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='yakkety') - deployment.run_tests() From a9d8298bf1371c289d64fb985dc962510b44e7e0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Aug 2017 09:51:34 -0500 Subject: [PATCH 1354/2699] Remove deprecated series metadata and tests Change-Id: I9fc41c55e50238a27443adb2829a53333b282a8f --- ceph-mon/metadata.yaml | 1 - ceph-mon/tests/gate-basic-yakkety-newton | 23 ----------------------- 2 files changed, 24 deletions(-) delete mode 100755 ceph-mon/tests/gate-basic-yakkety-newton diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index b47aeef5..82f9d191 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -13,7 +13,6 @@ series: - xenial - zesty - trusty - - yakkety peers: mon: interface: ceph diff --git a/ceph-mon/tests/gate-basic-yakkety-newton b/ceph-mon/tests/gate-basic-yakkety-newton deleted file mode 100755 index 9ae44008..00000000 --- a/ceph-mon/tests/gate-basic-yakkety-newton +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on yakkety-newton.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='yakkety') - deployment.run_tests() From 72c180a98777f36c79564315e5c714d34e05f4b2 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Aug 2017 09:51:42 -0500 Subject: [PATCH 1355/2699] Remove deprecated series metadata and tests Change-Id: I7f7eb751b1996415ede24ac61f51bad685735da0 --- ceph-osd/metadata.yaml | 1 - ceph-osd/tests/gate-basic-yakkety-newton | 23 ----------------------- 2 files changed, 24 deletions(-) delete mode 100755 ceph-osd/tests/gate-basic-yakkety-newton diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index f8a0ef09..8c61f070 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -14,7 +14,6 @@ series: - xenial - zesty - trusty - - yakkety description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. 
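Note on the charms.ceph sync patches above: they extend UPGRADE_PATHS with a jewel -> luminous step and map the pike/queens UCA pockets to luminous, so rolling upgrades are resolved one supported hop at a time. A minimal sketch of how a release chain can be derived from a mapping of that shape (upgrade_chain is a hypothetical helper for illustration, not part of charms.ceph):

# Sketch only: UPGRADE_PATHS mirrors the mapping added by the sync
# patches above; upgrade_chain is a hypothetical helper.
UPGRADE_PATHS = {
    'firefly': 'hammer',
    'hammer': 'jewel',
    'jewel': 'luminous',
}


def upgrade_chain(current, target):
    """Return the ordered releases to step through, or None if unreachable."""
    chain = []
    release = current
    while release != target:
        release = UPGRADE_PATHS.get(release)
        if release is None:
            # no supported path from `current` to `target`
            return None
        chain.append(release)
    return chain


assert upgrade_chain('firefly', 'luminous') == ['hammer', 'jewel', 'luminous']

Because every hop must appear in the mapping, a cluster can only reach luminous by passing through each intermediate release in order; anything off the chain resolves to None.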
diff --git a/ceph-osd/tests/gate-basic-yakkety-newton b/ceph-osd/tests/gate-basic-yakkety-newton deleted file mode 100755 index aac31445..00000000 --- a/ceph-osd/tests/gate-basic-yakkety-newton +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on yakkety-newton.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='yakkety') - deployment.run_tests() From 8831eb785adaf9e8f27b88c7bc791652a86c657d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Aug 2017 09:51:51 -0500 Subject: [PATCH 1356/2699] Remove deprecated series metadata and tests Change-Id: I79283330e7bae711c6d8866460861217bcb55a75 --- ceph-proxy/metadata.yaml | 1 - ceph-proxy/tests/gate-basic-yakkety-newton | 9 --------- 2 files changed, 10 deletions(-) delete mode 100755 ceph-proxy/tests/gate-basic-yakkety-newton diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 7462ead1..cf682d52 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -13,7 +13,6 @@ series: - xenial - zesty - trusty - - yakkety extra-bindings: public: cluster: diff --git a/ceph-proxy/tests/gate-basic-yakkety-newton b/ceph-proxy/tests/gate-basic-yakkety-newton deleted file mode 100755 index cb5db9d3..00000000 --- a/ceph-proxy/tests/gate-basic-yakkety-newton +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on yakkety-newton.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='yakkety') - deployment.run_tests() From c9d6145651e4e1f355687475f8cf9b9a40091d60 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 23 Aug 2017 09:52:01 -0500 Subject: [PATCH 1357/2699] Remove deprecated series metadata and tests Change-Id: I921ca911e9908cf606a1531fc3958e3d49efcff1 --- ceph-radosgw/metadata.yaml | 1 - ceph-radosgw/tests/gate-basic-yakkety-newton | 23 -------------------- 2 files changed, 24 deletions(-) delete mode 100755 ceph-radosgw/tests/gate-basic-yakkety-newton diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 34f29f41..4c3737b8 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -16,7 +16,6 @@ series: - xenial - zesty - trusty - - yakkety extra-bindings: public: admin: diff --git a/ceph-radosgw/tests/gate-basic-yakkety-newton b/ceph-radosgw/tests/gate-basic-yakkety-newton deleted file mode 100755 index 0b32576e..00000000 --- a/ceph-radosgw/tests/gate-basic-yakkety-newton +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on yakkety-newton.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='yakkety') - deployment.run_tests() From 8bc28e28fe20540c6cabb70b44b94a90917a9d12 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 24 Aug 2017 16:46:44 -0500 Subject: [PATCH 1358/2699] Sync charm-helpers Change-Id: I8b299c620ca80f6f560e2e57dcf67d6b6fc77e64 --- ceph-mon/hooks/charmhelpers/__init__.py | 61 +++ .../charmhelpers/contrib/charmsupport/nrpe.py | 9 +- .../contrib/hardening/apache/checks/config.py | 5 +- .../hooks/charmhelpers/contrib/network/ip.py | 6 +- .../contrib/openstack/amulet/utils.py | 103 +++-- .../charmhelpers/contrib/openstack/context.py | 96 +++-- .../contrib/openstack/keystone.py | 2 +- .../contrib/openstack/templating.py | 7 +- .../charmhelpers/contrib/openstack/utils.py | 361 ++++++++++-------- .../contrib/storage/linux/bcache.py | 74 ++++ .../contrib/storage/linux/ceph.py | 44 ++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 40 ++ ceph-mon/hooks/charmhelpers/core/host.py | 38 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 26 +- ceph-mon/hooks/charmhelpers/fetch/centos.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/snap.py | 22 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 327 +++++++++++++--- ceph-mon/tests/charmhelpers/__init__.py | 61 +++ .../contrib/openstack/amulet/utils.py | 103 +++-- ceph-mon/tests/charmhelpers/core/hookenv.py | 40 ++ ceph-mon/tests/charmhelpers/core/host.py | 38 +- 21 files changed, 1116 insertions(+), 349 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index 48867880..e7aa4715 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -14,6 +14,11 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. +from __future__ import print_function +from __future__ import absolute_import + +import functools +import inspect import subprocess import sys @@ -34,3 +39,59 @@ else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # flake8: noqa + + +# Holds a list of mapping of mangled function names that have been deprecated +# using the @deprecate decorator below. This is so that the warning is only +# printed once for each usage of the function. +__deprecated_functions = {} + + +def deprecate(warning, date=None, log=None): + """Add a deprecation warning the first time the function is used. + The date, which is a string in semi-ISO8601 format, indicates the year-month + that the function is officially going to be removed. + + usage: + + @deprecate('use core/fetch/add_source() instead', '2017-04') + def contributed_add_source_thing(...): + ... + + And it then prints to the log ONCE that the function is deprecated. + The reason for passing the logging function (log) is so that hookenv.log + can be used for a charm if needed.
+ + :param warning: String to indicate where it has moved to. + :param date: optional string, in YYYY-MM format to indicate when the + function will definitely (probably) be removed. + :param log: The log function to call to log. If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8240249e..80d574dc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -125,7 +125,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_]+$' + shortname_re = '[A-Za-z0-9-_.]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed @@ -193,6 +193,13 @@ def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index b18b263d..06482aac 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -46,8 +46,9 @@ def get_audits(): context = ApacheConfContext() settings = utils.get_settings('apache') audits = [ - FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', - group='root', mode=0o0640), + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), TemplatedFile(os.path.join(settings['common']['apache_dir'], 'mods-available/alias.conf'), diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index fc3f5e3e..d7e6debf 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -243,11 +243,13 @@ def is_ipv6_disabled(): try: result = subprocess.check_output( ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT) - return "net.ipv6.conf.all.disable_ipv6 = 1" in result + stderr=subprocess.STDOUT, + universal_newlines=True) except subprocess.CalledProcessError: return True + return
"net.ipv6.conf.all.disable_ipv6 = 1" in result + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): 
"""Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index ea93159d..f67f3265 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -41,9 +41,9 @@ charm_name, DEBUG, INFO, - WARNING, ERROR, status_set, + network_get_primary_address ) from charmhelpers.core.sysctl import create as sysctl_create @@ -80,6 +80,9 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -87,7 +90,6 @@ get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network, is_bridge_member, is_ipv6_disabled, ) @@ -97,6 +99,7 @@ 
git_determine_usr_bin, git_determine_python_path, enable_memcache, + snap_install_requested, ) from charmhelpers.core.unitdata import kv @@ -244,6 +247,11 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } + # Note(coreycb): We can drop mysql+pymysql if we want when the + # following review lands, though it seems mysql+pymysql would + # be preferred. https://review.openstack.org/#/c/462190/ + if snap_install_requested(): + ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt @@ -510,6 +518,10 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features ceph_addrs = relation_get('ceph-public-address', rid=rid, unit=unit) @@ -610,7 +622,6 @@ def __call__(self): ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') if config('prefer-ipv6'): - ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' else: @@ -726,11 +737,17 @@ def canonical_names(self): return sorted(list(set(cns))) def get_network_addresses(self): - """For each network configured, return corresponding address and vip - (if available). + """For each network configured, return corresponding address and + hostname or vip (if available). Returns a list of tuples of the form: + [(address_in_net_a, hostname_in_net_a), + (address_in_net_b, hostname_in_net_b), + ...] + + or, if no hostname(s) available: + [(address_in_net_a, vip_in_net_a), (address_in_net_b, vip_in_net_b), ...] @@ -742,32 +759,27 @@ def get_network_addresses(self): ...] """ addresses = [] - if config('vip'): - vips = config('vip').split() - else: - vips = [] - - for net_type in ['os-internal-network', 'os-admin-network', - 'os-public-network']: - addr = get_address_in_network(config(net_type), - unit_get('private-address')) - if len(vips) > 1 and is_clustered(): - if not config(net_type): - log("Multiple networks configured but net_type " - "is None (%s)." % net_type, level=WARNING) - continue - - for vip in vips: - if is_address_in_network(config(net_type), vip): - addresses.append((addr, vip)) - break - - elif is_clustered() and config('vip'): - addresses.append((addr, config('vip'))) + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit.
+ fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) else: - addresses.append((addr, addr)) + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except NotImplementedError: + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) - return sorted(addresses) + return sorted(set(addresses)) def __call__(self): if isinstance(self.external_ports, six.string_types): @@ -794,7 +806,7 @@ def __call__(self): self.configure_cert(cn) addresses = self.get_network_addresses() - for address, endpoint in sorted(set(addresses)): + for address, endpoint in addresses: for api_port in self.external_ports: ext_port = determine_apache_port(api_port, singlenode_mode=True) @@ -1397,14 +1409,38 @@ def __call__(self): 'rel_key': 'dns-domain', 'default': None, }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + if ctxt['enable_qos']: + ctxt['extension_drivers'] = 'qos' + else: + ctxt['extension_drivers'] = '' + return ctxt def get_neutron_options(self, rdata): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py index a15a03fa..d7e02ccd 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py @@ -29,7 +29,7 @@ def get_api_suffix(api_version): @returns the api suffix formatted according to the given api version """ - return 'v2.0' if api_version in (2, "2.0") else 'v3' + return 'v2.0' if api_version in (2, "2", "2.0") else 'v3' def format_endpoint(schema, addr, port, api_version): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index 934baf5d..d8c1fc7f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -20,7 +20,8 @@ from charmhelpers.core.hookenv import ( log, ERROR, - INFO + INFO, + TRACE ) from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES @@ -80,8 +81,10 @@ def get_loader(templates_dir, os_release): loaders.insert(0, FileSystemLoader(tmpl_dir)) if rel == os_release: break + # demote this log to the lowest level; we don't really need to see these + # logs in production even when debugging.
log('Creating choice loader with dirs: %s' % - [l.searchpath for l in loaders], level=INFO) + [l.searchpath for l in loaders], level=TRACE) return ChoiceLoader(loaders) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 161c786b..837a1674 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -26,11 +26,12 @@ import shutil import six -import tempfile import traceback import uuid import yaml +from charmhelpers import deprecate + from charmhelpers.contrib.network import ip from charmhelpers.core import unitdata @@ -41,7 +42,6 @@ config, log as juju_log, charm_dir, - DEBUG, INFO, ERROR, related_units, @@ -51,6 +51,7 @@ status_set, hook_name, application_version_set, + cached, ) from charmhelpers.core.strutils import BasicStringComparator @@ -82,11 +83,21 @@ restart_on_change_helper, ) from charmhelpers.fetch import ( - apt_install, apt_cache, install_remote, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, get_upstream_version ) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + SNAP_CHANNELS, +) + from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -175,7 +186,7 @@ ('ocata', ['2.11.0', '2.12.0', '2.13.0']), ('pike', - ['2.13.0']), + ['2.13.0', '2.15.0']), ]) # >= Liberty version->codename mapping @@ -324,8 +335,10 @@ def get_os_codename_install_source(src): return ca_rel # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in six.iteritems(OPENSTACK_CODENAMES): + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): if v in src: return v @@ -394,6 +407,19 @@ def get_swift_codename(version): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + import apt_pkg as apt cache = apt_cache() @@ -469,13 +495,14 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) -os_rel = None +# Module local cache variable for the os_release. +_os_rel = None def reset_os_release(): '''Unset the cached os_release version''' - global os_rel - os_rel = None + global _os_rel + _os_rel = None def os_release(package, base='essex', reset_cache=False): @@ -489,150 +516,77 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. 
''' - global os_rel + global _os_rel if reset_cache: reset_os_release() - if os_rel: - return os_rel - os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return os_rel + if _os_rel: + return _os_rel + _os_rel = ( + git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return _os_rel +@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) def import_key(keyid): - key = keyid.strip() - if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and - key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): - juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - juju_log("Importing ASCII Armor PGP key", level=DEBUG) - with tempfile.NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - else: - juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) - juju_log("Importing PGP key from keyserver", level=DEBUG) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - - -def get_source_and_pgp_key(input): - """Look for a pgp key ID or ascii-armor key in the given input.""" - index = input.strip() - index = input.rfind('|') - if index < 0: - return input, None - - key = input[index + 1:].strip('|') - source = input[:index] - return source, key - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - if rel == 'distro': - return - elif rel == 'distro-proposed': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(DISTRO_PROPOSED % ubuntu_rel) - elif rel[:4] == "ppa:": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) + """Import a key, either ASCII armored, or a GPG key id. - if 'staging' in ca_rel: - # staging is just a regular PPA. - os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. 
- pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'pike': 'xenial-updates/pike', - 'pike/updates': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'queens': 'xenial-updates/queens', - 'queens/updates': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - } + @param keyid: the key in ASCII armor format, or a GPG key id. + @raises SystemExit() via sys.exit() on failure. + """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - apt_install('ubuntu-cloud-keyring', fatal=True) +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) + :param source_and_key: String, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. + + The functionality is provided by charmhelpers.fetch.add_source() + The difference between the two functions is that add_source() signature + requires the key to be passed directly, whereas this function passes an + optional key by appending '|' to the end of the source specification + 'source'. + + Another difference from add_source() is that the function calls sys.exit(1) + if the configuration fails, whereas add_source() raises + SourceConfigurationError(). Another difference is that add_source() + silently fails (with a juju_log command) if there is no matching source to + configure, whereas this function fails with a sys.exit(1). + + :param source: String_plus_key -- see above for details.
+ + Note that the behaviour on error is to log the error to the juju log and + then call sys.exit(1). + """ + # extract the key if there is one, denoted by a '|' in the rel + source, key = get_source_and_pgp_key(source_plus_key) + + # handle the ordinary sources via add_source + try: + fetch_add_source(source, key, fail_invalid=True) + except SourceConfigError as se: + error_out(str(se)) def config_value_changed(option): @@ -677,12 +631,14 @@ def openstack_upgrade_available(package): :returns: bool: Returns True if configured installation source offers a newer version of package. - """ import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) + if not cur_vers: + # The package has not been installed yet, do not attempt upgrade + return False if "swift" in package: codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) @@ -1933,6 +1889,30 @@ def wrapped_f(*args, **kwargs): return wrap +def ordered(orderme): + """Converts the provided dictionary into a collections.OrderedDict. + + The items in the returned OrderedDict will be inserted based on the + natural sort order of the keys. Nested dictionaries will also be sorted + in order to ensure fully predictable ordering. + + :param orderme: the dict to order + :return: collections.OrderedDict + :raises: ValueError: if `orderme` isn't a dict instance. + """ + if not isinstance(orderme, dict): + raise ValueError('argument must be a dict type') + + result = OrderedDict() + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + if isinstance(v, dict): + result[k] = ordered(v) + else: + result[k] = v + + return result + + def config_flags_parser(config_flags): """Parses config flags string into dict. @@ -1944,15 +1924,13 @@ def config_flags_parser(config_flags): example, a string in the format of 'key1=value1, key2=value2' will return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. + {'key1': 'value1', 'key2': 'value2'}. 2. A string in the above format, but supporting a comma-delimited list of values for the same key. For example, a string in the format of 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} + {'key1': 'value1', 'key2': 'value2,value3,value4'} 3. A string containing a colon character (:) prior to an equal character (=) will be treated as yaml and parsed as such. This can be @@ -1972,7 +1950,7 @@ def config_flags_parser(config_flags): equals = config_flags.find('=') if colon > 0: if colon < equals or equals < 0: - return yaml.safe_load(config_flags) + return ordered(yaml.safe_load(config_flags)) if config_flags.find('==') >= 0: juju_log("config_flags is not in expected format (key=value)", @@ -1985,7 +1963,7 @@ def config_flags_parser(config_flags): # split on '='. split = config_flags.strip(' =').split('=') limit = len(split) - flags = {} + flags = OrderedDict() for i in range(0, limit - 1): current = split[i] next = split[i + 1] @@ -2052,3 +2030,84 @@ def token_cache_pkgs(source=None, release=None): if enable_memcache(source=source, release=release): packages.extend(['memcached', 'python-memcache']) return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict.
+ :param filename: json filename (e.g.: /etc/glance/policy.json) + :param items: dict of items to update + """ + with open(filename) as fd: + policy = json.load(fd) + policy.update(items) + with open(filename, "w") as fd: + fd.write(json.dumps(policy, indent=4)) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:channel-series-release + and channel is in SNAP_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + channel, series, release = _src.split('-') + if channel.lower() in SNAP_CHANNELS: + return True + return False + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:channel-series-track + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + _channel, _series, _release = _src.split('-') + channel = '--channel={}/{}'.format(_release, _channel) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel is a snapstore channel and mode is --classic, --devmode or + --jailmode. + @param refresh: If True, refresh the snaps rather than installing them + """ + + def _ensure_flag(flag): + if flag.startswith('--'): + return flag + return '--{}'.format(flag) + + if refresh: + for snap in snaps.keys(): + snap_refresh(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + else: + for snap in snaps.keys(): + snap_install(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 00000000..605991e1 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9417d684..e5a01b1b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -63,6 +63,7 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.unitdata import kv from charmhelpers.core.kernel import modprobe from charmhelpers.contrib.openstack.utils import config_flags_parser @@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def is_broker_action_done(action, rid=None, unit=None): + """Check whether broker action has completed yet. + + @param action: name of action to be performed + @returns True if action complete otherwise False + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return False + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + val = kvstore.get(key=key) + if val and val == rsp.request_id: + return True + + return False + + +def mark_broker_action_done(action, rid=None, unit=None): + """Mark action as having been completed. + + @param action: name of action to be performed + @returns None + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + kvstore.set(key=key, value=rsp.request_id) + kvstore.flush() + + class CephConfContext(object): """Ceph config (ceph.conf) context. 
@@ -1330,7 +1372,7 @@ def __call__(self): return {} conf = config_flags_parser(conf) - if type(conf) != dict: + if not isinstance(conf, dict): log("Provided config-flags is not a dictionary - ignoring", level=WARNING) return {} diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try to work out the principal unit from + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object.
+ + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 88e80a49..5656e2f5 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. 
+ if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index ec5e0fe9..480a6276 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -48,6 +48,13 @@ class AptLockError(Exception): pass +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + class BaseFetchHandler(object): """Base class for FetchHandler implementations in fetch plugins""" @@ -77,21 +84,22 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages -install = fetch.install -upgrade = fetch.upgrade -update = fetch.update -purge = fetch.purge +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge add_source = fetch.add_source if __platform__ == "ubuntu": apt_cache = fetch.apt_cache - apt_install = fetch.install - apt_update = fetch.update - apt_upgrade = fetch.upgrade - apt_purge = fetch.purge + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold + import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version elif __platform__ == "centos": yum_search = fetch.yum_search @@ -135,7 +143,7 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - fetch.update(fatal=True) + _fetch_update(fatal=True) def install_remote(source, *args, **kwargs): diff --git a/ceph-mon/hooks/charmhelpers/fetch/centos.py b/ceph-mon/hooks/charmhelpers/fetch/centos.py index 604bbfb5..a91dcff0 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/centos.py +++ b/ceph-mon/hooks/charmhelpers/fetch/centos.py @@ -132,7 +132,7 @@ def add_source(source, key=None): key_file.write(key) key_file.flush() key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file]) + subprocess.check_call(['rpm', '--import', key_file.name]) else: subprocess.check_call(['rpm', '--import', key]) diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py index 23c707b0..112a54c3 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/snap.py +++ b/ceph-mon/hooks/charmhelpers/fetch/snap.py @@ -18,15 +18,23 @@ https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html """ import subprocess -from os import environ +import os from time import sleep from charmhelpers.core.hookenv import log __author__ = 'Joseph Borg ' -SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +# The return code for "couldn't acquire lock" in Snap +# (hopefully this will be improved). +SNAP_NO_LOCK = 1 SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
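These constants drive the lock-retry loop in _snap_exec() below. A condensed sketch of the same retry pattern in isolation (this variant re-raises failures other than the lock error, rather than returning the exit code as _snap_exec() does):

    import subprocess
    from time import sleep

    SNAP_NO_LOCK = 1              # snap's "couldn't acquire lock" exit code
    SNAP_NO_LOCK_RETRY_DELAY = 10
    SNAP_NO_LOCK_RETRY_COUNT = 30

    def run_snap(args):
        """Run 'snap' with the given args, retrying while snapd holds its lock."""
        for attempt in range(SNAP_NO_LOCK_RETRY_COUNT + 1):
            try:
                return subprocess.check_call(['snap'] + args)
            except subprocess.CalledProcessError as e:
                if (e.returncode != SNAP_NO_LOCK or
                        attempt == SNAP_NO_LOCK_RETRY_COUNT):
                    raise
                sleep(SNAP_NO_LOCK_RETRY_DELAY)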
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]


 class CouldNotAcquireLockException(Exception):
@@ -47,13 +55,17 @@ def _snap_exec(commands):

     while return_code is None or return_code == SNAP_NO_LOCK:
         try:
-            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
         except subprocess.CalledProcessError as e:
             retry_count += 1
             if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
-                raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
             return_code = e.returncode
-            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
             sleep(SNAP_NO_LOCK_RETRY_DELAY)

     return return_code
diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py
index 7bc6cc7e..40e1cb5b 100644
--- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py
@@ -12,29 +12,48 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from collections import OrderedDict
 import os
+import platform
+import re
 import six
 import time
 import subprocess
-
 from tempfile import NamedTemporaryFile
+
 from charmhelpers.core.host import (
     lsb_release
 )
-from charmhelpers.core.hookenv import log
-from charmhelpers.fetch import SourceConfigError
-
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.fetch import SourceConfigError, GPGKeyError
+
+PROPOSED_POCKET = (
+    "# Proposed\n"
+    "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
+    "multiverse restricted\n")
+PROPOSED_PORTS_POCKET = (
+    "# Proposed\n"
+    "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
+    "multiverse restricted\n")
+# Only supports x86_64, ppc64le and aarch64 at the moment.
+ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ - -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" - CLOUD_ARCHIVE_POCKETS = { # Folsom 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', 'precise-folsom': 'precise-updates/folsom', 'precise-folsom/updates': 'precise-updates/folsom', 'precise-updates/folsom': 'precise-updates/folsom', @@ -43,6 +62,7 @@ 'precise-proposed/folsom': 'precise-proposed/folsom', # Grizzly 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', 'precise-grizzly': 'precise-updates/grizzly', 'precise-grizzly/updates': 'precise-updates/grizzly', 'precise-updates/grizzly': 'precise-updates/grizzly', @@ -51,6 +71,7 @@ 'precise-proposed/grizzly': 'precise-proposed/grizzly', # Havana 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', 'precise-havana': 'precise-updates/havana', 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', @@ -59,6 +80,7 @@ 'precise-proposed/havana': 'precise-proposed/havana', # Icehouse 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', 'precise-icehouse': 'precise-updates/icehouse', 'precise-icehouse/updates': 'precise-updates/icehouse', 'precise-updates/icehouse': 'precise-updates/icehouse', @@ -67,6 +89,7 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', # Juno 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', 'trusty-juno': 'trusty-updates/juno', 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', @@ -75,6 +98,7 @@ 'trusty-proposed/juno': 'trusty-proposed/juno', # Kilo 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', 'trusty-kilo': 'trusty-updates/kilo', 'trusty-kilo/updates': 'trusty-updates/kilo', 'trusty-updates/kilo': 'trusty-updates/kilo', @@ -83,6 +107,7 @@ 'trusty-proposed/kilo': 'trusty-proposed/kilo', # Liberty 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', 'trusty-liberty': 'trusty-updates/liberty', 'trusty-liberty/updates': 'trusty-updates/liberty', 'trusty-updates/liberty': 'trusty-updates/liberty', @@ -91,6 +116,7 @@ 'trusty-proposed/liberty': 'trusty-proposed/liberty', # Mitaka 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', 'trusty-mitaka': 'trusty-updates/mitaka', 'trusty-mitaka/updates': 'trusty-updates/mitaka', 'trusty-updates/mitaka': 'trusty-updates/mitaka', @@ -99,6 +125,7 @@ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', # Newton 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', 'xenial-newton': 'xenial-updates/newton', 'xenial-newton/updates': 'xenial-updates/newton', 'xenial-updates/newton': 'xenial-updates/newton', @@ -107,12 +134,13 @@ 'xenial-proposed/newton': 'xenial-proposed/newton', # Ocata 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', 'xenial-ocata': 'xenial-updates/ocata', 'xenial-ocata/updates': 'xenial-updates/ocata', 'xenial-updates/ocata': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 
'xenial-proposed/ocata',
-    'xenial-ocata/newton': 'xenial-proposed/ocata',
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
     # Pike
     'pike': 'xenial-updates/pike',
     'xenial-pike': 'xenial-updates/pike',
@@ -120,7 +148,7 @@
     'xenial-updates/pike': 'xenial-updates/pike',
     'pike/proposed': 'xenial-proposed/pike',
     'xenial-pike/proposed': 'xenial-proposed/pike',
-    'xenial-pike/newton': 'xenial-proposed/pike',
+    'xenial-proposed/pike': 'xenial-proposed/pike',
     # Queens
     'queens': 'xenial-updates/queens',
     'xenial-queens': 'xenial-updates/queens',
@@ -128,12 +156,13 @@
     'xenial-updates/queens': 'xenial-updates/queens',
     'queens/proposed': 'xenial-proposed/queens',
     'xenial-queens/proposed': 'xenial-proposed/queens',
-    'xenial-queens/newton': 'xenial-proposed/queens',
+    'xenial-proposed/queens': 'xenial-proposed/queens',
 }
+
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
 CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
-CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.


 def filter_installed_packages(packages):
@@ -161,7 +190,7 @@ def apt_cache(in_memory=True, progress=None):
     return apt_pkg.Cache(progress)


-def install(packages, options=None, fatal=False):
+def apt_install(packages, options=None, fatal=False):
     """Install one or more packages."""
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -178,7 +207,7 @@ def install(packages, options=None, fatal=False):
     _run_apt_command(cmd, fatal)


-def upgrade(options=None, fatal=False, dist=False):
+def apt_upgrade(options=None, fatal=False, dist=False):
     """Upgrade all packages."""
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -193,13 +222,13 @@ def upgrade(options=None, fatal=False, dist=False):
     _run_apt_command(cmd, fatal)


-def update(fatal=False):
+def apt_update(fatal=False):
     """Update local apt cache."""
     cmd = ['apt-get', 'update']
     _run_apt_command(cmd, fatal)


-def purge(packages, fatal=False):
+def apt_purge(packages, fatal=False):
     """Purge one or more packages."""
     cmd = ['apt-get', '--assume-yes', 'purge']
     if isinstance(packages, six.string_types):
@@ -233,7 +262,58 @@ def apt_unhold(packages, fatal=False):
     return apt_mark(packages, 'unhold', fatal=fatal)


-def add_source(source, key=None):
+def import_key(key):
+    """Import an ASCII Armor key.
+
+    /!\ A Radix64 format keyid is also supported for backwards
+    compatibility, but should never be used; the key retrieval
+    mechanism is insecure and subject to man-in-the-middle attacks
+    voiding all signature checks using that key.
+
+    :param key: The key in ASCII armor format,
+        including BEGIN and END markers.
+    :raises: GPGKeyError if the key could not be imported
+    """
+    key = key.strip()
+    if '-' in key or '\n' in key:
+        # Send everything not obviously a keyid to GPG to import, as
+        # we trust its validation better than our own.  e.g. handling
+        # comments before the key.
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
+                '-----END PGP PUBLIC KEY BLOCK-----' in key):
+            log("Importing ASCII Armor PGP key", level=DEBUG)
+            with NamedTemporaryFile() as keyfile:
+                with open(keyfile.name, 'w') as fd:
+                    fd.write(key)
+                    fd.write("\n")
+                cmd = ['apt-key', 'add', keyfile.name]
+                try:
+                    subprocess.check_call(cmd)
+                except subprocess.CalledProcessError:
+                    error = "Error importing PGP key '{}'".format(key)
+                    log(error)
+                    raise GPGKeyError(error)
+        else:
+            raise GPGKeyError("ASCII armor markers missing from GPG key")
+    else:
+        # We should only send things obviously not a keyid offsite
+        # via this unsecured protocol, as it may be a secret or part
+        # of one.
+        log("PGP key found (looks like Radix64 format)", level=WARNING)
+        log("INSECURELY importing PGP key from keyserver; "
+            "full key not provided.", level=WARNING)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error = "Error importing PGP key '{}'".format(key)
+            log(error)
+            raise GPGKeyError(error)
+
+
+def add_source(source, key=None, fail_invalid=False):
     """Add a package source to this system.

     @param source: a URL or sources.list entry, as supported by
@@ -249,6 +329,33 @@ def add_source(source, key=None):
     such as 'cloud:icehouse'
     'distro' may be used as a noop

+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes ppa:<ppa-name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket with
+      optional staging version.  If staging is used then the staging PPA [2]
+      will be used.  If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive-list
+
     @param key: A key to be added to the system's APT keyring and used
     to verify the signatures on packages. Ideally, this should be an
     ASCII format GPG public key including the block headers. A GPG key
     id may also be used, but be aware that only insecure protocols are
     available to retrieve the actual public key from a public keyserver
     placing your Juju environment at risk. ppa and cloud archive keys
     are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
     """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
     if source is None:
-        log('Source is not present. Skipping')
-        return
-
-    if (source.startswith('ppa:') or
-            source.startswith('http') or
-            source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        cmd = ['add-apt-repository', '--yes', source]
-        _run_with_retries(cmd)
-    elif source.startswith('cloud:'):
-        install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    elif source == 'distro':
-        pass
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            # call the associated function with the captured groups,
+            # raising SourceConfigError on error.
+            fn(*m.groups())
+            if key:
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            break
     else:
-        log("Unknown source: {!r}".format(source))
-
-    if key:
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile('w+') as key_file:
-                key_file.write(key)
-                key_file.flush()
-                key_file.seek(0)
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
-        else:
-            # Note that hkp: is in no way a secure protocol. Using a
-            # GPG key id is pointless from a security POV unless you
-            # absolutely trust your network and DNS.
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
-                                   key])
+        # nothing matched: log an error and maybe raise, depending on
+        # fail_invalid.
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+ """ + release = lsb_release()['DISTRIB_CODENAME'] + arch = platform.machine() + if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + raise SourceConfigError("Arch {} not supported for (distro-)proposed" + .format(arch)) + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) + + +def _add_apt_repository(spec): + """Add the spec using add_apt_repository + + :param spec: the parameter to pass to add_apt_repository + """ + _run_with_retries(['add-apt-repository', '--yes', spec]) + + +def _add_cloud_pocket(pocket): + """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list + + Note that this overwrites the existing file if there is one. + + This function also converts the simple pocket in to the actual pocket using + the CLOUD_ARCHIVE_POCKETS mapping. + + :param pocket: string representing the pocket to add a deb spec for. + :raises: SourceConfigError if the cloud pocket doesn't exist or the + requested release doesn't match the current distro version. + """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release:String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is in the same as the current ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. 
+    """
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
+            'version ({})'.format(release, os_release, ubuntu_rel))


 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
@@ -316,9 +514,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),

     :param: cmd_env: dict: Environment variables to add to the command run.
     """
-    env = os.environ.copy()
+    env = None
+    kwargs = {}
     if cmd_env:
+        env = os.environ.copy()
         env.update(cmd_env)
+        kwargs['env'] = env

     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -330,7 +531,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            result = subprocess.check_call(cmd, env=env)
+            result = subprocess.check_call(cmd, **kwargs)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
@@ -343,6 +545,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),

 def _run_apt_command(cmd, fatal=False):
     """Run an apt command with optional retries.

+    :param: cmd: str: The apt command to run.
     :param: fatal: bool: Whether the command's output should be checked and
         retried.
     """
diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py
index 48867880..e7aa4715 100644
--- a/ceph-mon/tests/charmhelpers/__init__.py
+++ b/ceph-mon/tests/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
@@ -34,3 +39,59 @@
 else:
     subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a map of mangled function names that have been deprecated using
+# the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call to log.
If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return 
self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- 
a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. + + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 88e80a49..5656e2f5 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with 
open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. + if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): From 04d7e769004f237f46485706b2e31e3fb7c50f9a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 24 Aug 2017 16:47:27 -0500 Subject: [PATCH 1359/2699] Sync charm-helpers Change-Id: I12b0ba1b814cbba2dbb3474de5c0b180df03628a --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../contrib/hardening/apache/checks/config.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 96 +++++++++---- .../charmhelpers/contrib/openstack/utils.py | 135 ++++++++++++++++-- .../contrib/storage/linux/bcache.py | 74 ++++++++++ .../contrib/storage/linux/ceph.py | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 40 ++++++ ceph-osd/hooks/charmhelpers/core/host.py | 36 ++++- ceph-osd/hooks/charmhelpers/fetch/snap.py | 22 ++- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 67 +++++---- ceph-osd/tests/charmhelpers/core/hookenv.py | 40 ++++++ ceph-osd/tests/charmhelpers/core/host.py | 36 ++++- 12 files changed, 469 insertions(+), 86 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/storage/linux/bcache.py diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 424b7f76..80d574dc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -125,7 +125,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_]+$' + shortname_re = '[A-Za-z0-9-_.]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index b18b263d..06482aac 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -46,8 +46,9 @@ def get_audits(): context = ApacheConfContext() settings = utils.get_settings('apache') audits = [ - FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', - group='root', mode=0o0640), + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), TemplatedFile(os.path.join(settings['common']['apache_dir'], 'mods-available/alias.conf'), diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py 
b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index ea93159d..f67f3265 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -41,9 +41,9 @@ charm_name, DEBUG, INFO, - WARNING, ERROR, status_set, + network_get_primary_address ) from charmhelpers.core.sysctl import create as sysctl_create @@ -80,6 +80,9 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -87,7 +90,6 @@ get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network, is_bridge_member, is_ipv6_disabled, ) @@ -97,6 +99,7 @@ git_determine_usr_bin, git_determine_python_path, enable_memcache, + snap_install_requested, ) from charmhelpers.core.unitdata import kv @@ -244,6 +247,11 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } + # Note(coreycb): We can drop mysql+pymysql if we want when the + # following review lands, though it seems mysql+pymysql would + # be preferred. https://review.openstack.org/#/c/462190/ + if snap_install_requested(): + ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt @@ -510,6 +518,10 @@ def __call__(self): ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) if not ctxt.get('key'): ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features ceph_addrs = relation_get('ceph-public-address', rid=rid, unit=unit) @@ -610,7 +622,6 @@ def __call__(self): ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') if config('prefer-ipv6'): - ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' else: @@ -726,11 +737,17 @@ def canonical_names(self): return sorted(list(set(cns))) def get_network_addresses(self): - """For each network configured, return corresponding address and vip - (if available). + """For each network configured, return corresponding address and + hostnamr or vip (if available). Returns a list of tuples of the form: + [(address_in_net_a, hostname_in_net_a), + (address_in_net_b, hostname_in_net_b), + ...] + + or, if no hostnames(s) available: + [(address_in_net_a, vip_in_net_a), (address_in_net_b, vip_in_net_b), ...] @@ -742,32 +759,27 @@ def get_network_addresses(self): ...] """ addresses = [] - if config('vip'): - vips = config('vip').split() - else: - vips = [] - - for net_type in ['os-internal-network', 'os-admin-network', - 'os-public-network']: - addr = get_address_in_network(config(net_type), - unit_get('private-address')) - if len(vips) > 1 and is_clustered(): - if not config(net_type): - log("Multiple networks configured but net_type " - "is None (%s)." % net_type, level=WARNING) - continue - - for vip in vips: - if is_address_in_network(config(net_type), vip): - addresses.append((addr, vip)) - break - - elif is_clustered() and config('vip'): - addresses.append((addr, config('vip'))) + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit. 
+ fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) else: - addresses.append((addr, addr)) + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except NotImplementedError: + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) - return sorted(addresses) + return sorted(set(addresses)) def __call__(self): if isinstance(self.external_ports, six.string_types): @@ -794,7 +806,7 @@ def __call__(self): self.configure_cert(cn) addresses = self.get_network_addresses() - for address, endpoint in sorted(set(addresses)): + for address, endpoint in addresses: for api_port in self.external_ports: ext_port = determine_apache_port(api_port, singlenode_mode=True) @@ -1397,14 +1409,38 @@ def __call__(self): 'rel_key': 'dns-domain', 'default': None, }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + if ctxt['enable_qos']: + ctxt['extension_drivers'] = 'qos' + else: + ctxt['extension_drivers'] = '' + return ctxt def get_neutron_options(self, rdata): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index cfdd829d..837a1674 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,6 +51,7 @@ status_set, hook_name, application_version_set, + cached, ) from charmhelpers.core.strutils import BasicStringComparator @@ -90,6 +91,13 @@ GPGKeyError, get_upstream_version ) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + SNAP_CHANNELS, +) + from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -178,7 +186,7 @@ ('ocata', ['2.11.0', '2.12.0', '2.13.0']), ('pike', - ['2.13.0']), + ['2.13.0', '2.15.0']), ]) # >= Liberty version->codename mapping @@ -327,8 +335,10 @@ def get_os_codename_install_source(src): return ca_rel # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in six.iteritems(OPENSTACK_CODENAMES): + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): if v in src: return v @@ -397,6 +407,19 @@ def get_swift_codename(version): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + import apt_pkg as apt cache = 
apt_cache() @@ -613,6 +636,9 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) + if not cur_vers: + # The package has not been installed yet do not attempt upgrade + return False if "swift" in package: codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) @@ -1863,6 +1889,30 @@ def wrapped_f(*args, **kwargs): return wrap +def ordered(orderme): + """Converts the provided dictionary into a collections.OrderedDict. + + The items in the returned OrderedDict will be inserted based on the + natural sort order of the keys. Nested dictionaries will also be sorted + in order to ensure fully predictable ordering. + + :param orderme: the dict to order + :return: collections.OrderedDict + :raises: ValueError: if `orderme` isn't a dict instance. + """ + if not isinstance(orderme, dict): + raise ValueError('argument must be a dict type') + + result = OrderedDict() + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + if isinstance(v, dict): + result[k] = ordered(v) + else: + result[k] = v + + return result + + def config_flags_parser(config_flags): """Parses config flags string into dict. @@ -1874,15 +1924,13 @@ def config_flags_parser(config_flags): example, a string in the format of 'key1=value1, key2=value2' will return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. + {'key1': 'value1', 'key2': 'value2'}. 2. A string in the above format, but supporting a comma-delimited list of values for the same key. For example, a string in the format of 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} + {'key1': 'value1', 'key2': 'value2,value3,value4'} 3. A string containing a colon character (:) prior to an equal character (=) will be treated as yaml and parsed as such. This can be @@ -1902,7 +1950,7 @@ def config_flags_parser(config_flags): equals = config_flags.find('=') if colon > 0: if colon < equals or equals < 0: - return yaml.safe_load(config_flags) + return ordered(yaml.safe_load(config_flags)) if config_flags.find('==') >= 0: juju_log("config_flags is not in expected format (key=value)", @@ -1915,7 +1963,7 @@ def config_flags_parser(config_flags): # split on '='. split = config_flags.strip(' =').split('=') limit = len(split) - flags = {} + flags = OrderedDict() for i in range(0, limit - 1): current = split[i] next = split[i + 1] @@ -1994,3 +2042,72 @@ def update_json_file(filename, items): policy.update(items) with open(filename, "w") as fd: fd.write(json.dumps(policy, indent=4)) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:channel-series-release + and channel is in SNAPS_CHANNELS return True. 
+ """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + channel, series, release = _src.split('-') + if channel.lower() in SNAP_CHANNELS: + return True + return False + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:channel-series-track + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + _channel, _series, _release = _src.split('-') + channel = '--channel={}/{}'.format(_release, _channel) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel a snapstore channel and mode is --classic, --devmode or + --jailmode. + @param post_snap_install: Callback function to run after snaps have been + installed + """ + + def _ensure_flag(flag): + if flag.startswith('--'): + return flag + return '--{}'.format(flag) + + if refresh: + for snap in snaps.keys(): + snap_refresh(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + else: + for snap in snaps.keys(): + snap_install(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/bcache.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 00000000..605991e1 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1f0540a1..e5a01b1b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1372,7 +1372,7 @@ def __call__(self): return {} conf = config_flags_parser(conf) - if type(conf) != dict: + if not isinstance(conf, dict): log("Provided config-flags is not a dictionary - ignoring", level=WARNING) return {} diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. 
Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. + + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index b0043cbe..5656e2f5 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -487,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. + if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py index 23c707b0..112a54c3 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/snap.py +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -18,15 +18,23 @@ https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html """ import subprocess -from os import environ +import os from time import sleep from charmhelpers.core.hookenv import log __author__ = 'Joseph Borg ' -SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +# The return code for "couldn't acquire lock" in Snap +# (hopefully this will be improved). +SNAP_NO_LOCK = 1 SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
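These retry constants drive the lock-handling loop in _snap_exec(); a
self-contained sketch of the pattern (an editorial illustration, not part of
the patch):

    import subprocess
    import time

    SNAP_NO_LOCK = 1             # snap's exit code when the state lock is held
    SNAP_NO_LOCK_RETRY_DELAY = 10
    SNAP_NO_LOCK_RETRY_COUNT = 30

    def run_snap(args):
        """Run 'snap <args>', retrying while snap reports the lock error."""
        for _ in range(SNAP_NO_LOCK_RETRY_COUNT):
            try:
                subprocess.check_call(['snap'] + args)
                return
            except subprocess.CalledProcessError as e:
                if e.returncode != SNAP_NO_LOCK:
                    raise
                time.sleep(SNAP_NO_LOCK_RETRY_DELAY)
        raise RuntimeError('could not acquire snap lock after {} attempts'
                           .format(SNAP_NO_LOCK_RETRY_COUNT))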
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]


 class CouldNotAcquireLockException(Exception):
@@ -47,13 +55,17 @@ def _snap_exec(commands):

     while return_code is None or return_code == SNAP_NO_LOCK:
         try:
-            return_code = subprocess.check_call(['snap'] + commands, env=environ)
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
         except subprocess.CalledProcessError as e:
             retry_count += + 1
             if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
-                raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
             return_code = e.returncode
-            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
             sleep(SNAP_NO_LOCK_RETRY_DELAY)

     return return_code
diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
index 57b5fb61..40e1cb5b 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
@@ -27,6 +27,7 @@
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
+    WARNING,
 )

 from charmhelpers.fetch import SourceConfigError, GPGKeyError
@@ -139,7 +140,7 @@
     'xenial-updates/ocata': 'xenial-updates/ocata',
     'ocata/proposed': 'xenial-proposed/ocata',
     'xenial-ocata/proposed': 'xenial-proposed/ocata',
-    'xenial-ocata/newton': 'xenial-proposed/ocata',
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
     # Pike
     'pike': 'xenial-updates/pike',
     'xenial-pike': 'xenial-updates/pike',
@@ -147,7 +148,7 @@
     'xenial-updates/pike': 'xenial-updates/pike',
     'pike/proposed': 'xenial-proposed/pike',
     'xenial-pike/proposed': 'xenial-proposed/pike',
-    'xenial-pike/newton': 'xenial-proposed/pike',
+    'xenial-proposed/pike': 'xenial-proposed/pike',
     # Queens
     'queens': 'xenial-updates/queens',
     'xenial-queens': 'xenial-updates/queens',
@@ -155,13 +156,13 @@
     'xenial-updates/queens': 'xenial-updates/queens',
     'queens/proposed': 'xenial-proposed/queens',
     'xenial-queens/proposed': 'xenial-proposed/queens',
-    'xenial-queens/newton': 'xenial-proposed/queens',
+    'xenial-proposed/queens': 'xenial-proposed/queens',
 }

 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
 CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
-CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.


 def filter_installed_packages(packages):
@@ -261,34 +262,47 @@ def apt_unhold(packages, fatal=False):
     return apt_mark(packages, 'unhold', fatal=fatal)


-def import_key(keyid):
-    """Import a key in either ASCII Armor or Radix64 format.
+def import_key(key):
+    """Import an ASCII Armor key.

-    `keyid` is either the keyid to fetch from a PGP server, or
-    the key in ASCII armor foramt.
+    /!\ A Radix64 format keyid is also supported for backwards
+    compatibility, but should never be used; the key retrieval
+    mechanism is insecure and subject to man-in-the-middle attacks
+    voiding all signature checks using that key.

-    :param keyid: String of key (or key id).
+    :param key: The key in ASCII armor format,
+                including BEGIN and END markers.
:raises: GPGKeyError if the key could not be imported """ - key = keyid.strip() - if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and - key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): + key = key.strip() + if '-' in key or '\n' in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and + '-----END PGP PUBLIC KEY BLOCK-----' in key): + log("Importing ASCII Armor PGP key", level=DEBUG) + with NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") else: - log("PGP key found (looks like Radix64 format)", level=DEBUG) - log("Importing PGP key from keyserver", level=DEBUG) + # We should only send things obviously not a keyid offsite + # via this unsecured protocol, as it may be a secret or part + # of one. + log("PGP key found (looks like Radix64 format)", level=WARNING) + log("INSECURLY importing PGP key from keyserver; " + "full key not provided.", level=WARNING) cmd = ['apt-key', 'adv', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] try: @@ -364,6 +378,7 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), + (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), ]) if source is None: source = '' diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. 
+ for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. + + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index b0043cbe..5656e2f5 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -487,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. 
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)


 def fstab_remove(mp):

From e73c2ba7a2fd50d15072bdd982e242c0489e6949 Mon Sep 17 00:00:00 2001
From: Ryan Beisner
Date: Thu, 24 Aug 2017 16:48:04 -0500
Subject: [PATCH 1360/2699] Sync charm-helpers

Change-Id: I5ef76ecf8171bc96b1dfdc33b6df90fc130b3a76
---
 ceph-proxy/hooks/charmhelpers/__init__.py     |  61 +++
 .../charmhelpers/contrib/charmsupport/nrpe.py |   9 +-
 .../contrib/hardening/apache/checks/config.py |   5 +-
 .../hooks/charmhelpers/contrib/network/ip.py  |   6 +-
 .../charmhelpers/contrib/openstack/utils.py   | 386 +++++++++++-------
 .../contrib/storage/linux/ceph.py             |  44 +-
 ceph-proxy/hooks/charmhelpers/core/hookenv.py |  40 ++
 ceph-proxy/hooks/charmhelpers/core/host.py    |  38 +-
 .../charmhelpers/core/host_factory/ubuntu.py  |   1 +
 .../hooks/charmhelpers/fetch/__init__.py      |  26 +-
 ceph-proxy/hooks/charmhelpers/fetch/centos.py |   2 +-
 ceph-proxy/hooks/charmhelpers/fetch/snap.py   |  22 +-
 ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 339 ++++++++++++---
 ceph-proxy/tests/charmhelpers/__init__.py     |  61 +++
 .../contrib/openstack/amulet/deployment.py    |   6 +-
 .../contrib/openstack/amulet/utils.py         | 103 +++--
 ceph-proxy/tests/charmhelpers/core/hookenv.py |  40 ++
 ceph-proxy/tests/charmhelpers/core/host.py    |  38 +-
 .../charmhelpers/core/host_factory/ubuntu.py  |   1 +
 19 files changed, 955 insertions(+), 273 deletions(-)

diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py
index 48867880..e7aa4715 100644
--- a/ceph-proxy/hooks/charmhelpers/__init__.py
+++ b/ceph-proxy/hooks/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys

@@ -34,3 +39,59 @@
 else:
     subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call to log.
If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8240249e..80d574dc 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -125,7 +125,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_]+$' + shortname_re = '[A-Za-z0-9-_.]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed @@ -193,6 +193,13 @@ def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index b18b263d..06482aac 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -46,8 +46,9 @@ def get_audits(): context = ApacheConfContext() settings = utils.get_settings('apache') audits = [ - FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', - group='root', mode=0o0640), + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), TemplatedFile(os.path.join(settings['common']['apache_dir'], 'mods-available/alias.conf'), diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index fc3f5e3e..d7e6debf 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -243,11 +243,13 @@ def is_ipv6_disabled(): try: result = subprocess.check_output( ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT) - return "net.ipv6.conf.all.disable_ipv6 = 1" in result + stderr=subprocess.STDOUT, + universal_newlines=True) except subprocess.CalledProcessError: return True + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): diff --git 
a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index e13450c1..837a1674 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -26,11 +26,12 @@ import shutil import six -import tempfile import traceback import uuid import yaml +from charmhelpers import deprecate + from charmhelpers.contrib.network import ip from charmhelpers.core import unitdata @@ -41,7 +42,6 @@ config, log as juju_log, charm_dir, - DEBUG, INFO, ERROR, related_units, @@ -51,6 +51,7 @@ status_set, hook_name, application_version_set, + cached, ) from charmhelpers.core.strutils import BasicStringComparator @@ -82,11 +83,21 @@ restart_on_change_helper, ) from charmhelpers.fetch import ( - apt_install, apt_cache, install_remote, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, get_upstream_version ) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + SNAP_CHANNELS, +) + from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError @@ -111,6 +122,8 @@ 'newton', 'ocata', 'pike', + 'queens', + 'rocky', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -126,6 +139,7 @@ ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) @@ -142,6 +156,7 @@ ('2016.1', 'mitaka'), ('2016.2', 'newton'), ('2017.1', 'ocata'), + ('2017.2', 'pike'), ]) # The ugly duckling - must list releases oldest to newest @@ -170,6 +185,8 @@ ['2.8.0', '2.9.0', '2.10.0']), ('ocata', ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0', '2.15.0']), ]) # >= Liberty version->codename mapping @@ -179,54 +196,81 @@ ('13', 'mitaka'), ('14', 'newton'), ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), ('8', 'mitaka'), ('9', 'newton'), ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), ('6', 'mitaka'), ('7', 'newton'), ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), ('12', 'mitaka'), ('13', 'newton'), ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), ('9', 'mitaka'), ('10', 'newton'), ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), ]), } @@ -291,8 +335,10 @@ def get_os_codename_install_source(src): return ca_rel # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in six.iteritems(OPENSTACK_CODENAMES): + if (src.startswith('deb') 
or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): if v in src: return v @@ -361,6 +407,19 @@ def get_swift_codename(version): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + import apt_pkg as apt cache = apt_cache() @@ -436,13 +495,14 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) -os_rel = None +# Module local cache variable for the os_release. +_os_rel = None def reset_os_release(): '''Unset the cached os_release version''' - global os_rel - os_rel = None + global _os_rel + _os_rel = None def os_release(package, base='essex', reset_cache=False): @@ -456,144 +516,77 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' - global os_rel + global _os_rel if reset_cache: reset_os_release() - if os_rel: - return os_rel - os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return os_rel + if _os_rel: + return _os_rel + _os_rel = ( + git_os_codename_install_source(config('openstack-origin-git')) or + get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return _os_rel +@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) def import_key(keyid): - key = keyid.strip() - if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and - key.endswith('-----END PGP PUBLIC KEY BLOCK-----')): - juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - juju_log("Importing ASCII Armor PGP key", level=DEBUG) - with tempfile.NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - else: - juju_log("PGP key found (looks like Radix64 format)", level=DEBUG) - juju_log("Importing PGP key from keyserver", level=DEBUG) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error_out("Error importing PGP key '%s'" % key) - - -def get_source_and_pgp_key(input): - """Look for a pgp key ID or ascii-armor key in the given input.""" - index = input.strip() - index = input.rfind('|') - if index < 0: - return input, None - - key = input[index + 1:].strip('|') - source = input[:index] - return source, key - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - if rel == 'distro': - return - elif rel == 'distro-proposed': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(DISTRO_PROPOSED % ubuntu_rel) - elif rel[:4] == "ppa:": - src, key = get_source_and_pgp_key(rel) - if key: - import_key(key) - - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - src, key = 
get_source_and_pgp_key(rel) - if key: - import_key(key) - - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) + """Import a key, either ASCII armored, or a GPG key id. - if 'staging' in ca_rel: - # staging is just a regular PPA. - os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. - pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - } + @param keyid: the key in ASCII armor format, or a GPG key id. + @raises SystemExit() via sys.exit() on failure. + """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - apt_install('ubuntu-cloud-keyring', fatal=True) +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) + :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. 
+
+    The functionality is provided by charmhelpers.fetch.add_source()
+    The difference between the two functions is that add_source() signature
+    requires the key to be passed directly, whereas this function passes an
+    optional key by appending '|<key>' to the end of the source specification
+    'source'.
+
+    Another difference from add_source() is that the function calls sys.exit(1)
+    if the configuration fails, whereas add_source() raises
+    SourceConfigError().  Another difference is that add_source()
+    silently fails (with a juju_log command) if there is no matching source to
+    configure, whereas this function fails with a sys.exit(1)
+
+    :param source_plus_key: String of "source_spec|keyid" -- see above for
+        details.
+
+    Note that the behaviour on error is to log the error to the juju log and
+    then call sys.exit(1).
+    """
+    # extract the key if there is one, denoted by a '|' in the rel
+    source, key = get_source_and_pgp_key(source_plus_key)
+
+    # handle the ordinary sources via add_source
+    try:
+        fetch_add_source(source, key, fail_invalid=True)
+    except SourceConfigError as se:
+        error_out(str(se))


 def config_value_changed(option):
@@ -638,12 +631,14 @@ def openstack_upgrade_available(package):

     :returns: bool:    : Returns True if configured installation source offers
                          a newer version of package.
-
     """
     import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
+    if not cur_vers:
+        # The package has not been installed yet, so do not attempt upgrade
+        return False
     if "swift" in package:
         codename = get_os_codename_install_source(src)
         avail_vers = get_os_version_codename_swift(codename)
@@ -1894,6 +1889,30 @@ def wrapped_f(*args, **kwargs):
     return wrap


+def ordered(orderme):
+    """Converts the provided dictionary into a collections.OrderedDict.
+
+    The items in the returned OrderedDict will be inserted based on the
+    natural sort order of the keys. Nested dictionaries will also be sorted
+    in order to ensure fully predictable ordering.
+
+    :param orderme: the dict to order
+    :return: collections.OrderedDict
+    :raises: ValueError: if `orderme` isn't a dict instance.
+    """
+    if not isinstance(orderme, dict):
+        raise ValueError('argument must be a dict type')
+
+    result = OrderedDict()
+    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
+        if isinstance(v, dict):
+            result[k] = ordered(v)
+        else:
+            result[k] = v
+
+    return result
+
+
 def config_flags_parser(config_flags):
     """Parses config flags string into dict.

@@ -1905,15 +1924,13 @@
        example, a string in the format of 'key1=value1, key2=value2' will
        return a dict of:

-           {'key1': 'value1',
-            'key2': 'value2'}.
+           {'key1': 'value1', 'key2': 'value2'}.

     2. A string in the above format, but supporting a
        comma-delimited list of values for the same key. For example, a string
        in the format of 'key1=value1, key2=value3,value4,value5' will return
        a dict of:

-           {'key1', 'value1',
-            'key2', 'value2,value3,value4'}
+           {'key1': 'value1', 'key2': 'value2,value3,value4'}

     3. A string containing a colon character (:) prior to an equal
        character (=) will be treated as yaml and parsed as such. This can be
@@ -1933,7 +1950,7 @@
         equals = config_flags.find('=')
         if colon > 0:
             if colon < equals or equals < 0:
-                return yaml.safe_load(config_flags)
+                return ordered(yaml.safe_load(config_flags))

         if config_flags.find('==') >= 0:
             juju_log("config_flags is not in expected format (key=value)",
@@ -1946,7 +1963,7 @@
     # split on '='.
split = config_flags.strip(' =').split('=') limit = len(split) - flags = {} + flags = OrderedDict() for i in range(0, limit - 1): current = split[i] next = split[i + 1] @@ -2013,3 +2030,84 @@ def token_cache_pkgs(source=None, release=None): if enable_memcache(source=source, release=release): packages.extend(['memcached', 'python-memcache']) return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. + :param filename: json filename (i.e.: /etc/glance/policy.json) + :param items: dict of items to update + """ + with open(filename) as fd: + policy = json.load(fd) + policy.update(items) + with open(filename, "w") as fd: + fd.write(json.dumps(policy, indent=4)) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:channel-series-release + and channel is in SNAPS_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + channel, series, release = _src.split('-') + if channel.lower() in SNAP_CHANNELS: + return True + return False + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:channel-series-track + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + _channel, _series, _release = _src.split('-') + channel = '--channel={}/{}'.format(_release, _channel) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel a snapstore channel and mode is --classic, --devmode or + --jailmode. + @param post_snap_install: Callback function to run after snaps have been + installed + """ + + def _ensure_flag(flag): + if flag.startswith('--'): + return flag + return '--{}'.format(flag) + + if refresh: + for snap in snaps.keys(): + snap_refresh(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + else: + for snap in snaps.keys(): + snap_install(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9417d684..e5a01b1b 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -63,6 +63,7 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.unitdata import kv from charmhelpers.core.kernel import modprobe from charmhelpers.contrib.openstack.utils import config_flags_parser @@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def is_broker_action_done(action, rid=None, unit=None): + """Check whether broker action has completed yet. 
+ + @param action: name of action to be performed + @returns True if action complete otherwise False + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return False + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + val = kvstore.get(key=key) + if val and val == rsp.request_id: + return True + + return False + + +def mark_broker_action_done(action, rid=None, unit=None): + """Mark action as having been completed. + + @param action: name of action to be performed + @returns None + """ + rdata = relation_get(rid, unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + kvstore.set(key=key, value=rsp.request_id) + kvstore.flush() + + class CephConfContext(object): """Ceph config (ceph.conf) context. @@ -1330,7 +1372,7 @@ def __call__(self): return {} conf = config_flags_parser(conf) - if type(conf) != dict: + if not isinstance(conf, dict): log("Provided config-flags is not a dictionary - ignoring", level=WARNING) return {} diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. 
+ + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" @@ -753,6 +790,9 @@ def wrapper(decorated): def charm_dir(): """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d return os.environ.get('CHARM_DIR') diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 88e80a49..5656e2f5 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log +from .hookenv import log, DEBUG from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): + service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( @@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init", sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(): service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) @@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) + # lets see if we can grab the file and compare the context, to avoid doing + # a write. + existing_content = None + existing_uid, existing_gid = None, None + try: + with open(path, 'rb') as target: + existing_content = target.read() + stat = os.stat(path) + existing_uid, existing_gid = stat.st_uid, stat.st_gid + except: + pass + if content != existing_content: + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), + level=DEBUG) + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + return + # the contents were the same, but we might still need to change the + # ownership. 
+ if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index ec5e0fe9..480a6276 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -48,6 +48,13 @@ class AptLockError(Exception): pass +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + class BaseFetchHandler(object): """Base class for FetchHandler implementations in fetch plugins""" @@ -77,21 +84,22 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages -install = fetch.install -upgrade = fetch.upgrade -update = fetch.update -purge = fetch.purge +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge add_source = fetch.add_source if __platform__ == "ubuntu": apt_cache = fetch.apt_cache - apt_install = fetch.install - apt_update = fetch.update - apt_upgrade = fetch.upgrade - apt_purge = fetch.purge + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold + import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version elif __platform__ == "centos": yum_search = fetch.yum_search @@ -135,7 +143,7 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - fetch.update(fatal=True) + _fetch_update(fatal=True) def install_remote(source, *args, **kwargs): diff --git a/ceph-proxy/hooks/charmhelpers/fetch/centos.py b/ceph-proxy/hooks/charmhelpers/fetch/centos.py index 604bbfb5..a91dcff0 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/centos.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/centos.py @@ -132,7 +132,7 @@ def add_source(source, key=None): key_file.write(key) key_file.flush() key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file]) + subprocess.check_call(['rpm', '--import', key_file.name]) else: subprocess.check_call(['rpm', '--import', key]) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/snap.py b/ceph-proxy/hooks/charmhelpers/fetch/snap.py index 23c707b0..112a54c3 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/snap.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/snap.py @@ -18,15 +18,23 @@ https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html """ import subprocess -from os import environ +import os from time import sleep from charmhelpers.core.hookenv import log __author__ = 'Joseph Borg ' -SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). 
+# The return code for "couldn't acquire lock" in Snap +# (hopefully this will be improved). +SNAP_NO_LOCK = 1 SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. +SNAP_CHANNELS = [ + 'edge', + 'beta', + 'candidate', + 'stable', +] class CouldNotAcquireLockException(Exception): @@ -47,13 +55,17 @@ def _snap_exec(commands): while return_code is None or return_code == SNAP_NO_LOCK: try: - return_code = subprocess.check_call(['snap'] + commands, env=environ) + return_code = subprocess.check_call(['snap'] + commands, + env=os.environ) except subprocess.CalledProcessError as e: retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: - raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) + raise CouldNotAcquireLockException( + 'Could not aquire lock after {} attempts' + .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode - log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') + log('Snap failed to acquire lock, trying again in {} seconds.' + .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index 82ac80ff..40e1cb5b 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -12,29 +12,48 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import OrderedDict import os +import platform +import re import six import time import subprocess - from tempfile import NamedTemporaryFile + from charmhelpers.core.host import ( lsb_release ) -from charmhelpers.core.hookenv import log -from charmhelpers.fetch import SourceConfigError - +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. 
+ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ - -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" - CLOUD_ARCHIVE_POCKETS = { # Folsom 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', 'precise-folsom': 'precise-updates/folsom', 'precise-folsom/updates': 'precise-updates/folsom', 'precise-updates/folsom': 'precise-updates/folsom', @@ -43,6 +62,7 @@ 'precise-proposed/folsom': 'precise-proposed/folsom', # Grizzly 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', 'precise-grizzly': 'precise-updates/grizzly', 'precise-grizzly/updates': 'precise-updates/grizzly', 'precise-updates/grizzly': 'precise-updates/grizzly', @@ -51,6 +71,7 @@ 'precise-proposed/grizzly': 'precise-proposed/grizzly', # Havana 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', 'precise-havana': 'precise-updates/havana', 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', @@ -59,6 +80,7 @@ 'precise-proposed/havana': 'precise-proposed/havana', # Icehouse 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', 'precise-icehouse': 'precise-updates/icehouse', 'precise-icehouse/updates': 'precise-updates/icehouse', 'precise-updates/icehouse': 'precise-updates/icehouse', @@ -67,6 +89,7 @@ 'precise-proposed/icehouse': 'precise-proposed/icehouse', # Juno 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', 'trusty-juno': 'trusty-updates/juno', 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', @@ -75,6 +98,7 @@ 'trusty-proposed/juno': 'trusty-proposed/juno', # Kilo 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', 'trusty-kilo': 'trusty-updates/kilo', 'trusty-kilo/updates': 'trusty-updates/kilo', 'trusty-updates/kilo': 'trusty-updates/kilo', @@ -83,6 +107,7 @@ 'trusty-proposed/kilo': 'trusty-proposed/kilo', # Liberty 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', 'trusty-liberty': 'trusty-updates/liberty', 'trusty-liberty/updates': 'trusty-updates/liberty', 'trusty-updates/liberty': 'trusty-updates/liberty', @@ -91,6 +116,7 @@ 'trusty-proposed/liberty': 'trusty-proposed/liberty', # Mitaka 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', 'trusty-mitaka': 'trusty-updates/mitaka', 'trusty-mitaka/updates': 'trusty-updates/mitaka', 'trusty-updates/mitaka': 'trusty-updates/mitaka', @@ -99,6 +125,7 @@ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', # Newton 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', 'xenial-newton': 'xenial-updates/newton', 'xenial-newton/updates': 'xenial-updates/newton', 'xenial-updates/newton': 'xenial-updates/newton', @@ -107,17 +134,35 @@ 'xenial-proposed/newton': 'xenial-proposed/newton', # Ocata 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', 'xenial-ocata': 'xenial-updates/ocata', 'xenial-ocata/updates': 'xenial-updates/ocata', 'xenial-updates/ocata': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 
'xenial-proposed/ocata', - 'xenial-ocata/newton': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', } + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -145,7 +190,7 @@ def apt_cache(in_memory=True, progress=None): return apt_pkg.Cache(progress) -def install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False): """Install one or more packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -162,7 +207,7 @@ def install(packages, options=None, fatal=False): _run_apt_command(cmd, fatal) -def upgrade(options=None, fatal=False, dist=False): +def apt_upgrade(options=None, fatal=False, dist=False): """Upgrade all packages.""" if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -177,13 +222,13 @@ def upgrade(options=None, fatal=False, dist=False): _run_apt_command(cmd, fatal) -def update(fatal=False): +def apt_update(fatal=False): """Update local apt cache.""" cmd = ['apt-get', 'update'] _run_apt_command(cmd, fatal) -def purge(packages, fatal=False): +def apt_purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): @@ -217,7 +262,58 @@ def apt_unhold(packages, fatal=False): return apt_mark(packages, 'unhold', fatal=fatal) -def add_source(source, key=None): +def import_key(key): + """Import an ASCII Armor key. + + /!\ A Radix64 format keyid is also supported for backwards + compatibility, but should never be used; the key retrieval + mechanism is insecure and subject to man-in-the-middle attacks + voiding all signature checks using that key. + + :param keyid: The key in ASCII armor format, + including BEGIN and END markers. + :raises: GPGKeyError if the key could not be imported + """ + key = key.strip() + if '-' in key or '\n' in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. 
+ log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and + '-----END PGP PUBLIC KEY BLOCK-----' in key): + log("Importing ASCII Armor PGP key", level=DEBUG) + with NamedTemporaryFile() as keyfile: + with open(keyfile.name, 'w') as fd: + fd.write(key) + fd.write("\n") + cmd = ['apt-key', 'add', keyfile.name] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + # We should only send things obviously not a keyid offsite + # via this unsecured protocol, as it may be a secret or part + # of one. + log("PGP key found (looks like Radix64 format)", level=WARNING) + log("INSECURLY importing PGP key from keyserver; " + "full key not provided.", level=WARNING) + cmd = ['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + error = "Error importing PGP key '{}'".format(key) + log(error) + raise GPGKeyError(error) + + +def add_source(source, key=None, fail_invalid=False): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by @@ -233,6 +329,33 @@ def add_source(source, key=None): such as 'cloud:icehouse' 'distro' may be used as a noop + Full list of source specifications supported by the function are: + + 'distro': A NOP; i.e. it has no effect. + 'proposed': the proposed deb spec [2] is wrtten to + /etc/apt/sources.list/proposed + 'distro-proposed': adds -proposed to the debs [2] + 'ppa:': add-apt-repository --yes + 'deb ': add-apt-repository --yes deb + 'http://....': add-apt-repository --yes http://... + 'cloud-archive:': add-apt-repository -yes cloud-archive: + 'cloud:[-staging]': specify a Cloud Archive pocket with + optional staging version. If staging is used then the staging PPA [2] + with be used. If staging is NOT used then the cloud archive [3] will be + added, and the 'ubuntu-cloud-keyring' package will be added for the + current distro. + + Otherwise the source is not recognised and this is logged to the juju log. + However, no error is raised, unless sys_error_on_exit is True. + + [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main + where {} is replaced with the derived pocket name. + [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ + main universe multiverse restricted + where {} is replaced with the lsb_release codename (e.g. xenial) + [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu + to /etc/apt/sources.list.d/cloud-archive-list + @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. Ideally, this should be an ASCII format GPG public key including the block headers. A GPG key @@ -240,51 +363,142 @@ def add_source(source, key=None): available to retrieve the actual public key from a public keyserver placing your Juju environment at risk. ppa and cloud archive keys are securely added automtically, so sould not be provided. + + @param fail_invalid: (boolean) if True, then the function raises a + SourceConfigError is there is no matching installation source. 
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
     if source is None:
-        log('Source is not present. Skipping')
-        return
-
-    if (source.startswith('ppa:') or
-            source.startswith('http') or
-            source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        cmd = ['add-apt-repository', '--yes', source]
-        _run_with_retries(cmd)
-    elif source.startswith('cloud:'):
-        install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    elif source == 'distro':
-        pass
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            # call the associated function with the captured groups;
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            if key:
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            break
     else:
-        log("Unknown source: {!r}".format(source))
-
-    if key:
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile('w+') as key_file:
-                key_file.write(key)
-                key_file.flush()
-                key_file.seek(0)
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
-        else:
-            # Note that hkp: is in no way a secure protocol. Using a
-            # GPG key id is pointless from a security POV unless you
-            # absolutely trust your network and DNS.
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
-                                   key])
+        # nothing matched: log an error and optionally raise
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
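+    # For example, on a xenial amd64 host this writes (per [2] above):
+    #   deb http://archive.ubuntu.com/ubuntu xenial-proposed \
+    #       main universe multiverse restricted
+    # to /etc/apt/sources.list.d/proposed.list.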
+    release = lsb_release()['DISTRIB_CODENAME']
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add-apt-repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    """
+    _run_with_retries(['add-apt-repository', '--yes', spec])
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket into the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
+    """
+    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
+                fatal=True)
+    if pocket not in CLOUD_ARCHIVE_POCKETS:
+        raise SourceConfigError(
+            'Unsupported cloud: source option %s' %
+            pocket)
+    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
+def _add_cloud_staging(cloud_archive_release, openstack_release):
+    """Add the cloud staging repository, which is in
+    ppa:ubuntu-cloud-archive/<openstack_release>-staging
+
+    This function checks that the cloud_archive_release matches the current
+    codename for the distro that the charm is being installed on.
+
+    :param cloud_archive_release: string, codename for the release.
+    :param openstack_release: String, codename for the openstack release.
+    :raises: SourceConfigError if the cloud_archive_release doesn't match the
+        current version of the os.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
+    cmd = 'add-apt-repository -y {}'.format(ppa)
+    _run_with_retries(cmd.split(' '))
+
+
+def _add_cloud_distro_check(cloud_archive_release, openstack_release):
+    """Add the cloud pocket, but also check the cloud_archive_release against
+    the current distro, and use the openstack_release as the full lookup.
+
+    This just calls _add_cloud_pocket() with the combined
+    <cloud_archive_release>-<openstack_release> string as the pocket to get
+    the correct cloud-archive.list for dpkg to work with.
+
+    :param cloud_archive_release: String, codename for the distro release.
+    :param openstack_release: String, spec for the release to look up in the
+        CLOUD_ARCHIVE_POCKETS
+    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
+        doesn't exist.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
+
+
+def _verify_is_ubuntu_rel(release, os_release):
+    """Verify that the release matches the current Ubuntu release.
+
+    :param release: String, lowercase codename for the release.
+    :param os_release: String, the os_release being asked for
+    :raises: SourceConfigError if the release is not the same as the ubuntu
+        release.
+    """
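+    # e.g. on a xenial host (hypothetical arguments):
+    #   _verify_is_ubuntu_rel('xenial', 'pike')  -> returns quietly
+    #   _verify_is_ubuntu_rel('zesty', 'ocata')  -> raises SourceConfigError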
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
+            'version ({})'.format(release, os_release, ubuntu_rel))
 
 
 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
@@ -300,9 +514,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     :param: cmd_env: dict: Environment variables to add to the command run.
     """
 
-    env = os.environ.copy()
+    env = None
+    kwargs = {}
     if cmd_env:
+        env = os.environ.copy()
         env.update(cmd_env)
+        kwargs['env'] = env
 
     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -314,7 +531,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            result = subprocess.check_call(cmd, env=env)
+            result = subprocess.check_call(cmd, **kwargs)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
@@ -327,6 +545,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
 
 def _run_apt_command(cmd, fatal=False):
     """Run an apt command with optional retries.
+
     :param: cmd: str: The apt command to run.
     :param: fatal: bool: Whether the command's output should be checked and
         retried.
     """
diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py
index 48867880..e7aa4715 100644
--- a/ceph-proxy/tests/charmhelpers/__init__.py
+++ b/ceph-proxy/tests/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
 
@@ -34,3 +39,59 @@ else:
     subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call to log.
If not, logs to stdout + """ + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + try: + module = inspect.getmodule(f) + file = inspect.getsourcefile(f) + lines = inspect.getsourcelines(f) + f_name = "{}-{}-{}..{}-{}".format( + module.__name__, file, lines[0], lines[-1], f.__name__) + except (IOError, TypeError): + # assume it was local, so just use the name of the function + f_name = f.__name__ + if f_name not in __deprecated_functions: + __deprecated_functions[f_name] = True + s = "DEPRECATION WARNING: Function {} is being removed".format( + f.__name__) + if date: + s = "{} on/around {}".format(s, date) + if warning: + s = "{} : {}".format(s, warning) + if log: + log(s) + else: + print(s) + return f(*args, **kwargs) + return wrapped_f + return wrap diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c1ce457..5c041d2c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -262,7 +262,8 @@ def _get_openstack_release(self): # Must be ordered by OpenStack release (not by Ubuntu release): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata) = range(9) + self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, + self.xenial_pike, self.artful_pike) = range(11) releases = { ('trusty', None): self.trusty_icehouse, @@ -272,8 +273,10 @@ def _get_openstack_release(self): ('xenial', None): self.xenial_mitaka, ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, } return releases[(self.series, self.openstack)] @@ -287,6 +290,7 @@ def _get_openstack_release_string(self): ('xenial', 'mitaka'), ('yakkety', 'newton'), ('zesty', 'ocata'), + ('artful', 'pike'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index bcef4cd0..c8edbf65 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -25,9 +25,12 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client -import keystoneclient.v2_0 as keystone_client -from keystoneclient.auth.identity import v3 as keystone_id_v3 -from keystoneclient import session as keystone_session +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session from keystoneclient.v3 import client as keystone_client_v3 from novaclient import exceptions @@ -368,12 +371,20 @@ def authenticate_keystone(self, keystone_ip, username, password, port) if not api_version or api_version == 2: ep = base_ep + "/v2.0" - return keystone_client.Client(username=username, password=password, - tenant_name=project_name, - auth_url=ep) + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + client = 
keystone_client.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client else: ep = base_ep + "/v3" - auth = keystone_id_v3.Password( + auth = v3.Password( user_domain_name=user_domain_name, username=username, password=password, @@ -382,36 +393,45 @@ def authenticate_keystone(self, keystone_ip, username, password, project_name=project_name, auth_url=ep ) - return keystone_client_v3.Client( - session=keystone_session.Session(auth=auth) - ) + sess = keystone_session.Session(auth=auth) + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, - keystone_ip=None): + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') if not keystone_ip: keystone_ip = keystone_sentry.info['public-address'] - user_domain_name = None - domain_name = None - if api_version == 3: + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: user_domain_name = 'admin_domain' - domain_name = user_domain_name - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant, - api_version=api_version, - user_domain_name=user_domain_name, - domain_name=domain_name, - admin_port=True) + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') + interface='publicURL') keystone_ip = urlparse.urlparse(ep).hostname return self.authenticate_keystone(keystone_ip, user, password, @@ -421,22 +441,32 @@ def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) def authenticate_heat_admin(self, keystone): """Authenticates the admin user with heat.""" self.log.debug('Authenticating heat admin...') ep = keystone.service_catalog.url_for(service_type='orchestration', - endpoint_type='publicURL') - return heat_client.Client(endpoint=ep, token=keystone.auth_token) + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" 
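+        # The session path below assumes a keystoneauth1-style client
+        # (keystone.session is populated); the explicit username/password
+        # call remains as a fallback for older, session-less clients.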
self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - if novaclient.__version__[0] >= "7": + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": return nova_client.Client(NOVA_CLIENT_VERSION, username=user, password=password, project_name=tenant, auth_url=ep) @@ -449,12 +479,15 @@ def authenticate_swift_user(self, keystone, user, password, tenant): """Authenticates a regular user with swift api.""" self.log.debug('Authenticating swift user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index e44e22bf..12f37b28 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -43,6 +43,7 @@ WARNING = "WARNING" INFO = "INFO" DEBUG = "DEBUG" +TRACE = "TRACE" MARKER = object() cache = {} @@ -202,6 +203,27 @@ def service_name(): return local_unit().split('/')[0] +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -478,6 +500,21 @@ def metadata(): return yaml.safe_load(md) +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. 
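+    (Illustrative path, assuming a unit named mysql/0: the function reads
+    <agents-dir>/unit-mysql-0/charm/metadata.yaml, where <agents-dir> is
+    derived from this unit's own charm_dir().)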
+
+    """
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+    with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
@@ -753,6 +790,9 @@ def wrapper(decorated):
 
 def charm_dir():
     """Return the root directory of the current charm"""
+    d = os.environ.get('JUJU_CHARM_DIR')
+    if d is not None:
+        return d
     return os.environ.get('CHARM_DIR')
diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py
index 88e80a49..5656e2f5 100644
--- a/ceph-proxy/tests/charmhelpers/core/host.py
+++ b/ceph-proxy/tests/charmhelpers/core/host.py
@@ -34,7 +34,7 @@
 from contextlib import contextmanager
 from collections import OrderedDict
 
-from .hookenv import log
+from .hookenv import log, DEBUG
 from .fstab import Fstab
 from charmhelpers.osplatform import get_platform
 
@@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
+        service('disable', service_name)
         service('mask', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
@@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init",
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
         service('unmask', service_name)
+        service('enable', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
             init_dir, '{}.override'.format(service_name))
@@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
 
 def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a byte string."""
-    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
-    with open(path, 'wb') as target:
-        os.fchown(target.fileno(), uid, gid)
-        os.fchmod(target.fileno(), perms)
-        target.write(content)
+    # let's see if we can grab the file and compare the content, to avoid
+    # doing a write.
+    existing_content = None
+    existing_uid, existing_gid = None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid = stat.st_uid, stat.st_gid
+    except (IOError, OSError):
+        # the file may legitimately not exist yet
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership.
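+    # (os.chown treats -1 as "leave unchanged", so uid and gid can be
+    # corrected independently below.)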
+ if existing_uid != uid: + log("Changing uid on already existing content: {} -> {}" + .format(existing_uid, uid), level=DEBUG) + os.chown(path, uid, -1) + if existing_gid != gid: + log("Changing gid on already existing content: {} -> {}" + .format(existing_gid, gid), level=DEBUG) + os.chown(path, -1, gid) def fstab_remove(mp): diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py index 0448288c..d8dc378a 100644 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py @@ -19,6 +19,7 @@ 'xenial', 'yakkety', 'zesty', + 'artful', ) From 102c97d19bc03f5f52e3b61fdd613375132ef044 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 24 Aug 2017 16:48:44 -0500 Subject: [PATCH 1361/2699] Sync charm-helpers Change-Id: I6ecd27d221b85da1b399c1de013ae65fcb500135 --- .../charmhelpers/contrib/hardening/apache/checks/config.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index b18b263d..06482aac 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -46,8 +46,9 @@ def get_audits(): context = ApacheConfContext() settings = utils.get_settings('apache') audits = [ - FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root', - group='root', mode=0o0640), + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), TemplatedFile(os.path.join(settings['common']['apache_dir'], 'mods-available/alias.conf'), From 4c2e4fd830477570de597154a54e09df9212dfbf Mon Sep 17 00:00:00 2001 From: Graham Burgess Date: Thu, 10 Aug 2017 11:45:37 -0700 Subject: [PATCH 1362/2699] Add show-disk-free action to ceph-mon charm Added action show-disk-free that shows the output from ceph osd df tree. This makes it easier to determine how balanced the data is spread over the OSDs. Closes-Bug: #1709950 Change-Id: Idecb0fb5ee48b2a24a8991c64a9feeff1950f67b --- ceph-mon/actions.yaml | 17 +++++++++------- ceph-mon/actions/show-disk-free | 1 + ceph-mon/actions/show-disk-free.py | 32 ++++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 7 deletions(-) create mode 120000 ceph-mon/actions/show-disk-free create mode 100755 ceph-mon/actions/show-disk-free.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 67fe4c88..860ca087 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -43,7 +43,7 @@ remove-cache-tier: create-pool: description: Creates a pool params: - name: + name: type: string description: The name of the pool profile-name: @@ -115,7 +115,7 @@ create-erasure-profile: get-erasure-profile: description: Display an erasure code profile. params: - name: + name: type: string description: The name of the profile required: [name] @@ -137,7 +137,7 @@ list-pools: set-pool-max-bytes: description: Set pool quotas for the maximum number of bytes. 
params: - max: + max: type: integer description: The name of the pool pool-name: @@ -170,10 +170,10 @@ pool-statistics: snapshot-pool: description: Snapshot a pool params: - pool-name: + pool-name: type: string description: The name of the pool - snapshot-name: + snapshot-name: type: string description: The name of the snapshot required: [snapshot-name, pool-name] @@ -181,10 +181,10 @@ snapshot-pool: remove-pool-snapshot: description: Remove a pool snapshot params: - pool-name: + pool-name: type: string description: The name of the pool - snapshot-name: + snapshot-name: type: string description: The name of the snapshot required: [snapshot-name, pool-name] @@ -226,3 +226,6 @@ crushmap-update: description: The json crushmap blob required: [map] additionalProperties: false +show-disk-free: + description: Show disk utilization by host and OSD. + additionalProperties: false diff --git a/ceph-mon/actions/show-disk-free b/ceph-mon/actions/show-disk-free new file mode 120000 index 00000000..85abbc3c --- /dev/null +++ b/ceph-mon/actions/show-disk-free @@ -0,0 +1 @@ +show-disk-free.py \ No newline at end of file diff --git a/ceph-mon/actions/show-disk-free.py b/ceph-mon/actions/show-disk-free.py new file mode 100755 index 00000000..b21e4afb --- /dev/null +++ b/ceph-mon/actions/show-disk-free.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path.append('hooks') +from subprocess import check_output, CalledProcessError +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + out = check_output(['ceph', '--id', 'admin', + 'osd', 'df', 'tree']).decode('UTF-8') + action_set({'message': out}) + except CalledProcessError as e: + log(e) + action_fail( + "ceph osd df tree failed with message: {}".format(e.message) + ) From d2abd01134255cc5764d213c82918efe79137094 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 25 Aug 2017 10:14:41 +0100 Subject: [PATCH 1363/2699] Add explanation to bluestore config opt Explain that for Ceph Luminous, which uses Bluestore as the default backend for OSDs, if the bluetsore option is set to false then OSDs will continue to use Filestore. Change-Id: I0cb65310f98562ec959018fad538e9006f1c41f6 --- ceph-osd/config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 1cfbf642..c8866ece 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -100,6 +100,9 @@ options: description: | Use experimental bluestore storage format for OSD devices; only supported in Ceph Jewel (10.2.0) or later. + . + Note that despite bluestore being the default for Ceph Luminous, if this + option is False, OSDs will still use filestore. 
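+      .
+      For example, to keep filestore on a Luminous deployment (hypothetical
+      application name): juju config ceph-osd bluestore=false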
osd-reformat: type: string default: From a24b940c71b6ef75ca4e2a2f502d415c88f6b697 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Mon, 28 Aug 2017 11:26:35 +0300 Subject: [PATCH 1364/2699] fix JUJU_AVAILABILITY_ZONE usage juju_availability_zone is not a valid configuration setting, let's use 'rack' instead. Change-Id: I6763377f253e6feb92d7c4c31eefc600cce3a8b8 Closes-Bug: #1684330 --- ceph-osd/hooks/ceph_hooks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 658ce1c6..46c0103d 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -191,12 +191,12 @@ def install(): def az_info(): az_info = "" + config_az = config("availability_zone") juju_az_info = os.environ.get('JUJU_AVAILABILITY_ZONE') if juju_az_info: - az_info = "{} juju_availability_zone={}".format(az_info, juju_az_info) - config_az = config("availability_zone") + az_info = "{} rack={}".format(az_info, juju_az_info) if config_az: - az_info = "{} config_availability_zone={}".format(az_info, config_az) + az_info = "{} row={}".format(az_info, config_az) if az_info != "": log("AZ Info: " + az_info) return az_info From 576a9e0d30dc074f489afe586f20e740ee5d079d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 28 Aug 2017 14:45:53 +0200 Subject: [PATCH 1365/2699] sync in changes from charms.ceph Change-Id: I3580c9e70ac77726be40b41b70d115ce0db2aaad --- ceph-mon/lib/ceph/utils.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index e0fd6be4..12265498 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1366,7 +1366,7 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False, reformat_osd, ignore_errors, encrypt, bluestore) else: - osdize_dir(dev, encrypt) + osdize_dir(dev, encrypt, bluestore) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, @@ -1395,7 +1395,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if encrypt: cmd.append('--dmcrypt') if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: + if osd_format and not bluestore: cmd.append('--fs-type') cmd.append(osd_format) @@ -1431,7 +1431,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, raise -def osdize_dir(path, encrypt=False): +def osdize_dir(path, encrypt=False, bluestore=False): """Ask ceph-disk to prepare a directory to become an osd. :param path: str. 
The directory to osdize @@ -1459,6 +1459,12 @@ def osdize_dir(path, encrypt=False): if cmp_pkgrevno('ceph', '0.60') >= 0: if encrypt: cmd.append('--dmcrypt') + + # NOTE(icey): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) From fb0ca39a2fe050bb403d3377d6b3583273f2f6f1 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 28 Aug 2017 14:45:58 +0200 Subject: [PATCH 1366/2699] sync in changes from charms.ceph Change-Id: Iaa2ddfa4db639db1a142e7b025a26012fb45e3de --- ceph-osd/lib/ceph/utils.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index e0fd6be4..12265498 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1366,7 +1366,7 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False, reformat_osd, ignore_errors, encrypt, bluestore) else: - osdize_dir(dev, encrypt) + osdize_dir(dev, encrypt, bluestore) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, @@ -1395,7 +1395,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if encrypt: cmd.append('--dmcrypt') if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format: + if osd_format and not bluestore: cmd.append('--fs-type') cmd.append(osd_format) @@ -1431,7 +1431,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, raise -def osdize_dir(path, encrypt=False): +def osdize_dir(path, encrypt=False, bluestore=False): """Ask ceph-disk to prepare a directory to become an osd. :param path: str. The directory to osdize @@ -1459,6 +1459,12 @@ def osdize_dir(path, encrypt=False): if cmp_pkgrevno('ceph', '0.60') >= 0: if encrypt: cmd.append('--dmcrypt') + + # NOTE(icey): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) From 76ceeef0f575810d87f6881c79a3c5310da0c653 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 25 Aug 2017 16:30:02 -0700 Subject: [PATCH 1367/2699] Use ApacheSSLContext to enable SSL object storage Enable SSL object storage using ApacheSSLContext. 
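In outline (see the diff below): a charm-local ApacheSSLContext subclass
fixes service_namespace to 'ceph-radosgw' and derives external_ports from
config('port'), so the rendered Apache https frontend proxies the radosgw
listener rather than a generic API port.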
Change-Id: Id044afc8c07696a5447eb9dc4836470203372090 Closes-Bug: #1690826 Closes-Bug: #1708464 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 11 +- ceph-radosgw/hooks/hooks.py | 46 +++++-- ceph-radosgw/hooks/utils.py | 73 +++++++++-- .../unit_tests/test_ceph_radosgw_context.py | 6 + .../unit_tests/test_ceph_radosgw_utils.py | 114 ++++++++---------- ceph-radosgw/unit_tests/test_hooks.py | 11 +- 6 files changed, 167 insertions(+), 94 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index ae6879e0..ce7975c1 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -41,6 +41,15 @@ from charmhelpers.contrib.storage.linux.ceph import CephConfContext +class ApacheSSLContext(context.ApacheSSLContext): + interfaces = ['https'] + service_namespace = 'ceph-radosgw' + + def __call__(self): + self.external_ports = [config('port')] + return super(ApacheSSLContext, self).__call__() + + class HAProxyContext(context.HAProxyContext): def __call__(self): @@ -163,7 +172,7 @@ def __call__(self): if config('prefer-ipv6'): ensure_host_resolvable_v6(host) - port = determine_apache_port(config('port'), singlenode_mode=True) + port = determine_api_port(config('port'), singlenode_mode=True) if config('prefer-ipv6'): port = "[::]:%s" % (port) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 5aca8df1..ea1fdaec 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -44,6 +44,7 @@ from charmhelpers.core.host import ( cmp_pkgrevno, is_container, + service_reload, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -78,6 +79,7 @@ services, assess_status, setup_keystone_certs, + disable_unused_apache_sites, ) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -96,11 +98,11 @@ # since python-keystoneclient does not pull in icehouse # version 'radosgw', + 'apache2' ] APACHE_PACKAGES = [ 'libapache2-mod-fastcgi', - 'apache2', ] @@ -114,6 +116,7 @@ def install_packages(): status_set('maintenance', 'Installing radosgw packages') apt_install(PACKAGES, fatal=True) apt_purge(APACHE_PACKAGES) + disable_unused_apache_sites() @hooks.hook('install.real') @@ -136,6 +139,7 @@ def install(): @harden() def config_changed(): install_packages() + disable_unused_apache_sites() if config('prefer-ipv6'): status_set('maintenance', 'configuring ipv6') @@ -154,6 +158,7 @@ def config_changed(): mon_relation(r_id, unit) CONFIGS.write_all() + configure_https() update_nrpe_config() @@ -205,11 +210,11 @@ def identity_joined(relid=None): sys.exit(1) port = config('port') - admin_url = '%s:%i/swift' % (canonical_url(None, ADMIN), port) + admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port) internal_url = '%s:%s/swift/v1' % \ - (canonical_url(None, INTERNAL), port) + (canonical_url(CONFIGS, INTERNAL), port) public_url = '%s:%s/swift/v1' % \ - (canonical_url(None, PUBLIC), port) + (canonical_url(CONFIGS, PUBLIC), port) relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, @@ -217,12 +222,6 @@ def identity_joined(relid=None): requested_roles=config('operator-roles'), relation_id=relid) - if relid: - for unit in related_units(relid): - setup_keystone_certs(unit=unit, rid=relid) - else: - setup_keystone_certs() - @hooks.hook('identity-service-relation-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) @@ -231,6 +230,7 @@ def 
identity_changed(relid=None): CONFIGS.write_all() if not is_unit_paused_set(): restart() + configure_https() @hooks.hook('cluster-relation-joined') @@ -351,6 +351,32 @@ def update_nrpe_config(): nrpe_setup.write() +def configure_https(): + '''Enables SSL API Apache config if appropriate and kicks + identity-service and image-service with any required + updates + ''' + CONFIGS.write_all() + if 'https' in CONFIGS.complete_contexts(): + cmd = ['a2ensite', 'openstack_https_frontend'] + subprocess.check_call(cmd) + else: + cmd = ['a2dissite', 'openstack_https_frontend'] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + # The site is not yet enabled or + # https is not configured + pass + + # TODO: improve this by checking if local CN certs are available + # first then checking reload status (see LP #1433114). + if not is_unit_paused_set(): + service_reload('apache2', restart_on_failure=True) + + setup_keystone_certs(CONFIGS) + + @hooks.hook('update-status') @harden() def update_status(): diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 8c5f55d5..2349446e 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -28,6 +28,7 @@ INFO, relation_get, relation_ids, + related_units, application_version_set, ) from charmhelpers.contrib.network.ip import ( @@ -46,7 +47,10 @@ from charmhelpers.contrib.openstack.keystone import ( format_endpoint, ) -from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config, + https, +) from charmhelpers.core.host import ( cmp_pkgrevno, lsb_release, @@ -104,6 +108,12 @@ VERSION_PACKAGE = 'radosgw' +UNUSED_APACHE_SITE_FILES = ["/etc/apache2/sites-available/000-default.conf"] +APACHE_PORTS_FILE = "/etc/apache2/ports.conf" +APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend' +APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \ + 'openstack_https_frontend.conf' + BASE_RESOURCE_MAP = OrderedDict([ (HAPROXY_CONF, { 'contexts': [context.HAProxyContext(singlenode_mode=True), @@ -114,6 +124,14 @@ 'contexts': [ceph_radosgw_context.MonContext()], 'services': ['radosgw'], }), + (APACHE_SITE_CONF, { + 'contexts': [ceph_radosgw_context.ApacheSSLContext()], + 'services': ['apache2'], + }), + (APACHE_SITE_24_CONF, { + 'contexts': [ceph_radosgw_context.ApacheSSLContext()], + 'services': ['apache2'], + }), ]) @@ -131,6 +149,12 @@ def resource_map(): These will be managed for a single hook execution. 
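+    For example, /etc/apache2/sites-available/000-default.conf is disabled
+    via `a2dissite 000-default` (or removed outright if a2dissite fails),
+    and ports.conf is truncated so Apache only listens where the charm's
+    own site files direct it to.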
""" resource_map = deepcopy(BASE_RESOURCE_MAP) + + if os.path.exists('/etc/apache2/conf-available'): + resource_map.pop(APACHE_SITE_CONF) + else: + resource_map.pop(APACHE_SITE_24_CONF) + return resource_map @@ -156,7 +180,10 @@ def services(): _services = [] for v in BASE_RESOURCE_MAP.values(): _services.extend(v.get('services', [])) - return list(set(_services)) + _set_services = set(_services) + if not https(): + _set_services.remove('apache2') + return list(_set_services) def enable_pocket(pocket): @@ -370,7 +397,9 @@ def get_ks_ca_cert(admin_token, auth_endpoint, certs_path): :param certs_path: Path to local certs store :returns: None """ - ksclient = client.Client(token=admin_token, endpoint=auth_endpoint) + + ksclient = keystoneclient.httpclient.HTTPClient(token=admin_token, + endpoint=auth_endpoint) ca_cert = get_ks_cert(ksclient, auth_endpoint, 'ca') if ca_cert: try: @@ -428,7 +457,7 @@ def get_ks_signing_cert(admin_token, auth_endpoint, certs_path): @defer_if_unavailable(['keystoneclient']) -def setup_keystone_certs(unit=None, rid=None): +def setup_keystone_certs(CONFIGS): """ Get CA and signing certs from Keystone used to decrypt revoked token list. @@ -440,14 +469,20 @@ def setup_keystone_certs(unit=None, rid=None): if not os.path.exists(certs_path): mkdir(certs_path) - rdata = relation_get(unit=unit, rid=rid) - required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] - settings = {key: rdata.get(key) for key in required} - if not all(settings.values()): - log("Missing relation settings ({}) - deferring cert setup".format( - ', '.join([k for k in settings if not settings[k]])), + # Do not continue until identity-relation is complete + if 'identity-service' not in CONFIGS.complete_contexts(): + log("Missing relation settings - deferring cert setup", level=DEBUG) return + rdata = {} + for relid in relation_ids('identity-service'): + for unit in related_units(relid): + rdata = relation_get(unit=unit, rid=relid) + if rdata: + break + + required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] + settings = {key: rdata.get(key) for key in required} auth_protocol = rdata.get('auth_protocol', 'http') if is_ipv6(settings.get('auth_host')): @@ -463,3 +498,21 @@ def setup_keystone_certs(unit=None, rid=None): get_ks_signing_cert(settings['admin_token'], auth_endpoint, certs_path) except KSCertSetupException as e: log("Keystone certs setup incomplete - {}".format(e), level=INFO) + + +def disable_unused_apache_sites(): + """Ensure that unused apache configurations are disabled to prevent them + from conflicting with the charm-provided version. 
+ """ + for apache_site_file in UNUSED_APACHE_SITE_FILES: + apache_site = apache_site_file.split('/')[-1].split('.')[0] + if os.path.exists(apache_site_file): + try: + # Try it cleanly + subprocess.check_call(['a2dissite', apache_site]) + except subprocess.CalledProcessError: + # Remove the file + os.remove(apache_site_file) + + with open(APACHE_PORTS_FILE, 'w') as ports: + ports.write("") diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index f84fc692..a69cb62d 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -29,6 +29,7 @@ 'cmp_pkgrevno', 'socket', 'unit_public_ip', + 'determine_api_port' ] @@ -54,6 +55,7 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, _haconfig.side_effect = self.test_config.get _harelation_ids.return_value = [] haproxy_context = context.HAProxyContext() + self.determine_api_port.return_value = 70 expect = { 'cephradosgw_bind_port': 70, 'service_ports': {'cephradosgw-server': [80, 70]} @@ -190,6 +192,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', @@ -229,6 +232,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph-proxy/0'] + self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', @@ -277,6 +281,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'none', 'hostname': 'testhost', @@ -307,6 +312,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 37ed1d6a..93576b21 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - from mock import ( call, patch, @@ -29,6 +27,7 @@ 'application_version_set', 'get_upstream_version', 'format_endpoint', + 'https', ] @@ -90,8 +89,11 @@ def test_pause_resume_helper(self, services): # ports=None whilst port checks are disabled. 
f.assert_called_once_with('assessor', services='s1', ports=None) + @patch.object(utils, 'related_units') + @patch.object(utils, 'relation_ids') @patch.dict('sys.modules', {'requests': MagicMock(), - 'keystoneclient': MagicMock()}) + 'keystoneclient': MagicMock(), + 'httpclient': MagicMock()}) @patch.object(utils, 'is_ipv6', lambda addr: False) @patch.object(utils, 'get_ks_signing_cert') @patch.object(utils, 'get_ks_ca_cert') @@ -99,93 +101,71 @@ def test_pause_resume_helper(self, services): @patch.object(utils, 'mkdir') def test_setup_keystone_certs(self, mock_mkdir, mock_relation_get, mock_get_ks_ca_cert, - mock_get_ks_signing_cert): + mock_get_ks_signing_cert, + mock_relation_ids, mock_related_units): auth_host = 'foo/bar' auth_port = 80 admin_token = '666' auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) self.format_endpoint.return_value = auth_url + configs = MagicMock() + configs.complete_contexts.return_value = ['identity-service'] + mock_relation_ids.return_value = ['identity-service:5'] + mock_related_units.return_value = ['keystone/1'] mock_relation_get.return_value = {'auth_host': auth_host, 'auth_port': auth_port, 'admin_token': admin_token, 'api_version': '2'} - utils.setup_keystone_certs() + utils.setup_keystone_certs(configs) mock_get_ks_signing_cert.assert_has_calls([call(admin_token, auth_url, '/var/lib/ceph/nss')]) mock_get_ks_ca_cert.assert_has_calls([call(admin_token, auth_url, '/var/lib/ceph/nss')]) - def test_get_ks_signing_cert(self): + @patch.object(utils, 'get_ks_cert') + @patch.object(utils.subprocess, 'Popen') + @patch.object(utils.subprocess, 'check_output') + def test_get_ks_signing_cert(self, mock_check_output, mock_Popen, + mock_get_ks_cert): auth_host = 'foo/bar' auth_port = 80 admin_token = '666' auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - mock_ksclient = MagicMock m = mock_open() - with patch.dict('sys.modules', - {'requests': MagicMock(), - 'keystoneclient': mock_ksclient, - 'keystoneclient.exceptions': MagicMock(), - 'keystoneclient.exceptions.ConnectionRefused': - MagicMock(), - 'keystoneclient.exceptions.Forbidden': MagicMock(), - 'keystoneclient.v2_0': MagicMock(), - 'keystoneclient.v2_0.client': MagicMock()}): - # Reimport - del sys.modules['utils'] - import utils - with patch.object(utils, 'subprocess') as mock_subprocess: - with patch.object(utils, 'open', m, create=True): - mock_certificates = MagicMock() - mock_ksclient.certificates = mock_certificates - mock_certificates.get_signing_certificate.return_value = \ - 'signing_cert_data' - utils.get_ks_signing_cert(admin_token, auth_url, - '/foo/bar') - mock_certificates.get_signing_certificate.return_value = \ - None - self.assertRaises(utils.KSCertSetupException, - utils.get_ks_signing_cert, admin_token, - auth_url, '/foo/bar') - - c = ['openssl', 'x509', '-in', - '/foo/bar/signing_certificate.pem', - '-pubkey'] - mock_subprocess.check_output.assert_called_with(c) - - def test_get_ks_ca_cert(self): + with patch.object(utils, 'open', m, create=True): + + mock_get_ks_cert.return_value = 'signing_cert_data' + utils.get_ks_signing_cert(admin_token, auth_url, '/foo/bar') + + mock_get_ks_cert.return_value = None + with self.assertRaises(utils.KSCertSetupException): + utils.get_ks_signing_cert(admin_token, auth_url, '/foo/bar') + + c = ['openssl', 'x509', '-in', + '/foo/bar/signing_certificate.pem', + '-pubkey'] + mock_check_output.assert_called_with(c) + + @patch.object(utils, 'get_ks_cert') + @patch.object(utils.subprocess, 'Popen') + @patch.object(utils.subprocess, 'check_output') + 
def test_get_ks_ca_cert(self, mock_check_output, mock_Popen, + mock_get_ks_cert): auth_host = 'foo/bar' auth_port = 80 admin_token = '666' auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - mock_ksclient = MagicMock m = mock_open() - with patch.dict('sys.modules', - {'requests': MagicMock(), - 'keystoneclient': mock_ksclient, - 'keystoneclient.exceptions': MagicMock(), - 'keystoneclient.exceptions.ConnectionRefused': - MagicMock(), - 'keystoneclient.exceptions.Forbidden': MagicMock(), - 'keystoneclient.v2_0': MagicMock(), - 'keystoneclient.v2_0.client': MagicMock()}): - # Reimport - del sys.modules['utils'] - import utils - with patch.object(utils, 'subprocess') as mock_subprocess: - with patch.object(utils, 'open', m, create=True): - mock_certificates = MagicMock() - mock_ksclient.certificates = mock_certificates - mock_certificates.get_ca_certificate.return_value = \ - 'ca_cert_data' - utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') - mock_certificates.get_ca_certificate.return_value = None - self.assertRaises(utils.KSCertSetupException, - utils.get_ks_ca_cert, admin_token, - auth_url, '/foo/bar') - - c = ['openssl', 'x509', '-in', '/foo/bar/ca.pem', - '-pubkey'] - mock_subprocess.check_output.assert_called_with(c) + with patch.object(utils, 'open', m, create=True): + mock_get_ks_cert.return_value = 'ca_cert_data' + utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') + + mock_get_ks_cert.return_value = None + with self.assertRaises(utils.KSCertSetupException): + utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') + + c = ['openssl', 'x509', '-in', '/foo/bar/ca.pem', + '-pubkey'] + mock_check_output.assert_called_with(c) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index a379b4e5..08c677eb 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -53,6 +53,9 @@ 'get_hacluster_config', 'update_dns_ha_resource_params', 'get_relation_ip', + 'disable_unused_apache_sites', + 'service_reload', + 'setup_keystone_certs', ] @@ -69,8 +72,7 @@ def test_install_packages(self): ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') self.assertTrue(self.apt_update.called) - self.apt_purge.assert_called_with(['libapache2-mod-fastcgi', - 'apache2']) + self.apt_purge.assert_called_with(['libapache2-mod-fastcgi']) def test_install(self): _install_packages = self.patch('install_packages') @@ -144,15 +146,12 @@ def test_restart(self): cmd = ['service', 'radosgw', 'restart'] self.subprocess.call.assert_called_with(cmd) - @patch.object(ceph_hooks, 'setup_keystone_certs') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined_early_version(self, _config, - mock_setup_keystone_certs): + def test_identity_joined_early_version(self, _config): self.cmp_pkgrevno.return_value = -1 ceph_hooks.identity_joined() - self.assertTrue(mock_setup_keystone_certs.called) self.sys.exit.assert_called_with(1) @patch('charmhelpers.contrib.openstack.ip.service_name', From eb64bac444a83a93289b903b592dd11dfcc805b0 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Mon, 28 Aug 2017 16:12:46 +0300 Subject: [PATCH 1368/2699] use a non-legacy bluestore option on Luminous+ the 'experimental' option is no longer needed as of Luminous release https://github.com/ceph/ceph/blob/luminous/src/common/legacy_config_opts.h#L79 Change-Id: Idbbb69acec92b2f2efca80691ca73a2030bcf633 --- 
ceph-osd/hooks/ceph_hooks.py | 1 + ceph-osd/templates/ceph.conf | 10 +++- ceph-osd/unit_tests/test_ceph_hooks.py | 81 ++++++++++++++++++++++++-- 3 files changed, 87 insertions(+), 5 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 658ce1c6..88f17471 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -251,6 +251,7 @@ def get_ceph_context(upgrading=False): 'short_object_len': use_short_objects(), 'upgrade_in_progress': upgrading, 'bluestore': config('bluestore'), + 'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0, } if config('prefer-ipv6'): diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index f7b530c9..fb45e36a 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -41,7 +41,7 @@ setuser match path = /var/lib/ceph/$type/$cluster-$id {% endfor %} {% endif %} -{% if bluestore -%} +{% if bluestore_experimental and bluestore -%} enable experimental unrecoverable data corrupting features = bluestore rocksdb {%- endif %} @@ -56,9 +56,17 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring + +{% if bluestore -%} +{% if not bluestore_experimental -%} +osd objectstore = bluestore +{%- endif -%} +{%- else %} osd journal size = {{ osd_journal_size }} filestore xattr use omap = true journal dio = {{ dio }} +{%- endif %} + {%- if short_object_len %} osd max object name len = 256 osd max object namespace len = 64 diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 8594afbc..c3b95e2d 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -65,7 +65,42 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'short_object_len': True, 'upgrade_in_progress': False, 'use_syslog': 'true', - 'bluestore': False} + 'bluestore': False, + 'bluestore_experimental': False} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', + lambda pkg, ver: -1 if ver == '12.1.0' else 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bluestore': False, + 'bluestore_experimental': True} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -98,7 +133,43 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'short_object_len': True, 'upgrade_in_progress': False, 'use_syslog': 'true', - 'bluestore': True} 
+ 'bluestore': True, + 'bluestore_experimental': False} + self.assertEqual(ctxt, expected) + + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', + lambda pkg, ver: -1 if ver == '12.1.0' else 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['bluestore'] = True + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'osd_journal_size': 1024, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bluestore': True, + 'bluestore_experimental': True} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -132,7 +203,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'short_object_len': True, 'upgrade_in_progress': False, 'use_syslog': 'true', - 'bluestore': False} + 'bluestore': False, + 'bluestore_experimental': False} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -168,7 +240,8 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'short_object_len': True, 'upgrade_in_progress': False, 'use_syslog': 'true', - 'bluestore': False} + 'bluestore': False, + 'bluestore_experimental': False} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'ceph') From 275843500c6c5393de2063b1103af6157b948c0d Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 1 Sep 2017 14:58:45 -0700 Subject: [PATCH 1369/2699] Ensure crgw works with Keystone v3 The URLs for SSL certificates are different and the v2 client functions do not work. This change fixes these issues. Add missing config.yaml options. Change-Id: Ia4c7b508e70f690493b098c6fb07d24a340bc2a6 Closes-Bug: #1708464 --- ceph-radosgw/config.yaml | 46 ++++++++++++ ceph-radosgw/hooks/utils.py | 71 ++++++++++++++----- .../unit_tests/test_ceph_radosgw_utils.py | 65 ++++++++++++----- 3 files changed, 146 insertions(+), 36 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 3a8443dd..b2d3cd5d 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -275,3 +275,49 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup + # HAProxy Parameters + haproxy-server-timeout: + type: int + default: + description: | + Server timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 30000ms is used. + haproxy-client-timeout: + type: int + default: + description: | + Client timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 30000ms is used. 
+ haproxy-queue-timeout: + type: int + default: + description: | + Queue timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 5000ms is used. + haproxy-connect-timeout: + type: int + default: + description: | + Connect timeout configuration in ms for haproxy, used in HA + configurations. If not provided, default value of 5000ms is used. + + # External SSL Parameters + ssl_cert: + type: string + default: + description: | + SSL certificate to install and use for API ports. Setting this value + and ssl_key will enable reverse proxying, point this service's entry in the + Keystone catalog to use https, and override any certificate and key + issued by Keystone (if it is configured to do so). + ssl_key: + type: string + default: + description: SSL key to use with certificate specified as ssl_cert. + ssl_ca: + type: string + default: + description: | + SSL CA to use with the certificate and key provided - this is only + required if you are providing a privately signed ssl_cert and ssl_key. + diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 2349446e..37c5bc97 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -25,6 +25,7 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, INFO, relation_get, relation_ids, @@ -72,6 +73,7 @@ try: import keystoneclient from keystoneclient.v2_0 import client + from keystoneclient.v3 import client as client_v3 try: # Kilo and newer from keystoneclient.exceptions import ( @@ -362,25 +364,32 @@ def _inner2_defer_if_unavailable(*args, **kwargs): def get_ks_cert(ksclient, auth_endpoint, cert_type): """Get certificate from keystone. - :param admin_token: Keystone admin token + :param ksclient: Keystone client :param auth_endpoint: Keystone auth endpoint url :param certs_path: Path to local certs store :returns: certificate """ + if ksclient.version == 'v3': + if cert_type == 'signing': + cert_type = 'certificates' + request = ("{}OS-SIMPLE-CERT/{}" + "".format(auth_endpoint, cert_type)) + else: + request = "{}/certificates/{}".format(auth_endpoint, cert_type) + try: try: # Kilo and newer if cert_type == 'ca': cert = ksclient.certificates.get_ca_certificate() - elif cert_type == 'signing': + elif cert_type in ['signing', 'certificates']: cert = ksclient.certificates.get_signing_certificate() else: raise KSCertSetupException("Invalid cert type " "'{}'".format(cert_type)) except AttributeError: - # Juno and older - cert = requests.request('GET', "{}/certificates/{}". - format(auth_endpoint, cert_type)).text + # Keystone v3 or Juno and older + cert = requests.request('GET', request).text except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden, InternalServerError): raise KSCertSetupException("Error connecting to keystone") @@ -389,17 +398,15 @@ def get_ks_cert(ksclient, auth_endpoint, cert_type): @defer_if_unavailable(['keystoneclient']) -def get_ks_ca_cert(admin_token, auth_endpoint, certs_path): +def get_ks_ca_cert(ksclient, auth_endpoint, certs_path): """"Get and store keystone CA certificate.
- :param admin_token: Keystone admin token + :param ksclient: Keystone client :param auth_endpoint: Keystone auth endpoint url :param certs_path: Path to local certs store :returns: None """ - ksclient = keystoneclient.httpclient.HTTPClient(token=admin_token, - endpoint=auth_endpoint) ca_cert = get_ks_cert(ksclient, auth_endpoint, 'ca') if ca_cert: try: @@ -424,15 +431,14 @@ def get_ks_ca_cert(admin_token, auth_endpoint, certs_path): @defer_if_unavailable(['keystoneclient']) -def get_ks_signing_cert(admin_token, auth_endpoint, certs_path): +def get_ks_signing_cert(ksclient, auth_endpoint, certs_path): """"Get and store keystone signing certificate. - :param admin_token: Keystone admin token + :param ksclient: Keystone client :param auth_endpoint: Keystone auth endpoint url :param certs_path: Path to local certs store :returns: None """ - ksclient = client.Client(token=admin_token, endpoint=auth_endpoint) signing_cert = get_ks_cert(ksclient, auth_endpoint, 'signing') if signing_cert: try: @@ -474,8 +480,32 @@ def setup_keystone_certs(CONFIGS): log("Missing relation settings - deferring cert setup", level=DEBUG) return + + ksclient = get_keystone_client_from_relation() + if not ksclient: + log("Failed to get keystoneclient", level=ERROR) + return + + auth_endpoint = ksclient.auth_endpoint + + try: + get_ks_ca_cert(ksclient, auth_endpoint, certs_path) + get_ks_signing_cert(ksclient, auth_endpoint, certs_path) + except KSCertSetupException as e: + log("Keystone certs setup incomplete - {}".format(e), level=INFO) + + +# TODO: Move to charmhelpers +# TODO: Make it session aware +def get_keystone_client_from_relation(relation_type='identity-service'): + """ Get keystone client from relation data + + :param relation_type: Relation to keystone + :returns: Keystone client + """ + rdata = {} - for relid in relation_ids('identity-service'): + for relid in relation_ids(relation_type): for unit in related_units(relid): rdata = relation_get(unit=unit, rid=relid) if rdata: @@ -488,16 +518,21 @@ def setup_keystone_certs(CONFIGS): if is_ipv6(settings.get('auth_host')): settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) + api_version = rdata.get('api_version') auth_endpoint = format_endpoint(auth_protocol, settings['auth_host'], settings['auth_port'], settings['api_version']) - try: - get_ks_ca_cert(settings['admin_token'], auth_endpoint, certs_path) - get_ks_signing_cert(settings['admin_token'], auth_endpoint, certs_path) - except KSCertSetupException as e: - log("Keystone certs setup incomplete - {}".format(e), level=INFO) + if api_version and '3' in api_version: + ksclient = client_v3.Client(token=settings['admin_token'], + endpoint=auth_endpoint) + else: + ksclient = client.Client(token=settings['admin_token'], + endpoint=auth_endpoint) + # Add simple way to retrieve keystone auth endpoint + ksclient.auth_endpoint = auth_endpoint + return ksclient def disable_unused_apache_sites(): diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 93576b21..6b5807f3 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -89,39 +89,68 @@ def test_pause_resume_helper(self, services): # ports=None whilst port checks are disabled. 
f.assert_called_once_with('assessor', services='s1', ports=None) - @patch.object(utils, 'related_units') - @patch.object(utils, 'relation_ids') - @patch.dict('sys.modules', {'requests': MagicMock(), - 'keystoneclient': MagicMock(), - 'httpclient': MagicMock()}) + @patch.object(utils, 'get_keystone_client_from_relation') @patch.object(utils, 'is_ipv6', lambda addr: False) @patch.object(utils, 'get_ks_signing_cert') @patch.object(utils, 'get_ks_ca_cert') - @patch.object(utils, 'relation_get') @patch.object(utils, 'mkdir') - def test_setup_keystone_certs(self, mock_mkdir, mock_relation_get, + def test_setup_keystone_certs(self, mock_mkdir, mock_get_ks_ca_cert, mock_get_ks_signing_cert, - mock_relation_ids, mock_related_units): + mock_get_keystone_client): auth_host = 'foo/bar' auth_port = 80 - admin_token = '666' auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - self.format_endpoint.return_value = auth_url + mock_ksclient = MagicMock() + mock_ksclient.auth_endpoint = auth_url + mock_get_keystone_client.return_value = mock_ksclient + configs = MagicMock() configs.complete_contexts.return_value = ['identity-service'] - mock_relation_ids.return_value = ['identity-service:5'] - mock_related_units.return_value = ['keystone/1'] - mock_relation_get.return_value = {'auth_host': auth_host, - 'auth_port': auth_port, - 'admin_token': admin_token, - 'api_version': '2'} + utils.setup_keystone_certs(configs) - mock_get_ks_signing_cert.assert_has_calls([call(admin_token, auth_url, + mock_get_ks_signing_cert.assert_has_calls([call(mock_ksclient, + auth_url, '/var/lib/ceph/nss')]) - mock_get_ks_ca_cert.assert_has_calls([call(admin_token, auth_url, + mock_get_ks_ca_cert.assert_has_calls([call(mock_ksclient, auth_url, '/var/lib/ceph/nss')]) + @patch.object(utils, 'client_v3') + @patch.object(utils, 'client') + @patch.object(utils, 'related_units') + @patch.object(utils, 'relation_ids') + @patch.object(utils, 'is_ipv6', lambda addr: False) + @patch.object(utils, 'relation_get') + def test_get_keystone_client_from_relation(self, mock_relation_get, + mock_relation_ids, + mock_related_units, + mock_client, + mock_client_v3): + auth_host = 'foo/bar' + auth_port = 80 + admin_token = '666' + auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) + self.format_endpoint.return_value = auth_url + mock_relation_ids.return_value = ['identity-service:5'] + mock_related_units.return_value = ['keystone/1'] + rel_data = {'auth_host': auth_host, + 'auth_port': auth_port, + 'admin_token': admin_token, + 'api_version': '2'} + + mock_relation_get.return_value = rel_data + utils.get_keystone_client_from_relation() + mock_client.Client.assert_called_with(endpoint=auth_url, + token=admin_token) + + auth_url = 'http://%s:%s/v3' % (auth_host, auth_port) + self.format_endpoint.return_value = auth_url + rel_data['api_version'] = '3' + mock_relation_get.return_value = rel_data + utils.get_keystone_client_from_relation() + mock_client_v3.Client.assert_called_with(endpoint=auth_url, + token=admin_token) + @patch.object(utils, 'get_ks_cert') @patch.object(utils.subprocess, 'Popen') @patch.object(utils.subprocess, 'check_output') From 881e464fd71a8919f1f8f4857c178a1b1ea9f469 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 4 Sep 2017 13:38:06 +0100 Subject: [PATCH 1370/2699] Ensure required keystone settings available When setting up keystone certs we must validate whether the settings we require are available before calling the keystone client.
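A minimal standalone sketch of the guard this change introduces (assuming relation data arrives as plain dicts; the helper name here is illustrative, not the charm's API):

    # Sketch: build settings only once a single related unit has supplied
    # every required key; return None so the caller can log and defer
    # instead of calling the keystone client with partial data.
    REQUIRED = ('admin_token', 'auth_host', 'auth_port', 'api_version')

    def pick_complete_unit(units_data):
        for rdata in units_data:
            if set(REQUIRED).issubset(rdata):
                return {key: rdata[key] for key in REQUIRED}
        return None

    # First unit is still incomplete, second one has everything needed.
    print(pick_complete_unit([
        {'auth_host': '10.0.0.10'},
        {'admin_token': 's3cr3t', 'auth_host': '10.0.0.11',
         'auth_port': '35357', 'api_version': '3'},
    ]))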
Change-Id: I4bf09fe7bf7f5a136aa92cf8b74b1b4d0e87543f Closes-Bug: 1714942 --- ceph-radosgw/hooks/utils.py | 13 +++++--- .../unit_tests/test_ceph_radosgw_utils.py | 31 ++++++++++++++++--- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 37c5bc97..9c2b0e2f 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -503,16 +503,21 @@ def get_keystone_client_from_relation(relation_type='identity-service'): :param relation_type: Relation to keystone :returns: Keystone client """ + required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] + settings = {} rdata = {} for relid in relation_ids(relation_type): for unit in related_units(relid): - rdata = relation_get(unit=unit, rid=relid) - if rdata: + rdata = relation_get(unit=unit, rid=relid) or {} + if set(required).issubset(set(rdata.keys())): + settings = {key: rdata.get(key) for key in required} break - required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] - settings = {key: rdata.get(key) for key in required} + if not settings: + log("Required settings not yet provided by any identity-service " + "relation units", INFO) + return None auth_protocol = rdata.get('auth_protocol', 'http') if is_ipv6(settings.get('auth_host')): diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 6b5807f3..8df4214d 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -121,11 +121,11 @@ def test_setup_keystone_certs(self, mock_mkdir, @patch.object(utils, 'relation_ids') @patch.object(utils, 'is_ipv6', lambda addr: False) @patch.object(utils, 'relation_get') - def test_get_keystone_client_from_relation(self, mock_relation_get, - mock_relation_ids, - mock_related_units, - mock_client, - mock_client_v3): + def test_get_ks_client_from_relation(self, mock_relation_get, + mock_relation_ids, + mock_related_units, + mock_client, + mock_client_v3): auth_host = 'foo/bar' auth_port = 80 admin_token = '666' @@ -151,6 +151,27 @@ def test_get_keystone_client_from_relation(self, mock_relation_get, mock_client_v3.Client.assert_called_with(endpoint=auth_url, token=admin_token) + @patch.object(utils, 'client_v3') + @patch.object(utils, 'client') + @patch.object(utils, 'related_units') + @patch.object(utils, 'relation_ids') + @patch.object(utils, 'is_ipv6', lambda addr: False) + @patch.object(utils, 'relation_get') + def test_get_ks_client_from_relation_not_available(self, mock_relation_get, + mock_relation_ids, + mock_related_units, + mock_client, + mock_client_v3): + mock_relation_ids.return_value = ['identity-service:5'] + mock_related_units.return_value = ['keystone/1'] + rel_data = {'auth_port': '5000', + 'admin_token': 'foo', + 'api_version': '2'} + + mock_relation_get.return_value = rel_data + ksclient = utils.get_keystone_client_from_relation() + self.assertIsNone(ksclient) + @patch.object(utils, 'get_ks_cert') @patch.object(utils.subprocess, 'Popen') @patch.object(utils.subprocess, 'check_output') From a1336065fed1f818285ad9591e9c01c051fce5a2 Mon Sep 17 00:00:00 2001 From: zhangyangyang Date: Mon, 11 Sep 2017 11:41:36 +0800 Subject: [PATCH 1371/2699] change assert(Not)Equals to assert(Not)Equal According to http://docs.python.org/2/library/unittest.html assert(Not)Equals is a deprecated alias of assert(Not)Equal. 
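For reference, a minimal self-contained example of the preferred spelling (an illustrative test, not taken from the charms):

    import unittest

    class AliasExampleTest(unittest.TestCase):
        def test_preferred_spelling(self):
            # assertEqual/assertNotEqual are the canonical names;
            # assertEquals/assertNotEquals are deprecated aliases.
            self.assertEqual(1 + 1, 2)
            self.assertNotEqual('ceph-mon', 'ceph-osd')

    if __name__ == '__main__':
        unittest.main()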
Change-Id: I4cc9976ba8c18447803f7e7e8843fb0b8139b612 Closes-Bug: #1329757 --- ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 2 +- ceph-radosgw/unit_tests/test_hooks.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index a69cb62d..68ff4cbe 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -165,7 +165,7 @@ def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, def test_ids_ctxt_no_rels(self, _log, _rids): _rids.return_value = [] ids_ctxt = context.IdentityServiceContext() - self.assertEquals(ids_ctxt(), None) + self.assertEqual(ids_ctxt(), None) class MonContextTest(CharmTestCase): diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 08c677eb..f62f9839 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -213,8 +213,8 @@ def test_canonical_url_ipv6(self, _config, _unit_get, _is_clustered): _config.side_effect = self.test_config.get _unit_get.return_value = ipv6_addr _is_clustered.return_value = False - self.assertEquals(ceph_hooks.canonical_url({}, PUBLIC), - 'http://[%s]' % ipv6_addr) + self.assertEqual(ceph_hooks.canonical_url({}, PUBLIC), + 'http://[%s]' % ipv6_addr) def test_cluster_joined(self): self.get_relation_ip.side_effect = ['10.0.0.1', From 5aaf8a4985435dca6c3d527c63cb485c41833652 Mon Sep 17 00:00:00 2001 From: zhangyangyang Date: Mon, 11 Sep 2017 21:51:39 +0800 Subject: [PATCH 1372/2699] change assert(Not)Equals to assert(Not)Equal According to http://docs.python.org/2/library/unittest.html assert(Not)Equals is a deprecated alias of assert(Not)Equal. 
Change-Id: Id746a6bc4a51eca5f024211df4d28f93b862e4b2 Closes-Bug: #1329757 --- ceph-mon/unit_tests/test_status.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index eaab94bf..4dfd75fa 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -100,18 +100,18 @@ def test_assess_status_peers_complete_down(self, _peer_units): def test_get_peer_units_no_peers(self): self.relation_ids.return_value = ['mon:1'] self.related_units.return_value = [] - self.assertEquals({'ceph-mon1': True}, - hooks.get_peer_units()) + self.assertEqual({'ceph-mon1': True}, + hooks.get_peer_units()) def test_get_peer_units_peers_incomplete(self): self.relation_ids.return_value = ['mon:1'] self.related_units.return_value = ['ceph-mon2', 'ceph-mon3'] self.relation_get.return_value = None - self.assertEquals({'ceph-mon1': True, - 'ceph-mon2': False, - 'ceph-mon3': False}, - hooks.get_peer_units()) + self.assertEqual({'ceph-mon1': True, + 'ceph-mon2': False, + 'ceph-mon3': False}, + hooks.get_peer_units()) def test_get_peer_units_peers_complete(self): self.relation_ids.return_value = ['mon:1'] @@ -119,7 +119,7 @@ def test_get_peer_units_peers_complete(self): 'ceph-mon3'] self.relation_get.side_effect = ['ceph-mon2', 'ceph-mon3'] - self.assertEquals({'ceph-mon1': True, - 'ceph-mon2': True, - 'ceph-mon3': True}, - hooks.get_peer_units()) + self.assertEqual({'ceph-mon1': True, + 'ceph-mon2': True, + 'ceph-mon3': True}, + hooks.get_peer_units()) From c8daf547b89bfcf13436474b55d6608b55aff6d7 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 12 Sep 2017 15:38:30 +0200 Subject: [PATCH 1373/2699] Rename config variable to match expectation Change-Id: I0d611ca69268357650c651677184288225f3ec8b Closes-Bug: #1716682 --- ceph-mon/hooks/ceph_hooks.py | 7 ++++-- ceph-mon/unit_tests/test_ceph_hooks.py | 33 +++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 93eed209..42a9a21b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -151,6 +151,9 @@ def get_ceph_context(): cephcontext['public_addr'] = get_public_addr() cephcontext['cluster_addr'] = get_cluster_addr() + if config('default-rbd-features'): + cephcontext['rbd_features'] = config('default-rbd-features') + # NOTE(dosaboy): these sections must correspond to what is supported in the # config template. 
sections = ['global', 'mds', 'mon'] @@ -496,8 +499,8 @@ def client_relation_joined(relid=None): data = {'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} - if config('rbd-features'): - data['rbd_features'] = config('rbd-features') + if config('default-rbd-features'): + data['rbd-features'] = config('default-rbd-features') relation_set(relation_id=relid, relation_settings=data) else: diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 96f5b7b5..44288f94 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -23,7 +23,8 @@ 'use-direct-io': True, 'osd-format': 'ext4', 'monitor-hosts': '', - 'prefer-ipv6': False} + 'prefer-ipv6': False, + 'default-rbd-features': None} class CephHooksTestCase(unittest.TestCase): @@ -57,6 +58,36 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', + lambda pkg, ver: -1 if ver == '12.1.0' else 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_rbd_features(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['default-rbd-features'] = 1 + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'public_addr': '10.0.0.1', + 'use_syslog': 'true', + 'rbd_features': 1} + self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) From 9f52ef7a53e087bd838874ee64cc79720e9000cf Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 14 Sep 2017 10:39:52 -0600 Subject: [PATCH 1374/2699] Drop configuration for global keyring Drop explicit global configuration of keyring, supporting installation of the ceph/ceph-mon/ceph-osd charms on the same machine.
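As a toy illustration of the mechanism involved (this mimics, but is not, Ceph's real config handling): with the path pinned in [global], everything on the machine resolves its keyring from the same template, which is what made co-located charms awkward:

    # Toy expansion of Ceph's $cluster/$name config metavariables.
    def expand(template, cluster, name):
        return template.replace('$cluster', cluster).replace('$name', name)

    pinned = '/etc/ceph/$cluster.$name.keyring'
    print(expand(pinned, 'ceph', 'mon.host1'))  # /etc/ceph/ceph.mon.host1.keyring
    print(expand(pinned, 'ceph', 'osd.0'))      # /etc/ceph/ceph.osd.0.keyring
    # With the [global] override dropped, each daemon section keeps its own
    # path, e.g. the [osd] section's /var/lib/ceph/osd/$cluster-$id/keyring.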
Change-Id: Ib4afd01fbcc4478ce90de5bd464b7829ecc5da7e Closes-Bug: 1681750 --- ceph-osd/templates/ceph.conf | 2 +- ceph-osd/tests/basic_deployment.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index fb45e36a..1da9f5da 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -6,7 +6,7 @@ auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} {%- endif %} -keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} fsid = {{ fsid }} diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 8a90cf18..43c46ef4 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -327,7 +327,6 @@ def test_300_ceph_osd_config(self): 'auth cluster required': 'none', 'auth service required': 'none', 'auth client required': 'none', - 'keyring': '/etc/ceph/$cluster.$name.keyring', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'log to syslog': 'false', 'err to syslog': 'false', From a392d97772de94fa703e5ea51b8c9898ec3c7abb Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 14 Sep 2017 10:38:29 -0600 Subject: [PATCH 1375/2699] Drop configuration for global keyring Drop explicit global configuration of keyring, supporting installation of the ceph/ceph-mon/ceph-osd charms on the same machine. Change-Id: I87ae9d5863f8e9489a4ebf8ceee9412c2ba68657 Closes-Bug: 1681750 --- ceph-mon/templates/ceph.conf | 2 +- ceph-mon/tests/basic_deployment.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 25be0167..b19cde13 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -6,7 +6,7 @@ auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} {%- endif %} -keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} fsid = {{ fsid }} diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index d8ff9bb6..ced14ea9 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -349,7 +349,6 @@ def test_300_ceph_config(self): conf = '/etc/ceph/ceph.conf' expected = { 'global': { - 'keyring': '/etc/ceph/$cluster.$name.keyring', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'log to syslog': 'false', 'err to syslog': 'false', From 81f30a791d829f5f3c32aed8b68d3c23080159e5 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Wed, 3 May 2017 16:04:01 +1200 Subject: [PATCH 1376/2699] Add option for OSD initial weight In small clusters, adding OSDs at their full weight causes massive IO workload which makes performance unacceptable. This adds a config option to change the initial weight; we can set it to 0 or something small for clusters that would be affected. Closes-Bug: 1716783 Change-Id: Idadfd565fbda9ffc3952de73c5c58a0dc1dc69c9 --- ceph-osd/config.yaml | 12 ++++++++++++ ceph-osd/hooks/ceph_hooks.py | 1 + ceph-osd/templates/ceph.conf | 3 +++ ceph-osd/unit_tests/test_ceph_hooks.py | 9 ++++++++- 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index c8866ece..aa716d22 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -123,6 +123,18 @@ options: .
Specifying this option on a running Ceph OSD node will have no effect until new disks are added, at which point new disks will be encrypted. + crush-initial-weight: + type: float + default: + description: | + The initial CRUSH weight for OSDs newly added to the crushmap. Use this + option only if you wish to set the weight for newly added OSDs in order + to gradually increase the weight over time. Be very aware that setting + this overrides the default setting, which can lead to imbalance in the + cluster, especially if there are OSDs of different sizes in use. By + default, the initial CRUSH weight for a newly added OSD is set to its + volume size in TB. Leave this option unset to use the default provided + by Ceph itself. This option only affects NEW OSDs, not existing ones. ignore-device-errors: type: boolean default: False diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index add17ffc..b4ad3e26 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -242,6 +242,7 @@ def get_ceph_context(upgrading=False): 'mon_hosts': ' '.join(mon_hosts), 'fsid': get_fsid(), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, + 'crush_initial_weight': config('crush-initial-weight'), 'osd_journal_size': config('osd-journal-size'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': public_network, diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index fb45e36a..fc31ba8d 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -33,6 +33,9 @@ osd crush location = {{crush_location}} {%- if upgrade_in_progress %} setuser match path = /var/lib/ceph/$type/$cluster-$id {%- endif %} +{%- if crush_initial_weight %} +osd crush initial weight = {{ crush_initial_weight }} +{%- endif %} {% if global -%} # The following are user-provided options provided via the config-flags charm option.
# User-provided [global] section config diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index c3b95e2d..874603cf 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -29,7 +29,8 @@ 'osd-format': 'ext4', 'prefer-ipv6': False, 'customize-failure-domain': False, - 'bluestore': False} + 'bluestore': False, + 'crush-initial-weight': '0'} class CephHooksTestCase(unittest.TestCase): @@ -60,6 +61,7 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, @@ -94,6 +96,7 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, @@ -128,6 +131,7 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, @@ -163,6 +167,7 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, @@ -198,6 +203,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, 'osd': {'osd max write size': 1024}, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, @@ -235,6 +241,7 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, 'osd': {'osd max write size': 1024}, + 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'public_addr': '10.0.0.1', 'short_object_len': True, From a4856363b99386fc5c2ba5b01cf201951df3104e Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 25 Sep 2017 11:20:26 +0100 Subject: [PATCH 1377/2699] Handle missing PKI certs for Keystone v3 deploys OpenStack Pike drops PKI support, and the keystone charm no longer configures PKI signing certs for revocation lists at Pike. Previous changes fixed issues with Keystone v2 based deployments at Pike; however the Keystone v3 retrieval code did not inspect the status code on the requests response during certificate retrieval. Ensure that an OK status code is returned from Keystone for v3 deploys, ensuring that Pike based v3 deployments continue to function.
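A minimal sketch of the hardened retrieval pattern described above (the endpoint URL is illustrative):

    import requests

    def fetch_cert(url):
        # Treat anything other than HTTP 200 as a failure rather than
        # storing an error page as a "certificate".
        response = requests.request('GET', url)
        if response.status_code == requests.codes.ok:
            return response.text
        raise RuntimeError('Unable to retrieve certificate: {}'.format(url))

    # e.g. fetch_cert('http://keystone:5000/v3/OS-SIMPLE-CERT/ca')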
Change-Id: I603115a8e298aa8dedbdcea195b27bb8a6c0c71e Closes-Bug: 1718467 --- ceph-radosgw/hooks/utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 9c2b0e2f..ccb839b6 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -389,7 +389,11 @@ def get_ks_cert(ksclient, auth_endpoint, cert_type): "'{}'".format(cert_type)) except AttributeError: # Keystone v3 or Juno and older - cert = requests.request('GET', request).text + response = requests.request('GET', request) + if response.status_code == requests.codes.ok: + cert = response.text + else: + raise KSCertSetupException("Unable to retrieve certificate") except (ConnectionRefused, requests.exceptions.ConnectionError, Forbidden, InternalServerError): raise KSCertSetupException("Error connecting to keystone") From 20d9df691326a7cb6b615e017d4d607f7d3ec1d2 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Mon, 25 Sep 2017 21:33:20 +0300 Subject: [PATCH 1378/2699] sync changes from charms.ceph Change-Id: I8a83e15a77306cca4baa665c31cb9363f7cbde83 Depends-On: I483ee9dae4ce69c71ae06359d0fb96aaa1c56cbc --- ceph-osd/lib/ceph/utils.py | 44 +++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 12265498..0fd3e801 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -51,6 +51,8 @@ DEBUG, ERROR, WARNING, + storage_get, + storage_list, ) from charmhelpers.fetch import ( apt_cache, @@ -1353,12 +1355,38 @@ def get_partitions(dev): return [] -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) +def find_least_used_utility_device(utility_devices): + """ + Find a utility device which has the smallest number of partitions + among other devices in the supplied list. + + :utility_devices: A list of devices to be used for filestore journal + or bluestore wal or db. 
+ :return: string device name + """ + + usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) least = min(usages, key=lambda t: t[0]) return least[1] +def get_devices(name): + """ Merge config and juju storage based devices + + :name: The name of the device type, eg: wal, osd, journal + :returns: Set(device names), which are strings + """ + if config(name): + devices = [l.strip() for l in config(name).split(' ')] + else: + devices = [] + storage_ids = storage_list(name) + devices.extend((storage_get('location', s) for s in storage_ids)) + devices = filter(os.path.exists, devices) + + return set(devices) + + def osdize(dev, osd_format, osd_journal, reformat_osd=False, ignore_errors=False, encrypt=False, bluestore=False): if dev.startswith('/dev'): @@ -1405,13 +1433,23 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, # NOTE(jamespage): enable experimental bluestore support if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') cmd.append(dev) if osd_journal: - least_used = find_least_used_journal(osd_journal) + least_used = find_least_used_utility_device(osd_journal) cmd.append(least_used) else: # Just provide the device - no other options From 23a72a529cb22592b88c3ac11f5badc3b4ff7af2 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 25 Sep 2017 18:26:02 +0200 Subject: [PATCH 1379/2699] Update repo to do ch-sync from Git Change-Id: Ib1577abe79449d5e085dc29021ce1aaefe0cc3e7 --- ceph-mon/Makefile | 4 ++-- ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/charm-helpers-tests.yaml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index d4b8b8d1..5fdba5df 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -14,8 +14,8 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin - @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py + bin/git_sync.py: @mkdir -p bin diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index e6d709ac..75ab3b33 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml index b0de9df6..f64f0dde 100644 --- a/ceph-mon/charm-helpers-tests.yaml +++ b/ceph-mon/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: tests/charmhelpers include: - contrib.amulet From 6c3f211e4ebd8ef0c7526a5f1789823425d45261 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 25 Sep 2017 18:26:02 +0200 Subject: [PATCH 1380/2699] Update repo to do ch-sync from Git Change-Id: I7bd5b62d3bfdd11334b3a8ce25692c1343a5c515 --- ceph-osd/Makefile | 4 ++-- ceph-osd/charm-helpers-hooks.yaml | 2 +- ceph-osd/charm-helpers-tests.yaml | 2 +- 3 files changed, 4
insertions(+), 4 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 7f6d3fd0..7609385a 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -14,8 +14,8 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin - @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py + bin/git_sync.py: @mkdir -p bin diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 63321ed2..ab7e3bad 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml index b0de9df6..f64f0dde 100644 --- a/ceph-osd/charm-helpers-tests.yaml +++ b/ceph-osd/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: tests/charmhelpers include: - contrib.amulet From 36252d359559681f99e6234dd3e4108e5b672a52 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 25 Sep 2017 18:26:02 +0200 Subject: [PATCH 1381/2699] Update repo to do ch-sync from Git Change-Id: Ie01f45ef28ed5c07a3d18fa86f7185b4e1727923 --- ceph-radosgw/Makefile | 3 +-- ceph-radosgw/charm-helpers-hooks.yaml | 2 +- ceph-radosgw/charm-helpers-tests.yaml | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index ec26c512..c772127d 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -14,8 +14,7 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin - @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 0f259d6a..370de9ae 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml index 7e150c19..3a8c294e 100644 --- a/ceph-radosgw/charm-helpers-tests.yaml +++ b/ceph-radosgw/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: tests/charmhelpers include: - core From 7a1e370e7d5d9e589df75898f31c6516d1fc6a6f Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 25 Sep 2017 18:26:02 +0200 Subject: [PATCH 1382/2699] Update repo to do ch-sync from Git Change-Id: I59511ae8fa4495e3bf14052777293373436e5c67 --- ceph-proxy/Makefile | 3 +-- ceph-proxy/charm-helpers-hooks.yaml | 2 +- ceph-proxy/charm-helpers-tests.yaml | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index f63dfd0d..89f4eb1f 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -14,8 +14,7 @@ functional_test: bin/charm_helpers_sync.py: @mkdir -p bin - @bzr cat 
lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ - > bin/charm_helpers_sync.py + @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 14aa3e0d..e4767c51 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml index b0de9df6..f64f0dde 100644 --- a/ceph-proxy/charm-helpers-tests.yaml +++ b/ceph-proxy/charm-helpers-tests.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +repo: https://github.com/juju/charm-helpers destination: tests/charmhelpers include: - contrib.amulet From 89d00bf5e7f7dde18aad42df4a9234076a7d257b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Sep 2017 09:43:48 -0400 Subject: [PATCH 1383/2699] Update from charms.ceph This change attempts to band-aid a bug with occasional failures to authenticate with a ceph quorum Change-Id: Ib6109c6201486985be12778240b02c2a0e6e4fd4 Partial-Bug: #1719436 Depends-On: I1b44f87522a283c9e6d06064687a2330ea23e354 --- ceph-mon/lib/ceph/utils.py | 114 +++++++++++++++++++++++++++---------- 1 file changed, 83 insertions(+), 31 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 12265498..343a759a 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -30,6 +30,7 @@ from charmhelpers.core import hookenv from charmhelpers.core import templating +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import ( chownr, cmp_pkgrevno, @@ -51,6 +52,8 @@ DEBUG, ERROR, WARNING, + storage_get, + storage_list, ) from charmhelpers.fetch import ( apt_cache, @@ -1285,40 +1288,53 @@ def bootstrap_monitor_cluster(secret): mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning.
- cmd = ['ceph-create-keys', '--id', hostname] - subprocess.check_call(cmd) + add_keyring_to_ceph(keyring, + secret, + hostname, + path, + done, + init_marker) + except: raise finally: os.unlink(keyring) +@retry_on_exception(3, base_delay=5) +def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) + osstat = os.stat("/etc/ceph/ceph.client.admin.keyring") + if not osstat.st_size: + raise Exception + + def update_monfs(): hostname = socket.gethostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1353,12 +1369,38 @@ def get_partitions(dev): return [] -def find_least_used_journal(journal_devices): - usages = map(lambda a: (len(get_partitions(a)), a), journal_devices) +def find_least_used_utility_device(utility_devices): + """ + Find a utility device which has the smallest number of partitions + among other devices in the supplied list. + + :utility_devices: A list of devices to be used for filestore journal + or bluestore wal or db. 
+ :return: string device name + """ + + usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) least = min(usages, key=lambda t: t[0]) return least[1] +def get_devices(name): + """ Merge config and juju storage based devices + + :name: The name of the device type, eg: wal, osd, journal + :returns: Set(device names), which are strings + """ + if config(name): + devices = [l.strip() for l in config(name).split(' ')] + else: + devices = [] + storage_ids = storage_list(name) + devices.extend((storage_get('location', s) for s in storage_ids)) + devices = filter(os.path.exists, devices) + + return set(devices) + + def osdize(dev, osd_format, osd_journal, reformat_osd=False, ignore_errors=False, encrypt=False, bluestore=False): if dev.startswith('/dev'): @@ -1405,13 +1447,23 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, # NOTE(jamespage): enable experimental bluestore support if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') cmd.append(dev) if osd_journal: - least_used = find_least_used_journal(osd_journal) + least_used = find_least_used_utility_device(osd_journal) cmd.append(least_used) else: # Just provide the device - no other options From 61ab5694c650f0373881e3781aef84ee8760646e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 26 Sep 2017 11:38:58 -0400 Subject: [PATCH 1384/2699] Add Artful dev series metadata Change-Id: I9da2e7d7981c9f4cf6f48b9a42a1b8e7cf2ee07e --- ceph-proxy/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index cf682d52..bd159d09 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - artful - zesty - trusty extra-bindings: From 0fb01336edc815b2840674e8a4baf63b5aa2d1fa Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 26 Sep 2017 11:38:46 -0400 Subject: [PATCH 1385/2699] Add Artful dev series metadata Change-Id: Ie3777a7e028eeef59fdd354329371fc3237e64ae --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 82f9d191..32139968 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - artful - zesty - trusty peers: From 7cb23fc88fdff1ae671648986a4351d7c267e058 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 26 Sep 2017 11:39:05 -0400 Subject: [PATCH 1386/2699] Add Artful dev series metadata Change-Id: I83085110c427702bbe23174a671a4cec92146f8c --- ceph-radosgw/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 4c3737b8..95356f1c 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,6 +14,7 @@ tags: - misc series: - xenial + - artful - zesty - trusty extra-bindings: From 811c534290caadf2bf592a75f686860bcb88289a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 26 Sep 2017 11:38:52 -0400 Subject: [PATCH 1387/2699] Add Artful dev series metadata Change-Id: I8b66ca368dcf6ec55d4c558e96f156c4859bad63 --- ceph-osd/metadata.yaml | 1 + 1 file
changed, 1 insertion(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 8c61f070..c709dbbb 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -12,6 +12,7 @@ tags: - misc series: - xenial + - artful - zesty - trusty description: | From 179cfeda92d744965ceb9f148dd48923ef4939fb Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 27 Sep 2017 08:40:57 -0400 Subject: [PATCH 1388/2699] Remove keyring from expected ceph.conf The global keyring was removed from ceph-mon configuration to better support colocating ceph-mon and ceph-osd. Change-Id: Ic1ce5f7fa4f0ba7f36b96b3204634b010d1346d3 Closes-Bug: #1719869 --- ceph-fs/src/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index ec2b1ce0..2a1c2e8f 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -183,7 +183,6 @@ def test_300_ceph_config(self): conf = '/etc/ceph/ceph.conf' expected = { 'global': { - 'keyring': '/etc/ceph/$cluster.$name.keyring', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'log to syslog': 'false', 'err to syslog': 'false', From 4a3d0c125e4222a0acc3e529ff49a665991570de Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 26 Sep 2017 11:38:39 -0400 Subject: [PATCH 1389/2699] Add Artful dev series metadata Change-Id: I136250a5987f42c86f858389aa426d4635545992 --- ceph-fs/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index bb9ed281..8d1b80d0 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - xenial + - artful - zesty subordinate: false requires: From 59874a753473c47fab76865e695c962bfa022771 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 28 Sep 2017 03:52:49 +0000 Subject: [PATCH 1390/2699] Update amulet test definitions Change-Id: Iacd3ad7f0aa02b59288c045817672fcb387e6da4 --- ceph-proxy/tests/gate-basic-artful-pike | 9 +++++++++ ceph-proxy/tests/gate-basic-xenial-pike | 11 +++++++++++ 2 files changed, 20 insertions(+) create mode 100755 ceph-proxy/tests/gate-basic-artful-pike create mode 100755 ceph-proxy/tests/gate-basic-xenial-pike diff --git a/ceph-proxy/tests/gate-basic-artful-pike b/ceph-proxy/tests/gate-basic-artful-pike new file mode 100755 index 00000000..5815e9dd --- /dev/null +++ b/ceph-proxy/tests/gate-basic-artful-pike @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on artful-pike.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='artful') + deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-pike b/ceph-proxy/tests/gate-basic-xenial-pike new file mode 100755 index 00000000..8f4410fd --- /dev/null +++ b/ceph-proxy/tests/gate-basic-xenial-pike @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on xenial-pike.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-pike', + source='cloud:xenial-updates/pike') + deployment.run_tests() From 3bba9cde0504aa04beb82a8f8eb8ef3793c28f73 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Thu, 14 Sep 2017 14:09:43 -0700 Subject: [PATCH 1391/2699] Add ceph-bootstrap relation for ceph charm migration This commit adds the no-bootstrap config option and a new relation for sharing 
existing monitor information with the ceph-mon charm (e.g. the fsid and monitor-secret). Change-Id: Iced246b79572142df5608bf731b6b2759ea81fd0 Implements-Blueprint: charm-ceph-migration --- ceph-mon/config.yaml | 9 ++ .../hooks/bootstrap-source-relation-changed | 1 + .../hooks/bootstrap-source-relation-departed | 1 + ceph-mon/hooks/ceph_hooks.py | 132 +++++++++++++--- ceph-mon/hooks/leader-settings-changed | 1 + ceph-mon/metadata.yaml | 3 + ceph-mon/unit_tests/test_ceph_hooks.py | 142 +++++++++++++++++- ceph-mon/unit_tests/test_status.py | 8 + ceph-mon/unit_tests/test_utils.py | 32 +++- 9 files changed, 302 insertions(+), 27 deletions(-) create mode 120000 ceph-mon/hooks/bootstrap-source-relation-changed create mode 120000 ceph-mon/hooks/bootstrap-source-relation-departed create mode 120000 ceph-mon/hooks/leader-settings-changed diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 8156bf1c..39688be6 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -211,3 +211,12 @@ options: . This needs to be set to 1 when deploying a cloud with the nova-lxd hypervisor. + no-bootstrap: + type: boolean + default: False + description: | + Causes the charm to not do any of the initial bootstrapping of the + Ceph monitor cluster. This is only intended to be used when migrating + from the ceph all-in-one charm to a ceph-mon / ceph-osd deployment. + Refer to the Charm Deployment guide at https://docs.openstack.org/charm-deployment-guide/latest/ + for more information. diff --git a/ceph-mon/hooks/bootstrap-source-relation-changed b/ceph-mon/hooks/bootstrap-source-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/bootstrap-source-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/bootstrap-source-relation-departed b/ceph-mon/hooks/bootstrap-source-relation-departed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/bootstrap-source-relation-departed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 42a9a21b..9de2cd75 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -178,6 +178,9 @@ def emit_cephconf(): @hooks.hook('config-changed') @harden() def config_changed(): + # Get the cfg object so we can see if the no-bootstrap value has changed + # and triggered this hook invocation + cfg = config() if config('prefer-ipv6'): assert_charm_supports_ipv6() @@ -192,24 +195,32 @@ def config_changed(): update_nrpe_config() if is_leader(): - if not leader_get('fsid') or not leader_get('monitor-secret'): - if config('fsid'): - fsid = config('fsid') - else: - fsid = "{}".format(uuid.uuid1()) - if config('monitor-secret'): - mon_secret = config('monitor-secret') - else: - mon_secret = "{}".format(ceph.generate_monitor_secret()) - status_set('maintenance', 'Creating FSID and Monitor Secret') - opts = { - 'fsid': fsid, - 'monitor-secret': mon_secret, - } - log("Settings for the cluster are: {}".format(opts)) - leader_set(opts) - else: - if leader_get('fsid') is None or leader_get('monitor-secret') is None: + if not config('no-bootstrap'): + if not leader_get('fsid') or not leader_get('monitor-secret'): + if config('fsid'): + fsid = config('fsid') + else: + fsid = "{}".format(uuid.uuid1()) + if config('monitor-secret'): + mon_secret = config('monitor-secret') + else: + mon_secret = "{}".format(ceph.generate_monitor_secret()) + status_set('maintenance', 'Creating FSID and Monitor Secret') + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + log("Settings for the cluster are: {}".format(opts)) + leader_set(opts) + elif cfg.changed('no-bootstrap') and \ + is_relation_made('bootstrap-source'): + # User changed the no-bootstrap config option, we're the leader, + # and the bootstrap-source relation has been made. The charm should + # be in a blocked state indicating that the no-bootstrap option + # must be set. This block is invoked when the user is trying to + # get out of that scenario by enabling no-bootstrap. + bootstrap_source_relation_changed() + elif leader_get('fsid') is None or leader_get('monitor-secret') is None: log('still waiting for leader to setup keys') status_set('waiting', 'Waiting for leader to setup keys') sys.exit(0) @@ -232,7 +243,11 @@ def get_mon_hosts(): addr = get_public_addr() hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) - for relid in relation_ids('mon'): + rel_ids = relation_ids('mon') + if config('no-bootstrap'): + rel_ids += relation_ids('bootstrap-source') + + for relid in rel_ids: for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) if addr is not None: @@ -265,8 +280,70 @@ def mon_relation_joined(): relation_settings={'ceph-public-address': public_addr}) +@hooks.hook('bootstrap-source-relation-changed') +def bootstrap_source_relation_changed(): + """Handles relation data changes on the bootstrap-source relation. + + The bootstrap-source relation to share remote bootstrap information with + the ceph-mon charm. This relation is used to exchange the remote + ceph-public-addresses which are used for the mons, the fsid, and the + monitor-secret. + """ + if not config('no-bootstrap'): + status_set('blocked', 'Cannot join the bootstrap-source relation when ' + 'no-bootstrap is False') + return + + if not is_leader(): + log('Deferring leader-setting updates to the leader unit') + return + + curr_fsid = leader_get('fsid') + curr_secret = leader_get('monitor-secret') + for relid in relation_ids('bootstrap-source'): + for unit in related_units(relid=relid): + mon_secret = relation_get('monitor-secret', unit, relid) + fsid = relation_get('fsid', unit, relid) + + if not (mon_secret and fsid): + log('Relation data is not ready as the fsid or the ' + 'monitor-secret are missing from the relation: ' + 'mon_secret = %s and fsid = %s ' % (mon_secret, fsid)) + continue + + if not (curr_fsid or curr_secret): + curr_fsid = fsid + curr_secret = mon_secret + else: + # The fsids and secrets need to match or the local monitors + # will fail to join the mon cluster. If they don't, + # bail because something needs to be investigated. + assert curr_fsid == fsid, \ + "bootstrap fsid '%s' != current fsid '%s'" % ( + fsid, curr_fsid) + assert curr_secret == mon_secret, \ + "bootstrap secret '%s' != current secret '%s'" % ( + mon_secret, curr_secret) + + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + + log('Updating leader settings for fsid and monitor-secret ' + 'from remote relation data: %s' % opts) + leader_set(opts) + + # The leader unit needs to bootstrap itself as it won't receive the + # leader-settings-changed hook elsewhere. + if curr_fsid: + mon_relation() + + @hooks.hook('mon-relation-departed', - 'mon-relation-changed') + 'mon-relation-changed', + 'leader-settings-changed', + 'bootstrap-source-relation-departed') def mon_relation(): if leader_get('monitor-secret') is None: log('still waiting for leader to setup keys') @@ -320,7 +397,8 @@ def mon_relation(): def notify_osds(): for relid in relation_ids('osd'): - osd_relation(relid) + for unit in related_units(relid): + osd_relation(relid=relid, unit=unit) def notify_radosgws(): @@ -341,7 +419,7 @@ def notify_client(): @hooks.hook('osd-relation-joined') @hooks.hook('osd-relation-changed') -def osd_relation(relid=None): +def osd_relation(relid=None, unit=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') public_addr = get_public_addr() @@ -354,7 +432,7 @@ def osd_relation(relid=None): caps=ceph.osd_upgrade_caps), } - unit = remote_unit() + unit = unit or remote_unit() settings = relation_get(rid=relid, unit=unit) """Process broker request(s).""" if 'broker_req' in settings: @@ -590,6 +668,14 @@ def update_nrpe_config(): def assess_status(): '''Assess status of current unit''' application_version_set(get_upstream_version(VERSION_PACKAGE)) + + # Check that the no-bootstrap config option is set in conjunction with + # having the bootstrap-source relation established + if not config('no-bootstrap') and is_relation_made('bootstrap-source'): + status_set('blocked', 'Cannot join the bootstrap-source relation when ' + 'no-bootstrap is False') + return + moncount = int(config('monitor-count')) units = get_peer_units() # not enough peers and mon_count > 1 diff --git a/ceph-mon/hooks/leader-settings-changed b/ceph-mon/hooks/leader-settings-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/leader-settings-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 82f9d191..bf379593 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -36,3 +36,6 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container +requires: + bootstrap-source: + interface: ceph-bootstrap diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 44288f94..ef7a0786 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -11,8 +11,28 @@ mock_apt.apt_pkg = MagicMock() import charmhelpers.contrib.storage.linux.ceph as ceph -import ceph_hooks +import test_utils +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks + + +TO_PATCH = [ + 'config', + 'is_leader', + 'is_relation_made', + 'leader_get', + 'leader_set', + 'log', + 'mon_relation', + 'relation_ids', + 'related_units', + 'relation_get', + 'relations_of_type', + 'status_set', +] CHARM_CONFIG = {'config-flags': '', 'auth-supported': False, @@ -193,7 +213,7 @@ def setUp(self): @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'related_units') - def test_related_ods_single_relation(self, + def test_related_osd_single_relation(self, related_units, relation_ids): relation_ids.return_value = ['osd:0'] @@ -205,7 +225,7 @@ def test_related_ods_single_relation(self, @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'related_units') - def test_related_ods_multi_relation(self, + def test_related_osd_multi_relation(self, related_units, relation_ids): relation_ids.return_value = ['osd:0', 'osd:23'] @@ -218,3 +238,119 @@ def test_related_ods_multi_relation(self, call('osd:0'), call('osd:23') ]) + + +class BootstrapSourceTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(BootstrapSourceTestCase, self).setUp(ceph_hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.leader_get.side_effect = self.test_leader_settings.get + self.leader_set.side_effect = self.test_leader_settings.set + self.relation_get.side_effect = self.test_relation.get + self.test_config.set('no-bootstrap', True) + self.is_leader.return_value = True + self.relation_ids.return_value = ['bootstrap-source:0'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + + def test_bootstrap_source_no_bootstrap(self): + """Ensure the config option of no-bootstrap is set to continue""" + self.test_config.set('no-bootstrap', False) + ceph_hooks.bootstrap_source_relation_changed() + self.status_set.assert_called_once_with('blocked', + 'Cannot join the ' + 'bootstrap-source relation ' + 'when no-bootstrap is False') + + def test_bootstrap_source_not_leader(self): + """Ensure the processing is deferred to the leader""" + self.is_leader.return_value = False + ceph_hooks.bootstrap_source_relation_changed() + self.assertEqual(self.leader_set.call_count, 0) + + def test_bootstrap_source_relation_data_not_ready(self): + """Ensures no bootstrapping done if relation data not present""" + ceph_hooks.bootstrap_source_relation_changed() + expected_calls = [] + relid = 'bootstrap-source:0' + for unit in ('ceph/0', 'ceph/1', 'ceph/2'): + expected_calls.append(call('monitor-secret', unit, relid)) + expected_calls.append(call('fsid', unit, relid)) + self.relation_get.has_calls(expected_calls) + self.assertEqual(self.leader_set.call_count, 0) + self.assertEqual(self.mon_relation.call_count, 0) + + def test_bootstrap_source_good_path(self): + """Tests the good path where all is setup and relations established""" + self.test_relation.set({'monitor-secret': 'abcd', + 'fsid': '1234'}) + ceph_hooks.bootstrap_source_relation_changed() + self.leader_set.assert_called_with({'fsid': '1234', + 'monitor-secret': 'abcd'}) + self.mon_relation.assert_called_once_with() + + def test_bootstrap_source_different_fsid_secret(self): + """Tests where the bootstrap relation has a different fsid""" + self.test_relation.set({'monitor-secret': 'abcd', + 'fsid': '1234'}) + self.test_leader_settings.set({'monitor-secret': 'mysecret', + 'fsid': '7890'}) + self.assertRaises(AssertionError, + ceph_hooks.bootstrap_source_relation_changed) + + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'create_sysctl') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'bootstrap_source_relation_changed') + def test_config_changed_no_bootstrap_changed(self, + bootstrap_source_rel_changed, + get_mon_hosts, + check_for_upgrade, + create_sysctl, + emit_ceph_conf): + """Tests that changing no-bootstrap invokes the bs relation changed""" + self.relations_of_type.return_value = [] + self.is_relation_made.return_value = True + self.test_config.set_changed('no-bootstrap', True) + ceph_hooks.config_changed() + bootstrap_source_rel_changed.assert_called_once() + + @patch.object(ceph_hooks, 'get_public_addr') + def test_get_mon_hosts(self, get_public_addr): + """Tests that bootstrap-source relations are used""" + unit_addrs = { + 'mon:0': { + 'ceph-mon/0': '172.16.0.2', + 'ceph-mon/1': '172.16.0.3', + }, + 'bootstrap-source:1': { + 'ceph/0': '172.16.10.2', + 'ceph/1': '172.16.10.3', + 'ceph/2': '172.16.10.4', + } + } + + def rel_ids_side_effect(relname): + for key in unit_addrs.keys(): + if key.split(':')[0] == relname: + return [key] + return None + + def rel_get_side_effect(attr, unit, relid): + return unit_addrs[relid][unit] + + def rel_units_side_effect(relid): + if relid in unit_addrs: + return unit_addrs[relid].keys() + return [] + + self.relation_ids.side_effect = rel_ids_side_effect + self.related_units.side_effect = rel_units_side_effect + get_public_addr.return_value = '172.16.0.4' + self.relation_get.side_effect = rel_get_side_effect + hosts = ceph_hooks.get_mon_hosts() + self.assertEqual(hosts, [ + '172.16.0.2:6789', '172.16.0.3:6789', '172.16.0.4:6789', + '172.16.10.2:6789', '172.16.10.3:6789', '172.16.10.4:6789', + ]) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 4dfd75fa..33f924f5 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -32,6 +32,7 @@ 'status_set', 'config', 'ceph', + 'is_relation_made', 'relation_ids', 'relation_get', 'related_units', @@ -64,6 +65,7 @@ def setUp(self): self.test_config.set('monitor-count', 3) self.local_unit.return_value = 'ceph-mon1' self.get_upstream_version.return_value = '10.2.2' + self.is_relation_made.return_value = False @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_no_peers(self, _peer_units): @@ -123,3 +125,9 @@ def test_get_peer_units_peers_complete(self): 'ceph-mon2': True, 'ceph-mon3': True}, hooks.get_peer_units()) + + def test_no_bootstrap_not_set(self): + self.is_relation_made.return_value = True + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 97d3ee84..1665c161 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -66,6 +66,7 @@ def setUp(self, obj, patches): self.obj = obj self.test_config = TestConfig() self.test_relation = TestRelation() + self.test_leader_settings = TestLeaderSettings() self.patch_all() def patch(self, method): @@ -83,10 +84,14 @@ class TestConfig(object): def __init__(self): self.config = get_default_config() + self.config_changed = {} + self.config_changed.setdefault(False) def get(self, attr=None): if not attr: + # Return a copy of self to allow emulation closer to what + # hookenv.config() returns (not a dict).
and Monitor Secret') + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + log("Settings for the cluster are: {}".format(opts)) + leader_set(opts) + elif cfg.changed('no-bootstrap') and \ + is_relation_made('bootstrap-source'): + # User changed the no-bootstrap config option, we're the leader, + # and the bootstrap-source relation has been made. The charm should + # be in a blocked state indicating that the no-bootstrap option + # must be set. This block is invoked when the user is trying to + # get out of that scenario by enabling no-bootstrap. + bootstrap_source_relation_changed() + elif leader_get('fsid') is None or leader_get('monitor-secret') is None: log('still waiting for leader to setup keys') status_set('waiting', 'Waiting for leader to setup keys') sys.exit(0) @@ -232,7 +243,11 @@ def get_mon_hosts(): addr = get_public_addr() hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) - for relid in relation_ids('mon'): + rel_ids = relation_ids('mon') + if config('no-bootstrap'): + rel_ids += relation_ids('bootstrap-source') + + for relid in rel_ids: for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) if addr is not None: @@ -265,8 +280,70 @@ def mon_relation_joined(): relation_settings={'ceph-public-address': public_addr}) +@hooks.hook('bootstrap-source-relation-changed') +def bootstrap_source_relation_changed(): + """Handles relation data changes on the bootstrap-source relation. + + The bootstrap-source relation to share remote bootstrap information with + the ceph-mon charm. This relation is used to exchange the remote + ceph-public-addresses which are used for the mon's, the fsid, and the + monitor-secret. + """ + if not config('no-bootstrap'): + status_set('blocked', 'Cannot join the bootstrap-source relation when ' + 'no-bootstrap is False') + return + + if not is_leader(): + log('Deferring leader-setting updates to the leader unit') + return + + curr_fsid = leader_get('fsid') + curr_secret = leader_get('monitor-secret') + for relid in relation_ids('bootstrap-source'): + for unit in related_units(relid=relid): + mon_secret = relation_get('monitor-secret', unit, relid) + fsid = relation_get('fsid', unit, relid) + + if not (mon_secret and fsid): + log('Relation data is not ready as the fsid or the ' + 'monitor-secret are missing from the relation: ' + 'mon_secret = %s and fsid = %s ' % (mon_secret, fsid)) + continue + + if not (curr_fsid or curr_secret): + curr_fsid = fsid + curr_secret = mon_secret + else: + # The fsids and secrets need to match or the local monitors + # will fail to join the mon cluster. If they don't, + # bail because something needs to be investigated. + assert curr_fsid == fsid, \ + "bootstrap fsid '%s' != current fsid '%s'" % ( + fsid, curr_fsid) + assert curr_secret == mon_secret, \ + "bootstrap secret '%s' != current secret '%s'" % ( + mon_secret, curr_secret) + + opts = { + 'fsid': fsid, + 'monitor-secret': mon_secret, + } + + log('Updating leader settings for fsid and monitor-secret ' + 'from remote relation data: %s' % opts) + leader_set(opts) + + # The leader unit needs to bootstrap itself as it won't receive the + # leader-settings-changed hook elsewhere. 
+ if curr_fsid: + mon_relation() + + @hooks.hook('mon-relation-departed', - 'mon-relation-changed') + 'mon-relation-changed', + 'leader-settings-changed', + 'bootstrap-source-relation-departed') def mon_relation(): if leader_get('monitor-secret') is None: log('still waiting for leader to setup keys') @@ -320,7 +397,8 @@ def mon_relation(): def notify_osds(): for relid in relation_ids('osd'): - osd_relation(relid) + for unit in related_units(relid): + osd_relation(relid=relid, unit=unit) def notify_radosgws(): @@ -341,7 +419,7 @@ def notify_client(): @hooks.hook('osd-relation-joined') @hooks.hook('osd-relation-changed') -def osd_relation(relid=None): +def osd_relation(relid=None, unit=None): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') public_addr = get_public_addr() @@ -354,7 +432,7 @@ def osd_relation(relid=None): caps=ceph.osd_upgrade_caps), } - unit = remote_unit() + unit = unit or remote_unit() settings = relation_get(rid=relid, unit=unit) """Process broker request(s).""" if 'broker_req' in settings: @@ -590,6 +668,14 @@ def update_nrpe_config(): def assess_status(): '''Assess status of current unit''' application_version_set(get_upstream_version(VERSION_PACKAGE)) + + # Check that the no-bootstrap config option is set in conjunction with + # having the bootstrap-source relation established + if not config('no-bootstrap') and is_relation_made('bootstrap-source'): + status_set('blocked', 'Cannot join the bootstrap-source relation when ' + 'no-bootstrap is False') + return + moncount = int(config('monitor-count')) units = get_peer_units() # not enough peers and mon_count > 1 diff --git a/ceph-mon/hooks/leader-settings-changed b/ceph-mon/hooks/leader-settings-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/leader-settings-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 82f9d191..bf379593 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -36,3 +36,6 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container +requires: + bootstrap-source: + interface: ceph-bootstrap diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 44288f94..ef7a0786 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -11,8 +11,28 @@ mock_apt.apt_pkg = MagicMock() import charmhelpers.contrib.storage.linux.ceph as ceph -import ceph_hooks +import test_utils +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks + + +TO_PATCH = [ + 'config', + 'is_leader', + 'is_relation_made', + 'leader_get', + 'leader_set', + 'log', + 'mon_relation', + 'relation_ids', + 'related_units', + 'relation_get', + 'relations_of_type', + 'status_set', +] CHARM_CONFIG = {'config-flags': '', 'auth-supported': False, @@ -193,7 +213,7 @@ def setUp(self): @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'related_units') - def test_related_ods_single_relation(self, + def test_related_osd_single_relation(self, related_units, relation_ids): relation_ids.return_value = ['osd:0'] @@ -205,7 +225,7 @@ def test_related_ods_single_relation(self, @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'related_units') - def test_related_ods_multi_relation(self, + def test_related_osd_multi_relation(self, 
related_units, relation_ids): relation_ids.return_value = ['osd:0', 'osd:23'] @@ -218,3 +238,119 @@ def test_related_ods_multi_relation(self, call('osd:0'), call('osd:23') ]) + + +class BootstrapSourceTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(BootstrapSourceTestCase, self).setUp(ceph_hooks, TO_PATCH) + self.config.side_effect = self.test_config.get + self.leader_get.side_effect = self.test_leader_settings.get + self.leader_set.side_effect = self.test_leader_settings.set + self.relation_get.side_effect = self.test_relation.get + self.test_config.set('no-bootstrap', True) + self.is_leader.return_value = True + self.relation_ids.return_value = ['bootstrap-source:0'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + + def test_bootstrap_source_no_bootstrap(self): + """Ensure the config option of no-bootstrap is set to continue""" + self.test_config.set('no-bootstrap', False) + ceph_hooks.bootstrap_source_relation_changed() + self.status_set.assert_called_once_with('blocked', + 'Cannot join the ' + 'bootstrap-source relation ' + 'when no-bootstrap is False') + + def test_bootstrap_source_not_leader(self): + """Ensure the processing is deferred to the leader""" + self.is_leader.return_value = False + ceph_hooks.bootstrap_source_relation_changed() + self.assertEqual(self.leader_set.call_count, 0) + + def test_bootstrap_source_relation_data_not_ready(self): + """Ensures no bootstrapping done if relation data not present""" + ceph_hooks.bootstrap_source_relation_changed() + expected_calls = [] + relid = 'bootstrap-source:0' + for unit in ('ceph/0', 'ceph/1', 'ceph/2'): + expected_calls.append(call('monitor-secret', unit, relid)) + expected_calls.append(call('fsid', unit, relid)) + self.relation_get.assert_has_calls(expected_calls) + self.assertEqual(self.leader_set.call_count, 0) + self.assertEqual(self.mon_relation.call_count, 0) + + def test_bootstrap_source_good_path(self): + """Tests the good path where all is setup and relations established""" + self.test_relation.set({'monitor-secret': 'abcd', + 'fsid': '1234'}) + ceph_hooks.bootstrap_source_relation_changed() + self.leader_set.assert_called_with({'fsid': '1234', + 'monitor-secret': 'abcd'}) + self.mon_relation.assert_called_once_with() + + def test_bootstrap_source_different_fsid_secret(self): + """Tests where the bootstrap relation has a different fsid""" + self.test_relation.set({'monitor-secret': 'abcd', + 'fsid': '1234'}) + self.test_leader_settings.set({'monitor-secret': 'mysecret', + 'fsid': '7890'}) + self.assertRaises(AssertionError, + ceph_hooks.bootstrap_source_relation_changed) + + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'create_sysctl') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'bootstrap_source_relation_changed') + def test_config_changed_no_bootstrap_changed(self, + bootstrap_source_rel_changed, + get_mon_hosts, + check_for_upgrade, + create_sysctl, + emit_ceph_conf): + """Tests that changing no-bootstrap invokes the bs relation changed""" + self.relations_of_type.return_value = [] + self.is_relation_made.return_value = True + self.test_config.set_changed('no-bootstrap', True) + ceph_hooks.config_changed() + bootstrap_source_rel_changed.assert_called_once() + + @patch.object(ceph_hooks, 'get_public_addr') + def test_get_mon_hosts(self, get_public_addr): + """Tests that bootstrap-source relations are used""" + unit_addrs = { + 'mon:0': { + 'ceph-mon/0': '172.16.0.2', + 'ceph-mon/1':
'172.16.0.3', + }, + 'bootstrap-source:1': { + 'ceph/0': '172.16.10.2', + 'ceph/1': '172.16.10.3', + 'ceph/2': '172.16.10.4', + } + } + + def rel_ids_side_effect(relname): + for key in unit_addrs.keys(): + if key.split(':')[0] == relname: + return [key] + return None + + def rel_get_side_effect(attr, unit, relid): + return unit_addrs[relid][unit] + + def rel_units_side_effect(relid): + if relid in unit_addrs: + return unit_addrs[relid].keys() + return [] + + self.relation_ids.side_effect = rel_ids_side_effect + self.related_units.side_effect = rel_units_side_effect + get_public_addr.return_value = '172.16.0.4' + self.relation_get.side_effect = rel_get_side_effect + hosts = ceph_hooks.get_mon_hosts() + self.assertEqual(hosts, [ + '172.16.0.2:6789', '172.16.0.3:6789', '172.16.0.4:6789', + '172.16.10.2:6789', '172.16.10.3:6789', '172.16.10.4:6789', + ]) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 4dfd75fa..33f924f5 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -32,6 +32,7 @@ 'status_set', 'config', 'ceph', + 'is_relation_made', 'relation_ids', 'relation_get', 'related_units', @@ -64,6 +65,7 @@ def setUp(self): self.test_config.set('monitor-count', 3) self.local_unit.return_value = 'ceph-mon1' self.get_upstream_version.return_value = '10.2.2' + self.is_relation_made.return_value = False @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_no_peers(self, _peer_units): @@ -123,3 +125,9 @@ def test_get_peer_units_peers_complete(self): 'ceph-mon2': True, 'ceph-mon3': True}, hooks.get_peer_units()) + + def test_no_bootstrap_not_set(self): + self.is_relation_made.return_value = True + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 97d3ee84..1665c161 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -66,6 +66,7 @@ def setUp(self, obj, patches): self.obj = obj self.test_config = TestConfig() self.test_relation = TestRelation() + self.test_leader_settings = TestLeaderSettings() self.patch_all() def patch(self, method): @@ -83,10 +84,14 @@ class TestConfig(object): def __init__(self): self.config = get_default_config() + self.config_changed = {} + self.config_changed.setdefault(False) def get(self, attr=None): if not attr: - return self.get_all() + # Return a copy of self to allow emulation closer to what + # hookenv.config() returns (not a dict).
+ return self try: return self.config[attr] except KeyError: @@ -100,6 +105,15 @@ def set(self, attr, value): raise KeyError self.config[attr] = value + def __getitem__(self, item): + return self.config[item] + + def changed(self, attr): + return self.config_changed[attr] + + def set_changed(self, attr, changed=True): + self.config_changed[attr] = changed + class TestRelation(object): @@ -117,6 +131,22 @@ def get(self, attr=None, unit=None, rid=None): return None +class TestLeaderSettings(object): + + def __init__(self, settings={}): + self.settings = settings + + def set(self, settings): + self.settings = settings + + def get(self, attr=None): + if attr is None: + return self.settings + elif attr in self.settings: + return self.settings[attr] + return None + + @contextmanager def patch_open(): '''Patch open() to allow mocking both open() itself and the file that is From fc2af4115d88d79599b2d1e790c499d7434d532b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 3 Oct 2017 04:28:25 +0000 Subject: [PATCH 1392/2699] Update requirements for git charmhelpers Change-Id: Ia72ee80797e8b1c4f62ca089a46462b34e34f4db --- ceph-fs/src/test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 3cb4ae76..16d0adb9 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -8,7 +8,7 @@ os-testr>=0.4.1 charm-tools>=2.0.0 requests==2.6.0 # amulet deployment helpers -bzr+lp:charm-helpers#egg=charmhelpers +git+https://github.com/juju/charm-helpers#egg=charmhelpers # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints amulet>=1.14.3,<2.0 From 02c70dd453904c7d804537445d1b817d6e5db955 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 28 Sep 2017 03:52:27 +0000 Subject: [PATCH 1393/2699] Update amulet test definitions Change-Id: Iaf46e2339e8d6afda6524b3fdeae6c0af780c0c2 --- ceph-fs/src/tests/basic_deployment.py | 6 +++++- ceph-fs/src/tests/gate-basic-artful-pike | 23 ++++++++++++++++++++++ ceph-fs/src/tests/gate-basic-xenial-mitaka | 2 +- 3 files changed, 29 insertions(+), 2 deletions(-) create mode 100755 ceph-fs/src/tests/gate-basic-artful-pike diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 2a1c2e8f..5827ff6b 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -210,7 +210,11 @@ def test_400_ceph_check_osd_pools(self): """Check osd pools on all ceph units, expect them to be identical, and expect specific pools to be present.""" u.log.debug('Checking pools on ceph units...') - expected_pools = ['rbd', 'ceph-fs_data', 'ceph-fs_metadata'] + + if self._get_openstack_release() >= self.xenial_pike: + expected_pools = ['ceph-fs_data', 'ceph-fs_metadata'] + else: + expected_pools = ['rbd', 'ceph-fs_data', 'ceph-fs_metadata'] results = [] sentries = [ self.ceph_mon0_sentry, diff --git a/ceph-fs/src/tests/gate-basic-artful-pike b/ceph-fs/src/tests/gate-basic-artful-pike new file mode 100755 index 00000000..f7293202 --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-artful-pike @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on artful-pike.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='artful') + deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-xenial-mitaka b/ceph-fs/src/tests/gate-basic-xenial-mitaka index 734d993a..b9bc393d 100755 --- a/ceph-fs/src/tests/gate-basic-xenial-mitaka +++ b/ceph-fs/src/tests/gate-basic-xenial-mitaka @@ -20,4 +20,4 @@ from basic_deployment import CephFsBasicDeployment if __name__ == '__main__': deployment = CephFsBasicDeployment(series='xenial') -deployment.run_tests() + deployment.run_tests() From 8af8cbda0888566d3ced53b77f07de1646c23692 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 23 Oct 2017 13:36:56 +0200 Subject: [PATCH 1394/2699] Update functional test model to use cinder-ceph subordinate Change-Id: I2d441da31e8e3b6570bf237661bf22c294d8ee73 Related-Bug: #1719742 --- ceph-osd/tests/basic_deployment.py | 19 ++++++++----------- .../contrib/openstack/amulet/deployment.py | 4 ++-- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 43c46ef4..70e51af0 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -66,7 +66,8 @@ def _add_services(self): {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, - {'name': 'cinder'} + {'name': 'cinder'}, + {'name': 'cinder-ceph'}, ] super(CephOsdBasicDeployment, self)._add_services(this_service, other_services) @@ -87,8 +88,9 @@ def _add_relations(self): 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd' + 'cinder-ceph:storage-backend': 'cinder:storage-backend', + 'cinder-ceph:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd', } super(CephOsdBasicDeployment, self)._add_relations(relations) @@ -356,12 +358,7 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' - # NOTE(jamespage): Deal with section config for >= ocata. - if self._get_openstack_release() >= self.xenial_ocata: - section_key = 'CEPH' - else: - section_key = 'DEFAULT' - + section_key = 'cinder-ceph' expected = { section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' @@ -468,7 +465,7 @@ def test_410_ceph_cinder_vol_create(self): obj_count_samples = [] pool_size_samples = [] pools = u.get_ceph_pools(self.ceph0_sentry) - cinder_pool = pools['cinder'] + cinder_pool = pools['cinder-ceph'] # Check ceph cinder pool object count, disk space usage and pool name u.log.debug('Checking ceph cinder pool original samples...') @@ -477,7 +474,7 @@ def test_410_ceph_cinder_vol_create(self): obj_count_samples.append(obj_count) pool_size_samples.append(kb_used) - expected = 'cinder' + expected = 'cinder-ceph' if pool_name != expected: msg = ('Ceph pool {} unexpected name (actual, expected): ' '{}. 
{}'.format(cinder_pool, pool_name, expected)) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..fc20a76d 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -307,7 +307,7 @@ def get_ceph_expected_pools(self, radosgw=False): # Kilo or later pools = [ 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] else: @@ -316,7 +316,7 @@ def get_ceph_expected_pools(self, radosgw=False): 'data', 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] From dc2d63d1d57108fc1295c365be1eb730d2ef18ed Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 23 Oct 2017 09:20:54 +0200 Subject: [PATCH 1395/2699] Fix imports for actions Commit acd1fd96bfd812997612322590de9a6e88413bfb code cleanup did not address actions. Fixed. Change-Id: Ic5de38e4d56022e3f72e5dcb08f6a4253e3306ee Closes-bug: #1726275 --- ceph-osd/actions/add_disk.py | 17 ++++++++++------- ceph-osd/actions/list_disks.py | 2 +- ceph-osd/actions/replace_osd.py | 14 ++++++++------ 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 9e303494..123fa7fb 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -31,7 +31,10 @@ send_request_if_needed, ) -import ceph +from ceph.utils import ( + osdize, + tune_dev, +) from ceph_hooks import ( get_journal_devices, @@ -39,14 +42,14 @@ def add_device(request, device_path, bucket=None): - ceph.osdize(dev, config('osd-format'), - get_journal_devices(), config('osd-reformat'), - config('ignore-device-errors'), - config('osd-encrypt'), - config('bluestore')) + osdize(dev, config('osd-format'), + get_journal_devices(), config('osd-reformat'), + config('ignore-device-errors'), + config('osd-encrypt'), + config('bluestore')) # Make it fast! 
if config('autotune'): - ceph.tune_dev(dev) + tune_dev(dev) mounts = filter(lambda disk: device_path in disk.device, psutil.disk_partitions()) if mounts: diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index f92a3b69..7bf971f6 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import action_set -from ceph import unmounted_disks +from ceph.utils import unmounted_disks if __name__ == '__main__': action_set({ diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index edfa1f7b..b23a10d7 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -22,7 +22,9 @@ from charmhelpers.core.hookenv import action_get, log, config, action_fail -import ceph +from ceph.utils import ( + replace_osd, +) """ Given a OSD number this script will attempt to turn that back into a mount @@ -90,8 +92,8 @@ def get_device_number(osd_number): osd_format = config('osd-format') osd_journal = config('osd-journal') - ceph.replace_osd(dead_osd_number=dead_osd_number, - dead_osd_device="/dev/{}".format(device_name), - new_osd_device=replacement_device, - osd_format=osd_format, - osd_journal=osd_journal) + replace_osd(dead_osd_number=dead_osd_number, + dead_osd_device="/dev/{}".format(device_name), + new_osd_device=replacement_device, + osd_format=osd_format, + osd_journal=osd_journal) From eb4a97d6cee4fc0c709381f729fc68a0ba327053 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 23 Oct 2017 23:50:44 +0200 Subject: [PATCH 1396/2699] Update functional test model to use cinder-ceph subordinate Change-Id: I82054066df7440a9396b1e193d1f1059e567a769 Related-Bug: #1719742 --- ceph-radosgw/tests/basic_deployment.py | 15 ++++++--------- .../contrib/openstack/amulet/deployment.py | 4 ++-- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index ac1fba18..c9b39863 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -66,7 +66,8 @@ def _add_services(self): {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, - {'name': 'cinder'} + {'name': 'cinder'}, + {'name': 'cinder-ceph'}, ] super(CephRadosGwBasicDeployment, self)._add_services(this_service, other_services) @@ -87,9 +88,10 @@ def _add_relations(self): 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph:client', + 'cinder-ceph:storage-backend': 'cinder:storage-backend', + 'cinder-ceph:ceph': 'ceph:client', 'ceph-radosgw:mon': 'ceph:radosgw', - 'ceph-radosgw:identity-service': 'keystone:identity-service' + 'ceph-radosgw:identity-service': 'keystone:identity-service', } super(CephRadosGwBasicDeployment, self)._add_relations(relations) @@ -462,12 +464,7 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' - # NOTE(jamespage): Deal with section config for >= ocata. 
- if self._get_openstack_release() >= self.xenial_ocata: - section_key = 'CEPH' - else: - section_key = 'DEFAULT' - + section_key = 'cinder-ceph' expected = { section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..fc20a76d 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -307,7 +307,7 @@ def get_ceph_expected_pools(self, radosgw=False): # Kilo or later pools = [ 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] else: @@ -316,7 +316,7 @@ def get_ceph_expected_pools(self, radosgw=False): 'data', 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] From 88749259e178c24ec6c9d61d31095b29c32dd87a Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 23 Oct 2017 23:44:56 +0200 Subject: [PATCH 1397/2699] Update functional test model to use cinder-ceph subordinate Change-Id: I8d7ffe6b06c08e56a6dc9a3a9bc20db01506c8f2 Related-Bug: #1719742 --- ceph-mon/tests/basic_deployment.py | 24 +++++++++---------- .../contrib/openstack/amulet/deployment.py | 4 ++-- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index ced14ea9..caa92197 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -67,7 +67,8 @@ def _add_services(self): {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, - {'name': 'cinder'} + {'name': 'cinder'}, + {'name': 'cinder-ceph'}, ] super(CephBasicDeployment, self)._add_services(this_service, other_services) @@ -88,8 +89,9 @@ def _add_relations(self): 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', - 'cinder:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd' + 'cinder-ceph:storage-backend': 'cinder:storage-backend', + 'cinder-ceph:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd', } super(CephBasicDeployment, self)._add_relations(relations) @@ -136,6 +138,7 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry['nova-compute'][0] self.glance_sentry = self.d.sentry['glance'][0] self.cinder_sentry = self.d.sentry['cinder'][0] + self.cinder_ceph_sentry = self.d.sentry['cinder-ceph'][0] self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] self.ceph0_sentry = self.d.sentry['ceph-mon'][0] self.ceph1_sentry = self.d.sentry['ceph-mon'][1] @@ -316,7 +319,7 @@ def test_204_ceph_cinder_client_relation(self): """Verify the ceph to cinder ceph-client relation data.""" u.log.debug('Checking ceph:cinder ceph relation data...') unit = self.ceph2_sentry - relation = ['client', 'cinder:ceph'] + relation = ['client', 'cinder-ceph:ceph'] expected = { 'private-address': u.valid_ip, 'auth': 'none', @@ -331,7 +334,7 @@ def test_204_ceph_cinder_client_relation(self): def test_205_cinder_ceph_client_relation(self): """Verify the cinder to ceph ceph-client relation data.""" u.log.debug('Checking cinder:ceph ceph relation data...') - unit = self.cinder_sentry + unit = self.cinder_ceph_sentry relation = ['ceph', 'ceph-mon:client'] expected = { 'private-address': u.valid_ip @@ -377,12 +380,7 @@ def test_302_cinder_rbd_config(self): u.log.debug('Checking cinder (rbd) config file data...') unit = self.cinder_sentry conf = '/etc/cinder/cinder.conf' - # 
NOTE(jamespage): Deal with section config for >= ocata. - if self._get_openstack_release() >= self.xenial_ocata: - section_key = 'CEPH' - else: - section_key = 'DEFAULT' - + section_key = 'cinder-ceph' expected = { section_key: { 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..fc20a76d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -307,7 +307,7 @@ def get_ceph_expected_pools(self, radosgw=False): # Kilo or later pools = [ 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] else: @@ -316,7 +316,7 @@ def get_ceph_expected_pools(self, radosgw=False): 'data', 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] From 9e1f23c3575e0dba2528812e7a41149c38828d7f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Oct 2017 12:04:49 +0100 Subject: [PATCH 1398/2699] Fix actions broken import Change-Id: I1f5f24fb31cdb04cf00a1ecbbd942f84cd7b19f5 Partial-Bug: 1726275 --- ceph-mon/actions/pool-set.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/actions/pool-set.py b/ceph-mon/actions/pool-set.py index 850071e7..577b4a53 100755 --- a/ceph-mon/actions/pool-set.py +++ b/ceph-mon/actions/pool-set.py @@ -17,10 +17,11 @@ from subprocess import CalledProcessError import sys +sys.path.append('lib') sys.path.append('hooks') from charmhelpers.core.hookenv import action_get, log, action_fail -from ceph_broker import handle_set_pool_value +from ceph.broker import handle_set_pool_value if __name__ == '__main__': name = action_get("pool-name") From df03e5c8d1283dfc494497cbe7d7085f456122a5 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 25 Oct 2017 14:21:46 +0100 Subject: [PATCH 1399/2699] Fix up the imports on a recent change to follow coding spec Essentially, we want to avoid doing: from module import f1, f2, f3 and instead do: import module module.f1() This helps with reading the code and understanding where the functions come from.
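For illustration, the difference in call-site readability looks like this (a standalone snippet using a stdlib module rather than the charm's own code):

    # Discouraged: nothing at the call site says where join() came from.
    # from os.path import join, basename

    # Preferred: the module prefix documents the origin of every call.
    import os.path

    print(os.path.join('/var/lib/ceph', 'osd'))
    print(os.path.basename('/dev/sdb'))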
Change-Id: I1bc06441dc5595e8a0c84a5b5c3db5d88b68a4f2 --- ceph-osd/actions/add_disk.py | 44 ++++++++++++--------------------- ceph-osd/actions/list_disks.py | 8 +++--- ceph-osd/actions/replace_osd.py | 36 +++++++++++++-------------- 3 files changed, 38 insertions(+), 50 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 123fa7fb..e8d2f159 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -21,35 +21,23 @@ sys.path.append('lib') sys.path.append('hooks') -from charmhelpers.core.hookenv import ( - config, - action_get, -) +import charmhelpers.contrib.storage.linux.ceph as ch_ceph +import charmhelpers.core.hookenv as hookenv -from charmhelpers.contrib.storage.linux.ceph import ( - CephBrokerRq, - send_request_if_needed, -) - -from ceph.utils import ( - osdize, - tune_dev, -) - -from ceph_hooks import ( - get_journal_devices, -) +import ceph_hooks +import ceph.utils def add_device(request, device_path, bucket=None): - osdize(dev, config('osd-format'), - get_journal_devices(), config('osd-reformat'), - config('ignore-device-errors'), - config('osd-encrypt'), - config('bluestore')) + ceph.utils.osdize(dev, hookenv.config('osd-format'), + ceph_hooks.get_journal_devices(), + hookenv.config('osd-reformat'), + hookenv.config('ignore-device-errors'), + hookenv.config('osd-encrypt'), + hookenv.config('bluestore')) # Make it fast! - if config('autotune'): - tune_dev(dev) + if hookenv.config('autotune'): + ceph.utils.tune_dev(dev) mounts = filter(lambda disk: device_path in disk.device, psutil.disk_partitions()) if mounts: @@ -64,7 +52,7 @@ def add_device(request, device_path, bucket=None): def get_devices(): devices = [] - for path in action_get('osd-devices').split(' '): + for path in hookenv.action_get('osd-devices').split(' '): path = path.strip() if os.path.isabs(path): devices.append(path) @@ -73,9 +61,9 @@ def get_devices(): if __name__ == "__main__": - request = CephBrokerRq() + request = ch_ceph.CephBrokerRq() for dev in get_devices(): request = add_device(request=request, device_path=dev, - bucket=action_get("bucket")) - send_request_if_needed(request, relation='mon') + bucket=hookenv.action_get("bucket")) + ch_ceph.send_request_if_needed(request, relation='mon') diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index 7bf971f6..25a6cdf0 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -26,10 +26,10 @@ sys.path.append('hooks/') sys.path.append('lib/') -from charmhelpers.core.hookenv import action_set +import charmhelpers.core.hookenv as hookenv -from ceph.utils import unmounted_disks +import ceph.utils if __name__ == '__main__': - action_set({ - 'disks': unmounted_disks()}) + hookenv.action_set({ + 'disks': ceph.utils.unmounted_disks()}) diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index b23a10d7..8d9f2ec1 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -20,11 +20,9 @@ sys.path.append('hooks/') sys.path.append('lib/') -from charmhelpers.core.hookenv import action_get, log, config, action_fail +import charmhelpers.core.hookenv as hookenv -from ceph.utils import ( - replace_osd, -) +import ceph.utils """ Given a OSD number this script will attempt to turn that back into a mount @@ -38,9 +36,11 @@ def get_disk_stats(): with open('/proc/diskstats', 'r') as diskstats: return diskstats.readlines() except IOError as err: - log('Could not open /proc/diskstats. 
Error: {}'.format(err.message)) - action_fail('replace-osd failed because /proc/diskstats could not ' - 'be opened {}'.format(err.message)) + hookenv.log('Could not open /proc/diskstats. Error: {}' + .format(err.message)) + hookenv.action_fail( + 'replace-osd failed because /proc/diskstats could not ' + 'be opened {}'.format(err.message)) return None @@ -64,8 +64,8 @@ def lookup_device_name(major_number, minor_number): # Found our device. Return its name return parts[2] except ValueError as value_err: - log('Could not convert {} or {} into an integer. Error: {}' - .format(parts[0], parts[1], value_err.message)) + hookenv.log('Could not convert {} or {} into an integer. Error: {}' + .format(parts[0], parts[1], value_err.message)) continue return None @@ -85,15 +85,15 @@ def get_device_number(osd_number): if __name__ == '__main__': - dead_osd_number = action_get("osd-number") - replacement_device = action_get("replacement-device") + dead_osd_number = hookenv.action_get("osd-number") + replacement_device = hookenv.action_get("replacement-device") major, minor = get_device_number(dead_osd_number) device_name = lookup_device_name(major, minor) - osd_format = config('osd-format') - osd_journal = config('osd-journal') + osd_format = hookenv.config('osd-format') + osd_journal = hookenv.config('osd-journal') - replace_osd(dead_osd_number=dead_osd_number, - dead_osd_device="/dev/{}".format(device_name), - new_osd_device=replacement_device, - osd_format=osd_format, - osd_journal=osd_journal) + ceph.utils.replace_osd(dead_osd_number=dead_osd_number, + dead_osd_device="/dev/{}".format(device_name), + new_osd_device=replacement_device, + osd_format=osd_format, + osd_journal=osd_journal) From 2978c7221cd50006678bc1d5c2977f8d9540a443 Mon Sep 17 00:00:00 2001 From: Marian Gasparovic Date: Thu, 26 Oct 2017 13:58:19 +0200 Subject: [PATCH 1400/2699] Plugin should return also a reason for warning from ceph. 
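A simplified, standalone sketch of the detail extraction this change introduces, run against a canned "ceph status" snippet rather than live command output (the plugin itself only falls back to the next line when the health field carries no inline detail):

    import re

    # Canned plain-text status; the reason follows the health line.
    lines = ['  cluster 6547bd3e-1397-11e2-82e5-53567c8d32dc',
             '   health HEALTH_WARN',
             '          clock skew detected on mon.node2',
             '   monmap e1: 3 mons at ...']

    msg = 'CRITICAL: ceph health status: "HEALTH_WARN'
    a = iter(lines)
    for line in a:
        if re.search('health', line) is not None:
            # The line after the match holds the human-readable reason.
            msg += " " + next(a).strip()
            break
    msg += '"'
    print(msg)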
Change-Id: I9247f374ce88e0c208252b6a37d82fad407cc84a Signed-off-by: Marian Gasparovic --- ceph-mon/files/nagios/check_ceph_status.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index cb8d1a1a..09ee5f8d 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -15,19 +15,28 @@ def check_ceph_status(args): nagios_plugin.check_file_freshness(args.status_file, 3600) with open(args.status_file, "r") as f: lines = f.readlines() - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) else: lines = subprocess.check_output(["ceph", "status"]).split('\n') - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) - if ('health' not in status_data - or 'monmap' not in status_data - or 'osdmap'not in status_data): + if ('health' not in status_data or + 'monmap' not in status_data or + 'osdmap' not in status_data): raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete') if status_data['health'] != 'HEALTH_OK': - msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health']) + msg = 'CRITICAL: ceph health status: "{}'.format(status_data['health']) + if (len(status_data['health'].split(' '))) == 1: + a = iter(lines) + for line in a: + if re.search('health', line) is not None: + msg1 = next(a) + msg += " " + msg += msg1.strip() + break + msg += '"' raise nagios_plugin.CriticalError(msg) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( From aeb9a66098d36504c8df3e47b35ce25e9021ab9a Mon Sep 17 00:00:00 2001 From: Marian Gasparovic Date: Thu, 26 Oct 2017 14:05:20 +0200 Subject: [PATCH 1401/2699] Plugin should return also a reason for warning from ceph. 
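The adjacent osdmap check, sketched standalone against a canned osdmap summary; the int() casts are an addition here for clarity, since the plugin itself compares the regex groups as strings:

    import re

    osdmap = 'e42: 46 osds: 44 up, 46 in'
    osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in", osdmap)
    total, up, _ = (int(g) for g in osds.groups())
    if total > up:  # not all OSDs are "up"
        print('CRITICAL: Some OSDs are not up. '
              'Total: {}, up: {}'.format(total, up))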
Change-Id: Ic8612eb123ec8335a6a867f0775116dba3a68dce Signed-off-by: Marian Gasparovic --- ceph-osd/files/nagios/check_ceph_status.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/ceph-osd/files/nagios/check_ceph_status.py b/ceph-osd/files/nagios/check_ceph_status.py index cb8d1a1a..09ee5f8d 100755 --- a/ceph-osd/files/nagios/check_ceph_status.py +++ b/ceph-osd/files/nagios/check_ceph_status.py @@ -15,19 +15,28 @@ def check_ceph_status(args): nagios_plugin.check_file_freshness(args.status_file, 3600) with open(args.status_file, "r") as f: lines = f.readlines() - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) else: lines = subprocess.check_output(["ceph", "status"]).split('\n') - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) - if ('health' not in status_data - or 'monmap' not in status_data - or 'osdmap'not in status_data): + if ('health' not in status_data or + 'monmap' not in status_data or + 'osdmap' not in status_data): raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete') if status_data['health'] != 'HEALTH_OK': - msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health']) + msg = 'CRITICAL: ceph health status: "{}'.format(status_data['health']) + if (len(status_data['health'].split(' '))) == 1: + a = iter(lines) + for line in a: + if re.search('health', line) is not None: + msg1 = next(a) + msg += " " + msg += msg1.strip() + break + msg += '"' raise nagios_plugin.CriticalError(msg) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( From 4113b27947256be41ecf7518c2ff1ea01622599c Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 1 Nov 2017 16:35:17 +0000 Subject: [PATCH 1402/2699] Ensure upgrade keyring exists prior to upgrade checks During ceph to ceph-osd/ceph-mon migrations, the bootstrap keyring for the cluster will be in place as the ceph-osd units are started alongside existing ceph units. Switch this check to look for the upgrade keyring, which won't be in place until the ceph-osd <-> ceph-mon relation is complete, at which point in time a) the unit has the correct access to perform the upgrade and b) the previous/current version check code will not trip over due to the previous value of the source option being None, resulting in a fallback to 'distro' as the previous source of ceph. 
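A minimal sketch of the new gate, using the keyring path asserted by the tests below (the real hook would then continue into the version comparison and rolling-upgrade logic):

    import os

    _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring"

    def check_for_upgrade():
        # Until the ceph-osd <-> ceph-mon relation has delivered this
        # keyring the unit cannot authenticate to upgrade, so bail out.
        if not os.path.exists(_upgrade_keyring):
            print("Ceph upgrade keyring not detected, "
                  "skipping upgrade checks.")
            return
        # ... resolve previous/current versions and roll the cluster ...

    check_for_upgrade()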
Change-Id: I10895c60aeb543a10461676e4455ed6b5e2fdb46 Closes-Bug: 1729369 --- ceph-osd/.gitignore | 1 + ceph-osd/hooks/ceph_hooks.py | 4 ++-- ceph-osd/unit_tests/test_upgrade.py | 24 +++++++++++++++--------- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index 660b48a2..c3fd0b63 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -2,6 +2,7 @@ .project .tox .testrepository +.stestr bin *.sw[nop] *.pyc diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index b4ad3e26..8253d8b8 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -82,8 +82,8 @@ def check_for_upgrade(): - if not ceph.is_bootstrapped(): - log("Ceph is not bootstrapped, skipping upgrade checks.") + if not os.path.exists(ceph._upgrade_keyring): + log("Ceph upgrade keyring not detected, skipping upgrade checks.") return c = hookenv.config() diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index 383519c8..ad876214 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -19,16 +19,16 @@ def config_side_effect(*args): class UpgradeRollingTestCase(unittest.TestCase): @patch('ceph_hooks.ceph.dirs_need_ownership_update') - @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_osd_cluster') def test_check_for_upgrade(self, roll_osd_cluster, hookenv, - emit_cephconf, version, is_bootstrapped, + emit_cephconf, version, exists, dirs_need_ownership_update): dirs_need_ownership_update.return_value = False - is_bootstrapped.return_value = True + exists.return_value = True version.side_effect = ['firefly', 'hammer'] previous_mock = MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" @@ -40,19 +40,21 @@ def test_check_for_upgrade(self, roll_osd_cluster, hookenv, upgrade_key='osd-upgrade') emit_cephconf.assert_has_calls([call(upgrading=True), call(upgrading=False)]) + exists.assert_called_with( + "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") @patch('ceph_hooks.ceph.dirs_need_ownership_update') - @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_osd_cluster') def test_resume_failed_upgrade(self, roll_osd_cluster, hookenv, emit_cephconf, version, - is_bootstrapped, + exists, dirs_need_ownership_update): dirs_need_ownership_update.return_value = True - is_bootstrapped.return_value = True + exists.return_value = True version.side_effect = ['jewel', 'jewel'] check_for_upgrade() @@ -61,15 +63,17 @@ def test_resume_failed_upgrade(self, roll_osd_cluster, upgrade_key='osd-upgrade') emit_cephconf.assert_has_calls([call(upgrading=True), call(upgrading=False)]) + exists.assert_called_with( + "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") - @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, hookenv, - version, is_bootstrapped): - is_bootstrapped.return_value = False + version, exists): + exists.return_value = False version.side_effect = ['firefly', 'hammer'] previous_mock = 
MagicMock().return_value previous_mock.previous.return_value = "cloud:trusty-juno" @@ -78,3 +82,5 @@ def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, check_for_upgrade() roll_monitor_cluster.assert_not_called() + exists.assert_called_with( + "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") From 847d8f62b920ff938ba61f72f4a6fd83090fb6ef Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Wed, 3 May 2017 14:54:38 +1200 Subject: [PATCH 1403/2699] Add options for osd backfill pressure Added options for osd_max_backfills and osd_recovery_max_active, in case we want to override the defaults. Change-Id: Iaeb93d3068b1fab242acf2d741c36be5f4b29b57 Closes-bug: #1661560 --- ceph-osd/config.yaml | 18 ++++++++++++++++++ ceph-osd/hooks/ceph_hooks.py | 2 ++ ceph-osd/templates/ceph.conf | 6 ++++++ ceph-osd/unit_tests/test_ceph_hooks.py | 14 ++++++++++++++ 4 files changed, 40 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index aa716d22..e61f1fec 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -135,6 +135,24 @@ options: default, the initial crush weight for the newly added osd is set to its volume size in TB. Leave this option unset to use the default provided by Ceph itself. This option only affects NEW OSDs, not existing ones. + osd-max-backfills: + type: int + default: + description: | + The maximum number of backfills allowed to or from a single OSD. + . + Setting this option on a running Ceph OSD node will not affect running + OSD devices, but will add the setting to ceph.conf for the next restart. + osd-recovery-max-active: + type: int + default: + description: | + The number of active recovery requests per OSD at one time. More requests + will accelerate recovery, but the requests place an increased load on the + cluster. + . + Setting this option on a running Ceph OSD node will not affect running + OSD devices, but will add the setting to ceph.conf for the next restart. ignore-device-errors: type: boolean default: False diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 8253d8b8..ed6bd7bd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -244,6 +244,8 @@ def get_ceph_context(upgrading=False): 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, 'crush_initial_weight': config('crush-initial-weight'), 'osd_journal_size': config('osd-journal-size'), + 'osd_max_backfills': config('osd-max-backfills'), + 'osd_recovery_max_active': config('osd-recovery-max-active'), 'use_syslog': str(config('use-syslog')).lower(), 'ceph_public_network': public_network, 'ceph_cluster_network': cluster_network, diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 4a967cf0..bbef3dee 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -74,6 +74,12 @@ journal dio = {{ dio }} osd max object name len = 256 osd max object namespace len = 64 {% endif %} +{%- if osd_max_backfills %} +osd max backfills = {{ osd_max_backfills }} +{%- endif %} +{%- if osd_recovery_max_active %} +osd recovery max active = {{ osd_recovery_max_active }} +{%- endif %} {% if osd -%} # The following are user-provided options provided via the config-flags charm option.
{% for key in osd -%} diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 874603cf..9ec304e6 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -25,6 +25,8 @@ 'loglevel': 1, 'use-syslog': True, 'osd-journal-size': 1024, + 'osd-max-backfills': 1, + 'osd-recovery-max-active': 2, 'use-direct-io': True, 'osd-format': 'ext4', 'prefer-ipv6': False, @@ -63,6 +65,8 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'old_auth': False, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -98,6 +102,8 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'old_auth': False, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -133,6 +139,8 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'old_auth': False, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -169,6 +177,8 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'old_auth': False, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -205,6 +215,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'osd': {'osd max write size': 1024}, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -243,6 +255,8 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'osd': {'osd max write size': 1024}, 'crush_initial_weight': '0', 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, From 097681963b18c43f5c337d80b428bcde105df875 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Fri, 5 May 2017 21:50:56 +1200 Subject: [PATCH 1404/2699] Update ceph nagios plugin Changes ceph plugin so it ignores ceph rebalancing unless there is a large percentage of misplaced/degraded objects (return warning for that). Adds config options to tweak that monitoring, and also just warn if nodeep-scrub was deliberately set. Includes some basic unit tests. 
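A reduced sketch of the new threshold handling, fed a canned pgmap with figures taken from the ceph_crit.json fixture below and the defaults of the new nagios_* config options; the full plugin additionally filters the health summary lines against a list of ignorable rebalance states:

    pgmap = {'degraded_ratio': 0.0, 'misplaced_ratio': 0.009575,
             'recovering_objects_per_sec': 389}

    # Defaults of nagios_degraded_thresh, nagios_misplaced_thresh and
    # nagios_recovery_rate.
    degraded_thresh, misplaced_thresh, recovery_rate = 1.0, 10.0, 1

    problems = []
    if pgmap.get('degraded_ratio', 0.0) > degraded_thresh:
        problems.append('Degraded ratio: {}'.format(pgmap['degraded_ratio']))
    if pgmap.get('misplaced_ratio', 0.0) > misplaced_thresh:
        problems.append('Misplaced ratio: {}'.format(pgmap['misplaced_ratio']))
    if pgmap.get('recovering_objects_per_sec', 0.0) < recovery_rate:
        problems.append('Recovery stalled: {} objects/sec'.format(
            pgmap['recovering_objects_per_sec']))
    if problems:
        print('CRITICAL: ceph health: "{}"'.format(', '.join(problems)))
    else:
        print('rebalance is within the configured tolerances')

With these figures the thresholds pass; in the fixture the critical state comes from an unignorable summary line instead.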
Change-Id: I317448cd769597068a706d3944d9d5419e0445c1 --- ceph-mon/config.yaml | 16 ++ ceph-mon/files/nagios/check_ceph_status.py | 228 +++++++++++++++--- ceph-mon/files/nagios/collect_ceph_status.sh | 2 +- ceph-mon/hooks/ceph_hooks.py | 10 +- ceph-mon/unit_tests/ceph_crit.json | 226 +++++++++++++++++ ceph-mon/unit_tests/ceph_nodeepscrub.json | 177 ++++++++++++++ ceph-mon/unit_tests/ceph_ok.json | 1 + ceph-mon/unit_tests/ceph_params.json | 222 +++++++++++++++++ ceph-mon/unit_tests/ceph_warn.json | 1 + ceph-mon/unit_tests/test_ceph_hooks.py | 18 +- ceph-mon/unit_tests/test_check_ceph_status.py | 75 ++++++ 11 files changed, 933 insertions(+), 43 deletions(-) create mode 100644 ceph-mon/unit_tests/ceph_crit.json create mode 100644 ceph-mon/unit_tests/ceph_nodeepscrub.json create mode 100644 ceph-mon/unit_tests/ceph_ok.json create mode 100644 ceph-mon/unit_tests/ceph_params.json create mode 100644 ceph-mon/unit_tests/ceph_warn.json create mode 100644 ceph-mon/unit_tests/test_check_ceph_status.py diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 39688be6..e718c372 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -195,6 +195,22 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup. + nagios_degraded_thresh: + default: 1.0 + type: float + description: "Threshold for degraded ratio (0.1 = 10%)" + nagios_misplaced_thresh: + default: 10.0 + type: float + description: "Threshold for misplaced ratio (0.1 = 10%)" + nagios_recovery_rate: + default: '1' + type: string + description: Recovery rate below which we consider recovery to be stalled + nagios_ignore_nodeepscub: + default: False + type: boolean + description: Whether to ignore the nodeep-scrub flag use-direct-io: type: boolean default: True diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 09ee5f8d..2df223ff 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -1,53 +1,205 @@ #!/usr/bin/env python -# Copyright (C) 2014 Canonical -# All Rights Reserved -# Author: Jacek Nykis +# Copyright (C) 2005, 2006, 2007, 2012 James Troup +# Copyright (C) 2014, 2017 Canonical +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: Jacek Nykis +# Xav Paice +# James Troup import re import argparse +import json +import os import subprocess -import nagios_plugin +import sys +import time +import traceback + + +class CriticalError(Exception): + """This indicates a critical error.""" + pass + + +class WarnError(Exception): + """This indicates a warning condition.""" + pass + + +class UnknownError(Exception): + """This indicates an unknown error was encountered.""" + pass + + +def check_file_freshness(filename, newer_than=3600): + """ + Check a file exists, is readable and is newer than <n> seconds (where + <n> defaults to 3600).
+ """ + # First check the file exists and is readable + if not os.path.exists(filename): + raise CriticalError("%s: does not exist." % (filename)) + if os.access(filename, os.R_OK) == 0: + raise CriticalError("%s: is not readable." % (filename)) + + # Then ensure the file is up-to-date enough + mtime = os.stat(filename).st_mtime + last_modified = time.time() - mtime + if last_modified > newer_than: + raise CriticalError("%s: was last modified on %s and is too old " + "(> %s seconds)." + % (filename, time.ctime(mtime), newer_than)) + if last_modified < 0: + raise CriticalError("%s: was last modified on %s which is in the " + "future." + % (filename, time.ctime(mtime))) def check_ceph_status(args): + """ + Used to check the status of a Ceph cluster. Uses the output of 'ceph + status' to determine if health is OK, and if not, should we alert on that + situation. + + If status is HEALTH_OK then this function returns OK with no further check. + Otherwise, look for known situations which could cause ceph status to + return not OK, but things which represent general operations and don't + warrant a pager event. These include OSD reweight actions, and + nodeep-scrub flag setting, with limits for the amount of misplaced data. + + :param args: argparse object formatted in the convention of generic Nagios + checks + :returns string, describing the status of the ceph cluster. + """ + + ignorable = (r'\d+ pgs (?:backfill|degraded|recovery_wait|stuck unclean)|' + 'recovery \d+\/\d+ objects (?:degraded|misplaced)') + if args.ignore_nodeepscrub: + ignorable = ignorable + '|nodeep-scrub flag\(s\) set' + status_critical = False if args.status_file: - nagios_plugin.check_file_freshness(args.status_file, 3600) - with open(args.status_file, "r") as f: - lines = f.readlines() + check_file_freshness(args.status_file) + with open(args.status_file) as f: + tree = f.read() + status_data = json.loads(tree) else: - lines = subprocess.check_output(["ceph", "status"]).split('\n') - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) - - if ('health' not in status_data or - 'monmap' not in status_data or - 'osdmap' not in status_data): - raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete') - - if status_data['health'] != 'HEALTH_OK': - msg = 'CRITICAL: ceph health status: "{}'.format(status_data['health']) - if (len(status_data['health'].split(' '))) == 1: - a = iter(lines) - for line in a: - if re.search('health', line) is not None: - msg1 = next(a) - msg += " " - msg += msg1.strip() - break - msg += '"' - raise nagios_plugin.CriticalError(msg) - - osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) - if osds.group(1) > osds.group(2): # not all OSDs are "up" - msg = 'CRITICAL: Some OSDs are not up. 
Total: {}, up: {}'.format( - osds.group(1), osds.group(2)) - raise nagios_plugin.CriticalError(msg) - print "All OK" + try: + tree = subprocess.check_output(['ceph', + 'status', + '--format', 'json']) + except subprocess.CalledProcessError as e: + raise UnknownError( + "UNKNOWN: ceph status command failed with error: {}".format(e)) + status_data = json.loads(tree) + required_keys = ['health', 'monmap', 'pgmap'] + if not all(key in status_data.keys() for key in required_keys): + raise UnknownError('UNKNOWN: status data is incomplete') -if __name__ == '__main__': + if status_data['health']['overall_status'] != 'HEALTH_OK': + # Health is not OK, check if any lines are not in our list of OK + # any lines that don't match, check is critical + status_msg = [] + for status in status_data['health']['summary']: + if not re.match(ignorable, status['summary']): + status_critical = True + status_msg.append(status['summary']) + # If we got this far, then the status is not OK but the status lines + # are all in our list of things we consider to be operational tasks. + # Check the thresholds and return CRITICAL if exceeded, + # otherwise there's something not accounted for and we want to know + # about it with a WARN alert. + degraded_ratio = status_data['pgmap'].get('degraded_ratio', 0.0) + if degraded_ratio > args.degraded_thresh: + status_critical = True + status_msg.append("Degraded ratio: {}".format(degraded_ratio)) + misplaced_ratio = status_data['pgmap'].get('misplaced_ratio', 0.0) + if misplaced_ratio > args.misplaced_thresh: + status_critical = True + status_msg.append("Misplaced ratio: {}".format(misplaced_ratio)) + recovering = status_data['pgmap'].get('recovering_objects_per_sec', + 0.0) + if recovering < args.recovery_rate: + status_critical = True + status_msg.append("Recovering objects/sec {}".format(recovering)) + if status_critical: + msg = 'CRITICAL: ceph health: "{} {}"'.format( + status_data['health']['overall_status'], + ", ".join(status_msg)) + raise CriticalError(msg) + if status_data['health']['overall_status'] == 'HEALTH_WARN': + msg = "WARNING: {}".format(", ".join(status_msg)) + raise WarnError(msg) + message = "All OK" + print(message) + return message + + +def parse_args(args): parser = argparse.ArgumentParser(description='Check ceph status') parser.add_argument('-f', '--file', dest='status_file', - default=False, help='Optional file with "ceph status" output') - args = parser.parse_args() - nagios_plugin.try_check(check_ceph_status, args) + default=False, + help='Optional file with "ceph status" output. ' + 'Generally useful for testing, and if the Nagios ' + 'user account does not have rights for the Ceph ' + 'config files.') + parser.add_argument('--degraded_thresh', dest='degraded_thresh', + default=1, type=float, + help="Threshold for degraded ratio (0.1 = 10%)") + parser.add_argument('--misplaced_thresh', dest='misplaced_thresh', + default=10, type=float, + help="Threshold for misplaced ratio (0.1 = 10%)") + parser.add_argument('--recovery_rate', dest='recovery_rate', + default=1, type=int, + help="Recovery rate below which we consider recovery " + "to be stalled") + parser.add_argument('--ignore_nodeepscrub', dest='ignore_nodeepscrub', + default=False, action='store_true', + help="Whether to ignore the nodeep-scrub flag. 
If " + "the nodeep-scrub flag is set, the check returns " + "warning if this param is passed, otherwise " + "returns critical.") + return parser.parse_args(args) + + +def main(args): + EXIT_CODES = {'ok': 0, 'warning': 1, 'critical': 2, 'unknown': 3} + exitcode = 'ok' + try: + msg = check_ceph_status(args) + except UnknownError as msg: + print(msg) + exitcode = 'unknown' + except CriticalError as msg: + print(msg) + exitcode = 'critical' + except WarnError as msg: + print(msg) + exitcode = 'critical' + except: + print("%s raised unknown exception '%s'" % ('check_ceph_status', + sys.exc_info()[0])) + print('=' * 60) + traceback.print_exc(file=sys.stdout) + print('=' * 60) + exitcode = 'unknown' + return EXIT_CODES[exitcode] + + +if __name__ == '__main__': + args = parse_args(sys.argv[1:]) + status = main(args) + sys.exit(status) diff --git a/ceph-mon/files/nagios/collect_ceph_status.sh b/ceph-mon/files/nagios/collect_ceph_status.sh index dbdd3acf..2f72a42c 100755 --- a/ceph-mon/files/nagios/collect_ceph_status.sh +++ b/ceph-mon/files/nagios/collect_ceph_status.sh @@ -15,4 +15,4 @@ if [ ! -d $DATA_DIR ]; then mkdir -p $DATA_DIR fi -ceph status >${DATA_DIR}/cat-ceph-status.txt +ceph status --format json >${DATA_DIR}/cat-ceph-status.txt diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 9de2cd75..7e158383 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -654,10 +654,18 @@ def update_nrpe_config(): hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() nrpe_setup = nrpe.NRPE(hostname=hostname) + check_cmd = 'check_ceph_status.py -f {} --degraded_thresh {}' \ + ' --misplaced_thresh {}' \ + ' --recovery_rate {}'.format(STATUS_FILE, + config('nagios_degraded_thresh'), + config('nagios_misplaced_thresh'), + config('nagios_recovery_rate')) + if config('nagios_ignore_nodeepscub'): + check_cmd = check_cmd + ' --ignore_nodeepscrub' nrpe_setup.add_check( shortname="ceph", description='Check Ceph health {%s}' % current_unit, - check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE) + check_cmd=check_cmd ) nrpe_setup.write() diff --git a/ceph-mon/unit_tests/ceph_crit.json b/ceph-mon/unit_tests/ceph_crit.json new file mode 100644 index 00000000..faa23cef --- /dev/null +++ b/ceph-mon/unit_tests/ceph_crit.json @@ -0,0 +1,226 @@ +{ + "health": { + "health": { + "health_services": [ + { + "mons": [ + { + "name": "juju-2691ab-1-lxd-1", + "kb_total": 155284096, + "kb_used": 1247744, + "kb_avail": 154036352, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:35.562497", + "store_stats": { + "bytes_total": 1012055342, + "bytes_sst": 0, + "bytes_log": 29673298, + "bytes_misc": 982382044, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-13-lxd-0", + "kb_total": 153820288, + "kb_used": 1361280, + "kb_avail": 152459008, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:04.097201", + "store_stats": { + "bytes_total": 1370003168, + "bytes_sst": 0, + "bytes_log": 29813159, + "bytes_misc": 1340190009, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-2-lxd-1", + "kb_total": 155251072, + "kb_used": 1373440, + "kb_avail": 153877632, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:20.684777", + "store_stats": { + "bytes_total": 1400974192, + "bytes_sst": 0, + "bytes_log": 1129945, + "bytes_misc": 1399844247, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 32, + "round": 
24492, + "round_status": "finished", + "mons": [ + { + "name": "juju-2691ab-1-lxd-1", + "skew": 0, + "latency": 0, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-13-lxd-0", + "skew": 0.000919, + "latency": 0.001036, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-2-lxd-1", + "skew": 0, + "latency": 0.001009, + "health": "HEALTH_OK" + } + ] + }, + "summary": [ + { + "severity": "HEALTH_WARN", + "summary": "48 pgs backfill_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "45 pgs backfilling" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs degraded" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs recovery_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "22 pgs stuck unclean" + }, + { + "severity": "HEALTH_WARN", + "summary": "recovery 14/46842755 objects degraded (0.000%)" + }, + { + "severity": "HEALTH_WARN", + "summary": "recovery 448540/46842755 objects misplaced (0.958%)" + }, + { + "severity": "HEALTH_CRITICAL", + "summary": "Test critical status message" + } + ], + "overall_status": "HEALTH_CRITICAL", + "detail": [] + }, + "fsid": "ca9451f1-5c4f-4e85-bb14-a08dfc0568f7", + "election_epoch": 32, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-2691ab-1-lxd-1", + "juju-2691ab-13-lxd-0", + "juju-2691ab-2-lxd-1" + ], + "monmap": { + "epoch": 1, + "fsid": "ca9451f1-5c4f-4e85-bb14-a08dfc0568f7", + "modified": "2016-12-03 08:09:21.854671", + "created": "2016-12-03 08:09:21.854671", + "mons": [ + { + "rank": 0, + "name": "juju-2691ab-1-lxd-1", + "addr": "10.182.254.221:6789/0" + }, + { + "rank": 1, + "name": "juju-2691ab-13-lxd-0", + "addr": "10.182.254.229:6789/0" + }, + { + "rank": 2, + "name": "juju-2691ab-2-lxd-1", + "addr": "10.182.254.242:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 141540, + "num_osds": 314, + "num_up_osds": 314, + "num_in_osds": 314, + "full": false, + "nearfull": false, + "num_remapped_pgs": 92 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 9274 + }, + { + "state_name": "active+remapped+wait_backfill", + "count": 48 + }, + { + "state_name": "active+remapped+backfilling", + "count": 45 + }, + { + "state_name": "active+clean+scrubbing+deep", + "count": 9 + }, + { + "state_name": "active+remapped", + "count": 2 + }, + { + "state_name": "active+recovery_wait+degraded", + "count": 1 + }, + { + "state_name": "active+clean+scrubbing", + "count": 1 + } + ], + "version": 13885884, + "num_pgs": 9380, + "data_bytes": 64713222471610, + "bytes_used": 193613093122048, + "bytes_avail": 690058090491904, + "bytes_total": 883671183613952, + "degraded_objects": 14, + "degraded_total": 46842755, + "degraded_ratio": 0, + "misplaced_objects": 448540, + "misplaced_total": 46842755, + "misplaced_ratio": 0.009575, + "recovering_objects_per_sec": 389, + "recovering_bytes_per_sec": 1629711746, + "recovering_keys_per_sec": 0, + "num_objects_recovered": 218, + "num_bytes_recovered": 912252928, + "num_keys_recovered": 0, + "read_bytes_sec": 117041457, + "write_bytes_sec": 293414043, + "read_op_per_sec": 5282, + "write_op_per_sec": 5270 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + } +} + diff --git a/ceph-mon/unit_tests/ceph_nodeepscrub.json b/ceph-mon/unit_tests/ceph_nodeepscrub.json new file mode 100644 index 00000000..fe3aedf8 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_nodeepscrub.json @@ -0,0 +1,177 @@ +{ + "health": { + "health": { + "health_services": [ + { + "mons": [ + { + "name": "node1", + "kb_total": 140956600, + "kb_used": 15916132, + "kb_avail": 117857208, + 
"avail_percent": 83, + "last_updated": "2017-05-17 03:23:11.248297", + "store_stats": { + "bytes_total": 140014259, + "bytes_sst": 0, + "bytes_log": 13670758, + "bytes_misc": 126343501, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "node2", + "kb_total": 70395920, + "kb_used": 10532504, + "kb_avail": 56264436, + "avail_percent": 79, + "last_updated": "2017-05-17 03:23:16.952673", + "store_stats": { + "bytes_total": 315512452, + "bytes_sst": 0, + "bytes_log": 21691698, + "bytes_misc": 293820754, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-machine-85-lxc-10", + "kb_total": 131927524, + "kb_used": 79521024, + "kb_avail": 45954016, + "avail_percent": 34, + "last_updated": "2017-05-17 03:23:13.794034", + "store_stats": { + "bytes_total": 89036349, + "bytes_sst": 0, + "bytes_log": 21055337, + "bytes_misc": 67981012, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 280, + "round": 19874, + "round_status": "finished", + "mons": [ + { + "name": "node1", + "skew": "0.000000", + "latency": "0.000000", + "health": "HEALTH_OK" + }, + { + "name": "node2", + "skew": "-0.000000", + "latency": "0.000866", + "health": "HEALTH_OK" + }, + { + "name": "juju-machine-85-lxc-10", + "skew": "-0.000000", + "latency": "0.018848", + "health": "HEALTH_OK" + } + ] + }, + "summary": [ + { + "severity": "HEALTH_WARN", + "summary": "nodeep-scrub flag(s) set" + } + ], + "overall_status": "HEALTH_WARN", + "detail": [] + }, + "fsid": "some_fsid", + "election_epoch": 280, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "node1", + "node2", + "juju-machine-85-lxc-10" + ], + "monmap": { + "epoch": 3, + "fsid": "some_fsid", + "modified": "2016-11-25 00:08:51.235813", + "created": "0.000000", + "mons": [ + { + "rank": 0, + "name": "node1", + "addr": "10.24.0.15:6789/0" + }, + { + "rank": 1, + "name": "node2", + "addr": "10.24.0.17:6789/0" + }, + { + "rank": 2, + "name": "juju-machine-85-lxc-10", + "addr": "10.24.0.195:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 37820, + "num_osds": 46, + "num_up_osds": 46, + "num_in_osds": 46, + "full": false, + "nearfull": false + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 1988 + }, + { + "state_name": "active+remapped+wait_backfill", + "count": 3 + }, + { + "state_name": "active+remapped+backfilling", + "count": 1 + } + ], + "version": 58873447, + "num_pgs": 1992, + "data_bytes": 35851846298041, + "bytes_used": 107730678743040, + "bytes_avail": 63413590548480, + "bytes_total": 171144269291520, + "degraded_objects": 0, + "degraded_total": 25759217, + "degraded_ratio": 0, + "recovering_objects_per_sec": 17, + "recovering_bytes_per_sec": 72552794, + "recovering_keys_per_sec": 0, + "read_bytes_sec": 23935944, + "write_bytes_sec": 7024503, + "op_per_sec": 5332 + }, + "mdsmap": { + "epoch": 1, + "up": 0, + "in": 0, + "max": 1, + "by_rank": [] + } +} + diff --git a/ceph-mon/unit_tests/ceph_ok.json b/ceph-mon/unit_tests/ceph_ok.json new file mode 100644 index 00000000..2eafbc15 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_ok.json @@ -0,0 +1 @@ +{"health":{"health":{"health_services":[{"mons":[{"name":"somehost-2","kb_total":384443444,"kb_used":254122936,"kb_avail":110768868,"avail_percent":28,"last_updated":"2017-06-28 
07:22:57.268852","store_stats":{"bytes_total":563914940,"bytes_sst":0,"bytes_log":1201349,"bytes_misc":562713591,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"somehost-3","kb_total":384443444,"kb_used":181563008,"kb_avail":183328796,"avail_percent":47,"last_updated":"2017-06-28 07:22:09.013733","store_stats":{"bytes_total":584703758,"bytes_sst":0,"bytes_log":17361907,"bytes_misc":567341851,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"somehost-4","kb_total":384443444,"kb_used":278218520,"kb_avail":86673284,"avail_percent":22,"last_updated":"2017-06-28 07:22:31.725105","store_stats":{"bytes_total":598087748,"bytes_sst":0,"bytes_log":26273616,"bytes_misc":571814132,"last_updated":"0.000000"},"health":"HEALTH_OK"}]}]},"timechecks":{"epoch":52,"round":35412,"round_status":"finished","mons":[{"name":"somehost-2","skew":0.000000,"latency":0.000000,"health":"HEALTH_OK"},{"name":"somehost-3","skew":-0.001662,"latency":0.000531,"health":"HEALTH_OK"},{"name":"somehost-4","skew":-0.000034,"latency":0.000425,"health":"HEALTH_OK"}]},"summary":[],"overall_status":"HEALTH_OK","detail":[]},"fsid":"9486fd14-676d-481c-aa16-77b071a315d8","election_epoch":52,"quorum":[0,1,2],"quorum_names":["somehost-2","somehost-3","somehost-4"],"monmap":{"epoch":1,"fsid":"9486fd14-676d-481c-aa16-77b071a315d8","modified":"2016-08-09 06:33:15.685755","created":"2016-08-09 06:33:15.685755","mons":[{"rank":0,"name":"somehost-2","addr":"10.28.2.21:6789\/0"},{"rank":1,"name":"somehost-3","addr":"10.28.2.22:6789\/0"},{"rank":2,"name":"somehost-4","addr":"10.28.2.23:6789\/0"}]},"osdmap":{"osdmap":{"epoch":11122,"num_osds":42,"num_up_osds":42,"num_in_osds":42,"full":false,"nearfull":false,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":12350},{"state_name":"active+clean+scrubbing+deep","count":2}],"version":25999715,"num_pgs":12352,"data_bytes":13428555112092,"bytes_used":40180090028032,"bytes_avail":43795596517376,"bytes_total":83975686545408,"read_bytes_sec":92475,"write_bytes_sec":5309194,"read_op_per_sec":367,"write_op_per_sec":506},"fsmap":{"epoch":1,"by_rank":[]}} diff --git a/ceph-mon/unit_tests/ceph_params.json b/ceph-mon/unit_tests/ceph_params.json new file mode 100644 index 00000000..4b4f6efb --- /dev/null +++ b/ceph-mon/unit_tests/ceph_params.json @@ -0,0 +1,222 @@ +{ + "health": { + "health": { + "health_services": [ + { + "mons": [ + { + "name": "juju-2691ab-1-lxd-1", + "kb_total": 155284096, + "kb_used": 1247744, + "kb_avail": 154036352, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:35.562497", + "store_stats": { + "bytes_total": 1012055342, + "bytes_sst": 0, + "bytes_log": 29673298, + "bytes_misc": 982382044, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-13-lxd-0", + "kb_total": 153820288, + "kb_used": 1361280, + "kb_avail": 152459008, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:04.097201", + "store_stats": { + "bytes_total": 1370003168, + "bytes_sst": 0, + "bytes_log": 29813159, + "bytes_misc": 1340190009, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-2-lxd-1", + "kb_total": 155251072, + "kb_used": 1373440, + "kb_avail": 153877632, + "avail_percent": 99, + "last_updated": "2017-05-17 03:31:20.684777", + "store_stats": { + "bytes_total": 1400974192, + "bytes_sst": 0, + "bytes_log": 1129945, + "bytes_misc": 1399844247, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 32, + "round": 
24492, + "round_status": "finished", + "mons": [ + { + "name": "juju-2691ab-1-lxd-1", + "skew": 0, + "latency": 0, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-13-lxd-0", + "skew": 0.000919, + "latency": 0.001036, + "health": "HEALTH_OK" + }, + { + "name": "juju-2691ab-2-lxd-1", + "skew": 0, + "latency": 0.001009, + "health": "HEALTH_OK" + } + ] + }, + "summary": [ + { + "severity": "HEALTH_WARN", + "summary": "48 pgs backfill_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "45 pgs backfilling" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs degraded" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs recovery_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "22 pgs stuck unclean" + }, + { + "severity": "HEALTH_WARN", + "summary": "recovery lots/bignumber objects degraded (15%)" + }, + { + "severity": "HEALTH_WARN", + "summary": "recovery 448540/46842755 objects misplaced (0.958%)" + } + ], + "overall_status": "HEALTH_WARN", + "detail": [] + }, + "fsid": "ca9451f1-5c4f-4e85-bb14-a08dfc0568f7", + "election_epoch": 32, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-2691ab-1-lxd-1", + "juju-2691ab-13-lxd-0", + "juju-2691ab-2-lxd-1" + ], + "monmap": { + "epoch": 1, + "fsid": "ca9451f1-5c4f-4e85-bb14-a08dfc0568f7", + "modified": "2016-12-03 08:09:21.854671", + "created": "2016-12-03 08:09:21.854671", + "mons": [ + { + "rank": 0, + "name": "juju-2691ab-1-lxd-1", + "addr": "10.182.254.221:6789/0" + }, + { + "rank": 1, + "name": "juju-2691ab-13-lxd-0", + "addr": "10.182.254.229:6789/0" + }, + { + "rank": 2, + "name": "juju-2691ab-2-lxd-1", + "addr": "10.182.254.242:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 141540, + "num_osds": 314, + "num_up_osds": 314, + "num_in_osds": 314, + "full": false, + "nearfull": false, + "num_remapped_pgs": 92 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 9274 + }, + { + "state_name": "active+remapped+wait_backfill", + "count": 48 + }, + { + "state_name": "active+remapped+backfilling", + "count": 45 + }, + { + "state_name": "active+clean+scrubbing+deep", + "count": 9 + }, + { + "state_name": "active+remapped", + "count": 2 + }, + { + "state_name": "active+recovery_wait+degraded", + "count": 1 + }, + { + "state_name": "active+clean+scrubbing", + "count": 1 + } + ], + "version": 13885884, + "num_pgs": 9380, + "data_bytes": 64713222471610, + "bytes_used": 193613093122048, + "bytes_avail": 690058090491904, + "bytes_total": 883671183613952, + "degraded_objects": 14, + "degraded_total": 46842755, + "degraded_ratio": 0, + "misplaced_objects": 448540, + "misplaced_total": 46842755, + "misplaced_ratio": 0.15, + "recovering_objects_per_sec": 389, + "recovering_bytes_per_sec": 1629711746, + "recovering_keys_per_sec": 0, + "num_objects_recovered": 218, + "num_bytes_recovered": 912252928, + "num_keys_recovered": 0, + "read_bytes_sec": 117041457, + "write_bytes_sec": 293414043, + "read_op_per_sec": 5282, + "write_op_per_sec": 5270 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + } +} + diff --git a/ceph-mon/unit_tests/ceph_warn.json b/ceph-mon/unit_tests/ceph_warn.json new file mode 100644 index 00000000..45c81578 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_warn.json @@ -0,0 +1 @@ +{"health":{"health":{"health_services":[{"mons":[{"name":"juju-2691ab-1-lxd-1","kb_total":155284096,"kb_used":1247744,"kb_avail":154036352,"avail_percent":99,"last_updated":"2017-05-17 
03:31:35.562497","store_stats":{"bytes_total":1012055342,"bytes_sst":0,"bytes_log":29673298,"bytes_misc":982382044,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","kb_total":153820288,"kb_used":1361280,"kb_avail":152459008,"avail_percent":99,"last_updated":"2017-05-17 03:31:04.097201","store_stats":{"bytes_total":1370003168,"bytes_sst":0,"bytes_log":29813159,"bytes_misc":1340190009,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","kb_total":155251072,"kb_used":1373440,"kb_avail":153877632,"avail_percent":99,"last_updated":"2017-05-17 03:31:20.684777","store_stats":{"bytes_total":1400974192,"bytes_sst":0,"bytes_log":1129945,"bytes_misc":1399844247,"last_updated":"0.000000"},"health":"HEALTH_OK"}]}]},"timechecks":{"epoch":32,"round":24492,"round_status":"finished","mons":[{"name":"juju-2691ab-1-lxd-1","skew":0.000000,"latency":0.000000,"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","skew":0.000919,"latency":0.001036,"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","skew":0.000000,"latency":0.001009,"health":"HEALTH_OK"}]},"summary":[{"severity":"HEALTH_WARN","summary":"48 pgs backfill_wait"},{"severity":"HEALTH_WARN","summary":"45 pgs backfilling"},{"severity":"HEALTH_WARN","summary":"1 pgs degraded"},{"severity":"HEALTH_WARN","summary":"1 pgs recovery_wait"},{"severity":"HEALTH_WARN","summary":"22 pgs stuck unclean"},{"severity":"HEALTH_WARN","summary":"recovery 14\/46842755 objects degraded (0.000%)"},{"severity":"HEALTH_WARN","summary":"recovery 448540\/46842755 objects misplaced (0.958%)"}],"overall_status":"HEALTH_WARN","detail":[]},"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","election_epoch":32,"quorum":[0,1,2],"quorum_names":["juju-2691ab-1-lxd-1","juju-2691ab-13-lxd-0","juju-2691ab-2-lxd-1"],"monmap":{"epoch":1,"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","modified":"2016-12-03 08:09:21.854671","created":"2016-12-03 08:09:21.854671","mons":[{"rank":0,"name":"juju-2691ab-1-lxd-1","addr":"10.182.254.221:6789\/0"},{"rank":1,"name":"juju-2691ab-13-lxd-0","addr":"10.182.254.229:6789\/0"},{"rank":2,"name":"juju-2691ab-2-lxd-1","addr":"10.182.254.242:6789\/0"}]},"osdmap":{"osdmap":{"epoch":141540,"num_osds":314,"num_up_osds":314,"num_in_osds":314,"full":false,"nearfull":false,"num_remapped_pgs":92}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":9274},{"state_name":"active+remapped+wait_backfill","count":48},{"state_name":"active+remapped+backfilling","count":45},{"state_name":"active+clean+scrubbing+deep","count":9},{"state_name":"active+remapped","count":2},{"state_name":"active+recovery_wait+degraded","count":1},{"state_name":"active+clean+scrubbing","count":1}],"version":13885884,"num_pgs":9380,"data_bytes":64713222471610,"bytes_used":193613093122048,"bytes_avail":690058090491904,"bytes_total":883671183613952,"degraded_objects":14,"degraded_total":46842755,"degraded_ratio":0.000000,"misplaced_objects":448540,"misplaced_total":46842755,"misplaced_ratio":0.009575,"recovering_objects_per_sec":389,"recovering_bytes_per_sec":1629711746,"recovering_keys_per_sec":0,"num_objects_recovered":218,"num_bytes_recovered":912252928,"num_keys_recovered":0,"read_bytes_sec":117041457,"write_bytes_sec":293414043,"read_op_per_sec":5282,"write_op_per_sec":5270},"fsmap":{"epoch":1,"by_rank":[]}} diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index ef7a0786..7c98fd02 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -44,7 
+44,11 @@ 'osd-format': 'ext4', 'monitor-hosts': '', 'prefer-ipv6': False, - 'default-rbd-features': None} + 'default-rbd-features': None, + 'nagios_degraded_thresh': '1', + 'nagios_misplaced_thresh': '10', + 'nagios_recovery_rate': '1', + 'nagios_ignore_nodeepscub': False} class CephHooksTestCase(unittest.TestCase): @@ -168,7 +172,10 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) - def test_nrpe_dependency_installed(self): + @patch.object(ceph_hooks, 'config') + def test_nrpe_dependency_installed(self, mock_config): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] with patch.multiple(ceph_hooks, apt_install=DEFAULT, rsync=DEFAULT, @@ -179,7 +186,12 @@ def test_nrpe_dependency_installed(self): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) - def test_upgrade_charm_with_nrpe_relation_installs_dependencies(self): + @patch.object(ceph_hooks, 'config') + def test_upgrade_charm_with_nrpe_relation_installs_dependencies( + self, + mock_config): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] with patch.multiple( ceph_hooks, apt_install=DEFAULT, diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py new file mode 100644 index 00000000..64c3e903 --- /dev/null +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -0,0 +1,75 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
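+
+# These tests drive files/nagios/check_ceph_status.py with canned
+# `ceph status --format json` output from the unit_tests/*.json fixtures:
+# subprocess.check_output is mocked to return a fixture, and each test
+# asserts either on the exception raised (WarnError, CriticalError) or on
+# the "All OK" message returned for a healthy cluster.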
+ +import unittest +import os +import sys + +from mock import patch + +# import the module we want to test +os.sys.path.insert(1, os.path.join(sys.path[0], 'files/nagios')) +import check_ceph_status + + +@patch('subprocess.check_output') +class NagiosTestCase(unittest.TestCase): + + def test_health_ok(self, mock_subprocess): + with open('unit_tests/ceph_ok.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegexpMatches(check_output, r"^All OK$") + + def test_health_warn(self, mock_subprocess): + with open('unit_tests/ceph_warn.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) + + def test_health_crit(self, mock_subprocess): + with open('unit_tests/ceph_crit.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + def test_health_lotsdegraded(self, mock_subprocess): + with open('unit_tests/ceph_params.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + def test_health_nodeepscrub(self, mock_subprocess): + with open('unit_tests/ceph_nodeepscrub.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + def test_health_nodeepscrubok(self, mock_subprocess): + with open('unit_tests/ceph_nodeepscrub.json') as f: + tree = f.read() + mock_subprocess.return_value = tree + args = check_ceph_status.parse_args(['--ignore_nodeepscrub']) + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) From 8dd1f9fcc4a3c1917537b32a1c9cefee4b4ee53d Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 7 Nov 2017 07:54:01 +0100 Subject: [PATCH 1405/2699] Add unit tests for get_devices and get_journal_devices Change-Id: I78ea1d1c861070e4d4b1d171ded7286ebb636600 --- ceph-osd/unit_tests/test_ceph_hooks.py | 30 +++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 874603cf..feba3848 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -18,8 +18,11 @@ from mock import patch, MagicMock, call import charmhelpers.contrib.storage.linux.ceph as ceph -import ceph_hooks +with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + import ceph_hooks CHARM_CONFIG = {'config-flags': '', 'loglevel': 1, @@ -311,3 +314,28 @@ def test_install_apparmor_profile_systemd(self, mock_config, call('ceph-osd@1'), call('ceph-osd@2'), ]) + + @patch.object(ceph_hooks, 'storage_list') + @patch.object(ceph_hooks, 'config') + def test_get_devices(self, mock_config, mock_storage_list): + '''Devices returned as expected''' + 
config = {'osd-devices': '/dev/vda /dev/vdb'} + mock_config.side_effect = lambda key: config[key] + mock_storage_list.return_value = [] + devices = ceph_hooks.get_devices() + self.assertEqual(devices, ['/dev/vda', '/dev/vdb']) + + @patch('os.path.exists') + @patch.object(ceph_hooks, 'storage_list') + @patch.object(ceph_hooks, 'config') + def test_get_journal_devices(self, mock_config, mock_storage_list, + mock_os_path_exists): + '''Devices returned as expected''' + config = {'osd-journal': '/dev/vda /dev/vdb'} + mock_config.side_effect = lambda key: config[key] + mock_storage_list.return_value = [] + mock_os_path_exists.return_value = True + devices = ceph_hooks.get_journal_devices() + mock_storage_list.assert_called() + mock_os_path_exists.assert_called() + self.assertEqual(devices, set(['/dev/vda', '/dev/vdb'])) From 507251a03ebc63993d13673f6c99d23ccd54b46b Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 7 Nov 2017 19:52:54 -0700 Subject: [PATCH 1406/2699] Change `osd crush location` to `crush location` Upstream Ceph removed the `osd crush location` option in commit f9db479a14d9103a2b7c0a24d958fe5fff94100e [0]. This causes new clusters deployed from the Pike UCA (Luminous) using the customize-failure-domain option not to create or move the OSD to the correct spot in the OSD tree. The end result is that placement groups will fail to peer because there are no racks to select hosts and OSDs from. Instead, the charm should set the more generic `crush location` option in the ceph.conf file. It is and has been supported since the Firefly (trusty-icehouse) version of Ceph. [0] https://github.com/ceph/ceph/commit/f9db479a14d9103a2b7c0a24d958fe5fff94100e Change-Id: I0b7055b20f54096a2f33583079326aee17726355 Closes-Bug: 1730839 --- ceph-osd/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 4a967cf0..15ba70d4 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -28,7 +28,7 @@ public addr = {{ public_addr }} cluster addr = {{ cluster_addr }} {%- endif %} {%- if crush_location %} -osd crush location = {{crush_location}} +crush location = {{crush_location}} {%- endif %} {%- if upgrade_in_progress %} setuser match path = /var/lib/ceph/$type/$cluster-$id From c4b02446987ee439a50001e8575181d06e81e314 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 6 Nov 2017 15:03:24 +0100 Subject: [PATCH 1407/2699] Add actions to blacklist osd-devices The blacklist actions allow for adding devices to, and removing them from, a unit-local list of devices to be skipped during osd initialization. This list will be used to override the application level, and thereby deployment wide, 'osd-devices' configuration option on an individual unit basis. The pre-existing list-disks action is extended to return a list of blacklisted devices under the 'blacklist' key.
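
For example, on a hypothetical unit ceph-osd/0, the new actions can be
driven like any other Juju action:

    juju run-action ceph-osd/0 blacklist-add-disk osd-devices='/dev/vdb'
    juju run-action ceph-osd/0 list-disks
    juju run-action ceph-osd/0 blacklist-remove-disk osd-devices='/dev/vdb'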
Change-Id: I28a3c5d6076fb496dead3fe3387d9bbbbe9ec083 Closes-Bug: #1730267 --- ceph-osd/actions.yaml | 36 +++++ ceph-osd/actions/blacklist-add-disk | 1 + ceph-osd/actions/blacklist-remove-disk | 1 + ceph-osd/actions/blacklist.py | 102 +++++++++++++ ceph-osd/actions/list_disks.py | 5 +- ceph-osd/hooks/ceph_hooks.py | 12 +- ceph-osd/hooks/utils.py | 6 + ceph-osd/tests/basic_deployment.py | 49 ++++++ ceph-osd/unit_tests/test_actions_blacklist.py | 141 ++++++++++++++++++ ceph-osd/unit_tests/test_ceph_hooks.py | 35 +++++ 10 files changed, 386 insertions(+), 2 deletions(-) create mode 120000 ceph-osd/actions/blacklist-add-disk create mode 120000 ceph-osd/actions/blacklist-remove-disk create mode 100755 ceph-osd/actions/blacklist.py create mode 100644 ceph-osd/unit_tests/test_actions_blacklist.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index edb16684..7858d51f 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -48,3 +48,39 @@ add-disk: description: The name of the bucket in Ceph to add these devices into required: - osd-devices +blacklist-add-disk: + description: | + Add disk(s) to the blacklist. Blacklisted disks will not be + initialized for use with Ceph even if listed in the application + level osd-devices configuration option. + . + The current blacklist can be viewed with the list-disks action. + . + NOTE: This action and the blacklist will not have any effect on + already initialized disks. + params: + osd-devices: + type: string + description: | + A space-separated list of devices to add to the blacklist. + . + Each element should be an absolute path to a device node or filesystem + directory (the latter is supported for ceph >= 0.56.6). + . + Example: '/dev/vdb /var/tmp/test-osd' + required: + - osd-devices +blacklist-remove-disk: + description: Remove disk(s) from the blacklist. + params: + osd-devices: + type: string + description: | + A space-separated list of devices to remove from the blacklist. + . + Each element should be an existing entry in the unit's blacklist. + Use the list-disks action to list current blacklist entries. + . + Example: '/dev/vdb /var/tmp/test-osd' + required: + - osd-devices diff --git a/ceph-osd/actions/blacklist-add-disk b/ceph-osd/actions/blacklist-add-disk new file mode 120000 index 00000000..d3da513e --- /dev/null +++ b/ceph-osd/actions/blacklist-add-disk @@ -0,0 +1 @@ +blacklist.py \ No newline at end of file diff --git a/ceph-osd/actions/blacklist-remove-disk b/ceph-osd/actions/blacklist-remove-disk new file mode 120000 index 00000000..d3da513e --- /dev/null +++ b/ceph-osd/actions/blacklist-remove-disk @@ -0,0 +1 @@ +blacklist.py \ No newline at end of file diff --git a/ceph-osd/actions/blacklist.py b/ceph-osd/actions/blacklist.py new file mode 100755 index 00000000..994c29ff --- /dev/null +++ b/ceph-osd/actions/blacklist.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
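+
+# The blacklist is kept in the charm's unit-local key/value store
+# (charmhelpers.core.unitdata) under the 'osd-blacklist' key, so entries
+# persist across hook invocations but only affect the unit on which the
+# action was run.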
+ +import os +import sys + +sys.path.append('hooks') + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.unitdata as unitdata + +BLACKLIST_KEY = 'osd-blacklist' + + +class Error(Exception): + def __init__(self, message): + self.message = message + + def __str__(self): + return repr(self.message) + + +def get_devices(): + """Parse 'osd-devices' action parameter, returns list.""" + devices = [] + for path in hookenv.action_get('osd-devices').split(' '): + path = path.strip() + if not os.path.isabs(path): + raise Error('{}: Not absolute path.'.format(path)) + devices.append(path) + return devices + + +def blacklist_add(): + """ + Add devices given in 'osd-devices' action parameter to + unit-local devices blacklist. + """ + db = unitdata.kv() + blacklist = db.get(BLACKLIST_KEY, []) + for device in get_devices(): + if not os.path.exists(device): + raise Error('{}: No such file or directory.'.format(device)) + if device not in blacklist: + blacklist.append(device) + db.set(BLACKLIST_KEY, blacklist) + db.flush() + + +def blacklist_remove(): + """ + Remove devices given in 'osd-devices' action parameter from + unit-local devices blacklist. + """ + db = unitdata.kv() + blacklist = db.get(BLACKLIST_KEY, []) + for device in get_devices(): + try: + blacklist.remove(device) + except ValueError: + raise Error('{}: Device not in blacklist.'.format(device)) + db.set(BLACKLIST_KEY, blacklist) + db.flush() + + +# A dictionary of all the defined actions to callables +ACTIONS = { + "blacklist-add-disk": blacklist_add, + "blacklist-remove-disk": blacklist_remove, +} + + +def main(args): + """Main program""" + action_name = os.path.basename(args[0]) + try: + action = ACTIONS[action_name] + except KeyError: + return "Action {} undefined".format(action_name) + else: + try: + action() + except Exception as e: + hookenv.action_fail("Action {} failed: {}" + "".format(action_name, str(e))) + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index 25a6cdf0..116041b2 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -29,7 +29,10 @@ import charmhelpers.core.hookenv as hookenv import ceph.utils +import utils if __name__ == '__main__': hookenv.action_set({ - 'disks': ceph.utils.unmounted_disks()}) + 'disks': ceph.utils.unmounted_disks(), + 'blacklist': utils.get_blacklist(), + }) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 8253d8b8..30e7be31 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -66,6 +66,7 @@ is_unit_paused_set, get_public_addr, get_cluster_addr, + get_blacklist, ) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( @@ -441,7 +442,11 @@ def get_devices(): # their block device paths to the list. 
storage_ids = storage_list('osd-devices') devices.extend((storage_get('location', s) for s in storage_ids)) - return devices + + # Filter out any devices in the action managed unit-local device blacklist + return filter( + lambda device: device not in get_blacklist(), devices + ) def get_journal_devices(): @@ -451,6 +456,11 @@ def get_journal_devices(): devices = [] storage_ids = storage_list('osd-journals') devices.extend((storage_get('location', s) for s in storage_ids)) + + # Filter out any devices in the action managed unit-local device blacklist + devices = filter( + lambda device: device not in get_blacklist(), devices + ) devices = filter(os.path.exists, devices) return set(devices) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index ea218860..512d1180 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -207,3 +207,9 @@ def is_unit_paused_set(): return not(not(kv.get('unit-paused'))) except: return False + + +def get_blacklist(): + """Get blacklist stored in the local kv() store""" + db = unitdata.kv() + return db.get('osd-blacklist', []) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 70e51af0..dfa86480 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -672,3 +672,52 @@ def test_910_pause_and_resume(self): assert u.wait_on_action(action_id), "Resume action failed." assert u.status_get(sentry_unit)[0] == "active" u.log.debug('OK') + + def test_911_blacklist(self): + """The blacklist actions execute and behave as expected. """ + u.log.debug('Checking blacklist-add-disk and ' + 'blacklist-remove-disk actions...') + sentry_unit = self.ceph_osd_sentry + + assert u.status_get(sentry_unit)[0] == "active" + + # Attempt to add device with non-absolute path should fail + action_id = u.run_action(sentry_unit, + "blacklist-add-disk", + params={"osd-devices": "vda"}) + assert not u.wait_on_action(action_id), "completed" + assert u.status_get(sentry_unit)[0] == "active" + + # Attempt to add device with non-existent path should fail + action_id = u.run_action(sentry_unit, + "blacklist-add-disk", + params={"osd-devices": "/non-existent"}) + assert not u.wait_on_action(action_id), "completed" + assert u.status_get(sentry_unit)[0] == "active" + + # Attempt to add device with existent path should succeed + action_id = u.run_action(sentry_unit, + "blacklist-add-disk", + params={"osd-devices": "/dev/vda"}) + assert u.wait_on_action(action_id), "completed" + assert u.status_get(sentry_unit)[0] == "active" + + # Attempt to remove listed device should always succeed + action_id = u.run_action(sentry_unit, + "blacklist-remove-disk", + params={"osd-devices": "/dev/vda"}) + assert u.wait_on_action(action_id), "completed" + assert u.status_get(sentry_unit)[0] == "active" + u.log.debug('OK') + + def test_912_list_disks(self): + """The list-disks action executes.
""" + u.log.debug('Checking list-disks action...') + sentry_unit = self.ceph_osd_sentry + + assert u.status_get(sentry_unit)[0] == "active" + + action_id = u.run_action(sentry_unit, "list-disks") + assert u.wait_on_action(action_id), "completed" + assert u.status_get(sentry_unit)[0] == "active" + u.log.debug('OK') diff --git a/ceph-osd/unit_tests/test_actions_blacklist.py b/ceph-osd/unit_tests/test_actions_blacklist.py new file mode 100644 index 00000000..a74e96fd --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_blacklist.py @@ -0,0 +1,141 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from charmhelpers.core import hookenv + +from actions import blacklist + +from test_utils import CharmTestCase + + +class BlacklistActionTests(CharmTestCase): + def setUp(self): + super(BlacklistActionTests, self).setUp( + blacklist, []) + + @mock.patch('os.path.isabs') + @mock.patch('os.path.exists') + @mock.patch('charmhelpers.core.unitdata.kv') + @mock.patch('charmhelpers.core.hookenv.action_get') + def test_add_disk(self, _action_get, _kv, _exists, _isabs): + """Add device with absolute and existent path succeeds""" + _action_get.return_value = '/dev/vda' + _kv.return_value = _kv + _kv.get.return_value = [] + _exists.return_value = True + _isabs.return_value = True + blacklist.blacklist_add() + _exists.assert_called() + _isabs.assert_called() + _kv.get.assert_called_with('osd-blacklist', []) + _kv.set.assert_called_with('osd-blacklist', ['/dev/vda']) + _kv.flush.assert_called() + + @mock.patch('os.path.isabs') + @mock.patch('os.path.exists') + @mock.patch('charmhelpers.core.unitdata.kv') + @mock.patch('charmhelpers.core.hookenv.action_get') + def test_add_disk_nonexistent(self, _action_get, _kv, _exists, _isabs): + """Add device with non-existent path raises exception""" + _action_get.return_value = '/dev/vda' + _kv.return_value = _kv + _kv.get.return_value = [] + _exists.return_value = False + _isabs.return_value = True + with self.assertRaises(blacklist.Error): + blacklist.blacklist_add() + _isabs.assert_called() + _exists.assert_called() + _kv.get.assert_called_with('osd-blacklist', []) + assert not _kv.set.called + assert not _kv.flush.called + + @mock.patch('os.path.isabs') + @mock.patch('os.path.exists') + @mock.patch('charmhelpers.core.unitdata.kv') + @mock.patch('charmhelpers.core.hookenv.action_get') + def test_add_disk_nonabsolute(self, _action_get, _kv, _exists, _isabs): + """Add device with non-absolute path raises exception""" + _action_get.return_value = 'vda' + _kv.return_value = _kv + _kv.get.return_value = [] + _exists.return_value = True + _isabs.return_value = False + with self.assertRaises(blacklist.Error): + blacklist.blacklist_add() + _isabs.assert_called() + _kv.get.assert_called_with('osd-blacklist', []) + assert not _exists.called + assert not _kv.set.called + assert not _kv.flush.called + + @mock.patch('charmhelpers.core.unitdata.kv') + @mock.patch('charmhelpers.core.hookenv.action_get') + def test_remove_disk(self, 
_action_get, _kv): + """Remove action succeeds, and regardless of existence of device""" + _action_get.return_value = '/nonexistent2' + _kv.return_value = _kv + _kv.get.return_value = ['/nonexistent1', '/nonexistent2'] + blacklist.blacklist_remove() + _kv.get.assert_called_with('osd-blacklist', []) + _kv.set.assert_called_with('osd-blacklist', ['/nonexistent1']) + _kv.flush.assert_called() + + @mock.patch('charmhelpers.core.unitdata.kv') + @mock.patch('charmhelpers.core.hookenv.action_get') + def test_remove_disk_nonlisted(self, _action_get, _kv): + """Remove action raises on removal of device not in list""" + _action_get.return_value = '/nonexistent3' + _kv.return_value = _kv + _kv.get.return_value = ['/nonexistent1', '/nonexistent2'] + with self.assertRaises(blacklist.Error): + blacklist.blacklist_remove() + _kv.get.assert_called_with('osd-blacklist', []) + assert not _kv.set.called + assert not _kv.flush.called + + +class MainTestCase(CharmTestCase): + def setUp(self): + super(MainTestCase, self).setUp(hookenv, ["action_fail"]) + + def test_invokes_action(self): + dummy_calls = [] + + def dummy_action(): + dummy_calls.append(True) + + with mock.patch.dict(blacklist.ACTIONS, {"foo": dummy_action}): + blacklist.main(["foo"]) + self.assertEqual(dummy_calls, [True]) + + def test_unknown_action(self): + """Unknown actions aren't a traceback.""" + exit_string = blacklist.main(["foo"]) + self.assertEqual("Action foo undefined", exit_string) + + def test_failing_action(self): + """Actions which traceback trigger action_fail() calls.""" + dummy_calls = [] + + self.action_fail.side_effect = dummy_calls.append + + def dummy_action(): + raise ValueError("uh oh") + + with mock.patch.dict(blacklist.ACTIONS, {"foo": dummy_action}): + blacklist.main(["foo"]) + self.assertEqual(dummy_calls, ["Action foo failed: uh oh"]) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index feba3848..16c28da0 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -339,3 +339,38 @@ def test_get_journal_devices(self, mock_config, mock_storage_list, mock_storage_list.assert_called() mock_os_path_exists.assert_called() self.assertEqual(devices, set(['/dev/vda', '/dev/vdb'])) + + @patch.object(ceph_hooks, 'get_blacklist') + @patch.object(ceph_hooks, 'storage_list') + @patch.object(ceph_hooks, 'config') + def test_get_devices_blacklist(self, mock_config, mock_storage_list, + mock_get_blacklist): + '''Devices returned as expected when blacklist in effect''' + config = {'osd-devices': '/dev/vda /dev/vdb'} + mock_config.side_effect = lambda key: config[key] + mock_storage_list.return_value = [] + mock_get_blacklist.return_value = ['/dev/vda'] + devices = ceph_hooks.get_devices() + mock_storage_list.assert_called() + mock_get_blacklist.assert_called() + self.assertEqual(devices, ['/dev/vdb']) + + @patch('os.path.exists') + @patch.object(ceph_hooks, 'get_blacklist') + @patch.object(ceph_hooks, 'storage_list') + @patch.object(ceph_hooks, 'config') + def test_get_journal_devices_blacklist(self, mock_config, + mock_storage_list, + mock_get_blacklist, + mock_os_path_exists): + '''Devices returned as expected when blacklist in effect''' + config = {'osd-journal': '/dev/vda /dev/vdb'} + mock_config.side_effect = lambda key: config[key] + mock_storage_list.return_value = [] + mock_get_blacklist.return_value = ['/dev/vda'] + mock_os_path_exists.return_value = True + devices = ceph_hooks.get_journal_devices() + mock_storage_list.assert_called() + 
mock_os_path_exists.assert_called() + mock_get_blacklist.assert_called() + self.assertEqual(devices, set(['/dev/vdb'])) From be18d9455aa108a958d961b7a194f069984b9445 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 27 Oct 2017 17:25:35 +0100 Subject: [PATCH 1408/2699] Migrate charm to work with Python3 only Various changes to migrate the charm to work with Python 3. The tox.ini has been modified to inlcude py35 and py36 targets for testing against Python 3.5 (xenial, zesty), and Python 3.6 (artful+). Change-Id: I009de528428aaca555b49f3fc17704dcf5f2a28c --- ceph-mon/.gitignore | 2 + ceph-mon/actions/ceph_ops.py | 3 +- ceph-mon/actions/create-cache-tier.py | 2 +- ceph-mon/actions/create-erasure-profile.py | 2 +- ceph-mon/actions/create-pool.py | 2 +- ceph-mon/actions/crushmap-update.py | 2 +- ceph-mon/actions/delete-erasure-profile.py | 2 +- ceph-mon/actions/delete-pool.py | 2 +- ceph-mon/actions/get-erasure-profile.py | 2 +- ceph-mon/actions/list-erasure-profiles.py | 2 +- ceph-mon/actions/list-pools.py | 2 +- ceph-mon/actions/pool-get.py | 2 +- ceph-mon/actions/pool-set.py | 2 +- ceph-mon/actions/pool-statistics.py | 2 +- ceph-mon/actions/remove-cache-tier.py | 2 +- ceph-mon/actions/remove-pool-snapshot.py | 2 +- ceph-mon/actions/rename-pool.py | 2 +- ceph-mon/actions/set-pool-max-bytes.py | 2 +- ceph-mon/actions/show-disk-free.py | 2 +- ceph-mon/actions/snapshot-pool.py | 2 +- ceph-mon/files/nagios/check_ceph_status.py | 9 +- ceph-mon/hooks/ceph_hooks.py | 17 ++- .../charmhelpers/contrib/charmsupport/nrpe.py | 10 +- .../contrib/hardening/audits/apache.py | 4 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/alternatives.py | 13 ++ .../contrib/openstack/amulet/deployment.py | 30 +++-- .../contrib/openstack/amulet/utils.py | 36 ++++-- .../charmhelpers/contrib/openstack/context.py | 40 +++---- .../contrib/openstack/ha/utils.py | 11 +- .../charmhelpers/contrib/openstack/neutron.py | 61 ++-------- .../contrib/openstack/templating.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 38 +++--- .../contrib/storage/linux/ceph.py | 42 +++++-- .../charmhelpers/contrib/storage/linux/lvm.py | 8 +- .../contrib/storage/linux/utils.py | 2 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-mon/hooks/charmhelpers/core/host.py | 73 +++++++++++- ceph-mon/hooks/charmhelpers/core/strutils.py | 16 ++- ceph-mon/hooks/charmhelpers/core/unitdata.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/snap.py | 16 +++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 2 +- ceph-mon/hooks/install | 6 +- ceph-mon/hooks/install_deps | 2 +- ceph-mon/lib/ceph/broker.py | 34 +++--- ceph-mon/lib/ceph/crush_utils.py | 19 +-- ceph-mon/lib/ceph/utils.py | 108 ++++++++++++----- ceph-mon/test-requirements.txt | 4 +- .../contrib/openstack/amulet/deployment.py | 26 +++- .../contrib/openstack/amulet/utils.py | 36 ++++-- ceph-mon/tests/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-mon/tests/charmhelpers/core/host.py | 73 +++++++++++- ceph-mon/tests/charmhelpers/core/strutils.py | 16 ++- ceph-mon/tests/charmhelpers/core/unitdata.py | 2 +- ceph-mon/tox.ini | 10 +- ceph-mon/unit_tests/__init__.py | 1 + ceph-mon/unit_tests/test_check_ceph_status.py | 12 +- ceph-mon/unit_tests/test_utils.py | 4 +- 58 files changed, 777 insertions(+), 277 deletions(-) diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index db5910ca..b7e47dbe 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -7,3 +7,5 @@ bin .idea *.pyc func-results.json +.stestr +__pycache__ diff --git 
a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index c4df90f3..fe88ccbf 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -60,7 +60,8 @@ def pool_get(): key = action_get("key") pool_name = action_get("pool_name") try: - value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key]) + value = (check_output(['ceph', 'osd', 'pool', 'get', pool_name, key]) + .decode('UTF-8')) return value except CalledProcessError as e: action_fail(e.message) diff --git a/ceph-mon/actions/create-cache-tier.py b/ceph-mon/actions/create-cache-tier.py index 614bdb05..09e4c594 100755 --- a/ceph-mon/actions/create-cache-tier.py +++ b/ceph-mon/actions/create-cache-tier.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/create-erasure-profile.py b/ceph-mon/actions/create-erasure-profile.py index a468058b..661f400c 100755 --- a/ceph-mon/actions/create-erasure-profile.py +++ b/ceph-mon/actions/create-erasure-profile.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create-pool.py index a0123bf7..ba1dd260 100755 --- a/ceph-mon/actions/create-pool.py +++ b/ceph-mon/actions/create-pool.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/crushmap-update.py b/ceph-mon/actions/crushmap-update.py index d90e50bf..c4aa13f0 100755 --- a/ceph-mon/actions/crushmap-update.py +++ b/ceph-mon/actions/crushmap-update.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/delete-erasure-profile.py b/ceph-mon/actions/delete-erasure-profile.py index 49d27114..0b45563a 100755 --- a/ceph-mon/actions/delete-erasure-profile.py +++ b/ceph-mon/actions/delete-erasure-profile.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/delete-pool.py b/ceph-mon/actions/delete-pool.py index a7881596..62e73a6a 100755 --- a/ceph-mon/actions/delete-pool.py +++ b/ceph-mon/actions/delete-pool.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/get-erasure-profile.py b/ceph-mon/actions/get-erasure-profile.py index 92307119..a259e748 100755 --- a/ceph-mon/actions/get-erasure-profile.py +++ b/ceph-mon/actions/get-erasure-profile.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/list-erasure-profiles.py b/ceph-mon/actions/list-erasure-profiles.py index 038db2c0..2d88a44e 100755 --- a/ceph-mon/actions/list-erasure-profiles.py +++ b/ceph-mon/actions/list-erasure-profiles.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/list-pools.py b/ceph-mon/actions/list-pools.py index 1784a42a..976c660f 100755 --- a/ceph-mon/actions/list-pools.py +++ b/ceph-mon/actions/list-pools.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/pool-get.py b/ceph-mon/actions/pool-get.py index aa5faa29..7fa8c6b6 100755 --- a/ceph-mon/actions/pool-get.py +++ b/ceph-mon/actions/pool-get.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/pool-set.py 
b/ceph-mon/actions/pool-set.py index 577b4a53..218814ce 100755 --- a/ceph-mon/actions/pool-set.py +++ b/ceph-mon/actions/pool-set.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/pool-statistics.py b/ceph-mon/actions/pool-statistics.py index 54358d58..369b5f0b 100755 --- a/ceph-mon/actions/pool-statistics.py +++ b/ceph-mon/actions/pool-statistics.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/remove-cache-tier.py b/ceph-mon/actions/remove-cache-tier.py index 2572515a..2da89388 100755 --- a/ceph-mon/actions/remove-cache-tier.py +++ b/ceph-mon/actions/remove-cache-tier.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/remove-pool-snapshot.py b/ceph-mon/actions/remove-pool-snapshot.py index ea2eaf8e..ad60932e 100755 --- a/ceph-mon/actions/remove-pool-snapshot.py +++ b/ceph-mon/actions/remove-pool-snapshot.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/rename-pool.py b/ceph-mon/actions/rename-pool.py index d6da3db9..4e53ce61 100755 --- a/ceph-mon/actions/rename-pool.py +++ b/ceph-mon/actions/rename-pool.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/set-pool-max-bytes.py b/ceph-mon/actions/set-pool-max-bytes.py index cd44af69..5d0098da 100755 --- a/ceph-mon/actions/set-pool-max-bytes.py +++ b/ceph-mon/actions/set-pool-max-bytes.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/show-disk-free.py b/ceph-mon/actions/show-disk-free.py index b21e4afb..ca2a629b 100755 --- a/ceph-mon/actions/show-disk-free.py +++ b/ceph-mon/actions/show-disk-free.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/actions/snapshot-pool.py b/ceph-mon/actions/snapshot-pool.py index 5f03e0c2..4d071cdd 100755 --- a/ceph-mon/actions/snapshot-pool.py +++ b/ceph-mon/actions/snapshot-pool.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 2df223ff..98275a51 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (C) 2005, 2006, 2007, 2012 James Troup # Copyright (C) 2014, 2017 Canonical @@ -97,9 +97,10 @@ def check_ceph_status(args): status_data = json.loads(tree) else: try: - tree = subprocess.check_output(['ceph', - 'status', - '--format', 'json']) + tree = (subprocess.check_output(['ceph', + 'status', + '--format', 'json']) + .decode('UTF-8')) except subprocess.CalledProcessError as e: raise UnknownError( "UNKNOWN: ceph status command failed with error: {}".format(e)) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 7e158383..e042a4a7 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -254,8 +254,7 @@ def get_mon_hosts(): hosts.append('{}:6789'.format( format_ipv6_addr(addr) or addr)) - hosts.sort() - return hosts + return sorted(hosts) def get_peer_units(): @@ -308,7 
+307,7 @@ def bootstrap_source_relation_changed(): if not (mon_secret and fsid): log('Relation data is not ready as the fsid or the ' 'monitor-secret are missing from the relation: ' - 'mon_secret = %s and fsid = %s ' % (mon_secret, fsid)) + 'mon_secret = {} and fsid = {} '.format(mon_secret, fsid)) continue if not (curr_fsid or curr_secret): @@ -319,10 +318,10 @@ def bootstrap_source_relation_changed(): # will fail to join the mon cluster. If they don't, # bail because something needs to be investigated. assert curr_fsid == fsid, \ - "bootstrap fsid '%s' != current fsid '%s'" % ( + "bootstrap fsid '{}' != current fsid '{}'".format( fsid, curr_fsid) assert curr_secret == mon_secret, \ - "bootstrap secret '%s' != current secret '%s'" % ( + "bootstrap secret '{}' != current secret '{}'".format( mon_secret, curr_secret) opts = { @@ -331,7 +330,7 @@ def bootstrap_source_relation_changed(): } log('Updating leader settings for fsid and monitor-secret ' - 'from remote relation data: %s' % opts) + 'from remote relation data: {}'.format(opts)) leader_set(opts) # The leader unit needs to bootstrap itself as it won't receive the @@ -664,7 +663,7 @@ def update_nrpe_config(): check_cmd = check_cmd + ' --ignore_nodeepscrub' nrpe_setup.add_check( shortname="ceph", - description='Check Ceph health {%s}' % current_unit, + description='Check Ceph health {{{}}}'.format(current_unit), check_cmd=check_cmd ) nrpe_setup.write() @@ -693,7 +692,7 @@ def assess_status(): return # mon_count > 1, peers, but no ceph-public-address - ready = sum(1 for unit_ready in units.itervalues() if unit_ready) + ready = sum(1 for unit_ready in units.values() if unit_ready) if ready < moncount: status_set('waiting', 'Peer units detected, waiting for addresses') return diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 80d574dc..1c55b30f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -30,6 +30,7 @@ from charmhelpers.core.hookenv import ( config, + hook_name, local_unit, log, relation_ids, @@ -285,7 +286,7 @@ def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid - except: + except Exception: log("Nagios user not set up, nrpe checks not updated") return @@ -302,7 +303,12 @@ def write(self): "command": nrpecheck.command, } - service('restart', 'nagios-nrpe-server') + # update-status hooks are configured to fire every 5 minutes by + # default. When nagios-nrpe-server is restarted, the nagios server + # reports checks failing, causing unnecessary alerts. Let's not restart + # on update-status hooks.
+ if not hook_name() == 'update-status': + service('restart', 'nagios-nrpe-server') monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py index d812948a..d32bf44e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -70,12 +70,12 @@ def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) modules = [] - for line in output.strip().split(): + for line in output.splitlines(): # Each line of the enabled module output looks like: # module_name (static|shared) # Plus a header line at the top of the output which is stripped # out by the regex. - matcher = re.search(r'^ (\S*)', line) + matcher = re.search(r'^ (\S*)_module (\S*)', line) if matcher: modules.append(matcher.group(1)) return modules diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index d7e6debf..a871ce37 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None): if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) - except: + except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback @@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True): if not result: try: result = socket.gethostbyaddr(address)[0] - except: + except Exception: return None else: result = address diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py index 1501641e..547de09c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50): target, name, source, str(priority) ] subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..13a12f62 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -303,20 +310,27 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if 
self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ + 'data', + 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', 'glance' ] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index f67f3265..ece75df8 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import glob import json import math @@ -578,11 +579,14 @@ def __call__(self): laddr = get_address_in_network(config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, - netmask), - 'backends': {l_unit: laddr}} + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: @@ -594,10 +598,13 @@ def __call__(self): # match in the frontend cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -628,6 +635,8 @@ def __call__(self): ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + ctxt['stat_port'] = '8888' db = kv() @@ -802,8 +811,9 @@ def __call__(self): else: # Expect cert/key provided in config (currently assumed that ca # uses ip for cn) - cn = resolve_address(endpoint_type=INTERNAL) - self.configure_cert(cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: @@ -843,15 +853,6 @@ def _ensure_packages(self): 
for pkgs in self.packages: ensure_packages(pkgs) - def _save_flag_file(self): - if self.network_manager == 'quantum': - _file = '/etc/nova/quantum_plugin.conf' - else: - _file = '/etc/nova/neutron_plugin.conf' - - with open(_file, 'wb') as out: - out.write(self.plugin + '\n') - def ovs_ctxt(self): driver = neutron_plugin_attribute(self.plugin, 'driver', self.network_manager) @@ -996,7 +997,6 @@ def __call__(self): flags = config_flags_parser(alchemy_flags) ctxt['neutron_alchemy_flags'] = flags - self._save_flag_file() return ctxt @@ -1176,7 +1176,7 @@ def __call__(self): if sub_config and sub_config != '': try: sub_config = json.loads(sub_config) - except: + except Exception: log('Could not parse JSON from ' 'subordinate_configuration setting from %s' % rid, level=ERROR) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 254a90e7..9a4d79c1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -82,15 +82,18 @@ def update_dns_ha_resource_params(resources, resource_params, continue m = re.search('os-(.+?)-hostname', setting) if m: - networkspace = m.group(1) + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' else: msg = ('Unexpected DNS hostname setting: {}. ' - 'Cannot determine network space name' + 'Cannot determine endpoint_type name' ''.format(setting)) status_set('blocked', msg) raise DNSHAException(msg) - hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), @@ -101,7 +104,7 @@ def update_dns_ha_resource_params(resources, resource_params, resources[hostname_key] = crm_ocf resource_params[hostname_key] = ( 'params fqdn="{}" ip_address="{}" ' - ''.format(hostname, resolve_address(endpoint_type=networkspace, + ''.format(hostname, resolve_address(endpoint_type=endpoint_type, override=False))) if len(hostname_group) >= 1: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index 37fa0eb0..0f847f56 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -59,18 +59,13 @@ def determine_dkms_package(): def quantum_plugins(): - from charmhelpers.contrib.openstack import context return { 'ovs': { 'config': '/etc/quantum/plugins/openvswitch/' 'ovs_quantum_plugin.ini', 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 'OVSQuantumPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': ['quantum-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], @@ -82,11 +77,7 @@ def quantum_plugins(): 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 
'QuantumPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['quantum-server', @@ -100,7 +91,6 @@ def quantum_plugins(): def neutron_plugins(): - from charmhelpers.contrib.openstack import context release = os_release('nova-common') plugins = { 'ovs': { @@ -108,11 +98,7 @@ def neutron_plugins(): 'ovs_neutron_plugin.ini', 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 'OVSNeutronPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['neutron-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], @@ -124,11 +110,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -138,11 +120,7 @@ def neutron_plugins(): 'nsx': { 'config': '/etc/neutron/plugins/vmware/nsx.ini', 'driver': 'vmware', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -152,11 +130,7 @@ def neutron_plugins(): 'n1kv': { 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], @@ -167,11 +141,7 @@ def neutron_plugins(): 'Calico': { 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', @@ -189,11 +159,7 @@ def neutron_plugins(): 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], @@ -203,10 +169,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [ - context.SharedDBContext(user=config('database-user'), - database=config('database'), - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': ['plumgrid-lxc', 'iovisor-dkms'], @@ -217,11 +180,7 @@ def neutron_plugins(): 
'midonet': { 'config': '/etc/neutron/plugins/midonet/midonet.ini', 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index d8c1fc7f..77490e4d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -272,6 +272,8 @@ def write(self, config_file): raise OSConfigException _out = self.render(config_file) + if six.PY3: + _out = _out.encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 837a1674..a1267136 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -95,7 +95,7 @@ from charmhelpers.fetch.snap import ( snap_install, snap_refresh, - SNAP_CHANNELS, + valid_snap_channel, ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -412,6 +412,8 @@ def get_os_codename_package(package, fatal=True): cmd = ['snap', 'list', package] try: out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') except subprocess.CalledProcessError as e: return None lines = out.split('\n') @@ -426,7 +428,7 @@ def get_os_codename_package(package, fatal=True): try: pkg = cache[package] - except: + except Exception: if not fatal: return None # the package is unknown to the current apt cache. @@ -579,6 +581,9 @@ def configure_installation_source(source_plus_key): Note that the behaviour on error is to log the error to the juju log and then call sys.exit(1). """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return # extract the key if there is one, denoted by a '|' in the rel source, key = get_source_and_pgp_key(source_plus_key) @@ -615,7 +620,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): juju_rc_path = "%s/%s" % (charm_dir(), script_path) if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wb') as rc_script: + with open(juju_rc_path, 'wt') as rc_script: rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) @@ -794,7 +799,7 @@ def git_default_repos(projects_yaml): service = service_name() core_project = service - for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): if projects_yaml == default: # add the requirements repo first @@ -1615,7 +1620,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): upgrade_callback(configs=configs) action_set({'outcome': 'success, upgrade completed.'}) ret = True - except: + except Exception: action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) action_fail('do_openstack_upgrade resulted in an ' @@ -1720,7 +1725,7 @@ def is_unit_paused_set(): kv = t[0] # transform something truth-y into a Boolean. 
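# not(not(x)) is simply a truthiness coercion; bool(x) is the idiomatic
# equivalent. A one-line sketch of what the return below computes:
#
#     bool(kv.get('unit-paused'))  # None, '', 0 -> False; truthy -> True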
return not(not(kv.get('unit-paused'))) - except: + except Exception: return False @@ -2048,7 +2053,7 @@ def update_json_file(filename, items): def snap_install_requested(): """ Determine if installing from snaps - If openstack-origin is of the form snap:channel-series-release + If openstack-origin is of the form snap:track/channel[/branch] and channel is in SNAPS_CHANNELS return True. """ origin = config('openstack-origin') or "" @@ -2056,10 +2061,12 @@ def snap_install_requested(): return False _src = origin[5:] - channel, series, release = _src.split('-') - if channel.lower() in SNAP_CHANNELS: - return True - return False + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @@ -2067,7 +2074,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @param snaps: List of snaps @param src: String of openstack-origin or source of the form - snap:channel-series-track + snap:track/channel @param mode: String classic, devmode or jailmode @returns: Dictionary of snaps with channels and modes """ @@ -2077,8 +2084,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): return {} _src = src[5:] - _channel, _series, _release = _src.split('-') - channel = '--channel={}/{}'.format(_release, _channel) + channel = '--channel={}'.format(_src) return {snap: {'channel': channel, 'mode': mode} for snap in snaps} @@ -2090,8 +2096,8 @@ def install_os_snaps(snaps, refresh=False): @param snaps: Dictionary of snaps with channels and modes of the form: {'snap_name': {'channel': 'snap_channel', 'mode': 'snap_mode'}} - Where channel a snapstore channel and mode is --classic, --devmode or - --jailmode. + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. 
@param post_snap_install: Callback function to run after snaps have been installed """ diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index e5a01b1b..39231612 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -370,9 +370,10 @@ def get_mon_map(service): Also raises CalledProcessError if our ceph command fails """ try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -457,7 +458,7 @@ def monitor_key_get(service, key): try: output = check_output( ['ceph', '--id', service, - 'config-key', 'get', str(key)]) + 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: log("Monitor config-key get failed with message: {}".format( @@ -500,6 +501,8 @@ def get_erasure_profile(service, name): out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name): """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -714,9 +721,12 @@ def get_osds(service): """ version = ceph_version() if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) return None @@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1018,7 +1032,9 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) + if six.PY3: + output = output.decode('UTF-8') output = output.split() if len(output) > 3: return output[2] 
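The ceph.py hunks above all repeat one idiom: on Python 3,
subprocess.check_output() returns bytes while the callers want text, so each
call site grows an "if six.PY3: out = out.decode('UTF-8')" stanza. A small
helper, sketched here as an illustration rather than as part of the patch,
would capture the pattern using the same six dependency the code already has:

    import subprocess

    import six


    def check_output_utf8(*args, **kwargs):
        """check_output() that always returns text on Python 2 and 3."""
        out = subprocess.check_output(*args, **kwargs)
        if six.PY3:
            out = out.decode('UTF-8')
        return out

    # usage sketch:
    # json.loads(check_output_utf8(['ceph', 'osd', 'dump', '--format=json']))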
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py index 4719f53c..7f2a0604 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device): ''' vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) return vg diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 3dc0df68..c9428894 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,6 +64,6 @@ def is_device_mounted(device): ''' try: out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except: + except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
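# Usage sketch (an illustration, not part of the patch): for ICMP the
# port/protocol pair collapses to just the protocol, so the port argument
# is ignored by _port_op():
#
#     open_port(0, protocol='ICMP')   # runs: open-port ICMP
#     open_port(8080)                 # runs: open-port 8080/TCP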
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
+ + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py index 112a54c3..395836c7 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/snap.py +++ b/ceph-mon/hooks/charmhelpers/fetch/snap.py @@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -132,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 40e1cb5b..910e96a6 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -572,7 +572,7 @@ def get_upstream_version(package): cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. 
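# Note on the recurring "except:" -> "except Exception:" change in this
# patch: a bare except also catches SystemExit and KeyboardInterrupt, so a
# Ctrl-C during an apt cache lookup would be silently swallowed. Sketch:
#
#     try:
#         pkg = cache[package]
#     except Exception:  # KeyError etc., not SystemExit/KeyboardInterrupt
#         pkg = None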
        return None
diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install
index 0bdbf8d5..9a2f9353 100755
--- a/ceph-mon/hooks/install
+++ b/ceph-mon/hooks/install
@@ -1,6 +1,6 @@
 #!/bin/bash
-# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
-# by default.
+# ensure that the python3 bits are installed, whichever version of Ubuntu
+# is being installed.

 declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml')

@@ -11,7 +11,7 @@ check_and_install() {
     fi
 }

-PYTHON="python"
+PYTHON="python3"

 for dep in ${DEPS[@]}; do
     check_and_install ${PYTHON} ${dep}
diff --git a/ceph-mon/hooks/install_deps b/ceph-mon/hooks/install_deps
index da4ba5d8..bb600820 100755
--- a/ceph-mon/hooks/install_deps
+++ b/ceph-mon/hooks/install_deps
@@ -11,7 +11,7 @@ check_and_install() {
     fi
 }

-PYTHON="python"
+PYTHON="python3"

 for dep in ${DEPS[@]}; do
     check_and_install ${PYTHON} ${dep}
diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py
index b071b91e..95ee7799 100644
--- a/ceph-mon/lib/ceph/broker.py
+++ b/ceph-mon/lib/ceph/broker.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import collections
 import json
 import os

@@ -134,7 +135,7 @@ def process_requests(reqs):
             log(msg, level=ERROR)
             return {'exit-code': 1, 'stderr': msg}

-    msg = ("Missing or invalid api version (%s)" % version)
+    msg = ("Missing or invalid api version ({})".format(version))
     resp = {'exit-code': 1, 'stderr': msg}
     if request_id:
         resp['request-id'] = request_id
@@ -231,7 +232,7 @@ def add_pool_to_group(pool, group, namespace=None):
 def pool_permission_list_for_service(service):
     """Build the permission string for Ceph for a given service"""
     permissions = []
-    permission_types = {}
+    permission_types = collections.OrderedDict()
     for permission, group in service["group_names"].items():
         if permission not in permission_types:
             permission_types[permission] = []
@@ -267,9 +268,7 @@ def get_service_groups(service, namespace=None):
         key="cephx.services.{}".format(service))
     try:
         service = json.loads(service_json)
-    except TypeError:
-        service = None
-    except ValueError:
+    except (TypeError, ValueError):
         service = None
     if service:
         service['groups'] = _build_service_groups(service, namespace)
@@ -296,7 +295,7 @@ def _build_service_groups(service, namespace=None):
         }
     """
     all_groups = {}
-    for _, groups in service['group_names'].items():
+    for groups in service['group_names'].values():
         for group in groups:
             name = group
             if namespace:
@@ -316,9 +315,7 @@ def get_group(group_name):
     group_json = monitor_key_get(service='admin', key=group_key)
     try:
         group = json.loads(group_json)
-    except TypeError:
-        group = None
-    except ValueError:
+    except (TypeError, ValueError):
         group = None
     if not group:
         group = {
@@ -391,9 +388,8 @@ def handle_erasure_pool(request, service):
                             percent_data=weight)
     # Ok make the erasure pool
     if not pool_exists(service=service, name=pool_name):
-        log("Creating pool '%s' (erasure_profile=%s)" % (pool.name,
-                                                         erasure_profile),
-            level=INFO)
+        log("Creating pool '{}' (erasure_profile={})"
+            .format(pool.name, erasure_profile), level=INFO)
         pool.create()

     # Set a quota if requested
@@ -446,11 +442,11 @@ def handle_replicated_pool(request, service):
         pool = ReplicatedPool(service=service, name=pool_name, **kwargs)
     if not pool_exists(service=service, name=pool_name):
-        log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas),
+        log("Creating pool '{}' (replicas={})".format(pool.name, replicas),
             level=INFO)
         pool.create()
     else:
-        log("Pool
'%s' already exists - skipping create" % pool.name, + log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) # Set a quota if requested @@ -519,7 +515,7 @@ def handle_set_pool_value(request, service): 'key': request.get('key'), 'value': request.get('value')} if params['key'] not in POOL_KEYS: - msg = "Invalid key '%s'" % params['key'] + msg = "Invalid key '{}'".format(params['key']) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} @@ -685,7 +681,7 @@ def handle_rgw_create_user(request, service): ] ) try: - user_json = json.loads(create_output) + user_json = json.loads(str(create_output.decode('UTF-8'))) return {'exit-code': 0, 'user': user_json} except ValueError as err: log(err, level=ERROR) @@ -790,10 +786,10 @@ def process_requests_v1(reqs): operation failed along with an explanation). """ ret = None - log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % op, level=DEBUG) + log("Processing op='{}'".format(op), level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' @@ -848,7 +844,7 @@ def process_requests_v1(reqs): elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) else: - msg = "Unknown operation '%s'" % op + msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} diff --git a/ceph-mon/lib/ceph/crush_utils.py b/ceph-mon/lib/ceph/crush_utils.py index 1c777f34..8b6876c1 100644 --- a/ceph-mon/lib/ceph/crush_utils.py +++ b/ceph-mon/lib/ceph/crush_utils.py @@ -60,7 +60,7 @@ def __init__(self): ids = list(map( lambda x: int(x), re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids.sort() + ids = sorted(ids) if roots != []: for root in roots: buckets.append(CRUSHBucket(root[0], root[1], True)) @@ -73,8 +73,11 @@ def __init__(self): def load_crushmap(self): try: - crush = check_output(['ceph', 'osd', 'getcrushmap']) - return check_output(['crushtool', '-d', '-'], stdin=crush.stdout) + crush = str(check_output(['ceph', 'osd', 'getcrushmap']) + .decode('UTF-8')) + return str(check_output(['crushtool', '-d', '-'], + stdin=crush.stdout) + .decode('UTF-8')) except CalledProcessError as e: log("Error occured while loading and decompiling CRUSH map:" "{}".format(e), ERROR) @@ -99,10 +102,12 @@ def save(self): """Persist Crushmap to Ceph""" try: crushmap = self.build_crushmap() - compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o', - '/dev/stdout'], stdin=crushmap) - ceph_output = check_output(['ceph', 'osd', 'setcrushmap', '-i', - '/dev/stdin'], stdin=compiled) + compiled = str(check_output(['crushtool', '-c', '/dev/stdin', '-o', + '/dev/stdout'], stdin=crushmap) + .decode('UTF-8')) + ceph_output = str(check_output(['ceph', 'osd', 'setcrushmap', '-i', + '/dev/stdin'], stdin=compiled) + .decode('UTF-8')) return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 343a759a..6ab697f0 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -381,8 +381,9 @@ def get_block_uuid(block_dev): :returns: The UUID of the device or None on Error. 
""" try: - block_info = subprocess.check_output( - ['blkid', '-o', 'export', block_dev]) + block_info = str(subprocess + .check_output(['blkid', '-o', 'export', block_dev]) + .decode('UTF-8')) for tag in block_info.split('\n'): parts = tag.split('=') if parts[0] == 'UUID': @@ -533,8 +534,9 @@ def get_osd_weight(osd_id): :raises: CalledProcessError if our ceph command fails. """ try: - tree = subprocess.check_output( - ['ceph', 'osd', 'tree', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'osd', 'tree', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -561,9 +563,10 @@ def get_osd_tree(service): Also raises CalledProcessError if our ceph command fails """ try: - tree = subprocess.check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) + tree = str(subprocess + .check_output(['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) crush_list = [] @@ -628,7 +631,7 @@ def _get_osd_num_from_dirname(dirname): """ match = re.search('ceph-(?P\d+)', dirname) if not match: - raise ValueError("dirname not in correct format: %s" % dirname) + raise ValueError("dirname not in correct format: {}".format(dirname)) return match.group('osd_id') @@ -718,7 +721,7 @@ def get_version(): def error_out(msg): - log("FATAL ERROR: %s" % msg, + log("FATAL ERROR: {}".format(msg), level=ERROR) sys.exit(1) @@ -736,7 +739,9 @@ def is_quorum(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) except subprocess.CalledProcessError: return False except ValueError: @@ -763,7 +768,9 @@ def is_leader(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) except subprocess.CalledProcessError: return False except ValueError: @@ -955,8 +962,9 @@ def is_osd_disk(dev): partitions = get_partition_list(dev) for partition in partitions: try: - info = subprocess.check_output(['sgdisk', '-i', partition.number, - dev]) + info = str(subprocess + .check_output(['sgdisk', '-i', partition.number, dev]) + .decode('UTF-8')) info = info.split("\n") # IGNORE:E1103 for line in info: for ptype in CEPH_PARTITIONS: @@ -1039,7 +1047,7 @@ def generate_monitor_secret(): '--name=mon.', '--gen-key' ] - res = subprocess.check_output(cmd) + res = str(subprocess.check_output(cmd).decode('UTF-8')) return "{}==".format(res.split('=')[1].strip()) @@ -1188,7 +1196,10 @@ def create_named_keyring(entity, name, caps=None): for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + return (parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip())) # IGNORE:E1103 def get_upgrade_key(): @@ -1205,7 +1216,7 @@ def get_named_key(name, caps=None, pool_list=None): """ try: # Does the key already exist? - output = subprocess.check_output( + output = str(subprocess.check_output( [ 'sudo', '-u', ceph_user(), @@ -1218,7 +1229,7 @@ def get_named_key(name, caps=None, pool_list=None): 'auth', 'get', 'client.{}'.format(name), - ]).strip() + ]).decode('UTF-8')).strip() return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! 
@@ -1247,7 +1258,10 @@ def get_named_key(name, caps=None, pool_list=None): cmd.extend([subsystem, '; '.join(subcaps)]) log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + return parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip()) # IGNORE:E1103 def upgrade_key_caps(key, caps): @@ -1361,7 +1375,7 @@ def maybe_zap_journal(journal_dev): def get_partitions(dev): cmd = ['partx', '--raw', '--noheadings', dev] try: - out = subprocess.check_output(cmd).splitlines() + out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines() log("get partitions: {}".format(out), level=DEBUG) return out except subprocess.CalledProcessError as e: @@ -1529,7 +1543,7 @@ def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: - result = subprocess.check_output(cmd) + result = str(subprocess.check_output(cmd).decode('UTF-8')) return result.split() except subprocess.CalledProcessError: return [] @@ -1545,7 +1559,9 @@ def get_cephfs(service): # This command wasn't introduced until 0.86 ceph return [] try: - output = subprocess.check_output(["ceph", '--id', service, "fs", "ls"]) + output = str(subprocess + .check_output(["ceph", '--id', service, "fs", "ls"]) + .decode('UTF-8')) if not output: return [] """ @@ -2079,7 +2095,9 @@ def list_pools(service): """ try: pool_list = [] - pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + pools = str(subprocess + .check_output(['rados', '--id', service, 'lspools']) + .decode('UTF-8')) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -2140,10 +2158,8 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): """Pretty print supported upgrade paths for ceph""" - lines = [] - for key, value in UPGRADE_PATHS.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines + return ["{} -> {}".format(key, value) + for key, value in UPGRADE_PATHS.iteritems()] def resolve_ceph_version(source): @@ -2163,7 +2179,9 @@ def get_ceph_pg_stat(): :returns: dict """ try: - tree = subprocess.check_output(['ceph', 'pg', 'stat', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'pg', 'stat', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) if not json_tree['num_pg_by_state']: @@ -2187,8 +2205,9 @@ def get_ceph_health(): status, use get_ceph_health()['overall_status']. 
""" try: - tree = subprocess.check_output( - ['ceph', 'status', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'status', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -2215,9 +2234,12 @@ def reweight_osd(osd_num, new_weight): :raises CalledProcessError: if an error occurs invoking the systemd cmd """ try: - cmd_result = subprocess.check_output( - ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), - new_weight], stderr=subprocess.STDOUT) + cmd_result = str(subprocess + .check_output(['ceph', 'osd', 'crush', + 'reweight', "osd.{}".format(osd_num), + new_weight], + stderr=subprocess.STDOUT) + .decode('UTF-8')) expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( ID=osd_num) + " to {}".format(new_weight) log(cmd_result) @@ -2260,3 +2282,25 @@ def bootstrap_manager(): unit = 'ceph-mgr@{}'.format(hostname) subprocess.check_call(['systemctl', 'enable', unit]) service_restart(unit) + + +def osd_noout(enable): + """Sets or unsets 'noout' + + :param enable: bool. True to set noout, False to unset. + :returns: bool. True if output looks right. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + operation = { + True: 'set', + False: 'unset', + } + try: + subprocess.check_call(['ceph', '--id', 'admin', + 'osd', operation[enable], + 'noout']) + log('running ceph osd {} noout'.format(operation[enable])) + return True + except subprocess.CalledProcessError as e: + log(e) + raise diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 9edd4bbf..6757a47d 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -5,12 +5,12 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 +charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ requests==2.6.0 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index fc20a76d..13a12f62 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -303,22 +310,29 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ + 'data', + 'metadata', 'rbd', 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif 
(self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', 'cinder-ceph', 'glance' ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] if radosgw: pools.extend([ diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
+ If you have 1000 nodes that need to restart, 100 at a time with 1 minute
+ between groups:
+
+ time.sleep(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially, set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.sleep(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ return (unit_number % modulo) * wait
diff --git a/ceph-mon/tests/charmhelpers/core/strutils.py b/ceph-mon/tests/charmhelpers/core/strutils.py
index 685dabde..e8df0452 100644
--- a/ceph-mon/tests/charmhelpers/core/strutils.py
+++ b/ceph-mon/tests/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
 if isinstance(value, six.string_types):
 value = six.text_type(value)
 else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
 raise ValueError(msg)
 matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
 class BasicStringComparator(object):
diff --git a/ceph-mon/tests/charmhelpers/core/unitdata.py b/ceph-mon/tests/charmhelpers/core/unitdata.py
index 54ec969f..7af875c2 100644
--- a/ceph-mon/tests/charmhelpers/core/unitdata.py
+++ b/ceph-mon/tests/charmhelpers/core/unitdata.py
@@ -358,7 +358,7 @@ def hook_scope(self, name=""):
 try:
 yield self.revision
 self.revision = None
- except:
+ except Exception:
 self.flush(False)
 self.revision = None
 raise
diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini
index 7c2936e3..a8188146 100644
--- a/ceph-mon/tox.ini
+++ b/ceph-mon/tox.ini
@@ -2,8 +2,9 @@
 # This file is managed centrally by release-tools and should not be modified
 # within individual charm repos.
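The relaxed bytes_from_string() above now also accepts a bare byte count with no unit suffix. A few illustrative conversions, assuming the synced charmhelpers module is importable:

    from charmhelpers.core.strutils import bytes_from_string

    assert bytes_from_string('3K') == 3 * 1024    # suffix form, as before
    assert bytes_from_string('1M') == 1024 ** 2
    assert bytes_from_string('2048') == 2048      # new: plain byte count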
[tox] -envlist = pep8,py27 +envlist = pep8,py27,py35,py36 skipsdist = True +skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} @@ -20,12 +21,19 @@ passenv = HOME TERM AMULET_* CS_API_* basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +# temporarily disable py27 +commands = /bin/true [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py index 8d6182b7..2e3f304c 100644 --- a/ceph-mon/unit_tests/__init__.py +++ b/ceph-mon/unit_tests/__init__.py @@ -15,3 +15,4 @@ import sys sys.path.append('hooks') sys.path.append('lib') +sys.path.append('unit_tests') diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index 64c3e903..021b8dd1 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -29,7 +29,7 @@ class NagiosTestCase(unittest.TestCase): def test_health_ok(self, mock_subprocess): with open('unit_tests/ceph_ok.json') as f: tree = f.read() - mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) check_output = check_ceph_status.check_ceph_status(args) self.assertRegexpMatches(check_output, r"^All OK$") @@ -37,7 +37,7 @@ def test_health_ok(self, mock_subprocess): def test_health_warn(self, mock_subprocess): with open('unit_tests/ceph_warn.json') as f: tree = f.read() - mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) self.assertRaises(check_ceph_status.WarnError, lambda: check_ceph_status.check_ceph_status(args)) @@ -45,7 +45,7 @@ def test_health_warn(self, mock_subprocess): def test_health_crit(self, mock_subprocess): with open('unit_tests/ceph_crit.json') as f: tree = f.read() - mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) @@ -53,7 +53,7 @@ def test_health_crit(self, mock_subprocess): def test_health_lotsdegraded(self, mock_subprocess): with open('unit_tests/ceph_params.json') as f: tree = f.read() - mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) @@ -61,7 +61,7 @@ def test_health_lotsdegraded(self, mock_subprocess): def test_health_nodeepscrub(self, mock_subprocess): with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() - mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) @@ -69,7 +69,7 @@ def test_health_nodeepscrub(self, mock_subprocess): def test_health_nodeepscrubok(self, mock_subprocess): with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() - 
mock_subprocess.return_value = tree + mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--ignore_nodeepscrub']) self.assertRaises(check_ceph_status.WarnError, lambda: check_ceph_status.check_ceph_status(args)) diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 1665c161..bc33c4ff 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -37,7 +37,7 @@ def load_config(): if not config: logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % f) + 'of {}. '.format(f)) raise Exception return yaml.safe_load(open(config).read())['options'] @@ -50,7 +50,7 @@ def get_default_config(): ''' default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: default_config[k] = v['default'] else: From e16150dca5841697ff27060093d7bc91fe75518a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 17 Nov 2017 08:57:20 +0000 Subject: [PATCH 1409/2699] Bring ceph-osd to Python 3 * Synced version of charm-helpers * Synced version of charms.ceph to bring in Py3 compatible library methods. Change-Id: I5ac45740f48a71d9cb0c5943472fc8590a723514 --- ceph-osd/actions/add_disk.py | 2 +- ceph-osd/actions/blacklist.py | 2 +- ceph-osd/actions/list_disks.py | 2 +- ceph-osd/actions/pause_resume.py | 2 +- ceph-osd/actions/replace_osd.py | 4 +- ceph-osd/files/nagios/check_ceph_status.py | 9 +- ceph-osd/hooks/ceph_hooks.py | 25 +-- .../charmhelpers/contrib/charmsupport/nrpe.py | 10 +- .../charmhelpers/contrib/hahelpers/cluster.py | 30 +++ .../contrib/hardening/audits/apache.py | 4 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/alternatives.py | 13 ++ .../charmhelpers/contrib/openstack/context.py | 40 ++-- .../charmhelpers/contrib/openstack/neutron.py | 61 +----- .../charmhelpers/contrib/openstack/utils.py | 36 ++-- .../contrib/storage/linux/ceph.py | 42 +++-- .../charmhelpers/contrib/storage/linux/lvm.py | 8 +- .../contrib/storage/linux/utils.py | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 112 ++++++++++- ceph-osd/hooks/charmhelpers/core/host.py | 73 ++++++- ceph-osd/hooks/charmhelpers/core/strutils.py | 16 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/snap.py | 16 ++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 2 +- ceph-osd/hooks/install | 2 +- ceph-osd/hooks/install_deps | 2 +- ceph-osd/hooks/upgrade-charm | 3 +- ceph-osd/hooks/utils.py | 8 +- ceph-osd/lib/ceph/broker.py | 34 ++-- ceph-osd/lib/ceph/crush_utils.py | 19 +- ceph-osd/lib/ceph/utils.py | 178 ++++++++++++------ ceph-osd/test-requirements.txt | 4 +- ceph-osd/tests/basic_deployment.py | 8 +- .../contrib/openstack/amulet/deployment.py | 26 ++- .../contrib/openstack/amulet/utils.py | 36 +++- ceph-osd/tests/charmhelpers/core/hookenv.py | 112 ++++++++++- ceph-osd/tests/charmhelpers/core/host.py | 73 ++++++- ceph-osd/tests/charmhelpers/core/strutils.py | 16 +- ceph-osd/tests/charmhelpers/core/unitdata.py | 2 +- ceph-osd/tox.ini | 10 +- ceph-osd/unit_tests/__init__.py | 1 + ceph-osd/unit_tests/test_replace_osd.py | 2 +- ceph-osd/unit_tests/test_tuning.py | 2 +- ceph-osd/unit_tests/test_utils.py | 6 +- 44 files changed, 779 insertions(+), 282 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index e8d2f159..158d6388 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # 
Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/actions/blacklist.py b/ceph-osd/actions/blacklist.py index 994c29ff..9f7f39a3 100755 --- a/ceph-osd/actions/blacklist.py +++ b/ceph-osd/actions/blacklist.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright 2017 Canonical Ltd # diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index 116041b2..6bd3bf9b 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index a8ae4d41..c8d5778b 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index 8d9f2ec1..5705db03 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -33,7 +33,7 @@ def get_disk_stats(): try: # https://www.kernel.org/doc/Documentation/iostats.txt - with open('/proc/diskstats', 'r') as diskstats: + with open('/proc/diskstats', 'rt', encoding='UTF-8') as diskstats: return diskstats.readlines() except IOError as err: hookenv.log('Could not open /proc/diskstats. Error: {}' diff --git a/ceph-osd/files/nagios/check_ceph_status.py b/ceph-osd/files/nagios/check_ceph_status.py index 09ee5f8d..cc21591a 100755 --- a/ceph-osd/files/nagios/check_ceph_status.py +++ b/ceph-osd/files/nagios/check_ceph_status.py @@ -13,10 +13,13 @@ def check_ceph_status(args): if args.status_file: nagios_plugin.check_file_freshness(args.status_file, 3600) - with open(args.status_file, "r") as f: + with open(args.status_file, "rt", encoding='UTF-8') as f: lines = f.readlines() else: - lines = subprocess.check_output(["ceph", "status"]).split('\n') + lines = (subprocess + .check_output(["ceph", "status"]) + .decode('UTF-8') + .split('\n')) status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) if ('health' not in status_data or @@ -42,7 +45,7 @@ def check_ceph_status(args): msg = 'CRITICAL: Some OSDs are not up. 
Total: {}, up: {}'.format(
 osds.group(1), osds.group(2))
 raise nagios_plugin.CriticalError(msg)
- print "All OK"
+ print("All OK")
 if __name__ == '__main__':
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 928a49fb..6c2d1aa1 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 #
 # Copyright 2016 Canonical Ltd
 #
@@ -304,7 +304,7 @@ def emit_cephconf(upgrading=False):
 def read_zapped_journals():
 if os.path.exists(JOURNAL_ZAPPED):
- with open(JOURNAL_ZAPPED) as zapfile:
+ with open(JOURNAL_ZAPPED, 'rt', encoding='UTF-8') as zapfile:
 zapped = set(
 filter(None,
 [l.strip() for l in zapfile.readlines()]))
@@ -318,7 +318,7 @@ def write_zapped_journals(journal_devs):
 with os.fdopen(tmpfh, 'wb') as zapfile:
 log("write zapped: {}".format(journal_devs),
 level=DEBUG)
- zapfile.write('\n'.join(sorted(list(journal_devs))))
+ zapfile.write('\n'.join(sorted(list(journal_devs))).encode('UTF-8'))
 shutil.move(tmpfile, JOURNAL_ZAPPED)
@@ -399,8 +399,7 @@ def get_mon_hosts():
 if addr:
 hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr))
- hosts.sort()
- return hosts
+ return sorted(hosts)
 def get_fsid():
@@ -446,9 +445,8 @@ def get_devices():
 devices.extend((storage_get('location', s) for s in storage_ids))
 # Filter out any devices in the action managed unit-local device blacklist
- return filter(
- lambda device: device not in get_blacklist(), devices
- )
+ _blacklist = get_blacklist()
+ return [device for device in devices if device not in _blacklist]
 def get_journal_devices():
@@ -460,12 +458,9 @@ def get_journal_devices():
 devices.extend((storage_get('location', s) for s in storage_ids))
 # Filter out any devices in the action managed unit-local device blacklist
- devices = filter(
- lambda device: device not in get_blacklist(), devices
- )
- devices = filter(os.path.exists, devices)
-
- return set(devices)
+ _blacklist = get_blacklist()
+ return set(device for device in devices
+ if device not in _blacklist and os.path.exists(device))
 @hooks.hook('mon-relation-changed',
@@ -496,7 +491,7 @@ def upgrade_charm():
 'nrpe-external-master-relation-changed')
 def update_nrpe_config():
 # python-dbus is used by check_upstart_job
- apt_install('python-dbus')
+ apt_install('python3-dbus')
 hostname = nrpe.get_nagios_hostname()
 current_unit = nrpe.get_nagios_unit_name()
 nrpe_setup = nrpe.NRPE(hostname=hostname)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 80d574dc..1c55b30f 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -30,6 +30,7 @@
 from charmhelpers.core.hookenv import (
 config,
+ hook_name,
 local_unit,
 log,
 relation_ids,
@@ -285,7 +286,7 @@ def write(self):
 try:
 nagios_uid = pwd.getpwnam('nagios').pw_uid
 nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
+ except Exception:
 log("Nagios user not set up, nrpe checks not updated")
 return
@@ -302,7 +303,12 @@ def write(self):
 "command": nrpecheck.command,
 }
- service('restart', 'nagios-nrpe-server')
+ # update-status hooks are configured to fire every 5 minutes by
+ default. When nagios-nrpe-server is restarted, the nagios server
+ reports checks failing causing unnecessary alerts. Let's not restart
+ on update-status hooks.
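hook_name(), newly imported above, reports which hook is currently executing, and the guard that follows uses it to skip the restart. A standalone sketch of the pattern, assuming a Juju hook environment:

    from charmhelpers.core.hookenv import hook_name
    from charmhelpers.core.host import service

    # update-status runs every few minutes; restarting the NRPE daemon
    # there makes Nagios see checks flap, so restart in other hooks only.
    if hook_name() != 'update-status':
        service('restart', 'nagios-nrpe-server')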
+ if not hook_name() == 'update-status':
+ service('restart', 'nagios-nrpe-server')
 monitor_ids = relation_ids("local-monitors") + \
 relation_ids("nrpe-external-master")
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
index e02350e0..4207e42c 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -27,6 +27,7 @@
 import subprocess
 import os
+import time
 from socket import gethostname as get_unit_hostname
@@ -45,6 +46,9 @@
 is_leader as juju_is_leader,
 status_set,
 )
+from charmhelpers.core.host import (
+ modulo_distribution,
+)
 from charmhelpers.core.decorators import (
 retry_on_exception,
 )
@@ -361,3 +365,29 @@ def canonical_url(configs, vip_setting='vip'):
 else:
 addr = unit_get('private-address')
 return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+ ''' Distribute operations by waiting based on modulo_distribution
+
+ If modulo and/or wait are not set, check config_get for those values.
+
+ :param modulo: int The modulo number creates the group distribution
+ :param wait: int The constant time wait value
+ :param operation_name: string Operation name for status message
+ i.e. 'restart'
+ :side effect: Calls config_get()
+ :side effect: Calls log()
+ :side effect: Calls status_set()
+ :side effect: Calls time.sleep()
+ '''
+ if modulo is None:
+ modulo = config_get('modulo-nodes')
+ if wait is None:
+ wait = config_get('known-wait')
+ calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
+ msg = "Waiting {} seconds for {} ...".format(calculated_wait,
+ operation_name)
+ log(msg, DEBUG)
+ status_set('maintenance', msg)
+ time.sleep(calculated_wait)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
index d812948a..d32bf44e 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py
@@ -70,12 +70,12 @@ def _get_loaded_modules():
 """Returns the modules which are enabled in Apache."""
 output = subprocess.check_output(['apache2ctl', '-M'])
 modules = []
- for line in output.strip().split():
+ for line in output.splitlines():
 # Each line of the enabled module output looks like:
 # module_name (static|shared)
 # Plus a header line at the top of the output which is stripped
 # out by the regex.
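distributed_wait() above turns the unit number into a staggered delay before an expensive operation. A minimal usage sketch for a coordinated restart; the service name is hypothetical:

    from charmhelpers.contrib.hahelpers.cluster import distributed_wait
    from charmhelpers.core.host import service_restart

    # With modulo=3 and wait=30, units 0, 1 and 2 wait 0s, 30s and 60s
    # respectively; unit 3 wraps around and waits 0s again.
    distributed_wait(modulo=3, wait=30, operation_name='restart')
    service_restart('ceph-mon')  # hypothetical clustered service

Passing modulo and wait explicitly skips the config_get() lookups of the modulo-nodes and known-wait options.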
- matcher = re.search(r'^ (\S*)', line) + matcher = re.search(r'^ (\S*)_module (\S*)', line) if matcher: modules.append(matcher.group(1)) return modules diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index d7e6debf..a871ce37 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None): if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) - except: + except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback @@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True): if not result: try: result = socket.gethostbyaddr(address)[0] - except: + except Exception: return None else: result = address diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py index 1501641e..547de09c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50): target, name, source, str(priority) ] subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index f67f3265..ece75df8 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
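The context.py hunk that follows switches the haproxy backends mapping to collections.OrderedDict and iterates related units in sorted order. A reduced sketch of why, with hypothetical unit addresses:

    import collections

    addresses = {'ceph-mon/0': '10.0.0.1', 'ceph-mon/1': '10.0.0.2',
                 'ceph-mon/2': '10.0.0.3'}  # hypothetical example data
    backends = collections.OrderedDict()
    for unit in sorted(addresses):
        backends[unit] = addresses[unit]
    # Insertion order is preserved on Python 2 and 3 alike, so the
    # rendered haproxy backend list is byte-identical on every hook run
    # instead of following unordered dict iteration.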
+import collections import glob import json import math @@ -578,11 +579,14 @@ def __call__(self): laddr = get_address_in_network(config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, - netmask), - 'backends': {l_unit: laddr}} + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: @@ -594,10 +598,13 @@ def __call__(self): # match in the frontend cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -628,6 +635,8 @@ def __call__(self): ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + ctxt['stat_port'] = '8888' db = kv() @@ -802,8 +811,9 @@ def __call__(self): else: # Expect cert/key provided in config (currently assumed that ca # uses ip for cn) - cn = resolve_address(endpoint_type=INTERNAL) - self.configure_cert(cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: @@ -843,15 +853,6 @@ def _ensure_packages(self): for pkgs in self.packages: ensure_packages(pkgs) - def _save_flag_file(self): - if self.network_manager == 'quantum': - _file = '/etc/nova/quantum_plugin.conf' - else: - _file = '/etc/nova/neutron_plugin.conf' - - with open(_file, 'wb') as out: - out.write(self.plugin + '\n') - def ovs_ctxt(self): driver = neutron_plugin_attribute(self.plugin, 'driver', self.network_manager) @@ -996,7 +997,6 @@ def __call__(self): flags = config_flags_parser(alchemy_flags) ctxt['neutron_alchemy_flags'] = flags - self._save_flag_file() return ctxt @@ -1176,7 +1176,7 @@ def __call__(self): if sub_config and sub_config != '': try: sub_config = json.loads(sub_config) - except: + except Exception: log('Could not parse JSON from ' 'subordinate_configuration setting from %s' % rid, level=ERROR) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index 37fa0eb0..0f847f56 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -59,18 +59,13 @@ def determine_dkms_package(): def quantum_plugins(): - from charmhelpers.contrib.openstack import context return { 'ovs': { 'config': '/etc/quantum/plugins/openvswitch/' 'ovs_quantum_plugin.ini', 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 
'OVSQuantumPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': ['quantum-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], @@ -82,11 +77,7 @@ def quantum_plugins(): 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'QuantumPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['quantum-server', @@ -100,7 +91,6 @@ def quantum_plugins(): def neutron_plugins(): - from charmhelpers.contrib.openstack import context release = os_release('nova-common') plugins = { 'ovs': { @@ -108,11 +98,7 @@ def neutron_plugins(): 'ovs_neutron_plugin.ini', 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 'OVSNeutronPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['neutron-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], @@ -124,11 +110,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -138,11 +120,7 @@ def neutron_plugins(): 'nsx': { 'config': '/etc/neutron/plugins/vmware/nsx.ini', 'driver': 'vmware', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -152,11 +130,7 @@ def neutron_plugins(): 'n1kv': { 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], @@ -167,11 +141,7 @@ def neutron_plugins(): 'Calico': { 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', @@ -189,11 +159,7 @@ def neutron_plugins(): 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': 
['neutron-server', 'neutron-plugin-nuage'], @@ -203,10 +169,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [ - context.SharedDBContext(user=config('database-user'), - database=config('database'), - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': ['plumgrid-lxc', 'iovisor-dkms'], @@ -217,11 +180,7 @@ def neutron_plugins(): 'midonet': { 'config': '/etc/neutron/plugins/midonet/midonet.ini', 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 837a1674..b073c77b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -95,7 +95,7 @@ from charmhelpers.fetch.snap import ( snap_install, snap_refresh, - SNAP_CHANNELS, + valid_snap_channel, ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -426,7 +426,7 @@ def get_os_codename_package(package, fatal=True): try: pkg = cache[package] - except: + except Exception: if not fatal: return None # the package is unknown to the current apt cache. @@ -579,6 +579,9 @@ def configure_installation_source(source_plus_key): Note that the behaviour on error is to log the error to the juju log and then call sys.exit(1). """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return # extract the key if there is one, denoted by a '|' in the rel source, key = get_source_and_pgp_key(source_plus_key) @@ -615,7 +618,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): juju_rc_path = "%s/%s" % (charm_dir(), script_path) if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wb') as rc_script: + with open(juju_rc_path, 'wt') as rc_script: rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) @@ -794,7 +797,7 @@ def git_default_repos(projects_yaml): service = service_name() core_project = service - for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): if projects_yaml == default: # add the requirements repo first @@ -1615,7 +1618,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): upgrade_callback(configs=configs) action_set({'outcome': 'success, upgrade completed.'}) ret = True - except: + except Exception: action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) action_fail('do_openstack_upgrade resulted in an ' @@ -1720,7 +1723,7 @@ def is_unit_paused_set(): kv = t[0] # transform something truth-y into a Boolean. return not(not(kv.get('unit-paused'))) - except: + except Exception: return False @@ -2048,7 +2051,7 @@ def update_json_file(filename, items): def snap_install_requested(): """ Determine if installing from snaps - If openstack-origin is of the form snap:channel-series-release + If openstack-origin is of the form snap:track/channel[/branch] and channel is in SNAPS_CHANNELS return True. 
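The parsing below treats everything after the snap: prefix as a track/channel spec rather than the old channel-series-release triple. A worked example, assuming valid_snap_channel() from the charmhelpers.fetch.snap module synced in this patch:

    from charmhelpers.fetch.snap import valid_snap_channel

    origin = 'snap:ocata/stable'             # example openstack-origin
    _src = origin[5:]                        # 'ocata/stable'
    channel = _src.split('/')[1] if '/' in _src else 'stable'
    valid_snap_channel(channel)              # True, or raises
                                             # InvalidSnapChannel
    snap_flag = '--channel={}'.format(_src)  # as passed to `snap install`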
""" origin = config('openstack-origin') or "" @@ -2056,10 +2059,12 @@ def snap_install_requested(): return False _src = origin[5:] - channel, series, release = _src.split('-') - if channel.lower() in SNAP_CHANNELS: - return True - return False + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @@ -2067,7 +2072,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @param snaps: List of snaps @param src: String of openstack-origin or source of the form - snap:channel-series-track + snap:track/channel @param mode: String classic, devmode or jailmode @returns: Dictionary of snaps with channels and modes """ @@ -2077,8 +2082,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): return {} _src = src[5:] - _channel, _series, _release = _src.split('-') - channel = '--channel={}/{}'.format(_release, _channel) + channel = '--channel={}'.format(_src) return {snap: {'channel': channel, 'mode': mode} for snap in snaps} @@ -2090,8 +2094,8 @@ def install_os_snaps(snaps, refresh=False): @param snaps: Dictionary of snaps with channels and modes of the form: {'snap_name': {'channel': 'snap_channel', 'mode': 'snap_mode'}} - Where channel a snapstore channel and mode is --classic, --devmode or - --jailmode. + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. @param post_snap_install: Callback function to run after snaps have been installed """ diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index e5a01b1b..39231612 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -370,9 +370,10 @@ def get_mon_map(service): Also raises CalledProcessError if our ceph command fails """ try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -457,7 +458,7 @@ def monitor_key_get(service, key): try: output = check_output( ['ceph', '--id', service, - 'config-key', 'get', str(key)]) + 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: log("Monitor config-key get failed with message: {}".format( @@ -500,6 +501,8 @@ def get_erasure_profile(service, name): out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name): """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') + 
out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -714,9 +721,12 @@ def get_osds(service): """ version = ceph_version() if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) return None @@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1018,7 +1032,9 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) + if six.PY3: + output = output.decode('UTF-8') output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index 4719f53c..7f2a0604 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device): ''' vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) return vg diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index 3dc0df68..c9428894 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,6 +64,6 @@ def is_device_mounted(device): ''' try: out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except: + except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) 
as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. + if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. 
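The _port_op() wrapper above maps ICMP to a bare protocol argument instead of the usual port/protocol pair. The expected hook-tool invocations, as a sketch run inside a Juju hook environment:

    from charmhelpers.core.hookenv import open_port

    open_port(8080)            # runs: open-port 8080/TCP
    open_port(8080, 'UDP')     # runs: open-port 8080/UDP
    open_port(None, 'ICMP')    # runs: open-port ICMP (port is unused)
    # On Juju releases before 2.3 the ICMP form fails; _port_op()
    # swallows that CalledProcessError so the charm keeps working there.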
+ + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. + + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. 
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
 def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
 """Replicate the contents of a path"""
 options = options or ['--delete', '--executability']
@@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path):
 lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
 output = "\n".join(lines)
 return output
+
+
+def modulo_distribution(modulo=3, wait=30):
+ """ Modulo distribution
+
+ This helper uses the unit number, a modulo value and a constant wait time
+ to produce a calculated wait time distribution. This is useful in large
+ scale deployments to distribute load during an expensive operation such as
+ service restarts.
+
+ If you have 1000 nodes that need to restart, 100 at a time with 1 minute
+ between groups:
+
+ time.sleep(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially, set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.sleep(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ return (unit_number % modulo) * wait
diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py
index 685dabde..e8df0452 100644
--- a/ceph-osd/hooks/charmhelpers/core/strutils.py
+++ b/ceph-osd/hooks/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
 if isinstance(value, six.string_types):
 value = six.text_type(value)
 else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
 raise ValueError(msg)
 matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
 class BasicStringComparator(object):
diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py
index 54ec969f..7af875c2 100644
--- a/ceph-osd/hooks/charmhelpers/core/unitdata.py
+++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py
@@ -358,7 +358,7 @@ def hook_scope(self, name=""):
 try:
 yield self.revision
 self.revision = None
- except:
+ except Exception:
 self.flush(False)
 self.revision
= None raise diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py index 112a54c3..395836c7 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/snap.py +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -132,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 40e1cb5b..910e96a6 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -572,7 +572,7 @@ def get_upstream_version(package): cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. return None diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index 0bdbf8d5..eb5eee3b 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -11,7 +11,7 @@ check_and_install() { fi } -PYTHON="python" +PYTHON="python3" for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} diff --git a/ceph-osd/hooks/install_deps b/ceph-osd/hooks/install_deps index da4ba5d8..bb600820 100755 --- a/ceph-osd/hooks/install_deps +++ b/ceph-osd/hooks/install_deps @@ -11,7 +11,7 @@ check_and_install() { fi } -PYTHON="python" +PYTHON="python3" for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm index 43cae864..71fc9ce7 100755 --- a/ceph-osd/hooks/upgrade-charm +++ b/ceph-osd/hooks/upgrade-charm @@ -1,6 +1,7 @@ #!/bin/bash # Wrapper to ensure that old python bytecode isn't hanging around # after we upgrade the charm with newer libraries -rm -rf **/*.pyc +find . -iname '*.pyc' -delete +find . 
-name '__pycache__' -prune -exec rm -rf "{}" \; ./hooks/install_deps exec ./hooks/upgrade-charm.real diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 512d1180..b49254c5 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -44,14 +44,14 @@ try: import jinja2 except ImportError: - apt_install(filter_installed_packages(['python-jinja2']), + apt_install(filter_installed_packages(['python3-jinja2']), fatal=True) import jinja2 try: import dns.resolver except ImportError: - apt_install(filter_installed_packages(['python-dnspython']), + apt_install(filter_installed_packages(['python3-dnspython']), fatal=True) import dns.resolver @@ -65,9 +65,9 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" - with open(apt_sources, "r") as sources: + with open(apt_sources, "rt", encoding='UTF-8') as sources: lines = sources.readlines() - with open(apt_sources, "w") as sources: + with open(apt_sources, "wt", encoding='UTF-8') as sources: for line in lines: if pocket in line: sources.write(re.sub('^# deb', 'deb', line)) diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index b071b91e..95ee7799 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import json import os @@ -134,7 +135,7 @@ def process_requests(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - msg = ("Missing or invalid api version (%s)" % version) + msg = ("Missing or invalid api version ({})".format(version)) resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id @@ -231,7 +232,7 @@ def add_pool_to_group(pool, group, namespace=None): def pool_permission_list_for_service(service): """Build the permission string for Ceph for a given service""" permissions = [] - permission_types = {} + permission_types = collections.OrderedDict() for permission, group in service["group_names"].items(): if permission not in permission_types: permission_types[permission] = [] @@ -267,9 +268,7 @@ def get_service_groups(service, namespace=None): key="cephx.services.{}".format(service)) try: service = json.loads(service_json) - except TypeError: - service = None - except ValueError: + except (TypeError, ValueError): service = None if service: service['groups'] = _build_service_groups(service, namespace) @@ -296,7 +295,7 @@ def _build_service_groups(service, namespace=None): } """ all_groups = {} - for _, groups in service['group_names'].items(): + for groups in service['group_names'].values(): for group in groups: name = group if namespace: @@ -316,9 +315,7 @@ def get_group(group_name): group_json = monitor_key_get(service='admin', key=group_key) try: group = json.loads(group_json) - except TypeError: - group = None - except ValueError: + except (TypeError, ValueError): group = None if not group: group = { @@ -391,9 +388,8 @@ def handle_erasure_pool(request, service): percent_data=weight) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, - erasure_profile), - level=INFO) + log("Creating pool '{}' (erasure_profile={})" + .format(pool.name, erasure_profile), level=INFO) pool.create() # Set a quota if requested @@ -446,11 +442,11 @@ def handle_replicated_pool(request, service): pool = ReplicatedPool(service=service, 
name=pool_name, **kwargs) if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), + log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) pool.create() else: - log("Pool '%s' already exists - skipping create" % pool.name, + log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) # Set a quota if requested @@ -519,7 +515,7 @@ def handle_set_pool_value(request, service): 'key': request.get('key'), 'value': request.get('value')} if params['key'] not in POOL_KEYS: - msg = "Invalid key '%s'" % params['key'] + msg = "Invalid key '{}'".format(params['key']) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} @@ -685,7 +681,7 @@ def handle_rgw_create_user(request, service): ] ) try: - user_json = json.loads(create_output) + user_json = json.loads(str(create_output.decode('UTF-8'))) return {'exit-code': 0, 'user': user_json} except ValueError as err: log(err, level=ERROR) @@ -790,10 +786,10 @@ def process_requests_v1(reqs): operation failed along with an explanation). """ ret = None - log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % op, level=DEBUG) + log("Processing op='{}'".format(op), level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. svc = 'admin' @@ -848,7 +844,7 @@ def process_requests_v1(reqs): elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) else: - msg = "Unknown operation '%s'" % op + msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} diff --git a/ceph-osd/lib/ceph/crush_utils.py b/ceph-osd/lib/ceph/crush_utils.py index 1c777f34..8b6876c1 100644 --- a/ceph-osd/lib/ceph/crush_utils.py +++ b/ceph-osd/lib/ceph/crush_utils.py @@ -60,7 +60,7 @@ def __init__(self): ids = list(map( lambda x: int(x), re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids.sort() + ids = sorted(ids) if roots != []: for root in roots: buckets.append(CRUSHBucket(root[0], root[1], True)) @@ -73,8 +73,11 @@ def __init__(self): def load_crushmap(self): try: - crush = check_output(['ceph', 'osd', 'getcrushmap']) - return check_output(['crushtool', '-d', '-'], stdin=crush.stdout) + crush = str(check_output(['ceph', 'osd', 'getcrushmap']) + .decode('UTF-8')) + return str(check_output(['crushtool', '-d', '-'], + stdin=crush.stdout) + .decode('UTF-8')) except CalledProcessError as e: log("Error occured while loading and decompiling CRUSH map:" "{}".format(e), ERROR) @@ -99,10 +102,12 @@ def save(self): """Persist Crushmap to Ceph""" try: crushmap = self.build_crushmap() - compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o', - '/dev/stdout'], stdin=crushmap) - ceph_output = check_output(['ceph', 'osd', 'setcrushmap', '-i', - '/dev/stdin'], stdin=compiled) + compiled = str(check_output(['crushtool', '-c', '/dev/stdin', '-o', + '/dev/stdout'], stdin=crushmap) + .decode('UTF-8')) + ceph_output = str(check_output(['ceph', 'osd', 'setcrushmap', '-i', + '/dev/stdin'], stdin=compiled) + .decode('UTF-8')) return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 0fd3e801..6ab697f0 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -30,6 +30,7 @@ from 
charmhelpers.core import hookenv from charmhelpers.core import templating +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import ( chownr, cmp_pkgrevno, @@ -380,8 +381,9 @@ def get_block_uuid(block_dev): :returns: The UUID of the device or None on Error. """ try: - block_info = subprocess.check_output( - ['blkid', '-o', 'export', block_dev]) + block_info = str(subprocess + .check_output(['blkid', '-o', 'export', block_dev]) + .decode('UTF-8')) for tag in block_info.split('\n'): parts = tag.split('=') if parts[0] == 'UUID': @@ -532,8 +534,9 @@ def get_osd_weight(osd_id): :raises: CalledProcessError if our ceph command fails. """ try: - tree = subprocess.check_output( - ['ceph', 'osd', 'tree', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'osd', 'tree', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -560,9 +563,10 @@ def get_osd_tree(service): Also raises CalledProcessError if our ceph command fails """ try: - tree = subprocess.check_output( - ['ceph', '--id', service, - 'osd', 'tree', '--format=json']) + tree = str(subprocess + .check_output(['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) crush_list = [] @@ -627,7 +631,7 @@ def _get_osd_num_from_dirname(dirname): """ match = re.search('ceph-(?P\d+)', dirname) if not match: - raise ValueError("dirname not in correct format: %s" % dirname) + raise ValueError("dirname not in correct format: {}".format(dirname)) return match.group('osd_id') @@ -717,7 +721,7 @@ def get_version(): def error_out(msg): - log("FATAL ERROR: %s" % msg, + log("FATAL ERROR: {}".format(msg), level=ERROR) sys.exit(1) @@ -735,7 +739,9 @@ def is_quorum(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) except subprocess.CalledProcessError: return False except ValueError: @@ -762,7 +768,9 @@ def is_leader(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) except subprocess.CalledProcessError: return False except ValueError: @@ -954,8 +962,9 @@ def is_osd_disk(dev): partitions = get_partition_list(dev) for partition in partitions: try: - info = subprocess.check_output(['sgdisk', '-i', partition.number, - dev]) + info = str(subprocess + .check_output(['sgdisk', '-i', partition.number, dev]) + .decode('UTF-8')) info = info.split("\n") # IGNORE:E1103 for line in info: for ptype in CEPH_PARTITIONS: @@ -1038,7 +1047,7 @@ def generate_monitor_secret(): '--name=mon.', '--gen-key' ] - res = subprocess.check_output(cmd) + res = str(subprocess.check_output(cmd).decode('UTF-8')) return "{}==".format(res.split('=')[1].strip()) @@ -1187,7 +1196,10 @@ def create_named_keyring(entity, name, caps=None): for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + return (parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip())) # IGNORE:E1103 def get_upgrade_key(): @@ -1204,7 +1216,7 @@ def get_named_key(name, caps=None, pool_list=None): """ try: # Does the key already exist? 
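# The hunks in this sync repeat one Python 3 porting pattern; a minimal
# standalone sketch of it (function and variable names here are
# illustrative, not part of the charm): check_output() returns bytes on
# Python 3, so results must be decoded before json.loads() or .split().
import json
import subprocess

def ceph_json(*args):
    # bytes under Python 3; decode to text before parsing
    raw = subprocess.check_output(('ceph',) + args + ('--format=json',))
    return json.loads(raw.decode('UTF-8'))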
- output = subprocess.check_output( + output = str(subprocess.check_output( [ 'sudo', '-u', ceph_user(), @@ -1217,7 +1229,7 @@ def get_named_key(name, caps=None, pool_list=None): 'auth', 'get', 'client.{}'.format(name), - ]).strip() + ]).decode('UTF-8')).strip() return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! @@ -1246,7 +1258,10 @@ def get_named_key(name, caps=None, pool_list=None): cmd.extend([subsystem, '; '.join(subcaps)]) log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + return parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip()) # IGNORE:E1103 def upgrade_key_caps(key, caps): @@ -1287,40 +1302,53 @@ def bootstrap_monitor_cluster(secret): mkdir(path, owner=ceph_user(), group=ceph_user()) # end changes for Ceph >= 0.61.3 try: - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', hostname, - '--keyring', keyring]) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. - cmd = ['ceph-create-keys', '--id', hostname] - subprocess.check_call(cmd) + add_keyring_to_ceph(keyring, + secret, + hostname, + path, + done, + init_marker) + except: raise finally: os.unlink(keyring) +@retry_on_exception(3, base_delay=5) +def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. 
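# Hedged sketch of the retry decorator introduced above: as used in this
# patch, retry_on_exception(3, base_delay=5) re-invokes the wrapped
# callable up to three times when it raises, waiting from a 5 second base
# delay between attempts (the exact backoff policy is charmhelpers' own).
from charmhelpers.core.decorators import retry_on_exception

@retry_on_exception(3, base_delay=5)
def wait_for_admin_keyring():
    # illustrative body, mirroring the keyring size check added below
    import os
    if not os.stat('/etc/ceph/ceph.client.admin.keyring').st_size:
        raise Exception('admin keyring not populated yet')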
+ cmd = ['ceph-create-keys', '--id', hostname] + subprocess.check_call(cmd) + osstat = os.stat("/etc/ceph/ceph.client.admin.keyring") + if not osstat.st_size: + raise Exception + + def update_monfs(): hostname = socket.gethostname() monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1347,7 +1375,7 @@ def maybe_zap_journal(journal_dev): def get_partitions(dev): cmd = ['partx', '--raw', '--noheadings', dev] try: - out = subprocess.check_output(cmd).splitlines() + out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines() log("get partitions: {}".format(out), level=DEBUG) return out except subprocess.CalledProcessError as e: @@ -1515,7 +1543,7 @@ def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" cmd = ['pgrep', 'ceph-osd'] try: - result = subprocess.check_output(cmd) + result = str(subprocess.check_output(cmd).decode('UTF-8')) return result.split() except subprocess.CalledProcessError: return [] @@ -1531,7 +1559,9 @@ def get_cephfs(service): # This command wasn't introduced until 0.86 ceph return [] try: - output = subprocess.check_output(["ceph", '--id', service, "fs", "ls"]) + output = str(subprocess + .check_output(["ceph", '--id', service, "fs", "ls"]) + .decode('UTF-8')) if not output: return [] """ @@ -2065,7 +2095,9 @@ def list_pools(service): """ try: pool_list = [] - pools = subprocess.check_output(['rados', '--id', service, 'lspools']) + pools = str(subprocess + .check_output(['rados', '--id', service, 'lspools']) + .decode('UTF-8')) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -2126,10 +2158,8 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): """Pretty print supported upgrade paths for ceph""" - lines = [] - for key, value in UPGRADE_PATHS.iteritems(): - lines.append("{} -> {}".format(key, value)) - return lines + return ["{} -> {}".format(key, value) + for key, value in UPGRADE_PATHS.iteritems()] def resolve_ceph_version(source): @@ -2149,7 +2179,9 @@ def get_ceph_pg_stat(): :returns: dict """ try: - tree = subprocess.check_output(['ceph', 'pg', 'stat', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'pg', 'stat', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) if not json_tree['num_pg_by_state']: @@ -2173,8 +2205,9 @@ def get_ceph_health(): status, use get_ceph_health()['overall_status']. """ try: - tree = subprocess.check_output( - ['ceph', 'status', '--format=json']) + tree = str(subprocess + .check_output(['ceph', 'status', '--format=json']) + .decode('UTF-8')) try: json_tree = json.loads(tree) # Make sure children are present in the json @@ -2201,9 +2234,12 @@ def reweight_osd(osd_num, new_weight): :raises CalledProcessError: if an error occurs invoking the systemd cmd """ try: - cmd_result = subprocess.check_output( - ['ceph', 'osd', 'crush', 'reweight', "osd.{}".format(osd_num), - new_weight], stderr=subprocess.STDOUT) + cmd_result = str(subprocess + .check_output(['ceph', 'osd', 'crush', + 'reweight', "osd.{}".format(osd_num), + new_weight], + stderr=subprocess.STDOUT) + .decode('UTF-8')) expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( ID=osd_num) + " to {}".format(new_weight) log(cmd_result) @@ -2246,3 +2282,25 @@ def bootstrap_manager(): unit = 'ceph-mgr@{}'.format(hostname) subprocess.check_call(['systemctl', 'enable', unit]) service_restart(unit) + + +def osd_noout(enable): + """Sets or unsets 'noout' + + :param enable: bool. True to set noout, False to unset. + :returns: bool. 
True if output looks right. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + operation = { + True: 'set', + False: 'unset', + } + try: + subprocess.check_call(['ceph', '--id', 'admin', + 'osd', operation[enable], + 'noout']) + log('running ceph osd {} noout'.format(operation[enable])) + return True + except subprocess.CalledProcessError as e: + log(e) + raise diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 9edd4bbf..6757a47d 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -5,12 +5,12 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 +charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ requests==2.6.0 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index dfa86480..bb23dc68 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -347,7 +347,7 @@ def test_300_ceph_osd_config(self): }, } - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "ceph config error: {}".format(ret) @@ -364,7 +364,7 @@ def test_302_cinder_rbd_config(self): 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' } } - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "cinder (rbd) config error: {}".format(ret) @@ -394,7 +394,7 @@ def test_304_glance_rbd_config(self): section = 'DEFAULT' expected = {section: config} - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "glance (rbd) config error: {}".format(ret) @@ -411,7 +411,7 @@ def test_306_nova_rbd_config(self): 'rbd_secret_uuid': u.not_null } } - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "nova (rbd) config error: {}".format(ret) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index fc20a76d..42d6b1f2 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -303,22 +310,29 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if 
self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() <= self.trusty_icehouse: + # Juno or earlier pools = [ + 'data', + 'metadata', 'rbd', 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', 'cinder-ceph', 'glance' ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] if radosgw: pools.extend([ diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
+ + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-osd/tests/charmhelpers/core/strutils.py b/ceph-osd/tests/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-osd/tests/charmhelpers/core/strutils.py +++ b/ceph-osd/tests/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-osd/tests/charmhelpers/core/unitdata.py b/ceph-osd/tests/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-osd/tests/charmhelpers/core/unitdata.py +++ b/ceph-osd/tests/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 7c2936e3..a8188146 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -2,8 +2,9 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
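# Hedged usage sketch for the bytes_from_string() change in the strutils
# hunk above; the expected values assume the 1024-based BYTE_POWER table
# already present in charmhelpers.core.strutils.
from charmhelpers.core.strutils import bytes_from_string

assert bytes_from_string('3K') == 3 * 1024
assert bytes_from_string('1M') == 1024 ** 2
# bare integer strings are now accepted as a byte count instead of raising
assert bytes_from_string('2048') == 2048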
[tox] -envlist = pep8,py27 +envlist = pep8,py27,py35,py36 skipsdist = True +skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} @@ -20,12 +21,19 @@ passenv = HOME TERM AMULET_* CS_API_* basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +# temporarily disable py27 +commands = /bin/true [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index 84f643d0..633fa7da 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -16,3 +16,4 @@ sys.path.append('hooks') sys.path.append('lib') sys.path.append('actions') +sys.path.append('unit_tests') diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py index d8494a01..141ffbd2 100644 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ b/ceph-osd/unit_tests/test_replace_osd.py @@ -121,7 +121,7 @@ def test_lookup_device_name(self, disk_stats): @patch('replace_osd.os.lstat') def test_get_device_number(self, lstat): lstat.return_value = posix.stat_result([ - 16877, 16, 51729L, 3, 0, 0, 217, 0, 1458086872, 1458086872 + 16877, 16, 51729, 3, 0, 0, 217, 0, 1458086872, 1458086872 ]) major, minor = replace_osd.get_device_number(1) assert major == 202 diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index b7d62c6c..e9abb33a 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -45,7 +45,7 @@ def test_tune_nic(self, save_sysctls, get_link_speed, check_output): @patch.object(ceph.subprocess, 'check_output') def test_get_block_uuid(self, check_output): check_output.return_value = \ - 'UUID=378f3c86-b21a-4172-832d-e2b3d4bc7511\nTYPE=ext2\n' + b'UUID=378f3c86-b21a-4172-832d-e2b3d4bc7511\nTYPE=ext2\n' uuid = ceph.get_block_uuid('/dev/sda1') self.assertEqual(uuid, '378f3c86-b21a-4172-832d-e2b3d4bc7511') diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index ceb34cab..941ddb09 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -44,7 +44,7 @@ def load_config(): if not config: logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % f) + 'of {}. '.format(f)) raise Exception return yaml.safe_load(open(config).read())['options'] @@ -57,7 +57,7 @@ def get_default_config(): ''' default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: default_config[k] = v['default'] else: @@ -138,5 +138,5 @@ def stub_open(*args, **kwargs): mock_open(*args, **kwargs) yield mock_file - with patch('__builtin__.open', stub_open): + with patch('builtins.open', stub_open): yield mock_open, mock_file From 4fb443039b5b2635f907a7bed9597163a37765c4 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 17 Nov 2017 16:40:11 +0000 Subject: [PATCH 1410/2699] Avoiding conflicting CRUSH bucket keys As of Ceph Luminous, bucket keys within the CRUSH map must be unique; The root bucket is always called 'default' so remap Juju and configuration provided bucket keys to 'default-{row,rack}' ensuring that keys are unique. 
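In code terms the remap is a small guard applied before the bucket key is composed into the CRUSH location string; a minimal sketch of the intended behaviour (the helper name is illustrative, the real change is inline in az_info() below):

    def remap_default(bucket, bucket_type):
        # 'default' names the root of the CRUSH hierarchy in Luminous,
        # so qualify it per bucket type to keep keys unique
        if bucket == 'default':
            return 'default-{}'.format(bucket_type)
        return bucket

    remap_default('default', 'rack')  # -> 'default-rack'
    remap_default('zone1', 'rack')    # -> 'zone1'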
Change-Id: I7fa3dd9e001cca40e678e8983a1d7ed19d51e2fe Closes-Bug: 1729911 --- ceph-osd/hooks/ceph_hooks.py | 8 ++++ ceph-osd/unit_tests/test_ceph_hooks.py | 51 ++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 928a49fb..22925d10 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -195,8 +195,16 @@ def az_info(): config_az = config("availability_zone") juju_az_info = os.environ.get('JUJU_AVAILABILITY_ZONE') if juju_az_info: + # NOTE(jamespage): avoid conflicting key with root + # of crush hierarchy + if juju_az_info == 'default': + juju_az_info = 'default-rack' az_info = "{} rack={}".format(az_info, juju_az_info) if config_az: + # NOTE(jamespage): avoid conflicting key with root + # of crush hierarchy + if config_az == 'default': + config_az = 'default-row' az_info = "{} row={}".format(az_info, config_az) if az_info != "": log("AZ Info: " + az_info) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index f40d07fe..6be78cc1 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -388,3 +388,54 @@ def test_get_journal_devices_blacklist(self, mock_config, mock_os_path_exists.assert_called() mock_get_blacklist.assert_called() self.assertEqual(devices, set(['/dev/vdb'])) + + @patch.object(ceph_hooks, 'log') + @patch.object(ceph_hooks, 'config') + @patch('os.environ') + def test_az_info_unset(self, environ, config, log): + config.return_value = None + environ.get.return_value = None + + self.assertEqual(ceph_hooks.az_info(), None) + + config.assert_called_with('availability_zone') + environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE') + + @patch.object(ceph_hooks, 'log') + @patch.object(ceph_hooks, 'config') + @patch('os.environ') + def test_az_info_config(self, environ, config, log): + config.return_value = 'dc-01' + environ.get.return_value = None + + self.assertEqual(ceph_hooks.az_info(), + ' row=dc-01') + + config.assert_called_with('availability_zone') + environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE') + + @patch.object(ceph_hooks, 'log') + @patch.object(ceph_hooks, 'config') + @patch('os.environ') + def test_az_info_juju_az(self, environ, config, log): + config.return_value = 'dc-01' + environ.get.return_value = 'zone1' + + self.assertEqual(ceph_hooks.az_info(), + ' rack=zone1 row=dc-01') + + config.assert_called_with('availability_zone') + environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE') + + @patch.object(ceph_hooks, 'log') + @patch.object(ceph_hooks, 'config') + @patch('os.environ') + def test_az_info_default_remap(self, environ, config, log): + config.return_value = 'default' + environ.get.return_value = 'default' + + self.assertEqual(ceph_hooks.az_info(), + ' rack=default-rack row=default-row') + + config.assert_called_with('availability_zone') + environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE') From 295213d8c88c00029837a94b614fd51a02b78d4f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 22 Nov 2017 18:23:21 +0000 Subject: [PATCH 1411/2699] Sync charm-helpers Change-Id: I2944c6ec19f795d52b28baf4a7f6afefdd6e07d4 --- .../charmhelpers/contrib/openstack/amulet/deployment.py | 6 +++++- ceph-mon/hooks/charmhelpers/contrib/openstack/context.py | 8 ++++---- ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py | 4 ++++ .../charmhelpers/contrib/openstack/amulet/deployment.py | 6 +++++- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git 
a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 13a12f62..e37f2834 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -270,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -281,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -298,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index ece75df8..e6c0e9fe 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -293,7 +293,7 @@ def __call__(self): def db_ssl(rdata, ctxt, ssl_dir): if 'ssl_ca' in rdata and ssl_dir: ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_ca'])) ctxt['database_ssl_ca'] = ca_path @@ -308,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir): log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) - with open(cert_path, 'w') as fh: + with open(cert_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_cert'])) ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'w') as fh: + with open(key_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_key'])) ctxt['database_ssl_key'] = key_path @@ -459,7 +459,7 @@ def __call__(self): ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index a1267136..8a541d40 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -140,6 +140,7 @@ ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) @@ -157,6 +158,7 @@ ('2016.2', 'newton'), ('2017.1', 'ocata'), ('2017.2', 'pike'), + ('2018.1', 'queens'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +189,8 @@ ['2.11.0', '2.12.0', '2.13.0']), ('pike', ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py 
b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 13a12f62..e37f2834 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -270,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -281,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -298,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] From f4b29bb01ce762070707020b4e29b495b0c33136 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 22 Nov 2017 18:23:30 +0000 Subject: [PATCH 1412/2699] Sync charm-helpers Change-Id: I7521ba297ab6a9d23246ca39b7cf508fd0b50f48 --- .../hooks/charmhelpers/contrib/hahelpers/apache.py | 2 +- .../hooks/charmhelpers/contrib/openstack/context.py | 8 ++++---- ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py | 6 ++++++ .../contrib/openstack/amulet/deployment.py | 10 +++++++--- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py index d0c69942..22acb683 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -90,6 +90,6 @@ def install_ca_cert(ca_cert): log("CA cert is the same as installed version", level=INFO) else: log("Installing new CA cert", level=INFO) - with open(cert_file, 'w') as crt: + with open(cert_file, 'wb') as crt: crt.write(ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index ece75df8..e6c0e9fe 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -293,7 +293,7 @@ def __call__(self): def db_ssl(rdata, ctxt, ssl_dir): if 'ssl_ca' in rdata and ssl_dir: ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_ca'])) ctxt['database_ssl_ca'] = ca_path @@ -308,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir): log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) - with open(cert_path, 'w') as fh: + with open(cert_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_cert'])) ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'w') as fh: + with open(key_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_key'])) ctxt['database_ssl_key'] = key_path @@ -459,7 +459,7 @@ 
def __call__(self): ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index b073c77b..8a541d40 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -140,6 +140,7 @@ ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) @@ -157,6 +158,7 @@ ('2016.2', 'newton'), ('2017.1', 'ocata'), ('2017.2', 'pike'), + ('2018.1', 'queens'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +189,8 @@ ['2.11.0', '2.12.0', '2.13.0']), ('pike', ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0']), ]) # >= Liberty version->codename mapping @@ -412,6 +416,8 @@ def get_os_codename_package(package, fatal=True): cmd = ['snap', 'list', package] try: out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') except subprocess.CalledProcessError as e: return None lines = out.split('\n') diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 42d6b1f2..e37f2834 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -270,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -281,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -298,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -310,8 +314,8 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() <= self.trusty_icehouse: - # Juno or earlier + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ 'data', 'metadata', From 0c18329ac6668f8f038266edfef0ca2487686008 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 22 Nov 2017 18:23:45 +0000 Subject: [PATCH 1413/2699] Sync charm-helpers Change-Id: I6ff4a1b812057e3a4d67b992dd7b8e4582d8c12c --- .../charmhelpers/contrib/charmsupport/nrpe.py | 10 +- .../contrib/hardening/audits/apache.py | 4 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/alternatives.py | 13 ++ .../charmhelpers/contrib/openstack/utils.py | 42 ++++--- .../charmhelpers/contrib/python/debug.py | 2 +- 
.../contrib/storage/linux/ceph.py | 42 +++++-- .../charmhelpers/contrib/storage/linux/lvm.py | 8 +- .../contrib/storage/linux/utils.py | 2 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-proxy/hooks/charmhelpers/core/host.py | 73 +++++++++++- .../hooks/charmhelpers/core/strutils.py | 16 ++- .../hooks/charmhelpers/core/unitdata.py | 2 +- ceph-proxy/hooks/charmhelpers/fetch/snap.py | 16 +++ ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 2 +- .../contrib/openstack/amulet/deployment.py | 36 ++++-- .../contrib/openstack/amulet/utils.py | 36 ++++-- ceph-proxy/tests/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-proxy/tests/charmhelpers/core/host.py | 73 +++++++++++- .../tests/charmhelpers/core/strutils.py | 16 ++- .../tests/charmhelpers/core/unitdata.py | 2 +- 21 files changed, 537 insertions(+), 86 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 80d574dc..1c55b30f 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -30,6 +30,7 @@ from charmhelpers.core.hookenv import ( config, + hook_name, local_unit, log, relation_ids, @@ -285,7 +286,7 @@ def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid - except: + except Exception: log("Nagios user not set up, nrpe checks not updated") return @@ -302,7 +303,12 @@ def write(self): "command": nrpecheck.command, } - service('restart', 'nagios-nrpe-server') + # update-status hooks are configured to fire every 5 minutes by + # default. When nagios-nrpe-server is restarted, the nagios server + # reports checks failing, causing unnecessary alerts. Let's not restart + # on update-status hooks. + if not hook_name() == 'update-status': + service('restart', 'nagios-nrpe-server') monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py index d812948a..d32bf44e 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -70,12 +70,12 @@ def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) modules = [] - for line in output.strip().split(): + for line in output.splitlines(): # Each line of the enabled module output looks like: # module_name (static|shared) # Plus a header line at the top of the output which is stripped # out by the regex. 
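# Worked example of the tightened pattern below (a sketch; the sample
# line is assumed, and the single leading space mirrors the expression
# used here rather than any guaranteed apache2ctl formatting):
import re
sample = ' ssl_module (shared)'
match = re.search(r'^ (\S*)_module (\S*)', sample)
print(match.group(1))  # -> 'ssl'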
- matcher = re.search(r'^ (\S*)', line) + matcher = re.search(r'^ (\S*)_module (\S*)', line) if matcher: modules.append(matcher.group(1)) return modules diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index d7e6debf..a871ce37 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None): if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) - except: + except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback @@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True): if not result: try: result = socket.gethostbyaddr(address)[0] - except: + except Exception: return None else: result = address diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py index 1501641e..547de09c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50): target, name, source, str(priority) ] subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 837a1674..8a541d40 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -95,7 +95,7 @@ from charmhelpers.fetch.snap import ( snap_install, snap_refresh, - SNAP_CHANNELS, + valid_snap_channel, ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -140,6 +140,7 @@ ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) @@ -157,6 +158,7 @@ ('2016.2', 'newton'), ('2017.1', 'ocata'), ('2017.2', 'pike'), + ('2018.1', 'queens'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +189,8 @@ ['2.11.0', '2.12.0', '2.13.0']), ('pike', ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0']), ]) # >= Liberty version->codename mapping @@ -412,6 +416,8 @@ def get_os_codename_package(package, fatal=True): cmd = ['snap', 'list', package] try: out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') except subprocess.CalledProcessError as e: return None lines = out.split('\n') @@ -426,7 +432,7 @@ def get_os_codename_package(package, fatal=True): try: pkg = cache[package] - except: + except Exception: if not fatal: return None # the package is unknown to the current apt cache. @@ -579,6 +585,9 @@ def configure_installation_source(source_plus_key): Note that the behaviour on error is to log the error to the juju log and then call sys.exit(1). 
""" + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return # extract the key if there is one, denoted by a '|' in the rel source, key = get_source_and_pgp_key(source_plus_key) @@ -615,7 +624,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): juju_rc_path = "%s/%s" % (charm_dir(), script_path) if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wb') as rc_script: + with open(juju_rc_path, 'wt') as rc_script: rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) @@ -794,7 +803,7 @@ def git_default_repos(projects_yaml): service = service_name() core_project = service - for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): if projects_yaml == default: # add the requirements repo first @@ -1615,7 +1624,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): upgrade_callback(configs=configs) action_set({'outcome': 'success, upgrade completed.'}) ret = True - except: + except Exception: action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) action_fail('do_openstack_upgrade resulted in an ' @@ -1720,7 +1729,7 @@ def is_unit_paused_set(): kv = t[0] # transform something truth-y into a Boolean. return not(not(kv.get('unit-paused'))) - except: + except Exception: return False @@ -2048,7 +2057,7 @@ def update_json_file(filename, items): def snap_install_requested(): """ Determine if installing from snaps - If openstack-origin is of the form snap:channel-series-release + If openstack-origin is of the form snap:track/channel[/branch] and channel is in SNAPS_CHANNELS return True. """ origin = config('openstack-origin') or "" @@ -2056,10 +2065,12 @@ def snap_install_requested(): return False _src = origin[5:] - channel, series, release = _src.split('-') - if channel.lower() in SNAP_CHANNELS: - return True - return False + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @@ -2067,7 +2078,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @param snaps: List of snaps @param src: String of openstack-origin or source of the form - snap:channel-series-track + snap:track/channel @param mode: String classic, devmode or jailmode @returns: Dictionary of snaps with channels and modes """ @@ -2077,8 +2088,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): return {} _src = src[5:] - _channel, _series, _release = _src.split('-') - channel = '--channel={}/{}'.format(_release, _channel) + channel = '--channel={}'.format(_src) return {snap: {'channel': channel, 'mode': mode} for snap in snaps} @@ -2090,8 +2100,8 @@ def install_os_snaps(snaps, refresh=False): @param snaps: Dictionary of snaps with channels and modes of the form: {'snap_name': {'channel': 'snap_channel', 'mode': 'snap_mode'}} - Where channel a snapstore channel and mode is --classic, --devmode or - --jailmode. + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. 
@param post_snap_install: Callback function to run after snaps have been installed """ diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py b/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py index 7d04dfa5..d2142c75 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py @@ -49,6 +49,6 @@ def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): open_port(port) debugger = Rpdb(addr=addr, port=port) debugger.set_trace(sys._getframe().f_back) - except: + except Exception: _error("Cannot start a remote debug session on %s:%s" % (addr, port)) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index e5a01b1b..39231612 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -370,9 +370,10 @@ def get_mon_map(service): Also raises CalledProcessError if our ceph command fails """ try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -457,7 +458,7 @@ def monitor_key_get(service, key): try: output = check_output( ['ceph', '--id', service, - 'config-key', 'get', str(key)]) + 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: log("Monitor config-key get failed with message: {}".format( @@ -500,6 +501,8 @@ def get_erasure_profile(service, name): out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name): """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -714,9 +721,12 @@ def get_osds(service): """ version = ceph_version() if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) return None @@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS 
block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1018,7 +1032,9 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) + if six.PY3: + output = output.decode('UTF-8') output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py index 4719f53c..7f2a0604 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device): ''' vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) return vg diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index 3dc0df68..c9428894 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,6 +64,6 @@ def is_device_mounted(device): ''' try: out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except: + except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
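The _port_op helper whose definition starts in this hunk (its final lines continue just below) folds open_port and close_port into a single code path and adds ICMP support. Since `open-port ICMP` takes no port number and only exists on Juju 2.3+, a failed call is tolerated for ICMP and re-raised otherwise. A condensed sketch of the resulting call shapes, not a drop-in replacement for the helper:

    import subprocess

    def _port_op_sketch(op_name, port, protocol='TCP'):
        icmp = protocol.upper() == 'ICMP'
        # 'open-port ICMP' versus 'open-port 8080/TCP'
        arg = protocol if icmp else '{}/{}'.format(port, protocol)
        try:
            subprocess.check_call([op_name, arg])
        except subprocess.CalledProcessError:
            # Only ICMP failures are tolerated (Juju before 2.3).
            if not icmp:
                raise
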
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
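network_get above raises NotImplementedError on two old-Juju conditions: a missing binary (via the translate_exc decorator on OSError) and the early 2.0.x requirement for --primary-address. Callers are expected to catch that and fall back to network_get_primary_address. A minimal caller-side sketch (get_bind_info is a hypothetical wrapper, and the 'bind-addresses' key is an assumption about the network-get YAML layout, which varies by Juju version):

    from charmhelpers.core.hookenv import (
        network_get,
        network_get_primary_address,
    )

    def get_bind_info(endpoint, binding):
        try:
            info = network_get(endpoint)
            # Assumed key; inspect the returned dict on your Juju version.
            return info.get('bind-addresses')
        except NotImplementedError:
            # Juju < 2.1: only the primary address is available.
            return network_get_primary_address(binding)
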
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
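ingress_address, completed just above, codifies the Juju cross-model addressing convention on the consuming side: prefer the ingress-address a remote unit publishes, and fall back to private-address for units that predate it. Paired with iter_units_for_relation_name it yields one reachable address per counterpart unit; a short usage sketch, assuming a hypothetical 'mon' relation:

    from charmhelpers.core.hookenv import (
        ingress_address,
        iter_units_for_relation_name,
    )

    def peer_addresses(relation_name='mon'):
        # Map each remote unit to whichever address it is reachable on.
        return {u.unit: ingress_address(rid=u.rid, unit=u.unit)
                for u in iter_units_for_relation_name(relation_name)}
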
+ + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/hooks/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-proxy/hooks/charmhelpers/core/strutils.py +++ b/ceph-proxy/hooks/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/ceph-proxy/hooks/charmhelpers/fetch/snap.py b/ceph-proxy/hooks/charmhelpers/fetch/snap.py index 112a54c3..395836c7 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/snap.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/snap.py @@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -132,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index 40e1cb5b..910e96a6 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -572,7 +572,7 @@ def get_upstream_version(package): cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. 
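valid_snap_channel above and the snap_install_requested rewrite earlier in this patch move origin parsing to the `snap:track/channel[/branch]` form, defaulting to the stable channel and passing the remainder straight through as a --channel argument. A minimal sketch of the combined parsing (parse_snap_origin is a hypothetical name; ValueError stands in for the helper's InvalidSnapChannel, and the channel whitelist is assumed to mirror SNAP_CHANNELS):

    def parse_snap_origin(origin):
        # Turn e.g. 'snap:ocata/stable' into a snap --channel argument.
        if not origin.startswith('snap:'):
            return None
        _src = origin[5:]
        if '/' in _src:
            channel = _src.split('/')[1]
        else:
            channel = 'stable'  # 'snap:ocata' with no channel given
        if channel.lower() not in ('edge', 'beta', 'candidate', 'stable'):
            raise ValueError('Invalid snap channel: {}'.format(channel))
        return '--channel={}'.format(_src)

So parse_snap_origin('snap:ocata/stable') yields '--channel=ocata/stable', the same string get_snaps_install_info_from_origin now builds.
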
return None diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..e37f2834 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -263,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -274,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -291,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -303,20 +314,27 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ + 'data', + 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', 'glance' ] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, 
keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
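The cinder changes in this sync bridge the v1/v2 API split twice over: volume objects expose display_name (v1) versus name (v2+), handled in _get_cinder_obj_name with an AttributeError fallback, and volumes.create changed its keyword argument similarly, handled in create_cinder_volume with a TypeError retry. A condensed sketch of both fallbacks together (wrapper names hypothetical; the v1 display_name keyword is an assumption inferred from the retry logic):

    def obj_name(cinder_object):
        # v1 objects store the name in 'display_name'; v2+ use 'name'.
        try:
            return cinder_object.display_name
        except AttributeError:
            return cinder_object.name

    def create_volume(cinder, vol_name, size=1):
        try:
            # v1-style keyword (assumed)
            return cinder.volumes.create(display_name=vol_name, size=size)
        except TypeError:
            # v2+ renamed the keyword to 'name'
            return cinder.volumes.create(name=vol_name, size=size)
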
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
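opened_ports is a thin wrapper over the opened-ports hook tool with one caveat worth keeping in mind: Juju only records ports from hooks that have already completed, so a port opened earlier in the current hook will not appear. A small usage sketch (is_port_open is hypothetical):

    from charmhelpers.core.hookenv import opened_ports

    def is_port_open(port, protocol='tcp'):
        wanted = '{}/{}'.format(port, protocol)
        # Entries look like '8080/tcp' or '8081-8083/tcp'; ranges are kept
        # as strings here rather than expanded, so this is an exact match.
        return wanted in opened_ports()
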
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
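chage above is a straight argument builder over chage(8), and remove_password_expiry pins the no-expiry flag combination with functools.partial. A short usage sketch (the 'ceph' user is hypothetical):

    from charmhelpers.core.host import chage, remove_password_expiry

    # Equivalent to:
    #   chage --expiredate -1 --inactive -1 --mindays 0 --maxdays -1 ceph
    remove_password_expiry('ceph')

    # Force a password change at next login (chage -d 0):
    chage('ceph', lastday='0')
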
+ + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-proxy/tests/charmhelpers/core/strutils.py b/ceph-proxy/tests/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-proxy/tests/charmhelpers/core/strutils.py +++ b/ceph-proxy/tests/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-proxy/tests/charmhelpers/core/unitdata.py b/ceph-proxy/tests/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-proxy/tests/charmhelpers/core/unitdata.py +++ b/ceph-proxy/tests/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise From 991cf03f84a677c0ab1cb1f2cee88d95bb93e1a5 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 22 Nov 2017 18:23:55 +0000 Subject: [PATCH 1414/2699] Sync charm-helpers Change-Id: I0d4bd7203c9ce52f8d913f78a18ee7ac5ee8b518 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 10 +- .../charmhelpers/contrib/hahelpers/apache.py | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 30 +++++ .../contrib/hardening/audits/apache.py | 4 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/alternatives.py | 13 ++ .../contrib/openstack/amulet/deployment.py | 36 ++++-- .../contrib/openstack/amulet/utils.py | 36 ++++-- .../charmhelpers/contrib/openstack/context.py | 48 ++++---- .../contrib/openstack/files/check_haproxy.sh | 2 +- .../contrib/openstack/ha/utils.py | 11 +- .../charmhelpers/contrib/openstack/neutron.py | 61 ++-------- .../contrib/openstack/templates/ceph.conf | 4 +- .../contrib/openstack/templates/haproxy.cfg | 2 + .../openstack/templates/section-oslo-cache | 6 + .../contrib/openstack/templating.py | 2 + .../charmhelpers/contrib/openstack/utils.py | 42 ++++--- .../contrib/storage/linux/ceph.py | 42 +++++-- .../charmhelpers/contrib/storage/linux/lvm.py | 8 +- .../contrib/storage/linux/utils.py | 2 +- .../hooks/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-radosgw/hooks/charmhelpers/core/host.py | 73 +++++++++++- .../hooks/charmhelpers/core/strutils.py | 16 ++- 
.../hooks/charmhelpers/core/unitdata.py | 2 +- ceph-radosgw/hooks/charmhelpers/fetch/snap.py | 16 +++ .../hooks/charmhelpers/fetch/ubuntu.py | 2 +- .../contrib/openstack/amulet/deployment.py | 32 +++-- .../contrib/openstack/amulet/utils.py | 36 ++++-- .../tests/charmhelpers/core/hookenv.py | 112 ++++++++++++++++-- ceph-radosgw/tests/charmhelpers/core/host.py | 73 +++++++++++- .../tests/charmhelpers/core/strutils.py | 16 ++- .../tests/charmhelpers/core/unitdata.py | 2 +- 32 files changed, 675 insertions(+), 182 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 80d574dc..1c55b30f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -30,6 +30,7 @@ from charmhelpers.core.hookenv import ( config, + hook_name, local_unit, log, relation_ids, @@ -285,7 +286,7 @@ def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid - except: + except Exception: log("Nagios user not set up, nrpe checks not updated") return @@ -302,7 +303,12 @@ def write(self): "command": nrpecheck.command, } - service('restart', 'nagios-nrpe-server') + # update-status hooks are configured to firing every 5 minutes by + # default. When nagios-nrpe-server is restarted, the nagios server + # reports checks failing causing unneccessary alerts. Let's not restart + # on update-status hooks. + if not hook_name() == 'update-status': + service('restart', 'nagios-nrpe-server') monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index d0c69942..22acb683 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -90,6 +90,6 @@ def install_ca_cert(ca_cert): log("CA cert is the same as installed version", level=INFO) else: log("Installing new CA cert", level=INFO) - with open(cert_file, 'w') as crt: + with open(cert_file, 'wb') as crt: crt.write(ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index e02350e0..4207e42c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -27,6 +27,7 @@ import subprocess import os +import time from socket import gethostname as get_unit_hostname @@ -45,6 +46,9 @@ is_leader as juju_is_leader, status_set, ) +from charmhelpers.core.host import ( + modulo_distribution, +) from charmhelpers.core.decorators import ( retry_on_exception, ) @@ -361,3 +365,29 @@ def canonical_url(configs, vip_setting='vip'): else: addr = unit_get('private-address') return '%s://%s' % (scheme, addr) + + +def distributed_wait(modulo=None, wait=None, operation_name='operation'): + ''' Distribute operations by waiting based on modulo_distribution + + If modulo and or wait are not set, check config_get for those values. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 
'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') + if wait is None: + wait = config_get('known-wait') + calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py index d812948a..d32bf44e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -70,12 +70,12 @@ def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) modules = [] - for line in output.strip().split(): + for line in output.splitlines(): # Each line of the enabled module output looks like: # module_name (static|shared) # Plus a header line at the top of the output which is stripped # out by the regex. - matcher = re.search(r'^ (\S*)', line) + matcher = re.search(r'^ (\S*)_module (\S*)', line) if matcher: modules.append(matcher.group(1)) return modules diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index d7e6debf..a871ce37 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None): if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) - except: + except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback @@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True): if not result: try: result = socket.gethostbyaddr(address)[0] - except: + except Exception: return None else: result = address diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py index 1501641e..547de09c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50): target, name, source, str(priority) ] subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..e37f2834 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + 
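Returning to the cluster.py hunk above: distributed_wait wires the new modulo_distribution helper from host.py to charm configuration, reading modulo-nodes and known-wait when explicit values are not passed, then logging, setting maintenance status, and sleeping for the computed interval. A sketch of the intended call around a clustered restart (the service name is hypothetical):

    from charmhelpers.contrib.hahelpers.cluster import distributed_wait
    from charmhelpers.core.host import service_restart

    def cluster_safe_restart():
        # With modulo=10 and wait=60, units restart in ten groups spaced a
        # minute apart. Omitting modulo/wait falls back to the modulo-nodes
        # and known-wait charm config options.
        distributed_wait(modulo=10, wait=60, operation_name='restart')
        service_restart('ceph-radosgw')
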
self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -263,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -274,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -291,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -303,20 +314,27 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ + 'data', + 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', 'glance' ] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " 
data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. + + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index f67f3265..e6c0e9fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
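One behavioural fix worth flagging from the nrpe.py hunk earlier in this patch: NRPE.write() now skips restarting nagios-nrpe-server when running under update-status, since that hook fires every five minutes by default and each restart made checks flap on the Nagios server. The guard generalises to anything expensive in a periodic hook; a minimal sketch using the same helpers:

    from charmhelpers.core.hookenv import hook_name
    from charmhelpers.core.host import service

    def maybe_restart_nrpe():
        # Avoid needless daemon restarts (and alert noise) on the
        # five-minute update-status tick.
        if hook_name() != 'update-status':
            service('restart', 'nagios-nrpe-server')
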
+import collections import glob import json import math @@ -292,7 +293,7 @@ def __call__(self): def db_ssl(rdata, ctxt, ssl_dir): if 'ssl_ca' in rdata and ssl_dir: ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_ca'])) ctxt['database_ssl_ca'] = ca_path @@ -307,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir): log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) - with open(cert_path, 'w') as fh: + with open(cert_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_cert'])) ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'w') as fh: + with open(key_path, 'wb') as fh: fh.write(b64decode(rdata['ssl_key'])) ctxt['database_ssl_key'] = key_path @@ -458,7 +459,7 @@ def __call__(self): ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'w') as fh: + with open(ca_path, 'wb') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path @@ -578,11 +579,14 @@ def __call__(self): laddr = get_address_in_network(config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, - netmask), - 'backends': {l_unit: laddr}} + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: @@ -594,10 +598,13 @@ def __call__(self): # match in the frontend cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -628,6 +635,8 @@ def __call__(self): ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + ctxt['stat_port'] = '8888' db = kv() @@ -802,8 +811,9 @@ def __call__(self): else: # Expect cert/key provided in config (currently assumed that ca # uses ip for cn) - cn = resolve_address(endpoint_type=INTERNAL) - self.configure_cert(cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: @@ -843,15 +853,6 @@ def _ensure_packages(self): for pkgs in self.packages: ensure_packages(pkgs) - def _save_flag_file(self): - if self.network_manager == 'quantum': - _file = '/etc/nova/quantum_plugin.conf' - else: - _file = '/etc/nova/neutron_plugin.conf' - - with open(_file, 'wb') as out: - out.write(self.plugin + '\n') - def ovs_ctxt(self): driver = neutron_plugin_attribute(self.plugin, 'driver', self.network_manager) @@ -996,7 +997,6 @@ def __call__(self): flags = config_flags_parser(alchemy_flags) ctxt['neutron_alchemy_flags'] = flags - self._save_flag_file() return ctxt @@ -1176,7 +1176,7 @@ def __call__(self): if sub_config and sub_config != '': try: sub_config = json.loads(sub_config) - except: + except Exception: log('Could not parse JSON from ' 
'subordinate_configuration setting from %s' % rid, level=ERROR) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh index 0df07176..7aab129a 100755 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -9,7 +9,7 @@ CRITICAL=0 NOTACTIVE='' LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}') typeset -i N_INSTANCES=0 for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 254a90e7..9a4d79c1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -82,15 +82,18 @@ def update_dns_ha_resource_params(resources, resource_params, continue m = re.search('os-(.+?)-hostname', setting) if m: - networkspace = m.group(1) + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' else: msg = ('Unexpected DNS hostname setting: {}. ' - 'Cannot determine network space name' + 'Cannot determine endpoint_type name' ''.format(setting)) status_set('blocked', msg) raise DNSHAException(msg) - hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), @@ -101,7 +104,7 @@ def update_dns_ha_resource_params(resources, resource_params, resources[hostname_key] = crm_ocf resource_params[hostname_key] = ( 'params fqdn="{}" ip_address="{}" ' - ''.format(hostname, resolve_address(endpoint_type=networkspace, + ''.format(hostname, resolve_address(endpoint_type=endpoint_type, override=False))) if len(hostname_group) >= 1: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 37fa0eb0..0f847f56 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -59,18 +59,13 @@ def determine_dkms_package(): def quantum_plugins(): - from charmhelpers.contrib.openstack import context return { 'ovs': { 'config': '/etc/quantum/plugins/openvswitch/' 'ovs_quantum_plugin.ini', 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 'OVSQuantumPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': ['quantum-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], @@ -82,11 +77,7 @@ def quantum_plugins(): 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 
'QuantumPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['quantum-server', @@ -100,7 +91,6 @@ def quantum_plugins(): def neutron_plugins(): - from charmhelpers.contrib.openstack import context release = os_release('nova-common') plugins = { 'ovs': { @@ -108,11 +98,7 @@ def neutron_plugins(): 'ovs_neutron_plugin.ini', 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 'OVSNeutronPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['neutron-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], @@ -124,11 +110,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -138,11 +120,7 @@ def neutron_plugins(): 'nsx': { 'config': '/etc/neutron/plugins/vmware/nsx.ini', 'driver': 'vmware', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -152,11 +130,7 @@ def neutron_plugins(): 'n1kv': { 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], @@ -167,11 +141,7 @@ def neutron_plugins(): 'Calico': { 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', @@ -189,11 +159,7 @@ def neutron_plugins(): 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], @@ -203,10 +169,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [ - context.SharedDBContext(user=config('database-user'), - database=config('database'), - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': ['plumgrid-lxc', 'iovisor-dkms'], @@ -217,11 +180,7 @@ def neutron_plugins(): 
'midonet': { 'config': '/etc/neutron/plugins/midonet/midonet.ini', 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index ed5c4f10..a11ce8ab 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }} [client] {% if rbd_client_cache_settings -%} -{% for key, value in rbd_client_cache_settings.iteritems() -%} +{% for key, value in rbd_client_cache_settings.items() -%} {{ key }} = {{ value }} {% endfor -%} -{%- endif %} \ No newline at end of file +{%- endif %} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 2e660450..ebc8a68a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -48,7 +48,9 @@ listen stats {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6_enabled -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache new file mode 100644 index 00000000..e056a32a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache @@ -0,0 +1,6 @@ +[cache] +{% if memcache_url %} +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcache_url }} +{% endif %} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index d8c1fc7f..77490e4d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -272,6 +272,8 @@ def write(self, config_file): raise OSConfigException _out = self.render(config_file) + if six.PY3: + _out = _out.encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 837a1674..8a541d40 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -95,7 +95,7 @@ from charmhelpers.fetch.snap import ( snap_install, snap_refresh, - SNAP_CHANNELS, + valid_snap_channel, ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -140,6 +140,7 @@ ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) @@ -157,6 +158,7 @@ ('2016.2', 'newton'), ('2017.1', 'ocata'), ('2017.2', 'pike'), + ('2018.1', 'queens'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 
+189,8 @@ ['2.11.0', '2.12.0', '2.13.0']), ('pike', ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0']), ]) # >= Liberty version->codename mapping @@ -412,6 +416,8 @@ def get_os_codename_package(package, fatal=True): cmd = ['snap', 'list', package] try: out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') except subprocess.CalledProcessError as e: return None lines = out.split('\n') @@ -426,7 +432,7 @@ def get_os_codename_package(package, fatal=True): try: pkg = cache[package] - except: + except Exception: if not fatal: return None # the package is unknown to the current apt cache. @@ -579,6 +585,9 @@ def configure_installation_source(source_plus_key): Note that the behaviour on error is to log the error to the juju log and then call sys.exit(1). """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return # extract the key if there is one, denoted by a '|' in the rel source, key = get_source_and_pgp_key(source_plus_key) @@ -615,7 +624,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): juju_rc_path = "%s/%s" % (charm_dir(), script_path) if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wb') as rc_script: + with open(juju_rc_path, 'wt') as rc_script: rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) @@ -794,7 +803,7 @@ def git_default_repos(projects_yaml): service = service_name() core_project = service - for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): if projects_yaml == default: # add the requirements repo first @@ -1615,7 +1624,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): upgrade_callback(configs=configs) action_set({'outcome': 'success, upgrade completed.'}) ret = True - except: + except Exception: action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) action_fail('do_openstack_upgrade resulted in an ' @@ -1720,7 +1729,7 @@ def is_unit_paused_set(): kv = t[0] # transform something truth-y into a Boolean. return not(not(kv.get('unit-paused'))) - except: + except Exception: return False @@ -2048,7 +2057,7 @@ def update_json_file(filename, items): def snap_install_requested(): """ Determine if installing from snaps - If openstack-origin is of the form snap:channel-series-release + If openstack-origin is of the form snap:track/channel[/branch] and channel is in SNAPS_CHANNELS return True. 
""" origin = config('openstack-origin') or "" @@ -2056,10 +2065,12 @@ def snap_install_requested(): return False _src = origin[5:] - channel, series, release = _src.split('-') - if channel.lower() in SNAP_CHANNELS: - return True - return False + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @@ -2067,7 +2078,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @param snaps: List of snaps @param src: String of openstack-origin or source of the form - snap:channel-series-track + snap:track/channel @param mode: String classic, devmode or jailmode @returns: Dictionary of snaps with channels and modes """ @@ -2077,8 +2088,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): return {} _src = src[5:] - _channel, _series, _release = _src.split('-') - channel = '--channel={}/{}'.format(_release, _channel) + channel = '--channel={}'.format(_src) return {snap: {'channel': channel, 'mode': mode} for snap in snaps} @@ -2090,8 +2100,8 @@ def install_os_snaps(snaps, refresh=False): @param snaps: Dictionary of snaps with channels and modes of the form: {'snap_name': {'channel': 'snap_channel', 'mode': 'snap_mode'}} - Where channel a snapstore channel and mode is --classic, --devmode or - --jailmode. + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. @param post_snap_install: Callback function to run after snaps have been installed """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index e5a01b1b..39231612 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -370,9 +370,10 @@ def get_mon_map(service): Also raises CalledProcessError if our ceph command fails """ try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -457,7 +458,7 @@ def monitor_key_get(service, key): try: output = check_output( ['ceph', '--id', service, - 'config-key', 'get', str(key)]) + 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: log("Monitor config-key get failed with message: {}".format( @@ -500,6 +501,8 @@ def get_erasure_profile(service, name): out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name): """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, - 
'lspools']).decode('UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -714,9 +721,12 @@ def get_osds(service): """ version = ceph_version() if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) return None @@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1018,7 +1032,9 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) + if six.PY3: + output = output.decode('UTF-8') output = output.split() if len(output) > 3: return output[2] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index 4719f53c..7f2a0604 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device): ''' vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) return vg diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index 3dc0df68..c9428894 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,6 +64,6 @@ def is_device_mounted(device): ''' try: out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except: + except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, 
'-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. + if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. 
+ + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. + + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. 
+ -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. + + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: 
self.flush(False) self.revision = None raise diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py index 112a54c3..395836c7 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -132,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 40e1cb5b..910e96a6 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -572,7 +572,7 @@ def get_upstream_version(package): cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. return None diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index fc20a76d..e37f2834 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -250,7 +250,14 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, self.log.debug('Waiting up to {}s for extended status on services: ' '{}'.format(timeout, services)) service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait() + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services) + # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + self.log.info('OK') def _get_openstack_release(self): @@ -263,7 +270,8 @@ def _get_openstack_release(self): (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike) = range(11) + self.xenial_pike, self.artful_pike, self.xenial_queens, + self.bionic_queens,) = range(13) releases = { ('trusty', None): self.trusty_icehouse, @@ -274,9 +282,11 @@ def _get_openstack_release(self): ('xenial', 'cloud:xenial-newton'): self.xenial_newton, ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, ('yakkety', None): self.yakkety_newton, ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, } return releases[(self.series, self.openstack)] @@ -291,6 +301,7 @@ def _get_openstack_release_string(self): ('yakkety', 'newton'), ('zesty', 'ocata'), ('artful', 'pike'), + ('bionic', 'queens'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -303,22 +314,29 @@ def get_ceph_expected_pools(self, radosgw=False): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= 
self.trusty_kilo: - # Kilo or later + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse pools = [ + 'data', + 'metadata', 'rbd', 'cinder-ceph', 'glance' ] - else: - # Juno or earlier + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata pools = [ - 'data', - 'metadata', 'rbd', 'cinder-ceph', 'glance' ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] if radosgw: pools.extend([ diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..b71b2b19 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -42,7 +43,6 @@ from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import CompareHostReleases DEBUG = logging.DEBUG @@ -310,7 +310,6 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] - @retry_on_exception(5, base_delay=10) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -326,7 +325,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, rel = sentry.relation('identity-service', relation_name) self.log.debug('keystone relation data: {}'.format(rel)) - if rel['api_version'] != str(api_version): + if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." "".format(rel['api_version'], api_version)) @@ -348,15 +347,19 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, config = {'preferred-api-version': api_version} deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,13 +620,25 @@ def create_or_get_keypair(self, nova, keypair_name="testkey"): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) _keypair = nova.keypairs.create(name=keypair_name) return _keypair + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None): """Create cinder volume, optionally from a glance image, OR @@ -674,6 +689,13 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, source_volid=src_vol_id, snapshot_id=snap_id) vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id except Exception as e: msg = 'Failed to create volume: {}'.format(e) amulet.raise_status(amulet.FAIL, msg=msg) @@ -688,7 +710,7 @@ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, # Re-validate new volume self.log.debug('Validating volume attributes...') - val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) val_vol_boot = cinder.volumes.get(vol_id).bootable val_vol_stat = cinder.volumes.get(vol_id).status val_vol_size = cinder.volumes.get(vol_id).size diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 12f37b28..5a88f798 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -22,6 +22,7 @@ import copy from distutils.version import LooseVersion from functools import wraps +from collections import namedtuple import glob import os import json @@ -218,6 +219,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +514,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1107,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more @@ -1106,3 +1165,42 @@ def meter_info(): """Get the meter status information, if running in the meter-status-changed hook.""" return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. Otherwise, + return the private-address. This function is to be used on the consuming + side of the relation. 
+ + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. 
+ + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/ceph-radosgw/tests/charmhelpers/core/strutils.py b/ceph-radosgw/tests/charmhelpers/core/strutils.py index 685dabde..e8df0452 100644 --- a/ceph-radosgw/tests/charmhelpers/core/strutils.py +++ b/ceph-radosgw/tests/charmhelpers/core/strutils.py @@ -61,13 +61,19 @@ def bytes_from_string(value): if isinstance(value, six.string_types): value = six.text_type(value) else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) + msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size class BasicStringComparator(object): diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/ceph-radosgw/tests/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/tests/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ def hook_scope(self, name=""): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise From 1debb243ec32fb13a7d33eff4c51be680d544245 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 27 Nov 2017 11:01:26 +0000 Subject: [PATCH 1415/2699] Process client broker requests post bootstrap It's possible that a remote client unit will present its broker request prior to the Ceph MON cluster being bootstrapped; ensure that any client-relation-changed hooks are re-executed after bootstrap to process any pending broker requests.
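For illustration, a condensed sketch of the resulting flow (a sketch only, reusing the hook names from the ceph_hooks.py diff below; relation_ids() and related_units() are the standard charmhelpers.core.hookenv helpers):

    from charmhelpers.core.hookenv import relation_ids, related_units

    # Once the MON cluster is bootstrapped, replay the client hooks so
    # that broker requests presented before bootstrap are processed
    # and answered.
    def notify_client():
        for relid in relation_ids('client'):
            client_relation_joined(relid)
            for unit in related_units(relid):
                # client_relation_changed() now accepts an explicit
                # (relid, unit) pair so it can be replayed outside a
                # live client-relation-changed hook context.
                client_relation_changed(relid, unit)

Here client_relation_joined() and client_relation_changed() are the charm's own hook functions, as amended in the diff below.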
Change-Id: I30123183ad3d4a0078fbb92941b2b8e8aadd6d1c Closes-Bug: 1734620 --- ceph-mon/hooks/ceph_hooks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e042a4a7..e9fec6e9 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -409,6 +409,8 @@ def notify_radosgws(): def notify_client(): for relid in relation_ids('client'): client_relation_joined(relid) + for unit in related_units(relid): + client_relation_changed(relid, unit) for relid in relation_ids('admin'): admin_relation_joined(relid) for relid in relation_ids('mds'): @@ -585,10 +587,12 @@ def client_relation_joined(relid=None): @hooks.hook('client-relation-changed') -def client_relation_changed(): +def client_relation_changed(relid=None, unit=None): """Process broker requests from ceph client relations.""" if ceph.is_quorum(): - settings = relation_get() + if not unit: + unit = remote_unit() + settings = relation_get(rid=relid, unit=unit) if 'broker_req' in settings: if not ceph.is_leader(): log("Not leader - ignoring broker request", level=DEBUG) @@ -602,7 +606,8 @@ def client_relation_changed(): 'broker_rsp': rsp, unit_response_key: rsp, } - relation_set(relation_settings=data) + relation_set(relation_id=relid, + relation_settings=data) else: log('mon cluster not in quorum', level=DEBUG) From 4d90b9137fca4ce08c552535ee29920062414baa Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 29 Nov 2017 09:20:12 +0000 Subject: [PATCH 1416/2699] Resync charmhelpers for py3 fixes Includes fix for failed sysctl tuning with lxdbr0. Change-Id: I53de0d6dd9e9c4035f02405d172e1e2e5224daf0 Closes-Bug: 1735070 --- ceph-osd/hooks/charmhelpers/core/host.py | 2 + ceph-osd/lib/ceph/utils.py | 48 +++++++++---------- .../contrib/openstack/amulet/deployment.py | 12 +++-- ceph-osd/tests/charmhelpers/core/host.py | 2 + 4 files changed, 35 insertions(+), 29 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 6ab697f0..33703f05 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -195,7 +195,7 @@ def save_sysctls(sysctl_dict, save_location): except IOError as e: log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) + save_location, e), level=ERROR) raise @@ -221,7 +221,7 @@ def tune_nic(network_interface): save_location=sysctl_file) except IOError as e: log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e.message), + "failed. 
{}".format(network_interface, e), level=ERROR) try: @@ -266,7 +266,7 @@ def get_link_speed(network_interface): except IOError as e: log("Unable to open {path} because of error: {error}".format( path=speed_path, - error=e.message), level='error') + error=e), level='error') return LinkSpeed["UNKNOWN"] @@ -286,13 +286,13 @@ def persist_settings(settings_dict): context=settings_dict) except IOError as err: log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err.message), level=ERROR) + path=HDPARM_FILE, error=err), level=ERROR) except Exception as e: # The templating.render can raise a jinja2 exception if the # template is not found. Rather than polluting the import # space of this charm, simply catch Exception log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e.message), level=ERROR) + path=HDPARM_FILE, error=e), level=ERROR) def set_max_sectors_kb(dev_name, max_sectors_size): @@ -308,7 +308,7 @@ def set_max_sectors_kb(dev_name, max_sectors_size): f.write(max_sectors_size) except IOError as e: log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) + max_sectors_kb_path, e), level=ERROR) def get_max_sectors_kb(dev_name): @@ -328,7 +328,7 @@ def get_max_sectors_kb(dev_name): return int(max_sectors_kb) except IOError as e: log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) + max_sectors_kb_path, e), level=ERROR) # Bail. return 0 return 0 @@ -350,7 +350,7 @@ def get_max_hw_sectors_kb(dev_name): return int(max_hw_sectors_kb) except IOError as e: log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) + max_hw_sectors_kb_path, e), level=ERROR) return 0 return 0 @@ -547,11 +547,11 @@ def get_osd_weight(osd_id): return device['crush_weight'] except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: log("ceph osd tree command failed with message: {}".format( - e.message)) + e)) raise @@ -591,11 +591,11 @@ def get_osd_tree(service): return crush_list except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: log("ceph osd tree command failed with message: {}".format( - e.message)) + e)) raise @@ -973,7 +973,7 @@ def is_osd_disk(dev): return True except subprocess.CalledProcessError as e: log("sgdisk inspection of partition {} on {} failed with " - "error: {}. Skipping".format(partition.minor, dev, e.message), + "error: {}. 
Skipping".format(partition.minor, dev, e), level=ERROR) return False @@ -1682,7 +1682,7 @@ def upgrade_monitor(new_version): apt_update(fatal=True) except subprocess.CalledProcessError as err: log("Adding the ceph source failed with message: {}".format( - err.message)) + err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) try: @@ -1711,7 +1711,7 @@ def upgrade_monitor(new_version): service_start('ceph-mon-all') except subprocess.CalledProcessError as err: log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) + "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -1895,7 +1895,7 @@ def upgrade_osd(new_version): apt_update(fatal=True) except subprocess.CalledProcessError as err: log("Adding the ceph sources failed with message: {}".format( - err.message)) + err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -1940,7 +1940,7 @@ def upgrade_osd(new_version): except (subprocess.CalledProcessError, IOError) as err: log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) + "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2189,11 +2189,10 @@ def get_ceph_pg_stat(): return json_tree except ValueError as v: log("Unable to parse ceph pg stat json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph pg stat command failed with message: {}".format( - e.message)) + log("ceph pg stat command failed with message: {}".format(e)) raise @@ -2217,11 +2216,10 @@ def get_ceph_health(): return json_tree except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph status command failed with message: {}".format( - e.message)) + log("ceph status command failed with message: {}".format(e)) raise @@ -2247,8 +2245,8 @@ def reweight_osd(osd_num, new_weight): return True return False except subprocess.CalledProcessError as e: - log("ceph osd crush reweight command failed with message: {}".format( - e.message)) + log("ceph osd crush reweight command failed" + " with message: {}".format(e)) raise diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match. @@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. 
""" - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the From 8fc15429801139c93363a040988349a2a48077c5 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 29 Nov 2017 09:24:19 +0000 Subject: [PATCH 1417/2699] Resync charmhelpers for py3 fixes Includes fix for write_file encoding under py3. Change-Id: Id0e3677f4f877b23c95902140cc4f24736f2768c Closes-Bug: 1735068 --- .../contrib/openstack/amulet/deployment.py | 12 ++++++++---- ceph-mon/hooks/charmhelpers/core/host.py | 2 ++ .../contrib/openstack/amulet/deployment.py | 12 ++++++++---- ceph-mon/tests/charmhelpers/core/host.py | 2 ++ 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match. @@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. """ - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' 
+ ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match. @@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. """ - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' 
+ ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the From 74a1801f257eb8a030f0d5b51f61a4d024f2af74 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 30 Nov 2017 10:44:40 +0000 Subject: [PATCH 1418/2699] Move from .testr.conf to .stestr.conf Change-Id: I4a19bb49e4e29540a642be9ffc7ed9ba5cdd8896 --- ceph-mon/.stestr.conf | 3 +++ ceph-mon/.testr.conf | 8 -------- 2 files changed, 3 insertions(+), 8 deletions(-) create mode 100644 ceph-mon/.stestr.conf delete mode 100644 ceph-mon/.testr.conf diff --git a/ceph-mon/.stestr.conf b/ceph-mon/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-mon/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-mon/.testr.conf b/ceph-mon/.testr.conf deleted file mode 100644 index 801646bb..00000000 --- a/ceph-mon/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list From ee6511f45550c3be8ae12f16776721daa6439600 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 1 Dec 2017 13:35:27 +0000 Subject: [PATCH 1419/2699] charms.ceph sync for py3 bug fix Change-Id: Id0add7b995e424a5a1c77c80e242fc6ffca54c87 Partial-Bug: #1735720 --- ceph-osd/lib/ceph/utils.py | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 33703f05..76656304 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -944,15 +944,27 @@ def get_partition_list(dev): # For each line of output for partition in partitions: parts = partition.split() - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) + try: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name=parts[5], + uuid=parts[6]) + ) + except IndexError: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name="", + uuid=parts[5]) + ) + return partitions_list except subprocess.CalledProcessError: raise 
@@ -2159,7 +2171,7 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): """Pretty print supported upgrade paths for ceph""" return ["{} -> {}".format(key, value) - for key, value in UPGRADE_PATHS.iteritems()] + for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): From 9291f64750bf431aee72d2b4ea348908b110a97a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 1 Dec 2017 13:37:28 +0000 Subject: [PATCH 1420/2699] charms.ceph sync for py3 bug fix Change-Id: Iac68df226b30e570a74f2e4b59d36020e759f42d Partial-Bug: #1735720 --- ceph-mon/lib/ceph/utils.py | 80 +++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 6ab697f0..76656304 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -195,7 +195,7 @@ def save_sysctls(sysctl_dict, save_location): except IOError as e: log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e.message), level=ERROR) + save_location, e), level=ERROR) raise @@ -221,7 +221,7 @@ def tune_nic(network_interface): save_location=sysctl_file) except IOError as e: log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e.message), + "failed. {}".format(network_interface, e), level=ERROR) try: @@ -266,7 +266,7 @@ def get_link_speed(network_interface): except IOError as e: log("Unable to open {path} because of error: {error}".format( path=speed_path, - error=e.message), level='error') + error=e), level='error') return LinkSpeed["UNKNOWN"] @@ -286,13 +286,13 @@ def persist_settings(settings_dict): context=settings_dict) except IOError as err: log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err.message), level=ERROR) + path=HDPARM_FILE, error=err), level=ERROR) except Exception as e: # The templating.render can raise a jinja2 exception if the # template is not found. Rather than polluting the import # space of this charm, simply catch Exception log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e.message), level=ERROR) + path=HDPARM_FILE, error=e), level=ERROR) def set_max_sectors_kb(dev_name, max_sectors_size): @@ -308,7 +308,7 @@ def set_max_sectors_kb(dev_name, max_sectors_size): f.write(max_sectors_size) except IOError as e: log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) + max_sectors_kb_path, e), level=ERROR) def get_max_sectors_kb(dev_name): @@ -328,7 +328,7 @@ def get_max_sectors_kb(dev_name): return int(max_sectors_kb) except IOError as e: log('Failed to read max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e.message), level=ERROR) + max_sectors_kb_path, e), level=ERROR) # Bail. return 0 return 0 @@ -350,7 +350,7 @@ def get_max_hw_sectors_kb(dev_name): return int(max_hw_sectors_kb) except IOError as e: log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e.message), level=ERROR) + max_hw_sectors_kb_path, e), level=ERROR) return 0 return 0 @@ -547,11 +547,11 @@ def get_osd_weight(osd_id): return device['crush_weight'] except ValueError as v: log("Unable to parse ceph tree json: {}. 
Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: log("ceph osd tree command failed with message: {}".format( - e.message)) + e)) raise @@ -591,11 +591,11 @@ def get_osd_tree(service): return crush_list except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: log("ceph osd tree command failed with message: {}".format( - e.message)) + e)) raise @@ -944,15 +944,27 @@ def get_partition_list(dev): # For each line of output for partition in partitions: parts = partition.split() - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) + try: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name=parts[5], + uuid=parts[6]) + ) + except IndexError: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name="", + uuid=parts[5]) + ) + return partitions_list except subprocess.CalledProcessError: raise @@ -973,7 +985,7 @@ def is_osd_disk(dev): return True except subprocess.CalledProcessError as e: log("sgdisk inspection of partition {} on {} failed with " - "error: {}. Skipping".format(partition.minor, dev, e.message), + "error: {}. Skipping".format(partition.minor, dev, e), level=ERROR) return False @@ -1682,7 +1694,7 @@ def upgrade_monitor(new_version): apt_update(fatal=True) except subprocess.CalledProcessError as err: log("Adding the ceph source failed with message: {}".format( - err.message)) + err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) try: @@ -1711,7 +1723,7 @@ def upgrade_monitor(new_version): service_start('ceph-mon-all') except subprocess.CalledProcessError as err: log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) + "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -1895,7 +1907,7 @@ def upgrade_osd(new_version): apt_update(fatal=True) except subprocess.CalledProcessError as err: log("Adding the ceph sources failed with message: {}".format( - err.message)) + err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -1940,7 +1952,7 @@ def upgrade_osd(new_version): except (subprocess.CalledProcessError, IOError) as err: log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err.message)) + "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2159,7 +2171,7 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): """Pretty print supported upgrade paths for ceph""" return ["{} -> {}".format(key, value) - for key, value in UPGRADE_PATHS.iteritems()] + for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): @@ -2189,11 +2201,10 @@ def get_ceph_pg_stat(): return json_tree except ValueError as v: log("Unable to parse ceph pg stat json: {}. 
Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph pg stat command failed with message: {}".format( - e.message)) + log("ceph pg stat command failed with message: {}".format(e)) raise @@ -2217,11 +2228,10 @@ def get_ceph_health(): return json_tree except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v.message)) + tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph status command failed with message: {}".format( - e.message)) + log("ceph status command failed with message: {}".format(e)) raise @@ -2247,8 +2257,8 @@ def reweight_osd(osd_num, new_weight): return True return False except subprocess.CalledProcessError as e: - log("ceph osd crush reweight command failed with message: {}".format( - e.message)) + log("ceph osd crush reweight command failed" + " with message: {}".format(e)) raise From 3d43a4fce0d9601120adff79736f5b6dd6822f24 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 4 Dec 2017 15:52:10 +0000 Subject: [PATCH 1421/2699] Fix up exception logging for py3 It used "e.message", which py3 doesn't have. Thus, this uses str(e) which works on both py2 and py3. Change-Id: I34a5de9430ce8fabdf715e83867f0d69336676f1 --- ceph-mon/actions/ceph_ops.py | 6 +++--- ceph-mon/actions/create-cache-tier.py | 6 +++--- ceph-mon/actions/create-erasure-profile.py | 8 ++++---- ceph-mon/actions/create-pool.py | 2 +- ceph-mon/actions/delete-erasure-profile.py | 6 +++--- ceph-mon/actions/list-erasure-profiles.py | 4 ++-- ceph-mon/actions/list-pools.py | 2 +- ceph-mon/actions/pool-get.py | 2 +- ceph-mon/actions/pool-set.py | 4 ++-- ceph-mon/actions/pool-statistics.py | 2 +- ceph-mon/actions/remove-cache-tier.py | 5 ++--- ceph-mon/actions/remove-pool-snapshot.py | 4 ++-- ceph-mon/actions/rename-pool.py | 2 +- ceph-mon/actions/set-pool-max-bytes.py | 2 +- ceph-mon/actions/show-disk-free.py | 3 +-- ceph-mon/actions/snapshot-pool.py | 2 +- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 8 ++++---- 17 files changed, 33 insertions(+), 35 deletions(-) diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index fe88ccbf..d23ad017 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -53,7 +53,7 @@ def list_pools(): rados.NoData, rados.NoSpace, rados.PermissionError) as e: - action_fail(e.message) + action_fail(str(e)) def pool_get(): @@ -64,7 +64,7 @@ def pool_get(): .decode('UTF-8')) return value except CalledProcessError as e: - action_fail(e.message) + action_fail(str(e)) def set_pool(): @@ -89,7 +89,7 @@ def pool_stats(): rados.NoData, rados.NoSpace, rados.PermissionError) as e: - action_fail(e.message) + action_fail(str(e)) def delete_pool_snapshot(): diff --git a/ceph-mon/actions/create-cache-tier.py b/ceph-mon/actions/create-cache-tier.py index 09e4c594..0ef212ed 100755 --- a/ceph-mon/actions/create-cache-tier.py +++ b/ceph-mon/actions/create-cache-tier.py @@ -45,10 +45,10 @@ def make_cache_tier(): try: pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) except CalledProcessError as err: - log("Add cache tier failed with message: {}".format( - err.message)) + log("Add cache tier failed with message: {}" + .format(str(err))) action_fail("create-cache-tier failed. 
Add cache tier failed with " - "message: {}".format(err.message)) + "message: {}".format(str(err))) if __name__ == '__main__': diff --git a/ceph-mon/actions/create-erasure-profile.py b/ceph-mon/actions/create-erasure-profile.py index 661f400c..75e43c56 100755 --- a/ceph-mon/actions/create-erasure-profile.py +++ b/ceph-mon/actions/create-erasure-profile.py @@ -46,7 +46,7 @@ def make_erasure_profile(): except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) + "message: {}".format(str(e))) elif plugin == "isa": k = action_get("data-chunks") m = action_get("coding-chunks") @@ -60,7 +60,7 @@ def make_erasure_profile(): except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) + "message: {}".format(str(e))) elif plugin == "local": k = action_get("data-chunks") m = action_get("coding-chunks") @@ -76,7 +76,7 @@ def make_erasure_profile(): except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) + "message: {}".format(str(e))) elif plugin == "shec": k = action_get("data-chunks") m = action_get("coding-chunks") @@ -92,7 +92,7 @@ def make_erasure_profile(): except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " - "message: {}".format(e.message)) + "message: {}".format(str(e))) else: # Unknown erasure plugin action_fail("Unknown erasure-plugin type of {}. " diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create-pool.py index ba1dd260..aa4a27d2 100755 --- a/ceph-mon/actions/create-pool.py +++ b/ceph-mon/actions/create-pool.py @@ -46,7 +46,7 @@ def create_pool(): "is allowed".format(pool_type)) except CalledProcessError as e: action_fail("Pool creation failed because of a failed process. 
" - "Ret Code: {} Message: {}".format(e.returncode, e.message)) + "Ret Code: {} Message: {}".format(e.returncode, str(e))) if __name__ == '__main__': diff --git a/ceph-mon/actions/delete-erasure-profile.py b/ceph-mon/actions/delete-erasure-profile.py index 0b45563a..17dc2ef5 100755 --- a/ceph-mon/actions/delete-erasure-profile.py +++ b/ceph-mon/actions/delete-erasure-profile.py @@ -31,10 +31,10 @@ def delete_erasure_profile(): try: remove_erasure_profile(service='admin', profile_name=name) except CalledProcessError as e: - log("Remove erasure profile failed with error {}".format(e.message), + log("Remove erasure profile failed with error {}".format(str(e)), level="ERROR") - action_fail("Remove erasure profile failed with error: {}".format( - e.message)) + action_fail("Remove erasure profile failed with error: {}" + .format(str(e))) if __name__ == '__main__': diff --git a/ceph-mon/actions/list-erasure-profiles.py b/ceph-mon/actions/list-erasure-profiles.py index 2d88a44e..c26804ec 100755 --- a/ceph-mon/actions/list-erasure-profiles.py +++ b/ceph-mon/actions/list-erasure-profiles.py @@ -32,5 +32,5 @@ action_set({'message': out}) except CalledProcessError as e: log(e) - action_fail("Listing erasure profiles failed with error: {}".format( - e.message)) + action_fail("Listing erasure profiles failed with error: {}" + .format(str(e))) diff --git a/ceph-mon/actions/list-pools.py b/ceph-mon/actions/list-pools.py index 976c660f..aa4ca745 100755 --- a/ceph-mon/actions/list-pools.py +++ b/ceph-mon/actions/list-pools.py @@ -28,4 +28,4 @@ action_set({'message': out}) except CalledProcessError as e: log(e) - action_fail("List pools failed with error: {}".format(e.message)) + action_fail("List pools failed with error: {}".format(str(e))) diff --git a/ceph-mon/actions/pool-get.py b/ceph-mon/actions/pool-get.py index 7fa8c6b6..c5315818 100755 --- a/ceph-mon/actions/pool-get.py +++ b/ceph-mon/actions/pool-get.py @@ -30,4 +30,4 @@ action_set({'message': out}) except CalledProcessError as e: log(e) - action_fail("Pool get failed with message: {}".format(e.message)) + action_fail("Pool get failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/pool-set.py b/ceph-mon/actions/pool-set.py index 218814ce..fa743624 100755 --- a/ceph-mon/actions/pool-set.py +++ b/ceph-mon/actions/pool-set.py @@ -34,6 +34,6 @@ try: handle_set_pool_value(service='admin', request=request) except CalledProcessError as e: - log(e.message) + log(str(e)) action_fail("Setting pool key: {} and value: {} failed with " - "message: {}".format(key, value, e.message)) + "message: {}".format(key, value, str(e))) diff --git a/ceph-mon/actions/pool-statistics.py b/ceph-mon/actions/pool-statistics.py index 369b5f0b..30635fb3 100755 --- a/ceph-mon/actions/pool-statistics.py +++ b/ceph-mon/actions/pool-statistics.py @@ -27,4 +27,4 @@ action_set({'message': out}) except CalledProcessError as e: log(e) - action_fail("ceph df failed with message: {}".format(e.message)) + action_fail("ceph df failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/remove-cache-tier.py b/ceph-mon/actions/remove-cache-tier.py index 2da89388..e0c3444f 100755 --- a/ceph-mon/actions/remove-cache-tier.py +++ b/ceph-mon/actions/remove-cache-tier.py @@ -46,10 +46,9 @@ def delete_cache_tier(): try: pool.remove_cache_tier(cache_pool=cache_pool) except CalledProcessError as err: - log("Removing the cache tier failed with message: {}".format( - err.message)) + log("Removing the cache tier failed with message: {}".format(str(err))) 
action_fail("remove-cache-tier failed. Removing the cache tier failed " - "with message: {}".format(err.message)) + "with message: {}".format(str(err))) if __name__ == '__main__': diff --git a/ceph-mon/actions/remove-pool-snapshot.py b/ceph-mon/actions/remove-pool-snapshot.py index ad60932e..d535f370 100755 --- a/ceph-mon/actions/remove-pool-snapshot.py +++ b/ceph-mon/actions/remove-pool-snapshot.py @@ -30,5 +30,5 @@ snapshot_name=snapname) except CalledProcessError as e: log(e) - action_fail("Remove pool snapshot failed with message: {}".format( - e.message)) + action_fail("Remove pool snapshot failed with message: {}" + .format(str(e))) diff --git a/ceph-mon/actions/rename-pool.py b/ceph-mon/actions/rename-pool.py index 4e53ce61..2d769c1d 100755 --- a/ceph-mon/actions/rename-pool.py +++ b/ceph-mon/actions/rename-pool.py @@ -28,4 +28,4 @@ rename_pool(service='admin', old_name=name, new_name=new_name) except CalledProcessError as e: log(e) - action_fail("Renaming pool failed with message: {}".format(e.message)) + action_fail("Renaming pool failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/set-pool-max-bytes.py b/ceph-mon/actions/set-pool-max-bytes.py index 5d0098da..2d549923 100755 --- a/ceph-mon/actions/set-pool-max-bytes.py +++ b/ceph-mon/actions/set-pool-max-bytes.py @@ -28,4 +28,4 @@ set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) except CalledProcessError as e: log(e) - action_fail("Set pool quota failed with message: {}".format(e.message)) + action_fail("Set pool quota failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/show-disk-free.py b/ceph-mon/actions/show-disk-free.py index ca2a629b..2ba7894f 100755 --- a/ceph-mon/actions/show-disk-free.py +++ b/ceph-mon/actions/show-disk-free.py @@ -28,5 +28,4 @@ except CalledProcessError as e: log(e) action_fail( - "ceph osd df tree failed with message: {}".format(e.message) - ) + "ceph osd df tree failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/snapshot-pool.py b/ceph-mon/actions/snapshot-pool.py index 4d071cdd..fdd007ce 100755 --- a/ceph-mon/actions/snapshot-pool.py +++ b/ceph-mon/actions/snapshot-pool.py @@ -30,4 +30,4 @@ snapshot_name=snapname) except CalledProcessError as e: log(e) - action_fail("Snapshot pool failed with message: {}".format(e.message)) + action_fail("Snapshot pool failed with message: {}".format(str(e))) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 39231612..0d9bacfd 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -377,12 +377,12 @@ def get_mon_map(service): try: return json.loads(mon_status) except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) + log("Unable to parse mon_status json: {}. Error: {}" + .format(mon_status, str(v))) raise except CalledProcessError as e: - log("mon_status command failed with message: {}".format( - e.message)) + log("mon_status command failed with message: {}" + .format(str(e))) raise From 1b9a1f7a4111a009740188db98477c00f930633f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 5 Dec 2017 05:27:00 +0000 Subject: [PATCH 1422/2699] Add Bionic and remove Zesty series and tests Bionic, being the next LTS, is important to enable for dev and test as early as possible ahead of 18.02. Zesty goes EOL in Jan 2018. 
The next stable charms release (18.02) will not provide Zesty series support, as it was an interim (non-LTS) release. Change-Id: Id066e4444b5eec17229f1731ccfb725a423b1e13 --- ceph-fs/src/metadata.yaml | 2 +- ...ic-zesty-ocata => dev-basic-bionic-queens} | 4 +-- ceph-fs/src/tests/dev-basic-xenial-queens | 25 +++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) rename ceph-fs/src/tests/{gate-basic-zesty-ocata => dev-basic-bionic-queens} (85%) create mode 100755 ceph-fs/src/tests/dev-basic-xenial-queens diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 8d1b80d0..6f0f9cf0 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -11,8 +11,8 @@ tags: - misc series: - xenial + - bionic - artful - - zesty subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/gate-basic-zesty-ocata b/ceph-fs/src/tests/dev-basic-bionic-queens similarity index 85% rename from ceph-fs/src/tests/gate-basic-zesty-ocata rename to ceph-fs/src/tests/dev-basic-bionic-queens index dce0829c..f48d42fd 100755 --- a/ceph-fs/src/tests/gate-basic-zesty-ocata +++ b/ceph-fs/src/tests/dev-basic-bionic-queens @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph deployment on zesty-ocata.""" +"""Amulet tests on a basic ceph deployment on bionic-queens.""" from basic_deployment import CephFsBasicDeployment if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='zesty') + deployment = CephFsBasicDeployment(series='bionic') deployment.run_tests() diff --git a/ceph-fs/src/tests/dev-basic-xenial-queens b/ceph-fs/src/tests/dev-basic-xenial-queens new file mode 100755 index 00000000..2d0dd71a --- /dev/null +++ b/ceph-fs/src/tests/dev-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-queens.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() From 2d478b5765914a0e503cf5ff283ed3feb00e77e8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 5 Dec 2017 05:27:06 +0000 Subject: [PATCH 1423/2699] Add Bionic and remove Zesty series and tests Bionic, being the next LTS, is important to enable for dev and test as early as possible ahead of 18.02. Zesty goes EOL in Jan 2018. The next stable charms release (18.02) will not provide Zesty series support, as it was an interim (non-LTS) release. 
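A note on the asymmetry between the two new dev tests, visible in the diffs below: queens is the default OpenStack release on bionic, so the bionic test deploys straight from the Ubuntu archive, while the xenial test must point both the charm origin and the package source at the Ubuntu Cloud Archive pocket. The two constructor calls (taken from the tests added here) are:

    CephBasicDeployment(series='bionic')
    CephBasicDeployment(series='xenial',
                        openstack='cloud:xenial-queens',
                        source='cloud:xenial-updates/queens')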
Change-Id: Id5bb7415e930772d4bca124a3c09bb5e3bafa009 --- ceph-mon/metadata.yaml | 2 +- ...ic-zesty-ocata => dev-basic-bionic-queens} | 4 +-- ceph-mon/tests/dev-basic-xenial-queens | 25 +++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) rename ceph-mon/tests/{gate-basic-zesty-ocata => dev-basic-bionic-queens} (85%) create mode 100755 ceph-mon/tests/dev-basic-xenial-queens diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index af5fd794..f4a233ef 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -11,8 +11,8 @@ tags: - misc series: - xenial + - bionic - artful - - zesty - trusty peers: mon: diff --git a/ceph-mon/tests/gate-basic-zesty-ocata b/ceph-mon/tests/dev-basic-bionic-queens similarity index 85% rename from ceph-mon/tests/gate-basic-zesty-ocata rename to ceph-mon/tests/dev-basic-bionic-queens index a6421933..e531990d 100755 --- a/ceph-mon/tests/gate-basic-zesty-ocata +++ b/ceph-mon/tests/dev-basic-bionic-queens @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph deployment on zesty-ocata.""" +"""Amulet tests on a basic ceph deployment on bionic-queens.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': - deployment = CephBasicDeployment(series='zesty') + deployment = CephBasicDeployment(series='bionic') deployment.run_tests() diff --git a/ceph-mon/tests/dev-basic-xenial-queens b/ceph-mon/tests/dev-basic-xenial-queens new file mode 100755 index 00000000..5fa16a57 --- /dev/null +++ b/ceph-mon/tests/dev-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-queens.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() From d06427580badd09e168d3f17555424e58972b2a0 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 5 Dec 2017 05:27:13 +0000 Subject: [PATCH 1424/2699] Add Bionic and remove Zesty series and tests Bionic, being the next LTS, is important to enable for dev and test as early as possible ahead of 18.02. Zesty goes EOL in Jan 2018. The next stable charms release (18.02) will not provide Zesty series support, as it was an interim (non-LTS) release. 
Change-Id: If8d82e92a6768af127e68d18c65dfbc3b97f49f9 --- ceph-osd/metadata.yaml | 2 +- ...ic-zesty-ocata => dev-basic-bionic-queens} | 4 +-- ceph-osd/tests/dev-basic-xenial-queens | 25 +++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) rename ceph-osd/tests/{gate-basic-zesty-ocata => dev-basic-bionic-queens} (84%) create mode 100755 ceph-osd/tests/dev-basic-xenial-queens diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index c709dbbb..1931cd06 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -12,8 +12,8 @@ tags: - misc series: - xenial + - bionic - artful - - zesty - trusty description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/tests/gate-basic-zesty-ocata b/ceph-osd/tests/dev-basic-bionic-queens similarity index 84% rename from ceph-osd/tests/gate-basic-zesty-ocata rename to ceph-osd/tests/dev-basic-bionic-queens index d2d61e1b..0dafe812 100755 --- a/ceph-osd/tests/gate-basic-zesty-ocata +++ b/ceph-osd/tests/dev-basic-bionic-queens @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph-osd deployment on zesty-ocata.""" +"""Amulet tests on a basic ceph-osd deployment on bionic-queens.""" from basic_deployment import CephOsdBasicDeployment if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='zesty') + deployment = CephOsdBasicDeployment(series='bionic') deployment.run_tests() diff --git a/ceph-osd/tests/dev-basic-xenial-queens b/ceph-osd/tests/dev-basic-xenial-queens new file mode 100755 index 00000000..61c7b06d --- /dev/null +++ b/ceph-osd/tests/dev-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-osd deployment on xenial-queens.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() From c260463359ebacaad5bb9ea97fa6307760d82734 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 5 Dec 2017 05:27:19 +0000 Subject: [PATCH 1425/2699] Add Bionic and remove Zesty series and tests Bionic, being the next LTS, is important to enable for dev and test as early as possible ahead of 18.02. Zesty goes EOL in Jan 2018. The next stable charms release (18.02) will not provide Zesty series support, as it was an interim (non-LTS) release. 
Change-Id: Ia0ea3d0ce22856c9a705417f258b6d0484bb0882 --- ceph-proxy/metadata.yaml | 2 +- ...gate-basic-zesty-ocata => dev-basic-bionic-queens} | 4 ++-- ceph-proxy/tests/dev-basic-xenial-queens | 11 +++++++++++ 3 files changed, 14 insertions(+), 3 deletions(-) rename ceph-proxy/tests/{gate-basic-zesty-ocata => dev-basic-bionic-queens} (52%) create mode 100755 ceph-proxy/tests/dev-basic-xenial-queens diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index bd159d09..f6a23424 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,8 +11,8 @@ tags: - misc series: - xenial + - bionic - artful - - zesty - trusty extra-bindings: public: diff --git a/ceph-proxy/tests/gate-basic-zesty-ocata b/ceph-proxy/tests/dev-basic-bionic-queens similarity index 52% rename from ceph-proxy/tests/gate-basic-zesty-ocata rename to ceph-proxy/tests/dev-basic-bionic-queens index 4d540576..7179b93b 100755 --- a/ceph-proxy/tests/gate-basic-zesty-ocata +++ b/ceph-proxy/tests/dev-basic-bionic-queens @@ -1,9 +1,9 @@ #!/usr/bin/env python -"""Amulet tests on a basic ceph deployment on zesty-ocata.""" +"""Amulet tests on a basic ceph deployment on bionic-queens.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': - deployment = CephBasicDeployment(series='zesty') + deployment = CephBasicDeployment(series='bionic') deployment.run_tests() diff --git a/ceph-proxy/tests/dev-basic-xenial-queens b/ceph-proxy/tests/dev-basic-xenial-queens new file mode 100755 index 00000000..829ce932 --- /dev/null +++ b/ceph-proxy/tests/dev-basic-xenial-queens @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on xenial-queens.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() From 45041712e6fe64fb510e5489370d9aa505a79e36 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 5 Dec 2017 05:27:26 +0000 Subject: [PATCH 1426/2699] Add Bionic and remove Zesty series and tests Bionic, being the next LTS, is important to enable for dev and test as early as possible ahead of 18.02. Zesty goes EOL in Jan 2018. The next stable charms release (18.02) will not provide Zesty series support, as it was an interim (non-LTS) release. Change-Id: I5585cb843ddf5d941c618829004bce3cf0bb1a82 --- ceph-radosgw/metadata.yaml | 2 +- ...ic-zesty-ocata => dev-basic-bionic-queens} | 4 +-- ceph-radosgw/tests/dev-basic-xenial-queens | 25 +++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) rename ceph-radosgw/tests/{gate-basic-zesty-ocata => dev-basic-bionic-queens} (84%) create mode 100755 ceph-radosgw/tests/dev-basic-xenial-queens diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 95356f1c..22d923fb 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,8 +14,8 @@ tags: - misc series: - xenial + - bionic - artful - - zesty - trusty extra-bindings: public: diff --git a/ceph-radosgw/tests/gate-basic-zesty-ocata b/ceph-radosgw/tests/dev-basic-bionic-queens similarity index 84% rename from ceph-radosgw/tests/gate-basic-zesty-ocata rename to ceph-radosgw/tests/dev-basic-bionic-queens index fd101f68..a1246845 100755 --- a/ceph-radosgw/tests/gate-basic-zesty-ocata +++ b/ceph-radosgw/tests/dev-basic-bionic-queens @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Amulet tests on a basic ceph-radosgw deployment on zesty-ocata.""" +"""Amulet tests on a basic ceph-radosgw deployment on bionic-queens.""" from basic_deployment import CephRadosGwBasicDeployment if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='zesty') + deployment = CephRadosGwBasicDeployment(series='bionic') deployment.run_tests() diff --git a/ceph-radosgw/tests/dev-basic-xenial-queens b/ceph-radosgw/tests/dev-basic-xenial-queens new file mode 100755 index 00000000..fff90006 --- /dev/null +++ b/ceph-radosgw/tests/dev-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on xenial-queens.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() From 599ed240ddbc37caefe4d3a5a5a70db2ea7a05b2 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 5 Dec 2017 18:55:28 +0000 Subject: [PATCH 1427/2699] Py3 Fixes Charm-helpers sync for python3 fixes. Update Exception.message to use str(Exception) Change-Id: I0e354d0d07f220e3285eb3e863498918c82df2ca --- ceph-osd/actions/replace_osd.py | 6 +++--- ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py | 2 ++ ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py | 8 ++++---- ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py | 1 + 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py index 5705db03..297ec177 100755 --- a/ceph-osd/actions/replace_osd.py +++ b/ceph-osd/actions/replace_osd.py @@ -37,10 +37,10 @@ def get_disk_stats(): return diskstats.readlines() except IOError as err: hookenv.log('Could not open /proc/diskstats. Error: {}' - .format(err.message)) + .format(str(err))) hookenv.action_fail( 'replace-osd failed because /proc/diskstats could not ' - 'be opened {}'.format(err.message)) + 'be opened {}'.format(str(err))) return None @@ -65,7 +65,7 @@ def lookup_device_name(major_number, minor_number): return parts[2] except ValueError as value_err: hookenv.log('Could not convert {} or {} into an integer. 
Error: {}' - .format(parts[0], parts[1], value_err.message)) + .format(parts[0], parts[1], str(value_err))) continue return None diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 8a541d40..9e5af342 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -392,6 +392,8 @@ def get_swift_codename(version): releases = UBUNTU_OPENSTACK_RELEASE release = [k for k, v in six.iteritems(releases) if codename in v] ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 39231612..0d9bacfd 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -377,12 +377,12 @@ def get_mon_map(service): try: return json.loads(mon_status) except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) + log("Unable to parse mon_status json: {}. Error: {}" + .format(mon_status, str(v))) raise except CalledProcessError as e: - log("mon_status command failed with message: {}".format( - e.message)) + log("mon_status command failed with message: {}" + .format(str(e))) raise diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) From 8f994aa11a29d768120548f11b021be835b46057 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Tue, 21 Nov 2017 12:23:36 +1300 Subject: [PATCH 1428/2699] Enable xenial-pike amulet test Make default func27-smoke xenial-pike Charm-helpers sync Review and merge https://github.com/juju/charm-helpers/pull/69 first. 
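The synced get_ceph_pool_sample() fix is worth spelling out: on Luminous, a pool's id can no longer be used as an index into the 'ceph df' pools list, because pool ids are not reused after deletion and so ids and list positions drift apart. The helper therefore matches on the id field instead. A minimal sketch with made-up sample data (illustrative only, not from a real cluster):

    # 'ceph df'-style data; note pool id 2 sits at list index 1.
    df = {'pools': [
        {'id': 0, 'name': 'rbd', 'stats': {'objects': 5, 'kb_used': 20}},
        {'id': 2, 'name': 'cinder-ceph', 'stats': {'objects': 9, 'kb_used': 36}},
    ]}
    # Index lookup: df['pools'][2] raises IndexError although pool id 2 exists.
    # Id-based lookup, as in the synced helper:
    for pool in df['pools']:
        if pool['id'] == 2:
            print(pool['name'], pool['stats']['objects'], pool['stats']['kb_used'])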
Change-Id: I7a810cc743d242c652959ef87ea5b5f371c480a1 --- ceph-mon/tests/basic_deployment.py | 9 ++++++--- .../tests/charmhelpers/contrib/openstack/amulet/utils.py | 9 ++++++--- ceph-mon/tests/gate-basic-xenial-pike | 0 ceph-mon/tox.ini | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) mode change 100644 => 100755 ceph-mon/tests/gate-basic-xenial-pike diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index caa92197..67091164 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -619,9 +619,12 @@ def test_410_ceph_cinder_vol_create(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) - # Validate ceph cinder pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "cinder pool disk usage") + # Luminous (pike) ceph seems more efficient at disk usage so we cannot + # guarantee the ordering of kb_used + if self._get_openstack_release() < self.xenial_pike: + # Validate ceph cinder pool disk space usage samples over time + ret = u.validate_ceph_pool_samples(pool_size_samples, + "cinder pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-mon/tests/gate-basic-xenial-pike b/ceph-mon/tests/gate-basic-xenial-pike old mode 100644 new mode 100755 diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index a8188146..b8559a91 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -68,7 +68,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Charm Functional Test From 9c4a7ab69008a92bb3c157067aa4fca69e7b01d8 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Tue, 21 Nov 2017 12:30:16 +1300 Subject: [PATCH 1429/2699] Enable xenial-pike amulet test Make default func27-smoke xenial-pike Charm-helpers sync Change-Id: I321befa86209f83494ed660fae6212c88dbd358b --- ceph-radosgw/tests/gate-basic-xenial-pike | 0 ceph-radosgw/tox.ini | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 ceph-radosgw/tests/gate-basic-xenial-pike diff --git a/ceph-radosgw/tests/gate-basic-xenial-pike b/ceph-radosgw/tests/gate-basic-xenial-pike old mode 100644 new mode 100755 diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 7c2936e3..6d44f4b9 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Charm Functional Test From 6b45906655e584f633d074ca47ffbb0718cf4ad7 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Tue, 21 Nov 2017 12:26:09 +1300 Subject: [PATCH 1430/2699] Enable xenial-pike amulet test Make default func27-smoke xenial-pike Charm-helpers sync Review and merge https://github.com/juju/charm-helpers/pull/69 first. Change-Id: I32a04dfc995929840f8c544e1d5ed58066515210 --- ceph-osd/tests/basic_deployment.py | 9 ++++++--- .../tests/charmhelpers/contrib/openstack/amulet/utils.py | 9 ++++++--- ceph-osd/tests/gate-basic-xenial-pike | 0 ceph-osd/tox.ini | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) mode change 100644 => 100755 ceph-osd/tests/gate-basic-xenial-pike diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index bb23dc68..8850da5e 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -508,9 +508,12 @@ def test_410_ceph_cinder_vol_create(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) - # Validate ceph cinder pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "cinder pool disk usage") + # Luminous (pike) ceph seems more efficient at disk usage so we cannot + # guarantee the ordering of kb_used + if self._get_openstack_release() < self.xenial_mitaka: + # Validate ceph cinder pool disk space usage samples over time + ret = u.validate_ceph_pool_samples(pool_size_samples, + "cinder pool disk usage") if ret: amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-osd/tests/gate-basic-xenial-pike b/ceph-osd/tests/gate-basic-xenial-pike old mode 100644 new mode 100755 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index a8188146..b8559a91 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -68,7 +68,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Charm Functional Test From 672fba187694085d9e65bca8b75e9ba395f74da7 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 8 Dec 2017 15:31:24 -0800 Subject: [PATCH 1431/2699] Make xenial-pike the default smoke test Change-Id: I4b500716f08d2041044a204951750887e34fb87a --- ceph-proxy/tox.ini
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 7c2936e3..6d44f4b9 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Charm Functional Test From 145e1b40bcfca1fb188b41555a920f609e900057 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 11 Dec 2017 17:11:06 +0100 Subject: [PATCH 1432/2699] Sync to add in Bionic support Change-Id: Ie1df1653085da0ecea841e99469216aeec2ae391 --- .../contrib/openstack/amulet/utils.py | 9 ++-- .../charmhelpers/contrib/openstack/context.py | 14 +++++- .../charmhelpers/contrib/openstack/utils.py | 2 + .../charmhelpers/contrib/storage/linux/lvm.py | 50 +++++++++++++++++++ .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + 6 files changed, 73 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index e6c0e9fe..70850c1b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -101,6 +101,8 @@ git_determine_python_path, enable_memcache, snap_install_requested, + CompareOpenStackReleases, + os_release, ) from charmhelpers.core.unitdata import kv @@ -1566,8 +1568,18 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. 
""" + def __init__(self, ost_rel_check_pkg_name): + self.ost_rel_check_pkg_name = ost_rel_check_pkg_name + def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} + ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} + rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + if CompareOpenStackReleases(rel) >= 'pike': + ctxt['volume_api_version'] = '3' + else: + ctxt['volume_api_version'] = '2' + + return ctxt class AppArmorContext(OSContextGenerator): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 8a541d40..9e5af342 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -392,6 +392,8 @@ def get_swift_codename(version): releases = UBUNTU_OPENSTACK_RELEASE release = [k for k, v in six.iteritems(releases) if codename in v] ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py index 7f2a0604..79a7a245 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools from subprocess import ( CalledProcessError, check_call, @@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device): :block_device: str: Full path of PV-initialized block device. ''' check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. 
+ + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) From 79f5c5cda1d9ce1b0688ae506167aeddfce793a1 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 8 Dec 2017 15:32:42 -0800 Subject: [PATCH 1433/2699] Make xenial-pike the default smoke test Change-Id: Ib6a8db01d826ea604477cfa0963a4bd56aea198a --- ceph-fs/src/tests/gate-basic-xenial-pike | 25 ++++++++++++++++++++++++ ceph-fs/src/tox.ini | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100755 ceph-fs/src/tests/gate-basic-xenial-pike diff --git a/ceph-fs/src/tests/gate-basic-xenial-pike b/ceph-fs/src/tests/gate-basic-xenial-pike new file mode 100755 index 00000000..9af16420 --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-xenial-pike @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-pike.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='xenial', + openstack='cloud:xenial-pike', + source='cloud:xenial-updates/pike') + deployment.run_tests() diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 276db2fc..f201a203 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -35,7 +35,7 @@ commands = # Run a specific test as an Amulet smoke test (expected to always pass) basepython = python2.7 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Run all deploy-from-source tests which are +x (may not always pass!) From 6527d352598c921e97cfaf02e46625904a53121e Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 11 Dec 2017 11:36:27 -0800 Subject: [PATCH 1434/2699] Update HAProxy default timeout values The default HAProxy timeout values are fairly strict. On a busy cloud it is common to exceed one or more of these timeouts. The only indication that HAProxy has exceeded a timeout and dropped the connection is errors such as "BadStatusLine" or "EOF." These can be very difficult to diagnose when intermittent. 
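Concretely, per the config.yaml and haproxy.cfg changes in this patch, the fallback timeouts rendered into haproxy.cfg (used only when the corresponding charm config option is unset) move as follows, in milliseconds:

    timeout queue    5000  ->  9000
    timeout connect  5000  ->  9000
    timeout client  30000  -> 90000
    timeout server  30000  -> 90000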
This charm-helpers sync pulls in the change to update the default timeout values to more real world settings. These values have been extensively tested in ServerStack. Configured values will not be overridden. Partial Bug: #1736171 Change-Id: I312dd56ecf55ad67485305e57f2807a5ea6975cd --- ceph-radosgw/config.yaml | 16 +++--- .../contrib/openstack/amulet/deployment.py | 12 +++-- .../contrib/openstack/amulet/utils.py | 9 ++-- .../charmhelpers/contrib/openstack/context.py | 14 +++++- .../contrib/openstack/templates/haproxy.cfg | 8 +-- .../charmhelpers/contrib/openstack/utils.py | 2 + .../contrib/storage/linux/ceph.py | 8 +-- .../charmhelpers/contrib/storage/linux/lvm.py | 50 +++++++++++++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../contrib/openstack/amulet/deployment.py | 12 +++-- .../contrib/openstack/amulet/utils.py | 9 ++-- ceph-radosgw/tests/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + 14 files changed, 115 insertions(+), 31 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index b2d3cd5d..02e4b8f1 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -176,25 +176,25 @@ options: default: description: | Server timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. + configurations. If not provided, default value of 90000ms is used. haproxy-client-timeout: type: int default: description: | Client timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. + configurations. If not provided, default value of 90000ms is used. haproxy-queue-timeout: type: int default: description: | Queue timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. + configurations. If not provided, default value of 9000ms is used. haproxy-connect-timeout: type: int default: description: | Connect timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. + configurations. If not provided, default value of 9000ms is used. # Network config (by default all access is over 'private-address') os-admin-network: type: string @@ -281,25 +281,25 @@ options: default: description: | Server timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. + configurations. If not provided, default value of 90000ms is used. haproxy-client-timeout: type: int default: description: | Client timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 30000ms is used. + configurations. If not provided, default value of 90000ms is used. haproxy-queue-timeout: type: int default: description: | Queue timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. + configurations. If not provided, default value of 9000ms is used. haproxy-connect-timeout: type: int default: description: | Connect timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 5000ms is used. + configurations. If not provided, default value of 9000ms is used. 
# External SSL Parameters ssl_cert: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match. @@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. """ - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index e6c0e9fe..70850c1b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -101,6 +101,8 @@ git_determine_python_path, enable_memcache, snap_install_requested, + CompareOpenStackReleases, + os_release, ) from charmhelpers.core.unitdata import kv @@ -1566,8 +1568,18 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. 
""" + def __init__(self, ost_rel_check_pkg_name): + self.ost_rel_check_pkg_name = ost_rel_check_pkg_name + def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} + ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} + rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + if CompareOpenStackReleases(rel) >= 'pike': + ctxt['volume_api_version'] = '3' + else: + ctxt['volume_api_version'] = '2' + + return ctxt class AppArmorContext(OSContextGenerator): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index ebc8a68a..d36af2aa 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -17,22 +17,22 @@ defaults {%- if haproxy_queue_timeout %} timeout queue {{ haproxy_queue_timeout }} {%- else %} - timeout queue 5000 + timeout queue 9000 {%- endif %} {%- if haproxy_connect_timeout %} timeout connect {{ haproxy_connect_timeout }} {%- else %} - timeout connect 5000 + timeout connect 9000 {%- endif %} {%- if haproxy_client_timeout %} timeout client {{ haproxy_client_timeout }} {%- else %} - timeout client 30000 + timeout client 90000 {%- endif %} {%- if haproxy_server_timeout %} timeout server {{ haproxy_server_timeout }} {%- else %} - timeout server 30000 + timeout server 90000 {%- endif %} listen stats diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 8a541d40..9e5af342 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -392,6 +392,8 @@ def get_swift_codename(version): releases = UBUNTU_OPENSTACK_RELEASE release = [k for k, v in six.iteritems(releases) if codename in v] ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 39231612..0d9bacfd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -377,12 +377,12 @@ def get_mon_map(service): try: return json.loads(mon_status) except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) + log("Unable to parse mon_status json: {}. Error: {}" + .format(mon_status, str(v))) raise except CalledProcessError as e: - log("mon_status command failed with message: {}".format( - e.message)) + log("mon_status command failed with message: {}" + .format(str(e))) raise diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index 7f2a0604..79a7a245 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools from subprocess import ( CalledProcessError, check_call, @@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device): :block_device: str: Full path of PV-initialized block device. 
''' check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_display_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_display_attr = 'vg_name,' + lv_display_attr + cmd = ['lvs', '--options', lv_display_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. + + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match.
@@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. """ - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) From 648c40326d8a024f0004b389c7c6d7fb41764cb7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 14 Dec 2017 09:59:28 +0000 Subject: [PATCH 1435/2699] Ensure remote_unit is only used in relation hooks This patch stops mon_relation() from using remote_unit() when not in a relation hook. This is needed because remote_unit() uses the JUJU_REMOTE_UNIT environment variable which is only available in relation hook executions. 
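A condensed, standalone sketch of the pattern this patch adopts (names simplified from ceph_hooks.py, not the full hook): remote_unit() resolves JUJU_REMOTE_UNIT from the hook environment, which Juju only sets for relation hook executions, so code reachable outside a relation hook must take the unit name explicitly.

    import os

    def remote_unit():
        # charm-helpers derives this from the hook environment; outside a
        # relation hook JUJU_REMOTE_UNIT is unset and this yields None.
        return os.environ.get('JUJU_REMOTE_UNIT')

    def client_relation_changed(relid=None, unit=None):
        # The fix: only fall back to remote_unit() when no unit was passed,
        # then derive the broker response key from that explicit value.
        unit = unit or remote_unit()
        unit_id = unit.replace('/', '-')
        return 'broker-rsp-' + unit_id

    # Works when driven from a non-relation hook (e.g. leader-settings-changed):
    assert client_relation_changed(relid='rel1', unit='glance/0') == \
        'broker-rsp-glance-0'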
Closes-Bug: #1738154 Change-Id: I2ffe5a07d69495aa0af1d1a58a7d45c2813659f2 --- ceph-mon/hooks/ceph_hooks.py | 2 +- ceph-mon/unit_tests/test_ceph_hooks.py | 33 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e9fec6e9..7f8876cd 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -598,7 +598,7 @@ def client_relation_changed(relid=None, unit=None): log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = remote_unit().replace('/', '-') + unit_id = unit.replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id # broker_rsp is being left for backward compatibility, # unit_response_key superscedes it diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 7c98fd02..ad955f86 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -251,6 +251,39 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks.ceph, 'is_quorum') + @patch.object(ceph_hooks, 'remote_unit') + @patch.object(ceph_hooks, 'relation_get') + @patch.object(ceph_hooks.ceph, 'is_leader') + @patch.object(ceph_hooks, 'process_requests') + @patch.object(ceph_hooks, 'relation_set') + def test_client_relation_changed_non_rel_hook(self, relation_set, + process_requests, + is_leader, + relation_get, + remote_unit, + is_quorum): + # Check for LP #1738154 + process_requests.return_value = 'AOK' + is_leader.return_value = True + relation_get.return_value = {'broker_req': 'req'} + remote_unit.return_value = None + is_quorum.return_value = True + ceph_hooks.client_relation_changed(relid='rel1', unit='glance/0') + relation_set.assert_called_once_with( + relation_id='rel1', + relation_settings={ + 'broker-rsp-glance-0': 'AOK', + 'broker_rsp': 'AOK'}) + relation_set.reset_mock() + remote_unit.return_value = 'glance/0' + ceph_hooks.client_relation_changed() + relation_set.assert_called_once_with( + relation_id=None, + relation_settings={ + 'broker-rsp-glance-0': 'AOK', + 'broker_rsp': 'AOK'}) + class BootstrapSourceTestCase(test_utils.CharmTestCase): From 2eb2e5de4accf3cca02eb49ea02b75d5e2ba329d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 14 Dec 2017 14:11:02 +0000 Subject: [PATCH 1436/2699] ch-sync and ceph-sync to pickup 1696073 fixes Sync charmhelpers and charms.ceph code to pickup fixes for Bug #1696073 Change-Id: Icf844ec7d33f2e558dee7935fe5fa3d7f08e0d59 Closes-Bug: #1696073 --- .../charmhelpers/contrib/openstack/utils.py | 15 +++++++++-- .../contrib/storage/linux/ceph.py | 25 ++++++++++++++----- ceph-mon/lib/ceph/broker.py | 12 +++++++-- 3 files changed, 42 insertions(+), 10 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 9e5af342..e1d852db 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -2045,14 +2045,25 @@ def token_cache_pkgs(source=None, release=None): def update_json_file(filename, items): """Updates the json `filename` with a given dict. - :param filename: json filename (i.e.: /etc/glance/policy.json) + :param filename: path to json file (e.g. 
/etc/glance/policy.json) :param items: dict of items to update """ + if not items: + return + with open(filename) as fd: policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. + before = json.dumps(policy, indent=4, sort_keys=True) policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + with open(filename, "w") as fd: - fd.write(json.dumps(policy, indent=4)) + fd.write(after) @cached diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0d9bacfd..87621c47 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1064,14 +1064,24 @@ def __init__(self, api_version=1, request_id=None): self.ops = [] def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None): + permission=None, key_name=None, + object_prefix_permissions=None): """ Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. + { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} """ - self.ops.append({'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, 'name': key_name or service_name(), - 'group-permission': permission}) + self.ops.append({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None): @@ -1107,7 +1117,10 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 95ee7799..1c0b9286 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -187,6 +187,9 @@ def handle_add_permissions_to_key(request, service): group = get_group(group_name=group_name) service_obj = get_service_groups(service=service_name, namespace=group_namespace) + if request.get('object-prefix-permissions'): + service_obj['object_prefix_perms'] = request.get( + 'object-prefix-permissions') format("Service object: {}".format(service_obj)) permission = request.get('group-permission') or "rwx" if service_name not in group['services']: @@ -233,7 +236,7 @@ def pool_permission_list_for_service(service): """Build the permission string for Ceph for a given service""" permissions = [] permission_types = collections.OrderedDict() - for permission, group in service["group_names"].items(): + for permission, group in sorted(service["group_names"].items()): if permission not in permission_types: permission_types[permission] = [] for item in group: @@ -241,8 +244,13 @@ def 
pool_permission_list_for_service(service): for permission, groups in permission_types.items(): permission = "allow {}".format(permission) for group in groups: - for pool in service['groups'][group]['pools']: + for pool in service['groups'][group].get('pools', []): permissions.append("{} pool={}".format(permission, pool)) + for permission, prefixes in sorted( + service.get("object_prefix_perms", {}).items()): + for prefix in prefixes: + permissions.append("allow {} object_prefix {}".format(permission, + prefix)) return ["mon", "allow r", "osd", ', '.join(permissions)] From 6b2f0227230f56f0262a4463c3b36088fde22162 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 20 Dec 2017 12:01:24 +0000 Subject: [PATCH 1437/2699] Fix upgrades from older charm versions Ensure that netaddr and netifaces are installed on upgrade from older py2 based charms to the newer py3 based execution. Change-Id: I1c7b2eb545e5ef5b40ab2db5a1a968ceb7d916e1 Closes-Bug: 1738979 --- ceph-osd/hooks/install | 2 +- ceph-osd/hooks/install_deps | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index eb5eee3b..96836422 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') +declare -a DEPS=('apt' 'pip' 'yaml') check_and_install() { pkg="${1}-${2}" diff --git a/ceph-osd/hooks/install_deps b/ceph-osd/hooks/install_deps index bb600820..3375e7a0 100755 --- a/ceph-osd/hooks/install_deps +++ b/ceph-osd/hooks/install_deps @@ -2,7 +2,7 @@ # Wrapper to ensure that python dependencies are installed before we get into # the python part of the hook execution -declare -a DEPS=('dnspython' 'pyudev') +declare -a DEPS=('dnspython' 'pyudev' 'netaddr' 'netifaces') check_and_install() { pkg="${1}-${2}" From a7e7b069074a2f812e44fa59b2df33ddcb128d55 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Mon, 28 Aug 2017 16:13:52 +0300 Subject: [PATCH 1438/2699] add bluestore-specific config options Adds bluestore-specific options related to the metadata-only journal. The options allow a user to control: 1. path to a bluestore wal (block special file or regular file) 2. path to a bluestore db (block special file or regular file) 3. size of both Their configuration works similarly to the FileStore journal. If paths are not specified both WAL and DB will be collocated on the same block device as data. Other options can be configured via an existing config-flags option if needed. http://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/ Closes-Bug: #1710474 Change-Id: Ia85092230d4dcb0435354deb276012f923547393 Depends-On: I483ee9dae4ce69c71ae06359d0fb96aaa1c56cbc Depends-On: Idbbb69acec92b2f2efca80691ca73a2030bcf633 --- ceph-osd/config.yaml | 24 ++++++++++++++ ceph-osd/hooks/ceph_hooks.py | 2 ++ ceph-osd/lib/ceph/utils.py | 2 ++ ceph-osd/metadata.yaml | 8 +++++ ceph-osd/templates/ceph.conf | 6 ++++ ceph-osd/unit_tests/test_ceph_hooks.py | 44 ++++++++++++++++++++++---- 6 files changed, 79 insertions(+), 7 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index e61f1fec..70ec475c 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -72,6 +72,16 @@ options: that OSD. . Only supported with ceph >= 0.48.3. + bluestore-wal: + type: string + default: + description: | + Path to a BlueStore WAL block device or file. 
+ bluestore-db: + type: string + default: + description: | + Path to a BlueStore DB block device or file. osd-journal-size: type: int default: 1024 @@ -83,6 +93,20 @@ options: partition for the journal. . Only supported with ceph >= 0.48.3. + bluestore-block-wal-size: + type: int + default: 0 + description: | + Size of a partition or file to use for BlueStore WAL (RocksDB WAL). + A default value is not set as it is calculated by ceph-disk if + not specified. + bluestore-block-db-size: + type: int + default: 0 + description: | + Size of a partition or file to use for BlueStore metadata + or RocksDB SSTs. A default value is not set as it is calculated + by ceph-disk if not specified. osd-format: type: string default: xfs diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3a8fdefe..9ec92ccb 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -264,6 +264,8 @@ def get_ceph_context(upgrading=False): 'upgrade_in_progress': upgrading, 'bluestore': config('bluestore'), 'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0, + 'bluestore_block_wal_size': config('bluestore-block-wal-size'), + 'bluestore_block_db_size': config('bluestore-block-db-size'), } if config('prefer-ipv6'): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 76656304..4e1bfe36 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1477,11 +1477,13 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if wal: cmd.append('--block.wal') least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) db = get_devices('bluestore-db') if db: cmd.append('--block.db') least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 1931cd06..f75f6872 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -37,3 +37,11 @@ storage: type: block multiple: range: 0- + bluestore-db: + type: block + multiple: + range: 0- + bluestore-wal: + type: block + multiple: + range: 0- diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index a4363069..9f2c8144 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -64,6 +64,12 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring {% if not bluestore_experimental -%} osd objectstore = bluestore {%- endif -%} +{% if bluestore_block_wal_size -%} +bluestore block wal size = {{ bluestore_block_wal_size }} +{%- endif %} +{% if bluestore_block_db_size -%} +bluestore block db size = {{ bluestore_block_db_size }} +{%- endif %} {%- else %} osd journal size = {{ osd_journal_size }} filestore xattr use omap = true diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 6be78cc1..09d8f2d3 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -1,4 +1,5 @@ # Copyright 2016 Canonical Ltd + # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
@@ -35,7 +36,16 @@ 'prefer-ipv6': False, 'customize-failure-domain': False, 'bluestore': False, - 'crush-initial-weight': '0'} + 'crush-initial-weight': '0', + 'bluestore': False, + 'bluestore-block-wal-size': 0, + 'bluestore-block-db-size': 0, + 'bluestore-wal': None, + 'bluestore-db': None} + + +BLUESTORE_WAL_TEST_SIZE = 128 * 2 ** 20 +BLUESTORE_DB_TEST_SIZE = 2 * 2 ** 30 class CephHooksTestCase(unittest.TestCase): @@ -75,7 +85,9 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': False, - 'bluestore_experimental': False} + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -112,7 +124,9 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': False, - 'bluestore_experimental': True} + 'bluestore_experimental': True, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -128,6 +142,12 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): def test_get_ceph_context_bluestore(self, mock_config, mock_config2): config = copy.deepcopy(CHARM_CONFIG) config['bluestore'] = True + BLUESTORE_WAL = '/dev/sdb /dev/sdc' + BLUESTORE_DB = '/dev/sdb /dev/sdc' + config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE + config['bluestore-block-db-size'] = BLUESTORE_DB_TEST_SIZE + config['bluestore-wal'] = BLUESTORE_WAL + config['bluestore-db'] = BLUESTORE_DB mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() @@ -149,7 +169,9 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': True, - 'bluestore_experimental': False} + 'bluestore_experimental': False, + 'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE, + 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -166,6 +188,8 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): config = copy.deepcopy(CHARM_CONFIG) config['bluestore'] = True + config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE + config['bluestore-block-db-size'] = BLUESTORE_DB_TEST_SIZE mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() @@ -187,7 +211,9 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': True, - 'bluestore_experimental': True} + 'bluestore_experimental': True, + 'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE, + 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -225,7 +251,9 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': False, - 'bluestore_experimental': False} + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) 
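The byte counts these tests exercise are worth seeing end to end. A small sketch mirroring the ceph.conf template condition above: the sizes are plain byte values, and 0 (the charm default) omits the setting so ceph-disk calculates it.

    # Values mirror the test constants above: 128MiB WAL, 2GiB DB, in bytes.
    BLUESTORE_WAL_TEST_SIZE = 128 * 2 ** 20   # 134217728
    BLUESTORE_DB_TEST_SIZE = 2 * 2 ** 30      # 2147483648

    def bluestore_conf_lines(wal_size=0, db_size=0):
        # A falsy size emits nothing, deferring sizing to ceph-disk.
        lines = []
        if wal_size:
            lines.append('bluestore block wal size = {}'.format(wal_size))
        if db_size:
            lines.append('bluestore block db size = {}'.format(db_size))
        return lines

    assert bluestore_conf_lines() == []
    assert bluestore_conf_lines(BLUESTORE_WAL_TEST_SIZE,
                                BLUESTORE_DB_TEST_SIZE) == [
        'bluestore block wal size = 134217728',
        'bluestore block db size = 2147483648']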
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -265,7 +293,9 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'upgrade_in_progress': False, 'use_syslog': 'true', 'bluestore': False, - 'bluestore_experimental': False} + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) @patch.object(ceph_hooks, 'ceph') From b2779fdbd0b25f59d8b2052cbffa35c7d6eeadf3 Mon Sep 17 00:00:00 2001 From: Nguyen Hung Phuong Date: Tue, 2 Jan 2018 14:04:11 +0700 Subject: [PATCH 1439/2699] Use assertRegex instead of assertRegexpMatches In Python3, assertRegexpMatches & assertNotRegexpMatches are deprecated in favor of assertRegex and assertNotRegex Change-Id: If08a6f163bed2dde5ac56416a50754f4a8d38b2e --- ceph-mon/unit_tests/test_check_ceph_status.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index 021b8dd1..eeb13606 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -32,7 +32,7 @@ def test_health_ok(self, mock_subprocess): mock_subprocess.return_value = tree.encode('UTF-8') args = check_ceph_status.parse_args(['--degraded_thresh', '1']) check_output = check_ceph_status.check_ceph_status(args) - self.assertRegexpMatches(check_output, r"^All OK$") + self.assertRegex(check_output, r"^All OK$") def test_health_warn(self, mock_subprocess): with open('unit_tests/ceph_warn.json') as f: From 75cfc3e2e3ba34db39262da3797f05af78404e70 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Jan 2018 09:12:20 +0000 Subject: [PATCH 1440/2699] Resync ceph helpers for misc fixes Pickup fixes for upgrade from Jewel -> Luminous. 
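For reference, the deprecation addressed by the patch above in one self-contained test: assertRegexpMatches has been a deprecated alias since Python 3.2 and was eventually removed in Python 3.12, so the rename is required for forward compatibility.

    import unittest

    class RegexAssertions(unittest.TestCase):
        def test_all_ok(self):
            output = 'All OK'
            # Deprecated Py2-era spelling, removed in Python 3.12:
            #   self.assertRegexpMatches(output, r"^All OK$")
            # Py3 replacement, as used in the patch above:
            self.assertRegex(output, r"^All OK$")

    if __name__ == '__main__':
        unittest.main()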
Change-Id: Id0694b5116e604efbe1c5259de332ae0c4bae574 Closes-Bug: 1742082 Closes-Bug: 1742083 Closes-Bug: 1742120 Closes-Bug: 1742079 Closes-Bug: 1742408 --- ceph-mon/lib/ceph/broker.py | 4 +- ceph-mon/lib/ceph/utils.py | 87 +++++++++++++++++++++++-------------- 2 files changed, 57 insertions(+), 34 deletions(-) diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 1c0b9286..8ba2e7a9 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -338,7 +338,7 @@ def save_service(service_name, service): service['groups'] = {} return monitor_key_set(service='admin', key="cephx.services.{}".format(service_name), - value=json.dumps(service)) + value=json.dumps(service, sort_keys=True)) def save_group(group, group_name): @@ -346,7 +346,7 @@ def save_group(group, group_name): group_key = get_group_key(group_name=group_name) return monitor_key_set(service='admin', key=group_key, - value=json.dumps(group)) + value=json.dumps(group, sort_keys=True)) def get_group_key(group_name): diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 76656304..2915225c 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1311,7 +1311,8 @@ def bootstrap_monitor_cluster(secret): # Ceph >= 0.61.3 needs this for ceph-mon fs creation mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) # end changes for Ceph >= 0.61.3 try: add_keyring_to_ceph(keyring, @@ -1673,12 +1674,23 @@ def roll_monitor_cluster(new_version, upgrade_key): service='mon', my_name=my_name, version=new_version) + # NOTE(jamespage): + # Wait until all monitors have upgraded before bootstrapping + # the ceph-mgr daemons due to use of new mgr keyring profiles + if new_version == 'luminous': + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + bootstrap_manager() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) status_set('blocked', 'failed to upgrade monitor') +# TODO(jamespage): +# Mimic support will need to ensure that ceph-mgr daemons are also +# restarted during upgrades - probably through use of one of the +# high level systemd targets shipped by the packaging. def upgrade_monitor(new_version): """Upgrade the current ceph monitor to the new version @@ -1699,26 +1711,31 @@ def upgrade_monitor(new_version): sys.exit(1) try: if systemd(): - for mon_id in get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) + service_stop('ceph-mon') else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + owner = ceph_user() + # Ensure the files and directories under /var/lib/ceph is chowned # properly as part of the move to the Jewel release, which moved the # ceph daemons to running as ceph:ceph instead of root:root. 
if new_version == 'jewel': # Ensure the ownership of Ceph's directories is correct - owner = ceph_user() chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), owner=owner, group=owner, follow_links=True) + # Ensure that mon directory is user writable + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) + if systemd(): - for mon_id in get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) + service_start('ceph-mon') else: service_start('ceph-mon-all') except subprocess.CalledProcessError as err: @@ -1799,25 +1816,28 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): previous_node_start_time = monitor_key_get( upgrade_key, "{}_{}_{}_start".format(service, previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) + if (previous_node_start_time is not None and + ((current_timestamp - (10 * 60)) > + float(previous_node_start_time))): + # NOTE(jamespage): + # Previous node is probably dead as we've been waiting + # for 10 minutes - lets move on and upgrade + log("Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + # NOTE(jamespage) + # Previous node has not started, or started less than + # 10 minutes ago - sleep a random amount of time and + # then check again. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) def get_upgrade_position(osd_sorted_list, match_name): @@ -1874,7 +1894,7 @@ def roll_osd_cluster(new_version, upgrade_key): version=new_version) else: # Check if the previous node has finished - status_set('blocked', + status_set('waiting', 'Waiting on {} to finish upgrading'.format( osd_sorted_list[position - 1].name)) wait_on_previous_node( @@ -1922,7 +1942,10 @@ def upgrade_osd(new_version): # way to update the code on the node. if not dirs_need_ownership_update('osd'): log('Restarting all OSDs to load new binaries', DEBUG) - service_restart('ceph-osd-all') + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') return # Need to change the ownership of all directories which are not OSD @@ -2148,11 +2171,11 @@ def dirs_need_ownership_update(service): return False # A dict of valid ceph upgrade paths. 
Mapping is old -> new -UPGRADE_PATHS = { - 'firefly': 'hammer', - 'hammer': 'jewel', - 'jewel': 'luminous', -} +UPGRADE_PATHS = collections.OrderedDict([ + ('firefly', 'hammer'), + ('hammer', 'jewel'), + ('jewel', 'luminous'), +]) # Map UCA codenames to ceph codenames UCA_CODENAME_MAP = { From 0fe6075bbbc8f07fde0b258202c989c0cd5f6f13 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Jan 2018 11:31:53 +0000 Subject: [PATCH 1441/2699] Resync ceph helpers for misc fixes Pickup fixes for upgrade from Jewel -> Luminous. Change-Id: I3ffeebc38464096724ca0fd40f1cf29cf3e7fe4f Closes-Bug: 1742082 Closes-Bug: 1742083 Closes-Bug: 1742120 Closes-Bug: 1742079 Closes-Bug: 1742408 --- ceph-osd/lib/ceph/broker.py | 16 +++++-- ceph-osd/lib/ceph/utils.py | 89 +++++++++++++++++++++++-------------- 2 files changed, 67 insertions(+), 38 deletions(-) diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index 95ee7799..8ba2e7a9 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -187,6 +187,9 @@ def handle_add_permissions_to_key(request, service): group = get_group(group_name=group_name) service_obj = get_service_groups(service=service_name, namespace=group_namespace) + if request.get('object-prefix-permissions'): + service_obj['object_prefix_perms'] = request.get( + 'object-prefix-permissions') format("Service object: {}".format(service_obj)) permission = request.get('group-permission') or "rwx" if service_name not in group['services']: @@ -233,7 +236,7 @@ def pool_permission_list_for_service(service): """Build the permission string for Ceph for a given service""" permissions = [] permission_types = collections.OrderedDict() - for permission, group in service["group_names"].items(): + for permission, group in sorted(service["group_names"].items()): if permission not in permission_types: permission_types[permission] = [] for item in group: @@ -241,8 +244,13 @@ def pool_permission_list_for_service(service): for permission, groups in permission_types.items(): permission = "allow {}".format(permission) for group in groups: - for pool in service['groups'][group]['pools']: + for pool in service['groups'][group].get('pools', []): permissions.append("{} pool={}".format(permission, pool)) + for permission, prefixes in sorted( + service.get("object_prefix_perms", {}).items()): + for prefix in prefixes: + permissions.append("allow {} object_prefix {}".format(permission, + prefix)) return ["mon", "allow r", "osd", ', '.join(permissions)] @@ -330,7 +338,7 @@ def save_service(service_name, service): service['groups'] = {} return monitor_key_set(service='admin', key="cephx.services.{}".format(service_name), - value=json.dumps(service)) + value=json.dumps(service, sort_keys=True)) def save_group(group, group_name): @@ -338,7 +346,7 @@ def save_group(group, group_name): group_key = get_group_key(group_name=group_name) return monitor_key_set(service='admin', key=group_key, - value=json.dumps(group)) + value=json.dumps(group, sort_keys=True)) def get_group_key(group_name): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 4e1bfe36..2915225c 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1311,7 +1311,8 @@ def bootstrap_monitor_cluster(secret): # Ceph >= 0.61.3 needs this for ceph-mon fs creation mkdir('/var/run/ceph', owner=ceph_user(), group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user()) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) # end changes for Ceph >= 0.61.3 try: 
add_keyring_to_ceph(keyring, @@ -1477,13 +1478,11 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, if wal: cmd.append('--block.wal') least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) db = get_devices('bluestore-db') if db: cmd.append('--block.db') least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') @@ -1675,12 +1674,23 @@ def roll_monitor_cluster(new_version, upgrade_key): service='mon', my_name=my_name, version=new_version) + # NOTE(jamespage): + # Wait until all monitors have upgraded before bootstrapping + # the ceph-mgr daemons due to use of new mgr keyring profiles + if new_version == 'luminous': + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + bootstrap_manager() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) status_set('blocked', 'failed to upgrade monitor') +# TODO(jamespage): +# Mimic support will need to ensure that ceph-mgr daemons are also +# restarted during upgrades - probably through use of one of the +# high level systemd targets shipped by the packaging. def upgrade_monitor(new_version): """Upgrade the current ceph monitor to the new version @@ -1701,26 +1711,31 @@ def upgrade_monitor(new_version): sys.exit(1) try: if systemd(): - for mon_id in get_local_mon_ids(): - service_stop('ceph-mon@{}'.format(mon_id)) + service_stop('ceph-mon') else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + owner = ceph_user() + # Ensure the files and directories under /var/lib/ceph is chowned # properly as part of the move to the Jewel release, which moved the # ceph daemons to running as ceph:ceph instead of root:root. if new_version == 'jewel': # Ensure the ownership of Ceph's directories is correct - owner = ceph_user() chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), owner=owner, group=owner, follow_links=True) + # Ensure that mon directory is user writable + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) + if systemd(): - for mon_id in get_local_mon_ids(): - service_start('ceph-mon@{}'.format(mon_id)) + service_start('ceph-mon') else: service_start('ceph-mon-all') except subprocess.CalledProcessError as err: @@ -1801,25 +1816,28 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): previous_node_start_time = monitor_key_get( upgrade_key, "{}_{}_{}_start".format(service, previous_node, version)) - if (current_timestamp - (10 * 60)) > previous_node_start_time: - # Previous node is probably dead. Lets move on - if previous_node_start_time is not None: - log( - "Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - else: - # I have to wait. Sleep a random amount of time and then - # check if I can lock,upgrade and roll. 
- wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) + if (previous_node_start_time is not None and + ((current_timestamp - (10 * 60)) > + float(previous_node_start_time))): + # NOTE(jamespage): + # Previous node is probably dead as we've been waiting + # for 10 minutes - lets move on and upgrade + log("Waited 10 mins on node {}. current time: {} > " + "previous node start time: {} Moving on".format( + previous_node, + (current_timestamp - (10 * 60)), + previous_node_start_time)) + return + # NOTE(jamespage) + # Previous node has not started, or started less than + # 10 minutes ago - sleep a random amount of time and + # then check again. + wait_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + previous_node_finished = monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version)) def get_upgrade_position(osd_sorted_list, match_name): @@ -1876,7 +1894,7 @@ def roll_osd_cluster(new_version, upgrade_key): version=new_version) else: # Check if the previous node has finished - status_set('blocked', + status_set('waiting', 'Waiting on {} to finish upgrading'.format( osd_sorted_list[position - 1].name)) wait_on_previous_node( @@ -1924,7 +1942,10 @@ def upgrade_osd(new_version): # way to update the code on the node. if not dirs_need_ownership_update('osd'): log('Restarting all OSDs to load new binaries', DEBUG) - service_restart('ceph-osd-all') + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') return # Need to change the ownership of all directories which are not OSD @@ -2150,11 +2171,11 @@ def dirs_need_ownership_update(service): return False # A dict of valid ceph upgrade paths. 
Mapping is old -> new -UPGRADE_PATHS = { - 'firefly': 'hammer', - 'hammer': 'jewel', - 'jewel': 'luminous', -} +UPGRADE_PATHS = collections.OrderedDict([ + ('firefly', 'hammer'), + ('hammer', 'jewel'), + ('jewel', 'luminous'), +]) # Map UCA codenames to ceph codenames UCA_CODENAME_MAP = { From 7f9381faba4c8f6ab8185b4a8f9f00e03f3f84ff Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 19 Jan 2018 12:07:27 +0000 Subject: [PATCH 1442/2699] Sync charm-helpers Notable issues resolved: openstack_upgrade_available() broken for swift https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847 haproxy context doesn't consider bindings https://bugs.launchpad.net/charm-helpers/+bug/1735421 regression in haproxy check https://bugs.launchpad.net/charm-helpers/+bug/1743287 Change-Id: Icae43be0dd03bde4df2615b5bfcb5edc44c82be2 --- .../charmhelpers/contrib/openstack/context.py | 101 +++- .../contrib/openstack/ha/utils.py | 175 ++++++- .../charmhelpers/contrib/openstack/utils.py | 483 +----------------- .../contrib/storage/linux/ceph.py | 17 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 2 + ceph-mon/hooks/charmhelpers/core/unitdata.py | 2 + ceph-mon/tests/charmhelpers/core/hookenv.py | 2 + ceph-mon/tests/charmhelpers/core/unitdata.py | 2 + 8 files changed, 271 insertions(+), 513 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 70850c1b..7ada2760 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -93,12 +93,10 @@ format_ipv6_addr, is_bridge_member, is_ipv6_disabled, + get_relation_ip, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, - get_host_ip, - git_determine_usr_bin, - git_determine_python_path, enable_memcache, snap_install_requested, CompareOpenStackReleases, @@ -334,10 +332,7 @@ def __init__(self, self.rel_name = rel_name self.interfaces = [self.rel_name] - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - + def _setup_pki_cache(self): if self.service and self.service_user: # This is required for pki token signing if we don't want /tmp to # be used. 
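The switch from a plain dict to collections.OrderedDict in the two resyncs above matters because the helpers iterate over UPGRADE_PATHS to report and sequence supported upgrades, and plain-dict iteration order was not guaranteed on the Python versions in use. A sketch, with pretty_print_upgrade_paths() as an illustrative helper name rather than a confirmed API:

    import collections

    # Ordered old -> new mapping, as introduced by the resyncs above, so
    # iteration order is deterministic on Python 2 and 3 alike.
    UPGRADE_PATHS = collections.OrderedDict([
        ('firefly', 'hammer'),
        ('hammer', 'jewel'),
        ('jewel', 'luminous'),
    ])

    def pretty_print_upgrade_paths():
        # Illustrative helper: render the supported single-step upgrades.
        return ['{} -> {}'.format(old, new)
                for old, new in UPGRADE_PATHS.items()]

    assert pretty_print_upgrade_paths() == [
        'firefly -> hammer', 'hammer -> jewel', 'jewel -> luminous']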
@@ -347,6 +342,15 @@ def __call__(self): mkdir(path=cachedir, owner=self.service_user, group=self.service_user, perms=0o700) + return cachedir + return None + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): @@ -385,6 +389,62 @@ def __call__(self): return {} +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): @@ -566,11 +626,6 @@ def __call__(self): if not relation_ids('cluster') and not self.singlenode_mode: return {} - if config('prefer-ipv6'): - addr = get_ipv6_addr(exc_list=[config('vip')])[0] - else: - addr = get_host_ip(unit_get('private-address')) - l_unit = local_unit().replace('/', '-') cluster_hosts = {} @@ -578,7 +633,15 @@ def __call__(self): # and associated backends for addr_type in ADDRESS_TYPES: cfg_opt = 'os-{}-network'.format(addr_type) - laddr = get_address_in_network(config(cfg_opt)) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) cluster_hosts[laddr] = { @@ -589,15 +652,19 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) add backend based on private address - this - # with 
either be the only backend or the fallback if no acls + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) cluster_hosts[addr] = { @@ -607,6 +674,8 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -1323,8 +1392,6 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, - "usr_bin": git_determine_usr_bin(), - "python_path": git_determine_python_path(), } return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 9a4d79c1..6060ae50 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -23,6 +23,8 @@ Helpers for high availability. """ +import json + import re from charmhelpers.core.hookenv import ( @@ -32,6 +34,7 @@ config, status_set, DEBUG, + WARNING, ) from charmhelpers.core.host import ( @@ -40,6 +43,23 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), ) @@ -53,8 +73,8 @@ class DNSHAException(Exception): def update_dns_ha_resource_params(resources, resource_params, relation_id=None, crm_ocf='ocf:maas:dns'): - """ Check for os-*-hostname settings and update resource dictionaries for - the HA relation. + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. @param resources: Pointer to dictionary of resources. Usually instantiated in ha_joined(). @@ -64,7 +84,85 @@ def update_dns_ha_resource_params(resources, resource_params, @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. 
+ + @returns boolean + """ + return config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. + + @returns dict: json encoded data for use with relation_set + """ + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data = { + 'resources': { + _haproxy_res: 'lsb:haproxy', + }, + 'resource_params': { + _haproxy_res: 'op monitor interval="5s"' + }, + 'init_services': { + _haproxy_res: 'haproxy' + }, + 'clones': { + 'cl_{}_haproxy'.format(service): _haproxy_res + }, + } + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ # Validate the charm environment for DNS HA assert_charm_supports_dns_ha() @@ -93,7 +191,7 @@ def update_dns_ha_resource_params(resources, resource_params, status_set('blocked', msg) raise DNSHAException(msg) - hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type) + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), @@ -101,42 +199,67 @@ def update_dns_ha_resource_params(resources, resource_params, continue hostname_group.append(hostname_key) - resources[hostname_key] = crm_ocf - resource_params[hostname_key] = ( - 'params fqdn="{}" ip_address="{}" ' - ''.format(hostname, resolve_address(endpoint_type=endpoint_type, - override=False))) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) if len(hostname_group) >= 1: log('DNS HA: Hostname group is set with {} as members. ' 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) - relation_set(relation_id=relation_id, groups={ - 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + relation_data['groups'] = { + 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + } else: msg = 'DNS HA: Hostname group has no members.' status_set('blocked', msg) raise DNSHAException(msg) -def assert_charm_supports_dns_ha(): - """Validate prerequisites for DNS HA - The MAAS client is only available on Xenial or greater +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. 
""" - if lsb_release().get('DISTRIB_RELEASE') < '16.04': - msg = ('DNS HA is only supported on 16.04 and greater ' - 'versions of Ubuntu.') - status_set('blocked', msg) - raise DNSHAException(msg) - return True + cluster_config = get_hacluster_config() + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) -def expect_ha(): - """ Determine if the unit expects to be in HA + if iface is not None: + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vip_group: + if vip not in relation_data['resource_params'][vip_key]: + vip_key = '{}_{}'.format(vip_key, vip_params) + else: + log("Resource '%s' (vip='%s') already exists in " + "vip group - skipping" % (vip_key, vip), WARNING) + continue - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. + relation_data['resources'][vip_key] = res_neutron_vip + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + vip_group.append(vip_key) - @returns boolean - """ - return config('vip') or config('dns-ha') + if len(vip_group) >= 1: + relation_data['groups'] = { + 'grp_{}_vips'.format(service): ' '.join(vip_group) + } diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e1d852db..b753275d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,6 @@ import re import itertools import functools -import shutil import six import traceback @@ -47,7 +46,6 @@ related_units, relation_ids, relation_set, - service_name, status_set, hook_name, application_version_set, @@ -68,11 +66,6 @@ port_has_listener, ) -from charmhelpers.contrib.python.packages import ( - pip_create_virtualenv, - pip_install, -) - from charmhelpers.core.host import ( lsb_release, mounts, @@ -84,7 +77,6 @@ ) from charmhelpers.fetch import ( apt_cache, - install_remote, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, @@ -278,27 +270,6 @@ ]), } -GIT_DEFAULT_REPOS = { - 'requirements': 'git://github.com/openstack/requirements', - 'cinder': 'git://github.com/openstack/cinder', - 'glance': 'git://github.com/openstack/glance', - 'horizon': 'git://github.com/openstack/horizon', - 'keystone': 'git://github.com/openstack/keystone', - 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', - 'neutron': 'git://github.com/openstack/neutron', - 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', - 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', - 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', - 'nova': 'git://github.com/openstack/nova', -} - -GIT_DEFAULT_BRANCHES = { - 'liberty': 'stable/liberty', - 'mitaka': 'stable/mitaka', - 'newton': 'stable/newton', - 'master': 'master', -} - DEFAULT_LOOPBACK_SIZE = '5G' @@ -530,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False): if _os_rel: return _os_rel _os_rel = ( - git_os_codename_install_source(config('openstack-origin-git')) or get_os_codename_package(package, fatal=False) or 
get_os_codename_install_source(config('openstack-origin')) or base) @@ -656,11 +626,6 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - if "swift" in package: - major_cur_vers = cur_vers.split('.', 1)[0] - major_avail_vers = avail_vers.split('.', 1)[0] - major_diff = apt.version_compare(major_avail_vers, major_cur_vers) - return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) return apt.version_compare(avail_vers, cur_vers) == 1 @@ -771,417 +736,6 @@ def wrapped_f(*args): return wrap -def git_install_requested(): - """ - Returns true if openstack-origin-git is specified. - """ - return config('openstack-origin-git') is not None - - -def git_os_codename_install_source(projects_yaml): - """ - Returns OpenStack codename of release being installed from source. - """ - if git_install_requested(): - projects = _git_yaml_load(projects_yaml) - - if projects in GIT_DEFAULT_BRANCHES.keys(): - if projects == 'master': - return 'ocata' - return projects - - if 'release' in projects: - if projects['release'] == 'master': - return 'ocata' - return projects['release'] - - return None - - -def git_default_repos(projects_yaml): - """ - Returns default repos if a default openstack-origin-git value is specified. - """ - service = service_name() - core_project = service - - for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): - if projects_yaml == default: - - # add the requirements repo first - repo = { - 'name': 'requirements', - 'repository': GIT_DEFAULT_REPOS['requirements'], - 'branch': branch, - } - repos = [repo] - - # neutron-* and nova-* charms require some additional repos - if service in ['neutron-api', 'neutron-gateway', - 'neutron-openvswitch']: - core_project = 'neutron' - if service == 'neutron-api': - repo = { - 'name': 'networking-hyperv', - 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], - 'branch': branch, - } - repos.append(repo) - for project in ['neutron-fwaas', 'neutron-lbaas', - 'neutron-vpnaas', 'nova']: - repo = { - 'name': project, - 'repository': GIT_DEFAULT_REPOS[project], - 'branch': branch, - } - repos.append(repo) - - elif service in ['nova-cloud-controller', 'nova-compute']: - core_project = 'nova' - repo = { - 'name': 'neutron', - 'repository': GIT_DEFAULT_REPOS['neutron'], - 'branch': branch, - } - repos.append(repo) - elif service == 'openstack-dashboard': - core_project = 'horizon' - - # finally add the current service's core project repo - repo = { - 'name': core_project, - 'repository': GIT_DEFAULT_REPOS[core_project], - 'branch': branch, - } - repos.append(repo) - - return yaml.dump(dict(repositories=repos, release=default)) - - return projects_yaml - - -def _git_yaml_load(projects_yaml): - """ - Load the specified yaml into a dictionary. - """ - if not projects_yaml: - return None - - return yaml.load(projects_yaml) - - -requirements_dir = None - - -def git_clone_and_install(projects_yaml, core_project): - """ - Clone/install all specified OpenStack repositories. - - The expected format of projects_yaml is: - - repositories: - - {name: keystone, - repository: 'git://git.openstack.org/openstack/keystone.git', - branch: 'stable/icehouse'} - - {name: requirements, - repository: 'git://git.openstack.org/openstack/requirements.git', - branch: 'stable/icehouse'} - - directory: /mnt/openstack-git - http_proxy: squid-proxy-url - https_proxy: squid-proxy-url - - The directory, http_proxy, and https_proxy keys are optional. 
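With the swift special-casing removed above, openstack_upgrade_available() reduces to a single Debian version comparison. A sketch of that semantics using python-apt directly (an assumption: apt_pkg is what backs the apt wrapper here). apt_pkg.version_compare() returns a positive, zero or negative number, so the sign is normalized rather than testing for exactly 1:

import apt_pkg  # from the python-apt package

apt_pkg.init()

def upgrade_available(cur_vers, avail_vers):
    # An upgrade exists only when the available version sorts strictly
    # newer than the installed one under Debian version ordering.
    return apt_pkg.version_compare(avail_vers, cur_vers) > 0

print(upgrade_available('2:9.0.0-0ubuntu1', '2:10.0.0-0ubuntu1'))   # True
print(upgrade_available('2:10.0.0-0ubuntu1', '2:10.0.0-0ubuntu1'))  # False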
- - """ - global requirements_dir - parent_dir = '/mnt/openstack-git' - http_proxy = None - - projects = _git_yaml_load(projects_yaml) - _git_validate_projects_yaml(projects, core_project) - - old_environ = dict(os.environ) - - if 'http_proxy' in projects.keys(): - http_proxy = projects['http_proxy'] - os.environ['http_proxy'] = projects['http_proxy'] - if 'https_proxy' in projects.keys(): - os.environ['https_proxy'] = projects['https_proxy'] - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - pip_create_virtualenv(os.path.join(parent_dir, 'venv')) - - # Upgrade setuptools and pip from default virtualenv versions. The default - # versions in trusty break master OpenStack branch deployments. - for p in ['pip', 'setuptools']: - pip_install(p, upgrade=True, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) - - constraints = None - for p in projects['repositories']: - repo = p['repository'] - branch = p['branch'] - depth = '1' - if 'depth' in p.keys(): - depth = p['depth'] - if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=False) - requirements_dir = repo_dir - constraints = os.path.join(repo_dir, "upper-constraints.txt") - # upper-constraints didn't exist until after icehouse - if not os.path.isfile(constraints): - constraints = None - # use constraints unless project yaml sets use_constraints to false - if 'use_constraints' in projects.keys(): - if not projects['use_constraints']: - constraints = None - else: - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=True, - constraints=constraints) - - os.environ = old_environ - - -def _git_validate_projects_yaml(projects, core_project): - """ - Validate the projects yaml. - """ - _git_ensure_key_exists('repositories', projects) - - for project in projects['repositories']: - _git_ensure_key_exists('name', project.keys()) - _git_ensure_key_exists('repository', project.keys()) - _git_ensure_key_exists('branch', project.keys()) - - if projects['repositories'][0]['name'] != 'requirements': - error_out('{} git repo must be specified first'.format('requirements')) - - if projects['repositories'][-1]['name'] != core_project: - error_out('{} git repo must be specified last'.format(core_project)) - - _git_ensure_key_exists('release', projects) - - -def _git_ensure_key_exists(key, keys): - """ - Ensure that key exists in keys. - """ - if key not in keys: - error_out('openstack-origin-git key \'{}\' is missing'.format(key)) - - -def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements, constraints=None): - """ - Clone and install a single git repository. - """ - if not os.path.exists(parent_dir): - juju_log('Directory already exists at {}. 
' - 'No need to create directory.'.format(parent_dir)) - os.mkdir(parent_dir) - - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote( - repo, dest=parent_dir, branch=branch, depth=depth) - - venv = os.path.join(parent_dir, 'venv') - - if update_requirements: - if not requirements_dir: - error_out('requirements repo must be cloned before ' - 'updating from global requirements.') - _git_update_requirements(venv, repo_dir, requirements_dir) - - juju_log('Installing git repo from dir: {}'.format(repo_dir)) - if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv, - constraints=constraints) - else: - pip_install(repo_dir, venv=venv, constraints=constraints) - - return repo_dir - - -def _git_update_requirements(venv, package_dir, reqs_dir): - """ - Update from global requirements. - - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt. - """ - orig_dir = os.getcwd() - os.chdir(reqs_dir) - python = os.path.join(venv, 'bin/python') - cmd = [python, 'update.py', package_dir] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - package = os.path.basename(package_dir) - error_out("Error updating {} from " - "global-requirements.txt".format(package)) - os.chdir(orig_dir) - - -def git_pip_venv_dir(projects_yaml): - """ - Return the pip virtualenv path. - """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - return os.path.join(parent_dir, 'venv') - - -def git_src_dir(projects_yaml, project): - """ - Return the directory where the specified project's source is located. - """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - if p['name'] == project: - return os.path.join(parent_dir, os.path.basename(p['repository'])) - - return None - - -def git_yaml_value(projects_yaml, key): - """ - Return the value in projects_yaml for the specified key. - """ - projects = _git_yaml_load(projects_yaml) - - if key in projects.keys(): - return projects[key] - - return None - - -def git_generate_systemd_init_files(templates_dir): - """ - Generate systemd init files. - - Generates and installs systemd init units and script files based on the - *.init.in files contained in the templates_dir directory. - - This code is based on the openstack-pkg-tools package and its init - script generation, which is used by the OpenStack packages. 
- """ - for f in os.listdir(templates_dir): - # Create the init script and systemd unit file from the template - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - init_source = os.path.join(templates_dir, init_file) - service_source = os.path.join(templates_dir, service_file) - - init_dest = os.path.join('/etc/init.d', init_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(init_in_source, init_source) - with open(init_source, 'a') as outfile: - template = ('/usr/share/openstack-pkg-tools/' - 'init-script-template') - with open(template) as infile: - outfile.write('\n\n{}'.format(infile.read())) - - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if os.path.exists(init_dest): - os.remove(init_dest) - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(init_source, init_dest) - shutil.copyfile(service_source, service_dest) - os.chmod(init_dest, 0o755) - - for f in os.listdir(templates_dir): - # If there's a service.in file, use it instead of the generated one - if f.endswith(".service.in"): - service_in_file = f - service_file = f[:-3] - - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(service_in_source, service_source) - - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - for f in os.listdir(templates_dir): - # Generate the systemd unit if there's no existing .service.in - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_in_file = "{}.service.in".format(init_file) - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - if not os.path.exists(service_in_source): - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - -def git_determine_usr_bin(): - """Return the /usr/bin path for Apache2 config. - - The /usr/bin path will be located in the virtualenv if the charm - is configured to deploy from source. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') - else: - return '/usr/bin' - - -def git_determine_python_path(): - """Return the python-path for Apache2 config. - - Returns 'None' unless the charm is configured to deploy from source, - in which case the path of the virtualenv's site-packages is returned. 
- """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), - 'lib/python2.7/site-packages') - else: - return None - - def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1615,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): """ ret = False - if git_install_requested(): - action_set({'outcome': 'installed from source, skipped upgrade.'}) - else: - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) return ret diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 87621c47..e13e60a6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None): assert isinstance(valid_range, list), \ "valid_range must be a list, was given {}".format(valid_range) # If we're dealing with strings - if valid_type is six.string_types: + if isinstance(value, six.string_types): assert value in valid_range, \ "{} is not in the list {}".format(value, valid_range) # Integer, float should have a min and max @@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value): :param value: :return: None. Can raise CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, + str(value).lower()] try: check_call(cmd) except CalledProcessError: @@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param durability_estimator: int :return: None. 
Can raise CalledProcessError """ + version = ceph_version() + # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + # failure_domain changed in luminous + if version and version >= '12.0.0': + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + # Add plugin specific information if locality is not None: # For local erasure codes diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-mon/tests/charmhelpers/core/unitdata.py b/ceph-mon/tests/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-mon/tests/charmhelpers/core/unitdata.py +++ b/ceph-mon/tests/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None From ae80a19ba297995de769524c1e34d877037fb2c3 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 19 Jan 2018 12:07:37 +0000 Subject: [PATCH 1443/2699] Sync charm-helpers Notable issues resolved: openstack_upgrade_available() broken for swift https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847 haproxy context doesn't consider bindings https://bugs.launchpad.net/charm-helpers/+bug/1735421 regression in haproxy check 
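The create_erasure_profile() hunk above keys the failure-domain option off the running Ceph release, since Luminous renamed ruleset-failure-domain to crush-failure-domain. Note the hunk compares version strings lexically ('12.0.0' orders correctly against the 10.x/12.x strings these charms see, but would mis-order a single-digit major such as '9.2.1'); the sketch below compares numeric tuples instead:

def failure_domain_option(ceph_version, failure_domain):
    # Luminous (>= 12.0.0) expects crush-failure-domain; older releases
    # still use ruleset-failure-domain.
    luminous = tuple(int(x) for x in ceph_version.split('.')) >= (12, 0, 0)
    option = 'crush-failure-domain' if luminous else 'ruleset-failure-domain'
    return '{}={}'.format(option, failure_domain)

print(failure_domain_option('12.2.4', 'host'))  # crush-failure-domain=host
print(failure_domain_option('10.2.9', 'host'))  # ruleset-failure-domain=host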
https://bugs.launchpad.net/charm-helpers/+bug/1743287 Change-Id: I86b23f0be2e5098696833996ca807d354fe72751 --- .../charmhelpers/contrib/openstack/context.py | 115 +++- .../charmhelpers/contrib/openstack/utils.py | 498 ++---------------- .../contrib/storage/linux/ceph.py | 42 +- .../charmhelpers/contrib/storage/linux/lvm.py | 50 ++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 2 + ceph-osd/hooks/charmhelpers/core/unitdata.py | 2 + ceph-osd/tests/charmhelpers/core/hookenv.py | 2 + ceph-osd/tests/charmhelpers/core/unitdata.py | 2 + 8 files changed, 217 insertions(+), 496 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index e6c0e9fe..7ada2760 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -93,14 +93,14 @@ format_ipv6_addr, is_bridge_member, is_ipv6_disabled, + get_relation_ip, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, - get_host_ip, - git_determine_usr_bin, - git_determine_python_path, enable_memcache, snap_install_requested, + CompareOpenStackReleases, + os_release, ) from charmhelpers.core.unitdata import kv @@ -332,10 +332,7 @@ def __init__(self, self.rel_name = rel_name self.interfaces = [self.rel_name] - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - + def _setup_pki_cache(self): if self.service and self.service_user: # This is required for pki token signing if we don't want /tmp to # be used. @@ -345,6 +342,15 @@ def __call__(self): mkdir(path=cachedir, owner=self.service_user, group=self.service_user, perms=0o700) + return cachedir + return None + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): @@ -383,6 +389,62 @@ def __call__(self): return {} +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + 
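To make the IdentityCredentialsContext mapping above concrete, a sketch with hypothetical relation data (hosts, ports and credentials invented for illustration) showing the context dict its __call__ assembles from a keystone unit's identity-credentials settings:

# Hypothetical settings a keystone unit might publish.
rdata = {
    'credentials_host': '10.5.0.10', 'credentials_port': '5000',
    'credentials_protocol': 'http', 'auth_host': '10.5.0.10',
    'auth_port': '35357', 'auth_protocol': 'http',
    'credentials_project': 'services', 'credentials_project_id': 'abc123',
    'credentials_username': 'ceph-mon', 'credentials_password': 's3cret',
    'api_version': '3', 'domain': 'service_domain',
}

ctxt = {
    'service_port': rdata.get('credentials_port'),
    'service_host': rdata.get('credentials_host'),
    'auth_host': rdata.get('auth_host'),
    'auth_port': rdata.get('auth_port'),
    'admin_tenant_name': rdata.get('credentials_project'),
    'admin_tenant_id': rdata.get('credentials_project_id'),
    'admin_user': rdata.get('credentials_username'),
    'admin_password': rdata.get('credentials_password'),
    'service_protocol': rdata.get('credentials_protocol') or 'http',
    'auth_protocol': rdata.get('auth_protocol') or 'http',
    'api_version': rdata.get('api_version') or '2.0',
}
# Keystone v3 relations additionally carry the admin domain.
if float(ctxt['api_version']) > 2:
    ctxt['admin_domain_name'] = rdata.get('domain')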
if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): @@ -564,11 +626,6 @@ def __call__(self): if not relation_ids('cluster') and not self.singlenode_mode: return {} - if config('prefer-ipv6'): - addr = get_ipv6_addr(exc_list=[config('vip')])[0] - else: - addr = get_host_ip(unit_get('private-address')) - l_unit = local_unit().replace('/', '-') cluster_hosts = {} @@ -576,7 +633,15 @@ def __call__(self): # and associated backends for addr_type in ADDRESS_TYPES: cfg_opt = 'os-{}-network'.format(addr_type) - laddr = get_address_in_network(config(cfg_opt)) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) cluster_hosts[laddr] = { @@ -587,15 +652,19 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) add backend based on private address - this - # with either be the only backend or the fallback if no acls + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) cluster_hosts[addr] = { @@ -605,6 +674,8 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -1321,8 +1392,6 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, - "usr_bin": git_determine_usr_bin(), - "python_path": git_determine_python_path(), } return ctxt @@ -1566,8 +1635,18 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. 
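The HAProxyContext changes above make address discovery network-spaces aware: get_relation_ip() prefers an address from the relation's binding and falls back to the os-*-network config option. An illustrative sketch of the per-network map the context builds from those addresses (field names follow the hunk; peer addresses are hypothetical):

def cluster_hosts_entry(laddr, netmask, peer_backends):
    # One frontend network plus its backends: the local unit first,
    # then one entry per peer seen on the cluster relation.
    entry = {
        'network': '{}/{}'.format(laddr, netmask),
        'backends': {'unit-0': laddr},
    }
    entry['backends'].update(peer_backends)
    return {laddr: entry}

print(cluster_hosts_entry('10.20.0.5', '255.255.255.0',
                          {'unit-1': '10.20.0.6', 'unit-2': '10.20.0.7'}))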
""" + def __init__(self, ost_rel_check_pkg_name): + self.ost_rel_check_pkg_name = ost_rel_check_pkg_name + def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} + ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} + rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + if CompareOpenStackReleases(rel) >= 'pike': + ctxt['volume_api_version'] = '3' + else: + ctxt['volume_api_version'] = '2' + + return ctxt class AppArmorContext(OSContextGenerator): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 9e5af342..b753275d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,6 @@ import re import itertools import functools -import shutil import six import traceback @@ -47,7 +46,6 @@ related_units, relation_ids, relation_set, - service_name, status_set, hook_name, application_version_set, @@ -68,11 +66,6 @@ port_has_listener, ) -from charmhelpers.contrib.python.packages import ( - pip_create_virtualenv, - pip_install, -) - from charmhelpers.core.host import ( lsb_release, mounts, @@ -84,7 +77,6 @@ ) from charmhelpers.fetch import ( apt_cache, - install_remote, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, @@ -278,27 +270,6 @@ ]), } -GIT_DEFAULT_REPOS = { - 'requirements': 'git://github.com/openstack/requirements', - 'cinder': 'git://github.com/openstack/cinder', - 'glance': 'git://github.com/openstack/glance', - 'horizon': 'git://github.com/openstack/horizon', - 'keystone': 'git://github.com/openstack/keystone', - 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', - 'neutron': 'git://github.com/openstack/neutron', - 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', - 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', - 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', - 'nova': 'git://github.com/openstack/nova', -} - -GIT_DEFAULT_BRANCHES = { - 'liberty': 'stable/liberty', - 'mitaka': 'stable/mitaka', - 'newton': 'stable/newton', - 'master': 'master', -} - DEFAULT_LOOPBACK_SIZE = '5G' @@ -530,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False): if _os_rel: return _os_rel _os_rel = ( - git_os_codename_install_source(config('openstack-origin-git')) or get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) @@ -656,11 +626,6 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - if "swift" in package: - major_cur_vers = cur_vers.split('.', 1)[0] - major_avail_vers = avail_vers.split('.', 1)[0] - major_diff = apt.version_compare(major_avail_vers, major_cur_vers) - return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) return apt.version_compare(avail_vers, cur_vers) == 1 @@ -771,417 +736,6 @@ def wrapped_f(*args): return wrap -def git_install_requested(): - """ - Returns true if openstack-origin-git is specified. - """ - return config('openstack-origin-git') is not None - - -def git_os_codename_install_source(projects_yaml): - """ - Returns OpenStack codename of release being installed from source. 
- """ - if git_install_requested(): - projects = _git_yaml_load(projects_yaml) - - if projects in GIT_DEFAULT_BRANCHES.keys(): - if projects == 'master': - return 'ocata' - return projects - - if 'release' in projects: - if projects['release'] == 'master': - return 'ocata' - return projects['release'] - - return None - - -def git_default_repos(projects_yaml): - """ - Returns default repos if a default openstack-origin-git value is specified. - """ - service = service_name() - core_project = service - - for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): - if projects_yaml == default: - - # add the requirements repo first - repo = { - 'name': 'requirements', - 'repository': GIT_DEFAULT_REPOS['requirements'], - 'branch': branch, - } - repos = [repo] - - # neutron-* and nova-* charms require some additional repos - if service in ['neutron-api', 'neutron-gateway', - 'neutron-openvswitch']: - core_project = 'neutron' - if service == 'neutron-api': - repo = { - 'name': 'networking-hyperv', - 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], - 'branch': branch, - } - repos.append(repo) - for project in ['neutron-fwaas', 'neutron-lbaas', - 'neutron-vpnaas', 'nova']: - repo = { - 'name': project, - 'repository': GIT_DEFAULT_REPOS[project], - 'branch': branch, - } - repos.append(repo) - - elif service in ['nova-cloud-controller', 'nova-compute']: - core_project = 'nova' - repo = { - 'name': 'neutron', - 'repository': GIT_DEFAULT_REPOS['neutron'], - 'branch': branch, - } - repos.append(repo) - elif service == 'openstack-dashboard': - core_project = 'horizon' - - # finally add the current service's core project repo - repo = { - 'name': core_project, - 'repository': GIT_DEFAULT_REPOS[core_project], - 'branch': branch, - } - repos.append(repo) - - return yaml.dump(dict(repositories=repos, release=default)) - - return projects_yaml - - -def _git_yaml_load(projects_yaml): - """ - Load the specified yaml into a dictionary. - """ - if not projects_yaml: - return None - - return yaml.load(projects_yaml) - - -requirements_dir = None - - -def git_clone_and_install(projects_yaml, core_project): - """ - Clone/install all specified OpenStack repositories. - - The expected format of projects_yaml is: - - repositories: - - {name: keystone, - repository: 'git://git.openstack.org/openstack/keystone.git', - branch: 'stable/icehouse'} - - {name: requirements, - repository: 'git://git.openstack.org/openstack/requirements.git', - branch: 'stable/icehouse'} - - directory: /mnt/openstack-git - http_proxy: squid-proxy-url - https_proxy: squid-proxy-url - - The directory, http_proxy, and https_proxy keys are optional. - - """ - global requirements_dir - parent_dir = '/mnt/openstack-git' - http_proxy = None - - projects = _git_yaml_load(projects_yaml) - _git_validate_projects_yaml(projects, core_project) - - old_environ = dict(os.environ) - - if 'http_proxy' in projects.keys(): - http_proxy = projects['http_proxy'] - os.environ['http_proxy'] = projects['http_proxy'] - if 'https_proxy' in projects.keys(): - os.environ['https_proxy'] = projects['https_proxy'] - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - pip_create_virtualenv(os.path.join(parent_dir, 'venv')) - - # Upgrade setuptools and pip from default virtualenv versions. The default - # versions in trusty break master OpenStack branch deployments. 
- for p in ['pip', 'setuptools']: - pip_install(p, upgrade=True, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) - - constraints = None - for p in projects['repositories']: - repo = p['repository'] - branch = p['branch'] - depth = '1' - if 'depth' in p.keys(): - depth = p['depth'] - if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=False) - requirements_dir = repo_dir - constraints = os.path.join(repo_dir, "upper-constraints.txt") - # upper-constraints didn't exist until after icehouse - if not os.path.isfile(constraints): - constraints = None - # use constraints unless project yaml sets use_constraints to false - if 'use_constraints' in projects.keys(): - if not projects['use_constraints']: - constraints = None - else: - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=True, - constraints=constraints) - - os.environ = old_environ - - -def _git_validate_projects_yaml(projects, core_project): - """ - Validate the projects yaml. - """ - _git_ensure_key_exists('repositories', projects) - - for project in projects['repositories']: - _git_ensure_key_exists('name', project.keys()) - _git_ensure_key_exists('repository', project.keys()) - _git_ensure_key_exists('branch', project.keys()) - - if projects['repositories'][0]['name'] != 'requirements': - error_out('{} git repo must be specified first'.format('requirements')) - - if projects['repositories'][-1]['name'] != core_project: - error_out('{} git repo must be specified last'.format(core_project)) - - _git_ensure_key_exists('release', projects) - - -def _git_ensure_key_exists(key, keys): - """ - Ensure that key exists in keys. - """ - if key not in keys: - error_out('openstack-origin-git key \'{}\' is missing'.format(key)) - - -def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements, constraints=None): - """ - Clone and install a single git repository. - """ - if not os.path.exists(parent_dir): - juju_log('Directory already exists at {}. ' - 'No need to create directory.'.format(parent_dir)) - os.mkdir(parent_dir) - - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote( - repo, dest=parent_dir, branch=branch, depth=depth) - - venv = os.path.join(parent_dir, 'venv') - - if update_requirements: - if not requirements_dir: - error_out('requirements repo must be cloned before ' - 'updating from global requirements.') - _git_update_requirements(venv, repo_dir, requirements_dir) - - juju_log('Installing git repo from dir: {}'.format(repo_dir)) - if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv, - constraints=constraints) - else: - pip_install(repo_dir, venv=venv, constraints=constraints) - - return repo_dir - - -def _git_update_requirements(venv, package_dir, reqs_dir): - """ - Update from global requirements. - - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt. - """ - orig_dir = os.getcwd() - os.chdir(reqs_dir) - python = os.path.join(venv, 'bin/python') - cmd = [python, 'update.py', package_dir] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - package = os.path.basename(package_dir) - error_out("Error updating {} from " - "global-requirements.txt".format(package)) - os.chdir(orig_dir) - - -def git_pip_venv_dir(projects_yaml): - """ - Return the pip virtualenv path. 
- """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - return os.path.join(parent_dir, 'venv') - - -def git_src_dir(projects_yaml, project): - """ - Return the directory where the specified project's source is located. - """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - if p['name'] == project: - return os.path.join(parent_dir, os.path.basename(p['repository'])) - - return None - - -def git_yaml_value(projects_yaml, key): - """ - Return the value in projects_yaml for the specified key. - """ - projects = _git_yaml_load(projects_yaml) - - if key in projects.keys(): - return projects[key] - - return None - - -def git_generate_systemd_init_files(templates_dir): - """ - Generate systemd init files. - - Generates and installs systemd init units and script files based on the - *.init.in files contained in the templates_dir directory. - - This code is based on the openstack-pkg-tools package and its init - script generation, which is used by the OpenStack packages. - """ - for f in os.listdir(templates_dir): - # Create the init script and systemd unit file from the template - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - init_source = os.path.join(templates_dir, init_file) - service_source = os.path.join(templates_dir, service_file) - - init_dest = os.path.join('/etc/init.d', init_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(init_in_source, init_source) - with open(init_source, 'a') as outfile: - template = ('/usr/share/openstack-pkg-tools/' - 'init-script-template') - with open(template) as infile: - outfile.write('\n\n{}'.format(infile.read())) - - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if os.path.exists(init_dest): - os.remove(init_dest) - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(init_source, init_dest) - shutil.copyfile(service_source, service_dest) - os.chmod(init_dest, 0o755) - - for f in os.listdir(templates_dir): - # If there's a service.in file, use it instead of the generated one - if f.endswith(".service.in"): - service_in_file = f - service_file = f[:-3] - - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(service_in_source, service_source) - - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - for f in os.listdir(templates_dir): - # Generate the systemd unit if there's no existing .service.in - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_in_file = "{}.service.in".format(init_file) - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - if not os.path.exists(service_in_source): - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if 
os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - -def git_determine_usr_bin(): - """Return the /usr/bin path for Apache2 config. - - The /usr/bin path will be located in the virtualenv if the charm - is configured to deploy from source. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') - else: - return '/usr/bin' - - -def git_determine_python_path(): - """Return the python-path for Apache2 config. - - Returns 'None' unless the charm is configured to deploy from source, - in which case the path of the virtualenv's site-packages is returned. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), - 'lib/python2.7/site-packages') - else: - return None - - def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1615,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): """ ret = False - if git_install_requested(): - action_set({'outcome': 'installed from source, skipped upgrade.'}) - else: - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) return ret @@ -2045,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None): def update_json_file(filename, items): """Updates the json `filename` with a given dict. - :param filename: json filename (i.e.: /etc/glance/policy.json) + :param filename: path to json file (e.g. /etc/glance/policy.json) :param items: dict of items to update """ + if not items: + return + with open(filename) as fd: policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. 
+ before = json.dumps(policy, indent=4, sort_keys=True) policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + with open(filename, "w") as fd: - fd.write(json.dumps(policy, indent=4)) + fd.write(after) @cached diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0d9bacfd..e13e60a6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None): assert isinstance(valid_range, list), \ "valid_range must be a list, was given {}".format(valid_range) # If we're dealing with strings - if valid_type is six.string_types: + if isinstance(value, six.string_types): assert value in valid_range, \ "{} is not in the list {}".format(value, valid_range) # Integer, float should have a min and max @@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value): :param value: :return: None. Can raise CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, + str(value).lower()] try: check_call(cmd) except CalledProcessError: @@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param durability_estimator: int :return: None. Can raise CalledProcessError """ + version = ceph_version() + # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + # failure_domain changed in luminous + if version and version >= '12.0.0': + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -1064,14 +1073,24 @@ def __init__(self, api_version=1, request_id=None): self.ops = [] def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None): + permission=None, key_name=None, + object_prefix_permissions=None): """ Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. 
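The update_json_file() rewrite above avoids rewriting a policy file whose merged content is unchanged, so config-file watchers do not trigger needless service restarts. A self-contained sketch of the compare-before-write pattern:

import json
import tempfile

def update_json_file(filename, items):
    # Merge items into the JSON document, writing only when the merged
    # result actually differs from what is already on disk.
    if not items:
        return
    with open(filename) as fd:
        policy = json.load(fd)
    before = json.dumps(policy, indent=4, sort_keys=True)
    policy.update(items)
    after = json.dumps(policy, indent=4, sort_keys=True)
    if before == after:
        return  # unchanged; leave the file (and its mtime) alone
    with open(filename, 'w') as fd:
        fd.write(after)

path = tempfile.mkstemp(suffix='.json')[1]
with open(path, 'w') as fd:
    json.dump({'context_is_admin': 'role:admin'}, fd)
update_json_file(path, {'context_is_admin': 'role:admin'})  # no-op, no write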
+ { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} """ - self.ops.append({'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, 'name': key_name or service_name(), - 'group-permission': permission}) + self.ops.append({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None): @@ -1107,7 +1126,10 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index 7f2a0604..79a7a245 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools from subprocess import ( CalledProcessError, check_call, @@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device): :block_device: str: Full path of PV-initialized block device. ''' check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. 
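The new lvm.py helpers above expose the lvs selection criteria through functools.partial, so thin pools and thin volumes are just pre-filtered variants of list_logical_volumes(). A sketch of the same pattern with the shelling-out replaced by an in-memory inventory (per lvs(8), an lv_attr starting with 't' marks a thin pool and 'V' a thin volume):

import functools

def list_items(select_criteria=None):
    # Stand-in for list_logical_volumes(); the real helper shells out to
    # `lvs --options lv_name --noheadings [--select CRITERIA]`.
    inventory = {'pool0': 't', 'data0': 'V', 'root': '-'}
    return sorted(name for name, attr in inventory.items()
                  if select_criteria is None or attr == select_criteria)

# Pre-filtered variants, mirroring the functools.partial definitions above.
list_thin_pools = functools.partial(list_items, select_criteria='t')
list_thin_volumes = functools.partial(list_items, select_criteria='V')

print(list_thin_pools())    # ['pool0']
print(list_thin_volumes())  # ['data0']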
+ + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-osd/tests/charmhelpers/core/unitdata.py b/ceph-osd/tests/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-osd/tests/charmhelpers/core/unitdata.py +++ b/ceph-osd/tests/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None From 5c8503e4895e616bfb1bb1bf035fc47806cdcd09 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 19 Jan 2018 12:07:46 +0000 Subject: [PATCH 1444/2699] Sync charm-helpers Notable issues resolved: openstack_upgrade_available() broken for swift https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847 haproxy context doesn't consider bindings https://bugs.launchpad.net/charm-helpers/+bug/1735421 regression in haproxy check https://bugs.launchpad.net/charm-helpers/+bug/1743287 Change-Id: Ica4baeed1be9fb16ccadc21d45d3bb71899a0aed --- .../charmhelpers/contrib/openstack/utils.py | 500 ++---------------- .../contrib/storage/linux/ceph.py | 50 +- .../charmhelpers/contrib/storage/linux/lvm.py | 50 ++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 2 + ceph-proxy/hooks/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/core/unitdata.py | 2 + .../contrib/openstack/amulet/deployment.py | 12 +- .../contrib/openstack/amulet/utils.py | 9 +- ceph-proxy/tests/charmhelpers/core/hookenv.py | 2 + ceph-proxy/tests/charmhelpers/core/host.py | 2 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../tests/charmhelpers/core/unitdata.py | 2 + 13 files changed, 146 insertions(+), 489 
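The two-line hookenv.py and unitdata.py changes repeated through these syncs tighten permissions on unit state files, which can hold secrets received over relations. A sketch of the pattern on a POSIX system: chmod through the open descriptor, so there is no window on the path between creating the file and restricting it:

import os
import sqlite3
import tempfile

db_path = os.path.join(tempfile.mkdtemp(), '.unit-state.db')

# Touch the file and restrict it to the owner before any data lands in
# it; fchmod() acts on the descriptor rather than re-resolving the path.
with open(db_path, 'a') as f:
    os.fchmod(f.fileno(), 0o600)

conn = sqlite3.connect(db_path)
print(oct(os.stat(db_path).st_mode & 0o777))  # 0o600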
deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 8a541d40..b753275d 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,6 @@ import re import itertools import functools -import shutil import six import traceback @@ -47,7 +46,6 @@ related_units, relation_ids, relation_set, - service_name, status_set, hook_name, application_version_set, @@ -68,11 +66,6 @@ port_has_listener, ) -from charmhelpers.contrib.python.packages import ( - pip_create_virtualenv, - pip_install, -) - from charmhelpers.core.host import ( lsb_release, mounts, @@ -84,7 +77,6 @@ ) from charmhelpers.fetch import ( apt_cache, - install_remote, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, @@ -278,27 +270,6 @@ ]), } -GIT_DEFAULT_REPOS = { - 'requirements': 'git://github.com/openstack/requirements', - 'cinder': 'git://github.com/openstack/cinder', - 'glance': 'git://github.com/openstack/glance', - 'horizon': 'git://github.com/openstack/horizon', - 'keystone': 'git://github.com/openstack/keystone', - 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', - 'neutron': 'git://github.com/openstack/neutron', - 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', - 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', - 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', - 'nova': 'git://github.com/openstack/nova', -} - -GIT_DEFAULT_BRANCHES = { - 'liberty': 'stable/liberty', - 'mitaka': 'stable/mitaka', - 'newton': 'stable/newton', - 'master': 'master', -} - DEFAULT_LOOPBACK_SIZE = '5G' @@ -392,6 +363,8 @@ def get_swift_codename(version): releases = UBUNTU_OPENSTACK_RELEASE release = [k for k, v in six.iteritems(releases) if codename in v] ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -528,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False): if _os_rel: return _os_rel _os_rel = ( - git_os_codename_install_source(config('openstack-origin-git')) or get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) @@ -654,11 +626,6 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - if "swift" in package: - major_cur_vers = cur_vers.split('.', 1)[0] - major_avail_vers = avail_vers.split('.', 1)[0] - major_diff = apt.version_compare(major_avail_vers, major_cur_vers) - return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) return apt.version_compare(avail_vers, cur_vers) == 1 @@ -769,417 +736,6 @@ def wrapped_f(*args): return wrap -def git_install_requested(): - """ - Returns true if openstack-origin-git is specified. - """ - return config('openstack-origin-git') is not None - - -def git_os_codename_install_source(projects_yaml): - """ - Returns OpenStack codename of release being installed from source. 
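The get_swift_codename() fix in this copy of the sync addresses a Python 3 pitfall: subprocess.check_output() returns bytes, so a `codename in ret` membership test raises TypeError until the output is decoded. A minimal sketch of the guard (an isinstance check stands in for the six.PY3 test used in the hunk):

import subprocess

ret = subprocess.check_output(['echo', 'pike'])
if isinstance(ret, bytes):  # equivalent to the six.PY3 guard above
    ret = ret.decode('UTF-8')
print('pike' in ret)  # True; against raw bytes this raises TypeError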
- """ - if git_install_requested(): - projects = _git_yaml_load(projects_yaml) - - if projects in GIT_DEFAULT_BRANCHES.keys(): - if projects == 'master': - return 'ocata' - return projects - - if 'release' in projects: - if projects['release'] == 'master': - return 'ocata' - return projects['release'] - - return None - - -def git_default_repos(projects_yaml): - """ - Returns default repos if a default openstack-origin-git value is specified. - """ - service = service_name() - core_project = service - - for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): - if projects_yaml == default: - - # add the requirements repo first - repo = { - 'name': 'requirements', - 'repository': GIT_DEFAULT_REPOS['requirements'], - 'branch': branch, - } - repos = [repo] - - # neutron-* and nova-* charms require some additional repos - if service in ['neutron-api', 'neutron-gateway', - 'neutron-openvswitch']: - core_project = 'neutron' - if service == 'neutron-api': - repo = { - 'name': 'networking-hyperv', - 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], - 'branch': branch, - } - repos.append(repo) - for project in ['neutron-fwaas', 'neutron-lbaas', - 'neutron-vpnaas', 'nova']: - repo = { - 'name': project, - 'repository': GIT_DEFAULT_REPOS[project], - 'branch': branch, - } - repos.append(repo) - - elif service in ['nova-cloud-controller', 'nova-compute']: - core_project = 'nova' - repo = { - 'name': 'neutron', - 'repository': GIT_DEFAULT_REPOS['neutron'], - 'branch': branch, - } - repos.append(repo) - elif service == 'openstack-dashboard': - core_project = 'horizon' - - # finally add the current service's core project repo - repo = { - 'name': core_project, - 'repository': GIT_DEFAULT_REPOS[core_project], - 'branch': branch, - } - repos.append(repo) - - return yaml.dump(dict(repositories=repos, release=default)) - - return projects_yaml - - -def _git_yaml_load(projects_yaml): - """ - Load the specified yaml into a dictionary. - """ - if not projects_yaml: - return None - - return yaml.load(projects_yaml) - - -requirements_dir = None - - -def git_clone_and_install(projects_yaml, core_project): - """ - Clone/install all specified OpenStack repositories. - - The expected format of projects_yaml is: - - repositories: - - {name: keystone, - repository: 'git://git.openstack.org/openstack/keystone.git', - branch: 'stable/icehouse'} - - {name: requirements, - repository: 'git://git.openstack.org/openstack/requirements.git', - branch: 'stable/icehouse'} - - directory: /mnt/openstack-git - http_proxy: squid-proxy-url - https_proxy: squid-proxy-url - - The directory, http_proxy, and https_proxy keys are optional. - - """ - global requirements_dir - parent_dir = '/mnt/openstack-git' - http_proxy = None - - projects = _git_yaml_load(projects_yaml) - _git_validate_projects_yaml(projects, core_project) - - old_environ = dict(os.environ) - - if 'http_proxy' in projects.keys(): - http_proxy = projects['http_proxy'] - os.environ['http_proxy'] = projects['http_proxy'] - if 'https_proxy' in projects.keys(): - os.environ['https_proxy'] = projects['https_proxy'] - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - pip_create_virtualenv(os.path.join(parent_dir, 'venv')) - - # Upgrade setuptools and pip from default virtualenv versions. The default - # versions in trusty break master OpenStack branch deployments. 
- for p in ['pip', 'setuptools']: - pip_install(p, upgrade=True, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) - - constraints = None - for p in projects['repositories']: - repo = p['repository'] - branch = p['branch'] - depth = '1' - if 'depth' in p.keys(): - depth = p['depth'] - if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=False) - requirements_dir = repo_dir - constraints = os.path.join(repo_dir, "upper-constraints.txt") - # upper-constraints didn't exist until after icehouse - if not os.path.isfile(constraints): - constraints = None - # use constraints unless project yaml sets use_constraints to false - if 'use_constraints' in projects.keys(): - if not projects['use_constraints']: - constraints = None - else: - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=True, - constraints=constraints) - - os.environ = old_environ - - -def _git_validate_projects_yaml(projects, core_project): - """ - Validate the projects yaml. - """ - _git_ensure_key_exists('repositories', projects) - - for project in projects['repositories']: - _git_ensure_key_exists('name', project.keys()) - _git_ensure_key_exists('repository', project.keys()) - _git_ensure_key_exists('branch', project.keys()) - - if projects['repositories'][0]['name'] != 'requirements': - error_out('{} git repo must be specified first'.format('requirements')) - - if projects['repositories'][-1]['name'] != core_project: - error_out('{} git repo must be specified last'.format(core_project)) - - _git_ensure_key_exists('release', projects) - - -def _git_ensure_key_exists(key, keys): - """ - Ensure that key exists in keys. - """ - if key not in keys: - error_out('openstack-origin-git key \'{}\' is missing'.format(key)) - - -def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements, constraints=None): - """ - Clone and install a single git repository. - """ - if not os.path.exists(parent_dir): - juju_log('Directory already exists at {}. ' - 'No need to create directory.'.format(parent_dir)) - os.mkdir(parent_dir) - - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote( - repo, dest=parent_dir, branch=branch, depth=depth) - - venv = os.path.join(parent_dir, 'venv') - - if update_requirements: - if not requirements_dir: - error_out('requirements repo must be cloned before ' - 'updating from global requirements.') - _git_update_requirements(venv, repo_dir, requirements_dir) - - juju_log('Installing git repo from dir: {}'.format(repo_dir)) - if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv, - constraints=constraints) - else: - pip_install(repo_dir, venv=venv, constraints=constraints) - - return repo_dir - - -def _git_update_requirements(venv, package_dir, reqs_dir): - """ - Update from global requirements. - - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt. - """ - orig_dir = os.getcwd() - os.chdir(reqs_dir) - python = os.path.join(venv, 'bin/python') - cmd = [python, 'update.py', package_dir] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - package = os.path.basename(package_dir) - error_out("Error updating {} from " - "global-requirements.txt".format(package)) - os.chdir(orig_dir) - - -def git_pip_venv_dir(projects_yaml): - """ - Return the pip virtualenv path. 
- """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - return os.path.join(parent_dir, 'venv') - - -def git_src_dir(projects_yaml, project): - """ - Return the directory where the specified project's source is located. - """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - if p['name'] == project: - return os.path.join(parent_dir, os.path.basename(p['repository'])) - - return None - - -def git_yaml_value(projects_yaml, key): - """ - Return the value in projects_yaml for the specified key. - """ - projects = _git_yaml_load(projects_yaml) - - if key in projects.keys(): - return projects[key] - - return None - - -def git_generate_systemd_init_files(templates_dir): - """ - Generate systemd init files. - - Generates and installs systemd init units and script files based on the - *.init.in files contained in the templates_dir directory. - - This code is based on the openstack-pkg-tools package and its init - script generation, which is used by the OpenStack packages. - """ - for f in os.listdir(templates_dir): - # Create the init script and systemd unit file from the template - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - init_source = os.path.join(templates_dir, init_file) - service_source = os.path.join(templates_dir, service_file) - - init_dest = os.path.join('/etc/init.d', init_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(init_in_source, init_source) - with open(init_source, 'a') as outfile: - template = ('/usr/share/openstack-pkg-tools/' - 'init-script-template') - with open(template) as infile: - outfile.write('\n\n{}'.format(infile.read())) - - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if os.path.exists(init_dest): - os.remove(init_dest) - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(init_source, init_dest) - shutil.copyfile(service_source, service_dest) - os.chmod(init_dest, 0o755) - - for f in os.listdir(templates_dir): - # If there's a service.in file, use it instead of the generated one - if f.endswith(".service.in"): - service_in_file = f - service_file = f[:-3] - - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(service_in_source, service_source) - - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - for f in os.listdir(templates_dir): - # Generate the systemd unit if there's no existing .service.in - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_in_file = "{}.service.in".format(init_file) - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - if not os.path.exists(service_in_source): - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if 
os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - -def git_determine_usr_bin(): - """Return the /usr/bin path for Apache2 config. - - The /usr/bin path will be located in the virtualenv if the charm - is configured to deploy from source. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') - else: - return '/usr/bin' - - -def git_determine_python_path(): - """Return the python-path for Apache2 config. - - Returns 'None' unless the charm is configured to deploy from source, - in which case the path of the virtualenv's site-packages is returned. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), - 'lib/python2.7/site-packages') - else: - return None - - def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1613,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): """ ret = False - if git_install_requested(): - action_set({'outcome': 'installed from source, skipped upgrade.'}) - else: - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) return ret @@ -2043,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None): def update_json_file(filename, items): """Updates the json `filename` with a given dict. - :param filename: json filename (i.e.: /etc/glance/policy.json) + :param filename: path to json file (e.g. /etc/glance/policy.json) :param items: dict of items to update """ + if not items: + return + with open(filename) as fd: policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. 
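The guard clauses above and the compare-before-write step completed just below can be exercised on their own. A minimal standalone sketch of the same behaviour (standard library only; the policy file content is made up), showing that a content-identical update leaves the file untouched:

    import json
    import os
    import tempfile

    def update_json_file(filename, items):
        # Mirrors the patched helper: skip empty updates and no-op rewrites.
        if not items:
            return
        with open(filename) as fd:
            policy = json.load(fd)
        before = json.dumps(policy, indent=4, sort_keys=True)
        policy.update(items)
        after = json.dumps(policy, indent=4, sort_keys=True)
        if before == after:
            return
        with open(filename, "w") as fd:
            fd.write(after)

    path = os.path.join(tempfile.mkdtemp(), 'policy.json')
    with open(path, 'w') as f:
        json.dump({'context_is_admin': 'role:admin'}, f)
    mtime = os.stat(path).st_mtime_ns
    update_json_file(path, {'context_is_admin': 'role:admin'})  # same value
    assert os.stat(path).st_mtime_ns == mtime  # no rewrite, so no restart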
+ before = json.dumps(policy, indent=4, sort_keys=True) policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + with open(filename, "w") as fd: - fd.write(json.dumps(policy, indent=4)) + fd.write(after) @cached diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 39231612..e13e60a6 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None): assert isinstance(valid_range, list), \ "valid_range must be a list, was given {}".format(valid_range) # If we're dealing with strings - if valid_type is six.string_types: + if isinstance(value, six.string_types): assert value in valid_range, \ "{} is not in the list {}".format(value, valid_range) # Integer, float should have a min and max @@ -377,12 +377,12 @@ def get_mon_map(service): try: return json.loads(mon_status) except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}".format( - mon_status, v.message)) + log("Unable to parse mon_status json: {}. Error: {}" + .format(mon_status, str(v))) raise except CalledProcessError as e: - log("mon_status command failed with message: {}".format( - e.message)) + log("mon_status command failed with message: {}" + .format(str(e))) raise @@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value): :param value: :return: None. Can raise CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, + str(value).lower()] try: check_call(cmd) except CalledProcessError: @@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param durability_estimator: int :return: None. Can raise CalledProcessError """ + version = ceph_version() + # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + # failure_domain changed in luminous + if version and version >= '12.0.0': + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -1064,14 +1073,24 @@ def __init__(self, api_version=1, request_id=None): self.ops = [] def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None): + permission=None, key_name=None, + object_prefix_permissions=None): """ Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools + allowing the key to access only the specified pools or + object prefixes. 
object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. + { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} """ - self.ops.append({'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, 'name': key_name or service_name(), - 'group-permission': permission}) + self.ops.append({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None): @@ -1107,7 +1126,10 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py index 7f2a0604..79a7a245 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools from subprocess import ( CalledProcessError, check_call, @@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device): :block_device: str: Full path of PV-initialized block device. ''' check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. 
+ + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index e37f2834..5afbbd87 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import os import re import sys import six @@ -185,7 +186,7 @@ def _configure_services(self, configs): self.d.configure(service, config) def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=1800): + include_only=None, timeout=None): """Wait for all units to have a specific extended status, except for any defined as excluded. Unless specified via message, any status containing any case of 'ready' will be considered a match. @@ -215,7 +216,10 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, :param timeout: Maximum time in seconds to wait for status match :returns: None. Raises if timeout is hit. """ - self.log.info('Waiting for extended status on units...') + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' 
+ ''.format(timeout)) all_services = self.d.services.keys() @@ -252,9 +256,9 @@ def _auto_wait_for_status(self, message=None, exclude_services=None, service_messages = {service: message for service in services} # Check for idleness - self.d.sentry.wait() + self.d.sentry.wait(timeout=timeout) # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services) + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) # Check for ready messages self.d.sentry.wait_for_messages(service_messages, timeout=timeout) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b19..87f364d1 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index 5cc5c86b..fd14d60f 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) + if six.PY3 and isinstance(content, six.string_types): + content = content.encode('UTF-8') target.write(content) return # the contents were the same, but we might still need to change the diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py index d8dc378a..99451b59 100644 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py @@ -20,6 +20,7 @@ 'yakkety', 'zesty', 'artful', + 'bionic', ) diff --git a/ceph-proxy/tests/charmhelpers/core/unitdata.py b/ceph-proxy/tests/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-proxy/tests/charmhelpers/core/unitdata.py +++ b/ceph-proxy/tests/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None From 
174069a2e96e264f7ce71c9a7368f1027090b4a1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 19 Jan 2018 12:07:55 +0000 Subject: [PATCH 1445/2699] Sync charm-helpers Notable issues resolved: openstack_upgrade_available() broken for swift https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847 haproxy context doesn't consider bindings https://bugs.launchpad.net/charm-helpers/+bug/1735421 regression in haproxy check https://bugs.launchpad.net/charm-helpers/+bug/1743287 Change-Id: I0c3b5d90238b3e5665455983616f58446a682429 --- .../charmhelpers/contrib/openstack/context.py | 101 +++- .../contrib/openstack/files/check_haproxy.sh | 2 +- .../files/check_haproxy_queue_depth.sh | 2 +- .../contrib/openstack/ha/utils.py | 175 +++++- .../templates/wsgi-openstack-api.conf | 15 +- .../charmhelpers/contrib/openstack/utils.py | 498 ++---------------- .../contrib/storage/linux/ceph.py | 42 +- .../hooks/charmhelpers/core/hookenv.py | 2 + .../hooks/charmhelpers/core/unitdata.py | 2 + .../tests/charmhelpers/core/hookenv.py | 2 + .../tests/charmhelpers/core/unitdata.py | 2 + .../unit_tests/test_ceph_radosgw_context.py | 7 +- 12 files changed, 312 insertions(+), 538 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 70850c1b..7ada2760 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -93,12 +93,10 @@ format_ipv6_addr, is_bridge_member, is_ipv6_disabled, + get_relation_ip, ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, - get_host_ip, - git_determine_usr_bin, - git_determine_python_path, enable_memcache, snap_install_requested, CompareOpenStackReleases, @@ -334,10 +332,7 @@ def __init__(self, self.rel_name = rel_name self.interfaces = [self.rel_name] - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - + def _setup_pki_cache(self): if self.service and self.service_user: # This is required for pki token signing if we don't want /tmp to # be used. 
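The helper extraction above, together with the subclass the following hunks add, reduces to a small pattern. This condensed sketch uses a plain makedirs call as a stand-in for the charm-helpers mkdir wrapper (which also sets owner and group) and trims everything unrelated:

    import os

    class IdentityServiceContext(object):
        def __init__(self, service=None, service_user=None,
                     rel_name='identity-service'):
            self.service = service
            self.service_user = service_user
            self.rel_name = rel_name

        def _setup_pki_cache(self):
            # Only services that sign PKI tokens need a private cache dir;
            # returning None lets __call__ skip the signing_dir context key.
            if self.service and self.service_user:
                cachedir = '/var/cache/{}'.format(self.service)
                if not os.path.isdir(cachedir):
                    os.makedirs(cachedir, 0o700)
                return cachedir
            return None

    class IdentityCredentialsContext(IdentityServiceContext):
        # The new subclass reuses the helper unchanged; only the relation
        # name differs.
        def __init__(self, service=None, service_user=None,
                     rel_name='identity-credentials'):
            super(IdentityCredentialsContext, self).__init__(
                service, service_user, rel_name)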
@@ -347,6 +342,15 @@ def __call__(self): mkdir(path=cachedir, owner=self.service_user, group=self.service_user, perms=0o700) + return cachedir + return None + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): @@ -385,6 +389,62 @@ def __call__(self): return {} +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): @@ -566,11 +626,6 @@ def __call__(self): if not relation_ids('cluster') and not self.singlenode_mode: return {} - if config('prefer-ipv6'): - addr = get_ipv6_addr(exc_list=[config('vip')])[0] - else: - addr = get_host_ip(unit_get('private-address')) - l_unit = local_unit().replace('/', '-') cluster_hosts = {} @@ -578,7 +633,15 @@ def __call__(self): # and associated backends for addr_type in ADDRESS_TYPES: cfg_opt = 'os-{}-network'.format(addr_type) - laddr = get_address_in_network(config(cfg_opt)) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) cluster_hosts[laddr] = { @@ -589,15 +652,19 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) add backend based on private address - this - # with 
either be the only backend or the fallback if no acls + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) cluster_hosts[addr] = { @@ -607,6 +674,8 @@ def __call__(self): } for rid in relation_ids('cluster'): for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -1323,8 +1392,6 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, - "usr_bin": git_determine_usr_bin(), - "python_path": git_determine_python_path(), } return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh index 7aab129a..1df55db4 100755 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -9,7 +9,7 @@ CRITICAL=0 NOTACTIVE='' LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}') +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') typeset -i N_INSTANCES=0 for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh index 3ebb5329..91ce0246 100755 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -10,7 +10,7 @@ CURRQthrsh=0 MAXQthrsh=100 -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 9a4d79c1..6060ae50 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -23,6 +23,8 @@ Helpers for high availability. """ +import json + import re from charmhelpers.core.hookenv import ( @@ -32,6 +34,7 @@ config, status_set, DEBUG, + WARNING, ) from charmhelpers.core.host import ( @@ -40,6 +43,23 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), ) @@ -53,8 +73,8 @@ class DNSHAException(Exception): def update_dns_ha_resource_params(resources, resource_params, relation_id=None, crm_ocf='ocf:maas:dns'): - """ Check for os-*-hostname settings and update resource dictionaries for - the HA relation. + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. 
@param resources: Pointer to dictionary of resources. Usually instantiated in ha_joined(). @@ -64,7 +84,85 @@ def update_dns_ha_resource_params(resources, resource_params, @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. + + @returns dict: json encoded data for use with relation_set + """ + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data = { + 'resources': { + _haproxy_res: 'lsb:haproxy', + }, + 'resource_params': { + _haproxy_res: 'op monitor interval="5s"' + }, + 'init_services': { + _haproxy_res: 'haproxy' + }, + 'clones': { + 'cl_{}_haproxy'.format(service): _haproxy_res + }, + } + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. 
+ @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ # Validate the charm environment for DNS HA assert_charm_supports_dns_ha() @@ -93,7 +191,7 @@ def update_dns_ha_resource_params(resources, resource_params, status_set('blocked', msg) raise DNSHAException(msg) - hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type) + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), @@ -101,42 +199,67 @@ def update_dns_ha_resource_params(resources, resource_params, continue hostname_group.append(hostname_key) - resources[hostname_key] = crm_ocf - resource_params[hostname_key] = ( - 'params fqdn="{}" ip_address="{}" ' - ''.format(hostname, resolve_address(endpoint_type=endpoint_type, - override=False))) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) if len(hostname_group) >= 1: log('DNS HA: Hostname group is set with {} as members. ' 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) - relation_set(relation_id=relation_id, groups={ - 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + relation_data['groups'] = { + 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + } else: msg = 'DNS HA: Hostname group has no members.' status_set('blocked', msg) raise DNSHAException(msg) -def assert_charm_supports_dns_ha(): - """Validate prerequisites for DNS HA - The MAAS client is only available on Xenial or greater +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. """ - if lsb_release().get('DISTRIB_RELEASE') < '16.04': - msg = ('DNS HA is only supported on 16.04 and greater ' - 'versions of Ubuntu.') - status_set('blocked', msg) - raise DNSHAException(msg) - return True + cluster_config = get_hacluster_config() + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) -def expect_ha(): - """ Determine if the unit expects to be in HA + if iface is not None: + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vip_group: + if vip not in relation_data['resource_params'][vip_key]: + vip_key = '{}_{}'.format(vip_key, vip_params) + else: + log("Resource '%s' (vip='%s') already exists in " + "vip group - skipping" % (vip_key, vip), WARNING) + continue - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. 
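The VIP handling that continues below boils down to choosing a resource agent, a parameter name, and a resource key per address. A self-contained sketch of that selection (the colon test is a simplified stand-in for the imported is_ipv6 helper, and the sample values are invented):

    def vip_resource(service, vip, iface, netmask):
        # Pick the Pacemaker resource agent and parameter by address family.
        ipv6 = ':' in vip  # simplified stand-in for is_ipv6()
        agent = 'ocf:heartbeat:IPv6addr' if ipv6 else 'ocf:heartbeat:IPaddr2'
        ip_param = 'ipv6addr' if ipv6 else 'ip'
        key = 'res_{}_{}_vip'.format(service, iface)
        params = ('params {ip}="{vip}" cidr_netmask="{netmask}" '
                  'nic="{iface}"'.format(ip=ip_param, vip=vip,
                                         iface=iface, netmask=netmask))
        return key, agent, params

    print(vip_resource('ceph-radosgw', '10.5.100.1', 'eth0', '255.255.255.0'))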
+ relation_data['resources'][vip_key] = res_neutron_vip + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + vip_group.append(vip_key) - if len(vip_group) >= 1: + relation_data['groups'] = { + 'grp_{}_vips'.format(service): ' '.join(vip_group) + }
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index 315b2a3f..e2e73b2c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -15,9 +15,6 @@ Listen {{ public_port }} {% if port -%} <VirtualHost *:{{ port }}> WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ -{% if python_path -%} - python-path={{ python_path }} \ -{% endif -%} display-name=%{GROUP} WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} @@ -29,7 +26,7 @@ Listen {{ public_port }} ErrorLog /var/log/apache2/{{ service_name }}_error.log CustomLog /var/log/apache2/{{ service_name }}_access.log combined - <Directory {{ usr_bin }}> + <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> @@ -44,9 +41,6 @@ Listen {{ public_port }} {% if admin_port -%} <VirtualHost *:{{ admin_port }}> WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ -{% if python_path -%} - python-path={{ python_path }} \ -{% endif -%} display-name=%{GROUP} WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} @@ -58,7 +52,7 @@ Listen {{ public_port }} ErrorLog /var/log/apache2/{{ service_name }}_error.log CustomLog /var/log/apache2/{{ service_name }}_access.log combined - <Directory {{ usr_bin }}> + <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> @@ -73,9 +67,6 @@ Listen {{ public_port }} {% if public_port -%} <VirtualHost *:{{ public_port }}> WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ -{% if python_path -%} - python-path={{ python_path }} \ -{% endif -%} display-name=%{GROUP} WSGIProcessGroup {{ service_name }}-public WSGIScriptAlias / {{ public_script }} @@ -87,7 +78,7 @@ Listen {{ public_port }} ErrorLog /var/log/apache2/{{ service_name }}_error.log CustomLog /var/log/apache2/{{ service_name }}_access.log combined - <Directory {{ usr_bin }}> + <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion>
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 9e5af342..b753275d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,6 @@ import re import itertools import functools -import shutil import six import traceback @@ -47,7 +46,6 @@ related_units, relation_ids, relation_set, - service_name, status_set, hook_name, application_version_set, @@ -68,11 +66,6 @@ port_has_listener, ) -from charmhelpers.contrib.python.packages import ( - pip_create_virtualenv, - pip_install, -) - from charmhelpers.core.host import ( lsb_release, mounts, @@ -84,7 +77,6 @@ ) from charmhelpers.fetch import ( apt_cache, - install_remote, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, @@ -278,27 +270,6 @@ ]), } -GIT_DEFAULT_REPOS = { - 'requirements': 'git://github.com/openstack/requirements',
- 'cinder': 'git://github.com/openstack/cinder', - 'glance': 'git://github.com/openstack/glance', - 'horizon': 'git://github.com/openstack/horizon', - 'keystone': 'git://github.com/openstack/keystone', - 'networking-hyperv': 'git://github.com/openstack/networking-hyperv', - 'neutron': 'git://github.com/openstack/neutron', - 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas', - 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas', - 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas', - 'nova': 'git://github.com/openstack/nova', -} - -GIT_DEFAULT_BRANCHES = { - 'liberty': 'stable/liberty', - 'mitaka': 'stable/mitaka', - 'newton': 'stable/newton', - 'master': 'master', -} - DEFAULT_LOOPBACK_SIZE = '5G' @@ -530,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False): if _os_rel: return _os_rel _os_rel = ( - git_os_codename_install_source(config('openstack-origin-git')) or get_os_codename_package(package, fatal=False) or get_os_codename_install_source(config('openstack-origin')) or base) @@ -656,11 +626,6 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - if "swift" in package: - major_cur_vers = cur_vers.split('.', 1)[0] - major_avail_vers = avail_vers.split('.', 1)[0] - major_diff = apt.version_compare(major_avail_vers, major_cur_vers) - return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) return apt.version_compare(avail_vers, cur_vers) == 1 @@ -771,417 +736,6 @@ def wrapped_f(*args): return wrap -def git_install_requested(): - """ - Returns true if openstack-origin-git is specified. - """ - return config('openstack-origin-git') is not None - - -def git_os_codename_install_source(projects_yaml): - """ - Returns OpenStack codename of release being installed from source. - """ - if git_install_requested(): - projects = _git_yaml_load(projects_yaml) - - if projects in GIT_DEFAULT_BRANCHES.keys(): - if projects == 'master': - return 'ocata' - return projects - - if 'release' in projects: - if projects['release'] == 'master': - return 'ocata' - return projects['release'] - - return None - - -def git_default_repos(projects_yaml): - """ - Returns default repos if a default openstack-origin-git value is specified. 
- """ - service = service_name() - core_project = service - - for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): - if projects_yaml == default: - - # add the requirements repo first - repo = { - 'name': 'requirements', - 'repository': GIT_DEFAULT_REPOS['requirements'], - 'branch': branch, - } - repos = [repo] - - # neutron-* and nova-* charms require some additional repos - if service in ['neutron-api', 'neutron-gateway', - 'neutron-openvswitch']: - core_project = 'neutron' - if service == 'neutron-api': - repo = { - 'name': 'networking-hyperv', - 'repository': GIT_DEFAULT_REPOS['networking-hyperv'], - 'branch': branch, - } - repos.append(repo) - for project in ['neutron-fwaas', 'neutron-lbaas', - 'neutron-vpnaas', 'nova']: - repo = { - 'name': project, - 'repository': GIT_DEFAULT_REPOS[project], - 'branch': branch, - } - repos.append(repo) - - elif service in ['nova-cloud-controller', 'nova-compute']: - core_project = 'nova' - repo = { - 'name': 'neutron', - 'repository': GIT_DEFAULT_REPOS['neutron'], - 'branch': branch, - } - repos.append(repo) - elif service == 'openstack-dashboard': - core_project = 'horizon' - - # finally add the current service's core project repo - repo = { - 'name': core_project, - 'repository': GIT_DEFAULT_REPOS[core_project], - 'branch': branch, - } - repos.append(repo) - - return yaml.dump(dict(repositories=repos, release=default)) - - return projects_yaml - - -def _git_yaml_load(projects_yaml): - """ - Load the specified yaml into a dictionary. - """ - if not projects_yaml: - return None - - return yaml.load(projects_yaml) - - -requirements_dir = None - - -def git_clone_and_install(projects_yaml, core_project): - """ - Clone/install all specified OpenStack repositories. - - The expected format of projects_yaml is: - - repositories: - - {name: keystone, - repository: 'git://git.openstack.org/openstack/keystone.git', - branch: 'stable/icehouse'} - - {name: requirements, - repository: 'git://git.openstack.org/openstack/requirements.git', - branch: 'stable/icehouse'} - - directory: /mnt/openstack-git - http_proxy: squid-proxy-url - https_proxy: squid-proxy-url - - The directory, http_proxy, and https_proxy keys are optional. - - """ - global requirements_dir - parent_dir = '/mnt/openstack-git' - http_proxy = None - - projects = _git_yaml_load(projects_yaml) - _git_validate_projects_yaml(projects, core_project) - - old_environ = dict(os.environ) - - if 'http_proxy' in projects.keys(): - http_proxy = projects['http_proxy'] - os.environ['http_proxy'] = projects['http_proxy'] - if 'https_proxy' in projects.keys(): - os.environ['https_proxy'] = projects['https_proxy'] - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - pip_create_virtualenv(os.path.join(parent_dir, 'venv')) - - # Upgrade setuptools and pip from default virtualenv versions. The default - # versions in trusty break master OpenStack branch deployments. 
- for p in ['pip', 'setuptools']: - pip_install(p, upgrade=True, proxy=http_proxy, - venv=os.path.join(parent_dir, 'venv')) - - constraints = None - for p in projects['repositories']: - repo = p['repository'] - branch = p['branch'] - depth = '1' - if 'depth' in p.keys(): - depth = p['depth'] - if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=False) - requirements_dir = repo_dir - constraints = os.path.join(repo_dir, "upper-constraints.txt") - # upper-constraints didn't exist until after icehouse - if not os.path.isfile(constraints): - constraints = None - # use constraints unless project yaml sets use_constraints to false - if 'use_constraints' in projects.keys(): - if not projects['use_constraints']: - constraints = None - else: - repo_dir = _git_clone_and_install_single(repo, branch, depth, - parent_dir, http_proxy, - update_requirements=True, - constraints=constraints) - - os.environ = old_environ - - -def _git_validate_projects_yaml(projects, core_project): - """ - Validate the projects yaml. - """ - _git_ensure_key_exists('repositories', projects) - - for project in projects['repositories']: - _git_ensure_key_exists('name', project.keys()) - _git_ensure_key_exists('repository', project.keys()) - _git_ensure_key_exists('branch', project.keys()) - - if projects['repositories'][0]['name'] != 'requirements': - error_out('{} git repo must be specified first'.format('requirements')) - - if projects['repositories'][-1]['name'] != core_project: - error_out('{} git repo must be specified last'.format(core_project)) - - _git_ensure_key_exists('release', projects) - - -def _git_ensure_key_exists(key, keys): - """ - Ensure that key exists in keys. - """ - if key not in keys: - error_out('openstack-origin-git key \'{}\' is missing'.format(key)) - - -def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, - update_requirements, constraints=None): - """ - Clone and install a single git repository. - """ - if not os.path.exists(parent_dir): - juju_log('Directory already exists at {}. ' - 'No need to create directory.'.format(parent_dir)) - os.mkdir(parent_dir) - - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote( - repo, dest=parent_dir, branch=branch, depth=depth) - - venv = os.path.join(parent_dir, 'venv') - - if update_requirements: - if not requirements_dir: - error_out('requirements repo must be cloned before ' - 'updating from global requirements.') - _git_update_requirements(venv, repo_dir, requirements_dir) - - juju_log('Installing git repo from dir: {}'.format(repo_dir)) - if http_proxy: - pip_install(repo_dir, proxy=http_proxy, venv=venv, - constraints=constraints) - else: - pip_install(repo_dir, venv=venv, constraints=constraints) - - return repo_dir - - -def _git_update_requirements(venv, package_dir, reqs_dir): - """ - Update from global requirements. - - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt. - """ - orig_dir = os.getcwd() - os.chdir(reqs_dir) - python = os.path.join(venv, 'bin/python') - cmd = [python, 'update.py', package_dir] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - package = os.path.basename(package_dir) - error_out("Error updating {} from " - "global-requirements.txt".format(package)) - os.chdir(orig_dir) - - -def git_pip_venv_dir(projects_yaml): - """ - Return the pip virtualenv path. 
- """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - return os.path.join(parent_dir, 'venv') - - -def git_src_dir(projects_yaml, project): - """ - Return the directory where the specified project's source is located. - """ - parent_dir = '/mnt/openstack-git' - - projects = _git_yaml_load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - if p['name'] == project: - return os.path.join(parent_dir, os.path.basename(p['repository'])) - - return None - - -def git_yaml_value(projects_yaml, key): - """ - Return the value in projects_yaml for the specified key. - """ - projects = _git_yaml_load(projects_yaml) - - if key in projects.keys(): - return projects[key] - - return None - - -def git_generate_systemd_init_files(templates_dir): - """ - Generate systemd init files. - - Generates and installs systemd init units and script files based on the - *.init.in files contained in the templates_dir directory. - - This code is based on the openstack-pkg-tools package and its init - script generation, which is used by the OpenStack packages. - """ - for f in os.listdir(templates_dir): - # Create the init script and systemd unit file from the template - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - init_source = os.path.join(templates_dir, init_file) - service_source = os.path.join(templates_dir, service_file) - - init_dest = os.path.join('/etc/init.d', init_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(init_in_source, init_source) - with open(init_source, 'a') as outfile: - template = ('/usr/share/openstack-pkg-tools/' - 'init-script-template') - with open(template) as infile: - outfile.write('\n\n{}'.format(infile.read())) - - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if os.path.exists(init_dest): - os.remove(init_dest) - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(init_source, init_dest) - shutil.copyfile(service_source, service_dest) - os.chmod(init_dest, 0o755) - - for f in os.listdir(templates_dir): - # If there's a service.in file, use it instead of the generated one - if f.endswith(".service.in"): - service_in_file = f - service_file = f[:-3] - - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - shutil.copyfile(service_in_source, service_source) - - if os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - for f in os.listdir(templates_dir): - # Generate the systemd unit if there's no existing .service.in - if f.endswith(".init.in"): - init_in_file = f - init_file = f[:-8] - service_in_file = "{}.service.in".format(init_file) - service_file = "{}.service".format(init_file) - - init_in_source = os.path.join(templates_dir, init_in_file) - service_in_source = os.path.join(templates_dir, service_in_file) - service_source = os.path.join(templates_dir, service_file) - service_dest = os.path.join('/lib/systemd/system', service_file) - - if not os.path.exists(service_in_source): - cmd = ['pkgos-gen-systemd-unit', init_in_source] - subprocess.check_call(cmd) - - if 
os.path.exists(service_dest): - os.remove(service_dest) - shutil.copyfile(service_source, service_dest) - - -def git_determine_usr_bin(): - """Return the /usr/bin path for Apache2 config. - - The /usr/bin path will be located in the virtualenv if the charm - is configured to deploy from source. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), 'bin') - else: - return '/usr/bin' - - -def git_determine_python_path(): - """Return the python-path for Apache2 config. - - Returns 'None' unless the charm is configured to deploy from source, - in which case the path of the virtualenv's site-packages is returned. - """ - if git_install_requested(): - projects_yaml = config('openstack-origin-git') - projects_yaml = git_default_repos(projects_yaml) - return os.path.join(git_pip_venv_dir(projects_yaml), - 'lib/python2.7/site-packages') - else: - return None - - def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts @@ -1615,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): """ ret = False - if git_install_requested(): - action_set({'outcome': 'installed from source, skipped upgrade.'}) - else: - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) return ret @@ -2045,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None): def update_json_file(filename, items): """Updates the json `filename` with a given dict. - :param filename: json filename (i.e.: /etc/glance/policy.json) + :param filename: path to json file (e.g. /etc/glance/policy.json) :param items: dict of items to update """ + if not items: + return + with open(filename) as fd: policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. 
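Worth noting before the implementation lines that follow: dumping with sort_keys=True canonicalizes the JSON, so the before/after comparison depends only on content, never on key order. A quick illustration:

    import json

    a = {'context_is_admin': 'role:admin', 'default': ''}
    b = {'default': '', 'context_is_admin': 'role:admin'}
    # Same content, different insertion order: canonical dumps are equal,
    # so a content-identical update is detected as a no-op.
    assert json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)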
+ before = json.dumps(policy, indent=4, sort_keys=True) policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + with open(filename, "w") as fd: - fd.write(json.dumps(policy, indent=4)) + fd.write(after) @cached diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0d9bacfd..e13e60a6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None): assert isinstance(valid_range, list), \ "valid_range must be a list, was given {}".format(valid_range) # If we're dealing with strings - if valid_type is six.string_types: + if isinstance(value, six.string_types): assert value in valid_range, \ "{} is not in the list {}".format(value, valid_range) # Integer, float should have a min and max @@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value): :param value: :return: None. Can raise CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, + str(value).lower()] try: check_call(cmd) except CalledProcessError: @@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param durability_estimator: int :return: None. Can raise CalledProcessError """ + version = ceph_version() + # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), - 'ruleset_failure_domain=' + failure_domain] + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + # failure_domain changed in luminous + if version and version >= '12.0.0': + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -1064,14 +1073,24 @@ def __init__(self, api_version=1, request_id=None): self.ops = [] def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None): + permission=None, key_name=None, + object_prefix_permissions=None): """ Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. 
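+        For example: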
+ { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} """ - self.ops.append({'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, 'name': key_name or service_name(), - 'group-permission': permission}) + self.ops.append({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None): @@ -1107,7 +1126,10 @@ def request(self): def _ops_equal(self, other): if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in ['replicas', 'name', 'op', 'pg_num', 'weight']: + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 5a88f798..211ae87d 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -39,6 +39,7 @@ else: from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -344,6 +345,7 @@ def save(self): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py index 7af875c2..6d7b4942 100644 --- a/ceph-radosgw/tests/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/tests/charmhelpers/core/unitdata.py @@ -175,6 +175,8 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 68ff4cbe..3aba0188 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -39,18 +39,19 @@ def setUp(self): self.relation_get.side_effect = self.test_relation.get self.config.side_effect = 
self.test_config.get + @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') @patch('charmhelpers.contrib.openstack.context.unit_get') @patch('charmhelpers.contrib.openstack.context.local_unit') - @patch('charmhelpers.contrib.openstack.context.get_host_ip') @patch('charmhelpers.contrib.openstack.context.config') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch('charmhelpers.contrib.openstack.context.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, - _ctxtconfig, _get_host_ip, _local_unit, _unit_get, _mkdir): - _get_host_ip.return_value = '10.0.0.10' + _ctxtconfig, _local_unit, _unit_get, _mkdir, + _get_relation_ip): _unit_get.return_value = '10.0.0.10' + _get_relation_ip.return_value = '10.0.0.10' _ctxtconfig.side_effect = self.test_config.get _haconfig.side_effect = self.test_config.get _harelation_ids.return_value = [] From 71f1fd3562e214b1655ddef3294b800dea181269 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 19 Jan 2018 10:18:33 -0800 Subject: [PATCH 1446/2699] Rebuild for sync charm-helpers Change-Id: I50350c722959333fd25e355afde747d5bf615cac --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index a04b6798..e30bce25 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -d6e45297-5d9a-4123-b1ec-81e3fee1a4b6 +1925591e-fd45-11e7-b703-eb6b652fac69 From 7fc44b71beb6594580f5481ac6e7155e69b5639c Mon Sep 17 00:00:00 2001 From: Tilman Baumann Date: Fri, 19 Jan 2018 11:09:13 +0100 Subject: [PATCH 1447/2699] Drop duplicate config keys (yaml syntax error) The yamllint command reported hard errors because of duplicate keys in config.yaml Closes-Bug: #1747002 Change-Id: I3b7b36b46b3e3d35936ee03ba8b5d7fc1e7bb5db --- ceph-radosgw/config.yaml | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 02e4b8f1..2c484aad 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -171,30 +171,6 @@ options: description: | Default multicast port number that will be used to communicate between HA Cluster nodes. - haproxy-server-timeout: - type: int - default: - description: | - Server timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 90000ms is used. - haproxy-client-timeout: - type: int - default: - description: | - Client timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 90000ms is used. - haproxy-queue-timeout: - type: int - default: - description: | - Queue timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 9000ms is used. - haproxy-connect-timeout: - type: int - default: - description: | - Connect timeout configuration in ms for haproxy, used in HA - configurations. If not provided, default value of 9000ms is used. # Network config (by default all access is over 'private-address') os-admin-network: type: string @@ -320,4 +296,3 @@ options: description: | SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. 
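The options removed here duplicated keys already defined elsewhere in the same config.yaml, and duplicate mapping keys are worse than dead weight: most YAML loaders, PyYAML included, silently keep only the last occurrence, so one of the two definitions never takes effect. A minimal sketch of that behaviour (hypothetical option values, assuming PyYAML is available):

import yaml

snippet = """
options:
  haproxy-queue-timeout:
    type: int
    default: 9000
  haproxy-queue-timeout:
    type: int
    default: 5000
"""

# safe_load() raises no error on the duplicate key; the later mapping
# silently replaces the earlier one, which is why yamllint treats this
# as a hard error rather than a style nit.
parsed = yaml.safe_load(snippet)
print(parsed['options']['haproxy-queue-timeout'])
# -> {'type': 'int', 'default': 5000}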
- From 6dc527f86a9815110198dd4dde7bd694725353a9 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 9 Feb 2018 11:21:31 +0000 Subject: [PATCH 1448/2699] apparmor: Fix use with directory-based OSDs Ensure that directory-based OSDs under /srv/ceph can create hard links when apparmor is in enforce mode. Without this permission, links go missing over time and the ceph-osd daemons eventually abort. Change-Id: I7cc25f5d436204d1f47c9a3a67a15f27c16b7505 Closes-Bug: 1748426 --- ceph-osd/files/apparmor/usr.bin.ceph-osd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd index 8edab6ed..04c5865b 100644 --- a/ceph-osd/files/apparmor/usr.bin.ceph-osd +++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd @@ -24,7 +24,7 @@ @{PROC}/loadavg r, /run/ceph/* rw, - /srv/ceph/** rwk, + /srv/ceph/** rwkl, /tmp/ r, /var/lib/ceph/** rwk, /var/lib/ceph/osd/** l, From 50d73cad4c33a7ec916b999deb32978232b5a18c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Feb 2018 15:03:07 +0100 Subject: [PATCH 1449/2699] Sync in charms.ceph change for udev settle Change-Id: Ideb8dbe8e6e43966baa83084fa0ea7eac2e2597c Closes-Bug: #1746118 --- ceph-osd/lib/ceph/utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 2915225c..ea70d955 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1008,6 +1008,9 @@ def rescan_osd_devices(): subprocess.call(cmd) + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" From 0313572a3849d1513f7084571c96ecefc083e52d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 21 Feb 2018 08:00:52 -0600 Subject: [PATCH 1450/2699] Rebuild for sync charm-helpers Change-Id: Ifcb52bbf47bf5248bd93ceb4504bf0d93a8b28a2 --- ceph-fs/rebuild | 2 +- ceph-fs/src/tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index e30bce25..d9074866 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to.
# simply change the uuid to something new -1925591e-fd45-11e7-b703-eb6b652fac69 +1b817f14-1742-11e8-bf02-13a179b934e9 diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index f201a203..799d7068 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -8,7 +8,7 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 - AMULET_SETUP_TIMEOUT=2700 + AMULET_SETUP_TIMEOUT=5400 whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* deps = -r{toxinidir}/test-requirements.txt From 9578e62a91c7e6e0d753a14dc7b6a08edaa79e0e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 21 Feb 2018 07:32:48 -0600 Subject: [PATCH 1451/2699] Sync charm-helpers Change-Id: I9571e6a74c2f3da1afb4f2567bda4f3a78696578 --- .../hooks/charmhelpers/contrib/network/ip.py | 11 +- .../contrib/openstack/amulet/utils.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 117 ++++++++++++++++-- .../contrib/openstack/templating.py | 95 ++++++++++---- ceph-mon/hooks/charmhelpers/core/hookenv.py | 16 ++- .../hooks/charmhelpers/core/templating.py | 27 ++-- .../contrib/openstack/amulet/utils.py | 4 +- ceph-mon/tests/charmhelpers/core/hookenv.py | 16 ++- .../tests/charmhelpers/core/templating.py | 27 ++-- ceph-mon/tox.ini | 2 +- 10 files changed, 258 insertions(+), 61 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index a871ce37..b13277bb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -27,6 +27,7 @@ network_get_primary_address, unit_get, WARNING, + NoNetworkBinding, ) from charmhelpers.core.host import ( @@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False): _validate_cidr(network) network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue if network.version == 4 and netifaces.AF_INET in addresses: for addr in addresses[netifaces.AF_INET]: cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], @@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None): except NotImplementedError: # If network-get is not available address = get_host_ip(unit_get('private-address')) + except NoNetworkBinding: + log("No network binding for {}".format(interface), WARNING) + address = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): # Currently IPv6 has priority, eventually we want IPv6 to just be diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 7ada2760..36cf32fc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -617,7 +617,9 @@ class HAProxyContext(OSContextGenerator): """ interfaces = ['cluster'] - def __init__(self, singlenode_mode=False): + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types self.singlenode_mode = singlenode_mode def __call__(self): @@ -631,7 +633,7 @@ def __call__(self): # NOTE(jamespage): build out map of configured network endpoints # and associated backends - for addr_type in ADDRESS_TYPES: + for addr_type in self.address_types: cfg_opt = 'os-{}-network'.format(addr_type) # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather # than 'internal' @@ -1635,18 +1637,84 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. """ - def __init__(self, ost_rel_check_pkg_name): - self.ost_rel_check_pkg_name = ost_rel_check_pkg_name - def __call__(self): - ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} - rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. + def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. 
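+
+        Illustrative example (assumed values, not from the upstream
+        docstring): on a Pike or later cloud with use-internal-endpoints
+        set, VolumeAPIContext('cinder-common')() would return
+        {'volume_api_version': '3',
+         'volume_catalog_info': 'volumev3:cinderv3:internalURL'}.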
+ """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. Returns + a dict containing the volume_api_version and the volume_catalog_info. + """ + rel = os_release(self.pkg, base='icehouse') + version = '2' if CompareOpenStackReleases(rel) >= 'pike': - ctxt['volume_api_version'] = '3' - else: - ctxt['volume_api_version'] = '2' + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } - return ctxt + def __call__(self): + return self.ctxt class AppArmorContext(OSContextGenerator): @@ -1784,3 +1852,30 @@ def __call__(self): ctxt['memcache_server_formatted'], ctxt['memcache_port']) return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + + def __call__(self): + mkdir(self.dirname) + return {} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index 77490e4d..a623315d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -93,7 +93,8 @@ class OSConfigTemplate(object): Associates a config file template with a list of context generators. Responsible for constructing a template context based on those generators. 
""" - def __init__(self, config_file, contexts): + + def __init__(self, config_file, contexts, config_template=None): self.config_file = config_file if hasattr(contexts, '__call__'): @@ -103,6 +104,8 @@ def __init__(self, config_file, contexts): self._complete_contexts = [] + self.config_template = config_template + def context(self): ctxt = {} for context in self.contexts: @@ -124,6 +127,11 @@ def complete_contexts(self): self.context() return self._complete_contexts + @property + def is_string_template(self): + """:returns: Boolean if this instance is a template initialised with a string""" + return self.config_template is not None + class OSConfigRenderer(object): """ @@ -148,6 +156,10 @@ class OSConfigRenderer(object): contexts=[context.IdentityServiceContext()]) configs.register(config_file='/etc/haproxy/haproxy.conf', contexts=[context.HAProxyContext()]) + configs.register(config_file='/etc/keystone/policy.d/extra.cfg', + contexts=[context.ExtraPolicyContext() + context.KeystoneContext()], + config_template=hookenv.config('extra-policy')) # write out a single config configs.write('/etc/nova/nova.conf') # write out all registered configs @@ -218,14 +230,23 @@ def __init__(self, templates_dir, openstack_release): else: apt_install('python3-jinja2') - def register(self, config_file, contexts): + def register(self, config_file, contexts, config_template=None): """ Register a config file with a list of context generators to be called during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use """ - self.templates[config_file] = OSConfigTemplate(config_file=config_file, - contexts=contexts) - log('Registered config file: %s' % config_file, level=INFO) + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) def _get_tmpl_env(self): if not self._tmpl_env: @@ -235,32 +256,58 @@ def _get_tmpl_env(self): def _get_template(self, template): self._get_tmpl_env() template = self._tmpl_env.get_template(template) - log('Loaded template from %s' % template.filename, level=INFO) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. 
+ ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) return template def render(self, config_file): if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) + log('Config not registered: {}'.format(config_file), level=ERROR) raise OSConfigException - ctxt = self.templates[config_file].context() - - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking for it - # using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) try: template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from %s by %s or %s.' % - (self.templates_dir, os.path.basename(config_file), _tmpl), - level=ERROR) - raise e - - log('Rendering from template: %s' % _tmpl, level=INFO) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) return template.render(ctxt) def write(self, config_file): diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): 
""" Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-mon/tests/charmhelpers/core/templating.py b/ceph-mon/tests/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-mon/tests/charmhelpers/core/templating.py +++ b/ceph-mon/tests/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index b8559a91..365444d3 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -10,7 +10,7 @@ skip_missing_interpreters = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=2700 + AMULET_SETUP_TIMEOUT=5400 install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} From ad9a09078c2334ded2518ed53f27324fff1200dd Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 21 Feb 2018 07:33:06 -0600 Subject: [PATCH 1452/2699] Sync charm-helpers Change-Id: I6b3a8b4f97e42d820792db6533874405d3625364 --- .../hooks/charmhelpers/contrib/network/ip.py | 11 +- .../charmhelpers/contrib/openstack/context.py | 117 ++++++++++++++++-- ceph-osd/hooks/charmhelpers/core/hookenv.py | 16 ++- .../hooks/charmhelpers/core/templating.py | 27 ++-- .../contrib/openstack/amulet/utils.py | 4 +- ceph-osd/tests/charmhelpers/core/hookenv.py | 16 ++- .../tests/charmhelpers/core/templating.py | 27 ++-- ceph-osd/tox.ini | 2 +- 8 files changed, 185 insertions(+), 35 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index a871ce37..b13277bb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -27,6 +27,7 @@ network_get_primary_address, unit_get, WARNING, + NoNetworkBinding, ) from charmhelpers.core.host import ( @@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False): _validate_cidr(network) network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue if network.version == 4 and netifaces.AF_INET in addresses: for addr in addresses[netifaces.AF_INET]: cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], @@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None): except NotImplementedError: # If network-get is not available address = get_host_ip(unit_get('private-address')) + except NoNetworkBinding: + log("No network binding for {}".format(interface), WARNING) + address = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): # Currently IPv6 has priority, eventually we want IPv6 to just be diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 7ada2760..36cf32fc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -617,7 +617,9 @@ class HAProxyContext(OSContextGenerator): """ interfaces = ['cluster'] - def __init__(self, singlenode_mode=False): + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types self.singlenode_mode = singlenode_mode def __call__(self): @@ -631,7 +633,7 @@ def __call__(self): # NOTE(jamespage): build out map of configured network endpoints # and associated backends - for addr_type in ADDRESS_TYPES: + for addr_type in self.address_types: cfg_opt = 'os-{}-network'.format(addr_type) # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather # than 'internal' @@ -1635,18 
+1637,84 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. """ - def __init__(self, ost_rel_check_pkg_name): - self.ost_rel_check_pkg_name = ost_rel_check_pkg_name - def __call__(self): - ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} - rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. + def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. + """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. Returns + a dict containing the volume_api_version and the volume_catalog_info. 
+ """ + rel = os_release(self.pkg, base='icehouse') + version = '2' if CompareOpenStackReleases(rel) >= 'pike': - ctxt['volume_api_version'] = '3' - else: - ctxt['volume_api_version'] = '2' + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } - return ctxt + def __call__(self): + return self.ctxt class AppArmorContext(OSContextGenerator): @@ -1784,3 +1852,30 @@ def __call__(self): ctxt['memcache_server_formatted'], ctxt['memcache_port']) return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + + def __call__(self): + mkdir(self.dirname) + return {} diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. 
+ config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-osd/tests/charmhelpers/core/templating.py b/ceph-osd/tests/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-osd/tests/charmhelpers/core/templating.py +++ b/ceph-osd/tests/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index b8559a91..365444d3 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -10,7 +10,7 @@ skip_missing_interpreters = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=2700 + AMULET_SETUP_TIMEOUT=5400 install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} From be46783a0d92ca38fa18b0ec1a71212afdb274e4 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 21 Feb 2018 07:33:21 -0600 Subject: [PATCH 1453/2699] Sync charm-helpers Change-Id: I21920e173b2421b3dd077655b9b01812064b7d68 --- .../hooks/charmhelpers/contrib/network/ip.py | 11 +++++++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 16 ++++++++++- .../hooks/charmhelpers/core/templating.py | 27 ++++++++++++------- .../contrib/openstack/amulet/utils.py | 4 +-- ceph-proxy/tests/charmhelpers/core/hookenv.py | 16 ++++++++++- .../tests/charmhelpers/core/templating.py | 27 ++++++++++++------- ceph-proxy/tox.ini | 2 +- 7 files changed, 79 insertions(+), 24 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py index a871ce37..b13277bb 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py @@ -27,6 +27,7 @@ network_get_primary_address, unit_get, WARNING, + NoNetworkBinding, ) from charmhelpers.core.host import ( @@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False): _validate_cidr(network) network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue if network.version == 4 and netifaces.AF_INET in addresses: for addr in addresses[netifaces.AF_INET]: cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], @@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None): except NotImplementedError: # If network-get is not available address = get_host_ip(unit_get('private-address')) + except NoNetworkBinding: + log("No network binding for {}".format(interface), WARNING) + address = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): # Currently IPv6 has priority, eventually we want IPv6 to just be diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding 
for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/hooks/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/templating.py +++ b/ceph-proxy/hooks/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-proxy/tests/charmhelpers/core/templating.py b/ceph-proxy/tests/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-proxy/tests/charmhelpers/core/templating.py +++ b/ceph-proxy/tests/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 6d44f4b9..dae53621 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -9,7 +9,7 @@ skipsdist = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=2700 + AMULET_SETUP_TIMEOUT=5400 install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} From f717e5d7884a9e1c892c51f563531637676fb72b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 21 Feb 2018 07:33:34 -0600 Subject: [PATCH 1454/2699] Sync charm-helpers Change-Id: I866f3f281799a610f59f3561864e6f411d4bb138 --- .../hooks/charmhelpers/contrib/network/ip.py | 11 +- .../contrib/openstack/amulet/utils.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 117 ++++++++++++++++-- .../contrib/openstack/templating.py | 95 ++++++++++---- .../hooks/charmhelpers/core/hookenv.py | 16 ++- .../hooks/charmhelpers/core/templating.py | 27 ++-- .../contrib/openstack/amulet/utils.py | 4 +- .../tests/charmhelpers/core/hookenv.py | 16 ++- .../tests/charmhelpers/core/templating.py | 27 ++-- ceph-radosgw/tox.ini | 2 +- 10 files changed, 258 insertions(+), 61 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index a871ce37..b13277bb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -27,6 +27,7 @@ network_get_primary_address, unit_get, WARNING, + NoNetworkBinding, ) from charmhelpers.core.host import ( @@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False): _validate_cidr(network) network = netaddr.IPNetwork(network) for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue if network.version == 4 and netifaces.AF_INET in addresses: for addr in addresses[netifaces.AF_INET]: cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], @@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None): except NotImplementedError: # If network-get is not available address = get_host_ip(unit_get('private-address')) + except NoNetworkBinding: + log("No network binding for {}".format(interface), WARNING) + address = get_host_ip(unit_get('private-address')) if config('prefer-ipv6'): # Currently IPv6 has priority, eventually we want IPv6 to just be diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 7ada2760..36cf32fc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -617,7 +617,9 @@ class HAProxyContext(OSContextGenerator): """ interfaces = ['cluster'] - def __init__(self, singlenode_mode=False): + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types self.singlenode_mode = singlenode_mode def __call__(self): @@ -631,7 +633,7 @@ def __call__(self): # NOTE(jamespage): build out map of configured network endpoints # and associated backends - for addr_type in ADDRESS_TYPES: + for addr_type in self.address_types: cfg_opt = 'os-{}-network'.format(addr_type) # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather # than 'internal' @@ -1635,18 +1637,84 @@ class InternalEndpointContext(OSContextGenerator): endpoints by default so this allows admins to optionally use internal endpoints. """ - def __init__(self, ost_rel_check_pkg_name): - self.ost_rel_check_pkg_name = ost_rel_check_pkg_name - def __call__(self): - ctxt = {'use_internal_endpoints': config('use-internal-endpoints')} - rel = os_release(self.ost_rel_check_pkg_name, base='icehouse') + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. + def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. 
+ """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. Returns + a dict containing the volume_api_version and the volume_catalog_info. + """ + rel = os_release(self.pkg, base='icehouse') + version = '2' if CompareOpenStackReleases(rel) >= 'pike': - ctxt['volume_api_version'] = '3' - else: - ctxt['volume_api_version'] = '2' + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } - return ctxt + def __call__(self): + return self.ctxt class AppArmorContext(OSContextGenerator): @@ -1784,3 +1852,30 @@ def __call__(self): ctxt['memcache_server_formatted'], ctxt['memcache_port']) return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + + def __call__(self): + mkdir(self.dirname) + return {} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 77490e4d..a623315d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -93,7 +93,8 @@ class OSConfigTemplate(object): Associates a config file template with a list of context generators. Responsible for constructing a template context based on those generators. 
""" - def __init__(self, config_file, contexts): + + def __init__(self, config_file, contexts, config_template=None): self.config_file = config_file if hasattr(contexts, '__call__'): @@ -103,6 +104,8 @@ def __init__(self, config_file, contexts): self._complete_contexts = [] + self.config_template = config_template + def context(self): ctxt = {} for context in self.contexts: @@ -124,6 +127,11 @@ def complete_contexts(self): self.context() return self._complete_contexts + @property + def is_string_template(self): + """:returns: Boolean if this instance is a template initialised with a string""" + return self.config_template is not None + class OSConfigRenderer(object): """ @@ -148,6 +156,10 @@ class OSConfigRenderer(object): contexts=[context.IdentityServiceContext()]) configs.register(config_file='/etc/haproxy/haproxy.conf', contexts=[context.HAProxyContext()]) + configs.register(config_file='/etc/keystone/policy.d/extra.cfg', + contexts=[context.ExtraPolicyContext() + context.KeystoneContext()], + config_template=hookenv.config('extra-policy')) # write out a single config configs.write('/etc/nova/nova.conf') # write out all registered configs @@ -218,14 +230,23 @@ def __init__(self, templates_dir, openstack_release): else: apt_install('python3-jinja2') - def register(self, config_file, contexts): + def register(self, config_file, contexts, config_template=None): """ Register a config file with a list of context generators to be called during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use """ - self.templates[config_file] = OSConfigTemplate(config_file=config_file, - contexts=contexts) - log('Registered config file: %s' % config_file, level=INFO) + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) def _get_tmpl_env(self): if not self._tmpl_env: @@ -235,32 +256,58 @@ def _get_tmpl_env(self): def _get_template(self, template): self._get_tmpl_env() template = self._tmpl_env.get_template(template) - log('Loaded template from %s' % template.filename, level=INFO) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. 
+ ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) return template def render(self, config_file): if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) + log('Config not registered: {}'.format(config_file), level=ERROR) raise OSConfigException - ctxt = self.templates[config_file].context() - - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking for it - # using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) try: template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from %s by %s or %s.' % - (self.templates_dir, os.path.basename(config_file), _tmpl), - level=ERROR) - raise e - - log('Rendering from template: %s' % _tmpl, level=INFO) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) return template.render(ctxt) def write(self, config_file): diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + 
template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 87f364d1..d93cff3c 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -92,7 +92,7 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,7 +138,7 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' def validate_svc_catalog_endpoint_data(self, expected, actual): diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 211ae87d..7ed1cc4e 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -820,6 +820,10 @@ def wrapper(decorated): return wrapper +class NoNetworkBinding(Exception): + pass + + def charm_dir(): """Return the root directory of the current charm""" d = os.environ.get('JUJU_CHARM_DIR') @@ -1106,7 +1110,17 @@ def network_get_primary_address(binding): :raise: NotImplementedError if run on Juju < 2.0 ''' cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response @translate_exc(from_exc=OSError, to_exc=NotImplementedError) diff --git a/ceph-radosgw/tests/charmhelpers/core/templating.py b/ceph-radosgw/tests/charmhelpers/core/templating.py index 7b801a34..9014015c 100644 --- a/ceph-radosgw/tests/charmhelpers/core/templating.py +++ b/ceph-radosgw/tests/charmhelpers/core/templating.py @@ -20,7 +20,8 @@ def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): """ Render a template. @@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root', The context should be a dict containing the values to be replaced in the template. + config_template may be provided to render from a provided template instead + of loading from a file. + The `owner`, `group`, and `perms` options will be passed to `write_file`. If omitted, `templates_dir` defaults to the `templates` folder in the charm. @@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root', if templates_dir is None: templates_dir = os.path.join(hookenv.charm_dir(), 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' 
% + (source, templates_dir), + level=hookenv.ERROR) + raise e content = template.render(context) if target is not None: target_dir = os.path.dirname(target) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 6d44f4b9..dae53621 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -9,7 +9,7 @@ skipsdist = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=2700 + AMULET_SETUP_TIMEOUT=5400 install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} From e15d28a1ef067bbefa38e1b2bb2469b52f78b953 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 22 Feb 2018 15:37:07 +0100 Subject: [PATCH 1455/2699] Add xenial Queens amulet tests to gate Change-Id: I22e2a936d85796637eb13864951e76ee110b82e0 --- .../tests/{dev-basic-xenial-queens => gate-basic-xenial-queens} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename ceph-fs/src/tests/{dev-basic-xenial-queens => gate-basic-xenial-queens} (100%) diff --git a/ceph-fs/src/tests/dev-basic-xenial-queens b/ceph-fs/src/tests/gate-basic-xenial-queens similarity index 100% rename from ceph-fs/src/tests/dev-basic-xenial-queens rename to ceph-fs/src/tests/gate-basic-xenial-queens From ec8461091cd0f1ca6a9c88dc34346760d8e4e88c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 22 Feb 2018 15:01:38 +0100 Subject: [PATCH 1456/2699] Add xenial Queens amulet tests to gate Change-Id: I506298f31ab619f21b5acebdfcc9ae2557dac234 --- .../tests/{dev-basic-xenial-queens => gate-basic-xenial-queens} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename ceph-proxy/tests/{dev-basic-xenial-queens => gate-basic-xenial-queens} (100%) diff --git a/ceph-proxy/tests/dev-basic-xenial-queens b/ceph-proxy/tests/gate-basic-xenial-queens similarity index 100% rename from ceph-proxy/tests/dev-basic-xenial-queens rename to ceph-proxy/tests/gate-basic-xenial-queens From 2a1d70a04aaa301a0cc4fe23325dbb8c740a91b8 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 9 Mar 2018 15:51:13 +0100 Subject: [PATCH 1457/2699] Increase timeout and remove empty keyring for ceph-create-keys Sync relevant changes from charms.ceph. Default timeout of ceph-create-keys of 600 seconds is not adequate in all circumstances. When ceph-create-keys times out it leaves a empty keyring. The retry attempt will fail as long as the empty keyring exists. Remove it before retry attempt. Change-Id: I914be8a5a7dcd9676438de92bba7f91283232837 Closes-Bug: #1719436 --- ceph-mon/lib/ceph/utils.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 2915225c..db981fd9 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1355,10 +1355,17 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. - cmd = ['ceph-create-keys', '--id', hostname] + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate for all situations. + # LP#1719436 + cmd = ['ceph-create-keys', '--id', hostname, '--timeout', '1800'] subprocess.check_call(cmd) - osstat = os.stat("/etc/ceph/ceph.client.admin.keyring") + _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' + osstat = os.stat(_client_admin_keyring) if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. 
+ # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception From a52ac1a0eef0751bdf729887856a78d0e2a96f8d Mon Sep 17 00:00:00 2001 From: Tamas Erdei Date: Tue, 13 Mar 2018 14:24:25 +0100 Subject: [PATCH 1458/2699] Fix race condition in collect_ceph_status.sh There is a race condition between collect_ceph_status.sh writing the status file and check_ceph_status.py reading that file. This patch fixes that by directing ceph output into a temp file, and then replacing the old state file with the new temp file using an atomic mv operation. Change-Id: If332d187f8dcb9f7fcd8b4a47f791beb8e27eaaa Closes-Bug: 1755207 --- ceph-mon/files/nagios/collect_ceph_status.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-mon/files/nagios/collect_ceph_status.sh b/ceph-mon/files/nagios/collect_ceph_status.sh index 2f72a42c..a2e284e2 100755 --- a/ceph-mon/files/nagios/collect_ceph_status.sh +++ b/ceph-mon/files/nagios/collect_ceph_status.sh @@ -14,5 +14,11 @@ DATA_DIR="/var/lib/nagios" if [ ! -d $DATA_DIR ]; then mkdir -p $DATA_DIR fi +DATA_FILE="${DATA_DIR}/cat-ceph-status.txt" +TMP_FILE=$(mktemp -p ${DATA_DIR}) -ceph status --format json >${DATA_DIR}/cat-ceph-status.txt +ceph status --format json >${TMP_FILE} + +chown root:nagios ${TMP_FILE} +chmod 0640 ${TMP_FILE} +mv ${TMP_FILE} ${DATA_FILE} From f62d7455ee02beba7fbba9b4fd0416ac83710c40 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Fri, 29 Sep 2017 16:03:33 +1300 Subject: [PATCH 1459/2699] Add set/unset noout actions Adds two new actions to drive ceph osd set noout and its opposite. Change-Id: I5b80c18d21a6e9118bb13ad9af6bc80d353e3436 Depends-On: I3d5fc96b9b69fd98b7d84b5aed6079f3b02c19ab --- ceph-mon/actions.yaml | 4 ++++ ceph-mon/actions/set-noout | 1 + ceph-mon/actions/set_noout.py | 28 ++++++++++++++++++++++++++++ ceph-mon/actions/unset-noout | 1 + ceph-mon/actions/unset_noout.py | 28 ++++++++++++++++++++++++++++ ceph-mon/tests/basic_deployment.py | 21 +++++++++++++++++++++ 6 files changed, 83 insertions(+) create mode 120000 ceph-mon/actions/set-noout create mode 100755 ceph-mon/actions/set_noout.py create mode 120000 ceph-mon/actions/unset-noout create mode 100755 ceph-mon/actions/unset_noout.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 860ca087..749a17f5 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -229,3 +229,7 @@ crushmap-update: show-disk-free: description: Show disk utilization by host and OSD. additionalProperties: false +set-noout: + description: Set ceph noout across the cluster. +unset-noout: + description: Unset ceph noout across the cluster. diff --git a/ceph-mon/actions/set-noout b/ceph-mon/actions/set-noout new file mode 120000 index 00000000..d2ac02d8 --- /dev/null +++ b/ceph-mon/actions/set-noout @@ -0,0 +1 @@ +set_noout.py \ No newline at end of file diff --git a/ceph-mon/actions/set_noout.py b/ceph-mon/actions/set_noout.py new file mode 100755 index 00000000..97aa3841 --- /dev/null +++ b/ceph-mon/actions/set_noout.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +sys.path.append('hooks') +from charmhelpers.core.hookenv import action_set, action_fail +sys.path.append('lib') +from ceph.utils import osd_noout + +if __name__ == '__main__': + result = osd_noout(True) + if result: + action_set({'message': 'Ceph osd noout has been set'}) + else: + action_fail('Ceph osd noout failed to set') diff --git a/ceph-mon/actions/unset-noout b/ceph-mon/actions/unset-noout new file mode 120000 index 00000000..807c18a7 --- /dev/null +++ b/ceph-mon/actions/unset-noout @@ -0,0 +1 @@ +unset_noout.py \ No newline at end of file diff --git a/ceph-mon/actions/unset_noout.py b/ceph-mon/actions/unset_noout.py new file mode 100755 index 00000000..8ae9a393 --- /dev/null +++ b/ceph-mon/actions/unset_noout.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +sys.path.append('hooks') +from charmhelpers.core.hookenv import action_set, action_fail +sys.path.append('lib') +from ceph.utils import osd_noout + +if __name__ == '__main__': + result = osd_noout(False) + if result: + action_set({'message': 'Ceph osd noout has been unset'}) + else: + action_fail('Ceph osd noout failed to unset') diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 67091164..15397c74 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -568,6 +568,27 @@ def test_403_cache_tier_actions(self): assert "cache_mode" not in pool_line, \ "cache_mode is still enabled on cache pool" + def test_404_set_noout_actions(self): + """Verify that set/unset noout works""" + u.log.debug("Testing set noout") + cmd = "ceph -s" + + sentry_unit = self.ceph0_sentry + action_id = u.run_action(sentry_unit, 'set-noout') + assert u.wait_on_action(action_id), "Set noout action failed." + + output, code = sentry_unit.run(cmd) + if 'noout' not in output: + amulet.raise_status(amulet.FAIL, msg="Missing noout") + + u.log.debug("Testing unset noout") + action_id = u.run_action(sentry_unit, 'unset-noout') + assert u.wait_on_action(action_id), "Unset noout action failed." + + output, code = sentry_unit.run(cmd) + if 'noout' in output: + amulet.raise_status(amulet.FAIL, msg="Still has noout") + def test_410_ceph_cinder_vol_create(self): """Create and confirm a ceph-backed cinder volume, and inspect ceph cinder pool object count as the volume is created From 10763e75a21ac71991162c7a0c542122a6ead385 Mon Sep 17 00:00:00 2001 From: James Hebden Date: Mon, 4 Dec 2017 12:48:41 +1100 Subject: [PATCH 1460/2699] Add get-health action to the Ceph mon charm * get-health - outputs `ceph health` output Including unit and functional tests for the above actions. 
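For illustration, once merged the action can be exercised from the Juju client roughly as follows (the unit name is a placeholder; older Juju 2.x clients fetch the result with `juju show-action-output <id>` instead of `--wait`):

    juju run-action ceph-mon/0 get-health --wait

The action result should carry the raw `ceph health` output (e.g. HEALTH_OK).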
Change-Id: Id4c0a89f2068a6f30025d4a165f84ad112b62cf7 Closes-Bug: #1720099 --- ceph-mon/actions.yaml | 2 + ceph-mon/actions/ceph_ops.py | 64 ++++++++++++++++++++++++- ceph-mon/actions/get-health | 6 +++ ceph-mon/tests/basic_deployment.py | 8 ++++ ceph-mon/unit_tests/__init__.py | 1 + ceph-mon/unit_tests/test_actions_mon.py | 54 +++++++++++++++++++++ 6 files changed, 134 insertions(+), 1 deletion(-) create mode 100755 ceph-mon/actions/get-health create mode 100644 ceph-mon/unit_tests/test_actions_mon.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 749a17f5..2d2f1729 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -2,6 +2,8 @@ pause-health: description: Pause ceph health operations across the entire ceph cluster resume-health: description: Resume ceph health operations across the entire ceph cluster +get-health: + description: Output the current cluster health reported by `ceph health` create-cache-tier: description: Create a new cache tier params: diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index d23ad017..86765dd0 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -13,11 +13,11 @@ # limitations under the License. from subprocess import CalledProcessError, check_output +import rados import sys sys.path.append('hooks') -import rados from charmhelpers.core.hookenv import log, action_get, action_fail from charmhelpers.contrib.storage.linux.ceph import pool_set, \ set_pool_quota, snapshot_pool, remove_pool_snapshot @@ -25,6 +25,7 @@ # Connect to Ceph via Librados and return a connection def connect(): + """Creates a connection to Ceph using librados.""" try: cluster = rados.Rados(conffile='/etc/ceph/ceph.conf') cluster.connect() @@ -38,11 +39,13 @@ def connect(): def create_crush_rule(): + """Stub function.""" # Shell out pass def list_pools(): + """Return a list of all Ceph pools.""" try: cluster = connect() pool_list = cluster.list_pools() @@ -56,7 +59,31 @@ def list_pools(): action_fail(str(e)) +def get_health(): + """ + Returns the output of 'ceph health'. + + On error, 'unknown' is returned. + """ + try: + value = check_output(['ceph', 'health']) + return value + except CalledProcessError as e: + action_fail(e.message) + return 'Getting health failed, health unknown' + + def pool_get(): + """ + Returns a key from a pool using 'ceph osd pool get'. + + The key is provided via the 'key' action parameter and the + pool provided by the 'pool_name' parameter. These are used when + running 'ceph osd pool get <pool_name> <key>', the result of + which is returned. + + On failure, 'unknown' will be returned. + """ key = action_get("key") pool_name = action_get("pool_name") try: @@ -65,9 +92,18 @@ def pool_get(): return value except CalledProcessError as e: action_fail(str(e)) + return 'unknown' def set_pool(): + """ + Sets an arbitrary key in a Ceph pool. + + Sets the key specified by the action parameter 'key' to the value + specified in the action parameter 'value' for the pool specified + by the action parameter 'pool_name' using the charmhelpers + 'pool_set' function. + """ key = action_get("key") value = action_get("value") pool_name = action_get("pool_name") @@ -75,6 +111,11 @@ def set_pool(): def pool_stats(): + """ + Returns statistics for a pool. + + The pool name is provided by the action parameter 'pool-name'. + """ try: pool_name = action_get("pool-name") cluster = connect() @@ -93,6 +134,13 @@ def pool_stats(): def delete_pool_snapshot(): + """ + Delete a pool snapshot.
+ + Deletes a snapshot from the pool provided by the action + parameter 'pool-name', with the snapshot name provided by + action parameter 'snapshot-name' + """ pool_name = action_get("pool-name") snapshot_name = action_get("snapshot-name") remove_pool_snapshot(service='ceph', @@ -102,6 +150,13 @@ def delete_pool_snapshot(): # Note only one or the other can be set def set_pool_max_bytes(): + """ + Sets the max bytes quota for a pool. + + Sets the pool quota maximum bytes for the pool specified by + the action parameter 'pool-name' to the value specified by + the action parameter 'max' + """ pool_name = action_get("pool-name") max_bytes = action_get("max") set_pool_quota(service='ceph', @@ -110,6 +165,13 @@ def set_pool_max_bytes(): def snapshot_ceph_pool(): + """ + Snapshots a Ceph pool. + + Snapshots the pool provided in action parameter 'pool-name' and + uses the parameter provided in the action parameter 'snapshot-name' + as the name for the snapshot. + """ pool_name = action_get("pool-name") snapshot_name = action_get("snapshot-name") snapshot_pool(service='ceph', diff --git a/ceph-mon/actions/get-health b/ceph-mon/actions/get-health new file mode 100755 index 00000000..09cf08c2 --- /dev/null +++ b/ceph-mon/actions/get-health @@ -0,0 +1,6 @@ +#!/usr/bin/python + +from ceph_ops import get_health + +if __name__ == '__main__': + get_health() diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 15397c74..2aeaa2a8 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -708,6 +708,14 @@ def test_412_ceph_glance_image_create_delete(self): if ret: amulet.raise_status(amulet.FAIL, msg=ret) + def test_414_get_health_action(self): + """Verify that getting health works""" + u.log.debug("Testing get-health") + + sentry_unit = self.ceph0_sentry + action_id = u.run_action(sentry_unit, 'get-health') + assert u.wait_on_action(action_id), "HEALTH_OK" + def test_499_ceph_cmds_exit_zero(self): """Check basic functionality of ceph cli commands against all ceph units.""" diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py index 2e3f304c..70342765 100644 --- a/ceph-mon/unit_tests/__init__.py +++ b/ceph-mon/unit_tests/__init__.py @@ -16,3 +16,4 @@ sys.path.append('hooks') sys.path.append('lib') sys.path.append('unit_tests') +sys.path.append('actions') diff --git a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py new file mode 100644 index 00000000..a4425aa1 --- /dev/null +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -0,0 +1,54 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import mock +import sys + +from test_utils import CharmTestCase + +# python-apt is not installed as part of test-requirements but is imported by +# some charmhelpers modules so create a fake import. 
+mock_apt = mock.MagicMock() +sys.modules['apt'] = mock_apt +mock_apt.apt_pkg = mock.MagicMock() + +# mocking for rados +mock_rados = mock.MagicMock() +sys.modules['rados'] = mock_rados +mock_rados.connect = mock.MagicMock() + +# mocking for psutil +mock_psutil = mock.MagicMock() +sys.modules['psutil'] = mock_psutil +mock_psutil.disks = mock.MagicMock() + +with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + # import health actions as actions + import ceph_ops as actions + + +class OpsTestCase(CharmTestCase): + + def setUp(self): + super(OpsTestCase, self).setUp( + actions, ["check_output", + "action_get", + "action_fail", + "open"]) + + def test_get_health(self): + actions.get_health() + cmd = ['ceph', 'health'] + self.check_output.assert_called_once_with(cmd) From c406c106a8751569f93161bc90ac77e4d8f268a6 Mon Sep 17 00:00:00 2001 From: Tilman Baumann Date: Wed, 14 Mar 2018 15:21:07 +0100 Subject: [PATCH 1461/2699] Reload AppArmor when policies are changed AppArmor needs to be reloaded when new policies are written, not only when the 'aa-profile-mode' config option is changed. Change-Id: Ia2990cdc03f6fa0d3a38e4a3247422f0950ee0eb Closes-Bug: 1755823 --- ceph-osd/hooks/ceph_hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 9ec92ccb..3bf94587 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -48,6 +48,7 @@ service_reload, service_restart, add_to_updatedb_prunepath, + restart_on_change ) from charmhelpers.fetch import ( add_source, @@ -134,6 +135,8 @@ def tune_network_adapters(): ceph.tune_nic(interface) +@restart_on_change({'/etc/apparmor.d/usr.bin.ceph-osd': ['apparmor']}, + restart_functions={'apparmor': service_reload}) def copy_profile_into_place(): """ Copy the apparmor profiles included with the charm From ca2ab89f6b19693dd04350391350457c61b9198a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 28 Mar 2018 13:24:20 -0500 Subject: [PATCH 1462/2699] Update readme for apparmor Change-Id: I4afe123e8543441a9fee805dea1426ddd19a9416 --- ceph-osd/README.md | 15 +++++++++++++++ ceph-osd/config.yaml | 3 ++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 7b599a7d..b0271061 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -64,6 +64,21 @@ Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/ra **NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +AppArmor Profiles +================= + +AppArmor is not enforced for Ceph by default. An AppArmor profile can be generated by the charm. However, great care must be taken. + +Changing the value of the ```aa-profile-mode``` option is disruptive to a running Ceph cluster as all ceph-osd processes must be restarted as part of changing the AppArmor profile enforcement mode. + +The generated AppArmor profile currently has a narrow supported use case, and it should always be verified in pre-production against the specific configurations and topologies intended for production. + +The AppArmor profile(s) which are generated by the charm should NOT yet be used in the following scenarios: + - When there are separate journal devices. + - On any version of Ceph prior to Luminous. 
+ - On any version of Ubuntu other than 16.04. + - With Bluestore enabled. + Contact Information =================== diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 70ec475c..f236a70d 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -299,4 +299,5 @@ options: . NOTE: changing the value of this option is disruptive to a running Ceph cluster as all ceph-osd processes must be restarted as part of changing - the apparmor profile enforcement mode. + the apparmor profile enforcement mode. Always test in pre-production + before enabling AppArmor on a live cluster. From 1f0565a8efa15d47d088f95e4e659fd240dd9abc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 28 Mar 2018 14:07:45 -0500 Subject: [PATCH 1463/2699] Update amulet test definitions Enable Queens, and set Queens as the smoke gate Remove soon-to-be deprecated combos Change-Id: I905aabc8d2168e87b6d341bac45a38991f26cc57 --- ...v-basic-bionic-queens => gate-basic-bionic-queens} | 0 ceph-proxy/tests/gate-basic-trusty-kilo | 11 ----------- ceph-proxy/tests/gate-basic-trusty-liberty | 11 ----------- ceph-proxy/tests/gate-basic-xenial-newton | 11 ----------- ceph-proxy/tox.ini | 2 +- 5 files changed, 1 insertion(+), 34 deletions(-) rename ceph-proxy/tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} (100%) delete mode 100755 ceph-proxy/tests/gate-basic-trusty-kilo delete mode 100755 ceph-proxy/tests/gate-basic-trusty-liberty delete mode 100755 ceph-proxy/tests/gate-basic-xenial-newton diff --git a/ceph-proxy/tests/dev-basic-bionic-queens b/ceph-proxy/tests/gate-basic-bionic-queens similarity index 100% rename from ceph-proxy/tests/dev-basic-bionic-queens rename to ceph-proxy/tests/gate-basic-bionic-queens diff --git a/ceph-proxy/tests/gate-basic-trusty-kilo b/ceph-proxy/tests/gate-basic-trusty-kilo deleted file mode 100755 index 47c3296f..00000000 --- a/ceph-proxy/tests/gate-basic-trusty-kilo +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on trusty-kilo.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-kilo', - source='cloud:trusty-updates/kilo') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-trusty-liberty b/ceph-proxy/tests/gate-basic-trusty-liberty deleted file mode 100755 index cdd020b6..00000000 --- a/ceph-proxy/tests/gate-basic-trusty-liberty +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on trusty-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-newton b/ceph-proxy/tests/gate-basic-xenial-newton deleted file mode 100755 index 26e06f74..00000000 --- a/ceph-proxy/tests/gate-basic-xenial-newton +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on xenial-newton.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-newton', - source='cloud:xenial-updates/newton') - deployment.run_tests() diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index dae53621..3d12993c 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = 
-r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-queens --no-destroy [testenv:func27-dfs] # Charm Functional Test From 321ca12bf6cd95e2471da66b51cf6f97fb331628 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 2 Apr 2018 20:30:22 +0000 Subject: [PATCH 1464/2699] Update tox.ini to stop using unverified package As of pip 10.0, --allow-unverified is not permitted. Use of the flag in this repo was previously used to force installation of python-apt to accommodate certain unit tests. The unverified package, python-apt, is no longer necessary for test execution. Related-Bug: #1760720 Change-Id: I4907223e5a63226d8945de8b2d4f1e0dd11a6528 --- ceph-fs/src/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 799d7068..6ca8ba23 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -13,7 +13,7 @@ whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* deps = -r{toxinidir}/test-requirements.txt install_command = - pip install --allow-unverified python-apt {opts} {packages} + pip install {opts} {packages} [testenv:pep8] basepython = python2.7 From cb8cbd23cc0facff1e045aa9ae6d31cbf4a301bf Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 2 Apr 2018 20:30:49 +0000 Subject: [PATCH 1465/2699] Update tox.ini to stop using unverified package As of pip 10.0, --allow-unverified is not permitted. Use of the flag in this repo was previously used to force installation of python-apt to accommodate certain unit tests. The unverified package, python-apt, is no longer necessary for test execution. Related-Bug: #1760720 Change-Id: I0998532143995dd8ced64cdd5660f2d44cbbcbb9 --- ceph-radosgw/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index dae53621..43190642 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -11,7 +11,7 @@ setenv = VIRTUAL_ENV={envdir} CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = - pip install --allow-unverified python-apt {opts} {packages} + pip install {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From 772beef80e9cd35dc475a92424e6a032eb796950 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 2 Apr 2018 20:30:29 +0000 Subject: [PATCH 1466/2699] Update tox.ini to stop using unverified package As of pip 10.0, --allow-unverified is not permitted. Use of the flag in this repo was previously used to force installation of python-apt to accommodate certain unit tests. The unverified package, python-apt, is no longer necessary for test execution. 
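For context, pip >= 10 rejects the removed flag outright, so the old install_command fails before anything is installed; the failure looks roughly like this (exact wording varies by pip version):

    $ pip install --allow-unverified python-apt ...
    Usage: pip install [options] <requirement specifier> ...
    no such option: --allow-unverified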
Related-Bug: #1760720 Change-Id: Ieb62ac460f605f5599206379b79075704a6fc3e9 --- ceph-mon/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 365444d3..e5d01d8a 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = - pip install --allow-unverified python-apt {opts} {packages} + pip install {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From 587d2912331ba66452fc179250f78b4f643f218b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 2 Apr 2018 20:30:35 +0000 Subject: [PATCH 1467/2699] Update tox.ini to stop using unverified package As of pip 10.0, --allow-unverified is not permitted. Use of the flag in this repo was previously used to force installation of python-apt to accommodate certain unit tests. The unverified package, python-apt, is no longer necessary for test execution. Related-Bug: #1760720 Change-Id: I06e5afd0bfb627b2de64e93e51c182cf881e38d7 --- ceph-osd/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 365444d3..e5d01d8a 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = - pip install --allow-unverified python-apt {opts} {packages} + pip install {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From 6df69c8ebba53a060f03437ff61cf467a14462bc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 2 Apr 2018 20:30:42 +0000 Subject: [PATCH 1468/2699] Update tox.ini to stop using unverified package As of pip 10.0, --allow-unverified is not permitted. Use of the flag in this repo was previously used to force installation of python-apt to accommodate certain unit tests. The unverified package, python-apt, is no longer necessary for test execution. Related-Bug: #1760720 Change-Id: Idfe5ad692a8657eaad7bb9ddc5a6ac33210cd38c --- ceph-proxy/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index dae53621..43190642 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -11,7 +11,7 @@ setenv = VIRTUAL_ENV={envdir} CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = - pip install --allow-unverified python-apt {opts} {packages} + pip install {opts} {packages} commands = ostestr {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From bebf938163173db867f463dbaae798cf1a6c2b82 Mon Sep 17 00:00:00 2001 From: Tilman Baumann Date: Tue, 3 Apr 2018 15:56:43 +0200 Subject: [PATCH 1469/2699] Reload OSD when AppArmor policies are changed When AppArmor profiles are updated, the OSD processes need to be restarted as well to make the new policy effective. Policy files can change on charm upgrade.
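For reference, the fix below leans on the charmhelpers restart_on_change decorator. A minimal sketch of its semantics (an illustration of the pattern, not the charmhelpers source):

    from charmhelpers.core.host import file_hash, service_restart

    def restart_on_change_sketch(restart_map, restart_functions=None):
        # Hash each watched file, run the wrapped function, re-hash, and
        # invoke the mapped restart handler for every file that changed.
        restart_functions = restart_functions or {}

        def wrap(f):
            def wrapped(*args, **kwargs):
                before = {path: file_hash(path) for path in restart_map}
                result = f(*args, **kwargs)
                for path, services in restart_map.items():
                    if file_hash(path) != before[path]:
                        for service in services:
                            # A custom handler (here: AppArmor reload plus
                            # per-OSD restarts) overrides plain service_restart.
                            handler = restart_functions.get(service,
                                                            service_restart)
                            handler(service)
                return result
            return wrapped
        return wrap

Mapping the profile file to a custom handler means rewriting the policy on upgrade automatically triggers the AppArmor reload and the OSD restarts.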
Change-Id: Ib0dbcfd11949451e3abc0b2b7477a5f474bae234 Partial-Bug: 1755823 --- ceph-osd/hooks/ceph_hooks.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3bf94587..2d3bf9f1 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -135,8 +135,23 @@ def tune_network_adapters(): ceph.tune_nic(interface) -@restart_on_change({'/etc/apparmor.d/usr.bin.ceph-osd': ['apparmor']}, - restart_functions={'apparmor': service_reload}) +def aa_profile_changed(service_name='ceph-osd-all'): + """ + Reload AA profile and restart OSD processes. + """ + log("Loading new AppArmor profile") + service_reload('apparmor') + log("Restarting ceph-osd services with new AppArmor profile") + if ceph.systemd(): + for osd_id in ceph.get_local_osd_ids(): + service_restart('ceph-osd@{}'.format(osd_id)) + else: + service_restart(service_name) + + +@restart_on_change({ + '/etc/apparmor.d/usr.bin.ceph-osd': ['ceph-osd-all']}, + restart_functions={'ceph-osd-all': aa_profile_changed}) def copy_profile_into_place(): """ Copy the apparmor profiles included with the charm @@ -175,12 +190,7 @@ def install_apparmor_profile(): if config().changed('aa-profile-mode'): aa_context = CephOsdAppArmorContext() aa_context.setup_aa_profile() - service_reload('apparmor') - if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_restart('ceph-osd@{}'.format(osd_id)) - else: - service_restart('ceph-osd-all') + aa_profile_changed() @hooks.hook('install.real') From 067b53558ed7302af2e23981c8bee78f30ae8632 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 23 Mar 2018 13:23:27 +0000 Subject: [PATCH 1470/2699] luminous: ceph-volume switch Switch to using ceph-volume + LVM for managing block devices for Luminous and later; this is the upstream preferred approach to managing OSD devices, allowing for more flexibility in terms of use of crypto and logical volumes.
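For illustration, the ceph-volume workflow this switches to looks roughly like the following on a Luminous node (the device path is a placeholder):

    ceph-volume lvm create --data /dev/sdb
    ceph-volume lvm list

The first command carves an LVM volume group and logical volume out of the device, then prepares and activates an OSD on top of it; the second reports the resulting OSD-to-LV mapping.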
Change-Id: I30c4d29e6f568ac2e30a45b1a7bc0e68685c3707 Depends-On: I1675b67d364ae6042129a8a717d4bdffff5bde92 --- ceph-osd/.pydevproject | 7 +- .../charmhelpers/contrib/hahelpers/apache.py | 3 +- .../charmhelpers/contrib/openstack/context.py | 1 + .../charmhelpers/contrib/openstack/utils.py | 2 +- .../charmhelpers/contrib/storage/linux/lvm.py | 29 ++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 86 +++- .../hooks/charmhelpers/core/services/base.py | 21 +- ceph-osd/lib/ceph/utils.py | 384 +++++++++++++++--- ceph-osd/tests/basic_deployment.py | 5 +- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 223 ++++++++-- ceph-osd/tests/charmhelpers/core/hookenv.py | 86 +++- .../tests/charmhelpers/core/services/base.py | 21 +- 13 files changed, 745 insertions(+), 133 deletions(-) diff --git a/ceph-osd/.pydevproject b/ceph-osd/.pydevproject index be2105d0..5ed03c7e 100644 --- a/ceph-osd/.pydevproject +++ b/ceph-osd/.pydevproject @@ -3,7 +3,10 @@ python 2.7 Default -/ceph-osd/hooks -/ceph-osd/unit_tests +/${PROJECT_DIR_NAME}/lib +/${PROJECT_DIR_NAME}/unit_tests +/${PROJECT_DIR_NAME}/tests +/${PROJECT_DIR_NAME}/hooks +/${PROJECT_DIR_NAME}/actions diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py index 22acb683..a8527047 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -65,7 +65,8 @@ def get_ca_cert(): if ca_cert is None: log("Inspecting identity-service relations for CA SSL certificate.", level=INFO) - for r_id in relation_ids('identity-service'): + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): for unit in relation_list(r_id): if ca_cert is None: ca_cert = relation_get('ca_cert', diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 36cf32fc..6c4497b1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -384,6 +384,7 @@ def __call__(self): # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') return ctxt return {} diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index b753275d..e7194264 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index 79a7a245..c8bde692 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. 
+ :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 7ed1cc4e..89f10240 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -1043,7 +1044,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1103,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1125,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1132,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1201,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. 
This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1231,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. 
['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index ca9dc996..345b60dc 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -313,17 +313,26 @@ def __call__(self, manager, service_name, event_name): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index ea70d955..a7e3f7df 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -15,6 +15,7 @@ import collections import ctypes import errno +import glob import json import os import pyudev @@ -25,6 +26,7 @@ import sys import time import shutil +import uuid from datetime import datetime @@ -73,6 +75,7 @@ from charmhelpers.contrib.openstack.utils import ( get_os_codename_install_source, ) +from charmhelpers.contrib.storage.linux import lvm CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') @@ -83,7 +86,8 @@ QUORUM = [LEADER, PEON] PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev'] + 'radosgw', 'xfsprogs', 'python-pyudev', + 'lvm2', 'parted'] LinkSpeed = { "BASE_10": 10, @@ -1358,10 +1362,17 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. - cmd = ['ceph-create-keys', '--id', hostname] + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate for all situations. + # LP#1719436 + cmd = ['ceph-create-keys', '--id', hostname, '--timeout', '1800'] subprocess.check_call(cmd) - osstat = os.stat("/etc/ceph/ceph.client.admin.keyring") + _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' + osstat = os.stat(_client_admin_keyring) if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. 
+ # LP#1719436 + os.remove(_client_admin_keyring) raise Exception @@ -1399,17 +1410,36 @@ def get_partitions(dev): return [] -def find_least_used_utility_device(utility_devices): +def get_lvs(dev): + """ + List logical volumes for the provided block device + + :param: dev: Full path to block device. + :raises subprocess.CalledProcessError: in the event that any supporting + operation failed. + :returns: list: List of logical volumes provided by the block device + """ + pv_dev = _partition_name(dev) + if not lvm.is_lvm_physical_volume(pv_dev): + return [] + vg_name = lvm.list_lvm_volume_group(pv_dev) + return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) + + +def find_least_used_utility_device(utility_devices, lvs=False): """ Find a utility device which has the smallest number of partitions among other devices in the supplied list. :utility_devices: A list of devices to be used for filestore journal or bluestore wal or db. + :lvs: flag to indicate whether inspection should be based on LVM LV's :return: string device name """ - - usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) + if lvs: + usages = map(lambda a: (len(get_lvs(a)), a), utility_devices) + else: + usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) least = min(usages, key=lambda t: t[0]) return least[1] @@ -1460,49 +1490,28 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Looks like {} is in use, skipping.'.format(dev)) return - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - if reformat_osd: - cmd.append('--zap-disk') - - # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') - - cmd.append(dev) - - if osd_journal: - least_used = find_least_used_utility_device(osd_journal) - cmd.append(least_used) + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + return + + if reformat_osd: + zap_disk(dev) + + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore) else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) try: + status_set('maintenance', 'Initializing device {}'.format(dev)) log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) except subprocess.CalledProcessError: @@ -1513,6 +1522,289 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, raise +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): + """ + Prepare a device for usage as a Ceph OSD using ceph-disk + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block 
devices to use for OSD journals + :param: encrypt: Use block device encryption (unsupported) + :param: bluestore: Use bluestore storage for OSD + :returns: list. 'ceph-disk' command and required parameters for + execution by check_call + """ + cmd = ['ceph-disk', 'prepare'] + + if encrypt: + cmd.append('--dmcrypt') + + if osd_format and not bluestore: + cmd.append('--fs-type') + cmd.append(osd_format) + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') + + cmd.append(dev) + + if osd_journal: + least_used = find_least_used_utility_device(osd_journal) + cmd.append(least_used) + + return cmd + + +def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): + """ + Prepare and activate a device for usage as a Ceph OSD using ceph-volume. + + This also includes creation of all PV's, VG's and LV's required to + support the initialization of the OSD. + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption + :param: bluestore: Use bluestore storage for OSD + :raises subprocess.CalledProcessError: in the event that any supporting + LVM operation failed. + :returns: list. 'ceph-volume' command and required parameters for + execution by check_call + """ + cmd = ['ceph-volume', 'lvm', 'create'] + + osd_fsid = str(uuid.uuid4()) + cmd.append('--osd-fsid') + cmd.append(osd_fsid) + + if bluestore: + cmd.append('--bluestore') + main_device_type = 'block' + else: + cmd.append('--filestore') + main_device_type = 'data' + + if encrypt: + cmd.append('--dmcrypt') + + # On-disk journal volume creation + if not osd_journal and not bluestore: + journal_lv_type = 'journal' + cmd.append('--journal') + cmd.append(_allocate_logical_volume( + dev, + journal_lv_type, + osd_fsid, + size='{}M'.format(calculate_volume_size('journal'))) + ) + + cmd.append('--data') + cmd.append(_allocate_logical_volume(dev, + main_device_type, + osd_fsid)) + + if bluestore: + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + least_used, + extra_volume, + osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True) + ) + + elif osd_journal: + cmd.append('--journal') + least_used = find_least_used_utility_device(osd_journal, + lvs=True) + cmd.append(_allocate_logical_volume( + least_used, + 'journal', + osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + shared=True) + ) + + return cmd + + +def _partition_name(dev): + """ + Derive the first partition name for a block device + + :param: dev: Full path to block device. + :returns: str: Full path to first partition on block device. + """ + if dev[-1].isdigit(): + return '{}p1'.format(dev) + else: + return '{}1'.format(dev) + + +# TODO(jamespage): Deal with lockbox encrypted bluestore devices. 
+def is_active_bluestore_device(dev):
+    """
+    Determine whether provided device is part of an active
+    bluestore based OSD (as its block component).
+
+    :param: dev: Full path to block device to check for Bluestore usage.
+    :returns: boolean: indicating whether device is in active use.
+    """
+    pv_dev = _partition_name(dev)
+    if not lvm.is_lvm_physical_volume(pv_dev):
+        return False
+
+    vg_name = lvm.list_lvm_volume_group(pv_dev)
+    lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0]
+
+    block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block')
+    for block_candidate in block_symlinks:
+        if os.path.islink(block_candidate):
+            target = os.readlink(block_candidate)
+            if target.endswith(lv_name):
+                return True
+
+    return False
+
+
+def get_conf(variable):
+    """
+    Get the value of the given configuration variable from the
+    cluster.
+
+    :param variable: ceph configuration variable
+    :returns: str. configured value for provided variable
+
+    """
+    return subprocess.check_output([
+        'ceph-osd',
+        '--show-config-value={}'.format(variable),
+    ]).strip()
+
+
+def calculate_volume_size(lv_type):
+    """
+    Determine the configured size for Bluestore DB/WAL or
+    Filestore Journal devices
+
+    :param lv_type: volume type (db, wal or journal)
+    :raises KeyError: if invalid lv_type is supplied
+    :returns: int. Configured size in megabytes for volume type
+    """
+    # lv_type -> ceph configuration option
+    _config_map = {
+        'db': 'bluestore_block_db_size',
+        'wal': 'bluestore_block_wal_size',
+        'journal': 'osd_journal_size',
+    }
+
+    # default sizes in MB
+    _default_size = {
+        'db': 1024,
+        'wal': 576,
+        'journal': 1024,
+    }
+
+    # conversion of ceph config units to MB
+    _units = {
+        'db': 1048576,  # Bytes -> MB
+        'wal': 1048576,  # Bytes -> MB
+        'journal': 1,  # Already in MB
+    }
+
+    configured_size = get_conf(_config_map[lv_type])
+
+    if configured_size is None or int(configured_size) == 0:
+        return _default_size[lv_type]
+    else:
+        return int(configured_size) / _units[lv_type]
+
+
+def _initialize_disk(dev):
+    """
+    Initialize a raw block device with a single partition
+    consuming 100% of the available disk space.
+
+    Function assumes that block device has already been wiped.
+
+    :param: dev: path to block device to initialize
+    :raises: subprocess.CalledProcessError: if any parted calls fail
+    :returns: str: Full path to new partition.
+    """
+    partition = _partition_name(dev)
+    if not os.path.exists(partition):
+        subprocess.check_call([
+            'parted', '--script',
+            dev,
+            'mklabel',
+            'gpt',
+        ])
+        subprocess.check_call([
+            'parted', '--script',
+            dev,
+            'mkpart',
+            'primary', '1', '100%',
+        ])
+    return partition
+
+
+def _allocate_logical_volume(dev, lv_type, osd_fsid,
+                             size=None, shared=False):
+    """
+    Allocate a logical volume from a block device, ensuring any
+    required initialization and setup of PV's and VG's to support
+    the LV.
+
+    :param: dev: path to block device to allocate from.
+    :param: lv_type: logical volume type to create
+                     (data, block, journal, wal, db)
+    :param: osd_fsid: UUID of the OSD associated with the LV
+    :param: size: Size in LVM format for the device;
+                  if unset 100% of VG
+    :param: shared: Shared volume group (journal, wal, db)
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           LVM or parted operation fails.
+    :returns: str: String in the format 'vg_name/lv_name'.
+ """ + lv_name = "osd-{}-{}".format(lv_type, osd_fsid) + current_volumes = lvm.list_logical_volumes() + pv_dev = _initialize_disk(dev) + + vg_name = None + if not lvm.is_lvm_physical_volume(pv_dev): + lvm.create_lvm_physical_volume(pv_dev) + if shared: + vg_name = 'ceph-{}-{}'.format(lv_type, + str(uuid.uuid4())) + else: + vg_name = 'ceph-{}'.format(osd_fsid) + lvm.create_lvm_volume_group(vg_name, pv_dev) + else: + vg_name = lvm.list_lvm_volume_group(pv_dev) + + if lv_name not in current_volumes: + lvm.create_logical_volume(lv_name, vg_name, size) + + return "{}/{}".format(vg_name, lv_name) + + def osdize_dir(path, encrypt=False, bluestore=False): """Ask ceph-disk to prepare a directory to become an osd. diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 8850da5e..80721c01 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -152,10 +152,7 @@ def _initialize_tests(self): tenant='admin') # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, - username='admin', - password='openstack', - tenant='admin') + self.cinder = u.authenticate_cinder_admin(self.keystone) # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..a60f8fb0 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. 
Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. 
Validate a list of actual service catalog endpoints vs a list of @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password, project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): 
+ """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 7ed1cc4e..89f10240 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -1043,7 +1044,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1103,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1125,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1132,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. 
""" + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1201,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1231,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. 
['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
diff --git a/ceph-osd/tests/charmhelpers/core/services/base.py b/ceph-osd/tests/charmhelpers/core/services/base.py
index ca9dc996..345b60dc 100644
--- a/ceph-osd/tests/charmhelpers/core/services/base.py
+++ b/ceph-osd/tests/charmhelpers/core/services/base.py
@@ -313,17 +313,26 @@ def __call__(self, manager, service_name, event_name):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports


def service_stop(service_name):

From e678fc777acbef1a544be1d3e47a492f8c8c4828 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 9 Apr 2018 17:22:00 +0100
Subject: [PATCH 1471/2699] Improve idempotency of block device processing

Resync charms.ceph to pick up improvements in the recording of block
devices that have been processed as OSD devices, to support better
idempotency of block device processing codepaths.

This fixes a particularly nasty issue when osd-reformat is set to
True, where the charm can wipe and re-prepare an OSD device prior to
the systemd unit actually starting and mounting the OSD's associated
filesystem.

This change also makes the osd-reformat option a boolean option which
is more accessible to users of the charm via the CLI and the Juju GUI.

Change-Id: I578203aeebf6da2efc21a10d2e157324186e2a66
Depends-On: I2c6e9d5670c8d1d70584ae19b34eaf16be5dea19
---
 ceph-osd/config.yaml               |  9 +++++----
 ceph-osd/lib/ceph/utils.py         | 24 ++++++++++++++++++++++++
 ceph-osd/tests/basic_deployment.py |  2 +-
 3 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index f236a70d..b15c261f 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -128,15 +128,16 @@ options:
       Note that despite bluestore being the default for Ceph Luminous,
       if this option is False, OSDs will still use filestore.
   osd-reformat:
-    type: string
-    default:
+    type: boolean
+    default: False
     description: |
       By default, the charm will not re-format a device that already looks
       as if it might be an OSD device. This is a safeguard to try to
       prevent data loss.
       .
-      Specifying this option (any value) forces a reformat of any OSD devices
-      found which are not already mounted.
+ Enabling this option forces a reformat of any OSD devices found which + have not been processed by the unit previously or are not already + mounted. osd-encrypt: type: boolean default: False diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index a7e3f7df..8e0e3c2b 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -76,6 +76,7 @@ get_os_codename_install_source, ) from charmhelpers.contrib.storage.linux import lvm +from charmhelpers.core.unitdata import kv CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') @@ -1473,6 +1474,13 @@ def osdize(dev, osd_format, osd_journal, reformat_osd=False, def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, ignore_errors=False, encrypt=False, bluestore=False): + db = kv() + osd_devices = db.get('osd-devices', []) + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return + if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -1515,12 +1523,28 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Couldn't get lsblk output: {}".format(e), ERROR) if ignore_errors: log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) else: log('Unable to initialize device: {}'.format(dev), ERROR) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) raise + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + db.set('osd-devices', osd_devices) + db.flush() + def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): """ diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 80721c01..5dcaa318 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -113,7 +113,7 @@ def _configure_services(self): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { - 'osd-reformat': 'yes', + 'osd-reformat': True, 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } From 2336e0123a44b66ad875fb1400985ff32bb08e0b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 29 Aug 2017 11:55:11 +0200 Subject: [PATCH 1472/2699] Add formatting param for disk usage Pass formatting option to ceph for easier postprocessing. Constrain to values accepted by ceph, default to plain format. Change-Id: I0bb8cfcd8d8962988e786d2d083b1489161b2ff6 Closes-Bug: #1724880 --- ceph-mon/actions.yaml | 6 ++++++ ceph-mon/actions/show-disk-free.py | 6 ++++-- ceph-mon/tests/basic_deployment.py | 24 +++++++++++++++++++++++- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 2d2f1729..84259dbd 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -230,6 +230,12 @@ crushmap-update: additionalProperties: false show-disk-free: description: Show disk utilization by host and OSD. 
+ params: + format: + type: string + enum: [json, json-pretty, xml, xml-pretty, plain] + default: "plain" + description: Output format, either json, json-pretty, xml, xml-pretty, plain; defaults to plain additionalProperties: false set-noout: description: Set ceph noout across the cluster. diff --git a/ceph-mon/actions/show-disk-free.py b/ceph-mon/actions/show-disk-free.py index 2ba7894f..1f38f094 100755 --- a/ceph-mon/actions/show-disk-free.py +++ b/ceph-mon/actions/show-disk-free.py @@ -18,12 +18,14 @@ sys.path.append('hooks') from subprocess import check_output, CalledProcessError -from charmhelpers.core.hookenv import log, action_set, action_fail +from charmhelpers.core.hookenv import log, action_get, action_set, action_fail if __name__ == '__main__': + # constrained to enum: json,json-pretty,xml,xml-pretty,plain + fmt = action_get("format") try: out = check_output(['ceph', '--id', 'admin', - 'osd', 'df', 'tree']).decode('UTF-8') + 'osd', 'df', 'tree', '-f', fmt]).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: log(e) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 2aeaa2a8..a15d7b01 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -17,6 +17,7 @@ import amulet import re import time +import json from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -117,7 +118,7 @@ def _configure_services(self): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { - 'osd-reformat': 'yes', + 'osd-reformat': True, 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } @@ -716,6 +717,27 @@ def test_414_get_health_action(self): action_id = u.run_action(sentry_unit, 'get-health') assert u.wait_on_action(action_id), "HEALTH_OK" + def test_420_show_disk_free_action(self): + """Verify show-disk-free""" + u.log.debug("Testing show-disk-free") + if self._get_openstack_release() < self.trusty_kilo: + u.log.info( + "show-disk-free only supported in >=kilo, skipping") + return + sentry_unit = self.ceph0_sentry + action_id = u.run_action(sentry_unit, + 'show-disk-free', + params={'format': 'json'}) + assert u.wait_on_action(action_id), "Show-disk-free action failed." + data = amulet.actions.get_action_output(action_id, full_output=True) + assert data.get(u"status") == "completed", "Show-disk-free failed" + message = data.get(u"results").get(u"message") + assert message is not None + jsonout = json.loads(message.strip()) + nodes = jsonout.get(u"nodes") + assert nodes is not None, "Show-disk-free: no 'nodes' elem" + assert len(nodes) > 0, "Show-disk-free action: 0 nodes" + def test_499_ceph_cmds_exit_zero(self): """Check basic functionality of ceph cli commands against all ceph units.""" From 30d296afebf49c14235dc17a2f2fbb4d683c8c1f Mon Sep 17 00:00:00 2001 From: Sandor Zeestraten Date: Sun, 15 Apr 2018 13:13:22 +0200 Subject: [PATCH 1473/2699] Render crush-initial-weight option if set to 0 Fixes the conditional in the ceph.conf template so it renders the crush-initial-weight config option if set to 0. 
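The root cause is Jinja2 truthiness: the previous guard,
{%- if crush_initial_weight %}, evaluates false for a configured weight
of 0, so the option was silently dropped from ceph.conf, whereas
testing against none only skips the option when it is genuinely unset.
A minimal sketch of the difference, using a stand-alone jinja2 template
string rather than the charm's full ceph.conf template:

    from jinja2 import Template

    OPT = "osd crush initial weight = {{ crush_initial_weight }}"

    # Truthiness guard: a configured weight of 0 is falsey and dropped.
    buggy = Template("{% if crush_initial_weight %}" + OPT + "{% endif %}")
    # Explicit none test: only an unset option is skipped.
    fixed = Template("{% if crush_initial_weight is not none %}" + OPT +
                     "{% endif %}")

    assert buggy.render(crush_initial_weight=0) == ""    # bug: 0 lost
    assert fixed.render(crush_initial_weight=0) == \
        "osd crush initial weight = 0"                   # 0 now rendered
    assert fixed.render(crush_initial_weight=None) == "" # unset: skipped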
Change-Id: Iaecbdf52bd3731effa3132e61364918407116dbe Closes-Bug: 1764077 --- ceph-osd/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 9f2c8144..eb1c3a24 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -33,7 +33,7 @@ crush location = {{crush_location}} {%- if upgrade_in_progress %} setuser match path = /var/lib/ceph/$type/$cluster-$id {%- endif %} -{%- if crush_initial_weight %} +{%- if crush_initial_weight is not none %} osd crush initial weight = {{ crush_initial_weight }} {%- endif %} {% if global -%} From 6aa868ebf4ace41b255ec1101df995783b627ef5 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Apr 2018 10:53:37 +0100 Subject: [PATCH 1474/2699] Ensure initial apparmor mode set Due to changes in the hookenv.config charmhelper, the value of aa-profile-mode does not change between the install and config-changed hooks. This results in the ceph-osd apparmor profile always being enabled by default (rather than being disabled). Ensure that an apparmor enforcement mode is correctly set whenever a new profile is installed - this could either be on first install, or if a new profile is added to the charm. Change-Id: I131c9a871ad970b58fa6f41575c240081f653a21 --- ceph-osd/hooks/ceph_hooks.py | 9 +++++-- ceph-osd/unit_tests/test_ceph_hooks.py | 36 ++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3bf94587..c984c26e 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -142,12 +142,17 @@ def copy_profile_into_place(): Copy the apparmor profiles included with the charm into the /etc/apparmor.d directory. """ + new_install = False apparmor_dir = os.path.join(os.sep, 'etc', 'apparmor.d') for x in glob.glob('files/apparmor/*'): + if not os.path.exists(os.path.join(apparmor_dir, + os.path.basename(x))): + new_install = True shutil.copy(x, apparmor_dir) + return new_install class CephOsdAppArmorContext(AppArmorContext): @@ -171,8 +176,8 @@ def install_apparmor_profile(): configuration option. 
""" log('Installing apparmor profile for ceph-osd') - copy_profile_into_place() - if config().changed('aa-profile-mode'): + new_install = copy_profile_into_place() + if new_install or config().changed('aa-profile-mode'): aa_context = CephOsdAppArmorContext() aa_context.setup_aa_profile() service_reload('apparmor') diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 09d8f2d3..3079df04 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -317,6 +317,7 @@ def test_install_apparmor_profile(self, mock_config, m_aa_context = MagicMock() mock_apparmor_context.return_value = m_aa_context mock_ceph.systemd.return_value = False + mock_copy_profile_into_place.return_value = False ceph_hooks.install_apparmor_profile() @@ -346,6 +347,7 @@ def test_install_apparmor_profile_systemd(self, mock_config, mock_apparmor_context.return_value = m_aa_context mock_ceph.systemd.return_value = True mock_ceph.get_local_osd_ids.return_value = [0, 1, 2] + mock_copy_profile_into_place.return_value = False ceph_hooks.install_apparmor_profile() @@ -359,6 +361,40 @@ def test_install_apparmor_profile_systemd(self, mock_config, call('ceph-osd@2'), ]) + @patch.object(ceph_hooks, 'ceph') + @patch.object(ceph_hooks, 'service_restart') + @patch.object(ceph_hooks, 'service_reload') + @patch.object(ceph_hooks, 'copy_profile_into_place') + @patch.object(ceph_hooks, 'CephOsdAppArmorContext') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile_new_install(self, mock_config, + mock_apparmor_context, + mock_copy_profile_into_place, + mock_service_reload, + mock_service_restart, + mock_ceph): + '''Apparmor profile always reloaded on fresh install''' + m_config = MagicMock() + m_config.changed.return_value = True + mock_config.return_value = m_config + m_aa_context = MagicMock() + mock_apparmor_context.return_value = m_aa_context + mock_ceph.systemd.return_value = True + mock_ceph.get_local_osd_ids.return_value = [0, 1, 2] + mock_copy_profile_into_place.return_value = True + + ceph_hooks.install_apparmor_profile() + + m_aa_context.setup_aa_profile.assert_called() + mock_copy_profile_into_place.assert_called() + m_config.changed.assert_not_called() + mock_service_reload.assert_called_with('apparmor') + mock_service_restart.assert_has_calls([ + call('ceph-osd@0'), + call('ceph-osd@1'), + call('ceph-osd@2'), + ]) + @patch.object(ceph_hooks, 'storage_list') @patch.object(ceph_hooks, 'config') def test_get_devices(self, mock_config, mock_storage_list): From d9a83c6ebb8738d6101d01d0bec399392789415e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 26 Apr 2018 13:13:29 +0200 Subject: [PATCH 1475/2699] Add broker support for passing app_name In Ceph >= Luminous, application name needs to be set on a per-pool level to avoid health warnings. This change adds support for sending the application name over the broker channel from consuming charms. 
When a name is not sent from the other side of the relation, the application name will be set to "unknown" in Luminous and greater Change-Id: I1109251b08da20adaf3d677c38fc1aacfba29439 Closes-Bug: #1753640 Depends-On: I99ae47b6802f50ea019751ffa328f11567cca567 --- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 225 ++++++++++++++++-- .../charmhelpers/contrib/openstack/context.py | 10 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/openstack/vaultlocker.py | 84 +++++++ .../contrib/storage/linux/ceph.py | 43 +++- .../charmhelpers/contrib/storage/linux/lvm.py | 29 +++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 130 +++++++--- ceph-mon/hooks/charmhelpers/core/host.py | 11 +- .../hooks/charmhelpers/core/services/base.py | 25 +- ceph-mon/hooks/charmhelpers/core/unitdata.py | 9 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-mon/lib/ceph/broker.py | 7 +- ceph-mon/tests/basic_deployment.py | 5 +- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 225 ++++++++++++++++-- ceph-mon/tests/charmhelpers/core/hookenv.py | 130 +++++++--- ceph-mon/tests/charmhelpers/core/host.py | 11 +- .../tests/charmhelpers/core/services/base.py | 25 +- ceph-mon/tests/charmhelpers/core/unitdata.py | 9 +- 20 files changed, 843 insertions(+), 158 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..84e87f5d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. 
Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. 
Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password, project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def 
get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + etc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 36cf32fc..2d91f0a7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -384,6 +384,7 @@ def __call__(self): # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') return ctxt return {} @@ -796,9 +797,9 @@ def configure_cert(self, cn=None): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template.
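For illustration, a minimal usage sketch of this context (hypothetical charm code, not part of this patch; the change below simply passes extra keyword arguments through to charmhelpers' mkdir, so the path and perms shown here are assumptions about the caller):

    # Ensure the render target exists with the desired permissions
    # before a template is written into it.
    ctxt = EnsureDirContext('/etc/haproxy', perms=0o755)
    ctxt()  # idempotently creates the directory and returns {}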
''' - def __init__(self, dirname): + def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' self.dirname = dirname + self.kwargs = kwargs def __call__(self): - mkdir(self.dirname) + mkdir(self.dirname, **self.kwargs) return {} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index b753275d..e7194264 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 00000000..0b78e7a4 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,84 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.contrib.openstack.context as context + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.templating as templating + + +class VaultKVContext(context.OSContextGenerator): +    """Vault KV context for interaction with vault-kv interfaces""" + interfaces = ['secrets-storage'] + + def __init__(self, secret_backend=None): + super(context.OSContextGenerator, self).__init__() + self.secret_backend = ( + secret_backend or 'charm-{}'.format(hookenv.service_name()) + ) + + def __call__(self): + for relation_id in hookenv.relation_ids(self.interfaces[0]): + for unit in hookenv.related_units(relation_id): + vault_url = hookenv.relation_get( + 'vault_url', + unit=unit, + rid=relation_id + ) + role_id = hookenv.relation_get( + '{}_role_id'.format(hookenv.local_unit()), + unit=unit, + rid=relation_id + ) + + if vault_url and role_id: + ctxt = { + 'vault_url': json.loads(vault_url), + 'role_id': json.loads(role_id), + 'secret_backend': self.secret_backend, + } + vault_ca = hookenv.relation_get( + 'vault_ca', + unit=unit, + rid=relation_id + ) + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + self.complete = True + return ctxt + return {} + + +def write_vaultlocker_conf(context, priority=100): + """Write vaultlocker configuration to disk and install alternative + + :param context: Dict of data from vault-kv relation + :type context: dict + :param priority: Priority of alternative configuration + :type priority: int""" + charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( + hookenv.service_name() + ) + host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) + templating.render(source='vaultlocker.conf.j2', + target=charm_vl_path, + context=context, perms=0o600) + alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority)
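The two vaultlocker helpers above are intended to be used together from a charm's secrets-storage hooks. A minimal sketch (the hook wiring and backend name are illustrative, not part of this patch):

    import charmhelpers.contrib.openstack.vaultlocker as vaultlocker

    def secrets_storage_relation_changed():
        ctxt = vaultlocker.VaultKVContext(secret_backend='charm-vaultlocker')()
        if ctxt:
            # Context is complete: render vaultlocker.conf and register it
            # as the /etc/vaultlocker/vaultlocker.conf alternative.
            vaultlocker.write_vaultlocker_conf(ctxt)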
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13e60a6..76828201 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -291,7 +291,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): class ReplicatedPool(Pool): def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0): + percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas if pg_num: @@ -301,6 +301,10 @@ def __init__(self, service, name, pg_num=None, replicas=2, self.pg_num = min(pg_num, max_pgs) else: self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -313,6 +317,12 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) except CalledProcessError: raise @@ -320,10 +330,14 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0): + percent_data=10.0, app_name=None): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -355,6 +369,12 @@ def create(self): 'erasure', self.erasure_code_profile] try: check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) except CalledProcessError: raise @@ -778,6 +798,25 @@ def update_pool(client, pool, settings): check_call(cmd) +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if ceph_version() >= '12.0.0': + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py index 79a7a245..c8bde692 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume.
+ :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 7ed1cc4e..e9df1509 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -353,22 +354,40 @@ def _implicit_save(self): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] + try: + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1062,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1121,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. 
The name of a relation of extra-binding @@ -1123,7 +1143,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1150,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1219,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1249,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. 
['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. @return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if 
str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 910e96a6..653d58f1 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 8ba2e7a9..0b6d3e24 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -370,6 +370,8 @@ def handle_erasure_pool(request, service): if erasure_profile is None: erasure_profile = "default-canonical" + app_name = request.get('app-name') + # Check for missing params if pool_name is None: msg = "Missing parameter. name is required for the pool" @@ -393,7 +395,7 @@ def handle_erasure_pool(request, service): pool = ErasurePool(service=service, name=pool_name, erasure_code_profile=erasure_profile, - percent_data=weight) + percent_data=weight, app_name=app_name) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (erasure_profile={})" @@ -426,6 +428,7 @@ def handle_replicated_pool(request, service): if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) + app_name = request.get('app-name') # Check for missing params if pool_name is None or replicas is None: msg = "Missing parameter. 
name and replicas are required" @@ -446,6 +449,8 @@ def handle_replicated_pool(request, service): kwargs['percent_data'] = weight if replicas: kwargs['replicas'] = replicas + if app_name: + kwargs['app_name'] = app_name pool = ReplicatedPool(service=service, name=pool_name, **kwargs) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index a15d7b01..5929f8c3 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -155,10 +155,7 @@ def _initialize_tests(self): password='openstack', tenant='admin') # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, - username='admin', - password='openstack', - tenant='admin') + self.cinder = u.authenticate_cinder_admin(self.keystone) # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..84e87f5d 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. 
Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region, + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password, project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def 
get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + etc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index 7ed1cc4e..e9df1509 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -353,22 +354,40 @@ def _implicit_save(self): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it.
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] + try: + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1062,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1121,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1143,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1150,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1219,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. 
+ + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1249,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
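For example (illustrative arithmetic only; the unit name ceph-mon/3 is hypothetical):

    modulo_distribution(modulo=3, wait=30)                      # (3 % 3) * 30 = 0
    modulo_distribution(modulo=3, wait=30, non_zero_wait=True)  # 0 -> 3 * 30 = 90

so a unit that would otherwise proceed immediately still backs off, leaving the zero-wait slot free for the leader.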
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-mon/tests/charmhelpers/core/services/base.py b/ceph-mon/tests/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-mon/tests/charmhelpers/core/services/base.py +++ b/ceph-mon/tests/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-mon/tests/charmhelpers/core/unitdata.py b/ceph-mon/tests/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-mon/tests/charmhelpers/core/unitdata.py +++ b/ceph-mon/tests/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None From dba22cbf19588da006b5efe8113da17fb3efff8b Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Wed, 18 Apr 2018 19:19:50 +1200 Subject: [PATCH 1476/2699] Update Nagios check for Luminous This adds a test to see if the ceph status output looks like Luminous or newer, and if so changes the output used to collect info. 
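For reference, the shape change being handled, abbreviated from the fixtures added below:

    # Pre-Luminous (e.g. 10.2.x) 'ceph status --format json':
    #   "health": {"overall_status": "HEALTH_WARN",
    #              "summary": [{"summary": "1 osds down"}]}
    # Luminous (12.2.0 or later):
    #   "health": {"status": "HEALTH_WARN",
    #              "checks": {"OSD_DOWN": {"severity": "HEALTH_WARN",
    #                                      "summary": {"message": "1 osds down"}}}}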
Change-Id: I98d194c329aace3c412701e06632dbfedfadefc7 Closes-Bug: #1756864 --- ceph-mon/files/nagios/check_ceph_status.py | 41 +++- ceph-mon/unit_tests/ceph_crit_luminous.json | 196 ++++++++++++++++++ ceph-mon/unit_tests/ceph_ok_luminous.json | 180 ++++++++++++++++ ceph-mon/unit_tests/test_check_ceph_status.py | 50 ++++- 4 files changed, 455 insertions(+), 12 deletions(-) create mode 100644 ceph-mon/unit_tests/ceph_crit_luminous.json create mode 100644 ceph-mon/unit_tests/ceph_ok_luminous.json diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 98275a51..ef978023 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -68,6 +68,23 @@ def check_file_freshness(filename, newer_than=3600): % (filename, time.ctime(mtime))) +def get_ceph_version(): + """ + Uses CLI to get the ceph version, because the status output changes from + Luminous onwards (12.2.0 or higher) + + :returns: list of integers, just the actual version number + """ + try: + out_string = subprocess.check_output(['ceph', + '--version']).decode('UTF-8') + except subprocess.CalledProcessError as e: + raise UnknownError( + "UNKNOWN: could not determine Ceph version, error: {}".format(e)) + out_version = [int(x) for x in out_string.split(" ")[2].split(".")] + return out_version + + def check_ceph_status(args): """ Used to check the status of a Ceph cluster. Uses the output of 'ceph @@ -109,15 +126,27 @@ def check_ceph_status(args): required_keys = ['health', 'monmap', 'pgmap'] if not all(key in status_data.keys() for key in required_keys): raise UnknownError('UNKNOWN: status data is incomplete') + ceph_version = get_ceph_version() + if ceph_version[0] > 12 or (ceph_version[0] == 12 and ceph_version[1] >= 2): + # This is Luminous or above + overall_status = status_data['health'].get('status') + luminous = True + else: + overall_status = status_data['health'].get('overall_status') + luminous = False - if status_data['health']['overall_status'] != 'HEALTH_OK': + if overall_status != 'HEALTH_OK': # Health is not OK, check if any lines are not in our list of OK # any lines that don't match, check is critical status_msg = [] + if luminous: + status_messages = [x['summary']['message'] for x in status_data['health'].get('checks').values()] + else: + status_messages = [x['summary'] for x in status_data['health']['summary']] - for status in status_data['health']['summary']: - if not re.match(ignorable, status['summary']): + for status in status_messages: + if not re.match(ignorable, status): status_critical = True - status_msg.append(status['summary']) + status_msg.append(status) # If we got this far, then the status is not OK but the status lines # are all in our list of things we consider to be operational tasks.
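    # An illustrative walk-through of the version gate above (version
    # string taken from the fixtures below): `ceph --version` prints e.g.
    #   ceph version 12.2.2 (cf0baeeeeba3b47f9427c6c97e2144b094b7e5ba) luminous (stable)
    # so get_ceph_version() returns [12, 2, 2], luminous is True, and the
    # messages are read from status_data['health']['checks'] rather than
    # the pre-Luminous status_data['health']['summary'] list.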
# Check the thresholds and return CRITICAL if exceeded, @@ -138,10 +167,10 @@ def check_ceph_status(args): status_msg.append("Recovering objects/sec {}".format(recovering)) if status_critical: msg = 'CRITICAL: ceph health: "{} {}"'.format( - status_data['health']['overall_status'], + overall_status, ", ".join(status_msg)) raise CriticalError(msg) - if status_data['health']['overall_status'] == 'HEALTH_WARN': + if overall_status == 'HEALTH_WARN': msg = "WARNING: {}".format(", ".join(status_msg)) raise WarnError(msg) message = "All OK" diff --git a/ceph-mon/unit_tests/ceph_crit_luminous.json b/ceph-mon/unit_tests/ceph_crit_luminous.json new file mode 100644 index 00000000..c81a3f36 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_crit_luminous.json @@ -0,0 +1,196 @@ +{ + "fsid": "a7285ad8-3961-11e8-b715-00163e030140", + "health": { + "checks": { + "OSD_DOWN": { + "severity": "HEALTH_WARN", + "summary": { + "message": "1 osds down" + } + }, + "PG_DEGRADED": { + "severity": "HEALTH_WARN", + "summary": { + "message": "Degraded data redundancy: 31/906 objects degraded (3.422%), 74 pgs unclean, 74 pgs degraded" + } + } + }, + "status": "HEALTH_WARN" + }, + "election_epoch": 28, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-7cfc1d-1-lxd-0", + "juju-7cfc1d-0-lxd-0", + "juju-7cfc1d-12-lxd-0" + ], + "monmap": { + "epoch": 2, + "fsid": "a7285ad8-3961-11e8-b715-00163e030140", + "modified": "2018-04-06 06:37:04.978765", + "created": "2018-04-06 06:35:06.513449", + "features": { + "persistent": [ + "kraken", + "luminous" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "juju-7cfc1d-1-lxd-0", + "addr": "172.18.250.75:6789/0", + "public_addr": "172.18.250.75:6789/0" + }, + { + "rank": 1, + "name": "juju-7cfc1d-0-lxd-0", + "addr": "172.18.250.76:6789/0", + "public_addr": "172.18.250.76:6789/0" + }, + { + "rank": 2, + "name": "juju-7cfc1d-12-lxd-0", + "addr": "172.18.250.84:6789/0", + "public_addr": "172.18.250.84:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 257, + "num_osds": 33, + "num_up_osds": 32, + "num_in_osds": 33, + "full": false, + "nearfull": false, + "num_remapped_pgs": 0 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 958 + }, + { + "state_name": "active+undersized+degraded", + "count": 74 + } + ], + "num_pgs": 1032, + "num_pools": 20, + "num_objects": 302, + "data_bytes": 580388173, + "bytes_used": 2971890057216, + "bytes_avail": 128989599563776, + "bytes_total": 131961489620992, + "degraded_objects": 31, + "degraded_total": 906, + "degraded_ratio": 0.034216 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + }, + "mgrmap": { + "epoch": 4, + "active_gid": 4131, + "active_name": "juju-7cfc1d-1-lxd-0", + "active_addr": "172.18.250.75:6800/88914", + "available": true, + "standbys": [ + { + "gid": 4134, + "name": "juju-7cfc1d-0-lxd-0", + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ] + }, + { + "gid": 4299, + "name": "juju-7cfc1d-12-lxd-0", + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ] + } + ], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + "services": {} + }, + "servicemap": { + "epoch": 22, + "modified": "2018-04-14 06:25:03.499825", + "services": { + 
"rgw": { + "daemons": { + "summary": "", + "radosgw.gateway": { + "start_epoch": 22, + "start_stamp": "2018-04-14 06:25:02.277715", + "gid": 156351, + "addr": "172.18.250.74:0/2962286796", + "metadata": { + "arch": "x86_64", + "ceph_version": "ceph version 12.2.2 (cf0baeeeeba3b47f9427c6c97e2144b094b7e5ba) luminous (stable)", + "cpu": "Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz", + "distro": "ubuntu", + "distro_description": "Ubuntu 16.04.4 LTS", + "distro_version": "16.04", + "frontend_config#0": "civetweb port=60", + "frontend_type#0": "civetweb", + "hostname": "juju-7cfc1d-1-lxd-1", + "kernel_description": "#43~16.04.1-Ubuntu SMP Wed Mar 14 17:48:43 UTC 2018", + "kernel_version": "4.13.0-38-generic", + "mem_swap_kb": "8388604", + "mem_total_kb": "528154640", + "num_handles": "1", + "os": "Linux", + "pid": "225019", + "zone_id": "34009c14-e608-47e6-84c5-bf2cefbe94f8", + "zone_name": "default", + "zonegroup_id": "7771c284-f980-41f0-861b-66c95357cb3d", + "zonegroup_name": "default" + } + } + } + } + } + } +} diff --git a/ceph-mon/unit_tests/ceph_ok_luminous.json b/ceph-mon/unit_tests/ceph_ok_luminous.json new file mode 100644 index 00000000..8a489d48 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_ok_luminous.json @@ -0,0 +1,180 @@ +{ + "fsid": "1111111-11111-1111-1111-111111111111", + "health": { + "checks": {}, + "status": "HEALTH_OK" + }, + "election_epoch": 28, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-badbad-1-lxd-0", + "juju-badbad-0-lxd-0", + "juju-badbad-12-lxd-0" + ], + "monmap": { + "epoch": 2, + "fsid": "1111111-11111-1111-1111-111111111111", + "modified": "2018-04-06 06:37:04.978765", + "created": "2018-04-06 06:35:06.513449", + "features": { + "persistent": [ + "kraken", + "luminous" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "juju-badbad-1-lxd-0", + "addr": "10.11.12.75:6789/0", + "public_addr": "10.11.12.75:6789/0" + }, + { + "rank": 1, + "name": "juju-badbad-0-lxd-0", + "addr": "10.11.12.76:6789/0", + "public_addr": "10.11.12.76:6789/0" + }, + { + "rank": 2, + "name": "juju-badbad-12-lxd-0", + "addr": "10.11.12.84:6789/0", + "public_addr": "10.11.12.84:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 262, + "num_osds": 33, + "num_up_osds": 32, + "num_in_osds": 32, + "full": false, + "nearfull": false, + "num_remapped_pgs": 0 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 1032 + } + ], + "num_pgs": 1032, + "num_pools": 20, + "num_objects": 561, + "data_bytes": 1584814720, + "bytes_used": 2884842602496, + "bytes_avail": 125077821714432, + "bytes_total": 127962664316928, + "read_bytes_sec": 1513, + "read_op_per_sec": 1, + "write_op_per_sec": 0 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + }, + "mgrmap": { + "epoch": 4, + "active_gid": 4131, + "active_name": "juju-badbad-1-lxd-0", + "active_addr": "10.11.12.75:6800/88914", + "available": true, + "standbys": [ + { + "gid": 4134, + "name": "juju-badbad-0-lxd-0", + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ] + }, + { + "gid": 4299, + "name": "juju-badbad-12-lxd-0", + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ] + } + ], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + 
"services": {} + }, + "servicemap": { + "epoch": 29, + "modified": "2018-04-18 06:25:04.076050", + "services": { + "rgw": { + "daemons": { + "summary": "", + "radosgw.gateway": { + "start_epoch": 29, + "start_stamp": "2018-04-18 06:25:02.612368", + "gid": 231504, + "addr": "10.11.12.78:0/2747422053", + "metadata": { + "arch": "x86_64", + "ceph_version": "ceph version 12.2.2 (cf0baeeeeba3b47f9427c6c97e2144b094b7e5ba) luminous (stable)", + "cpu": "Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz", + "distro": "ubuntu", + "distro_description": "Ubuntu 16.04.4 LTS", + "distro_version": "16.04", + "frontend_config#0": "civetweb port=60", + "frontend_type#0": "civetweb", + "hostname": "juju-badbad-0-lxd-1", + "kernel_description": "#43~16.04.1-Ubuntu SMP Wed Mar 14 17:48:43 UTC 2018", + "kernel_version": "4.13.0-38-generic", + "mem_swap_kb": "8388604", + "mem_total_kb": "528154640", + "num_handles": "1", + "os": "Linux", + "pid": "225487", + "zone_id": "11111111-1111-1111-1111-111111111111", + "zone_name": "default", + "zonegroup_id": "11111111-1111-1111-1111-111111111111", + "zonegroup_name": "default" + } + } + } + } + } + } +} + diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index eeb13606..caf89bc9 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -26,7 +26,15 @@ @patch('subprocess.check_output') class NagiosTestCase(unittest.TestCase): - def test_health_ok(self, mock_subprocess): + def test_get_ceph_version(self, mock_subprocess): + mock_subprocess.return_value = 'ceph version 10.2.9 ' \ + '(2ee413f77150c0f375ff6f10edd6c8f9c7d060d0)'.encode('UTF-8') + ceph_version = check_ceph_status.get_ceph_version() + self.assertEqual(ceph_version, [10, 2, 9]) + + @patch('check_ceph_status.get_ceph_version') + def test_health_ok(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_ok.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') @@ -34,7 +42,19 @@ def test_health_ok(self, mock_subprocess): check_output = check_ceph_status.check_ceph_status(args) self.assertRegex(check_output, r"^All OK$") - def test_health_warn(self, mock_subprocess): + @patch('check_ceph_status.get_ceph_version') + def test_health_ok_luminous(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_ok_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegex(check_output, r"^All OK$") + + @patch('check_ceph_status.get_ceph_version') + def test_health_warn(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_warn.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') @@ -42,7 +62,9 @@ def test_health_warn(self, mock_subprocess): self.assertRaises(check_ceph_status.WarnError, lambda: check_ceph_status.check_ceph_status(args)) - def test_health_crit(self, mock_subprocess): + @patch('check_ceph_status.get_ceph_version') + def test_health_crit(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_crit.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') @@ -50,7 +72,19 @@ def test_health_crit(self, mock_subprocess): 
self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) - def test_health_lotsdegraded(self, mock_subprocess): + @patch('check_ceph_status.get_ceph_version') + def test_health_crit_luminous(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_crit_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + @patch('check_ceph_status.get_ceph_version') + def test_health_lotsdegraded(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_params.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') @@ -58,7 +92,9 @@ def test_health_lotsdegraded(self, mock_subprocess): self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) - def test_health_nodeepscrub(self, mock_subprocess): + @patch('check_ceph_status.get_ceph_version') + def test_health_nodeepscrub(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') @@ -66,7 +102,9 @@ def test_health_nodeepscrub(self, mock_subprocess): self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) - def test_health_nodeepscrubok(self, mock_subprocess): + @patch('check_ceph_status.get_ceph_version') + def test_health_nodeepscrubok(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') From 8d8c54f5959459d7eaa440facef9f6c33f5673aa Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Wed, 2 May 2018 16:06:36 -0300 Subject: [PATCH 1477/2699] Sync charm-helpers Change-Id: I8e565d86b15c959ba224136b15e9db037892f6f5 Closes-Bug: #1765805 --- .../charmhelpers/contrib/hahelpers/apache.py | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 14 +++-- .../charmhelpers/contrib/openstack/context.py | 9 ++-- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 43 ++++++++++++++- .../contrib/storage/linux/utils.py | 16 ++++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 52 +++++++++++++------ ceph-osd/hooks/charmhelpers/core/host.py | 11 +++- .../hooks/charmhelpers/core/services/base.py | 4 +- ceph-osd/hooks/charmhelpers/core/sysctl.py | 18 ++++--- ceph-osd/hooks/charmhelpers/core/unitdata.py | 9 +++- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 1 + .../contrib/openstack/amulet/utils.py | 2 +- ceph-osd/tests/charmhelpers/core/hookenv.py | 52 +++++++++++++------ ceph-osd/tests/charmhelpers/core/host.py | 11 +++- .../tests/charmhelpers/core/services/base.py | 4 +- ceph-osd/tests/charmhelpers/core/sysctl.py | 18 ++++--- ceph-osd/tests/charmhelpers/core/unitdata.py | 9 +++- 18 files changed, 211 insertions(+), 66 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py index a8527047..605a1bec 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -77,7 +77,7 @@ def get_ca_cert(): def 
retrieve_ca_cert(cert_file): cert = None if os.path.isfile(cert_file): - with open(cert_file, 'r') as crt: + with open(cert_file, 'rb') as crt: cert = crt.read() return cert diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4207e42c..47facd91 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): ''' Distribute operations by waiting based on modulo_distribution If modulo and or wait are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. :param modulo: int The modulo number creates the group distribution :param wait: int The constant time wait value @@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): :side effect: Calls time.sleep() ''' if modulo is None: - modulo = config_get('modulo-nodes') + modulo = config_get('modulo-nodes') or 3 if wait is None: - wait = config_get('known-wait') - calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) msg = "Waiting {} seconds for {} ...".format(calculated_wait, operation_name) log(msg, DEBUG) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 6c4497b1..2d91f0a7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -797,9 +797,9 @@ def configure_cert(self, cn=None): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1873,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template. 
    '''
-    def __init__(self, dirname):
+    def __init__(self, dirname, **kwargs):
         '''Used merely to ensure that a given directory exists.'''
         self.dirname = dirname
+        self.kwargs = kwargs

     def __call__(self):
-        mkdir(self.dirname)
+        mkdir(self.dirname, **self.kwargs)
         return {}
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index e7194264..6184abd0 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):

     if src.startswith('cloud:'):
         ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
         return ca_rel

     # Best guess match based on deb string provided
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
index e13e60a6..76828201 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -291,7 +291,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):

 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:
@@ -301,6 +301,10 @@ def __init__(self, service, name, pg_num=None, replicas=2,
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

     def create(self):
         if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ def create(self):
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise

@@ -320,10 +330,14 @@ def create(self):
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

     def create(self):
         if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ def create(self):
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise

@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
     check_call(cmd)


+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if ceph_version() >= '12.0.0':
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
 def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
index c9428894..6f846b05 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -67,3 +67,19 @@ def is_device_mounted(device):
     except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py
index 89f10240..627d8f79 100644
--- a/ceph-osd/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py
@@ -290,7 +290,7 @@ def __init__(self, *args, **kw):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)

@@ -310,7 +310,11 @@ def load_previous(self, path=None):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -354,22 +358,40 @@ def _implicit_save(self):
             self.save()


-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. @return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index 345b60dc..179ad4f0 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -307,7 +307,9 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - 
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 910e96a6..653d58f1 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index a60f8fb0..84e87f5d 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -441,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." 
- "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 89f10240..627d8f79 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -290,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -310,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -354,22 +358,40 @@ def _implicit_save(self): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-osd/tests/charmhelpers/core/services/base.py b/ceph-osd/tests/charmhelpers/core/services/base.py index 345b60dc..179ad4f0 100644 --- a/ceph-osd/tests/charmhelpers/core/services/base.py +++ b/ceph-osd/tests/charmhelpers/core/services/base.py @@ -307,7 +307,9 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: diff --git a/ceph-osd/tests/charmhelpers/core/sysctl.py b/ceph-osd/tests/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-osd/tests/charmhelpers/core/sysctl.py +++ b/ceph-osd/tests/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-osd/tests/charmhelpers/core/unitdata.py b/ceph-osd/tests/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-osd/tests/charmhelpers/core/unitdata.py +++ b/ceph-osd/tests/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. 
""" def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None From 06f1b6d1fb79c145f6a7ce0633e009b23b2ec4dd Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 8 May 2018 10:11:34 +0200 Subject: [PATCH 1478/2699] Support use of partitions with ceph-volume Resync ceph helpers to support use of partitions and bcache devices for OSD filestore, blockstore, journal, db and wal devices. Change-Id: Iac5eefc4c3f53b1ec2f5cfc6e98a9a2168c1c0b6 Closes-Bug: 1769678 --- ceph-osd/lib/ceph/broker.py | 7 +- ceph-osd/lib/ceph/utils.py | 166 ++++++++++++++++++++++++++---------- 2 files changed, 127 insertions(+), 46 deletions(-) diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index 8ba2e7a9..0b6d3e24 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -370,6 +370,8 @@ def handle_erasure_pool(request, service): if erasure_profile is None: erasure_profile = "default-canonical" + app_name = request.get('app-name') + # Check for missing params if pool_name is None: msg = "Missing parameter. name is required for the pool" @@ -393,7 +395,7 @@ def handle_erasure_pool(request, service): pool = ErasurePool(service=service, name=pool_name, erasure_code_profile=erasure_profile, - percent_data=weight) + percent_data=weight, app_name=app_name) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (erasure_profile={})" @@ -426,6 +428,7 @@ def handle_replicated_pool(request, service): if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) + app_name = request.get('app-name') # Check for missing params if pool_name is None or replicas is None: msg = "Missing parameter. name and replicas are required" @@ -446,6 +449,8 @@ def handle_replicated_pool(request, service): kwargs['percent_data'] = weight if replicas: kwargs['replicas'] = replicas + if app_name: + kwargs['app_name'] = app_name pool = ReplicatedPool(service=service, name=pool_name, **kwargs) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 8e0e3c2b..8f72a843 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -90,6 +90,13 @@ 'radosgw', 'xfsprogs', 'python-pyudev', 'lvm2', 'parted'] +CEPH_KEY_MANAGER = 'ceph' +VAULT_KEY_MANAGER = 'vault' +KEY_MANAGERS = [ + CEPH_KEY_MANAGER, + VAULT_KEY_MANAGER, +] + LinkSpeed = { "BASE_10": 10, "BASE_100": 100, @@ -1420,10 +1427,9 @@ def get_lvs(dev): operation failed. 
:returns: list: List of logical volumes provided by the block device """ - pv_dev = _partition_name(dev) - if not lvm.is_lvm_physical_volume(pv_dev): + if not lvm.is_lvm_physical_volume(dev): return [] - vg_name = lvm.list_lvm_volume_group(pv_dev) + vg_name = lvm.list_lvm_volume_group(dev) return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) @@ -1463,17 +1469,42 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): + ignore_errors=False, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, reformat_osd, ignore_errors, encrypt, - bluestore) + bluestore, key_manager) else: osdize_dir(dev, encrypt, bluestore) def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): + ignore_errors=False, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): + """ + Prepare a block device for use as a Ceph OSD + + A block device will only be prepared once during the lifetime + of the calling charm unit; future executions will be skipped. + + :param: dev: Full path to block device to use + :param: osd_format: Format for OSD filesystem + :param: osd_journal: List of block devices to use for OSD journals + :param: reformat_osd: Reformat devices that are not currently in use + which have been used previously + :param: ignore_errors: Don't fail in the event of any errors during + processing + :param: encrypt: Encrypt block devices using 'key_manager' + :param: bluestore: Use bluestore native ceph block device format + :param: key_manager: Key management approach for encryption keys + :raises subprocess.CalledProcessError: in the event that any supporting + subprocess operation failed + :raises ValueError: if an invalid key_manager is provided + """ + if key_manager not in KEY_MANAGERS: + raise ValueError('Unsupported key manager: {}'.format(key_manager)) + db = kv() osd_devices = db.get('osd-devices', []) if dev in osd_devices: @@ -1510,7 +1541,8 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, cmd = _ceph_volume(dev, osd_journal, encrypt, - bluestore) + bluestore, + key_manager) else: cmd = _ceph_disk(dev, osd_format, @@ -1591,7 +1623,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): return cmd -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): +def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1602,6 +1635,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD + :param: key_manager: dm-crypt Key Manager to use :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 
'ceph-volume' command and required parameters for @@ -1620,7 +1654,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): cmd.append('--filestore') main_device_type = 'data' - if encrypt: + if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') # On-disk journal volume creation @@ -1628,16 +1662,20 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): journal_lv_type = 'journal' cmd.append('--journal') cmd.append(_allocate_logical_volume( - dev, - journal_lv_type, - osd_fsid, - size='{}M'.format(calculate_volume_size('journal'))) + dev=dev, + lv_type=journal_lv_type, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + encrypt=encrypt, + key_manager=key_manager) ) cmd.append('--data') - cmd.append(_allocate_logical_volume(dev, - main_device_type, - osd_fsid)) + cmd.append(_allocate_logical_volume(dev=dev, + lv_type=main_device_type, + osd_fsid=osd_fsid, + encrypt=encrypt, + key_manager=key_manager)) if bluestore: for extra_volume in ('wal', 'db'): @@ -1647,11 +1685,13 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): least_used = find_least_used_utility_device(devices, lvs=True) cmd.append(_allocate_logical_volume( - least_used, - extra_volume, - osd_fsid, + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True) + shared=True, + encrypt=encrypt, + key_manager=key_manager) ) elif osd_journal: @@ -1659,11 +1699,13 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False): least_used = find_least_used_utility_device(osd_journal, lvs=True) cmd.append(_allocate_logical_volume( - least_used, - 'journal', - osd_fsid, + dev=least_used, + lv_type='journal', + osd_fsid=osd_fsid, size='{}M'.format(calculate_volume_size('journal')), - shared=True) + shared=True, + encrypt=encrypt, + key_manager=key_manager) ) return cmd @@ -1682,7 +1724,6 @@ def _partition_name(dev): return '{}1'.format(dev) -# TODO(jamespage): Deal with lockbox encrypted bluestore devices. def is_active_bluestore_device(dev): """ Determine whether provided device is part of an active @@ -1691,11 +1732,10 @@ def is_active_bluestore_device(dev): :param: dev: Full path to block device to check for Bluestore usage. :returns: boolean: indicating whether device is in active use. """ - pv_dev = _partition_name(dev) - if not lvm.is_lvm_physical_volume(pv_dev): + if not lvm.is_lvm_physical_volume(dev): return False - vg_name = lvm.list_lvm_volume_group(pv_dev) + vg_name = lvm.list_lvm_volume_group(dev) lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') @@ -1761,36 +1801,66 @@ def calculate_volume_size(lv_type): return int(configured_size) / _units[lv_type] -def _initialize_disk(dev): +def _luks_uuid(dev): """ - Initialize a raw block device with a single paritition - consuming 100% of the avaliable disk space. + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return subprocess.check_output(cmd).decode('UTF-8').strip() + except subprocess.CalledProcessError: + return None + + +def _initialize_disk(dev, dev_uuid, encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Initialize a raw block device consuming 100% of the avaliable + disk space. 
Function assumes that block device has already been wiped. :param: dev: path to block device to initialize + :param: dev_uuid: UUID to use for any dm-crypt operations + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: Key management approach for dm-crypt keys :raises: subprocess.CalledProcessError: if any parted calls fail :returns: str: Full path to new partition. """ - partition = _partition_name(dev) - if not os.path.exists(partition): - subprocess.check_call([ - 'parted', '--script', - dev, - 'mklabel', - 'gpt', - ]) + use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER + + if use_vaultlocker: + # NOTE(jamespage): Check to see if already initialized as a LUKS + # volume, which indicates this is a shared block + # device for journal, db or wal volumes. + luks_uuid = _luks_uuid(dev) + if luks_uuid: + return '/dev/mapper/crypt-{}'.format(luks_uuid) + + dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) + + if use_vaultlocker and not os.path.exists(dm_crypt): subprocess.check_call([ - 'parted', '--script', + 'vaultlocker', + 'encrypt', + '--uuid', dev_uuid, dev, - 'mkpart', - 'primary', '1', '100%', ]) - return partition + + if use_vaultlocker: + return dm_crypt + else: + return dev def _allocate_logical_volume(dev, lv_type, osd_fsid, - size=None, shared=False): + size=None, shared=False, + encrypt=False, + key_manager=CEPH_KEY_MANAGER): """ Allocate a logical volume from a block device, ensuring any required initialization and setup of PV's and VG's to support @@ -1803,13 +1873,19 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, :param: size: Size in LVM format for the device; if unset 100% of VG :param: shared: Shared volume group (journal, wal, db) + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: dm-crypt Key Manager to use :raises subprocess.CalledProcessError: in the event that any supporting LVM or parted operation fails. :returns: str: String in the format 'vg_name/lv_name'. """ lv_name = "osd-{}-{}".format(lv_type, osd_fsid) current_volumes = lvm.list_logical_volumes() - pv_dev = _initialize_disk(dev) + if shared: + dev_uuid = str(uuid.uuid4()) + else: + dev_uuid = osd_fsid + pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) vg_name = None if not lvm.is_lvm_physical_volume(pv_dev): From 0ae286257cb8c3bcd1cce0c610c39a46975e5d52 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 3 May 2018 15:30:52 +0100 Subject: [PATCH 1479/2699] Tighten up the leader_set(..) usage to handle any errors This stops leader_set from throwing backtraces, and instead logs the error and continues as though the charm is not the leader (which is the case when leader_set(...) fails). Changed the py35 tox job to invoke py3 (to allow it to also test under py3.6 on artful+). Note that the pep8 check is still py27, so the additional # NOQA is to handle Py2 not having FileNotFoundError. 
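In practice the guards reduce to two small patterns. A minimal sketch follows
(hypothetical helper names; it assumes the charmhelpers hookenv API and the
charm's ceph lib used throughout this series):

    from charmhelpers.core.hookenv import leader_get, leader_set, log

    def try_leader_set(settings):
        # leader_set() raises on units that are not the leader; log the
        # failure and carry on instead of letting the hook backtrace.
        try:
            leader_set(settings)
            return True
        except Exception as e:
            log("leader_set failed: {}".format(str(e)))
            return False

    def bootstrap_when_ready(ceph):
        # A keyring that is not in place yet surfaces as FileNotFoundError;
        # treat it as "not yet" and retry when the next hook fires.
        try:
            ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        except FileNotFoundError as e:
            log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
            return False
        return True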
Change-Id: Ic25d29983db9a0738d83e66de4673bb50594b599
---
 ceph-mon/hooks/ceph_hooks.py      | 49 ++++++++++++++++++++++---------
 ceph-mon/tox.ini                  | 13 ++++----
 ceph-mon/unit_tests/test_utils.py | 21 +------------
 3 files changed, 41 insertions(+), 42 deletions(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 7f8876cd..b7013633 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -205,25 +205,33 @@ def config_changed():
             mon_secret = config('monitor-secret')
         else:
             mon_secret = "{}".format(ceph.generate_monitor_secret())
-        status_set('maintenance', 'Creating FSID and Monitor Secret')
         opts = {
             'fsid': fsid,
             'monitor-secret': mon_secret,
         }
-        log("Settings for the cluster are: {}".format(opts))
-        leader_set(opts)
-    elif cfg.changed('no-bootstrap') and \
-            is_relation_made('bootstrap-source'):
+        try:
+            leader_set(opts)
+            status_set('maintenance',
+                       'Created FSID and Monitor Secret')
+            log("Settings for the cluster are: {}".format(opts))
+        except Exception as e:
+            # we're probably not the leader; an exception occurred,
+            # so let's log it anyway.
+            log("leader_set failed: {}".format(str(e)))
+    elif (cfg.changed('no-bootstrap') and
+          is_relation_made('bootstrap-source')):
         # User changed the no-bootstrap config option, we're the leader,
         # and the bootstrap-source relation has been made. The charm should
         # be in a blocked state indicating that the no-bootstrap option
         # must be set. This block is invoked when the user is trying to
         # get out of that scenario by enabling no-bootstrap.
         bootstrap_source_relation_changed()
-    elif leader_get('fsid') is None or leader_get('monitor-secret') is None:
+    # unconditionally verify that the fsid and monitor-secret are set now
+    # otherwise we exit until a leader does this.
+    if leader_get('fsid') is None or leader_get('monitor-secret') is None:
         log('still waiting for leader to setup keys')
         status_set('waiting', 'Waiting for leader to setup keys')
-        sys.exit(0)
+        return

     emit_cephconf()

@@ -231,7 +239,12 @@ def config_changed():
     if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1 and
             is_leader()):
         status_set('maintenance', 'Bootstrapping single Ceph MON')
-        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
+        # the following call raises an exception if it can't add the keyring
+        try:
+            ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
+        except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
+            log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
+            return
         ceph.wait_for_bootstrap()
         if cmp_pkgrevno('ceph', '12.0.0') >= 0:
             status_set('maintenance', 'Bootstrapping single Ceph MGR')
@@ -323,15 +336,18 @@ def bootstrap_source_relation_changed():
         assert curr_secret == mon_secret, \
             "bootstrap secret '{}' != current secret '{}'".format(
                 mon_secret, curr_secret)
-
     opts = {
         'fsid': fsid,
         'monitor-secret': mon_secret,
     }
-
-    log('Updating leader settings for fsid and monitor-secret '
-        'from remote relation data: {}'.format(opts))
-    leader_set(opts)
+    try:
+        leader_set(opts)
+        log('Updating leader settings for fsid and monitor-secret '
+            'from remote relation data: {}'.format(opts))
+    except Exception as e:
+        # we're probably not the leader; an exception occurred,
+        # so let's log it anyway.
+        log("leader_set failed: {}".format(str(e)))

     # The leader unit needs to bootstrap itself as it won't receive the
     # leader-settings-changed hook elsewhere.
@@ -353,7 +369,12 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: status_set('maintenance', 'Bootstrapping MON cluster') - ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) + # the following call raises an exception if it can't add the keyring + try: + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) + except FileNotFoundError as e: # NOQA -- PEP8 is still PY2 + log("Couldn't bootstrap the monitor yet: {}".format(str(e))) + exit(0) ceph.wait_for_bootstrap() ceph.wait_for_quorum() if cmp_pkgrevno('ceph', '12.0.0') >= 0: diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index e5d01d8a..2bbbc97d 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -2,9 +2,10 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. [tox] -envlist = pep8,py27,py35,py36 +;envlist = pep8,py27,py35,py36 +envlist = pep8,py27,py35 skipsdist = True -skip_missing_interpreters = True +;skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} @@ -24,13 +25,9 @@ deps = -r{toxinidir}/requirements.txt # temporarily disable py27 commands = /bin/true +; keep zuul happy until we change the py35 job [testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py36] -basepython = python3.6 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index bc33c4ff..8539d8ec 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -17,8 +17,7 @@ import os import yaml -from contextlib import contextmanager -from mock import patch, MagicMock +from mock import patch def load_config(): @@ -145,21 +144,3 @@ def get(self, attr=None): elif attr in self.settings: return self.settings[attr] return None - - -@contextmanager -def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. - - Yields the mock for "open" and "file", respectively.''' - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file From 9863f0388f7e67f8a9249b2372ca81c9fc01d001 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 8 May 2018 12:49:07 +0200 Subject: [PATCH 1480/2699] Improve Bootstrap resilience Sync relevant changes from charms.ceph Disable `ceph-create-keys` in init system and explicitly run it in the charms ceph-mon bootstrap process. 
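The deferral itself is small; roughly the following sketch, using
charmhelpers' service_pause and mirroring the hunks below:

    from charmhelpers.core.host import service_pause

    def defer_ceph_create_keys():
        # Pause the packaged ceph-create-keys job so it cannot race the
        # charm; add_keyring_to_ceph() runs the command itself during
        # bootstrap (LP: #1719436).
        try:
            service_pause('ceph-create-keys')
        except ValueError:
            # service_pause() raises ValueError when the service cannot
            # be paused; the hunks below simply ignore that case.
            pass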
Change-Id: I03cd596e6e336b75d7d108ed0acde15d9940913f
Depends-On: I3d3c7298076730c423ca5cc059316619f415b885
Closes-Bug: #1719436
---
 ceph-mon/hooks/ceph_hooks.py | 15 +++++++++++++++
 ceph-mon/lib/ceph/utils.py   | 26 ++++++++++++++++++--------
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 7f8876cd..2a7abf0a 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -46,6 +46,7 @@
     local_unit,
     application_version_set)
 from charmhelpers.core.host import (
+    service_pause,
     service_restart,
     mkdir,
     write_file,
@@ -120,6 +121,13 @@ def install():
     add_source(config('source'), config('key'))
     apt_update(fatal=True)
     apt_install(packages=ceph.determine_packages(), fatal=True)
+    try:
+        # we defer and explicitly run `ceph-create-keys` from
+        # add_keyring_to_ceph() as part of bootstrap process
+        # LP: #1719436.
+        service_pause('ceph-create-keys')
+    except ValueError:
+        pass


 def get_ceph_context():
@@ -618,6 +626,13 @@ def upgrade_charm():
     emit_cephconf()
     apt_install(packages=filter_installed_packages(
         ceph.determine_packages()), fatal=True)
+    try:
+        # we defer and explicitly run `ceph-create-keys` from
+        # add_keyring_to_ceph() as part of bootstrap process
+        # LP: #1719436.
+        service_pause('ceph-create-keys')
+    except ValueError:
+        pass
     ceph.update_monfs()
     mon_relation_joined()
     if is_relation_made("nrpe-external-master"):
diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py
index db981fd9..f412032b 100644
--- a/ceph-mon/lib/ceph/utils.py
+++ b/ceph-mon/lib/ceph/utils.py
@@ -1349,17 +1349,27 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker):
         else:
             service_restart('ceph-mon-all')

+        # NOTE(jamespage): Later ceph releases require explicit
+        #                  call to ceph-create-keys to setup the
+        #                  admin keys for the cluster; this command
+        #                  will wait for quorum in the cluster before
+        #                  returning.
+        # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
+        #                 ceph releases too. This improves bootstrap
+        #                 resilience as the charm will wait for
+        #                 presence of peer units before attempting
+        #                 to bootstrap. Note that charms deploying
+        #                 ceph-mon service should disable running of
+        #                 `ceph-create-keys` service in init system.
+        cmd = ['ceph-create-keys', '--id', hostname]
         if cmp_pkgrevno('ceph', '12.0.0') >= 0:
-            # NOTE(jamespage): Later ceph releases require explicit
-            #                  call to ceph-create-keys to setup the
-            #                  admin keys for the cluster; this command
-            #                  will wait for quorum in the cluster before
-            #                  returning.
             # NOTE(fnordahl): The default timeout in ceph-create-keys of 600
-            #                 seconds is not adequate for all situations.
+            #                 seconds is not adequate. Increase timeout when
+            #                 timeout parameter available. For older releases
+            #                 we rely on retry_on_exception decorator.
             # LP#1719436
-            cmd = ['ceph-create-keys', '--id', hostname, '--timeout', '1800']
-            subprocess.check_call(cmd)
+            cmd.extend(['--timeout', '1800'])
+        subprocess.check_call(cmd)
         _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring'
         osstat = os.stat(_client_admin_keyring)
         if not osstat.st_size:
From a6184370a4ac76fac663bcd52111ac1155e5f8a0 Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 9 May 2018 12:36:00 +0100
Subject: [PATCH 1481/2699] ceph-volume: Install charm specific udev rules

Ensure that LVs created using the LVM layout implemented by this
charm are correctly owned by the ceph user and group, so that
ceph-osd processes can start correctly at all times.
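For orientation, the change boils down to shipping a rules file with the
charm and reloading udev after copying it into place; a condensed sketch of
the helper added below (paths as in the charm tree):

    import glob
    import shutil
    import subprocess

    def install_udev_rules():
        # Copy charm-provided rules so OSD LVs (DM_LV_NAME=osd-*) get
        # ceph:ceph ownership, then ask udev to re-read its rules.
        for rules in glob.glob('files/udev/*'):
            shutil.copy(rules, '/lib/udev/rules.d')
        subprocess.check_call(['udevadm', 'control', '--reload-rules'])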
Change-Id: I23ea51e3bffe7207f75782c5f34b796e9eed2c80 Closes-Bug: 1767087 --- ceph-osd/files/udev/95-charm-ceph-osd.rules | 11 +++++++++++ ceph-osd/hooks/ceph_hooks.py | 14 ++++++++++++++ ceph-osd/unit_tests/test_ceph_hooks.py | 12 ++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 ceph-osd/files/udev/95-charm-ceph-osd.rules diff --git a/ceph-osd/files/udev/95-charm-ceph-osd.rules b/ceph-osd/files/udev/95-charm-ceph-osd.rules new file mode 100644 index 00000000..418cb976 --- /dev/null +++ b/ceph-osd/files/udev/95-charm-ceph-osd.rules @@ -0,0 +1,11 @@ +# OSD LV (ceph-osd charm layout) +ACTION=="add", SUBSYSTEM=="block", \ + ENV{DEVTYPE}=="disk", \ + ENV{DM_LV_NAME}=="osd-*", \ + ENV{DM_VG_NAME}=="ceph-*", \ + OWNER:="ceph", GROUP:="ceph", MODE:="660" +ACTION=="change", SUBSYSTEM=="block", \ + ENV{DEVTYPE}=="disk", \ + ENV{DM_LV_NAME}=="osd-*", \ + ENV{DM_VG_NAME}=="ceph-*", \ + OWNER="ceph", GROUP="ceph", MODE="660" diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c984c26e..f5b13f3d 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -19,6 +19,7 @@ import sys import tempfile import socket +import subprocess import netifaces sys.path.append('lib') @@ -188,6 +189,17 @@ def install_apparmor_profile(): service_restart('ceph-osd-all') +def install_udev_rules(): + """ + Install and reload udev rules for ceph-volume LV + permissions + """ + for x in glob.glob('files/udev/*'): + shutil.copy(x, '/lib/udev/rules.d') + subprocess.check_call(['udevadm', 'control', + '--reload-rules']) + + @hooks.hook('install.real') @harden() def install(): @@ -196,6 +208,7 @@ def install(): apt_install(packages=ceph.determine_packages(), fatal=True) if config('autotune'): tune_network_adapters() + install_udev_rules() def az_info(): @@ -503,6 +516,7 @@ def upgrade_charm(): emit_cephconf() apt_install(packages=filter_installed_packages(ceph.determine_packages()), fatal=True) + install_udev_rules() @hooks.hook('nrpe-external-master-relation-joined', diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 3079df04..bc2cf132 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -505,3 +505,15 @@ def test_az_info_default_remap(self, environ, config, log): config.assert_called_with('availability_zone') environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE') + + @patch.object(ceph_hooks, 'subprocess') + @patch.object(ceph_hooks, 'shutil') + def test_install_udev_rules(self, shutil, subprocess): + ceph_hooks.install_udev_rules() + shutil.copy.assert_called_once_with( + 'files/udev/95-charm-ceph-osd.rules', + '/lib/udev/rules.d' + ) + subprocess.check_call.assert_called_once_with( + ['udevadm', 'control', '--reload-rules'] + ) From a921a77edefef79c5d199d3a03786a3c76e257e8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 9 May 2018 16:34:26 -0500 Subject: [PATCH 1482/2699] Remove deprecated functional test targets Change-Id: I916b7f6a57028c316834584191d03cbc7904c5cc --- ceph-mon/tests/gate-basic-trusty-kilo | 25 ------------------------ ceph-mon/tests/gate-basic-trusty-liberty | 25 ------------------------ ceph-mon/tests/gate-basic-xenial-newton | 25 ------------------------ 3 files changed, 75 deletions(-) delete mode 100755 ceph-mon/tests/gate-basic-trusty-kilo delete mode 100755 ceph-mon/tests/gate-basic-trusty-liberty delete mode 100755 ceph-mon/tests/gate-basic-xenial-newton diff --git a/ceph-mon/tests/gate-basic-trusty-kilo 
b/ceph-mon/tests/gate-basic-trusty-kilo deleted file mode 100755 index 86e772a7..00000000 --- a/ceph-mon/tests/gate-basic-trusty-kilo +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on trusty-kilo.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-kilo', - source='cloud:trusty-updates/kilo') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-trusty-liberty b/ceph-mon/tests/gate-basic-trusty-liberty deleted file mode 100755 index 3dfa8b60..00000000 --- a/ceph-mon/tests/gate-basic-trusty-liberty +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on trusty-liberty.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-newton b/ceph-mon/tests/gate-basic-xenial-newton deleted file mode 100755 index 69bf0a5c..00000000 --- a/ceph-mon/tests/gate-basic-xenial-newton +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph deployment on xenial-newton.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-newton', - source='cloud:xenial-updates/newton') - deployment.run_tests() From 26030a7ae00303017c5c3be7a8c23ee8673bce3d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 9 May 2018 16:34:34 -0500 Subject: [PATCH 1483/2699] Remove deprecated functional test targets Change-Id: I69643bd2d831d96ee31da4748efd1ed4b93e7f82 --- ceph-osd/tests/gate-basic-trusty-kilo | 25 ------------------------ ceph-osd/tests/gate-basic-trusty-liberty | 25 ------------------------ ceph-osd/tests/gate-basic-xenial-newton | 25 ------------------------ 3 files changed, 75 deletions(-) delete mode 100755 ceph-osd/tests/gate-basic-trusty-kilo delete mode 100755 ceph-osd/tests/gate-basic-trusty-liberty delete mode 100755 ceph-osd/tests/gate-basic-xenial-newton diff --git a/ceph-osd/tests/gate-basic-trusty-kilo b/ceph-osd/tests/gate-basic-trusty-kilo deleted file mode 100755 index 5d30a670..00000000 --- a/ceph-osd/tests/gate-basic-trusty-kilo +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on trusty-kilo.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='trusty', - openstack='cloud:trusty-kilo', - source='cloud:trusty-updates/kilo') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-trusty-liberty b/ceph-osd/tests/gate-basic-trusty-liberty deleted file mode 100755 index 41f1996a..00000000 --- a/ceph-osd/tests/gate-basic-trusty-liberty +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-osd deployment on trusty-liberty.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-newton b/ceph-osd/tests/gate-basic-xenial-newton deleted file mode 100755 index 4dd60b96..00000000 --- a/ceph-osd/tests/gate-basic-xenial-newton +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on xenial-newton.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial', - openstack='cloud:xenial-newton', - source='cloud:xenial-updates/newton') - deployment.run_tests() From e2276d63efc32ca8ab2b64febab9d6c5074afb01 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 9 May 2018 16:34:43 -0500 Subject: [PATCH 1484/2699] Remove deprecated functional test targets Change-Id: I0eb95295b251d320f91771a7a5c5882af26f8034 --- ceph-radosgw/tests/gate-basic-trusty-kilo | 25 -------------------- ceph-radosgw/tests/gate-basic-trusty-liberty | 25 -------------------- ceph-radosgw/tests/gate-basic-xenial-newton | 25 -------------------- 3 files changed, 75 deletions(-) delete mode 100755 ceph-radosgw/tests/gate-basic-trusty-kilo delete mode 100755 ceph-radosgw/tests/gate-basic-trusty-liberty delete mode 100755 ceph-radosgw/tests/gate-basic-xenial-newton diff --git a/ceph-radosgw/tests/gate-basic-trusty-kilo b/ceph-radosgw/tests/gate-basic-trusty-kilo deleted file mode 100755 index ebbad248..00000000 --- a/ceph-radosgw/tests/gate-basic-trusty-kilo +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-radosgw deployment on trusty-kilo.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty', - openstack='cloud:trusty-kilo', - source='cloud:trusty-updates/kilo') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-trusty-liberty b/ceph-radosgw/tests/gate-basic-trusty-liberty deleted file mode 100755 index f5579f5f..00000000 --- a/ceph-radosgw/tests/gate-basic-trusty-liberty +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on trusty-liberty.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty', - openstack='cloud:trusty-liberty', - source='cloud:trusty-updates/liberty') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-newton b/ceph-radosgw/tests/gate-basic-xenial-newton deleted file mode 100755 index f53a3391..00000000 --- a/ceph-radosgw/tests/gate-basic-xenial-newton +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on xenial-newton.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial', - openstack='cloud:xenial-newton', - source='cloud:xenial-updates/newton') - deployment.run_tests() From cb68e3a10f984262bb3be67b820be73ebb803a9e Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 8 May 2018 11:52:30 -0700 Subject: [PATCH 1485/2699] Enable Bionic as a gate test Change bionic test from dev to gate for 18.05. 
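Note: every functional test target involved here, both the ones deleted above and the dev-to-gate renames that follow, is the same thin amulet entry point; for reference, the canonical shape of such a target (this particular series/release pairing is only an example) is:

    #!/usr/bin/env python
    """Amulet tests on a basic ceph-osd deployment on xenial-queens."""

    from basic_deployment import CephOsdBasicDeployment

    if __name__ == '__main__':
        deployment = CephOsdBasicDeployment(series='xenial',
                                            openstack='cloud:xenial-queens',
                                            source='cloud:xenial-updates/queens')
        deployment.run_tests()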
Change-Id: Ice91cd32a8e1b82be80b29e67bdeec32cfa89254 --- .../charmhelpers/contrib/openstack/utils.py | 4 +- .../contrib/storage/linux/ceph.py | 43 +++- .../charmhelpers/contrib/storage/linux/lvm.py | 29 +++ .../contrib/storage/linux/utils.py | 16 ++ ceph-proxy/hooks/charmhelpers/core/hookenv.py | 138 ++++++++--- ceph-proxy/hooks/charmhelpers/core/host.py | 11 +- .../hooks/charmhelpers/core/services/base.py | 25 +- ceph-proxy/hooks/charmhelpers/core/sysctl.py | 18 +- .../hooks/charmhelpers/core/unitdata.py | 9 +- ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-proxy/tests/basic_deployment.py | 2 +- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 225 ++++++++++++++++-- ceph-proxy/tests/charmhelpers/core/hookenv.py | 138 ++++++++--- ceph-proxy/tests/charmhelpers/core/host.py | 11 +- .../tests/charmhelpers/core/services/base.py | 25 +- ceph-proxy/tests/charmhelpers/core/sysctl.py | 18 +- .../tests/charmhelpers/core/unitdata.py | 9 +- ceph-proxy/tox.ini | 2 +- 19 files changed, 594 insertions(+), 140 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index b753275d..6184abd0 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if src.startswith('cloud:'): ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13e60a6..76828201 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -291,7 +291,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): class ReplicatedPool(Pool): def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0): + percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas if pg_num: @@ -301,6 +301,10 @@ def __init__(self, service, name, pg_num=None, replicas=2, self.pg_num = min(pg_num, max_pgs) else: self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -313,6 +317,12 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -320,10 +330,14 @@ def create(self): # Default jerasure erasure coded pool class ErasurePool(Pool): def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0): + percent_data=10.0, app_name=None): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def 
create(self): if not pool_exists(self.service, self.name): @@ -355,6 +369,12 @@ def create(self): 'erasure', self.erasure_code_profile] try: check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -778,6 +798,25 @@ def update_pool(client, pool, settings): check_call(cmd) +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if ceph_version() >= '12.0.0': + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py index 79a7a245..c8bde692 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py index c9428894..6f846b05 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,3 +67,19 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
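Note on the pool changes above: threading app_name through ReplicatedPool and ErasurePool lets charms tag pools via 'ceph osd pool application enable' on Luminous and later, with untagged pools falling back to 'unknown'. One caveat as committed: in both except branches, level=WARNING is passed to str.format(), where it is silently ignored, rather than to log(), so the warning goes out at the default log level; the intended call is log('Could not set app name for pool {}'.format(self.name), level=WARNING). A usage sketch, with the client and pool names invented:

    # Sketch only: create a replicated pool tagged for RBD use.
    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    pool = ReplicatedPool(service='admin', name='vms',
                          replicas=3, app_name='rbd')
    pool.create()
    # On Ceph >= 12.0.0, create() additionally runs:
    #   ceph --id admin osd pool application enable vms rbd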
+ :param device: Full path to device to format + :ptype device: tr + :param force: Force operation + :ptype: force: boolean""" + cmd = ['mkfs.xfs'] + if force: + cmd.append("-f") + + cmd += ['-i', 'size=1024', device] + check_call(cmd) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 7ed1cc4e..627d8f79 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ def _implicit_save(self): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. 
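Note: the new mkfs_xfs helper wraps mkfs.xfs with a fixed 1024-byte inode size and, by default, refuses to clobber an existing filesystem (its docstring's ':ptype device: tr' is a typo for 'str'). A usage sketch; the device path is illustrative:

    from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

    mkfs_xfs('/dev/vdb')               # runs: mkfs.xfs -i size=1024 /dev/vdb
    mkfs_xfs('/dev/vdb', force=True)   # runs: mkfs.xfs -f -i size=1024 /dev/vdb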
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. 
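Note: network_get now raises NotImplementedError up front based on the Juju version instead of parsing stderr after a failed call: Juju older than 2.2 cannot serve the query at all, and the -r flag used when a relation id is supplied needs 2.3. A defensive-call sketch; the endpoint and relation id are placeholders:

    from charmhelpers.core.hookenv import network_get

    try:
        details = network_get('public', relation_id='ceph-mon:1')
    except NotImplementedError:
        # Juju is too old for network-get (or for its -r option);
        # fall back to e.g. unit-get private-address.
        details = None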
+ + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
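Note: the new egress_subnets helper always returns CIDR strings, trying egress-subnets first, then ingress-address, then private-address. Worked examples of the normalisation (all relation settings invented):

    # {'egress-subnets': '10.0.0.0/24, 10.5.0.5/32'}
    #     -> ['10.0.0.0/24', '10.5.0.5/32']
    # {'ingress-address': '10.0.0.10'}
    #     -> ['10.0.0.10/32']        (bare IPv4 widened to /32)
    # {'private-address': '2001:db8::f00f'}
    #     -> ['2001:db8::f00f/128']  (bare IPv6 widened to /128)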
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/hooks/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-proxy/hooks/charmhelpers/core/services/base.py +++ b/ceph-proxy/hooks/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/hooks/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-proxy/hooks/charmhelpers/core/sysctl.py +++ b/ceph-proxy/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/hooks/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-proxy/hooks/charmhelpers/core/unitdata.py +++ b/ceph-proxy/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
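Note: sysctl.create() now accepts a ready dict as well as the YAML string it previously required, saving callers a serialise/parse round trip; the two calls below are equivalent (the target path is made up). In the same sync, modulo_distribution(modulo=3, wait=30, non_zero_wait=True) returns 90 rather than 0 for units whose number is a multiple of three, keeping them out of the first dispatch wave.

    from charmhelpers.core.sysctl import create

    create({'kernel.pid_max': 4194303}, '/etc/sysctl.d/50-ceph-charm.conf')
    create("{kernel.pid_max: 4194303}", '/etc/sysctl.d/50-ceph-charm.conf')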
+ + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index 910e96a6..653d58f1 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index a1e96f12..1ea9a8d2 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -71,7 +71,7 @@ def _configure_services(self): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { - 'osd-reformat': 'yes', + 'osd-reformat': True, 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..84e87f5d 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. 
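Note: with the release attributes now generated from OPENSTACK_RELEASES_PAIRS, _get_openstack_release() stays in lockstep with the list and ordinal comparisons keep working as new pairs are appended. A sketch of what the enumeration yields:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OPENSTACK_RELEASES_PAIRS,
    )

    # setattr(self, os_pair, i) produces, for example:
    #   self.trusty_icehouse == 0, ..., self.xenial_queens == 11,
    #   self.bionic_queens == 12
    assert OPENSTACK_RELEASES_PAIRS.index('xenial_queens') == 11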
@@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password, project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = 
keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index 7ed1cc4e..627d8f79 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ def _implicit_save(self): self.save() -@cached 
+_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. 
- if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. 
['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. @return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-proxy/tests/charmhelpers/core/services/base.py b/ceph-proxy/tests/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-proxy/tests/charmhelpers/core/services/base.py +++ b/ceph-proxy/tests/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-proxy/tests/charmhelpers/core/sysctl.py b/ceph-proxy/tests/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-proxy/tests/charmhelpers/core/sysctl.py +++ 
b/ceph-proxy/tests/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-proxy/tests/charmhelpers/core/unitdata.py b/ceph-proxy/tests/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-proxy/tests/charmhelpers/core/unitdata.py +++ b/ceph-proxy/tests/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 4f69e8ee..09ca045d 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-queens --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy [testenv:func27-dfs] # Charm Functional Test From a791ab18bcf5233ff8375830a0d24b48835f9057 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 8 May 2018 11:52:03 -0700 Subject: [PATCH 1486/2699] Enable Bionic as a gate test Change bionic test from dev to gate for 18.05. Change-Id: Iaddaede55c19231dc23941015c3d8dd7347d583e --- ceph-fs/src/tests/basic_deployment.py | 2 +- .../tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} | 0 ceph-fs/src/tox.ini | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename ceph-fs/src/tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} (100%) diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 5827ff6b..44fbc21c 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -94,7 +94,7 @@ def _configure_services(self, **kwargs): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. 
ceph_osd_config = { - 'osd-reformat': 'yes', + 'osd-reformat': True, 'ephemeral-unmount': '/mnt', 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent', 'source': self.source, diff --git a/ceph-fs/src/tests/dev-basic-bionic-queens b/ceph-fs/src/tests/gate-basic-bionic-queens similarity index 100% rename from ceph-fs/src/tests/dev-basic-bionic-queens rename to ceph-fs/src/tests/gate-basic-bionic-queens diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 6ca8ba23..628b3909 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -35,7 +35,7 @@ commands = # Run a specific test as an Amulet smoke test (expected to always pass) basepython = python2.7 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy [testenv:func27-dfs] # Run all deploy-from-source tests which are +x (may not always pass!) From 7388ed001699d247a6f746e673a83e03d3c66a35 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 8 May 2018 11:52:21 -0700 Subject: [PATCH 1487/2699] Enable Bionic as a gate test Change bionic test from dev to gate for 18.05. Change-Id: Ifbe70a78f93ca0aaeb8491130ef9b6567c1a3d78 --- ceph-osd/tests/basic_deployment.py | 94 +++++++++++++++---- ...bionic-queens => gate-basic-bionic-queens} | 0 ceph-osd/tests/gate-basic-xenial-queens | 25 +++++ ceph-osd/tox.ini | 2 +- 4 files changed, 100 insertions(+), 21 deletions(-) rename ceph-osd/tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} (100%) create mode 100755 ceph-osd/tests/gate-basic-xenial-queens diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 5dcaa318..2fcdf9b0 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -16,6 +16,11 @@ import amulet import time + +import keystoneclient +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import client as nova_client + from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -146,10 +151,9 @@ def _initialize_tests(self): self._get_openstack_release_string())) # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) # Authenticate admin with cinder endpoint self.cinder = u.authenticate_cinder_admin(self.keystone) @@ -157,37 +161,87 @@ def _initialize_tests(self): self.glance = u.authenticate_glance_admin(self.keystone) # Authenticate admin with nova endpoint - self.nova = u.authenticate_nova_user(self.keystone, - user='admin', - password='openstack', - tenant='admin') + self.nova = nova_client.Client(2, session=self.keystone_session) + + keystone_ip = self.keystone_sentry.info['public-address'] # Create a demo tenant/role/user self.demo_tenant = 'demoTenant' self.demo_role = 'demoRole' self.demo_user = 'demoUser' + self.demo_project = 'demoProject' + self.demo_domain = 'demoDomain' + if self._get_openstack_release() >= self.xenial_queens: + self.create_users_v3() + self.demo_user_session, auth = u.get_keystone_session( + keystone_ip, + self.demo_user, + 'password', + api_version=3, + user_domain_name=self.demo_domain, + project_domain_name=self.demo_domain, + project_name=self.demo_project + ) + self.keystone_demo = keystone_client_v3.Client( + 
session=self.demo_user_session) + self.nova_demo = nova_client.Client( + 2, + session=self.demo_user_session) + else: + self.create_users_v2() + # Authenticate demo user with keystone + self.keystone_demo = \ + u.authenticate_keystone_user( + self.keystone, user=self.demo_user, + password='password', + tenant=self.demo_tenant) + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + user=self.demo_user, + password='password', + tenant=self.demo_tenant) + + def create_users_v3(self): + try: + self.keystone.projects.find(name=self.demo_project) + except keystoneclient.exceptions.NotFound: + domain = self.keystone.domains.create( + self.demo_domain, + description='Demo Domain', + enabled=True + ) + project = self.keystone.projects.create( + self.demo_project, + domain, + description='Demo Project', + enabled=True, + ) + user = self.keystone.users.create( + self.demo_user, + domain=domain.id, + project=self.demo_project, + password='password', + email='demov3@demo.com', + description='Demo', + enabled=True) + role = self.keystone.roles.find(name='Admin') + self.keystone.roles.grant( + role.id, + user=user.id, + project=project.id) + + def create_users_v2(self): if not u.tenant_exists(self.keystone, self.demo_tenant): tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, description='demo tenant', enabled=True) + self.keystone.roles.create(name=self.demo_role) self.keystone.users.create(name=self.demo_user, password='password', tenant_id=tenant.id, email='demo@demo.com') - # Authenticate demo user with keystone - self.keystone_demo = u.authenticate_keystone_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - def test_100_ceph_processes(self): """Verify that the expected service processes are running on each ceph unit.""" diff --git a/ceph-osd/tests/dev-basic-bionic-queens b/ceph-osd/tests/gate-basic-bionic-queens similarity index 100% rename from ceph-osd/tests/dev-basic-bionic-queens rename to ceph-osd/tests/gate-basic-bionic-queens diff --git a/ceph-osd/tests/gate-basic-xenial-queens b/ceph-osd/tests/gate-basic-xenial-queens new file mode 100755 index 00000000..61c7b06d --- /dev/null +++ b/ceph-osd/tests/gate-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
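Note: taken together, the test changes in this patch move admin auth onto keystoneauth sessions: a single session authenticates keystone, nova and cinder, and the v2/v3 split keys off the release ordinal (11 being xenial_queens). A condensed sketch of the flow, reusing the names from the test code above:

    from novaclient import client as nova_client

    # Inside an amulet deployment's _initialize_tests() (sketch):
    self.keystone_session, self.keystone = u.get_default_keystone_session(
        self.keystone_sentry,
        openstack_release=self._get_openstack_release())
    self.cinder = u.authenticate_cinder_admin(self.keystone)
    self.nova = nova_client.Client(2, session=self.keystone_session)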
+ +"""Amulet tests on a basic ceph-osd deployment on xenial-queens.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index e5d01d8a..6c223662 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -68,7 +68,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy [testenv:func27-dfs] # Charm Functional Test From 359d8613c32116e4258418d1f97865b5f2065995 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 8 May 2018 11:52:12 -0700 Subject: [PATCH 1488/2699] Enable Bionic as a gate test Change bionic test from dev to gate for 18.05. Change-Id: Iaff111e3ac3802481448ac95b936cf043a441fc0 --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/openstack/vaultlocker.py | 78 +++++++++++---- .../contrib/storage/linux/utils.py | 16 ++++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 8 +- ceph-mon/hooks/charmhelpers/core/sysctl.py | 18 ++-- ceph-mon/tests/basic_deployment.py | 94 +++++++++++++++---- ceph-mon/tests/charmhelpers/core/hookenv.py | 8 +- ceph-mon/tests/charmhelpers/core/sysctl.py | 18 ++-- ...bionic-queens => gate-basic-bionic-queens} | 0 ceph-mon/tests/gate-basic-xenial-queens | 25 +++++ ceph-mon/tox.ini | 2 +- 11 files changed, 211 insertions(+), 58 deletions(-) rename ceph-mon/tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} (100%) create mode 100755 ceph-mon/tests/gate-basic-xenial-queens diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e7194264..6184abd0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if src.startswith('cloud:'): ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 0b78e7a4..a8e4bf88 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -21,6 +21,9 @@ import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as host import charmhelpers.core.templating as templating +import charmhelpers.core.unitdata as unitdata + +VAULTLOCKER_BACKEND = 'charm-vaultlocker' class VaultKVContext(context.OSContextGenerator): @@ -34,30 +37,39 @@ def __init__(self, secret_backend=None): ) def __call__(self): + db = unitdata.kv() + last_token = db.get('last-token') + secret_id = db.get('secret-id') for relation_id in hookenv.relation_ids(self.interfaces[0]): for unit in hookenv.related_units(relation_id): - vault_url = hookenv.relation_get( - 'vault_url', - unit=unit, - rid=relation_id - ) - role_id = hookenv.relation_get( - '{}_role_id'.format(hookenv.local_unit()), - unit=unit, - rid=relation_id - ) - - if vault_url and role_id: + data = hookenv.relation_get(unit=unit, + rid=relation_id) + 
vault_url = data.get('vault_url') + role_id = data.get('{}_role_id'.format(hookenv.local_unit())) + token = data.get('{}_token'.format(hookenv.local_unit())) + + if all([vault_url, role_id, token]): + token = json.loads(token) + vault_url = json.loads(vault_url) + + # Tokens may change when secret_id's are being + # reissued - if so use token to get new secret_id + if token != last_token: + secret_id = retrieve_secret_id( + url=vault_url, + token=token + ) + db.set('secret-id', secret_id) + db.set('last-token', token) + db.flush() + ctxt = { - 'vault_url': json.loads(vault_url), + 'vault_url': vault_url, 'role_id': json.loads(role_id), + 'secret_id': secret_id, 'secret_backend': self.secret_backend, } - vault_ca = hookenv.relation_get( - 'vault_ca', - unit=unit, - rid=relation_id - ) + vault_ca = data.get('vault_ca') if vault_ca: ctxt['vault_ca'] = json.loads(vault_ca) self.complete = True @@ -82,3 +94,33 @@ def write_vaultlocker_conf(context, priority=100): alternatives.install_alternative('vaultlocker.conf', '/etc/vaultlocker/vaultlocker.conf', charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + client = hvac.Client(url=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index c9428894..6f846b05 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,3 +67,19 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
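+ Pass force=True to reformat a device that already carries a + filesystem; this adds the -f flag to mkfs.xfs.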
+ :param device: Full path to device to format + :ptype device: str + :param force: Force operation + :ptype force: boolean""" + cmd = ['mkfs.xfs'] + if force: + cmd.append("-f") + + cmd += ['-i', 'size=1024', device] + check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e9df1509..627d8f79 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -290,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -310,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 5929f8c3..3d7d4d67 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -19,6 +19,10 @@ import time import json +import keystoneclient +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import client as nova_client + from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment ) @@ -150,47 +154,97 @@ def _initialize_tests(self): self._get_openstack_release_string())) # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + # Authenticate admin with cinder endpoint self.cinder = u.authenticate_cinder_admin(self.keystone) # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) # Authenticate admin with nova endpoint - self.nova = u.authenticate_nova_user(self.keystone, - user='admin', - password='openstack', - 
tenant='admin') + self.nova = nova_client.Client(2, session=self.keystone_session) + + keystone_ip = self.keystone_sentry.info['public-address'] # Create a demo tenant/role/user self.demo_tenant = 'demoTenant' self.demo_role = 'demoRole' self.demo_user = 'demoUser' + self.demo_project = 'demoProject' + self.demo_domain = 'demoDomain' + if self._get_openstack_release() >= self.xenial_queens: + self.create_users_v3() + self.demo_user_session, auth = u.get_keystone_session( + keystone_ip, + self.demo_user, + 'password', + api_version=3, + user_domain_name=self.demo_domain, + project_domain_name=self.demo_domain, + project_name=self.demo_project + ) + self.keystone_demo = keystone_client_v3.Client( + session=self.demo_user_session) + self.nova_demo = nova_client.Client( + 2, + session=self.demo_user_session) + else: + self.create_users_v2() + # Authenticate demo user with keystone + self.keystone_demo = \ + u.authenticate_keystone_user( + self.keystone, user=self.demo_user, + password='password', + tenant=self.demo_tenant) + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + user=self.demo_user, + password='password', + tenant=self.demo_tenant) + + def create_users_v3(self): + try: + self.keystone.projects.find(name=self.demo_project) + except keystoneclient.exceptions.NotFound: + domain = self.keystone.domains.create( + self.demo_domain, + description='Demo Domain', + enabled=True + ) + project = self.keystone.projects.create( + self.demo_project, + domain, + description='Demo Project', + enabled=True, + ) + user = self.keystone.users.create( + self.demo_user, + domain=domain.id, + project=self.demo_project, + password='password', + email='demov3@demo.com', + description='Demo', + enabled=True) + role = self.keystone.roles.find(name='Admin') + self.keystone.roles.grant( + role.id, + user=user.id, + project=project.id) + + def create_users_v2(self): if not u.tenant_exists(self.keystone, self.demo_tenant): tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, description='demo tenant', enabled=True) + self.keystone.roles.create(name=self.demo_role) self.keystone.users.create(name=self.demo_user, password='password', tenant_id=tenant.id, email='demo@demo.com') - # Authenticate demo user with keystone - self.keystone_demo = u.authenticate_keystone_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - def test_100_ceph_processes(self): """Verify that the expected service processes are running on each ceph unit.""" diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index e9df1509..627d8f79 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -290,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -310,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in 
copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-mon/tests/charmhelpers/core/sysctl.py b/ceph-mon/tests/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-mon/tests/charmhelpers/core/sysctl.py +++ b/ceph-mon/tests/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-mon/tests/dev-basic-bionic-queens b/ceph-mon/tests/gate-basic-bionic-queens similarity index 100% rename from ceph-mon/tests/dev-basic-bionic-queens rename to ceph-mon/tests/gate-basic-bionic-queens diff --git a/ceph-mon/tests/gate-basic-xenial-queens b/ceph-mon/tests/gate-basic-xenial-queens new file mode 100755 index 00000000..5fa16a57 --- /dev/null +++ b/ceph-mon/tests/gate-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on xenial-queens.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 2bbbc97d..9c21d57e 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -65,7 +65,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy [testenv:func27-dfs] # Charm Functional Test From 29922da379e5cd30d06e99613e29fc2face24d95 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 9 Apr 2018 12:32:18 +0100 Subject: [PATCH 1489/2699] Add support for vault key management with vaultlocker vaultlocker provides support for storage of encryption keys for LUKS based dm-crypt device in Hashicorp Vault. Add support for this key management approach for Ceph Luminous or later. 
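As an illustrative sketch of such a deployment (the vault charm's 'secrets' endpoint name is assumed here), vault-backed key management is enabled via charm config plus a relation: 'juju config ceph-osd osd-encrypt=true osd-encrypt-keymanager=vault', followed by 'juju add-relation ceph-osd:secrets-storage vault:secrets'.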
Applications will block until vault has been initialized and unsealed, at which point OSD devices will be prepared and booted into the Ceph cluster. The dm-crypt layer is placed between the block device partition and the top level LVM PV used to create VGs and LVs to support OSD operation. Vaultlocker enables a systemd unit for each encrypted block device to perform unlocking during reboots of the unit; ceph-volume will then detect the new VG/LVs and boot the ceph-osd processes as required. Note that vault/vaultlocker usage is only supported with ceph-volume, which was introduced into the Ubuntu packages as of the 12.2.4 point release for Luminous. If vault is configured as the key manager in deployments using older versions, a hook error will be thrown with a blocked status message to this effect. Change-Id: I713492d1fd8d371439e96f9eae824b4fe7260e47 Depends-On: If73e7bd518a7bc60c2db08e2aa3a93dcfe79c0dd Depends-On: https://github.com/juju/charm-helpers/pull/159 --- ceph-osd/.gitignore | 2 + ceph-osd/README.md | 46 +- ceph-osd/charm-helpers-hooks.yaml | 2 +- ceph-osd/config.yaml | 13 +- ceph-osd/hooks/ceph_hooks.py | 83 +- .../contrib/openstack/amulet/__init__.py | 13 + .../contrib/openstack/amulet/deployment.py | 354 ++++ .../contrib/openstack/amulet/utils.py | 1515 +++++++++++++++++ .../contrib/openstack/files/__init__.py | 16 + .../contrib/openstack/ha/__init__.py | 13 + .../contrib/openstack/ha/utils.py | 265 +++ .../contrib/openstack/keystone.py | 178 ++ .../contrib/openstack/templates/__init__.py | 16 + .../contrib/openstack/templating.py | 379 +++++ .../contrib/openstack/vaultlocker.py | 126 ++ .../hooks/secrets-storage-relation-broken | 1 + .../hooks/secrets-storage-relation-changed | 1 + .../hooks/secrets-storage-relation-departed | 1 + .../hooks/secrets-storage-relation-joined | 1 + ceph-osd/lib/ceph/utils.py | 26 +- ceph-osd/metadata.yaml | 2 + ceph-osd/templates/vaultlocker.conf.j2 | 6 + .../contrib/openstack/amulet/utils.py | 2 + ceph-osd/unit_tests/test_ceph_hooks.py | 79 + ceph-osd/unit_tests/test_status.py | 39 +- 25 files changed, 3157 insertions(+), 22 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/files/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/ha/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/templates/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py create mode 120000 ceph-osd/hooks/secrets-storage-relation-broken create mode 120000 ceph-osd/hooks/secrets-storage-relation-changed create mode 120000 ceph-osd/hooks/secrets-storage-relation-departed create mode 120000 ceph-osd/hooks/secrets-storage-relation-joined create mode 100644 ceph-osd/templates/vaultlocker.conf.j2 diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index c3fd0b63..53bc7bb1 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -9,3 +9,5 @@ bin .unit-state.db .idea func-results.json +*__pycache__ +.settings diff --git a/ceph-osd/README.md b/ceph-osd/README.md index
b0271061..edc26e05 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -10,32 +10,32 @@ available in a Ceph cluster. Usage ===== - + The charm also supports specification of the storage devices to use in the ceph cluster:: osd-devices: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. - + This can be a superset of the actual storage devices presented to each service unit and can be changed post ceph-osd deployment using `juju set`. -For example:: +For example:: ceph-osd: osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde - + Boot things up by using:: juju deploy -n 3 --config ceph.yaml ceph - + You can then deploy this charm by simply doing:: juju deploy -n 10 --config ceph.yaml ceph-osd juju add-relation ceph-osd ceph - + Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd charm which will scan for the configured storage devices and add them to the pool of available storage. @@ -80,9 +80,39 @@ The AppArmor profile(s) which are generated by the charm should NOT yet be used - With Bluestore enabled. +Block Device Encryption +======================= + +The ceph-osd charm supports encryption of underlying block devices supporting OSDs. + +To use the 'native' key management approach (where dm-crypt keys are stored in the +ceph-mon cluster), simply set the 'osd-encrypt' configuration option:: + + ceph-osd: + options: + osd-encrypt: True + +**NOTE:** This is supported for Ceph Jewel or later. + +Alternatively, encryption keys can be stored in Vault; this requires deployment of +the vault charm (and associated initialization of vault - see the Vault charm for +details) and configuration of the 'osd-encrypt' and 'osd-encrypt-keymanager' +options:: + + ceph-osd: + options: + osd-encrypt: True + osd-encrypt-keymanager: vault + +**NOTE:** This option is only supported with Ceph Luminous or later. + +**NOTE:** Changing these options post deployment will only take effect for any +new block devices added to the ceph-osd application; existing OSD devices will +not be encrypted. + Contact Information =================== Author: James Page -Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug -Location: http://jujucharms.com/charms/ceph-osd +Report bugs at: http://bugs.launchpad.net/charm-ceph-osd/+filebug +Location: http://jujucharms.com/ceph-osd diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index ab7e3bad..b7fd6950 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -10,7 +10,7 @@ include: - cluster - contrib.python.packages - contrib.storage.linux - - contrib.openstack.alternatives + - contrib.openstack - contrib.network.ip - contrib.openstack: - alternatives diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b15c261f..88bf5776 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -148,6 +148,17 @@ options: . Specifying this option on a running Ceph OSD node will have no effect until new disks are added, at which point new disks will be encrypted. + osd-encrypt-keymanager: + type: string + default: ceph + description: | + Keymanager to use for storage of dm-crypt keys used for OSD devices; + by default 'ceph' itself will be used for storage of keys, making use + of the key/value storage provided by the ceph-mon cluster. + . + Alternatively 'vault' may be used for storage of dm-crypt keys. Both + approaches ensure that keys are never written to the local filesystem.
+ This also requires a relation to the vault charm. crush-initial-weight: type: float default: @@ -248,7 +259,7 @@ options: Availability Zone instead of specifically by host. availability_zone: type: string - default: + default: description: | Custom availability zone to provide to Ceph for the OSD placement max-sectors-kb: diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f5b13f3d..d0681189 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -13,6 +13,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import base64 +import json import glob import os import shutil @@ -34,6 +37,7 @@ relation_ids, related_units, relation_get, + relation_set, Hooks, UnregisteredHookError, service_name, @@ -49,7 +53,8 @@ service_reload, service_restart, add_to_updatedb_prunepath, - restart_on_change + restart_on_change, + write_file, ) from charmhelpers.fetch import ( add_source, @@ -59,7 +64,9 @@ get_upstream_version, ) from charmhelpers.core.sysctl import create as create_sysctl -from charmhelpers.contrib.openstack.context import AppArmorContext +from charmhelpers.contrib.openstack.context import ( + AppArmorContext, +) from utils import ( get_host_ip, get_networks, @@ -74,12 +81,15 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, + get_relation_ip, ) from charmhelpers.contrib.storage.linux.ceph import ( CephConfContext) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +import charmhelpers.contrib.openstack.vaultlocker as vaultlocker + hooks = Hooks() STORAGE_MOUNT_PATH = '/var/lib/ceph' @@ -170,6 +180,23 @@ def __call__(self): return self.ctxt +def use_vaultlocker(): + """Determine whether vaultlocker should be used for OSD encryption + + :returns: whether vaultlocker should be used for key management + :rtype: bool + :raises: ValueError if vaultlocker is enabled but ceph < 12.2.4""" + if (config('osd-encrypt') and + config('osd-encrypt-keymanager') == ceph.VAULT_KEY_MANAGER): + if cmp_pkgrevno('ceph', '12.2.4') < 0: + msg = ('vault usage only supported with ceph >= 12.2.4') + status_set('blocked', msg) + raise ValueError(msg) + else: + return True + return False + + def install_apparmor_profile(): """ Install ceph apparmor profiles and configure @@ -365,6 +392,14 @@ def check_overlap(journaldevs, datadevs): @hooks.hook('config-changed') @harden() def config_changed(): + # Determine whether vaultlocker is required and install + if use_vaultlocker(): + installed = len(filter_installed_packages(['vaultlocker'])) == 0 + if not installed: + add_source('ppa:openstack-charmers/vaultlocker') + apt_update(fatal=True) + apt_install('vaultlocker', fatal=True) + # Check if an upgrade was requested check_for_upgrade() @@ -390,6 +425,18 @@ def config_changed(): @hooks.hook('storage.real') def prepare_disks_and_activate(): + # NOTE: vault/vaultlocker preflight check + vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND) + context = vault_kv() + if use_vaultlocker() and not vault_kv.complete: + log('Deferring OSD preparation as vault not ready', + level=DEBUG) + return + elif use_vaultlocker() and vault_kv.complete: + log('Vault ready, writing vaultlocker configuration', + level=DEBUG) + vaultlocker.write_vaultlocker_conf(context) + osd_journal = get_journal_devices() check_overlap(osd_journal, set(get_devices())) log("got 
journal devs: {}".format(osd_journal), level=DEBUG) @@ -407,7 +454,8 @@ def prepare_disks_and_activate(): osd_journal, config('osd-reformat'), config('ignore-device-errors'), config('osd-encrypt'), - config('bluestore')) + config('bluestore'), + config('osd-encrypt-keymanager')) # Make it fast! if config('autotune'): ceph.tune_dev(dev) @@ -536,6 +584,26 @@ def update_nrpe_config(): nrpe_setup.write() +@hooks.hook('secrets-storage-relation-joined') +def secrets_storage_joined(relation_id=None): + relation_set(relation_id=relation_id, + secret_backend='charm-vaultlocker', + isolated=True, + access_address=get_relation_ip('secrets-storage'), + hostname=socket.gethostname()) + + +@hooks.hook('secrets-storage-relation-changed') +def secrets_storage_changed(): + vault_ca = relation_get('vault_ca') + if vault_ca: + vault_ca = base64.decodestring(json.loads(vault_ca).encode()) + write_file('/usr/local/share/ca-certificates/vault-ca.crt', + vault_ca, perms=0o644) + subprocess.check_call(['update-ca-certificates', '--fresh']) + prepare_disks_and_activate() + + VERSION_PACKAGE = 'ceph-common' @@ -559,6 +627,15 @@ def assess_status(): status_set('waiting', 'Incomplete relation: monitor') return + # Check for vault + if use_vaultlocker(): + if not relation_ids('secrets-storage'): + status_set('blocked', 'Missing relation: vault') + return + if not vaultlocker.vault_relation_complete(): + status_set('waiting', 'Incomplete relation: vault') + return + # Check for OSD device creation parity i.e. at least some devices # must have been presented and used for this charm to be operational running_osds = ceph.get_running_osds() diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..66beeda2 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,354 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresponding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], + } + + for svc in other_services: + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) + else: + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) + + return other_services + + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive.
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + use_source = use_source or [] + no_origin = no_origin or [] + + # Charms which should use the source config option + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy', 'percona-cluster', 'lxd'])) + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=None): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjunction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjunction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. + """ + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...'
+ ''.format(timeout)) + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait(timeout=timeout) + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) + # Check for ready messages + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) + + releases = { + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, + ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
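+ e.g. 'queens' when the series is bionic, or when the install + source is 'cloud:xenial-queens'.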
+ """ + releases = OrderedDict([ + ('trusty', 'icehouse'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder-ceph', + 'glance' + ] + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..ef785423 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1515 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import amulet +import json +import logging +import os +import re +import six +import time +import urllib +import urlparse + +import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 +import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions + +import novaclient.client as nova_client +import novaclient +import pika +import swiftclient + +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) +from charmhelpers.core.host import CompareHostReleases + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. 
+ """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, expected_num_eps=3): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != expected_num_eps: + return 'Unexpected number of endpoints found' + + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. 
+ + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. 
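+ + With api_version=3 the 'default_project_id' attribute is checked + in place of the v2-only 'tenantId' attribute.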
+ """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + @retry_on_exception(num_retries=5, base_delay=1) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel.get('api_version') != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel.get('api_version'), api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
+ """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + + def authenticate_cinder_admin(self, keystone, api_version=2): + """Authenticates admin user with cinder.""" + self.log.debug('Authenticating cinder admin...') + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](session=keystone.session) + + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + else: + auth = v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + 
project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: + user_domain_name = 'admin_domain' + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + interface='adminURL') + if keystone.session: + return glance_client.Client(ep, session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return 
swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + # Create glance image + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Image validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return image
+
+    def delete_image(self, glance, image):
+        """Delete the specified image."""
+
+        # /!\ DEPRECATION WARNING
+        self.log.warn('/!\\ DEPRECATION WARNING: use '
+                      'delete_resource instead of delete_image.')
+        self.log.debug('Deleting glance image ({})...'.format(image))
+        return self.delete_resource(glance.images, image, msg='glance image')
+
+    def create_instance(self, nova, image_name, instance_name, flavor):
+        """Create the specified instance."""
+        self.log.debug('Creating instance '
+                       '({}|{}|{})'.format(instance_name, image_name, flavor))
+        image = nova.glance.find_image(image_name)
+        flavor = nova.flavors.find(name=flavor)
+        instance = nova.servers.create(name=instance_name, image=image,
+                                       flavor=flavor)
+
+        count = 1
+        status = instance.status
+        while status != 'ACTIVE' and count < 60:
+            time.sleep(3)
+            instance = nova.servers.get(instance.id)
+            status = instance.status
+            self.log.debug('instance status: {}'.format(status))
+            count += 1
+
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
+            return None
+
+        return instance
+
+    def delete_instance(self, nova, instance):
+        """Delete the specified instance."""
+
+        # /!\ DEPRECATION WARNING
+        self.log.warn('/!\\ DEPRECATION WARNING: use '
+                      'delete_resource instead of delete_instance.')
+        self.log.debug('Deleting instance ({})...'.format(instance))
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')
+
+    def create_or_get_keypair(self, nova, keypair_name="testkey"):
+        """Create a new keypair, or return pointer if it already exists."""
+        try:
+            _keypair = nova.keypairs.get(keypair_name)
+            self.log.debug('Keypair ({}) already exists, '
+                           'using it.'.format(keypair_name))
+            return _keypair
+        except Exception:
+            self.log.debug('Keypair ({}) does not exist, '
+                           'creating it.'.format(keypair_name))
+
+        _keypair = nova.keypairs.create(name=keypair_name)
+        return _keypair
+
+    def _get_cinder_obj_name(self, cinder_object):
+        """Retrieve name of cinder object.
+
+        :param cinder_object: cinder snapshot or volume object
+        :returns: str cinder object name
+        """
+        # v1 objects store name in 'display_name' attr but v2+ use 'name'
+        try:
+            return cinder_object.display_name
+        except AttributeError:
+            return cinder_object.name
+
+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot.  Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
+ + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. + + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. 
+ + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+
+        :param sentry_units: list of sentry unit pointers (all rmq units)
+        :returns: None if successful, otherwise return error message
+        """
+        host_names = self.get_unit_hostnames(sentry_units)
+        errors = []
+
+        # Query every unit for cluster_status running nodes
+        for query_unit in sentry_units:
+            query_unit_name = query_unit.info['unit_name']
+            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+            # Confirm that every unit is represented in the queried unit's
+            # cluster_status running nodes output.
+            for validate_unit in sentry_units:
+                val_host_name = host_names[validate_unit.info['unit_name']]
+                val_node_name = 'rabbit@{}'.format(val_host_name)
+
+                if val_node_name not in running_nodes:
+                    errors.append('Cluster member check failed on {}: {} not '
+                                  'in {}\n'.format(query_unit_name,
+                                                   val_node_name,
+                                                   running_nodes))
+        if errors:
+            return ''.join(errors)
+
+    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+        """Check a single juju rmq unit for ssl and port in the config file."""
+        host = sentry_unit.info['public-address']
+        unit_name = sentry_unit.info['unit_name']
+
+        conf_file = '/etc/rabbitmq/rabbitmq.config'
+        conf_contents = str(self.file_contents_safe(sentry_unit,
+                                                    conf_file, max_wait=16))
+        # Checks
+        conf_ssl = 'ssl' in conf_contents
+        conf_port = str(port) in conf_contents
+
+        # Port explicitly checked in config
+        if port and conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif port and not conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{} but not on port {} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        # Port not checked (useful when checking that ssl is disabled)
+        elif not port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif not conf_ssl:
+            self.log.debug('SSL not enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        else:
+            msg = ('Unknown condition when checking SSL status @{}:{} '
+                   '({})'.format(host, port, unit_name))
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+        """Check that ssl is enabled on rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on the listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification.  Confirm that it is enabled on every
+        unit.
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.d.configure('rabbitmq-server', config) + + # Wait for unit status + self.rmq_wait_for_cluster(deployment) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+
+        :param sentry_unit: sentry unit pointer
+        :param message: amqp message string
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: None.  Raises exception if publish failed.
+        """
+        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+                                                                    message))
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+
+        # NOTE(beisner): extra debug here re: pika hang potential:
+        #   https://github.com/pika/pika/issues/297
+        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+        self.log.debug('Defining channel...')
+        channel = connection.channel()
+        self.log.debug('Declaring queue...')
+        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+        self.log.debug('Publishing message...')
+        channel.basic_publish(exchange='', routing_key=queue, body=message)
+        self.log.debug('Closing channel...')
+        channel.close()
+        self.log.debug('Closing connection...')
+        connection.close()
+
+    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+                                 username="testuser1",
+                                 password="changeme",
+                                 ssl=False, port=None):
+        """Get an amqp message from a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: amqp message body as string.  Raise if get fails.
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_memcache(self, sentry_unit, conf, os_release,
+                          earliest_release=5, section='keystone_authtoken',
+                          check_kvs=None):
+        """Check Memcache is running and is configured to be used
+
+        Example call from Amulet test:
+
+            def test_110_memcache(self):
+                u.validate_memcache(self.neutron_api_sentry,
+                                    '/etc/neutron/neutron.conf',
+                                    self._get_openstack_release())
+
+        :param sentry_unit: sentry unit
+        :param conf: OpenStack config file to check memcache settings
+        :param os_release: Current OpenStack release int code
+        :param earliest_release: Earliest Openstack release to check int code
+        :param section: OpenStack config file section to check
+        :param check_kvs: Dict of settings to check in config file
+        :returns: None
+        """
+        if os_release < earliest_release:
+            self.log.debug('Skipping memcache checks for deployment. '
+                           '{} < mitaka'.format(os_release))
+            return
+        _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+        self.log.debug('Checking memcached is running')
+        ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg='Memcache running check '
+                                'failed {}'.format(ret))
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache url is configured in {}'.format(
+            conf))
+        if self.validate_config_data(sentry_unit, conf, section, _kvs):
+            message = "Memcache config error in: {}".format(conf)
+            amulet.raise_status(amulet.FAIL, msg=message)
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache configuration in '
+                       '/etc/memcached.conf')
+        contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+                                           fatal=True)
+        ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+        if CompareHostReleases(ubuntu_release) <= 'trusty':
+            memcache_listen_addr = 'ip6-localhost'
+        else:
+            memcache_listen_addr = '::1'
+        expected = {
+            '-p': '11211',
+            '-l': memcache_listen_addr}
+        found = []
+        for key, value in expected.items():
+            for line in contents.split('\n'):
+                if line.startswith(key):
+                    self.log.debug('Checking {} is set to {}'.format(
+                        key,
+                        value))
+                    assert value == line.split()[-1]
+                    self.log.debug(line.split()[-1])
+                    found.append(key)
+        if sorted(found) == sorted(expected.keys()):
+            self.log.debug('OK')
+        else:
+            message = "Memcache config error in: /etc/memcached.conf"
+            amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 00000000..9df5f746
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/__init__.py
new file mode 100644
index 00000000..9b088de8
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
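[Editor's note] The ha/utils.py module added below hands resource definitions
to the hacluster charm as JSON-encoded relation settings, one 'json_<key>'
setting per non-empty dict. A minimal standalone sketch of that encoding,
using the JSON_ENCODE_OPTIONS from the diff (the 'cinder' service name is a
hypothetical example):

    import json

    JSON_ENCODE_OPTIONS = dict(
        sort_keys=True, allow_nan=False, indent=None, separators=(',', ':'))

    _relation_data = {
        'resources': {'res_cinder_haproxy': 'lsb:haproxy'},
        'resource_params': {'res_cinder_haproxy': 'op monitor interval="5s"'},
        'clones': {'cl_cinder_haproxy': 'res_cinder_haproxy'},
    }
    # Mirrors the dict comprehension at the end of generate_ha_relation_data()
    settings = {'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
                for k, v in _relation_data.items() if v}
    print(settings['json_resources'])  # {"res_cinder_haproxy":"lsb:haproxy"}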
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..6060ae50 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,265 @@ +# Copyright 2014-2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import json + +import re + +from charmhelpers.core.hookenv import ( + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, + WARNING, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check for VIP or dns-ha settings which indicate the unit should expect to + be related to hacluster. + + @returns boolean + """ + return config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ + @returns dict: json encoded data for use with relation_set + """ + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data = { + 'resources': { + _haproxy_res: 'lsb:haproxy', + }, + 'resource_params': { + _haproxy_res: 'op monitor interval="5s"' + }, + 'init_services': { + _haproxy_res: 'haproxy' + }, + 'clones': { + 'cl_{}_haproxy'.format(service): _haproxy_res + }, + } + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 'os-access-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine endpoint_type name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. 
+ """ + cluster_config = get_hacluster_config() + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) + + if iface is not None: + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vip_group: + if vip not in relation_data['resource_params'][vip_key]: + vip_key = '{}_{}'.format(vip_key, vip_params) + else: + log("Resource '%s' (vip='%s') already exists in " + "vip group - skipping" % (vip_key, vip), WARNING) + continue + + relation_data['resources'][vip_key] = res_neutron_vip + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + vip_group.append(vip_key) + + if len(vip_group) >= 1: + relation_data['groups'] = { + 'grp_{}_vips'.format(service): ' '.join(vip_group) + } diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 00000000..d7e02ccd --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
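+#
+# [Editor's sketch, not part of the original patch] Typical use of the
+# helpers defined below, assuming a reachable keystone endpoint and
+# hypothetical credentials:
+#
+#     ep = format_endpoint('http', '10.0.0.10', 5000, api_version=3)
+#     mgr = get_keystone_manager(ep, api_version=3, username='admin',
+#                                password='secret', tenant_name='admin')
+#     if mgr.service_exists(service_name='glance', service_type='image'):
+#         print(mgr.resolve_service_id('glance', 'image'))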
+
+import six
+from charmhelpers.fetch import apt_install
+from charmhelpers.contrib.openstack.context import IdentityServiceContext
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+
+def get_api_suffix(api_version):
+    """Return the formatted api suffix for the given version
+    @param api_version: version of the keystone endpoint
+    @returns the api suffix formatted according to the given api
+    version
+    """
+    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
+
+
+def format_endpoint(schema, addr, port, api_version):
+    """Return a formatted keystone endpoint
+    @param schema: http or https
+    @param addr: ipv4/ipv6 host of the keystone service
+    @param port: port of the keystone service
+    @param api_version: 2 or 3
+    @returns a fully formatted keystone endpoint
+    """
+    return '{}://{}:{}/{}/'.format(schema, addr, port,
+                                   get_api_suffix(api_version))
+
+
+def get_keystone_manager(endpoint, api_version, **kwargs):
+    """Return a keystonemanager for the correct API version
+
+    @param endpoint: the keystone endpoint to point client at
+    @param api_version: version of the keystone api the client should use
+    @param kwargs: token or username/tenant/password information
+    @returns keystonemanager class used for interrogating keystone
+    """
+    if api_version == 2:
+        return KeystoneManager2(endpoint, **kwargs)
+    if api_version == 3:
+        return KeystoneManager3(endpoint, **kwargs)
+    raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager_from_identity_service_context():
+    """Return a keystonemanager generated from an instance
+    of charmhelpers.contrib.openstack.context.IdentityServiceContext
+    @returns keystonemanager instance
+    """
+    context = IdentityServiceContext()()
+    if not context:
+        msg = "Identity service context cannot be generated"
+        log(msg, level=ERROR)
+        raise ValueError(msg)
+
+    endpoint = format_endpoint(context['service_protocol'],
+                               context['service_host'],
+                               context['service_port'],
+                               context['api_version'])
+
+    if context['api_version'] in (2, "2.0"):
+        api_version = 2
+    else:
+        api_version = 3
+
+    return get_keystone_manager(endpoint, api_version,
+                                username=context['admin_user'],
+                                password=context['admin_password'],
+                                tenant_name=context['admin_tenant_name'])
+
+
+class KeystoneManager(object):
+
+    def resolve_service_id(self, service_name=None, service_type=None):
+        """Find the service_id of a given service"""
+        services = [s._info for s in self.api.services.list()]
+
+        if service_name:
+            service_name = service_name.lower()
+        for s in services:
+            name = s['name'].lower()
+            if service_type and service_name:
+                if (service_name == name and service_type == s['type']):
+                    return s['id']
+            elif service_name and service_name == name:
+                return s['id']
+            elif service_type and service_type == s['type']:
+                return s['id']
+        return None
+
+    def service_exists(self, service_name=None, service_type=None):
+        """Determine if the given service exists on the service list"""
+        return self.resolve_service_id(service_name, service_type) is not None
+
+
+class KeystoneManager2(KeystoneManager):
+
+    def __init__(self, endpoint, **kwargs):
+        try:
+            from keystoneclient.v2_0 import client
+            from keystoneclient.auth.identity import v2
+            from keystoneclient import session
+        except ImportError:
+            if six.PY2:
+                apt_install(["python-keystoneclient"], fatal=True)
+            else:
+                apt_install(["python3-keystoneclient"], fatal=True)
+
+            from keystoneclient.v2_0 import client
+            from keystoneclient.auth.identity import v2
+            from keystoneclient import session
+
+
self.api_version = 2 + + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 00000000..9df5f746 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..a623315d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,379 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
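+#
+# [Editor's note, illustration only] get_loader() below builds a
+# newest-release-first search path. For templates_dir='/tmp/templates'
+# and os_release='havana' the effective lookup order is roughly:
+#
+#     /tmp/templates/havana
+#     /tmp/templates/grizzly      (plus any earlier release dirs present)
+#     /tmp/templates
+#     .../contrib/openstack/templates  (dir shipped with this helper)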
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO,
+    TRACE
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-jinja2', fatal=True)
+    else:
+        apt_install('python3-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release.  If a release-specific template directory
+    is missing from templates_dir, it is omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    # demote this log to the lowest level; we don't really need to see these
+    # logs in production even when debugging.
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=TRACE)
+    return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+
+    def __init__(self, config_file, contexts, config_template=None):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+        self.config_template = config_template
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+    @property
+    def is_string_template(self):
+        """:returns: Boolean if this instance is a template initialised with a string"""
+        return self.config_template is not None
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms.  It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='folsom')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+                         contexts=[context.ExtraPolicyContext(),
+                                   context.KeystoneContext()],
+                         config_template=hookenv.config('extra-policy'))
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release.  This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution.  Doing so may require inspecting service relations, charm
+    config, etc.  When registered, a config file is associated with a list
+    of generators.  When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template.  See context.py for more info.
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') + + def register(self, config_file, contexts, config_template=None): + """ + Register a config file with a list of context generators to be called + during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use + """ + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. + ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: {}'.format(config_file), level=ERROR) + raise OSConfigException + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+        """
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+
+        _out = self.render(config_file)
+        if six.PY3:
+            _out = _out.encode('UTF-8')
+
+        with open(config_file, 'wb') as out:
+            out.write(_out)
+
+        log('Wrote template %s.' % config_file, level=INFO)
+
+    def write_all(self):
+        """
+        Write out all registered config files.
+        """
+        [self.write(k) for k in six.iterkeys(self.templates)]
+
+    def set_release(self, openstack_release):
+        """
+        Resets the template environment and generates a new template loader
+        based on the new openstack release.
+        """
+        self._tmpl_env = None
+        self.openstack_release = openstack_release
+        self._get_tmpl_env()
+
+    def complete_contexts(self):
+        '''
+        Returns a list of context interfaces that yield a complete context.
+        '''
+        interfaces = []
+        [interfaces.extend(i.complete_contexts())
+         for i in six.itervalues(self.templates)]
+        return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+            {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+             'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                        if related:
+                            if incomplete_context_data.get(interface):
+                                incomplete_context_data[interface].update({'related': True})
+                            else:
+                                incomplete_context_data[interface] = {'related': True}
+                        else:
+                            incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 00000000..a8e4bf88
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
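+#
+# [Editor's note, illustration only] The vault-kv relation delivers the
+# vault URL, an AppRole role_id and a one-shot, response-wrapped token as
+# JSON-encoded settings. VaultKVContext below unwraps the token into a
+# secret_id via retrieve_secret_id() and caches it in the charm's unitdata
+# KV store, refreshing it whenever a new token is observed.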
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(context.OSContextGenerator, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        db = unitdata.kv()
+        last_token = db.get('last-token')
+        secret_id = db.get('secret-id')
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    if token != last_token:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        db.set('secret-id', secret_id)
+                        db.set('last-token', token)
+                        db.flush()
+
+                    ctxt = {
+                        'vault_url': vault_url,
+                        'role_id': json.loads(role_id),
+                        'secret_id': secret_id,
+                        'secret_backend': self.secret_backend,
+                    }
+                    vault_ca = data.get('vault_ca')
+                    if vault_ca:
+                        ctxt['vault_ca'] = json.loads(vault_ca)
+                    self.complete = True
+                    return ctxt
+        return {}
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype: context: dict
+    :param priority: Priority of alternative configuration
+    :ptype: priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600),
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contribute a high-level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
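A charm consumes the module above roughly as follows. This is a sketch assuming a completed secrets-storage relation; the hook-side wiring shown is illustrative and not part of this patch:

    # Illustrative hook-side usage of the vaultlocker helpers above.
    import charmhelpers.contrib.openstack.vaultlocker as vaultlocker

    ctxt = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)()
    if ctxt:
        # Renders vaultlocker.conf.j2 under /var/lib/charm/<service>/ and
        # installs it as the alternative for /etc/vaultlocker/vaultlocker.conf.
        vaultlocker.write_vaultlocker_conf(ctxt)

diff --git a/ceph-osd/hooks/secrets-storage-relation-broken b/ceph-osd/hooks/secrets-storage-relation-broken
new file mode 120000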
index 00000000..52d96630
--- /dev/null
+++ b/ceph-osd/hooks/secrets-storage-relation-broken
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-osd/hooks/secrets-storage-relation-changed b/ceph-osd/hooks/secrets-storage-relation-changed
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-osd/hooks/secrets-storage-relation-changed
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-osd/hooks/secrets-storage-relation-departed b/ceph-osd/hooks/secrets-storage-relation-departed
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-osd/hooks/secrets-storage-relation-departed
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-osd/hooks/secrets-storage-relation-joined b/ceph-osd/hooks/secrets-storage-relation-joined
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-osd/hooks/secrets-storage-relation-joined
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py
index 8f72a843..4a158e03 100644
--- a/ceph-osd/lib/ceph/utils.py
+++ b/ceph-osd/lib/ceph/utils.py
@@ -1364,17 +1364,27 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker):
         else:
             service_restart('ceph-mon-all')
 
+        # NOTE(jamespage): Later ceph releases require explicit
+        #                  call to ceph-create-keys to setup the
+        #                  admin keys for the cluster; this command
+        #                  will wait for quorum in the cluster before
+        #                  returning.
+        # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
+        #                 ceph releases too. This improves bootstrap
+        #                 resilience as the charm will wait for
+        #                 presence of peer units before attempting
+        #                 to bootstrap. Note that charms deploying
+        #                 ceph-mon service should disable running of
+        #                 `ceph-create-keys` service in init system.
+        cmd = ['ceph-create-keys', '--id', hostname]
         if cmp_pkgrevno('ceph', '12.0.0') >= 0:
-            # NOTE(jamespage): Later ceph releases require explicit
-            #                  call to ceph-create-keys to setup the
-            #                  admin keys for the cluster; this command
-            #                  will wait for quorum in the cluster before
-            #                  returning.
            # NOTE(fnordahl): The default timeout in ceph-create-keys of 600
-            #                 seconds is not adequate for all situations.
+            #                 seconds is not adequate. Increase timeout when
+            #                 timeout parameter available. For older releases
+            #                 we rely on retry_on_exception decorator.
# LP#1719436 - cmd = ['ceph-create-keys', '--id', hostname, '--timeout', '1800'] - subprocess.check_call(cmd) + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' osstat = os.stat(_client_admin_keyring) if not osstat.st_size: diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index f75f6872..efcb7198 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -27,6 +27,8 @@ extra-bindings: requires: mon: interface: ceph-osd + secrets-storage: + interface: vault-kv storage: osd-devices: type: block diff --git a/ceph-osd/templates/vaultlocker.conf.j2 b/ceph-osd/templates/vaultlocker.conf.j2 new file mode 100644 index 00000000..5679f81b --- /dev/null +++ b/ceph-osd/templates/vaultlocker.conf.j2 @@ -0,0 +1,6 @@ +# vaultlocker configuration from ceph-osd charm +[vault] +url = {{ vault_url }} +approle = {{ role_id }} +backend = {{ secret_backend }} +secret_id = {{ secret_id }} diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..ef785423 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index bc2cf132..a8d84766 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -517,3 +517,82 @@ def test_install_udev_rules(self, shutil, subprocess): subprocess.check_call.assert_called_once_with( ['udevadm', 'control', '--reload-rules'] ) + + +@patch.object(ceph_hooks, 'relation_get') +@patch.object(ceph_hooks, 'relation_set') +@patch.object(ceph_hooks, 'prepare_disks_and_activate') +@patch.object(ceph_hooks, 'get_relation_ip') +@patch.object(ceph_hooks, 'socket') +class SecretsStorageTestCase(unittest.TestCase): + + def test_secrets_storage_relation_joined(self, + _socket, + _get_relation_ip, + _prepare_disks_and_activate, + _relation_set, + _relation_get): + _get_relation_ip.return_value = '10.23.1.2' + _socket.gethostname.return_value = 'testhost' + ceph_hooks.secrets_storage_joined() + _get_relation_ip.assert_called_with('secrets-storage') + _relation_set.assert_called_with( + relation_id=None, + secret_backend='charm-vaultlocker', + isolated=True, + access_address='10.23.1.2', + hostname='testhost' + ) + _socket.gethostname.assert_called_once_with() + + def test_secrets_storage_relation_changed(self, + _socket, + _get_relation_ip, + _prepare_disks_and_activate, + _relation_set, + _relation_get): + _relation_get.return_value = None + ceph_hooks.secrets_storage_changed() + _prepare_disks_and_activate.assert_called_once_with() + + +@patch.object(ceph_hooks, 'cmp_pkgrevno') +@patch.object(ceph_hooks, 'config') +class VaultLockerTestCase(unittest.TestCase): + + def test_use_vaultlocker(self, _config, _cmp_pkgrevno): + _test_data = { + 
'osd-encrypt': True, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = 1 + self.assertTrue(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_no_encryption(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': False, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = 1 + self.assertFalse(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_not_vault(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': True, + 'osd-encrypt-keymanager': 'ceph', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = 1 + self.assertFalse(ceph_hooks.use_vaultlocker()) + + def test_use_vaultlocker_old_version(self, _config, _cmp_pkgrevno): + _test_data = { + 'osd-encrypt': True, + 'osd-encrypt-keymanager': 'vault', + } + _config.side_effect = lambda x: _test_data.get(x) + _cmp_pkgrevno.return_value = -1 + self.assertRaises(ValueError, + ceph_hooks.use_vaultlocker) diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index be13b420..c5c080a1 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -32,6 +32,8 @@ 'get_conf', 'application_version_set', 'get_upstream_version', + 'vaultlocker', + 'use_vaultlocker', ] CEPH_MONS = [ @@ -47,6 +49,7 @@ def setUp(self): super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get self.get_upstream_version.return_value = '10.2.2' + self.use_vaultlocker.return_value = False def test_assess_status_no_monitor_relation(self): self.relation_ids.return_value = [] @@ -77,6 +80,40 @@ def test_assess_status_monitor_complete_disks(self): self.get_conf.return_value = 'monitor-bootstrap-key' self.ceph.get_running_osds.return_value = ['12345', '67890'] + self.get_upstream_version.return_value = '12.2.4' hooks.assess_status() self.status_set.assert_called_with('active', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') + self.application_version_set.assert_called_with('12.2.4') + + def test_assess_status_monitor_vault_missing(self): + _test_relations = { + 'mon': ['mon:1'], + } + self.relation_ids.side_effect = lambda x: _test_relations.get(x, []) + self.related_units.return_value = CEPH_MONS + self.vaultlocker.vault_relation_complete.return_value = False + self.use_vaultlocker.return_value = True + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = ['12345', + '67890'] + self.get_upstream_version.return_value = '12.2.4' + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + self.application_version_set.assert_called_with('12.2.4') + + def test_assess_status_monitor_vault_incomplete(self): + _test_relations = { + 'mon': ['mon:1'], + 'secrets-storage': ['secrets-storage:6'] + } + self.relation_ids.side_effect = lambda x: _test_relations.get(x, []) + self.related_units.return_value = CEPH_MONS + self.vaultlocker.vault_relation_complete.return_value = False + self.use_vaultlocker.return_value = True + self.get_conf.return_value = 'monitor-bootstrap-key' + self.ceph.get_running_osds.return_value = ['12345', + '67890'] + self.get_upstream_version.return_value = '12.2.4' + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + self.application_version_set.assert_called_with('12.2.4') From 62c8f297cf02999f3667f79f9bbd6a554e422329 Mon Sep 17 00:00:00 
2001
From: James Page
Date: Tue, 15 May 2018 14:01:12 +0100
Subject: [PATCH 1490/2699] Misc updates to apparmor profile

Minor refactoring and updates for DENIED messages seen during
'complain' testing with filestore- and bluestore-based OSDs with
journal, db and wal devices.

Tested with Ceph Luminous on 18.04, including data generation using
rados bench and pg resizing from 8 -> 256.

Change-Id: I705eacfe4d464b96dde25495eecb95db30423b66
---
 ceph-osd/files/apparmor/usr.bin.ceph-osd | 25 +++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd
index 04c5865b..14084ab8 100644
--- a/ceph-osd/files/apparmor/usr.bin.ceph-osd
+++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd
@@ -1,5 +1,4 @@
 # vim:syntax=apparmor
-# Author: Chris Holcombe
 #include <tunables/global>
 
 /usr/bin/ceph-osd {
@@ -18,25 +17,29 @@
   network inet6 stream,
 
   /etc/ceph/* r,
+  /var/lib/charm/*/ceph.conf r,
+
+  owner @{PROC}/@{pids}/auxv r,
+  owner @{PROC}/@{pids}/net/dev r,
+  owner @{PROC}/@{pids}/task/*/comm rw,
-  @{PROC}/@{pids}/auxv r,
-  @{PROC}/@{pids}/net/dev r,
   @{PROC}/loadavg r,
+  @{PROC}/1/cmdline r,
+  @{PROC}/partitions r,
+  @{PROC}/sys/kernel/random/uuid r,
 
-  /run/ceph/* rw,
+  /var/lib/ceph/** rwkl,
   /srv/ceph/** rwkl,
-  /tmp/ r,
-  /var/lib/ceph/** rwk,
-  /var/lib/ceph/osd/** l,
-  /var/lib/charm/*/ceph.conf r,
+  /var/log/ceph/* rwk,
-  /var/run/ceph/* rwk,
-  /var/tmp/ r,
+
+  /{,var/}run/ceph/* rwk,
+  /{,var/}tmp/ r,
 
   /dev/ r,
   /dev/** rw,
   /sys/devices/** r,
-  /proc/partitions r,
+  /run/blkid/blkid.tab r,
 
   /bin/dash rix,

From b7630e4dda38d812e80b3bef2e9a8a2c73ee029e Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Thu, 17 May 2018 11:44:23 +0100
Subject: [PATCH 1491/2699] Fix Traceback issue when ceph-osd upgrade fails

Bug #1770740 surfaced an issue where get_upgrade_position() returns
None, but the calling function expects an exception to be thrown if
the "None" condition occurs. This fixes the code so that the Traceback
is avoided and an appropriate error message is logged for the
condition.

Change-Id: Ib7d1fdc8f91bc992ccf618ef6f57e99bb90c2dbc
Partial-Bug: #1770740
---
 ceph-osd/lib/ceph/utils.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py
index 4a158e03..329d69d5 100644
--- a/ceph-osd/lib/ceph/utils.py
+++ b/ceph-osd/lib/ceph/utils.py
@@ -2248,14 +2248,19 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version):
 
 def get_upgrade_position(osd_sorted_list, match_name):
     """Return the upgrade position for the given osd.
 
-    :param osd_sorted_list: list. Osds sorted
-    :param match_name: str. The osd name to match
-    :returns: int. The position or None if not found
+    :param osd_sorted_list: Osds sorted
+    :type osd_sorted_list: [str]
+    :param match_name: The osd name to match
+    :type match_name: str
+    :returns: The position of the name
+    :rtype: int
+    :raises: ValueError if name is not found
     """
     for index, item in enumerate(osd_sorted_list):
         if item.name == match_name:
             return index
-    return None
+    raise ValueError("osd name '{}' not found in get_upgrade_position list"
+                     .format(match_name))
 
 
 # Edge cases:
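With this change get_upgrade_position() raises instead of returning None, so its callers are expected to trap ValueError. A minimal sketch of the intended calling pattern follows; the surrounding names are assumed for illustration and are not taken from this patch:

    # Hypothetical call site; the real consumers live in the upgrade
    # orchestration code elsewhere in ceph/utils.py.
    try:
        position = get_upgrade_position(osd_sorted_list, my_name)
    except ValueError:
        # Previously the None return surfaced later as an opaque Traceback;
        # now the failure is explicit and can be logged before re-raising.
        log('{} not found in upgrade order'.format(my_name), level=ERROR)
        raise

From 35a725731c6650c345551e66804caa6cb5dec63d Mon Sep 17 00:00:00 2001
From: David Ames
Date: Tue, 8 May 2018 11:52:39 -0700
Subject: [PATCH 1492/2699] Enable Bionic as a gate test

Change bionic test from dev to gate for 18.05.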
Change-Id: Ie69b71387cf43add775cffe9d948ec1a42bf7cae --- ceph-radosgw/.gitignore | 2 + ceph-radosgw/hooks/ceph_radosgw_context.py | 6 +- .../charmhelpers/contrib/hahelpers/apache.py | 5 +- .../charmhelpers/contrib/hahelpers/cluster.py | 14 +- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 225 +++++++++++-- .../charmhelpers/contrib/openstack/context.py | 10 +- .../templates/section-oslo-middleware | 5 + .../templates/section-oslo-notifications | 3 + .../charmhelpers/contrib/openstack/utils.py | 4 +- .../contrib/openstack/vaultlocker.py | 126 +++++++ .../contrib/storage/linux/ceph.py | 43 ++- .../charmhelpers/contrib/storage/linux/lvm.py | 29 ++ .../contrib/storage/linux/utils.py | 16 + .../hooks/charmhelpers/core/hookenv.py | 138 ++++++-- ceph-radosgw/hooks/charmhelpers/core/host.py | 11 +- .../hooks/charmhelpers/core/services/base.py | 25 +- .../hooks/charmhelpers/core/sysctl.py | 18 +- .../hooks/charmhelpers/core/unitdata.py | 9 +- .../hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-radosgw/tests/basic_deployment.py | 313 +++++++----------- .../contrib/openstack/amulet/deployment.py | 10 +- .../contrib/openstack/amulet/utils.py | 225 +++++++++++-- .../tests/charmhelpers/core/hookenv.py | 138 ++++++-- ceph-radosgw/tests/charmhelpers/core/host.py | 11 +- .../tests/charmhelpers/core/services/base.py | 25 +- .../tests/charmhelpers/core/sysctl.py | 18 +- .../tests/charmhelpers/core/unitdata.py | 9 +- ...bionic-queens => gate-basic-bionic-queens} | 0 ceph-radosgw/tests/gate-basic-xenial-queens | 25 ++ ceph-radosgw/tox.ini | 2 +- .../unit_tests/test_ceph_radosgw_context.py | 120 +++++++ 32 files changed, 1232 insertions(+), 364 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py rename ceph-radosgw/tests/{dev-basic-bionic-queens => gate-basic-bionic-queens} (100%) create mode 100755 ceph-radosgw/tests/gate-basic-xenial-queens diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index 9fca5d06..0e21f066 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -7,3 +7,5 @@ tags *.pyc .idea .unit-state.db +func-results.json +.stestr/ diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index ce7975c1..5c0654a1 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -90,13 +90,17 @@ def __call__(self): if cmp_pkgrevno('radosgw', "10.2.0") >= 0: ctxt['auth_keystone_v3_supported'] = True + + if (not ctxt.get('admin_domain_id') and + float(ctxt.get('api_version', '2.0')) < 3): + ctxt.pop('admin_domain_id') + ctxt['auth_type'] = 'keystone' ctxt['user_roles'] = config('operator-roles') ctxt['cache_size'] = config('cache-size') ctxt['revocation_check_interval'] = config('revocation-check-interval') if self.context_complete(ctxt): return ctxt - return {} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 22acb683..605a1bec 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -65,7 +65,8 @@ def get_ca_cert(): if ca_cert is None: log("Inspecting identity-service relations for CA SSL certificate.", level=INFO) - for r_id in relation_ids('identity-service'): + for r_id in (relation_ids('identity-service') + + 
relation_ids('identity-credentials')): for unit in relation_list(r_id): if ca_cert is None: ca_cert = relation_get('ca_cert', @@ -76,7 +77,7 @@ def get_ca_cert(): def retrieve_ca_cert(cert_file): cert = None if os.path.isfile(cert_file): - with open(cert_file, 'r') as crt: + with open(cert_file, 'rb') as crt: cert = crt.read() return cert diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4207e42c..47facd91 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): ''' Distribute operations by waiting based on modulo_distribution If modulo and or wait are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. :param modulo: int The modulo number creates the group distribution :param wait: int The constant time wait value @@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): :side effect: Calls time.sleep() ''' if modulo is None: - modulo = config_get('modulo-nodes') + modulo = config_get('modulo-nodes') or 3 if wait is None: - wait = config_get('known-wait') - calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) msg = "Waiting {} seconds for {} ...".format(calculated_wait, operation_name) log(msg, DEBUG) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..84e87f5d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)
 
     def authenticate_keystone(self, keystone_ip, username, password,
@@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password,
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if not api_version or api_version == 2:
-            ep = base_ep + "/v2.0"
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password,
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
         else:
-            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password,
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client_v3.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+        Example call in amulet tests:
+            self.keystone_session, self.keystone = u.get_default_keystone_session(
+                self.keystone_sentry,
+                openstack_release=self._get_openstack_release())
+
+        The session can then be used to auth other clients:
+            neutronclient.Client(session=session)
+            aodh_client.Client(session=session)
+            etc.
+        """
+        self.log.debug('Authenticating keystone admin...')
+        api_version = 2
+        client_class = keystone_client.Client
+        # 11
=> xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 36cf32fc..2d91f0a7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -384,6 +384,7 @@ def __call__(self): # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') return ctxt return {} @@ -796,9 +797,9 @@ def configure_cert(self, cn=None): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template. ''' - def __init__(self, dirname): + def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' self.dirname = dirname + self.kwargs = kwargs def __call__(self): - mkdir(self.dirname) + mkdir(self.dirname, **self.kwargs) return {} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 00000000..dd73230a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 5dccd4bb..021a3c25 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -5,4 +5,7 @@ transport_url = {{ transport_url }} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} +{% if notification_format -%} +notification_format = {{ notification_format }} +{% endif -%} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index b753275d..6184abd0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if 
src.startswith('cloud:'): ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 00000000..a8e4bf88 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,126 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.contrib.openstack.context as context + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.templating as templating +import charmhelpers.core.unitdata as unitdata + +VAULTLOCKER_BACKEND = 'charm-vaultlocker' + + +class VaultKVContext(context.OSContextGenerator): + """Vault KV context for interaction with vault-kv interfaces""" + interfaces = ['secrets-storage'] + + def __init__(self, secret_backend=None): + super(context.OSContextGenerator, self).__init__() + self.secret_backend = ( + secret_backend or 'charm-{}'.format(hookenv.service_name()) + ) + + def __call__(self): + db = unitdata.kv() + last_token = db.get('last-token') + secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): + for unit in hookenv.related_units(relation_id): + data = hookenv.relation_get(unit=unit, + rid=relation_id) + vault_url = data.get('vault_url') + role_id = data.get('{}_role_id'.format(hookenv.local_unit())) + token = data.get('{}_token'.format(hookenv.local_unit())) + + if all([vault_url, role_id, token]): + token = json.loads(token) + vault_url = json.loads(vault_url) + + # Tokens may change when secret_id's are being + # reissued - if so use token to get new secret_id + if token != last_token: + secret_id = retrieve_secret_id( + url=vault_url, + token=token + ) + db.set('secret-id', secret_id) + db.set('last-token', token) + db.flush() + + ctxt = { + 'vault_url': vault_url, + 'role_id': json.loads(role_id), + 'secret_id': secret_id, + 'secret_backend': self.secret_backend, + } + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + self.complete = True + return ctxt + return {} + + +def write_vaultlocker_conf(context, priority=100): + """Write vaultlocker configuration to disk and install alternative + + :param context: Dict of data from vault-kv relation + :ptype: context: dict + :param priority: Priority of alternative configuration + :ptype: priority: int""" + charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( + hookenv.service_name() + ) + host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) + templating.render(source='vaultlocker.conf.j2', + target=charm_vl_path, + context=context, perms=0o600), + alternatives.install_alternative('vaultlocker.conf', + 
'/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contribute a high-level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py
index e13e60a6..76828201 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -291,7 +291,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
 
 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:
@@ -301,6 +301,10 @@ def __init__(self, service, name, pg_num=None, replicas=2,
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ def create(self):
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise
 
@@ -320,10 +330,14 @@ def create(self):
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ def create(self):
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise
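Since Ceph Luminous, each pool is expected to be tagged with the application that uses it; the pool classes above now thread app_name through to set_app_name_for_pool(), added in the next hunk. A minimal usage sketch, with illustrative names only:

    # Sketch: create a replicated pool tagged for RBD use; names assumed.
    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    pool = ReplicatedPool(service='admin', name='demo-pool',
                          replicas=3, app_name='rbd')
    # create() makes the pool, sets 'size', and on Ceph >= 12.0.0 runs
    # 'ceph --id admin osd pool application enable demo-pool rbd'.
    pool.create()

@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
     check_call(cmd)
 
 
+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph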
call fails + """ + if ceph_version() >= '12.0.0': + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index 79a7a245..c8bde692 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index c9428894..6f846b05 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,3 +67,19 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
index 7ed1cc4e..627d8f79 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -67,7 +68,7 @@ def unit_get(attribute):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -289,7 +290,7 @@ def __init__(self, *args, **kw):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
@@ -309,7 +310,11 @@ def load_previous(self, path=None):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -353,22 +358,40 @@ def _implicit_save(self):
         self.save()
 
 
-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or an individual key
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. 
+ + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
+ + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 910e96a6..653d58f1 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index c9b39863..5144e6c7 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -16,6 +16,7 @@ import amulet import keystoneclient +from keystoneclient.v3 import client as keystone_client_v3 import swiftclient from charmhelpers.contrib.openstack.amulet.deployment import ( OpenStackAmuletDeployment @@ -60,7 +61,8 @@ def _add_services(self): """ this_service = {'name': 'ceph-radosgw'} other_services = [ - {'name': 'ceph', 'units': 3}, + {'name': 'ceph-mon', 'units': 3}, + {'name': 'ceph-osd', 'units': 3}, {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, @@ -78,20 +80,21 @@ def _add_relations(self): 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph:client', + 'nova-compute:ceph': 'ceph-mon:client', 'keystone:shared-db': 'percona-cluster:shared-db', 'glance:shared-db': 'percona-cluster:shared-db', 'glance:identity-service': 'keystone:identity-service', 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph:client', + 'glance:ceph': 'ceph-mon:client', 'cinder:shared-db': 'percona-cluster:shared-db', 'cinder:identity-service': 'keystone:identity-service', 'cinder:amqp': 'rabbitmq-server:amqp', 'cinder:image-service': 'glance:image-service', 'cinder-ceph:storage-backend': 'cinder:storage-backend', - 'cinder-ceph:ceph': 'ceph:client', - 'ceph-radosgw:mon': 'ceph:radosgw', + 'cinder-ceph:ceph': 'ceph-mon:client', + 'ceph-radosgw:mon': 'ceph-mon:radosgw', 'ceph-radosgw:identity-service': 'keystone:identity-service', + 'ceph-osd:mon': 'ceph-mon:osd', } super(CephRadosGwBasicDeployment, self)._add_relations(relations) @@ -112,15 +115,21 @@ def _configure_services(self): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', - 'osd-reformat': 'yes', + } + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. 
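
A short sketch of the unit-test pattern the ':memory:' path in unitdata.Storage above enables (the key name is hypothetical):

    from charmhelpers.core import unitdata

    kv = unitdata.Storage(path=':memory:')  # no .unit-state.db touched, no fchmod
    kv.set('bootstrapped', True)
    assert kv.get('bootstrapped') is True
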
+ ceph_osd_config = { + 'osd-reformat': True, 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, 'percona-cluster': pxc_config, 'cinder': cinder_config, - 'ceph': ceph_config} + 'ceph-mon': ceph_config, + 'ceph-osd': ceph_osd_config} super(CephRadosGwBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): @@ -132,9 +141,12 @@ def _initialize_tests(self): self.nova_sentry = self.d.sentry['nova-compute'][0] self.glance_sentry = self.d.sentry['glance'][0] self.cinder_sentry = self.d.sentry['cinder'][0] - self.ceph0_sentry = self.d.sentry['ceph'][0] - self.ceph1_sentry = self.d.sentry['ceph'][1] - self.ceph2_sentry = self.d.sentry['ceph'][2] + self.ceph0_sentry = self.d.sentry['ceph-mon'][0] + self.ceph1_sentry = self.d.sentry['ceph-mon'][1] + self.ceph2_sentry = self.d.sentry['ceph-mon'][2] + self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0] + self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] + self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] self.ceph_radosgw_sentry = self.d.sentry['ceph-radosgw'][0] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) @@ -142,148 +154,127 @@ def _initialize_tests(self): self._get_openstack_release_string())) # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) + # Authenticate radosgw user using swift api + keystone_ip = self.keystone_sentry.info['public-address'] + keystone_relation = self.keystone_sentry.relation( + 'identity-service', + 'ceph-radosgw:identity-service') + # Create a demo tenant/role/user self.demo_tenant = 'demoTenant' self.demo_role = 'demoRole' self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone.roles.create(name=self.demo_role) - user = self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Grant Member role to demo_user - roles = [r._info for r in self.keystone.roles.list()] - for r in roles: - if r['name'].lower() == 'member': - self.keystone_member_role_id = r['id'] - - self.keystone.roles.add_user_role( - user=user.id, - role=self.keystone_member_role_id, - tenant=tenant.id) - - # Authenticate demo user with keystone - self.keystone_demo = u.authenticate_keystone_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate radosgw user using swift api - ks_obj_rel = self.keystone_sentry.relation( - 'identity-service', - 'ceph-radosgw:identity-service') - self.swift = u.authenticate_swift_user( - self.keystone, - user=ks_obj_rel['service_username'], - password=ks_obj_rel['service_password'], - tenant=ks_obj_rel['service_tenant']) - - self.keystone_v3 = None - - def _initialize_keystone_v3(self): - u.log.debug('Initializing Keystone v3 tests...') - if self.keystone_v3 is not None: - 
u.log.debug('...allready initialized.') - return - - se_rels = [(self.keystone_sentry, 'ceph-radosgw:identity-service')] - u.keystone_configure_api_version(se_rels, self, 3) - - # Prepare Keystone Client with a domain scoped token for admin user - self.keystone_v3 = u.authenticate_keystone( - self.keystone_sentry.info['public-address'], - 'admin', 'openstack', api_version=3, - user_domain_name='admin_domain', domain_name='admin_domain' - ) - - # Create a demo domain, project and user - self.demo_domain = 'demoDomain' self.demo_project = 'demoProject' + self.demo_domain = 'demoDomain' + if self._get_openstack_release() >= self.xenial_queens: + self.keystone_v3 = self.keystone + self.create_users_v3() + self.demo_user_session, _ = u.get_keystone_session( + keystone_ip, + self.demo_user, + 'password', + api_version=3, + user_domain_name=self.demo_domain, + project_domain_name=self.demo_domain, + project_name=self.demo_project + ) + self.keystone_demo = keystone_client_v3.Client( + session=self.demo_user_session) + self.service_session, _ = u.get_keystone_session( + keystone_ip, + keystone_relation['service_username'], + keystone_relation['service_password'], + api_version=3, + user_domain_name=keystone_relation['service_domain'], + project_domain_name=keystone_relation['service_domain'], + project_name=keystone_relation['service_tenant'] + ) + else: + self.keystone_v3 = None + self.create_users_v2() + # Authenticate demo user with keystone + self.demo_user_session, _ = u.get_keystone_session( + keystone_ip, + self.demo_user, + 'password', + api_version=2, + project_name=self.demo_tenant, + ) + self.keystone_demo = keystoneclient.client.Client( + session=self.demo_user_session) + + self.service_session, _ = u.get_keystone_session( + keystone_ip, + keystone_relation['service_username'], + keystone_relation['service_password'], + api_version=2, + project_name=keystone_relation['service_tenant'] + ) + self.swift = swiftclient.Connection(session=self.service_session) + + def create_users_v3(self): try: - domain = self.keystone_v3.domains.create( + self.keystone.projects.find(name=self.demo_project) + except keystoneclient.exceptions.NotFound: + domain = self.keystone.domains.create( self.demo_domain, description='Demo Domain', - enabled=True, + enabled=True ) - except keystoneclient.exceptions.Conflict: - u.log.debug('Domain {} already exists, proceeding.' - ''.format(self.demo_domain)) - - try: - project = self.keystone_v3.projects.create( + project = self.keystone.projects.create( self.demo_project, domain, description='Demo Project', enabled=True, ) - except keystoneclient.exceptions.Conflict: - u.log.debug('Project {} already exists in domain {}, proceeding.' - ''.format(self.demo_project, domain.name)) - - try: - user = self.keystone_v3.users.create( + user = self.keystone.users.create( self.demo_user, domain=domain.id, project=self.demo_project, password='password', email='demov3@demo.com', - description='Demo v3', - enabled=True, - ) - except keystoneclient.exceptions.Conflict: - u.log.debug('User {} already exists in domain {}, proceeding.' 
- ''.format(self.demo_user, domain.name)) - self.keystone_v3.roles.grant(self.keystone_member_role_id, - user=user.id, - project=project.id) - - # Prepare Keystone Client with a project scoped token for demo user - self.keystone_demo_v3 = u.authenticate_keystone( - self.keystone_sentry.info['public-address'], - self.demo_user, 'password', api_version=3, - user_domain_name=self.demo_domain, - project_domain_name=self.demo_domain, - project_name=self.demo_project, - ) + description='Demo', + enabled=True) + role = self.keystone.roles.find(name='Member') + self.keystone.roles.grant( + role.id, + user=user.id, + project=project.id) - u.log.debug('OK') + def create_users_v2(self): + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + role = self.keystone.roles.find(name='Member') + user = self.keystone.users.find(name=self.demo_user) + tenant = self.keystone.tenants.find(name=self.demo_tenant) + self.keystone.roles.add_user_role( + user=user.id, + role=role.id, + tenant=tenant.id) def test_100_ceph_processes(self): """Verify that the expected service processes are running on each ceph unit.""" - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1, - 'ceph-osd': 2 - } - # Units with process names and PID quantities expected expected_processes = { self.ceph_radosgw_sentry: {'radosgw': 1}, - self.ceph0_sentry: ceph_processes, - self.ceph1_sentry: ceph_processes, - self.ceph2_sentry: ceph_processes } actual_pids = u.get_unit_process_ids(expected_processes) @@ -294,56 +285,25 @@ def test_100_ceph_processes(self): def test_102_services(self): """Verify the expected services are running on the service units.""" - services = { - self.rabbitmq_sentry: ['rabbitmq-server'], - self.nova_sentry: ['nova-compute'], - self.keystone_sentry: ['keystone'], - self.glance_sentry: ['glance-registry', - 'glance-api'], - self.cinder_sentry: ['cinder-scheduler', - 'cinder-volume'], - } - - if self._get_openstack_release() < self.xenial_mitaka: - services[self.cinder_sentry].append('cinder-api') - else: - services[self.cinder_sentry].append('apache2') - if self._get_openstack_release() < self.xenial_mitaka: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. 
- ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] - services[self.ceph0_sentry] = ceph_services - services[self.ceph1_sentry] = ceph_services - services[self.ceph2_sentry] = ceph_services - services[self.ceph_radosgw_sentry] = ['radosgw-all'] - - if self._get_openstack_release() >= self.trusty_liberty: - services[self.keystone_sentry] = ['apache2'] - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) + services = {self.ceph_radosgw_sentry: ['radosgw-all']} + ret = u.validate_services_by_name(services) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) def test_200_ceph_radosgw_ceph_relation(self): """Verify the ceph-radosgw to ceph relation data.""" u.log.debug('Checking ceph-radosgw:mon to ceph:radosgw ' 'relation data...') unit = self.ceph_radosgw_sentry - relation = ['mon', 'ceph:radosgw'] + relation = ['mon', 'ceph-mon:radosgw'] expected = { 'private-address': u.valid_ip } ret = u.validate_relation_data(unit, relation, expected) if ret: - message = u.relation_error('ceph-radosgw to ceph', ret) + message = u.relation_error('ceph-radosgw to ceph-mon', ret) amulet.raise_status(amulet.FAIL, msg=message) def test_201_ceph_radosgw_relation(self): @@ -445,13 +405,20 @@ def test_300_ceph_radosgw_config(self): 'rgw socket path': '/tmp/radosgw.sock', 'log file': '/var/log/ceph/radosgw.log', 'rgw keystone url': 'http://{}:35357/'.format(keystone_ip), - 'rgw keystone admin token': 'ubuntutesting', 'rgw keystone accepted roles': 'Member,Admin', 'rgw keystone token cache size': '500', 'rgw keystone revocation interval': '600', 'rgw frontends': 'civetweb port=70', }, } + if self._get_openstack_release() >= self.xenial_queens: + expected['client.radosgw.gateway']['rgw keystone admin domain'] = ( + 'service_domain') + (expected['client.radosgw.gateway'] + ['rgw keystone admin project']) = 'services' + else: + expected['client.radosgw.gateway']['rgw keystone admin token'] = ( + 'ubuntutesting') for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) @@ -585,38 +552,14 @@ def test_403_swift_keystone_auth(self, api_version=2): """Check Swift Object Storage functionlaity""" u.log.debug('Check Swift Object Storage functionality (api_version={})' ''.format(api_version)) - keystone_ip = self.keystone_sentry.info['public-address'] - base_ep = "http://{}:5000".format(keystone_ip.strip().decode('utf-8')) - if api_version == 3: - self._initialize_keystone_v3() - ep = base_ep + '/v3' - os_options = { - 'user_domain_name': self.demo_domain, - 'project_domain_name': self.demo_domain, - 'project_name': self.demo_project, - } - conn = swiftclient.client.Connection( - authurl=ep, - user=self.demo_user, - key='password', - os_options=os_options, - auth_version=api_version, - ) - else: - ep = base_ep + '/v2.0' - conn = swiftclient.client.Connection( - authurl=ep, - user=self.demo_user, - key='password', - tenant_name=self.demo_tenant, - auth_version=api_version, - ) + conn = swiftclient.Connection(session=self.keystone_demo.session) u.log.debug('Create container') container = 'demo-container' try: conn.put_container(container) except swiftclient.exceptions.ClientException as e: - if api_version == 3 and e.http_status == 409: + print "EXCEPTION", e.http_status + if e.http_status == 409: # Ceph RadosGW is currently configured with a global namespace # for container names. 
Make use of this to verify that we # cannot create a container with a name already taken by a diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87..66beeda2 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ def _get_openstack_release(self): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3c..84e87f5d 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ def __init__(self, log_level=ERROR): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. 
+ + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ def keystone_wait_for_propagation(self, sentry_relation_pairs, if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ def keystone_configure_api_version(self, sentry_relation_pairs, deployment, deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
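
The release gating used throughout these helpers keys off list position in OPENSTACK_RELEASES_PAIRS; a worked illustration with the list exactly as defined earlier in this patch:

    OPENSTACK_RELEASES_PAIRS = [
        'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
        'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
        'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
        'xenial_pike', 'artful_pike', 'xenial_queens',
        'bionic_queens']

    assert OPENSTACK_RELEASES_PAIRS.index('xenial_queens') == 11
    # A deployment reporting index >= 11 (xenial-queens or later) is routed
    # to the Keystone v3 validators and sessions; earlier releases stay on
    # the v2 code paths.
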
- keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ def authenticate_keystone(self, keystone_ip, username, password, project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ def authenticate_keystone(self, keystone_ip, username, password, auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 
=> xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 7ed1cc4e..627d8f79 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def unit_get(attribute): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ def __init__(self, *args, **kw): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ def load_previous(self, path=None): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ def _implicit_save(self): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. 
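
A hedged sketch of what the module-level cache below buys a charm (the function names are real; the config key is illustrative):

    from charmhelpers.core import hookenv

    cfg = hookenv.config()          # first call shells out to config-get
    src = hookenv.config('source')  # later calls are answered from _cache_config
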
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. 
+ + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index fd14d60f..322ab2ac 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
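
For the egress_subnets() helper repeated above, a hedged trace of the three fallbacks (relation settings hypothetical):

    # {'egress-subnets': '10.5.0.0/16, 192.168.1.0/24'}
    #     -> ['10.5.0.0/16', '192.168.1.0/24']
    # {'ingress-address': '10.5.0.23'}       -> ['10.5.0.23/32']   (IPv4 widened)
    # {'private-address': '2001:db8::f00f'}  -> ['2001:db8::f00f/128']  (bare IPv6)
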
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/ceph-radosgw/tests/charmhelpers/core/services/base.py b/ceph-radosgw/tests/charmhelpers/core/services/base.py index ca9dc996..179ad4f0 100644 --- a/ceph-radosgw/tests/charmhelpers/core/services/base.py +++ b/ceph-radosgw/tests/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/ceph-radosgw/tests/charmhelpers/core/sysctl.py b/ceph-radosgw/tests/charmhelpers/core/sysctl.py index 6e413e31..1f188d8c 100644 --- a/ceph-radosgw/tests/charmhelpers/core/sysctl.py +++ b/ceph-radosgw/tests/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py index 6d7b4942..ab554327 100644 --- a/ceph-radosgw/tests/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/tests/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
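
A sketch of a services-framework definition exercising the ICMP-aware port handling above (service name and port list hypothetical):

    SERVICES = [{
        'service': 'demo-gateway',
        'ports': [80, 443, 'icmp'],  # ints open TCP; the string 'icmp' opens ICMP
    }]
    # On 'start' the callback now issues open_port(80, 'TCP'),
    # open_port(443, 'TCP') and open_port('icmp', 'ICMP'); 'stop' mirrors this
    # with close_port(), and ports_contains() compares stored entries without
    # forcing 'icmp' through int().
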
+ + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ def __init__(self, path=None): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/ceph-radosgw/tests/dev-basic-bionic-queens b/ceph-radosgw/tests/gate-basic-bionic-queens similarity index 100% rename from ceph-radosgw/tests/dev-basic-bionic-queens rename to ceph-radosgw/tests/gate-basic-bionic-queens diff --git a/ceph-radosgw/tests/gate-basic-xenial-queens b/ceph-radosgw/tests/gate-basic-xenial-queens new file mode 100755 index 00000000..fff90006 --- /dev/null +++ b/ceph-radosgw/tests/gate-basic-xenial-queens @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on xenial-queens.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='xenial', + openstack='cloud:xenial-queens', + source='cloud:xenial-updates/queens') + deployment.run_tests() diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 43190642..09ca045d 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy [testenv:func27-dfs] # Charm Functional Test diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 3aba0188..d707efa8 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -70,6 +70,7 @@ def setUp(self): super(IdentityServiceContextTest, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get + self.maxDiff = None @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @@ -96,6 +97,7 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'service_port': 9876, 'service_host': '127.0.0.4', 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', 'auth_host': '127.0.0.5', 'auth_port': 5432, 'service_tenant': 'ten', @@ -105,6 +107,7 @@ def test_ids_ctxt(self, 
_log, _rids, _runits, _rget, _ctxt_comp, _rget.return_value = id_data ids_ctxt = context.IdentityServiceContext() expect = { + 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', 'admin_password': 'adminpass', 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'admin_tenant_name': 'ten', @@ -126,6 +129,123 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') + @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + @patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt_missing_admin_domain_id( + self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, + jewel_installed=False): + self.test_config.set('operator-roles', 'Babel') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '7500000') + self.test_relation.set({'admin_token': 'ubuntutesting'}) + self.relation_ids.return_value = ['identity-service:5'] + self.related_units.return_value = ['keystone/0'] + _format_ipv6_addr.return_value = False + _rids.return_value = ['rid1'] + _runits.return_value = ['runit'] + _ctxt_comp.return_value = True + self.cmp_pkgrevno.return_value = -1 + if jewel_installed: + self.cmp_pkgrevno.return_value = 0 + id_data = { + 'service_port': 9876, + 'service_host': '127.0.0.4', + 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'service_tenant': 'ten', + 'service_username': 'admin', + 'service_password': 'adminpass', + } + _rget.return_value = id_data + ids_ctxt = context.IdentityServiceContext() + expect = { + 'admin_password': 'adminpass', + 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'admin_tenant_name': 'ten', + 'admin_token': 'ubuntutesting', + 'admin_user': 'admin', + 'api_version': '2.0', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'auth_protocol': 'http', + 'auth_type': 'keystone', + 'cache_size': '42', + 'revocation_check_interval': '7500000', + 'service_host': '127.0.0.4', + 'service_port': 9876, + 'service_protocol': 'http', + 'user_roles': 'Babel', + } + if jewel_installed: + expect['auth_keystone_v3_supported'] = True + self.assertEqual(expect, ids_ctxt()) + + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') + @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + @patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt_v3( + self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, + jewel_installed=False): + self.test_config.set('operator-roles', 'Babel') + self.test_config.set('cache-size', '42') + self.test_config.set('revocation-check-interval', '7500000') + self.test_relation.set({'admin_token': 'ubuntutesting'}) + self.relation_ids.return_value = ['identity-service:5'] + self.related_units.return_value = ['keystone/0'] + _format_ipv6_addr.return_value = False + _rids.return_value = ['rid1'] + _runits.return_value = ['runit'] + 
_ctxt_comp.return_value = True + self.cmp_pkgrevno.return_value = -1 + if jewel_installed: + self.cmp_pkgrevno.return_value = 0 + id_data = { + 'service_port': 9876, + 'service_host': '127.0.0.4', + 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', + 'service_domain': 'service_domain', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'service_tenant': 'ten', + 'service_username': 'admin', + 'service_password': 'adminpass', + 'api_version': '3', + } + _rget.return_value = id_data + ids_ctxt = context.IdentityServiceContext() + expect = { + 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', + 'admin_domain_name': 'service_domain', + 'admin_password': 'adminpass', + 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'admin_tenant_name': 'ten', + 'admin_token': 'ubuntutesting', + 'admin_user': 'admin', + 'api_version': '3', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'auth_protocol': 'http', + 'auth_type': 'keystone', + 'cache_size': '42', + 'revocation_check_interval': '7500000', + 'service_host': '127.0.0.4', + 'service_port': 9876, + 'service_protocol': 'http', + 'user_roles': 'Babel', + } + if jewel_installed: + expect['auth_keystone_v3_supported'] = True + self.assertEqual(expect, ids_ctxt()) + def test_ids_ctxt_jewel(self): self.test_ids_ctxt(jewel_installed=True) From 9c2dc916cdff27fde816a32b0b23f0c3b6e5267f Mon Sep 17 00:00:00 2001 From: wangqi Date: Tue, 22 May 2018 03:36:07 +0000 Subject: [PATCH 1493/2699] Switch to using stestr When the TC merged I2637dd714cbb6d38ef8b8dc1083e359207118284 we're supposed to invoke stestr rather than testr so lets do that Change-Id: I151eb37008936358bd4108a2510c924c261dba8e --- ceph-radosgw/.stestr.conf | 3 +++ ceph-radosgw/.testr.conf | 8 -------- ceph-radosgw/test-requirements.txt | 1 + ceph-radosgw/tox.ini | 2 +- 4 files changed, 5 insertions(+), 9 deletions(-) create mode 100644 ceph-radosgw/.stestr.conf delete mode 100644 ceph-radosgw/.testr.conf diff --git a/ceph-radosgw/.stestr.conf b/ceph-radosgw/.stestr.conf new file mode 100644 index 00000000..c963e1f3 --- /dev/null +++ b/ceph-radosgw/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_path=./ diff --git a/ceph-radosgw/.testr.conf b/ceph-radosgw/.testr.conf deleted file mode 100644 index 801646bb..00000000 --- a/ceph-radosgw/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 9edd4bbf..67c30f1a 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -21,6 +21,7 @@ python-novaclient>=2.30.1 python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 +stestr>=1.0.0 distro-info # END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 09ca045d..7bc84c50 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = ostestr {posargs} +commands = stestr run --slowest {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From 
e8325a1f14f027c74b05835a8016c98a056090c7 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 28 May 2018 10:24:07 +0200 Subject: [PATCH 1494/2699] Only attempt to bootstrap the cluster if we are not already done This should dramatically reduce hook executions during mon-relation and leader-setting hook executions as we only need to notify all related units on bootstrap Change-Id: I45f4dc47f811bdc3bbe5171ca9a388f0e207f1d0 Related-Bug: #1719436 --- ceph-mon/hooks/ceph_hooks.py | 86 ++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 7545551a..c5f3046b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -376,48 +376,50 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: - status_set('maintenance', 'Bootstrapping MON cluster') - # the following call raises an exception if it can't add the keyring - try: - ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) - except FileNotFoundError as e: # NOQA -- PEP8 is still PY2 - log("Couldn't bootstrap the monitor yet: {}".format(str(e))) - exit(0) - ceph.wait_for_bootstrap() - ceph.wait_for_quorum() - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - status_set('maintenance', 'Bootstrapping Ceph MGR') - ceph.bootstrap_manager() - # If we can and want to - if is_leader() and config('customize-failure-domain'): - # But only if the environment supports it - if os.environ.get('JUJU_AVAILABILITY_ZONE'): - cmds = [ - "ceph osd getcrushmap -o /tmp/crush.map", - "crushtool -d /tmp/crush.map| " - "sed 's/step chooseleaf firstn 0 type host/step " - "chooseleaf firstn 0 type rack/' > " - "/tmp/crush.decompiled", - "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", - "crushtool -i /tmp/crush.map --test", - "ceph osd setcrushmap -i /tmp/crush.map" - ] - for cmd in cmds: - try: - subprocess.check_call(cmd, shell=True) - except subprocess.CalledProcessError as e: - log("Failed to modify crush map:", level='error') - log("Cmd: {}".format(cmd), level='error') - log("Error: {}".format(e.output), level='error') - break - else: - log( - "Your Juju environment doesn't" - "have support for Availability Zones" - ) - notify_osds() - notify_radosgws() - notify_client() + if not ceph.is_bootstrapped(): + status_set('maintenance', 'Bootstrapping MON cluster') + # the following call raises an exception + # if it can't add the keyring + try: + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) + except FileNotFoundError as e: # NOQA -- PEP8 is still PY2 + log("Couldn't bootstrap the monitor yet: {}".format(str(e))) + exit(0) + ceph.wait_for_bootstrap() + ceph.wait_for_quorum() + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + status_set('maintenance', 'Bootstrapping Ceph MGR') + ceph.bootstrap_manager() + # If we can and want to + if is_leader() and config('customize-failure-domain'): + # But only if the environment supports it + if os.environ.get('JUJU_AVAILABILITY_ZONE'): + cmds = [ + "ceph osd getcrushmap -o /tmp/crush.map", + "crushtool -d /tmp/crush.map| " + "sed 's/step chooseleaf firstn 0 type host/step " + "chooseleaf firstn 0 type rack/' > " + "/tmp/crush.decompiled", + "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", + "crushtool -i /tmp/crush.map --test", + "ceph osd setcrushmap -i /tmp/crush.map" + ] + for cmd in cmds: + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + log("Failed to modify crush map:", 
level='error') + log("Cmd: {}".format(cmd), level='error') + log("Error: {}".format(e.output), level='error') + break + else: + log( + "Your Juju environment doesn't" + "have support for Availability Zones" + ) + notify_osds() + notify_radosgws() + notify_client() else: log('Not enough mons ({}), punting.' .format(len(get_mon_hosts()))) From 400e9259912870639b5a518cecbd805fb83147d3 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 28 May 2018 11:21:36 +0200 Subject: [PATCH 1495/2699] Remove replace-osd action At present this action does not work. While looking to repair the functionality I found a number of issues with the current implementation. For now I suggest we remove this functionality, and at some point we may consider replacing it with a `remove-disk` action. Sync in relevant changes from charms.ceph Depends-On: Id61b87927c43d807aacc93cf05ec8f88d91b7a39 Change-Id: Ic71d304ff65a05ab7249f4dd07adc45429a323e9 --- ceph-osd/actions.yaml | 11 -- ceph-osd/actions/replace-osd | 1 - ceph-osd/actions/replace_osd.py | 99 ------------------ ceph-osd/lib/ceph/utils.py | 125 +---------------------- ceph-osd/unit_tests/test_replace_osd.py | 128 ------------------------ 5 files changed, 4 insertions(+), 360 deletions(-) delete mode 120000 ceph-osd/actions/replace-osd delete mode 100755 ceph-osd/actions/replace_osd.py delete mode 100644 ceph-osd/unit_tests/test_replace_osd.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 7858d51f..731de13e 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -24,17 +24,6 @@ resume: description: | Set the local osd units in the charm to 'in'. Note that the pause option does NOT stop the osd processes. -replace-osd: - description: Replace a failed osd with a fresh disk - params: - osd-number: - type: integer - description: The osd number to operate on. Example 99. Hint you can get this information from `ceph osd tree`. - replacement-device: - type: string - description: The replacement device to use. Example /dev/sdb. - required: [osd-number, replacement-device] - additionalProperties: false list-disks: description: List the unmounted disk on the specified unit add-disk: diff --git a/ceph-osd/actions/replace-osd b/ceph-osd/actions/replace-osd deleted file mode 120000 index d9f1a694..00000000 --- a/ceph-osd/actions/replace-osd +++ /dev/null @@ -1 +0,0 @@ -replace_osd.py \ No newline at end of file diff --git a/ceph-osd/actions/replace_osd.py b/ceph-osd/actions/replace_osd.py deleted file mode 100755 index 297ec177..00000000 --- a/ceph-osd/actions/replace_osd.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.append('hooks/') -sys.path.append('lib/') - -import charmhelpers.core.hookenv as hookenv - -import ceph.utils - -""" -Given a OSD number this script will attempt to turn that back into a mount -point and then replace the OSD with a new one. 
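
The mon_relation() guard introduced in the previous commit reduces, in effect, to this shape (a sketch reusing the hook's own helper names; bootstrap details elided):

    if len(get_mon_hosts()) >= moncount:
        if not ceph.is_bootstrapped():
            # bootstrap the mons, wait for quorum, optionally bootstrap the
            # mgr and customize the crush map (as in the hunk above)...
            notify_osds()
            notify_radosgws()
            notify_client()
    else:
        log('Not enough mons ({}), punting.'.format(len(get_mon_hosts())))
    # Related units are only re-notified on the bootstrap transition, not on
    # every subsequent mon-relation or leader-settings hook execution.
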
-""" - - -def get_disk_stats(): - try: - # https://www.kernel.org/doc/Documentation/iostats.txt - with open('/proc/diskstats', 'rt', encoding='UTF-8') as diskstats: - return diskstats.readlines() - except IOError as err: - hookenv.log('Could not open /proc/diskstats. Error: {}' - .format(str(err))) - hookenv.action_fail( - 'replace-osd failed because /proc/diskstats could not ' - 'be opened {}'.format(str(err))) - return None - - -def lookup_device_name(major_number, minor_number): - """ - - :param major_number: int. The major device number - :param minor_number: int. The minor device number - :return: string. The name of the device. Example: /dev/sda. - Returns None on error. - """ - diskstats = get_disk_stats() - for line in diskstats: - parts = line.split() - if not len(parts) > 3: - # Skip bogus lines - continue - try: - if int(parts[0]) is major_number and int(parts[1]) is \ - minor_number: - # Found our device. Return its name - return parts[2] - except ValueError as value_err: - hookenv.log('Could not convert {} or {} into an integer. Error: {}' - .format(parts[0], parts[1], str(value_err))) - continue - return None - - -def get_device_number(osd_number): - """ - This function will return a tuple of (major_number, minor_number) - device number for the given osd. - :param osd_number: int - :rtype : (major_number,minor_number) - """ - path = "/var/lib/ceph/osd/ceph-{}".format(osd_number) - info = os.lstat(path) - major_number = os.major(info.st_dev) - minor_number = os.minor(info.st_dev) - return major_number, minor_number - - -if __name__ == '__main__': - dead_osd_number = hookenv.action_get("osd-number") - replacement_device = hookenv.action_get("replacement-device") - major, minor = get_device_number(dead_osd_number) - device_name = lookup_device_name(major, minor) - osd_format = hookenv.config('osd-format') - osd_journal = hookenv.config('osd-journal') - - ceph.utils.replace_osd(dead_osd_number=dead_osd_number, - dead_osd_device="/dev/{}".format(device_name), - new_osd_device=replacement_device, - osd_format=osd_format, - osd_journal=osd_journal) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 329d69d5..d281a3b6 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -13,8 +13,6 @@ # limitations under the License. import collections -import ctypes -import errno import glob import json import os @@ -25,7 +23,6 @@ import subprocess import sys import time -import shutil import uuid from datetime import datetime @@ -38,7 +35,6 @@ cmp_pkgrevno, lsb_release, mkdir, - mounts, owner, service_restart, service_start, @@ -835,114 +831,6 @@ def add_bootstrap_hint(peer): ] -def umount(mount_point): - """This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - - If umount returns EBUSY this will lazy unmount. - - :param mount_point: str. A String representing the filesystem mount point - :returns: int. Returns 0 on success. errno otherwise. - """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. 
IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]) - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - def get_partition_list(dev): """Lists the partitions of a block device. @@ -2248,19 +2136,14 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): def get_upgrade_position(osd_sorted_list, match_name): """Return the upgrade position for the given osd. - :param osd_sorted_list: Osds sorted - :type osd_sorted_list: [str] - :param match_name: The osd name to match - :type match_name: str - :returns: The position of the name - :rtype: int - :raises: ValueError if name is not found + :param osd_sorted_list: list. Osds sorted + :param match_name: str. The osd name to match + :returns: int. 
The position or None if not found """ for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" - .format(match_name)) + return None # Edge cases: diff --git a/ceph-osd/unit_tests/test_replace_osd.py b/ceph-osd/unit_tests/test_replace_osd.py deleted file mode 100644 index 141ffbd2..00000000 --- a/ceph-osd/unit_tests/test_replace_osd.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import errno -import posix - -from mock import call, Mock, patch - -import test_utils -import ceph.utils as ceph -import replace_osd - -TO_PATCH = [ - 'ctypes', - 'status_set', -] - -proc_data = [ - ' 8 0 sda 2291336 263100 108136080 1186276 28844343 28798167 ' - '2145908072 49433216 0 7550032 50630100\n', - ' 8 1 sda1 1379 1636 8314 692 75 17 1656 0 0 496 692\n', - ' 8 2 sda2 1 0 2 0 0 0 0 0 0 0 0\n', -] - - -def umount_busy(*args): - # MNT_FORCE - if args[1] == 1: - return -1 - # MNT_DETACH - if args[1] == 2: - return 0 - - -class ReplaceOsdTestCase(test_utils.CharmTestCase): - def setUp(self): - super(ReplaceOsdTestCase, self).setUp(ceph, TO_PATCH) - - def test_umount_ebusy(self): - self.ctypes.util.find_library.return_value = 'libc.so.6' - umount_mock = Mock() - self.ctypes.CDLL.return_value = umount_mock - umount_mock.umount.side_effect = umount_busy - self.ctypes.get_errno.return_value = errno.EBUSY - - ret = ceph.umount('/some/osd/mount') - umount_mock.assert_has_calls([ - call.umount('/some/osd/mount', 1), - call.umount('/some/osd/mount', 2), - ]) - assert ret == 0 - - def test_umount(self): - self.ctypes.util.find_library.return_value = 'libc.so.6' - umount_mock = Mock() - self.ctypes.CDLL.return_value = umount_mock - umount_mock.umount.return_value = 0 - - ret = ceph.umount('/some/osd/mount') - umount_mock.assert_has_calls([ - call.umount('/some/osd/mount', 1), - ]) - assert ret == 0 - - @patch.object(ceph, 'mounts') - @patch.object(ceph.subprocess, 'check_output') - @patch.object(ceph, 'umount') - @patch.object(ceph, 'osdize') - @patch.object(ceph, 'shutil') - @patch.object(ceph, 'systemd') - @patch.object(ceph, 'ceph_user') - def test_replace_osd(self, ceph_user, systemd, shutil, osdize, umount, - check_output, mounts): - ceph_user.return_value = "ceph" - mounts.return_value = [['/var/lib/ceph/osd/ceph-a', '/dev/sda']] - check_output.return_value = True - self.status_set.return_value = None - systemd.return_value = False - umount.return_value = 0 - osdize.return_value = None - shutil.rmtree.return_value = None - ceph.replace_osd(dead_osd_number=0, - dead_osd_device='/dev/sda', - new_osd_device='/dev/sdb', - osd_format=True, - osd_journal=None, - reformat_osd=False, - ignore_errors=False) - check_output.assert_has_calls( - [ - call(['ceph', '--id', 'osd-upgrade', - 'osd', 'out', 'osd.0']), - call(['stop', 'ceph-osd', 'id=0']), - call(['ceph', '--id', - 'osd-upgrade', 'osd', 'crush', 'remove', 'osd.0']), - call(['ceph', '--id', - 
'osd-upgrade', 'auth', 'del', 'osd.0']), - call(['ceph', '--id', - 'osd-upgrade', 'osd', 'rm', 'osd.0']) - ] - ) - - @patch('replace_osd.get_disk_stats') - def test_lookup_device_name(self, disk_stats): - disk_stats.return_value = proc_data - dev_name = replace_osd.lookup_device_name(major_number=8, - minor_number=0) - assert dev_name == 'sda', "dev_name: {}".format(dev_name) - - @patch('replace_osd.os.lstat') - def test_get_device_number(self, lstat): - lstat.return_value = posix.stat_result([ - 16877, 16, 51729, 3, 0, 0, 217, 0, 1458086872, 1458086872 - ]) - major, minor = replace_osd.get_device_number(1) - assert major == 202 - assert minor == 17 From 6d36ee671ed20256dec1325ebc64aab328de3029 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 31 May 2018 07:00:33 -0500 Subject: [PATCH 1496/2699] No reformat Do not reformat devices. A subsequent change will be necessary to account for conditions where a reformat is still desired, such as a set of blocking states and user-driven actions. Partial-bug: #1698154 Depends-On: I90a866aa138d18e4242783c42d4c7c587f696d7d Change-Id: I3a41ab38e7a1679cf4f5380a7cc56556da3aaf2b --- ceph-osd/actions/add_disk.py | 1 - ceph-osd/config.yaml | 11 - ceph-osd/hooks/ceph_hooks.py | 9 +- .../charmhelpers/contrib/hahelpers/cluster.py | 5 + .../contrib/openstack/amulet/utils.py | 10 +- .../contrib/openstack/cert_utils.py | 227 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 51 ++-- .../charmhelpers/contrib/openstack/ip.py | 10 + ceph-osd/hooks/charmhelpers/core/hookenv.py | 7 + ceph-osd/lib/ceph/utils.py | 19 +- ceph-osd/tests/basic_deployment.py | 9 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../contrib/openstack/amulet/utils.py | 10 +- ceph-osd/tests/charmhelpers/core/hookenv.py | 7 + 14 files changed, 312 insertions(+), 70 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 158d6388..21ea1ecd 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -31,7 +31,6 @@ def add_device(request, device_path, bucket=None): ceph.utils.osdize(dev, hookenv.config('osd-format'), ceph_hooks.get_journal_devices(), - hookenv.config('osd-reformat'), hookenv.config('ignore-device-errors'), hookenv.config('osd-encrypt'), hookenv.config('bluestore')) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 88bf5776..9daa9bb4 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -127,17 +127,6 @@ options: . Note that despite bluestore being the default for Ceph Luminous, if this option is False, OSDs will still use filestore. - osd-reformat: - type: boolean - default: False - description: | - By default, the charm will not re-format a device that already looks - as if it might be an OSD device. This is a safeguard to try to - prevent data loss. - . - Enabling this option forces a reformat of any OSD devices found which - have not been processed by the unit previously or are not already - mounted. 
  osd-encrypt:
    type: boolean
    default: False
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index d0681189..3c2cf016 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -451,7 +451,7 @@ def prepare_disks_and_activate():
         emit_cephconf()
         for dev in get_devices():
             ceph.osdize(dev, config('osd-format'),
-                        osd_journal, config('osd-reformat'),
+                        osd_journal,
                         config('ignore-device-errors'),
                         config('osd-encrypt'),
                         config('bluestore'),
@@ -499,13 +499,6 @@ def get_conf(name):
     return None
 
 
-def reformat_osd():
-    if config('osd-reformat'):
-        return True
-    else:
-        return False
-
-
 def get_devices():
     devices = []
     if config('osd-devices'):
diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 47facd91..4a737e24 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -223,6 +223,11 @@ def https():
         return True
     if config_get('ssl_cert') and config_get('ssl_key'):
         return True
+    for r_id in relation_ids('certificates'):
+        for unit in relation_list(r_id):
+            ca = relation_get('ca', rid=r_id, unit=unit)
+            if ca:
+                return True
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
             # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index ef785423..d43038b2 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -544,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None,
         return ep
 
     def get_default_keystone_session(self, keystone_sentry,
-                                     openstack_release=None):
+                                     openstack_release=None, api_version=2):
         """Return a keystone session object and client object assuming standard
            default settings
 
@@ -559,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry,
         eyc
         """
         self.log.debug('Authenticating keystone admin...')
-        api_version = 2
-        client_class = keystone_client.Client
         # 11 => xenial_queens
-        if openstack_release and openstack_release >= 11:
-            api_version = 3
+        if api_version == 3 or (openstack_release and openstack_release >= 11):
             client_class = keystone_client_v3.Client
+            api_version = 3
+        else:
+            client_class = keystone_client.Client
         keystone_ip = keystone_sentry.info['public-address']
         session, auth = self.get_keystone_session(
             keystone_ip,
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 00000000..de853b53
--- /dev/null
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,227 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
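For orientation, the relation payload that CertRequest.get_request() (defined
below) produces is a dict keyed by CN with the SANs for each certificate,
JSON-encoded when json_encode=True. A minimal sketch of that shape, with
made-up CNs and addresses (they are illustrative only, not values the helper
would necessarily emit):

    import json

    # Two batched entries, one hostname-based and one from an os-*-hostname
    # style override; sorted SANs, as get_request() emits them.
    request = {
        'juju-machine-0.maas': {'sans': ['10.0.0.10']},
        'public.example.com': {'sans': ['192.0.2.10', '192.0.2.100']},
    }
    print({'cert_requests': json.dumps(request, sort_keys=True)})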
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+    get_hostname,
+    resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+    local_unit,
+    network_get_primary_address,
+    config,
+    relation_get,
+    unit_get,
+    NoNetworkBinding,
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+    get_vip_in_network,
+    INTERNAL,
+    PUBLIC,
+    ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+    mkdir,
+    write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+    install_ca_cert
+)
+
+
+class CertRequest(object):
+
+    """Create a request for certificates to be generated
+    """
+
+    def __init__(self, json_encode=True):
+        self.entries = []
+        self.hostname_entry = None
+        self.json_encode = json_encode
+
+    def add_entry(self, net_type, cn, addresses):
+        """Add a request to the batch
+
+        :param net_type: str network space name request is for
+        :param cn: str Canonical Name for certificate
+        :param addresses: [] List of addresses to be used as SANs
+        """
+        self.entries.append({
+            'cn': cn,
+            'addresses': addresses})
+
+    def add_hostname_cn(self):
+        """Add a request for the hostname of the machine"""
+        ip = unit_get('private-address')
+        addresses = [ip]
+        # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local units
+        # cert has the appropriate vip in the SAN list
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            addresses.append(vip)
+        self.hostname_entry = {
+            'cn': get_hostname(ip),
+            'addresses': addresses}
+
+    def add_hostname_cn_ip(self, addresses):
+        """Add an address to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+        """
+        for addr in addresses:
+            if addr not in self.hostname_entry['addresses']:
+                self.hostname_entry['addresses'].append(addr)
+
+    def get_request(self):
+        """Generate request from the batched up entries
+
+        """
+        if self.hostname_entry:
+            self.entries.append(self.hostname_entry)
+        request = {}
+        for entry in self.entries:
+            sans = sorted(list(set(entry['addresses'])))
+            request[entry['cn']] = {'sans': sans}
+        if self.json_encode:
+            return {'cert_requests': json.dumps(request, sort_keys=True)}
+        else:
+            return {'cert_requests': request}
+
+
+def get_certificate_request(json_encode=True):
+    """Generate a certificate request based on the network configuration
+
+    """
+    req = CertRequest(json_encode=json_encode)
+    req.add_hostname_cn()
+    # Add os-hostname entries
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        net_config = config(ADDRESS_MAP[net_type]['override'])
+        try:
+            net_addr = resolve_address(endpoint_type=net_type)
+            ip = network_get_primary_address(
+                ADDRESS_MAP[net_type]['binding'])
+            addresses = [net_addr, ip]
+            vip = get_vip_in_network(resolve_network_cidr(ip))
+            if vip:
+                addresses.append(vip)
+            if net_config:
+                req.add_entry(
+                    net_type,
+                    net_config,
+                    addresses)
+            else:
+                # There is a network address with no corresponding hostname.
+                # Add the ip to the hostname cert to allow for this.
+                req.add_hostname_cn_ip(addresses)
+        except NoNetworkBinding:
+            log("Skipping request for certificate for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    return req.get_request()
+
+
+def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
+    """Create symlinks for SAN records
+
+    :param ssl_dir: str Directory to create symlinks in
+    :param custom_hostname_link: str Additional link to be created
+    """
+    hostname = get_hostname(unit_get('private-address'))
+    hostname_cert = os.path.join(
+        ssl_dir,
+        'cert_{}'.format(hostname))
+    hostname_key = os.path.join(
+        ssl_dir,
+        'key_{}'.format(hostname))
+    # Add links to hostname cert, used if os-hostname vars not set
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        try:
+            addr = resolve_address(endpoint_type=net_type)
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
+                os.symlink(hostname_cert, cert)
+                os.symlink(hostname_key, key)
+        except NoNetworkBinding:
+            log("Skipping creating cert symlink for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    if custom_hostname_link:
+        custom_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(custom_hostname_link))
+        custom_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(custom_hostname_link))
+        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
+            os.symlink(hostname_cert, custom_cert)
+            os.symlink(hostname_key, custom_key)
+
+
+def install_certs(ssl_dir, certs, chain=None):
+    """Install the certs passed into the ssl dir and append the chain if
+    provided.
+
+    :param ssl_dir: str Directory to install certs in
+    :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
+    :param chain: str Chain to be appended to certs
+    """
+    for cn, bundle in certs.items():
+        cert_filename = 'cert_{}'.format(cn)
+        key_filename = 'key_{}'.format(cn)
+        cert_data = bundle['cert']
+        if chain:
+            # Append chain file so that clients that trust the root CA will
+            # trust certs signed by an intermediate in the chain
+            cert_data = cert_data + chain
+        write_file(
+            path=os.path.join(ssl_dir, cert_filename),
+            content=cert_data, perms=0o640)
+        write_file(
+            path=os.path.join(ssl_dir, key_filename),
+            content=bundle['key'], perms=0o640)
+
+
+def process_certificates(service_name, relation_id, unit,
+                         custom_hostname_link=None):
+    """Process the certificates supplied down the relation
+
+    :param service_name: str Name of service the certificates are for.
+ :param relation_id: str Relation id providing the certs + :param unit: str Unit providing the certs + :param custom_hostname_link: str Name of custom link to create + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 2d91f0a7..b196d63f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -789,17 +789,18 @@ def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) mkdir(path=ssl_dir) cert, key = get_cert(cn) - if cn: - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - else: - cert_filename = 'cert' - key_filename = 'key' + if cert and key: + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' - write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), perms=0o640) - write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), perms=0o640) + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert), perms=0o640) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -871,23 +872,31 @@ def __call__(self): if not self.external_ports or not https(): return {} - self.configure_ca() + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + self.enable_modules() ctxt = {'namespace': self.service_namespace, 'endpoints': [], 'ext_ports': []} - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index d1476b1a..73102af7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -184,3 +184,13 @@ def resolve_address(endpoint_type=PUBLIC, override=True): "clustered=%s)" % (net_type, clustered)) return resolved_address + + +def get_vip_in_network(network): + matching_vip = None + vips = config('vip') + if vips: + for vip in vips.split(): + if is_address_in_network(network, vip): + matching_vip = vip + return matching_vip diff --git 
a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index d281a3b6..c71824d3 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1366,20 +1366,18 @@ def get_devices(name): return set(devices) -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): +def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, + bluestore=False, key_manager=CEPH_KEY_MANAGER): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt, + ignore_errors, encrypt, bluestore, key_manager) else: osdize_dir(dev, encrypt, bluestore) -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): +def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): """ Prepare a block device for use as a Ceph OSD @@ -1389,8 +1387,6 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, :param: dev: Full path to block device to use :param: osd_format: Format for OSD filesystem :param: osd_journal: List of block devices to use for OSD journals - :param: reformat_osd: Reformat devices that are not currently in use - which have been used previously :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' @@ -1418,7 +1414,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if is_osd_disk(dev) and not reformat_osd: + if is_osd_disk(dev): log('Looks like {} is already an' ' OSD data or journal, skipping.'.format(dev)) return @@ -1432,9 +1428,6 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, ' skipping.'.format(dev)) return - if reformat_osd: - zap_disk(dev) - if cmp_pkgrevno('ceph', '12.2.4') >= 0: cmd = _ceph_volume(dev, osd_journal, diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 2fcdf9b0..77db5863 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -63,7 +63,10 @@ def _add_services(self): and the rest of the service are from lp branches that are compatible with the local charm (e.g. stable or next). """ - this_service = {'name': 'ceph-osd', 'units': 3} + this_service = { + 'name': 'ceph-osd', + 'units': 3, + 'storage': {'osd-devices': 'cinder,10G'}} other_services = [ {'name': 'ceph-mon', 'units': 3}, {'name': 'percona-cluster'}, @@ -118,9 +121,7 @@ def _configure_services(self): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. 
ceph_osd_config = { - 'osd-reformat': True, - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + 'osd-devices': '/srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py index 9c65518e..d21d01d8 100644 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py @@ -50,7 +50,8 @@ def _add_services(self, this_service, other_services): this_service['units'] = 1 self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) + constraints=this_service.get('constraints'), + storage=this_service.get('storage')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): svc['units'] = 1 self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) + constraints=svc.get('constraints'), + storage=svc.get('storage')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef785423..d43038b2 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -544,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -559,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership From 41498c0f817ee11919cfed0da6fab17eb74916ba Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 31 May 2018 15:50:06 +0200 Subject: [PATCH 1497/2699] Add action to zap disk(s) This action includes configuration for disk(s) to zap, as well as an additional required flag for the administrator to acknowledge pending data loss Change-Id: I3106e2f10cf132a628aad025f73161b04215598e Related-Bug: #1698154 --- ceph-osd/actions.yaml | 25 ++++ 
ceph-osd/actions/zap-disk | 1 + ceph-osd/actions/zap_disk.py | 91 +++++++++++++ ceph-osd/unit_tests/test_actions_zap_disk.py | 129 +++++++++++++++++++ 4 files changed, 246 insertions(+) create mode 120000 ceph-osd/actions/zap-disk create mode 100755 ceph-osd/actions/zap_disk.py create mode 100644 ceph-osd/unit_tests/test_actions_zap_disk.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 731de13e..994506cd 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -73,3 +73,28 @@ blacklist-remove-disk: Example: '/dev/vdb /var/tmp/test-osd' required: - osd-devices +zap-disk: + description: | + Purge disk of all data and signatures for use by Ceph + . + This action can be necessary in cases where a Ceph cluster is being + redeployed as the charm defaults to skipping disks that look like Ceph + devices in order to preserve data. In order to forcibly redeploy, the + admin is required to perform this action for each disk to be re-consumed. + . + In addition to triggering this action, it is required to pass an additional + parameter option of `i-really-mean-it` to ensure that the + administrator is aware that this *will* cause data loss on the specified + device(s) + params: + devices: + type: string + description: | + A space-separated list of devices to remove the partition table from. + i-really-mean-it: + type: boolean + description: | + This must be toggled to enable actually performing this action + required: + - devices + - i-really-mean-it diff --git a/ceph-osd/actions/zap-disk b/ceph-osd/actions/zap-disk new file mode 120000 index 00000000..0814a432 --- /dev/null +++ b/ceph-osd/actions/zap-disk @@ -0,0 +1 @@ +zap_disk.py \ No newline at end of file diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py new file mode 100755 index 00000000..aae9896c --- /dev/null +++ b/ceph-osd/actions/zap_disk.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
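The zap() action below refuses to touch devices that are still mounted or
that are active bluestore members. As a rough stand-alone illustration of
the mount check only (the real gate is charmhelpers' is_device_mounted,
which is more thorough; the device path here is illustrative):

    def is_mounted(dev):
        # True if dev appears as a source device in /proc/mounts.
        with open('/proc/mounts') as mounts:
            return any(line.split()[0] == dev for line in mounts)

    print(is_mounted('/dev/vdb'))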
+
+import os
+import sys
+
+sys.path.append('lib')
+sys.path.append('hooks')
+
+import charmhelpers.core.hookenv as hookenv
+from charmhelpers.contrib.storage.linux.utils import (
+    is_block_device,
+    is_device_mounted,
+    zap_disk,
+)
+from charmhelpers.core.unitdata import kv
+from ceph.utils import is_active_bluestore_device
+
+
+def get_devices():
+    """Parse 'devices' action parameter, returns list."""
+    devices = []
+    for path in hookenv.action_get('devices').split(' '):
+        path = path.strip()
+        if not os.path.isabs(path):
+            hookenv.action_fail('{}: Not absolute path.'.format(path))
+            raise ValueError('{}: not an absolute path'.format(path))
+        devices.append(path)
+    return devices
+
+
+def zap():
+    if not hookenv.action_get('i-really-mean-it'):
+        hookenv.action_fail('i-really-mean-it is a required parameter')
+        return
+
+    failed_devices = []
+    not_block_devices = []
+    devices = get_devices()
+    for device in devices:
+        if not is_block_device(device):
+            not_block_devices.append(device)
+        if is_device_mounted(device) or is_active_bluestore_device(device):
+            failed_devices.append(device)
+
+    if failed_devices or not_block_devices:
+        message = ""
+        if failed_devices:
+            message = "{} devices are mounted: {}".format(
+                len(failed_devices),
+                ", ".join(failed_devices))
+        if not_block_devices:
+            if message:
+                message += "\n\n"
+            message += "{} devices are not block devices: {}".format(
+                len(not_block_devices),
+                ", ".join(not_block_devices))
+        hookenv.action_fail(message)
+        return
+    db = kv()
+    used_devices = db.get('osd-devices', [])
+    for device in devices:
+        zap_disk(device)
+        if device in used_devices:
+            used_devices.remove(device)
+    db.set('osd-devices', used_devices)
+    db.flush()
+    hookenv.action_set({
+        'message': "{} disk(s) have been zapped, to use them as OSDs, run: \n"
+                   "juju run-action {} add-disk osd-devices=\"{}\"".format(
+                       len(devices),
+                       hookenv.local_unit(),
+                       " ".join(devices))
+    })
+
+
+if __name__ == "__main__":
+    zap()
diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py
new file mode 100644
index 00000000..47f71e7f
--- /dev/null
+++ b/ceph-osd/unit_tests/test_actions_zap_disk.py
@@ -0,0 +1,129 @@
+# Copyright 2018 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from actions import zap_disk
+
+from test_utils import CharmTestCase
+
+
+class ZapDiskActionTests(CharmTestCase):
+    def setUp(self):
+        super(ZapDiskActionTests, self).setUp(
+            zap_disk, ['hookenv',
+                       'is_block_device',
+                       'is_device_mounted',
+                       'is_active_bluestore_device',
+                       'kv'])
+        self.is_device_mounted.return_value = False
+        self.is_block_device.return_value = True
+        self.is_active_bluestore_device.return_value = False
+        self.kv.return_value = self.kv
+        self.hookenv.local_unit.return_value = "ceph-osd-test/0"
+
+    @mock.patch.object(zap_disk, 'zap_disk')
+    def test_authorized_zap_single_disk(self,
+                                        _zap_disk):
+        """Will zap disk with extra config set"""
+        def side_effect(arg):
+            return {
+                'devices': '/dev/vdb',
+                'i-really-mean-it': True,
+            }.get(arg)
+        self.hookenv.action_get.side_effect = side_effect
+        self.kv.get.return_value = ['/dev/vdb', '/dev/vdz']
+        zap_disk.zap()
+        _zap_disk.assert_called_with('/dev/vdb')
+        self.kv.get.assert_called_with('osd-devices', [])
+        self.kv.set.assert_called_with('osd-devices', ['/dev/vdz'])
+        self.hookenv.action_set.assert_called_with({
+            'message': "1 disk(s) have been zapped, to use "
+                       "them as OSDs, run: \njuju "
+                       "run-action ceph-osd-test/0 add-disk "
+                       "osd-devices=\"/dev/vdb\""
+        })
+
+    @mock.patch.object(zap_disk, 'zap_disk')
+    def test_authorized_zap_multiple_disks(self,
+                                           _zap_disk):
+        """Will zap disk with extra config set"""
+        def side_effect(arg):
+            return {
+                'devices': '/dev/vdb /dev/vdc',
+                'i-really-mean-it': True,
+            }.get(arg)
+        self.hookenv.action_get.side_effect = side_effect
+        self.kv.get.return_value = ['/dev/vdb', '/dev/vdz']
+        zap_disk.zap()
+        _zap_disk.assert_has_calls([
+            mock.call('/dev/vdb'),
+            mock.call('/dev/vdc'),
+        ])
+        self.kv.get.assert_called_with('osd-devices', [])
+        self.kv.set.assert_called_with('osd-devices', ['/dev/vdz'])
+        self.hookenv.action_set.assert_called_with({
+            'message': "2 disk(s) have been zapped, to use "
+                       "them as OSDs, run: \njuju "
+                       "run-action ceph-osd-test/0 add-disk "
+                       "osd-devices=\"/dev/vdb /dev/vdc\""
+        })
+
+    @mock.patch.object(zap_disk, 'zap_disk')
+    def test_wont_zap_non_block_device(self,
+                                       _zap_disk,):
+        """Will not zap a disk that isn't a block device"""
+        def side_effect(arg):
+            return {
+                'devices': '/dev/vdb',
+                'i-really-mean-it': True,
+            }.get(arg)
+        self.hookenv.action_get.side_effect = side_effect
+        self.is_block_device.return_value = False
+        zap_disk.zap()
+        _zap_disk.assert_not_called()
+        self.hookenv.action_fail.assert_called_with(
+            "1 devices are not block devices: /dev/vdb")
+
+    @mock.patch.object(zap_disk, 'zap_disk')
+    def test_wont_zap_mounted_block_device(self,
+                                           _zap_disk):
+        """Will not zap a disk that is mounted"""
+        def side_effect(arg):
+            return {
+                'devices': '/dev/vdb',
+                'i-really-mean-it': True,
+            }.get(arg)
+        self.hookenv.action_get.side_effect = side_effect
+        self.is_device_mounted.return_value = True
+        zap_disk.zap()
+        _zap_disk.assert_not_called()
+        self.hookenv.action_fail.assert_called_with(
+            "1 devices are mounted: /dev/vdb")
+
+    @mock.patch.object(zap_disk, 'zap_disk')
+    def test_wont_zap_mounted_bluestore_device(self,
+                                               _zap_disk):
+        """Will not zap a disk that is an active bluestore device"""
+        def side_effect(arg):
+            return {
+                'devices': '/dev/vdb',
+                'i-really-mean-it': True,
+            }.get(arg)
+        self.hookenv.action_get.side_effect = side_effect
+        self.is_active_bluestore_device.return_value = True
+        zap_disk.zap()
+        _zap_disk.assert_not_called()
+        self.hookenv.action_fail.assert_called_with(
+            "1 devices are mounted: /dev/vdb")

From a5920db8ef54487e4787d40367f259a7b928a492 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Fri, 1 Jun 2018 12:15:01 +0200
Subject: [PATCH 1498/2699] Add pre-flight check for device pristinity

Add `non-pristine` key to `list-disks` action.

No longer attempt to do initialization of `osd-journal` devices.

Make the py27 test a noop

Flip pep8 test to py3

Partial-Bug: #1698154
Change-Id: I0ca574fa7f0683b4e8a693b9f62fbf6b39689789
Depends-On: I90a866aa138d18e4242783c42d4c7c587f696d7d
---
 ceph-osd/actions.yaml                  | 13 +++-
 ceph-osd/actions/list_disks.py         | 23 +++++--
 ceph-osd/hooks/ceph_hooks.py           | 93 ++++++++++----------------
 ceph-osd/hooks/utils.py                | 21 +++++-
 ceph-osd/lib/ceph/utils.py             | 45 ++++++++++---
 ceph-osd/tests/basic_deployment.py     | 64 ++++++++++++++++++
 ceph-osd/tox.ini                       | 10 +--
 ceph-osd/unit_tests/test_ceph_hooks.py | 35 ----------
 ceph-osd/unit_tests/test_ceph_utils.py | 63 +++++++++++++++++
 9 files changed, 252 insertions(+), 115 deletions(-)
 create mode 100644 ceph-osd/unit_tests/test_ceph_utils.py

diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml
index 994506cd..396dc4c1 100644
--- a/ceph-osd/actions.yaml
+++ b/ceph-osd/actions.yaml
@@ -25,7 +25,18 @@ resume:
     Set the local osd units in the charm to 'in'. Note that the pause option
     does NOT stop the osd processes.
 list-disks:
-  description: List the unmounted disk on the specified unit
+  description: |
+    List disks
+    .
+    The 'disks' key is populated with block devices that are known by udev,
+    are not mounted and not mentioned in the 'osd-journal' configuration option.
+    .
+    The 'blacklist' key is populated with osd-devices in the blacklist stored
+    in the local kv store of this specific unit.
+    .
+    The 'non-pristine' key is populated with block devices that are known by
+    udev, are not mounted, not mentioned in the 'osd-journal' configuration option
+    and are currently not eligible for use because of the presence of foreign data.
 add-disk:
   description: Add disk(s) to Ceph
   params:
diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py
index 6bd3bf9b..819310f8 100755
--- a/ceph-osd/actions/list_disks.py
+++ b/ceph-osd/actions/list_disks.py
@@ -15,10 +15,17 @@
 # limitations under the License.
 
 """
-List unmounted devices.
+List disks
 
-This script will get all block devices known by udev and check if they
-are mounted so that we can give unmounted devices to the administrator.
+The 'disks' key is populated with block devices that are known by udev,
+are not mounted and not mentioned in the 'osd-journal' configuration option.
+
+The 'blacklist' key is populated with osd-devices in the blacklist stored
+in the local kv store of this specific unit.
+
+The 'non-pristine' key is populated with block devices that are known by
+udev, are not mounted, not mentioned in the 'osd-journal' configuration option
+and are currently not eligible for use because of the presence of foreign data.
 """
 
 import sys
@@ -32,7 +39,15 @@
 import utils
 
 if __name__ == '__main__':
+    non_pristine = []
+    osd_journal = utils.get_journal_devices()
+    for dev in list(set(ceph.utils.unmounted_disks()) - set(osd_journal)):
+        if (not ceph.utils.is_active_bluestore_device(dev) and
+                not ceph.utils.is_pristine_disk(dev)):
+            non_pristine.append(dev)
+
     hookenv.action_set({
-        'disks': ceph.utils.unmounted_disks(),
+        'disks': list(set(ceph.utils.unmounted_disks()) - set(osd_journal)),
         'blacklist': utils.get_blacklist(),
+        'non-pristine': non_pristine,
     })
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 3c2cf016..605d5999 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -20,7 +20,6 @@
 import os
 import shutil
 import sys
-import tempfile
 import socket
 import subprocess
 import netifaces
@@ -41,6 +40,7 @@
     Hooks,
     UnregisteredHookError,
     service_name,
+    status_get,
     status_set,
     storage_get,
     storage_list,
@@ -76,6 +76,7 @@
     get_public_addr,
     get_cluster_addr,
     get_blacklist,
+    get_journal_devices,
 )
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 from charmhelpers.contrib.network.ip import (
@@ -85,6 +86,9 @@
 )
 from charmhelpers.contrib.storage.linux.ceph import (
     CephConfContext)
+from charmhelpers.contrib.storage.linux.utils import (
+    is_device_mounted,
+)
 from charmhelpers.contrib.charmsupport import nrpe
 from charmhelpers.contrib.hardening.harden import harden
 
@@ -357,38 +361,6 @@ def emit_cephconf(upgrading=False):
                                         charm_ceph_conf, 90)
 
 
-JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped'
-
-
-def read_zapped_journals():
-    if os.path.exists(JOURNAL_ZAPPED):
-        with open(JOURNAL_ZAPPED, 'rt', encoding='UTF-8') as zapfile:
-            zapped = set(
-                filter(None,
-                       [l.strip() for l in zapfile.readlines()]))
-            log("read zapped: {}".format(zapped), level=DEBUG)
-            return zapped
-    return set()
-
-
-def write_zapped_journals(journal_devs):
-    tmpfh, tmpfile = tempfile.mkstemp()
-    with os.fdopen(tmpfh, 'wb') as zapfile:
-        log("write zapped: {}".format(journal_devs),
-            level=DEBUG)
-        zapfile.write('\n'.join(sorted(list(journal_devs))).encode('UTF-8'))
-    shutil.move(tmpfile, JOURNAL_ZAPPED)
-
-
-def check_overlap(journaldevs, datadevs):
-    if not journaldevs.isdisjoint(datadevs):
-        msg = ("Journal/data devices mustn't"
-               " overlap; journal: {0}, data: {1}".format(journaldevs,
-                                                          datadevs))
-        log(msg, level=ERROR)
-        raise ValueError(msg)
-
-
 @hooks.hook('config-changed')
 @harden()
 def config_changed():
@@ -438,13 +410,28 @@ def prepare_disks_and_activate():
         vaultlocker.write_vaultlocker_conf(context)
 
     osd_journal = get_journal_devices()
-    check_overlap(osd_journal, set(get_devices()))
+    if not osd_journal.isdisjoint(set(get_devices())):
+        raise ValueError('`osd-journal` and `osd-devices` options must '
+                         'not overlap.')
     log("got journal devs: {}".format(osd_journal), level=DEBUG)
-    already_zapped = read_zapped_journals()
-    non_zapped = osd_journal - already_zapped
-    for journ in non_zapped:
-        ceph.maybe_zap_journal(journ)
-    write_zapped_journals(osd_journal)
+
+    # pre-flight check of eligible device pristinity
+    devices = get_devices()
+    # filter osd-devices that are file system paths
+    devices = [dev for dev in devices if dev.startswith('/dev')]
+    # filter osd-devices that do not exist on this unit
+    devices = [dev for dev in devices if os.path.exists(dev)]
+    # filter osd-devices that are already mounted
+    devices = [dev for dev in devices if not is_device_mounted(dev)]
+    # filter osd-devices that are active bluestore devices
+    devices = [dev for dev in 
devices + if not ceph.is_active_bluestore_device(dev)] + log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG) + if not all(ceph.is_pristine_disk(dev) for dev in devices): + status_set('blocked', + 'Non-pristine devices detected, consult ' + '`list-disks`, `zap-disk` and `blacklist-*` actions.') + return if ceph.is_bootstrapped(): log('ceph bootstrapped, rescanning disks') @@ -521,20 +508,6 @@ def get_devices(): return [device for device in devices if device not in _blacklist] -def get_journal_devices(): - if config('osd-journal'): - devices = [l.strip() for l in config('osd-journal').split(' ')] - else: - devices = [] - storage_ids = storage_list('osd-journals') - devices.extend((storage_get('location', s) for s in storage_ids)) - - # Filter out any devices in the action managed unit-local device blacklist - _blacklist = get_blacklist() - return set(device for device in devices - if device not in _blacklist and os.path.exists(device)) - - @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): @@ -631,13 +604,15 @@ def assess_status(): # Check for OSD device creation parity i.e. at least some devices # must have been presented and used for this charm to be operational + (prev_status, prev_message) = status_get() running_osds = ceph.get_running_osds() - if not running_osds: - status_set('blocked', - 'No block devices detected using current configuration') - else: - status_set('active', - 'Unit is ready ({} OSD)'.format(len(running_osds))) + if prev_status != 'blocked': + if not running_osds: + status_set('blocked', + 'No block devices detected using current configuration') + else: + status_set('active', + 'Unit is ready ({} OSD)'.format(len(running_osds))) @hooks.hook('update-status') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index b49254c5..a2fffd10 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import socket
 import re
+import os
+import socket
+
 from charmhelpers.core.hookenv import (
     unit_get,
     cached,
@@ -22,6 +24,8 @@
     log,
     DEBUG,
     status_set,
+    storage_get,
+    storage_list,
 )
 from charmhelpers.core import unitdata
 from charmhelpers.fetch import (
@@ -39,6 +43,7 @@
     get_ipv6_addr
 )
 
+
 TEMPLATES_DIR = 'templates'
 
 try:
@@ -213,3 +218,17 @@ def get_blacklist():
     """Get blacklist stored in the local kv() store"""
     db = unitdata.kv()
     return db.get('osd-blacklist', [])
+
+
+def get_journal_devices():
+    if config('osd-journal'):
+        devices = [l.strip() for l in config('osd-journal').split(' ')]
+    else:
+        devices = []
+    storage_ids = storage_list('osd-journals')
+    devices.extend((storage_get('location', s) for s in storage_ids))
+
+    # Filter out any devices in the action managed unit-local device blacklist
+    _blacklist = get_blacklist()
+    return set(device for device in devices
+               if device not in _blacklist and os.path.exists(device))
diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py
index c71824d3..5ff970bf 100644
--- a/ceph-osd/lib/ceph/utils.py
+++ b/ceph-osd/lib/ceph/utils.py
@@ -66,7 +66,6 @@ from charmhelpers.contrib.storage.linux.utils import (
     is_block_device,
     is_device_mounted,
-    zap_disk,
 )
 from charmhelpers.contrib.openstack.utils import (
     get_os_codename_install_source,
@@ -870,7 +869,42 @@ def get_partition_list(dev):
         raise
 
 
+def is_pristine_disk(dev):
+    """
+    Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it
+    is actually all zeros and safe for us to use.
+
+    Existing partitioning tools do not discern between a failure to read from
+    a block device, failure to understand a partition table and the fact that
+    a block device has no partition table. Since we need to be positive about
+    which is which we need to read the device directly and confirm ourselves.
+
+    :param dev: Path to block device
+    :type dev: str
+    :returns: True if all 2048 bytes == 0x0, False if not
+    :rtype: bool
+    """
+    want_bytes = 2048
+
+    with open(dev, 'rb') as f:
+        data = f.read(want_bytes)
+    read_bytes = len(data)
+    if read_bytes != want_bytes:
+        log('{}: short read, got {} bytes expected {}.'
+            .format(dev, read_bytes, want_bytes), level=WARNING)
+        return False
+
+    return all(byte == 0x0 for byte in data)
+
+
 def is_osd_disk(dev):
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    if dev in osd_devices:
+        log('Device {} already processed by charm,'
+            ' skipping'.format(dev))
+        return True
+
     partitions = get_partition_list(dev)
     for partition in partitions:
         try:
@@ -1296,15 +1330,6 @@ def update_monfs():
     pass
 
 
-def maybe_zap_journal(journal_dev):
-    if is_osd_disk(journal_dev):
-        log('Looks like {} is already an OSD data'
-            ' or journal, skipping.'.format(journal_dev))
-        return
-    zap_disk(journal_dev)
-    log("Zapped journal device {}".format(journal_dev))
-
-
 def get_partitions(dev):
     cmd = ['partx', '--raw', '--noheadings', dev]
     try:
diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py
index 77db5863..6e5f880e 100644
--- a/ceph-osd/tests/basic_deployment.py
+++ b/ceph-osd/tests/basic_deployment.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import amulet
+import re
 import time
 
 import keystoneclient
@@ -712,6 +713,69 @@ def test_900_ceph_encryption(self):
                        mtime, unit_name))
             amulet.raise_status('Folder mtime is older than provided mtime')
 
+    def test_901_blocked_when_non_pristine_disk_appears(self):
+        """
+        Validate that the charm goes into a blocked state when it is
+        presented with new block devices that have foreign data on them.
+ + Instances used in UOSCI has a flavour with ephemeral storage in + addition to the bootable instance storage. The ephemeral storage + device is partitioned, formatted and mounted early in the boot process + by cloud-init. + + As long as the device is mounted the charm will not attempt to use it. + + If we unmount it and trigger the config-changed hook the block device + will appear as a new and previously untouched device for the charm. + + One of the first steps of device eligibility checks should be to make + sure we are seeing a pristine and empty device before doing any + further processing. + + As the ephemeral device will have data on it we can use it to validate + that these checks work as intended. + """ + u.log.debug('Checking behaviour when non-pristine disks appear...') + u.log.debug('Configuring ephemeral-unmount...') + self.d.configure('ceph-osd', {'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb'}) + self._auto_wait_for_status(message=re.compile('Non-pristine.*'), + include_only=['ceph-osd']) + u.log.debug('Units now in blocked state, running zap-disk action...') + action_ids = [] + self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] + for unit in range(0, 3): + zap_disk_params = { + 'devices': '/dev/vdb', + 'i-really-mean-it': True, + } + action_id = u.run_action(self.d.sentry['ceph-osd'][unit], + 'zap-disk', params=zap_disk_params) + action_ids.append(action_id) + for unit in range(0, 3): + assert u.wait_on_action(action_ids[unit]), ( + 'zap-disk action failed.') + + u.log.debug('Running add-disk action...') + action_ids = [] + for unit in range(0, 3): + add_disk_params = { + 'osd-devices': '/dev/vdb', + } + action_id = u.run_action(self.d.sentry['ceph-osd'][unit], + 'add-disk', params=add_disk_params) + action_ids.append(action_id) + + # NOTE(fnordahl): LP: #1774694 + # for unit in range(0, 3): + # assert u.wait_on_action(action_ids[unit]), ( + # 'add-disk action failed.') + + u.log.debug('Wait for idle/ready status...') + self._auto_wait_for_status(include_only=['ceph-osd']) + + u.log.debug('OK') + def test_910_pause_and_resume(self): """The services can be paused and resumed. """ u.log.debug('Checking pause and resume actions...') diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 6c223662..99448527 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -18,11 +18,11 @@ whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* [testenv:py27] -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -# temporarily disable py27 -commands = /bin/true +# ceph charms are Python3-only, but py27 unit test target +# is required by OpenStack Governance. Remove this shim as soon as +# permitted. 
http://governance.openstack.org/reference/cti/python_cti.html +whitelist_externals = true +commands = true [testenv:py35] basepython = python3.5 diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index a8d84766..f8c442e9 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -405,21 +405,6 @@ def test_get_devices(self, mock_config, mock_storage_list): devices = ceph_hooks.get_devices() self.assertEqual(devices, ['/dev/vda', '/dev/vdb']) - @patch('os.path.exists') - @patch.object(ceph_hooks, 'storage_list') - @patch.object(ceph_hooks, 'config') - def test_get_journal_devices(self, mock_config, mock_storage_list, - mock_os_path_exists): - '''Devices returned as expected''' - config = {'osd-journal': '/dev/vda /dev/vdb'} - mock_config.side_effect = lambda key: config[key] - mock_storage_list.return_value = [] - mock_os_path_exists.return_value = True - devices = ceph_hooks.get_journal_devices() - mock_storage_list.assert_called() - mock_os_path_exists.assert_called() - self.assertEqual(devices, set(['/dev/vda', '/dev/vdb'])) - @patch.object(ceph_hooks, 'get_blacklist') @patch.object(ceph_hooks, 'storage_list') @patch.object(ceph_hooks, 'config') @@ -435,26 +420,6 @@ def test_get_devices_blacklist(self, mock_config, mock_storage_list, mock_get_blacklist.assert_called() self.assertEqual(devices, ['/dev/vdb']) - @patch('os.path.exists') - @patch.object(ceph_hooks, 'get_blacklist') - @patch.object(ceph_hooks, 'storage_list') - @patch.object(ceph_hooks, 'config') - def test_get_journal_devices_blacklist(self, mock_config, - mock_storage_list, - mock_get_blacklist, - mock_os_path_exists): - '''Devices returned as expected when blacklist in effect''' - config = {'osd-journal': '/dev/vda /dev/vdb'} - mock_config.side_effect = lambda key: config[key] - mock_storage_list.return_value = [] - mock_get_blacklist.return_value = ['/dev/vda'] - mock_os_path_exists.return_value = True - devices = ceph_hooks.get_journal_devices() - mock_storage_list.assert_called() - mock_os_path_exists.assert_called() - mock_get_blacklist.assert_called() - self.assertEqual(devices, set(['/dev/vdb'])) - @patch.object(ceph_hooks, 'log') @patch.object(ceph_hooks, 'config') @patch('os.environ') diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py new file mode 100644 index 00000000..f58ae070 --- /dev/null +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -0,0 +1,63 @@ +# Copyright 2016 Canonical Ltd + +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
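The tests that follow exercise get_journal_devices() in its new home,
hooks/utils.py. Reduced to its core, the filtering under test looks
roughly like the snippet below (the journal paths and blacklist are
illustrative values only):

    import os

    osd_journal = '/dev/vda /dev/vdb'   # the 'osd-journal' config value
    blacklist = ['/dev/vda']            # unit-local action-managed blacklist
    devices = [d.strip() for d in osd_journal.split(' ')]
    print(set(d for d in devices
              if d not in blacklist and os.path.exists(d)))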
+
+import unittest
+
+from mock import patch
+
+with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    import utils
+
+
+class CephUtilsTestCase(unittest.TestCase):
+    def setUp(self):
+        super(CephUtilsTestCase, self).setUp()
+
+    @patch('os.path.exists')
+    @patch.object(utils, 'storage_list')
+    @patch.object(utils, 'config')
+    def test_get_journal_devices(self, mock_config, mock_storage_list,
+                                 mock_os_path_exists):
+        '''Devices returned as expected'''
+        config = {'osd-journal': '/dev/vda /dev/vdb'}
+        mock_config.side_effect = lambda key: config[key]
+        mock_storage_list.return_value = []
+        mock_os_path_exists.return_value = True
+        devices = utils.get_journal_devices()
+        mock_storage_list.assert_called()
+        mock_os_path_exists.assert_called()
+        self.assertEqual(devices, set(['/dev/vda', '/dev/vdb']))
+
+    @patch('os.path.exists')
+    @patch.object(utils, 'get_blacklist')
+    @patch.object(utils, 'storage_list')
+    @patch.object(utils, 'config')
+    def test_get_journal_devices_blacklist(self, mock_config,
+                                           mock_storage_list,
+                                           mock_get_blacklist,
+                                           mock_os_path_exists):
+        '''Devices returned as expected when blacklist in effect'''
+        config = {'osd-journal': '/dev/vda /dev/vdb'}
+        mock_config.side_effect = lambda key: config[key]
+        mock_storage_list.return_value = []
+        mock_get_blacklist.return_value = ['/dev/vda']
+        mock_os_path_exists.return_value = True
+        devices = utils.get_journal_devices()
+        mock_storage_list.assert_called()
+        mock_os_path_exists.assert_called()
+        mock_get_blacklist.assert_called()
+        self.assertEqual(devices, set(['/dev/vdb']))

From acf41f3e31b5f026f89829181ead6171c84f26bf Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Mon, 4 Jun 2018 16:23:58 +0100
Subject: [PATCH 1499/2699] Fix osd object name restriction

The list of devices provided to the charm is currently incorrectly
parsed such that object names are always limited to 256 chars. This
patch ensures that the expected criteria are met.
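Concretely, the old code iterated over the raw config string, which in
Python yields single characters rather than whitespace-separated device
paths; splitting first restores the intended behaviour:

    devices = '/dev/sdb /dev/sdc'
    print([d for d in devices][:4])  # ['/', 'd', 'e', 'v'] - characters
    print(devices.split())           # ['/dev/sdb', '/dev/sdc']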
Change-Id: Ic5b25af614c77b35484b12dc654df5ac595d9d80 Closes-Bug: 1775029 --- ceph-osd/hooks/ceph_hooks.py | 6 +++++- ceph-osd/unit_tests/test_ceph_hooks.py | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3c2cf016..fc11ce33 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -269,7 +269,11 @@ def use_short_objects(): if cmp_pkgrevno('ceph', "10.2.0") >= 0: if config('osd-format') in ('ext4'): return True - for device in config('osd-devices'): + devices = config('osd-devices') + if not devices: + return False + + for device in devices.split(): if device and not device.startswith('/dev'): # TODO: determine format of directory based # OSD location diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index a8d84766..9e1a8d40 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -518,6 +518,28 @@ def test_install_udev_rules(self, shutil, subprocess): ['udevadm', 'control', '--reload-rules'] ) + @patch.object(ceph_hooks, 'config') + @patch.object(ceph_hooks, 'cmp_pkgrevno') + def test_use_short_objects(self, mock_cmp_pkgrevno, mock_config): + + def fake_config(key): + return config.get(key, None) + + mock_config.side_effect = fake_config + mock_cmp_pkgrevno.return_value = True + + config = {'osd-devices': '/dev/sdb /dev/sdc', 'osd-format': 'ext4'} + self.assertTrue(ceph_hooks.use_short_objects()) + + config = {'osd-devices': '/dev/sdb /dev/sdc', 'osd-format': 'xfs'} + self.assertFalse(ceph_hooks.use_short_objects()) + + config = {'osd-devices': '/srv/osd', 'osd-format': 'xfs'} + self.assertTrue(ceph_hooks.use_short_objects()) + + config = {'osd-devices': '/srv/osd', 'osd-format': 'ext4'} + self.assertTrue(ceph_hooks.use_short_objects()) + @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'relation_set') From 7917f194b51f9ff7d164ff6e69428d7743f3eee2 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 4 Jun 2018 18:08:24 +0200 Subject: [PATCH 1500/2699] Fix subscripting of filter object error In Python 3 the filter() built-in returns an iterator and not a list Change-Id: I641565109b0dcc816a66d3ca3eb578fb9ed44a1a Closes-Bug: #1774694 --- ceph-osd/actions/add_disk.py | 3 +-- ceph-osd/tests/basic_deployment.py | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 21ea1ecd..78c7b5e4 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -39,8 +39,7 @@ def add_device(request, device_path, bucket=None): ceph.utils.tune_dev(dev) mounts = filter(lambda disk: device_path in disk.device, psutil.disk_partitions()) - if mounts: - osd = mounts[0] + for osd in mounts: osd_id = osd.mountpoint.split('/')[-1].split('-')[-1] request.ops.append({ 'op': 'move-osd-to-bucket', diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 6e5f880e..5251ada9 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -766,10 +766,9 @@ def test_901_blocked_when_non_pristine_disk_appears(self): 'add-disk', params=add_disk_params) action_ids.append(action_id) - # NOTE(fnordahl): LP: #1774694 - # for unit in range(0, 3): - # assert u.wait_on_action(action_ids[unit]), ( - # 'add-disk action failed.') + for unit in range(0, 3): + assert u.wait_on_action(action_ids[unit]), ( + 'add-disk action failed.') u.log.debug('Wait for
idle/ready status...') self._auto_wait_for_status(include_only=['ceph-osd']) From ba675d9a4ed72af83fc997f9069d870c18c5e0e4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 4 Jun 2018 17:30:53 +0200 Subject: [PATCH 1501/2699] Update tests to use Juju storage Due to changes to the ceph-osd charm, it is suggested to use Juju storage for testing. Change-Id: I844030005f7a470a286039d58580fa1701144a8b Related-Bug: #1698154 --- ceph-fs/src/tests/basic_deployment.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 44fbc21c..4801bb5e 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -63,7 +63,8 @@ def _add_services(self, **kwargs): this_service = {'name': 'ceph-fs', 'units': 1} other_services = [ {'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3}, + {'name': 'ceph-osd', 'units': 3, + 'storage': {'osd-devices': 'cinder,10G'}}, ] super(CephFsBasicDeployment, self)._add_services(this_service, other_services, @@ -94,10 +95,8 @@ def _configure_services(self, **kwargs): # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. ceph_osd_config = { - 'osd-reformat': True, - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent', 'source': self.source, + 'osd-devices': '/srv/ceph /dev/test-non-existent', } configs = { From 09555f83adc6483d151087082930c241afe41436 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 4 Jun 2018 17:31:04 +0200 Subject: [PATCH 1502/2699] Update tests to use Juju storage Due to changes to the ceph-osd charm, it is suggested to use Juju storage for testing. 
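As a sketch, the amulet service definition for ceph-osd gains a storage stanza along these lines (the pool name and size here are illustrative):

    {'name': 'ceph-osd', 'units': 3,
     'storage': {'osd-devices': 'cinder,10G'}}

so the OSD units receive their block devices from Juju rather than from a hard-coded osd-devices config value.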
Change-Id: Icd1b53d8672271a5350b630f14f20bbd3b6c8740 Related-Bug: #1698154 --- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 7 +++++++ ceph-proxy/tests/basic_deployment.py | 9 +++------ .../tests/charmhelpers/contrib/amulet/deployment.py | 6 ++++-- .../charmhelpers/contrib/openstack/amulet/utils.py | 12 +++++++----- ceph-proxy/tests/charmhelpers/core/hookenv.py | 7 +++++++ 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py index 1ea9a8d2..ec345327 100644 --- a/ceph-proxy/tests/basic_deployment.py +++ b/ceph-proxy/tests/basic_deployment.py @@ -47,7 +47,8 @@ def _add_services(self): """ this_service = {'name': 'ceph-proxy'} other_services = [{'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3}, + {'name': 'ceph-osd', 'units': 3, + 'storage': {'osd-devices': 'cinder,10G'}}, {'name': 'ceph-radosgw'}] super(CephBasicDeployment, self)._add_services(this_service, other_services) @@ -64,16 +65,12 @@ def _configure_services(self): ceph_config = { 'monitor-count': '3', 'auth-supported': 'none', - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', - 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', } # Include a non-existent device as osd-devices is a whitelist, # and this will catch cases where proposals attempt to change that. 
ceph_osd_config = { - 'osd-reformat': True, - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + 'osd-devices': '/srv/ceph /dev/test-non-existent' } proxy_config = { diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py index 9c65518e..d21d01d8 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py @@ -50,7 +50,8 @@ def _add_services(self, this_service, other_services): this_service['units'] = 1 self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) + constraints=this_service.get('constraints'), + storage=this_service.get('storage')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): svc['units'] = 1 self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) + constraints=svc.get('constraints'), + storage=svc.get('storage')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..d43038b2 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -542,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -557,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, 
to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership From f74308ce07c510ac11b573b368d8bf8ed6f6123b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 4 Jun 2018 16:43:47 +0200 Subject: [PATCH 1503/2699] Update amulet tests to use ceph storage support Supporting Juju storage in the amulet tests requires a resync of charmhelpers Change-Id: I890a1e9877c007f7335e4ff9265122711150baf3 Related-Bug: #1698154 --- .../contrib/openstack/amulet/utils.py | 12 +- .../contrib/openstack/cert_utils.py | 227 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 51 ++-- .../charmhelpers/contrib/openstack/ip.py | 10 + ceph-mon/hooks/charmhelpers/core/hookenv.py | 7 + ceph-mon/tests/basic_deployment.py | 14 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../contrib/openstack/amulet/utils.py | 12 +- ceph-mon/tests/charmhelpers/core/hookenv.py | 7 + 9 files changed, 305 insertions(+), 41 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..d43038b2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -542,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -557,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py new file mode 100644 index 00000000..de853b53 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -0,0 +1,227 @@ +# Copyright 2014-2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Common python helper functions used for OpenStack charm certificates. + +import os +import json + +from charmhelpers.contrib.network.ip import ( + get_hostname, + resolve_network_cidr, +) +from charmhelpers.core.hookenv import ( + local_unit, + network_get_primary_address, + config, + relation_get, + unit_get, + NoNetworkBinding, + log, + WARNING, +) +from charmhelpers.contrib.openstack.ip import ( + ADMIN, + resolve_address, + get_vip_in_network, + INTERNAL, + PUBLIC, + ADDRESS_MAP) + +from charmhelpers.core.host import ( + mkdir, + write_file, +) + +from charmhelpers.contrib.hahelpers.apache import ( + install_ca_cert +) + + +class CertRequest(object): + + """Create a request for certificates to be generated + """ + + def __init__(self, json_encode=True): + self.entries = [] + self.hostname_entry = None + self.json_encode = json_encode + + def add_entry(self, net_type, cn, addresses): + """Add a request to the batch + + :param net_type: str network space name request is for + :param cn: str Canonical Name for certificate + :param addresses: [] List of addresses to be used as SANs + """ + self.entries.append({ + 'cn': cn, + 'addresses': addresses}) + + def add_hostname_cn(self): + """Add a request for the hostname of the machine""" + ip = unit_get('private-address') + addresses = [ip] + # If a vip is being used without os-hostname config or + # network spaces then we need to ensure the local units + # cert has the appropriate vip in the SAN list + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + addresses.append(vip) + self.hostname_entry = { + 'cn': get_hostname(ip), + 'addresses': addresses} + + def add_hostname_cn_ip(self, addresses): + """Add an address to the SAN list for the hostname request + + :param addresses: [] List of addresses to be added + """ + for addr in addresses: + if addr not in self.hostname_entry['addresses']: + self.hostname_entry['addresses'].append(addr) + + def get_request(self): + """Generate request from the batched up entries + + """ + if self.hostname_entry: + self.entries.append(self.hostname_entry) + request = {} + for entry in self.entries: + sans = sorted(list(set(entry['addresses']))) + request[entry['cn']] = {'sans': sans} + if self.json_encode: + return {'cert_requests': json.dumps(request, sort_keys=True)} + else: + return {'cert_requests': request} + + +def get_certificate_request(json_encode=True): + """Generate a certificate request based on the network configuration + + """ + req = CertRequest(json_encode=json_encode) + req.add_hostname_cn() + # Add os-hostname entries + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['override']) + try: + net_addr = resolve_address(endpoint_type=net_type) + ip = network_get_primary_address( + ADDRESS_MAP[net_type]['binding']) + addresses = [net_addr, ip] + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + addresses.append(vip) + if net_config: + req.add_entry( + net_type, + net_config, + addresses) + else: + # There is a network address with no corresponding hostname. + # Add the ip to the hostname cert to allow for this.
+ req.add_hostname_cn_ip(addresses) + except NoNetworkBinding: + log("Skipping request for certificate for ip in {} space, no " + "local address found".format(net_type), WARNING) + return req.get_request() + + +def create_ip_cert_links(ssl_dir, custom_hostname_link=None): + """Create symlinks for SAN records + + :param ssl_dir: str Directory to create symlinks in + :param custom_hostname_link: str Additional link to be created + """ + hostname = get_hostname(unit_get('private-address')) + hostname_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(hostname)) + hostname_key = os.path.join( + ssl_dir, + 'key_{}'.format(hostname)) + # Add links to hostname cert, used if os-hostname vars not set + for net_type in [INTERNAL, ADMIN, PUBLIC]: + try: + addr = resolve_address(endpoint_type=net_type) + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(hostname_cert) and not os.path.isfile(cert): + os.symlink(hostname_cert, cert) + os.symlink(hostname_key, key) + except NoNetworkBinding: + log("Skipping creating cert symlink for ip in {} space, no " + "local address found".format(net_type), WARNING) + if custom_hostname_link: + custom_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(custom_hostname_link)) + custom_key = os.path.join( + ssl_dir, + 'key_{}'.format(custom_hostname_link)) + if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): + os.symlink(hostname_cert, custom_cert) + os.symlink(hostname_key, custom_key) + + +def install_certs(ssl_dir, certs, chain=None): + """Install the certs passed into the ssl dir and append the chain if + provided. + + :param ssl_dir: str Directory to create symlinks in + :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} + :param chain: str Chain to be appended to certs + """ + for cn, bundle in certs.items(): + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + cert_data = bundle['cert'] + if chain: + # Append chain file so that clients that trust the root CA will + # trust certs signed by an intermediate in the chain + cert_data = cert_data + chain + write_file( + path=os.path.join(ssl_dir, cert_filename), + content=cert_data, perms=0o640) + write_file( + path=os.path.join(ssl_dir, key_filename), + content=bundle['key'], perms=0o640) + + +def process_certificates(service_name, relation_id, unit, + custom_hostname_link=None): + """Process the certificates supplied down the relation + + :param service_name: str Name of service the certificates are for.
+ :param relation_id: str Relation id providing the certs + :param unit: str Unit providing the certs + :param custom_hostname_link: str Name of custom link to create + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 2d91f0a7..b196d63f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -789,17 +789,18 @@ def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) mkdir(path=ssl_dir) cert, key = get_cert(cn) - if cn: - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - else: - cert_filename = 'cert' - key_filename = 'key' + if cert and key: + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' - write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), perms=0o640) - write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), perms=0o640) + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert), perms=0o640) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -871,23 +872,31 @@ def __call__(self): if not self.external_ports or not https(): return {} - self.configure_ca() + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + self.enable_modules() ctxt = {'namespace': self.service_namespace, 'endpoints': [], 'ext_ports': []} - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index d1476b1a..73102af7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -184,3 +184,13 @@ def resolve_address(endpoint_type=PUBLIC, override=True): "clustered=%s)" % (net_type, clustered)) return resolved_address + + +def get_vip_in_network(network): + matching_vip = None + vips = config('vip') + if vips: + for vip in vips.split(): + if is_address_in_network(network, vip): + matching_vip = vip + return matching_vip diff --git 
a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 3d7d4d67..9fe29e40 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -68,7 +68,9 @@ def _add_services(self): other_services = [ {'name': 'percona-cluster'}, {'name': 'keystone'}, - {'name': 'ceph-osd', 'units': 3}, + {'name': 'ceph-osd', + 'units': 3, + 'storage': {'osd-devices': 'cinder,10G'}}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, {'name': 'glance'}, @@ -118,20 +120,16 @@ def _configure_services(self): 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. ceph_osd_config = { - 'osd-reformat': True, - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + 'osd-devices': '/srv/ceph /dev/test-non-existent', } configs = {'keystone': keystone_config, 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config} + 'ceph-osd': ceph_osd_config, + } super(CephBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py index 9c65518e..d21d01d8 100644 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py @@ -50,7 +50,8 @@ def _add_services(self, this_service, other_services): this_service['units'] = 1 self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) + constraints=this_service.get('constraints'), + storage=this_service.get('storage')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): svc['units'] = 1 self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) + constraints=svc.get('constraints'), + storage=svc.get('storage')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..d43038b2 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return 
tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -542,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -557,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership From 6d8c9adad5213c52f547531cd688eab9d2cb9651 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 5 Jun 2018 08:42:19 +0200 Subject: [PATCH 1504/2699] Update README.md with information on initializing disks Weed out some references to the now deprecated `ceph` charm. Add example of juju storage usage and reference to juju storage documentation. Change-Id: Ia9955e2b49589072fd2e1d265a88439d4aebe511 --- ceph-osd/README.md | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index edc26e05..a3f22bf8 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -5,8 +5,8 @@ Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. This charm deploys additional Ceph OSD storage service units and should be -used in conjunction with the 'ceph' charm to scale out the amount of storage -available in a Ceph cluster. +used in conjunction with the 'ceph-mon' charm to scale out the amount of +storage available in a Ceph cluster. Usage ===== @@ -18,6 +18,10 @@ cluster:: A list of devices that the charm will attempt to detect, initialise and activate as ceph storage. + If the charm detects pre-existing data on a device, it will go into a + blocked state and the operator must resolve the situation utilizing the + `list-disks`, `zap-disk` and/or `blacklist-*` actions. + This can be a superset of the actual storage devices presented to each service unit and can be changed post ceph-osd deployment using `juju set`.
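For instance (device paths purely illustrative)::

    juju set ceph-osd osd-devices='/dev/vdb /dev/vdc /dev/vdd'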
@@ -25,20 +29,28 @@ For example:: ceph-osd: + options: osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde -Boot things up by using:: +Example utilizing Juju storage:: + + ceph-osd: + storage: + osd-devices: cinder,20G - juju deploy -n 3 --config ceph.yaml ceph +Please refer to [Juju Storage Documentation](https://docs.jujucharms.com/devel/en/charms-storage) for details on support for various storage providers and cloud substrates. -You can then deploy this charm by simple doing:: +How to deploy:: - juju deploy -n 10 --config ceph.yaml ceph-osd - juju add-relation ceph-osd ceph + juju deploy -n 3 ceph-osd + juju deploy ceph-mon --to lxd:0 + juju add-unit ceph-mon --to lxd:1 + juju add-unit ceph-mon --to lxd:2 + juju add-relation ceph-osd ceph-mon -Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd -charm which will scan for the configured storage devices and add them to the -pool of available storage. +Once the 'ceph-mon' charm has bootstrapped the cluster, it will notify the +ceph-osd charm which will scan for the configured storage devices and add them +to the pool of available storage. Network Space support ===================== From a81b0a469acb93ed76d383a8e917f6537537dd76 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 5 Jun 2018 13:52:05 +0200 Subject: [PATCH 1505/2699] Make action descriptions terse, move to README.md Output of `juju list-action` is, at the time of this writing, formatted in such a way that we should keep descriptions as terse as possible and refer to documentation elsewhere. Change-Id: Ib8e7a4804e696199803b9ac386da7bf02aafd465 --- ceph-osd/README.md | 113 ++++++++++++++++++++++++++++++++++++++++++ ceph-osd/actions.yaml | 83 +++++++++++-------------------- 2 files changed, 142 insertions(+), 54 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index a3f22bf8..84c84e9f 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -122,6 +122,119 @@ options:: new block devices added to the ceph-osd application; existing OSD devices will not be encrypted. +Actions +======= +The charm offers [actions](https://docs.jujucharms.com/devel/en/actions) which +may be used to perform operational tasks on individual units. + +pause +----- +**USE WITH CAUTION** - Sets the local osd units in the charm to 'out' but +does not stop the osds. Unless the osd cluster is set to noout (see below), +this removes them from the ceph cluster and forces ceph to migrate the PGs +to other OSDs in the cluster. + +From [upstream documentation](http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd) +"Do not let your cluster reach its full ratio when removing an OSD. + Removing OSDs could cause the cluster to reach or exceed its full ratio." + +Also note that for small clusters you may encounter the corner case where +some PGs remain stuck in the active+remapped state. Refer to the above link +on how to resolve this. + +`pause-health` (on a ceph-mon unit) can be used before pausing a ceph-osd +unit to stop the cluster rebalancing the data off this ceph-osd unit. +`pause-health` sets 'noout' on the cluster such that it will not try to +rebalance the data across the remaining units. + +It is up to the user of the charm to determine whether pause-health should +be used as it depends on whether the osd is being paused for maintenance or +to remove it from the cluster completely. + +**NOTE** the `pause` action does NOT stop the ceph-osd processes. + +resume +------ +Sets the local osd units in the charm to 'in'.
+ + +list-disks +---------- +List disks + +The 'disks' key is populated with block devices that are known by udev, +are not mounted and not mentioned in the 'osd-journal' configuration option. + +The 'blacklist' key is populated with osd-devices in the blacklist stored +in the local kv store of this specific unit. + +The 'non-pristine' key is populated with block devices that are known by +udev, are not mounted, not mentioned in the 'osd-journal' configuration option +and are currently not eligible for use because of the presence of foreign data. + +add-disk +-------- +Add disk(s) to Ceph + +#### Parameters +- `osd-devices` (required) + - The devices to format and set up as osd volumes. +- `bucket` + - The name of the bucket in Ceph to add these devices into + +blacklist-add-disk +------------------ +Add disk(s) to blacklist. Blacklisted disks will not be +initialized for use with Ceph even if listed in the application +level osd-devices configuration option. + +The current blacklist can be viewed with the list-disks action. + +**NOTE** This action and blacklist will not have any effect on +already initialized disks. + +#### Parameters +- `osd-devices` (required) + - A space-separated list of devices to add to blacklist. + + Each element should be an absolute path to a device node or filesystem + directory (the latter is supported for ceph >= 0.56.6). + + Example: '/dev/vdb /var/tmp/test-osd' + +blacklist-remove-disk +--------------------- +Remove disk(s) from blacklist. + +#### Parameters +- `osd-devices` (required) + - A space-separated list of devices to remove from blacklist. + + Each element should be an existing entry in the unit's blacklist. + Use the list-disks action to list current blacklist entries. + + Example: '/dev/vdb /var/tmp/test-osd' + +zap-disk +-------- +Purge disk of all data and signatures for use by Ceph + +This action can be necessary in cases where a Ceph cluster is being +redeployed, as the charm defaults to skipping disks that look like Ceph +devices in order to preserve data. In order to forcibly redeploy, the +admin is required to perform this action for each disk to be re-consumed. + +In addition to triggering this action, it is required to pass an additional +parameter option of `i-really-mean-it` to ensure that the +administrator is aware that this *will* cause data loss on the specified +device(s) + +#### Parameters +- `devices` (required) + - A space-separated list of devices to remove the partition table from. +- `i-really-mean-it` (required) + - This must be toggled to enable actually performing this action + Contact Information =================== diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 396dc4c1..3be669ce 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -1,44 +1,29 @@ +# NOTE(fnordahl): Output of `juju list-action` is, at the time of this writing, +# formatted in such a way that we should keep descriptions +# as terse as possible and refer to documentation elsewhere. +# +# Verify with `juju list-action` before proposing/committing +# changes. pause: description: | - CAUTION - Set the local osd units in the charm to 'out' but does not stop - the osds. Unless the osd cluster is set to noout (see below), this removes - them from the ceph cluster and forces ceph to migrate the PGs to other OSDs - in the cluster. See the following. - - http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd - "Do not let your cluster reach its full ratio when removing an OSD.
- Removing OSDs could cause the cluster to reach or exceed its full ratio." - Also note that for small clusters you may encounter the corner case where - some PGs remain stuck in the active+remapped state. Refer to the above link - on how to resolve this. - - pause-health (on a ceph-mon) unit can be used before pausing a ceph-osd - unit to stop the cluster rebalancing the data off this ceph-osd unit. - pause-health sets 'noout' on the cluster such that it will not try to - rebalance the data accross the remaining units. - - It is up to the user of the charm to determine whether pause-health should - be used as it depends on whether the osd is being paused for maintenance or - to remove it from the cluster completely. + \ + USE WITH CAUTION - Mark unit OSDs as 'out'. + Documentation: https://jujucharms.com/ceph-osd/ resume: description: | - Set the local osd units in the charm to 'in'. Note that the pause option - does NOT stop the osd processes. + \ + Set the local osd units in the charm to 'in'. + Documentation: https://jujucharms.com/ceph-osd/ list-disks: description: | - List disks - . - The 'disks' key is populated with block devices that are known by udev, - are not mounted and not mentioned in 'osd-journal' configuration option. - . - The 'blacklist' key is populated with osd-devices in the blacklist stored - in the local kv store of this specific unit. - . - The 'non-pristine' key is populated with block devices that are known by - udev, are not mounted, not mentioned in 'osd-journal' configuration option - and are currently not eligible for use because of presence of foreign data. + \ + List disks. + Documentation: https://jujucharms.com/ceph-osd/ add-disk: - description: Add disk(s) to Ceph + description: | + \ + Add disk(s) to Ceph. + Documentation: https://jujucharms.com/ceph-osd/ params: osd-devices: type: string @@ -50,14 +35,9 @@ add-disk: - osd-devices blacklist-add-disk: description: | - Add disk(s) to blacklist. Blacklisted disks will not be - initialized for use with Ceph even if listed in the application - level osd-devices configuration option. - . - The current blacklist can be viewed with list-disks action. - . - NOTE: This action and blacklist will not have any effect on - already initialized disks. + \ + Add disk(s) to blacklist. + Documentation: https://jujucharms.com/ceph-osd/ params: osd-devices: type: string @@ -71,7 +51,10 @@ blacklist-add-disk: required: - osd-devices blacklist-remove-disk: - description: Remove disk(s) from blacklist. + description: | + \ + Remove disk(s) from blacklist. + Documentation: https://jujucharms.com/ceph-osd/ params: osd-devices: type: string @@ -86,17 +69,9 @@ blacklist-remove-disk: - osd-devices zap-disk: description: | - Purge disk of all data and signatures for use by Ceph - . - This action can be necessary in cases where a Ceph cluster is being - redeployed as the charm defaults to skipping disks that look like Ceph - devices in order to preserve data. In order to forcibly redeploy, the - admin is required to perform this action for each disk to be re-consumed. - . - In addition to triggering this action, it is required to pass an additional - parameter option of `i-really-mean-it` to ensure that the - administrator is aware that this *will* cause data loss on the specified - device(s) + \ + USE WITH CAUTION - Purge disk of all data and signatures for use by Ceph. 
+ Documentation: https://jujucharms.com/ceph-osd/ params: devices: type: string From 912b3021e016a5e5f68b74fc4d27e6d2a3ed9b34 Mon Sep 17 00:00:00 2001 From: wangqi Date: Tue, 5 Jun 2018 13:46:27 +0000 Subject: [PATCH 1506/2699] Enable Python hash seed randomization in tests Unit tests should run with hash seed randomization on, to ensure code is not introduced that relies on ordered access of dicts, sets, etc. Python 3.3 enables this at runtime by default. Change-Id: I781fd850039e4817b63edaf275609c1dc47f4949 --- ceph-osd/tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 99448527..041c0e54 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -8,7 +8,6 @@ skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} - PYTHONHASHSEED=0 CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = From 5497b887a45d77f7be385dd0ca71c3d60a66a99c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 4 Jun 2018 17:31:15 +0200 Subject: [PATCH 1507/2699] Update tests to use Juju storage Due to changes to the ceph-osd charm, it is suggested to use Juju storage for testing. Change-Id: Ibdf6f4119001a4a07f9ddbc4fb2a9640860ff386 Related-Bug: #1698154 --- .../charmhelpers/contrib/hahelpers/cluster.py | 5 + .../contrib/openstack/amulet/utils.py | 12 +- .../contrib/openstack/cert_utils.py | 227 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 51 ++-- .../charmhelpers/contrib/openstack/ip.py | 10 + .../hooks/charmhelpers/core/hookenv.py | 7 + ceph-radosgw/tests/basic_deployment.py | 16 +- .../charmhelpers/contrib/amulet/deployment.py | 6 +- .../contrib/openstack/amulet/utils.py | 12 +- .../tests/charmhelpers/core/hookenv.py | 7 + 10 files changed, 309 insertions(+), 44 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 47facd91..4a737e24 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -223,6 +223,11 @@ def https(): return True if config_get('ssl_cert') and config_get('ssl_key'): return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..d43038b2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -542,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def 
get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -557,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py new file mode 100644 index 00000000..de853b53 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -0,0 +1,227 @@ +# Copyright 2014-2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Common python helper functions used for OpenStack charm certificates. + +import os +import json + +from charmhelpers.contrib.network.ip import ( + get_hostname, + resolve_network_cidr, +) +from charmhelpers.core.hookenv import ( + local_unit, + network_get_primary_address, + config, + relation_get, + unit_get, + NoNetworkBinding, + log, + WARNING, +) +from charmhelpers.contrib.openstack.ip import ( + ADMIN, + resolve_address, + get_vip_in_network, + INTERNAL, + PUBLIC, + ADDRESS_MAP) + +from charmhelpers.core.host import ( + mkdir, + write_file, +) + +from charmhelpers.contrib.hahelpers.apache import ( + install_ca_cert +) + + +class CertRequest(object): + + """Create a request for certificates to be generated + """ + + def __init__(self, json_encode=True): + self.entries = [] + self.hostname_entry = None + self.json_encode = json_encode + + def add_entry(self, net_type, cn, addresses): + """Add a request to the batch + + :param net_type: str network space name request is for + :param cn: str Canonical Name for certificate + :param addresses: [] List of addresses to be used as SANs + """ + self.entries.append({ + 'cn': cn, + 'addresses': addresses}) + + def add_hostname_cn(self): + """Add a request for the hostname of the machine""" + ip = unit_get('private-address') + addresses = [ip] + # If a vip is being used without os-hostname config or + # network spaces then we need to ensure the local units + # cert has the appropriate vip in the SAN list + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + addresses.append(vip) + self.hostname_entry = { + 'cn': get_hostname(ip), + 'addresses': addresses} + + def add_hostname_cn_ip(self, addresses): + """Add an address to the SAN list for the hostname request + + :param addresses: [] List of addresses to be added + """ + for addr in addresses: + if addr not in self.hostname_entry['addresses']: +
self.hostname_entry['addresses'].append(addr) + + def get_request(self): + """Generate request from the batched up entries + + """ + if self.hostname_entry: + self.entries.append(self.hostname_entry) + request = {} + for entry in self.entries: + sans = sorted(list(set(entry['addresses']))) + request[entry['cn']] = {'sans': sans} + if self.json_encode: + return {'cert_requests': json.dumps(request, sort_keys=True)} + else: + return {'cert_requests': request} + + +def get_certificate_request(json_encode=True): + """Generate a certificate request based on the network configuration + + """ + req = CertRequest(json_encode=json_encode) + req.add_hostname_cn() + # Add os-hostname entries + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['override']) + try: + net_addr = resolve_address(endpoint_type=net_type) + ip = network_get_primary_address( + ADDRESS_MAP[net_type]['binding']) + addresses = [net_addr, ip] + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + addresses.append(vip) + if net_config: + req.add_entry( + net_type, + net_config, + addresses) + else: + # There is a network address with no corresponding hostname. + # Add the ip to the hostname cert to allow for this. + req.add_hostname_cn_ip(addresses) + except NoNetworkBinding: + log("Skipping request for certificate for ip in {} space, no " + "local address found".format(net_type), WARNING) + return req.get_request() + + +def create_ip_cert_links(ssl_dir, custom_hostname_link=None): + """Create symlinks for SAN records + + :param ssl_dir: str Directory to create symlinks in + :param custom_hostname_link: str Additional link to be created + """ + hostname = get_hostname(unit_get('private-address')) + hostname_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(hostname)) + hostname_key = os.path.join( + ssl_dir, + 'key_{}'.format(hostname)) + # Add links to hostname cert, used if os-hostname vars not set + for net_type in [INTERNAL, ADMIN, PUBLIC]: + try: + addr = resolve_address(endpoint_type=net_type) + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(hostname_cert) and not os.path.isfile(cert): + os.symlink(hostname_cert, cert) + os.symlink(hostname_key, key) + except NoNetworkBinding: + log("Skipping creating cert symlink for ip in {} space, no " + "local address found".format(net_type), WARNING) + if custom_hostname_link: + custom_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(custom_hostname_link)) + custom_key = os.path.join( + ssl_dir, + 'key_{}'.format(custom_hostname_link)) + if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): + os.symlink(hostname_cert, custom_cert) + os.symlink(hostname_key, custom_key) + + +def install_certs(ssl_dir, certs, chain=None): + """Install the certs passed into the ssl dir and append the chain if + provided.
+ + :param ssl_dir: str Directory to create symlinks in + :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} + :param chain: str Chain to be appended to certs + """ + for cn, bundle in certs.items(): + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + cert_data = bundle['cert'] + if chain: + # Append chain file so that clients that trust the root CA will + # trust certs signed by an intermediate in the chain + cert_data = cert_data + chain + write_file( + path=os.path.join(ssl_dir, cert_filename), + content=cert_data, perms=0o640) + write_file( + path=os.path.join(ssl_dir, key_filename), + content=bundle['key'], perms=0o640) + + +def process_certificates(service_name, relation_id, unit, + custom_hostname_link=None): + """Process the certificates supplied down the relation + + :param service_name: str Name of service the certificates are for. + :param relation_id: str Relation id providing the certs + :param unit: str Unit providing the certs + :param custom_hostname_link: str Name of custom link to create + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 2d91f0a7..b196d63f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -789,17 +789,18 @@ def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) mkdir(path=ssl_dir) cert, key = get_cert(cn) - if cn: - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - else: - cert_filename = 'cert' - key_filename = 'key' + if cert and key: + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' - write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), perms=0o640) - write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), perms=0o640) + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert), perms=0o640) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -871,23 +872,31 @@ def __call__(self): if not self.external_ports or not https(): return {} - self.configure_ca() + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + self.enable_modules() ctxt = {'namespace': self.service_namespace, 'endpoints': [], 'ext_ports': []} - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in
config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index d1476b1a..73102af7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -184,3 +184,13 @@ def resolve_address(endpoint_type=PUBLIC, override=True): "clustered=%s)" % (net_type, clustered)) return resolved_address + + +def get_vip_in_network(network): + matching_vip = None + vips = config('vip') + if vips: + for vip in vips.split(): + if is_address_in_network(network, vip): + matching_vip = vip + return matching_vip diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 627d8f79..ed7af39e 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -972,6 +972,13 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 5144e6c7..16b2c57c 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -62,7 +62,8 @@ def _add_services(self): this_service = {'name': 'ceph-radosgw'} other_services = [ {'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3}, + {'name': 'ceph-osd', 'units': 3, + 'storage': {'osd-devices': 'cinder,10G'}}, {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, @@ -113,23 +114,17 @@ def _configure_services(self): ceph_config = { 'monitor-count': '3', 'auth-supported': 'none', - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', - 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. 
ceph_osd_config = { - 'osd-reformat': True, - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + 'osd-devices': '/srv/ceph /dev/test-non-existent' } configs = {'keystone': keystone_config, 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config} + 'ceph-osd': ceph_osd_config, + } super(CephRadosGwBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): @@ -324,7 +319,6 @@ def test_201_ceph_radosgw_relation(self): 'radosgw_key': u.not_null, 'auth': 'none', 'ceph-public-address': u.valid_ip, - 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' } ret = [] diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py index 9c65518e..d21d01d8 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py @@ -50,7 +50,8 @@ def _add_services(self, this_service, other_services): this_service['units'] = 1 self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) + constraints=this_service.get('constraints'), + storage=this_service.get('storage')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ def _add_services(self, this_service, other_services): svc['units'] = 1 self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) + constraints=svc.get('constraints'), + storage=svc.get('storage')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index 84e87f5d..d43038b2 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -40,6 +40,7 @@ import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -423,6 +424,7 @@ def tenant_exists(self, keystone, tenant): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -542,7 +544,7 @@ def get_keystone_endpoint(self, keystone_ip, api_version=None, return ep def get_default_keystone_session(self, keystone_sentry, - openstack_release=None): + openstack_release=None, api_version=2): """Return a keystone session object and client object assuming standard default settings @@ -557,12 +559,12 @@ def get_default_keystone_session(self, keystone_sentry, eyc """ self.log.debug('Authenticating keystone admin...') - api_version = 2 - client_class = keystone_client.Client # 11 => xenial_queens - if openstack_release and openstack_release >= 11: - api_version = 3 + if api_version == 3 or (openstack_release and openstack_release >= 11): client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client keystone_ip = keystone_sentry.info['public-address'] session, auth = self.get_keystone_session( keystone_ip, diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 
627d8f79..ed7af39e 100644
--- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py
+++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py
@@ -972,6 +972,13 @@ def application_version_set(version):
     log("Application Version: {}".format(version))

+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def goal_state():
+    """Juju goal state values"""
+    cmd = ['goal-state', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership

From 747fd9a321a15ad76249c317c2036a897ca5a376 Mon Sep 17 00:00:00 2001
From: Ryan Beisner
Date: Thu, 31 May 2018 07:29:22 -0500
Subject: [PATCH 1508/2699] Remove explicit fsid charm config option

The charm has had the capability to automatically handle fsid in
deployments for several cycles. Explicitly setting the fsid can lead
to unpredictable behavior when data from previous deployments remains
on block devices and the fsid is explicitly static across those
separate deployments: Ceph would see the fsid match and attempt to
reuse the block device from a previous deployment as-is,
unsuccessfully.

This primarily affects legacy development and CI environments, which
may be in place from a time before the charm automatically handled
fsids. The recommendation is for both test and production deployments
to defer fsid generation to the charm.

Change-Id: I0b87576810faa08a81dc2d559ef925ea02f58db0
Partial-bug: #1698154
---
 ceph-mon/config.yaml               | 11 -----------
 ceph-mon/hooks/ceph_hooks.py       |  5 +----
 ceph-mon/tests/basic_deployment.py |  5 +----
 3 files changed, 2 insertions(+), 19 deletions(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index e718c372..4b689850 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -32,17 +32,6 @@ options:
     description: |
       Apply system hardening. Supports a space-delimited list of modules
       to run. Supported modules currently include os, ssh, apache and mysql.
-  fsid:
-    type: string
-    default:
-    description: |
-      The unique identifier (fsid) of the Ceph cluster.
-      .
-      To generate a suitable value use `uuidgen`.
-      If left empty, an fsid will be generated.
-      .
-      NOTE: Changing this configuration after deployment is not supported and
-      new service units will not be able to join the cluster.
   config-flags:
     type: string
     default:
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index c5f3046b..24095b33 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -205,10 +205,7 @@ def config_changed():
     if is_leader():
         if not config('no-bootstrap'):
             if not leader_get('fsid') or not leader_get('monitor-secret'):
-                if config('fsid'):
-                    fsid = config('fsid')
-                else:
-                    fsid = "{}".format(uuid.uuid1())
+                fsid = "{}".format(uuid.uuid1())
                 if config('monitor-secret'):
                     mon_secret = config('monitor-secret')
                 else:
diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py
index 9fe29e40..d1c4cf76 100644
--- a/ceph-mon/tests/basic_deployment.py
+++ b/ceph-mon/tests/basic_deployment.py
@@ -117,13 +117,11 @@ def _configure_services(self):
         ceph_config = {
             'monitor-count': '3',
             'auth-supported': 'none',
-            'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
-            'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
         }
+
         ceph_osd_config = {
             'osd-devices': '/srv/ceph /dev/test-non-existent',
         }
-
         configs = {'keystone': keystone_config,
                    'percona-cluster': pxc_config,
                    'cinder': cinder_config,
@@ -402,7 +400,6 @@ def test_300_ceph_config(self):
         conf = '/etc/ceph/ceph.conf'
         expected = {
             'global': {
-                'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
                 'log to syslog': 'false',
                 'err to syslog': 'false',
                 'clog to syslog': 'false',

From 092f2c04048b768a386a49ac0f74c335d2e147c8 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Tue, 5 Jun 2018 18:32:50 +0200
Subject: [PATCH 1509/2699] Fix logic error introduced in assess_status()

Commit a5920db8ef54487e4787d40367f259a7b928a492 introduced a change in
assess_status to not update status as long as the previous status was
'blocked'. This was done because the check for pristine block devices
is done elsewhere in the charm. However, there are other situations
that might put the charm into a 'blocked' state. This commit addresses
that error.

Change-Id: Icdc272dadd7f9ceb670d37238628d31f33d11770
---
 ceph-osd/hooks/ceph_hooks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 6d2b2ec5..ce2aba9e 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -610,7 +610,7 @@ def assess_status():
     # must have been presented and used for this charm to be operational
     (prev_status, prev_message) = status_get()
     running_osds = ceph.get_running_osds()
-    if prev_status != 'blocked':
+    if not prev_message.startswith('Non-pristine'):
         if not running_osds:
             status_set('blocked',
                        'No block devices detected using current configuration')

From cb178fcb163162cae55c1dfcf6eb7f672bc3b0fd Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Wed, 6 Jun 2018 09:59:32 +0200
Subject: [PATCH 1510/2699] Update tests to not set `fsid` and `monitor-secret`

Get `fsid` from leader settings on the ceph-mon unit where needed for
validation.
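In practice the validation pattern this change adopts looks like the
following (a minimal sketch using the amulet sentry API these tests
already use; the exact relation and assertion shape are illustrative):

    # Read the charm-generated fsid from Juju leader storage on a
    # ceph-mon sentry unit; run() executes the command on the unit and
    # returns a (stdout, exit_code) tuple.
    (fsid, _) = unit.run('leader-get fsid')
    # Validate relation data against the leader-held value instead of a
    # hard-coded UUID baked into charm config.
    expected = {'fsid': fsid}
    ret = u.validate_relation_data(unit, ['osd', 'ceph-osd:mon'], expected)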
Change-Id: I751ecff76873a599c0d03ec1308e30e615e38aa8
Related-Bug: #1698154
---
 ceph-osd/tests/basic_deployment.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py
index 5251ada9..2cc4664f 100644
--- a/ceph-osd/tests/basic_deployment.py
+++ b/ceph-osd/tests/basic_deployment.py
@@ -115,8 +115,6 @@ def _configure_services(self):
         ceph_config = {
             'monitor-count': '3',
             'auth-supported': 'none',
-            'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
-            'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
         }

         # Include a non-existent device as osd-devices is a whitelist,
@@ -322,13 +320,14 @@ def test_201_ceph0_to_ceph_osd_relation(self):
         """Verify the ceph0 to ceph-osd relation data."""
         u.log.debug('Checking ceph0:ceph-osd mon relation data...')
         unit = self.ceph0_sentry
+        (fsid, _) = unit.run('leader-get fsid')
         relation = ['osd', 'ceph-osd:mon']
         expected = {
             'osd_bootstrap_key': u.not_null,
             'private-address': u.valid_ip,
             'auth': u'none',
             'ceph-public-address': u.valid_ip,
-            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
+            'fsid': fsid,
         }

         ret = u.validate_relation_data(unit, relation, expected)
@@ -340,13 +339,14 @@ def test_202_ceph1_to_ceph_osd_relation(self):
         """Verify the ceph1 to ceph-osd relation data."""
         u.log.debug('Checking ceph1:ceph-osd mon relation data...')
         unit = self.ceph1_sentry
+        (fsid, _) = unit.run('leader-get fsid')
         relation = ['osd', 'ceph-osd:mon']
         expected = {
             'osd_bootstrap_key': u.not_null,
             'private-address': u.valid_ip,
             'auth': u'none',
             'ceph-public-address': u.valid_ip,
-            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
+            'fsid': fsid,
         }

         ret = u.validate_relation_data(unit, relation, expected)
@@ -358,13 +358,14 @@ def test_203_ceph2_to_ceph_osd_relation(self):
         """Verify the ceph2 to ceph-osd relation data."""
         u.log.debug('Checking ceph2:ceph-osd mon relation data...')
         unit = self.ceph2_sentry
+        (fsid, _) = unit.run('leader-get fsid')
         relation = ['osd', 'ceph-osd:mon']
         expected = {
             'osd_bootstrap_key': u.not_null,
             'private-address': u.valid_ip,
             'auth': u'none',
             'ceph-public-address': u.valid_ip,
-            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
+            'fsid': fsid,
         }

         ret = u.validate_relation_data(unit, relation, expected)
@@ -375,6 +376,9 @@ def test_203_ceph2_to_ceph_osd_relation(self):
     def test_300_ceph_osd_config(self):
         """Verify the data in the ceph config file."""
         u.log.debug('Checking ceph config file data...')
+        mon_unit = self.ceph0_sentry
+        (fsid, _) = mon_unit.run('leader-get fsid')
+
         unit = self.ceph_osd_sentry
         conf = '/etc/ceph/ceph.conf'
         expected = {
@@ -382,7 +386,7 @@ def test_300_ceph_osd_config(self):
             'auth cluster required': 'none',
             'auth service required': 'none',
             'auth client required': 'none',
-            'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
+            'fsid': fsid,
             'log to syslog': 'false',
             'err to syslog': 'false',
             'clog to syslog': 'false'

From 9661dea07973ba3e71b0795695a7577940c22bd5 Mon Sep 17 00:00:00 2001
From: James Page
Date: Fri, 8 Jun 2018 12:02:06 +0100
Subject: [PATCH 1511/2699] Add 'osd blacklist' to default mon perms

Ensure that the default permissions for clients include the
'osd blacklist' command; this ensures that in the event of a client
crashing (due to power outage or segfault), the client can re-connect
and write to any devices on reboot.

This is a safe permission for all supported Ceph releases.
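Concretely, the default capability set after this change (taken from
the lib/ceph/utils.py hunk below) grants clients read access to the
mons plus the blacklist command; shown here as a sketch for reference:

    # Default cephx capabilities for clients: 'osd blacklist' lets a
    # recovering client remove its own stale blacklist entry on reboot.
    _default_caps = collections.OrderedDict([
        ('mon', ['allow r',
                 'allow command "osd blacklist"']),
        ('osd', ['allow rwx']),
    ])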
Depends-On: I0b43dece4e1c56fb838b0147bfb75fb9906e6657 Change-Id: Ib1f1e8d7ed54528603b8b08051dafeec075a3232 Closes-Bug: 1773449 --- ceph-mon/hooks/ceph_hooks.py | 5 + ceph-mon/lib/ceph/broker.py | 7 +- ceph-mon/lib/ceph/utils.py | 662 ++++++++++++++++++------- ceph-mon/unit_tests/test_ceph_hooks.py | 8 +- 4 files changed, 503 insertions(+), 179 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 24095b33..e192d846 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -658,6 +658,11 @@ def upgrade_charm(): if is_relation_made("nrpe-external-master"): update_nrpe_config() + # NOTE(jamespage): + # Reprocess broker requests to ensure that any cephx + # key permission changes are applied + notify_client() + @hooks.hook('start') def start(): diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 0b6d3e24..3e857d21 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -81,6 +81,10 @@ "cache_min_flush_age": [int], "cache_min_evict_age": [int], "fast_read": [bool], + "allow_ec_overwrites": [bool], + "compression_mode": [str, ["none", "passive", "aggressive", "force"]], + "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], + "compression_required_ratio": [float, [0.0, 1.0]], } CEPH_BUCKET_TYPES = [ @@ -251,7 +255,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ["mon", "allow r", "osd", ', '.join(permissions)] + return ['mon', 'allow r, allow command "osd blacklist"', + 'osd', ', '.join(permissions)] def get_service_groups(service, namespace=None): diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index f412032b..7bb951ca 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -13,8 +13,7 @@ # limitations under the License. import collections -import ctypes -import errno +import glob import json import os import pyudev @@ -24,7 +23,7 @@ import subprocess import sys import time -import shutil +import uuid from datetime import datetime @@ -36,7 +35,6 @@ cmp_pkgrevno, lsb_release, mkdir, - mounts, owner, service_restart, service_start, @@ -68,11 +66,12 @@ from charmhelpers.contrib.storage.linux.utils import ( is_block_device, is_device_mounted, - zap_disk, ) from charmhelpers.contrib.openstack.utils import ( get_os_codename_install_source, ) +from charmhelpers.contrib.storage.linux import lvm +from charmhelpers.core.unitdata import kv CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') @@ -83,7 +82,15 @@ QUORUM = [LEADER, PEON] PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev'] + 'radosgw', 'xfsprogs', 'python-pyudev', + 'lvm2', 'parted'] + +CEPH_KEY_MANAGER = 'ceph' +VAULT_KEY_MANAGER = 'vault' +KEY_MANAGERS = [ + CEPH_KEY_MANAGER, + VAULT_KEY_MANAGER, +] LinkSpeed = { "BASE_10": 10, @@ -823,114 +830,6 @@ def add_bootstrap_hint(peer): ] -def umount(mount_point): - """This function unmounts a mounted directory forcibly. This will - be used for unmounting broken hard drive mounts which may hang. - - If umount returns EBUSY this will lazy unmount. - - :param mount_point: str. A String representing the filesystem mount point - :returns: int. Returns 0 on success. errno otherwise. 
- """ - libc_path = ctypes.util.find_library("c") - libc = ctypes.CDLL(libc_path, use_errno=True) - - # First try to umount with MNT_FORCE - ret = libc.umount(mount_point, 1) - if ret < 0: - err = ctypes.get_errno() - if err == errno.EBUSY: - # Detach from try. IE lazy umount - ret = libc.umount(mount_point, 2) - if ret < 0: - err = ctypes.get_errno() - return err - return 0 - else: - return err - return 0 - - -def replace_osd(dead_osd_number, - dead_osd_device, - new_osd_device, - osd_format, - osd_journal, - reformat_osd=False, - ignore_errors=False): - """This function will automate the replacement of a failed osd disk as much - as possible. It will revoke the keys for the old osd, remove it from the - crush map and then add a new osd into the cluster. - - :param dead_osd_number: The osd number found in ceph osd tree. Example: 99 - :param dead_osd_device: The physical device. Example: /dev/sda - :param osd_format: - :param osd_journal: - :param reformat_osd: - :param ignore_errors: - """ - host_mounts = mounts() - mount_point = None - for mount in host_mounts: - if mount[1] == dead_osd_device: - mount_point = mount[0] - # need to convert dev to osd number - # also need to get the mounted drive so we can tell the admin to - # replace it - try: - # Drop this osd out of the cluster. This will begin a - # rebalance operation - status_set('maintenance', 'Removing osd {}'.format(dead_osd_number)) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'out', - 'osd.{}'.format(dead_osd_number)]) - - # Kill the osd process if it's not already dead - if systemd(): - service_stop('ceph-osd@{}'.format(dead_osd_number)) - else: - subprocess.check_output(['stop', 'ceph-osd', 'id={}'.format( - dead_osd_number)]) - # umount if still mounted - ret = umount(mount_point) - if ret < 0: - raise RuntimeError('umount {} failed with error: {}'.format( - mount_point, os.strerror(ret))) - # Clean up the old mount point - shutil.rmtree(mount_point) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'crush', 'remove', - 'osd.{}'.format(dead_osd_number)]) - # Revoke the OSDs access keys - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'auth', 'del', - 'osd.{}'.format(dead_osd_number)]) - subprocess.check_output([ - 'ceph', - '--id', - 'osd-upgrade', - 'osd', 'rm', - 'osd.{}'.format(dead_osd_number)]) - status_set('maintenance', 'Setting up replacement osd {}'.format( - new_osd_device)) - osdize(new_osd_device, - osd_format, - osd_journal, - reformat_osd, - ignore_errors) - except subprocess.CalledProcessError as e: - log('replace_osd failed with error: ' + e.output) - - def get_partition_list(dev): """Lists the partitions of a block device. @@ -970,7 +869,42 @@ def get_partition_list(dev): raise +def is_pristine_disk(dev): + """ + Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it + is actually all zeros and safe for us to use. + + Existing partitioning tools does not discern between a failure to read from + block device, failure to understand a partition table and the fact that a + block device has no partition table. Since we need to be positive about + which is which we need to read the device directly and confirm ourselves. + + :param dev: Path to block device + :type dev: str + :returns: True all 2048 bytes == 0x0, False if not + :rtype: bool + """ + want_bytes = 2048 + + f = open(dev, 'rb') + data = f.read(want_bytes) + read_bytes = len(data) + if read_bytes != want_bytes: + log('{}: short read, got {} bytes expected {}.' 
+ .format(dev, read_bytes, want_bytes), level=WARNING) + return False + + return all(byte == 0x0 for byte in data) + + def is_osd_disk(dev): + db = kv() + osd_devices = db.get('osd-devices', []) + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return True + partitions = get_partition_list(dev) for partition in partitions: try: @@ -1008,6 +942,9 @@ def rescan_osd_devices(): subprocess.call(cmd) + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" @@ -1159,7 +1096,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ - ('mon', ['allow r']), + ('mon', ['allow r', + 'allow command "osd blacklist"']), ('osd', ['allow rwx']), ]) @@ -1226,6 +1164,7 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :returns: Returns a cephx key """ + key_name = 'client.{}'.format(name) try: # Does the key already exist? output = str(subprocess.check_output( @@ -1240,8 +1179,14 @@ def get_named_key(name, caps=None, pool_list=None): ), 'auth', 'get', - 'client.{}'.format(name), + key_name, ]).decode('UTF-8')).strip() + # NOTE(jamespage); + # Apply any changes to key capabilities, dealing with + # upgrades which requires new caps for operation. + upgrade_key_caps(key_name, + caps or _default_caps, + pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! @@ -1257,7 +1202,7 @@ def get_named_key(name, caps=None, pool_list=None): '/var/lib/ceph/mon/ceph-{}/keyring'.format( socket.gethostname() ), - 'auth', 'get-or-create', 'client.{}'.format(name), + 'auth', 'get-or-create', key_name, ] # Add capabilities for subsystem, subcaps in caps.items(): @@ -1276,7 +1221,7 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 -def upgrade_key_caps(key, caps): +def upgrade_key_caps(key, caps, pool_list=None): """ Upgrade key to have capabilities caps """ if not is_leader(): # Not the MON leader OR not clustered @@ -1285,6 +1230,12 @@ def upgrade_key_caps(key, caps): "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) @@ -1393,15 +1344,6 @@ def update_monfs(): pass -def maybe_zap_journal(journal_dev): - if is_osd_disk(journal_dev): - log('Looks like {} is already an OSD data' - ' or journal, skipping.'.format(journal_dev)) - return - zap_disk(journal_dev) - log("Zapped journal device {}".format(journal_dev)) - - def get_partitions(dev): cmd = ['partx', '--raw', '--noheadings', dev] try: @@ -1413,17 +1355,35 @@ def get_partitions(dev): return [] -def find_least_used_utility_device(utility_devices): +def get_lvs(dev): + """ + List logical volumes for the provided block device + + :param: dev: Full path to block device. + :raises subprocess.CalledProcessError: in the event that any supporting + operation failed. 
+ :returns: list: List of logical volumes provided by the block device + """ + if not lvm.is_lvm_physical_volume(dev): + return [] + vg_name = lvm.list_lvm_volume_group(dev) + return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) + + +def find_least_used_utility_device(utility_devices, lvs=False): """ Find a utility device which has the smallest number of partitions among other devices in the supplied list. :utility_devices: A list of devices to be used for filestore journal or bluestore wal or db. + :lvs: flag to indicate whether inspection should be based on LVM LV's :return: string device name """ - - usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) + if lvs: + usages = map(lambda a: (len(get_lvs(a)), a), utility_devices) + else: + usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) least = min(usages, key=lambda t: t[0]) return least[1] @@ -1445,18 +1405,46 @@ def get_devices(name): return set(devices) -def osdize(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): +def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, + bluestore=False, key_manager=CEPH_KEY_MANAGER): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, - reformat_osd, ignore_errors, encrypt, - bluestore) + ignore_errors, encrypt, + bluestore, key_manager) else: osdize_dir(dev, encrypt, bluestore) -def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, - ignore_errors=False, encrypt=False, bluestore=False): +def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + """ + Prepare a block device for use as a Ceph OSD + + A block device will only be prepared once during the lifetime + of the calling charm unit; future executions will be skipped. 
+ + :param: dev: Full path to block device to use + :param: osd_format: Format for OSD filesystem + :param: osd_journal: List of block devices to use for OSD journals + :param: ignore_errors: Don't fail in the event of any errors during + processing + :param: encrypt: Encrypt block devices using 'key_manager' + :param: bluestore: Use bluestore native ceph block device format + :param: key_manager: Key management approach for encryption keys + :raises subprocess.CalledProcessError: in the event that any supporting + subprocess operation failed + :raises ValueError: if an invalid key_manager is provided + """ + if key_manager not in KEY_MANAGERS: + raise ValueError('Unsupported key manager: {}'.format(key_manager)) + + db = kv() + osd_devices = db.get('osd-devices', []) + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return + if not os.path.exists(dev): log('Path {} does not exist - bailing'.format(dev)) return @@ -1465,7 +1453,7 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Path {} is not a block device - bailing'.format(dev)) return - if is_osd_disk(dev) and not reformat_osd: + if is_osd_disk(dev): log('Looks like {} is already an' ' OSD data or journal, skipping.'.format(dev)) return @@ -1474,58 +1462,378 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False, log('Looks like {} is in use, skipping.'.format(dev)) return - status_set('maintenance', 'Initializing device {}'.format(dev)) - cmd = ['ceph-disk', 'prepare'] - # Later versions of ceph support more options - if cmp_pkgrevno('ceph', '0.60') >= 0: - if encrypt: - cmd.append('--dmcrypt') - if cmp_pkgrevno('ceph', '0.48.3') >= 0: - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - if reformat_osd: - cmd.append('--zap-disk') - - # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') - - cmd.append(dev) - - if osd_journal: - least_used = find_least_used_utility_device(osd_journal) - cmd.append(least_used) + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + return + + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore, + key_manager) else: - # Just provide the device - no other options - # for older versions of ceph - cmd.append(dev) - if reformat_osd: - zap_disk(dev) + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) try: + status_set('maintenance', 'Initializing device {}'.format(dev)) log("osdize cmd: {}".format(cmd)) subprocess.check_call(cmd) except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Couldn't get lsblk output: {}".format(e), ERROR) if ignore_errors: log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) else: log('Unable to initialize device: {}'.format(dev), ERROR) + if 
lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) raise + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + db.set('osd-devices', osd_devices) + db.flush() + + +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): + """ + Prepare a device for usage as a Ceph OSD using ceph-disk + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption (unsupported) + :param: bluestore: Use bluestore storage for OSD + :returns: list. 'ceph-disk' command and required parameters for + execution by check_call + """ + cmd = ['ceph-disk', 'prepare'] + + if encrypt: + cmd.append('--dmcrypt') + + if osd_format and not bluestore: + cmd.append('--fs-type') + cmd.append(osd_format) + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') + + cmd.append(dev) + + if osd_journal: + least_used = find_least_used_utility_device(osd_journal) + cmd.append(least_used) + + return cmd + + +def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): + """ + Prepare and activate a device for usage as a Ceph OSD using ceph-volume. + + This also includes creation of all PV's, VG's and LV's required to + support the initialization of the OSD. + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption + :param: bluestore: Use bluestore storage for OSD + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM operation failed. + :returns: list. 
'ceph-volume' command and required parameters for + execution by check_call + """ + cmd = ['ceph-volume', 'lvm', 'create'] + + osd_fsid = str(uuid.uuid4()) + cmd.append('--osd-fsid') + cmd.append(osd_fsid) + + if bluestore: + cmd.append('--bluestore') + main_device_type = 'block' + else: + cmd.append('--filestore') + main_device_type = 'data' + + if encrypt and key_manager == CEPH_KEY_MANAGER: + cmd.append('--dmcrypt') + + # On-disk journal volume creation + if not osd_journal and not bluestore: + journal_lv_type = 'journal' + cmd.append('--journal') + cmd.append(_allocate_logical_volume( + dev=dev, + lv_type=journal_lv_type, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + encrypt=encrypt, + key_manager=key_manager) + ) + + cmd.append('--data') + cmd.append(_allocate_logical_volume(dev=dev, + lv_type=main_device_type, + osd_fsid=osd_fsid, + encrypt=encrypt, + key_manager=key_manager)) + + if bluestore: + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + elif osd_journal: + cmd.append('--journal') + least_used = find_least_used_utility_device(osd_journal, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type='journal', + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + return cmd + + +def _partition_name(dev): + """ + Derive the first partition name for a block device + + :param: dev: Full path to block device. + :returns: str: Full path to first partition on block device. + """ + if dev[-1].isdigit(): + return '{}p1'.format(dev) + else: + return '{}1'.format(dev) + + +def is_active_bluestore_device(dev): + """ + Determine whether provided device is part of an active + bluestore based OSD (as its block component). + + :param: dev: Full path to block device to check for Bluestore usage. + :returns: boolean: indicating whether device is in active use. + """ + if not lvm.is_lvm_physical_volume(dev): + return False + + vg_name = lvm.list_lvm_volume_group(dev) + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + + block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') + for block_candidate in block_symlinks: + if os.path.islink(block_candidate): + target = os.readlink(block_candidate) + if target.endswith(lv_name): + return True + + return False + + +def get_conf(variable): + """ + Get the value of the given configuration variable from the + cluster. + + :param variable: ceph configuration variable + :returns: str. configured value for provided variable + + """ + return subprocess.check_output([ + 'ceph-osd', + '--show-config-value={}'.format(variable), + ]).strip() + + +def calculate_volume_size(lv_type): + """ + Determine the configured size for Bluestore DB/WAL or + Filestore Journal devices + + :param lv_type: volume type (db, wal or journal) + :raises KeyError: if invalid lv_type is supplied + :returns: int. 
Configured size in megabytes for volume type + """ + # lv_type -> ceph configuration option + _config_map = { + 'db': 'bluestore_block_db_size', + 'wal': 'bluestore_block_wal_size', + 'journal': 'osd_journal_size', + } + + # default sizes in MB + _default_size = { + 'db': 1024, + 'wal': 576, + 'journal': 1024, + } + + # conversion of ceph config units to MB + _units = { + 'db': 1048576, # Bytes -> MB + 'wal': 1048576, # Bytes -> MB + 'journal': 1, # Already in MB + } + + configured_size = get_conf(_config_map[lv_type]) + + if configured_size is None or int(configured_size) == 0: + return _default_size[lv_type] + else: + return int(configured_size) / _units[lv_type] + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return subprocess.check_output(cmd).decode('UTF-8').strip() + except subprocess.CalledProcessError: + return None + + +def _initialize_disk(dev, dev_uuid, encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Initialize a raw block device consuming 100% of the avaliable + disk space. + + Function assumes that block device has already been wiped. + + :param: dev: path to block device to initialize + :param: dev_uuid: UUID to use for any dm-crypt operations + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: Key management approach for dm-crypt keys + :raises: subprocess.CalledProcessError: if any parted calls fail + :returns: str: Full path to new partition. + """ + use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER + + if use_vaultlocker: + # NOTE(jamespage): Check to see if already initialized as a LUKS + # volume, which indicates this is a shared block + # device for journal, db or wal volumes. + luks_uuid = _luks_uuid(dev) + if luks_uuid: + return '/dev/mapper/crypt-{}'.format(luks_uuid) + + dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) + + if use_vaultlocker and not os.path.exists(dm_crypt): + subprocess.check_call([ + 'vaultlocker', + 'encrypt', + '--uuid', dev_uuid, + dev, + ]) + + if use_vaultlocker: + return dm_crypt + else: + return dev + + +def _allocate_logical_volume(dev, lv_type, osd_fsid, + size=None, shared=False, + encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Allocate a logical volume from a block device, ensuring any + required initialization and setup of PV's and VG's to support + the LV. + + :param: dev: path to block device to allocate from. + :param: lv_type: logical volume type to create + (data, block, journal, wal, db) + :param: osd_fsid: UUID of the OSD associate with the LV + :param: size: Size in LVM format for the device; + if unset 100% of VG + :param: shared: Shared volume group (journal, wal, db) + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM or parted operation fails. + :returns: str: String in the format 'vg_name/lv_name'. 
+ """ + lv_name = "osd-{}-{}".format(lv_type, osd_fsid) + current_volumes = lvm.list_logical_volumes() + if shared: + dev_uuid = str(uuid.uuid4()) + else: + dev_uuid = osd_fsid + pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) + + vg_name = None + if not lvm.is_lvm_physical_volume(pv_dev): + lvm.create_lvm_physical_volume(pv_dev) + if shared: + vg_name = 'ceph-{}-{}'.format(lv_type, + str(uuid.uuid4())) + else: + vg_name = 'ceph-{}'.format(osd_fsid) + lvm.create_lvm_volume_group(vg_name, pv_dev) + else: + vg_name = lvm.list_lvm_volume_group(pv_dev) + + if lv_name not in current_volumes: + lvm.create_logical_volume(lv_name, vg_name, size) + + return "{}/{}".format(vg_name, lv_name) + def osdize_dir(path, encrypt=False, bluestore=False): """Ask ceph-disk to prepare a directory to become an osd. diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index ad955f86..83cb38ae 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -186,10 +186,14 @@ def test_nrpe_dependency_installed(self, mock_config): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) + @patch.object(ceph_hooks, 'ceph') + @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks, 'config') def test_upgrade_charm_with_nrpe_relation_installs_dependencies( self, - mock_config): + mock_config, + mock_notify_client, + mock_ceph): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] with patch.multiple( @@ -207,6 +211,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( ceph_hooks.upgrade_charm() mocks["apt_install"].assert_called_with( ["python-dbus", "lockfile-progs"]) + mock_notify_client.assert_called_once_with() + mock_ceph.update_monfs.assert_called_once_with() class RelatedUnitsTestCase(unittest.TestCase): From cc6d14c54d84763b3287d138d39fe166d9151970 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 13 Jun 2018 15:25:01 +0200 Subject: [PATCH 1512/2699] Check for ceph.conf instead of package Rather than looking for a package to be installed, this change lets us look for the ceph.conf that is setup by the charm to determine if the charm has moved past its install hook Change-Id: I2b6cfbe0bb4207733ed991f6d3a9850584e30408 --- ceph-osd/hooks/add-storage | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/add-storage b/ceph-osd/hooks/add-storage index cec4d4cf..d7a82411 100755 --- a/ceph-osd/hooks/add-storage +++ b/ceph-osd/hooks/add-storage @@ -9,7 +9,11 @@ # Note: this doesn't wait to ensure that ceph is bootstrapped because # that logic is already existing in the charm's hook. -if ! dpkg -s ceph > /dev/null 2>&1; then +IFS='/' read -r -a array <<< "$JUJU_UNIT_NAME" +LOCAL_UNIT="${array[0]}" +charm_ceph_conf="/var/lib/charm/$LOCAL_UNIT/ceph.conf" + +if ! test -e $charm_ceph_conf; then juju-log "Ceph not yet installed." 
exit 0 fi From b0cb75436cfe7446ba104aaab633a11f7f4c5b61 Mon Sep 17 00:00:00 2001 From: zhangzs Date: Wed, 20 Jun 2018 14:05:55 +0800 Subject: [PATCH 1513/2699] Remove the duplicated word Change-Id: I72159a39e392252e456cfd23c0f6e4f69175dcfe --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 9daa9bb4..955dd753 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -50,7 +50,7 @@ options: that the user consider opening a bug on this charm at http://bugs.launchpad.net/charms providing an explanation of why the config was needed so that we may consider it for inclusion as a - natively supported config in the the charm. + natively supported config in the charm. osd-devices: type: string default: /dev/vdb From d46340dc7f1562a78533edfbdce9925eb2c09dc9 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 21 Jun 2018 18:50:39 +0000 Subject: [PATCH 1514/2699] Sync charm-helpers for Rocky series support Change-Id: I6219e1f6a3037ce88b415b7857e66265ecb96c00 --- .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 22 ++++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 4 ++++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 8 +++++++ .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- 7 files changed, 37 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index b196d63f..f3741b0e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -190,8 +190,8 @@ def get_related(self): class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] - def __init__(self, - database=None, user=None, relation_prefix=None, ssl_dir=None): + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): """Allows inspecting relation for settings prefixed 
with relation_prefix. This is useful for parsing access for multiple databases returned via the shared-db interface (eg, nova_password, @@ -202,6 +202,7 @@ def __init__(self, self.user = user self.ssl_dir = ssl_dir self.rel_name = self.interfaces[0] + self.relation_id = relation_id def __call__(self): self.database = self.database or config('database') @@ -235,7 +236,12 @@ def __call__(self): if self.relation_prefix: password_setting = self.relation_prefix + '_password' - for rid in relation_ids(self.interfaces[0]): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + for rid in rids: self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -448,11 +454,13 @@ def __call__(self): class AMQPContext(OSContextGenerator): - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): self.ssl_dir = ssl_dir self.rel_name = rel_name self.relation_prefix = relation_prefix self.interfaces = [rel_name] + self.relation_id = relation_id def __call__(self): log('Generating template context for amqp', level=DEBUG) @@ -473,7 +481,11 @@ def __call__(self): raise OSContextError ctxt = {} - for rid in relation_ids(self.rel_name): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: ha_vip_only = False self.related = True transport_hosts = None diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 6184abd0..0180e555 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -133,6 +133,7 @@ ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) @@ -151,6 +152,7 @@ ('2017.1', 'ocata'), ('2017.2', 'pike'), ('2018.1', 'queens'), + ('2018.2', 'rocky'), ]) # The ugly duckling - must list releases oldest to newest @@ -183,6 +185,8 @@ ['2.13.0', '2.15.0']), ('queens', ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 653d58f1..736be713 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -158,6 +158,14 @@ 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', } diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return 
releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): From 03b05eedbe5e4ae318dce3fe77f42b6ebc33e9bb Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 21 Jun 2018 18:51:34 +0000 Subject: [PATCH 1515/2699] Sync charm-helpers for Rocky series support Change-Id: Iec56fb9c4a2bfd79c3580dca54c5ba68d64fc4c0 --- .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 22 ++++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 4 ++++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 8 +++++++ .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- 7 files changed, 37 insertions(+), 7 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index b196d63f..f3741b0e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -190,8 +190,8 @@ def get_related(self): class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] - def __init__(self, - database=None, user=None, relation_prefix=None, ssl_dir=None): + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): """Allows inspecting relation 
for settings prefixed with relation_prefix. This is useful for parsing access for multiple databases returned via the shared-db interface (eg, nova_password, @@ -202,6 +202,7 @@ def __init__(self, self.user = user self.ssl_dir = ssl_dir self.rel_name = self.interfaces[0] + self.relation_id = relation_id def __call__(self): self.database = self.database or config('database') @@ -235,7 +236,12 @@ def __call__(self): if self.relation_prefix: password_setting = self.relation_prefix + '_password' - for rid in relation_ids(self.interfaces[0]): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + for rid in rids: self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -448,11 +454,13 @@ def __call__(self): class AMQPContext(OSContextGenerator): - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): self.ssl_dir = ssl_dir self.rel_name = rel_name self.relation_prefix = relation_prefix self.interfaces = [rel_name] + self.relation_id = relation_id def __call__(self): log('Generating template context for amqp', level=DEBUG) @@ -473,7 +481,11 @@ def __call__(self): raise OSContextError ctxt = {} - for rid in relation_ids(self.rel_name): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: ha_vip_only = False self.related = True transport_hosts = None diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 6184abd0..0180e555 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -133,6 +133,7 @@ ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) @@ -151,6 +152,7 @@ ('2017.1', 'ocata'), ('2017.2', 'pike'), ('2018.1', 'queens'), + ('2018.2', 'rocky'), ]) # The ugly duckling - must list releases oldest to newest @@ -183,6 +185,8 @@ ['2.13.0', '2.15.0']), ('queens', ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 653d58f1..736be713 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -158,6 +158,14 @@ 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', } diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, 
} return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): From 5e4efd6ad191c46142bc228c8fa0c86712d90820 Mon Sep 17 00:00:00 2001 From: Vu Cong Tuan Date: Thu, 28 Jun 2018 08:39:34 +0700 Subject: [PATCH 1516/2699] Add py36 testenv Python 3.6 is installed by default in Ubuntu 18.04 LTS. Therefore, according to Transition Plan [1], it'll be handy to have py36 testenv. For more details, please check Python2 Deprecation Timeline [2] and Python3-first Goal - Completion Criteria [3]. [1] https://wiki.ubuntu.com/Python/Python36Transition [2] https://governance.openstack.org/tc/resolutions/20180529-python2-deprecation-timeline.html [3] https://review.openstack.org/#/c/575933/8/goals/stein/python3-first.rst@42 Change-Id: I2ad2e09a6134be0c2f96ebe5ec82201c68eb74ab --- ceph-radosgw/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 09ca045d..930d5264 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -26,6 +26,11 @@ basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From e7e98ae303d1bc2c381827e4982e15d51e6172fe Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Jul 2018 04:57:33 +0100 Subject: [PATCH 1517/2699] Skip udev rule install in containers Ensure that udev rules are not installed and reloaded when running in a container; this is not permitted and the udev rules are used for block devices, which are not supported within container based deployments. 
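The guard is a simple early return at the top of the hook helper (a
sketch of the change shown in the diff below):

    # Skip udev rule installation inside containers: rules under
    # /lib/udev cannot be managed there, and block devices are not
    # supported for container-based deployments anyway.
    if is_container():
        log('Skipping udev rule installation '
            'as unit is in a container', level=DEBUG)
        return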
Change-Id: I9a580172fcbbf8cec63af7adccb0808915184658
Closes-Bug: 1776713
---
 ceph-osd/hooks/ceph_hooks.py           |  5 +++++
 ceph-osd/unit_tests/test_ceph_hooks.py | 14 +++++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index ce2aba9e..47379f05 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -55,6 +55,7 @@
     add_to_updatedb_prunepath,
     restart_on_change,
     write_file,
+    is_container,
 )
 from charmhelpers.fetch import (
     add_source,
@@ -225,6 +226,10 @@ def install_udev_rules():
     Install and reload udev rules for ceph-volume LV
     permissions
     """
+    if is_container():
+        log('Skipping udev rule installation '
+            'as unit is in a container', level=DEBUG)
+        return
     for x in glob.glob('files/udev/*'):
         shutil.copy(x, '/lib/udev/rules.d')
     subprocess.check_call(['udevadm', 'control',
diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py
index f2cfa28e..9b660c3a 100644
--- a/ceph-osd/unit_tests/test_ceph_hooks.py
+++ b/ceph-osd/unit_tests/test_ceph_hooks.py
@@ -471,9 +471,11 @@ def test_az_info_default_remap(self, environ, config, log):
         config.assert_called_with('availability_zone')
         environ.get.assert_called_with('JUJU_AVAILABILITY_ZONE')

+    @patch.object(ceph_hooks, 'is_container')
     @patch.object(ceph_hooks, 'subprocess')
     @patch.object(ceph_hooks, 'shutil')
-    def test_install_udev_rules(self, shutil, subprocess):
+    def test_install_udev_rules(self, shutil, subprocess, is_container):
+        is_container.return_value = False
         ceph_hooks.install_udev_rules()
         shutil.copy.assert_called_once_with(
             'files/udev/95-charm-ceph-osd.rules',
@@ -483,6 +485,16 @@ def test_install_udev_rules(self, shutil, subprocess):
             ['udevadm', 'control', '--reload-rules']
         )

+    @patch.object(ceph_hooks, 'is_container')
+    @patch.object(ceph_hooks, 'subprocess')
+    @patch.object(ceph_hooks, 'shutil')
+    def test_install_udev_rules_container(self, shutil, subprocess,
+                                          is_container):
+        is_container.return_value = True
+        ceph_hooks.install_udev_rules()
+        shutil.copy.assert_not_called()
+        subprocess.check_call.assert_not_called()
+
     @patch.object(ceph_hooks, 'config')
     @patch.object(ceph_hooks, 'cmp_pkgrevno')
     def test_use_short_objects(self, mock_cmp_pkgrevno, mock_config):

From 3c6a761633a06a6f1554ab25dcf077962a56ca81 Mon Sep 17 00:00:00 2001
From: Bryan Quigley
Date: Fri, 25 May 2018 10:07:13 -0400
Subject: [PATCH 1518/2699] Removes vm.swappiness and vfs_cache_pressure

They were both set to 1 in the same commit without justification, and
both are harmful to set that low. This commit simply lets the kernel
defaults come through. Details on how bad it is to set these to 1,
courtesy of Jay Vosburgh, follow.

vfs_cache_pressure

Setting vfs_cache_pressure to 1 for all cases is likely to cause
excessive memory usage in the dentry and inode caches for most
workloads. For most uses, the default value of 100 is reasonable.

The vfs_cache_pressure value specifies the percentage of objects in
each of the "dentry" and "inode_entry" slab caches used by filesystems
that will be viewed as "freeable" by the slab shrinking logic. Some
other variables also adjust the actual number of objects that the
kernel will try to free, but for the freeable quantity, a
vfs_cache_pressure of 100 will attempt to free 100 times as many
objects in a cache as a setting of 1. Similarly, a vfs_cache_pressure
of 200 will attempt to free twice as many as a setting of 100.

This only comes into play when the kernel has entered reclaim, i.e.,
it is trying to free cached objects in order to make space to satisfy
an allocation that would otherwise fail (or an allocation has already
failed or watermarks have been reached and this is occurring
asynchronously).

By setting vfs_cache_pressure to 1, the kernel will disproportionately
reclaim pages from the page cache instead of from the dentry/inode
caches, and those will grow almost without bound (if vfs_cache_pressure
is 0, they will literally grow without bound until memory is
exhausted). If the system as a whole has a low cache hit ratio on the
objects in the dentry and inode caches, they will simply consume memory
that is kept idle, and force out page cache pages (file data, block
data and anonymous pages). Eventually, the system will resort to
swapping of pages and, if all else fails, to killing processes to free
memory. With very low vfs_cache_pressure values, it is more likely that
processes will be killed to free memory before dentry / inode cache
objects are released.
The vfs_cache_pressure setting only comes into play when the kernel
has entered reclaim, i.e., it is trying to free cached objects in order
to make space to satisfy an allocation that would otherwise fail (or an
allocation has already failed, or watermarks have been reached and this
is occurring asynchronously).

By setting vfs_cache_pressure to 1, the kernel will disproportionately
reclaim pages from the page cache instead of from the dentry/inode
caches, and those will grow with almost no bound (if vfs_cache_pressure
is 0, they will literally grow without bound until memory is
exhausted). If the system as a whole has a low cache hit ratio on the
objects in the dentry and inode caches, they will simply consume memory
that is kept idle, and force out page cache pages (file data, block
data and anonymous pages). Eventually, the system will resort to
swapping of pages and, if all else fails, to killing processes to free
memory. With very low vfs_cache_pressure values, it is more likely that
processes will be killed to free memory before dentry / inode cache
objects are released.

We have had several customers alleviate problems by setting this value
back to the default - or having to make it higher to clean things up
after being at 1 for so long.

vm.swappiness

Setting this to 1 will heavily favor (ratio 1:199) releasing file
backed pages over writing anonymous pages to swap ("swapping" a file
backed page just frees the page, as it can be re-read from its backing
file). So, this would, e.g., favor keeping almost all process anonymous
pages (stack, heap, etc), even for idle processes, in memory over
keeping file backed pages in the page cache.

Change-Id: I94186f3e16f61223e362d3db0ddce799ae6120cb
Closes-Bug: 1770171
Signed-off-by: Bryan Quigley
---
 ceph-osd/config.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index 9daa9bb4..0b2d61cb 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -232,8 +232,7 @@ options:
   sysctl:
     type: string
     default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288,
-                kernel.threads-max: 2097152, vm.vfs_cache_pressure: 1,
-                vm.swappiness: 1 }'
+                kernel.threads-max: 2097152 }'
     description: |
       YAML-formatted associative array of sysctl key/value pairs to be
       set persistently. By default we set pid_max, max_map_count and

From df92fde5ff14bb819151038d0ff6c3b6a93e0e2f Mon Sep 17 00:00:00 2001
From: James Page
Date: Tue, 10 Jul 2018 09:28:40 +0100
Subject: [PATCH 1519/2699] add-disk: Ensure key-manager config is passed to osdize

Recent changes to support vault for key management require that the
'osd-encrypt-keymanager' option is passed to all osdize calls so that
the correct key management approach is taken.

Ensure that the add-disk action does the same, otherwise keys will
always be stored in the ceph mon KV store, rather than in Vault.

Closes-Bug: 1780920
Change-Id: I8c722d38d68f13dc00c7444a50d67ce37fbd6a29
---
 ceph-osd/actions/add_disk.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py
index 78c7b5e4..9ba49116 100755
--- a/ceph-osd/actions/add_disk.py
+++ b/ceph-osd/actions/add_disk.py
@@ -33,7 +33,8 @@ def add_device(request, device_path, bucket=None):
                           ceph_hooks.get_journal_devices(),
                           hookenv.config('ignore-device-errors'),
                           hookenv.config('osd-encrypt'),
-                          hookenv.config('bluestore'))
+                          hookenv.config('bluestore'),
+                          hookenv.config('osd-encrypt-keymanager'))
     # Make it fast!
if hookenv.config('autotune'): ceph.utils.tune_dev(dev) From 56249c8bbff6a686afd572d6de8747afa1c1d215 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 11 Jul 2018 14:03:05 -0500 Subject: [PATCH 1520/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I6da2c6327a7916a55f8f6b848f7c2a0db3741ae4 --- ceph-fs/src/metadata.yaml | 2 +- ceph-fs/src/tests/gate-basic-artful-pike | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) delete mode 100755 ceph-fs/src/tests/gate-basic-artful-pike diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 6f0f9cf0..066fd774 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,7 +12,7 @@ tags: series: - xenial - bionic - - artful + - cosmic subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/gate-basic-artful-pike b/ceph-fs/src/tests/gate-basic-artful-pike deleted file mode 100755 index f7293202..00000000 --- a/ceph-fs/src/tests/gate-basic-artful-pike +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on artful-pike.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='artful') - deployment.run_tests() From 49cf5fb2d33633d014a6a9595af6a8222fb27383 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 11 Jul 2018 14:03:15 -0500 Subject: [PATCH 1521/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Ia63fa3d60326f20283b6753638dac8e9406ce7f7 --- ceph-mon/metadata.yaml | 2 +- ceph-mon/tests/gate-basic-artful-pike | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) delete mode 100644 ceph-mon/tests/gate-basic-artful-pike diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index f4a233ef..a3fedb2d 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,7 +12,7 @@ tags: series: - xenial - bionic - - artful + - cosmic - trusty peers: mon: diff --git a/ceph-mon/tests/gate-basic-artful-pike b/ceph-mon/tests/gate-basic-artful-pike deleted file mode 100644 index 58c1b549..00000000 --- a/ceph-mon/tests/gate-basic-artful-pike +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on artful-pike.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='artful') - deployment.run_tests() From 6443ca50ed52913d5751ed93be1153c4dc8c1a4f Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 11 Jul 2018 14:03:22 -0500 Subject: [PATCH 1522/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I53e9ff1e1153d086b3eb6d80b2f320c6ae75e880 --- ceph-osd/metadata.yaml | 2 +- ceph-osd/tests/gate-basic-artful-pike | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) delete mode 100644 ceph-osd/tests/gate-basic-artful-pike diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index efcb7198..489a03f1 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,7 +13,7 @@ tags: series: - xenial - bionic - - artful + - cosmic - trusty description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/tests/gate-basic-artful-pike b/ceph-osd/tests/gate-basic-artful-pike deleted file mode 100644 index d15bfcdb..00000000 --- a/ceph-osd/tests/gate-basic-artful-pike +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on artful-pike.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='artful') - deployment.run_tests() From 9c835524e1de67551b7012f846e45362d3100579 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 11 Jul 2018 14:03:30 -0500 Subject: [PATCH 1523/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. 
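Concretely, each charm's metadata.yaml series list drops the EOL artful
release and gains the cosmic dev release; for ceph-proxy (see the diff
below) the resulting stanza reads:

    series:
      - xenial
      - bionic
      - cosmic
      - trusty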
Change-Id: If892cf5e5b045300433dc2737f3b8b479d12527e --- ceph-proxy/metadata.yaml | 2 +- ceph-proxy/tests/gate-basic-artful-pike | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) delete mode 100755 ceph-proxy/tests/gate-basic-artful-pike diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index f6a23424..bcf627ac 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,7 +12,7 @@ tags: series: - xenial - bionic - - artful + - cosmic - trusty extra-bindings: public: diff --git a/ceph-proxy/tests/gate-basic-artful-pike b/ceph-proxy/tests/gate-basic-artful-pike deleted file mode 100755 index 5815e9dd..00000000 --- a/ceph-proxy/tests/gate-basic-artful-pike +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on artful-pike.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='artful') - deployment.run_tests() From 71772931fb1324ee140c5240d8bdc970feb85e66 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 11 Jul 2018 14:03:39 -0500 Subject: [PATCH 1524/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I0ebcde2ca99a10a005a6448bca9be7ba40974fd0 --- ceph-radosgw/metadata.yaml | 2 +- ceph-radosgw/tests/gate-basic-artful-pike | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) delete mode 100644 ceph-radosgw/tests/gate-basic-artful-pike diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 22d923fb..fa6aee39 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,7 +15,7 @@ tags: series: - xenial - bionic - - artful + - cosmic - trusty extra-bindings: public: diff --git a/ceph-radosgw/tests/gate-basic-artful-pike b/ceph-radosgw/tests/gate-basic-artful-pike deleted file mode 100644 index 6ac2a443..00000000 --- a/ceph-radosgw/tests/gate-basic-artful-pike +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on artful-pike.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='artful') - deployment.run_tests() From de1ae8f01ce48b53617c868f240b7433d4ebe745 Mon Sep 17 00:00:00 2001 From: Nguyen Van Duc Date: Thu, 12 Jul 2018 13:27:28 +0700 Subject: [PATCH 1525/2699] Add py36 testenv Python 3.6 is installed by default in Ubuntu 18.04 LTS. Therefore, according to Transition Plan [1], it'll be handy to have py36 testenv. For more details, please check Python2 Deprecation Timeline [2] and Python3-first Goal - Completion Criteria [3]. 
[1] https://wiki.ubuntu.com/Python/Python36Transition [2] https://governance.openstack.org/tc/resolutions/20180529-python2-deprecation-timeline.html [3] https://review.openstack.org/#/c/575933/8/goals/stein/python3-first.rst@42 Change-Id: I63e7db94fa672f80b08a0644fbce7c0732d67ee4 --- ceph-mon/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 9c21d57e..e0c533ed 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -31,6 +31,11 @@ basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From 67b65ff0d61cd8514214255f1f88333f729a9e94 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 Jul 2018 15:51:25 +0200 Subject: [PATCH 1526/2699] Sync charm-helpers to ensure Rocky support Change-Id: Ib773737fb8b366c7d46cb3570d9e77c5fbe58281 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 15 ++++++++++----- ceph-mon/hooks/charmhelpers/core/host.py | 14 ++++++++++++++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 2 +- ceph-mon/tests/charmhelpers/core/host.py | 14 ++++++++++++++ 4 files changed, 39 insertions(+), 6 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1c55b30f..e3d10c1c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -410,16 +410,21 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): os.chmod(checkpath, 0o644) -def copy_nrpe_checks(): +def copy_nrpe_checks(nrpe_files_dir=None): """ Copy the nrpe checks into place """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - + default_nrpe_files_dir = os.path.join( + os.getenv('CHARM_DIR'), + 'hooks', + 'charmhelpers', + 'contrib', + 'openstack', + 'files') + if not nrpe_files_dir: + nrpe_files_dir = default_nrpe_files_dir if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. 
+ + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 736be713..19aa6baf 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -315,7 +315,7 @@ def import_key(key): cmd = ['apt-key', 'adv', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] try: - subprocess.check_call(cmd) + _run_with_retries(cmd) except subprocess.CalledProcessError: error = "Error importing PGP key '{}'".format(key) log(error) diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ b/ceph-mon/tests/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. + + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) From 063a07d36423ff0350159308fab2aa8c66b3fef6 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 Jul 2018 15:51:48 +0200 Subject: [PATCH 1527/2699] Sync charm-helpers to ensure Rocky support Change-Id: Ib1fb899fbbe89158b0b5309b7fa8cb78229c9e40 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 15 ++++++++++----- ceph-osd/hooks/charmhelpers/core/host.py | 14 ++++++++++++++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 2 +- ceph-osd/tests/charmhelpers/core/host.py | 14 ++++++++++++++ 4 files changed, 39 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1c55b30f..e3d10c1c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -410,16 +410,21 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): os.chmod(checkpath, 0o644) -def copy_nrpe_checks(): +def copy_nrpe_checks(nrpe_files_dir=None): """ Copy the nrpe checks into place """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - + default_nrpe_files_dir = os.path.join( + os.getenv('CHARM_DIR'), + 'hooks', + 'charmhelpers', + 'contrib', + 'openstack', + 'files') + if not nrpe_files_dir: + nrpe_files_dir = default_nrpe_files_dir if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in 
glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. + + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 736be713..19aa6baf 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -315,7 +315,7 @@ def import_key(key): cmd = ['apt-key', 'adv', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] try: - subprocess.check_call(cmd) + _run_with_retries(cmd) except subprocess.CalledProcessError: error = "Error importing PGP key '{}'".format(key) log(error) diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ b/ceph-osd/tests/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. 
+ + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) From caf5dd2a42a944188171852d44be7a265e9a058e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 Jul 2018 15:52:11 +0200 Subject: [PATCH 1528/2699] Sync charm-helpers to ensure Rocky support Change-Id: I40ca34add5d0f0d59895bfbb67000642c57c8fc5 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 15 ++++++++++----- .../hooks/charmhelpers/contrib/openstack/utils.py | 4 ++++ ceph-proxy/hooks/charmhelpers/core/host.py | 14 ++++++++++++++ ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 10 +++++++++- .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- ceph-proxy/tests/charmhelpers/core/host.py | 14 ++++++++++++++ 7 files changed, 55 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1c55b30f..e3d10c1c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -410,16 +410,21 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): os.chmod(checkpath, 0o644) -def copy_nrpe_checks(): +def copy_nrpe_checks(nrpe_files_dir=None): """ Copy the nrpe checks into place """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - + default_nrpe_files_dir = os.path.join( + os.getenv('CHARM_DIR'), + 'hooks', + 'charmhelpers', + 'contrib', + 'openstack', + 'files') + if not nrpe_files_dir: + nrpe_files_dir = default_nrpe_files_dir if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 6184abd0..0180e555 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -133,6 +133,7 @@ ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) @@ -151,6 +152,7 @@ ('2017.1', 'ocata'), ('2017.2', 'pike'), ('2018.1', 'queens'), + ('2018.2', 'rocky'), ]) # The ugly duckling - must list releases oldest to newest @@ -183,6 +185,8 @@ ['2.13.0', '2.15.0']), ('queens', ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. 
+ + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index 653d58f1..19aa6baf 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -158,6 +158,14 @@ 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', } @@ -307,7 +315,7 @@ def import_key(key): cmd = ['apt-key', 'adv', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] try: - subprocess.check_call(cmd) + _run_with_retries(cmd) except subprocess.CalledProcessError: error = "Error importing PGP key '{}'".format(key) log(error) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ b/ceph-proxy/tests/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. 
+ + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) From 06d0935386f9dd527090c90bf9e0a22237d40c18 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 Jul 2018 15:52:30 +0200 Subject: [PATCH 1529/2699] Sync charm-helpers to ensure Rocky support Change-Id: Ie77e7a22bb825dea74bdd1a7ead29db0b58d7d4c --- .../charmhelpers/contrib/charmsupport/nrpe.py | 15 ++++++++----- .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 22 ++++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 4 ++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 14 ++++++++++++ .../hooks/charmhelpers/fetch/ubuntu.py | 10 ++++++++- .../contrib/openstack/amulet/deployment.py | 3 +++ .../contrib/openstack/amulet/utils.py | 2 +- ceph-radosgw/tests/charmhelpers/core/host.py | 14 ++++++++++++ 10 files changed, 76 insertions(+), 13 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 1c55b30f..e3d10c1c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -410,16 +410,21 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): os.chmod(checkpath, 0o644) -def copy_nrpe_checks(): +def copy_nrpe_checks(nrpe_files_dir=None): """ Copy the nrpe checks into place """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - + default_nrpe_files_dir = os.path.join( + os.getenv('CHARM_DIR'), + 'hooks', + 'charmhelpers', + 'contrib', + 'openstack', + 'files') + if not nrpe_files_dir: + nrpe_files_dir = default_nrpe_files_dir if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index b196d63f..f3741b0e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -190,8 +190,8 @@ def get_related(self): class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] - def __init__(self, - database=None, user=None, relation_prefix=None, ssl_dir=None): + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): """Allows inspecting relation for settings prefixed with relation_prefix. This is useful for parsing access for multiple databases returned via the shared-db interface (eg, nova_password, @@ -202,6 +202,7 @@ def __init__(self, self.user = user self.ssl_dir = ssl_dir self.rel_name = self.interfaces[0] + self.relation_id = relation_id def __call__(self): self.database = self.database or config('database') @@ -235,7 +236,12 @@ def __call__(self): if self.relation_prefix: password_setting = self.relation_prefix + '_password' - for rid in relation_ids(self.interfaces[0]): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + for rid in rids: self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -448,11 +454,13 @@ def __call__(self): class AMQPContext(OSContextGenerator): - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): self.ssl_dir = ssl_dir self.rel_name = rel_name self.relation_prefix = relation_prefix self.interfaces = [rel_name] + self.relation_id = relation_id def __call__(self): log('Generating template context for amqp', level=DEBUG) @@ -473,7 +481,11 @@ def __call__(self): raise OSContextError ctxt = {} - for rid in relation_ids(self.rel_name): + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: ha_vip_only = False self.related = True transport_hosts = None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 6184abd0..0180e555 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -133,6 +133,7 @@ ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) @@ -151,6 +152,7 @@ ('2017.1', 'ocata'), ('2017.2', 'pike'), ('2018.1', 'queens'), + ('2018.2', 'rocky'), ]) # The ugly duckling - must list releases oldest to newest @@ -183,6 +185,8 @@ ['2.13.0', '2.15.0']), ('queens', ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, 
updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. + + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 653d58f1..19aa6baf 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -158,6 +158,14 @@ 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', } @@ -307,7 +315,7 @@ def import_key(key): cmd = ['apt-key', 'adv', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] try: - subprocess.check_call(cmd) + _run_with_retries(cmd) except subprocess.CalledProcessError: error = "Error importing PGP key '{}'".format(key) log(error) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 66beeda2..1c96752a 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -291,6 +291,8 @@ def _get_openstack_release(self): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('cosmic', None): self.cosmic_rocky, } return releases[(self.series, self.openstack)] @@ -306,6 +308,7 @@ def _get_openstack_release_string(self): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index d43038b2..ef4ab54b 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -56,7 +56,7 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py index 322ab2ac..e9fd38a0 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ b/ceph-radosgw/tests/charmhelpers/core/host.py @@ -972,6 +972,20 @@ def is_container(): def add_to_updatedb_prunepath(path, 
updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. + + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) From ccc799f1767cec5d03f28cf5105422d0b645c3c8 Mon Sep 17 00:00:00 2001 From: Nguyen Van Duc Date: Tue, 17 Jul 2018 09:45:55 +0700 Subject: [PATCH 1530/2699] Add py36 testenv Python 3.6 is installed by default in Ubuntu 18.04 LTS. Therefore, according to Transition Plan [1], it'll be handy to have py36 testenv. For more details, please check Python2 Deprecation Timeline [2] and Python3-first Goal - Completion Criteria [3]. [1] https://wiki.ubuntu.com/Python/Python36Transition [2] https://governance.openstack.org/tc/resolutions/20180529-python2-deprecation-timeline.html [3] https://review.openstack.org/#/c/575933/8/goals/stein/python3-first.rst@42 Change-Id: I08dc76d61c6d762c75ca6594f0a33f0042da12f3 --- ceph-proxy/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 09ca045d..930d5264 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -26,6 +26,11 @@ basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python2.7 deps = -r{toxinidir}/requirements.txt From 8a558a6d256aefb28767e1f5b19c37e9515356db Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Sat, 14 Jul 2018 22:48:20 +0300 Subject: [PATCH 1531/2699] ignore devices that have already been processed Similar to how osdize in charms.ceph checks for already processed devices we need to avoid checking if they are pristine or not. Additionally, mapped LUKS devices need to be filtered from being zapped as they may hold valuable data. They are only used as underlying devices for device mapper and dmcrypt to provide a decrypted block device abstration so if they really need to be zapped a mapping needs to be removed first. This change also pulls charms.ceph modifications. 
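Condensed from the hook and charms.ceph changes below, the device
filtering behaves like the following sketch (pending_osd_devices is a
hypothetical helper name used here for illustration; kv() is
charmhelpers' unit-local key/value store):

    from charmhelpers.core.unitdata import kv

    def pending_osd_devices(configured_devices):
        # Devices recorded in the 'osd-devices' unitdata entry have
        # already been processed by this unit and are treated as
        # non-pristine; the zap-disk action clears their entry to make
        # them eligible for processing again.
        db = kv()
        touched = db.get('osd-devices', [])
        return [dev for dev in configured_devices if dev not in touched]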
Change-Id: I96b3d40b3f9e56681be142377e454b15f9e22be3 Co-Authored-By: Dmitrii Shcherbakov Co-Authored-By: Chris Procter Closes-Bug: 1781453 --- ceph-osd/actions/zap_disk.py | 5 +- ceph-osd/hooks/ceph_hooks.py | 12 +++++ ceph-osd/lib/ceph/broker.py | 7 ++- ceph-osd/lib/ceph/utils.py | 50 ++++++++++++++++++-- ceph-osd/unit_tests/test_actions_zap_disk.py | 43 +++++++++++++++++ 5 files changed, 111 insertions(+), 6 deletions(-) diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index aae9896c..550e70c2 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -28,6 +28,7 @@ ) from charmhelpers.core.unitdata import kv from ceph.utils import is_active_bluestore_device +from ceph.utils import is_mapped_luks_device def get_devices(): @@ -53,7 +54,9 @@ def zap(): for device in devices: if not is_block_device(device): not_block_devices.append(device) - if is_device_mounted(device) or is_active_bluestore_device(device): + if (is_device_mounted(device) or + is_active_bluestore_device(device) or + is_mapped_luks_device(device)): failed_devices.append(device) if failed_devices or not_block_devices: diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 47379f05..0eca9706 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -93,6 +93,8 @@ from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +from charmhelpers.core.unitdata import kv + import charmhelpers.contrib.openstack.vaultlocker as vaultlocker hooks = Hooks() @@ -426,6 +428,15 @@ def prepare_disks_and_activate(): # pre-flight check of eligible device pristinity devices = get_devices() + + # if a device has been previously touched we need to consider it as + # non-pristine. If it needs to be re-processed it has to be zapped + # via the respective action which also clears the unitdata entry. 
+ db = kv() + touched_devices = db.get('osd-devices', []) + devices = [dev for dev in devices if dev not in touched_devices] + log('Skipping osd devices previously processed by this unit: {}' + .format(touched_devices)) # filter osd-devices that are file system paths devices = [dev for dev in devices if dev.startswith('/dev')] # filter osd-devices that does not exist on this unit @@ -435,6 +446,7 @@ def prepare_disks_and_activate(): # filter osd-devices that are active bluestore devices devices = [dev for dev in devices if not ceph.is_active_bluestore_device(dev)] + log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG) if not all(ceph.is_pristine_disk(dev) for dev in devices): status_set('blocked', diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index 0b6d3e24..3e857d21 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -81,6 +81,10 @@ "cache_min_flush_age": [int], "cache_min_evict_age": [int], "fast_read": [bool], + "allow_ec_overwrites": [bool], + "compression_mode": [str, ["none", "passive", "aggressive", "force"]], + "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], + "compression_required_ratio": [float, [0.0, 1.0]], } CEPH_BUCKET_TYPES = [ @@ -251,7 +255,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ["mon", "allow r", "osd", ', '.join(permissions)] + return ['mon', 'allow r, allow command "osd blacklist"', + 'osd', ', '.join(permissions)] def get_service_groups(service, namespace=None): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 5ff970bf..6d039cd3 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1096,7 +1096,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ - ('mon', ['allow r']), + ('mon', ['allow r', + 'allow command "osd blacklist"']), ('osd', ['allow rwx']), ]) @@ -1163,6 +1164,7 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :returns: Returns a cephx key """ + key_name = 'client.{}'.format(name) try: # Does the key already exist? output = str(subprocess.check_output( @@ -1177,8 +1179,14 @@ def get_named_key(name, caps=None, pool_list=None): ), 'auth', 'get', - 'client.{}'.format(name), + key_name, ]).decode('UTF-8')).strip() + # NOTE(jamespage); + # Apply any changes to key capabilities, dealing with + # upgrades which requires new caps for operation. + upgrade_key_caps(key_name, + caps or _default_caps, + pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! 
@@ -1194,7 +1202,7 @@ def get_named_key(name, caps=None, pool_list=None): '/var/lib/ceph/mon/ceph-{}/keyring'.format( socket.gethostname() ), - 'auth', 'get-or-create', 'client.{}'.format(name), + 'auth', 'get-or-create', key_name, ] # Add capabilities for subsystem, subcaps in caps.items(): @@ -1213,7 +1221,7 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 -def upgrade_key_caps(key, caps): +def upgrade_key_caps(key, caps, pool_list=None): """ Upgrade key to have capabilities caps """ if not is_leader(): # Not the MON leader OR not clustered @@ -1222,6 +1230,12 @@ def upgrade_key_caps(key, caps): "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) @@ -1453,6 +1467,11 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, ' skipping.'.format(dev)) return + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return + if cmp_pkgrevno('ceph', '12.2.4') >= 0: cmd = _ceph_volume(dev, osd_journal, @@ -1664,6 +1683,29 @@ def is_active_bluestore_device(dev): return False +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk('/sys/class/block/{}/holders/' + .format(os.path.basename(dev)))) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def get_conf(variable): """ Get the value of the given configuration variable from the diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py index 47f71e7f..61266e3a 100644 --- a/ceph-osd/unit_tests/test_actions_zap_disk.py +++ b/ceph-osd/unit_tests/test_actions_zap_disk.py @@ -26,10 +26,12 @@ def setUp(self): 'is_block_device', 'is_device_mounted', 'is_active_bluestore_device', + 'is_mapped_luks_device', 'kv']) self.is_device_mounted.return_value = False self.is_block_device.return_value = True self.is_active_bluestore_device.return_value = False + self.is_mapped_luks_device.return_value = False self.kv.return_value = self.kv self.hookenv.local_unit.return_value = "ceph-osd-test/0" @@ -127,3 +129,44 @@ def side_effect(arg): _zap_disk.assert_not_called() self.hookenv.action_fail.assert_called_with( "1 devices are mounted: /dev/vdb") + + @mock.patch.object(zap_disk, 'zap_disk') + def test_wont_zap__mapped_luks_device(self, _zap_disk): + """Will not zap a disk that has a LUKS header""" + def side_effect(arg): + return { + 'devices': '/dev/vdb', + 'i-really-mean-it': True, + }.get(arg) + self.hookenv.action_get.side_effect = side_effect + self.is_active_bluestore_device.return_value = False + self.is_mapped_luks_device.return_value = True + zap_disk.zap() + _zap_disk.assert_not_called() + self.hookenv.action_fail.assert_called_with( + "1 devices are mounted: /dev/vdb") + + @mock.patch.object(zap_disk, 
'zap_disk') + def test_zap_luks_not_mapped(self, _zap_disk): + """Will zap disk with extra config set""" + def side_effect(arg): + return { + 'devices': '/dev/vdb', + 'i-really-mean-it': True, + }.get(arg) + + self.is_active_bluestore_device.return_value = False + self.is_mapped_luks_device.return_value = False + + self.hookenv.action_get.side_effect = side_effect + self.kv.get.return_value = ['/dev/vdb', '/dev/vdz'] + zap_disk.zap() + _zap_disk.assert_called_with('/dev/vdb') + self.kv.get.assert_called_with('osd-devices', []) + self.kv.set.assert_called_with('osd-devices', ['/dev/vdz']) + self.hookenv.action_set.assert_called_with({ + 'message': "1 disk(s) have been zapped, to use " + "them as OSDs, run: \njuju " + "run-action ceph-osd-test/0 add-disk " + "osd-devices=\"/dev/vdb\"" + }) From 4d0d5c0b57aa97ca44b5ce2cc8320264619e41c6 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 18 Jul 2018 10:19:55 +0200 Subject: [PATCH 1532/2699] Remove old nova-compute / mysql relation Change-Id: Id4c2b5a1e2409adf375aba282d9ca0b7ebd4d847 Closes-Bug: #1713807 --- ceph-mon/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index d1c4cf76..cebb7a8d 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -83,7 +83,6 @@ def _add_services(self): def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', From 566aa76d3a1151c93951fc467eabd09358117c5c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 18 Jul 2018 10:20:09 +0200 Subject: [PATCH 1533/2699] Remove old nova-compute / mysql relation Change-Id: I0f07da697cb96279dcd7f7f70826c0c465737fb3 Closes-Bug: #1713807 --- ceph-osd/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 2cc4664f..26c9f53d 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -84,7 +84,6 @@ def _add_services(self): def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', From 28b5a99a8b17cbe02de97ce856fa885dd92706fb Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 18 Jul 2018 10:20:23 +0200 Subject: [PATCH 1534/2699] Remove old nova-compute / mysql relation Change-Id: I45bf4f6b9eb1fb50629acc0f00ccde774a7a5a03 Closes-Bug: #1713807 --- ceph-radosgw/tests/basic_deployment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 16b2c57c..5106abca 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -78,7 +78,6 @@ def _add_services(self): def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'nova-compute:shared-db': 'percona-cluster:shared-db', 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', From c013deeaa94eab8a709d5594a3618cccbdd54348 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 18 
Jul 2018 15:26:21 -0400 Subject: [PATCH 1535/2699] Fixup ceph.conf templating Ensure that code snips don't slurp to much whitespace, resulting in a broken set of keys within the generated configuration file. Change-Id: I7cfe026c60c04ac19741a3a2b364cec3fb8746ba --- ceph-osd/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index eb1c3a24..77fce613 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -63,7 +63,7 @@ keyring = /var/lib/ceph/osd/$cluster-$id/keyring {% if bluestore -%} {% if not bluestore_experimental -%} osd objectstore = bluestore -{%- endif -%} +{%- endif %} {% if bluestore_block_wal_size -%} bluestore block wal size = {{ bluestore_block_wal_size }} {%- endif %} From 22897ad3d22c9d0cf7ae4470415a94eab284ede1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 18 Jul 2018 18:34:27 -0400 Subject: [PATCH 1536/2699] Update functional test definitions Change-Id: Ie7cdce3c9bb29381e3f754dff84bc7645d3273aa --- ceph-fs/src/tests/dev-basic-bionic-rocky | 25 ++++++++++++++++++++++++ ceph-fs/src/tests/dev-basic-cosmic-rocky | 23 ++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100755 ceph-fs/src/tests/dev-basic-bionic-rocky create mode 100755 ceph-fs/src/tests/dev-basic-cosmic-rocky diff --git a/ceph-fs/src/tests/dev-basic-bionic-rocky b/ceph-fs/src/tests/dev-basic-bionic-rocky new file mode 100755 index 00000000..0f7abc5a --- /dev/null +++ b/ceph-fs/src/tests/dev-basic-bionic-rocky @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on bionic-rocky.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='bionic', + openstack='cloud:bionic-rocky', + source='cloud:bionic-updates/rocky') + deployment.run_tests() diff --git a/ceph-fs/src/tests/dev-basic-cosmic-rocky b/ceph-fs/src/tests/dev-basic-cosmic-rocky new file mode 100755 index 00000000..91cb73c4 --- /dev/null +++ b/ceph-fs/src/tests/dev-basic-cosmic-rocky @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='cosmic') + deployment.run_tests() From eba249dfe53db0b2f450d4b8badaaac49df3816d Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 18 Jul 2018 18:34:35 -0400 Subject: [PATCH 1537/2699] Update functional test definitions Change-Id: Ie68f38196a1c725b2e0d04317b3630ac9dd03101 --- ...c-xenial-queens => dev-basic-bionic-rocky} | 8 +++---- ceph-mon/tests/dev-basic-cosmic-rocky | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) rename ceph-mon/tests/{dev-basic-xenial-queens => dev-basic-bionic-rocky} (72%) create mode 100755 ceph-mon/tests/dev-basic-cosmic-rocky diff --git a/ceph-mon/tests/dev-basic-xenial-queens b/ceph-mon/tests/dev-basic-bionic-rocky similarity index 72% rename from ceph-mon/tests/dev-basic-xenial-queens rename to ceph-mon/tests/dev-basic-bionic-rocky index 5fa16a57..0bf3df92 100755 --- a/ceph-mon/tests/dev-basic-xenial-queens +++ b/ceph-mon/tests/dev-basic-bionic-rocky @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph deployment on xenial-queens.""" +"""Amulet tests on a basic ceph deployment on bionic-rocky.""" from basic_deployment import CephBasicDeployment if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') + deployment = CephBasicDeployment(series='bionic', + openstack='cloud:bionic-rocky', + source='cloud:bionic-updates/rocky') deployment.run_tests() diff --git a/ceph-mon/tests/dev-basic-cosmic-rocky b/ceph-mon/tests/dev-basic-cosmic-rocky new file mode 100755 index 00000000..933fb0db --- /dev/null +++ b/ceph-mon/tests/dev-basic-cosmic-rocky @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='cosmic') + deployment.run_tests() From 54463e947169b305296ab7c16281d3595adc4c6e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 18 Jul 2018 18:34:43 -0400 Subject: [PATCH 1538/2699] Update functional test definitions Change-Id: Ib370603b9c9106c85b6b36936c7ca4ea155cf3c1 --- ...c-xenial-queens => dev-basic-bionic-rocky} | 8 +++---- ceph-osd/tests/dev-basic-cosmic-rocky | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) rename ceph-osd/tests/{dev-basic-xenial-queens => dev-basic-bionic-rocky} (72%) create mode 100755 ceph-osd/tests/dev-basic-cosmic-rocky diff --git a/ceph-osd/tests/dev-basic-xenial-queens b/ceph-osd/tests/dev-basic-bionic-rocky similarity index 72% rename from ceph-osd/tests/dev-basic-xenial-queens rename to ceph-osd/tests/dev-basic-bionic-rocky index 61c7b06d..2e0e4ee1 100755 --- a/ceph-osd/tests/dev-basic-xenial-queens +++ b/ceph-osd/tests/dev-basic-bionic-rocky @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic ceph-osd deployment on xenial-queens.""" +"""Amulet tests on a basic ceph-osd deployment on bionic-rocky.""" from basic_deployment import CephOsdBasicDeployment if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') + deployment = CephOsdBasicDeployment(series='bionic', + openstack='cloud:bionic-rocky', + source='cloud:bionic-updates/rocky') deployment.run_tests() diff --git a/ceph-osd/tests/dev-basic-cosmic-rocky b/ceph-osd/tests/dev-basic-cosmic-rocky new file mode 100755 index 00000000..c6b6ef75 --- /dev/null +++ b/ceph-osd/tests/dev-basic-cosmic-rocky @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph-osd deployment on cosmic-rocky.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='cosmic') + deployment.run_tests() From d49c4f709a8e2e4c0bbdd0eb3fec692566a948f8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 18 Jul 2018 18:34:50 -0400 Subject: [PATCH 1539/2699] Update functional test definitions Change-Id: Ib3f932df1bc0fa0032962880b2830d767933271c --- ceph-proxy/tests/dev-basic-bionic-rocky | 11 +++++++++++ ceph-proxy/tests/dev-basic-cosmic-rocky | 9 +++++++++ 2 files changed, 20 insertions(+) create mode 100755 ceph-proxy/tests/dev-basic-bionic-rocky create mode 100755 ceph-proxy/tests/dev-basic-cosmic-rocky diff --git a/ceph-proxy/tests/dev-basic-bionic-rocky b/ceph-proxy/tests/dev-basic-bionic-rocky new file mode 100755 index 00000000..97c060db --- /dev/null +++ b/ceph-proxy/tests/dev-basic-bionic-rocky @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on bionic-rocky.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='bionic', + openstack='cloud:bionic-rocky', + source='cloud:bionic-updates/rocky') + deployment.run_tests() diff --git a/ceph-proxy/tests/dev-basic-cosmic-rocky b/ceph-proxy/tests/dev-basic-cosmic-rocky new file mode 100755 index 00000000..2d96d7c7 --- /dev/null +++ b/ceph-proxy/tests/dev-basic-cosmic-rocky @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='cosmic') + deployment.run_tests() From 039d29b7b2e3f79a96687591ce568c0d15af9c79 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 18 Jul 2018 18:34:58 -0400 Subject: [PATCH 1540/2699] Update functional test definitions Change-Id: If4d161b12b771a7c92c2994faeddb68c89d0737b --- ...c-xenial-queens => dev-basic-bionic-rocky} | 8 +++---- ceph-radosgw/tests/dev-basic-cosmic-rocky | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) rename ceph-radosgw/tests/{dev-basic-xenial-queens => dev-basic-bionic-rocky} (71%) create mode 100755 ceph-radosgw/tests/dev-basic-cosmic-rocky diff --git a/ceph-radosgw/tests/dev-basic-xenial-queens b/ceph-radosgw/tests/dev-basic-bionic-rocky similarity index 71% rename from ceph-radosgw/tests/dev-basic-xenial-queens rename to ceph-radosgw/tests/dev-basic-bionic-rocky index fff90006..8e758fba 100755 --- a/ceph-radosgw/tests/dev-basic-xenial-queens +++ b/ceph-radosgw/tests/dev-basic-bionic-rocky @@ -14,12 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Amulet tests on a basic ceph-radosgw deployment on xenial-queens.""" +"""Amulet tests on a basic ceph-radosgw deployment on bionic-rocky.""" from basic_deployment import CephRadosGwBasicDeployment if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') + deployment = CephRadosGwBasicDeployment(series='bionic', + openstack='cloud:bionic-rocky', + source='cloud:bionic-updates/rocky') deployment.run_tests() diff --git a/ceph-radosgw/tests/dev-basic-cosmic-rocky b/ceph-radosgw/tests/dev-basic-cosmic-rocky new file mode 100755 index 00000000..dd93dacb --- /dev/null +++ b/ceph-radosgw/tests/dev-basic-cosmic-rocky @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph-radosgw deployment on cosmic-rocky.""" + +from basic_deployment import CephRadosGwBasicDeployment + +if __name__ == '__main__': + deployment = CephRadosGwBasicDeployment(series='cosmic') + deployment.run_tests() From d8fe7f0d66b250d73fb71230f2e8b64a18f7e353 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 Jul 2018 16:11:53 +0200 Subject: [PATCH 1541/2699] Rebuild for sync charm-helpers to ensure rocky support Change-Id: I9e6aa27afb6c04130b4e7d3dcea86c40b5268f89 --- ceph-fs/rebuild | 2 +- ceph-fs/src/tests/basic_deployment.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index d9074866..08ac52e8 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -1b817f14-1742-11e8-bf02-13a179b934e9 +93e11b9e-86a6-11e8-b3ac-9f8b2e5df0b9 diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 4801bb5e..128e2305 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -87,9 +87,7 @@ def _configure_services(self, **kwargs): ceph_fs_config = { 'source': self.source, } - # NOTE(jamespage): fix fsid to allow later validation ceph_mon_config = { - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'source': self.source, } # Include a non-existent device as osd-devices is a whitelist, @@ -180,9 +178,10 @@ def test_300_ceph_config(self): u.log.debug('Checking ceph config file data...') unit = self.ceph_mon0_sentry conf = '/etc/ceph/ceph.conf' + (fsid, _) = unit.run('leader-get fsid') expected = { 'global': { - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'fsid': fsid, 'log to syslog': 'false', 'err to syslog': 'false', 'clog to syslog': 'false', From 8a4befd05be2d4f022667c39ea24574711e63d22 Mon Sep 17 00:00:00 2001 From: Nicolas Pochet Date: Thu, 19 Jul 2018 18:45:26 +0200 Subject: [PATCH 1542/2699] Remove reference to ntp package This is motivated by the fact that: * the ntp/chrony package is already a dependency of the ceph-mon/osd package * NTP will be managed by operators through the NTP charm It is thus useless to keep that package mentioned here. Change-Id: If6ecfc6f515bec4f955a5b52df8f0fbfafd29008 Closes-Bug: #1780690 Depends-on: Iddb7ffcc7ab7a74700855b950f619208511c2fab --- ceph-mon/lib/ceph/utils.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 7bb951ca..7759ec49 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -40,7 +40,6 @@ service_start, service_stop, CompareHostReleases, - is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -81,7 +80,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'python-ceph', 'radosgw', 'xfsprogs', 'python-pyudev', 'lvm2', 'parted'] @@ -2615,9 +2614,6 @@ def determine_packages(): :returns: list of ceph packages """ - if is_container(): - PACKAGES.remove('ntp') - return PACKAGES From 3969175511ce7b660b0099c85658ad765f4a7bd3 Mon Sep 17 00:00:00 2001 From: Nicolas Pochet Date: Thu, 19 Jul 2018 15:08:39 +0200 Subject: [PATCH 1543/2699] Remove reference to ntp package This is motivated by the fact that: * the ntp/chrony package is already a dependency of the ceph-mon/osd package * NTP will be managed by operators through the NTP charm It is thus useless to keep that package mentioned here. 
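Besides dropping a redundant package, this change also removes a mutation of module state: determine_packages() used to call PACKAGES.remove('ntp') when running in a container, permanently altering the shared list for the rest of the process. A minimal sketch (reconstructed from the removed lines, not charm code) of why that pattern was brittle:

    PACKAGES = ['ceph', 'gdisk', 'ntp', 'radosgw']

    def determine_packages(in_container):
        """Old behaviour: mutate the module-level list in containers."""
        if in_container:
            PACKAGES.remove('ntp')
        return PACKAGES

    determine_packages(True)      # -> ['ceph', 'gdisk', 'radosgw']
    try:
        determine_packages(True)  # 'ntp' is already gone
    except ValueError:
        print("second call fails: the list was mutated in place")

After this patch, determine_packages() simply returns PACKAGES, which no longer mentions ntp at all.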
Change-Id: I5834ff22d4306707529e958cd26f14bbb752c796 Closes-Bug: #1780690 Depends-on: Iddb7ffcc7ab7a74700855b950f619208511c2fab --- ceph-osd/lib/ceph/utils.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 6d039cd3..15f54ef5 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -40,7 +40,6 @@ service_start, service_stop, CompareHostReleases, - is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -81,7 +80,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'python-ceph', 'radosgw', 'xfsprogs', 'python-pyudev', 'lvm2', 'parted'] @@ -2643,9 +2642,6 @@ def determine_packages(): :returns: list of ceph packages """ - if is_container(): - PACKAGES.remove('ntp') - return PACKAGES From f6cd8fd8c0a4da8d8c612ef595a1d7a23ae9d237 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Jul 2018 06:51:45 +0100 Subject: [PATCH 1544/2699] mimic: Sync charms.ceph for misc fixes Ensures mon cluster is not queried early in unit lifecycle when the cluster is not bootstrapped or accessible by the ceph-osd unit. Block device sizes are set in local config so querying the monitor is OK to skip. Change-Id: Iea37455b0946560056ee665e819ee0a2a4a7832c --- ceph-osd/lib/ceph/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 15f54ef5..8a44afc5 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1717,6 +1717,7 @@ def get_conf(variable): return subprocess.check_output([ 'ceph-osd', '--show-config-value={}'.format(variable), + '--no-mon-config', ]).strip() From f11f899a900bd30db3df4edfce2b59da2155f364 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Jul 2018 17:05:12 +0100 Subject: [PATCH 1545/2699] Use provided device paths Ensure that device paths provided by end users are used for OSD's, rather than the link target device as this may change between reboots. The specific use case is bcache, where: /dev/bcacheX: changes between reboots /dev/disk/by-dname/bcacheX: udev managed and consistent This change also ensures that any unit data is updated to switch back to using the provided block device path, rather than the actual target which may have been used in prior charm revisions. Change-Id: If5e88d93b9323052ea762d3a4b66f2442d4a19be Depends-On: If0e1fbc62bfe7d0f9e21db9bfdeee761060de846 Closes-Bug: 1782439 --- ceph-osd/hooks/ceph_hooks.py | 38 +++++++++++++++++++++++++- ceph-osd/lib/ceph/utils.py | 6 ++-- ceph-osd/unit_tests/test_ceph_hooks.py | 9 ++++-- ceph-osd/unit_tests/test_config.py | 14 ++++++++-- 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index dfea7363..3b8cfcdd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -89,6 +89,7 @@ CephConfContext) from charmhelpers.contrib.storage.linux.utils import ( is_device_mounted, + is_block_device, ) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -522,10 +523,19 @@ def get_devices(): if config('osd-devices'): for path in config('osd-devices').split(' '): path = path.strip() + # Ensure that only block devices + # are considered for evaluation as block devices. 
+ # This avoids issues with relative directories + # being passed via configuration, and ensures that + # the path to a block device provided by the user + # is used, rather than its target which may change + # between reboots in the case of bcache devices. + if is_block_device(path): + devices.append(path) # Make sure its a device which is specified using an # absolute path so that the current working directory # or any relative path under this directory is not used - if os.path.isabs(path): + elif os.path.isabs(path): devices.append(os.path.realpath(path)) # List storage instances for the 'osd-devices' @@ -562,6 +572,32 @@ def upgrade_charm(): apt_install(packages=filter_installed_packages(ceph.determine_packages()), fatal=True) install_udev_rules() + remap_resolved_targets() + + +def remap_resolved_targets(): + '''Remap any previous fully resolved target devices to provided names''' + # NOTE(jamespage): Deal with any prior provided dev to + # target device resolution which occurred in prior + # releases of the charm - the user provided value + # should be used in preference to the target path + # to the block device as in some instances this + # is not consistent between reboots (bcache). + db = kv() + touched_devices = db.get('osd-devices', []) + osd_devices = get_devices() + for dev in osd_devices: + real_path = os.path.realpath(dev) + if real_path != dev and real_path in touched_devices: + log('Device {} already processed by charm using ' + 'actual device path {}, updating block device ' + 'usage with provided device path ' + 'and skipping'.format(dev, + real_path)) + touched_devices.remove(real_path) + touched_devices.append(dev) + db.set('osd-devices', touched_devices) + db.flush() @hooks.hook('nrpe-external-master-relation-joined', diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 8a44afc5..4d6ac326 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1699,8 +1699,10 @@ def is_mapped_luks_device(dev): :param: dev: A full path to a block device to be checked :returns: boolean: indicates whether a device is mapped """ - _, dirs, _ = next(os.walk('/sys/class/block/{}/holders/' - .format(os.path.basename(dev)))) + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) is_held = len(dirs) > 0 return is_held and is_luks_device(dev) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 9b660c3a..543d3564 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -395,26 +395,31 @@ def test_install_apparmor_profile_new_install(self, mock_config, call('ceph-osd@2'), ]) + @patch.object(ceph_hooks, 'is_block_device') @patch.object(ceph_hooks, 'storage_list') @patch.object(ceph_hooks, 'config') - def test_get_devices(self, mock_config, mock_storage_list): + def test_get_devices(self, mock_config, mock_storage_list, + mock_is_block_device): '''Devices returned as expected''' config = {'osd-devices': '/dev/vda /dev/vdb'} mock_config.side_effect = lambda key: config[key] mock_storage_list.return_value = [] + mock_is_block_device.return_value = True devices = ceph_hooks.get_devices() self.assertEqual(devices, ['/dev/vda', '/dev/vdb']) + @patch.object(ceph_hooks, 'is_block_device') @patch.object(ceph_hooks, 'get_blacklist') @patch.object(ceph_hooks, 'storage_list') @patch.object(ceph_hooks, 'config') def test_get_devices_blacklist(self, mock_config, mock_storage_list, - mock_get_blacklist): + 
mock_get_blacklist, mock_is_block_device): '''Devices returned as expected when blacklist in effect''' config = {'osd-devices': '/dev/vda /dev/vdb'} mock_config.side_effect = lambda key: config[key] mock_storage_list.return_value = [] mock_get_blacklist.return_value = ['/dev/vda'] mock_is_block_device.return_value = True devices = ceph_hooks.get_devices() mock_storage_list.assert_called() mock_get_blacklist.assert_called() diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index c3ae347e..aa539859 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -34,6 +34,7 @@ TO_PATCH = [ 'config', + 'is_block_device', ] @@ -43,6 +44,13 @@ def setUp(self): super(GetDevicesTestCase, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get self.tmp_dir = tempfile.mkdtemp() + self.bd = { + os.path.join(self.tmp_dir, "device1"): True, + os.path.join(self.tmp_dir, "device2"): True, + os.path.join(self.tmp_dir, "link"): True, + os.path.join(self.tmp_dir, "device"): True, + } + self.is_block_device.side_effect = lambda x: self.bd.get(x, False) self.addCleanup(shutil.rmtree, self.tmp_dir) def test_get_devices_empty(self): @@ -93,11 +101,11 @@ def test_get_devices_non_absolute_path(self): def test_get_devices_symlink(self): """ - If a symlink is specified in osd-devices, get_devices() resolves - it and returns the link target. + If a symlink is specified in osd-devices, get_devices() does not + resolve it and returns the symlink provided. """ device = os.path.join(self.tmp_dir, "device") link = os.path.join(self.tmp_dir, "link") os.symlink(device, link) self.test_config.set("osd-devices", link) - self.assertEqual([device], hooks.get_devices()) + self.assertEqual([link], hooks.get_devices()) From 4f382cb27a0bea2c680bc132cad4f50ec3f10c39 Mon Sep 17 00:00:00 2001 From: dongdong tao Date: Wed, 25 Jul 2018 21:48:23 +0800 Subject: [PATCH 1546/2699] Fix ceph-mon being unable to write logs due to incorrect permissions on the log file Change-Id: I34cd8ba1e45b90e60bd9c49d7d3973008bd95559 Signed-off-by: dongdong tao Closes-Bug: #1783526 --- ceph-mon/lib/ceph/utils.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 7759ec49..a958dfd8 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1287,6 +1287,7 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): subprocess.check_call(['ceph-mon', '--mkfs', '-i', hostname, '--keyring', keyring]) + chownr('/var/log/ceph', ceph_user(), ceph_user()) chownr(path, ceph_user(), ceph_user()) with open(done, 'w'): pass @@ -1466,6 +1467,11 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, ' skipping.'.format(dev)) return + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return + if cmp_pkgrevno('ceph', '12.2.4') >= 0: cmd = _ceph_volume(dev, osd_journal, @@ -1677,6 +1683,31 @@ def is_active_bluestore_device(dev): return False + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header.
+ """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def get_conf(variable): """ Get the value of the given configuration variable from the @@ -1689,6 +1720,7 @@ def get_conf(variable): return subprocess.check_output([ 'ceph-osd', '--show-config-value={}'.format(variable), + '--no-mon-config', ]).strip() From aeb8bce47ee61a4338dfbdec0989bc6a1aa968bb Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 15 Aug 2018 11:37:01 +0100 Subject: [PATCH 1547/2699] Enable bluestore by default For Ceph Lumimous (12.2.0) or later enable Bluestore block device format as the default for Ceph OSD's. Bluestore can be disabled by setting the bluestore config option to False. For older releases, Bluestore cannot be enabled as its not supported - setting the config option will have no effect. Change-Id: I5ca657b9c4da055c4e0ff12e8b91b39d0964be8c --- ceph-osd/config.yaml | 9 +++------ ceph-osd/hooks/ceph_hooks.py | 15 +++++++++++++-- ceph-osd/tests/basic_deployment.py | 2 -- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 4e0f4a55..8e04fb10 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -120,13 +120,10 @@ options: Only supported with ceph >= 0.48.3. bluestore: type: boolean - default: false + default: True description: | - Use experimental bluestore storage format for OSD devices; only supported - in Ceph Jewel (10.2.0) or later. - . - Note that despite bluestore being the default for Ceph Luminous, if this - option is False, OSDs will still use filestore. + Enable bluestore storage format for OSD devices; Only applies for Ceph + Luminous or later. osd-encrypt: type: boolean default: False diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 3b8cfcdd..414ce1b0 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -220,6 +220,16 @@ def use_vaultlocker(): return False +def use_bluestore(): + """Determine whether bluestore should be used for OSD's + + :returns: whether bluestore disk format should be used + :rtype: bool""" + if cmp_pkgrevno('ceph', '10.2.0') < 0: + return False + return config('bluestore') + + def install_apparmor_profile(): """ Install ceph apparmor profiles and configure @@ -336,7 +346,7 @@ def get_ceph_context(upgrading=False): 'dio': str(config('use-direct-io')).lower(), 'short_object_len': use_short_objects(), 'upgrade_in_progress': upgrading, - 'bluestore': config('bluestore'), + 'bluestore': use_bluestore(), 'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0, 'bluestore_block_wal_size': config('bluestore-block-wal-size'), 'bluestore_block_db_size': config('bluestore-block-db-size'), @@ -468,12 +478,13 @@ def prepare_disks_and_activate(): if ceph.is_bootstrapped(): log('ceph bootstrapped, rescanning disks') emit_cephconf() + bluestore = use_bluestore() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), osd_journal, config('ignore-device-errors'), config('osd-encrypt'), - config('bluestore'), + bluestore, config('osd-encrypt-keymanager')) # Make it fast! 
if config('autotune'): diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 26c9f53d..8c75e98f 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -398,8 +398,6 @@ def test_300_ceph_osd_config(self): }, 'osd': { 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', - 'osd journal size': '1024', - 'filestore xattr use omap': 'true' }, } From d56e5235c7cbde8fed024f416de22ae8db9397b5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 24 Aug 2018 16:32:47 +0200 Subject: [PATCH 1548/2699] Sync in charms.ceph This patch looks for multiple nodes in the OSD tree with type root and iterates through all root parent node children to allow for upgrading ceph-osd cluster/devices when running both a default and an ssd pool of OSD hosts, for instance. Change-Id: Iea9812ee7ac67f9b45a6b38c43c130353e68ad8f Closes-Bug: #1788722 Depends-On: I69d653f9f3ea4ee8469f3d7323ee68435ba22099 --- ceph-osd/lib/ceph/utils.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 4d6ac326..53281ea7 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -579,7 +579,15 @@ def get_osd_tree(service): # Make sure children are present in the json if not json_tree['nodes']: return None - child_ids = json_tree['nodes'][0]['children'] + parent_nodes = [ + node for node in json_tree['nodes'] if node['type'] == 'root'] + child_ids = [] + for node in parent_nodes: + try: + child_ids = child_ids + node['children'] + except KeyError: + # skip if this parent has no children + continue for child in json_tree['nodes']: if child['id'] in child_ids: crush_list.append( @@ -1287,6 +1295,7 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): subprocess.check_call(['ceph-mon', '--mkfs', '-i', hostname, '--keyring', keyring]) + chownr('/var/log/ceph', ceph_user(), ceph_user()) chownr(path, ceph_user(), ceph_user()) with open(done, 'w'): pass From 23b09cbeb9afd73ed1f9b2e3e072f2f16c4809b8 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 27 Aug 2018 18:13:00 -0500 Subject: [PATCH 1549/2699] Update functional test definitions Change-Id: I65af0862d9db22cf84c52fb4ff6fe19f6a61d705 --- .../charmhelpers/contrib/openstack/utils.py | 48 ++++++++++++++- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 58 ++++++++++++++++++- .../contrib/openstack/amulet/utils.py | 29 +++++++--- ceph-proxy/tests/charmhelpers/core/hookenv.py | 58 ++++++++++++++++++- ...c-bionic-rocky => gate-basic-bionic-rocky} | 0 5 files changed, 180 insertions(+), 13 deletions(-) rename ceph-proxy/tests/{dev-basic-bionic-rocky => gate-basic-bionic-rocky} (100%) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 0180e555..24f5b808 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None): """Check if the unit is supposed to be paused, and if so check that the services/ports (if passed) are actually stopped/not being listened to. - if the unit isn't supposed to be paused, just return None, None + If the unit isn't supposed to be paused, just return None, None + + If the unit is performing a series upgrade, return a message indicating + this. @param services: OPTIONAL services spec or list of service names. 
@param ports: OPTIONAL list of port numbers. @returns state, message or None, None """ + if is_unit_upgrading_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "blocked" + message = ("Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return state, message + if is_unit_paused_set(): state, message = check_actually_paused(services=services, ports=ports) @@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None, message = assess_status_func() if message: messages.append(message) - if messages: + if messages and not is_unit_upgrading_set(): raise Exception("Couldn't pause: {}".format("; ".join(messages))) @@ -1689,3 +1702,34 @@ def _ensure_flag(flag): snap_install(snap, _ensure_flag(snaps[snap]['channel']), _ensure_flag(snaps[snap]['mode'])) + + +def set_unit_upgrading(): + """Set the unit to a upgrading state in the local kv() store. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', True) + + +def clear_unit_upgrading(): + """Clear the unit from a upgrading state in the local kv() store + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', False) + + +def is_unit_upgrading_set(): + """Return the state of the kv().get('unit-upgrading'). + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-upgrading'))) + except Exception: + return False diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. 
+ raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..6637865d 100644 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ b/ceph-proxy/tests/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. 
+ """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-proxy/tests/dev-basic-bionic-rocky b/ceph-proxy/tests/gate-basic-bionic-rocky similarity index 100% rename from ceph-proxy/tests/dev-basic-bionic-rocky rename to ceph-proxy/tests/gate-basic-bionic-rocky From 825e253aefbe4a2de0151fa572d496b0607673bb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 27 Aug 2018 18:13:19 -0500 Subject: [PATCH 1550/2699] Update functional test definitions Change-Id: I6a084d765a85a46ad77350215da34abf98cd15a0 --- .../contrib/openstack/amulet/utils.py | 29 +- .../charmhelpers/contrib/openstack/context.py | 7 +- .../contrib/openstack/ssh_migrations.py | 412 ++++++++++++++++++ .../templates/wsgi-openstack-api.conf | 6 +- .../templates/wsgi-openstack-metadata.conf | 91 ++++ .../charmhelpers/contrib/openstack/utils.py | 48 +- .../hooks/charmhelpers/core/hookenv.py | 58 ++- .../contrib/openstack/amulet/utils.py | 32 +- .../tests/charmhelpers/core/hookenv.py | 58 ++- ...c-bionic-rocky => gate-basic-bionic-rocky} | 0 10 files changed, 715 insertions(+), 26 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf rename ceph-radosgw/tests/{dev-basic-bionic-rocky => gate-basic-bionic-rocky} (100%) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..6637865d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, 
keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index f3741b0e..ca913961 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1389,11 +1389,12 @@ def __call__(self): class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, - public_script=None, process_weight=1.00, + public_script=None, user=None, group=None, + process_weight=1.00, admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name - self.user = name - self.group = name + self.user = user or name + self.group = group or name self.script = script self.admin_script = admin_script self.public_script = public_script diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 00000000..96b9f71d --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. + :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. 
+ + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key we get will be formatted like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part starting with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. + + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' % host) + with open(known_hosts(application_name, user), 'a') as out: + out.write("{}\n".format(remote_key)) + + +def ssh_authorized_key_exists(public_key, application_name, user=None): + """Check if given key is in the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Whether given key is in the authorized_key file. + :rtype: boolean + """ + with open(authorized_keys(application_name, user)) as keys: + return ('%s' % public_key) in keys.read() + + +def add_authorized_key(public_key, application_name, user=None): + """Add given key to the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + with open(authorized_keys(application_name, user), 'a') as keys: + keys.write("{}\n".format(public_key)) + + +def ssh_compute_add_host_and_key(public_key, hostname, private_address, + application_name, user=None): + """Add a compute node's ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param hostname: Hostname to collect host keys from. + :type hostname: str + :param private_address: Corresponding private address for hostname + :type private_address: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + # If remote compute node hands us a hostname, ensure we have a + # known hosts entry for its IP, hostname and FQDN.
+ hosts = [private_address] + + if not is_ipv6(private_address): + if hostname: + hosts.append(hostname) + + if is_ip(private_address): + hn = get_hostname(private_address) + if hn: + hosts.append(hn) + short = hn.split('.')[0] + if ns_query(short): + hosts.append(short) + else: + hosts.append(get_host_ip(private_address)) + short = private_address.split('.')[0] + if ns_query(short): + hosts.append(short) + + for host in list(set(hosts)): + add_known_host(host, application_name, user) + + if not ssh_authorized_key_exists(public_key, application_name, user): + log('Saving SSH authorized key for compute host at %s.' % + private_address) + add_authorized_key(public_key, application_name, user) + + +def ssh_compute_add(public_key, application_name, rid=None, unit=None, + user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param rid: Relation id of the relation between this charm and the app. If + none is supplied it is assumed its the relation relating to + the current hook context. + :type rid: str + :param unit: Unit to add ssh asserts for if none is supplied it is assumed + its the unit relating to the current hook context. + :type unit: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + relation_data = relation_get(rid=rid, unit=unit) + ssh_compute_add_host_and_key( + public_key, + relation_data.get('hostname'), + relation_data.get('private-address'), + application_name, + user=user) + + +def ssh_known_hosts_lines(application_name, user=None): + """Return contents of known_hosts file for given application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + known_hosts_list = [] + with open(known_hosts(application_name, user)) as hosts: + for hosts_line in hosts: + if hosts_line.rstrip(): + known_hosts_list.append(hosts_line.rstrip()) + return(known_hosts_list) + + +def ssh_authorized_keys_lines(application_name, user=None): + """Return contents of authorized_keys file for given application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + authorized_keys_list = [] + + with open(authorized_keys(application_name, user)) as keys: + for authkey_line in keys: + if authkey_line.rstrip(): + authorized_keys_list.append(authkey_line.rstrip()) + return(authorized_keys_list) + + +def ssh_compute_remove(public_key, application_name, user=None): + """Remove given public key from authorized_keys file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. 
+ :type user: str + """ + if not (os.path.isfile(authorized_keys(application_name, user)) or + os.path.isfile(known_hosts(application_name, user))): + return + + keys = ssh_authorized_keys_lines(application_name, user=None) + keys = [k.strip() for k in keys] + + if public_key not in keys: + return + + [keys.remove(key) for key in keys if key == public_key] + + with open(authorized_keys(application_name, user), 'w') as _keys: + keys = '\n'.join(keys) + if not keys.endswith('\n'): + keys += '\n' + _keys.write(keys) + + +def get_ssh_settings(application_name, user=None): + """Retrieve the known host entries and public keys for application + + Retrieve the known host entries and public keys for application for all + units of the given application related to this application for the + app + user combination. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Public keys + host keys for all units for app + user combination. + :rtype: dict + """ + settings = {} + keys = {} + prefix = '' + if user: + prefix = '{}_'.format(user) + + for i, line in enumerate(ssh_known_hosts_lines( + application_name=application_name, user=user)): + settings['{}known_hosts_{}'.format(prefix, i)] = line + if settings: + settings['{}known_hosts_max_index'.format(prefix)] = len( + settings.keys()) + + for i, line in enumerate(ssh_authorized_keys_lines( + application_name=application_name, user=user)): + keys['{}authorized_keys_{}'.format(prefix, i)] = line + if keys: + keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys()) + settings.update(keys) + return settings + + +def get_all_user_ssh_settings(application_name): + """Retrieve the known host entries and public keys for application + + Retrieve the known host entries and public keys for application for all + units of the given application related to this application for root user + and nova user. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :returns: Public keys + host keys for all units for app + user combination. 
+ :rtype: dict + """ + settings = get_ssh_settings(application_name) + settings.update(get_ssh_settings(application_name, user='nova')) + return settings diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index e2e73b2c..23b62a38 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -14,7 +14,7 @@ Listen {{ public_port }} {% if port -%} - WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ display-name=%{GROUP} WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} @@ -40,7 +40,7 @@ Listen {{ public_port }} {% if admin_port -%} - WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ display-name=%{GROUP} WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} @@ -66,7 +66,7 @@ Listen {{ public_port }} {% if public_port -%} - WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ display-name=%{GROUP} WSGIProcessGroup {{ service_name }}-public WSGIScriptAlias / {{ public_script }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf new file mode 100644 index 00000000..23b62a38 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -0,0 +1,91 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. 
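The WSGIWorkerConfigContext change above decouples the Apache daemon user and group from the service name; both now default to the name but can be overridden, which is what the user={{ user }} group={{ group }} substitutions in this template consume. A short usage sketch (constructor signature taken from the diff above; the values are hypothetical):

    from charmhelpers.contrib.openstack.context import (
        WSGIWorkerConfigContext,
    )

    # Explicit override: run the WSGI processes as 'nova'
    ctx = WSGIWorkerConfigContext(name='nova-metadata',
                                  script='/usr/bin/nova-metadata-wsgi',
                                  user='nova', group='nova')
    assert (ctx.user, ctx.group) == ('nova', 'nova')

    # Fallback: user/group default to the service name
    assert WSGIWorkerConfigContext(name='keystone').user == 'keystone'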
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
new file mode 100644
index 00000000..23b62a38
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
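(For illustration only: rendering the template above with jinja2; the
values are invented for the sketch. A port variable left unset simply
drops its Listen line and <VirtualHost> stanza.)

    from jinja2 import Template

    with open('wsgi-openstack-metadata.conf') as f:
        rendered = Template(f.read()).render(
            port=8070, service_name='ceph-radosgw',
            processes=4, threads=2, user='www-data', group='www-data',
            script='/usr/share/ceph-radosgw/wsgi.py')
    print(rendered)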
" + "Set complete when finished.") + return state, message + if is_unit_paused_set(): state, message = check_actually_paused(services=services, ports=ports) @@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None, message = assess_status_func() if message: messages.append(message) - if messages: + if messages and not is_unit_upgrading_set(): raise Exception("Couldn't pause: {}".format("; ".join(messages))) @@ -1689,3 +1702,34 @@ def _ensure_flag(flag): snap_install(snap, _ensure_flag(snaps[snap]['channel']), _ensure_flag(snaps[snap]['mode'])) + + +def set_unit_upgrading(): + """Set the unit to a upgrading state in the local kv() store. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', True) + + +def clear_unit_upgrading(): + """Clear the unit from a upgrading state in the local kv() store + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', False) + + +def is_unit_upgrading_set(): + """Return the state of the kv().get('unit-upgrading'). + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-upgrading'))) + except Exception: + return False diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. 
+ return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..936b4036 100644 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, @@ -998,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. 
+ """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-radosgw/tests/dev-basic-bionic-rocky b/ceph-radosgw/tests/gate-basic-bionic-rocky similarity index 100% rename from ceph-radosgw/tests/dev-basic-bionic-rocky rename to ceph-radosgw/tests/gate-basic-bionic-rocky From caced2e9d6bd6f736e43e2f782acd57c0d00db72 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 27 Aug 2018 18:12:45 -0500 Subject: [PATCH 1551/2699] Update functional test definitions Change-Id: Ib8c7ccae838d4d79a5f9cf737fa8fd3478afadc4 --- .../contrib/openstack/amulet/utils.py | 32 +- .../charmhelpers/contrib/openstack/context.py | 7 +- .../contrib/openstack/ssh_migrations.py | 412 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 48 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 58 ++- ceph-osd/tests/basic_deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 32 +- ceph-osd/tests/charmhelpers/core/hookenv.py | 58 ++- ...c-bionic-rocky => gate-basic-bionic-rocky} | 0 9 files changed, 625 insertions(+), 24 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py rename ceph-osd/tests/{dev-basic-bionic-rocky => gate-basic-bionic-rocky} (100%) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..936b4036 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = 
keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, @@ -998,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index f3741b0e..ca913961 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1389,11 +1389,12 @@ def __call__(self): class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, - public_script=None, process_weight=1.00, + public_script=None, user=None, group=None, + process_weight=1.00, admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name - self.user = name - self.group = name + self.user = user or name + self.group = group or name self.script = script self.admin_script = admin_script self.public_script = public_script diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 00000000..96b9f71d --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. + :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. 
+ :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. + + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. + + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' % host) + with open(known_hosts(application_name, user), 'a') as out: + out.write("{}\n".format(remote_key)) + + +def ssh_authorized_key_exists(public_key, application_name, user=None): + """Check if given key is in the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Whether given key is in the authorized_key file. + :rtype: boolean + """ + with open(authorized_keys(application_name, user)) as keys: + return ('%s' % public_key) in keys.read() + + +def add_authorized_key(public_key, application_name, user=None): + """Add given key to the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + with open(authorized_keys(application_name, user), 'a') as keys: + keys.write("{}\n".format(public_key)) + + +def ssh_compute_add_host_and_key(public_key, hostname, private_address, + application_name, user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param hostname: Hostname to collect host keys from. 
+    :type hostname: str
+    :param private_address: Corresponding private address for hostname
+    :type private_address: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    # If remote compute node hands us a hostname, ensure we have a
+    # known hosts entry for its IP, hostname and FQDN.
+    hosts = [private_address]
+
+    if not is_ipv6(private_address):
+        if hostname:
+            hosts.append(hostname)
+
+        if is_ip(private_address):
+            hn = get_hostname(private_address)
+            if hn:
+                hosts.append(hn)
+                short = hn.split('.')[0]
+                if ns_query(short):
+                    hosts.append(short)
+        else:
+            hosts.append(get_host_ip(private_address))
+            short = private_address.split('.')[0]
+            if ns_query(short):
+                hosts.append(short)
+
+    for host in list(set(hosts)):
+        add_known_host(host, application_name, user)
+
+    if not ssh_authorized_key_exists(public_key, application_name, user):
+        log('Saving SSH authorized key for compute host at %s.' %
+            private_address)
+        add_authorized_key(public_key, application_name, user)
+
+
+def ssh_compute_add(public_key, application_name, rid=None, unit=None,
+                    user=None):
+    """Add a compute node's ssh details to the local cache.
+
+    Collect various hostname variations and add the corresponding host keys to
+    the local known hosts file. Finally, add the supplied public key to the
+    authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param rid: Relation id of the relation between this charm and the app. If
+                none is supplied it is assumed to be the relation relating to
+                the current hook context.
+    :type rid: str
+    :param unit: Unit to add ssh assets for. If none is supplied it is assumed
+                 to be the unit relating to the current hook context.
+    :type unit: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    relation_data = relation_get(rid=rid, unit=unit)
+    ssh_compute_add_host_and_key(
+        public_key,
+        relation_data.get('hostname'),
+        relation_data.get('private-address'),
+        application_name,
+        user=user)
+
+
+def ssh_known_hosts_lines(application_name, user=None):
+    """Return contents of known_hosts file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    known_hosts_list = []
+    with open(known_hosts(application_name, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return known_hosts_list
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+    """Return contents of authorized_keys file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    authorized_keys_list = []
+
+    with open(authorized_keys(application_name, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return authorized_keys_list
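(For illustration only: a relation-changed hook might drive
ssh_compute_add() like this; the relation key 'ssh_public_key' and the
application name are assumptions for the sketch.)

    from charmhelpers.core.hookenv import relation_get
    from charmhelpers.contrib.openstack.ssh_migrations import (
        ssh_compute_add,
    )

    def compute_relation_changed():
        public_key = relation_get('ssh_public_key')
        if public_key:
            # hostname and private-address are read off the relation
            # inside ssh_compute_add(); only the key is passed in.
            ssh_compute_add(public_key, 'nova-compute', user='nova')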
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+    """Remove given public key from authorized_keys file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    if not (os.path.isfile(authorized_keys(application_name, user)) or
+            os.path.isfile(known_hosts(application_name, user))):
+        return
+
+    # Read the existing keys for this application/user combination.
+    keys = ssh_authorized_keys_lines(application_name, user=user)
+    keys = [k.strip() for k in keys]
+
+    if public_key not in keys:
+        return
+
+    # Drop every occurrence of the key being removed.
+    keys = [k for k in keys if k != public_key]
+
+    with open(authorized_keys(application_name, user), 'w') as _keys:
+        keys = '\n'.join(keys)
+        if not keys.endswith('\n'):
+            keys += '\n'
+        _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for the
+    app + user combination.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Public keys + host keys for all units for app + user combination.
+    :rtype: dict
+    """
+    settings = {}
+    keys = {}
+    prefix = ''
+    if user:
+        prefix = '{}_'.format(user)
+
+    for i, line in enumerate(ssh_known_hosts_lines(
+            application_name=application_name, user=user)):
+        settings['{}known_hosts_{}'.format(prefix, i)] = line
+    if settings:
+        settings['{}known_hosts_max_index'.format(prefix)] = len(
+            settings.keys())
+
+    for i, line in enumerate(ssh_authorized_keys_lines(
+            application_name=application_name, user=user)):
+        keys['{}authorized_keys_{}'.format(prefix, i)] = line
+    if keys:
+        keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
+    settings.update(keys)
+    return settings
+
+
+def get_all_user_ssh_settings(application_name):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for root user
+    and nova user.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :returns: Public keys + host keys for all units for app + user combination.
+    :rtype: dict
+    """
+    settings = get_ssh_settings(application_name)
+    settings.update(get_ssh_settings(application_name, user='nova'))
+    return settings
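(For illustration only, anticipating the utils.py hunk below: a charm's
series-upgrade handlers might toggle the new kv flag so that status
checks report "Ready for do-release-upgrade and reboot." instead of an
error while services are down. The handler names are assumptions.)

    from charmhelpers.contrib.openstack.utils import (
        set_unit_upgrading,
        clear_unit_upgrading,
    )

    def pre_series_upgrade():
        # Mark the unit as upgrading before services are stopped.
        set_unit_upgrading()

    def post_series_upgrade():
        # The flag is cleared once the new series is up.
        clear_unit_upgrading()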
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index 0180e555..24f5b808 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):
     """Check if the unit is supposed to be paused, and if so check that the
     services/ports (if passed) are actually stopped/not being listened to.
 
-    if the unit isn't supposed to be paused, just return None, None
+    If the unit isn't supposed to be paused, just return None, None
+
+    If the unit is performing a series upgrade, return a message indicating
+    this.
 
     @param services: OPTIONAL services spec or list of service names.
     @param ports: OPTIONAL list of port numbers.
     @returns state, message or None, None
     """
+    if is_unit_upgrading_set():
+        state, message = check_actually_paused(services=services,
+                                               ports=ports)
+        if state is None:
+            # we're paused okay, so set the state to blocked and return
+            state = "blocked"
+            message = ("Ready for do-release-upgrade and reboot. "
+                       "Set complete when finished.")
+        return state, message
+
     if is_unit_paused_set():
         state, message = check_actually_paused(services=services,
                                                ports=ports)
@@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
     message = assess_status_func()
     if message:
         messages.append(message)
-    if messages:
+    if messages and not is_unit_upgrading_set():
         raise Exception("Couldn't pause: {}".format("; ".join(messages)))
 
 
@@ -1689,3 +1702,34 @@ def _ensure_flag(flag):
         snap_install(snap,
                      _ensure_flag(snaps[snap]['channel']),
                      _ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+    """Set the unit to an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+    """Clear the unit from an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+    """Return the state of the kv().get('unit-upgrading').
+
+    To help with units that don't have HookData() (testing)
+    if it excepts, return False
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return not(not(kv.get('unit-upgrading')))
+    except Exception:
+        return False
diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py
index ed7af39e..68800074 100644
--- a/ceph-osd/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py
@@ -201,11 +201,35 @@ def remote_unit():
     return os.environ.get('JUJU_REMOTE_UNIT', None)
 
 
-def service_name():
-    """The name service group this unit belongs to"""
+def application_name():
+    """
+    The name of the deployed application this unit belongs to.
+    """
     return local_unit().split('/')[0]
 
 
+def service_name():
+    """
+    .. deprecated:: 0.19.1
+       Alias for :func:`application_name`.
+    """
+    return application_name()
+
+
+def model_name():
+    """
+    Name of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+    """
+    UUID of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_UUID']
+
+
 def principal_unit():
     """Returns the principal unit of this unit, otherwise None"""
     # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -1297,3 +1321,33 @@ def _to_range(addr):
     if 'private-address' in settings:
         return [_to_range(settings['private-address'])]
     return []  # Should never happen
+
+
+def unit_doomed(unit=None):
+    """Determines if the unit is being removed from the model
+
+    Requires Juju 2.4.1.
+
+    :param unit: string unit name, defaults to local_unit
+    :side effect: calls goal_state
+    :side effect: calls local_unit
+    :side effect: calls has_juju_version
+    :return: True if the unit is being removed, already gone, or never existed
+    """
+    if not has_juju_version("2.4.1"):
+        # We cannot risk blindly returning False for 'we don't know',
+        # because that could cause data loss; if call sites don't
+        # need an accurate answer, they likely don't need this helper
+        # at all.
+        # goal-state existed in 2.4.0, but did not handle removals
+        # correctly until 2.4.1.
+ raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py index 8c75e98f..985e0d94 100644 --- a/ceph-osd/tests/basic_deployment.py +++ b/ceph-osd/tests/basic_deployment.py @@ -608,7 +608,7 @@ def test_412_ceph_glance_image_create_delete(self): # Delete ceph-backed glance image u.delete_resource(self.glance.images, - glance_img, msg="glance image") + glance_img.id, msg="glance image") # Final check, ceph glance pool object count and disk usage time.sleep(10) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..936b4036 100644 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, @@ -998,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): 
pool_id_name = pool.split(' ') diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ b/ceph-osd/tests/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. 
+ return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-osd/tests/dev-basic-bionic-rocky b/ceph-osd/tests/gate-basic-bionic-rocky similarity index 100% rename from ceph-osd/tests/dev-basic-bionic-rocky rename to ceph-osd/tests/gate-basic-bionic-rocky From 89b3371f531e23bb88031a91ddedd71431e433e4 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 27 Aug 2018 18:12:27 -0500 Subject: [PATCH 1552/2699] Update functional test definitions Change-Id: Ie136db9aa883d4e851fb723b779d79ff7ae6555e --- .../contrib/openstack/amulet/utils.py | 32 +- .../charmhelpers/contrib/openstack/context.py | 7 +- .../contrib/openstack/ssh_migrations.py | 412 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 48 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 58 ++- ceph-mon/tests/basic_deployment.py | 2 +- .../contrib/openstack/amulet/utils.py | 32 +- ceph-mon/tests/charmhelpers/core/hookenv.py | 58 ++- ...c-bionic-rocky => gate-basic-bionic-rocky} | 0 9 files changed, 625 insertions(+), 24 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py rename ceph-mon/tests/{dev-basic-bionic-rocky => gate-basic-bionic-rocky} (100%) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..936b4036 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 
'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, @@ -998,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index f3741b0e..ca913961 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1389,11 +1389,12 @@ def __call__(self): class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, - public_script=None, process_weight=1.00, + public_script=None, user=None, group=None, + process_weight=1.00, admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name - self.user = name - self.group = name + self.user = user or name + self.group = group or name self.script = script self.admin_script = admin_script self.public_script = public_script diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 00000000..96b9f71d --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. + :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. 
+ :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. + + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. + + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' 
% host)
+    with open(known_hosts(application_name, user), 'a') as out:
+        out.write("{}\n".format(remote_key))
+
+
+def ssh_authorized_key_exists(public_key, application_name, user=None):
+    """Check if given key is in the authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Whether given key is in the authorized_key file.
+    :rtype: boolean
+    """
+    with open(authorized_keys(application_name, user)) as keys:
+        return ('%s' % public_key) in keys.read()
+
+
+def add_authorized_key(public_key, application_name, user=None):
+    """Add given key to the authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    with open(authorized_keys(application_name, user), 'a') as keys:
+        keys.write("{}\n".format(public_key))
+
+
+def ssh_compute_add_host_and_key(public_key, hostname, private_address,
+                                 application_name, user=None):
+    """Add a compute node's ssh details to the local cache.
+
+    Collect various hostname variations and add the corresponding host keys to
+    the local known hosts file. Finally, add the supplied public key to the
+    authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param hostname: Hostname to collect host keys from.
+    :type hostname: str
+    :param private_address: Corresponding private address for hostname
+    :type private_address: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    # If remote compute node hands us a hostname, ensure we have a
+    # known hosts entry for its IP, hostname and FQDN.
+    hosts = [private_address]
+
+    if not is_ipv6(private_address):
+        if hostname:
+            hosts.append(hostname)
+
+        if is_ip(private_address):
+            hn = get_hostname(private_address)
+            if hn:
+                hosts.append(hn)
+                short = hn.split('.')[0]
+                if ns_query(short):
+                    hosts.append(short)
+        else:
+            hosts.append(get_host_ip(private_address))
+            short = private_address.split('.')[0]
+            if ns_query(short):
+                hosts.append(short)
+
+    for host in list(set(hosts)):
+        add_known_host(host, application_name, user)
+
+    if not ssh_authorized_key_exists(public_key, application_name, user):
+        log('Saving SSH authorized key for compute host at %s.' %
+            private_address)
+        add_authorized_key(public_key, application_name, user)
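(For illustration only: given a resolvable compute host, the hosts list
built above typically ends up covering the IP, the FQDN and the short
name, e.g. ssh_compute_add_host_and_key(pubkey, 'nova-compute-1.maas',
'10.5.0.12', 'nova-compute') collects host keys for '10.5.0.12',
'nova-compute-1.maas' and 'nova-compute-1' before appending pubkey to
the authorized_keys file. The hostname and address are invented.)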
+
+
+def ssh_compute_add(public_key, application_name, rid=None, unit=None,
+                    user=None):
+    """Add a compute node's ssh details to the local cache.
+
+    Collect various hostname variations and add the corresponding host keys to
+    the local known hosts file. Finally, add the supplied public key to the
+    authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param rid: Relation id of the relation between this charm and the app. If
+                none is supplied it is assumed to be the relation relating to
+                the current hook context.
+    :type rid: str
+    :param unit: Unit to add ssh assets for. If none is supplied it is assumed
+                 to be the unit relating to the current hook context.
+    :type unit: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    relation_data = relation_get(rid=rid, unit=unit)
+    ssh_compute_add_host_and_key(
+        public_key,
+        relation_data.get('hostname'),
+        relation_data.get('private-address'),
+        application_name,
+        user=user)
+
+
+def ssh_known_hosts_lines(application_name, user=None):
+    """Return contents of known_hosts file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    known_hosts_list = []
+    with open(known_hosts(application_name, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return known_hosts_list
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+    """Return contents of authorized_keys file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    authorized_keys_list = []
+
+    with open(authorized_keys(application_name, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return authorized_keys_list
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+    """Remove given public key from authorized_keys file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    if not (os.path.isfile(authorized_keys(application_name, user)) or
+            os.path.isfile(known_hosts(application_name, user))):
+        return
+
+    # Read the existing keys for this application/user combination.
+    keys = ssh_authorized_keys_lines(application_name, user=user)
+    keys = [k.strip() for k in keys]
+
+    if public_key not in keys:
+        return
+
+    # Drop every occurrence of the key being removed.
+    keys = [k for k in keys if k != public_key]
+
+    with open(authorized_keys(application_name, user), 'w') as _keys:
+        keys = '\n'.join(keys)
+        if not keys.endswith('\n'):
+            keys += '\n'
+        _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for the
+    app + user combination.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Public keys + host keys for all units for app + user combination.
+    :rtype: dict
+    """
+    settings = {}
+    keys = {}
+    prefix = ''
+    if user:
+        prefix = '{}_'.format(user)
+
+    for i, line in enumerate(ssh_known_hosts_lines(
+            application_name=application_name, user=user)):
+        settings['{}known_hosts_{}'.format(prefix, i)] = line
+    if settings:
+        settings['{}known_hosts_max_index'.format(prefix)] = len(
+            settings.keys())
+
+    for i, line in enumerate(ssh_authorized_keys_lines(
+            application_name=application_name, user=user)):
+        keys['{}authorized_keys_{}'.format(prefix, i)] = line
+    if keys:
+        keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
+    settings.update(keys)
+    return settings
+
+
+def get_all_user_ssh_settings(application_name):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for root user
+    and nova user.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :returns: Public keys + host keys for all units for app + user combination.
+    :rtype: dict
+    """
+    settings = get_ssh_settings(application_name)
+    settings.update(get_ssh_settings(application_name, user='nova'))
+    return settings
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
index 0180e555..24f5b808 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
@@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):
     """Check if the unit is supposed to be paused, and if so check that the
     services/ports (if passed) are actually stopped/not being listened to.
 
-    if the unit isn't supposed to be paused, just return None, None
+    If the unit isn't supposed to be paused, just return None, None
+
+    If the unit is performing a series upgrade, return a message indicating
+    this.
 
     @param services: OPTIONAL services spec or list of service names.
     @param ports: OPTIONAL list of port numbers.
     @returns state, message or None, None
     """
+    if is_unit_upgrading_set():
+        state, message = check_actually_paused(services=services,
+                                               ports=ports)
+        if state is None:
+            # we're paused okay, so set the state to blocked and return
+            state = "blocked"
+            message = ("Ready for do-release-upgrade and reboot. "
+                       "Set complete when finished.")
+        return state, message
+
     if is_unit_paused_set():
         state, message = check_actually_paused(services=services,
                                                ports=ports)
@@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
     message = assess_status_func()
     if message:
         messages.append(message)
-    if messages:
+    if messages and not is_unit_upgrading_set():
         raise Exception("Couldn't pause: {}".format("; ".join(messages)))
 
 
@@ -1689,3 +1702,34 @@ def _ensure_flag(flag):
         snap_install(snap,
                      _ensure_flag(snaps[snap]['channel']),
                      _ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+    """Set the unit to an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+    """Clear the unit from an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+    """Return the state of the kv().get('unit-upgrading').
+ + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-upgrading'))) + except Exception: + return False diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -201,11 +201,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. 
+ return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index cebb7a8d..8de30ef1 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -732,7 +732,7 @@ def test_412_ceph_glance_image_create_delete(self): # Delete ceph-backed glance image u.delete_resource(self.glance.images, - glance_img, msg="glance image") + glance_img.id, msg="glance image") # Final check, ceph glance pool object count and disk usage time.sleep(10) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py index ef4ab54b..936b4036 100644 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -623,7 +624,7 @@ def authenticate_glance_admin(self, keystone): ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') if keystone.session: - return glance_client.Client(ep, session=keystone.session) + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -711,10 +712,19 @@ def create_cirros_image(self, glance, image_name): f.close() # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + if float(glance.version) < 2.0: + with open(local_path) as fimage: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', + data=fimage) + else: + image = glance.images.create( + name=image_name, + disk_format="qcow2", + visibility="public", + container_format="bare") + glance.images.upload(image.id, open(local_path, 'rb')) # Wait for image to reach active status img_id = image.id @@ -729,9 +739,14 @@ def create_cirros_image(self, glance, image_name): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, @@ -998,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py index ed7af39e..68800074 100644 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ b/ceph-mon/tests/charmhelpers/core/hookenv.py @@ -201,11 +201,35 
@@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -1297,3 +1321,33 @@ def _to_range(addr): if 'private-address' in settings: return [_to_range(settings['private-address'])] return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-mon/tests/dev-basic-bionic-rocky b/ceph-mon/tests/gate-basic-bionic-rocky similarity index 100% rename from ceph-mon/tests/dev-basic-bionic-rocky rename to ceph-mon/tests/gate-basic-bionic-rocky From 01c78414a7855ea4bc3ef7a8f9c7a57793630ab4 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 30 Aug 2018 08:48:43 +0100 Subject: [PATCH 1553/2699] Don't enable bluestore by default for Jewel The previous commit to enable bluestore by default incorrectly used the Jewel version series for the on by default check. Update this to specify Luminous (12.2.0). Change-Id: I8e69b171c67e7c0988c2b25f28fc5bac14de1c5e --- ceph-osd/hooks/ceph_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 414ce1b0..1630d144 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -225,7 +225,7 @@ def use_bluestore(): :returns: whether bluestore disk format should be used :rtype: bool""" - if cmp_pkgrevno('ceph', '10.2.0') < 0: + if cmp_pkgrevno('ceph', '12.2.0') < 0: return False return config('bluestore') From 2a7781266ca7ddbb4ac21c2f2162546de2960eab Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 27 Aug 2018 18:12:10 -0500 Subject: [PATCH 1554/2699] Update functional test definitions Use ceph-mds@.service systemd configuration Use the upstream provided ceph-mds@ service systemd configuration in favour of the now removed ceph-mds service - this integrates with ceph-mds.target and is the preferred way to manage the daemon. 
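For reference, the per-host instance name is what makes the template unit
addressable: the charm builds "ceph-mds@<hostname>" and drives it through
charm-helpers' service wrappers, as the diff below shows. A minimal
standalone sketch of the same idea using plain systemctl calls (the
manage_mds_instance name is illustrative only):

    import socket
    import subprocess

    def manage_mds_instance(action='restart'):
        # Per-host instance of the ceph-mds@.service template; instances
        # are grouped under ceph-mds.target by the upstream packaging.
        unit = 'ceph-mds@{}.service'.format(socket.gethostname())
        subprocess.check_call(['systemctl', 'enable', unit])  # start on boot
        subprocess.check_call(['systemctl', action, unit])    # act now
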
Change-Id: I33b8a9c9fe1ab2f065897a5d4d3beb9dae249c57
---
 ceph-fs/src/reactive/ceph_fs.py               | 27 ++++++++++++-------
 ...c-bionic-rocky => gate-basic-bionic-rocky} |  0
 2 files changed, 18 insertions(+), 9 deletions(-)
 rename ceph-fs/src/tests/{dev-basic-bionic-rocky => gate-basic-bionic-rocky} (100%)

diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py
index e8a6b3ed..b41affe5 100644
--- a/ceph-fs/src/reactive/ceph_fs.py
+++ b/ceph-fs/src/reactive/ceph_fs.py
@@ -17,13 +17,16 @@ import subprocess

 from charms import reactive
-from charms.reactive import when, when_not, set_state, is_state
+from charms.reactive import when, when_not
+from charms.reactive.flags import set_flag, clear_flag, is_flag_set
 from charmhelpers.core import hookenv
 from charmhelpers.core.hookenv import (
     application_version_set, config, log, ERROR, cached, DEBUG, unit_get,
     network_get_primary_address, relation_ids,
     status_set)
-from charmhelpers.core.host import service_restart
+from charmhelpers.core.host import (
+    service_restart,
+    service)
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     get_ipv6_addr)
@@ -69,12 +72,14 @@ def install_cephfs():
 @when('ceph-mds.pools.available')
 @when_not('cephfs.started')
 def setup_mds(relation):
-    try:
-        service_restart('ceph-mds')
-        set_state('cephfs.started')
+    service_name = 'ceph-mds@{}'.format(socket.gethostname())
+    if service_restart(service_name):
+        set_flag('cephfs.started')
+        service('enable', service_name)
         application_version_set(get_upstream_version(VERSION_PACKAGE))
-    except subprocess.CalledProcessError as err:
-        log(message='Error: {}'.format(err), level=ERROR)
+    else:
+        log(message='Error restarting ceph-mds', level=ERROR)
+        clear_flag('cephfs.started')


 @when('ceph-mds.available')
@@ -119,6 +124,8 @@ def config_changed(ceph_client):
             ceph_conf.write(render_template('ceph.conf', ceph_context))
     except IOError as err:
         log("IOError writing ceph.conf: {}".format(err))
+        clear_flag('cephfs.configured')
+        return

     try:
         with open(cephx_key, 'w') as key_file:
@@ -128,7 +135,9 @@ def config_changed(ceph_client):
             ))
     except IOError as err:
         log("IOError writing mds-a.keyring: {}".format(err))
-    set_state('cephfs.configured')
+        clear_flag('cephfs.configured')
+        return
+    set_flag('cephfs.configured')


 def get_networks(config_opt='ceph-public-network'):
@@ -205,7 +214,7 @@ def assess_status():
     """Assess status of current unit"""
     statuses = set([])
     messages = set([])
-    if is_state('cephfs.started'):
+    if is_flag_set('cephfs.started'):
         (status, message) = log_mds()
         statuses.add(status)
         messages.add(message)
diff --git a/ceph-fs/src/tests/dev-basic-bionic-rocky b/ceph-fs/src/tests/gate-basic-bionic-rocky
similarity index 100%
rename from ceph-fs/src/tests/dev-basic-bionic-rocky
rename to ceph-fs/src/tests/gate-basic-bionic-rocky

From 289ebd29ae7532999804ef296246d787716b4b41 Mon Sep 17 00:00:00 2001
From: Doug Hellmann
Date: Tue, 11 Sep 2018 13:06:56 -0400
Subject: [PATCH 1555/2699] import zuul job settings from project-config

This is a mechanically generated patch to complete step 1 of moving
the zuul job settings out of project-config and into each project
repository.

Because there will be a separate patch on each branch, the branch
specifiers for branch-specific jobs have been removed.

Because this patch is generated by a script, there may be some
cosmetic changes to the layout of the YAML file(s) as the contents
are normalized.
See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: Id0fbfafd412f7428b84e620a9a0254db6dc4d1c6 Story: #2002586 Task: #24317 --- ceph-fs/.zuul.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-fs/.zuul.yaml diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml new file mode 100644 index 00000000..5e75d94d --- /dev/null +++ b/ceph-fs/.zuul.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - python-charm-jobs + - openstack-python35-jobs From eaeed8cdef9c132fdbfb810033aef5728dae4c76 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 11 Sep 2018 13:07:08 -0400 Subject: [PATCH 1556/2699] import zuul job settings from project-config This is a mechanically generated patch to complete step 1 of moving the zuul job settings out of project-config and into each project repository. Because there will be a separate patch on each branch, the branch specifiers for branch-specific jobs have been removed. Because this patch is generated by a script, there may be some cosmetic changes to the layout of the YAML file(s) as the contents are normalized. See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: Iac180889d86d0d336e3060561751781b18e81957 Story: #2002586 Task: #24317 --- ceph-mon/.zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 ceph-mon/.zuul.yaml diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml new file mode 100644 index 00000000..7051aeeb --- /dev/null +++ b/ceph-mon/.zuul.yaml @@ -0,0 +1,3 @@ +- project: + templates: + - python35-charm-jobs From 9b5d18cc25666914fc7f790f6d0d722a09cdd743 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 11 Sep 2018 13:07:20 -0400 Subject: [PATCH 1557/2699] import zuul job settings from project-config This is a mechanically generated patch to complete step 1 of moving the zuul job settings out of project-config and into each project repository. Because there will be a separate patch on each branch, the branch specifiers for branch-specific jobs have been removed. Because this patch is generated by a script, there may be some cosmetic changes to the layout of the YAML file(s) as the contents are normalized. See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: Ide35150cd3575c4053445c8bcd4f67e44ae5dbd7 Story: #2002586 Task: #24317 --- ceph-osd/.zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 ceph-osd/.zuul.yaml diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml new file mode 100644 index 00000000..7051aeeb --- /dev/null +++ b/ceph-osd/.zuul.yaml @@ -0,0 +1,3 @@ +- project: + templates: + - python35-charm-jobs From 67714179bfc9a46970739c7347f597d7e1163f02 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 11 Sep 2018 13:07:32 -0400 Subject: [PATCH 1558/2699] import zuul job settings from project-config This is a mechanically generated patch to complete step 1 of moving the zuul job settings out of project-config and into each project repository. Because there will be a separate patch on each branch, the branch specifiers for branch-specific jobs have been removed. Because this patch is generated by a script, there may be some cosmetic changes to the layout of the YAML file(s) as the contents are normalized. 
See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: Ia81142eaa122161aa583550c683f26a18dc5eb86 Story: #2002586 Task: #24317 --- ceph-proxy/.zuul.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-proxy/.zuul.yaml diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml new file mode 100644 index 00000000..aa9c508f --- /dev/null +++ b/ceph-proxy/.zuul.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - python-charm-jobs + - openstack-python35-jobs-nonvoting From df4ad6acf5b756f9009c6cc25fc81d9224db42f2 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 11 Sep 2018 13:07:44 -0400 Subject: [PATCH 1559/2699] import zuul job settings from project-config This is a mechanically generated patch to complete step 1 of moving the zuul job settings out of project-config and into each project repository. Because there will be a separate patch on each branch, the branch specifiers for branch-specific jobs have been removed. Because this patch is generated by a script, there may be some cosmetic changes to the layout of the YAML file(s) as the contents are normalized. See the python3-first goal document for details: https://governance.openstack.org/tc/goals/stein/python3-first.html Change-Id: I855fc3fba891988b687c48f32e5e3badbaac59de Story: #2002586 Task: #24317 --- ceph-radosgw/.zuul.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-radosgw/.zuul.yaml diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml new file mode 100644 index 00000000..aa9c508f --- /dev/null +++ b/ceph-radosgw/.zuul.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - python-charm-jobs + - openstack-python35-jobs-nonvoting From 18b812910dc79788d32015d33358484a4e1ea367 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 19 Sep 2018 13:23:51 +0200 Subject: [PATCH 1560/2699] Add cosmic Add a tactical change which is already merged into charm-helpers. This needs to go into all charms to solve the chicken:egg issue where cosmic is untestable until this change exists. Reference: https://github.com/juju/charm-helpers/commit/4835c6c167c429527ef0a0291d17cf559c9cf880 Change-Id: I05e49e7dea3688926b7f56ed4a3daddaebe7a2e7 --- ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) From 39b177faf380090ac3ebd01cd9eb33e4b6e3a513 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 19 Sep 2018 13:24:09 +0200 Subject: [PATCH 1561/2699] Add cosmic Add a tactical change which is already merged into charm-helpers. This needs to go into all charms to solve the chicken:egg issue where cosmic is untestable until this change exists. 
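The UBUNTU_RELEASES tuple that these one-line patches extend is ordered
oldest-to-newest, and charm-helpers derives series comparisons from
positions in that tuple, which is why a series missing from it is
untestable. A simplified, self-contained sketch of the index-based
comparison (tuple excerpted; the real one lists every series):

    UBUNTU_RELEASES = ('zesty', 'artful', 'bionic', 'cosmic')  # excerpt

    def release_at_least(series, minimum):
        # Positional comparison: order in the tuple is significant.
        return UBUNTU_RELEASES.index(series) >= UBUNTU_RELEASES.index(minimum)

    assert release_at_least('cosmic', 'bionic')
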
Reference: https://github.com/juju/charm-helpers/commit/4835c6c167c429527ef0a0291d17cf559c9cf880 Change-Id: I0dfc8408eca5105828e71477cb736eceed8f7148 --- ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) From b855647ffe8968a30f4cecfb2a2d406e0e98df5c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 19 Sep 2018 13:24:26 +0200 Subject: [PATCH 1562/2699] Add cosmic Add a tactical change which is already merged into charm-helpers. This needs to go into all charms to solve the chicken:egg issue where cosmic is untestable until this change exists. Reference: https://github.com/juju/charm-helpers/commit/4835c6c167c429527ef0a0291d17cf559c9cf880 Change-Id: I91df2da7be9521689372a2be5f6b7220fee449dd --- ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) From c5199e215b9ecc30b65dd067e70a28c67ce8b326 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 19 Sep 2018 13:24:44 +0200 Subject: [PATCH 1563/2699] Add cosmic Add a tactical change which is already merged into charm-helpers. This needs to go into all charms to solve the chicken:egg issue where cosmic is untestable until this change exists. 
Reference: https://github.com/juju/charm-helpers/commit/4835c6c167c429527ef0a0291d17cf559c9cf880 Change-Id: Iece1cb38018d4e3bffe09bb0af85287c8fef764f --- ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) From d6c190bef4d02900eae1f129e3c232d393b41f16 Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 20 Sep 2018 07:32:53 +0000 Subject: [PATCH 1564/2699] Series Upgrade Implement the series-upgrade feature allowing to move between Ubuntu series. Change-Id: I13770631becfefb71075a3b0080db2bffc2b268d --- .../contrib/openstack/amulet/utils.py | 3 ++ .../charmhelpers/contrib/openstack/context.py | 4 +++ .../charmhelpers/contrib/openstack/utils.py | 30 ++++++++++++++++++- .../hooks/charmhelpers/core/hookenv.py | 3 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/fetch/__init__.py | 2 ++ .../hooks/charmhelpers/fetch/bzrurl.py | 4 +-- .../hooks/charmhelpers/fetch/giturl.py | 4 +-- .../hooks/charmhelpers/fetch/ubuntu.py | 20 +++++++++++++ ceph-radosgw/hooks/hooks.py | 24 +++++++++++++++ ceph-radosgw/hooks/post-series-upgrade | 1 + ceph-radosgw/hooks/pre-series-upgrade | 1 + .../tests/charmhelpers/core/hookenv.py | 3 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + 14 files changed, 94 insertions(+), 7 deletions(-) create mode 120000 ceph-radosgw/hooks/post-series-upgrade create mode 120000 ceph-radosgw/hooks/pre-series-upgrade diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 6637865d..936b4036 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1013,6 +1013,9 @@ def get_ceph_pools(self, sentry_unit): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index ca913961..3e4e82a7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1519,6 +1519,10 @@ def __call__(self): 'rel_key': 'enable-qos', 'default': False, }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py 
index 24f5b808..ae48d6b4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -186,7 +186,7 @@ ('queens', ['2.16.0', '2.17.0']), ('rocky', - ['2.18.0']), + ['2.18.0', '2.19.0']), ]) # >= Liberty version->codename mapping @@ -1733,3 +1733,31 @@ def is_unit_upgrading_set(): return not(not(kv.get('unit-upgrading'))) except Exception: return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. + + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 68800074..9abf2a45 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -48,6 +48,7 @@ DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 cache = {} @@ -98,7 +99,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 480a6276..8572d34f 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -84,6 +84,7 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages install = fetch.apt_install upgrade = fetch.apt_upgrade update = _fetch_update = fetch.apt_update @@ -96,6 +97,7 @@ def base_url(self, url): apt_update = fetch.apt_update apt_upgrade = fetch.apt_upgrade apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py index 07cd0293..c4ab3ff1 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. 
 import os
-from subprocess import check_call
+from subprocess import STDOUT, check_output
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,
@@ -55,7 +55,7 @@ def branch(self, source, dest, revno=None):
         cmd = ['bzr', 'branch']
         cmd += cmd_opts
         cmd += [source, dest]
-        check_call(cmd)
+        check_output(cmd, stderr=STDOUT)

     def install(self, source, dest=None, revno=None):
         url_parts = self.parse_url(source)
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py
index 4cf21bc2..070ca9bb 100644
--- a/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py
+++ b/ceph-radosgw/hooks/charmhelpers/fetch/giturl.py
@@ -13,7 +13,7 @@
 # limitations under the License.

 import os
-from subprocess import check_call, CalledProcessError
+from subprocess import check_output, CalledProcessError, STDOUT
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,
@@ -50,7 +50,7 @@ def clone(self, source, dest, branch="master", depth=None):
         cmd = ['git', 'clone', source, dest, '--branch', branch]
         if depth:
             cmd.extend(['--depth', depth])
-        check_call(cmd)
+        check_output(cmd, stderr=STDOUT)

     def install(self, source, branch="master", dest=None, depth=None):
         url_parts = self.parse_url(source)
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py
index 19aa6baf..ec08cbc2 100644
--- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py
@@ -189,6 +189,18 @@ def filter_installed_packages(packages):
     return _pkgs


+def filter_missing_packages(packages):
+    """Return a list of packages that are installed.
+
+    :param packages: list of packages to evaluate.
+    :returns list: Packages that are installed.
+    """
+    return list(
+        set(packages) -
+        set(filter_installed_packages(packages))
+    )
+
+
 def apt_cache(in_memory=True, progress=None):
     """Build and return an apt cache."""
     from apt import apt_pkg
@@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False):
     _run_apt_command(cmd, fatal)


+def apt_autoremove(purge=True, fatal=False):
+    """Remove packages that are no longer required, optionally purging
+    their configuration files."""
+    cmd = ['apt-get', '--assume-yes', 'autoremove']
+    if purge:
+        cmd.append('--purge')
+    _run_apt_command(cmd, fatal)
+
+
 def apt_mark(packages, mark, fatal=False):
     """Flag one or more packages using apt-mark."""
     log("Marking {} as {}".format(packages, mark))
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index ea1fdaec..4f68459f 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -64,6 +64,8 @@
 from charmhelpers.contrib.openstack.utils import (
     is_unit_paused_set,
     pausable_restart_on_change as restart_on_change,
+    series_upgrade_prepare,
+    series_upgrade_complete,
 )
 from charmhelpers.contrib.hahelpers.cluster import (
     get_hacluster_config,
@@ -80,6 +82,8 @@
     assess_status,
     setup_keystone_certs,
     disable_unused_apache_sites,
+    pause_unit_helper,
+    resume_unit_helper,
 )
 from charmhelpers.contrib.charmsupport import nrpe
 from charmhelpers.contrib.hardening.harden import harden
@@ -138,6 +142,12 @@ def install():
                           '/etc/haproxy/haproxy.cfg': ['haproxy']})
 @harden()
 def config_changed():
+    # if we are paused, delay doing any config changed hooks.
+    # It is forced on the resume.
+    if is_unit_paused_set():
+        log("Unit is paused or upgrading. 
Skipping config_changed", "WARN") + return + install_packages() disable_unused_apache_sites() @@ -383,6 +393,20 @@ def update_status(): log('Updating status.') +@hooks.hook('pre-series-upgrade') +def pre_series_upgrade(): + log("Running prepare series upgrade hook", "INFO") + series_upgrade_prepare( + pause_unit_helper, CONFIGS) + + +@hooks.hook('post-series-upgrade') +def post_series_upgrade(): + log("Running complete series upgrade hook", "INFO") + series_upgrade_complete( + resume_unit_helper, CONFIGS) + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/post-series-upgrade b/ceph-radosgw/hooks/post-series-upgrade new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/post-series-upgrade @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/pre-series-upgrade b/ceph-radosgw/hooks/pre-series-upgrade new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/pre-series-upgrade @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py index 68800074..9abf2a45 100644 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/tests/charmhelpers/core/hookenv.py @@ -48,6 +48,7 @@ DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 cache = {} @@ -98,7 +99,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py index 99451b59..a6d375af 100644 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py @@ -21,6 +21,7 @@ 'zesty', 'artful', 'bionic', + 'cosmic', ) From db0232cfda3975684e5d9026b923288d4a5ec624 Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 20 Sep 2018 11:51:09 +0000 Subject: [PATCH 1565/2699] Series Upgrade Implement the series-upgrade feature allowing to move between Ubuntu series. 
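The hook implementations in this and the neighbouring series-upgrade
patches reduce to two flags persisted in the unit's local kv store. A
distilled, self-contained sketch of that protocol, with a plain dict
standing in for charm-helpers' unitdata kv():

    kv = {}  # stands in for charmhelpers.core.unitdata's kv() store

    def pre_series_upgrade():
        # Mark the unit paused and upgrading; assess_status() then reports
        # "blocked" until the operator completes the series upgrade.
        kv['unit-paused'] = True
        kv['unit-upgrading'] = True

    def post_series_upgrade():
        # Clear both flags so the unit resumes normal status reporting.
        kv['unit-paused'] = False
        kv['unit-upgrading'] = False

    def assess_status():
        if kv.get('unit-upgrading'):
            return ('blocked', 'Ready for do-release-upgrade and reboot. '
                    'Set complete when finished.')
        return ('active', 'Unit is ready')
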
Change-Id: I174c30e0de2c9753742262a31b73651b7a4ed3da --- ceph-osd/actions/pause_resume.py | 2 +- ceph-osd/hooks/ceph_hooks.py | 36 +++++++++++++++++++++++++++- ceph-osd/hooks/post-series-upgrade | 1 + ceph-osd/hooks/pre-series-upgrade | 1 + ceph-osd/hooks/utils.py | 38 ------------------------------ 5 files changed, 38 insertions(+), 40 deletions(-) create mode 120000 ceph-osd/hooks/post-series-upgrade create mode 120000 ceph-osd/hooks/pre-series-upgrade diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/pause_resume.py index c8d5778b..1c19d5b1 100755 --- a/ceph-osd/actions/pause_resume.py +++ b/ceph-osd/actions/pause_resume.py @@ -30,7 +30,7 @@ from ceph.utils import get_local_osd_ids from ceph_hooks import assess_status -from utils import ( +from charmhelpers.contrib.openstack.utils import ( set_unit_paused, clear_unit_paused, ) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 1630d144..7494c3cd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -73,7 +73,6 @@ get_networks, assert_charm_supports_ipv6, render_template, - is_unit_paused_set, get_public_addr, get_cluster_addr, get_blacklist, @@ -94,6 +93,15 @@ from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +from charmhelpers.contrib.openstack.utils import ( + clear_unit_paused, + clear_unit_upgrading, + is_unit_paused_set, + is_unit_upgrading_set, + set_unit_paused, + set_unit_upgrading, +) + from charmhelpers.core.unitdata import kv import charmhelpers.contrib.openstack.vaultlocker as vaultlocker @@ -655,6 +663,11 @@ def assess_status(): """Assess status of current unit""" # check to see if the unit is paused. application_version_set(get_upstream_version(VERSION_PACKAGE)) + if is_unit_upgrading_set(): + status_set("blocked", + "Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return if is_unit_paused_set(): status_set('maintenance', "Paused. Use 'resume' action to resume normal service.") @@ -699,6 +712,27 @@ def update_status(): log('Updating status.') +@hooks.hook('pre-series-upgrade') +def pre_series_upgrade(): + log("Running prepare series upgrade hook", "INFO") + # NOTE: The Ceph packages handle the series upgrade gracefully. + # In order to indicate the step of the series upgrade process for + # administrators and automated scripts, the charm sets the paused and + # upgrading states. + set_unit_paused() + set_unit_upgrading() + + +@hooks.hook('post-series-upgrade') +def post_series_upgrade(): + log("Running complete series upgrade hook", "INFO") + # In order to indicate the step of the series upgrade process for + # administrators and automated scripts, the charm clears the paused and + # upgrading states. 
+ clear_unit_paused() + clear_unit_upgrading() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-osd/hooks/post-series-upgrade b/ceph-osd/hooks/post-series-upgrade new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-osd/hooks/post-series-upgrade @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/pre-series-upgrade b/ceph-osd/hooks/pre-series-upgrade new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-osd/hooks/pre-series-upgrade @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index a2fffd10..b773e2d1 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -176,44 +176,6 @@ def assert_charm_supports_ipv6(): "versions less than Trusty 14.04") -# copied charmhelpers.contrib.openstack.utils so that the charm does need the -# entire set of dependencies that that module actually also has to bring in -# from charmhelpers. -def set_unit_paused(): - """Set the unit to a paused state in the local kv() store. - This does NOT actually pause the unit - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', True) - - -def clear_unit_paused(): - """Clear the unit from a paused state in the local kv() store - This does NOT actually restart any services - it only clears the - local state. - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', False) - - -def is_unit_paused_set(): - """Return the state of the kv().get('unit-paused'). - This does NOT verify that the unit really is paused. - - To help with units that don't have HookData() (testing) - if it excepts, return False - """ - try: - with unitdata.HookData()() as t: - kv = t[0] - # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) - except: - return False - - def get_blacklist(): """Get blacklist stored in the local kv() store""" db = unitdata.kv() From 46248f939f90d83fa34c58a3a9268ed40bb3318e Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 20 Sep 2018 12:00:03 +0000 Subject: [PATCH 1566/2699] Series Upgrade Implement the series-upgrade feature allowing to move between Ubuntu series. Change-Id: Ib0a599eeae75fd2e1948d4c86515da4b77340131 --- ceph-mon/hooks/ceph_hooks.py | 33 ++++++++++++++++++++++++++++++ ceph-mon/hooks/post-series-upgrade | 1 + ceph-mon/hooks/pre-series-upgrade | 1 + 3 files changed, 35 insertions(+) create mode 120000 ceph-mon/hooks/post-series-upgrade create mode 120000 ceph-mon/hooks/pre-series-upgrade diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e192d846..6365ff78 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -61,6 +61,13 @@ ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.alternatives import install_alternative +from charmhelpers.contrib.openstack.utils import ( + clear_unit_paused, + clear_unit_upgrading, + is_unit_upgrading_set, + set_unit_paused, + set_unit_upgrading, +) from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, @@ -720,6 +727,11 @@ def update_nrpe_config(): def assess_status(): '''Assess status of current unit''' application_version_set(get_upstream_version(VERSION_PACKAGE)) + if is_unit_upgrading_set(): + status_set("blocked", + "Ready for do-release-upgrade and reboot. 
" + "Set complete when finished.") + return # Check that the no-bootstrap config option is set in conjunction with # having the bootstrap-source relation established @@ -760,6 +772,27 @@ def update_status(): log('Updating status.') +@hooks.hook('pre-series-upgrade') +def pre_series_upgrade(): + log("Running prepare series upgrade hook", "INFO") + # NOTE: The Ceph packages handle the series upgrade gracefully. + # In order to indicate the step of the series upgrade process for + # administrators and automated scripts, the charm sets the paused and + # upgrading states. + set_unit_paused() + set_unit_upgrading() + + +@hooks.hook('post-series-upgrade') +def post_series_upgrade(): + log("Running complete series upgrade hook", "INFO") + # In order to indicate the step of the series upgrade process for + # administrators and automated scripts, the charm clears the paused and + # upgrading states. + clear_unit_paused() + clear_unit_upgrading() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-mon/hooks/post-series-upgrade b/ceph-mon/hooks/post-series-upgrade new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/post-series-upgrade @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/pre-series-upgrade b/ceph-mon/hooks/pre-series-upgrade new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/pre-series-upgrade @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file From eace94d140db79df2fcd6ad0e0369a50387bfe4b Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Thu, 20 Sep 2018 04:35:29 +0300 Subject: [PATCH 1567/2699] modify default_caps to match current ceph-mon caps The current default caps in ceph-proxy are not up to date with charm-ceph-mon caps. Change-Id: Iaeb8d4dea9c36f522aeaddf54b19e4947c81a559 Closes-Bug: #1794071 --- ceph-proxy/hooks/ceph.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 44b28497..f352f53f 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -11,6 +11,7 @@ import os import re import sys +import collections from charmhelpers.contrib.storage.linux.utils import ( is_block_device, @@ -343,10 +344,11 @@ def get_radosgw_key(): return get_named_key('radosgw.gateway', _radosgw_caps) -_default_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} +_default_caps = collections.OrderedDict([ + ('mon', ['allow r', + 'allow command "osd blacklist"']), + ('osd', ['allow rwx']), +]) admin_caps = { 'mds': ['allow'], From 05dce7ca6113909d6b3d1e7a8fff10d54855e86a Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Thu, 20 Sep 2018 04:31:15 +0300 Subject: [PATCH 1568/2699] support custom admin user and user auth In order to support cases where pools and keys are pre-created and ceph-proxy just proxies this data to client applications this change introduces support for: * having custom "admin" users which may not actually have admin privileges on the target cluster (client.admin is probably occupied by real admins in this case); * using cephx keys provided via charm config. 
Change-Id: I01014b6986f92bf0ad8147a08afa1d61fdd5c088 Closes-bug: #1793991 --- ceph-proxy/config.yaml | 15 +++ ceph-proxy/hooks/ceph.py | 19 +++- ceph-proxy/hooks/ceph_hooks.py | 11 ++- ...ceph.client.admin.keyring => ceph.keyring} | 2 +- ceph-proxy/templates/mon.keyring | 2 +- ceph-proxy/unit_tests/test_ceph.py | 95 +++++++++++++++++++ ceph-proxy/unit_tests/test_ceph_hooks.py | 14 ++- 7 files changed, 148 insertions(+), 10 deletions(-) rename ceph-proxy/templates/{ceph.client.admin.keyring => ceph.keyring} (53%) create mode 100644 ceph-proxy/unit_tests/test_ceph.py diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index d8780fe9..533dac62 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -59,3 +59,18 @@ options: . Valid options are "cephx" and "none". If "none" is specified, keys will still be created and deployed so that it can be enabled later. + user-keys: + type: string + default: "" + description: | + A space-separated list of : pairs used to + lookup authentication keys for a specific user instead of trying to + create a user and a key via ceph-mon. + admin-user: + type: string + default: "client.admin" + description: | + A configurable admin user name. Used for scenarios where pools are + pre-created and the user given to charm-ceph-proxy simply needs to + check the existence of a given pool and error out if one does not + exist. Can be used in conjunction with user-keys. diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 44b28497..0295ecec 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -31,6 +31,7 @@ cached, status_set, WARNING, + config, ) from charmhelpers.fetch import ( apt_cache @@ -369,14 +370,30 @@ def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) +def _config_user_key(name): + user_keys_list = config('user-keys') + if user_keys_list: + for ukpair in user_keys_list.split(' '): + uk = ukpair.split(':') + if len(uk) == 2: + user_type, k = uk + t, u = user_type.split('.') + if u == name: + return k + + def get_named_key(name, caps=None): + config_user_key = _config_user_key(name) + if config_user_key: + return config_user_key + caps = caps or _default_caps cmd = [ "sudo", "-u", ceph_user(), 'ceph', - '--name', 'client.admin', + '--name', config('admin-user'), '--keyring', '/var/lib/ceph/mon/ceph-{}/keyring'.format( get_unit_hostname() diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 960eeba2..21a9d98e 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -86,11 +86,16 @@ def emit_cephconf(): render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 100) - keyring = 'ceph.client.admin.keyring' + + keyring_template = 'ceph.keyring' + keyring = 'ceph.{}.keyring'.format(config('admin-user')) keyring_path = '/etc/ceph/' + keyring - ctx = {'admin_key': config('admin-key')} + ctx = { + 'admin_key': config('admin-key'), + 'admin_user': config('admin-user'), + } user = ceph.ceph_user() - render(keyring, keyring_path, ctx, owner=user, perms=0o600) + render(keyring_template, keyring_path, ctx, owner=user, perms=0o600) keyring = 'keyring' keyring_path = ( diff --git a/ceph-proxy/templates/ceph.client.admin.keyring b/ceph-proxy/templates/ceph.keyring similarity index 53% rename from ceph-proxy/templates/ceph.client.admin.keyring rename to ceph-proxy/templates/ceph.keyring index ce0a4da8..30832f94 100644 --- a/ceph-proxy/templates/ceph.client.admin.keyring 
+++ b/ceph-proxy/templates/ceph.keyring @@ -1,3 +1,3 @@ -[client.admin] +[{{ admin_user }}] key = {{admin_key}} diff --git a/ceph-proxy/templates/mon.keyring b/ceph-proxy/templates/mon.keyring index 567c2ead..b8aa5bc4 100644 --- a/ceph-proxy/templates/mon.keyring +++ b/ceph-proxy/templates/mon.keyring @@ -1,3 +1,3 @@ -[client.admin] +[{{ admin_user }}] key = {{admin_key}} diff --git a/ceph-proxy/unit_tests/test_ceph.py b/ceph-proxy/unit_tests/test_ceph.py new file mode 100644 index 00000000..9ed36c00 --- /dev/null +++ b/ceph-proxy/unit_tests/test_ceph.py @@ -0,0 +1,95 @@ +import unittest + +import mock + +import ceph + + +class CephTestCase(unittest.TestCase): + def setUp(self): + super(CephTestCase, self).setUp() + + @staticmethod + def populated_config_side_effect(key): + return { + 'user-keys': + 'client.cinder-ceph:AQAij2tbMNjMOhAAqInpXQLFrltDgmYid6KXbg== ' + 'client.glance:AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g== ' + 'client.gnocchi:AQDk7qJb0csAFRAAQqPU6HchVW3PT6ymgXdI/A== ' + 'client.nova-compute-kvm:' + 'AQBkjmtb1hWxLxAA3UhxSblgFSCtHVoZ8W6rNQ== ' + 'client.radosgw.gateway:' + 'AQBljmtb65mrHhAAGy9VRkfsatWVLb9EpoWDfw==', + 'admin-user': 'client.myadmin' + }[key] + + @staticmethod + def empty_config_side_effect(key): + return { + 'user-keys': '', + 'admin-user': 'client.myadmin' + }[key] + + @mock.patch('ceph.config') + def test_config_user_key_populated(self, mock_config): + user_name = 'glance' + user_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' + + mock_config.side_effect = self.populated_config_side_effect + named_key = ceph._config_user_key(user_name) + self.assertEqual(user_key, named_key) + + @mock.patch('ceph.config') + def test_config_empty_user_key(self, mock_config): + user_name = 'cinder-ceph' + + mock_config.side_effect = self.empty_config_side_effect + named_key = ceph._config_user_key(user_name) + self.assertEqual(named_key, None) + + @mock.patch('ceph.config') + def test_get_named_key_populated(self, mock_config): + user_name = 'glance' + user_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' + + mock_config.side_effect = self.populated_config_side_effect + named_key = ceph.get_named_key(user_name) + + self.assertEqual(user_key, named_key) + + @mock.patch('subprocess.check_output') + @mock.patch('ceph.get_unit_hostname') + @mock.patch('ceph.ceph_user') + @mock.patch('ceph.config') + def test_get_named_key_empty(self, mock_config, mock_ceph_user, + mock_get_unit_hostname, mock_check_output): + user_name = 'cinder-ceph' + user_type = 'client' + admin_user = 'client.myadmin' + user_spec = '{}.{}'.format(user_type, user_name) + expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' + expected_output = ('[client.testuser]\n key = {}' + .format(expected_key)) + caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] + } + ceph_user = 'ceph' + ceph_proxy_host = 'cephproxy' + mock_get_unit_hostname.return_value = ceph_proxy_host + + def check_output_side_effect(cmd): + return { + ' '.join(['sudo', '-u', ceph_user, 'ceph', '--name', + admin_user, + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + ceph_proxy_host), + 'auth', 'get-or-create', user_spec, 'mon', + 'allow rw', 'osd', 'allow rwx']): expected_output + }[' '.join(cmd)] + mock_check_output.side_effect = check_output_side_effect + mock_config.side_effect = self.empty_config_side_effect + mock_ceph_user.return_value = ceph_user + named_key = ceph.get_named_key(user_name, caps) + self.assertEqual(named_key, expected_key) diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py 
b/ceph-proxy/unit_tests/test_ceph_hooks.py index 802fce98..4f655962 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -76,6 +76,7 @@ def test_emit_cephconf(self, mock_client_rel, mock_rgw_rel, self.test_config.set('monitor-hosts', '127.0.0.1:1234') self.test_config.set('fsid', 'abc123') self.test_config.set('admin-key', 'key123') + self.test_config.set('admin-user', 'client.myadmin') def c(k): x = {'radosgw': ['rados:1'], @@ -105,10 +106,15 @@ def c(k): '/etc/ceph/ceph.conf', '%s/ceph.conf' % dirname, 100) - keyring = 'ceph.client.admin.keyring' - context = {'admin_key': self.test_config.get('admin-key')} - self.render.assert_any_call(keyring, - '/etc/ceph/' + keyring, + keyring_template = 'ceph.keyring' + keyring_name = 'ceph.{}.keyring'.format( + self.test_config.get('admin-user')) + context = { + 'admin_key': self.test_config.get('admin-key'), + 'admin_user': self.test_config.get('admin-user'), + } + self.render.assert_any_call(keyring_template, + '/etc/ceph/' + keyring_name, context, owner='ceph-user', perms=0o600) mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1') From 1307c4c91543e00960165b57e4364fa41f5dedeb Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 26 Sep 2018 18:37:27 -0400 Subject: [PATCH 1569/2699] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. Change-Id: Ic34183ed3ea23c8151909da52498c3f6b44c5c2a Signed-off-by: Doug Hellmann --- ceph-fs/tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 3ba2b233..cf5744da 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -48,6 +48,7 @@ deps = -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} src unit_tests [testenv:venv] +basepython = python3 commands = {posargs} [flake8] From 588eccce879d49f36018e755346ef57553bca013 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 26 Sep 2018 18:37:31 -0400 Subject: [PATCH 1570/2699] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. 
Change-Id: Ia6790f1f07eca936ec8a3d7b9403ad439275eb25 Signed-off-by: Doug Hellmann --- ceph-mon/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index e0c533ed..fa749815 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -37,13 +37,14 @@ deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] +basepython = python3 commands = {posargs} [testenv:func27-noop] From 9d23fabf9d7f5f186987593e3449cb4e99c25e37 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 3 Oct 2018 09:35:36 -0500 Subject: [PATCH 1571/2699] Update requirements Change-Id: Ia6247cc24aeb906b8f76cc9bf553e60a73952e0b --- ceph-mon/requirements.txt | 2 - ceph-mon/test-requirements.txt | 10 +- ceph-mon/tests/charmhelpers/__init__.py | 97 -- .../tests/charmhelpers/contrib/__init__.py | 13 - .../charmhelpers/contrib/amulet/__init__.py | 13 - .../charmhelpers/contrib/amulet/deployment.py | 99 -- .../charmhelpers/contrib/amulet/utils.py | 821 --------- .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 357 ---- .../contrib/openstack/amulet/utils.py | 1533 ----------------- ceph-mon/tests/charmhelpers/core/__init__.py | 13 - .../tests/charmhelpers/core/decorators.py | 55 - ceph-mon/tests/charmhelpers/core/files.py | 43 - ceph-mon/tests/charmhelpers/core/fstab.py | 132 -- ceph-mon/tests/charmhelpers/core/hookenv.py | 1353 --------------- ceph-mon/tests/charmhelpers/core/host.py | 1042 ----------- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 72 - .../charmhelpers/core/host_factory/ubuntu.py | 91 - ceph-mon/tests/charmhelpers/core/hugepage.py | 69 - ceph-mon/tests/charmhelpers/core/kernel.py | 72 - .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 - .../core/kernel_factory/ubuntu.py | 13 - .../charmhelpers/core/services/__init__.py | 16 - .../tests/charmhelpers/core/services/base.py | 362 ---- .../charmhelpers/core/services/helpers.py | 290 ---- ceph-mon/tests/charmhelpers/core/strutils.py | 129 -- ceph-mon/tests/charmhelpers/core/sysctl.py | 58 - .../tests/charmhelpers/core/templating.py | 93 - ceph-mon/tests/charmhelpers/core/unitdata.py | 525 ------ ceph-mon/tests/charmhelpers/osplatform.py | 25 - 33 files changed, 6 insertions(+), 7435 deletions(-) delete mode 100644 ceph-mon/tests/charmhelpers/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py delete mode 100644 ceph-mon/tests/charmhelpers/core/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/core/decorators.py delete mode 100644 ceph-mon/tests/charmhelpers/core/files.py delete mode 100644 ceph-mon/tests/charmhelpers/core/fstab.py delete mode 100644 
ceph-mon/tests/charmhelpers/core/hookenv.py delete mode 100644 ceph-mon/tests/charmhelpers/core/host.py delete mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/centos.py delete mode 100644 ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py delete mode 100644 ceph-mon/tests/charmhelpers/core/hugepage.py delete mode 100644 ceph-mon/tests/charmhelpers/core/kernel.py delete mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py delete mode 100644 ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py delete mode 100644 ceph-mon/tests/charmhelpers/core/services/__init__.py delete mode 100644 ceph-mon/tests/charmhelpers/core/services/base.py delete mode 100644 ceph-mon/tests/charmhelpers/core/services/helpers.py delete mode 100644 ceph-mon/tests/charmhelpers/core/strutils.py delete mode 100644 ceph-mon/tests/charmhelpers/core/sysctl.py delete mode 100644 ceph-mon/tests/charmhelpers/core/templating.py delete mode 100644 ceph-mon/tests/charmhelpers/core/unitdata.py delete mode 100644 ceph-mon/tests/charmhelpers/osplatform.py diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index db0af4d0..b8fec1e2 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -2,7 +2,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.8.0,<1.9.0 -PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 @@ -10,4 +9,3 @@ Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 -pyudev diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 6757a47d..2b2c0e11 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -1,16 +1,16 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ -requests==2.6.0 +requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ +amulet>=1.14.3,<2.0;python_version=='2.7' +bundletester>=0.6.1,<1.0;python_version=='2.7' python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -22,6 +22,8 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox pytz +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-mon/tests/charmhelpers/__init__.py b/ceph-mon/tests/charmhelpers/__init__.py deleted file mode 100644 index e7aa4715..00000000 --- a/ceph-mon/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - -import functools -import inspect -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa - - -# Holds a list of mapping of mangled function names that have been deprecated -# using the @deprecate decorator below. This is so that the warning is only -# printed once for each usage of the function. -__deprecated_functions = {} - - -def deprecate(warning, date=None, log=None): - """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month - that the function is officially going to be removed. - - usage: - - @deprecate('use core/fetch/add_source() instead', '2017-04') - def contributed_add_source_thing(...): - ... - - And it then prints to the log ONCE that the function is deprecated. - The reason for passing the logging function (log) is so that hookenv.log - can be used for a charm if needed. - - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the - function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout - """ - def wrap(f): - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - try: - module = inspect.getmodule(f) - file = inspect.getsourcefile(f) - lines = inspect.getsourcelines(f) - f_name = "{}-{}-{}..{}-{}".format( - module.__name__, file, lines[0], lines[-1], f.__name__) - except (IOError, TypeError): - # assume it was local, so just use the name of the function - f_name = f.__name__ - if f_name not in __deprecated_functions: - __deprecated_functions[f_name] = True - s = "DEPRECATION WARNING: Function {} is being removed".format( - f.__name__) - if date: - s = "{} on/around {}".format(s, date) - if warning: - s = "{} : {}".format(s, warning) - if log: - log(s) - else: - print(s) - return f(*args, **kwargs) - return wrapped_f - return wrap diff --git a/ceph-mon/tests/charmhelpers/contrib/__init__.py b/ceph-mon/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index d21d01d8..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. 
- """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints'), - storage=this_service.get('storage')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints'), - storage=svc.get('storage')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 8a6b7644..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2', - 'memcached']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. 
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x "{}"'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
- """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. 
Expected, actual: {}, ' - '{}'.format(expected[key], value)) - amulet.raise_status(amulet.FAIL, msg=msg) - - def get_unit_hostnames(self, units): - """Return a dict of juju unit names to hostnames.""" - host_names = {} - for unit in units: - host_names[unit.info['unit_name']] = \ - str(unit.file_contents('/etc/hostname').strip()) - self.log.debug('Unit host names: {}'.format(host_names)) - return host_names - - def run_cmd_unit(self, sentry_unit, cmd): - """Run a command on a unit, return the output and exit code.""" - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` command returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - msg = ('{} `{}` command returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output), code - - def file_exists_on_unit(self, sentry_unit, file_name): - """Check if a file exists on a unit.""" - try: - sentry_unit.file_stat(file_name) - return True - except IOError: - return False - except Exception as e: - msg = 'Error checking file {}: {}'.format(file_name, e) - amulet.raise_status(amulet.FAIL, msg=msg) - - def file_contents_safe(self, sentry_unit, file_name, - max_wait=60, fatal=False): - """Get file contents from a sentry unit. Wrap amulet file_contents - with retry logic to address races where a file checks as existing, - but no longer exists by the time file_contents is called. - Return None if file not found. Optionally raise if fatal is True.""" - unit_name = sentry_unit.info['unit_name'] - file_contents = False - tries = 0 - while not file_contents and tries < (max_wait / 4): - try: - file_contents = sentry_unit.file_contents(file_name) - except IOError: - self.log.debug('Attempt {} to open file {} from {} ' - 'failed'.format(tries, file_name, - unit_name)) - time.sleep(4) - tries += 1 - - if file_contents: - return file_contents - elif not fatal: - return None - elif fatal: - msg = 'Failed to get file contents from unit.' - amulet.raise_status(amulet.FAIL, msg) - - def port_knock_tcp(self, host="localhost", port=22, timeout=15): - """Open a TCP socket to check for a listening sevice on a host. - - :param host: host name or IP address, default to localhost - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :returns: True if successful, False if connect failed - """ - - # Resolve host name if possible - try: - connect_host = socket.gethostbyname(host) - host_human = "{} ({})".format(connect_host, host) - except socket.error as e: - self.log.warn('Unable to resolve address: ' - '{} ({}) Trying anyway!'.format(host, e)) - connect_host = host - host_human = connect_host - - # Attempt socket connection - try: - knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - knock.settimeout(timeout) - knock.connect((connect_host, port)) - knock.close() - self.log.debug('Socket connect OK for host ' - '{} on port {}.'.format(host_human, port)) - return True - except socket.error as e: - self.log.debug('Socket connect FAIL for' - ' {} port {} ({})'.format(host_human, port, e)) - return False - - def port_knock_units(self, sentry_units, port=22, - timeout=15, expect_success=True): - """Open a TCP socket to check for a listening sevice on each - listed juju unit. 
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - - # amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Translate to amulet's built in run_action(). Deprecated. - - Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is no longer used - - @return action_id. - """ - self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' - 'deprecated for amulet.run_action') - return unit_sentry.run_action(action, action_args=params) - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - action_id a string action uuid - _check_output parameter is no longer used - """ - data = amulet.actions.get_action_output(action_id, full_output=True) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 1c96752a..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. 
- - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. - Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. 
many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' 
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('cosmic', None): self.cosmic_rocky, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 936b4036..00000000 --- a/ceph-mon/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1533 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. 
- """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. - """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region,
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-        """
-        self.log.warn("Endpoint ID and Region ID validation is limited to not "
-                      "null checks after v2 to v3 conversion")
-        for svc in ep_data.keys():
-            assert len(ep_data[svc]) == 1, "Unknown data format"
-            svc_ep_data = ep_data[svc][0]
-            ep_data[svc] = [
-                {
-                    'url': svc_ep_data['adminURL'],
-                    'interface': 'admin',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['publicURL'],
-                    'interface': 'public',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['internalURL'],
-                    'interface': 'internal',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null}]
-        return ep_data
-
-    def validate_svc_catalog_endpoint_data(self, expected, actual,
-                                           openstack_release=None):
-        """Validate service catalog endpoint data. Pick the correct validator
-           for the OpenStack version. Expected data should be in the v2 format:
-           {
-               'service_name1': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region,
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region,
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-
-        """
-        validation_function = self.validate_v2_svc_catalog_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_svc_catalog_endpoint_data
-            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
-        return validation_function(expected, actual)
-
-    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate service catalog endpoint data.
-
-        Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints.
-        """
-        self.log.debug('Validating service catalog endpoint data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in six.iteritems(expected):
-            if k in actual:
-                ret = self._validate_dict_data(expected[k][0], actual[k][0])
-                if ret:
-                    return self.endpoint_error(k, ret)
-            else:
-                return "endpoint {} does not exist".format(k)
-        return ret
-
-    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate the keystone v3 catalog endpoint data.
-
-        Validate a list of dictionaries that make up the keystone v3 service
-        catalogue.
-
-        It is in the form of:
-
-
-        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
-                        u'interface': u'admin',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:35357/v3'},
-                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
-                        u'interface': u'public',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:5000/v3'},
-                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
-                        u'interface': u'internal',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:5000/v3'}],
-         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
-                           u'interface': u'public',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9311'},
-                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
-                           u'interface': u'internal',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9311'},
-                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
-                           u'interface': u'admin',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9312'}]}
-
-        Note that an added complication is that the order of the admin,
-        public and internal endpoints against 'interface' is not guaranteed
-        within each region.
-
-        Thus, the function sorts the expected and actual lists using the
-        interface key as a sort key, prior to the comparison.
-        """
-        self.log.debug('Validating v3 service catalog endpoint data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in six.iteritems(expected):
-            if k in actual:
-                l_expected = sorted(v, key=lambda x: x['interface'])
-                l_actual = sorted(actual[k], key=lambda x: x['interface'])
-                if len(l_actual) != len(l_expected):
-                    return ("endpoint {} has differing number of interfaces "
-                            " - expected({}), actual({})"
-                            .format(k, len(l_expected), len(l_actual)))
-                for i_expected, i_actual in zip(l_expected, l_actual):
-                    self.log.debug("checking interface {}"
-                                   .format(i_expected['interface']))
-                    ret = self._validate_dict_data(i_expected, i_actual)
-                    if ret:
-                        return self.endpoint_error(k, ret)
-            else:
-                return "endpoint {} does not exist".format(k)
-        return ret
-
-    def validate_tenant_data(self, expected, actual):
-        """Validate tenant data.
-
-        Validate a list of actual tenant data vs a list of expected tenant
-        data.
-        """
-        self.log.debug('Validating tenant data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for e in expected:
-            found = False
-            for act in actual:
-                a = {'enabled': act.enabled, 'description': act.description,
-                     'name': act.name, 'id': act.id}
-                if e['name'] == a['name']:
-                    found = True
-                    ret = self._validate_dict_data(e, a)
-                    if ret:
-                        return "unexpected tenant data - {}".format(ret)
-            if not found:
-                return "tenant {} does not exist".format(e['name'])
-        return ret
-
-    def validate_role_data(self, expected, actual):
-        """Validate role data.
-
-        Validate a list of actual role data vs a list of expected role
-        data.
-        """
-        self.log.debug('Validating role data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for e in expected:
-            found = False
-            for act in actual:
-                a = {'name': act.name, 'id': act.id}
-                if e['name'] == a['name']:
-                    found = True
-                    ret = self._validate_dict_data(e, a)
-                    if ret:
-                        return "unexpected role data - {}".format(ret)
-            if not found:
-                return "role {} does not exist".format(e['name'])
-        return ret
-
-    def validate_user_data(self, expected, actual, api_version=None):
-        """Validate user data.
-
-        Validate a list of actual user data vs a list of expected user
-        data.
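The order-insensitive comparison in validate_v3_svc_catalog_endpoint_data above is worth noting on its own: both lists are sorted on the 'interface' key before being zipped together, so admin, internal and public entries may arrive in any order. A minimal standalone sketch of the same idea, with endpoint dicts invented for illustration:

    expected = [
        {'interface': 'admin', 'url': 'http://10.0.0.1:35357/v3'},
        {'interface': 'internal', 'url': 'http://10.0.0.1:5000/v3'},
        {'interface': 'public', 'url': 'http://10.0.0.1:5000/v3'},
    ]
    actual = list(reversed(expected))  # same endpoints, different order

    l_expected = sorted(expected, key=lambda x: x['interface'])
    l_actual = sorted(actual, key=lambda x: x['interface'])
    assert len(l_actual) == len(l_expected)
    for i_exp, i_act in zip(l_expected, l_actual):
        # after sorting, each pair describes the same interface
        assert i_exp == i_act, 'mismatch for {}'.format(i_exp['interface'])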
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
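keystone_wait_for_propagation above gets its polling behaviour entirely from the retry_on_exception decorator (whose definition is also deleted later in this patch): the function raises while the relation data is stale, and the decorator re-invokes it with a growing delay. A simplified, self-contained restatement of the pattern; get_relation is a hypothetical stand-in for sentry.relation(...):

    import time

    def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
        # Simplified restatement of the decorator removed further below.
        def _wrap(f):
            def _inner(*args, **kwargs):
                retries, multiplier = num_retries, 1
                while True:
                    try:
                        return f(*args, **kwargs)
                    except exc_type:
                        if not retries:
                            raise
                        retries -= 1
                        time.sleep(base_delay * multiplier)
                        multiplier += 1
            return _inner
        return _wrap

    @retry_on_exception(num_retries=5, base_delay=1)
    def wait_for_api_version(get_relation, expected):
        # get_relation is a hypothetical stand-in for sentry.relation();
        # keep raising until the remote side publishes the expected value.
        rel = get_relation()
        if rel.get('api_version') != str(expected):
            raise Exception('api_version not propagated yet')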
- """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - 
password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - 
return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) - else: - image = glance.images.create( - name=image_name, - disk_format="qcow2", - visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. - - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. 
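One detail in create_cirros_image above that is easy to miss: glance v1 exposes image publicity as a boolean is_public attribute, while the v2 API replaced it with a 'visibility' string. A small version-tolerant sketch of that check; image_is_public is an invented helper and 'image' is whatever object the glance client returned:

    def image_is_public(glance_version, image):
        # glance v1 exposes a boolean 'is_public' attribute; the v2 API
        # replaced it with a 'visibility' string. Accept either shape.
        if float(glance_version) < 2.0:
            return bool(getattr(image, 'is_public', False))
        return getattr(image, 'visibility', None) == 'public'

A caller would use it as image_is_public(glance.version, glance.images.get(img_id)), mirroring the branch above.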
Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
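The try/except TypeError in create_cinder_volume above is how the helper supports both cinderclient v1, which names the volume via display_name=, and v2+, which uses name=, without inspecting client versions. The same idea reduced to its core; create_with_fallback and _V2Stub are invented for illustration:

    def create_with_fallback(create, **common):
        # 'create' stands in for cinder.volumes.create: try the v1 keyword
        # first, fall back to the v2 spelling when the client rejects it.
        try:
            return create(display_name='demo-vol', **common)
        except TypeError:
            return create(name='demo-vol', **common)

    class _V2Stub(object):
        # Toy client method that only understands the v2 'name' keyword.
        def create(self, name=None, size=1):
            return name

    assert create_with_fallback(_V2Stub().create, size=1) == 'demo-vol'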
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
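get_ceph_pools above has to cope with two output formats: older releases of `ceph osd lspools` print a single comma-separated line such as '0 data,1 metadata,2 rbd,', while Mimic and later print one '<id> <name>' pair per line, which is why the helper first folds newlines into commas. A standalone sketch of the parse, run against canned strings:

    def parse_lspools(output):
        # Normalise newline-separated output to the old comma form,
        # then collect '<id> <name>' pairs into a name -> id dict.
        pools = {}
        for pool in output.replace('\n', ',').split(','):
            pool_id_name = pool.split(' ')
            if len(pool_id_name) == 2:
                pools[pool_id_name[1]] = int(pool_id_name[0])
        return pools

    assert parse_lspools('0 data,1 metadata,2 rbd,') == \
        {'data': 0, 'metadata': 1, 'rbd': 2}
    assert parse_lspools('1 cinder-ceph\n2 glance\n') == \
        {'cinder-ceph': 1, 'glance': 2}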
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
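validate_ceph_pool_samples above boils down to one invariant: a pool attribute sampled before creating, after creating, and after deleting test resources must rise and then fall. Reduced to a predicate, with sample values invented:

    def samples_ok(samples):
        # samples = [before_create, after_create, after_delete]; the pool
        # attribute must grow on create and shrink again on delete.
        original, created, deleted = samples
        return created > original and deleted < created

    assert samples_ok([0, 10, 2])
    assert not samples_ok([5, 5, 1])   # nothing was actually created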
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
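get_rmq_cluster_running_nodes above is the fiddliest parsing in this file: rabbitmqctl cluster_status emits Erlang terms, not JSON, so the helper slices out the running_nodes list and re-quotes it until json.loads accepts it. The same chop as a standalone function, exercised against a canned status string:

    import json

    def running_nodes_from_status(str_stat):
        # Slice '{running_nodes,[...]}' out of the Erlang-term output and
        # swap single quotes for double so json.loads will take the list.
        if 'running_nodes' not in str_stat:
            return []
        pos_start = str_stat.find('{running_nodes,') + 15
        pos_end = str_stat.find(']},', pos_start) + 1
        return json.loads(str_stat[pos_start:pos_end].replace("'", '"'))

    canned = ("[{nodes,[{disc,['rabbit@host0','rabbit@host1']}]},"
              "{running_nodes,['rabbit@host0','rabbit@host1']},"
              "{partitions,[]}]")
    assert running_nodes_from_status(canned) == ['rabbit@host0',
                                                 'rabbit@host1']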
- - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. 
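rmq_ssl_is_enabled_on_unit above deliberately keeps its check crude: plain substring tests against rabbitmq.config for 'ssl' and the port number, which is enough for a smoke test. A condensed sketch of that decision table; the config snippets are invented:

    def ssl_state(conf_contents, port=None):
        # Substring checks only: enough for a smoke test, not a parser.
        conf_ssl = 'ssl' in conf_contents
        if port:
            return conf_ssl and str(port) in conf_contents
        return conf_ssl

    assert ssl_state("{rabbit, [{ssl_listeners, [5671]}]}", port=5671)
    assert not ssl_state("{rabbit, []}")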
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
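connect_amqp_by_unit and the publish/get helpers around it follow the standard pika blocking-connection flow. A compact round-trip sketch with an illustrative broker address and the default test credentials used in this file; note that the ssl= and socket_timeout= keywords seen above belong to the pika 0.x API these tests target (pika 1.x renamed them):

    import pika

    # Minimal publish/consume round-trip mirroring the helpers above.
    credentials = pika.PlainCredentials('testuser1', 'changeme')
    parameters = pika.ConnectionParameters(host='10.0.0.10', port=5672,
                                           credentials=credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.queue_declare(queue='test', auto_delete=False, durable=True)
    channel.basic_publish(exchange='', routing_key='test', body='hello')
    method_frame, _, body = channel.basic_get('test')
    if method_frame:
        # acknowledge only if a message was actually waiting
        channel.basic_ack(method_frame.delivery_tag)
    connection.close()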
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. 
{} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-mon/tests/charmhelpers/core/__init__.py b/ceph-mon/tests/charmhelpers/core/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/tests/charmhelpers/core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/tests/charmhelpers/core/decorators.py b/ceph-mon/tests/charmhelpers/core/decorators.py deleted file mode 100644 index 6ad41ee4..00000000 --- a/ceph-mon/tests/charmhelpers/core/decorators.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2014 Canonical Ltd. 
-# -# Authors: -# Edward Hope-Morley -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raise the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/ceph-mon/tests/charmhelpers/core/files.py b/ceph-mon/tests/charmhelpers/core/files.py deleted file mode 100644 index fdd82b75..00000000 --- a/ceph-mon/tests/charmhelpers/core/files.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -__author__ = 'Jorge Niedbalski ' - -import os -import subprocess - - -def sed(filename, before, after, flags='g'): - """ - Search and replaces the given pattern on filename. - - :param filename: relative or absolute file path. - :param before: expression to be replaced (see 'man sed') - :param after: expression to replace with (see 'man sed') - :param flags: sed-compatible regex flags in example, to make - the search and replace case insensitive, specify ``flags="i"``. - The ``g`` flag is always specified regardless, so you do not - need to remember to include it when overriding this parameter. - :returns: If the sed command exit code was zero then return, - otherwise raise CalledProcessError. - """ - expression = r's/{0}/{1}/{2}'.format(before, - after, flags) - - return subprocess.check_call(["sed", "-i", "-r", "-e", - expression, - os.path.expanduser(filename)]) diff --git a/ceph-mon/tests/charmhelpers/core/fstab.py b/ceph-mon/tests/charmhelpers/core/fstab.py deleted file mode 100644 index d9fa9152..00000000 --- a/ceph-mon/tests/charmhelpers/core/fstab.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -__author__ = 'Jorge Niedbalski R. 
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/ceph-mon/tests/charmhelpers/core/hookenv.py b/ceph-mon/tests/charmhelpers/core/hookenv.py deleted file mode 100644 index 68800074..00000000 --- a/ceph-mon/tests/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1353 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. 
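Most of these accessors are thin wrappers over JUJU_* environment variables set by the unit agent, which makes them easy to exercise outside a real hook. A sketch with the environment stubbed by hand::

    import os

    os.environ['JUJU_UNIT_NAME'] = 'ceph-mon/0'  # normally set by juju
    from charmhelpers.core import hookenv

    hookenv.local_unit()        # -> 'ceph-mon/0'
    hookenv.application_name()  # -> 'ceph-mon'
    hookenv.in_relation_hook()  # -> False: no JUJU_RELATION set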
- - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. 
- :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (exc_json, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
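relation_get and relation_set only work under a hook context, where the relation-get/relation-set tools are on PATH. A hedged sketch of typical calls; the relation id and unit names are illustrative::

    from charmhelpers.core import hookenv

    # Publish settings; values are coerced to strings and, where the
    # tool supports it, written via --file to avoid argv length limits.
    hookenv.relation_set(relation_id='mon:1', fsid='abc', port=6789)

    # Read one attribute from a specific remote unit on that relation.
    addr = hookenv.relation_get('private-address',
                                unit='ceph-osd/0', rid='mon:1')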
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. 
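The usual traversal pattern built from the primitives above: walk every relation of a given type, then every remote unit on it. Names here are illustrative::

    from charmhelpers.core import hookenv

    for rid in hookenv.relation_ids('mon'):
        for unit in hookenv.related_units(rid):
            data = hookenv.relation_get(unit=unit, rid=rid)
            hookenv.log('{}: {}'.format(unit, data.get('private-address')))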
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
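The port helpers shell out to open-port/close-port; a short sketch of what each call expands to::

    from charmhelpers.core import hookenv

    hookenv.open_port(6789)             # open-port 6789/TCP
    hookenv.open_port(123, 'UDP')       # open-port 123/UDP
    hookenv.open_ports(6800, 7300)      # open-port 6800-7300/TCP
    hookenv.opened_ports()              # e.g. ['6789/tcp', '6800-7300/tcp']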
- - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. - if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -class NoNetworkBinding(Exception): - pass - - -def charm_dir(): - """Return the root directory of the current charm""" - d = os.environ.get('JUJU_CHARM_DIR') - if d is not None: - return d - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. 
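The action helpers wrap action-get/action-set/action-fail and are only meaningful inside an action execution. A sketch; do_thing is a hypothetical stand-in for real work::

    from charmhelpers.core import hookenv

    params = hookenv.action_get()            # all parameters as a dict
    try:
        result = do_thing(params['target'])  # hypothetical helper
        hookenv.action_set({'outcome': result})
    except Exception as exc:
        hookenv.action_fail(str(exc))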
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. """ - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. 
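A common leadership pattern with the helpers above: only the elected leader writes shared settings, while every unit may read them. The fsid value is illustrative::

    from charmhelpers.core import hookenv

    if hookenv.is_leader():
        hookenv.leader_set({'fsid': 'd2029c8e-...'})  # leader-only write
    fsid = hookenv.leader_get('fsid')                 # any unit may read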
The and provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The and provided must match a payload that has been previously - registered with juju using payload-register. The must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. '1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Deprecated since Juju 2.3; use network_get() - - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. 
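atstart and atexit let helpers hook the framework's lifecycle: atstart callbacks run in registration order before the hook body, atexit callbacks run in reverse order after a successful hook. A sketch::

    from charmhelpers.core import hookenv

    hookenv.atstart(hookenv.log, 'before the hook body')
    hookenv.atexit(hookenv.log, 'runs second on success')
    hookenv.atexit(hookenv.log, 'runs first on success')  # LIFO order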
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. - """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. 
- - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. - - This function is to be used on the providing side of the - relation, and provides the ranges of addresses that client - connections may come from. The result is uninteresting on - the consuming side of a relation (unit == local_unit()). - - Returns a stable list of subnets in CIDR format. - eg. ['192.168.1.0/24', '2001::F00F/128'] - - If egress-subnets is not available, falls back to using the published - ingress-address, or finally private-address. - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] - """ - def _to_range(addr): - if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: - addr += '/32' - elif ':' in addr and '/' not in addr: # IPv6 - addr += '/128' - return addr - - settings = relation_get(rid=rid, unit=unit) - if 'egress-subnets' in settings: - return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] - if 'ingress-address' in settings: - return [_to_range(settings['ingress-address'])] - if 'private-address' in settings: - return [_to_range(settings['private-address'])] - return [] # Should never happen - - -def unit_doomed(unit=None): - """Determines if the unit is being removed from the model - - Requires Juju 2.4.1. - - :param unit: string unit name, defaults to local_unit - :side effect: calls goal_state - :side effect: calls local_unit - :side effect: calls has_juju_version - :return: True if the unit is being removed, already gone, or never existed - """ - if not has_juju_version("2.4.1"): - # We cannot risk blindly returning False for 'we don't know', - # because that could cause data loss; if call sites don't - # need an accurate answer, they likely don't need this helper - # at all. - # goal-state existed in 2.4.0, but did not handle removals - # correctly until 2.4.1. - raise NotImplementedError("is_doomed") - if unit is None: - unit = local_unit() - gs = goal_state() - units = gs.get('units', {}) - if unit not in units: - return True - # I don't think 'dead' units ever show up in the goal-state, but - # check anyway in addition to 'dying'. - return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-mon/tests/charmhelpers/core/host.py b/ceph-mon/tests/charmhelpers/core/host.py deleted file mode 100644 index e9fd38a0..00000000 --- a/ceph-mon/tests/charmhelpers/core/host.py +++ /dev/null @@ -1,1042 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
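egress_subnets widens bare addresses into single-host CIDRs before returning them. A standalone sketch of that normalisation, mirroring the private _to_range closure above (not a public helper)::

    import re

    def to_range(addr):
        """Widen a bare IPv4/IPv6 address into a one-host CIDR."""
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr):
            return addr + '/32'
        if ':' in addr and '/' not in addr:
            return addr + '/128'
        return addr

    assert to_range('192.168.1.10') == '192.168.1.10/32'
    assert to_range('2001::f00f') == '2001::f00f/128'
    assert to_range('10.0.0.0/24') == '10.0.0.0/24'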
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt -# Matthew Wedgwood - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -import six - -from contextlib import contextmanager -from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit -from .fstab import Fstab -from charmhelpers.osplatform import get_platform - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import - -UPDATEDB_PATH = '/etc/updatedb.conf' - -def service_start(service_name, **kwargs): - """Start a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('start', service_name, **kwargs) - - -def service_stop(service_name, **kwargs): - """Stop a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. 
These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('stop', service_name, **kwargs) - - -def service_restart(service_name, **kwargs): - """Restart a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be restarted. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_restart('ceph-osd', id=4) - - :param service_name: the name of the service to restart - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False, **kwargs): - """Reload a system service, optionally falling back to restart if - reload fails. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_reload('ceph-osd', id=4) - - :param service_name: the name of the service to reload - :param restart_on_failure: boolean indicating whether to fallback to a - restart if the reload fails. - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. 
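service_reload with restart_on_failure chains two of the primitives above, while service_pause goes further and also prevents the service from starting at boot. A sketch, assuming the charmhelpers.core.host import path::

    from charmhelpers.core import host

    # Try reload first, fall back to a full restart if the reload fails.
    host.service_reload('apache2', restart_on_failure=True)

    # Stop now and prevent starting at boot (systemd: disable + mask).
    host.service_pause('apache2')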
- """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Reenable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. - """ - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. 
- """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. - - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - 
"""Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. - :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not 
os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - # lets see if we can grab the file and compare the context, to avoid doing - # a write. - existing_content = None - existing_uid, existing_gid = None, None - try: - with open(path, 'rb') as target: - existing_content = target.read() - stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: - pass - if content != existing_content: - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), - level=DEBUG) - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): - content = content.encode('UTF-8') - target.write(content) - return - # the contents were the same, but we might still need to change the - # ownership. - if existing_uid != uid: - log("Changing uid on already existing content: {} -> {}" - .format(existing_uid, uid), level=DEBUG) - os.chown(path, uid, -1) - if existing_gid != gid: - log("Changing gid on already existing content: {} -> {}" - .format(existing_gid, gid), level=DEBUG) - os.chown(path, -1, gid) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab""" - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file""" - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def fstab_mount(mountpoint): - """Mount filesystem using fstab""" - cmd_args = ['mount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - return True - - -def file_hash(path, hash_type='md5'): - """Generate a hash checksum of the contents of 'path' or None if not found. - - :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
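write_file only rewrites the target when the content actually changed, fixing up ownership separately, and mount(persist=True) records the mount in /etc/fstab via the Fstab helper. A sketch (root required; device and paths illustrative)::

    from charmhelpers.core import host

    host.mkdir('/var/lib/ceph/mon', owner='ceph', group='ceph',
               perms=0o755)
    host.write_file('/etc/ceph/ceph.conf',
                    b'[global]\nfsid = d2029c8e\n', perms=0o644)
    host.mount('/dev/vdb', '/srv/osd', filesystem='xfs', persist=True)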
- """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. 
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop and start, or restart, a service
-    @param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    @returns result of lambda_f()
-    """
-    if restart_functions is None:
-        restart_functions = {}
-    checksums = {path: path_hash(path) for path in restart_map}
-    r = lambda_f()
-    # create a list of lists of the services to restart
-    restarts = [restart_map[path]
-                for path in restart_map
-                if path_hash(path) != checksums[path]]
-    # create a flat list of ordered services without duplicates from lists
-    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
-    if services_list:
-        actions = ('stop', 'start') if stopstart else ('restart',)
-        for service_name in services_list:
-            if service_name in restart_functions:
-                restart_functions[service_name](service_name)
-            else:
-                for action in actions:
-                    service(action, service_name)
-    return r
-
-
-def pwgen(length=None):
-    """Generate a random password."""
-    if length is None:
-        # A weak PRNG is fine for picking a random length
-        length = random.choice(range(35, 45))
-    alphanumeric_chars = [
-        l for l in (string.ascii_letters + string.digits)
-        if l not in 'l0QD1vAEIOUaeiou']
-    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
-    # actual password
-    random_generator = random.SystemRandom()
-    random_chars = [
-        random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return ''.join(random_chars)
-
-
-def is_phy_iface(interface):
-    """Returns True if interface is not virtual, otherwise False."""
-    if interface:
-        sys_net = '/sys/class/net'
-        if os.path.isdir(sys_net):
-            for iface in glob.glob(os.path.join(sys_net, '*')):
-                if '/virtual/' in os.path.realpath(iface):
-                    continue
-
-                if interface == os.path.basename(iface):
-                    return True
-
-    return False
-
-
-def get_bond_master(interface):
-    """Returns bond master if interface is bond slave, otherwise None.
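
restart_on_change_helper() flattens the per-file service lists while
preserving first-seen order by way of OrderedDict.fromkeys(). That idiom is
easy to verify standalone::

    import itertools
    from collections import OrderedDict

    restarts = [['apache2', 'haproxy'], ['haproxy', 'memcached']]
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    assert services_list == ['apache2', 'haproxy', 'memcached']
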
-
-    NOTE: the provided interface is expected to be physical
-    """
-    if interface:
-        iface_path = '/sys/class/net/%s' % (interface)
-        if os.path.exists(iface_path):
-            if '/virtual/' in os.path.realpath(iface_path):
-                return None
-
-            master = os.path.join(iface_path, 'master')
-            if os.path.exists(master):
-                master = os.path.realpath(master)
-                # make sure it is a bond master
-                if os.path.exists(os.path.join(master, 'bonding')):
-                    return os.path.basename(master)
-
-    return None
-
-
-def list_nics(nic_type=None):
-    """Return a list of NICs of the given type(s)."""
-    if isinstance(nic_type, six.string_types):
-        int_types = [nic_type]
-    else:
-        int_types = nic_type
-
-    interfaces = []
-    if nic_type:
-        for int_type in int_types:
-            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8')
-            ip_output = ip_output.split('\n')
-            ip_output = (line for line in ip_output if line)
-            for line in ip_output:
-                if line.split()[1].startswith(int_type):
-                    matched = re.search('.*: (' + int_type +
-                                        r'[0-9]+\.[0-9]+)@.*', line)
-                    if matched:
-                        iface = matched.groups()[0]
-                    else:
-                        iface = line.split()[1].replace(":", "")
-
-                    if iface not in interfaces:
-                        interfaces.append(iface)
-    else:
-        cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-        ip_output = (line.strip() for line in ip_output if line)
-
-        key = re.compile(r'^[0-9]+:\s+(.+):')
-        for line in ip_output:
-            matched = re.search(key, line)
-            if matched:
-                iface = matched.group(1)
-                iface = iface.partition("@")[0]
-                if iface not in interfaces:
-                    interfaces.append(iface)
-
-    return interfaces
-
-
-def set_nic_mtu(nic, mtu):
-    """Set the Maximum Transmission Unit (MTU) on a network interface."""
-    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
-    subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
-    """Return the Maximum Transmission Unit (MTU) for a network interface."""
-    cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-    mtu = ""
-    for line in ip_output:
-        words = line.split()
-        if 'mtu' in words:
-            mtu = words[words.index("mtu") + 1]
-    return mtu
-
-
-def get_nic_hwaddr(nic):
-    """Return the Media Access Control (MAC) address of a network interface."""
-    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8')
-    hwaddr = ""
-    words = ip_output.split()
-    if 'link/ether' in words:
-        hwaddr = words[words.index('link/ether') + 1]
-    return hwaddr
-
-
-@contextmanager
-def chdir(directory):
-    """Change the current working directory to a different directory for a
-    code block, returning to the previous directory after the block exits.
-    Useful to run commands from a specified directory.
-
-    :param str directory: The directory path to change to for this context.
-    """
-    cur = os.getcwd()
-    try:
-        yield os.chdir(directory)
-    finally:
-        os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """Recursively change user and group ownership of files and directories
-    in given path. Doesn't chown path itself by default, only its children.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
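
The chdir() context manager above restores the previous working directory
even when the body raises, which makes it safe to wrap subprocess calls.
Illustrative usage (the directory is hypothetical)::

    import subprocess
    from charmhelpers.core.host import chdir

    with chdir('/var/lib/ceph'):
        subprocess.check_call(['ls', '-l'])
    # the original working directory is restored here
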
-    :param bool follow_links: Also follow and chown links if True
-    :param bool chowntopdir: Also chown path itself if True
-    """
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    if follow_links:
-        chown = os.chown
-    else:
-        chown = os.lchown
-
-    if chowntopdir:
-        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
-        if not broken_symlink:
-            chown(path, uid, gid)
-    for root, dirs, files in os.walk(path, followlinks=follow_links):
-        for name in dirs + files:
-            full = os.path.join(root, name)
-            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
-            if not broken_symlink:
-                chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
-    """Recursively change user and group ownership of files and directories
-    in a given path, not following symbolic links. See the documentation for
-    'os.lchown' for more information.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    """
-    chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
-    """Returns a tuple containing the username & groupname owning the path.
-
-    :param str path: the string path to retrieve the ownership
-    :return tuple(str, str): A (username, groupname) tuple containing the
-        name of the user and group owning the path.
-    :raises OSError: if the specified path does not exist
-    """
-    stat = os.stat(path)
-    username = pwd.getpwuid(stat.st_uid)[0]
-    groupname = grp.getgrgid(stat.st_gid)[0]
-    return username, groupname
-
-
-def get_total_ram():
-    """The total amount of system RAM in bytes.
-
-    This is what is reported by the OS, and may be overcommitted when
-    there are multiple containers hosted on the same machine.
-    """
-    with open('/proc/meminfo', 'r') as f:
-        for line in f.readlines():
-            if line:
-                key, value, unit = line.split()
-                if key == 'MemTotal:':
-                    assert unit == 'kB', 'Unknown unit'
-                    return int(value) * 1024  # Classic, not KiB.
-    raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
-    """Determine whether the unit is running in a container.
-
-    @return: boolean indicating if the unit is in a container
-    """
-    if init_is_systemd():
-        # Detect using systemd-detect-virt
-        return subprocess.call(['systemd-detect-virt',
-                                '--container']) == 0
-    else:
-        # Detect using the upstart container file marker
-        return os.path.exists(UPSTART_CONTAINER_TYPE)
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
-    """Add the specified path to mlocate's updatedb.conf PRUNEPATHS list.
-
-    This method has no effect if the path specified by updatedb_path does not
-    exist or is not a file.
- - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/__init__.py b/ceph-mon/tests/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py b/ceph-mon/tests/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a396..00000000 --- a/ceph-mon/tests/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. 
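
modulo_distribution() is plain arithmetic over the unit number, so the
spread is easy to check offline. A local re-implementation of the same
calculation (reproduced here because local_unit() needs a live hook
environment) shows the stagger for six units with modulo=3 and wait=30::

    def spread(unit_number, modulo=3, wait=30, non_zero_wait=False):
        calculated_wait_time = (unit_number % modulo) * wait
        if non_zero_wait and calculated_wait_time == 0:
            return modulo * wait
        return calculated_wait_time

    assert [spread(n) for n in range(6)] == [0, 30, 60, 0, 30, 60]
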
- - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index a6d375af..00000000 --- a/ceph-mon/tests/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,91 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. 
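
CompareHostReleases sorts by position in UBUNTU_RELEASES rather than
alphabetically, which matters once codenames wrap around the alphabet. For
example, 'bionic' sorts before 'xenial' as a plain string but is the later
release::

    from charmhelpers.core.host_factory.ubuntu import CompareHostReleases

    assert CompareHostReleases('bionic') > 'xenial'
    assert CompareHostReleases('trusty') < 'xenial'
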
- - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-mon/tests/charmhelpers/core/hugepage.py b/ceph-mon/tests/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/ceph-mon/tests/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. 
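
cmp_pkgrevno() behaves like a classic three-way comparator, so version
gates read naturally. A hedged sketch (the package name and version are
illustrative; the wrapper normally lives in charmhelpers.core.host and
dispatches to the platform module deleted above)::

    from charmhelpers.core.host import cmp_pkgrevno

    if cmp_pkgrevno('ceph-common', '10.2.0') >= 0:
        pass  # installed Ceph is Jewel or newer
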
- - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/ceph-mon/tests/charmhelpers/core/kernel.py b/ceph-mon/tests/charmhelpers/core/kernel.py deleted file mode 100644 index 2d404528..00000000 --- a/ceph-mon/tests/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
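
hugepage_support() bundles the group, sysctl, mount point and fstab steps
behind one call. Typical usage from a charm looks like the following
(values are illustrative)::

    from charmhelpers.core.hugepage import hugepage_support

    hugepage_support('nova', group='hugetlb', nr_hugepages=512,
                     mnt_point='/run/hugepages/kvm', pagesize='2MB')
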
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/ceph-mon/tests/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/ceph-mon/tests/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/tests/charmhelpers/core/services/__init__.py b/ceph-mon/tests/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/ceph-mon/tests/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .base import *  # NOQA
-from .helpers import *  # NOQA
diff --git a/ceph-mon/tests/charmhelpers/core/services/base.py b/ceph-mon/tests/charmhelpers/core/services/base.py
deleted file mode 100644
index 179ad4f0..00000000
--- a/ceph-mon/tests/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
-           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
-           'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
-    def __init__(self, services=None):
-        """
-        Register a list of services, given their definitions.
-
-        Service definitions are dicts in the following formats (all keys except
-        'service' are optional)::
-
-            {
-                "service": <service name>,
-                "required_data": <list of required data contexts>,
-                "provided_data": <list of provided data contexts>,
-                "data_ready": <one or more callbacks>,
-                "data_lost": <one or more callbacks>,
-                "start": <one or more callbacks>,
-                "stop": <one or more callbacks>,
-                "ports": <list of ports to manage>,
-            }
-
-        The 'required_data' list should contain dicts of required data (or
-        dependency managers that act like dicts and know how to collect the data).
-        Only when all items in the 'required_data' list are populated is the list
-        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
-        information.
-
-        The 'provided_data' list should contain relation data providers, most likely
-        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
-        that will indicate a set of data to set on a given relation.
-
-        The 'data_ready' value should be either a single callback, or a list of
-        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
-        Each callback will be called with the service name as the only parameter.
-        After all of the 'data_ready' callbacks are called, the 'start' callbacks
-        are fired.
-
-        The 'data_lost' value should be either a single callback, or a list of
-        callbacks, to be called when a 'required_data' item no longer passes
-        `is_ready()`. Each callback will be called with the service name as the
-        only parameter. After all of the 'data_lost' callbacks are called,
-        the 'stop' callbacks are fired.
-
-        The 'start' value should be either a single callback, or a list of
-        callbacks, to be called when starting the service, after the 'data_ready'
-        callbacks are complete. Each callback will be called with the service
-        name as the only parameter. This defaults to
-        `[host.service_start, services.open_ports]`.
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
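
Since manage() branches on hookenv.hook_name(), charms built on this
framework typically symlink every hook to a single script whose whole body
is a ServiceManager definition followed by manage(). A skeletal sketch
(the service definition is hypothetical)::

    #!/usr/bin/env python
    from charmhelpers.core import services

    manager = services.ServiceManager([
        {'service': 'demo', 'ports': [80]},  # real definitions elided
    ])
    manager.manage()
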
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
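
is_ready() only asks bool() of each 'required_data' item, which is why
plain dicts can stand in for relation contexts in tests. For instance, an
empty dict marks a service as not ready (CHARM_DIR must be set, as it is
during real hook execution, because the manager computes its ready-file
path from it)::

    import os
    from charmhelpers.core import services

    os.environ.setdefault('CHARM_DIR', '/tmp')  # only needed outside a hook
    manager = services.ServiceManager([{
        'service': 'demo',
        'required_data': [{'have': 'config'}, {}],  # the empty dict is falsy
    }])
    assert not manager.is_ready('demo')
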
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-mon/tests/charmhelpers/core/services/helpers.py b/ceph-mon/tests/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d2..00000000 --- a/ceph-mon/tests/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. 
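
Concrete interfaces subclass RelationContext and pin name, interface and
required_keys, as the MysqlRelation and HttpRelation classes further down
in this file do. A hedged sketch for a hypothetical ceph client interface::

    from charmhelpers.core.services.helpers import RelationContext

    class CephClientRelation(RelationContext):
        name = 'ceph'
        interface = 'ceph-client'

        def __init__(self, *args, **kwargs):
            self.required_keys = ['auth', 'key']
            RelationContext.__init__(self, *args, **kwargs)
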
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/ceph-mon/tests/charmhelpers/core/strutils.py b/ceph-mon/tests/charmhelpers/core/strutils.py deleted file mode 100644 index e8df0452..00000000 --- a/ceph-mon/tests/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. 
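
The TemplateCallback removed just above is normally used through its
'template' alias inside a service definition's 'data_ready' list, mirroring
the ServiceManager example earlier in this patch. The source and target
below are illustrative::

    from charmhelpers.core.services import template

    data_ready = [
        template(source='ceph.conf.j2',
                 target='/etc/ceph/ceph.conf',
                 owner='root', perms=0o644),
    ]
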
- """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/ceph-mon/tests/charmhelpers/core/sysctl.py b/ceph-mon/tests/charmhelpers/core/sysctl.py deleted file mode 100644 index 1f188d8c..00000000 --- a/ceph-mon/tests/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
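
bytes_from_string() treats its suffixes as powers of 1024 and accepts both
one- and two-letter forms, while bare digits pass through unchanged::

    from charmhelpers.core.strutils import bytes_from_string

    assert bytes_from_string('1K') == 1024
    assert bytes_from_string('2MB') == 2 * 1024 ** 2
    assert bytes_from_string('512') == 512
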
- -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-mon/tests/charmhelpers/core/templating.py b/ceph-mon/tests/charmhelpers/core/templating.py deleted file mode 100644 index 9014015c..00000000 --- a/ceph-mon/tests/charmhelpers/core/templating.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
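
The create() function removed above accepts either a dict or a YAML string,
writes one key=value pair per line, and finishes by running sysctl -p
against the generated file. Typical usage (the file name is illustrative)::

    from charmhelpers.core.sysctl import create

    create({'vm.swappiness': 10, 'vm.vfs_cache_pressure': 50},
           '/etc/sysctl.d/50-ceph-charm.conf')
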
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/ceph-mon/tests/charmhelpers/core/unitdata.py b/ceph-mon/tests/charmhelpers/core/unitdata.py deleted file mode 100644 index ab554327..00000000 --- a/ceph-mon/tests/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
- -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. - >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import itertools -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu ' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are not persisted unless :meth:`flush` is called. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. - - Note: to facilitate unit testing, ':memory:' can be passed as the - path parameter which causes sqlite3 to only build the db in memory. - This should only be used for testing purposes. 
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
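The hook_scope() and gethistory() methods above are what give the module its per-hook revision history. A minimal sketch, again assuming charmhelpers is importable::

    from charmhelpers.core.unitdata import Storage

    db = Storage(path=':memory:')
    with db.hook_scope('install'):
        db.set('x', 1)
    with db.hook_scope('config-changed'):
        db.set('x', 42)
    # Each row: (revision, key, JSON-encoded value, hook name, timestamp)
    for row in db.gethistory('x'):
        print(row)
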
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/ceph-mon/tests/charmhelpers/osplatform.py b/ceph-mon/tests/charmhelpers/osplatform.py deleted file mode 100644 index d9a4d5c0..00000000 --- a/ceph-mon/tests/charmhelpers/osplatform.py +++ /dev/null @@ -1,25 +0,0 @@ -import platform - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) From 5b5a1cf98d04ad64f7986e2fdeda5a6450b39da7 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 3 Oct 2018 09:35:44 -0500 Subject: [PATCH 1572/2699] Update requirements Change-Id: Ic9e43835bcc84441414f93dfd851c571c611fb19 --- ceph-osd/requirements.txt | 2 - ceph-osd/test-requirements.txt | 10 +- ceph-osd/tests/charmhelpers/__init__.py | 97 -- .../tests/charmhelpers/contrib/__init__.py | 13 - .../charmhelpers/contrib/amulet/__init__.py | 13 - .../charmhelpers/contrib/amulet/deployment.py | 99 -- .../charmhelpers/contrib/amulet/utils.py | 821 --------- .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 357 ---- .../contrib/openstack/amulet/utils.py | 1533 ----------------- ceph-osd/tests/charmhelpers/core/__init__.py | 13 - .../tests/charmhelpers/core/decorators.py | 55 - ceph-osd/tests/charmhelpers/core/files.py | 43 - ceph-osd/tests/charmhelpers/core/fstab.py | 132 -- ceph-osd/tests/charmhelpers/core/hookenv.py | 1353 --------------- ceph-osd/tests/charmhelpers/core/host.py | 1042 ----------- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 72 - .../charmhelpers/core/host_factory/ubuntu.py | 91 - ceph-osd/tests/charmhelpers/core/hugepage.py | 69 - ceph-osd/tests/charmhelpers/core/kernel.py | 72 - .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 - .../core/kernel_factory/ubuntu.py | 13 - .../charmhelpers/core/services/__init__.py | 16 - .../tests/charmhelpers/core/services/base.py | 362 ---- .../charmhelpers/core/services/helpers.py | 290 ---- ceph-osd/tests/charmhelpers/core/strutils.py | 129 -- ceph-osd/tests/charmhelpers/core/sysctl.py | 58 - .../tests/charmhelpers/core/templating.py | 93 - ceph-osd/tests/charmhelpers/core/unitdata.py | 525 ------ ceph-osd/tests/charmhelpers/osplatform.py | 25 - 33 files changed, 6 insertions(+), 7435 deletions(-) delete mode 100644 ceph-osd/tests/charmhelpers/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py delete mode 100644 ceph-osd/tests/charmhelpers/core/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/core/decorators.py delete mode 100644 ceph-osd/tests/charmhelpers/core/files.py delete mode 100644 ceph-osd/tests/charmhelpers/core/fstab.py delete mode 100644 ceph-osd/tests/charmhelpers/core/hookenv.py delete mode 100644 ceph-osd/tests/charmhelpers/core/host.py delete mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/centos.py delete mode 100644 ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py delete mode 100644 ceph-osd/tests/charmhelpers/core/hugepage.py delete mode 100644 ceph-osd/tests/charmhelpers/core/kernel.py delete mode 100644 ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py delete mode 100644 
ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py delete mode 100644 ceph-osd/tests/charmhelpers/core/services/__init__.py delete mode 100644 ceph-osd/tests/charmhelpers/core/services/base.py delete mode 100644 ceph-osd/tests/charmhelpers/core/services/helpers.py delete mode 100644 ceph-osd/tests/charmhelpers/core/strutils.py delete mode 100644 ceph-osd/tests/charmhelpers/core/sysctl.py delete mode 100644 ceph-osd/tests/charmhelpers/core/templating.py delete mode 100644 ceph-osd/tests/charmhelpers/core/unitdata.py delete mode 100644 ceph-osd/tests/charmhelpers/osplatform.py diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index db0af4d0..b8fec1e2 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -2,7 +2,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.8.0,<1.9.0 -PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 @@ -10,4 +9,3 @@ Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 -pyudev diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 6757a47d..2b2c0e11 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -1,16 +1,16 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ -requests==2.6.0 +requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+ +amulet>=1.14.3,<2.0;python_version=='2.7' +bundletester>=0.6.1,<1.0;python_version=='2.7' python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -22,6 +22,8 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox pytz +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-osd/tests/charmhelpers/__init__.py b/ceph-osd/tests/charmhelpers/__init__.py deleted file mode 100644 index e7aa4715..00000000 --- a/ceph-osd/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
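The ;python_version=='2.7' suffixes in the test-requirements diff above are PEP 508 environment markers: pip evaluates each marker against the running interpreter and skips the requirement when it is false, which is how the amulet and bundletester lines become Python 2 only. A sketch of that evaluation, assuming the third-party packaging library is available::

    from packaging.markers import Marker

    marker = Marker("python_version == '2.7'")
    print(marker.evaluate())  # False on any Python 3 interpreter
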
-from __future__ import print_function -from __future__ import absolute_import - -import functools -import inspect -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa - - -# Holds a list of mapping of mangled function names that have been deprecated -# using the @deprecate decorator below. This is so that the warning is only -# printed once for each usage of the function. -__deprecated_functions = {} - - -def deprecate(warning, date=None, log=None): - """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month - that the function is officially going to be removed. - - usage: - - @deprecate('use core/fetch/add_source() instead', '2017-04') - def contributed_add_source_thing(...): - ... - - And it then prints to the log ONCE that the function is deprecated. - The reason for passing the logging function (log) is so that hookenv.log - can be used for a charm if needed. - - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the - function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout - """ - def wrap(f): - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - try: - module = inspect.getmodule(f) - file = inspect.getsourcefile(f) - lines = inspect.getsourcelines(f) - f_name = "{}-{}-{}..{}-{}".format( - module.__name__, file, lines[0], lines[-1], f.__name__) - except (IOError, TypeError): - # assume it was local, so just use the name of the function - f_name = f.__name__ - if f_name not in __deprecated_functions: - __deprecated_functions[f_name] = True - s = "DEPRECATION WARNING: Function {} is being removed".format( - f.__name__) - if date: - s = "{} on/around {}".format(s, date) - if warning: - s = "{} : {}".format(s, warning) - if log: - log(s) - else: - print(s) - return f(*args, **kwargs) - return wrapped_f - return wrap diff --git a/ceph-osd/tests/charmhelpers/contrib/__init__.py b/ceph-osd/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
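The deprecate decorator deleted above also ships in the charmhelpers package itself, so it can still be exercised. A usage sketch; the decorated helper below is illustrative, not a function from the tree::

    from charmhelpers import deprecate

    @deprecate('use core/fetch/add_source() instead', '2017-04')
    def contributed_add_source_thing(source):
        print('adding source', source)

    contributed_add_source_thing('ppa:example/ppa')
    # The first call also prints, once, something like:
    # DEPRECATION WARNING: Function contributed_add_source_thing is being
    # removed on/around 2017-04 : use core/fetch/add_source() instead
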
diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index d21d01d8..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. 
- """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints'), - storage=this_service.get('storage')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints'), - storage=svc.get('storage')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 8a6b7644..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2', - 'memcached']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
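_get_config() above needs allow_no_value because files such as mysql's my.cnf contain bare flags with no '='. The same parse in isolation, using the Python 3 read_file() spelling of the deprecated readfp()::

    import io
    from configparser import ConfigParser

    contents = u"[mysqld]\nskip-external-locking\nmax_connections = 100\n"
    config = ConfigParser(allow_no_value=True)
    config.read_file(io.StringIO(contents))
    assert config.get('mysqld', 'skip-external-locking') is None
    assert config.get('mysqld', 'max_connections') == '100'
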
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. 
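service_restarted_since() and config_updated_since() above share one retry shape: poll, swallow the IOError raised while the file or /proc/<pid> entry does not exist yet, sleep, try again. Factored out as a sketch; retry_until is an illustrative name::

    import time

    def retry_until(fn, retry_count=30, retry_sleep_time=10):
        # Poll fn() until it returns a truthy value or retries run out.
        for attempt in range(retry_count + 1):
            try:
                result = fn()
                if result:
                    return result
            except IOError:
                pass  # e.g. file or /proc/<pid> not present yet
            if attempt < retry_count:
                time.sleep(retry_sleep_time)
        return None
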
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x "{}"'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
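When expect_success is False, get_process_id_list() above inverts the shell exit status: in `pidof -x NAME || exit 0 && exit 1` the left associativity of the shell operators means the command exits 0 exactly when pidof found nothing. The same trick locally, assuming a POSIX shell with pidof; pids_absent is an illustrative name::

    import subprocess

    def pids_absent(process_name):
        # Exit 0 when pidof finds no PID, exit 1 when it finds one.
        cmd = 'pidof -x "{}" || exit 0 && exit 1'.format(process_name)
        return subprocess.call(cmd, shell=True) == 0

    print(pids_absent('surely-not-a-running-daemon'))  # True when absent
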
- """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. 
Expected, actual: {}, ' - '{}'.format(expected[key], value)) - amulet.raise_status(amulet.FAIL, msg=msg) - - def get_unit_hostnames(self, units): - """Return a dict of juju unit names to hostnames.""" - host_names = {} - for unit in units: - host_names[unit.info['unit_name']] = \ - str(unit.file_contents('/etc/hostname').strip()) - self.log.debug('Unit host names: {}'.format(host_names)) - return host_names - - def run_cmd_unit(self, sentry_unit, cmd): - """Run a command on a unit, return the output and exit code.""" - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` command returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - msg = ('{} `{}` command returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output), code - - def file_exists_on_unit(self, sentry_unit, file_name): - """Check if a file exists on a unit.""" - try: - sentry_unit.file_stat(file_name) - return True - except IOError: - return False - except Exception as e: - msg = 'Error checking file {}: {}'.format(file_name, e) - amulet.raise_status(amulet.FAIL, msg=msg) - - def file_contents_safe(self, sentry_unit, file_name, - max_wait=60, fatal=False): - """Get file contents from a sentry unit. Wrap amulet file_contents - with retry logic to address races where a file checks as existing, - but no longer exists by the time file_contents is called. - Return None if file not found. Optionally raise if fatal is True.""" - unit_name = sentry_unit.info['unit_name'] - file_contents = False - tries = 0 - while not file_contents and tries < (max_wait / 4): - try: - file_contents = sentry_unit.file_contents(file_name) - except IOError: - self.log.debug('Attempt {} to open file {} from {} ' - 'failed'.format(tries, file_name, - unit_name)) - time.sleep(4) - tries += 1 - - if file_contents: - return file_contents - elif not fatal: - return None - elif fatal: - msg = 'Failed to get file contents from unit.' - amulet.raise_status(amulet.FAIL, msg) - - def port_knock_tcp(self, host="localhost", port=22, timeout=15): - """Open a TCP socket to check for a listening sevice on a host. - - :param host: host name or IP address, default to localhost - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :returns: True if successful, False if connect failed - """ - - # Resolve host name if possible - try: - connect_host = socket.gethostbyname(host) - host_human = "{} ({})".format(connect_host, host) - except socket.error as e: - self.log.warn('Unable to resolve address: ' - '{} ({}) Trying anyway!'.format(host, e)) - connect_host = host - host_human = connect_host - - # Attempt socket connection - try: - knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - knock.settimeout(timeout) - knock.connect((connect_host, port)) - knock.close() - self.log.debug('Socket connect OK for host ' - '{} on port {}.'.format(host_human, port)) - return True - except socket.error as e: - self.log.debug('Socket connect FAIL for' - ' {} port {} ({})'.format(host_human, port, e)) - return False - - def port_knock_units(self, sentry_units, port=22, - timeout=15, expect_success=True): - """Open a TCP socket to check for a listening sevice on each - listed juju unit. 
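port_knock_tcp() above is a plain connect-and-close probe. Stripped of the amulet logging it is just the following; port_open is an illustrative name::

    import socket

    def port_open(host='localhost', port=22, timeout=15):
        try:
            connect_host = socket.gethostbyname(host)
        except socket.error:
            connect_host = host  # unresolvable: try the raw name anyway
        try:
            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            knock.settimeout(timeout)
            knock.connect((connect_host, port))
            knock.close()
            return True
        except socket.error:
            return False
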
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - - # amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Translate to amulet's built in run_action(). Deprecated. - - Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is no longer used - - @return action_id. - """ - self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' - 'deprecated for amulet.run_action') - return unit_sentry.run_action(action, action_args=params) - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - action_id a string action uuid - _check_output parameter is no longer used - """ - data = amulet.actions.get_action_output(action_id, full_output=True) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 1c96752a..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. 
- - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. - Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. 
many subordinates
-        no_origin = list(set(
-            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
-                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
-                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
-                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
-
-        if self.openstack:
-            for svc in services:
-                if svc['name'] not in use_source + no_origin:
-                    config = {'openstack-origin': self.openstack}
-                    self.d.configure(svc['name'], config)
-
-        if self.source:
-            for svc in services:
-                if svc['name'] in use_source and svc['name'] not in no_origin:
-                    config = {'source': self.source}
-                    self.d.configure(svc['name'], config)
-
-    def _configure_services(self, configs):
-        """Configure all of the services."""
-        self.log.info('OpenStackAmuletDeployment: configure services')
-        for service, config in six.iteritems(configs):
-            self.d.configure(service, config)
-
-    def _auto_wait_for_status(self, message=None, exclude_services=None,
-                              include_only=None, timeout=None):
-        """Wait for all units to have a specific extended status, except
-        for any defined as excluded. Unless specified via message, any
-        status containing any case of 'ready' will be considered a match.
-
-        Examples of message usage:
-
-          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
-              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
-          Wait for all units to reach this status (exact match):
-              message = re.compile('^Unit is ready and clustered$')
-
-          Wait for all units to reach any one of these (exact match):
-              message = re.compile('Unit is ready|OK|Ready')
-
-          Wait for at least one unit to reach this status (exact match):
-              message = {'ready'}
-
-        See Amulet's sentry.wait_for_messages() for message usage detail.
-        https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
-        :param message: Expected status match
-        :param exclude_services: List of juju service names to ignore,
-            not to be used in conjunction with include_only.
-        :param include_only: List of juju service names to exclusively check,
-            not to be used in conjunction with exclude_services.
-        :param timeout: Maximum time in seconds to wait for status match
-        :returns: None. Raises if timeout is hit.
-        """
-        if not timeout:
-            timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
-        self.log.info('Waiting for extended status on units for {}s...'
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('cosmic', None): self.cosmic_rocky, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
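The release "enum" set up by _get_openstack_release above is simply each pair's index in OPENSTACK_RELEASES_PAIRS, so comparisons order by OpenStack release rather than by Ubuntu series. A standalone sketch of that invariant, using an abridged copy of the list:

    # Abridged OPENSTACK_RELEASES_PAIRS; list position doubles as the enum.
    PAIRS = ['trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
             'trusty_mitaka', 'xenial_mitaka', 'xenial_queens']
    trusty_mitaka = PAIRS.index('trusty_mitaka')
    xenial_queens = PAIRS.index('xenial_queens')
    assert trusty_mitaka < xenial_queens  # mitaka predates queens
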
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 936b4036..00000000 --- a/ceph-osd/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1533 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. 
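The origin-string handling in _get_openstack_release_string above boils down to a couple of splits; a self-contained sketch with an illustrative origin value:

    # 'cloud:xenial-ocata' -> 'ocata'; a bare series uses the lookup table.
    def release_string(series, openstack=None):
        releases = {'trusty': 'icehouse', 'xenial': 'mitaka'}  # abridged
        if openstack:
            os_origin = openstack.split(':')[1]
            return os_origin.split('%s-' % series)[1].split('/')[0]
        return releases[series]

    assert release_string('xenial', 'cloud:xenial-ocata') == 'ocata'
    assert release_string('trusty') == 'icehouse'
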
- """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. - """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-            'service_name2': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-        }
-        """
-        self.log.warn("Endpoint ID and Region ID validation is limited to not "
-                      "null checks after v2 to v3 conversion")
-        for svc in ep_data.keys():
-            assert len(ep_data[svc]) == 1, "Unknown data format"
-            svc_ep_data = ep_data[svc][0]
-            ep_data[svc] = [
-                {
-                    'url': svc_ep_data['adminURL'],
-                    'interface': 'admin',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['publicURL'],
-                    'interface': 'public',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['internalURL'],
-                    'interface': 'internal',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null}]
-        return ep_data
-
-    def validate_svc_catalog_endpoint_data(self, expected, actual,
-                                           openstack_release=None):
-        """Validate service catalog endpoint data. Pick the correct validator
-        for the OpenStack version. Expected data should be in the v2 format:
-        {
-            'service_name1': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-            'service_name2': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-        }
-
-        """
-        validation_function = self.validate_v2_svc_catalog_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_svc_catalog_endpoint_data
-            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
-        return validation_function(expected, actual)
-
-    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate service catalog endpoint data.
-
-        Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints.
-        """
-        self.log.debug('Validating service catalog endpoint data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in six.iteritems(expected):
-            if k in actual:
-                ret = self._validate_dict_data(expected[k][0], actual[k][0])
-                if ret:
-                    return self.endpoint_error(k, ret)
-            else:
-                return "endpoint {} does not exist".format(k)
-        return ret
-
-    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate the keystone v3 catalog endpoint data.
-
-        Validate a list of dictionaries that make up the keystone v3 service
-        catalogue.
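The v3 validator below cannot rely on endpoint ordering, so it sorts both lists on the 'interface' key before comparing pairwise. The trick in isolation, with made-up endpoint dicts:

    expected = [{'interface': 'admin'}, {'interface': 'public'}]
    actual = [{'interface': 'public'}, {'interface': 'admin'}]
    l_expected = sorted(expected, key=lambda x: x['interface'])
    l_actual = sorted(actual, key=lambda x: x['interface'])
    assert len(l_actual) == len(l_expected)
    assert [a['interface'] for a in l_actual] == \
        [e['interface'] for e in l_expected]
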
- - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. - """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. 
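The tenant/role/user validators in this block all follow the same convention: find the actual record by name, then hand a dict to _validate_dict_data (defined in charmhelpers.contrib.amulet.utils, not shown here), where expected values may be literals or callables such as self.not_null. A self-contained sketch of the convention, not the charmhelpers implementation:

    def not_null(x):
        return x is not None

    def validate_dict(expected, actual):
        # Mirrors the _validate_dict_data contract: None means success,
        # otherwise a message describing the first mismatch.
        for k, v in expected.items():
            if callable(v):
                if not v(actual.get(k)):
                    return '{} failed callable check'.format(k)
            elif actual.get(k) != v:
                return '{}: {} != {}'.format(k, actual.get(k), v)

    assert validate_dict({'id': not_null, 'name': 'admin'},
                         {'id': 'abc123', 'name': 'admin'}) is None
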
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
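The keystone helpers below derive endpoints from fixed conventions: port 5000 for the public API, 35357 for admin, and a URL path chosen by API version (see get_keystone_endpoint). Condensed, with an illustrative IP:

    def keystone_ep(keystone_ip, api_version=None, admin_port=False):
        port = 35357 if admin_port else 5000
        base_ep = 'http://{}:{}'.format(keystone_ip, port)
        return base_ep + ('/v2.0' if api_version == 2 else '/v3')

    assert keystone_ep('10.5.0.10', api_version=2) == 'http://10.5.0.10:5000/v2.0'
    assert keystone_ep('10.5.0.10', admin_port=True) == 'http://10.5.0.10:35357/v3'
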
- """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - 
password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - 
return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) - else: - image = glance.images.create( - name=image_name, - disk_format="qcow2", - visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. - - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. 
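create_cinder_volume below treats img_id, src_vol_id and snap_id as mutually exclusive sources and fails on any other combination. A condensed sketch of just that guard (the real helper also derives the bootable flag per branch):

    def pick_source(img_id=None, src_vol_id=None, snap_id=None):
        given = [name for name, val in (('image', img_id),
                                        ('clone', src_vol_id),
                                        ('snapshot', snap_id)) if val]
        if len(given) > 1:
            raise ValueError('invalid combination: {}'.format(given))
        return given[0] if given else 'blank'

    assert pick_source(img_id='img-1') == 'image'
    assert pick_source() == 'blank'
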
Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
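delete_resource below and resource_reaches_status share the same polling shape: re-check every 4 seconds until the condition holds or max_wait is spent. The generic loop on its own:

    import time

    def wait_until(predicate, max_wait=120, interval=4):
        tries = 0
        while not predicate() and tries < (max_wait / interval):
            time.sleep(interval)
            tries += 1
        return predicate()

    assert wait_until(lambda: True) is True
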
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
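The lspools parsing in get_ceph_pools above, replayed on the example output from its comment (including the mimic-era newline normalisation):

    output = '0 data\n1 metadata\n2 rbd'   # mimic emits one pool per line
    output = output.replace('\n', ',')     # normalise to the older format
    pools = {}
    for pool in str(output).split(','):
        pool_id_name = pool.split(' ')
        if len(pool_id_name) == 2:
            pools[pool_id_name[1]] = int(pool_id_name[0])
    assert pools == {'data': 0, 'metadata': 1, 'rbd': 2}
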
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
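validate_ceph_pool_samples above encodes a simple three-point rule; with made-up object counts:

    samples = [100, 140, 110]  # original, after-create, after-delete
    original, created, deleted = range(3)
    assert samples[created] > samples[original]  # creating objects grew it
    assert samples[deleted] < samples[created]   # deleting shrank it again
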
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
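The string-chop-then-json.loads trick in get_rmq_cluster_running_nodes above, on a canned cluster_status fragment:

    import json

    str_stat = "{running_nodes,['rabbit@host1','rabbit@host2']},"
    pos_start = str_stat.find('{running_nodes,') + 15
    pos_end = str_stat.find(']},', pos_start) + 1
    str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
    assert json.loads(str_run_nodes) == ['rabbit@host1', 'rabbit@host2']
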
- - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. 
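rmq_ssl_is_enabled_on_unit above reduces to a small decision table over the rendered config; condensed (logging and the unknown-state branch omitted):

    def ssl_enabled(conf_contents, port=None):
        conf_ssl = 'ssl' in conf_contents
        conf_port = port is not None and str(port) in conf_contents
        return conf_ssl and conf_port if port else conf_ssl

    assert ssl_enabled('{ssl, [{port, 5671}]}', port=5671)
    assert not ssl_enabled('plain tcp listener')
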
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
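connect_amqp_by_unit above falls back to the standard AMQP ports when none is given; that default logic on its own:

    def amqp_port(ssl=False, port=None):
        if port:
            return port
        return 5671 if ssl else 5672  # amqps vs plain amqp defaults

    assert amqp_port() == 5672
    assert amqp_port(ssl=True) == 5671
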
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. 
{} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-osd/tests/charmhelpers/core/__init__.py b/ceph-osd/tests/charmhelpers/core/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/tests/charmhelpers/core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-osd/tests/charmhelpers/core/decorators.py b/ceph-osd/tests/charmhelpers/core/decorators.py deleted file mode 100644 index 6ad41ee4..00000000 --- a/ceph-osd/tests/charmhelpers/core/decorators.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2014 Canonical Ltd. 
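The /etc/memcached.conf scan in validate_memcache above checks the last token of each expected flag line; replayed on canned contents:

    contents = '-d\n-p 11211\n-l ::1\n'      # illustrative memcached.conf
    expected = {'-p': '11211', '-l': '::1'}  # post-trusty listen address
    found = []
    for key, value in expected.items():
        for line in contents.split('\n'):
            if line.startswith(key):
                assert value == line.split()[-1]
                found.append(key)
    assert sorted(found) == sorted(expected.keys())
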
diff --git a/ceph-osd/tests/charmhelpers/core/decorators.py b/ceph-osd/tests/charmhelpers/core/decorators.py
deleted file mode 100644
index 6ad41ee4..00000000
--- a/ceph-osd/tests/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-#  Edward Hope-Morley
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
-    log,
-    INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
-    """If the decorated function raises exception exc_type, allow num_retries
-    retry attempts before raising the exception.
-    """
-    def _retry_on_exception_inner_1(f):
-        def _retry_on_exception_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            while True:
-                try:
-                    return f(*args, **kwargs)
-                except exc_type:
-                    if not retries:
-                        raise
-
-                delay = base_delay * multiplier
-                multiplier += 1
-                log("Retrying '%s' %d more times (delay=%s)" %
-                    (f.__name__, retries, delay), level=INFO)
-                retries -= 1
-                if delay:
-                    time.sleep(delay)
-
-        return _retry_on_exception_inner_2
-
-    return _retry_on_exception_inner_1
diff --git a/ceph-osd/tests/charmhelpers/core/files.py b/ceph-osd/tests/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b75..00000000
--- a/ceph-osd/tests/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski '
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
-    """
-    Searches for and replaces the given pattern in filename.
-
-    :param filename: relative or absolute file path.
-    :param before: expression to be replaced (see 'man sed')
-    :param after: expression to replace with (see 'man sed')
-    :param flags: sed-compatible regex flags; for example, to make
-        the search and replace case insensitive, specify ``flags="i"``.
-        The ``g`` flag is always specified regardless, so you do not
-        need to remember to include it when overriding this parameter.
-    :returns: If the sed command exit code was zero then return,
-        otherwise raise CalledProcessError.
-    """
-    expression = r's/{0}/{1}/{2}'.format(before,
-                                         after, flags)
-
-    return subprocess.check_call(["sed", "-i", "-r", "-e",
-                                  expression,
-                                  os.path.expanduser(filename)])
diff --git a/ceph-osd/tests/charmhelpers/core/fstab.py b/ceph-osd/tests/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa9152..00000000
--- a/ceph-osd/tests/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R.
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/ceph-osd/tests/charmhelpers/core/hookenv.py b/ceph-osd/tests/charmhelpers/core/hookenv.py deleted file mode 100644 index 68800074..00000000 --- a/ceph-osd/tests/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1353 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
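The Fstab helper deleted above can be exercised against a scratch file rather than the real /etc/fstab; a minimal sketch (the device, mountpoint, and path are placeholders):

    from charmhelpers.core.fstab import Fstab

    def fstab_demo(path='/tmp/fstab-demo'):
        open(path, 'a').close()  # Fstab opens 'rb+', so the file must exist
        # add() is a no-op (returns False) if the device already has an entry
        Fstab.add('/dev/sdb1', '/srv/data', 'ext4', options='noatime',
                  path=path)
        # entries can then be removed again by mountpoint
        return Fstab.remove_by_mountpoint('/srv/data', path=path)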
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. 
- - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. 
- :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (exc_json, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
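For orientation, the Config change-tracking API above is typically consumed like this from a config-changed hook (a sketch; this charm's osd-devices option stands in for any key):

    from charmhelpers.core import hookenv

    def on_config_changed():
        conf = hookenv.config()
        # changed() is True on the first run or when the value differs
        # from the copy persisted at the end of the previous hook.
        if conf.changed('osd-devices'):
            hookenv.log('osd-devices: {!r} -> {!r}'.format(
                conf.previous('osd-devices'), conf.get('osd-devices')))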
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. 
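The relation query helpers above compose in a standard nested-loop pattern; a sketch that collects an attribute from every unit on a named relation (the relation name is a placeholder):

    from charmhelpers.core import hookenv

    def peer_addresses(relation_name='mon'):
        addrs = []
        for rid in hookenv.relation_ids(relation_name):
            for unit in hookenv.related_units(rid):
                # relation_get() returns None for unset keys
                addr = hookenv.relation_get('private-address',
                                            unit=unit, rid=rid)
                if addr:
                    addrs.append(addr)
        return addrs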
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
-
-    Example::
-
-        hooks = Hooks()
-
-        # register a hook, taking its name from the function name
-        @hooks.hook()
-        def install():
-            pass  # your code here
-
-        # register a hook, providing a custom hook name
-        @hooks.hook("config-changed")
-        def config_changed():
-            pass  # your code here
-
-        if __name__ == "__main__":
-            # execute a hook based on the name the program is called by
-            hooks.execute(sys.argv)
-    """
-
-    def __init__(self, config_save=None):
-        super(Hooks, self).__init__()
-        self._hooks = {}
-
-        # For unknown reasons, we allow the Hooks constructor to override
-        # config().implicit_save.
-        if config_save is not None:
-            config().implicit_save = config_save
-
-    def register(self, name, function):
-        """Register a hook"""
-        self._hooks[name] = function
-
-    def execute(self, args):
-        """Execute a registered hook based on args[0]"""
-        _run_atstart()
-        hook_name = os.path.basename(args[0])
-        if hook_name in self._hooks:
-            try:
-                self._hooks[hook_name]()
-            except SystemExit as x:
-                if x.code is None or x.code == 0:
-                    _run_atexit()
-                raise
-            _run_atexit()
-        else:
-            raise UnregisteredHookError(hook_name)
-
-    def hook(self, *hook_names):
-        """Decorator, registering the decorated function as one or more hooks"""
-        def wrapper(decorated):
-            for hook_name in hook_names:
-                self.register(hook_name, decorated)
-            else:
-                self.register(decorated.__name__, decorated)
-                if '_' in decorated.__name__:
-                    self.register(
-                        decorated.__name__.replace('_', '-'), decorated)
-            return decorated
-        return wrapper
-
-
-class NoNetworkBinding(Exception):
-    pass
-
-
-def charm_dir():
-    """Return the root directory of the current charm"""
-    d = os.environ.get('JUJU_CHARM_DIR')
-    if d is not None:
-        return d
-    return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
-    """Gets the value of an action parameter, or all key/value param pairs"""
-    cmd = ['action-get']
-    if key is not None:
-        cmd.append(key)
-    cmd.append('--format=json')
-    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-    return action_data
-
-
-def action_set(values):
-    """Sets the values to be returned after the action finishes"""
-    cmd = ['action-set']
-    for k, v in list(values.items()):
-        cmd.append('{}={}'.format(k, v))
-    subprocess.check_call(cmd)
-
-
-def action_fail(message):
-    """Sets the action status to failed and sets the error message.
-
-    The results set by action_set are preserved."""
-    subprocess.check_call(['action-fail', message])
-
-
-def action_name():
-    """Get the name of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
-    """Get the UUID of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
-    """Get the tag for the currently executing action."""
-    return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
-    """Set the workload state with a message
-
-    Use status-set to set the workload state with a message which is visible
-    to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message instead.
-
-    workload_state -- valid juju workload state.
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. """ - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. 
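A common pattern built from is_leader(), leader_set(), and leader_get() above: the leader mints a shared value once and every unit reads it back (the key and value are placeholders):

    from charmhelpers.core import hookenv

    def shared_secret():
        secret = hookenv.leader_get('secret')
        if secret is None and hookenv.is_leader():
            secret = 'generated-once-by-the-leader'  # placeholder value
            # Only the leader may write; peers observe the new value in
            # their next leader-settings-changed hook.
            hookenv.leader_set({'secret': secret})
        return secret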
The and provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The and provided must match a payload that has been previously - registered with juju using payload-register. The must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. '1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Deprecated since Juju 2.3; use network_get() - - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. 
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. - """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. 
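Folding the docstring's usage line into a self-contained helper gives the usual way to gather addresses across a relation (the relation name is a placeholder):

    from charmhelpers.core import hookenv

    def relation_ingress_addresses(relation_name='client'):
        return [hookenv.ingress_address(rid=u.rid, unit=u.unit)
                for u in hookenv.iter_units_for_relation_name(relation_name)]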
- - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. - - This function is to be used on the providing side of the - relation, and provides the ranges of addresses that client - connections may come from. The result is uninteresting on - the consuming side of a relation (unit == local_unit()). - - Returns a stable list of subnets in CIDR format. - eg. ['192.168.1.0/24', '2001::F00F/128'] - - If egress-subnets is not available, falls back to using the published - ingress-address, or finally private-address. - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] - """ - def _to_range(addr): - if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: - addr += '/32' - elif ':' in addr and '/' not in addr: # IPv6 - addr += '/128' - return addr - - settings = relation_get(rid=rid, unit=unit) - if 'egress-subnets' in settings: - return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] - if 'ingress-address' in settings: - return [_to_range(settings['ingress-address'])] - if 'private-address' in settings: - return [_to_range(settings['private-address'])] - return [] # Should never happen - - -def unit_doomed(unit=None): - """Determines if the unit is being removed from the model - - Requires Juju 2.4.1. - - :param unit: string unit name, defaults to local_unit - :side effect: calls goal_state - :side effect: calls local_unit - :side effect: calls has_juju_version - :return: True if the unit is being removed, already gone, or never existed - """ - if not has_juju_version("2.4.1"): - # We cannot risk blindly returning False for 'we don't know', - # because that could cause data loss; if call sites don't - # need an accurate answer, they likely don't need this helper - # at all. - # goal-state existed in 2.4.0, but did not handle removals - # correctly until 2.4.1. - raise NotImplementedError("is_doomed") - if unit is None: - unit = local_unit() - gs = goal_state() - units = gs.get('units', {}) - if unit not in units: - return True - # I don't think 'dead' units ever show up in the goal-state, but - # check anyway in addition to 'dying'. - return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-osd/tests/charmhelpers/core/host.py b/ceph-osd/tests/charmhelpers/core/host.py deleted file mode 100644 index e9fd38a0..00000000 --- a/ceph-osd/tests/charmhelpers/core/host.py +++ /dev/null @@ -1,1042 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
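egress_subnets() above exists to build allow-lists on the providing side of a relation; a sketch aggregating subnets over every related unit (the relation name is a placeholder):

    from charmhelpers.core import hookenv

    def client_allow_list(relation_name='client'):
        subnets = set()
        for rid in hookenv.relation_ids(relation_name):
            for unit in hookenv.related_units(rid):
                # falls back to ingress-address/private-address in CIDR form
                subnets.update(hookenv.egress_subnets(rid=rid, unit=unit))
        return sorted(subnets)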
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt -# Matthew Wedgwood - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -import six - -from contextlib import contextmanager -from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit -from .fstab import Fstab -from charmhelpers.osplatform import get_platform - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import - -UPDATEDB_PATH = '/etc/updatedb.conf' - -def service_start(service_name, **kwargs): - """Start a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('start', service_name, **kwargs) - - -def service_stop(service_name, **kwargs): - """Stop a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. 
These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    return service('restart', service_name, **kwargs)
-
-
-def service_reload(service_name, restart_on_failure=False, **kwargs):
-    """Reload a system service, optionally falling back to restart if
-    reload fails.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be reloaded. The
-    following example reloads the ceph-osd service for instance id=4:
-
-        service_reload('ceph-osd', id=4)
-
-    :param service_name: the name of the service to reload
-    :param restart_on_failure: boolean indicating whether to fallback to a
-                               restart if the reload fails.
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    service_result = service('reload', service_name, **kwargs)
-    if not service_result and restart_on_failure:
-        service_result = service('restart', service_name, **kwargs)
-    return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
-                  **kwargs):
-    """Pause a system service.
-
-    Stop it, and prevent it from starting again at boot.
-
-    :param service_name: the name of the service to pause
-    :param init_dir: path to the upstart init directory
-    :param initd_dir: path to the sysv init directory
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems which do not support
-                     key=value arguments via the commandline.
-    """
-    stopped = True
-    if service_running(service_name, **kwargs):
-        stopped = service_stop(service_name, **kwargs)
-    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
-    sysv_file = os.path.join(initd_dir, service_name)
-    if init_is_systemd():
-        service('disable', service_name)
-        service('mask', service_name)
-    elif os.path.exists(upstart_file):
-        override_path = os.path.join(
-            init_dir, '{}.override'.format(service_name))
-        with open(override_path, 'w') as fh:
-            fh.write("manual\n")
-    elif os.path.exists(sysv_file):
-        subprocess.check_call(["update-rc.d", service_name, "disable"])
-    else:
-        raise ValueError(
-            "Unable to detect {0} as SystemD, Upstart {1} or"
-            " SysV {2}".format(
-                service_name, upstart_file, sysv_file))
-    return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
-                   initd_dir="/etc/init.d", **kwargs):
-    """Resume a system service.
-
-    Reenable starting again at boot. Start the service.
-
-    :param service_name: the name of the service to resume
-    :param init_dir: the path to the init dir
-    :param initd dir: the path to the initd dir
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
-    sysv_file = os.path.join(initd_dir, service_name)
-    if init_is_systemd():
-        service('unmask', service_name)
-        service('enable', service_name)
-    elif os.path.exists(upstart_file):
-        override_path = os.path.join(
-            init_dir, '{}.override'.format(service_name))
-        if os.path.exists(override_path):
-            os.unlink(override_path)
-    elif os.path.exists(sysv_file):
-        subprocess.check_call(["update-rc.d", service_name, "enable"])
-    else:
-        raise ValueError(
-            "Unable to detect {0} as SystemD, Upstart {1} or"
-            " SysV {2}".format(
-                service_name, upstart_file, sysv_file))
-    started = service_running(service_name, **kwargs)
-
-    if not started:
-        started = service_start(service_name, **kwargs)
-    return started
-
-
-def service(action, service_name, **kwargs):
-    """Control a system service.
-
-    :param action: the action to take on the service
-    :param service_name: the name of the service to perform the action on
-    :param **kwargs: additional params to be passed to the service command in
-                    the form of key=value.
-    """
-    if init_is_systemd():
-        cmd = ['systemctl', action, service_name]
-    else:
-        cmd = ['service', service_name, action]
-        for key, value in six.iteritems(kwargs):
-            parameter = '%s=%s' % (key, value)
-            cmd.append(parameter)
-    return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name, **kwargs):
-    """Determine whether a system service is running.
-
-    :param service_name: the name of the service
-    :param **kwargs: additional args to pass to the service command. This is
-                     used to pass additional key=value arguments to the
-                     service command line for managing specific instance
-                     units (e.g. service ceph-osd status id=2). The kwargs
-                     are ignored in systemd services.
- """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. - - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - 
"""Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. - :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not 
os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - # lets see if we can grab the file and compare the context, to avoid doing - # a write. - existing_content = None - existing_uid, existing_gid = None, None - try: - with open(path, 'rb') as target: - existing_content = target.read() - stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: - pass - if content != existing_content: - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), - level=DEBUG) - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): - content = content.encode('UTF-8') - target.write(content) - return - # the contents were the same, but we might still need to change the - # ownership. - if existing_uid != uid: - log("Changing uid on already existing content: {} -> {}" - .format(existing_uid, uid), level=DEBUG) - os.chown(path, uid, -1) - if existing_gid != gid: - log("Changing gid on already existing content: {} -> {}" - .format(existing_gid, gid), level=DEBUG) - os.chown(path, -1, gid) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab""" - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file""" - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def fstab_mount(mountpoint): - """Mount filesystem using fstab""" - cmd_args = ['mount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - return True - - -def file_hash(path, hash_type='md5'): - """Generate a hash checksum of the contents of 'path' or None if not found. - - :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
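[editor note] write_file() above reads the target first and skips the write when the bytes are identical; that matters because the restart helpers further down key off file hashes and mtimes. A minimal sketch of the compare-before-write pattern, assuming bytes content:

    import os

    def write_if_changed(path, content, perms=0o444):
        existing = None
        try:
            with open(path, 'rb') as f:
                existing = f.read()
        except (IOError, OSError):
            pass  # first write; file does not exist yet
        if existing == content:
            return False  # identical content: no write, no hash/mtime churn
        with open(path, 'wb') as f:
            os.fchmod(f.fileno(), perms)
            f.write(content)
        return True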
- """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. 
- @param restart_map: {file: [service, ...]} - @param stopstart: whether to stop, start or restart a service - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result of lambda_f() - """ - if restart_functions is None: - restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() - # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] - # create a flat list of ordered services without duplicates from lists - services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) - if services_list: - actions = ('stop', 'start') if stopstart else ('restart',) - for service_name in services_list: - if service_name in restart_functions: - restart_functions[service_name](service_name) - else: - for action in actions: - service(action, service_name) - return r - - -def pwgen(length=None): - """Generate a random pasword.""" - if length is None: - # A random length is ok to use a weak PRNG - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def is_phy_iface(interface): - """Returns True if interface is not virtual, otherwise False.""" - if interface: - sys_net = '/sys/class/net' - if os.path.isdir(sys_net): - for iface in glob.glob(os.path.join(sys_net, '*')): - if '/virtual/' in os.path.realpath(iface): - continue - - if interface == os.path.basename(iface): - return True - - return False - - -def get_bond_master(interface): - """Returns bond master if interface is bond slave otherwise None. 
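[editor note] is_phy_iface() above leans on a sysfs detail worth spelling out: virtual devices (loopback, bridges, veths) resolve under /sys/devices/virtual/net, while real NICs resolve under their bus path. Distilled:

    import os

    def is_physical(interface):
        # virtual devices live under .../devices/virtual/net once resolved;
        # physical NICs resolve to their PCI/USB bus path instead
        path = os.path.realpath('/sys/class/net/%s' % interface)
        return os.path.exists(path) and '/virtual/' not in path

    # is_physical('lo') -> False; is_physical('eth0') -> True on real hardware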
- - NOTE: the provided interface is expected to be physical - """ - if interface: - iface_path = '/sys/class/net/%s' % (interface) - if os.path.exists(iface_path): - if '/virtual/' in os.path.realpath(iface_path): - return None - - master = os.path.join(iface_path, 'master') - if os.path.exists(master): - master = os.path.realpath(master) - # make sure it is a bond master - if os.path.exists(os.path.join(master, 'bonding')): - return os.path.basename(master) - - return None - - -def list_nics(nic_type=None): - """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): - int_types = [nic_type] - else: - int_types = nic_type - - interfaces = [] - if nic_type: - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - ip_output = ip_output.split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + - r'[0-9]+\.[0-9]+)@.*', line) - if matched: - iface = matched.groups()[0] - else: - iface = line.split()[1].replace(":", "") - - if iface not in interfaces: - interfaces.append(iface) - else: - cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line.strip() for line in ip_output if line) - - key = re.compile('^[0-9]+:\s+(.+):') - for line in ip_output: - matched = re.search(key, line) - if matched: - iface = matched.group(1) - iface = iface.partition("@")[0] - if iface not in interfaces: - interfaces.append(iface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - """Set the Maximum Transmission Unit (MTU) on a network interface.""" - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - """Return the Maximum Transmission Unit (MTU) for a network interface.""" - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - """Return the Media Access Control (MAC) for a network interface.""" - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -@contextmanager -def chdir(directory): - """Change the current working directory to a different directory for a code - block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. - - :param str directory: The directory path to change to for this context. - """ - cur = os.getcwd() - try: - yield os.chdir(directory) - finally: - os.chdir(cur) - - -def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """Recursively change user and group ownership of files and directories - in given path. Doesn't chown path itself by default, only its children. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. 
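[editor note] The chdir() context manager above is handy for commands that must run from a particular directory; the finally clause restores the previous cwd even when the block raises. Example usage — the path is illustrative:

    import subprocess

    with chdir('/var/lib/ceph'):
        # cwd is /var/lib/ceph inside the block...
        subprocess.check_call(['du', '-sh', '.'])
    # ...and restored here, even if du had failed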
- :param bool follow_links: Also follow and chown links if True - :param bool chowntopdir: Also chown path itself if True - """ - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - if chowntopdir: - broken_symlink = os.path.lexists(path) and not os.path.exists(path) - if not broken_symlink: - chown(path, uid, gid) - for root, dirs, files in os.walk(path, followlinks=follow_links): - for name in dirs + files: - full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: - chown(full, uid, gid) - - -def lchownr(path, owner, group): - """Recursively change user and group ownership of files and directories - in a given path, not following symbolic links. See the documentation for - 'os.lchown' for more information. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - """ - chownr(path, owner, group, follow_links=False) - - -def owner(path): - """Returns a tuple containing the username & groupname owning the path. - - :param str path: the string path to retrieve the ownership - :return tuple(str, str): A (username, groupname) tuple containing the - name of the user and group owning the path. - :raises OSError: if the specified path does not exist - """ - stat = os.stat(path) - username = pwd.getpwuid(stat.st_uid)[0] - groupname = grp.getgrgid(stat.st_gid)[0] - return username, groupname - - -def get_total_ram(): - """The total amount of system RAM in bytes. - - This is what is reported by the OS, and may be overcommitted when - there are multiple containers hosted on the same machine. - """ - with open('/proc/meminfo', 'r') as f: - for line in f.readlines(): - if line: - key, value, unit = line.split() - if key == 'MemTotal:': - assert unit == 'kB', 'Unknown unit' - return int(value) * 1024 # Classic, not KiB. - raise NotImplementedError() - - -UPSTART_CONTAINER_TYPE = '/run/container_type' - - -def is_container(): - """Determine whether unit is running in a container - - @return: boolean indicating if unit is in a container - """ - if init_is_systemd(): - # Detect using systemd-detect-virt - return subprocess.call(['systemd-detect-virt', - '--container']) == 0 - else: - # Detect using upstart container file marker - return os.path.exists(UPSTART_CONTAINER_TYPE) - - -def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): - """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. - - This method has no effect if the path specified by updatedb_path does not - exist or is not a file. 
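[editor note] get_total_ram() above parses the first line of /proc/meminfo, which looks like "MemTotal: 16384256 kB"; multiplying by 1024 treats the kB unit as 1024-byte blocks. An equivalent standalone parse that tolerates lines without a unit column:

    def total_ram_bytes():
        with open('/proc/meminfo') as f:
            for line in f:
                fields = line.split()
                if fields and fields[0] == 'MemTotal:':
                    return int(fields[1]) * 1024  # value is reported in kB
        raise RuntimeError('MemTotal not found in /proc/meminfo')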
- - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/__init__.py b/ceph-osd/tests/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/centos.py b/ceph-osd/tests/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a396..00000000 --- a/ceph-osd/tests/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. 
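[editor note] Worked numbers for modulo_distribution() above (its docstring's time.wait() presumably means time.sleep()): unit ceph-osd/7 with modulo=3, wait=30 sleeps (7 % 3) * 30 = 30 seconds, while units 0, 3, 6 sleep 0 — or modulo * wait = 90 when non_zero_wait is set:

    import time

    def modulo_wait(unit_number, modulo=3, wait=30, non_zero_wait=False):
        t = (unit_number % modulo) * wait
        if non_zero_wait and t == 0:
            return modulo * wait  # keep "group 0" units off the leader's heels
        return t

    time.sleep(modulo_wait(7))   # 30s stagger before an expensive restart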
- - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index a6d375af..00000000 --- a/ceph-osd/tests/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,91 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. 
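[editor note] Both CompareHostReleases classes derive their ordering from list position, not alphabetical order — the whole point once release names wrap past 'z'. Distilled to a handful of releases:

    RELEASES = ('trusty', 'xenial', 'zesty', 'artful', 'bionic')

    class CompareRelease(object):
        def __init__(self, item):
            self.index = RELEASES.index(item)

        def __gt__(self, other):
            return self.index > RELEASES.index(str(other))

        def __lt__(self, other):
            return self.index < RELEASES.index(str(other))

        def __str__(self):
            return RELEASES[self.index]

    assert CompareRelease('bionic') > 'zesty'   # True, although 'b' < 'z'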
- - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-osd/tests/charmhelpers/core/hugepage.py b/ceph-osd/tests/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/ceph-osd/tests/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. 
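[editor note] The Ubuntu cmp_pkgrevno() above defers to apt's own comparator, which understands epochs and Debian revisions that naive string or tuple comparison gets wrong. A sketch assuming python-apt is installed:

    import apt_pkg
    apt_pkg.init()

    def newer_or_equal(installed, wanted):
        # version_compare returns <0 / 0 / >0 and is epoch-aware,
        # so '1:12.2.0-0ubuntu1' compares greater than '12.9.9'
        return apt_pkg.version_compare(installed, wanted) >= 0

    # newer_or_equal('12.2.4-0ubuntu1', '12.2.0') -> True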
- - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/ceph-osd/tests/charmhelpers/core/kernel.py b/ceph-osd/tests/charmhelpers/core/kernel.py deleted file mode 100644 index 2d404528..00000000 --- a/ceph-osd/tests/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
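[editor note] Worked numbers for the defaults in hugepage_support() above: 256 pages of 2MB need kernel.shmmax of at least 256 * 2 MiB when set_shmmax is requested, and the two-maps-per-page floor on vm.max_map_count stays well below the 65536 default:

    nr_hugepages = 256
    pagesize_bytes = 2 * 1024 ** 2                  # '2MB' via bytes_from_string()
    max_map_count = max(65536, 2 * nr_hugepages)    # floor of 512 never binds here
    shmmax_minsize = nr_hugepages * pagesize_bytes  # 536870912 bytes (512 MiB)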
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/ceph-osd/tests/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/ceph-osd/tests/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-osd/tests/charmhelpers/core/services/__init__.py b/ceph-osd/tests/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/ceph-osd/tests/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
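[editor note] is_module_loaded() above greps lsmod output with a start-of-line anchor so one module name cannot match as a prefix of another. The same check, with the module name regex-escaped as a small hardening tweak over the original:

    import re
    import subprocess

    def is_module_loaded(module):
        # lsmod prints one module per line, name in the first column
        lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
        return bool(re.search(r'^%s\s+' % re.escape(module), lsmod, re.M))

    # is_module_loaded('rbd') -> True once `modprobe rbd` has run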
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/ceph-osd/tests/charmhelpers/core/services/base.py b/ceph-osd/tests/charmhelpers/core/services/base.py deleted file mode 100644 index 179ad4f0..00000000 --- a/ceph-osd/tests/charmhelpers/core/services/base.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -from inspect import getargspec -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": , - "required_data": , - "provided_data": , - "data_ready": , - "data_lost": , - "start": , - "stop": , - "ports": , - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. 
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-osd/tests/charmhelpers/core/services/helpers.py b/ceph-osd/tests/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d2..00000000 --- a/ceph-osd/tests/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. 
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/ceph-osd/tests/charmhelpers/core/strutils.py b/ceph-osd/tests/charmhelpers/core/strutils.py deleted file mode 100644 index e8df0452..00000000 --- a/ceph-osd/tests/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. 
- """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/ceph-osd/tests/charmhelpers/core/sysctl.py b/ceph-osd/tests/charmhelpers/core/sysctl.py deleted file mode 100644 index 1f188d8c..00000000 --- a/ceph-osd/tests/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-osd/tests/charmhelpers/core/templating.py b/ceph-osd/tests/charmhelpers/core/templating.py deleted file mode 100644 index 9014015c..00000000 --- a/ceph-osd/tests/charmhelpers/core/templating.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
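A short sketch of the sysctl create() helper deleted above; the keys and target paths are examples only, and the call needs root since it finishes with `sysctl -p`:

    from charmhelpers.core.sysctl import create

    # Plain dict input: each pair is written out as "key=value", then loaded.
    create({'vm.swappiness': 1, 'kernel.pid_max': 4194303},
           '/etc/sysctl.d/50-charm-demo.conf')

    # A YAML mapping string is also accepted; it is parsed with yaml.safe_load().
    create("{net.core.somaxconn: 4096}", '/etc/sysctl.d/51-charm-demo.conf')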
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/ceph-osd/tests/charmhelpers/core/unitdata.py b/ceph-osd/tests/charmhelpers/core/unitdata.py deleted file mode 100644 index ab554327..00000000 --- a/ceph-osd/tests/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
- -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. - >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import itertools -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu ' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are not persisted unless :meth:`flush` is called. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. - - Note: to facilitate unit testing, ':memory:' can be passed as the - path parameter which causes sqlite3 to only build the db in memory. - This should only be used for testing purposes. 
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
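The Storage class above is self-contained enough to exercise in isolation; a sketch using the documented ':memory:' testing path:

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')             # sqlite in memory; no unit state file
    with db.hook_scope('config-changed'):
        db.update({'a': 1, 'b': 2}, prefix='cfg.')

    assert db.getrange('cfg.', strip=True) == {'a': 1, 'b': 2}

    # delta() only reports differences; update() must be called to persist them.
    delta = db.delta({'a': 1, 'b': 3}, 'cfg.')
    assert delta.b.previous == 2 and delta.b.current == 3
    assert 'a' not in delta              # unchanged keys are not reported
    db.close()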
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/ceph-osd/tests/charmhelpers/osplatform.py b/ceph-osd/tests/charmhelpers/osplatform.py deleted file mode 100644 index d9a4d5c0..00000000 --- a/ceph-osd/tests/charmhelpers/osplatform.py +++ /dev/null @@ -1,25 +0,0 @@ -import platform - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) From c43c9c730692500695ac44cea3c3cf6939ab7f80 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 3 Oct 2018 09:35:58 -0500 Subject: [PATCH 1573/2699] Update requirements Change-Id: I6b0f06d94defd65bfa491ac7a5b5b1063bdfc127 --- ceph-radosgw/requirements.txt | 1 - ceph-radosgw/test-requirements.txt | 11 +- ceph-radosgw/tests/charmhelpers/__init__.py | 97 -- .../tests/charmhelpers/contrib/__init__.py | 13 - .../charmhelpers/contrib/amulet/__init__.py | 13 - .../charmhelpers/contrib/amulet/deployment.py | 99 -- .../charmhelpers/contrib/amulet/utils.py | 821 --------- .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 357 ---- .../contrib/openstack/amulet/utils.py | 1533 ----------------- .../tests/charmhelpers/core/__init__.py | 13 - .../tests/charmhelpers/core/decorators.py | 55 - ceph-radosgw/tests/charmhelpers/core/files.py | 43 - ceph-radosgw/tests/charmhelpers/core/fstab.py | 132 -- .../tests/charmhelpers/core/hookenv.py | 1354 --------------- ceph-radosgw/tests/charmhelpers/core/host.py | 1042 ----------- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 72 - .../charmhelpers/core/host_factory/ubuntu.py | 91 - .../tests/charmhelpers/core/hugepage.py | 69 - .../tests/charmhelpers/core/kernel.py | 72 - .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 - .../core/kernel_factory/ubuntu.py | 13 - .../charmhelpers/core/services/__init__.py | 16 - .../tests/charmhelpers/core/services/base.py | 362 ---- .../charmhelpers/core/services/helpers.py | 290 ---- .../tests/charmhelpers/core/strutils.py | 129 -- .../tests/charmhelpers/core/sysctl.py | 58 - .../tests/charmhelpers/core/templating.py | 93 - .../tests/charmhelpers/core/unitdata.py | 525 ------ ceph-radosgw/tests/charmhelpers/osplatform.py | 25 - 33 files changed, 6 insertions(+), 7436 deletions(-) delete mode 100644 ceph-radosgw/tests/charmhelpers/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/decorators.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/files.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/fstab.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/hookenv.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/host.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/hugepage.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py delete mode 100644 
ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/services/__init__.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/services/base.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/services/helpers.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/strutils.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/sysctl.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/templating.py delete mode 100644 ceph-radosgw/tests/charmhelpers/core/unitdata.py delete mode 100644 ceph-radosgw/tests/charmhelpers/osplatform.py diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 6a3271b0..b8fec1e2 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -2,7 +2,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.8.0,<1.9.0 -PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 67c30f1a..2b2c0e11 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -1,16 +1,16 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 -requests==2.6.0 +requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +amulet>=1.14.3,<2.0;python_version=='2.7' +bundletester>=0.6.1,<1.0;python_version=='2.7' python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -21,8 +21,9 @@ python-novaclient>=2.30.1 python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 -stestr>=1.0.0 distro-info +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox pytz +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-radosgw/tests/charmhelpers/__init__.py b/ceph-radosgw/tests/charmhelpers/__init__.py deleted file mode 100644 index e7aa4715..00000000 --- a/ceph-radosgw/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
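The `;python_version=='2.7'` suffixes added to test-requirements.txt earlier in this patch are PEP 508 environment markers: pip evaluates the marker against the interpreter running the install and skips the requirement when it is false, which is how the py2-only amulet/bundletester stack is kept out of Python 3 gates. A sketch of the same evaluation via the packaging library (an assumption here, not something the charm itself uses):

    from packaging.markers import Marker

    marker = Marker("python_version == '2.7'")
    # True only under a 2.7 interpreter; pip performs an equivalent check
    # before deciding whether to install a requirement carrying this marker.
    print(marker.evaluate())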
-from __future__ import print_function -from __future__ import absolute_import - -import functools -import inspect -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa - - -# Holds a list of mapping of mangled function names that have been deprecated -# using the @deprecate decorator below. This is so that the warning is only -# printed once for each usage of the function. -__deprecated_functions = {} - - -def deprecate(warning, date=None, log=None): - """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month - that the function is officially going to be removed. - - usage: - - @deprecate('use core/fetch/add_source() instead', '2017-04') - def contributed_add_source_thing(...): - ... - - And it then prints to the log ONCE that the function is deprecated. - The reason for passing the logging function (log) is so that hookenv.log - can be used for a charm if needed. - - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the - function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout - """ - def wrap(f): - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - try: - module = inspect.getmodule(f) - file = inspect.getsourcefile(f) - lines = inspect.getsourcelines(f) - f_name = "{}-{}-{}..{}-{}".format( - module.__name__, file, lines[0], lines[-1], f.__name__) - except (IOError, TypeError): - # assume it was local, so just use the name of the function - f_name = f.__name__ - if f_name not in __deprecated_functions: - __deprecated_functions[f_name] = True - s = "DEPRECATION WARNING: Function {} is being removed".format( - f.__name__) - if date: - s = "{} on/around {}".format(s, date) - if warning: - s = "{} : {}".format(s, warning) - if log: - log(s) - else: - print(s) - return f(*args, **kwargs) - return wrapped_f - return wrap diff --git a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
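The deprecate decorator deleted above only needs charmhelpers importable; a sketch with a made-up helper name:

    from charmhelpers import deprecate

    @deprecate('use some_new_helper() instead', date='2019-01')
    def some_old_helper():
        return 42

    some_old_helper()   # prints the DEPRECATION WARNING once
    some_old_helper()   # silent: this function is already recorded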
diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index d21d01d8..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. 
- """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints'), - storage=this_service.get('storage')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints'), - storage=svc.get('storage')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 8a6b7644..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2', - 'memcached']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
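The expected dict taken by validate_config_data() above mixes literal values with callables (a callable is applied to the actual value and must return a truthy result). A sketch as it would appear inside an amulet test, where sentry_unit is a deployed unit's sentry and the file path and keys are examples:

    from charmhelpers.contrib.amulet.utils import AmuletUtils

    u = AmuletUtils()
    ret = u.validate_config_data(
        sentry_unit, '/etc/ceph/ceph.conf', 'global',
        {
            'auth supported': 'cephx',   # literal comparison
            'fsid': u.not_null,          # callable: any non-None value passes
            'mon host': u.not_null,
        })
    assert ret is None, ret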
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. 
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x "{}"'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
-        """
-        pid_dict = {}
-        for sentry_unit, process_list in six.iteritems(unit_processes):
-            pid_dict[sentry_unit] = {}
-            for process in process_list:
-                pids = self.get_process_id_list(
-                    sentry_unit, process, expect_success=expect_success)
-                pid_dict[sentry_unit].update({process: pids})
-        return pid_dict
-
-    def validate_unit_process_ids(self, expected, actual):
-        """Validate process id quantities for services on units."""
-        self.log.debug('Checking units for running processes...')
-        self.log.debug('Expected PIDs: {}'.format(expected))
-        self.log.debug('Actual PIDs: {}'.format(actual))
-
-        if len(actual) != len(expected):
-            return ('Unit count mismatch. expected, actual: {}, '
-                    '{} '.format(len(expected), len(actual)))
-
-        for (e_sentry, e_proc_names) in six.iteritems(expected):
-            e_sentry_name = e_sentry.info['unit_name']
-            if e_sentry in actual.keys():
-                a_proc_names = actual[e_sentry]
-            else:
-                return ('Expected sentry ({}) not found in actual dict data.'
-                        '{}'.format(e_sentry_name, e_sentry))
-
-            if len(e_proc_names.keys()) != len(a_proc_names.keys()):
-                return ('Process name count mismatch. expected, actual: {}, '
-                        '{}'.format(len(e_proc_names.keys()),
-                                    len(a_proc_names.keys())))
-
-            for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
-                    zip(e_proc_names.items(), a_proc_names.items()):
-                if e_proc_name != a_proc_name:
-                    return ('Process name mismatch. expected, actual: {}, '
-                            '{}'.format(e_proc_name, a_proc_name))
-
-                a_pids_length = len(a_pids)
-                fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
-                            '{}, {} ({})'.format(e_sentry_name, e_proc_name,
-                                                 e_pids, a_pids_length,
-                                                 a_pids))
-
-                # If expected is a list, ensure at least one PID quantity match
-                if isinstance(e_pids, list) and \
-                        a_pids_length not in e_pids:
-                    return fail_msg
-                # If expected is not bool and not list,
-                # ensure PID quantities match
-                elif not isinstance(e_pids, bool) and \
-                        not isinstance(e_pids, list) and \
-                        a_pids_length != e_pids:
-                    return fail_msg
-                # If expected is bool True, ensure 1 or more PIDs exist
-                elif isinstance(e_pids, bool) and \
-                        e_pids is True and a_pids_length < 1:
-                    return fail_msg
-                # If expected is bool False, ensure 0 PIDs exist
-                elif isinstance(e_pids, bool) and \
-                        e_pids is False and a_pids_length != 0:
-                    return fail_msg
-                else:
-                    self.log.debug('PID check OK: {} {} {}: '
-                                   '{}'.format(e_sentry_name, e_proc_name,
-                                               e_pids, a_pids))
-        return None
-
-    def validate_list_of_identical_dicts(self, list_of_dicts):
-        """Check that all dicts within a list are identical."""
-        hashes = []
-        for _dict in list_of_dicts:
-            hashes.append(hash(frozenset(_dict.items())))
-
-        self.log.debug('Hashes: {}'.format(hashes))
-        if len(set(hashes)) == 1:
-            self.log.debug('Dicts within list are identical')
-        else:
-            return 'Dicts within list are not identical'
-
-        return None
-
-    def validate_sectionless_conf(self, file_contents, expected):
-        """A crude conf parser. Useful to inspect configuration files which
-        do not have section headers (as would be necessary in order to use
-        the configparser). Such as openstack-dashboard or rabbitmq confs."""
-        for line in file_contents.split('\n'):
-            if '=' in line:
-                args = line.split('=')
-                if len(args) <= 1:
-                    continue
-                key = args[0].strip()
-                value = args[1].strip()
-                if key in expected.keys():
-                    if expected[key] != value:
-                        msg = ('Config mismatch. Expected, actual: {}, '
-                               '{}'.format(expected[key], value))
-                        amulet.raise_status(amulet.FAIL, msg=msg)
-
-    def get_unit_hostnames(self, units):
-        """Return a dict of juju unit names to hostnames."""
-        host_names = {}
-        for unit in units:
-            host_names[unit.info['unit_name']] = \
-                str(unit.file_contents('/etc/hostname').strip())
-        self.log.debug('Unit host names: {}'.format(host_names))
-        return host_names
-
-    def run_cmd_unit(self, sentry_unit, cmd):
-        """Run a command on a unit, return the output and exit code."""
-        output, code = sentry_unit.run(cmd)
-        if code == 0:
-            self.log.debug('{} `{}` command returned {} '
-                           '(OK)'.format(sentry_unit.info['unit_name'],
-                                         cmd, code))
-        else:
-            msg = ('{} `{}` command returned {} '
-                   '{}'.format(sentry_unit.info['unit_name'],
-                               cmd, code, output))
-            amulet.raise_status(amulet.FAIL, msg=msg)
-        return str(output), code
-
-    def file_exists_on_unit(self, sentry_unit, file_name):
-        """Check if a file exists on a unit."""
-        try:
-            sentry_unit.file_stat(file_name)
-            return True
-        except IOError:
-            return False
-        except Exception as e:
-            msg = 'Error checking file {}: {}'.format(file_name, e)
-            amulet.raise_status(amulet.FAIL, msg=msg)
-
-    def file_contents_safe(self, sentry_unit, file_name,
-                           max_wait=60, fatal=False):
-        """Get file contents from a sentry unit. Wrap amulet file_contents
-        with retry logic to address races where a file checks as existing,
-        but no longer exists by the time file_contents is called.
-        Return None if file not found. Optionally raise if fatal is True."""
-        unit_name = sentry_unit.info['unit_name']
-        file_contents = False
-        tries = 0
-        while not file_contents and tries < (max_wait / 4):
-            try:
-                file_contents = sentry_unit.file_contents(file_name)
-            except IOError:
-                self.log.debug('Attempt {} to open file {} from {} '
-                               'failed'.format(tries, file_name,
-                                               unit_name))
-                time.sleep(4)
-                tries += 1
-
-        if file_contents:
-            return file_contents
-        elif not fatal:
-            return None
-        elif fatal:
-            msg = 'Failed to get file contents from unit.'
-            amulet.raise_status(amulet.FAIL, msg)
-
-    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
-        """Open a TCP socket to check for a listening service on a host.
-
-        :param host: host name or IP address, default to localhost
-        :param port: TCP port number, default to 22
-        :param timeout: Connect timeout, default to 15 seconds
-        :returns: True if successful, False if connect failed
-        """
-
-        # Resolve host name if possible
-        try:
-            connect_host = socket.gethostbyname(host)
-            host_human = "{} ({})".format(connect_host, host)
-        except socket.error as e:
-            self.log.warn('Unable to resolve address: '
-                          '{} ({}) Trying anyway!'.format(host, e))
-            connect_host = host
-            host_human = connect_host
-
-        # Attempt socket connection
-        try:
-            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            knock.settimeout(timeout)
-            knock.connect((connect_host, port))
-            knock.close()
-            self.log.debug('Socket connect OK for host '
-                           '{} on port {}.'.format(host_human, port))
-            return True
-        except socket.error as e:
-            self.log.debug('Socket connect FAIL for'
-                           ' {} port {} ({})'.format(host_human, port, e))
-            return False
-
-    def port_knock_units(self, sentry_units, port=22,
-                         timeout=15, expect_success=True):
-        """Open a TCP socket to check for a listening service on each
-        listed juju unit.
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - - # amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Translate to amulet's built in run_action(). Deprecated. - - Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is no longer used - - @return action_id. - """ - self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' - 'deprecated for amulet.run_action') - return unit_sentry.run_action(action, action_args=params) - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - action_id a string action uuid - _check_output parameter is no longer used - """ - data = amulet.actions.get_action_output(action_id, full_output=True) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 1c96752a..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. 
-
-        Determine if the local branch being tested is derived from its
-        stable or next (dev) branch, and based on this, use the corresponding
-        stable or next branches for the other_services."""
-
-        self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
-        # Charms outside the ~openstack-charmers namespace
-        base_charms = {
-            'mysql': ['trusty'],
-            'mongodb': ['trusty'],
-            'nrpe': ['trusty', 'xenial'],
-        }
-
-        for svc in other_services:
-            # If a location has been explicitly set, use it
-            if svc.get('location'):
-                continue
-            if svc['name'] in base_charms:
-                # NOTE: not all charms have support for all series we
-                # want/need to test against, so fix to most recent
-                # that each base charm supports
-                target_series = self.series
-                if self.series not in base_charms[svc['name']]:
-                    target_series = base_charms[svc['name']][-1]
-                svc['location'] = 'cs:{}/{}'.format(target_series,
-                                                    svc['name'])
-            elif self.stable:
-                svc['location'] = 'cs:{}/{}'.format(self.series,
-                                                    svc['name'])
-            else:
-                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
-                    self.series,
-                    svc['name']
-                )
-
-        return other_services
-
-    def _add_services(self, this_service, other_services, use_source=None,
-                      no_origin=None):
-        """Add services to the deployment and optionally set
-        openstack-origin/source.
-
-        :param this_service dict: Service dictionary describing the service
-                                  whose amulet tests are being run
-        :param other_services dict: List of service dictionaries describing
-                                    the services needed to support the target
-                                    service
-        :param use_source list: List of services which use the 'source' config
-                                option rather than 'openstack-origin'
-        :param no_origin list: List of services which do not support setting
-                               the Cloud Archive.
-        Service Dict:
-            {
-                'name': str charm-name,
-                'units': int number of units,
-                'constraints': dict of juju constraints,
-                'location': str location of charm,
-            }
-        eg
-        this_service = {
-            'name': 'openvswitch-odl',
-            'constraints': {'mem': '8G'},
-        }
-        other_services = [
-            {
-                'name': 'nova-compute',
-                'units': 2,
-                'constraints': {'mem': '4G'},
-                'location': cs:~bob/xenial/nova-compute
-            },
-            {
-                'name': 'mysql',
-                'constraints': {'mem': '2G'},
-            },
-            {'neutron-api-odl'}]
-        use_source = ['mysql']
-        no_origin = ['neutron-api-odl']
-        """
-        self.log.info('OpenStackAmuletDeployment: adding services')
-
-        other_services = self._determine_branch_locations(other_services)
-
-        super(OpenStackAmuletDeployment, self)._add_services(this_service,
-                                                             other_services)
-
-        services = other_services
-        services.append(this_service)
-
-        use_source = use_source or []
-        no_origin = no_origin or []
-
-        # Charms which should use the source config option
-        use_source = list(set(
-            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
-                          'ceph-proxy', 'percona-cluster', 'lxd']))
-
-        # Charms which cannot use openstack-origin, i.e. many subordinates
-        no_origin = list(set(
-            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
-                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
-                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
-                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
-
-        if self.openstack:
-            for svc in services:
-                if svc['name'] not in use_source + no_origin:
-                    config = {'openstack-origin': self.openstack}
-                    self.d.configure(svc['name'], config)
-
-        if self.source:
-            for svc in services:
-                if svc['name'] in use_source and svc['name'] not in no_origin:
-                    config = {'source': self.source}
-                    self.d.configure(svc['name'], config)
-
-    def _configure_services(self, configs):
-        """Configure all of the services."""
-        self.log.info('OpenStackAmuletDeployment: configure services')
-        for service, config in six.iteritems(configs):
-            self.d.configure(service, config)
-
-    def _auto_wait_for_status(self, message=None, exclude_services=None,
-                              include_only=None, timeout=None):
-        """Wait for all units to have a specific extended status, except
-        for any defined as excluded. Unless specified via message, any
-        status containing any case of 'ready' will be considered a match.
-
-        Examples of message usage:
-
-        Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
-            message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
-        Wait for all units to reach this status (exact match):
-            message = re.compile('^Unit is ready and clustered$')
-
-        Wait for all units to reach any one of these (exact match):
-            message = re.compile('Unit is ready|OK|Ready')
-
-        Wait for at least one unit to reach this status (exact match):
-            message = {'ready'}
-
-        See Amulet's sentry.wait_for_messages() for message usage detail.
-        https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
-        :param message: Expected status match
-        :param exclude_services: List of juju service names to ignore,
-            not to be used in conjunction with include_only.
-        :param include_only: List of juju service names to exclusively check,
-            not to be used in conjunction with exclude_services.
-        :param timeout: Maximum time in seconds to wait for status match
-        :returns: None. Raises if timeout is hit.
-        """
-        if not timeout:
-            timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
-        self.log.info('Waiting for extended status on units for {}s...'
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('cosmic', None): self.cosmic_rocky, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 936b4036..00000000 --- a/ceph-radosgw/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1533 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. 
-    """
-
-    def __init__(self, log_level=ERROR):
-        """Initialize the deployment environment."""
-        super(OpenStackAmuletUtils, self).__init__(log_level)
-
-    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected, openstack_release=None):
-        """Validate endpoint data. Pick the correct validator based on
-        OpenStack release. Expected data should be in the v2 format:
-        {
-            'id': id,
-            'region': region,
-            'adminurl': adminurl,
-            'internalurl': internalurl,
-            'publicurl': publicurl,
-            'service_id': service_id}
-
-        """
-        validation_function = self.validate_v2_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_endpoint_data
-            expected = {
-                'id': expected['id'],
-                'region': expected['region'],
-                'region_id': 'RegionOne',
-                'url': self.valid_url,
-                'interface': self.not_null,
-                'service_id': expected['service_id']}
-        return validation_function(endpoints, admin_port, internal_port,
-                                   public_port, expected)
-
-    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected):
-        """Validate endpoint data.
-
-        Validate actual endpoint data vs expected endpoint data. The ports
-        are used to find the matching endpoint.
-        """
-        self.log.debug('Validating endpoint data...')
-        self.log.debug('actual: {}'.format(repr(endpoints)))
-        found = False
-        for ep in endpoints:
-            self.log.debug('endpoint: {}'.format(repr(ep)))
-            if (admin_port in ep.adminurl and
-                    internal_port in ep.internalurl and
-                    public_port in ep.publicurl):
-                found = True
-                actual = {'id': ep.id,
-                          'region': ep.region,
-                          'adminurl': ep.adminurl,
-                          'internalurl': ep.internalurl,
-                          'publicurl': ep.publicurl,
-                          'service_id': ep.service_id}
-                ret = self._validate_dict_data(expected, actual)
-                if ret:
-                    return 'unexpected endpoint data - {}'.format(ret)
-
-        if not found:
-            return 'endpoint not found'
-
-    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected, expected_num_eps=3):
-        """Validate keystone v3 endpoint data.
-
-        Validate the v3 endpoint data which has changed from v2. The
-        ports are used to find the matching endpoint.
-
-        The new v3 endpoint data looks like:
-
-        [<Endpoint enabled=True,
-                   id=...,
-                   interface=...,
-                   links={u'self': u'<RESTful URL of this endpoint>'},
-                   region=RegionOne,
-                   region_id=RegionOne,
-                   service_id=17f842a0dc084b928e476fafe67e4095,
-                   url=http://10.5.6.5:9312>,
-         <Endpoint enabled=True,
-                   id=...,
-                   interface=...,
-                   links={u'self': u'<RESTful URL of this endpoint>'},
-                   region=RegionOne,
-                   region_id=RegionOne,
-                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
-                   url=http://10.5.6.6:35357/v3>,
-        ... ]
-        """
-        self.log.debug('Validating v3 endpoint data...')
-        self.log.debug('actual: {}'.format(repr(endpoints)))
-        found = []
-        for ep in endpoints:
-            self.log.debug('endpoint: {}'.format(repr(ep)))
-            if ((admin_port in ep.url and ep.interface == 'admin') or
-                    (internal_port in ep.url and ep.interface == 'internal') or
-                    (public_port in ep.url and ep.interface == 'public')):
-                found.append(ep.interface)
-                # note we ignore the links member.
-                actual = {'id': ep.id,
-                          'region': ep.region,
-                          'region_id': ep.region_id,
-                          'interface': self.not_null,
-                          'url': ep.url,
-                          'service_id': ep.service_id, }
-                ret = self._validate_dict_data(expected, actual)
-                if ret:
-                    return 'unexpected endpoint data - {}'.format(ret)
-
-        if len(found) != expected_num_eps:
-            return 'Unexpected number of endpoints found'
-
-    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
-        """Convert v2 endpoint data into v3.
-
-        {
-            'service_name1': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-            'service_name2': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-        }
-        """
-        self.log.warn("Endpoint ID and Region ID validation is limited to not "
-                      "null checks after v2 to v3 conversion")
-        for svc in ep_data.keys():
-            assert len(ep_data[svc]) == 1, "Unknown data format"
-            svc_ep_data = ep_data[svc][0]
-            ep_data[svc] = [
-                {
-                    'url': svc_ep_data['adminURL'],
-                    'interface': 'admin',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['publicURL'],
-                    'interface': 'public',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['internalURL'],
-                    'interface': 'internal',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null}]
-        return ep_data
-
-    def validate_svc_catalog_endpoint_data(self, expected, actual,
-                                           openstack_release=None):
-        """Validate service catalog endpoint data. Pick the correct validator
-        for the OpenStack version. Expected data should be in the v2 format:
-        {
-            'service_name1': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-            'service_name2': [
-                {
-                    'adminURL': adminURL,
-                    'id': id,
-                    'region': region,
-                    'publicURL': publicURL,
-                    'internalURL': internalURL
-                }],
-        }
-
-        """
-        validation_function = self.validate_v2_svc_catalog_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_svc_catalog_endpoint_data
-            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
-        return validation_function(expected, actual)
-
-    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate service catalog endpoint data.
-
-        Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints.
-        """
-        self.log.debug('Validating service catalog endpoint data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in six.iteritems(expected):
-            if k in actual:
-                ret = self._validate_dict_data(expected[k][0], actual[k][0])
-                if ret:
-                    return self.endpoint_error(k, ret)
-            else:
-                return "endpoint {} does not exist".format(k)
-        return ret
-
-    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate the keystone v3 catalog endpoint data.
-
-        Validate a list of dictionaries that make up the keystone v3 service
-        catalogue.
-
-        It is in the form of:
-
-
-        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
-                        u'interface': u'admin',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:35357/v3'},
-                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
-                        u'interface': u'public',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:5000/v3'},
-                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
-                        u'interface': u'internal',
-                        u'region': u'RegionOne',
-                        u'region_id': u'RegionOne',
-                        u'url': u'http://10.5.5.224:5000/v3'}],
-         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
-                           u'interface': u'public',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9311'},
-                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
-                           u'interface': u'internal',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9311'},
-                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
-                           u'interface': u'admin',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u'http://10.5.5.223:9312'}]}
-
-        Note that an added complication is that the order of admin, public
-        and internal endpoints against 'interface' may vary in each region.
-
-        Thus, the function sorts the expected and actual lists using the
-        interface key as a sort key, prior to the comparison.
-        """
-        self.log.debug('Validating v3 service catalog endpoint data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in six.iteritems(expected):
-            if k in actual:
-                l_expected = sorted(v, key=lambda x: x['interface'])
-                l_actual = sorted(actual[k], key=lambda x: x['interface'])
-                if len(l_actual) != len(l_expected):
-                    return ("endpoint {} has differing number of interfaces "
-                            " - expected({}), actual({})"
-                            .format(k, len(l_expected), len(l_actual)))
-                for i_expected, i_actual in zip(l_expected, l_actual):
-                    self.log.debug("checking interface {}"
-                                   .format(i_expected['interface']))
-                    ret = self._validate_dict_data(i_expected, i_actual)
-                    if ret:
-                        return self.endpoint_error(k, ret)
-            else:
-                return "endpoint {} does not exist".format(k)
-        return ret
-
-    def validate_tenant_data(self, expected, actual):
-        """Validate tenant data.
-
-        Validate a list of actual tenant data vs list of expected tenant
-        data.
-        """
-        self.log.debug('Validating tenant data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for e in expected:
-            found = False
-            for act in actual:
-                a = {'enabled': act.enabled, 'description': act.description,
-                     'name': act.name, 'id': act.id}
-                if e['name'] == a['name']:
-                    found = True
-                    ret = self._validate_dict_data(e, a)
-                    if ret:
-                        return "unexpected tenant data - {}".format(ret)
-            if not found:
-                return "tenant {} does not exist".format(e['name'])
-        return ret
-
-    def validate_role_data(self, expected, actual):
-        """Validate role data.
-
-        Validate a list of actual role data vs a list of expected role
-        data.
-        """
-        self.log.debug('Validating role data...')
-        self.log.debug('actual: {}'.format(repr(actual)))
-        for e in expected:
-            found = False
-            for act in actual:
-                a = {'name': act.name, 'id': act.id}
-                if e['name'] == a['name']:
-                    found = True
-                    ret = self._validate_dict_data(e, a)
-                    if ret:
-                        return "unexpected role data - {}".format(ret)
-            if not found:
-                return "role {} does not exist".format(e['name'])
-        return ret
-
-    def validate_user_data(self, expected, actual, api_version=None):
-        """Validate user data.
-
-        Validate a list of actual user data vs a list of expected user
-        data.
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
-        """
-        self.log.debug("Setting keystone preferred-api-version: '{}'"
-                       "".format(api_version))
-
-        config = {'preferred-api-version': api_version}
-        deployment.d.configure('keystone', config)
-        deployment._auto_wait_for_status()
-        self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
-
-    def authenticate_cinder_admin(self, keystone, api_version=2):
-        """Authenticates admin user with cinder."""
-        self.log.debug('Authenticating cinder admin...')
-        _clients = {
-            1: cinder_client.Client,
-            2: cinder_clientv2.Client}
-        return _clients[api_version](session=keystone.session)
-
-    def authenticate_keystone(self, keystone_ip, username, password,
-                              api_version=False, admin_port=False,
-                              user_domain_name=None, domain_name=None,
-                              project_domain_name=None, project_name=None):
-        """Authenticate with Keystone"""
-        self.log.debug('Authenticating with keystone...')
-        if not api_version:
-            api_version = 2
-        sess, auth = self.get_keystone_session(
-            keystone_ip=keystone_ip,
-            username=username,
-            password=password,
-            api_version=api_version,
-            admin_port=admin_port,
-            user_domain_name=user_domain_name,
-            domain_name=domain_name,
-            project_domain_name=project_domain_name,
-            project_name=project_name
-        )
-        if api_version == 2:
-            client = keystone_client.Client(session=sess)
-        else:
-            client = keystone_client_v3.Client(session=sess)
-        # This populates the client.service_catalog
-        client.auth_ref = auth.get_access(sess)
-        return client
-
-    def get_keystone_session(self, keystone_ip, username, password,
-                             api_version=False, admin_port=False,
-                             user_domain_name=None, domain_name=None,
-                             project_domain_name=None, project_name=None):
-        """Return a keystone session object"""
-        ep = self.get_keystone_endpoint(keystone_ip,
-                                        api_version=api_version,
-                                        admin_port=admin_port)
-        if api_version == 2:
-            auth = v2.Password(
-                username=username,
-                password=password,
-                tenant_name=project_name,
-                auth_url=ep
-            )
-            sess = keystone_session.Session(auth=auth)
-        else:
-            auth = v3.Password(
-                user_domain_name=user_domain_name,
-                username=username,
-                password=password,
-                domain_name=domain_name,
-                project_domain_name=project_domain_name,
-                project_name=project_name,
-                auth_url=ep
-            )
-            sess = keystone_session.Session(auth=auth)
-        return (sess, auth)
-
-    def get_keystone_endpoint(self, keystone_ip, api_version=None,
-                              admin_port=False):
-        """Return keystone endpoint"""
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if api_version == 2:
-            ep = base_ep + "/v2.0"
-        else:
-            ep = base_ep + "/v3"
-        return ep
-
-    def get_default_keystone_session(self, keystone_sentry,
-                                     openstack_release=None, api_version=2):
-        """Return a keystone session object and client object assuming standard
-        default settings
-
-        Example call in amulet tests:
-            self.keystone_session, self.keystone = u.get_default_keystone_session(
-                self.keystone_sentry,
-                openstack_release=self._get_openstack_release())
-
-        The session can then be used to auth other clients:
-            neutronclient.Client(session=session)
-            aodh_client.Client(session=session)
-            etc.
-        """
-        self.log.debug('Authenticating keystone admin...')
-        # 11 => xenial_queens
-        if api_version == 3 or (openstack_release and openstack_release >= 11):
-            client_class = keystone_client_v3.Client
-            api_version = 3
-        else:
-            client_class = keystone_client.Client
-        keystone_ip = keystone_sentry.info['public-address']
-        session, auth = self.get_keystone_session(
-            keystone_ip,
-            api_version=api_version,
-            username='admin',
-            
password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - 
return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) - else: - image = glance.images.create( - name=image_name, - disk_format="qcow2", - visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
-            amulet.raise_status(amulet.FAIL, msg=msg)
-
-        # Re-validate new image
-        self.log.debug('Validating image attributes...')
-        val_img_name = glance.images.get(img_id).name
-        val_img_stat = glance.images.get(img_id).status
-        val_img_cfmt = glance.images.get(img_id).container_format
-        val_img_dfmt = glance.images.get(img_id).disk_format
-
-        if float(glance.version) < 2.0:
-            val_img_pub = glance.images.get(img_id).is_public
-        else:
-            val_img_pub = glance.images.get(img_id).visibility == "public"
-
-        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
-                    'container fmt:{} disk fmt:{}'.format(
-                        val_img_name, val_img_pub, img_id,
-                        val_img_stat, val_img_cfmt, val_img_dfmt))
-
-        if val_img_name == image_name and val_img_stat == 'active' \
-                and val_img_pub is True and val_img_cfmt == 'bare' \
-                and val_img_dfmt == 'qcow2':
-            self.log.debug(msg_attr)
-        else:
-            msg = ('Image validation failed, {}'.format(msg_attr))
-            amulet.raise_status(amulet.FAIL, msg=msg)
-
-        return image
-
-    def delete_image(self, glance, image):
-        """Delete the specified image."""
-
-        # /!\ DEPRECATION WARNING
-        self.log.warn('/!\\ DEPRECATION WARNING: use '
-                      'delete_resource instead of delete_image.')
-        self.log.debug('Deleting glance image ({})...'.format(image))
-        return self.delete_resource(glance.images, image, msg='glance image')
-
-    def create_instance(self, nova, image_name, instance_name, flavor):
-        """Create the specified instance."""
-        self.log.debug('Creating instance '
-                       '({}|{}|{})'.format(instance_name, image_name, flavor))
-        image = nova.glance.find_image(image_name)
-        flavor = nova.flavors.find(name=flavor)
-        instance = nova.servers.create(name=instance_name, image=image,
-                                       flavor=flavor)
-
-        count = 1
-        status = instance.status
-        while status != 'ACTIVE' and count < 60:
-            time.sleep(3)
-            instance = nova.servers.get(instance.id)
-            status = instance.status
-            self.log.debug('instance status: {}'.format(status))
-            count += 1
-
-        if status != 'ACTIVE':
-            self.log.error('instance creation timed out')
-            return None
-
-        return instance
-
-    def delete_instance(self, nova, instance):
-        """Delete the specified instance."""
-
-        # /!\ DEPRECATION WARNING
-        self.log.warn('/!\\ DEPRECATION WARNING: use '
-                      'delete_resource instead of delete_instance.')
-        self.log.debug('Deleting instance ({})...'.format(instance))
-        return self.delete_resource(nova.servers, instance,
-                                    msg='nova instance')
-
-    def create_or_get_keypair(self, nova, keypair_name="testkey"):
-        """Create a new keypair, or return pointer if it already exists."""
-        try:
-            _keypair = nova.keypairs.get(keypair_name)
-            self.log.debug('Keypair ({}) already exists, '
-                           'using it.'.format(keypair_name))
-            return _keypair
-        except Exception:
-            self.log.debug('Keypair ({}) does not exist, '
-                           'creating it.'.format(keypair_name))
-
-        _keypair = nova.keypairs.create(name=keypair_name)
-        return _keypair
-
-    def _get_cinder_obj_name(self, cinder_object):
-        """Retrieve name of cinder object.
-
-        :param cinder_object: cinder snapshot or volume object
-        :returns: str cinder object name
-        """
-        # v1 objects store name in 'display_name' attr but v2+ use 'name'
-        try:
-            return cinder_object.display_name
-        except AttributeError:
-            return cinder_object.name
-
-    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
-                             img_id=None, src_vol_id=None, snap_id=None):
-        """Create cinder volume, optionally from a glance image, OR
-        optionally as a clone of an existing volume, OR optionally
-        from a snapshot. 
Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
-
-        :param resource: pointer to os resource type, ex:glance_client.images
-        :param resource_id: unique name or id for the openstack resource
-        :param msg: text to identify purpose in logging
-        :param max_wait: maximum wait time in seconds
-        :returns: True if successful, otherwise False
-        """
-        self.log.debug('Deleting OpenStack resource '
-                       '{} ({})'.format(resource_id, msg))
-        num_before = len(list(resource.list()))
-        resource.delete(resource_id)
-
-        tries = 0
-        num_after = len(list(resource.list()))
-        while num_after != (num_before - 1) and tries < (max_wait / 4):
-            self.log.debug('{} delete check: '
-                           '{} [{}:{}] {}'.format(msg, tries,
-                                                  num_before,
-                                                  num_after,
-                                                  resource_id))
-            time.sleep(4)
-            num_after = len(list(resource.list()))
-            tries += 1
-
-        self.log.debug('{}: expected, actual count = {}, '
-                       '{}'.format(msg, num_before - 1, num_after))
-
-        if num_after == (num_before - 1):
-            return True
-        else:
-            self.log.error('{} delete timed out'.format(msg))
-            return False
-
-    def resource_reaches_status(self, resource, resource_id,
-                                expected_stat='available',
-                                msg='resource', max_wait=120):
-        """Wait for an openstack resource's status to reach an
-        expected status within a specified time. Useful to confirm that
-        nova instances, cinder vols, snapshots, glance images, heat stacks
-        and other resources eventually reach the expected status.
-
-        :param resource: pointer to os resource type, ex: heat_client.stacks
-        :param resource_id: unique id for the openstack resource
-        :param expected_stat: status to expect resource to reach
-        :param msg: text to identify purpose in logging
-        :param max_wait: maximum wait time in seconds
-        :returns: True if successful, False if status is not reached
-        """
-
-        tries = 0
-        resource_stat = resource.get(resource_id).status
-        while resource_stat != expected_stat and tries < (max_wait / 4):
-            self.log.debug('{} status check: '
-                           '{} [{}:{}] {}'.format(msg, tries,
-                                                  resource_stat,
-                                                  expected_stat,
-                                                  resource_id))
-            time.sleep(4)
-            resource_stat = resource.get(resource_id).status
-            tries += 1
-
-        self.log.debug('{}: expected, actual status = {}, '
-                       '{}'.format(msg, resource_stat, expected_stat))
-
-        if resource_stat == expected_stat:
-            return True
-        else:
-            self.log.debug('{} never reached expected status: '
-                           '{}'.format(resource_id, expected_stat))
-            return False
-
-    def get_ceph_osd_id_cmd(self, index):
-        """Produce a shell command that will return a ceph-osd id."""
-        return ("`initctl list | grep 'ceph-osd ' | "
-                "awk 'NR=={} {{ print $2 }}' | "
-                "grep -o '[0-9]*'`".format(index + 1))
-
-    def get_ceph_pools(self, sentry_unit):
-        """Return a dict of ceph pools from a single ceph unit, with
-        pool name as keys, pool id as vals."""
-        pools = {}
-        cmd = 'sudo ceph osd lspools'
-        output, code = sentry_unit.run(cmd)
-        if code != 0:
-            msg = ('{} `{}` returned {} '
-                   '{}'.format(sentry_unit.info['unit_name'],
-                               cmd, code, output))
-            amulet.raise_status(amulet.FAIL, msg=msg)
-
-        # For mimic ceph osd lspools output
-        output = output.replace("\n", ",")
-
-        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
-        for pool in str(output).split(','):
-            pool_id_name = pool.split(' ')
-            if len(pool_id_name) == 2:
-                pool_id = pool_id_name[0]
-                pool_name = pool_id_name[1]
-                pools[pool_name] = int(pool_id)
-
-        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
-                                                pools))
-        return pools
-
-    def get_ceph_df(self, sentry_unit):
-        """Return dict of ceph df json output, including ceph pool state.
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
-
-        :param sentry_units: list of sentry unit pointers (all rmq units)
-        :returns: None if successful, otherwise return error message
-        """
-        host_names = self.get_unit_hostnames(sentry_units)
-        errors = []
-
-        # Query every unit for cluster_status running nodes
-        for query_unit in sentry_units:
-            query_unit_name = query_unit.info['unit_name']
-            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
-            # Confirm that every unit is represented in the queried unit's
-            # cluster_status running nodes output.
-            for validate_unit in sentry_units:
-                val_host_name = host_names[validate_unit.info['unit_name']]
-                val_node_name = 'rabbit@{}'.format(val_host_name)
-
-                if val_node_name not in running_nodes:
-                    errors.append('Cluster member check failed on {}: {} not '
-                                  'in {}\n'.format(query_unit_name,
-                                                   val_node_name,
-                                                   running_nodes))
-        if errors:
-            return ''.join(errors)
-
-    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
-        """Check a single juju rmq unit for ssl and port in the config file."""
-        host = sentry_unit.info['public-address']
-        unit_name = sentry_unit.info['unit_name']
-
-        conf_file = '/etc/rabbitmq/rabbitmq.config'
-        conf_contents = str(self.file_contents_safe(sentry_unit,
-                                                    conf_file, max_wait=16))
-        # Checks
-        conf_ssl = 'ssl' in conf_contents
-        conf_port = str(port) in conf_contents
-
-        # Port explicitly checked in config
-        if port and conf_port and conf_ssl:
-            self.log.debug('SSL is enabled @{}:{} '
-                           '({})'.format(host, port, unit_name))
-            return True
-        elif port and not conf_port and conf_ssl:
-            self.log.debug('SSL is enabled @{} but not on port {} '
-                           '({})'.format(host, port, unit_name))
-            return False
-        # Port not checked (useful when checking that ssl is disabled)
-        elif not port and conf_ssl:
-            self.log.debug('SSL is enabled @{}:{} '
-                           '({})'.format(host, port, unit_name))
-            return True
-        elif not conf_ssl:
-            self.log.debug('SSL not enabled @{}:{} '
-                           '({})'.format(host, port, unit_name))
-            return False
-        else:
-            msg = ('Unknown condition when checking SSL status @{}:{} '
-                   '({})'.format(host, port, unit_name))
-            amulet.raise_status(amulet.FAIL, msg)
-
-    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
-        """Check that ssl is enabled on rmq juju sentry units.
-
-        :param sentry_units: list of all rmq sentry units
-        :param port: optional ssl port override to validate
-        :returns: None if successful, otherwise return error message
-        """
-        for sentry_unit in sentry_units:
-            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
-                return ('Unexpected condition: ssl is disabled on unit '
-                        '({})'.format(sentry_unit.info['unit_name']))
-        return None
-
-    def validate_rmq_ssl_disabled_units(self, sentry_units):
-        """Check that ssl is disabled on listed rmq juju sentry units.
-
-        :param sentry_units: list of all rmq sentry units
-        :returns: None if successful, otherwise return error message
-        """
-        for sentry_unit in sentry_units:
-            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
-                return ('Unexpected condition: ssl is enabled on unit '
-                        '({})'.format(sentry_unit.info['unit_name']))
-        return None
-
-    def configure_rmq_ssl_on(self, sentry_units, deployment,
-                             port=None, max_wait=60):
-        """Turn ssl charm config option on, with optional non-default
-        ssl port specification. Confirm that it is enabled on every
-        unit.
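-
-        Example (illustrative; ``deployment`` is the test's amulet
-        deployment wrapper)::
-
-            u.configure_rmq_ssl_on(sentry_units, deployment, port=5671)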
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
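-
-        Example (illustrative)::
-
-            u.publish_amqp_message_by_unit(sentry_unit,
-                                           'Test message', queue='test')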
-
-        :param sentry_unit: sentry unit pointer
-        :param message: amqp message string
-        :param queue: message queue, default to test
-        :param username: amqp user name, default to testuser1
-        :param password: amqp user password
-        :param ssl: boolean, default to False
-        :param port: amqp port, use defaults if None
-        :returns: None. Raises exception if publish failed.
-        """
-        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
-                                                                    message))
-        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
-                                               port=port,
-                                               username=username,
-                                               password=password)
-
-        # NOTE(beisner): extra debug here re: pika hang potential:
-        #   https://github.com/pika/pika/issues/297
-        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
-        self.log.debug('Defining channel...')
-        channel = connection.channel()
-        self.log.debug('Declaring queue...')
-        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
-        self.log.debug('Publishing message...')
-        channel.basic_publish(exchange='', routing_key=queue, body=message)
-        self.log.debug('Closing channel...')
-        channel.close()
-        self.log.debug('Closing connection...')
-        connection.close()
-
-    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
-                                 username="testuser1",
-                                 password="changeme",
-                                 ssl=False, port=None):
-        """Get an amqp message from a rmq juju unit.
-
-        :param sentry_unit: sentry unit pointer
-        :param queue: message queue, default to test
-        :param username: amqp user name, default to testuser1
-        :param password: amqp user password
-        :param ssl: boolean, default to False
-        :param port: amqp port, use defaults if None
-        :returns: amqp message body as string. Raise if get fails.
-        """
-        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
-                                               port=port,
-                                               username=username,
-                                               password=password)
-        channel = connection.channel()
-        method_frame, _, body = channel.basic_get(queue)
-
-        if method_frame:
-            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
-                                                                         body))
-            channel.basic_ack(method_frame.delivery_tag)
-            channel.close()
-            connection.close()
-            return body
-        else:
-            msg = 'No message retrieved.'
-            amulet.raise_status(amulet.FAIL, msg)
-
-    def validate_memcache(self, sentry_unit, conf, os_release,
-                          earliest_release=5, section='keystone_authtoken',
-                          check_kvs=None):
-        """Check Memcache is running and is configured to be used
-
-        Example call from Amulet test:
-
-            def test_110_memcache(self):
-                u.validate_memcache(self.neutron_api_sentry,
-                                    '/etc/neutron/neutron.conf',
-                                    self._get_openstack_release())
-
-        :param sentry_unit: sentry unit
-        :param conf: OpenStack config file to check memcache settings
-        :param os_release: Current OpenStack release int code
-        :param earliest_release: Earliest OpenStack release to check int code
-        :param section: OpenStack config file section to check
-        :param check_kvs: Dict of settings to check in config file
-        :returns: None
-        """
-        if os_release < earliest_release:
-            self.log.debug('Skipping memcache checks for deployment. {} < '
-                           'mitaka'.format(os_release))
-            return
-        _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
-        self.log.debug('Checking memcached is running')
-        ret = self.validate_services_by_name({sentry_unit: ['memcached']})
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg='Memcache running check '
-                                'failed {}'.format(ret))
-        else:
-            self.log.debug('OK')
-        self.log.debug('Checking memcache url is configured in {}'.format(
-            conf))
-        if self.validate_config_data(sentry_unit, conf, section, _kvs):
-            message = "Memcache config error in: {}".format(conf)
-            amulet.raise_status(amulet.FAIL, msg=message)
-        else:
-            self.log.debug('OK')
-        self.log.debug('Checking memcache configuration in '
-                       '/etc/memcached.conf')
-        contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
-                                           fatal=True)
-        ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
-        if CompareHostReleases(ubuntu_release) <= 'trusty':
-            memcache_listen_addr = 'ip6-localhost'
-        else:
-            memcache_listen_addr = '::1'
-        expected = {
-            '-p': '11211',
-            '-l': memcache_listen_addr}
-        found = []
-        for key, value in expected.items():
-            for line in contents.split('\n'):
-                if line.startswith(key):
-                    self.log.debug('Checking {} is set to {}'.format(
-                        key,
-                        value))
-                    assert value == line.split()[-1]
-                    self.log.debug(line.split()[-1])
-                    found.append(key)
-        if sorted(found) == sorted(expected.keys()):
-            self.log.debug('OK')
-        else:
-            message = "Memcache config error in: /etc/memcached.conf"
-            amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/ceph-radosgw/tests/charmhelpers/core/__init__.py b/ceph-radosgw/tests/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-radosgw/tests/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-radosgw/tests/charmhelpers/core/decorators.py b/ceph-radosgw/tests/charmhelpers/core/decorators.py
deleted file mode 100644
index 6ad41ee4..00000000
--- a/ceph-radosgw/tests/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-#  Edward Hope-Morley 
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
-    log,
-    INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
-    retry attempts before raising the exception.
-    """
-    def _retry_on_exception_inner_1(f):
-        def _retry_on_exception_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            while True:
-                try:
-                    return f(*args, **kwargs)
-                except exc_type:
-                    if not retries:
-                        raise
-
-                delay = base_delay * multiplier
-                multiplier += 1
-                log("Retrying '%s' %d more times (delay=%s)" %
-                    (f.__name__, retries, delay), level=INFO)
-                retries -= 1
-                if delay:
-                    time.sleep(delay)
-
-        return _retry_on_exception_inner_2
-
-    return _retry_on_exception_inner_1
diff --git a/ceph-radosgw/tests/charmhelpers/core/files.py b/ceph-radosgw/tests/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b75..00000000
--- a/ceph-radosgw/tests/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski '
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
-    """
-    Search for and replace the given pattern in filename.
-
-    :param filename: relative or absolute file path.
-    :param before: expression to be replaced (see 'man sed')
-    :param after: expression to replace with (see 'man sed')
-    :param flags: sed-compatible regex flags; for example, to make
-       the search and replace case insensitive, specify ``flags="i"``.
-       The ``g`` flag is always specified regardless, so you do not
-       need to remember to include it when overriding this parameter.
-    :returns: If the sed command exit code was zero then return,
-       otherwise raise CalledProcessError.
-    """
-    expression = r's/{0}/{1}/{2}'.format(before,
-                                         after, flags)
-
-    return subprocess.check_call(["sed", "-i", "-r", "-e",
-                                  expression,
-                                  os.path.expanduser(filename)])
diff --git a/ceph-radosgw/tests/charmhelpers/core/fstab.py b/ceph-radosgw/tests/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa9152..00000000
--- a/ceph-radosgw/tests/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R.
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/ceph-radosgw/tests/charmhelpers/core/hookenv.py b/ceph-radosgw/tests/charmhelpers/core/hookenv.py deleted file mode 100644 index 9abf2a45..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1354 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() -SH_MAX_ARG = 131071 - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message[:SH_MAX_ARG]] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. 
- - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. 
- :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (exc_json, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
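-        # Illustrative note: handing the settings to relation-set via a
-        # temporary file keeps large relation payloads off the command
-        # line, whose length is limited (cf. SH_MAX_ARG above).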
-        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
-            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
-        subprocess.check_call(
-            relation_cmd_line + ["--file", settings_file.name])
-        os.remove(settings_file.name)
-    else:
-        for key, value in settings.items():
-            if value is None:
-                relation_cmd_line.append('{}='.format(key))
-            else:
-                relation_cmd_line.append('{}={}'.format(key, value))
-        subprocess.check_call(relation_cmd_line)
-    # Flush cache of any relation-gets for local unit
-    flush(local_unit())
-
-
-def relation_clear(r_id=None):
-    ''' Clears any relation data already set on relation r_id '''
-    settings = relation_get(rid=r_id,
-                            unit=local_unit())
-    for setting in settings:
-        if setting not in ['public-address', 'private-address']:
-            settings[setting] = None
-    relation_set(relation_id=r_id,
-                 **settings)
-
-
-@cached
-def relation_ids(reltype=None):
-    """A list of relation_ids"""
-    reltype = reltype or relation_type()
-    relid_cmd_line = ['relation-ids', '--format=json']
-    if reltype is not None:
-        relid_cmd_line.append(reltype)
-        return json.loads(
-            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
-    return []
-
-
-@cached
-def related_units(relid=None):
-    """A list of related units"""
-    relid = relid or relation_id()
-    units_cmd_line = ['relation-list', '--format=json']
-    if relid is not None:
-        units_cmd_line.extend(('-r', relid))
-    return json.loads(
-        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
-    """Get the json representation of a unit's relation"""
-    unit = unit or remote_unit()
-    relation = relation_get(unit=unit, rid=rid)
-    for key in relation:
-        if key.endswith('-list'):
-            relation[key] = relation[key].split()
-    relation['__unit__'] = unit
-    return relation
-
-
-@cached
-def relations_for_id(relid=None):
-    """Get relations of a specific relation ID"""
-    relation_data = []
-    relid = relid or relation_ids()
-    for unit in related_units(relid):
-        unit_data = relation_for_unit(unit, relid)
-        unit_data['__relid__'] = relid
-        relation_data.append(unit_data)
-    return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
-    """Get relations of a specific type"""
-    relation_data = []
-    reltype = reltype or relation_type()
-    for relid in relation_ids(reltype):
-        for relation in relations_for_id(relid):
-            relation['__relid__'] = relid
-            relation_data.append(relation)
-    return relation_data
-
-
-@cached
-def metadata():
-    """Get the current charm metadata.yaml contents as a python object"""
-    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
-        return yaml.safe_load(md)
-
-
-def _metadata_unit(unit):
-    """Given the name of a unit (e.g. apache2/0), get the unit charm's
-    metadata.yaml. Very similar to metadata() but allows us to inspect
-    other units. Unit needs to be co-located, such as a subordinate or
-    principal/primary.
-
-    :returns: metadata.yaml as a python object.
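-    Example (illustrative)::
-
-        md = _metadata_unit('mysql/0')  # metadata of a co-located unit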
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
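-
-    Hook functions are registered by name and dispatched by
-    execute() from the basename of sys.argv[0], so a single
-    script can serve as the entry point for every hook.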
-
-    Example::
-
-        hooks = Hooks()
-
-        # register a hook, taking its name from the function name
-        @hooks.hook()
-        def install():
-            pass  # your code here
-
-        # register a hook, providing a custom hook name
-        @hooks.hook("config-changed")
-        def config_changed():
-            pass  # your code here
-
-        if __name__ == "__main__":
-            # execute a hook based on the name the program is called by
-            hooks.execute(sys.argv)
-    """
-
-    def __init__(self, config_save=None):
-        super(Hooks, self).__init__()
-        self._hooks = {}
-
-        # For unknown reasons, we allow the Hooks constructor to override
-        # config().implicit_save.
-        if config_save is not None:
-            config().implicit_save = config_save
-
-    def register(self, name, function):
-        """Register a hook"""
-        self._hooks[name] = function
-
-    def execute(self, args):
-        """Execute a registered hook based on args[0]"""
-        _run_atstart()
-        hook_name = os.path.basename(args[0])
-        if hook_name in self._hooks:
-            try:
-                self._hooks[hook_name]()
-            except SystemExit as x:
-                if x.code is None or x.code == 0:
-                    _run_atexit()
-                raise
-            _run_atexit()
-        else:
-            raise UnregisteredHookError(hook_name)
-
-    def hook(self, *hook_names):
-        """Decorator, registering the wrapped function as one or more hooks"""
-        def wrapper(decorated):
-            for hook_name in hook_names:
-                self.register(hook_name, decorated)
-            else:
-                self.register(decorated.__name__, decorated)
-                if '_' in decorated.__name__:
-                    self.register(
-                        decorated.__name__.replace('_', '-'), decorated)
-            return decorated
-        return wrapper
-
-
-class NoNetworkBinding(Exception):
-    pass
-
-
-def charm_dir():
-    """Return the root directory of the current charm"""
-    d = os.environ.get('JUJU_CHARM_DIR')
-    if d is not None:
-        return d
-    return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
-    """Gets the value of an action parameter, or all key/value param pairs"""
-    cmd = ['action-get']
-    if key is not None:
-        cmd.append(key)
-    cmd.append('--format=json')
-    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-    return action_data
-
-
-def action_set(values):
-    """Sets the values to be returned after the action finishes"""
-    cmd = ['action-set']
-    for k, v in list(values.items()):
-        cmd.append('{}={}'.format(k, v))
-    subprocess.check_call(cmd)
-
-
-def action_fail(message):
-    """Sets the action status to failed and sets the error message.
-
-    The results set by action_set are preserved."""
-    subprocess.check_call(['action-fail', message])
-
-
-def action_name():
-    """Get the name of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
-    """Get the UUID of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
-    """Get the tag for the currently executing action."""
-    return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
-    """Set the workload state with a message
-
-    Use status-set to set the workload state with a message which is visible
-    to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message instead.
-
-    workload_state -- valid juju workload state.
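-                      One of 'maintenance', 'blocked', 'waiting'
-                      or 'active'.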
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. """ - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. 
The <klass> and <pid> provided
-    must match a payload that has been previously registered with juju using
-    payload-register."""
-    cmd = ['payload-unregister']
-    for x in [klass, pid]:
-        cmd.append(x)
-    subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
-    """is used to update the current status of a registered payload.
-    The <klass> and <pid> provided must match a payload that has been previously
-    registered with juju using payload-register. The <status> must be one of the
-    following: starting, started, stopping, stopped"""
-    cmd = ['payload-status-set']
-    for x in [klass, pid, status]:
-        cmd.append(x)
-    subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
-    """used to fetch the resource path of the given name.
-
-    <name> must match a name of a defined resource in metadata.yaml
-
-    returns either a path or False if resource not available
-    """
-    if not name:
-        return False
-
-    cmd = ['resource-get', name]
-    try:
-        return subprocess.check_output(cmd).decode('UTF-8')
-    except subprocess.CalledProcessError:
-        return False
-
-
-@cached
-def juju_version():
-    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
-    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
-    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
-    return subprocess.check_output([jujud, 'version'],
-                                   universal_newlines=True).strip()
-
-
-def has_juju_version(minimum_version):
-    """Return True if the Juju version is at least the provided version"""
-    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
-    '''Schedule a callback to run before the main hook.
-
-    Callbacks are run in the order they were added.
-
-    This is useful for modules and classes to perform initialization
-    and inject behavior. In particular:
-
-        - Run common code before all of your hooks, such as logging
-          the hook name or interesting relation data.
-        - Defer object or module initialization that requires a hook
-          context until we know there actually is a hook context,
-          making testing easier.
-        - Rather than requiring charm authors to include boilerplate to
-          invoke your helper's behavior, have it run automatically if
-          your object is instantiated or module imported.
-
-    This is not at all useful after your hook framework has been launched.
-    '''
-    global _atstart
-    _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
-    '''Schedule a callback to run on successful hook completion.
-
-    Callbacks are run in the reverse order that they were added.'''
-    _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
-    '''Hook frameworks must invoke this before running the main hook body.'''
-    global _atstart
-    for callback, args, kwargs in _atstart:
-        callback(*args, **kwargs)
-    del _atstart[:]
-
-
-def _run_atexit():
-    '''Hook frameworks must invoke this after the main hook body has
-    successfully completed. Do not invoke it if the hook fails.'''
-    global _atexit
-    for callback, args, kwargs in reversed(_atexit):
-        callback(*args, **kwargs)
-    del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
-    '''
-    Deprecated since Juju 2.3; use network_get()
-
-    Retrieve the primary network address for a named binding
-
-    :param binding: string. The name of a relation or extra-binding
-    :return: string.
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. - """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. 
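-    If no ingress-address is published on the relation, the unit's
-    private-address is returned instead, so callers always get a
-    usable address.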
- - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. - - This function is to be used on the providing side of the - relation, and provides the ranges of addresses that client - connections may come from. The result is uninteresting on - the consuming side of a relation (unit == local_unit()). - - Returns a stable list of subnets in CIDR format. - eg. ['192.168.1.0/24', '2001::F00F/128'] - - If egress-subnets is not available, falls back to using the published - ingress-address, or finally private-address. - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] - """ - def _to_range(addr): - if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: - addr += '/32' - elif ':' in addr and '/' not in addr: # IPv6 - addr += '/128' - return addr - - settings = relation_get(rid=rid, unit=unit) - if 'egress-subnets' in settings: - return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] - if 'ingress-address' in settings: - return [_to_range(settings['ingress-address'])] - if 'private-address' in settings: - return [_to_range(settings['private-address'])] - return [] # Should never happen - - -def unit_doomed(unit=None): - """Determines if the unit is being removed from the model - - Requires Juju 2.4.1. - - :param unit: string unit name, defaults to local_unit - :side effect: calls goal_state - :side effect: calls local_unit - :side effect: calls has_juju_version - :return: True if the unit is being removed, already gone, or never existed - """ - if not has_juju_version("2.4.1"): - # We cannot risk blindly returning False for 'we don't know', - # because that could cause data loss; if call sites don't - # need an accurate answer, they likely don't need this helper - # at all. - # goal-state existed in 2.4.0, but did not handle removals - # correctly until 2.4.1. - raise NotImplementedError("is_doomed") - if unit is None: - unit = local_unit() - gs = goal_state() - units = gs.get('units', {}) - if unit not in units: - return True - # I don't think 'dead' units ever show up in the goal-state, but - # check anyway in addition to 'dying'. - return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-radosgw/tests/charmhelpers/core/host.py b/ceph-radosgw/tests/charmhelpers/core/host.py deleted file mode 100644 index e9fd38a0..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/host.py +++ /dev/null @@ -1,1042 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt -# Matthew Wedgwood - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -import six - -from contextlib import contextmanager -from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit -from .fstab import Fstab -from charmhelpers.osplatform import get_platform - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import - -UPDATEDB_PATH = '/etc/updatedb.conf' - -def service_start(service_name, **kwargs): - """Start a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('start', service_name, **kwargs) - - -def service_stop(service_name, **kwargs): - """Stop a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. 
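The kwargs plumbing described above is easiest to see with a concrete call. Under upstart the extra key=value pair selects one instance of a multi-instance job; under systemd it is silently dropped and the whole unit is acted on. A sketch (the id value is illustrative)::

    from charmhelpers.core.host import service_start, service_stop

    # Cycle a single ceph-osd instance on an upstart host.
    service_stop('ceph-osd', id=4)
    service_start('ceph-osd', id=4)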
These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('stop', service_name, **kwargs) - - -def service_restart(service_name, **kwargs): - """Restart a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be restarted. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_restart('ceph-osd', id=4) - - :param service_name: the name of the service to restart - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False, **kwargs): - """Reload a system service, optionally falling back to restart if - reload fails. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_reload('ceph-osd', id=4) - - :param service_name: the name of the service to reload - :param restart_on_failure: boolean indicating whether to fallback to a - restart if the reload fails. - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. 
- """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Reenable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. - """ - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. 
- """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. - - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - 
"""Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. - :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not 
os.path.isdir(realpath):
-            log("Removing non-directory file {} prior to mkdir()".format(path))
-            os.unlink(realpath)
-        os.makedirs(realpath, perms)
-    elif not path_exists:
-        os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
-    os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
-    """Create or overwrite a file with the contents of a byte string."""
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    # let's see if we can grab the file and compare the contents, to avoid
-    # doing a write.
-    existing_content = None
-    existing_uid, existing_gid = None, None
-    try:
-        with open(path, 'rb') as target:
-            existing_content = target.read()
-        stat = os.stat(path)
-        existing_uid, existing_gid = stat.st_uid, stat.st_gid
-    except (IOError, OSError):
-        # the file does not exist yet (or is unreadable); fall through
-        # and write it out below.
-        pass
-    if content != existing_content:
-        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
-            level=DEBUG)
-        with open(path, 'wb') as target:
-            os.fchown(target.fileno(), uid, gid)
-            os.fchmod(target.fileno(), perms)
-            if six.PY3 and isinstance(content, six.string_types):
-                content = content.encode('UTF-8')
-            target.write(content)
-        return
-    # the contents were the same, but we might still need to change the
-    # ownership.
-    if existing_uid != uid:
-        log("Changing uid on already existing content: {} -> {}"
-            .format(existing_uid, uid), level=DEBUG)
-        os.chown(path, uid, -1)
-    if existing_gid != gid:
-        log("Changing gid on already existing content: {} -> {}"
-            .format(existing_gid, gid), level=DEBUG)
-        os.chown(path, -1, gid)
-
-
-def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab"""
-    return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file"""
-    return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
-    """Mount a filesystem at a particular mountpoint"""
-    cmd_args = ['mount']
-    if options is not None:
-        cmd_args.extend(['-o', options])
-    cmd_args.extend([device, mountpoint])
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_add(device, mountpoint, filesystem, options=options)
-    return True
-
-
-def umount(mountpoint, persist=False):
-    """Unmount a filesystem"""
-    cmd_args = ['umount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_remove(mountpoint)
-    return True
-
-
-def mounts():
-    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
-    with open('/proc/mounts') as f:
-        # [['/mount/point','/dev/path'],[...]]
-        system_mounts = [m[1::-1] for m in [l.strip().split()
-                                            for l in f.readlines()]]
-    return system_mounts
-
-
-def fstab_mount(mountpoint):
-    """Mount filesystem using fstab"""
-    cmd_args = ['mount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {}\n{}'.format(mountpoint, e.output))
-        return False
-    return True
-
-
-def file_hash(path, hash_type='md5'):
-    """Generate a hash checksum of the contents of 'path' or None if not found.
-
-    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
-                          such as md5, sha1, sha256, sha512, etc.
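Because write_file() compares the existing contents before touching the file, it is safe to call on every hook invocation; only a real change triggers a write. A sketch with an illustrative path and payload::

    from charmhelpers.core.host import write_file

    # No-op when the file already holds exactly this content with
    # this owner and group.
    write_file('/etc/ceph/ceph.conf', b'[global]\n',
               owner='root', group='root', perms=0o644)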
- """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. 
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop and start instead of restarting a
-                      service
-    @param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    @returns result of lambda_f()
-    """
-    if restart_functions is None:
-        restart_functions = {}
-    checksums = {path: path_hash(path) for path in restart_map}
-    r = lambda_f()
-    # create a list of lists of the services to restart
-    restarts = [restart_map[path]
-                for path in restart_map
-                if path_hash(path) != checksums[path]]
-    # create a flat list of ordered services without duplicates from lists
-    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
-    if services_list:
-        actions = ('stop', 'start') if stopstart else ('restart',)
-        for service_name in services_list:
-            if service_name in restart_functions:
-                restart_functions[service_name](service_name)
-            else:
-                for action in actions:
-                    service(action, service_name)
-    return r
-
-
-def pwgen(length=None):
-    """Generate a random password."""
-    if length is None:
-        # A weak PRNG is acceptable for picking the random length
-        length = random.choice(range(35, 45))
-    alphanumeric_chars = [
-        l for l in (string.ascii_letters + string.digits)
-        if l not in 'l0QD1vAEIOUaeiou']
-    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
-    # actual password
-    random_generator = random.SystemRandom()
-    random_chars = [
-        random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
-    """Returns True if interface is not virtual, otherwise False."""
-    if interface:
-        sys_net = '/sys/class/net'
-        if os.path.isdir(sys_net):
-            for iface in glob.glob(os.path.join(sys_net, '*')):
-                if '/virtual/' in os.path.realpath(iface):
-                    continue
-
-                if interface == os.path.basename(iface):
-                    return True
-
-    return False
-
-
-def get_bond_master(interface):
-    """Returns bond master if interface is bond slave otherwise None.
-
-    NOTE: the provided interface is expected to be physical
-    """
-    if interface:
-        iface_path = '/sys/class/net/%s' % (interface)
-        if os.path.exists(iface_path):
-            if '/virtual/' in os.path.realpath(iface_path):
-                return None
-
-            master = os.path.join(iface_path, 'master')
-            if os.path.exists(master):
-                master = os.path.realpath(master)
-                # make sure it is a bond master
-                if os.path.exists(os.path.join(master, 'bonding')):
-                    return os.path.basename(master)
-
-    return None
-
-
-def list_nics(nic_type=None):
-    """Return a list of nics of given type(s)"""
-    if isinstance(nic_type, six.string_types):
-        int_types = [nic_type]
-    else:
-        int_types = nic_type
-
-    interfaces = []
-    if nic_type:
-        for int_type in int_types:
-            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8')
-            ip_output = ip_output.split('\n')
-            ip_output = (line for line in ip_output if line)
-            for line in ip_output:
-                if line.split()[1].startswith(int_type):
-                    matched = re.search('.*: (' + int_type +
-                                        r'[0-9]+\.[0-9]+)@.*', line)
-                    if matched:
-                        iface = matched.groups()[0]
-                    else:
-                        iface = line.split()[1].replace(":", "")
-
-                    if iface not in interfaces:
-                        interfaces.append(iface)
-    else:
-        cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-        ip_output = (line.strip() for line in ip_output if line)
-
-        key = re.compile(r'^[0-9]+:\s+(.+):')
-        for line in ip_output:
-            matched = re.search(key, line)
-            if matched:
-                iface = matched.group(1)
-                iface = iface.partition("@")[0]
-                if iface not in interfaces:
-                    interfaces.append(iface)
-
-    return interfaces
-
-
-def set_nic_mtu(nic, mtu):
-    """Set the Maximum Transmission Unit (MTU) on a network interface."""
-    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
-    subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
-    """Return the Maximum Transmission Unit (MTU) for a network interface."""
-    cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-    mtu = ""
-    for line in ip_output:
-        words = line.split()
-        if 'mtu' in words:
-            mtu = words[words.index("mtu") + 1]
-    return mtu
-
-
-def get_nic_hwaddr(nic):
-    """Return the Media Access Control (MAC) for a network interface."""
-    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8')
-    hwaddr = ""
-    words = ip_output.split()
-    if 'link/ether' in words:
-        hwaddr = words[words.index('link/ether') + 1]
-    return hwaddr
-
-
-@contextmanager
-def chdir(directory):
-    """Change the current working directory to a different directory for a
-    code block and return the previous directory after the block exits.
-    Useful to run commands from a specified directory.
-
-    :param str directory: The directory path to change to for this context.
-    """
-    cur = os.getcwd()
-    try:
-        yield os.chdir(directory)
-    finally:
-        os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """Recursively change user and group ownership of files and directories
-    in given path. Doesn't chown path itself by default, only its children.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
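The NIC helpers combine naturally, for instance to raise the MTU on every physical interface matching a prefix. A sketch (the eth prefix and MTU value are illustrative)::

    from charmhelpers.core.host import (
        get_nic_mtu,
        is_phy_iface,
        list_nics,
        set_nic_mtu,
    )

    for nic in list_nics('eth'):
        # Skip virtual devices and interfaces already at the target MTU.
        if is_phy_iface(nic) and get_nic_mtu(nic) != '9000':
            set_nic_mtu(nic, '9000')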
-    :param bool follow_links: Also follow and chown links if True
-    :param bool chowntopdir: Also chown path itself if True
-    """
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    if follow_links:
-        chown = os.chown
-    else:
-        chown = os.lchown
-
-    if chowntopdir:
-        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
-        if not broken_symlink:
-            chown(path, uid, gid)
-    for root, dirs, files in os.walk(path, followlinks=follow_links):
-        for name in dirs + files:
-            full = os.path.join(root, name)
-            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
-            if not broken_symlink:
-                chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
-    """Recursively change user and group ownership of files and directories
-    in a given path, not following symbolic links. See the documentation for
-    'os.lchown' for more information.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    """
-    chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
-    """Returns a tuple containing the username & groupname owning the path.
-
-    :param str path: the string path to retrieve the ownership
-    :return tuple(str, str): A (username, groupname) tuple containing the
-                             name of the user and group owning the path.
-    :raises OSError: if the specified path does not exist
-    """
-    stat = os.stat(path)
-    username = pwd.getpwuid(stat.st_uid)[0]
-    groupname = grp.getgrgid(stat.st_gid)[0]
-    return username, groupname
-
-
-def get_total_ram():
-    """The total amount of system RAM in bytes.
-
-    This is what is reported by the OS, and may be overcommitted when
-    there are multiple containers hosted on the same machine.
-    """
-    with open('/proc/meminfo', 'r') as f:
-        for line in f.readlines():
-            if line:
-                key, value, unit = line.split()
-                if key == 'MemTotal:':
-                    assert unit == 'kB', 'Unknown unit'
-                    return int(value) * 1024  # Classic, not KiB.
-        raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
-    """Determine whether unit is running in a container
-
-    @return: boolean indicating if unit is in a container
-    """
-    if init_is_systemd():
-        # Detect using systemd-detect-virt
-        return subprocess.call(['systemd-detect-virt',
-                                '--container']) == 0
-    else:
-        # Detect using upstart container file marker
-        return os.path.exists(UPSTART_CONTAINER_TYPE)
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
-    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
-
-    This method has no effect if the path specified by updatedb_path does not
-    exist or is not a file.
- - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a396..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. 
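Wiring modulo_distribution() to an actual sleep gives a deployment-wide rolling restart; restart_services() below is hypothetical::

    import time

    from charmhelpers.core.host import modulo_distribution

    def restart_services():
        pass  # hypothetical: restart this unit's daemons

    # Units restart in groups of one-in-ten, 30 seconds apart; with
    # non_zero_wait=True the units where unit % 10 == 0 wait a full
    # cycle instead of going first.
    time.sleep(modulo_distribution(modulo=10, wait=30,
                                   non_zero_wait=True))
    restart_services()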
- - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index a6d375af..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,91 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. 
- - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-radosgw/tests/charmhelpers/core/hugepage.py b/ceph-radosgw/tests/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. 
- - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel.py b/ceph-radosgw/tests/charmhelpers/core/kernel.py deleted file mode 100644 index 2d404528..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-radosgw/tests/charmhelpers/core/services/__init__.py b/ceph-radosgw/tests/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
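The kernel helpers follow the same platform-factory dispatch as host.py; typical charm usage is load-if-missing with persistence::

    from charmhelpers.core.kernel import is_module_loaded, modprobe

    if not is_module_loaded('rbd'):
        # Loads immediately and records the module in /etc/modules
        # (Ubuntu) or /etc/rc.modules (CentOS) for the next boot.
        modprobe('rbd', persist=True)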
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .base import *  # NOQA
-from .helpers import *  # NOQA
diff --git a/ceph-radosgw/tests/charmhelpers/core/services/base.py b/ceph-radosgw/tests/charmhelpers/core/services/base.py
deleted file mode 100644
index 179ad4f0..00000000
--- a/ceph-radosgw/tests/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
-           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
-           'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
-    def __init__(self, services=None):
-        """
-        Register a list of services, given their definitions.
-
-        Service definitions are dicts in the following formats (all keys except
-        'service' are optional)::
-
-            {
-                "service": <service name>,
-                "required_data": <list of required data contexts>,
-                "provided_data": <list of provided data contexts>,
-                "data_ready": <one or more callbacks>,
-                "data_lost": <one or more callbacks>,
-                "start": <one or more callbacks>,
-                "stop": <one or more callbacks>,
-                "ports": <list of ports to manage>,
-            }
-
-        The 'required_data' list should contain dicts of required data (or
-        dependency managers that act like dicts and know how to collect the data).
-        Only when all items in the 'required_data' list are populated are the list
-        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
-        information.
-
-        The 'provided_data' list should contain relation data providers, most likely
-        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
-        that will indicate a set of data to set on a given relation.
-
-        The 'data_ready' value should be either a single callback, or a list of
-        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
-        Each callback will be called with the service name as the only parameter.
-        After all of the 'data_ready' callbacks are called, the 'start' callbacks
-        are fired.
-
-        The 'data_lost' value should be either a single callback, or a list of
-        callbacks, to be called when a 'required_data' item no longer passes
-        `is_ready()`. Each callback will be called with the service name as the
-        only parameter. After all of the 'data_lost' callbacks are called,
-        the 'stop' callbacks are fired.
-
-        The 'start' value should be either a single callback, or a list of
-        callbacks, to be called when starting the service, after the 'data_ready'
-        callbacks are complete. Each callback will be called with the service
-        name as the only parameter. This defaults to
-        `[host.service_start, services.open_ports]`.
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-radosgw/tests/charmhelpers/core/services/helpers.py b/ceph-radosgw/tests/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d2..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. 
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/ceph-radosgw/tests/charmhelpers/core/strutils.py b/ceph-radosgw/tests/charmhelpers/core/strutils.py deleted file mode 100644 index e8df0452..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. 
- """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/ceph-radosgw/tests/charmhelpers/core/sysctl.py b/ceph-radosgw/tests/charmhelpers/core/sysctl.py deleted file mode 100644 index 1f188d8c..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-radosgw/tests/charmhelpers/core/templating.py b/ceph-radosgw/tests/charmhelpers/core/templating.py deleted file mode 100644 index 9014015c..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/templating.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/ceph-radosgw/tests/charmhelpers/core/unitdata.py b/ceph-radosgw/tests/charmhelpers/core/unitdata.py deleted file mode 100644 index ab554327..00000000 --- a/ceph-radosgw/tests/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
-
-Here's a fully worked integration example using hookenv.Hooks::
-
-    from charmhelpers.core import hookenv, unitdata
-
-    hook_data = unitdata.HookData()
-    db = unitdata.kv()
-    hooks = hookenv.Hooks()
-
-    @hooks.hook
-    def config_changed():
-        # Print all changes to configuration from previously seen
-        # values.
-        for changed, (prev, cur) in hook_data.conf.items():
-            print('config changed', changed,
-                  'previous value', prev,
-                  'current value', cur)
-
-        # Get some unit specific bookkeeping
-        if not db.get('pkg_key'):
-            key = urllib.urlopen('https://example.com/pkg_key').read()
-            db.set('pkg_key', key)
-
-        # Directly access all charm config as a mapping.
-        conf = db.getrange('config', True)
-
-        # Directly access all relation data as a mapping
-        rels = db.getrange('rels', True)
-
-    if __name__ == '__main__':
-        with hook_data():
-            hooks.execute(sys.argv)
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
-    >>> from unitdata import kv
-    >>> db = kv()
-    >>> with db.hook_scope('install'):
-    ...    # do work, in transactional scope.
-    ...    db.set('x', 1)
-    >>> db.get('x')
-    1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
-    >>> kv.set('y', True)
-    >>> kv.get('y')
-    True
-
-    # We can set complex values (dicts, lists) as a single key.
-    >>> kv.set('config', {'a': 1, 'b': True})
-
-    # Also supports returning dictionaries as a record which
-    # provides attribute access.
-    >>> config = kv.get('config', record=True)
-    >>> config.b
-    True
-
-
-Groups of keys can be manipulated with update/getrange::
-
-    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
-    >>> kv.getrange('gui.', strip=True)
-    {'z': 1, 'y': 2}
-
-When updating values, it's very helpful to understand which values
-have actually changed and how they have changed. The storage
-provides a delta method for this::
-
-    >>> data = {'debug': True, 'option': 2}
-    >>> delta = kv.delta(data, 'config.')
-    >>> delta.debug.previous
-    None
-    >>> delta.debug.current
-    True
-    >>> delta
-    {'debug': (None, True), 'option': (None, 2)}
-
-Note that the delta method does not persist the actual change; it must
-be explicitly saved via the 'update' method::
-
-    >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated with the hook name::
-
-    >>> with db.hook_scope('config-changed'):
-    ...    db.set('x', 42)
-    >>> db.gethistory('x')
-    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
-     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu '
-
-
-class Storage(object):
-    """Simple key value database for local unit state within charms.
-
-    Modifications are not persisted unless :meth:`flush` is called.
-
-    To support dicts, lists, integers, floats, and booleans, values
-    are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
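As the note says, ':memory:' keeps sqlite3 off the filesystem, which makes the store easy to exercise in unit tests; a sketch::

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')    # no .unit-state.db file is created
    db.set('x', 1)
    assert db.get('x') == 1
    db.flush()                  # commit; set() alone is not persisted
    db.close()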
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
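hook_scope() is what turns the per-key operations into a transaction with history: every mutation inside the block is recorded against the current hook's revision row, a clean exit commits, and an exception rolls back via flush(False). A sketch::

    from charmhelpers.core.unitdata import kv

    db = kv()
    with db.hook_scope('config-changed'):
        db.set('x', 42)
        db.unsetrange(prefix='stale.')   # deletions are revisioned too

    db.gethistory('x')   # (revision, key, data, hook, timestamp) rows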
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/ceph-radosgw/tests/charmhelpers/osplatform.py b/ceph-radosgw/tests/charmhelpers/osplatform.py deleted file mode 100644 index d9a4d5c0..00000000 --- a/ceph-radosgw/tests/charmhelpers/osplatform.py +++ /dev/null @@ -1,25 +0,0 @@ -import platform - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) From 0f9068ce2e697baff10714599739e8338d773119 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 3 Oct 2018 09:35:51 -0500 Subject: [PATCH 1574/2699] Update requirements Change-Id: If2ac287dcf43a85531320e5e91970375aae94885 --- ceph-proxy/.gitignore | 7 +- ceph-proxy/requirements.txt | 1 - ceph-proxy/test-requirements.txt | 10 +- ceph-proxy/tests/charmhelpers/__init__.py | 97 -- .../tests/charmhelpers/contrib/__init__.py | 13 - .../charmhelpers/contrib/amulet/__init__.py | 13 - .../charmhelpers/contrib/amulet/deployment.py | 99 -- .../charmhelpers/contrib/amulet/utils.py | 821 --------- .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 357 ---- .../contrib/openstack/amulet/utils.py | 1530 ----------------- .../tests/charmhelpers/core/__init__.py | 13 - .../tests/charmhelpers/core/decorators.py | 55 - ceph-proxy/tests/charmhelpers/core/files.py | 43 - ceph-proxy/tests/charmhelpers/core/fstab.py | 132 -- ceph-proxy/tests/charmhelpers/core/hookenv.py | 1353 --------------- ceph-proxy/tests/charmhelpers/core/host.py | 1042 ----------- .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 72 - .../charmhelpers/core/host_factory/ubuntu.py | 91 - .../tests/charmhelpers/core/hugepage.py | 69 - ceph-proxy/tests/charmhelpers/core/kernel.py | 72 - .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 - .../core/kernel_factory/ubuntu.py | 13 - .../charmhelpers/core/services/__init__.py | 16 - .../tests/charmhelpers/core/services/base.py | 362 ---- .../charmhelpers/core/services/helpers.py | 290 ---- .../tests/charmhelpers/core/strutils.py | 129 -- ceph-proxy/tests/charmhelpers/core/sysctl.py | 58 - .../tests/charmhelpers/core/templating.py | 93 - .../tests/charmhelpers/core/unitdata.py | 525 ------ ceph-proxy/tests/charmhelpers/osplatform.py | 25 - 34 files changed, 10 insertions(+), 7434 deletions(-) delete mode 100644 ceph-proxy/tests/charmhelpers/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/decorators.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/files.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/fstab.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/hookenv.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/host.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/centos.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/hugepage.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/kernel.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py delete mode 100644 
ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/services/__init__.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/services/base.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/services/helpers.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/strutils.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/sysctl.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/templating.py delete mode 100644 ceph-proxy/tests/charmhelpers/core/unitdata.py delete mode 100644 ceph-proxy/tests/charmhelpers/osplatform.py diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index 7d2fd1fb..9e552b19 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -1,8 +1,9 @@ bin -.idea .coverage .testrepository .tox *.sw[nop] -.idea -*.pyc \ No newline at end of file +*.pyc +.unit-state.db +.stestr +__pycache__ diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 6a3271b0..b8fec1e2 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -2,7 +2,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=1.8.0,<1.9.0 -PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 9edd4bbf..2b2c0e11 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -1,16 +1,16 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 -requests==2.6.0 +requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +amulet>=1.14.3,<2.0;python_version=='2.7' +bundletester>=0.6.1,<1.0;python_version=='2.7' python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -22,6 +22,8 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox pytz +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-proxy/tests/charmhelpers/__init__.py b/ceph-proxy/tests/charmhelpers/__init__.py deleted file mode 100644 index e7aa4715..00000000 --- a/ceph-proxy/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
-from __future__ import print_function
-from __future__ import absolute_import
-
-import functools
-import inspect
-import subprocess
-import sys
-
-try:
-    import six  # flake8: noqa
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
-    import six  # flake8: noqa
-
-try:
-    import yaml  # flake8: noqa
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
-    import yaml  # flake8: noqa
-
-
-# Holds a map of mangled function names that have been deprecated
-# using the @deprecate decorator below. This is so that the warning is only
-# printed once for each usage of the function.
-__deprecated_functions = {}
-
-
-def deprecate(warning, date=None, log=None):
-    """Add a deprecation warning the first time the function is used.
-    The date, which is a string in semi-ISO8601 format, indicates the
-    year-month that the function is officially going to be removed.
-
-    usage:
-
-    @deprecate('use core/fetch/add_source() instead', '2017-04')
-    def contributed_add_source_thing(...):
-        ...
-
-    And it then prints to the log ONCE that the function is deprecated.
-    The reason for passing the logging function (log) is so that hookenv.log
-    can be used for a charm if needed.
-
-    :param warning: String to indicate where it has moved to.
-    :param date: optional string, in YYYY-MM format to indicate when the
-                 function will definitely (probably) be removed.
-    :param log: The log function to call to log. If not supplied, logs to
-                stdout
-    """
-    def wrap(f):
-
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            try:
-                module = inspect.getmodule(f)
-                file = inspect.getsourcefile(f)
-                lines = inspect.getsourcelines(f)
-                f_name = "{}-{}-{}..{}-{}".format(
-                    module.__name__, file, lines[0], lines[-1], f.__name__)
-            except (IOError, TypeError):
-                # assume it was local, so just use the name of the function
-                f_name = f.__name__
-            if f_name not in __deprecated_functions:
-                __deprecated_functions[f_name] = True
-                s = "DEPRECATION WARNING: Function {} is being removed".format(
-                    f.__name__)
-                if date:
-                    s = "{} on/around {}".format(s, date)
-                if warning:
-                    s = "{} : {}".format(s, warning)
-                if log:
-                    log(s)
-                else:
-                    print(s)
-            return f(*args, **kwargs)
-        return wrapped_f
-    return wrap
diff --git a/ceph-proxy/tests/charmhelpers/contrib/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-proxy/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index d21d01d8..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. 
- """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints'), - storage=this_service.get('storage')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints'), - storage=svc.get('storage')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 8a6b7644..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. 
- """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. - self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. 
- - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2', - 'memcached']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
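The validators compose with a deployment's sentry units; a sketch assuming a hypothetical `ceph_sentry` unit, with the section and keys chosen purely for illustration::

    import logging
    from charmhelpers.contrib.amulet.utils import AmuletUtils

    u = AmuletUtils(logging.DEBUG)

    # init system is detected per unit from its release codename
    ret = u.validate_services_by_name({ceph_sentry: ['ceph-mon']})
    assert ret is None, ret

    expected = {'fsid': u.not_null,          # function-pointer check
                'auth cluster required': 'cephx'}
    ret = u.validate_config_data(ceph_sentry, '/etc/ceph/ceph.conf',
                                 'global', expected)
    assert ret is None, ret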
- """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. 
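# The mtime helpers above and below share one retry shape: sleep, probe,
# swallow IOError while the file or /proc entry may not exist yet, retry.
# A dependency-free sketch of that shape (poll_until is an illustrative
# name, not a charm-helpers API):
import time

def poll_until(probe, retry_count=30, retry_sleep_time=10):
    """Call probe() until it returns a truthy value or retries run out."""
    for _ in range(retry_count + 1):
        try:
            result = probe()
            if result:
                return result
        except IOError:
            pass  # race avoidance: target may not exist yet (lp 1474030)
        time.sleep(retry_sleep_time)
    return None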
- """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... - mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. 
lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x "{}"'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. - - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. 
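# get_process_id_list() above shells out to pidof. A local Python 3 sketch
# of the same trick; note how the appended "|| exit 0 && exit 1" inverts
# the exit code so the command succeeds only when no PID is found
# (the expect_success=False case):
import subprocess

def pidof(process_name, expect_success=True):
    cmd = 'pidof -x "{}"'.format(process_name)
    if not expect_success:
        cmd += ' || exit 0 && exit 1'
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError('`{}` returned {}'.format(cmd, result.returncode))
    return result.stdout.split()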
- """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. 
Expected, actual: {}, ' - '{}'.format(expected[key], value)) - amulet.raise_status(amulet.FAIL, msg=msg) - - def get_unit_hostnames(self, units): - """Return a dict of juju unit names to hostnames.""" - host_names = {} - for unit in units: - host_names[unit.info['unit_name']] = \ - str(unit.file_contents('/etc/hostname').strip()) - self.log.debug('Unit host names: {}'.format(host_names)) - return host_names - - def run_cmd_unit(self, sentry_unit, cmd): - """Run a command on a unit, return the output and exit code.""" - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` command returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - msg = ('{} `{}` command returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output), code - - def file_exists_on_unit(self, sentry_unit, file_name): - """Check if a file exists on a unit.""" - try: - sentry_unit.file_stat(file_name) - return True - except IOError: - return False - except Exception as e: - msg = 'Error checking file {}: {}'.format(file_name, e) - amulet.raise_status(amulet.FAIL, msg=msg) - - def file_contents_safe(self, sentry_unit, file_name, - max_wait=60, fatal=False): - """Get file contents from a sentry unit. Wrap amulet file_contents - with retry logic to address races where a file checks as existing, - but no longer exists by the time file_contents is called. - Return None if file not found. Optionally raise if fatal is True.""" - unit_name = sentry_unit.info['unit_name'] - file_contents = False - tries = 0 - while not file_contents and tries < (max_wait / 4): - try: - file_contents = sentry_unit.file_contents(file_name) - except IOError: - self.log.debug('Attempt {} to open file {} from {} ' - 'failed'.format(tries, file_name, - unit_name)) - time.sleep(4) - tries += 1 - - if file_contents: - return file_contents - elif not fatal: - return None - elif fatal: - msg = 'Failed to get file contents from unit.' - amulet.raise_status(amulet.FAIL, msg) - - def port_knock_tcp(self, host="localhost", port=22, timeout=15): - """Open a TCP socket to check for a listening sevice on a host. - - :param host: host name or IP address, default to localhost - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :returns: True if successful, False if connect failed - """ - - # Resolve host name if possible - try: - connect_host = socket.gethostbyname(host) - host_human = "{} ({})".format(connect_host, host) - except socket.error as e: - self.log.warn('Unable to resolve address: ' - '{} ({}) Trying anyway!'.format(host, e)) - connect_host = host - host_human = connect_host - - # Attempt socket connection - try: - knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - knock.settimeout(timeout) - knock.connect((connect_host, port)) - knock.close() - self.log.debug('Socket connect OK for host ' - '{} on port {}.'.format(host_human, port)) - return True - except socket.error as e: - self.log.debug('Socket connect FAIL for' - ' {} port {} ({})'.format(host_human, port, e)) - return False - - def port_knock_units(self, sentry_units, port=22, - timeout=15, expect_success=True): - """Open a TCP socket to check for a listening sevice on each - listed juju unit. 
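# port_knock_tcp() above in miniature: socket.create_connection() wraps
# the resolve/connect/timeout steps that the helper spells out by hand.
# Sketch only, for comparison:
import socket

def port_open(host='localhost', port=22, timeout=15):
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False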
- - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - - # amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Translate to amulet's built in run_action(). Deprecated. - - Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is no longer used - - @return action_id. - """ - self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' - 'deprecated for amulet.run_action') - return unit_sentry.run_action(action, action_args=params) - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - action_id a string action uuid - _check_output parameter is no longer used - """ - data = amulet.actions.get_action_output(action_id, full_output=True) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 1c96752a..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. 
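# get_logger() above is plain stdlib wiring: one StreamHandler bound to
# stdout, one formatter, mirrored levels. The same setup in isolation:
import logging
import sys

logger = logging.getLogger('deployment-logger')
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter(
    '%(asctime)s %(funcName)s %(levelname)s: %(message)s'))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('logger ready')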
- - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. - Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. 
many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' 
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('cosmic', None): self.cosmic_rocky, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
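# _get_openstack_release() above turns each 'series_release' pair into an
# integer attribute so releases compare with <, <= and ==. The trick in
# isolation, on a shortened pair list:
OPENSTACK_RELEASES_PAIRS = [
    'trusty_icehouse', 'xenial_mitaka', 'xenial_queens', 'bionic_queens']

class _Releases(object):
    pass

rel = _Releases()
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
    setattr(rel, os_pair, i)

assert rel.trusty_icehouse < rel.xenial_queens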
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 6637865d..00000000 --- a/ceph-proxy/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1530 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. 
- """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. - """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - """ - self.log.warn("Endpoint ID and Region ID validation is limited to not " - "null checks after v2 to v3 conversion") - for svc in ep_data.keys(): - assert len(ep_data[svc]) == 1, "Unknown data format" - svc_ep_data = ep_data[svc][0] - ep_data[svc] = [ - { - 'url': svc_ep_data['adminURL'], - 'interface': 'admin', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['publicURL'], - 'interface': 'public', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['internalURL'], - 'interface': 'internal', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}] - return ep_data - - def validate_svc_catalog_endpoint_data(self, expected, actual, - openstack_release=None): - """Validate service catalog endpoint data. Pick the correct validator - for the OpenStack version. Expected data should be in the v2 format: - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - - """ - validation_function = self.validate_v2_svc_catalog_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_svc_catalog_endpoint_data - expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) - return validation_function(expected, actual) - - def validate_v2_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. 
- - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. - """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. 
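# validate_v3_svc_catalog_endpoint_data() above sorts both endpoint lists
# on 'interface' before zipping them, because keystone returns the admin/
# internal/public entries in no guaranteed order. The comparison shape,
# reduced to canned data:
expected = [{'interface': 'public'}, {'interface': 'admin'}]
actual = [{'interface': 'admin'}, {'interface': 'public'}]

l_expected = sorted(expected, key=lambda x: x['interface'])
l_actual = sorted(actual, key=lambda x: x['interface'])
assert len(l_actual) == len(l_expected)
for i_expected, i_actual in zip(l_expected, l_actual):
    assert i_expected['interface'] == i_actual['interface']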
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
- """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - 
password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - 
return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) - else: - image = glance.images.create( - name=image_name, - disk_format="qcow2", - visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. - - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. 
Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
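Annotation: the create call in create_cinder_volume is issued twice because the cinder v1 client names the volume via display_name while v2+ uses name; the TypeError fallback covers both. A condensed sketch of just that compatibility shim:

    def create_volume_compat(cinder, vol_name, vol_size, **create_kwargs):
        try:
            # cinder v1 client keyword
            return cinder.volumes.create(display_name=vol_name,
                                         size=vol_size, **create_kwargs)
        except TypeError:
            # cinder v2+ renamed the keyword argument
            return cinder.volumes.create(name=vol_name,
                                         size=vol_size, **create_kwargs)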
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
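Annotation: delete_resource and resource_reaches_status above share one shape: poll every four seconds until a condition holds or max_wait/4 tries are spent. A generic sketch of that loop, with `predicate` standing in for the per-resource check:

    import time

    def wait_for(predicate, max_wait=120, interval=4):
        tries = 0
        while tries < (max_wait / interval):
            if predicate():
                return True
            time.sleep(interval)
            tries += 1
        return False

    # e.g. wait_for(lambda: resource.get(res_id).status == 'available')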
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
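Annotation: get_ceph_df and get_ceph_pool_sample above rely on `ceph df --format=json` returning a 'pools' list whose entries carry per-pool stats. A sketch of pulling one pool's sample out of that JSON, using the same field names as the helpers:

    import json

    def pool_sample(ceph_df_output, pool_id):
        df = json.loads(ceph_df_output)
        for pool in df['pools']:
            if pool['id'] == pool_id:
                return (pool['name'],
                        pool['stats']['objects'],
                        pool['stats']['kb_used'])
        return None  # unknown pool id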
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
- - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. 
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
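Annotation: connect_amqp_by_unit above defaults the port from the ssl flag (5671 for amqps, 5672 for plain amqp) before handing off to pika. A trimmed sketch of that logic; note the ssl= keyword matches the pre-1.0 pika API these tests were written against (pika 1.x replaced it with ssl_options):

    import pika

    def amqp_connect(host, username, password, use_ssl=False, port=None):
        if port is None:
            port = 5671 if use_ssl else 5672
        creds = pika.PlainCredentials(username, password)
        params = pika.ConnectionParameters(host=host, port=port,
                                           credentials=creds,
                                           ssl=use_ssl)  # pika < 1.0 API
        return pika.BlockingConnection(params)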
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. 
{} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-proxy/tests/charmhelpers/core/__init__.py b/ceph-proxy/tests/charmhelpers/core/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-proxy/tests/charmhelpers/core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-proxy/tests/charmhelpers/core/decorators.py b/ceph-proxy/tests/charmhelpers/core/decorators.py deleted file mode 100644 index 6ad41ee4..00000000 --- a/ceph-proxy/tests/charmhelpers/core/decorators.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2014 Canonical Ltd. 
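Annotation: validate_memcache above closes by grepping /etc/memcached.conf for the expected port (-p) and listen address (-l) option lines. The check reduces to the following sketch, with `contents` being the file text:

    def memcached_conf_ok(contents, expected):
        found = []
        for key, value in expected.items():
            for line in contents.split('\n'):
                # memcached.conf uses "-p 11211"-style option lines
                if line.startswith(key) and line.split()[-1] == value:
                    found.append(key)
        return sorted(found) == sorted(expected.keys())

    # memcached_conf_ok(text, {'-p': '11211', '-l': '::1'})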
-# -# Authors: -# Edward Hope-Morley -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raise the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/ceph-proxy/tests/charmhelpers/core/files.py b/ceph-proxy/tests/charmhelpers/core/files.py deleted file mode 100644 index fdd82b75..00000000 --- a/ceph-proxy/tests/charmhelpers/core/files.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -__author__ = 'Jorge Niedbalski ' - -import os -import subprocess - - -def sed(filename, before, after, flags='g'): - """ - Search and replaces the given pattern on filename. - - :param filename: relative or absolute file path. - :param before: expression to be replaced (see 'man sed') - :param after: expression to replace with (see 'man sed') - :param flags: sed-compatible regex flags in example, to make - the search and replace case insensitive, specify ``flags="i"``. - The ``g`` flag is always specified regardless, so you do not - need to remember to include it when overriding this parameter. - :returns: If the sed command exit code was zero then return, - otherwise raise CalledProcessError. - """ - expression = r's/{0}/{1}/{2}'.format(before, - after, flags) - - return subprocess.check_call(["sed", "-i", "-r", "-e", - expression, - os.path.expanduser(filename)]) diff --git a/ceph-proxy/tests/charmhelpers/core/fstab.py b/ceph-proxy/tests/charmhelpers/core/fstab.py deleted file mode 100644 index d9fa9152..00000000 --- a/ceph-proxy/tests/charmhelpers/core/fstab.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -__author__ = 'Jorge Niedbalski R. 
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/ceph-proxy/tests/charmhelpers/core/hookenv.py b/ceph-proxy/tests/charmhelpers/core/hookenv.py deleted file mode 100644 index 68800074..00000000 --- a/ceph-proxy/tests/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1353 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
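Annotation: the Fstab class removed here remains importable from charmhelpers.core.fstab in hook code; typical use goes through its two classmethods. A sketch (the device and mountpoint are illustrative):

    from charmhelpers.core.fstab import Fstab

    # Append an entry; returns False if the device already has one.
    Fstab.add('/dev/vdb1', '/srv/data', 'ext4', options='defaults,noatime')

    # Remove the entry again, keyed by mountpoint.
    Fstab.remove_by_mountpoint('/srv/data')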
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
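Annotation: hookenv's @cached above memoises on the JSON-serialised (func, args, kwargs) key, and flush() evicts every cache entry whose key contains a given substring. In practice, as a sketch:

    from charmhelpers.core.hookenv import cached, flush

    @cached
    def lookup(attribute):
        print('computing {}'.format(attribute))
        return attribute.upper()

    lookup('foo')   # computes and caches
    lookup('foo')   # cache hit, no print
    flush('foo')    # evicts cache keys containing 'foo'
    lookup('foo')   # computed afresh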
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. 
- - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. 
- :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (exc_json, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
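Annotation: relation_set above feature-detects juju's --file support by grepping relation-set --help rather than comparing version numbers. That probe generalises to any hook tool; a sketch:

    import subprocess

    def tool_supports_flag(tool, flag):
        # Older jujus lack some flags; sniff --help instead of versions.
        help_text = subprocess.check_output([tool, '--help'],
                                            universal_newlines=True)
        return flag in help_text

    # tool_supports_flag('relation-set', '--file')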
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. 
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
- - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. - if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -class NoNetworkBinding(Exception): - pass - - -def charm_dir(): - """Return the root directory of the current charm""" - d = os.environ.get('JUJU_CHARM_DIR') - if d is not None: - return d - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. 
-
-
-def status_set(workload_state, message):
-    """Set the workload state with a message
-
-    Use status-set to set the workload state with a message which is visible
-    to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message instead.
-
-    workload_state -- valid juju workload state.
-    message -- status update message
-    """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
-    try:
-        ret = subprocess.call(cmd)
-        if ret == 0:
-            return
-    except OSError as e:
-        if e.errno != errno.ENOENT:
-            raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
-                                                    message)
-    log(log_message, level='INFO')
-
-
-def status_get():
-    """Retrieve the previously set juju workload state and message
-
-    If the status-get command is not found then assume this is juju < 1.23
-    and return 'unknown', ""
-
-    """
-    cmd = ['status-get', "--format=json", "--include-data"]
-    try:
-        raw_status = subprocess.check_output(cmd)
-    except OSError as e:
-        if e.errno == errno.ENOENT:
-            return ('unknown', "")
-        else:
-            raise
-    else:
-        status = json.loads(raw_status.decode("UTF-8"))
-        return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
-    def inner_translate_exc1(f):
-        @wraps(f)
-        def inner_translate_exc2(*args, **kwargs):
-            try:
-                return f(*args, **kwargs)
-            except from_exc:
-                raise to_exc
-
-        return inner_translate_exc2
-
-    return inner_translate_exc1
-
-
-def application_version_set(version):
-    """Charm authors may trigger this command from any hook to output what
-    version of the application is running. This could be a package version,
-    for instance postgres version 9.5. It could also be a build number or
-    version control revision identifier, for instance git sha 6fb7ba68. """
-
-    cmd = ['application-version-set']
-    cmd.append(version)
-    try:
-        subprocess.check_call(cmd)
-    except OSError:
-        log("Application Version: {}".format(version))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def goal_state():
-    """Juju goal state values"""
-    cmd = ['goal-state', '--format=json']
-    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
-    """Does the current unit hold the juju leadership
-
-    Uses juju to determine whether the current unit is the leader of its peers
-    """
-    cmd = ['is-leader', '--format=json']
-    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
-    """Juju leader get value(s)"""
-    cmd = ['leader-get', '--format=json'] + [attribute or '-']
-    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
-    """Juju leader set value(s)"""
-    # Don't log secrets.
-    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
-    cmd = ['leader-set']
-    settings = settings or {}
-    settings.update(kwargs)
-    for k, v in settings.items():
-        if v is None:
-            cmd.append('{}='.format(k))
-        else:
-            cmd.append('{}={}'.format(k, v))
-    subprocess.check_call(cmd)
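A small sketch of the intended division of labour between the leadership helpers above; the setting name and value are hypothetical:

    from charmhelpers.core.hookenv import is_leader, leader_get, leader_set

    def share_cluster_secret():
        if is_leader():
            # Only the elected leader may write leader settings; Juju
            # then replicates them to all peer units.
            leader_set({'cluster-secret': 'example-value'})
        else:
            return leader_get('cluster-secret')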
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
-    """Used while a hook is running to let Juju know that a
-    payload has been started."""
-    cmd = ['payload-register']
-    for x in [ptype, klass, pid]:
-        cmd.append(x)
-    subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
-    """Used while a hook is running to let Juju know
-    that a payload has been manually stopped. The class and id provided
-    must match a payload that has been previously registered with juju using
-    payload-register."""
-    cmd = ['payload-unregister']
-    for x in [klass, pid]:
-        cmd.append(x)
-    subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
-    """Used to update the current status of a registered payload.
-    The class and id provided must match a payload that has been previously
-    registered with juju using payload-register. The status must be one of the
-    following: starting, started, stopping, stopped"""
-    cmd = ['payload-status-set']
-    for x in [klass, pid, status]:
-        cmd.append(x)
-    subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
-    """Used to fetch the resource path of the given name.
-
-    The name must match a resource defined in metadata.yaml.
-
-    Returns either a path or False if the resource is not available.
-    """
-    if not name:
-        return False
-
-    cmd = ['resource-get', name]
-    try:
-        return subprocess.check_output(cmd).decode('UTF-8')
-    except subprocess.CalledProcessError:
-        return False
-
-
-@cached
-def juju_version():
-    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
-    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
-    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
-    return subprocess.check_output([jujud, 'version'],
-                                   universal_newlines=True).strip()
-
-
-def has_juju_version(minimum_version):
-    """Return True if the Juju version is at least the provided version"""
-    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
-    '''Schedule a callback to run before the main hook.
-
-    Callbacks are run in the order they were added.
-
-    This is useful for modules and classes to perform initialization
-    and inject behavior. In particular:
-
-        - Run common code before all of your hooks, such as logging
-          the hook name or interesting relation data.
-        - Defer object or module initialization that requires a hook
-          context until we know there actually is a hook context,
-          making testing easier.
-        - Rather than requiring charm authors to include boilerplate to
-          invoke your helper's behavior, have it run automatically if
-          your object is instantiated or module imported.
-
-    This is not at all useful after your hook framework has been launched.
-    '''
-    global _atstart
-    _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
-    '''Schedule a callback to run on successful hook completion.
-
-    Callbacks are run in the reverse order that they were added.'''
-    _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
-    '''Hook frameworks must invoke this before running the main hook body.'''
-    global _atstart
-    for callback, args, kwargs in _atstart:
-        callback(*args, **kwargs)
-    del _atstart[:]
-
-
-def _run_atexit():
-    '''Hook frameworks must invoke this after the main hook body has
-    successfully completed. Do not invoke it if the hook fails.'''
-    global _atexit
-    for callback, args, kwargs in reversed(_atexit):
-        callback(*args, **kwargs)
-    del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
-    '''
-    Deprecated since Juju 2.3; use network_get()
-
-    Retrieve the primary network address for a named binding
-
-    :param binding: string. The name of a relation or extra-binding
-    :return: string.
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. - """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. 
- - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. - - This function is to be used on the providing side of the - relation, and provides the ranges of addresses that client - connections may come from. The result is uninteresting on - the consuming side of a relation (unit == local_unit()). - - Returns a stable list of subnets in CIDR format. - eg. ['192.168.1.0/24', '2001::F00F/128'] - - If egress-subnets is not available, falls back to using the published - ingress-address, or finally private-address. - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] - """ - def _to_range(addr): - if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: - addr += '/32' - elif ':' in addr and '/' not in addr: # IPv6 - addr += '/128' - return addr - - settings = relation_get(rid=rid, unit=unit) - if 'egress-subnets' in settings: - return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] - if 'ingress-address' in settings: - return [_to_range(settings['ingress-address'])] - if 'private-address' in settings: - return [_to_range(settings['private-address'])] - return [] # Should never happen - - -def unit_doomed(unit=None): - """Determines if the unit is being removed from the model - - Requires Juju 2.4.1. - - :param unit: string unit name, defaults to local_unit - :side effect: calls goal_state - :side effect: calls local_unit - :side effect: calls has_juju_version - :return: True if the unit is being removed, already gone, or never existed - """ - if not has_juju_version("2.4.1"): - # We cannot risk blindly returning False for 'we don't know', - # because that could cause data loss; if call sites don't - # need an accurate answer, they likely don't need this helper - # at all. - # goal-state existed in 2.4.0, but did not handle removals - # correctly until 2.4.1. - raise NotImplementedError("is_doomed") - if unit is None: - unit = local_unit() - gs = goal_state() - units = gs.get('units', {}) - if unit not in units: - return True - # I don't think 'dead' units ever show up in the goal-state, but - # check anyway in addition to 'dying'. - return units[unit]['status'] in ('dying', 'dead') diff --git a/ceph-proxy/tests/charmhelpers/core/host.py b/ceph-proxy/tests/charmhelpers/core/host.py deleted file mode 100644 index e9fd38a0..00000000 --- a/ceph-proxy/tests/charmhelpers/core/host.py +++ /dev/null @@ -1,1042 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-#  Nick Moffitt
-#  Matthew Wedgwood
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-import six
-
-from contextlib import contextmanager
-from collections import OrderedDict
-from .hookenv import log, DEBUG, local_unit
-from .fstab import Fstab
-from charmhelpers.osplatform import get_platform
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
-    from charmhelpers.core.host_factory.ubuntu import (
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-    )  # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
-    from charmhelpers.core.host_factory.centos import (
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-    )  # flake8: noqa -- ignore F401 for this import
-
-UPDATEDB_PATH = '/etc/updatedb.conf'
-
-def service_start(service_name, **kwargs):
-    """Start a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be started. The follow-
-    ing example starts the ceph-osd service for instance id=4:
-
-        service_start('ceph-osd', id=4)
-
-    :param service_name: the name of the service to start
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('start', service_name, **kwargs)
-
-
-def service_stop(service_name, **kwargs):
-    """Stop a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be stopped. The follow-
-    ing example stops the ceph-osd service for instance id=4:
-
-        service_stop('ceph-osd', id=4)
-
-    :param service_name: the name of the service to stop
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services.
These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('stop', service_name, **kwargs)
-
-
-def service_restart(service_name, **kwargs):
-    """Restart a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be restarted. The follow-
-    ing example restarts the ceph-osd service for instance id=4:
-
-        service_restart('ceph-osd', id=4)
-
-    :param service_name: the name of the service to restart
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    return service('restart', service_name, **kwargs)
-
-
-def service_reload(service_name, restart_on_failure=False, **kwargs):
-    """Reload a system service, optionally falling back to restart if
-    reload fails.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be reloaded. The follow-
-    ing example reloads the ceph-osd service for instance id=4:
-
-        service_reload('ceph-osd', id=4)
-
-    :param service_name: the name of the service to reload
-    :param restart_on_failure: boolean indicating whether to fallback to a
-                               restart if the reload fails.
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    service_result = service('reload', service_name, **kwargs)
-    if not service_result and restart_on_failure:
-        service_result = service('restart', service_name, **kwargs)
-    return service_result
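A sketch of the fallback behaviour service_reload() provides; apache2 stands in for any reload-capable service:

    from charmhelpers.core.host import service_reload

    # Try a graceful reload first; if the init system reports failure,
    # fall back to a full restart and surface the combined result.
    if not service_reload('apache2', restart_on_failure=True):
        raise RuntimeError('apache2 could not be reloaded or restarted')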
- """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Reenable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. - """ - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. 
- """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. - - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - 
"""Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. - :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not 
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
-    """Create a directory"""
-    log("Making dir {} {}:{} {:o}".format(path, owner, group,
-                                          perms))
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    realpath = os.path.abspath(path)
-    path_exists = os.path.exists(realpath)
-    if path_exists and force:
-        if not os.path.isdir(realpath):
-            log("Removing non-directory file {} prior to mkdir()".format(path))
-            os.unlink(realpath)
-            os.makedirs(realpath, perms)
-    elif not path_exists:
-        os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
-    os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
-    """Create or overwrite a file with the contents of a byte string."""
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    # let's see if we can grab the file and compare the contents, to avoid
-    # doing a write.
-    existing_content = None
-    existing_uid, existing_gid = None, None
-    try:
-        with open(path, 'rb') as target:
-            existing_content = target.read()
-        stat = os.stat(path)
-        existing_uid, existing_gid = stat.st_uid, stat.st_gid
-    except (IOError, OSError):
-        pass
-    if content != existing_content:
-        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
-            level=DEBUG)
-        with open(path, 'wb') as target:
-            os.fchown(target.fileno(), uid, gid)
-            os.fchmod(target.fileno(), perms)
-            if six.PY3 and isinstance(content, six.string_types):
-                content = content.encode('UTF-8')
-            target.write(content)
-        return
-    # the contents were the same, but we might still need to change the
-    # ownership.
-    if existing_uid != uid:
-        log("Changing uid on already existing content: {} -> {}"
-            .format(existing_uid, uid), level=DEBUG)
-        os.chown(path, uid, -1)
-    if existing_gid != gid:
-        log("Changing gid on already existing content: {} -> {}"
-            .format(existing_gid, gid), level=DEBUG)
-        os.chown(path, -1, gid)
-
-
-def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab"""
-    return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file"""
-    return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
-    """Mount a filesystem at a particular mountpoint"""
-    cmd_args = ['mount']
-    if options is not None:
-        cmd_args.extend(['-o', options])
-    cmd_args.extend([device, mountpoint])
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_add(device, mountpoint, filesystem, options=options)
-    return True
-
-
-def umount(mountpoint, persist=False):
-    """Unmount a filesystem"""
-    cmd_args = ['umount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_remove(mountpoint)
-    return True
-
-
-def mounts():
-    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
-    with open('/proc/mounts') as f:
-        # [['/mount/point','/dev/path'],[...]]
-        system_mounts = [m[1::-1] for m in [l.strip().split()
-                                            for l in f.readlines()]]
-    return system_mounts
-
-
-def fstab_mount(mountpoint):
-    """Mount filesystem using fstab"""
-    cmd_args = ['mount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {}\n{}'.format(mountpoint, e.output))
-        return False
-    return True
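A sketch showing why write_file() pairs naturally with file_hash() (defined just below) in an idempotent config hook; the path and content are hypothetical:

    from charmhelpers.core.host import write_file, file_hash

    before = file_hash('/etc/demo/demo.conf')
    # write_file() skips the write when the content is unchanged, fixing
    # ownership and permissions only if they drifted.
    write_file('/etc/demo/demo.conf', b'setting = 1\n', perms=0o644)
    if file_hash('/etc/demo/demo.conf') != before:
        print('config changed; dependent services need a restart')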
- """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. 
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
-                             restart_functions=None):
-    """Helper function to perform the restart_on_change function.
-
-    This is provided for decorators to restart services if files described
-    in the restart_map have changed after an invocation of lambda_f().
-
-    @param lambda_f: function to call.
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop, start or restart a service
-    @param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    @returns result of lambda_f()
-    """
-    if restart_functions is None:
-        restart_functions = {}
-    checksums = {path: path_hash(path) for path in restart_map}
-    r = lambda_f()
-    # create a list of lists of the services to restart
-    restarts = [restart_map[path]
-                for path in restart_map
-                if path_hash(path) != checksums[path]]
-    # create a flat list of ordered services without duplicates from lists
-    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
-    if services_list:
-        actions = ('stop', 'start') if stopstart else ('restart',)
-        for service_name in services_list:
-            if service_name in restart_functions:
-                restart_functions[service_name](service_name)
-            else:
-                for action in actions:
-                    service(action, service_name)
-    return r
-
-
-def pwgen(length=None):
-    """Generate a random password."""
-    if length is None:
-        # A random length is fine to pick with a weak PRNG
-        length = random.choice(range(35, 45))
-    alphanumeric_chars = [
-        l for l in (string.ascii_letters + string.digits)
-        if l not in 'l0QD1vAEIOUaeiou']
-    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
-    # actual password
-    random_generator = random.SystemRandom()
-    random_chars = [
-        random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return ''.join(random_chars)
-
-
-def is_phy_iface(interface):
-    """Returns True if interface is not virtual, otherwise False."""
-    if interface:
-        sys_net = '/sys/class/net'
-        if os.path.isdir(sys_net):
-            for iface in glob.glob(os.path.join(sys_net, '*')):
-                if '/virtual/' in os.path.realpath(iface):
-                    continue
-
-                if interface == os.path.basename(iface):
-                    return True
-
-    return False
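Usage of pwgen() is a one-liner; worth noting only that the length argument is optional:

    from charmhelpers.core.host import pwgen

    secret = pwgen()          # random length between 35 and 44 characters
    token = pwgen(length=16)  # fixed length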
-
-    NOTE: the provided interface is expected to be physical
-    """
-    if interface:
-        iface_path = '/sys/class/net/%s' % (interface)
-        if os.path.exists(iface_path):
-            if '/virtual/' in os.path.realpath(iface_path):
-                return None
-
-            master = os.path.join(iface_path, 'master')
-            if os.path.exists(master):
-                master = os.path.realpath(master)
-                # make sure it is a bond master
-                if os.path.exists(os.path.join(master, 'bonding')):
-                    return os.path.basename(master)
-
-    return None
-
-
-def list_nics(nic_type=None):
-    """Return a list of nics of given type(s)"""
-    if isinstance(nic_type, six.string_types):
-        int_types = [nic_type]
-    else:
-        int_types = nic_type
-
-    interfaces = []
-    if nic_type:
-        for int_type in int_types:
-            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8')
-            ip_output = ip_output.split('\n')
-            ip_output = (line for line in ip_output if line)
-            for line in ip_output:
-                if line.split()[1].startswith(int_type):
-                    matched = re.search('.*: (' + int_type +
-                                        r'[0-9]+\.[0-9]+)@.*', line)
-                    if matched:
-                        iface = matched.groups()[0]
-                    else:
-                        iface = line.split()[1].replace(":", "")
-
-                    if iface not in interfaces:
-                        interfaces.append(iface)
-    else:
-        cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-        ip_output = (line.strip() for line in ip_output if line)
-
-        key = re.compile(r'^[0-9]+:\s+(.+):')
-        for line in ip_output:
-            matched = re.search(key, line)
-            if matched:
-                iface = matched.group(1)
-                iface = iface.partition("@")[0]
-                if iface not in interfaces:
-                    interfaces.append(iface)
-
-    return interfaces
-
-
-def set_nic_mtu(nic, mtu):
-    """Set the Maximum Transmission Unit (MTU) on a network interface."""
-    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
-    subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
-    """Return the Maximum Transmission Unit (MTU) for a network interface."""
-    cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-    mtu = ""
-    for line in ip_output:
-        words = line.split()
-        if 'mtu' in words:
-            mtu = words[words.index("mtu") + 1]
-    return mtu
-
-
-def get_nic_hwaddr(nic):
-    """Return the Media Access Control (MAC) for a network interface."""
-    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8')
-    hwaddr = ""
-    words = ip_output.split()
-    if 'link/ether' in words:
-        hwaddr = words[words.index('link/ether') + 1]
-    return hwaddr
-
-
-@contextmanager
-def chdir(directory):
-    """Change the current working directory to a different directory for a code
-    block and return the previous directory after the block exits. Useful to
-    run commands from a specified directory.
-
-    :param str directory: The directory path to change to for this context.
-    """
-    cur = os.getcwd()
-    try:
-        yield os.chdir(directory)
-    finally:
-        os.chdir(cur)
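A sketch of the chdir() context manager above; the directory is hypothetical:

    import subprocess
    from charmhelpers.core.host import chdir

    # The previous working directory is restored even if the command fails.
    with chdir('/var/lib/demo'):
        subprocess.check_call(['ls', '-l'])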
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """Recursively change user and group ownership of files and directories
-    in given path. Doesn't chown path itself by default, only its children.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    :param bool follow_links: Also follow and chown links if True
-    :param bool chowntopdir: Also chown path itself if True
-    """
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    if follow_links:
-        chown = os.chown
-    else:
-        chown = os.lchown
-
-    if chowntopdir:
-        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
-        if not broken_symlink:
-            chown(path, uid, gid)
-    for root, dirs, files in os.walk(path, followlinks=follow_links):
-        for name in dirs + files:
-            full = os.path.join(root, name)
-            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
-            if not broken_symlink:
-                chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
-    """Recursively change user and group ownership of files and directories
-    in a given path, not following symbolic links. See the documentation for
-    'os.lchown' for more information.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    """
-    chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
-    """Returns a tuple containing the username & groupname owning the path.
-
-    :param str path: the string path to retrieve the ownership
-    :return tuple(str, str): A (username, groupname) tuple containing the
-                             name of the user and group owning the path.
-    :raises OSError: if the specified path does not exist
-    """
-    stat = os.stat(path)
-    username = pwd.getpwuid(stat.st_uid)[0]
-    groupname = grp.getgrgid(stat.st_gid)[0]
-    return username, groupname
-
-
-def get_total_ram():
-    """The total amount of system RAM in bytes.
-
-    This is what is reported by the OS, and may be overcommitted when
-    there are multiple containers hosted on the same machine.
-    """
-    with open('/proc/meminfo', 'r') as f:
-        for line in f.readlines():
-            if line:
-                key, value, unit = line.split()
-                if key == 'MemTotal:':
-                    assert unit == 'kB', 'Unknown unit'
-                    return int(value) * 1024  # Classic, not KiB.
-        raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
-    """Determine whether unit is running in a container
-
-    @return: boolean indicating if unit is in a container
-    """
-    if init_is_systemd():
-        # Detect using systemd-detect-virt
-        return subprocess.call(['systemd-detect-virt',
-                                '--container']) == 0
-    else:
-        # Detect using upstart container file marker
-        return os.path.exists(UPSTART_CONTAINER_TYPE)
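A sketch combining the two host-introspection helpers above to size a hypothetical cache:

    from charmhelpers.core.host import get_total_ram, is_container

    cache_bytes = get_total_ram() // 10
    if is_container():
        # Inside a container the reported total may be overcommitted,
        # so cap the figure conservatively.
        cache_bytes = min(cache_bytes, 128 * 1024 * 1024)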
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
-    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
-
-    This method has no effect if the path specified by updatedb_path does not
-    exist or is not a file.
-
-    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
-    @param updatedb_path: the path to the updatedb.conf file
-    """
-    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
-        # If the updatedb.conf file doesn't exist then don't attempt to update
-        # the file as the package providing mlocate may not be installed on
-        # the local system
-        return
-
-    with open(updatedb_path, 'r+') as f_id:
-        updatedb_text = f_id.read()
-        output = updatedb(updatedb_text, path)
-        f_id.seek(0)
-        f_id.write(output)
-        f_id.truncate()
-
-
-def updatedb(updatedb_text, new_path):
-    lines = [line for line in updatedb_text.split("\n")]
-    for i, line in enumerate(lines):
-        if line.startswith("PRUNEPATHS="):
-            paths_line = line.split("=")[1].replace('"', '')
-            paths = paths_line.split(" ")
-            if new_path not in paths:
-                paths.append(new_path)
-                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
-    output = "\n".join(lines)
-    return output
-
-
-def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
-    """ Modulo distribution
-
-    This helper uses the unit number, a modulo value and a constant wait time
-    to produce a calculated wait time distribution. This is useful in large
-    scale deployments to distribute load during an expensive operation such as
-    service restarts.
-
-    If you have 1000 nodes that need to restart, 100 at a time with 1 minute
-    between groups:
-
-      time.sleep(modulo_distribution(modulo=100, wait=60))
-      restart()
-
-    If you need restarts to happen serially, set modulo to the exact number of
-    nodes and set a high constant wait time:
-
-      time.sleep(modulo_distribution(modulo=10, wait=120))
-      restart()
-
-    @param modulo: int The modulo number creates the group distribution
-    @param wait: int The constant time wait value
-    @param non_zero_wait: boolean Override unit % modulo == 0,
-                          return modulo * wait. Used to avoid collisions with
-                          leader nodes which are often given priority.
-    @return: int Calculated time to wait for unit operation
-    """
-    unit_number = int(local_unit().split('/')[1])
-    calculated_wait_time = (unit_number % modulo) * wait
-    if non_zero_wait and calculated_wait_time == 0:
-        return modulo * wait
-    else:
-        return calculated_wait_time
diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py b/ceph-proxy/tests/charmhelpers/core/host_factory/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py b/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py
deleted file mode 100644
index 7781a396..00000000
--- a/ceph-proxy/tests/charmhelpers/core/host_factory/centos.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import subprocess
-import yum
-import os
-
-from charmhelpers.core.strutils import BasicStringComparator
-
-
-class CompareHostReleases(BasicStringComparator):
-    """Provide comparisons of Host releases.
- - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index a6d375af..00000000 --- a/ceph-proxy/tests/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,91 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. 
- - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/ceph-proxy/tests/charmhelpers/core/hugepage.py b/ceph-proxy/tests/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/ceph-proxy/tests/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. 
- - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/ceph-proxy/tests/charmhelpers/core/kernel.py b/ceph-proxy/tests/charmhelpers/core/kernel.py deleted file mode 100644 index 2d404528..00000000 --- a/ceph-proxy/tests/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/ceph-proxy/tests/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/ceph-proxy/tests/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-proxy/tests/charmhelpers/core/services/__init__.py b/ceph-proxy/tests/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/ceph-proxy/tests/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/ceph-proxy/tests/charmhelpers/core/services/base.py b/ceph-proxy/tests/charmhelpers/core/services/base.py deleted file mode 100644 index 179ad4f0..00000000 --- a/ceph-proxy/tests/charmhelpers/core/services/base.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -from inspect import getargspec -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": , - "required_data": , - "provided_data": , - "data_ready": , - "data_lost": , - "start": , - "stop": , - "ports": , - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. 
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
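[Editor's note] To make the optional two-parameter form of provide_data() concrete, here is a hypothetical provider (the class name, relation name, and returned keys are invented for illustration) that tailors its data per remote service and withholds it until this service is ready::

    class WebsiteProvider(object):
        # 'name' selects which relation the returned dict is set on.
        name = 'website'

        def provide_data(self, remote_service, service_ready):
            # Called once per connected service (not per unit); only
            # publish once our own data_ready callbacks have run.
            if not service_ready:
                return {}
            return {'remote': remote_service, 'port': 80}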
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-proxy/tests/charmhelpers/core/services/helpers.py b/ceph-proxy/tests/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d2..00000000 --- a/ceph-proxy/tests/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. 
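[Editor's note] Since get_data() flattens all ready units into one anonymous list, a subclass is needed when the consumer must know which unit contributed which record, as the docstring notes. A hypothetical sketch (the class name and the '__unit__' key are invented here) that keeps the stock readiness logic but annotates each entry::

    class TrackedRelation(RelationContext):
        name = 'db'
        interface = 'mysql'
        required_keys = ['host', 'password']

        def get_data(self):
            if not hookenv.relation_ids(self.name):
                return
            ns = self.setdefault(self.name, [])
            for rid in sorted(hookenv.relation_ids(self.name)):
                for unit in sorted(hookenv.related_units(rid)):
                    reldata = hookenv.relation_get(rid=rid, unit=unit)
                    if self._is_ready(reldata):
                        # Preserve the origin unit alongside its data.
                        reldata['__unit__'] = unit
                        ns.append(reldata)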
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/ceph-proxy/tests/charmhelpers/core/strutils.py b/ceph-proxy/tests/charmhelpers/core/strutils.py deleted file mode 100644 index e8df0452..00000000 --- a/ceph-proxy/tests/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. 
- """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/ceph-proxy/tests/charmhelpers/core/sysctl.py b/ceph-proxy/tests/charmhelpers/core/sysctl.py deleted file mode 100644 index 1f188d8c..00000000 --- a/ceph-proxy/tests/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/ceph-proxy/tests/charmhelpers/core/templating.py b/ceph-proxy/tests/charmhelpers/core/templating.py deleted file mode 100644 index 9014015c..00000000 --- a/ceph-proxy/tests/charmhelpers/core/templating.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
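[Editor's note] A typical render() call for reference, writing a charm template to disk with non-default ownership; all paths and context keys here are illustrative::

    from charmhelpers.core import templating

    templating.render(
        source='ceph.conf',                  # under $CHARM_DIR/templates
        target='/etc/ceph/ceph.conf',
        context={'mon_hosts': '10.0.0.1 10.0.0.2'},
        owner='root', group='ceph', perms=0o640,
    )

The sysctl.create() helper deleted just above follows the same pattern in miniature: given a dict (or YAML string) and a path such as /etc/sysctl.d/50-charm.conf, it writes the key=value pairs and runs sysctl -p on the result.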
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/ceph-proxy/tests/charmhelpers/core/unitdata.py b/ceph-proxy/tests/charmhelpers/core/unitdata.py deleted file mode 100644 index ab554327..00000000 --- a/ceph-proxy/tests/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
- -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. - >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import itertools -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu ' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are not persisted unless :meth:`flush` is called. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. - - Note: to facilitate unit testing, ':memory:' can be passed as the - path parameter which causes sqlite3 to only build the db in memory. - This should only be used for testing purposes. 
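[Editor's note] The ':memory:' escape hatch mentioned above makes the store trivially testable without touching the unit's real state database; a self-contained sketch built from the deleted API::

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')            # sqlite db exists only in RAM
    with db.hook_scope('install'):      # scope writes to a hook revision
        db.set('x', 1)
    assert db.get('x') == 1
    db.close()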
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/ceph-proxy/tests/charmhelpers/osplatform.py b/ceph-proxy/tests/charmhelpers/osplatform.py deleted file mode 100644 index d9a4d5c0..00000000 --- a/ceph-proxy/tests/charmhelpers/osplatform.py +++ /dev/null @@ -1,25 +0,0 @@ -import platform - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) From 582467e112d898a36187e2636086ec70246a7b52 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 4 Oct 2018 10:47:21 -0500 Subject: [PATCH 1575/2699] Update requirements Also remove tests/charm-helpers if present, in favor of the pip-installed charm-helpers. Change-Id: I57ebccde4c04b1b6b312f152ea778f8ad820b196 --- ceph-fs/.gitignore | 1 + ceph-fs/requirements.txt | 9 ++++++--- ceph-fs/src/layer.yaml | 2 ++ ceph-fs/src/test-requirements.txt | 22 +++++++++++----------- ceph-fs/test-requirements.txt | 10 ++++++++-- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/ceph-fs/.gitignore b/ceph-fs/.gitignore index a759aa3f..018b2708 100644 --- a/ceph-fs/.gitignore +++ b/ceph-fs/.gitignore @@ -6,3 +6,4 @@ interfaces __pycache__ *.pyc .idea +.stestr diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 6fe30907..20f335d2 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -1,4 +1,7 @@ -# Requirements to build the charm -charm-tools +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Intead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +# +# Build requirements +charm-tools>=2.4.4 simplejson -flake8 diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index 5a6786a2..bb34e6ff 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,5 +1,7 @@ includes: ['layer:basic', 'layer:apt', 'interface:ceph-mds'] options: + status: + patch-hookenv: False apt: packages: - python3-pyxattr diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 16d0adb9..f0138637 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -1,19 +1,18 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Intead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -charm-tools>=2.0.0 -requests==2.6.0 -# amulet deployment helpers -git+https://github.com/juju/charm-helpers#egg=charmhelpers +requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 +amulet>=1.14.3,<2.0;python_version=='2.7' +bundletester>=0.6.1,<1.0;python_version=='2.7' aodhclient>=0.1.0 +gnocchiclient>=3.1.0,<3.2.0 python-barbicanclient>=4.0.1 python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 @@ -28,6 +27,7 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # END: Amulet OpenStack Charm Helper Requirements -# NOTE: workaround for 14.04 pip/tox -pytz +pytz # workaround for 14.04 pip/tox +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 9a0bed81..ca62003b 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -1,7 +1,13 @@ -# Unit test requirements +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. 
Intead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +# +# Lint and unit test requirements flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 +requests>=2.18.4 charms.reactive mock>=1.2 +nose>=1.3.7 coverage>=3.6 -git+https://github.com/openstack/charms.openstack#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack From d5a8ff6cc4d5fc8f70e28ad1fff8aecdb415abdb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 20 Sep 2018 11:16:21 +0000 Subject: [PATCH 1576/2699] Pass all CS_ vars to tox env to pickup timeout Change-Id: Ida9fed2033b36ff8baaab19e3d9b708ac524ef16 --- ceph-fs/src/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 628b3909..0e36e84b 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 AMULET_SETUP_TIMEOUT=5400 whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* +passenv = HOME TERM AMULET_* CS_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} From cd44f1d3fb0edc3aab8257c99ff04ef05221ce78 Mon Sep 17 00:00:00 2001 From: Nicolas Pochet Date: Tue, 7 Aug 2018 11:56:28 +0200 Subject: [PATCH 1577/2699] Migrate tests to zaza Depends-On: https://github.com/openstack-charmers/zaza/pull/93 Change-Id: I9a19960fdb239eb5a8d421f135285e89b8405267 --- ceph-osd/Makefile | 5 +- ceph-osd/charm-helpers-tests.yaml | 7 - ceph-osd/test-requirements.txt | 9 +- ceph-osd/tests/README.md | 9 - ceph-osd/tests/basic_deployment.py | 842 -------------------- ceph-osd/tests/bundles/bionic-queens.yaml | 86 ++ ceph-osd/tests/bundles/bionic-rocky.yaml | 100 +++ ceph-osd/tests/bundles/cosmic-rocky.yaml | 95 +++ ceph-osd/tests/bundles/trusty-icehouse.yaml | 122 +++ ceph-osd/tests/bundles/trusty-mitaka.yaml | 136 ++++ ceph-osd/tests/bundles/xenial-mitaka.yaml | 86 ++ ceph-osd/tests/bundles/xenial-ocata.yaml | 100 +++ ceph-osd/tests/bundles/xenial-pike.yaml | 100 +++ ceph-osd/tests/bundles/xenial-queens.yaml | 100 +++ ceph-osd/tests/dev-basic-cosmic-rocky | 23 - ceph-osd/tests/gate-basic-bionic-queens | 23 - ceph-osd/tests/gate-basic-bionic-rocky | 25 - ceph-osd/tests/gate-basic-trusty-icehouse | 23 - ceph-osd/tests/gate-basic-trusty-mitaka | 25 - ceph-osd/tests/gate-basic-xenial-mitaka | 23 - ceph-osd/tests/gate-basic-xenial-ocata | 25 - ceph-osd/tests/gate-basic-xenial-pike | 25 - ceph-osd/tests/gate-basic-xenial-queens | 25 - ceph-osd/tests/tests.yaml | 38 +- ceph-osd/tox.ini | 48 +- 25 files changed, 959 insertions(+), 1141 deletions(-) delete mode 100644 ceph-osd/charm-helpers-tests.yaml delete mode 100644 ceph-osd/tests/README.md delete mode 100644 ceph-osd/tests/basic_deployment.py create mode 100644 ceph-osd/tests/bundles/bionic-queens.yaml create mode 100644 ceph-osd/tests/bundles/bionic-rocky.yaml create mode 100644 ceph-osd/tests/bundles/cosmic-rocky.yaml create mode 100644 ceph-osd/tests/bundles/trusty-icehouse.yaml create mode 100644 ceph-osd/tests/bundles/trusty-mitaka.yaml create mode 100644 ceph-osd/tests/bundles/xenial-mitaka.yaml create mode 100644 ceph-osd/tests/bundles/xenial-ocata.yaml create mode 100644 ceph-osd/tests/bundles/xenial-pike.yaml create mode 100644 ceph-osd/tests/bundles/xenial-queens.yaml delete mode 100755 ceph-osd/tests/dev-basic-cosmic-rocky delete mode 100755 ceph-osd/tests/gate-basic-bionic-queens delete mode 100755 ceph-osd/tests/gate-basic-bionic-rocky delete mode 100755 
ceph-osd/tests/gate-basic-trusty-icehouse delete mode 100755 ceph-osd/tests/gate-basic-trusty-mitaka delete mode 100755 ceph-osd/tests/gate-basic-xenial-mitaka delete mode 100755 ceph-osd/tests/gate-basic-xenial-ocata delete mode 100755 ceph-osd/tests/gate-basic-xenial-pike delete mode 100755 ceph-osd/tests/gate-basic-xenial-queens diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 7609385a..d06a6904 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -9,8 +9,8 @@ test: @tox -e py27 functional_test: - @echo Starting Amulet tests... - @tox -e func27 + @echo Starting Zaza functional tests... + @tox -e func bin/charm_helpers_sync.py: @mkdir -p bin @@ -23,7 +23,6 @@ bin/git_sync.py: ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml ceph-sync: bin/git_sync.py $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml deleted file mode 100644 index f64f0dde..00000000 --- a/ceph-osd/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - contrib.amulet - - contrib.openstack.amulet - - core - - osplatform diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 2b2c0e11..bb01e1f6 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -6,11 +6,7 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -requests>=2.18.4 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -amulet>=1.14.3,<2.0;python_version=='2.7' -bundletester>=0.6.1,<1.0;python_version=='2.7' +requests==2.18.4 python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -23,7 +19,6 @@ python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -# END: Amulet OpenStack Charm Helper Requirements -# NOTE: workaround for 14.04 pip/tox pytz pyudev # for ceph-* charm unit tests (not mocked?) +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' diff --git a/ceph-osd/tests/README.md b/ceph-osd/tests/README.md deleted file mode 100644 index 046be7fb..00000000 --- a/ceph-osd/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Overview - -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -For full details on functional testing of OpenStack charms please refer to -the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) -section of the OpenStack Charm Guide. diff --git a/ceph-osd/tests/basic_deployment.py b/ceph-osd/tests/basic_deployment.py deleted file mode 100644 index 985e0d94..00000000 --- a/ceph-osd/tests/basic_deployment.py +++ /dev/null @@ -1,842 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import re -import time - -import keystoneclient -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import client as nova_client - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OpenStackAmuletUtils, - DEBUG, - # ERROR -) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephOsdBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph-osd deployment.""" - - def __init__(self, series=None, openstack=None, source=None, - stable=False): - """Deploy the entire test environment.""" - super(CephOsdBasicDeployment, self).__init__(series, openstack, - source, stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = [] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self.d.sentry.wait() - self._initialize_tests() - - def _add_services(self): - """Add services - - Add the services that we're testing, where ceph-osd is local, - and the rest of the services are from lp branches that are - compatible with the local charm (e.g. stable or next). - """ - this_service = { - 'name': 'ceph-osd', - 'units': 3, - 'storage': {'osd-devices': 'cinder,10G'}} - other_services = [ - {'name': 'ceph-mon', 'units': 3}, - {'name': 'percona-cluster'}, - {'name': 'keystone'}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}, - {'name': 'cinder-ceph'}, - ] - super(CephOsdBasicDeployment, self)._add_services(this_service, - other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'nova-compute:amqp': 'rabbitmq-server:amqp', - 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph-mon:client', - 'keystone:shared-db': 'percona-cluster:shared-db', - 'glance:shared-db': 'percona-cluster:shared-db', - 'glance:identity-service': 'keystone:identity-service', - 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'percona-cluster:shared-db', - 'cinder:identity-service': 'keystone:identity-service', - 'cinder:amqp': 'rabbitmq-server:amqp', - 'cinder:image-service': 'glance:image-service', - 'cinder-ceph:storage-backend': 'cinder:storage-backend', - 'cinder-ceph:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd', - } - super(CephOsdBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - keystone_config = {'admin-password': 'openstack', - 'admin-token': 'ubuntutesting'} - pxc_config = { - 'max-connections': 1000, - } - - cinder_config = {'block-device': 'None', 'glance-api-version': '2'} - ceph_config = { - 'monitor-count': '3', - 'auth-supported': 'none', - } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt
to change that. - ceph_osd_config = { - 'osd-devices': '/srv/ceph /dev/test-non-existent' - } - - configs = {'keystone': keystone_config, - 'percona-cluster': pxc_config, - 'cinder': cinder_config, - 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config} - super(CephOsdBasicDeployment, self)._configure_services(configs) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.pxc_sentry = self.d.sentry['percona-cluster'][0] - self.keystone_sentry = self.d.sentry['keystone'][0] - self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] - self.nova_sentry = self.d.sentry['nova-compute'][0] - self.glance_sentry = self.d.sentry['glance'][0] - self.cinder_sentry = self.d.sentry['cinder'][0] - self.ceph0_sentry = self.d.sentry['ceph-mon'][0] - self.ceph1_sentry = self.d.sentry['ceph-mon'][1] - self.ceph2_sentry = self.d.sentry['ceph-mon'][2] - self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] - self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] - self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - - # Authenticate admin with keystone - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone) - # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin(self.keystone) - - # Authenticate admin with nova endpoint - self.nova = nova_client.Client(2, session=self.keystone_session) - - keystone_ip = self.keystone_sentry.info['public-address'] - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - self.demo_project = 'demoProject' - self.demo_domain = 'demoDomain' - if self._get_openstack_release() >= self.xenial_queens: - self.create_users_v3() - self.demo_user_session, auth = u.get_keystone_session( - keystone_ip, - self.demo_user, - 'password', - api_version=3, - user_domain_name=self.demo_domain, - project_domain_name=self.demo_domain, - project_name=self.demo_project - ) - self.keystone_demo = keystone_client_v3.Client( - session=self.demo_user_session) - self.nova_demo = nova_client.Client( - 2, - session=self.demo_user_session) - else: - self.create_users_v2() - # Authenticate demo user with keystone - self.keystone_demo = \ - u.authenticate_keystone_user( - self.keystone, user=self.demo_user, - password='password', - tenant=self.demo_tenant) - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - user=self.demo_user, - password='password', - tenant=self.demo_tenant) - - def create_users_v3(self): - try: - self.keystone.projects.find(name=self.demo_project) - except keystoneclient.exceptions.NotFound: - domain = self.keystone.domains.create( - self.demo_domain, - description='Demo Domain', - enabled=True - ) - project = self.keystone.projects.create( - self.demo_project, - domain, - description='Demo Project', - enabled=True, - ) - user = self.keystone.users.create( - self.demo_user, - domain=domain.id, - project=self.demo_project, - password='password', - email='demov3@demo.com', - description='Demo', - enabled=True) - role = self.keystone.roles.find(name='Admin') - 
self.keystone.roles.grant( - role.id, - user=user.id, - project=project.id) - - def create_users_v2(self): - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1, - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph0_sentry: ceph_processes, - self.ceph1_sentry: ceph_processes, - self.ceph2_sentry: ceph_processes, - self.ceph_osd_sentry: {'ceph-osd': [2, 3]} - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_services(self): - """Verify the expected services are running on the service units.""" - - services = { - self.glance_sentry: ['glance-registry', - 'glance-api'], - self.cinder_sentry: ['cinder-scheduler', - 'cinder-volume'], - } - - if self._get_openstack_release() < self.xenial_ocata: - services[self.cinder_sentry].append('cinder-api') - else: - services[self.cinder_sentry].append('apache2') - - if self._get_openstack_release() < self.xenial_mitaka: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - ] - services[self.ceph0_sentry] = ceph_services - services[self.ceph1_sentry] = ceph_services - services[self.ceph2_sentry] = ceph_services - services[self.ceph_osd_sentry] = [ - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] - - if self._get_openstack_release() >= self.trusty_liberty: - services[self.keystone_sentry] = ['apache2'] - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_200_ceph_osd_ceph_relation(self): - """Verify the ceph-osd to ceph relation data.""" - u.log.debug('Checking ceph-osd:ceph-mon relation data...') - unit = self.ceph_osd_sentry - relation = ['mon', 'ceph-mon:osd'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph-osd to ceph-mon', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_201_ceph0_to_ceph_osd_relation(self): - """Verify the ceph0 to ceph-osd relation data.""" - u.log.debug('Checking ceph0:ceph-osd mon relation data...') - unit = self.ceph0_sentry - (fsid, _) = unit.run('leader-get fsid') - relation = ['osd', 'ceph-osd:mon'] - expected = { - 'osd_bootstrap_key': u.not_null, - 'private-address': u.valid_ip, - 'auth': u'none', - 'ceph-public-address': u.valid_ip, - 'fsid': fsid, - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph0 to ceph-osd', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_202_ceph1_to_ceph_osd_relation(self): - """Verify the ceph1 to ceph-osd relation data.""" - u.log.debug('Checking ceph1:ceph-osd mon relation data...') - unit = self.ceph1_sentry - (fsid, _) = unit.run('leader-get 
fsid') - relation = ['osd', 'ceph-osd:mon'] - expected = { - 'osd_bootstrap_key': u.not_null, - 'private-address': u.valid_ip, - 'auth': u'none', - 'ceph-public-address': u.valid_ip, - 'fsid': fsid, - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph1 to ceph-osd', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_203_ceph2_to_ceph_osd_relation(self): - """Verify the ceph2 to ceph-osd relation data.""" - u.log.debug('Checking ceph2:ceph-osd mon relation data...') - unit = self.ceph2_sentry - (fsid, _) = unit.run('leader-get fsid') - relation = ['osd', 'ceph-osd:mon'] - expected = { - 'osd_bootstrap_key': u.not_null, - 'private-address': u.valid_ip, - 'auth': u'none', - 'ceph-public-address': u.valid_ip, - 'fsid': fsid, - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph2 to ceph-osd', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_300_ceph_osd_config(self): - """Verify the data in the ceph config file.""" - u.log.debug('Checking ceph config file data...') - mon_unit = self.ceph0_sentry - (fsid, _) = mon_unit.run('leader-get fsid') - - unit = self.ceph_osd_sentry - conf = '/etc/ceph/ceph.conf' - expected = { - 'global': { - 'auth cluster required': 'none', - 'auth service required': 'none', - 'auth client required': 'none', - 'fsid': fsid, - 'log to syslog': 'false', - 'err to syslog': 'false', - 'clog to syslog': 'false' - }, - 'mon': { - 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' - }, - 'mds': { - 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' - }, - 'osd': { - 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', - }, - } - - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "ceph config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_302_cinder_rbd_config(self): - """Verify the cinder config file data regarding ceph.""" - u.log.debug('Checking cinder (rbd) config file data...') - unit = self.cinder_sentry - conf = '/etc/cinder/cinder.conf' - section_key = 'cinder-ceph' - expected = { - section_key: { - 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' - } - } - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "cinder (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_304_glance_rbd_config(self): - """Verify the glance config file data regarding ceph.""" - u.log.debug('Checking glance (rbd) config file data...') - unit = self.glance_sentry - conf = '/etc/glance/glance-api.conf' - config = { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - config['stores'] = ('glance.store.filesystem.Store,' - 'glance.store.http.Store,' - 'glance.store.rbd.Store') - section = 'glance_store' - else: - # Juno or earlier - section = 'DEFAULT' - - expected = {section: config} - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "glance (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_306_nova_rbd_config(self): - """Verify the nova config file data regarding ceph.""" - u.log.debug('Checking nova (rbd) config file 
data...') - unit = self.nova_sentry - conf = '/etc/nova/nova.conf' - expected = { - 'libvirt': { - 'rbd_user': 'nova-compute', - 'rbd_secret_uuid': u.not_null - } - } - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "nova (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_400_ceph_check_osd_pools(self): - """Check osd pools on all ceph units, expect them to be - identical, and expect specific pools to be present.""" - u.log.debug('Checking pools on ceph units...') - - expected_pools = self.get_ceph_expected_pools() - results = [] - sentries = [ - self.ceph_osd_sentry, - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - - # Check for presence of expected pools on each unit - u.log.debug('Expected pools: {}'.format(expected_pools)) - for sentry_unit in sentries: - pools = u.get_ceph_pools(sentry_unit) - results.append(pools) - - for expected_pool in expected_pools: - if expected_pool not in pools: - msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], - expected_pool)) - amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has (at least) the expected ' - 'pools.'.format(sentry_unit.info['unit_name'])) - - # Check that all units returned the same pool name:id data - ret = u.validate_list_of_identical_dicts(results) - if ret: - u.log.debug('Pool list results: {}'.format(results)) - msg = ('{}; Pool list results are not identical on all ' - 'ceph units.'.format(ret)) - amulet.raise_status(amulet.FAIL, msg=msg) - else: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - - def test_410_ceph_cinder_vol_create(self): - """Create and confirm a ceph-backed cinder volume, and inspect - ceph cinder pool object count as the volume is created - and deleted.""" - sentry_unit = self.ceph0_sentry - obj_count_samples = [] - pool_size_samples = [] - pools = u.get_ceph_pools(self.ceph0_sentry) - cinder_pool = pools['cinder-ceph'] - - # Check ceph cinder pool object count, disk space usage and pool name - u.log.debug('Checking ceph cinder pool original samples...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - expected = 'cinder-ceph' - if pool_name != expected: - msg = ('Ceph pool {} unexpected name (actual, expected): ' - '{}. 
{}'.format(cinder_pool, pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed cinder volume - cinder_vol = u.create_cinder_volume(self.cinder) - - # Re-check ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool samples after volume create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed cinder volume - u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") - - # Final check, ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool after volume delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph cinder pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "cinder pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Luminous (pike) ceph seems more efficient at disk usage so we cannot - # guarantee the ordering of kb_used - if self._get_openstack_release() < self.xenial_mitaka: - # Validate ceph cinder pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "cinder pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_412_ceph_glance_image_create_delete(self): - """Create and confirm a ceph-backed glance image, and inspect - ceph glance pool object count as the image is created - and deleted.""" - sentry_unit = self.ceph0_sentry - obj_count_samples = [] - pool_size_samples = [] - pools = u.get_ceph_pools(self.ceph0_sentry) - glance_pool = pools['glance'] - - # Check ceph glance pool object count, disk space usage and pool name - u.log.debug('Checking ceph glance pool original samples...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - expected = 'glance' - if pool_name != expected: - msg = ('Ceph glance pool {} unexpected name (actual, ' - 'expected): {}.
{}'.format(glance_pool, - pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, 'cirros-image-1') - - # Re-check ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed glance image - u.delete_resource(self.glance.images, - glance_img.id, msg="glance image") - - # Final check, ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph glance pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "glance pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Validate ceph glance pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "glance pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - all ceph units.""" - sentry_units = [ - self.ceph_osd_sentry, - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. 
- - def test_900_ceph_encryption(self): - """Verify that the new disk is added with encryption by checking for - Ceph's encryption keys directory""" - - if self._get_openstack_release() >= self.trusty_mitaka: - u.log.warn("Skipping encryption test for Mitaka") - return - sentry = self.ceph_osd_sentry - set_default = { - 'osd-encrypt': 'False', - 'osd-devices': '/dev/vdb /srv/ceph', - } - set_alternate = { - 'osd-encrypt': 'True', - 'osd-devices': '/dev/vdb /srv/ceph /srv/ceph_encrypted', - } - juju_service = 'ceph-osd' - u.log.debug('Making config change on {}...'.format(juju_service)) - mtime = u.get_sentry_time(sentry) - self.d.configure(juju_service, set_alternate) - unit_name = sentry.info['unit_name'] - - sleep_time = 30 - retry_count = 30 - file_mtime = None - time.sleep(sleep_time) - - filename = '/etc/ceph/dmcrypt-keys' - tries = 0 - retry_sleep_time = 10 - while tries <= retry_count and not file_mtime: - try: - stat = sentry.directory_stat(filename) - file_mtime = stat['mtime'] - self.log.debug('Attempt {} to get {} mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - self.d.configure(juju_service, set_default) - self.log.debug('Attempt {} to get {} mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - self.d.configure(juju_service, set_default) - - if not file_mtime: - self.log.warn('Could not determine mtime, assuming ' - 'folder does not exist') - amulet.raise_status('folder does not exist') - - if file_mtime >= mtime: - self.log.debug('Folder mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - else: - self.log.warn('Folder mtime is older than provided mtime ' - '(%s < %s) on %s' % (file_mtime, - mtime, unit_name)) - amulet.raise_status('Folder mtime is older than provided mtime') - - def test_901_blocked_when_non_pristine_disk_appears(self): - """ - Validate that charm goes into blocked state when it is presented with - new block devices that have foreign data on them. - - Instances used in UOSCI have a flavour with ephemeral storage in - addition to the bootable instance storage. The ephemeral storage - device is partitioned, formatted and mounted early in the boot process - by cloud-init. - - As long as the device is mounted the charm will not attempt to use it. - - If we unmount it and trigger the config-changed hook the block device - will appear as a new and previously untouched device for the charm. - - One of the first steps of device eligibility checks should be to make - sure we are seeing a pristine and empty device before doing any - further processing. - - As the ephemeral device will have data on it we can use it to validate - that these checks work as intended.
- """ - u.log.debug('Checking behaviour when non-pristine disks appear...') - u.log.debug('Configuring ephemeral-unmount...') - self.d.configure('ceph-osd', {'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb'}) - self._auto_wait_for_status(message=re.compile('Non-pristine.*'), - include_only=['ceph-osd']) - u.log.debug('Units now in blocked state, running zap-disk action...') - action_ids = [] - self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] - for unit in range(0, 3): - zap_disk_params = { - 'devices': '/dev/vdb', - 'i-really-mean-it': True, - } - action_id = u.run_action(self.d.sentry['ceph-osd'][unit], - 'zap-disk', params=zap_disk_params) - action_ids.append(action_id) - for unit in range(0, 3): - assert u.wait_on_action(action_ids[unit]), ( - 'zap-disk action failed.') - - u.log.debug('Running add-disk action...') - action_ids = [] - for unit in range(0, 3): - add_disk_params = { - 'osd-devices': '/dev/vdb', - } - action_id = u.run_action(self.d.sentry['ceph-osd'][unit], - 'add-disk', params=add_disk_params) - action_ids.append(action_id) - - for unit in range(0, 3): - assert u.wait_on_action(action_ids[unit]), ( - 'add-disk action failed.') - - u.log.debug('Wait for idle/ready status...') - self._auto_wait_for_status(include_only=['ceph-osd']) - - u.log.debug('OK') - - def test_910_pause_and_resume(self): - """The services can be paused and resumed. """ - u.log.debug('Checking pause and resume actions...') - sentry_unit = self.ceph_osd_sentry - - assert u.status_get(sentry_unit)[0] == "active" - - action_id = u.run_action(sentry_unit, "pause") - assert u.wait_on_action(action_id), "Pause action failed." - assert u.status_get(sentry_unit)[0] == "maintenance" - - action_id = u.run_action(sentry_unit, "resume") - assert u.wait_on_action(action_id), "Resume action failed." - assert u.status_get(sentry_unit)[0] == "active" - u.log.debug('OK') - - def test_911_blacklist(self): - """The blacklist actions execute and behave as expected. """ - u.log.debug('Checking blacklist-add-disk and' - 'blacklist-remove-disk actions...') - sentry_unit = self.ceph_osd_sentry - - assert u.status_get(sentry_unit)[0] == "active" - - # Attempt to add device with non-absolute path should fail - action_id = u.run_action(sentry_unit, - "blacklist-add-disk", - params={"osd-devices": "vda"}) - assert not u.wait_on_action(action_id), "completed" - assert u.status_get(sentry_unit)[0] == "active" - - # Attempt to add device with non-existent path should fail - action_id = u.run_action(sentry_unit, - "blacklist-add-disk", - params={"osd-devices": "/non-existent"}) - assert not u.wait_on_action(action_id), "completed" - assert u.status_get(sentry_unit)[0] == "active" - - # Attempt to add device with existent path should succeed - action_id = u.run_action(sentry_unit, - "blacklist-add-disk", - params={"osd-devices": "/dev/vda"}) - assert u.wait_on_action(action_id), "completed" - assert u.status_get(sentry_unit)[0] == "active" - - # Attempt to remove listed device should always succeed - action_id = u.run_action(sentry_unit, - "blacklist-remove-disk", - params={"osd-devices": "/dev/vda"}) - assert u.wait_on_action(action_id), "completed" - assert u.status_get(sentry_unit)[0] == "active" - u.log.debug('OK') - - def test_912_list_disks(self): - """The list-disks action execute. 
""" - u.log.debug('Checking list-disks action...') - sentry_unit = self.ceph_osd_sentry - - assert u.status_get(sentry_unit)[0] == "active" - - action_id = u.run_action(sentry_unit, "list-disks") - assert u.wait_on_action(action_id), "completed" - assert u.status_get(sentry_unit)[0] == "active" - u.log.debug('OK') diff --git a/ceph-osd/tests/bundles/bionic-queens.yaml b/ceph-osd/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..88c9b93e --- /dev/null +++ b/ceph-osd/tests/bundles/bionic-queens.yaml @@ -0,0 +1,86 @@ +series: bionic +applications: + ceph-osd: + charm: ceph-osd + series: bionic + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:rabbitmq-server + num_units: 1 + keystone: + charm: cs:keystone + num_units: 1 + nova-compute: + charm: cs:nova-compute + num_units: 1 + glance: + charm: cs:glance + num_units: 1 + cinder: + charm: cs:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:cinder-ceph + nova-cloud-controller: + charm: cs:nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..51c233ba --- /dev/null +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,100 @@ +series: bionic +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: bionic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:bionic-updates/rocky + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-updates/rocky + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-updates/rocky + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-updates/rocky + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + nova-compute: + charm: 
cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-rocky/proposed + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/cosmic-rocky.yaml b/ceph-osd/tests/bundles/cosmic-rocky.yaml new file mode 100644 index 00000000..434aaea8 --- /dev/null +++ b/ceph-osd/tests/bundles/cosmic-rocky.yaml @@ -0,0 +1,95 @@ +series: cosmic +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: cosmic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-rocky/proposed + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky/proposed +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + -
percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/trusty-icehouse.yaml b/ceph-osd/tests/bundles/trusty-icehouse.yaml new file mode 100644 index 00000000..7af06efc --- /dev/null +++ b/ceph-osd/tests/bundles/trusty-icehouse.yaml @@ -0,0 +1,122 @@ +series: trusty +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: trusty + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - 
keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml new file mode 100644 index 00000000..41ea263a --- /dev/null +++ b/ceph-osd/tests/bundles/trusty-mitaka.yaml @@ -0,0 +1,136 @@ +series: trusty +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: trusty + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + 
nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/xenial-mitaka.yaml b/ceph-osd/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 00000000..24316244 --- /dev/null +++ b/ceph-osd/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,86 @@ +series: xenial +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: xenial + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - 
nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/xenial-ocata.yaml b/ceph-osd/tests/bundles/xenial-ocata.yaml new file mode 100644 index 00000000..3de59c98 --- /dev/null +++ b/ceph-osd/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,100 @@ +series: xenial +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: xenial + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-ocata + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-ocata + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-ocata + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-ocata + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-ocata + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/xenial-pike.yaml b/ceph-osd/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..ceb778f2 --- /dev/null +++ b/ceph-osd/tests/bundles/xenial-pike.yaml @@ -0,0 +1,100 @@ +series: xenial +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: xenial + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-pike + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-pike + percona-cluster: + charm: 
cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-pike + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-pike + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-pike + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-pike +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/xenial-queens.yaml b/ceph-osd/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..33188ad6 --- /dev/null +++ b/ceph-osd/tests/bundles/xenial-queens.yaml @@ -0,0 +1,100 @@ +series: xenial +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: xenial + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-queens + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-queens + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-queens + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-queens + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-queens + 
cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-queens +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/dev-basic-cosmic-rocky b/ceph-osd/tests/dev-basic-cosmic-rocky deleted file mode 100755 index c6b6ef75..00000000 --- a/ceph-osd/tests/dev-basic-cosmic-rocky +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on cosmic-rocky.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='cosmic') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-bionic-queens b/ceph-osd/tests/gate-basic-bionic-queens deleted file mode 100755 index 0dafe812..00000000 --- a/ceph-osd/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-osd deployment on bionic-queens.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='bionic') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-bionic-rocky b/ceph-osd/tests/gate-basic-bionic-rocky deleted file mode 100755 index 2e0e4ee1..00000000 --- a/ceph-osd/tests/gate-basic-bionic-rocky +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on bionic-rocky.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-trusty-icehouse b/ceph-osd/tests/gate-basic-trusty-icehouse deleted file mode 100755 index d0bb7793..00000000 --- a/ceph-osd/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on trusty-icehouse.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='trusty') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-trusty-mitaka b/ceph-osd/tests/gate-basic-trusty-mitaka deleted file mode 100755 index 0fd67940..00000000 --- a/ceph-osd/tests/gate-basic-trusty-mitaka +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-osd deployment on trusty-mitaka.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-mitaka b/ceph-osd/tests/gate-basic-xenial-mitaka deleted file mode 100755 index bbf91dd0..00000000 --- a/ceph-osd/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on xenial-mitaka.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-ocata b/ceph-osd/tests/gate-basic-xenial-ocata deleted file mode 100755 index 2908c9b1..00000000 --- a/ceph-osd/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on xenial-ocata.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-pike b/ceph-osd/tests/gate-basic-xenial-pike deleted file mode 100755 index cc4a9cd2..00000000 --- a/ceph-osd/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-osd deployment on xenial-pike.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/ceph-osd/tests/gate-basic-xenial-queens b/ceph-osd/tests/gate-basic-xenial-queens deleted file mode 100755 index 61c7b06d..00000000 --- a/ceph-osd/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-osd deployment on xenial-queens.""" - -from basic_deployment import CephOsdBasicDeployment - -if __name__ == '__main__': - deployment = CephOsdBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index a03e7bad..1833e429 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,18 +1,20 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. 
-#python-packages: -reset_timeout: 600 +charm_name: ceph-osd +gate_bundles: + - trusty-icehouse + - trusty-mitaka + - xenial-mitaka + - xenial-ocata + - xenial-pike + - xenial-queens + - bionic-queens +smoke_bundles: + - bionic-queens +dev_bundles: + - bionic-rocky + - cosmic-rocky +configure: + - zaza.charm_tests.glance.setup.add_lts_image +tests: + - zaza.charm_tests.ceph.tests.CephLowLevelTest + - zaza.charm_tests.ceph.tests.CephRelationTest + - zaza.charm_tests.ceph.tests.CephTest diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 041c0e54..10ba3dc9 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -15,6 +15,7 @@ install_command = commands = ostestr {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* +deps = -r{toxinidir}/test-requirements.txt [testenv:py27] # ceph charms are Python3-only, but py27 unit test target @@ -43,49 +44,20 @@ commands = flake8 {posargs} hooks unit_tests tests actions lib [testenv:venv] commands = {posargs} -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + functest-run-suite --keep-model -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-smoke] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy - -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy - -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-dev] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + functest-run-suite --keep-model --dev [flake8] ignore = E402,E226 From 6ae9f03d76dbcfce3760a7d143ed6d618d411043 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 10 Oct 2018 12:36:37 +0000 Subject: [PATCH 1578/2699] Tests dir no longer need copy of charmhelpers Charmhelpers is now installed via pip for unit tests so stop 'Make sync' from pulling it down. 
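As an aside on the new declarative tests.yaml above: a zaza-style runner can be expected to map the gate/smoke/dev bundle lists onto deployable bundle files. A minimal sketch, assuming the conventional tests/bundles/<name>.yaml layout (the target-to-key mapping and paths here are illustrative assumptions, not shown in these patches):

    import yaml

    def bundles_for(target, charm_dir='.'):
        # map tox targets onto the tests.yaml keys introduced above
        key = {'func': 'gate_bundles',
               'func-smoke': 'smoke_bundles',
               'func-dev': 'dev_bundles'}[target]
        with open('{}/tests/tests.yaml'.format(charm_dir)) as f:
            spec = yaml.safe_load(f)
        # each entry names a bundle, e.g. tests/bundles/bionic-queens.yaml
        return ['tests/bundles/{}.yaml'.format(b) for b in spec[key]]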
Change-Id: I65cb2ffc3ac78215c1a3daba067d28241c090c57 --- ceph-mon/Makefile | 1 - ceph-mon/charm-helpers-tests.yaml | 7 ------- 2 files changed, 8 deletions(-) delete mode 100644 ceph-mon/charm-helpers-tests.yaml diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 5fdba5df..6a179635 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -23,7 +23,6 @@ bin/git_sync.py: ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml ceph-sync: bin/git_sync.py $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git diff --git a/ceph-mon/charm-helpers-tests.yaml b/ceph-mon/charm-helpers-tests.yaml deleted file mode 100644 index f64f0dde..00000000 --- a/ceph-mon/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - contrib.amulet - - contrib.openstack.amulet - - core - - osplatform From 9e6df6158042fe088f9ee011a2868d037223e6e0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 10 Oct 2018 12:36:37 +0000 Subject: [PATCH 1579/2699] Tests dir no longer need copy of charmhelpers Charmhelpers is now installed via pip for unit tests so stop 'Make sync' from pulling it down. Change-Id: I88dabb654626570ea2c4f07b0799ef6717e6c0bd --- ceph-osd/Makefile | 1 - ceph-osd/charm-helpers-tests.yaml | 7 ------- 2 files changed, 8 deletions(-) delete mode 100644 ceph-osd/charm-helpers-tests.yaml diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index 7609385a..d80cc0b0 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -23,7 +23,6 @@ bin/git_sync.py: ch-sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml ceph-sync: bin/git_sync.py $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git diff --git a/ceph-osd/charm-helpers-tests.yaml b/ceph-osd/charm-helpers-tests.yaml deleted file mode 100644 index f64f0dde..00000000 --- a/ceph-osd/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - contrib.amulet - - contrib.openstack.amulet - - core - - osplatform From 83178678a67d85a28c6dfe601608b8732083c954 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 10 Oct 2018 12:36:37 +0000 Subject: [PATCH 1580/2699] Tests dir no longer need copy of charmhelpers Charmhelpers is now installed via pip for unit tests so stop 'Make sync' from pulling it down. 
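Roughly, what the removed charm-helpers-tests.yaml used to drive, and what replaces it — a sketch, assuming charmhelpers is listed in test-requirements.txt:

    # before: 'make sync' vendored the listed modules into tests/charmhelpers
    #   python bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
    # after: the same modules resolve from the virtualenv's site-packages
    from charmhelpers.core import hookenv
    from charmhelpers.contrib.amulet import utils as amulet_utils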
Change-Id: I0434b927be571466f9b13aff0420dd108a70e478 --- ceph-radosgw/Makefile | 1 - ceph-radosgw/charm-helpers-tests.yaml | 7 ------- 2 files changed, 8 deletions(-) delete mode 100644 ceph-radosgw/charm-helpers-tests.yaml diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index c772127d..6813bb22 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -18,7 +18,6 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint test bzr push lp:charms/ceph-radosgw diff --git a/ceph-radosgw/charm-helpers-tests.yaml b/ceph-radosgw/charm-helpers-tests.yaml deleted file mode 100644 index 3a8c294e..00000000 --- a/ceph-radosgw/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - core - - contrib.amulet - - contrib.openstack.amulet - - osplatform From 66cf92f8e707e1611ad7119ef70db6ea9ef2860d Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 11 Oct 2018 15:15:07 +0100 Subject: [PATCH 1581/2699] Resync ceph helpers Resync ceph helpers, picking up fixes for: - Upgrades from Luminous to Mimic. - Correct build of OSD list in more complex CRUSH configurations, resolving upgrade issues. Closes-Bug: 1788722 Change-Id: I7d8fca74ec6eadae21a6e669e8b2522d9e4c9367 --- ceph-osd/lib/ceph/utils.py | 45 ++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 53281ea7..2ef48abe 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -579,29 +579,23 @@ def get_osd_tree(service): # Make sure children are present in the json if not json_tree['nodes']: return None - parent_nodes = [ - node for node in json_tree['nodes'] if node['type'] == 'root'] - child_ids = [] - for node in parent_nodes: - try: - child_ids = child_ids + node['children'] - except KeyError: - # skip if this parent has no children - continue - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) + host_nodes = [ + node for node in json_tree['nodes'] + if node['type'] == 'host' + ] + for host in host_nodes: + crush_list.append( + CrushLocation( + name=host.get('name'), + identifier=host['id'], + host=host.get('host'), + rack=host.get('rack'), + row=host.get('row'), + datacenter=host.get('datacenter'), + chassis=host.get('chassis'), + root=host.get('root') ) + ) return crush_list except ValueError as v: log("Unable to parse ceph tree json: {}. 
Error: {}".format( @@ -1525,7 +1519,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): """ Prepare a device for usage as a Ceph OSD using ceph-disk - :param: dev: Full path to use for OSD block device setup + :param: dev: Full path to use for OSD block device setup, + The function looks up realpath of the device :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption (unsupported) :param: bluestore: Use bluestore storage for OSD @@ -1557,7 +1552,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') - cmd.append(dev) + cmd.append(os.path.realpath(dev)) if osd_journal: least_used = find_least_used_utility_device(osd_journal) @@ -2539,6 +2534,7 @@ def dirs_need_ownership_update(service): ('firefly', 'hammer'), ('hammer', 'jewel'), ('jewel', 'luminous'), + ('luminous', 'mimic'), ]) # Map UCA codenames to ceph codenames @@ -2552,6 +2548,7 @@ def dirs_need_ownership_update(service): 'ocata': 'jewel', 'pike': 'luminous', 'queens': 'luminous', + 'rocky': 'mimic', } From 8ad71c10123703bd814bce0b5ad2f0233e62ea51 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 16 Oct 2018 09:33:05 +0100 Subject: [PATCH 1582/2699] Notify MON cluster of number of bootstrapped OSD's To allow the ceph-mon charm to better assess when the Ceph cluster is in a usable state, provide the number of OSD devices that where bootstrapped into the Ceph cluster over the relation to ceph-mon. This is used by the ceph-mon charm inconjunction with the 'expected-osd-count' configuration option to delay pool creation and issue of keys for clients until the expected number of OSD's have been bootstrapped into the cluster. Change-Id: I1370524f0f31120e3cb7305c5bc509a6494c5586 Closes-Bug: 1794878 --- ceph-osd/hooks/ceph_hooks.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 7494c3cd..a41586a4 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -499,6 +499,16 @@ def prepare_disks_and_activate(): ceph.tune_dev(dev) ceph.start_osds(get_devices()) + # Notify MON cluster as to how many OSD's this unit bootstrapped + # into the cluster + for r_id in relation_ids('mon'): + relation_set( + relation_id=r_id, + relation_settings={ + 'bootstrapped-osds': len(db.get('osd-devices', [])) + } + ) + def get_mon_hosts(): hosts = [] From efff311a6658461c3e29746ace848f8879406e8c Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 16 Oct 2018 11:40:09 +0100 Subject: [PATCH 1583/2699] Use fully qualified basepython for py{35,36} targets Make sure the exact python version is used for specific python unit testing targets. Update functional tests to include nova-cloud-controller. 
Change-Id: I8d57719fce4f152a105d8a61c08b556873105708 --- ceph-mon/tests/basic_deployment.py | 8 ++++++++ ceph-mon/tox.ini | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 8de30ef1..76f1f20a 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -76,6 +76,7 @@ def _add_services(self): {'name': 'glance'}, {'name': 'cinder'}, {'name': 'cinder-ceph'}, + {'name': 'nova-cloud-controller'}, ] super(CephBasicDeployment, self)._add_services(this_service, other_services) @@ -98,6 +99,13 @@ def _add_relations(self): 'cinder-ceph:storage-backend': 'cinder:storage-backend', 'cinder-ceph:ceph': 'ceph-mon:client', 'ceph-osd:mon': 'ceph-mon:osd', + 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:identity-service': 'keystone:' + 'identity-service', + 'nova-cloud-controller:cloud-compute': 'nova-compute:' + 'cloud-compute', + 'nova-cloud-controller:image-service': 'glance:image-service', } super(CephBasicDeployment, self)._add_relations(relations) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index fa749815..4a4eb12e 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -27,12 +27,12 @@ commands = /bin/true ; keep zuul happy until we change the py35 job [testenv:py35] -basepython = python3 +basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:py36] -basepython = python3 +basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 00b608ecd9bdea5c577b9e33cd37416cfc0fb159 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 11 Oct 2018 15:12:39 +0100 Subject: [PATCH 1584/2699] Resync ceph helpers Resync ceph helpers, picking up fixes for: - Upgrades from Luminous to Mimic. - Correct build of OSD list in more complex CRUSH configurations, resolving upgrade issues. Add nova-cloud-controller to amulet tests to achieve a complete deployment post landing of changes for Nova Cells v2 support. Closes-Bug: 1788722 Change-Id: I7a5c53c792ab958d94301de62cdd4d804f8a54f7 --- ceph-mon/lib/ceph/utils.py | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index a958dfd8..2ef48abe 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -579,21 +579,23 @@ def get_osd_tree(service): # Make sure children are present in the json if not json_tree['nodes']: return None - child_ids = json_tree['nodes'][0]['children'] - for child in json_tree['nodes']: - if child['id'] in child_ids: - crush_list.append( - CrushLocation( - name=child.get('name'), - identifier=child['id'], - host=child.get('host'), - rack=child.get('rack'), - row=child.get('row'), - datacenter=child.get('datacenter'), - chassis=child.get('chassis'), - root=child.get('root') - ) + host_nodes = [ + node for node in json_tree['nodes'] + if node['type'] == 'host' + ] + for host in host_nodes: + crush_list.append( + CrushLocation( + name=host.get('name'), + identifier=host['id'], + host=host.get('host'), + rack=host.get('rack'), + row=host.get('row'), + datacenter=host.get('datacenter'), + chassis=host.get('chassis'), + root=host.get('root') ) + ) return crush_list except ValueError as v: log("Unable to parse ceph tree json: {}. 
Error: {}".format( @@ -1517,7 +1519,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): """ Prepare a device for usage as a Ceph OSD using ceph-disk - :param: dev: Full path to use for OSD block device setup + :param: dev: Full path to use for OSD block device setup, + The function looks up realpath of the device :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption (unsupported) :param: bluestore: Use bluestore storage for OSD @@ -1549,7 +1552,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') - cmd.append(dev) + cmd.append(os.path.realpath(dev)) if osd_journal: least_used = find_least_used_utility_device(osd_journal) @@ -2531,6 +2534,7 @@ def dirs_need_ownership_update(service): ('firefly', 'hammer'), ('hammer', 'jewel'), ('jewel', 'luminous'), + ('luminous', 'mimic'), ]) # Map UCA codenames to ceph codenames @@ -2544,6 +2548,7 @@ def dirs_need_ownership_update(service): 'ocata': 'jewel', 'pike': 'luminous', 'queens': 'luminous', + 'rocky': 'mimic', } From e8e6eaa4c26ebd9c5977669114afe36f79a10de7 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 16 Oct 2018 09:44:48 +0100 Subject: [PATCH 1585/2699] Guard cluster operations until sufficient OSD's booted Ensure that broker requests are not processed and that client access keys are not issued until the expected number of OSD's have been bootstrapped into the cluster. This depends on presentation of the number of bootstrapped OSD's from the ceph-osd charm (see Depends-On). For upgraders, keys will have already been issued so there should be no impact on existing access to the Ceph cluster; The ceph-osd units will present the required relation data post upgrade at which point the charm will mark the cluster as ready for service and continue to process and pending requests. Change-Id: Id67e13c176fc8fd4953ba7c2cf7e33252810940c Depends-On: I1370524f0f31120e3cb7305c5bc509a6494c5586 Closes-Bug: 1794878 --- ceph-mon/hooks/ceph_hooks.py | 69 +++++++++++++++++++------- ceph-mon/unit_tests/test_ceph_hooks.py | 5 +- 2 files changed, 55 insertions(+), 19 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 6365ff78..79cff5f3 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -506,6 +506,46 @@ def related_osds(num_units=3): return False +def sufficient_osds(minimum_osds=3): + ''' + Determine if the minimum number of OSD's have been + bootstrapped into the cluster. + + @param expected_osds: The minimum number of OSD's required + @return: boolean indicating whether the required number of + OSD's where detected. + ''' + bootstrapped_osds = 0 + for r_id in relation_ids('osd'): + for unit in related_units(r_id): + unit_osds = relation_get( + attribute='bootstrapped-osds', + unit=unit, rid=r_id + ) + if unit_osds is not None: + bootstrapped_osds += int(unit_osds) + if bootstrapped_osds >= minimum_osds: + return True + return False + + +def ready_for_service(): + ''' + Determine whether the Ceph cluster is ready to service + storage traffic from clients + + @return: boolean indicating whether the Ceph cluster is + ready for pool creation/client usage. 
+ ''' + if not ceph.is_quorum(): + log('mon cluster is not in quorum', level=DEBUG) + return False + if not sufficient_osds(config('expected-osd-count') or 3): + log('insufficient osds bootstrapped', level=DEBUG) + return False + return True + + @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): @@ -516,8 +556,8 @@ def radosgw_relation(relid=None, unit=None): # NOTE: radosgw needs some usage OSD storage, so defer key # provision until OSD units are detected. - if ceph.is_quorum() and related_osds(): - log('mon cluster in quorum and osds related ' + if ready_for_service(): + log('mon cluster in quorum and osds bootstrapped ' '- providing radosgw with keys') public_addr = get_public_addr() data = { @@ -539,15 +579,13 @@ def radosgw_relation(relid=None, unit=None): log("Not leader - ignoring broker request", level=DEBUG) relation_set(relation_id=relid, relation_settings=data) - else: - log('mon cluster not in quorum or no osds - deferring key provision') @hooks.hook('mds-relation-changed') @hooks.hook('mds-relation-joined') def mds_relation_joined(relid=None, unit=None): - if ceph.is_quorum() and related_osds(): - log('mon cluster in quorum and OSDs related' + if ready_for_service(): + log('mon cluster in quorum and osds bootstrapped ' '- providing mds client with keys') mds_name = relation_get(attribute='mds-name', rid=relid, unit=unit) @@ -571,8 +609,6 @@ def mds_relation_joined(relid=None, unit=None): log("Not leader - ignoring mds broker request", level=DEBUG) relation_set(relation_id=relid, relation_settings=data) - else: - log('Waiting on mon quorum or min osds before provisioning mds keys') @hooks.hook('admin-relation-changed') @@ -582,7 +618,7 @@ def admin_relation_joined(relid=None): name = relation_get('keyring-name') if name is None: name = 'admin' - log('mon cluster in quorum - providing client with keys') + log('mon cluster in quorum - providing admin client with keys') mon_hosts = config('monitor-hosts') or ' '.join(get_mon_hosts()) data = {'key': ceph.get_named_key(name=name, caps=ceph.admin_caps), 'fsid': leader_get('fsid'), @@ -591,14 +627,13 @@ def admin_relation_joined(relid=None): } relation_set(relation_id=relid, relation_settings=data) - else: - log('mon cluster not in quorum - deferring key provision') @hooks.hook('client-relation-joined') def client_relation_joined(relid=None): - if ceph.is_quorum(): - log('mon cluster in quorum - providing client with keys') + if ready_for_service(): + log('mon cluster in quorum and osds bootstrapped ' + '- providing client with keys') service_name = None if relid is None: units = [remote_unit()] @@ -617,14 +652,14 @@ def client_relation_joined(relid=None): data['rbd-features'] = config('default-rbd-features') relation_set(relation_id=relid, relation_settings=data) - else: - log('mon cluster not in quorum - deferring key provision') @hooks.hook('client-relation-changed') def client_relation_changed(relid=None, unit=None): """Process broker requests from ceph client relations.""" - if ceph.is_quorum(): + if ready_for_service(): + log('mon cluster in quorum and osds bootstrapped ' + '- processing client broker requests') if not unit: unit = remote_unit() settings = relation_get(rid=relid, unit=unit) @@ -643,8 +678,6 @@ def client_relation_changed(relid=None, unit=None): } relation_set(relation_id=relid, relation_settings=data) - else: - log('mon cluster not in quorum', level=DEBUG) @hooks.hook('upgrade-charm.real') diff --git 
a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 83cb38ae..2ceec433 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -257,6 +257,7 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks, 'ready_for_service') @patch.object(ceph_hooks.ceph, 'is_quorum') @patch.object(ceph_hooks, 'remote_unit') @patch.object(ceph_hooks, 'relation_get') @@ -268,8 +269,10 @@ def test_client_relation_changed_non_rel_hook(self, relation_set, is_leader, relation_get, remote_unit, - is_quorum): + is_quorum, + ready_for_service): # Check for LP #1738154 + ready_for_service.return_value = True process_requests.return_value = 'AOK' is_leader.return_value = True relation_get.return_value = {'broker_req': 'req'} From 5d8a1697a8a4a1567f042d4bc8dc8adffd73d487 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 23 Oct 2018 11:45:43 -0700 Subject: [PATCH 1586/2699] Series Upgrade Implement the series-upgrade feature allowing to move between Ubuntu series. Change-Id: I75bee2a1351d2242dd96bc6e017485557254f644 --- ceph-fs/src/reactive/ceph_fs.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index b41affe5..dcb7fa82 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -17,9 +17,10 @@ import subprocess from charms import reactive -from charms.reactive import when, when_not +from charms.reactive import when, when_not, hook from charms.reactive.flags import set_flag, clear_flag, is_flag_set from charmhelpers.core import hookenv +from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( application_version_set, config, log, ERROR, cached, DEBUG, unit_get, network_get_primary_address, relation_ids, @@ -214,6 +215,14 @@ def assess_status(): """Assess status of current unit""" statuses = set([]) messages = set([]) + + # Handle Series Upgrade + if unitdata.kv().get('charm.vault.series-upgrading'): + status_set("blocked", + "Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return + if is_flag_set('cephfs.started'): (status, message) = log_mds() statuses.add(status) @@ -247,6 +256,7 @@ def log_mds(): else: return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds)) + # Per https://github.com/juju-solutions/charms.reactive/issues/33, # this module may be imported multiple times so ensure the # initialization hook is only registered once. I have to piggy back @@ -260,3 +270,19 @@ def log_mds(): # and the intialization provided an opertunity to be run. hookenv.atexit(assess_status) reactive._ceph_log_registered = True + + +# Series upgrade hooks are a special case and reacting to the hook directly +# makes sense as we may not want other charm code to run +@hook('pre-series-upgrade') +def pre_series_upgrade(): + """Handler for pre-series-upgrade. + """ + unitdata.kv().set('charm.vault.series-upgrading', True) + + +@hook('post-series-upgrade') +def post_series_upgrade(): + """Handler for post-series-upgrade. + """ + unitdata.kv().set('charm.vault.series-upgrading', False) From 70ccde00bd5b3e1605a0cc078a316ab15eb1c69c Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 23 Oct 2018 11:52:28 -0700 Subject: [PATCH 1587/2699] Series Upgrade Implement the series-upgrade feature allowing to move between Ubuntu series. 
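Note that the unit-local kv flag in the ceph-fs handler above keeps the name 'charm.vault.series-upgrading', apparently carried over from the vault charm; any unique key behaves the same way. A sketch with a charm-specific key (the key name is an assumption, not what the patch uses):

    from charmhelpers.core import unitdata

    kv = unitdata.kv()
    kv.set('charm.ceph-fs.series-upgrading', True)   # pre-series-upgrade
    if kv.get('charm.ceph-fs.series-upgrading'):
        pass  # assess_status() reports "blocked" while the flag is set
    kv.set('charm.ceph-fs.series-upgrading', False)  # post-series-upgrade
    kv.flush()  # reactive hooks flush automatically at exit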
Change-Id: Ic5df91f3560e8928585b25339cef992653dc59d7
---
 ceph-proxy/hooks/ceph_hooks.py       | 34 ++++++++++++++++++++++++++++
 ceph-proxy/hooks/post-series-upgrade |  1 +
 ceph-proxy/hooks/pre-series-upgrade  |  1 +
 3 files changed, 36 insertions(+)
 create mode 120000 ceph-proxy/hooks/post-series-upgrade
 create mode 120000 ceph-proxy/hooks/pre-series-upgrade

diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py
index 21a9d98e..202f6541 100755
--- a/ceph-proxy/hooks/ceph_hooks.py
+++ b/ceph-proxy/hooks/ceph_hooks.py
@@ -38,6 +38,13 @@
 )
 from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.openstack.alternatives import install_alternative
+from charmhelpers.contrib.openstack.utils import (
+    clear_unit_paused,
+    clear_unit_upgrading,
+    is_unit_upgrading_set,
+    set_unit_paused,
+    set_unit_upgrading,
+)
 from charmhelpers.core.templating import render


@@ -217,6 +224,12 @@ def ready():

 def assess_status():
     '''Assess status of current unit'''
+    if is_unit_upgrading_set():
+        status_set("blocked",
+                   "Ready for do-release-upgrade and reboot. "
+                   "Set complete when finished.")
+        return
+
     if ready():
         status_set('active', 'Ready to proxy settings')
     else:
@@ -229,6 +242,27 @@ def update_status():
     log('Updating status.')


+@hooks.hook('pre-series-upgrade')
+def pre_series_upgrade():
+    log("Running prepare series upgrade hook", "INFO")
+    # NOTE: The Ceph packages handle the series upgrade gracefully.
+    # In order to indicate the step of the series upgrade process for
+    # administrators and automated scripts, the charm sets the paused and
+    # upgrading states.
+    set_unit_paused()
+    set_unit_upgrading()
+
+
+@hooks.hook('post-series-upgrade')
+def post_series_upgrade():
+    log("Running complete series upgrade hook", "INFO")
+    # In order to indicate the step of the series upgrade process for
+    # administrators and automated scripts, the charm clears the paused and
+    # upgrading states.
+    clear_unit_paused()
+    clear_unit_upgrading()
+
+
 if __name__ == '__main__':
     try:
         hooks.execute(sys.argv)
diff --git a/ceph-proxy/hooks/post-series-upgrade b/ceph-proxy/hooks/post-series-upgrade
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-proxy/hooks/post-series-upgrade
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/ceph-proxy/hooks/pre-series-upgrade b/ceph-proxy/hooks/pre-series-upgrade
new file mode 120000
index 00000000..52d96630
--- /dev/null
+++ b/ceph-proxy/hooks/pre-series-upgrade
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file

From 641e493a741a3d7d736c62d1488ae71225e5fb32 Mon Sep 17 00:00:00 2001
From: inspurericzhang
Date: Fri, 26 Oct 2018 11:27:59 +0800
Subject: [PATCH 1588/2699] [Trivial Fix] Replace Chinese punctuation with
 English punctuation

Curly quotes (Chinese punctuation) are usually input by a Chinese
input method. When read in an English context, they cause some
confusion.

Change-Id: Ie25b76c8b3fc6accb677a024661957c3cd0920a3
---
 ceph-proxy/actions.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml
index 3f8e5dfe..7303b69d 100644
--- a/ceph-proxy/actions.yaml
+++ b/ceph-proxy/actions.yaml
@@ -132,7 +132,7 @@ list-erasure-profiles:
   description: List the names of all erasure code profiles
   additionalProperties: false
 list-pools:
-  description: List your cluster’s pools
+  description: List your cluster's pools
   additionalProperties: false
 set-pool-max-bytes:
   description: Set pool quotas for the maximum number of bytes.
@@ -165,7 +165,7 @@ rename-pool:
   required: [pool-name, new-name]
   additionalProperties: false
 pool-statistics:
-  description: Show a pool’s utilization statistics
+  description: Show a pool's utilization statistics
   additionalProperties: false
 snapshot-pool:
   description: Snapshot a pool

From 804e4cd61aa911d12e148a563a782a00cbfe39b0 Mon Sep 17 00:00:00 2001
From: inspurericzhang
Date: Fri, 26 Oct 2018 13:49:49 +0800
Subject: [PATCH 1589/2699] [Trivial Fix] Replace Chinese punctuation with
 English punctuation

Curly quotes (Chinese punctuation) are usually input by a Chinese
input method. When read in an English context, they cause some
confusion.

Change-Id: I8d2e630f4cd4ae9044b7f2854b0f75cc7d1aaf50
---
 ceph-mon/actions.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml
index 84259dbd..f8a397b8 100644
--- a/ceph-mon/actions.yaml
+++ b/ceph-mon/actions.yaml
@@ -134,7 +134,7 @@ list-erasure-profiles:
   description: List the names of all erasure code profiles
   additionalProperties: false
 list-pools:
-  description: List your cluster’s pools
+  description: List your cluster's pools
   additionalProperties: false
 set-pool-max-bytes:
   description: Set pool quotas for the maximum number of bytes.
@@ -167,7 +167,7 @@ rename-pool:
   required: [pool-name, new-name]
   additionalProperties: false
 pool-statistics:
-  description: Show a pool’s utilization statistics
+  description: Show a pool's utilization statistics
   additionalProperties: false
 snapshot-pool:
   description: Snapshot a pool

From c2cf4be56679ecdaa498c0849db6bff8c52da18c Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Mon, 23 Jul 2018 12:46:07 +0100
Subject: [PATCH 1590/2699] Avoid unnecessary rewrites of ceph.conf

The charm should avoid writing ceph.conf unless it absolutely needs
to, since a rewrite can clash with other processes that might be
reading the file (such as ceph-disk called by udev).
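The fix below swaps a bare open()/write() for charmhelpers' write_file, presumably because the latter leaves the file untouched when nothing changed. The guard it relies on looks roughly like this (a sketch that ignores the ownership handling write_file also provides):

    import os

    def write_if_changed(path, content, perms=0o644):
        existing = None
        if os.path.exists(path):
            with open(path) as f:
                existing = f.read()
        if existing != content:
            # only rewrite (and bump mtime) when content differs, so
            # concurrent readers such as udev-run ceph-disk never race
            # against a needless rewrite
            with open(path, 'w') as f:
                f.write(content)
        os.chmod(path, perms)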
Change-Id: I3790b5b16fa1473f1c3271b795b3d32c5e8d2fad Closes-Bug: #1783113 --- ceph-osd/hooks/ceph_hooks.py | 6 +++--- ceph-osd/unit_tests/test_ceph_hooks.py | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index a41586a4..d1b82d42 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -394,9 +394,9 @@ def emit_cephconf(upgrading=False): charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name()) mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(), group=ceph.ceph_user()) - with open(charm_ceph_conf, 'w') as cephconf: - context = get_ceph_context(upgrading) - cephconf.write(render_template('ceph.conf', context)) + context = get_ceph_context(upgrading) + write_file(charm_ceph_conf, render_template('ceph.conf', context), + ceph.ceph_user(), ceph.ceph_user(), 0o644) install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf, 90) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 543d3564..20e20895 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -522,6 +522,27 @@ def fake_config(key): config = {'osd-devices': '/srv/osd', 'osd-format': 'ext4'} self.assertTrue(ceph_hooks.use_short_objects()) + @patch.object(ceph_hooks, 'write_file') + @patch.object(ceph_hooks.ceph, 'ceph_user') + @patch.object(ceph_hooks, 'install_alternative') + @patch.object(ceph_hooks, 'render_template') + @patch.object(ceph_hooks, 'get_ceph_context') + @patch.object(ceph_hooks, 'service_name') + @patch.object(ceph_hooks, 'mkdir') + def test_emit_ceph_conf(self, mock_mkdir, mock_service_name, + mock_get_ceph_context, mock_render_template, + mock_install_alternative, mock_ceph_user, + mock_write_file): + mock_service_name.return_value = 'testsvc' + mock_ceph_user.return_value = 'ceph' + mock_get_ceph_context.return_value = {} + mock_render_template.return_value = "awesome ceph config" + + ceph_hooks.emit_cephconf() + + self.assertTrue(mock_write_file.called) + self.assertTrue(mock_install_alternative.called) + @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'relation_set') From 94718b3b37f2a8df5bc136da554a4b2df4c48c6c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 1 Nov 2018 22:18:40 -0500 Subject: [PATCH 1591/2699] Fix lint in unit tests re: py3-first and py2 compat Change-Id: Id4a78f4f98b3fc93c42018fc9616b2482a4ff80a --- ceph-osd/unit_tests/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index 941ddb09..90db851e 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -131,7 +131,7 @@ def patch_open(): Yields the mock for "open" and "file", respectively.''' mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) + mock_file = MagicMock(spec=file) # noqa - transitional py2 py3 @contextmanager def stub_open(*args, **kwargs): From 70d1b7892403f59680b567b145b97554b39a91cf Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 1 Nov 2018 22:18:31 -0500 Subject: [PATCH 1592/2699] Fix lint in unit tests re: py3-first and py2 compat Change-Id: Iaafe2c368390706a61976d3f13fd83d515ce4b3e --- ceph-proxy/unit_tests/test_ceph_broker.py | 2 +- ceph-proxy/unit_tests/test_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py 
b/ceph-proxy/unit_tests/test_ceph_broker.py index b720d94a..bae4b3d7 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -28,7 +28,7 @@ def test_process_requests_missing_api_version(self, mock_log): def test_process_requests_invalid_api_version(self, mock_log): req = json.dumps({'api-version': 2, 'ops': []}) rc = ceph_broker.process_requests(req) - print "Return: %s" % rc + print("Return: {}".format(rc)) self.assertEqual(json.loads(rc), {'exit-code': 1, 'stderr': 'Missing or invalid api version (2)'}) diff --git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py index 663a0488..0a774821 100644 --- a/ceph-proxy/unit_tests/test_utils.py +++ b/ceph-proxy/unit_tests/test_utils.py @@ -110,7 +110,7 @@ def patch_open(): Yields the mock for "open" and "file", respectively.''' mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) + mock_file = MagicMock(spec=file) # noqa - transitional py2 py3 @contextmanager def stub_open(*args, **kwargs): From 468183062ac1467cb8872e0ca06cf0080d33144f Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 2 Nov 2018 09:07:30 +0100 Subject: [PATCH 1593/2699] Fix py3 Syntax error Change-Id: I1aab038cbd9c010296c8c3aa67ae50b3ee27cfd4 --- ceph-radosgw/tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 5106abca..44d1f517 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -551,7 +551,7 @@ def test_403_swift_keystone_auth(self, api_version=2): try: conn.put_container(container) except swiftclient.exceptions.ClientException as e: - print "EXCEPTION", e.http_status + print("EXCEPTION {}".format(e.http_status)) if e.http_status == 409: # Ceph RadosGW is currently configured with a global namespace # for container names. Make use of this to verify that we From 8b15e7a183ef79e6faa065fb90517ac57b448ef7 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 26 Sep 2018 18:37:42 -0400 Subject: [PATCH 1594/2699] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. 
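On the transitional MagicMock(spec=file) kept above under a noqa: the file builtin is gone in Python 3, so a py3-only cleanup could spec against an io base class instead (an assumption about future work, not part of these patches):

    import io
    from unittest.mock import MagicMock

    # io.TextIOBase matches what text-mode open() returns on py3
    mock_file = MagicMock(spec=io.TextIOBase)
    mock_file.read.return_value = 'contents'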
Change-Id: I97f0eb421287a67964d4ada71c766113f334b89d Signed-off-by: Doug Hellmann --- ceph-radosgw/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 0914714e..7d738375 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -32,13 +32,14 @@ deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] +basepython = python3 commands = {posargs} [testenv:func27-noop] From 792255d72fb7dc10ca11a70bda20837de9b9349a Mon Sep 17 00:00:00 2001 From: Vu Cong Tuan Date: Fri, 2 Nov 2018 16:15:36 +0700 Subject: [PATCH 1595/2699] Replace deprecated "decodestring()" by "decodebytes()" decodestring() is deprecated alias of decodebytes() https://docs.python.org/3/library/base64.html#base64.decodestring The same has been done for nova: https://review.openstack.org/#/c/610401/ Change-Id: I3be35466fce8d1325cc484b6d2b23f3f818cb08f --- ceph-osd/hooks/ceph_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index a41586a4..3ceda0d5 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -659,7 +659,7 @@ def secrets_storage_joined(relation_id=None): def secrets_storage_changed(): vault_ca = relation_get('vault_ca') if vault_ca: - vault_ca = base64.decodestring(json.loads(vault_ca).encode()) + vault_ca = base64.decodebytes(json.loads(vault_ca).encode()) write_file('/usr/local/share/ca-certificates/vault-ca.crt', vault_ca, perms=0o644) subprocess.check_call(['update-ca-certificates', '--fresh']) From 21930a7fd0bb8f010b52cc4c0320d1c4e0335c78 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 26 Sep 2018 18:37:35 -0400 Subject: [PATCH 1596/2699] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. 
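For the decodestring() swap above: decodestring() has been a deprecated alias of decodebytes() since Python 3.1 and is removed in 3.9, and the CA certificate arrives JSON-wrapped on the relation, hence the json.loads(...).encode() chain. A self-contained illustration (values made up):

    import base64
    import json

    payload = json.dumps('Y2EtY2VydA==')  # as presented on the relation
    vault_ca = base64.decodebytes(json.loads(payload).encode())
    assert vault_ca == b'ca-cert'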
Change-Id: Iedee21254cd5df9ecf3e11aa7a28c2997bd84c07 Signed-off-by: Doug Hellmann --- ceph-osd/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 10ba3dc9..6b24dddf 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -35,13 +35,14 @@ deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] +basepython = python3 commands = {posargs} [testenv:func] From d2e4a56e9561ef012f70e7b01255053a0e2f6fdf Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 2 Nov 2018 09:39:29 -0500 Subject: [PATCH 1597/2699] Fix lint re: py3-first and py2 compat Change-Id: I2018de28dc4a3767af91a76b9ad8dd1175bec513 --- ceph-proxy/hooks/ceph_broker.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 329da8a8..000be489 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -3,6 +3,7 @@ # Copyright 2015 Canonical Ltd. # import json +import six from charmhelpers.core.hookenv import ( log, @@ -27,6 +28,7 @@ ReplicatedPool, ) + # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message @@ -44,8 +46,8 @@ "write_fadvise_dontneed": [bool], "noscrub": [bool], "nodeep-scrub": [bool], - "hit_set_type": [basestring, ["bloom", "explicit_hash", - "explicit_object"]], + "hit_set_type": [six.string_types, ["bloom", "explicit_hash", + "explicit_object"]], "hit_set_count": [int, [1, 1]], "hit_set_period": [int], "hit_set_fpp": [float, [0.0, 1.0]], From 54c0f2603fc2072246fd6b7fdda75a36c8d8357e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 7 Nov 2018 17:17:31 +0100 Subject: [PATCH 1598/2699] Add directories to osd-devices as well Tracking directory backed OSDs in the kv store allows us to bootstrap further relations based on bootstrapped OSD counts. Change-Id: I1abd767d15c204845d9909d9c7ee9414dbe87a5c Closes-Bug: #1802134 Depends-On: https://review.openstack.org/#/c/616230/ --- ceph-osd/lib/ceph/utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 2ef48abe..07f96a67 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1876,6 +1876,14 @@ def osdize_dir(path, encrypt=False, bluestore=False): :param encrypt: bool. Should the OSD directory be encrypted at rest :returns: None """ + + db = kv() + osd_devices = db.get('osd-devices', []) + if path in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(path)) + return + if os.path.exists(os.path.join(path, 'upstart')): log('Path {} is already configured as an OSD - bailing'.format(path)) return @@ -1906,6 +1914,13 @@ def osdize_dir(path, encrypt=False, bluestore=False): log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. 
+ osd_devices.append(path) + db.set('osd-devices', osd_devices) + db.flush() + def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 From 15f48cd408630226ed23953fa16877c1344c132e Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 7 Nov 2018 15:33:26 -0600 Subject: [PATCH 1599/2699] Sync charm-helpers Change-Id: Ic731c9213498136434bcbb1cdba8672be25e7243 --- ceph-osd/hooks/charmhelpers/__init__.py | 8 +- .../charmhelpers/contrib/hahelpers/apache.py | 14 +-- .../contrib/hardening/apache/checks/config.py | 3 + .../contrib/hardening/audits/apache.py | 6 +- .../charmhelpers/contrib/hardening/harden.py | 18 ++- .../contrib/openstack/amulet/utils.py | 116 +++++++++++++----- .../contrib/openstack/cert_utils.py | 48 ++++++++ .../charmhelpers/contrib/openstack/context.py | 39 +++++- .../contrib/openstack/ha/utils.py | 12 +- .../charmhelpers/contrib/openstack/utils.py | 57 +++++++-- .../contrib/storage/linux/loopback.py | 2 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 65 +++++++++- ceph-osd/hooks/charmhelpers/core/host.py | 55 +++++++-- ceph-osd/hooks/charmhelpers/core/kernel.py | 4 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 2 + ceph-osd/hooks/charmhelpers/fetch/bzrurl.py | 4 +- ceph-osd/hooks/charmhelpers/fetch/giturl.py | 4 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 25 +++- 18 files changed, 395 insertions(+), 87 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index e7aa4715..61ef9071 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -23,22 +23,22 @@ import sys try: - import six # flake8: noqa + import six # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa + import six # NOQA:F401 try: - import yaml # flake8: noqa + import yaml # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa + import yaml # NOQA:F401 # Holds a list of mapping of mangled function names that have been deprecated diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py index 605a1bec..2c1e371e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -23,8 +23,8 @@ # import os -import subprocess +from charmhelpers.core import host from charmhelpers.core.hookenv import ( config as config_get, relation_get, @@ -83,14 +83,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - if ca_cert: - cert_file = ('/usr/local/share/ca-certificates/' - 'keystone_juju_ca_cert.crt') - old_cert = retrieve_ca_cert(cert_file) - if old_cert and old_cert == ca_cert: - log("CA cert is the same as installed version", level=INFO) - else: - log("Installing new CA cert", level=INFO) - with open(cert_file, 'wb') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 06482aac..341da9ee 100644 --- 
a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,6 +14,7 @@ import os import re +import six import subprocess @@ -95,6 +96,8 @@ def __call__(self): ctxt = settings['hardening'] out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py index d32bf44e..04825f5a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,7 +15,7 @@ import re import subprocess -from six import string_types +import six from charmhelpers.core.hookenv import ( log, @@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, string_types): + elif isinstance(modules, six.string_types): self.modules = [modules] else: self.modules = modules @@ -69,6 +69,8 @@ def ensure_compliance(self): def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py index b55764cd..63f21b9c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py @@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks from charmhelpers.contrib.hardening.apache.checks import run_apache_checks +_DISABLE_HARDENING_FOR_UNIT_TEST = False + def harden(overrides=None): """Hardening decorator. @@ -47,16 +49,28 @@ def harden(overrides=None): provided with 'harden' config. :returns: Returns value returned by decorated function once executed. """ + if overrides is None: + overrides = [] + def _harden_inner1(f): - log("Hardening function '%s'" % (f.__name__), level=DEBUG) + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. 
+ if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), ('apache', run_apache_checks)]) - enabled = overrides or (config("harden") or "").split() + enabled = overrides[:] or (config("harden") or "").split() if enabled: modules_to_run = [] # modules will always be performed in the following order diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 936b4036..9133e9b3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -618,12 +618,12 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): return self.authenticate_keystone(keystone_ip, user, password, project_name=tenant) - def authenticate_glance_admin(self, keystone): + def authenticate_glance_admin(self, keystone, force_v1_client=False): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') - if keystone.session: + if not force_v1_client and keystone.session: return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -680,18 +680,30 @@ def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public) - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. 
+ + :param glance: pointer to authenticated glance api connection :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format :returns: glance image pointer """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) - # Download cirros image + # Download image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -700,31 +712,34 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) else: image = glance.images.create( name=image_name, - disk_format="qcow2", visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) # Wait for image to reach active status img_id = image.id @@ -753,15 +768,54 @@ def create_cirros_image(self, glance, image_name): val_img_stat, val_img_cfmt, val_img_dfmt)) if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: self.log.debug(msg_attr) else: - msg = ('Volume validation failed, {}'.format(msg_attr)) + msg = ('Image validation failed, {}'.format(msg_attr)) amulet.raise_status(amulet.FAIL, msg=msg) return image + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + def delete_image(self, glance, image): """Delete the specified image.""" diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index de853b53..3e078703 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -25,7 +25,9 @@ local_unit, network_get_primary_address, config, + related_units, relation_get, + relation_ids, unit_get, NoNetworkBinding, log, @@ -225,3 +227,49 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. 
+ """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index ca913961..72084cb3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -642,7 +642,7 @@ def __call__(self): return {} l_unit = local_unit().replace('/', '-') - cluster_hosts = {} + cluster_hosts = collections.OrderedDict() # NOTE(jamespage): build out map of configured network endpoints # and associated backends @@ -1519,6 +1519,10 @@ def __call__(self): 'rel_key': 'enable-qos', 'default': False, }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1530,10 +1534,15 @@ def __call__(self): if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + extension_drivers = [] + if ctxt['enable_qos']: - ctxt['extension_drivers'] = 'qos' - else: - ctxt['extension_drivers'] = '' + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) return ctxt @@ -1893,7 +1902,7 @@ class EnsureDirContext(OSContextGenerator): Some software requires a user to create a target directory to be scanned for drop-in files with a specific format. This is why this context is needed to do that before rendering a template. - ''' + ''' def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' @@ -1903,3 +1912,23 @@ def __init__(self, dirname, **kwargs): def __call__(self): mkdir(self.dirname, **self.kwargs) return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. + + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg, base='icehouse') + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index 6060ae50..add8eb9a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -28,6 +28,7 @@ import re from charmhelpers.core.hookenv import ( + expected_related_units, log, relation_set, charm_name, @@ -110,12 +111,17 @@ def assert_charm_supports_dns_ha(): def expect_ha(): """ Determine if the unit expects to be in HA - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. 
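A hypothetical hook-side use of the two certificate helpers added above, pairing them with core.host.write_file (also updated in this sync); the CN and file paths are invented for illustration:

    bundle = get_bundle_for_cn('rgw.example.com')
    if bundle:
        write_file('/etc/ssl/certs/rgw.pem', bundle['cert'])
        write_file('/etc/ssl/private/rgw.key', bundle['key'], perms=0o600)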
@returns boolean """ - return config('vip') or config('dns-ha') + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') def generate_ha_relation_data(service): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 24f5b808..29cad083 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -186,7 +186,7 @@ ('queens', ['2.16.0', '2.17.0']), ('rocky', - ['2.18.0']), + ['2.18.0', '2.19.0']), ]) # >= Liberty version->codename mapping @@ -375,7 +375,7 @@ def get_swift_codename(version): return codenames[0] # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) + match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) for codename, versions in six.iteritems(SWIFT_CODENAMES): @@ -395,7 +395,7 @@ def get_os_codename_package(package, fatal=True): out = subprocess.check_output(cmd) if six.PY3: out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return None lines = out.split('\n') for line in lines: @@ -427,11 +427,11 @@ def get_os_codename_package(package, fatal=True): vers = apt.upstream_version(pkg.current_ver.ver_str) if 'swift' in pkg.name: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -1450,20 +1450,33 @@ def some_hook(...): see core.utils.restart_on_change() for more details. + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. + @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} @param stopstart: DEFAULT false; whether to stop, start or just restart @returns decorator to use a restart_on_change with pausability """ def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + @functools.wraps(f) def wrapped_f(*args, **kwargs): if is_unit_paused_set(): return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) return wrapped_f return wrap @@ -1733,3 +1746,31 @@ def is_unit_upgrading_set(): return not(not(kv.get('unit-upgrading'))) except Exception: return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. 
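The lazy restart_map form described by the decorator docstring above, sketched as a charm author might use it (the decorator being patched here is pausable_restart_on_change; the config path and service name are invented). Evaluation of the map is deferred, and cached, until the hook first runs:

    def restart_map():
        # only computed on the first invocation of the decorated hook
        return {'/etc/demo/demo.conf': ['demo-api']}

    @pausable_restart_on_change(restart_map)   # a callable instead of a dict
    def config_changed():
        pass   # render configs; services restart only if files changed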
+ + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py index 1d6ae6f0..0dfdae52 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -39,7 +39,7 @@ def loopback_devices(): devs = [d.strip().split(' ') for d in check_output(cmd).splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 68800074..2e287659 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -48,6 +48,7 @@ DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 cache = {} @@ -98,7 +99,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: @@ -509,6 +510,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. 
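How a charm would typically wire the two series-upgrade helpers above into its hooks; `hooks`, `pause_unit_helper`, `resume_unit_helper` and `CONFIGS` are assumed charm-side objects, as in other OpenStack charms:

    @hooks.hook('pre-series-upgrade')
    def pre_series_upgrade():
        # pause services and flag the unit before the operator upgrades the OS
        series_upgrade_prepare(pause_unit_helper, CONFIGS)

    @hooks.hook('post-series-upgrade')
    def post_series_upgrade():
        # rewrite configs for the new series and resume
        series_upgrade_complete(resume_unit_helper, CONFIGS)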
+ + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the realtion type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + @cached def relation_for_unit(unit=None, rid=None): """Get the json represenation of a unit's relation""" @@ -997,6 +1059,7 @@ def application_version_set(version): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached def goal_state(): """Juju goal state values""" cmd = ['goal-state', '--format=json'] diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index e9fd38a0..79953a44 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -34,13 +34,13 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit +from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( + from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -48,7 +48,7 @@ CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( + from charmhelpers.core.host_factory.centos import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -58,6 +58,7 @@ UPDATEDB_PATH = '/etc/updatedb.conf' + def service_start(service_name, **kwargs): """Start a system service. @@ -287,8 +288,8 @@ def service_running(service_name, **kwargs): for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -442,7 +443,7 @@ def add_user_to_group(username, group): def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): + mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update @@ -482,8 +483,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None, cmd.append(username) subprocess.check_call(cmd) + remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -535,13 +538,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444): # lets see if we can grab the file and compare the context, to avoid doing # a write. 
existing_content = None - existing_uid, existing_gid = None, None + existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: + existing_uid, existing_gid, existing_perms = ( + stat.st_uid, stat.st_gid, stat.st_mode + ) + except Exception: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), @@ -554,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): target.write(content) return # the contents were the same, but we might still need to change the - # ownership. + # ownership or permissions. if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) @@ -563,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444): log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) + if existing_perms != perms: + log("Changing permissions on existing content: {} -> {}" + .format(existing_perms, perms), level=DEBUG) + os.chmod(path, perms) def fstab_remove(mp): @@ -827,7 +836,7 @@ def list_nics(nic_type=None): ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) - key = re.compile('^[0-9]+:\s+(.+):') + key = re.compile(r'^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: @@ -1040,3 +1049,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return modulo * wait else: return calculated_wait_time + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. 
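One caveat worth noting on the write_file() permission tracking above: os.stat() reports st_mode with the file-type bits included, so for a regular file 0o444 comes back as 0o100444 and the `existing_perms != perms` guard will usually fire even when the permission bits already match (the resulting chmod is harmless, just not a no-op). Masking with stat.S_IMODE yields the bare permission bits:

    import os
    import stat

    mode = os.stat('/etc/hostname').st_mode
    print(oct(mode))                 # e.g. 0o100644, type bits included
    print(oct(stat.S_IMODE(mode)))   # 0o644, permission bits only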
+ """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-osd/hooks/charmhelpers/core/kernel.py b/ceph-osd/hooks/charmhelpers/core/kernel.py index 2d404528..e01f4f8b 100644 --- a/ceph-osd/hooks/charmhelpers/core/kernel.py +++ b/ceph-osd/hooks/charmhelpers/core/kernel.py @@ -26,12 +26,12 @@ __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 480a6276..8572d34f 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -84,6 +84,7 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages install = fetch.apt_install upgrade = fetch.apt_upgrade update = _fetch_update = fetch.apt_update @@ -96,6 +97,7 @@ def base_url(self, url): apt_update = fetch.apt_update apt_upgrade = fetch.apt_upgrade apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold diff --git a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py index 07cd0293..c4ab3ff1 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from subprocess import check_call +from subprocess import STDOUT, check_output from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -55,7 +55,7 @@ def branch(self, source, dest, revno=None): cmd = ['bzr', 'branch'] cmd += cmd_opts cmd += [source, dest] - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) diff --git a/ceph-osd/hooks/charmhelpers/fetch/giturl.py b/ceph-osd/hooks/charmhelpers/fetch/giturl.py index 4cf21bc2..070ca9bb 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/giturl.py @@ -13,7 +13,7 @@ # limitations under the License. 
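A hypothetical cleanup flow for the two fetch helpers just exported (their ubuntu.py definitions follow below): find which of an obsolete package list is actually installed, purge those, then autoremove orphaned dependencies. The package names are invented:

    obsolete = ['python-ceph', 'python-rados']       # assumed legacy packages
    installed = filter_missing_packages(obsolete)    # the subset actually installed
    if installed:
        apt_purge(installed, fatal=True)
        apt_autoremove(purge=True, fatal=True)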
import os -from subprocess import check_call, CalledProcessError +from subprocess import check_output, CalledProcessError, STDOUT from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -50,7 +50,7 @@ def clone(self, source, dest, branch="master", depth=None): cmd = ['git', 'clone', source, dest, '--branch', branch] if depth: cmd.extend(['--depth', depth]) - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 19aa6baf..c7ad128c 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -189,6 +189,18 @@ def filter_installed_packages(packages): return _pkgs +def filter_missing_packages(packages): + """Return a list of packages that are installed. + + :param packages: list of packages to evaluate. + :returns list: Packages that are installed. + """ + return list( + set(packages) - + set(filter_installed_packages(packages)) + ) + + def apt_cache(in_memory=True, progress=None): """Build and return an apt cache.""" from apt import apt_pkg @@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) +def apt_autoremove(purge=True, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) @@ -274,7 +294,7 @@ def apt_unhold(packages, fatal=False): def import_key(key): """Import an ASCII Armor key. - /!\ A Radix64 format keyid is also supported for backwards + A Radix64 format keyid is also supported for backwards compatibility, but should never be used; the key retrieval mechanism is insecure and subject to man-in-the-middle attacks voiding all signature checks using that key. @@ -434,6 +454,9 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository """ + if '{series}' in spec: + series = lsb_release()['DISTRIB_CODENAME'] + spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec]) From 4812fb144a909315f4cae4e0b4297b8474deff43 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 7 Nov 2018 15:42:41 -0600 Subject: [PATCH 1600/2699] Rebuild for sync charm-helpers Change-Id: Ie8fc605b85ac7f9fa42db52788d758f69ab6b2ce --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 08ac52e8..2b56fde5 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
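The '{series}' placeholder added to _add_apt_repository above lets a single source spec serve every Ubuntu series; assuming a deb-line spec routed through that helper (archive URL invented):

    spec = 'deb http://archive.example.com/ubuntu {series} main'
    # on a bionic unit the helper rewrites this to:
    #   deb http://archive.example.com/ubuntu bionic main
    # before shelling out to `add-apt-repository --yes ...`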
# simply change the uuid to something new -93e11b9e-86a6-11e8-b3ac-9f8b2e5df0b9 +0d1ffbde-e2d6-11e8-aa1a-cb07dc37eb28 From c09e8f48efc9335ab3f9f859a33df64c73535c5a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 7 Nov 2018 15:33:41 -0600 Subject: [PATCH 1601/2699] Sync charm-helpers Change-Id: I2490845fc4482ca7f905416319311e145a527c4e --- ceph-radosgw/hooks/charmhelpers/__init__.py | 8 +- .../charmhelpers/contrib/hahelpers/apache.py | 14 +-- .../contrib/hardening/apache/checks/config.py | 3 + .../contrib/hardening/audits/apache.py | 6 +- .../charmhelpers/contrib/hardening/harden.py | 18 ++- .../contrib/openstack/amulet/utils.py | 116 +++++++++++++----- .../contrib/openstack/cert_utils.py | 48 ++++++++ .../charmhelpers/contrib/openstack/context.py | 35 +++++- .../contrib/openstack/ha/utils.py | 12 +- .../section-keystone-authtoken-mitaka | 6 +- .../charmhelpers/contrib/openstack/utils.py | 27 ++-- .../contrib/storage/linux/loopback.py | 2 +- .../hooks/charmhelpers/core/hookenv.py | 62 ++++++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 55 +++++++-- .../hooks/charmhelpers/core/kernel.py | 4 +- .../hooks/charmhelpers/fetch/ubuntu.py | 5 +- ceph-radosgw/tests/basic_deployment.py | 14 +++ 17 files changed, 352 insertions(+), 83 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index e7aa4715..61ef9071 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -23,22 +23,22 @@ import sys try: - import six # flake8: noqa + import six # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa + import six # NOQA:F401 try: - import yaml # flake8: noqa + import yaml # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa + import yaml # NOQA:F401 # Holds a list of mapping of mangled function names that have been deprecated diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 605a1bec..2c1e371e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -23,8 +23,8 @@ # import os -import subprocess +from charmhelpers.core import host from charmhelpers.core.hookenv import ( config as config_get, relation_get, @@ -83,14 +83,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - if ca_cert: - cert_file = ('/usr/local/share/ca-certificates/' - 'keystone_juju_ca_cert.crt') - old_cert = retrieve_ca_cert(cert_file) - if old_cert and old_cert == ca_cert: - log("CA cert is the same as installed version", level=INFO) - else: - log("Installing new CA cert", level=INFO) - with open(cert_file, 'wb') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 06482aac..341da9ee 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ 
b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,6 +14,7 @@ import os import re +import six import subprocess @@ -95,6 +96,8 @@ def __call__(self): ctxt = settings['hardening'] out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py index d32bf44e..04825f5a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,7 +15,7 @@ import re import subprocess -from six import string_types +import six from charmhelpers.core.hookenv import ( log, @@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, string_types): + elif isinstance(modules, six.string_types): self.modules = [modules] else: self.modules = modules @@ -69,6 +69,8 @@ def ensure_compliance(self): def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py index b55764cd..63f21b9c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py @@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks from charmhelpers.contrib.hardening.apache.checks import run_apache_checks +_DISABLE_HARDENING_FOR_UNIT_TEST = False + def harden(overrides=None): """Hardening decorator. @@ -47,16 +49,28 @@ def harden(overrides=None): provided with 'harden' config. :returns: Returns value returned by decorated function once executed. """ + if overrides is None: + overrides = [] + def _harden_inner1(f): - log("Hardening function '%s'" % (f.__name__), level=DEBUG) + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. 
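The hunks above apply the usual Python 3 porting pattern: subprocess.check_output() returns bytes on py3, so the output must be decoded before it is handed to re or splitlines(). The same idea in isolation (illustrative; assumes apache2ctl is present):

    import subprocess
    import six

    out = subprocess.check_output(['apache2ctl', '-M'])
    if six.PY3:
        out = out.decode('utf-8')          # bytes -> str on Python 3
    loaded = [line.split()[0] for line in out.splitlines()[1:] if line.strip()]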
+ if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), ('apache', run_apache_checks)]) - enabled = overrides or (config("harden") or "").split() + enabled = overrides[:] or (config("harden") or "").split() if enabled: modules_to_run = [] # modules will always be performed in the following order diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 936b4036..9133e9b3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -618,12 +618,12 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): return self.authenticate_keystone(keystone_ip, user, password, project_name=tenant) - def authenticate_glance_admin(self, keystone): + def authenticate_glance_admin(self, keystone, force_v1_client=False): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') - if keystone.session: + if not force_v1_client and keystone.session: return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -680,18 +680,30 @@ def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public) - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. 
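A hypothetical use of the new force_v1_client flag shown above: exercising glance v1 code paths in a test even when a keystone session (and therefore the v2 client) is available; `u` and `keystone` are assumed test fixtures:

    glance_v1 = u.authenticate_glance_admin(keystone, force_v1_client=True)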
+ + :param glance: pointer to authenticated glance api connection :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format :returns: glance image pointer """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) - # Download cirros image + # Download image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -700,31 +712,34 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) else: image = glance.images.create( name=image_name, - disk_format="qcow2", visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) # Wait for image to reach active status img_id = image.id @@ -753,15 +768,54 @@ def create_cirros_image(self, glance, image_name): val_img_stat, val_img_cfmt, val_img_dfmt)) if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: self.log.debug(msg_attr) else: - msg = ('Volume validation failed, {}'.format(msg_attr)) + msg = ('Image validation failed, {}'.format(msg_attr)) amulet.raise_status(amulet.FAIL, msg=msg) return image + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + def delete_image(self, glance, image): """Delete the specified image.""" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index de853b53..3e078703 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -25,7 +25,9 @@ local_unit, network_get_primary_address, config, + related_units, relation_get, + relation_ids, unit_get, NoNetworkBinding, log, @@ -225,3 +227,49 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. 
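For reference, a sketch of the relation data shape that get_requests_for_local_unit() above parses, for a hypothetical unit ceph-radosgw/0 (all values invented; note the '/' in the unit name becomes '_' when building the key):

    data = {
        'ca': '-----BEGIN CERTIFICATE-----...',
        'chain': None,
        'ceph-radosgw_0.processed_requests': json.dumps({
            'rgw.example.com': {'cert': '...', 'key': '...'},
        }),
    }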
+ """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 3e4e82a7..72084cb3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -642,7 +642,7 @@ def __call__(self): return {} l_unit = local_unit().replace('/', '-') - cluster_hosts = {} + cluster_hosts = collections.OrderedDict() # NOTE(jamespage): build out map of configured network endpoints # and associated backends @@ -1534,10 +1534,15 @@ def __call__(self): if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + extension_drivers = [] + if ctxt['enable_qos']: - ctxt['extension_drivers'] = 'qos' - else: - ctxt['extension_drivers'] = '' + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) return ctxt @@ -1897,7 +1902,7 @@ class EnsureDirContext(OSContextGenerator): Some software requires a user to create a target directory to be scanned for drop-in files with a specific format. This is why this context is needed to do that before rendering a template. - ''' + ''' def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' @@ -1907,3 +1912,23 @@ def __init__(self, dirname, **kwargs): def __call__(self): mkdir(self.dirname, **self.kwargs) return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. + + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg, base='icehouse') + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 6060ae50..add8eb9a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -28,6 +28,7 @@ import re from charmhelpers.core.hookenv import ( + expected_related_units, log, relation_set, charm_name, @@ -110,12 +111,17 @@ def assert_charm_supports_dns_ha(): def expect_ha(): """ Determine if the unit expects to be in HA - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. 
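The net effect of the extension_drivers rework above, reduced to a runnable sketch:

    ctxt = {'enable_qos': True, 'enable_nsg_logging': True}
    drivers = []
    if ctxt['enable_qos']:
        drivers.append('qos')
    if ctxt['enable_nsg_logging']:
        drivers.append('log')
    assert ','.join(drivers) == 'qos,log'   # the value rendered into ml2 config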
@returns boolean """ - return config('vip') or config('dns-ha') + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') def generate_ha_relation_data(service): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 8e6889e0..c281868b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -1,12 +1,14 @@ {% if auth_host -%} [keystone_authtoken] -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} -auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} auth_type = password {% if api_version == "3" -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 project_domain_name = {{ admin_domain_name }} user_domain_name = {{ admin_domain_name }} {% else -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} project_domain_name = default user_domain_name = default {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index ae48d6b4..29cad083 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -375,7 +375,7 @@ def get_swift_codename(version): return codenames[0] # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) + match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) for codename, versions in six.iteritems(SWIFT_CODENAMES): @@ -395,7 +395,7 @@ def get_os_codename_package(package, fatal=True): out = subprocess.check_output(cmd) if six.PY3: out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return None lines = out.split('\n') for line in lines: @@ -427,11 +427,11 @@ def get_os_codename_package(package, fatal=True): vers = apt.upstream_version(pkg.current_ver.ver_str) if 'swift' in pkg.name: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -1450,20 +1450,33 @@ def some_hook(...): see core.utils.restart_on_change() for more details. + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. 
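With api_version == "3" the reworked keystone-authtoken template above now pins both endpoints to /v3; a rendered sample, with host, ports and domain invented for illustration:

    [keystone_authtoken]
    auth_type = password
    auth_uri = http://10.0.0.10:5000/v3
    auth_url = http://10.0.0.10:35357/v3
    project_domain_name = service_domain
    user_domain_name = service_domain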
+ @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} @param stopstart: DEFAULT false; whether to stop, start or just restart @returns decorator to use a restart_on_change with pausability """ def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + @functools.wraps(f) def wrapped_f(*args, **kwargs): if is_unit_paused_set(): return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) return wrapped_f return wrap diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index 1d6ae6f0..0dfdae52 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -39,7 +39,7 @@ def loopback_devices(): devs = [d.strip().split(' ') for d in check_output(cmd).splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 9abf2a45..2e287659 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -510,6 +510,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. 
+ + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the realtion type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + @cached def relation_for_unit(unit=None, rid=None): """Get the json represenation of a unit's relation""" @@ -998,6 +1059,7 @@ def application_version_set(version): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached def goal_state(): """Juju goal state values""" cmd = ['goal-state', '--format=json'] diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index e9fd38a0..79953a44 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -34,13 +34,13 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit +from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( + from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -48,7 +48,7 @@ CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( + from charmhelpers.core.host_factory.centos import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -58,6 +58,7 @@ UPDATEDB_PATH = '/etc/updatedb.conf' + def service_start(service_name, **kwargs): """Start a system service. 
@@ -287,8 +288,8 @@ def service_running(service_name, **kwargs): for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -442,7 +443,7 @@ def add_user_to_group(username, group): def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): + mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update @@ -482,8 +483,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None, cmd.append(username) subprocess.check_call(cmd) + remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -535,13 +538,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444): # lets see if we can grab the file and compare the context, to avoid doing # a write. existing_content = None - existing_uid, existing_gid = None, None + existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: + existing_uid, existing_gid, existing_perms = ( + stat.st_uid, stat.st_gid, stat.st_mode + ) + except Exception: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), @@ -554,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): target.write(content) return # the contents were the same, but we might still need to change the - # ownership. + # ownership or permissions. if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) @@ -563,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444): log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) + if existing_perms != perms: + log("Changing permissions on existing content: {} -> {}" + .format(existing_perms, perms), level=DEBUG) + os.chmod(path, perms) def fstab_remove(mp): @@ -827,7 +836,7 @@ def list_nics(nic_type=None): ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) - key = re.compile('^[0-9]+:\s+(.+):') + key = re.compile(r'^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: @@ -1040,3 +1049,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return modulo * wait else: return calculated_wait_time + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. 
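The install_ca_cert docstring above promises a no-op when the cert is unchanged; the body (which follows) implements that with an md5 file hash, so hooks can call the helper unconditionally. A hypothetical example, with `ca_pem` assumed to hold the PEM bytes:

    install_ca_cert(ca_pem, name='keystone_juju_ca_cert')  # writes cert, refreshes store
    install_ca_cert(ca_pem, name='keystone_juju_ca_cert')  # hash matches: no-op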
+ """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-radosgw/hooks/charmhelpers/core/kernel.py b/ceph-radosgw/hooks/charmhelpers/core/kernel.py index 2d404528..e01f4f8b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/kernel.py +++ b/ceph-radosgw/hooks/charmhelpers/core/kernel.py @@ -26,12 +26,12 @@ __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index ec08cbc2..c7ad128c 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -294,7 +294,7 @@ def apt_unhold(packages, fatal=False): def import_key(key): """Import an ASCII Armor key. - /!\ A Radix64 format keyid is also supported for backwards + A Radix64 format keyid is also supported for backwards compatibility, but should never be used; the key retrieval mechanism is insecure and subject to man-in-the-middle attacks voiding all signature checks using that key. 
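Per the reworded import_key() docstring above, the safe usage is to pass full ASCII-armored key material rather than a bare keyid, since keyid retrieval is open to man-in-the-middle substitution; the key body here is elided:

    import_key("""-----BEGIN PGP PUBLIC KEY BLOCK-----
    ...
    -----END PGP PUBLIC KEY BLOCK-----""")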
@@ -454,6 +454,9 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository """ + if '{series}' in spec: + series = lsb_release()['DISTRIB_CODENAME'] + spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec]) diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 44d1f517..5b05ffae 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -65,6 +65,7 @@ def _add_services(self): {'name': 'ceph-osd', 'units': 3, 'storage': {'osd-devices': 'cinder,10G'}}, {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'nova-cloud-controller'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, {'name': 'nova-compute'}, @@ -81,6 +82,14 @@ def _add_relations(self): 'nova-compute:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service', 'nova-compute:ceph': 'ceph-mon:client', + 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', + 'nova-cloud-controller:identity-service': 'keystone:' + 'identity-service', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:cloud-compute': 'nova-compute:' + 'cloud-compute', + 'nova-cloud-controller:image-service': 'glance:image-service', + 'keystone:shared-db': 'percona-cluster:shared-db', 'glance:shared-db': 'percona-cluster:shared-db', 'glance:identity-service': 'keystone:identity-service', @@ -118,11 +127,16 @@ def _configure_services(self): 'osd-devices': '/srv/ceph /dev/test-non-existent' } + nova_cc_config = {} + if self._get_openstack_release() >= self.xenial_ocata: + nova_cc_config['network-manager'] = 'Neutron' + configs = {'keystone': keystone_config, 'percona-cluster': pxc_config, 'cinder': cinder_config, 'ceph-mon': ceph_config, 'ceph-osd': ceph_osd_config, + 'nova-cloud-controller': nova_cc_config, } super(CephRadosGwBasicDeployment, self)._configure_services(configs) From 4ea4148a0f19edeec9ce5875a196f7435b4be69b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 7 Nov 2018 15:33:34 -0600 Subject: [PATCH 1602/2699] Sync charm-helpers Change-Id: Ibccf221d130703949f5538d061cb3eb4f44eb677 --- ceph-proxy/charm-helpers-tests.yaml | 7 -- ceph-proxy/hooks/charmhelpers/__init__.py | 8 +-- .../contrib/hardening/apache/checks/config.py | 3 + .../contrib/hardening/audits/apache.py | 6 +- .../charmhelpers/contrib/hardening/harden.py | 18 ++++- .../charmhelpers/contrib/openstack/utils.py | 57 +++++++++++++--- .../contrib/storage/linux/loopback.py | 2 +- ceph-proxy/hooks/charmhelpers/core/hookenv.py | 65 ++++++++++++++++++- ceph-proxy/hooks/charmhelpers/core/host.py | 55 ++++++++++++---- ceph-proxy/hooks/charmhelpers/core/kernel.py | 4 +- .../hooks/charmhelpers/fetch/__init__.py | 2 + ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py | 4 +- ceph-proxy/hooks/charmhelpers/fetch/giturl.py | 4 +- ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 25 ++++++- 14 files changed, 217 insertions(+), 43 deletions(-) delete mode 100644 ceph-proxy/charm-helpers-tests.yaml diff --git a/ceph-proxy/charm-helpers-tests.yaml b/ceph-proxy/charm-helpers-tests.yaml deleted file mode 100644 index f64f0dde..00000000 --- a/ceph-proxy/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - contrib.amulet - - contrib.openstack.amulet - - core - - osplatform diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/hooks/charmhelpers/__init__.py index 
e7aa4715..61ef9071 100644 --- a/ceph-proxy/hooks/charmhelpers/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/__init__.py @@ -23,22 +23,22 @@ import sys try: - import six # flake8: noqa + import six # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa + import six # NOQA:F401 try: - import yaml # flake8: noqa + import yaml # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa + import yaml # NOQA:F401 # Holds a list of mapping of mangled function names that have been deprecated diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 06482aac..341da9ee 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,6 +14,7 @@ import os import re +import six import subprocess @@ -95,6 +96,8 @@ def __call__(self): ctxt = settings['hardening'] out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py index d32bf44e..04825f5a 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,7 +15,7 @@ import re import subprocess -from six import string_types +import six from charmhelpers.core.hookenv import ( log, @@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, string_types): + elif isinstance(modules, six.string_types): self.modules = [modules] else: self.modules = modules @@ -69,6 +69,8 @@ def ensure_compliance(self): def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py index b55764cd..63f21b9c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py @@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks from charmhelpers.contrib.hardening.apache.checks import run_apache_checks +_DISABLE_HARDENING_FOR_UNIT_TEST = False + def harden(overrides=None): """Hardening decorator. @@ -47,16 +49,28 @@ def harden(overrides=None): provided with 'harden' config. :returns: Returns value returned by decorated function once executed. """ + if overrides is None: + overrides = [] + def _harden_inner1(f): - log("Hardening function '%s'" % (f.__name__), level=DEBUG) + # As this has to be py2.7 compat, we can't use nonlocal. 
Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. + if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), ('apache', run_apache_checks)]) - enabled = overrides or (config("harden") or "").split() + enabled = overrides[:] or (config("harden") or "").split() if enabled: modules_to_run = [] # modules will always be performed in the following order diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 24f5b808..29cad083 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -186,7 +186,7 @@ ('queens', ['2.16.0', '2.17.0']), ('rocky', - ['2.18.0']), + ['2.18.0', '2.19.0']), ]) # >= Liberty version->codename mapping @@ -375,7 +375,7 @@ def get_swift_codename(version): return codenames[0] # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) + match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) for codename, versions in six.iteritems(SWIFT_CODENAMES): @@ -395,7 +395,7 @@ def get_os_codename_package(package, fatal=True): out = subprocess.check_output(cmd) if six.PY3: out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return None lines = out.split('\n') for line in lines: @@ -427,11 +427,11 @@ def get_os_codename_package(package, fatal=True): vers = apt.upstream_version(pkg.current_ver.ver_str) if 'swift' in pkg.name: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -1450,20 +1450,33 @@ def some_hook(...): see core.utils.restart_on_change() for more details. + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. + @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} @param stopstart: DEFAULT false; whether to stop, start or just restart @returns decorator to use a restart_on_change with pausability """ def wrap(f): + # py27 compatible nonlocal variable. 
When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + @functools.wraps(f) def wrapped_f(*args, **kwargs): if is_unit_paused_set(): return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) return wrapped_f return wrap @@ -1733,3 +1746,31 @@ def is_unit_upgrading_set(): return not(not(kv.get('unit-upgrading'))) except Exception: return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. + + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py index 1d6ae6f0..0dfdae52 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -39,7 +39,7 @@ def loopback_devices(): devs = [d.strip().split(' ') for d in check_output(cmd).splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/hooks/charmhelpers/core/hookenv.py index 68800074..2e287659 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/hooks/charmhelpers/core/hookenv.py @@ -48,6 +48,7 @@ DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 cache = {} @@ -98,7 +99,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: @@ -509,6 +510,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. 
+ + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. + + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the relation type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + @cached def relation_for_unit(unit=None, rid=None): """Get the json representation of a unit's relation""" @@ -997,6 +1059,7 @@ def application_version_set(version): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached def goal_state(): """Juju goal state values""" cmd = ['goal-state', '--format=json'] diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index e9fd38a0..79953a44 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -34,13 +34,13 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit +from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( + from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -48,7 +48,7 @@ CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( + from charmhelpers.core.host_factory.centos import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -58,6 +58,7 @@ UPDATEDB_PATH = '/etc/updatedb.conf' + def service_start(service_name, **kwargs): """Start a system service.
@@ -287,8 +288,8 @@ def service_running(service_name, **kwargs): for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -442,7 +443,7 @@ def add_user_to_group(username, group): def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): + mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update @@ -482,8 +483,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None, cmd.append(username) subprocess.check_call(cmd) + remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -535,13 +538,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444): # lets see if we can grab the file and compare the context, to avoid doing # a write. existing_content = None - existing_uid, existing_gid = None, None + existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: + existing_uid, existing_gid, existing_perms = ( + stat.st_uid, stat.st_gid, stat.st_mode + ) + except Exception: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), @@ -554,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): target.write(content) return # the contents were the same, but we might still need to change the - # ownership. + # ownership or permissions. if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) @@ -563,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444): log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) + if existing_perms != perms: + log("Changing permissions on existing content: {} -> {}" + .format(existing_perms, perms), level=DEBUG) + os.chmod(path, perms) def fstab_remove(mp): @@ -827,7 +836,7 @@ def list_nics(nic_type=None): ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) - key = re.compile('^[0-9]+:\s+(.+):') + key = re.compile(r'^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: @@ -1040,3 +1049,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return modulo * wait else: return calculated_wait_time + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. 
+ """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/hooks/charmhelpers/core/kernel.py index 2d404528..e01f4f8b 100644 --- a/ceph-proxy/hooks/charmhelpers/core/kernel.py +++ b/ceph-proxy/hooks/charmhelpers/core/kernel.py @@ -26,12 +26,12 @@ __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py index 480a6276..8572d34f 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/__init__.py @@ -84,6 +84,7 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages install = fetch.apt_install upgrade = fetch.apt_upgrade update = _fetch_update = fetch.apt_update @@ -96,6 +97,7 @@ def base_url(self, url): apt_update = fetch.apt_update apt_upgrade = fetch.apt_upgrade apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py index 07cd0293..c4ab3ff1 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from subprocess import check_call +from subprocess import STDOUT, check_output from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -55,7 +55,7 @@ def branch(self, source, dest, revno=None): cmd = ['bzr', 'branch'] cmd += cmd_opts cmd += [source, dest] - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py index 4cf21bc2..070ca9bb 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/giturl.py @@ -13,7 +13,7 @@ # limitations under the License. 
import os -from subprocess import check_call, CalledProcessError +from subprocess import check_output, CalledProcessError, STDOUT from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -50,7 +50,7 @@ def clone(self, source, dest, branch="master", depth=None): cmd = ['git', 'clone', source, dest, '--branch', branch] if depth: cmd.extend(['--depth', depth]) - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index 19aa6baf..c7ad128c 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -189,6 +189,18 @@ def filter_installed_packages(packages): return _pkgs +def filter_missing_packages(packages): + """Return a list of packages that are installed. + + :param packages: list of packages to evaluate. + :returns list: Packages that are installed. + """ + return list( + set(packages) - + set(filter_installed_packages(packages)) + ) + + def apt_cache(in_memory=True, progress=None): """Build and return an apt cache.""" from apt import apt_pkg @@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) +def apt_autoremove(purge=True, fatal=False): + """Remove packages that are no longer required.""" + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) @@ -274,7 +294,7 @@ def apt_unhold(packages, fatal=False): def import_key(key): """Import an ASCII Armor key. - /!\ A Radix64 format keyid is also supported for backwards + A Radix64 format keyid is also supported for backwards compatibility, but should never be used; the key retrieval mechanism is insecure and subject to man-in-the-middle attacks voiding all signature checks using that key.
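Editor's usage sketch (not part of the patch): the filter_missing_packages() and apt_autoremove() helpers added above are typically paired when a charm drops packages it no longer needs, on Ubuntu where charmhelpers.fetch re-exports them. Package names here are illustrative only:

    from charmhelpers.fetch import (
        apt_autoremove,
        apt_purge,
        filter_missing_packages,
    )

    # Hypothetical packages the charm no longer requires.
    OBSOLETE_PACKAGES = ['python-ceph', 'python-rbd']

    # filter_missing_packages() keeps only the packages actually installed,
    # so apt_purge() is never asked to remove something that is not present.
    installed = filter_missing_packages(OBSOLETE_PACKAGES)
    if installed:
        apt_purge(installed, fatal=True)
        # Drop any dependencies orphaned by the purge.
        apt_autoremove(purge=True, fatal=True)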
@@ -434,6 +454,9 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository """ + if '{series}' in spec: + series = lsb_release()['DISTRIB_CODENAME'] + spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec]) From d15580498917d0ae33266f33a1cbbec08c989630 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 7 Nov 2018 15:33:19 -0600 Subject: [PATCH 1603/2699] Sync charm-helpers Change-Id: I0d330cb48f49a0dd61163901df912f5e864e8c12 --- ceph-mon/hooks/charmhelpers/__init__.py | 8 +- .../contrib/hardening/apache/checks/config.py | 3 + .../contrib/hardening/audits/apache.py | 6 +- .../charmhelpers/contrib/hardening/harden.py | 18 ++- .../contrib/openstack/amulet/utils.py | 116 +++++++++++++----- .../contrib/openstack/cert_utils.py | 48 ++++++++ .../charmhelpers/contrib/openstack/context.py | 39 +++++- .../contrib/openstack/ha/utils.py | 12 +- .../charmhelpers/contrib/openstack/utils.py | 57 +++++++-- .../contrib/storage/linux/loopback.py | 2 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 65 +++++++++- ceph-mon/hooks/charmhelpers/core/host.py | 55 +++++++-- ceph-mon/hooks/charmhelpers/core/kernel.py | 4 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 + ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 4 +- ceph-mon/hooks/charmhelpers/fetch/giturl.py | 4 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 25 +++- ceph-mon/tests/basic_deployment.py | 13 +- 18 files changed, 405 insertions(+), 76 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index e7aa4715..61ef9071 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -23,22 +23,22 @@ import sys try: - import six # flake8: noqa + import six # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa + import six # NOQA:F401 try: - import yaml # flake8: noqa + import yaml # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa + import yaml # NOQA:F401 # Holds a list of mapping of mangled function names that have been deprecated diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 06482aac..341da9ee 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,6 +14,7 @@ import os import re +import six import subprocess @@ -95,6 +96,8 @@ def __call__(self): ctxt = settings['hardening'] out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py index d32bf44e..04825f5a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,7 +15,7 @@ import re import subprocess -from six import string_types +import six from 
charmhelpers.core.hookenv import ( log, @@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, string_types): + elif isinstance(modules, six.string_types): self.modules = [modules] else: self.modules = modules @@ -69,6 +69,8 @@ def ensure_compliance(self): def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py index b55764cd..63f21b9c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py @@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks from charmhelpers.contrib.hardening.apache.checks import run_apache_checks +_DISABLE_HARDENING_FOR_UNIT_TEST = False + def harden(overrides=None): """Hardening decorator. @@ -47,16 +49,28 @@ def harden(overrides=None): provided with 'harden' config. :returns: Returns value returned by decorated function once executed. """ + if overrides is None: + overrides = [] + def _harden_inner1(f): - log("Hardening function '%s'" % (f.__name__), level=DEBUG) + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. + if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), ('apache', run_apache_checks)]) - enabled = overrides or (config("harden") or "").split() + enabled = overrides[:] or (config("harden") or "").split() if enabled: modules_to_run = [] # modules will always be performed in the following order diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 936b4036..9133e9b3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -618,12 +618,12 @@ def authenticate_keystone_user(self, keystone, user, password, tenant): return self.authenticate_keystone(keystone_ip, user, password, project_name=tenant) - def authenticate_glance_admin(self, keystone): + def authenticate_glance_admin(self, keystone, force_v1_client=False): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') - if keystone.session: + if not force_v1_client and keystone.session: return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -680,18 +680,30 @@ def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public) - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and 
return a resource pointer. - - :param glance: pointer to authenticated glance connection + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. + + :param glance: pointer to authenticated glance api connection :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format :returns: glance image pointer """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) - # Download cirros image + # Download image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -700,31 +712,34 @@ def create_cirros_image(self, glance, image_name): else: opener = urllib.FancyURLopener() - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image if float(glance.version) < 2.0: - with open(local_path) as fimage: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', - data=fimage) + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) else: image = glance.images.create( name=image_name, - disk_format="qcow2", visibility="public", - container_format="bare") - glance.images.upload(image.id, open(local_path, 'rb')) + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) # Wait for image to reach active status img_id = image.id @@ -753,15 +768,54 @@ def create_cirros_image(self, glance, image_name): val_img_stat, val_img_cfmt, val_img_dfmt)) if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: self.log.debug(msg_attr) else: - msg = ('Volume validation failed, {}'.format(msg_attr)) + msg = ('Image validation failed, {}'.format(msg_attr)) amulet.raise_status(amulet.FAIL, msg=msg) return image + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image 
and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + def delete_image(self, glance, image): """Delete the specified image.""" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index de853b53..3e078703 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -25,7 +25,9 @@ local_unit, network_get_primary_address, config, + related_units, relation_get, + relation_ids, unit_get, NoNetworkBinding, log, @@ -225,3 +227,49 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. 
+ """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index ca913961..72084cb3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -642,7 +642,7 @@ def __call__(self): return {} l_unit = local_unit().replace('/', '-') - cluster_hosts = {} + cluster_hosts = collections.OrderedDict() # NOTE(jamespage): build out map of configured network endpoints # and associated backends @@ -1519,6 +1519,10 @@ def __call__(self): 'rel_key': 'enable-qos', 'default': False, }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1530,10 +1534,15 @@ def __call__(self): if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + extension_drivers = [] + if ctxt['enable_qos']: - ctxt['extension_drivers'] = 'qos' - else: - ctxt['extension_drivers'] = '' + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) return ctxt @@ -1893,7 +1902,7 @@ class EnsureDirContext(OSContextGenerator): Some software requires a user to create a target directory to be scanned for drop-in files with a specific format. This is why this context is needed to do that before rendering a template. - ''' + ''' def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' @@ -1903,3 +1912,23 @@ def __init__(self, dirname, **kwargs): def __call__(self): mkdir(self.dirname, **self.kwargs) return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. + + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg, base='icehouse') + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 6060ae50..add8eb9a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -28,6 +28,7 @@ import re from charmhelpers.core.hookenv import ( + expected_related_units, log, relation_set, charm_name, @@ -110,12 +111,17 @@ def assert_charm_supports_dns_ha(): def expect_ha(): """ Determine if the unit expects to be in HA - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. 
@returns boolean """ - return config('vip') or config('dns-ha') + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') def generate_ha_relation_data(service): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 24f5b808..29cad083 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -186,7 +186,7 @@ ('queens', ['2.16.0', '2.17.0']), ('rocky', - ['2.18.0']), + ['2.18.0', '2.19.0']), ]) # >= Liberty version->codename mapping @@ -375,7 +375,7 @@ def get_swift_codename(version): return codenames[0] # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) + match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) for codename, versions in six.iteritems(SWIFT_CODENAMES): @@ -395,7 +395,7 @@ def get_os_codename_package(package, fatal=True): out = subprocess.check_output(cmd) if six.PY3: out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return None lines = out.split('\n') for line in lines: @@ -427,11 +427,11 @@ def get_os_codename_package(package, fatal=True): vers = apt.upstream_version(pkg.current_ver.ver_str) if 'swift' in pkg.name: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -1450,20 +1450,33 @@ def some_hook(...): see core.utils.restart_on_change() for more details. + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. + @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} @param stopstart: DEFAULT false; whether to stop, start or just restart @returns decorator to use a restart_on_change with pausability """ def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + @functools.wraps(f) def wrapped_f(*args, **kwargs): if is_unit_paused_set(): return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) return wrapped_f return wrap @@ -1733,3 +1746,31 @@ def is_unit_upgrading_set(): return not(not(kv.get('unit-upgrading'))) except Exception: return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. 
+ + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py index 1d6ae6f0..0dfdae52 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -39,7 +39,7 @@ def loopback_devices(): devs = [d.strip().split(' ') for d in check_output(cmd).splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 68800074..2e287659 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -48,6 +48,7 @@ DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 cache = {} @@ -98,7 +99,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: @@ -509,6 +510,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. 
+ + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the relation type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + @cached def relation_for_unit(unit=None, rid=None): """Get the json representation of a unit's relation""" @@ -997,6 +1059,7 @@ def application_version_set(version): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached def goal_state(): """Juju goal state values""" cmd = ['goal-state', '--format=json'] diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index e9fd38a0..79953a44 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -34,13 +34,13 @@ from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit +from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( + from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -48,7 +48,7 @@ CompareHostReleases, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( + from charmhelpers.core.host_factory.centos import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -58,6 +58,7 @@ UPDATEDB_PATH = '/etc/updatedb.conf' + def service_start(service_name, **kwargs): """Start a system service. @@ -287,8 +288,8 @@ def service_running(service_name, **kwargs): for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -442,7 +443,7 @@ def add_user_to_group(username, group): def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): + mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update @@ -482,8 +483,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None, cmd.append(username) subprocess.check_call(cmd) + remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -535,13 +538,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444): # lets see if we can grab the file and compare the context, to avoid doing # a write.
existing_content = None - existing_uid, existing_gid = None, None + existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: + existing_uid, existing_gid, existing_perms = ( + stat.st_uid, stat.st_gid, stat.st_mode + ) + except Exception: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), @@ -554,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): target.write(content) return # the contents were the same, but we might still need to change the - # ownership. + # ownership or permissions. if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) @@ -563,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444): log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) + if existing_perms != perms: + log("Changing permissions on existing content: {} -> {}" + .format(existing_perms, perms), level=DEBUG) + os.chmod(path, perms) def fstab_remove(mp): @@ -827,7 +836,7 @@ def list_nics(nic_type=None): ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) - key = re.compile('^[0-9]+:\s+(.+):') + key = re.compile(r'^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: @@ -1040,3 +1049,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return modulo * wait else: return calculated_wait_time + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. 
+ """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py index 2d404528..e01f4f8b 100644 --- a/ceph-mon/hooks/charmhelpers/core/kernel.py +++ b/ceph-mon/hooks/charmhelpers/core/kernel.py @@ -26,12 +26,12 @@ __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 480a6276..8572d34f 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -84,6 +84,7 @@ def base_url(self, url): fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages install = fetch.apt_install upgrade = fetch.apt_upgrade update = _fetch_update = fetch.apt_update @@ -96,6 +97,7 @@ def base_url(self, url): apt_update = fetch.apt_update apt_upgrade = fetch.apt_upgrade apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py index 07cd0293..c4ab3ff1 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from subprocess import check_call +from subprocess import STDOUT, check_output from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -55,7 +55,7 @@ def branch(self, source, dest, revno=None): cmd = ['bzr', 'branch'] cmd += cmd_opts cmd += [source, dest] - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py index 4cf21bc2..070ca9bb 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/giturl.py @@ -13,7 +13,7 @@ # limitations under the License. 
import os -from subprocess import check_call, CalledProcessError +from subprocess import check_output, CalledProcessError, STDOUT from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -50,7 +50,7 @@ def clone(self, source, dest, branch="master", depth=None): cmd = ['git', 'clone', source, dest, '--branch', branch] if depth: cmd.extend(['--depth', depth]) - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 19aa6baf..c7ad128c 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -189,6 +189,18 @@ def filter_installed_packages(packages): return _pkgs +def filter_missing_packages(packages): + """Return a list of packages that are installed. + + :param packages: list of packages to evaluate. + :returns list: Packages that are installed. + """ + return list( + set(packages) - + set(filter_installed_packages(packages)) + ) + + def apt_cache(in_memory=True, progress=None): """Build and return an apt cache.""" from apt import apt_pkg @@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) +def apt_autoremove(purge=True, fatal=False): + """Remove packages that are no longer required.""" + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) @@ -274,7 +294,7 @@ def apt_unhold(packages, fatal=False): def import_key(key): """Import an ASCII Armor key. - /!\ A Radix64 format keyid is also supported for backwards + A Radix64 format keyid is also supported for backwards compatibility, but should never be used; the key retrieval mechanism is insecure and subject to man-in-the-middle attacks voiding all signature checks using that key.
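Editor's usage sketch (not part of the patch): the goal-state helpers synced earlier in this change are typically used to gate work until the peer relation is complete. A minimal sketch, assuming it runs inside a peer relation hook on Juju >= 2.4; the function name is illustrative:

    from charmhelpers.core.hookenv import (
        expected_peer_units,
        log,
        related_units,
    )

    def all_peers_joined():
        """Return True once every expected peer has joined the relation."""
        try:
            expected = len(list(expected_peer_units()))
        except NotImplementedError:
            # Juju too old for goal-state support; assume completion.
            return True
        joined = len(related_units())
        log('peer {} of {} joined peer relation'.format(joined, expected))
        return joined >= expected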
@@ -434,6 +454,9 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository """ + if '{series}' in spec: + series = lsb_release()['DISTRIB_CODENAME'] + spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec]) diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 76f1f20a..42f8d72c 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -163,8 +163,19 @@ def _initialize_tests(self): # Authenticate admin with cinder endpoint self.cinder = u.authenticate_cinder_admin(self.keystone) + + force_v1_client = False + if self._get_openstack_release() == self.trusty_icehouse: + # Updating image properties (such as arch or hypervisor) using the + # v2 api in icehouse results in: + # https://bugs.launchpad.net/python-glanceclient/+bug/1371559 + u.log.debug('Forcing glance to use v1 api') + force_v1_client = True + # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin(self.keystone) + self.glance = u.authenticate_glance_admin( + self.keystone, + force_v1_client=force_v1_client) # Authenticate admin with nova endpoint self.nova = nova_client.Client(2, session=self.keystone_session) From 4dca3c11b2eb8744c03bdb014a18e7b5f08ca8e7 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 13 Nov 2018 11:36:46 +0000 Subject: [PATCH 1604/2699] Drop use of vaultlocker PPA vaultlocker is now provided in the UCA and via backports in bionic. Change-Id: I257b1b2e8aff7900eb0845ea416b54fb42b6b1f5 Closes-Bug: 1802905 --- ceph-osd/hooks/ceph_hooks.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index d1b82d42..9ba65848 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -408,8 +408,6 @@ def config_changed(): if use_vaultlocker(): installed = len(filter_installed_packages(['vaultlocker'])) == 0 if not installed: - add_source('ppa:openstack-charmers/vaultlocker') - apt_update(fatal=True) apt_install('vaultlocker', fatal=True) # Check if an upgrade was requested From d55ac26569cba93c85cc200fcae32aea69f8c5c7 Mon Sep 17 00:00:00 2001 From: Shane Peters Date: Mon, 5 Nov 2018 11:45:32 -0500 Subject: [PATCH 1605/2699] Add disable-pg-max-object-skew option Openstack clouds that use ceph will typically start their life with at least one pool (glance) loaded with a disproportionately high amount of data/objects where other pools may remain empty. This can trigger a HEALTH_WARN if mon_pg_warn_max_object_skew is exceeded but that is actually a false positive. Change-Id: I5a535dbb17db2149630d971d85ac311f14298b09 Closes-Bug: 1720374 --- ceph-mon/config.yaml | 8 ++++++++ ceph-mon/hooks/ceph_hooks.py | 4 ++++ ceph-mon/templates/ceph.conf | 4 ++++ ceph-mon/unit_tests/test_ceph_hooks.py | 3 ++- 4 files changed, 18 insertions(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 4b689850..f66d96fc 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -225,3 +225,11 @@ options: from the ceph all-in-one charm to a ceph-mon / ceph-osd deployment. Refer to the Charm Deployment guide at https://docs.openstack.org/charm-deployment-guide/latest/ for more information. + disable-pg-max-object-skew: + type: boolean + default: False + description: | + Openstack clouds that use ceph will typically start their life with at + least one pool (glance) loaded with a disproportionately high amount of + data/objects where other pools may remain empty. 
This can trigger a HEALTH_WARN
+      if mon_pg_warn_max_object_skew is exceeded, but that is actually a false positive.
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 79cff5f3..ccbe7970 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -169,6 +169,10 @@ def get_ceph_context():
     if config('default-rbd-features'):
         cephcontext['rbd_features'] = config('default-rbd-features')
 
+    if config('disable-pg-max-object-skew'):
+        cephcontext['disable_object_skew'] = config(
+            'disable-pg-max-object-skew')
+
     # NOTE(dosaboy): these sections must correspond to what is supported in the
     # config template.
     sections = ['global', 'mds', 'mon']
diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf
index b19cde13..b4cdb034 100644
--- a/ceph-mon/templates/ceph.conf
+++ b/ceph-mon/templates/ceph.conf
@@ -50,6 +50,10 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring
 {{ key }} = {{ mon[key] }}
 {% endfor %}
 {% endif %}
+{% if disable_object_skew and "mon pg warn max object skew" not in mon %}
+mon pg warn max object skew = 0
+{% endif %}
+
 [mds]
 keyring = /var/lib/ceph/mds/$cluster-$id/keyring
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index 2ceec433..0f96d2da 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -48,7 +48,8 @@
     'nagios_degraded_thresh': '1',
     'nagios_misplaced_thresh': '10',
     'nagios_recovery_rate': '1',
-    'nagios_ignore_nodeepscub': False}
+    'nagios_ignore_nodeepscub': False,
+    'disable-pg-max-object-skew': False}
 
 class CephHooksTestCase(unittest.TestCase):

From 795a8893b9f4c824c19c01dbe811f1ebba9fe582 Mon Sep 17 00:00:00 2001
From: Vladimir Grevtsev
Date: Fri, 16 Nov 2018 22:16:12 +0300
Subject: [PATCH 1606/2699] Extend the BlueStore config options with examples

This change adds usage instructions and examples for future charm
users.

Change-Id: If6698e37e32fc2b526d546cb046a7c8091f66634
---
 ceph-osd/config.yaml | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index 8e04fb10..1220334b 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -97,16 +97,28 @@ options:
     type: int
     default: 0
     description: |
-      Size of a partition or file to use for BlueStore WAL (RocksDB WAL)
-      A default value is not set as it is calculated by ceph-disk if
-      not specified.
+      Size (in bytes) of a partition, file or LV to use for
+      BlueStore WAL (RocksDB WAL), provided on a per-backend-device basis.
+      .
+      Example: 128 GB device, 8 data devices provided in "osd-devices"
+      gives 128 / 8 GB = 16 GB = 16000000000 bytes per device.
+      .
+      A default value is not set, as it is calculated by ceph-disk (before Luminous)
+      or by the charm itself when ceph-volume is used (Luminous and above).
+
   bluestore-block-db-size:
     type: int
     default: 0
     description: |
-      Size of a partition or file to use for BlueStore metadata
-      or RocksDB SSTs. A default value is not set as it is calculated
-      by ceph-disk if not specified.
+      Size (in bytes) of a partition, file or LV to use for BlueStore
+      metadata or RocksDB SSTs, provided on a per-backend-device basis.
+      .
+      Example: 128 GB device, 8 data devices provided in "osd-devices"
+      gives 128 / 8 GB = 16 GB = 16000000000 bytes per device.
+      .
+      A default value is not set, as it is calculated by ceph-disk (before Luminous)
+      or by the charm itself when ceph-volume is used (Luminous and above).
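The sizing rule spelled out in both descriptions is plain integer division of the shared WAL/DB device across the configured OSD data devices. A rough illustration of the arithmetic (helper and names are illustrative, not charm code):

    GB = 1000 ** 3  # the examples above use decimal gigabytes

    def per_device_bytes(shared_device_bytes, num_osd_devices):
        # Value to pass to bluestore-block-wal-size or
        # bluestore-block-db-size when one WAL/DB device is shared
        # by several OSD data devices.
        return shared_device_bytes // num_osd_devices

    # A 128 GB device shared by 8 data devices -> 16000000000 bytes each.
    assert per_device_bytes(128 * GB, 8) == 16000000000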
+
   osd-format:
     type: string
     default: xfs

From f08b3cf4d115d1619fd2c38e39321adcdc523627 Mon Sep 17 00:00:00 2001
From: Alvaro Uria
Date: Thu, 9 Aug 2018 13:22:49 +0200
Subject: [PATCH 1607/2699] Support systemd nrpe check for ceph-osd@N units

The ceph-osd<->nrpe relation adds a single check that parses the
service status of all the ceph-osd processes. The check supported sysv
and upstart environments but not systemd; systemd support has now been
added. add_init_service_checks does support systemd, but it would
create an nrpe check per OSD (vs a single check for all OSDs).

Change-Id: I34fc01365de6994c93a273f01a1e2278016d21ef
Closes-Bug: 1804247
Signed-off-by: Alvaro Uria
---
 ceph-osd/hooks/ceph_hooks.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index d1b82d42..11ed35ff 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -56,6 +56,7 @@
     restart_on_change,
     write_file,
     is_container,
+    init_is_systemd,
 )
 from charmhelpers.fetch import (
     add_source,
@@ -636,12 +637,21 @@ def update_nrpe_config():
         apt_install('python3-dbus')
     hostname = nrpe.get_nagios_hostname()
     current_unit = nrpe.get_nagios_unit_name()
+
+    # create systemd or upstart check
+    cmd = '/bin/cat /var/lib/ceph/osd/ceph-*/whoami |'
+    if init_is_systemd():
+        cmd += 'xargs -I_@ /usr/local/lib/nagios/plugins/check_systemd.py'
+        cmd += ' ceph-osd@_@'
+    else:
+        cmd += 'xargs -I@ status ceph-osd id=@'
+        cmd += ' && exit 0 || exit 2'
+
     nrpe_setup = nrpe.NRPE(hostname=hostname)
     nrpe_setup.add_check(
         shortname='ceph-osd',
         description='process check {%s}' % current_unit,
-        check_cmd=('/bin/cat /var/lib/ceph/osd/ceph-*/whoami |'
-                   'xargs -I@ status ceph-osd id=@ && exit 0 || exit 2')
+        check_cmd=cmd
     )
     nrpe_setup.write()

From b7c839d49d8b473474aef6b231c8271138887f8e Mon Sep 17 00:00:00 2001
From: Marian Gasparovic
Date: Tue, 6 Nov 2018 12:20:25 +0100
Subject: [PATCH 1608/2699] Don't return Critical when ceph is in warning state

The current implementation returns Critical when Ceph is in a warning
state, checking for some known exceptions which are considered
operational tasks. However, this causes many alarms. This patch changes
the behavior to report Warning when Ceph is in HEALTH_WARN. If known
operational tasks exceed their thresholds, Critical is returned.
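The mapping this patch aims for can be condensed into a small decision table before reading the diff (names here are illustrative; the authoritative logic is in check_ceph_status.py below):

    NAGIOS_OK, NAGIOS_WARN, NAGIOS_CRIT = 0, 1, 2

    def nagios_state(overall_status, threshold_exceeded):
        if overall_status == 'HEALTH_OK':
            return NAGIOS_OK
        if overall_status in ('HEALTH_ERR', 'HEALTH_CRITICAL'):
            return NAGIOS_CRIT
        # HEALTH_WARN: escalate to critical only when an operational
        # threshold (degraded/misplaced ratio, stalled recovery, or the
        # nodeep-scrub flag with --raise_nodeepscrub) is exceeded.
        return NAGIOS_CRIT if threshold_exceeded else NAGIOS_WARN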
Change-Id: I7a330189da8f0ba9168cedb534823c5e8f4795ba --- ceph-mon/config.yaml | 14 +- ceph-mon/files/nagios/check_ceph_status.py | 93 +++-- ceph-mon/hooks/ceph_hooks.py | 4 +- .../unit_tests/ceph_degraded_luminous.json | 147 ++++++++ ceph-mon/unit_tests/ceph_error.json | 118 ++++++ .../ceph_many_warnings_luminous.json | 147 ++++++++ ceph-mon/unit_tests/ceph_nodeepscrub.json | 353 ++++++++++-------- .../unit_tests/ceph_nodeepscrub_luminous.json | 102 +++++ ceph-mon/unit_tests/test_ceph_hooks.py | 2 +- ceph-mon/unit_tests/test_check_ceph_status.py | 143 +++++-- 10 files changed, 889 insertions(+), 234 deletions(-) create mode 100644 ceph-mon/unit_tests/ceph_degraded_luminous.json create mode 100644 ceph-mon/unit_tests/ceph_error.json create mode 100644 ceph-mon/unit_tests/ceph_many_warnings_luminous.json create mode 100644 ceph-mon/unit_tests/ceph_nodeepscrub_luminous.json diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index f66d96fc..9e23a7ba 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -189,17 +189,21 @@ options: type: float description: "Threshold for degraded ratio (0.1 = 10%)" nagios_misplaced_thresh: - default: 10.0 + default: 1.0 type: float description: "Threshold for misplaced ratio (0.1 = 10%)" nagios_recovery_rate: default: '1' type: string - description: Recovery rate below which we consider recovery to be stalled - nagios_ignore_nodeepscub: - default: False + description: | + Recovery rate (in objects/s) below which we consider recovery + to be stalled. + nagios_raise_nodeepscrub: + default: True type: boolean - description: Whether to ignore the nodeep-scrub flag + description: | + Whether to report Critical instead of Warning when the nodeep-scrub + flag is set. use-direct-io: type: boolean default: True diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index ef978023..9839bb85 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -102,10 +102,6 @@ def check_ceph_status(args): :returns string, describing the status of the ceph cluster. """ - ignorable = (r'\d+ pgs (?:backfill|degraded|recovery_wait|stuck unclean)|' - 'recovery \d+\/\d+ objects (?:degraded|misplaced)') - if args.ignore_nodeepscrub: - ignorable = ignorable + '|nodeep-scrub flag\(s\) set' status_critical = False if args.status_file: check_file_freshness(args.status_file) @@ -136,41 +132,60 @@ def check_ceph_status(args): luminous = False if overall_status != 'HEALTH_OK': - # Health is not OK, check if any lines are not in our list of OK - # any lines that don't match, check is critical + # Health is not OK, collect status message(s) and + # decide whether to return warning or critical + status_critical = False status_msg = [] if luminous: - status_messages = [x['summary']['message'] for x in status_data['health'].get('checks').values()] + status_messages = [x['summary']['message'] + for x in + status_data['health'].get('checks').values()] else: - status_messages = [x['summary'] for x in status_data['health']['summary']] + status_messages = [x['summary'] + for x in + status_data['health']['summary']] for status in status_messages: - if not re.match(ignorable, status): - status_critical = True - status_msg.append(status) - # If we got this far, then the status is not OK but the status lines - # are all in our list of things we consider to be operational tasks. 
-        # Check the thresholds and return CRITICAL if exceeded,
-        # otherwise there's something not accounted for and we want to know
-        # about it with a WARN alert.
-        degraded_ratio = status_data['pgmap'].get('degraded_ratio', 0.0)
-        if degraded_ratio > args.degraded_thresh:
-            status_critical = True
-            status_msg.append("Degraded ratio: {}".format(degraded_ratio))
-        misplaced_ratio = status_data['pgmap'].get('misplaced_ratio', 0.0)
-        if misplaced_ratio > args.misplaced_thresh:
-            status_critical = True
-            status_msg.append("Misplaced ratio: {}".format(misplaced_ratio))
-        recovering = status_data['pgmap'].get('recovering_objects_per_sec',
-                                              0.0)
-        if recovering < args.recovery_rate:
+            status_msg.append(status)
+            # Check if nodeep-scrub is set and whether it should raise an error
+            if args.raise_nodeepscrub:
+                if re.match("nodeep-scrub flag", status):
+                    status_critical = True
+        if overall_status == 'HEALTH_CRITICAL' or \
+           overall_status == 'HEALTH_ERR':
+            # HEALTH_ERR, report critical
             status_critical = True
-            status_msg.append("Recovering objects/sec {}".format(recovering))
+        else:
+            # HEALTH_WARN
+            # Check the threshold for a list of operational tasks,
+            # and return CRITICAL if exceeded
+            degraded_ratio = float(status_data['pgmap'].get('degraded_ratio',
+                                                            0.0))
+            if degraded_ratio > args.degraded_thresh:
+                status_critical = True
+            if degraded_ratio > 0:
+                status_msg.append("Degraded ratio: {}".format(degraded_ratio))
+            misplaced_ratio = float(status_data['pgmap'].get('misplaced_ratio',
+                                                             0.0))
+            if misplaced_ratio > args.misplaced_thresh:
+                status_critical = True
+            if misplaced_ratio > 0:
+                status_msg.append("Misplaced ratio: {}".
+                                  format(misplaced_ratio))
+            recovering = float(status_data['pgmap'].
+                               get('recovering_objects_per_sec', 0.0))
+            if (degraded_ratio > 0 or misplaced_ratio > 0) \
+               and recovering > 0 \
+               and recovering < args.recovery_rate:
+                status_critical = True
+            if recovering > 0:
+                status_msg.append("Recovering objects/s {}".format(recovering))
 
         if status_critical:
             msg = 'CRITICAL: ceph health: "{} {}"'.format(
                 overall_status, ", ".join(status_msg))
             raise CriticalError(msg)
-        if overall_status == 'HEALTH_WARN':
+        else:
+            # overall_status == 'HEALTH_WARN':
             msg = "WARNING: {}".format(", ".join(status_msg))
             raise WarnError(msg)
     message = "All OK"
@@ -187,21 +202,21 @@ def parse_args(args):
                         'user account does not have rights for the Ceph '
                         'config files.')
     parser.add_argument('--degraded_thresh', dest='degraded_thresh',
-                        default=1, type=float,
+                        default=1.0, type=float,
                         help="Threshold for degraded ratio (0.1 = 10%)")
     parser.add_argument('--misplaced_thresh', dest='misplaced_thresh',
-                        default=10, type=float,
+                        default=1.0, type=float,
                         help="Threshold for misplaced ratio (0.1 = 10%)")
     parser.add_argument('--recovery_rate', dest='recovery_rate',
                         default=1, type=int,
-                        help="Recovery rate below which we consider recovery "
-                             "to be stalled")
-    parser.add_argument('--ignore_nodeepscrub', dest='ignore_nodeepscrub',
+                        help="Recovery rate (in objects/s) below which we "
+                             "consider recovery to be stalled")
+    parser.add_argument('--raise_nodeepscrub', dest='raise_nodeepscrub',
                         default=False, action='store_true',
-                        help="Whether to ignore the nodeep-scrub flag. If "
-                             "the nodeep-scrub flag is set, the check returns "
-                             "warning if this param is passed, otherwise "
-                             "returns critical.")
+                        help="Whether to raise an error for the nodeep-scrub "
+                             "flag. If the nodeep-scrub flag is set, "
+                             "the check returns critical if this param is "
+                             "passed, otherwise it returns warning.")
 
     return parser.parse_args(args)
@@ -218,7 +233,7 @@ def main(args):
         exitcode = 'critical'
     except WarnError as msg:
         print(msg)
-        exitcode = 'critical'
+        exitcode = 'warning'
     except:
         print("%s raised unknown exception '%s'" % ('check_ceph_status',
                                                     sys.exc_info()[0]))
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index ccbe7970..f007181c 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -748,8 +748,8 @@ def update_nrpe_config():
                      config('nagios_degraded_thresh'),
                      config('nagios_misplaced_thresh'),
                      config('nagios_recovery_rate'))
-    if config('nagios_ignore_nodeepscub'):
-        check_cmd = check_cmd + ' --ignore_nodeepscrub'
+    if config('nagios_raise_nodeepscrub'):
+        check_cmd = check_cmd + ' --raise_nodeepscrub'
     nrpe_setup.add_check(
         shortname="ceph",
         description='Check Ceph health {{{}}}'.format(current_unit),
diff --git a/ceph-mon/unit_tests/ceph_degraded_luminous.json b/ceph-mon/unit_tests/ceph_degraded_luminous.json
new file mode 100644
index 00000000..3cf3bdd3
--- /dev/null
+++ b/ceph-mon/unit_tests/ceph_degraded_luminous.json
@@ -0,0 +1,147 @@
+{
+    "fsid": "b03a2900-e297-11e8-a7db-00163ed10659",
+    "health": {
+        "checks": {
+            "OSD_DOWN": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "3 osds down"
+                }
+            },
+            "OSD_HOST_DOWN": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "1 host (3 osds) down"
+                }
+            },
+            "OBJECT_MISPLACED": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "9883/43779 objects misplaced (22.575%)"
+                }
+            },
+            "PG_DEGRADED": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "Degraded data redundancy: 14001/43779 objects degraded (31.981%), 32 pgs degraded"
+                }
+            },
+            "POOL_APP_NOT_ENABLED": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "application not enabled on 1 pool(s)"
+                }
+            },
+            "TOO_FEW_PGS": {
+                "severity": "HEALTH_WARN",
+                "summary": {
+                    "message": "too few PGs per OSD (7 < min 30)"
+                }
+            }
+        },
+        "status": "HEALTH_WARN"
+    },
+    "election_epoch": 5,
+    "quorum": [
+        0
+    ],
+    "quorum_names": [
+        "juju-460e0f-11"
+    ],
+    "monmap": {
+        "epoch": 1,
+        "fsid": "b03a2900-e297-11e8-a7db-00163ed10659",
+        "modified": "2018-11-07 14:17:12.324408",
+        "created": "2018-11-07 14:17:12.324408",
+        "features": {
+            "persistent": [
+                "kraken",
+                "luminous"
+            ],
+            "optional": []
+        },
+        "mons": [
+            {
+                "rank": 0,
+                "name": "juju-460e0f-11",
+                "addr": "192.168.100.81:6789/0",
+                "public_addr": "192.168.100.81:6789/0"
+            }
+        ]
+    },
+    "osdmap": {
+        "osdmap": {
+            "epoch": 72,
+            "num_osds": 9,
+            "num_up_osds": 6,
+            "num_in_osds": 9,
+            "full": false,
+            "nearfull": false,
+            "num_remapped_pgs": 16
+        }
+    },
+    "pgmap": {
+        "pgs_by_state": [
+            {
+                "state_name": "active+undersized+degraded",
+                "count": 16
+            },
+            {
+                "state_name": "active+undersized+degraded+remapped+backfill_wait",
+                "count": 14
+            },
+            {
+                "state_name": "active+undersized+degraded+remapped+backfilling",
+                "count": 2
+            }
+        ],
+        "num_pgs": 32,
+        "num_pools": 1,
+        "num_objects": 14593,
+        "data_bytes": 61169729807,
+        "bytes_used": 14540595200,
+        "bytes_avail": 14889525248,
+        "bytes_total": 29430120448,
+        "degraded_objects": 14001,
+        "degraded_total": 43779,
+        "degraded_ratio": 0.319811,
+        "misplaced_objects": 9883,
+        "misplaced_total": 43779,
+        "misplaced_ratio": 0.225748
+    },
+    "fsmap": {
+        "epoch": 1,
+        "by_rank": []
+    },
+    "mgrmap": {
+        "epoch": 5,
+        "active_gid": 14097,
+        "active_name": "juju-460e0f-11",
+        
"active_addr": "192.168.100.81:6800/204", + "available": true, + "standbys": [], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + "services": {} + }, + "servicemap": { + "epoch": 1, + "modified": "0.000000", + "services": {} + } +} + diff --git a/ceph-mon/unit_tests/ceph_error.json b/ceph-mon/unit_tests/ceph_error.json new file mode 100644 index 00000000..eb9a161c --- /dev/null +++ b/ceph-mon/unit_tests/ceph_error.json @@ -0,0 +1,118 @@ +{ + "health": { + "health": { + "health_services": [ + { + "mons": [ + { + "name": "juju-460e0f-12", + "kb_total": 1829760, + "kb_used": 835072, + "kb_avail": 994688, + "avail_percent": 54, + "last_updated": "2018-11-07 18:46:32.308592", + "store_stats": { + "bytes_total": 15678387, + "bytes_sst": 0, + "bytes_log": 420953, + "bytes_misc": 15257434, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 3, + "round": 0, + "round_status": "finished" + }, + "summary": [ + { + "severity": "HEALTH_ERR", + "summary": "6 pgs are stuck inactive for more than 300 seconds" + }, + { + "severity": "HEALTH_WARN", + "summary": "7 pgs peering" + }, + { + "severity": "HEALTH_WARN", + "summary": "6 pgs stuck inactive" + }, + { + "severity": "HEALTH_WARN", + "summary": "6 pgs stuck unclean" + } + ], + "overall_status": "HEALTH_ERR", + "detail": [] + }, + "fsid": "68a9ca14-e297-11e8-843c-00163e64b0c0", + "election_epoch": 3, + "quorum": [ + 0 + ], + "quorum_names": [ + "juju-460e0f-12" + ], + "monmap": { + "epoch": 1, + "fsid": "68a9ca14-e297-11e8-843c-00163e64b0c0", + "modified": "2018-11-07 14:17:27.659064", + "created": "2018-11-07 14:17:27.659064", + "mons": [ + { + "rank": 0, + "name": "juju-460e0f-12", + "addr": "192.168.100.26:6789\/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 28, + "num_osds": 9, + "num_up_osds": 9, + "num_in_osds": 9, + "full": false, + "nearfull": false, + "num_remapped_pgs": 0 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "creating", + "count": 113 + }, + { + "state_name": "active+clean", + "count": 64 + }, + { + "state_name": "activating", + "count": 8 + }, + { + "state_name": "peering", + "count": 7 + } + ], + "version": 7831, + "num_pgs": 192, + "data_bytes": 1790967809, + "bytes_used": 9995157504, + "bytes_avail": 9157476352, + "bytes_total": 19152633856, + "write_bytes_sec": 89844495, + "read_op_per_sec": 0, + "write_op_per_sec": 21 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + } +} diff --git a/ceph-mon/unit_tests/ceph_many_warnings_luminous.json b/ceph-mon/unit_tests/ceph_many_warnings_luminous.json new file mode 100644 index 00000000..3e5c11e8 --- /dev/null +++ b/ceph-mon/unit_tests/ceph_many_warnings_luminous.json @@ -0,0 +1,147 @@ +{ + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "health": { + "checks": { + "OBJECT_MISPLACED": { + "severity": "HEALTH_WARN", + "summary": { + "message": "1560/12264 objects misplaced (12.720%)" + } + }, + "PG_AVAILABILITY": { + "severity": "HEALTH_WARN", + "summary": { + "message": "Reduced data availability: 27 pgs inactive, 30 pgs peering" + } + }, + "POOL_APP_NOT_ENABLED": { + "severity": "HEALTH_WARN", + "summary": { + "message": "application not enabled on 1 pool(s)" + } + }, + "TOO_FEW_PGS": { + "severity": "HEALTH_WARN", + "summary": { + "message": "too few PGs per OSD (21 < min 30)" + } + } + }, + "status": "HEALTH_WARN" + }, + "election_epoch": 5, + 
"quorum": [ + 0 + ], + "quorum_names": [ + "juju-460e0f-11" + ], + "monmap": { + "epoch": 1, + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "modified": "2018-11-07 14:17:12.324408", + "created": "2018-11-07 14:17:12.324408", + "features": { + "persistent": [ + "kraken", + "luminous" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "juju-460e0f-11", + "addr": "192.168.100.81:6789/0", + "public_addr": "192.168.100.81:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 118, + "num_osds": 9, + "num_up_osds": 9, + "num_in_osds": 9, + "full": false, + "nearfull": false, + "num_remapped_pgs": 15 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "unknown", + "count": 65 + }, + { + "state_name": "peering", + "count": 31 + }, + { + "state_name": "activating", + "count": 17 + }, + { + "state_name": "activating+remapped", + "count": 15 + } + ], + "num_pgs": 128, + "num_pools": 1, + "num_objects": 4088, + "data_bytes": 17187733578, + "bytes_used": 14360064000, + "bytes_avail": 15023263744, + "bytes_total": 29383327744, + "unknown_pgs_ratio": 0.507812, + "inactive_pgs_ratio": 0.492188, + "misplaced_objects": 1560, + "misplaced_total": 12264, + "misplaced_ratio": 0.127202, + "recovering_objects_per_sec": 14, + "recovering_bytes_per_sec": 60779755, + "recovering_keys_per_sec": 0, + "num_objects_recovered": 113, + "num_bytes_recovered": 471859200, + "num_keys_recovered": 0, + "read_bytes_sec": 0, + "write_bytes_sec": 244132150, + "read_op_per_sec": 0, + "write_op_per_sec": 116 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + }, + "mgrmap": { + "epoch": 5, + "active_gid": 14097, + "active_name": "juju-460e0f-11", + "active_addr": "192.168.100.81:6800/204", + "available": true, + "standbys": [], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + "services": {} + }, + "servicemap": { + "epoch": 1, + "modified": "0.000000", + "services": {} + } +} diff --git a/ceph-mon/unit_tests/ceph_nodeepscrub.json b/ceph-mon/unit_tests/ceph_nodeepscrub.json index fe3aedf8..2488fabb 100644 --- a/ceph-mon/unit_tests/ceph_nodeepscrub.json +++ b/ceph-mon/unit_tests/ceph_nodeepscrub.json @@ -1,177 +1,202 @@ { - "health": { "health": { - "health_services": [ - { - "mons": [ + "health": { + "health_services": [ + { + "mons": [ + { + "name": "juju-c62a41-21-lxd-0", + "kb_total": 334602320, + "kb_used": 2127960, + "kb_avail": 315454468, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:09.932189", + "store_stats": { + "bytes_total": 34880542, + "bytes_sst": 0, + "bytes_log": 1647123, + "bytes_misc": 33233419, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-24-lxd-0", + "kb_total": 334602320, + "kb_used": 2128116, + "kb_avail": 315454312, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:16.418007", + "store_stats": { + "bytes_total": 36811676, + "bytes_sst": 0, + "bytes_log": 3574345, + "bytes_misc": 33237331, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-25-lxd-0", + "kb_total": 334602320, + "kb_used": 2128860, + "kb_avail": 315453568, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:21.198816", + "store_stats": { + "bytes_total": 37388424, + "bytes_sst": 0, + "bytes_log": 4151569, + "bytes_misc": 33236855, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 14, + 
"round": 4480, + "round_status": "finished", + "mons": [ + { + "name": "juju-c62a41-21-lxd-0", + "skew": 0.000000, + "latency": 0.000000, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-24-lxd-0", + "skew": 0.000282, + "latency": 0.000989, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-25-lxd-0", + "skew": -0.001223, + "latency": 0.000776, + "health": "HEALTH_OK" + } + ] + }, + "summary": [ + { + "severity": "HEALTH_WARN", + "summary": "19 pgs backfill_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "4 pgs backfilling" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs peering" + }, + { + "severity": "HEALTH_WARN", + "summary": "24 pgs stuck unclean" + }, { - "name": "node1", - "kb_total": 140956600, - "kb_used": 15916132, - "kb_avail": 117857208, - "avail_percent": 83, - "last_updated": "2017-05-17 03:23:11.248297", - "store_stats": { - "bytes_total": 140014259, - "bytes_sst": 0, - "bytes_log": 13670758, - "bytes_misc": 126343501, - "last_updated": "0.000000" - }, - "health": "HEALTH_OK" + "severity": "HEALTH_WARN", + "summary": "recovery 17386\/112794 objects misplaced (15.414%)" }, { - "name": "node2", - "kb_total": 70395920, - "kb_used": 10532504, - "kb_avail": 56264436, - "avail_percent": 79, - "last_updated": "2017-05-17 03:23:16.952673", - "store_stats": { - "bytes_total": 315512452, - "bytes_sst": 0, - "bytes_log": 21691698, - "bytes_misc": 293820754, - "last_updated": "0.000000" - }, - "health": "HEALTH_OK" + "severity": "HEALTH_WARN", + "summary": "pool pool1 has many more objects per pg than average (too few pgs?)" }, { - "name": "juju-machine-85-lxc-10", - "kb_total": 131927524, - "kb_used": 79521024, - "kb_avail": 45954016, - "avail_percent": 34, - "last_updated": "2017-05-17 03:23:13.794034", - "store_stats": { - "bytes_total": 89036349, - "bytes_sst": 0, - "bytes_log": 21055337, - "bytes_misc": 67981012, - "last_updated": "0.000000" - }, - "health": "HEALTH_OK" + "severity": "HEALTH_WARN", + "summary": "nodeep-scrub flag(s) set" } - ] - } - ] + ], + "overall_status": "HEALTH_WARN", + "detail": [] }, - "timechecks": { - "epoch": 280, - "round": 19874, - "round_status": "finished", - "mons": [ - { - "name": "node1", - "skew": "0.000000", - "latency": "0.000000", - "health": "HEALTH_OK" - }, - { - "name": "node2", - "skew": "-0.000000", - "latency": "0.000866", - "health": "HEALTH_OK" - }, - { - "name": "juju-machine-85-lxc-10", - "skew": "-0.000000", - "latency": "0.018848", - "health": "HEALTH_OK" - } - ] - }, - "summary": [ - { - "severity": "HEALTH_WARN", - "summary": "nodeep-scrub flag(s) set" - } + "fsid": "66af7af5-2f60-4e0e-94dc-49f49bd37284", + "election_epoch": 14, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-c62a41-21-lxd-0", + "juju-c62a41-24-lxd-0", + "juju-c62a41-25-lxd-0" ], - "overall_status": "HEALTH_WARN", - "detail": [] - }, - "fsid": "some_fsid", - "election_epoch": 280, - "quorum": [ - 0, - 1, - 2 - ], - "quorum_names": [ - "node1", - "node2", - "juju-machine-85-lxc-10" - ], - "monmap": { - "epoch": 3, - "fsid": "some_fsid", - "modified": "2016-11-25 00:08:51.235813", - "created": "0.000000", - "mons": [ - { - "rank": 0, - "name": "node1", - "addr": "10.24.0.15:6789/0" - }, - { - "rank": 1, - "name": "node2", - "addr": "10.24.0.17:6789/0" - }, - { - "rank": 2, - "name": "juju-machine-85-lxc-10", - "addr": "10.24.0.195:6789/0" - } - ] - }, - "osdmap": { + "monmap": { + "epoch": 2, + "fsid": "66af7af5-2f60-4e0e-94dc-49f49bd37284", + "modified": "2018-10-31 15:37:56.902830", + "created": "2018-10-31 
15:37:40.288870", + "mons": [ + { + "rank": 0, + "name": "juju-c62a41-21-lxd-0", + "addr": "100.84.195.4:6789\/0" + }, + { + "rank": 1, + "name": "juju-c62a41-24-lxd-0", + "addr": "100.84.196.4:6789\/0" + }, + { + "rank": 2, + "name": "juju-c62a41-25-lxd-0", + "addr": "100.84.196.5:6789\/0" + } + ] + }, "osdmap": { - "epoch": 37820, - "num_osds": 46, - "num_up_osds": 46, - "num_in_osds": 46, - "full": false, - "nearfull": false + "osdmap": { + "epoch": 316, + "num_osds": 48, + "num_up_osds": 48, + "num_in_osds": 48, + "full": false, + "nearfull": false, + "num_remapped_pgs": 22 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 3448 + }, + { + "state_name": "active+remapped+wait_backfill", + "count": 19 + }, + { + "state_name": "active+remapped+backfilling", + "count": 4 + }, + { + "state_name": "peering", + "count": 1 + } + ], + "version": 141480, + "num_pgs": 3472, + "data_bytes": 157009583781, + "bytes_used": 487185850368, + "bytes_avail": 75282911256576, + "bytes_total": 75770097106944, + "misplaced_objects": 17386, + "misplaced_total": 112794, + "misplaced_ratio": 0.154139, + "recovering_objects_per_sec": 436, + "recovering_bytes_per_sec": 1832614589, + "recovering_keys_per_sec": 0, + "num_objects_recovered": 446, + "num_bytes_recovered": 1870659584, + "num_keys_recovered": 0 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] } - }, - "pgmap": { - "pgs_by_state": [ - { - "state_name": "active+clean", - "count": 1988 - }, - { - "state_name": "active+remapped+wait_backfill", - "count": 3 - }, - { - "state_name": "active+remapped+backfilling", - "count": 1 - } - ], - "version": 58873447, - "num_pgs": 1992, - "data_bytes": 35851846298041, - "bytes_used": 107730678743040, - "bytes_avail": 63413590548480, - "bytes_total": 171144269291520, - "degraded_objects": 0, - "degraded_total": 25759217, - "degraded_ratio": 0, - "recovering_objects_per_sec": 17, - "recovering_bytes_per_sec": 72552794, - "recovering_keys_per_sec": 0, - "read_bytes_sec": 23935944, - "write_bytes_sec": 7024503, - "op_per_sec": 5332 - }, - "mdsmap": { - "epoch": 1, - "up": 0, - "in": 0, - "max": 1, - "by_rank": [] - } } - diff --git a/ceph-mon/unit_tests/ceph_nodeepscrub_luminous.json b/ceph-mon/unit_tests/ceph_nodeepscrub_luminous.json new file mode 100644 index 00000000..3d161fba --- /dev/null +++ b/ceph-mon/unit_tests/ceph_nodeepscrub_luminous.json @@ -0,0 +1,102 @@ +{ + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "health": { + "checks": { + "OSDMAP_FLAGS": { + "severity": "HEALTH_WARN", + "summary": { + "message": "nodeep-scrub flag(s) set" + } + } + }, + "status": "HEALTH_WARN" + }, + "election_epoch": 5, + "quorum": [ + 0 + ], + "quorum_names": [ + "juju-460e0f-11" + ], + "monmap": { + "epoch": 1, + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "modified": "2018-11-07 14:17:12.324408", + "created": "2018-11-07 14:17:12.324408", + "features": { + "persistent": [ + "kraken", + "luminous" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "juju-460e0f-11", + "addr": "192.168.100.81:6789/0", + "public_addr": "192.168.100.81:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 518, + "num_osds": 9, + "num_up_osds": 9, + "num_in_osds": 9, + "full": false, + "nearfull": false, + "num_remapped_pgs": 0 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 128 + } + ], + "num_pgs": 128, + "num_pools": 1, + "num_objects": 14896, + "data_bytes": 62440603919, + "bytes_used": 14225776640, + "bytes_avail": 9450938368, + 
"bytes_total": 23676715008 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + }, + "mgrmap": { + "epoch": 5, + "active_gid": 14097, + "active_name": "juju-460e0f-11", + "active_addr": "192.168.100.81:6800/204", + "available": true, + "standbys": [], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + "services": {} + }, + "servicemap": { + "epoch": 1, + "modified": "0.000000", + "services": {} + } +} diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 0f96d2da..fa792042 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -48,7 +48,7 @@ 'nagios_degraded_thresh': '1', 'nagios_misplaced_thresh': '10', 'nagios_recovery_rate': '1', - 'nagios_ignore_nodeepscub': False, + 'nagios_raise_nodeepscrub': True, 'disable-pg-max-object-skew': False} diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index caf89bc9..69ac4177 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -32,6 +32,7 @@ def test_get_ceph_version(self, mock_subprocess): ceph_version = check_ceph_status.get_ceph_version() self.assertEqual(ceph_version, [10, 2, 9]) + # All OK, pre-luminoius @patch('check_ceph_status.get_ceph_version') def test_health_ok(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] @@ -42,72 +43,168 @@ def test_health_ok(self, mock_ceph_version, mock_subprocess): check_output = check_ceph_status.check_ceph_status(args) self.assertRegex(check_output, r"^All OK$") + # Warning, pre-luminous @patch('check_ceph_status.get_ceph_version') - def test_health_ok_luminous(self, mock_ceph_version, mock_subprocess): - mock_ceph_version.return_value = [12, 2, 0] - with open('unit_tests/ceph_ok_luminous.json') as f: + def test_health_warn(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] + with open('unit_tests/ceph_warn.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) - check_output = check_ceph_status.check_ceph_status(args) - self.assertRegex(check_output, r"^All OK$") + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, health_critical status @patch('check_ceph_status.get_ceph_version') - def test_health_warn(self, mock_ceph_version, mock_subprocess): + def test_health_err(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] - with open('unit_tests/ceph_warn.json') as f: + with open('unit_tests/ceph_crit.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) - self.assertRaises(check_ceph_status.WarnError, + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, overall HEALTH_ERR @patch('check_ceph_status.get_ceph_version') def test_health_crit(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] - with open('unit_tests/ceph_crit.json') as f: + with open('unit_tests/ceph_error.json') as f: tree = f.read() 
mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + args = check_ceph_status.parse_args("") self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, because misplaced ratio is too big @patch('check_ceph_status.get_ceph_version') - def test_health_crit_luminous(self, mock_ceph_version, mock_subprocess): - mock_ceph_version.return_value = [12, 2, 0] - with open('unit_tests/ceph_crit_luminous.json') as f: + def test_health_crit_misplaced(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] + with open('unit_tests/ceph_params.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + args = check_ceph_status.parse_args(['--misplaced_thresh', '0.1']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, because recovery rate is too low @patch('check_ceph_status.get_ceph_version') - def test_health_lotsdegraded(self, mock_ceph_version, mock_subprocess): + def test_health_crit_recovery(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_params.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + args = check_ceph_status.parse_args(['--recovery_rate', '400']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Warning, pre-luminous, deepscrub @patch('check_ceph_status.get_ceph_version') - def test_health_nodeepscrub(self, mock_ceph_version, mock_subprocess): + def test_health_warn_deepscrub(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--degraded_thresh', '1']) - self.assertRaises(check_ceph_status.CriticalError, + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.WarnError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, deepscrub @patch('check_ceph_status.get_ceph_version') - def test_health_nodeepscrubok(self, mock_ceph_version, mock_subprocess): + def test_health_crit_deepscrub(self, mock_ceph_version, mock_subprocess): mock_ceph_version.return_value = [10, 2, 9] with open('unit_tests/ceph_nodeepscrub.json') as f: tree = f.read() mock_subprocess.return_value = tree.encode('UTF-8') - args = check_ceph_status.parse_args(['--ignore_nodeepscrub']) + args = check_ceph_status.parse_args(['--raise_nodeepscrub']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # All OK, luminous + @patch('check_ceph_status.get_ceph_version') + def test_health_ok_luminous(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_ok_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--degraded_thresh', '1']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegex(check_output, r"^All OK$") + + # Warning, luminous + @patch('check_ceph_status.get_ceph_version') + def test_health_warn_luminous(self, 
mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_many_warnings_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) + +# Error, luminous, because of overall status + + # Error, luminous, because misplaced ratio is too big + @patch('check_ceph_status.get_ceph_version') + def test_health_critical_misplaced_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_many_warnings_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--misplaced_thresh', '0.1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Error, luminous, because degraded ratio is too big + @patch('check_ceph_status.get_ceph_version') + def test_health_critical_degraded_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_degraded_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--degraded_thresh', '0.1']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Error, luminous, because recovery rate is too low + @patch('check_ceph_status.get_ceph_version') + def test_health_critical_recovery_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_many_warnings_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--recovery_rate', '20']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Warning, luminous, deepscrub + @patch('check_ceph_status.get_ceph_version') + def test_health_warn_deepscrub_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_nodeepscrub_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args("") self.assertRaises(check_ceph_status.WarnError, lambda: check_ceph_status.check_ceph_status(args)) + + # Error, luminous, deepscrub + @patch('check_ceph_status.get_ceph_version') + def test_health_crit_deepscrub_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_nodeepscrub_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--raise_nodeepscrub']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) From d73fe0305db273a509a0de4ba304066d97fc081d Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 26 Nov 2018 14:40:48 -0800 Subject: [PATCH 1609/2699] Rename pause/resume osd-out/osd-in The actions pause and resume actually take all osds on a unit out of the cluster. This is incredibly misleading. This change renames to osd-out and osd-in to better describe what the actions actually do. 
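Operationally, each renamed action simply marks every OSD hosted on the unit out of (or back into) the cluster; a rough equivalent of what the actions run per local OSD id, taken from the diff below (the ids shown are illustrative):

    from subprocess import check_call

    def set_local_osds(state, local_osd_ids=(0, 1, 2)):
        # state is 'out' for the osd-out action and 'in' for osd-in.
        for osd_id in local_osd_ids:
            check_call(['ceph', '--id', 'osd-upgrade',
                        'osd', state, str(osd_id)])

With Juju 2.x this is then invoked as, for example, 'juju run-action ceph-osd/0 osd-out'.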
Change-Id: I76793999f5d3382563eff308a5d7c4db18d065a0
Closes-Bug: #1793507
---
 ceph-osd/actions.yaml                         |  4 ++--
 ceph-osd/actions/osd-in                       |  1 +
 ceph-osd/actions/osd-out                      |  1 +
 .../{pause_resume.py => osd_in_out.py}        | 22 ++++++-------------
 ceph-osd/actions/pause                        |  1 -
 ceph-osd/actions/resume                       |  1 -
 ...e_resume.py => test_actions_osd_out_in.py} | 22 ++++++++-----------
 7 files changed, 20 insertions(+), 32 deletions(-)
 create mode 120000 ceph-osd/actions/osd-in
 create mode 120000 ceph-osd/actions/osd-out
 rename ceph-osd/actions/{pause_resume.py => osd_in_out.py} (85%)
 delete mode 120000 ceph-osd/actions/pause
 delete mode 120000 ceph-osd/actions/resume
 rename ceph-osd/unit_tests/{test_actions_pause_resume.py => test_actions_osd_out_in.py} (82%)

diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml
index 3be669ce..16c11267 100644
--- a/ceph-osd/actions.yaml
+++ b/ceph-osd/actions.yaml
@@ -4,12 +4,12 @@
 #
 # Verify with `juju list-action` before proposing/committing
 # changes.
-pause:
+osd-out:
   description: |
     \
     USE WITH CAUTION - Mark unit OSDs as 'out'.
     Documentation: https://jujucharms.com/ceph-osd/
-resume:
+osd-in:
   description: |
     \
     Set the local osd units in the charm to 'in'.
diff --git a/ceph-osd/actions/osd-in b/ceph-osd/actions/osd-in
new file mode 120000
index 00000000..1cc47e9f
--- /dev/null
+++ b/ceph-osd/actions/osd-in
@@ -0,0 +1 @@
+osd_in_out.py
\ No newline at end of file
diff --git a/ceph-osd/actions/osd-out b/ceph-osd/actions/osd-out
new file mode 120000
index 00000000..1cc47e9f
--- /dev/null
+++ b/ceph-osd/actions/osd-out
@@ -0,0 +1 @@
+osd_in_out.py
\ No newline at end of file
diff --git a/ceph-osd/actions/pause_resume.py b/ceph-osd/actions/osd_in_out.py
similarity index 85%
rename from ceph-osd/actions/pause_resume.py
rename to ceph-osd/actions/osd_in_out.py
index 1c19d5b1..f5525fec 100755
--- a/ceph-osd/actions/pause_resume.py
+++ b/ceph-osd/actions/osd_in_out.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# pause/resume actions file.
+# osd_out/osd_in actions file.
 
 import os
 import sys
@@ -30,18 +30,12 @@
 from ceph.utils import get_local_osd_ids
 from ceph_hooks import assess_status
 
-from charmhelpers.contrib.openstack.utils import (
-    set_unit_paused,
-    clear_unit_paused,
-)
 
-def pause(args):
+def osd_out(args):
     """Pause the ceph-osd units on the local machine only.
 
-    Optionally uses the 'osd-number' from juju action param to only pause a
-    specific osd. If all the osds are not stopped then the paused status is
-    not set.
+    Optionally uses the 'osd-number' from juju action param to mark only a
+    specific osd 'out'.
 
     @raises CalledProcessError if the ceph commands fails.
     @raises OSError if it can't get the local osd ids.
@@ -52,14 +46,13 @@ def pause(args):
                '--id', 'osd-upgrade',
                'osd', 'out', str(local_id)]
         check_call(cmd)
-    set_unit_paused()
     assess_status()
 
 
-def resume(args):
+def osd_in(args):
     """Resume the ceph-osd units on this local machine only
 
-    @raises subprocess.CalledProcessError should the osd units fails to resume.
+    @raises subprocess.CalledProcessError should the osd units fail to go 'in'.
     @raises OSError if the unit can't get the local osd ids
     """
     for local_id in get_local_osd_ids():
@@ -68,12 +61,11 @@ def resume(args):
                '--id', 'osd-upgrade',
                'osd', 'in', str(local_id)]
         check_call(cmd)
-    clear_unit_paused()
     assess_status()
 
 
 # A dictionary of all the defined actions to callables (which take
 # parsed arguments).
-ACTIONS = {"pause": pause, "resume": resume}
+ACTIONS = {"osd-out": osd_out, "osd-in": osd_in}
 
 
 def main(args):
diff --git a/ceph-osd/actions/pause b/ceph-osd/actions/pause
deleted file mode 120000
index bd4c0e00..00000000
--- a/ceph-osd/actions/pause
+++ /dev/null
@@ -1 +0,0 @@
-pause_resume.py
\ No newline at end of file
diff --git a/ceph-osd/actions/resume b/ceph-osd/actions/resume
deleted file mode 120000
index bd4c0e00..00000000
--- a/ceph-osd/actions/resume
+++ /dev/null
@@ -1 +0,0 @@
-pause_resume.py
\ No newline at end of file
diff --git a/ceph-osd/unit_tests/test_actions_pause_resume.py b/ceph-osd/unit_tests/test_actions_osd_out_in.py
similarity index 82%
rename from ceph-osd/unit_tests/test_actions_pause_resume.py
rename to ceph-osd/unit_tests/test_actions_osd_out_in.py
index b0ac4418..f8b3546c 100644
--- a/ceph-osd/unit_tests/test_actions_pause_resume.py
+++ b/ceph-osd/unit_tests/test_actions_osd_out_in.py
@@ -20,42 +20,38 @@
 
 sys.path.append('hooks')
 
-import pause_resume as actions
+import osd_in_out as actions
 
 
-class PauseTestCase(CharmTestCase):
+class OSDOutTestCase(CharmTestCase):
     def setUp(self):
-        super(PauseTestCase, self).setUp(
+        super(OSDOutTestCase, self).setUp(
             actions, ["check_call",
                       "get_local_osd_ids",
-                      "set_unit_paused",
                       "assess_status"])
 
-    def test_pauses_services(self):
+    def test_osd_out(self):
         self.get_local_osd_ids.return_value = [5]
-        actions.pause([])
+        actions.osd_out([])
         cmd = ['ceph', '--id', 'osd-upgrade', 'osd', 'out', '5']
         self.check_call.assert_called_once_with(cmd)
-        self.set_unit_paused.assert_called_once_with()
         self.assess_status.assert_called_once_with()
 
 
-class ResumeTestCase(CharmTestCase):
+class OSDInTestCase(CharmTestCase):
     def setUp(self):
-        super(ResumeTestCase, self).setUp(
+        super(OSDInTestCase, self).setUp(
             actions, ["check_call",
                       "get_local_osd_ids",
-                      "clear_unit_paused",
                       "assess_status"])
 
-    def test_pauses_services(self):
+    def test_osd_in(self):
         self.get_local_osd_ids.return_value = [5]
-        actions.resume([])
+        actions.osd_in([])
         cmd = ['ceph', '--id', 'osd-upgrade', 'osd', 'in', '5']
         self.check_call.assert_called_once_with(cmd)
-        self.clear_unit_paused.assert_called_once_with()
         self.assess_status.assert_called_once_with()

From 5cd5295cad5f9a6365227f566275a4792e1978f1 Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 5 Dec 2018 15:24:45 +0000
Subject: [PATCH 1610/2699] Resync charms.ceph and charmhelpers

Resync with the latest updates to charms.ceph to avoid explicit
installation of python-ceph, which is not required and breaks under the
latest Ceph packaging, which no longer ships Python 2 support.
Change-Id: I4ce2b91dd476f90c30d1379dac5b00b8aaa9c73a --- .../charmhelpers/contrib/charmsupport/nrpe.py | 23 ++-- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/ha/utils.py | 112 +++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 99 +++++++++++----- .../contrib/storage/linux/loopback.py | 6 +- ceph-osd/lib/ceph/utils.py | 4 +- 6 files changed, 177 insertions(+), 70 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e3d10c1c..10d86ac0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None): """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - default_nrpe_files_dir = os.path.join( - os.getenv('CHARM_DIR'), - 'hooks', - 'charmhelpers', - 'contrib', - 'openstack', - 'files') - if not nrpe_files_dir: - nrpe_files_dir = default_nrpe_files_dir + if nrpe_files_dir is None: + # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks + for segment in ['.', 'hooks']: + nrpe_files_dir = os.path.abspath(os.path.join( + os.getenv('CHARM_DIR'), + segment, + 'charmhelpers', + 'contrib', + 'openstack', + 'files')) + if os.path.isdir(nrpe_files_dir): + break + else: + raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 1c96752a..5b7e3cfb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -168,7 +168,8 @@ def _add_services(self, this_service, other_services, use_source=None, 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', 'cinder-backup', 'nexentaedge-data', 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) if self.openstack: for svc in services: diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index add8eb9a..cdf4b4c9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -23,6 +23,7 @@ Helpers for high availability. """ +import hashlib import json import re @@ -35,7 +36,6 @@ config, status_set, DEBUG, - WARNING, ) from charmhelpers.core.host import ( @@ -124,13 +124,29 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service): +def generate_ha_relation_data(service, extra_settings=None): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set """ _haproxy_res = 'res_{}_haproxy'.format(service) @@ -149,6 +165,13 @@ def generate_ha_relation_data(service): }, } + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + if config('dns-ha'): update_hacluster_dns_ha(service, _relation_data) else: @@ -232,40 +255,75 @@ def update_hacluster_vip(service, relation_data): """ cluster_config = get_hacluster_config() vip_group = [] + vips_to_delete = [] for vip in cluster_config['vip'].split(): if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' + res_vip = 'ocf:heartbeat:IPv6addr' vip_params = 'ipv6addr' else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' + res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + + fallback_params = False + if iface is None: + iface = config('vip_iface') + fallback_params = True + if netmask is None: + netmask = config('vip_cidr') + fallback_params = True if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vip_group: - if vip not in relation_data['resource_params'][vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue - - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback_params: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. 
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}"'.format(ip=vip_params,
+                                                 vip=vip))
+
             vip_group.append(vip_key)
 
+    if vips_to_delete:
+        try:
+            relation_data['delete_resources'].extend(vips_to_delete)
+        except KeyError:
+            relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
-        relation_data['groups'] = {
-            'grp_{}_vips'.format(service): ' '.join(vip_group)
-        }
+        key = 'grp_{}_vips'.format(service)
+        try:
+            relation_data['groups'][key] = ' '.join(vip_group)
+        except KeyError:
+            relation_data['groups'] = {
+                key: ' '.join(vip_group)
+            }
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index 29cad083..59312fcf 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -73,6 +73,8 @@
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
         return False
 
 
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() @@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae52..82472ff1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 07f96a67..c03005bb 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -80,8 +80,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev', +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', + 'radosgw', 'xfsprogs', 'lvm2', 'parted'] CEPH_KEY_MANAGER = 'ceph' From 77eb76ff31bf22846f54f6f483435eeab0a64f94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Hausman?= Date: Thu, 6 Dec 2018 17:48:09 +0100 Subject: [PATCH 1611/2699] Remove chown'ing on adding dirs to osd-devices When adding directories to osd-devices (e.g. juju config ceph-osd osd-devices="/srv/ceph1 /srv/ceph2"), the charm was recursively changing owner and group for all files below /var/lib/ceph, for each added directory. This was taking time and was not useful. The fix is to remove unnecessary recursive chown on /var/lib/ceph, since ceph already manages the ownership of the files itself. 
Change-Id: I39a00591bc86ec49c4ced53eadce75ddb21e2431 Closes-Bug: #1795383 --- ceph-osd/lib/ceph/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 07f96a67..e7a948ab 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1894,7 +1894,6 @@ def osdize_dir(path, encrypt=False, bluestore=False): return mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) - chownr('/var/lib/ceph', ceph_user(), ceph_user()) cmd = [ 'sudo', '-u', ceph_user(), 'ceph-disk', From a8f3add6bed61db8fd707ab47f948e0dea325c21 Mon Sep 17 00:00:00 2001 From: Trent Lloyd Date: Mon, 10 Dec 2018 15:58:23 +0800 Subject: [PATCH 1612/2699] Set waiting status when number of OSDs has not yet reached expected-osd-count The charm does not process relation requests from clients when the number of OSDs has not yet reached the expected-osd-count. Make this situation clear to the user by setting the charm status to waiting with relevant information. Change-Id: I638547ca4a9f1bf48782c82aa0d92f89f6bfd13a Closes-Bug: 1807652 --- ceph-mon/hooks/ceph_hooks.py | 8 +++++++- ceph-mon/unit_tests/test_status.py | 10 ++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index f007181c..45545d9f 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -793,7 +793,13 @@ def assess_status(): # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): - status_set('active', 'Unit is ready and clustered') + expected_osd_count = config('expected-osd-count') or 3 + if sufficient_osds(expected_osd_count): + status_set('active', 'Unit is ready and clustered') + else: + status_set('waiting', 'Monitor bootstrapped but waiting for number' + 'of OSDs to reach expected-osd-count ({})' + .format(expected_osd_count)) else: # Unit should be running and clustered, but no quorum # TODO: should this be blocked or waiting? 
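As an illustration, a condensed, hypothetical sketch of the status handling added in the hunk above; sufficient_osds() is assumed here to be the ceph-mon helper that reports whether enough OSDs have bootstrapped (the message string in the hunk above is missing a space between 'number' and 'of'; a later patch in this series corrects it)::

    from charmhelpers.core.hookenv import config, status_set

    def assess_status_sketch():
        # Condensed form of the logic added to assess_status()
        expected_osd_count = config('expected-osd-count') or 3
        if sufficient_osds(expected_osd_count):
            status_set('active', 'Unit is ready and clustered')
        else:
            status_set('waiting',
                       'Monitor bootstrapped but waiting for number of'
                       ' OSDs to reach expected-osd-count'
                       ' ({})'.format(expected_osd_count))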
diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py
index 33f924f5..3bc38b70 100644
--- a/ceph-mon/unit_tests/test_status.py
+++ b/ceph-mon/unit_tests/test_status.py
@@ -81,18 +81,24 @@ def test_assess_status_peers_incomplete(self, _peer_units):
         self.status_set.assert_called_with('waiting', mock.ANY)
         self.application_version_set.assert_called_with('10.2.2')

+    @mock.patch.object(hooks, 'sufficient_osds')
     @mock.patch.object(hooks, 'get_peer_units')
-    def test_assess_status_peers_complete_active(self, _peer_units):
+    def test_assess_status_peers_complete_active(self, _peer_units,
+                                                 _sufficient_osds):
         _peer_units.return_value = ENOUGH_PEERS_COMPLETE
+        _sufficient_osds.return_value = True
         self.ceph.is_bootstrapped.return_value = True
         self.ceph.is_quorum.return_value = True
         hooks.assess_status()
         self.status_set.assert_called_with('active', mock.ANY)
         self.application_version_set.assert_called_with('10.2.2')

+    @mock.patch.object(hooks, 'sufficient_osds')
     @mock.patch.object(hooks, 'get_peer_units')
-    def test_assess_status_peers_complete_down(self, _peer_units):
+    def test_assess_status_peers_complete_down(self, _peer_units,
+                                               _sufficient_osds):
         _peer_units.return_value = ENOUGH_PEERS_COMPLETE
+        _sufficient_osds.return_value = True
         self.ceph.is_bootstrapped.return_value = False
         self.ceph.is_quorum.return_value = False
         hooks.assess_status()

From 688d3abbb9be2d6a48596fc9c1e9892bb871aedf Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 5 Dec 2018 15:26:35 +0000
Subject: [PATCH 1613/2699] Resync charms.ceph and charmhelpers

Resync the latest charms.ceph to avoid direct installation of
Python 2 modules which are not available in the latest Ceph
packaging which no longer provides Python 2 support.

This commit also updates two actions which still used Python 2
to use Python 3 (in line with the rest of the charm).
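The Python 3 moves follow the same pattern throughout the resync below: action shebangs change to #!/usr/bin/env python3, and bytes returned by subprocess calls are decoded before string handling, as in this excerpt from the loopback helper::

    output = check_output(cmd)
    if six.PY3:
        # check_output() returns bytes under Python 3
        output = output.decode('utf-8')

The resync also reworks VIP resource naming in the hacluster helpers, keying each resource on a short hash of the VIP itself rather than on the interface name (which is not stable across units)::

    vip_key = 'res_{}_{}_vip'.format(
        service,
        hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])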
Change-Id: I8fc2a1aa17e48ef5dac9b9974c33b9620fcb7c70 --- ceph-mon/actions/set_noout.py | 2 +- ceph-mon/actions/unset_noout.py | 2 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 23 ++-- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/ha/utils.py | 112 +++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 99 +++++++++++----- .../contrib/storage/linux/loopback.py | 6 +- ceph-mon/lib/ceph/utils.py | 19 ++- 8 files changed, 194 insertions(+), 72 deletions(-) diff --git a/ceph-mon/actions/set_noout.py b/ceph-mon/actions/set_noout.py index 97aa3841..50c119d9 100755 --- a/ceph-mon/actions/set_noout.py +++ b/ceph-mon/actions/set_noout.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2017 Canonical Ltd # diff --git a/ceph-mon/actions/unset_noout.py b/ceph-mon/actions/unset_noout.py index 8ae9a393..142fa1e5 100755 --- a/ceph-mon/actions/unset_noout.py +++ b/ceph-mon/actions/unset_noout.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2017 Canonical Ltd # diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e3d10c1c..10d86ac0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None): """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - default_nrpe_files_dir = os.path.join( - os.getenv('CHARM_DIR'), - 'hooks', - 'charmhelpers', - 'contrib', - 'openstack', - 'files') - if not nrpe_files_dir: - nrpe_files_dir = default_nrpe_files_dir + if nrpe_files_dir is None: + # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks + for segment in ['.', 'hooks']: + nrpe_files_dir = os.path.abspath(os.path.join( + os.getenv('CHARM_DIR'), + segment, + 'charmhelpers', + 'contrib', + 'openstack', + 'files')) + if os.path.isdir(nrpe_files_dir): + break + else: + raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 1c96752a..5b7e3cfb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -168,7 +168,8 @@ def _add_services(self, this_service, other_services, use_source=None, 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', 'cinder-backup', 'nexentaedge-data', 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) if self.openstack: for svc in services: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index add8eb9a..cdf4b4c9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -23,6 +23,7 @@ Helpers for high availability. 
""" +import hashlib import json import re @@ -35,7 +36,6 @@ config, status_set, DEBUG, - WARNING, ) from charmhelpers.core.host import ( @@ -124,13 +124,29 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service): +def generate_ha_relation_data(service, extra_settings=None): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set """ _haproxy_res = 'res_{}_haproxy'.format(service) @@ -149,6 +165,13 @@ def generate_ha_relation_data(service): }, } + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + if config('dns-ha'): update_hacluster_dns_ha(service, _relation_data) else: @@ -232,40 +255,75 @@ def update_hacluster_vip(service, relation_data): """ cluster_config = get_hacluster_config() vip_group = [] + vips_to_delete = [] for vip in cluster_config['vip'].split(): if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' + res_vip = 'ocf:heartbeat:IPv6addr' vip_params = 'ipv6addr' else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' + res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + + fallback_params = False + if iface is None: + iface = config('vip_iface') + fallback_params = True + if netmask is None: + netmask = config('vip_cidr') + fallback_params = True if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vip_group: - if vip not in relation_data['resource_params'][vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue - - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params 
if these where used + # instead of auto-detected values + if fallback_params: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}"'.format(ip=vip_params, + vip=vip)) + vip_group.append(vip_key) + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + if len(vip_group) >= 1: - relation_data['groups'] = { - 'grp_{}_vips'.format(service): ' '.join(vip_group) - } + key = 'grp_{}_vips'.format(service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 29cad083..59312fcf 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -73,6 +73,8 @@ service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -299,7 +301,7 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel - if src in ['distro', 'distro-proposed']: + if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: @@ -1303,6 +1305,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was succesfull it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. 
+ :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() @@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). 
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae52..82472ff1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 2ef48abe..c03005bb 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -80,8 +80,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev', +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', + 'radosgw', 'xfsprogs', 'lvm2', 'parted'] CEPH_KEY_MANAGER = 'ceph' @@ -1876,6 +1876,14 @@ def osdize_dir(path, encrypt=False, bluestore=False): :param encrypt: bool. Should the OSD directory be encrypted at rest :returns: None """ + + db = kv() + osd_devices = db.get('osd-devices', []) + if path in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(path)) + return + if os.path.exists(os.path.join(path, 'upstart')): log('Path {} is already configured as an OSD - bailing'.format(path)) return @@ -1906,6 +1914,13 @@ def osdize_dir(path, encrypt=False, bluestore=False): log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(path) + db.set('osd-devices', osd_devices) + db.flush() + def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 From 4cb7234cd387edb1ec9473cf38cd64574993aa74 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 11 Dec 2018 13:39:54 +0000 Subject: [PATCH 1614/2699] Disable object skew warnings Ceph will issue a HEALTH_WARN in the event that one pool has a large number of objects compared to other pools in the cluster: "Issue a HEALTH_WARN in cluster log if the average object number of a certain pool is greater than mon pg warn max object skew times the average object number of the whole pool." For OpenStack deployments, Gnocchi and RADOS gateway can generate a large number of small objects compared to Cinder, Glance and Nova usage, causing the cluster to go into HEALTH_WARN status. Disable this check until the skew evaluation also includes the size of the objects as well as the number. 
Change-Id: I83211dbdec4dea8dca5b27a66e26a4431d2a7b77
Closes-Bug: 1804846
---
 ceph-mon/templates/ceph.conf | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf
index b4cdb034..6f74d524 100644
--- a/ceph-mon/templates/ceph.conf
+++ b/ceph-mon/templates/ceph.conf
@@ -17,6 +17,12 @@ mon cluster log to syslog = {{ use_syslog }}
 debug mon = {{ loglevel }}/5
 debug osd = {{ loglevel }}/5

+# NOTE(jamespage):
+# Disable object skew warnings as these only use
+# the number of objects and not their size in the
+# skew calculation.
+mon pg warn max object skew = -1
+
 {% if ceph_public_network is string %}
 public network = {{ ceph_public_network }}
 {%- endif %}

From 66320c8bdeb96fc885cb7389bf05f4ad4f8f39a7 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Tue, 11 Dec 2018 15:39:33 +0100
Subject: [PATCH 1615/2699] Add app-name as an option during pool creation

Change-Id: I5e43f896d433b5a51ed7ecf6ddd936458c90cc24
Closes-Bug: #1807775
---
 ceph-mon/actions.yaml           | 3 +++
 ceph-mon/actions/create-pool.py | 9 +++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml
index f8a397b8..85cfb747 100644
--- a/ceph-mon/actions.yaml
+++ b/ceph-mon/actions.yaml
@@ -48,6 +48,9 @@ create-pool:
     name:
       type: string
       description: The name of the pool
+    app-name:
+      type: string
+      description: App name to set on the newly created pool.
     profile-name:
       type: string
       description: The crush profile to use for this pool. The ruleset must exist first.

diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create-pool.py
index aa4a27d2..81a8a554 100755
--- a/ceph-mon/actions/create-pool.py
+++ b/ceph-mon/actions/create-pool.py
@@ -25,19 +25,24 @@ def create_pool():
     pool_name = action_get("name")
     pool_type = action_get("pool-type")
+    app_name = action_get("app-name") or None
     try:
         if pool_type == "replicated":
             replicas = action_get("replicas")
             replicated_pool = ReplicatedPool(name=pool_name,
                                              service='admin',
-                                             replicas=replicas)
+                                             replicas=replicas,
+                                             app_name=app_name,
+                                             )
             replicated_pool.create()

         elif pool_type == "erasure":
             crush_profile_name = action_get("erasure-profile-name")
             erasure_pool = ErasurePool(name=pool_name,
                                        erasure_code_profile=crush_profile_name,
-                                       service='admin')
+                                       service='admin',
+                                       app_name=app_name,
+                                       )
             erasure_pool.create()
         else:
             log("Unknown pool type of {}. Only erasure or replicated is "

From 16be417f416f7662fe4d5515ec6c093a6e23b093 Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 12 Dec 2018 09:27:07 +0000
Subject: [PATCH 1616/2699] Fix missing space in status message

Add space between words in status message whilst ceph-mon units
are waiting for bootstrapped OSDs >= expected-osd-count.
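The underlying bug is Python's implicit concatenation of adjacent string literals, which silently joins the two fragments without a separator::

    # 'number' and 'of' fuse into 'numberof' at the join point
    msg = ('Monitor bootstrapped but waiting for number'
           'of OSDs to reach expected-osd-count ({})')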
Change-Id: I5178e385c61d5d738e96a307ac9c762ec610f657
---
 ceph-mon/hooks/ceph_hooks.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 45545d9f..ebbbc071 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -797,9 +797,12 @@ def assess_status():
         if sufficient_osds(expected_osd_count):
             status_set('active', 'Unit is ready and clustered')
         else:
-            status_set('waiting', 'Monitor bootstrapped but waiting for number'
-                       'of OSDs to reach expected-osd-count ({})'
-                       .format(expected_osd_count))
+            status_set(
+                'waiting',
+                'Monitor bootstrapped but waiting for number of'
+                ' OSDs to reach expected-osd-count ({})'
+                .format(expected_osd_count)
+            )
     else:
         # Unit should be running and clustered, but no quorum
         # TODO: should this be blocked or waiting?

From c179efa2be341fd6799ed3779b036f43854d9997 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 13 Dec 2018 09:06:40 +0100
Subject: [PATCH 1617/2699] Set action output for get_health

Change-Id: I23287bd153f1e61a94593734b1de43ef6abad36a
Closes-Bug: 1808189
---
 ceph-mon/actions/ceph_ops.py | 2 +-
 ceph-mon/actions/get-health  | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py
index 86765dd0..47eb5c8d 100755
--- a/ceph-mon/actions/ceph_ops.py
+++ b/ceph-mon/actions/ceph_ops.py
@@ -66,7 +66,7 @@ def get_health():
     On error, 'unknown' is returned.
     """
     try:
-        value = check_output(['ceph', 'health'])
+        value = check_output(['ceph', 'health']).decode('utf-8')
         return value
     except CalledProcessError as e:
         action_fail(e.message)

diff --git a/ceph-mon/actions/get-health b/ceph-mon/actions/get-health
index 09cf08c2..60e8a333 100755
--- a/ceph-mon/actions/get-health
+++ b/ceph-mon/actions/get-health
@@ -1,6 +1,12 @@
 #!/usr/bin/python

 from ceph_ops import get_health
+from charmhelpers.core.hookenv import log, action_set, action_fail

 if __name__ == '__main__':
-    get_health()
+    try:
+        action_set({'message': get_health()})
+    except CalledProcessError as e:
+        log(e)
+        action_fail(
+            "ceph health failed with message: {}".format(str(e)))
\ No newline at end of file

From 3f5880eb50b34c2b4b2387be6cc1dd3770c21832 Mon Sep 17 00:00:00 2001
From: xuleibj
Date: Wed, 19 Dec 2018 19:20:18 +0800
Subject: [PATCH 1618/2699] Remove a word that occurs twice

There are two 'the's in the warning text; it is necessary to
remove one of them.

Change-Id: If61a95c88d8542c99c0373be5f4555e322803408
---
 ceph-mon/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 9e23a7ba..7881fdf1 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -50,7 +50,7 @@ options:
       that the user consider opening a bug on this charm at
       http://bugs.launchpad.net/charms providing an explanation of why the
       config was needed so that we may consider it for inclusion as a
-      natively supported config in the the charm.
+      natively supported config in the charm.
   auth-supported:
     type: string
     default: cephx

From 76b12611811be6768616a2e06bd762860d093005 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Wed, 19 Dec 2018 15:16:22 +0000
Subject: [PATCH 1619/2699] Rebuild for HA resource changes in charm-helpers &
 charms.openstack.
Change-Id: I2e8c6c6840fedad19b6f3607e08cc7e08c97a390 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 2b56fde5..464856fa 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -0d1ffbde-e2d6-11e8-aa1a-cb07dc37eb28 +f992a562-03a0-11e9-8a41-af644219de17 From d1f5e2b02094ac8fb6c518a8cc506048466db6e2 Mon Sep 17 00:00:00 2001 From: melissaml Date: Sat, 22 Dec 2018 03:05:06 +0800 Subject: [PATCH 1620/2699] fix typos in the docstring Change-Id: I7d2c175c335d87d23a2599f1125b2963e80cf597 --- ceph-mon/hooks/charmhelpers/contrib/openstack/context.py | 2 +- ceph-mon/hooks/charmhelpers/fetch/archiveurl.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 72084cb3..5bb9628f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1207,7 +1207,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principle for multiple config - files and multiple serivces. Ie, a subordinate that has interfaces + files and multiple services. Ie, a subordinate that has interfaces to both glance and nova may export to following yaml blob as json:: glance: diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index dd24f9ec..d25587ad 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ def download(self, source, dest): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): From b66ecb4b4add6d9d6667e23d147481353a43deff Mon Sep 17 00:00:00 2001 From: melissaml Date: Sat, 22 Dec 2018 03:05:06 +0800 Subject: [PATCH 1621/2699] fix typos in the docstring Change-Id: I4f17e67b03d2fcd29de48f09b9f60d34378158cc --- ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py | 2 +- ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 72084cb3..5bb9628f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1207,7 +1207,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principle for multiple config - files and multiple serivces. Ie, a subordinate that has interfaces + files and multiple services. 
Ie, a subordinate that has interfaces to both glance and nova may export to following yaml blob as json:: glance: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index dd24f9ec..d25587ad 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ def download(self, source, dest): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): From e2e66a28b7afafbdb8e354d94c3882c1510ed2dd Mon Sep 17 00:00:00 2001 From: melissaml Date: Sat, 22 Dec 2018 04:21:20 +0800 Subject: [PATCH 1622/2699] fix typos in the docstring Change-Id: I6f335860f5d86329c1fdbb8ec51e4673f75d1d00 --- ceph-osd/hooks/charmhelpers/contrib/openstack/context.py | 2 +- ceph-osd/hooks/charmhelpers/fetch/archiveurl.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 72084cb3..5bb9628f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1207,7 +1207,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principle for multiple config - files and multiple serivces. Ie, a subordinate that has interfaces + files and multiple services. Ie, a subordinate that has interfaces to both glance and nova may export to following yaml blob as json:: glance: diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index dd24f9ec..d25587ad 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ def download(self, source, dest): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): From e064fe466566695b5889c097c40a1035aae79bc4 Mon Sep 17 00:00:00 2001 From: zhouxinyong Date: Mon, 7 Jan 2019 08:59:26 +0800 Subject: [PATCH 1623/2699] Delete the duplicate words in config.yaml Change-Id: I33389cef486cf6232ea755ab446cabe48ed55870 --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 9e23a7ba..7881fdf1 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -50,7 +50,7 @@ options: that the user consider opening a bug on this charm at http://bugs.launchpad.net/charms providing an explanation of why the config was needed so that we may consider it for inclusion as a - natively supported config in the the charm. + natively supported config in the charm. 
  auth-supported:
    type: string
    default: cephx

From 5ecb27848ad44abd35418779d114154cc9d64b9b Mon Sep 17 00:00:00 2001
From: zhouxinyong
Date: Mon, 7 Jan 2019 09:01:42 +0800
Subject: [PATCH 1624/2699] Remove duplicated words in config.yaml

Change-Id: Ieeb61474acd89deb1885fb927a6466173828110a
---
 ceph-radosgw/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index 2c484aad..1a75ed0d 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -52,7 +52,7 @@ options:
       that the user consider opening a bug on this charm at
       http://bugs.launchpad.net/charms providing an explanation of why the
       config was needed so that we may consider it for inclusion as a
-      natively supported config in the the charm.
+      natively supported config in the charm.
   port:
     type: int
     default: 80

From 26afe7a7fed0f7bd3629b951b3a01ab5791f2f9b Mon Sep 17 00:00:00 2001
From: inspurericzhang
Date: Mon, 7 Jan 2019 11:20:38 +0800
Subject: [PATCH 1625/2699] [Trivial Fix] delete duplicate words in config.yaml

Although these are trivial mistakes, they affect reading.

Change-Id: I7fbf3d0e1f5ffa0cf296a7f4505be0094f982986
---
 ceph-mon/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 9e23a7ba..7881fdf1 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -50,7 +50,7 @@ options:
       that the user consider opening a bug on this charm at
       http://bugs.launchpad.net/charms providing an explanation of why the
       config was needed so that we may consider it for inclusion as a
-      natively supported config in the the charm.
+      natively supported config in the charm.
   auth-supported:
     type: string
     default: cephx

From 0f25c5d79c8e557339a1c1fa5e658a3dd0a9e4e1 Mon Sep 17 00:00:00 2001
From: inspurericzhang
Date: Mon, 7 Jan 2019 11:26:07 +0800
Subject: [PATCH 1626/2699] [Trivial Fix] delete duplicate words in config.yaml

Although these are trivial mistakes, they affect reading.

Change-Id: Ie0d7c59cd23498da2e008f4aa964ea48db512561
---
 ceph-radosgw/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index 2c484aad..1a75ed0d 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -52,7 +52,7 @@ options:
       that the user consider opening a bug on this charm at
       http://bugs.launchpad.net/charms providing an explanation of why the
       config was needed so that we may consider it for inclusion as a
-      natively supported config in the the charm.
+      natively supported config in the charm.
   port:
     type: int
     default: 80

From c1dafa9bc48360068dbefd356d4d1c3b769b55cb Mon Sep 17 00:00:00 2001
From: dongdong tao
Date: Fri, 4 Jan 2019 20:25:15 +0800
Subject: [PATCH 1627/2699] Clear the non-pristine blocked status

We need to clear the blocked status "non-pristine device detected ..."
when there is no non-pristine device anymore Change-Id: I2f31eb9c08be186c08554122c2a160bbcbf2c389 Signed-off-by: dongdong tao Closes-Bug: #1810508 --- ceph-osd/hooks/ceph_hooks.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c69d693b..20ac05e9 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -722,6 +722,17 @@ def assess_status(): else: status_set('active', 'Unit is ready ({} OSD)'.format(len(running_osds))) + else: + pristine = True + osd_journals = get_journal_devices() + for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)): + if (not ceph.is_active_bluestore_device(dev) and + not ceph.is_pristine_disk(dev)): + pristine = False + break + if pristine: + status_set('active', + 'Unit is ready ({} OSD)'.format(len(running_osds))) @hooks.hook('update-status') From b82f1bdd8a40e01e4dcca74dce5edbb4f27bd678 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 26 Sep 2018 18:37:38 -0400 Subject: [PATCH 1628/2699] fix tox python3 overrides We want to default to running all tox environments under python 3, so set the basepython value in each environment. We do not want to specify a minor version number, because we do not want to have to update the file every time we upgrade python. We do not want to set the override once in testenv, because that breaks the more specific versions used in default environments like py35 and py36. Change-Id: I376bcfa0b37407876609f1183ecb72b614160457 Signed-off-by: Doug Hellmann --- ceph-proxy/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 930d5264..1adf5d10 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -32,13 +32,14 @@ deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof [testenv:venv] +basepython = python3 commands = {posargs} [testenv:func27-noop] From 926a2d175b83112e21a48ef6f4c4ca45527b763c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 7 Jan 2019 14:30:59 -0600 Subject: [PATCH 1629/2699] Remove ch-tests from make sync command The tests/charm-helpers contents were removed in the last cycle but the corresponding sync tool was not updated for this charm. Change-Id: I06fc6263ba31f7e37b45b1bd0af658bcd602254d --- ceph-proxy/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 89f4eb1f..17ffd298 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -18,4 +18,3 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml From b3f9a21b5daea65eed4f89906ec8d4c92ed2115d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 8 Jan 2019 10:02:04 +0000 Subject: [PATCH 1630/2699] Add support for RADOS gateway per unit cephx keys If ceph-radosgw units present a key_name attribute, then generate a cephx key using the key name and present back with the key_name as the prefix for the key. This switches radosgw units to having per-unit keys, rather than sharing a global key (required as part of the pre-work to support RADOS gateway federation). 
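A sketch of the resulting relation data under both schemes; the variable names are illustrative only, and the 'testhostname' value is taken from the unit tests below::

    # Old style: all radosgw units share one global cephx key
    data = {'radosgw_key': radosgw_key}

    # New style: the remote unit presents key_name='testhostname' and
    # receives a key generated and named specifically for that unit
    data = {'testhostname_key': per_unit_key}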
Change-Id: I289b75a2935184817b424c5eceead16235c3f53b Depends-On: I7ac4d23a91fa654b62afb62c389a8f9823ef2e05 Closes-Bug: 1808140 --- ceph-mon/hooks/ceph_hooks.py | 11 +++- ceph-mon/lib/ceph/utils.py | 4 +- ceph-mon/unit_tests/test_ceph_hooks.py | 70 +++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ebbbc071..8b70ce71 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -566,10 +566,18 @@ def radosgw_relation(relid=None, unit=None): public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), 'ceph-public-address': public_addr, } + key_name = relation_get('key_name', unit=unit, rid=relid) + if key_name: + # New style, per unit keys + data['{}_key'.format(key_name)] = ( + ceph.get_radosgw_key(name=key_name) + ) + else: + # Old style global radosgw key + data['radosgw_key'] = ceph.get_radosgw_key() settings = relation_get(rid=relid, unit=unit) """Process broker request(s).""" @@ -706,6 +714,7 @@ def upgrade_charm(): # Reprocess broker requests to ensure that any cephx # key permission changes are applied notify_client() + notify_radosgws() @hooks.hook('start') diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 2ef48abe..f7f85576 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -1072,8 +1072,8 @@ def import_radosgw_key(key): } -def get_radosgw_key(pool_list=None): - return get_named_key(name='radosgw.gateway', +def get_radosgw_key(pool_list=None, name=None): + return get_named_key(name=name or 'radosgw.gateway', caps=_radosgw_caps, pool_list=pool_list) diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index fa792042..0a8b4393 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -187,6 +187,7 @@ def test_nrpe_dependency_installed(self, mock_config): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) + @patch.object(ceph_hooks, 'notify_radosgws') @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks, 'config') @@ -194,7 +195,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( self, mock_config, mock_notify_client, - mock_ceph): + mock_ceph, + mock_notify_radosgws): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] with patch.multiple( @@ -213,6 +215,7 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mocks["apt_install"].assert_called_with( ["python-dbus", "lockfile-progs"]) mock_notify_client.assert_called_once_with() + mock_notify_radosgws.assert_called_once_with() mock_ceph.update_monfs.assert_called_once_with() @@ -409,3 +412,68 @@ def rel_units_side_effect(relid): '172.16.0.2:6789', '172.16.0.3:6789', '172.16.0.4:6789', '172.16.10.2:6789', '172.16.10.3:6789', '172.16.10.4:6789', ]) + + +class RGWRelationTestCase(test_utils.CharmTestCase): + + TO_PATCH = [ + 'relation_get', + 'get_public_addr', + 'ready_for_service', + 'remote_unit', + 'apt_install', + 'filter_installed_packages', + 'leader_get', + 'ceph', + 'process_requests', + 'log', + 'relation_set', + 'config', + ] + + test_key = 'OTQ1MDdiODYtMmZhZi00M2IwLTkzYTgtZWI0MGRhNzdmNzBlCg==' + test_fsid = '96ca5e7d-a9e3-4af1-be2b-85621eb6a8e8' + + def setUp(self): + super(RGWRelationTestCase, self).setUp(ceph_hooks, self.TO_PATCH) + 
self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + self.test_config.set('auth-supported', 'cephx') + self.filter_installed_packages.side_effect = lambda pkgs: pkgs + self.ready_for_service.return_value = True + self.leader_get.return_value = self.test_fsid + self.ceph.is_leader.return_value = True + self.ceph.get_radosgw_key.return_value = self.test_key + self.get_public_addr.return_value = '10.10.10.2' + + def test_legacy_radosgw_key(self): + self.test_relation.set({ + 'key_name': None + }) + ceph_hooks.radosgw_relation('radosgw:1', 'ceph-radosgw/0') + self.relation_set.assert_called_once_with( + relation_id='radosgw:1', + relation_settings={ + 'fsid': self.test_fsid, + 'auth': self.test_config.get('auth-supported'), + 'ceph-public-address': '10.10.10.2', + 'radosgw_key': self.test_key, + } + ) + self.ceph.get_radosgw_key.assert_called_once_with() + + def test_per_unit_radosgw_key(self): + self.test_relation.set({ + 'key_name': 'testhostname' + }) + ceph_hooks.radosgw_relation('radosgw:1', 'ceph-radosgw/0') + self.relation_set.assert_called_once_with( + relation_id='radosgw:1', + relation_settings={ + 'fsid': self.test_fsid, + 'auth': self.test_config.get('auth-supported'), + 'ceph-public-address': '10.10.10.2', + 'testhostname_key': self.test_key, + } + ) + self.ceph.get_radosgw_key.assert_called_once_with(name='testhostname') From 946882e97995212f59daab2091a685171a245bcb Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 8 Jan 2019 15:15:29 +0000 Subject: [PATCH 1631/2699] Sync charm-helpers Change-Id: I6e7b334c9b58035780b71fadd56c6d7c03dc87ec --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../contrib/openstack/amulet/deployment.py | 2 + .../contrib/openstack/amulet/utils.py | 3 +- .../contrib/openstack/cert_utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 8 +-- .../contrib/openstack/ha/utils.py | 59 ++++++++++++------- .../charmhelpers/contrib/openstack/utils.py | 14 +++++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 8 +++ 8 files changed, 68 insertions(+), 30 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 10d86ac0..f59fdd6b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -305,7 +305,7 @@ def write(self): # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unneccessary alerts. Let's not restart + # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. 
if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5b7e3cfb..d1270a73 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -293,7 +293,9 @@ def _get_openstack_release(self): ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, } return releases[(self.series, self.openstack)] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9133e9b3..ea1fd8f3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -57,7 +57,8 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 3e078703..3a3c6de7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -195,7 +195,7 @@ def install_certs(ssl_dir, certs, chain=None): if chain: # Append chain file so that clients that trust the root CA will # trust certs signed by an intermediate in the chain - cert_data = cert_data + chain + cert_data = cert_data + os.linesep + chain write_file( path=os.path.join(ssl_dir, cert_filename), content=cert_data, perms=0o640) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 72084cb3..614d444b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -98,7 +98,6 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, enable_memcache, - snap_install_requested, CompareOpenStackReleases, os_release, ) @@ -252,13 +251,8 @@ def __call__(self): 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' + 'database_type': 'mysql+pymysql' } - # Note(coreycb): We can drop mysql+pymysql if we want when the - # following review lands, though it seems mysql+pymysql would - # be preferred. 
https://review.openstack.org/#/c/462190/ - if snap_install_requested(): - ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index cdf4b4c9..718c6d65 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -63,6 +63,9 @@ separators=(',', ':'), ) +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + class DNSHAException(Exception): """Raised when an error occurs setting up DNS HA @@ -239,7 +242,7 @@ def update_hacluster_dns_ha(service, relation_data, 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) relation_data['groups'] = { - 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) } else: msg = 'DNS HA: Hostname group has no members.' @@ -247,6 +250,27 @@ def update_hacluster_dns_ha(service, relation_data, raise DNSHAException(msg) +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + def update_hacluster_vip(service, relation_data): """ Configure VIP resources based on provided configuration @@ -264,17 +288,9 @@ def update_hacluster_vip(service, relation_data): res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = get_iface_for_address(vip) - netmask = get_netmask_for_address(vip) - - fallback_params = False - if iface is None: - iface = config('vip_iface') - fallback_params = True - if netmask is None: - netmask = config('vip_cidr') - fallback_params = True + iface, netmask, fallback = get_vip_settings(vip) + vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"' if iface is not None: # NOTE(jamespage): Delete old VIP resources # Old style naming encoding iface in name @@ -293,14 +309,15 @@ def update_hacluster_vip(service, relation_data): # NOTE(jamespage): # Use option provided vip params if these where used # instead of auto-detected values - if fallback_params: + if fallback: relation_data['resource_params'][vip_key] = ( 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) else: # NOTE(jamespage): # let heartbeat figure out which interface and @@ -308,8 +325,10 @@ def update_hacluster_vip(service, relation_data): # when network interface naming is not # consistent across units. 
relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}"'.format(ip=vip_params, - vip=vip)) + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) vip_group.append(vip_key) @@ -320,7 +339,7 @@ def update_hacluster_vip(service, relation_data): relation_data['delete_resources'] = vips_to_delete if len(vip_group) >= 1: - key = 'grp_{}_vips'.format(service) + key = VIP_GROUP_NAME.format(service=service) try: relation_data['groups'][key] = ' '.join(vip_group) except KeyError: diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 59312fcf..4e432a25 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -118,6 +118,7 @@ 'pike', 'queens', 'rocky', + 'stein', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -136,6 +137,7 @@ ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) @@ -155,6 +157,7 @@ ('2017.2', 'pike'), ('2018.1', 'queens'), ('2018.2', 'rocky'), + ('2019.1', 'stein'), ]) # The ugly duckling - must list releases oldest to newest @@ -189,6 +192,8 @@ ['2.16.0', '2.17.0']), ('rocky', ['2.18.0', '2.19.0']), + ('stein', + ['2.19.0']), ]) # >= Liberty version->codename mapping @@ -201,6 +206,7 @@ ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -210,6 +216,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -219,6 +226,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -228,6 +236,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -237,6 +246,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -246,6 +256,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -255,6 +266,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -264,6 +276,7 @@ ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -273,6 +286,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), } diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index c7ad128c..8a5cadf1 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -166,6 +166,14 @@ 'rocky/proposed': 'bionic-proposed/rocky', 'bionic-rocky/proposed': 'bionic-proposed/rocky', 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', } From 1e42c9466cf77562bd2d273a88f83c4c632261f3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 8 Jan 2019 15:15:41 +0000 Subject: [PATCH 1632/2699] Sync charm-helpers Change-Id: Iaa29be49b44c76c4c89cef17b3212502b09d65ac --- 
.../charmhelpers/contrib/charmsupport/nrpe.py | 25 ++-- .../charmhelpers/contrib/openstack/utils.py | 113 +++++++++++++----- .../contrib/storage/linux/loopback.py | 6 +- ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py | 8 ++ 4 files changed, 111 insertions(+), 41 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e3d10c1c..f59fdd6b 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -305,7 +305,7 @@ def write(self): # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unneccessary alerts. Let's not restart + # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') @@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None): """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - default_nrpe_files_dir = os.path.join( - os.getenv('CHARM_DIR'), - 'hooks', - 'charmhelpers', - 'contrib', - 'openstack', - 'files') - if not nrpe_files_dir: - nrpe_files_dir = default_nrpe_files_dir + if nrpe_files_dir is None: + # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks + for segment in ['.', 'hooks']: + nrpe_files_dir = os.path.abspath(os.path.join( + os.getenv('CHARM_DIR'), + segment, + 'charmhelpers', + 'contrib', + 'openstack', + 'files')) + if os.path.isdir(nrpe_files_dir): + break + else: + raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 29cad083..4e432a25 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -73,6 +73,8 @@ service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -116,6 +118,7 @@ 'pike', 'queens', 'rocky', + 'stein', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -134,6 +137,7 @@ ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) @@ -153,6 +157,7 @@ ('2017.2', 'pike'), ('2018.1', 'queens'), ('2018.2', 'rocky'), + ('2019.1', 'stein'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +192,8 @@ ['2.16.0', '2.17.0']), ('rocky', ['2.18.0', '2.19.0']), + ('stein', + ['2.19.0']), ]) # >= Liberty version->codename mapping @@ -199,6 +206,7 @@ ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -208,6 +216,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -217,6 +226,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -226,6 +236,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -235,6 +246,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -244,6 +256,7 @@ ('9', 'pike'), ('10', 
'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -253,6 +266,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -262,6 +276,7 @@ ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -271,6 +286,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), } @@ -299,7 +315,7 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel - if src in ['distro', 'distro-proposed']: + if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: @@ -1303,6 +1319,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was successful it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. + :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1333,20 +1408,10 @@ def pause_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail().
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() @@ -1385,20 +1450,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae52..82472ff1 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py index c7ad128c..8a5cadf1 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py @@ -166,6 +166,14 @@ 'rocky/proposed': 'bionic-proposed/rocky', 'bionic-rocky/proposed': 'bionic-proposed/rocky', 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', } From d90691f262a3f11dcf33fdd75c6beb81e0a44add Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 8 Jan 2019 15:15:52 +0000 Subject: [PATCH 1633/2699] Sync charm-helpers Change-Id: I436c48bae921521f5959d0561c9327dfa2e103ce --- .../charmhelpers/contrib/charmsupport/nrpe.py | 25 ++-- .../contrib/openstack/amulet/deployment.py | 5 +- .../contrib/openstack/amulet/utils.py | 3 +- .../contrib/openstack/cert_utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 8 +- .../contrib/openstack/ha/utils.py | 133 ++++++++++++++---- .../charmhelpers/contrib/openstack/utils.py | 113 +++++++++++---- .../contrib/storage/linux/loopback.py | 6 +- .../hooks/charmhelpers/fetch/ubuntu.py | 8 ++ 9 files changed, 224 insertions(+), 79 deletions(-) diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e3d10c1c..f59fdd6b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -305,7 +305,7 @@ def write(self): # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unneccessary alerts. Let's not restart + # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') @@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None): """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - default_nrpe_files_dir = os.path.join( - os.getenv('CHARM_DIR'), - 'hooks', - 'charmhelpers', - 'contrib', - 'openstack', - 'files') - if not nrpe_files_dir: - nrpe_files_dir = default_nrpe_files_dir + if nrpe_files_dir is None: + # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks + for segment in ['.', 'hooks']: + nrpe_files_dir = os.path.abspath(os.path.join( + os.getenv('CHARM_DIR'), + segment, + 'charmhelpers', + 'contrib', + 'openstack', + 'files')) + if os.path.isdir(nrpe_files_dir): + break + else: + raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 1c96752a..d1270a73 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -168,7 +168,8 @@ def _add_services(self, this_service, other_services, use_source=None, 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', 'cinder-backup', 'nexentaedge-data', 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) if self.openstack: for svc in services: @@ -292,7 +293,9 @@ def _get_openstack_release(self): ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, } return releases[(self.series, self.openstack)] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9133e9b3..ea1fd8f3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -57,7 +57,8 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 3e078703..3a3c6de7 100644 --- 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -195,7 +195,7 @@ def install_certs(ssl_dir, certs, chain=None): if chain: # Append chain file so that clients that trust the root CA will # trust certs signed by an intermediate in the chain - cert_data = cert_data + chain + cert_data = cert_data + os.linesep + chain write_file( path=os.path.join(ssl_dir, cert_filename), content=cert_data, perms=0o640) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 72084cb3..614d444b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -98,7 +98,6 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, enable_memcache, - snap_install_requested, CompareOpenStackReleases, os_release, ) @@ -252,13 +251,8 @@ def __call__(self): 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' + 'database_type': 'mysql+pymysql' } - # Note(coreycb): We can drop mysql+pymysql if we want when the - # following review lands, though it seems mysql+pymysql would - # be preferred. https://review.openstack.org/#/c/462190/ - if snap_install_requested(): - ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index add8eb9a..718c6d65 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -23,6 +23,7 @@ Helpers for high availability. """ +import hashlib import json import re @@ -35,7 +36,6 @@ config, status_set, DEBUG, - WARNING, ) from charmhelpers.core.host import ( @@ -63,6 +63,9 @@ separators=(',', ':'), ) +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + class DNSHAException(Exception): """Raised when an error occurs setting up DNS HA @@ -124,13 +127,29 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service): +def generate_ha_relation_data(service, extra_settings=None): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}} + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data + @returns dict: json encoded data for use with relation_set """ _haproxy_res = 'res_{}_haproxy'.format(service) @@ -149,6 +168,13 @@ def generate_ha_relation_data(service): }, } + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + if config('dns-ha'): update_hacluster_dns_ha(service, _relation_data) else: @@ -216,7 +242,7 @@ def update_hacluster_dns_ha(service, relation_data, 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) relation_data['groups'] = { - 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) } else: msg = 'DNS HA: Hostname group has no members.' @@ -224,6 +250,27 @@ def update_hacluster_dns_ha(service, relation_data, raise DNSHAException(msg) + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for.
+ @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + def update_hacluster_vip(service, relation_data): """ Configure VIP resources based on provided configuration @@ -232,40 +279,70 @@ def update_hacluster_vip(service, relation_data): """ cluster_config = get_hacluster_config() vip_group = [] + vips_to_delete = [] for vip in cluster_config['vip'].split(): if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' + res_vip = 'ocf:heartbeat:IPv6addr' vip_params = 'ipv6addr' else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' + res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) + iface, netmask, fallback = get_vip_settings(vip) + vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"' if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vip_group: - if vip not in relation_data['resource_params'][vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " "vip group - skipping" % (vip_key, vip), WARNING) - continue - - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these were used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units.
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + vip_group.append(vip_key) + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + if len(vip_group) >= 1: - relation_data['groups'] = { - 'grp_{}_vips'.format(service): ' '.join(vip_group) - } + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 29cad083..4e432a25 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -73,6 +73,8 @@ service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -116,6 +118,7 @@ 'pike', 'queens', 'rocky', + 'stein', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -134,6 +137,7 @@ ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) @@ -153,6 +157,7 @@ ('2017.2', 'pike'), ('2018.1', 'queens'), ('2018.2', 'rocky'), + ('2019.1', 'stein'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +192,8 @@ ['2.16.0', '2.17.0']), ('rocky', ['2.18.0', '2.19.0']), + ('stein', + ['2.19.0']), ]) # >= Liberty version->codename mapping @@ -199,6 +206,7 @@ ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -208,6 +216,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -217,6 +226,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -226,6 +236,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -235,6 +246,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -244,6 +256,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -253,6 +266,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -262,6 +276,7 @@ ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -271,6 +286,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), } @@ -299,7 +315,7 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel - if src in ['distro', 'distro-proposed']: + if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: @@ -1303,6 +1319,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was successful it should return + None or an optional message.
+ + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. + :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1333,20 +1408,10 @@ def pause_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() @@ -1385,20 +1450,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). 
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae52..82472ff1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index c7ad128c..8a5cadf1 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -166,6 +166,14 @@ 'rocky/proposed': 'bionic-proposed/rocky', 'bionic-rocky/proposed': 'bionic-proposed/rocky', 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', } From 852bddd0e45dc3688d4feff64e677a82f9d62fc8 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 9 Jan 2019 14:44:07 +0000 Subject: [PATCH 1634/2699] Sync charm-helpers Change-Id: I9bec9365c84d39106472e4ec5059fd246de51604 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 2 +- .../contrib/openstack/amulet/deployment.py | 2 + .../contrib/openstack/amulet/utils.py | 3 +- .../contrib/openstack/cert_utils.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 8 +-- .../contrib/openstack/ha/utils.py | 59 ++++++++++++------- .../charmhelpers/contrib/openstack/utils.py | 14 +++++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 8 +++ 8 files changed, 68 insertions(+), 30 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 10d86ac0..f59fdd6b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -305,7 +305,7 @@ def write(self): # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unneccessary alerts. Let's not restart + # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. 
if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5b7e3cfb..d1270a73 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -293,7 +293,9 @@ def _get_openstack_release(self): ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, } return releases[(self.series, self.openstack)] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9133e9b3..ea1fd8f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -57,7 +57,8 @@ 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] + 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein'] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 3e078703..3a3c6de7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -195,7 +195,7 @@ def install_certs(ssl_dir, certs, chain=None): if chain: # Append chain file so that clients that trust the root CA will # trust certs signed by an intermediate in the chain - cert_data = cert_data + chain + cert_data = cert_data + os.linesep + chain write_file( path=os.path.join(ssl_dir, cert_filename), content=cert_data, perms=0o640) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 5bb9628f..d892bad9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -98,7 +98,6 @@ from charmhelpers.contrib.openstack.utils import ( config_flags_parser, enable_memcache, - snap_install_requested, CompareOpenStackReleases, os_release, ) @@ -252,13 +251,8 @@ def __call__(self): 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' + 'database_type': 'mysql+pymysql' } - # Note(coreycb): We can drop mysql+pymysql if we want when the - # following review lands, though it seems mysql+pymysql would - # be preferred. 
https://review.openstack.org/#/c/462190/ - if snap_install_requested(): - ctxt['database_type'] = 'mysql+pymysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index cdf4b4c9..718c6d65 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -63,6 +63,9 @@ separators=(',', ':'), ) +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + class DNSHAException(Exception): """Raised when an error occurs setting up DNS HA @@ -239,7 +242,7 @@ def update_hacluster_dns_ha(service, relation_data, 'Informing the ha relation'.format(' '.join(hostname_group)), DEBUG) relation_data['groups'] = { - 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) } else: msg = 'DNS HA: Hostname group has no members.' @@ -247,6 +250,27 @@ def update_hacluster_dns_ha(service, relation_data, raise DNSHAException(msg) +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + def update_hacluster_vip(service, relation_data): """ Configure VIP resources based on provided configuration @@ -264,17 +288,9 @@ def update_hacluster_vip(service, relation_data): res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = get_iface_for_address(vip) - netmask = get_netmask_for_address(vip) - - fallback_params = False - if iface is None: - iface = config('vip_iface') - fallback_params = True - if netmask is None: - netmask = config('vip_cidr') - fallback_params = True + iface, netmask, fallback = get_vip_settings(vip) + vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"' if iface is not None: # NOTE(jamespage): Delete old VIP resources # Old style naming encoding iface in name @@ -293,14 +309,15 @@ def update_hacluster_vip(service, relation_data): # NOTE(jamespage): # Use option provided vip params if these where used # instead of auto-detected values - if fallback_params: + if fallback: relation_data['resource_params'][vip_key] = ( 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) else: # NOTE(jamespage): # let heartbeat figure out which interface and @@ -308,8 +325,10 @@ def update_hacluster_vip(service, relation_data): # when network interface naming is not # consistent across units. 
relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}"'.format(ip=vip_params, - vip=vip)) + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) vip_group.append(vip_key) @@ -320,7 +339,7 @@ def update_hacluster_vip(service, relation_data): relation_data['delete_resources'] = vips_to_delete if len(vip_group) >= 1: - key = 'grp_{}_vips'.format(service) + key = VIP_GROUP_NAME.format(service=service) try: relation_data['groups'][key] = ' '.join(vip_group) except KeyError: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 59312fcf..4e432a25 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -118,6 +118,7 @@ 'pike', 'queens', 'rocky', + 'stein', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -136,6 +137,7 @@ ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) @@ -155,6 +157,7 @@ ('2017.2', 'pike'), ('2018.1', 'queens'), ('2018.2', 'rocky'), + ('2019.1', 'stein'), ]) # The ugly duckling - must list releases oldest to newest @@ -189,6 +192,8 @@ ['2.16.0', '2.17.0']), ('rocky', ['2.18.0', '2.19.0']), + ('stein', + ['2.19.0']), ]) # >= Liberty version->codename mapping @@ -201,6 +206,7 @@ ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -210,6 +216,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -219,6 +226,7 @@ ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -228,6 +236,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -237,6 +246,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -246,6 +256,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -255,6 +266,7 @@ ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -264,6 +276,7 @@ ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -273,6 +286,7 @@ ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), } diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index c7ad128c..8a5cadf1 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -166,6 +166,14 @@ 'rocky/proposed': 'bionic-proposed/rocky', 'bionic-rocky/proposed': 'bionic-proposed/rocky', 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', } From 2d39db158c88b13f847e794cd85eb59aacbbb5e5 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 10 Jan 2019 16:11:41 +0100 Subject: [PATCH 1635/2699] Initial commit of charm skeleton --- ceph-rbd-mirror/.gitignore | 5 + 
ceph-rbd-mirror/.stestr.conf | 3 + ceph-rbd-mirror/LICENSE | 202 +++++++++++ ceph-rbd-mirror/requirements.txt | 7 + ceph-rbd-mirror/src/HACKING.md | 10 + ceph-rbd-mirror/src/README.md | 15 + ceph-rbd-mirror/src/config.yaml | 23 ++ ceph-rbd-mirror/src/copyright | 6 + ceph-rbd-mirror/src/icon.svg | 316 ++++++++++++++++++ ceph-rbd-mirror/src/layer.yaml | 16 + ceph-rbd-mirror/src/lib/__init__.py | 13 + ceph-rbd-mirror/src/lib/charm/__init__.py | 13 + .../src/lib/charm/openstack/__init__.py | 13 + .../lib/charm/openstack/ceph_rbd_mirror.py | 47 +++ ceph-rbd-mirror/src/metadata.yaml | 32 ++ ceph-rbd-mirror/src/reactive/__init__.py | 13 + .../src/reactive/ceph_rbd_mirror_handlers.py | 37 ++ ceph-rbd-mirror/src/test-requirements.txt | 10 + ceph-rbd-mirror/src/tox.ini | 35 ++ ceph-rbd-mirror/test-requirements.txt | 13 + ceph-rbd-mirror/tox.ini | 51 +++ ceph-rbd-mirror/unit_tests/__init__.py | 22 ++ ...est_lib_charm_openstack_ceph_rbd_mirror.py | 29 ++ 23 files changed, 931 insertions(+) create mode 100644 ceph-rbd-mirror/.gitignore create mode 100644 ceph-rbd-mirror/.stestr.conf create mode 100644 ceph-rbd-mirror/LICENSE create mode 100644 ceph-rbd-mirror/requirements.txt create mode 100644 ceph-rbd-mirror/src/HACKING.md create mode 100644 ceph-rbd-mirror/src/README.md create mode 100644 ceph-rbd-mirror/src/config.yaml create mode 100644 ceph-rbd-mirror/src/copyright create mode 100644 ceph-rbd-mirror/src/icon.svg create mode 100644 ceph-rbd-mirror/src/layer.yaml create mode 100644 ceph-rbd-mirror/src/lib/__init__.py create mode 100644 ceph-rbd-mirror/src/lib/charm/__init__.py create mode 100644 ceph-rbd-mirror/src/lib/charm/openstack/__init__.py create mode 100644 ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py create mode 100644 ceph-rbd-mirror/src/metadata.yaml create mode 100644 ceph-rbd-mirror/src/reactive/__init__.py create mode 100644 ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py create mode 100644 ceph-rbd-mirror/src/test-requirements.txt create mode 100644 ceph-rbd-mirror/src/tox.ini create mode 100644 ceph-rbd-mirror/test-requirements.txt create mode 100644 ceph-rbd-mirror/tox.ini create mode 100644 ceph-rbd-mirror/unit_tests/__init__.py create mode 100644 ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py diff --git a/ceph-rbd-mirror/.gitignore b/ceph-rbd-mirror/.gitignore new file mode 100644 index 00000000..eb1cd65b --- /dev/null +++ b/ceph-rbd-mirror/.gitignore @@ -0,0 +1,5 @@ +.tox +.stestr +*__pycache__* +*.pyc +build diff --git a/ceph-rbd-mirror/.stestr.conf b/ceph-rbd-mirror/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-rbd-mirror/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-rbd-mirror/LICENSE b/ceph-rbd-mirror/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-rbd-mirror/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt new file mode 100644 index 00000000..20f335d2 --- /dev/null +++ b/ceph-rbd-mirror/requirements.txt @@ -0,0 +1,7 @@ +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Instead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +# +# Build requirements +charm-tools>=2.4.4 +simplejson diff --git a/ceph-rbd-mirror/src/HACKING.md b/ceph-rbd-mirror/src/HACKING.md new file mode 100644 index 00000000..ab232f8f --- /dev/null +++ b/ceph-rbd-mirror/src/HACKING.md @@ -0,0 +1,10 @@ +# Overview + +This charm is developed as part of the OpenStack Charms project, and as such you +should refer to the [OpenStack Charm Development Guide](https://github.com/openstack/charm-guide) for details on how +to contribute to this charm. + +You can find its source code here: https://github.com/openstack/charm-ceph-rbd-mirror. + + + diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md new file mode 100644 index 00000000..7742064c --- /dev/null +++ b/ceph-rbd-mirror/src/README.md @@ -0,0 +1,15 @@ +# Overview + +This charm provides the Ceph RBD Mirror service for use with replication between multiple Ceph clusters. + +Ceph 12.2 Luminous or later is required. + +# Usage + +TBC + +# Bugs + +Please report bugs on [Launchpad](https://bugs.launchpad.net/charm-ceph-rbd-mirror/+filebug). + +For general questions please refer to the OpenStack [Charm Guide](https://docs.openstack.org/charm-guide/latest/). diff --git a/ceph-rbd-mirror/src/config.yaml b/ceph-rbd-mirror/src/config.yaml new file mode 100644 index 00000000..beea7275 --- /dev/null +++ b/ceph-rbd-mirror/src/config.yaml @@ -0,0 +1,23 @@ +options: + source: + default: distro + type: string + description: | + Repository from which to install Ceph + + May be one of the following: + + distro (default) + ppa:somecustom/ppa (PPA name must include UCA OpenStack Release name) + deb url sources entry|key id + or a supported Ubuntu Cloud Archive pocket. + + Supported Ubuntu Cloud Archive pockets include: + + cloud:xenial-pike + cloud:xenial-queens + cloud:bionic-rocky + + Note that updating this setting to a source that is known to + provide a later version of Ceph will trigger a software + upgrade.
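The source option above accepts a distro keyword, a PPA, a raw deb sources entry, or a cloud: pocket; cloud: values are resolved through a lookup table in the style of the CLOUD_ARCHIVE_POCKETS additions to fetch/ubuntu.py seen earlier in this series. Below is a minimal, self-contained sketch of that resolution; the trimmed mapping and the resolve_source() helper are illustrative assumptions for this note, not the charm's actual code:

# Illustrative sketch only: classify a charm 'source' config value and,
# for cloud: values, resolve it to an Ubuntu Cloud Archive pocket.
# The mapping below is a trimmed, hypothetical excerpt in the style of
# charmhelpers' CLOUD_ARCHIVE_POCKETS table.
POCKETS = {
    'bionic-rocky': 'bionic-updates/rocky',
    'bionic-stein': 'bionic-updates/stein',
    'bionic-stein/proposed': 'bionic-proposed/stein',
}

def resolve_source(source):
    """Return an (origin_kind, value) tuple for a 'source' option string."""
    if source == 'distro':
        return ('distro', None)               # stock Ubuntu archive
    if source.startswith('ppa:'):
        return ('ppa', source)                # e.g. ppa:somecustom/ppa
    if source.startswith('deb '):
        return ('deb', source)                # raw sources.list entry
    if source.startswith('cloud:'):
        pocket = source.split(':', 1)[1]      # 'cloud:bionic-stein' -> 'bionic-stein'
        return ('cloud-archive', POCKETS[pocket])
    raise ValueError('unrecognised source: {}'.format(source))

# resolve_source('cloud:bionic-stein') == ('cloud-archive', 'bionic-updates/stein')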
diff --git a/ceph-rbd-mirror/src/copyright b/ceph-rbd-mirror/src/copyright new file mode 100644 index 00000000..27b9a9ec --- /dev/null +++ b/ceph-rbd-mirror/src/copyright @@ -0,0 +1,6 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd +License: Apache-2.0 + diff --git a/ceph-rbd-mirror/src/icon.svg b/ceph-rbd-mirror/src/icon.svg new file mode 100644 index 00000000..45b29930 --- /dev/null +++ b/ceph-rbd-mirror/src/icon.svg @@ -0,0 +1,316 @@ +[316 lines of SVG/XML markup for the charm icon (image/svg+xml); element content stripped during text extraction] diff --git a/ceph-rbd-mirror/src/layer.yaml b/ceph-rbd-mirror/src/layer.yaml new file mode 100644 index 00000000..b279f0d4 --- /dev/null +++ b/ceph-rbd-mirror/src/layer.yaml @@ -0,0 +1,16 @@ +includes: + - layer:leadership + - layer:openstack + - interface:ceph-client + - interface:nrpe-external-master +options: + basic: + use_venv: True + include_system_packages: True +repo: https://github.com/openstack/charm-ceph-rbd-mirror +config: + deletes: + - debug + - verbose + - use-internal-endpoints + - ssl_ca diff --git a/ceph-rbd-mirror/src/lib/__init__.py b/ceph-rbd-mirror/src/lib/__init__.py new file mode 100644 index 00000000..68451dd0 --- /dev/null +++ b/ceph-rbd-mirror/src/lib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-rbd-mirror/src/lib/charm/__init__.py b/ceph-rbd-mirror/src/lib/charm/__init__.py new file mode 100644 index 00000000..68451dd0 --- /dev/null +++ b/ceph-rbd-mirror/src/lib/charm/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/__init__.py b/ceph-rbd-mirror/src/lib/charm/openstack/__init__.py new file mode 100644 index 00000000..68451dd0 --- /dev/null +++ b/ceph-rbd-mirror/src/lib/charm/openstack/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py new file mode 100644 index 00000000..6d5a7c85 --- /dev/null +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -0,0 +1,47 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +import charms_openstack.charm +import charms_openstack.adapters + + +class CephRBDMirrorCharm(charms_openstack.charm.OpenStackCharm): + # Override source config key to be compatible with the other Ceph charms + source_config_key = 'source' + + # We require Ceph 12.2 Luminous or later for HA support in the Ceph + # rbd-mirror daemon. Luminous appears in UCA at pike. + release = 'pike' + name = 'ceph-rbd-mirror' + packages = ['rbd-mirror'] + python_version = 3 + required_relations = ['ceph-cluster'] + release_pkg = 'rbd-mirror' + package_codenames = { + 'rbd-mirror': collections.OrderedDict([ + ('12', 'pike'), + ('13', 'rocky'), + ]), + } + + def install(self): + """We override the install function to configure the source before + installing packages. + + The OpenStackAPICharm class already does this, but we do not need or + want the other services it provides for Ceph charms.""" + self.configure_source() + super().install() diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml new file mode 100644 index 00000000..a2f3a8d7 --- /dev/null +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -0,0 +1,32 @@ +name: ceph-rbd-mirror +summary: Highly scalable distributed storage - Ceph RBD Mirroring +maintainer: OpenStack Charmers +description: | + RBD images can be asynchronously mirrored between two Ceph clusters. This + capability uses the RBD journaling image feature to ensure crash-consistent + replication between clusters. Mirroring is configured on a per-pool basis + within peer clusters and can be configured to automatically mirror all images + within a pool or only a specific subset of images. Mirroring is configured + using the rbd command. The rbd-mirror daemon is responsible for pulling image + updates from the remote, peer cluster and applying them to the image within + the local cluster. + + Note: The charm requires Ceph Luminous or later and will only support + mirror configuration for whole pools.
+tags: + - openstack + - storage + - file-servers + - misc +series: + - xenial + - bionic + - cosmic +subordinate: false +provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container +requires: + ceph-cluster: + interface: ceph-client diff --git a/ceph-rbd-mirror/src/reactive/__init__.py b/ceph-rbd-mirror/src/reactive/__init__.py new file mode 100644 index 00000000..68451dd0 --- /dev/null +++ b/ceph-rbd-mirror/src/reactive/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py new file mode 100644 index 00000000..79bc51ce --- /dev/null +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -0,0 +1,37 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import charms.reactive as reactive + +import charms_openstack.bus +import charms_openstack.charm as charm + +import charmhelpers.core as ch_core + + +charms_openstack.bus.discover() + +# Use the charms.openstack defaults for common states and hooks +charm.use_defaults( + 'charm.installed', + 'config.changed', + 'update-status') + + +@reactive.when('ceph-cluster.connected') +def ceph_connected(): + with charm.provide_charm_instance() as charm_instance: + ch_core.hookenv.log('Ceph connected, charm_instance @ {}' + .format(charm_instance), + level=ch_core.hookenv.DEBUG) diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt new file mode 100644 index 00000000..4578f719 --- /dev/null +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -0,0 +1,10 @@ +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Instead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you.
+charm-tools>=2.4.4 +coverage>=3.6 +mock>=1.2 +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 +requests>=2.18.4 +git+https://github.com/openstack-charmers/zaza.git#egg=zaza diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini new file mode 100644 index 00000000..ce451062 --- /dev/null +++ b/ceph-rbd-mirror/src/tox.ini @@ -0,0 +1,35 @@ +[tox] +envlist = pep8 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +whitelist_externals = juju +passenv = HOME TERM CS_API_* OS_* AMULET_* +deps = -r{toxinidir}/test-requirements.txt +install_command = + pip install {opts} {packages} + +[testenv:pep8] +basepython = python3 +deps=charm-tools +commands = charm-proof + +[testenv:func-noop] +basepython = python3 +commands = + true + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:venv] +commands = {posargs} diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt new file mode 100644 index 00000000..ca62003b --- /dev/null +++ b/ceph-rbd-mirror/test-requirements.txt @@ -0,0 +1,13 @@ +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Instead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +# +# Lint and unit test requirements +flake8>=2.2.4,<=2.4.1 +os-testr>=0.4.1 +requests>=2.18.4 +charms.reactive +mock>=1.2 +nose>=1.3.7 +coverage>=3.6 +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini new file mode 100644 index 00000000..266fb541 --- /dev/null +++ b/ceph-rbd-mirror/tox.ini @@ -0,0 +1,51 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. +[tox] +skipsdist = True +envlist = pep8,py3 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + TERM=linux + LAYER_PATH={toxinidir}/layers + JUJU_REPOSITORY={toxinidir}/build +passenv = http_proxy https_proxy INTERFACE_PATH +install_command = + pip install {opts} {packages} +deps = + -r{toxinidir}/requirements.txt + +[testenv:build] +basepython = python3 +commands = + charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[flake8] +# E402 ignore necessary for path append before sys module import in actions +ignore = E402 diff --git a/ceph-rbd-mirror/unit_tests/__init__.py b/ceph-rbd-mirror/unit_tests/__init__.py new file mode 100644 index 00000000..7b5dac4f --- /dev/null +++ b/ceph-rbd-mirror/unit_tests/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path.append('src') +sys.path.append('src/lib') + +# Mock out charmhelpers so that we can test without it. +import charms_openstack.test_mocks # noqa +charms_openstack.test_mocks.mock_charmhelpers() diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py new file mode 100644 index 00000000..7e2010c0 --- /dev/null +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -0,0 +1,29 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import charms_openstack.test_utils as test_utils + +import charm.openstack.ceph_rbd_mirror as ceph_rbd_mirror + + +class Helper(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release(ceph_rbd_mirror.CephRBDMirrorCharm.release) + + +class TestCephRBDMirrorCharm(Helper): + def test_foo(self): + pass From da376b48c1f757583554910120837e970e8da86d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 11 Jan 2019 08:35:12 +0100 Subject: [PATCH 1636/2699] Expose API services If we expose the API services, then the Zaza func tests can be run on any conformant OpenStack, rather than requiring an OpenStack with wide open security groups. 
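For illustration of the intent (this sketch is not part of the change, and the helper below is invented for the example): once the API applications are exposed, a test can reach an endpoint via a unit's public address from wherever the test runner happens to be, roughly:

    import requests

    def keystone_reachable(public_address, port=5000):
        # 'expose: True' has Juju open the application's ports in the
        # provider's security groups, so this request can succeed from
        # outside the model.
        url = 'http://{}:{}/v3'.format(public_address, port)
        try:
            return requests.get(url, timeout=10).ok
        except requests.exceptions.ConnectionError:
            return False

Without the expose flag the same request only succeeds on clouds whose security groups already allow the traffic.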
Change-Id: Ic0967e198939c6951e3a6df8214e244eb320029f --- ceph-osd/tests/bundles/bionic-queens.yaml | 4 ++++ ceph-osd/tests/bundles/bionic-rocky.yaml | 4 ++++ ceph-osd/tests/bundles/cosmic-rocky.yaml | 4 ++++ ceph-osd/tests/bundles/trusty-icehouse.yaml | 4 ++++ ceph-osd/tests/bundles/trusty-mitaka.yaml | 4 ++++ ceph-osd/tests/bundles/xenial-mitaka.yaml | 4 ++++ ceph-osd/tests/bundles/xenial-ocata.yaml | 4 ++++ ceph-osd/tests/bundles/xenial-pike.yaml | 4 ++++ ceph-osd/tests/bundles/xenial-queens.yaml | 4 ++++ 9 files changed, 36 insertions(+) diff --git a/ceph-osd/tests/bundles/bionic-queens.yaml b/ceph-osd/tests/bundles/bionic-queens.yaml index 88c9b93e..05be7b11 100644 --- a/ceph-osd/tests/bundles/bionic-queens.yaml +++ b/ceph-osd/tests/bundles/bionic-queens.yaml @@ -24,15 +24,18 @@ applications: charm: cs:rabbitmq-server num_units: 1 keystone: + expose: True charm: cs:keystone num_units: 1 nova-compute: charm: cs:nova-compute num_units: 1 glance: + expose: True charm: cs:glance num_units: 1 cinder: + expose: True charm: cs:cinder num_units: 1 options: @@ -41,6 +44,7 @@ applications: cinder-ceph: charm: cs:cinder-ceph nova-cloud-controller: + expose: True charm: cs:nova-cloud-controller num_units: 1 relations: diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml index 51c233ba..ae37a596 100644 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -29,6 +29,7 @@ applications: options: source: cloud:bionic-updates/rocky keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -39,11 +40,13 @@ applications: options: openstack-origin: cloud:bionic-rocky/proposed glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:bionic-rocky/proposed cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -53,6 +56,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs::~openstack-charmers-next/nova-cloud-controller num_units: 1 options: diff --git a/ceph-osd/tests/bundles/cosmic-rocky.yaml b/ceph-osd/tests/bundles/cosmic-rocky.yaml index 434aaea8..27fe1047 100644 --- a/ceph-osd/tests/bundles/cosmic-rocky.yaml +++ b/ceph-osd/tests/bundles/cosmic-rocky.yaml @@ -24,6 +24,7 @@ applications: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -34,11 +35,13 @@ applications: options: openstack-origin: cloud:bionic-rocky/proposed glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:bionic-rocky/proposed cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -48,6 +51,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: diff --git a/ceph-osd/tests/bundles/trusty-icehouse.yaml b/ceph-osd/tests/bundles/trusty-icehouse.yaml index 7af06efc..61ec8742 100644 --- a/ceph-osd/tests/bundles/trusty-icehouse.yaml +++ b/ceph-osd/tests/bundles/trusty-icehouse.yaml @@ -40,6 +40,7 @@ applications: constraints: virt-type=kvm keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 # workaround while awaiting release of next version of python-libjuju with @@ -54,6 +55,7 @@ applications: 
constraints: virt-type=kvm glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 # workaround while awaiting release of next version of python-libjuju with @@ -61,6 +63,7 @@ applications: constraints: virt-type=kvm cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -73,6 +76,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 # workaround while awaiting release of next version of python-libjuju with diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml index 41ea263a..868bba15 100644 --- a/ceph-osd/tests/bundles/trusty-mitaka.yaml +++ b/ceph-osd/tests/bundles/trusty-mitaka.yaml @@ -45,6 +45,7 @@ applications: constraints: virt-type=kvm keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -63,6 +64,7 @@ applications: constraints: virt-type=kvm glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: @@ -72,6 +74,7 @@ applications: constraints: virt-type=kvm cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -85,6 +88,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: diff --git a/ceph-osd/tests/bundles/xenial-mitaka.yaml b/ceph-osd/tests/bundles/xenial-mitaka.yaml index 24316244..37760454 100644 --- a/ceph-osd/tests/bundles/xenial-mitaka.yaml +++ b/ceph-osd/tests/bundles/xenial-mitaka.yaml @@ -24,15 +24,18 @@ applications: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -41,6 +44,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 relations: diff --git a/ceph-osd/tests/bundles/xenial-ocata.yaml b/ceph-osd/tests/bundles/xenial-ocata.yaml index 3de59c98..90676783 100644 --- a/ceph-osd/tests/bundles/xenial-ocata.yaml +++ b/ceph-osd/tests/bundles/xenial-ocata.yaml @@ -29,6 +29,7 @@ applications: options: source: cloud:xenial-ocata keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -39,11 +40,13 @@ applications: options: openstack-origin: cloud:xenial-ocata glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-ocata cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -53,6 +56,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: diff --git a/ceph-osd/tests/bundles/xenial-pike.yaml b/ceph-osd/tests/bundles/xenial-pike.yaml index ceb778f2..7a5b49cc 100644 --- a/ceph-osd/tests/bundles/xenial-pike.yaml +++ b/ceph-osd/tests/bundles/xenial-pike.yaml @@ -29,6 +29,7 @@ applications: options: source: cloud:xenial-pike keystone: + expose: True charm: 
cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -39,11 +40,13 @@ applications: options: openstack-origin: cloud:xenial-pike glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-pike cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -53,6 +56,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: diff --git a/ceph-osd/tests/bundles/xenial-queens.yaml b/ceph-osd/tests/bundles/xenial-queens.yaml index 33188ad6..dcc63ced 100644 --- a/ceph-osd/tests/bundles/xenial-queens.yaml +++ b/ceph-osd/tests/bundles/xenial-queens.yaml @@ -29,6 +29,7 @@ applications: options: source: cloud:xenial-queens keystone: + expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: @@ -39,11 +40,13 @@ applications: options: openstack-origin: cloud:xenial-queens glance: + expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-queens cinder: + expose: True charm: cs:~openstack-charmers-next/cinder num_units: 1 options: @@ -53,6 +56,7 @@ applications: cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: + expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: From afd8d229a9102d850cff1fcea5ed391fab0d58e6 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 11 Jan 2019 14:34:30 +0000 Subject: [PATCH 1637/2699] Sync charm-helpers Change-Id: Id0778f87816d686a34163e95b4e389f41a0c6452 --- ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 2 +- ceph-mon/hooks/charmhelpers/contrib/openstack/context.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f59fdd6b..0626b328 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -126,7 +126,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_.]+$' + shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index d892bad9..8a203754 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -97,6 +97,7 @@ ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, + get_os_codename_install_source, enable_memcache, CompareOpenStackReleases, os_release, @@ -240,6 +241,8 @@ def __call__(self): else: rids = relation_ids(self.interfaces[0]) + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') for rid in rids: self.related = True for unit in related_units(rid): @@ -253,6 +256,8 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } + if CompareOpenStackReleases(rel) < 'stein': + ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt From 395361f88b8e288d392f43c4e48291c263ff6a71 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 11 Jan 2019 14:34:44 +0000 Subject: [PATCH 1638/2699] Sync 
charm-helpers Change-Id: Id2f3315520008b6b14c37b6f3ab605bcf9c0bc1c --- ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 2 +- ceph-osd/hooks/charmhelpers/contrib/openstack/context.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f59fdd6b..0626b328 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -126,7 +126,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_.]+$' + shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index d892bad9..8a203754 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -97,6 +97,7 @@ ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, + get_os_codename_install_source, enable_memcache, CompareOpenStackReleases, os_release, @@ -240,6 +241,8 @@ def __call__(self): else: rids = relation_ids(self.interfaces[0]) + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') for rid in rids: self.related = True for unit in related_units(rid): @@ -253,6 +256,8 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } + if CompareOpenStackReleases(rel) < 'stein': + ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt From acc578f4c5cffd6836e3e8c8be57fbcdc77ffa66 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 11 Jan 2019 14:35:07 +0000 Subject: [PATCH 1639/2699] Sync charm-helpers Change-Id: Ib93a16da251c359cfb19cd1119c9ca60cfbdc2f2 --- ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 2 +- ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f59fdd6b..0626b328 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -126,7 +126,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_.]+$' + shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py index dd24f9ec..d25587ad 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ def download(self, source, dest): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. 
""" - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): From 7141c2c589f13bb9677bb565f4c7a443eff3bc76 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 11 Jan 2019 14:35:22 +0000 Subject: [PATCH 1640/2699] Sync charm-helpers Change-Id: I41699bcccc39b94fdfa3dcbc00565dac5e3c2354 --- ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py | 2 +- ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f59fdd6b..0626b328 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -126,7 +126,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_.]+$' + shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index d892bad9..8a203754 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -97,6 +97,7 @@ ) from charmhelpers.contrib.openstack.utils import ( config_flags_parser, + get_os_codename_install_source, enable_memcache, CompareOpenStackReleases, os_release, @@ -240,6 +241,8 @@ def __call__(self): else: rids = relation_ids(self.interfaces[0]) + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') for rid in rids: self.related = True for unit in related_units(rid): @@ -253,6 +256,8 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } + if CompareOpenStackReleases(rel) < 'stein': + ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt From 615aa105bd05d2ce25410bdc7490dd36a06d9b48 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 10 Jan 2019 18:01:23 +0000 Subject: [PATCH 1641/2699] Fix nrpe ceph-osd status respecting permissions The referenced bug (below) was caused because the nrpe check needed to access the ceph owned directories, and as the nagios user, nrpe can't. This change splits the check into a 'collect' phase that runs as root via a cronjob each minute and writes a file to the tmp directory, and a nrpe check phase that then reads that file and reports back to nagios. The 'check' part deletes the 'collect' file, so that fresh information is available for each nrpe check. The cron task runs every minute (as is lightweight), so the nrpe checks should not be sheduled more frequently than 1 minute. 
Change-Id: I4f4594a479eed47cc66643d0c6acece491ae854d Closes-Bug: #1810749 --- .../files/nagios/check_ceph_osd_services.py | 65 ++++++++++++++ ceph-osd/files/nagios/check_ceph_status.py | 10 ++- .../files/nagios/collect_ceph_osd_services.py | 90 +++++++++++++++++++ ceph-osd/hooks/ceph_hooks.py | 78 ++++++++++++---- ceph-osd/tests/bundles/bionic-rocky.yaml | 2 +- ceph-osd/tox.ini | 2 +- 6 files changed, 223 insertions(+), 24 deletions(-) create mode 100755 ceph-osd/files/nagios/check_ceph_osd_services.py create mode 100755 ceph-osd/files/nagios/collect_ceph_osd_services.py diff --git a/ceph-osd/files/nagios/check_ceph_osd_services.py b/ceph-osd/files/nagios/check_ceph_osd_services.py new file mode 100755 index 00000000..669160d4 --- /dev/null +++ b/ceph-osd/files/nagios/check_ceph_osd_services.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2018 Canonical +# All Rights Reserved +# Author: Alex Kavanagh + +import os +import sys +import tempfile + +CRON_CHECK_TMPFILE = 'ceph-osd-checks' + +STATE_OK = 0 +STATE_WARNING = 1 +STATE_CRITICAL = 2 +STATE_UNKNOWN = 3 + + +def run_main(): + """Process the CRON_CHECK_TMPFILE file and see if any line is not OK. + + If a line is not OK, run_main returns STATE_CRITICAL. + If there are no lines, or the file doesn't exist, it returns + STATE_UNKNOWN. Otherwise it returns STATE_OK. + + :returns: nagios state 0, 2 or 3 + """ + _tmp_file = os.path.join(tempfile.gettempdir(), CRON_CHECK_TMPFILE) + + if not os.path.isfile(_tmp_file): + print("File '{}' doesn't exist".format(_tmp_file)) + return STATE_UNKNOWN + + try: + with open(_tmp_file, 'rt') as f: + lines = f.readlines() + except Exception as e: + print("Something went wrong reading the file: {}".format(str(e))) + return STATE_UNKNOWN + + # now remove the file in case the next check fails. + try: + os.remove(_tmp_file) + except Exception: + pass + + if not lines: + print("checked status file is empty: {}".format(_tmp_file)) + return STATE_UNKNOWN + + # finally, check that the file contains all ok lines. Unfortunately, the + # output is not consistent across releases, but what is consistent is that + # when the check command in the collect phase fails, the line starts with + # 'Failed' + state = STATE_OK + for l in lines: + print(l, end='') + if l.startswith('Failed'): + state = STATE_CRITICAL + + return state + + +if __name__ == '__main__': + sys.exit(run_main()) diff --git a/ceph-osd/files/nagios/check_ceph_status.py b/ceph-osd/files/nagios/check_ceph_status.py index cc21591a..358fafd9 100755 --- a/ceph-osd/files/nagios/check_ceph_status.py +++ b/ceph-osd/files/nagios/check_ceph_status.py @@ -40,7 +40,8 @@ def check_ceph_status(args): msg += '"' raise nagios_plugin.CriticalError(msg) - osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", + status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up.
Total: {}, up: {}'.format( osds.group(1), osds.group(2)) @@ -50,7 +51,10 @@ def check_ceph_status(args): if __name__ == '__main__': parser = argparse.ArgumentParser(description='Check ceph status') - parser.add_argument('-f', '--file', dest='status_file', - default=False, help='Optional file with "ceph status" output') + parser.add_argument('-f', + '--file', + dest='status_file', + default=False, + help='Optional file with "ceph status" output') args = parser.parse_args() nagios_plugin.try_check(check_ceph_status, args) diff --git a/ceph-osd/files/nagios/collect_ceph_osd_services.py b/ceph-osd/files/nagios/collect_ceph_osd_services.py new file mode 100755 index 00000000..7133a75e --- /dev/null +++ b/ceph-osd/files/nagios/collect_ceph_osd_services.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2018 Canonical +# All Rights Reserved +# Author: Alex Kavanagh + +import os +import subprocess +import tempfile + +# fasteners only exists in Bionic, so this will fail on xenial and trusty +try: + import fasteners +except ImportError: + fasteners = None + +SYSTEMD_SYSTEM = '/run/systemd/system' +LOCKFILE = '/var/lock/check-osds.lock' +CRON_CHECK_TMPFILE = 'ceph-osd-checks' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False + return os.path.isdir(SYSTEMD_SYSTEM) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def get_osd_units(): + """Returns a list of strings, one for each unit that is live""" + cmd = '/bin/cat /var/lib/ceph/osd/ceph-*/whoami' + try: + output = (subprocess + .check_output([cmd], shell=True).decode('utf-8') + .split('\n')) + return [u for u in output if u] + except subprocess.CalledProcessError: + return [] + + +def do_status(): + if init_is_systemd(): + cmd = "/usr/local/lib/nagios/plugins/check_systemd.py ceph-osd@{}" + else: + cmd = "/sbin/status ceph-osd id={}" + + lines = [] + + for unit in get_osd_units(): + try: + output = (subprocess + .check_output(cmd.format(unit).split(), + stderr=subprocess.STDOUT) + .decode('utf-8')) + except subprocess.CalledProcessError as e: + output = ("Failed: check command raised: {}" + .format(e.output.decode('utf-8'))) + lines.append(output) + + _tmp_file = os.path.join(tempfile.gettempdir(), CRON_CHECK_TMPFILE) + with open(_tmp_file, 'wt') as f: + f.writelines(lines) + + +def run_main(): + # on bionic we can interprocess lock; we don't do it for older platforms + if fasteners is not None: + lock = fasteners.InterProcessLock(LOCKFILE) + + if lock.acquire(blocking=False): + try: + do_status() + finally: + lock.release() + else: + do_status() + + +if __name__ == '__main__': + run_main() diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c69d693b..7828613f 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -15,14 +15,14 @@ # limitations under the License. 
import base64 -import json import glob +import json +import netifaces import os import shutil -import sys import socket import subprocess -import netifaces +import sys sys.path.append('lib') import ceph.utils as ceph @@ -37,6 +37,7 @@ related_units, relation_get, relation_set, + relations_of_type, Hooks, UnregisteredHookError, service_name, @@ -47,16 +48,17 @@ application_version_set, ) from charmhelpers.core.host import ( - umount, - mkdir, + add_to_updatedb_prunepath, cmp_pkgrevno, + is_container, + lsb_release, + mkdir, + restart_on_change, service_reload, service_restart, - add_to_updatedb_prunepath, - restart_on_change, + umount, write_file, - is_container, - init_is_systemd, + CompareHostReleases, ) from charmhelpers.fetch import ( add_source, @@ -110,6 +112,9 @@ hooks = Hooks() STORAGE_MOUNT_PATH = '/var/lib/ceph' +# cron.d related files +CRON_CEPH_CHECK_FILE = '/etc/cron.d/check-osd-services' + def check_for_upgrade(): if not os.path.exists(ceph._upgrade_keyring): @@ -601,6 +606,7 @@ def upgrade_charm(): fatal=True) install_udev_rules() remap_resolved_targets() + maybe_refresh_nrpe_files() def remap_resolved_targets(): @@ -632,28 +638,62 @@ def remap_resolved_targets(): 'nrpe-external-master-relation-changed') def update_nrpe_config(): # python-dbus is used by check_upstart_job - apt_install('python3-dbus') + # fasteners is used by collect_ceph_osd_services.py + pkgs = ['python3-dbus'] + if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic': + pkgs.append('python3-fasteners') + apt_install(pkgs) + + # copy the check and collect files over to the plugins directory + charm_dir = os.environ.get('CHARM_DIR', '') + nagios_plugins = '/usr/local/lib/nagios/plugins' + # Grab nagios user/group ID's from original source + _dir = os.stat(nagios_plugins) + uid = _dir.st_uid + gid = _dir.st_gid + for name in ('collect_ceph_osd_services.py', 'check_ceph_osd_services.py'): + target = os.path.join(nagios_plugins, name) + shutil.copy(os.path.join(charm_dir, 'files', 'nagios', name), target) + os.chown(target, uid, gid) + hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() - # create systemd or upstart check - cmd = '/bin/cat /var/lib/ceph/osd/ceph-*/whoami |' - if init_is_systemd(): - cmd += 'xargs -I_@ /usr/local/lib/nagios/plugins/check_systemd.py' - cmd += ' ceph-osd@_@' - else: - cmd += 'xargs -I@ status ceph-osd id=@' - cmd += ' && exit 0 || exit 2' + # BUG#1810749 - the nagios user can't access /var/lib/ceph/.. and that's a + # GOOD THING, as it keeps ceph secure from Nagios. However, to check + # whether ceph is okay, the check_systemd.py or 'status ceph-osd' still + # needs to be called with the contents of ../osd/ceph-*/whoami files. To + # get around this conundrum, instead a cron.d job that runs as root will + # perform the checks every minute and write the results to a temporary + # file; the nrpe check then reads this file and errors out (return 2) if + # any line starts with 'Failed'.
+ + cmd = ('MAILTO=""\n' + '* * * * * root ' + '/usr/local/lib/nagios/plugins/collect_ceph_osd_services.py' + ' 2>&1 | logger -t check-osd\n') + with open(CRON_CEPH_CHECK_FILE, "wt") as f: + f.write(cmd) + + nrpe_cmd = '/usr/local/lib/nagios/plugins/check_ceph_osd_services.py' nrpe_setup = nrpe.NRPE(hostname=hostname) nrpe_setup.add_check( shortname='ceph-osd', description='process check {%s}' % current_unit, - check_cmd=cmd + check_cmd=nrpe_cmd ) nrpe_setup.write() +def maybe_refresh_nrpe_files(): + """if the nrpe-external-master relation exists then refresh the nrpe + configuration -- this is called during a charm upgrade + """ + if relations_of_type('nrpe-external-master'): + update_nrpe_config() + + @hooks.hook('secrets-storage-relation-joined') def secrets_storage_joined(relation_id=None): relation_set(relation_id=relation_id, diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml index ae37a596..36ea73ba 100644 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -57,7 +57,7 @@ applications: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True - charm: cs::~openstack-charmers-next/nova-cloud-controller + charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: openstack-origin: cloud:bionic-rocky/proposed diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 6b24dddf..e9553173 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -38,7 +38,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions lib +commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof [testenv:venv] From e46c0fb71018c926850aeeedb1b377e60e1f7587 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 17 Jan 2019 17:02:18 +0000 Subject: [PATCH 1642/2699] Rebuild for sync charm-helpers Change-Id: Idf5f0ab3432e6b91c869ee6d3eaccafbd5eb7456 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 464856fa..ada580de 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -f992a562-03a0-11e9-8a41-af644219de17 +85c5499c-1a79-11e9-8864-470f2a69c15e From 7727bbcf19dd1bd53bf8c0c5ba29e9db2950c183 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 18 Jan 2019 11:43:30 +0200 Subject: [PATCH 1643/2699] Switch charm runtime to python3 Update charm to execute hooks and actions under Python 3; this includes dealing with upgrades (by switching the upgrade-charm hook to be a bash script which installs the required charm runtime dependencies). This commit also drops code from ceph.py which was used across other ceph charms in the past; only the functions required for this charm have been retained. 
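Most of the mechanical changes are the usual py2-to-py3 fixes visible in the diff below; dict iteration is representative (before/after shown for illustration):

    # Python 2 only - raises AttributeError under Python 3
    for cfg, rscs in CONFIGS.iteritems():
        configs.register(cfg, rscs['contexts'])

    # works under Python 3 (and Python 2), as utils.py now does below
    for cfg, rscs in CONFIGS.items():
        configs.register(cfg, rscs['contexts'])

The upgrade-charm hook has to remain a shell script because, at the moment it fires on an upgraded unit, only the Python 2 dependencies may be present; it runs install_deps to install the python3-* packages before any Python hook code executes.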
Change-Id: I5e222d907bfa34ffacad16c51abd1278d7d82f56 --- ceph-radosgw/.zuul.yaml | 3 +- ceph-radosgw/actions/actions.py | 2 +- ceph-radosgw/hooks/ceph.py | 198 ------------------ ceph-radosgw/hooks/hooks.py | 5 +- ceph-radosgw/hooks/install | 15 +- ceph-radosgw/hooks/install_deps | 17 ++ ceph-radosgw/hooks/upgrade-charm | 5 +- ceph-radosgw/hooks/utils.py | 2 +- ceph-radosgw/tests/basic_deployment.py | 8 +- ceph-radosgw/tox.ini | 4 +- ceph-radosgw/unit_tests/__init__.py | 6 +- ceph-radosgw/unit_tests/test_ceph.py | 173 --------------- .../unit_tests/test_ceph_radosgw_context.py | 3 + ceph-radosgw/unit_tests/test_hooks.py | 1 + ceph-radosgw/unit_tests/test_utils.py | 2 +- 15 files changed, 42 insertions(+), 402 deletions(-) create mode 100755 ceph-radosgw/hooks/install_deps mode change 120000 => 100755 ceph-radosgw/hooks/upgrade-charm diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index aa9c508f..7051aeeb 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python-charm-jobs - - openstack-python35-jobs-nonvoting + - python35-charm-jobs diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py index 7a477b78..5446cc99 100755 --- a/ceph-radosgw/actions/actions.py +++ b/ceph-radosgw/actions/actions.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 2a87962a..41a2a531 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -13,13 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json import os import subprocess -import time - - -from socket import gethostname as get_unit_hostname from utils import get_pkg_version @@ -30,151 +25,6 @@ CephBrokerRq, ) -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(subprocess.check_output(cmd)) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def wait_for_quorum(): - while not is_quorum(): - time.sleep(3) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(get_unit_hostname()) - cmd = [ - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - - -def is_osd_disk(dev): - try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) - info = info.split("\n") # IGNORE:E1103 - for line in info: - if line.startswith( - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - ): - return True - except 
subprocess.CalledProcessError: - pass - return False - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - -def zap_disk(dev): - cmd = ['sgdisk', '--zap-all', dev] - subprocess.check_call(cmd) - - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" - - -def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) - - -def wait_for_bootstrap(): - while (not is_bootstrapped()): - time.sleep(3) - - -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - - -def get_osd_bootstrap_key(): - return get_named_key('bootstrap-osd', _osd_bootstrap_caps) - - _radosgw_keyring = "/etc/ceph/keyring.rados.gateway" @@ -189,54 +39,6 @@ def import_radosgw_key(key): ] subprocess.check_call(cmd) -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow r'], - 'osd': ['allow rwx'] -} - - -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) - - -_default_caps = { - 'mon': ['allow r'], - 'osd': ['allow rwx'] -} - - -def get_named_key(name, caps=None): - caps = caps or _default_caps - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - get_unit_hostname() - ), - 'auth', 'get-or-create', 'client.{}'.format(name), - ] - # Add capabilities - for subsystem, subcaps in caps.iteritems(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - output = subprocess.check_output(cmd).strip() # IGNORE:E1103 - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(output.splitlines()) == 1: - key = output - else: - for element in output.splitlines(): - if 'key' in element: - key = element.split(' = ')[1].strip() # IGNORE:E1103 - return key - def get_create_rgw_pools_rq(prefix=None): """Pre-create RGW pools so that they have the correct settings. diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4f68459f..8481032d 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -136,8 +136,7 @@ def install(): os.makedirs('/etc/ceph') -@hooks.hook('upgrade-charm', - 'config-changed') +@hooks.hook('config-changed') @restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], '/etc/haproxy/haproxy.cfg': ['haproxy']}) @harden() diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install index fa9f910f..015c1435 100755 --- a/ceph-radosgw/hooks/install +++ b/ceph-radosgw/hooks/install @@ -2,19 +2,6 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'jinja2' 'dnspython') - -check_and_install() { - pkg="${1}-${2}" - if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done +./hooks/install_deps exec ./hooks/install.real diff --git a/ceph-radosgw/hooks/install_deps b/ceph-radosgw/hooks/install_deps new file mode 100755 index 00000000..506d9d64 --- /dev/null +++ b/ceph-radosgw/hooks/install_deps @@ -0,0 +1,17 @@ +#!/bin/bash +# Install required dependencies for charm runtime + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python3" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm deleted file mode 120000 index 9416ca6a..00000000 --- a/ceph-radosgw/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm new file mode 100755 index 00000000..c1771bf0 --- /dev/null +++ b/ceph-radosgw/hooks/upgrade-charm @@ -0,0 +1,4 @@ +#!/bin/bash +# Re-install dependencies to deal with py2->py3 switch for charm + +./hooks/install_deps diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index ccb839b6..c79e0022 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -172,7 +172,7 @@ def register_configs(release='icehouse'): CONFIGS[CEPH_CONF]['contexts'].append( ceph_radosgw_context.IdentityServiceContext() ) - for cfg, rscs in CONFIGS.iteritems(): + for cfg, rscs in CONFIGS.items(): configs.register(cfg, rscs['contexts']) return configs diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index 5b05ffae..afd88839 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -427,7 +427,7 @@ def test_300_ceph_radosgw_config(self): expected['client.radosgw.gateway']['rgw keystone admin token'] = ( 'ubuntutesting') - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "ceph config error: {}".format(ret) @@ -444,7 +444,7 @@ def test_302_cinder_rbd_config(self): 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' } } - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "cinder (rbd) config error: {}".format(ret) @@ -474,7 +474,7 @@ def test_304_glance_rbd_config(self): section = 'DEFAULT' expected = {section: config} - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "glance (rbd) config error: {}".format(ret) @@ -491,7 +491,7 @@ def test_306_nova_rbd_config(self): 'rbd_secret_uuid': u.not_null } } - for section, pairs in expected.iteritems(): + for section, pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: message = "nova (rbd) config error: {}".format(ret) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 7d738375..1b27a4af 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
[tox] -envlist = pep8,py27 +envlist = pep8,py35,py36 skipsdist = True [testenv] @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = stestr run --slowest {posargs} +commands = stestr run --slowest {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* diff --git a/ceph-radosgw/unit_tests/__init__.py b/ceph-radosgw/unit_tests/__init__.py index 184cf3d8..ed0779fb 100644 --- a/ceph-radosgw/unit_tests/__init__.py +++ b/ceph-radosgw/unit_tests/__init__.py @@ -14,5 +14,7 @@ import sys -sys.path.append('actions/') -sys.path.append('hooks/') +sys.path.append('actions') +sys.path.append('hooks') +sys.path.append('lib') +sys.path.append('unit_tests') diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 33c23fda..673dd72f 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -30,10 +30,8 @@ TO_PATCH = [ 'config', - 'get_unit_hostname', 'os', 'subprocess', - 'time', ] @@ -42,143 +40,6 @@ def setUp(self): super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) self.config.side_effect = self.test_config.get - def test_is_quorum_leader(self): - self.os.path.exists.return_value = True - self.get_unit_hostname.return_value = 'myhost' - self.subprocess.check_output.return_value = '{"state": "leader"}' - self.assertEqual(ceph.is_quorum(), True) - - def test_is_quorum_notleader(self): - self.os.path.exists.return_value = True - self.get_unit_hostname.return_value = 'myhost' - self.subprocess.check_output.return_value = '{"state": "notleader"}' - self.assertEqual(ceph.is_quorum(), False) - - def test_is_quorum_valerror(self): - self.os.path.exists.return_value = True - self.get_unit_hostname.return_value = 'myhost' - self.subprocess.check_output.return_value = "'state': 'bob'}" - self.assertEqual(ceph.is_quorum(), False) - - def test_is_quorum_no_asok(self): - self.os.path.exists.return_value = False - self.assertEqual(ceph.is_quorum(), False) - - def test_is_leader(self): - self.get_unit_hostname.return_value = 'myhost' - self.os.path.exists.return_value = True - self.subprocess.check_output.return_value = '{"state": "leader"}' - self.assertEqual(ceph.is_leader(), True) - - def test_is_leader_notleader(self): - self.get_unit_hostname.return_value = 'myhost' - self.os.path.exists.return_value = True - self.subprocess.check_output.return_value = '{"state": "notleader"}' - self.assertEqual(ceph.is_leader(), False) - - def test_is_leader_valerror(self): - self.get_unit_hostname.return_value = 'myhost' - self.os.path.exists.return_value = True - self.subprocess.check_output.return_value = "'state': 'bob'}" - self.assertEqual(ceph.is_leader(), False) - - def test_is_leader_noasok(self): - self.get_unit_hostname.return_value = 'myhost' - self.os.path.exists.return_value = False - self.assertEqual(ceph.is_leader(), False) - - def test_wait_for_quorum_yes(self): - results = [True, False] - - def quorum(): - return results.pop() - - _is_quorum = self.patch('is_quorum') - _is_quorum.side_effect = quorum - ceph.wait_for_quorum() - self.time.sleep.assert_called_with(3) - - def test_wait_for_quorum_no(self): - _is_quorum = self.patch('is_quorum') - _is_quorum.return_value = True - ceph.wait_for_quorum() - self.assertFalse(self.time.sleep.called) - - def test_wait_for_bootstrap(self): - results = [True, False] - - def bootstrapped(): - return results.pop() - - _is_bootstrapped = self.patch('is_bootstrapped') - _is_bootstrapped.side_effect = 
bootstrapped - ceph.wait_for_bootstrap() - self.time.sleep.assert_called_with(3) - - def test_add_bootstrap_hint(self): - self.get_unit_hostname.return_value = 'myhost' - cmd = [ - "ceph", - "--admin-daemon", - '/var/run/ceph/ceph-mon.myhost.asok', - "add_bootstrap_peer_hint", - 'mypeer' - ] - self.os.path.exists.return_value = True - ceph.add_bootstrap_hint('mypeer') - self.subprocess.call.assert_called_with(cmd) - - def test_add_bootstrap_hint_noasok(self): - self.get_unit_hostname.return_value = 'myhost' - self.os.path.exists.return_value = False - ceph.add_bootstrap_hint('mypeer') - self.assertFalse(self.subprocess.call.called) - - def test_is_osd_disk(self): - # XXX Insert real sgdisk output - self.subprocess.check_output.return_value = \ - 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), True) - - def test_is_osd_disk_no(self): - # XXX Insert real sgdisk output - self.subprocess.check_output.return_value = \ - 'Partition GUID code: 5FBD7E29-9D25-41B8-AFD0-062C0CEFF05D' - self.assertEqual(ceph.is_osd_disk('/dev/fmd0'), False) - - def test_rescan_osd_devices(self): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - ceph.rescan_osd_devices() - self.subprocess.call.assert_called_with(cmd) - - def test_zap_disk(self): - cmd = [ - 'sgdisk', '--zap-all', '/dev/fmd0', - ] - ceph.zap_disk('/dev/fmd0') - self.subprocess.check_call.assert_called_with(cmd) - - def test_import_osd_bootstrap_key(self): - self.os.path.exists.return_value = False - cmd = [ - 'ceph-authtool', - '/var/lib/ceph/bootstrap-osd/ceph.keyring', - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key=mykey', - ] - ceph.import_osd_bootstrap_key('mykey') - self.subprocess.check_call.assert_called_with(cmd) - - def test_is_bootstrapped(self): - self.os.path.exists.return_value = True - self.assertEqual(ceph.is_bootstrapped(), True) - self.os.path.exists.return_value = False - self.assertEqual(ceph.is_bootstrapped(), False) - def test_import_radosgw_key(self): self.os.path.exists.return_value = False ceph.import_radosgw_key('mykey') @@ -191,40 +52,6 @@ def test_import_radosgw_key(self): ] self.subprocess.check_call.assert_called_with(cmd) - def test_get_named_key_create(self): - self.get_unit_hostname.return_value = "myhost" - self.subprocess.check_output.return_value = """ - -[client.dummy] - key = AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ== -""" - self.assertEqual(ceph.get_named_key('dummy'), - 'AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ==') - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-myhost/keyring', - 'auth', 'get-or-create', 'client.dummy', - 'mon', 'allow r', 'osd', 'allow rwx' - ] - self.subprocess.check_output.assert_called_with(cmd) - - def test_get_named_key_get(self): - self.get_unit_hostname.return_value = "myhost" - key = "AQAPiu1RCMb4CxAAmP7rrufwZPRqy8bpQa2OeQ==" - self.subprocess.check_output.return_value = key - self.assertEqual(ceph.get_named_key('dummy'), key) - cmd = [ - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-myhost/keyring', - 'auth', 'get-or-create', 'client.dummy', - 'mon', 'allow r', 'osd', 'allow rwx' - ] - self.subprocess.check_output.assert_called_with(cmd) - @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_create_pool') def test_create_rgw_pools_rq_with_prefix(self, mock_broker): diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 
d707efa8..848e3c27 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -38,6 +38,7 @@ def setUp(self): super(HAProxyContextTests, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get + self.cmp_pkgrevno.return_value = 1 @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') @@ -71,6 +72,7 @@ def setUp(self): self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get self.maxDiff = None + self.cmp_pkgrevno.return_value = 1 @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @@ -295,6 +297,7 @@ def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) self.config.side_effect = self.test_config.get self.unit_public_ip.return_value = '10.255.255.255' + self.cmp_pkgrevno.return_value = 1 @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index f62f9839..121f06cc 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -67,6 +67,7 @@ def setUp(self): self.test_config.set('source', 'distro') self.test_config.set('key', 'secretkey') self.test_config.set('use-syslog', False) + self.cmp_pkgrevno.return_value = 0 def test_install_packages(self): ceph_hooks.install_packages() diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index e80722d4..5be80ec7 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -49,7 +49,7 @@ def get_default_config(): """ default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: default_config[k] = v['default'] else: From d52d072414b08e2b7f837ca3cc4547a5a0e8f88d Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 7 Jan 2019 17:18:07 +0000 Subject: [PATCH 1644/2699] Switch to using systemd units for radosgw Switch to using systemd configurations to manage radosgw instances; the radosgw init script is obsolete and will be removed at some point in time, and the newer style of managing radosgw daemons is inline with current best-practice. This changeset also changes the way cephx keys are issues; before all rgw instances shared a key, now a key is issued per host. The key is named 'rgw.`hostname`' to identify the application and host using the key. Existing deployments using the radosgw init script will be switched to use the new systemd named units; this occurs once the new key for the unit has been presented by the ceph-mon cluster over the mon relation. A small period of outage will occur as the radosgw init based daemon is stopped and disabled prior to the start of the new systemd based radosgw unit. This commit also includes a resync for charmhelpers to pickup support for '@' in NRPE service check names. 
Change-Id: Ic0d634e619185931633712cb3e3685051a28749d Depends-On: I289b75a2935184817b424c5eceead16235c3f53b Closes-Bug: 1808140 --- ceph-radosgw/hooks/ceph.py | 38 +++- ceph-radosgw/hooks/ceph_radosgw_context.py | 8 +- .../contrib/storage/linux/ceph.py | 27 ++- ceph-radosgw/hooks/hooks.py | 168 ++++++++++-------- ceph-radosgw/hooks/utils.py | 49 ++++- ceph-radosgw/templates/ceph.conf | 10 +- ceph-radosgw/tests/basic_deployment.py | 24 --- ceph-radosgw/unit_tests/test_ceph.py | 8 +- .../unit_tests/test_ceph_radosgw_context.py | 18 +- .../unit_tests/test_ceph_radosgw_utils.py | 72 ++++++++ ceph-radosgw/unit_tests/test_hooks.py | 67 ++++--- 11 files changed, 344 insertions(+), 145 deletions(-) diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph.py index 41a2a531..f5ac5963 100644 --- a/ceph-radosgw/hooks/ceph.py +++ b/ceph-radosgw/hooks/ceph.py @@ -21,23 +21,51 @@ from charmhelpers.core.hookenv import ( config, ) + +from charmhelpers.core.host import ( + mkdir +) from charmhelpers.contrib.storage.linux.ceph import ( CephBrokerRq, ) -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" +CEPH_DIR = '/etc/ceph' +CEPH_RADOSGW_DIR = '/var/lib/ceph/radosgw' +_radosgw_keyring = "keyring.rados.gateway" -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): +def import_radosgw_key(key, name=None): + if name: + keyring_path = os.path.join(CEPH_RADOSGW_DIR, + 'ceph-{}'.format(name), + 'keyring') + owner = group = 'ceph' + else: + keyring_path = os.path.join(CEPH_DIR, _radosgw_keyring) + owner = group = 'root' + + if not os.path.exists(keyring_path): + mkdir(path=os.path.dirname(keyring_path), + owner=owner, group=group, perms=0o750) cmd = [ 'ceph-authtool', - _radosgw_keyring, + keyring_path, '--create-keyring', - '--name=client.radosgw.gateway', + '--name=client.{}'.format( + name or 'radosgw.gateway' + ), '--add-key={}'.format(key) ] subprocess.check_call(cmd) + cmd = [ + 'chown', + '{}:{}'.format(owner, group), + keyring_path + ] + subprocess.check_call(cmd) + return True + + return False def get_create_rgw_pools_rq(prefix=None): diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 5c0654a1..aec22968 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -144,6 +144,10 @@ class MonContext(context.CephContext): def __call__(self): if not relation_ids('mon'): return {} + + host = socket.gethostname() + systemd_rgw = False + mon_hosts = [] auths = [] @@ -161,6 +165,8 @@ def __call__(self): ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr if ceph_addr: mon_hosts.append(ceph_addr) + if relation_get('rgw.{}_key'.format(host), rid=rid, unit=unit): + systemd_rgw = True if len(set(auths)) != 1: e = ("Inconsistent or absent auth returned by mon units. 
Setting " @@ -172,7 +178,6 @@ def __call__(self): # /etc/init.d/radosgw mandates that a dns name is used for this # parameter so ensure that address is resolvable - host = socket.gethostname() if config('prefer-ipv6'): ensure_host_resolvable_v6(host) @@ -186,6 +191,7 @@ def __call__(self): 'mon_hosts': ' '.join(mon_hosts), 'hostname': host, 'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0, + 'systemd_rgw': systemd_rgw, 'use_syslog': str(config('use-syslog')).lower(), 'loglevel': config('loglevel'), 'port': port, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 76828201..63c93044 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -856,12 +856,22 @@ def _keyring_path(service): return KEYRING.format(service) -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ keyring = _keyring_path(service) if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) cmd = ['ceph-authtool', keyring, '--create-keyring', '--name=client.{}'.format(service), '--add-key={}'.format(key)] @@ -869,6 +879,11 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + def delete_keyring(service): """Delete an existing Ceph keyring.""" keyring = _keyring_path(service) @@ -905,7 +920,7 @@ def get_ceph_nodes(relation='ceph'): def configure(service, key, auth, use_syslog): """Perform basic configuration of Ceph.""" - create_keyring(service, key) + add_key(service, key) create_key_file(service, key) hosts = get_ceph_nodes() with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: @@ -1068,7 +1083,7 @@ def ensure_ceph_keyring(service, user=None, group=None, if not key: return False - create_keyring(service=service, key=key) + add_key(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8481032d..8c6fef88 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -17,6 +17,7 @@ import os import subprocess import sys +import socket import ceph @@ -45,6 +46,9 @@ cmp_pkgrevno, is_container, service_reload, + service_restart, + service_stop, + service, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -84,6 +88,10 @@ disable_unused_apache_sites, pause_unit_helper, resume_unit_helper, + restart_map, + service_name, + systemd_based_radosgw, + request_per_unit_key, ) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -137,58 +145,83 @@ def install(): @hooks.hook('config-changed') -@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'], - '/etc/haproxy/haproxy.cfg': ['haproxy']}) @harden() def config_changed(): - # if we are paused, delay doing any config changed hooks. 
- # It is forced on the resume. - if is_unit_paused_set(): - log("Unit is pause or upgrading. Skipping config_changed", "WARN") - return + @restart_on_change(restart_map()) + def _config_changed(): + # if we are paused, delay doing any config changed hooks. + # It is forced on the resume. + if is_unit_paused_set(): + log("Unit is pause or upgrading. Skipping config_changed", "WARN") + return - install_packages() - disable_unused_apache_sites() + install_packages() + disable_unused_apache_sites() + + if config('prefer-ipv6'): + status_set('maintenance', 'configuring ipv6') + setup_ipv6() - if config('prefer-ipv6'): - status_set('maintenance', 'configuring ipv6') - setup_ipv6() + for r_id in relation_ids('identity-service'): + identity_changed(relid=r_id) - for r_id in relation_ids('identity-service'): - identity_changed(relid=r_id) + for r_id in relation_ids('cluster'): + cluster_joined(rid=r_id) - for r_id in relation_ids('cluster'): - cluster_joined(rid=r_id) + # NOTE(jamespage): Re-exec mon relation for any changes to + # enable ceph pool permissions restrictions + for r_id in relation_ids('mon'): + for unit in related_units(r_id): + mon_relation(r_id, unit) - # NOTE(jamespage): Re-exec mon relation for any changes to - # enable ceph pool permissions restrictions - for r_id in relation_ids('mon'): - for unit in related_units(r_id): - mon_relation(r_id, unit) + CONFIGS.write_all() + configure_https() - CONFIGS.write_all() - configure_https() + update_nrpe_config() - update_nrpe_config() + open_port(port=config('port')) + _config_changed() @hooks.hook('mon-relation-departed', 'mon-relation-changed') -@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def mon_relation(rid=None, unit=None): - rq = ceph.get_create_rgw_pools_rq( - prefix=config('pool-prefix')) - if is_request_complete(rq, relation='mon'): - log('Broker request complete', level=DEBUG) - CONFIGS.write_all() - key = relation_get(attribute='radosgw_key', - rid=rid, unit=unit) - if key: - ceph.import_radosgw_key(key) - if not is_unit_paused_set(): - restart() # TODO figure out a better way todo this - else: - send_request_if_needed(rq, relation='mon') + @restart_on_change(restart_map()) + def _mon_relation(): + key_name = 'rgw.{}'.format(socket.gethostname()) + if request_per_unit_key(): + relation_set(relation_id=rid, + key_name=key_name) + rq = ceph.get_create_rgw_pools_rq( + prefix=config('pool-prefix')) + if is_request_complete(rq, relation='mon'): + log('Broker request complete', level=DEBUG) + CONFIGS.write_all() + # New style per unit keys + key = relation_get(attribute='{}_key'.format(key_name), + rid=rid, unit=unit) + if not key: + # Fallback to old style global key + key = relation_get(attribute='radosgw_key', + rid=rid, unit=unit) + key_name = None + + if key: + new_keyring = ceph.import_radosgw_key(key, + name=key_name) + # NOTE(jamespage): + # Deal with switch from radosgw init script to + # systemd named units for radosgw instances by + # stopping and disabling the radosgw unit + if systemd_based_radosgw(): + service_stop('radosgw') + service('disable', 'radosgw') + if not is_unit_paused_set() and new_keyring: + service('enable', service_name()) + service_restart(service_name()) + else: + send_request_if_needed(rq, relation='mon') + _mon_relation() @hooks.hook('gateway-relation-joined') @@ -197,21 +230,6 @@ def gateway_relation(): port=config('port')) -def start(): - subprocess.call(['service', 'radosgw', 'start']) - open_port(port=config('port')) - - -def stop(): - subprocess.call(['service', 'radosgw', 
'stop']) - open_port(port=config('port')) - - -def restart(): - subprocess.call(['service', 'radosgw', 'restart']) - open_port(port=config('port')) - - @hooks.hook('identity-service-relation-joined') def identity_joined(relid=None): if cmp_pkgrevno('radosgw', '0.55') < 0: @@ -233,38 +251,42 @@ def identity_joined(relid=None): @hooks.hook('identity-service-relation-changed') -@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']}) def identity_changed(relid=None): - identity_joined(relid) - CONFIGS.write_all() - if not is_unit_paused_set(): - restart() - configure_https() + @restart_on_change(restart_map()) + def _identity_changed(): + identity_joined(relid) + CONFIGS.write_all() + configure_https() + _identity_changed() @hooks.hook('cluster-relation-joined') -@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_joined(rid=None): - settings = {} + @restart_on_change(restart_map()) + def _cluster_joined(): + settings = {} - for addr_type in ADDRESS_TYPES: - address = get_relation_ip( - addr_type, - cidr_network=config('os-{}-network'.format(addr_type))) - if address: - settings['{}-address'.format(addr_type)] = address + for addr_type in ADDRESS_TYPES: + address = get_relation_ip( + addr_type, + cidr_network=config('os-{}-network'.format(addr_type))) + if address: + settings['{}-address'.format(addr_type)] = address - settings['private-address'] = get_relation_ip('cluster') + settings['private-address'] = get_relation_ip('cluster') - relation_set(relation_id=rid, relation_settings=settings) + relation_set(relation_id=rid, relation_settings=settings) + _cluster_joined() @hooks.hook('cluster-relation-changed') -@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']}) def cluster_changed(): - CONFIGS.write_all() - for r_id in relation_ids('identity-service'): - identity_joined(relid=r_id) + @restart_on_change(restart_map()) + def _cluster_changed(): + CONFIGS.write_all() + for r_id in relation_ids('identity-service'): + identity_joined(relid=r_id) + _cluster_changed() @hooks.hook('ha-relation-joined') diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index c79e0022..b6622adc 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -14,6 +14,7 @@ import os import re +import socket import subprocess import sys @@ -57,6 +58,7 @@ lsb_release, mkdir, CompareHostReleases, + init_is_systemd, ) from charmhelpers.fetch import ( apt_cache, @@ -124,7 +126,7 @@ }), (CEPH_CONF, { 'contexts': [ceph_radosgw_context.MonContext()], - 'services': ['radosgw'], + 'services': [], }), (APACHE_SITE_CONF, { 'contexts': [ceph_radosgw_context.ApacheSSLContext()], @@ -152,14 +154,25 @@ def resource_map(): """ resource_map = deepcopy(BASE_RESOURCE_MAP) - if os.path.exists('/etc/apache2/conf-available'): + if not https(): resource_map.pop(APACHE_SITE_CONF) - else: resource_map.pop(APACHE_SITE_24_CONF) + else: + if os.path.exists('/etc/apache2/conf-available'): + resource_map.pop(APACHE_SITE_CONF) + else: + resource_map.pop(APACHE_SITE_24_CONF) + resource_map[CEPH_CONF]['services'] = [service_name()] return resource_map +def restart_map(): + return OrderedDict([(cfg, v['services']) + for cfg, v in resource_map().items() + if v['services']]) + + # Hardcoded to icehouse to enable use of charmhelper templating/context tools # Ideally these function would support non-OpenStack services def register_configs(release='icehouse'): @@ -180,12 +193,9 @@ def register_configs(release='icehouse'): def services(): """Returns a list of services associate with this 
charm.""" _services = [] - for v in BASE_RESOURCE_MAP.values(): + for v in resource_map().values(): _services.extend(v.get('services', [])) - _set_services = set(_services) - if not https(): - _set_services.remove('apache2') - return list(_set_services) + return list(set(_services)) def enable_pocket(pocket): @@ -560,3 +570,26 @@ def disable_unused_apache_sites(): with open(APACHE_PORTS_FILE, 'w') as ports: ports.write("") + + +def systemd_based_radosgw(): + """Determine if install should use systemd based radosgw instances""" + host = socket.gethostname() + for rid in relation_ids('mon'): + for unit in related_units(rid): + if relation_get('rgw.{}_key'.format(host), rid=rid, unit=unit): + return True + return False + + +def request_per_unit_key(): + """Determine if a per-unit cephx key should be requested""" + return (cmp_pkgrevno('radosgw', '12.2.0') >= 0 and init_is_systemd()) + + +def service_name(): + """Determine the name of the RADOS Gateway service""" + if systemd_based_radosgw(): + return 'ceph-radosgw@rgw.{}'.format(socket.gethostname()) + else: + return 'radosgw' diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index d89902e4..7b403a82 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -22,12 +22,18 @@ ms bind ipv6 = true {% endfor %} {% endif %} -[client.radosgw.gateway] +{% if systemd_rgw -%} +[client.rgw.{{ hostname }}] host = {{ hostname }} -rgw init timeout = 1200 +{% else -%} +[client.radosgw.gateway] keyring = /etc/ceph/keyring.rados.gateway +host = {{ hostname }} rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log +{% endif %} + +rgw init timeout = 1200 rgw frontends = civetweb port={{ port }} {% if auth_type == 'keystone' %} rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py index afd88839..b8de999b 100644 --- a/ceph-radosgw/tests/basic_deployment.py +++ b/ceph-radosgw/tests/basic_deployment.py @@ -329,7 +329,6 @@ def test_201_ceph_radosgw_relation(self): relation = ['radosgw', 'ceph-radosgw:mon'] expected = { 'private-address': u.valid_ip, - 'radosgw_key': u.not_null, 'auth': 'none', 'ceph-public-address': u.valid_ip, } @@ -394,10 +393,6 @@ def test_300_ceph_radosgw_config(self): u.log.debug('Checking ceph config file data...') unit = self.ceph_radosgw_sentry conf = '/etc/ceph/ceph.conf' - keystone_sentry = self.keystone_sentry - relation = keystone_sentry.relation('identity-service', - 'ceph-radosgw:identity-service') - keystone_ip = relation['auth_host'] expected = { 'global': { 'auth cluster required': 'none', @@ -407,26 +402,7 @@ def test_300_ceph_radosgw_config(self): 'err to syslog': 'false', 'clog to syslog': 'false' }, - 'client.radosgw.gateway': { - 'keyring': '/etc/ceph/keyring.rados.gateway', - 'rgw socket path': '/tmp/radosgw.sock', - 'log file': '/var/log/ceph/radosgw.log', - 'rgw keystone url': 'http://{}:35357/'.format(keystone_ip), - 'rgw keystone accepted roles': 'Member,Admin', - 'rgw keystone token cache size': '500', - 'rgw keystone revocation interval': '600', - 'rgw frontends': 'civetweb port=70', - }, } - if self._get_openstack_release() >= self.xenial_queens: - expected['client.radosgw.gateway']['rgw keystone admin domain'] = ( - 'service_domain') - (expected['client.radosgw.gateway'] - ['rgw keystone admin project']) = 'services' - else: - expected['client.radosgw.gateway']['rgw keystone admin token'] = ( - 'ubuntutesting') - for section, 
pairs in expected.items(): ret = u.validate_config_data(unit, conf, section, pairs) if ret: diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 673dd72f..044d24e2 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -32,6 +32,7 @@ 'config', 'os', 'subprocess', + 'mkdir', ] @@ -42,6 +43,7 @@ def setUp(self): def test_import_radosgw_key(self): self.os.path.exists.return_value = False + self.os.path.join.return_value = '/etc/ceph/keyring.rados.gateway' ceph.import_radosgw_key('mykey') cmd = [ 'ceph-authtool', @@ -50,7 +52,11 @@ def test_import_radosgw_key(self): '--name=client.radosgw.gateway', '--add-key=mykey' ] - self.subprocess.check_call.assert_called_with(cmd) + self.subprocess.check_call.assert_has_calls([ + call(cmd), + call(['chown', 'root:root', + '/etc/ceph/keyring.rados.gateway']) + ]) @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_create_pool') diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 848e3c27..7bf051b2 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -29,7 +29,8 @@ 'cmp_pkgrevno', 'socket', 'unit_public_ip', - 'determine_api_port' + 'determine_api_port', + 'cmp_pkgrevno', ] @@ -312,6 +313,8 @@ def _relation_get(attr, unit, rid): return addresses.pop() elif attr == 'auth': return 'cephx' + elif attr == 'rgw.testhost_key': + return 'testkey' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -322,6 +325,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'systemd_rgw': True, 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, @@ -346,12 +350,15 @@ def test_list_of_addresses_from_ceph_proxy(self, mock_ensure_rsv_v6): self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1 10.5.4.2 10.5.4.3'] + self.cmp_pkgrevno.return_value = 1 def _relation_get(attr, unit, rid): if attr == 'ceph-public-address': return addresses.pop() elif attr == 'auth': return 'cephx' + elif attr == 'rgw.testhost_key': + return 'testkey' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -362,6 +369,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'systemd_rgw': True, 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, @@ -402,6 +410,9 @@ def _relation_get(attr, unit, rid): return addresses.pop() elif attr == 'auth': return auths.pop() + elif attr == 'rgw.testhost_key': + return 'testkey' + self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] @@ -411,6 +422,7 @@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'systemd_rgw': True, 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, @@ -433,6 +445,9 @@ def _relation_get(attr, unit, rid): return addresses.pop() elif attr == 'auth': return auths.pop() + elif attr == 'rgw.testhost_key': + return 'testkey' + self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] @@ -442,6 +457,7 
@@ def _relation_get(attr, unit, rid): 'hostname': 'testhost', 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', 'old_auth': False, + 'systemd_rgw': True, 'unit_public_ip': '10.255.255.255', 'use_syslog': 'false', 'loglevel': 1, diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 8df4214d..fcb1dd98 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -28,6 +28,12 @@ 'get_upstream_version', 'format_endpoint', 'https', + 'relation_ids', + 'relation_get', + 'related_units', + 'socket', + 'cmp_pkgrevno', + 'init_is_systemd', ] @@ -35,6 +41,7 @@ class CephRadosGWUtilTests(CharmTestCase): def setUp(self): super(CephRadosGWUtilTests, self).setUp(utils, TO_PATCH) self.get_upstream_version.return_value = '10.2.2' + self.socket.gethostname.return_value = 'testhost' def test_assess_status(self): with patch.object(utils, 'assess_status_func') as asf: @@ -219,3 +226,68 @@ def test_get_ks_ca_cert(self, mock_check_output, mock_Popen, c = ['openssl', 'x509', '-in', '/foo/bar/ca.pem', '-pubkey'] mock_check_output.assert_called_with(c) + + def _setup_relation_data(self, data): + self.relation_ids.return_value = data.keys() + self.related_units.side_effect = ( + lambda rid: data[rid].keys() + ) + self.relation_get.side_effect = ( + lambda attr, rid, unit: data[rid][unit].get(attr) + ) + + def test_systemd_based_radosgw_old_style(self): + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/1': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/2': { + 'radosgw_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertFalse(utils.systemd_based_radosgw()) + + def test_systemd_based_radosgw_new_style(self): + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/1': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/2': { + 'rgw.testhost_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertTrue(utils.systemd_based_radosgw()) + + def test_request_per_unit_key(self): + self.init_is_systemd.return_value = False + self.cmp_pkgrevno.return_value = -1 + self.assertFalse(utils.request_per_unit_key()) + self.init_is_systemd.return_value = True + self.cmp_pkgrevno.return_value = 1 + self.assertTrue(utils.request_per_unit_key()) + self.init_is_systemd.return_value = False + self.cmp_pkgrevno.return_value = 1 + self.assertFalse(utils.request_per_unit_key()) + + self.cmp_pkgrevno.assert_called_with('radosgw', '12.2.0') + + @patch.object(utils, 'systemd_based_radosgw') + def test_service_name(self, mock_systemd_based_radosgw): + mock_systemd_based_radosgw.return_value = True + self.assertEqual(utils.service_name(), + 'ceph-radosgw@rgw.testhost') + mock_systemd_based_radosgw.return_value = False + self.assertEqual(utils.service_name(), + 'radosgw') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 121f06cc..5c65ab30 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -55,7 +55,15 @@ 'get_relation_ip', 'disable_unused_apache_sites', 'service_reload', + 'service_stop', + 'service_restart', + 'service', 'setup_keystone_certs', + 'service_name', + 'socket', + 'restart_map', + 'systemd_based_radosgw', + 'request_per_unit_key', ] @@ -68,6 +76,9 @@ def setUp(self): self.test_config.set('key', 'secretkey') self.test_config.set('use-syslog', False) 
self.cmp_pkgrevno.return_value = 0 + self.service_name.return_value = 'radosgw' + self.request_per_unit_key.return_value = False + self.systemd_based_radosgw.return_value = False def test_install_packages(self): ceph_hooks.install_packages() @@ -95,22 +106,46 @@ def test_config_changed(self, update_nrpe_config): lambda *args, **kwargs: True) def test_mon_relation(self): _ceph = self.patch('ceph') - _restart = self.patch('restart') + _ceph.import_radosgw_key.return_value = True self.relation_get.return_value = 'seckey' + self.socket.gethostname.return_value = 'testinghostname' ceph_hooks.mon_relation() - self.assertTrue(_restart.called) - _ceph.import_radosgw_key.assert_called_with('seckey') + self.relation_set.assert_not_called() + self.service_restart.assert_called_once_with('radosgw') + self.service.assert_called_once_with('enable', 'radosgw') + _ceph.import_radosgw_key.assert_called_with('seckey', + name='rgw.testinghostname') + self.CONFIGS.write_all.assert_called_with() + + @patch.object(ceph_hooks, 'is_request_complete', + lambda *args, **kwargs: True) + def test_mon_relation_request_key(self): + _ceph = self.patch('ceph') + _ceph.import_radosgw_key.return_value = True + self.relation_get.return_value = 'seckey' + self.socket.gethostname.return_value = 'testinghostname' + self.request_per_unit_key.return_value = True + ceph_hooks.mon_relation() + self.relation_set.assert_called_with( + relation_id=None, + key_name='rgw.testinghostname' + ) + self.service_restart.assert_called_once_with('radosgw') + self.service.assert_called_once_with('enable', 'radosgw') + _ceph.import_radosgw_key.assert_called_with('seckey', + name='rgw.testinghostname') self.CONFIGS.write_all.assert_called_with() @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) def test_mon_relation_nokey(self): _ceph = self.patch('ceph') - _restart = self.patch('restart') + _ceph.import_radosgw_key.return_value = False self.relation_get.return_value = None ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) - self.assertFalse(_restart.called) + self.service_restart.assert_not_called() + self.service.assert_not_called() self.CONFIGS.write_all.assert_called_with() @patch.object(ceph_hooks, 'send_request_if_needed') @@ -119,10 +154,11 @@ def test_mon_relation_nokey(self): def test_mon_relation_send_broker_request(self, mock_send_request_if_needed): _ceph = self.patch('ceph') - _restart = self.patch('restart') + _ceph.import_radosgw_key.return_value = False self.relation_get.return_value = 'seckey' ceph_hooks.mon_relation() - self.assertFalse(_restart.called) + self.service_restart.assert_not_called() + self.service.assert_not_called() self.assertFalse(_ceph.import_radosgw_key.called) self.assertFalse(self.CONFIGS.called) self.assertTrue(mock_send_request_if_needed.called) @@ -132,21 +168,6 @@ def test_gateway_relation(self): ceph_hooks.gateway_relation() self.relation_set.assert_called_with(hostname='10.0.0.1', port=80) - def test_start(self): - ceph_hooks.start() - cmd = ['service', 'radosgw', 'start'] - self.subprocess.call.assert_called_with(cmd) - - def test_stop(self): - ceph_hooks.stop() - cmd = ['service', 'radosgw', 'stop'] - self.subprocess.call.assert_called_with(cmd) - - def test_restart(self): - ceph_hooks.restart() - cmd = ['service', 'radosgw', 'restart'] - self.subprocess.call.assert_called_with(cmd) - @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.config') @@ -200,10 +221,8 @@ def 
test_identity_joined_public_name(self, _config, _unit_get, @patch.object(ceph_hooks, 'identity_joined') def test_identity_changed(self, mock_identity_joined): - _restart = self.patch('restart') ceph_hooks.identity_changed() self.CONFIGS.write_all.assert_called_with() - self.assertTrue(_restart.called) self.assertTrue(mock_identity_joined.called) @patch('charmhelpers.contrib.openstack.ip.is_clustered') From 44e8ec860b1d3ea956d0c53a38714ee45b461e7f Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 25 Jan 2019 10:45:51 +0000 Subject: [PATCH 1645/2699] Use OpenStack HA charmhelper for ha configuration Use the new OpenStack HA charmhelper to generate required data to pass to the hacluster charm when running in clustered deployments. This also makes the switch to iface-less configuration of VIP resources resolving issues in deployments where LXD containers don't have consistent interface ordering across the application. Change-Id: Ie0ca7fb0221cb6c3f886161e1b446d4fae5775a9 --- ceph-radosgw/hooks/hooks.py | 80 +++------------------------ ceph-radosgw/unit_tests/test_hooks.py | 71 +++--------------------- 2 files changed, 16 insertions(+), 135 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8c6fef88..6c37e534 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -30,7 +30,6 @@ relation_set, log, DEBUG, - WARNING, Hooks, UnregisteredHookError, status_set, ) @@ -52,9 +51,6 @@ ) from charmhelpers.contrib.network.ip import ( get_relation_ip, - get_iface_for_address, - get_netmask_for_address, - is_ipv6, ) from charmhelpers.contrib.openstack.context import ADDRESS_TYPES from charmhelpers.contrib.openstack.ip import ( @@ -71,15 +67,11 @@ series_upgrade_prepare, series_upgrade_complete, ) -from charmhelpers.contrib.hahelpers.cluster import ( - get_hacluster_config, -) from charmhelpers.contrib.openstack.ha.utils import ( - update_dns_ha_resource_params, + generate_ha_relation_data, ) from utils import ( enable_pocket, - CEPHRG_HA_RES, register_configs, setup_ipv6, services, @@ -174,6 +166,11 @@ def _config_changed(): for unit in related_units(r_id): mon_relation(r_id, unit) + # Re-trigger hacluster relations to switch to ifaceless + # vip configuration + for r_id in relation_ids('ha'): + ha_relation_joined(r_id) + CONFIGS.write_all() configure_https() @@ -291,69 +288,8 @@ def _cluster_changed(): @hooks.hook('ha-relation-joined') def ha_relation_joined(relation_id=None): - cluster_config = get_hacluster_config() - # Obtain resources - resources = { - 'res_cephrg_haproxy': 'lsb:haproxy' - } - resource_params = { - 'res_cephrg_haproxy': 'op monitor interval="5s"' - } - - if config('dns-ha'): - update_dns_ha_resource_params(relation_id=relation_id, - resources=resources, - resource_params=resource_params) - else: - vip_group = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_rgw_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_rgw_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface = get_iface_for_address(vip) - netmask = get_netmask_for_address(vip) - - if iface is not None: - vip_key = 'res_cephrg_{}_vip'.format(iface) - if vip_key in vip_group: - if vip not in resource_params[vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue - - resources[vip_key] = res_rgw_vip - resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}"' - ' 
nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - vip_group.append(vip_key) - - if len(vip_group) >= 1: - relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)}) - - init_services = { - 'res_cephrg_haproxy': 'haproxy' - } - clones = { - 'cl_cephrg_haproxy': 'res_cephrg_haproxy' - } - - relation_set(relation_id=relation_id, - init_services=init_services, - corosync_bindiface=cluster_config['ha-bindiface'], - corosync_mcastport=cluster_config['ha-mcastport'], - resources=resources, - resource_params=resource_params, - clones=clones) + settings = generate_ha_relation_data('cephrg') + relation_set(relation_id=relation_id, **settings) @hooks.hook('ha-relation-changed') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 5c65ab30..e01abadc 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -38,8 +38,6 @@ 'cmp_pkgrevno', 'execd_preinstall', 'enable_pocket', - 'get_iface_for_address', - 'get_netmask_for_address', 'log', 'open_port', 'os', @@ -50,8 +48,7 @@ 'status_set', 'subprocess', 'sys', - 'get_hacluster_config', - 'update_dns_ha_resource_params', + 'generate_ha_relation_data', 'get_relation_ip', 'disable_unused_apache_sites', 'service_reload', @@ -261,67 +258,15 @@ def test_cluster_changed(self): self.CONFIGS.write_all.assert_called_with() _id_joined.assert_called_with(relid='rid') - def test_ha_relation_joined_vip(self): - self.test_config.set('ha-bindiface', 'eth8') - self.test_config.set('ha-mcastport', '5000') - self.test_config.set('vip', '10.0.0.10') - self.get_hacluster_config.return_value = { - 'vip': '10.0.0.10', - 'ha-bindiface': 'eth8', - 'ha-mcastport': '5000', + def test_ha_relation_joined(self): + self.generate_ha_relation_data.return_value = { + 'test': 'data' } - self.get_iface_for_address.return_value = 'eth7' - self.get_netmask_for_address.return_value = '255.255.0.0' - ceph_hooks.ha_relation_joined() - eth_params = ('params ip="10.0.0.10" cidr_netmask="255.255.0.0" ' - 'nic="eth7"') - resources = {'res_cephrg_haproxy': 'lsb:haproxy', - 'res_cephrg_eth7_vip': 'ocf:heartbeat:IPaddr2'} - resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"', - 'res_cephrg_eth7_vip': eth_params} + ceph_hooks.ha_relation_joined(relation_id='ha:1') self.relation_set.assert_called_with( - relation_id=None, - init_services={'res_cephrg_haproxy': 'haproxy'}, - corosync_bindiface='eth8', - corosync_mcastport='5000', - resource_params=resource_params, - resources=resources, - clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'}) - - def test_ha_joined_dns_ha(self): - def _fake_update(resources, resource_params, relation_id=None): - resources.update({'res_cephrg_public_hostname': 'ocf:maas:dns'}) - resource_params.update({'res_cephrg_public_hostname': - 'params fqdn="keystone.maas" ' - 'ip_address="10.0.0.1"'}) - - self.test_config.set('dns-ha', True) - self.get_hacluster_config.return_value = { - 'vip': None, - 'ha-bindiface': 'em0', - 'ha-mcastport': '8080', - 'os-admin-hostname': None, - 'os-internal-hostname': None, - 'os-public-hostname': 'keystone.maas', - } - args = { - 'relation_id': None, - 'corosync_bindiface': 'em0', - 'corosync_mcastport': '8080', - 'init_services': {'res_cephrg_haproxy': 'haproxy'}, - 'resources': {'res_cephrg_public_hostname': 'ocf:maas:dns', - 'res_cephrg_haproxy': 'lsb:haproxy'}, - 'resource_params': { - 'res_cephrg_public_hostname': 'params fqdn="keystone.maas" ' - 'ip_address="10.0.0.1"', - 'res_cephrg_haproxy': 'op monitor 
interval="5s"'}, 'clones': {'cl_cephrg_haproxy': 'res_cephrg_haproxy'} } self.update_dns_ha_resource_params.side_effect = _fake_update - ceph_hooks.ha_relation_joined() - self.assertTrue(self.update_dns_ha_resource_params.called) - self.relation_set.assert_called_with(**args) + relation_id='ha:1', + test='data' ) def test_ha_relation_changed(self): _id_joined = self.patch('identity_joined') From 167a1be16240535929da8f0b0f6ea196c3c5d0d3 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 11 Dec 2018 12:25:35 -0300 Subject: [PATCH 1646/2699] Update cloud-archive.list when upgrading from Pike to Queens The ceph-mon charm only upgrades when the ceph version changes; in the case of upgrading from Pike to Queens the charm skips any upgrade, because the Cloud Archive ships Luminous for both releases. This patch checks whether the requested ceph version is luminous and whether 'source' changed from pike to queens, and if so updates /etc/apt/sources.list.d/cloud-archive.list via add_source(). Change-Id: I05b7d722e45d3a02a97866903a67bd9b16d4f552 Closes-Bug: 1778823 --- ceph-mon/hooks/ceph_hooks.py | 19 +++++++- ceph-mon/lib/ceph/utils.py | 1 + ceph-mon/unit_tests/test_ceph_hooks.py | 5 +- ceph-mon/unit_tests/test_upgrade.py | 65 ++++++++++++++++++-------- ceph-mon/unit_tests/test_utils.py | 19 ++++++-- 5 files changed, 84 insertions(+), 25 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8b70ce71..df438832 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -30,6 +30,8 @@ from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, + INFO, config, relation_ids, related_units, @@ -64,6 +66,7 @@ from charmhelpers.contrib.openstack.utils import ( clear_unit_paused, clear_unit_upgrading, + get_os_codename_install_source, is_unit_upgrading_set, set_unit_paused, set_unit_upgrading, @@ -105,6 +108,11 @@ def check_for_upgrade(): log('old_version: {}'.format(old_version)) # Strip all whitespace new_version = ceph.resolve_ceph_version(hookenv.config('source')) + + old_version_os = get_os_codename_install_source(c.previous('source') or + 'distro') + new_version_os = get_os_codename_install_source(hookenv.config('source')) + log('new_version: {}'.format(new_version)) if (old_version in ceph.UPGRADE_PATHS and @@ -113,12 +121,21 @@ def check_for_upgrade(): old_version, new_version)) ceph.roll_monitor_cluster(new_version=new_version, upgrade_key='admin') + elif (old_version == new_version and + old_version_os < new_version_os): + # See LP: #1778823 + add_source(hookenv.config('source'), hookenv.config('key')) + log(("The installation source has changed yet there is no new major " + "version of Ceph in this new source. As a result no package " + "upgrade will take effect. Please upgrade manually if you need " + "to."), level=INFO) else: # Log a helpful error message log("Invalid upgrade path from {} to {}.
" "Valid paths are: {}".format(old_version, new_version, - ceph.pretty_print_upgrade_paths())) + ceph.pretty_print_upgrade_paths()), + level=ERROR) @hooks.hook('install.real') diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 4fbf0fbc..a1cfbdc6 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -2564,6 +2564,7 @@ def dirs_need_ownership_update(service): 'pike': 'luminous', 'queens': 'luminous', 'rocky': 'mimic', + 'stein': 'mimic', } diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 0a8b4393..a7fb43af 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -187,6 +187,7 @@ def test_nrpe_dependency_installed(self, mock_config): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) + @patch.object(ceph_hooks, 'service_pause') @patch.object(ceph_hooks, 'notify_radosgws') @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'notify_client') @@ -196,7 +197,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_config, mock_notify_client, mock_ceph, - mock_notify_radosgws): + mock_notify_radosgws, + mock_service_pause): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] with patch.multiple( @@ -217,6 +219,7 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_notify_client.assert_called_once_with() mock_notify_radosgws.assert_called_once_with() mock_ceph.update_monfs.assert_called_once_with() + mock_service_pause.assert_called_with('ceph-create-keys') class RelatedUnitsTestCase(unittest.TestCase): diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index f860f61e..f784f7cb 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -1,9 +1,9 @@ -import unittest +from mock import patch +from ceph_hooks import check_for_upgrade +from test_utils import CharmTestCase -__author__ = 'Chris Holcombe ' -from mock import patch, MagicMock -from ceph_hooks import check_for_upgrade +__author__ = 'Chris Holcombe ' def config_side_effect(*args): @@ -15,20 +15,17 @@ def config_side_effect(*args): return 'cloud:trusty-kilo' -class UpgradeRollingTestCase(unittest.TestCase): +class UpgradeRollingTestCase(CharmTestCase): @patch('ceph_hooks.ceph.is_bootstrapped') - @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade(self, roll_monitor_cluster, hookenv, - version, is_bootstrapped): + is_bootstrapped): is_bootstrapped.return_value = True - version.side_effect = ['firefly', 'hammer'] - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] + self.test_config.set_previous('source', 'cloud:trusty-juno') + self.test_config.set('source', 'cloud:trusty-kilo') + hookenv.config.side_effect = self.test_config check_for_upgrade() roll_monitor_cluster.assert_called_with( @@ -36,18 +33,46 @@ def test_check_for_upgrade(self, roll_monitor_cluster, hookenv, upgrade_key='admin') @patch('ceph_hooks.ceph.is_bootstrapped') - @patch('ceph_hooks.ceph.resolve_ceph_version') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_monitor_cluster') def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, - hookenv, - version, is_bootstrapped): + hookenv, is_bootstrapped): 
is_bootstrapped.return_value = False - version.side_effect = ['firefly', 'hammer'] - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] + self.test_config.set_previous('source', 'cloud:trusty-juno') + self.test_config.set('source', 'cloud:trusty-kilo') + hookenv.config.side_effect = self.test_config + check_for_upgrade() + + roll_monitor_cluster.assert_not_called() + + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source): + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + self.test_config.set('key', 'some-key') + self.test_config.set_previous('source', 'cloud:xenial-pike') + self.test_config.set('source', 'cloud:xenial-queens') check_for_upgrade() + roll_monitor_cluster.assert_not_called() + add_source.assert_called_with('cloud:xenial-queens', 'some-key') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source): + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + self.test_config.set('key', 'some-key') + self.test_config.set_previous('source', 'cloud:bionic-rocky') + self.test_config.set('source', 'cloud:bionic-stein') + check_for_upgrade() roll_monitor_cluster.assert_not_called() + add_source.assert_called_with('cloud:bionic-stein', 'some-key') diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 8539d8ec..83fe5ae2 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -59,10 +59,10 @@ def get_default_config(): class CharmTestCase(unittest.TestCase): - def setUp(self, obj, patches): + def setUp(self, obj=None, patches=None): super(CharmTestCase, self).setUp() - self.patches = patches - self.obj = obj + self.patches = patches or [] + self.obj = obj or [] self.test_config = TestConfig() self.test_relation = TestRelation() self.test_leader_settings = TestLeaderSettings() @@ -85,6 +85,13 @@ def __init__(self): self.config = get_default_config() self.config_changed = {} self.config_changed.setdefault(False) + self._previous = get_default_config() + + def __call__(self, key=None): + if key: + return self[key] + else: + return self def get(self, attr=None): if not attr: @@ -113,6 +120,12 @@ def changed(self, attr): def set_changed(self, attr, changed=True): self.config_changed[attr] = changed + def set_previous(self, key, value): + self._previous[key] = value + + def previous(self, key): + return self._previous[key] + class TestRelation(object): From 329d4fa87fa1c3ef37a696e2567fe38f7fd7a60d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 30 Jan 2019 15:54:56 +0100 Subject: [PATCH 1647/2699] Update source/packages if config changes This change also removes the harden decorators, as the required configuration for harden is _not_ present in the charm config, rendering them useless.
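The detection relies on charmhelpers' config() returning a Config object that remembers each key's value from the previous hook invocation. A minimal sketch of the pattern used here (the helper name is hypothetical; the comparison matches the one added in this patch):

    from charmhelpers.core.hookenv import config

    def install_source_changed():
        # Config.previous(key) yields the value recorded on the last
        # completed hook run; compare it with the current setting to
        # decide whether packages need to be re-installed.
        c = config()
        return (c.previous('source') != c['source'] or
                c.previous('key') != c['key'])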
Change-Id: I20c124d9588b8fd6c0e6611725a848eaf892f6af Closes-Bug: #1812219 --- ceph-proxy/hooks/ceph_hooks.py | 15 ++++++----- ceph-proxy/unit_tests/test_ceph_hooks.py | 34 ++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 202f6541..6347e0e9 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -53,7 +53,6 @@ ) from utils import get_unit_hostname -from charmhelpers.contrib.hardening.harden import harden hooks = Hooks() @@ -66,17 +65,19 @@ def install_upstart_scripts(): @hooks.hook('install.real') -@harden() def install(): execd_preinstall() + package_install() + install_upstart_scripts() + + +def package_install(): add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.PACKAGES, fatal=True) - install_upstart_scripts() def emit_cephconf(): - cephcontext = { 'auth_supported': config('auth-supported'), 'mon_hosts': config('monitor-hosts'), @@ -117,8 +118,11 @@ def emit_cephconf(): @hooks.hook('config-changed') -@harden() def config_changed(): + c = config() + if c.previous('source') != config('source') or \ + c.previous('key') != config('key'): + package_install() emit_cephconf() @@ -237,7 +241,6 @@ def assess_status(): @hooks.hook('update-status') -@harden() def update_status(): log('Updating status.') diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index 4f655962..0b394cf2 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -51,8 +51,8 @@ def setUp(self): self.log.side_effect = fake_log @mock.patch('subprocess.check_output') - def test_radosgw_realtion(self, mock_check_output): - + @mock.patch('ceph_hooks.apt_install') + def test_radosgw_relation(self, mock_apt_install, mock_check_output): settings = {'ceph-public-address': '127.0.0.1:1234 [::1]:4321', 'radosgw_key': CEPH_KEY, 'auth': 'cephx', @@ -66,6 +66,7 @@ def test_radosgw_realtion(self, mock_check_output): hooks.radosgw_relation() self.relation_set.assert_called_with(relation_id=None, relation_settings=settings) + mock_apt_install.assert_called_with(packages=[]) @mock.patch('ceph.ceph_user') @mock.patch.object(hooks, 'radosgw_relation') @@ -136,3 +137,32 @@ def test_client_relation_joined(self, mock_check_output): self.relation_set.assert_called_with(relation_id='client:1', relation_settings=data) + + @mock.patch('ceph_hooks.emit_cephconf') + @mock.patch('ceph_hooks.package_install') + def test_config_get_skips_package_update(self, + mock_package_install, + mock_emit_cephconf): + previous_test_config = test_utils.TestConfig() + previous_test_config.set('source', 'distro') + previous_test_config.set('key', '') + previous = mock.MagicMock().return_value + previous.previous.side_effect = lambda x: previous_test_config.get(x) + self.config.side_effect = [previous, "distro", ""] + hooks.config_changed() + mock_package_install.assert_not_called() + mock_emit_cephconf.assert_any_call() + + @mock.patch('ceph_hooks.emit_cephconf') + @mock.patch('ceph_hooks.package_install') + def test_update_apt_source(self, mock_package_install, mock_emit_cephconf): + + previous_test_config = test_utils.TestConfig() + previous_test_config.set('source', 'distro') + previous_test_config.set('key', '') + previous = mock.MagicMock().return_value + previous.previous.side_effect = lambda x: previous_test_config.get(x) + self.config.side_effect = [previous, "cloud:cosmic-mimic", ""] + 
hooks.config_changed() + mock_package_install.assert_called_with() + mock_emit_cephconf.assert_called_with() From d33c185723c904c80f6aa050657b6160f1c21e6c Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 1 Feb 2019 11:39:27 -0300 Subject: [PATCH 1648/2699] Update cloud-archive.list when upgrading from Pike to Queens The ceph-osd charm only upgrades when the ceph version changes; in the case of upgrading from Pike to Queens the charm skips any upgrade, because the Cloud Archive ships Luminous for both releases. This patch checks whether the requested ceph version is luminous and whether 'source' changed from pike to queens, and if so updates /etc/apt/sources.list.d/cloud-archive.list via add_source(). Change-Id: I27c2d7648a1add6528924724a03682060d2d6007 Closes-Bug: 1778823 --- ceph-osd/hooks/ceph_hooks.py | 21 ++++++-- ceph-osd/lib/ceph/utils.py | 1 + ceph-osd/unit_tests/test_upgrade.py | 82 +++++++++++++++++++++-------- ceph-osd/unit_tests/test_utils.py | 30 +++++++++-- 4 files changed, 104 insertions(+), 30 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 7828613f..21885dcc 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -99,6 +99,7 @@ from charmhelpers.contrib.openstack.utils import ( clear_unit_paused, clear_unit_upgrading, + get_os_codename_install_source, is_unit_paused_set, is_unit_upgrading_set, set_unit_paused, @@ -117,6 +118,7 @@ def check_for_upgrade(): + if not os.path.exists(ceph._upgrade_keyring): log("Ceph upgrade keyring not detected, skipping upgrade checks.") return @@ -129,14 +131,14 @@ def check_for_upgrade(): 'distro') log('new_version: {}'.format(new_version)) + old_version_os = get_os_codename_install_source(c.previous('source') or + 'distro') + new_version_os = get_os_codename_install_source(hookenv.config('source')) + # May be in a previous upgrade that was failed if the directories # still need an ownership update. Check this condition. resuming_upgrade = ceph.dirs_need_ownership_update('osd') - if old_version == new_version and not resuming_upgrade: - log("No new ceph version detected, skipping upgrade.", DEBUG) - return - if (ceph.UPGRADE_PATHS.get(old_version) == new_version) or\ resuming_upgrade: if old_version == new_version: @@ -150,12 +152,21 @@ def check_for_upgrade(): ceph.roll_osd_cluster(new_version=new_version, upgrade_key='osd-upgrade') emit_cephconf(upgrading=False) + elif (old_version == new_version and + old_version_os < new_version_os): + # See LP: #1778823 + add_source(hookenv.config('source'), hookenv.config('key')) + log(("The installation source has changed yet there is no new major " + "version of Ceph in this new source. As a result no package " + "upgrade will take effect. Please upgrade manually if you need " + "to."), level=INFO) else: # Log a helpful error message log("Invalid upgrade path from {} to {}.
" "Valid paths are: {}".format(old_version, new_version, - ceph.pretty_print_upgrade_paths())) + ceph.pretty_print_upgrade_paths()), + level=ERROR) def tune_network_adapters(): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 08e627c4..8d06328f 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -2563,6 +2563,7 @@ def dirs_need_ownership_update(service): 'pike': 'luminous', 'queens': 'luminous', 'rocky': 'mimic', + 'stein': 'mimic', } diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index ad876214..fa47ff71 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -1,22 +1,12 @@ -import unittest - -__author__ = 'Chris Holcombe ' - -from mock import call, patch, MagicMock - +from mock import call, patch +from test_utils import CharmTestCase from ceph_hooks import check_for_upgrade -def config_side_effect(*args): - if args[0] == 'source': - return 'cloud:trusty-kilo' - elif args[0] == 'key': - return 'key' - elif args[0] == 'release-version': - return 'cloud:trusty-kilo' +__author__ = 'Chris Holcombe ' -class UpgradeRollingTestCase(unittest.TestCase): +class UpgradeRollingTestCase(CharmTestCase): @patch('ceph_hooks.ceph.dirs_need_ownership_update') @patch('ceph_hooks.os.path.exists') @@ -30,10 +20,12 @@ def test_check_for_upgrade(self, roll_osd_cluster, hookenv, dirs_need_ownership_update.return_value = False exists.return_value = True version.side_effect = ['firefly', 'hammer'] - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] + + self.test_config.set_previous('source', "cloud:trusty-juno") + self.test_config.set('source', 'cloud:trusty-kilo') + self.test_config.set('key', 'key') + + hookenv.config.side_effect = self.test_config check_for_upgrade() roll_osd_cluster.assert_called_with(new_version='hammer', @@ -75,12 +67,58 @@ def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, version, exists): exists.return_value = False version.side_effect = ['firefly', 'hammer'] - previous_mock = MagicMock().return_value - previous_mock.previous.return_value = "cloud:trusty-juno" - hookenv.config.side_effect = [previous_mock, - config_side_effect('source')] + + self.test_config.set_previous('source', "cloud:trusty-juno") + self.test_config.set('source', 'cloud:trusty-kilo') + self.test_config.set('key', 'key') + + hookenv.config.side_effect = self.test_config check_for_upgrade() roll_monitor_cluster.assert_not_called() exists.assert_called_with( "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") + + @patch('ceph_hooks.os.path.exists') + @patch('ceph_hooks.ceph.dirs_need_ownership_update') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source, + dirs_need_ownership_update, + exists): + exists.return_value = True + dirs_need_ownership_update.return_value = False + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + self.test_config.set('key', 'some-key') + self.test_config.set_previous('source', 'cloud:xenial-pike') + self.test_config.set('source', 'cloud:xenial-queens') + check_for_upgrade() + roll_monitor_cluster.assert_not_called() + 
add_source.assert_called_with('cloud:xenial-queens', 'some-key') + + @patch('ceph_hooks.os.path.exists') + @patch('ceph_hooks.ceph.dirs_need_ownership_update') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source, + dirs_need_ownership_update, + exists): + exists.return_value = True + dirs_need_ownership_update.return_value = False + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + self.test_config.set('key', 'some-key') + self.test_config.set_previous('source', 'cloud:bionic-rocky') + self.test_config.set('source', 'cloud:bionic-stein') + check_for_upgrade() + roll_monitor_cluster.assert_not_called() + add_source.assert_called_with('cloud:bionic-stein', 'some-key') diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index 90db851e..639552e2 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -67,10 +67,10 @@ def get_default_config(): class CharmTestCase(unittest.TestCase): - def setUp(self, obj, patches): + def setUp(self, obj=None, patches=None): super(CharmTestCase, self).setUp() - self.patches = patches - self.obj = obj + self.patches = patches or [] + self.obj = obj or [] self.test_config = TestConfig() self.test_relation = TestRelation() self.patch_all() @@ -90,6 +90,18 @@ class TestConfig(object): def __init__(self): self.config = get_default_config() + self.config_changed = {} + self.config_changed.setdefault(False) + self._previous = get_default_config() + + def __call__(self, key=None): + if key: + return self[key] + else: + return self + + def __getitem__(self, item): + return self.config[item] def get(self, attr=None): if not attr: @@ -107,6 +119,18 @@ def set(self, attr, value): raise KeyError self.config[attr] = value + def changed(self, attr): + return self.config_changed[attr] + + def set_changed(self, attr, changed=True): + self.config_changed[attr] = changed + + def set_previous(self, key, value): + self._previous[key] = value + + def previous(self, key): + return self._previous[key] + class TestRelation(object): From f768498d7bc94910ad8cec7515cf6a8b6c113e1e Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 1 Feb 2019 13:24:55 +0000 Subject: [PATCH 1649/2699] Add support for tls-certificates relation Add support for the charm to request and receive certificates from the tls-certificates relation. 
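In outline, the exchange works as follows (condensed from the handlers added below, assuming the hooks/relation_set/configure_https names from the surrounding hooks.py; the payload handling lives in charmhelpers.contrib.openstack.cert_utils, and the CA on the other end of the relation is typically a charm such as vault):

    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request,
        process_certificates,
    )

    @hooks.hook('certificates-relation-joined')
    def certs_joined(relation_id=None):
        # Publish a certificate request covering this unit's
        # addresses; config-changed re-fires this hook so VIP changes
        # are reflected in the request.
        relation_set(relation_id=relation_id,
                     relation_settings=get_certificate_request())

    @hooks.hook('certificates-relation-changed')
    def certs_changed(relation_id=None, unit=None):
        # Install any certificates returned for 'ceph-radosgw' and
        # re-run the HTTPS configuration; in the patch this runs under
        # restart_on_change() so affected services are bounced.
        process_certificates('ceph-radosgw', relation_id, unit)
        configure_https()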
Change-Id: I821ad15aa6af7eaf9d22a00e7d3fb79611d4b6b5 Closes-Bug: 1776643 --- .../hooks/certificates-relation-broken | 1 + .../hooks/certificates-relation-changed | 1 + .../hooks/certificates-relation-departed | 1 + .../hooks/certificates-relation-joined | 1 + ceph-radosgw/hooks/hooks.py | 28 ++++++++++++ ceph-radosgw/metadata.yaml | 2 + ceph-radosgw/unit_tests/test_hooks.py | 43 +++++++++++++++++-- 7 files changed, 74 insertions(+), 3 deletions(-) create mode 120000 ceph-radosgw/hooks/certificates-relation-broken create mode 120000 ceph-radosgw/hooks/certificates-relation-changed create mode 120000 ceph-radosgw/hooks/certificates-relation-departed create mode 120000 ceph-radosgw/hooks/certificates-relation-joined diff --git a/ceph-radosgw/hooks/certificates-relation-broken b/ceph-radosgw/hooks/certificates-relation-broken new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/certificates-relation-broken @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/certificates-relation-changed b/ceph-radosgw/hooks/certificates-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/certificates-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/certificates-relation-departed b/ceph-radosgw/hooks/certificates-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/certificates-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/certificates-relation-joined b/ceph-radosgw/hooks/certificates-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/certificates-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 6c37e534..b1d79d84 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -88,6 +88,11 @@ from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +from charmhelpers.contrib.openstack.cert_utils import ( + get_certificate_request, + process_certificates, +) + hooks = Hooks() CONFIGS = register_configs() NSS_DIR = '/var/lib/ceph/nss' @@ -171,6 +176,10 @@ def _config_changed(): for r_id in relation_ids('ha'): ha_relation_joined(r_id) + # Refire certificates relations for VIP changes + for r_id in relation_ids('certificates'): + certs_joined(r_id) + CONFIGS.write_all() configure_https() @@ -283,6 +292,9 @@ def _cluster_changed(): CONFIGS.write_all() for r_id in relation_ids('identity-service'): identity_joined(relid=r_id) + for r_id in relation_ids('certificates'): + for unit in related_units(r_id): + certs_changed(r_id, unit) _cluster_changed() @@ -364,6 +376,22 @@ def post_series_upgrade(): resume_unit_helper, CONFIGS) +@hooks.hook('certificates-relation-joined') +def certs_joined(relation_id=None): + relation_set( + relation_id=relation_id, + relation_settings=get_certificate_request()) + + +@hooks.hook('certificates-relation-changed') +def certs_changed(relation_id=None, unit=None): + @restart_on_change(restart_map(), stopstart=True) + def _certs_changed(): + process_certificates('ceph-radosgw', relation_id, unit) + configure_https() + _certs_changed() + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index fa6aee39..b46e48a3 100644 --- 
a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -29,6 +29,8 @@ requires: ha: interface: hacluster scope: container + certificates: + interface: tls-certificates provides: nrpe-external-master: interface: nrpe-external-master diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index e01abadc..7a9e2675 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -61,6 +61,8 @@ 'restart_map', 'systemd_based_radosgw', 'request_per_unit_key', + 'get_certificate_request', + 'process_certificates', ] @@ -91,13 +93,19 @@ def test_install(self): self.enable_pocket.assert_called_with('multiverse') self.os.makedirs.called_with('/var/lib/ceph/nss') + @patch.object(ceph_hooks, 'certs_joined') @patch.object(ceph_hooks, 'update_nrpe_config') - def test_config_changed(self, update_nrpe_config): + def test_config_changed(self, update_nrpe_config, mock_certs_joined): _install_packages = self.patch('install_packages') + _relations = { + 'certificates': ['certificates:1'] + } + self.relation_ids.side_effect = lambda name: _relations.get(name, []) ceph_hooks.config_changed() self.assertTrue(_install_packages.called) self.CONFIGS.write_all.assert_called_with() update_nrpe_config.assert_called_with() + mock_certs_joined.assert_called_once_with('certificates:1') @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) @@ -251,12 +259,22 @@ def test_cluster_joined(self): 'internal-address': '10.0.1.1', 'private-address': '10.0.3.1'})]) - def test_cluster_changed(self): + @patch.object(ceph_hooks, 'certs_changed') + def test_cluster_changed(self, mock_certs_changed): _id_joined = self.patch('identity_joined') - self.relation_ids.return_value = ['rid'] + _relations = { + 'identity-service': ['rid'], + 'certificates': ['certificates:1'], + } + self.relation_ids.side_effect = lambda name: _relations.get(name) + self.related_units.return_value = ['vault/0', 'vault/1'] ceph_hooks.cluster_changed() self.CONFIGS.write_all.assert_called_with() _id_joined.assert_called_with(relid='rid') + mock_certs_changed.assert_has_calls([ + call('certificates:1', 'vault/0'), + call('certificates:1', 'vault/1') + ]) def test_ha_relation_joined(self): self.generate_ha_relation_data.return_value = { @@ -274,3 +292,22 @@ def test_ha_relation_changed(self): self.relation_ids.return_value = ['rid'] ceph_hooks.ha_relation_changed() _id_joined.assert_called_with(relid='rid') + + def test_certs_joined(self): + self.get_certificate_request.return_value = {'foo': 'baa'} + ceph_hooks.certs_joined('certificates:1') + self.relation_set.assert_called_once_with( + relation_id='certificates:1', + relation_settings={'foo': 'baa'} + ) + self.get_certificate_request.assert_called_once_with() + + @patch.object(ceph_hooks, 'configure_https') + def test_certs_changed(self, mock_configure_https): + ceph_hooks.certs_changed('certificates:1', 'vault/0') + self.process_certificates.assert_called_once_with( + 'ceph-radosgw', + 'certificates:1', + 'vault/0' + ) + mock_configure_https.assert_called_once_with() From 28874af1739246ec89d7cc2d0d536eb4a3ef55c1 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 5 Feb 2019 12:34:47 +0100 Subject: [PATCH 1650/2699] Ensure we populate osd-devices with existing devices If an older version of ceph-osd is deployed and then upgraded to a version that keeps track of bootstrapped OSDs, then the list of osd-devices never gets updated with the pre-existing devices. 
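The fix is mostly bookkeeping; condensed, the new control flow in osdize_dev() looks like the sketch below. It is a paraphrase of the hunk that follows (which also handles the bluestore and LUKS cases), with is_osd_disk() being the module's existing helper.

    from charmhelpers.core.unitdata import kv
    from charmhelpers.contrib.storage.linux.utils import is_device_mounted

    def osdize_dev_sketch(dev):
        db = kv()
        osd_devices = db.get('osd-devices', [])
        try:
            if dev in osd_devices:
                return              # already tracked by the charm
            if is_osd_disk(dev):    # helper defined earlier in ceph/utils.py
                if is_device_mounted(dev):
                    # A pre-existing, in-service OSD from before the
                    # upgrade: adopt it into the charm's records.
                    osd_devices.append(dev)
                return              # never re-initialize such a device
            # ... remaining guards plus the ceph-volume/ceph-disk
            # initialization go here; append to osd_devices only on
            # success ...
        finally:
            # Persist on every exit path, success and failure alike.
            db.set('osd-devices', osd_devices)
            db.flush()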
This change allows us to add existing, mounted Ceph OSDs to the osd-devices entry in the local KV storage. Change-Id: I17fab658511275f1dde15683ef296d4c72e7980e Closes-Bug: #1814597 Depends-On: I940b108d914b39b55013a4617c3d17ff7122df60 --- ceph-osd/lib/ceph/utils.py | 129 +++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 62 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 08e627c4..5bff375d 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1442,77 +1442,82 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, db = kv() osd_devices = db.get('osd-devices', []) - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return - - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return + try: + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return - if is_osd_disk(dev): - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return + if is_osd_disk(dev): + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + if is_device_mounted(dev): + osd_devices.append(dev) + return - if is_active_bluestore_device(dev): - log('{} is in use as an active bluestore block device,' - ' skipping.'.format(dev)) - return + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return - if is_mapped_luks_device(dev): - log('{} is a mapped LUKS device,' - ' skipping.'.format(dev)) - return + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + osd_devices.append(dev) + return - if cmp_pkgrevno('ceph', '12.2.4') >= 0: - cmd = _ceph_volume(dev, - osd_journal, - encrypt, - bluestore, - key_manager) - else: - cmd = _ceph_disk(dev, - osd_format, - osd_journal, - encrypt, - bluestore) + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return - try: - status_set('maintenance', 'Initializing device {}'.format(dev)) - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - try: - lsblk_output = subprocess.check_output( - ['lsblk', '-P']).decode('UTF-8') - except subprocess.CalledProcessError as e: - log("Couldn't get lsblk output: {}".format(e), ERROR) - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), DEBUG) + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore, + key_manager) else: - log('Unable to initialize device: {}'.format(dev), ERROR) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), WARNING) - raise + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) - # NOTE: Record processing of device only on success to ensure that - # the charm only tries to initialize a device of OSD usage - # once during its lifetime. 
- osd_devices.append(dev) - db.set('osd-devices', osd_devices) - db.flush() + try: + status_set('maintenance', 'Initializing device {}'.format(dev)) + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Couldn't get lsblk output: {}".format(e), ERROR) + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) + raise + + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + finally: + db.set('osd-devices', osd_devices) + db.flush() def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): From 125546bb29bb63b05fcf3db8e83b2ae92f319547 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 6 Feb 2019 08:00:12 +0100 Subject: [PATCH 1651/2699] Use `ceph-rbd-mirror` interface, move boilerplate upstream Make use of newly created `ceph` layer and `CephCharm` class added to `charms.openstack`. --- ceph-rbd-mirror/src/config.yaml | 24 +-------- ceph-rbd-mirror/src/layer.yaml | 11 +--- .../lib/charm/openstack/ceph_rbd_mirror.py | 20 +++---- ceph-rbd-mirror/src/metadata.yaml | 6 ++- .../src/reactive/ceph_rbd_mirror_handlers.py | 30 ++++++++++- .../src/tests/bundles/bionic-queens.yaml | 53 +++++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 15 ++++++ ceph-rbd-mirror/tox.ini | 1 + 8 files changed, 110 insertions(+), 50 deletions(-) create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml create mode 100644 ceph-rbd-mirror/src/tests/tests.yaml diff --git a/ceph-rbd-mirror/src/config.yaml b/ceph-rbd-mirror/src/config.yaml index beea7275..a985d462 100644 --- a/ceph-rbd-mirror/src/config.yaml +++ b/ceph-rbd-mirror/src/config.yaml @@ -1,23 +1 @@ -options: - source: - default: distro - type: string - description: | - Repository from which to install Ceph - - May be one of the following: - - distro (default) - ppa:somecustom/ppa (PPA name must include UCA OpenStack Release name) - deb url sources entry|key id - or a supported Ubuntu Cloud Archive pocket. - - Supported Ubuntu Cloud Archive pockets include: - - cloud:xenial-pike - cloud:xenial-queens - cloud:bionic-rocky - - Note that updating this setting to a source that is known to - provide a later version of Ceph will trigger a software - upgrade. 
+options: {} diff --git a/ceph-rbd-mirror/src/layer.yaml b/ceph-rbd-mirror/src/layer.yaml index b279f0d4..59aee36b 100644 --- a/ceph-rbd-mirror/src/layer.yaml +++ b/ceph-rbd-mirror/src/layer.yaml @@ -1,16 +1,9 @@ includes: - - layer:leadership - - layer:openstack - - interface:ceph-client + - layer:ceph + - interface:ceph-rbd-mirror - interface:nrpe-external-master options: basic: use_venv: True include_system_packages: True repo: https://github.com/openstack/charm-ceph-rbd-mirror -config: - deletes: - - debug - - verbose - - use-internal-endpoints - - ssl_ca diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 6d5a7c85..e86f204b 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -12,30 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import collections - import charms_openstack.charm import charms_openstack.adapters -class CephRBDMirrorCharm(charms_openstack.charm.OpenStackCharm): - # Override source config key to be compatible with the other Ceph charms - source_config_key = 'source' - +class CephRBDMirrorCharm(charms_openstack.charm.CephCharm): # We require Ceph 12.2 Luminous or later for HA support in the Ceph # rbd-mirror daemon. Luminous appears in UCA at pike. release = 'pike' name = 'ceph-rbd-mirror' packages = ['rbd-mirror'] python_version = 3 - required_relations = ['ceph-cluster'] - release_pkg = 'rbd-mirror' - package_codenames = { - 'rbd-mirror': collections.OrderedDict([ - ('12', 'pike'), - ('13', 'rocky'), - ]), - } + required_relations = ['ceph-local', 'ceph-remote'] + + def config_changed(self): + """Check for upgrade.""" + self.upgrade_if_available(None) def install(self): """We override install function to configure source before installing diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index a2f3a8d7..b94a0a33 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -28,5 +28,7 @@ provides: interface: nrpe-external-master scope: container requires: - ceph-cluster: - interface: ceph-client + ceph-local: + interface: ceph-rbd-mirror + ceph-remote: + interface: ceph-rbd-mirror diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 79bc51ce..ff9f0cd1 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -26,12 +26,38 @@ charm.use_defaults( 'charm.installed', 'config.changed', - 'update-status') + 'update-status', + 'upgrade-charm') -@reactive.when('ceph-cluster.connected') +@reactive.when_all('ceph-local.connected', 'ceph-remote.connected') +@reactive.when_not_all('ceph-local.available', 'ceph-remote.available') def ceph_connected(): + for flag in ('ceph-local.connected', 'ceph-remote.connected'): + endpoint = reactive.relations.endpoint_from_flag(flag) + endpoint.request_key() + with charm.provide_charm_instance() as charm_instance: ch_core.hookenv.log('Ceph connected, charm_instance @ {}' .format(charm_instance), level=ch_core.hookenv.DEBUG) + charm_instance.assess_status() + + +@reactive.when_all('ceph-local.available', 'ceph-remote.available') +def ceph_available(): + mon_hosts = {} + for flag in ('ceph-local.available', 'ceph-remote.available'): + endpoint = reactive.relations.endpoint_from_flag(flag) + 
mon_hosts[endpoint.endpoint_name] = endpoint.mon_hosts + for relation in endpoint.relations: + for unit in relation.units: + ch_core.hookenv.log('{}: "{}"'.format(flag, unit.received), + level=ch_core.hookenv.INFO) + + with charm.provide_charm_instance() as charm_instance: + ch_core.hookenv.log('Ceph available, mon_hosts: "{}" ' + 'charm_instance @ {}' + .format(mon_hosts, charm_instance), + level=ch_core.hookenv.DEBUG) + charm_instance.assess_status() diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..9644c2d3 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -0,0 +1,53 @@ +series: bionic +applications: + ceph-mon: + charm: cs:~fnordahl/ceph-mon-rbd-mirror + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + storage: + osd-devices: cinder,20G + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + ceph-mon-b: + charm: cs:~fnordahl/ceph-mon-rbd-mirror + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + storage: + osd-devices: cinder,20G + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro +relations: +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml new file mode 100644 index 00000000..7cf75ff9 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -0,0 +1,15 @@ +charm_name: ceph-rbd-mirror +smoke_bundles: +- bionic-queens +gate_bundles: +- xenial-pike +- xenial-queens +- bionic-queens +- bionic-rocky +- cosmic-rocky +dev_bundles: +- disco-stein +configure: +- zaza.charm_tests.noop.setup.basic_setup +tests: +- zaza.charm_tests.noop.tests.NoopTest diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 266fb541..41ba42d8 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -10,6 +10,7 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux LAYER_PATH={toxinidir}/layers + INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build passenv = http_proxy https_proxy INTERFACE_PATH install_command = From 2340ea4075b2c33cb463cfd6d6b8b840c7c62b8c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 6 Feb 2019 14:47:19 -0600 Subject: [PATCH 1652/2699] Update functional test definitions Remove trusty-icehouse test combo from gate, leaving trusty-mitaka if/where it exists. 
Change-Id: I1defa1fcaf792afa64b2f52ed486a8881c737a0f --- ceph-mon/tests/gate-basic-trusty-icehouse | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100755 ceph-mon/tests/gate-basic-trusty-icehouse diff --git a/ceph-mon/tests/gate-basic-trusty-icehouse b/ceph-mon/tests/gate-basic-trusty-icehouse deleted file mode 100755 index 8a987930..00000000 --- a/ceph-mon/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty') - deployment.run_tests() From ebe4264e9e23ce9e2178356740fc783eb10fe417 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 6 Feb 2019 14:47:30 -0600 Subject: [PATCH 1653/2699] Update functional test definitions Remove trusty-icehouse test combo from gate, leaving trusty-mitaka if/where it exists. Change-Id: I12fb4a889e5358aac76e8c7d8f1663976e2b7185 --- ceph-proxy/tests/gate-basic-trusty-icehouse | 9 --------- 1 file changed, 9 deletions(-) delete mode 100755 ceph-proxy/tests/gate-basic-trusty-icehouse diff --git a/ceph-proxy/tests/gate-basic-trusty-icehouse b/ceph-proxy/tests/gate-basic-trusty-icehouse deleted file mode 100755 index c36fd00c..00000000 --- a/ceph-proxy/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on trusty-icehouse.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty') - deployment.run_tests() From 686e6d2a4cd52fc0a31f3804c308bcae9e74cbef Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 6 Feb 2019 14:47:38 -0600 Subject: [PATCH 1654/2699] Update functional test definitions Remove trusty-icehouse test combo from gate, leaving trusty-mitaka if/where it exists. Change-Id: I749698474b0647778b4e4850db234179bb5cec42 --- ceph-radosgw/tests/gate-basic-trusty-icehouse | 23 ------------------- 1 file changed, 23 deletions(-) delete mode 100755 ceph-radosgw/tests/gate-basic-trusty-icehouse diff --git a/ceph-radosgw/tests/gate-basic-trusty-icehouse b/ceph-radosgw/tests/gate-basic-trusty-icehouse deleted file mode 100755 index 46066a7d..00000000 --- a/ceph-radosgw/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on trusty-icehouse.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty') - deployment.run_tests() From 0a01e99448ef004610615a84f66a962c14e0ecdc Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 6 Feb 2019 14:48:32 -0600 Subject: [PATCH 1655/2699] Update functional test definitions Remove trusty-icehouse test combo from gate, leaving trusty-mitaka if/where it exists. Change-Id: I478fdf4e9ab7a25ccd35c611f623cefc116f672c --- ceph-osd/tests/bundles/trusty-icehouse.yaml | 126 -------------------- ceph-osd/tests/tests.yaml | 1 - 2 files changed, 127 deletions(-) delete mode 100644 ceph-osd/tests/bundles/trusty-icehouse.yaml diff --git a/ceph-osd/tests/bundles/trusty-icehouse.yaml b/ceph-osd/tests/bundles/trusty-icehouse.yaml deleted file mode 100644 index 61ec8742..00000000 --- a/ceph-osd/tests/bundles/trusty-icehouse.yaml +++ /dev/null @@ -1,126 +0,0 @@ -series: trusty -applications: - ceph-osd: - charm: ceph-osd - num_units: 3 - series: trusty - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- 
- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 1833e429..a2b6aae3 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,6 +1,5 @@ charm_name: ceph-osd gate_bundles: - - trusty-icehouse - trusty-mitaka - xenial-mitaka - xenial-ocata From 07761fbb1bbed47d29a042039253050972ca4068 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Jan 2019 09:42:35 +0000 Subject: [PATCH 1656/2699] Switch functional testing to zaza Drop amulet tests and create bundles for all target Ubuntu/OpenStack series combinations in line with current best practice. Zaza tests are not a direct translation of the Amulet tests; the new tests focus on Ceph RADOS Gateway and its integration with keystone rather than deploying compute, image and block services (which do not make use of the ceph-radosgw charm). 
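For orientation, zaza drives each functional test run from a per-series bundle plus a tests.yaml index. Below is a minimal illustration in the same shape as the ceph-rbd-mirror tests.yaml earlier in this series; the authoritative bundle lists for ceph-radosgw are in this patch's tests.yaml hunk, and the noop test class is a placeholder for the real radosgw/keystone checks.

    # Illustrative tests.yaml layout only; bundle names are taken from
    # the files this patch creates under ceph-radosgw/tests/bundles/.
    charm_name: ceph-radosgw
    smoke_bundles:
    - bionic-queens
    gate_bundles:
    - trusty-mitaka
    - xenial-mitaka
    - xenial-queens
    - bionic-queens
    - bionic-rocky
    dev_bundles:
    - cosmic-rocky
    tests:
    - zaza.charm_tests.noop.tests.NoopTest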
Change-Id: I41914df024eb02e5a555bc0e44993e09d310933e --- ceph-radosgw/test-requirements.txt | 9 +- ceph-radosgw/tests/README.md | 9 - ceph-radosgw/tests/basic_deployment.py | 625 ------------------ ceph-radosgw/tests/bundles/bionic-queens.yaml | 42 ++ ceph-radosgw/tests/bundles/bionic-rocky.yaml | 42 ++ ceph-radosgw/tests/bundles/cosmic-rocky.yaml | 42 ++ .../tests/bundles/trusty-icehouse.yaml | 110 +++ ceph-radosgw/tests/bundles/trusty-mitaka.yaml | 42 ++ ceph-radosgw/tests/bundles/xenial-mitaka.yaml | 42 ++ ceph-radosgw/tests/bundles/xenial-ocata.yaml | 42 ++ ceph-radosgw/tests/bundles/xenial-pike.yaml | 42 ++ ceph-radosgw/tests/bundles/xenial-queens.yaml | 42 ++ ceph-radosgw/tests/dev-basic-cosmic-rocky | 23 - ceph-radosgw/tests/gate-basic-bionic-queens | 23 - ceph-radosgw/tests/gate-basic-bionic-rocky | 25 - ceph-radosgw/tests/gate-basic-trusty-icehouse | 23 - ceph-radosgw/tests/gate-basic-trusty-mitaka | 25 - ceph-radosgw/tests/gate-basic-xenial-mitaka | 23 - ceph-radosgw/tests/gate-basic-xenial-ocata | 25 - ceph-radosgw/tests/gate-basic-xenial-pike | 25 - ceph-radosgw/tests/gate-basic-xenial-queens | 25 - ceph-radosgw/tests/tests.yaml | 34 +- ceph-radosgw/tox.ini | 48 +- 23 files changed, 474 insertions(+), 914 deletions(-) delete mode 100644 ceph-radosgw/tests/README.md delete mode 100644 ceph-radosgw/tests/basic_deployment.py create mode 100644 ceph-radosgw/tests/bundles/bionic-queens.yaml create mode 100644 ceph-radosgw/tests/bundles/bionic-rocky.yaml create mode 100644 ceph-radosgw/tests/bundles/cosmic-rocky.yaml create mode 100644 ceph-radosgw/tests/bundles/trusty-icehouse.yaml create mode 100644 ceph-radosgw/tests/bundles/trusty-mitaka.yaml create mode 100644 ceph-radosgw/tests/bundles/xenial-mitaka.yaml create mode 100644 ceph-radosgw/tests/bundles/xenial-ocata.yaml create mode 100644 ceph-radosgw/tests/bundles/xenial-pike.yaml create mode 100644 ceph-radosgw/tests/bundles/xenial-queens.yaml delete mode 100755 ceph-radosgw/tests/dev-basic-cosmic-rocky delete mode 100755 ceph-radosgw/tests/gate-basic-bionic-queens delete mode 100755 ceph-radosgw/tests/gate-basic-bionic-rocky delete mode 100755 ceph-radosgw/tests/gate-basic-trusty-icehouse delete mode 100755 ceph-radosgw/tests/gate-basic-trusty-mitaka delete mode 100755 ceph-radosgw/tests/gate-basic-xenial-mitaka delete mode 100755 ceph-radosgw/tests/gate-basic-xenial-ocata delete mode 100755 ceph-radosgw/tests/gate-basic-xenial-pike delete mode 100755 ceph-radosgw/tests/gate-basic-xenial-queens diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 2b2c0e11..bb01e1f6 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -6,11 +6,7 @@ coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 os-testr>=0.4.1 -requests>=2.18.4 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -amulet>=1.14.3,<2.0;python_version=='2.7' -bundletester>=0.6.1,<1.0;python_version=='2.7' +requests==2.18.4 python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 python-glanceclient>=1.1.0 @@ -23,7 +19,6 @@ python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -# END: Amulet OpenStack Charm Helper Requirements -# NOTE: workaround for 14.04 pip/tox pytz pyudev # for ceph-* charm unit tests (not mocked?) 
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' diff --git a/ceph-radosgw/tests/README.md b/ceph-radosgw/tests/README.md deleted file mode 100644 index 046be7fb..00000000 --- a/ceph-radosgw/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Overview - -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -For full details on functional testing of OpenStack charms please refer to -the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) -section of the OpenStack Charm Guide. diff --git a/ceph-radosgw/tests/basic_deployment.py b/ceph-radosgw/tests/basic_deployment.py deleted file mode 100644 index b8de999b..00000000 --- a/ceph-radosgw/tests/basic_deployment.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import keystoneclient -from keystoneclient.v3 import client as keystone_client_v3 -import swiftclient -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OpenStackAmuletUtils, - DEBUG, - # ERROR -) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephRadosGwBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph-radosgw deployment.""" - - def __init__(self, series=None, openstack=None, source=None, stable=False): - """Deploy the entire test environment.""" - super(CephRadosGwBasicDeployment, self).__init__(series, openstack, - source, stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = [] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self.d.sentry.wait() - self._initialize_tests() - - def _add_services(self): - """Add services - - Add the services that we're testing, where ceph-radosgw is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). 
- """ - this_service = {'name': 'ceph-radosgw'} - other_services = [ - {'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3, - 'storage': {'osd-devices': 'cinder,10G'}}, - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, - {'name': 'nova-cloud-controller'}, - {'name': 'keystone'}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}, - {'name': 'cinder-ceph'}, - ] - super(CephRadosGwBasicDeployment, self)._add_services(this_service, - other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'nova-compute:amqp': 'rabbitmq-server:amqp', - 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph-mon:client', - 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', - 'nova-cloud-controller:identity-service': 'keystone:' - 'identity-service', - 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', - 'nova-cloud-controller:cloud-compute': 'nova-compute:' - 'cloud-compute', - 'nova-cloud-controller:image-service': 'glance:image-service', - - 'keystone:shared-db': 'percona-cluster:shared-db', - 'glance:shared-db': 'percona-cluster:shared-db', - 'glance:identity-service': 'keystone:identity-service', - 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'percona-cluster:shared-db', - 'cinder:identity-service': 'keystone:identity-service', - 'cinder:amqp': 'rabbitmq-server:amqp', - 'cinder:image-service': 'glance:image-service', - 'cinder-ceph:storage-backend': 'cinder:storage-backend', - 'cinder-ceph:ceph': 'ceph-mon:client', - 'ceph-radosgw:mon': 'ceph-mon:radosgw', - 'ceph-radosgw:identity-service': 'keystone:identity-service', - 'ceph-osd:mon': 'ceph-mon:osd', - } - super(CephRadosGwBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - keystone_config = {'admin-password': 'openstack', - 'admin-token': 'ubuntutesting'} - pxc_config = { - 'dataset-size': '25%', - 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', - } - - cinder_config = {'block-device': 'None', 'glance-api-version': '2'} - ceph_config = { - 'monitor-count': '3', - 'auth-supported': 'none', - } - ceph_osd_config = { - 'osd-devices': '/srv/ceph /dev/test-non-existent' - } - - nova_cc_config = {} - if self._get_openstack_release() >= self.xenial_ocata: - nova_cc_config['network-manager'] = 'Neutron' - - configs = {'keystone': keystone_config, - 'percona-cluster': pxc_config, - 'cinder': cinder_config, - 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config, - 'nova-cloud-controller': nova_cc_config, - } - super(CephRadosGwBasicDeployment, self)._configure_services(configs) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.pxc_sentry = self.d.sentry['percona-cluster'][0] - self.keystone_sentry = self.d.sentry['keystone'][0] - self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] - self.nova_sentry = self.d.sentry['nova-compute'][0] - self.glance_sentry = self.d.sentry['glance'][0] - self.cinder_sentry = self.d.sentry['cinder'][0] - self.ceph0_sentry = self.d.sentry['ceph-mon'][0] - self.ceph1_sentry = self.d.sentry['ceph-mon'][1] - self.ceph2_sentry = self.d.sentry['ceph-mon'][2] - self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0] - self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] - 
self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] - self.ceph_radosgw_sentry = self.d.sentry['ceph-radosgw'][0] - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - - # Authenticate admin with keystone - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin(self.keystone) - - # Authenticate radosgw user using swift api - keystone_ip = self.keystone_sentry.info['public-address'] - keystone_relation = self.keystone_sentry.relation( - 'identity-service', - 'ceph-radosgw:identity-service') - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - self.demo_project = 'demoProject' - self.demo_domain = 'demoDomain' - - if self._get_openstack_release() >= self.xenial_queens: - self.keystone_v3 = self.keystone - self.create_users_v3() - self.demo_user_session, _ = u.get_keystone_session( - keystone_ip, - self.demo_user, - 'password', - api_version=3, - user_domain_name=self.demo_domain, - project_domain_name=self.demo_domain, - project_name=self.demo_project - ) - self.keystone_demo = keystone_client_v3.Client( - session=self.demo_user_session) - self.service_session, _ = u.get_keystone_session( - keystone_ip, - keystone_relation['service_username'], - keystone_relation['service_password'], - api_version=3, - user_domain_name=keystone_relation['service_domain'], - project_domain_name=keystone_relation['service_domain'], - project_name=keystone_relation['service_tenant'] - ) - else: - self.keystone_v3 = None - self.create_users_v2() - # Authenticate demo user with keystone - self.demo_user_session, _ = u.get_keystone_session( - keystone_ip, - self.demo_user, - 'password', - api_version=2, - project_name=self.demo_tenant, - ) - self.keystone_demo = keystoneclient.client.Client( - session=self.demo_user_session) - - self.service_session, _ = u.get_keystone_session( - keystone_ip, - keystone_relation['service_username'], - keystone_relation['service_password'], - api_version=2, - project_name=keystone_relation['service_tenant'] - ) - self.swift = swiftclient.Connection(session=self.service_session) - - def create_users_v3(self): - try: - self.keystone.projects.find(name=self.demo_project) - except keystoneclient.exceptions.NotFound: - domain = self.keystone.domains.create( - self.demo_domain, - description='Demo Domain', - enabled=True - ) - project = self.keystone.projects.create( - self.demo_project, - domain, - description='Demo Project', - enabled=True, - ) - user = self.keystone.users.create( - self.demo_user, - domain=domain.id, - project=self.demo_project, - password='password', - email='demov3@demo.com', - description='Demo', - enabled=True) - role = self.keystone.roles.find(name='Member') - self.keystone.roles.grant( - role.id, - user=user.id, - project=project.id) - - def create_users_v2(self): - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - role = self.keystone.roles.find(name='Member') - user = 
self.keystone.users.find(name=self.demo_user) - tenant = self.keystone.tenants.find(name=self.demo_tenant) - self.keystone.roles.add_user_role( - user=user.id, - role=role.id, - tenant=tenant.id) - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph_radosgw_sentry: {'radosgw': 1}, - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_services(self): - """Verify the expected services are running on the service units.""" - - if self._get_openstack_release() < self.xenial_mitaka: - services = {self.ceph_radosgw_sentry: ['radosgw-all']} - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_200_ceph_radosgw_ceph_relation(self): - """Verify the ceph-radosgw to ceph relation data.""" - u.log.debug('Checking ceph-radosgw:mon to ceph:radosgw ' - 'relation data...') - unit = self.ceph_radosgw_sentry - relation = ['mon', 'ceph-mon:radosgw'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph-radosgw to ceph-mon', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_201_ceph_radosgw_relation(self): - """Verify the ceph to ceph-radosgw relation data. - - At least one unit (the leader) must have all data provided by the ceph - charm. - """ - u.log.debug('Checking ceph0:radosgw radosgw:mon relation data...') - s_entries = [ - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - relation = ['radosgw', 'ceph-radosgw:mon'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'ceph-public-address': u.valid_ip, - } - - ret = [] - for unit in s_entries: - ret.append(u.validate_relation_data(unit, relation, expected)) - - if any(ret): - message = u.relation_error('ceph to ceph-radosgw', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_204_ceph_radosgw_keystone_relation(self): - """Verify the ceph-radosgw to keystone relation data.""" - u.log.debug('Checking ceph-radosgw to keystone id service ' - 'relation data...') - unit = self.ceph_radosgw_sentry - relation = ['identity-service', 'keystone:identity-service'] - expected = { - 'service': 'swift', - 'region': 'RegionOne', - 'public_url': u.valid_url, - 'internal_url': u.valid_url, - 'private-address': u.valid_ip, - 'requested_roles': 'Member,Admin', - 'admin_url': u.valid_url - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph-radosgw to keystone', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_205_keystone_ceph_radosgw_relation(self): - """Verify the keystone to ceph-radosgw relation data.""" - u.log.debug('Checking keystone to ceph-radosgw id service ' - 'relation data...') - unit = self.keystone_sentry - relation = ['identity-service', 'ceph-radosgw:identity-service'] - expected = { - 'service_protocol': 'http', - 'service_tenant': 'services', - 'admin_token': 'ubuntutesting', - 'service_password': u.not_null, - 'service_port': '5000', - 'auth_port': '35357', - 'auth_protocol': 'http', - 'private-address': u.valid_ip, - 'auth_host': u.valid_ip, - 'service_username': 'swift', - 'service_tenant_id': u.not_null, - 'service_host': u.valid_ip - } - - ret = 
u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('keystone to ceph-radosgw', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_300_ceph_radosgw_config(self): - """Verify the data in the ceph config file.""" - u.log.debug('Checking ceph config file data...') - unit = self.ceph_radosgw_sentry - conf = '/etc/ceph/ceph.conf' - expected = { - 'global': { - 'auth cluster required': 'none', - 'auth service required': 'none', - 'auth client required': 'none', - 'log to syslog': 'false', - 'err to syslog': 'false', - 'clog to syslog': 'false' - }, - } - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "ceph config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_302_cinder_rbd_config(self): - """Verify the cinder config file data regarding ceph.""" - u.log.debug('Checking cinder (rbd) config file data...') - unit = self.cinder_sentry - conf = '/etc/cinder/cinder.conf' - section_key = 'cinder-ceph' - expected = { - section_key: { - 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' - } - } - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "cinder (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_304_glance_rbd_config(self): - """Verify the glance config file data regarding ceph.""" - u.log.debug('Checking glance (rbd) config file data...') - unit = self.glance_sentry - conf = '/etc/glance/glance-api.conf' - config = { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - config['stores'] = ('glance.store.filesystem.Store,' - 'glance.store.http.Store,' - 'glance.store.rbd.Store') - section = 'glance_store' - else: - # Juno or earlier - section = 'DEFAULT' - - expected = {section: config} - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "glance (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_306_nova_rbd_config(self): - """Verify the nova config file data regarding ceph.""" - u.log.debug('Checking nova (rbd) config file data...') - unit = self.nova_sentry - conf = '/etc/nova/nova.conf' - expected = { - 'libvirt': { - 'rbd_user': 'nova-compute', - 'rbd_secret_uuid': u.not_null - } - } - for section, pairs in expected.items(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "nova (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_400_ceph_check_osd_pools(self): - """Check osd pools on all ceph units, expect them to be - identical, and expect specific pools to be present.""" - u.log.debug('Checking pools on ceph units...') - - expected_pools = self.get_ceph_expected_pools(radosgw=True) - - if self._get_openstack_release() >= self.trusty_mitaka: - non_rgw_pools = self.get_ceph_expected_pools() - _expected_pools = [] - for pool in expected_pools: - if pool not in non_rgw_pools: - # prepend zone name - _expected_pools.append('default%s' % (pool)) - - expected_pools = _expected_pools - - results = [] - sentries = [ - self.ceph_radosgw_sentry, - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - - # Check for presence of 
expected pools on each unit - u.log.debug('Expected pools: {}'.format(expected_pools)) - for sentry_unit in sentries: - pools = u.get_ceph_pools(sentry_unit) - results.append(pools) - - for expected_pool in expected_pools: - if expected_pool not in pools: - msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], - expected_pool)) - amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has (at least) the expected ' - 'pools.'.format(sentry_unit.info['unit_name'])) - - # Check that all units returned the same pool name:id data - ret = u.validate_list_of_identical_dicts(results) - if ret: - u.log.debug('Pool list results: {}'.format(results)) - msg = ('{}; Pool list results are not identical on all ' - 'ceph units.'.format(ret)) - amulet.raise_status(amulet.FAIL, msg=msg) - else: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - - def test_402_swift_api_connection(self): - """Simple api call to confirm basic service functionality""" - u.log.debug('Checking basic radosgw functionality via swift api...') - headers, containers = self.swift.get_account() - assert('content-type' in headers.keys()) - assert(containers == []) - - def test_403_swift_keystone_auth(self, api_version=2): - """Check Swift Object Storage functionlaity""" - u.log.debug('Check Swift Object Storage functionality (api_version={})' - ''.format(api_version)) - conn = swiftclient.Connection(session=self.keystone_demo.session) - u.log.debug('Create container') - container = 'demo-container' - try: - conn.put_container(container) - except swiftclient.exceptions.ClientException as e: - print("EXCEPTION {}".format(e.http_status)) - if e.http_status == 409: - # Ceph RadosGW is currently configured with a global namespace - # for container names. Make use of this to verify that we - # cannot create a container with a name already taken by a - # same username authenticated in different domain in the - # previous run of this function. If / when we support per - # tenant namespace this logic must be replaced. 
- u.log.debug('v3 user not allowed to overwrite previously ' - 'created container created by v2 user...OK') - container = 'demo-container-v3' - conn.put_container(container) - else: - raise(e) - - resp_headers, containers = conn.get_account() - if (len(containers) and 'name' in containers[0] and - containers[0]['name'] == container): - u.log.debug('OK') - else: - amulet.raise_status(amulet.FAIL, 'container not created {} {}' - ''.format(resp_headers, containers)) - - def test_403_swift_keystone_auth_v3(self): - if self._get_openstack_release() >= self.trusty_liberty: - self.test_403_swift_keystone_auth(api_version=3) - else: - u.log.debug('Skipping test for openstack_release < trusty_liberty') - - def test_498_radosgw_cmds_exit_zero(self): - """Check basic functionality of radosgw cli commands against - the ceph_radosgw unit.""" - sentry_units = [self.ceph_radosgw_sentry] - commands = [ - 'sudo radosgw-admin bucket list', - 'sudo radosgw-admin zone list', - 'sudo radosgw-admin metadata list', - 'sudo radosgw-admin gc list' - ] - - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - all ceph units.""" - sentry_units = [ - self.ceph_radosgw_sentry, - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_910_pause_and_resume(self): - """The services can be paused and resumed. """ - u.log.debug('Checking pause and resume actions...') - unit = self.ceph_radosgw_sentry - - assert u.status_get(unit)[0] == "active" - - action_id = u.run_action(unit, "pause") - assert u.wait_on_action(action_id), "Pause action failed." - assert u.status_get(unit)[0] == "maintenance" - - action_id = u.run_action(unit, "resume") - assert u.wait_on_action(action_id), "Resume action failed." - assert u.status_get(unit)[0] == "active" - u.log.debug('OK') - - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. 
diff --git a/ceph-radosgw/tests/bundles/bionic-queens.yaml b/ceph-radosgw/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..90652494 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-queens.yaml @@ -0,0 +1,42 @@ +options: + source: &source distro +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + series: bionic + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/bionic-rocky.yaml b/ceph-radosgw/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..aef5d5d2 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:bionic-rocky +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/cosmic-rocky.yaml b/ceph-radosgw/tests/bundles/cosmic-rocky.yaml new file mode 100644 index 00000000..64016872 --- /dev/null +++ b/ceph-radosgw/tests/bundles/cosmic-rocky.yaml @@ -0,0 +1,42 @@ +options: + source: &source distro +series: cosmic +applications: + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + series: cosmic + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/trusty-icehouse.yaml b/ceph-radosgw/tests/bundles/trusty-icehouse.yaml new file 
mode 100644 index 00000000..e087ab56 --- /dev/null +++ b/ceph-radosgw/tests/bundles/trusty-icehouse.yaml @@ -0,0 +1,110 @@ +options: + source: &source distro +series: trusty +applications: + ceph-radosgw: + charm: ceph-radosgw + series: trusty + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *source + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *source + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *source + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *source +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml new file mode 100644 index 00000000..23f4d66d --- /dev/null +++ b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:trusty-mitaka +series: trusty +applications: + ceph-radosgw: + charm: ceph-radosgw + series: trusty + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: 
cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 00000000..63e1a11d --- /dev/null +++ b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,42 @@ +options: + source: &source distro +series: xenial +applications: + ceph-radosgw: + charm: ceph-radosgw + series: xenial + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-ocata.yaml b/ceph-radosgw/tests/bundles/xenial-ocata.yaml new file mode 100644 index 00000000..7da69b18 --- /dev/null +++ b/ceph-radosgw/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:xenial-ocata +series: xenial +applications: + ceph-radosgw: + charm: ceph-radosgw + series: xenial + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-pike.yaml b/ceph-radosgw/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..27705325 --- /dev/null +++ b/ceph-radosgw/tests/bundles/xenial-pike.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:xenial-pike +series: xenial +applications: + ceph-radosgw: + charm: ceph-radosgw + series: xenial + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: 
cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-queens.yaml b/ceph-radosgw/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..37368b77 --- /dev/null +++ b/ceph-radosgw/tests/bundles/xenial-queens.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:xenial-queens +series: xenial +applications: + ceph-radosgw: + charm: ceph-radosgw + series: xenial + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/dev-basic-cosmic-rocky b/ceph-radosgw/tests/dev-basic-cosmic-rocky deleted file mode 100755 index dd93dacb..00000000 --- a/ceph-radosgw/tests/dev-basic-cosmic-rocky +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on cosmic-rocky.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='cosmic') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-bionic-queens b/ceph-radosgw/tests/gate-basic-bionic-queens deleted file mode 100755 index a1246845..00000000 --- a/ceph-radosgw/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-radosgw deployment on bionic-queens.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='bionic') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-bionic-rocky b/ceph-radosgw/tests/gate-basic-bionic-rocky deleted file mode 100755 index 8e758fba..00000000 --- a/ceph-radosgw/tests/gate-basic-bionic-rocky +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on bionic-rocky.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-trusty-icehouse b/ceph-radosgw/tests/gate-basic-trusty-icehouse deleted file mode 100755 index 46066a7d..00000000 --- a/ceph-radosgw/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on trusty-icehouse.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-trusty-mitaka b/ceph-radosgw/tests/gate-basic-trusty-mitaka deleted file mode 100755 index 6f8e1484..00000000 --- a/ceph-radosgw/tests/gate-basic-trusty-mitaka +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-radosgw deployment on trusty-mitaka.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-mitaka b/ceph-radosgw/tests/gate-basic-xenial-mitaka deleted file mode 100755 index aa5d2db2..00000000 --- a/ceph-radosgw/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on xenial-mitaka.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-ocata b/ceph-radosgw/tests/gate-basic-xenial-ocata deleted file mode 100755 index 9007db12..00000000 --- a/ceph-radosgw/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on xenial-ocata.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-pike b/ceph-radosgw/tests/gate-basic-xenial-pike deleted file mode 100755 index dbf86912..00000000 --- a/ceph-radosgw/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph-radosgw deployment on xenial-pike.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/ceph-radosgw/tests/gate-basic-xenial-queens b/ceph-radosgw/tests/gate-basic-xenial-queens deleted file mode 100755 index fff90006..00000000 --- a/ceph-radosgw/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph-radosgw deployment on xenial-queens.""" - -from basic_deployment import CephRadosGwBasicDeployment - -if __name__ == '__main__': - deployment = CephRadosGwBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index a03e7bad..8231399e 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,18 +1,16 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. 
-#python-packages: -reset_timeout: 600 +charm_name: ceph-radosgw +gate_bundles: + - trusty-icehouse + - trusty-mitaka + - xenial-mitaka + - xenial-ocata + - xenial-pike + - xenial-queens + - bionic-queens +smoke_bundles: + - bionic-queens +dev_bundles: + - bionic-rocky + - cosmic-rocky +tests: + - zaza.charm_tests.ceph.tests.CephRGWTest diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 1b27a4af..004005fa 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -15,6 +15,7 @@ install_command = commands = stestr run --slowest {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* +deps = -r{toxinidir}/test-requirements.txt [testenv:py27] basepython = python2.7 @@ -42,49 +43,20 @@ commands = flake8 {posargs} hooks unit_tests tests actions lib basepython = python3 commands = {posargs} -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy - -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy - -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy + functest-run-suite --keep-model -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-smoke] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-dev] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + functest-run-suite --keep-model --dev [flake8] ignore = E402,E226 From a91bee7f01370e80bc0965a72eea0afaaf67e4f1 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Feb 2019 15:28:41 -0600 Subject: [PATCH 1657/2699] Update pre-install hooks to fail on error The pre-install operations may fail, yet that failure is not elevated to the user. This masks the failure and makes early package install issues difficult to troubleshoot. If the basic pre-install script fails, the charm should not proceed to later hooks as the requirements may not be met. Hashbangs for bash should specify -e (errexit) on all of the pre-install bash scripts. 
Change-Id: I6e015c2e0a28f9b990bfd7b84a2317d339abbb4e Closes-bug: #1815243 Partial-bug: #1815231 --- ceph-mon/hooks/install | 2 +- ceph-mon/hooks/install_deps | 2 +- ceph-mon/hooks/upgrade-charm | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install index 9a2f9353..e8ad54b4 100755 --- a/ceph-mon/hooks/install +++ b/ceph-mon/hooks/install @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # ensure that the python3 bits are installed, whichever version of ubunut # is being installed. diff --git a/ceph-mon/hooks/install_deps b/ceph-mon/hooks/install_deps index bb600820..c480f29e 100755 --- a/ceph-mon/hooks/install_deps +++ b/ceph-mon/hooks/install_deps @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to ensure that python dependencies are installed before we get into # the python part of the hook execution diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm index 6f3d75b4..a454f76f 100755 --- a/ceph-mon/hooks/upgrade-charm +++ b/ceph-mon/hooks/upgrade-charm @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to ensure that old python bytecode isn't hanging around # after we upgrade the charm with newer libraries rm -rf **/*.pyc From a30c5803eb7765e6dfb4a4d72d058650e4f200bb Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Feb 2019 15:10:21 -0600 Subject: [PATCH 1658/2699] Update pre-install hooks to fail on error The pre-install operations may fail, yet that failure is not elevated to the user. This masks the failure and makes early package install issues difficult to troubleshoot. If the basic pre-install script fails, the charm should not proceed to later hooks as the requirements may not be met. Hashbangs for bash should specify -e (errexit) on all of the pre-install bash scripts. Change-Id: Ie7b99dfa4cbe00a03acf1b2cc2eeecc7f84fbe17 Closes-bug: #1815243 Partial-bug: #1815231 --- ceph-radosgw/hooks/install | 2 +- ceph-radosgw/hooks/install_deps | 2 +- ceph-radosgw/hooks/upgrade-charm | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/install b/ceph-radosgw/hooks/install index 015c1435..e9027a88 100755 --- a/ceph-radosgw/hooks/install +++ b/ceph-radosgw/hooks/install @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. diff --git a/ceph-radosgw/hooks/install_deps b/ceph-radosgw/hooks/install_deps index 506d9d64..4d06619a 100755 --- a/ceph-radosgw/hooks/install_deps +++ b/ceph-radosgw/hooks/install_deps @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Install required dependencies for charm runtime declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython') diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm index c1771bf0..14948004 100755 --- a/ceph-radosgw/hooks/upgrade-charm +++ b/ceph-radosgw/hooks/upgrade-charm @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Re-install dependencies to deal with py2->py3 switch for charm ./hooks/install_deps From 154f648696720de3219b93d97ec47fe7df04981a Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Feb 2019 15:35:02 -0600 Subject: [PATCH 1659/2699] Update pre-install hooks to fail on error The pre-install operations may fail, yet that failure is not elevated to the user. This masks the failure and makes early package install issues difficult to troubleshoot. If the basic pre-install script fails, the charm should not proceed to later hooks as the requirements may not be met. 
Hashbangs for bash should specify -e (errexit) on all of the pre-install bash scripts. Change-Id: If6ddd58fc6aeb1e9bf359a037a85a2ce2ae05d3c Closes-bug: #1815243 Partial-bug: #1815231 --- ceph-osd/hooks/install | 2 +- ceph-osd/hooks/install_deps | 2 +- ceph-osd/hooks/upgrade-charm | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index 96836422..8aded7f5 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. diff --git a/ceph-osd/hooks/install_deps b/ceph-osd/hooks/install_deps index 3375e7a0..d0775edc 100755 --- a/ceph-osd/hooks/install_deps +++ b/ceph-osd/hooks/install_deps @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to ensure that python dependencies are installed before we get into # the python part of the hook execution diff --git a/ceph-osd/hooks/upgrade-charm b/ceph-osd/hooks/upgrade-charm index 71fc9ce7..dc22fdf4 100755 --- a/ceph-osd/hooks/upgrade-charm +++ b/ceph-osd/hooks/upgrade-charm @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to ensure that old python bytecode isn't hanging around # after we upgrade the charm with newer libraries find . -iname '*.pyc' -delete From c0d8e2b072b93c2ab771fb67b33c5d95b9d5690c Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 8 Feb 2019 15:37:15 -0600 Subject: [PATCH 1660/2699] Update pre-install hooks to fail on error The pre-install operations may fail, yet that failure is not elevated to the user. This masks the failure and makes early package install issues difficult to troubleshoot. If the basic pre-install script fails, the charm should not proceed to later hooks as the requirements may not be met. Hashbangs for bash should specify -e (errexit) on all of the pre-install bash scripts. Change-Id: I97e2d9fa092fd6af5f426977cf99ab735e520496 Closes-bug: #1815243 Partial-bug: #1815231 --- ceph-proxy/hooks/install | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install index 29ff6894..86d48855 100755 --- a/ceph-proxy/hooks/install +++ b/ceph-proxy/hooks/install @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. From 79e08083de41df75911f8452cf3bb0d0dc403da2 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 12 Feb 2019 11:33:38 +0000 Subject: [PATCH 1661/2699] Add support for radosgw upgrades Sync charms.ceph and use helper functions to determine whether any changes in the source configuration option are a supported upgrade path. If an upgrade path is detected then upgrade via apt_install with the full list of required packages for the radosgw to force an upgrade. 
Change-Id: I48a8b5d14ad6ac11af57ddf0260a4a41744e7e21 Closes-Bug: 1539335 --- ceph-radosgw/.pydevproject | 17 +- ceph-radosgw/Makefile | 9 +- ceph-radosgw/hooks/{ceph.py => ceph_rgw.py} | 0 ceph-radosgw/hooks/hooks.py | 46 +- ceph-radosgw/hooks/install_deps | 2 +- ceph-radosgw/lib/.keep | 3 - ceph-radosgw/lib/ceph/__init__.py | 0 ceph-radosgw/lib/ceph/broker.py | 872 ++++++ ceph-radosgw/lib/ceph/crush_utils.py | 154 ++ ceph-radosgw/lib/ceph/utils.py | 2729 +++++++++++++++++++ ceph-radosgw/unit_tests/test_ceph.py | 2 +- ceph-radosgw/unit_tests/test_hooks.py | 68 +- 12 files changed, 3878 insertions(+), 24 deletions(-) rename ceph-radosgw/hooks/{ceph.py => ceph_rgw.py} (100%) delete mode 100644 ceph-radosgw/lib/.keep create mode 100644 ceph-radosgw/lib/ceph/__init__.py create mode 100644 ceph-radosgw/lib/ceph/broker.py create mode 100644 ceph-radosgw/lib/ceph/crush_utils.py create mode 100644 ceph-radosgw/lib/ceph/utils.py diff --git a/ceph-radosgw/.pydevproject b/ceph-radosgw/.pydevproject index 98cc65d3..03181631 100644 --- a/ceph-radosgw/.pydevproject +++ b/ceph-radosgw/.pydevproject @@ -1,8 +1,15 @@ -python 2.7 -Default - -/ceph-radosgw/hooks - + + python 2.7 + + Default + + + /${PROJECT_DIR_NAME}/lib + /${PROJECT_DIR_NAME}/hooks + /${PROJECT_DIR_NAME}/unit_tests + /${PROJECT_DIR_NAME}/actions + + diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index 6813bb22..a0ab412e 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -16,9 +16,12 @@ bin/charm_helpers_sync.py: @mkdir -p bin @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py +bin/git_sync.py: + @mkdir -p bin + @wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py + sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml -publish: lint test - bzr push lp:charms/ceph-radosgw - bzr push lp:charms/trusty/ceph-radosgw +ceph-sync: bin/git_sync.py + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git diff --git a/ceph-radosgw/hooks/ceph.py b/ceph-radosgw/hooks/ceph_rgw.py similarity index 100% rename from ceph-radosgw/hooks/ceph.py rename to ceph-radosgw/hooks/ceph_rgw.py diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b1d79d84..77f42263 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -19,7 +19,10 @@ import sys import socket -import ceph +sys.path.append('lib') + +import ceph_rgw as ceph +import ceph.utils as ceph_utils from charmhelpers.core.hookenv import ( relation_get, @@ -39,6 +42,7 @@ apt_purge, add_source, filter_installed_packages, + filter_missing_packages, ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import ( @@ -115,16 +119,45 @@ ] +def upgrade_available(): + """Check for upgrade for ceph + + :returns: whether an upgrade is available + :rtype: boolean + """ + c = config() + old_version = ceph_utils.resolve_ceph_version(c.previous('source') or + 'distro') + new_version = ceph_utils.resolve_ceph_version(c.get('source')) + if (old_version in ceph_utils.UPGRADE_PATHS and + new_version == ceph_utils.UPGRADE_PATHS[old_version]): + return True + return False + + def install_packages(): - add_source(config('source'), config('key')) - apt_update(fatal=True) + c = config() + if c.changed('source') or c.changed('key'): + add_source(c.get('source'), c.get('key')) + apt_update(fatal=True) + if is_container(): 
PACKAGES.remove('ntp') - pkgs = filter_installed_packages(PACKAGES) + + # NOTE: just use full package list if we're in an upgrade + # config-changed execution + pkgs = ( + PACKAGES if upgrade_available() else + filter_installed_packages(PACKAGES) + ) if pkgs: status_set('maintenance', 'Installing radosgw packages') - apt_install(PACKAGES, fatal=True) - apt_purge(APACHE_PACKAGES) + apt_install(pkgs, fatal=True) + + pkgs = filter_missing_packages(APACHE_PACKAGES) + if pkgs: + apt_purge(pkgs) + disable_unused_apache_sites() @@ -153,7 +186,6 @@ def _config_changed(): return install_packages() - disable_unused_apache_sites() if config('prefer-ipv6'): status_set('maintenance', 'configuring ipv6') diff --git a/ceph-radosgw/hooks/install_deps b/ceph-radosgw/hooks/install_deps index 4d06619a..0f116166 100755 --- a/ceph-radosgw/hooks/install_deps +++ b/ceph-radosgw/hooks/install_deps @@ -1,7 +1,7 @@ #!/bin/bash -e # Install required dependencies for charm runtime -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython' 'pyudev') check_and_install() { pkg="${1}-${2}" diff --git a/ceph-radosgw/lib/.keep b/ceph-radosgw/lib/.keep deleted file mode 100644 index f49b91ae..00000000 --- a/ceph-radosgw/lib/.keep +++ /dev/null @@ -1,3 +0,0 @@ - This file was created by release-tools to ensure that this empty - directory is preserved in vcs re: lint check definitions in global - tox.ini files. This file can be removed if/when this dir is actually in use. diff --git a/ceph-radosgw/lib/ceph/__init__.py b/ceph-radosgw/lib/ceph/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-radosgw/lib/ceph/broker.py b/ceph-radosgw/lib/ceph/broker.py new file mode 100644 index 00000000..3e857d21 --- /dev/null +++ b/ceph-radosgw/lib/ceph/broker.py @@ -0,0 +1,872 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import json +import os + +from tempfile import NamedTemporaryFile + +from ceph.utils import ( + get_cephfs, + get_osd_weight +) +from ceph.crush_utils import Crushmap + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + ERROR, +) +from charmhelpers.contrib.storage.linux.ceph import ( + create_erasure_profile, + delete_pool, + erasure_profile_exists, + get_osds, + monitor_key_get, + monitor_key_set, + pool_exists, + pool_set, + remove_pool_snapshot, + rename_pool, + set_pool_quota, + snapshot_pool, + validator, + ErasurePool, + Pool, + ReplicatedPool, +) + +# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ +# This should do a decent job of preventing people from passing in bad values. 
+# It will give a useful error message +from subprocess import check_call, check_output, CalledProcessError + +POOL_KEYS = { + # "Ceph Key Name": [Python type, [Valid Range]] + "size": [int], + "min_size": [int], + "crash_replay_interval": [int], + "pgp_num": [int], # = or < pg_num + "crush_ruleset": [int], + "hashpspool": [bool], + "nodelete": [bool], + "nopgchange": [bool], + "nosizechange": [bool], + "write_fadvise_dontneed": [bool], + "noscrub": [bool], + "nodeep-scrub": [bool], + "hit_set_type": [str, ["bloom", "explicit_hash", + "explicit_object"]], + "hit_set_count": [int, [1, 1]], + "hit_set_period": [int], + "hit_set_fpp": [float, [0.0, 1.0]], + "cache_target_dirty_ratio": [float], + "cache_target_dirty_high_ratio": [float], + "cache_target_full_ratio": [float], + "target_max_bytes": [int], + "target_max_objects": [int], + "cache_min_flush_age": [int], + "cache_min_evict_age": [int], + "fast_read": [bool], + "allow_ec_overwrites": [bool], + "compression_mode": [str, ["none", "passive", "aggressive", "force"]], + "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], + "compression_required_ratio": [float, [0.0, 1.0]], +} + +CEPH_BUCKET_TYPES = [ + 'osd', + 'host', + 'chassis', + 'rack', + 'row', + 'pdu', + 'pod', + 'room', + 'datacenter', + 'region', + 'root' +] + + +def decode_req_encode_rsp(f): + """Decorator to decode incoming requests and encode responses.""" + + def decode_inner(req): + return json.dumps(f(json.loads(req))) + + return decode_inner + + +@decode_req_encode_rsp +def process_requests(reqs): + """Process Ceph broker request(s). + + This is a versioned api. API version must be supplied by the client making + the request. + + :param reqs: dict of request parameters. + :returns: dict. exit-code and reason if not 0 + """ + request_id = reqs.get('request-id') + try: + version = reqs.get('api-version') + if version == 1: + log('Processing request {}'.format(request_id), level=DEBUG) + resp = process_requests_v1(reqs['ops']) + if request_id: + resp['request-id'] = request_id + + return resp + + except Exception as exc: + log(str(exc), level=ERROR) + msg = ("Unexpected error occurred while processing requests: %s" % + reqs) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + msg = ("Missing or invalid api version ({})".format(version)) + resp = {'exit-code': 1, 'stderr': msg} + if request_id: + resp['request-id'] = request_id + + return resp + + +def handle_create_erasure_profile(request, service): + """Create an erasure profile. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + # "local" | "shec" or it defaults to "jerasure" + erasure_type = request.get('erasure-type') + # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + failure_domain = request.get('failure-domain') + name = request.get('name') + k = request.get('k') + m = request.get('m') + l = request.get('l') + + if failure_domain not in CEPH_BUCKET_TYPES: + msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + create_erasure_profile(service=service, erasure_plugin_name=erasure_type, + profile_name=name, failure_domain=failure_domain, + data_chunks=k, coding_chunks=m, locality=l) + + +def handle_add_permissions_to_key(request, service): + """Groups are defined by the key cephx.groups.(namespace-)?-(name). 
This + key will contain a dict serialized to JSON with data about the group, + including pools and members. + + A group can optionally have a namespace defined that will be used to + further restrict pool access. + """ + resp = {'exit-code': 0} + + service_name = request.get('name') + group_name = request.get('group') + group_namespace = request.get('group-namespace') + if group_namespace: + group_name = "{}-{}".format(group_namespace, group_name) + group = get_group(group_name=group_name) + service_obj = get_service_groups(service=service_name, + namespace=group_namespace) + if request.get('object-prefix-permissions'): + service_obj['object_prefix_perms'] = request.get( + 'object-prefix-permissions') + format("Service object: {}".format(service_obj)) + permission = request.get('group-permission') or "rwx" + if service_name not in group['services']: + group['services'].append(service_name) + save_group(group=group, group_name=group_name) + if permission not in service_obj['group_names']: + service_obj['group_names'][permission] = [] + if group_name not in service_obj['group_names'][permission]: + service_obj['group_names'][permission].append(group_name) + save_service(service=service_obj, service_name=service_name) + service_obj['groups'] = _build_service_groups(service_obj, + group_namespace) + update_service_permissions(service_name, service_obj, group_namespace) + + return resp + + +def update_service_permissions(service, service_obj=None, namespace=None): + """Update the key permissions for the named client in Ceph""" + if not service_obj: + service_obj = get_service_groups(service=service, namespace=namespace) + permissions = pool_permission_list_for_service(service_obj) + call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e)) + + +def add_pool_to_group(pool, group, namespace=None): + """Add a named pool to a named group""" + group_name = group + if namespace: + group_name = "{}-{}".format(namespace, group_name) + group = get_group(group_name=group_name) + if pool not in group['pools']: + group["pools"].append(pool) + save_group(group, group_name=group_name) + for service in group['services']: + update_service_permissions(service, namespace=namespace) + + +def pool_permission_list_for_service(service): + """Build the permission string for Ceph for a given service""" + permissions = [] + permission_types = collections.OrderedDict() + for permission, group in sorted(service["group_names"].items()): + if permission not in permission_types: + permission_types[permission] = [] + for item in group: + permission_types[permission].append(item) + for permission, groups in permission_types.items(): + permission = "allow {}".format(permission) + for group in groups: + for pool in service['groups'][group].get('pools', []): + permissions.append("{} pool={}".format(permission, pool)) + for permission, prefixes in sorted( + service.get("object_prefix_perms", {}).items()): + for prefix in prefixes: + permissions.append("allow {} object_prefix {}".format(permission, + prefix)) + return ['mon', 'allow r, allow command "osd blacklist"', + 'osd', ', '.join(permissions)] + + +def get_service_groups(service, namespace=None): + """Services are objects stored with some metadata, they look like (for a + service named "nova"): + { + group_names: {'rwx': ['images']}, + groups: {} + } + After populating the group, it looks like: + { + group_names: {'rwx': ['images']}, + groups: { 
+ 'images': { + pools: ['glance'], + services: ['nova'] + } + } + } + """ + service_json = monitor_key_get(service='admin', + key="cephx.services.{}".format(service)) + try: + service = json.loads(service_json) + except (TypeError, ValueError): + service = None + if service: + service['groups'] = _build_service_groups(service, namespace) + else: + service = {'group_names': {}, 'groups': {}} + return service + + +def _build_service_groups(service, namespace=None): + """Rebuild the 'groups' dict for a service group + + :returns: dict: dictionary keyed by group name of the following + format: + + { + 'images': { + pools: ['glance'], + services: ['nova', 'glance] + }, + 'vms':{ + pools: ['nova'], + services: ['nova'] + } + } + """ + all_groups = {} + for groups in service['group_names'].values(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + all_groups[group] = get_group(group_name=name) + return all_groups + + +def get_group(group_name): + """A group is a structure to hold data about a named group, structured as: + { + pools: ['glance'], + services: ['nova'] + } + """ + group_key = get_group_key(group_name=group_name) + group_json = monitor_key_get(service='admin', key=group_key) + try: + group = json.loads(group_json) + except (TypeError, ValueError): + group = None + if not group: + group = { + 'pools': [], + 'services': [] + } + return group + + +def save_service(service_name, service): + """Persist a service in the monitor cluster""" + service['groups'] = {} + return monitor_key_set(service='admin', + key="cephx.services.{}".format(service_name), + value=json.dumps(service, sort_keys=True)) + + +def save_group(group, group_name): + """Persist a group in the monitor cluster""" + group_key = get_group_key(group_name=group_name) + return monitor_key_set(service='admin', + key=group_key, + value=json.dumps(group, sort_keys=True)) + + +def get_group_key(group_name): + """Build group key""" + return 'cephx.groups.{}'.format(group_name) + + +def handle_erasure_pool(request, service): + """Create a new erasure coded pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ + pool_name = request.get('name') + erasure_profile = request.get('erasure-profile') + quota = request.get('max-bytes') + weight = request.get('weight') + group_name = request.get('group') + + if erasure_profile is None: + erasure_profile = "default-canonical" + + app_name = request.get('app-name') + + # Check for missing params + if pool_name is None: + msg = "Missing parameter. name is required for the pool" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + + # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds + if not erasure_profile_exists(service=service, name=erasure_profile): + # TODO: Fail and tell them to create the profile or default + msg = ("erasure-profile {} does not exist. 
Please create it with: " + "create-erasure-profile".format(erasure_profile)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = ErasurePool(service=service, name=pool_name, + erasure_code_profile=erasure_profile, + percent_data=weight, app_name=app_name) + # Ok make the erasure pool + if not pool_exists(service=service, name=pool_name): + log("Creating pool '{}' (erasure_profile={})" + .format(pool.name, erasure_profile), level=INFO) + pool.create() + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_replicated_pool(request, service): + """Create a new replicated pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ + pool_name = request.get('name') + replicas = request.get('replicas') + quota = request.get('max-bytes') + weight = request.get('weight') + group_name = request.get('group') + + # Optional params + pg_num = request.get('pg_num') + if pg_num: + # Cap pg_num to max allowed just in case. + osds = get_osds(service) + if osds: + pg_num = min(pg_num, (len(osds) * 100 // replicas)) + + app_name = request.get('app-name') + # Check for missing params + if pool_name is None or replicas is None: + msg = "Missing parameter. name and replicas are required" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + + kwargs = {} + if pg_num: + kwargs['pg_num'] = pg_num + if weight: + kwargs['percent_data'] = weight + if replicas: + kwargs['replicas'] = replicas + if app_name: + kwargs['app_name'] = app_name + + pool = ReplicatedPool(service=service, + name=pool_name, **kwargs) + if not pool_exists(service=service, name=pool_name): + log("Creating pool '{}' (replicas={})".format(pool.name, replicas), + level=INFO) + pool.create() + else: + log("Pool '{}' already exists - skipping create".format(pool.name), + level=DEBUG) + + # Set a quota if requested + if quota is not None: + set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + + +def handle_create_cache_tier(request, service): + """Create a cache tier on a cold pool. Modes supported are + "writeback" and "readonly". + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + # mode = "writeback" | "readonly" + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + cache_mode = request.get('mode') + + if cache_mode is None: + cache_mode = "writeback" + + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " + "them first".format(storage_pool, cache_pool)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + p = Pool(service=service, name=storage_pool) + p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + + +def handle_remove_cache_tier(request, service): + """Remove a cache tier from the cold pool. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 + """ + storage_pool = request.get('cold-pool') + cache_pool = request.get('hot-pool') + # cache and storage pool must exist first + if not pool_exists(service=service, name=storage_pool) or not pool_exists( + service=service, name=cache_pool): + msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not " + "deleting cache tier".format(storage_pool, cache_pool)) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + pool = Pool(name=storage_pool, service=service) + pool.remove_cache_tier(cache_pool=cache_pool) + + +def handle_set_pool_value(request, service): + """Sets an arbitrary pool value. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + # Set arbitrary pool values + params = {'pool': request.get('name'), + 'key': request.get('key'), + 'value': request.get('value')} + if params['key'] not in POOL_KEYS: + msg = "Invalid key '{}'".format(params['key']) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Get the validation method + validator_params = POOL_KEYS[params['key']] + if len(validator_params) is 1: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0]) + else: + # Validate that what the user passed is actually legal per Ceph's rules + validator(params['value'], validator_params[0], validator_params[1]) + + # Set the value + pool_set(service=service, pool_name=params['pool'], key=params['key'], + value=params['value']) + + +def handle_rgw_regionmap_update(request, service): + """Change the radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + name = request.get('client-name') + if not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output(['radosgw-admin', + '--id', service, + 'regionmap', 'update', '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_regionmap_default(request, service): + """Create a radosgw region map. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + region = request.get('rgw-region') + name = request.get('client-name') + if not region or not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'regionmap', + 'default', + '--rgw-region', region, + '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_zone_set(request, service): + """Create a radosgw zone. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 + """ + json_file = request.get('zone-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'zone', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + +def handle_put_osd_in_bucket(request, service): + """Move an osd into a specified crush bucket. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + osd_id = request.get('osd') + target_bucket = request.get('bucket') + if not osd_id or not target_bucket: + msg = "Missing OSD ID or Bucket" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + crushmap = Crushmap() + try: + crushmap.ensure_bucket_is_present(target_bucket) + check_output( + [ + 'ceph', + '--id', service, + 'osd', + 'crush', + 'set', + str(osd_id), + str(get_osd_weight(osd_id)), + "root={}".format(target_bucket) + ] + ) + + except Exception as exc: + msg = "Failed to move OSD " \ + "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + +def handle_rgw_create_user(request, service): + """Create a new rados gateway user. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + user_id = request.get('rgw-uid') + display_name = request.get('display-name') + name = request.get('client-name') + if not name or not display_name or not user_id: + msg = "Missing client-name, display-name or rgw-uid" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + create_output = check_output( + [ + 'radosgw-admin', + '--id', service, + 'user', + 'create', + '--uid', user_id, + '--display-name', display_name, + '--name', name, + '--system' + ] + ) + try: + user_json = json.loads(str(create_output.decode('UTF-8'))) + return {'exit-code': 0, 'user': user_json} + except ValueError as err: + log(err, level=ERROR) + return {'exit-code': 1, 'stderr': err} + + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_create_cephfs(request, service): + """Create a new cephfs. + + :param request: The broker request + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + cephfs_name = request.get('mds_name') + data_pool = request.get('data_pool') + metadata_pool = request.get('metadata_pool') + # Check if the user params were provided + if not cephfs_name or not data_pool or not metadata_pool: + msg = "Missing mds_name, data_pool or metadata_pool params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Sanity check that the required pools exist + if not pool_exists(service=service, name=data_pool): + msg = "CephFS data pool does not exist. 
Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=metadata_pool): + msg = "CephFS metadata pool does not exist. Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + if get_cephfs(service=service): + # CephFS new has already been called + log("CephFS already created") + return + + # Finally create CephFS + try: + check_output(["ceph", + '--id', service, + "fs", "new", cephfs_name, + metadata_pool, + data_pool]) + except CalledProcessError as err: + if err.returncode == 22: + log("CephFS already created") + return + else: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_region_set(request, service): + # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + """Set the rados gateway region. + + :param request: dict. The broker request. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + json_file = request.get('region-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'region', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + +def process_requests_v1(reqs): + """Process v1 requests. + + Takes a list of requests (dicts) and processes each one. If an error is + found, processing stops and the client is notified in the response. + + Returns a response dict containing the exit code (non-zero if any + operation failed along with an explanation). + """ + ret = None + log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) + for req in reqs: + op = req.get('op') + log("Processing op='{}'".format(op), level=DEBUG) + # Use admin client since we do not have other client key locations + # setup to use them for these operations. 
+ svc = 'admin' + if op == "create-pool": + pool_type = req.get('pool-type') # "replicated" | "erasure" + + # Default to replicated if pool_type isn't given + if pool_type == 'erasure': + ret = handle_erasure_pool(request=req, service=svc) + else: + ret = handle_replicated_pool(request=req, service=svc) + elif op == "create-cephfs": + ret = handle_create_cephfs(request=req, service=svc) + elif op == "create-cache-tier": + ret = handle_create_cache_tier(request=req, service=svc) + elif op == "remove-cache-tier": + ret = handle_remove_cache_tier(request=req, service=svc) + elif op == "create-erasure-profile": + ret = handle_create_erasure_profile(request=req, service=svc) + elif op == "delete-pool": + pool = req.get('name') + ret = delete_pool(service=svc, name=pool) + elif op == "rename-pool": + old_name = req.get('name') + new_name = req.get('new-name') + ret = rename_pool(service=svc, old_name=old_name, + new_name=new_name) + elif op == "snapshot-pool": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + ret = snapshot_pool(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "remove-pool-snapshot": + pool = req.get('name') + snapshot_name = req.get('snapshot-name') + ret = remove_pool_snapshot(service=svc, pool_name=pool, + snapshot_name=snapshot_name) + elif op == "set-pool-value": + ret = handle_set_pool_value(request=req, service=svc) + elif op == "rgw-region-set": + ret = handle_rgw_region_set(request=req, service=svc) + elif op == "rgw-zone-set": + ret = handle_rgw_zone_set(request=req, service=svc) + elif op == "rgw-regionmap-update": + ret = handle_rgw_regionmap_update(request=req, service=svc) + elif op == "rgw-regionmap-default": + ret = handle_rgw_regionmap_default(request=req, service=svc) + elif op == "rgw-create-user": + ret = handle_rgw_create_user(request=req, service=svc) + elif op == "move-osd-to-bucket": + ret = handle_put_osd_in_bucket(request=req, service=svc) + elif op == "add-permissions-to-key": + ret = handle_add_permissions_to_key(request=req, service=svc) + else: + msg = "Unknown operation '{}'".format(op) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + if type(ret) == dict and 'exit-code' in ret: + return ret + + return {'exit-code': 0} diff --git a/ceph-radosgw/lib/ceph/crush_utils.py b/ceph-radosgw/lib/ceph/crush_utils.py new file mode 100644 index 00000000..8b6876c1 --- /dev/null +++ b/ceph-radosgw/lib/ceph/crush_utils.py @@ -0,0 +1,154 @@ +# Copyright 2014 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
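+
+# A rough usage sketch for the helpers below (assuming a functional
+# `ceph` CLI and `crushtool` binary on the unit; the bucket name is
+# illustrative):
+#
+#     crushmap = Crushmap()
+#     crushmap.ensure_bucket_is_present('fast-ssd')
+#
+# ensure_bucket_is_present() only adds the bucket and re-saves the map
+# when the named root bucket is not already present.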
+
+import re
+
+from subprocess import check_output, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+CRUSH_BUCKET = """root {name} {{
+    id {id}    # do not change unnecessarily
+    # weight 0.000
+    alg straw
+    hash 0  # rjenkins1
+}}
+
+rule {name} {{
+    ruleset 0
+    type replicated
+    min_size 1
+    max_size 10
+    step take {name}
+    step chooseleaf firstn 0 type host
+    step emit
+}}"""
+
+# This regular expression looks for a string like:
+# root NAME {
+#     id NUMBER
+# so that we can extract NAME and ID from the crushmap
+CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")
+
+# This regular expression looks for ID strings in the crushmap like:
+#     id NUMBER
+# so that we can extract the IDs from a crushmap
+CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
+
+
+class Crushmap(object):
+    """An object oriented approach to Ceph crushmap management."""
+
+    def __init__(self):
+        self._crushmap = self.load_crushmap()
+        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
+        buckets = []
+        ids = sorted(int(x) for x in
+                     re.findall(CRUSHMAP_ID_RE, self._crushmap))
+        if roots:
+            for root in roots:
+                buckets.append(CRUSHBucket(root[0], root[1], True))
+
+        self._buckets = buckets
+        if ids:
+            self._ids = ids
+        else:
+            self._ids = [0]
+
+    def load_crushmap(self):
+        """Read and decompile the cluster's current CRUSH map."""
+        try:
+            # 'ceph osd getcrushmap' emits a compiled (binary) map on
+            # stdout; feed it to crushtool on stdin to decompile it.
+            crush = check_output(['ceph', 'osd', 'getcrushmap'])
+            return check_output(['crushtool', '-d', '-'],
+                                input=crush).decode('UTF-8')
+        except CalledProcessError as e:
+            log("Error occurred while loading and decompiling CRUSH map: "
+                "{}".format(e), ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
+    def buckets(self):
+        """Return a list of buckets that are in the Crushmap."""
+        return self._buckets
+
+    def add_bucket(self, bucket_name):
+        """Add a named bucket to Ceph"""
+        new_id = min(self._ids) - 1
+        self._ids.append(new_id)
+        self._buckets.append(CRUSHBucket(bucket_name, new_id))
+
+    def save(self):
+        """Persist Crushmap to Ceph"""
+        try:
+            crushmap = self.build_crushmap()
+            compiled = check_output(
+                ['crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'],
+                input=crushmap.encode('UTF-8'))
+            ceph_output = check_output(
+                ['ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'],
+                input=compiled).decode('UTF-8')
+            return ceph_output
+        except CalledProcessError as e:
+            log("save error: {}".format(e))
+            raise RuntimeError("Failed to save CRUSH map.")
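+
+    # The two pipelines above are roughly equivalent to this shell
+    # sketch (illustrative only):
+    #
+    #     ceph osd getcrushmap | crushtool -d - -o decompiled.txt
+    #     crushtool -c decompiled.txt -o compiled.bin
+    #     ceph osd setcrushmap -i compiled.bin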
+ + def build_crushmap(self): + """Modifies the current CRUSH map to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + +class CRUSHBucket(object): + """CRUSH bucket description object.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented diff --git a/ceph-radosgw/lib/ceph/utils.py b/ceph-radosgw/lib/ceph/utils.py new file mode 100644 index 00000000..98320acb --- /dev/null +++ b/ceph-radosgw/lib/ceph/utils.py @@ -0,0 +1,2729 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
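+
+# Note: the NIC tuning helpers below key off the speed reported by
+# sysfs; for example, a 10GbE adapter reports 10000 in
+# /sys/class/net/<interface>/speed and picks up the matching profile
+# from NETWORK_ADAPTER_SYSCTLS.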
+ +import collections +import glob +import json +import os +import pyudev +import random +import re +import socket +import subprocess +import sys +import time +import uuid + +from datetime import datetime + +from charmhelpers.core import hookenv +from charmhelpers.core import templating +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.core.host import ( + chownr, + cmp_pkgrevno, + lsb_release, + mkdir, + owner, + service_restart, + service_start, + service_stop, + CompareHostReleases, +) +from charmhelpers.core.hookenv import ( + cached, + config, + log, + status_set, + DEBUG, + ERROR, + WARNING, + storage_get, + storage_list, +) +from charmhelpers.fetch import ( + apt_cache, + add_source, apt_install, apt_update +) +from charmhelpers.contrib.storage.linux.ceph import ( + get_mon_map, + monitor_key_set, + monitor_key_exists, + monitor_key_get, +) +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + is_device_mounted, +) +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_install_source, +) +from charmhelpers.contrib.storage.linux import lvm +from charmhelpers.core.unitdata import kv + +CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') +OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') +HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', + 'radosgw', 'xfsprogs', + 'lvm2', 'parted'] + +CEPH_KEY_MANAGER = 'ceph' +VAULT_KEY_MANAGER = 'vault' +KEY_MANAGERS = [ + CEPH_KEY_MANAGER, + VAULT_KEY_MANAGER, +] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +class Partition(object): + def __init__(self, name, number, size, start, end, sectors, uuid): + """A block device partition. 
+
+        :param name: Name of block device
+        :param number: Partition number
+        :param size: Capacity of the device
+        :param start: Starting block
+        :param end: Ending block
+        :param sectors: Number of blocks
+        :param uuid: UUID of the partition
+        """
+        self.name = name
+        self.number = number
+        self.size = size
+        self.start = start
+        self.end = end
+        self.sectors = sectors
+        self.uuid = uuid
+
+    def __str__(self):
+        return "number: {} start: {} end: {} sectors: {} size: {} " \
+               "name: {} uuid: {}".format(self.number, self.start,
+                                          self.end,
+                                          self.sectors, self.size,
+                                          self.name, self.uuid)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+def unmounted_disks():
+    """List of unmounted block devices on the current host."""
+    disks = []
+    context = pyudev.Context()
+    for device in context.list_devices(DEVTYPE='disk'):
+        if device['SUBSYSTEM'] == 'block':
+            matched = False
+            for block_type in [u'dm', u'loop', u'ram', u'nbd']:
+                if block_type in device.device_node:
+                    matched = True
+            if matched:
+                continue
+            disks.append(device.device_node)
+    log("Found disks: {}".format(disks))
+    return [disk for disk in disks if not is_device_mounted(disk)]
+
+
+def save_sysctls(sysctl_dict, save_location):
+    """Persist the sysctls to the hard drive.
+
+    :param sysctl_dict: dict
+    :param save_location: path to save the settings to
+    :raises: IOError if anything goes wrong with writing.
+    """
+    try:
+        # Persist the settings for reboots
+        with open(save_location, "w") as fd:
+            for key, value in sysctl_dict.items():
+                fd.write("{}={}\n".format(key, value))
+
+    except IOError as e:
+        log("Unable to persist sysctl settings to {}. Error {}".format(
+            save_location, e), level=ERROR)
+        raise
+
+
+def tune_nic(network_interface):
+    """Set optimal sysctls for the particular network adapter.
+
+    :param network_interface: string The network adapter name.
+    """
+    speed = get_link_speed(network_interface)
+    if speed in NETWORK_ADAPTER_SYSCTLS:
+        status_set('maintenance', 'Tuning device {}'.format(
+            network_interface))
+        sysctl_file = os.path.join(
+            os.sep,
+            'etc',
+            'sysctl.d',
+            '51-ceph-osd-charm-{}.conf'.format(network_interface))
+        try:
+            log("Saving sysctl_file: {} values: {}".format(
+                sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]),
+                level=DEBUG)
+            save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed],
+                         save_location=sysctl_file)
+        except IOError as e:
+            log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} "
+                "failed. {}".format(network_interface, e),
+                level=ERROR)
+
+        try:
+            # Apply the settings
+            log("Applying sysctl settings", level=DEBUG)
+            subprocess.check_output(["sysctl", "-p", sysctl_file])
+        except subprocess.CalledProcessError as err:
+            log('sysctl -p {} failed with error {}'.format(sysctl_file,
+                                                           err.output),
+                level=ERROR)
+    else:
+        log("No settings found for network adapter: {}".format(
+            network_interface), level=DEBUG)
+
+
+def get_link_speed(network_interface):
+    """Find the link speed for a given network device.
+
+    Returns None if an error occurs.
+
+    :param network_interface: string The network adapter interface.
+    :returns: LinkSpeed
+    """
+    speed_path = os.path.join(os.sep, 'sys', 'class', 'net',
+                              network_interface, 'speed')
+    # I'm not sure where else we'd check if this doesn't exist
+    if not os.path.exists(speed_path):
+        return LinkSpeed["UNKNOWN"]
+
+    try:
+        with open(speed_path, 'r') as sysfs:
+            nic_speed = sysfs.readlines()
+
+        # Did we actually read anything?
+        if not nic_speed:
+            return LinkSpeed["UNKNOWN"]
+
+        # Try to find a sysctl match for this particular speed
+        for name, speed in LinkSpeed.items():
+            if speed == int(nic_speed[0].strip()):
+                return speed
+        # Default to UNKNOWN if we can't find a match
+        return LinkSpeed["UNKNOWN"]
+    except IOError as e:
+        log("Unable to open {path} because of error: {error}".format(
+            path=speed_path,
+            error=e), level=ERROR)
+        return LinkSpeed["UNKNOWN"]
+
+
+def persist_settings(settings_dict):
+    """Persist the hard drive settings to the /etc/hdparm.conf file.
+
+    The settings_dict should be in the form of {"uuid": {"key":"value"}}
+
+    :param settings_dict: dict of settings to save
+    """
+    if not settings_dict:
+        return
+
+    try:
+        templating.render(source='hdparm.conf', target=HDPARM_FILE,
+                          context=settings_dict)
+    except IOError as err:
+        log("Unable to open {path} because of error: {error}".format(
+            path=HDPARM_FILE, error=err), level=ERROR)
+    except Exception as e:
+        # The templating.render can raise a jinja2 exception if the
+        # template is not found. Rather than polluting the import
+        # space of this charm, simply catch Exception
+        log('Unable to render {path} due to error: {error}'.format(
+            path=HDPARM_FILE, error=e), level=ERROR)
+
+
+def set_max_sectors_kb(dev_name, max_sectors_size):
+    """This function sets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :param max_sectors_size: int of the max_sectors_size to save
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+    try:
+        with open(max_sectors_kb_path, 'w') as f:
+            f.write(str(max_sectors_size))
+    except IOError as e:
+        log('Failed to write max_sectors_kb to {}. Error: {}'.format(
+            max_sectors_kb_path, e), level=ERROR)
+
+
+def get_max_sectors_kb(dev_name):
+    """This function gets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_sectors_kb or 0 on error.
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+
+    # Read in what Linux has set by default
+    if os.path.exists(max_sectors_kb_path):
+        try:
+            with open(max_sectors_kb_path, 'r') as f:
+                max_sectors_kb = f.read().strip()
+                return int(max_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_sectors_kb from {}. Error: {}'.format(
+                max_sectors_kb_path, e), level=ERROR)
+            # Bail.
+            return 0
+    return 0
+
+
+def get_max_hw_sectors_kb(dev_name):
+    """This function gets the max_hw_sectors_kb for a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_hw_sectors_kb or 0 on error.
+    """
+    max_hw_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                          'queue', 'max_hw_sectors_kb')
+    # Read in what the hardware supports
+    if os.path.exists(max_hw_sectors_kb_path):
+        try:
+            with open(max_hw_sectors_kb_path, 'r') as f:
+                max_hw_sectors_kb = f.read().strip()
+                return int(max_hw_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_hw_sectors_kb from {}. 
Error: {}'.format( + max_hw_sectors_kb_path, e), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """This function sets the hard drive read ahead. + + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """This queries blkid to get the uuid for a block device. + + :param block_dev: Name of the block device to query. + :returns: The UUID of the device or None on Error. + """ + try: + block_info = str(subprocess + .check_output(['blkid', '-o', 'export', block_dev]) + .decode('UTF-8')) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """Tune the max_hw_sectors if needed. + + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. + + :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + + This function will change the read ahead sectors and the max write + sectors for each block device. + + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. 
Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + return + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_weight(osd_id): + """Returns the weight of the specified OSD. + + :returns: Float + :raises: ValueError if the monmap fails to parse. + :raises: CalledProcessError if our ceph command fails. + """ + try: + tree = str(subprocess + .check_output(['ceph', 'osd', 'tree', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['nodes']: + return None + for device in json_tree['nodes']: + if device['type'] == 'osd' and device['name'] == osd_id: + return device['crush_weight'] + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e)) + raise + + +def get_osd_tree(service): + """Returns the current osd map in JSON. + + :returns: List. + :raises: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = str(subprocess + .check_output(['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + host_nodes = [ + node for node in json_tree['nodes'] + if node['type'] == 'host' + ] + for host in host_nodes: + crush_list.append( + CrushLocation( + name=host.get('name'), + identifier=host['id'], + host=host.get('host'), + rack=host.get('rack'), + row=host.get('row'), + datacenter=host.get('datacenter'), + chassis=host.get('chassis'), + root=host.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. 
Error: {}".format(
+            tree, v))
+        raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e))
+        raise
+
+
+def _get_child_dirs(path):
+    """Returns a list of directory names in the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :returns: list. A list of child directories under the parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    files_in_dir = [os.path.join(path, f) for f in os.listdir(path)]
+    return list(filter(os.path.isdir, files_in_dir))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return int: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
+    """
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: {}".format(dirname))
+
+    return match.group('osd_id')
+
+
+def get_local_osd_ids():
+    """This will list the /var/lib/ceph/osd/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of osd identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    osd_ids = []
+    osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
+    if os.path.exists(osd_path):
+        try:
+            dirs = os.listdir(osd_path)
+            for osd_dir in dirs:
+                osd_id = osd_dir.split('-')[1]
+                if _is_int(osd_id):
+                    osd_ids.append(osd_id)
+        except OSError:
+            raise
+    return osd_ids
+
+
+def get_local_mon_ids():
+    """This will list the /var/lib/ceph/mon/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of monitor identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    mon_ids = []
+    mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon')
+    if os.path.exists(mon_path):
+        try:
+            dirs = os.listdir(mon_path)
+            for mon_dir in dirs:
+                # Basically this takes everything after ceph- as the monitor ID
+                match = re.search(r'ceph-(?P<mon_id>.*)', mon_dir)
+                if match:
+                    mon_ids.append(match.group('mon_id'))
+        except OSError:
+            raise
+    return mon_ids
+
+
+def _is_int(v):
+    """Return True if the object v can be turned into an integer."""
+    try:
+        int(v)
+        return True
+    except ValueError:
+        return False
+
+
+def get_version():
+    """Derive Ceph release from an installed package."""
+    import apt_pkg as apt
+
+    cache = apt_cache()
+    package = "ceph"
+    try:
+        pkg = cache[package]
+    except KeyError:
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation ' \
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match('^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: {}".format(msg), + level=ERROR) + sys.exit(1) + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + +def wait_for_quorum(): + while not is_quorum(): + log("Waiting for quorum to be reached") + time.sleep(3) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' +] + +CEPH_PARTITIONS = [ + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation +] + + +def get_partition_list(dev): + """Lists the partitions of a block device. + + :param dev: Path to a block device. ex: /dev/sda + :returns: Returns a list of Partition objects. + :raises: CalledProcessException if lsblk fails + """ + partitions_list = [] + try: + partitions = get_partitions(dev) + # For each line of output + for partition in partitions: + parts = partition.split() + try: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name=parts[5], + uuid=parts[6]) + ) + except IndexError: + partitions_list.append( + Partition(number=parts[0], + start=parts[1], + end=parts[2], + sectors=parts[3], + size=parts[4], + name="", + uuid=parts[5]) + ) + + return partitions_list + except subprocess.CalledProcessError: + raise + + +def is_pristine_disk(dev): + """ + Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it + is actually all zeros and safe for us to use. + + Existing partitioning tools does not discern between a failure to read from + block device, failure to understand a partition table and the fact that a + block device has no partition table. 
Since we need to be positive about + which is which we need to read the device directly and confirm ourselves. + + :param dev: Path to block device + :type dev: str + :returns: True all 2048 bytes == 0x0, False if not + :rtype: bool + """ + want_bytes = 2048 + + f = open(dev, 'rb') + data = f.read(want_bytes) + read_bytes = len(data) + if read_bytes != want_bytes: + log('{}: short read, got {} bytes expected {}.' + .format(dev, read_bytes, want_bytes), level=WARNING) + return False + + return all(byte == 0x0 for byte in data) + + +def is_osd_disk(dev): + db = kv() + osd_devices = db.get('osd-devices', []) + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return True + + partitions = get_partition_list(dev) + for partition in partitions: + try: + info = str(subprocess + .check_output(['sgdisk', '-i', partition.number, dev]) + .decode('UTF-8')) + info = info.split("\n") # IGNORE:E1103 + for line in info: + for ptype in CEPH_PARTITIONS: + sig = 'Partition GUID code: {}'.format(ptype) + if line.startswith(sig): + return True + except subprocess.CalledProcessError as e: + log("sgdisk inspection of partition {} on {} failed with " + "error: {}. Skipping".format(partition.minor, dev, e), + level=ERROR) + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if cmp_pkgrevno('ceph', "0.56.6") >= 0: + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + + +def udevadm_settle(): + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + udevadm_settle() + + +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_bootstrapped(): + return os.path.exists(_bootstrap_keyring) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def import_osd_bootstrap_key(key): + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = str(subprocess.check_output(cmd).decode('UTF-8')) + + return "{}==".format(res.split('=')[1].strip()) + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for 
element in raw_key.splitlines(): + if 'key' in element: + return element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except: + # If that fails try with the older style permissions + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(pool_list=None, name=None): + return get_named_key(name=name or 'radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) + + +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) + + +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + +_default_caps = collections.OrderedDict([ + ('mon', ['allow r', + 'allow command "osd blacklist"']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling check_output: {}".format(cmd), level=DEBUG) + return (parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip())) # IGNORE:E1103 + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :returns: Returns a cephx key + """ + key_name = 'client.{}'.format(name) + try: + # Does the key already exist? + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + # NOTE(jamespage); + # Apply any changes to key capabilities, dealing with + # upgrades which requires new caps for operation. 
+ upgrade_key_caps(key_name, + caps or _default_caps, + pool_list) + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! + log("Creating new key for {}".format(name), level=DEBUG) + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', key_name, + ] + # Add capabilities + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps, pool_list=None): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' + + +def bootstrap_monitor_cluster(secret): + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) + # end changes for Ceph >= 0.61.3 + try: + add_keyring_to_ceph(keyring, + secret, + hostname, + path, + done, + init_marker) + + except: + raise + finally: + os.unlink(keyring) + + +@retry_on_exception(3, base_delay=5) +def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', hostname, + '--keyring', keyring]) + chownr('/var/log/ceph', ceph_user(), ceph_user()) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) + service_restart('ceph-mon') + else: + service_restart('ceph-mon-all') + + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + # NOTE(fnordahl): Explicitly run `ceph-crate-keys` for older + # ceph releases too. 
This improves bootstrap + # resilience as the charm will wait for + # presence of peer units before attempting + # to bootstrap. Note that charms deploying + # ceph-mon service should disable running of + # `ceph-create-keys` service in init system. + cmd = ['ceph-create-keys', '--id', hostname] + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate. Increase timeout when + # timeout parameter available. For older releases + # we rely on retry_on_exception decorator. + # LP#1719436 + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) + _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' + osstat = os.stat(_client_admin_keyring) + if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. + # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception + + +def update_monfs(): + hostname = socket.gethostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def get_lvs(dev): + """ + List logical volumes for the provided block device + + :param: dev: Full path to block device. + :raises subprocess.CalledProcessError: in the event that any supporting + operation failed. + :returns: list: List of logical volumes provided by the block device + """ + if not lvm.is_lvm_physical_volume(dev): + return [] + vg_name = lvm.list_lvm_volume_group(dev) + return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) + + +def find_least_used_utility_device(utility_devices, lvs=False): + """ + Find a utility device which has the smallest number of partitions + among other devices in the supplied list. + + :utility_devices: A list of devices to be used for filestore journal + or bluestore wal or db. 
+ :lvs: flag to indicate whether inspection should be based on LVM LV's + :return: string device name + """ + if lvs: + usages = map(lambda a: (len(get_lvs(a)), a), utility_devices) + else: + usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) + least = min(usages, key=lambda t: t[0]) + return least[1] + + +def get_devices(name): + """ Merge config and juju storage based devices + + :name: THe name of the device type, eg: wal, osd, journal + :returns: Set(device names), which are strings + """ + if config(name): + devices = [l.strip() for l in config(name).split(' ')] + else: + devices = [] + storage_ids = storage_list(name) + devices.extend((storage_get('location', s) for s in storage_ids)) + devices = filter(os.path.exists, devices) + + return set(devices) + + +def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, + bluestore=False, key_manager=CEPH_KEY_MANAGER): + if dev.startswith('/dev'): + osdize_dev(dev, osd_format, osd_journal, + ignore_errors, encrypt, + bluestore, key_manager) + else: + osdize_dir(dev, encrypt, bluestore) + + +def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + """ + Prepare a block device for use as a Ceph OSD + + A block device will only be prepared once during the lifetime + of the calling charm unit; future executions will be skipped. + + :param: dev: Full path to block device to use + :param: osd_format: Format for OSD filesystem + :param: osd_journal: List of block devices to use for OSD journals + :param: ignore_errors: Don't fail in the event of any errors during + processing + :param: encrypt: Encrypt block devices using 'key_manager' + :param: bluestore: Use bluestore native ceph block device format + :param: key_manager: Key management approach for encryption keys + :raises subprocess.CalledProcessError: in the event that any supporting + subprocess operation failed + :raises ValueError: if an invalid key_manager is provided + """ + if key_manager not in KEY_MANAGERS: + raise ValueError('Unsupported key manager: {}'.format(key_manager)) + + db = kv() + osd_devices = db.get('osd-devices', []) + try: + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return + + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev): + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + if is_device_mounted(dev): + osd_devices.append(dev) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + osd_devices.append(dev) + return + + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return + + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore, + key_manager) + else: + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) + + try: + status_set('maintenance', 'Initializing device {}'.format(dev)) + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: 
+ log("Couldn't get lsblk output: {}".format(e), ERROR) + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) + raise + + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + finally: + db.set('osd-devices', osd_devices) + db.flush() + + +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): + """ + Prepare a device for usage as a Ceph OSD using ceph-disk + + :param: dev: Full path to use for OSD block device setup, + The function looks up realpath of the device + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption (unsupported) + :param: bluestore: Use bluestore storage for OSD + :returns: list. 'ceph-disk' command and required parameters for + execution by check_call + """ + cmd = ['ceph-disk', 'prepare'] + + if encrypt: + cmd.append('--dmcrypt') + + if osd_format and not bluestore: + cmd.append('--fs-type') + cmd.append(osd_format) + + # NOTE(jamespage): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') + + cmd.append(os.path.realpath(dev)) + + if osd_journal: + least_used = find_least_used_utility_device(osd_journal) + cmd.append(least_used) + + return cmd + + +def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): + """ + Prepare and activate a device for usage as a Ceph OSD using ceph-volume. + + This also includes creation of all PV's, VG's and LV's required to + support the initialization of the OSD. + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption + :param: bluestore: Use bluestore storage for OSD + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM operation failed. + :returns: list. 
'ceph-volume' command and required parameters for + execution by check_call + """ + cmd = ['ceph-volume', 'lvm', 'create'] + + osd_fsid = str(uuid.uuid4()) + cmd.append('--osd-fsid') + cmd.append(osd_fsid) + + if bluestore: + cmd.append('--bluestore') + main_device_type = 'block' + else: + cmd.append('--filestore') + main_device_type = 'data' + + if encrypt and key_manager == CEPH_KEY_MANAGER: + cmd.append('--dmcrypt') + + # On-disk journal volume creation + if not osd_journal and not bluestore: + journal_lv_type = 'journal' + cmd.append('--journal') + cmd.append(_allocate_logical_volume( + dev=dev, + lv_type=journal_lv_type, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + encrypt=encrypt, + key_manager=key_manager) + ) + + cmd.append('--data') + cmd.append(_allocate_logical_volume(dev=dev, + lv_type=main_device_type, + osd_fsid=osd_fsid, + encrypt=encrypt, + key_manager=key_manager)) + + if bluestore: + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + elif osd_journal: + cmd.append('--journal') + least_used = find_least_used_utility_device(osd_journal, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type='journal', + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + return cmd + + +def _partition_name(dev): + """ + Derive the first partition name for a block device + + :param: dev: Full path to block device. + :returns: str: Full path to first partition on block device. + """ + if dev[-1].isdigit(): + return '{}p1'.format(dev) + else: + return '{}1'.format(dev) + + +def is_active_bluestore_device(dev): + """ + Determine whether provided device is part of an active + bluestore based OSD (as its block component). + + :param: dev: Full path to block device to check for Bluestore usage. + :returns: boolean: indicating whether device is in active use. + """ + if not lvm.is_lvm_physical_volume(dev): + return False + + vg_name = lvm.list_lvm_volume_group(dev) + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + + block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') + for block_candidate in block_symlinks: + if os.path.islink(block_candidate): + target = os.readlink(block_candidate) + if target.endswith(lv_name): + return True + + return False + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. 
+ """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + +def get_conf(variable): + """ + Get the value of the given configuration variable from the + cluster. + + :param variable: ceph configuration variable + :returns: str. configured value for provided variable + + """ + return subprocess.check_output([ + 'ceph-osd', + '--show-config-value={}'.format(variable), + '--no-mon-config', + ]).strip() + + +def calculate_volume_size(lv_type): + """ + Determine the configured size for Bluestore DB/WAL or + Filestore Journal devices + + :param lv_type: volume type (db, wal or journal) + :raises KeyError: if invalid lv_type is supplied + :returns: int. Configured size in megabytes for volume type + """ + # lv_type -> ceph configuration option + _config_map = { + 'db': 'bluestore_block_db_size', + 'wal': 'bluestore_block_wal_size', + 'journal': 'osd_journal_size', + } + + # default sizes in MB + _default_size = { + 'db': 1024, + 'wal': 576, + 'journal': 1024, + } + + # conversion of ceph config units to MB + _units = { + 'db': 1048576, # Bytes -> MB + 'wal': 1048576, # Bytes -> MB + 'journal': 1, # Already in MB + } + + configured_size = get_conf(_config_map[lv_type]) + + if configured_size is None or int(configured_size) == 0: + return _default_size[lv_type] + else: + return int(configured_size) / _units[lv_type] + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return subprocess.check_output(cmd).decode('UTF-8').strip() + except subprocess.CalledProcessError: + return None + + +def _initialize_disk(dev, dev_uuid, encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Initialize a raw block device consuming 100% of the avaliable + disk space. + + Function assumes that block device has already been wiped. + + :param: dev: path to block device to initialize + :param: dev_uuid: UUID to use for any dm-crypt operations + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: Key management approach for dm-crypt keys + :raises: subprocess.CalledProcessError: if any parted calls fail + :returns: str: Full path to new partition. + """ + use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER + + if use_vaultlocker: + # NOTE(jamespage): Check to see if already initialized as a LUKS + # volume, which indicates this is a shared block + # device for journal, db or wal volumes. 
+ luks_uuid = _luks_uuid(dev) + if luks_uuid: + return '/dev/mapper/crypt-{}'.format(luks_uuid) + + dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) + + if use_vaultlocker and not os.path.exists(dm_crypt): + subprocess.check_call([ + 'vaultlocker', + 'encrypt', + '--uuid', dev_uuid, + dev, + ]) + subprocess.check_call([ + 'dd', + 'if=/dev/zero', + 'of={}'.format(dm_crypt), + 'bs=512', + 'count=1', + ]) + + if use_vaultlocker: + return dm_crypt + else: + return dev + + +def _allocate_logical_volume(dev, lv_type, osd_fsid, + size=None, shared=False, + encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Allocate a logical volume from a block device, ensuring any + required initialization and setup of PV's and VG's to support + the LV. + + :param: dev: path to block device to allocate from. + :param: lv_type: logical volume type to create + (data, block, journal, wal, db) + :param: osd_fsid: UUID of the OSD associate with the LV + :param: size: Size in LVM format for the device; + if unset 100% of VG + :param: shared: Shared volume group (journal, wal, db) + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM or parted operation fails. + :returns: str: String in the format 'vg_name/lv_name'. + """ + lv_name = "osd-{}-{}".format(lv_type, osd_fsid) + current_volumes = lvm.list_logical_volumes() + if shared: + dev_uuid = str(uuid.uuid4()) + else: + dev_uuid = osd_fsid + pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) + + vg_name = None + if not lvm.is_lvm_physical_volume(pv_dev): + lvm.create_lvm_physical_volume(pv_dev) + if shared: + vg_name = 'ceph-{}-{}'.format(lv_type, + str(uuid.uuid4())) + else: + vg_name = 'ceph-{}'.format(osd_fsid) + lvm.create_lvm_volume_group(vg_name, pv_dev) + else: + vg_name = lvm.list_lvm_volume_group(pv_dev) + + if lv_name not in current_volumes: + lvm.create_logical_volume(lv_name, vg_name, size) + + return "{}/{}".format(vg_name, lv_name) + + +def osdize_dir(path, encrypt=False, bluestore=False): + """Ask ceph-disk to prepare a directory to become an osd. + + :param path: str. The directory to osdize + :param encrypt: bool. Should the OSD directory be encrypted at rest + :returns: None + """ + + db = kv() + osd_devices = db.get('osd-devices', []) + if path in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(path)) + return + + if os.path.exists(os.path.join(path, 'upstart')): + log('Path {} is already configured as an OSD - bailing'.format(path)) + return + + if cmp_pkgrevno('ceph', "0.56.6") < 0: + log('Unable to use directories for OSDs with ceph < 0.56.6', + level=ERROR) + return + + mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) + cmd = [ + 'sudo', '-u', ceph_user(), + 'ceph-disk', + 'prepare', + '--data-dir', + path + ] + if cmp_pkgrevno('ceph', '0.60') >= 0: + if encrypt: + cmd.append('--dmcrypt') + + # NOTE(icey): enable experimental bluestore support + if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + cmd.append('--bluestore') + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') + log("osdize dir cmd: {}".format(cmd)) + subprocess.check_call(cmd) + + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. 
+    osd_devices.append(path)
+    db.set('osd-devices', osd_devices)
+    db.flush()
+
+
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+
+def get_running_osds():
+    """Returns a list of the pids of the currently running OSD daemons"""
+    cmd = ['pgrep', 'ceph-osd']
+    try:
+        result = str(subprocess.check_output(cmd).decode('UTF-8'))
+        return result.split()
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_cephfs(service):
+    """List the Ceph Filesystems that exist.
+
+    :param service: The service name to run the ceph command under
+    :returns: list. Returns a list of the ceph filesystems
+    """
+    if get_version() < 0.86:
+        # This command wasn't introduced until 0.86 ceph
+        return []
+    try:
+        output = str(subprocess
+                     .check_output(["ceph", '--id', service, "fs", "ls"])
+                     .decode('UTF-8'))
+        if not output:
+            return []
+        # Example subprocess output:
+        # 'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
+        #  data pools: [ip-172-31-23-165_data ]\n'
+        # output: filesystems: ['ip-172-31-23-165']
+        filesystems = []
+        for line in output.splitlines():
+            parts = line.split(',')
+            for part in parts:
+                if "name" in part:
+                    filesystems.append(part.split(' ')[1])
+        return filesystems
+    except subprocess.CalledProcessError:
+        return []
+
+
+def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
+    """Fairly self-explanatory name. This function will wait
+    for all monitors in the cluster to upgrade, or raise an
+    exception once a timeout period has expired.
+
+    :param new_version: str of the version to watch
+    :param upgrade_key: the cephx key name to use
+    """
+    done = False
+    start_time = time.time()
+    monitor_list = []
+
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    while not done:
+        try:
+            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
+                "mon", mon, new_version
+            )) for mon in monitor_list)
+            current_time = time.time()
+            if current_time > (start_time + 10 * 60):
+                raise Exception(
+                    "Timed out waiting for monitors to upgrade to "
+                    "{}".format(new_version))
+            else:
+                # Wait 30 seconds and test again if all monitors are upgraded
+                time.sleep(30)
+        except subprocess.CalledProcessError:
+            raise
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+def roll_monitor_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There's 2 possible cases: Either I'm first in line or not.
+    If I'm not first in line I'll wait a random time between 5-30 seconds
+    and test to see if the previous monitor is upgraded yet.
+
+    :param new_version: str of the version to upgrade to
+    :param upgrade_key: the cephx key name to use when upgrading
+    """
+    log('roll_monitor_cluster called with {}'.format(new_version))
+    my_name = socket.gethostname()
+    monitor_list = []
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    else:
+        status_set('blocked', 'Unable to get monitor cluster information')
+        sys.exit(1)
+    log('monitor_list: {}'.format(monitor_list))
+
+    # A sorted list of monitor names
+    mon_sorted_list = sorted(monitor_list)
+
+    try:
+        position = mon_sorted_list.index(my_name)
+        log("upgrade position: {}".format(position))
+        if position == 0:
+            # I'm first! Roll
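+            # (position 0 in the sorted monitor list rolls immediately;
+            # every other unit waits on its predecessor before rolling)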
+            # First set a key to inform others I'm about to roll
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='mon',
+                          my_name=my_name,
+                          version=new_version)
+        else:
+            # Check if the previous node has finished
+            status_set('waiting',
+                       'Waiting on {} to finish upgrading'.format(
+                           mon_sorted_list[position - 1]))
+            wait_on_previous_node(upgrade_key=upgrade_key,
+                                  service='mon',
+                                  previous_node=mon_sorted_list[position - 1],
+                                  version=new_version)
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='mon',
+                          my_name=my_name,
+                          version=new_version)
+        # NOTE(jamespage):
+        # Wait until all monitors have upgraded before bootstrapping
+        # the ceph-mgr daemons due to use of new mgr keyring profiles
+        if new_version == 'luminous':
+            wait_for_all_monitors_to_upgrade(new_version=new_version,
+                                             upgrade_key=upgrade_key)
+            bootstrap_manager()
+    except ValueError:
+        log("Failed to find {} in list {}.".format(
+            my_name, mon_sorted_list))
+        status_set('blocked', 'failed to upgrade monitor')
+
+
+# TODO(jamespage):
+# Mimic support will need to ensure that ceph-mgr daemons are also
+# restarted during upgrades - probably through use of one of the
+# high level systemd targets shipped by the packaging.
+def upgrade_monitor(new_version):
+    """Upgrade the current ceph monitor to the new version
+
+    :param new_version: String version to upgrade to.
+    """
+    current_version = get_version()
+    status_set("maintenance", "Upgrading monitor")
+    log("Current ceph version is {}".format(current_version))
+    log("Upgrading to: {}".format(new_version))
+
+    try:
+        add_source(config('source'), config('key'))
+        apt_update(fatal=True)
+    except subprocess.CalledProcessError as err:
+        log("Adding the ceph source failed with message: {}".format(
+            err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+    try:
+        if systemd():
+            service_stop('ceph-mon')
+        else:
+            service_stop('ceph-mon-all')
+        apt_install(packages=determine_packages(), fatal=True)
+
+        owner = ceph_user()
+
+        # Ensure the files and directories under /var/lib/ceph are chowned
+        # properly as part of the move to the Jewel release, which moved the
+        # ceph daemons to running as ceph:ceph instead of root:root.
+        if new_version == 'jewel':
+            # Ensure the ownership of Ceph's directories is correct
+            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
+                   owner=owner,
+                   group=owner,
+                   follow_links=True)
+
+        # Ensure that the mon directory is user-writable
+        hostname = socket.gethostname()
+        path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+        mkdir(path, owner=ceph_user(), group=ceph_user(),
+              perms=0o755)
+
+        if systemd():
+            service_start('ceph-mon')
+        else:
+            service_start('ceph-mon-all')
+    except subprocess.CalledProcessError as err:
+        log("Stopping ceph and upgrading packages failed "
+            "with message: {}".format(err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+
+def lock_and_roll(upgrade_key, service, my_name, version):
+    """Create a lock on the ceph monitor cluster and upgrade.
+
+    :param upgrade_key: str. The cephx key to use
+    :param service: str. The cephx id to use
+    :param my_name: str. The current hostname
+    :param version: str. The version we are upgrading to
+    """
+    start_timestamp = time.time()
+
+    log('monitor_key_set {}_{}_{}_start {}'.format(
+        service,
+        my_name,
+        version,
+        start_timestamp))
+    monitor_key_set(upgrade_key, "{}_{}_{}_start".format(
+        service, my_name, version), start_timestamp)
+    log("Rolling")
+
+    # This should be quick
+    if service == 'osd':
+        upgrade_osd(version)
+    elif service == 'mon':
+        upgrade_monitor(version)
+    else:
+        log("Unknown service {}. Unable to upgrade".format(service),
+            level=ERROR)
+    log("Done")
+
+    stop_timestamp = time.time()
+    # Set a key to inform others I am finished
+    log('monitor_key_set {}_{}_{}_done {}'.format(service,
+                                                  my_name,
+                                                  version,
+                                                  stop_timestamp))
+    status_set('maintenance', 'Finishing upgrade')
+    monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service,
+                                                        my_name,
+                                                        version),
+                    stop_timestamp)
+
+
+def wait_on_previous_node(upgrade_key, service, previous_node, version):
+    """A lock that sleeps the current thread while waiting for the previous
+    node to finish upgrading.
+
+    :param upgrade_key: str. The cephx key to use
+    :param service: str. the cephx id to use
+    :param previous_node: str. The name of the previous node to wait on
+    :param version: str. The version we are upgrading to
+    :returns: None
+    """
+    log("Previous node is: {}".format(previous_node))
+
+    previous_node_finished = monitor_key_exists(
+        upgrade_key,
+        "{}_{}_{}_done".format(service, previous_node, version))
+
+    while previous_node_finished is False:
+        log("{} is not finished. Waiting".format(previous_node))
+        # Has this node been trying to upgrade for longer than
+        # 10 minutes?
+        # If so then move on and consider that node dead.
+
+        # NOTE: This assumes the cluster's clocks are somewhat accurate.
+        # If the host's clock is really far off it may cause it to skip
+        # the previous node even though it shouldn't.
+        current_timestamp = time.time()
+        previous_node_start_time = monitor_key_get(
+            upgrade_key,
+            "{}_{}_{}_start".format(service, previous_node, version))
+        if (previous_node_start_time is not None and
+                ((current_timestamp - (10 * 60)) >
+                 float(previous_node_start_time))):
+            # NOTE(jamespage):
+            # Previous node is probably dead as we've been waiting
+            # for 10 minutes - let's move on and upgrade
+            log("Waited 10 mins on node {}. current time: {} > "
+                "previous node start time: {}. Moving on".format(
+                    previous_node,
+                    (current_timestamp - (10 * 60)),
+                    previous_node_start_time))
+            return
+        # NOTE(jamespage)
+        # Previous node has not started, or started less than
+        # 10 minutes ago - sleep a random amount of time and
+        # then check again.
+        wait_time = random.randrange(5, 30)
+        log('waiting for {} seconds'.format(wait_time))
+        time.sleep(wait_time)
+        previous_node_finished = monitor_key_exists(
+            upgrade_key,
+            "{}_{}_{}_done".format(service, previous_node, version))
+
+
+def get_upgrade_position(osd_sorted_list, match_name):
+    """Return the upgrade position for the given osd.
+
+    :param osd_sorted_list: list. OSDs, sorted
+    :param match_name: str. The osd name to match
+    :returns: int. The position or None if not found
+    """
+    for index, item in enumerate(osd_sorted_list):
+        if item.name == match_name:
+            return index
+    return None
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+# 2. This assumes that the osd failure domain is not set to osd.
+#    It rolls an entire server at a time.
+def roll_osd_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There's 2 possible cases: Either I'm first in line or not.
+ If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('waiting', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(new_version): + """Upgrades the current osd + + :param new_version: str. The new version to upgrade to + """ + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph sources failed with message: {}".format( + err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + try: + # Upgrade the packages before restarting the daemons. + status_set('maintenance', 'Upgrading packages to %s' % new_version) + apt_install(packages=determine_packages(), fatal=True) + + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for path in non_osd_dirs: + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. 
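+        # Each OSD is cycled in sequence: stop, disable, fix ownership,
+        # enable, start (see _upgrade_single_osd below).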
+        for osd_dir in _get_child_dirs(OSD_BASE_DIR):
+            try:
+                osd_num = _get_osd_num_from_dirname(osd_dir)
+                _upgrade_single_osd(osd_num, osd_dir)
+            except ValueError as ex:
+                # Directory could not be parsed - junk directory?
+                log('Could not parse osd directory %s: %s' % (osd_dir, ex),
+                    WARNING)
+                continue
+
+    except (subprocess.CalledProcessError, IOError) as err:
+        log("Stopping ceph and upgrading packages failed "
+            "with message: {}".format(err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+
+def _upgrade_single_osd(osd_num, osd_dir):
+    """Upgrades the single OSD directory.
+
+    :param osd_num: the num of the OSD
+    :param osd_dir: the directory of the OSD to upgrade
+    :raises CalledProcessError: if an error occurs in a command issued as part
+                                of the upgrade process
+    :raises IOError: if an error occurs reading/writing to a file as part
+                     of the upgrade process
+    """
+    stop_osd(osd_num)
+    disable_osd(osd_num)
+    update_owner(osd_dir)
+    enable_osd(osd_num)
+    start_osd(osd_num)
+
+
+def stop_osd(osd_num):
+    """Stops the specified OSD number.
+
+    :param osd_num: the osd number to stop
+    """
+    if systemd():
+        service_stop('ceph-osd@{}'.format(osd_num))
+    else:
+        service_stop('ceph-osd', id=osd_num)
+
+
+def start_osd(osd_num):
+    """Starts the specified OSD number.
+
+    :param osd_num: the osd number to start.
+    """
+    if systemd():
+        service_start('ceph-osd@{}'.format(osd_num))
+    else:
+        service_start('ceph-osd', id=osd_num)
+
+
+def disable_osd(osd_num):
+    """Disables the specified OSD number.
+
+    Ensures that the specified osd will not be automatically started at the
+    next reboot of the system. Due to differences between init systems,
+    this method cannot make any guarantees that the specified osd cannot be
+    started manually.
+
+    :param osd_num: the osd id which should be disabled.
+    :raises CalledProcessError: if an error occurs invoking the systemd cmd
+                                to disable the OSD
+    :raises IOError, OSError: if the attempt to read/remove the ready file in
+                              an upstart enabled system fails
+    """
+    if systemd():
+        # When running under systemd, the individual ceph-osd daemons run as
+        # templated units and can be directly addressed by referring to the
+        # templated service name ceph-osd@<osd_num>. Additionally, systemd
+        # allows one to disable a specific templated unit by running the
+        # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the
+        # OSD should remain disabled until re-enabled via systemd.
+        # Note: disabling an already disabled service in systemd returns 0, so
+        # no need to check whether it is enabled or not.
+        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # Neither upstart nor the ceph-osd upstart script provides for
+        # disabling the starting of an OSD automatically. The specific OSD
+        # cannot be prevented from running manually, however it can be
+        # prevented from running automatically on reboot by removing the
+        # 'ready' file in the OSD's root directory. This is due to the
+        # ceph-osd-all upstart script checking for the presence of this file
+        # before starting the OSD.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        if os.path.exists(ready_file):
+            os.unlink(ready_file)
+
+
+def enable_osd(osd_num):
+    """Enables the specified OSD number.
+
+    Ensures that the specified osd_num will be enabled and ready to start
+    automatically in the event of a reboot.
+
+    :param osd_num: the osd id which should be enabled.
+    :raises CalledProcessError: if the systemd command issued to enable the
+                                service fails
+    :raises IOError: if the attempt to write the ready file in an
+                     upstart-enabled system fails
+    """
+    if systemd():
+        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # When running on upstart, the OSDs are started via the ceph-osd-all
+        # upstart script which will only start the osd if it has a 'ready'
+        # file. Make sure that file exists.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        with open(ready_file, 'w') as f:
+            f.write('ready')
+
+        # Make sure the correct user owns the file. It shouldn't be necessary
+        # as the upstart script should run with root privileges, but it's
+        # better to keep the ownership of all the files consistent.
+        update_owner(ready_file)
+
+
+def update_owner(path, recurse_dirs=True):
+    """Changes the ownership of the specified path.
+
+    Changes the ownership of the specified path to the new ceph daemon user
+    using the system's native chown functionality. This may take a while,
+    so this method will issue a set_status for any changes of ownership which
+    recurses into directory structures.
+
+    :param path: the path to recursively change ownership for
+    :param recurse_dirs: boolean indicating whether to recursively change the
+                         ownership of all the files in a path's subtree or to
+                         simply change the ownership of the path.
+    :raises CalledProcessError: if an error occurs issuing the chown system
+                                command
+    """
+    user = ceph_user()
+    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
+    cmd = ['chown', user_group, path]
+    if os.path.isdir(path) and recurse_dirs:
+        status_set('maintenance', ('Updating ownership of %s to %s' %
+                                   (path, user)))
+        cmd.insert(1, '-R')
+
+    log('Changing ownership of {path} to {user}'.format(
+        path=path, user=user_group), DEBUG)
+    start = datetime.now()
+    subprocess.check_call(cmd)
+    elapsed_time = (datetime.now() - start)
+
+    log('Took {secs} seconds to change the ownership of path: {path}'.format(
+        secs=elapsed_time.total_seconds(), path=path), DEBUG)
+
+
+def list_pools(service):
+    """This will list the current pools that Ceph has
+
+    :param service: String service id to run under
+    :returns: list. Returns a list of the ceph pools.
+    :raises: CalledProcessError if the subprocess fails to run.
+    """
+    try:
+        pool_list = []
+        pools = str(subprocess
+                    .check_output(['rados', '--id', service, 'lspools'])
+                    .decode('UTF-8'))
+        for pool in pools.splitlines():
+            pool_list.append(pool)
+        return pool_list
+    except subprocess.CalledProcessError as err:
+        log("rados lspools failed with error: {}".format(err.output))
+        raise
+
+
+def dirs_need_ownership_update(service):
+    """Determines if directories still need change of ownership.
+
+    Examines the set of directories under the /var/lib/ceph/{service}
+    directory and determines if they have the correct ownership or not.
+    This is necessary due to the upgrade from Hammer to Jewel where the
+    daemon user changes from root: to ceph:.
+
+    :param service: the name of the service folder to check (e.g. osd, mon)
+    :returns: boolean. True if the directories need a change of ownership,
+              False otherwise.
+    :raises IOError: if an error occurs reading the file stats from one of
+                     the child directories.
+    :raises OSError: if the specified path does not exist or some other
+                     error occurs
+    """
+    expected_owner = expected_group = ceph_user()
+    path = os.path.join(CEPH_BASE_DIR, service)
+    for child in _get_child_dirs(path):
+        curr_owner, curr_group = owner(child)
+
+        if (curr_owner == expected_owner) and (curr_group == expected_group):
+            continue
+
+        log('Directory "%s" needs its ownership updated' % child, DEBUG)
+        return True
+
+    # All child directories had the expected ownership
+    return False
+
+
+# A dict of valid ceph upgrade paths. Mapping is old -> new
+UPGRADE_PATHS = collections.OrderedDict([
+    ('firefly', 'hammer'),
+    ('hammer', 'jewel'),
+    ('jewel', 'luminous'),
+    ('luminous', 'mimic'),
+])
+
+# Map UCA codenames to ceph codenames
+UCA_CODENAME_MAP = {
+    'icehouse': 'firefly',
+    'juno': 'firefly',
+    'kilo': 'hammer',
+    'liberty': 'hammer',
+    'mitaka': 'jewel',
+    'newton': 'jewel',
+    'ocata': 'jewel',
+    'pike': 'luminous',
+    'queens': 'luminous',
+    'rocky': 'mimic',
+    'stein': 'mimic',
+}
+
+
+def pretty_print_upgrade_paths():
+    """Pretty print supported upgrade paths for ceph"""
+    return ["{} -> {}".format(key, value)
+            for key, value in UPGRADE_PATHS.items()]
+
+
+def resolve_ceph_version(source):
+    """Resolve a ceph release codename from the source configuration,
+    based on Ubuntu Cloud Archive pockets.
+
+    :param source: source configuration option of charm
+    :returns: ceph release codename or None if not resolvable
+    """
+    os_release = get_os_codename_install_source(source)
+    return UCA_CODENAME_MAP.get(os_release)
+
+
+def get_ceph_pg_stat():
+    """Returns the result of ceph pg stat.
+
+    :returns: dict
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', 'pg', 'stat', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            if not json_tree['num_pg_by_state']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph pg stat json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph pg stat command failed with message: {}".format(e))
+        raise
+
+
+def get_ceph_health():
+    """Returns the health of the cluster from a 'ceph status'
+
+    :returns: dict tree of ceph status
+    :raises: CalledProcessError if the ceph command fails. To get the
+             overall status, use get_ceph_health()['overall_status'].
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', 'status', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['overall_status']:
+                return None
+
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph status command failed with message: {}".format(e))
+        raise
+
+
+def reweight_osd(osd_num, new_weight):
+    """Changes the crush weight of an OSD to the value specified.
+
+    :param osd_num: the osd id which should be changed
+    :param new_weight: the new weight for the OSD
+    :returns: bool. True if output looks right, else False.
+ :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + try: + cmd_result = str(subprocess + .check_output(['ceph', 'osd', 'crush', + 'reweight', "osd.{}".format(osd_num), + new_weight], + stderr=subprocess.STDOUT) + .decode('UTF-8')) + expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( + ID=osd_num) + " to {}".format(new_weight) + log(cmd_result) + if expected_result in cmd_result: + return True + return False + except subprocess.CalledProcessError as e: + log("ceph osd crush reweight command failed" + " with message: {}".format(e)) + raise + + +def determine_packages(): + """Determines packages for installation. + + :returns: list of ceph packages + """ + return PACKAGES + + +def bootstrap_manager(): + hostname = socket.gethostname() + path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) + keyring = os.path.join(path, 'keyring') + + if os.path.exists(keyring): + log('bootstrap_manager: mgr already initialized.') + else: + mkdir(path, owner=ceph_user(), group=ceph_user()) + subprocess.check_call(['ceph', 'auth', 'get-or-create', + 'mgr.{}'.format(hostname), 'mon', + 'allow profile mgr', 'osd', 'allow *', + 'mds', 'allow *', '--out-file', + keyring]) + chownr(path, ceph_user(), ceph_user()) + + unit = 'ceph-mgr@{}'.format(hostname) + subprocess.check_call(['systemctl', 'enable', unit]) + service_restart(unit) + + +def osd_noout(enable): + """Sets or unsets 'noout' + + :param enable: bool. True to set noout, False to unset. + :returns: bool. True if output looks right. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + """ + operation = { + True: 'set', + False: 'unset', + } + try: + subprocess.check_call(['ceph', '--id', 'admin', + 'osd', operation[enable], + 'noout']) + log('running ceph osd {} noout'.format(operation[enable])) + return True + except subprocess.CalledProcessError as e: + log(e) + raise diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 044d24e2..e13d7da1 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -23,7 +23,7 @@ sys.modules['apt'] = mock_apt sys.modules['apt_pkg'] = mock_apt.apt_pkg -import ceph # noqa +import ceph_rgw as ceph # noqa import utils # noqa from test_utils import CharmTestCase # noqa diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 7a9e2675..a6ddb431 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -13,7 +13,7 @@ # limitations under the License. 
from mock import ( - patch, call + patch, call, MagicMock ) from test_utils import ( @@ -63,6 +63,9 @@ 'request_per_unit_key', 'get_certificate_request', 'process_certificates', + 'filter_installed_packages', + 'filter_missing_packages', + 'ceph_utils', ] @@ -78,12 +81,69 @@ def setUp(self): self.service_name.return_value = 'radosgw' self.request_per_unit_key.return_value = False self.systemd_based_radosgw.return_value = False + self.filter_installed_packages.side_effect = lambda pkgs: pkgs + self.filter_missing_packages.side_effect = lambda pkgs: pkgs - def test_install_packages(self): + def test_upgrade_available(self): + _vers = { + 'distro': 'luminous', + 'cloud:bionic-rocky': 'mimic', + } + mock_config = MagicMock() + self.test_config.set('source', 'cloud:bionic-rocky') + mock_config.get.side_effect = self.test_config.get + mock_config.previous.return_value = 'distro' + self.config.side_effect = None + self.config.return_value = mock_config + self.ceph_utils.UPGRADE_PATHS = { + 'luminous': 'mimic', + } + self.ceph_utils.resolve_ceph_version.side_effect = ( + lambda v: _vers.get(v) + ) + self.assertTrue(ceph_hooks.upgrade_available()) + + @patch.object(ceph_hooks, 'upgrade_available') + def test_install_packages(self, upgrade_available): + mock_config = MagicMock() + mock_config.get.side_effect = self.test_config.get + mock_config.changed.return_value = True + self.config.side_effect = None + self.config.return_value = mock_config + upgrade_available.return_value = False + ceph_hooks.install_packages() + self.add_source.assert_called_with('distro', 'secretkey') + self.apt_update.assert_called_with(fatal=True) + self.apt_purge.assert_called_with(ceph_hooks.APACHE_PACKAGES) + self.apt_install.assert_called_with(ceph_hooks.PACKAGES, + fatal=True) + mock_config.changed.assert_called_with('source') + self.filter_installed_packages.assert_called_with( + ceph_hooks.PACKAGES + ) + self.filter_missing_packages.assert_called_with( + ceph_hooks.APACHE_PACKAGES + ) + + @patch.object(ceph_hooks, 'upgrade_available') + def test_install_packages_upgrades(self, upgrade_available): + mock_config = MagicMock() + mock_config.get.side_effect = self.test_config.get + mock_config.changed.return_value = True + self.config.side_effect = None + self.config.return_value = mock_config + upgrade_available.return_value = True ceph_hooks.install_packages() self.add_source.assert_called_with('distro', 'secretkey') - self.assertTrue(self.apt_update.called) - self.apt_purge.assert_called_with(['libapache2-mod-fastcgi']) + self.apt_update.assert_called_with(fatal=True) + self.apt_purge.assert_called_with(ceph_hooks.APACHE_PACKAGES) + self.apt_install.assert_called_with(ceph_hooks.PACKAGES, + fatal=True) + mock_config.changed.assert_called_with('source') + self.filter_installed_packages.assert_not_called() + self.filter_missing_packages.assert_called_with( + ceph_hooks.APACHE_PACKAGES + ) def test_install(self): _install_packages = self.patch('install_packages') From bbaa5ac36b45b278d3055be2b139184aee291025 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 12 Feb 2019 15:55:38 -0800 Subject: [PATCH 1662/2699] Update charm-helpers-hooks.yaml and sync ch Using the new version of the sync tool which removes the charmhelpers directory before syncing, run charm helpers sync to find any unexpected missing dependencies. 
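One effect of this sync worth calling out: charmhelpers.contrib.python is
now a thin alias module for charmhelpers.fetch.python, so existing imports
keep working. A minimal check (a sketch only, assuming charmhelpers is on
the import path; not part of this change):

    from charmhelpers.contrib.python import packages as old_path
    from charmhelpers.fetch.python import packages as new_path

    # contrib/python.py simply re-exports the fetch.python modules,
    # so both names refer to the same module object.
    assert old_path is new_path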
Change-Id: I88706824d0f755b016fc393b273a76e9b09aa4c3 --- ceph-osd/charm-helpers-hooks.yaml | 2 +- .../contrib/openstack/amulet/utils.py | 16 +++--- .../charmhelpers/contrib/openstack/context.py | 10 ++-- .../contrib/openstack/templating.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 18 +++++- ceph-osd/hooks/charmhelpers/contrib/python.py | 21 +++++++ .../contrib/storage/linux/ceph.py | 27 +++++++-- ceph-osd/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 8 +++ .../{contrib => fetch}/python/__init__.py | 2 +- .../hooks/charmhelpers/fetch/python/debug.py | 54 ++++++++++++++++++ .../{contrib => fetch}/python/packages.py | 0 .../hooks/charmhelpers/fetch/python/rpdb.py | 56 +++++++++++++++++++ .../charmhelpers/fetch/python/version.py | 32 +++++++++++ 14 files changed, 224 insertions(+), 25 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/python.py rename ceph-osd/hooks/charmhelpers/{contrib => fetch}/python/__init__.py (92%) create mode 100644 ceph-osd/hooks/charmhelpers/fetch/python/debug.py rename ceph-osd/hooks/charmhelpers/{contrib => fetch}/python/packages.py (100%) create mode 100644 ceph-osd/hooks/charmhelpers/fetch/python/rpdb.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/python/version.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index b7fd6950..4cef00af 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -8,7 +8,7 @@ include: - contrib.hahelpers: - apache - cluster - - contrib.python.packages + - contrib.python - contrib.storage.linux - contrib.openstack - contrib.network.ip diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ea1fd8f3..53fa6506 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -88,14 +88,14 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, validation_function = self.validate_v2_endpoint_data xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} return validation_function(endpoints, admin_port, internal_port, public_port, expected) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 8a203754..78a339f6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1427,11 +1427,11 @@ def __call__(self): ctxt = {} if is_relation_made('zeromq-configuration', 'host'): for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) + for unit in related_units(rid): + ctxt['zmq_nonce'] = 
relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py index a623315d..050f8af5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py @@ -183,7 +183,7 @@ class OSConfigRenderer(object): /tmp/templates/grizzly/api-paste.ini /tmp/templates/havana/api-paste.ini - Since it was registered with the grizzly release, it first seraches + Since it was registered with the grizzly release, it first searches the grizzly directory for nova.conf, then the templates dir. When writing api-paste.ini, it will find the template in the grizzly diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 4e432a25..86b011b7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -83,7 +83,8 @@ add_source as fetch_add_source, SourceConfigError, GPGKeyError, - get_upstream_version + get_upstream_version, + filter_missing_packages ) from charmhelpers.fetch.snap import ( @@ -309,6 +310,15 @@ def error_out(msg): sys.exit(1) +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] + ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] @@ -972,7 +982,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", @@ -1241,7 +1253,7 @@ def remote_restart(rel_name, remote_service=None): def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports + """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. diff --git a/ceph-osd/hooks/charmhelpers/contrib/python.py b/ceph-osd/hooks/charmhelpers/contrib/python.py new file mode 100644 index 00000000..84cba8c4 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/python.py @@ -0,0 +1,21 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import + +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 76828201..63c93044 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -856,12 +856,22 @@ def _keyring_path(service): return KEYRING.format(service) -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ keyring = _keyring_path(service) if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) cmd = ['ceph-authtool', keyring, '--create-keyring', '--name=client.{}'.format(service), '--add-key={}'.format(key)] @@ -869,6 +879,11 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + def delete_keyring(service): """Delete an existing Ceph keyring.""" keyring = _keyring_path(service) @@ -905,7 +920,7 @@ def get_ceph_nodes(relation='ceph'): def configure(service, key, auth, use_syslog): """Perform basic configuration of Ceph.""" - create_keyring(service, key) + add_key(service, key) create_key_file(service, key) hosts = get_ceph_nodes() with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: @@ -1068,7 +1083,7 @@ def ensure_ceph_keyring(service, user=None, group=None, if not key: return False - create_keyring(service=service, key=key) + add_key(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 79953a44..47c1fc35 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -46,6 +46,7 @@ lsb_release, cmp_pkgrevno, CompareHostReleases, + get_distrib_codename, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index a6d375af..d7e920eb 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -72,6 +72,14 @@ def lsb_release(): return d +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + def cmp_pkgrevno(package, revno, pkgcache=None): """Compare supplied revno with the revno of the installed package. 
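The new get_distrib_codename helper above wraps a common lsb_release
pattern. A usage sketch (hypothetical caller on an Ubuntu host; the
configure_for_bionic function is invented for illustration):

    from charmhelpers.core.host import get_distrib_codename

    # lsb_release()['DISTRIB_CODENAME'].lower() under the hood,
    # e.g. 'bionic' on Ubuntu 18.04
    if get_distrib_codename() == 'bionic':
        configure_for_bionic()  # hypothetical charm-specific function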
diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/python/__init__.py similarity index 92% rename from ceph-osd/hooks/charmhelpers/contrib/python/__init__.py rename to ceph-osd/hooks/charmhelpers/fetch/python/__init__.py index d7567b86..bff99dc9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/debug.py b/ceph-osd/hooks/charmhelpers/fetch/python/debug.py new file mode 100644 index 00000000..757135ee --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/python/packages.py b/ceph-osd/hooks/charmhelpers/fetch/python/packages.py similarity index 100% rename from ceph-osd/hooks/charmhelpers/contrib/python/packages.py rename to ceph-osd/hooks/charmhelpers/fetch/python/packages.py diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/rpdb.py b/ceph-osd/hooks/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 00000000..9b31610c --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/version.py b/ceph-osd/hooks/charmhelpers/fetch/python/version.py new file mode 100644 index 00000000..3eb42103 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +__author__ = "Jorge Niedbalski " + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) From 82d4a92c4a91d9b0099007ce153540001a409bc4 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 12 Feb 2019 15:55:56 -0800 Subject: [PATCH 1663/2699] Update charm-helpers-hooks.yaml and sync ch Using the new version of the sync tool which removes the charmhelpers directory before syncing, run charm helpers sync to find any unexpected missing dependencies. 
Change-Id: I2248cccf26979cba368d27312692c197df5c4ef1 --- ceph-radosgw/charm-helpers-hooks.yaml | 2 +- .../contrib/openstack/amulet/utils.py | 16 +++--- .../charmhelpers/contrib/openstack/context.py | 10 ++-- .../contrib/openstack/templating.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 18 +++++- .../hooks/charmhelpers/contrib/python.py | 21 +++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 8 +++ .../{contrib => fetch}/python/__init__.py | 2 +- .../hooks/charmhelpers/fetch/python/debug.py | 54 ++++++++++++++++++ .../{contrib => fetch}/python/packages.py | 0 .../hooks/charmhelpers/fetch/python/rpdb.py | 56 +++++++++++++++++++ .../charmhelpers/fetch/python/version.py | 32 +++++++++++ 13 files changed, 203 insertions(+), 19 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/python.py rename ceph-radosgw/hooks/charmhelpers/{contrib => fetch}/python/__init__.py (92%) create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py rename ceph-radosgw/hooks/charmhelpers/{contrib => fetch}/python/packages.py (100%) create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/python/rpdb.py create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/python/version.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 370de9ae..3a12b370 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -5,7 +5,7 @@ include: - cli - osplatform - fetch - - contrib.python.packages + - contrib.python - contrib.storage.linux - contrib.hahelpers: - apache diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ea1fd8f3..53fa6506 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -88,14 +88,14 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, validation_function = self.validate_v2_endpoint_data xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} return validation_function(endpoints, admin_port, internal_port, public_port, expected) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 8a203754..78a339f6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1427,11 +1427,11 @@ def __call__(self): ctxt = {} if is_relation_made('zeromq-configuration', 'host'): for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) + for unit in related_units(rid): + ctxt['zmq_nonce'] = 
relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index a623315d..050f8af5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -183,7 +183,7 @@ class OSConfigRenderer(object): /tmp/templates/grizzly/api-paste.ini /tmp/templates/havana/api-paste.ini - Since it was registered with the grizzly release, it first seraches + Since it was registered with the grizzly release, it first searches the grizzly directory for nova.conf, then the templates dir. When writing api-paste.ini, it will find the template in the grizzly diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 4e432a25..86b011b7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -83,7 +83,8 @@ add_source as fetch_add_source, SourceConfigError, GPGKeyError, - get_upstream_version + get_upstream_version, + filter_missing_packages ) from charmhelpers.fetch.snap import ( @@ -309,6 +310,15 @@ def error_out(msg): sys.exit(1) +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] + ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] @@ -972,7 +982,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", @@ -1241,7 +1253,7 @@ def remote_restart(rel_name, remote_service=None): def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports + """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python.py b/ceph-radosgw/hooks/charmhelpers/contrib/python.py new file mode 100644 index 00000000..84cba8c4 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python.py @@ -0,0 +1,21 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import + +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 79953a44..47c1fc35 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -46,6 +46,7 @@ lsb_release, cmp_pkgrevno, CompareHostReleases, + get_distrib_codename, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index a6d375af..d7e920eb 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -72,6 +72,14 @@ def lsb_release(): return d +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + def cmp_pkgrevno(package, revno, pkgcache=None): """Compare supplied revno with the revno of the installed package. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/__init__.py similarity index 92% rename from ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py rename to ceph-radosgw/hooks/charmhelpers/fetch/python/__init__.py index d7567b86..bff99dc9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py new file mode 100644 index 00000000..757135ee --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py similarity index 100% rename from ceph-radosgw/hooks/charmhelpers/contrib/python/packages.py rename to ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/rpdb.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 00000000..9b31610c --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/version.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/version.py new file mode 100644 index 00000000..3eb42103 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +__author__ = "Jorge Niedbalski " + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) From fe2cdb121ae41646e30ff77fd03825636a78fce7 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 12 Feb 2019 15:55:47 -0800 Subject: [PATCH 1664/2699] Update charm-helpers-hooks.yaml and sync ch Using the new version of the sync tool which removes the charmhelpers directory before syncing, run charm helpers sync to find any unexpected missing dependencies. Change-Id: Ifa5ba56af4b120903c2cc3822834c4279cd56acc --- .../charmhelpers/contrib/openstack/utils.py | 18 ++++++++++--- .../hooks/charmhelpers/contrib/python.py | 21 +++++++++++++++ .../contrib/storage/linux/ceph.py | 27 ++++++++++++++----- ceph-proxy/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 8 ++++++ .../{contrib => fetch}/python/__init__.py | 2 +- .../{contrib => fetch}/python/debug.py | 2 +- .../{contrib => fetch}/python/packages.py | 0 .../{contrib => fetch}/python/rpdb.py | 0 .../{contrib => fetch}/python/version.py | 0 10 files changed, 68 insertions(+), 11 deletions(-) create mode 100644 ceph-proxy/hooks/charmhelpers/contrib/python.py rename ceph-proxy/hooks/charmhelpers/{contrib => fetch}/python/__init__.py (92%) rename ceph-proxy/hooks/charmhelpers/{contrib => fetch}/python/debug.py (96%) rename ceph-proxy/hooks/charmhelpers/{contrib => fetch}/python/packages.py (100%) rename ceph-proxy/hooks/charmhelpers/{contrib => fetch}/python/rpdb.py (100%) rename ceph-proxy/hooks/charmhelpers/{contrib => fetch}/python/version.py (100%) diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py index 4e432a25..86b011b7 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py @@ -83,7 +83,8 @@ add_source as fetch_add_source, SourceConfigError, GPGKeyError, - get_upstream_version + get_upstream_version, + filter_missing_packages ) from charmhelpers.fetch.snap import ( @@ -309,6 +310,15 @@ def error_out(msg): sys.exit(1) +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] 
+ ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] @@ -972,7 +982,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", @@ -1241,7 +1253,7 @@ def remote_restart(rel_name, remote_service=None): def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports + """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python.py b/ceph-proxy/hooks/charmhelpers/contrib/python.py new file mode 100644 index 00000000..84cba8c4 --- /dev/null +++ b/ceph-proxy/hooks/charmhelpers/contrib/python.py @@ -0,0 +1,21 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py index 76828201..63c93044 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -856,12 +856,22 @@ def _keyring_path(service): return KEYRING.format(service) -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ keyring = _keyring_path(service) if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) cmd = ['ceph-authtool', keyring, '--create-keyring', '--name=client.{}'.format(service), '--add-key={}'.format(key)] @@ -869,6 +879,11 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def create_keyring(service, key): + """Deprecated. 
Please use the more accurately named 'add_key'""" + return add_key(service, key) + + def delete_keyring(service): """Delete an existing Ceph keyring.""" keyring = _keyring_path(service) @@ -905,7 +920,7 @@ def get_ceph_nodes(relation='ceph'): def configure(service, key, auth, use_syslog): """Perform basic configuration of Ceph.""" - create_keyring(service, key) + add_key(service, key) create_key_file(service, key) hosts = get_ceph_nodes() with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: @@ -1068,7 +1083,7 @@ def ensure_ceph_keyring(service, user=None, group=None, if not key: return False - create_keyring(service=service, key=key) + add_key(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/hooks/charmhelpers/core/host.py index 79953a44..47c1fc35 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host.py +++ b/ceph-proxy/hooks/charmhelpers/core/host.py @@ -46,6 +46,7 @@ lsb_release, cmp_pkgrevno, CompareHostReleases, + get_distrib_codename, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py index a6d375af..d7e920eb 100644 --- a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -72,6 +72,14 @@ def lsb_release(): return d +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + def cmp_pkgrevno(package, revno, pkgcache=None): """Compare supplied revno with the revno of the installed package. diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py b/ceph-proxy/hooks/charmhelpers/fetch/python/__init__.py similarity index 92% rename from ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py rename to ceph-proxy/hooks/charmhelpers/fetch/python/__init__.py index d7567b86..bff99dc9 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py b/ceph-proxy/hooks/charmhelpers/fetch/python/debug.py similarity index 96% rename from ceph-proxy/hooks/charmhelpers/contrib/python/debug.py rename to ceph-proxy/hooks/charmhelpers/fetch/python/debug.py index d2142c75..757135ee 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/python/debug.py +++ b/ceph-proxy/hooks/charmhelpers/fetch/python/debug.py @@ -20,7 +20,7 @@ import atexit import sys -from charmhelpers.contrib.python.rpdb import Rpdb +from charmhelpers.fetch.python.rpdb import Rpdb from charmhelpers.core.hookenv import ( open_port, close_port, diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/packages.py b/ceph-proxy/hooks/charmhelpers/fetch/python/packages.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/python/packages.py rename to ceph-proxy/hooks/charmhelpers/fetch/python/packages.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py b/ceph-proxy/hooks/charmhelpers/fetch/python/rpdb.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/python/rpdb.py rename to ceph-proxy/hooks/charmhelpers/fetch/python/rpdb.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python/version.py b/ceph-proxy/hooks/charmhelpers/fetch/python/version.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/python/version.py rename to ceph-proxy/hooks/charmhelpers/fetch/python/version.py From 8b12786bd7a36cf08869cfb1fb17c4ff1ebd255d Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 13 Feb 2019 11:09:00 +0000 Subject: [PATCH 1665/2699] Drop keystone integration for PKI token format The PKI token format is no longer supported by the keystone charm; drop code, tests and associated template fragments which deal with configuration of PKI revocation list processing. Change-Id: Ie08779c2aef15589b621c324808bb13089fb4f72 Closes-Bug: 1586550 --- ceph-radosgw/config.yaml | 4 - ceph-radosgw/hooks/ceph_radosgw_context.py | 7 - ceph-radosgw/hooks/hooks.py | 14 - ceph-radosgw/hooks/utils.py | 271 ------------------ ceph-radosgw/templates/ceph.conf | 4 - .../unit_tests/test_ceph_radosgw_context.py | 7 - .../unit_tests/test_ceph_radosgw_utils.py | 134 --------- ceph-radosgw/unit_tests/test_hooks.py | 4 - 8 files changed, 445 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 1a75ed0d..860ad4b5 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -135,10 +135,6 @@ options: type: int default: 500 description: Number of keystone tokens to hold in local cache. - revocation-check-interval: - type: int - default: 600 - description: Interval between revocation checks to keystone. 
# HA config use-syslog: type: boolean diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index aec22968..7a2b23d7 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -98,7 +98,6 @@ def __call__(self): ctxt['auth_type'] = 'keystone' ctxt['user_roles'] = config('operator-roles') ctxt['cache_size'] = config('cache-size') - ctxt['revocation_check_interval'] = config('revocation-check-interval') if self.context_complete(ctxt): return ctxt return {} @@ -204,12 +203,6 @@ def __call__(self): 'unit_public_ip': unit_public_ip(), } - certs_path = '/var/lib/ceph/nss' - paths = [os.path.join(certs_path, 'ca.pem'), - os.path.join(certs_path, 'signing_certificate.pem')] - if all([os.path.isfile(p) for p in paths]): - ctxt['cms'] = True - # NOTE(dosaboy): these sections must correspond to what is supported in # the config template. sections = ['global', 'client.radosgw.gateway'] diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 77f42263..4fb19c7b 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -75,12 +75,10 @@ generate_ha_relation_data, ) from utils import ( - enable_pocket, register_configs, setup_ipv6, services, assess_status, - setup_keystone_certs, disable_unused_apache_sites, pause_unit_helper, resume_unit_helper, @@ -99,17 +97,10 @@ hooks = Hooks() CONFIGS = register_configs() -NSS_DIR = '/var/lib/ceph/nss' - PACKAGES = [ 'haproxy', - 'libnss3-tools', 'ntp', - 'python-keystoneclient', - 'python-six', # Ensures correct version is installed for precise - # since python-keystoneclient does not pull in icehouse - # version 'radosgw', 'apache2' ] @@ -166,10 +157,7 @@ def install_packages(): def install(): status_set('maintenance', 'Executing pre-install') execd_preinstall() - enable_pocket('multiverse') install_packages() - if not os.path.exists(NSS_DIR): - os.makedirs(NSS_DIR) if not os.path.exists('/etc/ceph'): os.makedirs('/etc/ceph') @@ -385,8 +373,6 @@ def configure_https(): if not is_unit_paused_set(): service_reload('apache2', restart_on_failure=True) - setup_keystone_certs(CONFIGS) - @hooks.hook('update-status') @harden() diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index b6622adc..14dbe3bb 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -13,10 +13,8 @@ # limitations under the License. import os -import re import socket import subprocess -import sys from collections import OrderedDict from copy import deepcopy @@ -24,19 +22,11 @@ import ceph_radosgw_context from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, - INFO, relation_get, relation_ids, related_units, application_version_set, ) -from charmhelpers.contrib.network.ip import ( - format_ipv6_addr, - is_ipv6, -) from charmhelpers.contrib.openstack import ( context, templating, @@ -46,9 +36,6 @@ pause_unit, resume_unit, ) -from charmhelpers.contrib.openstack.keystone import ( - format_endpoint, -) from charmhelpers.contrib.hahelpers.cluster import ( get_hacluster_config, https, @@ -56,7 +43,6 @@ from charmhelpers.core.host import ( cmp_pkgrevno, lsb_release, - mkdir, CompareHostReleases, init_is_systemd, ) @@ -69,36 +55,6 @@ get_upstream_version, ) -# NOTE: some packages are installed by the charm so may not be available -# yet. Calls that depend on them should be aware of this (and use the -# defer_if_unavailable() decorator). 
-try: - import keystoneclient - from keystoneclient.v2_0 import client - from keystoneclient.v3 import client as client_v3 - try: - # Kilo and newer - from keystoneclient.exceptions import ( - ConnectionRefused, - Forbidden, - InternalServerError, - ) - except ImportError: - # Juno and older - from keystoneclient.exceptions import ( - ConnectionError as ConnectionRefused, - Forbidden, - InternalServerError, - ) -except ImportError: - keystoneclient = None - -# This is installed as a dep of python-keystoneclient -try: - import requests -except ImportError: - requests = None - # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. REQUIRED_INTERFACES = { @@ -139,14 +95,6 @@ ]) -class KSCertSetupException(BaseException): - """Keystone SSL Certificate Setup Exception. - - This exception should be raised if any part of cert setup fails. - """ - pass - - def resource_map(): """Dynamically generate a map of resources. @@ -198,18 +146,6 @@ def services(): return list(set(_services)) -def enable_pocket(pocket): - apt_sources = "/etc/apt/sources.list" - with open(apt_sources, "r") as sources: - lines = sources.readlines() - with open(apt_sources, "w") as sources: - for line in lines: - if pocket in line: - sources.write(re.sub('^# deb', 'deb', line)) - else: - sources.write(line) - - def get_optional_interfaces(): """Return the optional interfaces that should be checked if the relavent relations have appeared. @@ -347,213 +283,6 @@ def get_pkg_version(name): return version -def defer_if_unavailable(modules): - """If a function depends on a package/module that is installed by the charm - but may not yet have been installed, it can be deferred using this - decorator. - - :param modules: list of modules that must be importable. - """ - def _inner1_defer_if_unavailable(f): - def _inner2_defer_if_unavailable(*args, **kwargs): - for m in modules: - if m not in sys.modules: - log("Module '{}' does not appear to be available " - "yet - deferring call to '{}' until it " - "is.".format(m, f.__name__), level=INFO) - return - - return f(*args, **kwargs) - - return _inner2_defer_if_unavailable - - return _inner1_defer_if_unavailable - - -@defer_if_unavailable(['keystoneclient']) -def get_ks_cert(ksclient, auth_endpoint, cert_type): - """Get certificate from keystone. 
- - :param ksclient: Keystone client - :param auth_endpoint: Keystone auth endpoint url - :param certs_path: Path to local certs store - :returns: certificate - """ - if ksclient.version == 'v3': - if cert_type == 'signing': - cert_type = 'certificates' - request = ("{}OS-SIMPLE-CERT/{}" - "".format(auth_endpoint, cert_type)) - else: - request = "{}/certificates/{}".format(auth_endpoint, cert_type) - - try: - try: - # Kilo and newer - if cert_type == 'ca': - cert = ksclient.certificates.get_ca_certificate() - elif cert_type in ['signing', 'certificates']: - cert = ksclient.certificates.get_signing_certificate() - else: - raise KSCertSetupException("Invalid cert type " - "'{}'".format(cert_type)) - except AttributeError: - # Keystone v3 or Juno and older - response = requests.request('GET', request) - if response.status_code == requests.codes.ok: - cert = response.text - else: - raise KSCertSetupException("Unable to retrieve certificate") - except (ConnectionRefused, requests.exceptions.ConnectionError, - Forbidden, InternalServerError): - raise KSCertSetupException("Error connecting to keystone") - - return cert - - -@defer_if_unavailable(['keystoneclient']) -def get_ks_ca_cert(ksclient, auth_endpoint, certs_path): - """"Get and store keystone CA certificate. - - :param ksclient: Keystone client - :param auth_endpoint: Keystone auth endpoint url - :param certs_path: Path to local certs store - :returns: None - """ - - ca_cert = get_ks_cert(ksclient, auth_endpoint, 'ca') - if ca_cert: - try: - # Cert should not contain unicode chars. - str(ca_cert) - except UnicodeEncodeError: - raise KSCertSetupException("Did not get a valid ca cert from " - "keystone - cert setup incomplete") - - log("Updating ca cert from keystone", level=DEBUG) - ca = os.path.join(certs_path, 'ca.pem') - with open(ca, 'w') as fd: - fd.write(ca_cert) - - out = subprocess.check_output(['openssl', 'x509', '-in', ca, - '-pubkey']) - p = subprocess.Popen(['certutil', '-d', certs_path, '-A', '-n', 'ca', - '-t', 'TCu,Cu,Tuw'], stdin=subprocess.PIPE) - p.communicate(out) - else: - raise KSCertSetupException("No ca cert available from keystone") - - -@defer_if_unavailable(['keystoneclient']) -def get_ks_signing_cert(ksclient, auth_endpoint, certs_path): - """"Get and store keystone signing certificate. - - :param ksclient: Keystone client - :param auth_endpoint: Keystone auth endpoint url - :param certs_path: Path to local certs store - :returns: None - """ - signing_cert = get_ks_cert(ksclient, auth_endpoint, 'signing') - if signing_cert: - try: - # Cert should not contain unicode chars. - str(signing_cert) - except UnicodeEncodeError: - raise KSCertSetupException("Invalid signing cert from keystone") - - log("Updating signing cert from keystone", level=DEBUG) - signing_cert_path = os.path.join(certs_path, 'signing_certificate.pem') - with open(signing_cert_path, 'w') as fd: - fd.write(signing_cert) - - out = subprocess.check_output(['openssl', 'x509', '-in', - signing_cert_path, '-pubkey']) - p = subprocess.Popen(['certutil', '-A', '-d', certs_path, '-n', - 'signing_cert', '-t', 'P,P,P'], - stdin=subprocess.PIPE) - p.communicate(out) - else: - raise KSCertSetupException("No signing cert available from keystone") - - -@defer_if_unavailable(['keystoneclient']) -def setup_keystone_certs(CONFIGS): - """ - Get CA and signing certs from Keystone used to decrypt revoked token list. 
- - :param unit: context unit id - :param rid: context relation id - :returns: None - """ - certs_path = '/var/lib/ceph/nss' - if not os.path.exists(certs_path): - mkdir(certs_path) - - # Do not continue until identity-relation is complete - if 'identity-service' not in CONFIGS.complete_contexts(): - log("Missing relation settings - deferring cert setup", - level=DEBUG) - return - - ksclient = get_keystone_client_from_relation() - if not ksclient: - log("Failed to get keystoneclient", level=ERROR) - return - - auth_endpoint = ksclient.auth_endpoint - - try: - get_ks_ca_cert(ksclient, auth_endpoint, certs_path) - get_ks_signing_cert(ksclient, auth_endpoint, certs_path) - except KSCertSetupException as e: - log("Keystone certs setup incomplete - {}".format(e), level=INFO) - - -# TODO: Move to charmhelpers -# TODO: Make it session aware -def get_keystone_client_from_relation(relation_type='identity-service'): - """ Get keystone client from relation data - - :param relation_type: Relation to keystone - :returns: Keystone client - """ - required = ['admin_token', 'auth_host', 'auth_port', 'api_version'] - settings = {} - - rdata = {} - for relid in relation_ids(relation_type): - for unit in related_units(relid): - rdata = relation_get(unit=unit, rid=relid) or {} - if set(required).issubset(set(rdata.keys())): - settings = {key: rdata.get(key) for key in required} - break - - if not settings: - log("Required settings not yet provided by any identity-service " - "relation units", INFO) - return None - - auth_protocol = rdata.get('auth_protocol', 'http') - if is_ipv6(settings.get('auth_host')): - settings['auth_host'] = format_ipv6_addr(settings.get('auth_host')) - - api_version = rdata.get('api_version') - auth_endpoint = format_endpoint(auth_protocol, - settings['auth_host'], - settings['auth_port'], - settings['api_version']) - - if api_version and '3' in api_version: - ksclient = client_v3.Client(token=settings['admin_token'], - endpoint=auth_endpoint) - else: - ksclient = client.Client(token=settings['admin_token'], - endpoint=auth_endpoint) - # Add simple way to retrieve keystone auth endpoint - ksclient.auth_endpoint = auth_endpoint - return ksclient - - def disable_unused_apache_sites(): """Ensure that unused apache configurations are disabled to prevent them from conflicting with the charm-provided version. 
diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 7b403a82..3b832c4b 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -48,11 +48,7 @@ rgw keystone admin token = {{ admin_token }} {% endif -%} rgw keystone accepted roles = {{ user_roles }} rgw keystone token cache size = {{ cache_size }} -rgw keystone revocation interval = {{ revocation_check_interval }} rgw s3 auth use keystone = true -{% if cms -%} -nss db path = /var/lib/ceph/nss -{% endif %} {% else -%} rgw swift url = http://{{ unit_public_ip }} {% endif -%} diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 7bf051b2..c1a75daf 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -85,7 +85,6 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '7500000') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] @@ -122,7 +121,6 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'auth_protocol': 'http', 'auth_type': 'keystone', 'cache_size': '42', - 'revocation_check_interval': '7500000', 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', @@ -143,7 +141,6 @@ def test_ids_ctxt_missing_admin_domain_id( jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '7500000') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] @@ -178,7 +175,6 @@ def test_ids_ctxt_missing_admin_domain_id( 'auth_protocol': 'http', 'auth_type': 'keystone', 'cache_size': '42', - 'revocation_check_interval': '7500000', 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', @@ -199,7 +195,6 @@ def test_ids_ctxt_v3( jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '7500000') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] @@ -239,7 +234,6 @@ def test_ids_ctxt_v3( 'auth_protocol': 'http', 'auth_type': 'keystone', 'cache_size': '42', - 'revocation_check_interval': '7500000', 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', @@ -262,7 +256,6 @@ def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') - self.test_config.set('revocation-check-interval', '7500000') self.test_relation.set({}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index fcb1dd98..6d1e6b14 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -13,9 +13,7 @@ # limitations under 
the License. from mock import ( - call, patch, - mock_open, MagicMock, ) @@ -26,7 +24,6 @@ TO_PATCH = [ 'application_version_set', 'get_upstream_version', - 'format_endpoint', 'https', 'relation_ids', 'relation_get', @@ -96,137 +93,6 @@ def test_pause_resume_helper(self, services): # ports=None whilst port checks are disabled. f.assert_called_once_with('assessor', services='s1', ports=None) - @patch.object(utils, 'get_keystone_client_from_relation') - @patch.object(utils, 'is_ipv6', lambda addr: False) - @patch.object(utils, 'get_ks_signing_cert') - @patch.object(utils, 'get_ks_ca_cert') - @patch.object(utils, 'mkdir') - def test_setup_keystone_certs(self, mock_mkdir, - mock_get_ks_ca_cert, - mock_get_ks_signing_cert, - mock_get_keystone_client): - auth_host = 'foo/bar' - auth_port = 80 - auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - mock_ksclient = MagicMock() - mock_ksclient.auth_endpoint = auth_url - mock_get_keystone_client.return_value = mock_ksclient - - configs = MagicMock() - configs.complete_contexts.return_value = ['identity-service'] - - utils.setup_keystone_certs(configs) - mock_get_ks_signing_cert.assert_has_calls([call(mock_ksclient, - auth_url, - '/var/lib/ceph/nss')]) - mock_get_ks_ca_cert.assert_has_calls([call(mock_ksclient, auth_url, - '/var/lib/ceph/nss')]) - - @patch.object(utils, 'client_v3') - @patch.object(utils, 'client') - @patch.object(utils, 'related_units') - @patch.object(utils, 'relation_ids') - @patch.object(utils, 'is_ipv6', lambda addr: False) - @patch.object(utils, 'relation_get') - def test_get_ks_client_from_relation(self, mock_relation_get, - mock_relation_ids, - mock_related_units, - mock_client, - mock_client_v3): - auth_host = 'foo/bar' - auth_port = 80 - admin_token = '666' - auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - self.format_endpoint.return_value = auth_url - mock_relation_ids.return_value = ['identity-service:5'] - mock_related_units.return_value = ['keystone/1'] - rel_data = {'auth_host': auth_host, - 'auth_port': auth_port, - 'admin_token': admin_token, - 'api_version': '2'} - - mock_relation_get.return_value = rel_data - utils.get_keystone_client_from_relation() - mock_client.Client.assert_called_with(endpoint=auth_url, - token=admin_token) - - auth_url = 'http://%s:%s/v3' % (auth_host, auth_port) - self.format_endpoint.return_value = auth_url - rel_data['api_version'] = '3' - mock_relation_get.return_value = rel_data - utils.get_keystone_client_from_relation() - mock_client_v3.Client.assert_called_with(endpoint=auth_url, - token=admin_token) - - @patch.object(utils, 'client_v3') - @patch.object(utils, 'client') - @patch.object(utils, 'related_units') - @patch.object(utils, 'relation_ids') - @patch.object(utils, 'is_ipv6', lambda addr: False) - @patch.object(utils, 'relation_get') - def test_get_ks_client_from_relation_not_available(self, mock_relation_get, - mock_relation_ids, - mock_related_units, - mock_client, - mock_client_v3): - mock_relation_ids.return_value = ['identity-service:5'] - mock_related_units.return_value = ['keystone/1'] - rel_data = {'auth_port': '5000', - 'admin_token': 'foo', - 'api_version': '2'} - - mock_relation_get.return_value = rel_data - ksclient = utils.get_keystone_client_from_relation() - self.assertIsNone(ksclient) - - @patch.object(utils, 'get_ks_cert') - @patch.object(utils.subprocess, 'Popen') - @patch.object(utils.subprocess, 'check_output') - def test_get_ks_signing_cert(self, mock_check_output, mock_Popen, - mock_get_ks_cert): - auth_host = 'foo/bar' - auth_port = 80 - 
admin_token = '666' - auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - - m = mock_open() - with patch.object(utils, 'open', m, create=True): - - mock_get_ks_cert.return_value = 'signing_cert_data' - utils.get_ks_signing_cert(admin_token, auth_url, '/foo/bar') - - mock_get_ks_cert.return_value = None - with self.assertRaises(utils.KSCertSetupException): - utils.get_ks_signing_cert(admin_token, auth_url, '/foo/bar') - - c = ['openssl', 'x509', '-in', - '/foo/bar/signing_certificate.pem', - '-pubkey'] - mock_check_output.assert_called_with(c) - - @patch.object(utils, 'get_ks_cert') - @patch.object(utils.subprocess, 'Popen') - @patch.object(utils.subprocess, 'check_output') - def test_get_ks_ca_cert(self, mock_check_output, mock_Popen, - mock_get_ks_cert): - auth_host = 'foo/bar' - auth_port = 80 - admin_token = '666' - auth_url = 'http://%s:%s/v2.0' % (auth_host, auth_port) - - m = mock_open() - with patch.object(utils, 'open', m, create=True): - mock_get_ks_cert.return_value = 'ca_cert_data' - utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') - - mock_get_ks_cert.return_value = None - with self.assertRaises(utils.KSCertSetupException): - utils.get_ks_ca_cert(admin_token, auth_url, '/foo/bar') - - c = ['openssl', 'x509', '-in', '/foo/bar/ca.pem', - '-pubkey'] - mock_check_output.assert_called_with(c) - def _setup_relation_data(self, data): self.relation_ids.return_value = data.keys() self.related_units.side_effect = ( diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index a6ddb431..070ce8e0 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -37,7 +37,6 @@ 'config', 'cmp_pkgrevno', 'execd_preinstall', - 'enable_pocket', 'log', 'open_port', 'os', @@ -55,7 +54,6 @@ 'service_stop', 'service_restart', 'service', - 'setup_keystone_certs', 'service_name', 'socket', 'restart_map', @@ -150,8 +148,6 @@ def test_install(self): ceph_hooks.install() self.assertTrue(self.execd_preinstall.called) self.assertTrue(_install_packages.called) - self.enable_pocket.assert_called_with('multiverse') - self.os.makedirs.called_with('/var/lib/ceph/nss') @patch.object(ceph_hooks, 'certs_joined') @patch.object(ceph_hooks, 'update_nrpe_config') From 14e08269f46df1368395df3b599837684a8d9c2b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 9 Jan 2019 16:47:28 +0000 Subject: [PATCH 1666/2699] Add support for RADOS gateway multi-site replication Add new radosgw-multisite typed master and slave relations to support configuration of separate ceph-radosgw deployments as a single realm and zonegroup to support replication of data between distinct RADOS gateway deployments. This mandates the use of the realm, zonegroup and zone configuration options of which realm and zonegroup must match between instances of the ceph-radosgw application participating in the master/slave relation. The radosgw-multisite relation may be deployed as a model local relation or as a cross-model relation. 
Change-Id: I094f89b0f668e012482ca8aace1756c911b79d17 Closes-Bug: 1666880 --- ceph-radosgw/README.md | 129 +++++- ceph-radosgw/TODO | 4 - ceph-radosgw/actions.yaml | 8 + ceph-radosgw/actions/actions.py | 94 ++++- ceph-radosgw/actions/promote | 1 + ceph-radosgw/actions/readonly | 1 + ceph-radosgw/actions/readwrite | 1 + ceph-radosgw/actions/tidydefaults | 1 + .../bundles/bionic-rocky-multisite.yaml | 73 ++++ ceph-radosgw/bundles/us-east.yaml | 41 ++ ceph-radosgw/bundles/us-west.yaml | 41 ++ ceph-radosgw/config.yaml | 19 + ceph-radosgw/hooks/ceph_radosgw_context.py | 3 + ceph-radosgw/hooks/ceph_rgw.py | 11 +- ceph-radosgw/hooks/hooks.py | 198 +++++++++- ceph-radosgw/hooks/leader-settings-changed | 1 + ceph-radosgw/hooks/master-relation-broken | 1 + ceph-radosgw/hooks/master-relation-changed | 1 + ceph-radosgw/hooks/master-relation-departed | 1 + ceph-radosgw/hooks/master-relation-joined | 1 + ceph-radosgw/hooks/multisite.py | 367 ++++++++++++++++++ ceph-radosgw/hooks/slave-relation-broken | 1 + ceph-radosgw/hooks/slave-relation-changed | 1 + ceph-radosgw/hooks/slave-relation-departed | 1 + ceph-radosgw/hooks/slave-relation-joined | 1 + ceph-radosgw/hooks/utils.py | 112 +++++- ceph-radosgw/metadata.yaml | 4 + ceph-radosgw/templates/ceph.conf | 4 + ceph-radosgw/unit_tests/test_actions.py | 64 +++ .../unit_tests/test_ceph_radosgw_context.py | 12 +- .../unit_tests/test_ceph_radosgw_utils.py | 143 +++++++ ceph-radosgw/unit_tests/test_hooks.py | 306 ++++++++++++++- ceph-radosgw/unit_tests/test_multisite.py | 237 +++++++++++ .../testdata/test_create_realm.json | 7 + .../unit_tests/testdata/test_create_zone.json | 36 ++ .../testdata/test_create_zonegroup.json | 51 +++ .../unit_tests/testdata/test_list_realms.json | 6 + .../unit_tests/testdata/test_list_users.json | 5 + .../testdata/test_list_zonegroups.json | 6 + .../unit_tests/testdata/test_list_zones.json | 6 + 40 files changed, 1970 insertions(+), 30 deletions(-) delete mode 100644 ceph-radosgw/TODO create mode 120000 ceph-radosgw/actions/promote create mode 120000 ceph-radosgw/actions/readonly create mode 120000 ceph-radosgw/actions/readwrite create mode 120000 ceph-radosgw/actions/tidydefaults create mode 100644 ceph-radosgw/bundles/bionic-rocky-multisite.yaml create mode 100644 ceph-radosgw/bundles/us-east.yaml create mode 100644 ceph-radosgw/bundles/us-west.yaml create mode 120000 ceph-radosgw/hooks/leader-settings-changed create mode 120000 ceph-radosgw/hooks/master-relation-broken create mode 120000 ceph-radosgw/hooks/master-relation-changed create mode 120000 ceph-radosgw/hooks/master-relation-departed create mode 120000 ceph-radosgw/hooks/master-relation-joined create mode 100644 ceph-radosgw/hooks/multisite.py create mode 120000 ceph-radosgw/hooks/slave-relation-broken create mode 120000 ceph-radosgw/hooks/slave-relation-changed create mode 120000 ceph-radosgw/hooks/slave-relation-departed create mode 120000 ceph-radosgw/hooks/slave-relation-joined create mode 100644 ceph-radosgw/unit_tests/test_multisite.py create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_realm.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_zone.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_zonegroup.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_list_realms.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_list_users.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_list_zonegroups.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_list_zones.json diff --git 
a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 65643ad0..42b6d975 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -100,7 +100,7 @@ To use this feature, use the --bind option when deploying the charm: alternatively these can also be provided as part of a juju native bundle configuration: ceph-radosgw: - charm: cs:xenial/ceph-radosgw + charm: cs:ceph-radosgw num_units: 1 bindings: public: public-space @@ -109,19 +109,122 @@ alternatively these can also be provided as part of a juju native bundle configu NOTE: Spaces must be configured in the underlying provider prior to attempting to use them. -NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +NOTE: Existing deployments using os-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. -Contact Information -=================== +Multi-Site replication +====================== -Author: James Page -Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-radosgw/+filebug -Location: http://jujucharms.com/charms/ceph-radosgw +Overview +-------- + +This charm supports configuration of native replication between Ceph RADOS +gateway deployments. + +This is supported both within a single model and between different models +using cross-model relations. + +By default either ceph-radosgw deployment will accept write operations. + +Deployment +---------- + +NOTE: example bundles for the us-west and us-east models can be found +in the bundles subdirectory of the ceph-radosgw charm. + +NOTE: switching from a standalone deployment to a multi-site replicated +deployment is not supported. + +To deploy in this configuration ensure that the following configuration +options are set on the ceph-radosgw charm deployments - in this example +rgw-us-east and rgw-us-west are both instances of the ceph-radosgw charm: + + rgw-us-east: + realm: replicated + zonegroup: us + zone: us-east + rgw-us-west: + realm: replicated + zonegroup: us + zone: us-west + +When deploying with this configuration the ceph-radosgw applications will +deploy into a blocked state until the master/slave (cross-model) relation +is added. + +Typically each ceph-radosgw deployment will be associated with a separate +ceph cluster at different physical locations - in this example the deployments +are in different models ('us-east' and 'us-west'). + +One ceph-radosgw application acts as the initial master for the deployment - +setup the master relation endpoint as the provider of the offer for the +cross-model relation: + + juju offer -m us-east rgw-us-east:master + +The cross-model relation offer can then be consumed in the other model and +related to the slave ceph-radosgw application: + + juju consume -m us-west admin/us-east.rgw-us-east + juju add-relation -m us-west rgw-us-west:slave rgw-us-east:master + +Once the relation has been added the realm, zonegroup and zone configuration +will be created in the master deployment and then synced to the slave +deployment. 
+
+The current sync status can be validated from either model:
+
+    juju ssh -m us-east ceph-mon/0
+    sudo radosgw-admin sync status
+          realm 142eb39c-67c4-42b3-9116-1f4ffca23964 (replicated)
+      zonegroup 7b69f059-425b-44f5-8a21-ade63c2034bd (us)
+           zone 4ee3bc39-b526-4ac9-a233-64ebeacc4574 (us-east)
+  metadata sync no sync (zone is master)
+      data sync source: db876cf0-62a8-4b95-88f4-d0f543136a07 (us-west)
+                        syncing
+                        full sync: 0/128 shards
+                        incremental sync: 128/128 shards
+                        data is caught up with source
+
+Once the deployment is complete, the default zone and zonegroup can
+optionally be tidied using the 'tidydefaults' action:
+
+    juju run-action -m us-west --unit rgw-us-west/0 tidydefaults
+
+This operation is not reversible.
+
+Failover/Recovery
+-----------------
+
+In the event that the site hosting the zone which is the master for metadata
+(in this example us-east) has an outage, the master metadata zone must be
+failed over to the slave site; this operation is performed using the 'promote'
+action:
+
+    juju run-action -m us-west --wait rgw-us-west/0 promote
+
+Once this action has completed, the slave site will be the master for metadata
+updates and the deployment will accept new uploads of data.
+
+Once the failed site has been recovered, it will resync and resume as a slave
+to the promoted master site (us-west in this example).
+
+The master metadata zone can be failed back to its original location once
+resync has completed, using the 'promote' action:
+
+    juju run-action -m us-east --wait rgw-us-east/0 promote
+
+Read/write vs Read-only
+-----------------------
+
+By default all zones within a deployment will be read/write capable but only
+the master zone can be used to create new containers.
+
+Non-master zones can optionally be marked as read-only by using the 'readonly'
+action:
+
+    juju run-action -m us-east --wait rgw-us-east/0 readonly
 
-Bootnotes
-=========
+A zone that is currently read-only can be switched to read/write mode by either
+promoting it to be the current master or by using the 'readwrite' action:
 
-The Ceph RADOS Gateway makes use of a multiverse package libapache2-mod-fastcgi.
-As such it will try to automatically enable the multiverse pocket in
-/etc/apt/sources.list.  Note that there is noting 'wrong' with multiverse
-components - they typically have less liberal licensing policies or suchlike.
+    juju run-action -m us-east --wait rgw-us-east/0 readwrite
diff --git a/ceph-radosgw/TODO b/ceph-radosgw/TODO
deleted file mode 100644
index 75ceb8d5..00000000
--- a/ceph-radosgw/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-RADOS Gateway Charm
--------------------
-
- * Improved process control of radosgw daemon (to many restarts)
diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml
index 4aa9d8f5..d76f76bb 100644
--- a/ceph-radosgw/actions.yaml
+++ b/ceph-radosgw/actions.yaml
@@ -2,3 +2,11 @@ pause:
   description: Pause the ceph-radosgw unit.
 resume:
   descrpition: Resume the ceph-radosgw unit.
+promote:
+  description: Promote the zone associated with the local units to master/default (multi-site).
+readonly:
+  description: Mark the zone associated with the local units as read only (multi-site).
+readwrite:
+  description: Mark the zone associated with the local units as read/write (multi-site).
+tidydefaults:
+  description: Delete default zone and zonegroup configuration (multi-site).
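The actions.py diff that follows wires these four action names to Python
callables; each file under actions/ is a symlink back to actions.py, so
dispatch happens on the basename used to invoke the script. The main() body
falls outside the hunk context below, but the conventional charm pattern it
follows looks roughly like this sketch (assuming the ACTIONS dict and the
action_fail import from that file):

    import os
    import sys

    def main(args):
        # The symlink name (e.g. 'promote') selects the callable.
        action_name = os.path.basename(args[0])
        try:
            action = ACTIONS[action_name]
        except KeyError:
            return "Action {} undefined".format(action_name)
        try:
            action(args)
        except Exception as e:
            action_fail(str(e))

    if __name__ == "__main__":
        sys.exit(main(sys.argv))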
diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py
index 5446cc99..c0c23e3b 100755
--- a/ceph-radosgw/actions/actions.py
+++ b/ceph-radosgw/actions/actions.py
@@ -15,10 +15,18 @@
 # limitations under the License.
 
 import os
+import subprocess
 import sys
 
 sys.path.append('hooks/')
-from charmhelpers.core.hookenv import action_fail
+
+import multisite
+
+from charmhelpers.core.hookenv import (
+    action_fail,
+    config,
+    action_set,
+)
 from utils import (
     pause_unit_helper,
     resume_unit_helper,
@@ -39,9 +47,91 @@ def resume(args):
     resume_unit_helper(register_configs())
 
 
+def promote(args):
+    """Promote zone associated with local RGW units to master/default"""
+    zone = config('zone')
+    if not zone:
+        action_fail('No zone configuration set, not promoting')
+        return
+    try:
+        multisite.modify_zone(zone,
+                              default=True, master=True)
+        multisite.update_period()
+        action_set(
+            values={'message': 'zone:{} promoted to '
+                    'master/default'.format(zone)}
+        )
+    except subprocess.CalledProcessError as cpe:
+        action_fail('Unable to promote zone:{} '
+                    'to master: {}'.format(zone, cpe.output))
+
+
+def readonly(args):
+    """Mark zone associated with local RGW units as read only"""
+    zone = config('zone')
+    if not zone:
+        action_fail('No zone configuration set, not marking read only')
+        return
+    try:
+        multisite.modify_zone(zone, readonly=True)
+        multisite.update_period()
+        action_set(
+            values={
+                'message': 'zone:{} marked as read only'.format(zone)
+            }
+        )
+    except subprocess.CalledProcessError as cpe:
+        action_fail('Unable to mark zone:{} '
+                    'as read only: {}'.format(zone, cpe.output))
+
+
+def readwrite(args):
+    """Mark zone associated with local RGW units as read write"""
+    zone = config('zone')
+    if not zone:
+        action_fail('No zone configuration set, not marking read write')
+        return
+    try:
+        multisite.modify_zone(zone, readonly=False)
+        multisite.update_period()
+        action_set(
+            values={
+                'message': 'zone:{} marked as read write'.format(zone)
+            }
+        )
+    except subprocess.CalledProcessError as cpe:
+        action_fail('Unable to mark zone:{} '
+                    'as read write: {}'.format(zone, cpe.output))
+
+
+def tidydefaults(args):
+    """Delete default zone and zonegroup metadata"""
+    zone = config('zone')
+    if not zone:
+        action_fail('No zone configuration set, not deleting defaults')
+        return
+    try:
+        multisite.tidy_defaults()
+        action_set(
+            values={
+                'message': 'default zone and zonegroup deleted'
+            }
+        )
+    except subprocess.CalledProcessError as cpe:
+        action_fail('Unable to delete default zone and zonegroup'
+                    ': {}'.format(cpe.output))
+
+
 # A dictionary of all the defined actions to callables (which take
 # parsed arguments).
-ACTIONS = {"pause": pause, "resume": resume} +ACTIONS = { + "pause": pause, + "resume": resume, + "promote": promote, + "readonly": readonly, + "readwrite": readwrite, + "tidydefaults": tidydefaults, +} def main(args): diff --git a/ceph-radosgw/actions/promote b/ceph-radosgw/actions/promote new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/promote @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/readonly b/ceph-radosgw/actions/readonly new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/readonly @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/readwrite b/ceph-radosgw/actions/readwrite new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/readwrite @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/tidydefaults b/ceph-radosgw/actions/tidydefaults new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/tidydefaults @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/bundles/bionic-rocky-multisite.yaml b/ceph-radosgw/bundles/bionic-rocky-multisite.yaml new file mode 100644 index 00000000..ab5ecf59 --- /dev/null +++ b/ceph-radosgw/bundles/bionic-rocky-multisite.yaml @@ -0,0 +1,73 @@ +options: + source: &source cloud:bionic-rocky +series: bionic +applications: + east-ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw-multisite + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: east-1 + region: east-1 + east-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + east-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + west-ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw-multisite + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: west-1 + region: west-1 + west-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + west-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + region: "east-1 west-1" +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - east-ceph-osd:mon + - east-ceph-mon:osd +- - east-ceph-radosgw:mon + - east-ceph-mon:radosgw +- - east-ceph-radosgw:identity-service + - keystone:identity-service +- - west-ceph-osd:mon + - west-ceph-mon:osd +- - west-ceph-radosgw:mon + - west-ceph-mon:radosgw +- - west-ceph-radosgw:identity-service + - keystone:identity-service +- - west-ceph-radosgw:master + - east-ceph-radosgw:slave diff --git a/ceph-radosgw/bundles/us-east.yaml b/ceph-radosgw/bundles/us-east.yaml new file mode 100644 index 00000000..9483897c --- /dev/null +++ b/ceph-radosgw/bundles/us-east.yaml @@ -0,0 +1,41 @@ +machines: + '0': + constraints: + '1': + constraints: + '2': + constraints: +series: bionic +applications: + ceph-mon: + charm: 'cs:ceph-mon' + num_units: 3 + options: + expected-osd-count: 9 + to: + - lxd:0 + - lxd:1 + - lxd:2 + ceph-osd: + charm: 'cs:ceph-osd' + num_units: 3 + 
options: + osd-devices: "/dev/disk/by-dname/bcache1 /dev/disk/by-dname/bcache2 /dev/disk/by-dname/bcache3" + to: + - 0 + - 1 + - 2 + rgw-us-east: + charm: 'cs:ceph-radosgw' + num_units: 1 + options: + realm: replicated + zone: us-east + zonegroup: us + to: + - lxd:0 +relations: + - - 'ceph-mon:osd' + - 'ceph-osd:mon' + - - 'rgw-us-east:mon' + - 'ceph-mon:radosgw' diff --git a/ceph-radosgw/bundles/us-west.yaml b/ceph-radosgw/bundles/us-west.yaml new file mode 100644 index 00000000..5a87c37a --- /dev/null +++ b/ceph-radosgw/bundles/us-west.yaml @@ -0,0 +1,41 @@ +machines: + '0': + constraints: + '1': + constraints: + '2': + constraints: +series: bionic +applications: + ceph-mon: + charm: 'cs:ceph-mon' + num_units: 3 + options: + expected-osd-count: 9 + to: + - lxd:0 + - lxd:1 + - lxd:2 + ceph-osd: + charm: 'cs:ceph-osd' + num_units: 3 + options: + osd-devices: "/dev/disk/by-dname/bcache1 /dev/disk/by-dname/bcache2 /dev/disk/by-dname/bcache3" + to: + - 0 + - 1 + - 2 + rgw-us-west: + charm: 'cs:ceph-radosgw' + num_units: 1 + options: + realm: replicated + zone: us-west + zonegroup: us + to: + - lxd:0 +relations: + - - 'ceph-mon:osd' + - 'ceph-osd:mon' + - - 'rgw-us-west:mon' + - 'ceph-mon:radosgw' diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 860ad4b5..56c53c09 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -292,3 +292,22 @@ options: description: | SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. + # Multi Site Options + realm: + type: string + default: + description: | + Name of RADOS Gateway Realm to create for multi-site replication. Setting + this option will enable support for multi-site replication, at which + point the zonegroup and zone options must also be provided. + zonegroup: + type: string + default: + description: | + Name of RADOS Gateway Zone Group to create for multi-site replication. + zone: + type: string + default: + description: | + Name of RADOS Gateway Zone to create for multi-site replication. This + option must be specific to the local site e.g. us-west or us-east. 
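The three multi-site options are only meaningful as a set: realm and zonegroup
must match across both deployments while zone is site-unique. The hook code
later in this patch guards multi-site setup on exactly that trio, along the
lines of this sketch (the helper name is illustrative):

    from charmhelpers.core.hookenv import config

    def multisite_configured():
        # Multi-site setup proceeds only when realm, zonegroup and zone
        # are all provided; a partial set is treated as not enabled yet.
        return all((config('realm'),
                    config('zonegroup'),
                    config('zone')))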
diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 7a2b23d7..1a4cc53b 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -212,6 +212,9 @@ def __call__(self): ctxt.update(user_provided) if self.context_complete(ctxt): + # Multi-site Zone configuration is optional, + # so add after assessment + ctxt['rgw_zone'] = config('zone') return ctxt return {} diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index f5ac5963..5efde943 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -23,7 +23,8 @@ ) from charmhelpers.core.host import ( - mkdir + mkdir, + symlink, ) from charmhelpers.contrib.storage.linux.ceph import ( CephBrokerRq, @@ -39,9 +40,12 @@ def import_radosgw_key(key, name=None): keyring_path = os.path.join(CEPH_RADOSGW_DIR, 'ceph-{}'.format(name), 'keyring') + link_path = os.path.join(CEPH_DIR, + 'ceph.client.{}.keyring'.format(name)) owner = group = 'ceph' else: keyring_path = os.path.join(CEPH_DIR, _radosgw_keyring) + link_path = None owner = group = 'root' if not os.path.exists(keyring_path): @@ -63,6 +67,11 @@ def import_radosgw_key(key, name=None): keyring_path ] subprocess.check_call(cmd) + # NOTE: add a link to the keyring in /var/lib/ceph + # to /etc/ceph so we can use it for radosgw-admin + # operations for multi-site configuration + if link_path: + symlink(keyring_path, link_path) return True return False diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4fb19c7b..5cc7b9f3 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -18,11 +18,13 @@ import subprocess import sys import socket +import uuid sys.path.append('lib') import ceph_rgw as ceph import ceph.utils as ceph_utils +import multisite from charmhelpers.core.hookenv import ( relation_get, @@ -35,6 +37,9 @@ DEBUG, Hooks, UnregisteredHookError, status_set, + is_leader, + leader_set, + leader_get, ) from charmhelpers.fetch import ( apt_update, @@ -86,6 +91,9 @@ service_name, systemd_based_radosgw, request_per_unit_key, + ready_for_service, + restart_nonce_changed, + multisite_deployment, ) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -109,6 +117,8 @@ 'libapache2-mod-fastcgi', ] +MULTISITE_SYSTEM_USER = 'multisite-sync' + def upgrade_available(): """Check for upgrade for ceph @@ -200,6 +210,8 @@ def _config_changed(): for r_id in relation_ids('certificates'): certs_joined(r_id) + process_multisite_relations() + CONFIGS.write_all() configure_https() @@ -218,8 +230,9 @@ def _mon_relation(): if request_per_unit_key(): relation_set(relation_id=rid, key_name=key_name) + # NOTE: prefer zone name if in use over pool-prefix. 
rq = ceph.get_create_rgw_pools_rq( - prefix=config('pool-prefix')) + prefix=config('zone') or config('pool-prefix')) if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all() @@ -242,9 +255,20 @@ def _mon_relation(): if systemd_based_radosgw(): service_stop('radosgw') service('disable', 'radosgw') - if not is_unit_paused_set() and new_keyring: - service('enable', service_name()) + + service('enable', service_name()) + # NOTE(jamespage): + # Multi-site deployments need to defer restart as the + # zone is not created until the master relation is + # joined; restarting here will cause a restart burst + # in systemd and stop the process restarting once + # zone configuration is complete. + if (not is_unit_paused_set() and + new_keyring and + not multisite_deployment()): service_restart(service_name()) + + process_multisite_relations() else: send_request_if_needed(rq, relation='mon') _mon_relation() @@ -410,6 +434,174 @@ def _certs_changed(): _certs_changed() +@hooks.hook('master-relation-joined') +def master_relation_joined(relation_id=None): + if not ready_for_service(legacy=False): + log('unit not ready, deferring multisite configuration') + return + + internal_url = '{}:{}'.format( + canonical_url(CONFIGS, INTERNAL), + config('port') + ) + endpoints = [internal_url] + realm = config('realm') + zonegroup = config('zonegroup') + zone = config('zone') + access_key = leader_get('access_key') + secret = leader_get('secret') + + if not all((realm, zonegroup, zone)): + return + + relation_set(relation_id=relation_id, + realm=realm, + zonegroup=zonegroup, + url=endpoints[0], + access_key=access_key, + secret=secret) + + if not is_leader(): + return + + if not leader_get('restart_nonce'): + # NOTE(jamespage): + # This is an ugly kludge to force creation of the required data + # items in the .rgw.root pool prior to the radosgw process being + # started; radosgw-admin does not currently have a way of doing + # this operation but a period update will force it to be created. 
+ multisite.update_period(fatal=False) + + mutation = False + + if realm not in multisite.list_realms(): + multisite.create_realm(realm, default=True) + mutation = True + + if zonegroup not in multisite.list_zonegroups(): + multisite.create_zonegroup(zonegroup, + endpoints=endpoints, + default=True, master=True, + realm=realm) + mutation = True + + if zone not in multisite.list_zones(): + multisite.create_zone(zone, + endpoints=endpoints, + default=True, master=True, + zonegroup=zonegroup) + mutation = True + + if MULTISITE_SYSTEM_USER not in multisite.list_users(): + access_key, secret = multisite.create_system_user( + MULTISITE_SYSTEM_USER + ) + multisite.modify_zone(zone, + access_key=access_key, + secret=secret) + leader_set(access_key=access_key, + secret=secret) + mutation = True + + if mutation: + multisite.update_period() + service_restart(service_name()) + leader_set(restart_nonce=str(uuid.uuid4())) + + relation_set(relation_id=relation_id, + access_key=access_key, + secret=secret) + + +@hooks.hook('slave-relation-changed') +def slave_relation_changed(relation_id=None, unit=None): + if not is_leader(): + return + if not ready_for_service(legacy=False): + log('unit not ready, deferring multisite configuration') + return + + master_data = relation_get(rid=relation_id, unit=unit) + if not all((master_data.get('realm'), + master_data.get('zonegroup'), + master_data.get('access_key'), + master_data.get('secret'), + master_data.get('url'))): + log("Defer processing until master RGW has provided required data") + return + + internal_url = '{}:{}'.format( + canonical_url(CONFIGS, INTERNAL), + config('port') + ) + endpoints = [internal_url] + + realm = config('realm') + zonegroup = config('zonegroup') + zone = config('zone') + + if (realm, zonegroup) != (master_data['realm'], + master_data['zonegroup']): + log("Mismatched configuration so stop multi-site configuration now") + return + + if not leader_get('restart_nonce'): + # NOTE(jamespage): + # This is an ugly kludge to force creation of the required data + # items in the .rgw.root pool prior to the radosgw process being + # started; radosgw-admin does not currently have a way of doing + # this operation but a period update will force it to be created. + multisite.update_period(fatal=False) + + mutation = False + + if realm not in multisite.list_realms(): + multisite.pull_realm(url=master_data['url'], + access_key=master_data['access_key'], + secret=master_data['secret']) + multisite.pull_period(url=master_data['url'], + access_key=master_data['access_key'], + secret=master_data['secret']) + multisite.set_default_realm(realm) + mutation = True + + if zone not in multisite.list_zones(): + multisite.create_zone(zone, + endpoints=endpoints, + default=False, master=False, + zonegroup=zonegroup, + access_key=master_data['access_key'], + secret=master_data['secret']) + mutation = True + + if mutation: + multisite.update_period() + service_restart(service_name()) + leader_set(restart_nonce=str(uuid.uuid4())) + + +@hooks.hook('leader-settings-changed') +def leader_settings_changed(): + # NOTE: leader unit will only ever set leader storage + # data when multi-site realm, zonegroup, zone or user + # data has been created/changed - trigger restarts + # of rgw services. 
+ if restart_nonce_changed(leader_get('restart_nonce')): + service_restart(service_name()) + if not is_leader(): + for r_id in relation_ids('master'): + master_relation_joined(r_id) + + +def process_multisite_relations(): + """Re-trigger any pending master/slave relations""" + for r_id in relation_ids('master'): + master_relation_joined(r_id) + for r_id in relation_ids('slave'): + for unit in related_units(r_id): + slave_relation_changed(r_id, unit) + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/leader-settings-changed b/ceph-radosgw/hooks/leader-settings-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/leader-settings-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/master-relation-broken b/ceph-radosgw/hooks/master-relation-broken new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/master-relation-broken @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/master-relation-changed b/ceph-radosgw/hooks/master-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/master-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/master-relation-departed b/ceph-radosgw/hooks/master-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/master-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/master-relation-joined b/ceph-radosgw/hooks/master-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/master-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py new file mode 100644 index 00000000..a7ddbe9c --- /dev/null +++ b/ceph-radosgw/hooks/multisite.py @@ -0,0 +1,367 @@ +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
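The new multisite.py module below is a thin wrapper around the radosgw-admin CLI: each helper assembles an argument list, logs it, executes it (with retries for the checked variants), and JSON-decodes any output. A hedged usage sketch from hook code; the realm name is an example:

    import multisite

    if 'myrealm' not in multisite.list_realms():
        multisite.create_realm('myrealm', default=True)
        multisite.update_period()  # commit the change to a new period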
+
+import json
+import functools
+import subprocess
+import socket
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.decorators as decorators
+
+RGW_ADMIN = 'radosgw-admin'
+
+
+@decorators.retry_on_exception(num_retries=5, base_delay=3,
+                               exc_type=subprocess.CalledProcessError)
+def _check_output(cmd):
+    """Logging wrapper for subprocess.check_output"""
+    hookenv.log("Executing: {}".format(' '.join(cmd)), level=hookenv.DEBUG)
+    return subprocess.check_output(cmd).decode('UTF-8')
+
+
+@decorators.retry_on_exception(num_retries=5, base_delay=3,
+                               exc_type=subprocess.CalledProcessError)
+def _check_call(cmd):
+    """Logging wrapper for subprocess.check_call"""
+    hookenv.log("Executing: {}".format(' '.join(cmd)), level=hookenv.DEBUG)
+    return subprocess.check_call(cmd)
+
+
+def _call(cmd):
+    """Logging wrapper for subprocess.call"""
+    hookenv.log("Executing: {}".format(' '.join(cmd)), level=hookenv.DEBUG)
+    return subprocess.call(cmd)
+
+
+def _key_name():
+    """Determine the name of the cephx key for the local unit"""
+    return 'rgw.{}'.format(socket.gethostname())
+
+
+def _list(key):
+    """
+    Internal implementation for list_* functions
+
+    :param key: string for required entity (zone, zonegroup, realm, user)
+    :type key: str
+    :return: List of specified entities found
+    :rtype: list
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        key, 'list'
+    ]
+    try:
+        result = json.loads(_check_output(cmd))
+        if isinstance(result, dict):
+            return result['{}s'.format(key)]
+        else:
+            return result
+    except TypeError:
+        return []
+
+
+list_realms = functools.partial(_list, 'realm')
+list_zonegroups = functools.partial(_list, 'zonegroup')
+list_zones = functools.partial(_list, 'zone')
+list_users = functools.partial(_list, 'user')
+
+
+def create_realm(name, default=False):
+    """
+    Create a new RADOS Gateway Realm.
+
+    :param name: name of realm to create
+    :type name: str
+    :param default: set new realm as the default realm
+    :type default: boolean
+    :return: realm configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'realm', 'create',
+        '--rgw-realm={}'.format(name)
+    ]
+    if default:
+        cmd += ['--default']
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
+
+
+def set_default_realm(name):
+    """
+    Set the default RADOS Gateway Realm
+
+    :param name: name of realm to set as default
+    :type name: str
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'realm', 'default',
+        '--rgw-realm={}'.format(name)
+    ]
+    _check_call(cmd)
+
+
+def create_zonegroup(name, endpoints, default=False, master=False, realm=None):
+    """
+    Create a new RADOS Gateway Zone Group
+
+    :param name: name of zonegroup to create
+    :type name: str
+    :param endpoints: list of URLs to endpoints for zonegroup
+    :type endpoints: list[str]
+    :param default: set new zonegroup as the default zonegroup
+    :type default: boolean
+    :param master: set new zonegroup as the master zonegroup
+    :type master: boolean
+    :param realm: realm to use for zonegroup
+    :type realm: str
+    :return: zonegroup configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'zonegroup', 'create',
+        '--rgw-zonegroup={}'.format(name),
+        '--endpoints={}'.format(','.join(endpoints)),
+    ]
+    if realm:
+        cmd.append('--rgw-realm={}'.format(realm))
+    if default:
+        cmd.append('--default')
+    if master:
+        cmd.append('--master')
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
+
+
+def create_zone(name, endpoints, default=False, master=False, zonegroup=None,
+                access_key=None, secret=None, readonly=False):
+    """
+    Create a new RADOS Gateway Zone
+
+    :param name: name of zone to create
+    :type name: str
+    :param endpoints: list of URLs to endpoints for zone
+    :type endpoints: list[str]
+    :param default: set new zone as the default zone
+    :type default: boolean
+    :param master: set new zone as the master zone
+    :type master: boolean
+    :param zonegroup: zonegroup to use for zone
+    :type zonegroup: str
+    :param access_key: access-key to use for the zone
+    :type access_key: str
+    :param secret: secret to use with access-key for the zone
+    :type secret: str
+    :param readonly: set zone as read only
+    :type readonly: boolean
+    :return: dict of zone configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'zone', 'create',
+        '--rgw-zone={}'.format(name),
+        '--endpoints={}'.format(','.join(endpoints)),
+    ]
+    if zonegroup:
+        cmd.append('--rgw-zonegroup={}'.format(zonegroup))
+    if default:
+        cmd.append('--default')
+    if master:
+        cmd.append('--master')
+    if access_key and secret:
+        cmd.append('--access-key={}'.format(access_key))
+        cmd.append('--secret={}'.format(secret))
+    cmd.append('--read-only={}'.format(1 if readonly else 0))
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
+
+
+def modify_zone(name, endpoints=None, default=False, master=False,
+                access_key=None, secret=None, readonly=False):
+    """
+    Modify an existing RADOS Gateway zone
+
+    :param name: name of zone to modify
+    :type name: str
+    :param endpoints: list of URLs to endpoints for zone
+    :type endpoints: list[str]
+    :param default: set zone as the default zone
+    :type default: boolean
+    :param master: set zone as the master zone
+    :type master: boolean
+    :param access_key: access-key to use for the zone
+    :type access_key: str
+    :param secret: secret to use with access-key for the zone
+    :type secret: str
+    :param readonly: set zone as read only
+    :type readonly: boolean
+    :return: zone configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'zone', 'modify',
+        '--rgw-zone={}'.format(name),
+    ]
+    if endpoints:
+        cmd.append('--endpoints={}'.format(','.join(endpoints)))
+    if access_key and secret:
+        cmd.append('--access-key={}'.format(access_key))
+        cmd.append('--secret={}'.format(secret))
+    if master:
+        cmd.append('--master')
+    if default:
+        cmd.append('--default')
+    cmd.append('--read-only={}'.format(1 if readonly else 0))
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
+
+
+def update_period(fatal=True):
+    """
+    Update RADOS Gateway configuration period
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'period', 'update', '--commit'
+    ]
+    if fatal:
+        _check_call(cmd)
+    else:
+        _call(cmd)
+
+
+def tidy_defaults():
+    """
+    Purge any default zonegroup and zone definitions
+    """
+    if ('default' in list_zonegroups() and
+            'default' in list_zones()):
+        cmd = [
+            RGW_ADMIN, '--id={}'.format(_key_name()),
+            'zonegroup', 'remove',
+            '--rgw-zonegroup=default',
+            '--rgw-zone=default'
+        ]
+        _call(cmd)
+        update_period()
+
+    if 'default' in list_zones():
+        cmd = [
+            RGW_ADMIN, '--id={}'.format(_key_name()),
+            'zone', 'delete',
+            '--rgw-zone=default'
+        ]
+        _call(cmd)
+        update_period()
+
+    if 'default' in list_zonegroups():
+        cmd = [
+            RGW_ADMIN, '--id={}'.format(_key_name()),
+            'zonegroup', 'delete',
+            '--rgw-zonegroup=default'
+        ]
+        _call(cmd)
+        update_period()
+
+
+def create_system_user(username):
+    """
+    Create a RADOS Gateway system user for sync usage
+
+    :param username: username of user to create
+    :type username: str
+    :return: access key and secret
+    :rtype: (str, str)
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'user', 'create',
+        '--uid={}'.format(username),
+        '--display-name=Synchronization User',
+        '--system',
+    ]
+    try:
+        result = json.loads(_check_output(cmd))
+        return (result['keys'][0]['access_key'],
+                result['keys'][0]['secret_key'])
+    except TypeError:
+        return (None, None)
+
+
+def pull_realm(url, access_key, secret):
+    """
+    Pull in a RADOS Gateway Realm from a master RGW instance
+
+    :param url: url of remote rgw deployment
+    :type url: str
+    :param access_key: access-key for remote rgw deployment
+    :type access_key: str
+    :param secret: secret for remote rgw deployment
+    :type secret: str
+    :return: realm configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'realm', 'pull',
+        '--url={}'.format(url),
+        '--access-key={}'.format(access_key),
+        '--secret={}'.format(secret),
+    ]
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
+
+
+def pull_period(url, access_key, secret):
+    """
+    Pull in a RADOS Gateway period from a master RGW instance
+
+    :param url: url of remote rgw deployment
+    :type url: str
+    :param access_key: access-key for remote rgw deployment
+    :type access_key: str
+    :param secret: secret for remote rgw deployment
+    :type secret: str
+    :return: period configuration
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'period', 'pull',
+        '--url={}'.format(url),
+        '--access-key={}'.format(access_key),
+        '--secret={}'.format(secret),
+    ]
+    try:
+        return json.loads(_check_output(cmd))
+    except TypeError:
+        return None
diff --git a/ceph-radosgw/hooks/slave-relation-broken
b/ceph-radosgw/hooks/slave-relation-broken new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/slave-relation-broken @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/slave-relation-changed b/ceph-radosgw/hooks/slave-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/slave-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/slave-relation-departed b/ceph-radosgw/hooks/slave-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/slave-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/slave-relation-joined b/ceph-radosgw/hooks/slave-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/slave-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 14dbe3bb..df474c2e 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -26,6 +26,8 @@ relation_ids, related_units, application_version_set, + config, + leader_get, ) from charmhelpers.contrib.openstack import ( context, @@ -54,6 +56,7 @@ filter_installed_packages, get_upstream_version, ) +from charmhelpers.core import unitdata # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. @@ -64,7 +67,8 @@ TEMPLATES_DIR = 'templates' TEMPLATES = 'templates/' HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' -CEPH_CONF = '/etc/ceph/ceph.conf' +CEPH_DIR = '/etc/ceph' +CEPH_CONF = '{}/ceph.conf'.format(CEPH_DIR) VERSION_PACKAGE = 'radosgw' @@ -177,6 +181,41 @@ def check_optional_relations(configs): return ('blocked', 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') + # NOTE: misc multi-site relation and config checks + multisite_config = (config('realm'), + config('zonegroup'), + config('zone')) + if relation_ids('master') or relation_ids('slave'): + if not all(multisite_config): + return ('blocked', + 'multi-site configuration incomplete ' + '(realm={realm}, zonegroup={zonegroup}' + ', zone={zone})'.format(**config())) + if (all(multisite_config) and not + (relation_ids('master') or relation_ids('slave'))): + return ('blocked', + 'multi-site configuration but master/slave ' + 'relation missing') + if (all(multisite_config) and relation_ids('slave')): + multisite_ready = False + for rid in relation_ids('slave'): + for unit in related_units(rid): + if relation_get('url', unit=unit, rid=rid): + multisite_ready = True + continue + if not multisite_ready: + return ('waiting', + 'multi-site master relation incomplete') + master_configured = ( + leader_get('access_key'), + leader_get('secret'), + leader_get('restart_nonce'), + ) + if (all(multisite_config) and + relation_ids('master') and + not all(master_configured)): + return ('waiting', + 'waiting for configuration of master zone') # return 'unknown' as the lowest priority to not clobber an existing # status. 
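Taken together, the checks above surface operator-facing workload status along these lines (illustrative renderings of the strings constructed above, not additional code):

    # blocked: multi-site configuration incomplete (realm=r1, zonegroup=None, zone=z1)
    # blocked: multi-site configuration but master/slave relation missing
    # waiting: multi-site master relation incomplete
    # waiting: waiting for configuration of master zone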
     return 'unknown', ''
@@ -317,8 +356,77 @@ def request_per_unit_key():
 
 
 def service_name():
-    """Determine the name of the RADOS Gateway service"""
+    """Determine the name of the RADOS Gateway service
+
+    :return: service name to use
+    :rtype: str
+    """
     if systemd_based_radosgw():
         return 'ceph-radosgw@rgw.{}'.format(socket.gethostname())
     else:
         return 'radosgw'
+
+
+def ready_for_service(legacy=True):
+    """
+    Determine whether the local unit is ready to service requests, as
+    indicated by presentation of required cephx keys on the mon
+    relation and presence of the associated keyring in /etc/ceph.
+
+    :param legacy: whether to check for legacy key support
+    :type legacy: boolean
+    :return: whether unit is ready
+    :rtype: boolean
+    """
+    name = 'rgw.{}'.format(socket.gethostname())
+    for rid in relation_ids('mon'):
+        for unit in related_units(rid):
+            if (relation_get('{}_key'.format(name),
+                             rid=rid, unit=unit) and
+                    os.path.exists(
+                        os.path.join(
+                            CEPH_DIR,
+                            'ceph.client.{}.keyring'.format(name)
+                        ))):
+                return True
+            if (legacy and
+                    relation_get('radosgw_key',
+                                 rid=rid, unit=unit) and
+                    os.path.exists(
+                        os.path.join(
+                            CEPH_DIR,
+                            'keyring.rados.gateway'
+                        ))):
+                return True
+    return False
+
+
+def restart_nonce_changed(nonce):
+    """
+    Determine whether the restart nonce provided has changed
+    since this function was last invoked.
+
+    :param nonce: value to confirm has changed against the
+                  remembered value for restart_nonce.
+    :type nonce: str
+    :return: whether nonce has changed value
+    :rtype: boolean
+    """
+    db = unitdata.kv()
+    nonce_key = 'restart_nonce'
+    if nonce != db.get(nonce_key):
+        db.set(nonce_key, nonce)
+        db.flush()
+        return True
+    return False
+
+
+def multisite_deployment():
+    """Determine if deployment is multi-site
+
+    :returns: whether multi-site deployment is configured
+    :rtype: boolean
+    """
+    return all((config('zone'),
+                config('zonegroup'),
+                config('realm')))
diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml
index b46e48a3..fdaef865 100644
--- a/ceph-radosgw/metadata.yaml
+++ b/ceph-radosgw/metadata.yaml
@@ -31,12 +31,16 @@ requires:
     scope: container
   certificates:
     interface: tls-certificates
+  slave:
+    interface: radosgw-multisite
 provides:
   nrpe-external-master:
     interface: nrpe-external-master
     scope: container
   gateway:
     interface: http
+  master:
+    interface: radosgw-multisite
 peers:
   cluster:
     interface: swift-ha
diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf
index 3b832c4b..4192847a 100644
--- a/ceph-radosgw/templates/ceph.conf
+++ b/ceph-radosgw/templates/ceph.conf
@@ -33,6 +33,10 @@ rgw socket path = /tmp/radosgw.sock
 log file = /var/log/ceph/radosgw.log
 {% endif %}
 
+{% if rgw_zone -%}
+rgw_zone = {{ rgw_zone }}
+{% endif %}
+
 rgw init timeout = 1200
 rgw frontends = civetweb port={{ port }}
 {% if auth_type == 'keystone' %}
diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py
index 7d02cb07..d9eb02c8 100644
--- a/ceph-radosgw/unit_tests/test_actions.py
+++ b/ceph-radosgw/unit_tests/test_actions.py
@@ -76,3 +76,67 @@ def dummy_action(args):
         with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
             actions.main(["foo"])
         self.assertEqual(dummy_calls, ["uh oh"])
+
+
+class MultisiteActionsTestCase(CharmTestCase):
+
+    TO_PATCH = [
+        'action_fail',
+        'action_set',
+        'multisite',
+        'config',
+    ]
+
+    def setUp(self):
+        super(MultisiteActionsTestCase, self).setUp(actions,
+                                                    self.TO_PATCH)
+        self.config.side_effect = self.test_config.get
+
+    def test_promote(self):
self.test_config.set('zone', 'testzone') + actions.promote([]) + self.multisite.modify_zone.assert_called_once_with( + 'testzone', + default=True, + master=True, + ) + self.multisite.update_period.assert_called_once_with() + + def test_promote_unconfigured(self): + actions.promote([]) + self.action_fail.assert_called_once() + + def test_readonly(self): + self.test_config.set('zone', 'testzone') + actions.readonly([]) + self.multisite.modify_zone.assert_called_once_with( + 'testzone', + readonly=True, + ) + self.multisite.update_period.assert_called_once_with() + + def test_readonly_unconfigured(self): + actions.readonly([]) + self.action_fail.assert_called_once() + + def test_readwrite(self): + self.test_config.set('zone', 'testzone') + actions.readwrite([]) + self.multisite.modify_zone.assert_called_once_with( + 'testzone', + readonly=False, + ) + self.multisite.update_period.assert_called_once_with() + + def test_readwrite_unconfigured(self): + actions.readwrite([]) + self.action_fail.assert_called_once() + + def test_tidydefaults(self): + self.test_config.set('zone', 'testzone') + actions.tidydefaults([]) + self.multisite.tidy_defaults.assert_called_once_with() + + def test_tidydefaults_unconfigured(self): + actions.tidydefaults([]) + self.action_fail.assert_called_once() diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index c1a75daf..4111d4c0 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -324,7 +324,8 @@ def _relation_get(attr, unit, rid): 'loglevel': 1, 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, - 'ipv6': False + 'ipv6': False, + 'rgw_zone': None, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -368,7 +369,8 @@ def _relation_get(attr, unit, rid): 'loglevel': 1, 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, - 'ipv6': False + 'ipv6': False, + 'rgw_zone': None, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -421,7 +423,8 @@ def _relation_get(attr, unit, rid): 'loglevel': 1, 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, - 'ipv6': False + 'ipv6': False, + 'rgw_zone': None, } self.assertEqual(expect, mon_ctxt()) @@ -456,7 +459,8 @@ def _relation_get(attr, unit, rid): 'loglevel': 1, 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, - 'ipv6': False + 'ipv6': False, + 'rgw_zone': None, } self.assertEqual(expect, mon_ctxt()) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 6d1e6b14..c398c470 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -31,6 +31,8 @@ 'socket', 'cmp_pkgrevno', 'init_is_systemd', + 'unitdata', + 'config', ] @@ -39,6 +41,7 @@ def setUp(self): super(CephRadosGWUtilTests, self).setUp(utils, TO_PATCH) self.get_upstream_version.return_value = '10.2.2' self.socket.gethostname.return_value = 'testhost' + self.config.side_effect = self.test_config.get def test_assess_status(self): with patch.object(utils, 'assess_status_func') as asf: @@ -136,6 +139,105 @@ def test_systemd_based_radosgw_new_style(self): self._setup_relation_data(_relation_data) self.assertTrue(utils.systemd_based_radosgw()) + @patch.object(utils.os.path, 'exists') + def test_ready_for_service(self, mock_exists): + mock_exists.return_value = True + _relation_data = { + 
'mon:1': { + 'ceph-mon/0': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/1': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/2': { + 'rgw.testhost_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertTrue(utils.ready_for_service()) + mock_exists.assert_called_with( + '/etc/ceph/ceph.client.rgw.testhost.keyring' + ) + + @patch.object(utils.os.path, 'exists') + def test_ready_for_service_legacy(self, mock_exists): + mock_exists.return_value = True + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/1': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/2': { + 'radosgw_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertTrue(utils.ready_for_service()) + mock_exists.assert_called_with( + '/etc/ceph/keyring.rados.gateway' + ) + + @patch.object(utils.os.path, 'exists') + def test_ready_for_service_legacy_skip(self, mock_exists): + mock_exists.return_value = True + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/1': { + 'radosgw_key': 'testkey', + }, + 'ceph-mon/2': { + 'radosgw_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertFalse(utils.ready_for_service(legacy=False)) + + def test_not_ready_for_service(self): + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + }, + 'ceph-mon/1': { + }, + 'ceph-mon/2': { + }, + } + } + self._setup_relation_data(_relation_data) + self.assertFalse(utils.ready_for_service()) + + @patch.object(utils.os.path, 'exists') + def test_ready_for_service_no_keyring(self, mock_exists): + mock_exists.return_value = False + _relation_data = { + 'mon:1': { + 'ceph-mon/0': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/1': { + 'rgw.testhost_key': 'testkey', + }, + 'ceph-mon/2': { + 'rgw.testhost_key': 'testkey', + }, + } + } + self._setup_relation_data(_relation_data) + self.assertFalse(utils.ready_for_service()) + mock_exists.assert_called_with( + '/etc/ceph/ceph.client.rgw.testhost.keyring' + ) + def test_request_per_unit_key(self): self.init_is_systemd.return_value = False self.cmp_pkgrevno.return_value = -1 @@ -157,3 +259,44 @@ def test_service_name(self, mock_systemd_based_radosgw): mock_systemd_based_radosgw.return_value = False self.assertEqual(utils.service_name(), 'radosgw') + + def test_restart_nonce_changed_new(self): + _db_data = {} + mock_db = MagicMock() + mock_db.get.side_effect = lambda key: _db_data.get(key) + self.unitdata.kv.return_value = mock_db + self.assertTrue(utils.restart_nonce_changed('foobar')) + mock_db.set.assert_called_once_with('restart_nonce', + 'foobar') + mock_db.flush.assert_called_once_with() + + def test_restart_nonce_changed_existing(self): + _db_data = { + 'restart_nonce': 'foobar' + } + mock_db = MagicMock() + mock_db.get.side_effect = lambda key: _db_data.get(key) + self.unitdata.kv.return_value = mock_db + self.assertFalse(utils.restart_nonce_changed('foobar')) + mock_db.set.assert_not_called() + mock_db.flush.assert_not_called() + + def test_restart_nonce_changed_changed(self): + _db_data = { + 'restart_nonce': 'foobar' + } + mock_db = MagicMock() + mock_db.get.side_effect = lambda key: _db_data.get(key) + self.unitdata.kv.return_value = mock_db + self.assertTrue(utils.restart_nonce_changed('soofar')) + mock_db.set.assert_called_once_with('restart_nonce', + 'soofar') + mock_db.flush.assert_called_once_with() + + def test_multisite_deployment(self): + self.test_config.set('zone', 'testzone') + self.test_config.set('zonegroup', 
'testzonegroup') + self.test_config.set('realm', 'testrealm') + self.assertTrue(utils.multisite_deployment()) + self.test_config.set('realm', None) + self.assertFalse(utils.multisite_deployment()) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 070ce8e0..3051572b 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -13,7 +13,7 @@ # limitations under the License. from mock import ( - patch, call, MagicMock + patch, call, MagicMock, ANY ) from test_utils import ( @@ -64,6 +64,7 @@ 'filter_installed_packages', 'filter_missing_packages', 'ceph_utils', + 'multisite_deployment', ] @@ -81,6 +82,7 @@ def setUp(self): self.systemd_based_radosgw.return_value = False self.filter_installed_packages.side_effect = lambda pkgs: pkgs self.filter_missing_packages.side_effect = lambda pkgs: pkgs + self.multisite_deployment.return_value = False def test_upgrade_available(self): _vers = { @@ -367,3 +369,305 @@ def test_certs_changed(self, mock_configure_https): 'vault/0' ) mock_configure_https.assert_called_once_with() + + +class MiscMultisiteTests(CharmTestCase): + + TO_PATCH = [ + 'restart_nonce_changed', + 'relation_ids', + 'related_units', + 'leader_get', + 'is_leader', + 'master_relation_joined', + 'slave_relation_changed', + 'service_restart', + 'service_name', + ] + + _relation_ids = { + 'master': ['master:1'], + 'slave': ['slave:1'], + } + + _related_units = { + 'master:1': ['rgw/0', 'rgw/1'], + 'slave:1': ['rgw-s/0', 'rgw-s/1'], + } + + def setUp(self): + super(MiscMultisiteTests, self).setUp(ceph_hooks, + self.TO_PATCH) + self.relation_ids.side_effect = ( + lambda endpoint: self._relation_ids.get(endpoint) or [] + ) + self.related_units.side_effect = ( + lambda rid: self._related_units.get(rid) or [] + ) + self.service_name.return_value = 'rgw@hostname' + + def test_leader_settings_changed(self): + self.restart_nonce_changed.return_value = True + self.is_leader.return_value = False + ceph_hooks.leader_settings_changed() + self.service_restart.assert_called_once_with('rgw@hostname') + self.master_relation_joined.assert_called_once_with('master:1') + + def test_process_multisite_relations(self): + ceph_hooks.process_multisite_relations() + self.master_relation_joined.assert_called_once_with('master:1') + self.slave_relation_changed.assert_has_calls([ + call('slave:1', 'rgw-s/0'), + call('slave:1', 'rgw-s/1'), + ]) + + +class CephRadosMultisiteTests(CharmTestCase): + + TO_PATCH = [ + 'ready_for_service', + 'canonical_url', + 'relation_set', + 'relation_get', + 'leader_get', + 'config', + 'is_leader', + 'multisite', + 'leader_set', + 'service_restart', + 'service_name', + 'log', + 'multisite_deployment', + 'systemd_based_radosgw', + ] + + def setUp(self): + super(CephRadosMultisiteTests, self).setUp(ceph_hooks, + self.TO_PATCH) + self.config.side_effect = self.test_config.get + self.ready_for_service.return_value = True + self.canonical_url.return_value = 'http://rgw' + self.service_name.return_value = 'rgw@hostname' + self.multisite_deployment.return_value = True + self.systemd_based_radosgw.return_value = True + + +class MasterMultisiteTests(CephRadosMultisiteTests): + + _complete_config = { + 'realm': 'testrealm', + 'zonegroup': 'testzonegroup', + 'zone': 'testzone', + } + + _leader_data = { + 'access_key': 'mykey', + 'secret': 'mysecret', + } + + _leader_data_done = { + 'access_key': 'mykey', + 'secret': 'mysecret', + 'restart_nonce': 'foobar', + } + + def test_master_relation_joined_missing_config(self): + 
ceph_hooks.master_relation_joined('master:1') + self.config.assert_has_calls([ + call('realm'), + call('zonegroup'), + call('zone'), + ]) + self.relation_set.assert_not_called() + + def test_master_relation_joined_create_everything(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + self.leader_get.side_effect = lambda attr: self._leader_data.get(attr) + self.multisite.list_realms.return_value = [] + self.multisite.list_zonegroups.return_value = [] + self.multisite.list_zones.return_value = [] + self.multisite.list_users.return_value = [] + self.multisite.create_system_user.return_value = ( + 'mykey', 'mysecret', + ) + ceph_hooks.master_relation_joined('master:1') + self.config.assert_has_calls([ + call('realm'), + call('zonegroup'), + call('zone'), + ]) + self.multisite.create_realm.assert_called_once_with( + 'testrealm', + default=True, + ) + self.multisite.create_zonegroup.assert_called_once_with( + 'testzonegroup', + endpoints=['http://rgw:80'], + default=True, + master=True, + realm='testrealm', + ) + self.multisite.create_zone.assert_called_once_with( + 'testzone', + endpoints=['http://rgw:80'], + default=True, + master=True, + zonegroup='testzonegroup', + ) + self.multisite.create_system_user.assert_called_once_with( + ceph_hooks.MULTISITE_SYSTEM_USER + ) + self.multisite.modify_zone.assert_called_once_with( + 'testzone', + access_key='mykey', + secret='mysecret', + ) + self.multisite.update_period.assert_has_calls([ + call(fatal=False), + call(), + ]) + self.service_restart.assert_called_once_with('rgw@hostname') + self.leader_set.assert_has_calls([ + call(access_key='mykey', + secret='mysecret'), + call(restart_nonce=ANY), + ]) + self.relation_set.assert_called_with( + relation_id='master:1', + access_key='mykey', + secret='mysecret', + ) + + def test_master_relation_joined_create_nothing(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + self.leader_get.side_effect = ( + lambda attr: self._leader_data_done.get(attr) + ) + self.multisite.list_realms.return_value = ['testrealm'] + self.multisite.list_zonegroups.return_value = ['testzonegroup'] + self.multisite.list_zones.return_value = ['testzone'] + self.multisite.list_users.return_value = [ + ceph_hooks.MULTISITE_SYSTEM_USER + ] + ceph_hooks.master_relation_joined('master:1') + self.multisite.create_realm.assert_not_called() + self.multisite.create_zonegroup.assert_not_called() + self.multisite.create_zone.assert_not_called() + self.multisite.create_system_user.assert_not_called() + self.multisite.update_period.assert_not_called() + self.service_restart.assert_not_called() + self.leader_set.assert_not_called() + + def test_master_relation_joined_not_leader(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = False + self.leader_get.side_effect = lambda attr: self._leader_data.get(attr) + ceph_hooks.master_relation_joined('master:1') + self.relation_set.assert_called_once_with( + relation_id='master:1', + realm='testrealm', + zonegroup='testzonegroup', + url='http://rgw:80', + access_key='mykey', + secret='mysecret', + ) + self.multisite.list_realms.assert_not_called() + + +class SlaveMultisiteTests(CephRadosMultisiteTests): + + _complete_config = { + 'realm': 'testrealm', + 'zonegroup': 'testzonegroup', + 'zone': 'testzone2', + } + + _test_relation = { + 'realm': 'testrealm', + 'zonegroup': 'testzonegroup', + 'access_key': 'anotherkey', 
+ 'secret': 'anothersecret', + 'url': 'http://master:80' + } + + _test_bad_relation = { + 'realm': 'anotherrealm', + 'zonegroup': 'anotherzg', + 'access_key': 'anotherkey', + 'secret': 'anothersecret', + 'url': 'http://master:80' + } + + def test_slave_relation_changed(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + self.leader_get.return_value = None + self.relation_get.return_value = self._test_relation + self.multisite.list_realms.return_value = [] + self.multisite.list_zones.return_value = [] + ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + self.config.assert_has_calls([ + call('realm'), + call('zonegroup'), + call('zone'), + ]) + self.multisite.pull_realm.assert_called_once_with( + url=self._test_relation['url'], + access_key=self._test_relation['access_key'], + secret=self._test_relation['secret'], + ) + self.multisite.pull_period.assert_called_once_with( + url=self._test_relation['url'], + access_key=self._test_relation['access_key'], + secret=self._test_relation['secret'], + ) + self.multisite.set_default_realm.assert_called_once_with( + 'testrealm' + ) + self.multisite.create_zone.assert_called_once_with( + 'testzone2', + endpoints=['http://rgw:80'], + default=False, + master=False, + zonegroup='testzonegroup', + access_key=self._test_relation['access_key'], + secret=self._test_relation['secret'], + ) + self.multisite.update_period.assert_has_calls([ + call(fatal=False), + call(), + ]) + self.service_restart.assert_called_once() + self.leader_set.assert_called_once_with(restart_nonce=ANY) + + def test_slave_relation_changed_incomplete_relation(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + self.relation_get.return_value = {} + ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + self.config.assert_not_called() + + def test_slave_relation_changed_mismatching_config(self): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + self.relation_get.return_value = self._test_bad_relation + ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + self.config.assert_has_calls([ + call('realm'), + call('zonegroup'), + call('zone'), + ]) + self.multisite.list_realms.assert_not_called() + + def test_slave_relation_changed_not_leader(self): + self.is_leader.return_value = False + ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + self.relation_get.assert_not_called() diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py new file mode 100644 index 00000000..f3e5f357 --- /dev/null +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -0,0 +1,237 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
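The multisite tests below lean on a small fixture convention: whoami() returns the name of the calling test function, which doubles as the basename of a canned radosgw-admin JSON response under unit_tests/testdata/. A condensed sketch of the pattern; the test and fixture names here are examples:

    # unit_tests/testdata/test_example.json holds captured CLI output
    def test_example(self):
        with open(self._testdata(whoami()), 'rb') as f:
            self.subprocess.check_output.return_value = f.read()
        # the helper under test then parses the canned JSON exactly as
        # if radosgw-admin had produced it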
+ +import inspect +import os +import mock + +import multisite + +from test_utils import CharmTestCase + + +def whoami(): + return inspect.stack()[1][3] + + +class TestMultisiteHelpers(CharmTestCase): + + TO_PATCH = [ + 'subprocess', + 'socket', + 'hookenv', + ] + + def setUp(self): + super(TestMultisiteHelpers, self).setUp(multisite, self.TO_PATCH) + self.socket.gethostname.return_value = 'testhost' + + def _testdata(self, funcname): + return os.path.join(os.path.dirname(__file__), + 'testdata', + '{}.json'.format(funcname)) + + def test_create_realm(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_realm('beedata', default=True) + self.assertEqual(result['name'], 'beedata') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'realm', 'create', + '--rgw-realm=beedata', '--default' + ]) + + def test_list_realms(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.list_realms() + self.assertTrue('beedata' in result) + + def test_set_default_zone(self): + multisite.set_default_realm('newrealm') + self.subprocess.check_call.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'realm', 'default', + '--rgw-realm=newrealm' + ]) + + def test_create_zonegroup(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_zonegroup( + 'brundall', + endpoints=['http://localhost:80'], + master=True, + default=True, + realm='beedata', + ) + self.assertEqual(result['name'], 'brundall') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'create', + '--rgw-zonegroup=brundall', + '--endpoints=http://localhost:80', + '--rgw-realm=beedata', + '--default', + '--master' + ]) + + def test_list_zonegroups(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.list_zonegroups() + self.assertTrue('brundall' in result) + + def test_create_zone(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_zone( + 'brundall-east', + endpoints=['http://localhost:80'], + master=True, + default=True, + zonegroup='brundall', + access_key='mykey', + secret='mypassword', + ) + self.assertEqual(result['name'], 'brundall-east') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'create', + '--rgw-zone=brundall-east', + '--endpoints=http://localhost:80', + '--rgw-zonegroup=brundall', + '--default', '--master', + '--access-key=mykey', + '--secret=mypassword', + '--read-only=0', + ]) + + def test_modify_zone(self): + multisite.modify_zone( + 'brundall-east', + endpoints=['http://localhost:80', 'https://localhost:443'], + access_key='mykey', + secret='secret', + readonly=True + ) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'modify', + '--rgw-zone=brundall-east', + '--endpoints=http://localhost:80,https://localhost:443', + '--access-key=mykey', '--secret=secret', + '--read-only=1', + ]) + + def test_modify_zone_promote_master(self): + multisite.modify_zone( + 'brundall-east', + default=True, + master=True, + ) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'modify', + 
'--rgw-zone=brundall-east', + '--master', + '--default', + '--read-only=0', + ]) + + def test_modify_zone_partial_credentials(self): + multisite.modify_zone( + 'brundall-east', + endpoints=['http://localhost:80', 'https://localhost:443'], + access_key='mykey', + ) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'modify', + '--rgw-zone=brundall-east', + '--endpoints=http://localhost:80,https://localhost:443', + '--read-only=0', + ]) + + def test_list_zones(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.list_zones() + self.assertTrue('brundall-east' in result) + + def test_update_period(self): + multisite.update_period() + self.subprocess.check_call.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'period', 'update', '--commit' + ]) + + @mock.patch.object(multisite, 'list_zonegroups') + @mock.patch.object(multisite, 'list_zones') + @mock.patch.object(multisite, 'update_period') + def test_tidy_defaults(self, + mock_update_period, + mock_list_zones, + mock_list_zonegroups): + mock_list_zones.return_value = ['default'] + mock_list_zonegroups.return_value = ['default'] + multisite.tidy_defaults() + self.subprocess.call.assert_has_calls([ + mock.call(['radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'remove', + '--rgw-zonegroup=default', '--rgw-zone=default']), + mock.call(['radosgw-admin', '--id=rgw.testhost', + 'zone', 'delete', + '--rgw-zone=default']), + mock.call(['radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'delete', + '--rgw-zonegroup=default']) + ]) + mock_update_period.assert_called_with() + + @mock.patch.object(multisite, 'list_zonegroups') + @mock.patch.object(multisite, 'list_zones') + @mock.patch.object(multisite, 'update_period') + def test_tidy_defaults_noop(self, + mock_update_period, + mock_list_zones, + mock_list_zonegroups): + mock_list_zones.return_value = ['brundall-east'] + mock_list_zonegroups.return_value = ['brundall'] + multisite.tidy_defaults() + self.subprocess.call.assert_not_called() + mock_update_period.assert_not_called() + + def test_pull_realm(self): + multisite.pull_realm(url='http://master:80', + access_key='testkey', + secret='testsecret') + self.subprocess.check_output.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'realm', 'pull', + '--url=http://master:80', + '--access-key=testkey', '--secret=testsecret', + ]) + + def test_pull_period(self): + multisite.pull_period(url='http://master:80', + access_key='testkey', + secret='testsecret') + self.subprocess.check_output.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'period', 'pull', + '--url=http://master:80', + '--access-key=testkey', '--secret=testsecret', + ]) diff --git a/ceph-radosgw/unit_tests/testdata/test_create_realm.json b/ceph-radosgw/unit_tests/testdata/test_create_realm.json new file mode 100644 index 00000000..343233ff --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_realm.json @@ -0,0 +1,7 @@ +{ + "id": "793a0176-ef7d-4d97-b544-a921e19a52e7", + "name": "beedata", + "current_period": "1f30e5fa-2c24-471d-b17d-61135c9f9510", + "epoch": 3 +} + diff --git a/ceph-radosgw/unit_tests/testdata/test_create_zone.json b/ceph-radosgw/unit_tests/testdata/test_create_zone.json new file mode 100644 index 00000000..9530d229 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_zone.json @@ -0,0 +1,36 @@ +{ + "id": "a69d4cd8-1881-4040-ad7c-914ca35af3b2", + "name": 
"brundall-east", + "domain_root": "brundall-east.rgw.meta:root", + "control_pool": "brundall-east.rgw.control", + "gc_pool": "brundall-east.rgw.log:gc", + "lc_pool": "brundall-east.rgw.log:lc", + "log_pool": "brundall-east.rgw.log", + "intent_log_pool": "brundall-east.rgw.log:intent", + "usage_log_pool": "brundall-east.rgw.log:usage", + "reshard_pool": "brundall-east.rgw.log:reshard", + "user_keys_pool": "brundall-east.rgw.meta:users.keys", + "user_email_pool": "brundall-east.rgw.meta:users.email", + "user_swift_pool": "brundall-east.rgw.meta:users.swift", + "user_uid_pool": "brundall-east.rgw.meta:users.uid", + "system_key": { + "access_key": "90FM6V8B44BSN1MVKYW6", + "secret_key": "bFHSPN3PB4QZqHfTiNIn11ey8kA8OA6Php6kGpdH" + }, + "placement_pools": [ + { + "key": "default-placement", + "val": { + "index_pool": "brundall-east.rgw.buckets.index", + "data_pool": "brundall-east.rgw.buckets.data", + "data_extra_pool": "brundall-east.rgw.buckets.non-ec", + "index_type": 0, + "compression": "" + } + } + ], + "metadata_heap": "", + "tier_config": [], + "realm_id": "793a0176-ef7d-4d97-b544-a921e19a52e7" +} + diff --git a/ceph-radosgw/unit_tests/testdata/test_create_zonegroup.json b/ceph-radosgw/unit_tests/testdata/test_create_zonegroup.json new file mode 100644 index 00000000..688d85c7 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_zonegroup.json @@ -0,0 +1,51 @@ +{ + "id": "3f41f138-5669-4b63-bf61-278f28fc9306", + "name": "brundall", + "api_name": "brundall", + "is_master": "true", + "endpoints": [ + "http://10.5.100.2:80" + ], + "hostnames": [], + "hostnames_s3website": [], + "master_zone": "a69d4cd8-1881-4040-ad7c-914ca35af3b2", + "zones": [ + { + "id": "8be215da-5316-4d12-a584-44b246285a3f", + "name": "brundall-west", + "endpoints": [ + "http://10.5.100.2:80" + ], + "log_meta": "false", + "log_data": "true", + "bucket_index_max_shards": 0, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [] + }, + { + "id": "a69d4cd8-1881-4040-ad7c-914ca35af3b2", + "name": "brundall-east", + "endpoints": [ + "http://10.5.100.1:80" + ], + "log_meta": "false", + "log_data": "true", + "bucket_index_max_shards": 0, + "read_only": "false", + "tier_type": "", + "sync_from_all": "true", + "sync_from": [] + } + ], + "placement_targets": [ + { + "name": "default-placement", + "tags": [] + } + ], + "default_placement": "default-placement", + "realm_id": "793a0176-ef7d-4d97-b544-a921e19a52e7" +} + diff --git a/ceph-radosgw/unit_tests/testdata/test_list_realms.json b/ceph-radosgw/unit_tests/testdata/test_list_realms.json new file mode 100644 index 00000000..ce4462aa --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_list_realms.json @@ -0,0 +1,6 @@ +{ + "default_info": "793a0176-ef7d-4d97-b544-a921e19a52e7", + "realms": [ + "beedata" + ] +} diff --git a/ceph-radosgw/unit_tests/testdata/test_list_users.json b/ceph-radosgw/unit_tests/testdata/test_list_users.json new file mode 100644 index 00000000..ab56bb93 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_list_users.json @@ -0,0 +1,5 @@ +[ + "testuser", + "multisite-sync" +] + diff --git a/ceph-radosgw/unit_tests/testdata/test_list_zonegroups.json b/ceph-radosgw/unit_tests/testdata/test_list_zonegroups.json new file mode 100644 index 00000000..b4a52e9a --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_list_zonegroups.json @@ -0,0 +1,6 @@ +{ + "default_info": "3f41f138-5669-4b63-bf61-278f28fc9306", + "zonegroups": [ + "brundall" + ] +} diff --git 
a/ceph-radosgw/unit_tests/testdata/test_list_zones.json b/ceph-radosgw/unit_tests/testdata/test_list_zones.json
new file mode 100644
index 00000000..ea27dd90
--- /dev/null
+++ b/ceph-radosgw/unit_tests/testdata/test_list_zones.json
@@ -0,0 +1,6 @@
+{
+    "default_info": "a69d4cd8-1881-4040-ad7c-914ca35af3b2",
+    "zones": [
+        "brundall-east"
+    ]
+}

From 4a97c1b5829dc39dc1a3ff473257cde48c2cf6c2 Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 18 Feb 2019 09:28:14 +0000
Subject: [PATCH 1667/2699] Update pool creation for >= Jewel

The ceph broker request missed some pools for later Ceph versions, and
created pools which were no longer required. Update pool list and tweak
weights in line with current best practice.

Change-Id: I4ed7e08d557c33a05aa8f8c6305914ef9734bad6
Closes-Bug: 1685536
---
 ceph-radosgw/hooks/ceph_rgw.py       |  54 ++++++--------
 ceph-radosgw/unit_tests/test_ceph.py | 104 +++++++++------------------
 2 files changed, 55 insertions(+), 103 deletions(-)

diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py
index 5efde943..3af951fd 100644
--- a/ceph-radosgw/hooks/ceph_rgw.py
+++ b/ceph-radosgw/hooks/ceph_rgw.py
@@ -16,8 +16,6 @@
 import os
 import subprocess
 
-from utils import get_pkg_version
-
 from charmhelpers.core.hookenv import (
     config,
 )
@@ -93,7 +91,7 @@ def _add_light_pool(rq, pool, pg_num, prefix=None):
     # Per the Ceph PG Calculator, all of the lightweight pools get 0.10%
     # of the data by default and only the .rgw.buckets.* get higher values
     weights = {
-        '.rgw.buckets.index': 1.00,
+        '.rgw.buckets.index': 3.00,
         '.rgw.buckets.extra': 1.00
     }
     w = weights.get(pool, 0.10)
@@ -112,18 +110,13 @@
     rq = CephBrokerRq()
     replicas = config('ceph-osd-replication-count')
 
-    # Jewel and above automatically always prefix pool names with zone when
-    # creating them (see LP: 1573549).
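The net effect of the pool list changes in this commit is easier to see as a name mapping; the following is inferred from the before/after lists in this hunk, not taken verbatim from the patch:

    # pre-Jewel name   -> Jewel-and-later name (zone prefix elided)
    POOL_RENAMES = {
        '.rgw.buckets': '.rgw.buckets.data',
        '.log': '.rgw.log',
        '.intent-log': '.rgw.intent-log',
        '.usage': '.rgw.usage',
        '.users': '.rgw.users.keys',
        '.users.email': '.rgw.users.email',
        '.users.swift': '.rgw.users.swift',
        '.users.uid': '.rgw.users.uid',
    }
    # '.rgw' drops out entirely, '.rgw.data.root' and '.rgw.meta' are
    # new, and '.rgw.root' remains the single unprefixed pool.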
- if prefix is None: - vc = apt_pkg.version_compare(get_pkg_version('radosgw'), '10.0.0') - if vc >= 0: - prefix = 'default' - else: - prefix = '' + prefix = prefix or 'default' - # Buckets likely to contain the most data and therefore requiring the most - # PGs - heavy = ['.rgw.buckets'] + # Buckets likely to contain the most data and therefore + # requiring the most PGs + heavy = [ + '.rgw.buckets.data' + ] bucket_weight = config('rgw-buckets-pool-weight') for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) @@ -132,27 +125,26 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data - light = ['.rgw', - '.rgw.root', - '.rgw.control', - '.rgw.gc', - '.rgw.buckets.index', - '.rgw.buckets.extra', - '.log', - '.intent-log', - '.usage', - '.users', - '.users.email', - '.users.swift', - '.users.uid'] + light = [ + '.rgw.control', + '.rgw.data.root', + '.rgw.gc', + '.rgw.log', + '.rgw.intent-log', + '.rgw.meta', + '.rgw.usage', + '.rgw.users.keys', + '.rgw.users.email', + '.rgw.users.swift', + '.rgw.users.uid', + '.rgw.buckets.extra', + '.rgw.buckets.index', + ] pg_num = config('rgw-lightweight-pool-pg-num') for pool in light: _add_light_pool(rq, pool, pg_num, prefix) - if prefix: - light_unprefixed = ['.rgw.root'] - for pool in light_unprefixed: - _add_light_pool(rq, pool, pg_num) + _add_light_pool(rq, '.rgw.root', pg_num) if config('restrict-ceph-pools'): rq.add_op_request_access_to_group(name="objects", diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index e13d7da1..a6b6e231 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -66,78 +66,38 @@ def test_create_rgw_pools_rq_with_prefix(self, mock_broker): self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix='us-east') mock_broker.assert_has_calls([ - call(replica_count=3, weight=19, name='us-east.rgw.buckets', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.rgw', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.rgw.root', + call(replica_count=3, weight=19, name='us-east.rgw.buckets.data', group='objects'), call(pg_num=10, replica_count=3, name='us-east.rgw.control', group='objects'), - call(pg_num=10, replica_count=3, name='us-east.rgw.gc', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.log', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.intent-log', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.usage', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.users', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.users.email', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.users.swift', - group='objects'), - call(pg_num=10, replica_count=3, name='us-east.users.uid', - group='objects'), - call(pg_num=10, replica_count=3, name='.rgw.root', - group='objects')] - ) - - @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) - @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' - '.add_op_create_pool') - def test_create_rgw_pools_rq_no_prefix_pre_jewel(self, mock_broker): - 
self.test_config.set('rgw-lightweight-pool-pg-num', -1) - self.test_config.set('ceph-osd-replication-count', 3) - self.test_config.set('rgw-buckets-pool-weight', 19) - ceph.get_create_rgw_pools_rq(prefix=None) - mock_broker.assert_has_calls([ - call(weight=19, replica_count=3, name='.rgw.buckets', - group='objects'), - call(weight=0.10, replica_count=3, name='.rgw', + call(pg_num=10, replica_count=3, name='us-east.rgw.data.root', group='objects'), - call(weight=0.10, replica_count=3, name='.rgw.root', + call(pg_num=10, replica_count=3, name='us-east.rgw.gc', group='objects'), - call(weight=0.10, replica_count=3, name='.rgw.control', + call(pg_num=10, replica_count=3, name='us-east.rgw.log', group='objects'), - call(weight=0.10, replica_count=3, name='.rgw.gc', + call(pg_num=10, replica_count=3, name='us-east.rgw.intent-log', group='objects'), - call(weight=1.00, replica_count=3, name='.rgw.buckets.index', + call(pg_num=10, replica_count=3, name='us-east.rgw.meta', group='objects'), - call(weight=1.00, replica_count=3, name='.rgw.buckets.extra', + call(pg_num=10, replica_count=3, name='us-east.rgw.usage', group='objects'), - call(weight=0.10, replica_count=3, name='.log', + call(pg_num=10, replica_count=3, name='us-east.rgw.users.keys', group='objects'), - call(weight=0.10, replica_count=3, name='.intent-log', + call(pg_num=10, replica_count=3, name='us-east.rgw.users.email', group='objects'), - call(weight=0.10, replica_count=3, name='.usage', + call(pg_num=10, replica_count=3, name='us-east.rgw.users.swift', group='objects'), - call(weight=0.10, replica_count=3, name='.users', + call(pg_num=10, replica_count=3, name='us-east.rgw.users.uid', group='objects'), - call(weight=0.10, replica_count=3, name='.users.email', + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra', group='objects'), - call(weight=0.10, replica_count=3, name='.users.swift', + call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index', group='objects'), - call(weight=0.10, replica_count=3, name='.users.uid', - group='objects')] + call(pg_num=10, replica_count=3, name='.rgw.root', + group='objects')], ) - @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_request_access_to_group') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' @@ -150,38 +110,38 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, self.test_config.set('restrict-ceph-pools', True) ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call(weight=19, replica_count=3, name='default.rgw.buckets', - group='objects'), - call(weight=0.10, replica_count=3, name='default.rgw', - group='objects'), - call(weight=0.10, replica_count=3, name='default.rgw.root', + call(replica_count=3, weight=19, name='default.rgw.buckets.data', group='objects'), call(weight=0.10, replica_count=3, name='default.rgw.control', group='objects'), + call(weight=0.10, replica_count=3, name='default.rgw.data.root', + group='objects'), call(weight=0.10, replica_count=3, name='default.rgw.gc', group='objects'), - call(weight=1.00, replica_count=3, - name='default.rgw.buckets.index', + call(weight=0.10, replica_count=3, name='default.rgw.log', group='objects'), - call(weight=1.00, replica_count=3, - name='default.rgw.buckets.extra', + call(weight=0.10, replica_count=3, name='default.rgw.intent-log', group='objects'), - call(weight=0.10, replica_count=3, name='default.log', + call(weight=0.10, replica_count=3, name='default.rgw.meta', 
group='objects'), - call(weight=0.10, replica_count=3, name='default.intent-log', + call(weight=0.10, replica_count=3, name='default.rgw.usage', group='objects'), - call(weight=0.10, replica_count=3, name='default.usage', + call(weight=0.10, replica_count=3, name='default.rgw.users.keys', group='objects'), - call(weight=0.10, replica_count=3, name='default.users', + call(weight=0.10, replica_count=3, name='default.rgw.users.email', group='objects'), - call(weight=0.10, replica_count=3, name='default.users.email', + call(weight=0.10, replica_count=3, name='default.rgw.users.swift', group='objects'), - call(weight=0.10, replica_count=3, name='default.users.swift', + call(weight=0.10, replica_count=3, name='default.rgw.users.uid', group='objects'), - call(weight=0.10, replica_count=3, name='default.users.uid', + call(weight=1.00, replica_count=3, + name='default.rgw.buckets.extra', + group='objects'), + call(weight=3.00, replica_count=3, + name='default.rgw.buckets.index', group='objects'), call(weight=0.10, replica_count=3, name='.rgw.root', - group='objects')] + group='objects')], ) mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', From 6480cadbdb5c59919ebea9a6fecb5b6c6b769c90 Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Fri, 15 Feb 2019 20:11:29 +0000 Subject: [PATCH 1668/2699] Don't clobber apparmor profiles. The charm attempts to avoid restarts of the ceph-osd processes by asssessing whether new or changed apparmor profiles have been installed. The charm always copies apparmor files from the charm to /etc/apparmor.d which overwrites any local changes made to the profile - specifically if the profile is in complain mode. As the content of the profile then changes the restart_on_change decorator then fires the aa_profile_changed function which switches the profile back into enforce mode. This change only overwrites apparmor profiles in the event that the hash of the source file in the charm changes; if it does then the current profile mode is re-asserted to ensure that complain mode deployments don't switch to enforce by mistake. Change-Id: I8f8cbf17af6219bd9fbdcf71a7000cba4c63f3f3 Closes-Bug: 1783373 Co-Authored-By: James Page --- ceph-osd/hooks/ceph_hooks.py | 45 ++++++++++++++++---------- ceph-osd/unit_tests/test_ceph_hooks.py | 2 +- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 21885dcc..c6b925fd 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -53,12 +53,12 @@ is_container, lsb_release, mkdir, - restart_on_change, service_reload, service_restart, umount, write_file, CompareHostReleases, + file_hash, ) from charmhelpers.fetch import ( add_source, @@ -193,25 +193,33 @@ def aa_profile_changed(service_name='ceph-osd-all'): service_restart(service_name) -@restart_on_change({ - '/etc/apparmor.d/usr.bin.ceph-osd': ['ceph-osd-all']}, - restart_functions={'ceph-osd-all': aa_profile_changed}) def copy_profile_into_place(): """ Copy the apparmor profiles included with the charm into the /etc/apparmor.d directory. 
- """ - new_install = False - apparmor_dir = os.path.join(os.sep, - 'etc', - 'apparmor.d') + File are only copied if they have changed at source + to avoid overwriting any aa-complain mode flags set + + :returns: flag indicating if any profiles where newly + installed or changed + :rtype: boolean + """ + db = kv() + changes = False + apparmor_dir = os.path.join(os.sep, 'etc', 'apparmor.d') for x in glob.glob('files/apparmor/*'): - if not os.path.exists(os.path.join(apparmor_dir, - os.path.basename(x))): - new_install = True - shutil.copy(x, apparmor_dir) - return new_install + db_key = 'hash:{}'.format(x) + new_hash = file_hash(x) + previous_hash = db.get(db_key) + if new_hash != previous_hash: + log('Installing apparmor profile for {}' + .format(os.path.basename(x))) + shutil.copy(x, apparmor_dir) + db.set(db_key, new_hash) + db.flush() + changes = True + return changes class CephOsdAppArmorContext(AppArmorContext): @@ -261,9 +269,12 @@ def install_apparmor_profile(): based on current setting of 'aa-profile-mode' configuration option. """ - log('Installing apparmor profile for ceph-osd') - new_install = copy_profile_into_place() - if new_install or config().changed('aa-profile-mode'): + changes = copy_profile_into_place() + # NOTE(jamespage): If any profiles where changed or + # freshly installed then force + # re-assertion of the current profile mode + # to avoid complain->enforce side effects + if changes or config().changed('aa-profile-mode'): aa_context = CephOsdAppArmorContext() aa_context.setup_aa_profile() aa_profile_changed() diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 20e20895..23e9d851 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -323,8 +323,8 @@ def test_install_apparmor_profile(self, mock_config, m_aa_context.setup_aa_profile.assert_called() mock_copy_profile_into_place.assert_called() - m_config.changed.assert_called_with('aa-profile-mode') mock_service_restart.assert_called_with('ceph-osd-all') + m_config.changed.assert_called_with('aa-profile-mode') mock_service_reload.assert_called_with('apparmor') @patch.object(ceph_hooks, 'ceph') From 77cfa0dc828ca5ec40eb44a42557211d269ac83c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 15 Feb 2019 12:56:20 +0000 Subject: [PATCH 1669/2699] action: Misc tidyup and improvements General tidy of actions codebase including sorting out some minor bugs. Added new copy-pool action. Added support for CRUSH device classes when creating erasure coding profiles. Removed need for use of rados python module by switching to using the ceph command line tool for all operations. Add new action for creation of new CRUSH rules using device classes. Resync charms.ceph to support switching the crush_rule key on a pool. Resync charmhelpers for support for device classing. Rename all action py files to by standards compliant. 
Change-Id: I1f7d95b1637f2064bd11a5d5336764b2946428ac Depends-On: I2c405f9a4956ff1ccf1edb7372a9bb11e1e591a4 --- ceph-mon/actions.yaml | 311 +++++++++++------- ceph-mon/actions/ceph_ops.py | 70 +--- ceph-mon/actions/copy-pool | 1 + ceph-mon/actions/copy_pool.py | 38 +++ ceph-mon/actions/create-cache-tier | 2 +- ceph-mon/actions/create-crush-rule | 1 + ceph-mon/actions/create-erasure-profile | 2 +- ceph-mon/actions/create-pool | 2 +- ...ate-cache-tier.py => create_cache_tier.py} | 0 ceph-mon/actions/create_crush_rule.py | 45 +++ ...e-profile.py => create_erasure_profile.py} | 13 +- .../{create-pool.py => create_pool.py} | 5 +- ceph-mon/actions/crushmap-update | 2 +- ...{crushmap-update.py => crushmap_update.py} | 0 ceph-mon/actions/delete-erasure-profile | 2 +- ceph-mon/actions/delete-pool | 2 +- ...e-profile.py => delete_erasure_profile.py} | 0 .../{delete-pool.py => delete_pool.py} | 31 +- ceph-mon/actions/get-erasure-profile | 2 +- ceph-mon/actions/get-health | 13 +- ...sure-profile.py => get_erasure_profile.py} | 0 ceph-mon/actions/get_health.py | 28 ++ ceph-mon/actions/list-erasure-profiles | 2 +- ceph-mon/actions/list-pools | 2 +- ...e-profiles.py => list_erasure_profiles.py} | 0 .../actions/{list-pools.py => list_pools.py} | 0 ceph-mon/actions/pool-get | 2 +- ceph-mon/actions/pool-set | 2 +- ceph-mon/actions/pool-statistics | 2 +- ceph-mon/actions/{pool-get.py => pool_get.py} | 2 +- ceph-mon/actions/{pool-set.py => pool_set.py} | 2 +- ...{pool-statistics.py => pool_statistics.py} | 0 ceph-mon/actions/remove-cache-tier | 2 +- ceph-mon/actions/remove-pool-snapshot | 2 +- ...ove-cache-tier.py => remove_cache_tier.py} | 0 ...ol-snapshot.py => remove_pool_snapshot.py} | 2 +- ceph-mon/actions/rename-pool | 2 +- .../{rename-pool.py => rename_pool.py} | 2 +- ceph-mon/actions/set-pool-max-bytes | 2 +- ...ool-max-bytes.py => set_pool_max_bytes.py} | 2 +- ceph-mon/actions/show-disk-free | 2 +- .../{show-disk-free.py => show_disk_free.py} | 0 ceph-mon/actions/snapshot-pool | 2 +- .../{snapshot-pool.py => snapshot_pool.py} | 2 +- ceph-mon/charm-helpers-hooks.yaml | 2 +- .../contrib/openstack/amulet/utils.py | 16 +- .../charmhelpers/contrib/openstack/context.py | 10 +- .../contrib/openstack/templating.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 18 +- .../contrib/storage/linux/ceph.py | 104 +++--- ceph-mon/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 8 + .../{contrib => fetch}/python/__init__.py | 2 +- .../hooks/charmhelpers/fetch/python/debug.py | 54 +++ .../{contrib => fetch}/python/packages.py | 0 .../hooks/charmhelpers/fetch/python/rpdb.py | 56 ++++ .../charmhelpers/fetch/python/version.py | 32 ++ ceph-mon/lib/ceph/broker.py | 1 + ceph-mon/lib/ceph/utils.py | 151 +++++---- ceph-mon/unit_tests/test_ceph_actions.py | 127 +++++++ 60 files changed, 840 insertions(+), 348 deletions(-) create mode 120000 ceph-mon/actions/copy-pool create mode 100755 ceph-mon/actions/copy_pool.py create mode 120000 ceph-mon/actions/create-crush-rule rename ceph-mon/actions/{create-cache-tier.py => create_cache_tier.py} (100%) create mode 100755 ceph-mon/actions/create_crush_rule.py rename ceph-mon/actions/{create-erasure-profile.py => create_erasure_profile.py} (93%) rename ceph-mon/actions/{create-pool.py => create_pool.py} (89%) rename ceph-mon/actions/{crushmap-update.py => crushmap_update.py} (100%) rename ceph-mon/actions/{delete-erasure-profile.py => delete_erasure_profile.py} (100%) rename ceph-mon/actions/{delete-pool.py => delete_pool.py} (57%) mode change 
100755 => 120000 ceph-mon/actions/get-health rename ceph-mon/actions/{get-erasure-profile.py => get_erasure_profile.py} (100%) create mode 100755 ceph-mon/actions/get_health.py rename ceph-mon/actions/{list-erasure-profiles.py => list_erasure_profiles.py} (100%) rename ceph-mon/actions/{list-pools.py => list_pools.py} (100%) rename ceph-mon/actions/{pool-get.py => pool_get.py} (96%) rename ceph-mon/actions/{pool-set.py => pool_set.py} (97%) rename ceph-mon/actions/{pool-statistics.py => pool_statistics.py} (100%) rename ceph-mon/actions/{remove-cache-tier.py => remove_cache_tier.py} (100%) rename ceph-mon/actions/{remove-pool-snapshot.py => remove_pool_snapshot.py} (97%) rename ceph-mon/actions/{rename-pool.py => rename_pool.py} (96%) rename ceph-mon/actions/{set-pool-max-bytes.py => set_pool_max_bytes.py} (96%) rename ceph-mon/actions/{show-disk-free.py => show_disk_free.py} (100%) rename ceph-mon/actions/{snapshot-pool.py => snapshot_pool.py} (97%) rename ceph-mon/hooks/charmhelpers/{contrib => fetch}/python/__init__.py (92%) create mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/debug.py rename ceph-mon/hooks/charmhelpers/{contrib => fetch}/python/packages.py (100%) create mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/version.py create mode 100644 ceph-mon/unit_tests/test_ceph_actions.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 85cfb747..2f3af8d9 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -1,246 +1,319 @@ pause-health: - description: Pause ceph health operations across the entire ceph cluster + description: "Pause ceph health operations across the entire ceph cluster" resume-health: - description: Resume ceph health operations across the entire ceph cluster + description: "Resume ceph health operations across the entire ceph cluster" get-health: - description: Output the current cluster health reported by `ceph health` + description: "Output the current cluster health reported by `ceph health`" create-cache-tier: - description: Create a new cache tier + description: "Create a new cache tier" params: backer-pool: type: string - description: | - The name of the pool that will back the cache tier. Also known as - the cold pool + description: "The name of the pool that will back the cache tier. Also known as the cold pool" cache-pool: type: string - description: | - The name of the pool that will be the cache pool. Also known - as the hot pool + description: "The name of the pool that will be the cache pool. Also known as the hot pool" cache-mode: type: string default: writeback - enum: [writeback, readonly] - description: | - The mode of the caching tier. Please refer to the Ceph docs for more - information - required: [backer-pool, cache-pool] + enum: + - writeback + - readonly + description: "The mode of the caching tier. Please refer to the Ceph docs for more information" + required: + - backer-pool + - cache-pool additionalProperties: false remove-cache-tier: - description: Remove an existing cache tier + description: "Remove an existing cache tier" params: backer-pool: type: string - description: | - The name of the pool that backs the cache tier. Also known as - the cold pool + description: "The name of the pool that backs the cache tier. Also known as the cold pool" cache-pool: type: string - description: | - The name of the pool that is the cache pool. 
Also known - as the hot pool - required: [backer-pool, cache-pool] + description: "The name of the pool that is the cache pool. Also known as the hot pool" + required: + - backer-pool + - cache-pool additionalProperties: false - create-pool: - description: Creates a pool + description: "Creates a pool" params: name: type: string - description: The name of the pool + description: "The name of the pool" app-name: type: string - description: App name to set on the newly created pool. + description: "App name to set on the newly created pool." profile-name: type: string - description: The crush profile to use for this pool. The ruleset must exist first. + description: "The crush profile to use for this pool. The ruleset must exist first." pool-type: type: string - default: "replicated" - enum: [replicated, erasure] - description: | - The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the - objects or erasure to get a kind of generalized RAID5 capability. + default: replicated + enum: + - replicated + - erasure + description: "The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the objects or erasure to get a kind of generalized RAID5 capability." replicas: type: integer default: 3 - description: | - For the replicated pool this is the number of replicas to store of each object. + description: "For the replicated pool this is the number of replicas to store of each object." erasure-profile-name: type: string default: default - description: | - The name of the erasure coding profile to use for this pool. Note this profile must exist - before calling create-pool - required: [name] + description: "The name of the erasure coding profile to use for this pool. Note this profile must exist before calling create-pool" + percent-data: + type: integer + default: 10 + description: "The percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased." + required: + - name + additionalProperties: false +create-crush-rule: + description: "Create a new replicated CRUSH rule to use on a pool." + params: + name: + type: string + description: "The name of the rule" + failure-domain: + type: string + default: host + enum: + - chassis + - datacenter + - host + - osd + - pdu + - pod + - rack + - region + - room + - root + - row + description: "The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host." + device-class: + type: string + enum: + - hdd + - ssd + - nvme + description: "CRUSH device class to use for new rule." + required: + - name additionalProperties: false create-erasure-profile: - description: Create a new erasure code profile to use on a pool. + description: "Create a new erasure code profile to use on a pool." params: name: type: string - description: The name of the profile + description: "The name of the profile" failure-domain: type: string default: host - enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row] - description: | - The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host. 
+ enum: + - chassis + - datacenter + - host + - osd + - pdu + - pod + - rack + - region + - room + - root + - row + description: "The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host." plugin: type: string - default: "jerasure" - enum: [jerasure, isa, lrc, shec] - description: | - The erasure plugin to use for this profile. - See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details + default: jerasure + enum: + - jerasure + - isa + - lrc + - shec + description: "The erasure plugin to use for this profile. See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details" data-chunks: type: integer default: 3 - description: | - The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance - if K = 2 a 10KB object will be divided into K objects of 5KB each. + description: "The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance if K = 2 a 10KB object will be divided into K objects of 5KB each." coding-chunks: type: integer default: 2 - description: | - The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. - If there are 2 coding chunks, it means 2 OSDs can be out without losing data. + description: "The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. If there are 2 coding chunks, it means 2 OSDs can be out without losing data." locality-chunks: type: integer - description: | - Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 - two groups of three are created. Each set can be recovered without reading chunks from another set. + description: "Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 two groups of three are created. Each set can be recovered without reading chunks from another set." durability-estimator: type: integer - description: | - The number of parity chunks each of which includes each data chunk in its calculation range. The number is used - as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data. - required: [name, data-chunks, coding-chunks] + description: "The number of parity chunks each of which includes each data chunk in its calculation range. The number is used as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data." + device-class: + type: string + enum: + - hdd + - ssd + - nvme + description: "CRUSH device class to use for erasure profile." + required: + - name additionalProperties: false get-erasure-profile: - description: Display an erasure code profile. + description: "Display an erasure code profile." params: name: type: string - description: The name of the profile - required: [name] + description: "The name of the profile" + required: + - name additionalProperties: false delete-erasure-profile: - description: Deletes an erasure code profile. + description: "Deletes an erasure code profile." 
params: name: type: string - description: The name of the profile - required: [name] + description: "The name of the profile" + required: + - name additionalProperties: false list-erasure-profiles: - description: List the names of all erasure code profiles + description: "List the names of all erasure code profiles" additionalProperties: false list-pools: - description: List your cluster's pools + description: "List your cluster's pools" additionalProperties: false set-pool-max-bytes: - description: Set pool quotas for the maximum number of bytes. + description: "Set pool quotas for the maximum number of bytes." params: max: type: integer - description: The name of the pool - pool-name: + description: "The maximum number of bytes to set as the pool quota" + name: type: string - description: The name of the pool - required: [pool-name, max] + description: "The name of the pool" + required: + - name + - max additionalProperties: false delete-pool: - description: Deletes the named pool + description: "Deletes the named pool" params: - pool-name: + name: type: string - description: The name of the pool - required: [pool-name] + description: "The name of the pool" + required: + - name additionalProperties: false rename-pool: - description: Rename a pool + description: "Rename a pool" params: - pool-name: + name: type: string - description: The name of the pool + description: "The name of the pool" new-name: type: string - description: The new name of the pool - required: [pool-name, new-name] + description: "The new name of the pool" + required: + - name + - new-name additionalProperties: false pool-statistics: - description: Show a pool's utilization statistics + description: "Show a pool's utilization statistics" additionalProperties: false snapshot-pool: - description: Snapshot a pool + description: "Snapshot a pool" params: - pool-name: + name: type: string - description: The name of the pool + description: "The name of the pool" snapshot-name: type: string - description: The name of the snapshot - required: [snapshot-name, pool-name] + description: "The name of the snapshot" + required: + - snapshot-name + - name additionalProperties: false remove-pool-snapshot: - description: Remove a pool snapshot + description: "Remove a pool snapshot" params: - pool-name: + name: type: string - description: The name of the pool + description: "The name of the pool" snapshot-name: type: string - description: The name of the snapshot - required: [snapshot-name, pool-name] + description: "The name of the snapshot" + required: + - snapshot-name + - name additionalProperties: false pool-set: - description: Set a value for the pool + description: "Set a value for the pool" params: - pool-name: + name: type: string - description: The pool to set this variable on. + description: "The pool to set this variable on." key: type: string - description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + description: "Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values" value: type: string - description: The value to set - required: [key, value, pool-name] + description: "The value to set" + required: + - key + - value + - name additionalProperties: false pool-get: - description: Get a value for the pool + description: "Get a value for the pool" params: - pool-name: + name: type: string - description: The pool to get this variable from. + description: "The pool to get this variable from."
key: type: string - description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values - required: [key, pool-name] + description: "Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values" + required: + - key + - name additionalProperties: false crushmap-update: - description: | - Apply a json crushmap definition. This will throw away the existing - ceph crushmap and apply the new definition. Use with extreme caution. - WARNING - This function is extremely dangerous if misused. It can very - easily break your cluster in unexpected ways. + description: "Apply a json crushmap definition. This will throw away the existing ceph crushmap and apply the new definition. Use with extreme caution. WARNING - This function is extremely dangerous if misused. It can very easily break your cluster in unexpected ways." params: map: type: string - description: The json crushmap blob - required: [map] + description: "The json crushmap blob" + required: + - map additionalProperties: false show-disk-free: - description: Show disk utilization by host and OSD. + description: "Show disk utilization by host and OSD." params: format: type: string - enum: [json, json-pretty, xml, xml-pretty, plain] - default: "plain" - description: Output format, either json, json-pretty, xml, xml-pretty, plain; defaults to plain + enum: + - json + - json-pretty + - xml + - xml-pretty + - plain + default: plain + description: "Output format, either json, json-pretty, xml, xml-pretty, plain; defaults to plain" + additionalProperties: false +copy-pool: + description: "Copy contents of a pool to a new pool." + params: + source: + type: string + description: "Pool to copy data from." + target: + type: string + description: "Pool to copy data to." + required: + - source + - target additionalProperties: false set-noout: - description: Set ceph noout across the cluster. + description: "Set ceph noout across the cluster." unset-noout: - description: Unset ceph noout across the cluster. + description: "Unset ceph noout across the cluster." + diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index 47eb5c8d..875fe88d 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -13,49 +13,21 @@ # limitations under the License. 
from subprocess import CalledProcessError, check_output -import rados import sys sys.path.append('hooks') -from charmhelpers.core.hookenv import log, action_get, action_fail +from charmhelpers.core.hookenv import action_get, action_fail from charmhelpers.contrib.storage.linux.ceph import pool_set, \ set_pool_quota, snapshot_pool, remove_pool_snapshot -# Connect to Ceph via Librados and return a connection -def connect(): - """Creates a connection to Ceph using librados.""" - try: - cluster = rados.Rados(conffile='/etc/ceph/ceph.conf') - cluster.connect() - return cluster - except (rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as rados_error: - log("librados failed with error: {}".format(str(rados_error))) - - -def create_crush_rule(): - """Stub function.""" - # Shell out - pass - - def list_pools(): """Return a list of all Ceph pools.""" try: - cluster = connect() - pool_list = cluster.list_pools() - cluster.shutdown() + pool_list = check_output(['ceph', 'osd', 'pool', 'ls']).decode('UTF-8') return pool_list - except (rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as e: + except CalledProcessError as e: action_fail(str(e)) @@ -66,10 +38,10 @@ def get_health(): On error, 'unknown' is returned. """ try: - value = check_output(['ceph', 'health']).decode('utf-8') + value = check_output(['ceph', 'health']).decode('UTF-8') return value except CalledProcessError as e: - action_fail(e.message) + action_fail(str(e)) return 'Getting health failed, health unknown' @@ -114,22 +86,16 @@ def pool_stats(): """ Returns statistics for a pool. - The pool name is provided by the action parameter 'pool-name'. + The pool name is provided by the action parameter 'name'. """ try: - pool_name = action_get("pool-name") - cluster = connect() - ioctx = cluster.open_ioctx(pool_name) - stats = ioctx.get_stats() - ioctx.close() - cluster.shutdown() + pool_name = action_get("name") + stats = ( + check_output(['ceph', 'osd', 'pool', 'stats', pool_name]) + .decode('UTF-8') + ) return stats - except (rados.Error, - rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as e: + except CalledProcessError as e: action_fail(str(e)) @@ -138,10 +104,10 @@ def delete_pool_snapshot(): Delete a pool snapshot. Deletes a snapshot from the pool provided by the action - parameter 'pool-name', with the snapshot name provided by + parameter 'name', with the snapshot name provided by action parameter 'snapshot-name' """ - pool_name = action_get("pool-name") + pool_name = action_get("name") snapshot_name = action_get("snapshot-name") remove_pool_snapshot(service='ceph', pool_name=pool_name, @@ -154,10 +120,10 @@ def set_pool_max_bytes(): Sets the max bytes quota for a pool. Sets the pool quota maximum bytes for the pool specified by - the action parameter 'pool-name' to the value specified by + the action parameter 'name' to the value specified by the action parameter 'max' """ - pool_name = action_get("pool-name") + pool_name = action_get("name") max_bytes = action_get("max") set_pool_quota(service='ceph', pool_name=pool_name, @@ -168,11 +134,11 @@ def snapshot_ceph_pool(): """ Snapshots a Ceph pool. - Snapshots the pool provided in action parameter 'pool-name' and + Snapshots the pool provided in action parameter 'name' and uses the parameter provided in the action parameter 'snapshot-name' as the name for the snapshot. 
""" - pool_name = action_get("pool-name") + pool_name = action_get("name") snapshot_name = action_get("snapshot-name") snapshot_pool(service='ceph', pool_name=pool_name, diff --git a/ceph-mon/actions/copy-pool b/ceph-mon/actions/copy-pool new file mode 120000 index 00000000..97ffd8cb --- /dev/null +++ b/ceph-mon/actions/copy-pool @@ -0,0 +1 @@ +copy_pool.py \ No newline at end of file diff --git a/ceph-mon/actions/copy_pool.py b/ceph-mon/actions/copy_pool.py new file mode 100755 index 00000000..5112cf70 --- /dev/null +++ b/ceph-mon/actions/copy_pool.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import subprocess + +sys.path.append('hooks') + +import charmhelpers.core.hookenv as hookenv + + +def copy_pool(): + try: + source = hookenv.action_get("source") + target = hookenv.action_get("target") + subprocess.check_call([ + 'rados', 'cppool', + source, target + ]) + except subprocess.CalledProcessError as e: + hookenv.action_fail("Error copying pool: {}".format(str(e))) + + +if __name__ == '__main__': + copy_pool() diff --git a/ceph-mon/actions/create-cache-tier b/ceph-mon/actions/create-cache-tier index 2a7e4346..90631ac7 120000 --- a/ceph-mon/actions/create-cache-tier +++ b/ceph-mon/actions/create-cache-tier @@ -1 +1 @@ -create-cache-tier.py \ No newline at end of file +create_cache_tier.py \ No newline at end of file diff --git a/ceph-mon/actions/create-crush-rule b/ceph-mon/actions/create-crush-rule new file mode 120000 index 00000000..e4607fb7 --- /dev/null +++ b/ceph-mon/actions/create-crush-rule @@ -0,0 +1 @@ +create_crush_rule.py \ No newline at end of file diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile index 58eef8ed..e7625474 120000 --- a/ceph-mon/actions/create-erasure-profile +++ b/ceph-mon/actions/create-erasure-profile @@ -1 +1 @@ -create-erasure-profile.py \ No newline at end of file +create_erasure_profile.py \ No newline at end of file diff --git a/ceph-mon/actions/create-pool b/ceph-mon/actions/create-pool index bf2f130c..226b1774 120000 --- a/ceph-mon/actions/create-pool +++ b/ceph-mon/actions/create-pool @@ -1 +1 @@ -create-pool.py \ No newline at end of file +create_pool.py \ No newline at end of file diff --git a/ceph-mon/actions/create-cache-tier.py b/ceph-mon/actions/create_cache_tier.py similarity index 100% rename from ceph-mon/actions/create-cache-tier.py rename to ceph-mon/actions/create_cache_tier.py diff --git a/ceph-mon/actions/create_crush_rule.py b/ceph-mon/actions/create_crush_rule.py new file mode 100755 index 00000000..5fea57ba --- /dev/null +++ b/ceph-mon/actions/create_crush_rule.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import subprocess + +sys.path.append('hooks') + +import charmhelpers.core.hookenv as hookenv + + +def create_crush_rule(): + """Create a new CRUSH rule.""" + rule_name = hookenv.action_get('name') + failure_domain = hookenv.action_get('failure-domain') + device_class = hookenv.action_get('device-class') + cmd = [ + 'ceph', 'osd', 'crush', 'rule', + 'create-replicated', + rule_name, + 'default', + failure_domain + ] + if device_class: + cmd.append(device_class) + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + hookenv.action_fail(str(e)) + +if __name__ == '__main__': + create_crush_rule() diff --git a/ceph-mon/actions/create-erasure-profile.py b/ceph-mon/actions/create_erasure_profile.py similarity index 93% rename from ceph-mon/actions/create-erasure-profile.py rename to ceph-mon/actions/create_erasure_profile.py index 75e43c56..73ccfe0b 100755 --- a/ceph-mon/actions/create-erasure-profile.py +++ b/ceph-mon/actions/create_erasure_profile.py @@ -27,6 +27,7 @@ def make_erasure_profile(): name = action_get("name") plugin = action_get("plugin") failure_domain = action_get("failure-domain") + device_class = action_get("device-class") # jerasure requires k+m # isa requires k+m @@ -42,7 +43,8 @@ def make_erasure_profile(): profile_name=name, data_chunks=k, coding_chunks=m, - failure_domain=failure_domain) + failure_domain=failure_domain, + device_class=device_class) except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " @@ -56,7 +58,8 @@ def make_erasure_profile(): profile_name=name, data_chunks=k, coding_chunks=m, - failure_domain=failure_domain) + failure_domain=failure_domain, + device_class=device_class) except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " @@ -72,7 +75,8 @@ def make_erasure_profile(): data_chunks=k, coding_chunks=m, locality=l, - failure_domain=failure_domain) + failure_domain=failure_domain, + device_class=device_class) except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " @@ -88,7 +92,8 @@ def make_erasure_profile(): data_chunks=k, coding_chunks=m, durability_estimator=c, - failure_domain=failure_domain) + failure_domain=failure_domain, + device_class=device_class) except CalledProcessError as e: log(e) action_fail("Create erasure profile failed with " diff --git a/ceph-mon/actions/create-pool.py b/ceph-mon/actions/create_pool.py similarity index 89% rename from ceph-mon/actions/create-pool.py rename to ceph-mon/actions/create_pool.py index 81a8a554..ae6b00c2 100755 --- a/ceph-mon/actions/create-pool.py +++ b/ceph-mon/actions/create_pool.py @@ -25,7 +25,8 @@ def create_pool(): pool_name = action_get("name") pool_type = action_get("pool-type") - app_name = action_get("app-name") or None + percent_data = action_get("percent-data") or 10 + app_name = action_get("app-name") or 'unknown' try: if pool_type == "replicated": replicas = action_get("replicas") @@ -33,6 +34,7 @@ def create_pool(): service='admin', replicas=replicas, app_name=app_name, + percent_data=float(percent_data), ) replicated_pool.create() @@ -42,6 
+44,7 @@ def create_pool(): erasure_code_profile=crush_profile_name, service='admin', app_name=app_name, + percent_data=float(percent_data), ) erasure_pool.create() else: diff --git a/ceph-mon/actions/crushmap-update b/ceph-mon/actions/crushmap-update index af530e0d..1c7ffb94 120000 --- a/ceph-mon/actions/crushmap-update +++ b/ceph-mon/actions/crushmap-update @@ -1 +1 @@ -crushmap-update.py \ No newline at end of file +crushmap_update.py \ No newline at end of file diff --git a/ceph-mon/actions/crushmap-update.py b/ceph-mon/actions/crushmap_update.py similarity index 100% rename from ceph-mon/actions/crushmap-update.py rename to ceph-mon/actions/crushmap_update.py diff --git a/ceph-mon/actions/delete-erasure-profile b/ceph-mon/actions/delete-erasure-profile index 719025e0..65b7c04f 120000 --- a/ceph-mon/actions/delete-erasure-profile +++ b/ceph-mon/actions/delete-erasure-profile @@ -1 +1 @@ -delete-erasure-profile.py \ No newline at end of file +delete_erasure_profile.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-pool b/ceph-mon/actions/delete-pool index 8deb7a08..586a3ae6 120000 --- a/ceph-mon/actions/delete-pool +++ b/ceph-mon/actions/delete-pool @@ -1 +1 @@ -delete-pool.py \ No newline at end of file +delete_pool.py \ No newline at end of file diff --git a/ceph-mon/actions/delete-erasure-profile.py b/ceph-mon/actions/delete_erasure_profile.py similarity index 100% rename from ceph-mon/actions/delete-erasure-profile.py rename to ceph-mon/actions/delete_erasure_profile.py diff --git a/ceph-mon/actions/delete-pool.py b/ceph-mon/actions/delete_pool.py similarity index 57% rename from ceph-mon/actions/delete-pool.py rename to ceph-mon/actions/delete_pool.py index 62e73a6a..d05078da 100755 --- a/ceph-mon/actions/delete-pool.py +++ b/ceph-mon/actions/delete_pool.py @@ -15,28 +15,35 @@ # limitations under the License. 
import sys +import subprocess sys.path.append('hooks') -import rados -from ceph_ops import connect from charmhelpers.core.hookenv import action_get, log, action_fail +def set_mon_allow_pool_delete(delete=False): + subprocess.check_call([ + 'ceph', 'tell', 'mon.*', + 'injectargs', + '--mon-allow-pool-delete={}'.format('true' if delete else 'false') + ]) + + def remove_pool(): try: pool_name = action_get("name") - cluster = connect() - log("Deleting pool: {}".format(pool_name)) - cluster.delete_pool(str(pool_name)) # Convert from unicode - cluster.shutdown() - except (rados.IOError, - rados.ObjectNotFound, - rados.NoData, - rados.NoSpace, - rados.PermissionError) as e: + set_mon_allow_pool_delete(delete=True) + subprocess.check_call([ + 'ceph', 'osd', 'pool', 'delete', + pool_name, pool_name, + '--yes-i-really-really-mean-it', + ]) + except subprocess.CalledProcessError as e: log(e) - action_fail(e) + action_fail("Error deleting pool: {}".format(str(e))) + finally: + set_mon_allow_pool_delete(delete=False) if __name__ == '__main__': diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile index ec29f9e9..97cea7a5 120000 --- a/ceph-mon/actions/get-erasure-profile +++ b/ceph-mon/actions/get-erasure-profile @@ -1 +1 @@ -get-erasure-profile.py \ No newline at end of file +get_erasure_profile.py \ No newline at end of file diff --git a/ceph-mon/actions/get-health b/ceph-mon/actions/get-health deleted file mode 100755 index 60e8a333..00000000 --- a/ceph-mon/actions/get-health +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/python - -from ceph_ops import get_health -from charmhelpers.core.hookenv import log, action_set, action_fail - -if __name__ == '__main__': - try: - action_set({'message': get_health()}) - except CalledProcessError as e: - log(e) - action_fail( - "ceph health failed with message: {}".format(str(e))) \ No newline at end of file diff --git a/ceph-mon/actions/get-health b/ceph-mon/actions/get-health new file mode 120000 index 00000000..9c8a8000 --- /dev/null +++ b/ceph-mon/actions/get-health @@ -0,0 +1 @@ +get_health.py \ No newline at end of file diff --git a/ceph-mon/actions/get-erasure-profile.py b/ceph-mon/actions/get_erasure_profile.py similarity index 100% rename from ceph-mon/actions/get-erasure-profile.py rename to ceph-mon/actions/get_erasure_profile.py diff --git a/ceph-mon/actions/get_health.py b/ceph-mon/actions/get_health.py new file mode 100755 index 00000000..d1e0da48 --- /dev/null +++ b/ceph-mon/actions/get_health.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from subprocess import CalledProcessError + +from ceph_ops import get_health +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + action_set({'message': get_health()}) + except CalledProcessError as e: + log(e) + action_fail( + "ceph health failed with message: {}".format(str(e))) diff --git a/ceph-mon/actions/list-erasure-profiles b/ceph-mon/actions/list-erasure-profiles index bcd2eca3..6cdaf358 120000 --- a/ceph-mon/actions/list-erasure-profiles +++ b/ceph-mon/actions/list-erasure-profiles @@ -1 +1 @@ -list-erasure-profiles.py \ No newline at end of file +list_erasure_profiles.py \ No newline at end of file diff --git a/ceph-mon/actions/list-pools b/ceph-mon/actions/list-pools index e2fa46dd..65e0c222 120000 --- a/ceph-mon/actions/list-pools +++ b/ceph-mon/actions/list-pools @@ -1 +1 @@ -list-pools.py \ No newline at end of file +list_pools.py \ No newline at end of file diff --git a/ceph-mon/actions/list-erasure-profiles.py b/ceph-mon/actions/list_erasure_profiles.py similarity index 100% rename from ceph-mon/actions/list-erasure-profiles.py rename to ceph-mon/actions/list_erasure_profiles.py diff --git a/ceph-mon/actions/list-pools.py b/ceph-mon/actions/list_pools.py similarity index 100% rename from ceph-mon/actions/list-pools.py rename to ceph-mon/actions/list_pools.py diff --git a/ceph-mon/actions/pool-get b/ceph-mon/actions/pool-get index 129b906d..ad4b9fa0 120000 --- a/ceph-mon/actions/pool-get +++ b/ceph-mon/actions/pool-get @@ -1 +1 @@ -pool-get.py \ No newline at end of file +pool_get.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-set b/ceph-mon/actions/pool-set index 8327dcdf..9339f5e7 120000 --- a/ceph-mon/actions/pool-set +++ b/ceph-mon/actions/pool-set @@ -1 +1 @@ -pool-set.py \ No newline at end of file +pool_set.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-statistics b/ceph-mon/actions/pool-statistics index 9d775f8f..dbf59233 120000 --- a/ceph-mon/actions/pool-statistics +++ b/ceph-mon/actions/pool-statistics @@ -1 +1 @@ -pool-statistics.py \ No newline at end of file +pool_statistics.py \ No newline at end of file diff --git a/ceph-mon/actions/pool-get.py b/ceph-mon/actions/pool_get.py similarity index 96% rename from ceph-mon/actions/pool-get.py rename to ceph-mon/actions/pool_get.py index c5315818..5073d8c3 100755 --- a/ceph-mon/actions/pool-get.py +++ b/ceph-mon/actions/pool_get.py @@ -22,7 +22,7 @@ from charmhelpers.core.hookenv import log, action_set, action_get, action_fail if __name__ == '__main__': - name = action_get('pool-name') + name = action_get('name') key = action_get('key') try: out = check_output(['ceph', '--id', 'admin', diff --git a/ceph-mon/actions/pool-set.py b/ceph-mon/actions/pool_set.py similarity index 97% rename from ceph-mon/actions/pool-set.py rename to ceph-mon/actions/pool_set.py index fa743624..51fb8e83 100755 --- a/ceph-mon/actions/pool-set.py +++ b/ceph-mon/actions/pool_set.py @@ -24,7 +24,7 @@ from ceph.broker import handle_set_pool_value if __name__ == '__main__': - name = action_get("pool-name") + name = action_get("name") key = action_get("key") value = action_get("value") request = {'name': name, diff --git a/ceph-mon/actions/pool-statistics.py b/ceph-mon/actions/pool_statistics.py similarity index 100% rename from ceph-mon/actions/pool-statistics.py rename to ceph-mon/actions/pool_statistics.py diff --git a/ceph-mon/actions/remove-cache-tier b/ceph-mon/actions/remove-cache-tier index 136c0f06..11090fd5 120000 --- 
a/ceph-mon/actions/remove-cache-tier +++ b/ceph-mon/actions/remove-cache-tier @@ -1 +1 @@ -remove-cache-tier.py \ No newline at end of file +remove_cache_tier.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-pool-snapshot b/ceph-mon/actions/remove-pool-snapshot index c4cc17b6..21fd5ae0 120000 --- a/ceph-mon/actions/remove-pool-snapshot +++ b/ceph-mon/actions/remove-pool-snapshot @@ -1 +1 @@ -remove-pool-snapshot.py \ No newline at end of file +remove_pool_snapshot.py \ No newline at end of file diff --git a/ceph-mon/actions/remove-cache-tier.py b/ceph-mon/actions/remove_cache_tier.py similarity index 100% rename from ceph-mon/actions/remove-cache-tier.py rename to ceph-mon/actions/remove_cache_tier.py diff --git a/ceph-mon/actions/remove-pool-snapshot.py b/ceph-mon/actions/remove_pool_snapshot.py similarity index 97% rename from ceph-mon/actions/remove-pool-snapshot.py rename to ceph-mon/actions/remove_pool_snapshot.py index d535f370..b451b99e 100755 --- a/ceph-mon/actions/remove-pool-snapshot.py +++ b/ceph-mon/actions/remove_pool_snapshot.py @@ -22,7 +22,7 @@ from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot if __name__ == '__main__': - name = action_get("pool-name") + name = action_get("name") snapname = action_get("snapshot-name") try: remove_pool_snapshot(service='admin', diff --git a/ceph-mon/actions/rename-pool b/ceph-mon/actions/rename-pool index ce3ff8f5..37007c6f 120000 --- a/ceph-mon/actions/rename-pool +++ b/ceph-mon/actions/rename-pool @@ -1 +1 @@ -rename-pool.py \ No newline at end of file +rename_pool.py \ No newline at end of file diff --git a/ceph-mon/actions/rename-pool.py b/ceph-mon/actions/rename_pool.py similarity index 96% rename from ceph-mon/actions/rename-pool.py rename to ceph-mon/actions/rename_pool.py index 2d769c1d..ba7f7ac2 100755 --- a/ceph-mon/actions/rename-pool.py +++ b/ceph-mon/actions/rename_pool.py @@ -22,7 +22,7 @@ from charmhelpers.contrib.storage.linux.ceph import rename_pool if __name__ == '__main__': - name = action_get("pool-name") + name = action_get("name") new_name = action_get("new-name") try: rename_pool(service='admin', old_name=name, new_name=new_name) diff --git a/ceph-mon/actions/set-pool-max-bytes b/ceph-mon/actions/set-pool-max-bytes index d633c0c2..f65ca1e0 120000 --- a/ceph-mon/actions/set-pool-max-bytes +++ b/ceph-mon/actions/set-pool-max-bytes @@ -1 +1 @@ -set-pool-max-bytes.py \ No newline at end of file +set_pool_max_bytes.py \ No newline at end of file diff --git a/ceph-mon/actions/set-pool-max-bytes.py b/ceph-mon/actions/set_pool_max_bytes.py similarity index 96% rename from ceph-mon/actions/set-pool-max-bytes.py rename to ceph-mon/actions/set_pool_max_bytes.py index 2d549923..d5893c73 100755 --- a/ceph-mon/actions/set-pool-max-bytes.py +++ b/ceph-mon/actions/set_pool_max_bytes.py @@ -23,7 +23,7 @@ if __name__ == '__main__': max_bytes = action_get("max") - name = action_get("pool-name") + name = action_get("name") try: set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) except CalledProcessError as e: diff --git a/ceph-mon/actions/show-disk-free b/ceph-mon/actions/show-disk-free index 85abbc3c..a50016bc 120000 --- a/ceph-mon/actions/show-disk-free +++ b/ceph-mon/actions/show-disk-free @@ -1 +1 @@ -show-disk-free.py \ No newline at end of file +show_disk_free.py \ No newline at end of file diff --git a/ceph-mon/actions/show-disk-free.py b/ceph-mon/actions/show_disk_free.py similarity index 100% rename from ceph-mon/actions/show-disk-free.py rename to 
ceph-mon/actions/show_disk_free.py diff --git a/ceph-mon/actions/snapshot-pool b/ceph-mon/actions/snapshot-pool index 549e3459..dd9c8578 120000 --- a/ceph-mon/actions/snapshot-pool +++ b/ceph-mon/actions/snapshot-pool @@ -1 +1 @@ -snapshot-pool.py \ No newline at end of file +snapshot_pool.py \ No newline at end of file diff --git a/ceph-mon/actions/snapshot-pool.py b/ceph-mon/actions/snapshot_pool.py similarity index 97% rename from ceph-mon/actions/snapshot-pool.py rename to ceph-mon/actions/snapshot_pool.py index fdd007ce..a147b755 100755 --- a/ceph-mon/actions/snapshot-pool.py +++ b/ceph-mon/actions/snapshot_pool.py @@ -22,7 +22,7 @@ from charmhelpers.contrib.storage.linux.ceph import snapshot_pool if __name__ == '__main__': - name = action_get("pool-name") + name = action_get("name") snapname = action_get("snapshot-name") try: snapshot_pool(service='admin', diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 75ab3b33..c127d09b 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -15,4 +15,4 @@ include: - utils - contrib.charmsupport - contrib.hardening|inc=* - - contrib.python.packages + - fetch.python.packages diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index ea1fd8f3..53fa6506 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -88,14 +88,14 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port, validation_function = self.validate_v2_endpoint_data xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} return validation_function(endpoints, admin_port, internal_port, public_port, expected) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 8a203754..78a339f6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1427,11 +1427,11 @@ def __call__(self): ctxt = {} if is_relation_made('zeromq-configuration', 'host'): for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index a623315d..050f8af5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ 
b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -183,7 +183,7 @@ class OSConfigRenderer(object): /tmp/templates/grizzly/api-paste.ini /tmp/templates/havana/api-paste.ini - Since it was registered with the grizzly release, it first seraches + Since it was registered with the grizzly release, it first searches the grizzly directory for nova.conf, then the templates dir. When writing api-paste.ini, it will find the template in the grizzly diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 4e432a25..86b011b7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -83,7 +83,8 @@ add_source as fetch_add_source, SourceConfigError, GPGKeyError, - get_upstream_version + get_upstream_version, + filter_missing_packages ) from charmhelpers.fetch.snap import ( @@ -309,6 +310,15 @@ def error_out(msg): sys.exit(1) +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] + ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] @@ -972,7 +982,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", @@ -1241,7 +1253,7 @@ def remote_restart(rel_name, remote_service=None): def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports + """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. 
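The charmhelpers resync below teaches the storage helpers about CRUSH device classes and percent-data based pool sizing. As a rough illustration of the placement-group arithmetic involved (a back-of-envelope sketch assuming the conventional target of roughly 100 PGs per OSD; it is not the library's exact code, which also enforces minimums and prefers power-of-two counts):

    def rough_pg_count(osd_count, percent_data, pool_size, pgs_per_osd=100):
        # Approximate PG count for a pool expected to hold percent_data
        # percent of the cluster's data, replicated pool_size times.
        return int(pgs_per_osd * osd_count * (percent_data / 100.0) // pool_size)

    # When a device class is given, osd_count covers only OSDs of that
    # class, e.g. rough_pg_count(osd_count=6, percent_data=10, pool_size=3)
    # returns 20.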
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 76828201..568726bb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -59,6 +59,7 @@ service_stop, service_running, umount, + cmp_pkgrevno, ) from charmhelpers.fetch import ( apt_install, @@ -178,7 +179,6 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': + if cmp_pkgrevno('ceph', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -196,7 +196,8 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): """Return the number of placement groups to use when creating the pool. Returns the number of placement groups which should be specified when @@ -229,6 +230,9 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. :return: int. The number of pgs to use. """ @@ -243,17 +247,20 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # If the expected-osd-count is specified, then use the max between # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) + osd_list = get_osds(self.service, device_class) expected = config('expected-osd-count') or 0 if osd_list: - osd_count = max(expected, len(osd_list)) + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) # Log a message to provide some insight if the calculations claim # to be off because someone is setting the expected count and # there are more OSDs in reality. Try to make a proper guess # based upon the cluster itself. - if expected and osd_count != expected: + if not device_class and expected and osd_count != expected: log("Found more OSDs than provided expected count. " "Using the actual count instead", INFO) elif expected: @@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name): def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): + locality=None, durability_estimator=None, + device_class=None): """ Create a new erasure code profile if one does not already exist for it. Updates the profile if it exists. 
Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param coding_chunks: int :param locality: int :param durability_estimator: int + :param device_class: six.string_types :return: None. Can raise CalledProcessError """ - version = ceph_version() - # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) @@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 # failure_domain changed in luminous - if version and version >= '12.0.0': + if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) else: cmd.append('ruleset-failure-domain=' + failure_domain) + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -744,20 +759,26 @@ def pool_exists(service, name): return name in out.split() -def get_osds(service): +def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the - cluster. + cluster (optionally filtered by storage device class). + + :param device_class: Class of storage device for OSD's + :type device_class: str """ - version = ceph_version() - if version and version >= '0.56': + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) def install(): @@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if ceph_version() >= '12.0.0': + if cmp_pkgrevno('ceph', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -856,12 +877,22 @@ def _keyring_path(service): return KEYRING.format(service) -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ keyring = _keyring_path(service) if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) cmd = ['ceph-authtool', keyring, '--create-keyring', '--name=client.{}'.format(service), '--add-key={}'.format(key)] @@ -869,6 +900,11 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' 
% keyring, level=DEBUG) +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + def delete_keyring(service): """Delete an existing Ceph keyring.""" keyring = _keyring_path(service) @@ -905,7 +941,7 @@ def get_ceph_nodes(relation='ceph'): def configure(service, key, auth, use_syslog): """Perform basic configuration of Ceph.""" - create_keyring(service, key) + add_key(service, key) create_key_file(service, key) hosts = get_ceph_nodes() with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: @@ -1068,7 +1104,7 @@ def ensure_ceph_keyring(service, user=None, group=None, if not key: return False - create_keyring(service=service, key=key) + add_key(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) @@ -1076,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None, return True -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - class CephBrokerRq(object): """Ceph broker request. diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 79953a44..47c1fc35 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -46,6 +46,7 @@ lsb_release, cmp_pkgrevno, CompareHostReleases, + get_distrib_codename, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index a6d375af..d7e920eb 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -72,6 +72,14 @@ def lsb_release(): return d +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + def cmp_pkgrevno(package, revno, pkgcache=None): """Compare supplied revno with the revno of the installed package. diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/python/__init__.py similarity index 92% rename from ceph-mon/hooks/charmhelpers/contrib/python/__init__.py rename to ceph-mon/hooks/charmhelpers/fetch/python/__init__.py index d7567b86..bff99dc9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/debug.py b/ceph-mon/hooks/charmhelpers/fetch/python/debug.py new file mode 100644 index 00000000..757135ee --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/packages.py b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py similarity index 100% rename from ceph-mon/hooks/charmhelpers/contrib/python/packages.py rename to ceph-mon/hooks/charmhelpers/fetch/python/packages.py diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py b/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 00000000..9b31610c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/version.py b/ceph-mon/hooks/charmhelpers/fetch/python/version.py new file mode 100644 index 00000000..3eb42103 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +__author__ = "Jorge Niedbalski " + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 3e857d21..0d5a7e80 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -85,6 +85,7 @@ "compression_mode": [str, ["none", "passive", "aggressive", "force"]], "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], "compression_required_ratio": [float, [0.0, 1.0]], + "crush_rule": [str], } CEPH_BUCKET_TYPES = [ diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index a1cfbdc6..2256c666 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -935,6 +935,11 @@ def start_osds(devices): subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) +def udevadm_settle(): + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + + def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', @@ -943,8 +948,7 @@ def rescan_osd_devices(): subprocess.call(cmd) - cmd = ['udevadm', 'settle'] - subprocess.call(cmd) + udevadm_settle() _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -1128,6 +1132,15 @@ def get_mds_bootstrap_key(): ]) ]) +rbd_mirror_caps = collections.OrderedDict([ + ('mon', ['profile rbd']), + ('osd', ['profile rbd']), +]) + + +def get_rbd_mirror_key(name): + return get_named_key(name=name, caps=rbd_mirror_caps) + def create_named_keyring(entity, name, caps=None): caps = caps or _default_caps @@ -1442,77 +1455,82 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, db = kv() osd_devices = db.get('osd-devices', []) - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return + try: + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return - if is_osd_disk(dev): - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - return + if is_osd_disk(dev): + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + if is_device_mounted(dev): + osd_devices.append(dev) + return - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return - if is_active_bluestore_device(dev): - log('{} is in use as an active bluestore block device,' - ' skipping.'.format(dev)) - return + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + osd_devices.append(dev) + return - if is_mapped_luks_device(dev): - log('{} is a mapped LUKS device,' - ' skipping.'.format(dev)) - return + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return - if cmp_pkgrevno('ceph', '12.2.4') >= 0: - cmd = _ceph_volume(dev, - osd_journal, - encrypt, - 
bluestore, - key_manager) - else: - cmd = _ceph_disk(dev, - osd_format, - osd_journal, - encrypt, - bluestore) + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore, + key_manager) + else: + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) - try: - status_set('maintenance', 'Initializing device {}'.format(dev)) - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: try: - lsblk_output = subprocess.check_output( - ['lsblk', '-P']).decode('UTF-8') - except subprocess.CalledProcessError as e: - log("Couldn't get lsblk output: {}".format(e), ERROR) - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), DEBUG) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), WARNING) - raise + status_set('maintenance', 'Initializing device {}'.format(dev)) + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Couldn't get lsblk output: {}".format(e), ERROR) + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) + raise - # NOTE: Record processing of device only on success to ensure that - # the charm only tries to initialize a device of OSD usage - # once during its lifetime. - osd_devices.append(dev) - db.set('osd-devices', osd_devices) - db.flush() + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + finally: + db.set('osd-devices', osd_devices) + db.flush() def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): @@ -1814,6 +1832,13 @@ def _initialize_disk(dev, dev_uuid, encrypt=False, '--uuid', dev_uuid, dev, ]) + subprocess.check_call([ + 'dd', + 'if=/dev/zero', + 'of={}'.format(dm_crypt), + 'bs=512', + 'count=1', + ]) if use_vaultlocker: return dm_crypt diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py new file mode 100644 index 00000000..c54830fa --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -0,0 +1,127 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +import subprocess + +import test_utils +import create_crush_rule +import copy_pool + + +class CopyPoolTestCase(test_utils.CharmTestCase): + + TO_PATCH = [ + 'hookenv', + ] + + def setUp(self): + super(CopyPoolTestCase, self).setUp( + copy_pool, + self.TO_PATCH + ) + + @mock.patch.object(create_crush_rule.subprocess, 'check_call') + def test_copy_pool(self, mock_check_call): + _action_data = { + 'source': 'source-pool', + 'target': 'target-pool', + } + self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + copy_pool.copy_pool() + mock_check_call.assert_called_with([ + 'rados', 'cppool', + 'source-pool', 'target-pool', + ]) + + @mock.patch.object(create_crush_rule.subprocess, 'check_call') + def test_copy_pool_failed(self, mock_check_call): + _action_data = { + 'source': 'source-pool', + 'target': 'target-pool', + } + self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + mock_check_call.side_effect = subprocess.CalledProcessError(1, 'rados') + copy_pool.copy_pool() + mock_check_call.assert_called_with([ + 'rados', 'cppool', + 'source-pool', 'target-pool', + ]) + self.hookenv.action_fail.assert_called_once_with(mock.ANY) + + +class CreateCrushRuleTestCase(test_utils.CharmTestCase): + + TO_PATCH = [ + 'hookenv', + ] + + def setUp(self): + super(CreateCrushRuleTestCase, self).setUp( + create_crush_rule, + self.TO_PATCH + ) + + @mock.patch.object(create_crush_rule.subprocess, 'check_call') + def test_create_crush_rule(self, mock_check_call): + _action_data = { + 'name': 'replicated_nvme', + 'failure-domain': 'host', + 'device-class': 'nvme', + } + self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + create_crush_rule.create_crush_rule() + mock_check_call.assert_called_with([ + 'ceph', 'osd', 'crush', 'rule', + 'create-replicated', + 'replicated_nvme', + 'default', + 'host', + 'nvme', + ]) + + @mock.patch.object(create_crush_rule.subprocess, 'check_call') + def test_create_crush_rule_no_class(self, mock_check_call): + _action_data = { + 'name': 'replicated_whoknows', + 'failure-domain': 'disk', + } + self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + create_crush_rule.create_crush_rule() + mock_check_call.assert_called_with([ + 'ceph', 'osd', 'crush', 'rule', + 'create-replicated', + 'replicated_whoknows', + 'default', + 'disk', + ]) + + @mock.patch.object(create_crush_rule.subprocess, 'check_call') + def test_create_crush_rule_failed(self, mock_check_call): + _action_data = { + 'name': 'replicated_nvme', + 'failure-domain': 'host', + 'device-class': 'nvme', + } + self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + mock_check_call.side_effect = subprocess.CalledProcessError(1, 'test') + create_crush_rule.create_crush_rule() + mock_check_call.assert_called_with([ + 'ceph', 'osd', 'crush', 'rule', + 'create-replicated', + 'replicated_nvme', + 'default', + 'host', + 'nvme', + ]) + self.hookenv.action_fail.assert_called_once_with(mock.ANY) From f9f92175a2093c60f3895c67b62fecf7899423ed Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 12 Feb 2019 15:55:30 -0800 Subject: [PATCH 1670/2699] Update charm-helpers-hooks.yaml and sync ch Using the new version of the sync tool which removes the charmhelpers directory before syncing, run charm helpers sync to find any unexpected missing dependencies. 
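As a minimal sketch (not part of the patch) of the compatibility the deprecation shim below buys, assuming the synced charmhelpers tree is on sys.path — charm code importing either path resolves to the same module:

    # Old and new import paths are aliases via the contrib.python shim
    # added in this sync; current_version_string() is defined in
    # charmhelpers/fetch/python/version.py.
    from charmhelpers.contrib.python import version as old_path
    from charmhelpers.fetch.python import version as new_path

    # Both report the interpreter version as 'major.minor.micro'.
    assert old_path.current_version_string() == new_path.current_version_string()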
Change-Id: I634c89e87918f56a8db89bb888c00b9032c69a17 --- ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/hooks/charmhelpers/contrib/python.py | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/python.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index c127d09b..7d151fd7 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -15,4 +15,4 @@ include: - utils - contrib.charmsupport - contrib.hardening|inc=* - - fetch.python.packages + - fetch.python diff --git a/ceph-mon/hooks/charmhelpers/contrib/python.py b/ceph-mon/hooks/charmhelpers/contrib/python.py new file mode 100644 index 00000000..84cba8c4 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/python.py @@ -0,0 +1,21 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa From 42b5793a0fa59e0d702e8dce0aa720b6fdaa3116 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 20 Feb 2019 06:50:29 +0100 Subject: [PATCH 1671/2699] Set appropriate application tag for pools created Use cases are emerging for the Ceph pool application tags. Let's set an appropriate name for the pools created for RadosGW. Reference: http://docs.ceph.com/docs/master/rados/operations/pools/#associate-pool-to-application Sync charm-helpers.
Change-Id: I5c944d806ef458a82234dcc413cdd5ba34be7c18 --- ceph-radosgw/hooks/ceph_rgw.py | 10 ++- .../contrib/storage/linux/ceph.py | 87 +++++++++++-------- ceph-radosgw/unit_tests/test_ceph.py | 60 ++++++------- 3 files changed, 86 insertions(+), 71 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index 3af951fd..dd9304e6 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -31,6 +31,7 @@ CEPH_DIR = '/etc/ceph' CEPH_RADOSGW_DIR = '/var/lib/ceph/radosgw' _radosgw_keyring = "keyring.rados.gateway" +CEPH_POOL_APP_NAME = 'rgw' def import_radosgw_key(key, name=None): @@ -99,10 +100,12 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) if pg_num > 0: rq.add_op_create_pool(name=pool, replica_count=replicas, - pg_num=pg_num, group='objects') + pg_num=pg_num, group='objects', + app_name=CEPH_POOL_APP_NAME) else: rq.add_op_create_pool(name=pool, replica_count=replicas, - weight=w, group='objects') + weight=w, group='objects', + app_name=CEPH_POOL_APP_NAME) from apt import apt_pkg @@ -121,7 +124,8 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) rq.add_op_create_pool(name=pool, replica_count=replicas, - weight=bucket_weight, group='objects') + weight=bucket_weight, group='objects', + app_name=CEPH_POOL_APP_NAME) # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 63c93044..22aa978b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -59,6 +59,7 @@ service_stop, service_running, umount, + cmp_pkgrevno, ) from charmhelpers.fetch import ( apt_install, @@ -178,7 +179,6 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': + if cmp_pkgrevno('ceph', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -196,7 +196,8 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): """Return the number of placement groups to use when creating the pool. Returns the number of placement groups which should be specified when @@ -229,6 +230,9 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. 
+ :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. :return: int. The number of pgs to use. """ @@ -243,17 +247,20 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # If the expected-osd-count is specified, then use the max between # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) + osd_list = get_osds(self.service, device_class) expected = config('expected-osd-count') or 0 if osd_list: - osd_count = max(expected, len(osd_list)) + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) # Log a message to provide some insight if the calculations claim # to be off because someone is setting the expected count and # there are more OSDs in reality. Try to make a proper guess # based upon the cluster itself. - if expected and osd_count != expected: + if not device_class and expected and osd_count != expected: log("Found more OSDs than provided expected count. " "Using the actual count instead", INFO) elif expected: @@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name): def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): + locality=None, durability_estimator=None, + device_class=None): """ Create a new erasure code profile if one does not already exist for it. Updates the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param coding_chunks: int :param locality: int :param durability_estimator: int + :param device_class: six.string_types :return: None. Can raise CalledProcessError """ - version = ceph_version() - # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) @@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 # failure_domain changed in luminous - if version and version >= '12.0.0': + if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) else: cmd.append('ruleset-failure-domain=' + failure_domain) + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -744,20 +759,26 @@ def pool_exists(service, name): return name in out.split() -def get_osds(service): +def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the - cluster. + cluster (optionally filtered by storage device class). 
+ + :param device_class: Class of storage device for OSDs + :type device_class: str """ - version = ceph_version() - if version and version >= '0.56': + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) def install(): @@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if ceph_version() >= '12.0.0': + if cmp_pkgrevno('ceph', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None, return True -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - class CephBrokerRq(object): """Ceph broker request. @@ -1147,7 +1152,8 @@ def add_op_request_access_to_group(self, name, namespace=None, 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): + weight=None, group=None, namespace=None, + app_name=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1155,6 +1161,11 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. @param weight: the percentage of data the pool makes up + :param app_name: (Optional) Tag pool with application name. Note that + there are certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') @@ -1162,7 +1173,7 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, - 'group-namespace': namespace}) + 'group-namespace': namespace, 'app-name': app_name}) def set_ops(self, ops): """Set request ops to provided value.
diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index a6b6e231..de5550d8 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -67,35 +67,35 @@ def test_create_rgw_pools_rq_with_prefix(self, mock_broker): ceph.get_create_rgw_pools_rq(prefix='us-east') mock_broker.assert_has_calls([ call(replica_count=3, weight=19, name='us-east.rgw.buckets.data', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.control', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.data.root', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.gc', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.log', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.intent-log', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.meta', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.usage', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.users.keys', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.users.email', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.users.swift', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.users.uid', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index', - group='objects'), + group='objects', app_name='rgw'), call(pg_num=10, replica_count=3, name='.rgw.root', - group='objects')], + group='objects', app_name='rgw')], ) @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' @@ -111,37 +111,37 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ call(replica_count=3, weight=19, name='default.rgw.buckets.data', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.control', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.data.root', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.gc', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.log', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.intent-log', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.meta', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.usage', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.users.keys', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.users.email', - group='objects'), + group='objects', app_name='rgw'), 
call(weight=0.10, replica_count=3, name='default.rgw.users.swift', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.users.uid', - group='objects'), + group='objects', app_name='rgw'), call(weight=1.00, replica_count=3, name='default.rgw.buckets.extra', - group='objects'), + group='objects', app_name='rgw'), call(weight=3.00, replica_count=3, name='default.rgw.buckets.index', - group='objects'), + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='.rgw.root', - group='objects')], + group='objects', app_name='rgw')], ) mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', From e687723a419a541c7de3beb58e7b51dee524ed4e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 21 Feb 2019 13:14:54 +0000 Subject: [PATCH 1672/2699] Switch auth order for s3 authentication When deploying the RGW in multi-site configurations, communication between sites is authenticated using S3 credentials managed within RGW. In the event that keystone authentication is in use, this generates a large number of s3 authentication attempts to keystone which will always fail. Switch the default order to check local auth first and then fall back to external. Change-Id: I7bfc016baf99188ba5a36f663145eeff465d25e8 --- ceph-radosgw/templates/ceph.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 4192847a..db55dd4e 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -53,6 +53,7 @@ rgw keystone admin token = {{ admin_token }} rgw keystone accepted roles = {{ user_roles }} rgw keystone token cache size = {{ cache_size }} rgw s3 auth use keystone = true +rgw s3 auth order = local, external {% else -%} rgw swift url = http://{{ unit_public_ip }} {% endif -%} From 810f083f9ed25c1fbdb27c5fc087e27709cdd2a5 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 22 Feb 2019 14:14:58 +0100 Subject: [PATCH 1673/2699] Move broker request handling code to common helper Change-Id: Iae2ff6362b032e644bf3ae3553792357eae4cb6d --- ceph-mon/hooks/ceph_hooks.py | 90 +++++++++++--------- ceph-mon/unit_tests/test_ceph_hooks.py | 23 +++++++ 2 files changed, 61 insertions(+), 52 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index df438832..90f428cc 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -474,6 +474,36 @@ def notify_client(): mds_relation_joined(relid=relid, unit=unit) +def handle_broker_request(relid, unit, add_legacy_response=False): + """Retrieve broker request from relation, process, return response data. + + :param relid: Relation ID + :type relid: str + :param unit: Remote unit name + :type unit: str + :param add_legacy_response: (Optional) Adds the legacy ``broker_rsp`` key + to the response in addition to the new way. + :type add_legacy_response: bool + :returns: Dictionary of response data ready for use with relation_set.
+ :rtype: dict + """ + response = {} + if not unit: + unit = remote_unit() + settings = relation_get(rid=relid, unit=unit) + if 'broker_req' in settings: + if not ceph.is_leader(): + log("Not leader - ignoring broker request", level=DEBUG) + else: + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + response.update({unit_response_key: rsp}) + if add_legacy_response: + response.update({'broker_rsp': rsp}) + return response + + @hooks.hook('osd-relation-joined') @hooks.hook('osd-relation-changed') def osd_relation(relid=None, unit=None): @@ -489,20 +519,10 @@ def osd_relation(relid=None, unit=None): caps=ceph.osd_upgrade_caps), } - unit = unit or remote_unit() - settings = relation_get(rid=relid, unit=unit) - """Process broker request(s).""" - if 'broker_req' in settings: - if ceph.is_leader(): - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - data[unit_response_key] = rsp - else: - log("Not leader - ignoring broker request", level=DEBUG) - + data.update(handle_broker_request(relid, unit)) relation_set(relation_id=relid, relation_settings=data) + # NOTE: radosgw key provision is gated on presence of OSD # units so ensure that any deferred hooks are processed notify_radosgws() @@ -596,17 +616,7 @@ def radosgw_relation(relid=None, unit=None): # Old style global radosgw key data['radosgw_key'] = ceph.get_radosgw_key() - settings = relation_get(rid=relid, unit=unit) - """Process broker request(s).""" - if 'broker_req' in settings: - if ceph.is_leader(): - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - data[unit_response_key] = rsp - else: - log("Not leader - ignoring broker request", level=DEBUG) - + data.update(handle_broker_request(relid, unit)) relation_set(relation_id=relid, relation_settings=data) @@ -626,17 +636,7 @@ def mds_relation_joined(relid=None, unit=None): 'mds_key': ceph.get_mds_key(name=mds_name), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} - settings = relation_get(rid=relid, unit=unit) - """Process broker request(s).""" - if 'broker_req' in settings: - if ceph.is_leader(): - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - data[unit_response_key] = rsp - else: - log("Not leader - ignoring mds broker request", level=DEBUG) - + data.update(handle_broker_request(relid, unit)) relation_set(relation_id=relid, relation_settings=data) @@ -689,24 +689,10 @@ def client_relation_changed(relid=None, unit=None): if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- processing client broker requests') - if not unit: - unit = remote_unit() - settings = relation_get(rid=relid, unit=unit) - if 'broker_req' in settings: - if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=DEBUG) - else: - rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - # broker_rsp is being left for backward compatibility, - # unit_response_key superscedes it - data = { - 'broker_rsp': rsp, - unit_response_key: rsp, - } - relation_set(relation_id=relid, - relation_settings=data) + data = handle_broker_request(relid, unit, add_legacy_response=True) + if len(data): + relation_set(relation_id=relid, + relation_settings=data) 
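# A hedged usage sketch (not part of the patch): after this refactor every
# relation handler above follows the same shape -- build handler-specific
# data, merge in the shared broker response, publish. The relation name and
# data key below are illustrative only.
#
#   @hooks.hook('example-relation-changed')
#   def example_relation(relid=None, unit=None):
#       data = {'auth': config('auth-supported')}
#       data.update(handle_broker_request(relid, unit))
#       relation_set(relation_id=relid, relation_settings=data)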
@hooks.hook('upgrade-charm.real') diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index a7fb43af..2afde125 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -300,6 +300,29 @@ def test_client_relation_changed_non_rel_hook(self, relation_set, 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) + @patch.object(ceph_hooks, 'process_requests') + @patch.object(ceph_hooks.ceph, 'is_leader') + @patch.object(ceph_hooks, 'relation_get') + @patch.object(ceph_hooks, 'remote_unit') + def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, + mock_ceph_is_leader, + mock_broker_process_requests): + mock_remote_unit.return_value = 'glance/0' + ceph_hooks.handle_broker_request('rel1', None) + mock_remote_unit.assert_called_once_with() + mock_relation_get.assert_called_once_with(rid='rel1', unit='glance/0') + mock_relation_get.reset_mock() + mock_relation_get.return_value = {'broker_req': 'FAKE-REQUEST'} + mock_broker_process_requests.return_value = 'AOK' + self.assertEqual( + ceph_hooks.handle_broker_request('rel1', 'glance/0'), + {'broker-rsp-glance-0': 'AOK'}) + mock_relation_get.assert_called_once_with(rid='rel1', unit='glance/0') + self.assertEqual( + ceph_hooks.handle_broker_request('rel1', 'glance/0', + add_legacy_response=True), + {'broker_rsp': 'AOK', 'broker-rsp-glance-0': 'AOK'}) + class BootstrapSourceTestCase(test_utils.CharmTestCase): From 27e401a14582f769de5302317d3b38d7e504ae28 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 6 Feb 2019 11:26:54 +0100 Subject: [PATCH 1674/2699] Add ``rbd-mirror`` relation Reactive interface counterpart is here: https://github.com/openstack-charmers/charm-interface-ceph-rbd-mirror Sync charms.ceph. Depends-On: I1bad5311ed034188a78dc67b493c22bff7ce4f7d Change-Id: I509793e6c5aad9ea41fa4904c83b58e7477770e8 --- ceph-mon/hooks/ceph_hooks.py | 84 +++++++++++- ceph-mon/hooks/rbd-mirror-relation-changed | 1 + ceph-mon/hooks/rbd-mirror-relation-joined | 1 + ceph-mon/lib/ceph/utils.py | 123 ++++++++++++++++- ceph-mon/metadata.yaml | 2 + ceph-mon/tox.ini | 9 +- ceph-mon/unit_tests/test_ceph_hooks.py | 151 ++++++++++++++++++++- 7 files changed, 357 insertions(+), 14 deletions(-) create mode 120000 ceph-mon/hooks/rbd-mirror-relation-changed create mode 120000 ceph-mon/hooks/rbd-mirror-relation-joined diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 90f428cc..f621c142 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import os import subprocess import socket @@ -38,6 +39,7 @@ is_relation_made, relation_get, relation_set, + relation_type, leader_set, leader_get, is_leader, remote_unit, @@ -401,7 +403,14 @@ def mon_relation(): moncount = int(config('monitor-count')) if len(get_mon_hosts()) >= moncount: - if not ceph.is_bootstrapped(): + if ceph.is_bootstrapped(): + # The ceph-mon unit chosen for handling broker requests is based on + # internal Ceph MON leadership and not Juju leadership. To update + # the rbd-mirror relation on all ceph-mon units after pool creation + # the unit handling the broker request will update a nonce on the + # mon relation. 
+ notify_rbd_mirrors() + else: status_set('maintenance', 'Bootstrapping MON cluster') # the following call raises an exception # if it can't add the keyring @@ -445,6 +454,7 @@ def mon_relation(): notify_osds() notify_radosgws() notify_client() + notify_rbd_mirrors() else: log('Not enough mons ({}), punting.' .format(len(get_mon_hosts()))) @@ -462,6 +472,12 @@ def notify_radosgws(): radosgw_relation(relid=relid, unit=unit) +def notify_rbd_mirrors(): + for relid in relation_ids('rbd-mirror'): + for unit in related_units(relid): + rbd_mirror_relation(relid=relid, unit=unit) + + def notify_client(): for relid in relation_ids('client'): client_relation_joined(relid) @@ -474,6 +490,25 @@ def notify_client(): mds_relation_joined(relid=relid, unit=unit) +def notify_mons(): + """Update a nonce on the ``mon`` relation. + + This is useful for flagging that our peer mon units should update some of + their client relations. + + Normally we would have handled this with leader storage, but for the Ceph + case, the unit handling the broker requests is the Ceph MON leader and not + necessarily the Juju leader. + + A non-leader unit has no way of changing data in leader-storage. + """ + nonce = uuid.uuid4() + for relid in relation_ids('mon'): + for unit in related_units(relid): + relation_set(relation_id=relid, + relation_settings={'nonce': nonce}) + + def handle_broker_request(relid, unit, add_legacy_response=False): """Retrieve broker request from relation, process, return response data. @@ -501,6 +536,20 @@ def handle_broker_request(relid, unit, add_legacy_response=False): response.update({unit_response_key: rsp}) if add_legacy_response: response.update({'broker_rsp': rsp}) + + # prevent recursion when called from rbd_mirror_relation() + if relation_type() != 'rbd-mirror': + # update ``rbd-mirror`` relations for this unit with + # information about new pools. + log('Notifying this units rbd-mirror relations after ' + 'processing broker request.', level=DEBUG) + notify_rbd_mirrors() + + # notify mons to flag that the other mon units should update + # their ``rbd-mirror`` relations with information about new pools.
log('Notifying peers after processing broker request.', + level=DEBUG) + notify_mons() return response @@ -527,6 +576,7 @@ def osd_relation(relid=None, unit=None): # units so ensure that any deferred hooks are processed notify_radosgws() notify_client() + notify_rbd_mirrors() else: log('mon cluster not in quorum - deferring fsid provision') @@ -620,6 +670,37 @@ def radosgw_relation(relid=None, unit=None): relation_set(relation_id=relid, relation_settings=data) +@hooks.hook('rbd-mirror-relation-joined') +@hooks.hook('rbd-mirror-relation-changed') +def rbd_mirror_relation(relid=None, unit=None): + if ready_for_service(): + log('mon cluster in quorum and osds bootstrapped ' + '- providing rbd-mirror client with keys') + if not unit: + unit = remote_unit() + # handle broker requests first to get an updated pool map + data = (handle_broker_request(relid, unit)) + data.update({ + 'auth': config('auth-supported'), + 'ceph-public-address': get_public_addr(), + 'pools': json.dumps(ceph.list_pools_detail(), sort_keys=True) + }) + cluster_addr = get_cluster_addr() + if cluster_addr: + data['ceph-cluster-address'] = cluster_addr + # handle both classic and reactive Endpoint peers + try: + unique_id = json.loads( + relation_get('unique_id', unit=unit, rid=relid)) + except (TypeError, json.decoder.JSONDecodeError): + unique_id = relation_get('unique_id', unit=unit, rid=relid) + if unique_id: + data['{}_key'.format(unique_id)] = ceph.get_rbd_mirror_key( + 'rbd-mirror.{}'.format(unique_id)) + + relation_set(relation_id=relid, relation_settings=data) + + @hooks.hook('mds-relation-changed') @hooks.hook('mds-relation-joined') def mds_relation_joined(relid=None, unit=None): @@ -718,6 +799,7 @@ def upgrade_charm(): # key permission changes are applied notify_client() notify_radosgws() + notify_rbd_mirrors() @hooks.hook('start') diff --git a/ceph-mon/hooks/rbd-mirror-relation-changed b/ceph-mon/hooks/rbd-mirror-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/rbd-mirror-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/rbd-mirror-relation-joined b/ceph-mon/hooks/rbd-mirror-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/rbd-mirror-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 2256c666..1a090403 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -2520,18 +2520,20 @@ def update_owner(path, recurse_dirs=True): secs=elapsed_time.total_seconds(), path=path), DEBUG) -def list_pools(service): +def list_pools(client='admin'): """This will list the current pools that Ceph has - :param service: String service id to run under - :returns: list. Returns a list of the ceph pools. - :raises: CalledProcessError if the subprocess fails to run. + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Returns a list of available pools. + :rtype: list + :raises: subprocess.CalledProcessError if the subprocess fails to run.
""" try: pool_list = [] - pools = str(subprocess - .check_output(['rados', '--id', service, 'lspools']) - .decode('UTF-8')) + pools = subprocess.check_output(['rados', '--id', client, 'lspools'], + universal_newlines=True) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -2540,6 +2542,113 @@ def list_pools(service): raise +def get_pool_param(pool, param, client='admin'): + """Get parameter from pool. + + :param pool: Name of pool to get variable from + :type pool: str + :param param: Name of variable to get + :type param: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type cilent: str + :returns: Value of variable on pool or None + :rtype: str or None + :raises: subprocess.CalledProcessError + """ + try: + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get', + pool, param], universal_newlines=True) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT: option' in cp.output: + return None + raise + if ':' in output: + return output.split(':')[1].lstrip().rstrip() + + +def get_pool_quota(pool, client='admin'): + """Get pool quota. + + :param pool: Name of pool to get variable from + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type cilent: str + :returns: Dictionary with quota variables + :rtype: dict + :raises: subprocess.CalledProcessError + """ + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool], + universal_newlines=True) + rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)') + result = {} + for line in output.splitlines(): + m = rc.match(line) + if m: + result.update({'max_{}'.format(m.group(1)): m.group(2)}) + return result + + +def get_pool_applications(pool='', client='admin'): + """Get pool applications. + + :param pool: (Optional) Name of pool to get applications for + Defaults to get for all pools + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type cilent: str + :returns: Dictionary with pool name as key + :rtype: dict + :raises: subprocess.CalledProcessError + """ + + cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get'] + if pool: + cmd.append(pool) + try: + output = subprocess.check_output(cmd, universal_newlines=True) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT' in cp.output: + return {} + raise + return json.loads(output) + + +def list_pools_detail(): + """Get detailed information about pools. + + Structure: + {'pool_name_1': {'applications': {'application': {}}, + 'parameters': {'pg_num': 42, 'size': 42}, + 'quota': {'max_bytes': '1000', + 'max_objects': '10'}, + }, + 'pool_name_2': ... + } + + :returns: Dictionary with detailed pool information. + :rtype: dict + :raises: subproces.CalledProcessError + """ + get_params = ['pg_num', 'size'] + result = {} + applications = get_pool_applications() + for pool in list_pools(): + result[pool] = { + 'applications': applications.get(pool, {}), + 'parameters': {}, + 'quota': get_pool_quota(pool), + } + for param in get_params: + result[pool]['parameters'].update({ + param: get_pool_param(pool, param)}) + return result + + def dirs_need_ownership_update(service): """Determines if directories still need change of ownership. 
diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index a3fedb2d..23270776 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -34,6 +34,8 @@ provides: interface: ceph-osd radosgw: interface: ceph-radosgw + rbd-mirror: + interface: ceph-rbd-mirror nrpe-external-master: interface: nrpe-external-master scope: container diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 4a4eb12e..965952a1 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -2,10 +2,8 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. [tox] -;envlist = pep8,py27,py35,py36 -envlist = pep8,py27,py35 +envlist = pep8,py3 skipsdist = True -;skip_missing_interpreters = True [testenv] setenv = VIRTUAL_ENV={envdir} @@ -25,6 +23,11 @@ deps = -r{toxinidir}/requirements.txt # temporarily disable py27 commands = /bin/true +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + ; keep zuul happy until we change the py35 job [testenv:py35] basepython = python3.5 diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 2afde125..a3dc1a93 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -1,4 +1,5 @@ import copy +import json import unittest import sys @@ -187,6 +188,7 @@ def test_nrpe_dependency_installed(self, mock_config): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) + @patch.object(ceph_hooks, 'notify_rbd_mirrors') @patch.object(ceph_hooks, 'service_pause') @patch.object(ceph_hooks, 'notify_radosgws') @patch.object(ceph_hooks, 'ceph') @@ -198,7 +200,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_notify_client, mock_ceph, mock_notify_radosgws, - mock_service_pause): + mock_service_pause, + mock_notify_rbd_mirrors): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] with patch.multiple( @@ -221,6 +224,61 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_ceph.update_monfs.assert_called_once_with() mock_service_pause.assert_called_with('ceph-create-keys') + @patch.object(ceph_hooks, 'mds_relation_joined') + @patch.object(ceph_hooks, 'admin_relation_joined') + @patch.object(ceph_hooks, 'client_relation_changed') + @patch.object(ceph_hooks, 'client_relation_joined') + @patch.object(ceph_hooks, 'related_units') + @patch.object(ceph_hooks, 'relation_ids') + def test_notify_client(self, mock_relation_ids, mock_related_units, + mock_client_relation_joined, + mock_client_relation_changed, + mock_admin_relation_joined, + mock_mds_relation_joined): + mock_relation_ids.return_value = ['arelid'] + mock_related_units.return_value = ['aunit'] + ceph_hooks.notify_client() + mock_relation_ids.assert_has_calls([ + call('client'), + call('admin'), + call('mds'), + ]) + mock_related_units.assert_called_with('arelid') + mock_client_relation_joined.assert_called_once_with('arelid') + mock_client_relation_changed.assert_called_once_with('arelid', 'aunit') + mock_admin_relation_joined.assert_called_once_with('arelid') + mock_mds_relation_joined.assert_called_once_with(relid='arelid', + unit='aunit') + + @patch.object(ceph_hooks, 'rbd_mirror_relation') + @patch.object(ceph_hooks, 'related_units') + @patch.object(ceph_hooks, 'relation_ids') + def test_notify_rbd_mirrors(self, mock_relation_ids, mock_related_units, + mock_rbd_mirror_relation): + mock_relation_ids.return_value = ['arelid'] + 
mock_related_units.return_value = ['aunit'] + ceph_hooks.notify_rbd_mirrors() + mock_relation_ids.assert_called_once_with('rbd-mirror') + mock_related_units.assert_called_once_with('arelid') + mock_rbd_mirror_relation.assert_called_once_with(relid='arelid', + unit='aunit') + + @patch.object(ceph_hooks, 'uuid') + @patch.object(ceph_hooks, 'relation_set') + @patch.object(ceph_hooks, 'related_units') + @patch.object(ceph_hooks, 'relation_ids') + def test_notify_mons(self, mock_relation_ids, mock_related_units, + mock_relation_set, mock_uuid): + mock_relation_ids.return_value = ['arelid'] + mock_related_units.return_value = ['aunit'] + mock_uuid.uuid4.return_value = 'FAKE-UUID' + ceph_hooks.notify_mons() + mock_relation_ids.assert_called_once_with('mon') + mock_related_units.assert_called_once_with('arelid') + mock_relation_set.assert_called_once_with(relation_id='arelid', + relation_settings={ + 'nonce': 'FAKE-UUID'}) + class RelatedUnitsTestCase(unittest.TestCase): @@ -264,6 +322,7 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks, 'relation_ids', return_value=[]) @patch.object(ceph_hooks, 'ready_for_service') @patch.object(ceph_hooks.ceph, 'is_quorum') @patch.object(ceph_hooks, 'remote_unit') @@ -277,7 +336,8 @@ def test_client_relation_changed_non_rel_hook(self, relation_set, relation_get, remote_unit, is_quorum, - ready_for_service): + ready_for_service, + relation_ids): # Check for LP #1738154 ready_for_service.return_value = True process_requests.return_value = 'AOK' @@ -300,13 +360,17 @@ def test_client_relation_changed_non_rel_hook(self, relation_set, 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) + @patch.object(ceph_hooks, 'notify_mons') + @patch.object(ceph_hooks, 'notify_rbd_mirrors') @patch.object(ceph_hooks, 'process_requests') @patch.object(ceph_hooks.ceph, 'is_leader') @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'remote_unit') def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, mock_ceph_is_leader, - mock_broker_process_requests): + mock_broker_process_requests, + mock_notify_rbd_mirrors, + mock_notify_mons): mock_remote_unit.return_value = 'glance/0' ceph_hooks.handle_broker_request('rel1', None) mock_remote_unit.assert_called_once_with() @@ -317,6 +381,8 @@ def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, self.assertEqual( ceph_hooks.handle_broker_request('rel1', 'glance/0'), {'broker-rsp-glance-0': 'AOK'}) + mock_notify_rbd_mirrors.assert_called_with() + mock_notify_mons.assert_called_with() mock_relation_get.assert_called_once_with(rid='rel1', unit='glance/0') self.assertEqual( ceph_hooks.handle_broker_request('rel1', 'glance/0', @@ -503,3 +569,82 @@ def test_per_unit_radosgw_key(self): } ) self.ceph.get_radosgw_key.assert_called_once_with(name='testhostname') + + +class RBDMirrorRelationTestCase(test_utils.CharmTestCase): + + TO_PATCH = [ + 'relation_get', + 'get_cluster_addr', + 'get_public_addr', + 'ready_for_service', + 'remote_unit', + 'apt_install', + 'filter_installed_packages', + 'leader_get', + 'ceph', + 'process_requests', + 'log', + 'relation_set', + 'config', + 'handle_broker_request', + ] + + test_key = 'OTQ1MDdiODYtMmZhZi00M2IwLTkzYTgtZWI0MGRhNzdmNzBlCg==' + + def setUp(self): + super(RBDMirrorRelationTestCase, self).setUp(ceph_hooks, self.TO_PATCH) + self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + self.test_config.set('auth-supported', 'cephx') + 
self.filter_installed_packages.side_effect = lambda pkgs: pkgs + self.ready_for_service.return_value = True + self.ceph.is_leader.return_value = True + self.ceph.get_rbd_mirror_key.return_value = self.test_key + self.get_cluster_addr.return_value = '192.0.2.10' + self.get_public_addr.return_value = '198.51.100.10' + self.ceph.list_pools_detail.return_value = {'pool': {}} + + def test_rbd_mirror_relation(self): + self.handle_broker_request.return_value = {} + base_relation_settings = { + 'auth': self.test_config.get('auth-supported'), + 'ceph-public-address': '198.51.100.10', + 'ceph-cluster-address': '192.0.2.10', + 'pools': json.dumps({'pool': {}}), + } + ceph_hooks.rbd_mirror_relation('rbd-mirror:51', 'ceph-rbd-mirror/0') + self.handle_broker_request.assert_called_with( + 'rbd-mirror:51', 'ceph-rbd-mirror/0') + self.relation_set.assert_called_with( + relation_id='rbd-mirror:51', + relation_settings=base_relation_settings) + self.test_relation.set( + {'unique_id': None}) + ceph_hooks.rbd_mirror_relation('rbd-mirror:52', 'ceph-rbd-mirror/0') + self.relation_set.assert_called_with( + relation_id='rbd-mirror:52', + relation_settings=base_relation_settings) + self.test_relation.set( + {'unique_id': json.dumps('otherSideIsReactiveEndpoint')}) + ceph_hooks.rbd_mirror_relation('rbd-mirror:53', 'ceph-rbd-mirror/0') + self.ceph.get_rbd_mirror_key.assert_called_once_with( + 'rbd-mirror.otherSideIsReactiveEndpoint') + key_relation_settings = base_relation_settings.copy() + key_relation_settings.update( + {'otherSideIsReactiveEndpoint_key': self.test_key}) + self.relation_set.assert_called_with( + relation_id='rbd-mirror:53', + relation_settings=key_relation_settings) + self.test_relation.set({'unique_id': 'somehostname'}) + ceph_hooks.rbd_mirror_relation('rbd-mirror:42', 'ceph-rbd-mirror/0') + self.ceph.get_rbd_mirror_key.assert_called_with( + 'rbd-mirror.somehostname') + key_relation_settings = base_relation_settings.copy() + key_relation_settings.update({ + 'otherSideIsReactiveEndpoint_key': self.test_key, + 'somehostname_key': self.test_key + }) + self.relation_set.assert_called_with( + relation_id='rbd-mirror:42', + relation_settings=key_relation_settings) From 91b6dac7b9374ad01b9463bfb9c5f41e62d1b330 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 25 Feb 2019 10:30:31 +0100 Subject: [PATCH 1675/2699] Rebuild to handle Mimic pool tagging requirements In Mimic, CephFS requires that the backing pools have application tags of "cephfs" explicitly, and will fail to configure on other application pools Change-Id: Iffe4b6edb8b7ab33bfde77f2e8bf12ae8bffac1a --- ceph-fs/rebuild | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index ada580de..eb4d7e55 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -1,5 +1 @@ -# This file is used to trigger rebuilds -# when dependencies of the charm change, -# but nothing in the charm needs to. -# simply change the uuid to something new -85c5499c-1a79-11e9-8864-470f2a69c15e +55501b8e-38d8-11e9-a8ad-fb10af1e7610 From 44e71eaa15037fd8935d10e88b182d0c0dd36c39 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 25 Feb 2019 00:19:56 +0300 Subject: [PATCH 1676/2699] Add automatic pool creation and mirror setup Pools with application ``rbd`` are automatically created on the remote end. Mirroring is also automatically configured for these pools in both ends. Implement leadership tracking to allow multiple units to run. Delay startup of service until configuration and keys are ready. 
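For readers unfamiliar with the underlying tooling, here is a minimal sketch of the
``rbd`` CLI sequence this change drives (the ceph id and pool name are illustrative;
it assumes keyrings and /etc/ceph/remote.conf are already in place, as arranged by
the handlers below):

    import subprocess


    def enable_pool_mirroring(ceph_id, pool):
        # Enable whole-pool mirroring, then register the remote peer;
        # this mirrors the approach taken in mirror_pool_enable() below.
        base_cmd = ['rbd', '--id', ceph_id, 'mirror', 'pool']
        # 'pool' mode mirrors every image with the required features
        subprocess.check_call(base_cmd + ['enable', pool, 'pool'])
        # 'remote' is the cluster name backed by /etc/ceph/remote.conf
        subprocess.check_call(base_cmd + ['peer', 'add', pool,
                                          'client.{}@remote'.format(ceph_id)])


    if __name__ == '__main__':
        # hypothetical invocation only
        enable_pool_mirroring('rbd-mirror.myhost', 'mypool')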
--- ceph-rbd-mirror/src/layer.yaml | 1 + .../lib/charm/openstack/ceph_rbd_mirror.py | 67 ++++++++++++--- .../src/reactive/ceph_rbd_mirror_handlers.py | 84 ++++++++++++++----- ceph-rbd-mirror/src/templates/ceph.conf | 21 +++++ ceph-rbd-mirror/src/templates/remote.conf | 21 +++++ .../src/tests/bundles/bionic-queens.yaml | 27 ++++++ 6 files changed, 188 insertions(+), 33 deletions(-) create mode 100644 ceph-rbd-mirror/src/templates/ceph.conf create mode 100644 ceph-rbd-mirror/src/templates/remote.conf diff --git a/ceph-rbd-mirror/src/layer.yaml b/ceph-rbd-mirror/src/layer.yaml index 59aee36b..68e3b5f8 100644 --- a/ceph-rbd-mirror/src/layer.yaml +++ b/ceph-rbd-mirror/src/layer.yaml @@ -1,4 +1,5 @@ includes: + - layer:leadership - layer:ceph - interface:ceph-rbd-mirror - interface:nrpe-external-master diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index e86f204b..80f16e21 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -12,28 +12,73 @@ # See the License for the specific language governing permissions and # limitations under the License. +import socket +import subprocess + import charms_openstack.charm import charms_openstack.adapters +# import charmhelpers.core.host as ch_host + + +class CephRBDMirrorCharmRelationAdapters( + charms_openstack.adapters.OpenStackRelationAdapters): + relation_adapters = { + 'ceph_local': charms_openstack.adapters.CephRelationAdapter, + 'ceph_remote': charms_openstack.adapters.CephRelationAdapter, + } + class CephRBDMirrorCharm(charms_openstack.charm.CephCharm): # We require Ceph 12.2 Luminous or later for HA support in the Ceph # rbd-mirror daemon. Luminous appears in UCA at pike. release = 'pike' name = 'ceph-rbd-mirror' - packages = ['rbd-mirror'] python_version = 3 + packages = ['rbd-mirror'] required_relations = ['ceph-local', 'ceph-remote'] + user = 'ceph' + group = 'ceph' + adapters_class = CephRBDMirrorCharmRelationAdapters + ceph_service_name_override = 'rbd-mirror' + ceph_key_per_unit_name = True + + def __init__(self, **kwargs): + self.ceph_id = 'rbd-mirror.{}'.format(socket.gethostname()) + self.services = [ + 'ceph-rbd-mirror@{}'.format(self.ceph_id), + ] + self.restart_map = { + '/etc/ceph/ceph.conf': self.services, + '/etc/ceph/remote.conf': self.services, + } + super().__init__(**kwargs) + + def _mirror_pool_info(self, pool): + output = subprocess.check_output(['rbd', '--id', self.ceph_id, + 'mirror', 'pool', 'info', pool], + universal_newlines=True) + return output + + def mirror_pool_enabled(self, pool): + return 'Mode: pool' in self._mirror_pool_info(pool) - def config_changed(self): - """Check for upgrade.""" - self.upgrade_if_available(None) + def mirror_pool_has_peers(self, pool): + return 'Peers: none' not in self._mirror_pool_info(pool) - def install(self): - """We override install function to configure source before installing - packages. 
+ def mirror_pool_status(self, pool): + output = subprocess.check_output(['rbd', '--id', self.ceph_id, + 'mirror', 'pool', 'status', pool], + universal_newlines=True) + result = {} + for line in output.splitlines(): + vp = line.split(':') + result.update(vp[0], vp[1].lstrip().rstrip()) + return result - The OpenStackAPICharm class already does this but we do not need nor - want the other services it provides for Ceph charms.""" - self.configure_source() - super().install() + def mirror_pool_enable(self, pool): + base_cmd = ['rbd', '--id', self.ceph_id, 'mirror', 'pool'] + subprocess.check_call(base_cmd + ['enable', pool, 'pool']) + subprocess.check_call(base_cmd + ['peer', 'add', pool, + 'client.{}@remote' + .format(self.ceph_id)]) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index ff9f0cd1..c1677d8f 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -25,39 +25,79 @@ # Use the charms.openstack defaults for common states and hooks charm.use_defaults( 'charm.installed', - 'config.changed', 'update-status', 'upgrade-charm') @reactive.when_all('ceph-local.connected', 'ceph-remote.connected') @reactive.when_not_all('ceph-local.available', 'ceph-remote.available') -def ceph_connected(): - for flag in ('ceph-local.connected', 'ceph-remote.connected'): - endpoint = reactive.relations.endpoint_from_flag(flag) - endpoint.request_key() +def request_keys(): + with charm.provide_charm_instance() as charm_instance: + for flag in ('ceph-local.connected', 'ceph-remote.connected'): + endpoint = reactive.relations.endpoint_from_flag(flag) + ch_core.hookenv.log('Ceph endpoint "{}" connected, requesting key' + .format(endpoint.endpoint_name), + level=ch_core.hookenv.INFO) + endpoint.request_key() + charm_instance.assess_status() + +@reactive.when('config.changed') +@reactive.when('ceph-local.available') +@reactive.when('ceph-remote.available') +def config_changed(): with charm.provide_charm_instance() as charm_instance: - ch_core.hookenv.log('Ceph connected, charm_instance @ {}' - .format(charm_instance), - level=ch_core.hookenv.DEBUG) + charm_instance.upgrade_if_available([ + reactive.relations.endpoint_from_flag('ceph-local.available'), + reactive.relations.endpoint_from_flag('ceph-remote.available'), + ]) charm_instance.assess_status() -@reactive.when_all('ceph-local.available', 'ceph-remote.available') -def ceph_available(): - mon_hosts = {} - for flag in ('ceph-local.available', 'ceph-remote.available'): - endpoint = reactive.relations.endpoint_from_flag(flag) - mon_hosts[endpoint.endpoint_name] = endpoint.mon_hosts - for relation in endpoint.relations: - for unit in relation.units: - ch_core.hookenv.log('{}: "{}"'.format(flag, unit.received), - level=ch_core.hookenv.INFO) +@reactive.when_not('config.rendered') +def disable_services(): + with charm.provide_charm_instance() as charm_instance: + for service in charm_instance.services: + ch_core.host.service('disable', service) + ch_core.host.service('stop', service) + +@reactive.when('ceph-local.available') +@reactive.when('ceph-remote.available') +def render_stuff(*args): with charm.provide_charm_instance() as charm_instance: - ch_core.hookenv.log('Ceph available, mon_hosts: "{}" ' - 'charm_instance @ {}' - .format(mon_hosts, charm_instance), - level=ch_core.hookenv.DEBUG) + for endpoint in args: + ch_core.hookenv.log('Ceph endpoint "{}" available, configuring ' + 
'keyring'.format(endpoint.endpoint_name), + level=ch_core.hookenv.INFO) + ch_core.hookenv.log('Pools: "{}"'.format(endpoint.pools), + level=ch_core.hookenv.INFO) + + cluster_name = ( + 'remote') if endpoint.endpoint_name == 'ceph-remote' else None + charm_instance.configure_ceph_keyring(endpoint, + cluster_name=cluster_name) + charm_instance.render_with_interfaces(args) + with charm.provide_charm_instance() as charm_instance: + for service in charm_instance.services: + ch_core.host.service('enable', service) + ch_core.host.service('start', service) + reactive.set_flag('config.rendered') charm_instance.assess_status() + + +@reactive.when('leadership.is_leader') +@reactive.when('config.rendered') +@reactive.when('ceph-local.available') +@reactive.when('ceph-remote.available') +def configure_pools(): + local = reactive.endpoint_from_flag('ceph-local.available') + remote = reactive.endpoint_from_flag('ceph-remote.available') + with charm.provide_charm_instance() as charm_instance: + for pool, attrs in local.pools.items(): + if 'rbd' in attrs['applications']: + if not (charm_instance.mirror_pool_enabled(pool) and + charm_instance.mirror_pool_has_peers(pool)): + # TODO(fnordahl) add rest of attrs when creating pool + remote.create_pool(pool, app_name='rbd') + charm_instance.mirror_pool_enable(pool) diff --git a/ceph-rbd-mirror/src/templates/ceph.conf b/ceph-rbd-mirror/src/templates/ceph.conf new file mode 100644 index 00000000..b2b74843 --- /dev/null +++ b/ceph-rbd-mirror/src/templates/ceph.conf @@ -0,0 +1,21 @@ +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### +[global] +{% if ceph_local.auth -%} +auth_supported = {{ ceph_local.auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ ceph_local.monitors }} +{% endif -%} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} + +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} diff --git a/ceph-rbd-mirror/src/templates/remote.conf b/ceph-rbd-mirror/src/templates/remote.conf new file mode 100644 index 00000000..dd915378 --- /dev/null +++ b/ceph-rbd-mirror/src/templates/remote.conf @@ -0,0 +1,21 @@ +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. 
+############################################################################### +[global] +{% if ceph_remote.auth -%} +auth_supported = {{ ceph_remote.auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ ceph_remote.monitors }} +{% endif -%} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} + +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml index 9644c2d3..06f41ac0 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -1,5 +1,20 @@ series: bionic applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 ceph-mon: charm: cs:~fnordahl/ceph-mon-rbd-mirror num_units: 3 @@ -39,6 +54,18 @@ applications: options: source: distro relations: +- - mysql + - keystone +- - mysql + - cinder +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon - - ceph-mon:osd - ceph-osd:mon - - ceph-mon From 4d83000fb0ffa50206b1422fae16462ef304a6cd Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 25 Feb 2019 14:08:37 +0000 Subject: [PATCH 1677/2699] Support multiple ceph-fs (mds) units Prefix the mds cephx relation key with the provided mds-name value, ensuring that multiple ceph-fs units can be used within a deployment. 
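The data shape is the easiest way to see the fix; a small self-contained sketch
(unit names hypothetical) of how per-name keys keep units from clobbering each
other on the shared relation:

    def mds_relation_data(mds_name, mds_key):
        # One namespaced relation key per MDS, instead of a shared
        # 'mds_key' entry where the last unit to write would win.
        return {'{}_mds_key'.format(mds_name): mds_key}


    data = {}
    data.update(mds_relation_data('alpha', 'key-a'))
    data.update(mds_relation_data('beta', 'key-b'))
    assert data == {'alpha_mds_key': 'key-a', 'beta_mds_key': 'key-b'}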
Change-Id: I4cbfc23886552352d06cf0e96b8459b1ce84a682 --- ceph-mon/hooks/ceph_hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index df438832..f4e4601c 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -623,7 +623,8 @@ def mds_relation_joined(relid=None, unit=None): public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), - 'mds_key': ceph.get_mds_key(name=mds_name), + '{}_mds_key'.format(mds_name): + ceph.get_mds_key(name=mds_name), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} settings = relation_get(rid=relid, unit=unit) From b0fe6d1e72f9a1ff706c7cc1a947279010e8e170 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 26 Feb 2019 11:40:06 +0100 Subject: [PATCH 1678/2699] Move to ``charms.openstack.plugins`` class references --- ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 80f16e21..689f2d36 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -17,6 +17,7 @@ import charms_openstack.charm import charms_openstack.adapters +import charms_openstack.plugins # import charmhelpers.core.host as ch_host @@ -24,12 +25,12 @@ class CephRBDMirrorCharmRelationAdapters( charms_openstack.adapters.OpenStackRelationAdapters): relation_adapters = { - 'ceph_local': charms_openstack.adapters.CephRelationAdapter, - 'ceph_remote': charms_openstack.adapters.CephRelationAdapter, + 'ceph_local': charms_openstack.plugins.CephRelationAdapter, + 'ceph_remote': charms_openstack.plugins.CephRelationAdapter, } -class CephRBDMirrorCharm(charms_openstack.charm.CephCharm): +class CephRBDMirrorCharm(charms_openstack.plugins.CephCharm): # We require Ceph 12.2 Luminous or later for HA support in the Ceph # rbd-mirror daemon. Luminous appears in UCA at pike. release = 'pike' From ee5f06ba0cbbe91dfcd5bbc1af8da57f755cf922 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 26 Feb 2019 15:16:34 +0100 Subject: [PATCH 1679/2699] Rebuild charm to pickup interface changes Pickup updates to the ceph-mds interface type to ensure that multiple ceph-fs units can be deployed. Change-Id: I160dd387a547feeb6f900c7a200139ad271f9b37 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index eb4d7e55..2eeaa818 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -1 +1 @@ -55501b8e-38d8-11e9-a8ad-fb10af1e7610 +885c11f0-ada9-46fb-804a-6c034772f46e From 7aa67edbf8ceb5abe482dcb766cc9587d801672e Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 28 Feb 2019 09:13:13 +0100 Subject: [PATCH 1680/2699] Add support for creating erasure coded pool Initial implementation of custom status check. 
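Part of this change corrects mirror_pool_status() to feed dict.update() a mapping
rather than two positional arguments. A standalone sketch of that parsing, using
str.partition, which behaves like the split-based code for these "key: value"
lines:

    def parse_pool_status(output):
        # Turn `rbd mirror pool status` "key: value" lines into a dict
        result = {}
        for line in output.splitlines():
            key, _, value = line.partition(':')
            result[key] = value.strip()
        return result


    assert parse_pool_status('health: OK\nimages: 2 total') == {
        'health': 'OK', 'images': '2 total'}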
--- .../lib/charm/openstack/ceph_rbd_mirror.py | 21 +++++++++++++++++-- .../src/reactive/ceph_rbd_mirror_handlers.py | 21 +++++++++++++++++-- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 689f2d36..9c507398 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -15,11 +15,13 @@ import socket import subprocess +import charms.reactive as reactive + import charms_openstack.charm import charms_openstack.adapters import charms_openstack.plugins -# import charmhelpers.core.host as ch_host +import charmhelpers.core as ch_core class CephRBDMirrorCharmRelationAdapters( @@ -55,6 +57,21 @@ def __init__(self, **kwargs): } super().__init__(**kwargs) + def custom_assess_status_check(self): + """Provide mirrored pool statistics through juju status.""" + if (reactive.is_flag_set('config.rendered') and + reactive.is_flag_set('ceph-local.available') and + reactive.is_flag_set('ceph-remote.available')): + endpoint = reactive.endpoint_from_flag('ceph-local.available') + for pool, attrs in endpoint.pools.items(): + if 'rbd' in attrs['applications']: + status = self.mirror_pool_status(pool) + ch_core.hookenv.log('DEBUG: mirror_pool_status({}) = "{}"' + .format(pool, status), + level=ch_core.hookenv.INFO) + return 'active', 'Custom' + return None, None + def _mirror_pool_info(self, pool): output = subprocess.check_output(['rbd', '--id', self.ceph_id, 'mirror', 'pool', 'info', pool], @@ -74,7 +91,7 @@ def mirror_pool_status(self, pool): result = {} for line in output.splitlines(): vp = line.split(':') - result.update(vp[0], vp[1].lstrip().rstrip()) + result.update({vp[0]: vp[1].lstrip().rstrip()}) return result def mirror_pool_enable(self, pool): diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index c1677d8f..28487ade 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -98,6 +98,23 @@ def configure_pools(): if 'rbd' in attrs['applications']: if not (charm_instance.mirror_pool_enabled(pool) and charm_instance.mirror_pool_has_peers(pool)): - # TODO(fnordahl) add rest of attrs when creating pool - remote.create_pool(pool, app_name='rbd') charm_instance.mirror_pool_enable(pool) + pg_num = attrs['parameters'].get('pg_num', None) + max_bytes = attrs['quota'].get('max_bytes', None) + max_objects = attrs['quota'].get('max_objects', None) + if 'erasure_code_profile' in attrs['parameters']: + ec_profile = attrs['parameters'].get( + 'erasure_code_profile', None) + remote.create_erasure_pool(pool, + erasure_profile=ec_profile, + pg_num=pg_num, + app_name='rbd', + max_bytes=max_bytes, + max_objects=max_objects) + else: + size = attrs['parameters'].get('size', None) + remote.create_replicated_pool(pool, replicas=size, + pg_num=pg_num, + app_name='rbd', + max_bytes=max_bytes, + max_objects=max_objects) From 1f94fee603e2dabbfe8a2dc2f267baa92bcaf3c7 Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Fri, 1 Mar 2019 10:37:18 +0100 Subject: [PATCH 1681/2699] Added tox environment for gathering coverage This technique was borrowed from the tox "cover" environment in openstack/nova's tox.ini. This leverages the fact that stestr lets you override the python executable via the PYTHON environment variable. 
Doing this allows us to easily generate coverage for our unit tests. An important caveat is that this does not provide any coverage for tests via zaza, amulet, etc. It is purely focused on the unit tests. Note that this replaces the previous .coveragerc; coverage configuration is instead pulled from tox.ini. Change-Id: I0f85b328488743361b6070febef80008efab1358 --- ceph-fs/.zuul.yaml | 1 + ceph-fs/tox.ini | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 5e75d94d..387b9444 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -2,3 +2,4 @@ templates: - python-charm-jobs - openstack-python35-jobs + - openstack-cover-jobs diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index cf5744da..1946992e 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -47,6 +47,33 @@ basepython = python3.5 deps = -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} src unit_tests +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + ostestr {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + [testenv:venv] basepython = python3 commands = {posargs} From d4f15aacfd6fc15fcdb05769bfdb017c21cefcc1 Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Fri, 1 Mar 2019 10:52:33 +0100 Subject: [PATCH 1682/2699] Added tox environment for gathering coverage This technique was borrowed from the tox "cover" environment in openstack/nova's tox.ini. This leverages the fact that stestr lets you override the python executable via the PYTHON environment variable. Doing this allows us to easily generate coverage for our unit tests. An important caveat is that this does not provide any coverage for tests via zaza, amulet, etc. It is purely focused on the unit tests. Note that this replaces the previous .coveragerc; coverage configuration is instead pulled from tox.ini. 
Change-Id: I8cc9dbd7b77a69155a4b9c4fdb373accb0d92e69 --- ceph-osd/.coveragerc | 7 ------- ceph-osd/.zuul.yaml | 1 + ceph-osd/tox.ini | 27 +++++++++++++++++++++++++++ 3 files changed, 28 insertions(+), 7 deletions(-) delete mode 100644 ceph-osd/.coveragerc diff --git a/ceph-osd/.coveragerc b/ceph-osd/.coveragerc deleted file mode 100644 index 7f7b5be3..00000000 --- a/ceph-osd/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - if __name__ == .__main__.: -include= - hooks/hooks.py - hooks/ceph*.py diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index 7051aeeb..dc276615 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - python35-charm-jobs + - openstack-cover-jobs diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index e9553173..6f87dc37 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -41,6 +41,33 @@ deps = -r{toxinidir}/requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + ostestr {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + [testenv:venv] basepython = python3 commands = {posargs} From af45fdd2198212983c67d1de20f9c18767952570 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Fri, 1 Mar 2019 11:35:39 +0100 Subject: [PATCH 1683/2699] Update readme to clarify charm intent Change-Id: Ia7afa9d8099c55df5ed2ceeea3ed0fa87aa895e3 --- ceph-fs/src/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 6eacef60..161651ab 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -1,5 +1,8 @@ # CephFS Charm +This charm exists to provide an example integration of CephFS, for the purpose +of test and reference. It is not intended for production use at this time. + # Overview Ceph is a distributed storage and network file system designed to provide From 510755d52da94ef6933735f251d00853f610815a Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Fri, 1 Mar 2019 10:39:46 +0100 Subject: [PATCH 1684/2699] Added tox environment for gathering coverage This technique was borrowed from the tox "cover" environment in openstack/nova's tox.ini. This leverages the fact that stestr lets you override the python executable via the PYTHON environment variable. Doing this allows us to easily generate coverage for our unit tests. An important caveat is that this does not provide any coverage for tests via zaza, amulet, etc. It is purely focused on the unit tests. Note that this replaces the previous .coveragerc; coverage configuration is instead pulled from tox.ini. 
Change-Id: Idedc9707c2670825e2307ffacf40496d52f03d13 --- ceph-mon/.coveragerc | 7 ------- ceph-mon/.zuul.yaml | 1 + ceph-mon/tox.ini | 28 ++++++++++++++++++++++++++++ 3 files changed, 29 insertions(+), 7 deletions(-) delete mode 100644 ceph-mon/.coveragerc diff --git a/ceph-mon/.coveragerc b/ceph-mon/.coveragerc deleted file mode 100644 index 7f7b5be3..00000000 --- a/ceph-mon/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - if __name__ == .__main__.: -include= - hooks/hooks.py - hooks/ceph*.py diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index 7051aeeb..dc276615 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - python35-charm-jobs + - openstack-cover-jobs diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 965952a1..8acbe61a 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -46,6 +46,34 @@ deps = -r{toxinidir}/requirements.txt commands = flake8 {posargs} hooks unit_tests tests actions lib charm-proof +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + ostestr {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + [testenv:venv] basepython = python3 commands = {posargs} From 2087788b38ef1de30b9cc97fc690af89118f982e Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Fri, 1 Mar 2019 14:50:45 +0100 Subject: [PATCH 1685/2699] Excluding unit_tests from coverage Unit tests generally will all be executed, and measuring them will skew the overall coverage numbers. Thus, I'm excluding the unit_tests folder. Change-Id: Ie3e2e8fc61a941470fb23d5cff4922bf7e3f1591 --- ceph-osd/tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 6f87dc37..b2ac067b 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -67,6 +67,7 @@ source = omit = .tox/* */charmhelpers/* + unit_tests/* [testenv:venv] basepython = python3 From 4312facf1e92518b5491d81c07d19545d23d3e81 Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Fri, 1 Mar 2019 14:50:53 +0100 Subject: [PATCH 1686/2699] Excluding unit_tests from coverage Unit tests generally will all be executed, and measuring them will skew the overall coverage numbers. Thus, I'm excluding the unit_tests folder. Change-Id: I5ce51441f78624bd93ca18aa5901c555ca8a39ac --- ceph-fs/tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 1946992e..8cb1c96f 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -73,6 +73,7 @@ source = omit = .tox/* */charmhelpers/* + unit_tests/* [testenv:venv] basepython = python3 From 7cf07ff73e5c75b3e93c2c48c562a849734059ea Mon Sep 17 00:00:00 2001 From: Andre Ruiz Date: Tue, 26 Feb 2019 17:13:40 +0100 Subject: [PATCH 1687/2699] Implement new option to enable discard on SSDs This change implements a new option called 'bdev-enable-discard' to control behaviour of issuing discards to SSDs on ceph bluestore. The new code tries to autodetect cases where it should be enabled by default but will allow forcing if desired. 
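The autodetection rests on one hardware fact: queued (non-blocking) TRIM only
arrived with SATA 3.1, so discard is kept off for SATA <= 3.0 devices. A reduced
sketch of the probe, assuming smartmontools is installed (the full version below
also whitelists NVMe and virtio devices):

    import re
    import subprocess


    def sata_is_30_or_less(device):
        # SATA <= 3.0 only supports TRIM as a blocking command, so
        # enabling discard would stall I/O on these devices.
        info = subprocess.check_output(['smartctl', '-i', device],
                                       universal_newlines=True)
        return bool(re.search(r'SATA Version is:\s+SATA (1\.|2\.|3\.0)',
                              info))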
Change-Id: I7b83605c827eb4058bc4b46c92eb114c11108c93
Closes-Bug: #1788433
---
 ceph-osd/config.yaml                   |  9 +++++
 ceph-osd/hooks/ceph_hooks.py           |  8 ++++
 ceph-osd/hooks/utils.py                | 33 ++++++++++++++++
 ceph-osd/lib/ceph/utils.py             |  2 +-
 ceph-osd/templates/ceph.conf           |  2 +
 ceph-osd/unit_tests/test_ceph_hooks.py |  8 ++++
 ceph-osd/unit_tests/test_ceph_utils.py | 54 ++++++++++++++++++++++++++
 7 files changed, 115 insertions(+), 1 deletion(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index 1220334b..6fe04ffe 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -63,6 +63,15 @@ options:
       .
       For ceph >= 0.56.6 these can also be directories instead of devices - the
       charm assumes anything not starting with /dev is a directory instead.
+  bdev-enable-discard:
+    type: string
+    default: auto
+    description: |
+      Enables async discard on devices. This option will enable/disable both
+      the bdev-enable-discard and bdev-async-discard options in the ceph
+      configuration at the same time. The default value "auto" will try to
+      autodetect and should work in most cases. If you need to force a
+      behaviour you can set it to "enabled" or "disabled". Only applies for
+      Ceph Mimic or later.
   osd-journal:
     type: string
     default:
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index c6b925fd..187c6c56 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -80,6 +80,7 @@
     get_cluster_addr,
     get_blacklist,
     get_journal_devices,
+    should_enable_discard,
 )
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 from charmhelpers.contrib.network.ip import (
@@ -388,6 +389,13 @@ def get_ceph_context(upgrading=False):
         'bluestore_block_db_size': config('bluestore-block-db-size'),
     }

+    if config('bdev-enable-discard').lower() == 'enabled':
+        cephcontext['bdev_discard'] = True
+    elif config('bdev-enable-discard').lower() == 'auto':
+        cephcontext['bdev_discard'] = should_enable_discard(get_devices())
+    else:
+        cephcontext['bdev_discard'] = False
+
     if config('prefer-ipv6'):
         dynamic_ipv6_address = get_ipv6_addr()[0]
         if not public_network:
diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py
index b773e2d1..3064f7a5 100644
--- a/ceph-osd/hooks/utils.py
+++ b/ceph-osd/hooks/utils.py
@@ -15,6 +15,7 @@
 import re
 import os
 import socket
+import subprocess

 from charmhelpers.core.hookenv import (
     unit_get,
@@ -23,6 +24,7 @@
     network_get_primary_address,
     log,
     DEBUG,
+    WARNING,
     status_set,
     storage_get,
     storage_list,
@@ -194,3 +196,34 @@ def get_journal_devices():
     _blacklist = get_blacklist()
     return set(device for device in devices
                if device not in _blacklist and os.path.exists(device))
+
+
+def should_enable_discard(devices):
+    """
+    Tries to autodetect if we can enable discard on devices and if that
+    discard can be asynchronous. We want to enable both options if there
+    are any SSDs, unless any of them are using SATA <= 3.0, in which case
+    discard is supported but is a blocking operation.
+    """
+    discard_enable = True
+    for device in devices:
+        # whitelist some devices that do not need checking
+        if (device.startswith("/dev/nvme") or
+                device.startswith("/dev/vd")):
+            continue
+        if (device.startswith("/dev/") and
+                os.path.exists(device) and
+                is_sata30orless(device)):
+            discard_enable = False
+            log("SSD Discard autodetection: {} is forcing discard off "
+                "(sata <= 3.0)".format(device), level=WARNING)
+    return discard_enable
+
+
+def is_sata30orless(device):
+    result = subprocess.check_output(["/usr/sbin/smartctl", "-i", device])
+    for line in str(result).split("\\n"):
+        if re.match(r"SATA Version is: *SATA (1\.|2\.|3\.0)", str(line)):
+            return True
+    return False
diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py
index 83388c9b..dac98c99 100644
--- a/ceph-osd/lib/ceph/utils.py
+++ b/ceph-osd/lib/ceph/utils.py
@@ -82,7 +82,7 @@
 PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'radosgw', 'xfsprogs',
-            'lvm2', 'parted']
+            'lvm2', 'parted', 'smartmontools']

 CEPH_KEY_MANAGER = 'ceph'
 VAULT_KEY_MANAGER = 'vault'
diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf
index 77fce613..2682be01 100644
--- a/ceph-osd/templates/ceph.conf
+++ b/ceph-osd/templates/ceph.conf
@@ -75,6 +75,8 @@
 osd journal size = {{ osd_journal_size }}
 filestore xattr use omap = true
 journal dio = {{ dio }}
 {%- endif %}
+bdev enable discard = {{ bdev_discard }}
+bdev async discard = {{ bdev_discard }}

 {%- if short_object_len %}
 osd max object name len = 256
diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py
index 23e9d851..b55b87ae 100644
--- a/ceph-osd/unit_tests/test_ceph_hooks.py
+++ b/ceph-osd/unit_tests/test_ceph_hooks.py
@@ -37,6 +37,8 @@
     'customize-failure-domain': False,
     'bluestore': False,
     'crush-initial-weight': '0',
+    'bdev-enable-discard': 'enabled',
+    'osd-devices': '/dev/vdb',
     'bluestore': False,
     'bluestore-block-wal-size': 0,
     'bluestore-block-db-size': 0,
@@ -84,6 +86,7 @@ def test_get_ceph_context(self, mock_config, mock_config2):
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog': 'true',
+    'bdev_discard': True,
     'bluestore': False,
     'bluestore_experimental': False,
     'bluestore_block_wal_size': 0,
@@ -123,6 +126,7 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2):
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog': 'true',
+    'bdev_discard': True,
     'bluestore': False,
     'bluestore_experimental': True,
     'bluestore_block_wal_size': 0,
@@ -168,6 +172,7 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2):
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog': 'true',
+    'bdev_discard': True,
     'bluestore': True,
     'bluestore_experimental': False,
     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
@@ -210,6 +215,7 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2):
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog': 'true',
+    'bdev_discard': True,
     'bluestore': True,
     'bluestore_experimental': True,
     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
@@ -250,6 +256,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2):
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog': 'true',
+    'bdev_discard': True,
     'bluestore': False,
     'bluestore_experimental': False,
     'bluestore_block_wal_size': 0,
@@ -292,6 +299,7 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config,
     'short_object_len': True,
     'upgrade_in_progress': False,
     'use_syslog':
'true', + 'bdev_discard': True, 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index f58ae070..88dfefe4 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -61,3 +61,57 @@ def test_get_journal_devices_blacklist(self, mock_config, mock_os_path_exists.assert_called() mock_get_blacklist.assert_called() self.assertEqual(devices, set(['/dev/vdb'])) + + @patch('os.path.exists') + @patch.object(utils, 'is_sata30orless') + def test_should_enable_discard_yes(self, mock_is_sata30orless, + mock_os_path_exists): + devices = ['/dev/sda', '/dev/vda', '/dev/nvme0n1'] + mock_os_path_exists.return_value = True + mock_is_sata30orless.return_value = False + ret = utils.should_enable_discard(devices) + mock_os_path_exists.assert_called() + mock_is_sata30orless.assert_called() + self.assertEqual(ret, True) + + @patch('os.path.exists') + @patch.object(utils, 'is_sata30orless') + def test_should_enable_discard_no(self, mock_is_sata30orless, + mock_os_path_exists): + devices = ['/dev/sda', '/dev/vda', '/dev/nvme0n1'] + mock_os_path_exists.return_value = True + mock_is_sata30orless.return_value = True + ret = utils.should_enable_discard(devices) + mock_os_path_exists.assert_called() + mock_is_sata30orless.assert_called() + self.assertEqual(ret, False) + + @patch('subprocess.check_output') + def test_is_sata30orless_sata31(self, mock_subprocess_check_output): + extcmd_output = (b'supressed text\nSATA Version is: ' + b'SATA 3.1, 6.0 Gb/s (current: 6.0 Gb/s)\n' + b'supressed text\n\n') + mock_subprocess_check_output.return_value = extcmd_output + ret = utils.is_sata30orless('/dev/sda') + mock_subprocess_check_output.assert_called() + self.assertEqual(ret, False) + + @patch('subprocess.check_output') + def test_is_sata30orless_sata30(self, mock_subprocess_check_output): + extcmd_output = (b'supressed text\nSATA Version is: ' + b'SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s)\n' + b'supressed text\n\n') + mock_subprocess_check_output.return_value = extcmd_output + ret = utils.is_sata30orless('/dev/sda') + mock_subprocess_check_output.assert_called() + self.assertEqual(ret, True) + + @patch('subprocess.check_output') + def test_is_sata30orless_sata26(self, mock_subprocess_check_output): + extcmd_output = (b'supressed text\nSATA Version is: ' + b'SATA 2.6, 3.0 Gb/s (current: 3.0 Gb/s)\n' + b'supressed text\n\n') + mock_subprocess_check_output.return_value = extcmd_output + ret = utils.is_sata30orless('/dev/sda') + mock_subprocess_check_output.assert_called() + self.assertEqual(ret, True) From 4c06095bf1f89b40721326240ffce1ff33f4da02 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 28 Feb 2019 00:07:26 +0100 Subject: [PATCH 1688/2699] Fix handling of recursion in notify_rbd_mirrors The committed iteration had a problem: In the event the ``ceph-rbd-mirror`` charm created a new pool on a remote cluster and set the relation data on the ceph leader unit, members of other instances of the ``rbd-mirror`` relation would not be updated with pool information. 
Also limit the number of times notify_mons() is run

Change-Id: I2a03ca02285e7a99c2cae48dbafc014fb478fb84
---
 ceph-mon/hooks/ceph_hooks.py           | 28 +++++++++++++++-----------
 ceph-mon/unit_tests/test_ceph_hooks.py | 14 ++++++++++---
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 5e9b00d7..3447e450 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -39,7 +39,6 @@
     is_relation_made,
     relation_get,
     relation_set,
-    relation_type,
     leader_set, leader_get,
     is_leader,
     remote_unit,
@@ -475,7 +474,7 @@ def notify_radosgws():
 def notify_rbd_mirrors():
     for relid in relation_ids('rbd-mirror'):
         for unit in related_units(relid):
-            rbd_mirror_relation(relid=relid, unit=unit)
+            rbd_mirror_relation(relid=relid, unit=unit, recurse=False)

@@ -509,7 +508,8 @@ def notify_mons():
                      relation_settings={'nonce': nonce})

-def handle_broker_request(relid, unit, add_legacy_response=False):
+def handle_broker_request(relid, unit, add_legacy_response=False,
+                          recurse=True):
     """Retrieve broker request from relation, process, return response data.

     :param relid: Relation ID
     :type relid: str
     :param unit: Name of unit
     :type unit: str
     :param add_legacy_response: (Optional) Adds the legacy ``broker_rsp`` key
                                 to the response in addition to the new way.
     :type add_legacy_response: bool
+    :param recurse: Whether we should call out to update relation functions
+                    or not. Mainly used to handle recursion when called from
+                    notify_rbd_mirrors()
+    :type recurse: bool
     :returns: Dictionary of response data ready for use with relation_set.
     :rtype: dict
     """
@@ -537,19 +541,19 @@
         if add_legacy_response:
             response.update({'broker_rsp': rsp})

-        # prevent recursion when called from rbd_mirror_relation()
-        if relation_type() != 'rbd-mirror':
+        if relation_ids('rbd-mirror') and recurse:
             # update ``rbd-mirror`` relations for this unit with
             # information about new pools.
             log('Notifying this unit\'s rbd-mirror relations after '
                 'processing broker request.', level=DEBUG)
             notify_rbd_mirrors()
-            # notify mons to flag that the other mon units should update
-            # their ``rbd-mirror`` relations with information about new pools.
-            log('Notifying peers after processing broker request.',
-                level=DEBUG)
-            notify_mons()
+            # notify mons to flag that the other mon units should update
+            # their ``rbd-mirror`` relations with information about new
+            # pools.
+            log('Notifying peers after processing broker request.',
+                level=DEBUG)
+            notify_mons()
     return response

@@ -672,14 +676,14 @@ def radosgw_relation(relid=None, unit=None):

 @hooks.hook('rbd-mirror-relation-joined')
 @hooks.hook('rbd-mirror-relation-changed')
-def rbd_mirror_relation(relid=None, unit=None):
+def rbd_mirror_relation(relid=None, unit=None, recurse=True):
     if ready_for_service():
         log('mon cluster in quorum and osds bootstrapped '
             '- providing rbd-mirror client with keys')
         if not unit:
             unit = remote_unit()
         # handle broker requests first to get an updated pool map
-        data = (handle_broker_request(relid, unit))
+        data = (handle_broker_request(relid, unit, recurse=recurse))
         data.update({
             'auth': config('auth-supported'),
             'ceph-public-address': get_public_addr(),
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index a3dc1a93..8a1404f6 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -261,7 +261,8 @@ def test_notify_rbd_mirrors(self, mock_relation_ids, mock_related_units,
         mock_relation_ids.assert_called_once_with('rbd-mirror')
         mock_related_units.assert_called_once_with('arelid')
         mock_rbd_mirror_relation.assert_called_once_with(relid='arelid',
-                                                         unit='aunit')
+                                                         unit='aunit',
+                                                         recurse=False)

     @patch.object(ceph_hooks, 'uuid')
     @patch.object(ceph_hooks, 'relation_set')
@@ -360,6 +361,7 @@ def test_client_relation_changed_non_rel_hook(self, relation_set,
                 'broker-rsp-glance-0': 'AOK',
                 'broker_rsp': 'AOK'})

+    @patch.object(ceph_hooks, 'relation_ids')
     @patch.object(ceph_hooks, 'notify_mons')
     @patch.object(ceph_hooks, 'notify_rbd_mirrors')
     @patch.object(ceph_hooks, 'process_requests')
     @patch.object(ceph_hooks.ceph, 'is_leader')
     @patch.object(ceph_hooks, 'relation_get')
     @patch.object(ceph_hooks, 'remote_unit')
@@ -370,7 +372,8 @@ def test_handle_broker_request(self, mock_remote_unit, mock_relation_get,
                                    mock_ceph_is_leader,
                                    mock_broker_process_requests,
-                                   mock_notify_mons):
+                                   mock_notify_mons,
+                                   mock_relation_ids):
         mock_remote_unit.return_value = 'glance/0'
         ceph_hooks.handle_broker_request('rel1', None)
         mock_remote_unit.assert_called_once_with()
@@ -388,6 +391,11 @@
             ceph_hooks.handle_broker_request('rel1', 'glance/0',
                                              add_legacy_response=True),
             {'broker_rsp': 'AOK', 'broker-rsp-glance-0': 'AOK'})
+        mock_notify_rbd_mirrors.reset_mock()
+        mock_notify_mons.reset_mock()
+        ceph_hooks.handle_broker_request('rel1', None, recurse=False)
+        self.assertFalse(mock_notify_rbd_mirrors.called)
+        self.assertFalse(mock_notify_mons.called)


 class BootstrapSourceTestCase(test_utils.CharmTestCase):
@@ -615,7 +623,7 @@ def test_rbd_mirror_relation(self):
         }
         ceph_hooks.rbd_mirror_relation('rbd-mirror:51', 'ceph-rbd-mirror/0')
         self.handle_broker_request.assert_called_with(
-            'rbd-mirror:51', 'ceph-rbd-mirror/0')
+            'rbd-mirror:51', 'ceph-rbd-mirror/0', recurse=True)
         self.relation_set.assert_called_with(
             relation_id='rbd-mirror:51',
             relation_settings=base_relation_settings)

From 8974fcd6022a792b0c6b731d2d46fe5e65526a09 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Tue, 26 Feb 2019 17:13:58 +0100
Subject: [PATCH 1689/2699] Add setting ``max_objects`` quota and retrieving
 erasure code profile

Also fix a few missing redirects of STDERR.
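For context, both limits map onto a single ``ceph osd pool set-quota``
invocation; a sketch of the shape of that call (following the parameter
handling in charmhelpers' set_pool_quota), which is why the broker now
triggers it when either field is present:

    import subprocess


    def set_pool_quota(pool, max_bytes=None, max_objects=None, client='admin'):
        # Append whichever quota fields were requested; previously only
        # a max-bytes value would result in a quota being set.
        cmd = ['ceph', '--id', client, 'osd', 'pool', 'set-quota', pool]
        if max_bytes:
            cmd.extend(['max_bytes', str(max_bytes)])
        if max_objects:
            cmd.extend(['max_objects', str(max_objects)])
        subprocess.check_call(cmd)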
Depends-On: I243328af5db202f092c53b4d48e0d293378d2ca0 Change-Id: Ia204468ecf5de6bb4d74ec9fcb68393d5b18b4f1 --- ceph-mon/lib/ceph/broker.py | 16 +++++++++------ ceph-mon/lib/ceph/utils.py | 40 +++++++++++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 12 deletions(-) diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 0d5a7e80..3226f4cc 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -369,7 +369,8 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -409,8 +410,9 @@ def handle_erasure_pool(request, service): pool.create() # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_replicated_pool(request, service): @@ -422,7 +424,8 @@ def handle_replicated_pool(request, service): """ pool_name = request.get('name') replicas = request.get('replicas') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -469,8 +472,9 @@ def handle_replicated_pool(request, service): level=DEBUG) # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_create_cache_tier(request, service): diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 1a090403..b4f87907 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -2533,7 +2533,8 @@ def list_pools(client='admin'): try: pool_list = [] pools = subprocess.check_output(['rados', '--id', client, 'lspools'], - universal_newlines=True) + universal_newlines=True, + stderr=subprocess.STDOUT) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -2558,8 +2559,8 @@ def get_pool_param(pool, param, client='admin'): """ try: output = subprocess.check_output( - ['ceph', '--id', client, 'osd', 'pool', 'get', - pool, param], universal_newlines=True) + ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param], + universal_newlines=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as cp: if cp.returncode == 2 and 'ENOENT: option' in cp.output: return None @@ -2568,6 +2569,27 @@ def get_pool_param(pool, param, client='admin'): return output.split(':')[1].lstrip().rstrip() +def get_pool_erasure_profile(pool, client='admin'): + """Get erasure code profile for pool. 
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Erasure code profile of pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        return get_pool_param(pool, 'erasure_code_profile', client=client)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 13 and 'EACCES: pool' in cp.output:
+            # Not an erasure coded pool
+            return None
+        raise
+
+
 def get_pool_quota(pool, client='admin'):
     """Get pool quota.
@@ -2582,7 +2604,7 @@ def get_pool_quota(pool, client='admin'):
     """
     output = subprocess.check_output(
         ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool],
-        universal_newlines=True)
+        universal_newlines=True, stderr=subprocess.STDOUT)
     rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)')
     result = {}
     for line in output.splitlines():
@@ -2610,7 +2632,9 @@ def get_pool_applications(pool='', client='admin'):
     if pool:
         cmd.append(pool)
     try:
-        output = subprocess.check_output(cmd, universal_newlines=True)
+        output = subprocess.check_output(cmd,
+                                         universal_newlines=True,
+                                         stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as cp:
         if cp.returncode == 2 and 'ENOENT' in cp.output:
             return {}
@@ -2623,7 +2647,7 @@ def list_pools_detail():
     Structure:
     {'pool_name_1': {'applications': {'application': {}},
-                     'parameters': {'pg_num': 42, 'size': 42},
+                     'parameters': {'pg_num': '42', 'size': '42'},
                      'quota': {'max_bytes': '1000',
                                'max_objects': '10'},
                      },
@@ -2646,6 +2670,10 @@
         for param in get_params:
             result[pool]['parameters'].update({
                 param: get_pool_param(pool, param)})
+        erasure_profile = get_pool_erasure_profile(pool)
+        if erasure_profile:
+            result[pool]['parameters'].update({
+                'erasure_code_profile': erasure_profile})
     return result

From 2e4192067010b1dbf314c75263e74f060d91c62b Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Fri, 1 Mar 2019 12:33:53 +0100
Subject: [PATCH 1690/2699] Fold ``client-relation-(changed|joined)`` into one
 function

Main driver for this is the ability to change ``rbd-features``
post-deploy. The ability to do that is a requirement for enabling the
use of ``rbd-mirror`` features on an existing cluster.
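On the consuming side the contract is unchanged; a hypothetical sketch of how
a client charm could apply the published ``rbd-features`` value when rendering
its ceph.conf (the relation_data dict stands in for what client_relation()
sets, and is not the actual interface implementation):

    def client_ceph_conf_lines(relation_data):
        lines = ['auth supported = {}'.format(relation_data['auth'])]
        rbd_features = relation_data.get('rbd-features')
        if rbd_features:
            # picked up by librbd when new images are created
            lines.append('rbd default features = {}'.format(rbd_features))
        return lines


    assert client_ceph_conf_lines({'auth': 'cephx', 'rbd-features': 125}) == [
        'auth supported = cephx', 'rbd default features = 125']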
Change-Id: I9024ae59029b7afd01a9a6a064756465f5cea6c4 --- ceph-mon/hooks/ceph_hooks.py | 51 +++++---------- ceph-mon/unit_tests/test_ceph_hooks.py | 89 +++++++++++++++++++++----- 2 files changed, 90 insertions(+), 50 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 3447e450..89d92504 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -479,9 +479,8 @@ def notify_rbd_mirrors(): def notify_client(): for relid in relation_ids('client'): - client_relation_joined(relid) for unit in related_units(relid): - client_relation_changed(relid, unit) + client_relation(relid, unit) for relid in relation_ids('admin'): admin_relation_joined(relid) for relid in relation_ids('mds'): @@ -744,41 +743,25 @@ def admin_relation_joined(relid=None): relation_settings=data) -@hooks.hook('client-relation-joined') -def client_relation_joined(relid=None): - if ready_for_service(): - log('mon cluster in quorum and osds bootstrapped ' - '- providing client with keys') - service_name = None - if relid is None: - units = [remote_unit()] - service_name = units[0].split('/')[0] - else: - units = related_units(relid) - if len(units) > 0: - service_name = units[0].split('/')[0] - - if service_name is not None: - public_addr = get_public_addr() - data = {'key': ceph.get_named_key(service_name), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr} - if config('default-rbd-features'): - data['rbd-features'] = config('default-rbd-features') - relation_set(relation_id=relid, - relation_settings=data) - - @hooks.hook('client-relation-changed') -def client_relation_changed(relid=None, unit=None): - """Process broker requests from ceph client relations.""" +@hooks.hook('client-relation-joined') +def client_relation(relid=None, unit=None): if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' - '- processing client broker requests') - data = handle_broker_request(relid, unit, add_legacy_response=True) - if len(data): - relation_set(relation_id=relid, - relation_settings=data) + '- providing client with keys, processing broker requests') + service_name = hookenv.remote_service_name() + public_addr = get_public_addr() + data = {'key': ceph.get_named_key(service_name), + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr} + if config('default-rbd-features'): + data['rbd-features'] = config('default-rbd-features') + if not unit: + unit = remote_unit() + data.update( + handle_broker_request(relid, unit, add_legacy_response=True)) + relation_set(relation_id=relid, + relation_settings=data) @hooks.hook('upgrade-charm.real') diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 8a1404f6..d95d12c9 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -226,13 +226,11 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( @patch.object(ceph_hooks, 'mds_relation_joined') @patch.object(ceph_hooks, 'admin_relation_joined') - @patch.object(ceph_hooks, 'client_relation_changed') - @patch.object(ceph_hooks, 'client_relation_joined') + @patch.object(ceph_hooks, 'client_relation') @patch.object(ceph_hooks, 'related_units') @patch.object(ceph_hooks, 'relation_ids') def test_notify_client(self, mock_relation_ids, mock_related_units, - mock_client_relation_joined, - mock_client_relation_changed, + mock_client_relation, mock_admin_relation_joined, mock_mds_relation_joined): mock_relation_ids.return_value = ['arelid'] @@ -244,8 
+242,7 @@ def test_notify_client(self, mock_relation_ids, mock_related_units, call('mds'), ]) mock_related_units.assert_called_with('arelid') - mock_client_relation_joined.assert_called_once_with('arelid') - mock_client_relation_changed.assert_called_once_with('arelid', 'aunit') + mock_client_relation.assert_called_once_with('arelid', 'aunit') mock_admin_relation_joined.assert_called_once_with('arelid') mock_mds_relation_joined.assert_called_once_with(relid='arelid', unit='aunit') @@ -323,6 +320,54 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks, 'relation_set') + @patch.object(ceph_hooks, 'handle_broker_request') + @patch.object(ceph_hooks, 'config') + @patch.object(ceph_hooks.ceph, 'get_named_key') + @patch.object(ceph_hooks, 'get_public_addr') + @patch.object(ceph_hooks.hookenv, 'remote_service_name') + @patch.object(ceph_hooks, 'ready_for_service') + def test_client_relation(self, + _ready_for_service, + _remote_service_name, + _get_public_addr, + _get_named_key, + _config, + _handle_broker_request, + _relation_set): + _remote_service_name.return_value = 'glance' + config = copy.deepcopy(CHARM_CONFIG) + _config.side_effect = lambda key: config[key] + _handle_broker_request.return_value = {} + ceph_hooks.client_relation(relid='rel1', unit='glance/0') + _ready_for_service.assert_called_once_with() + _get_public_addr.assert_called_once_with() + _get_named_key.assert_called_once_with('glance') + _handle_broker_request.assert_called_once_with( + 'rel1', 'glance/0', add_legacy_response=True) + _relation_set.assert_called_once_with( + relation_id='rel1', + relation_settings={ + 'key': _get_named_key(), + 'auth': False, + 'ceph-public-address': _get_public_addr() + }) + config.update({'default-rbd-features': 42}) + _relation_set.reset_mock() + ceph_hooks.client_relation(relid='rel1', unit='glance/0') + _relation_set.assert_called_once_with( + relation_id='rel1', + relation_settings={ + 'key': _get_named_key(), + 'auth': False, + 'ceph-public-address': _get_public_addr(), + 'rbd-features': 42, + }) + + @patch.object(ceph_hooks, 'config') + @patch.object(ceph_hooks.ceph, 'get_named_key') + @patch.object(ceph_hooks, 'get_public_addr') + @patch.object(ceph_hooks.hookenv, 'remote_service_name') @patch.object(ceph_hooks, 'relation_ids', return_value=[]) @patch.object(ceph_hooks, 'ready_for_service') @patch.object(ceph_hooks.ceph, 'is_quorum') @@ -331,14 +376,18 @@ def test_related_osd_multi_relation(self, @patch.object(ceph_hooks.ceph, 'is_leader') @patch.object(ceph_hooks, 'process_requests') @patch.object(ceph_hooks, 'relation_set') - def test_client_relation_changed_non_rel_hook(self, relation_set, - process_requests, - is_leader, - relation_get, - remote_unit, - is_quorum, - ready_for_service, - relation_ids): + def test_client_relation_non_rel_hook(self, relation_set, + process_requests, + is_leader, + relation_get, + remote_unit, + is_quorum, + ready_for_service, + relation_ids, + remote_service_name, + get_public_addr, + get_named_key, + _config): # Check for LP #1738154 ready_for_service.return_value = True process_requests.return_value = 'AOK' @@ -346,18 +395,26 @@ def test_client_relation_changed_non_rel_hook(self, relation_set, relation_get.return_value = {'broker_req': 'req'} remote_unit.return_value = None is_quorum.return_value = True - ceph_hooks.client_relation_changed(relid='rel1', unit='glance/0') + config = copy.deepcopy(CHARM_CONFIG) + _config.side_effect = lambda key: config[key] + ceph_hooks.client_relation(relid='rel1', 
unit='glance/0') relation_set.assert_called_once_with( relation_id='rel1', relation_settings={ + 'key': get_named_key(), + 'auth': False, + 'ceph-public-address': get_public_addr(), 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) relation_set.reset_mock() remote_unit.return_value = 'glance/0' - ceph_hooks.client_relation_changed() + ceph_hooks.client_relation() relation_set.assert_called_once_with( relation_id=None, relation_settings={ + 'key': get_named_key(), + 'auth': False, + 'ceph-public-address': get_public_addr(), 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) From dc06275c02b2af51de57e5f87b4b36cb5e7344a6 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 1 Mar 2019 12:47:01 +0100 Subject: [PATCH 1691/2699] Run ``ceph-client`` relations on ``config-changed`` Change-Id: I71c6d21474238760a74b8724911834abb518f895 --- ceph-mon/hooks/ceph_hooks.py | 3 +++ ceph-mon/unit_tests/test_ceph_hooks.py | 37 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 89d92504..43647703 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -284,6 +284,9 @@ def config_changed(): status_set('maintenance', 'Bootstrapping single Ceph MGR') ceph.bootstrap_manager() + # Update client relations + notify_client() + def get_mon_hosts(): hosts = [] diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index d95d12c9..6173e2cd 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -513,6 +513,43 @@ def test_bootstrap_source_different_fsid_secret(self): self.assertRaises(AssertionError, ceph_hooks.bootstrap_source_relation_changed) + @patch.object(ceph_hooks, 'notify_client') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'leader_get') + @patch.object(ceph_hooks, 'is_leader') + @patch.object(ceph_hooks, 'relations_of_type') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'config') + def test_config_changed(self, + _config, + _check_for_upgrade, + _get_mon_hosts, + _relations_of_type, + _is_leader, + _leader_get, + _emit_cephconf, + _is_bootstrapped, + _notify_client): + config = copy.deepcopy(CHARM_CONFIG) + _config.side_effect = \ + lambda key=None: config.get(key, None) if key else config + _relations_of_type.return_value = False + _is_leader.return_value = False + _leader_get.side_effect = ['fsid', 'monsec'] + _is_bootstrapped.return_value = True + ceph_hooks.config_changed() + _check_for_upgrade.assert_called_once_with() + _get_mon_hosts.assert_called_once_with() + _leader_get.assert_has_calls([ + call('fsid'), + call('monitor-secret'), + ]) + _emit_cephconf.assert_called_once_with() + _is_bootstrapped.assert_called_once_with() + _notify_client.assert_called_once_with() + @patch.object(ceph_hooks, 'emit_cephconf') @patch.object(ceph_hooks, 'create_sysctl') @patch.object(ceph_hooks, 'check_for_upgrade') From b1df8d71f03f0af9b40e44205002c410ac95ded1 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 1 Mar 2019 22:21:08 +0100 Subject: [PATCH 1692/2699] Enable image features when ``rbd-mirror`` relation present The RBD Mirroring feature requires ``journaling`` and ``exclusive-lock`` image features to be enabled. Set the appropriate value so new images get these features automatically. 
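As background for the change below: RBD feature flags are plain integer bits OR'ed into a bitmap, so the required features can be added on top of whatever default is in effect. A minimal sketch of that arithmetic (the constant values match the ones used in this patch; the helper name here is illustrative only):

    # RBD feature bits as used by this patch; OR'ing preserves whatever
    # features are already enabled while guaranteeing the two bits that
    # RBD mirroring requires.
    RBD_FEATURE_EXCLUSIVE_LOCK = 4
    RBD_FEATURE_JOURNALING = 64

    def with_mirror_features(features):
        # Illustrative stand-in for the charm's own helper.
        return features | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING

    # Ceph's common default bitmap 61 (layering, exclusive-lock,
    # object-map, fast-diff, deep-flatten) becomes 125 with journaling set.
    assert with_mirror_features(61) == 125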
Change-Id: Ie36c23b27fb7238814993756cb1d72e86309ab02 --- ceph-mon/config.yaml | 26 +++++++--- ceph-mon/hooks/ceph_hooks.py | 10 ++-- ceph-mon/hooks/utils.py | 70 +++++++++++++++++++++++--- ceph-mon/unit_tests/test_ceph_hooks.py | 17 +++++-- ceph-mon/unit_tests/test_ceph_utils.py | 65 ++++++++++++++++++++++++ 5 files changed, 164 insertions(+), 24 deletions(-) create mode 100644 ceph-mon/unit_tests/test_ceph_utils.py diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 7881fdf1..fa40f7e1 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -212,14 +212,24 @@ options: type: int default: description: | - Restrict the rbd features used to the specified level. If set, this will - inform clients that they should set the config value `rbd default - features`, for example: - . - rbd default features = 1 - . - This needs to be set to 1 when deploying a cloud with the nova-lxd - hypervisor. + Default RBD Features to use when creating new images. The value of this + configuration option will be shared with consumers of the ``ceph-client`` + interface and client charms may choose to add this to the Ceph + configuration file on the units they manage. + + Example: + + rbd default features = 1 + + NOTE: If you have clients using the kernel RBD driver you must set this + configuration option to a value corresponding to the features the driver + in your kernel supports. The kernel RBD driver tends to be multiple + cycles behind the userspace driver available for libvirt/qemu. Nova LXD + is among the clients depending on the kernel RBD driver. + + NOTE: If you want to use the RBD Mirroring feature you must either let + this configuration option be the default or make sure the value you set + includes the ``exclusive-lock`` and ``journaling`` features. no-bootstrap: type: boolean default: False diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 43647703..355e01b3 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -81,10 +81,11 @@ from charmhelpers.contrib.storage.linux.ceph import ( CephConfContext) from utils import ( + assert_charm_supports_ipv6, + get_cluster_addr, get_networks, get_public_addr, - get_cluster_addr, - assert_charm_supports_ipv6 + get_rbd_features, ) from charmhelpers.contrib.charmsupport import nrpe @@ -184,8 +185,9 @@ def get_ceph_context(): cephcontext['public_addr'] = get_public_addr() cephcontext['cluster_addr'] = get_cluster_addr() - if config('default-rbd-features'): - cephcontext['rbd_features'] = config('default-rbd-features') + rbd_features = get_rbd_features() + if rbd_features: + cephcontext['rbd_features'] = rbd_features if config('disable-pg-max-object-skew'): cephcontext['disable_object_skew'] = config( diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index f4bc81e2..46063d3d 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -12,27 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License.
-import socket import re +import socket +import subprocess + from charmhelpers.core.hookenv import ( - unit_get, + DEBUG, cached, config, - status_set, - network_get_primary_address, + goal_state, log, - DEBUG, + network_get_primary_address, + related_units, + relation_ids, + status_set, + unit_get, ) from charmhelpers.fetch import ( apt_install, filter_installed_packages ) - from charmhelpers.core.host import ( lsb_release, CompareHostReleases, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr @@ -152,3 +155,56 @@ def assert_charm_supports_ipv6(): if CompareHostReleases(_release) < "trusty": raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") + + +def has_rbd_mirrors(): + """Determine if we have or will have ``rbd-mirror`` charms related. + + :returns: True or False + :rtype: bool + """ + try: + # NOTE(fnordahl): This optimization will not be useful until we get a + # resolution on LP: #1818245 + raise NotImplementedError + gs = goal_state() + return 'rbd-mirror' in gs.get('relations', {}) + except NotImplementedError: + for relid in relation_ids('rbd-mirror'): + if related_units(relid): + return True + + +def get_default_rbd_features(): + """Get default value for ``rbd_default_features``. + + This is retrieved by asking the installed Ceph binary to show its runtime + config when using an empty configuration file. + + :returns: Installed Ceph's default value for ``rbd_default_features`` + :rtype: int + :raises: subprocess.CalledProcessError + """ + output = subprocess.check_output( + ['ceph', '-c', '/dev/null', '--show-config'], + universal_newlines=True) + for line in output.splitlines(): + if 'rbd_default_features' in line: + return int(line.split('=')[1].lstrip().rstrip()) + + +def get_rbd_features(): + """Determine if we should set, and what the rbd default features should be.
+ + :returns: None or the appropriate value to use + :rtype: Optional[int] + """ + RBD_FEATURE_EXCLUSIVE_LOCK = 4 + RBD_FEATURE_JOURNALING = 64 + + rbd_feature_config = config('default-rbd-features') + if rbd_feature_config: + return int(rbd_feature_config) + elif has_rbd_mirrors(): + return (get_default_rbd_features() | + RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING) diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 6173e2cd..e7515307 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -57,6 +57,7 @@ class CephHooksTestCase(unittest.TestCase): def setUp(self): super(CephHooksTestCase, self).setUp() + @patch.object(ceph_hooks, 'get_rbd_features', return_value=None) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) @@ -66,7 +67,8 @@ def setUp(self): @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') - def test_get_ceph_context(self, mock_config, mock_config2): + def test_get_ceph_context(self, mock_config, mock_config2, + _get_rbd_features): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] @@ -84,6 +86,7 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_rbd_features', return_value=1) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', @@ -94,9 +97,9 @@ def test_get_ceph_context(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') - def test_get_ceph_context_rbd_features(self, mock_config, mock_config2): + def test_get_ceph_context_rbd_features(self, mock_config, mock_config2, + _get_rbd_features): config = copy.deepcopy(CHARM_CONFIG) - config['default-rbd-features'] = 1 mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() @@ -114,6 +117,7 @@ def test_get_ceph_context_rbd_features(self, mock_config, mock_config2): 'rbd_features': 1} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_rbd_features', return_value=None) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) @@ -123,7 +127,8 @@ def test_get_ceph_context_rbd_features(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') - def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): + def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2, + _get_rbd_features): config = copy.deepcopy(CHARM_CONFIG) config['config-flags'] = '{"mon": {"mon sync max retries": 10}}' mock_config.side_effect = lambda key: config[key] @@ -143,6 +148,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks,
'get_rbd_features', return_value=None) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) @@ -153,7 +159,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') def test_get_ceph_context_w_config_flags_invalid(self, mock_config, - mock_config2): + mock_config2, + _get_rbd_features): config = copy.deepcopy(CHARM_CONFIG) config['config-flags'] = ('{"mon": {"mon sync max retries": 10},' '"foo": "bar"}') diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py new file mode 100644 index 00000000..23013afd --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -0,0 +1,65 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import test_utils + +from hooks import utils + + +class CephUtilsTestCase(test_utils.CharmTestCase): + + def setUp(self): + super().setUp() + + @mock.patch.object(utils, 'related_units') + @mock.patch.object(utils, 'relation_ids') + def test_has_rbd_mirrors(self, _relation_ids, _related_units): + # NOTE(fnordahl): This optimization will not be useful until we get a + # resolution on LP: #1818245 + # _goal_state.return_value = {'relations': {'rbd-mirror': None}} + # self.assertTrue(utils.has_rbd_mirrors()) + # _goal_state.assert_called_once_with() + # _goal_state.side_effect = NotImplementedError + _relation_ids.return_value = ['arelid'] + _related_units.return_value = ['aunit/0'] + self.assertTrue(utils.has_rbd_mirrors()) + _relation_ids.assert_called_once_with('rbd-mirror') + _related_units.assert_called_once_with('arelid') + + @mock.patch.object(utils.subprocess, 'check_output') + def test_get_default_rbd_features(self, _check_output): + _check_output.return_value = ('a = b\nrbd_default_features = 61\n' + 'c = d\n') + self.assertEquals( + utils.get_default_rbd_features(), + 61) + _check_output.assert_called_once_with( + ['ceph', '-c', '/dev/null', '--show-config'], + universal_newlines=True) + + @mock.patch.object(utils, 'get_default_rbd_features') + @mock.patch.object(utils, 'has_rbd_mirrors') + @mock.patch.object(utils, 'config') + def test_get_rbd_features(self, _config, _has_rbd_mirrors, + _get_default_rbd_features): + _config.side_effect = \ + lambda key: {'default-rbd-features': 42}.get(key, None) + self.assertEquals(utils.get_rbd_features(), 42) + _has_rbd_mirrors.return_value = True + _get_default_rbd_features.return_value = 61 + _config.side_effect = lambda key: {}.get(key, None) + self.assertEquals(utils.get_rbd_features(), 125) + _has_rbd_mirrors.return_value = False + self.assertEquals(utils.get_rbd_features(), None) From 9609306ff7898e8340645f61b9d75d4e8b3873f9 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Sat, 2 Mar 2019 14:15:40 +0100 Subject: [PATCH 1693/2699] Block when RBD Mirroring enabled with incorrect RBD features If the 
``default-rbd-features`` configuration option is not set the correct feature bitmap will be computed automatically. However, if the user has explicitly set the configuration option we will honour that, but block if it does not contain the required bits for RBD Mirroring. Change-Id: I84ab445780d2208dc87c36b1eb8171b27a992a1e --- ceph-mon/hooks/ceph_hooks.py | 13 +++++++++++++ ceph-mon/hooks/utils.py | 19 ++++++++++++++----- ceph-mon/unit_tests/test_ceph_utils.py | 9 +++++++++ ceph-mon/unit_tests/test_status.py | 25 +++++++++++++++++++++++-- 4 files changed, 59 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 355e01b3..82f88f41 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -81,11 +81,13 @@ from charmhelpers.contrib.storage.linux.ceph import ( CephConfContext) from utils import ( + add_rbd_mirror_features, assert_charm_supports_ipv6, get_cluster_addr, get_networks, get_public_addr, get_rbd_features, + has_rbd_mirrors, ) from charmhelpers.contrib.charmsupport import nrpe @@ -878,6 +880,17 @@ def assess_status(): status_set('waiting', 'Peer units detected, waiting for addresses') return + configured_rbd_features = config('default-rbd-features') + if has_rbd_mirrors() and configured_rbd_features: + if add_rbd_mirror_features( + configured_rbd_features) != configured_rbd_features: + # The configured RBD features bitmap does not contain the features + # required for RBD Mirroring + status_set('blocked', 'Configuration mismatch: RBD Mirroring ' + 'enabled but incorrect value set for ' + '``default-rbd-features``') + return + # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): expected_osd_count = config('expected-osd-count') or 3 diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 46063d3d..158d5912 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -193,18 +193,27 @@ def get_default_rbd_features(): return int(line.split('=')[1].lstrip().rstrip()) +def add_rbd_mirror_features(rbd_features): + """Take a RBD Features bitmap and add the features required for Mirroring. + + :param rbd_features: Input bitmap + :type rbd_features: int + :returns: Bitmap bitwise OR'ed with the features required for Mirroring. + :rtype: int + """ + RBD_FEATURE_EXCLUSIVE_LOCK = 4 + RBD_FEATURE_JOURNALING = 64 + return rbd_features | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING + + def get_rbd_features(): """Determine if we should set, and what the rbd default features should be. 
:returns: None or the appropriate value to use :rtype: Optional[int] """ - RBD_FEATURE_EXCLUSIVE_LOCK = 4 - RBD_FEATURE_JOURNALING = 64 - rbd_feature_config = config('default-rbd-features') if rbd_feature_config: return int(rbd_feature_config) elif has_rbd_mirrors(): - return (get_default_rbd_features() | - RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING) + return add_rbd_mirror_features(get_default_rbd_features()) diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 23013afd..9765b8b6 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -49,6 +49,15 @@ def test_get_default_rbd_features(self, _check_output): ['ceph', '-c', '/dev/null', '--show-config'], universal_newlines=True) + def test_add_mirror_rbd_features(self): + DEFAULT_FEATURES = 61 + RBD_FEATURE_EXCLUSIVE_LOCK = 4 + RBD_FEATURE_JOURNALING = 64 + COMBINED_FEATURES = (DEFAULT_FEATURES | RBD_FEATURE_EXCLUSIVE_LOCK | + RBD_FEATURE_JOURNALING) + self.assertEqual(utils.add_rbd_mirror_features(DEFAULT_FEATURES), + COMBINED_FEATURES) + @mock.patch.object(utils, 'get_default_rbd_features') @mock.patch.object(utils, 'has_rbd_mirrors') @mock.patch.object(utils, 'config') diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 3bc38b70..8d0bc105 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -81,30 +81,51 @@ def test_assess_status_peers_incomplete(self, _peer_units): self.status_set.assert_called_with('waiting', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'has_rbd_mirrors') @mock.patch.object(hooks, 'sufficient_osds') @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_peers_complete_active(self, _peer_units, - _sufficient_osds): + _sufficient_osds, + _has_rbd_mirrors): _peer_units.return_value = ENOUGH_PEERS_COMPLETE _sufficient_osds.return_value = True self.ceph.is_bootstrapped.return_value = True self.ceph.is_quorum.return_value = True + _has_rbd_mirrors.return_value = False hooks.assess_status() self.status_set.assert_called_with('active', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'has_rbd_mirrors') @mock.patch.object(hooks, 'sufficient_osds') @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_peers_complete_down(self, _peer_units, - _sufficient_osds): + _sufficient_osds, + _has_rbd_mirrors): _peer_units.return_value = ENOUGH_PEERS_COMPLETE _sufficient_osds.return_value = True self.ceph.is_bootstrapped.return_value = False self.ceph.is_quorum.return_value = False + _has_rbd_mirrors.return_value = False hooks.assess_status() self.status_set.assert_called_with('blocked', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'has_rbd_mirrors') + @mock.patch.object(hooks, 'sufficient_osds') + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_rbd_feature_mismatch(self, _peer_units, + _sufficient_osds, + _has_rbd_mirrors): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + _sufficient_osds.return_value = True + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + _has_rbd_mirrors.return_value = True + self.test_config.set('default-rbd-features', 61) + hooks.assess_status() + self.status_set.assert_called_once_with('blocked', mock.ANY) + def test_get_peer_units_no_peers(self): self.relation_ids.return_value = ['mon:1']
self.related_units.return_value = [] From 172893a8344da1422a296fe91bbef0be44503dee Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 23 Jan 2019 14:46:26 +0000 Subject: [PATCH 1694/2699] udevadm settle before processing osd-devices Run udevadm settle before processing osd-devices as there may be udev events that have been queued for processing on server startup. Includes charms.ceph sync Corresponding charms.ceph change is: Iec5932a4d819ad87e54c2af391abe1befe84f164 Change-Id: Ia4210d6a5c2c8a0b0e2038b25f24d2d82600e10a Closes-Bug: #1812925 --- ceph-osd/hooks/ceph_hooks.py | 1 + ceph-osd/lib/ceph/broker.py | 17 ++-- ceph-osd/lib/ceph/utils.py | 180 ++++++++++++++++++++++++++++++++--- 3 files changed, 181 insertions(+), 17 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c6b925fd..003c6a89 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -513,6 +513,7 @@ def prepare_disks_and_activate(): log('ceph bootstrapped, rescanning disks') emit_cephconf() bluestore = use_bluestore() + ceph.udevadm_settle() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), osd_journal, diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index 3e857d21..3226f4cc 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -85,6 +85,7 @@ "compression_mode": [str, ["none", "passive", "aggressive", "force"]], "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], "compression_required_ratio": [float, [0.0, 1.0]], + "crush_rule": [str], } CEPH_BUCKET_TYPES = [ @@ -368,7 +369,8 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -408,8 +410,9 @@ def handle_erasure_pool(request, service): pool.create() # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_replicated_pool(request, service): @@ -421,7 +424,8 @@ def handle_replicated_pool(request, service): """ pool_name = request.get('name') replicas = request.get('replicas') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -468,8 +472,9 @@ def handle_replicated_pool(request, service): level=DEBUG) # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_create_cache_tier(request, service): diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 83388c9b..b4f87907 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -935,6 +935,11 @@ def start_osds(devices): subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) +def udevadm_settle(): + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + + def rescan_osd_devices(): cmd = [ 'udevadm', 'trigger', @@ -943,8 +948,7 @@ def rescan_osd_devices(): subprocess.call(cmd) - cmd = ['udevadm', 'settle'] - subprocess.call(cmd) + udevadm_settle()
_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" @@ -1072,8 +1076,8 @@ def import_radosgw_key(key): } -def get_radosgw_key(pool_list=None): - return get_named_key(name='radosgw.gateway', +def get_radosgw_key(pool_list=None, name=None): + return get_named_key(name=name or 'radosgw.gateway', caps=_radosgw_caps, pool_list=pool_list) @@ -1128,6 +1132,15 @@ def get_mds_bootstrap_key(): ]) ]) +rbd_mirror_caps = collections.OrderedDict([ + ('mon', ['profile rbd']), + ('osd', ['profile rbd']), +]) + + +def get_rbd_mirror_key(name): + return get_named_key(name=name, caps=rbd_mirror_caps) + def create_named_keyring(entity, name, caps=None): caps = caps or _default_caps @@ -1819,6 +1832,13 @@ def _initialize_disk(dev, dev_uuid, encrypt=False, '--uuid', dev_uuid, dev, ]) + subprocess.check_call([ + 'dd', + 'if=/dev/zero', + 'of={}'.format(dm_crypt), + 'bs=512', + 'count=1', + ]) if use_vaultlocker: return dm_crypt @@ -1899,6 +1919,7 @@ def osdize_dir(path, encrypt=False, bluestore=False): return mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755) + chownr('/var/lib/ceph', ceph_user(), ceph_user()) cmd = [ 'sudo', '-u', ceph_user(), 'ceph-disk', @@ -2499,18 +2520,21 @@ def update_owner(path, recurse_dirs=True): secs=elapsed_time.total_seconds(), path=path), DEBUG) -def list_pools(service): +def list_pools(client='admin'): """This will list the current pools that Ceph has - :param service: String service id to run under - :returns: list. Returns a list of the ceph pools. - :raises: CalledProcessError if the subprocess fails to run. + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Returns a list of available pools. + :rtype: list + :raises: subprocess.CalledProcessError if the subprocess fails to run. """ try: pool_list = [] - pools = str(subprocess - .check_output(['rados', '--id', service, 'lspools']) - .decode('UTF-8')) + pools = subprocess.check_output(['rados', '--id', client, 'lspools'], + universal_newlines=True, + stderr=subprocess.STDOUT) for pool in pools.splitlines(): pool_list.append(pool) return pool_list @@ -2519,6 +2543,140 @@ def list_pools(service): raise + +def get_pool_param(pool, param, client='admin'): + """Get parameter from pool. + + :param pool: Name of pool to get variable from + :type pool: str + :param param: Name of variable to get + :type param: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Value of variable on pool or None + :rtype: str or None + :raises: subprocess.CalledProcessError + """ + try: + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param], + universal_newlines=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT: option' in cp.output: + return None + raise + if ':' in output: + return output.split(':')[1].lstrip().rstrip() + + +def get_pool_erasure_profile(pool, client='admin'): + """Get erasure code profile for pool.
+ + :param pool: Name of pool to get variable from + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Erasure code profile of pool or None + :rtype: str or None + :raises: subprocess.CalledProcessError + """ + try: + return get_pool_param(pool, 'erasure_code_profile', client=client) + except subprocess.CalledProcessError as cp: + if cp.returncode == 13 and 'EACCES: pool' in cp.output: + # Not an Erasure coded pool + return None + raise + + +def get_pool_quota(pool, client='admin'): + """Get pool quota. + + :param pool: Name of pool to get variable from + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Dictionary with quota variables + :rtype: dict + :raises: subprocess.CalledProcessError + """ + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool], + universal_newlines=True, stderr=subprocess.STDOUT) + rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)') + result = {} + for line in output.splitlines(): + m = rc.match(line) + if m: + result.update({'max_{}'.format(m.group(1)): m.group(2)}) + return result + + +def get_pool_applications(pool='', client='admin'): + """Get pool applications. + + :param pool: (Optional) Name of pool to get applications for + Defaults to get for all pools + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Dictionary with pool name as key + :rtype: dict + :raises: subprocess.CalledProcessError + """ + + cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get'] + if pool: + cmd.append(pool) + try: + output = subprocess.check_output(cmd, + universal_newlines=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT' in cp.output: + return {} + raise + return json.loads(output) + + +def list_pools_detail(): + """Get detailed information about pools. + + Structure: + {'pool_name_1': {'applications': {'application': {}}, + 'parameters': {'pg_num': '42', 'size': '42'}, + 'quota': {'max_bytes': '1000', + 'max_objects': '10'}, + }, + 'pool_name_2': ... + } + + :returns: Dictionary with detailed pool information. + :rtype: dict + :raises: subprocess.CalledProcessError + """ + get_params = ['pg_num', 'size'] + result = {} + applications = get_pool_applications() + for pool in list_pools(): + result[pool] = { + 'applications': applications.get(pool, {}), + 'parameters': {}, + 'quota': get_pool_quota(pool), + } + for param in get_params: + result[pool]['parameters'].update({ + param: get_pool_param(pool, param)}) + erasure_profile = get_pool_erasure_profile(pool) + if erasure_profile: + result[pool]['parameters'].update({ + 'erasure_code_profile': erasure_profile}) + return result + + def dirs_need_ownership_update(service): """Determines if directories still need change of ownership. From 0e303ac75ba59aca2ec0e2cd7620b1da8241ddbc Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 4 Mar 2019 08:27:35 +0100 Subject: [PATCH 1695/2699] Add gate jobs Fix a few discrepancies discovered during unit testing. Add missing unit tests. Add end to end functional test bundle.
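For reference, the pool metadata gathered by the ``list_pools_detail`` helper added above is a nested dict keyed by pool name. A minimal sketch of how a consumer might filter it (the pool data here is illustrative, not taken from a real cluster):

    # Shape follows the ``list_pools_detail`` docstring above; the values
    # are made up for illustration.
    pools = {
        'cinder-ceph': {
            'applications': {'rbd': {}},
            'parameters': {'pg_num': '42', 'size': '3'},
            'quota': {'max_bytes': '1024', 'max_objects': '51'},
        },
    }

    # Select pools carrying the 'rbd' application, the same filtering the
    # ceph-rbd-mirror handlers apply later in this series.
    rbd_pools = [name for name, attrs in pools.items()
                 if 'rbd' in attrs['applications']]
    assert rbd_pools == ['cinder-ceph']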
Change-Id: Ic05c72f9e684f615b60a3975779e76526a0c9c64 --- ceph-rbd-mirror/.gitignore | 3 + ceph-rbd-mirror/.gitreview | 4 + ceph-rbd-mirror/.zuul.yaml | 3 + .../lib/charm/openstack/ceph_rbd_mirror.py | 1 - .../src/reactive/ceph_rbd_mirror_handlers.py | 16 +- .../src/tests/bundles/bionic-queens-e2e.yaml | 118 +++++++++++ .../src/tests/bundles/bionic-queens.yaml | 4 +- ceph-rbd-mirror/src/tests/tests.yaml | 7 +- ceph-rbd-mirror/test-requirements.txt | 2 +- ceph-rbd-mirror/tox.ini | 46 +++- .../test_ceph_rbd_mirror_handlers.py | 197 ++++++++++++++++++ ...est_lib_charm_openstack_ceph_rbd_mirror.py | 57 ++++- 12 files changed, 434 insertions(+), 24 deletions(-) create mode 100644 ceph-rbd-mirror/.gitreview create mode 100644 ceph-rbd-mirror/.zuul.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml create mode 100644 ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py diff --git a/ceph-rbd-mirror/.gitignore b/ceph-rbd-mirror/.gitignore index eb1cd65b..bcfbbf60 100644 --- a/ceph-rbd-mirror/.gitignore +++ b/ceph-rbd-mirror/.gitignore @@ -3,3 +3,6 @@ *__pycache__* *.pyc build +.coverage +cover/ +*.swp diff --git a/ceph-rbd-mirror/.gitreview b/ceph-rbd-mirror/.gitreview new file mode 100644 index 00000000..b094119b --- /dev/null +++ b/ceph-rbd-mirror/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/charm-ceph-rbd-mirror.git diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml new file mode 100644 index 00000000..7051aeeb --- /dev/null +++ b/ceph-rbd-mirror/.zuul.yaml @@ -0,0 +1,3 @@ +- project: + templates: + - python35-charm-jobs diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 9c507398..b765e2a2 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -69,7 +69,6 @@ def custom_assess_status_check(self): ch_core.hookenv.log('DEBUG: mirror_pool_status({}) = "{}"' .format(pool, status), level=ch_core.hookenv.INFO) - return 'active', 'Custom' return None, None def _mirror_pool_info(self, pool): diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 28487ade..6ba018c7 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -34,12 +34,12 @@ def request_keys(): with charm.provide_charm_instance() as charm_instance: for flag in ('ceph-local.connected', 'ceph-remote.connected'): - endpoint = reactive.relations.endpoint_from_flag(flag) + endpoint = reactive.endpoint_from_flag(flag) ch_core.hookenv.log('Ceph endpoint "{}" connected, requesting key' .format(endpoint.endpoint_name), level=ch_core.hookenv.INFO) endpoint.request_key() - charm_instance.assess_status() + charm_instance.assess_status() @reactive.when('config.changed') @@ -48,8 +48,8 @@ def request_keys(): def config_changed(): with charm.provide_charm_instance() as charm_instance: charm_instance.upgrade_if_available([ - reactive.relations.endpoint_from_flag('ceph-local.available'), - reactive.relations.endpoint_from_flag('ceph-remote.available'), + reactive.endpoint_from_flag('ceph-local.available'), + reactive.endpoint_from_flag('ceph-remote.available'), ]) charm_instance.assess_status() @@ -60,6 +60,7 @@ def disable_services(): for service in charm_instance.services: ch_core.host.service('disable', service) 
ch_core.host.service('stop', service) + charm_instance.assess_status() @reactive.when('ceph-local.available') @@ -78,10 +79,9 @@ def render_stuff(*args): charm_instance.configure_ceph_keyring(endpoint, cluster_name=cluster_name) charm_instance.render_with_interfaces(args) - with charm.provide_charm_instance() as charm_instance: - for service in charm_instance.services: - ch_core.host.service('enable', service) - ch_core.host.service('start', service) + for service in charm_instance.services: + ch_core.host.service('enable', service) + ch_core.host.service('start', service) reactive.set_flag('config.rendered') charm_instance.assess_status() diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml new file mode 100644 index 00000000..835f4db0 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml @@ -0,0 +1,118 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + num_units: 0 + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + storage: + osd-devices: cinder,20G + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + storage: + osd-devices: cinder,20G + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro +relations: +- - mysql + - keystone +- - mysql + - cinder +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote +- - mysql:shared-db + - nova-cloud-controller:shared-db +- - keystone:identity-service + - nova-cloud-controller:identity-service +- - rabbitmq-server:amqp + - nova-cloud-controller:amqp +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:cloud-compute + - nova-cloud-controller:cloud-compute +- - glance:identity-service + - keystone:identity-service +- - glance:shared-db + - mysql:shared-db +- - glance:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - neutron-openvswitch:neutron-plugin + - nova-compute:neutron-plugin +- - neutron-openvswitch:amqp + - 
rabbitmq-server:amqp +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml index 06f41ac0..f9c12243 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -16,7 +16,7 @@ applications: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 ceph-mon: - charm: cs:~fnordahl/ceph-mon-rbd-mirror + charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -35,7 +35,7 @@ applications: options: source: distro ceph-mon-b: - charm: cs:~fnordahl/ceph-mon-rbd-mirror + charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 7cf75ff9..ad4e96bb 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -2,13 +2,8 @@ charm_name: ceph-rbd-mirror smoke_bundles: - bionic-queens gate_bundles: -- xenial-pike -- xenial-queens - bionic-queens -- bionic-rocky -- cosmic-rocky -dev_bundles: -- disco-stein +- bionic-queens-e2e configure: - zaza.charm_tests.noop.setup.basic_setup tests: diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index ca62003b..3162b0fb 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -4,7 +4,7 @@ # # Lint and unit test requirements flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr requests>=2.18.4 charms.reactive mock>=1.2 diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 41ba42d8..550f5ab6 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} LAYER_PATH={toxinidir}/layers INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy INTERFACE_PATH +passenv = http_proxy https_proxy install_command = pip install {opts} {packages} deps = @@ -26,17 +26,45 @@ commands = [testenv:py3] basepython = python3 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report [testenv:pep8] basepython = python3 @@ -47,6 +75,16 @@ commands = flake8 {posargs} src unit_tests basepython = python3 commands = {posargs} +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . 
+omit = + .tox/* + unit_tests/* + [flake8] # E402 ignore necessary for path append before sys module import in actions ignore = E402 diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py new file mode 100644 index 00000000..0b5a5f7d --- /dev/null +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -0,0 +1,197 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import charm.openstack.ceph_rbd_mirror as crm +import reactive.ceph_rbd_mirror_handlers as handlers + +import charms_openstack.test_utils as test_utils + + +class TestRegisteredHooks(test_utils.TestRegisteredHooks): + + def test_hooks(self): + defaults = [ + 'charm.installed', + 'update-status', + 'upgrade-charm', + ] + hook_set = { + 'when': { + 'config_changed': ( + 'config.changed', + 'ceph-local.available', + 'ceph-remote.available', + ), + 'render_stuff': ( + 'ceph-local.available', + 'ceph-remote.available', + ), + 'configure_pools': ( + 'leadership.is_leader', + 'config.rendered', + 'ceph-local.available', + 'ceph-remote.available', + ), + }, + 'when_all': { + 'request_keys': ( + 'ceph-local.connected', + 'ceph-remote.connected', + ), + }, + 'when_not': { + 'disable_services': ( + 'config.rendered', + ), + }, + 'when_not_all': { + 'request_keys': ( + 'ceph-local.available', + 'ceph-remote.available', + ), + }, + } + # test that the hooks were registered + self.registered_hooks_test_helper(handlers, hook_set, defaults) + + +class TestCephRBDMirrorHandlers(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release(crm.CephRBDMirrorCharm.release) + self.crm_charm = mock.MagicMock() + self.patch_object(handlers.charm, 'provide_charm_instance', + new=mock.MagicMock()) + self.provide_charm_instance().__enter__.return_value = \ + self.crm_charm + self.provide_charm_instance().__exit__.return_value = None + + def test_request_keys(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + endpoint_local = mock.MagicMock() + endpoint_remote = mock.MagicMock() + endpoint_local.endpoint_name = 'ceph-local' + endpoint_remote.endpoint_name = 'ceph-remote' + self.endpoint_from_flag.side_effect = [endpoint_local, + endpoint_remote] + handlers.request_keys() + self.endpoint_from_flag.assert_has_calls([ + mock.call('ceph-local.connected'), + mock.call('ceph-remote.connected'), + ]) + endpoint_local.request_key.assert_called_once_with() + endpoint_remote.request_key.assert_called_once_with() + self.crm_charm.assess_status.assert_called_once_with() + + def test_config_changed(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + handlers.config_changed() + self.endpoint_from_flag.assert_has_calls([ + mock.call('ceph-local.available'), + mock.call('ceph-remote.available'), + ]) + self.crm_charm.upgrade_if_available.assert_called_once_with( + [self.endpoint_from_flag(), self.endpoint_from_flag()]) + self.crm_charm.assess_status.assert_called_once_with() + 
+ def test_disable_services(self): + self.patch_object(handlers.ch_core.host, 'service') + self.crm_charm.services = ['aservice'] + handlers.disable_services() + self.service.assert_has_calls([ + mock.call('disable', 'aservice'), + mock.call('stop', 'aservice'), + ]) + self.crm_charm.assess_status.assert_called_once_with() + + def test_render_stuff(self): + self.patch_object(handlers.ch_core.host, 'service') + endpoint_local = mock.MagicMock() + endpoint_remote = mock.MagicMock() + endpoint_local.endpoint_name = 'ceph-local' + endpoint_local.pools = {} + endpoint_remote.endpoint_name = 'ceph-remote' + endpoint_remote.pools = {} + self.crm_charm.services = ['aservice'] + handlers.render_stuff(endpoint_local, endpoint_remote) + self.crm_charm.configure_ceph_keyring.assert_has_calls([ + mock.call(endpoint_local, cluster_name=None), + mock.call(endpoint_remote, cluster_name='remote'), + ]) + self.crm_charm.render_with_interfaces.assert_called_once_with( + (endpoint_local, endpoint_remote)) + self.service.assert_has_calls([ + mock.call('enable', 'aservice'), + mock.call('start', 'aservice'), + ]) + self.crm_charm.assess_status.assert_called_once_with() + + def test_configure_pools(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + endpoint_local = mock.MagicMock() + endpoint_remote = mock.MagicMock() + endpoint_local.endpoint_name = 'ceph-local' + endpoint_local.pools = { + 'cinder-ceph': { + 'applications': {'rbd': {}}, + 'parameters': {'pg_num': 42, 'size': 3}, + 'quota': {'max_bytes': 1024, 'max_objects': 51}, + }, + } + endpoint_remote.endpoint_name = 'ceph-remote' + self.endpoint_from_flag.side_effect = [endpoint_local, + endpoint_remote] + self.crm_charm.mirror_pool_enabled.return_value = False + handlers.configure_pools() + self.endpoint_from_flag.assert_has_calls([ + mock.call('ceph-local.available'), + mock.call('ceph-remote.available'), + ]) + self.crm_charm.mirror_pool_enabled.assert_called_once_with( + 'cinder-ceph') + self.crm_charm.mirror_pool_enable.assert_called_once_with( + 'cinder-ceph') + endpoint_remote.create_replicated_pool.assert_called_once_with( + 'cinder-ceph', replicas=3, pg_num=42, app_name='rbd', + max_bytes=1024, max_objects=51) + self.assertFalse(endpoint_remote.create_erasure_pool.called) + self.endpoint_from_flag.side_effect = [endpoint_local, + endpoint_remote] + self.crm_charm.mirror_pool_enabled.return_value = True + self.crm_charm.mirror_pool_has_peers.return_value = True + self.crm_charm.mirror_pool_enabled.reset_mock() + self.crm_charm.mirror_pool_enable.reset_mock() + handlers.configure_pools() + self.crm_charm.mirror_pool_enabled.assert_called_once_with( + 'cinder-ceph') + self.crm_charm.mirror_pool_has_peers.assert_called_once_with( + 'cinder-ceph') + self.assertFalse(self.crm_charm.mirror_pool_enable.called) + endpoint_local.pools = { + 'cinder-ceph': { + 'applications': {'rbd': {}}, + 'parameters': {'pg_num': 42, 'erasure_code_profile': 'prof'}, + 'quota': {'max_bytes': 1024, 'max_objects': 51}, + }, + } + self.endpoint_from_flag.side_effect = [endpoint_local, + endpoint_remote] + endpoint_remote.create_replicated_pool.reset_mock() + handlers.configure_pools() + endpoint_remote.create_erasure_pool.assert_called_once_with( + 'cinder-ceph', erasure_profile='prof', pg_num=42, app_name='rbd', + max_bytes=1024, max_objects=51) diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 7e2010c0..3d399f30 100644 --- 
a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import mock + import charms_openstack.test_utils as test_utils import charm.openstack.ceph_rbd_mirror as ceph_rbd_mirror @@ -25,5 +27,56 @@ def setUp(self): class TestCephRBDMirrorCharm(Helper): - def test_foo(self): - pass + + def test_custom_assess_status_check(self): + self.patch_object(ceph_rbd_mirror.socket, 'gethostname') + self.patch_object(ceph_rbd_mirror.reactive, 'is_flag_set') + self.is_flag_set.return_value = False + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + self.assertEqual(crmc.custom_assess_status_check(), (None, None)) + self.is_flag_set.return_value = True + self.patch_object(ceph_rbd_mirror.reactive, 'endpoint_from_flag') + self.assertEqual(crmc.custom_assess_status_check(), + (None, None)) + self.endpoint_from_flag.assert_called_once_with( + 'ceph-local.available') + + def test__mirror_pool_info(self): + self.patch_object(ceph_rbd_mirror.socket, 'gethostname') + self.patch_object(ceph_rbd_mirror.subprocess, 'check_output') + self.gethostname.return_value = 'ahostname' + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + crmc._mirror_pool_info('apool') + self.check_output.assert_called_once_with( + ['rbd', '--id', 'rbd-mirror.ahostname', 'mirror', 'pool', 'info', + 'apool'], universal_newlines=True) + + def test_mirror_pool_enabled(self): + self.patch_object(ceph_rbd_mirror.socket, 'gethostname') + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + _mirror_pool_info = mock.MagicMock() + _mirror_pool_info.return_value = ( + 'Mode: pool\n' + 'Peers: \n' + ' UUID NAME CLIENT' + ' \n') + crmc._mirror_pool_info = _mirror_pool_info + self.assertTrue(crmc.mirror_pool_enabled('apool')) + _mirror_pool_info.assert_called_once_with('apool') + _mirror_pool_info.return_value = 'Mode: disabled\n' + self.assertFalse(crmc.mirror_pool_enabled('apool')) + + def test_mirror_pool_has_peers(self): + self.patch_object(ceph_rbd_mirror.socket, 'gethostname') + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + _mirror_pool_info = mock.MagicMock() + _mirror_pool_info.return_value = ( + 'Mode: pool\n' + 'Peers: \n' + ' UUID NAME CLIENT' + ' \n') + crmc._mirror_pool_info = _mirror_pool_info + self.assertTrue(crmc.mirror_pool_has_peers('apool')) + _mirror_pool_info.assert_called_once_with('apool') + _mirror_pool_info.return_value = 'Mode: pool\nPeers: none\n' + self.assertFalse(crmc.mirror_pool_has_peers('apool')) From 68e5d681ae8fd8ca8d95e50a09f0844231cfc2aa Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 4 Mar 2019 12:14:13 +0100 Subject: [PATCH 1696/2699] Make rbd tool use json formatted output Change-Id: Ie2233350cb4520b598dd127b24132fdb4ed42802 --- .../lib/charm/openstack/ceph_rbd_mirror.py | 20 +++++------ ...est_lib_charm_openstack_ceph_rbd_mirror.py | 35 ++++++++++++------- 2 files changed, 32 insertions(+), 23 deletions(-) diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index b765e2a2..8ff7f4ab 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import json import socket import subprocess @@ -73,25 +74,24 @@ def custom_assess_status_check(self): def _mirror_pool_info(self, pool): output = subprocess.check_output(['rbd', '--id', self.ceph_id, - 'mirror', 'pool', 'info', pool], + 'mirror', 'pool', 'info', '--format', + 'json', pool], universal_newlines=True) - return output + return json.loads(output) def mirror_pool_enabled(self, pool): - return 'Mode: pool' in self._mirror_pool_info(pool) + return self._mirror_pool_info(pool).get('mode', None) == 'pool' def mirror_pool_has_peers(self, pool): - return 'Peers: none' not in self._mirror_pool_info(pool) + return len(self._mirror_pool_info(pool).get('peers', [])) > 0 def mirror_pool_status(self, pool): output = subprocess.check_output(['rbd', '--id', self.ceph_id, - 'mirror', 'pool', 'status', pool], + 'mirror', 'pool', 'status', + '--format', 'json', '--verbose', + pool], universal_newlines=True) - result = {} - for line in output.splitlines(): - vp = line.split(':') - result.update({vp[0]: vp[1].lstrip().rstrip()}) - return result + return json.loads(output) def mirror_pool_enable(self, pool): base_cmd = ['rbd', '--id', self.ceph_id, 'mirror', 'pool'] diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 3d399f30..8e17ed49 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -45,38 +45,47 @@ def test__mirror_pool_info(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') self.patch_object(ceph_rbd_mirror.subprocess, 'check_output') self.gethostname.return_value = 'ahostname' + self.check_output.return_value = '{}' crmc = ceph_rbd_mirror.CephRBDMirrorCharm() crmc._mirror_pool_info('apool') self.check_output.assert_called_once_with( ['rbd', '--id', 'rbd-mirror.ahostname', 'mirror', 'pool', 'info', - 'apool'], universal_newlines=True) + '--format', 'json', 'apool'], universal_newlines=True) def test_mirror_pool_enabled(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') crmc = ceph_rbd_mirror.CephRBDMirrorCharm() _mirror_pool_info = mock.MagicMock() - _mirror_pool_info.return_value = ( - 'Mode: pool\n' - 'Peers: \n' - ' UUID NAME CLIENT' - ' \n') + _mirror_pool_info.return_value = { + 'mode': 'pool', + 'peers': [{ + 'uuid': '0e4dfe58-93fc-44f8-8c74-7e700f950118', + 'cluster_name': 'remote', + 'client_name': + 'client.rbd-mirror.juju-c50b1a-zaza-4ce96f1e7e43-12'}] + } crmc._mirror_pool_info = _mirror_pool_info self.assertTrue(crmc.mirror_pool_enabled('apool')) _mirror_pool_info.assert_called_once_with('apool') - _mirror_pool_info.return_value = 'Mode: disabled\n' + _mirror_pool_info.return_value = {'mode': 'disabled'} self.assertFalse(crmc.mirror_pool_enabled('apool')) def test_mirror_pool_has_peers(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') crmc = ceph_rbd_mirror.CephRBDMirrorCharm() _mirror_pool_info = mock.MagicMock() - _mirror_pool_info.return_value = ( - 'Mode: pool\n' - 'Peers: \n' - ' UUID NAME CLIENT' - ' \n') + _mirror_pool_info.return_value = { + 'mode': 'pool', + 'peers': [{ + 'uuid': '0e4dfe58-93fc-44f8-8c74-7e700f950118', + 'cluster_name': 'remote', + 'client_name': + 'client.rbd-mirror.juju-c50b1a-zaza-4ce96f1e7e43-12'}] + } crmc._mirror_pool_info = _mirror_pool_info self.assertTrue(crmc.mirror_pool_has_peers('apool')) _mirror_pool_info.assert_called_once_with('apool') - _mirror_pool_info.return_value = 'Mode: 
pool\nPeers: none\n' + _mirror_pool_info.return_value = { + 'mode': 'pool', + 'peers': []} self.assertFalse(crmc.mirror_pool_has_peers('apool')) From ed624288c1081a18ba03a8be93a650a68db1d1ec Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 4 Mar 2019 17:44:39 +0100 Subject: [PATCH 1697/2699] Add summary stats for mirrored pools and images in workload status Change-Id: I550473edf7c7253b96fdb323b8b4761049a1de88 --- .../lib/charm/openstack/ceph_rbd_mirror.py | 51 ++++++++++++++++--- ...est_lib_charm_openstack_ceph_rbd_mirror.py | 25 ++++++++- 2 files changed, 69 insertions(+), 7 deletions(-) diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 8ff7f4ab..d195c794 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import json import socket import subprocess @@ -64,12 +65,39 @@ def custom_assess_status_check(self): reactive.is_flag_set('ceph-local.available') and reactive.is_flag_set('ceph-remote.available')): endpoint = reactive.endpoint_from_flag('ceph-local.available') - for pool, attrs in endpoint.pools.items(): - if 'rbd' in attrs['applications']: - status = self.mirror_pool_status(pool) - ch_core.hookenv.log('DEBUG: mirror_pool_status({}) = "{}"' - .format(pool, status), - level=ch_core.hookenv.INFO) + stats = self.mirror_pools_summary( + (pool for pool, attrs in endpoint.pools.items() + if 'rbd' in attrs['applications'])) + ch_core.hookenv.log('mirror_pools_summary = "{}"' + .format(stats), + level=ch_core.hookenv.DEBUG) + status = 'active' + pool_msg = '' + image_msg = '' + for health, count in stats['pool_health'].items(): + if not pool_msg: + pool_msg = 'Pools ' + pool_msg += '{} ({}) '.format(health, count) + if health != 'OK': + status = 'blocked' + for state, count in stats['image_states'].items(): + if not image_msg: + image_msg = 'Images ' + if state == 'stopped': + state_name = 'Primary' + elif state == 'replaying': + state_name = 'Secondary' + else: + state_name = state + image_msg += '{} ({}) '.format(state_name, count) + msg = '' + if pool_msg: + msg = 'Unit is ready ({})'.format( + pool_msg + image_msg.rstrip()) + else: + status = 'waiting' + msg = 'Waiting for pools to be created' + return status, msg return None, None def _mirror_pool_info(self, pool): @@ -93,6 +121,17 @@ def mirror_pool_status(self, pool): universal_newlines=True) return json.loads(output) + def mirror_pools_summary(self, pools): + stats = {} + stats['pool_health'] = collections.defaultdict(int) + stats['image_states'] = collections.defaultdict(int) + for pool in pools: + pool_stat = self.mirror_pool_status(pool) + stats['pool_health'][pool_stat['summary']['health']] += 1 + for state, value in pool_stat['summary']['states'].items(): + stats['image_states'][state] += value + return stats + def mirror_pool_enable(self, pool): base_cmd = ['rbd', '--id', self.ceph_id, 'mirror', 'pool'] subprocess.check_call(base_cmd + ['enable', pool, 'pool']) diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 8e17ed49..08933879 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -12,6 +12,7 
@@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import mock import charms_openstack.test_utils as test_utils @@ -37,9 +38,31 @@ def test_custom_assess_status_check(self): self.is_flag_set.return_value = True self.patch_object(ceph_rbd_mirror.reactive, 'endpoint_from_flag') self.assertEqual(crmc.custom_assess_status_check(), - (None, None)) + ('waiting', 'Waiting for pools to be created')) self.endpoint_from_flag.assert_called_once_with( 'ceph-local.available') + crmc.mirror_pools_summary = mock.MagicMock() + crmc.mirror_pools_summary.return_value = collections.OrderedDict({ + 'pool_health': collections.OrderedDict( + {'OK': 1, 'WARN': 1, 'ERROR': 1}), + 'image_states': collections.OrderedDict( + {'stopped': 2, 'replaying': 2}), + }) + result = crmc.custom_assess_status_check() + self.assertTrue('blocked' in result[0]) + # the order of which the statuses appear in the string is undefined + self.assertTrue('OK (1)' in result[1]) + self.assertTrue('WARN (1)' in result[1]) + self.assertTrue('ERROR (1)' in result[1]) + self.assertTrue('Primary (2)' in result[1]) + self.assertTrue('Secondary (2)' in result[1]) + crmc.mirror_pools_summary.return_value = collections.OrderedDict({ + 'pool_health': collections.OrderedDict({'OK': 1}), + 'image_states': collections.OrderedDict({'stopped': 2}), + }) + self.assertEqual(crmc.custom_assess_status_check(), + ('active', 'Unit is ready (Pools OK (1) ' + 'Images Primary (2))')) def test__mirror_pool_info(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') From 8e27f3ccf2d3e7f0a121402d11fbb9436e1e94db Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 5 Mar 2019 10:37:55 +0100 Subject: [PATCH 1698/2699] Add actions ``demote`` is used to demote all images in all pools, used for operator controlled fail over/fall back. ``promote`` is used to promote all images in all pools, used for operator controlled or disaster recovery fail over/fall back. ``refresh-pools`` is used to refresh list of eligible pools from local Ceph cluster. Side effect is to enable mirroring of pools created manually without the use of the charm ceph broker protocol. Change-Id: I9af983b37045f83a0a9703e2212b371b97dc3121 Depends-On: I97bfb9a2c0e30998566aee56d4630af6baa36d45 --- ceph-rbd-mirror/src/actions.yaml | 18 +++ ceph-rbd-mirror/src/actions/actions.py | 103 ++++++++++++++++ ceph-rbd-mirror/src/actions/demote | 1 + ceph-rbd-mirror/src/actions/promote | 1 + ceph-rbd-mirror/src/actions/refresh-pools | 1 + .../src/reactive/ceph_rbd_mirror_handlers.py | 11 ++ ceph-rbd-mirror/unit_tests/test_actions.py | 115 ++++++++++++++++++ .../test_ceph_rbd_mirror_handlers.py | 21 ++++ 8 files changed, 271 insertions(+) create mode 100644 ceph-rbd-mirror/src/actions.yaml create mode 100755 ceph-rbd-mirror/src/actions/actions.py create mode 120000 ceph-rbd-mirror/src/actions/demote create mode 120000 ceph-rbd-mirror/src/actions/promote create mode 120000 ceph-rbd-mirror/src/actions/refresh-pools create mode 100644 ceph-rbd-mirror/unit_tests/test_actions.py diff --git a/ceph-rbd-mirror/src/actions.yaml b/ceph-rbd-mirror/src/actions.yaml new file mode 100644 index 00000000..8325746b --- /dev/null +++ b/ceph-rbd-mirror/src/actions.yaml @@ -0,0 +1,18 @@ +demote: + description: | + Demote all primary images within all pools to non-primary. + params: + force: + type: boolean +promote: + description: | + Promote all non-primary images within all pools to primary. 
+  params:
+    force:
+      type: boolean
+refresh-pools:
+  description: |
+    \
+    Refresh list of pools from local and remote Ceph endpoint.
+    As a side effect, mirroring will be configured for any manually created
+    pools that the charm currently does not know about.
diff --git a/ceph-rbd-mirror/src/actions/actions.py b/ceph-rbd-mirror/src/actions/actions.py
new file mode 100755
index 00000000..163d3585
--- /dev/null
+++ b/ceph-rbd-mirror/src/actions/actions.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+# Load basic layer module from $CHARM_DIR/lib
+sys.path.append('lib')
+from charms.layer import basic
+
+# setup module loading from charm venv
+basic.bootstrap_charm_deps()
+
+import charms.reactive as reactive
+import charmhelpers.core as ch_core
+import charms_openstack.bus
+import charms_openstack.charm
+
+# load reactive interfaces
+reactive.bus.discover()
+# load Endpoint based interface data
+ch_core.hookenv._run_atstart()
+
+# load charm class
+charms_openstack.bus.discover()
+
+
+def rbd_mirror_action(args):
+    """Perform RBD command on pools in local Ceph endpoint."""
+    action_name = os.path.basename(args[0])
+    with charms_openstack.charm.provide_charm_instance() as charm:
+        ceph_local = reactive.endpoint_from_name('ceph-local')
+        pools = (pool for pool, attrs in ceph_local.pools.items()
+                 if 'rbd' in attrs['applications'])
+        result = []
+        cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name]
+        if ch_core.hookenv.action_get('force'):
+            cmd += ['--force']
+        for pool in pools:
+            output = subprocess.check_output(cmd + [pool],
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            result.append('{}: {}'.format(pool, output.rstrip()))
+        ch_core.hookenv.action_set({'output': '\n'.join(result)})
+
+
+def refresh_pools(args):
+    """Refresh list of pools from Ceph.
+
+    This is done by updating data on relations to ceph-mons which lead to them
+    updating the relation data they have with us as a response.
+
+    Due to how the reactive framework handles publishing of relation data we
+    must do this by setting a flag and running the reactive handlers, emulating
+    a full hook execution.
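+
+    (Mechanism sketch, assuming the stock charms.reactive API: set_flag()
+    records the flag in the unitdata key/value store, _KV.flush() persists
+    it to disk, and reactive.main() then dispatches any handlers gated on
+    the 'refresh.pools' flag, just as a normal hook invocation would.)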
+ """ + if not reactive.is_flag_set('leadership.is_leader'): + ch_core.hookenv.action_fail('run action on the leader unit') + return + + # set and flush flag to disk + reactive.set_flag('refresh.pools') + ch_core.unitdata._KV.flush() + + # run reactive handlers to deal with flag + return reactive.main() + + +ACTIONS = { + 'demote': rbd_mirror_action, + 'promote': rbd_mirror_action, + 'refresh-pools': refresh_pools, +} + + +def main(args): + action_name = os.path.basename(args[0]) + try: + action = ACTIONS[action_name] + except KeyError: + return 'Action {} is undefined'.format(action_name) + + try: + action(args) + except Exception as e: + ch_core.hookenv.action_fail(str(e)) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/ceph-rbd-mirror/src/actions/demote b/ceph-rbd-mirror/src/actions/demote new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-rbd-mirror/src/actions/demote @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-rbd-mirror/src/actions/promote b/ceph-rbd-mirror/src/actions/promote new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-rbd-mirror/src/actions/promote @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-rbd-mirror/src/actions/refresh-pools b/ceph-rbd-mirror/src/actions/refresh-pools new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-rbd-mirror/src/actions/refresh-pools @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 6ba018c7..a31dd1c0 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -86,6 +86,17 @@ def render_stuff(*args): charm_instance.assess_status() +@reactive.when('leadership.is_leader') +@reactive.when('refresh.pools') +@reactive.when('ceph-local.available') +@reactive.when('ceph-remote.available') +def refresh_pools(): + for endpoint in 'ceph-local', 'ceph-remote': + endpoint = reactive.endpoint_from_name(endpoint) + endpoint.refresh_pools() + reactive.clear_flag('refresh.pools') + + @reactive.when('leadership.is_leader') @reactive.when('config.rendered') @reactive.when('ceph-local.available') diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py new file mode 100644 index 00000000..7fb3517d --- /dev/null +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -0,0 +1,115 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
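+
+# These tests cover the action entry points in src/actions/actions.py.
+# charms.layer is stubbed out via sys.modules below so that the module
+# can be imported without a built charm venv on the test host.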
+ +import collections +import mock +import sys + +sys.modules['charms.layer'] = mock.MagicMock() +import actions.actions as actions +import charm.openstack.ceph_rbd_mirror as crm + +import charms_openstack.test_utils as test_utils + + +class TestCephRBDMirrorActions(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release(crm.CephRBDMirrorCharm.release) + self.crm_charm = mock.MagicMock() + self.patch_object(actions.charms_openstack.charm, + 'provide_charm_instance', + new=mock.MagicMock()) + self.provide_charm_instance().__enter__.return_value = \ + self.crm_charm + self.provide_charm_instance().__exit__.return_value = None + + def test_rbd_mirror_action(self): + self.patch_object(actions.reactive, 'endpoint_from_name') + self.patch_object(actions.ch_core.hookenv, 'action_get') + self.patch_object(actions.subprocess, 'check_output') + self.patch_object(actions.ch_core.hookenv, 'action_set') + endpoint = mock.MagicMock() + endpoint.pools = collections.OrderedDict( + {'apool': {'applications': {'rbd': {}}}, + 'bpool': {'applications': {'rbd': {}}}}) + self.endpoint_from_name.return_value = endpoint + self.crm_charm.ceph_id = 'acephid' + self.action_get.return_value = False + self.check_output.return_value = 'Promoted 0 mirrored images\n' + actions.rbd_mirror_action(['promote']) + self.endpoint_from_name.assert_called_once_with('ceph-local') + self.action_get.assert_called_once_with('force') + self.check_output.assert_has_calls([ + mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', + 'apool'], + stderr=actions.subprocess.STDOUT, + universal_newlines=True), + mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', + 'bpool'], + stderr=actions.subprocess.STDOUT, + universal_newlines=True), + ], any_order=True) + # the order the pools has in the output string is undefined + self.action_set.assert_called_once_with( + {'output': mock.ANY}) + for entry in self.action_set.call_args[0][0]['output'].split('\n'): + assert (entry == 'apool: Promoted 0 mirrored images' or + entry == 'bpool: Promoted 0 mirrored images') + self.action_get.return_value = True + self.check_output.reset_mock() + actions.rbd_mirror_action(['promote']) + self.check_output.assert_has_calls([ + mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', + '--force', 'apool'], + stderr=actions.subprocess.STDOUT, + universal_newlines=True), + mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', + '--force', 'bpool'], + stderr=actions.subprocess.STDOUT, + universal_newlines=True), + ], any_order=True) + + def test_refresh_pools(self): + self.patch_object(actions.reactive, 'is_flag_set') + self.patch_object(actions.ch_core.hookenv, 'action_fail') + self.is_flag_set.return_value = False + actions.refresh_pools([]) + self.is_flag_set.assert_called_once_with('leadership.is_leader') + self.action_fail.assert_called_once_with( + 'run action on the leader unit') + self.is_flag_set.return_value = True + self.patch_object(actions.reactive, 'set_flag') + self.patch_object(actions.ch_core.unitdata, '_KV') + self.patch_object(actions.reactive, 'main') + actions.refresh_pools([]) + self.set_flag.assert_called_once_with('refresh.pools') + self._KV.flush.assert_called_once_with() + self.main.assert_called_once_with() + + def test_main(self): + self.patch_object(actions, 'ACTIONS') + self.patch_object(actions.ch_core.hookenv, 'action_fail') + args = ['/non-existent/path/to/charm/binary/promote'] + function = mock.MagicMock() + self.ACTIONS.__getitem__.return_value = function + 
actions.main(args) + function.assert_called_once_with(args) + self.ACTIONS.__getitem__.side_effect = KeyError + self.assertEqual(actions.main(args), 'Action promote is undefined') + self.ACTIONS.__getitem__.side_effect = None + function.side_effect = Exception('random exception') + actions.main(args) + self.action_fail.assert_called_once_with('random exception') diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index 0b5a5f7d..3d798c1e 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -45,6 +45,12 @@ def test_hooks(self): 'ceph-local.available', 'ceph-remote.available', ), + 'refresh_pools': ( + 'leadership.is_leader', + 'refresh.pools', + 'ceph-local.available', + 'ceph-remote.available', + ), }, 'when_all': { 'request_keys': ( @@ -140,6 +146,21 @@ def test_render_stuff(self): ]) self.crm_charm.assess_status.assert_called_once_with() + def test_refresh_pools(self): + self.patch_object(handlers.reactive, 'endpoint_from_name') + self.patch_object(handlers.reactive, 'clear_flag') + endpoint_local = mock.MagicMock() + endpoint_remote = mock.MagicMock() + self.endpoint_from_name.side_effect = [endpoint_local, endpoint_remote] + handlers.refresh_pools() + self.endpoint_from_name.assert_has_calls([ + mock.call('ceph-local'), + mock.call('ceph-remote'), + ]) + endpoint_local.refresh_pools.assert_called_once_with() + endpoint_remote.refresh_pools.assert_called_once_with() + self.clear_flag.assert_called_once_with('refresh.pools') + def test_configure_pools(self): self.patch_object(handlers.reactive, 'endpoint_from_flag') endpoint_local = mock.MagicMock() From 5cf97b033df0f4e65ab1bc37f1de3d92a8d4fc5d Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 6 Mar 2019 11:09:02 +0100 Subject: [PATCH 1699/2699] Add configuration of public/cluster network Fix syslog configuration. 
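
As an illustration (the values below are assumed, not taken from a real
deployment): with use_syslog=False and a local cluster that publishes a
public network of 10.10.0.0/24 and no cluster network, the tail of the
rendered ceph.conf becomes roughly:

    log to syslog = False
    err to syslog = False
    clog to syslog = False

    public network = 10.10.0.0/24

The "cluster network" line is only emitted when the endpoint publishes a
cluster_network value, per the {% if %} guard in the template.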
Depends-On: Ia49f4921e772376763be178e11d7777676ccc8da
Change-Id: Ic7b0a724052b5d61694993c0728e7e4bdc0d9bfc
---
 ceph-rbd-mirror/src/metadata.yaml         |  3 +++
 ceph-rbd-mirror/src/templates/ceph.conf   | 16 +++++++---------
 ceph-rbd-mirror/src/templates/remote.conf | 16 +++++++---------
 3 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml
index b94a0a33..421f3849 100644
--- a/ceph-rbd-mirror/src/metadata.yaml
+++ b/ceph-rbd-mirror/src/metadata.yaml
@@ -22,6 +22,9 @@ series:
 - xenial
 - bionic
 - cosmic
+extra-bindings:
+  public:
+  cluster:
 subordinate: false
 provides:
   nrpe-external-master:
diff --git a/ceph-rbd-mirror/src/templates/ceph.conf b/ceph-rbd-mirror/src/templates/ceph.conf
index b2b74843..fb7a2847 100644
--- a/ceph-rbd-mirror/src/templates/ceph.conf
+++ b/ceph-rbd-mirror/src/templates/ceph.conf
@@ -9,13 +9,11 @@ auth_supported = {{ ceph_local.auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ ceph_local.monitors }}
 {% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
+log to syslog = {{ options.use_syslog }}
+err to syslog = {{ options.use_syslog }}
+clog to syslog = {{ options.use_syslog }}
 
-[client]
-{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.items() -%}
-{{ key }} = {{ value }}
-{% endfor -%}
-{%- endif %}
+public network = {{ ceph_local.public_network }}
+{% if ceph_local.cluster_network %}
+cluster network = {{ ceph_local.cluster_network }}
+{% endif -%}
diff --git a/ceph-rbd-mirror/src/templates/remote.conf b/ceph-rbd-mirror/src/templates/remote.conf
index dd915378..64ab359f 100644
--- a/ceph-rbd-mirror/src/templates/remote.conf
+++ b/ceph-rbd-mirror/src/templates/remote.conf
@@ -9,13 +9,11 @@ auth_supported = {{ ceph_remote.auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ ceph_remote.monitors }}
 {% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
+log to syslog = {{ options.use_syslog }}
+err to syslog = {{ options.use_syslog }}
+clog to syslog = {{ options.use_syslog }}
 
-[client]
-{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.items() -%}
-{{ key }} = {{ value }}
-{% endfor -%}
-{%- endif %}
+public network = {{ ceph_remote.public_network }}
+{% if ceph_remote.cluster_network %}
+cluster network = {{ ceph_remote.cluster_network }}
+{% endif -%}

From a6784a98a938464eaf4f71f31783c9b5209d3a05 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Wed, 6 Mar 2019 13:41:15 +0100
Subject: [PATCH 1700/2699] Configure stestr directly

Change-Id: If015f76a839733e1876214c002ec2c9c75f2e12a
---
 ceph-osd/.stestr.conf          | 3 +++
 ceph-osd/.testr.conf           | 8 --------
 ceph-osd/test-requirements.txt | 2 +-
 ceph-osd/tox.ini               | 4 ++--
 4 files changed, 6 insertions(+), 11 deletions(-)
 create mode 100644 ceph-osd/.stestr.conf
 delete mode 100644 ceph-osd/.testr.conf

diff --git a/ceph-osd/.stestr.conf b/ceph-osd/.stestr.conf
new file mode 100644
index 00000000..5fcccaca
--- /dev/null
+++ b/ceph-osd/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./unit_tests
+top_dir=./
diff --git a/ceph-osd/.testr.conf b/ceph-osd/.testr.conf
deleted file mode 100644
index 801646bb..00000000
--- a/ceph-osd/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
-    OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-    OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index bb01e1f6..b378cb0c 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -5,7 +5,7 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests==2.18.4 python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index b2ac067b..f5b4a7e2 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = ostestr {posargs} +commands = stestr run {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* deps = -r{toxinidir}/test-requirements.txt @@ -52,7 +52,7 @@ setenv = PYTHON=coverage run commands = coverage erase - ostestr {posargs} + stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml From 0f0aa13ae75919311e8ceaae066525b2be5e025a Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Thu, 7 Mar 2019 17:10:16 -0500 Subject: [PATCH 1701/2699] Replace ostestr with stestr in testing framework. A system upgrade broke ostestr. We can fix it by just calling stestr directly. Change-Id: I8d24df7c6baa225f79501024c1872a07ae022e1a --- ceph-fs/.stestr.conf | 3 +++ ceph-fs/.testr.conf | 8 -------- ceph-fs/src/test-requirements.txt | 2 +- ceph-fs/test-requirements.txt | 2 +- ceph-fs/tox.ini | 6 +++--- 5 files changed, 8 insertions(+), 13 deletions(-) create mode 100644 ceph-fs/.stestr.conf delete mode 100644 ceph-fs/.testr.conf diff --git a/ceph-fs/.stestr.conf b/ceph-fs/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-fs/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-fs/.testr.conf b/ceph-fs/.testr.conf deleted file mode 100644 index 801646bb..00000000 --- a/ceph-fs/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index f0138637..e56db9a6 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -5,7 +5,7 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index ca62003b..14b380e4 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -4,7 +4,7 @@ # # Lint and unit test requirements flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests>=2.18.4 charms.reactive mock>=1.2 diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 8cb1c96f..0667434e 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -35,12 +35,12 @@ commands = true [testenv:py34] basepython = python3.4 deps = -r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} +commands = stestr run {posargs} [testenv:py35] basepython = python3.5 deps = 
-r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} +commands = stestr run {posargs} [testenv:pep8] basepython = python3.5 @@ -58,7 +58,7 @@ setenv = PYTHON=coverage run commands = coverage erase - ostestr {posargs} + stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml From 752d637ee343ddf417b604f25abf917a44f3f225 Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Thu, 7 Mar 2019 17:10:22 -0500 Subject: [PATCH 1702/2699] Replace ostestr with stestr in testing framework. A system upgrade broke ostestr. We can fix it by just calling stestr directly. Change-Id: Id8d098f97ffc8b0cfcf9a1f76e6240dc51e0eed3 --- ceph-mon/test-requirements.txt | 2 +- ceph-mon/tox.ini | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 2b2c0e11..272ce1da 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -5,7 +5,7 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 8acbe61a..795d90ba 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = ostestr {posargs} +commands = stestr run {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* @@ -57,7 +57,7 @@ setenv = PYTHON=coverage run commands = coverage erase - ostestr {posargs} + stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml From 90cfc3425547d46a7302f1f431fd2ac299eea097 Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Thu, 7 Mar 2019 17:10:29 -0500 Subject: [PATCH 1703/2699] Replace ostestr with stestr in testing framework. A system upgrade broke ostestr. We can fix it by just calling stestr directly. 
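
As a sketch of the resulting workflow (invocations are illustrative):

    tox -e py35   # tox now runs "stestr run {posargs}"
    stestr run    # direct invocation; discovery uses .stestr.conf

A test selection regex can still be passed through as a positional
argument, e.g. "stestr run test_ceph_hooks".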
Change-Id: I238730d47f05ec18cc1efd7ab20b88a8c29a2858 --- ceph-proxy/.stestr.conf | 3 +++ ceph-proxy/.testr.conf | 8 -------- ceph-proxy/test-requirements.txt | 2 +- ceph-proxy/tox.ini | 2 +- 4 files changed, 5 insertions(+), 10 deletions(-) create mode 100644 ceph-proxy/.stestr.conf delete mode 100644 ceph-proxy/.testr.conf diff --git a/ceph-proxy/.stestr.conf b/ceph-proxy/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-proxy/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-proxy/.testr.conf b/ceph-proxy/.testr.conf deleted file mode 100644 index 801646bb..00000000 --- a/ceph-proxy/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 2b2c0e11..272ce1da 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -5,7 +5,7 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests>=2.18.4 # BEGIN: Amulet OpenStack Charm Helper Requirements # Liberty client lower constraints diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 1adf5d10..68ab28e6 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -12,7 +12,7 @@ setenv = VIRTUAL_ENV={envdir} AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = ostestr {posargs} +commands = stestr run {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* From ec6acd73fe2258ae2197e58888b205bc1cae2402 Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Thu, 7 Mar 2019 17:10:35 -0500 Subject: [PATCH 1704/2699] Replace ostestr with stestr in testing framework. A system upgrade broke ostestr. We can fix it by just calling stestr directly. Change-Id: I924488478eeb61a3cb2976e294c7015f933d8258 --- ceph-radosgw/.stestr.conf | 2 +- ceph-radosgw/test-requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/.stestr.conf b/ceph-radosgw/.stestr.conf index c963e1f3..5fcccaca 100644 --- a/ceph-radosgw/.stestr.conf +++ b/ceph-radosgw/.stestr.conf @@ -1,3 +1,3 @@ [DEFAULT] test_path=./unit_tests -top_path=./ +top_dir=./ diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index bb01e1f6..b378cb0c 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -5,7 +5,7 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests==2.18.4 python-ceilometerclient>=1.5.0 python-cinderclient>=1.4.0 From d84eabe8e79461e5e52c8def15cdfae60ef1346c Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Thu, 7 Mar 2019 17:10:41 -0500 Subject: [PATCH 1705/2699] Replace ostestr with stestr in testing framework. A system upgrade broke ostestr. We can fix it by just calling stestr directly. 
Change-Id: Iff4d20ff6ab6c624c5113e3e2dcba7ce02886aeb --- ceph-rbd-mirror/src/test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index 4578f719..e4401e40 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -5,6 +5,6 @@ charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 +stestr>=2.2.0 requests>=2.18.4 git+https://github.com/openstack-charmers/zaza.git#egg=zaza From 26be9f4c9e52c46e03180ada45bd105170f599d9 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 1 Feb 2019 12:17:30 +0000 Subject: [PATCH 1706/2699] Convert the charm to Python3 * Move charmhelpers to the root of the charm * sync charmhelpers to latest version Change-Id: Id0b838f0206635cf912d205f2fb6fda7b31d0dfe --- ceph-proxy/.zuul.yaml | 3 +- ceph-proxy/actions/create-cache-tier | 42 +++- ceph-proxy/actions/create-cache-tier.py | 16 +- ceph-proxy/actions/create-erasure-profile | 24 ++- ceph-proxy/actions/create-pool | 18 +- ceph-proxy/actions/delete-erasure-profile | 15 +- ceph-proxy/actions/delete-pool | 19 +- ceph-proxy/actions/get-erasure-profile | 15 +- ceph-proxy/actions/list-erasure-profiles | 21 +- ceph-proxy/actions/list-pools | 21 +- ceph-proxy/actions/pool-get | 21 +- ceph-proxy/actions/pool-set | 19 +- ceph-proxy/actions/pool-statistics | 20 +- ceph-proxy/actions/remove-cache-tier | 42 +++- ceph-proxy/actions/remove-cache-tier.py | 19 +- ceph-proxy/actions/remove-pool-snapshot | 20 +- ceph-proxy/actions/rename-pool | 20 +- ceph-proxy/actions/set-pool-max-bytes | 20 +- ceph-proxy/actions/snapshot-pool | 20 +- ceph-proxy/charm-helpers-hooks.yaml | 2 +- .../{hooks => }/charmhelpers/__init__.py | 0 .../{hooks => }/charmhelpers/cli/__init__.py | 0 .../{hooks => }/charmhelpers/cli/benchmark.py | 0 .../{hooks => }/charmhelpers/cli/commands.py | 0 .../{hooks => }/charmhelpers/cli/hookenv.py | 0 .../{hooks => }/charmhelpers/cli/host.py | 0 .../{hooks => }/charmhelpers/cli/unitdata.py | 9 + .../charmhelpers/contrib/__init__.py | 0 .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 0 .../contrib/charmsupport/volumes.py | 0 .../contrib/hardening/README.hardening.md | 0 .../contrib/hardening/__init__.py | 0 .../contrib/hardening/apache/__init__.py | 0 .../hardening/apache/checks/__init__.py | 0 .../contrib/hardening/apache/checks/config.py | 0 .../apache/templates/99-hardening.conf | 0 .../hardening/apache/templates/__init__.py | 0 .../hardening/apache/templates/alias.conf | 0 .../contrib/hardening/audits/__init__.py | 0 .../contrib/hardening/audits/apache.py | 0 .../contrib/hardening/audits/apt.py | 0 .../contrib/hardening/audits/file.py | 0 .../contrib/hardening/defaults/__init__.py | 0 .../contrib/hardening/defaults/apache.yaml | 0 .../hardening/defaults/apache.yaml.schema | 0 .../contrib/hardening/defaults/mysql.yaml | 0 .../hardening/defaults/mysql.yaml.schema | 0 .../contrib/hardening/defaults/os.yaml | 0 .../contrib/hardening/defaults/os.yaml.schema | 0 .../contrib/hardening/defaults/ssh.yaml | 0 .../hardening/defaults/ssh.yaml.schema | 0 .../charmhelpers/contrib/hardening/harden.py | 0 .../contrib/hardening/host/__init__.py | 0 .../contrib/hardening/host/checks/__init__.py | 0 .../contrib/hardening/host/checks/apt.py | 0 .../contrib/hardening/host/checks/limits.py | 0 .../contrib/hardening/host/checks/login.py | 0 .../hardening/host/checks/minimize_access.py | 0 
.../contrib/hardening/host/checks/pam.py | 0 .../contrib/hardening/host/checks/profile.py | 0 .../hardening/host/checks/securetty.py | 0 .../hardening/host/checks/suid_sgid.py | 0 .../contrib/hardening/host/checks/sysctl.py | 0 .../hardening/host/templates/10.hardcore.conf | 0 .../hardening/host/templates/99-hardening.sh | 0 .../host/templates/99-juju-hardening.conf | 0 .../hardening/host/templates/__init__.py | 0 .../hardening/host/templates/login.defs | 0 .../contrib/hardening/host/templates/modules | 0 .../hardening/host/templates/passwdqc.conf | 0 .../host/templates/pinerolo_profile.sh | 0 .../hardening/host/templates/securetty | 0 .../contrib/hardening/host/templates/tally2 | 0 .../contrib/hardening/mysql/__init__.py | 0 .../hardening/mysql/checks/__init__.py | 0 .../contrib/hardening/mysql/checks/config.py | 0 .../hardening/mysql/templates/__init__.py | 0 .../hardening/mysql/templates/hardening.cnf | 0 .../contrib/hardening/ssh/__init__.py | 0 .../contrib/hardening/ssh/checks/__init__.py | 0 .../contrib/hardening/ssh/checks/config.py | 0 .../hardening/ssh/templates/__init__.py | 0 .../hardening/ssh/templates/ssh_config | 0 .../hardening/ssh/templates/sshd_config | 0 .../contrib/hardening/templating.py | 0 .../charmhelpers/contrib/hardening/utils.py | 0 .../charmhelpers/contrib/network/__init__.py | 0 .../charmhelpers/contrib/network/ip.py | 0 .../contrib/openstack/__init__.py | 0 .../contrib/openstack/alternatives.py | 0 .../contrib/openstack/exceptions.py | 0 .../charmhelpers/contrib/openstack/utils.py | 4 +- .../charmhelpers/contrib/python.py | 0 .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/ceph.py | 191 ++++++++++++------ .../contrib/storage/linux/loopback.py | 0 .../charmhelpers/contrib/storage/linux/lvm.py | 0 .../contrib/storage/linux/utils.py | 0 .../{hooks => }/charmhelpers/core/__init__.py | 0 .../charmhelpers/core/decorators.py | 0 .../{hooks => }/charmhelpers/core/files.py | 0 .../{hooks => }/charmhelpers/core/fstab.py | 0 .../{hooks => }/charmhelpers/core/hookenv.py | 74 +++++++ .../{hooks => }/charmhelpers/core/host.py | 0 .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 0 .../charmhelpers/core/host_factory/ubuntu.py | 0 .../{hooks => }/charmhelpers/core/hugepage.py | 0 .../{hooks => }/charmhelpers/core/kernel.py | 0 .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 0 .../core/kernel_factory/ubuntu.py | 0 .../charmhelpers/core/services/__init__.py | 0 .../charmhelpers/core/services/base.py | 0 .../charmhelpers/core/services/helpers.py | 0 .../{hooks => }/charmhelpers/core/strutils.py | 0 .../{hooks => }/charmhelpers/core/sysctl.py | 0 .../charmhelpers/core/templating.py | 0 .../{hooks => }/charmhelpers/core/unitdata.py | 0 .../charmhelpers/fetch/__init__.py | 0 .../charmhelpers/fetch/archiveurl.py | 0 .../{hooks => }/charmhelpers/fetch/bzrurl.py | 0 .../{hooks => }/charmhelpers/fetch/centos.py | 0 .../{hooks => }/charmhelpers/fetch/giturl.py | 0 .../charmhelpers/fetch/python/__init__.py | 0 .../charmhelpers/fetch/python/debug.py | 0 .../charmhelpers/fetch/python/packages.py | 0 .../charmhelpers/fetch/python/rpdb.py | 0 .../charmhelpers/fetch/python/version.py | 0 .../{hooks => }/charmhelpers/fetch/snap.py | 0 .../{hooks => }/charmhelpers/fetch/ubuntu.py | 183 +++++++++++++---- .../{hooks => }/charmhelpers/osplatform.py | 0 .../charmhelpers/payload/__init__.py | 0 .../{hooks => }/charmhelpers/payload/execd.py | 0 ceph-proxy/hooks/ceph.py | 
19 +- ceph-proxy/hooks/ceph_hooks.py | 14 +- ceph-proxy/hooks/install | 2 +- ceph-proxy/hooks/utils.py | 4 +- ceph-proxy/tox.ini | 3 +- ceph-proxy/unit_tests/__init__.py | 19 +- ceph-proxy/unit_tests/test_ceph.py | 10 +- ceph-proxy/unit_tests/test_ceph_hooks.py | 4 +- ceph-proxy/unit_tests/test_utils.py | 2 +- 145 files changed, 748 insertions(+), 187 deletions(-) mode change 120000 => 100755 ceph-proxy/actions/create-cache-tier mode change 120000 => 100755 ceph-proxy/actions/remove-cache-tier rename ceph-proxy/{hooks => }/charmhelpers/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/benchmark.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/commands.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/hookenv.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/host.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/cli/unitdata.py (80%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/charmsupport/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/charmsupport/nrpe.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/charmsupport/volumes.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/README.hardening.md (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/checks/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/checks/config.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/templates/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/apache/templates/alias.conf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/audits/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/audits/apache.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/audits/apt.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/audits/file.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/apache.yaml (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/apache.yaml.schema (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/mysql.yaml (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/os.yaml (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/os.yaml.schema (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/ssh.yaml (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/harden.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/apt.py (100%) rename ceph-proxy/{hooks => 
}/charmhelpers/contrib/hardening/host/checks/limits.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/login.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/minimize_access.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/pam.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/profile.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/securetty.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/suid_sgid.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/checks/sysctl.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/99-hardening.sh (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/login.defs (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/modules (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/passwdqc.conf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/securetty (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/host/templates/tally2 (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/mysql/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/mysql/checks/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/mysql/checks/config.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/mysql/templates/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/checks/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/checks/config.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/templates/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/templates/ssh_config (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/ssh/templates/sshd_config (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/templating.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/hardening/utils.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/network/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/network/ip.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/openstack/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/openstack/alternatives.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/openstack/exceptions.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/openstack/utils.py (99%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/python.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/storage/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/storage/linux/__init__.py (100%) rename 
ceph-proxy/{hooks => }/charmhelpers/contrib/storage/linux/ceph.py (89%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/storage/linux/loopback.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/storage/linux/lvm.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/contrib/storage/linux/utils.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/decorators.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/files.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/fstab.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/hookenv.py (93%) rename ceph-proxy/{hooks => }/charmhelpers/core/host.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/host_factory/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/host_factory/centos.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/host_factory/ubuntu.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/hugepage.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/kernel.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/kernel_factory/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/kernel_factory/centos.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/kernel_factory/ubuntu.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/services/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/services/base.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/services/helpers.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/strutils.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/sysctl.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/templating.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/core/unitdata.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/archiveurl.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/bzrurl.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/centos.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/giturl.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/python/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/python/debug.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/python/packages.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/python/rpdb.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/python/version.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/snap.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/fetch/ubuntu.py (76%) rename ceph-proxy/{hooks => }/charmhelpers/osplatform.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/payload/__init__.py (100%) rename ceph-proxy/{hooks => }/charmhelpers/payload/execd.py (100%) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index aa9c508f..7051aeeb 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python-charm-jobs - - openstack-python35-jobs-nonvoting + - python35-charm-jobs diff --git a/ceph-proxy/actions/create-cache-tier b/ceph-proxy/actions/create-cache-tier deleted file mode 120000 index 2a7e4346..00000000 --- a/ceph-proxy/actions/create-cache-tier +++ /dev/null @@ -1 +0,0 @@ -create-cache-tier.py \ No newline at end of file diff --git a/ceph-proxy/actions/create-cache-tier b/ceph-proxy/actions/create-cache-tier new file mode 100755 index 00000000..e8170cf2 --- /dev/null +++ 
b/ceph-proxy/actions/create-cache-tier @@ -0,0 +1,41 @@ +#!/usr/bin/python +__author__ = 'chris' +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + + +def make_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + cache_mode = action_get("cache-mode") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Please create {} pool before calling create-cache-tier".format( + backer_pool)) + action_fail("create-cache-tier failed. Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Please create {} pool before calling create-cache-tier".format( + cache_pool)) + action_fail("create-cache-tier failed. Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) + except CalledProcessError as err: + log("Add cache tier failed with message: {}".format( + err.message)) + action_fail("create-cache-tier failed. Add cache tier failed with " + "message: {}".format(err.message)) + + +if __name__ == '__main__': + make_cache_tier() diff --git a/ceph-proxy/actions/create-cache-tier.py b/ceph-proxy/actions/create-cache-tier.py index e8170cf2..928e9418 100755 --- a/ceph-proxy/actions/create-cache-tier.py +++ b/ceph-proxy/actions/create-cache-tier.py @@ -1,9 +1,21 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-proxy/actions/create-erasure-profile b/ceph-proxy/actions/create-erasure-profile index 2b00b588..7400ccd3 100755 --- a/ceph-proxy/actions/create-erasure-profile +++ b/ceph-proxy/actions/create-erasure-profile @@ -1,8 +1,20 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile from charmhelpers.core.hookenv import action_get, log, action_fail @@ -29,7 +41,7 @@ def make_erasure_profile(): coding_chunks=m, failure_domain=failure_domain) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Create erasure profile failed with " "message: {}".format(e.message)) elif plugin == "isa": @@ -43,7 +55,7 @@ def make_erasure_profile(): coding_chunks=m, failure_domain=failure_domain) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Create erasure profile failed with " "message: {}".format(e.message)) elif plugin == "local": @@ -59,7 +71,7 @@ def make_erasure_profile(): locality=l, 
failure_domain=failure_domain) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Create erasure profile failed with " "message: {}".format(e.message)) elif plugin == "shec": @@ -75,7 +87,7 @@ def make_erasure_profile(): durability_estimator=c, failure_domain=failure_domain) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Create erasure profile failed with " "message: {}".format(e.message)) else: diff --git a/ceph-proxy/actions/create-pool b/ceph-proxy/actions/create-pool index 4d1d2148..0dd0be36 100755 --- a/ceph-proxy/actions/create-pool +++ b/ceph-proxy/actions/create-pool @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool @@ -31,7 +43,7 @@ def create_pool(): "is allowed".format(pool_type)) except CalledProcessError as e: action_fail("Pool creation failed because of a failed process. " - "Ret Code: {} Message: {}".format(e.returncode, e.message)) + "Ret Code: {} Message: {}".format(e.returncode, str(e))) if __name__ == '__main__': diff --git a/ceph-proxy/actions/delete-erasure-profile b/ceph-proxy/actions/delete-erasure-profile index 075c410e..8651d07a 100755 --- a/ceph-proxy/actions/delete-erasure-profile +++ b/ceph-proxy/actions/delete-erasure-profile @@ -1,10 +1,21 @@ -#!/usr/bin/python +#!/usr/bin/env python3 from subprocess import CalledProcessError __author__ = 'chris' +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-proxy/actions/delete-pool b/ceph-proxy/actions/delete-pool index 3d655076..68b89b23 100755 --- a/ceph-proxy/actions/delete-pool +++ b/ceph-proxy/actions/delete-pool @@ -1,7 +1,18 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) import rados from ceph_ops import connect @@ -20,8 +31,8 @@ def remove_pool(): rados.NoData, rados.NoSpace, rados.PermissionError) as e: - log(e) - action_fail(e) + log(str(e)) + action_fail(str(e)) if __name__ == '__main__': diff --git a/ceph-proxy/actions/get-erasure-profile b/ceph-proxy/actions/get-erasure-profile index 29ece59d..39947bb5 100755 --- a/ceph-proxy/actions/get-erasure-profile +++ b/ceph-proxy/actions/get-erasure-profile @@ -1,8 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = 
os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile from charmhelpers.core.hookenv import action_get, action_set diff --git a/ceph-proxy/actions/list-erasure-profiles b/ceph-proxy/actions/list-erasure-profiles index cf6dfa09..fd0586fb 100755 --- a/ceph-proxy/actions/list-erasure-profiles +++ b/ceph-proxy/actions/list-erasure-profiles @@ -1,9 +1,20 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' -import sys +import os from subprocess import check_output, CalledProcessError +import sys + +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) -sys.path.append('hooks') +_add_path(_hooks) +_add_path(_root) from charmhelpers.core.hookenv import action_get, log, action_set, action_fail @@ -17,6 +28,6 @@ if __name__ == '__main__': 'ls']).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Listing erasure profiles failed with error: {}".format( - e.message)) + str(e))) diff --git a/ceph-proxy/actions/list-pools b/ceph-proxy/actions/list-pools index 102667cf..67c1aed0 100755 --- a/ceph-proxy/actions/list-pools +++ b/ceph-proxy/actions/list-pools @@ -1,9 +1,20 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' -import sys +import os from subprocess import check_output, CalledProcessError +import sys + +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) -sys.path.append('hooks') +_add_path(_hooks) +_add_path(_root) from charmhelpers.core.hookenv import log, action_set, action_fail @@ -13,5 +24,5 @@ if __name__ == '__main__': 'osd', 'lspools']).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: - log(e) - action_fail("List pools failed with error: {}".format(e.message)) + log(str(e)) + action_fail("List pools failed with error: {}".format(str(e))) diff --git a/ceph-proxy/actions/pool-get b/ceph-proxy/actions/pool-get index e4f924b9..3a42ab4d 100755 --- a/ceph-proxy/actions/pool-get +++ b/ceph-proxy/actions/pool-get @@ -1,9 +1,20 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' -import sys +import os from subprocess import check_output, CalledProcessError +import sys + +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) -sys.path.append('hooks') +_add_path(_hooks) +_add_path(_root) from charmhelpers.core.hookenv import log, action_set, action_get, action_fail @@ -15,5 +26,5 @@ if __name__ == '__main__': 'osd', 'pool', 'get', name, key]).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: - log(e) - action_fail("Pool get failed with message: {}".format(e.message)) + log(str(e)) + action_fail("Pool get failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/pool-set b/ceph-proxy/actions/pool-set index 1f6e13b8..8963c908 100755 
--- a/ceph-proxy/actions/pool-set +++ b/ceph-proxy/actions/pool-set @@ -1,8 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.core.hookenv import action_get, log, action_fail from ceph_broker import handle_set_pool_value @@ -18,6 +29,6 @@ if __name__ == '__main__': try: handle_set_pool_value(service='admin', request=request) except CalledProcessError as e: - log(e.message) + log(str(e)) action_fail("Setting pool key: {} and value: {} failed with " - "message: {}".format(key, value, e.message)) + "message: {}".format(key, value, str(e))) diff --git a/ceph-proxy/actions/pool-statistics b/ceph-proxy/actions/pool-statistics index 536c889a..403267f3 100755 --- a/ceph-proxy/actions/pool-statistics +++ b/ceph-proxy/actions/pool-statistics @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import check_output, CalledProcessError from charmhelpers.core.hookenv import log, action_set, action_fail @@ -11,5 +23,5 @@ if __name__ == '__main__': 'df']).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: - log(e) - action_fail("ceph df failed with message: {}".format(e.message)) + log(str(e)) + action_fail("ceph df failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/remove-cache-tier b/ceph-proxy/actions/remove-cache-tier deleted file mode 120000 index 136c0f06..00000000 --- a/ceph-proxy/actions/remove-cache-tier +++ /dev/null @@ -1 +0,0 @@ -remove-cache-tier.py \ No newline at end of file diff --git a/ceph-proxy/actions/remove-cache-tier b/ceph-proxy/actions/remove-cache-tier new file mode 100755 index 00000000..79db9cf7 --- /dev/null +++ b/ceph-proxy/actions/remove-cache-tier @@ -0,0 +1,41 @@ +#!/usr/bin/python +from subprocess import CalledProcessError +import sys + +sys.path.append('hooks') + +from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists +from charmhelpers.core.hookenv import action_get, log, action_fail + +__author__ = 'chris' + + +def delete_cache_tier(): + backer_pool = action_get("backer-pool") + cache_pool = action_get("cache-pool") + + # Pre flight checks + if not pool_exists('admin', backer_pool): + log("Backer pool {} must exist before calling this".format( + backer_pool)) + action_fail("remove-cache-tier failed. Backer pool {} must exist " + "before calling this".format(backer_pool)) + + if not pool_exists('admin', cache_pool): + log("Cache pool {} must exist before calling this".format( + cache_pool)) + action_fail("remove-cache-tier failed. Cache pool {} must exist " + "before calling this".format(cache_pool)) + + pool = Pool(service='admin', name=backer_pool) + try: + pool.remove_cache_tier(cache_pool=cache_pool) + except CalledProcessError as err: + log("Removing the cache tier failed with message: {}".format( + err.message)) + action_fail("remove-cache-tier failed. 
Removing the cache tier failed " + "with message: {}".format(err.message)) + + +if __name__ == '__main__': + delete_cache_tier() diff --git a/ceph-proxy/actions/remove-cache-tier.py b/ceph-proxy/actions/remove-cache-tier.py index 79db9cf7..8c9b9375 100755 --- a/ceph-proxy/actions/remove-cache-tier.py +++ b/ceph-proxy/actions/remove-cache-tier.py @@ -1,8 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, log, action_fail @@ -32,9 +43,9 @@ def delete_cache_tier(): pool.remove_cache_tier(cache_pool=cache_pool) except CalledProcessError as err: log("Removing the cache tier failed with message: {}".format( - err.message)) + str(err))) action_fail("remove-cache-tier failed. Removing the cache tier failed " - "with message: {}".format(err.message)) + "with message: {}".format(str(err))) if __name__ == '__main__': diff --git a/ceph-proxy/actions/remove-pool-snapshot b/ceph-proxy/actions/remove-pool-snapshot index 387849ea..645ff07f 100755 --- a/ceph-proxy/actions/remove-pool-snapshot +++ b/ceph-proxy/actions/remove-pool-snapshot @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot @@ -14,6 +26,6 @@ if __name__ == '__main__': pool_name=name, snapshot_name=snapname) except CalledProcessError as e: - log(e) + log(str(e)) action_fail("Remove pool snapshot failed with message: {}".format( - e.message)) + str(e))) diff --git a/ceph-proxy/actions/rename-pool b/ceph-proxy/actions/rename-pool index 6fe088ec..3301830f 100755 --- a/ceph-proxy/actions/rename-pool +++ b/ceph-proxy/actions/rename-pool @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import rename_pool @@ -12,5 +24,5 @@ if __name__ == '__main__': try: rename_pool(service='admin', old_name=name, new_name=new_name) except CalledProcessError as e: - log(e) - action_fail("Renaming pool failed with message: {}".format(e.message)) + log(str(e)) + action_fail("Renaming pool failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/set-pool-max-bytes b/ceph-proxy/actions/set-pool-max-bytes index 86360885..c1550d41 100755 --- 
a/ceph-proxy/actions/set-pool-max-bytes +++ b/ceph-proxy/actions/set-pool-max-bytes @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import set_pool_quota @@ -12,5 +24,5 @@ if __name__ == '__main__': try: set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) except CalledProcessError as e: - log(e) - action_fail("Set pool quota failed with message: {}".format(e.message)) + log(str(e)) + action_fail("Set pool quota failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/snapshot-pool b/ceph-proxy/actions/snapshot-pool index a02619bf..0191bcc9 100755 --- a/ceph-proxy/actions/snapshot-pool +++ b/ceph-proxy/actions/snapshot-pool @@ -1,7 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import snapshot_pool @@ -14,5 +26,5 @@ if __name__ == '__main__': pool_name=name, snapshot_name=snapname) except CalledProcessError as e: - log(e) - action_fail("Snapshot pool failed with message: {}".format(e.message)) + log(str(e)) + action_fail("Snapshot pool failed with message: {}".format(str(e))) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index e4767c51..8f484eb1 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,5 +1,5 @@ repo: https://github.com/juju/charm-helpers -destination: hooks/charmhelpers +destination: charmhelpers include: - core - cli diff --git a/ceph-proxy/hooks/charmhelpers/__init__.py b/ceph-proxy/charmhelpers/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/__init__.py rename to ceph-proxy/charmhelpers/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/__init__.py b/ceph-proxy/charmhelpers/cli/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/cli/__init__.py rename to ceph-proxy/charmhelpers/cli/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/benchmark.py b/ceph-proxy/charmhelpers/cli/benchmark.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/cli/benchmark.py rename to ceph-proxy/charmhelpers/cli/benchmark.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/commands.py b/ceph-proxy/charmhelpers/cli/commands.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/cli/commands.py rename to ceph-proxy/charmhelpers/cli/commands.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/hookenv.py b/ceph-proxy/charmhelpers/cli/hookenv.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/cli/hookenv.py rename to ceph-proxy/charmhelpers/cli/hookenv.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/host.py 
b/ceph-proxy/charmhelpers/cli/host.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/cli/host.py rename to ceph-proxy/charmhelpers/cli/host.py diff --git a/ceph-proxy/hooks/charmhelpers/cli/unitdata.py b/ceph-proxy/charmhelpers/cli/unitdata.py similarity index 80% rename from ceph-proxy/hooks/charmhelpers/cli/unitdata.py rename to ceph-proxy/charmhelpers/cli/unitdata.py index c5728582..acce846f 100644 --- a/ceph-proxy/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-proxy/charmhelpers/cli/unitdata.py @@ -19,9 +19,16 @@ @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") def unitdata_cmd(subparser): nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') get_cmd.add_argument('key', help='Key to retrieve the value of') get_cmd.set_defaults(action='get', value=None) + + getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') + getrange_cmd.add_argument('key', metavar='prefix', + help='Prefix of the keys to retrieve') + getrange_cmd.set_defaults(action='getrange', value=None) + set_cmd = nested.add_parser('set', help='Store data') set_cmd.add_argument('key', help='Key to set') set_cmd.add_argument('value', help='Value to store') @@ -30,6 +37,8 @@ def unitdata_cmd(subparser): def _unitdata_cmd(action, key, value): if action == 'get': return unitdata.kv().get(key) + elif action == 'getrange': + return unitdata.kv().getrange(key) elif action == 'set': unitdata.kv().set(key, value) unitdata.kv().flush() diff --git a/ceph-proxy/hooks/charmhelpers/contrib/__init__.py b/ceph-proxy/charmhelpers/contrib/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/__init__.py rename to ceph-proxy/charmhelpers/contrib/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-proxy/charmhelpers/contrib/charmsupport/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/charmsupport/__init__.py rename to ceph-proxy/charmhelpers/contrib/charmsupport/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/charmsupport/nrpe.py rename to ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/charmsupport/volumes.py rename to ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-proxy/charmhelpers/contrib/hardening/README.hardening.md similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/README.hardening.md rename to ceph-proxy/charmhelpers/contrib/hardening/README.hardening.md diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/apache/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/__init__.py rename to 
ceph-proxy/charmhelpers/contrib/hardening/apache/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/apache/checks/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/checks/config.py rename to ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/ceph-proxy/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf rename to ceph-proxy/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/apache/templates/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/apache/templates/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-proxy/charmhelpers/contrib/hardening/apache/templates/alias.conf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf rename to ceph-proxy/charmhelpers/contrib/hardening/apache/templates/alias.conf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/audits/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apache.py rename to ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/apt.py rename to ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/file.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/audits/file.py rename to ceph-proxy/charmhelpers/contrib/hardening/audits/file.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/defaults/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-proxy/charmhelpers/contrib/hardening/defaults/apache.yaml similarity index 100% rename from 
ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/apache.yaml diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-proxy/charmhelpers/contrib/hardening/defaults/apache.yaml.schema similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/apache.yaml.schema diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-proxy/charmhelpers/contrib/hardening/defaults/mysql.yaml similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/mysql.yaml diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-proxy/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-proxy/charmhelpers/contrib/hardening/defaults/os.yaml similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/os.yaml diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-proxy/charmhelpers/contrib/hardening/defaults/os.yaml.schema similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/os.yaml.schema diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/ceph-proxy/charmhelpers/contrib/hardening/defaults/ssh.yaml similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/ssh.yaml diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-proxy/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema rename to ceph-proxy/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-proxy/charmhelpers/contrib/hardening/harden.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/harden.py rename to ceph-proxy/charmhelpers/contrib/hardening/harden.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/host/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/apt.py similarity index 100% rename from 
ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/apt.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/apt.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/limits.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/limits.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/limits.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/login.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/minimize_access.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/minimize_access.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/pam.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/pam.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/pam.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/profile.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/profile.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/profile.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/securetty.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/securetty.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/suid_sgid.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/suid_sgid.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/99-hardening.sh similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/99-hardening.sh diff --git 
a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/login.defs rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/modules similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/modules rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/modules diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/passwdqc.conf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/passwdqc.conf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/securetty similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/securetty rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/securetty diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/tally2 similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/host/templates/tally2 rename to ceph-proxy/charmhelpers/contrib/hardening/host/templates/tally2 diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/mysql/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/mysql/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py 
b/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py rename to ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/mysql/templates/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/mysql/templates/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-proxy/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf rename to ceph-proxy/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/ssh/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/ssh/checks/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/checks/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-proxy/charmhelpers/contrib/hardening/ssh/checks/config.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/checks/config.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/ssh_config similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/ssh_config diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/sshd_config similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config rename to ceph-proxy/charmhelpers/contrib/hardening/ssh/templates/sshd_config diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-proxy/charmhelpers/contrib/hardening/templating.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/templating.py rename to ceph-proxy/charmhelpers/contrib/hardening/templating.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-proxy/charmhelpers/contrib/hardening/utils.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/hardening/utils.py rename to ceph-proxy/charmhelpers/contrib/hardening/utils.py diff --git 
a/ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py b/ceph-proxy/charmhelpers/contrib/network/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/network/__init__.py rename to ceph-proxy/charmhelpers/contrib/network/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/network/ip.py rename to ceph-proxy/charmhelpers/contrib/network/ip.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-proxy/charmhelpers/contrib/openstack/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/openstack/__init__.py rename to ceph-proxy/charmhelpers/contrib/openstack/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-proxy/charmhelpers/contrib/openstack/alternatives.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/openstack/alternatives.py rename to ceph-proxy/charmhelpers/contrib/openstack/alternatives.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-proxy/charmhelpers/contrib/openstack/exceptions.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/openstack/exceptions.py rename to ceph-proxy/charmhelpers/contrib/openstack/exceptions.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py similarity index 99% rename from ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py rename to ceph-proxy/charmhelpers/contrib/openstack/utils.py index 86b011b7..e5e25369 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -194,7 +194,7 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.19.0']), + ['2.20.0']), ]) # >= Liberty version->codename mapping @@ -656,7 +656,7 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(avail_vers, cur_vers) == 1 + return apt.version_compare(avail_vers, cur_vers) >= 1 def ensure_block_device(block_device): diff --git a/ceph-proxy/hooks/charmhelpers/contrib/python.py b/ceph-proxy/charmhelpers/contrib/python.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/python.py rename to ceph-proxy/charmhelpers/contrib/python.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-proxy/charmhelpers/contrib/storage/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/__init__.py rename to ceph-proxy/charmhelpers/contrib/storage/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-proxy/charmhelpers/contrib/storage/linux/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/linux/__init__.py rename to ceph-proxy/charmhelpers/contrib/storage/linux/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py similarity index 89% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py rename to ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 63c93044..2c62092c 100644 --- a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -59,6 +59,7 @@ service_stop, 
service_running, umount, + cmp_pkgrevno, ) from charmhelpers.fetch import ( apt_install, @@ -178,7 +179,6 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': + if cmp_pkgrevno('ceph-common', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -196,7 +196,8 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): """Return the number of placement groups to use when creating the pool. Returns the number of placement groups which should be specified when @@ -229,6 +230,9 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools have not been upgraded to include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. :return: int. The number of pgs to use. """ @@ -243,17 +247,20 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # If the expected-osd-count is specified, then use the max between # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) + osd_list = get_osds(self.service, device_class) expected = config('expected-osd-count') or 0 if osd_list: - osd_count = max(expected, len(osd_list)) + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) # Log a message to provide some insight if the calculations claim # to be off because someone is setting the expected count and # there are more OSDs in reality. Try to make a proper guess # based upon the cluster itself. - if expected and osd_count != expected: + if not device_class and expected and osd_count != expected: log("Found more OSDs than provided expected count. " "Using the actual count instead", INFO) elif expected: @@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name): raise -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): +def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): """ - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None.
Can raise CalledProcessError + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool + :type pool_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + :raises: subprocess.CalledProcessError """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + if max_bytes: + cmd = cmd + ['max_bytes', str(max_bytes)] + if max_objects: + cmd = cmd + ['max_objects', str(max_objects)] + check_call(cmd) def remove_pool_quota(service, pool_name): @@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name): def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): + locality=None, durability_estimator=None, + device_class=None): """ Create a new erasure code profile if one does not already exist for it. Updates the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param coding_chunks: int :param locality: int :param durability_estimator: int + :param device_class: six.string_types :return: None. Can raise CalledProcessError """ - version = ceph_version() - # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) @@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 # failure_domain changed in luminous - if version and version >= '12.0.0': + if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) else: cmd.append('ruleset-failure-domain=' + failure_domain) + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -744,20 +762,26 @@ def pool_exists(service, name): return name in out.split() -def get_osds(service): +def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the - cluster. + cluster (optionally filtered by storage device class). 
+ + :param device_class: Class of storage device for OSDs + :type device_class: str """ - version = ceph_version() - if version and version >= '0.56': + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) def install(): @@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if ceph_version() >= '12.0.0': + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -1091,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None, return True -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - class CephBrokerRq(object): """Ceph broker request. @@ -1147,14 +1155,47 @@ def add_op_request_access_to_group(self, name, namespace=None, 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - @param weight: the percentage of data the pool makes up + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str + :param app_name: (Optional) Tag pool with application name. Note that + there are certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``.
+ :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') @@ -1162,7 +1203,41 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, - 'group-namespace': namespace}) + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create an erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there are certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + self.ops.append({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value.
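The broker operations added above are driven from client charms rather than from ceph-proxy itself. A minimal sketch of how a client might use the new API — the pool names and quota values are illustrative assumptions, and send_request_if_needed() is a pre-existing helper in the same charmhelpers module, not part of this diff:

    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        send_request_if_needed,
    )

    rq = CephBrokerRq()
    # Replicated pool tagged for RBD use, capped at roughly 100 GiB.
    # pg_num and weight are mutually exclusive, so only weight is passed.
    rq.add_op_create_replicated_pool(name='vms', replica_count=3, weight=20,
                                     app_name='rbd',
                                     max_bytes=100 * 1024 ** 3)
    # Erasure coded pool; leaving erasure_profile unset lets the ceph-mon
    # unit handling the request fall back to its default profile.
    rq.add_op_create_erasure_pool(name='objects', weight=40, app_name='rgw',
                                  max_objects=1000000)
    # Send the request over the client relation unless an equivalent
    # request has already been issued.
    send_request_if_needed(rq)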
diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/linux/loopback.py rename to ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/linux/lvm.py rename to ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py diff --git a/ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/contrib/storage/linux/utils.py rename to ceph-proxy/charmhelpers/contrib/storage/linux/utils.py diff --git a/ceph-proxy/hooks/charmhelpers/core/__init__.py b/ceph-proxy/charmhelpers/core/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/__init__.py rename to ceph-proxy/charmhelpers/core/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/core/decorators.py b/ceph-proxy/charmhelpers/core/decorators.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/decorators.py rename to ceph-proxy/charmhelpers/core/decorators.py diff --git a/ceph-proxy/hooks/charmhelpers/core/files.py b/ceph-proxy/charmhelpers/core/files.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/files.py rename to ceph-proxy/charmhelpers/core/files.py diff --git a/ceph-proxy/hooks/charmhelpers/core/fstab.py b/ceph-proxy/charmhelpers/core/fstab.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/fstab.py rename to ceph-proxy/charmhelpers/core/fstab.py diff --git a/ceph-proxy/hooks/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py similarity index 93% rename from ceph-proxy/hooks/charmhelpers/core/hookenv.py rename to ceph-proxy/charmhelpers/core/hookenv.py index 2e287659..4744eb43 100644 --- a/ceph-proxy/hooks/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -50,6 +50,11 @@ MARKER = object() SH_MAX_ARG = 131071 + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + cache = {} @@ -1414,3 +1419,72 @@ def unit_doomed(unit=None): # I don't think 'dead' units ever show up in the goal-state, but # check anyway in addition to 'dying'. return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. + + Get charm proxy settings from environment variables that correspond to + juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, + see lp:1782236) in a format suitable for passing to an application that + reacts to proxy settings passed as environment variables. Some applications + support lowercase or uppercase notation (e.g. curl), some support only + lowercase (e.g. wget), there are also subjectively rare cases of only + uppercase notation support. no_proxy CIDR and wildcard support also varies + between runtimes and applications as there is no enforced standard. + + Some applications may connect to multiple destinations and expose config + options that would affect only proxy settings for a specific destination; + these should be handled in charms in an application-specific manner.
+ + :param selected_settings: format only a subset of possible settings + :type selected_settings: list + :rtype: Option(None, dict[str, str]) + """ + SUPPORTED_SETTINGS = { + 'http': 'HTTP_PROXY', + 'https': 'HTTPS_PROXY', + 'no_proxy': 'NO_PROXY', + 'ftp': 'FTP_PROXY' + } + if selected_settings is None: + selected_settings = SUPPORTED_SETTINGS + + selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() + if k in selected_settings] + proxy_settings = {} + for var in selected_vars: + var_val = os.getenv(var) + if var_val: + proxy_settings[var] = var_val + proxy_settings[var.lower()] = var_val + # Now handle juju-prefixed environment variables. The legacy vs new + # environment variable usage is mutually exclusive + charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var)) + if charm_var_val: + proxy_settings[var] = charm_var_val + proxy_settings[var.lower()] = charm_var_val + if 'no_proxy' in proxy_settings: + if _contains_range(proxy_settings['no_proxy']): + log(RANGE_WARNING, level=WARNING) + return proxy_settings if proxy_settings else None + + +def _contains_range(addresses): + """Check for cidr or wildcard domain in a string. + + Given a string comprising a comma separated list of ip addresses + and domain names, determine whether the string contains IP ranges + or wildcard domains. + + :param addresses: comma separated list of domains and ip addresses. + :type addresses: str + """ + return ( + # Test for cidr (e.g. 10.20.20.0/24) + "/" in addresses or + # Test for wildcard domains (*.foo.com or .foo.com) + "*" in addresses or + addresses.startswith(".") or + ",." in addresses or + " ." in addresses) diff --git a/ceph-proxy/hooks/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/host.py rename to ceph-proxy/charmhelpers/core/host.py diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-proxy/charmhelpers/core/host_factory/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/host_factory/__init__.py rename to ceph-proxy/charmhelpers/core/host_factory/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py b/ceph-proxy/charmhelpers/core/host_factory/centos.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/host_factory/centos.py rename to ceph-proxy/charmhelpers/core/host_factory/centos.py diff --git a/ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/host_factory/ubuntu.py rename to ceph-proxy/charmhelpers/core/host_factory/ubuntu.py diff --git a/ceph-proxy/hooks/charmhelpers/core/hugepage.py b/ceph-proxy/charmhelpers/core/hugepage.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/hugepage.py rename to ceph-proxy/charmhelpers/core/hugepage.py diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel.py b/ceph-proxy/charmhelpers/core/kernel.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/kernel.py rename to ceph-proxy/charmhelpers/core/kernel.py diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-proxy/charmhelpers/core/kernel_factory/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/kernel_factory/__init__.py rename to ceph-proxy/charmhelpers/core/kernel_factory/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py
b/ceph-proxy/charmhelpers/core/kernel_factory/centos.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/kernel_factory/centos.py rename to ceph-proxy/charmhelpers/core/kernel_factory/centos.py diff --git a/ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/kernel_factory/ubuntu.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/kernel_factory/ubuntu.py rename to ceph-proxy/charmhelpers/core/kernel_factory/ubuntu.py diff --git a/ceph-proxy/hooks/charmhelpers/core/services/__init__.py b/ceph-proxy/charmhelpers/core/services/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/services/__init__.py rename to ceph-proxy/charmhelpers/core/services/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/core/services/base.py b/ceph-proxy/charmhelpers/core/services/base.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/services/base.py rename to ceph-proxy/charmhelpers/core/services/base.py diff --git a/ceph-proxy/hooks/charmhelpers/core/services/helpers.py b/ceph-proxy/charmhelpers/core/services/helpers.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/services/helpers.py rename to ceph-proxy/charmhelpers/core/services/helpers.py diff --git a/ceph-proxy/hooks/charmhelpers/core/strutils.py b/ceph-proxy/charmhelpers/core/strutils.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/strutils.py rename to ceph-proxy/charmhelpers/core/strutils.py diff --git a/ceph-proxy/hooks/charmhelpers/core/sysctl.py b/ceph-proxy/charmhelpers/core/sysctl.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/sysctl.py rename to ceph-proxy/charmhelpers/core/sysctl.py diff --git a/ceph-proxy/hooks/charmhelpers/core/templating.py b/ceph-proxy/charmhelpers/core/templating.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/templating.py rename to ceph-proxy/charmhelpers/core/templating.py diff --git a/ceph-proxy/hooks/charmhelpers/core/unitdata.py b/ceph-proxy/charmhelpers/core/unitdata.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/core/unitdata.py rename to ceph-proxy/charmhelpers/core/unitdata.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/__init__.py b/ceph-proxy/charmhelpers/fetch/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/__init__.py rename to ceph-proxy/charmhelpers/fetch/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py b/ceph-proxy/charmhelpers/fetch/archiveurl.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/archiveurl.py rename to ceph-proxy/charmhelpers/fetch/archiveurl.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py b/ceph-proxy/charmhelpers/fetch/bzrurl.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/bzrurl.py rename to ceph-proxy/charmhelpers/fetch/bzrurl.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/centos.py b/ceph-proxy/charmhelpers/fetch/centos.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/centos.py rename to ceph-proxy/charmhelpers/fetch/centos.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/giturl.py b/ceph-proxy/charmhelpers/fetch/giturl.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/giturl.py rename to ceph-proxy/charmhelpers/fetch/giturl.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/python/__init__.py b/ceph-proxy/charmhelpers/fetch/python/__init__.py similarity index 100% 
rename from ceph-proxy/hooks/charmhelpers/fetch/python/__init__.py rename to ceph-proxy/charmhelpers/fetch/python/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/python/debug.py b/ceph-proxy/charmhelpers/fetch/python/debug.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/python/debug.py rename to ceph-proxy/charmhelpers/fetch/python/debug.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/python/packages.py b/ceph-proxy/charmhelpers/fetch/python/packages.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/python/packages.py rename to ceph-proxy/charmhelpers/fetch/python/packages.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/python/rpdb.py b/ceph-proxy/charmhelpers/fetch/python/rpdb.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/python/rpdb.py rename to ceph-proxy/charmhelpers/fetch/python/rpdb.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/python/version.py b/ceph-proxy/charmhelpers/fetch/python/version.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/python/version.py rename to ceph-proxy/charmhelpers/fetch/python/version.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/snap.py b/ceph-proxy/charmhelpers/fetch/snap.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/fetch/snap.py rename to ceph-proxy/charmhelpers/fetch/snap.py diff --git a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py similarity index 76% rename from ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py rename to ceph-proxy/charmhelpers/fetch/ubuntu.py index 8a5cadf1..c6d9341e 100644 --- a/ceph-proxy/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -19,15 +19,14 @@ import six import time import subprocess -from tempfile import NamedTemporaryFile -from charmhelpers.core.host import ( - lsb_release -) +from charmhelpers.core.host import get_distrib_codename + from charmhelpers.core.hookenv import ( log, DEBUG, WARNING, + env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError @@ -303,12 +302,17 @@ def import_key(key): """Import an ASCII Armor key. A Radix64 format keyid is also supported for backwards - compatibility, but should never be used; the key retrieval - mechanism is insecure and subject to man-in-the-middle attacks - voiding all signature checks using that key. - - :param keyid: The key in ASCII armor format, - including BEGIN and END markers. + compatibility. In this case the Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid.
+ :type key: (bytes, str) :raises: GPGKeyError if the key could not be imported """ key = key.strip() @@ -319,35 +323,131 @@ def import_key(key): log("PGP key found (looks like ASCII Armor format)", level=DEBUG) if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) else: raise GPGKeyError("ASCII armor markers missing from GPG key") else: - # We should only send things obviously not a keyid offsite - # via this unsecured protocol, as it may be a secret or part - # of one. log("PGP key found (looks like Radix64 format)", level=WARNING) - log("INSECURLY importing PGP key from keyserver; " + log("SECURELY importing PGP key from keyserver; " "full key not provided.", level=WARNING) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - _run_with_retries(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). 
Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. Check your network setup' + ' (MTU, routing, DNS) and/or proxy server settings' + ' as well as destination keyserver status.') + else: + return out + + +def _write_apt_gpg_keyfile(key_name, key_material): + """Writes GPG key material into a file at a provided path. + + :param key_name: A key name to use for a key file (could be a fingerprint) + :type key_name: str + :param key_material: A GPG key material (binary) + :type key_material: (str, bytes) + """ + with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), + 'wb') as keyf: + keyf.write(key_material) def add_source(source, key=None, fail_invalid=False): @@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False): def _add_proposed(): """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list - Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for + Uses get_distrib_codename to determine the correct stanza for the deb line. For intel architecutres PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. 
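Taken together, the helpers above replace the deprecated 'apt-key adv --recv-keys' path with an explicit fetch, dearmor and write pipeline. A condensed standalone sketch of that flow (function and variable names are illustrative; the proxy_env dict stands in for the result of env_proxy_settings(['https'])):

import subprocess

KEYSERVER = ('https://keyserver.ubuntu.com'
             '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')


def fetch_and_trust_key(keyid, proxy_env=None):
    # Retrieve the ASCII-armored key over HTTPS, proxy-aware, as in
    # _get_key_by_keyid().
    key_asc = subprocess.check_output(
        ['curl', KEYSERVER.format(keyid)], env=proxy_env)
    # Convert ASCII armor to the binary OpenPGP format that apt expects
    # under /etc/apt/trusted.gpg.d, as in _dearmor_gpg_key().
    gpg = subprocess.Popen(['gpg', '--dearmor'],
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    key_gpg, err = gpg.communicate(input=key_asc)
    if b'no valid OpenPGP data found' in err:
        raise RuntimeError('Invalid GPG key material for {}'.format(keyid))
    # Drop the binary key where apt trusts it, as in _write_apt_gpg_keyfile().
    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(keyid), 'wb') as f:
        f.write(key_gpg)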
""" - release = lsb_release()['DISTRIB_CODENAME'] + release = get_distrib_codename() arch = platform.machine() if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): raise SourceConfigError("Arch {} not supported for (distro-)proposed" @@ -461,11 +561,16 @@ def _add_apt_repository(spec): """Add the spec using add_apt_repository :param spec: the parameter to pass to add_apt_repository + :type spec: str """ if '{series}' in spec: - series = lsb_release()['DISTRIB_CODENAME'] + series = get_distrib_codename() spec = spec.replace('{series}', series) - _run_with_retries(['add-apt-repository', '--yes', spec]) + # software-properties package for bionic properly reacts to proxy settings + # passed as environment variables (See lp:1433761). This is not the case + # LTS and non-LTS releases below bionic. + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https'])) def _add_cloud_pocket(pocket): @@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release): :raises: SourceConfigError if the release is not the same as the ubuntu release. """ - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + ubuntu_rel = get_distrib_codename() if release != ubuntu_rel: raise SourceConfigError( 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' diff --git a/ceph-proxy/hooks/charmhelpers/osplatform.py b/ceph-proxy/charmhelpers/osplatform.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/osplatform.py rename to ceph-proxy/charmhelpers/osplatform.py diff --git a/ceph-proxy/hooks/charmhelpers/payload/__init__.py b/ceph-proxy/charmhelpers/payload/__init__.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/payload/__init__.py rename to ceph-proxy/charmhelpers/payload/__init__.py diff --git a/ceph-proxy/hooks/charmhelpers/payload/execd.py b/ceph-proxy/charmhelpers/payload/execd.py similarity index 100% rename from ceph-proxy/hooks/charmhelpers/payload/execd.py rename to ceph-proxy/charmhelpers/payload/execd.py diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index de1e48e3..8000a005 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -128,7 +128,7 @@ def is_quorum(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(subprocess.check_output(cmd).decode('utf-8')) except subprocess.CalledProcessError: return False except ValueError: @@ -155,7 +155,7 @@ def is_leader(): ] if os.path.exists(asok): try: - result = json.loads(subprocess.check_output(cmd)) + result = json.loads(subprocess.check_output(cmd).decode('utf-8')) except subprocess.CalledProcessError: return False except ValueError: @@ -201,7 +201,9 @@ def add_bootstrap_hint(peer): def is_osd_disk(dev): try: - info = subprocess.check_output(['sgdisk', '-i', '1', dev]) + info = (subprocess + .check_output(['sgdisk', '-i', '1', dev]) + .decode('utf-8')) info = info.split("\n") # IGNORE:E1103 for line in info: if line.startswith( @@ -266,7 +268,7 @@ def generate_monitor_secret(): '--name=mon.', '--gen-key' ] - res = subprocess.check_output(cmd) + res = subprocess.check_output(cmd).decode('utf-8') return "{}==".format(res.split('=')[1].strip()) @@ -403,12 +405,15 @@ def get_named_key(name, caps=None): 'auth', 'get-or-create', 'client.{}'.format(name), ] # Add capabilities - for subsystem, subcaps in caps.iteritems(): + for subsystem, subcaps in caps.items(): cmd.extend([ subsystem, '; '.join(subcaps), ]) - return parse_key(subprocess.check_output(cmd).strip()) # IGNORE:E1103 + return 
parse_key(subprocess + .check_output(cmd) + .decode('utf-8') + .strip()) # IGNORE:E1103 def upgrade_key_caps(key, caps): @@ -419,7 +424,7 @@ def upgrade_key_caps(key, caps): cmd = [ "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] - for subsystem, subcaps in caps.iteritems(): + for subsystem, subcaps in caps.items(): cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 6347e0e9..322ccb0a 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2012 Canonical Ltd. @@ -13,6 +13,18 @@ import shutil import sys + +_path = os.path.dirname(os.path.realpath(__file__)) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_root) + import ceph from charmhelpers.core.hookenv import ( log, diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install index 86d48855..eb058242 100755 --- a/ceph-proxy/hooks/install +++ b/ceph-proxy/hooks/install @@ -11,7 +11,7 @@ check_and_install() { fi } -PYTHON="python" +PYTHON="python3" for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} diff --git a/ceph-proxy/hooks/utils.py b/ceph-proxy/hooks/utils.py index 5b68a1e7..d1cf5009 100644 --- a/ceph-proxy/hooks/utils.py +++ b/ceph-proxy/hooks/utils.py @@ -43,9 +43,9 @@ def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" - with open(apt_sources, "r") as sources: + with open(apt_sources, "rt") as sources: lines = sources.readlines() - with open(apt_sources, "w") as sources: + with open(apt_sources, "wt") as sources: for line in lines: if pocket in line: sources.write(re.sub('^# deb', 'deb', line)) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 68ab28e6..3a8edbff 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
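The string-vs-bytes changes in ceph.py above all follow one pattern: under Python 3, subprocess.check_output() returns bytes, so anything feeding json.loads(), str.split() or str.startswith() must decode first. A minimal illustration of the pattern the hunks apply (the socket path is illustrative):

import json
import subprocess


def mon_status(asok='/var/run/ceph/ceph-mon.asok'):
    # Python 3: check_output() yields bytes; decode before parsing as text.
    raw = subprocess.check_output(
        ['ceph', '--admin-daemon', asok, 'mon_status'])
    return json.loads(raw.decode('utf-8'))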
[tox] -envlist = pep8,py27 +envlist = pep8,py3{5,6} skipsdist = True [testenv] @@ -20,6 +20,7 @@ passenv = HOME TERM AMULET_* CS_API_* basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +commands = /bin/true [testenv:py35] basepython = python3.5 diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py index f80aab3d..ba8fe96e 100644 --- a/ceph-proxy/unit_tests/__init__.py +++ b/ceph-proxy/unit_tests/__init__.py @@ -1,2 +1,19 @@ +import os import sys -sys.path.append('hooks') + + +_path = os.path.dirname(os.path.realpath(__file__)) +_actions = os.path.abspath(os.path.join(_path, '../actions')) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_charmhelpers = os.path.abspath(os.path.join(_path, '../charmhelpers')) +_unit_tests = os.path.abspath(os.path.join(_path, '../unit_tests')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_actions) +_add_path(_hooks) +_add_path(_charmhelpers) +_add_path(_unit_tests) diff --git a/ceph-proxy/unit_tests/test_ceph.py b/ceph-proxy/unit_tests/test_ceph.py index 9ed36c00..4cb55b2d 100644 --- a/ceph-proxy/unit_tests/test_ceph.py +++ b/ceph-proxy/unit_tests/test_ceph.py @@ -1,3 +1,4 @@ +import collections import unittest import mock @@ -70,10 +71,8 @@ def test_get_named_key_empty(self, mock_config, mock_ceph_user, expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' expected_output = ('[client.testuser]\n key = {}' .format(expected_key)) - caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] - } + caps = collections.OrderedDict([('mon', ['allow rw']), + ('osd', ['allow rwx'])]) ceph_user = 'ceph' ceph_proxy_host = 'cephproxy' mock_get_unit_hostname.return_value = ceph_proxy_host @@ -86,7 +85,8 @@ def check_output_side_effect(cmd): '/var/lib/ceph/mon/ceph-{}/keyring'.format( ceph_proxy_host), 'auth', 'get-or-create', user_spec, 'mon', - 'allow rw', 'osd', 'allow rwx']): expected_output + 'allow rw', 'osd', 'allow rwx']): (expected_output + .encode('utf-8')) }[' '.join(cmd)] mock_check_output.side_effect = check_output_side_effect mock_config.side_effect = self.empty_config_side_effect diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index 0b394cf2..1bf97df3 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -58,7 +58,7 @@ def test_radosgw_relation(self, mock_apt_install, mock_check_output): 'auth': 'cephx', 'fsid': 'some-fsid'} - mock_check_output.return_value = CEPH_GET_KEY + mock_check_output.return_value = CEPH_GET_KEY.encode() self.relation_get.return_value = {} self.test_config.set('monitor-hosts', settings['ceph-public-address']) self.test_config.set('fsid', settings['fsid']) @@ -123,7 +123,7 @@ def c(k): @mock.patch('subprocess.check_output') def test_client_relation_joined(self, mock_check_output): - mock_check_output.return_value = CEPH_GET_KEY + mock_check_output.return_value = CEPH_GET_KEY.encode() self.test_config.set('monitor-hosts', '127.0.0.1:1234') self.test_config.set('fsid', 'abc123') self.test_config.set('admin-key', 'some-admin-key') diff --git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py index 0a774821..ed0e7a1e 100644 --- a/ceph-proxy/unit_tests/test_utils.py +++ b/ceph-proxy/unit_tests/test_utils.py @@ -36,7 +36,7 @@ def get_default_config(): ''' default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: 
default_config[k] = v['default'] else: From ed52894c0b3df932c57b387c029f593d75de2250 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 7 Mar 2019 10:00:59 +0100 Subject: [PATCH 1707/2699] Always run mon relation after handling broker request This must be done to make sure the individual unit relation data is in sync. Change-Id: Ibf51766c868c179fa1b3245b394e32d0f2e7c1ab --- ceph-mon/hooks/ceph_hooks.py | 17 ++++++++++------- ceph-mon/unit_tests/test_ceph_hooks.py | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 82f88f41..2de99fc2 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -547,19 +547,22 @@ def handle_broker_request(relid, unit, add_legacy_response=False, if add_legacy_response: response.update({'broker_rsp': rsp}) - if relation_ids('rbd-mirror') and recurse: - # update ``rbd-mirror`` relations for this unit with - # information about new pools. - log('Notifying this units rbd-mirror relations after ' - 'processing broker request.', level=DEBUG) - notify_rbd_mirrors() - + if relation_ids('rbd-mirror'): + # NOTE(fnordahl): juju relation level data candidate # notify mons to flag that the other mon units should update # their ``rbd-mirror`` relations with information about new # pools. log('Notifying peers after processing broker request.', level=DEBUG) notify_mons() + + if recurse: + # update ``rbd-mirror`` relations for this unit with + # information about new pools. + log('Notifying this units rbd-mirror relations after ' + 'processing broker request.', level=DEBUG) + notify_rbd_mirrors() + return response diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index e7515307..4600915c 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -459,7 +459,7 @@ def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, mock_notify_mons.reset_mock() ceph_hooks.handle_broker_request('rel1', None, recurse=False) self.assertFalse(mock_notify_rbd_mirrors.called) - self.assertFalse(mock_notify_mons.called) + mock_notify_mons.assert_called_once_with() class BootstrapSourceTestCase(test_utils.CharmTestCase): From 18c9822a47850ab05654776d109a39769ef89d34 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 12 Mar 2019 14:55:53 +0100 Subject: [PATCH 1708/2699] Handle package installs before anything else Handling other functions of the charm upgrade before possible new packages leads to breakages when dependencies are unmet Change-Id: I0e4857d0d03c2d076290a4cc6a08dc1e6d5e1610 Closes-Bug: #1819669 --- ceph-osd/hooks/ceph_hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 187c6c56..b9135fe1 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -630,10 +630,10 @@ def mon_relation(): @hooks.hook('upgrade-charm.real') @harden() def upgrade_charm(): - if get_fsid() and get_auth(): - emit_cephconf() apt_install(packages=filter_installed_packages(ceph.determine_packages()), fatal=True) + if get_fsid() and get_auth(): + emit_cephconf() install_udev_rules() remap_resolved_targets() maybe_refresh_nrpe_files() From f7f02a683ab378c1d653c042198e9f3c2de549b6 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Feb 2019 16:29:56 +0100 Subject: [PATCH 1709/2699] Add security-checklist to ceph-mon Change-Id: I1b0f24fed7a5c49ba0f2477ee767b7fe5190adbf --- 
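The checklist action is built on a small, generic audit framework that this patch vendors into charm-helpers (full source in the diff below). A toy sketch of its registration-and-run model, assuming only the decorator semantics shown in that code:

import collections

_audits = {}
Audit = collections.namedtuple('Audit', 'func filters')


def audit(*filters):
    # Register the decorated function under its own name, together with
    # the filter callables that decide whether it applies to a deployment.
    def wrapper(f):
        _audits[f.__name__] = Audit(func=f, filters=filters)
        return f
    return wrapper


def run(audit_options):
    # Execute every audit whose filters all accept the given options;
    # a failed assert inside an audit marks it FAIL rather than ERROR.
    results = {}
    for name, entry in sorted(_audits.items()):
        if not all(flt(audit_options) for flt in entry.filters):
            continue
        try:
            entry.func(audit_options)
            results[name] = {'success': True}
        except AssertionError as e:
            results[name] = {'success': False, 'message': e}
    return results


@audit(lambda opts: opts.get('audit_type') == 'security-guide')
def validate_files_configured(audit_options):
    assert audit_options.get('files'), 'no files configured to audit'

On a deployed unit the action would then be invoked in the usual way, e.g. 'juju run-action ceph-mon/0 security-checklist'.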
ceph-mon/actions.yaml | 3 +- ceph-mon/actions/security-checklist | 1 + ceph-mon/actions/security_checklist.py | 46 +++ ceph-mon/charm-helpers-hooks.yaml | 1 + .../contrib/openstack/audits/__init__.py | 212 ++++++++++++ .../audits/openstack_security_guide.py | 303 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 3 +- .../charmhelpers/contrib/openstack/ip.py | 3 +- ceph-mon/hooks/charmhelpers/contrib/python.py | 21 -- .../contrib/storage/linux/ceph.py | 10 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 74 +++++ ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 187 ++++++++--- ceph-mon/tests/basic_deployment.py | 11 + 13 files changed, 812 insertions(+), 63 deletions(-) create mode 120000 ceph-mon/actions/security-checklist create mode 100755 ceph-mon/actions/security_checklist.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/python.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 2f3af8d9..f7839d49 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -316,4 +316,5 @@ set-noout: description: "Set ceph noout across the cluster." unset-noout: description: "Unset ceph noout across the cluster." - +security-checklist: + description: Validate the running configuration against the OpenStack security guides checklist diff --git a/ceph-mon/actions/security-checklist b/ceph-mon/actions/security-checklist new file mode 120000 index 00000000..47464970 --- /dev/null +++ b/ceph-mon/actions/security-checklist @@ -0,0 +1 @@ +security_checklist.py \ No newline at end of file diff --git a/ceph-mon/actions/security_checklist.py b/ceph-mon/actions/security_checklist.py new file mode 100755 index 00000000..7afb3c1e --- /dev/null +++ b/ceph-mon/actions/security_checklist.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +sys.path.append('hooks') + +import charmhelpers.contrib.openstack.audits as audits +from charmhelpers.contrib.openstack.audits import ( + openstack_security_guide, +) + +# Via the openstack_security_guide above, we are running the following +# security assertions automatically: +# +# - validate-file-ownership +# - validate-file-permissions + + +def main(): + config = { + 'audit_type': audits.AuditType.OpenStackSecurityGuide, + 'files': openstack_security_guide.FILE_ASSERTIONS['ceph-mon'], + 'excludes': [ + 'validate-uses-keystone', + 'validate-uses-tls-for-glance', + 'validate-uses-tls-for-keystone', + ], + } + return audits.action_parse_results(audits.run(config)) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 7d151fd7..ed7cbe74 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -11,6 +11,7 @@ include: - contrib.network.ip - contrib.openstack: - alternatives + - audits - exceptions - utils - contrib.charmsupport diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py new file mode 100644 index 00000000..7f7e5f79 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py @@ -0,0 +1,212 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OpenStack Security Audit code""" + +import collections +from enum import Enum +import traceback + +from charmhelpers.core.host import cmp_pkgrevno +import charmhelpers.contrib.openstack.utils as openstack_utils +import charmhelpers.core.hookenv as hookenv + + +class AuditType(Enum): + OpenStackSecurityGuide = 1 + + +_audits = {} + +Audit = collections.namedtuple('Audit', 'func filters') + + +def audit(*args): + """Decorator to register an audit. + + These are used to generate audits that can be run on a + deployed system that matches the given configuration + + :param args: List of functions to filter tests against + :type args: List[Callable[Dict]] + """ + def wrapper(f): + test_name = f.__name__ + if _audits.get(test_name): + raise RuntimeError( + "Test name '{}' used more than once" + .format(test_name)) + non_callables = [fn for fn in args if not callable(fn)] + if non_callables: + raise RuntimeError( + "Configuration includes non-callable filters: {}" + .format(non_callables)) + _audits[test_name] = Audit(func=f, filters=args) + return f + return wrapper + + +def is_audit_type(*args): + """This audit is included in the specified kinds of audits. + + :param *args: List of AuditTypes to include this audit in + :type args: List[AuditType] + :rtype: Callable[Dict] + """ + def _is_audit_type(audit_options): + if audit_options.get('audit_type') in args: + return True + else: + return False + return _is_audit_type + + +def since_package(pkg, pkg_version): + """This audit should be run after the specified package version (incl). 
+ + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _since_package(audit_options=None): + return cmp_pkgrevno(pkg, pkg_version) >= 0 + + return _since_package + + +def before_package(pkg, pkg_version): + """This audit should be run before the specified package version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _before_package(audit_options=None): + return not since_package(pkg, pkg_version)() + + return _before_package + + +def since_openstack_release(pkg, release): + """This audit should run after the specified OpenStack version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _since_openstack_release(audit_options=None): + _release = openstack_utils.get_os_codename_package(pkg) + return openstack_utils.CompareOpenStackReleases(_release) >= release + + return _since_openstack_release + + +def before_openstack_release(pkg, release): + """This audit should run before the specified OpenStack version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _before_openstack_release(audit_options=None): + return not since_openstack_release(pkg, release)() + + return _before_openstack_release + + +def it_has_config(config_key): + """This audit should be run based on specified config keys. + + :param config_key: Config key to look for + :type config_key: str + :rtype: Callable[Dict] + """ + def _it_has_config(audit_options): + return audit_options.get(config_key) is not None + + return _it_has_config + + +def run(audit_options): + """Run the configured audits with the specified audit_options. + + :param audit_options: Configuration for the audit + :type audit_options: Config + + :rtype: Dict[str, str] + """ + errors = {} + results = {} + for name, audit in sorted(_audits.items()): + result_name = name.replace('_', '-') + if result_name in audit_options.get('excludes', []): + print( + "Skipping {} because it is" + "excluded in audit config" + .format(result_name)) + continue + if all(p(audit_options) for p in audit.filters): + try: + audit.func(audit_options) + print("{}: PASS".format(name)) + results[result_name] = { + 'success': True, + } + except AssertionError as e: + print("{}: FAIL ({})".format(name, e)) + results[result_name] = { + 'success': False, + 'message': e, + } + except Exception as e: + print("{}: ERROR ({})".format(name, e)) + errors[name] = e + results[result_name] = { + 'success': False, + 'message': e, + } + for name, error in errors.items(): + print("=" * 20) + print("Error in {}: ".format(name)) + traceback.print_tb(error.__traceback__) + print() + return results + + +def action_parse_results(result): + """Parse the result of `run` in the context of an action. 
+ + :param result: The result of running the security-checklist + action on a unit + :type result: Dict[str, Dict[str, str]] + :rtype: int + """ + passed = True + for test, result in result.items(): + if result['success']: + hookenv.action_set({test: 'PASS'}) + else: + hookenv.action_set({test: 'FAIL - {}'.format(result['message'])}) + passed = False + if not passed: + hookenv.action_fail("One or more tests failed") + return 0 if passed else 1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py new file mode 100644 index 00000000..ba5e2486 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -0,0 +1,303 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + + +FILE_ASSERTIONS = { + 'barbican': { + # From security guide + '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'}, + '/etc/barbican/barbican-api-paste.ini': + {'group': 'barbican', 'mode': '640'}, + '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + # From security guide + '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'}, + }, + 'glance': { + # From security guide + '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-registry-paste.ini': + {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-scrubber.conf': {'group': 'glance', 
'mode': '640'}, + '/etc/glance/glance-swift-store.conf': + {'group': 'glance', 'mode': '640'}, + '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'}, + }, + 'keystone': { + # From security guide + '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/keystone-paste.ini': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/certs/signing_cert.pem': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/private/signing_key.pem': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'}, + }, + 'manilla': { + # From security guide + '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'}, + }, + 'neutron-gateway': { + '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, + '/etc/neutron/rootwrap.conf': {'mode': '640'}, + '/etc/neutron/rootwrap.d': {'mode': '755'}, + '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + }, + 'neutron-api': { + # From security guide + '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, + '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'}, + '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'}, + # Additional validations + '/etc/neutron/rootwrap.d': {'mode': '755'}, + '/etc/neutron/neutron_lbaas.conf': {'mode': '644'}, + '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'}, + '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + }, + 'nova-cloud-controller': { + # From security guide + '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, + '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'}, + '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + # Additional validations + '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + }, + 'nova-compute': { + # From security guide + '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, + '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'}, + # Additional Validations + '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/nm.conf': {'mode': '644'}, + '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. 
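_stat() below shells out to stat(1) so that owner and group come back as names rather than numeric ids; for comparison, a standard-library-only equivalent (an illustrative sketch, not the charm-helpers implementation):

import grp
import os
import pwd
import stat


def stat_ownership(path):
    st = os.stat(path)
    return (pwd.getpwuid(st.st_uid).pw_name,    # owner name, e.g. 'ceph'
            grp.getgrgid(st.st_gid).gr_name,    # group name
            oct(stat.S_IMODE(st.st_mode))[2:])  # mode as a '640'-style string

Either way, the result feeds the assert-based validators defined further down.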
+ + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + conf = configparser.ConfigParser() + conf.read(path) + return dict(conf) + + +def _validate_file_ownership(owner, group, file_name): + """ + Validate that a specified file is owned by `owner:group`. + + :param owner: Name of the owner + :type owner: str + :param group: Name of the group + :type group: str + :param file_name: Path to the file to verify + :type file_name: str + """ + try: + ownership = _stat(file_name) + except subprocess.CalledProcessError as e: + print("Error reading file: {}".format(e)) + assert False, "Specified file does not exist: {}".format(file_name) + assert owner == ownership.owner, \ + "{} has an incorrect owner: {} should be {}".format( + file_name, ownership.owner, owner) + assert group == ownership.group, \ + "{} has an incorrect group: {} should be {}".format( + file_name, ownership.group, group) + print("Validate ownership of {}: PASS".format(file_name)) + + +def _validate_file_mode(mode, file_name): + """ + Validate that a specified file has the specified permissions. + + :param mode: file mode that is desires + :type owner: str + :param file_name: Path to the file to verify + :type file_name: str + """ + try: + ownership = _stat(file_name) + except subprocess.CalledProcessError as e: + print("Error reading file: {}".format(e)) + assert False, "Specified file does not exist: {}".format(file_name) + assert mode == ownership.mode, \ + "{} has an incorrect mode: {} should be {}".format( + file_name, ownership.mode, mode) + print("Validate mode of {}: PASS".format(file_name)) + + +@cached +def _config_section(config, section): + """Read the configuration file and return a section.""" + path = os.path.join(config.get('config_path'), config.get('config_file')) + conf = _config_ini(path) + return conf.get(section) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide), + it_has_config('files')) +def validate_file_ownership(config): + """Verify that configuration files are owned by the correct user/group.""" + files = config.get('files', {}) + for file_name, options in files.items(): + for key in options.keys(): + if key not in ["owner", "group", "mode"]: + raise RuntimeError( + "Invalid ownership configuration: {}".format(key)) + owner = options.get('owner', config.get('owner', 'root')) + group = options.get('group', config.get('group', 'root')) + if '*' in file_name: + for file in glob.glob(file_name): + if file not in files.keys(): + if os.path.isfile(file): + _validate_file_ownership(owner, group, file) + else: + if os.path.isfile(file_name): + _validate_file_ownership(owner, group, file_name) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide), + it_has_config('files')) +def validate_file_permissions(config): + """Verify that permissions on configuration files are secure enough.""" + files = config.get('files', {}) + for file_name, options in files.items(): + for key in options.keys(): + if key not in ["owner", "group", "mode"]: + raise RuntimeError( + "Invalid ownership configuration: {}".format(key)) + mode = 
options.get('mode', config.get('permissions', '600')) + if '*' in file_name: + for file in glob.glob(file_name): + if file not in files.keys(): + if os.path.isfile(file): + _validate_file_mode(mode, file) + else: + if os.path.isfile(file_name): + _validate_file_mode(mode, file_name) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_keystone(audit_options): + """Validate that the service uses Keystone for authentication.""" + section = _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'DEFAULT'" + assert section.get('auth_strategy') == "keystone", \ + "Application is not using Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_keystone(audit_options): + """Verify that TLS is used to communicate with Keystone.""" + section = _config_section(audit_options, 'keystone_authtoken') + assert section is not None, "Missing section 'keystone_authtoken'" + assert not section.get('insecure') and \ + "https://" in section.get("auth_uri"), \ + "TLS is not used for Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_glance(audit_options): + """Verify that TLS is used to communicate with Glance.""" + section = _config_section(audit_options, 'glance') + assert section is not None, "Missing section 'glance'" + assert not section.get('insecure') and \ + "https://" in section.get("api_servers"), \ + "TLS is not used for Glance" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 78a339f6..fc634cc6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -29,6 +29,7 @@ filter_installed_packages, ) from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, is_relation_made, local_unit, @@ -868,7 +869,7 @@ def get_network_addresses(self): addr = network_get_primary_address( ADDRESS_MAP[net_type]['binding'] ) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): addr = fallback endpoint = resolve_address(net_type) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index 73102af7..df83b91b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -13,6 +13,7 @@ # limitations under the License. from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, unit_get, service_name, @@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): # configuration is not in use try: resolved_address = network_get_primary_address(binding) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): resolved_address = fallback_addr if resolved_address is None: diff --git a/ceph-mon/hooks/charmhelpers/contrib/python.py b/ceph-mon/hooks/charmhelpers/contrib/python.py deleted file mode 100644 index 84cba8c4..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/python.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2014-2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -# deprecated aliases for backwards compatibility -from charmhelpers.fetch.python import debug # noqa -from charmhelpers.fetch.python import packages # noqa -from charmhelpers.fetch.python import rpdb # noqa -from charmhelpers.fetch.python import version # noqa diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 568726bb..22aa978b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1152,7 +1152,8 @@ def add_op_request_access_to_group(self, name, namespace=None, 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): + weight=None, group=None, namespace=None, + app_name=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1160,6 +1161,11 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. @param weight: the percentage of data the pool makes up + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') @@ -1167,7 +1173,7 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, - 'group-namespace': namespace}) + 'group-namespace': namespace, 'app-name': app_name}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 2e287659..4744eb43 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -50,6 +50,11 @@ MARKER = object() SH_MAX_ARG = 131071 + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + cache = {} @@ -1414,3 +1419,72 @@ def unit_doomed(unit=None): # I don't think 'dead' units ever show up in the goal-state, but # check anyway in addition to 'dying'. return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. + + Get charm proxy settings from environment variables that correspond to + juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, + see lp:1782236) in a format suitable for passing to an application that + reacts to proxy settings passed as environment variables. Some applications + support lowercase or uppercase notation (e.g. curl), some support only + lowercase (e.g. 
wget), there are also subjectively rare cases of only + uppercase notation support. no_proxy CIDR and wildcard support also varies + between runtimes and applications as there is no enforced standard. + + Some applications may connect to multiple destinations and expose config + options that would affect only proxy settings for a specific destination + these should be handled in charms in an application-specific manner. + + :param selected_settings: format only a subset of possible settings + :type selected_settings: list + :rtype: Option(None, dict[str, str]) + """ + SUPPORTED_SETTINGS = { + 'http': 'HTTP_PROXY', + 'https': 'HTTPS_PROXY', + 'no_proxy': 'NO_PROXY', + 'ftp': 'FTP_PROXY' + } + if selected_settings is None: + selected_settings = SUPPORTED_SETTINGS + + selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() + if k in selected_settings] + proxy_settings = {} + for var in selected_vars: + var_val = os.getenv(var) + if var_val: + proxy_settings[var] = var_val + proxy_settings[var.lower()] = var_val + # Now handle juju-prefixed environment variables. The legacy vs new + # environment variable usage is mutually exclusive + charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var)) + if charm_var_val: + proxy_settings[var] = charm_var_val + proxy_settings[var.lower()] = charm_var_val + if 'no_proxy' in proxy_settings: + if _contains_range(proxy_settings['no_proxy']): + log(RANGE_WARNING, level=WARNING) + return proxy_settings if proxy_settings else None + + +def _contains_range(addresses): + """Check for cidr or wildcard domain in a string. + + Given a string comprising a comma seperated list of ip addresses + and domain names, determine whether the string contains IP ranges + or wildcard domains. + + :param addresses: comma seperated list of domains and ip addresses. + :type addresses: str + """ + return ( + # Test for cidr (e.g. 10.20.20.0/24) + "/" in addresses or + # Test for wildcard domains (*.foo.com or .foo.com) + "*" in addresses or + addresses.startswith(".") or + ",." in addresses or + " ." in addresses) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 8a5cadf1..2394caf3 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -19,15 +19,16 @@ import six import time import subprocess -from tempfile import NamedTemporaryFile from charmhelpers.core.host import ( - lsb_release + get_distrib_codename, + CompareHostReleases, ) from charmhelpers.core.hookenv import ( log, DEBUG, WARNING, + env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError @@ -303,12 +304,17 @@ def import_key(key): """Import an ASCII Armor key. A Radix64 format keyid is also supported for backwards - compatibility, but should never be used; the key retrieval - mechanism is insecure and subject to man-in-the-middle attacks - voiding all signature checks using that key. - - :param keyid: The key in ASCII armor format, - including BEGIN and END markers. + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferrable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. 
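env_proxy_settings() only reads and normalizes environment variables; the caller decides which child processes receive them. A minimal sketch of that hand-off, mirroring the curl-based key retrieval (the URL and the variable precedence shown are assumptions based on the helper above):

import os
import subprocess


def proxied_fetch(url):
    # Prefer the juju-prefixed variable when present, as the helper does,
    # and export both cases since tools differ in which one they honour.
    env = dict(os.environ)
    https_proxy = (os.environ.get('JUJU_CHARM_HTTPS_PROXY') or
                   os.environ.get('HTTPS_PROXY'))
    if https_proxy:
        env['HTTPS_PROXY'] = env['https_proxy'] = https_proxy
    return subprocess.check_output(['curl', url], env=env)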
+ :type key: (bytes, str) :raises: GPGKeyError if the key could not be imported """ key = key.strip() @@ -319,35 +325,137 @@ def import_key(key): log("PGP key found (looks like ASCII Armor format)", level=DEBUG) if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) else: raise GPGKeyError("ASCII armor markers missing from GPG key") else: - # We should only send things obviously not a keyid offsite - # via this unsecured protocol, as it may be a secret or part - # of one. log("PGP key found (looks like Radix64 format)", level=WARNING) - log("INSECURLY importing PGP key from keyserver; " + log("SECURELY importing PGP key from keyserver; " "full key not provided.", level=WARNING) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - _run_with_retries(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change + release = get_distrib_codename() + is_gpgv2_distro = CompareHostReleases(release) >= "bionic" + if is_gpgv2_distro: + # --import is mandatory, otherwise fingerprint is not printed + cmd = 'gpg --with-colons --import-options show-only --import --dry-run' + else: + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' 
in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. Check your network setup' + ' (MTU, routing, DNS) and/or proxy server settings' + ' as well as destination keyserver status.') + else: + return out + + +def _write_apt_gpg_keyfile(key_name, key_material): + """Writes GPG key material into a file at a provided path. 
+ + :param key_name: A key name to use for a key file (could be a fingerprint) + :type key_name: str + :param key_material: A GPG key material (binary) + :type key_material: (str, bytes) + """ + with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), + 'wb') as keyf: + keyf.write(key_material) def add_source(source, key=None, fail_invalid=False): @@ -442,13 +550,13 @@ def add_source(source, key=None, fail_invalid=False): def _add_proposed(): """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list - Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for + Uses get_distrib_codename to determine the correct stanza for the deb line. For intel architecutres PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. """ - release = lsb_release()['DISTRIB_CODENAME'] + release = get_distrib_codename() arch = platform.machine() if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): raise SourceConfigError("Arch {} not supported for (distro-)proposed" @@ -461,11 +569,16 @@ def _add_apt_repository(spec): """Add the spec using add_apt_repository :param spec: the parameter to pass to add_apt_repository + :type spec: str """ if '{series}' in spec: - series = lsb_release()['DISTRIB_CODENAME'] + series = get_distrib_codename() spec = spec.replace('{series}', series) - _run_with_retries(['add-apt-repository', '--yes', spec]) + # software-properties package for bionic properly reacts to proxy settings + # passed as environment variables (See lp:1433761). This is not the case + # LTS and non-LTS releases below bionic. + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https'])) def _add_cloud_pocket(pocket): @@ -534,7 +647,7 @@ def _verify_is_ubuntu_rel(release, os_release): :raises: SourceConfigError if the release is not the same as the ubuntu release. 
""" - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + ubuntu_rel = get_distrib_codename() if release != ubuntu_rel: raise SourceConfigError( 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index 42f8d72c..da0a7d07 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -564,6 +564,17 @@ def test_402_pause_resume_actions(self): if 'nodown' in output or 'noout' in output: amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") + def test_501_security_checklist_action(self): + """Verify expected result on a default install""" + u.log.debug("Testing security-checklist") + sentry_unit = self.ceph0_sentry + + action_id = u.run_action(sentry_unit, "security-checklist") + u.wait_on_action(action_id) + data = amulet.actions.get_action_output(action_id, full_output=True) + assert data.get(u"status") == "completed", \ + "Security check is expected to pass by default" + @staticmethod def find_pool(sentry_unit, pool_name): """ From ba3c21b34259718da653d6996e0bc9be21aaf065 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 26 Feb 2019 16:35:34 +0100 Subject: [PATCH 1710/2699] Add security-checklist action Change-Id: I6e2ff9b546a1283748550beefc366ae055b63b7e Func-Test-PR: https://github.com/openstack-charmers/zaza/pull/193 --- ceph-osd/actions.yaml | 4 +- ceph-osd/actions/security-checklist | 1 + ceph-osd/actions/security_checklist.py | 47 +++ ceph-osd/charm-helpers-hooks.yaml | 1 + .../contrib/openstack/audits/__init__.py | 212 ++++++++++++ .../audits/openstack_security_guide.py | 303 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 3 +- .../charmhelpers/contrib/openstack/ip.py | 3 +- .../contrib/storage/linux/ceph.py | 87 ++--- ceph-osd/hooks/charmhelpers/core/hookenv.py | 74 +++++ ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 187 ++++++++--- ceph-osd/tests/tests.yaml | 1 + 12 files changed, 845 insertions(+), 78 deletions(-) create mode 120000 ceph-osd/actions/security-checklist create mode 100755 ceph-osd/actions/security_checklist.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/audits/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 16c11267..5302bac4 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -11,7 +11,7 @@ osd-out: Documentation: https://jujucharms.com/ceph-osd/ osd-in: description: | - \ + \ Set the local osd units in the charm to 'in'. Documentation: https://jujucharms.com/ceph-osd/ list-disks: @@ -84,3 +84,5 @@ zap-disk: required: - devices - i-really-mean-it +security-checklist: + description: Validate the running configuration against the OpenStack security guides checklist diff --git a/ceph-osd/actions/security-checklist b/ceph-osd/actions/security-checklist new file mode 120000 index 00000000..47464970 --- /dev/null +++ b/ceph-osd/actions/security-checklist @@ -0,0 +1 @@ +security_checklist.py \ No newline at end of file diff --git a/ceph-osd/actions/security_checklist.py b/ceph-osd/actions/security_checklist.py new file mode 100755 index 00000000..3f1e10b2 --- /dev/null +++ b/ceph-osd/actions/security_checklist.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path.append('hooks') + +import charmhelpers.contrib.openstack.audits as audits +from charmhelpers.contrib.openstack.audits import ( + openstack_security_guide, +) + + +# Via the openstack_security_guide above, we are running the following +# security assertions automatically: +# +# - validate-file-ownership +# - validate-file-permissions + + +def main(): + config = { + 'audit_type': audits.AuditType.OpenStackSecurityGuide, + 'files': openstack_security_guide.FILE_ASSERTIONS['ceph-osd'], + 'excludes': [ + 'validate-uses-keystone', + 'validate-uses-tls-for-glance', + 'validate-uses-tls-for-keystone', + ], + } + return audits.action_parse_results(audits.run(config)) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 4cef00af..84ef4424 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -14,6 +14,7 @@ include: - contrib.network.ip - contrib.openstack: - alternatives + - audits - context - exceptions - ip diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/__init__.py new file mode 100644 index 00000000..7f7e5f79 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/__init__.py @@ -0,0 +1,212 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OpenStack Security Audit code""" + +import collections +from enum import Enum +import traceback + +from charmhelpers.core.host import cmp_pkgrevno +import charmhelpers.contrib.openstack.utils as openstack_utils +import charmhelpers.core.hookenv as hookenv + + +class AuditType(Enum): + OpenStackSecurityGuide = 1 + + +_audits = {} + +Audit = collections.namedtuple('Audit', 'func filters') + + +def audit(*args): + """Decorator to register an audit. + + These are used to generate audits that can be run on a + deployed system that matches the given configuration + + :param args: List of functions to filter tests against + :type args: List[Callable[Dict]] + """ + def wrapper(f): + test_name = f.__name__ + if _audits.get(test_name): + raise RuntimeError( + "Test name '{}' used more than once" + .format(test_name)) + non_callables = [fn for fn in args if not callable(fn)] + if non_callables: + raise RuntimeError( + "Configuration includes non-callable filters: {}" + .format(non_callables)) + _audits[test_name] = Audit(func=f, filters=args) + return f + return wrapper + + +def is_audit_type(*args): + """This audit is included in the specified kinds of audits. 
+
+    :param *args: List of AuditTypes to include this audit in
+    :type args: List[AuditType]
+    :rtype: Callable[Dict]
+    """
+    def _is_audit_type(audit_options):
+        if audit_options.get('audit_type') in args:
+            return True
+        else:
+            return False
+    return _is_audit_type
+
+
+def since_package(pkg, pkg_version):
+    """This audit should be run after the specified package version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _since_package(audit_options=None):
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
+
+    return _since_package
+
+
+def before_package(pkg, pkg_version):
+    """This audit should be run before the specified package version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _before_package(audit_options=None):
+        return not since_package(pkg, pkg_version)()
+
+    return _before_package
+
+
+def since_openstack_release(pkg, release):
+    """This audit should run after the specified OpenStack version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _since_openstack_release(audit_options=None):
+        _release = openstack_utils.get_os_codename_package(pkg)
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
+
+    return _since_openstack_release
+
+
+def before_openstack_release(pkg, release):
+    """This audit should run before the specified OpenStack version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_openstack_release(audit_options=None):
+        return not since_openstack_release(pkg, release)()
+
+    return _before_openstack_release
+
+
+def it_has_config(config_key):
+    """This audit should be run based on specified config keys.
+
+    :param config_key: Config key to look for
+    :type config_key: str
+    :rtype: Callable[Dict]
+    """
+    def _it_has_config(audit_options):
+        return audit_options.get(config_key) is not None
+
+    return _it_has_config
+
+
+def run(audit_options):
+    """Run the configured audits with the specified audit_options.
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+
+    :rtype: Dict[str, str]
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if result_name in audit_options.get('excludes', []):
+            print(
+                "Skipping {} because it is "
+                "excluded in audit config"
+                .format(result_name))
+            continue
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action.
+ + :param result: The result of running the security-checklist + action on a unit + :type result: Dict[str, Dict[str, str]] + :rtype: int + """ + passed = True + for test, result in result.items(): + if result['success']: + hookenv.action_set({test: 'PASS'}) + else: + hookenv.action_set({test: 'FAIL - {}'.format(result['message'])}) + passed = False + if not passed: + hookenv.action_fail("One or more tests failed") + return 0 if passed else 1 diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py new file mode 100644 index 00000000..ba5e2486 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -0,0 +1,303 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + + +FILE_ASSERTIONS = { + 'barbican': { + # From security guide + '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'}, + '/etc/barbican/barbican-api-paste.ini': + {'group': 'barbican', 'mode': '640'}, + '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + # From security guide + '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'}, + }, + 'glance': { + # From security guide + '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-registry-paste.ini': + {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'}, + '/etc/glance/glance-scrubber.conf': {'group': 'glance', 
'mode': '640'}, + '/etc/glance/glance-swift-store.conf': + {'group': 'glance', 'mode': '640'}, + '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'}, + }, + 'keystone': { + # From security guide + '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/keystone-paste.ini': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/certs/signing_cert.pem': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/private/signing_key.pem': + {'group': 'keystone', 'mode': '640'}, + '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'}, + }, + 'manilla': { + # From security guide + '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'}, + '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'}, + }, + 'neutron-gateway': { + '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, + '/etc/neutron/rootwrap.conf': {'mode': '640'}, + '/etc/neutron/rootwrap.d': {'mode': '755'}, + '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + }, + 'neutron-api': { + # From security guide + '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, + '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'}, + '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'}, + # Additional validations + '/etc/neutron/rootwrap.d': {'mode': '755'}, + '/etc/neutron/neutron_lbaas.conf': {'mode': '644'}, + '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'}, + '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + }, + 'nova-cloud-controller': { + # From security guide + '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, + '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'}, + '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + # Additional validations + '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + }, + 'nova-compute': { + # From security guide + '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, + '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'}, + # Additional Validations + '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova/nm.conf': {'mode': '644'}, + '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. 
+
+    :param file: The path to a file to stat
+    :type file: str
+    :returns: owner, group, and mode of the specified file
+    :rtype: Ownership
+    :raises subprocess.CalledProcessError: If the underlying stat fails
+    """
+    out = subprocess.check_output(
+        ['stat', '-c', '%U %G %a', file]).decode('utf-8')
+    return Ownership(*out.strip().split(' '))
+
+
+@cached
+def _config_ini(path):
+    """
+    Parse an ini file
+
+    :param path: The path to a file to parse
+    :type path: str
+    :returns: Configuration contained in path
+    :rtype: Dict
+    """
+    conf = configparser.ConfigParser()
+    conf.read(path)
+    return dict(conf)
+
+
+def _validate_file_ownership(owner, group, file_name):
+    """
+    Validate that a specified file is owned by `owner:group`.
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        assert False, "Specified file does not exist: {}".format(file_name)
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: The desired file mode
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        assert False, "Specified file does not exist: {}".format(file_name)
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = 
options.get('mode', config.get('permissions', '600')) + if '*' in file_name: + for file in glob.glob(file_name): + if file not in files.keys(): + if os.path.isfile(file): + _validate_file_mode(mode, file) + else: + if os.path.isfile(file_name): + _validate_file_mode(mode, file_name) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_keystone(audit_options): + """Validate that the service uses Keystone for authentication.""" + section = _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'DEFAULT'" + assert section.get('auth_strategy') == "keystone", \ + "Application is not using Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_keystone(audit_options): + """Verify that TLS is used to communicate with Keystone.""" + section = _config_section(audit_options, 'keystone_authtoken') + assert section is not None, "Missing section 'keystone_authtoken'" + assert not section.get('insecure') and \ + "https://" in section.get("auth_uri"), \ + "TLS is not used for Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_glance(audit_options): + """Verify that TLS is used to communicate with Glance.""" + section = _config_section(audit_options, 'glance') + assert section is not None, "Missing section 'glance'" + assert not section.get('insecure') and \ + "https://" in section.get("api_servers"), \ + "TLS is not used for Glance" diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 78a339f6..fc634cc6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -29,6 +29,7 @@ filter_installed_packages, ) from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, is_relation_made, local_unit, @@ -868,7 +869,7 @@ def get_network_addresses(self): addr = network_get_primary_address( ADDRESS_MAP[net_type]['binding'] ) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): addr = fallback endpoint = resolve_address(net_type) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index 73102af7..df83b91b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -13,6 +13,7 @@ # limitations under the License. 
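# The context.py hunk above and the ip.py hunk below apply the same
# hardening: when a unit has no binding for the requested network space,
# fall back to a default address instead of letting the hook error out.
# A minimal sketch of the pattern (the helper name primary_or_fallback is
# illustrative only; NoNetworkBinding, network_get_primary_address and
# unit_get all come from charmhelpers.core.hookenv, as the import hunks
# here show):
#
#     from charmhelpers.core.hookenv import (
#         NoNetworkBinding,
#         network_get_primary_address,
#         unit_get,
#     )
#
#     def primary_or_fallback(binding):
#         try:
#             # Preferred: the primary address of the bound network space.
#             return network_get_primary_address(binding)
#         except (NotImplementedError, NoNetworkBinding):
#             # Provider predates network-get, or the space is unbound.
#             return unit_get('private-address')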
from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, unit_get, service_name, @@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): # configuration is not in use try: resolved_address = network_get_primary_address(binding) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): resolved_address = fallback_addr if resolved_address is None: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 63c93044..22aa978b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -59,6 +59,7 @@ service_stop, service_running, umount, + cmp_pkgrevno, ) from charmhelpers.fetch import ( apt_install, @@ -178,7 +179,6 @@ def remove_cache_tier(self, cache_pool): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': + if cmp_pkgrevno('ceph', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -196,7 +196,8 @@ def remove_cache_tier(self, cache_pool): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): """Return the number of placement groups to use when creating the pool. Returns the number of placement groups which should be specified when @@ -229,6 +230,9 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. :return: int. The number of pgs to use. """ @@ -243,17 +247,20 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): # If the expected-osd-count is specified, then use the max between # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) + osd_list = get_osds(self.service, device_class) expected = config('expected-osd-count') or 0 if osd_list: - osd_count = max(expected, len(osd_list)) + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) # Log a message to provide some insight if the calculations claim # to be off because someone is setting the expected count and # there are more OSDs in reality. Try to make a proper guess # based upon the cluster itself. - if expected and osd_count != expected: + if not device_class and expected and osd_count != expected: log("Found more OSDs than provided expected count. 
" "Using the actual count instead", INFO) elif expected: @@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name): def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): + locality=None, durability_estimator=None, + device_class=None): """ Create a new erasure code profile if one does not already exist for it. Updates the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param coding_chunks: int :param locality: int :param durability_estimator: int + :param device_class: six.string_types :return: None. Can raise CalledProcessError """ - version = ceph_version() - # Ensure this failure_domain is allowed by Ceph validator(failure_domain, six.string_types, ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) @@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 # failure_domain changed in luminous - if version and version >= '12.0.0': + if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) else: cmd.append('ruleset-failure-domain=' + failure_domain) + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -744,20 +759,26 @@ def pool_exists(service, name): return name in out.split() -def get_osds(service): +def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the - cluster. + cluster (optionally filtered by storage device class). 
+ + :param device_class: Class of storage device for OSD's + :type device_class: str """ - version = ceph_version() - if version and version >= '0.56': + luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) def install(): @@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if ceph_version() >= '12.0.0': + if cmp_pkgrevno('ceph', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None, return True -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - class CephBrokerRq(object): """Ceph broker request. @@ -1147,7 +1152,8 @@ def add_op_request_access_to_group(self, name, namespace=None, 'object-prefix-permissions': object_prefix_permissions}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): + weight=None, group=None, namespace=None, + app_name=None): """Adds an operation to create a pool. @param pg_num setting: optional setting. If not provided, this value @@ -1155,6 +1161,11 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, cluster at the time of creation. Note that, if provided, this value will be capped at the current available maximum. @param weight: the percentage of data the pool makes up + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') @@ -1162,7 +1173,7 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, - 'group-namespace': namespace}) + 'group-namespace': namespace, 'app-name': app_name}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 2e287659..4744eb43 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -50,6 +50,11 @@ MARKER = object() SH_MAX_ARG = 131071 + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + cache = {} @@ -1414,3 +1419,72 @@ def unit_doomed(unit=None): # I don't think 'dead' units ever show up in the goal-state, but # check anyway in addition to 'dying'. return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. 
+
+    Get charm proxy settings from environment variables that correspond to
+    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) in a format suitable for passing to an application that
+    reacts to proxy settings passed as environment variables. Some applications
+    support lowercase or uppercase notation (e.g. curl), some support only
+    lowercase (e.g. wget); there are also subjectively rare cases of only
+    uppercase notation support. no_proxy CIDR and wildcard support also varies
+    between runtimes and applications as there is no enforced standard.
+
+    Some applications may connect to multiple destinations and expose config
+    options that would affect only proxy settings for a specific destination;
+    these should be handled in charms in an application-specific manner.
+
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Option(None, dict[str, str])
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
index 8a5cadf1..2394caf3 100644
--- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
+++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py
@@ -19,15 +19,16 @@
 import six
 import time
 import subprocess
-from tempfile import NamedTemporaryFile
 
 from charmhelpers.core.host import (
-    lsb_release
+    get_distrib_codename,
+    CompareHostReleases,
 )
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     WARNING,
+    env_proxy_settings,
 )
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
 
@@ -303,12 +304,17 @@ def import_key(key):
     """Import an ASCII Armor key.
 
     A Radix64 format keyid is also supported for backwards
-    compatibility, but should never be used; the key retrieval
-    mechanism is insecure and subject to man-in-the-middle attacks
-    voiding all signature checks using that key.
-
-    :param keyid: The key in ASCII armor format,
-                  including BEGIN and END markers.
+    compatibility. In this case the Ubuntu keyserver will be
+    queried for a key via HTTPS by its keyid. This method
+    is less preferable because https proxy servers may
+    require traffic decryption which is equivalent to a
+    man-in-the-middle attack (a proxy server impersonates
+    keyserver TLS certificates and has to be explicitly
+    trusted by the system).
+
+    :param key: A GPG key in ASCII armor format,
+                including BEGIN and END markers or a keyid.
+    :type key: (bytes, str)
     :raises: GPGKeyError if the key could not be imported
     """
     key = key.strip()
@@ -319,35 +325,137 @@ def import_key(key):
         log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
         if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
                 '-----END PGP PUBLIC KEY BLOCK-----' in key):
-            log("Importing ASCII Armor PGP key", level=DEBUG)
-            with NamedTemporaryFile() as keyfile:
-                with open(keyfile.name, 'w') as fd:
-                    fd.write(key)
-                    fd.write("\n")
-                cmd = ['apt-key', 'add', keyfile.name]
-                try:
-                    subprocess.check_call(cmd)
-                except subprocess.CalledProcessError:
-                    error = "Error importing PGP key '{}'".format(key)
-                    log(error)
-                    raise GPGKeyError(error)
+            log("Writing provided PGP key in the binary format", level=DEBUG)
+            if six.PY3:
+                key_bytes = key.encode('utf-8')
+            else:
+                key_bytes = key
+            key_name = _get_keyid_by_gpg_key(key_bytes)
+            key_gpg = _dearmor_gpg_key(key_bytes)
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
         else:
             raise GPGKeyError("ASCII armor markers missing from GPG key")
     else:
-        # We should only send things obviously not a keyid offsite
-        # via this unsecured protocol, as it may be a secret or part
-        # of one.
        log("PGP key found (looks like Radix64 format)", level=WARNING)
-        log("INSECURLY importing PGP key from keyserver; "
+        log("SECURELY importing PGP key from keyserver; "
             "full key not provided.", level=WARNING)
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
+        # to retrieve GPG keys. `apt-key adv` command is deprecated as is
+        # apt-key in general as noted in its manpage. See lp:1433761 for more
+        # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
+        # dearmored gpg keys.
+        key_asc = _get_key_by_keyid(key)
+        # write the key in GPG format so that apt-key list shows it
+        key_gpg = _dearmor_gpg_key(key_asc)
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+    """Get a GPG key fingerprint by GPG key material.
+
+    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
+    or binary GPG key material. Can be used, for example, to generate file
+    names for keys passed via charm options.
+ + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change + release = get_distrib_codename() + is_gpgv2_distro = CompareHostReleases(release) >= "bionic" + if is_gpgv2_distro: + # --import is mandatory, otherwise fingerprint is not printed + cmd = 'gpg --with-colons --import-options show-only --import --dry-run' + else: + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. 
Check your network setup'
                          ' (MTU, routing, DNS) and/or proxy server settings'
                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Writes GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
 
 
 def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +550,13 @@ def add_source(source, key=None, fail_invalid=False):
 
 def _add_proposed():
     """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
 
-    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
+    Uses get_distrib_codename to determine the correct stanza for
     the deb line.
 
     For intel architectures PROPOSED_POCKET is used for the release, but for
     other architectures PROPOSED_PORTS_POCKET is used for the release.
     """
-    release = lsb_release()['DISTRIB_CODENAME']
+    release = get_distrib_codename()
     arch = platform.machine()
     if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
         raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +569,16 @@ def _add_apt_repository(spec):
     """Add the spec using add_apt_repository
 
     :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
     """
     if '{series}' in spec:
-        series = lsb_release()['DISTRIB_CODENAME']
+        series = get_distrib_codename()
         spec = spec.replace('{series}', series)
-    _run_with_retries(['add-apt-repository', '--yes', spec])
+    # software-properties package for bionic properly reacts to proxy settings
+    # passed as environment variables (See lp:1433761). This is not the case
+    # for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https']))
 
 
 def _add_cloud_pocket(pocket):
@@ -534,7 +647,7 @@ def _verify_is_ubuntu_rel(release, os_release):
     :raises: SourceConfigError if the release is not the same as the
         ubuntu release.
     """
-    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    ubuntu_rel = get_distrib_codename()
     if release != ubuntu_rel:
         raise SourceConfigError(
             'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml
index a2b6aae3..e9497d89 100644
--- a/ceph-osd/tests/tests.yaml
+++ b/ceph-osd/tests/tests.yaml
@@ -17,3 +17,4 @@ tests:
     - zaza.charm_tests.ceph.tests.CephLowLevelTest
     - zaza.charm_tests.ceph.tests.CephRelationTest
     - zaza.charm_tests.ceph.tests.CephTest
+    - zaza.charm_tests.ceph.osd.tests.SecurityTest

From 89bc20d990c4940723e7673cef533a7f68e598f3 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Wed, 13 Mar 2019 11:26:19 +0100
Subject: [PATCH 1711/2699] Use ``ceph-conf`` to retrieve default values

The ``ceph`` command expects a connection to a running cluster even if
it does not use it.

Change-Id: Ied3edf63706e2d48d2ea09056bc6d6508e9e3e0f
Closes-Bug: #1819852
---
 ceph-mon/hooks/utils.py                | 13 ++++++-------
 ceph-mon/unit_tests/test_ceph_utils.py |  9 ++++++---
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py
index 158d5912..0ac0405f 100644
--- a/ceph-mon/hooks/utils.py
+++ b/ceph-mon/hooks/utils.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
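# Sketch of the behaviour the rewritten helper below relies on, assuming
# only that the ceph-common package (which ships ceph-conf) is installed:
# ``ceph-conf -c /dev/null -D`` dumps compiled-in defaults from an empty
# config without contacting a mon, and ``--format json`` makes the output
# parseable:
#
#     import json
#     import subprocess
#
#     ceph_conf = json.loads(subprocess.check_output(
#         ['ceph-conf', '-c', '/dev/null', '-D', '--format', 'json'],
#         universal_newlines=True))
#     print(int(ceph_conf['rbd_default_features']))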
+import json
 import re
 import socket
 import subprocess
@@ -183,14 +184,12 @@ def get_default_rbd_features():
 
     :returns: Installed Ceph's default value for ``rbd_default_features``
     :rtype: int
-    :raises: subprocess.CalledProcessError
+    :raises: IndexError, json.JSONDecodeError, subprocess.CalledProcessError
     """
-    output = subprocess.check_output(
-        ['ceph', '-c', '/dev/null', '--show-config'],
-        universal_newlines=True)
-    for line in output.splitlines():
-        if 'rbd_default_features' in line:
-            return int(line.split('=')[1].lstrip().rstrip())
+    ceph_conf = json.loads(subprocess.check_output(
+        ['ceph-conf', '-c', '/dev/null', '-D', '--format', 'json'],
+        universal_newlines=True))
+    return int(ceph_conf['rbd_default_features'])
 
 
 def add_rbd_mirror_features(rbd_features):
diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py
index 9765b8b6..076429a3 100644
--- a/ceph-mon/unit_tests/test_ceph_utils.py
+++ b/ceph-mon/unit_tests/test_ceph_utils.py
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
 import mock
 
 import test_utils
@@ -40,13 +41,15 @@ def test_has_rbd_mirrors(self, _relation_ids, _related_units):
 
     @mock.patch.object(utils.subprocess, 'check_output')
     def test_get_default_rbd_features(self, _check_output):
-        _check_output.return_value = ('a = b\nrbd_default_features = 61\n'
-                                      'c = d\n')
+        _check_output.return_value = json.dumps(
+            {'a': 'b',
+             'rbd_default_features': '61',
+             'c': 'd'})
         self.assertEquals(
             utils.get_default_rbd_features(),
             61)
         _check_output.assert_called_once_with(
-            ['ceph', '-c', '/dev/null', '--show-config'],
+            ['ceph-conf', '-c', '/dev/null', '-D', '--format', 'json'],
             universal_newlines=True)
 
     def test_add_mirror_rbd_features(self):

From 1f95042a9615a071631dfceaa0e834b3b858f94d Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 13 Mar 2019 11:05:10 +0000
Subject: [PATCH 1712/2699] Ensure relation id is passed to remote_service_name

It's possible for the client_relation handler to be called from outside
of the original hook context; as a result, use of remote_service_name is
not safe, as it will evaluate in the current hook context rather than in
the relation_id/unit context provided in the function parameters.

Passing the relation_id to the helper ensures that the remote units for
the intended relation are used to generate the remote service name.

Change-Id: I2305a111c0a618156208e4f96acda5e63736051c
Closes-Bug: 1819892
---
 ceph-mon/hooks/ceph_hooks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 2de99fc2..30492096 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -759,7 +759,7 @@ def client_relation(relid=None, unit=None):
     if ready_for_service():
         log('mon cluster in quorum and osds bootstrapped '
             '- providing client with keys, processing broker requests')
-        service_name = hookenv.remote_service_name()
+        service_name = hookenv.remote_service_name(relid=relid)
         public_addr = get_public_addr()
         data = {'key': ceph.get_named_key(service_name),
                 'auth': config('auth-supported'),

From 2886ae93a0789e3c1bec054aba857db2107cfa98 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Tue, 12 Mar 2019 13:38:22 +0100
Subject: [PATCH 1713/2699] Do not fail when called on an invalid device

When calling smartctl on bcache devices, we will get a non-zero return
code. In this case, we fail safe and do not enable a potentially
unavailable feature.
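In outline, the fix is a guard around the probe (a sketch mirroring the
hunk below; is_sata30orless shells out to smartctl, so on such devices it
raises subprocess.CalledProcessError):

    try:
        sata_3_or_less = is_sata30orless(device)
    except subprocess.CalledProcessError:
        # Probe failed (e.g. a bcache device): assume SATA <= 3.0 so
        # discard stays off instead of enabling an unverified feature.
        sata_3_or_less = True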
Additionally, other possible failures with device identification will be caught and safely handled this way. Change-Id: Ie10fb24cbfedf98c8bb53b710b95962579d3284e Closes-Bug: #1819652 --- ceph-osd/hooks/utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 3064f7a5..786c1bbe 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -211,9 +211,13 @@ def should_enable_discard(devices): if (device.startswith("/dev/nvme") or device.startswith("/dev/vd")): continue + try: + sata_3_or_less = is_sata30orless(device) + except subprocess.CalledProcessError: + sata_3_or_less = True if (device.startswith("/dev/") and os.path.exists(device) and - is_sata30orless(device)): + sata_3_or_less): discard_enable = False log("SSD Discard autodetection: {} is forcing discard off" "(sata <= 3.0)".format(device), level=WARNING) From 4cb8dce5b66fa8b5ff933d69ad9abfda8c909d30 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 14 Mar 2019 13:54:17 +0000 Subject: [PATCH 1714/2699] Ensure remote service name is resolved In the event that no units have presented on a relation, the remote_service_name function will return 'None'. Ensure this situation is detected and skip broker request processing if this is the case. Change-Id: I210ed914326761b7b353a7b86303d8877a112ca5 Related-Bug: 1819892 --- ceph-mon/hooks/ceph_hooks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 30492096..759bd73f 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -760,6 +760,10 @@ def client_relation(relid=None, unit=None): log('mon cluster in quorum and osds bootstrapped ' '- providing client with keys, processing broker requests') service_name = hookenv.remote_service_name(relid=relid) + if not service_name: + log('Unable to determine remote service name, deferring ' + 'processing of broker requests') + return public_addr = get_public_addr() data = {'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), From aae4daec3bf5642b32f468255feca4d264c3e20c Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 14 Mar 2019 09:32:13 +0100 Subject: [PATCH 1715/2699] Ensure clients are updated with RBD features bitmap The ``client-relation`` hook did not compute RBD features based on presence of ``rbd-mirror`` relation as it should. Also, at present, units joining the ``client-relation`` before any ``rbd-mirror`` relations have completed would not get the appropriate RBD features bitmap set on the relation. Change-Id: Ic9e60fc91ecc467dead099c73ecc71d80c907fba --- ceph-mon/hooks/ceph_hooks.py | 10 ++++++++-- ceph-mon/unit_tests/test_ceph_hooks.py | 21 ++++++++++++++++----- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 759bd73f..b96dc817 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -713,6 +713,11 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True): relation_set(relation_id=relid, relation_settings=data) + # make sure clients are updated with the appropriate RBD features + # bitmap. 
+ if recurse: + notify_client() + @hooks.hook('mds-relation-changed') @hooks.hook('mds-relation-joined') @@ -768,8 +773,9 @@ def client_relation(relid=None, unit=None): data = {'key': ceph.get_named_key(service_name), 'auth': config('auth-supported'), 'ceph-public-address': public_addr} - if config('default-rbd-features'): - data['rbd-features'] = config('default-rbd-features') + rbd_features = get_rbd_features() + if rbd_features: + data['rbd-features'] = rbd_features if not unit: unit = remote_unit() data.update( diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 4600915c..553afef6 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -327,6 +327,7 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks, 'get_rbd_features') @patch.object(ceph_hooks, 'relation_set') @patch.object(ceph_hooks, 'handle_broker_request') @patch.object(ceph_hooks, 'config') @@ -341,11 +342,13 @@ def test_client_relation(self, _get_named_key, _config, _handle_broker_request, - _relation_set): + _relation_set, + _get_rbd_features): _remote_service_name.return_value = 'glance' config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = lambda key: config[key] _handle_broker_request.return_value = {} + _get_rbd_features.return_value = None ceph_hooks.client_relation(relid='rel1', unit='glance/0') _ready_for_service.assert_called_once_with() _get_public_addr.assert_called_once_with() @@ -359,7 +362,7 @@ def test_client_relation(self, 'auth': False, 'ceph-public-address': _get_public_addr() }) - config.update({'default-rbd-features': 42}) + _get_rbd_features.return_value = 42 _relation_set.reset_mock() ceph_hooks.client_relation(relid='rel1', unit='glance/0') _relation_set.assert_called_once_with( @@ -371,6 +374,7 @@ def test_client_relation(self, 'rbd-features': 42, }) + @patch.object(ceph_hooks, 'get_rbd_features') @patch.object(ceph_hooks, 'config') @patch.object(ceph_hooks.ceph, 'get_named_key') @patch.object(ceph_hooks, 'get_public_addr') @@ -394,7 +398,8 @@ def test_client_relation_non_rel_hook(self, relation_set, remote_service_name, get_public_addr, get_named_key, - _config): + _config, + _get_rbd_features): # Check for LP #1738154 ready_for_service.return_value = True process_requests.return_value = 'AOK' @@ -404,6 +409,7 @@ def test_client_relation_non_rel_hook(self, relation_set, is_quorum.return_value = True config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = lambda key: config[key] + _get_rbd_features.return_value = None ceph_hooks.client_relation(relid='rel1', unit='glance/0') relation_set.assert_called_once_with( relation_id='rel1', @@ -714,7 +720,8 @@ def setUp(self): self.get_public_addr.return_value = '198.51.100.10' self.ceph.list_pools_detail.return_value = {'pool': {}} - def test_rbd_mirror_relation(self): + @patch.object(ceph_hooks, 'notify_client') + def test_rbd_mirror_relation(self, _notify_client): self.handle_broker_request.return_value = {} base_relation_settings = { 'auth': self.test_config.get('auth-supported'), @@ -730,12 +737,16 @@ def test_rbd_mirror_relation(self): relation_settings=base_relation_settings) self.test_relation.set( {'unique_id': None}) - ceph_hooks.rbd_mirror_relation('rbd-mirror:52', 'ceph-rbd-mirror/0') + _notify_client.assert_called_once_with() + _notify_client.reset_mock() + ceph_hooks.rbd_mirror_relation('rbd-mirror:52', 'ceph-rbd-mirror/0', + recurse=False) self.relation_set.assert_called_with( relation_id='rbd-mirror:52', 
relation_settings=base_relation_settings) self.test_relation.set( {'unique_id': json.dumps('otherSideIsReactiveEndpoint')}) + self.assertFalse(_notify_client.called) ceph_hooks.rbd_mirror_relation('rbd-mirror:53', 'ceph-rbd-mirror/0') self.ceph.get_rbd_mirror_key.assert_called_once_with( 'rbd-mirror.otherSideIsReactiveEndpoint') From 60f4d04d0072a6db9841b53840c3fcefe40dc6f6 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 18 Mar 2019 15:55:28 +0100 Subject: [PATCH 1716/2699] Add ``status`` action The action gets rbd mirror pool status for mirrored pools. Change-Id: Ie0962dbee9e0ba9298df6b2448d21180c3a9c0d4 --- ceph-rbd-mirror/src/actions.yaml | 13 ++++++++++++ ceph-rbd-mirror/src/actions/actions.py | 24 +++++++++++++++++++--- ceph-rbd-mirror/src/actions/status | 1 + ceph-rbd-mirror/unit_tests/test_actions.py | 24 +++++++++++++++------- 4 files changed, 52 insertions(+), 10 deletions(-) create mode 120000 ceph-rbd-mirror/src/actions/status diff --git a/ceph-rbd-mirror/src/actions.yaml b/ceph-rbd-mirror/src/actions.yaml index 8325746b..9412eb78 100644 --- a/ceph-rbd-mirror/src/actions.yaml +++ b/ceph-rbd-mirror/src/actions.yaml @@ -16,3 +16,16 @@ refresh-pools: Refresh list of pools from local and remote Ceph endpoint. As a side effect, mirroring will be configured for any manually created pools that the charm currently does not know about. +status: + description: | + Get mirror pool status + params: + verbose: + type: boolean + format: + type: string + default: plain + enum: + - plain + - json + - xml diff --git a/ceph-rbd-mirror/src/actions/actions.py b/ceph-rbd-mirror/src/actions/actions.py index 163d3585..3c2bd81b 100755 --- a/ceph-rbd-mirror/src/actions/actions.py +++ b/ceph-rbd-mirror/src/actions/actions.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
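# A hypothetical invocation of the new action once deployed (Juju 2.x
# CLI; the unit name is assumed), using the params declared in
# actions.yaml above:
#
#     juju run-action --wait ceph-rbd-mirror/0 status \
#         verbose=true format=json
#
# With format=json, rbd_mirror_action below returns a JSON document
# mapping each mirrored pool to its parsed ``rbd mirror pool status``
# output.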
+import json import os import subprocess import sys @@ -45,16 +46,32 @@ def rbd_mirror_action(args): ceph_local = reactive.endpoint_from_name('ceph-local') pools = (pool for pool, attrs in ceph_local.pools.items() if 'rbd' in attrs['applications']) - result = [] + result = {} cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name] if ch_core.hookenv.action_get('force'): cmd += ['--force'] + if ch_core.hookenv.action_get('verbose'): + cmd += ['--verbose'] + output_format = ch_core.hookenv.action_get('format') + if output_format: + cmd += ['--format', output_format] for pool in pools: output = subprocess.check_output(cmd + [pool], stderr=subprocess.STDOUT, universal_newlines=True) - result.append('{}: {}'.format(pool, output.rstrip())) - ch_core.hookenv.action_set({'output': '\n'.join(result)}) + if output_format == 'json': + result[pool] = json.loads(output) + else: + result[pool] = output.rstrip() + if output_format == 'json': + ch_core.hookenv.action_set({'output': json.dumps(result)}) + else: + output_str = '' + for pool, output in result.items(): + if output_str: + output_str += '\n' + output_str += '{}: {}'.format(pool, output) + ch_core.hookenv.action_set({'output': output_str}) def refresh_pools(args): @@ -83,6 +100,7 @@ def refresh_pools(args): 'demote': rbd_mirror_action, 'promote': rbd_mirror_action, 'refresh-pools': refresh_pools, + 'status': rbd_mirror_action, } diff --git a/ceph-rbd-mirror/src/actions/status b/ceph-rbd-mirror/src/actions/status new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-rbd-mirror/src/actions/status @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index 7fb3517d..5baa5415 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -51,7 +51,11 @@ def test_rbd_mirror_action(self): self.check_output.return_value = 'Promoted 0 mirrored images\n' actions.rbd_mirror_action(['promote']) self.endpoint_from_name.assert_called_once_with('ceph-local') - self.action_get.assert_called_once_with('force') + self.action_get.assert_has_calls([ + mock.call('force'), + mock.call('verbose'), + mock.call('format'), + ]) self.check_output.assert_has_calls([ mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', 'apool'], @@ -65,22 +69,28 @@ def test_rbd_mirror_action(self): # the order the pools has in the output string is undefined self.action_set.assert_called_once_with( {'output': mock.ANY}) - for entry in self.action_set.call_args[0][0]['output'].split('\n'): - assert (entry == 'apool: Promoted 0 mirrored images' or - entry == 'bpool: Promoted 0 mirrored images') - self.action_get.return_value = True + self.assertEquals( + sorted(self.action_set.call_args[0][0]['output'].split('\n')), + ['apool: Promoted 0 mirrored images', + 'bpool: Promoted 0 mirrored images']) + self.action_get.side_effect = [True, True, False] self.check_output.reset_mock() actions.rbd_mirror_action(['promote']) self.check_output.assert_has_calls([ mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', - '--force', 'apool'], + '--force', '--verbose', 'apool'], stderr=actions.subprocess.STDOUT, universal_newlines=True), mock.call(['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', - '--force', 'bpool'], + '--force', '--verbose', 'bpool'], stderr=actions.subprocess.STDOUT, universal_newlines=True), ], any_order=True) + self.action_get.assert_has_calls([ + mock.call('force'), + 
mock.call('verbose'), + mock.call('format'), + ]) def test_refresh_pools(self): self.patch_object(actions.reactive, 'is_flag_set') From 03a6d4dfdbd61cbc3a9b5d28988ccd145dd3b70c Mon Sep 17 00:00:00 2001 From: David Coronel Date: Wed, 20 Mar 2019 14:38:41 -0400 Subject: [PATCH 1717/2699] Update capitalization typo in readme Change-Id: I573509ee31eb87aa579b9fbfcdf0903f2cc675e1 --- ceph-mon/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 9898468b..4a646de7 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -17,7 +17,7 @@ adding storage devices. ## Actions -This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. to pause or resume, call: +This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. To pause or resume, call: `juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` From 7635ed792017a351ab76c4014fd18b36f571c922 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 7 Mar 2019 05:45:48 +0000 Subject: [PATCH 1718/2699] Add multisite testing Add bundles for testing of multisite configurations for OpenStack Queens and Rocky. Change-Id: I8bbd3977f13709422d3c2c7086d285b5736cb883 --- .../bundles/bionic-queens-multisite.yaml | 75 +++++++++++++++++++ .../tests/bundles/bionic-rocky-multisite.yaml | 75 +++++++++++++++++++ ceph-radosgw/tests/tests.yaml | 6 +- 3 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml diff --git a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml new file mode 100644 index 00000000..8cb411ad --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml @@ -0,0 +1,75 @@ +options: + source: &source distro +series: bionic +applications: + ceph-radosgw: + series: bionic + charm: ../../../ceph-radosgw + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: east-1 + region: east-1 + east-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + east-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + slave-ceph-radosgw: + series: bionic + charm: ../../../ceph-radosgw + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: west-1 + region: west-1 + west-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + west-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + region: "east-1 west-1" +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - east-ceph-osd:mon + - east-ceph-mon:osd +- - ceph-radosgw:mon + - east-ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service +- - west-ceph-osd:mon + - west-ceph-mon:osd +- - slave-ceph-radosgw:mon + - west-ceph-mon:radosgw +- - 
slave-ceph-radosgw:identity-service + - keystone:identity-service +- - slave-ceph-radosgw:master + - ceph-radosgw:slave diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml new file mode 100644 index 00000000..75e7eadb --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml @@ -0,0 +1,75 @@ +options: + source: &source cloud:bionic-rocky +series: bionic +applications: + ceph-radosgw: + series: bionic + charm: ../../../ceph-radosgw + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: east-1 + region: east-1 + east-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + east-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + slave-ceph-radosgw: + series: bionic + charm: ../../../ceph-radosgw + num_units: 1 + options: + source: *source + realm: testrealm + zonegroup: testzonegroup + zone: west-1 + region: west-1 + west-ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *source + west-ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + region: "east-1 west-1" +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - east-ceph-osd:mon + - east-ceph-mon:osd +- - ceph-radosgw:mon + - east-ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service +- - west-ceph-osd:mon + - west-ceph-mon:osd +- - slave-ceph-radosgw:mon + - west-ceph-mon:radosgw +- - slave-ceph-radosgw:identity-service + - keystone:identity-service +- - slave-ceph-radosgw:master + - ceph-radosgw:slave diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 8231399e..a8ed3404 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -7,10 +7,12 @@ gate_bundles: - xenial-pike - xenial-queens - bionic-queens + - bionic-queens-multisite + - bionic-rocky + - bionic-rocky-multisite smoke_bundles: - - bionic-queens -dev_bundles: - bionic-rocky +dev_bundles: - cosmic-rocky tests: - zaza.charm_tests.ceph.tests.CephRGWTest From d8a1542ba074974701f5f6a2cbc5ef2170d97f8d Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 28 Mar 2019 11:57:17 +0100 Subject: [PATCH 1719/2699] Handle relation departure Change-Id: I5b7600a524ce1ec7bce34efd12dadcea26197832 --- ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py | 5 +++++ ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index a31dd1c0..54b98bb1 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -68,6 +68,11 @@ def disable_services(): def render_stuff(*args): with charm.provide_charm_instance() as charm_instance: for endpoint in args: + if not endpoint.key: + ch_core.hookenv.log('Ceph endpoint "{}" flagged available yet ' + 'no key. 
Relation is probably departing.'
+                                .format(endpoint.endpoint_name),
+                                level=ch_core.hookenv.INFO)
+            return
         ch_core.hookenv.log('Ceph endpoint "{}" available, configuring '
                             'keyring'.format(endpoint.endpoint_name),
                             level=ch_core.hookenv.INFO)
diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
index 3d798c1e..d8dd5d6a 100644
--- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
+++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
@@ -133,6 +133,10 @@ def test_render_stuff(self):
         endpoint_remote.endpoint_name = 'ceph-remote'
         endpoint_remote.pools = {}
         self.crm_charm.services = ['aservice']
+        endpoint_local.key = None
+        handlers.render_stuff(endpoint_local, endpoint_remote)
+        self.assertFalse(self.crm_charm.configure_ceph_keyring.called)
+        endpoint_local.key = 'akey'
         handlers.render_stuff(endpoint_local, endpoint_remote)
         self.crm_charm.configure_ceph_keyring.assert_has_calls([
             mock.call(endpoint_local, cluster_name=None),

From f555075dcfe00a07feda4e262c124da6a46e48fb Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Sat, 30 Mar 2019 18:25:58 +0100
Subject: [PATCH 1720/2699] Centralize pool mirror eligibility decision making

Pool mirror eligibility is currently decided in generator expressions
repeated where needed.

Centralize this in one function.

Change-Id: I6e151621cefb4fe2c6339c9b16cd31c01b95e6f2
---
 ceph-rbd-mirror/src/actions/actions.py        |  3 +-
 .../lib/charm/openstack/ceph_rbd_mirror.py    | 17 ++++++-
 .../src/reactive/ceph_rbd_mirror_handlers.py  | 47 +++++++++----------
 ceph-rbd-mirror/unit_tests/test_actions.py    |  2 +
 .../test_ceph_rbd_mirror_handlers.py          |  4 ++
 5 files changed, 45 insertions(+), 28 deletions(-)

diff --git a/ceph-rbd-mirror/src/actions/actions.py b/ceph-rbd-mirror/src/actions/actions.py
index 3c2bd81b..9786e013 100755
--- a/ceph-rbd-mirror/src/actions/actions.py
+++ b/ceph-rbd-mirror/src/actions/actions.py
@@ -44,8 +44,7 @@ def rbd_mirror_action(args):
     action_name = os.path.basename(args[0])
     with charms_openstack.charm.provide_charm_instance() as charm:
         ceph_local = reactive.endpoint_from_name('ceph-local')
-        pools = (pool for pool, attrs in ceph_local.pools.items()
-                 if 'rbd' in attrs['applications'])
+        pools = charm.eligible_pools(ceph_local.pools)
         result = {}
         cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name]
         if ch_core.hookenv.action_get('force'):
diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
index d195c794..9486d38c 100644
--- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
+++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
@@ -59,6 +59,20 @@ def __init__(self, **kwargs):
         }
         super().__init__(**kwargs)
 
+    def eligible_pools(self, pools):
+        """Filter eligible pools.
+
+        :param pools: Dictionary with detailed pool information as provided
+                      over the ``ceph-rbd-mirror`` interface by the
+                      ``ceph-mon`` charm.
+        :type pools: dict
+        :returns: Dictionary with detailed pool information for pools eligible
+                  for mirroring.
+ :rtype: dict + """ + return {pool: attrs for pool, attrs in pools.items() + if 'rbd' in attrs['applications']} + def custom_assess_status_check(self): """Provide mirrored pool statistics through juju status.""" if (reactive.is_flag_set('config.rendered') and @@ -66,8 +80,7 @@ def custom_assess_status_check(self): reactive.is_flag_set('ceph-remote.available')): endpoint = reactive.endpoint_from_flag('ceph-local.available') stats = self.mirror_pools_summary( - (pool for pool, attrs in endpoint.pools.items() - if 'rbd' in attrs['applications'])) + self.eligible_pools(endpoint.pools)) ch_core.hookenv.log('mirror_pools_summary = "{}"' .format(stats), level=ch_core.hookenv.DEBUG) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 54b98bb1..a7d3adb1 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -110,27 +110,26 @@ def configure_pools(): local = reactive.endpoint_from_flag('ceph-local.available') remote = reactive.endpoint_from_flag('ceph-remote.available') with charm.provide_charm_instance() as charm_instance: - for pool, attrs in local.pools.items(): - if 'rbd' in attrs['applications']: - if not (charm_instance.mirror_pool_enabled(pool) and - charm_instance.mirror_pool_has_peers(pool)): - charm_instance.mirror_pool_enable(pool) - pg_num = attrs['parameters'].get('pg_num', None) - max_bytes = attrs['quota'].get('max_bytes', None) - max_objects = attrs['quota'].get('max_objects', None) - if 'erasure_code_profile' in attrs['parameters']: - ec_profile = attrs['parameters'].get( - 'erasure_code_profile', None) - remote.create_erasure_pool(pool, - erasure_profile=ec_profile, - pg_num=pg_num, - app_name='rbd', - max_bytes=max_bytes, - max_objects=max_objects) - else: - size = attrs['parameters'].get('size', None) - remote.create_replicated_pool(pool, replicas=size, - pg_num=pg_num, - app_name='rbd', - max_bytes=max_bytes, - max_objects=max_objects) + for pool, attrs in charm_instance.eligible_pools(local.pools).items(): + if not (charm_instance.mirror_pool_enabled(pool) and + charm_instance.mirror_pool_has_peers(pool)): + charm_instance.mirror_pool_enable(pool) + pg_num = attrs['parameters'].get('pg_num', None) + max_bytes = attrs['quota'].get('max_bytes', None) + max_objects = attrs['quota'].get('max_objects', None) + if 'erasure_code_profile' in attrs['parameters']: + ec_profile = attrs['parameters'].get( + 'erasure_code_profile', None) + remote.create_erasure_pool(pool, + erasure_profile=ec_profile, + pg_num=pg_num, + app_name='rbd', + max_bytes=max_bytes, + max_objects=max_objects) + else: + size = attrs['parameters'].get('size', None) + remote.create_replicated_pool(pool, replicas=size, + pg_num=pg_num, + app_name='rbd', + max_bytes=max_bytes, + max_objects=max_objects) diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index 5baa5415..09be4db5 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -46,11 +46,13 @@ def test_rbd_mirror_action(self): {'apool': {'applications': {'rbd': {}}}, 'bpool': {'applications': {'rbd': {}}}}) self.endpoint_from_name.return_value = endpoint + self.crm_charm.eligible_pools.return_value = endpoint.pools self.crm_charm.ceph_id = 'acephid' self.action_get.return_value = False self.check_output.return_value = 'Promoted 0 mirrored images\n' actions.rbd_mirror_action(['promote']) 
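+        # The action should resolve the local endpoint and delegate pool
+        # filtering to charm.eligible_pools(); both are asserted below.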
         self.endpoint_from_name.assert_called_once_with('ceph-local')
+        self.crm_charm.eligible_pools.assert_called_once_with(endpoint.pools)
         self.action_get.assert_has_calls([
             mock.call('force'),
             mock.call('verbose'),
             mock.call('format'),
         ])
diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
index d8dd5d6a..e56ac4d7 100644
--- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
+++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py
@@ -180,12 +180,15 @@ def test_configure_pools(self):
         endpoint_remote.endpoint_name = 'ceph-remote'
         self.endpoint_from_flag.side_effect = [endpoint_local,
                                                endpoint_remote]
+        self.crm_charm.eligible_pools.return_value = endpoint_local.pools
         self.crm_charm.mirror_pool_enabled.return_value = False
         handlers.configure_pools()
         self.endpoint_from_flag.assert_has_calls([
             mock.call('ceph-local.available'),
             mock.call('ceph-remote.available'),
         ])
+        self.crm_charm.eligible_pools.assert_called_once_with(
+            endpoint_local.pools)
         self.crm_charm.mirror_pool_enabled.assert_called_once_with(
             'cinder-ceph')
         self.crm_charm.mirror_pool_enable.assert_called_once_with(
@@ -216,6 +219,7 @@ def test_configure_pools(self):
         self.endpoint_from_flag.side_effect = [endpoint_local,
                                                endpoint_remote]
         endpoint_remote.create_replicated_pool.reset_mock()
+        self.crm_charm.eligible_pools.return_value = endpoint_local.pools
         handlers.configure_pools()
         endpoint_remote.create_erasure_pool.assert_called_once_with(
             'cinder-ceph', erasure_profile='prof', pg_num=42, app_name='rbd',

From 6f2f0ac2fae91592679351b656b64e0c82c18507 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Mon, 1 Apr 2019 12:08:55 +0200
Subject: [PATCH 1721/2699] Add action to force resync of images in all pools
 on local endpoint

There exist failure scenarios where abrupt shutdown and/or interruptions
to communication may lead to a split-brain situation where the RBD
Mirroring processes in both Ceph clusters claim to be the primary.

In such a situation the operator must decide which cluster has the most
recent data and should be elected primary by using the ``demote`` and
``promote`` (optionally with force parameter) actions.

After making this decision the secondary cluster must be resynced to
track the promoted master; this is done by running the ``resync-pools``
action on the non-master cluster.

Change-Id: I4f57c9202ed4d055066286f808369ec0ddddb7ea
---
 ceph-rbd-mirror/src/README.md              | 27 +++++++++++++++++-
 ceph-rbd-mirror/src/actions.yaml           | 12 ++++++++
 ceph-rbd-mirror/src/actions/actions.py     | 32 ++++++++++++++++++++++
 ceph-rbd-mirror/src/actions/resync-pools   |  1 +
 ceph-rbd-mirror/unit_tests/test_actions.py | 26 ++++++++++++++++++
 5 files changed, 97 insertions(+), 1 deletion(-)
 create mode 120000 ceph-rbd-mirror/src/actions/resync-pools

diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md
index 7742064c..4fd890b3 100644
--- a/ceph-rbd-mirror/src/README.md
+++ b/ceph-rbd-mirror/src/README.md
@@ -6,7 +6,32 @@ Ceph 12.2 Luminous or later is required.
 
 # Usage
 
-TBC
+## Recovering from abrupt shutdown
+
+There exist failure scenarios where abrupt shutdown and/or interruptions to
+communication may lead to a split-brain situation where the RBD Mirroring
+processes in both Ceph clusters claim to be the primary.
+
+In such a situation the operator must decide which cluster has the most
+recent data and should be elected primary by using the ``demote`` and
+``promote`` (optionally with force parameter) actions.
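+
+Which side currently considers itself primary for a given image can be
+inspected directly with the ``rbd`` CLI on either cluster (illustrative
+command; pool and image names will vary with the deployment):
+
+    rbd info mypool/myimage
+
+For images with journal-based mirroring enabled the output includes a
+``mirroring primary`` field indicating whether the local cluster holds
+the primary role for that image.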
+
+After making this decision the secondary cluster must be resynced to track
+the promoted master; this is done by running the ``resync-pools`` action on
+the non-master cluster.
+
+    juju run-action -m site-b ceph-rbd-mirror/leader --wait demote
+    juju run-action -m site-a ceph-rbd-mirror/leader --wait promote force=True
+
+    juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True
+    juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True
+
+    juju run-action -m site-b ceph-rbd-mirror/leader --wait resync-pools i-really-mean-it=True
+
+__NOTE__ When using Ceph Luminous, the mirror state information will not be
+accurate after recovering from unclean shutdown. Regardless of the output of
+the status information you will be able to write to images after a forced
+promote.
 
 # Bugs
diff --git a/ceph-rbd-mirror/src/actions.yaml b/ceph-rbd-mirror/src/actions.yaml
index 9412eb78..972de6cb 100644
--- a/ceph-rbd-mirror/src/actions.yaml
+++ b/ceph-rbd-mirror/src/actions.yaml
@@ -16,6 +16,18 @@ refresh-pools:
     Refresh list of pools from local and remote Ceph endpoint. As a
     side effect, mirroring will be configured for any manually created
     pools that the charm currently does not know about.
+resync-pools:
+  description: |
+    \
+    USE WITH CAUTION - Force image resync for all images in pools on local
+    Ceph endpoint.
+  params:
+    i-really-mean-it:
+      type: boolean
+      description: |
+        This must be set to true to perform the action
+  required:
+    - i-really-mean-it
 status:
   description: |
     Get mirror pool status
diff --git a/ceph-rbd-mirror/src/actions/actions.py b/ceph-rbd-mirror/src/actions/actions.py
index 9786e013..caa48888 100755
--- a/ceph-rbd-mirror/src/actions/actions.py
+++ b/ceph-rbd-mirror/src/actions/actions.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
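+
+# Illustrative invocation of the ``resync-pools`` action implemented below
+# (the unit and model names are examples; the parameter is required):
+#
+#   juju run-action -m site-b ceph-rbd-mirror/leader --wait resync-pools i-really-mean-it=True
+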
+import collections import json import os import subprocess @@ -95,10 +96,41 @@ def refresh_pools(args): return reactive.main() +def resync_pools(args): + """Force image resync on pools in local Ceph endpoint.""" + if not ch_core.hookenv.action_get('i-really-mean-it'): + ch_core.hookenv.action_fail('Required parameter not set') + return + with charms_openstack.charm.provide_charm_instance() as charm: + ceph_local = reactive.endpoint_from_name('ceph-local') + pools = charm.eligible_pools(ceph_local.pools) + result = collections.defaultdict(dict) + for pool in pools: + # list images in pool + output = subprocess.check_output( + ['rbd', '--id', charm.ceph_id, '--format', 'json', + '-p', pool, 'ls'], universal_newlines=True) + images = json.loads(output) + for image in images: + output = subprocess.check_output( + ['rbd', '--id', charm.ceph_id, 'mirror', 'image', 'resync', + '{}/{}'.format(pool, image)], universal_newlines=True) + result[pool][image] = output.rstrip() + output_str = '' + for pool in result: + for image in result[pool]: + if output_str: + output_str += '\n' + output_str += '{}/{}: {}'.format(pool, image, + result[pool][image]) + ch_core.hookenv.action_set({'output': output_str}) + + ACTIONS = { 'demote': rbd_mirror_action, 'promote': rbd_mirror_action, 'refresh-pools': refresh_pools, + 'resync-pools': resync_pools, 'status': rbd_mirror_action, } diff --git a/ceph-rbd-mirror/src/actions/resync-pools b/ceph-rbd-mirror/src/actions/resync-pools new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-rbd-mirror/src/actions/resync-pools @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index 09be4db5..1ec4bcc7 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -13,6 +13,7 @@ # limitations under the License. 
 import collections
+import json
 import mock
 import sys
 
@@ -111,6 +112,31 @@ def test_refresh_pools(self):
         self._KV.flush.assert_called_once_with()
         self.main.assert_called_once_with()
 
+    def test_resync_pools(self):
+        self.patch_object(actions.reactive, 'endpoint_from_name')
+        self.patch_object(actions.ch_core.hookenv, 'action_get')
+        self.patch_object(actions.subprocess, 'check_output')
+        self.patch_object(actions.ch_core.hookenv, 'action_set')
+        endpoint = mock.MagicMock()
+        endpoint.pools = collections.OrderedDict(
+            {'apool': {'applications': {'rbd': {}}}})
+        self.endpoint_from_name.return_value = endpoint
+        self.crm_charm.eligible_pools.return_value = endpoint.pools
+        self.crm_charm.ceph_id = 'acephid'
+        self.action_get.return_value = False
+        actions.resync_pools([])
+        self.assertFalse(self.check_output.called)
+        self.assertFalse(self.action_set.called)
+        self.action_get.return_value = True
+        self.check_output.side_effect = [
+            json.dumps(['imagea']),
+            'resync flagged for imagea\n',
+        ]
+        actions.resync_pools([])
+        self.assertEquals(
+            sorted(self.action_set.call_args[0][0]['output'].split('\n')),
+            ['apool/imagea: resync flagged for imagea'])
+
     def test_main(self):
         self.patch_object(actions, 'ACTIONS')
         self.patch_object(actions.ch_core.hookenv, 'action_fail')

From f2f4490c089c60d97d7894b0483b616a1904d28e Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Thu, 7 Mar 2019 10:48:18 +0100
Subject: [PATCH 1722/2699] Update docs and functional test definitions

Change-Id: I792b950a3e85b3bdc442676144253658bc5770c5
---
 ceph-rbd-mirror/src/README.md                 | 125 ++++++++++-
 ceph-rbd-mirror/src/metadata.yaml             |  13 +-
 .../tests/bundles/bionic-queens-e2e-lxd.yaml  | 207 ++++++++++++++++++
 .../src/tests/bundles/bionic-queens-e2e.yaml  |  97 +++++++-
 .../src/tests/bundles/bionic-queens.yaml      |  30 ++-
 .../tests/bundles/bionic-rocky-site-a.yaml    |  74 +++++++
 .../tests/bundles/bionic-rocky-site-b.yaml    |  27 +++
 .../src/tests/bundles/bionic-rocky.yaml       | 102 +++++++++
 .../src/tests/bundles/cosmic-rocky.yaml       | 102 +++++++++
 .../src/tests/bundles/disco-stein.yaml        | 102 +++++++++
 .../src/tests/bundles/xenial-pike.yaml        | 103 +++++++++
 .../src/tests/bundles/xenial-queens.yaml      | 103 +++++++++
 ceph-rbd-mirror/src/tests/tests.yaml          |  17 +-
 13 files changed, 1082 insertions(+), 20 deletions(-)
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml
 create mode 100644 ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml

diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md
index 4fd890b3..d88dd3f4 100644
--- a/ceph-rbd-mirror/src/README.md
+++ b/ceph-rbd-mirror/src/README.md
@@ -1,11 +1,132 @@
 # Overview
 
-This charm provides the Ceph RBD Mirror service for use with replication between multiple Ceph clusters.
+The ``ceph-rbd-mirror`` charm supports deployment of the Ceph RBD Mirror daemon
+and helps automate remote creation and configuration of mirroring for Ceph
+pools used to host RBD images.
 
-Ceph 12.2 Luminous or later is required.
+Actions for operator-driven failover and fallback for the pools used for RBD
+images are also provided.
+
+    Data center redundancy is a large topic and this work addresses a very
+    specific piece in the puzzle related to Ceph RBD images. You need to
+    combine this with `Ceph RADOS Gateway Multisite replication`_ and other
+    work to get a complete solution.
+
+.. _Ceph RADOS Gateway Multisite replication: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html
+
+This is supported both for multiple distinct Ceph clusters within a single Juju
+model and between different models with help from cross-model relations.
+
+When the charm is related to a local and a remote Ceph cluster it will
+automatically create pools eligible for mirroring on the remote cluster and
+enable mirroring.
+
+Eligible pools are selected on the basis of Ceph pool tagging; all pools with
+the application ``rbd`` enabled on them will be selected.
+
+    As of the 19.04 charm release charms will automatically have newly created
+    pools for use with RBD tagged with the ``rbd`` tag.
+
+    Only mirroring of whole pools is supported by the charm.
+
+A prerequisite for RBD Mirroring is that every RBD image within each pool is
+created with the ``journaling`` and ``exclusive-lock`` image features enabled.
+
+To support this the ``ceph-mon`` charm will announce these image features over
+the ``client`` relation when it has units connected to its ``rbd-mirror``
+endpoint. This will ensure that images created in the deployment get the
+appropriate features to support mirroring.
+
+    RBD Mirroring is only supported when deployed with Ceph Luminous or later.
+
+The Ceph RBD Mirror feature supports running multiple instances of the daemon.
+Having multiple daemons will cause the mirroring load to automatically be
+(re-)distributed between the daemons.
+
+This addresses both High Availability and performance concerns. You can
+make use of this feature by increasing the number of ``ceph-rbd-mirror`` units
+in your deployment.
+
+    The charm is written for Two-way Replication, which gives you the ability
+    to fail over and fall back to/from a secondary site.
+
+    Ceph does have support for mirroring to any number of slave clusters but
+    this is neither implemented nor supported by the charm.
+
+The charm is aware of network spaces and you will be able to tell the RBD
+Mirror daemon about network configuration by binding the ``public`` and
+``cluster`` endpoints.
+
+The RBD Mirror daemon will use the network associated with the ``cluster``
+endpoint for mirroring traffic when available.
+
+# Deployment
+
+    Example bundles with a minimal test configuration can be found
+    in the ``tests/bundles`` subdirectory of the ``ceph-rbd-mirror`` charm.
+
+    Both examples of two Ceph clusters deployed in one model and Ceph clusters
+    deployed in separate models are available.
+
+To make use of cross-model relations you must first set up an offer to export
+an application endpoint from a model. In this example we use the model names
+``site-a`` and ``site-b``.
+
+    juju switch site-a
+    juju offer ceph-mon:rbd-mirror site-a-rbd-mirror
+
+    juju switch site-b
+    juju offer ceph-mon:rbd-mirror site-b-rbd-mirror
+
+
+After creating the offers we can import the remote offer to a model and add
+a relation between applications just like we normally would do in a
+single-model deployment.
+ + juju switch site-a + juju consume admin/site-b.site-b-rbd-mirror + juju add-relation ceph-rbd-mirror:ceph-remote site-b-rbd-mirror + + juju switch site-b + juju consume admin/site-a.site-a-rbd-mirror + juju add-relation ceph-rbd-mirror:ceph-remote site-a-rbd-mirror # Usage +## Pools + +Pools created by other charms through the Ceph broker protocol will +automatically be detected and acted upon. Pools tagged with the ``rbd`` +application will be selected for mirroring. + +If you manually create a pool, either through actions on the ``ceph-mon`` +charm or by talking to Ceph directly, you must inform the ``ceph-rbd-mirror`` +charm about them. + +This is accomplished by executing the ``refresh-pools`` action. + + juju run-action -m site-a ceph-mon/leader --wait create-pool name=mypool \ + app-name=rbd + juju run-action -m site-a ceph-rbd-mirror/leader --wait refresh-pools + +## Failover and Fallback + +Controlled failover and fallback + + juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True + juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True + + juju run-action -m site-a ceph-rbd-mirror/leader --wait demote + + juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True + juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True + + juju run-action -m site-b ceph-rbd-mirror/leader --wait promote + +__NOTE__ When using Ceph Luminous, the mirror status information may not be +accurate. Specifically the ``entries_behind_master`` counter may never get to +``0`` even though the image is fully synchronized. + ## Recovering from abrupt shutdown There exist failure scenarios where abrupt shutdown and/or interruptions to diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 421f3849..c4bbc676 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -4,15 +4,11 @@ maintainer: OpenStack Charmers description: | RBD images can be asynchronously mirrored between two Ceph clusters. This capability uses the RBD journaling image feature to ensure crash-consistent - replication between clusters. Mirroring is configured on a per-pool basis - within peer clusters and can be configured to automatically mirror all images - within a pool or only a specific subset of images. Mirroring is configured - using the rbd command. The rbd-mirror daemon is responsible for pulling image - updates from the remote, peer cluster and applying them to the image within - the local cluster. + replication between clusters. The charm automatically creates pools used for + RBD images on the remote cluster and configures mirroring. Pools tagged with + the ``rbd`` application are selected. - Note: The charm requires Ceph Luminous or later and will only support - mirror configuration for whole pools. + NOTE: The charm requires Ceph Luminous or later. 
tags: - openstack - storage @@ -22,6 +18,7 @@ series: - xenial - bionic - cosmic + - disco extra-bindings: public: cluster: diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml new file mode 100644 index 00000000..eb0fb912 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml @@ -0,0 +1,207 @@ +series: bionic +machines: + '0': + constraints: mem=16G + series: bionic + '1': + constraints: mem=16G + series: bionic + '2': + constraints: mem=16G + series: bionic + '3': + constraints: mem=16G + series: bionic + '4': + constraints: mem=16G + series: bionic + '5': + constraints: mem=16G + series: bionic + '6': + constraints: mem=16G + series: bionic + '7': + constraints: mem=16G + series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:0 + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:1 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:2 + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + bindings: + '': libvirt-maas + to: + - lxd:0 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:1 + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + num_units: 0 + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:2 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + to: + - 0 + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - lxd:0 + - lxd:1 + - lxd:2 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + osd-devices: /dev/vdb + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 0 + - 1 + - 2 + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 3 + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - lxd:4 + - lxd:5 + - lxd:6 + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + osd-devices: /dev/vdb + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 4 + - 5 + - 6 + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 7 +relations: +- - mysql + - keystone +- - mysql + - cinder +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - 
ceph-rbd-mirror:ceph-remote +- - mysql:shared-db + - nova-cloud-controller:shared-db +- - keystone:identity-service + - nova-cloud-controller:identity-service +- - rabbitmq-server:amqp + - nova-cloud-controller:amqp +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:cloud-compute + - nova-cloud-controller:cloud-compute +- - glance:identity-service + - keystone:identity-service +- - glance:shared-db + - mysql:shared-db +- - glance:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - neutron-openvswitch:neutron-plugin + - nova-compute:neutron-plugin +- - neutron-openvswitch:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml index 835f4db0..eb0fb912 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml @@ -1,70 +1,159 @@ series: bionic +machines: + '0': + constraints: mem=16G + series: bionic + '1': + constraints: mem=16G + series: bionic + '2': + constraints: mem=16G + series: bionic + '3': + constraints: mem=16G + series: bionic + '4': + constraints: mem=16G + series: bionic + '5': + constraints: mem=16G + series: bionic + '6': + constraints: mem=16G + series: bionic + '7': + constraints: mem=16G + series: bionic applications: mysql: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:0 keystone: charm: cs:~openstack-charmers-next/keystone num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:1 rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:2 cinder: charm: cs:~openstack-charmers-next/cinder num_units: 1 + options: + block-device: None + bindings: + '': libvirt-maas + to: + - lxd:0 cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 glance: charm: cs:~openstack-charmers-next/glance num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:1 neutron-openvswitch: charm: cs:~openstack-charmers-next/neutron-openvswitch num_units: 0 nova-cloud-controller: charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + bindings: + '': libvirt-maas + to: + - lxd:2 nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + to: + - 0 ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - lxd:0 + - lxd:1 + - lxd:2 ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: source: distro - storage: - osd-devices: cinder,20G + osd-devices: /dev/vdb + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 0 + - 1 + - 2 ceph-rbd-mirror: series: bionic charm: ../../../ceph-rbd-mirror num_units: 1 options: source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 3 ceph-mon-b: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - lxd:4 + - lxd:5 + - lxd:6 ceph-osd-b: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: source: distro - storage: - osd-devices: cinder,20G + osd-devices: /dev/vdb + bindings: + public: libvirt-maas + cluster: 
libvirt-default + to: + - 4 + - 5 + - 6 ceph-rbd-mirror-b: series: bionic charm: ../../../ceph-rbd-mirror num_units: 1 options: source: distro + bindings: + public: libvirt-maas + cluster: libvirt-default + to: + - 7 relations: - - mysql - keystone diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml index f9c12243..8d1c28bd 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -3,18 +3,32 @@ applications: mysql: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 + options: + source: distro keystone: charm: cs:~openstack-charmers-next/keystone num_units: 1 + options: + openstack-origin: distro rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 + options: + source: distro cinder: charm: cs:~openstack-charmers-next/cinder num_units: 1 + options: + block-device: None + glance-api-version: 2 cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: distro ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -26,8 +40,9 @@ applications: num_units: 3 options: source: distro - storage: - osd-devices: cinder,20G + bluestore: False + use-direct-io: False + osd-devices: /opt ceph-rbd-mirror: series: bionic charm: ../../../ceph-rbd-mirror @@ -45,8 +60,9 @@ applications: num_units: 3 options: source: distro - storage: - osd-devices: cinder,20G + bluestore: False + use-direct-io: False + osd-devices: /opt ceph-rbd-mirror-b: series: bionic charm: ../../../ceph-rbd-mirror @@ -58,14 +74,20 @@ relations: - keystone - - mysql - cinder +- - mysql + - glance - - rabbitmq-server - cinder - - keystone - cinder +- - keystone + - glance - - cinder - cinder-ceph - - cinder-ceph - ceph-mon +- - glance + - ceph-mon - - ceph-mon:osd - ceph-osd:mon - - ceph-mon diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml new file mode 100644 index 00000000..905212b9 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml @@ -0,0 +1,74 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-rocky + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-rocky + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-rocky + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-rocky + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-rocky +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - 
keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml new file mode 100644 index 00000000..2f377961 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml @@ -0,0 +1,27 @@ +series: bionic +applications: + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-rocky + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-rocky + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-rocky +relations: +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..9da10be1 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,102 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-rocky + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-rocky + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-rocky + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-rocky + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-rocky + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-rocky + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-rocky + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-rocky +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml new file mode 100644 index 00000000..2671e2e3 --- 
/dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml @@ -0,0 +1,102 @@ +series: cosmic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: distro + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: distro + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: distro + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: distro + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: cosmic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror-b: + series: cosmic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml new file mode 100644 index 00000000..6b1ed05d --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml @@ -0,0 +1,102 @@ +series: disco +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: distro + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: distro + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: distro + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: distro + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: disco + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: distro + ceph-osd-b: + charm: 
cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: distro + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror-b: + series: disco + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: distro +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..d29319d1 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml @@ -0,0 +1,103 @@ +series: xenial +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:xenial-pike + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-pike + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: cloud:xenial-pike + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-pike + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:xenial-pike + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: xenial + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:xenial-pike + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-pike + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:xenial-pike + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror-b: + series: xenial + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:xenial-pike +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..167e0e70 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml @@ -0,0 +1,103 @@ +series: xenial +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: 
cloud:xenial-queens
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      block-device: None
+      glance-api-version: 2
+      openstack-origin: cloud:xenial-queens
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    num_units: 0
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:xenial-queens
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    options:
+      source: cloud:xenial-queens
+      bluestore: False
+      use-direct-io: False
+      osd-devices: /opt
+  ceph-rbd-mirror:
+    series: xenial
+    charm: ../../../ceph-rbd-mirror
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+  ceph-mon-b:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:xenial-queens
+  ceph-osd-b:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    options:
+      source: cloud:xenial-queens
+      bluestore: False
+      use-direct-io: False
+      osd-devices: /opt
+  ceph-rbd-mirror-b:
+    series: xenial
+    charm: ../../../ceph-rbd-mirror
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+relations:
+- - mysql
+  - keystone
+- - mysql
+  - cinder
+- - mysql
+  - glance
+- - rabbitmq-server
+  - cinder
+- - keystone
+  - cinder
+- - keystone
+  - glance
+- - cinder
+  - cinder-ceph
+- - cinder-ceph
+  - ceph-mon
+- - glance
+  - ceph-mon
+- - ceph-mon:osd
+  - ceph-osd:mon
+- - ceph-mon
+  - ceph-rbd-mirror:ceph-local
+- - ceph-mon
+  - ceph-rbd-mirror-b:ceph-remote
+- - ceph-mon-b:osd
+  - ceph-osd-b:mon
+- - ceph-mon-b
+  - ceph-rbd-mirror-b:ceph-local
+- - ceph-mon-b
+  - ceph-rbd-mirror:ceph-remote
diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml
index ad4e96bb..813be83b 100644
--- a/ceph-rbd-mirror/src/tests/tests.yaml
+++ b/ceph-rbd-mirror/src/tests/tests.yaml
@@ -2,9 +2,22 @@ charm_name: ceph-rbd-mirror
 smoke_bundles:
 - bionic-queens
 gate_bundles:
+- xenial-pike
+- xenial-queens
 - bionic-queens
+- bionic-rocky
+- cosmic-rocky
+comment: |
+  Hold ``disco-stein`` bundle until all dependent charms have disco support.
+  The e2e bundles are useful for development but add no additional value to
+  the functional tests.
+dev_bundles:
+- disco-stein
 - bionic-queens-e2e
+- bionic-queens-e2e-lxd
 configure:
-- zaza.charm_tests.noop.setup.basic_setup
+- zaza.charm_tests.glance.setup.add_lts_image
 tests:
-- zaza.charm_tests.noop.tests.NoopTest
+- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest
+- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest
+- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest

From 5a1fdac81f5b8054495cda696862cc780015b0e5 Mon Sep 17 00:00:00 2001
From: Marian Gasparovic
Date: Mon, 1 Apr 2019 11:32:10 +0200
Subject: [PATCH 1723/2699] Creates additional nrpe checks which parse
 warning/error messages

When Ceph is in a warning state for reason1 and a new reason2 appears
in the meantime, the operator is not alerted and also cannot mute
alarms selectively (as described in bug #1735579).

This patch allows specifying a dictionary of 'name':'regex' pairs,
where 'name' becomes the ceph-$name check in nrpe and $regex is
searched for in warning/error messages. It is set via the charm's
nagios_additional_checks parameter. A companion
nagios_additional_checks_critical parameter controls whether those
checks are reported as a warning or an error.

Change-Id: I73a7c15db88793bb78841d8395535c97ca2af872
Partial-Bug: 1735579
---
 ceph-mon/config.yaml                          | 19 ++++
 ceph-mon/files/nagios/check_ceph_status.py    | 86 +++++++++++++----
 ceph-mon/hooks/ceph_hooks.py                  | 20 +++++
 ceph-mon/unit_tests/test_ceph_hooks.py        |  2 +
 ceph-mon/unit_tests/test_check_ceph_status.py | 80 +++++++++++++++++
 5 files changed, 188 insertions(+), 19 deletions(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index fa40f7e1..4b4a3c0a 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -204,6 +204,25 @@ options:
     description: |
       Whether to report Critical instead of Warning when the nodeep-scrub
       flag is set.
+  nagios_additional_checks:
+    default: ""
+    type: string
+    description: |
+      Dictionary describing additional checks. Key is the name of a check,
+      which will be visible in Nagios. Value is a string (regular
+      expression) which is checked against status messages.
+      .
+      Example:
+      .
+      {'noout': 'noout', 'too_few': 'too few PGs', 'clock': 'clock skew',
+       'osd-down': 'osds down', 'degraded_redundancy': 'Degraded data redundancy'}
+      .
+  nagios_additional_checks_critical:
+    default: False
+    type: boolean
+    description: |
+      Whether additional checks report a warning or an error when their
+      checks are positive.
   use-direct-io:
     type: boolean
     default: True
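At its core, each additional check is a regular-expression scan over the cluster's status messages. A minimal, self-contained sketch of that matching logic (the helper name here is illustrative, not part of the charm's actual API):

    import re

    def match_additional_check(pattern, status_messages, critical=False):
        """Return a Nagios-style verdict for one additional check."""
        for message in status_messages:
            if re.search(pattern, message):
                return ('CRITICAL: ' if critical else 'WARNING: ') + message
        return 'All OK'

    # With the 'clock' entry from the config example above:
    # match_additional_check('clock skew', ['clock skew detected on mon.a'])
    # -> 'WARNING: clock skew detected on mon.a'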
diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py
index 9839bb85..986426b5 100755
--- a/ceph-mon/files/nagios/check_ceph_status.py
+++ b/ceph-mon/files/nagios/check_ceph_status.py
@@ -74,6 +74,7 @@ def get_ceph_version():
     Luminous onwards (12.2.0 or higher)
 
     :returns: list of integers, just the actual version number
+    :raises: UnknownError
     """
     try:
         out_string = subprocess.check_output(['ceph',
@@ -85,6 +86,36 @@
     return out_version
 
 
+def get_status_and_messages(status_data):
+    """
+    Used to get general status of a Ceph cluster as well as a list of
+    error/warning messages.
+
+    :param status_data: JSON formatted output from ceph health
+    :type status_data: str
+    :returns:
+        - string representing overall status of the cluster
+        - list of error or warning messages
+    :rtype: tuple(str, list)
+    :raises: UnknownError
+    """
+
+    try:
+        ceph_version = get_ceph_version()
+    except UnknownError as e:
+        raise UnknownError(e)
+    if ceph_version[0] >= 12 and ceph_version[1] >= 2:
+        # This is Luminous or above
+        overall_status = status_data['health'].get('status')
+        status_messages = [x['summary']['message'] for x in
+                           status_data['health'].get('checks', {}).values()]
+    else:
+        overall_status = status_data['health'].get('overall_status')
+        status_messages = [x['summary'] for x in
+                           status_data['health']['summary']]
+    return overall_status, status_messages
+
+
 def check_ceph_status(args):
     """
     Used to check the status of a Ceph cluster. Uses the output of 'ceph
@@ -100,6 +131,7 @@
     :param args: argparse object formatted in the convention of generic
     Nagios checks
     :returns string, describing the status of the ceph cluster.
+    :raises: UnknownError
     """
 
     status_critical = False
@@ -122,28 +154,32 @@
     required_keys = ['health', 'monmap', 'pgmap']
     if not all(key in status_data.keys() for key in required_keys):
         raise UnknownError('UNKNOWN: status data is incomplete')
-    ceph_version = get_ceph_version()
-    if ceph_version[0] >= 12 and ceph_version[1] >= 2:
-        # This is Luminous or above
-        overall_status = status_data['health'].get('status')
-        luminous = True
-    else:
-        overall_status = status_data['health'].get('overall_status')
-        luminous = False
+
+    try:
+        overall_status, status_messages = get_status_and_messages(status_data)
+    except UnknownError as e:
+        raise UnknownError(e)
+
+    message_all_ok = "All OK"
+
+    # if it is just an additional check, deal with it and ignore overall health
+    if args.additional_check is not None:
+        for status_message in status_messages:
+            if re.search(args.additional_check, status_message) is not None:
+                if args.additional_check_critical:
+                    msg = "CRITICAL: {}".format(status_message)
+                    raise CriticalError(msg)
+                else:
+                    msg = "WARNING: {}".format(status_message)
+                    raise WarnError(msg)
+        print(message_all_ok)
+        return message_all_ok
     if overall_status != 'HEALTH_OK':
         # Health is not OK, collect status message(s) and
         # decide whether to return warning or critical
         status_critical = False
         status_msg = []
-        if luminous:
-            status_messages = [x['summary']['message']
-                               for x in
-                               status_data['health'].get('checks').values()]
-        else:
-            status_messages = [x['summary']
-                               for x in
-                               status_data['health']['summary']]
         for status in status_messages:
             status_msg.append(status)
             # Check if nodeep-scrub is set and whether it should raise an error
@@ -188,9 +224,8 @@
             # overall_status == 'HEALTH_WARN':
             msg = "WARNING: {}".format(", ".join(status_msg))
             raise WarnError(msg)
-    message = "All OK"
-    print(message)
-    return message
+    print(message_all_ok)
+    return message_all_ok
 
 
 def parse_args(args):
@@ -217,6 +252,19 @@
                         "flag. If the nodeep-scrub flag is set,"
                         "the check returns critical if this param is"
                         "passed, otherwise it returns warning.")
+    parser.add_argument('--additional_check', dest='additional_check',
+                        default=None,
+                        help="Check if a given pattern exists in any status "
+                             "message. If it does, report warning or critical "
+                             "for this check according to the content of the "
+                             "additional_check_critical parameter.")
+    parser.add_argument('--additional_check_critical',
+                        dest='additional_check_critical', default=False,
+                        action='store_true',
+                        help="Specifies what is returned if a check is "
+                             "positive. If the argument is not provided, "
+                             "the check returns a warning. Otherwise it "
+                             "returns an error condition.")
     return parser.parse_args(args)
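For reference, the two health formats get_status_and_messages() has to cope with look roughly like this (abridged sketches; the field values are illustrative):

    # Luminous (>= 12.2) 'ceph status --format json':
    luminous = {'health': {'status': 'HEALTH_WARN',
                           'checks': {'OSDMAP_FLAGS': {'summary': {
                               'message': 'nodeep-scrub flag(s) set'}}}}}

    # Pre-Luminous:
    pre_luminous = {'health': {'overall_status': 'HEALTH_WARN',
                               'summary': [{'summary':
                                            'nodeep-scrub flag(s) set'}]}}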
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index b96dc817..34e06db9 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import ast
 import json
 import os
 import subprocess
@@ -857,6 +858,25 @@ def update_nrpe_config():
         description='Check Ceph health {{{}}}'.format(current_unit),
         check_cmd=check_cmd
     )
+
+    if config('nagios_additional_checks'):
+        additional_critical = config('nagios_additional_checks_critical')
+        x = ast.literal_eval(config('nagios_additional_checks'))
+
+        for key, value in x.items():
+            name = "ceph-{}".format(key.replace(" ", ""))
+            log("Adding check {}".format(name))
+            check_cmd = 'check_ceph_status.py -f {}' \
+                ' --additional_check \\"{}\\"' \
+                ' {}'.format(STATUS_FILE, value,
+                             "--additional_check_critical"
+                             if additional_critical is True else "")
+            nrpe_setup.add_check(
+                shortname=name,
+                description='Additional Ceph checks {{{}}}'.format(
+                    current_unit),
+                check_cmd=check_cmd
+            )
     nrpe_setup.write()
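Hook-side, the configured dictionary is parsed with ast.literal_eval and expanded into one NRPE check per entry. A standalone sketch of that expansion (the status-file path is a stand-in for the charm's STATUS_FILE constant):

    import ast

    conf = "{'clock': 'clock skew', 'osd-down': 'osds down'}"
    for key, regex in ast.literal_eval(conf).items():
        name = 'ceph-{}'.format(key.replace(' ', ''))
        # Mirrors the charm's quoting of the regex for the nrpe config file.
        cmd = ('check_ceph_status.py -f /path/to/status-file'
               ' --additional_check \\"{}\\"'.format(regex))
        print(name, '->', cmd)
    # ceph-clock -> check_ceph_status.py -f /path/to/status-file --additional_check \"clock skew\"
    # ceph-osd-down -> check_ceph_status.py -f /path/to/status-file --additional_check \"osds down\"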
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index 553afef6..371f0a25 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -50,6 +50,8 @@
     'nagios_misplaced_thresh': '10',
     'nagios_recovery_rate': '1',
     'nagios_raise_nodeepscrub': True,
+    'nagios_additional_checks': "",
+    'nagios_additional_checks_critical': False,
     'disable-pg-max-object-skew': False}

diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py
index 69ac4177..44f8e517 100644
--- a/ceph-mon/unit_tests/test_check_ceph_status.py
+++ b/ceph-mon/unit_tests/test_check_ceph_status.py
@@ -208,3 +208,83 @@ def test_health_crit_deepscrub_luminous(self,
         args = check_ceph_status.parse_args(['--raise_nodeepscrub'])
         self.assertRaises(check_ceph_status.CriticalError,
                           lambda: check_ceph_status.check_ceph_status(args))
+
+    # Additional Ok, luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_ok_deepscrub_luminous(self,
+                                              mock_ceph_version,
+                                              mock_subprocess):
+        mock_ceph_version.return_value = [12, 2, 0]
+        with open('unit_tests/ceph_nodeepscrub_luminous.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'osd out'])
+        check_output = check_ceph_status.check_ceph_status(args)
+        self.assertRegex(check_output, r"^All OK$")
+
+    # Additional warning, luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_warn_deepscrub_luminous(self,
+                                                mock_ceph_version,
+                                                mock_subprocess):
+        mock_ceph_version.return_value = [12, 2, 0]
+        with open('unit_tests/ceph_nodeepscrub_luminous.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'deep'])
+        self.assertRaises(check_ceph_status.WarnError,
+                          lambda: check_ceph_status.check_ceph_status(args))
+
+    # Additional error, luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_error_deepscrub_luminous(self,
+                                                 mock_ceph_version,
+                                                 mock_subprocess):
+        mock_ceph_version.return_value = [12, 2, 0]
+        with open('unit_tests/ceph_nodeepscrub_luminous.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'deep',
+                                             '--additional_check_critical'])
+        self.assertRaises(check_ceph_status.CriticalError,
+                          lambda: check_ceph_status.check_ceph_status(args))
+
+    # Additional Ok, pre-luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_ok_deepscrub_pre_luminous(self,
+                                                  mock_ceph_version,
+                                                  mock_subprocess):
+        mock_ceph_version.return_value = [10, 2, 9]
+        with open('unit_tests/ceph_nodeepscrub.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'osd out'])
+        check_output = check_ceph_status.check_ceph_status(args)
+        self.assertRegex(check_output, r"^All OK$")
+
+    # Additional warning, pre-luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_warn_deepscrub_pre_luminous(self,
+                                                    mock_ceph_version,
+                                                    mock_subprocess):
+        mock_ceph_version.return_value = [10, 2, 9]
+        with open('unit_tests/ceph_nodeepscrub.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'deep'])
+        self.assertRaises(check_ceph_status.WarnError,
+                          lambda: check_ceph_status.check_ceph_status(args))
+
+    # Additional error, pre-luminous, deepscrub
+    @patch('check_ceph_status.get_ceph_version')
+    def test_additional_error_deepscrub_pre_luminous(self,
+                                                     mock_ceph_version,
+                                                     mock_subprocess):
+        mock_ceph_version.return_value = [10, 2, 9]
+        with open('unit_tests/ceph_nodeepscrub.json') as f:
+            tree = f.read()
+        mock_subprocess.return_value = tree.encode('UTF-8')
+        args = check_ceph_status.parse_args(['--additional_check', 'deep',
+                                             '--additional_check_critical'])
+        self.assertRaises(check_ceph_status.CriticalError,
+                          lambda: check_ceph_status.check_ceph_status(args))

From bfc6d36a297062e9f5130745c3e9538c8db60497 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Fri, 29 Mar 2019 07:23:01 +0100
Subject: [PATCH 1724/2699] Handle inability to retrieve status gracefully

Tactical fix for occasional segfault in ``rbd mirror pool status``

Change-Id: Ic50d06ee646ca525085fa009d344c8eabd178839
Partial-Bug: #1820976
---
 .../src/lib/charm/openstack/ceph_rbd_mirror.py   | 9 +++++++--
 .../test_lib_charm_openstack_ceph_rbd_mirror.py  | 4 ++++
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
index 9486d38c..4b3270e1 100644
--- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
+++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py
@@ -79,8 +79,13 @@ def custom_assess_status_check(self):
                 reactive.is_flag_set('ceph-local.available') and
                 reactive.is_flag_set('ceph-remote.available')):
             endpoint = reactive.endpoint_from_flag('ceph-local.available')
-            stats = self.mirror_pools_summary(
-                self.eligible_pools(endpoint.pools))
+            try:
+                stats = self.mirror_pools_summary(
+                    self.eligible_pools(endpoint.pools))
+            except subprocess.CalledProcessError as e:
+                ch_core.hookenv.log('Unable to retrieve mirror pool
status: ' + '"{}"'.format(e)) + return None, None ch_core.hookenv.log('mirror_pools_summary = "{}"' .format(stats), level=ch_core.hookenv.DEBUG) diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 08933879..93d02ba3 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -14,6 +14,7 @@ import collections import mock +import subprocess import charms_openstack.test_utils as test_utils @@ -63,6 +64,9 @@ def test_custom_assess_status_check(self): self.assertEqual(crmc.custom_assess_status_check(), ('active', 'Unit is ready (Pools OK (1) ' 'Images Primary (2))')) + crmc.mirror_pools_summary.side_effect = subprocess.CalledProcessError( + 42, []) + self.assertEqual(crmc.custom_assess_status_check(), (None, None)) def test__mirror_pool_info(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') From 6afcb64dddb9d1eb02b7d24994aa9026ef3920da Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Apr 2019 10:11:48 +0200 Subject: [PATCH 1725/2699] Sync charm-helpers for Stein release As a part of the Stein release, we need to ensure that charmhelpers is up to date. Change-Id: Ic037cadc6eab7ba6fa6379f3b5cc822297cd01a4 --- ceph-mon/hooks/charmhelpers/cli/unitdata.py | 9 ++ .../audits/openstack_security_guide.py | 113 ++++++----------- .../contrib/openstack/cert_utils.py | 18 ++- .../charmhelpers/contrib/openstack/context.py | 34 ++++- .../charmhelpers/contrib/openstack/ip.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 4 +- .../contrib/storage/linux/ceph.py | 116 ++++++++++++++---- .../contrib/storage/linux/utils.py | 41 +++++++ ceph-mon/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 14 +++ ceph-mon/hooks/charmhelpers/core/sysctl.py | 13 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 16 +-- 12 files changed, 255 insertions(+), 126 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/cli/unitdata.py b/ceph-mon/hooks/charmhelpers/cli/unitdata.py index c5728582..acce846f 100644 --- a/ceph-mon/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/cli/unitdata.py @@ -19,9 +19,16 @@ @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") def unitdata_cmd(subparser): nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') get_cmd.add_argument('key', help='Key to retrieve the value of') get_cmd.set_defaults(action='get', value=None) + + getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') + getrange_cmd.add_argument('key', metavar='prefix', + help='Prefix of the keys to retrieve') + getrange_cmd.set_defaults(action='getrange', value=None) + set_cmd = nested.add_parser('set', help='Store data') set_cmd.add_argument('key', help='Key to set') set_cmd.add_argument('value', help='Value to store') @@ -30,6 +37,8 @@ def unitdata_cmd(subparser): def _unitdata_cmd(action, key, value): if action == 'get': return unitdata.kv().get(key) + elif action == 'getrange': + return unitdata.kv().getrange(key) elif action == 'set': unitdata.kv().set(key, value) unitdata.kv().flush() diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index ba5e2486..e5b7ac1e 100644 --- 
a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -30,14 +30,20 @@ cached, ) +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. +""" FILE_ASSERTIONS = { 'barbican': { - # From security guide - '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'}, - '/etc/barbican/barbican-api-paste.ini': - {'group': 'barbican', 'mode': '640'}, - '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'}, + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, }, 'ceph-mon': { '/var/lib/charm/ceph-mon/ceph.conf': @@ -60,82 +66,29 @@ {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, }, 'cinder': { - # From security guide - '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'}, - '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'}, - '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, }, 'glance': { - # From security guide - '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-registry-paste.ini': - {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-swift-store.conf': - {'group': 'glance', 'mode': '640'}, - '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'}, - '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'}, - '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance': {'group': 'glance', 'mode': '750'}, }, 'keystone': { - # From security guide - '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/keystone-paste.ini': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/certs/signing_cert.pem': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/private/signing_key.pem': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, }, 'manilla': { - # From security guide - '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'}, + '/etc/manila': {'group': 'manilla', 'mode': '750'}, }, 'neutron-gateway': { - '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, - '/etc/neutron/rootwrap.conf': {'mode': '640'}, - '/etc/neutron/rootwrap.d': {'mode': '755'}, - '/etc/neutron/*': 
{'group': 'neutron', 'mode': '644'}, + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, }, 'neutron-api': { - # From security guide - '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, - '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'}, - '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'}, - # Additional validations - '/etc/neutron/rootwrap.d': {'mode': '755'}, - '/etc/neutron/neutron_lbaas.conf': {'mode': '644'}, - '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'}, - '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, }, 'nova-cloud-controller': { - # From security guide - '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, - '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'}, - '/etc/nova/*': {'group': 'nova', 'mode': '640'}, - # Additional validations - '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova': {'group': 'nova', 'mode': '750'}, }, 'nova-compute': { - # From security guide - '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, - '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'}, - # Additional Validations - '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/nm.conf': {'mode': '644'}, - '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + '/etc/nova/': {'group': 'nova', 'mode': '750'}, }, 'openstack-dashboard': { # From security guide @@ -178,7 +131,7 @@ def _config_ini(path): return dict(conf) -def _validate_file_ownership(owner, group, file_name): +def _validate_file_ownership(owner, group, file_name, optional=False): """ Validate that a specified file is owned by `owner:group`. @@ -188,12 +141,16 @@ def _validate_file_ownership(owner, group, file_name): :type group: str :param file_name: Path to the file to verify :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool """ try: ownership = _stat(file_name) except subprocess.CalledProcessError as e: print("Error reading file: {}".format(e)) - assert False, "Specified file does not exist: {}".format(file_name) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) assert owner == ownership.owner, \ "{} has an incorrect owner: {} should be {}".format( file_name, ownership.owner, owner) @@ -203,7 +160,7 @@ def _validate_file_ownership(owner, group, file_name): print("Validate ownership of {}: PASS".format(file_name)) -def _validate_file_mode(mode, file_name): +def _validate_file_mode(mode, file_name, optional=False): """ Validate that a specified file has the specified permissions. 
@@ -211,12 +168,16 @@ def _validate_file_mode(mode, file_name): :type owner: str :param file_name: Path to the file to verify :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool """ try: ownership = _stat(file_name) except subprocess.CalledProcessError as e: print("Error reading file: {}".format(e)) - assert False, "Specified file does not exist: {}".format(file_name) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) assert mode == ownership.mode, \ "{} has an incorrect mode: {} should be {}".format( file_name, ownership.mode, mode) @@ -243,14 +204,15 @@ def validate_file_ownership(config): "Invalid ownership configuration: {}".format(key)) owner = options.get('owner', config.get('owner', 'root')) group = options.get('group', config.get('group', 'root')) + optional = options.get('optional', config.get('optional', 'False')) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): if os.path.isfile(file): - _validate_file_ownership(owner, group, file) + _validate_file_ownership(owner, group, file, optional) else: if os.path.isfile(file_name): - _validate_file_ownership(owner, group, file_name) + _validate_file_ownership(owner, group, file_name, optional) @audit(is_audit_type(AuditType.OpenStackSecurityGuide), @@ -264,14 +226,15 @@ def validate_file_permissions(config): raise RuntimeError( "Invalid ownership configuration: {}".format(key)) mode = options.get('mode', config.get('permissions', '600')) + optional = options.get('optional', config.get('optional', 'False')) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): if os.path.isfile(file): - _validate_file_mode(mode, file) + _validate_file_mode(mode, file, optional) else: if os.path.isfile(file_name): - _validate_file_mode(mode, file_name) + _validate_file_mode(mode, file_name, optional) @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 3a3c6de7..47b8603a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): os.symlink(hostname_key, custom_key) -def install_certs(ssl_dir, certs, chain=None): +def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): """Install the certs passed into the ssl dir and append the chain if provided. :param ssl_dir: str Directory to create symlinks in :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} :param chain: str Chain to be appended to certs + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. 
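Both the file-level and the new directory-level assertions ultimately reduce to comparing the owner, group and mode bits returned by stat. A self-contained sketch of the directory check (independent of charm-helpers' own _stat helper):

    import grp
    import os
    import pwd
    import stat

    def check_dir(path, owner='root', group='root', mode='750'):
        """Assert that path is owned by owner:group with the given octal mode."""
        st = os.stat(path)
        assert pwd.getpwuid(st.st_uid).pw_name == owner
        assert grp.getgrgid(st.st_gid).gr_name == group
        assert format(stat.S_IMODE(st.st_mode), 'o') == mode

    # e.g. check_dir('/etc/cinder', group='cinder', mode='750')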
Defaults to 'root' + :type group: str """ for cn, bundle in certs.items(): cert_filename = 'cert_{}'.format(cn) @@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None): # trust certs signed by an intermediate in the chain cert_data = cert_data + os.linesep + chain write_file( - path=os.path.join(ssl_dir, cert_filename), + path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, content=cert_data, perms=0o640) write_file( - path=os.path.join(ssl_dir, key_filename), + path=os.path.join(ssl_dir, key_filename), owner=user, group=group, content=bundle['key'], perms=0o640) def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None): + custom_hostname_link=None, user='root', group='root'): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. Defaults to 'root' + :type group: str """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit, if certs: certs = json.loads(certs) install_ca_cert(ca.encode()) - install_certs(ssl_dir, certs, chain) + install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index fc634cc6..d5133713 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -792,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator): # and service namespace accordingly. external_ports = [] service_namespace = None + user = group = 'root' def enable_modules(self): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] @@ -810,9 +811,11 @@ def configure_cert(self, cn=None): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), perms=0o640) + content=b64decode(cert), owner=self.user, + group=self.group, perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), perms=0o640) + content=b64decode(key), owner=self.user, + group=self.group, perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1932,3 +1935,30 @@ def __call__(self): return { 'openstack_release': ostack, 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. 
Valid values are + 'daily', 'weekly', 'monthly', 'yearly' + :type interval: str + :param count: The logrotate count option configures the 'count' times + the log files are being rotated before being + :type count: int + """ + self.location = location + self.interval = interval + self.count = 'rotate {}'.format(count) + + def __call__(self): + ctxt = { + 'logrotate_logs_location': self.location, + 'logrotate_interval': self.interval, + 'logrotate_count': self.count, + } + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index df83b91b..723aebc1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -159,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if is_address_in_network(bound_cidr, vip): resolved_address = vip break - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): # If no net-splits configured and no support for extra # bindings/network spaces so we expect a single vip resolved_address = vips[0] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 86b011b7..e5e25369 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -194,7 +194,7 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.19.0']), + ['2.20.0']), ]) # >= Liberty version->codename mapping @@ -656,7 +656,7 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(avail_vers, cur_vers) == 1 + return apt.version_compare(avail_vers, cur_vers) >= 1 def ensure_block_device(block_device): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 22aa978b..2c62092c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if cmp_pkgrevno('ceph', '10.1') >= 0: + if cmp_pkgrevno('ceph-common', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name): raise -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): +def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): """ - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool + :type pool_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + :raises: subprocess.CalledProcessError """ - # Set a byte quota on a RADOS pool in ceph. 
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + if max_bytes: + cmd = cmd + ['max_bytes', str(max_bytes)] + if max_objects: + cmd = cmd + ['max_objects', str(max_objects)] + check_call(cmd) def remove_pool_quota(service, pool_name): @@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") - luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) @@ -766,7 +769,7 @@ def get_osds(service, device_class=None): :param device_class: Class of storage device for OSD's :type device_class: str """ - luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 if luminous_or_later and device_class: out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', @@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if cmp_pkgrevno('ceph', '12.0.0') >= 0: + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -1153,19 +1156,46 @@ def add_op_request_access_to_group(self, name, namespace=None, def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None, - app_name=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - @param weight: the percentage of data the pool makes up + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str :param app_name: (Optional) Tag pool with application name. Note that there is certain protocols emerging upstream with regard to meaningful application names to use. 
Examples are ``rbd`` and ``rgw``. :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') @@ -1173,7 +1203,41 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count, 'pg_num': pg_num, 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name}) + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create a erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + self.ops.append({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index 6f846b05..c57aaf35 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -17,12 +17,53 @@ from stat import S_ISBLK from subprocess import ( + CalledProcessError, check_call, check_output, call ) +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. 
+ """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def is_block_device(path): ''' Confirm device at path is a valid block device node. diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 47c1fc35..32754ff9 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -47,6 +47,7 @@ cmp_pkgrevno, CompareHostReleases, get_distrib_codename, + arch ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index d7e920eb..a3162fac 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,6 @@ import subprocess +from charmhelpers.core.hookenv import cached from charmhelpers.core.strutils import BasicStringComparator @@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. + + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index 1f188d8c..f1f4a28f 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -28,7 +28,7 @@ __author__ = 'Jorge Niedbalski R. ' -def create(sysctl_dict, sysctl_file): +def create(sysctl_dict, sysctl_file, ignore=False): """Creates a sysctl.conf file from a YAML associative array :param sysctl_dict: a dict or YAML-formatted string of sysctl @@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file): :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. 
+ :type ignore: bool :returns: None """ if type(sysctl_dict) is not dict: @@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file): for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), level=DEBUG) - check_call(["sysctl", "-p", sysctl_file]) + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + check_call(call) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 2394caf3..c6d9341e 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -20,10 +20,8 @@ import time import subprocess -from charmhelpers.core.host import ( - get_distrib_codename, - CompareHostReleases, -) +from charmhelpers.core.host import get_distrib_codename + from charmhelpers.core.hookenv import ( log, DEBUG, @@ -362,14 +360,8 @@ def _get_keyid_by_gpg_key(key_material): :returns: A GPG key fingerprint :rtype: str """ - # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change - release = get_distrib_codename() - is_gpgv2_distro = CompareHostReleases(release) >= "bionic" - if is_gpgv2_distro: - # --import is mandatory, otherwise fingerprint is not printed - cmd = 'gpg --with-colons --import-options show-only --import --dry-run' - else: - cmd = 'gpg --with-colons --with-fingerprint' + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' ps = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, From c8faa4149c2f0aad362bf4b2688e4f86045dd638 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Apr 2019 10:12:05 +0200 Subject: [PATCH 1726/2699] Sync charm-helpers for Stein release As a part of the Stein release, we need to ensure that charmhelpers is up to date. 
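The new flag simply appends "-e" so that sysctl skips settings unknown to the running kernel instead of failing the hook. A hedged usage sketch (the file path and key are illustrative):

    # Writes the file, then runs: sysctl -p /etc/sysctl.d/50-ceph.conf -e
    create({'kernel.pid_max': 4194303},
           '/etc/sysctl.d/50-ceph.conf',
           ignore=True)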
Change-Id: I8a252fd730fa89b2c6aa12765a388d7a35f1dcc7 --- ceph-osd/hooks/charmhelpers/cli/unitdata.py | 9 ++ .../audits/openstack_security_guide.py | 113 ++++++----------- .../contrib/openstack/cert_utils.py | 18 ++- .../charmhelpers/contrib/openstack/context.py | 34 ++++- .../charmhelpers/contrib/openstack/ip.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 4 +- .../contrib/storage/linux/ceph.py | 116 ++++++++++++++---- .../contrib/storage/linux/utils.py | 41 +++++++ ceph-osd/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 14 +++ ceph-osd/hooks/charmhelpers/core/sysctl.py | 13 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 16 +-- 12 files changed, 255 insertions(+), 126 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/cli/unitdata.py b/ceph-osd/hooks/charmhelpers/cli/unitdata.py index c5728582..acce846f 100644 --- a/ceph-osd/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/cli/unitdata.py @@ -19,9 +19,16 @@ @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") def unitdata_cmd(subparser): nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') get_cmd.add_argument('key', help='Key to retrieve the value of') get_cmd.set_defaults(action='get', value=None) + + getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') + getrange_cmd.add_argument('key', metavar='prefix', + help='Prefix of the keys to retrieve') + getrange_cmd.set_defaults(action='getrange', value=None) + set_cmd = nested.add_parser('set', help='Store data') set_cmd.add_argument('key', help='Key to set') set_cmd.add_argument('value', help='Value to store') @@ -30,6 +37,8 @@ def unitdata_cmd(subparser): def _unitdata_cmd(action, key, value): if action == 'get': return unitdata.kv().get(key) + elif action == 'getrange': + return unitdata.kv().getrange(key) elif action == 'set': unitdata.kv().set(key, value) unitdata.kv().flush() diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index ba5e2486..e5b7ac1e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -30,14 +30,20 @@ cached, ) +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. 
+""" FILE_ASSERTIONS = { 'barbican': { - # From security guide - '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'}, - '/etc/barbican/barbican-api-paste.ini': - {'group': 'barbican', 'mode': '640'}, - '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'}, + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, }, 'ceph-mon': { '/var/lib/charm/ceph-mon/ceph.conf': @@ -60,82 +66,29 @@ {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, }, 'cinder': { - # From security guide - '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'}, - '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'}, - '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'}, + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, }, 'glance': { - # From security guide - '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-registry-paste.ini': - {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'}, - '/etc/glance/glance-swift-store.conf': - {'group': 'glance', 'mode': '640'}, - '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'}, - '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'}, - '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'}, + '/etc/glance': {'group': 'glance', 'mode': '750'}, }, 'keystone': { - # From security guide - '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/keystone-paste.ini': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/certs/signing_cert.pem': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/private/signing_key.pem': - {'group': 'keystone', 'mode': '640'}, - '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'}, + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, }, 'manilla': { - # From security guide - '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'}, - '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'}, + '/etc/manila': {'group': 'manilla', 'mode': '750'}, }, 'neutron-gateway': { - '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, - '/etc/neutron/rootwrap.conf': {'mode': '640'}, - '/etc/neutron/rootwrap.d': {'mode': '755'}, - '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, }, 'neutron-api': { - # From security guide - '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'}, - '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'}, - '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'}, - # Additional validations - '/etc/neutron/rootwrap.d': {'mode': '755'}, - '/etc/neutron/neutron_lbaas.conf': {'mode': '644'}, - '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'}, - '/etc/neutron/*': {'group': 'neutron', 'mode': '644'}, + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, }, 'nova-cloud-controller': { - # From security 
guide - '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, - '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'}, - '/etc/nova/*': {'group': 'nova', 'mode': '640'}, - # Additional validations - '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, + '/etc/nova': {'group': 'nova', 'mode': '750'}, }, 'nova-compute': { - # From security guide - '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'}, - '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'}, - # Additional Validations - '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'}, - '/etc/nova/nm.conf': {'mode': '644'}, - '/etc/nova/*': {'group': 'nova', 'mode': '640'}, + '/etc/nova/': {'group': 'nova', 'mode': '750'}, }, 'openstack-dashboard': { # From security guide @@ -178,7 +131,7 @@ def _config_ini(path): return dict(conf) -def _validate_file_ownership(owner, group, file_name): +def _validate_file_ownership(owner, group, file_name, optional=False): """ Validate that a specified file is owned by `owner:group`. @@ -188,12 +141,16 @@ def _validate_file_ownership(owner, group, file_name): :type group: str :param file_name: Path to the file to verify :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool """ try: ownership = _stat(file_name) except subprocess.CalledProcessError as e: print("Error reading file: {}".format(e)) - assert False, "Specified file does not exist: {}".format(file_name) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) assert owner == ownership.owner, \ "{} has an incorrect owner: {} should be {}".format( file_name, ownership.owner, owner) @@ -203,7 +160,7 @@ def _validate_file_ownership(owner, group, file_name): print("Validate ownership of {}: PASS".format(file_name)) -def _validate_file_mode(mode, file_name): +def _validate_file_mode(mode, file_name, optional=False): """ Validate that a specified file has the specified permissions. 
@@ -211,12 +168,16 @@ def _validate_file_mode(mode, file_name): :type owner: str :param file_name: Path to the file to verify :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool """ try: ownership = _stat(file_name) except subprocess.CalledProcessError as e: print("Error reading file: {}".format(e)) - assert False, "Specified file does not exist: {}".format(file_name) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) assert mode == ownership.mode, \ "{} has an incorrect mode: {} should be {}".format( file_name, ownership.mode, mode) @@ -243,14 +204,15 @@ def validate_file_ownership(config): "Invalid ownership configuration: {}".format(key)) owner = options.get('owner', config.get('owner', 'root')) group = options.get('group', config.get('group', 'root')) + optional = options.get('optional', config.get('optional', 'False')) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): if os.path.isfile(file): - _validate_file_ownership(owner, group, file) + _validate_file_ownership(owner, group, file, optional) else: if os.path.isfile(file_name): - _validate_file_ownership(owner, group, file_name) + _validate_file_ownership(owner, group, file_name, optional) @audit(is_audit_type(AuditType.OpenStackSecurityGuide), @@ -264,14 +226,15 @@ def validate_file_permissions(config): raise RuntimeError( "Invalid ownership configuration: {}".format(key)) mode = options.get('mode', config.get('permissions', '600')) + optional = options.get('optional', config.get('optional', 'False')) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): if os.path.isfile(file): - _validate_file_mode(mode, file) + _validate_file_mode(mode, file, optional) else: if os.path.isfile(file_name): - _validate_file_mode(mode, file_name) + _validate_file_mode(mode, file_name, optional) @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 3a3c6de7..47b8603a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): os.symlink(hostname_key, custom_key) -def install_certs(ssl_dir, certs, chain=None): +def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): """Install the certs passed into the ssl dir and append the chain if provided. :param ssl_dir: str Directory to create symlinks in :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} :param chain: str Chain to be appended to certs + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. 
Defaults to 'root' + :type group: str """ for cn, bundle in certs.items(): cert_filename = 'cert_{}'.format(cn) @@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None): # trust certs signed by an intermediate in the chain cert_data = cert_data + os.linesep + chain write_file( - path=os.path.join(ssl_dir, cert_filename), + path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, content=cert_data, perms=0o640) write_file( - path=os.path.join(ssl_dir, key_filename), + path=os.path.join(ssl_dir, key_filename), owner=user, group=group, content=bundle['key'], perms=0o640) def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None): + custom_hostname_link=None, user='root', group='root'): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. Defaults to 'root' + :type group: str """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit, if certs: certs = json.loads(certs) install_ca_cert(ca.encode()) - install_certs(ssl_dir, certs, chain) + install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index fc634cc6..d5133713 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -792,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator): # and service namespace accordingly. external_ports = [] service_namespace = None + user = group = 'root' def enable_modules(self): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] @@ -810,9 +811,11 @@ def configure_cert(self, cn=None): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), perms=0o640) + content=b64decode(cert), owner=self.user, + group=self.group, perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), perms=0o640) + content=b64decode(key), owner=self.user, + group=self.group, perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1932,3 +1935,30 @@ def __call__(self): return { 'openstack_release': ostack, 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. 
Valid values are
+                         'daily', 'weekly', 'monthly', 'yearly'
+        :type interval: str
+        :param count: The logrotate count option configures the 'count' times
+                      the log files are being rotated before being removed
+        :type count: int
+        """
+        self.location = location
+        self.interval = interval
+        self.count = 'rotate {}'.format(count)
+
+    def __call__(self):
+        ctxt = {
+            'logrotate_logs_location': self.location,
+            'logrotate_interval': self.interval,
+            'logrotate_count': self.count,
+        }
+        return ctxt
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py
index df83b91b..723aebc1 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py
@@ -159,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
                 if is_address_in_network(bound_cidr, vip):
                     resolved_address = vip
                     break
-        except NotImplementedError:
+        except (NotImplementedError, NoNetworkBinding):
             # If no net-splits configured and no support for extra
             # bindings/network spaces so we expect a single vip
             resolved_address = vips[0]
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index 86b011b7..e5e25369 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -194,7 +194,7 @@
     ('rocky',
      ['2.18.0', '2.19.0']),
     ('stein',
-     ['2.19.0']),
+     ['2.20.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(avail_vers, cur_vers) == 1
+    return apt.version_compare(avail_vers, cur_vers) >= 1
 
 
 def ensure_block_device(block_device):
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 22aa978b..2c62092c 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if cmp_pkgrevno('ceph', '10.1') >= 0:
+            if cmp_pkgrevno('ceph-common', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')
 
@@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
         raise
 
 
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
     """
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param pool_name: six.string_types
-    :param max_bytes: int or long
-    :return: None.  Can raise CalledProcessError
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: int
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: int
+    :raises: subprocess.CalledProcessError
     """
-    # Set a byte quota on a RADOS pool in ceph.
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
-           'max_bytes', str(max_bytes)]
-    try:
-        check_call(cmd)
-    except CalledProcessError:
-        raise
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
 
 
 def remove_pool_quota(service, pool_name):
@@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     # failure_domain changed in luminous
     if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
@@ -766,7 +769,7 @@ def get_osds(service, device_class=None):
     :param device_class: Class of storage device for OSD's
     :type device_class: str
     """
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     if luminous_or_later and device_class:
         out = check_output(['ceph', '--id', service,
                             'osd', 'crush', 'class',
@@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
 
     :raises: CalledProcessError if ceph call fails
     """
-    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1153,19 +1156,46 @@ def add_op_request_access_to_group(self, name, namespace=None,
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None,
-                           app_name=None):
-        """Adds an operation to create a pool.
-
-        @param pg_num setting:  optional setting. If not provided, this value
-        will be calculated by the broker based on how many OSDs are in the
-        cluster at the time of creation. Note that, if provided, this value
-        will be capped at the current available maximum.
-        @param weight: the percentage of data the pool makes up
+                           app_name=None, max_bytes=None, max_objects=None):
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
+        ``add_op_create_erasure_pool()`` instead.
+        """
+        return self.add_op_create_replicated_pool(
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
+            group=group, namespace=namespace, app_name=app_name,
+            max_bytes=max_bytes, max_objects=max_objects)
+
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
+                                      weight=None, group=None, namespace=None,
+                                      app_name=None, max_bytes=None,
+                                      max_objects=None):
+        """Adds an operation to create a replicated pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param replica_count: Number of copies Ceph should keep of your data.
+        :type replica_count: int
+        :param pg_num: Request specific number of Placement Groups to create
+                       for pool.
+        :type pg_num: int
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+                       Used to calculate number of Placement Groups to create
+                       for pool.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param namespace: Group namespace
+        :type namespace: str
         :param app_name: (Optional) Tag pool with application name.  Note that
                          there are certain protocols emerging upstream with
                          regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
         :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None,
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace, 'app-name': app_name})
+                         'group-namespace': namespace, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
+
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
+                                   weight=None, group=None, app_name=None,
+                                   max_bytes=None, max_objects=None):
+        """Adds an operation to create an erasure coded pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure code profile to use.  If not
+                                set the ceph-mon unit handling the broker
+                                request will set its default value.
+        :type erasure_profile: str
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param app_name: (Optional) Tag pool with application name.  Note that
+                         there are certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
+        """
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'pool-type': 'erasure',
+                         'erasure-profile': erasure_profile,
+                         'weight': weight,
+                         'group': group, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
index 6f846b05..c57aaf35 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -17,12 +17,53 @@
 
 from stat import S_ISBLK
 from subprocess import (
+    CalledProcessError,
     check_call,
     check_output,
     call
 )
 
 
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return check_output(cmd).decode('UTF-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+
+    :param: dev: A full path to a block device to check for LUKS header
+    presence
+    :returns: boolean: indicates whether a device is used based on LUKS header.
+ """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def is_block_device(path): ''' Confirm device at path is a valid block device node. diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 47c1fc35..32754ff9 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -47,6 +47,7 @@ cmp_pkgrevno, CompareHostReleases, get_distrib_codename, + arch ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index d7e920eb..a3162fac 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,6 @@ import subprocess +from charmhelpers.core.hookenv import cached from charmhelpers.core.strutils import BasicStringComparator @@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. + + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index 1f188d8c..f1f4a28f 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -28,7 +28,7 @@ __author__ = 'Jorge Niedbalski R. ' -def create(sysctl_dict, sysctl_file): +def create(sysctl_dict, sysctl_file, ignore=False): """Creates a sysctl.conf file from a YAML associative array :param sysctl_dict: a dict or YAML-formatted string of sysctl @@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file): :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. 
+ :type ignore: bool :returns: None """ if type(sysctl_dict) is not dict: @@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file): for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), level=DEBUG) - check_call(["sysctl", "-p", sysctl_file]) + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + check_call(call) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 2394caf3..c6d9341e 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -20,10 +20,8 @@ import time import subprocess -from charmhelpers.core.host import ( - get_distrib_codename, - CompareHostReleases, -) +from charmhelpers.core.host import get_distrib_codename + from charmhelpers.core.hookenv import ( log, DEBUG, @@ -362,14 +360,8 @@ def _get_keyid_by_gpg_key(key_material): :returns: A GPG key fingerprint :rtype: str """ - # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change - release = get_distrib_codename() - is_gpgv2_distro = CompareHostReleases(release) >= "bionic" - if is_gpgv2_distro: - # --import is mandatory, otherwise fingerprint is not printed - cmd = 'gpg --with-colons --import-options show-only --import --dry-run' - else: - cmd = 'gpg --with-colons --with-fingerprint' + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' ps = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, From 0bd972a3e150c557120c05e126b30b22e9cb5cf8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Apr 2019 10:12:21 +0200 Subject: [PATCH 1727/2699] Sync charm-helpers for Stein release As a part of the Stein release, we need to ensure that charmhelpers is up to date. Change-Id: I53f388a0371b09bacd10122351f060a5a131947b --- .../contrib/storage/linux/utils.py | 41 +++++++++++++++++++ ceph-proxy/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 14 +++++++ ceph-proxy/charmhelpers/core/sysctl.py | 13 ++++-- 4 files changed, 66 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py index 6f846b05..c57aaf35 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py @@ -17,12 +17,53 @@ from stat import S_ISBLK from subprocess import ( + CalledProcessError, check_call, check_output, call ) +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. 
+ """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def is_block_device(path): ''' Confirm device at path is a valid block device node. diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index 47c1fc35..32754ff9 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -47,6 +47,7 @@ cmp_pkgrevno, CompareHostReleases, get_distrib_codename, + arch ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": from charmhelpers.core.host_factory.centos import ( # NOQA:F401 diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index d7e920eb..a3162fac 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,6 @@ import subprocess +from charmhelpers.core.hookenv import cached from charmhelpers.core.strutils import BasicStringComparator @@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None): pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. + + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/ceph-proxy/charmhelpers/core/sysctl.py b/ceph-proxy/charmhelpers/core/sysctl.py index 1f188d8c..f1f4a28f 100644 --- a/ceph-proxy/charmhelpers/core/sysctl.py +++ b/ceph-proxy/charmhelpers/core/sysctl.py @@ -28,7 +28,7 @@ __author__ = 'Jorge Niedbalski R. ' -def create(sysctl_dict, sysctl_file): +def create(sysctl_dict, sysctl_file, ignore=False): """Creates a sysctl.conf file from a YAML associative array :param sysctl_dict: a dict or YAML-formatted string of sysctl @@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file): :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. + :type ignore: bool :returns: None """ if type(sysctl_dict) is not dict: @@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file): for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), level=DEBUG) - check_call(["sysctl", "-p", sysctl_file]) + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + check_call(call) From d457598a574a30c73a54c6359892bffae3349fb3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Apr 2019 10:12:36 +0200 Subject: [PATCH 1728/2699] Sync charm-helpers for Stein release As a part of the Stein release, we need to ensure that charmhelpers is up to date. 
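Among the changes pulled in here is the quota-aware pool creation API in
contrib/storage/linux/ceph.py. As a usage reference only (a minimal sketch;
the pool names and quota values are illustrative, not part of this sync),
a consuming charm builds a broker request roughly like this:

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    # Replicated pool capped at 10 GiB of stored data.
    rq.add_op_create_replicated_pool(name='demo-pool', replica_count=3,
                                     max_bytes=10 * 1024 ** 3)
    # Erasure coded pool; ceph-mon fills in its default profile.
    rq.add_op_create_erasure_pool(name='demo-ec', max_objects=1000000)

The populated request is then typically handed to the existing
send_request_if_needed() flow on the client relation.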
Change-Id: Ie14247e8021c31322cf4581cae3e0a55c0319550 --- .../hooks/charmhelpers/cli/unitdata.py | 9 + .../contrib/openstack/audits/__init__.py | 212 ++++++++++++++ .../audits/openstack_security_guide.py | 266 ++++++++++++++++++ .../contrib/openstack/cert_utils.py | 18 +- .../charmhelpers/contrib/openstack/context.py | 37 ++- .../charmhelpers/contrib/openstack/ip.py | 5 +- .../contrib/openstack/templates/logrotate | 9 + .../templates/section-oslo-messaging-rabbit | 10 + .../charmhelpers/contrib/openstack/utils.py | 4 +- .../contrib/storage/linux/ceph.py | 116 ++++++-- .../contrib/storage/linux/utils.py | 41 +++ .../hooks/charmhelpers/core/hookenv.py | 74 +++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 14 + .../hooks/charmhelpers/core/sysctl.py | 13 +- .../hooks/charmhelpers/fetch/ubuntu.py | 183 +++++++++--- 16 files changed, 932 insertions(+), 80 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/logrotate create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit diff --git a/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py index c5728582..acce846f 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/unitdata.py @@ -19,9 +19,16 @@ @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") def unitdata_cmd(subparser): nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') get_cmd.add_argument('key', help='Key to retrieve the value of') get_cmd.set_defaults(action='get', value=None) + + getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') + getrange_cmd.add_argument('key', metavar='prefix', + help='Prefix of the keys to retrieve') + getrange_cmd.set_defaults(action='getrange', value=None) + set_cmd = nested.add_parser('set', help='Store data') set_cmd.add_argument('key', help='Key to set') set_cmd.add_argument('value', help='Value to store') @@ -30,6 +37,8 @@ def unitdata_cmd(subparser): def _unitdata_cmd(action, key, value): if action == 'get': return unitdata.kv().get(key) + elif action == 'getrange': + return unitdata.kv().getrange(key) elif action == 'set': unitdata.kv().set(key, value) unitdata.kv().flush() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/__init__.py new file mode 100644 index 00000000..7f7e5f79 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/__init__.py @@ -0,0 +1,212 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
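For orientation, the registration model implemented by this new module can be
exercised roughly as follows (a sketch; the audit name, assertion and
audit_options keys are illustrative):

    from charmhelpers.contrib.openstack.audits import (
        AuditType, audit, is_audit_type, run,
    )

    @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
    def demo_config_path_is_set(audit_options):
        # Raising AssertionError marks the audit as FAIL;
        # returning normally marks it as PASS.
        assert audit_options.get('config_path'), "config_path is not set"

    results = run({'audit_type': AuditType.OpenStackSecurityGuide,
                   'config_path': '/etc/demo'})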
+ +"""OpenStack Security Audit code""" + +import collections +from enum import Enum +import traceback + +from charmhelpers.core.host import cmp_pkgrevno +import charmhelpers.contrib.openstack.utils as openstack_utils +import charmhelpers.core.hookenv as hookenv + + +class AuditType(Enum): + OpenStackSecurityGuide = 1 + + +_audits = {} + +Audit = collections.namedtuple('Audit', 'func filters') + + +def audit(*args): + """Decorator to register an audit. + + These are used to generate audits that can be run on a + deployed system that matches the given configuration + + :param args: List of functions to filter tests against + :type args: List[Callable[Dict]] + """ + def wrapper(f): + test_name = f.__name__ + if _audits.get(test_name): + raise RuntimeError( + "Test name '{}' used more than once" + .format(test_name)) + non_callables = [fn for fn in args if not callable(fn)] + if non_callables: + raise RuntimeError( + "Configuration includes non-callable filters: {}" + .format(non_callables)) + _audits[test_name] = Audit(func=f, filters=args) + return f + return wrapper + + +def is_audit_type(*args): + """This audit is included in the specified kinds of audits. + + :param *args: List of AuditTypes to include this audit in + :type args: List[AuditType] + :rtype: Callable[Dict] + """ + def _is_audit_type(audit_options): + if audit_options.get('audit_type') in args: + return True + else: + return False + return _is_audit_type + + +def since_package(pkg, pkg_version): + """This audit should be run after the specified package version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _since_package(audit_options=None): + return cmp_pkgrevno(pkg, pkg_version) >= 0 + + return _since_package + + +def before_package(pkg, pkg_version): + """This audit should be run before the specified package version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _before_package(audit_options=None): + return not since_package(pkg, pkg_version)() + + return _before_package + + +def since_openstack_release(pkg, release): + """This audit should run after the specified OpenStack version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _since_openstack_release(audit_options=None): + _release = openstack_utils.get_os_codename_package(pkg) + return openstack_utils.CompareOpenStackReleases(_release) >= release + + return _since_openstack_release + + +def before_openstack_release(pkg, release): + """This audit should run before the specified OpenStack version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _before_openstack_release(audit_options=None): + return not since_openstack_release(pkg, release)() + + return _before_openstack_release + + +def it_has_config(config_key): + """This audit should be run based on specified config keys. + + :param config_key: Config key to look for + :type config_key: str + :rtype: Callable[Dict] + """ + def _it_has_config(audit_options): + return audit_options.get(config_key) is not None + + return _it_has_config + + +def run(audit_options): + """Run the configured audits with the specified audit_options. 
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+
+    :rtype: Dict[str, str]
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if result_name in audit_options.get('excludes', []):
+            print(
+                "Skipping {} because it is "
+                "excluded in audit config"
+                .format(result_name))
+            continue
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action.
+
+    :param result: The result of running the security-checklist
+                   action on a unit
+    :type result: Dict[str, Dict[str, str]]
+    :rtype: int
+    """
+    passed = True
+    for test, result in result.items():
+        if result['success']:
+            hookenv.action_set({test: 'PASS'})
+        else:
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
+            passed = False
+    if not passed:
+        hookenv.action_fail("One or more tests failed")
+    return 0 if passed else 1
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
new file mode 100644
index 00000000..e5b7ac1e
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -0,0 +1,266 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import configparser
+import glob
+import os.path
+import subprocess
+
+from charmhelpers.contrib.openstack.audits import (
+    audit,
+    AuditType,
+    # filters
+    is_audit_type,
+    it_has_config,
+)
+
+from charmhelpers.core.hookenv import (
+    cached,
+)
+
+"""
+The Security Guide suggests a specific list of files inside the
+config directory for the service having 640 specifically, but
+by ensuring the containing directory is 750, only the owner can
+write, and only the group can read files within the directory.
+
+By restricting access to the containing directory, we can more
+effectively ensure that there is no accidental leakage if a new
+file is added to the service without being added to the security
+guide, and to this check.
+""" +FILE_ASSERTIONS = { + 'barbican': { + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, + }, + 'glance': { + '/etc/glance': {'group': 'glance', 'mode': '750'}, + }, + 'keystone': { + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, + }, + 'manilla': { + '/etc/manila': {'group': 'manilla', 'mode': '750'}, + }, + 'neutron-gateway': { + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, + }, + 'neutron-api': { + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, + }, + 'nova-cloud-controller': { + '/etc/nova': {'group': 'nova', 'mode': '750'}, + }, + 'nova-compute': { + '/etc/nova/': {'group': 'nova', 'mode': '750'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. + + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + conf = configparser.ConfigParser() + conf.read(path) + return dict(conf) + + +def _validate_file_ownership(owner, group, file_name, optional=False): + """ + Validate that a specified file is owned by `owner:group`. 
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name, optional=False):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: file mode that is desired
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', 'False'))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = options.get('mode', config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', 'False'))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_mode(mode, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_mode(mode, file_name, optional)
+
+
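To make the expected configuration shape concrete, the two audits above
consume a mapping like the following (a sketch; paths and values are
illustrative). Note that the per-file options are restricted to owner,
group and mode, so 'optional' is only honoured at the top level:

    config = {
        'owner': 'root',        # top-level defaults for all files
        'group': 'demo',
        'permissions': '640',
        'optional': False,
        'files': {
            '/etc/demo/demo.conf': {'owner': 'root', 'group': 'demo',
                                    'mode': '640'},
        },
    }
    validate_file_ownership(config)
    validate_file_permissions(config)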
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_keystone(audit_options):
+    """Validate that the service uses Keystone for authentication."""
+    section = _config_section(audit_options, 'DEFAULT')
+    assert section is not None, "Missing section 'DEFAULT'"
+    assert section.get('auth_strategy') == "keystone", \
+        "Application is not using Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_keystone(audit_options):
+    """Verify that TLS is used to communicate with Keystone."""
+    section = _config_section(audit_options, 'keystone_authtoken')
+    assert section is not None, "Missing section 'keystone_authtoken'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("auth_uri"), \
+        "TLS is not used for Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_glance(audit_options):
+    """Verify that TLS is used to communicate with Glance."""
+    section = _config_section(audit_options, 'glance')
+    assert section is not None, "Missing section 'glance'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("api_servers"), \
+        "TLS is not used for Glance"
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py
index 3a3c6de7..47b8603a 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py
@@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
         os.symlink(hostname_key, custom_key)
 
 
-def install_certs(ssl_dir, certs, chain=None):
+def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
     """Install the certs passed into the ssl dir and append the chain if
     provided.
 
     :param ssl_dir: str Directory to create symlinks in
     :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
     :param chain: str Chain to be appended to certs
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files. Defaults to 'root'
+    :type group: str
     """
     for cn, bundle in certs.items():
         cert_filename = 'cert_{}'.format(cn)
@@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None):
             # trust certs signed by an intermediate in the chain
             cert_data = cert_data + os.linesep + chain
         write_file(
-            path=os.path.join(ssl_dir, cert_filename),
+            path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
             content=cert_data, perms=0o640)
         write_file(
-            path=os.path.join(ssl_dir, key_filename),
+            path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
             content=bundle['key'], perms=0o640)
 
 
 def process_certificates(service_name, relation_id, unit,
-                         custom_hostname_link=None):
+                         custom_hostname_link=None, user='root', group='root'):
     """Process the certificates supplied down the relation
 
    :param service_name: str Name of service the certificates are for.
    :param relation_id: str Relation id providing the certs
    :param unit: str Unit providing the certs
    :param custom_hostname_link: str Name of custom link to create
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files.
Defaults to 'root'
+    :type group: str
     """
     data = relation_get(rid=relation_id, unit=unit)
     ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit,
         if certs:
             certs = json.loads(certs)
             install_ca_cert(ca.encode())
-            install_certs(ssl_dir, certs, chain)
+            install_certs(ssl_dir, certs, chain, user=user, group=group)
             create_ip_cert_links(
                 ssl_dir, custom_hostname_link=custom_hostname_link)
 
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
index 78a339f6..d5133713 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
@@ -29,6 +29,7 @@
     filter_installed_packages,
 )
 from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
     config,
     is_relation_made,
     local_unit,
@@ -791,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator):
     # and service namespace accordingly.
     external_ports = []
     service_namespace = None
+    user = group = 'root'
 
     def enable_modules(self):
         cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
@@ -809,9 +811,11 @@ def configure_cert(self, cn=None):
             key_filename = 'key'
 
         write_file(path=os.path.join(ssl_dir, cert_filename),
-                   content=b64decode(cert), perms=0o640)
+                   content=b64decode(cert), owner=self.user,
+                   group=self.group, perms=0o640)
         write_file(path=os.path.join(ssl_dir, key_filename),
-                   content=b64decode(key), perms=0o640)
+                   content=b64decode(key), owner=self.user,
+                   group=self.group, perms=0o640)
 
     def configure_ca(self):
         ca_cert = get_ca_cert()
@@ -868,7 +872,7 @@ def get_network_addresses(self):
                 addr = network_get_primary_address(
                     ADDRESS_MAP[net_type]['binding']
                 )
-            except NotImplementedError:
+            except (NotImplementedError, NoNetworkBinding):
                 addr = fallback
 
             endpoint = resolve_address(net_type)
@@ -1931,3 +1935,30 @@ def __call__(self):
         return {
             'openstack_release': ostack,
             'operating_system_release': osystem}
+
+
+class LogrotateContext(OSContextGenerator):
+    """Common context generator for logrotate."""
+
+    def __init__(self, location, interval, count):
+        """
+        :param location: Absolute path for the logrotate config file
+        :type location: str
+        :param interval: The interval for the rotations. Valid values are
+                         'daily', 'weekly', 'monthly', 'yearly'
+        :type interval: str
+        :param count: The logrotate count option configures the 'count' times
+                      the log files are being rotated before being removed
+        :type count: int
+        """
+        self.location = location
+        self.interval = interval
+        self.count = 'rotate {}'.format(count)
+
+    def __call__(self):
+        ctxt = {
+            'logrotate_logs_location': self.location,
+            'logrotate_interval': self.interval,
+            'logrotate_count': self.count,
+        }
+        return ctxt
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
index 73102af7..723aebc1 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py
@@ -13,6 +13,7 @@
 # limitations under the License.
from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, unit_get, service_name, @@ -158,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if is_address_in_network(bound_cidr, vip): resolved_address = vip break - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): # If no net-splits configured and no support for extra # bindings/network spaces so we expect a single vip resolved_address = vips[0] @@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): # configuration is not in use try: resolved_address = network_get_primary_address(binding) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): resolved_address = fallback_addr if resolved_address is None: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/logrotate b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/logrotate new file mode 100644 index 00000000..b2900d09 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/logrotate @@ -0,0 +1,9 @@ +/var/log/{{ logrotate_logs_location }}/*.log { + {{ logrotate_interval }} + {{ logrotate_count }} + compress + delaycompress + missingok + notifempty + copytruncate +} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit new file mode 100644 index 00000000..bed2216a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 86b011b7..e5e25369 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -194,7 +194,7 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.19.0']), + ['2.20.0']), ]) # >= Liberty version->codename mapping @@ -656,7 +656,7 @@ def openstack_upgrade_available(package): else: avail_vers = get_os_version_install_source(src) apt.init() - return apt.version_compare(avail_vers, cur_vers) == 1 + return apt.version_compare(avail_vers, cur_vers) >= 1 def ensure_block_device(block_device): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 22aa978b..2c62092c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -186,7 +186,7 @@ def remove_cache_tier(self, cache_pool): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if cmp_pkgrevno('ceph', '10.1') >= 0: + if cmp_pkgrevno('ceph-common', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name): raise -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): +def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): """ - :param service: six.string_types. 
The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool + :type pool_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + :raises: subprocess.CalledProcessError """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + if max_bytes: + cmd = cmd + ['max_bytes', str(max_bytes)] + if max_objects: + cmd = cmd + ['max_objects', str(max_objects)] + check_call(cmd) def remove_pool_quota(service, pool_name): @@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") - luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain=' + failure_domain) @@ -766,7 +769,7 @@ def get_osds(service, device_class=None): :param device_class: Class of storage device for OSD's :type device_class: str """ - luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0 + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 if luminous_or_later and device_class: out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', @@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name): :raises: CalledProcessError if ceph call fails """ - if cmp_pkgrevno('ceph', '12.0.0') >= 0: + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'enable', pool, name] check_call(cmd) @@ -1153,19 +1156,46 @@ def add_op_request_access_to_group(self, name, namespace=None, def add_op_create_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None, - app_name=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - @param weight: the percentage of data the pool makes up + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. 
+        :type replica_count: int
+        :param pg_num: Request specific number of Placement Groups to create
+                       for pool.
+        :type pg_num: int
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+                       Used to calculate number of Placement Groups to create
+                       for pool.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param namespace: Group namespace
+        :type namespace: str
         :param app_name: (Optional) Tag pool with application name.  Note that
                          there are certain protocols emerging upstream with
                          regard to meaningful application names to use.
                          Examples are ``rbd`` and ``rgw``.
         :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None,
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace, 'app-name': app_name})
+                         'group-namespace': namespace, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
+
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
+                                   weight=None, group=None, app_name=None,
+                                   max_bytes=None, max_objects=None):
+        """Adds an operation to create an erasure coded pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure code profile to use.  If not
+                                set the ceph-mon unit handling the broker
+                                request will set its default value.
+        :type erasure_profile: str
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param app_name: (Optional) Tag pool with application name.  Note that
+                         there are certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
+        """
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'pool-type': 'erasure',
+                         'erasure-profile': erasure_profile,
+                         'weight': weight,
+                         'group': group, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py
index 6f846b05..c57aaf35 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -17,12 +17,53 @@
 
 from stat import S_ISBLK
 from subprocess import (
+    CalledProcessError,
     check_call,
     check_output,
     call
 )
 
 
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return check_output(cmd).decode('UTF-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+ + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def is_block_device(path): ''' Confirm device at path is a valid block device node. diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 2e287659..4744eb43 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -50,6 +50,11 @@ MARKER = object() SH_MAX_ARG = 131071 + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + cache = {} @@ -1414,3 +1419,72 @@ def unit_doomed(unit=None): # I don't think 'dead' units ever show up in the goal-state, but # check anyway in addition to 'dying'. return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. + + Get charm proxy settings from environment variables that correspond to + juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, + see lp:1782236) in a format suitable for passing to an application that + reacts to proxy settings passed as environment variables. Some applications + support lowercase or uppercase notation (e.g. curl), some support only + lowercase (e.g. wget), there are also subjectively rare cases of only + uppercase notation support. no_proxy CIDR and wildcard support also varies + between runtimes and applications as there is no enforced standard. + + Some applications may connect to multiple destinations and expose config + options that would affect only proxy settings for a specific destination + these should be handled in charms in an application-specific manner. + + :param selected_settings: format only a subset of possible settings + :type selected_settings: list + :rtype: Option(None, dict[str, str]) + """ + SUPPORTED_SETTINGS = { + 'http': 'HTTP_PROXY', + 'https': 'HTTPS_PROXY', + 'no_proxy': 'NO_PROXY', + 'ftp': 'FTP_PROXY' + } + if selected_settings is None: + selected_settings = SUPPORTED_SETTINGS + + selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() + if k in selected_settings] + proxy_settings = {} + for var in selected_vars: + var_val = os.getenv(var) + if var_val: + proxy_settings[var] = var_val + proxy_settings[var.lower()] = var_val + # Now handle juju-prefixed environment variables. The legacy vs new + # environment variable usage is mutually exclusive + charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var)) + if charm_var_val: + proxy_settings[var] = charm_var_val + proxy_settings[var.lower()] = charm_var_val + if 'no_proxy' in proxy_settings: + if _contains_range(proxy_settings['no_proxy']): + log(RANGE_WARNING, level=WARNING) + return proxy_settings if proxy_settings else None + + +def _contains_range(addresses): + """Check for cidr or wildcard domain in a string. 
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py
index 47c1fc35..32754ff9 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/host.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/host.py
@@ -47,6 +47,7 @@
     cmp_pkgrevno,
     CompareHostReleases,
     get_distrib_codename,
+    arch
 )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
     from charmhelpers.core.host_factory.centos import (  # NOQA:F401
diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py
index d7e920eb..a3162fac 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
 import subprocess
 
+from charmhelpers.core.hookenv import cached
 from charmhelpers.core.strutils import BasicStringComparator
 
 
@@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
index 1f188d8c..f1f4a28f 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
@@ -28,7 +28,7 @@
 __author__ = 'Jorge Niedbalski R. '
 
 
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
     """Creates a sysctl.conf file from a YAML associative array
 
     :param sysctl_dict: a dict or YAML-formatted string of sysctl
@@ -36,6 +36,8 @@
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+ :type ignore: bool :returns: None """ if type(sysctl_dict) is not dict: @@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file): for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), level=DEBUG) - check_call(["sysctl", "-p", sysctl_file]) + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + check_call(call) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 8a5cadf1..c6d9341e 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -19,15 +19,14 @@ import six import time import subprocess -from tempfile import NamedTemporaryFile -from charmhelpers.core.host import ( - lsb_release -) +from charmhelpers.core.host import get_distrib_codename + from charmhelpers.core.hookenv import ( log, DEBUG, WARNING, + env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError @@ -303,12 +302,17 @@ def import_key(key): """Import an ASCII Armor key. A Radix64 format keyid is also supported for backwards - compatibility, but should never be used; the key retrieval - mechanism is insecure and subject to man-in-the-middle attacks - voiding all signature checks using that key. - - :param keyid: The key in ASCII armor format, - including BEGIN and END markers. + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferrable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. + :type key: (bytes, str) :raises: GPGKeyError if the key could not be imported """ key = key.strip() @@ -319,35 +323,131 @@ def import_key(key): log("PGP key found (looks like ASCII Armor format)", level=DEBUG) if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) else: raise GPGKeyError("ASCII armor markers missing from GPG key") else: - # We should only send things obviously not a keyid offsite - # via this unsecured protocol, as it may be a secret or part - # of one. 
log("PGP key found (looks like Radix64 format)", level=WARNING) - log("INSECURLY importing PGP key from keyserver; " + log("SECURELY importing PGP key from keyserver; " "full key not provided.", level=WARNING) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - _run_with_retries(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. 
+ + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. Check your network setup' + ' (MTU, routing, DNS) and/or proxy server settings' + ' as well as destination keyserver status.') + else: + return out + + +def _write_apt_gpg_keyfile(key_name, key_material): + """Writes GPG key material into a file at a provided path. + + :param key_name: A key name to use for a key file (could be a fingerprint) + :type key_name: str + :param key_material: A GPG key material (binary) + :type key_material: (str, bytes) + """ + with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), + 'wb') as keyf: + keyf.write(key_material) def add_source(source, key=None, fail_invalid=False): @@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False): def _add_proposed(): """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list - Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for + Uses get_distrib_codename to determine the correct stanza for the deb line. For intel architecutres PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. """ - release = lsb_release()['DISTRIB_CODENAME'] + release = get_distrib_codename() arch = platform.machine() if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): raise SourceConfigError("Arch {} not supported for (distro-)proposed" @@ -461,11 +561,16 @@ def _add_apt_repository(spec): """Add the spec using add_apt_repository :param spec: the parameter to pass to add_apt_repository + :type spec: str """ if '{series}' in spec: - series = lsb_release()['DISTRIB_CODENAME'] + series = get_distrib_codename() spec = spec.replace('{series}', series) - _run_with_retries(['add-apt-repository', '--yes', spec]) + # software-properties package for bionic properly reacts to proxy settings + # passed as environment variables (See lp:1433761). This is not the case + # LTS and non-LTS releases below bionic. 
+ _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https'])) def _add_cloud_pocket(pocket): @@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release): :raises: SourceConfigError if the release is not the same as the ubuntu release. """ - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + ubuntu_rel = get_distrib_codename() if release != ubuntu_rel: raise SourceConfigError( 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' From debafdcc05b1b1c8ba42ae35822f41a943fe97b1 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Apr 2019 16:29:34 +0200 Subject: [PATCH 1729/2699] Sync charm-helpers for Stein release As a part of the Stein release, we need to ensure that charmhelpers is up to date. Change-Id: I15d1a7288afca659577651bbed1b9b8af5dc3fe5 --- ceph-fs/rebuild | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 2eeaa818..b05554ea 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -1 +1,5 @@ -885c11f0-ada9-46fb-804a-6c034772f46e +# This file is used to trigger rebuilds +# when dependencies of the charm change, +# but nothing in the charm needs to. +# simply change the uuid to something new +f12babec-56e5-11e9-bef2-a3031baa93b0 From f176c4f5b687798ea295b9d688e05ceed97667bf Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 5 Apr 2019 07:28:15 +0200 Subject: [PATCH 1730/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I0ed1a4b126276e4bd29960b6943e8915a01f89ff --- ceph-fs/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 066fd774..eb0c2746 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - cosmic + - disco subordinate: false requires: ceph-mds: From bb37ac91a179e2974b69041d341b9494c76566b5 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 5 Apr 2019 07:28:19 +0200 Subject: [PATCH 1731/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I95cf0427becd218ea2163abd6de8f375f68cf131 --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 23270776..6866d9c3 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - cosmic + - disco - trusty peers: mon: From 4efcc38e86badfbb5f05879e8450f24ce37191bd Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 5 Apr 2019 07:28:21 +0200 Subject: [PATCH 1732/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. 
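Stepping back to the charm-helpers sync at the top of this series: the new hookenv.env_proxy_settings() helper returns a dict shaped for direct use as a subprocess environment, which is how the GPG and apt helpers above consume it. A minimal sketch, assuming a model configured with juju-https-proxy (the proxy URL in the comment is illustrative only):

    import subprocess
    from charmhelpers.core.hookenv import env_proxy_settings

    # With e.g. JUJU_CHARM_HTTPS_PROXY=http://squid.internal:3128 set by
    # Juju, this returns {'HTTPS_PROXY': ..., 'https_proxy': ...};
    # with no proxy-related variables set it returns None (inherit env).
    proxy_env = env_proxy_settings(['https'])
    subprocess.check_output(
        ['curl', 'https://keyserver.ubuntu.com'], env=proxy_env)
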
Change-Id: I225a0828bb43555fe72fdfbf1028be7dde090d3a --- ceph-osd/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 489a03f1..59f3cbe9 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -14,6 +14,7 @@ series: - xenial - bionic - cosmic + - disco - trusty description: | Ceph is a distributed storage and network file system designed to provide From 13b36e7a36a3fd5d6e8d0c3999735a57159e42a1 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 5 Apr 2019 07:28:23 +0200 Subject: [PATCH 1733/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I30800945c7799727f19731f04b61972148df830c --- ceph-proxy/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index bcf627ac..575c9bb6 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - cosmic + - disco - trusty extra-bindings: public: From 0579360c5bbe87c78ba931f0d6f3e24165c0e957 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 5 Apr 2019 07:28:27 +0200 Subject: [PATCH 1734/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Id28834a02b8d540ce23458813a5cf439931f1d59 --- ceph-radosgw/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index fdaef865..a27d2a68 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -16,6 +16,7 @@ series: - xenial - bionic - cosmic + - disco - trusty extra-bindings: public: From 2a3041a7a446ae36cd83d1e13f04f51f17189b6c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 5 Apr 2019 17:11:39 +0200 Subject: [PATCH 1735/2699] Sync charm-helpers to enable Ubuntu Disco Change-Id: Iefb724c5bc15600c780343c737dac0f5c745f005 --- .../hooks/charmhelpers/contrib/openstack/amulet/deployment.py | 1 + ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d1270a73..8e57467b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -312,6 +312,7 @@ def _get_openstack_release_string(self): ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index a3162fac..0ee2b660 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -23,6 +23,7 @@ 'artful', 'bionic', 'cosmic', + 'disco', ) From 0033dca96b2abd0f041d48dbd2020b168777b0fe Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 5 Apr 2019 17:11:55 +0200 Subject: [PATCH 1736/2699] Sync charm-helpers to enable Ubuntu Disco Change-Id: If57b1d477c1562f1222dc05e419fc762d7c64d39 --- .../hooks/charmhelpers/contrib/openstack/amulet/deployment.py | 1 + 
ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d1270a73..8e57467b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -312,6 +312,7 @@ def _get_openstack_release_string(self): ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index a3162fac..0ee2b660 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -23,6 +23,7 @@ 'artful', 'bionic', 'cosmic', + 'disco', ) From 99d73e21b06e0e3d5ae63df23dc7966f87be54a6 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 5 Apr 2019 17:12:10 +0200 Subject: [PATCH 1737/2699] Sync charm-helpers to enable Ubuntu Disco Change-Id: If730e91a5e15fb1279024af4562d99a942cd7348 --- ceph-proxy/charmhelpers/core/host_factory/ubuntu.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index a3162fac..0ee2b660 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -23,6 +23,7 @@ 'artful', 'bionic', 'cosmic', + 'disco', ) From 266ecd5fb217aeac53e7aa95466cdd993d429243 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 5 Apr 2019 17:12:25 +0200 Subject: [PATCH 1738/2699] Sync charm-helpers to enable Ubuntu Disco Change-Id: Idf43a567f749704fba382f5051c91d4f9664bc9d --- .../hooks/charmhelpers/contrib/openstack/amulet/deployment.py | 1 + ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index d1270a73..8e57467b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -312,6 +312,7 @@ def _get_openstack_release_string(self): ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index a3162fac..0ee2b660 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -23,6 +23,7 @@ 'artful', 'bionic', 'cosmic', + 'disco', ) From 5b219de7143fdb77d3ba9508131d932c07fb177c Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 3 Apr 2019 11:30:10 +0200 Subject: [PATCH 1739/2699] Enable bionic/stein and disco/stein functional tests Change-Id: Ibf106a4c327e0f078b41061d83b0ae09694cf0a5 --- .../src/tests/bundles/bionic-stein.yaml | 102 ++++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 4 +- 2 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml 
b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..b09dcf8d --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml @@ -0,0 +1,102 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-stein + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-stein + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-stein + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-stein + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-stein + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-stein + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-stein + bluestore: False + use-direct-io: False + osd-devices: /opt + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-stein +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 813be83b..1cc20b3c 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -6,13 +6,13 @@ gate_bundles: - xenial-queens - bionic-queens - bionic-rocky +- bionic-stein - cosmic-rocky +- disco-stein comment: | - Hold ``disco-stein`` bundle until all dependend charms have disco support. The e2e bundles are useful for development but adds no additional value to the functional tests. dev_bundles: -- disco-stein - bionic-queens-e2e - bionic-queens-e2e-lxd configure: From b196873b771b85b088f1a7e1ee560712b21e4d58 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 3 Apr 2019 11:51:54 +0200 Subject: [PATCH 1740/2699] Enable bionic/stein and disco/stein functional tests Drop service check for ``glance-registry``. The service is removed as of Stein, and it really is not the ``charm-ceph-mon`` functional tests job of checking this anyway. Any such detail should be deferred to the ``charm-glance`` functional tests. 
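The gate/dev bundle split in the ceph-rbd-mirror tests.yaml above is plain YAML data consumed by the test runner; a minimal sketch of inspecting it (path and key names as in that charm's tree above):

    import yaml

    with open('src/tests/tests.yaml') as f:
        spec = yaml.safe_load(f)

    # Gate bundles vote on every change; dev bundles run on demand.
    print(spec['gate_bundles'])  # e.g. ['bionic-stein', 'disco-stein', ...]
    print(spec['dev_bundles'])   # e.g. ['bionic-queens-e2e', ...]
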
Change-Id: I85dd396b55e3d1582da18c6d8248ba1cced3e89f --- ceph-mon/tests/basic_deployment.py | 3 +-- ceph-mon/tests/dev-basic-disco-stein | 23 +++++++++++++++++++++++ ceph-mon/tests/gate-basic-bionic-stein | 25 +++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) create mode 100755 ceph-mon/tests/dev-basic-disco-stein create mode 100755 ceph-mon/tests/gate-basic-bionic-stein diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py index da0a7d07..3713bdb7 100644 --- a/ceph-mon/tests/basic_deployment.py +++ b/ceph-mon/tests/basic_deployment.py @@ -287,8 +287,7 @@ def test_102_services(self): self.rabbitmq_sentry: ['rabbitmq-server'], self.nova_sentry: ['nova-compute'], self.keystone_sentry: ['keystone'], - self.glance_sentry: ['glance-registry', - 'glance-api'], + self.glance_sentry: ['glance-api'], self.cinder_sentry: ['cinder-scheduler', 'cinder-volume'], } diff --git a/ceph-mon/tests/dev-basic-disco-stein b/ceph-mon/tests/dev-basic-disco-stein new file mode 100755 index 00000000..27d2c980 --- /dev/null +++ b/ceph-mon/tests/dev-basic-disco-stein @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on disco-stein.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='disco') + deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-bionic-stein b/ceph-mon/tests/gate-basic-bionic-stein new file mode 100755 index 00000000..ab8f6f20 --- /dev/null +++ b/ceph-mon/tests/gate-basic-bionic-stein @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on bionic-stein.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='bionic', + openstack='cloud:bionic-stein', + source='cloud:bionic-stein') + deployment.run_tests() From 9f738365df6771ec7eb0c4c367912b20eaec3dc5 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Apr 2019 10:38:17 +0100 Subject: [PATCH 1741/2699] Rebuild with new charmhelpers release 0.19.13 supports use of Ubuntu Disco. 
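As the rebuild file's own comment notes, only its last line matters: writing any new UUID there forces the source charm to be rebuilt against the updated dependencies. A minimal sketch of minting a fresh trigger value (uuid1 matches the time-based style of the values in this series, though any unique string would do):

    import uuid

    # Any fresh value works; the build only cares that the file changed.
    print(uuid.uuid1())  # e.g. 120650ec-5aab-11e9-a87e-fbc92e9be59b
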
Change-Id: I8fd684384f0dd5dff07fdb209984b892bedaa6ac --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index b05554ea..551ab478 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -f12babec-56e5-11e9-bef2-a3031baa93b0 +120650ec-5aab-11e9-a87e-fbc92e9be59b From 48659a4b8bd12e4c51d0bcecc7d7195bd93ff06e Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 9 Apr 2019 10:38:30 +0100 Subject: [PATCH 1742/2699] Rebuild with new charmhelpers release 0.19.13 supports use of Ubuntu Disco. Change-Id: I7d2f132603e1ec996888ea91c1b0c562cc63cddc --- ceph-rbd-mirror/rebuild | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 ceph-rbd-mirror/rebuild diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild new file mode 100644 index 00000000..551ab478 --- /dev/null +++ b/ceph-rbd-mirror/rebuild @@ -0,0 +1,5 @@ +# This file is used to trigger rebuilds +# when dependencies of the charm change, +# but nothing in the charm needs to. +# simply change the uuid to something new +120650ec-5aab-11e9-a87e-fbc92e9be59b From 0945883e34c828850692b503f9de9925c04e8b7d Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 8 Apr 2019 13:50:25 +0200 Subject: [PATCH 1743/2699] Enable bionic/stein and disco/stein functional tests Drop explicit install of python-ceph; ceph-common has an appropriate dependency on the required python{3}-ceph package so this is not required, and breaks on Stein where Python 2 packages are no longer provided. Closes-Bug: 1824154 Change-Id: Ia219258f73f038170bc5a070d562e499459fe246 --- ceph-proxy/hooks/ceph.py | 2 +- ceph-proxy/tests/dev-basic-disco-stein | 9 +++++++++ ceph-proxy/tests/gate-basic-bionic-stein | 11 +++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100755 ceph-proxy/tests/dev-basic-disco-stein create mode 100755 ceph-proxy/tests/gate-basic-bionic-stein diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 8000a005..b4878d7a 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -45,7 +45,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] def ceph_user(): diff --git a/ceph-proxy/tests/dev-basic-disco-stein b/ceph-proxy/tests/dev-basic-disco-stein new file mode 100755 index 00000000..dcbc1be4 --- /dev/null +++ b/ceph-proxy/tests/dev-basic-disco-stein @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on disco-stein.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='disco') + deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-bionic-stein b/ceph-proxy/tests/gate-basic-bionic-stein new file mode 100755 index 00000000..76356954 --- /dev/null +++ b/ceph-proxy/tests/gate-basic-bionic-stein @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +"""Amulet tests on a basic ceph deployment on bionic-stein.""" + +from basic_deployment import CephBasicDeployment + +if __name__ == '__main__': + deployment = CephBasicDeployment(series='bionic', + openstack='cloud:bionic-stein', + source='cloud:bionic-stein') + deployment.run_tests() From 2c3bd75d9611d260b5d8faeecd73167c17964c46 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 10 Apr 2019 16:34:46 +0100 Subject: [PATCH 1744/2699] 
Enable bionic/stein and disco/stein functional tests Stein ceph packages no longer provide python-ceph; its not directly used by the charm and the ceph-* packages have an appropriate dependency on the underlying python{3}-ceph package for each release of OpenStack and Ceph. Closes-Bug: 1824154 Change-Id: Ibcec0142dbcff5509c7545f1bcc2d7d832b6d697 --- ceph-fs/src/reactive/ceph_fs.py | 12 ++++------- ceph-fs/src/tests/dev-basic-disco-stein | 23 +++++++++++++++++++++ ceph-fs/src/tests/gate-basic-bionic-stein | 25 +++++++++++++++++++++++ 3 files changed, 52 insertions(+), 8 deletions(-) create mode 100755 ceph-fs/src/tests/dev-basic-disco-stein create mode 100755 ceph-fs/src/tests/gate-basic-bionic-stein diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index dcb7fa82..a4041298 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -16,6 +16,8 @@ import socket import subprocess +import dns.resolver + from charms import reactive from charms.reactive import when, when_not, hook from charms.reactive.flags import set_flag, clear_flag, is_flag_set @@ -34,19 +36,13 @@ from charmhelpers.fetch import ( get_upstream_version, - apt_install, filter_installed_packages) +) import jinja2 from charms.apt import queue_install, add_source -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] -try: - import dns.resolver -except ImportError: - apt_install(filter_installed_packages(['python-dnspython']), - fatal=True) - import dns.resolver TEMPLATES_DIR = 'templates' VERSION_PACKAGE = 'ceph-common' diff --git a/ceph-fs/src/tests/dev-basic-disco-stein b/ceph-fs/src/tests/dev-basic-disco-stein new file mode 100755 index 00000000..9c12c275 --- /dev/null +++ b/ceph-fs/src/tests/dev-basic-disco-stein @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on disco-stein.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='disco') + deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-bionic-stein b/ceph-fs/src/tests/gate-basic-bionic-stein new file mode 100755 index 00000000..32098369 --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-bionic-stein @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Amulet tests on a basic ceph deployment on bionic-stein.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='bionic', + openstack='cloud:bionic-stein', + source='cloud:bionic-stein') + deployment.run_tests() From 65fa947d2418e8c5986949bf5cc4f0a3b5a487a8 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 3 Apr 2019 11:46:43 +0200 Subject: [PATCH 1745/2699] Enable bionic/stein and disco/stein functional tests Also fixes the bionic/rocky and cosmic/rocy bundles. Move cosmic/rocky and disco/stein to dev_bundles. Change-Id: I77b6aa2b5fba921c4735a86028e9e7c8b09ff40a --- ceph-osd/tests/bundles/bionic-rocky.yaml | 18 ++-- ceph-osd/tests/bundles/bionic-stein.yaml | 104 +++++++++++++++++++++++ ceph-osd/tests/bundles/cosmic-rocky.yaml | 11 +-- ceph-osd/tests/bundles/disco-stein.yaml | 90 ++++++++++++++++++++ ceph-osd/tests/tests.yaml | 14 +-- 5 files changed, 212 insertions(+), 25 deletions(-) create mode 100644 ceph-osd/tests/bundles/bionic-stein.yaml create mode 100644 ceph-osd/tests/bundles/disco-stein.yaml diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml index 36ea73ba..f1385175 100644 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -8,43 +8,43 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-updates/rocky + source: cloud:bionic-rocky ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' auth-supported: 'none' - source: cloud:bionic-updates/rocky + source: cloud:bionic-rocky percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: dataset-size: 25% max-connections: 1000 - source: cloud:bionic-updates/rocky + source: cloud:bionic-rocky rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:bionic-updates/rocky + source: cloud:bionic-rocky keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - openstack-origin: cloud:bionic-rocky/proposed + openstack-origin: cloud:bionic-rocky nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: - openstack-origin: cloud:bionic-rocky/proposed + openstack-origin: cloud:bionic-rocky glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:bionic-rocky/proposed + openstack-origin: cloud:bionic-rocky cinder: expose: True charm: cs:~openstack-charmers-next/cinder @@ -52,7 +52,7 @@ applications: options: block-device: 'None' glance-api-version: '2' - openstack-origin: cloud:bionic-rocky/proposed + openstack-origin: cloud:bionic-rocky cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: @@ -60,7 +60,7 @@ applications: charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - openstack-origin: cloud:bionic-rocky/proposed + openstack-origin: cloud:bionic-rocky relations: - - nova-compute:amqp - rabbitmq-server:amqp diff --git a/ceph-osd/tests/bundles/bionic-stein.yaml b/ceph-osd/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..bf822df5 --- /dev/null +++ b/ceph-osd/tests/bundles/bionic-stein.yaml @@ -0,0 +1,104 @@ +series: bionic +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: bionic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + 
source: cloud:bionic-stein + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-stein + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-stein + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-stein + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-stein + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-stein +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/bundles/cosmic-rocky.yaml b/ceph-osd/tests/bundles/cosmic-rocky.yaml index 27fe1047..97a6c6e8 100644 --- a/ceph-osd/tests/bundles/cosmic-rocky.yaml +++ b/ceph-osd/tests/bundles/cosmic-rocky.yaml @@ -1,9 +1,9 @@ series: cosmic applications: ceph-osd: + series: cosmic charm: ceph-osd num_units: 3 - series: cosmic storage: osd-devices: 'cinder,10G' options: @@ -27,19 +27,13 @@ applications: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 - options: - openstack-origin: cloud:bionic-rocky/proposed nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 - options: - openstack-origin: cloud:bionic-rocky/proposed glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - options: - openstack-origin: cloud:bionic-rocky/proposed cinder: expose: True charm: cs:~openstack-charmers-next/cinder @@ -47,15 +41,12 @@ applications: options: block-device: 'None' glance-api-version: '2' - openstack-origin: cloud:bionic-rocky/proposed cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller 
num_units: 1 - options: - openstack-origin: cloud:bionic-rocky/proposed relations: - - nova-compute:amqp - rabbitmq-server:amqp diff --git a/ceph-osd/tests/bundles/disco-stein.yaml b/ceph-osd/tests/bundles/disco-stein.yaml new file mode 100644 index 00000000..06cdaf67 --- /dev/null +++ b/ceph-osd/tests/bundles/disco-stein.yaml @@ -0,0 +1,90 @@ +series: disco +applications: + ceph-osd: + series: disco + charm: ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index e9497d89..6413b9b6 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,16 +1,18 @@ charm_name: ceph-osd gate_bundles: - - trusty-mitaka - - xenial-mitaka - - xenial-ocata - - xenial-pike - - xenial-queens + - bionic-stein + - bionic-rocky - bionic-queens + - xenial-queens + - xenial-pike + - xenial-ocata + - xenial-mitaka + - trusty-mitaka smoke_bundles: - bionic-queens dev_bundles: - - bionic-rocky - cosmic-rocky + - disco-stein configure: - zaza.charm_tests.glance.setup.add_lts_image tests: From 22abd1dc83406a123d8f8f1319b72ecdb4c91243 Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 19 Apr 2019 08:32:52 -0700 Subject: [PATCH 1746/2699] Remove old .pyc files Avoid Byte code errors: bad magic number in 'ceph': b'\x03\xf3\r\n' Change-Id: I156803991b2b3783ef633837a3945f50a86ef0dc Closes-Bug: #1825392 --- ceph-radosgw/hooks/upgrade-charm | 7 ++++++- 1 file changed, 6 insertions(+), 1 
deletion(-) diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm index 14948004..71a85b0a 100755 --- a/ceph-radosgw/hooks/upgrade-charm +++ b/ceph-radosgw/hooks/upgrade-charm @@ -1,4 +1,9 @@ #!/bin/bash -e -# Re-install dependencies to deal with py2->py3 switch for charm +# Wrapper to ensure that old python bytecode isn't hanging around +# after we upgrade the charm with newer libraries +find . -iname '*.pyc' -delete +find . -name '__pycache__' -prune -exec rm -rf "{}" \; + +# Re-install dependencies to deal with py2->py3 switch for charm ./hooks/install_deps From 9a5ac4e7c1fc4dd5abfc891b1f48e580461ba859 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:28:22 +0000 Subject: [PATCH 1747/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. --- ceph-mon/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/.gitreview b/ceph-mon/.gitreview index 47000658..5c2f5880 100644 --- a/ceph-mon/.gitreview +++ b/ceph-mon/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-mon.git \ No newline at end of file From 1f49521dad29dd49f29c155ba4d244e29fda65e0 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:31:00 +0000 Subject: [PATCH 1748/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. 
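The .gitreview files touched by these migration patches are plain INI read by git-review; only the gerrit host changes. A minimal sketch of inspecting one with the standard library (values as in the diffs that follow):

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('.gitreview')
    print(cfg['gerrit']['host'])     # review.opendev.org after the migration
    print(cfg['gerrit']['project'])  # e.g. openstack/charm-ceph-osd.git
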
--- ceph-osd/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/.gitreview b/ceph-osd/.gitreview index c365f65e..8c3d120d 100644 --- a/ceph-osd/.gitreview +++ b/ceph-osd/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-osd.git From 35ce8e245dc9f9fcb6fe11ade5c77f412d629b81 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:35:19 +0000 Subject: [PATCH 1749/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. --- ceph-radosgw/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/.gitreview b/ceph-radosgw/.gitreview index b6cf22ea..496586e6 100644 --- a/ceph-radosgw/.gitreview +++ b/ceph-radosgw/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-radosgw.git From e8ca12d056a17c918675a689b706b7edd87af91b Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:42:09 +0000 Subject: [PATCH 1750/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. --- ceph-proxy/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview index 0eac5a3b..14a8e185 100644 --- a/ceph-proxy/.gitreview +++ b/ceph-proxy/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-proxy From c62d7b8e98134d0051424d9ec6d5157e78e63237 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:44:57 +0000 Subject: [PATCH 1751/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. 
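The byte-code cleanup in the "Remove old .pyc files" patch earlier is done in shell; purely as an illustration of the intent, an equivalent Python sweep (the hook itself stays in bash):

    import pathlib
    import shutil

    # Drop stale byte-code so upgraded libraries are recompiled cleanly.
    for pyc in pathlib.Path('.').rglob('*.pyc'):
        pyc.unlink()
    for cache in pathlib.Path('.').rglob('__pycache__'):
        shutil.rmtree(cache, ignore_errors=True)
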
--- ceph-rbd-mirror/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/.gitreview b/ceph-rbd-mirror/.gitreview index b094119b..4a1c91d6 100644 --- a/ceph-rbd-mirror/.gitreview +++ b/ceph-rbd-mirror/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-rbd-mirror.git From 2c8b419669b29111cd381ac37860fa95559f36b9 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:48:07 +0000 Subject: [PATCH 1752/2699] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. --- ceph-fs/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/.gitreview b/ceph-fs/.gitreview index c1131ca5..245b1ad2 100644 --- a/ceph-fs/.gitreview +++ b/ceph-fs/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-ceph-fs.git From 89e028df4d5a1ca7a053cf64ace9b83631e1bbf6 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 23 Apr 2019 15:33:34 +0200 Subject: [PATCH 1753/2699] Pass key data instead of interface object Update to match change in ``charms.openstack`` Depends-On: If1d645f4708e27b724f93cac0e14431137c885d7 Change-Id: I01053ff88e4dba58893dc20e171095f62524f480 --- ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py | 2 +- ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index a7d3adb1..804bbe82 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -81,7 +81,7 @@ def render_stuff(*args): cluster_name = ( 'remote') if endpoint.endpoint_name == 'ceph-remote' else None - charm_instance.configure_ceph_keyring(endpoint, + charm_instance.configure_ceph_keyring(endpoint.key, cluster_name=cluster_name) charm_instance.render_with_interfaces(args) for service in charm_instance.services: diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index e56ac4d7..e54ff357 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -139,8 +139,8 @@ def test_render_stuff(self): endpoint_local.key = 'akey' handlers.render_stuff(endpoint_local, endpoint_remote) self.crm_charm.configure_ceph_keyring.assert_has_calls([ - mock.call(endpoint_local, cluster_name=None), - mock.call(endpoint_remote, cluster_name='remote'), + mock.call(endpoint_local.key, cluster_name=None), + mock.call(endpoint_remote.key, cluster_name='remote'), ]) self.crm_charm.render_with_interfaces.assert_called_once_with( (endpoint_local, endpoint_remote)) From 8ef24dfc4dd2fd60b33d301debad3499b15db59f Mon Sep 17 00:00:00 2001 From: 
Ghanshyam Mann Date: Tue, 23 Apr 2019 23:12:37 +0000 Subject: [PATCH 1754/2699] Dropping the py35 testing All the integration testing has been moved to Bionic now[1] and py3.5 is not tested runtime for Train or stable/stein[2]. As per below ML thread, we are good to drop the py35 testing now: http://lists.openstack.org/pipermail/openstack-discuss/2019-April/005097.html [1] http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004647.html [2] https://governance.openstack.org/tc/reference/runtimes/stein.html https://governance.openstack.org/tc/reference/runtimes/train.html Change-Id: I17d4ecbe471032b256b2113768a8e62c4fd45161 --- ceph-radosgw/.zuul.yaml | 2 +- ceph-radosgw/tox.ini | 7 +------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index 7051aeeb..affeb907 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - python35-charm-jobs + - python36-charm-jobs diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 004005fa..01095fc5 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. [tox] -envlist = pep8,py35,py36 +envlist = pep8,py36 skipsdist = True [testenv] @@ -22,11 +22,6 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt From 1c6d876c089c06f94c45f43f6f83df0c7be3c2b9 Mon Sep 17 00:00:00 2001 From: Pete Vander Giessen Date: Wed, 24 Apr 2019 09:39:26 -0400 Subject: [PATCH 1755/2699] Fix spurious nagios alerts for radosgw service. Currently, when the charm tears down the default radosgw daemon in order to make way for per host daemons, it does not remove the nrpe check for the daemon. This PR fixes the issue. It also closes a gap where alerts for the per host daemons are not setup until a hook that happens to call update_nrpe_checks as a side-effect is run. Change-Id: I7621b9671b010a77bb3e94bdd1e80f45274c73e5 Closes-Bug: #1825843 --- ceph-radosgw/hooks/hooks.py | 23 +++++++++++++++++++- ceph-radosgw/unit_tests/test_hooks.py | 31 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 5cc7b9f3..7db0e240 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -255,6 +255,12 @@ def _mon_relation(): if systemd_based_radosgw(): service_stop('radosgw') service('disable', 'radosgw') + # Update the nrpe config. If we wait for the below + # to be called elsewhere, there exists a period + # where nagios will report the radosgw service as + # down, and also not be monitoring the per + # host services. + update_nrpe_config(checks_to_remove=['radosgw']) service('enable', service_name()) # NOTE(jamespage): @@ -362,13 +368,28 @@ def ha_relation_changed(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') -def update_nrpe_config(): +def update_nrpe_config(checks_to_remove=None): + """ + Update the checks for the nagios plugin. + + :param checks_to_remove: list of short names of nrpe checks to + remove. For example, pass ['radosgw'] to remove the check for + the default systemd radosgw service, to make way for per host + services. 
+ :type checks_to_remove: list + + """ # python-dbus is used by check_upstart_job apt_install('python-dbus') hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() nrpe_setup = nrpe.NRPE(hostname=hostname) nrpe.copy_nrpe_checks() + if checks_to_remove is not None: + log("Removing the following nrpe checks: {}".format(checks_to_remove), + level=DEBUG) + for svc in checks_to_remove: + nrpe_setup.remove_check(shortname=svc) nrpe.add_init_service_checks(nrpe_setup, services(), current_unit) nrpe.add_haproxy_checks(nrpe_setup, current_unit) nrpe_setup.write() diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 3051572b..cc046b83 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -671,3 +671,34 @@ def test_slave_relation_changed_not_leader(self): self.is_leader.return_value = False ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') self.relation_get.assert_not_called() + + @patch.object(ceph_hooks, 'apt_install') + @patch.object(ceph_hooks, 'services') + @patch.object(ceph_hooks, 'nrpe') + def test_update_nrpe_config(self, nrpe, services, apt_install): + # Setup Mocks + nrpe.get_nagios_hostname.return_value = 'foo' + nrpe.get_nagios_unit_name.return_value = 'bar' + nrpe_setup = MagicMock() + nrpe.NRPE.return_value = nrpe_setup + services.return_value = ['baz', 'qux'] + + # Call the routine + ceph_hooks.update_nrpe_config() + + # Verify calls + apt_install.assert_called() + nrpe.get_nagios_hostname.assert_called() + nrpe.get_nagios_unit_name.assert_called() + nrpe.copy_nrpe_checks.assert_called() + nrpe.remove_check.assert_not_called() + nrpe.add_init_service_checks.assert_called_with(nrpe_setup, + ['baz', 'qux'], 'bar') + nrpe.add_haproxy_checks.assert_called_with(nrpe_setup, 'bar') + nrpe_setup.write.assert_called() + + # Verify that remove_check is called appropriately if we pass + # checks_to_remove + ceph_hooks.update_nrpe_config(checks_to_remove=['quux', 'quuux']) + nrpe_setup.remove_check.assert_has_calls([call(shortname='quux'), + call(shortname='quuux')]) From 885f3ecad9c0774c2dda8825a21811088c4dbd57 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 23 Apr 2019 13:46:19 +0200 Subject: [PATCH 1756/2699] Migrate Zaza tests to independent OpenStack Charms-specific library Change-Id: Ia50a57c52c97208a014ea89e6c48be2fe6f9f36d --- ceph-osd/test-requirements.txt | 2 +- ceph-osd/tests/tests.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index b378cb0c..659376ab 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -21,4 +21,4 @@ distro-info git+https://github.com/juju/charm-helpers.git#egg=charmhelpers pytz pyudev # for ceph-* charm unit tests (not mocked?)
-git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>='3.0' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 6413b9b6..7b1dd330 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -14,9 +14,9 @@ dev_bundles: - cosmic-rocky - disco-stein configure: - - zaza.charm_tests.glance.setup.add_lts_image + - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: - - zaza.charm_tests.ceph.tests.CephLowLevelTest - - zaza.charm_tests.ceph.tests.CephRelationTest - - zaza.charm_tests.ceph.tests.CephTest - - zaza.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest From 4bd19823058be24e35e3a278153aadeea3b38e11 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 24 Apr 2019 17:31:34 +0200 Subject: [PATCH 1757/2699] Migrate Zaza tests to independent OpenStack Charms-specific library Change-Id: Ib39798d8676e9431a55be6a0a52e9ad5de66f3da --- ceph-radosgw/test-requirements.txt | 2 +- ceph-radosgw/tests/tests.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index b378cb0c..f53935c0 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -21,4 +21,4 @@ distro-info git+https://github.com/juju/charm-helpers.git#egg=charmhelpers pytz pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>='3.0' \ No newline at end of file diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index a8ed3404..55f93139 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -15,4 +15,4 @@ smoke_bundles: dev_bundles: - cosmic-rocky tests: - - zaza.charm_tests.ceph.tests.CephRGWTest + - zaza.openstack.charm_tests.ceph.tests.CephRGWTest From 0a91b61963f9c52e3bcea4bf7d7f685d339e54db Mon Sep 17 00:00:00 2001 From: Nicolas Pochet Date: Fri, 26 Apr 2019 20:15:36 +0200 Subject: [PATCH 1758/2699] Fix NRPE check_osd Move the systemd service state results to `/var/lib/nagios` Change-Id: I83287590e279054973fdb28b374a49704626ed01 Closes-Bug: 1826594 --- ceph-osd/files/nagios/check_ceph_osd_services.py | 4 ++-- ceph-osd/files/nagios/collect_ceph_osd_services.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-osd/files/nagios/check_ceph_osd_services.py b/ceph-osd/files/nagios/check_ceph_osd_services.py index 669160d4..f53c5fa5 100755 --- a/ceph-osd/files/nagios/check_ceph_osd_services.py +++ b/ceph-osd/files/nagios/check_ceph_osd_services.py @@ -6,9 +6,9 @@ import os import sys -import tempfile CRON_CHECK_TMPFILE = 'ceph-osd-checks' +NAGIOS_HOME = '/var/lib/nagios' STATE_OK = 0 STATE_WARNING = 1 @@ -25,7 +25,7 @@ def run_main(): :returns: nagios state 0,2 or 3 """ - _tmp_file = os.path.join(tempfile.gettempdir(), CRON_CHECK_TMPFILE) + _tmp_file = os.path.join(NAGIOS_HOME, CRON_CHECK_TMPFILE) if not os.path.isfile(_tmp_file): print("File '{}' doesn't exist".format(_tmp_file)) diff --git a/ceph-osd/files/nagios/collect_ceph_osd_services.py
b/ceph-osd/files/nagios/collect_ceph_osd_services.py index 7133a75e..84764aba 100755 --- a/ceph-osd/files/nagios/collect_ceph_osd_services.py +++ b/ceph-osd/files/nagios/collect_ceph_osd_services.py @@ -6,7 +6,6 @@ import os import subprocess -import tempfile # fasteners only exists in Bionic, so this will fail on xenial and trusty try: @@ -17,6 +16,7 @@ SYSTEMD_SYSTEM = '/run/systemd/system' LOCKFILE = '/var/lock/check-osds.lock' CRON_CHECK_TMPFILE = 'ceph-osd-checks' +NAGIOS_HOME = '/var/lib/nagios' def init_is_systemd(): @@ -67,7 +67,7 @@ def do_status(): .format(e.output.decode('utf-8'))) lines.append(output) - _tmp_file = os.path.join(tempfile.gettempdir(), CRON_CHECK_TMPFILE) + _tmp_file = os.path.join(NAGIOS_HOME, CRON_CHECK_TMPFILE) with open(_tmp_file, 'wt') as f: f.writelines(lines) From 41a292cb99974f47fff1345f76749dfbb451bf0b Mon Sep 17 00:00:00 2001 From: Marian Gasparovic Date: Fri, 26 Apr 2019 16:54:02 +0200 Subject: [PATCH 1759/2699] Creates nrpe check for number of OSDs An alert is triggered when the number of known OSDs in the osdmap differs from the number of "in" or "up" OSDs. Change-Id: Id3d43f0146452d0bbd73e1ce98616a994eaee090 Partial-Bug: 1735579 --- ceph-mon/config.yaml | 10 +++- ceph-mon/files/nagios/check_ceph_status.py | 19 +++++++ ceph-mon/hooks/ceph_hooks.py | 8 +++ ceph-mon/unit_tests/ceph_warn.json | 2 +- ceph-mon/unit_tests/test_ceph_hooks.py | 1 + ceph-mon/unit_tests/test_check_ceph_status.py | 52 +++++++++++++++++++ 6 files changed, 89 insertions(+), 3 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 4b4a3c0a..72392e17 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -204,6 +204,12 @@ options: description: | Whether to report Critical instead of Warning when the nodeep-scrub flag is set. + nagios_check_num_osds: + default: False + type: boolean + description: | + Whether to report an error when the number of known OSDs does not equal + the number of OSDs in or up. nagios_additional_checks: default: "" type: string @@ -214,8 +220,8 @@ options: . Example: . - {'noout': 'noout', 'too_few': 'too few PGs', 'clock': 'clock skew', - 'osd-down': 'osds down', 'degraded_redundancy': 'Degraded data redundancy'} + {'noout_set': 'noout', 'too_few_PGs': 'too few PGs', 'clock': 'clock skew', + 'degraded_redundancy': 'Degraded data redundancy'} . nagios_additional_checks_critical: default: False diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 986426b5..844e7f2f 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -175,6 +175,20 @@ def check_ceph_status(args): print(message_all_ok) return message_all_ok + # if it is just --check_num_osds, deal with it and ignore overall health + if args.check_num_osds: + osdmap = status_data['osdmap']['osdmap'] + num_osds = osdmap['num_osds'] + num_up_osds = osdmap['num_up_osds'] + num_in_osds = osdmap['num_in_osds'] + if num_osds != num_up_osds or num_up_osds != num_in_osds: + msg = "CRITICAL: OSDs: {}, OSDs up: {}, OSDs in: {}".format( + num_osds, num_up_osds, num_in_osds) + raise CriticalError(msg) + message_ok = "OK: {} OSDs, all up and in".format(num_osds) + print(message_ok) + return message_ok + if overall_status != 'HEALTH_OK': # Health is not OK, collect status message(s) and # decide whether to return warning or critical @@ -265,6 +279,11 @@ def parse_args(args): "positive. If the argument is not provided," "check returns a warning. 
Otherwise it " "returns an error condition.") + parser.add_argument('--check_num_osds', + dest='check_num_osds', default=False, + action='store_true', + help="Check whether all OSDs are up and in") + return parser.parse_args(args) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 34e06db9..e94df9d6 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -877,6 +877,14 @@ def update_nrpe_config(): current_unit), check_cmd=check_cmd ) + if config('nagios_check_num_osds'): + check_cmd = 'check_ceph_status.py -f {} --check_num_osds'.format( + STATUS_FILE) + nrpe_setup.add_check( + shortname='ceph_num_osds', + description='Check whether all OSDs are up and in', + check_cmd=check_cmd + ) nrpe_setup.write() diff --git a/ceph-mon/unit_tests/ceph_warn.json b/ceph-mon/unit_tests/ceph_warn.json index 45c81578..3688dd00 100644 --- a/ceph-mon/unit_tests/ceph_warn.json +++ b/ceph-mon/unit_tests/ceph_warn.json @@ -1 +1 @@ -{"health":{"health":{"health_services":[{"mons":[{"name":"juju-2691ab-1-lxd-1","kb_total":155284096,"kb_used":1247744,"kb_avail":154036352,"avail_percent":99,"last_updated":"2017-05-17 03:31:35.562497","store_stats":{"bytes_total":1012055342,"bytes_sst":0,"bytes_log":29673298,"bytes_misc":982382044,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","kb_total":153820288,"kb_used":1361280,"kb_avail":152459008,"avail_percent":99,"last_updated":"2017-05-17 03:31:04.097201","store_stats":{"bytes_total":1370003168,"bytes_sst":0,"bytes_log":29813159,"bytes_misc":1340190009,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","kb_total":155251072,"kb_used":1373440,"kb_avail":153877632,"avail_percent":99,"last_updated":"2017-05-17 03:31:20.684777","store_stats":{"bytes_total":1400974192,"bytes_sst":0,"bytes_log":1129945,"bytes_misc":1399844247,"last_updated":"0.000000"},"health":"HEALTH_OK"}]}]},"timechecks":{"epoch":32,"round":24492,"round_status":"finished","mons":[{"name":"juju-2691ab-1-lxd-1","skew":0.000000,"latency":0.000000,"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","skew":0.000919,"latency":0.001036,"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","skew":0.000000,"latency":0.001009,"health":"HEALTH_OK"}]},"summary":[{"severity":"HEALTH_WARN","summary":"48 pgs backfill_wait"},{"severity":"HEALTH_WARN","summary":"45 pgs backfilling"},{"severity":"HEALTH_WARN","summary":"1 pgs degraded"},{"severity":"HEALTH_WARN","summary":"1 pgs recovery_wait"},{"severity":"HEALTH_WARN","summary":"22 pgs stuck unclean"},{"severity":"HEALTH_WARN","summary":"recovery 14\/46842755 objects degraded (0.000%)"},{"severity":"HEALTH_WARN","summary":"recovery 448540\/46842755 objects misplaced (0.958%)"}],"overall_status":"HEALTH_WARN","detail":[]},"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","election_epoch":32,"quorum":[0,1,2],"quorum_names":["juju-2691ab-1-lxd-1","juju-2691ab-13-lxd-0","juju-2691ab-2-lxd-1"],"monmap":{"epoch":1,"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","modified":"2016-12-03 08:09:21.854671","created":"2016-12-03 
08:09:21.854671","mons":[{"rank":0,"name":"juju-2691ab-1-lxd-1","addr":"10.182.254.221:6789\/0"},{"rank":1,"name":"juju-2691ab-13-lxd-0","addr":"10.182.254.229:6789\/0"},{"rank":2,"name":"juju-2691ab-2-lxd-1","addr":"10.182.254.242:6789\/0"}]},"osdmap":{"osdmap":{"epoch":141540,"num_osds":314,"num_up_osds":314,"num_in_osds":314,"full":false,"nearfull":false,"num_remapped_pgs":92}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":9274},{"state_name":"active+remapped+wait_backfill","count":48},{"state_name":"active+remapped+backfilling","count":45},{"state_name":"active+clean+scrubbing+deep","count":9},{"state_name":"active+remapped","count":2},{"state_name":"active+recovery_wait+degraded","count":1},{"state_name":"active+clean+scrubbing","count":1}],"version":13885884,"num_pgs":9380,"data_bytes":64713222471610,"bytes_used":193613093122048,"bytes_avail":690058090491904,"bytes_total":883671183613952,"degraded_objects":14,"degraded_total":46842755,"degraded_ratio":0.000000,"misplaced_objects":448540,"misplaced_total":46842755,"misplaced_ratio":0.009575,"recovering_objects_per_sec":389,"recovering_bytes_per_sec":1629711746,"recovering_keys_per_sec":0,"num_objects_recovered":218,"num_bytes_recovered":912252928,"num_keys_recovered":0,"read_bytes_sec":117041457,"write_bytes_sec":293414043,"read_op_per_sec":5282,"write_op_per_sec":5270},"fsmap":{"epoch":1,"by_rank":[]}} +{"health":{"health":{"health_services":[{"mons":[{"name":"juju-2691ab-1-lxd-1","kb_total":155284096,"kb_used":1247744,"kb_avail":154036352,"avail_percent":99,"last_updated":"2017-05-17 03:31:35.562497","store_stats":{"bytes_total":1012055342,"bytes_sst":0,"bytes_log":29673298,"bytes_misc":982382044,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","kb_total":153820288,"kb_used":1361280,"kb_avail":152459008,"avail_percent":99,"last_updated":"2017-05-17 03:31:04.097201","store_stats":{"bytes_total":1370003168,"bytes_sst":0,"bytes_log":29813159,"bytes_misc":1340190009,"last_updated":"0.000000"},"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","kb_total":155251072,"kb_used":1373440,"kb_avail":153877632,"avail_percent":99,"last_updated":"2017-05-17 03:31:20.684777","store_stats":{"bytes_total":1400974192,"bytes_sst":0,"bytes_log":1129945,"bytes_misc":1399844247,"last_updated":"0.000000"},"health":"HEALTH_OK"}]}]},"timechecks":{"epoch":32,"round":24492,"round_status":"finished","mons":[{"name":"juju-2691ab-1-lxd-1","skew":0.000000,"latency":0.000000,"health":"HEALTH_OK"},{"name":"juju-2691ab-13-lxd-0","skew":0.000919,"latency":0.001036,"health":"HEALTH_OK"},{"name":"juju-2691ab-2-lxd-1","skew":0.000000,"latency":0.001009,"health":"HEALTH_OK"}]},"summary":[{"severity":"HEALTH_WARN","summary":"48 pgs backfill_wait"},{"severity":"HEALTH_WARN","summary":"45 pgs backfilling"},{"severity":"HEALTH_WARN","summary":"1 pgs degraded"},{"severity":"HEALTH_WARN","summary":"1 pgs recovery_wait"},{"severity":"HEALTH_WARN","summary":"22 pgs stuck unclean"},{"severity":"HEALTH_WARN","summary":"recovery 14\/46842755 objects degraded (0.000%)"},{"severity":"HEALTH_WARN","summary":"recovery 448540\/46842755 objects misplaced (0.958%)"}],"overall_status":"HEALTH_WARN","detail":[]},"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","election_epoch":32,"quorum":[0,1,2],"quorum_names":["juju-2691ab-1-lxd-1","juju-2691ab-13-lxd-0","juju-2691ab-2-lxd-1"],"monmap":{"epoch":1,"fsid":"ca9451f1-5c4f-4e85-bb14-a08dfc0568f7","modified":"2016-12-03 08:09:21.854671","created":"2016-12-03 
08:09:21.854671","mons":[{"rank":0,"name":"juju-2691ab-1-lxd-1","addr":"10.182.254.221:6789\/0"},{"rank":1,"name":"juju-2691ab-13-lxd-0","addr":"10.182.254.229:6789\/0"},{"rank":2,"name":"juju-2691ab-2-lxd-1","addr":"10.182.254.242:6789\/0"}]},"osdmap":{"osdmap":{"epoch":141540,"num_osds":314,"num_up_osds":311,"num_in_osds":311,"full":false,"nearfull":false,"num_remapped_pgs":92}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":9274},{"state_name":"active+remapped+wait_backfill","count":48},{"state_name":"active+remapped+backfilling","count":45},{"state_name":"active+clean+scrubbing+deep","count":9},{"state_name":"active+remapped","count":2},{"state_name":"active+recovery_wait+degraded","count":1},{"state_name":"active+clean+scrubbing","count":1}],"version":13885884,"num_pgs":9380,"data_bytes":64713222471610,"bytes_used":193613093122048,"bytes_avail":690058090491904,"bytes_total":883671183613952,"degraded_objects":14,"degraded_total":46842755,"degraded_ratio":0.000000,"misplaced_objects":448540,"misplaced_total":46842755,"misplaced_ratio":0.009575,"recovering_objects_per_sec":389,"recovering_bytes_per_sec":1629711746,"recovering_keys_per_sec":0,"num_objects_recovered":218,"num_bytes_recovered":912252928,"num_keys_recovered":0,"read_bytes_sec":117041457,"write_bytes_sec":293414043,"read_op_per_sec":5282,"write_op_per_sec":5270},"fsmap":{"epoch":1,"by_rank":[]}} diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 371f0a25..a0ed8d11 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -52,6 +52,7 @@ 'nagios_raise_nodeepscrub': True, 'nagios_additional_checks': "", 'nagios_additional_checks_critical': False, + 'nagios_check_num_osds': False, 'disable-pg-max-object-skew': False} diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index 44f8e517..377976e3 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -288,3 +288,55 @@ def test_additional_error_deepscrub_pre_luminous(self, '--additional_check_critical']) self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + + # Num OSD OK, pre-luminous + @patch('check_ceph_status.get_ceph_version') + def test_num_osds_ok_pre_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] + with open('unit_tests/ceph_ok.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--check_num_osds']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegex(check_output, r"^OK") + + # Num OSD error, pre-luminous + @patch('check_ceph_status.get_ceph_version') + def test_num_osds_error_pre_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] + with open('unit_tests/ceph_warn.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--check_num_osds']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Num OSD OK, luminous + @patch('check_ceph_status.get_ceph_version') + def test_num_osds_ok_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_many_warnings_luminous.json') as f: + tree = f.read() + 
mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--check_num_osds']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegex(check_output, r"^OK") + + # Num OSD error, luminous + @patch('check_ceph_status.get_ceph_version') + def test_num_osds_error_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_degraded_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args(['--check_num_osds']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) From 8656dec4668724ebe848a24e988ca8a44eb01109 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 7 May 2019 13:38:23 +0000 Subject: [PATCH 1760/2699] Charmhelper sync and corresponding unit test fix. Change-Id: I291fb02e1439a224ed91f0d7d2bd80115b73671a --- .../contrib/openstack/cert_utils.py | 4 + .../charmhelpers/contrib/openstack/context.py | 78 +++++++++++++++++++ .../section-keystone-authtoken-v3only | 9 +++ .../templates/section-oslo-notifications | 3 +- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../unit_tests/test_ceph_radosgw_context.py | 22 ++++-- 6 files changed, 111 insertions(+), 7 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 47b8603a..0ba57024 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -220,6 +220,8 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -235,6 +237,8 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + return True + return False def get_requests_for_local_unit(relation_name=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index d5133713..51ee03b8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -117,6 +117,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" def ensure_packages(packages): @@ -351,10 +352,70 @@ def _setup_pki_cache(self): return cachedir return None + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. 
+ + :param python_name: nameof the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages(pkg): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. + + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + def __call__(self): log('Generating template context for ' + self.rel_name, level=DEBUG) ctxt = {} + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + cachedir = self._setup_pki_cache() if cachedir: ctxt['signing_dir'] = cachedir @@ -385,6 +446,14 @@ def __call__(self): ctxt.update({'admin_domain_name': rdata.get('service_domain')}) + # we keep all veriables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs @@ -569,6 +638,15 @@ def __call__(self): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + if not self.complete: return {} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only new file mode 100644 index 00000000..d26a91fe --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +{% for option_name, option_value in keystone_authtoken.items() -%} +{{ option_name }} = {{ 
option_value }} +{% endfor -%} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 021a3c25..7bb43d4f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -1,11 +1,12 @@ {% if transport_url -%} [oslo_messaging_notifications] -driver = messagingv2 +driver = {{ oslo_messaging_driver }} transport_url = {{ transport_url }} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} {% if notification_format -%} +[notifications] notification_format = {{ notification_format }} {% endif -%} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index e5e25369..1914ab84 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -194,7 +194,7 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.20.0']), + ['2.20.0', '2.21.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 4111d4c0..b8743e35 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -75,6 +75,8 @@ def setUp(self): self.maxDiff = None self.cmp_pkgrevno.return_value = 1 + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @@ -82,7 +84,8 @@ def setUp(self): @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, - _format_ipv6_addr, jewel_installed=False): + _format_ipv6_addr, _filter_installed_packages, + jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) @@ -130,6 +133,8 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @@ -138,7 +143,7 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt_missing_admin_domain_id( self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - jewel_installed=False): + _filter_installed_packages, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) @@ -184,6 +189,8 @@ def 
test_ids_ctxt_missing_admin_domain_id( expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @@ -192,7 +199,7 @@ def test_ids_ctxt_missing_admin_domain_id( @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt_v3( self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - jewel_installed=False): + _filter_installed_packages, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) @@ -246,6 +253,8 @@ def test_ids_ctxt_v3( def test_ids_ctxt_jewel(self): self.test_ids_ctxt(jewel_installed=True) + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @@ -253,7 +262,8 @@ def test_ids_ctxt_jewel(self): @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, - _ctxt_comp, _format_ipv6_addr): + _ctxt_comp, _format_ipv6_addr, + _filter_installed_packages): self.test_config.set('operator-roles', 'Babel') self.test_config.set('cache-size', '42') self.test_relation.set({}) @@ -277,9 +287,11 @@ def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, ids_ctxt = context.IdentityServiceContext() self.assertEqual({}, ids_ctxt()) + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_no_rels(self, _log, _rids): + def test_ids_ctxt_no_rels(self, _log, _rids, _filter_installed_packages): _rids.return_value = [] ids_ctxt = context.IdentityServiceContext() self.assertEqual(ids_ctxt(), None) From 3b8d0522f26a2af2ead9e79326f3a941257081fb Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 7 May 2019 17:22:51 +0200 Subject: [PATCH 1761/2699] Remove duplicate CharmHelpers Because we have charmhelpers in-tree, adding it via test-requirements.txt means we have duplicate modules Change-Id: I7109f96124b3b52fd25a41d3ac60c37e651eb43a --- ceph-radosgw/test-requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index f53935c0..5d24ce55 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -18,7 +18,6 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers pytz pyudev # for ceph-* charm unit tests (not mocked?) 
git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>='3.0' \ No newline at end of file From 759c98893a1d2579d7ddec5d7bc83106eeb4aa79 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 8 May 2019 12:22:46 +0200 Subject: [PATCH 1762/2699] Remove duplicate CharmHelpers Because we have charmhelpers in-tree, adding it via test-requirements.txt means we have duplicate modules Change-Id: Ib3b14aa7cf6bcdc75dde9c9130beef2179767c6b --- ceph-osd/test-requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 659376ab..d129d57d 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -18,7 +18,6 @@ python-openstackclient>=1.7.0 python-swiftclient>=2.6.0 pika>=0.10.0,<1.0 distro-info -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers pytz pyudev # for ceph-* charm unit tests (not mocked?) git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>='3.0' From 98835d3954e00b9bf0fd30c4ffc52348f1256044 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 9 May 2019 12:29:52 +0200 Subject: [PATCH 1763/2699] Migrate to split zaza - zaza.openstack This change adds zaza.openstack and changes the branch target of zaza to a branch with zaza.openstack removed, allowing us to validate the new library Change-Id: Idb00a091e17db9a1d0bb22fe4738fcf931d85db4 --- ceph-rbd-mirror/src/test-requirements.txt | 3 ++- ceph-rbd-mirror/src/tests/tests.yaml | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index e4401e40..f828a5ae 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -7,4 +7,5 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 1cc20b3c..7653f783 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -16,8 +16,8 @@ dev_bundles: - bionic-queens-e2e - bionic-queens-e2e-lxd configure: -- zaza.charm_tests.glance.setup.add_lts_image +- zaza.openstack.charm_tests.glance.setup.add_lts_image tests: -- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest -- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest -- zaza.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest +- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest +- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest +- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest From f2465dfbf804987eebfd758fb7a70a7059d674f1 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 9 May 2019 12:29:39 +0200 Subject: [PATCH 1764/2699] Migrate to split zaza - zaza.openstack This change adds zaza.openstack and changes the branch target of zaza to a branch with zaza.openstack removed, allowing us to validate the new library Change-Id: Ide3a4a0e05d88731bbf5c1c33475ad3d86b95e6f --- ceph-osd/test-requirements.txt | 3 ++- ceph-osd/tox.ini | 1 + 2 files changed, 3 
insertions(+), 1 deletion(-) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index d129d57d..cb80d6ed 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -20,4 +20,5 @@ pika>=0.10.0,<1.0 distro-info pytz pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index f5b4a7e2..4d35de76 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -23,6 +23,7 @@ deps = -r{toxinidir}/test-requirements.txt # permitted. http://governance.openstack.org/reference/cti/python_cti.html whitelist_externals = true commands = true +deps = [testenv:py35] basepython = python3.5 From 2cad674503b302a84f0f6f0e64544b46577fb69c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 10 May 2019 17:05:13 +0200 Subject: [PATCH 1765/2699] Migrate to split zaza - zaza.openstack This change adds zaza.openstack and changes the branch target of zaza to a branch with zaza.openstack removed, allowing us to validate the new library Change-Id: I72415540bee6eea1969432bb3e35a5573def9d49 --- ceph-radosgw/test-requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 5d24ce55..434ccc66 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -20,4 +20,5 @@ pika>=0.10.0,<1.0 distro-info pytz pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git@namespace-tests#egg=zaza;python_version>'3.4' +git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>'3.4' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4' \ No newline at end of file From 2f0945f7973e6f5304f5c8034c07ff38c3142167 Mon Sep 17 00:00:00 2001 From: David Ames Date: Tue, 14 May 2019 13:24:38 -0700 Subject: [PATCH 1766/2699] Validate output of list_logical_volumes The charm was indexing the zeroth element of the return value of list_logical_volumes. However, if no logical volumes are found, it returns an empty list. This change validates that the list has an entry.
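A minimal sketch of the guard this change introduces in ceph-osd/lib/ceph/utils.py (is_active_bluestore_device), using the charmhelpers lvm helpers that module already relies on; the wrapper name first_logical_volume is hypothetical and for illustration only:

    # Sketch only: list_logical_volumes() returns an empty list when no
    # LVs match the selection criteria, so an unguarded [0] lookup
    # raises IndexError.
    from charmhelpers.contrib.storage.linux import lvm

    def first_logical_volume(dev):
        """Return the first LV in dev's volume group, or None if none exist."""
        vg_name = lvm.list_lvm_volume_group(dev)
        try:
            return lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0]
        except IndexError:
            # No logical volumes found for this volume group.
            return None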
Depends-On: I75a6b1dda15dd7c2cece8cfe97b28317b3d5162b Change-Id: I2d371dae94dca328cf4782a79e85c1c6fd77f547 Closes-Bug: #1819382 --- ceph-osd/lib/ceph/utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 35f351f3..972f1ad3 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1555,7 +1555,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append(osd_format) # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + if cmp_pkgrevno('ceph', '12.2.0') >= 0 and bluestore: cmd.append('--bluestore') wal = get_devices('bluestore-wal') if wal: @@ -1567,7 +1567,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append('--block.db') least_used_db = find_least_used_utility_device(db) cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + elif cmp_pkgrevno('ceph', '12.2.0') >= 0 and not bluestore: cmd.append('--filestore') cmd.append(os.path.realpath(dev)) @@ -1692,7 +1692,10 @@ def is_active_bluestore_device(dev): return False vg_name = lvm.list_lvm_volume_group(dev) - lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + try: + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + except IndexError: + return False block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') for block_candidate in block_symlinks: From 7924f580359e66832a872d5151d79e4f6558b240 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 1 May 2019 15:26:21 +0200 Subject: [PATCH 1767/2699] Migrate charm-ceph-mon testing to Zaza Closes-Bug: #1828424 Change-Id: Ie46129f02566f17eabbf2eb0cae217fa0e886a4f --- ceph-mon/test-requirements.txt | 2 + ceph-mon/tests/README.md | 9 - ceph-mon/tests/basic_deployment.py | 835 ---------------------- ceph-mon/tests/bundles/bionic-queens.yaml | 90 +++ ceph-mon/tests/bundles/bionic-rocky.yaml | 104 +++ ceph-mon/tests/bundles/bionic-stein.yaml | 104 +++ ceph-mon/tests/bundles/cosmic-rocky.yaml | 90 +++ ceph-mon/tests/bundles/disco-stein.yaml | 90 +++ ceph-mon/tests/bundles/trusty-mitaka.yaml | 140 ++++ ceph-mon/tests/bundles/xenial-mitaka.yaml | 90 +++ ceph-mon/tests/bundles/xenial-ocata.yaml | 104 +++ ceph-mon/tests/bundles/xenial-pike.yaml | 104 +++ ceph-mon/tests/bundles/xenial-queens.yaml | 104 +++ ceph-mon/tests/dev-basic-cosmic-rocky | 23 - ceph-mon/tests/dev-basic-disco-stein | 23 - ceph-mon/tests/gate-basic-bionic-queens | 23 - ceph-mon/tests/gate-basic-bionic-rocky | 25 - ceph-mon/tests/gate-basic-bionic-stein | 25 - ceph-mon/tests/gate-basic-trusty-mitaka | 25 - ceph-mon/tests/gate-basic-xenial-mitaka | 23 - ceph-mon/tests/gate-basic-xenial-ocata | 25 - ceph-mon/tests/gate-basic-xenial-pike | 25 - ceph-mon/tests/gate-basic-xenial-queens | 25 - ceph-mon/tests/tests.yaml | 40 +- ceph-mon/tox.ini | 47 +- 25 files changed, 1054 insertions(+), 1141 deletions(-) delete mode 100644 ceph-mon/tests/README.md delete mode 100644 ceph-mon/tests/basic_deployment.py create mode 100644 ceph-mon/tests/bundles/bionic-queens.yaml create mode 100644 ceph-mon/tests/bundles/bionic-rocky.yaml create mode 100644 ceph-mon/tests/bundles/bionic-stein.yaml create mode 100644 ceph-mon/tests/bundles/cosmic-rocky.yaml create mode 100644 ceph-mon/tests/bundles/disco-stein.yaml create mode 100644 ceph-mon/tests/bundles/trusty-mitaka.yaml create mode 100644 ceph-mon/tests/bundles/xenial-mitaka.yaml create 
mode 100644 ceph-mon/tests/bundles/xenial-ocata.yaml create mode 100644 ceph-mon/tests/bundles/xenial-pike.yaml create mode 100644 ceph-mon/tests/bundles/xenial-queens.yaml delete mode 100755 ceph-mon/tests/dev-basic-cosmic-rocky delete mode 100755 ceph-mon/tests/dev-basic-disco-stein delete mode 100755 ceph-mon/tests/gate-basic-bionic-queens delete mode 100755 ceph-mon/tests/gate-basic-bionic-rocky delete mode 100755 ceph-mon/tests/gate-basic-bionic-stein delete mode 100755 ceph-mon/tests/gate-basic-trusty-mitaka delete mode 100755 ceph-mon/tests/gate-basic-xenial-mitaka delete mode 100755 ceph-mon/tests/gate-basic-xenial-ocata delete mode 100755 ceph-mon/tests/gate-basic-xenial-pike delete mode 100755 ceph-mon/tests/gate-basic-xenial-queens diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 272ce1da..bd4be894 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -27,3 +27,5 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers # NOTE: workaround for 14.04 pip/tox pytz pyudev # for ceph-* charm unit tests (not mocked?) +git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>'3.4' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4' \ No newline at end of file diff --git a/ceph-mon/tests/README.md b/ceph-mon/tests/README.md deleted file mode 100644 index 046be7fb..00000000 --- a/ceph-mon/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Overview - -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -For full details on functional testing of OpenStack charms please refer to -the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) -section of the OpenStack Charm Guide. diff --git a/ceph-mon/tests/basic_deployment.py b/ceph-mon/tests/basic_deployment.py deleted file mode 100644 index 3713bdb7..00000000 --- a/ceph-mon/tests/basic_deployment.py +++ /dev/null @@ -1,835 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import amulet -import re -import time -import json - -import keystoneclient -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import client as nova_client - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa - OpenStackAmuletUtils, - DEBUG, - # ERROR - ) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph deployment.""" - - def __init__(self, series=None, openstack=None, source=None, stable=False): - """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source, - stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = [] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self.d.sentry.wait() - self._initialize_tests() - - def _add_services(self): - """Add services - - Add the services that we're testing, where ceph is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). - """ - this_service = {'name': 'ceph-mon', 'units': 3} - other_services = [ - {'name': 'percona-cluster'}, - {'name': 'keystone'}, - {'name': 'ceph-osd', - 'units': 3, - 'storage': {'osd-devices': 'cinder,10G'}}, - {'name': 'rabbitmq-server'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - {'name': 'cinder'}, - {'name': 'cinder-ceph'}, - {'name': 'nova-cloud-controller'}, - ] - super(CephBasicDeployment, self)._add_services(this_service, - other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'nova-compute:amqp': 'rabbitmq-server:amqp', - 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:ceph': 'ceph-mon:client', - 'keystone:shared-db': 'percona-cluster:shared-db', - 'glance:shared-db': 'percona-cluster:shared-db', - 'glance:identity-service': 'keystone:identity-service', - 'glance:amqp': 'rabbitmq-server:amqp', - 'glance:ceph': 'ceph-mon:client', - 'cinder:shared-db': 'percona-cluster:shared-db', - 'cinder:identity-service': 'keystone:identity-service', - 'cinder:amqp': 'rabbitmq-server:amqp', - 'cinder:image-service': 'glance:image-service', - 'cinder-ceph:storage-backend': 'cinder:storage-backend', - 'cinder-ceph:ceph': 'ceph-mon:client', - 'ceph-osd:mon': 'ceph-mon:osd', - 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', - 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', - 'nova-cloud-controller:identity-service': 'keystone:' - 'identity-service', - 'nova-cloud-controller:cloud-compute': 'nova-compute:' - 'cloud-compute', - 'nova-cloud-controller:image-service': 'glance:image-service', - } - super(CephBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - keystone_config = {'admin-password': 'openstack', - 'admin-token': 'ubuntutesting'} - cinder_config = {'block-device': 'None', 'glance-api-version': '2'} - - pxc_config = { - 'max-connections': 1000, - } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. 
- ceph_config = { - 'monitor-count': '3', - 'auth-supported': 'none', - } - - ceph_osd_config = { - 'osd-devices': '/srv/ceph /dev/test-non-existent', - } - configs = {'keystone': keystone_config, - 'percona-cluster': pxc_config, - 'cinder': cinder_config, - 'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config, - } - super(CephBasicDeployment, self)._configure_services(configs) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.pxc_sentry = self.d.sentry['percona-cluster'][0] - self.keystone_sentry = self.d.sentry['keystone'][0] - self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] - self.nova_sentry = self.d.sentry['nova-compute'][0] - self.glance_sentry = self.d.sentry['glance'][0] - self.cinder_sentry = self.d.sentry['cinder'][0] - self.cinder_ceph_sentry = self.d.sentry['cinder-ceph'][0] - self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] - self.ceph0_sentry = self.d.sentry['ceph-mon'][0] - self.ceph1_sentry = self.d.sentry['ceph-mon'][1] - self.ceph2_sentry = self.d.sentry['ceph-mon'][2] - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - - # Authenticate admin with keystone - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone) - - force_v1_client = False - if self._get_openstack_release() == self.trusty_icehouse: - # Updating image properties (such as arch or hypervisor) using the - # v2 api in icehouse results in: - # https://bugs.launchpad.net/python-glanceclient/+bug/1371559 - u.log.debug('Forcing glance to use v1 api') - force_v1_client = True - - # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin( - self.keystone, - force_v1_client=force_v1_client) - - # Authenticate admin with nova endpoint - self.nova = nova_client.Client(2, session=self.keystone_session) - - keystone_ip = self.keystone_sentry.info['public-address'] - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - self.demo_project = 'demoProject' - self.demo_domain = 'demoDomain' - if self._get_openstack_release() >= self.xenial_queens: - self.create_users_v3() - self.demo_user_session, auth = u.get_keystone_session( - keystone_ip, - self.demo_user, - 'password', - api_version=3, - user_domain_name=self.demo_domain, - project_domain_name=self.demo_domain, - project_name=self.demo_project - ) - self.keystone_demo = keystone_client_v3.Client( - session=self.demo_user_session) - self.nova_demo = nova_client.Client( - 2, - session=self.demo_user_session) - else: - self.create_users_v2() - # Authenticate demo user with keystone - self.keystone_demo = \ - u.authenticate_keystone_user( - self.keystone, user=self.demo_user, - password='password', - tenant=self.demo_tenant) - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - user=self.demo_user, - password='password', - tenant=self.demo_tenant) - - def create_users_v3(self): - try: - self.keystone.projects.find(name=self.demo_project) - except keystoneclient.exceptions.NotFound: - domain = self.keystone.domains.create( - self.demo_domain, - description='Demo Domain', - enabled=True - ) - 
project = self.keystone.projects.create( - self.demo_project, - domain, - description='Demo Project', - enabled=True, - ) - user = self.keystone.users.create( - self.demo_user, - domain=domain.id, - project=self.demo_project, - password='password', - email='demov3@demo.com', - description='Demo', - enabled=True) - role = self.keystone.roles.find(name='Admin') - self.keystone.roles.grant( - role.id, - user=user.id, - project=project.id) - - def create_users_v2(self): - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1 - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph0_sentry: ceph_processes, - self.ceph1_sentry: ceph_processes, - self.ceph2_sentry: ceph_processes - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_services(self): - """Verify the expected services are running on the service units.""" - - services = { - self.rabbitmq_sentry: ['rabbitmq-server'], - self.nova_sentry: ['nova-compute'], - self.keystone_sentry: ['keystone'], - self.glance_sentry: ['glance-api'], - self.cinder_sentry: ['cinder-scheduler', - 'cinder-volume'], - } - - if self._get_openstack_release() < self.xenial_ocata: - services[self.cinder_sentry].append('cinder-api') - - if self._get_openstack_release() < self.xenial_mitaka: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. 
- ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`' - ] - services[self.ceph0_sentry] = ceph_services - services[self.ceph1_sentry] = ceph_services - services[self.ceph2_sentry] = ceph_services - - ceph_osd_services = [ - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] - - services[self.ceph_osd_sentry] = ceph_osd_services - - if self._get_openstack_release() >= self.trusty_liberty: - services[self.keystone_sentry] = ['apache2'] - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_200_ceph_nova_client_relation(self): - """Verify the ceph to nova ceph-client relation data.""" - u.log.debug('Checking ceph:nova-compute ceph-mon relation data...') - unit = self.ceph0_sentry - relation = ['client', 'nova-compute:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph-mon to nova ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_201_nova_ceph_client_relation(self): - """Verify the nova to ceph client relation data.""" - u.log.debug('Checking nova-compute:ceph ceph-client relation data...') - unit = self.nova_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('nova to ceph ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_202_ceph_glance_client_relation(self): - """Verify the ceph to glance ceph-client relation data.""" - u.log.debug('Checking ceph:glance client relation data...') - unit = self.ceph1_sentry - relation = ['client', 'glance:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph to glance ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_203_glance_ceph_client_relation(self): - """Verify the glance to ceph client relation data.""" - u.log.debug('Checking glance:ceph client relation data...') - unit = self.glance_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('glance to ceph ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_204_ceph_cinder_client_relation(self): - """Verify the ceph to cinder ceph-client relation data.""" - u.log.debug('Checking ceph:cinder ceph relation data...') - unit = self.ceph2_sentry - relation = ['client', 'cinder-ceph:ceph'] - expected = { - 'private-address': u.valid_ip, - 'auth': 'none', - 'key': u.not_null - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('ceph to cinder ceph-client', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_205_cinder_ceph_client_relation(self): - """Verify the cinder to ceph ceph-client relation data.""" - u.log.debug('Checking cinder:ceph ceph relation data...') - unit = self.cinder_ceph_sentry - relation = ['ceph', 'ceph-mon:client'] - expected = { - 'private-address': u.valid_ip - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('cinder to ceph ceph-client', ret) - 
amulet.raise_status(amulet.FAIL, msg=message) - - def test_300_ceph_config(self): - """Verify the data in the ceph config file.""" - u.log.debug('Checking ceph config file data...') - unit = self.ceph0_sentry - conf = '/etc/ceph/ceph.conf' - expected = { - 'global': { - 'log to syslog': 'false', - 'err to syslog': 'false', - 'clog to syslog': 'false', - 'mon cluster log to syslog': 'false', - 'auth cluster required': 'none', - 'auth service required': 'none', - 'auth client required': 'none' - }, - 'mon': { - 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' - }, - 'mds': { - 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' - }, - } - - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "ceph config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_302_cinder_rbd_config(self): - """Verify the cinder config file data regarding ceph.""" - u.log.debug('Checking cinder (rbd) config file data...') - unit = self.cinder_sentry - conf = '/etc/cinder/cinder.conf' - section_key = 'cinder-ceph' - expected = { - section_key: { - 'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver' - } - } - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "cinder (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_304_glance_rbd_config(self): - """Verify the glance config file data regarding ceph.""" - u.log.debug('Checking glance (rbd) config file data...') - unit = self.glance_sentry - conf = '/etc/glance/glance-api.conf' - config = { - 'default_store': 'rbd', - 'rbd_store_ceph_conf': '/etc/ceph/ceph.conf', - 'rbd_store_user': 'glance', - 'rbd_store_pool': 'glance', - 'rbd_store_chunk_size': '8' - } - - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - config['stores'] = ('glance.store.filesystem.Store,' - 'glance.store.http.Store,' - 'glance.store.rbd.Store') - section = 'glance_store' - else: - # Juno or earlier - section = 'DEFAULT' - - expected = {section: config} - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "glance (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_306_nova_rbd_config(self): - """Verify the nova config file data regarding ceph.""" - u.log.debug('Checking nova (rbd) config file data...') - unit = self.nova_sentry - conf = '/etc/nova/nova.conf' - expected = { - 'libvirt': { - 'rbd_user': 'nova-compute', - 'rbd_secret_uuid': u.not_null - } - } - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "nova (rbd) config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_400_ceph_check_osd_pools(self): - """Check osd pools on all ceph units, expect them to be - identical, and expect specific pools to be present.""" - u.log.debug('Checking pools on ceph units...') - - expected_pools = self.get_ceph_expected_pools() - results = [] - sentries = [ - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - - # Check for presence of expected pools on each unit - u.log.debug('Expected pools: {}'.format(expected_pools)) - for sentry_unit in sentries: - pools = u.get_ceph_pools(sentry_unit) - results.append(pools) - - for expected_pool in expected_pools: - if expected_pool not in pools: - msg = ('{} does not have pool: 
' - '{}'.format(sentry_unit.info['unit_name'], - expected_pool)) - amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has (at least) the expected ' - 'pools.'.format(sentry_unit.info['unit_name'])) - - # Check that all units returned the same pool name:id data - ret = u.validate_list_of_identical_dicts(results) - if ret: - u.log.debug('Pool list results: {}'.format(results)) - msg = ('{}; Pool list results are not identical on all ' - 'ceph units.'.format(ret)) - amulet.raise_status(amulet.FAIL, msg=msg) - else: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - - def test_402_pause_resume_actions(self): - """Verify that pause/resume works""" - u.log.debug("Testing pause") - cmd = "ceph -s" - - sentry_unit = self.ceph0_sentry - action_id = u.run_action(sentry_unit, 'pause-health') - assert u.wait_on_action(action_id), "Pause health action failed." - - output, code = sentry_unit.run(cmd) - if 'nodown' not in output or 'noout' not in output: - amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown") - - u.log.debug("Testing resume") - action_id = u.run_action(sentry_unit, 'resume-health') - assert u.wait_on_action(action_id), "Resume health action failed." - - output, code = sentry_unit.run(cmd) - if 'nodown' in output or 'noout' in output: - amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown") - - def test_501_security_checklist_action(self): - """Verify expected result on a default install""" - u.log.debug("Testing security-checklist") - sentry_unit = self.ceph0_sentry - - action_id = u.run_action(sentry_unit, "security-checklist") - u.wait_on_action(action_id) - data = amulet.actions.get_action_output(action_id, full_output=True) - assert data.get(u"status") == "completed", \ - "Security check is expected to pass by default" - - @staticmethod - def find_pool(sentry_unit, pool_name): - """ - This will do a ceph osd dump and search for the pool you specify - :param sentry_unit: The unit to run this command from. - :param pool_name: str. The name of the Ceph pool to query - :return: str or None. The ceph pool or None if not found - """ - output, dump_code = sentry_unit.run("ceph osd dump") - if dump_code != 0: - amulet.raise_status( - amulet.FAIL, - msg="ceph osd dump failed with output: {}".format( - output)) - for line in output.split('\n'): - match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line) - if match: - name = match.group('pool_name') - if name == pool_name: - return line - return None - - def test_403_cache_tier_actions(self): - """Verify that cache tier add/remove works""" - u.log.debug("Testing cache tiering") - - sentry_unit = self.ceph0_sentry - # Create our backer pool - output, code = sentry_unit.run("ceph osd pool create cold 128 128 ") - if code != 0: - amulet.raise_status( - amulet.FAIL, - msg="ceph osd pool create cold failed with output: {}".format( - output)) - - # Create our cache pool - output, code = sentry_unit.run("ceph osd pool create hot 128 128 ") - if code != 0: - amulet.raise_status( - amulet.FAIL, - msg="ceph osd pool create hot failed with output: {}".format( - output)) - - action_id = u.run_action(sentry_unit, - 'create-cache-tier', - params={ - 'backer-pool': 'cold', - 'cache-pool': 'hot', - 'cache-mode': 'writeback'}) - assert u.wait_on_action(action_id), \ - "Create cache tier action failed."
- - pool_line = self.find_pool( - sentry_unit=sentry_unit, - pool_name='hot') - - assert "cache_mode writeback" in pool_line, \ - "cache_mode writeback not found in cache pool" - remove_action_id = u.run_action(sentry_unit, - 'remove-cache-tier', - params={ - 'backer-pool': 'cold', - 'cache-pool': 'hot'}) - assert u.wait_on_action(remove_action_id), \ - "Remove cache tier action failed" - pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot') - assert "cache_mode" not in pool_line, \ - "cache_mode is still enabled on cache pool" - - def test_404_set_noout_actions(self): - """Verify that set/unset noout works""" - u.log.debug("Testing set noout") - cmd = "ceph -s" - - sentry_unit = self.ceph0_sentry - action_id = u.run_action(sentry_unit, 'set-noout') - assert u.wait_on_action(action_id), "Set noout action failed." - - output, code = sentry_unit.run(cmd) - if 'noout' not in output: - amulet.raise_status(amulet.FAIL, msg="Missing noout") - - u.log.debug("Testing unset noout") - action_id = u.run_action(sentry_unit, 'unset-noout') - assert u.wait_on_action(action_id), "Unset noout action failed." - - output, code = sentry_unit.run(cmd) - if 'noout' in output: - amulet.raise_status(amulet.FAIL, msg="Still has noout") - - def test_410_ceph_cinder_vol_create(self): - """Create and confirm a ceph-backed cinder volume, and inspect - ceph cinder pool object count as the volume is created - and deleted.""" - sentry_unit = self.ceph0_sentry - obj_count_samples = [] - pool_size_samples = [] - pools = u.get_ceph_pools(self.ceph0_sentry) - cinder_pool = pools['cinder-ceph'] - - # Check ceph cinder pool object count, disk space usage and pool name - u.log.debug('Checking ceph cinder pool original samples...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - expected = 'cinder-ceph' - if pool_name != expected: - msg = ('Ceph pool {} unexpected name (actual, expected): ' - '{}. 
{}'.format(cinder_pool, pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed cinder volume - cinder_vol = u.create_cinder_volume(self.cinder) - - # Re-check ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool samples after volume create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed cinder volume - u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume") - - # Final check, ceph cinder pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph cinder pool after volume delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - cinder_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph cinder pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "cinder pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Luminous (pike) ceph seems more efficient at disk usage so we cannot - # guarantee the ordering of kb_used - if self._get_openstack_release() < self.xenial_pike: - # Validate ceph cinder pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "cinder pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_412_ceph_glance_image_create_delete(self): - """Create and confirm a ceph-backed glance image, and inspect - ceph glance pool object count as the image is created - and deleted.""" - sentry_unit = self.ceph0_sentry - obj_count_samples = [] - pool_size_samples = [] - pools = u.get_ceph_pools(self.ceph0_sentry) - glance_pool = pools['glance'] - - # Check ceph glance pool object count, disk space usage and pool name - u.log.debug('Checking ceph glance pool original samples...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - expected = 'glance' - if pool_name != expected: - msg = ('Ceph glance pool {} unexpected name (actual, ' - 'expected): {}. 
{}'.format(glance_pool, - pool_name, expected)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create ceph-backed glance image - glance_img = u.create_cirros_image(self.glance, "cirros-image-1") - - # Re-check ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image create...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Delete ceph-backed glance image - u.delete_resource(self.glance.images, - glance_img.id, msg="glance image") - - # Final check, ceph glance pool object count and disk usage - time.sleep(10) - u.log.debug('Checking ceph glance pool samples after image delete...') - pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit, - glance_pool) - obj_count_samples.append(obj_count) - pool_size_samples.append(kb_used) - - # Validate ceph glance pool object count samples over time - ret = u.validate_ceph_pool_samples(obj_count_samples, - "glance pool object count") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # Validate ceph glance pool disk space usage samples over time - ret = u.validate_ceph_pool_samples(pool_size_samples, - "glance pool disk usage") - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_414_get_health_action(self): - """Verify that getting health works""" - u.log.debug("Testing get-health") - - sentry_unit = self.ceph0_sentry - action_id = u.run_action(sentry_unit, 'get-health') - assert u.wait_on_action(action_id), "HEALTH_OK" - - def test_420_show_disk_free_action(self): - """Verify show-disk-free""" - u.log.debug("Testing show-disk-free") - if self._get_openstack_release() < self.trusty_kilo: - u.log.info( - "show-disk-free only supported in >=kilo, skipping") - return - sentry_unit = self.ceph0_sentry - action_id = u.run_action(sentry_unit, - 'show-disk-free', - params={'format': 'json'}) - assert u.wait_on_action(action_id), "Show-disk-free action failed." - data = amulet.actions.get_action_output(action_id, full_output=True) - assert data.get(u"status") == "completed", "Show-disk-free failed" - message = data.get(u"results").get(u"message") - assert message is not None - jsonout = json.loads(message.strip()) - nodes = jsonout.get(u"nodes") - assert nodes is not None, "Show-disk-free: no 'nodes' elem" - assert len(nodes) > 0, "Show-disk-free action: 0 nodes" - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - all ceph units.""" - sentry_units = [ - self.ceph0_sentry, - self.ceph1_sentry, - self.ceph2_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. 
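The amulet suite deleted above is superseded by the per-series zaza bundles added below, which are driven through the new tox targets introduced later in this same patch. As a minimal sketch of how these targets would be invoked locally — assuming a bootstrapped Juju controller and the tox environments and tests.yaml sections this patch adds:

    # Smoke test: deploys only the smoke_bundles entry (bionic-queens)
    tox -e func-smoke
    # Full gate: runs every bundle listed under gate_bundles in tests.yaml
    tox -e func
    # Development bundles (cosmic-rocky, disco-stein); may not always pass
    tox -e func-dev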
diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..f8565100 --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -0,0 +1,90 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + series: bionic + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..7e165bde --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,104 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:bionic-rocky + ceph-mon: + charm: ceph-mon + series: bionic + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-rocky + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-rocky + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-rocky + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + nova-compute: + charm: 
cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-rocky + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..56695dd7 --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -0,0 +1,104 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:bionic-stein + ceph-mon: + charm: ceph-mon + series: bionic + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-stein + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-stein + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-stein + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-stein + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-stein +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - 
glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/cosmic-rocky.yaml b/ceph-mon/tests/bundles/cosmic-rocky.yaml new file mode 100644 index 00000000..5b30768d --- /dev/null +++ b/ceph-mon/tests/bundles/cosmic-rocky.yaml @@ -0,0 +1,90 @@ +series: cosmic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: ceph-mon + series: cosmic + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/disco-stein.yaml b/ceph-mon/tests/bundles/disco-stein.yaml new file mode 100644 index 00000000..bb2fb5a0 
--- /dev/null +++ b/ceph-mon/tests/bundles/disco-stein.yaml @@ -0,0 +1,90 @@ +series: disco +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: ceph-mon + series: disco + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml new file mode 100644 index 00000000..058c72b0 --- /dev/null +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -0,0 +1,140 @@ +series: trusty +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + ceph-mon: + charm: ceph-mon + series: trusty + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + 
num_units: 1 + options: + source: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + # workaround while awaiting release of next version of python-libjuju with + # model-constraints support + constraints: + virt-type=kvm +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 00000000..a9e28cc1 --- /dev/null +++ b/ceph-mon/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,90 @@ +series: xenial +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: ceph-mon + series: xenial + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + 
num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml new file mode 100644 index 00000000..6187a500 --- /dev/null +++ b/ceph-mon/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,104 @@ +series: xenial +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-ocata + ceph-mon: + charm: ceph-mon + series: xenial + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-ocata + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-ocata + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-ocata + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-ocata + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - 
keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..8d31f272 --- /dev/null +++ b/ceph-mon/tests/bundles/xenial-pike.yaml @@ -0,0 +1,104 @@ +series: xenial +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-pike + ceph-mon: + charm: ceph-mon + series: xenial + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-pike + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-pike + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-pike + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-pike + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-pike +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - 
rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..ab12215e --- /dev/null +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -0,0 +1,104 @@ +series: xenial +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-queens + ceph-mon: + charm: ceph-mon + series: xenial + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-queens + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-queens + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-queens + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-queens + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-queens +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/dev-basic-cosmic-rocky b/ceph-mon/tests/dev-basic-cosmic-rocky deleted file mode 100755 index 933fb0db..00000000 --- a/ceph-mon/tests/dev-basic-cosmic-rocky +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='cosmic') - deployment.run_tests() diff --git a/ceph-mon/tests/dev-basic-disco-stein b/ceph-mon/tests/dev-basic-disco-stein deleted file mode 100755 index 27d2c980..00000000 --- a/ceph-mon/tests/dev-basic-disco-stein +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on disco-stein.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='disco') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-bionic-queens b/ceph-mon/tests/gate-basic-bionic-queens deleted file mode 100755 index e531990d..00000000 --- a/ceph-mon/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-queens.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-bionic-rocky b/ceph-mon/tests/gate-basic-bionic-rocky deleted file mode 100755 index 0bf3df92..00000000 --- a/ceph-mon/tests/gate-basic-bionic-rocky +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-rocky.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-bionic-stein b/ceph-mon/tests/gate-basic-bionic-stein deleted file mode 100755 index ab8f6f20..00000000 --- a/ceph-mon/tests/gate-basic-bionic-stein +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-stein.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic', - openstack='cloud:bionic-stein', - source='cloud:bionic-stein') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-trusty-mitaka b/ceph-mon/tests/gate-basic-trusty-mitaka deleted file mode 100755 index 52b688fa..00000000 --- a/ceph-mon/tests/gate-basic-trusty-mitaka +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-mitaka b/ceph-mon/tests/gate-basic-xenial-mitaka deleted file mode 100755 index f8977000..00000000 --- a/ceph-mon/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-ocata b/ceph-mon/tests/gate-basic-xenial-ocata deleted file mode 100755 index ec2713ce..00000000 --- a/ceph-mon/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-ocata.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-pike b/ceph-mon/tests/gate-basic-xenial-pike deleted file mode 100755 index 2fafe3b0..00000000 --- a/ceph-mon/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-pike.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/ceph-mon/tests/gate-basic-xenial-queens b/ceph-mon/tests/gate-basic-xenial-queens deleted file mode 100755 index 5fa16a57..00000000 --- a/ceph-mon/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic ceph deployment on xenial-queens.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index a03e7bad..b504616f 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,18 +1,22 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. -#python-packages: -reset_timeout: 600 +charm_name: ceph-mon +gate_bundles: + - bionic-stein + - bionic-rocky + - bionic-queens + - xenial-queens + - xenial-pike + - xenial-ocata + - xenial-mitaka + - trusty-mitaka +smoke_bundles: + - bionic-queens +dev_bundles: + - cosmic-rocky + - disco-stein +configure: + - zaza.openstack.charm_tests.glance.setup.add_lts_image +tests: + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 795d90ba..3874fee6 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -15,6 +15,7 @@ install_command = commands = stestr run {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* +deps = -r{toxinidir}/test-requirements.txt [testenv:py27] basepython = python2.7 @@ -78,49 +79,21 @@ omit = basepython = python3 commands = {posargs} -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy - -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy + functest-run-suite --keep-model -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) 
-basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-smoke] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-dev] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + functest-run-suite --keep-model --dev [flake8] ignore = E402,E226 From 5727552b7d8dc64a429527f8a6986044b65cff71 Mon Sep 17 00:00:00 2001 From: Zachary Zehring Date: Mon, 20 May 2019 15:24:44 -0400 Subject: [PATCH 1768/2699] Notify ceph-mon relations on version upgrade. This notification is added to allow ceph-mon to know when ceph-osd units have upgraded in order to run post-upgrade commands (required for certain upgrades). - Add notify_mon_of_upgrade function that sets ceph_release to release version. - Refactor some hard coded strings to vars - Set relation ceph_release when setting bootstrapped-osd Change-Id: I708b33cafc8818af7e3c63a7fa80c02978e66f65 Partial-Bug: #1828630 --- ceph-osd/hooks/ceph_hooks.py | 14 ++++++++- ceph-osd/unit_tests/test_upgrade.py | 44 +++++++++++++++++++++++++---- 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 2e51f0ab..38d78d0d 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -153,6 +153,7 @@ def check_for_upgrade(): ceph.roll_osd_cluster(new_version=new_version, upgrade_key='osd-upgrade') emit_cephconf(upgrading=False) + notify_mon_of_upgrade(new_version) elif (old_version == new_version and old_version_os < new_version_os): # See LP: #1778823 @@ -170,6 +171,14 @@ def check_for_upgrade(): level=ERROR) +def notify_mon_of_upgrade(release): + for relation_id in relation_ids('mon'): + log('Notifying relation {} of upgrade to {}'.format( + relation_id, release)) + relation_set(relation_id=relation_id, + relation_settings=dict(ceph_release=release)) + + def tune_network_adapters(): interfaces = netifaces.interfaces() for interface in interfaces: @@ -540,7 +549,10 @@ def prepare_disks_and_activate(): relation_set( relation_id=r_id, relation_settings={ - 'bootstrapped-osds': len(db.get('osd-devices', [])) + 'bootstrapped-osds': len(db.get('osd-devices', [])), + 'ceph_release': ceph.resolve_ceph_version( + hookenv.config('source') or 'distro' + ) } ) diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index fa47ff71..efc2800e 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -1,6 +1,6 @@ from mock import call, patch from test_utils import CharmTestCase -from ceph_hooks import check_for_upgrade +from ceph_hooks import check_for_upgrade, notify_mon_of_upgrade __author__ = 'Chris Holcombe ' @@ -8,6 +8,7 @@ class UpgradeRollingTestCase(CharmTestCase): + @patch('ceph_hooks.notify_mon_of_upgrade') @patch('ceph_hooks.ceph.dirs_need_ownership_update') @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @@ -16,10 +17,13 @@ class UpgradeRollingTestCase(CharmTestCase): @patch('ceph_hooks.ceph.roll_osd_cluster') def test_check_for_upgrade(self, roll_osd_cluster, hookenv, emit_cephconf, version, exists, - 
dirs_need_ownership_update): + dirs_need_ownership_update, + notify_mon_of_upgrade): dirs_need_ownership_update.return_value = False exists.return_value = True - version.side_effect = ['firefly', 'hammer'] + version_pre = 'firefly' + version_post = 'hammer' + version.side_effect = [version_pre, version_post] self.test_config.set_previous('source', "cloud:trusty-juno") self.test_config.set('source', 'cloud:trusty-kilo') @@ -34,7 +38,9 @@ def test_check_for_upgrade(self, roll_osd_cluster, hookenv, call(upgrading=False)]) exists.assert_called_with( "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") + notify_mon_of_upgrade.assert_called_once_with(version_post) + @patch('ceph_hooks.notify_mon_of_upgrade') @patch('ceph_hooks.ceph.dirs_need_ownership_update') @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @@ -44,10 +50,12 @@ def test_check_for_upgrade(self, roll_osd_cluster, hookenv, def test_resume_failed_upgrade(self, roll_osd_cluster, hookenv, emit_cephconf, version, exists, - dirs_need_ownership_update): + dirs_need_ownership_update, + notify_mon_of_upgrade): dirs_need_ownership_update.return_value = True exists.return_value = True - version.side_effect = ['jewel', 'jewel'] + version_pre_and_post = 'jewel' + version.side_effect = [version_pre_and_post, version_pre_and_post] check_for_upgrade() @@ -57,6 +65,7 @@ def test_resume_failed_upgrade(self, roll_osd_cluster, call(upgrading=False)]) exists.assert_called_with( "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring") + notify_mon_of_upgrade.assert_called_once_with(version_pre_and_post) @patch('ceph_hooks.os.path.exists') @patch('ceph_hooks.ceph.resolve_ceph_version') @@ -122,3 +131,28 @@ def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, check_for_upgrade() roll_monitor_cluster.assert_not_called() add_source.assert_called_with('cloud:bionic-stein', 'some-key') + + +class UpgradeUtilTestCase(CharmTestCase): + @patch('ceph_hooks.relation_ids') + @patch('ceph_hooks.log') + @patch('ceph_hooks.relation_set') + def test_notify_mon_of_upgrade(self, relation_set, log, relation_ids): + relation_ids_to_check = ['1', '2', '3'] + relation_ids.return_value = relation_ids_to_check + release = 'luminous' + + notify_mon_of_upgrade(release) + + self.assertEqual(log.call_count, len(relation_ids_to_check)) + relation_ids.assert_called_once_with('mon') + set_dict = dict(ceph_release=release) + relation_set_calls = [ + call(relation_id=relation_ids_to_check[0], + relation_settings=set_dict), + call(relation_id=relation_ids_to_check[1], + relation_settings=set_dict), + call(relation_id=relation_ids_to_check[2], + relation_settings=set_dict), + ] + relation_set.assert_has_calls(relation_set_calls) From f15d67c7ccfb906479770fd9f30f11745ef1a83d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 22 May 2019 12:24:16 +0200 Subject: [PATCH 1769/2699] Retarget Zaza to master now that split has landed This change is required as Zaza underwent a split of the openstack tests from the Zaza project, so we had to track the working branch until that change landed. 
As it has landed, it is now time to use Zaza from master again Change-Id: I524cdb7fc1fc8a39c7f945e2f627d2d784e88aee --- ceph-osd/test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index cb80d6ed..d76fb046 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -20,5 +20,5 @@ pika>=0.10.0,<1.0 distro-info pytz pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file From e7ec6047c6dc925e1d3e588b173ca76f06e03e5a Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 22 May 2019 12:24:29 +0200 Subject: [PATCH 1770/2699] Retarget Zaza to master now that split has landed This change is required as Zaza underwent a split of the openstack tests from the Zaza project, so we had to track the working branch until that change landed. As it has landed, it is now time to use Zaza from master again Change-Id: I3f4b9ba3177a1262468d4222a9f9c4f3df078398 --- ceph-radosgw/test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 434ccc66..6d547d81 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -20,5 +20,5 @@ pika>=0.10.0,<1.0 distro-info pytz pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>'3.4' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>'3.4' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4' \ No newline at end of file From 9e5366597d8626f57e1e337c8e332ddbed2544f4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 22 May 2019 12:24:41 +0200 Subject: [PATCH 1771/2699] Retarget Zaza to master now that split has landed This change is required as Zaza underwent a split of the openstack tests from the Zaza project, so we had to track the working branch until that change landed. 
As it has landed, it is now time to use Zaza from master again Change-Id: Icc0e4143361de0c6694ed5fd2e1c21f80f460f84 --- ceph-rbd-mirror/src/test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index f828a5ae..f4a766df 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -7,5 +7,5 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file From c7f1f7ae383cbf03bb9743ade68c7eb17f69f7de Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 24 May 2019 11:14:13 +0200 Subject: [PATCH 1772/2699] Remove deps installation in py27 job Change-Id: I297f20e7f400fef191c164d3d22be8cfc3e9c514 --- ceph-fs/tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 0667434e..12b4a2e2 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -29,6 +29,7 @@ basepython = python2.7 # Reactive source charms are Python3-only, but a py27 unit test target # is required by OpenStack Governance. Remove this shim as soon as # permitted. http://governance.openstack.org/reference/cti/python_cti.html +deps = whitelist_externals = true commands = true From 10f87109566e1bb1677be15bdd045d0dc00091ff Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 23 Apr 2019 05:17:20 +0000 Subject: [PATCH 1773/2699] Dropping the py35 testing All the integration testing has been moved to Bionic now[1] and py3.5 is not tested runtime for Train or stable/stein[2]. As per below ML thread, we are good to drop the py35 testing now: http://lists.openstack.org/pipermail/openstack-discuss/2019-April/005097.html [1] http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004647.html [2] https://governance.openstack.org/tc/reference/runtimes/stein.html https://governance.openstack.org/tc/reference/runtimes/train.html Change-Id: I940661f000d558539caf9264a9dab1e9f55307f7 --- ceph-fs/.zuul.yaml | 2 +- ceph-fs/tox.ini | 13 ++++--------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 387b9444..0faad733 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -1,5 +1,5 @@ - project: templates: - python-charm-jobs - - openstack-python35-jobs + - openstack-python36-jobs - openstack-cover-jobs diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 12b4a2e2..9ed983b2 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -3,7 +3,7 @@ # within individual charm repos. 
[tox] skipsdist = True -envlist = pep8,py34,py35 +envlist = pep8,py36 skip_missing_interpreters = True [testenv] @@ -33,18 +33,13 @@ deps = whitelist_externals = true commands = true -[testenv:py34] -basepython = python3.4 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run {posargs} - -[testenv:py35] -basepython = python3.5 +[testenv:py36] +basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt commands = stestr run {posargs} [testenv:pep8] -basepython = python3.5 +basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt commands = flake8 {posargs} src unit_tests From de2b0913c94d747a1f3eaa9ffaee1d35dbf971f8 Mon Sep 17 00:00:00 2001 From: Ramon Grullon Date: Tue, 28 May 2019 13:42:30 +0000 Subject: [PATCH 1774/2699] Migrate charm-ceph-proxy testing to Zaza Change-Id: If9e79b22a919997961a23929435450af0883b882 Closes-Bug: #1828424 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/5 --- ceph-proxy/test-requirements.txt | 19 +-- ceph-proxy/tests/basic_deployment.py | 155 ------------------ ceph-proxy/tests/bundles/bionic-queens.yaml | 23 +++ ceph-proxy/tests/bundles/bionic-rocky.yaml | 30 ++++ ceph-proxy/tests/bundles/bionic-stein.yaml | 30 ++++ ceph-proxy/tests/bundles/trusty-icehouse.yaml | 26 +++ ceph-proxy/tests/bundles/trusty-juno.yaml | 30 ++++ ceph-proxy/tests/bundles/trusty-kilo.yaml | 30 ++++ ceph-proxy/tests/bundles/trusty-liberty.yaml | 30 ++++ ceph-proxy/tests/bundles/trusty-mitaka.yaml | 30 ++++ ceph-proxy/tests/bundles/xenial-mitaka.yaml | 26 +++ ceph-proxy/tests/bundles/xenial-newton.yaml | 30 ++++ ceph-proxy/tests/bundles/xenial-ocata.yaml | 30 ++++ ceph-proxy/tests/bundles/xenial-pike.yaml | 30 ++++ ceph-proxy/tests/bundles/xenial-queens.yaml | 30 ++++ ceph-proxy/tests/dev-basic-cosmic-rocky | 9 - ceph-proxy/tests/dev-basic-disco-stein | 9 - ceph-proxy/tests/gate-basic-bionic-queens | 9 - ceph-proxy/tests/gate-basic-bionic-rocky | 11 -- ceph-proxy/tests/gate-basic-bionic-stein | 11 -- ceph-proxy/tests/gate-basic-trusty-mitaka | 11 -- ceph-proxy/tests/gate-basic-xenial-mitaka | 9 - ceph-proxy/tests/gate-basic-xenial-ocata | 11 -- ceph-proxy/tests/gate-basic-xenial-pike | 11 -- ceph-proxy/tests/gate-basic-xenial-queens | 11 -- ceph-proxy/tests/tests.yaml | 56 +++++-- ceph-proxy/tox.ini | 55 ++----- 27 files changed, 429 insertions(+), 333 deletions(-) delete mode 100644 ceph-proxy/tests/basic_deployment.py create mode 100644 ceph-proxy/tests/bundles/bionic-queens.yaml create mode 100644 ceph-proxy/tests/bundles/bionic-rocky.yaml create mode 100644 ceph-proxy/tests/bundles/bionic-stein.yaml create mode 100644 ceph-proxy/tests/bundles/trusty-icehouse.yaml create mode 100644 ceph-proxy/tests/bundles/trusty-juno.yaml create mode 100644 ceph-proxy/tests/bundles/trusty-kilo.yaml create mode 100644 ceph-proxy/tests/bundles/trusty-liberty.yaml create mode 100644 ceph-proxy/tests/bundles/trusty-mitaka.yaml create mode 100644 ceph-proxy/tests/bundles/xenial-mitaka.yaml create mode 100644 ceph-proxy/tests/bundles/xenial-newton.yaml create mode 100644 ceph-proxy/tests/bundles/xenial-ocata.yaml create mode 100644 ceph-proxy/tests/bundles/xenial-pike.yaml create mode 100644 ceph-proxy/tests/bundles/xenial-queens.yaml delete mode 100755 ceph-proxy/tests/dev-basic-cosmic-rocky delete mode 100755 ceph-proxy/tests/dev-basic-disco-stein delete mode 100755 ceph-proxy/tests/gate-basic-bionic-queens delete mode 100755 ceph-proxy/tests/gate-basic-bionic-rocky delete mode 100755 ceph-proxy/tests/gate-basic-bionic-stein delete mode 100755 
ceph-proxy/tests/gate-basic-trusty-mitaka delete mode 100755 ceph-proxy/tests/gate-basic-xenial-mitaka delete mode 100755 ceph-proxy/tests/gate-basic-xenial-ocata delete mode 100755 ceph-proxy/tests/gate-basic-xenial-pike delete mode 100755 ceph-proxy/tests/gate-basic-xenial-queens diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 272ce1da..d131a212 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -7,23 +7,8 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -amulet>=1.14.3,<2.0;python_version=='2.7' -bundletester>=0.6.1,<1.0;python_version=='2.7' -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -# END: Amulet OpenStack Charm Helper Requirements # NOTE: workaround for 14.04 pip/tox pytz pyudev # for ceph-* charm unit tests (not mocked?) +git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>='3.0' diff --git a/ceph-proxy/tests/basic_deployment.py b/ceph-proxy/tests/basic_deployment.py deleted file mode 100644 index ec345327..00000000 --- a/ceph-proxy/tests/basic_deployment.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python - -import amulet - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa - OpenStackAmuletUtils, - DEBUG, - # ERROR - ) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph deployment.""" - - def __init__(self, series=None, openstack=None, source=None, stable=False): - """Deploy the entire test environment.""" - super(CephBasicDeployment, self).__init__(series, openstack, source, - stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = ['ceph-proxy', 'ceph-radosgw'] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self._configure_proxy() - self.d.sentry.wait() - self._initialize_tests() - self._auto_wait_for_status() - - def _add_services(self): - """Add services - - Add the services that we're testing, where ceph is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). 
- """ - this_service = {'name': 'ceph-proxy'} - other_services = [{'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3, - 'storage': {'osd-devices': 'cinder,10G'}}, - {'name': 'ceph-radosgw'}] - super(CephBasicDeployment, self)._add_services(this_service, - other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'ceph-osd:mon': 'ceph-mon:osd', - 'ceph-radosgw:mon': 'ceph-proxy:radosgw', - } - super(CephBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - ceph_config = { - 'monitor-count': '3', - 'auth-supported': 'none', - } - - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. - ceph_osd_config = { - 'osd-devices': '/srv/ceph /dev/test-non-existent' - } - - proxy_config = { - 'source': self.source - } - configs = {'ceph-mon': ceph_config, - 'ceph-osd': ceph_osd_config, - 'ceph-proxy': proxy_config} - super(CephBasicDeployment, self)._configure_services(configs) - - def _configure_proxy(self): - """Setup CephProxy with Ceph configuration - from running Ceph cluster - """ - mon_key = u.file_contents_safe( - self.d.sentry['ceph-mon'][0], - '/etc/ceph/ceph.client.admin.keyring' - ).split(' = ')[-1].rstrip() - - ceph_ips = [] - for x in self.d.sentry['ceph-mon']: - output, code = x.run("unit-get private-address") - ceph_ips.append(output + ':6789') - - proxy_config = { - 'auth-supported': 'none', - 'admin-key': mon_key, - 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', - 'monitor-hosts': ' '.join(ceph_ips) - } - u.log.debug('Config: {}'.format(proxy_config)) - self.d.configure('ceph-proxy', proxy_config) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0] - self.ceph0_sentry = self.d.sentry['ceph-mon'][0] - self.radosgw_sentry = self.d.sentry['ceph-radosgw'][0] - self.proxy_sentry = self.d.sentry['ceph-proxy'][0] - - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1, - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph0_sentry: ceph_processes - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - ceph proxy units.""" - sentry_units = [ - self.proxy_sentry, - self.ceph0_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) diff --git a/ceph-proxy/tests/bundles/bionic-queens.yaml b/ceph-proxy/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..2aba41b4 --- /dev/null +++ b/ceph-proxy/tests/bundles/bionic-queens.yaml @@ -0,0 +1,23 @@ +series: bionic +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' 
+ num_units: 3 + options: + expected-osd-count: 3 + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..9d985312 --- /dev/null +++ b/ceph-proxy/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,30 @@ +series: bionic +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-rocky + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:bionic-rocky + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:bionic-rocky + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:bionic-rocky +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..a5bdebd5 --- /dev/null +++ b/ceph-proxy/tests/bundles/bionic-stein.yaml @@ -0,0 +1,30 @@ +series: bionic +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-stein + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:bionic-stein + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:bionic-stein + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:bionic-stein +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-icehouse.yaml b/ceph-proxy/tests/bundles/trusty-icehouse.yaml new file mode 100644 index 00000000..c9c5b8da --- /dev/null +++ b/ceph-proxy/tests/bundles/trusty-icehouse.yaml @@ -0,0 +1,26 @@ +series: trusty +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-juno.yaml b/ceph-proxy/tests/bundles/trusty-juno.yaml new file mode 100644 index 00000000..7c85e398 --- /dev/null +++ b/ceph-proxy/tests/bundles/trusty-juno.yaml @@ -0,0 +1,30 @@ +series: trusty +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:trusty-juno + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:trusty-juno + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:trusty-juno + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + 
num_units: 1 + options: + source: cloud:trusty-juno +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-kilo.yaml b/ceph-proxy/tests/bundles/trusty-kilo.yaml new file mode 100644 index 00000000..c9b18bc0 --- /dev/null +++ b/ceph-proxy/tests/bundles/trusty-kilo.yaml @@ -0,0 +1,30 @@ +series: trusty +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:trusty-kilo + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:trusty-kilo + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:trusty-kilo + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:trusty-kilo +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-liberty.yaml b/ceph-proxy/tests/bundles/trusty-liberty.yaml new file mode 100644 index 00000000..d1835814 --- /dev/null +++ b/ceph-proxy/tests/bundles/trusty-liberty.yaml @@ -0,0 +1,30 @@ +series: trusty +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:trusty-liberty + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:trusty-liberty + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:trusty-liberty + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:trusty-liberty +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml new file mode 100644 index 00000000..32861944 --- /dev/null +++ b/ceph-proxy/tests/bundles/trusty-mitaka.yaml @@ -0,0 +1,30 @@ +series: trusty +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:trusty-mitaka + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:trusty-mitaka + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:trusty-mitaka + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:trusty-mitaka +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/xenial-mitaka.yaml b/ceph-proxy/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 00000000..48ba327c --- /dev/null +++ b/ceph-proxy/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,26 @@ +series: xenial +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/xenial-newton.yaml
b/ceph-proxy/tests/bundles/xenial-newton.yaml new file mode 100644 index 00000000..a58bae67 --- /dev/null +++ b/ceph-proxy/tests/bundles/xenial-newton.yaml @@ -0,0 +1,30 @@ +series: xenial +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-newton + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:xenial-newton + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:xenial-newton + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:xenial-newton +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml b/ceph-proxy/tests/bundles/xenial-ocata.yaml new file mode 100644 index 00000000..351bc5dd --- /dev/null +++ b/ceph-proxy/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,30 @@ +series: xenial +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-ocata + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:xenial-ocata + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:xenial-ocata + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:xenial-ocata +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..3745ea1f --- /dev/null +++ b/ceph-proxy/tests/bundles/xenial-pike.yaml @@ -0,0 +1,30 @@ +series: xenial +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-pike + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:xenial-pike + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:xenial-pike + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:xenial-pike +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..c7aab9de --- /dev/null +++ b/ceph-proxy/tests/bundles/xenial-queens.yaml @@ -0,0 +1,30 @@ +series: xenial +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:xenial-queens + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:xenial-queens + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:xenial-queens + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:xenial-queens +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/dev-basic-cosmic-rocky b/ceph-proxy/tests/dev-basic-cosmic-rocky deleted file mode 100755 index 2d96d7c7..00000000 --- a/ceph-proxy/tests/dev-basic-cosmic-rocky
+++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='cosmic') - deployment.run_tests() diff --git a/ceph-proxy/tests/dev-basic-disco-stein b/ceph-proxy/tests/dev-basic-disco-stein deleted file mode 100755 index dcbc1be4..00000000 --- a/ceph-proxy/tests/dev-basic-disco-stein +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on disco-stein.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='disco') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-bionic-queens b/ceph-proxy/tests/gate-basic-bionic-queens deleted file mode 100755 index 7179b93b..00000000 --- a/ceph-proxy/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on bionic-queens.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-bionic-rocky b/ceph-proxy/tests/gate-basic-bionic-rocky deleted file mode 100755 index 97c060db..00000000 --- a/ceph-proxy/tests/gate-basic-bionic-rocky +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on bionic-rocky.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-bionic-stein b/ceph-proxy/tests/gate-basic-bionic-stein deleted file mode 100755 index 76356954..00000000 --- a/ceph-proxy/tests/gate-basic-bionic-stein +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on bionic-stein.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='bionic', - openstack='cloud:bionic-stein', - source='cloud:bionic-stein') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-trusty-mitaka b/ceph-proxy/tests/gate-basic-trusty-mitaka deleted file mode 100755 index 6157b03c..00000000 --- a/ceph-proxy/tests/gate-basic-trusty-mitaka +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on trusty-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-mitaka b/ceph-proxy/tests/gate-basic-xenial-mitaka deleted file mode 100755 index 603c8c05..00000000 --- a/ceph-proxy/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-ocata b/ceph-proxy/tests/gate-basic-xenial-ocata deleted file mode 100755 index 1c3a430f..00000000 --- a/ceph-proxy/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,11 +0,0 @@ 
-#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on xenial-ocata.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-pike b/ceph-proxy/tests/gate-basic-xenial-pike deleted file mode 100755 index 8f4410fd..00000000 --- a/ceph-proxy/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on xenial-pike.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/ceph-proxy/tests/gate-basic-xenial-queens b/ceph-proxy/tests/gate-basic-xenial-queens deleted file mode 100755 index 829ce932..00000000 --- a/ceph-proxy/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Amulet tests on a basic ceph deployment on xenial-queens.""" - -from basic_deployment import CephBasicDeployment - -if __name__ == '__main__': - deployment = CephBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index a03e7bad..0dc7bc83 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,18 +1,38 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. 
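The zaza-style replacement below also introduces a target_deploy_status section: instead of waiting for every application to reach active/idle, the harness waits for whatever workload state each application declares (here ceph-proxy and ceph-radosgw legitimately sit in blocked until configured). A sketch of the wait this drives, assuming zaza's model helper keeps this signature:

    import zaza.model

    # Illustrative: block until ceph-proxy reaches its declared target
    # state rather than the default 'active' workload status.
    zaza.model.wait_for_application_states(
        states={
            'ceph-proxy': {
                'workload-status': 'blocked',
                'workload-status-message': 'Ensure FSID and admin-key are set',
            },
        })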
-#python-packages: -reset_timeout: 600 +configure: + - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy + +tests: + - zaza.openstack.charm_tests.ceph.tests.CephProxyTest + +gate_bundles: + - trusty-mitaka # jewel + - xenial-mitaka # jewel + - xenial-queens # luminous + - bionic-queens # luminous + - bionic-rocky # mimic +dev_bundles: + # Icehouse + - trusty-icehouse + - trusty-juno + # Hammer + - trusty-kilo + - trusty-liberty + # Jewel + - xenial-newton + - xenial-ocata + # Pike + - xenial-pike + # Mimic + - bionic-stein + +smoke_bundles: + - bionic-queens + +target_deploy_status: + ceph-proxy: + workload-status: blocked + workload-status-message: Ensure FSID and admin-key are set + ceph-radosgw: + workload-status: blocked + workload-status-message: "Missing relations: mon" + diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 3a8edbff..aa3d6d0c 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -15,6 +15,8 @@ install_command = commands = stestr run {posargs} whitelist_externals = juju passenv = HOME TERM AMULET_* CS_API_* +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt [testenv:py27] basepython = python2.7 @@ -43,50 +45,21 @@ commands = flake8 {posargs} hooks unit_tests tests actions lib basepython = python3 commands = {posargs} -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy - -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy +[flake8] +ignore = E402,E226 +exclude = */charmhelpers -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy + functest-run-suite --keep-model -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-smoke] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-dev] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy - -[flake8] -ignore = E402,E226 -exclude = */charmhelpers + functest-run-suite --keep-model --dev From 28c4744ecc20c56ea326ab5a651dc0374c40556f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 28 May 2019 15:33:20 +0200 Subject: [PATCH 1775/2699] Ensure that the mon relation completes This change adds the fsid to the mon context to ensure that the charm knows when the relation is complete. 
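In context-generator terms the fix is small; a simplified sketch of the completeness pattern (illustrative only, not the charm's literal MonContext code):

    def mon_context(fsid, mon_hosts, auth):
        """Build a template context, or return {} until the relation completes."""
        if not fsid:
            # fsid only ever arrives over the mon relation, so its absence
            # means the handshake has not finished; an empty context keeps
            # the gateway from being marked ready prematurely.
            return {}
        return {
            'fsid': fsid,
            'mon_hosts': ' '.join(sorted(mon_hosts)),
            'auth_supported': auth,
        }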
Without an explicit key that only comes from the relation, the ceph-radosgw charm will happily go "ready" before that relation has completed, leading to a confusing state where the charm goes "blocked" because the radosgw service is not running. Change-Id: I4bfdabbd36400701debfb7a39a9c40701fc8b5ee --- ceph-radosgw/hooks/ceph_radosgw_context.py | 2 ++ ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 1a4cc53b..a29ee770 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -152,6 +152,7 @@ def __call__(self): for rid in relation_ids('mon'): for unit in related_units(rid): + fsid = relation_get('fsid', rid=rid, unit=unit) _auth = relation_get('auth', rid=rid, unit=unit) if _auth: auths.append(_auth) @@ -201,6 +202,7 @@ def __call__(self): # (since it defaults to the port the service runs on, and that is # not available externally). ~tribaal 'unit_public_ip': unit_public_ip(), + 'fsid': fsid, } # NOTE(dosaboy): these sections must correspond to what is supported in diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index b8743e35..f1c80a56 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -320,6 +320,8 @@ def _relation_get(attr, unit, rid): return 'cephx' elif attr == 'rgw.testhost_key': return 'testkey' + elif attr == 'fsid': + return 'testfsid' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -338,6 +340,7 @@ def _relation_get(attr, unit, rid): 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, 'rgw_zone': None, + 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -365,6 +368,8 @@ def _relation_get(attr, unit, rid): return 'cephx' elif attr == 'rgw.testhost_key': return 'testkey' + elif attr == 'fsid': + return 'testfsid' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -383,6 +388,7 @@ def _relation_get(attr, unit, rid): 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, 'rgw_zone': None, + 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -419,6 +425,8 @@ def _relation_get(attr, unit, rid): return auths.pop() elif attr == 'rgw.testhost_key': return 'testkey' + elif attr == 'fsid': + return 'testfsid' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -437,6 +445,7 @@ def _relation_get(attr, unit, rid): 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, 'rgw_zone': None, + 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) @@ -455,6 +464,8 @@ def _relation_get(attr, unit, rid): return auths.pop() elif attr == 'rgw.testhost_key': return 'testkey' + elif attr == 'fsid': + return 'testfsid' self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] @@ -473,6 +484,7 @@ def _relation_get(attr, unit, rid): 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, 'rgw_zone': None, + 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) From b58f5fe39e965e1a252640fd189079aa3725aa42 Mon Sep 17 00:00:00 2001 From: Zachary Zehring Date: Mon, 20 May 2019 16:33:23 -0400 Subject: [PATCH 1776/2699] Add set 
require-osd-release command to osd hook. To access all ceph features for a new release, require-osd-release must be set to the current release. Else, features will not be available and ceph health gives a warning on luminous. Here, we check to see if an osd has upgraded its release and notified mon. If so, we run the post-upgrade steps when all osds have reached the new release. The one (and only) step is to set require-osd-release if and only if all osds (and mons) have been upgraded to the same version. Get osd release information from ceph_release key in relation dict. Add call to set require-osd-release to current release. Add execute post-upgrade steps func in osd-relations hook. Add logic for determinig whether to run set require-osd-release command. Add logic for checking if all osds and mons have converged to same release. Create func to grab all unique osd releases on each unit. Change-Id: Ia0bc15b3b6d7e8a21fda8e2343d70d9a0024a767 Closes-Bug: #1828630 --- ceph-mon/hooks/ceph_hooks.py | 7 + ceph-mon/hooks/utils.py | 111 +++++++++++ ceph-mon/unit_tests/test_ceph_utils.py | 246 +++++++++++++++++++++++++ 3 files changed, 364 insertions(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index e94df9d6..ccdc3ce3 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -89,6 +89,8 @@ get_public_addr, get_rbd_features, has_rbd_mirrors, + get_ceph_osd_releases, + execute_post_osd_upgrade_steps ) from charmhelpers.contrib.charmsupport import nrpe @@ -586,6 +588,11 @@ def osd_relation(relid=None, unit=None): relation_set(relation_id=relid, relation_settings=data) + if is_leader(): + ceph_osd_releases = get_ceph_osd_releases() + if len(ceph_osd_releases) == 1: + execute_post_osd_upgrade_steps(ceph_osd_releases[0]) + # NOTE: radosgw key provision is gated on presence of OSD # units so ensure that any deferred hooks are processed notify_radosgws() diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 0ac0405f..a35efedb 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -16,6 +16,7 @@ import re import socket import subprocess +import errno from charmhelpers.core.hookenv import ( DEBUG, @@ -26,6 +27,7 @@ network_get_primary_address, related_units, relation_ids, + relation_get, status_set, unit_get, ) @@ -50,6 +52,11 @@ import dns.resolver +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + def enable_pocket(pocket): apt_sources = "/etc/apt/sources.list" with open(apt_sources, "r") as sources: @@ -216,3 +223,107 @@ def get_rbd_features(): return int(rbd_feature_config) elif has_rbd_mirrors(): return add_rbd_mirror_features(get_default_rbd_features()) + + +def get_ceph_osd_releases(): + ceph_osd_releases = set() + for r_id in relation_ids('osd'): + for unit in related_units(r_id): + ceph_osd_release = relation_get( + attribute='ceph_release', + unit=unit, rid=r_id + ) + if ceph_osd_release is not None: + ceph_osd_releases.add(ceph_osd_release) + return list(ceph_osd_releases) + + +def execute_post_osd_upgrade_steps(ceph_osd_release): + """Executes post-upgrade steps. + + Allows execution of any steps that need to be taken after osd upgrades + have finished (often specified in ceph upgrade docs). + + :param str ceph_osd_release: the new ceph-osd release. 
+ """ + log('Executing post-ceph-osd upgrade commands.') + try: + if (_all_ceph_versions_same() and + not _is_required_osd_release(ceph_osd_release)): + log('Setting require_osd_release to {}.'.format(ceph_osd_release)) + _set_require_osd_release(ceph_osd_release) + except OsdPostUpgradeError as upgrade_error: + msg = 'OSD post-upgrade steps failed: {}'.format( + upgrade_error) + log(message=msg, level='ERROR') + + +def _all_ceph_versions_same(): + """Checks that ceph-mon and ceph-osd have converged to the same version. + + :return boolean: True if all same, false if not or command failed. + """ + try: + versions_command = 'ceph versions' + versions_str = subprocess.check_output( + versions_command.split()).decode('UTF-8') + except subprocess.CalledProcessError as call_error: + if call_error.returncode == errno.EINVAL: + log('Calling "ceph versions" failed. Command requires ' + 'luminous and above.', level='WARNING') + return False + else: + log('Calling "ceph versions" failed.', level='ERROR') + raise OsdPostUpgradeError(call_error) + versions_dict = json.loads(versions_str) + if len(versions_dict['overall']) > 1: + log('All upgrades of mon and osd have not completed.') + return False + if len(versions_dict['osd']) < 1: + log('Monitors have converged but no osd versions found.', + level='WARNING') + return False + return True + + +def _is_required_osd_release(release): + """Checks to see if require_osd_release is set to input release. + + Runs and parses the ceph osd dump command to determine if + require_osd_release is set to the input release. If so, return + True. Else, return False. + + :param str release: the release to check against + :return bool: True if releases match, else False. + :raises: OsdPostUpgradeError + """ + try: + dump_command = 'ceph osd dump -f json' + osd_dump_str = subprocess.check_output( + dump_command.split()).decode('UTF-8') + osd_dump_dict = json.loads(osd_dump_str) + except subprocess.CalledProcessError as cmd_error: + log(message='Command {} failed.'.format(cmd_error.cmd), + level='ERROR') + raise OsdPostUpgradeError(cmd_error) + except json.JSONDecodeError as decode_error: + log(message='Failed to decode JSON.', + level='ERROR') + raise OsdPostUpgradeError(decode_error) + return osd_dump_dict.get('require_osd_release') == release + + +def _set_require_osd_release(release): + """Attempts to set the required_osd_release osd config option. 
+ + :param str release: The release to set option to + :raises: OsdPostUpgradeError + """ + try: + command = 'ceph osd require-osd-release {} ' \ + '--yes-i-really-mean-it'.format(release) + subprocess.check_call(command.split()) + except subprocess.CalledProcessError as call_error: + msg = 'Unable to execute command <{}>'.format(call_error.cmd) + log(message=msg, level='ERROR') + raise OsdPostUpgradeError(call_error) diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 076429a3..7062091a 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -75,3 +75,249 @@ def test_get_rbd_features(self, _config, _has_rbd_mirrors, self.assertEquals(utils.get_rbd_features(), 125) _has_rbd_mirrors.return_value = False self.assertEquals(utils.get_rbd_features(), None) + + @mock.patch.object(utils, '_is_required_osd_release') + @mock.patch.object(utils, '_all_ceph_versions_same') + @mock.patch.object(utils, '_set_require_osd_release') + @mock.patch.object(utils, 'log') + def test_execute_post_osd_upgrade_steps_executes( + self, log, _set_require_osd_release, + _all_ceph_versions_same, _is_required_osd_release): + release = 'luminous' + + _all_ceph_versions_same.return_value = True + _is_required_osd_release.return_value = False + + utils.execute_post_osd_upgrade_steps(release) + + _set_require_osd_release.assert_called_once_with(release) + + @mock.patch.object(utils, '_is_required_osd_release') + @mock.patch.object(utils, '_all_ceph_versions_same') + @mock.patch.object(utils, '_set_require_osd_release') + @mock.patch.object(utils, 'log') + def test_execute_post_osd_upgrade_steps_no_exec_already_set( + self, log, _set_require_osd_release, + _all_ceph_versions_same, _is_required_osd_release): + release = 'jewel' + + _all_ceph_versions_same.return_value = True + _is_required_osd_release.return_value = True + + utils.execute_post_osd_upgrade_steps(release) + + _set_require_osd_release.assert_not_called() + + @mock.patch.object(utils, '_is_required_osd_release') + @mock.patch.object(utils, '_all_ceph_versions_same') + @mock.patch.object(utils, '_set_require_osd_release') + @mock.patch.object(utils, 'log') + def test_execute_post_osd_upgrade_steps_handle_upgrade_error( + self, log, _set_require_osd_release, + _all_ceph_versions_same, _is_required_osd_release): + release = 'luminous' + + _all_ceph_versions_same.side_effect = utils.OsdPostUpgradeError() + + utils.execute_post_osd_upgrade_steps(release) + + log.assert_called_with(message=mock.ANY, level='ERROR') + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + @mock.patch.object(utils, 'log') + def test_all_ceph_versions_same_one_overall_one_osd_true( + self, log, json_loads, subprocess_check_output): + mock_versions_dict = dict( + osd=dict(version_1=1), + overall=dict(version_1=2) + ) + json_loads.return_value = mock_versions_dict + + return_bool = utils._all_ceph_versions_same() + + self.assertTrue( + return_bool, + msg='all_ceph_versions_same returned False but should be True') + log.assert_not_called() + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + @mock.patch.object(utils, 'log') + def test_all_ceph_versions_same_two_overall_returns_false( + self, log, json_loads, subprocess_check_output): + mock_versions_dict = dict( + osd=dict(version_1=1), + overall=dict(version_1=1, version_2=2) + ) + json_loads.return_value = mock_versions_dict + + return_bool = 
utils._all_ceph_versions_same() + + self.assertFalse( + return_bool, + msg='all_ceph_versions_same returned True but should be False') + log.assert_called_once() + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + @mock.patch.object(utils, 'log') + def test_all_ceph_versions_same_one_overall_no_osd_returns_false( + self, log, json_loads, subprocess_check_output): + mock_versions_dict = dict( + osd=dict(), + overall=dict(version_1=1) + ) + json_loads.return_value = mock_versions_dict + + return_bool = utils._all_ceph_versions_same() + + self.assertFalse( + return_bool, + msg='all_ceph_versions_same returned True but should be False') + log.assert_called_once() + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils, 'log') + def test_all_ceph_versions_same_cmd_not_found( + self, log, subprocess_check_output): + call_exception = utils.subprocess.CalledProcessError( + 22, mock.MagicMock() + ) + subprocess_check_output.side_effect = call_exception + + return_bool = utils._all_ceph_versions_same() + + self.assertFalse(return_bool) + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils, 'log') + def test_all_ceph_versions_same_raise_error_on_unknown_rc( + self, log, subprocess_check_output): + call_exception = utils.subprocess.CalledProcessError( + 0, mock.MagicMock() + ) + subprocess_check_output.side_effect = call_exception + + with self.assertRaises(utils.OsdPostUpgradeError): + utils._all_ceph_versions_same() + + @mock.patch.object(utils.subprocess, 'check_call') + @mock.patch.object(utils, 'log') + def test_set_require_osd_release_success(self, log, check_call): + release = 'luminous' + utils._set_require_osd_release(release) + expected_call = mock.call( + ['ceph', 'osd', 'require-osd-release', release] + ) + check_call.has_calls(expected_call) + + @mock.patch.object(utils.subprocess, 'check_call') + @mock.patch.object(utils, 'log') + def test_set_require_osd_release_raise_call_error(self, log, check_call): + release = 'luminous' + check_call.side_effect = utils.subprocess.CalledProcessError( + 0, mock.mock.MagicMock() + ) + expected_call = mock.call( + ['ceph', 'osd', 'require-osd-release', release] + ) + + with self.assertRaises(utils.OsdPostUpgradeError): + utils._set_require_osd_release(release) + + check_call.has_calls(expected_call) + log.assert_called_once() + + @mock.patch.object(utils, 'relation_ids') + @mock.patch.object(utils, 'related_units') + @mock.patch.object(utils, 'relation_get') + def test_get_ceph_osd_releases_one_release( + self, relation_get, related_units, relation_ids): + r_ids = ['a', 'b', 'c'] + r_units = ['1'] + ceph_release = 'mimic' + + relation_ids.return_value = r_ids + related_units.return_value = r_units + relation_get.return_value = ceph_release + + releases = utils.get_ceph_osd_releases() + + self.assertEqual(len(releases), 1) + self.assertEqual(releases[0], ceph_release) + + @mock.patch.object(utils, 'relation_ids') + @mock.patch.object(utils, 'related_units') + @mock.patch.object(utils, 'relation_get') + def test_get_ceph_osd_releases_two_releases( + self, relation_get, related_units, relation_ids): + r_ids = ['a', 'b'] + r_units = ['1'] + ceph_release_1 = 'luminous' + ceph_release_2 = 'mimic' + + relation_ids.return_value = r_ids + related_units.return_value = r_units + relation_get.side_effect = [ceph_release_1, ceph_release_2] + + releases = utils.get_ceph_osd_releases() + + self.assertEqual(len(releases), 2) + self.assertEqual(releases[0], 
ceph_release_1) + self.assertEqual(releases[1], ceph_release_2) + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + def test_is_required_osd_release_not_set_return_false( + self, loads, check_output): + release = 'luminous' + previous_release = 'jewel' + osd_dump_dict = dict(require_osd_release=previous_release) + + loads.return_value = osd_dump_dict + + return_bool = utils._is_required_osd_release(release) + + self.assertFalse(return_bool) + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + def test_is_required_osd_release_is_set_return_true( + self, loads, check_output): + release = 'luminous' + osd_dump_dict = dict(require_osd_release=release) + + loads.return_value = osd_dump_dict + + return_bool = utils._is_required_osd_release(release) + + self.assertTrue(return_bool) + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + def test_is_required_osd_release_subprocess_error(self, loads, + check_output): + release = 'luminous' + + call_exception = utils.subprocess.CalledProcessError( + 0, mock.MagicMock() + ) + check_output.side_effect = call_exception + + with self.assertRaises(utils.OsdPostUpgradeError): + utils._is_required_osd_release(release) + + @mock.patch.object(utils.subprocess, 'check_output') + @mock.patch.object(utils.json, 'loads') + def test_is_required_osd_release_json_loads_error(self, loads, + check_output): + release = 'luminous' + + call_exception = utils.json.JSONDecodeError( + '', mock.MagicMock(), 0 + ) + loads.side_effect = call_exception + + with self.assertRaises(utils.OsdPostUpgradeError): + utils._is_required_osd_release(release) From e88eedfd8b72fe54ff2e988473d4671576f5562f Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 31 May 2019 10:50:02 +0100 Subject: [PATCH 1777/2699] Ensure fsid variable always exists for mon context To avoid any occurrences of: UnboundLocalError: local variable 'fsid' referenced before assignment ensure that the fsid variable is always defined, but set to None if the mon cluster has not yet presented this data item. Change-Id: I497b2553a2f7743763e2ea040ff9276d97bf5d73 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index a29ee770..7e4fed8c 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -149,6 +149,7 @@ def __call__(self): mon_hosts = [] auths = [] + fsid = None for rid in relation_ids('mon'): for unit in related_units(rid): From 745608e43a758b5a4a6d274dcbdefc5768c71b3f Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 30 May 2019 16:48:17 +0100 Subject: [PATCH 1778/2699] Add support for Nautilus release Resync charms.ceph to pick up changes to support new approach to admin and bootstrap key generation within the ceph-mon cluster. Update get_mon_hosts to avoid specifying the port number for the MON daemons; we use the default so this is not required, and at Nautilus the MON daemons run both v1 and v2 messenger ports. Specifying the port in the ceph.conf file disables the v2 messenger port which is not the desired behaviour on upgrade or new installation. Drop start hook; this has been present in the charm since its inception as 'ceph' but is really not needed - re-deploying the charm to units which previously hosted ceph-mon is dangerous and the ceph-* daemons should still be running anyway.
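The mon-host change is the subtle part of this patch: a bare address lets a client negotiate either messenger, while an explicit port pins it to v1. A sketch of the intent, with the default ports noted for reference:

    def get_mon_host_entries(addrs):
        # Nautilus MONs serve both messenger ports (v2 on 3300, v1 on 6789
        # by default). Emitting bare addresses into ceph.conf lets clients
        # negotiate msgr v2; appending ':6789' as the charm used to do
        # would silently restrict them to the v1 protocol.
        return sorted(addrs)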
Depends-On: I2dfab7404b72e314625ea554ee64595c5e26f3c6 Change-Id: I340dbf427551e6f9f8cd4ca89128627e497d6097 --- ceph-mon/hooks/ceph_hooks.py | 19 +-- ceph-mon/lib/ceph/utils.py | 166 ++++++++++++++++++------- ceph-mon/unit_tests/test_ceph_hooks.py | 4 +- 3 files changed, 126 insertions(+), 63 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ccdc3ce3..b889fee2 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -18,7 +18,6 @@ import json import os import subprocess -import socket import sys import uuid @@ -51,7 +50,6 @@ application_version_set) from charmhelpers.core.host import ( service_pause, - service_restart, mkdir, write_file, rsync, @@ -298,7 +296,7 @@ def config_changed(): def get_mon_hosts(): hosts = [] addr = get_public_addr() - hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) + hosts.append(format_ipv6_addr(addr) or addr) rel_ids = relation_ids('mon') if config('no-bootstrap'): @@ -308,8 +306,7 @@ def get_mon_hosts(): for unit in related_units(relid): addr = relation_get('ceph-public-address', unit, relid) if addr is not None: - hosts.append('{}:6789'.format( - format_ipv6_addr(addr) or addr)) + hosts.append(format_ipv6_addr(addr) or addr) return sorted(hosts) @@ -818,18 +815,6 @@ def upgrade_charm(): notify_rbd_mirrors() -@hooks.hook('start') -def start(): - # In case we're being redeployed to the same machines, try - # to make sure everything is running as soon as possible. - if ceph.systemd(): - service_restart('ceph-mon') - else: - service_restart('ceph-mon-all') - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - service_restart('ceph-mgr@{}'.format(socket.gethostname())) - - @hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index b4f87907..970b15fe 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -40,6 +40,7 @@ service_start, service_stop, CompareHostReleases, + write_file, ) from charmhelpers.core.hookenv import ( cached, @@ -82,7 +83,7 @@ PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'radosgw', 'xfsprogs', - 'lvm2', 'parted'] + 'lvm2', 'parted', 'smartmontools'] CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' @@ -950,13 +951,13 @@ def rescan_osd_devices(): udevadm_settle() - +_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) + return os.path.exists(_client_admin_keyring) def wait_for_bootstrap(): @@ -1259,7 +1260,23 @@ def systemd(): return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' +def use_bluestore(): + """Determine whether bluestore should be used for OSD's + + :returns: whether bluestore disk format should be used + :rtype: bool""" + if cmp_pkgrevno('ceph', '12.2.0') < 0: + return False + return config('bluestore') + + def bootstrap_monitor_cluster(secret): + """Bootstrap local ceph mon into the ceph cluster + + :param secret: cephx secret to use for monitor authentication + :type secret: str + :raises: Exception if ceph mon cannot be bootstrapped + """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) @@ -1280,21 +1297,35 @@ def bootstrap_monitor_cluster(secret): perms=0o755) # end changes for Ceph >= 0.61.3 try: - 
add_keyring_to_ceph(keyring, - secret, - hostname, - path, - done, - init_marker) - + _create_monitor(keyring, + secret, + hostname, + path, + done, + init_marker) + _create_keyrings() except: raise finally: os.unlink(keyring) -@retry_on_exception(3, base_delay=5) -def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): +def _create_monitor(keyring, secret, hostname, path, done, init_marker): + """Create monitor filesystem and enable and start ceph-mon process + + :param keyring: path to temporary keyring on disk + :type keyring: str + :param secret: cephx secret to use for monitor authentication + :type: secret: str + :param hostname: hostname of the local unit + :type hostname: str + :param path: full path to ceph mon directory + :type path: str + :param done: full path to 'done' marker for ceph mon + :type done: str + :param init_marker: full path to 'init' marker for ceph mon + :type init_marker: str + """ subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', '--add-key={}'.format(secret), @@ -1310,39 +1341,72 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): pass if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + subprocess.check_call(['systemctl', 'enable', systemd_unit]) + service_restart(systemd_unit) else: service_restart('ceph-mon-all') - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. - # NOTE(fnordahl): Explicitly run `ceph-crate-keys` for older - # ceph releases too. This improves bootstrap - # resilience as the charm will wait for - # presence of peer units before attempting - # to bootstrap. Note that charms deploying - # ceph-mon service should disable running of - # `ceph-create-keys` service in init system. - cmd = ['ceph-create-keys', '--id', hostname] - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 - # seconds is not adequate. Increase timeout when - # timeout parameter available. For older releases - # we rely on retry_on_exception decorator. - # LP#1719436 - cmd.extend(['--timeout', '1800']) - subprocess.check_call(cmd) - _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' - osstat = os.stat(_client_admin_keyring) - if not osstat.st_size: - # NOTE(fnordahl): Retry will fail as long as this file exists. - # LP#1719436 - os.remove(_client_admin_keyring) - raise Exception + +@retry_on_exception(3, base_delay=5) +def _create_keyrings(): + """Create keyrings for operation of ceph-mon units + + :raises: Exception if keyrings cannot be created + """ + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + # NOTE(jamespage): At Nautilus, keys are created by the + # monitors automatically and just need + # exporting. 
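# (Illustration only, not part of the change: the check_output call
#  below is equivalent to running, via sudo as the ceph user,
#      ceph --name mon. \
#          --keyring /var/lib/ceph/mon/ceph-$(hostname)/keyring \
#          auth get client.admin
#  and its stdout is the client.admin keyring text that is written
#  to /etc/ceph/ceph.client.admin.keyring further down.)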
+ output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get', 'client.admin', + ]).decode('UTF-8')).strip() + if not output: + # NOTE: key not yet created, raise exception and retry + raise Exception + write_file(_client_admin_keyring, output, + owner=ceph_user(), group=ceph_user(), + perms=0o400) + else: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older + # ceph releases too. This improves bootstrap + # resilience as the charm will wait for + # presence of peer units before attempting + # to bootstrap. Note that charms deploying + # ceph-mon service should disable running of + # `ceph-create-keys` service in init system. + cmd = ['ceph-create-keys', '--id', socket.gethostname()] + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate. Increase timeout when + # timeout parameter available. For older releases + # we rely on retry_on_exception decorator. + # LP#1719436 + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) + osstat = os.stat(_client_admin_keyring) + if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. + # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception def update_monfs(): @@ -1555,7 +1619,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append(osd_format) # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + if use_bluestore(): cmd.append('--bluestore') wal = get_devices('bluestore-wal') if wal: @@ -1692,7 +1756,10 @@ def is_active_bluestore_device(dev): return False vg_name = lvm.list_lvm_volume_group(dev) - lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + try: + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + except IndexError: + return False block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') for block_candidate in block_symlinks: @@ -2700,6 +2767,14 @@ def dirs_need_ownership_update(service): if (curr_owner == expected_owner) and (curr_group == expected_group): continue + # NOTE(lathiat): when config_changed runs on reboot, the OSD might not + # yet be mounted or started, and the underlying directory the OSD is + # mounted to is expected to be owned by root. So skip the check. This + # may also happen for OSD directories for OSDs that were removed. 
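# (Illustration only, assuming the usual OSD data layout: a prepared
#  OSD directory carries a 'magic' marker file created at mkfs time,
#  e.g. /var/lib/ceph/osd/ceph-0/magic, so its absence is treated
#  below as "OSD not mounted/started" and the ownership check is
#  skipped for that directory.)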
+        if (service == 'osd' and
+                not os.path.exists(os.path.join(child, 'magic'))):
+            continue
+
         log('Directory "%s" needs its ownership updated' % child, DEBUG)
         return True
 
@@ -2712,6 +2787,8 @@ def dirs_need_ownership_update(service):
     ('hammer', 'jewel'),
     ('jewel', 'luminous'),
     ('luminous', 'mimic'),
+    ('luminous', 'nautilus'),
+    ('mimic', 'nautilus'),
 ])
 
 # Map UCA codenames to ceph codenames
@@ -2727,6 +2804,7 @@ def dirs_need_ownership_update(service):
     'queens': 'luminous',
     'rocky': 'mimic',
     'stein': 'mimic',
+    'train': 'nautilus',
 }
 
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index a0ed8d11..f3a149bf 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -619,8 +619,8 @@ def rel_units_side_effect(relid):
         self.relation_get.side_effect = rel_get_side_effect
         hosts = ceph_hooks.get_mon_hosts()
         self.assertEqual(hosts, [
-            '172.16.0.2:6789', '172.16.0.3:6789', '172.16.0.4:6789',
-            '172.16.10.2:6789', '172.16.10.3:6789', '172.16.10.4:6789',
+            '172.16.0.2', '172.16.0.3', '172.16.0.4',
+            '172.16.10.2', '172.16.10.3', '172.16.10.4',
         ])

From 7121a844ddd8b57ae87ec636186e937ceb7f9989 Mon Sep 17 00:00:00 2001
From: James Page
Date: Thu, 20 Jun 2019 08:43:04 +0100
Subject: [PATCH 1779/2699] Mark multisite test bundles as dev

Move multisite test bundles to dev section; the tests experience some
race conditions with RADOS Gateway deployments taking time to resync
after promotion operations that need to be accommodated in the zaza
test code.

Related-Bug: 1833509
Change-Id: If36671a00c253e36c339e72dd14b20f7c68f7577
---
 ceph-radosgw/tests/tests.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml
index 55f93139..78ec5bf2 100644
--- a/ceph-radosgw/tests/tests.yaml
+++ b/ceph-radosgw/tests/tests.yaml
@@ -7,12 +7,12 @@ gate_bundles:
   - xenial-pike
   - xenial-queens
   - bionic-queens
-  - bionic-queens-multisite
   - bionic-rocky
-  - bionic-rocky-multisite
 smoke_bundles:
   - bionic-rocky
 dev_bundles:
   - cosmic-rocky
+  - bionic-queens-multisite
+  - bionic-rocky-multisite
 tests:
   - zaza.openstack.charm_tests.ceph.tests.CephRGWTest

From 44873674988d8aaabcafe302a3b95f8d62529af5 Mon Sep 17 00:00:00 2001
From: Edward Hope-Morley
Date: Tue, 25 Jun 2019 14:47:39 +0100
Subject: [PATCH 1780/2699] Remove trusty-icehouse functional test

Change-Id: I248d6ef5ae3ffb1d5b8e666e4c4676578460d439
---
 .../tests/bundles/trusty-icehouse.yaml | 110 ------------------
 ceph-radosgw/tests/tests.yaml          |   1 -
 2 files changed, 111 deletions(-)
 delete mode 100644 ceph-radosgw/tests/bundles/trusty-icehouse.yaml

diff --git a/ceph-radosgw/tests/bundles/trusty-icehouse.yaml b/ceph-radosgw/tests/bundles/trusty-icehouse.yaml
deleted file mode 100644
index e087ab56..00000000
--- a/ceph-radosgw/tests/bundles/trusty-icehouse.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-options:
-  source: &source distro
-series: trusty
-applications:
-  ceph-radosgw:
-    charm: ceph-radosgw
-    series: trusty
-    num_units: 1
-    options:
-      source: *source
-  ceph-osd:
-    charm: cs:~openstack-charmers-next/ceph-osd
-    num_units: 3
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-  ceph-mon:
-    charm: cs:~openstack-charmers-next/ceph-mon
-    num_units: 3
-    options:
-      source: *source
-      auth-supported: 'none'
-  percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
-    num_units: 1
-  rabbitmq-server:
-    charm: cs:~openstack-charmers-next/rabbitmq-server
- num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *source - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *source - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *source - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *source -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 78ec5bf2..b879711b 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,6 +1,5 @@ charm_name: ceph-radosgw gate_bundles: - - trusty-icehouse - trusty-mitaka - xenial-mitaka - xenial-ocata From 2c2667613b25cbaac0d80665e3c5a45962e202ce Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 1 Jul 2019 17:21:03 +0200 Subject: [PATCH 1781/2699] Shift ceph-radosgw to constrained instance size This tries to resolve an issue where the CI environment places instances on machines with swap volumes and Juju storage hands off bad information Change-Id: Ie0202b05319c1bcab8334f8c8356acca4c96eaf5 --- ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml | 2 ++ ceph-radosgw/tests/bundles/bionic-queens.yaml | 1 + ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml | 2 ++ ceph-radosgw/tests/bundles/bionic-rocky.yaml | 1 + ceph-radosgw/tests/bundles/cosmic-rocky.yaml | 1 + ceph-radosgw/tests/bundles/trusty-mitaka.yaml | 1 + ceph-radosgw/tests/bundles/xenial-mitaka.yaml | 1 + ceph-radosgw/tests/bundles/xenial-ocata.yaml | 1 + ceph-radosgw/tests/bundles/xenial-pike.yaml | 1 + ceph-radosgw/tests/bundles/xenial-queens.yaml | 1 + 10 files changed, 12 insertions(+) diff --git a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml index 8cb411ad..ca5686c9 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml +++ 
b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml @@ -15,6 +15,7 @@ applications: east-ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: @@ -37,6 +38,7 @@ applications: west-ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/bionic-queens.yaml b/ceph-radosgw/tests/bundles/bionic-queens.yaml index 90652494..68a12cfa 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml index 75e7eadb..97eb3f32 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml @@ -15,6 +15,7 @@ applications: east-ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: @@ -37,6 +38,7 @@ applications: west-ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/bionic-rocky.yaml b/ceph-radosgw/tests/bundles/bionic-rocky.yaml index aef5d5d2..e4d7508b 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/cosmic-rocky.yaml b/ceph-radosgw/tests/bundles/cosmic-rocky.yaml index 64016872..6ddbfecd 100644 --- a/ceph-radosgw/tests/bundles/cosmic-rocky.yaml +++ b/ceph-radosgw/tests/bundles/cosmic-rocky.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml index 23f4d66d..e6e6b17d 100644 --- a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml +++ b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml index 63e1a11d..603a7813 100644 --- a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml +++ b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/xenial-ocata.yaml b/ceph-radosgw/tests/bundles/xenial-ocata.yaml index 7da69b18..6c7a7fe3 100644 --- a/ceph-radosgw/tests/bundles/xenial-ocata.yaml +++ b/ceph-radosgw/tests/bundles/xenial-ocata.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/xenial-pike.yaml 
b/ceph-radosgw/tests/bundles/xenial-pike.yaml index 27705325..63d31d2f 100644 --- a/ceph-radosgw/tests/bundles/xenial-pike.yaml +++ b/ceph-radosgw/tests/bundles/xenial-pike.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-radosgw/tests/bundles/xenial-queens.yaml b/ceph-radosgw/tests/bundles/xenial-queens.yaml index 37368b77..0c3b3ba7 100644 --- a/ceph-radosgw/tests/bundles/xenial-queens.yaml +++ b/ceph-radosgw/tests/bundles/xenial-queens.yaml @@ -11,6 +11,7 @@ applications: ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 + constraints: "mem=2048" storage: osd-devices: 'cinder,10G' options: From c6a6b7a58c83e5e327423f44e93d537611cd24c8 Mon Sep 17 00:00:00 2001 From: Rodrigo Barbieri Date: Thu, 13 Jun 2019 11:57:20 -0300 Subject: [PATCH 1782/2699] Add config option for keystone admin roles RADOS Gateway supports setting keystone operator and admin roles. RADOS Gateway requires admin roles for keystone users to change their user quota. Regular operator/member roles are not allowed to do so. The lack of this config option prevents swift users with admin roles from being able to set their quotas. Therefore, a config option 'admin-roles' is now added to the charm to map to 'rgw keystone accepted admin roles' RADOS Gateway config. Please note that this is only effective from Luminous Ceph Release. Change-Id: Ic0b9aa39eef9fbc6c43eb4e66ab72d90787c2017 Closes-Bug: #1831577 --- ceph-radosgw/config.yaml | 8 +++- ceph-radosgw/hooks/ceph_radosgw_context.py | 8 +++- ceph-radosgw/hooks/hooks.py | 6 ++- ceph-radosgw/templates/ceph.conf | 1 + .../unit_tests/test_ceph_radosgw_context.py | 28 +++++++++---- ceph-radosgw/unit_tests/test_hooks.py | 40 ++++++++++++------- 6 files changed, 65 insertions(+), 26 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 56c53c09..322e07f5 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -121,10 +121,16 @@ options: # Keystone integration operator-roles: type: string - default: "Member,Admin" + default: "Member" description: | Comma-separated list of Swift operator roles; used when integrating with OpenStack Keystone. + admin-roles: + type: string + default: "Admin" + description: | + Comma-separated list of Swift admin roles; used when integrating with + OpenStack Keystone. Admin roles can set the user quota amount. 
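  # (Illustrative usage only, assuming an application named
  #  ceph-radosgw deployed with this charm:
  #      juju config ceph-radosgw operator-roles=Member admin-roles=Admin
  #  lets holders of the 'Admin' role set user quotas while 'Member'
  #  holders retain regular Swift operator access.)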
region: type: string default: RegionOne diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 7e4fed8c..54b73822 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -96,7 +96,13 @@ def __call__(self): ctxt.pop('admin_domain_id') ctxt['auth_type'] = 'keystone' - ctxt['user_roles'] = config('operator-roles') + if cmp_pkgrevno('radosgw', "11.0.0") >= 0: + ctxt['user_roles'] = config('operator-roles') + ctxt['admin_roles'] = config('admin-roles') + else: + ctxt['user_roles'] = config('operator-roles') + if config('admin-roles'): + ctxt['user_roles'] += (',' + config('admin-roles')) ctxt['cache_size'] = config('cache-size') if self.context_complete(ctxt): return ctxt diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 7db0e240..8debca8f 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -298,11 +298,15 @@ def identity_joined(relid=None): (canonical_url(CONFIGS, INTERNAL), port) public_url = '%s:%s/swift/v1' % \ (canonical_url(CONFIGS, PUBLIC), port) + roles = [x for x in [config('operator-roles'), config('admin-roles')] if x] + requested_roles = '' + if roles: + requested_roles = ','.join(roles) if len(roles) > 1 else roles[0] relation_set(service='swift', region=config('region'), public_url=public_url, internal_url=internal_url, admin_url=admin_url, - requested_roles=config('operator-roles'), + requested_roles=requested_roles, relation_id=relid) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index db55dd4e..c1ea349d 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -51,6 +51,7 @@ rgw keystone admin project = {{ admin_tenant_name }} rgw keystone admin token = {{ admin_token }} {% endif -%} rgw keystone accepted roles = {{ user_roles }} +rgw keystone accepted admin roles = {{ admin_roles }} rgw keystone token cache size = {{ cache_size }} rgw s3 auth use keystone = true rgw s3 auth order = local, external diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index f1c80a56..1ba768f2 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -85,8 +85,12 @@ def setUp(self): @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, _filter_installed_packages, - jewel_installed=False): + jewel_installed=False, cmp_pkgrevno_side_effects=None): + self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects + if cmp_pkgrevno_side_effects + else [-1, -1]) self.test_config.set('operator-roles', 'Babel') + self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] @@ -95,9 +99,6 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, _rids.return_value = 'rid1' _runits.return_value = 'runit' _ctxt_comp.return_value = True - self.cmp_pkgrevno.return_value = -1 - if jewel_installed: - self.cmp_pkgrevno.return_value = 0 id_data = { 'service_port': 9876, 'service_host': '127.0.0.4', @@ -127,8 +128,12 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', - 'user_roles': 'Babel', } + if cmp_pkgrevno_side_effects and 
cmp_pkgrevno_side_effects[1] >= 0: + expect['user_roles'] = 'Babel' + expect['admin_roles'] = 'Dart' + else: + expect['user_roles'] = 'Babel,Dart' if jewel_installed: expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) @@ -145,6 +150,7 @@ def test_ids_ctxt_missing_admin_domain_id( self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, _filter_installed_packages, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') + self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] @@ -183,7 +189,7 @@ def test_ids_ctxt_missing_admin_domain_id( 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', - 'user_roles': 'Babel', + 'user_roles': 'Babel,Dart', } if jewel_installed: expect['auth_keystone_v3_supported'] = True @@ -201,6 +207,7 @@ def test_ids_ctxt_v3( self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, _filter_installed_packages, jewel_installed=False): self.test_config.set('operator-roles', 'Babel') + self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] @@ -244,14 +251,19 @@ def test_ids_ctxt_v3( 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', - 'user_roles': 'Babel', + 'user_roles': 'Babel,Dart', } if jewel_installed: expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) def test_ids_ctxt_jewel(self): - self.test_ids_ctxt(jewel_installed=True) + self.test_ids_ctxt(jewel_installed=True, + cmp_pkgrevno_side_effects=[0, -1]) + + def test_ids_ctxt_luminous(self): + self.test_ids_ctxt(jewel_installed=True, + cmp_pkgrevno_side_effects=[1, 0]) @patch.object(charmhelpers.contrib.openstack.context, 'filter_installed_packages', return_value=['absent-pkg']) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index cc046b83..45207e7c 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -244,21 +244,31 @@ def test_identity_joined_early_version(self, _config): @patch('charmhelpers.contrib.openstack.ip.resolve_address') @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined(self, _config, _resolve_address): - self.related_units = ['unit/0'] - self.cmp_pkgrevno.return_value = 1 - _resolve_address.return_value = 'myserv' - _config.side_effect = self.test_config.get - self.test_config.set('region', 'region1') - self.test_config.set('operator-roles', 'admin') - ceph_hooks.identity_joined(relid='rid') - self.relation_set.assert_called_with( - service='swift', - region='region1', - public_url='http://myserv:80/swift/v1', - internal_url='http://myserv:80/swift/v1', - requested_roles='admin', - relation_id='rid', - admin_url='http://myserv:80/swift') + + def _test_identify_joined(expected): + self.related_units = ['unit/0'] + self.cmp_pkgrevno.return_value = 1 + _resolve_address.return_value = 'myserv' + _config.side_effect = self.test_config.get + self.test_config.set('region', 'region1') + ceph_hooks.identity_joined(relid='rid') + self.relation_set.assert_called_with( + service='swift', + region='region1', + public_url='http://myserv:80/swift/v1', + internal_url='http://myserv:80/swift/v1', + requested_roles=expected, + relation_id='rid', + admin_url='http://myserv:80/swift') 
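        # (For reference, a condensed sketch of the role handling in
        #  identity_joined that the input table below exercises:
        #      roles = [x for x in [config('operator-roles'),
        #                           config('admin-roles')] if x]
        #      requested_roles = ''
        #      if roles:
        #          requested_roles = (','.join(roles) if len(roles) > 1
        #                             else roles[0])
        #  covering the combined, single-role and empty cases.)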
+ + inputs = [{'operator': 'foo', 'admin': 'bar', 'expected': 'foo,bar'}, + {'operator': 'foo', 'expected': 'foo'}, + {'admin': 'bar', 'expected': 'bar'}, + {'expected': ''}] + for input in inputs: + self.test_config.set('operator-roles', input.get('operator', '')) + self.test_config.set('admin-roles', input.get('admin', '')) + _test_identify_joined(input['expected']) @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') From 7e510b5af2dc63d8cc305433a244303ebdb872c7 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 2 Jul 2019 22:20:26 -0400 Subject: [PATCH 1783/2699] Fix bdev-enable-discard to honor the documentation When the user provides an unexpected value in this configuration option a warning message is logged and the workload is set to blocked. Closes-Bug: #1835145 Change-Id: I10cac1cf43c11cf11fde196244820ac28b5a47d0 --- ceph-osd/hooks/ceph_hooks.py | 30 ++++++++++--- ceph-osd/unit_tests/test_ceph_hooks.py | 61 +++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 6 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 38d78d0d..a2729593 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -32,6 +32,7 @@ DEBUG, ERROR, INFO, + WARNING, config, relation_ids, related_units, @@ -398,11 +399,12 @@ def get_ceph_context(upgrading=False): 'bluestore_block_db_size': config('bluestore-block-db-size'), } - if config('bdev-enable-discard').lower() == 'enabled': - cephcontext['bdev_discard'] = True - elif config('bdev-enable-discard').lower() == 'auto': - cephcontext['bdev_discard'] = should_enable_discard(get_devices()) - else: + try: + cephcontext['bdev_discard'] = get_bdev_enable_discard() + except ValueError as ex: + # the user set bdev-enable-discard to a non valid value, so logging the + # issue as a warning and falling back to False/disable + log(str(ex), level=WARNING) cephcontext['bdev_discard'] = False if config('prefer-ipv6'): @@ -625,6 +627,19 @@ def get_devices(): return [device for device in devices if device not in _blacklist] +def get_bdev_enable_discard(): + bdev_enable_discard = config('bdev-enable-discard').lower() + if bdev_enable_discard in ['enable', 'enabled']: + return True + elif bdev_enable_discard == 'auto': + return should_enable_discard(get_devices()) + elif bdev_enable_discard in ['disable', 'disabled']: + return False + else: + raise ValueError(("Invalid value for configuration " + "bdev-enable-discard: %s") % bdev_enable_discard) + + @hooks.hook('mon-relation-changed', 'mon-relation-departed') def mon_relation(): @@ -817,6 +832,11 @@ def assess_status(): status_set('active', 'Unit is ready ({} OSD)'.format(len(running_osds))) + try: + get_bdev_enable_discard() + except ValueError as ex: + status_set('blocked', str(ex)) + @hooks.hook('update-status') @harden() diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index b55b87ae..bc58a9e4 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -37,7 +37,7 @@ 'customize-failure-domain': False, 'bluestore': False, 'crush-initial-weight': '0', - 'bdev-enable-discard': 'enabled', + 'bdev-enable-discard': 'enable', 'osd-devices': '/dev/vdb', 'bluestore': False, 'bluestore-block-wal-size': 0, @@ -93,6 +93,47 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', 
lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, + mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['bdev-enable-discard'] = 'some-invalid-value' + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'crush_initial_weight': '0', + 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bdev_discard': False, + 'bluestore': False, + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0} + self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -551,6 +592,24 @@ def test_emit_ceph_conf(self, mock_mkdir, mock_service_name, self.assertTrue(mock_write_file.called) self.assertTrue(mock_install_alternative.called) + @patch.object(ceph_hooks, 'should_enable_discard') + @patch.object(ceph_hooks, 'config') + def test_get_bdev_enable_discard(self, mock_config, + mock_should_enable_discard): + mock_should_enable_discard.return_value = True + config = {'bdev-enable-discard': 'xxx', + 'osd-devices': '/dev/vdb'} + mock_config.side_effect = lambda key: config[key] + self.assertRaises(ValueError, ceph_hooks.get_bdev_enable_discard) + + for value, expected in [('enable', True), + ('enabled', True), + ('disable', False), + ('disabled', False), + ('auto', True)]: + config['bdev-enable-discard'] = value + self.assertEqual(ceph_hooks.get_bdev_enable_discard(), expected) + @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'relation_set') From fa1a9f27938a01b1c03393104c958c207e608581 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 8 Jul 2019 16:31:10 +0200 Subject: [PATCH 1784/2699] Enable bundle specific tox zaza target Change-Id: I2c4719507aa1809cd711aa6bd98201b943156258 --- ceph-osd/tox.ini | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 4d35de76..28cc3111 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -23,7 +23,7 @@ deps = -r{toxinidir}/test-requirements.txt # permitted. 
http://governance.openstack.org/reference/cti/python_cti.html whitelist_externals = true commands = true -deps = +deps = [testenv:py35] basepython = python3.5 @@ -89,6 +89,11 @@ basepython = python3 commands = functest-run-suite --keep-model --dev +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + [flake8] ignore = E402,E226 exclude = */charmhelpers From eb0cfe8cc47cf7678c2817585fd63d2da49a7dce Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 8 Jul 2019 16:31:10 +0200 Subject: [PATCH 1785/2699] Enable bundle specific tox zaza target Change-Id: Ia9770187b471f32f7dbeeb3630f367174ea0314f --- ceph-radosgw/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 01095fc5..1d1e3cb2 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -53,6 +53,11 @@ basepython = python3 commands = functest-run-suite --keep-model --dev +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + [flake8] ignore = E402,E226 exclude = */charmhelpers From 8d144f03e5c5072828f87db2a985dbe985dafc12 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 8 Jul 2019 16:31:11 +0200 Subject: [PATCH 1786/2699] Enable bundle specific tox zaza target Change-Id: I400cca48767510bfc79ac056ba42b119127e682f --- ceph-rbd-mirror/src/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index ce451062..cd35f5c3 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -31,5 +31,10 @@ basepython = python3 commands = functest-run-suite --keep-model --smoke +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + [testenv:venv] commands = {posargs} From 3ad5ebda33605eeb887d4e0cffdc55e524e20f9d Mon Sep 17 00:00:00 2001 From: Trent Lloyd Date: Tue, 18 Jun 2019 12:43:17 +0800 Subject: [PATCH 1787/2699] bluestore-wal only needed if separate to DB device Update config.yaml to clarify that bluestore-wal should only be set where a separate (faster) device is being used for the WAL. Otherwise, the WAL is automatically maintained within the space of the DB device and does not need to be configured separately. Additionally clarify that this device is used as an LVM PV and space is allocated for each block device based on the bluestore-block-{db,wal}-size setting. Change-Id: I54fc582ecb2cee5de1302685e9103c636c7a307b Ref: http://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/ --- ceph-osd/config.yaml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 6fe04ffe..b84fd3ac 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -85,12 +85,24 @@ options: type: string default: description: | - Path to a BlueStore WAL block device or file. + Path to a BlueStore WAL block device or file. Should only be set if using + a separate physical device that is faster than the DB device (such as an + NVDIMM or faster SSD). Otherwise BlueStore automatically maintains the + WAL inside of the DB device. This block device is used as an LVM PV and + then space is allocated for each block device as needed based on the + bluestore-block-wal-size setting. bluestore-db: type: string default: description: | - Path to a BlueStore WAL db block device or file + Path to a BlueStore WAL db block device or file. 
If you have a separate + physical device faster than the block device this will store all of the + filesystem metadata (RocksDB) there and also integrates the Write Ahead + Log (WAL) unless a further separate bluestore-wal device is configured + which is not needed unless it is faster again than the bluestore-db + device. This block device is used as an LVM PV and then space is + allocated for each block device as needed based on the + bluestore-block-db-size setting. osd-journal-size: type: int default: 1024 From 8a6370c6e8991107bbd7f54f8f7b283eda7e7a93 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Jul 2019 15:01:32 +0200 Subject: [PATCH 1788/2699] Sync charm-helpers Additionally, this has unit test fixes for a CephContext update and a Keystone V3 update that came with this sync. Change-Id: I8ad78dbebf94ac0e6d0bcee6af2e24552c7175a3 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 19 +++ .../charmhelpers/contrib/charmsupport/nrpe.py | 35 ++++- .../contrib/openstack/amulet/deployment.py | 20 +++ .../contrib/openstack/amulet/utils.py | 14 +- .../audits/openstack_security_guide.py | 10 +- .../contrib/openstack/cert_utils.py | 6 +- .../charmhelpers/contrib/openstack/context.py | 142 ++++++++++++++++-- .../charmhelpers/contrib/openstack/neutron.py | 5 + .../templates/section-oslo-notifications | 3 + .../openstack/templates/vendor_data.json | 1 + .../charmhelpers/contrib/openstack/utils.py | 14 ++ .../contrib/storage/linux/ceph.py | 19 ++- .../contrib/storage/linux/utils.py | 8 +- .../hooks/charmhelpers/fetch/ubuntu.py | 16 +- ceph-radosgw/tests/bundles/bionic-stein.yaml | 43 ++++++ ceph-radosgw/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-radosgw/tests/tests.yaml | 3 +- .../unit_tests/test_ceph_radosgw_context.py | 2 + 18 files changed, 329 insertions(+), 33 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/vendor_data.json create mode 100644 ceph-radosgw/tests/bundles/bionic-stein.yaml diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 54b73822..f319c707 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -227,3 +227,22 @@ def __call__(self): return ctxt return {} + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'fsid' not in ctxt: + return False + return context.OSContextGenerator.context_complete(self, ctxt) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0626b328..a3d89936 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -33,6 +33,7 @@ hook_name, local_unit, log, + relation_get, relation_ids, relation_set, relations_of_type, @@ -260,11 +261,23 @@ def __init__(self, hostname=None, primary=True): relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) - for rid in relation_ids('nrpe-external-master'): + for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: @@ -281,6 +294,7 @@ def remove_check(self, *args, **kwargs): check = Check(*args, **kwargs) check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) def write(self): try: @@ -313,7 +327,24 @@ def write(self): monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. 
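# (Illustration only, hypothetical check name: the YAML payload set on
#  the relation has the shape
#      {'monitors': {'remote': {'nrpe': {'check_foo': {...}}}}}
#  hence the merge above indexes
#  old_monitors['monitors']['remote']['nrpe'] and drops any shortnames
#  queued in self.remove_check_queue before adding the new checks.)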
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 8e57467b..77925cc2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -294,8 +294,10 @@ def _get_openstack_release(self): ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, } return releases[(self.series, self.openstack)] @@ -313,6 +315,7 @@ def _get_openstack_release_string(self): ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -320,6 +323,23 @@ def _get_openstack_release_string(self): else: return releases[self.series] + def get_percona_service_entry(self, memory_constraint=None): + """Return a amulet service entry for percona cluster. + + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. + :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + def get_ceph_expected_pools(self, radosgw=False): """Return a list of expected ceph pools in a ceph + cinder + glance test scenario, based on OpenStack release and whether ceph radosgw diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 53fa6506..0a5f81bd 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -54,11 +54,15 @@ OPENSTACK_RELEASES_PAIRS = [ 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein'] + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', +] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index e5b7ac1e..b7b8a60f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -126,7 +126,11 @@ def _config_ini(path): :returns: Configuration contained in path :rtype: Dict """ - conf = configparser.ConfigParser() + # When strict is enabled, duplicate options are not 
allowed in the + # parsed INI; however, Oslo allows duplicate values. This change + # causes us to ignore the duplicate values which is acceptable as + # long as we don't validate any multi-value options + conf = configparser.ConfigParser(strict=False) conf.read(path) return dict(conf) @@ -204,7 +208,7 @@ def validate_file_ownership(config): "Invalid ownership configuration: {}".format(key)) owner = options.get('owner', config.get('owner', 'root')) group = options.get('group', config.get('group', 'root')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): @@ -226,7 +230,7 @@ def validate_file_permissions(config): raise RuntimeError( "Invalid ownership configuration: {}".format(key)) mode = options.get('mode', config.get('permissions', '600')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 0ba57024..b494af64 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -106,9 +106,11 @@ def get_request(self): sans = sorted(list(set(entry['addresses']))) request[entry['cn']] = {'sans': sans} if self.json_encode: - return {'cert_requests': json.dumps(request, sort_keys=True)} + req = {'cert_requests': json.dumps(request, sort_keys=True)} else: - return {'cert_requests': request} + req = {'cert_requests': request} + req['unit_name'] = local_unit().replace('/', '_') + return req def get_certificate_request(json_encode=True): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 51ee03b8..a6545e12 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -258,7 +258,7 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } - if CompareOpenStackReleases(rel) < 'stein': + if CompareOpenStackReleases(rel) < 'queens': ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) @@ -362,7 +362,7 @@ def _get_pkg_name(self, python_name='keystonemiddleware'): pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) for pkg in pkg_names: - if not filter_installed_packages(pkg): + if not filter_installed_packages((pkg,)): return pkg return None @@ -443,8 +443,10 @@ def __call__(self): 'api_version': api_version}) if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('service_domain')}) + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) # we keep all veriables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic @@ -521,6 +523,86 @@ def __call__(self): return {} +class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the 
NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces. + :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, @@ -647,6 +729,10 @@ def __call__(self): if notification_format: ctxt['notification_format'] = notification_format + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + if not self.complete: return {} @@ -698,6 +784,25 @@ def __call__(self): ensure_packages(['ceph-common']) return ctxt + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes @@ -1188,7 +1293,9 @@ def resolve_ports(self, ports): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(): + extant_nics = list_nics() + + for nic in extant_nics: # Ignore virtual interfaces (bond masters will be identified from # their slaves) if not is_phy_iface(nic): @@ -1219,10 +1326,11 @@ def resolve_ports(self, ports): # Entry is a MAC address for a valid interface that doesn't # have an IP address assigned yet. resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). + elif entry in extant_nics: + # If the passed entry is not a MAC address and the interface + # exists, assume it's a valid interface, and that the user put + # it there on purpose (we can trust it to be the real external + # network). resolved.append(entry) # Ensure no duplicates @@ -1604,6 +1712,18 @@ def __call__(self): 'rel_key': 'enable-nsg-logging', 'default': False, }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1665,13 +1785,13 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: - # Map of {port/mac:bridge} + # Map of {bridge:port/mac} portmap = parse_data_port_mappings(ports) ports = portmap.keys() # Resolve provided ports or mac addresses and filter out those # already attached to a bridge. resolved = self.resolve_ports(ports) - # FIXME: is this necessary? + # Rebuild port index using resolved and filtered ports. 
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index 0f847f56..fb5607f3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -217,6 +217,11 @@ def neutron_plugins(): plugins['nsx']['config'] = '/etc/neutron/nsx.ini' plugins['vsp']['driver'] = ( 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] return plugins diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 7bb43d4f..71c7eb06 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -2,6 +2,9 @@ [oslo_messaging_notifications] driver = {{ oslo_messaging_driver }} transport_url = {{ transport_url }} +{% if send_notifications_to_logs %} +driver = log +{% endif %} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/vendor_data.json b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/vendor_data.json new file mode 100644 index 00000000..904f612a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/vendor_data.json @@ -0,0 +1 @@ +{{ vendor_data_json }} \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 1914ab84..d43a4d20 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -120,6 +120,7 @@ 'queens', 'rocky', 'stein', + 'train', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -139,6 +140,7 @@ ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) @@ -159,6 +161,7 @@ ('2018.1', 'queens'), ('2018.2', 'rocky'), ('2019.1', 'stein'), + ('2019.2', 'train'), ]) # The ugly duckling - must list releases oldest to newest @@ -195,6 +198,8 @@ ['2.18.0', '2.19.0']), ('stein', ['2.20.0', '2.21.0']), + ('train', + ['2.22.0']), ]) # >= Liberty version->codename mapping @@ -208,6 +213,7 @@ ('17', 'queens'), ('18', 'rocky'), ('19', 'stein'), + ('20', 'train'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -218,6 +224,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -228,6 +235,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -238,6 +246,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -248,6 +257,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -258,6 +268,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + 
('13', 'train'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -268,6 +279,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -278,6 +290,7 @@ ('16', 'queens'), ('17', 'rocky'), ('18', 'stein'), + ('19', 'train'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -288,6 +301,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), } diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2c62092c..a9864467 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. + + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + def is_broker_action_done(action, rid=None, unit=None): """Check whether broker action has completed yet. @param action: name of action to be performed @returns True if action complete otherwise False """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False @@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None): @param action: name of action to be performed @returns None """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index c57aaf35..a3561760 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -110,17 +110,19 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False): +def mkfs_xfs(device, force=False, inode_size=1024): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. 
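
    For example (the device path is hypothetical)::

        mkfs_xfs('/dev/vdb', force=True, inode_size=512)
        # runs: mkfs.xfs -f -i size=512 /dev/vdb
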
:param device: Full path to device to format :ptype device: tr :param force: Force operation - :ptype: force: boolean""" + :ptype: force: boolean + :param inode_size: XFS inode size in bytes + :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', 'size=1024', device] + cmd += ['-i', "size={}".format(inode_size), device] check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index c6d9341e..24c76e34 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -173,6 +173,14 @@ 'stein/proposed': 'bionic-proposed/stein', 'bionic-stein/proposed': 'bionic-proposed/stein', 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', } @@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False): for r, fn in six.iteritems(_mapping): m = re.match(r, source) if m: - # call the assoicated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. try: import_key(key) except GPGKeyError as e: raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) break else: # nothing matched. log an error and maybe sys.exit diff --git a/ceph-radosgw/tests/bundles/bionic-stein.yaml b/ceph-radosgw/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..0cb9f50c --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-stein.yaml @@ -0,0 +1,43 @@ +options: + source: &source cloud:bionic-stein +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml index e6e6b17d..119e1d8c 100644 --- a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml +++ b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml @@ -24,7 +24,7 @@ applications: source: *source auth-supported: 'none' percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster + charm: cs:trusty/percona-cluster num_units: 1 keystone: expose: True diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index b879711b..adc68702 100644 --- a/ceph-radosgw/tests/tests.yaml +++ 
b/ceph-radosgw/tests/tests.yaml @@ -7,8 +7,9 @@ gate_bundles: - xenial-queens - bionic-queens - bionic-rocky + - bionic-stein smoke_bundles: - - bionic-rocky + - bionic-stein dev_bundles: - cosmic-rocky - bionic-queens-multisite diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 1ba768f2..adb56f79 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -248,8 +248,10 @@ def test_ids_ctxt_v3( 'auth_protocol': 'http', 'auth_type': 'keystone', 'cache_size': '42', + 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', 'service_host': '127.0.0.4', 'service_port': 9876, + 'service_project_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'service_protocol': 'http', 'user_roles': 'Babel,Dart', } From 02bb358077673cd5370c3a209bfd676f44b00c79 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Jul 2019 15:00:37 +0200 Subject: [PATCH 1789/2699] Sync charm-helpers Change-Id: I9923cd3d894e20511a32e23028075234df7f8de2 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 35 ++- .../contrib/openstack/amulet/deployment.py | 20 ++ .../contrib/openstack/amulet/utils.py | 14 +- .../audits/openstack_security_guide.py | 10 +- .../contrib/openstack/cert_utils.py | 10 +- .../charmhelpers/contrib/openstack/context.py | 218 +++++++++++++++++- .../charmhelpers/contrib/openstack/neutron.py | 5 + .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 19 +- .../contrib/storage/linux/utils.py | 8 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 16 +- ceph-mon/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-mon/tests/tests.yaml | 2 +- 13 files changed, 342 insertions(+), 33 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0626b328..a3d89936 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -33,6 +33,7 @@ hook_name, local_unit, log, + relation_get, relation_ids, relation_set, relations_of_type, @@ -260,11 +261,23 @@ def __init__(self, hostname=None, primary=True): relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) - for rid in relation_ids('nrpe-external-master'): + for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: @@ -281,6 +294,7 @@ def remove_check(self, *args, **kwargs): check = Check(*args, **kwargs) check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) def write(self): try: @@ -313,7 +327,24 @@ def write(self): monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = 
old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 8e57467b..77925cc2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -294,8 +294,10 @@ def _get_openstack_release(self): ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, } return releases[(self.series, self.openstack)] @@ -313,6 +315,7 @@ def _get_openstack_release_string(self): ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -320,6 +323,23 @@ def _get_openstack_release_string(self): else: return releases[self.series] + def get_percona_service_entry(self, memory_constraint=None): + """Return an amulet service entry for percona cluster. + + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry.
+ :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + def get_ceph_expected_pools(self, radosgw=False): """Return a list of expected ceph pools in a ceph + cinder + glance test scenario, based on OpenStack release and whether ceph radosgw diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 53fa6506..0a5f81bd 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -54,11 +54,15 @@ OPENSTACK_RELEASES_PAIRS = [ 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein'] + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', +] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index e5b7ac1e..b7b8a60f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -126,7 +126,11 @@ def _config_ini(path): :returns: Configuration contained in path :rtype: Dict """ - conf = configparser.ConfigParser() + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows duplicate values. 
This change + # causes us to ignore the duplicate values which is acceptable as + # long as we don't validate any multi-value options + conf = configparser.ConfigParser(strict=False) conf.read(path) return dict(conf) @@ -204,7 +208,7 @@ def validate_file_ownership(config): "Invalid ownership configuration: {}".format(key)) owner = options.get('owner', config.get('owner', 'root')) group = options.get('group', config.get('group', 'root')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): @@ -226,7 +230,7 @@ def validate_file_permissions(config): raise RuntimeError( "Invalid ownership configuration: {}".format(key)) mode = options.get('mode', config.get('permissions', '600')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 47b8603a..b494af64 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -106,9 +106,11 @@ def get_request(self): sans = sorted(list(set(entry['addresses']))) request[entry['cn']] = {'sans': sans} if self.json_encode: - return {'cert_requests': json.dumps(request, sort_keys=True)} + req = {'cert_requests': json.dumps(request, sort_keys=True)} else: - return {'cert_requests': request} + req = {'cert_requests': request} + req['unit_name'] = local_unit().replace('/', '_') + return req def get_certificate_request(json_encode=True): @@ -220,6 +222,8 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -235,6 +239,8 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + return True + return False def get_requests_for_local_unit(relation_name=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index d5133713..a6545e12 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -117,6 +117,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" def ensure_packages(packages): @@ -257,7 +258,7 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } - if CompareOpenStackReleases(rel) < 'stein': + if CompareOpenStackReleases(rel) < 'queens': ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) @@ -351,10 +352,70 @@ def _setup_pki_cache(self): return cachedir return None + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. 
+ + :param python_name: name of the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages((pkg,)): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. + + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + def __call__(self): log('Generating template context for ' + self.rel_name, level=DEBUG) ctxt = {} + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + cachedir = self._setup_pki_cache() if cachedir: ctxt['signing_dir'] = cachedir @@ -382,8 +443,18 @@ def __call__(self): 'api_version': api_version}) if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('service_domain')}) + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) + + # we keep all variables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse @@ -452,6 +523,86 @@ def __call__(self): return {} + class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces.
+ :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, @@ -569,6 +720,19 @@ def __call__(self): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + if not self.complete: return {} @@ -620,6 +784,25 @@ def __call__(self): ensure_packages(['ceph-common']) return ctxt + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
+ + :param ctxt: The current context members + :type ctxt: Dict[str, Any] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes @@ -1110,7 +1293,9 @@ def resolve_ports(self, ports): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(): + extant_nics = list_nics() + + for nic in extant_nics: # Ignore virtual interfaces (bond masters will be identified from # their slaves) if not is_phy_iface(nic): @@ -1141,10 +1326,11 @@ def resolve_ports(self, ports): # Entry is a MAC address for a valid interface that doesn't # have an IP address assigned yet. resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). + elif entry in extant_nics: + # If the passed entry is not a MAC address and the interface + # exists, assume it's a valid interface, and that the user put + # it there on purpose (we can trust it to be the real external + # network). resolved.append(entry) # Ensure no duplicates @@ -1526,6 +1712,18 @@ def __call__(self): 'rel_key': 'enable-nsg-logging', 'default': False, }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1587,13 +1785,13 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: # Map of {port/mac:bridge}, as returned by parse_data_port_mappings portmap = parse_data_port_mappings(ports) ports = portmap.keys() # Resolve provided ports or mac addresses and filter out those # already attached to a bridge. resolved = self.resolve_ports(ports) - # FIXME: is this necessary? + # Rebuild port index using resolved and filtered ports.
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index 0f847f56..fb5607f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -217,6 +217,11 @@ def neutron_plugins(): plugins['nsx']['config'] = '/etc/neutron/nsx.ini' plugins['vsp']['driver'] = ( 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] return plugins diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e5e25369..d43a4d20 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -120,6 +120,7 @@ 'queens', 'rocky', 'stein', + 'train', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -139,6 +140,7 @@ ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) @@ -159,6 +161,7 @@ ('2018.1', 'queens'), ('2018.2', 'rocky'), ('2019.1', 'stein'), + ('2019.2', 'train'), ]) # The ugly duckling - must list releases oldest to newest @@ -194,7 +197,9 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.20.0']), + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0']), ]) # >= Liberty version->codename mapping @@ -208,6 +213,7 @@ ('17', 'queens'), ('18', 'rocky'), ('19', 'stein'), + ('20', 'train'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -218,6 +224,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -228,6 +235,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -238,6 +246,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -248,6 +257,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -258,6 +268,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -268,6 +279,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -278,6 +290,7 @@ ('16', 'queens'), ('17', 'rocky'), ('18', 'stein'), + ('19', 'train'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -288,6 +301,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), } diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2c62092c..a9864467 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. 
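+
+        Example usage (relation id and unit are hypothetical)::
+
+            if has_broker_rsp(rid='ceph:1', unit='ceph-mon/0'):
+                rsp = relation_get(rid='ceph:1', unit='ceph-mon/0')
+                # ... act on the broker response ...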
+ + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + def is_broker_action_done(action, rid=None, unit=None): """Check whether broker action has completed yet. @param action: name of action to be performed @returns True if action complete otherwise False """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False @@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None): @param action: name of action to be performed @returns None """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py index c57aaf35..a3561760 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -110,17 +110,19 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False): +def mkfs_xfs(device, force=False, inode_size=1024): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. :param device: Full path to device to format :ptype device: tr :param force: Force operation - :ptype: force: boolean""" + :ptype: force: boolean + :param inode_size: XFS inode size in bytes + :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', 'size=1024', device] + cmd += ['-i', "size={}".format(inode_size), device] check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index c6d9341e..24c76e34 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -173,6 +173,14 @@ 'stein/proposed': 'bionic-proposed/stein', 'bionic-stein/proposed': 'bionic-proposed/stein', 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', } @@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False): for r, fn in six.iteritems(_mapping): m = re.match(r, source) if m: - # call the assoicated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. try: import_key(key) except GPGKeyError as e: raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) break else: # nothing matched. 
log an error and maybe sys.exit diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml index 058c72b0..bf56d66f 100644 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -25,7 +25,7 @@ applications: constraints: virt-type=kvm percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster + charm: cs:trusty/percona-cluster num_units: 1 options: dataset-size: 25% diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index b504616f..7b1f7c21 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -9,7 +9,7 @@ gate_bundles: - xenial-mitaka - trusty-mitaka smoke_bundles: - - bionic-queens + - bionic-stein dev_bundles: - cosmic-rocky - disco-stein From f174a224387533968f8149c248b7daa7bfd69aba Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Jul 2019 15:01:15 +0200 Subject: [PATCH 1790/2699] Sync charm-helpers Change-Id: I759be5dc13a784efe5850c403275c2c0b288da13 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 35 +++++++++++++++++-- .../charmhelpers/contrib/openstack/utils.py | 16 ++++++++- .../contrib/storage/linux/ceph.py | 19 ++++++++-- .../contrib/storage/linux/utils.py | 8 +++-- ceph-proxy/charmhelpers/fetch/ubuntu.py | 16 +++++++-- ceph-proxy/tests/tests.yaml | 2 +- 6 files changed, 84 insertions(+), 12 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index 0626b328..a3d89936 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -33,6 +33,7 @@ hook_name, local_unit, log, + relation_get, relation_ids, relation_set, relations_of_type, @@ -260,11 +261,23 @@ def __init__(self, hostname=None, primary=True): relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) - for rid in relation_ids('nrpe-external-master'): + for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: @@ -281,6 +294,7 @@ def remove_check(self, *args, **kwargs): check = Check(*args, **kwargs) check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) def write(self): try: @@ -313,7 +327,24 @@ def write(self): monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, 
monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index e5e25369..d43a4d20 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -120,6 +120,7 @@ 'queens', 'rocky', 'stein', + 'train', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -139,6 +140,7 @@ ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) @@ -159,6 +161,7 @@ ('2018.1', 'queens'), ('2018.2', 'rocky'), ('2019.1', 'stein'), + ('2019.2', 'train'), ]) # The ugly duckling - must list releases oldest to newest @@ -194,7 +197,9 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.20.0']), + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0']), ]) # >= Liberty version->codename mapping @@ -208,6 +213,7 @@ ('17', 'queens'), ('18', 'rocky'), ('19', 'stein'), + ('20', 'train'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -218,6 +224,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -228,6 +235,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -238,6 +246,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -248,6 +257,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -258,6 +268,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -268,6 +279,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -278,6 +290,7 @@ ('16', 'queens'), ('17', 'rocky'), ('18', 'stein'), + ('19', 'train'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -288,6 +301,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), } diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 2c62092c..a9864467 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. + + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + def is_broker_action_done(action, rid=None, unit=None): """Check whether broker action has completed yet. 
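
    Example (action name, relation id and unit are hypothetical)::

        if is_broker_action_done('restart_services', rid='ceph:0',
                                 unit='ceph-proxy/0'):
            return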
@param action: name of action to be performed @returns True if action complete otherwise False """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False @@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None): @param action: name of action to be performed @returns None """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py index c57aaf35..a3561760 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py @@ -110,17 +110,19 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False): +def mkfs_xfs(device, force=False, inode_size=1024): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. :param device: Full path to device to format :ptype device: tr :param force: Force operation - :ptype: force: boolean""" + :ptype: force: boolean + :param inode_size: XFS inode size in bytes + :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', 'size=1024', device] + cmd += ['-i', "size={}".format(inode_size), device] check_call(cmd) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index c6d9341e..24c76e34 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -173,6 +173,14 @@ 'stein/proposed': 'bionic-proposed/stein', 'bionic-stein/proposed': 'bionic-proposed/stein', 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', } @@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False): for r, fn in six.iteritems(_mapping): m = re.match(r, source) if m: - # call the assoicated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. try: import_key(key) except GPGKeyError as e: raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) break else: # nothing matched. 
log an error and maybe sys.exit diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 0dc7bc83..b5a568ac 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -26,7 +26,7 @@ dev_bundles: - bionic-stein smoke_bundles: - - bionic-queens + - bionic-stein target_deploy_status: ceph-proxy: From 2aaed5babc10a4e0a8a60cb998abf20b55753386 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 12 Jul 2019 15:00:56 +0200 Subject: [PATCH 1791/2699] Sync charm-helpers Change-Id: I458ce4e8938072bc42f7250eadb1a12b097139ff --- .../charmhelpers/contrib/charmsupport/nrpe.py | 35 ++- .../contrib/openstack/amulet/deployment.py | 20 ++ .../contrib/openstack/amulet/utils.py | 14 +- .../audits/openstack_security_guide.py | 10 +- .../contrib/openstack/cert_utils.py | 10 +- .../charmhelpers/contrib/openstack/context.py | 218 +++++++++++++++++- .../charmhelpers/contrib/openstack/neutron.py | 5 + .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 19 +- .../contrib/storage/linux/utils.py | 8 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 16 +- ceph-osd/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-osd/tests/tests.yaml | 2 +- 13 files changed, 342 insertions(+), 33 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0626b328..a3d89936 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -33,6 +33,7 @@ hook_name, local_unit, log, + relation_get, relation_ids, relation_set, relations_of_type, @@ -260,11 +261,23 @@ def __init__(self, hostname=None, primary=True): relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) - for rid in relation_ids('nrpe-external-master'): + for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: @@ -281,6 +294,7 @@ def remove_check(self, *args, **kwargs): check = Check(*args, **kwargs) check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) def write(self): try: @@ -313,7 +327,24 @@ def write(self): monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. 
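+                # (illustrative) 'monitors' is shaped like:
+                #   {'monitors': {'remote': {'nrpe': {'<shortname>': {'command': ...}}}}}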
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 8e57467b..77925cc2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -294,8 +294,10 @@ def _get_openstack_release(self): ('bionic', None): self.bionic_queens, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, } return releases[(self.series, self.openstack)] @@ -313,6 +315,7 @@ def _get_openstack_release_string(self): ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -320,6 +323,23 @@ def _get_openstack_release_string(self): else: return releases[self.series] + def get_percona_service_entry(self, memory_constraint=None): + """Return an amulet service entry for percona cluster. + + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. + :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + def get_ceph_expected_pools(self, radosgw=False): """Return a list of expected ceph pools in a ceph + cinder + glance test scenario, based on OpenStack release and whether ceph radosgw diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 53fa6506..0a5f81bd 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -54,11 +54,15 @@ OPENSTACK_RELEASES_PAIRS = [ 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein'] + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', +] class OpenStackAmuletUtils(AmuletUtils): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index e5b7ac1e..b7b8a60f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -126,7 +126,11 @@ def _config_ini(path): :returns: Configuration contained in path :rtype: Dict """ - conf = configparser.ConfigParser() + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows
duplicate values. This change + # causes us to ignore the duplicate values which is acceptable as + # long as we don't validate any multi-value options + conf = configparser.ConfigParser(strict=False) conf.read(path) return dict(conf) @@ -204,7 +208,7 @@ def validate_file_ownership(config): "Invalid ownership configuration: {}".format(key)) owner = options.get('owner', config.get('owner', 'root')) group = options.get('group', config.get('group', 'root')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): @@ -226,7 +230,7 @@ def validate_file_permissions(config): raise RuntimeError( "Invalid ownership configuration: {}".format(key)) mode = options.get('mode', config.get('permissions', '600')) - optional = options.get('optional', config.get('optional', 'False')) + optional = options.get('optional', config.get('optional', False)) if '*' in file_name: for file in glob.glob(file_name): if file not in files.keys(): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 47b8603a..b494af64 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -106,9 +106,11 @@ def get_request(self): sans = sorted(list(set(entry['addresses']))) request[entry['cn']] = {'sans': sans} if self.json_encode: - return {'cert_requests': json.dumps(request, sort_keys=True)} + req = {'cert_requests': json.dumps(request, sort_keys=True)} else: - return {'cert_requests': request} + req = {'cert_requests': request} + req['unit_name'] = local_unit().replace('/', '_') + return req def get_certificate_request(json_encode=True): @@ -220,6 +222,8 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -235,6 +239,8 @@ def process_certificates(service_name, relation_id, unit, create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + return True + return False def get_requests_for_local_unit(relation_name=None): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index d5133713..a6545e12 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -117,6 +117,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" def ensure_packages(packages): @@ -257,7 +258,7 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } - if CompareOpenStackReleases(rel) < 'stein': + if CompareOpenStackReleases(rel) < 'queens': ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) @@ -351,10 +352,70 @@ def _setup_pki_cache(self): return cachedir return None + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. 
+
+        :param python_name: name of the python package
+        :type: string
+        """
+        pkg_names = map(lambda x: x + python_name, ('python3-', 'python-'))
+
+        for pkg in pkg_names:
+            if not filter_installed_packages((pkg,)):
+                return pkg
+
+        return None
+
+    def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel):
+        """Build Jinja2 context for full rendering of [keystone_authtoken]
+        section with variable names included. Re-constructed from former
+        template 'section-keystone-auth-mitaka'.
+
+        :param ctxt: Jinja2 context returned from self.__call__()
+        :type: dict
+        :param keystonemiddleware_os_rel: OpenStack release name of
+                                          keystonemiddleware package installed
+        """
+        c = collections.OrderedDict((('auth_type', 'password'),))
+
+        # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
+        # see keystonemiddleware upstream sources for more info
+        if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
+            c.update((
+                ('www_authenticate_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+        else:
+            c.update((
+                ('auth_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+
+        c.update((
+            ('auth_url', "{}://{}:{}/v3".format(
+                ctxt.get('auth_protocol', ''),
+                ctxt.get('auth_host', ''),
+                ctxt.get('auth_port', ''))),
+            ('project_domain_name', ctxt.get('admin_domain_name', '')),
+            ('user_domain_name', ctxt.get('admin_domain_name', '')),
+            ('project_name', ctxt.get('admin_tenant_name', '')),
+            ('username', ctxt.get('admin_user', '')),
+            ('password', ctxt.get('admin_password', '')),
+            ('signing_dir', ctxt.get('signing_dir', '')),))
+
+        return c
+
     def __call__(self):
         log('Generating template context for ' + self.rel_name, level=DEBUG)
         ctxt = {}
 
+        keystonemiddleware_os_release = None
+        if self._get_pkg_name():
+            keystonemiddleware_os_release = os_release(self._get_pkg_name())
+
         cachedir = self._setup_pki_cache()
         if cachedir:
             ctxt['signing_dir'] = cachedir
@@ -382,8 +443,18 @@ def __call__(self):
                                'api_version': api_version})
 
             if float(api_version) > 2:
-                ctxt.update({'admin_domain_name':
-                             rdata.get('service_domain')})
+                ctxt.update({
+                    'admin_domain_name': rdata.get('service_domain'),
+                    'service_project_id': rdata.get('service_tenant_id'),
+                    'service_domain_id': rdata.get('service_domain_id')})
+
+                # we keep all variables in ctxt for compatibility and
+                # add nested dictionary for keystone_authtoken generic
+                # templating
+                if keystonemiddleware_os_release:
+                    ctxt['keystone_authtoken'] = \
+                        self._get_keystone_authtoken_ctxt(
+                            ctxt, keystonemiddleware_os_release)
 
             if self.context_complete(ctxt):
                 # NOTE(jamespage) this is required for >= icehouse
@@ -452,6 +523,86 @@ def __call__(self):
         return {}
 
 
+class NovaVendorMetadataContext(OSContextGenerator):
+    """Context used for configuring nova vendor metadata on nova.conf file."""
+
+    def __init__(self, os_release_pkg, interfaces=None):
+        """Initialize the NovaVendorMetadataContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+                               release codename from.
+        :type os_release_pkg: str
+        :param interfaces: list of string values to be used as the Context's
+                           relation interfaces.
+ :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, @@ -569,6 +720,19 @@ def __call__(self): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + if not self.complete: return {} @@ -620,6 +784,25 @@ def __call__(self): ensure_packages(['ceph-common']) return ctxt + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
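+
+        Both 'auth' and 'key' must be present in the context before the
+        inherited completeness check is applied.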
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes @@ -1110,7 +1293,9 @@ def resolve_ports(self, ports): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(): + extant_nics = list_nics() + + for nic in extant_nics: # Ignore virtual interfaces (bond masters will be identified from # their slaves) if not is_phy_iface(nic): @@ -1141,10 +1326,11 @@ def resolve_ports(self, ports): # Entry is a MAC address for a valid interface that doesn't # have an IP address assigned yet. resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). + elif entry in extant_nics: + # If the passed entry is not a MAC address and the interface + # exists, assume it's a valid interface, and that the user put + # it there on purpose (we can trust it to be the real external + # network). resolved.append(entry) # Ensure no duplicates @@ -1526,6 +1712,18 @@ def __call__(self): 'rel_key': 'enable-nsg-logging', 'default': False, }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, } ctxt = self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1587,13 +1785,13 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: - # Map of {port/mac:bridge} + # Map of {bridge:port/mac} portmap = parse_data_port_mappings(ports) ports = portmap.keys() # Resolve provided ports or mac addresses and filter out those # already attached to a bridge. resolved = self.resolve_ports(ports) - # FIXME: is this necessary? + # Rebuild port index using resolved and filtered ports. 
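+            # 'normalized' maps each configured entry (a MAC address or an
+            # interface name) to the interface name it resolved to on this
+            # system.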
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index 0f847f56..fb5607f3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -217,6 +217,11 @@ def neutron_plugins(): plugins['nsx']['config'] = '/etc/neutron/nsx.ini' plugins['vsp']['driver'] = ( 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] return plugins diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index e5e25369..d43a4d20 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -120,6 +120,7 @@ 'queens', 'rocky', 'stein', + 'train', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -139,6 +140,7 @@ ('bionic', 'queens'), ('cosmic', 'rocky'), ('disco', 'stein'), + ('eoan', 'train'), ]) @@ -159,6 +161,7 @@ ('2018.1', 'queens'), ('2018.2', 'rocky'), ('2019.1', 'stein'), + ('2019.2', 'train'), ]) # The ugly duckling - must list releases oldest to newest @@ -194,7 +197,9 @@ ('rocky', ['2.18.0', '2.19.0']), ('stein', - ['2.20.0']), + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0']), ]) # >= Liberty version->codename mapping @@ -208,6 +213,7 @@ ('17', 'queens'), ('18', 'rocky'), ('19', 'stein'), + ('20', 'train'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -218,6 +224,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -228,6 +235,7 @@ ('12', 'queens'), ('13', 'rocky'), ('14', 'stein'), + ('15', 'train'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -238,6 +246,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -248,6 +257,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -258,6 +268,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -268,6 +279,7 @@ ('10', 'queens'), ('11', 'rocky'), ('12', 'stein'), + ('13', 'train'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -278,6 +290,7 @@ ('16', 'queens'), ('17', 'rocky'), ('18', 'stein'), + ('19', 'train'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -288,6 +301,7 @@ ('13', 'queens'), ('14', 'rocky'), ('15', 'stein'), + ('16', 'train'), ]), } diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2c62092c..a9864467 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. 
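+
+    The response is read from the relation data under the key returned
+    by get_broker_rsp_key().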
+ + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + def is_broker_action_done(action, rid=None, unit=None): """Check whether broker action has completed yet. @param action: name of action to be performed @returns True if action complete otherwise False """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False @@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None): @param action: name of action to be performed @returns None """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index c57aaf35..a3561760 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -110,17 +110,19 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False): +def mkfs_xfs(device, force=False, inode_size=1024): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. :param device: Full path to device to format :ptype device: tr :param force: Force operation - :ptype: force: boolean""" + :ptype: force: boolean + :param inode_size: XFS inode size in bytes + :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', 'size=1024', device] + cmd += ['-i', "size={}".format(inode_size), device] check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index c6d9341e..24c76e34 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -173,6 +173,14 @@ 'stein/proposed': 'bionic-proposed/stein', 'bionic-stein/proposed': 'bionic-proposed/stein', 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', } @@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False): for r, fn in six.iteritems(_mapping): m = re.match(r, source) if m: - # call the assoicated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. try: import_key(key) except GPGKeyError as e: raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) break else: # nothing matched. 
log an error and maybe sys.exit diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml index 868bba15..8e4b598f 100644 --- a/ceph-osd/tests/bundles/trusty-mitaka.yaml +++ b/ceph-osd/tests/bundles/trusty-mitaka.yaml @@ -25,7 +25,7 @@ applications: constraints: virt-type=kvm percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster + charm: cs:trusty/percona-cluster num_units: 1 options: dataset-size: 25% diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 7b1dd330..273551d4 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -9,7 +9,7 @@ gate_bundles: - xenial-mitaka - trusty-mitaka smoke_bundles: - - bionic-queens + - bionic-stein dev_bundles: - cosmic-rocky - disco-stein From d0e07fe91b73c777df3303263e6dcc5812d0de74 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 17 Jul 2019 14:57:36 +0200 Subject: [PATCH 1792/2699] Update functional test definitions Change-Id: I4f15381cd1030b4711a5756cede4cada0294e0f7 --- ceph-fs/src/tests/dev-basic-cosmic-rocky | 23 ----------------------- ceph-fs/src/tox.ini | 2 +- 2 files changed, 1 insertion(+), 24 deletions(-) delete mode 100755 ceph-fs/src/tests/dev-basic-cosmic-rocky diff --git a/ceph-fs/src/tests/dev-basic-cosmic-rocky b/ceph-fs/src/tests/dev-basic-cosmic-rocky deleted file mode 100755 index 91cb73c4..00000000 --- a/ceph-fs/src/tests/dev-basic-cosmic-rocky +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on cosmic-rocky.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='cosmic') - deployment.run_tests() diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 0e36e84b..4c5f90ef 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -35,7 +35,7 @@ commands = # Run a specific test as an Amulet smoke test (expected to always pass) basepython = python2.7 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy [testenv:func27-dfs] # Run all deploy-from-source tests which are +x (may not always pass!) 
From c6fb72e7447dd6c6a1cda7f4e8d86c84444d2130 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 17 Jul 2019 15:06:21 +0200 Subject: [PATCH 1793/2699] Update functional test definitions Change-Id: Ic8c456597a1be8c1945906db377b3fe5f1f5f6ec --- ceph-osd/tests/bundles/cosmic-rocky.yaml | 90 ------------------------ ceph-osd/tests/tests.yaml | 1 - 2 files changed, 91 deletions(-) delete mode 100644 ceph-osd/tests/bundles/cosmic-rocky.yaml diff --git a/ceph-osd/tests/bundles/cosmic-rocky.yaml b/ceph-osd/tests/bundles/cosmic-rocky.yaml deleted file mode 100644 index 97a6c6e8..00000000 --- a/ceph-osd/tests/bundles/cosmic-rocky.yaml +++ /dev/null @@ -1,90 +0,0 @@ -series: cosmic -applications: - ceph-osd: - series: cosmic - charm: ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 273551d4..7e214b57 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -11,7 +11,6 @@ gate_bundles: smoke_bundles: - bionic-stein dev_bundles: - - cosmic-rocky - disco-stein configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image From 49fb300064b2822c01b7c3ed42e59ebed8a392b7 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 17 Jul 2019 15:08:35 +0200 Subject: [PATCH 1794/2699] Update functional test definitions Change-Id: I81e4583c1f2d172cbbba0eb3a091d3a0f88a9df2 --- ceph-radosgw/tests/bundles/cosmic-rocky.yaml | 43 -------------------- 
ceph-radosgw/tests/tests.yaml | 15 ++++--- 2 files changed, 7 insertions(+), 51 deletions(-) delete mode 100644 ceph-radosgw/tests/bundles/cosmic-rocky.yaml diff --git a/ceph-radosgw/tests/bundles/cosmic-rocky.yaml b/ceph-radosgw/tests/bundles/cosmic-rocky.yaml deleted file mode 100644 index 6ddbfecd..00000000 --- a/ceph-radosgw/tests/bundles/cosmic-rocky.yaml +++ /dev/null @@ -1,43 +0,0 @@ -options: - source: &source distro -series: cosmic -applications: - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - series: cosmic - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index adc68702..db9d6630 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,17 +1,16 @@ charm_name: ceph-radosgw gate_bundles: - - trusty-mitaka - - xenial-mitaka - - xenial-ocata - - xenial-pike - - xenial-queens - - bionic-queens - - bionic-rocky - bionic-stein + - bionic-rocky + - bionic-queens + - xenial-queens + - xenial-pike + - xenial-ocata + - xenial-mitaka + - trusty-mitaka smoke_bundles: - bionic-stein dev_bundles: - - cosmic-rocky - bionic-queens-multisite - bionic-rocky-multisite tests: From a2dce21bd3d6cd09a23ffdfb9648ee59b3c45594 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 17 Jul 2019 15:09:37 +0200 Subject: [PATCH 1795/2699] Update functional test definitions Change-Id: I3c9eaf8ecdf2acebcaaf26ab256e48dc43c669a1 --- .../src/tests/bundles/cosmic-rocky.yaml | 102 ------------------ ceph-rbd-mirror/src/tests/tests.yaml | 13 ++- 2 files changed, 6 insertions(+), 109 deletions(-) delete mode 100644 ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml deleted file mode 100644 index 2671e2e3..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/cosmic-rocky.yaml +++ /dev/null @@ -1,102 +0,0 @@ -series: cosmic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: distro - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: distro - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: distro - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: distro - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - 
options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: cosmic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror-b: - series: cosmic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph - - ceph-mon -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 7653f783..a9bceb8d 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,14 +1,13 @@ charm_name: ceph-rbd-mirror smoke_bundles: -- bionic-queens -gate_bundles: -- xenial-pike -- xenial-queens -- bionic-queens -- bionic-rocky - bionic-stein -- cosmic-rocky +gate_bundles: - disco-stein +- bionic-stein +- bionic-rocky +- bionic-queens +- xenial-queens +- xenial-pike comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. From 24962254d8b489d876304e3fbcbe07d7703c49c0 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 12 Jul 2019 14:29:14 -0400 Subject: [PATCH 1796/2699] Add Python 3 Train unit tests This is a mechanically generated patch to ensure unit testing is in place for all of the Tested Runtimes for Train. See the Train python3-updates goal document for details: https://governance.openstack.org/tc/goals/train/python3-updates.html Note that python35-charm-jobs is retained since this charm is supported on Xenial. Change-Id: Id0e49e062f3a84841e2f115866aff31ab7fb0a09 Story: #2005924 Task: #34228 --- ceph-osd/.zuul.yaml | 1 + ceph-osd/tox.ini | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index dc276615..7332a874 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,4 +1,5 @@ - project: templates: - python35-charm-jobs + - openstack-python3-train-jobs - openstack-cover-jobs diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 4d35de76..72c0e659 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
[tox] -envlist = pep8,py27,py35,py36 +envlist = pep8,py27,py37 skipsdist = True skip_missing_interpreters = True @@ -35,6 +35,11 @@ basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt From e06a41c136bb8a8b5dae6be835f939711fcc199a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 12 Jul 2019 14:29:13 -0400 Subject: [PATCH 1797/2699] Add Python 3 Train unit tests This is a mechanically generated patch to ensure unit testing is in place for all of the Tested Runtimes for Train. See the Train python3-updates goal document for details: https://governance.openstack.org/tc/goals/train/python3-updates.html Note that python35-charm-jobs is retained since this charm is supported on Xenial. Change-Id: I51693cd7d535f2b5acb5ba118951f466ef7ac51f Story: #2005924 Task: #34228 --- ceph-mon/.zuul.yaml | 1 + ceph-mon/tox.ini | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index dc276615..7332a874 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,4 +1,5 @@ - project: templates: - python35-charm-jobs + - openstack-python3-train-jobs - openstack-cover-jobs diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 3874fee6..99340106 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. [tox] -envlist = pep8,py3 +envlist = pep8,py37 skipsdist = True [testenv] @@ -40,6 +40,11 @@ basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt From b89115d8b7ab75db9987d433d201ca81f2d6ec1e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 12 Jul 2019 14:29:14 -0400 Subject: [PATCH 1798/2699] Add Python 3 Train unit tests This is a mechanically generated patch to ensure unit testing is in place for all of the Tested Runtimes for Train. See the Train python3-updates goal document for details: https://governance.openstack.org/tc/goals/train/python3-updates.html Note that python35-charm-jobs is retained since this charm is supported on Xenial. Change-Id: Ib39664a09e76cb5a3f764057b42d5cd33c1f8f75 Story: #2005924 Task: #34228 --- ceph-proxy/.zuul.yaml | 1 + ceph-proxy/tox.ini | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 7051aeeb..18efca1a 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - python35-charm-jobs + - openstack-python3-train-jobs diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index aa3d6d0c..b09ae94b 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -2,7 +2,7 @@ # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
[tox] -envlist = pep8,py3{5,6} +envlist = pep8,py37 skipsdist = True [testenv] @@ -34,6 +34,11 @@ basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt From eb26dff51f3fd2369430e84857f045d14b8a1ce5 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 12 Jul 2019 14:29:15 -0400 Subject: [PATCH 1799/2699] Add Python 3 Train unit tests This is a mechanically generated patch to ensure unit testing is in place for all of the Tested Runtimes for Train. See the Train python3-updates goal document for details: https://governance.openstack.org/tc/goals/train/python3-updates.html Note that python35-charm-jobs is retained since this charm is supported on Xenial. Change-Id: I92c63ab07a70fb0fa7e7788b1226866a8b228ec9 Story: #2005924 Task: #34228 --- ceph-rbd-mirror/.zuul.yaml | 1 + ceph-rbd-mirror/tox.ini | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml index 7051aeeb..18efca1a 100644 --- a/ceph-rbd-mirror/.zuul.yaml +++ b/ceph-rbd-mirror/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - python35-charm-jobs + - openstack-python3-train-jobs diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 550f5ab6..ee6ada13 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -3,7 +3,7 @@ # within individual charm repos. [tox] skipsdist = True -envlist = pep8,py3 +envlist = pep8,py37 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -66,6 +66,20 @@ commands = coverage xml -o cover/coverage.xml coverage report +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/test-requirements.txt From 23f64f69b7cd29255da5abf8622fe0b74951057b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 12 Jul 2019 14:29:13 -0400 Subject: [PATCH 1800/2699] Add Python 3 Train unit tests This is a mechanically generated patch to ensure unit testing is in place for all of the Tested Runtimes for Train. See the Train python3-updates goal document for details: https://governance.openstack.org/tc/goals/train/python3-updates.html Note that openstack-python35-jobs is added since this charm is supported on Xenial. Change-Id: Ie5a0ac7be4ecb95c109564368710c892f085a844 Story: #2005924 Task: #34228 --- ceph-fs/.zuul.yaml | 3 ++- ceph-fs/tox.ini | 12 +++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 0faad733..2e4862c1 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -1,5 +1,6 @@ - project: templates: - python-charm-jobs - - openstack-python36-jobs + - openstack-python35-jobs + - openstack-python3-train-jobs - openstack-cover-jobs diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 9ed983b2..718be2b5 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -3,7 +3,7 @@ # within individual charm repos. 
[tox] skipsdist = True -envlist = pep8,py36 +envlist = pep8,py37 skip_missing_interpreters = True [testenv] @@ -33,11 +33,21 @@ deps = whitelist_externals = true commands = true +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run {posargs} + [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt commands = stestr run {posargs} +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run {posargs} + [testenv:pep8] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt From f8d06678140f44c64d3396341f270df781c61440 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 30 Jul 2019 17:59:46 -0400 Subject: [PATCH 1801/2699] add link to charms deployment guide Change-Id: Idff68a51ec858a79d99668dbb6396bf6045f5c82 --- ceph-rbd-mirror/src/README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md index d88dd3f4..0b62bd59 100644 --- a/ceph-rbd-mirror/src/README.md +++ b/ceph-rbd-mirror/src/README.md @@ -47,7 +47,7 @@ This addresses both High Availability and performance concerns. You can make use of this feature by increasing the number of ``ceph-rbd-mirror`` units in your deployment. - The charm is written for Two-way Replication, which give you the ability to + The charm is written for Two-way Replication, which gives you the ability to fail over and fall back to/from a secondary site. Ceph does have support for mirroring to any number of slave clusters but @@ -60,6 +60,12 @@ Mirror daemon about network configuration by binding the ``public`` and The RBD Mirror daemon will use the network associated with the ``cluster`` endpoint for mirroring traffic when available. +For more information on charms and RBD Mirroring see the [Ceph RBD Mirroring][ceph-rbd-mirroring] section of the +[OpenStack Charms Deployment Guide][charms-deploy-guide]. + +[ceph-rbd-mirroring]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-rbd-mirror.html +[charms-deploy-guide]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/index.html + # Deployment Example bundles with a minimal test configuration can be found From 6c8f4d2ee9201a6da22ccd13e07a83b57940febe Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 2 Aug 2019 10:54:29 -0700 Subject: [PATCH 1802/2699] Rebuild for sync charms.openstack Change-Id: Iac4d36d9a98dc4c0d09c21ce851e497de27cd1fc Partial-Bug: #1838783 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 551ab478..862a4081 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -120650ec-5aab-11e9-a87e-fbc92e9be59b +6a4e8d66-b54e-11e9-9e21-27ae1864b062 From 84da81603b6ebdb38f620d4db71b4cd18e2c1bed Mon Sep 17 00:00:00 2001 From: David Ames Date: Fri, 2 Aug 2019 10:54:43 -0700 Subject: [PATCH 1803/2699] Rebuild for sync charms.openstack Change-Id: I7234afe50a0edabac5b72b8536b33cbf3c320149 Partial-Bug: #1838783 --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 551ab478..862a4081 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -120650ec-5aab-11e9-a87e-fbc92e9be59b +6a4e8d66-b54e-11e9-9e21-27ae1864b062 From 662c38a544bea502a0a609f391e0c10e458db859 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 2 Aug 2019 23:33:49 -0400 Subject: [PATCH 1804/2699] rewrite README file Change-Id: Iebf54bccbb0cbb802590697f46a0f00a5db94de9 --- ceph-rbd-mirror/src/README.md | 249 ++++++++++++++++++++-------------- 1 file changed, 150 insertions(+), 99 deletions(-) diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md index 0b62bd59..39ffefa0 100644 --- a/ceph-rbd-mirror/src/README.md +++ b/ceph-rbd-mirror/src/README.md @@ -1,151 +1,194 @@ # Overview -The ``ceph-rbd-mirror`` charm supports deployment of the Ceph RBD Mirror daemon -and helps automate remote creation and configuration of mirroring for Ceph -pools used to host RBD images. +The `ceph-rbd-mirror` charm deploys the Ceph `rbd-mirror` daemon and helps +automate remote creation and configuration of mirroring for Ceph pools used for +hosting RBD images. Actions for operator driven failover and fallback of the +RBD image pools are also provided. -Actions for operator driven failover and fallback for the pools used for RBD -images is also provided. +> **Note**: The `ceph-rbd-mirror` charm addresses only one specific element in + datacentre redundancy. Refer to [Ceph RADOS Gateway Multisite Replication][ceph-multisite-replication] + and other work to arrive at a complete solution. - Data center redundancy is a large topic and this work addresses a very - specific piece in the puzzle related to Ceph RBD images. You need to - combine this with `Ceph RADOS Gateway Multisite replication`_ and other - work to get a complete solution. +For more information on charms and RBD mirroring see the [Ceph RBD Mirroring][ceph-rbd-mirroring] +appendix in the [OpenStack Charms Deployment Guide][charms-deploy-guide]. -.. _Ceph RADOS Gateway Multisite replication: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html +# Functionality -This is supported both for multiple distinct Ceph clusters within a single Juju -model and between different models with help from cross-model relations. +The charm has the following major features: -When the charm is related to a local and a remote Ceph cluster it will -automatically create pools eligible for mirroring on the remote cluster and -enable mirroring. +- Support for a maximum of two Ceph clusters. The clusters may reside within a + single model or be contained within two separate models. -Eligible pools are selected on the basis of Ceph pool tagging and all pools -with the application ``rbd`` enabled on them will be selected. +- Specifically written for two-way replication. 
This provides the ability to + fail over and fall back to/from a single secondary site. Ceph does have + support for mirroring to any number of clusters but the charm does not + support this. - As of the 19.04 charm release charms will automatically have newly created - pools for use with RBD tagged with the ``rbd`` tag. +- Automatically creates and configures (for mirroring) pools in the remote + cluster based on any pools in the local cluster that are labelled with the + 'rbd' tag. - Only mirroring of whole pools is supported by the charm. +- Mirroring of whole pools only. Ceph itself has support for the mirroring of + individual images but the charm does not support this. -A prerequisite for RBD Mirroring is that every RBD image within each pool is -created with the ``journaling`` and ``exclusive-lock`` image features enabled. +- Network space aware. The mirror daemon can be informed about network + configuration by binding the `public` and `cluster` endpoints. The daemon + will use the network associated with the `cluster` endpoint for mirroring + traffic. -To support this the ``ceph-mon`` charm will announce these image features over -the ``client`` relation when it has units connected to its ``rbd-mirror`` -endpoint. This will ensure that images created in the deployment get the -appropriate features to support mirroring. +Other notes on RBD mirroring: - RBD Mirroring is only supported when deployed with Ceph Luminous or later. +- Supports multiple running instances of the mirror daemon in each cluster. + Doing so allows for the dynamic re-distribution of the mirroring load amongst + the daemons. This addresses both high availability and performance concerns. + Leverage this feature by scaling out the `ceph-rbd-mirror` application (i.e. + add more units). -The Ceph RBD Mirror feature supports running multiple instances of the daemon. -Having multiple daemons will cause the mirroring load to automatically be -(re-)distributed between the daemons. +- Requires that every RBD image within each pool is created with the + `journaling` and `exclusive-lock` image features enabled. The charm enables + these features by default and the `ceph-mon` charm will announce them over + the `client` relation when it has units connected to its `rbd-mirror` + endpoint. -This addresses both High Availability and performance concerns. You can -make use of this feature by increasing the number of ``ceph-rbd-mirror`` units -in your deployment. +- The feature first appeared in Ceph `v.12.2` (Luminous). - The charm is written for Two-way Replication, which gives you the ability to - fail over and fall back to/from a secondary site. +# Deployment - Ceph does have support for mirroring to any number of slave clusters but - this is not implemented nor supported by the charm. +It is assumed that the two Ceph clusters have been set up (i.e. `ceph-mon` and +`ceph-osd` charms are deployed and relations added). -The charm is aware of network spaces and you will be able to tell the RBD -Mirror daemon about network configuration by binding the ``public`` and -``cluster`` endpoints. +> **Note**: Minimal two-cluster test bundles can be found in the + `src/tests/bundles` subdirectory where both the one-model and two-model + scenarios are featured. -The RBD Mirror daemon will use the network associated with the ``cluster`` -endpoint for mirroring traffic when available. 
+## Using one model -For more information on charms and RBD Mirroring see the [Ceph RBD Mirroring][ceph-rbd-mirroring] section of the -[OpenStack Charms Deployment Guide][charms-deploy-guide]. +Deploy the charm for each cluster, giving each application a name to +distinguish one from the other (site 'a' and site 'b'): -[ceph-rbd-mirroring]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-rbd-mirror.html -[charms-deploy-guide]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/index.html + juju deploy ceph-rbd-mirror ceph-rbd-mirror-a + juju deploy ceph-rbd-mirror ceph-rbd-mirror-b -# Deployment +Add a relation between the 'ceph-mon' of site 'a' and both the local (site 'a') +and remote (site 'b') units of 'ceph-rbd-mirror': - Example bundles with a minimal test configuration can be found - in the ``tests/bundles`` subdirectory of the ``ceph-rbd-mirror`` charm. + juju add-relation ceph-mon-a ceph-rbd-mirror-a:ceph-local + juju add-relation ceph-mon-a ceph-rbd-mirror-b:ceph-remote - Both examples of two Ceph clusters deployed in one model and Ceph clusters - deployed in separate models are available. +Perform the analogous procedure for the 'ceph-mon' of site 'b': -To make use of cross model relations you must first set up an offer to export -a application endpoint from a model. In this example we use the model names -``site-a`` and ``site-b``. + juju add-relation ceph-mon-b ceph-rbd-mirror-b:ceph-local + juju add-relation ceph-mon-b ceph-rbd-mirror-a:ceph-remote + +## Using two models + +In model 'site-a', deploy the charm and add the local relation: juju switch site-a - juju offer ceph-mon:rbd-mirror site-a-rbd-mirror + juju deploy ceph-rbd-mirror ceph-rbd-mirror-a + juju add-relation ceph-mon-a ceph-rbd-mirror-a:ceph-local + +To create the inter-site relation one must export one of the application +endpoints from the model by means of an "offer". Here, we make an offer for +'ceph-rbd-mirror': + + juju offer ceph-rbd-mirror-a:ceph-remote + Application "ceph-rbd-mirror-a" endpoints [ceph-remote] available at "admin/site-a.ceph-rbd-mirror-a" + +Perform the analogous procedure in the other model ('site-b'): juju switch site-b - juju offer ceph-mon:rbd-mirror site-b-rbd-mirror + juju deploy ceph-rbd-mirror ceph-rbd-mirror-b + juju add-relation ceph-mon-b ceph-rbd-mirror-b:ceph-local + juju offer ceph-rbd-mirror-b:ceph-remote + application "ceph-rbd-mirror-b" endpoints [ceph-remote] available at "admin/site-b.ceph-rbd-mirror-b" +Add the *cross model relations* by referring to the offer URLs (included in the +output above) as if they were application endpoints in each respective model. -After creating the offers we can import the remote offer to a model and add -a relation between applications just like we normally would do in a -single-model deployment. +For site 'a': juju switch site-a - juju consume admin/site-b.site-b-rbd-mirror - juju add-relation ceph-rbd-mirror:ceph-remote site-b-rbd-mirror + juju add-relation ceph-mon-a admin/site-b.ceph-rbd-mirror-b + +For site 'b': juju switch site-b - juju consume admin/site-a.site-a-rbd-mirror - juju add-relation ceph-rbd-mirror:ceph-remote site-a-rbd-mirror + juju add-relation ceph-mon-b admin/site-a.ceph-rbd-mirror-a # Usage +Usage procedures covered here touch upon pool creation, failover & fallback, +and recovery. In all cases we presuppose that each cluster resides within a +separate model. 
+ ## Pools -Pools created by other charms through the Ceph broker protocol will -automatically be detected and acted upon. Pools tagged with the ``rbd`` -application will be selected for mirroring. +As of the 19.04 OpenStack Charms release, due to Ceph Luminous, any pool +associated with the RBD application during its creation will automatically be +labelled with the 'rbd' tag. The following occurs together: -If you manually create a pool, either through actions on the ``ceph-mon`` -charm or by talking to Ceph directly, you must inform the ``ceph-rbd-mirror`` -charm about them. +Pool creation ==> RBD application-association ==> 'rbd' tag -This is accomplished by executing the ``refresh-pools`` action. +RBD pools can be created by either a supporting charm (through the Ceph broker +protocol) or manually by the operator: - juju run-action -m site-a ceph-mon/leader --wait create-pool name=mypool \ - app-name=rbd - juju run-action -m site-a ceph-rbd-mirror/leader --wait refresh-pools +1. A charm-created pool (e.g. via `glance`) will automatically be detected and +acted upon (i.e. a remote pool will be set up). -## Failover and Fallback +1. A manually-created pool, whether done via the `ceph-mon` application or +through Ceph directly, will require an action to be run on the +`ceph-rbd-mirror` application leader in order for the remote pool to come +online. -Controlled failover and fallback +For example, a pool is created manually in site 'a' via `ceph-mon` and then +`ceph-rbd-mirror` (of site 'a') is informed about it: - juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True + juju run-action -m site-a ceph-mon-a/leader --wait create-pool name=mypool app-name=rbd + juju run-action -m site-a ceph-rbd-mirror-a/leader --wait refresh-pools - juju run-action -m site-a ceph-rbd-mirror/leader --wait demote +## Failover and fallback - juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True +To manage failover and fallback, the `demote` and `promote` actions are applied +to the `ceph-rbd-mirror` application leader. + +Here, we fail over from site 'a' to site 'b' by demoting site 'a' and promoting +site 'b'. The rest of the commands are status checks: + + juju run-action -m site-a ceph-rbd-mirror-a/leader --wait status verbose=True + juju run-action -m site-b ceph-rbd-mirror-b/leader --wait status verbose=True + + juju run-action -m site-a ceph-rbd-mirror-a/leader --wait demote + + juju run-action -m site-a ceph-rbd-mirror-a/leader --wait status verbose=True + juju run-action -m site-b ceph-rbd-mirror-b/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror/leader --wait promote + juju run-action -m site-b ceph-rbd-mirror-b/leader --wait promote -__NOTE__ When using Ceph Luminous, the mirror status information may not be -accurate. Specifically the ``entries_behind_master`` counter may never get to -``0`` even though the image is fully synchronized. +To fall back to site 'a': + + juju run-action -m site-b ceph-rbd-mirror-b/leader --wait demote + juju run-action -m site-a ceph-rbd-mirror-a/leader --wait promote + +> **Note**: When using Ceph Luminous, the mirror status information may not be + accurate. Specifically, the `entries_behind_master` counter may never get to + `0` even though the image has been fully synchronised. 
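+
+The mirroring state can also be inspected directly with the `rbd` CLI from a
+unit holding a suitable keyring. A minimal, illustrative check (the pool name
+`mypool` is an example, not a charm default):
+
+    juju ssh -m site-b ceph-mon-b/0 sudo rbd mirror pool status mypool --verbose
+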
 ## Recovering from abrupt shutdown
 
-There exist failure scenarios where abrupt shutdown and/or interruptions to
-communication may lead to a split-brain situation where the RBD Mirroring
-process in both Ceph clusters claim to be the primary.
+It is possible that an abrupt shutdown and/or an interruption to communication
+channels may lead to a "split-brain" condition. This may cause the mirroring
+daemon in each cluster to claim to be the primary. In such cases, the operator
+must make a call as to which daemon is correct. Generally speaking, this
+means deciding which cluster has the most recent data.
 
-In such a situation the operator must decide which cluster has the most
-recent data and should be elected primary by using the ``demote`` and
-``promote`` (optionally with force parameter) actions.
+Elect a primary by applying the `demote` and `promote` actions to the
+appropriate `ceph-rbd-mirror` leader. After doing so, the `resync-pools`
+action must be run on the secondary cluster leader. The `promote` action may
+require a force option.
 
-After making this decision the secondary cluster must be resynced to track
-the promoted master, this is done by running the ``resync-pools`` action on
-the non-master cluster.
+Here, we make site 'a' the primary by demoting site 'b' and promoting site
+'a':
 
     juju run-action -m site-b ceph-rbd-mirror/leader --wait demote
     juju run-action -m site-a ceph-rbd-mirror/leader --wait promote force=True
@@ -155,13 +198,21 @@ the non-master cluster.
     juju run-action -m site-b ceph-rbd-mirror/leader --wait resync-pools i-really-mean-it=True
 
-__NOTE__ When using Ceph Luminous, the mirror state information will not be
-accurate after recovering from unclean shutdown. Regardless of the output of
-the status information you will be able to write to images after a forced
-promote.
+> **Note**: When using Ceph Luminous, the mirror state information will not be
+  accurate after recovering from unclean shutdown. Regardless of the output of
+  the status information, you will be able to write to images after a forced
+  promote.
 
 # Bugs
 
-Please report bugs on [Launchpad](https://bugs.launchpad.net/charm-ceph-rbd-mirror/+filebug).
+Please report bugs for the `ceph-rbd-mirror` charm on [Launchpad][charm-ceph-rbd-mirror-bugs].
+
+For general questions, refer to the [OpenStack Charm Guide][charms-guide].
 
-For general questions please refer to the OpenStack [Charm Guide](https://docs.openstack.org/charm-guide/latest/).
+
+
+[ceph-multisite-replication]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html
+[ceph-rbd-mirroring]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-rbd-mirror.html
+[charms-deploy-guide]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/index.html
+[charm-ceph-rbd-mirror-bugs]: https://bugs.launchpad.net/charm-ceph-rbd-mirror/+filebug
+[charms-guide]: https://docs.openstack.org/charm-guide/latest/

From bffae6f44aff34a4b6cf9318ca04fe0d496ddb9f Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Wed, 24 Apr 2019 09:21:05 +0200
Subject: [PATCH 1805/2699] Distribute direct charm dependencies

Do not rely on system installed Python packages but distribute the
direct charm dependencies as part of the charm's wheelhouse.

As the span of distributions we need to support with reactive charms
widens we will run into compatibility problems with the current model.
For further reference see juju/charm-helpers#341 and
juju-solutions/layer-basic#135

Change-Id: I129ca4506e44abc1e062d5545ff52f992f86a3b9
---
 ceph-rbd-mirror/src/layer.yaml     | 1 -
 ceph-rbd-mirror/src/wheelhouse.txt | 3 +++
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 ceph-rbd-mirror/src/wheelhouse.txt

diff --git a/ceph-rbd-mirror/src/layer.yaml b/ceph-rbd-mirror/src/layer.yaml
index 68e3b5f8..cc2e549b 100644
--- a/ceph-rbd-mirror/src/layer.yaml
+++ b/ceph-rbd-mirror/src/layer.yaml
@@ -6,5 +6,4 @@ includes:
 options:
   basic:
     use_venv: True
-    include_system_packages: True
 repo: https://github.com/openstack/charm-ceph-rbd-mirror
diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt
new file mode 100644
index 00000000..17c12301
--- /dev/null
+++ b/ceph-rbd-mirror/src/wheelhouse.txt
@@ -0,0 +1,3 @@
+
+jinja2
+psutil

From 340ca25fb00591e32724b8045469345bcfa57fe4 Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Wed, 24 Apr 2019 09:20:14 +0200
Subject: [PATCH 1806/2699] Distribute direct charm dependencies

Do not rely on system installed Python packages but distribute the
direct charm dependencies as part of the charm's wheelhouse.

As the span of distributions we need to support with reactive charms
widens we will run into compatibility problems with the current model.

For further reference see juju/charm-helpers#341 and
juju-solutions/layer-basic#135

Change-Id: Ie868ed20563d3e37e19ba513ccffe575923bb9e4
---
 ceph-fs/src/layer.yaml     | 3 ---
 ceph-fs/src/wheelhouse.txt | 3 +++
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml
index bb34e6ff..e7d3cb84 100644
--- a/ceph-fs/src/layer.yaml
+++ b/ceph-fs/src/layer.yaml
@@ -2,7 +2,4 @@ includes: ['layer:basic', 'layer:apt', 'interface:ceph-mds']
 options:
   status:
     patch-hookenv: False
-  apt:
-    packages:
-      - python3-pyxattr
 repo: https://git.openstack.org/openstack/charm-ceph-fs
diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt
index bbb9970c..56042c3a 100644
--- a/ceph-fs/src/wheelhouse.txt
+++ b/ceph-fs/src/wheelhouse.txt
@@ -1,3 +1,6 @@
 netifaces
 dnspython3
 ceph_api
+pyxattr
+jinja2
+psutil

From b880e2a404073c52ca66bd3213a3a80f5f66ca2c Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Fri, 12 Jul 2019 14:29:15 -0400
Subject: [PATCH 1807/2699] Add Python 3 Train unit tests

This is a mechanically generated patch to ensure unit testing is in
place for all of the Tested Runtimes for Train.

See the Train python3-updates goal document for details:
https://governance.openstack.org/tc/goals/train/python3-updates.html

Change-Id: Ie3b80cb280794688c9a907779c30e049a13936f1
Story: #2005924
Task: #34228
---
 ceph-radosgw/.zuul.yaml |  3 ++-
 ceph-radosgw/tox.ini    | 12 +++++++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml
index affeb907..18efca1a 100644
--- a/ceph-radosgw/.zuul.yaml
+++ b/ceph-radosgw/.zuul.yaml
@@ -1,3 +1,4 @@
 - project:
     templates:
-      - python36-charm-jobs
+      - python35-charm-jobs
+      - openstack-python3-train-jobs
diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini
index 01095fc5..10ac5cd4 100644
--- a/ceph-radosgw/tox.ini
+++ b/ceph-radosgw/tox.ini
@@ -2,7 +2,7 @@
 # This file is managed centrally by release-tools and should not be modified
 # within individual charm repos.
[tox] -envlist = pep8,py36 +envlist = pep8,py37 skipsdist = True [testenv] @@ -22,11 +22,21 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt From 7a6e03358fbb06e2955f2e12117779d881f17fc3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:41:49 -0400 Subject: [PATCH 1808/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Id8b2e499d8b3682c20cd310fd978609adf8cee8d --- ceph-fs/src/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index eb0c2746..ebfb833d 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,8 +12,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan subordinate: false requires: ceph-mds: From 2cdc6826c2d8cdf488d56f7915bc1e79c9a74806 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:41:58 -0400 Subject: [PATCH 1809/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Icc110d1591307c7dbc74d5249f6f8c962ae50e5c --- ceph-mon/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 6866d9c3..ceb660bc 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,8 +12,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan - trusty peers: mon: From 3d3c0652c27f57b39122b6e7943f3bbbfaf8f8c0 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:42:07 -0400 Subject: [PATCH 1810/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: I15be21c0761e6ff84415b2ae004f41fd28b76409 --- ceph-osd/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 59f3cbe9..bc1ed06b 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,8 +13,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan - trusty description: | Ceph is a distributed storage and network file system designed to provide From 7dc4de433639faf2b4d9e3b0d6e5a67508801b05 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:42:16 -0400 Subject: [PATCH 1811/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. 
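The series cleanup in these patches is applied by hand to each charm's
`metadata.yaml`. As a rough sketch (not from these patches), the same check
could be automated along these lines; the EOL set and dev-release name are
point-in-time assumptions reflecting what these commits remove and add:

```python
# Hypothetical lint (not part of these patches): flag EOL Ubuntu series left
# in a charm's metadata.yaml and note if the current dev release is missing.
import yaml

EOL_SERIES = {"cosmic"}   # assumed EOL at the time of these commits
DEV_RELEASE = "eoan"      # the dev release these patches enable

with open("metadata.yaml") as fp:
    series = yaml.safe_load(fp).get("series", [])

for stale in sorted(EOL_SERIES.intersection(series)):
    print("remove EOL series: {}".format(stale))
if DEV_RELEASE not in series:
    print("add dev release: {}".format(DEV_RELEASE))
```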
Change-Id: Ie306d2caaafb16eff0ee4481a691c1f5a2f31cf4 --- ceph-proxy/metadata.yaml | 2 +- ceph-proxy/tests/bundles/trusty-juno.yaml | 30 -------------------- ceph-proxy/tests/bundles/trusty-kilo.yaml | 30 -------------------- ceph-proxy/tests/bundles/trusty-liberty.yaml | 30 -------------------- ceph-proxy/tests/bundles/xenial-newton.yaml | 30 -------------------- 5 files changed, 1 insertion(+), 121 deletions(-) delete mode 100644 ceph-proxy/tests/bundles/trusty-juno.yaml delete mode 100644 ceph-proxy/tests/bundles/trusty-kilo.yaml delete mode 100644 ceph-proxy/tests/bundles/trusty-liberty.yaml delete mode 100644 ceph-proxy/tests/bundles/xenial-newton.yaml diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 575c9bb6..e84ab9b5 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,8 +12,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan - trusty extra-bindings: public: diff --git a/ceph-proxy/tests/bundles/trusty-juno.yaml b/ceph-proxy/tests/bundles/trusty-juno.yaml deleted file mode 100644 index 7c85e398..00000000 --- a/ceph-proxy/tests/bundles/trusty-juno.yaml +++ /dev/null @@ -1,30 +0,0 @@ -series: trusty -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:trusty-juno - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:trusty-juno - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:trusty-juno - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:trusty-juno -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-kilo.yaml b/ceph-proxy/tests/bundles/trusty-kilo.yaml deleted file mode 100644 index c9b18bc0..00000000 --- a/ceph-proxy/tests/bundles/trusty-kilo.yaml +++ /dev/null @@ -1,30 +0,0 @@ -series: trusty -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:trusty-kilo - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:trusty-kilo - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:trusty-kilo - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:trusty-kilo -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-liberty.yaml b/ceph-proxy/tests/bundles/trusty-liberty.yaml deleted file mode 100644 index d1835814..00000000 --- a/ceph-proxy/tests/bundles/trusty-liberty.yaml +++ /dev/null @@ -1,30 +0,0 @@ -series: trusty -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:trusty-liberty - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:trusty-liberty - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:trusty-liberty - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:trusty-liberty -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' diff --git 
a/ceph-proxy/tests/bundles/xenial-newton.yaml b/ceph-proxy/tests/bundles/xenial-newton.yaml deleted file mode 100644 index a58bae67..00000000 --- a/ceph-proxy/tests/bundles/xenial-newton.yaml +++ /dev/null @@ -1,30 +0,0 @@ -series: xenial -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:xenial-newton - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:xenial-newton - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:xenial-newton - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:xenial-newton -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' From a8cfad0d63e8c3a90bedf1e6a136ef337502d104 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:42:25 -0400 Subject: [PATCH 1812/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Ib6244dc1f5f9c8a0eccbd8f95d5b4f39f7077510 --- ceph-radosgw/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a27d2a68..b7e9c1f5 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,8 +15,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan - trusty extra-bindings: public: From 6527f19d54aa3680c9bec6a2799d8117eb422e7e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 15 Aug 2019 15:42:33 -0400 Subject: [PATCH 1813/2699] Update series metadata Ensure that EOL releases are removed from metadata and tests, and that the current dev release is enabled in metadata. Dev release tests are enabled separately because of chickens and eggs. Change-Id: Ie196f329f2f5b98960c895e2bfcf0ea8082ee2b4 --- ceph-rbd-mirror/src/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index c4bbc676..e8776293 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -17,8 +17,8 @@ tags: series: - xenial - bionic - - cosmic - disco + - eoan extra-bindings: public: cluster: From 5e3b6eb0c06de3019647640c60219c1c683521fb Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 21 Aug 2019 15:42:44 +0200 Subject: [PATCH 1814/2699] Refactor OSD bootstrap code out of charms.ceph As the OSD bootstrap code is unique to ceph-osd, the code to check / setup the OSD bootstrap & upgrade keys can be moved in-tree. 
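Because the keyring helpers move in-tree (see the `hooks/utils.py` hunk
below), they can now be unit tested without reaching into charms.ceph. A
minimal sketch of such a test, assuming the mock targets follow the module
layout shown in the diff and using a placeholder key value:

```python
# Rough unit-test sketch for the new in-tree helper; everything external is
# mocked out, so no ceph-authtool invocation actually happens.
from unittest import mock

import utils  # ceph-osd/hooks/utils.py, as laid out in the diff below


@mock.patch.object(utils.subprocess, 'check_call')
@mock.patch.object(utils.os.path, 'exists', return_value=False)
@mock.patch.object(utils.ceph, 'ceph_user', return_value='ceph')
def test_import_osd_bootstrap_key(_ceph_user, _exists, _check_call):
    utils.import_osd_bootstrap_key('AQD-fake-key==')  # placeholder key
    cmd = _check_call.call_args[0][0]
    assert 'ceph-authtool' in cmd
    assert '--name=client.bootstrap-osd' in cmd
```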
Change-Id: Ia1639c81238ca782b5600bbe445cf6ee105bfd37 --- ceph-osd/hooks/ceph_hooks.py | 9 ++++-- ceph-osd/hooks/utils.py | 59 ++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index a2729593..f9903fd6 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -73,6 +73,9 @@ AppArmorContext, ) from utils import ( + is_osd_bootstrap_ready, + import_osd_bootstrap_key, + import_osd_upgrade_key, get_host_ip, get_networks, assert_charm_supports_ipv6, @@ -528,7 +531,7 @@ def prepare_disks_and_activate(): '`list-disks`, `zap-disk` and `blacklist-*` actions.') return - if ceph.is_bootstrapped(): + if is_osd_bootstrap_ready(): log('ceph bootstrapped, rescanning disks') emit_cephconf() bluestore = use_bluestore() @@ -648,8 +651,8 @@ def mon_relation(): if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') emit_cephconf() - ceph.import_osd_bootstrap_key(bootstrap_key) - ceph.import_osd_upgrade_key(upgrade_key) + import_osd_bootstrap_key(bootstrap_key) + import_osd_upgrade_key(upgrade_key) prepare_disks_and_activate() else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 786c1bbe..9f93b598 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -16,6 +16,10 @@ import os import socket import subprocess +import sys + +sys.path.append('lib') +import ceph.utils as ceph from charmhelpers.core.hookenv import ( unit_get, @@ -63,6 +67,61 @@ import dns.resolver +_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" +_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" + + +def is_osd_bootstrap_ready(): + """ + Is this machine ready to add OSDs. + + :returns: boolean: Is the OSD bootstrap key present + """ + return os.path.exists(_bootstrap_keyring) + + +def import_osd_bootstrap_key(key): + """ + Ensure that the osd-bootstrap keyring is setup. + + :param key: The cephx key to add to the bootstrap keyring + :type key: str + :raises: subprocess.CalledProcessError""" + if not os.path.exists(_bootstrap_keyring): + cmd = [ + "sudo", + "-u", + ceph.ceph_user(), + 'ceph-authtool', + _bootstrap_keyring, + '--create-keyring', + '--name=client.bootstrap-osd', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +def import_osd_upgrade_key(key): + """ + Ensure that the osd-upgrade keyring is setup. + + :param key: The cephx key to add to the upgrade keyring + :type key: str + :raises: subprocess.CalledProcessError""" + if not os.path.exists(_upgrade_keyring): + cmd = [ + "sudo", + "-u", + ceph.ceph_user(), + 'ceph-authtool', + _upgrade_keyring, + '--create-keyring', + '--name=client.osd-upgrade', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + def render_template(template_name, context, template_dir=TEMPLATES_DIR): templates = jinja2.Environment( loader=jinja2.FileSystemLoader(template_dir)) From cc7acf152ddac2903e486f778e12d4661b0cfed2 Mon Sep 17 00:00:00 2001 From: Camille Rodriguez Date: Mon, 26 Aug 2019 15:40:06 -0400 Subject: [PATCH 1815/2699] Add disco-stein bundle to gate tests The bundle disco-stein was in the dev bundles because a bug with the Python 3.7 syntax prevented the success of the deployment of HA clusters. This bug is fixed, we can reintegrate disco-stein in the gate bundles. 
Other small additions:
- removing sitepackages in tox.ini to avoid test env pollution
- skip_missing_interpreters in tox.ini set to False to avoid false
  positives by skipping missing interpreters.

LP Related-Bug: #1823718
Change-Id: I2844aaa42ea4b51c2bb162d17a9c1c308b5d8be8
---
 .../{dev-basic-disco-stein => gate-basic-disco-stein}  | 0
 ceph-fs/src/tox.ini                                    | 4 ++++
 ceph-fs/tox.ini                                        | 7 +++++--
 3 files changed, 9 insertions(+), 2 deletions(-)
 rename ceph-fs/src/tests/{dev-basic-disco-stein => gate-basic-disco-stein} (100%)

diff --git a/ceph-fs/src/tests/dev-basic-disco-stein b/ceph-fs/src/tests/gate-basic-disco-stein
similarity index 100%
rename from ceph-fs/src/tests/dev-basic-disco-stein
rename to ceph-fs/src/tests/gate-basic-disco-stein

diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini
index 4c5f90ef..ffdb9313 100644
--- a/ceph-fs/src/tox.ini
+++ b/ceph-fs/src/tox.ini
@@ -4,6 +4,10 @@
 [tox]
 envlist = pep8
 skipsdist = True
+# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE(beisner): Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False

 [testenv]
 setenv = VIRTUAL_ENV={envdir}

diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini
index 718be2b5..72598d76 100644
--- a/ceph-fs/tox.ini
+++ b/ceph-fs/tox.ini
@@ -4,7 +4,10 @@
 [tox]
 skipsdist = True
 envlist = pep8,py37
-skip_missing_interpreters = True
+# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE(beisner): Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False

 [testenv]
 setenv = VIRTUAL_ENV={envdir}
@@ -49,7 +52,7 @@ deps = -r{toxinidir}/test-requirements.txt
 commands = stestr run {posargs}

 [testenv:pep8]
-basepython = python3.6
+basepython = python3
 deps = -r{toxinidir}/test-requirements.txt
 commands = flake8 {posargs} src unit_tests

From 5b7b8c7351f5d863805833379fb797f8897c48ff Mon Sep 17 00:00:00 2001
From: Camille Rodriguez
Date: Mon, 26 Aug 2019 16:53:30 -0400
Subject: [PATCH 1816/2699] Add disco-stein bundle to gate tests

This project already had the disco-stein gate test, but since this
change is part of a batch of changes, I kept the same title.

Changes:
- removing sitepackages in tox.ini to avoid test env pollution
- skip_missing_interpreters in tox.ini set to False to avoid false
  positives by skipping missing interpreters.

Change-Id: I477d789cca10e9b714771c1419fe355bf44e4b6a
---
 ceph-rbd-mirror/src/tox.ini | 6 +++++-
 ceph-rbd-mirror/tox.ini     | 4 ++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini
index cd35f5c3..00c11349 100644
--- a/ceph-rbd-mirror/src/tox.ini
+++ b/ceph-rbd-mirror/src/tox.ini
@@ -1,12 +1,16 @@
 [tox]
 envlist = pep8
 skipsdist = True
+# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE(beisner): Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 whitelist_externals = juju -passenv = HOME TERM CS_API_* OS_* AMULET_* +passenv = HOME TERM CS_API_* OS_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index ee6ada13..01db0b4e 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -4,6 +4,10 @@ [tox] skipsdist = True envlist = pep8,py37 +# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} From de3f7599718ce60639b97c3c925f6db173b5d350 Mon Sep 17 00:00:00 2001 From: Camille Rodriguez Date: Mon, 26 Aug 2019 16:29:22 -0400 Subject: [PATCH 1817/2699] Add disco-stein bundle to gate tests The bundle disco-stein was in the dev bundles because a bug with the Python 3.7 syntax prevented the success of the deployment of HA clusters. This bug is fixed, we can reintegrate disco-stein in the gate bundles. Other small additions: - Passing OS env vars instead of AMULET - removing sitepackages in tox.ini to avoid test env pollution - skip_missing_interpreters in tox.ini set to False to avoid false positives by skipping missing interpreters. - adding bionic-stein bundle to gate_bundles instead of dev LP Related-Bug: #1823718 Change-Id: Ia7093a5654a763670be05635f7f6fce292eb9c96 --- ceph-proxy/tests/bundles/disco-stein.yaml | 23 +++++++++++++++++++++++ ceph-proxy/tests/tests.yaml | 5 ++--- ceph-proxy/tox.ini | 7 +++++-- 3 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 ceph-proxy/tests/bundles/disco-stein.yaml diff --git a/ceph-proxy/tests/bundles/disco-stein.yaml b/ceph-proxy/tests/bundles/disco-stein.yaml new file mode 100644 index 00000000..7a4237af --- /dev/null +++ b/ceph-proxy/tests/bundles/disco-stein.yaml @@ -0,0 +1,23 @@ +series: disco +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index b5a568ac..53de0b72 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -10,6 +10,8 @@ gate_bundles: - xenial-queens # luminous - bionic-queens # luminous - bionic-rocky # mimic + - bionic-stein + - disco-stein dev_bundles: # Icehouse - trusty-icehouse @@ -22,9 +24,6 @@ dev_bundles: - xenial-ocata # Pike - xenial-pike - # Mimic - - bionic-stein - smoke_bundles: - bionic-stein diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index aa3d6d0c..6783b1ce 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -4,17 +4,20 @@ [tox] envlist = pep8,py3{5,6} skipsdist = True +# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE(beisner): Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} commands = stestr run {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* +passenv = HOME TERM OS_* CS_API_* deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt From 71d4a8c45e5cc37c2072a32def0d811897dd3227 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 4 Jul 2019 15:53:22 +0200 Subject: [PATCH 1818/2699] Enable pg_autoscaler for new Nautilus installations This change also allows an admin to enable it for existing installations that are upgraded to Nautilus via a config option. This change also enabled bionic-train to allow testing with Ceph Nautilus. func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/52 Depends-On: https://github.com/juju/charm-helpers/pull/343 Change-Id: Ic532204aa1576cdbeb47de7410f421aa9e4bac42 --- ceph-mon/config.yaml | 14 ++- ceph-mon/hooks/ceph_hooks.py | 43 +++++++- .../contrib/storage/linux/ceph.py | 53 +++++++++ ceph-mon/hooks/utils.py | 15 +++ ceph-mon/tests/bundles/bionic-train.yaml | 103 ++++++++++++++++++ ceph-mon/tests/tests.yaml | 1 + ceph-mon/unit_tests/test_ceph_hooks.py | 80 +++++++++++++- ceph-mon/unit_tests/test_ceph_utils.py | 15 +++ 8 files changed, 317 insertions(+), 7 deletions(-) create mode 100644 ceph-mon/tests/bundles/bionic-train.yaml diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 72392e17..e3e0c92a 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -156,7 +156,7 @@ options: kernel.threads-max: 2097152 }' description: | YAML-formatted associative array of sysctl key/value pairs to be set - persistently. By default we set pid_max, max_map_count and + persistently. By default we set pid_max, max_map_count and threads-max to a high value to avoid problems with large numbers (>20) of OSDs recovering. very large clusters should set those values even higher (e.g. max for kernel.pid_max is 4194303). @@ -196,7 +196,7 @@ options: default: '1' type: string description: | - Recovery rate (in objects/s) below which we consider recovery + Recovery rate (in objects/s) below which we consider recovery to be stalled. nagios_raise_nodeepscrub: default: True @@ -272,3 +272,13 @@ options: least one pool (glance) loaded with a disproportionately high amount of data/objects where other pools may remain empty. This can trigger HEALTH_WARN if mon_pg_warn_max_object_skew is exceeded but that is actually false positive. + pg-autotune: + type: string + default: auto + description: | + The default configuration for pg-autotune will be to automatically enable + the module for new cluster installs on Ceph Nautilus, but to leave it + disabled for all cluster upgrades to Nautilus. To enable the pg-autotune + feature for upgraded clusters, the pg-autotune option should be set to + 'true'. To disable the autotuner for new clusters, the pg-autotune option + should be set to 'false'. 
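In other words, the option resolves to a boolean through a small decision
table. A condensed sketch of that logic follows; the authoritative
implementation is the `ceph_hooks.py` hunk below, which derives these inputs
from `config()`, `cmp_pkgrevno` and the monitor key store:

```python
# Condensed sketch of the pg-autotune decision the option text describes;
# function and parameter names here are illustrative, not charm API.
def pg_autotune_enabled(option, nautilus_or_later, new_install):
    """Return True if the pg_autoscaler module should be enabled."""
    if option == 'true':
        return True
    if option == 'false':
        return False
    # 'auto': on for fresh Nautilus (>= 14.2.0) installs, off for clusters
    # upgraded to Nautilus.
    return nautilus_or_later and new_install
```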
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index b889fee2..f6f36dfa 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -78,7 +78,9 @@ from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render from charmhelpers.contrib.storage.linux.ceph import ( - CephConfContext) + CephConfContext, + enable_pg_autoscale, +) from utils import ( add_rbd_mirror_features, assert_charm_supports_ipv6, @@ -88,7 +90,8 @@ get_rbd_features, has_rbd_mirrors, get_ceph_osd_releases, - execute_post_osd_upgrade_steps + execute_post_osd_upgrade_steps, + mgr_enable_module, ) from charmhelpers.contrib.charmsupport import nrpe @@ -265,6 +268,18 @@ def config_changed(): # must be set. This block is invoked when the user is trying to # get out of that scenario by enabling no-bootstrap. bootstrap_source_relation_changed() + + # This will only ensure that we are enabled if the 'pg-autotune' option + # is explicitly set to 'true', and not if it is 'auto' or 'false' + if (config('pg-autotune') == 'true' and + cmp_pkgrevno('ceph', '14.2.0') >= 0): + # The return value of the enable_module call will tell us if the + # module was already enabled, in which case, we don't need to + # re-configure the already configured pools + if mgr_enable_module('pg_autoscaler'): + ceph.monitor_key_set('admin', 'autotune', 'true') + for pool in ceph.list_pools(): + enable_pg_autoscale('admin', pool) # unconditionally verify that the fsid and monitor-secret are set now # otherwise we exit until a leader does this. if leader_get('fsid') is None or leader_get('monitor-secret') is None: @@ -430,6 +445,22 @@ def mon_relation(): if cmp_pkgrevno('ceph', '12.0.0') >= 0: status_set('maintenance', 'Bootstrapping Ceph MGR') ceph.bootstrap_manager() + if ceph.monitor_key_exists('admin', 'autotune'): + autotune = ceph.monitor_key_get('admin', 'autotune') + else: + autotune = config('pg-autotune') + if (cmp_pkgrevno('ceph', '14.2.0') >= 0 and + (autotune == 'true' or + autotune == 'auto')): + ceph.monitor_key_set('admin', 'autotune', 'true') + else: + ceph.monitor_key_set('admin', 'autotune', 'false') + if ceph.monitor_key_get('admin', 'autotune') == 'true': + try: + mgr_enable_module('pg_autoscaler') + except subprocess.CalledProcessError: + log("Failed to initialize autoscaler, it must be " + "initialized on the last monitor", level='info') # If we can and want to if is_leader() and config('customize-failure-domain'): # But only if the environment supports it @@ -806,6 +837,14 @@ def upgrade_charm(): mon_relation_joined() if is_relation_made("nrpe-external-master"): update_nrpe_config() + if not ceph.monitor_key_exists('admin', 'autotune'): + autotune = config('pg-autotune') + if (cmp_pkgrevno('ceph', '14.2.0') >= 0 and + (autotune == 'true' or + autotune == 'auto')): + ceph.monitor_key_set('admin', 'autotune', 'true') + else: + ceph.monitor_key_set('admin', 'autotune', 'false') # NOTE(jamespage): # Reprocess broker requests to ensure that any cephx diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index a9864467..bd9c6842 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -301,6 +301,7 @@ def __init__(self, service, name, pg_num=None, replicas=2, percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + 
self.percent_data = percent_data if pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. @@ -324,12 +325,24 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) try: set_app_name_for_pool(client=self.service, pool=self.name, name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -382,6 +395,18 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -389,6 +414,34 @@ def create(self): Returns json formatted output""" +def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + if six.PY3: + modules = modules.decode('utf-8') + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable sutoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + def get_mon_map(service): """ Returns the current monitor map. diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index a35efedb..a6451089 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -43,6 +43,7 @@ get_address_in_network, get_ipv6_addr ) +from charmhelpers.contrib.storage.linux import ceph try: import dns.resolver @@ -69,6 +70,20 @@ def enable_pocket(pocket): sources.write(line) +def mgr_enable_module(module): + """Enable a Ceph Manager Module. 
+ + :param module: The module name to enable + :type module: str + + :raises: subprocess.CalledProcessError + """ + if module not in ceph.enabled_manager_modules(): + subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module]) + return True + return False + + @cached def get_unit_hostname(): return socket.gethostname() diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..815194fe --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -0,0 +1,103 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-train/proposed + ceph-mon: + charm: ceph-mon + series: bionic + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train/proposed + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-train/proposed + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train/proposed + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-train/proposed + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 7b1f7c21..df940108 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-mon gate_bundles: + - bionic-train - bionic-stein - bionic-rocky - bionic-queens diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 
f3a149bf..7d4c9dff 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -19,7 +19,6 @@ lambda *args, **kwargs: f(*args, **kwargs)) import ceph_hooks - TO_PATCH = [ 'config', 'is_leader', @@ -56,9 +55,10 @@ 'disable-pg-max-object-skew': False} -class CephHooksTestCase(unittest.TestCase): +class CephHooksTestCase(test_utils.CharmTestCase): def setUp(self): - super(CephHooksTestCase, self).setUp() + super(CephHooksTestCase, self).setUp(ceph_hooks, TO_PATCH) + self.config.side_effect = self.test_config.get @patch.object(ceph_hooks, 'get_rbd_features', return_value=None) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -287,6 +287,80 @@ def test_notify_mons(self, mock_relation_ids, mock_related_units, relation_settings={ 'nonce': 'FAKE-UUID'}) + @patch.object(ceph_hooks.ceph, 'list_pools') + @patch.object(ceph_hooks, 'mgr_enable_module') + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'create_sysctl') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'bootstrap_source_relation_changed') + @patch.object(ceph_hooks, 'relations_of_type') + def test_config_changed_no_autotune(self, + relations_of_type, + bootstrap_source_rel_changed, + get_mon_hosts, + check_for_upgrade, + create_sysctl, + emit_ceph_conf, + mgr_enable_module, + list_pools): + relations_of_type.return_value = False + self.test_config.set('pg-autotune', 'false') + ceph_hooks.config_changed() + mgr_enable_module.assert_not_called() + + @patch.object(ceph_hooks.ceph, 'monitor_key_set') + @patch.object(ceph_hooks.ceph, 'list_pools') + @patch.object(ceph_hooks, 'mgr_enable_module') + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'create_sysctl') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'bootstrap_source_relation_changed') + @patch.object(ceph_hooks, 'relations_of_type') + @patch.object(ceph_hooks, 'cmp_pkgrevno') + def test_config_changed_with_autotune(self, + cmp_pkgrevno, + relations_of_type, + bootstrap_source_rel_changed, + get_mon_hosts, + check_for_upgrade, + create_sysctl, + emit_ceph_conf, + mgr_enable_module, + list_pools, monitor_key_set): + relations_of_type.return_value = False + cmp_pkgrevno.return_value = 1 + self.test_config.set('pg-autotune', 'true') + ceph_hooks.config_changed() + mgr_enable_module.assert_called_once_with('pg_autoscaler') + monitor_key_set.assert_called_once_with('admin', 'autotune', 'true') + + @patch.object(ceph_hooks.ceph, 'list_pools') + @patch.object(ceph_hooks, 'mgr_enable_module') + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'create_sysctl') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'bootstrap_source_relation_changed') + @patch.object(ceph_hooks, 'relations_of_type') + @patch.object(ceph_hooks, 'cmp_pkgrevno') + def test_config_changed_with_default_autotune(self, + cmp_pkgrevno, + relations_of_type, + bootstrap_source_rel_changed, + get_mon_hosts, + check_for_upgrade, + create_sysctl, + emit_ceph_conf, + mgr_enable_module, + list_pools): + relations_of_type.return_value = False + cmp_pkgrevno.return_value = 1 + self.test_config.set('pg-autotune', 'auto') + ceph_hooks.config_changed() + mgr_enable_module.assert_not_called() + class RelatedUnitsTestCase(unittest.TestCase): diff --git a/ceph-mon/unit_tests/test_ceph_utils.py 
b/ceph-mon/unit_tests/test_ceph_utils.py
index 7062091a..3e8d7a72 100644
--- a/ceph-mon/unit_tests/test_ceph_utils.py
+++ b/ceph-mon/unit_tests/test_ceph_utils.py
@@ -39,6 +39,21 @@ def test_has_rbd_mirrors(self, _relation_ids, _related_units):
         _relation_ids.assert_called_once_with('rbd-mirror')
         _related_units.assert_called_once_with('arelid')

+    @mock.patch.object(utils.ceph, 'enabled_manager_modules')
+    @mock.patch.object(utils.subprocess, 'check_call')
+    def test_mgr_enable_module(self, _call, _enabled_modules):
+        _enabled_modules.return_value = []
+        utils.mgr_enable_module('test-module')
+        _call.assert_called_once_with(
+            ['ceph', 'mgr', 'module', 'enable', 'test-module'])
+
+    @mock.patch.object(utils.ceph, 'enabled_manager_modules')
+    @mock.patch.object(utils.subprocess, 'check_call')
+    def test_mgr_enable_module_again(self, _call, _enabled_modules):
+        _enabled_modules.return_value = ['test-module']
+        utils.mgr_enable_module('test-module')
+        _call.assert_not_called()
+
     @mock.patch.object(utils.subprocess, 'check_output')
     def test_get_default_rbd_features(self, _check_output):
         _check_output.return_value = json.dumps(

From fd7332e7c21dc775495ff196f5674fad59030b0f Mon Sep 17 00:00:00 2001
From: Rodrigo Barbieri
Date: Mon, 23 Sep 2019 17:58:18 -0300
Subject: [PATCH 1819/2699] Fix CI not deploying patchset changes

tests.yaml was missing the charm_name parameter, causing zaza to pull
the latest ceph-proxy from the charm store instead of using the changes
from the patch proposed in Gerrit. This change fixes that: zaza now
deploys ceph-proxy from the local repository created during testing,
which contains the proposed patch.

Change-Id: I3681e72f032780b864d02a95cf983f26a0b1c91e
---
 ceph-proxy/tests/tests.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml
index 53de0b72..9d1ec8dd 100644
--- a/ceph-proxy/tests/tests.yaml
+++ b/ceph-proxy/tests/tests.yaml
@@ -1,3 +1,4 @@
+charm_name: ceph-proxy
 configure:
   - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy

From d12c2e4949bf9b7eab66420e8d2dacf292f36e87 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Tue, 24 Sep 2019 07:10:03 +0000
Subject: [PATCH 1820/2699] Fix zaza source

The charm was incorrectly pulling in an old branch of zaza; this change
points it back at master.

Change-Id: I2a98f843c54f883782e4911412b45185996ce038
---
 ceph-proxy/test-requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt
index d131a212..e069d2fd 100644
--- a/ceph-proxy/test-requirements.txt
+++ b/ceph-proxy/test-requirements.txt
@@ -10,5 +10,5 @@ requests>=2.18.4
 # NOTE: workaround for 14.04 pip/tox
 pytz
 pyudev  # for ceph-* charm unit tests (not mocked?)
-git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>='3.0' From fa9fadb1a3bab72cf342526a957a83e880c88c3b Mon Sep 17 00:00:00 2001 From: taodd Date: Tue, 24 Sep 2019 18:19:38 +0800 Subject: [PATCH 1821/2699] Sync charms.ceph to get fix Change-Id: Ib3d4b79690eb5931b4f0680b937590b317a91427 Closes-Bug: #1841021 --- ceph-osd/lib/ceph/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 972f1ad3..31447037 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -1912,9 +1912,10 @@ def osdize_dir(path, encrypt=False, bluestore=False): ' skipping'.format(path)) return - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return + for t in ['upstart', 'systemd']: + if os.path.exists(os.path.join(path, t)): + log('Path {} is already used as an OSD dir - bailing'.format(path)) + return if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', From 01917190e486f678f9c3d9233df251dc93c253c5 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:32 -0500 Subject: [PATCH 1822/2699] Sync charm/ceph helpers, tox, and requirements Change-Id: I287caaa68bc4321767217b83b0696a4f5a10e766 --- ceph-rbd-mirror/requirements.txt | 7 +- ceph-rbd-mirror/src/files/.gitkeep | 0 ceph-rbd-mirror/src/test-requirements.txt | 17 ++--- ceph-rbd-mirror/src/tox.ini | 14 ++-- ceph-rbd-mirror/test-requirements.txt | 18 ++++-- ceph-rbd-mirror/tox.ini | 78 +++++++++-------------- 6 files changed, 66 insertions(+), 68 deletions(-) create mode 100644 ceph-rbd-mirror/src/files/.gitkeep diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index 20f335d2..b1d4872c 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -1,6 +1,7 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools # # Build requirements charm-tools>=2.4.4 diff --git a/ceph-rbd-mirror/src/files/.gitkeep b/ceph-rbd-mirror/src/files/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index f4a766df..d3c9be84 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -1,11 +1,8 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. -charm-tools>=2.4.4 -coverage>=3.6 -mock>=1.2 -flake8>=2.2.4,<=2.4.1 -stestr>=2.2.0 -requests>=2.18.4 +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index 00c11349..07a7adcb 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -1,16 +1,22 @@ +# Source charm (with zaza): ./src/tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + [tox] envlist = pep8 skipsdist = True -# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +# NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False -# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 whitelist_externals = juju -passenv = HOME TERM CS_API_* OS_* +passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} @@ -23,7 +29,7 @@ commands = charm-proof [testenv:func-noop] basepython = python3 commands = - true + functest-run-suite --help [testenv:func] basepython = python3 diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 3162b0fb..0ab97f6e 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -1,13 +1,23 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools # # Lint and unit test requirements flake8>=2.2.4,<=2.4.1 -stestr +stestr>=2.2.0 requests>=2.18.4 charms.reactive mock>=1.2 nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack +# +# Revisit for removal / mock improvement: +netifaces # vault +psycopg2-binary # vault +tenacity # vault +pbr # vault +cryptography # vault, keystone-saml-mellon +lxml # keystone-saml-mellon +hvac # vault, barbican-vault diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 01db0b4e..5b41c1dd 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -1,12 +1,15 @@ # Source charm: ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. 
See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + [tox] skipsdist = True -envlist = pep8,py37 -# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +envlist = pep8,py3 +# NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False -# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False [testenv] @@ -16,7 +19,7 @@ setenv = VIRTUAL_ENV={envdir} LAYER_PATH={toxinidir}/layers INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy +passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = pip install {opts} {packages} deps = @@ -30,69 +33,45 @@ commands = [testenv:py3] basepython = python3 deps = -r{toxinidir}/test-requirements.txt -setenv = - {[testenv]setenv} - PYTHON=coverage run -commands = - coverage erase - stestr run {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report - +commands = stestr run --slowest {posargs} [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/test-requirements.txt -setenv = - {[testenv]setenv} - PYTHON=coverage run -commands = - coverage erase - stestr run {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report +commands = stestr run --slowest {posargs} [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt -setenv = - {[testenv]setenv} - PYTHON=coverage run -commands = - coverage erase - stestr run {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report +commands = stestr run --slowest {posargs} [testenv:py37] basepython = python3.7 deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt setenv = {[testenv]setenv} PYTHON=coverage run commands = coverage erase - stestr run {posargs} + stestr run --slowest {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report -[testenv:pep8] -basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} src unit_tests - -[testenv:venv] -basepython = python3 -commands = {posargs} - [coverage:run] branch = True concurrency = multiprocessing @@ -101,8 +80,13 @@ source = . 
omit = .tox/* + */charmhelpers/* unit_tests/* +[testenv:venv] +basepython = python3 +commands = {posargs} + [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402 +ignore = E402,W504 From a52b2020242faecacdb150674d189bb72716a411 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:11 -0500 Subject: [PATCH 1823/2699] Sync charm/ceph helpers, tox, and requirements Change-Id: I1be6d121b470729c150f060d4e970f58d28fc13d --- ceph-fs/.zuul.yaml | 2 +- ceph-fs/src/files/.gitkeep | 0 ceph-fs/src/test-requirements.txt | 2 +- ceph-fs/src/tox.ini | 30 ++++++++++++------------- ceph-fs/tox.ini | 37 +++++++++++++++---------------- 5 files changed, 34 insertions(+), 37 deletions(-) create mode 100644 ceph-fs/src/files/.gitkeep diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 2e4862c1..830f18ea 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -1,6 +1,6 @@ - project: templates: - - python-charm-jobs + - python35-charm-jobs - openstack-python35-jobs - openstack-python3-train-jobs - openstack-cover-jobs diff --git a/ceph-fs/src/files/.gitkeep b/ceph-fs/src/files/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index e56db9a6..a4b77a56 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -15,7 +15,7 @@ aodhclient>=0.1.0 gnocchiclient>=3.1.0,<3.2.0 python-barbicanclient>=4.0.1 python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 +python-cinderclient>=1.4.0,<5.0.0 python-designateclient>=1.5 python-glanceclient>=1.1.0 python-heatclient>=0.8.0 diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index ffdb9313..8a91d50b 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -1,53 +1,51 @@ -# Source charm: ./src/tox.ini +# Source charm (with amulet): ./src/tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + [tox] envlist = pep8 skipsdist = True -# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +# NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False -# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} AMULET_SETUP_TIMEOUT=5400 whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_* +passenv = HOME TERM AMULET_* CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} [testenv:pep8] -basepython = python2.7 +basepython = python3 commands = charm-proof -[testenv:func27-noop] +[testenv:func-noop] # DRY RUN - For Debug basepython = python2.7 commands = bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy -[testenv:func27] +[testenv:func] # Run all gate tests which are +x (expected to always pass) basepython = python2.7 commands = bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy -[testenv:func27-smoke] +[testenv:func-smoke] # Run a specific test as an Amulet smoke test (expected to always pass) basepython = python2.7 commands = bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy -[testenv:func27-dfs] -# Run all deploy-from-source tests which are +x (may not always pass!) -basepython = python2.7 -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy - -[testenv:func27-dev] +[testenv:func-dev] # Run all development test targets which are +x (may not always pass!) basepython = python2.7 commands = diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 72598d76..5b41c1dd 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -1,12 +1,15 @@ # Source charm: ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + [tox] skipsdist = True -envlist = pep8,py37 -# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +envlist = pep8,py3 +# NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False -# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False [testenv] @@ -16,40 +19,36 @@ setenv = VIRTUAL_ENV={envdir} LAYER_PATH={toxinidir}/layers INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy +passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = pip install {opts} {packages} deps = -r{toxinidir}/requirements.txt [testenv:build] -basepython = python2.7 +basepython = python3 commands = charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} -[testenv:py27] -basepython = python2.7 -# Reactive source charms are Python3-only, but a py27 unit test target -# is required by OpenStack Governance. Remove this shim as soon as -# permitted. 
http://governance.openstack.org/reference/cti/python_cti.html -deps = -whitelist_externals = true -commands = true +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/test-requirements.txt -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} [testenv:py37] basepython = python3.7 deps = -r{toxinidir}/test-requirements.txt -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 @@ -67,7 +66,7 @@ setenv = PYTHON=coverage run commands = coverage erase - stestr run {posargs} + stestr run --slowest {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml @@ -90,4 +89,4 @@ commands = {posargs} [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402 +ignore = E402,W504 From bf164764023a67886d3d6937177c791c11b5da39 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 1 Oct 2019 17:25:36 +0200 Subject: [PATCH 1824/2699] Ensure that we check if there is a current version Change-Id: Ibc0097f08e0d0991a8d8d87c21f0c028d005d83b Closes-Bug: #1846227 --- .../contrib/hardening/audits/apt.py | 2 +- .../contrib/openstack/amulet/utils.py | 8 +- .../charmhelpers/contrib/openstack/context.py | 23 + .../contrib/openstack/ha/utils.py | 29 +- .../charmhelpers/contrib/openstack/policyd.py | 700 ++++++++++++++++++ .../section-oslo-messaging-rabbit-ocata | 10 + .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 51 ++ ceph-radosgw/hooks/charmhelpers/core/host.py | 27 + .../charmhelpers/core/host_factory/ubuntu.py | 3 +- .../hooks/charmhelpers/fetch/__init__.py | 2 + .../hooks/charmhelpers/fetch/ubuntu.py | 143 ++-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 237 ++++++ ceph-radosgw/hooks/utils.py | 4 +- 14 files changed, 1187 insertions(+), 68 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata create mode 100644 ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py index 3dc14e3c..67521e17 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -13,7 +13,6 @@ # limitations under the License. 
from __future__ import absolute_import # required for external apt import -from apt import apt_pkg from six import string_types from charmhelpers.fetch import ( @@ -26,6 +25,7 @@ WARNING, ) from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg class AptConfig(BaseAudit): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0a5f81bd..7d95a590 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -709,8 +709,8 @@ def glance_create_image(self, glance, image_name, image_url, '{}...'.format(image_name, image_url)) # Download image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) @@ -800,8 +800,8 @@ def create_cirros_image(self, glance, image_name, hypervisor_type=None): '({})...'.format(image_name)) # Get cirros image URL - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index a6545e12..a3d48c41 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -18,6 +18,7 @@ import math import os import re +import socket import time from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -1716,6 +1717,10 @@ def __call__(self): 'rel_key': 'enable-nfg-logging', 'default': False, }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1745,6 +1750,13 @@ def __call__(self): ctxt['extension_drivers'] = ','.join(extension_drivers) + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + return ctxt def get_neutron_options(self, rdata): @@ -2160,3 +2172,14 @@ def __call__(self): 'logrotate_count': self.count, } return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __call__(self): + ctxt = { + 'host_fqdn': socket.getfqdn(), + 'host': socket.gethostname(), + } + return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index 718c6d65..e017bc20 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -127,7 +127,9 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service, extra_settings=None): +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): """ Generate relation data for ha relation Based on configuration options and unit interfaces, 
generate a json
@@ -152,21 +154,18 @@ def generate_ha_relation_data(service, extra_settings=None):
     @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
-    _haproxy_res = 'res_{}_haproxy'.format(service)
-    _relation_data = {
-        'resources': {
-            _haproxy_res: 'lsb:haproxy',
-        },
-        'resource_params': {
+    _relation_data = {'resources': {}, 'resource_params': {}}
+
+    if haproxy_enabled:
+        _haproxy_res = 'res_{}_haproxy'.format(service)
+        _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
+        _relation_data['resource_params'] = {
             _haproxy_res: 'op monitor interval="5s"'
-        },
-        'init_services': {
-            _haproxy_res: 'haproxy'
-        },
-        'clones': {
+        }
+        _relation_data['init_services'] = {_haproxy_res: 'haproxy'}
+        _relation_data['clones'] = {
             'cl_{}_haproxy'.format(service): _haproxy_res
-        },
-    }
+        }
 
     if extra_settings:
         for k, v in extra_settings.items():
@@ -290,7 +289,7 @@ def update_hacluster_vip(service, relation_data):
         iface, netmask, fallback = get_vip_settings(vip)
 
-        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
+        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
         if iface is not None:
             # NOTE(jamespage): Delete old VIP resources
             # Old style naming encoding iface in name
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py
new file mode 100644
index 00000000..1adf2472
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py
@@ -0,0 +1,700 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an easy-to-use
+set of hooks for classic charms to add in /etc/<service-name>/policy.d/
+directory override YAML files.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions are
+provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the charm.
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
+called on the config-changed hook, in that it does an additional check to
+ensure that an already overridden policy.d in an upgrade or install hook isn't
+repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added. The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
+                                                 'keystone')
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@harden()
+def upgrade_charm():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+Status Line
+===========
+
+The workload status code in charm-helpers has been modified to detect if
+policy.d override code has been incorporated into the charm by checking for the
+new config variable (in the config.yaml). If it has been, then the workload
+status line will automatically show "PO:" at the beginning of the workload
+status for that unit/service if the config option is set. If the policy
+override is broken, the "PO (broken):" will be shown. No changes to the charm
+(apart from those already mentioned) are needed to enable this functionality.
+(charms.openstack charms also get this functionality, but please see that
+library for further details).
+"""
+
+# The config.yaml for the charm should contain the following for the config
+# option:
+
+"""
+  use-policyd-override:
+    type: boolean
+    default: False
+    description: |
+      If True then use the resource file named 'policyd-override' to install
+      override yaml files in the service's policy.d directory. The resource
+      file should be a zip file containing at least one yaml file with a .yaml
+      or .yml extension. If False then remove the overrides.
+"""
+
+# The metadata.yaml for the charm should contain the following:
+"""
+resources:
+  policyd-override:
+    type: file
+    filename: policyd-override.zip
+    description: The policy.d overrides file
+"""
+
+# The README for the charm should contain the following:
+"""
+Policy Overrides
+----------------
+
+This service allows for policy overrides using the `policy.d` directory. This
+is an **advanced** feature and the policies that the service supports should be
+clearly and unambiguously understood before trying to override, or add to, the
+default policies that the service uses.
+
+The charm also has some policy defaults. They should also be understood before
+being overridden. It is possible to break the system (for tenants and other
+services) if policies are incorrectly applied to the service.
+
+Policy overrides are YAML files that contain rules that will add to, or
+override, existing policy rules in the service. The `policy.d` directory is
+a place to put the YAML override files. This charm owns the
+`/etc/keystone/policy.d` directory, and as such, any manual changes to it will
+be overwritten on charm upgrades.
+
+Policy overrides are provided to the charm using a resource file called
+`policyd-override`.
This is attached to the charm using (for example):
+
+    juju attach-resource <charm_name> policyd-override=<some-file>
+
+The `<charm_name>` is the name that this charm is deployed as, with
+`<some-file>` being the resource file containing the policy overrides.
+
+The format of the resource file is a ZIP file (.zip extension) containing at
+least one YAML file with an extension of `.yaml` or `.yml`. Note that any
+directories in the ZIP file are ignored; all of the files are flattened into a
+single directory. There must not be any duplicated filenames; this will cause
+an error and nothing in the resource file will be applied.
+
+(ed. next part is optional if the charm supports some form of
+template/substitution on a read file)
+
+If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the
+resource file then the charm will perform a substitution with charm variables
+taken from the config or relations. (ed. edit as appropriate to include the
+variable).
+
+To enable the policy overrides the config option `use-policyd-override` must be
+set to `True`.
+
+When `use-policyd-override` is `True` the status line of the charm will be
+prefixed with `PO:` indicating that policies have been overridden. If the
+installation of the policy override YAML files failed for any reason then the
+status line will be prefixed with `PO (broken):`. The log file for the charm
+will indicate the reason. No policy override files are installed if the `PO
+(broken):` is shown. The status line indicates that the overrides are broken,
+not that the policy for the service has failed - they will be the defaults for
+the charm and service.
+
+If the policy overrides did not install then *either* attach a new, corrected,
+resource file *or* disable the policy overrides by setting
+`use-policyd-override` to False.
+
+Policy overrides on one service may affect the functionality of another
+service. Therefore, it may be necessary to provide policy overrides for
+multiple service charms to achieve a consistent set of policies across the
+OpenStack system. The charms for the other services that may need overrides
+should be checked to ensure that they support overrides before proceeding.
+"""
+
+POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl']
+POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl']
+POLICYD_RESOURCE_NAME = "policyd-override"
+POLICYD_CONFIG_NAME = "use-policyd-override"
+POLICYD_SUCCESS_FILENAME = "policyd-override-success"
+POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO
+POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin")
+
+
+class BadPolicyZipFile(Exception):
+
+    def __init__(self, log_message):
+        self.log_message = log_message
+
+    def __str__(self):
+        return self.log_message
+
+
+class BadPolicyYamlFile(Exception):
+
+    def __init__(self, log_message):
+        self.log_message = log_message
+
+    def __str__(self):
+        return self.log_message
+
+
+if six.PY2:
+    BadZipFile = zipfile.BadZipfile
+else:
+    BadZipFile = zipfile.BadZipFile
+
+
+def is_policyd_override_valid_on_this_release(openstack_release):
+    """Check that the charm is running on at least Ubuntu Xenial, and at
+    least the queens release.
+
+    :param openstack_release: the release codename that is installed.
+    :type openstack_release: str
+    :returns: True if okay
+    :rtype: bool
+    """
+    # NOTE(ajkavanagh) circular import! This is because the status message
+    # generation code in utils has to call into this module, but this function
+    # needs the CompareOpenStackReleases() function.
The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + if not is_policyd_override_valid_on_this_release(openstack_release): + return + # from now on it should succeed; if it doesn't then status line will show + # broken. + resource_filename = get_policy_resource_filename() + restart = process_policy_resource_file( + resource_filename, service, blacklist_paths, blacklist_keys, + template_function) + if restart and restart_handler is not None and callable(restart_handler): + restart_handler() + + +def maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """This function is designed to be called from the config changed hook + handler. 
It will only perform the policyd overrides if the config is True
+    and the success file doesn't exist. Otherwise, it does nothing as the
+    resource file has already been processed.
+
+    See maybe_do_policyd_overrides() for more details on the params.
+
+    :param openstack_release: The openstack release that is installed.
+    :type openstack_release: str
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the string
+                              prior to being processed as a Yaml document.
+    :type template_function: Union[None, Callable[[str], str]]
+    :param restart_handler: The function to call if the service should be
+                            restarted.
+    :type restart_handler: Union[None, Callable[]]
+    """
+    config = hookenv.config()
+    try:
+        if not config.get(POLICYD_CONFIG_NAME, False):
+            remove_policy_success_file()
+            clean_policyd_dir_for(service, blacklist_paths)
+            return
+    except Exception:
+        return
+    # if the policyd overrides have been performed just return
+    if os.path.isfile(_policy_success_file()):
+        return
+    maybe_do_policyd_overrides(
+        openstack_release, service, blacklist_paths, blacklist_keys,
+        template_function, restart_handler)
+
+
+def get_policy_resource_filename():
+    """Function to extract the policy resource filename
+
+    :returns: The filename of the resource, if set, otherwise, if an error
+              occurs, then None is returned.
+    :rtype: Union[str, None]
+    """
+    try:
+        return hookenv.resource_get(POLICYD_RESOURCE_NAME)
+    except Exception:
+        return None
+
+
+@contextlib.contextmanager
+def open_and_filter_yaml_files(filepath):
+    """Validate that the filepath provided is a zip file and contains at least
+    one (.yaml|.yml) file, and that the files are not duplicated when the zip
+    file is flattened. Note that the yaml files are not checked. This is the
+    first stage in validating the policy zipfile; individual yaml files are not
+    checked for validity or blacklisted keys.
+
+    An example of use is:
+
+        with open_and_filter_yaml_files(some_path) as (zfp, g):
+            for zipinfo in g:
+                # do something with zipinfo ...
+
+    :param filepath: a filepath object that can be opened by zipfile
+    :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
+    :returns: (zfp handle,
+               a generator of the (name, filename, ZipInfo object) tuples) as a
+               tuple.
+    :rtype: ContextManager[(zipfile.ZipFile,
+                            Generator[(name, str, str, zipfile.ZipInfo)])]
+    :raises: zipfile.BadZipFile
+    :raises: BadPolicyZipFile if duplicated yaml or missing
+    :raises: IOError if the filepath is not found
+    """
+    with zipfile.ZipFile(filepath, 'r') as zfp:
+        # first pass through; check for duplicates and at least one yaml file.
+        names = collections.defaultdict(int)
+        yamlfiles = _yamlfiles(zfp)
+        for name, _, _, _ in yamlfiles:
+            names[name] += 1
+        # There must be at least 1 yaml file.
+        if len(names.keys()) == 0:
+            raise BadPolicyZipFile("contains no yaml files with {} extensions."
+                                   .format(", ".join(POLICYD_VALID_EXTS)))
+        # There must be no duplicates
+        duplicates = [n for n, c in names.items() if c > 1]
+        if duplicates:
+            raise BadPolicyZipFile("{} have duplicates in the zip file."
+                                   .format(", ".join(duplicates)))
+        # Finally, let's yield the generator
+        yield (zfp, yamlfiles)
+
+
+def _yamlfiles(zipfile):
+    """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions)
+    and the infolist item from a zipfile.
+
+    :param zipfile: the zipfile to read zipinfo items from
+    :type zipfile: zipfile.ZipFile
+    :returns: list of (name, ext, filename, info item) tuples for each
+              self-identified yaml file.
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        if infolist_item.is_dir():
+            continue
+        _, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read, and checked for a yaml file. The top-level keys are
+    checked against the blacklist_keys provided. If there are problems then an
+    Exception is raised. Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+                           an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+             blacklisted keys in the file.
+    """
+    blacklist_keys = blacklist_keys or []
+    # extend (not append) so that the always-blacklisted keys are matched
+    # individually rather than as a single tuple entry in the list.
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    This assumes the default name of "policy.d" which is kept across all
+    charms.
+
+    :param service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service. Note that the
+    service name is passed in, and then the policyd_dir_for() function is used.
+    This is so that a coding error doesn't result in a sudden deletion of the
+    charm (say).
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+ :type keep_paths: Union[None, List[str]] + """ + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + if not os.path.exists(path): + ch_host.mkdir(path, owner=service, group=service, perms=0o775) + _scanner = os.scandir if six.PY3 else _py2_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +@contextlib.contextmanager +def _py2_scandir(path): + """provide a py2 implementation of os.scandir if this module ever gets used + in a py2 charm (unlikely). uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for a + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _P27Direntry objects + :rtype: ContextManager[_P27Direntry] + """ + for f in os.listdir(path): + yield _P27Direntry(f) + + +class _P27Direntry(object): + """Mock a scandir Direntry object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. + :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if os.path.isfile(_policy_success_file()): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. 
This, also, must not match any path in the
+    blacklist.
+
+    If any error occurs, then the policy.d directory is cleared, the error is
+    written to the log, and the status line will eventually show as failed.
+
+    :param resource_file: The zipped file to open and extract yaml files from.
+    :type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the yaml
+                              document.
+    :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
+    :returns: True if the processing was successful, False if not.
+    :rtype: bool
+    """
+    blacklist_paths = blacklist_paths or []
+    completed = False
+    try:
+        with open_and_filter_yaml_files(resource_file) as (zfp, gen):
+            # first clear out the policy.d directory and clear success
+            remove_policy_success_file()
+            clean_policyd_dir_for(service, blacklist_paths)
+            for name, ext, filename, zipinfo in gen:
+                # construct a name for the output file.
+                yaml_filename = path_for_policy_file(service, name)
+                if yaml_filename in blacklist_paths:
+                    raise BadPolicyZipFile("policy.d name {} is blacklisted"
+                                           .format(yaml_filename))
+                with zfp.open(zipinfo) as fp:
+                    doc = fp.read()
+                    # if template_function is not None, then offer the document
+                    # to the template function
+                    if ext in POLICYD_TEMPLATE_EXTS:
+                        if (template_function is None or not
+                                callable(template_function)):
+                            raise BadPolicyZipFile(
+                                "Template {} but no template_function is "
+                                "available".format(filename))
+                        doc = template_function(doc)
+                yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
+                with open(yaml_filename, "wt") as f:
+                    yaml.dump(yaml_doc, f)
+        # Everything worked, so mark it up as a success.
+        completed = True
+    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
+        hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+    except IOError as e:
+        # technically this shouldn't happen; it would be a programming error
+        # as the filename comes from Juju and thus should exist.
+        hookenv.log(
+            "File {} failed with IOError.
This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, blacklist_paths) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + open(_policy_success_file(), "w").close() + return completed diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata new file mode 100644 index 00000000..365f4375 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index d43a4d20..ac96f844 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -84,7 +84,8 @@ SourceConfigError, GPGKeyError, get_upstream_version, - filter_missing_packages + filter_missing_packages, + ubuntu_apt_pkg as apt, ) from charmhelpers.fetch.snap import ( @@ -96,6 +97,10 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -443,8 +448,6 @@ def get_os_codename_package(package, fatal=True): # Second item in list is Version return line.split()[1] - import apt_pkg as apt - cache = apt_cache() try: @@ -658,7 +661,6 @@ def openstack_upgrade_available(package): a newer version of package. 
""" - import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) if not cur_vers: @@ -864,6 +866,12 @@ def _determine_os_workload_status( message = "Unit is ready" juju_log(message, 'INFO') + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + return state, message diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index a9864467..e13dfa8b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -301,6 +301,7 @@ def __init__(self, service, name, pg_num=None, replicas=2, percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + self.percent_data = percent_data if pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. @@ -324,12 +325,24 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) try: set_app_name_for_pool(client=self.service, pool=self.name, name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -382,6 +395,18 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -389,6 +414,32 @@ def create(self): Returns json formatted output""" +def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable sutoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + def get_mon_map(service): """ Returns the current monitor map. 
diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 32754ff9..b33ac906 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -1075,3 +1075,30 @@ def install_ca_cert(ca_cert, name=None): log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. + + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 0ee2b660..1b57e2ce 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -24,6 +24,7 @@ 'bionic', 'cosmic', 'disco', + 'eoan', ) @@ -93,7 +94,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. """ - import apt_pkg + from charmhelpers.fetch import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache pkgcache = apt_cache() diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 8572d34f..0cc7fc85 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -103,6 +103,8 @@ def base_url(self, url): apt_unhold = fetch.apt_unhold import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 24c76e34..31225235 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -13,14 +13,14 @@ # limitations under the License. 
 from collections import OrderedDict
-import os
 import platform
 import re
 import six
-import time
 import subprocess
+import sys
+import time
 
-from charmhelpers.core.host import get_distrib_codename
+from charmhelpers.core.host import get_distrib_codename, get_system_env
 
 from charmhelpers.core.hookenv import (
     log,
@@ -29,6 +29,7 @@
     env_proxy_settings,
 )
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
+from charmhelpers.fetch import ubuntu_apt_pkg
 
 PROPOSED_POCKET = (
     "# Proposed\n"
@@ -216,18 +217,42 @@ def filter_missing_packages(packages):
     )
 
 
-def apt_cache(in_memory=True, progress=None):
-    """Build and return an apt cache."""
-    from apt import apt_pkg
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache(progress)
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to keep the consumer's Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25',
+            level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
 
 
 def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages."""
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Union[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
 
@@ -244,7 +269,17 @@ def apt_install(packages, options=None, fatal=False):
 
 
 def apt_upgrade(options=None, fatal=False, dist=False):
-    """Upgrade all packages."""
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
    if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
 
@@ -265,7 +300,15 @@ def apt_update(fatal=False):
 
 
 def apt_purge(packages, fatal=False):
-    """Purge one or more packages."""
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Union[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     cmd = ['apt-get', '--assume-yes', 'purge']
     if isinstance(packages, six.string_types):
         cmd.append(packages)
@@ -276,7 +319,14 @@ def apt_purge(packages, fatal=False):
 
 
 def apt_autoremove(purge=True, fatal=False):
-    """Purge one or more packages."""
+    """Remove packages that are no longer required.
+
+    :param purge: Whether the ``--purge`` option should be passed on or not.
+    :type purge: bool
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     cmd = ['apt-get', '--assume-yes', 'autoremove']
     if purge:
         cmd.append('--purge')
@@ -660,21 +710,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
                       retry_message="", cmd_env=None):
     """Run a command and retry until success or max_retries is reached.
 
-    :param: cmd: str: The apt command to run.
-    :param: max_retries: int: The number of retries to attempt on a fatal
-        command. Defaults to CMD_RETRY_COUNT.
-    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
-        Defaults to retry on exit code 1.
-    :param: retry_message: str: Optional log prefix emitted during retries.
-    :param: cmd_env: dict: Environment variables to add to the command run.
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param max_retries: The number of retries to attempt on a fatal
+                        command. Defaults to CMD_RETRY_COUNT.
+    :type max_retries: int
+    :param retry_exitcodes: Optional additional exit codes to retry.
+                            Defaults to retry on exit code 1.
+    :type retry_exitcodes: tuple
+    :param retry_message: Optional log prefix emitted during retries.
+    :type retry_message: str
+    :param cmd_env: Environment variables to add to the command run.
+    :type cmd_env: Union[None, Dict[str, str]]
     """
-
-    env = None
-    kwargs = {}
+    env = get_apt_dpkg_env()
     if cmd_env:
-        env = os.environ.copy()
         env.update(cmd_env)
-        kwargs['env'] = env
 
     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -686,8 +737,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            # result = subprocess.check_call(cmd, env=env)
-            result = subprocess.check_call(cmd, **kwargs)
+            result = subprocess.check_call(cmd, env=env)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
@@ -700,22 +750,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
 def _run_apt_command(cmd, fatal=False):
     """Run an apt command with optional retries.
 
-    :param: cmd: str: The apt command to run.
-    :param: fatal: bool: Whether the command's output should be checked and
-        retried.
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
     """
-    # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
-    cmd_env = {
-        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
     if fatal:
         _run_with_retries(
-            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+            cmd, retry_exitcodes=(1, APT_NO_LOCK,),
             retry_message="Couldn't acquire DPKG lock")
     else:
-        env = os.environ.copy()
-        env.update(cmd_env)
-        subprocess.call(cmd, env=env)
+        subprocess.call(cmd, env=get_apt_dpkg_env())
 
 
 def get_upstream_version(package):
@@ -723,7 +769,6 @@ def get_upstream_version(package):
 
     @returns None (if not installed) or the upstream version
     """
-    import apt_pkg
     cache = apt_cache()
     try:
         pkg = cache[package]
@@ -735,4 +780,18 @@ def get_upstream_version(package):
         # package is known, but no version is currently installed.
         return None
 
-    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
+    return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str)
+
+
+def get_apt_dpkg_env():
+    """Get environment suitable for execution of APT and DPKG tools.
+
+    We keep this in a helper function instead of in a global constant to
+    avoid execution on import of the library.
+
+    :returns: Environment suitable for execution of APT and DPKG tools.
+    :rtype: Dict[str, str]
+    """
+    # The fallback is used in the event of ``/etc/environment`` not containing
+    # a valid PATH variable.
+    return {'DEBIAN_FRONTEND': 'noninteractive',
+            'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
new file mode 100644
index 00000000..104f91f1
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -0,0 +1,237 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide a subset of the ``python-apt`` module API.
+
+Data collection is done through subprocess calls to ``apt-cache`` and
+``dpkg-query`` commands.
+
+The main purpose for this module is to avoid dependency on the
+``python-apt`` python module.
+
+The indicated python module is a wrapper around the ``apt`` C++ library
+which is tightly connected to the version of the distribution it was
+shipped on. It is not developed in a backward/forward compatible manner.
+
+This in turn makes it incredibly hard to distribute as a wheel for a piece
+of python software that supports a span of distro releases [0][1].
+
+Upstream feedback like [2] does not give confidence that this will ever
+change, so with this module we get rid of the dependency.
+ +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
+            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            previous = None
+            pkg = {}
+            for line in output.splitlines():
+                if not line:
+                    if 'package' in pkg:
+                        pkgs.update({pkg['package']: pkg})
+                        pkg = {}
+                    continue
+                if line.startswith(' '):
+                    if previous and previous in pkg:
+                        pkg[previous] += os.linesep + line.lstrip()
+                    continue
+                if ':' in line:
+                    kv = line.split(':', 1)
+                    key = kv[0].lower()
+                    if key == 'n':
+                        continue
+                    previous = key
+                    pkg.update({key: kv[1].lstrip()})
+        except subprocess.CalledProcessError as cp:
+            # ``apt-cache`` returns 100 if none of the packages asked for
+            # exist in the apt cache.
+            if cp.returncode != 100:
+                raise
+        return pkgs
+
+
+def init():
+    """Compatibility shim that does nothing."""
+    pass
+
+
+def upstream_version(version):
+    """Extracts upstream version from a version string.
+
+    Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+                        apt-pkg/deb/debversion.cc#L259
+
+    :param version: Version string
+    :type version: str
+    :returns: Upstream version
+    :rtype: str
+    """
+    if version:
+        version = version.split(':')[-1]
+        version = version.split('-')[0]
+    return version
+
+
+def version_compare(a, b):
+    """Compare the given versions.
+
+    Call out to ``dpkg`` to make sure the code doing the comparison is
+    compatible with what the ``apt`` library would do. Mimic the return
+    values.
+
+    Upstream reference:
+    https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+    ?highlight=version_compare#apt_pkg.version_compare
+
+    :param a: version string
+    :type a: str
+    :param b: version string
+    :type b: str
+    :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+              <0 if ``a`` is smaller than ``b``
+    :rtype: int
+    :raises: subprocess.CalledProcessError, RuntimeError
+    """
+    for op in ('gt', 1), ('eq', 0), ('lt', -1):
+        try:
+            subprocess.check_call(['dpkg', '--compare-versions',
+                                   a, op[0], b],
+                                  stderr=subprocess.STDOUT,
+                                  universal_newlines=True)
+            return op[1]
+        except subprocess.CalledProcessError as cp:
+            if cp.returncode == 1:
+                continue
+            raise
+    else:
+        raise RuntimeError('Unable to compare "{}" and "{}", according to '
+                           'our logic they are neither greater, equal nor '
+                           'less than each other.'.format(a, b))
diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index df474c2e..0db98a11 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -318,7 +318,9 @@ def _pause_resume_helper(f, configs):
 def get_pkg_version(name):
     from apt import apt_pkg
     pkg = apt_cache()[name]
-    version = apt_pkg.upstream_version(pkg.current_ver.ver_str)
+    version = None
+    if pkg.current_ver:
+        version = apt_pkg.upstream_version(pkg.current_ver.ver_str)
     return version

From 8046865c8b545ad47b84ac4667df1eaf084956c7 Mon Sep 17 00:00:00 2001
From: tpsilva
Date: Fri, 12 Jul 2019 16:03:38 -0300
Subject: [PATCH 1825/2699] Add support for 'add-permissions-to-key' requests

Currently add-permissions-to-key requests are not being handled by this
charm, so if cinder-ceph or glance is deployed with
restrict-ceph-pools=True, it will not be configured properly. This patch
ports the implementation of the add-permissions-to-key handling from the
ceph-mon charm, and updates other methods required to properly set up
cinder-ceph and glance ceph pools.
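As a rough illustration of what this change handles: a client charm sends a broker request whose op matches the subject above and whose keys mirror the request.get() calls in handle_add_permissions_to_key() in the diff below. The 'api-version'/'ops' envelope is the usual Ceph broker framing and is assumed here rather than shown in this patch; the client and group names are hypothetical:

import json

# Hypothetical broker request from a client charm (e.g. cinder-ceph
# deployed with restrict-ceph-pools=True); keys match the handler below.
broker_req = json.dumps({
    'api-version': 1,
    'ops': [{
        'op': 'add-permissions-to-key',
        'name': 'cinder-ceph',      # cephx client to update
        'group': 'volumes',         # named group of pools to grant
        'group-permission': 'rwx',  # optional; the handler defaults to "rwx"
    }],
})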
Change-Id: If5950af155e10ee4d140300fd7ed5730798b65f0 Closes-bug: #1836408 --- ceph-proxy/hooks/ceph.py | 68 ++++-- ceph-proxy/hooks/ceph_broker.py | 268 ++++++++++++++++++++-- ceph-proxy/hooks/ceph_hooks.py | 15 +- ceph-proxy/unit_tests/test_ceph.py | 42 ++-- ceph-proxy/unit_tests/test_ceph_broker.py | 1 - 5 files changed, 340 insertions(+), 54 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index b4878d7a..a85a84ae 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -28,6 +28,7 @@ ) from charmhelpers.core.hookenv import ( log, + DEBUG, ERROR, cached, status_set, @@ -386,11 +387,41 @@ def _config_user_key(name): return k -def get_named_key(name, caps=None): - config_user_key = _config_user_key(name) - if config_user_key: - return config_user_key +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :returns: Returns a cephx key + """ + key_name = 'client.{}'.format(name) + try: + # Does the key already exist? + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', config('admin-user'), + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + get_unit_hostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + # NOTE(jamespage); + # Apply any changes to key capabilities, dealing with + # upgrades which requires new caps for operation. + upgrade_key_caps(key_name, + caps or _default_caps, + pool_list) + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! + log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps cmd = [ "sudo", @@ -402,21 +433,26 @@ def get_named_key(name, caps=None): '/var/lib/ceph/mon/ceph-{}/keyring'.format( get_unit_hostname() ), - 'auth', 'get-or-create', 'client.{}'.format(name), + 'auth', 'get-or-create', key_name, ] # Add capabilities for subsystem, subcaps in caps.items(): - cmd.extend([ - subsystem, - '; '.join(subcaps), - ]) - return parse_key(subprocess - .check_output(cmd) - .decode('utf-8') + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) .strip()) # IGNORE:E1103 -def upgrade_key_caps(key, caps): +def upgrade_key_caps(key, caps, pool_list=None): """ Upgrade key to have capabilities caps """ if not is_leader(): # Not the MON leader OR not clustered @@ -425,6 +461,12 @@ def upgrade_key_caps(key, caps): "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key ] for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) subprocess.check_call(cmd) diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/hooks/ceph_broker.py index 000be489..ec55a67d 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/hooks/ceph_broker.py @@ -2,8 +2,11 @@ # # Copyright 2015 Canonical Ltd. 
# +import collections import json import six +from subprocess import check_call, CalledProcessError + from charmhelpers.core.hookenv import ( log, @@ -16,6 +19,8 @@ delete_pool, erasure_profile_exists, get_osds, + monitor_key_get, + monitor_key_set, pool_exists, pool_set, remove_pool_snapshot, @@ -139,19 +144,37 @@ def handle_create_erasure_profile(request, service): def handle_erasure_pool(request, service): + """Create a new erasure coded pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') + weight = request.get('weight') + group_name = request.get('group') if erasure_profile is None: erasure_profile = "default-canonical" + app_name = request.get('app-name') + # Check for missing params if pool_name is None: msg = "Missing parameter. name is required for the pool" log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds if not erasure_profile_exists(service=service, name=erasure_profile): # TODO: Fail and tell them to create the profile or default @@ -161,23 +184,33 @@ def handle_erasure_pool(request, service): return {'exit-code': 1, 'stderr': msg} pool = ErasurePool(service=service, name=pool_name, - erasure_code_profile=erasure_profile) + erasure_code_profile=erasure_profile, + percent_data=weight, app_name=app_name) # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (erasure_profile=%s)" % (pool.name, - erasure_profile), - level=INFO) + log("Creating pool '{}' (erasure_profile={})" + .format(pool.name, erasure_profile), level=INFO) pool.create() # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_replicated_pool(request, service): + """Create a new replicated pool. + + :param request: dict of request operations and params. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ pool_name = request.get('name') replicas = request.get('replicas') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') + weight = request.get('weight') + group_name = request.get('group') # Optional params pg_num = request.get('pg_num') @@ -187,27 +220,44 @@ def handle_replicated_pool(request, service): if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) + app_name = request.get('app-name') # Check for missing params if pool_name is None or replicas is None: msg = "Missing parameter. 
name and replicas are required" log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} + if group_name: + group_namespace = request.get('group-namespace') + # Add the pool to the group named "group_name" + add_pool_to_group(pool=pool_name, + group=group_name, + namespace=group_namespace) + + kwargs = {} + if pg_num: + kwargs['pg_num'] = pg_num + if weight: + kwargs['percent_data'] = weight + if replicas: + kwargs['replicas'] = replicas + if app_name: + kwargs['app_name'] = app_name + pool = ReplicatedPool(service=service, - name=pool_name, - replicas=replicas, - pg_num=pg_num) + name=pool_name, **kwargs) if not pool_exists(service=service, name=pool_name): - log("Creating pool '%s' (replicas=%s)" % (pool.name, replicas), + log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) pool.create() else: - log("Pool '%s' already exists - skipping create" % pool.name, + log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_create_cache_tier(request, service): @@ -270,6 +320,192 @@ def handle_set_pool_value(request, service): value=params['value']) +def handle_add_permissions_to_key(request, service): + """Groups are defined by the key cephx.groups.(namespace-)?-(name). This + key will contain a dict serialized to JSON with data about the group, + including pools and members. + + A group can optionally have a namespace defined that will be used to + further restrict pool access. + """ + resp = {'exit-code': 0} + + service_name = request.get('name') + group_name = request.get('group') + group_namespace = request.get('group-namespace') + if group_namespace: + group_name = "{}-{}".format(group_namespace, group_name) + group = get_group(group_name=group_name) + service_obj = get_service_groups(service=service_name, + namespace=group_namespace) + if request.get('object-prefix-permissions'): + service_obj['object_prefix_perms'] = request.get( + 'object-prefix-permissions') + format("Service object: {}".format(service_obj)) + permission = request.get('group-permission') or "rwx" + if service_name not in group['services']: + group['services'].append(service_name) + save_group(group=group, group_name=group_name) + if permission not in service_obj['group_names']: + service_obj['group_names'][permission] = [] + if group_name not in service_obj['group_names'][permission]: + service_obj['group_names'][permission].append(group_name) + save_service(service=service_obj, service_name=service_name) + service_obj['groups'] = _build_service_groups(service_obj, + group_namespace) + update_service_permissions(service_name, service_obj, group_namespace) + + return resp + + +def add_pool_to_group(pool, group, namespace=None): + """Add a named pool to a named group""" + group_name = group + if namespace: + group_name = "{}-{}".format(namespace, group_name) + group = get_group(group_name=group_name) + if pool not in group['pools']: + group["pools"].append(pool) + save_group(group, group_name=group_name) + for service in group['services']: + update_service_permissions(service, namespace=namespace) + + +def pool_permission_list_for_service(service): + """Build the permission string for Ceph for a given service""" + permissions = [] + permission_types = collections.OrderedDict() + for permission, group in 
sorted(service["group_names"].items()): + if permission not in permission_types: + permission_types[permission] = [] + for item in group: + permission_types[permission].append(item) + for permission, groups in permission_types.items(): + permission = "allow {}".format(permission) + for group in groups: + for pool in service['groups'][group].get('pools', []): + permissions.append("{} pool={}".format(permission, pool)) + for permission, prefixes in sorted( + service.get("object_prefix_perms", {}).items()): + for prefix in prefixes: + permissions.append("allow {} object_prefix {}".format(permission, + prefix)) + return ['mon', 'allow r, allow command "osd blacklist"', + 'osd', ', '.join(permissions)] + + +def update_service_permissions(service, service_obj=None, namespace=None): + """Update the key permissions for the named client in Ceph""" + if not service_obj: + service_obj = get_service_groups(service=service, namespace=namespace) + permissions = pool_permission_list_for_service(service_obj) + call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e)) + + +def save_service(service_name, service): + """Persist a service in the monitor cluster""" + service['groups'] = {} + return monitor_key_set(service='admin', + key="cephx.services.{}".format(service_name), + value=json.dumps(service, sort_keys=True)) + + +def save_group(group, group_name): + """Persist a group in the monitor cluster""" + group_key = get_group_key(group_name=group_name) + return monitor_key_set(service='admin', + key=group_key, + value=json.dumps(group, sort_keys=True)) + + +def get_group(group_name): + """A group is a structure to hold data about a named group, structured as: + { + pools: ['glance'], + services: ['nova'] + } + """ + group_key = get_group_key(group_name=group_name) + group_json = monitor_key_get(service='admin', key=group_key) + try: + group = json.loads(group_json) + except (TypeError, ValueError): + group = None + if not group: + group = { + 'pools': [], + 'services': [] + } + return group + + +def get_group_key(group_name): + """Build group key""" + return 'cephx.groups.{}'.format(group_name) + + +def get_service_groups(service, namespace=None): + """Services are objects stored with some metadata, they look like (for a + service named "nova"): + { + group_names: {'rwx': ['images']}, + groups: {} + } + After populating the group, it looks like: + { + group_names: {'rwx': ['images']}, + groups: { + 'images': { + pools: ['glance'], + services: ['nova'] + } + } + } + """ + service_json = monitor_key_get(service='admin', + key="cephx.services.{}".format(service)) + try: + service = json.loads(service_json) + except (TypeError, ValueError): + service = None + if service: + service['groups'] = _build_service_groups(service, namespace) + else: + service = {'group_names': {}, 'groups': {}} + return service + + +def _build_service_groups(service, namespace=None): + """Rebuild the 'groups' dict for a service group + + :returns: dict: dictionary keyed by group name of the following + format: + + { + 'images': { + pools: ['glance'], + services: ['nova', 'glance] + }, + 'vms':{ + pools: ['nova'], + services: ['nova'] + } + } + """ + all_groups = {} + for groups in service['group_names'].values(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + all_groups[group] = get_group(group_name=name) + return all_groups + + def 
process_requests_v1(reqs): """Process v1 requests. @@ -322,6 +558,8 @@ def process_requests_v1(reqs): snapshot_name=snapshot_name) elif op == "set-pool-value": ret = handle_set_pool_value(request=req, service=svc) + elif op == "add-permissions-to-key": + ret = handle_add_permissions_to_key(request=req, service=svc) else: msg = "Unknown operation '%s'" % op log(msg, level=ERROR) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 322ccb0a..a637ff69 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -170,7 +170,7 @@ def radosgw_relation(relid=None, unit=None): 'ceph-public-address': ceph_addrs, } - settings = relation_get(rid=relid, unit=unit) + settings = relation_get(rid=relid, unit=unit) or {} """Process broker request(s).""" if 'broker_req' in settings: rsp = process_requests(settings['broker_req']) @@ -185,7 +185,7 @@ def radosgw_relation(relid=None, unit=None): @hooks.hook('client-relation-joined') -def client_relation_joined(relid=None): +def client_relation_joined(relid=None, unit=None): if ready(): service_name = None if relid is None: @@ -202,6 +202,15 @@ def client_relation_joined(relid=None): 'auth': config('auth-supported'), 'ceph-public-address': ceph_addrs} + settings = relation_get(rid=relid, unit=unit) or {} + data_update = {} + if 'broker_req' in settings: + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data_update[unit_response_key] = rsp + data.update(data_update) + log('relation_set (%s): %s' % (relid, str(data)), level=DEBUG) relation_set(relation_id=relid, relation_settings=data) @@ -213,7 +222,7 @@ def client_relation_joined(relid=None): def client_relation_changed(): """Process broker requests from ceph client relations.""" if ready(): - settings = relation_get() + settings = relation_get() or {} if 'broker_req' in settings: # the request is processed only by the leader as reported by juju if not is_leader(): diff --git a/ceph-proxy/unit_tests/test_ceph.py b/ceph-proxy/unit_tests/test_ceph.py index 4cb55b2d..5b951e91 100644 --- a/ceph-proxy/unit_tests/test_ceph.py +++ b/ceph-proxy/unit_tests/test_ceph.py @@ -1,4 +1,5 @@ import collections +import subprocess import unittest import mock @@ -48,26 +49,34 @@ def test_config_empty_user_key(self, mock_config): named_key = ceph._config_user_key(user_name) self.assertEqual(named_key, None) + @mock.patch('subprocess.check_output') @mock.patch('ceph.config') - def test_get_named_key_populated(self, mock_config): - user_name = 'glance' - user_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' + def test_get_named_key_new(self, mock_config, mock_check_output): + user_name = 'cinder-ceph' + expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' + expected_output = ('[client.testuser]\n key = {}' + .format(expected_key)) - mock_config.side_effect = self.populated_config_side_effect + def check_output_side_effect(cmd): + if 'get-or-create' in cmd: + return expected_output.encode('utf-8') + else: + raise subprocess.CalledProcessError(1, "") + + mock_config.side_effect = self.empty_config_side_effect + mock_check_output.side_effect = check_output_side_effect named_key = ceph.get_named_key(user_name) + print(named_key) - self.assertEqual(user_key, named_key) + self.assertEqual(expected_key, named_key) @mock.patch('subprocess.check_output') @mock.patch('ceph.get_unit_hostname') @mock.patch('ceph.ceph_user') @mock.patch('ceph.config') - def test_get_named_key_empty(self, mock_config, 
mock_ceph_user, - mock_get_unit_hostname, mock_check_output): + def test_get_named_key_existing(self, mock_config, mock_ceph_user, + mock_get_unit_hostname, mock_check_output): user_name = 'cinder-ceph' - user_type = 'client' - admin_user = 'client.myadmin' - user_spec = '{}.{}'.format(user_type, user_name) expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' expected_output = ('[client.testuser]\n key = {}' .format(expected_key)) @@ -77,18 +86,7 @@ def test_get_named_key_empty(self, mock_config, mock_ceph_user, ceph_proxy_host = 'cephproxy' mock_get_unit_hostname.return_value = ceph_proxy_host - def check_output_side_effect(cmd): - return { - ' '.join(['sudo', '-u', ceph_user, 'ceph', '--name', - admin_user, - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - ceph_proxy_host), - 'auth', 'get-or-create', user_spec, 'mon', - 'allow rw', 'osd', 'allow rwx']): (expected_output - .encode('utf-8')) - }[' '.join(cmd)] - mock_check_output.side_effect = check_output_side_effect + mock_check_output.return_value = expected_output.encode('utf-8') mock_config.side_effect = self.empty_config_side_effect mock_ceph_user.return_value = ceph_user named_key = ceph.get_named_key(user_name, caps) diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py index bae4b3d7..c1be6494 100644 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ b/ceph-proxy/unit_tests/test_ceph_broker.py @@ -121,7 +121,6 @@ def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists.assert_called_with(service='admin', name='foo') mock_replicated_pool.assert_called_with(service='admin', name='foo', - pg_num=None, replicas=3) self.assertEqual(json.loads(rc)['exit-code'], 0) self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From c112351ba807332dce918fcc84d0b61fbf568020 Mon Sep 17 00:00:00 2001 From: Rodrigo Barbieri Date: Sun, 15 Sep 2019 21:56:18 -0300 Subject: [PATCH 1826/2699] Add cinder-ceph to CI and functional test Added cinder-ceph to CI to run a new functional test included in zaza. 
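The bundles below exercise the restrict-ceph-pools path end to end. To make
the effect concrete, here is a minimal standalone sketch of how the
group/service documents from the previous patch are flattened into a cephx
capability list by pool_permission_list_for_service; the 'volumes' group and
'cinder-ceph' pool names are assumptions for illustration.

    # Service document as persisted in the monitor cluster after the
    # broker has processed create-pool and add-permissions-to-key.
    service_obj = {
        'group_names': {'rwx': ['volumes']},
        'groups': {'volumes': {'pools': ['cinder-ceph'],
                               'services': ['cinder-ceph']}},
    }

    # Mirrors the loop in pool_permission_list_for_service: one
    # "allow <perm> pool=<name>" entry per pool in each named group.
    permissions = []
    for perm, groups in sorted(service_obj['group_names'].items()):
        for group in groups:
            for pool in service_obj['groups'][group].get('pools', []):
                permissions.append('allow {} pool={}'.format(perm, pool))

    # Applied via: ceph auth caps client.cinder-ceph \
    #   mon 'allow r, allow command "osd blacklist"' \
    #   osd 'allow rwx pool=cinder-ceph'
    caps = ['mon', 'allow r, allow command "osd blacklist"',
            'osd', ', '.join(permissions)]
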
Change-Id: I3a91ff4cc55edd824501d0e190cc239ae03f54a5 --- ceph-proxy/tests/bundles/bionic-queens.yaml | 47 +++++++++++++++++++++ ceph-proxy/tests/bundles/bionic-rocky.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/bionic-stein.yaml | 47 +++++++++++++++++++++ ceph-proxy/tests/bundles/disco-stein.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/trusty-mitaka.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/xenial-mitaka.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/xenial-ocata.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/xenial-pike.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/bundles/xenial-queens.yaml | 46 ++++++++++++++++++++ ceph-proxy/tests/tests.yaml | 7 ++- 10 files changed, 422 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/tests/bundles/bionic-queens.yaml b/ceph-proxy/tests/bundles/bionic-queens.yaml index 2aba41b4..2da0862b 100644 --- a/ceph-proxy/tests/bundles/bionic-queens.yaml +++ b/ceph-proxy/tests/bundles/bionic-queens.yaml @@ -16,8 +16,55 @@ applications: ceph-radosgw: charm: 'cs:~openstack-charmers-next/ceph-radosgw' num_units: 1 + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml index 9d985312..98681218 100644 --- a/ceph-proxy/tests/bundles/bionic-rocky.yaml +++ b/ceph-proxy/tests/bundles/bionic-rocky.yaml @@ -23,8 +23,54 @@ applications: num_units: 1 options: source: cloud:bionic-rocky + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 
'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml index a5bdebd5..9a17be94 100644 --- a/ceph-proxy/tests/bundles/bionic-stein.yaml +++ b/ceph-proxy/tests/bundles/bionic-stein.yaml @@ -23,8 +23,55 @@ applications: num_units: 1 options: source: cloud:bionic-stein + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + diff --git a/ceph-proxy/tests/bundles/disco-stein.yaml b/ceph-proxy/tests/bundles/disco-stein.yaml index 7a4237af..123f8fa1 100644 --- a/ceph-proxy/tests/bundles/disco-stein.yaml +++ b/ceph-proxy/tests/bundles/disco-stein.yaml @@ -16,8 +16,54 @@ applications: ceph-radosgw: charm: 'cs:~openstack-charmers-next/ceph-radosgw' num_units: 1 + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 
'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml index 32861944..6b826ae6 100644 --- a/ceph-proxy/tests/bundles/trusty-mitaka.yaml +++ b/ceph-proxy/tests/bundles/trusty-mitaka.yaml @@ -23,8 +23,54 @@ applications: num_units: 1 options: source: trusty-mitaka + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:trusty/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/xenial-mitaka.yaml b/ceph-proxy/tests/bundles/xenial-mitaka.yaml index 48ba327c..1de24acd 100644 --- a/ceph-proxy/tests/bundles/xenial-mitaka.yaml +++ b/ceph-proxy/tests/bundles/xenial-mitaka.yaml @@ -19,8 +19,54 @@ applications: charm: 'cs:~openstack-charmers-next/ceph-radosgw' num_units: 1 options: + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml b/ceph-proxy/tests/bundles/xenial-ocata.yaml index 351bc5dd..96ac47cf 100644 --- a/ceph-proxy/tests/bundles/xenial-ocata.yaml +++ b/ceph-proxy/tests/bundles/xenial-ocata.yaml @@ -23,8 +23,54 @@ applications: num_units: 1 options: source: xenial-ocata 
+ cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml index 3745ea1f..09ade6cf 100644 --- a/ceph-proxy/tests/bundles/xenial-pike.yaml +++ b/ceph-proxy/tests/bundles/xenial-pike.yaml @@ -23,8 +23,54 @@ applications: num_units: 1 options: source: xenial-pike + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml index c7aab9de..83bf9558 100644 --- a/ceph-proxy/tests/bundles/xenial-queens.yaml +++ b/ceph-proxy/tests/bundles/xenial-queens.yaml @@ -23,8 +23,54 @@ applications: num_units: 1 options: source: xenial-queens + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + 
admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 9d1ec8dd..f68a6d89 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -35,4 +35,9 @@ target_deploy_status: ceph-radosgw: workload-status: blocked workload-status-message: "Missing relations: mon" - + cinder-ceph: + workload-status: waiting + workload-status-message: "Incomplete relations: ceph" + keystone: + workload-status: active + workload-status-message: "Unit is ready" From ce948319675476ce2eac83b0499545d8d387f24f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 1 Oct 2019 08:09:47 +0200 Subject: [PATCH 1827/2699] Use charm-internal _upgrade_keyring This change includes a charms.ceph sync. The _upgrade_keyring function was removed from charms.ceph so this charm needs to use the already-existing, charm- internal version. Change-Id: Ia7cf352a2456dc85aca6f61d2e88327cd0c15f7e Closes-Bug: #1845975 --- ceph-osd/hooks/ceph_hooks.py | 3 +- ceph-osd/lib/ceph/crush_utils.py | 2 +- ceph-osd/lib/ceph/utils.py | 209 +++++++++++++++++++------------ 3 files changed, 132 insertions(+), 82 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index f9903fd6..8de5870e 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -85,6 +85,7 @@ get_blacklist, get_journal_devices, should_enable_discard, + _upgrade_keyring, ) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.network.ip import ( @@ -124,7 +125,7 @@ def check_for_upgrade(): - if not os.path.exists(ceph._upgrade_keyring): + if not os.path.exists(_upgrade_keyring): log("Ceph upgrade keyring not detected, skipping upgrade checks.") return diff --git a/ceph-osd/lib/ceph/crush_utils.py b/ceph-osd/lib/ceph/crush_utils.py index 8b6876c1..8fe09fa4 100644 --- a/ceph-osd/lib/ceph/crush_utils.py +++ b/ceph-osd/lib/ceph/crush_utils.py @@ -24,7 +24,7 @@ CRUSH_BUCKET = """root {name} {{ id {id} # do not change unnecessarily # weight 0.000 - alg straw + alg straw2 hash 0 # rjenkins1 }} diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 31447037..ee555e25 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -40,6 +40,7 @@ service_start, service_stop, CompareHostReleases, + write_file, ) from charmhelpers.core.hookenv import ( cached, @@ -928,11 +929,13 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and 
os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and + cmp_pkgrevno('ceph', '14.2.0') < 0): + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call( + ['ceph-disk', 'activate', dev_or_path]) def udevadm_settle(): @@ -950,13 +953,12 @@ def rescan_osd_devices(): udevadm_settle() - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" +_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) + return os.path.exists( + '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) def wait_for_bootstrap(): @@ -964,36 +966,6 @@ def wait_for_bootstrap(): time.sleep(3) -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - def generate_monitor_secret(): cmd = [ 'ceph-authtool', @@ -1259,7 +1231,23 @@ def systemd(): return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' +def use_bluestore(): + """Determine whether bluestore should be used for OSD's + + :returns: whether bluestore disk format should be used + :rtype: bool""" + if cmp_pkgrevno('ceph', '12.2.0') < 0: + return False + return config('bluestore') + + def bootstrap_monitor_cluster(secret): + """Bootstrap local ceph mon into the ceph cluster + + :param secret: cephx secret to use for monitor authentication + :type secret: str + :raises: Exception if ceph mon cannot be bootstrapped + """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) @@ -1280,21 +1268,35 @@ def bootstrap_monitor_cluster(secret): perms=0o755) # end changes for Ceph >= 0.61.3 try: - add_keyring_to_ceph(keyring, - secret, - hostname, - path, - done, - init_marker) - + _create_monitor(keyring, + secret, + hostname, + path, + done, + init_marker) + _create_keyrings() except: raise finally: os.unlink(keyring) -@retry_on_exception(3, base_delay=5) -def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): +def _create_monitor(keyring, secret, hostname, path, done, init_marker): + """Create monitor filesystem and enable and start ceph-mon process + + :param keyring: path to temporary keyring on disk + :type keyring: str + :param secret: cephx secret to use for monitor authentication + :type: secret: str + :param hostname: hostname of the local unit + :type hostname: str + :param path: full path to ceph mon directory + :type path: str + :param done: full path to 'done' marker for ceph mon + :type done: str + :param init_marker: full path to 'init' marker for ceph mon + :type init_marker: str + """ subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', '--add-key={}'.format(secret), @@ -1310,39 +1312,72 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): pass if systemd(): - 
subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + subprocess.check_call(['systemctl', 'enable', systemd_unit]) + service_restart(systemd_unit) else: service_restart('ceph-mon-all') - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. - # NOTE(fnordahl): Explicitly run `ceph-crate-keys` for older - # ceph releases too. This improves bootstrap - # resilience as the charm will wait for - # presence of peer units before attempting - # to bootstrap. Note that charms deploying - # ceph-mon service should disable running of - # `ceph-create-keys` service in init system. - cmd = ['ceph-create-keys', '--id', hostname] - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 - # seconds is not adequate. Increase timeout when - # timeout parameter available. For older releases - # we rely on retry_on_exception decorator. - # LP#1719436 - cmd.extend(['--timeout', '1800']) - subprocess.check_call(cmd) - _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' - osstat = os.stat(_client_admin_keyring) - if not osstat.st_size: - # NOTE(fnordahl): Retry will fail as long as this file exists. - # LP#1719436 - os.remove(_client_admin_keyring) - raise Exception + +@retry_on_exception(3, base_delay=5) +def _create_keyrings(): + """Create keyrings for operation of ceph-mon units + + :raises: Exception if keyrings cannot be created + """ + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + # NOTE(jamespage): At Nautilus, keys are created by the + # monitors automatically and just need + # exporting. + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get', 'client.admin', + ]).decode('UTF-8')).strip() + if not output: + # NOTE: key not yet created, raise exception and retry + raise Exception + write_file(_client_admin_keyring, output, + owner=ceph_user(), group=ceph_user(), + perms=0o400) + else: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older + # ceph releases too. This improves bootstrap + # resilience as the charm will wait for + # presence of peer units before attempting + # to bootstrap. Note that charms deploying + # ceph-mon service should disable running of + # `ceph-create-keys` service in init system. + cmd = ['ceph-create-keys', '--id', socket.gethostname()] + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate. Increase timeout when + # timeout parameter available. For older releases + # we rely on retry_on_exception decorator. + # LP#1719436 + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) + osstat = os.stat(_client_admin_keyring) + if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. 
+ # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception def update_monfs(): @@ -1427,6 +1462,10 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, ignore_errors, encrypt, bluestore, key_manager) else: + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + log("Directory backed OSDs can not be created on Nautilus", + level=WARNING) + return osdize_dir(dev, encrypt, bluestore) @@ -1555,7 +1594,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append(osd_format) # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '12.2.0') >= 0 and bluestore: + if use_bluestore(): cmd.append('--bluestore') wal = get_devices('bluestore-wal') if wal: @@ -1567,7 +1606,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append('--block.db') least_used_db = find_least_used_utility_device(db) cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.2.0') >= 0 and not bluestore: + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: cmd.append('--filestore') cmd.append(os.path.realpath(dev)) @@ -2704,6 +2743,14 @@ def dirs_need_ownership_update(service): if (curr_owner == expected_owner) and (curr_group == expected_group): continue + # NOTE(lathiat): when config_changed runs on reboot, the OSD might not + # yet be mounted or started, and the underlying directory the OSD is + # mounted to is expected to be owned by root. So skip the check. This + # may also happen for OSD directories for OSDs that were removed. + if (service == 'osd' and + not os.path.exists(os.path.join(child, 'magic'))): + continue + log('Directory "%s" needs its ownership updated' % child, DEBUG) return True @@ -2716,6 +2763,7 @@ def dirs_need_ownership_update(service): ('hammer', 'jewel'), ('jewel', 'luminous'), ('luminous', 'mimic'), + ('mimic', 'nautilus'), ]) # Map UCA codenames to ceph codenames @@ -2731,6 +2779,7 @@ def dirs_need_ownership_update(service): 'queens': 'luminous', 'rocky': 'mimic', 'stein': 'mimic', + 'train': 'nautilus', } From 6537d4fd5ec974db468e2015f176c7f1c3eac0ff Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:19 -0500 Subject: [PATCH 1828/2699] Sync charm/ceph helpers, tox, and requirements Change-Id: I7a2ebaa43184420812b6f348039dd37de47d2488 --- ceph-osd/charm-helpers-hooks.yaml | 1 + .../contrib/hardening/audits/apt.py | 2 +- .../contrib/openstack/amulet/utils.py | 8 +- .../charmhelpers/contrib/openstack/context.py | 23 + .../contrib/openstack/ha/utils.py | 29 +- .../charmhelpers/contrib/openstack/policyd.py | 700 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 51 ++ ceph-osd/hooks/charmhelpers/core/host.py | 27 + .../charmhelpers/core/host_factory/ubuntu.py | 3 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 2 + ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 143 ++-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 237 ++++++ ceph-osd/requirements.txt | 12 +- ceph-osd/test-requirements.txt | 32 +- ceph-osd/tox.ini | 43 +- 16 files changed, 1224 insertions(+), 105 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py create mode 100644 ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 84ef4424..26d981a4 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -22,3 +22,4 @@ include: - utils - contrib.charmsupport - contrib.hardening|inc=* + - 
contrib.openstack.policyd diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py index 3dc14e3c..67521e17 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -13,7 +13,6 @@ # limitations under the License. from __future__ import absolute_import # required for external apt import -from apt import apt_pkg from six import string_types from charmhelpers.fetch import ( @@ -26,6 +25,7 @@ WARNING, ) from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg class AptConfig(BaseAudit): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0a5f81bd..7d95a590 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -709,8 +709,8 @@ def glance_create_image(self, glance, image_name, image_url, '{}...'.format(image_name, image_url)) # Download image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) @@ -800,8 +800,8 @@ def create_cirros_image(self, glance, image_name, hypervisor_type=None): '({})...'.format(image_name)) # Get cirros image URL - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index a6545e12..a3d48c41 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -18,6 +18,7 @@ import math import os import re +import socket import time from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -1716,6 +1717,10 @@ def __call__(self): 'rel_key': 'enable-nfg-logging', 'default': False, }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1745,6 +1750,13 @@ def __call__(self): ctxt['extension_drivers'] = ','.join(extension_drivers) + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + return ctxt def get_neutron_options(self, rdata): @@ -2160,3 +2172,14 @@ def __call__(self): 'logrotate_count': self.count, } return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __call__(self): + ctxt = { + 'host_fqdn': socket.getfqdn(), + 'host': socket.gethostname(), + } + return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index 718c6d65..e017bc20 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -127,7 +127,9 @@ def 
expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service, extra_settings=None): +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json @@ -152,21 +154,18 @@ def generate_ha_relation_data(service, extra_settings=None): @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set """ - _haproxy_res = 'res_{}_haproxy'.format(service) - _relation_data = { - 'resources': { - _haproxy_res: 'lsb:haproxy', - }, - 'resource_params': { + _relation_data = {'resources': {}, 'resource_params': {}} + + if haproxy_enabled: + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} + _relation_data['resource_params'] = { _haproxy_res: 'op monitor interval="5s"' - }, - 'init_services': { - _haproxy_res: 'haproxy' - }, - 'clones': { + } + _relation_data['init_services'] = {_haproxy_res: 'haproxy'} + _relation_data['clones'] = { 'cl_{}_haproxy'.format(service): _haproxy_res - }, - } + } if extra_settings: for k, v in extra_settings.items(): @@ -290,7 +289,7 @@ def update_hacluster_vip(service, relation_data): iface, netmask, fallback = get_vip_settings(vip) - vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"' + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' if iface is not None: # NOTE(jamespage): Delete old VIP resources # Old style naming encoding iface in name diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py new file mode 100644 index 00000000..1adf2472 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -0,0 +1,700 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import os +import six +import shutil +import yaml +import zipfile + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as ch_host + +# Features provided by this module: + +""" +Policy.d helper functions +========================= + +The functions in this module are designed, as a set, to provide an easy-to-use +set of hooks for classic charms to add in /etc//policy.d/ +directory override YAML files. + +(For charms.openstack charms, a mixin class is provided for this +functionality). 
+ +In order to "hook" this functionality into a (classic) charm, two functions are +provided: + + maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=none, + blacklist_keys=none, + template_function=none, + restart_handler=none) + + maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None + +(See the docstrings for details on the parameters) + +The functions should be called from the install and upgrade hooks in the charm. +The `maybe_do_policyd_overrides_on_config_changed` function is designed to be +called on the config-changed hook, in that it does an additional check to +ensure that an already overriden policy.d in an upgrade or install hooks isn't +repeated. + +In order the *enable* this functionality, the charm's install, config_changed, +and upgrade_charm hooks need to be modified, and a new config option (see +below) needs to be added. The README for the charm should also be updated. + +Examples from the keystone charm are: + +@hooks.hook('install.real') +@harden() +def install(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + + +@hooks.hook('config-changed') +@restart_on_change(restart_map(), restart_functions=restart_function_map()) +@harden() +def config_changed(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), + 'keystone') + +@hooks.hook('upgrade-charm') +@restart_on_change(restart_map(), stopstart=True) +@harden() +def upgrade_charm(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + +Status Line +=========== + +The workload status code in charm-helpers has been modified to detect if +policy.d override code has been incorporated into the charm by checking for the +new config variable (in the config.yaml). If it has been, then the workload +status line will automatically show "PO:" at the beginning of the workload +status for that unit/service if the config option is set. If the policy +override is broken, the "PO (broken):" will be shown. No changes to the charm +(apart from those already mentioned) are needed to enable this functionality. +(charms.openstack charms also get this functionality, but please see that +library for further details). +""" + +# The config.yaml for the charm should contain the following for the config +# option: + +""" + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override yaml files in the service's policy.d directory. The resource + file should be a zip file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. +""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This service allows for policy overrides using the `policy.d` directory. 
This +is an **advanced** feature and the policies that the service supports should be +clearly and unambiguously understood before trying to override, or add to, the +default policies that the service uses. + +The charm also has some policy defaults. They should also be understood before +being overridden. It is possible to break the system (for tenants and other +services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Policy overrides are provided to the charm using a resource file called +`policyd-override`. This is attached to the charm using (for example): + + juju attach-resource policyd-override= + +The `` is the name that this charm is deployed as, with +`` being the resource file containing the policy overrides. + +The format of the resource file is a ZIP file (.zip extension) containing at +least one YAML file with an extension of `.yaml` or `.yml`. Note that any +directories in the ZIP file are ignored; all of the files are flattened into a +single directory. There must not be any duplicated filenames; this will cause +an error and nothing in the resource file will be applied. + +(ed. next part is optional is the charm supports some form of +template/substitution on a read file) + +If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the +resource file then the charm will perform a substitution with charm variables +taken from the config or relations. (ed. edit as appropriate to include the +variable). + +To enable the policy overrides the config option `use-policyd-override` must be +set to `True`. + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. The status line indicates that the overrides are broken, +not that the policy for the service has failed - they will be the defaults for +the charm and service. + +If the policy overrides did not install then *either* attach a new, corrected, +resource file *or* disable the policy overrides by setting +`use-policyd-override` to False. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. 
+""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. 
+ :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + if not is_policyd_override_valid_on_this_release(openstack_release): + return + # from now on it should succeed; if it doesn't then status line will show + # broken. + resource_filename = get_policy_resource_filename() + restart = process_policy_resource_file( + resource_filename, service, blacklist_paths, blacklist_keys, + template_function) + if restart and restart_handler is not None and callable(restart_handler): + restart_handler() + + +def maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """This function is designed to be called from the config changed hook + handler. It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + + See maybe_do_policyd_overrides() for more details on the params. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + # if the policyd overrides have been performed just return + if os.path.isfile(_policy_success_file()): + return + maybe_do_policyd_overrides( + openstack_release, service, blacklist_paths, blacklist_keys, + template_function, restart_handler) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. 
Note that the yaml files are not checked.  This is the
+    first stage in validating the policy zipfile; individual yaml files are
+    not checked for validity or blacklisted keys.
+
+    An example of use is:
+
+        with open_and_filter_yaml_files(some_path) as (zfp, g):
+            for zipinfo in g:
+                # do something with zipinfo ...
+
+    :param filepath: a filepath object that can be opened by zipfile
+    :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
+    :returns: (zfp handle,
+               a list of the (name, ext, filename, ZipInfo object) tuples) as
+               a tuple.
+    :rtype: ContextManager[Tuple[zipfile.ZipFile,
+                                 List[Tuple[str, str, str, zipfile.ZipInfo]]]]
+    :raises: zipfile.BadZipFile
+    :raises: BadPolicyZipFile if duplicated yaml or missing
+    :raises: IOError if the filepath is not found
+    """
+    with zipfile.ZipFile(filepath, 'r') as zfp:
+        # first pass through; check for duplicates and at least one yaml file.
+        names = collections.defaultdict(int)
+        yamlfiles = _yamlfiles(zfp)
+        for name, _, _, _ in yamlfiles:
+            names[name] += 1
+        # There must be at least 1 yaml file.
+        if not names:
+            raise BadPolicyZipFile("contains no yaml files with {} extensions."
+                                   .format(", ".join(POLICYD_VALID_EXTS)))
+        # There must be no duplicates
+        duplicates = [n for n, c in names.items() if c > 1]
+        if duplicates:
+            raise BadPolicyZipFile("{} have duplicates in the zip file."
+                                   .format(", ".join(duplicates)))
+        # Finally, yield the zip handle and the list of yaml files
+        yield (zfp, yamlfiles)
+
+
+def _yamlfiles(zipfile):
+    """Helper to get the yaml files (according to POLICYD_VALID_EXTS
+    extensions) and the infolist items from a zipfile.
+
+    :param zipfile: the zipfile to read zipinfo items from
+    :type zipfile: zipfile.ZipFile
+    :returns: list of (name, ext, filename, info item) tuples, one for each
+        self-identified yaml file.
+    :rtype: List[Tuple[str, str, str, zipfile.ZipInfo]]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        if infolist_item.is_dir():
+            continue
+        _, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read and checked to be a yaml mapping.  The top-level keys are
+    checked against the blacklist_keys provided.  If there are problems then an
+    Exception is raised.  Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+        an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+        blacklisted keys in the file.
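+
+    A minimal sketch of the expected document shape (ed. the rule name below
+    is hypothetical; the document must be a flat str:str mapping):
+
+        "identity:list_users": "rule:admin_required"
+
+    which yaml.safe_load() would return as the Python dict
+    {"identity:list_users": "rule:admin_required"}.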
+    """
+    blacklist_keys = blacklist_keys or []
+    # NOTE: extend, not append, so that the always-blacklisted keys are
+    # compared as individual strings by the intersection below.
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    This assumes the default name of "policy.d" which is kept across all
+    charms.
+
+    :param service: the service name to construct the directory for
+    :type service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service.  Note that the
+    service name is passed in, and then the policyd_dir_for() function is used.
+    This is so that a coding error doesn't result in a sudden deletion of the
+    wrong directory (say, the charm directory).
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+    :type keep_paths: Union[None, List[str]]
+    """
+    keep_paths = keep_paths or []
+    path = policyd_dir_for(service)
+    if not os.path.exists(path):
+        ch_host.mkdir(path, owner=service, group=service, perms=0o775)
+    _scanner = os.scandir if six.PY3 else _py2_scandir
+    for direntry in _scanner(path):
+        # see if the path should be kept.
+        if direntry.path in keep_paths:
+            continue
+        # we remove any directories; it's ours and there shouldn't be any
+        if direntry.is_dir():
+            shutil.rmtree(direntry.path)
+        else:
+            os.remove(direntry.path)
+
+
+def _py2_scandir(path):
+    """Provide a py2 implementation of os.scandir if this module ever gets used
+    in a py2 charm (unlikely).  Uses os.listdir() to get the names in the path,
+    and then mocks the is_dir() function using os.path.isdir() to check for a
+    directory.
+
+    :param path: the path to list the directories for
+    :type path: str
+    :returns: Generator that provides _P27Direntry objects
+    :rtype: Generator[_P27Direntry]
+    """
+    for f in os.listdir(path):
+        # include the directory so that .path is comparable with keep_paths
+        # and usable by shutil.rmtree()/os.remove() in the caller.
+        yield _P27Direntry(os.path.join(path, f))
+
+
+class _P27Direntry(object):
+    """Mock a scandir Direntry object with enough to use in
+    clean_policyd_dir_for
+    """
+
+    def __init__(self, path):
+        self.path = path
+
+    def is_dir(self):
+        return os.path.isdir(self.path)
+
+
+def path_for_policy_file(service, name):
+    """Return the full path for a policy.d file that will be written to the
+    service's policy.d directory.
+
+    It is constructed using policyd_dir_for(), the name and the ".yaml"
+    extension.
+
+    :param service: the service name
+    :type service: str
+    :param name: the name for the policy override
+    :type name: str
+    :returns: the full path name for the file
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join(policyd_dir_for(service), name + ".yaml")
+
+
+def _policy_success_file():
+    """Return the file name for a successful drop of policy.d overrides
+
+    :returns: the path name for the file.
+    :rtype: str
+    """
+    return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME)
+
+
+def remove_policy_success_file():
+    """Remove the file that indicates successful policyd override."""
+    try:
+        os.remove(_policy_success_file())
+    except Exception:
+        pass
+
+
+def policyd_status_message_prefix():
+    """Return the prefix str for the status line.
+
+    "PO:" indicates that the policy overrides are in place, or "PO (broken):"
+    if the policy is supposed to be working but there is no success file.
+
+    :returns: the prefix
+    :rtype: str
+    """
+    if os.path.isfile(_policy_success_file()):
+        return "PO:"
+    return "PO (broken):"
+
+
+def process_policy_resource_file(resource_file,
+                                 service,
+                                 blacklist_paths=None,
+                                 blacklist_keys=None,
+                                 template_function=None):
+    """Process the resource file (which should contain at least one yaml file)
+    and write those files to the service's policy.d directory.
+
+    The optional template_function is a function that accepts a python
+    string and has an opportunity to modify the document
+    prior to it being read by the yaml.safe_load() function and written to
+    disk.  Note that this function does *not* say how the templating is done -
+    this is up to the charm to implement its chosen method.
+
+    The param blacklist_paths are paths (in the service's policy.d directory)
+    that should not be touched.
+
+    The param blacklist_keys are keys that must not appear in the yaml file.
+    If they do, then the whole policy.d file fails.
+
+    The yaml file extracted from the resource_file (which is a zipped file) has
+    its file path reconstructed.  This, also, must not match any path in the
+    blacklist.
+
+    If any error occurs, then the policy.d directory is cleared, the error is
+    written to the log, and the status line will eventually show as failed.
+
+    :param resource_file: The zipped file to open and extract yaml files from.
+    :type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+        yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the yaml
+        document.
+    :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
+    :returns: True if the processing was successful, False if not.
+    :rtype: bool
+    """
+    blacklist_paths = blacklist_paths or []
+    completed = False
+    try:
+        with open_and_filter_yaml_files(resource_file) as (zfp, gen):
+            # first clear out the policy.d directory and clear success
+            remove_policy_success_file()
+            clean_policyd_dir_for(service, blacklist_paths)
+            for name, ext, filename, zipinfo in gen:
+                # construct a name for the output file.
+ yaml_filename = path_for_policy_file(service, name) + if yaml_filename in blacklist_paths: + raise BadPolicyZipFile("policy.d name {} is blacklisted" + .format(yaml_filename)) + with zfp.open(zipinfo) as fp: + doc = fp.read() + # if template_function is not None, then offer the document + # to the template function + if ext in POLICYD_TEMPLATE_EXTS: + if (template_function is None or not + callable(template_function)): + raise BadPolicyZipFile( + "Template {} but no template_function is " + "available".format(filename)) + doc = template_function(doc) + yaml_doc = read_and_validate_yaml(doc, blacklist_keys) + with open(yaml_filename, "wt") as f: + yaml.dump(yaml_doc, f) + # Every thing worked, so we mark up a success. + completed = True + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except IOError as e: + # technically this shouldn't happen; it would be a programming error as + # the filename comes from Juju and thus, should exist. + hookenv.log( + "File {} failed with IOError. This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, blacklist_paths) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + open(_policy_success_file(), "w").close() + return completed diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index d43a4d20..ac96f844 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -84,7 +84,8 @@ SourceConfigError, GPGKeyError, get_upstream_version, - filter_missing_packages + filter_missing_packages, + ubuntu_apt_pkg as apt, ) from charmhelpers.fetch.snap import ( @@ -96,6 +97,10 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -443,8 +448,6 @@ def get_os_codename_package(package, fatal=True): # Second item in list is Version return line.split()[1] - import apt_pkg as apt - cache = apt_cache() try: @@ -658,7 +661,6 @@ def openstack_upgrade_available(package): a newer version of package. 
""" - import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) if not cur_vers: @@ -864,6 +866,12 @@ def _determine_os_workload_status( message = "Unit is ready" juju_log(message, 'INFO') + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + return state, message diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index a9864467..e13dfa8b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -301,6 +301,7 @@ def __init__(self, service, name, pg_num=None, replicas=2, percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + self.percent_data = percent_data if pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. @@ -324,12 +325,24 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) try: set_app_name_for_pool(client=self.service, pool=self.name, name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -382,6 +395,18 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -389,6 +414,32 @@ def create(self): Returns json formatted output""" +def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable sutoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + def get_mon_map(service): """ Returns the current monitor map. 
diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 32754ff9..b33ac906 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -1075,3 +1075,30 @@ def install_ca_cert(ca_cert, name=None): log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. + + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 0ee2b660..1b57e2ce 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -24,6 +24,7 @@ 'bionic', 'cosmic', 'disco', + 'eoan', ) @@ -93,7 +94,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. """ - import apt_pkg + from charmhelpers.fetch import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache pkgcache = apt_cache() diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 8572d34f..0cc7fc85 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -103,6 +103,8 @@ def base_url(self, url): apt_unhold = fetch.apt_unhold import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 24c76e34..31225235 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -13,14 +13,14 @@ # limitations under the License. 
from collections import OrderedDict -import os import platform import re import six -import time import subprocess +import sys +import time -from charmhelpers.core.host import get_distrib_codename +from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( log, @@ -29,6 +29,7 @@ env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg PROPOSED_POCKET = ( "# Proposed\n" @@ -216,18 +217,42 @@ def filter_missing_packages(packages): ) -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache.""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) +def apt_cache(*_, **__): + """Shim returning an object simulating the apt_pkg Cache. + + :param _: Accept arguments for compability, not used. + :type _: any + :param __: Accept keyword arguments for compability, not used. + :type __: any + :returns:Object used to interrogate the system apt and dpkg databases. + :rtype:ubuntu_apt_pkg.Cache + """ + if 'apt_pkg' in sys.modules: + # NOTE(fnordahl): When our consumer use the upstream ``apt_pkg`` module + # in conjunction with the apt_cache helper function, they may expect us + # to call ``apt_pkg.init()`` for them. + # + # Detect this situation, log a warning and make the call to + # ``apt_pkg.init()`` to avoid the consumer Python interpreter from + # crashing with a segmentation fault. + log('Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + sys.modules['apt_pkg'].init() + return ubuntu_apt_pkg.Cache() def apt_install(packages, options=None, fatal=False): - """Install one or more packages.""" + """Install one or more packages. + + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -244,7 +269,17 @@ def apt_install(packages, options=None, fatal=False): def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" + """Upgrade all packages. + + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` + :type dist: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -265,7 +300,15 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): - """Purge one or more packages.""" + """Purge one or more packages. + + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param fatal: Whether the command's output should be checked and + retried. 
+ :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) @@ -276,7 +319,14 @@ def apt_purge(packages, fatal=False): def apt_autoremove(purge=True, fatal=False): - """Purge one or more packages.""" + """Purge one or more packages. + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'autoremove'] if purge: cmd.append('--purge') @@ -660,21 +710,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None): """Run a command and retry until success or max_retries is reached. - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param: cmd_env: Environment variables to add to the command run. + :type cmd_env: Option[None, Dict[str, str]] """ - - env = None - kwargs = {} + env = get_apt_dpkg_env() if cmd_env: - env = os.environ.copy() env.update(cmd_env) - kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -686,8 +737,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - # result = subprocess.check_call(cmd, env=env) - result = subprocess.check_call(cmd, **kwargs) + result = subprocess.check_call(cmd, env=env) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -700,22 +750,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. 
- cmd_env = { - 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} - if fatal: _run_with_retries( - cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + cmd, retry_exitcodes=(1, APT_NO_LOCK,), retry_message="Couldn't acquire DPKG lock") else: - env = os.environ.copy() - env.update(cmd_env) - subprocess.call(cmd, env=env) + subprocess.call(cmd, env=get_apt_dpkg_env()) def get_upstream_version(package): @@ -723,7 +769,6 @@ def get_upstream_version(package): @returns None (if not installed) or the upstream version """ - import apt_pkg cache = apt_cache() try: pkg = cache[package] @@ -735,4 +780,18 @@ def get_upstream_version(package): # package is known, but no version is currently installed. return None - return apt_pkg.upstream_version(pkg.current_ver.ver_str) + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # avalid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 00000000..104f91f1 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,237 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. 
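+
+(ed. a minimal usage sketch of the shim, assuming the queried package exists
+in the apt cache; it mirrors the small subset of the ``apt_pkg`` API that
+charm-helpers itself uses)
+
+    from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
+
+    cache = apt_pkg.Cache()
+    pkg = cache['ceph-common']
+    if pkg.current_ver:
+        print(pkg.name, pkg.current_ver.ver_str)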
+ +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + previous = None + pkg = {} + for line in output.splitlines(): + if not line: + if 'package' in pkg: + pkgs.update({pkg['package']: pkg}) + pkg = {} + continue + if line.startswith(' '): + if previous and previous in pkg: + pkg[previous] += os.linesep + line.lstrip() + continue + if ':' in line: + kv = line.split(':', 1) + key = kv[0].lower() + if key == 'n': + continue + previous = key + pkg.update({key: kv[1].lstrip()}) + except subprocess.CalledProcessError as cp: + # ``apt-cache`` returns 100 if none of the packages asked for + # exist in the apt cache. + if cp.returncode != 100: + raise + return pkgs + + +def init(): + """Compability shim that does nothing.""" + pass + + +def upstream_version(version): + """Extracts upstream version from a version string. + + Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ + apt-pkg/deb/debversion.cc#L259 + + :param version: Version string + :type version: str + :returns: Upstream version + :rtype: str + """ + if version: + version = version.split(':')[-1] + version = version.split('-')[0] + return version + + +def version_compare(a, b): + """Compare the given versions. + + Call out to ``dpkg`` to make sure the code doing the comparison is + compatible with what the ``apt`` library would do. Mimic the return + values. + + Upstream reference: + https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html + ?highlight=version_compare#apt_pkg.version_compare + + :param a: version string + :type a: str + :param b: version string + :type b: str + :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, + <0 if ``a`` is smaller than ``b`` + :rtype: int + :raises: subprocess.CalledProcessError, RuntimeError + """ + for op in ('gt', 1), ('eq', 0), ('lt', -1): + try: + subprocess.check_call(['dpkg', '--compare-versions', + a, op[0], b], + stderr=subprocess.STDOUT, + universal_newlines=True) + return op[1] + except subprocess.CalledProcessError as cp: + if cp.returncode == 1: + continue + raise + else: + raise RuntimeError('Unable to compare "{}" and "{}", according to ' + 'our logic they are neither greater, equal nor ' + 'less than each other.') diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index b8fec1e2..343beed1 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -1,6 +1,12 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index d76fb046..7d9c2587 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -1,24 +1,18 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. 
Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# charm-tools>=2.4.4 -coverage>=3.6 +requests>=2.18.4 mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 -requests==2.18.4 -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -pytz -pyudev # for ceph-* charm unit tests (not mocked?) +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack \ No newline at end of file +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 2aba0183..20dbbfc5 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -1,30 +1,31 @@ -# Classic charm: ./tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8,py27,py37 +envlist = pep8,py3 skipsdist = True -skip_missing_interpreters = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* +passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt -[testenv:py27] -# ceph charms are Python3-only, but py27 unit test target -# is required by OpenStack Governance. Remove this shim as soon as -# permitted. 
http://governance.openstack.org/reference/cti/python_cti.html -whitelist_externals = true -commands = true -deps = - [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt @@ -40,6 +41,11 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -58,7 +64,7 @@ setenv = PYTHON=coverage run commands = coverage erase - stestr run {posargs} + stestr run --slowest {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml @@ -79,6 +85,11 @@ omit = basepython = python3 commands = {posargs} +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + [testenv:func] basepython = python3 commands = From 0cd448342e4674ba144d6bb757c3bc38c21d5712 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:28 -0500 Subject: [PATCH 1829/2699] Sync charm/ceph helpers, tox, and requirements Change-Id: Ibb199504764b139f0207e0bd2e40a199559e1e44 --- ceph-radosgw/charm-helpers-hooks.yaml | 1 + ceph-radosgw/lib/ceph/broker.py | 17 +- ceph-radosgw/lib/ceph/crush_utils.py | 2 +- ceph-radosgw/lib/ceph/utils.py | 381 ++++++++++++++++++++------ ceph-radosgw/requirements.txt | 12 +- ceph-radosgw/test-requirements.txt | 34 +-- ceph-radosgw/tox.ini | 64 ++++- 7 files changed, 379 insertions(+), 132 deletions(-) diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 3a12b370..fa9cd645 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -15,3 +15,4 @@ include: - contrib.openstack|inc=* - contrib.charmsupport - contrib.hardening|inc=* + - contrib.openstack.policyd diff --git a/ceph-radosgw/lib/ceph/broker.py b/ceph-radosgw/lib/ceph/broker.py index 3e857d21..3226f4cc 100644 --- a/ceph-radosgw/lib/ceph/broker.py +++ b/ceph-radosgw/lib/ceph/broker.py @@ -85,6 +85,7 @@ "compression_mode": [str, ["none", "passive", "aggressive", "force"]], "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], "compression_required_ratio": [float, [0.0, 1.0]], + "crush_rule": [str], } CEPH_BUCKET_TYPES = [ @@ -368,7 +369,8 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -408,8 +410,9 @@ def handle_erasure_pool(request, service): pool.create() # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_replicated_pool(request, service): @@ -421,7 +424,8 @@ def handle_replicated_pool(request, service): """ pool_name = request.get('name') replicas = request.get('replicas') - quota = request.get('max-bytes') + max_bytes = request.get('max-bytes') + max_objects = request.get('max-objects') weight = request.get('weight') group_name = request.get('group') @@ -468,8 +472,9 @@ def handle_replicated_pool(request, service): level=DEBUG) # Set a quota if requested - if quota is not None: - set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota) + if max_bytes or 
max_objects: + set_pool_quota(service=service, pool_name=pool_name, + max_bytes=max_bytes, max_objects=max_objects) def handle_create_cache_tier(request, service): diff --git a/ceph-radosgw/lib/ceph/crush_utils.py b/ceph-radosgw/lib/ceph/crush_utils.py index 8b6876c1..8fe09fa4 100644 --- a/ceph-radosgw/lib/ceph/crush_utils.py +++ b/ceph-radosgw/lib/ceph/crush_utils.py @@ -24,7 +24,7 @@ CRUSH_BUCKET = """root {name} {{ id {id} # do not change unnecessarily # weight 0.000 - alg straw + alg straw2 hash 0 # rjenkins1 }} diff --git a/ceph-radosgw/lib/ceph/utils.py b/ceph-radosgw/lib/ceph/utils.py index 98320acb..ee555e25 100644 --- a/ceph-radosgw/lib/ceph/utils.py +++ b/ceph-radosgw/lib/ceph/utils.py @@ -40,6 +40,7 @@ service_start, service_stop, CompareHostReleases, + write_file, ) from charmhelpers.core.hookenv import ( cached, @@ -82,7 +83,7 @@ PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'radosgw', 'xfsprogs', - 'lvm2', 'parted'] + 'lvm2', 'parted', 'smartmontools'] CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' @@ -928,11 +929,13 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and + cmp_pkgrevno('ceph', '14.2.0') < 0): + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call( + ['ceph-disk', 'activate', dev_or_path]) def udevadm_settle(): @@ -950,13 +953,12 @@ def rescan_osd_devices(): udevadm_settle() - -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" +_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' def is_bootstrapped(): - return os.path.exists(_bootstrap_keyring) + return os.path.exists( + '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) def wait_for_bootstrap(): @@ -964,36 +966,6 @@ def wait_for_bootstrap(): time.sleep(3) -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - def generate_monitor_secret(): cmd = [ 'ceph-authtool', @@ -1132,6 +1104,15 @@ def get_mds_bootstrap_key(): ]) ]) +rbd_mirror_caps = collections.OrderedDict([ + ('mon', ['profile rbd']), + ('osd', ['profile rbd']), +]) + + +def get_rbd_mirror_key(name): + return get_named_key(name=name, caps=rbd_mirror_caps) + def create_named_keyring(entity, name, caps=None): caps = caps or _default_caps @@ -1250,7 +1231,23 @@ def systemd(): return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' +def use_bluestore(): + """Determine whether bluestore should be used for OSD's + + :returns: whether bluestore disk format should be used + :rtype: bool""" + if cmp_pkgrevno('ceph', '12.2.0') < 0: + return False + return config('bluestore') + + 
def bootstrap_monitor_cluster(secret): + """Bootstrap local ceph mon into the ceph cluster + + :param secret: cephx secret to use for monitor authentication + :type secret: str + :raises: Exception if ceph mon cannot be bootstrapped + """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) done = '{}/done'.format(path) @@ -1271,21 +1268,35 @@ def bootstrap_monitor_cluster(secret): perms=0o755) # end changes for Ceph >= 0.61.3 try: - add_keyring_to_ceph(keyring, - secret, - hostname, - path, - done, - init_marker) - + _create_monitor(keyring, + secret, + hostname, + path, + done, + init_marker) + _create_keyrings() except: raise finally: os.unlink(keyring) -@retry_on_exception(3, base_delay=5) -def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): +def _create_monitor(keyring, secret, hostname, path, done, init_marker): + """Create monitor filesystem and enable and start ceph-mon process + + :param keyring: path to temporary keyring on disk + :type keyring: str + :param secret: cephx secret to use for monitor authentication + :type: secret: str + :param hostname: hostname of the local unit + :type hostname: str + :param path: full path to ceph mon directory + :type path: str + :param done: full path to 'done' marker for ceph mon + :type done: str + :param init_marker: full path to 'init' marker for ceph mon + :type init_marker: str + """ subprocess.check_call(['ceph-authtool', keyring, '--create-keyring', '--name=mon.', '--add-key={}'.format(secret), @@ -1301,39 +1312,72 @@ def add_keyring_to_ceph(keyring, secret, hostname, path, done, init_marker): pass if systemd(): - subprocess.check_call(['systemctl', 'enable', 'ceph-mon']) - service_restart('ceph-mon') + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + subprocess.check_call(['systemctl', 'enable', systemd_unit]) + service_restart(systemd_unit) else: service_restart('ceph-mon-all') - # NOTE(jamespage): Later ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. - # NOTE(fnordahl): Explicitly run `ceph-crate-keys` for older - # ceph releases too. This improves bootstrap - # resilience as the charm will wait for - # presence of peer units before attempting - # to bootstrap. Note that charms deploying - # ceph-mon service should disable running of - # `ceph-create-keys` service in init system. - cmd = ['ceph-create-keys', '--id', hostname] - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 - # seconds is not adequate. Increase timeout when - # timeout parameter available. For older releases - # we rely on retry_on_exception decorator. - # LP#1719436 - cmd.extend(['--timeout', '1800']) - subprocess.check_call(cmd) - _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' - osstat = os.stat(_client_admin_keyring) - if not osstat.st_size: - # NOTE(fnordahl): Retry will fail as long as this file exists. - # LP#1719436 - os.remove(_client_admin_keyring) - raise Exception + +@retry_on_exception(3, base_delay=5) +def _create_keyrings(): + """Create keyrings for operation of ceph-mon units + + :raises: Exception if keyrings cannot be created + """ + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + # NOTE(jamespage): At Nautilus, keys are created by the + # monitors automatically and just need + # exporting. 
+ output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get', 'client.admin', + ]).decode('UTF-8')).strip() + if not output: + # NOTE: key not yet created, raise exception and retry + raise Exception + write_file(_client_admin_keyring, output, + owner=ceph_user(), group=ceph_user(), + perms=0o400) + else: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older + # ceph releases too. This improves bootstrap + # resilience as the charm will wait for + # presence of peer units before attempting + # to bootstrap. Note that charms deploying + # ceph-mon service should disable running of + # `ceph-create-keys` service in init system. + cmd = ['ceph-create-keys', '--id', socket.gethostname()] + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate. Increase timeout when + # timeout parameter available. For older releases + # we rely on retry_on_exception decorator. + # LP#1719436 + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) + osstat = os.stat(_client_admin_keyring) + if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. + # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception def update_monfs(): @@ -1418,6 +1462,10 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, ignore_errors, encrypt, bluestore, key_manager) else: + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + log("Directory backed OSDs can not be created on Nautilus", + level=WARNING) + return osdize_dir(dev, encrypt, bluestore) @@ -1546,7 +1594,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): cmd.append(osd_format) # NOTE(jamespage): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: + if use_bluestore(): cmd.append('--bluestore') wal = get_devices('bluestore-wal') if wal: @@ -1683,7 +1731,10 @@ def is_active_bluestore_device(dev): return False vg_name = lvm.list_lvm_volume_group(dev) - lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + try: + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + except IndexError: + return False block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') for block_candidate in block_symlinks: @@ -1900,9 +1951,10 @@ def osdize_dir(path, encrypt=False, bluestore=False): ' skipping'.format(path)) return - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return + for t in ['upstart', 'systemd']: + if os.path.exists(os.path.join(path, t)): + log('Path {} is already used as an OSD dir - bailing'.format(path)) + return if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', @@ -2511,18 +2563,21 @@ def update_owner(path, recurse_dirs=True): secs=elapsed_time.total_seconds(), path=path), DEBUG) -def list_pools(service): +def list_pools(client='admin'): """This will list the current pools that Ceph has - :param service: String service id to run under - :returns: list. Returns a list of the ceph pools. - :raises: CalledProcessError if the subprocess fails to run. 
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Returns a list of available pools.
+    :rtype: list
+    :raises: subprocess.CalledProcessError if the subprocess fails to run.
     """
     try:
         pool_list = []
-        pools = str(subprocess
-                    .check_output(['rados', '--id', service, 'lspools'])
-                    .decode('UTF-8'))
+        pools = subprocess.check_output(['rados', '--id', client, 'lspools'],
+                                        universal_newlines=True,
+                                        stderr=subprocess.STDOUT)
         for pool in pools.splitlines():
             pool_list.append(pool)
         return pool_list
@@ -2531,6 +2586,140 @@ def list_pools(service):
         raise
+
+def get_pool_param(pool, param, client='admin'):
+    """Get parameter from pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param param: Name of variable to get
+    :type param: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Value of variable on pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        output = subprocess.check_output(
+            ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param],
+            universal_newlines=True, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT: option' in cp.output:
+            return None
+        raise
+    if ':' in output:
+        return output.split(':')[1].lstrip().rstrip()
+
+
+def get_pool_erasure_profile(pool, client='admin'):
+    """Get erasure code profile for pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Erasure code profile of pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        return get_pool_param(pool, 'erasure_code_profile', client=client)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 13 and 'EACCES: pool' in cp.output:
+            # Not an erasure coded pool
+            return None
+        raise
+
+
+def get_pool_quota(pool, client='admin'):
+    """Get pool quota.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with quota variables
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    output = subprocess.check_output(
+        ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool],
+        universal_newlines=True, stderr=subprocess.STDOUT)
+    rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)')
+    result = {}
+    for line in output.splitlines():
+        m = rc.match(line)
+        if m:
+            result.update({'max_{}'.format(m.group(1)): m.group(2)})
+    return result
+
+
+def get_pool_applications(pool='', client='admin'):
+    """Get pool applications.
+
+    :param pool: (Optional) Name of pool to get applications for
+                 Defaults to get for all pools
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with pool name as key
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get']
+    if pool:
+        cmd.append(pool)
+    try:
+        output = subprocess.check_output(cmd,
+                                         universal_newlines=True,
+                                         stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT' in cp.output:
+            return {}
+        raise
+    return json.loads(output)
+
+
+def list_pools_detail():
+    """Get detailed information about pools.
+
+    Structure:
+    {'pool_name_1': {'applications': {'application': {}},
+                     'parameters': {'pg_num': '42', 'size': '42'},
+                     'quota': {'max_bytes': '1000',
+                               'max_objects': '10'},
+                     },
+     'pool_name_2': ...
+     }
+
+    :returns: Dictionary with detailed pool information.
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    get_params = ['pg_num', 'size']
+    result = {}
+    applications = get_pool_applications()
+    for pool in list_pools():
+        result[pool] = {
+            'applications': applications.get(pool, {}),
+            'parameters': {},
+            'quota': get_pool_quota(pool),
+        }
+        for param in get_params:
+            result[pool]['parameters'].update({
+                param: get_pool_param(pool, param)})
+        erasure_profile = get_pool_erasure_profile(pool)
+        if erasure_profile:
+            result[pool]['parameters'].update({
+                'erasure_code_profile': erasure_profile})
+    return result
+
+
 def dirs_need_ownership_update(service):
     """Determines if directories still need change of ownership.
@@ -2554,6 +2743,14 @@ def dirs_need_ownership_update(service):
         if (curr_owner == expected_owner) and (curr_group == expected_group):
             continue
+
+        # NOTE(lathiat): when config_changed runs on reboot, the OSD might not
+        # yet be mounted or started, and the underlying directory the OSD is
+        # mounted to is expected to be owned by root. So skip the check. This
+        # may also happen for OSD directories for OSDs that were removed.
+        if (service == 'osd' and
+                not os.path.exists(os.path.join(child, 'magic'))):
+            continue
+
         log('Directory "%s" needs its ownership updated' % child, DEBUG)
         return True
@@ -2566,6 +2763,7 @@ def dirs_need_ownership_update(service):
     ('hammer', 'jewel'),
     ('jewel', 'luminous'),
     ('luminous', 'mimic'),
+    ('mimic', 'nautilus'),
 ])

 # Map UCA codenames to ceph codenames
@@ -2581,6 +2779,7 @@ def dirs_need_ownership_update(service):
     'queens': 'luminous',
     'rocky': 'mimic',
     'stein': 'mimic',
+    'train': 'nautilus',
 }
diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt
index b8fec1e2..343beed1 100644
--- a/ceph-radosgw/requirements.txt
+++ b/ceph-radosgw/requirements.txt
@@ -1,6 +1,12 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# TODO: Distill the func test requirements from the lint/unit test
+# requirements. They are intertwined. Also, Zaza itself should specify
+# all of its own requirements and if it doesn't, fix it there.
+#
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4
diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt
index 6d547d81..7d9c2587 100644
--- a/ceph-radosgw/test-requirements.txt
+++ b/ceph-radosgw/test-requirements.txt
@@ -1,24 +1,18 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.
See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# charm-tools>=2.4.4 -coverage>=3.6 +requests>=2.18.4 mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 -requests==2.18.4 -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -pytz -pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>'3.4' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4' \ No newline at end of file +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 3d8cf47d..20dbbfc5 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -1,27 +1,31 @@ -# Classic charm: ./tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8,py37 +envlist = pep8,py3 skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False

 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
          CHARM_DIR={envdir}
-         AMULET_SETUP_TIMEOUT=5400
 install_command =
   pip install {opts} {packages}
 commands = stestr run --slowest {posargs}
 whitelist_externals = juju
-passenv = HOME TERM AMULET_* CS_API_*
+passenv = HOME TERM CS_* OS_* TEST_*
 deps = -r{toxinidir}/test-requirements.txt

-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-
 [testenv:py35]
 basepython = python3.5
 deps = -r{toxinidir}/requirements.txt
@@ -37,17 +41,55 @@ basepython = python3.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt

+[testenv:py3]
+basepython = python3
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:pep8]
 basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions lib
+commands = flake8 {posargs} hooks unit_tests tests actions lib files
            charm-proof

+[testenv:cover]
+# Technique based heavily upon
+# https://github.com/openstack/nova/blob/master/tox.ini
+basepython = python3
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+setenv =
+    {[testenv]setenv}
+    PYTHON=coverage run
+commands =
+    coverage erase
+    stestr run --slowest {posargs}
+    coverage combine
+    coverage html -d cover
+    coverage xml -o cover/coverage.xml
+    coverage report
+
+[coverage:run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+source =
+    .
+omit =
+    .tox/*
+    */charmhelpers/*
+    unit_tests/*
+
 [testenv:venv]
 basepython = python3
 commands = {posargs}

+[testenv:func-noop]
+basepython = python3
+commands =
+    functest-run-suite --help
+
 [testenv:func]
 basepython = python3
 commands =
From 01dd362ebee287457818e32832d947de733f2874 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 19 Sep 2019 11:34:52 +0200
Subject: [PATCH 1830/2699] Enable Ceph Radosgw tenant namespacing

This change enables automatic tenant namespacing, which also allows
enabling global read permissions on buckets.
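For illustration, the endpoint selection this patch introduces can be
summarised as follows (editor's sketch; swift_endpoint is a hypothetical
helper that mirrors the identity_joined logic in the diff below):

    def swift_endpoint(base_url, port, namespace_tenants):
        # With namespacing on, the catalog URL carries the Keystone
        # project id, so radosgw maps requests onto per-tenant buckets.
        if namespace_tenants:
            return '%s:%s/swift/v1/AUTH_$(project_id)s' % (base_url, port)
        return '%s:%s/swift/v1' % (base_url, port)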
Change-Id: Ic37c7161b7dddad49e3c2ab075d7e8b72f436b35 Closes-Bug: #1833072 --- ceph-radosgw/README.md | 21 ++++++ ceph-radosgw/config.yaml | 15 ++++ ceph-radosgw/hooks/ceph_radosgw_context.py | 2 + ceph-radosgw/hooks/hooks.py | 22 ++++-- ceph-radosgw/hooks/upgrade-charm | 2 + ceph-radosgw/hooks/upgrade-charm.real | 1 + ceph-radosgw/templates/ceph.conf | 4 ++ .../bundles/bionic-queens-namespaced.yaml | 44 ++++++++++++ .../bundles/bionic-rocky-namespaced.yaml | 44 ++++++++++++ .../bundles/bionic-stein-namespaced.yaml | 44 ++++++++++++ .../bundles/xenial-mitaka-namespaced.yaml | 44 ++++++++++++ ceph-radosgw/tests/tests.yaml | 4 ++ .../unit_tests/test_ceph_radosgw_context.py | 72 +++++++++++++++++++ ceph-radosgw/unit_tests/test_hooks.py | 66 +++++++++++++++-- 14 files changed, 377 insertions(+), 8 deletions(-) create mode 120000 ceph-radosgw/hooks/upgrade-charm.real create mode 100644 ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 42b6d975..9d95be57 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -228,3 +228,24 @@ a zone that is currently read-only can be switched to read/write mode by either promoting it to be the current master or by using the 'readwrite' action: juju run-action -m us-east --wait rgw-us-east/0 readwrite + +Tenant Namespacing +------------------ + +By default, Ceph Rados Gateway puts all tenant buckets into the same global +namespace, disallowing multiple tenants to have buckets with the same name. +Tenant namespacing can be enabled in this charm by deploying with configuration +like: + + ceph-radosgw: + charm: cs:ceph-radosgw + num_units: 1 + options: + namespace-tenants: True + +Enabling tenant namespacing will place all tenant buckets into their own +namespace under their tenant id, as well as adding the tenant's ID parameter to +the Keystone endpoint registration to allow seamless integration with OpenStack. +Tenant namespacing cannot be toggled on in an existing installation as it will +remove tenant access to existing buckets. Toggling this option on an already +deployed Rados Gateway will have no effect. \ No newline at end of file diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 322e07f5..a2791008 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -317,3 +317,18 @@ options: description: | Name of RADOS Gateway Zone to create for multi-site replication. This option must be specific to the local site e.g. us-west or us-east. + namespace-tenants: + type: boolean + default: False + description: | + Enable tenant namespacing. If tenant namespacing is enabled, keystone + tenants will be implicitly added to a matching tenant in radosgw, in + addition to updating the catalog URL to allow radosgw to support + publicly-readable containers and temporary URLS. This namespacing + also allows multiple tenants to create buckets with the same names, + as the bucket names are namespaced into the tenant namespaces in the + RADOS gateway. + + This configuration option will not be enabled on a charm upgrade, and + cannot be toggled on in an existing installation as it will remove + tenant access to existing buckets. 
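Editor's note before the hooks diff: the option is captured once into Juju
leader storage at install time so that it stays fixed for the lifetime of the
deployment. Leader storage round-trips values as strings, which is why patch
1831 later in this series compares the stored value against the string 'True'.
A minimal sketch, assuming the charmhelpers leader_set/leader_get helpers:

    from charmhelpers.core.hookenv import config, leader_get, leader_set

    leader_set(namespace_tenants=config('namespace-tenants'))
    # leader_get() returns a string ("True"/"False"), not a bool:
    namespaced = leader_get('namespace_tenants') == 'True'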
diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index f319c707..9cf29a1c 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -33,6 +33,7 @@ relation_get, relation_ids, unit_public_ip, + leader_get, ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, @@ -104,6 +105,7 @@ def __call__(self): if config('admin-roles'): ctxt['user_roles'] += (',' + config('admin-roles')) ctxt['cache_size'] = config('cache-size') + ctxt['namespace_tenants'] = leader_get('namespace_tenants') if self.context_complete(ctxt): return ctxt return {} diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 8debca8f..8a8a2760 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -170,6 +170,14 @@ def install(): install_packages() if not os.path.exists('/etc/ceph'): os.makedirs('/etc/ceph') + if is_leader(): + leader_set(namespace_tenants=config('namespace-tenants')) + + +@hooks.hook('upgrade-charm.real') +def upgrade_charm(): + if is_leader() and not leader_get('namespace_tenants'): + leader_set(namespace_tenants=False) @hooks.hook('config-changed') @@ -294,10 +302,16 @@ def identity_joined(relid=None): port = config('port') admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port) - internal_url = '%s:%s/swift/v1' % \ - (canonical_url(CONFIGS, INTERNAL), port) - public_url = '%s:%s/swift/v1' % \ - (canonical_url(CONFIGS, PUBLIC), port) + if leader_get('namespace_tenants'): + internal_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \ + (canonical_url(CONFIGS, INTERNAL), port) + public_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \ + (canonical_url(CONFIGS, PUBLIC), port) + else: + internal_url = '%s:%s/swift/v1' % \ + (canonical_url(CONFIGS, INTERNAL), port) + public_url = '%s:%s/swift/v1' % \ + (canonical_url(CONFIGS, PUBLIC), port) roles = [x for x in [config('operator-roles'), config('admin-roles')] if x] requested_roles = '' if roles: diff --git a/ceph-radosgw/hooks/upgrade-charm b/ceph-radosgw/hooks/upgrade-charm index 71a85b0a..4ae2e75f 100755 --- a/ceph-radosgw/hooks/upgrade-charm +++ b/ceph-radosgw/hooks/upgrade-charm @@ -7,3 +7,5 @@ find . 
-name '__pycache__' -prune -exec rm -rf "{}" \; # Re-install dependencies to deal with py2->py3 switch for charm ./hooks/install_deps + +./hooks/upgrade-charm.real diff --git a/ceph-radosgw/hooks/upgrade-charm.real b/ceph-radosgw/hooks/upgrade-charm.real new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/upgrade-charm.real @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index c1ea349d..3d5bc34c 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -55,6 +55,10 @@ rgw keystone accepted admin roles = {{ admin_roles }} rgw keystone token cache size = {{ cache_size }} rgw s3 auth use keystone = true rgw s3 auth order = local, external +{% if namespace_tenants %} +rgw swift account in url = true +rgw keystone implicit tenants = true +{% endif %} {% else -%} rgw swift url = http://{{ unit_public_ip }} {% endif -%} diff --git a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml new file mode 100644 index 00000000..9f005463 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml @@ -0,0 +1,44 @@ +options: + source: &source distro +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + series: bionic + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml new file mode 100644 index 00000000..d57a78ad --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml @@ -0,0 +1,44 @@ +options: + source: &source cloud:bionic-rocky +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml 
b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml new file mode 100644 index 00000000..e9bfd072 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml @@ -0,0 +1,44 @@ +options: + source: &source cloud:bionic-stein +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml new file mode 100644 index 00000000..9fecfdd4 --- /dev/null +++ b/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml @@ -0,0 +1,44 @@ +options: + source: &source distro +series: xenial +applications: + ceph-radosgw: + charm: ceph-radosgw + series: xenial + num_units: 1 + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index db9d6630..cc0f6499 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,12 +1,16 @@ charm_name: ceph-radosgw gate_bundles: - bionic-stein + - bionic-stein-namespaced - bionic-rocky + - bionic-rocky-namespaced - bionic-queens + - bionic-queens-namespaced - xenial-queens - xenial-pike - xenial-ocata - xenial-mitaka + - xenial-mitaka-namespaced - trusty-mitaka smoke_bundles: - bionic-stein diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index adb56f79..d0f1c24c 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -31,6 +31,7 @@ 'unit_public_ip', 'determine_api_port', 'cmp_pkgrevno', + 'leader_get', ] @@ -74,6 +75,7 @@ def setUp(self): self.config.side_effect = self.test_config.get self.maxDiff = None self.cmp_pkgrevno.return_value = 1 + self.leader_get.return_value = False @patch.object(charmhelpers.contrib.openstack.context, 'filter_installed_packages', return_value=['absent-pkg']) @@ 
-124,6 +126,74 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'auth_port': 5432, 'auth_protocol': 'http', 'auth_type': 'keystone', + 'namespace_tenants': False, + 'cache_size': '42', + 'service_host': '127.0.0.4', + 'service_port': 9876, + 'service_protocol': 'http', + } + if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] >= 0: + expect['user_roles'] = 'Babel' + expect['admin_roles'] = 'Dart' + else: + expect['user_roles'] = 'Babel,Dart' + if jewel_installed: + expect['auth_keystone_v3_supported'] = True + self.assertEqual(expect, ids_ctxt()) + + @patch.object(charmhelpers.contrib.openstack.context, + 'filter_installed_packages', return_value=['absent-pkg']) + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') + @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + @patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'log') + def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, + _ctxt_comp, _format_ipv6_addr, + _filter_installed_packages, + jewel_installed=False, + cmp_pkgrevno_side_effects=None): + self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects + if cmp_pkgrevno_side_effects + else [-1, -1]) + self.test_config.set('operator-roles', 'Babel') + self.test_config.set('admin-roles', 'Dart') + self.test_config.set('cache-size', '42') + self.test_relation.set({'admin_token': 'ubuntutesting'}) + self.relation_ids.return_value = ['identity-service:5'] + self.related_units.return_value = ['keystone/0'] + _format_ipv6_addr.return_value = False + _rids.return_value = 'rid1' + _runits.return_value = 'runit' + _ctxt_comp.return_value = True + self.leader_get.return_value = True + id_data = { + 'service_port': 9876, + 'service_host': '127.0.0.4', + 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'service_tenant': 'ten', + 'service_username': 'admin', + 'service_password': 'adminpass', + } + _rget.return_value = id_data + ids_ctxt = context.IdentityServiceContext() + expect = { + 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', + 'admin_password': 'adminpass', + 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', + 'admin_tenant_name': 'ten', + 'admin_token': 'ubuntutesting', + 'admin_user': 'admin', + 'api_version': '2.0', + 'auth_host': '127.0.0.5', + 'auth_port': 5432, + 'auth_protocol': 'http', + 'auth_type': 'keystone', + 'namespace_tenants': True, 'cache_size': '42', 'service_host': '127.0.0.4', 'service_port': 9876, @@ -185,6 +255,7 @@ def test_ids_ctxt_missing_admin_domain_id( 'auth_port': 5432, 'auth_protocol': 'http', 'auth_type': 'keystone', + 'namespace_tenants': False, 'cache_size': '42', 'service_host': '127.0.0.4', 'service_port': 9876, @@ -247,6 +318,7 @@ def test_ids_ctxt_v3( 'auth_port': 5432, 'auth_protocol': 'http', 'auth_type': 'keystone', + 'namespace_tenants': False, 'cache_size': '42', 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', 'service_host': '127.0.0.4', diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 45207e7c..66576aaf 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -145,11 +145,28 @@ def 
test_install_packages_upgrades(self, upgrade_available): ceph_hooks.APACHE_PACKAGES ) - def test_install(self): + @patch.object(ceph_hooks, 'leader_set') + @patch.object(ceph_hooks, 'is_leader') + def test_install(self, is_leader, leader_set): _install_packages = self.patch('install_packages') + is_leader.return_value = True ceph_hooks.install() self.assertTrue(self.execd_preinstall.called) self.assertTrue(_install_packages.called) + is_leader.assert_called_once() + leader_set.assert_called_once_with(namespace_tenants=False) + + @patch.object(ceph_hooks, 'leader_set') + @patch.object(ceph_hooks, 'is_leader') + def test_install_without_namespacing(self, is_leader, leader_set): + _install_packages = self.patch('install_packages') + is_leader.return_value = True + self.test_config.set('namespace-tenants', True) + ceph_hooks.install() + self.assertTrue(self.execd_preinstall.called) + self.assertTrue(_install_packages.called) + is_leader.assert_called_once() + leader_set.assert_called_once_with(namespace_tenants=True) @patch.object(ceph_hooks, 'certs_joined') @patch.object(ceph_hooks, 'update_nrpe_config') @@ -231,19 +248,22 @@ def test_gateway_relation(self): ceph_hooks.gateway_relation() self.relation_set.assert_called_with(hostname='10.0.0.1', port=80) + @patch.object(ceph_hooks, 'leader_get') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined_early_version(self, _config): + def test_identity_joined_early_version(self, _config, _leader_get): self.cmp_pkgrevno.return_value = -1 + _leader_get.return_value = False ceph_hooks.identity_joined() self.sys.exit.assert_called_with(1) + @patch.object(ceph_hooks, 'leader_get') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') @patch('charmhelpers.contrib.openstack.ip.resolve_address') @patch('charmhelpers.contrib.openstack.ip.config') - def test_identity_joined(self, _config, _resolve_address): + def test_identity_joined(self, _config, _resolve_address, _leader_get): def _test_identify_joined(expected): self.related_units = ['unit/0'] @@ -251,6 +271,7 @@ def _test_identify_joined(expected): _resolve_address.return_value = 'myserv' _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') + _leader_get.return_value = False ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', @@ -270,18 +291,55 @@ def _test_identify_joined(expected): self.test_config.set('admin-roles', input.get('admin', '')) _test_identify_joined(input['expected']) + @patch.object(ceph_hooks, 'leader_get') + @patch('charmhelpers.contrib.openstack.ip.service_name', + lambda *args: 'ceph-radosgw') + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_identity_joined_namespaced(self, _config, + _resolve_address, _leader_get): + _leader_get.return_value = True + + def _test_identify_joined(expected): + self.related_units = ['unit/0'] + self.cmp_pkgrevno.return_value = 1 + _resolve_address.return_value = 'myserv' + _config.side_effect = self.test_config.get + self.test_config.set('region', 'region1') + _leader_get.return_value = True + ceph_hooks.identity_joined(relid='rid') + self.relation_set.assert_called_with( + service='swift', + region='region1', + public_url='http://myserv:80/swift/v1/AUTH_$(project_id)s', + internal_url='http://myserv:80/swift/v1/AUTH_$(project_id)s', + requested_roles=expected, + 
relation_id='rid',
+            admin_url='http://myserv:80/swift')
+
+        inputs = [{'operator': 'foo', 'admin': 'bar', 'expected': 'foo,bar'},
+                  {'operator': 'foo', 'expected': 'foo'},
+                  {'admin': 'bar', 'expected': 'bar'},
+                  {'expected': ''}]
+        for input in inputs:
+            self.test_config.set('operator-roles', input.get('operator', ''))
+            self.test_config.set('admin-roles', input.get('admin', ''))
+            _test_identify_joined(input['expected'])
+
+    @patch.object(ceph_hooks, 'leader_get')
     @patch('charmhelpers.contrib.openstack.ip.service_name',
            lambda *args: 'ceph-radosgw')
     @patch('charmhelpers.contrib.openstack.ip.is_clustered')
     @patch('charmhelpers.contrib.openstack.ip.unit_get')
     @patch('charmhelpers.contrib.openstack.ip.config')
     def test_identity_joined_public_name(self, _config, _unit_get,
-                                         _is_clustered):
+                                         _is_clustered, _leader_get):
         self.related_units = ['unit/0']
         _config.side_effect = self.test_config.get
         self.test_config.set('os-public-hostname', 'files.example.com')
         _unit_get.return_value = 'myserv'
         _is_clustered.return_value = False
+        _leader_get.return_value = False
         ceph_hooks.identity_joined(relid='rid')
         self.relation_set.assert_called_with(
             service='swift',
From 60938504e25c4207c4f0bc2612d4528244f0a5a1 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Fri, 11 Oct 2019 15:45:28 +0200
Subject: [PATCH 1831/2699] Parse the leader_get output

Because leader_get only deals with stringy types, it is unsound to
evaluate them as booleans to toggle something on or off.

Change-Id: I18c3763dce53d1d652185f9fba73523a5c5a65a6
Closes-Bug: #1847769
---
 ceph-radosgw/hooks/ceph_radosgw_context.py           | 2 +-
 ceph-radosgw/hooks/hooks.py                          | 4 ++--
 ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 4 ++--
 ceph-radosgw/unit_tests/test_hooks.py                | 8 ++++----
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py
index 9cf29a1c..958d7eeb 100644
--- a/ceph-radosgw/hooks/ceph_radosgw_context.py
+++ b/ceph-radosgw/hooks/ceph_radosgw_context.py
@@ -105,7 +105,7 @@ def __call__(self):
         if config('admin-roles'):
             ctxt['user_roles'] += (',' + config('admin-roles'))
         ctxt['cache_size'] = config('cache-size')
-        ctxt['namespace_tenants'] = leader_get('namespace_tenants')
+        ctxt['namespace_tenants'] = leader_get('namespace_tenants') == 'True'
         if self.context_complete(ctxt):
             return ctxt
         return {}
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index 8a8a2760..3d81941c 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -176,7 +176,7 @@ def install():

 @hooks.hook('upgrade-charm.real')
 def upgrade_charm():
-    if is_leader() and not leader_get('namespace_tenants'):
+    if is_leader() and not leader_get('namespace_tenants') == 'True':
         leader_set(namespace_tenants=False)

@@ -302,7 +302,7 @@ def identity_joined(relid=None):

     port = config('port')
     admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
-    if leader_get('namespace_tenants'):
+    if leader_get('namespace_tenants') == 'True':
         internal_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
             (canonical_url(CONFIGS, INTERNAL), port)
         public_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
index d0f1c24c..8c0d79a3 100644
--- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
+++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
@@ -75,7 +75,7 @@ def setUp(self):
         self.config.side_effect = self.test_config.get
         self.maxDiff
= None self.cmp_pkgrevno.return_value = 1 - self.leader_get.return_value = False + self.leader_get.return_value = 'False' @patch.object(charmhelpers.contrib.openstack.context, 'filter_installed_packages', return_value=['absent-pkg']) @@ -167,7 +167,7 @@ def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, _rids.return_value = 'rid1' _runits.return_value = 'runit' _ctxt_comp.return_value = True - self.leader_get.return_value = True + self.leader_get.return_value = 'True' id_data = { 'service_port': 9876, 'service_host': '127.0.0.4', diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 66576aaf..eda79f64 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -254,7 +254,7 @@ def test_gateway_relation(self): @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined_early_version(self, _config, _leader_get): self.cmp_pkgrevno.return_value = -1 - _leader_get.return_value = False + _leader_get.return_value = 'False' ceph_hooks.identity_joined() self.sys.exit.assert_called_with(1) @@ -271,7 +271,7 @@ def _test_identify_joined(expected): _resolve_address.return_value = 'myserv' _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') - _leader_get.return_value = False + _leader_get.return_value = 'False' ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', @@ -306,7 +306,7 @@ def _test_identify_joined(expected): _resolve_address.return_value = 'myserv' _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') - _leader_get.return_value = True + _leader_get.return_value = 'True' ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', @@ -339,7 +339,7 @@ def test_identity_joined_public_name(self, _config, _unit_get, self.test_config.set('os-public-hostname', 'files.example.com') _unit_get.return_value = 'myserv' _is_clustered.return_value = False - _leader_get.return_value = False + _leader_get.return_value = 'False' ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_called_with( service='swift', From 97381e62db7a8a8b33cc0aff0ca391ed60204f52 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:23 -0500 Subject: [PATCH 1832/2699] Sync charm/ceph helpers, tox, and requirements Also clean up pre-existing pep8 violations in the files/* dir which was previously not covered by lint testing. Also clean-up mocking issues around apt-pkg replacements in the unit tests. Also fix py35 issue with enabled_manager_modules() function in the charmhelpers library (see https://github.com/juju/charm-helpers/pull/387). Also fix the functional tests bundles so they actually test the version of OpenStack that the bundle indicates. 
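As context for the apt-pkg clean-up: the hardening audit code now goes through
the pure-Python ubuntu_apt_pkg shim instead of importing the system apt
binding, so unit tests no longer require python3-apt on the host. A rough
sketch of the consuming side (editor's illustration; version_compare is
assumed to be provided by the shim with apt_pkg-compatible semantics):

    from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg

    # Returns >0, 0 or <0, mirroring apt_pkg.version_compare().
    if apt_pkg.version_compare('12.2.1', '12.2.0') > 0:
        print('candidate is newer')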
Change-Id: I2c8d84fadc11311c622dd308c4694496872dc157 --- ceph-proxy/actions.yaml | 2 +- ceph-proxy/charm-helpers-hooks.yaml | 1 + .../contrib/hardening/audits/apt.py | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 700 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 53 ++ ceph-proxy/charmhelpers/core/host.py | 27 + .../charmhelpers/core/host_factory/ubuntu.py | 3 +- ceph-proxy/charmhelpers/fetch/__init__.py | 2 + ceph-proxy/charmhelpers/fetch/ubuntu.py | 143 ++-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 237 ++++++ ceph-proxy/files/nagios/check_ceph_status.py | 25 +- ceph-proxy/requirements.txt | 12 +- ceph-proxy/test-requirements.txt | 22 +- ceph-proxy/tests/bundles/bionic-rocky.yaml | 5 + ceph-proxy/tests/bundles/bionic-stein.yaml | 5 + ceph-proxy/tests/bundles/trusty-mitaka.yaml | 5 + ceph-proxy/tests/bundles/xenial-ocata.yaml | 5 + ceph-proxy/tests/bundles/xenial-pike.yaml | 5 + ceph-proxy/tests/bundles/xenial-queens.yaml | 7 +- ceph-proxy/tox.ini | 80 +- ceph-proxy/unit_tests/test_ceph.py | 5 +- ceph-proxy/unit_tests/test_ceph_hooks.py | 11 +- 23 files changed, 1280 insertions(+), 93 deletions(-) create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/policyd.py create mode 100644 ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py diff --git a/ceph-proxy/actions.yaml b/ceph-proxy/actions.yaml index 7303b69d..18e6a498 100644 --- a/ceph-proxy/actions.yaml +++ b/ceph-proxy/actions.yaml @@ -43,7 +43,7 @@ remove-cache-tier: create-pool: description: Creates a pool params: - name: + name: type: string description: The name of the pool profile-name: diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 8f484eb1..af0da178 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -19,3 +19,4 @@ include: - contrib.charmsupport - contrib.hardening|inc=* - contrib.python + - contrib.openstack.policyd diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py index 3dc14e3c..67521e17 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py @@ -13,7 +13,6 @@ # limitations under the License. from __future__ import absolute_import # required for external apt import -from apt import apt_pkg from six import string_types from charmhelpers.fetch import ( @@ -26,6 +25,7 @@ WARNING, ) from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg class AptConfig(BaseAudit): diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py new file mode 100644 index 00000000..1adf2472 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py @@ -0,0 +1,700 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
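# [Editor's note, not part of the upstream patch] The module added below
# implements the policy.d override workflow described in its docstrings.
# The operator-facing flow is roughly the following sketch; the file and
# rule names are illustrative only:
#
#   import zipfile
#   with zipfile.ZipFile('policyd-override.zip', 'w') as zf:
#       zf.writestr('overrides.yaml',
#                   '"identity:list_users": "rule:admin_required"')
#
#   $ juju attach-resource <charm-name> policyd-override=policyd-override.zip
#   $ juju config <charm-name> use-policyd-override=true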
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an easy-to-use
+set of hooks for classic charms to add in /etc/<service-name>/policy.d/
+directory override YAML files.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions are
+provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the charm.
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
+called on the config-changed hook, in that it does an additional check to
+ensure that an already overridden policy.d from an upgrade or install hook
+isn't repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added.  The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
+                                                 'keystone')
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@harden()
+def upgrade_charm():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+Status Line
+===========
+
+The workload status code in charm-helpers has been modified to detect if
+policy.d override code has been incorporated into the charm by checking for the
+new config variable (in the config.yaml).  If it has been, then the workload
+status line will automatically show "PO:" at the beginning of the workload
+status for that unit/service if the config option is set.  If the policy
+override is broken, the "PO (broken):" will be shown.  No changes to the charm
+(apart from those already mentioned) are needed to enable this functionality.
+(charms.openstack charms also get this functionality, but please see that
+library for further details).
+"""
+
+# The config.yaml for the charm should contain the following for the config
+# option:
+
+"""
+  use-policyd-override:
+    type: boolean
+    default: False
+    description: |
+      If True then use the resource file named 'policyd-override' to install
+      override yaml files in the service's policy.d directory.  The resource
+      file should be a zip file containing at least one yaml file with a .yaml
+      or .yml extension.  If False then remove the overrides.
+""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This service allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the service supports should be +clearly and unambiguously understood before trying to override, or add to, the +default policies that the service uses. + +The charm also has some policy defaults. They should also be understood before +being overridden. It is possible to break the system (for tenants and other +services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Policy overrides are provided to the charm using a resource file called +`policyd-override`. This is attached to the charm using (for example): + + juju attach-resource policyd-override= + +The `` is the name that this charm is deployed as, with +`` being the resource file containing the policy overrides. + +The format of the resource file is a ZIP file (.zip extension) containing at +least one YAML file with an extension of `.yaml` or `.yml`. Note that any +directories in the ZIP file are ignored; all of the files are flattened into a +single directory. There must not be any duplicated filenames; this will cause +an error and nothing in the resource file will be applied. + +(ed. next part is optional is the charm supports some form of +template/substitution on a read file) + +If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the +resource file then the charm will perform a substitution with charm variables +taken from the config or relations. (ed. edit as appropriate to include the +variable). + +To enable the policy overrides the config option `use-policyd-override` must be +set to `True`. + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. The status line indicates that the overrides are broken, +not that the policy for the service has failed - they will be the defaults for +the charm and service. + +If the policy overrides did not install then *either* attach a new, corrected, +resource file *or* disable the policy overrides by setting +`use-policyd-override` to False. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. 
+""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. 
+ :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + if not is_policyd_override_valid_on_this_release(openstack_release): + return + # from now on it should succeed; if it doesn't then status line will show + # broken. + resource_filename = get_policy_resource_filename() + restart = process_policy_resource_file( + resource_filename, service, blacklist_paths, blacklist_keys, + template_function) + if restart and restart_handler is not None and callable(restart_handler): + restart_handler() + + +def maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """This function is designed to be called from the config changed hook + handler. It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + + See maybe_do_policyd_overrides() for more details on the params. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + # if the policyd overrides have been performed just return + if os.path.isfile(_policy_success_file()): + return + maybe_do_policyd_overrides( + openstack_release, service, blacklist_paths, blacklist_keys, + template_function, restart_handler) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. 
Note that the yaml files are not checked.  This is the
+    first stage in validating the policy zipfile; individual yaml files are not
+    checked for validity or black listed keys.
+
+    An example of use is:
+
+        with open_and_filter_yaml_files(some_path) as (zfp, g):
+            for zipinfo in g:
+                # do something with zipinfo ...
+
+    :param filepath: a filepath object that can be opened by zipfile
+    :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
+    :returns: (zfp handle,
+               a generator of the (name, ext, filename, ZipInfo object) tuples)
+              as a tuple.
+    :rtype: ContextManager[(zipfile.ZipFile,
+                            Generator[(str, str, str, zipfile.ZipInfo)])]
+    :raises: zipfile.BadZipFile
+    :raises: BadPolicyZipFile if duplicated yaml or missing
+    :raises: IOError if the filepath is not found
+    """
+    with zipfile.ZipFile(filepath, 'r') as zfp:
+        # first pass through; check for duplicates and at least one yaml file.
+        names = collections.defaultdict(int)
+        yamlfiles = _yamlfiles(zfp)
+        for name, _, _, _ in yamlfiles:
+            names[name] += 1
+        # There must be at least 1 yaml file.
+        if len(names.keys()) == 0:
+            raise BadPolicyZipFile("contains no yaml files with {} extensions."
+                                   .format(", ".join(POLICYD_VALID_EXTS)))
+        # There must be no duplicates
+        duplicates = [n for n, c in names.items() if c > 1]
+        if duplicates:
+            raise BadPolicyZipFile("{} have duplicates in the zip file."
+                                   .format(", ".join(duplicates)))
+        # Finally, let's yield the generator
+        yield (zfp, yamlfiles)
+
+
+def _yamlfiles(zipfile):
+    """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions)
+    and the infolist item from a zipfile.
+
+    :param zipfile: the zipfile to read zipinfo items from
+    :type zipfile: zipfile.ZipFile
+    :returns: list of (name, ext, filename, info item) tuples for each
+              self-identified yaml file.
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    l = []
+    for infolist_item in zipfile.infolist():
+        if infolist_item.is_dir():
+            continue
+        _, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            l.append((name, ext, name_ext, infolist_item))
+    return l
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read, and checked for a yaml file.  Then the top-level keys are
+    checked against the blacklist_keys provided.  If there are problems then an
+    Exception is raised.  Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+                           an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+             blacklisted keys in the file.
+ """ + blacklist_keys = blacklist_keys or [] + blacklist_keys.append(POLICYD_ALWAYS_BLACKLISTED_KEYS) + doc = yaml.safe_load(stream_or_doc) + if not isinstance(doc, dict): + raise BadPolicyYamlFile("doesn't look like a policy file?") + keys = set(doc.keys()) + blacklisted_keys_present = keys.intersection(blacklist_keys) + if blacklisted_keys_present: + raise BadPolicyYamlFile("blacklisted keys {} present." + .format(", ".join(blacklisted_keys_present))) + if not all(isinstance(k, six.string_types) for k in keys): + raise BadPolicyYamlFile("keys in yaml aren't all strings?") + # check that the dictionary looks like a mapping of str to str + if not all(isinstance(v, six.string_types) for v in doc.values()): + raise BadPolicyYamlFile("values in yaml aren't all strings?") + return doc + + +def policyd_dir_for(service): + """Return the policy directory for the named service. + + This assumes the default name of "policy.d" which is kept across all + charms. + + :param service: str + :returns: the policy.d override directory. + :rtype: os.PathLike[str] + """ + return os.path.join("/", "etc", service, "policy.d") + + +def clean_policyd_dir_for(service, keep_paths=None): + """Clean out the policyd directory except for items that should be kept. + + The keep_paths, if used, should be set to the full path of the files that + should be kept in the policyd directory for the service. Note that the + service name is passed in, and then the policyd_dir_for() function is used. + This is so that a coding error doesn't result in a sudden deletion of the + charm (say). + + :param service: the service name to use to construct the policy.d dir. + :type service: str + :param keep_paths: optional list of paths to not delete. + :type keep_paths: Union[None, List[str]] + """ + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + if not os.path.exists(path): + ch_host.mkdir(path, owner=service, group=service, perms=0o775) + _scanner = os.scandir if six.PY3 else _py2_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +@contextlib.contextmanager +def _py2_scandir(path): + """provide a py2 implementation of os.scandir if this module ever gets used + in a py2 charm (unlikely). uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for a + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _P27Direntry objects + :rtype: ContextManager[_P27Direntry] + """ + for f in os.listdir(path): + yield _P27Direntry(f) + + +class _P27Direntry(object): + """Mock a scandir Direntry object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. 
+
+    :param service: the service name
+    :type service: str
+    :param name: the name for the policy override
+    :type name: str
+    :returns: the full path name for the file
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join(policyd_dir_for(service), name + ".yaml")
+
+
+def _policy_success_file():
+    """Return the file name for a successful drop of policy.d overrides
+
+    :returns: the path name for the file.
+    :rtype: str
+    """
+    return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME)
+
+
+def remove_policy_success_file():
+    """Remove the file that indicates successful policyd override."""
+    try:
+        os.remove(_policy_success_file())
+    except Exception:
+        pass
+
+
+def policyd_status_message_prefix():
+    """Return the prefix str for the status line.
+
+    "PO:" indicating that the policy overrides are in place, or "PO (broken):"
+    if the policy is supposed to be working but there is no success file.
+
+    :returns: the prefix
+    :rtype: str
+    """
+    if os.path.isfile(_policy_success_file()):
+        return "PO:"
+    return "PO (broken):"
+
+
+def process_policy_resource_file(resource_file,
+                                 service,
+                                 blacklist_paths=None,
+                                 blacklist_keys=None,
+                                 template_function=None):
+    """Process the resource file (which should contain at least one yaml file)
+    and write those files to the service's policy.d directory.
+
+    The optional template_function is a function that accepts a python
+    string and has an opportunity to modify the document
+    prior to it being read by the yaml.safe_load() function and written to
+    disk.  Note that this function does *not* say how the templating is done -
+    this is up to the charm to implement its chosen method.
+
+    The param blacklist_paths are paths (that are in the service's policy.d
+    directory that should not be touched).
+
+    The param blacklist_keys are keys that must not appear in the yaml file.
+    If they do, then the whole policy.d file fails.
+
+    The yaml file extracted from the resource_file (which is a zipped file)
+    has its file path reconstructed.  This, also, must not match any path in
+    the blacklist.
+
+    If any error occurs, then the policy.d directory is cleared, the error is
+    written to the log, and the status line will eventually show as failed.
+
+    :param resource_file: The zipped file to open and extract yaml files
+                          from.
+    :type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the yaml
+                              document.
+    :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
+    :returns: True if the processing was successful, False if not.
+    :rtype: bool
+    """
+    blacklist_paths = blacklist_paths or []
+    completed = False
+    try:
+        with open_and_filter_yaml_files(resource_file) as (zfp, gen):
+            # first clear out the policy.d directory and clear success
+            remove_policy_success_file()
+            clean_policyd_dir_for(service, blacklist_paths)
+            for name, ext, filename, zipinfo in gen:
+                # construct a name for the output file.
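+                # Each entry is blacklist-checked, optionally passed through
+                # template_function, and validated before being written out.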
+                yaml_filename = path_for_policy_file(service, name)
+                if yaml_filename in blacklist_paths:
+                    raise BadPolicyZipFile("policy.d name {} is blacklisted"
+                                           .format(yaml_filename))
+                with zfp.open(zipinfo) as fp:
+                    doc = fp.read()
+                    # if template_function is not None, then offer the document
+                    # to the template function
+                    if ext in POLICYD_TEMPLATE_EXTS:
+                        if (template_function is None or not
+                                callable(template_function)):
+                            raise BadPolicyZipFile(
+                                "Template {} but no template_function is "
+                                "available".format(filename))
+                        doc = template_function(doc)
+                yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
+                with open(yaml_filename, "wt") as f:
+                    yaml.dump(yaml_doc, f)
+        # Everything worked, so we mark up a success.
+        completed = True
+    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
+        hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+    except IOError as e:
+        # technically this shouldn't happen; it would be a programming error
+        # as the filename comes from Juju and thus, should exist.
+        hookenv.log(
+            "File {} failed with IOError.  This really shouldn't happen"
+            " -- error: {}".format(resource_file, str(e)),
+            level=POLICYD_LOG_LEVEL_DEFAULT)
+    except Exception as e:
+        import traceback
+        hookenv.log("General Exception({}) during policyd processing"
+                    .format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        hookenv.log(traceback.format_exc())
+    finally:
+        if not completed:
+            hookenv.log("Processing {} failed: cleaning policy.d directory"
+                        .format(resource_file),
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            clean_policyd_dir_for(service, blacklist_paths)
+        else:
+            # touch the success filename
+            hookenv.log("policy.d overrides installed.",
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            open(_policy_success_file(), "w").close()
+    return completed
diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py
index d43a4d20..ac96f844 100644
--- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py
@@ -84,7 +84,8 @@
     SourceConfigError,
     GPGKeyError,
     get_upstream_version,
-    filter_missing_packages
+    filter_missing_packages,
+    ubuntu_apt_pkg as apt,
 )

 from charmhelpers.fetch.snap import (
@@ -96,6 +97,10 @@
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 from charmhelpers.contrib.openstack.exceptions import OSContextError
+from charmhelpers.contrib.openstack.policyd import (
+    policyd_status_message_prefix,
+    POLICYD_CONFIG_NAME,
+)

 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -443,8 +448,6 @@ def get_os_codename_package(package, fatal=True):
             # Second item in list is Version
             return line.split()[1]

-    import apt_pkg as apt
-
     cache = apt_cache()

     try:
@@ -658,7 +661,6 @@ def openstack_upgrade_available(package):

     a newer version of package.
""" - import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) if not cur_vers: @@ -864,6 +866,12 @@ def _determine_os_workload_status( message = "Unit is ready" juju_log(message, 'INFO') + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + return state, message diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index a9864467..a25c79e3 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -301,6 +301,7 @@ def __init__(self, service, name, pg_num=None, replicas=2, percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + self.percent_data = percent_data if pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. @@ -324,12 +325,24 @@ def create(self): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) try: set_app_name_for_pool(client=self.service, pool=self.name, name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -382,6 +395,18 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -389,6 +414,34 @@ def create(self): Returns json formatted output""" +def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable sutoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + def get_mon_map(service): """ Returns the current monitor map. 
diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index 32754ff9..b33ac906 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -1075,3 +1075,30 @@ def install_ca_cert(ca_cert, name=None): log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. + + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 0ee2b660..1b57e2ce 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -24,6 +24,7 @@ 'bionic', 'cosmic', 'disco', + 'eoan', ) @@ -93,7 +94,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. """ - import apt_pkg + from charmhelpers.fetch import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache pkgcache = apt_cache() diff --git a/ceph-proxy/charmhelpers/fetch/__init__.py b/ceph-proxy/charmhelpers/fetch/__init__.py index 8572d34f..0cc7fc85 100644 --- a/ceph-proxy/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/charmhelpers/fetch/__init__.py @@ -103,6 +103,8 @@ def base_url(self, url): apt_unhold = fetch.apt_unhold import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 24c76e34..31225235 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -13,14 +13,14 @@ # limitations under the License. 
from collections import OrderedDict
-import os
 import platform
 import re
 import six
-import time
 import subprocess
+import sys
+import time

-from charmhelpers.core.host import get_distrib_codename
+from charmhelpers.core.host import get_distrib_codename, get_system_env

 from charmhelpers.core.hookenv import (
     log,
@@ -29,6 +29,7 @@
     env_proxy_settings,
 )
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
+from charmhelpers.fetch import ubuntu_apt_pkg

 PROPOSED_POCKET = (
     "# Proposed\n"
@@ -216,18 +217,42 @@ def filter_missing_packages(packages):
     )


-def apt_cache(in_memory=True, progress=None):
-    """Build and return an apt cache."""
-    from apt import apt_pkg
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache(progress)
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to keep the consumer's Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()


 def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages."""
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Option[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']

@@ -244,7 +269,17 @@ def apt_install(packages, options=None, fatal=False):


 def apt_upgrade(options=None, fatal=False, dist=False):
-    """Upgrade all packages."""
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']

@@ -265,7 +300,15 @@ def apt_update(fatal=False):


 def apt_purge(packages, fatal=False):
-    """Purge one or more packages."""
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Option[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+ :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) @@ -276,7 +319,14 @@ def apt_purge(packages, fatal=False): def apt_autoremove(purge=True, fatal=False): - """Purge one or more packages.""" + """Purge one or more packages. + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'autoremove'] if purge: cmd.append('--purge') @@ -660,21 +710,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None): """Run a command and retry until success or max_retries is reached. - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param: cmd_env: Environment variables to add to the command run. + :type cmd_env: Option[None, Dict[str, str]] """ - - env = None - kwargs = {} + env = get_apt_dpkg_env() if cmd_env: - env = os.environ.copy() env.update(cmd_env) - kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -686,8 +737,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - # result = subprocess.check_call(cmd, env=env) - result = subprocess.check_call(cmd, **kwargs) + result = subprocess.check_call(cmd, env=env) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -700,22 +750,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. 
-    cmd_env = {
-        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
     if fatal:
         _run_with_retries(
-            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+            cmd, retry_exitcodes=(1, APT_NO_LOCK,),
             retry_message="Couldn't acquire DPKG lock")
     else:
-        env = os.environ.copy()
-        env.update(cmd_env)
-        subprocess.call(cmd, env=env)
+        subprocess.call(cmd, env=get_apt_dpkg_env())


 def get_upstream_version(package):
@@ -723,7 +769,6 @@ def get_upstream_version(package):

     @returns None (if not installed) or the upstream version
     """
-    import apt_pkg
     cache = apt_cache()
     try:
         pkg = cache[package]
@@ -735,4 +780,18 @@ def get_upstream_version(package):
         # package is known, but no version is currently installed.
         return None

-    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
+    return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str)
+
+
+def get_apt_dpkg_env():
+    """Get environment suitable for execution of APT and DPKG tools.
+
+    We keep this in a helper function instead of in a global constant to
+    avoid execution on import of the library.
+
+    :returns: Environment suitable for execution of APT and DPKG tools.
+    :rtype: Dict[str, str]
+    """
+    # The fallback is used in the event of ``/etc/environment`` not
+    # containing a valid PATH variable.
+    return {'DEBIAN_FRONTEND': 'noninteractive',
+            'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py
new file mode 100644
index 00000000..104f91f1
--- /dev/null
+++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -0,0 +1,237 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide a subset of the ``python-apt`` module API.
+
+Data collection is done through subprocess calls to ``apt-cache`` and
+``dpkg-query`` commands.
+
+The main purpose for this module is to avoid dependency on the
+``python-apt`` python module.
+
+The indicated python module is a wrapper around the ``apt`` C++ library
+which is tightly connected to the version of the distribution it was
+shipped on.  It is not developed in a backward/forward compatible manner.
+
+This in turn makes it incredibly hard to distribute as a wheel for a piece
+of python software that supports a span of distro releases [0][1].
+
+Upstream feedback like [2] does not give confidence that this will ever
+change, so with this we get rid of the dependency.
+ +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            previous = None
+            pkg = {}
+            for line in output.splitlines():
+                if not line:
+                    if 'package' in pkg:
+                        pkgs.update({pkg['package']: pkg})
+                        pkg = {}
+                    continue
+                if line.startswith(' '):
+                    if previous and previous in pkg:
+                        pkg[previous] += os.linesep + line.lstrip()
+                    continue
+                if ':' in line:
+                    kv = line.split(':', 1)
+                    key = kv[0].lower()
+                    if key == 'n':
+                        continue
+                    previous = key
+                    pkg.update({key: kv[1].lstrip()})
+        except subprocess.CalledProcessError as cp:
+            # ``apt-cache`` returns 100 if none of the packages asked for
+            # exist in the apt cache.
+            if cp.returncode != 100:
+                raise
+        return pkgs
+
+
+def init():
+    """Compatibility shim that does nothing."""
+    pass
+
+
+def upstream_version(version):
+    """Extracts upstream version from a version string.
+
+    Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+                        apt-pkg/deb/debversion.cc#L259
+
+    :param version: Version string
+    :type version: str
+    :returns: Upstream version
+    :rtype: str
+    """
+    if version:
+        version = version.split(':')[-1]
+        version = version.split('-')[0]
+    return version
+
+
+def version_compare(a, b):
+    """Compare the given versions.
+
+    Call out to ``dpkg`` to make sure the code doing the comparison is
+    compatible with what the ``apt`` library would do.  Mimic the return
+    values.
+
+    Upstream reference:
+    https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+    ?highlight=version_compare#apt_pkg.version_compare
+
+    :param a: version string
+    :type a: str
+    :param b: version string
+    :type b: str
+    :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+              <0 if ``a`` is smaller than ``b``
+    :rtype: int
+    :raises: subprocess.CalledProcessError, RuntimeError
+    """
+    for op in ('gt', 1), ('eq', 0), ('lt', -1):
+        try:
+            subprocess.check_call(['dpkg', '--compare-versions',
+                                   a, op[0], b],
+                                  stderr=subprocess.STDOUT,
+                                  universal_newlines=True)
+            return op[1]
+        except subprocess.CalledProcessError as cp:
+            if cp.returncode == 1:
+                continue
+            raise
+    else:
+        raise RuntimeError('Unable to compare "{}" and "{}", according to '
+                           'our logic they are neither greater, equal nor '
+                           'less than each other.'.format(a, b))
diff --git a/ceph-proxy/files/nagios/check_ceph_status.py b/ceph-proxy/files/nagios/check_ceph_status.py
index cb8d1a1a..e7638f0f 100755
--- a/ceph-proxy/files/nagios/check_ceph_status.py
+++ b/ceph-proxy/files/nagios/check_ceph_status.py
@@ -15,30 +15,37 @@ def check_ceph_status(args):
         nagios_plugin.check_file_freshness(args.status_file, 3600)
         with open(args.status_file, "r") as f:
             lines = f.readlines()
-        status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+        status_data = dict(
+            l.strip().split(' ', 1) for l in lines if len(l) > 1
+        )
     else:
         lines = subprocess.check_output(["ceph", "status"]).split('\n')
-        status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+        status_data = dict(
+            l.strip().split(' ', 1) for l in lines if len(l) > 1
+        )

-    if ('health' not in status_data
-            or 'monmap' not in status_data
-            or 'osdmap'not in status_data):
+    if ('health' not in status_data or
+            'monmap' not in status_data or
+            'osdmap' not in status_data):
         raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete')

     if status_data['health'] != 'HEALTH_OK':
-        msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health'])
+        msg = 'CRITICAL: ceph health status: "{}"'.format(
+            status_data['health'])
raise nagios_plugin.CriticalError(msg) - osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) + osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", + status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( osds.group(1), osds.group(2)) raise nagios_plugin.CriticalError(msg) - print "All OK" + print("All OK") if __name__ == '__main__': parser = argparse.ArgumentParser(description='Check ceph status') parser.add_argument('-f', '--file', dest='status_file', - default=False, help='Optional file with "ceph status" output') + default=False, + help='Optional file with "ceph status" output') args = parser.parse_args() nagios_plugin.try_check(check_ceph_status, args) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index b8fec1e2..343beed1 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -1,6 +1,12 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index e069d2fd..7d9c2587 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -1,14 +1,18 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# charm-tools>=2.4.4 -coverage>=3.6 +requests>=2.18.4 mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 -requests>=2.18.4 -# NOTE: workaround for 14.04 pip/tox -pytz -pyudev # for ceph-* charm unit tests (not mocked?) 
+coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml index 98681218..6050fa48 100644 --- a/ceph-proxy/tests/bundles/bionic-rocky.yaml +++ b/ceph-proxy/tests/bundles/bionic-rocky.yaml @@ -27,6 +27,7 @@ applications: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:bionic-rocky block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -40,6 +41,7 @@ applications: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 options: + openstack-origin: cloud:bionic-rocky admin-password: openstack admin-token: ubuntutesting constraints: mem=1024 @@ -47,6 +49,7 @@ applications: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 options: + source: cloud:bionic-rocky dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:bionic-rocky relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml index 9a17be94..c37ee8dc 100644 --- a/ceph-proxy/tests/bundles/bionic-stein.yaml +++ b/ceph-proxy/tests/bundles/bionic-stein.yaml @@ -27,6 +27,7 @@ applications: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:bionic-stein block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -40,6 +41,7 @@ applications: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 options: + openstack-origin: cloud:bionic-stein admin-password: openstack admin-token: ubuntutesting constraints: mem=1024 @@ -47,6 +49,7 @@ applications: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 options: + source: cloud:bionic-stein dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:bionic-stein relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml index 6b826ae6..61df904f 100644 --- a/ceph-proxy/tests/bundles/trusty-mitaka.yaml +++ b/ceph-proxy/tests/bundles/trusty-mitaka.yaml @@ -27,6 +27,7 @@ applications: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:trusty-mitaka block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -42,11 +43,13 @@ applications: options: admin-password: openstack admin-token: ubuntutesting + openstack-origin: cloud:trusty-mitaka constraints: mem=1024 percona-cluster: charm: 'cs:trusty/percona-cluster' num_units: 1 options: + source: cloud:trusty-mitaka dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:trusty-mitaka relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml 
b/ceph-proxy/tests/bundles/xenial-ocata.yaml index 96ac47cf..bd1fd236 100644 --- a/ceph-proxy/tests/bundles/xenial-ocata.yaml +++ b/ceph-proxy/tests/bundles/xenial-ocata.yaml @@ -27,6 +27,7 @@ applications: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:xenial-ocata block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -40,6 +41,7 @@ applications: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 options: + openstack-origin: cloud:xenial-ocata admin-password: openstack admin-token: ubuntutesting constraints: mem=1024 @@ -47,6 +49,7 @@ applications: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 options: + source: cloud:xenial-ocata dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:xenial-ocata relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml index 09ade6cf..6b6dd7cb 100644 --- a/ceph-proxy/tests/bundles/xenial-pike.yaml +++ b/ceph-proxy/tests/bundles/xenial-pike.yaml @@ -27,6 +27,7 @@ applications: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:xenial-pike block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -42,11 +43,13 @@ applications: options: admin-password: openstack admin-token: ubuntutesting + openstack-origin: cloud:xenial-pike constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 options: + source: cloud:xenial-pike dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:xenial-pike relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml index 83bf9558..48c298cb 100644 --- a/ceph-proxy/tests/bundles/xenial-queens.yaml +++ b/ceph-proxy/tests/bundles/xenial-queens.yaml @@ -22,11 +22,12 @@ applications: charm: 'cs:~openstack-charmers-next/ceph-radosgw' num_units: 1 options: - source: xenial-queens + source: cloud:xenial-queens cinder: charm: 'cs:~openstack-charmers-next/cinder' num_units: 1 options: + openstack-origin: cloud:xenial-queens block-device: "" ephemeral-unmount: "" glance-api-version: 2 @@ -40,6 +41,7 @@ applications: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 options: + openstack-origin: cloud:xenial-queens admin-password: openstack admin-token: ubuntutesting constraints: mem=1024 @@ -47,6 +49,7 @@ applications: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 options: + source: cloud:xenial-queens dataset-size: 50% max-connections: 1000 innodb-buffer-pool-size: 256M @@ -57,6 +60,8 @@ applications: charm: 'cs:~openstack-charmers-next/rabbitmq-server' num_units: 1 constraints: mem=1024 + options: + source: cloud:xenial-queens relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 888ad469..20dbbfc5 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -1,12 +1,18 @@ -# Classic charm: ./tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. 
See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8,py37 +envlist = pep8,py3 skipsdist = True -# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages. +# NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False -# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False [testenv] @@ -15,17 +21,10 @@ setenv = VIRTUAL_ENV={envdir} CHARM_DIR={envdir} install_command = pip install {opts} {packages} -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} whitelist_externals = juju -passenv = HOME TERM OS_* CS_API_* -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py27] -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = /bin/true +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt [testenv:py35] basepython = python3.5 @@ -42,20 +41,54 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions lib +commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . 
+omit = + .tox/* + */charmhelpers/* + unit_tests/* + [testenv:venv] basepython = python3 commands = {posargs} -[flake8] -ignore = E402,E226 -exclude = */charmhelpers +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help [testenv:func] basepython = python3 @@ -71,3 +104,12 @@ commands = basepython = python3 commands = functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +ignore = E402,E226 +exclude = */charmhelpers diff --git a/ceph-proxy/unit_tests/test_ceph.py b/ceph-proxy/unit_tests/test_ceph.py index 5b951e91..bd8d7097 100644 --- a/ceph-proxy/unit_tests/test_ceph.py +++ b/ceph-proxy/unit_tests/test_ceph.py @@ -49,9 +49,12 @@ def test_config_empty_user_key(self, mock_config): named_key = ceph._config_user_key(user_name) self.assertEqual(named_key, None) + @mock.patch.object(ceph, 'ceph_user') @mock.patch('subprocess.check_output') @mock.patch('ceph.config') - def test_get_named_key_new(self, mock_config, mock_check_output): + def test_get_named_key_new(self, mock_config, mock_check_output, + mock_ceph_user): + mock_ceph_user.return_value = 'ceph' user_name = 'cinder-ceph' expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g==' expected_output = ('[client.testuser]\n key = {}' diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index 1bf97df3..e6b4f089 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -50,9 +50,14 @@ def setUp(self): self.remote_unit.return_value = 'client/0' self.log.side_effect = fake_log + @mock.patch.object(hooks.ceph, 'ceph_user') + @mock.patch.object(hooks, 'filter_installed_packages') @mock.patch('subprocess.check_output') @mock.patch('ceph_hooks.apt_install') - def test_radosgw_relation(self, mock_apt_install, mock_check_output): + def test_radosgw_relation(self, mock_apt_install, mock_check_output, + mock_filter_installed_packages, mock_ceph_user): + mock_filter_installed_packages.return_value = [] + mock_ceph_user.return_value = 'ceph' settings = {'ceph-public-address': '127.0.0.1:1234 [::1]:4321', 'radosgw_key': CEPH_KEY, 'auth': 'cephx', @@ -121,9 +126,11 @@ def c(k): mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1') mock_client_rel.assert_called_with('client:1') + @mock.patch.object(hooks.ceph, 'ceph_user') @mock.patch('subprocess.check_output') - def test_client_relation_joined(self, mock_check_output): + def test_client_relation_joined(self, mock_check_output, mock_ceph_user): mock_check_output.return_value = CEPH_GET_KEY.encode() + mock_ceph_user.return_value = 'ceph' self.test_config.set('monitor-hosts', '127.0.0.1:1234') self.test_config.set('fsid', 'abc123') self.test_config.set('admin-key', 'some-admin-key') From f6ad0b8169bacb4bff7a558c69cd99d66bc1541b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Mon, 30 Sep 2019 17:07:15 -0500 Subject: [PATCH 1833/2699] Sync charm/ceph helpers, tox, and requirements Also fix for bug where the mgr daemon is not ready. A wait is introduced if it is not ready before trying to assert pg-autotune. 
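For illustration only: the actual helper is ceph.wait_for_manager() in the
synced lib/ceph/utils.py and may be implemented differently, but a wait of
this shape polls the mgr map until a daemon reports in:

    import json
    import subprocess
    import time

    def wait_for_manager(timeout=300, interval=5):
        # `ceph mgr dump` emits the mgr map as JSON; 'available' becomes
        # true once an active mgr daemon has registered.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(['ceph', 'mgr', 'dump'])
            if json.loads(out.decode('utf-8')).get('available'):
                return
            time.sleep(interval)
        raise RuntimeError('ceph-mgr not available within {}s'.format(timeout))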
Closes-Bug: #1848576 Change-Id: Ic3c1e1f7902f7e2052dc4cf432303f874369238d --- ceph-mon/charm-helpers-hooks.yaml | 1 + ceph-mon/hooks/ceph_hooks.py | 1 + .../contrib/hardening/audits/apt.py | 2 +- .../contrib/openstack/amulet/utils.py | 8 +- .../charmhelpers/contrib/openstack/context.py | 23 + .../contrib/openstack/ha/utils.py | 29 +- .../charmhelpers/contrib/openstack/policyd.py | 700 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 16 +- .../contrib/storage/linux/ceph.py | 2 - ceph-mon/hooks/charmhelpers/core/host.py | 27 + .../charmhelpers/core/host_factory/ubuntu.py | 3 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 + ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 143 ++-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 237 ++++++ ceph-mon/lib/ceph/crush_utils.py | 2 +- ceph-mon/lib/ceph/utils.py | 80 +- ceph-mon/requirements.txt | 12 +- ceph-mon/test-requirements.txt | 41 +- ceph-mon/tox.ini | 52 +- 19 files changed, 1218 insertions(+), 163 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py create mode 100644 ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index ed7cbe74..54f52290 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -17,3 +17,4 @@ include: - contrib.charmsupport - contrib.hardening|inc=* - fetch.python + - contrib.openstack.policyd diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index f6f36dfa..af819f7b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -448,6 +448,7 @@ def mon_relation(): if ceph.monitor_key_exists('admin', 'autotune'): autotune = ceph.monitor_key_get('admin', 'autotune') else: + ceph.wait_for_manager() autotune = config('pg-autotune') if (cmp_pkgrevno('ceph', '14.2.0') >= 0 and (autotune == 'true' or diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py index 3dc14e3c..67521e17 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -13,7 +13,6 @@ # limitations under the License. 
from __future__ import absolute_import # required for external apt import -from apt import apt_pkg from six import string_types from charmhelpers.fetch import ( @@ -26,6 +25,7 @@ WARNING, ) from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg class AptConfig(BaseAudit): diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0a5f81bd..7d95a590 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -709,8 +709,8 @@ def glance_create_image(self, glance, image_name, image_url, '{}...'.format(image_name, image_url)) # Download image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) @@ -800,8 +800,8 @@ def create_cirros_image(self, glance, image_name, hypervisor_type=None): '({})...'.format(image_name)) # Get cirros image URL - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index a6545e12..a3d48c41 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -18,6 +18,7 @@ import math import os import re +import socket import time from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -1716,6 +1717,10 @@ def __call__(self): 'rel_key': 'enable-nfg-logging', 'default': False, }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1745,6 +1750,13 @@ def __call__(self): ctxt['extension_drivers'] = ','.join(extension_drivers) + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + return ctxt def get_neutron_options(self, rdata): @@ -2160,3 +2172,14 @@ def __call__(self): 'logrotate_count': self.count, } return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __call__(self): + ctxt = { + 'host_fqdn': socket.getfqdn(), + 'host': socket.gethostname(), + } + return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index 718c6d65..e017bc20 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -127,7 +127,9 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service, extra_settings=None): +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json @@ -152,21 +154,18 @@ def 
generate_ha_relation_data(service, extra_settings=None):
     @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
-    _haproxy_res = 'res_{}_haproxy'.format(service)
-    _relation_data = {
-        'resources': {
-            _haproxy_res: 'lsb:haproxy',
-        },
-        'resource_params': {
+    _relation_data = {'resources': {}, 'resource_params': {}}
+
+    if haproxy_enabled:
+        _haproxy_res = 'res_{}_haproxy'.format(service)
+        _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
+        _relation_data['resource_params'] = {
             _haproxy_res: 'op monitor interval="5s"'
-        },
-        'init_services': {
-            _haproxy_res: 'haproxy'
-        },
-        'clones': {
+        }
+        _relation_data['init_services'] = {_haproxy_res: 'haproxy'}
+        _relation_data['clones'] = {
             'cl_{}_haproxy'.format(service): _haproxy_res
-        },
-    }
+        }

     if extra_settings:
         for k, v in extra_settings.items():
@@ -290,7 +289,7 @@ def update_hacluster_vip(service, relation_data):

         iface, netmask, fallback = get_vip_settings(vip)

-        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
+        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
         if iface is not None:
             # NOTE(jamespage): Delete old VIP resources
             # Old style naming encoding iface in name
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py
new file mode 100644
index 00000000..1adf2472
--- /dev/null
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py
@@ -0,0 +1,700 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an
+easy-to-use set of hooks for classic charms to add in
+/etc/<service-name>/policy.d/ directory override YAML files.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions
+are provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the
+charm.  The `maybe_do_policyd_overrides_on_config_changed` function is
+designed to be called on the config-changed hook, in that it does an
+additional check to ensure that an already overridden policy.d in the
+install or upgrade hooks isn't
+repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added.  The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
+                                                 'keystone')
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@harden()
+def upgrade_charm():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+Status Line
+===========
+
+The workload status code in charm-helpers has been modified to detect if
+policy.d override code has been incorporated into the charm by checking for
+the new config variable (in the config.yaml).  If it has been, then the
+workload status line will automatically show "PO:" at the beginning of the
+workload status for that unit/service if the config option is set.  If the
+policy override is broken, then "PO (broken):" will be shown.  No changes to
+the charm (apart from those already mentioned) are needed to enable this
+functionality.  (charms.openstack charms also get this functionality, but
+please see that library for further details).
+"""
+
+# The config.yaml for the charm should contain the following for the config
+# option:
+
+"""
+  use-policyd-override:
+    type: boolean
+    default: False
+    description: |
+      If True then use the resource file named 'policyd-override' to install
+      override yaml files in the service's policy.d directory.  The resource
+      file should be a zip file containing at least one yaml file with a .yaml
+      or .yml extension.  If False then remove the overrides.
+"""

+# The metadata.yaml for the charm should contain the following:
+"""
+resources:
+  policyd-override:
+    type: file
+    filename: policyd-override.zip
+    description: The policy.d overrides file
+"""
+
+# The README for the charm should contain the following:
+"""
+Policy Overrides
+----------------
+
+This service allows for policy overrides using the `policy.d` directory.
+This is an **advanced** feature and the policies that the service supports
+should be clearly and unambiguously understood before trying to override, or
+add to, the default policies that the service uses.
+
+The charm also has some policy defaults.  They should also be understood
+before being overridden.  It is possible to break the system (for tenants
+and other services) if policies are incorrectly applied to the service.
+
+Policy overrides are YAML files that contain rules that will add to, or
+override, existing policy rules in the service.  The `policy.d` directory is
+a place to put the YAML override files.  This charm owns the
+`/etc/keystone/policy.d` directory, and as such, any manual changes to it
+will be overwritten on charm upgrades.
+
+Policy overrides are provided to the charm using a resource file called
+`policyd-override`.
This is attached to the charm using (for example): + + juju attach-resource policyd-override= + +The `` is the name that this charm is deployed as, with +`` being the resource file containing the policy overrides. + +The format of the resource file is a ZIP file (.zip extension) containing at +least one YAML file with an extension of `.yaml` or `.yml`. Note that any +directories in the ZIP file are ignored; all of the files are flattened into a +single directory. There must not be any duplicated filenames; this will cause +an error and nothing in the resource file will be applied. + +(ed. next part is optional is the charm supports some form of +template/substitution on a read file) + +If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the +resource file then the charm will perform a substitution with charm variables +taken from the config or relations. (ed. edit as appropriate to include the +variable). + +To enable the policy overrides the config option `use-policyd-override` must be +set to `True`. + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. The status line indicates that the overrides are broken, +not that the policy for the service has failed - they will be the defaults for +the charm and service. + +If the policy overrides did not install then *either* attach a new, corrected, +resource file *or* disable the policy overrides by setting +`use-policyd-override` to False. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. +""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. 
The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + if not is_policyd_override_valid_on_this_release(openstack_release): + return + # from now on it should succeed; if it doesn't then status line will show + # broken. + resource_filename = get_policy_resource_filename() + restart = process_policy_resource_file( + resource_filename, service, blacklist_paths, blacklist_keys, + template_function) + if restart and restart_handler is not None and callable(restart_handler): + restart_handler() + + +def maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """This function is designed to be called from the config changed hook + handler. 
It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + + See maybe_do_policyd_overrides() for more details on the params. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml files + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + return + except Exception: + return + # if the policyd overrides have been performed just return + if os.path.isfile(_policy_success_file()): + return + maybe_do_policyd_overrides( + openstack_release, service, blacklist_paths, blacklist_keys, + template_function, restart_handler) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. Note that the yaml files are not checked. This is the + first stage in validating the policy zipfile; individual yaml files are not + checked for validity or black listed keys. + + An example of use is: + + with open_and_filter_yaml_files(some_path) as (zfp, g): + for zipinfo in g: + # do something with zipinfo ... + + :param filepath: a filepath object that can be opened by zipfile + :type filepath: Union[AnyStr, os.PathLike[AnyStr]] + :returns: (zfp handle, + a generator of the (name, filename, ZipInfo object) tuples) as a + tuple. + :rtype: ContextManager[(zipfile.ZipFile, + Generator[(name, str, str, zipfile.ZipInfo)])] + :raises: zipfile.BadZipFile + :raises: BadPolicyZipFile if duplicated yaml or missing + :raises: IOError if the filepath is not found + """ + with zipfile.ZipFile(filepath, 'r') as zfp: + # first pass through; check for duplicates and at least one yaml file. + names = collections.defaultdict(int) + yamlfiles = _yamlfiles(zfp) + for name, _, _, _ in yamlfiles: + names[name] += 1 + # There must be at least 1 yaml file. + if len(names.keys()) == 0: + raise BadPolicyZipFile("contains no yaml files with {} extensions." + .format(", ".join(POLICYD_VALID_EXTS))) + # There must be no duplicates + duplicates = [n for n, c in names.items() if c > 1] + if duplicates: + raise BadPolicyZipFile("{} have duplicates in the zip file."
+ .format(", ".join(duplicates))) + # Finally, let's yield the generator + yield (zfp, yamlfiles) + + +def _yamlfiles(zipfile): + """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) + and the infolist item from a zipfile. + + :param zipfile: the zipfile to read zipinfo items from + :type zipfile: zipfile.ZipFile + :returns: list of (name, ext, filename, info item) tuples for each + self-identified yaml file. + :rtype: List[(str, str, str, zipfile.ZipInfo)] + """ + l = [] + for infolist_item in zipfile.infolist(): + if infolist_item.is_dir(): + continue + _, name_ext = os.path.split(infolist_item.filename) + name, ext = os.path.splitext(name_ext) + ext = ext.lower() + if ext and ext in POLICYD_VALID_EXTS: + l.append((name, ext, name_ext, infolist_item)) + return l + + +def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): + """Read, validate and return the (first) yaml document from the stream. + + The doc is read, and checked for a yaml file. The top-level keys are + checked against the blacklist_keys provided. If there are problems then an + Exception is raised. Otherwise the yaml document is returned as a Python + object that can be dumped back as a yaml file on the system. + + The yaml file must only consist of a str:str mapping, and if not then the + yaml file is rejected. + + :param stream_or_doc: the file object to read the yaml from + :type stream_or_doc: Union[AnyStr, IO[AnyStr]] + :param blacklist_keys: Any keys, which if in the yaml file, should cause + an error. + :type blacklist_keys: Union[None, List[str]] + :returns: the yaml file as a python document + :rtype: Dict[str, str] + :raises: yaml.YAMLError if there is a problem with the document + :raises: BadPolicyYamlFile if file doesn't look right or there are + blacklisted keys in the file. + """ + blacklist_keys = blacklist_keys or [] + blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS) + doc = yaml.safe_load(stream_or_doc) + if not isinstance(doc, dict): + raise BadPolicyYamlFile("doesn't look like a policy file?") + keys = set(doc.keys()) + blacklisted_keys_present = keys.intersection(blacklist_keys) + if blacklisted_keys_present: + raise BadPolicyYamlFile("blacklisted keys {} present." + .format(", ".join(blacklisted_keys_present))) + if not all(isinstance(k, six.string_types) for k in keys): + raise BadPolicyYamlFile("keys in yaml aren't all strings?") + # check that the dictionary looks like a mapping of str to str + if not all(isinstance(v, six.string_types) for v in doc.values()): + raise BadPolicyYamlFile("values in yaml aren't all strings?") + return doc + + +def policyd_dir_for(service): + """Return the policy directory for the named service. + + This assumes the default name of "policy.d" which is kept across all + charms. + + :param service: str + :returns: the policy.d override directory. + :rtype: os.PathLike[str] + """ + return os.path.join("/", "etc", service, "policy.d") + + +def clean_policyd_dir_for(service, keep_paths=None): + """Clean out the policyd directory except for items that should be kept. + + The keep_paths, if used, should be set to the full path of the files that + should be kept in the policyd directory for the service. Note that the + service name is passed in, and then the policyd_dir_for() function is used. + This is so that a coding error doesn't result in a sudden deletion of the + charm (say). + + :param service: the service name to use to construct the policy.d dir. + :type service: str + :param keep_paths: optional list of paths to not delete.
+ :type keep_paths: Union[None, List[str]] + """ + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + if not os.path.exists(path): + ch_host.mkdir(path, owner=service, group=service, perms=0o775) + _scanner = os.scandir if six.PY3 else _py2_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +@contextlib.contextmanager +def _py2_scandir(path): + """provide a py2 implementation of os.scandir if this module ever gets used + in a py2 charm (unlikely). uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for a + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _P27Direntry objects + :rtype: ContextManager[_P27Direntry] + """ + for f in os.listdir(path): + yield _P27Direntry(f) + + +class _P27Direntry(object): + """Mock a scandir Direntry object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. + :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if os.path.isfile(_policy_success_file()): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. 
This, also, must not match any path in the + black list. + + If any error occurs, then the policy.d directory is cleared, the error is + written to the log, and the status line will eventually show as failed. + + :param resource_file: The zipped file to open and extract yaml files from. + :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml files + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the yaml + document. + :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :returns: True if the processing was successful, False if not. + :rtype: boolean + """ + blacklist_paths = blacklist_paths or [] + completed = False + try: + with open_and_filter_yaml_files(resource_file) as (zfp, gen): + # first clear out the policy.d directory and clear success + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + for name, ext, filename, zipinfo in gen: + # construct a name for the output file. + yaml_filename = path_for_policy_file(service, name) + if yaml_filename in blacklist_paths: + raise BadPolicyZipFile("policy.d name {} is blacklisted" + .format(yaml_filename)) + with zfp.open(zipinfo) as fp: + doc = fp.read() + # if template_function is not None, then offer the document + # to the template function + if ext in POLICYD_TEMPLATE_EXTS: + if (template_function is None or not + callable(template_function)): + raise BadPolicyZipFile( + "Template {} but no template_function is " + "available".format(filename)) + doc = template_function(doc) + yaml_doc = read_and_validate_yaml(doc, blacklist_keys) + with open(yaml_filename, "wt") as f: + yaml.dump(yaml_doc, f) + # Everything worked, so we mark up a success. + completed = True + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except IOError as e: + # technically this shouldn't happen; it would be a programming error as + # the filename comes from Juju and thus, should exist. + hookenv.log( + "File {} failed with IOError.
This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, blacklist_paths) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + open(_policy_success_file(), "w").close() + return completed diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index d43a4d20..ac96f844 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -84,7 +84,8 @@ SourceConfigError, GPGKeyError, get_upstream_version, - filter_missing_packages + filter_missing_packages, + ubuntu_apt_pkg as apt, ) from charmhelpers.fetch.snap import ( @@ -96,6 +97,10 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -443,8 +448,6 @@ def get_os_codename_package(package, fatal=True): # Second item in list is Version return line.split()[1] - import apt_pkg as apt - cache = apt_cache() try: @@ -658,7 +661,6 @@ def openstack_upgrade_available(package): a newer version of package. """ - import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) if not cur_vers: @@ -864,6 +866,12 @@ def _determine_os_workload_status( message = "Unit is ready" juju_log(message, 'INFO') + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + return state, message diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index bd9c6842..e13dfa8b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,8 +422,6 @@ def enabled_manager_modules(): cmd = ['ceph', 'mgr', 'module', 'ls'] try: modules = check_output(cmd) - if six.PY3: - modules = modules.decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 32754ff9..b33ac906 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -1075,3 +1075,30 @@ def install_ca_cert(ca_cert, name=None): log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. 
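+ + An illustrative call (the value shown is an assumption; the real result + depends entirely on the host's ``/etc/environment``): + + >>> get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin') # doctest: +SKIP + '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'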
+ + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 0ee2b660..1b57e2ce 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -24,6 +24,7 @@ 'bionic', 'cosmic', 'disco', + 'eoan', ) @@ -93,7 +94,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. """ - import apt_pkg + from charmhelpers.fetch import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache pkgcache = apt_cache() diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 8572d34f..0cc7fc85 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -103,6 +103,8 @@ def base_url(self, url): apt_unhold = fetch.apt_unhold import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 24c76e34..31225235 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -13,14 +13,14 @@ # limitations under the License. from collections import OrderedDict -import os import platform import re import six -import time import subprocess +import sys +import time -from charmhelpers.core.host import get_distrib_codename +from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( log, @@ -29,6 +29,7 @@ env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg PROPOSED_POCKET = ( "# Proposed\n" @@ -216,18 +217,42 @@ def filter_missing_packages(packages): ) -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache.""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) +def apt_cache(*_, **__): + """Shim returning an object simulating the apt_pkg Cache. + + :param _: Accept arguments for compatibility, not used. + :type _: any + :param __: Accept keyword arguments for compatibility, not used. + :type __: any + :returns: Object used to interrogate the system apt and dpkg databases.
+ :rtype: ubuntu_apt_pkg.Cache + """ + if 'apt_pkg' in sys.modules: + # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg`` module + # in conjunction with the apt_cache helper function, they may expect us + # to call ``apt_pkg.init()`` for them. + # + # Detect this situation, log a warning and make the call to + # ``apt_pkg.init()`` to keep the consumer Python interpreter from + # crashing with a segmentation fault. + log('Support for use of upstream ``apt_pkg`` module in conjunction ' 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + sys.modules['apt_pkg'].init() + return ubuntu_apt_pkg.Cache() def apt_install(packages, options=None, fatal=False): - """Install one or more packages.""" + """Install one or more packages. + + :param packages: Package(s) to install + :type packages: Union[str, List[str]] + :param options: Options to pass on to apt-get + :type options: Union[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -244,7 +269,17 @@ def apt_install(packages, options=None, fatal=False): def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" + """Upgrade all packages. + + :param options: Options to pass on to apt-get + :type options: Union[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` + :type dist: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -265,7 +300,15 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): - """Purge one or more packages.""" + """Purge one or more packages. + + :param packages: Package(s) to purge + :type packages: Union[str, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) @@ -276,7 +319,14 @@ def apt_purge(packages, fatal=False): def apt_autoremove(purge=True, fatal=False): - """Purge one or more packages.""" + """Remove packages that are no longer required. + + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'autoremove'] if purge: cmd.append('--purge') @@ -660,21 +710,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None): """Run a command and retry until success or max_retries is reached. - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT.
+ :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param cmd_env: Environment variables to add to the command run. + :type cmd_env: Union[None, Dict[str, str]] """ - - env = None - kwargs = {} + env = get_apt_dpkg_env() if cmd_env: - env = os.environ.copy() env.update(cmd_env) - kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -686,8 +737,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - # result = subprocess.check_call(cmd, env=env) - result = subprocess.check_call(cmd, **kwargs) + result = subprocess.check_call(cmd, env=env) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -700,22 +750,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. - cmd_env = { - 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} - if fatal: _run_with_retries( - cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + cmd, retry_exitcodes=(1, APT_NO_LOCK,), retry_message="Couldn't acquire DPKG lock") else: - env = os.environ.copy() - env.update(cmd_env) - subprocess.call(cmd, env=env) + subprocess.call(cmd, env=get_apt_dpkg_env()) def get_upstream_version(package): @@ -723,7 +769,6 @@ def get_upstream_version(package): @returns None (if not installed) or the upstream version """ - import apt_pkg cache = apt_cache() try: pkg = cache[package] @@ -735,4 +780,18 @@ def get_upstream_version(package): # package is known, but no version is currently installed. return None - return apt_pkg.upstream_version(pkg.current_ver.ver_str) + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # a valid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 00000000..104f91f1 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,237 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. + +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
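+ # For reference, the parsing loop below consumes ``dpkg-query --list`` + # output shaped like this (illustrative values, not real data): + # ||/ Name Version Architecture Description + # +++-====-=======-============-=========== + # ii ceph 12.2.12-0ubuntu0.18.04.4 amd64 distributed storage + # Heading lines start with '||/', separator lines with '+' or '|', and + # only installed packages (status 'ii') are kept.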
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + previous = None + pkg = {} + for line in output.splitlines(): + if not line: + if 'package' in pkg: + pkgs.update({pkg['package']: pkg}) + pkg = {} + continue + if line.startswith(' '): + if previous and previous in pkg: + pkg[previous] += os.linesep + line.lstrip() + continue + if ':' in line: + kv = line.split(':', 1) + key = kv[0].lower() + if key == 'n': + continue + previous = key + pkg.update({key: kv[1].lstrip()}) + except subprocess.CalledProcessError as cp: + # ``apt-cache`` returns 100 if none of the packages asked for + # exist in the apt cache. + if cp.returncode != 100: + raise + return pkgs + + +def init(): + """Compability shim that does nothing.""" + pass + + +def upstream_version(version): + """Extracts upstream version from a version string. + + Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ + apt-pkg/deb/debversion.cc#L259 + + :param version: Version string + :type version: str + :returns: Upstream version + :rtype: str + """ + if version: + version = version.split(':')[-1] + version = version.split('-')[0] + return version + + +def version_compare(a, b): + """Compare the given versions. + + Call out to ``dpkg`` to make sure the code doing the comparison is + compatible with what the ``apt`` library would do. Mimic the return + values. 
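+ + Illustrative usage (expected results follow from the semantics + described above): + + >>> version_compare('2:1.2-1', '2:1.1-1') + 1 + >>> version_compare('1.0-0ubuntu1', '1.0-0ubuntu1') + 0 + >>> version_compare('0.9', '1.0') + -1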
+ + Upstream reference: + https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html + ?highlight=version_compare#apt_pkg.version_compare + + :param a: version string + :type a: str + :param b: version string + :type b: str + :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, + <0 if ``a`` is smaller than ``b`` + :rtype: int + :raises: subprocess.CalledProcessError, RuntimeError + """ + for op in ('gt', 1), ('eq', 0), ('lt', -1): + try: + subprocess.check_call(['dpkg', '--compare-versions', + a, op[0], b], + stderr=subprocess.STDOUT, + universal_newlines=True) + return op[1] + except subprocess.CalledProcessError as cp: + if cp.returncode == 1: + continue + raise + else: + raise RuntimeError('Unable to compare "{}" and "{}", according to ' + 'our logic they are neither greater, equal nor ' + 'less than each other.'.format(a, b)) diff --git a/ceph-mon/lib/ceph/crush_utils.py b/ceph-mon/lib/ceph/crush_utils.py index 8b6876c1..8fe09fa4 100644 --- a/ceph-mon/lib/ceph/crush_utils.py +++ b/ceph-mon/lib/ceph/crush_utils.py @@ -24,7 +24,7 @@ CRUSH_BUCKET = """root {name} {{ id {id} # do not change unnecessarily # weight 0.000 - alg straw + alg straw2 hash 0 # rjenkins1 }} diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 970b15fe..99e444cb 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -793,12 +793,33 @@ def is_leader(): return False +def manager_available(): + # if manager daemon isn't on this release, just say it is fine + if cmp_pkgrevno('ceph', '11.0.0') < 0: + return True + cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] + try: + result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return result['available'] + except subprocess.CalledProcessError as e: + log("'{}' failed: {}".format(" ".join(cmd), str(e))) + return False + except Exception: + return False + + def wait_for_quorum(): while not is_quorum(): log("Waiting for quorum to be reached") time.sleep(3) +def wait_for_manager(): + while not manager_available(): + log("Waiting for manager to be available") + time.sleep(5) + + def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ @@ -929,11 +950,13 @@ def is_osd_disk(dev): def start_osds(devices): # Scan for ceph block devices rescan_osd_devices() - if cmp_pkgrevno('ceph', "0.56.6") >= 0: - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call(['ceph-disk', 'activate', dev_or_path]) + if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and + cmp_pkgrevno('ceph', '14.2.0') < 0): + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call( + ['ceph-disk', 'activate', dev_or_path]) def udevadm_settle(): @@ -952,12 +975,11 @@ def rescan_osd_devices(): udevadm_settle() _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' -_bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" -_upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" def is_bootstrapped(): - return os.path.exists(_client_admin_keyring) + return os.path.exists( + '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) def wait_for_bootstrap(): @@ -965,36 +987,6 @@ time.sleep(3) -def import_osd_bootstrap_key(key): - if not os.path.exists(_bootstrap_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), -
'ceph-authtool', - _bootstrap_keyring, - '--create-keyring', - '--name=client.bootstrap-osd', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -def import_osd_upgrade_key(key): - if not os.path.exists(_upgrade_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _upgrade_keyring, - '--create-keyring', - '--name=client.osd-upgrade', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - def generate_monitor_secret(): cmd = [ 'ceph-authtool', @@ -1491,6 +1483,10 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, ignore_errors, encrypt, bluestore, key_manager) else: + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + log("Directory backed OSDs can not be created on Nautilus", + level=WARNING) + return osdize_dir(dev, encrypt, bluestore) @@ -1976,9 +1972,10 @@ def osdize_dir(path, encrypt=False, bluestore=False): ' skipping'.format(path)) return - if os.path.exists(os.path.join(path, 'upstart')): - log('Path {} is already configured as an OSD - bailing'.format(path)) - return + for t in ['upstart', 'systemd']: + if os.path.exists(os.path.join(path, t)): + log('Path {} is already used as an OSD dir - bailing'.format(path)) + return if cmp_pkgrevno('ceph', "0.56.6") < 0: log('Unable to use directories for OSDs with ceph < 0.56.6', @@ -2787,7 +2784,6 @@ def dirs_need_ownership_update(service): ('hammer', 'jewel'), ('jewel', 'luminous'), ('luminous', 'mimic'), - ('luminous', 'nautilus'), ('mimic', 'nautilus'), ]) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index b8fec1e2..343beed1 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -1,6 +1,12 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index bd4be894..7d9c2587 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -1,31 +1,18 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. 
+# charm-tools>=2.4.4 -coverage>=3.6 +requests>=2.18.4 mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 -requests>=2.18.4 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -amulet>=1.14.3,<2.0;python_version=='2.7' -bundletester>=0.6.1,<1.0;python_version=='2.7' -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -# END: Amulet OpenStack Charm Helper Requirements -# NOTE: workaround for 14.04 pip/tox -pytz -pyudev # for ceph-* charm unit tests (not mocked?) -git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>'3.4' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4' \ No newline at end of file +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 99340106..20dbbfc5 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -1,35 +1,31 @@ -# Classic charm: ./tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8,py37 +envlist = pep8,py3 skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=5400 install_command = pip install {opts} {packages} -commands = stestr run {posargs} +commands = stestr run --slowest {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* +passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt -[testenv:py27] -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -# temporarily disable py27 -commands = /bin/true - -[testenv:py3] -basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -; keep zuul happy until we change the py35 job [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt @@ -45,11 +41,16 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions lib +commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof [testenv:cover] @@ -63,7 +64,7 @@ setenv = PYTHON=coverage run commands = coverage erase - stestr run {posargs} + stestr run --slowest {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml @@ -84,6 +85,10 @@ omit = basepython = python3 commands = {posargs} +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help [testenv:func] basepython = python3 @@ -100,6 +105,11 @@ basepython = python3 commands = functest-run-suite --keep-model --dev +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + [flake8] ignore = E402,E226 exclude = */charmhelpers From e1253037d2f89ad04b08582b8f0ae45db6ad8fbc Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 21 Oct 2019 12:04:40 +0000 Subject: [PATCH 1834/2699] Resync charmhelpers A fix to the charm which resides in charm-helpers was inadvertently removed by a recent commit. Resync charm-helpers to bring it back in. Change-Id: Ic8a98c3985596f00ce193619039a76a79484edc0 --- .../charmhelpers/contrib/openstack/context.py | 4 +- .../charmhelpers/contrib/openstack/policyd.py | 104 ++++++++++-------- .../charmhelpers/contrib/openstack/utils.py | 13 ++- .../charmhelpers/contrib/python/__init__.py | 0 .../contrib/storage/linux/ceph.py | 35 ++++-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 18 +++ 6 files changed, 112 insertions(+), 62 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/python/__init__.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index a3d48c41..9b80b6d6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1940,7 +1940,7 @@ def _determine_ctxt(self): as well as the catalog_info string that would be supplied. Returns a dict containing the volume_api_version and the volume_catalog_info.
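An illustrative return value (the exact catalog_info format shown here is an assumption): {'volume_api_version': '3', 'volume_catalog_info': 'volumev3:cinderv3:publicURL'}.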
""" - rel = os_release(self.pkg, base='icehouse') + rel = os_release(self.pkg) version = '2' if CompareOpenStackReleases(rel) >= 'pike': version = '3' @@ -2140,7 +2140,7 @@ def __init__(self, pkg='python-keystone'): self.pkg = pkg def __call__(self): - ostack = os_release(self.pkg, base='icehouse') + ostack = os_release(self.pkg) osystem = lsb_release()['DISTRIB_CODENAME'].lower() return { 'openstack_release': ostack, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index 1adf2472..83ca4ab7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,6 +17,7 @@ import os import six import shutil +import sys import yaml import zipfile @@ -115,8 +116,8 @@ def upgrade_charm(): default: False description: | If True then use the resource file named 'policyd-override' to install - override yaml files in the service's policy.d directory. The resource - file should be a zip file containing at least one yaml file with a .yaml + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. """ @@ -134,14 +135,14 @@ def upgrade_charm(): Policy Overrides ---------------- -This service allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the service supports should be -clearly and unambiguously understood before trying to override, or add to, the -default policies that the service uses. +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The charm also has some policy defaults. They should also be understood before -being overridden. It is possible to break the system (for tenants and other -services) if policies are incorrectly applied to the service. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. Policy overrides are YAML files that contain rules that will add to, or override, existing policy rules in the service. The `policy.d` directory is @@ -149,30 +150,16 @@ def upgrade_charm(): `/etc/keystone/policy.d` directory, and as such, any manual changes to it will be overwritten on charm upgrades. -Policy overrides are provided to the charm using a resource file called -`policyd-override`. This is attached to the charm using (for example): +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: - juju attach-resource policyd-override= -The `` is the name that this charm is deployed as, with -`` being the resource file containing the policy overrides. + juju attach-resource policyd-override=overrides.zip -The format of the resource file is a ZIP file (.zip extension) containing at -least one YAML file with an extension of `.yaml` or `.yml`. Note that any -directories in the ZIP file are ignored; all of the files are flattened into a -single directory. 
There must not be any duplicated filenames; this will cause -an error and nothing in the resource file will be applied. +The policy override is enabled in the charm using: -(ed. next part is optional is the charm supports some form of -template/substitution on a read file) - -If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the -resource file then the charm will perform a substitution with charm variables -taken from the config or relations. (ed. edit as appropriate to include the -variable). - -To enable the policy overrides the config option `use-policyd-override` must be -set to `True`. + juju config use-policyd-override=true When `use-policyd-override` is `True` the status line of the charm will be prefixed with `PO:` indicating that policies have been overridden. If the @@ -180,12 +167,8 @@ def upgrade_charm(): status line will be prefixed with `PO (broken):`. The log file for the charm will indicate the reason. No policy override files are installed if the `PO (broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed - they will be the defaults for -the charm and service. - -If the policy overrides did not install then *either* attach a new, corrected, -resource file *or* disable the policy overrides by setting -`use-policyd-override` to False. +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. Policy overrides on one service may affect the functionality of another service. Therefore, it may be necessary to provide policy overrides for @@ -296,15 +279,28 @@ def maybe_do_policyd_overrides(openstack_release, restarted. :type restart_handler: Union[None, Callable[]] """ + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) + if not is_policyd_override_valid_on_this_release(openstack_release): + hookenv.log("... policy overrides not valid on this release: {}" + .format(openstack_release), + level=POLICYD_LOG_LEVEL_DEFAULT) + return config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): - remove_policy_success_file() clean_policyd_dir_for(service, blacklist_paths) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() + remove_policy_success_file() return - except Exception: - return - if not is_policyd_override_valid_on_this_release(openstack_release): + except Exception as e: + hookenv.log("... ERROR: Exception is: {}".format(str(e)), + level=POLICYD_CONFIG_NAME) + import traceback + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return # from now on it should succeed; if it doesn't then status line will show # broken. @@ -345,16 +341,30 @@ def maybe_do_policyd_overrides_on_config_changed(openstack_release, restarted. :type restart_handler: Union[None, Callable[]] """ + if not is_policyd_override_valid_on_this_release(openstack_release): + return + hookenv.log("Running maybe_do_policyd_overrides_on_config_changed", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): - remove_policy_success_file() clean_policyd_dir_for(service, blacklist_paths) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() + remove_policy_success_file() return - except Exception: + except Exception as e: + hookenv.log("... 
ERROR: Exception is: {}".format(str(e)), + level=POLICYD_CONFIG_NAME) + import traceback + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return # if the policyd overrides have been performed just return if os.path.isfile(_policy_success_file()): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) return maybe_do_policyd_overrides( openstack_release, service, blacklist_paths, blacklist_keys, @@ -430,8 +440,13 @@ def _yamlfiles(zipfile): """ l = [] for infolist_item in zipfile.infolist(): - if infolist_item.is_dir(): - continue + try: + if infolist_item.is_dir(): + continue + except AttributeError: + # fallback to "old" way to determine dir entry for pre-py36 + if infolist_item.filename.endswith('/'): + continue _, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) ext = ext.lower() @@ -511,7 +526,7 @@ def clean_policyd_dir_for(service, keep_paths=None): path = policyd_dir_for(service) if not os.path.exists(path): ch_host.mkdir(path, owner=service, group=service, perms=0o775) - _scanner = os.scandir if six.PY3 else _py2_scandir + _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -641,6 +656,7 @@ def process_policy_resource_file(resource_file, :returns: True if the processing was successful, False if not. :rtype: boolean """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False try: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index ac96f844..02190264 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -204,7 +204,7 @@ ('stein', ['2.20.0', '2.21.0']), ('train', - ['2.22.0']), + ['2.22.0', '2.23.0']), ]) # >= Liberty version->codename mapping @@ -531,7 +531,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -542,6 +542,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. 
''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -670,7 +672,10 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1693,7 +1698,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13dfa8b..104977af 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,6 +422,8 @@ def enabled_manager_modules(): cmd = ['ceph', 'mgr', 'module', 'ls'] try: modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -1185,6 +1187,15 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, object_prefix_permissions=None): @@ -1198,7 +1209,7 @@ def add_op_request_access_to_group(self, name, namespace=None, 'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']} """ - self.ops.append({ + self.add_op({ 'op': 'add-permissions-to-key', 'group': name, 'namespace': namespace, 'name': key_name or service_name(), @@ -1251,11 +1262,11 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, @@ -1283,12 +1294,12 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, :param max_objects: Maximum objects quota to apply :type max_objects: int """ - self.ops.append({'op': 'create-pool', 'name': name, - 'pool-type': 'erasure', - 'erasure-profile': erasure_profile, - 'weight': weight, - 'group': group, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 
'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 4744eb43..39b1cd09 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -119,6 +119,24 @@ def log(message, level=None): raise +def action_log(message): + """Write an action progress message""" + command = ['action-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing action-log should not cause failures in unit tests + # Send action_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "action-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" From 1275a64864da4d27f136b1ec29c767362278f903 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 16 Oct 2019 14:01:03 +0200 Subject: [PATCH 1835/2699] Sync charms.ceph to add new permission request to broker Change-Id: Id5023785fc748e44978b669db5b79c6c40e88de5 Depends-On: Ifd341bd80833d4a7fd62e89e3c0e2b7fd64fafba --- ceph-mon/lib/ceph/broker.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/ceph/broker.py index 3226f4cc..bae74a12 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/ceph/broker.py @@ -212,6 +212,18 @@ def handle_add_permissions_to_key(request, service): return resp +def handle_set_key_permissions(request, service): + """Ensure the key has the requested permissions.""" + permissions = request.get('permissions') + client = request.get('client') + call = ['ceph', '--id', service, 'auth', 'caps', + 'client.{}'.format(client)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e), level=ERROR) + + def update_service_permissions(service, service_obj=None, namespace=None): """Update the key permissions for the named client in Ceph""" if not service_obj: @@ -866,6 +878,8 @@ def process_requests_v1(reqs): ret = handle_put_osd_in_bucket(request=req, service=svc) elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) + elif op == 'set-key-permissions': + ret = handle_set_key_permissions(request=req, service=svc) else: msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) From c4f18131abfa125c894c501a1a9296d15e066cc0 Mon Sep 17 00:00:00 2001 From: taodd Date: Mon, 14 Oct 2019 18:18:10 +0800 Subject: [PATCH 1836/2699] Fix list-disk action wrongly report osd journal as the non-pristine device Change-Id: Ib117fc5797447ade75d50a7ff7dae5de473fe7b9 Closes-Bug: #1847988 --- ceph-osd/actions/list_disks.py | 13 ++++++++-- .../unit_tests/test_actions_list_disks.py | 26 +++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 ceph-osd/unit_tests/test_actions_list_disks.py diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index 819310f8..46031a46 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -29,6 +29,7 @@ """ import sys +import os sys.path.append('hooks/') sys.path.append('lib/') @@ -38,9 +39,13 @@ import ceph.utils import utils -if __name__ == '__main__': + +def list_disk(): non_pristine 
= [] - osd_journal = utils.get_journal_devices() + osd_journal = [] + for journal in utils.get_journal_devices(): + osd_journal.append(os.path.realpath(journal)) + for dev in list(set(ceph.utils.unmounted_disks()) - set(osd_journal)): if (not ceph.utils.is_active_bluestore_device(dev) and not ceph.utils.is_pristine_disk(dev)): @@ -51,3 +56,7 @@ 'blacklist': utils.get_blacklist(), 'non-pristine': non_pristine, }) + + +if __name__ == '__main__': + list_disk() diff --git a/ceph-osd/unit_tests/test_actions_list_disks.py b/ceph-osd/unit_tests/test_actions_list_disks.py new file mode 100644 index 00000000..4990d61d --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_list_disks.py @@ -0,0 +1,26 @@ +from actions import list_disks + +from test_utils import CharmTestCase + + +class ListDisksActionTests(CharmTestCase): + def setUp(self): + super(ListDisksActionTests, self).setUp( + list_disks, ['hookenv', + 'ceph', + 'utils', + 'os']) + self.ceph.utils.unmounted_disks.return_value = ['/dev/sda', '/dev/sdm'] + + def test_list_disks_journal_symbol_link(self): + self.utils.get_journal_devices.return_value = {'/dev/disk/ceph/sdm'} + self.os.path.realpath.return_value = '/dev/sdm' + self.ceph.utils.is_active_bluestore_device.return_value = False + self.ceph.utils.is_pristine_disk.return_value = False + self.utils.get_blacklist.return_value = [] + list_disks.list_disk() + self.hookenv.action_set.assert_called_with({ + 'disks': ['/dev/sda'], + 'blacklist': [], + 'non-pristine': ['/dev/sda'] + }) From 154d8362c10025f71b396a9a0398b49e08857193 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Thu, 24 Oct 2019 09:57:32 +0000 Subject: [PATCH 1837/2699] sync with last charms.ceph * 7839ca3 put create_keyrings as a public method Change-Id: Ia090d4c37e19016a8468ce62113b2531c69de0e5 Signed-off-by: Sahid Orentino Ferdjaoui --- ceph-mon/lib/ceph/utils.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 99e444cb..6b5e4955 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -29,7 +29,6 @@ from charmhelpers.core import hookenv from charmhelpers.core import templating -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import ( chownr, cmp_pkgrevno, @@ -1295,7 +1294,6 @@ def bootstrap_monitor_cluster(secret): path, done, init_marker) - _create_keyrings() except: raise finally: @@ -1343,10 +1341,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): service_restart('ceph-mon-all') -@retry_on_exception(3, base_delay=5) -def _create_keyrings(): +def create_keyrings(): """Create keyrings for operation of ceph-mon units + NOTE: The quorum should be done before to execute this function. 
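
The NOTE above is load-bearing: with the retry decorator removed and the
function made public, callers are now responsible for sequencing quorum
themselves. A minimal sketch of the intended order, matching the ceph-mon
hook changes made later in this series (surrounding hook plumbing elided):

    ceph.wait_for_bootstrap()
    ceph.wait_for_quorum()    # keyring creation requires an established quorum
    ceph.create_keyrings()    # now public; raises if keyrings cannot be made
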
+ :raises: Exception if keyrings cannot be created """ if cmp_pkgrevno('ceph', '14.0.0') >= 0: From 60325d7eee5b398e944249e00f832ec757acc83f Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Thu, 24 Oct 2019 09:59:36 +0000 Subject: [PATCH 1838/2699] sync with last charms.ceph 7839ca3 put create_keyrings as a public method Change-Id: Ia1ab1860df33960519d0fac70b8315bc3e8e25a3 Signed-off-by: Sahid Orentino Ferdjaoui --- ceph-osd/lib/ceph/utils.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index ee555e25..6b5e4955 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -29,7 +29,6 @@ from charmhelpers.core import hookenv from charmhelpers.core import templating -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import ( chownr, cmp_pkgrevno, @@ -793,12 +792,33 @@ def is_leader(): return False +def manager_available(): + # if manager daemon isn't on this release, just say it is Fine + if cmp_pkgrevno('ceph', '11.0.0') < 0: + return True + cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] + try: + result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return result['available'] + except subprocess.CalledProcessError as e: + log("'{}' failed: {}".format(" ".join(cmd), str(e))) + return False + except Exception: + return False + + def wait_for_quorum(): while not is_quorum(): log("Waiting for quorum to be reached") time.sleep(3) +def wait_for_manager(): + while not manager_available(): + log("Waiting for manager to be available") + time.sleep(5) + + def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ @@ -1274,7 +1294,6 @@ def bootstrap_monitor_cluster(secret): path, done, init_marker) - _create_keyrings() except: raise finally: @@ -1322,10 +1341,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): service_restart('ceph-mon-all') -@retry_on_exception(3, base_delay=5) -def _create_keyrings(): +def create_keyrings(): """Create keyrings for operation of ceph-mon units + NOTE: The quorum should be done before to execute this function. + :raises: Exception if keyrings cannot be created """ if cmp_pkgrevno('ceph', '14.0.0') >= 0: From d36ffcd4c88adc7e6ee4f452a4b54d67f7957520 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 22 Oct 2019 13:12:31 +0000 Subject: [PATCH 1839/2699] Enable functional tests for train This patch also provides a fix from the last charms.ceph sync to fix random issue with ceph >= 14. 
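
The manager_available() helper added in the sync above simply reads the
'available' flag from 'ceph mgr dump'. The equivalent manual check, shown
here for orientation only (a healthy cluster is assumed):

    $ sudo -u ceph ceph mgr dump -f json | \
        python3 -c 'import json, sys; print(json.load(sys.stdin)["available"])'
    True
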
Change-Id: I8aa03e073442c2eaa1c979562f68c435c229db16 Signed-off-by: Sahid Orentino Ferdjaoui --- ceph-mon/hooks/ceph_hooks.py | 3 +++ ceph-mon/tests/tests.yaml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index af819f7b..46ba0901 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -300,6 +300,8 @@ def config_changed(): log("Couldn't bootstrap the monitor yet: {}".format(str(e))) return ceph.wait_for_bootstrap() + ceph.wait_for_quorum() + ceph.create_keyrings() if cmp_pkgrevno('ceph', '12.0.0') >= 0: status_set('maintenance', 'Bootstrapping single Ceph MGR') ceph.bootstrap_manager() @@ -442,6 +444,7 @@ def mon_relation(): exit(0) ceph.wait_for_bootstrap() ceph.wait_for_quorum() + ceph.create_keyrings() if cmp_pkgrevno('ceph', '12.0.0') >= 0: status_set('maintenance', 'Bootstrapping Ceph MGR') ceph.bootstrap_manager() diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index df940108..4b72dbbd 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -10,7 +10,7 @@ gate_bundles: - xenial-mitaka - trusty-mitaka smoke_bundles: - - bionic-stein + - bionic-train dev_bundles: - cosmic-rocky - disco-stein From 6293bdb46662af7dac540303c1087ceb2d3bde0b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 24 Oct 2019 13:15:34 -0400 Subject: [PATCH 1840/2699] Enable functional tests for bionic-train This patch also updates the smoke test to run bionic-train. Change-Id: I0ba45f400cde80ab4ec86e7ceb88dea786c74c82 --- ceph-proxy/tests/bundles/bionic-train.yaml | 82 ++++++++++++++++++++++ ceph-proxy/tests/tests.yaml | 3 +- 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/tests/bundles/bionic-train.yaml diff --git a/ceph-proxy/tests/bundles/bionic-train.yaml b/ceph-proxy/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..29f0dbeb --- /dev/null +++ b/ceph-proxy/tests/bundles/bionic-train.yaml @@ -0,0 +1,82 @@ +series: bionic +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-train + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:bionic-train + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:bionic-train + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:bionic-train + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + openstack-origin: cloud:bionic-train + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + openstack-origin: cloud:bionic-train + admin-password: openstack + admin-token: ubuntutesting + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + source: cloud:bionic-train + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 + options: + source: cloud:bionic-train +relations: + - - 
'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index f68a6d89..841465e5 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -12,6 +12,7 @@ gate_bundles: - bionic-queens # luminous - bionic-rocky # mimic - bionic-stein + - bionic-train - disco-stein dev_bundles: # Icehouse @@ -26,7 +27,7 @@ dev_bundles: # Pike - xenial-pike smoke_bundles: - - bionic-stein + - bionic-train target_deploy_status: ceph-proxy: From 1ee1542710484b05ceb91d2218d6a45d859c123c Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Fri, 18 Oct 2019 12:07:36 +0200 Subject: [PATCH 1841/2699] Enable functional tests for train This patch also updates the smoke test to run train. Change-Id: I129d21e0b6a1afb531442511c267a0c34c0d958c Signed-off-by: Sahid Orentino Ferdjaoui --- ceph-osd/tests/bundles/bionic-train.yaml | 103 +++++++++++++++++++++++ ceph-osd/tests/tests.yaml | 3 +- 2 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/tests/bundles/bionic-train.yaml diff --git a/ceph-osd/tests/bundles/bionic-train.yaml b/ceph-osd/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..2b813ed5 --- /dev/null +++ b/ceph-osd/tests/bundles/bionic-train.yaml @@ -0,0 +1,103 @@ +series: bionic +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: bionic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-train + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-train + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-train + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-train +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - 
cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 7e214b57..cf377766 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-osd gate_bundles: + - bionic-train - bionic-stein - bionic-rocky - bionic-queens @@ -9,7 +10,7 @@ gate_bundles: - xenial-mitaka - trusty-mitaka smoke_bundles: - - bionic-stein + - bionic-train dev_bundles: - disco-stein configure: From 7112750b45d5c879f12a5fb9b8d9c767e986897a Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 16 Oct 2019 15:26:52 +0100 Subject: [PATCH 1842/2699] Revert "Remove explicit fsid charm config option" Revert the removal of the fsid charm configuration option. This is required for the in-place migration of an existing Ceph deployment into a Charm managed deployment. This reverts commit 747fd9a321a15ad76249c317c2036a897ca5a376. Change-Id: I260b3eef85465293cc7ec554913dc66d9a4b7ee4 Closes-Bug: 1846337 --- ceph-mon/config.yaml | 9 ++ ceph-mon/hooks/ceph_hooks.py | 5 +- .../tests/bundles/bionic-train-with-fsid.yaml | 104 ++++++++++++++++++ ceph-mon/tests/tests.yaml | 1 + 4 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/tests/bundles/bionic-train-with-fsid.yaml diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index e3e0c92a..55e41380 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -32,6 +32,15 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. + fsid: + type: string + default: + description: | + The unique identifier (fsid) of the Ceph cluster. + . + WARNING: this option should only be used when performing an in-place + migration of an existing non-charm deployed Ceph cluster to a charm + managed deployment. 
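
For the in-place migration case this option is being restored for, the fsid
would typically be read from the running cluster and handed to the charm at
deploy time. Roughly, and only as a sketch (the companion 'no-bootstrap'
option is used for the same migration scenario):

    FSID=$(ceph fsid)        # run against the existing cluster
    juju deploy ceph-mon -n 3 --config fsid=$FSID --config no-bootstrap=true
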
config-flags: type: string default: diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 46ba0901..2ed2114d 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -242,7 +242,10 @@ def config_changed(): if is_leader(): if not config('no-bootstrap'): if not leader_get('fsid') or not leader_get('monitor-secret'): - fsid = "{}".format(uuid.uuid1()) + if config('fsid'): + fsid = config('fsid') + else: + fsid = "{}".format(uuid.uuid1()) if config('monitor-secret'): mon_secret = config('monitor-secret') else: diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml new file mode 100644 index 00000000..143e0ecc --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -0,0 +1,104 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-train/proposed + ceph-mon: + charm: ceph-mon + series: bionic + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train/proposed + fsid: 3930914c-4fc5-4720-8975-b7bf554f647c + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-train/proposed + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train/proposed + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-train/proposed + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 
4b72dbbd..46f09e8d 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,6 +1,7 @@ charm_name: ceph-mon gate_bundles: - bionic-train + - bionic-train-with-fsid - bionic-stein - bionic-rocky - bionic-queens From 3d117e45f3c1efc020583045600ef3f6276dcfd0 Mon Sep 17 00:00:00 2001 From: inspurericzhang Date: Thu, 31 Oct 2019 17:29:15 +0800 Subject: [PATCH 1843/2699] Switch to Ussuri jobs Change-Id: I0ee0de9048fae3e23aec52f896bf73b7c4f99102 --- ceph-fs/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 830f18ea..650658d7 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -2,5 +2,5 @@ templates: - python35-charm-jobs - openstack-python35-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs - openstack-cover-jobs From 8aa0412b84eae991b8d062c2ee897add95d7fe98 Mon Sep 17 00:00:00 2001 From: inspurericzhang Date: Thu, 31 Oct 2019 17:29:17 +0800 Subject: [PATCH 1844/2699] Switch to Ussuri jobs Change-Id: Id61c1806a17a76c45e3dc19f1d97c250cc6125b8 --- ceph-proxy/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 18efca1a..34184f1d 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - python35-charm-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs From 082e23a05846427f3cf12146ff83c1e4119765c8 Mon Sep 17 00:00:00 2001 From: inspurericzhang Date: Thu, 31 Oct 2019 17:29:17 +0800 Subject: [PATCH 1845/2699] Switch to Ussuri jobs Change-Id: I071200484b8fc28548264ad76c821f241d77f7f2 --- ceph-radosgw/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index 18efca1a..34184f1d 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - python35-charm-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs From ff353e202f850426c4246f761f5797c7b57640ac Mon Sep 17 00:00:00 2001 From: inspurericzhang Date: Thu, 31 Oct 2019 17:29:18 +0800 Subject: [PATCH 1846/2699] Switch to Ussuri jobs Change-Id: I88580aee3943af84b9f23ee67da38d80a8394d11 --- ceph-rbd-mirror/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml index 18efca1a..34184f1d 100644 --- a/ceph-rbd-mirror/.zuul.yaml +++ b/ceph-rbd-mirror/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - python35-charm-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs From 4d5306c45cedb432d546d0644624ee60328aa8fb Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 24 Oct 2019 13:17:48 -0400 Subject: [PATCH 1847/2699] Enable functional tests for bionic-train This patch also updates the smoke test to run bionic-train. 
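
The 'cloud:bionic-train' source used throughout these test bundles points at
the Ubuntu Cloud Archive pocket for Train on bionic; outside of Juju, the same
pocket would be enabled with, for orientation only:

    sudo add-apt-repository cloud-archive:train
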
Change-Id: Ife2ac3f4dc37216df28fce912c47fc6ddec454cf --- .../bundles/bionic-train-namespaced.yaml | 43 +++++++++++++++++++ ceph-radosgw/tests/bundles/bionic-train.yaml | 42 ++++++++++++++++++ ceph-radosgw/tests/tests.yaml | 4 +- 3 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/bionic-train.yaml diff --git a/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml new file mode 100644 index 00000000..8773b257 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml @@ -0,0 +1,43 @@ +options: + source: &source cloud:bionic-train +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/bionic-train.yaml b/ceph-radosgw/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..8858d027 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-train.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:bionic-train +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index cc0f6499..0aca1f4c 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-radosgw gate_bundles: + - bionic-train + - bionic-train-namespaced - bionic-stein - bionic-stein-namespaced - bionic-rocky @@ -13,7 +15,7 @@ gate_bundles: - xenial-mitaka-namespaced - trusty-mitaka smoke_bundles: - - bionic-stein + - bionic-train dev_bundles: - bionic-queens-multisite - bionic-rocky-multisite From 032d5d8ec757ab4d0fd2eb1a258a0264f15a389a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 24 Oct 2019 13:09:49 -0400 Subject: [PATCH 1848/2699] Enable functional tests for bionic-train This patch also updates the smoke test to run 
bionic-train. Change-Id: I113899e62e7eab845c3b0e7022200ccad02a2723 --- ceph-fs/src/tests/basic_deployment.py | 7 +++++++ ceph-fs/src/tests/gate-basic-bionic-train | 25 +++++++++++++++++++++++ ceph-fs/src/tox.ini | 2 +- 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100755 ceph-fs/src/tests/gate-basic-bionic-train diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py index 128e2305..bbaf1632 100644 --- a/ceph-fs/src/tests/basic_deployment.py +++ b/ceph-fs/src/tests/basic_deployment.py @@ -128,6 +128,13 @@ def test_100_ceph_processes(self): ceph_osd_processes = { 'ceph-osd': 2 } + + # Pre-nautilus (ie. pre-train) we had a directory backed OSD and + # a disk backed OSD, but at nautilus (and beyond) we only have + # the disk backed OSD. + if self._get_openstack_release() >= self.bionic_train: + ceph_osd_processes['ceph-osd'] = 1 + ceph_mds_processes = { 'ceph-mds': 1 } diff --git a/ceph-fs/src/tests/gate-basic-bionic-train b/ceph-fs/src/tests/gate-basic-bionic-train new file mode 100755 index 00000000..fd29eb2a --- /dev/null +++ b/ceph-fs/src/tests/gate-basic-bionic-train @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic ceph deployment on bionic-train.""" + +from basic_deployment import CephFsBasicDeployment + +if __name__ == '__main__': + deployment = CephFsBasicDeployment(series='bionic', + openstack='cloud:bionic-train', + source='cloud:bionic-train') + deployment.run_tests() diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 8a91d50b..dd17ae57 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -43,7 +43,7 @@ commands = # Run a specific test as an Amulet smoke test (expected to always pass) basepython = python2.7 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-train --no-destroy [testenv:func-dev] # Run all development test targets which are +x (may not always pass!) 
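
With the smoke gate moved to bionic-train, the updated target can be exercised
locally through the charm's tox environments. Assuming the usual OpenStack
charm layout, where the bundletester command above sits in a func-smoke
environment:

    cd ceph-fs/src
    tox -e func-smoke
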
From dbde0f9713a63241b7a3155683d6da693b793371 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Thu, 31 Oct 2019 15:43:56 +0100 Subject: [PATCH 1849/2699] Sync charms.ceph use btrfs-progs instead of btrfs-tools for eoan and later update bionic-train zaza test bundle to include placement Closes-Bug: #1850181 Change-Id: I6c414cb7db45f5a1a2cf9b65f6fd93c40f631a46 --- ceph-osd/lib/ceph/utils.py | 7 ++++++- ceph-osd/tests/bundles/bionic-train.yaml | 11 +++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 6b5e4955..2cdd2f50 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -80,7 +80,12 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', +if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' +else: + btrfs_package = 'btrfs-tools' + +PACKAGES = ['ceph', 'gdisk', btrfs_package, 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] diff --git a/ceph-osd/tests/bundles/bionic-train.yaml b/ceph-osd/tests/bundles/bionic-train.yaml index 2b813ed5..1b270900 100644 --- a/ceph-osd/tests/bundles/bionic-train.yaml +++ b/ceph-osd/tests/bundles/bionic-train.yaml @@ -60,6 +60,11 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-train relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -101,3 +106,9 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller From bfbfd7078e3a1f00bd114621aa6e17a3c9994a4c Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Thu, 31 Oct 2019 15:43:00 +0100 Subject: [PATCH 1850/2699] Sync charms.ceph use btrfs-progs instead of btrfs-tools for eoan and later update bionic-train zaza test bundle to include placement Closes-Bug: #1850181 Change-Id: Ic8e46dda154c19edadf0e6810701233c3db88e04 --- ceph-mon/lib/ceph/utils.py | 7 ++++- ceph-mon/tests/bundles/bionic-train.yaml | 35 ++++++++++++++++-------- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 6b5e4955..2cdd2f50 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -80,7 +80,12 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', +if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' +else: + btrfs_package = 'btrfs-tools' + +PACKAGES = ['ceph', 'gdisk', btrfs_package, 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index 815194fe..1b270900 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -1,49 +1,49 @@ series: bionic applications: ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ceph-osd num_units: 3 + series: bionic storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train/proposed + source: cloud:bionic-train ceph-mon: - charm: ceph-mon - series: bionic + charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' - source: cloud:bionic-train/proposed + source: cloud:bionic-train percona-cluster: charm: 
cs:~openstack-charmers-next/percona-cluster num_units: 1 options: dataset-size: 25% max-connections: 1000 - source: cloud:bionic-train/proposed + source: cloud:bionic-train rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:bionic-train/proposed + source: cloud:bionic-train keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train cinder: expose: True charm: cs:~openstack-charmers-next/cinder @@ -51,7 +51,7 @@ applications: options: block-device: 'None' glance-api-version: '2' - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: @@ -59,7 +59,12 @@ applications: charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-train relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -101,3 +106,9 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller From 15d33ac9a1464530a1a7bdf73aa0e159ff505081 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 20 Nov 2019 10:26:28 +0000 Subject: [PATCH 1851/2699] Re-fix Traceback issue when ceph-osd upgrade fails This was originally fixed in Ib7d1fdc8f91bc992ccf618ef6f57e99bb90c2dbc but unfortunately wasn't also added to the charms.ceph library. Thus, this is a re-application of that fix; the charms to ceph fix is in [1]. Bug/1770740 surfaced an issue where get_upgrade_position() returns None but the calling function expects and exception to the thrown if the "None" condition exists. This just fixes the code so that the Traceback is stopped and the appropriate error/message is logged for the condition. [1] https://review.opendev.org/#/c/695163/ I16539b2bc35104eed54033bebb1154cad8a5cf0f Change-Id: Ieee8d13f25027ad540a23a6428c2226b6c20999a Partial-Bug: #1770740 --- ceph-osd/lib/ceph/utils.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 2cdd2f50..3f5bdb1e 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -2310,14 +2310,19 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): def get_upgrade_position(osd_sorted_list, match_name): """Return the upgrade position for the given osd. - :param osd_sorted_list: list. Osds sorted - :param match_name: str. The osd name to match - :returns: int. 
The position or None if not found + :param osd_sorted_list: Osds sorted + :type osd_sorted_list: [str] + :param match_name: The osd name to match + :type match_name: str + :returns: The position of the name + :rtype: int + :raises: ValueError if name is not found """ for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - return None + raise ValueError("osd name '{}' not found in get_upgrade_position list" + .format(match_name)) # Edge cases: From 2c5d2dfaa29d847220a0c4a128c7566475223b46 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 29 Nov 2019 08:02:08 +0100 Subject: [PATCH 1852/2699] Make use of default ``config.rendered`` handler Change-Id: I44307aa958ee3caf245865a21948fd2641fa0a6d --- .../src/reactive/ceph_rbd_mirror_handlers.py | 14 +------------ .../test_ceph_rbd_mirror_handlers.py | 21 +------------------ 2 files changed, 2 insertions(+), 33 deletions(-) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 804bbe82..0b4fd58c 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -25,6 +25,7 @@ # Use the charms.openstack defaults for common states and hooks charm.use_defaults( 'charm.installed', + 'config.rendered', 'update-status', 'upgrade-charm') @@ -54,15 +55,6 @@ def config_changed(): charm_instance.assess_status() -@reactive.when_not('config.rendered') -def disable_services(): - with charm.provide_charm_instance() as charm_instance: - for service in charm_instance.services: - ch_core.host.service('disable', service) - ch_core.host.service('stop', service) - charm_instance.assess_status() - - @reactive.when('ceph-local.available') @reactive.when('ceph-remote.available') def render_stuff(*args): @@ -84,11 +76,7 @@ def render_stuff(*args): charm_instance.configure_ceph_keyring(endpoint.key, cluster_name=cluster_name) charm_instance.render_with_interfaces(args) - for service in charm_instance.services: - ch_core.host.service('enable', service) - ch_core.host.service('start', service) reactive.set_flag('config.rendered') - charm_instance.assess_status() @reactive.when('leadership.is_leader') diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index e54ff357..0b2af68c 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -25,6 +25,7 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks): def test_hooks(self): defaults = [ 'charm.installed', + 'config.rendered', 'update-status', 'upgrade-charm', ] @@ -58,11 +59,6 @@ def test_hooks(self): 'ceph-remote.connected', ), }, - 'when_not': { - 'disable_services': ( - 'config.rendered', - ), - }, 'when_not_all': { 'request_keys': ( 'ceph-local.available', @@ -114,16 +110,6 @@ def test_config_changed(self): [self.endpoint_from_flag(), self.endpoint_from_flag()]) self.crm_charm.assess_status.assert_called_once_with() - def test_disable_services(self): - self.patch_object(handlers.ch_core.host, 'service') - self.crm_charm.services = ['aservice'] - handlers.disable_services() - self.service.assert_has_calls([ - mock.call('disable', 'aservice'), - mock.call('stop', 'aservice'), - ]) - self.crm_charm.assess_status.assert_called_once_with() - def test_render_stuff(self): self.patch_object(handlers.ch_core.host, 'service') endpoint_local = mock.MagicMock() @@ -144,11 +130,6 
@@ def test_render_stuff(self): ]) self.crm_charm.render_with_interfaces.assert_called_once_with( (endpoint_local, endpoint_remote)) - self.service.assert_has_calls([ - mock.call('enable', 'aservice'), - mock.call('start', 'aservice'), - ]) - self.crm_charm.assess_status.assert_called_once_with() def test_refresh_pools(self): self.patch_object(handlers.reactive, 'endpoint_from_name') From bbb8e5b51f50c7caaf6a0eb4c4f8631541f1312d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 2 Dec 2019 13:45:23 +0100 Subject: [PATCH 1853/2699] Fix charm reference in bionic-queens bundle Change-Id: Iaf8b405d0e9e4667e732dd21f94e82b9caea8576 --- ceph-mon/tests/bundles/bionic-queens.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index f8565100..8bc49b32 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -8,7 +8,7 @@ applications: options: osd-devices: '/srv/ceph /dev/test-non-existent' ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ceph-mon series: bionic num_units: 3 options: From bb74350eb96327716f6479317a8ee58b5b5c9021 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 2 Dec 2019 13:49:08 +0100 Subject: [PATCH 1854/2699] Migrate CephFS tests to Zaza Change-Id: I36d3d58d8f5c15475460997cce20fe442978eeed Closes-Bug: #1828424 --- ceph-fs/src/test-requirements.txt | 24 +- ceph-fs/src/tests/README.md | 9 - ceph-fs/src/tests/basic_deployment.py | 277 ------------------- ceph-fs/src/tests/bundles/bionic-queens.yaml | 95 +++++++ ceph-fs/src/tests/bundles/bionic-rocky.yaml | 111 ++++++++ ceph-fs/src/tests/bundles/bionic-stein.yaml | 111 ++++++++ ceph-fs/src/tests/bundles/bionic-train.yaml | 121 ++++++++ ceph-fs/src/tests/bundles/cosmic-rocky.yaml | 95 +++++++ ceph-fs/src/tests/bundles/disco-stein.yaml | 95 +++++++ ceph-fs/src/tests/bundles/xenial-mitaka.yaml | 95 +++++++ ceph-fs/src/tests/bundles/xenial-ocata.yaml | 111 ++++++++ ceph-fs/src/tests/bundles/xenial-pike.yaml | 111 ++++++++ ceph-fs/src/tests/bundles/xenial-queens.yaml | 111 ++++++++ ceph-fs/src/tests/gate-basic-bionic-queens | 23 -- ceph-fs/src/tests/gate-basic-bionic-rocky | 25 -- ceph-fs/src/tests/gate-basic-bionic-stein | 25 -- ceph-fs/src/tests/gate-basic-bionic-train | 25 -- ceph-fs/src/tests/gate-basic-disco-stein | 23 -- ceph-fs/src/tests/gate-basic-xenial-mitaka | 23 -- ceph-fs/src/tests/gate-basic-xenial-ocata | 25 -- ceph-fs/src/tests/gate-basic-xenial-pike | 25 -- ceph-fs/src/tests/gate-basic-xenial-queens | 25 -- ceph-fs/src/tests/tests.yaml | 40 +-- ceph-fs/src/tox.ini | 31 +-- ceph-fs/tox.ini | 2 +- 25 files changed, 1094 insertions(+), 564 deletions(-) delete mode 100644 ceph-fs/src/tests/README.md delete mode 100644 ceph-fs/src/tests/basic_deployment.py create mode 100644 ceph-fs/src/tests/bundles/bionic-queens.yaml create mode 100644 ceph-fs/src/tests/bundles/bionic-rocky.yaml create mode 100644 ceph-fs/src/tests/bundles/bionic-stein.yaml create mode 100644 ceph-fs/src/tests/bundles/bionic-train.yaml create mode 100644 ceph-fs/src/tests/bundles/cosmic-rocky.yaml create mode 100644 ceph-fs/src/tests/bundles/disco-stein.yaml create mode 100644 ceph-fs/src/tests/bundles/xenial-mitaka.yaml create mode 100644 ceph-fs/src/tests/bundles/xenial-ocata.yaml create mode 100644 ceph-fs/src/tests/bundles/xenial-pike.yaml create mode 100644 ceph-fs/src/tests/bundles/xenial-queens.yaml delete mode 100755 ceph-fs/src/tests/gate-basic-bionic-queens 
delete mode 100755 ceph-fs/src/tests/gate-basic-bionic-rocky delete mode 100755 ceph-fs/src/tests/gate-basic-bionic-stein delete mode 100755 ceph-fs/src/tests/gate-basic-bionic-train delete mode 100755 ceph-fs/src/tests/gate-basic-disco-stein delete mode 100755 ceph-fs/src/tests/gate-basic-xenial-mitaka delete mode 100755 ceph-fs/src/tests/gate-basic-xenial-ocata delete mode 100755 ceph-fs/src/tests/gate-basic-xenial-pike delete mode 100755 ceph-fs/src/tests/gate-basic-xenial-queens diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index a4b77a56..7e9d6093 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -7,27 +7,7 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -amulet>=1.14.3,<2.0;python_version=='2.7' -bundletester>=0.6.1,<1.0;python_version=='2.7' -aodhclient>=0.1.0 -gnocchiclient>=3.1.0,<3.2.0 -python-barbicanclient>=4.0.1 -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0,<5.0.0 -python-designateclient>=1.5 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-manilaclient>=1.8.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -# END: Amulet OpenStack Charm Helper Requirements +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pytz # workaround for 14.04 pip/tox pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-fs/src/tests/README.md b/ceph-fs/src/tests/README.md deleted file mode 100644 index 046be7fb..00000000 --- a/ceph-fs/src/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Overview - -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -For full details on functional testing of OpenStack charms please refer to -the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) -section of the OpenStack Charm Guide. diff --git a/ceph-fs/src/tests/basic_deployment.py b/ceph-fs/src/tests/basic_deployment.py deleted file mode 100644 index bbaf1632..00000000 --- a/ceph-fs/src/tests/basic_deployment.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
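
Swapping the Amulet requirement set for the two zaza packages above also
changes how the functional tests are driven: zaza reads the bundle list from
tests.yaml rather than running per-release gate scripts. Under that
assumption, a local run looks roughly like:

    cd ceph-fs/src
    tox -e func-dev      # or functest-run-suite once the deps are installed
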
- -import amulet - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( # noqa - OpenStackAmuletUtils, - DEBUG, - ) - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class CephFsBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic ceph deployment.""" - - def __init__(self, series=None, openstack=None, source=None, stable=False): - """Deploy the entire test environment.""" - super(CephFsBasicDeployment, self).__init__(series, - openstack, - source, - stable) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - exclude_services = [] - - # Wait for deployment ready msgs, except exclusions - self._auto_wait_for_status(exclude_services=exclude_services) - - self.d.sentry.wait() - self._initialize_tests() - - def _add_services(self, **kwargs): - """Add services - - Add the services that we're testing, where cephfs is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). - :param **kwargs: - """ - no_origin = ['ceph-fs'] - this_service = {'name': 'ceph-fs', 'units': 1} - other_services = [ - {'name': 'ceph-mon', 'units': 3}, - {'name': 'ceph-osd', 'units': 3, - 'storage': {'osd-devices': 'cinder,10G'}}, - ] - super(CephFsBasicDeployment, self)._add_services(this_service, - other_services, - no_origin=no_origin) - - def _add_relations(self, **kwargs): - """Add all of the relations for the services. - :param **kwargs: - """ - relations = { - 'ceph-osd:mon': 'ceph-mon:osd', - 'ceph-fs:ceph-mds': 'ceph-mon:mds', - } - super(CephFsBasicDeployment, self)._add_relations(relations) - - def _configure_services(self, **kwargs): - """Configure all of the services. - :param **kwargs: - """ - ceph_fs_config = { - 'source': self.source, - } - ceph_mon_config = { - 'source': self.source, - } - # Include a non-existent device as osd-devices is a whitelist, - # and this will catch cases where proposals attempt to change that. - ceph_osd_config = { - 'source': self.source, - 'osd-devices': '/srv/ceph /dev/test-non-existent', - } - - configs = { - 'ceph-mon': ceph_mon_config, - 'ceph-osd': ceph_osd_config, - 'ceph-fs': ceph_fs_config, - } - super(CephFsBasicDeployment, self)._configure_services(configs) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0] - self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] - self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] - - self.ceph_mon0_sentry = self.d.sentry['ceph-mon'][0] - self.ceph_mon1_sentry = self.d.sentry['ceph-mon'][1] - self.ceph_mon2_sentry = self.d.sentry['ceph-mon'][2] - - self.ceph_mds_sentry = self.d.sentry['ceph-fs'][0] - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_mon_processes = { - 'ceph-mon': 1 - } - ceph_osd_processes = { - 'ceph-osd': 2 - } - - # Pre-nautilus (ie. pre-train) we had a directory backed OSD and - # a disk backed OSD, but at nautilus (and beyond) we only have - # the disk backed OSD. 
- if self._get_openstack_release() >= self.bionic_train: - ceph_osd_processes['ceph-osd'] = 1 - - ceph_mds_processes = { - 'ceph-mds': 1 - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph_mon0_sentry: ceph_mon_processes, - self.ceph_mon1_sentry: ceph_mon_processes, - self.ceph_mon2_sentry: ceph_mon_processes, - self.ceph_osd0_sentry: ceph_osd_processes, - self.ceph_osd1_sentry: ceph_osd_processes, - self.ceph_osd2_sentry: ceph_osd_processes, - self.ceph_mds_sentry: ceph_mds_processes - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_services(self): - """Verify the expected services are running on the service units.""" - services = {} - - if self._get_openstack_release() < self.xenial_mitaka: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`' - ] - services[self.ceph_mon0_sentry] = ceph_services - services[self.ceph_mon1_sentry] = ceph_services - services[self.ceph_mon2_sentry] = ceph_services - - ceph_osd_services = [ - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - ] - - services[self.ceph_osd0_sentry] = ceph_osd_services - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_300_ceph_config(self): - """Verify the data in the ceph config file.""" - u.log.debug('Checking ceph config file data...') - unit = self.ceph_mon0_sentry - conf = '/etc/ceph/ceph.conf' - (fsid, _) = unit.run('leader-get fsid') - expected = { - 'global': { - 'fsid': fsid, - 'log to syslog': 'false', - 'err to syslog': 'false', - 'clog to syslog': 'false', - 'mon cluster log to syslog': 'false', - 'auth cluster required': 'cephx', - 'auth service required': 'cephx', - 'auth client required': 'cephx' - }, - 'mon': { - 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' - }, - 'mds': { - 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' - }, - } - - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, conf, section, pairs) - if ret: - message = "ceph config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_400_ceph_check_osd_pools(self): - """Check osd pools on all ceph units, expect them to be - identical, and expect specific pools to be present.""" - u.log.debug('Checking pools on ceph units...') - - if self._get_openstack_release() >= self.xenial_pike: - expected_pools = ['ceph-fs_data', 'ceph-fs_metadata'] - else: - expected_pools = ['rbd', 'ceph-fs_data', 'ceph-fs_metadata'] - results = [] - sentries = [ - self.ceph_mon0_sentry, - self.ceph_mon1_sentry, - self.ceph_mon2_sentry - ] - - # Check for presence of expected pools on each unit - u.log.debug('Expected pools: {}'.format(expected_pools)) - for sentry_unit in sentries: - pools = u.get_ceph_pools(sentry_unit) - results.append(pools) - - for expected_pool in expected_pools: - if expected_pool not in pools: - msg = ('{} does not have pool: ' - '{}'.format(sentry_unit.info['unit_name'], - expected_pool)) - amulet.raise_status(amulet.FAIL, msg=msg) - u.log.debug('{} has (at least) the expected ' - 'pools.'.format(sentry_unit.info['unit_name'])) - - # Check that all units returned the same pool name:id data - ret = u.validate_list_of_identical_dicts(results) - if ret: - u.log.debug('Pool list results: {}'.format(results)) - msg 
= ('{}; Pool list results are not identical on all ' - 'ceph units.'.format(ret)) - amulet.raise_status(amulet.FAIL, msg=msg) - else: - u.log.debug('Pool list on all ceph units produced the ' - 'same results (OK).') - - def test_499_ceph_cmds_exit_zero(self): - """Check basic functionality of ceph cli commands against - all ceph units.""" - sentry_units = [ - self.ceph_mon0_sentry, - self.ceph_mon1_sentry, - self.ceph_mon2_sentry - ] - commands = [ - 'sudo ceph health', - 'sudo ceph mds stat', - 'sudo ceph pg stat', - 'sudo ceph osd stat', - 'sudo ceph mon stat', - 'sudo ceph fs ls', - ] - ret = u.check_commands_on_units(commands, sentry_units) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - # FYI: No restart check as ceph services do not restart - # when charm config changes, unless monitor count increases. diff --git a/ceph-fs/src/tests/bundles/bionic-queens.yaml b/ceph-fs/src/tests/bundles/bionic-queens.yaml new file mode 100644 index 00000000..74640d30 --- /dev/null +++ b/ceph-fs/src/tests/bundles/bionic-queens.yaml @@ -0,0 +1,95 @@ +series: bionic +applications: + ceph-fs: + charm: ceph-fs + series: bionic + num_units: 1 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/bionic-rocky.yaml b/ceph-fs/src/tests/bundles/bionic-rocky.yaml new file mode 100644 index 00000000..376deaaa --- /dev/null +++ 
b/ceph-fs/src/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,111 @@ +series: bionic +applications: + ceph-fs: + charm: ceph-fs + series: bionic + num_units: 1 + options: + source: cloud:bionic-rocky + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:bionic-rocky + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-rocky + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-rocky + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-rocky + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-rocky + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/bionic-stein.yaml b/ceph-fs/src/tests/bundles/bionic-stein.yaml new file mode 100644 index 00000000..fc02435f --- /dev/null +++ b/ceph-fs/src/tests/bundles/bionic-stein.yaml @@ -0,0 +1,111 @@ +series: bionic +applications: + ceph-fs: + charm: ceph-fs + series: bionic + num_units: 1 + options: + source: cloud:bionic-stein + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:bionic-stein + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:bionic-stein + percona-cluster: + charm: 
cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-stein + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-stein + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-stein + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-stein +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/bionic-train.yaml b/ceph-fs/src/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..26eeaae7 --- /dev/null +++ b/ceph-fs/src/tests/bundles/bionic-train.yaml @@ -0,0 +1,121 @@ +series: bionic +applications: + ceph-fs: + charm: ceph-fs + series: bionic + num_units: 1 + options: + source: cloud:bionic-train/proposed + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-train/proposed + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train/proposed + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-train/proposed + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train/proposed + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + glance: + expose: True + 
charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-train/proposed + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-train/proposed + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-train +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller \ No newline at end of file diff --git a/ceph-fs/src/tests/bundles/cosmic-rocky.yaml b/ceph-fs/src/tests/bundles/cosmic-rocky.yaml new file mode 100644 index 00000000..6fa4f308 --- /dev/null +++ b/ceph-fs/src/tests/bundles/cosmic-rocky.yaml @@ -0,0 +1,95 @@ +series: cosmic +applications: + ceph-fs: + charm: ceph-fs + series: cosmic + num_units: 1 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - 
keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/disco-stein.yaml b/ceph-fs/src/tests/bundles/disco-stein.yaml new file mode 100644 index 00000000..5a2add22 --- /dev/null +++ b/ceph-fs/src/tests/bundles/disco-stein.yaml @@ -0,0 +1,95 @@ +series: disco +applications: + ceph-fs: + charm: ceph-fs + series: disco + num_units: 1 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/xenial-mitaka.yaml 
b/ceph-fs/src/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 00000000..81569cea --- /dev/null +++ b/ceph-fs/src/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,95 @@ +series: xenial +applications: + ceph-fs: + charm: ceph-fs + series: xenial + num_units: 1 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/xenial-ocata.yaml b/ceph-fs/src/tests/bundles/xenial-ocata.yaml new file mode 100644 index 00000000..5cb819de --- /dev/null +++ b/ceph-fs/src/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,111 @@ +series: xenial +applications: + ceph-fs: + charm: ceph-fs + series: xenial + num_units: 1 + options: + source: cloud:xenial-ocata + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-ocata + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-ocata + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-ocata + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-ocata + keystone: + expose: True + charm: 
cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-ocata + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/xenial-pike.yaml b/ceph-fs/src/tests/bundles/xenial-pike.yaml new file mode 100644 index 00000000..ba00c37d --- /dev/null +++ b/ceph-fs/src/tests/bundles/xenial-pike.yaml @@ -0,0 +1,111 @@ +series: xenial +applications: + ceph-fs: + charm: ceph-fs + series: xenial + num_units: 1 + options: + source: cloud:xenial-pike + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-pike + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-pike + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-pike + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-pike + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-pike + cinder-ceph: + charm: 
cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-pike +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/bundles/xenial-queens.yaml b/ceph-fs/src/tests/bundles/xenial-queens.yaml new file mode 100644 index 00000000..cf057141 --- /dev/null +++ b/ceph-fs/src/tests/bundles/xenial-queens.yaml @@ -0,0 +1,111 @@ +series: xenial +applications: + ceph-fs: + charm: ceph-fs + series: xenial + num_units: 1 + options: + source: cloud:xenial-queens + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/srv/ceph /dev/test-non-existent' + source: cloud:xenial-queens + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + auth-supported: 'none' + source: cloud:xenial-queens + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:xenial-queens + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:xenial-queens + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:xenial-queens + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:xenial-queens +relations: +- - ceph-mon:mds + - ceph-fs:ceph-mds +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + 
- rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service diff --git a/ceph-fs/src/tests/gate-basic-bionic-queens b/ceph-fs/src/tests/gate-basic-bionic-queens deleted file mode 100755 index f48d42fd..00000000 --- a/ceph-fs/src/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-queens.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='bionic') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-bionic-rocky b/ceph-fs/src/tests/gate-basic-bionic-rocky deleted file mode 100755 index 0f7abc5a..00000000 --- a/ceph-fs/src/tests/gate-basic-bionic-rocky +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-rocky.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-bionic-stein b/ceph-fs/src/tests/gate-basic-bionic-stein deleted file mode 100755 index 32098369..00000000 --- a/ceph-fs/src/tests/gate-basic-bionic-stein +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-stein.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='bionic', - openstack='cloud:bionic-stein', - source='cloud:bionic-stein') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-bionic-train b/ceph-fs/src/tests/gate-basic-bionic-train deleted file mode 100755 index fd29eb2a..00000000 --- a/ceph-fs/src/tests/gate-basic-bionic-train +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2019 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on bionic-train.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='bionic', - openstack='cloud:bionic-train', - source='cloud:bionic-train') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-disco-stein b/ceph-fs/src/tests/gate-basic-disco-stein deleted file mode 100755 index 9c12c275..00000000 --- a/ceph-fs/src/tests/gate-basic-disco-stein +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on disco-stein.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='disco') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-xenial-mitaka b/ceph-fs/src/tests/gate-basic-xenial-mitaka deleted file mode 100755 index b9bc393d..00000000 --- a/ceph-fs/src/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-mitaka.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='xenial') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-xenial-ocata b/ceph-fs/src/tests/gate-basic-xenial-ocata deleted file mode 100755 index 86f6a645..00000000 --- a/ceph-fs/src/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-ocata.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-xenial-pike b/ceph-fs/src/tests/gate-basic-xenial-pike deleted file mode 100755 index 9af16420..00000000 --- a/ceph-fs/src/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-pike.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/ceph-fs/src/tests/gate-basic-xenial-queens b/ceph-fs/src/tests/gate-basic-xenial-queens deleted file mode 100755 index 2d0dd71a..00000000 --- a/ceph-fs/src/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic ceph deployment on xenial-queens.""" - -from basic_deployment import CephFsBasicDeployment - -if __name__ == '__main__': - deployment = CephFsBasicDeployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index af79ff11..7b4c390c 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,18 +1,22 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node instead of destroying/re-bootstrapping. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. -#python-packages: -reset_timeout: 600 +charm_name: ceph-fs +gate_bundles: + - bionic-train + - bionic-stein + - bionic-rocky + - bionic-queens + - xenial-queens + - xenial-pike + - xenial-ocata + - xenial-mitaka +smoke_bundles: + - bionic-stein +dev_bundles: + - cosmic-rocky + - disco-stein +configure: + - zaza.openstack.charm_tests.glance.setup.add_lts_image +tests: + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index dd17ae57..9c27bbfa 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -1,4 +1,4 @@ -# Source charm (with amulet): ./src/tox.ini +# Source charm (with zaza): ./src/tox.ini # This file is managed centrally by release-tools and should not be modified # within individual charm repos. 
See the 'global' dir contents for available # choices of tox.ini for OpenStack Charms: @@ -15,41 +15,36 @@ skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 - CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=5400 whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_* OS_* TEST_* +passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} [testenv:pep8] basepython = python3 +deps=charm-tools commands = charm-proof [testenv:func-noop] -# DRY RUN - For Debug -basepython = python2.7 +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + functest-run-suite --help [testenv:func] -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + functest-run-suite --keep-model [testenv:func-smoke] -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-train --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func-dev] -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 +[testenv:func-target] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + functest-run-suite --keep-model --bundle {posargs} [testenv:venv] -commands = {posargs} +commands = {posargs} \ No newline at end of file diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 5b41c1dd..f8f50927 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -89,4 +89,4 @@ commands = {posargs} [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402,W504 +ignore = E402,W504 \ No newline at end of file From 36f3ec8668d17f1e9d1dadf983d6c606c29ab56a Mon Sep 17 00:00:00 2001 From: ShangXiao Date: Tue, 10 Dec 2019 09:57:19 +0800 Subject: [PATCH 1855/2699] Switch to Ussuri jobs Change-Id: I94bf717b40cac6298fece0d08f74b756b937b8fd --- ceph-mon/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index 7332a874..b3037e94 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,5 +1,5 @@ - project: templates: - python35-charm-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs - openstack-cover-jobs From 4d8fb47a3ba70afbd40b6ac8ddce3267a409022f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 10 Dec 2019 13:37:17 +0000 Subject: [PATCH 1856/2699] Charmhelpers sync to get vaultlocker fixes Also gate checking vault context completing on whether dependencies are installed. 
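To make the second part concrete: prepare_disks_and_activate() previously built and evaluated the VaultKVContext unconditionally, while the hook change below only does so when vaultlocker is actually in use. A simplified outline of the new guard, using the names from ceph_hooks.py as they appear in the hunk that follows:

    # Simplified from the ceph_hooks.py hunk in this patch:
    if use_vaultlocker():
        vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
        context = vault_kv()
        if not vault_kv.complete:
            log('Deferring OSD preparation as vault not ready', level=DEBUG)
            return
        log('Vault ready, writing vaultlocker configuration', level=DEBUG)
        vaultlocker.write_vaultlocker_conf(context)

Deployments that do not encrypt OSD devices with vaultlocker therefore skip the vault preflight entirely instead of evaluating a context they will never use.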
Change-Id: I6c89944960f592300921fbd455c6d1d8c4b9b2a2 Closes-Bug: #1849323 --- ceph-osd/hooks/ceph_hooks.py | 23 +- .../charmhelpers/contrib/openstack/context.py | 4 +- .../contrib/openstack/ha/utils.py | 3 +- .../charmhelpers/contrib/openstack/policyd.py | 308 ++++++++++++------ .../charmhelpers/contrib/openstack/utils.py | 13 +- .../contrib/openstack/vaultlocker.py | 55 +++- .../contrib/storage/linux/ceph.py | 35 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 109 ++++++- 8 files changed, 397 insertions(+), 153 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 8de5870e..c5992525 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -486,17 +486,18 @@ def config_changed(): @hooks.hook('storage.real') def prepare_disks_and_activate(): - # NOTE: vault/vaultlocker preflight check - vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND) - context = vault_kv() - if use_vaultlocker() and not vault_kv.complete: - log('Deferring OSD preparation as vault not ready', - level=DEBUG) - return - elif use_vaultlocker() and vault_kv.complete: - log('Vault ready, writing vaultlocker configuration', - level=DEBUG) - vaultlocker.write_vaultlocker_conf(context) + if use_vaultlocker(): + # NOTE: vault/vaultlocker preflight check + vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND) + context = vault_kv() + if not vault_kv.complete: + log('Deferring OSD preparation as vault not ready', + level=DEBUG) + return + else: + log('Vault ready, writing vaultlocker configuration', + level=DEBUG) + vaultlocker.write_vaultlocker_conf(context) osd_journal = get_journal_devices() if not osd_journal.isdisjoint(set(get_devices())): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index a3d48c41..9b80b6d6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1940,7 +1940,7 @@ def _determine_ctxt(self): as well as the catalog_info string that would be supplied. Returns a dict containing the volume_api_version and the volume_catalog_info. 
""" - rel = os_release(self.pkg, base='icehouse') + rel = os_release(self.pkg) version = '2' if CompareOpenStackReleases(rel) >= 'pike': version = '3' @@ -2140,7 +2140,7 @@ def __init__(self, pkg='python-keystone'): self.pkg = pkg def __call__(self): - ostack = os_release(self.pkg, base='icehouse') + ostack = os_release(self.pkg) osystem = lsb_release()['DISTRIB_CODENAME'].lower() return { 'openstack_release': ostack, diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index e017bc20..a5cbdf53 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -157,10 +157,11 @@ def generate_ha_relation_data(service, _relation_data = {'resources': {}, 'resource_params': {}} if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' _haproxy_res = 'res_{}_haproxy'.format(service) _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} _relation_data['resource_params'] = { - _haproxy_res: 'op monitor interval="5s"' + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) } _relation_data['init_services'] = {_haproxy_res: 'haproxy'} _relation_data['clones'] = { diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py index 1adf2472..1d9a353a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,9 +17,11 @@ import os import six import shutil +import sys import yaml import zipfile +import charmhelpers import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as ch_host @@ -115,8 +117,8 @@ def upgrade_charm(): default: False description: | If True then use the resource file named 'policyd-override' to install - override yaml files in the service's policy.d directory. The resource - file should be a zip file containing at least one yaml file with a .yaml + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. """ @@ -134,14 +136,14 @@ def upgrade_charm(): Policy Overrides ---------------- -This service allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the service supports should be -clearly and unambiguously understood before trying to override, or add to, the -default policies that the service uses. +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The charm also has some policy defaults. They should also be understood before -being overridden. It is possible to break the system (for tenants and other -services) if policies are incorrectly applied to the service. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. Policy overrides are YAML files that contain rules that will add to, or override, existing policy rules in the service. 
The `policy.d` directory is @@ -149,30 +151,16 @@ def upgrade_charm(): `/etc/keystone/policy.d` directory, and as such, any manual changes to it will be overwritten on charm upgrades. -Policy overrides are provided to the charm using a resource file called -`policyd-override`. This is attached to the charm using (for example): +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: - juju attach-resource policyd-override= -The `` is the name that this charm is deployed as, with -`` being the resource file containing the policy overrides. + juju attach-resource policyd-override=overrides.zip -The format of the resource file is a ZIP file (.zip extension) containing at -least one YAML file with an extension of `.yaml` or `.yml`. Note that any -directories in the ZIP file are ignored; all of the files are flattened into a -single directory. There must not be any duplicated filenames; this will cause -an error and nothing in the resource file will be applied. +The policy override is enabled in the charm using: -(ed. next part is optional is the charm supports some form of -template/substitution on a read file) - -If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the -resource file then the charm will perform a substitution with charm variables -taken from the config or relations. (ed. edit as appropriate to include the -variable). - -To enable the policy overrides the config option `use-policyd-override` must be -set to `True`. + juju config use-policyd-override=true When `use-policyd-override` is `True` the status line of the charm will be prefixed with `PO:` indicating that policies have been overridden. If the @@ -180,12 +168,8 @@ def upgrade_charm(): status line will be prefixed with `PO (broken):`. The log file for the charm will indicate the reason. No policy override files are installed if the `PO (broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed - they will be the defaults for -the charm and service. - -If the policy overrides did not install then *either* attach a new, corrected, -resource file *or* disable the policy overrides by setting -`use-policyd-override` to False. +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. Policy overrides on one service may affect the functionality of another service. Therefore, it may be necessary to provide policy overrides for @@ -251,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths=None, blacklist_keys=None, template_function=None, - restart_handler=None): + restart_handler=None, + user=None, + group=None, + config_changed=False): """If the config option is set, get the resource file and process it to enable the policy.d overrides for the service passed. @@ -280,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release, directory. However, for any services where this is buggy then a restart_handler can be used to force the policy.d files to be read. + If the config_changed param is True, then the handling is slightly + different: It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + :param openstack_release: The openstack release that is installed. 
:type openstack_release: str :param service: the service name to construct the policy.d directory for. @@ -295,16 +287,43 @@ def maybe_do_policyd_overrides(openstack_release, :param restart_handler: The function to call if the service should be restarted. :type restart_handler: Union[None, Callable[]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :param config_changed: Set to True for config_changed hook. + :type config_changed: bool """ + _user = service if user is None else user + _group = service if group is None else group + if not is_policyd_override_valid_on_this_release(openstack_release): + return + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + if (os.path.isfile(_policy_success_file()) + and restart_handler is not None + and callable(restart_handler)): + restart_handler() remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) return - except Exception: + except Exception as e: + hookenv.log("... ERROR: Exception is: {}".format(str(e)), + level=POLICYD_CONFIG_NAME) + import traceback + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return - if not is_policyd_override_valid_on_this_release(openstack_release): + # if the policyd overrides have been performed when doing config_changed + # just return + if config_changed and is_policy_success_file_set(): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) return # from now on it should succeed; if it doesn't then status line will show # broken. @@ -316,49 +335,18 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -def maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None): - """This function is designed to be called from the config changed hook - handler. It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. +@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): + """This function is designed to be called from the config changed hook. - See maybe_do_policyd_overrides() for more details on the params. + DEPRECATED: please use maybe_do_policyd_overrides() with the param + `config_changed` as `True`. - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. 
- :type restart_handler: Union[None, Callable[]] + See maybe_do_policyd_overrides() for more details on the params. """ - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) - return - except Exception: - return - # if the policyd overrides have been performed just return - if os.path.isfile(_policy_success_file()): - return - maybe_do_policyd_overrides( - openstack_release, service, blacklist_paths, blacklist_keys, - template_function, restart_handler) + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) def get_policy_resource_filename(): @@ -375,13 +363,16 @@ @contextlib.contextmanager -def open_and_filter_yaml_files(filepath): +def open_and_filter_yaml_files(filepath, has_subdirs=False): """Validate that the filepath provided is a zip file and contains at least one (.yaml|.yml) file, and that the files are not duplicated when the zip file is flattened. Note that the yaml files are not checked. This is the first stage in validating the policy zipfile; individual yaml files are not checked for validity or black listed keys. + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + An example of use is: with open_and_filter_yaml_files(some_path) as zfp, g: @@ -390,6 +381,8 @@ def open_and_filter_yaml_files(filepath): :param filepath: a filepath object that can be opened by zipfile :type filepath: Union[AnyStr, os.PathLike[AnyStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool :returns: (zfp handle, a generator of the (name, filename, ZipInfo object) tuples) as a tuple. @@ -402,7 +395,7 @@ def open_and_filter_yaml_files(filepath): with zipfile.ZipFile(filepath, 'r') as zfp: # first pass through; check for duplicates and at least one yaml file. names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp) + yamlfiles = _yamlfiles(zfp, has_subdirs) for name, _, _, _ in yamlfiles: names[name] += 1 # There must be at least 1 yaml file. @@ -418,26 +411,49 @@ def open_and_filter_yaml_files(filepath): yield (zfp, yamlfiles) -def _yamlfiles(zipfile): +def _yamlfiles(zipfile, has_subdirs=False): """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) and the infolist item from a zipfile. + If the `has_subdirs` param is True, then only yaml files that have a + directory component are read, and then the first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional processing that the dashboard + charm requires. + :param zipfile: the zipfile to read zipinfo items from :type zipfile: zipfile.ZipFile - :returns: generator of (name, ext, filename, info item) for each self-identified - yaml file. + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file.
:rtype: List[(str, str, str, zipfile.ZipInfo)] """ - l = [] + files = [] for infolist_item in zipfile.infolist(): - if infolist_item.is_dir(): - continue - _, name_ext = os.path.split(infolist_item.filename) + try: + if infolist_item.is_dir(): + continue + except AttributeError: + # fallback to "old" way to determine dir entry for pre-py36 + if infolist_item.filename.endswith('/'): + continue + _dir, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) + if has_subdirs and _dir != "": + name = os.path.join(_dir.split(os.path.sep)[0], name) ext = ext.lower() if ext and ext in POLICYD_VALID_EXTS: - l.append((name, ext, name_ext, infolist_item)) - return l + files.append((name, ext, name_ext, infolist_item)) + return files def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): @@ -483,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def policyd_dir_for(service): """Return the policy directory for the named service. - This assumes the default name of "policy.d" which is kept across all - charms. - :param service: str :returns: the policy.d override directory. :rtype: os.PathLike[str] @@ -493,7 +506,7 @@ def policyd_dir_for(service): return os.path.join("/", "etc", service, "policy.d") -def clean_policyd_dir_for(service, keep_paths=None): +def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): """Clean out the policyd directory except for items that should be kept. The keep_paths, if used, should be set to the full path of the files that @@ -506,12 +519,19 @@ def clean_policyd_dir_for(service, keep_paths=None): :type service: str :param keep_paths: optional list of paths to not delete. :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] """ + _user = service if user is None else user + _group = service if group is None else group keep_paths = keep_paths or [] path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): - ch_host.mkdir(path, owner=service, group=service, perms=0o775) - _scanner = os.scandir if six.PY3 else _py2_scandir + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -523,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None): os.remove(direntry.path) +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + @contextlib.contextmanager def _py2_scandir(path): """provide a py2 implementation of os.scandir if this module ever gets used @@ -558,6 +594,11 @@ def path_for_policy_file(service, name): It is constructed using policyd_dir_for(), the name and the ".yaml" extension. + For horizon, for example, it's a bit more complicated. 
The name param is + actually "override_service_dir/a_name", where the target service needs to be + one of the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + :param service: the service name :type service: str :param name: the name for the policy override @@ -585,6 +626,22 @@ def remove_policy_success_file(): pass +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + def policyd_status_message_prefix(): """Return the prefix str for the status line. @@ -594,7 +651,7 @@ def policyd_status_message_prefix(): :returns: the prefix :rtype: str """ - if os.path.isfile(_policy_success_file()): + if is_policy_success_file_set(): return "PO:" return "PO (broken):" @@ -603,7 +660,11 @@ def process_policy_resource_file(resource_file, service, blacklist_paths=None, blacklist_keys=None, - template_function=None): + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): """Process the resource file (which should contain at least one yaml file) and write those files to the service's policy.d directory. @@ -623,6 +684,16 @@ def process_policy_resource_file(resource_file, its file path reconstructed. This, also, must not match any path in the black list. + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + If any error occurs, then the policy.d directory is cleared, the error is written to the log, and the status line will eventually show as failed. @@ -638,17 +709,39 @@ def process_policy_resource_file(resource_file, :param template_function: Optional function that can modify the yaml document. :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr], AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] :returns: True if the processing was successful, False if not.
:rtype: boolean """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group try: - with open_and_filter_yaml_files(resource_file) as (zfp, gen): + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): # first clear out the policy.d directory and clear success remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) # construct a name for the output file. yaml_filename = path_for_policy_file(service, name) if yaml_filename in blacklist_paths: @@ -666,8 +759,12 @@ def process_policy_resource_file(resource_file, "available".format(filename)) doc = template_function(doc) yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - with open(yaml_filename, "wt") as f: - yaml.dump(yaml_doc, f) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) # Every thing worked, so we mark up a success. completed = True except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: @@ -691,10 +788,13 @@ def process_policy_resource_file(resource_file, hookenv.log("Processing {} failed: cleaning policy.d directory" .format(resource_file), level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) else: # touch the success filename hookenv.log("policy.d overrides installed.", level=POLICYD_LOG_LEVEL_DEFAULT) - open(_policy_success_file(), "w").close() + set_policy_success_file() return completed diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index ac96f844..02190264 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -204,7 +204,7 @@ ('stein', ['2.20.0', '2.21.0']), ('train', - ['2.22.0']), + ['2.22.0', '2.23.0']), ]) # >= Liberty version->codename mapping @@ -531,7 +531,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -542,6 +542,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. 
''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -670,7 +672,10 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1693,7 +1698,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index a8e4bf88..c162de27 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,9 +37,13 @@ def __init__(self, secret_backend=None): ) def __call__(self): + import hvac + ctxt = {} + # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() - last_token = db.get('last-token') + # currently known-good secret-id secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): for unit in hookenv.related_units(relation_id): data = hookenv.relation_get(unit=unit, @@ -54,27 +58,48 @@ def __call__(self): # Tokens may change when secret_id's are being # reissued - if so use token to get new secret_id - if token != last_token: + token_success = False + try: secret_id = retrieve_secret_id( url=vault_url, token=token ) + token_success = True + except hvac.exceptions.InvalidRequest: + # Try next + pass + + if token_success: db.set('secret-id', secret_id) - db.set('last-token', token) db.flush() - ctxt = { - 'vault_url': vault_url, - 'role_id': json.loads(role_id), - 'secret_id': secret_id, - 'secret_backend': self.secret_backend, - } - vault_ca = data.get('vault_ca') - if vault_ca: - ctxt['vault_ca'] = json.loads(vault_ca) - self.complete = True - return ctxt - return {} + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + self.complete = True + break + else: + if secret_id: + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + if self.complete: + break + + if ctxt: + self.complete = True + + return ctxt def write_vaultlocker_conf(context, priority=100): diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13dfa8b..104977af 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,6 +422,8 @@ def enabled_manager_modules(): cmd = ['ceph', 'mgr', 'module', 'ls'] try: modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -1185,6 +1187,15 @@ def __init__(self, api_version=1, 
request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, object_prefix_permissions=None): @@ -1198,7 +1209,7 @@ def add_op_request_access_to_group(self, name, namespace=None, 'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']} """ - self.ops.append({ + self.add_op({ 'op': 'add-permissions-to-key', 'group': name, 'namespace': namespace, 'name': key_name or service_name(), @@ -1251,11 +1262,11 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, @@ -1283,12 +1294,12 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, :param max_objects: Maximum objects quota to apply :type max_objects: int """ - self.ops.append({'op': 'create-pool', 'name': name, - 'pool-type': 'erasure', - 'erasure-profile': erasure_profile, - 'weight': weight, - 'group': group, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. 
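(ed. a brief, hedged illustration of the de-duplication that the new CephBrokerRq.add_op() above provides — not part of the sync itself; the pool name and replica count are purely illustrative:)

    # Equal op dicts collapse into a single broker request entry, so
    # repeated hook invocations don't resend identical requests to ceph-mon.
    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    rq.add_op({'op': 'create-pool', 'name': 'mypool', 'replicas': 3})
    rq.add_op({'op': 'create-pool', 'name': 'mypool', 'replicas': 3})  # equal dict: not appended
    assert len(rq.ops) == 1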
diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 4744eb43..647f6e4b 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers import deprecate + import six if not six.PY3: from UserDict import UserDict @@ -119,6 +121,24 @@ def log(message, level=None): raise +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" @@ -946,9 +966,23 @@ def charm_dir(): return os.environ.get('CHARM_DIR') +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -957,36 +991,103 @@ def action_get(key=None): return action_data +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-set'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) def action_fail(message): - """Sets the action status to failed and sets the error message. + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. - The results set by action_set are preserved.""" + The results set by action_set are preserved. + """ subprocess.check_call(['action-fail', message]) +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms.
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + def action_name(): """Get the name of the currently executing action.""" return os.environ.get('JUJU_ACTION_NAME') +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + def action_uuid(): """Get the UUID of the currently executing action.""" return os.environ.get('JUJU_ACTION_UUID') +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + def action_tag(): """Get the tag for the currently executing action.""" return os.environ.get('JUJU_ACTION_TAG') +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + def status_set(workload_state, message): """Set the workload state with a message From a3841578589bd84efca008900236f73823435e35 Mon Sep 17 00:00:00 2001 From: Alexandros Soumplis Date: Tue, 17 Dec 2019 13:48:07 +0200 Subject: [PATCH 1857/2699] Ignore devices with OSError This change handles OSError when trying to read from CD-ROM devices with no disc in the is_pristine_disk function, which caused the list-disks action to fail. Change-Id: I5e86895d1adff3f95c7feb5ed0f78b998c28ed1f Depends-On: I951897a699305604821f2c910ee9ea91582c4e40 Closes-Bug: #1833857 Signed-off-by: Alexandros Soumplis --- ceph-osd/lib/ceph/utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 3f5bdb1e..3638d72a 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -913,7 +913,12 @@ def is_pristine_disk(dev): """ want_bytes = 2048 - f = open(dev, 'rb') + try: + f = open(dev, 'rb') + except OSError as e: + log(e) + return False + data = f.read(want_bytes) read_bytes = len(data) if read_bytes != want_bytes: From 86752dd75d6f49562138bd9ea0a0b4207743fada Mon Sep 17 00:00:00 2001 From: ShangXiao Date: Thu, 26 Dec 2019 18:20:43 +0800 Subject: [PATCH 1858/2699] Switch to Ussuri jobs Change-Id: I4bd9718c93d317fdc80651086fdff7476d8eb9c4 --- ceph-osd/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index 7332a874..b3037e94 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,5 +1,5 @@ - project: templates: - python35-charm-jobs - - openstack-python3-train-jobs + - openstack-python3-ussuri-jobs - openstack-cover-jobs From c7c2068a58459ee61aa3fb914c81e9bc1be38289 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 17 Jan 2020 14:21:22 -0500 Subject: [PATCH 1859/2699] Sync charm-helpers for Ussuri/Focal release and version details Change-Id: I5ac45332a126143ce0299c2c534523bd890f5f57 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 ++++ .../contrib/openstack/amulet/deployment.py | 3 + .../contrib/openstack/amulet/utils.py | 1 + .../audits/openstack_security_guide.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 65 ++++++++++++++++++- .../charmhelpers/contrib/openstack/policyd.py | 6 +- .../charmhelpers/contrib/openstack/utils.py | 48 +++++++++++++- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 8 +++ ceph-osd/lib/ceph/broker.py | 14 ++++ ceph-osd/lib/ceph/utils.py | 2 +- 10 files changed, 155 insertions(+), 10 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index
a3d89936..d775861b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -484,3 +484,17 @@ def add_haproxy_checks(nrpe, unit_name): shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh') + + +def remove_deprecated_check(nrpe, deprecated_services): + """ + Remove checks for deprecated services in list + + :param nrpe: NRPE object to remove check from + :type nrpe: NRPE + :param deprecated_services: List of deprecated services that are removed + :type deprecated_services: list + """ + for dep_svc in deprecated_services: + log('Deprecated service: {}'.format(dep_svc)) + nrpe.remove_check(shortname=dep_svc) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 77925cc2..dd3aebe9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -295,9 +295,11 @@ def _get_openstack_release(self): ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, } return releases[(self.series, self.openstack)] @@ -316,6 +318,7 @@ def _get_openstack_release_string(self): ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 7d95a590..14864198 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -62,6 +62,7 @@ 'bionic_rocky', 'cosmic_rocky', 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', ] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index b7b8a60f..79740ed0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -244,8 +244,8 @@ def validate_file_permissions(config): @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) def validate_uses_keystone(audit_options): """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'DEFAULT'" + section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'api / DEFAULT'" assert section.get('auth_strategy') == "keystone", \ "Application is not using Keystone" diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 9b80b6d6..e99aba47 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -730,6 +730,10 @@ def __call__(self): if notification_format:
ctxt['notification_format'] = notification_format + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) if send_notifications_to_logs: ctxt['send_notifications_to_logs'] = send_notifications_to_logs @@ -2177,9 +2181,66 @@ def __call__(self): class HostInfoContext(OSContextGenerator): """Context to provide host information.""" + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' 
in addr[3]: + fqdn = addr[3] + break + return fqdn + def __call__(self): + name = socket.gethostname() ctxt = { - 'host_fqdn': socket.getfqdn(), - 'host': socket.gethostname(), + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py index 1d9a353a..d89d2cca 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -307,9 +307,9 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths, user=_user, group=_group) - if (os.path.isfile(_policy_success_file()) - and restart_handler is not None - and callable(restart_handler)): + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): restart_handler() remove_policy_success_file() return diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 02190264..566404a0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -50,9 +50,14 @@ hook_name, application_version_set, cached, + leader_set, + leader_get, ) -from charmhelpers.core.strutils import BasicStringComparator +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, @@ -126,6 +131,7 @@ 'rocky', 'stein', 'train', + 'ussuri', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -146,6 +152,7 @@ ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) @@ -167,6 +174,7 @@ ('2018.2', 'rocky'), ('2019.1', 'stein'), ('2019.2', 'train'), + ('2020.1', 'ussuri'), ]) # The ugly duckling - must list releases oldest to newest @@ -205,6 +213,8 @@ ['2.20.0', '2.21.0']), ('train', ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0']), ]) # >= Liberty version->codename mapping @@ -219,6 +229,7 @@ ('18', 'rocky'), ('19', 'stein'), ('20', 'train'), + ('21', 'ussuri'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -230,6 +241,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -241,6 +253,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -252,6 +265,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -263,6 +277,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -274,6 +289,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -285,6 +301,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -296,6 +313,7 @@ ('17', 'rocky'), ('18', 'stein'), ('19', 'train'), + ('20', 'ussuri'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -307,6 +325,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), } @@ -674,7 +693,7 @@ def openstack_upgrade_available(package): else: try: avail_vers = get_os_version_install_source(src) - except: + except 
Exception: avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1868,3 +1887,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None): configs.write_all() if resume_unit_helper: resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. + + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. + """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 31225235..3ddaf0dd 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -182,6 +182,14 @@ 'train/proposed': 'bionic-proposed/train', 'bionic-train/proposed': 'bionic-proposed/train', 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', } diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/ceph/broker.py index 3226f4cc..bae74a12 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/ceph/broker.py @@ -212,6 +212,18 @@ def handle_add_permissions_to_key(request, service): return resp +def handle_set_key_permissions(request, service): + """Ensure the key has the requested permissions.""" + permissions = request.get('permissions') + client = request.get('client') + call = ['ceph', '--id', service, 'auth', 'caps', + 'client.{}'.format(client)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e), level=ERROR) + + def update_service_permissions(service, service_obj=None, namespace=None): """Update the key permissions for the named client in Ceph""" if not service_obj: @@ -866,6 +878,8 @@ def process_requests_v1(reqs): ret = handle_put_osd_in_bucket(request=req, service=svc) elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) + elif op == 'set-key-permissions': + ret = handle_set_key_permissions(request=req, service=svc) else: msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 3638d72a..1ec62d54 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -181,7 +181,7 @@ def unmounted_disks(): for device in context.list_devices(DEVTYPE='disk'): if device['SUBSYSTEM'] == 'block': matched = False - for block_type in [u'dm', u'loop', u'ram', u'nbd']: + for block_type in [u'dm-', u'loop', u'ram', u'nbd']: if block_type in device.device_node: matched = True if matched: From 635d38edefb8b58289402574f22b4ad5efce0ccc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 17 Jan 2020 14:21:28 -0500 Subject: [PATCH 
1860/2699] Sync charm-helpers for Ussuri/Focal release and version details Change-Id: I7587becc363365b7fff1d74b678e917bb27ea2b5 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 + .../charmhelpers/contrib/openstack/policyd.py | 308 ++++++++++++------ .../charmhelpers/contrib/openstack/utils.py | 59 +++- .../contrib/storage/linux/ceph.py | 33 +- ceph-proxy/charmhelpers/core/hookenv.py | 109 ++++++- ceph-proxy/charmhelpers/fetch/ubuntu.py | 8 + 6 files changed, 406 insertions(+), 125 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index a3d89936..d775861b 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -484,3 +484,17 @@ def add_haproxy_checks(nrpe, unit_name): shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh') + + +def remove_deprecated_check(nrpe, deprecated_services): + """ + Remove checks for deprecated services in list + + :param nrpe: NRPE object to remove check from + :type nrpe: NRPE + :param deprecated_services: List of deprecated services that are removed + :type deprecated_services: list + """ + for dep_svc in deprecated_services: + log('Deprecated service: {}'.format(dep_svc)) + nrpe.remove_check(shortname=dep_svc) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py index 1adf2472..d89d2cca 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py @@ -17,9 +17,11 @@ import os import six import shutil +import sys import yaml import zipfile +import charmhelpers import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as ch_host @@ -115,8 +117,8 @@ def upgrade_charm(): default: False description: | If True then use the resource file named 'policyd-override' to install - override yaml files in the service's policy.d directory. The resource - file should be a zip file containing at least one yaml file with a .yaml + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. """ @@ -134,14 +136,14 @@ def upgrade_charm(): Policy Overrides ---------------- -This service allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the service supports should be -clearly and unambiguously understood before trying to override, or add to, the -default policies that the service uses. +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The charm also has some policy defaults. They should also be understood before -being overridden. It is possible to break the system (for tenants and other -services) if policies are incorrectly applied to the service. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service.
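(ed. purely illustrative — a minimal override file of the kind the following paragraph describes; the rule name below is hypothetical and must match a rule the target service actually defines:)

    # e.g. contents of a yaml file shipped inside the policyd-override zip
    "identity:list_users": "rule:admin_required"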
Policy overrides are YAML files that contain rules that will add to, or override, existing policy rules in the service. The `policy.d` directory is @@ -149,30 +151,16 @@ def upgrade_charm(): `/etc/keystone/policy.d` directory, and as such, any manual changes to it will be overwritten on charm upgrades. -Policy overrides are provided to the charm using a resource file called -`policyd-override`. This is attached to the charm using (for example): +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: - juju attach-resource policyd-override= -The `` is the name that this charm is deployed as, with -`` being the resource file containing the policy overrides. + juju attach-resource policyd-override=overrides.zip -The format of the resource file is a ZIP file (.zip extension) containing at -least one YAML file with an extension of `.yaml` or `.yml`. Note that any -directories in the ZIP file are ignored; all of the files are flattened into a -single directory. There must not be any duplicated filenames; this will cause -an error and nothing in the resource file will be applied. +The policy override is enabled in the charm using: -(ed. next part is optional is the charm supports some form of -template/substitution on a read file) - -If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the -resource file then the charm will perform a substitution with charm variables -taken from the config or relations. (ed. edit as appropriate to include the -variable). - -To enable the policy overrides the config option `use-policyd-override` must be -set to `True`. + juju config use-policyd-override=true When `use-policyd-override` is `True` the status line of the charm will be prefixed with `PO:` indicating that policies have been overridden. If the @@ -180,12 +168,8 @@ def upgrade_charm(): status line will be prefixed with `PO (broken):`. The log file for the charm will indicate the reason. No policy override files are installed if the `PO (broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed - they will be the defaults for -the charm and service. - -If the policy overrides did not install then *either* attach a new, corrected, -resource file *or* disable the policy overrides by setting -`use-policyd-override` to False. +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. Policy overrides on one service may affect the functionality of another service. Therefore, it may be necessary to provide policy overrides for @@ -251,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths=None, blacklist_keys=None, template_function=None, - restart_handler=None): + restart_handler=None, + user=None, + group=None, + config_changed=False): """If the config option is set, get the resource file and process it to enable the policy.d overrides for the service passed. @@ -280,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release, directory. However, for any services where this is buggy then a restart_handler can be used to force the policy.d files to be read. + If the config_changed param is True, then the handling is slightly + different: It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. 
+ :param openstack_release: The openstack release that is installed. :type openstack_release: str :param service: the service name to construct the policy.d directory for. @@ -295,16 +287,43 @@ def maybe_do_policyd_overrides(openstack_release, :param restart_handler: The function to call if the service should be restarted. :type restart_handler: Union[None, Callable[]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :param config_changed: Set to True for config_changed hook. + :type config_changed: bool """ + _user = service if user is None else user + _group = service if group is None else group + if not is_policyd_override_valid_on_this_release(openstack_release): + return + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) return - except Exception: + except Exception as e: + hookenv.log("... ERROR: Exception is: {}".format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + import traceback + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return - if not is_policyd_override_valid_on_this_release(openstack_release): + # if the policyd overrides have been performed when doing config_changed + # just return + if config_changed and is_policy_success_file_set(): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) return # from now on it should succeed; if it doesn't then status line will show # broken. @@ -316,49 +335,18 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -def maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None): - """This function is designed to be called from the config changed hook - handler. It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") +def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): + """This function is designed to be called from the config changed hook. - See maybe_do_policyd_overrides() for more details on the params. + DEPRECATED: please use maybe_do_policyd_overrides() with the param + `config_changed` as `True`. - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted.
- :type restart_handler: Union[None, Callable[]] + See maybe_do_policyd_overrides() for more details on the params. """ - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) - return - except Exception: - return - # if the policyd overrides have been performed just return - if os.path.isfile(_policy_success_file()): - return - maybe_do_policyd_overrides( - openstack_release, service, blacklist_paths, blacklist_keys, - template_function, restart_handler) + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) def get_policy_resource_filename(): @@ -375,13 +363,16 @@ @contextlib.contextmanager -def open_and_filter_yaml_files(filepath): +def open_and_filter_yaml_files(filepath, has_subdirs=False): """Validate that the filepath provided is a zip file and contains at least one (.yaml|.yml) file, and that the files are not duplicated when the zip file is flattened. Note that the yaml files are not checked. This is the first stage in validating the policy zipfile; individual yaml files are not checked for validity or black listed keys. + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + An example of use is: with open_and_filter_yaml_files(some_path) as zfp, g: @@ -390,6 +381,8 @@ def open_and_filter_yaml_files(filepath): :param filepath: a filepath object that can be opened by zipfile :type filepath: Union[AnyStr, os.PathLike[AnyStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool :returns: (zfp handle, a generator of the (name, filename, ZipInfo object) tuples) as a tuple. @@ -402,7 +395,7 @@ def open_and_filter_yaml_files(filepath): with zipfile.ZipFile(filepath, 'r') as zfp: # first pass through; check for duplicates and at least one yaml file. names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp) + yamlfiles = _yamlfiles(zfp, has_subdirs) for name, _, _, _ in yamlfiles: names[name] += 1 # There must be at least 1 yaml file. @@ -418,26 +411,49 @@ def open_and_filter_yaml_files(filepath): yield (zfp, yamlfiles) -def _yamlfiles(zipfile): +def _yamlfiles(zipfile, has_subdirs=False): """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) and the infolist item from a zipfile. + If the `has_subdirs` param is True, then only yaml files that have a + directory component are read, and then the first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional processing that the dashboard + charm requires. + :param zipfile: the zipfile to read zipinfo items from :type zipfile: zipfile.ZipFile - :returns: generator of (name, ext, filename, info item) for each self-identified - yaml file. + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file.
:rtype: List[(str, str, str, zipfile.ZipInfo)] """ - l = [] + files = [] for infolist_item in zipfile.infolist(): - if infolist_item.is_dir(): - continue - _, name_ext = os.path.split(infolist_item.filename) + try: + if infolist_item.is_dir(): + continue + except AttributeError: + # fallback to "old" way to determine dir entry for pre-py36 + if infolist_item.filename.endswith('/'): + continue + _dir, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) + if has_subdirs and _dir != "": + name = os.path.join(_dir.split(os.path.sep)[0], name) ext = ext.lower() if ext and ext in POLICYD_VALID_EXTS: - l.append((name, ext, name_ext, infolist_item)) - return l + files.append((name, ext, name_ext, infolist_item)) + return files def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): @@ -483,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def policyd_dir_for(service): """Return the policy directory for the named service. - This assumes the default name of "policy.d" which is kept across all - charms. - :param service: str :returns: the policy.d override directory. :rtype: os.PathLike[str] @@ -493,7 +506,7 @@ def policyd_dir_for(service): return os.path.join("/", "etc", service, "policy.d") -def clean_policyd_dir_for(service, keep_paths=None): +def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): """Clean out the policyd directory except for items that should be kept. The keep_paths, if used, should be set to the full path of the files that @@ -506,12 +519,19 @@ def clean_policyd_dir_for(service, keep_paths=None): :type service: str :param keep_paths: optional list of paths to not delete. :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] """ + _user = service if user is None else user + _group = service if group is None else group keep_paths = keep_paths or [] path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): - ch_host.mkdir(path, owner=service, group=service, perms=0o775) - _scanner = os.scandir if six.PY3 else _py2_scandir + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -523,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None): os.remove(direntry.path) +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + @contextlib.contextmanager def _py2_scandir(path): """provide a py2 implementation of os.scandir if this module ever gets used @@ -558,6 +594,11 @@ def path_for_policy_file(service, name): It is constructed using policyd_dir_for(), the name and the ".yaml" extension. + For horizon, for example, it's a bit more complicated. 
The name param is + actually "override_service_dir/a_name", where the target service needs to be + one of the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + :param service: the service name :type service: str :param name: the name for the policy override @@ -585,6 +626,22 @@ def remove_policy_success_file(): pass +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + def policyd_status_message_prefix(): """Return the prefix str for the status line. @@ -594,7 +651,7 @@ def policyd_status_message_prefix(): :returns: the prefix :rtype: str """ - if os.path.isfile(_policy_success_file()): + if is_policy_success_file_set(): return "PO:" return "PO (broken):" @@ -603,7 +660,11 @@ def process_policy_resource_file(resource_file, service, blacklist_paths=None, blacklist_keys=None, - template_function=None): + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): """Process the resource file (which should contain at least one yaml file) and write those files to the service's policy.d directory. @@ -623,6 +684,16 @@ def process_policy_resource_file(resource_file, its file path reconstructed. This, also, must not match any path in the black list. + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + If any error occurs, then the policy.d directory is cleared, the error is written to the log, and the status line will eventually show as failed. @@ -638,17 +709,39 @@ def process_policy_resource_file(resource_file, :param template_function: Optional function that can modify the yaml document. :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr], AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] :returns: True if the processing was successful, False if not.
:rtype: boolean """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group try: - with open_and_filter_yaml_files(resource_file) as (zfp, gen): + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): # first clear out the policy.d directory and clear success remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) # construct a name for the output file. yaml_filename = path_for_policy_file(service, name) if yaml_filename in blacklist_paths: @@ -666,8 +759,12 @@ def process_policy_resource_file(resource_file, "available".format(filename)) doc = template_function(doc) yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - with open(yaml_filename, "wt") as f: - yaml.dump(yaml_doc, f) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) # Every thing worked, so we mark up a success. completed = True except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: @@ -691,10 +788,13 @@ def process_policy_resource_file(resource_file, hookenv.log("Processing {} failed: cleaning policy.d directory" .format(resource_file), level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) else: # touch the success filename hookenv.log("policy.d overrides installed.", level=POLICYD_LOG_LEVEL_DEFAULT) - open(_policy_success_file(), "w").close() + set_policy_success_file() return completed diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index ac96f844..566404a0 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -50,9 +50,14 @@ hook_name, application_version_set, cached, + leader_set, + leader_get, ) -from charmhelpers.core.strutils import BasicStringComparator +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, @@ -126,6 +131,7 @@ 'rocky', 'stein', 'train', + 'ussuri', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -146,6 +152,7 @@ ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) @@ -167,6 +174,7 @@ ('2018.2', 'rocky'), ('2019.1', 'stein'), ('2019.2', 'train'), + ('2020.1', 'ussuri'), ]) # The ugly duckling - must list releases oldest to newest @@ -204,7 +212,9 @@ ('stein', ['2.20.0', '2.21.0']), ('train', - ['2.22.0']), + ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0']), ]) # >= Liberty version->codename mapping @@ -219,6 +229,7 @@ ('18', 'rocky'), ('19', 'stein'), ('20', 'train'), + ('21', 'ussuri'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -230,6 +241,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -241,6 +253,7 
@@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -252,6 +265,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -263,6 +277,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -274,6 +289,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -285,6 +301,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -296,6 +313,7 @@ ('17', 'rocky'), ('18', 'stein'), ('19', 'train'), + ('20', 'ussuri'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -307,6 +325,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), } @@ -531,7 +550,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -542,6 +561,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -670,7 +691,10 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1693,7 +1717,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) @@ -1863,3 +1887,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None): configs.write_all() if resume_unit_helper: resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. + + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. + """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index a25c79e3..104977af 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -1187,6 +1187,15 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. 
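Stepping back to the `is_db_initialised`/`set_db_initialised` helpers added above: a leader-only migration gate might use them as sketched here. `run_migrations` is a placeholder for the charm's own migration step, and the leader check is an assumption about typical usage rather than part of the sync.

    from charmhelpers.contrib.openstack.utils import (
        is_db_initialised,
        set_db_initialised,
    )
    from charmhelpers.core.hookenv import is_leader

    def maybe_initialise_db(run_migrations):
        # The flag lives in leader storage, so every unit sees one answer.
        if is_leader() and not is_db_initialised():
            run_migrations()
            set_db_initialised()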
+ :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, object_prefix_permissions=None): @@ -1200,7 +1209,7 @@ def add_op_request_access_to_group(self, name, namespace=None, 'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']} """ - self.ops.append({ + self.add_op({ 'op': 'add-permissions-to-key', 'group': name, 'namespace': namespace, 'name': key_name or service_name(), @@ -1253,11 +1262,11 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, @@ -1285,12 +1294,12 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, :param max_objects: Maximum objects quota to apply :type max_objects: int """ - self.ops.append({'op': 'create-pool', 'name': name, - 'pool-type': 'erasure', - 'erasure-profile': erasure_profile, - 'weight': weight, - 'group': group, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py index 4744eb43..647f6e4b 100644 --- a/ceph-proxy/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers import deprecate + import six if not six.PY3: from UserDict import UserDict @@ -119,6 +121,24 @@ def log(message, level=None): raise +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" @@ -946,9 +966,23 @@ def charm_dir(): return os.environ.get('CHARM_DIR') +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. 
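The `add_op()` routing above makes repeated broker requests idempotent at the op level. A quick illustration, assuming charm-helpers is importable and using only defaults from the signatures shown in this hunk:

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(name='mypool', replica_count=3)
    rq.add_op_create_replicated_pool(name='mypool', replica_count=3)  # ignored
    assert len(rq.ops) == 1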
+ + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -957,36 +991,103 @@ def action_get(key=None): return action_data +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) def action_fail(message): - """Sets the action status to failed and sets the error message. + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. - The results set by action_set are preserved.""" + The results set by action_set are preserved. + """ subprocess.check_call(['action-fail', message]) +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. 
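The same probe-then-fall-back pattern repeats across `function_get`, `function_set` and `function_fail`. A condensed sketch of the idea, built on the `cmd_exists` helper introduced above (`_hook_tool` and `fail` are illustrative names, not charm-helpers API):

    import subprocess
    from charmhelpers.core.hookenv import cmd_exists

    def _hook_tool(preferred, fallback):
        # Prefer the Juju 'function-*' tool; older agents only ship 'action-*'.
        return preferred if cmd_exists(preferred) else fallback

    def fail(message):
        subprocess.check_call([_hook_tool('function-fail', 'action-fail'),
                               message])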
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + def action_name(): """Get the name of the currently executing action.""" return os.environ.get('JUJU_ACTION_NAME') +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + def action_uuid(): """Get the UUID of the currently executing action.""" return os.environ.get('JUJU_ACTION_UUID') +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + def action_tag(): """Get the tag for the currently executing action.""" return os.environ.get('JUJU_ACTION_TAG') +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 31225235..3ddaf0dd 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -182,6 +182,14 @@ 'train/proposed': 'bionic-proposed/train', 'bionic-train/proposed': 'bionic-proposed/train', 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', } From 63322b046716360ed6fc202696d4c0c5964b7048 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 17 Jan 2020 14:21:33 -0500 Subject: [PATCH 1861/2699] Sync charm-helpers for Ussuri/Focal release and version details Change-Id: I1133270811a8c27bf5e72794f7f6c2349e997385 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 + .../contrib/openstack/amulet/deployment.py | 3 + .../contrib/openstack/amulet/utils.py | 1 + .../audits/openstack_security_guide.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 69 +++- .../contrib/openstack/ha/utils.py | 3 +- .../charmhelpers/contrib/openstack/policyd.py | 308 ++++++++++++------ .../openstack/templates/section-placement | 19 ++ .../charmhelpers/contrib/openstack/utils.py | 59 +++- .../contrib/openstack/vaultlocker.py | 55 +++- .../contrib/storage/linux/ceph.py | 35 +- .../hooks/charmhelpers/core/hookenv.py | 109 ++++++- .../hooks/charmhelpers/fetch/ubuntu.py | 8 + ceph-radosgw/lib/ceph/broker.py | 14 + ceph-radosgw/lib/ceph/utils.py | 57 +++- 15 files changed, 600 insertions(+), 158 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index a3d89936..d775861b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -484,3 +484,17 @@ def add_haproxy_checks(nrpe, unit_name): shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh') + + +def remove_deprecated_check(nrpe, deprecated_services): + """ + Remove checks fro deprecated services in list + + :param nrpe: NRPE object to remove check from + :type nrpe: NRPE + :param 
deprecated_services: List of deprecated services that are removed + :type deprecated_services: list + """ + for dep_svc in deprecated_services: + log('Deprecated service: {}'.format(dep_svc)) + nrpe.remove_check(shortname=dep_svc) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 77925cc2..dd3aebe9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -295,9 +295,11 @@ def _get_openstack_release(self): ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, } return releases[(self.series, self.openstack)] @@ -316,6 +318,7 @@ def _get_openstack_release_string(self): ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 7d95a590..14864198 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -62,6 +62,7 @@ 'bionic_rocky', 'cosmic_rocky', 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', ] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index b7b8a60f..79740ed0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -244,8 +244,8 @@ def validate_file_permissions(config): @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) def validate_uses_keystone(audit_options): """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'DEFAULT'" + section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'api / DEFAULT'" assert section.get('auth_strategy') == "keystone", \ "Application is not using Keystone" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index a3d48c41..e99aba47 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -730,6 +730,10 @@ def __call__(self): if notification_format: ctxt['notification_format'] = notification_format + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) if send_notifications_to_logs: ctxt['send_notifications_to_logs'] = send_notifications_to_logs @@ -1940,7 +1944,7 @@ def _determine_ctxt(self): as well as the catalog_info string that would be supplied. 
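Returning to the `remove_deprecated_check()` helper added to nrpe.py above, a charm's NRPE refresh might call it as sketched here; the service names are illustrative.

    from charmhelpers.contrib.charmsupport.nrpe import (
        NRPE,
        remove_deprecated_check,
    )

    def update_nrpe_config():
        nrpe = NRPE()
        # Drop checks for daemons this charm no longer runs.
        remove_deprecated_check(nrpe, ['legacy-svc-one', 'legacy-svc-two'])
        nrpe.write()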
Returns a dict containing the volume_api_version and the volume_catalog_info. """ - rel = os_release(self.pkg, base='icehouse') + rel = os_release(self.pkg) version = '2' if CompareOpenStackReleases(rel) >= 'pike': version = '3' @@ -2140,7 +2144,7 @@ def __init__(self, pkg='python-keystone'): self.pkg = pkg def __call__(self): - ostack = os_release(self.pkg, base='icehouse') + ostack = os_release(self.pkg) osystem = lsb_release()['DISTRIB_CODENAME'].lower() return { 'openstack_release': ostack, @@ -2177,9 +2181,66 @@ def __call__(self): class HostInfoContext(OSContextGenerator): """Context to provide host information.""" + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' 
in addr[3]: + fqdn = addr[3] + break + return fqdn + def __call__(self): + name = socket.gethostname() ctxt = { - 'host_fqdn': socket.getfqdn(), - 'host': socket.gethostname(), + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index e017bc20..a5cbdf53 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -157,10 +157,11 @@ def generate_ha_relation_data(service, _relation_data = {'resources': {}, 'resource_params': {}} if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' _haproxy_res = 'res_{}_haproxy'.format(service) _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} _relation_data['resource_params'] = { - _haproxy_res: 'op monitor interval="5s"' + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) } _relation_data['init_services'] = {_haproxy_res: 'haproxy'} _relation_data['clones'] = { diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py index 1adf2472..d89d2cca 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,9 +17,11 @@ import os import six import shutil +import sys import yaml import zipfile +import charmhelpers import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as ch_host @@ -115,8 +117,8 @@ def upgrade_charm(): default: False description: | If True then use the resource file named 'policyd-override' to install - override yaml files in the service's policy.d directory. The resource - file should be a zip file containing at least one yaml file with a .yaml + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. """ @@ -134,14 +136,14 @@ def upgrade_charm(): Policy Overrides ---------------- -This service allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the service supports should be -clearly and unambiguously understood before trying to override, or add to, the -default policies that the service uses. +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The charm also has some policy defaults. They should also be understood before -being overridden. It is possible to break the system (for tenants and other -services) if policies are incorrectly applied to the service. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. Policy overrides are YAML files that contain rules that will add to, or override, existing policy rules in the service. 
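As an aside on the `HostInfoContext` changes earlier in this patch, the `hostname -f` emulation can be reproduced standalone (Python 3 shown; the synced code also handles the PY2 `socket.error` case):

    import socket

    def canonical_name(name=None):
        name = name or socket.gethostname()
        try:
            addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0,
                                       socket.AI_CANONNAME)
        except OSError:
            return name
        for addr in addrs:
            # addr[3] is the canonical name; accept it only if it is a FQDN
            if addr[3] and '.' in addr[3]:
                return addr[3]
        return name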
The `policy.d` directory is @@ -149,30 +151,16 @@ def upgrade_charm(): `/etc/keystone/policy.d` directory, and as such, any manual changes to it will be overwritten on charm upgrades. -Policy overrides are provided to the charm using a resource file called -`policyd-override`. This is attached to the charm using (for example): +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: - juju attach-resource policyd-override= -The `` is the name that this charm is deployed as, with -`` being the resource file containing the policy overrides. + juju attach-resource policyd-override=overrides.zip -The format of the resource file is a ZIP file (.zip extension) containing at -least one YAML file with an extension of `.yaml` or `.yml`. Note that any -directories in the ZIP file are ignored; all of the files are flattened into a -single directory. There must not be any duplicated filenames; this will cause -an error and nothing in the resource file will be applied. +The policy override is enabled in the charm using: -(ed. next part is optional is the charm supports some form of -template/substitution on a read file) - -If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the -resource file then the charm will perform a substitution with charm variables -taken from the config or relations. (ed. edit as appropriate to include the -variable). - -To enable the policy overrides the config option `use-policyd-override` must be -set to `True`. + juju config use-policyd-override=true When `use-policyd-override` is `True` the status line of the charm will be prefixed with `PO:` indicating that policies have been overridden. If the @@ -180,12 +168,8 @@ def upgrade_charm(): status line will be prefixed with `PO (broken):`. The log file for the charm will indicate the reason. No policy override files are installed if the `PO (broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed - they will be the defaults for -the charm and service. - -If the policy overrides did not install then *either* attach a new, corrected, -resource file *or* disable the policy overrides by setting -`use-policyd-override` to False. +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. Policy overrides on one service may affect the functionality of another service. Therefore, it may be necessary to provide policy overrides for @@ -251,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths=None, blacklist_keys=None, template_function=None, - restart_handler=None): + restart_handler=None, + user=None, + group=None, + config_changed=False): """If the config option is set, get the resource file and process it to enable the policy.d overrides for the service passed. @@ -280,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release, directory. However, for any services where this is buggy then a restart_handler can be used to force the policy.d files to be read. + If the config_changed param is True, then the handling is slightly + different: It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + :param openstack_release: The openstack release that is installed. 
:type openstack_release: str :param service: the service name to construct the policy.d directory for. @@ -295,16 +287,43 @@ def maybe_do_policyd_overrides(openstack_release, :param restart_handler: The function to call if the service should be restarted. :type restart_handler: Union[None, Callable[]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :param config_changed: Set to True for config_changed hook. + :type config_changed: bool """ + _user = service if user is None else user + _group = service if group is None else group + if not is_policyd_override_valid_on_this_release(openstack_release): + return + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) return - except Exception: + except Exception as e: + hookenv.log("... ERROR: Exception is: {}".format(str(e)), + level=POLICYD_CONFIG_NAME) + import traceback + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return - if not is_policyd_override_valid_on_this_release(openstack_release): + # if the policyd overrides have been performed when doing config_changed + # just return + if config_changed and is_policy_success_file_set(): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) return # from now on it should succeed; if it doesn't then status line will show # broken. @@ -316,49 +335,18 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -def maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None): - """This function is designed to be called from the config changed hook - handler. It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. +@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): + """This function is designed to be called from the config changed hook. - See maybe_do_policyd_overrides() for more details on the params. + DEPRECATED: please use maybe_do_policyd_overrides() with the param + `config_changed` as `True`. - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. 
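With the consolidation above, the config-changed path is just a flag on the main entry point. A hypothetical hook handler might look like this; the service name and release value are illustrative.

    from charmhelpers.contrib.openstack.policyd import maybe_do_policyd_overrides

    def config_changed():
        maybe_do_policyd_overrides(
            openstack_release='ussuri',
            service='openstack-dashboard',
            # replaces maybe_do_policyd_overrides_on_config_changed()
            config_changed=True,
        )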
- :type restart_handler: Union[None, Callable[]] + See maybe_do_policyd_overrides() for more details on the params. """ - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) - return - except Exception: - return - # if the policyd overrides have been performed just return - if os.path.isfile(_policy_success_file()): - return - maybe_do_policyd_overrides( - openstack_release, service, blacklist_paths, blacklist_keys, - template_function, restart_handler) + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) def get_policy_resource_filename(): @@ -375,13 +363,16 @@ def get_policy_resource_filename(): @contextlib.contextmanager -def open_and_filter_yaml_files(filepath): +def open_and_filter_yaml_files(filepath, has_subdirs=False): """Validate that the filepath provided is a zip file and contains at least one (.yaml|.yml) file, and that the files are not duplicated when the zip file is flattened. Note that the yaml files are not checked. This is the first stage in validating the policy zipfile; individual yaml files are not checked for validity or black listed keys. + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + An example of use is: with open_and_filter_yaml_files(some_path) as zfp, g: @@ -390,6 +381,8 @@ def open_and_filter_yaml_files(filepath): :param filepath: a filepath object that can be opened by zipfile :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool :returns: (zfp handle, a generator of the (name, filename, ZipInfo object) tuples) as a tuple. @@ -402,7 +395,7 @@ def open_and_filter_yaml_files(filepath): with zipfile.ZipFile(filepath, 'r') as zfp: # first pass through; check for duplicates and at least one yaml file. names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp) + yamlfiles = _yamlfiles(zfp, has_subdirs) for name, _, _, _ in yamlfiles: names[name] += 1 # There must be at least 1 yaml file. @@ -418,26 +411,49 @@ def open_and_filter_yaml_files(filepath): yield (zfp, yamlfiles) -def _yamlfiles(zipfile): +def _yamlfiles(zipfile, has_subdirs=False): """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) and the infolist item from a zipfile. + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional, processing that the dashboard + charm requires. + :param zipfile: the zipfile to read zipinfo items from :type zipfile: zipfile.ZipFile - :returns: generator of (name, ext, filename, info item) for each self-identified - yaml file. + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
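For reference, the updated context manager is consumed like so; the zip path is illustrative, and the tuple shape follows the generator described above.

    from charmhelpers.contrib.openstack.policyd import open_and_filter_yaml_files

    with open_and_filter_yaml_files('overrides.zip',
                                    has_subdirs=True) as (zfp, files):
        for name, ext, name_ext, zipinfo in files:
            with zfp.open(zipinfo) as fp:
                print(name, len(fp.read()))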
:rtype: List[(str, str, str, zipfile.ZipInfo)] """ - l = [] + files = [] for infolist_item in zipfile.infolist(): - if infolist_item.is_dir(): - continue - _, name_ext = os.path.split(infolist_item.filename) + try: + if infolist_item.is_dir(): + continue + except AttributeError: + # fallback to "old" way to determine dir entry for pre-py36 + if infolist_item.filename.endswith('/'): + continue + _dir, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) + if has_subdirs and _dir != "": + name = os.path.join(_dir.split(os.path.sep)[0], name) ext = ext.lower() if ext and ext in POLICYD_VALID_EXTS: - l.append((name, ext, name_ext, infolist_item)) - return l + files.append((name, ext, name_ext, infolist_item)) + return files def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): @@ -483,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def policyd_dir_for(service): """Return the policy directory for the named service. - This assumes the default name of "policy.d" which is kept across all - charms. - :param service: str :returns: the policy.d override directory. :rtype: os.PathLike[str] @@ -493,7 +506,7 @@ def policyd_dir_for(service): return os.path.join("/", "etc", service, "policy.d") -def clean_policyd_dir_for(service, keep_paths=None): +def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): """Clean out the policyd directory except for items that should be kept. The keep_paths, if used, should be set to the full path of the files that @@ -506,12 +519,19 @@ def clean_policyd_dir_for(service, keep_paths=None): :type service: str :param keep_paths: optional list of paths to not delete. :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] """ + _user = service if user is None else user + _group = service if group is None else group keep_paths = keep_paths or [] path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): - ch_host.mkdir(path, owner=service, group=service, perms=0o775) - _scanner = os.scandir if six.PY3 else _py2_scandir + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -523,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None): os.remove(direntry.path) +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + @contextlib.contextmanager def _py2_scandir(path): """provide a py2 implementation of os.scandir if this module ever gets used @@ -558,6 +594,11 @@ def path_for_policy_file(service, name): It is constructed using policyd_dir_for(), the name and the ".yaml" extension. + For horizon, for example, it's a bit more complicated. 
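A compact sketch of the ownership behaviour these changes introduce: everything under the policy.d tree is created as a configurable user and group, defaulting to the service name. `ensure_policyd_tree` is an illustrative name, not charm-helpers API.

    import os
    import charmhelpers.core.host as ch_host

    def ensure_policyd_tree(service, user=None, group=None):
        user = user or service
        group = group or service
        path = os.path.join('/', 'etc', service, 'policy.d')
        if not os.path.exists(path):
            ch_host.mkdir(path, owner=user, group=group, perms=0o775)
        return path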
The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + :param service: the service name :type service: str :param name: the name for the policy override @@ -585,6 +626,22 @@ def remove_policy_success_file(): pass +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + def policyd_status_message_prefix(): """Return the prefix str for the status line. @@ -594,7 +651,7 @@ def policyd_status_message_prefix(): :returns: the prefix :rtype: str """ - if os.path.isfile(_policy_success_file()): + if is_policy_success_file_set(): return "PO:" return "PO (broken):" @@ -603,7 +660,11 @@ def process_policy_resource_file(resource_file, service, blacklist_paths=None, blacklist_keys=None, - template_function=None): + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): """Process the resource file (which should contain at least one yaml file) and write those files to the service's policy.d directory. @@ -623,6 +684,16 @@ def process_policy_resource_file(resource_file, its file path reconstructed. This, also, must not match any path in the black list. + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + If any error occurs, then the policy.d directory is cleared, the error is written to the log, and the status line will eventually show as failed. @@ -638,17 +709,39 @@ def process_policy_resource_file(resource_file, :param template_function: Optional function that can modify the yaml document. :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] :returns: True if the processing was successful, False if not. 
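A hypothetical caller exercising both new filename controls follows; the prefixing rule in `_prefix_bare_names` is made up for illustration, while the parameters themselves come from the signature above.

    import os

    from charmhelpers.contrib.openstack.policyd import (
        get_policy_resource_filename,
        process_policy_resource_file,
    )

    def _prefix_bare_names(name):
        # Illustrative preprocessing: horizon-style names pass through,
        # bare names are routed to a 'compute' subdirectory.
        return name if os.path.sep in name else os.path.join('compute', name)

    completed = process_policy_resource_file(
        get_policy_resource_filename(),  # None if no resource is attached
        service='openstack-dashboard',
        preserve_topdir=True,
        preprocess_filename=_prefix_bare_names,
    )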
:rtype: boolean """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group try: - with open_and_filter_yaml_files(resource_file) as (zfp, gen): + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): # first clear out the policy.d directory and clear success remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) # construct a name for the output file. yaml_filename = path_for_policy_file(service, name) if yaml_filename in blacklist_paths: @@ -666,8 +759,12 @@ def process_policy_resource_file(resource_file, "available".format(filename)) doc = template_function(doc) yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - with open(yaml_filename, "wt") as f: - yaml.dump(yaml_doc, f) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) # Every thing worked, so we mark up a success. completed = True except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: @@ -691,10 +788,13 @@ def process_policy_resource_file(resource_file, hookenv.log("Processing {} failed: cleaning policy.d directory" .format(resource_file), level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) else: # touch the success filename hookenv.log("policy.d overrides installed.", level=POLICYD_LOG_LEVEL_DEFAULT) - open(_policy_success_file(), "w").close() + set_policy_success_file() return completed diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement new file mode 100644 index 00000000..97724bdb --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement @@ -0,0 +1,19 @@ +[placement] +{% if auth_host -%} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} +{% if region -%} +os_region_name = {{ region }} +{% endif -%} +randomize_allocation_candidates = true diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index ac96f844..566404a0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -50,9 +50,14 @@ hook_name, application_version_set, cached, + leader_set, + leader_get, ) -from charmhelpers.core.strutils import BasicStringComparator +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) from 
charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, @@ -126,6 +131,7 @@ 'rocky', 'stein', 'train', + 'ussuri', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -146,6 +152,7 @@ ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) @@ -167,6 +174,7 @@ ('2018.2', 'rocky'), ('2019.1', 'stein'), ('2019.2', 'train'), + ('2020.1', 'ussuri'), ]) # The ugly duckling - must list releases oldest to newest @@ -204,7 +212,9 @@ ('stein', ['2.20.0', '2.21.0']), ('train', - ['2.22.0']), + ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0']), ]) # >= Liberty version->codename mapping @@ -219,6 +229,7 @@ ('18', 'rocky'), ('19', 'stein'), ('20', 'train'), + ('21', 'ussuri'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -230,6 +241,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -241,6 +253,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -252,6 +265,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -263,6 +277,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -274,6 +289,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -285,6 +301,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -296,6 +313,7 @@ ('17', 'rocky'), ('18', 'stein'), ('19', 'train'), + ('20', 'ussuri'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -307,6 +325,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), } @@ -531,7 +550,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -542,6 +561,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -670,7 +691,10 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1693,7 +1717,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) @@ -1863,3 +1887,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None): configs.write_all() if resume_unit_helper: resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. 
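One consequence of the `os_release()` change above is that most call sites can drop their explicit `base=` argument; the fallback is now derived from the running series.

    from charmhelpers.contrib.openstack.utils import os_release

    # Previously: os_release('keystone', base='icehouse')
    # Now the default base comes from UBUNTU_OPENSTACK_RELEASE for the
    # installed series (e.g. 'ussuri' on focal) when the package alone
    # gives no answer.
    release = os_release('keystone')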
+ + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. + """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py index a8e4bf88..c162de27 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,9 +37,13 @@ def __init__(self, secret_backend=None): ) def __call__(self): + import hvac + ctxt = {} + # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() - last_token = db.get('last-token') + # currently known-good secret-id secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): for unit in hookenv.related_units(relation_id): data = hookenv.relation_get(unit=unit, @@ -54,27 +58,48 @@ def __call__(self): # Tokens may change when secret_id's are being # reissued - if so use token to get new secret_id - if token != last_token: + token_success = False + try: secret_id = retrieve_secret_id( url=vault_url, token=token ) + token_success = True + except hvac.exceptions.InvalidRequest: + # Try next + pass + + if token_success: db.set('secret-id', secret_id) - db.set('last-token', token) db.flush() - ctxt = { - 'vault_url': vault_url, - 'role_id': json.loads(role_id), - 'secret_id': secret_id, - 'secret_backend': self.secret_backend, - } - vault_ca = data.get('vault_ca') - if vault_ca: - ctxt['vault_ca'] = json.loads(vault_ca) - self.complete = True - return ctxt - return {} + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + self.complete = True + break + else: + if secret_id: + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + if self.complete: + break + + if ctxt: + self.complete = True + + return ctxt def write_vaultlocker_conf(context, priority=100): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13dfa8b..104977af 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -422,6 +422,8 @@ def enabled_manager_modules(): cmd = ['ceph', 'mgr', 'module', 'ls'] try: modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -1185,6 +1187,15 @@ def __init__(self, api_version=1, request_id=None): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the 
list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, object_prefix_permissions=None): @@ -1198,7 +1209,7 @@ def add_op_request_access_to_group(self, name, namespace=None, 'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']} """ - self.ops.append({ + self.add_op({ 'op': 'add-permissions-to-key', 'group': name, 'namespace': namespace, 'name': key_name or service_name(), @@ -1251,11 +1262,11 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, @@ -1283,12 +1294,12 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, :param max_objects: Maximum objects quota to apply :type max_objects: int """ - self.ops.append({'op': 'create-pool', 'name': name, - 'pool-type': 'erasure', - 'erasure-profile': erasure_profile, - 'weight': weight, - 'group': group, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 4744eb43..647f6e4b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers import deprecate + import six if not six.PY3: from UserDict import UserDict @@ -119,6 +121,24 @@ def log(message, level=None): raise +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" @@ -946,9 +966,23 @@ def charm_dir(): return os.environ.get('CHARM_DIR') +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. 
deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -957,36 +991,103 @@ def action_get(key=None): return action_data +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) def action_fail(message): - """Sets the action status to failed and sets the error message. + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. - The results set by action_set are preserved.""" + The results set by action_set are preserved. + """ subprocess.check_call(['action-fail', message]) +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. 
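Taken together, an action implemented against the new helpers looks roughly like this; the parameter and result names are illustrative.

    from charmhelpers.core.hookenv import (
        function_fail,
        function_get,
        function_set,
    )

    def pause_action():
        try:
            timeout = function_get('timeout')
            # ... perform the pause work here ...
            function_set({'outcome': 'paused', 'timeout': timeout})
        except Exception as e:
            function_fail('pause failed: {}'.format(e))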
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + def action_name(): """Get the name of the currently executing action.""" return os.environ.get('JUJU_ACTION_NAME') +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + def action_uuid(): """Get the UUID of the currently executing action.""" return os.environ.get('JUJU_ACTION_UUID') +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + def action_tag(): """Get the tag for the currently executing action.""" return os.environ.get('JUJU_ACTION_TAG') +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 31225235..3ddaf0dd 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -182,6 +182,14 @@ 'train/proposed': 'bionic-proposed/train', 'bionic-train/proposed': 'bionic-proposed/train', 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', } diff --git a/ceph-radosgw/lib/ceph/broker.py b/ceph-radosgw/lib/ceph/broker.py index 3226f4cc..bae74a12 100644 --- a/ceph-radosgw/lib/ceph/broker.py +++ b/ceph-radosgw/lib/ceph/broker.py @@ -212,6 +212,18 @@ def handle_add_permissions_to_key(request, service): return resp +def handle_set_key_permissions(request, service): + """Ensure the key has the requested permissions.""" + permissions = request.get('permissions') + client = request.get('client') + call = ['ceph', '--id', service, 'auth', 'caps', + 'client.{}'.format(client)] + permissions + try: + check_call(call) + except CalledProcessError as e: + log("Error updating key capabilities: {}".format(e), level=ERROR) + + def update_service_permissions(service, service_obj=None, namespace=None): """Update the key permissions for the named client in Ceph""" if not service_obj: @@ -866,6 +878,8 @@ def process_requests_v1(reqs): ret = handle_put_osd_in_bucket(request=req, service=svc) elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) + elif op == 'set-key-permissions': + ret = handle_set_key_permissions(request=req, service=svc) else: msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) diff --git a/ceph-radosgw/lib/ceph/utils.py b/ceph-radosgw/lib/ceph/utils.py index ee555e25..1ec62d54 100644 --- a/ceph-radosgw/lib/ceph/utils.py +++ b/ceph-radosgw/lib/ceph/utils.py @@ -29,7 +29,6 @@ from charmhelpers.core import hookenv from charmhelpers.core import templating -from charmhelpers.core.decorators import retry_on_exception from charmhelpers.core.host import ( chownr, cmp_pkgrevno, @@ -81,7 +80,12 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', +if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 
'btrfs-progs' +else: + btrfs_package = 'btrfs-tools' + +PACKAGES = ['ceph', 'gdisk', btrfs_package, 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] @@ -177,7 +181,7 @@ def unmounted_disks(): for device in context.list_devices(DEVTYPE='disk'): if device['SUBSYSTEM'] == 'block': matched = False - for block_type in [u'dm', u'loop', u'ram', u'nbd']: + for block_type in [u'dm-', u'loop', u'ram', u'nbd']: if block_type in device.device_node: matched = True if matched: @@ -793,12 +797,33 @@ def is_leader(): return False +def manager_available(): + # if manager daemon isn't on this release, just say it is Fine + if cmp_pkgrevno('ceph', '11.0.0') < 0: + return True + cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] + try: + result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return result['available'] + except subprocess.CalledProcessError as e: + log("'{}' failed: {}".format(" ".join(cmd), str(e))) + return False + except Exception: + return False + + def wait_for_quorum(): while not is_quorum(): log("Waiting for quorum to be reached") time.sleep(3) +def wait_for_manager(): + while not manager_available(): + log("Waiting for manager to be available") + time.sleep(5) + + def add_bootstrap_hint(peer): asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) cmd = [ @@ -888,7 +913,12 @@ def is_pristine_disk(dev): """ want_bytes = 2048 - f = open(dev, 'rb') + try: + f = open(dev, 'rb') + except OSError as e: + log(e) + return False + data = f.read(want_bytes) read_bytes = len(data) if read_bytes != want_bytes: @@ -1274,7 +1304,6 @@ def bootstrap_monitor_cluster(secret): path, done, init_marker) - _create_keyrings() except: raise finally: @@ -1322,10 +1351,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): service_restart('ceph-mon-all') -@retry_on_exception(3, base_delay=5) -def _create_keyrings(): +def create_keyrings(): """Create keyrings for operation of ceph-mon units + NOTE: The quorum should be done before to execute this function. + :raises: Exception if keyrings cannot be created """ if cmp_pkgrevno('ceph', '14.0.0') >= 0: @@ -2285,14 +2315,19 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): def get_upgrade_position(osd_sorted_list, match_name): """Return the upgrade position for the given osd. - :param osd_sorted_list: list. Osds sorted - :param match_name: str. The osd name to match - :returns: int. The position or None if not found + :param osd_sorted_list: Osds sorted + :type osd_sorted_list: [str] + :param match_name: The osd name to match + :type match_name: str + :returns: The position of the name + :rtype: int + :raises: ValueError if name is not found """ for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - return None + raise ValueError("osd name '{}' not found in get_upgrade_position list" + .format(match_name)) # Edge cases: From 1e9b5fa9819b2594d287eb44ee654f8c08c4cdb5 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 17 Jan 2020 14:21:17 -0500 Subject: [PATCH 1862/2699] Sync charm-helpers for Ussuri/Focal release and version details Also add placement service/relations to bionic-train-with-fsid.yaml test bundle. 
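Note that the synced `get_upgrade_position()` change above is API-breaking: it now raises `ValueError` where it previously returned `None`, so callers must catch the exception rather than test for a sentinel. A minimal sketch of the new contract — `Osd` here is an illustrative stand-in exposing the `.name` attribute the function inspects, not the charm's real OSD type:

```python
# Illustrative only: `Osd` stands in for the charm's real OSD objects.
from collections import namedtuple

Osd = namedtuple('Osd', ['name'])


def get_upgrade_position(osd_sorted_list, match_name):
    """Return the index of match_name, raising ValueError if absent."""
    for index, item in enumerate(osd_sorted_list):
        if item.name == match_name:
            return index
    raise ValueError("osd name '{}' not found in get_upgrade_position list"
                     .format(match_name))


osds = [Osd('osd.0'), Osd('osd.1'), Osd('osd.2')]
print(get_upgrade_position(osds, 'osd.1'))  # -> 1

try:
    get_upgrade_position(osds, 'osd.9')
except ValueError as exc:
    # Callers that relied on the old `None` sentinel must catch this now.
    print(exc)
```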
Change-Id: If3741ef2d579219fe938c30b8c7c890f4ce7eae9 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 14 + .../contrib/openstack/amulet/deployment.py | 3 + .../contrib/openstack/amulet/utils.py | 1 + .../audits/openstack_security_guide.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 65 ++++- .../contrib/openstack/ha/utils.py | 3 +- .../charmhelpers/contrib/openstack/policyd.py | 250 ++++++++++++------ .../charmhelpers/contrib/openstack/utils.py | 48 +++- .../contrib/openstack/vaultlocker.py | 55 ++-- .../charmhelpers/contrib/python/__init__.py | 0 ceph-mon/hooks/charmhelpers/core/hookenv.py | 103 +++++++- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 8 + ceph-mon/lib/ceph/utils.py | 22 +- .../tests/bundles/bionic-train-with-fsid.yaml | 11 + 14 files changed, 466 insertions(+), 121 deletions(-) delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/python/__init__.py diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index a3d89936..d775861b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -484,3 +484,17 @@ def add_haproxy_checks(nrpe, unit_name): shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh') + + +def remove_deprecated_check(nrpe, deprecated_services): + """ + Remove checks fro deprecated services in list + + :param nrpe: NRPE object to remove check from + :type nrpe: NRPE + :param deprecated_services: List of deprecated services that are removed + :type deprecated_services: list + """ + for dep_svc in deprecated_services: + log('Deprecated service: {}'.format(dep_svc)) + nrpe.remove_check(shortname=dep_svc) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 77925cc2..dd3aebe9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -295,9 +295,11 @@ def _get_openstack_release(self): ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, ('cosmic', None): self.cosmic_rocky, ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, } return releases[(self.series, self.openstack)] @@ -316,6 +318,7 @@ def _get_openstack_release_string(self): ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 7d95a590..14864198 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -62,6 +62,7 @@ 'bionic_rocky', 'cosmic_rocky', 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', ] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index b7b8a60f..79740ed0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ 
b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -244,8 +244,8 @@ def validate_file_permissions(config): @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) def validate_uses_keystone(audit_options): """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'DEFAULT'" + section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'api / DEFAULT'" assert section.get('auth_strategy') == "keystone", \ "Application is not using Keystone" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 9b80b6d6..e99aba47 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -730,6 +730,10 @@ def __call__(self): if notification_format: ctxt['notification_format'] = notification_format + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) if send_notifications_to_logs: ctxt['send_notifications_to_logs'] = send_notifications_to_logs @@ -2177,9 +2181,66 @@ def __call__(self): class HostInfoContext(OSContextGenerator): """Context to provide host information.""" + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' 
in addr[3]: + fqdn = addr[3] + break + return fqdn + def __call__(self): + name = socket.gethostname() ctxt = { - 'host_fqdn': socket.getfqdn(), - 'host': socket.gethostname(), + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py index e017bc20..a5cbdf53 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -157,10 +157,11 @@ def generate_ha_relation_data(service, _relation_data = {'resources': {}, 'resource_params': {}} if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' _haproxy_res = 'res_{}_haproxy'.format(service) _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} _relation_data['resource_params'] = { - _haproxy_res: 'op monitor interval="5s"' + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) } _relation_data['init_services'] = {_haproxy_res: 'haproxy'} _relation_data['clones'] = { diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index 83ca4ab7..d89d2cca 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -21,6 +21,7 @@ import yaml import zipfile +import charmhelpers import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as ch_host @@ -234,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths=None, blacklist_keys=None, template_function=None, - restart_handler=None): + restart_handler=None, + user=None, + group=None, + config_changed=False): """If the config option is set, get the resource file and process it to enable the policy.d overrides for the service passed. @@ -263,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release, directory. However, for any services where this is buggy then a restart_handler can be used to force the policy.d files to be read. + If the config_changed param is True, then the handling is slightly + different: It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + :param openstack_release: The openstack release that is installed. :type openstack_release: str :param service: the service name to construct the policy.d directory for. @@ -278,18 +287,26 @@ def maybe_do_policyd_overrides(openstack_release, :param restart_handler: The function to call if the service should be restarted. :type restart_handler: Union[None, Callable[]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :param config_changed: Set to True for config_changed hook. + :type config_changed: bool """ - hookenv.log("Running maybe_do_policyd_overrides", - level=POLICYD_LOG_LEVEL_DEFAULT) + _user = service if user is None else user + _group = service if group is None else group if not is_policyd_override_valid_on_this_release(openstack_release): - hookenv.log("... 
policy overrides not valid on this release: {}" - .format(openstack_release), - level=POLICYD_LOG_LEVEL_DEFAULT) return + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) if (os.path.isfile(_policy_success_file()) and restart_handler is not None and callable(restart_handler)): @@ -302,6 +319,12 @@ def maybe_do_policyd_overrides(openstack_release, import traceback hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return + # if the policyd overrides have been performed when doing config_changed + # just return + if config_changed and is_policy_success_file_set(): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) + return # from now on it should succeed; if it doesn't then status line will show # broken. resource_filename = get_policy_resource_filename() @@ -312,63 +335,18 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -def maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None): - """This function is designed to be called from the config changed hook - handler. It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. +@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): + """This function is designed to be called from the config changed hook. - See maybe_do_policyd_overrides() for more details on the params. + DEPRECATED: please use maybe_do_policyd_overrides() with the param + `config_changed` as `True`. - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. - :type restart_handler: Union[None, Callable[]] + See maybe_do_policyd_overrides() for more details on the params. """ - if not is_policyd_override_valid_on_this_release(openstack_release): - return - hookenv.log("Running maybe_do_policyd_overrides_on_config_changed", - level=POLICYD_LOG_LEVEL_DEFAULT) - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, blacklist_paths) - if (os.path.isfile(_policy_success_file()) and - restart_handler is not None and - callable(restart_handler)): - restart_handler() - remove_policy_success_file() - return - except Exception as e: - hookenv.log("... 
ERROR: Exception is: {}".format(str(e)), - level=POLICYD_CONFIG_NAME) - import traceback - hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) - return - # if the policyd overrides have been performed just return - if os.path.isfile(_policy_success_file()): - hookenv.log("... already setup, so skipping.", - level=POLICYD_LOG_LEVEL_DEFAULT) - return - maybe_do_policyd_overrides( - openstack_release, service, blacklist_paths, blacklist_keys, - template_function, restart_handler) + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) def get_policy_resource_filename(): @@ -385,13 +363,16 @@ def get_policy_resource_filename(): @contextlib.contextmanager -def open_and_filter_yaml_files(filepath): +def open_and_filter_yaml_files(filepath, has_subdirs=False): """Validate that the filepath provided is a zip file and contains at least one (.yaml|.yml) file, and that the files are not duplicated when the zip file is flattened. Note that the yaml files are not checked. This is the first stage in validating the policy zipfile; individual yaml files are not checked for validity or black listed keys. + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + An example of use is: with open_and_filter_yaml_files(some_path) as zfp, g: @@ -400,6 +381,8 @@ def open_and_filter_yaml_files(filepath): :param filepath: a filepath object that can be opened by zipfile :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool :returns: (zfp handle, a generator of the (name, filename, ZipInfo object) tuples) as a tuple. @@ -412,7 +395,7 @@ def open_and_filter_yaml_files(filepath): with zipfile.ZipFile(filepath, 'r') as zfp: # first pass through; check for duplicates and at least one yaml file. names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp) + yamlfiles = _yamlfiles(zfp, has_subdirs) for name, _, _, _ in yamlfiles: names[name] += 1 # There must be at least 1 yaml file. @@ -428,17 +411,33 @@ def open_and_filter_yaml_files(filepath): yield (zfp, yamlfiles) -def _yamlfiles(zipfile): +def _yamlfiles(zipfile, has_subdirs=False): """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) and the infolist item from a zipfile. + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional, processing that the dashboard + charm requires. + :param zipfile: the zipfile to read zipinfo items from :type zipfile: zipfile.ZipFile - :returns: generator of (name, ext, filename, info item) for each self-identified - yaml file. + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
:rtype: List[(str, str, str, zipfile.ZipInfo)] """ - l = [] + files = [] for infolist_item in zipfile.infolist(): try: if infolist_item.is_dir(): @@ -447,12 +446,14 @@ def _yamlfiles(zipfile): # fallback to "old" way to determine dir entry for pre-py36 if infolist_item.filename.endswith('/'): continue - _, name_ext = os.path.split(infolist_item.filename) + _dir, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) + if has_subdirs and _dir != "": + name = os.path.join(_dir.split(os.path.sep)[0], name) ext = ext.lower() if ext and ext in POLICYD_VALID_EXTS: - l.append((name, ext, name_ext, infolist_item)) - return l + files.append((name, ext, name_ext, infolist_item)) + return files def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): @@ -498,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def policyd_dir_for(service): """Return the policy directory for the named service. - This assumes the default name of "policy.d" which is kept across all - charms. - :param service: str :returns: the policy.d override directory. :rtype: os.PathLike[str] @@ -508,7 +506,7 @@ def policyd_dir_for(service): return os.path.join("/", "etc", service, "policy.d") -def clean_policyd_dir_for(service, keep_paths=None): +def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): """Clean out the policyd directory except for items that should be kept. The keep_paths, if used, should be set to the full path of the files that @@ -521,11 +519,18 @@ def clean_policyd_dir_for(service, keep_paths=None): :type service: str :param keep_paths: optional list of paths to not delete. :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] """ + _user = service if user is None else user + _group = service if group is None else group keep_paths = keep_paths or [] path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): - ch_host.mkdir(path, owner=service, group=service, perms=0o775) + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. @@ -538,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None): os.remove(direntry.path) +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + @contextlib.contextmanager def _py2_scandir(path): """provide a py2 implementation of os.scandir if this module ever gets used @@ -573,6 +594,11 @@ def path_for_policy_file(service, name): It is constructed using policyd_dir_for(), the name and the ".yaml" extension. + For horizon, for example, it's a bit more complicated. The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. 
This translation and check is + done in the _yamlfiles() function. + :param service: the service name :type service: str :param name: the name for the policy override @@ -600,6 +626,22 @@ def remove_policy_success_file(): pass +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + def policyd_status_message_prefix(): """Return the prefix str for the status line. @@ -609,7 +651,7 @@ def policyd_status_message_prefix(): :returns: the prefix :rtype: str """ - if os.path.isfile(_policy_success_file()): + if is_policy_success_file_set(): return "PO:" return "PO (broken):" @@ -618,7 +660,11 @@ def process_policy_resource_file(resource_file, service, blacklist_paths=None, blacklist_keys=None, - template_function=None): + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): """Process the resource file (which should contain at least one yaml file) and write those files to the service's policy.d directory. @@ -638,6 +684,16 @@ def process_policy_resource_file(resource_file, its file path reconstructed. This, also, must not match any path in the black list. + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + If any error occurs, then the policy.d directory is cleared, the error is written to the log, and the status line will eventually show as failed. @@ -653,18 +709,39 @@ def process_policy_resource_file(resource_file, :param template_function: Optional function that can modify the yaml document. :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] :returns: True if the processing was successful, False if not. 
:rtype: boolean """ hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group try: - with open_and_filter_yaml_files(resource_file) as (zfp, gen): + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): # first clear out the policy.d directory and clear success remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) # construct a name for the output file. yaml_filename = path_for_policy_file(service, name) if yaml_filename in blacklist_paths: @@ -682,8 +759,12 @@ def process_policy_resource_file(resource_file, "available".format(filename)) doc = template_function(doc) yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - with open(yaml_filename, "wt") as f: - yaml.dump(yaml_doc, f) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) # Every thing worked, so we mark up a success. completed = True except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: @@ -707,10 +788,13 @@ def process_policy_resource_file(resource_file, hookenv.log("Processing {} failed: cleaning policy.d directory" .format(resource_file), level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) else: # touch the success filename hookenv.log("policy.d overrides installed.", level=POLICYD_LOG_LEVEL_DEFAULT) - open(_policy_success_file(), "w").close() + set_policy_success_file() return completed diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 02190264..566404a0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -50,9 +50,14 @@ hook_name, application_version_set, cached, + leader_set, + leader_get, ) -from charmhelpers.core.strutils import BasicStringComparator +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, @@ -126,6 +131,7 @@ 'rocky', 'stein', 'train', + 'ussuri', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -146,6 +152,7 @@ ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), ]) @@ -167,6 +174,7 @@ ('2018.2', 'rocky'), ('2019.1', 'stein'), ('2019.2', 'train'), + ('2020.1', 'ussuri'), ]) # The ugly duckling - must list releases oldest to newest @@ -205,6 +213,8 @@ ['2.20.0', '2.21.0']), ('train', ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0']), ]) # >= Liberty version->codename mapping @@ -219,6 +229,7 @@ ('18', 'rocky'), ('19', 'stein'), ('20', 'train'), + ('21', 'ussuri'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -230,6 +241,7 @@ ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -241,6 +253,7 @@ ('13', 
'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -252,6 +265,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -263,6 +277,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -274,6 +289,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -285,6 +301,7 @@ ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -296,6 +313,7 @@ ('17', 'rocky'), ('18', 'stein'), ('19', 'train'), + ('20', 'ussuri'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -307,6 +325,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), ]), } @@ -674,7 +693,7 @@ def openstack_upgrade_available(package): else: try: avail_vers = get_os_version_install_source(src) - except: + except Exception: avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1868,3 +1887,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None): configs.write_all() if resume_unit_helper: resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. + + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. 
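+
+    NOTE: call this from the leader unit only; leader_set() fails when
+    invoked on a non-leader unit.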
+ """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index a8e4bf88..c162de27 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,9 +37,13 @@ def __init__(self, secret_backend=None): ) def __call__(self): + import hvac + ctxt = {} + # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() - last_token = db.get('last-token') + # currently known-good secret-id secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): for unit in hookenv.related_units(relation_id): data = hookenv.relation_get(unit=unit, @@ -54,27 +58,48 @@ def __call__(self): # Tokens may change when secret_id's are being # reissued - if so use token to get new secret_id - if token != last_token: + token_success = False + try: secret_id = retrieve_secret_id( url=vault_url, token=token ) + token_success = True + except hvac.exceptions.InvalidRequest: + # Try next + pass + + if token_success: db.set('secret-id', secret_id) - db.set('last-token', token) db.flush() - ctxt = { - 'vault_url': vault_url, - 'role_id': json.loads(role_id), - 'secret_id': secret_id, - 'secret_backend': self.secret_backend, - } - vault_ca = data.get('vault_ca') - if vault_ca: - ctxt['vault_ca'] = json.loads(vault_ca) - self.complete = True - return ctxt - return {} + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + self.complete = True + break + else: + if secret_id: + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + if self.complete: + break + + if ctxt: + self.complete = True + + return ctxt def write_vaultlocker_conf(context, priority=100): diff --git a/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/python/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 39b1cd09..647f6e4b 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import tempfile from subprocess import CalledProcessError +from charmhelpers import deprecate + import six if not six.PY3: from UserDict import UserDict @@ -119,19 +121,19 @@ def log(message, level=None): raise -def action_log(message): - """Write an action progress message""" - command = ['action-log'] +def function_log(message): + """Write a function progress message""" + command = ['function-log'] if not isinstance(message, six.string_types): message = repr(message) command += [message[:SH_MAX_ARG]] - # Missing action-log should not cause failures in unit tests - # Send action_log output to stderr + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr try: subprocess.call(command) except OSError as e: if e.errno == errno.ENOENT: - message = "action-log: {}".format(message) + message = "function-log: 
{}".format(message) print(message, file=sys.stderr) else: raise @@ -964,9 +966,23 @@ def charm_dir(): return os.environ.get('CHARM_DIR') +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -975,36 +991,103 @@ def action_get(key=None): return action_data +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) def action_fail(message): - """Sets the action status to failed and sets the error message. + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. - The results set by action_set are preserved.""" + The results set by action_set are preserved. + """ subprocess.check_call(['action-fail', message]) +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. 
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + def action_name(): """Get the name of the currently executing action.""" return os.environ.get('JUJU_ACTION_NAME') +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + def action_uuid(): """Get the UUID of the currently executing action.""" return os.environ.get('JUJU_ACTION_UUID') +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + def action_tag(): """Get the tag for the currently executing action.""" return os.environ.get('JUJU_ACTION_TAG') +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 31225235..3ddaf0dd 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -182,6 +182,14 @@ 'train/proposed': 'bionic-proposed/train', 'bionic-train/proposed': 'bionic-proposed/train', 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', } diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 2cdd2f50..1ec62d54 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -181,7 +181,7 @@ def unmounted_disks(): for device in context.list_devices(DEVTYPE='disk'): if device['SUBSYSTEM'] == 'block': matched = False - for block_type in [u'dm', u'loop', u'ram', u'nbd']: + for block_type in [u'dm-', u'loop', u'ram', u'nbd']: if block_type in device.device_node: matched = True if matched: @@ -913,7 +913,12 @@ def is_pristine_disk(dev): """ want_bytes = 2048 - f = open(dev, 'rb') + try: + f = open(dev, 'rb') + except OSError as e: + log(e) + return False + data = f.read(want_bytes) read_bytes = len(data) if read_bytes != want_bytes: @@ -2310,14 +2315,19 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): def get_upgrade_position(osd_sorted_list, match_name): """Return the upgrade position for the given osd. - :param osd_sorted_list: list. Osds sorted - :param match_name: str. The osd name to match - :returns: int. 
The position or None if not found + :param osd_sorted_list: Osds sorted + :type osd_sorted_list: [str] + :param match_name: The osd name to match + :type match_name: str + :returns: The position of the name + :rtype: int + :raises: ValueError if name is not found """ for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - return None + raise ValueError("osd name '{}' not found in get_upgrade_position list" + .format(match_name)) # Edge cases: diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml index 143e0ecc..5729dfe4 100644 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -61,6 +61,11 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train/proposed + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-train relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -102,3 +107,9 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller From ed936f8237fc26b0e9ddf6b255732a4dade073d3 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 30 Jan 2020 17:23:09 +0100 Subject: [PATCH 1863/2699] Update docs to remove non-production notices Change-Id: I02f28239076eca7a8db791ac4253181d31197396 --- ceph-fs/src/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 161651ab..1aabb715 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -1,7 +1,7 @@ # CephFS Charm This charm exists to provide an example integration of CephFS, for the purpose -of test and reference. It is not intended for production use at this time. +of test and reference. # Overview From 9623052ccae806f40bda8ab490edca11dfd9184d Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 24 Jan 2020 12:49:56 -0500 Subject: [PATCH 1864/2699] Comprehensive review of ceph-mon README Review of README. Actions section was added. The removed upgrade information will be moved to the CDG upgrade-openstack appendix. The upgrade paths information I don't believe is necessary. The UCA method is already generically laid out in the CDG upgrade appendix. The only benefit is for the user to know what version of Ceph they will end up with, but this is hard to keep up to date in our documentation. The upgrade edge case section will be moved to the upgrade issues section of the upgrade appendix. Change-Id: Ie4d8d69dc400cc79aa966bd72d7c601882a50bfc --- ceph-mon/README.md | 241 ++++++++++++++++++++++++++++++--------------- 1 file changed, 164 insertions(+), 77 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 4a646de7..6ddb7d34 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -1,118 +1,205 @@ # Overview -Ceph is a distributed storage and network file system designed to provide +[Ceph][ceph-upstream] is a unified, distributed storage system designed for excellent performance, reliability, and scalability. -This charm deploys a Ceph monitor cluster. +The ceph-mon charm deploys Ceph monitor nodes, allowing one to create a monitor +cluster. It is used in conjunction with the [ceph-osd][ceph-osd-charm] charm. +Together, these charms can scale out the amount of storage available in a Ceph +cluster. 
# Usage -Boot things up by using: +## Deployment - juju deploy -n 3 ceph-mon +A cloud with three MON nodes is a typical design whereas three OSD nodes are +considered the minimum. For example, to deploy a Ceph cluster consisting of +three OSDs and three MONs: -By default the ceph-mon cluster will not bootstrap until 3 service units have -been deployed and started; this is to ensure that a quorum is achieved prior to -adding storage devices. + juju deploy --config ceph-osd.yaml -n 3 ceph-osd + juju deploy --to lxd:0 ceph-mon + juju add-unit --to lxd:1 ceph-mon + juju add-unit --to lxd:2 ceph-mon + juju add-relation ceph-osd ceph-mon + +Here, a containerised MON is running alongside each OSD. + +By default, the monitor cluster will not be complete until three ceph-mon units +have been deployed. This is to ensure that a quorum is achieved prior to the +addition of storage devices. + +See the [Ceph documentation][ceph-docs-monitors] for notes on monitor cluster +deployment strategies. + +> **Note**: Refer to the [Install OpenStack][cdg-install-openstack] page in the + OpenStack Charms Deployment Guide for instructions on installing a monitor + cluster for use with OpenStack. + +## Network spaces + +This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju +`v.2.0`). This feature optionally allows specific types of the application's +network traffic to be bound to subnets that the underlying hardware is +connected to. + +> **Note**: Spaces must be configured in the backing cloud prior to deployment. + +The ceph-mon charm exposes the following Ceph traffic types (bindings): + +- 'public' (front-side) +- 'cluster' (back-side) + +For example, providing that spaces 'data-space' and 'cluster-space' exist, the +deploy command above could look like this: + + juju deploy --config ceph-mon.yaml -n 3 ceph-mon \ + --bind "public=data-space cluster=cluster-space" + +Alternatively, configuration can be provided as part of a bundle: + +```yaml + ceph-osd: + charm: cs:ceph-mon + num_units: 1 + bindings: + public: data-space + cluster: cluster-space +``` + +Refer to the [Ceph Network Reference][ceph-docs-network-ref] to learn about the +implications of segregating Ceph network traffic. + +> **Note**: Existing ceph-mon units configured with the `ceph-public-network` + or `ceph-cluster-network` options will continue to honour them. Furthermore, + these options override any space bindings, if set. ## Actions -This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. To pause or resume, call: +This section lists Juju [actions][juju-docs-actions] supported by the charm. +Actions allow specific operations to be performed on a per-unit basis. -`juju action do --unit ceph-mon/0 pause-health` or `juju action do --unit ceph-mon/0 resume-health` +### copy-pool -## Scale Out Usage +Copy contents of a pool to a new pool. -You can use the Ceph OSD and Ceph Radosgw charms: +### create-cache-tier -- [Ceph OSD](https://jujucharms.com/ceph-osd) -- [Ceph Rados Gateway](https://jujucharms.com/ceph-radosgw) +Create a new cache tier. -## Rolling Upgrades +### create-crush-rule -ceph-mon and ceph-osd charms have the ability to initiate a rolling upgrade. -This is initiated by setting the config value for `source`. To perform a -rolling upgrade first set the source for ceph-mon. Watch `juju status`. -Once the monitor cluster is upgraded proceed to setting the ceph-osd source -setting. Again watch `juju status` for output. 
The monitors and osds will -sort themselves into a known order and upgrade one by one. As each server is -upgrading the upgrade code will down all the monitor or osd processes on that -server, apply the update and then restart them. You will notice in the -`juju status` output that the servers will tell you which previous server they -are waiting on. +Create a new replicated CRUSH rule to use on a pool. -#### Supported Upgrade Paths -Currently the following upgrade paths are supported using -the [Ubuntu Cloud Archive](https://wiki.ubuntu.com/OpenStack/CloudArchive): -- trusty-firefly -> trusty-hammer -- trusty-hammer -> trusty-jewel +### create-erasure-profile -Firefly is available in Trusty, Hammer is in Trusty-Juno (end of life), -Trusty-Kilo, Trusty-Liberty, and Jewel is available in Trusty-Mitaka. +Create a new erasure code profile to use on a pool. -For example if the current config source setting is: `cloud:trusty-liberty` -changing that to `cloud:trusty-mitaka` will initiate a rolling upgrade of -the monitor cluster from hammer to jewel. +### create-pool -#### Edge cases -There's an edge case in the upgrade code where if the previous node never -starts upgrading itself then the rolling upgrade can hang forever. If you -notice this has happened it can be fixed by setting the appropriate key in the -ceph monitor cluster. The monitor cluster will have -keys that look like `ceph-mon_ip-ceph-mon-0_1484680239.573482_start` and -`ceph-mon_ip-ceph-mon-0_1484680274.181742_stop`. What each server is looking for -is that stop key to indicate that the previous server upgraded successfully and -it's safe to take itself down. If the stop key is not present it will wait -10 minutes, then consider that server dead and move on. +Create a pool. -## Network Space support +### crushmap-update -This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. +Apply a new CRUSH map definition. -Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: +> **Warning**: This action can break your cluster in unexpected ways if + misused. - juju deploy ceph-mon --bind "public=data-space cluster=cluster-space" +### delete-erasure-profile -alternatively these can also be provided as part of a Juju native bundle configuration: +Delete an erasure code profile. - ceph-mon: - charm: cs:xenial/ceph-mon - num_units: 1 - bindings: - public: data-space - cluster: cluster-space +### delete-pool + +Delete a pool. + +### get-erasure-profile + +Display an erasure code profile. + +### get-health + +Display cluster health. + +### list-erasure-profiles + +List erasure code profiles. + +### list-pools + +List pools. + +### pause-health + +Pause the cluster's health operations. + +### pool-get + +Get a value for a pool. + +### pool-set + +Set a value for a pool. + +### pool-statistics + +Display a pool's utilisation statistics. + +### remove-cache-tier + +Remove a cache tier. + +### remove-pool-snapshot + +Remove a pool's snapshot. + +### rename-pool + +Rename a pool. + +### resume-health + +Resume the cluster's health operations. + +### security-checklist + +Validate the running configuration against the OpenStack security guides +checklist. + +### set-noout + +Set the cluster's 'noout' flag. 
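For example, to set 'noout' before planned maintenance (the unit name is
illustrative):

    juju run-action ceph-mon/0 set-noout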
-Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options effects network traffic within a Ceph deployment. +### set-pool-max-bytes -**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. +Set a pool's quota for the maximum number of bytes. -**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +### show-disk-free -**NOTE**: The monitor-hosts field is only used to migrate existing clusters to a juju managed solution and should be left blank otherwise. +Show disk utilisation by host and OSD. -# Contact Information +### snapshot-pool -## Authors +Create a pool snapshot. -- Paul Collins , -- James Page +### unset-noout -Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph/+filebug) +Unset the cluster's 'noout' flag. -## Ceph +# Bugs -- [Ceph website](http://ceph.com) -- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) -- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) +Please report bugs on [Launchpad][lp-bugs-charm-ceph-mon]. -# Technical Footnotes +For general charm questions refer to the OpenStack [Charm Guide][cg]. -This charm uses the new-style Ceph deployment as reverse-engineered from the -Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected -a different strategy to form the monitor cluster. Since we don't know the -names *or* addresses of the machines in advance, we use the _relation-joined_ -hook to wait for all three nodes to come up, and then write their addresses -to ceph.conf in the "mon host" parameter. After we initialize the monitor -cluster a quorum forms quickly, and OSD bringup proceeds. + -See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls. +[ceph-upstream]: https://ceph.io +[cg]: https://docs.openstack.org/charm-guide +[ceph-osd-charm]: https://jaas.ai/ceph-osd +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-spaces]: https://jaas.ai/docs/spaces +[ceph-docs-network-ref]: http://docs.ceph.com/docs/master/rados/configuration/network-config-ref +[ceph-docs-monitors]: https://docs.ceph.com/docs/master/dev/mon-bootstrap +[lp-bugs-charm-ceph-mon]: https://bugs.launchpad.net/charm-ceph-mon/+filebug +[cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html From 1f4a708f94129f6bea41568621246c39aceb0a2b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 31 Jan 2020 12:12:08 +0000 Subject: [PATCH 1865/2699] Ensure bootstrapped-osds count updated after add-disk When we add/zap disks it may change the overall osd count so need to ensure this is kept up-to-date. 
Change-Id: Ib55547f88316e80a8948ce808ea992c1402458f5 Closes-Bug: #1861293 --- ceph-osd/actions/add_disk.py | 18 ++++++- ceph-osd/unit_tests/test_actions_add_disk.py | 57 ++++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 ceph-osd/unit_tests/test_actions_add_disk.py diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 9ba49116..d16668a9 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -24,12 +24,14 @@ import charmhelpers.contrib.storage.linux.ceph as ch_ceph import charmhelpers.core.hookenv as hookenv +from charmhelpers.core.unitdata import kv + import ceph_hooks import ceph.utils def add_device(request, device_path, bucket=None): - ceph.utils.osdize(dev, hookenv.config('osd-format'), + ceph.utils.osdize(device_path, hookenv.config('osd-format'), ceph_hooks.get_journal_devices(), hookenv.config('ignore-device-errors'), hookenv.config('osd-encrypt'), @@ -37,7 +39,7 @@ def add_device(request, device_path, bucket=None): hookenv.config('osd-encrypt-keymanager')) # Make it fast! if hookenv.config('autotune'): - ceph.utils.tune_dev(dev) + ceph.utils.tune_dev(device_path) mounts = filter(lambda disk: device_path in disk.device, psutil.disk_partitions()) for osd in mounts: @@ -46,6 +48,18 @@ def add_device(request, device_path, bucket=None): 'op': 'move-osd-to-bucket', 'osd': "osd.{}".format(osd_id), 'bucket': bucket}) + + # Ensure mon's count of osds is accurate + db = kv() + bootstrapped_osds = len(db.get('osd-devices', [])) + for r_id in hookenv.relation_ids('mon'): + hookenv.relation_set( + relation_id=r_id, + relation_settings={ + 'bootstrapped-osds': bootstrapped_osds, + } + ) + return request diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py new file mode 100644 index 00000000..a5d711cd --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -0,0 +1,57 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from actions import add_disk + +from test_utils import CharmTestCase + + +class AddDiskActionTests(CharmTestCase): + def setUp(self): + super(AddDiskActionTests, self).setUp( + add_disk, ['hookenv', 'kv']) + self.kv.return_value = self.kv + + @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices') + @mock.patch.object(add_disk.ceph.utils, 'osdize') + def test_add_device(self, mock_osdize, mock_get_journal_devices): + + def fake_config(key): + return { + 'ignore-device-errors': True, + 'osd-encrypt': True, + 'bluestore': True, + 'osd-encrypt-keymanager': True, + 'autotune': False, + }.get(key) + + self.hookenv.config.side_effect = fake_config + mock_get_journal_devices.return_value = '' + self.hookenv.relation_ids.return_value = ['ceph:0'] + + db = mock.MagicMock() + self.kv.return_value = db + db.get.return_value = ['/dev/myosddev'] + + request = {'ops': []} + add_disk.add_device(request, '/dev/myosddev') + + call = mock.call(relation_id='ceph:0', + relation_settings={'bootstrapped-osds': 1}) + self.hookenv.relation_set.assert_has_calls([call]) + mock_osdize.assert_has_calls([mock.call('/dev/myosddev', + None, '', True, True, True, + True)]) From c97ff579f3ae51bb8c2545083a95cbd0269bd079 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 4 Feb 2020 16:38:13 +0000 Subject: [PATCH 1866/2699] Charmhelper sync for 20.02 Change-Id: Ic31214ea1126edfa0d75967c03fdd5f979c3f6c1 --- .../contrib/hardening/audits/apt.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 152 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 35 ++++ .../charmhelpers/fetch/ubuntu_apt_pkg.py | 30 ++++ ceph-mon/hooks/charmhelpers/osplatform.py | 3 + ceph-mon/lib/ceph/utils.py | 15 +- 6 files changed, 231 insertions(+), 9 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py index 67521e17..cad7bf73 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -52,7 +52,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): - self.pkgs = [pkgs] + self.pkgs = pkgs.split() else: self.pkgs = pkgs @@ -100,4 +100,5 @@ def delete_package(self, cache, pkg): apt_purge(pkg.name) def is_virtual_package(self, pkg): - return pkg.has_provides and not pkg.has_versions + return (pkg.get('has_provides', False) and + not pkg.get('has_versions', False)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index e99aba47..bc90804b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,10 @@ import six +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -2244,3 +2248,151 @@ def __call__(self): self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + """Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. 
Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. + + See LP Bug#1831935 for details. + + :returns: Status state and message + :rtype: Union[(None, None), (string, string)] + """ + existing_ovs_use_veth = ( + DHCPAgentContext.get_existing_ovs_use_veth()) + config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth() + + # Check settings are set and not None + if existing_ovs_use_veth is not None and config_ovs_use_veth is not None: + # Check for mismatch between existing config ini and juju config + if existing_ovs_use_veth != config_ovs_use_veth: + # Stop the line to avoid breakage + msg = ( + "The existing setting for dhcp_agent.ini ovs_use_veth, {}, " + "does not match the juju config setting, {}. This may lead to " + "VMs being unable to receive a DHCP IP. Either change the " + "juju config setting or dhcp agents may need to be recreated." + .format(existing_ovs_use_veth, config_ovs_use_veth)) + log(msg, ERROR) + return ( + "blocked", + "Mismatched existing and configured ovs-use-veth. See log.") + + # Everything is OK + return None, None + + +class DHCPAgentContext(OSContextGenerator): + + def __call__(self): + """Return the DHCPAGentContext. + + Return all DHCP Agent INI related configuration. + ovs unit is attached to (as a subordinate) and the 'dns_domain' from + the neutron-plugin-api relations (if one is set). + + :returns: Dictionary context + :rtype: Dict + """ + + ctxt = {} + dnsmasq_flags = config('dnsmasq-flags') + if dnsmasq_flags: + ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags) + ctxt['dns_servers'] = config('dns-servers') + + neutron_api_settings = NeutronAPIContext()() + + ctxt['debug'] = config('debug') + ctxt['instance_mtu'] = config('instance-mtu') + ctxt['ovs_use_veth'] = self.get_ovs_use_veth() + + ctxt['enable_metadata_network'] = config('enable-metadata-network') + ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata') + + if neutron_api_settings.get('dns_domain'): + ctxt['dns_domain'] = neutron_api_settings.get('dns_domain') + + # Override user supplied config for these plugins as these settings are + # mandatory + if config('plugin') in ['nvp', 'nsx', 'n1kv']: + ctxt['enable_metadata_network'] = True + ctxt['enable_isolated_metadata'] = True + + return ctxt + + @staticmethod + def get_existing_ovs_use_veth(): + """Return existing ovs_use_veth setting from dhcp_agent.ini. + + :returns: Boolean value of existing ovs_use_veth setting or None + :rtype: Optional[Bool] + """ + DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini" + existing_ovs_use_veth = None + # If there is a dhcp_agent.ini file read the current setting + if os.path.isfile(DHCP_AGENT_INI): + # config_ini does the right thing and returns None if the setting is + # commented. + existing_ovs_use_veth = ( + config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) + # Convert to Bool if necessary + if isinstance(existing_ovs_use_veth, six.string_types): + return bool_from_string(existing_ovs_use_veth) + return existing_ovs_use_veth + + @staticmethod + def parse_ovs_use_veth(): + """Parse the ovs-use-veth config setting. + + Parse the string config setting for ovs-use-veth and return a boolean + or None. + + bool_from_string will raise a ValueError if the string is not falsy or + truthy. 
+ + :raises: ValueError for invalid input + :returns: Boolean value of ovs-use-veth or None + :rtype: Optional[Bool] + """ + _config = config("ovs-use-veth") + # An unset parameter returns None. Just in case we will also check for + # an empty string: "". Ironically, (the problem we are trying to avoid) + # "False" returns True and "" returns False. + if _config is None or not _config: + # Return None + return + # bool_from_string handles many variations of true and false strings + # as well as upper and lowercases including: + # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] + return bool_from_string(_config) + + def get_ovs_use_veth(self): + """Return correct ovs_use_veth setting for use in dhcp_agent.ini. + + Get the right value from config or existing dhcp_agent.ini file. + Existing has precedence. Attempt to default to "False" without + disrupting existing deployments. Handle existing deployments and + upgrades safely. See LP Bug#1831935 + + :returns: Value to use for ovs_use_veth setting + :rtype: Bool + """ + _existing = self.get_existing_ovs_use_veth() + if _existing is not None: + return _existing + + _config = self.parse_ovs_use_veth() + if _config is None: + # New better default + return False + else: + return _config diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 566404a0..161199c4 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -44,6 +44,7 @@ INFO, ERROR, related_units, + relation_get, relation_ids, relation_set, status_set, @@ -331,6 +332,10 @@ DEFAULT_LOOPBACK_SIZE = '5G' +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + class CompareOpenStackReleases(BasicStringComparator): """Provide comparisons of OpenStack releases. @@ -1912,3 +1917,33 @@ def set_db_initialised(): """ juju_log('Setting db-initialised to True', 'DEBUG') leader_set({'db-initialised': True}) + + +def is_db_maintenance_mode(relid=None): + """Check relation data from notifications of db in maintenance mode. + + :returns: Whether db has notified it is in maintenance mode. 
+ :rtype: bool + """ + juju_log('Checking for maintenance notifications', 'DEBUG') + if relid: + r_ids = [relid] + else: + r_ids = relation_ids('shared-db') + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + notifications = [] + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + for key, value in settings.items(): + if value and key in DB_MAINTENANCE_KEYS: + juju_log( + 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), + 'DEBUG') + try: + notifications.append(bool_from_string(value)) + except ValueError: + juju_log( + 'Could not discern bool from {}'.format(value), + 'WARN') + pass + return True in notifications diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 104f91f1..929a75d7 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -38,6 +38,7 @@ import locale import os import subprocess +import sys class _container(dict): @@ -59,6 +60,13 @@ class Cache(object): def __init__(self, progress=None): pass + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + def __getitem__(self, package): """Get information about a package from apt and dpkg databases. @@ -178,6 +186,28 @@ def _apt_cache_show(self, packages): return pkgs +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + def init(): """Compability shim that does nothing.""" pass diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py index d9a4d5c0..c7fd1363 100644 --- a/ceph-mon/hooks/charmhelpers/osplatform.py +++ b/ceph-mon/hooks/charmhelpers/osplatform.py @@ -20,6 +20,9 @@ def get_platform(): # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/ceph/utils.py index 1ec62d54..7c970784 100644 --- a/ceph-mon/lib/ceph/utils.py +++ b/ceph-mon/lib/ceph/utils.py @@ -80,12 +80,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': - btrfs_package = 'btrfs-progs' -else: - btrfs_package = 'btrfs-tools' - -PACKAGES = ['ceph', 'gdisk', btrfs_package, +PACKAGES = ['ceph', 'gdisk', 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] @@ -2917,7 +2912,13 @@ def determine_packages(): :returns: list of ceph packages """ - return PACKAGES + packages = PACKAGES.copy() + if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' + else: + btrfs_package = 'btrfs-tools' + packages.append(btrfs_package) + return packages def bootstrap_manager(): From 59e67896236e0ecd492365421874989979208aa9 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 4 Feb 2020 16:38:19 +0000 Subject: [PATCH 1867/2699] Charmhelper sync for 20.02 Change-Id: I696c0f3303f038a66cf2298d10f081b580e0a9fe --- .../charmhelpers/contrib/hahelpers/cluster.py | 45 ++++++ .../contrib/hardening/audits/apt.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 152 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 35 ++++ .../charmhelpers/fetch/ubuntu_apt_pkg.py | 30 ++++ ceph-osd/hooks/charmhelpers/osplatform.py | 3 + ceph-osd/lib/ceph/utils.py | 15 +- 7 files changed, 276 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4a737e24..ba34fba0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -25,6 +25,7 @@ clustering-related helpers. """ +import functools import subprocess import os import time @@ -281,6 +282,10 @@ def determine_apache_port(public_port, singlenode_mode=False): return public_port - (i * 10) +determine_apache_port_single = functools.partial( + determine_apache_port, singlenode_mode=True) + + def get_hacluster_config(exclude_keys=None): ''' Obtains all relevant configuration from charm configuration required @@ -404,3 +409,43 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): log(msg, DEBUG) status_set('maintenance', msg) time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. 
+ :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. + :rtype: Tuple[List[str], List[int]] + """ + if external_services is None: + external_services = ['haproxy'] + if relation_ids('ha'): + for svc in external_services: + try: + services.remove(svc) + except ValueError: + pass + external_ports = [port_conv_f(p) for p in external_ports] + return services, external_ports diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py index 67521e17..cad7bf73 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -52,7 +52,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): - self.pkgs = [pkgs] + self.pkgs = pkgs.split() else: self.pkgs = pkgs @@ -100,4 +100,5 @@ def delete_package(self, cache, pkg): apt_purge(pkg.name) def is_virtual_package(self, pkg): - return pkg.has_provides and not pkg.has_versions + return (pkg.get('has_provides', False) and + not pkg.get('has_versions', False)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index e99aba47..bc90804b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,10 @@ import six +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -2244,3 +2248,151 @@ def __call__(self): self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + """Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. + + See LP Bug#1831935 for details. + + :returns: Status state and message + :rtype: Union[(None, None), (string, string)] + """ + existing_ovs_use_veth = ( + DHCPAgentContext.get_existing_ovs_use_veth()) + config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth() + + # Check settings are set and not None + if existing_ovs_use_veth is not None and config_ovs_use_veth is not None: + # Check for mismatch between existing config ini and juju config + if existing_ovs_use_veth != config_ovs_use_veth: + # Stop the line to avoid breakage + msg = ( + "The existing setting for dhcp_agent.ini ovs_use_veth, {}, " + "does not match the juju config setting, {}. This may lead to " + "VMs being unable to receive a DHCP IP. Either change the " + "juju config setting or dhcp agents may need to be recreated." + .format(existing_ovs_use_veth, config_ovs_use_veth)) + log(msg, ERROR) + return ( + "blocked", + "Mismatched existing and configured ovs-use-veth. See log.") + + # Everything is OK + return None, None + + +class DHCPAgentContext(OSContextGenerator): + + def __call__(self): + """Return the DHCPAGentContext. + + Return all DHCP Agent INI related configuration. 
+ ovs unit is attached to (as a subordinate) and the 'dns_domain' from + the neutron-plugin-api relations (if one is set). + + :returns: Dictionary context + :rtype: Dict + """ + + ctxt = {} + dnsmasq_flags = config('dnsmasq-flags') + if dnsmasq_flags: + ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags) + ctxt['dns_servers'] = config('dns-servers') + + neutron_api_settings = NeutronAPIContext()() + + ctxt['debug'] = config('debug') + ctxt['instance_mtu'] = config('instance-mtu') + ctxt['ovs_use_veth'] = self.get_ovs_use_veth() + + ctxt['enable_metadata_network'] = config('enable-metadata-network') + ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata') + + if neutron_api_settings.get('dns_domain'): + ctxt['dns_domain'] = neutron_api_settings.get('dns_domain') + + # Override user supplied config for these plugins as these settings are + # mandatory + if config('plugin') in ['nvp', 'nsx', 'n1kv']: + ctxt['enable_metadata_network'] = True + ctxt['enable_isolated_metadata'] = True + + return ctxt + + @staticmethod + def get_existing_ovs_use_veth(): + """Return existing ovs_use_veth setting from dhcp_agent.ini. + + :returns: Boolean value of existing ovs_use_veth setting or None + :rtype: Optional[Bool] + """ + DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini" + existing_ovs_use_veth = None + # If there is a dhcp_agent.ini file read the current setting + if os.path.isfile(DHCP_AGENT_INI): + # config_ini does the right thing and returns None if the setting is + # commented. + existing_ovs_use_veth = ( + config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) + # Convert to Bool if necessary + if isinstance(existing_ovs_use_veth, six.string_types): + return bool_from_string(existing_ovs_use_veth) + return existing_ovs_use_veth + + @staticmethod + def parse_ovs_use_veth(): + """Parse the ovs-use-veth config setting. + + Parse the string config setting for ovs-use-veth and return a boolean + or None. + + bool_from_string will raise a ValueError if the string is not falsy or + truthy. + + :raises: ValueError for invalid input + :returns: Boolean value of ovs-use-veth or None + :rtype: Optional[Bool] + """ + _config = config("ovs-use-veth") + # An unset parameter returns None. Just in case we will also check for + # an empty string: "". Ironically, (the problem we are trying to avoid) + # "False" returns True and "" returns False. + if _config is None or not _config: + # Return None + return + # bool_from_string handles many variations of true and false strings + # as well as upper and lowercases including: + # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] + return bool_from_string(_config) + + def get_ovs_use_veth(self): + """Return correct ovs_use_veth setting for use in dhcp_agent.ini. + + Get the right value from config or existing dhcp_agent.ini file. + Existing has precedence. Attempt to default to "False" without + disrupting existing deployments. Handle existing deployments and + upgrades safely. 
See LP Bug#1831935 + + :returns: Value to use for ovs_use_veth setting + :rtype: Bool + """ + _existing = self.get_existing_ovs_use_veth() + if _existing is not None: + return _existing + + _config = self.parse_ovs_use_veth() + if _config is None: + # New better default + return False + else: + return _config diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 566404a0..161199c4 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -44,6 +44,7 @@ INFO, ERROR, related_units, + relation_get, relation_ids, relation_set, status_set, @@ -331,6 +332,10 @@ DEFAULT_LOOPBACK_SIZE = '5G' +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + class CompareOpenStackReleases(BasicStringComparator): """Provide comparisons of OpenStack releases. @@ -1912,3 +1917,33 @@ def set_db_initialised(): """ juju_log('Setting db-initialised to True', 'DEBUG') leader_set({'db-initialised': True}) + + +def is_db_maintenance_mode(relid=None): + """Check relation data from notifications of db in maintenance mode. + + :returns: Whether db has notified it is in maintenance mode. + :rtype: bool + """ + juju_log('Checking for maintenance notifications', 'DEBUG') + if relid: + r_ids = [relid] + else: + r_ids = relation_ids('shared-db') + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + notifications = [] + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + for key, value in settings.items(): + if value and key in DB_MAINTENANCE_KEYS: + juju_log( + 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), + 'DEBUG') + try: + notifications.append(bool_from_string(value)) + except ValueError: + juju_log( + 'Could not discern bool from {}'.format(value), + 'WARN') + pass + return True in notifications diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 104f91f1..929a75d7 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -38,6 +38,7 @@ import locale import os import subprocess +import sys class _container(dict): @@ -59,6 +60,13 @@ class Cache(object): def __init__(self, progress=None): pass + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + def __getitem__(self, package): """Get information about a package from apt and dpkg databases. 
@@ -178,6 +186,28 @@ def _apt_cache_show(self, packages): return pkgs +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + def init(): """Compability shim that does nothing.""" pass diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py index d9a4d5c0..c7fd1363 100644 --- a/ceph-osd/hooks/charmhelpers/osplatform.py +++ b/ceph-osd/hooks/charmhelpers/osplatform.py @@ -20,6 +20,9 @@ def get_platform(): # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/ceph/utils.py index 1ec62d54..7c970784 100644 --- a/ceph-osd/lib/ceph/utils.py +++ b/ceph-osd/lib/ceph/utils.py @@ -80,12 +80,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': - btrfs_package = 'btrfs-progs' -else: - btrfs_package = 'btrfs-tools' - -PACKAGES = ['ceph', 'gdisk', btrfs_package, +PACKAGES = ['ceph', 'gdisk', 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] @@ -2917,7 +2912,13 @@ def determine_packages(): :returns: list of ceph packages """ - return PACKAGES + packages = PACKAGES.copy() + if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' + else: + btrfs_package = 'btrfs-tools' + packages.append(btrfs_package) + return packages def bootstrap_manager(): From f90a3a683f9606ce13a7022b868e915008bdf86c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 4 Feb 2020 16:38:30 +0000 Subject: [PATCH 1868/2699] Charmhelper sync for 20.02 Change-Id: Ic2e24d223124684d8609c23485e59e8a84f576fc --- .../charmhelpers/contrib/hahelpers/cluster.py | 45 ++++++ .../contrib/hardening/audits/apt.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 152 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 35 ++++ .../charmhelpers/fetch/ubuntu_apt_pkg.py | 30 ++++ ceph-radosgw/hooks/charmhelpers/osplatform.py | 3 + ceph-radosgw/lib/ceph/utils.py | 15 +- 7 files changed, 276 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4a737e24..ba34fba0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -25,6 +25,7 @@ clustering-related helpers. 
""" +import functools import subprocess import os import time @@ -281,6 +282,10 @@ def determine_apache_port(public_port, singlenode_mode=False): return public_port - (i * 10) +determine_apache_port_single = functools.partial( + determine_apache_port, singlenode_mode=True) + + def get_hacluster_config(exclude_keys=None): ''' Obtains all relevant configuration from charm configuration required @@ -404,3 +409,43 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): log(msg, DEBUG) status_set('maintenance', msg) time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. + :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. + :rtype: Tuple[List[str], List[int]] + """ + if external_services is None: + external_services = ['haproxy'] + if relation_ids('ha'): + for svc in external_services: + try: + services.remove(svc) + except ValueError: + pass + external_ports = [port_conv_f(p) for p in external_ports] + return services, external_ports diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py index 67521e17..cad7bf73 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -52,7 +52,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): - self.pkgs = [pkgs] + self.pkgs = pkgs.split() else: self.pkgs = pkgs @@ -100,4 +100,5 @@ def delete_package(self, cache, pkg): apt_purge(pkg.name) def is_virtual_package(self, pkg): - return pkg.has_provides and not pkg.has_versions + return (pkg.get('has_provides', False) and + not pkg.get('has_versions', False)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index e99aba47..bc90804b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,10 @@ import six +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -2244,3 +2248,151 @@ def __call__(self): self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + 
"""Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. + + See LP Bug#1831935 for details. + + :returns: Status state and message + :rtype: Union[(None, None), (string, string)] + """ + existing_ovs_use_veth = ( + DHCPAgentContext.get_existing_ovs_use_veth()) + config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth() + + # Check settings are set and not None + if existing_ovs_use_veth is not None and config_ovs_use_veth is not None: + # Check for mismatch between existing config ini and juju config + if existing_ovs_use_veth != config_ovs_use_veth: + # Stop the line to avoid breakage + msg = ( + "The existing setting for dhcp_agent.ini ovs_use_veth, {}, " + "does not match the juju config setting, {}. This may lead to " + "VMs being unable to receive a DHCP IP. Either change the " + "juju config setting or dhcp agents may need to be recreated." + .format(existing_ovs_use_veth, config_ovs_use_veth)) + log(msg, ERROR) + return ( + "blocked", + "Mismatched existing and configured ovs-use-veth. See log.") + + # Everything is OK + return None, None + + +class DHCPAgentContext(OSContextGenerator): + + def __call__(self): + """Return the DHCPAGentContext. + + Return all DHCP Agent INI related configuration. + ovs unit is attached to (as a subordinate) and the 'dns_domain' from + the neutron-plugin-api relations (if one is set). + + :returns: Dictionary context + :rtype: Dict + """ + + ctxt = {} + dnsmasq_flags = config('dnsmasq-flags') + if dnsmasq_flags: + ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags) + ctxt['dns_servers'] = config('dns-servers') + + neutron_api_settings = NeutronAPIContext()() + + ctxt['debug'] = config('debug') + ctxt['instance_mtu'] = config('instance-mtu') + ctxt['ovs_use_veth'] = self.get_ovs_use_veth() + + ctxt['enable_metadata_network'] = config('enable-metadata-network') + ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata') + + if neutron_api_settings.get('dns_domain'): + ctxt['dns_domain'] = neutron_api_settings.get('dns_domain') + + # Override user supplied config for these plugins as these settings are + # mandatory + if config('plugin') in ['nvp', 'nsx', 'n1kv']: + ctxt['enable_metadata_network'] = True + ctxt['enable_isolated_metadata'] = True + + return ctxt + + @staticmethod + def get_existing_ovs_use_veth(): + """Return existing ovs_use_veth setting from dhcp_agent.ini. + + :returns: Boolean value of existing ovs_use_veth setting or None + :rtype: Optional[Bool] + """ + DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini" + existing_ovs_use_veth = None + # If there is a dhcp_agent.ini file read the current setting + if os.path.isfile(DHCP_AGENT_INI): + # config_ini does the right thing and returns None if the setting is + # commented. + existing_ovs_use_veth = ( + config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) + # Convert to Bool if necessary + if isinstance(existing_ovs_use_veth, six.string_types): + return bool_from_string(existing_ovs_use_veth) + return existing_ovs_use_veth + + @staticmethod + def parse_ovs_use_veth(): + """Parse the ovs-use-veth config setting. + + Parse the string config setting for ovs-use-veth and return a boolean + or None. 
+ + bool_from_string will raise a ValueError if the string is not falsy or + truthy. + + :raises: ValueError for invalid input + :returns: Boolean value of ovs-use-veth or None + :rtype: Optional[Bool] + """ + _config = config("ovs-use-veth") + # An unset parameter returns None. Just in case we will also check for + # an empty string: "". Ironically, (the problem we are trying to avoid) + # "False" returns True and "" returns False. + if _config is None or not _config: + # Return None + return + # bool_from_string handles many variations of true and false strings + # as well as upper and lowercases including: + # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] + return bool_from_string(_config) + + def get_ovs_use_veth(self): + """Return correct ovs_use_veth setting for use in dhcp_agent.ini. + + Get the right value from config or existing dhcp_agent.ini file. + Existing has precedence. Attempt to default to "False" without + disrupting existing deployments. Handle existing deployments and + upgrades safely. See LP Bug#1831935 + + :returns: Value to use for ovs_use_veth setting + :rtype: Bool + """ + _existing = self.get_existing_ovs_use_veth() + if _existing is not None: + return _existing + + _config = self.parse_ovs_use_veth() + if _config is None: + # New better default + return False + else: + return _config diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 566404a0..161199c4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -44,6 +44,7 @@ INFO, ERROR, related_units, + relation_get, relation_ids, relation_set, status_set, @@ -331,6 +332,10 @@ DEFAULT_LOOPBACK_SIZE = '5G' +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + class CompareOpenStackReleases(BasicStringComparator): """Provide comparisons of OpenStack releases. @@ -1912,3 +1917,33 @@ def set_db_initialised(): """ juju_log('Setting db-initialised to True', 'DEBUG') leader_set({'db-initialised': True}) + + +def is_db_maintenance_mode(relid=None): + """Check relation data from notifications of db in maintenance mode. + + :returns: Whether db has notified it is in maintenance mode. 
+ :rtype: bool + """ + juju_log('Checking for maintenance notifications', 'DEBUG') + if relid: + r_ids = [relid] + else: + r_ids = relation_ids('shared-db') + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + notifications = [] + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + for key, value in settings.items(): + if value and key in DB_MAINTENANCE_KEYS: + juju_log( + 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), + 'DEBUG') + try: + notifications.append(bool_from_string(value)) + except ValueError: + juju_log( + 'Could not discern bool from {}'.format(value), + 'WARN') + pass + return True in notifications diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 104f91f1..929a75d7 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -38,6 +38,7 @@ import locale import os import subprocess +import sys class _container(dict): @@ -59,6 +60,13 @@ class Cache(object): def __init__(self, progress=None): pass + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + def __getitem__(self, package): """Get information about a package from apt and dpkg databases. @@ -178,6 +186,28 @@ def _apt_cache_show(self, packages): return pkgs +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + def init(): """Compability shim that does nothing.""" pass diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index d9a4d5c0..c7fd1363 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -20,6 +20,9 @@ def get_platform(): # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-radosgw/lib/ceph/utils.py b/ceph-radosgw/lib/ceph/utils.py index 1ec62d54..7c970784 100644 --- a/ceph-radosgw/lib/ceph/utils.py +++ b/ceph-radosgw/lib/ceph/utils.py @@ -80,12 +80,7 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': - btrfs_package = 'btrfs-progs' -else: - btrfs_package = 'btrfs-tools' - -PACKAGES = ['ceph', 'gdisk', btrfs_package, +PACKAGES = ['ceph', 'gdisk', 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] @@ -2917,7 +2912,13 @@ def determine_packages(): :returns: list of ceph packages """ - return PACKAGES + packages = PACKAGES.copy() + if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' + else: + btrfs_package = 'btrfs-tools' + packages.append(btrfs_package) + return packages def bootstrap_manager(): From 9565d5e0269d88fade0100bdd012dee900828a17 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Thu, 23 Jan 2020 15:04:41 -0500 Subject: [PATCH 1869/2699] Comprehensive review of ceph-osd README Review of README. Corrected doc URLs in actions.yaml. The trailing spaces on these lines are deliberate (forces a carriage return): 260 265 291 307 332 337 Change-Id: Ia61edbfcbf27bf9bc6b35a71793df39c7cb46907 --- ceph-osd/README.md | 435 +++++++++++++++++++++++++++--------------- ceph-osd/actions.yaml | 14 +- 2 files changed, 284 insertions(+), 165 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 84c84e9f..f5822ea1 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -1,243 +1,362 @@ -Overview -======== +# Overview -Ceph is a distributed storage and network file system designed to provide +[Ceph][ceph-upstream] is a unified, distributed storage system designed for excellent performance, reliability, and scalability. -This charm deploys additional Ceph OSD storage service units and should be -used in conjunction with the 'ceph-mon' charm to scale out the amount of -storage available in a Ceph cluster. +The ceph-osd charm deploys the Ceph object storage daemon (OSD) and manages its +volumes. It is used in conjunction with the [ceph-mon][ceph-mon-charm] charm. +Together, these charms can scale out the amount of storage available in a Ceph +cluster. -Usage -===== +# Usage -The charm also supports specification of the storage devices to use in the ceph -cluster:: +## Storage devices - osd-devices: - A list of devices that the charm will attempt to detect, initialise and - activate as ceph storage. +The list of all possible storage devices for the cluster is defined by the +`osd-devices` option (default value is `/dev/vdb`). Configuration is typically +provided via a YAML file, like `ceph-osd.yaml`. See the following examples: - If the charm detects pre-existing data on a device it will go into a - blocked state and the operator must resolve the situation utilizing the - `list-disks`, `zap-disk` and/or `blacklist-*` actions. - - This this can be a superset of the actual storage devices presented to - each service unit and can be changed post ceph-osd deployment using - `juju set`. - -For example:: +1. Block devices (regular) +```yaml ceph-osd: options: - osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde + osd-devices: /dev/vdb /dev/vdc /dev/vdd +``` -Example utilizing Juju storage:: +Each regular block device must be an absolute path to a device node. +2. 
Block devices (Juju storage) + +```yaml ceph-osd: storage: osd-devices: cinder,20G +``` + +See the [Juju documentation][juju-docs-storage] for guidance on implementing +Juju storage. + +3. Directory-backed OSDs + +```yaml + ceph-osd: + storage: + osd-devices: /var/tmp/osd-1 +``` + +> **Note**: OSD directories can no longer be created starting with Ceph + Nautilus. Existing OSD directories will continue to function after an upgrade + to Nautilus. + +The list defined by option `osd-devices` may affect newly added ceph-osd units +as well as existing units (the option may be modified after units have been +added). The charm will attempt to activate as Ceph storage any listed device +that is visible by the unit's underlying machine. To prevent the activation of +volumes on existing units the `blacklist-add-disk` action may be used. + +The configuration option is modified in the usual way. For instance, to have it +consist solely of devices '/dev/sdb' and '/dev/sdc': -Please refer to [Juju Storage Documentation](https://docs.jujucharms.com/devel/en/charms-storage) for details on support for various storage providers and cloud substrates. + juju config ceph-osd osd-devices='/dev/sdb /dev/sdc' -How to deploy:: +The charm will go into a blocked state (visible in `juju status` output) if it +detects pre-existing data on a device. In this case the operator can either +instruct the charm to ignore the disk (action `blacklist-add-disk`) or to have +it purge all data on the disk (action `zap-disk`). - juju deploy -n 3 ceph-osd - juju deploy ceph-mon --to lxd:0 - juju add-unit ceph-mon --to lxd:1 - juju add-unit ceph-mon --to lxd:2 +## Deployment + +A cloud with three MON nodes is a typical design whereas three OSD nodes are +considered the minimum. For example, to deploy a Ceph cluster consisting of +three OSDs and three MONs: + + juju deploy --config ceph-osd.yaml -n 3 ceph-osd + juju deploy --to lxd:0 ceph-mon + juju add-unit --to lxd:1 ceph-mon + juju add-unit --to lxd:2 ceph-mon juju add-relation ceph-osd ceph-mon -Once the 'ceph-mon' charm has bootstrapped the cluster, it will notify the -ceph-osd charm which will scan for the configured storage devices and add them -to the pool of available storage. +Here, a containerised MON is running alongside each OSD. + +> **Note**: Refer to the [Install OpenStack][cdg-install-openstack] page in the + OpenStack Charms Deployment Guide for instructions on installing the ceph-osd + application for use with OpenStack. -Network Space support -===================== +For each ceph-osd unit, the ceph-osd charm will scan for all the devices +configured via the `osd-devices` option and attempt to assign to it all the +ones it finds. The cluster's initial pool of available storage is the "sum" of +all these assigned devices. -This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. +## Network spaces -Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings: +This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju +`v.2.0`). This feature optionally allows specific types of the application's +network traffic to be bound to subnets that the underlying hardware is +connected to. - juju deploy ceph-osd --bind "public=data-space cluster=cluster-space" +> **Note**: Spaces must be configured in the backing cloud prior to deployment. 
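Returning briefly to the device selection described above: only configured devices that are actually visible on the unit's machine, and not blacklisted, are candidates for activation. A minimal sketch of that selection logic (the function and its arguments are illustrative, not the charm's actual code):

```python
# Editorial sketch: reduce the space-separated osd-devices option to
# device nodes that exist on this machine and are not blacklisted.
import os

def eligible_devices(osd_devices, blacklist):
    # osd_devices is the option value, e.g. '/dev/sdb /dev/sdc'.
    blacklisted = set(blacklist)
    return [dev for dev in osd_devices.split()
            if os.path.exists(dev) and dev not in blacklisted]

# e.g. eligible_devices('/dev/sdb /dev/sdc', ['/dev/sdc']) -> ['/dev/sdb']
```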
-alternatively these can also be provided as part of a Juju native bundle configuration: +The ceph-osd charm exposes the following Ceph traffic types (bindings): +- 'public' (front-side) +- 'cluster' (back-side) + +For example, providing that spaces 'data-space' and 'cluster-space' exist, the +deploy command above could look like this: + + juju deploy --config ceph-osd.yaml -n 3 ceph-osd \ + --bind "public=data-space cluster=cluster-space" + +Alternatively, configuration can be provided as part of a bundle: + +```yaml ceph-osd: - charm: cs:xenial/ceph-osd + charm: cs:ceph-osd num_units: 1 bindings: public: data-space cluster: cluster-space +``` -Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options effects network traffic within a Ceph deployment. +Refer to the [Ceph Network Reference][ceph-docs-network-ref] to learn about the +implications of segregating Ceph network traffic. -**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them. +> **Note**: Existing ceph-osd units configured with the `ceph-public-network` + or `ceph-cluster-network` options will continue to honour them. Furthermore, + these options override any space bindings, if set. -**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +## AppArmor profiles -AppArmor Profiles -================= +Although AppArmor is not enabled for Ceph by default, an AppArmor profile can +be generated by the charm by assigning a value of 'complain', 'enforce', or +'disable' (the default) to option `aa-profile-mode`. -AppArmor is not enforced for Ceph by default. An AppArmor profile can be generated by the charm. However, great care must be taken. +> **Caution**: Enabling an AppArmor profile is disruptive to a running Ceph + cluster as all ceph-osd processes must be restarted. -Changing the value of the ```aa-profile-mode``` option is disruptive to a running Ceph cluster as all ceph-osd processes must be restarted as part of changing the AppArmor profile enforcement mode. +The new profile has a narrow supported use case, and it should always be +verified in pre-production against the specific configurations and topologies +intended for production. -The generated AppArmor profile currently has a narrow supported use case, and it should always be verified in pre-production against the specific configurations and topologies intended for production. +The profiles generated by the charm should **not** be used in the following +scenarios: -The AppArmor profile(s) which are generated by the charm should NOT yet be used in the following scenarios: - - When there are separate journal devices. - - On any version of Ceph prior to Luminous. - - On any version of Ubuntu other than 16.04. - - With Bluestore enabled. +- On any version of Ubuntu older than 16.04 +- On any version of Ceph older than Luminous +- When OSD journal devices are in use +- When Ceph BlueStore is enabled +## Block device encryption -Block Device Encryption -======================= - -The ceph-osd charm supports encryption of underlying block devices supporting OSD's. - -To use the 'native' key management approach (where dm-crypt keys are stored in the -ceph-mon cluster), simply set the 'osd-encrypt' configuration option:: +The ceph-osd charm supports encryption for OSD volumes that are backed by block +devices. 
To use Ceph's native key management framework, available since Ceph +Jewel, set option `osd-encrypt` for the ceph-osd charm: +```yaml ceph-osd: options: osd-encrypt: True +``` -**NOTE:** This is supported for Ceph Jewel or later. +Here, dm-crypt keys are stored in the MON sub-cluster. -Alternatively, encryption keys can be stored in Vault; this requires deployment of -the vault charm (and associated initialization of vault - see the Vault charm for -details) and configuration of the 'osd-encrypt' and 'osd-encrypt-keymanager' -options:: +Alternatively, since Ceph Luminous, encryption keys can be stored in Vault, +which is deployed and initialised via the [vault][vault-charm] charm. Set +options `osd-encrypt` and `osd-encrypt-keymanager` for the ceph-osd charm: +```yaml ceph-osd: options: osd-encrypt: True osd-encrypt-keymanager: vault +``` + +> **Important**: Post deployment configuration will only affect block devices + associated with **new** ceph-osd units. + +## Actions + +This section covers Juju [actions][juju-docs-actions] supported by the charm. +Actions allow specific operations to be performed on a per-unit basis. + +### osd-out + +Set as 'out' all OSD volumes on a unit. + +> **Warning**: This action has the potential of impacting your cluster + significantly. The [Ceph documentation][ceph-docs-removing-osds] on this + topic is considered essential reading. + +The `osd-out` action sets **all** OSDs on the unit as 'out'. Unless the cluster +itself is set to 'noout' this action will cause Ceph to rebalance data by +migrating PGs out of the unit's OSDs and onto OSDs available on other units. +The impact is twofold: + +1. The available space on the remaining OSDs is reduced. Not only is there less + space for future workloads but there is a danger of exceeding the cluster's + storage capacity. +1. The traffic and CPU load on the cluster is increased. + +> **Note**: It has been reported that setting OSDs as 'out' may cause some PGs + to get stuck in the 'active+remapped' state. This is an upstream issue. + +The [ceph-mon][ceph-mon-charm] charm has an action called `set-noout` that sets +'noout' for the cluster. + +It may be perfectly fine to have data rebalanced. The decisive factor is +whether the OSDs are being paused temporarily (e.g. the underlying machine is +scheduled for maintenance) or whether they are being removed from the cluster +completely (e.g. the storage hardware is reaching EOL). + +Example: + + juju run-action --wait ceph-osd/4 osd-out + +### osd-in + +Set as 'in' all OSD volumes on a unit. + +The `osd-in` action is reciprocal to the `osd-out` action. The OSDs are set to +'in'. It is typically used when the `osd-out` action was used in conjunction +with the cluster 'noout' flag. + +Example: + + juju run-action --wait ceph-osd/4 osd-in + +### list-disks + +List disks known to a unit. + +The `list-disks` action lists the unit's block devices by categorising them in +three ways: + +- `disks`: visible (known by udev), unused (not mounted), and not designated as + an OSD journal (via the `osd-journal` configuration option) + +- `blacklist`: like `disks` but blacklisted (see action `blacklist-add-disk`) + +- `non-pristine`: like `disks` but not eligible for use due to the presence of + existing data + +Example: + + juju run-action --wait ceph-osd/4 list-disks + +### add-disk + +Add a disk to a unit. + +A ceph-osd unit is automatically assigned OSD volumes based on the current +value of the `osd-devices` application option. 
The `add-disk` action allows the +operator to manually add OSD volumes (for disks that are not listed by +`osd-devices`) to an existing unit. + +**Parameters** + + + +- `osd-devices` (required) + A space-separated list of devices to format and initialise as OSD volumes. + + + +- `bucket` + The name of a Ceph bucket to add these devices to. + +Example: + + juju run-action --wait ceph-osd/4 add-disk osd-devices=/dev/vde -**NOTE:** This option is only supported with Ceph Luminous or later. +### blacklist-add-disk -**NOTE:** Changing these options post deployment will only take effect for any -new block devices added to the ceph-osd application; existing OSD devices will -not be encrypted. +Add a disk to a unit's blacklist. -Actions -======= -The charm offers [actions](https://docs.jujucharms.com/devel/en/actions) which -may be used to perform operational tasks on individual units. +The `blacklist-add-disk` action allows the operator to add disks (that are +visible to the unit's underlying machine) to the unit's blacklist. A +blacklisted device will not be initialised as an OSD volume when the value of +the `osd-devices` application option changes. This action does not prevent a +device from being activated via the `add-disk` action. -pause ------ -**USE WITH CAUTION** - Set the local osd units in the charm to 'out' but -does not stop the osds. Unless the osd cluster is set to noout (see below), -this removes them from the ceph cluster and forces ceph to migrate the PGs -to other OSDs in the cluster. +Use the `list-disks` action to list the unit's blacklist entries. -From [upstream documentation](http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd) -"Do not let your cluster reach its full ratio when removing an OSD. - Removing OSDs could cause the cluster to reach or exceed its full ratio." +> **Important**: This action and blacklist do not have any effect on current + OSD volumes. -Also note that for small clusters you may encounter the corner case where -some PGs remain stuck in the active+remapped state. Refer to the above link -on how to resolve this. +**Parameters** -`pause-health` (on a ceph-mon) unit can be used before pausing a ceph-osd -unit to stop the cluster rebalancing the data off this ceph-osd unit. -`pause-health` sets 'noout' on the cluster such that it will not try to -rebalance the data accross the remaining units. + -It is up to the user of the charm to determine whether pause-health should -be used as it depends on whether the osd is being paused for maintenance or -to remove it from the cluster completely. +- `osd-devices` (required) + A space-separated list of devices to add to a unit's blacklist. -**NOTE** the `pause` action does NOT stop the ceph-osd processes. +Example: -resume ------- -Set the local osd units in the charm to 'in'. + juju run-action --wait ceph-osd/0 \ + blacklist-add-disk osd-devices='/dev/vda /dev/vdf' +### blacklist-remove-disk -list-disks ----------- -List disks +Remove a disk from a unit's blacklist. -The 'disks' key is populated with block devices that are known by udev, -are not mounted and not mentioned in 'osd-journal' configuration option. +**Parameters** -The 'blacklist' key is populated with osd-devices in the blacklist stored -in the local kv store of this specific unit. + -The 'non-pristine' key is populated with block devices that are known by -udev, are not mounted, not mentioned in 'osd-journal' configuration option -and are currently not eligible for use because of presence of foreign data. 
+- `osd-devices` (required) + A space-separated list of devices to remove from a unit's blacklist. -add-disk --------- -Add disk(s) to Ceph +Each device should have an existing entry in the unit's blacklist. Use the +`list-disks` action to list the unit's blacklist entries. -#### Parameters -- `osd-devices` (required) - - The devices to format and set up as osd volumes. -- `bucket` - - The name of the bucket in Ceph to add these devices into +Example: -blacklist-add-disk ------------------- -Add disk(s) to blacklist. Blacklisted disks will not be -initialized for use with Ceph even if listed in the application -level osd-devices configuration option. + juju run-action --wait ceph-osd/1 \ + blacklist-remove-disk osd-devices=/dev/vdb -The current blacklist can be viewed with list-disks action. +### zap-disk -**NOTE** This action and blacklist will not have any effect on -already initialized disks. +Purge a unit's disk of all data. -#### Parameters -- `osd-devices` (required) - - A space-separated list of devices to add to blacklist. +In order to prevent unintentional data loss, the charm will not use a disk that +has existing data already on it. To forcibly make a disk available, the +`zap-disk` action can be used. Due to the destructive nature of this action the +`i-really-mean-it` option must be passed. This action is normally followed by +the `add-disk` action. - Each element should be a absolute path to a device node or filesystem - directory (the latter is supported for ceph >= 0.56.6). +**Parameters** - Example: '/dev/vdb /var/tmp/test-osd' + -blacklist-remove-disk ---------------------- -Remove disk(s) from blacklist. +- `devices` (required) + A space-separated list of devices to be recycled. -#### Parameters -- `osd-devices` (required) - - A space-separated list of devices to remove from blacklist. + - Each element should be a existing entry in the units blacklist. - Use list-disks action to list current blacklist entries. +- `i-really-mean-it` (required) + An option that acts as a confirmation for performing the action. - Example: '/dev/vdb /var/tmp/test-osd' +Example: -zap-disk --------- -Purge disk of all data and signatures for use by Ceph + juju run-action --wait ceph-osd/3 zap-disk i-really-mean-it devices=/dev/vdc -This action can be necessary in cases where a Ceph cluster is being -redeployed as the charm defaults to skipping disks that look like Ceph -devices in order to preserve data. In order to forcibly redeploy, the -admin is required to perform this action for each disk to be re-consumed. +# Bugs -In addition to triggering this action, it is required to pass an additional -parameter option of `i-really-mean-it` to ensure that the -administrator is aware that this *will* cause data loss on the specified -device(s) +Please report bugs on [Launchpad][lp-bugs-charm-ceph-osd]. -#### Parameters -- `devices` (required) - - A space-separated list of devices to remove the partition table from. -- `i-really-mean-it` (required) - - This must be toggled to enable actually performing this action +For general charm questions refer to the OpenStack [Charm Guide][cg]. 
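As a usage sketch combining the actions documented above (the unit names and device paths are illustrative, and the ceph-mon `unset-noout` action is assumed here as the counterpart of the `set-noout` action mentioned earlier, not verified), a temporary maintenance window that avoids rebalancing could look like:

    juju run-action --wait ceph-mon/0 set-noout
    juju run-action --wait ceph-osd/4 osd-out
    # ...service the machine hosting ceph-osd/4...
    juju run-action --wait ceph-osd/4 osd-in
    juju run-action --wait ceph-mon/0 unset-noout

Similarly, recycling a disk that holds stale data chains `zap-disk` into `add-disk`:

    juju run-action --wait ceph-osd/3 zap-disk i-really-mean-it devices=/dev/vdc
    juju run-action --wait ceph-osd/3 add-disk osd-devices=/dev/vdc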
-Contact Information -=================== + -Author: James Page -Report bugs at: http://bugs.launchpad.net/charm-ceph-osd/+filebug -Location: http://jujucharms.com/ceph-osd +[ceph-upstream]: https://ceph.io +[cg]: https://docs.openstack.org/charm-guide +[ceph-mon-charm]: https://jaas.ai/ceph-mon +[vault-charm]: https://jaas.ai/vault +[juju-docs-storage]: https://jaas.ai/docs/storage +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-spaces]: https://jaas.ai/docs/spaces +[ceph-docs-removing-osds]: https://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/ +[ceph-docs-network-ref]: http://docs.ceph.com/docs/master/rados/configuration/network-config-ref +[lp-bugs-charm-ceph-osd]: https://bugs.launchpad.net/charm-ceph-osd/+filebug +[cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 5302bac4..880a922d 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -8,22 +8,22 @@ osd-out: description: | \ USE WITH CAUTION - Mark unit OSDs as 'out'. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ osd-in: description: | \ Set the local osd units in the charm to 'in'. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ list-disks: description: | \ List disks. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ add-disk: description: | \ Add disk(s) to Ceph. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ params: osd-devices: type: string @@ -37,7 +37,7 @@ blacklist-add-disk: description: | \ Add disk(s) to blacklist. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ params: osd-devices: type: string @@ -54,7 +54,7 @@ blacklist-remove-disk: description: | \ Remove disk(s) from blacklist. - Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ params: osd-devices: type: string @@ -71,7 +71,7 @@ zap-disk: description: | \ USE WITH CAUTION - Purge disk of all data and signatures for use by Ceph. 
- Documentation: https://jujucharms.com/ceph-osd/ + Documentation: https://jaas.ai/ceph-osd/ params: devices: type: string From 87e2e716ab7f35391f84509a1fd8686716a84f7c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 4 Feb 2020 16:38:24 +0000 Subject: [PATCH 1870/2699] Charmhelper sync for 20.02 Change-Id: Ib2e4cb4f12c1d0868af73c8b690b7cd5ebff76c2 --- .../contrib/hardening/audits/apt.py | 5 +-- .../charmhelpers/contrib/openstack/utils.py | 35 +++++++++++++++++++ .../charmhelpers/fetch/ubuntu_apt_pkg.py | 30 ++++++++++++++++ ceph-proxy/charmhelpers/osplatform.py | 3 ++ ceph-proxy/metadata.yaml | 1 - ceph-proxy/tests/tests.yaml | 1 - 6 files changed, 71 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py index 67521e17..cad7bf73 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py @@ -52,7 +52,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): - self.pkgs = [pkgs] + self.pkgs = pkgs.split() else: self.pkgs = pkgs @@ -100,4 +100,5 @@ def delete_package(self, cache, pkg): apt_purge(pkg.name) def is_virtual_package(self, pkg): - return pkg.has_provides and not pkg.has_versions + return (pkg.get('has_provides', False) and + not pkg.get('has_versions', False)) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 566404a0..161199c4 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -44,6 +44,7 @@ INFO, ERROR, related_units, + relation_get, relation_ids, relation_set, status_set, @@ -331,6 +332,10 @@ DEFAULT_LOOPBACK_SIZE = '5G' +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + class CompareOpenStackReleases(BasicStringComparator): """Provide comparisons of OpenStack releases. @@ -1912,3 +1917,33 @@ def set_db_initialised(): """ juju_log('Setting db-initialised to True', 'DEBUG') leader_set({'db-initialised': True}) + + +def is_db_maintenance_mode(relid=None): + """Check relation data from notifications of db in maintenance mode. + + :returns: Whether db has notified it is in maintenance mode. 
+ :rtype: bool + """ + juju_log('Checking for maintenance notifications', 'DEBUG') + if relid: + r_ids = [relid] + else: + r_ids = relation_ids('shared-db') + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + notifications = [] + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + for key, value in settings.items(): + if value and key in DB_MAINTENANCE_KEYS: + juju_log( + 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), + 'DEBUG') + try: + notifications.append(bool_from_string(value)) + except ValueError: + juju_log( + 'Could not discern bool from {}'.format(value), + 'WARN') + pass + return True in notifications diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index 104f91f1..929a75d7 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -38,6 +38,7 @@ import locale import os import subprocess +import sys class _container(dict): @@ -59,6 +60,13 @@ class Cache(object): def __init__(self, progress=None): pass + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + def __getitem__(self, package): """Get information about a package from apt and dpkg databases. @@ -178,6 +186,28 @@ def _apt_cache_show(self, packages): return pkgs +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + def init(): """Compability shim that does nothing.""" pass diff --git a/ceph-proxy/charmhelpers/osplatform.py b/ceph-proxy/charmhelpers/osplatform.py index d9a4d5c0..c7fd1363 100644 --- a/ceph-proxy/charmhelpers/osplatform.py +++ b/ceph-proxy/charmhelpers/osplatform.py @@ -20,6 +20,9 @@ def get_platform(): # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index e84ab9b5..de628f67 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic - - disco - eoan - trusty extra-bindings: diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 841465e5..b7448ec6 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -13,7 +13,6 @@ gate_bundles: - bionic-rocky # mimic - bionic-stein - bionic-train - - disco-stein dev_bundles: # Icehouse - trusty-icehouse From 7969884710bf4343df8a6caf971626e773ba08d7 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 5 Feb 2020 11:10:49 -0500 Subject: [PATCH 1871/2699] Fix typo in bundle yaml Change-Id: I5efe8ebc21a2726d13c1c26fd5bbf511b0b6ac64 --- ceph-mon/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 6ddb7d34..83828f19 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -58,7 +58,7 @@ deploy command above could look like this: Alternatively, configuration can be provided as part of a bundle: ```yaml - ceph-osd: + ceph-mon: charm: cs:ceph-mon num_units: 1 bindings: From 1fa35a31eef2420cdd32bbc8bc0fcf31d9640d01 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 5 Feb 2020 16:52:34 +0000 Subject: [PATCH 1872/2699] Rebuild for 20.02 release Change-Id: Ie4f42115d1a8f8ea19b24df2adae71e4df3a2d77 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 862a4081..d9293a5b 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -6a4e8d66-b54e-11e9-9e21-27ae1864b062 +d1c16faa-4837-11ea-810e-97e1f362d969 From 7f707f1469dcd3bacc61c42d9c6d9f65deb34516 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 5 Feb 2020 16:52:41 +0000 Subject: [PATCH 1873/2699] Rebuild for 20.02 release Remove Disco Change-Id: I77c66884fd47508b46eba7152cffb644e4935204 --- ceph-rbd-mirror/rebuild | 2 +- ceph-rbd-mirror/src/metadata.yaml | 1 - ceph-rbd-mirror/src/tests/tests.yaml | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 862a4081..d9293a5b 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -6a4e8d66-b54e-11e9-9e21-27ae1864b062 +d1c16faa-4837-11ea-810e-97e1f362d969 diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index e8776293..7de422e9 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -17,7 +17,6 @@ tags: series: - xenial - bionic - - disco - eoan extra-bindings: public: diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index a9bceb8d..f62a849b 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -2,7 +2,6 @@ charm_name: ceph-rbd-mirror smoke_bundles: - bionic-stein gate_bundles: -- disco-stein - bionic-stein - bionic-rocky - bionic-queens From 3405c20c922ef2205459697c9414921437583a5b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 12 Feb 2020 11:26:46 +0100 Subject: [PATCH 1874/2699] Ensure that we define `unit` before use When ceph-proxy is configured prior to being related to clients, it is possible for the units to go into a hook error because of an undefined variable. This change ensures that we do correctly define the unit before we use it. Change-Id: Ic6e28783bde4fc342d7c5ef1d733e69a03b702fe Closes-Bug: #1862487 --- ceph-proxy/hooks/ceph_hooks.py | 6 ++++-- ceph-proxy/unit_tests/test_ceph_hooks.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index a637ff69..53568cd9 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -146,7 +146,8 @@ def notify_radosgws(): def notify_client(): for relid in relation_ids('client'): - client_relation_joined(relid) + for unit in related_units(relid): + client_relation_joined(relid=relid, unit=unit) @hooks.hook('radosgw-relation-changed') @@ -195,7 +196,8 @@ def client_relation_joined(relid=None, unit=None): units = related_units(relid) if len(units) > 0: service_name = units[0].split('/')[0] - + if unit is None: + unit = units[0] if service_name is not None: ceph_addrs = config('monitor-hosts') data = {'key': ceph.get_named_key(service_name), diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index e6b4f089..63dccdb5 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -87,7 +87,9 @@ def test_emit_cephconf(self, mock_client_rel, mock_rgw_rel, def c(k): x = {'radosgw': ['rados:1'], 'client': ['client:1'], - 'rados:1': ['rados/1']} + 'rados:1': ['rados/1'], + 'client:1': ['client/1'], + } return x[k] self.relation_ids.side_effect = c @@ -124,7 +126,7 @@ def c(k): context, owner='ceph-user', perms=0o600) mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1') - mock_client_rel.assert_called_with('client:1') + mock_client_rel.assert_called_with(relid='client:1', unit='client/1') @mock.patch.object(hooks.ceph, 'ceph_user') @mock.patch('subprocess.check_output') From 8b2941ab5f14e7ebbf01b215256b88779cc05f2c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Feb 2020 15:15:04 +0100 Subject: [PATCH 1875/2699] Add a new functional test to CephFS Change-Id: I03dab145e96dbabf91a8d76bbf53464d6b6ec63c func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/175 --- ceph-fs/src/tests/bundles/bionic-queens.yaml | 65 +++++++++----- ceph-fs/src/tests/bundles/bionic-rocky.yaml | 68 ++++++++------ ceph-fs/src/tests/bundles/bionic-stein.yaml | 94 +++++++++++--------- 
ceph-fs/src/tests/bundles/bionic-train.yaml | 90 +++++++++++-------- ceph-fs/src/tests/bundles/cosmic-rocky.yaml | 67 ++++++++------ ceph-fs/src/tests/bundles/disco-stein.yaml | 67 ++++++++------ ceph-fs/src/tests/bundles/xenial-mitaka.yaml | 67 ++++++++------ ceph-fs/src/tests/bundles/xenial-ocata.yaml | 70 +++++++++------ ceph-fs/src/tests/bundles/xenial-pike.yaml | 78 +++++++++------- ceph-fs/src/tests/bundles/xenial-queens.yaml | 70 +++++++++------ ceph-fs/src/tests/tests.yaml | 13 +-- 11 files changed, 455 insertions(+), 294 deletions(-) diff --git a/ceph-fs/src/tests/bundles/bionic-queens.yaml b/ceph-fs/src/tests/bundles/bionic-queens.yaml index 74640d30..fc6301f2 100644 --- a/ceph-fs/src/tests/bundles/bionic-queens.yaml +++ b/ceph-fs/src/tests/bundles/bionic-queens.yaml @@ -16,7 +16,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 @@ -30,26 +29,38 @@ applications: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + options: + network-manager: Neutron + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -69,18 +80,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -93,3 +92,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/bundles/bionic-rocky.yaml b/ceph-fs/src/tests/bundles/bionic-rocky.yaml 
index 376deaaa..90c488c6 100644 --- a/ceph-fs/src/tests/bundles/bionic-rocky.yaml +++ b/ceph-fs/src/tests/bundles/bionic-rocky.yaml @@ -19,7 +19,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' source: cloud:bionic-rocky percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster @@ -39,32 +38,43 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:bionic-rocky - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G options: - block-device: 'None' - glance-api-version: '2' + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh openstack-origin: cloud:bionic-rocky - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: + network-manager: Neutron + openstack-origin: cloud:bionic-rocky + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: cloud:bionic-rocky + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex openstack-origin: cloud:bionic-rocky relations: - - ceph-mon:mds @@ -85,18 +95,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -109,3 +107,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/bundles/bionic-stein.yaml b/ceph-fs/src/tests/bundles/bionic-stein.yaml index fc02435f..3b05d7c6 100644 --- a/ceph-fs/src/tests/bundles/bionic-stein.yaml +++ b/ceph-fs/src/tests/bundles/bionic-stein.yaml @@ -1,71 +1,79 @@ series: bionic +options: + source: &source cloud:bionic-stein applications: ceph-fs: charm: ceph-fs series: bionic num_units: 1 options: - source: cloud:bionic-stein + source: *source ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: osd-devices: 
'cinder,10G' options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-stein + source: *source ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:bionic-stein + source: *source percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-stein + source: *source rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:bionic-stein + source: *source keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - openstack-origin: cloud:bionic-stein - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-stein + openstack-origin: *source glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:bionic-stein - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-stein - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + openstack-origin: *source nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - openstack-origin: cloud:bionic-stein + network-manager: Neutron + openstack-origin: *source + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + constraints: mem=8G root-disk=60G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *source + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *source + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *source relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -85,18 +93,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -109,3 +105,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/bionic-train.yaml 
b/ceph-fs/src/tests/bundles/bionic-train.yaml index 26eeaae7..b81fe488 100644 --- a/ceph-fs/src/tests/bundles/bionic-train.yaml +++ b/ceph-fs/src/tests/bundles/bionic-train.yaml @@ -5,7 +5,7 @@ applications: series: bionic num_units: 1 options: - source: cloud:bionic-train/proposed + source: cloud:bionic-train ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 @@ -13,63 +13,75 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train/proposed + source: cloud:bionic-train ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' - source: cloud:bionic-train/proposed + source: cloud:bionic-train percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: dataset-size: 25% max-connections: 1000 - source: cloud:bionic-train/proposed + source: cloud:bionic-train rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:bionic-train/proposed + source: cloud:bionic-train keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train/proposed + openstack-origin: cloud:bionic-train glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:bionic-train/proposed - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-train/proposed - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + openstack-origin: cloud:bionic-train nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + + options: + network-manager: Neutron + openstack-origin: cloud:bionic-train + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G options: - openstack-origin: cloud:bionic-train/proposed + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:bionic-train placement: charm: cs:~openstack-charmers-next/placement num_units: 1 options: openstack-origin: cloud:bionic-train + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: cloud:bionic-train + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: cloud:bionic-train relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -89,18 +101,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -118,4 +118,22 @@ relations: - - placement - keystone - - placement - - nova-cloud-controller \ No newline at end of file + - nova-cloud-controller +- - 
'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/bundles/cosmic-rocky.yaml b/ceph-fs/src/tests/bundles/cosmic-rocky.yaml index 6fa4f308..4cd23d67 100644 --- a/ceph-fs/src/tests/bundles/cosmic-rocky.yaml +++ b/ceph-fs/src/tests/bundles/cosmic-rocky.yaml @@ -16,7 +16,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 @@ -30,26 +29,38 @@ applications: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + options: + network-manager: Neutron + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -69,18 +80,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -93,3 +92,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git 
a/ceph-fs/src/tests/bundles/disco-stein.yaml b/ceph-fs/src/tests/bundles/disco-stein.yaml index 5a2add22..97974817 100644 --- a/ceph-fs/src/tests/bundles/disco-stein.yaml +++ b/ceph-fs/src/tests/bundles/disco-stein.yaml @@ -16,7 +16,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 @@ -30,26 +29,38 @@ applications: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + options: + network-manager: Neutron + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -69,18 +80,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -93,3 +92,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-mitaka.yaml b/ceph-fs/src/tests/bundles/xenial-mitaka.yaml index 81569cea..71486c14 100644 --- a/ceph-fs/src/tests/bundles/xenial-mitaka.yaml +++ b/ceph-fs/src/tests/bundles/xenial-mitaka.yaml @@ -16,7 +16,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 @@ -30,26 +29,38 @@ applications: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - 
cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + options: + network-manager: Neutron + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -69,18 +80,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -93,3 +92,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-ocata.yaml b/ceph-fs/src/tests/bundles/xenial-ocata.yaml index 5cb819de..ee645f3e 100644 --- a/ceph-fs/src/tests/bundles/xenial-ocata.yaml +++ b/ceph-fs/src/tests/bundles/xenial-ocata.yaml @@ -19,7 +19,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' source: cloud:xenial-ocata percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster @@ -39,32 +38,43 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-ocata - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-ocata - cinder: + nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - block-device: 'None' - glance-api-version: '2' + network-manager: Neutron openstack-origin: cloud:xenial-ocata - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + 
config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:xenial-ocata + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: cloud:xenial-ocata + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway num_units: 1 options: + bridge-mappings: physnet1:br-ex openstack-origin: cloud:xenial-ocata relations: - - ceph-mon:mds @@ -85,18 +95,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - ceph-mon:osd - - nova-cloud-controller:shared-db @@ -109,3 +107,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-pike.yaml b/ceph-fs/src/tests/bundles/xenial-pike.yaml index ba00c37d..da172d31 100644 --- a/ceph-fs/src/tests/bundles/xenial-pike.yaml +++ b/ceph-fs/src/tests/bundles/xenial-pike.yaml @@ -19,7 +19,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' source: cloud:xenial-pike percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster @@ -39,42 +38,53 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-pike - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-pike glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-pike - cinder: + nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - block-device: 'None' - glance-api-version: '2' + network-manager: Neutron openstack-origin: cloud:xenial-pike - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G root-disk=20G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:xenial-pike + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: cloud:xenial-pike + 
neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway num_units: 1 options: + bridge-mappings: physnet1:br-ex openstack-origin: cloud:xenial-pike relations: +- - ceph-osd:mon + - ceph-mon:osd - - ceph-mon:mds - ceph-fs:ceph-mds - - nova-compute:amqp - rabbitmq-server:amqp - - nova-compute:image-service - glance:image-service -- - nova-compute:ceph - - ceph-mon:client - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db @@ -83,22 +93,6 @@ relations: - keystone:identity-service - - glance:amqp - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd - - nova-cloud-controller:shared-db - percona-cluster:shared-db - - nova-cloud-controller:identity-service @@ -109,3 +103,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-queens.yaml b/ceph-fs/src/tests/bundles/xenial-queens.yaml index cf057141..dc0cc9bc 100644 --- a/ceph-fs/src/tests/bundles/xenial-queens.yaml +++ b/ceph-fs/src/tests/bundles/xenial-queens.yaml @@ -19,7 +19,6 @@ applications: num_units: 3 options: monitor-count: '3' - auth-supported: 'none' source: cloud:xenial-queens percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster @@ -39,32 +38,43 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-queens - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-queens glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 options: openstack-origin: cloud:xenial-queens - cinder: + nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - block-device: 'None' - glance-api-version: '2' + network-manager: Neutron openstack-origin: cloud:xenial-queens - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:xenial-queens + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: 
cloud:xenial-queens + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway num_units: 1 options: + bridge-mappings: physnet1:br-ex openstack-origin: cloud:xenial-queens relations: - - ceph-mon:mds @@ -85,18 +95,6 @@ relations: - rabbitmq-server:amqp - - glance:ceph - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client - - ceph-osd:mon - - ceph-mon:osd - - nova-cloud-controller:shared-db - percona-cluster:shared-db - - nova-cloud-controller:identity-service @@ -109,3 +107,21 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - 'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 7b4c390c..cf1b3b3d 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -5,18 +5,19 @@ gate_bundles: - bionic-rocky - bionic-queens - xenial-queens - - xenial-pike + # Xenial-pike is missing because of + # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - xenial-ocata - xenial-mitaka smoke_bundles: - bionic-stein dev_bundles: - cosmic-rocky - - disco-stein configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image + - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network + - zaza.openstack.charm_tests.nova.setup.create_flavors + - zaza.openstack.charm_tests.nova.setup.manage_ssh_key + - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests From 4ae320c3ea55e6eb848f2e916fee05e307528db6 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 13 Feb 2020 16:07:28 +0000 Subject: [PATCH 1876/2699] Ensure python3-hvac is installed for charms with encrypt option The referenced bug is essentially: make vault:secrets relation to vault but keep the 'encrypt' option as False. In this case, the Context handling code in charm-helpers is expecting python3-hvac to be available, but it is only installed if the encrypt option is set to True. Hence the charm crashes. This resolves that crash. Note the related charm-helpers fix [1].
[1]: https://github.com/juju/charm-helpers/pull/431 Change-Id: I9cb60a9340554c91668272b46f7c2dcf9f0ac2d1 Closes-bug: #1862085 --- .../charmhelpers/contrib/openstack/vaultlocker.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index c162de27..866a2697 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,7 +37,19 @@ def __init__(self, secret_backend=None): ) def __call__(self): - import hvac + try: + import hvac + except ImportError: + # BUG: #1862085 - if the relation is made to vault, but the + # 'encrypt' option is not set, then the charm errors with an + # import warning. This catches that, logs a warning, and returns + # with an empty context. + hookenv.log("VaultKVContext: trying to use hvac python module " + "but it's not available. Is secrets-storage relation " + "made, but encrypt option not set?", + level=hookenv.WARNING) + # return an empty context on hvac import error + return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() From 50aeda3d6dfbe31b23c39fe6151849b1404f340c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 17 Feb 2020 17:28:28 +0100 Subject: [PATCH 1877/2699] Add mock for get_blacklist Change-Id: I53a95911e47710db207721b6d6e5c556a33b0249 Closes-Bug: #1863636 --- ceph-osd/unit_tests/test_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index aa539859..fe5c1094 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -35,6 +35,7 @@ TO_PATCH = [ 'config', 'is_block_device', + 'get_blacklist', ] @@ -51,6 +52,7 @@ def setUp(self): os.path.join(self.tmp_dir, "device"): True, } self.is_block_device.side_effect = lambda x: self.bd.get(x, False) + self.get_blacklist.return_value = [] self.addCleanup(shutil.rmtree, self.tmp_dir) def test_get_devices_empty(self): From f59a05dc0454513744bdfb8e4692566df19aa510 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 12:31:50 +0000 Subject: [PATCH 1878/2699] Remove disco support from the charm Change-Id: Ia9549a9404f3b5dbc62631817fa6bd3de809b617 --- ceph-osd/metadata.yaml | 1 - .../{disco-stein.yaml => eoan-train.yaml} | 32 ++++++++++++++++--- ceph-osd/tests/tests.yaml | 2 +- 3 files changed, 29 insertions(+), 6 deletions(-) rename ceph-osd/tests/bundles/{disco-stein.yaml => eoan-train.yaml} (75%) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index bc1ed06b..4a85edd9 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,7 +13,6 @@ tags: series: - xenial - bionic - - disco - eoan - trusty description: | diff --git a/ceph-osd/tests/bundles/disco-stein.yaml b/ceph-osd/tests/bundles/eoan-train.yaml similarity index 75% rename from ceph-osd/tests/bundles/disco-stein.yaml rename to ceph-osd/tests/bundles/eoan-train.yaml index 06cdaf67..8539256c 100644 --- a/ceph-osd/tests/bundles/disco-stein.yaml +++ b/ceph-osd/tests/bundles/eoan-train.yaml @@ -1,39 +1,49 @@ -series: disco +series: eoan applications: ceph-osd: - series: disco charm: ceph-osd num_units: 3 + series: eoan storage: osd-devices: 'cinder,10G' options: - osd-devices: '/srv/ceph /dev/test-non-existent' + osd-devices: '/dev/test-non-existent' + source: cloud:eoan-train ceph-mon: charm:
cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' - auth-supported: 'none' + source: cloud:eoan-train percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: dataset-size: 25% max-connections: 1000 + source: cloud:eoan-train rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 + options: + source: cloud:eoan-train keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 + options: + openstack-origin: cloud:eoan-train nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + options: + openstack-origin: cloud:eoan-train glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 + options: + openstack-origin: cloud:eoan-train cinder: expose: True charm: cs:~openstack-charmers-next/cinder @@ -41,12 +51,20 @@ applications: options: block-device: 'None' glance-api-version: '2' + openstack-origin: cloud:eoan-train cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph nova-cloud-controller: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + options: + openstack-origin: cloud:eoan-train + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:eoan-train relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -88,3 +106,9 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index cf377766..7c310acb 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -12,7 +12,7 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: - - disco-stein + - eoan-train configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From aec5e82852ea8d58bd8cb4f25ee5bb281ce5c3e3 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 14:27:32 +0000 Subject: [PATCH 1879/2699] Remove disco support from the charm Change-Id: I5106499e5d0f90fe02c07a7668d810a0a97822aa --- ceph-proxy/tests/bundles/disco-stein.yaml | 69 ----------------------- 1 file changed, 69 deletions(-) delete mode 100644 ceph-proxy/tests/bundles/disco-stein.yaml diff --git a/ceph-proxy/tests/bundles/disco-stein.yaml b/ceph-proxy/tests/bundles/disco-stein.yaml deleted file mode 100644 index 123f8fa1..00000000 --- a/ceph-proxy/tests/bundles/disco-stein.yaml +++ /dev/null @@ -1,69 +0,0 @@ -series: disco -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - admin-password: openstack - admin-token: ubuntutesting - constraints: mem=1024 - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - dataset-size: 50% - 
max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' From d888a6adf9ee0efc42fd304c2faf20c74835710e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 17:03:02 +0000 Subject: [PATCH 1880/2699] Remove disco support from the charm Change-Id: I7b709f4203799c1466d2327ca5a21cf2da7c416f --- ceph-mon/metadata.yaml | 1 - ceph-mon/tests/bundles/cosmic-rocky.yaml | 90 ------------------------ ceph-mon/tests/bundles/disco-stein.yaml | 90 ------------------------ ceph-mon/tests/tests.yaml | 2 - 4 files changed, 183 deletions(-) delete mode 100644 ceph-mon/tests/bundles/cosmic-rocky.yaml delete mode 100644 ceph-mon/tests/bundles/disco-stein.yaml diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index ceb660bc..59814225 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic - - disco - eoan - trusty peers: diff --git a/ceph-mon/tests/bundles/cosmic-rocky.yaml b/ceph-mon/tests/bundles/cosmic-rocky.yaml deleted file mode 100644 index 5b30768d..00000000 --- a/ceph-mon/tests/bundles/cosmic-rocky.yaml +++ /dev/null @@ -1,90 +0,0 @@ -series: cosmic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: ceph-mon - series: cosmic - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - 
cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/bundles/disco-stein.yaml b/ceph-mon/tests/bundles/disco-stein.yaml deleted file mode 100644 index bb2fb5a0..00000000 --- a/ceph-mon/tests/bundles/disco-stein.yaml +++ /dev/null @@ -1,90 +0,0 @@ -series: disco -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: ceph-mon - series: disco - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 46f09e8d..8a9b3886 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -13,8 +13,6 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: - - cosmic-rocky - - disco-stein configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From dd2ac9fcda6602ded755df6999cda78e7ad39243 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 17:28:52 +0000 Subject: [PATCH 1881/2699] Remove disco support from the charm Change-Id: I31b2312570a64c6f9dd78c86fee9a53ee014c0c8 --- .../src/tests/bundles/disco-stein.yaml | 102 ------------------ 1 file changed, 102 deletions(-) delete mode 100644 
ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml deleted file mode 100644 index 6b1ed05d..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/disco-stein.yaml +++ /dev/null @@ -1,102 +0,0 @@ -series: disco -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: distro - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: distro - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: distro - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: distro - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: disco - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror-b: - series: disco - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph - - ceph-mon -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote From 9e49377d28f7a72d8f8608b284ed32b2b7710e8d Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 18:20:48 +0000 Subject: [PATCH 1882/2699] Remove disco support from the charm Change-Id: I0d1d361e3cecdc656357dda3e3c73dd0a41200f3 --- ceph-radosgw/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index b7e9c1f5..5d483d76 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,7 +15,6 @@ tags: series: - xenial - bionic - - disco - eoan - trusty extra-bindings: From b359491d629da11f7cd0e512bf78fd391362808e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 18 Feb 2020 12:14:41 +0000 Subject: [PATCH 1883/2699] Remove disco support from the charm Also remove old cosmic test as well. 
Change-Id: I4659ba823e2bf66e6ea202be3886e0326ed1c529 --- ceph-fs/src/metadata.yaml | 1 - ceph-fs/src/tests/bundles/bionic-train.yaml | 3 +- .../{cosmic-rocky.yaml => eoan-train.yaml} | 63 ++++++++++++------- ceph-fs/src/tests/tests.yaml | 2 +- 4 files changed, 41 insertions(+), 28 deletions(-) rename ceph-fs/src/tests/bundles/{cosmic-rocky.yaml => eoan-train.yaml} (70%) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index ebfb833d..1aeb7bcc 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic - - disco - eoan subordinate: false requires: diff --git a/ceph-fs/src/tests/bundles/bionic-train.yaml b/ceph-fs/src/tests/bundles/bionic-train.yaml index b81fe488..da7feee9 100644 --- a/ceph-fs/src/tests/bundles/bionic-train.yaml +++ b/ceph-fs/src/tests/bundles/bionic-train.yaml @@ -48,7 +48,6 @@ applications: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 - options: network-manager: Neutron openstack-origin: cloud:bionic-train @@ -136,4 +135,4 @@ relations: - - 'neutron-openvswitch:amqp' - 'rabbitmq-server:amqp' - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' \ No newline at end of file + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/cosmic-rocky.yaml b/ceph-fs/src/tests/bundles/eoan-train.yaml similarity index 70% rename from ceph-fs/src/tests/bundles/cosmic-rocky.yaml rename to ceph-fs/src/tests/bundles/eoan-train.yaml index 4cd23d67..0062b631 100644 --- a/ceph-fs/src/tests/bundles/cosmic-rocky.yaml +++ b/ceph-fs/src/tests/bundles/eoan-train.yaml @@ -1,66 +1,75 @@ -series: cosmic +series: eoan applications: ceph-fs: charm: ceph-fs - series: cosmic + series: eoan num_units: 1 + options: + source: cloud:eoan-train/proposed ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: osd-devices: 'cinder,10G' options: - osd-devices: '/srv/ceph /dev/test-non-existent' + osd-devices: '/dev/test-non-existent' + source: cloud:eoan-train/proposed ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' + source: cloud:eoan-train/proposed percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: dataset-size: 25% max-connections: 1000 + source: cloud:eoan-train/proposed rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 + options: + source: cloud:eoan-train/proposed keystone: expose: True charm: cs:~openstack-charmers-next/keystone num_units: 1 + options: + openstack-origin: cloud:eoan-train/proposed + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:eoan-train/proposed glance: expose: True charm: cs:~openstack-charmers-next/glance num_units: 1 - nova-cloud-controller: + options: + openstack-origin: cloud:eoan-train/proposed + cinder: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: cs:~openstack-charmers-next/cinder num_units: 1 options: - network-manager: Neutron - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:eoan-train/proposed + cinder-ceph: + charm: 
cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 options: - flat-network-providers: physnet1 - neutron-security-groups: true - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway + openstack-origin: cloud:eoan-train/proposed + placement: + charm: cs:~openstack-charmers-next/placement num_units: 1 options: - bridge-mappings: physnet1:br-ex + openstack-origin: cloud:eoan-train relations: - - ceph-mon:mds - ceph-fs:ceph-mds @@ -92,6 +101,12 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller - - 'neutron-api:shared-db' - 'percona-cluster:shared-db' - - 'neutron-api:amqp' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index cf1b3b3d..71f6a5dc 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -12,7 +12,7 @@ gate_bundles: smoke_bundles: - bionic-stein dev_bundles: - - cosmic-rocky + - eoan-train configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network From 0253c5dea54d7cb42f9d3cba93037fc0ad40388d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 21 Feb 2020 16:47:44 +0100 Subject: [PATCH 1884/2699] Remove note about ceph-fs being for testing Change-Id: I26895f8e2163e81f7d987e7dcda79e9628462a34 --- ceph-fs/src/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 1aabb715..2061e62d 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -1,7 +1,6 @@ # CephFS Charm -This charm exists to provide an example integration of CephFS, for the purpose -of test and reference. +This charm exists to provide integration of CephFS. # Overview From 0cf6b42207314591a9d91ec1c233d26c086acf11 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 21 Feb 2020 18:46:57 +0000 Subject: [PATCH 1885/2699] Rename lib/ceph to lib/charms_ceph The new python3-ceph-common deb package (introduced in ceph octopus) adds a new ceph directory (a parent package in python terms) in /usr/lib/python3/dist-packages/ceph/. This results in a conflict with charm-ceph-osd/lib/ceph/. For example, with the current import of ceph.utils in hooks/ceph_hooks.py, Python finds no utils.py in /usr/lib/python3/dist-packages/ceph/ and then stops searching. Therefore, rename lib/ceph to lib/charms_ceph to avoid the conflict. 
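A minimal sketch of the shadowing described above (a hypothetical interpreter session; it assumes python3-ceph-common is installed and that the charm appends its lib/ directory to sys.path, as the hooks do):

import sys
sys.path.append('lib')      # charm lib/ lands *after* /usr/lib/python3/dist-packages
import ceph.utils           # 'ceph' resolves to the deb's package, which ships no utils.py
# ModuleNotFoundError: No module named 'ceph.utils'
import charms_ceph.utils    # the renamed charm package no longer collides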
Depends-On: https://review.opendev.org/#/c/709226 Change-Id: Id5bdff991c1e6c196c09ba5a2241ebd5ebe6da91 --- ceph-mon/actions/pool_set.py | 2 +- ceph-mon/actions/set_noout.py | 2 +- ceph-mon/actions/unset_noout.py | 2 +- ceph-mon/hooks/ceph_hooks.py | 4 ++-- ceph-mon/lib/{ceph => charms_ceph}/__init__.py | 0 ceph-mon/lib/{ceph => charms_ceph}/broker.py | 4 ++-- ceph-mon/lib/{ceph => charms_ceph}/crush_utils.py | 0 ceph-mon/lib/{ceph => charms_ceph}/utils.py | 0 ceph-mon/unit_tests/test_ceph_ops.py | 2 +- 9 files changed, 8 insertions(+), 8 deletions(-) rename ceph-mon/lib/{ceph => charms_ceph}/__init__.py (100%) rename ceph-mon/lib/{ceph => charms_ceph}/broker.py (99%) rename ceph-mon/lib/{ceph => charms_ceph}/crush_utils.py (100%) rename ceph-mon/lib/{ceph => charms_ceph}/utils.py (100%) diff --git a/ceph-mon/actions/pool_set.py b/ceph-mon/actions/pool_set.py index 51fb8e83..8549fe70 100755 --- a/ceph-mon/actions/pool_set.py +++ b/ceph-mon/actions/pool_set.py @@ -21,7 +21,7 @@ sys.path.append('hooks') from charmhelpers.core.hookenv import action_get, log, action_fail -from ceph.broker import handle_set_pool_value +from charms_ceph.broker import handle_set_pool_value if __name__ == '__main__': name = action_get("name") diff --git a/ceph-mon/actions/set_noout.py b/ceph-mon/actions/set_noout.py index 50c119d9..145c6988 100755 --- a/ceph-mon/actions/set_noout.py +++ b/ceph-mon/actions/set_noout.py @@ -18,7 +18,7 @@ sys.path.append('hooks') from charmhelpers.core.hookenv import action_set, action_fail sys.path.append('lib') -from ceph.utils import osd_noout +from charms_ceph.utils import osd_noout if __name__ == '__main__': result = osd_noout(True) diff --git a/ceph-mon/actions/unset_noout.py b/ceph-mon/actions/unset_noout.py index 142fa1e5..36be4a69 100755 --- a/ceph-mon/actions/unset_noout.py +++ b/ceph-mon/actions/unset_noout.py @@ -18,7 +18,7 @@ sys.path.append('hooks') from charmhelpers.core.hookenv import action_set, action_fail sys.path.append('lib') -from ceph.utils import osd_noout +from charms_ceph.utils import osd_noout if __name__ == '__main__': result = osd_noout(False) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 2ed2114d..627705fb 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -22,8 +22,8 @@ import uuid sys.path.append('lib') -import ceph.utils as ceph -from ceph.broker import ( +import charms_ceph.utils as ceph +from charms_ceph.broker import ( process_requests ) diff --git a/ceph-mon/lib/ceph/__init__.py b/ceph-mon/lib/charms_ceph/__init__.py similarity index 100% rename from ceph-mon/lib/ceph/__init__.py rename to ceph-mon/lib/charms_ceph/__init__.py diff --git a/ceph-mon/lib/ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py similarity index 99% rename from ceph-mon/lib/ceph/broker.py rename to ceph-mon/lib/charms_ceph/broker.py index bae74a12..ceda9a85 100644 --- a/ceph-mon/lib/ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -18,11 +18,11 @@ from tempfile import NamedTemporaryFile -from ceph.utils import ( +from charms_ceph.utils import ( get_cephfs, get_osd_weight ) -from ceph.crush_utils import Crushmap +from charms_ceph.crush_utils import Crushmap from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/lib/ceph/crush_utils.py b/ceph-mon/lib/charms_ceph/crush_utils.py similarity index 100% rename from ceph-mon/lib/ceph/crush_utils.py rename to ceph-mon/lib/charms_ceph/crush_utils.py diff --git a/ceph-mon/lib/ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py similarity index 100% rename 
from ceph-mon/lib/ceph/utils.py rename to ceph-mon/lib/charms_ceph/utils.py diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 5f17e03e..f82dbd09 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -20,7 +20,7 @@ patch, ) -from ceph import broker +from charms_ceph import broker class TestCephOps(unittest.TestCase): From c23a4715d0233f8357bdcfdae72520d98babcdb2 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 21 Feb 2020 21:11:49 +0000 Subject: [PATCH 1886/2699] Rename lib/ceph to lib/charms_ceph The new python3-ceph-common deb package (introduced in ceph octopus) adds a new ceph directory (a parent package in python terms) in /usr/lib/python3/dist-packages/ceph/. This results in a conflict with charm-ceph-osd/lib/ceph/. For example, with the current import of ceph.utils in hooks/hooks.py, Python finds no utils.py in /usr/lib/python3/dist-packages/ceph/ and then stops searching. Therefore, rename lib/ceph to lib/charms_ceph to avoid the conflict. Depends-On: https://review.opendev.org/#/c/709226 Change-Id: I433537805ae56dc7a42e9d34ea6d491ffb4c79c0 --- ceph-radosgw/hooks/hooks.py | 2 +- ceph-radosgw/lib/{ceph => charms_ceph}/__init__.py | 0 ceph-radosgw/lib/{ceph => charms_ceph}/broker.py | 4 ++-- ceph-radosgw/lib/{ceph => charms_ceph}/crush_utils.py | 0 ceph-radosgw/lib/{ceph => charms_ceph}/utils.py | 0 5 files changed, 3 insertions(+), 3 deletions(-) rename ceph-radosgw/lib/{ceph => charms_ceph}/__init__.py (100%) rename ceph-radosgw/lib/{ceph => charms_ceph}/broker.py (99%) rename ceph-radosgw/lib/{ceph => charms_ceph}/crush_utils.py (100%) rename ceph-radosgw/lib/{ceph => charms_ceph}/utils.py (100%) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3d81941c..845714d7 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -23,7 +23,7 @@ sys.path.append('lib') import ceph_rgw as ceph -import ceph.utils as ceph_utils +import charms_ceph.utils as ceph_utils import multisite from charmhelpers.core.hookenv import ( diff --git a/ceph-radosgw/lib/ceph/__init__.py b/ceph-radosgw/lib/charms_ceph/__init__.py similarity index 100% rename from ceph-radosgw/lib/ceph/__init__.py rename to ceph-radosgw/lib/charms_ceph/__init__.py diff --git a/ceph-radosgw/lib/ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py similarity index 99% rename from ceph-radosgw/lib/ceph/broker.py rename to ceph-radosgw/lib/charms_ceph/broker.py index bae74a12..ceda9a85 100644 --- a/ceph-radosgw/lib/ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -18,11 +18,11 @@ from tempfile import NamedTemporaryFile -from ceph.utils import ( +from charms_ceph.utils import ( get_cephfs, get_osd_weight ) -from ceph.crush_utils import Crushmap +from charms_ceph.crush_utils import Crushmap from charmhelpers.core.hookenv import ( log, diff --git a/ceph-radosgw/lib/ceph/crush_utils.py b/ceph-radosgw/lib/charms_ceph/crush_utils.py similarity index 100% rename from ceph-radosgw/lib/ceph/crush_utils.py rename to ceph-radosgw/lib/charms_ceph/crush_utils.py diff --git a/ceph-radosgw/lib/ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py similarity index 100% rename from ceph-radosgw/lib/ceph/utils.py rename to ceph-radosgw/lib/charms_ceph/utils.py From 4a7d47cdd94aabb570e9c761d6f6e41a95a3a0d0 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 21 Feb 2020 18:22:58 +0000 Subject: [PATCH 1887/2699] Rename lib/ceph to lib/charms_ceph The new python3-ceph-common deb package (introduced 
in ceph octopus) adds a new ceph directory (a parent package in python terms) in /usr/lib/python3/dist-packages/ceph/. This results in a conflict with charm-ceph-osd/lib/ceph/. For example, with the current import of ceph.utils in hooks/ceph_hooks.py, Python finds no utils.py in /usr/lib/python3/dist-packages/ceph/ and then stops searching. Therefore, rename lib/ceph to lib/charms_ceph to avoid the conflict. Depends-On: https://review.opendev.org/#/c/709226 Change-Id: I13ae7c048d8f1eef2ea64b13ae14b51dbfaaf3cd --- ceph-osd/actions/add_disk.py | 16 ++++++++-------- ceph-osd/actions/list_disks.py | 12 +++++++----- ceph-osd/actions/osd_in_out.py | 2 +- ceph-osd/actions/zap_disk.py | 4 ++-- ceph-osd/hooks/ceph_hooks.py | 2 +- ceph-osd/hooks/utils.py | 2 +- ceph-osd/lib/{ceph => charms_ceph}/__init__.py | 0 ceph-osd/lib/{ceph => charms_ceph}/broker.py | 4 ++-- .../lib/{ceph => charms_ceph}/crush_utils.py | 0 ceph-osd/lib/{ceph => charms_ceph}/utils.py | 0 ceph-osd/unit_tests/test_actions_add_disk.py | 2 +- ceph-osd/unit_tests/test_actions_list_disks.py | 9 +++++---- ceph-osd/unit_tests/test_tuning.py | 2 +- 13 files changed, 29 insertions(+), 26 deletions(-) rename ceph-osd/lib/{ceph => charms_ceph}/__init__.py (100%) rename ceph-osd/lib/{ceph => charms_ceph}/broker.py (99%) rename ceph-osd/lib/{ceph => charms_ceph}/crush_utils.py (100%) rename ceph-osd/lib/{ceph => charms_ceph}/utils.py (100%) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index d16668a9..b725c9b0 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -27,19 +27,19 @@ from charmhelpers.core.unitdata import kv import ceph_hooks -import ceph.utils +import charms_ceph.utils def add_device(request, device_path, bucket=None): - ceph.utils.osdize(device_path, hookenv.config('osd-format'), - ceph_hooks.get_journal_devices(), - hookenv.config('ignore-device-errors'), - hookenv.config('osd-encrypt'), - hookenv.config('bluestore'), - hookenv.config('osd-encrypt-keymanager')) + charms_ceph.utils.osdize(device_path, hookenv.config('osd-format'), + ceph_hooks.get_journal_devices(), + hookenv.config('ignore-device-errors'), + hookenv.config('osd-encrypt'), + hookenv.config('bluestore'), + hookenv.config('osd-encrypt-keymanager')) # Make it fast! 
if hookenv.config('autotune'): - ceph.utils.tune_dev(device_path) + charms_ceph.utils.tune_dev(device_path) mounts = filter(lambda disk: device_path in disk.device, psutil.disk_partitions()) for osd in mounts: diff --git a/ceph-osd/actions/list_disks.py b/ceph-osd/actions/list_disks.py index 46031a46..861b975c 100755 --- a/ceph-osd/actions/list_disks.py +++ b/ceph-osd/actions/list_disks.py @@ -36,7 +36,7 @@ import charmhelpers.core.hookenv as hookenv -import ceph.utils +import charms_ceph.utils import utils @@ -46,13 +46,15 @@ def list_disk(): for journal in utils.get_journal_devices(): osd_journal.append(os.path.realpath(journal)) - for dev in list(set(ceph.utils.unmounted_disks()) - set(osd_journal)): - if (not ceph.utils.is_active_bluestore_device(dev) and - not ceph.utils.is_pristine_disk(dev)): + for dev in list(set(charms_ceph.utils.unmounted_disks()) - + set(osd_journal)): + if (not charms_ceph.utils.is_active_bluestore_device(dev) and + not charms_ceph.utils.is_pristine_disk(dev)): non_pristine.append(dev) hookenv.action_set({ - 'disks': list(set(ceph.utils.unmounted_disks()) - set(osd_journal)), + 'disks': list(set(charms_ceph.utils.unmounted_disks()) - + set(osd_journal)), 'blacklist': utils.get_blacklist(), 'non-pristine': non_pristine, }) diff --git a/ceph-osd/actions/osd_in_out.py b/ceph-osd/actions/osd_in_out.py index f5525fec..f25168ab 100755 --- a/ceph-osd/actions/osd_in_out.py +++ b/ceph-osd/actions/osd_in_out.py @@ -27,7 +27,7 @@ action_fail, ) -from ceph.utils import get_local_osd_ids +from charms_ceph.utils import get_local_osd_ids from ceph_hooks import assess_status diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index 550e70c2..f85a48fd 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -27,8 +27,8 @@ zap_disk, ) from charmhelpers.core.unitdata import kv -from ceph.utils import is_active_bluestore_device -from ceph.utils import is_mapped_luks_device +from charms_ceph.utils import is_active_bluestore_device +from charms_ceph.utils import is_mapped_luks_device def get_devices(): diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index c5992525..2479453c 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -25,7 +25,7 @@ import sys sys.path.append('lib') -import ceph.utils as ceph +import charms_ceph.utils as ceph from charmhelpers.core import hookenv from charmhelpers.core.hookenv import ( log, diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 9f93b598..912abe30 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -19,7 +19,7 @@ import sys sys.path.append('lib') -import ceph.utils as ceph +import charms_ceph.utils as ceph from charmhelpers.core.hookenv import ( unit_get, diff --git a/ceph-osd/lib/ceph/__init__.py b/ceph-osd/lib/charms_ceph/__init__.py similarity index 100% rename from ceph-osd/lib/ceph/__init__.py rename to ceph-osd/lib/charms_ceph/__init__.py diff --git a/ceph-osd/lib/ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py similarity index 99% rename from ceph-osd/lib/ceph/broker.py rename to ceph-osd/lib/charms_ceph/broker.py index bae74a12..ceda9a85 100644 --- a/ceph-osd/lib/ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -18,11 +18,11 @@ from tempfile import NamedTemporaryFile -from ceph.utils import ( +from charms_ceph.utils import ( get_cephfs, get_osd_weight ) -from ceph.crush_utils import Crushmap +from charms_ceph.crush_utils import Crushmap from charmhelpers.core.hookenv import ( log, diff --git 
a/ceph-osd/lib/ceph/crush_utils.py b/ceph-osd/lib/charms_ceph/crush_utils.py similarity index 100% rename from ceph-osd/lib/ceph/crush_utils.py rename to ceph-osd/lib/charms_ceph/crush_utils.py diff --git a/ceph-osd/lib/ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py similarity index 100% rename from ceph-osd/lib/ceph/utils.py rename to ceph-osd/lib/charms_ceph/utils.py diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index a5d711cd..0fdef53a 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -26,7 +26,7 @@ def setUp(self): self.kv.return_value = self.kv @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices') - @mock.patch.object(add_disk.ceph.utils, 'osdize') + @mock.patch.object(add_disk.charms_ceph.utils, 'osdize') def test_add_device(self, mock_osdize, mock_get_journal_devices): def fake_config(key): diff --git a/ceph-osd/unit_tests/test_actions_list_disks.py b/ceph-osd/unit_tests/test_actions_list_disks.py index 4990d61d..358dd663 100644 --- a/ceph-osd/unit_tests/test_actions_list_disks.py +++ b/ceph-osd/unit_tests/test_actions_list_disks.py @@ -7,16 +7,17 @@ class ListDisksActionTests(CharmTestCase): def setUp(self): super(ListDisksActionTests, self).setUp( list_disks, ['hookenv', - 'ceph', + 'charms_ceph', 'utils', 'os']) - self.ceph.utils.unmounted_disks.return_value = ['/dev/sda', '/dev/sdm'] + self.charms_ceph.utils.unmounted_disks.return_value = ['/dev/sda', + '/dev/sdm'] def test_list_disks_journal_symbol_link(self): self.utils.get_journal_devices.return_value = {'/dev/disk/ceph/sdm'} self.os.path.realpath.return_value = '/dev/sdm' - self.ceph.utils.is_active_bluestore_device.return_value = False - self.ceph.utils.is_pristine_disk.return_value = False + self.charms_ceph.utils.is_active_bluestore_device.return_value = False + self.charms_ceph.utils.is_pristine_disk.return_value = False self.utils.get_blacklist.return_value = [] list_disks.list_disk() self.hookenv.action_set.assert_called_with({ diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index e9abb33a..bb0631f4 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -1,7 +1,7 @@ __author__ = 'Chris Holcombe ' from mock import patch, call import test_utils -import ceph.utils as ceph +import charms_ceph.utils as ceph TO_PATCH = [ 'hookenv', From f04a6657baf56b162309ddbbec74bfda5fe0b900 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Feb 2020 18:18:23 +0000 Subject: [PATCH 1888/2699] Initial commit --- ceph-iscsi/.gitignore | 1 + ceph-iscsi/.gitmodules | 6 ++ ceph-iscsi/README.md | 18 ++++ ceph-iscsi/charm-prep.sh | 10 ++ ceph-iscsi/config.yaml | 49 ++++++++++ ceph-iscsi/hooks/install | 1 + ceph-iscsi/metadata.yaml | 19 ++++ ceph-iscsi/mod/interface-ceph-client | 1 + ceph-iscsi/mod/operator | 1 + ceph-iscsi/src/charm.py | 83 +++++++++++++++++ .../templates/ceph.client.ceph-iscsi.keyring | 3 + ceph-iscsi/templates/ceph.conf | 15 +++ ceph-iscsi/templates/iscsi-gateway.cfg | 27 ++++++ ceph-iscsi/test-requirements.txt | 13 +++ ceph-iscsi/tests/01-setup-client-apt.sh | 7 ++ ceph-iscsi/tests/02-setup-gw.sh | 26 ++++++ ceph-iscsi/tests/03-setup-client-iscsi.sh | 18 ++++ ceph-iscsi/tests/deploy.sh | 1 + ceph-iscsi/tests/focal.yaml | 28 ++++++ ceph-iscsi/tox.ini | 92 +++++++++++++++++++ 20 files changed, 419 insertions(+) create mode 100644 ceph-iscsi/.gitignore create mode 100644 ceph-iscsi/.gitmodules create mode 100644 
ceph-iscsi/README.md create mode 100755 ceph-iscsi/charm-prep.sh create mode 100644 ceph-iscsi/config.yaml create mode 120000 ceph-iscsi/hooks/install create mode 100644 ceph-iscsi/metadata.yaml create mode 160000 ceph-iscsi/mod/interface-ceph-client create mode 160000 ceph-iscsi/mod/operator create mode 100755 ceph-iscsi/src/charm.py create mode 100644 ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring create mode 100644 ceph-iscsi/templates/ceph.conf create mode 100644 ceph-iscsi/templates/iscsi-gateway.cfg create mode 100644 ceph-iscsi/test-requirements.txt create mode 100755 ceph-iscsi/tests/01-setup-client-apt.sh create mode 100755 ceph-iscsi/tests/02-setup-gw.sh create mode 100755 ceph-iscsi/tests/03-setup-client-iscsi.sh create mode 100755 ceph-iscsi/tests/deploy.sh create mode 100644 ceph-iscsi/tests/focal.yaml create mode 100644 ceph-iscsi/tox.ini diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/ceph-iscsi/.gitignore @@ -0,0 +1 @@ +lib diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules new file mode 100644 index 00000000..c03af6ad --- /dev/null +++ b/ceph-iscsi/.gitmodules @@ -0,0 +1,6 @@ +[submodule "mod/operator"] + path = mod/operator + url = https://github.com/canonical/operator +[submodule "mod/interface-ceph-client"] + path = mod/interface-ceph-client + url = https://github.com/gnuoy/oper-interface-ceph-client.git diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md new file mode 100644 index 00000000..88ae53cc --- /dev/null +++ b/ceph-iscsi/README.md @@ -0,0 +1,18 @@ +Ceph iSCSI Gateway charm +======================== + +To use, first pull in dependencies: + +```bash +./charm-prep.sh +``` + +To deploy with an example and test: + +```bash +cd tests +./deploy.sh +./01-setup-client-apt.sh +./02-setup-gw.sh +./03-setup-client-iscsi.sh +``` diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh new file mode 100755 index 00000000..4e57d26d --- /dev/null +++ b/ceph-iscsi/charm-prep.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +rm -rf lib/* + +pip install -t lib/ git+https://github.com/juju/charm-helpers.git + +git submodule init +git submodule update +(cd lib; ln -s ../mod/operator/ops;) +(cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml new file mode 100644 index 00000000..aa944462 --- /dev/null +++ b/ceph-iscsi/config.yaml @@ -0,0 +1,49 @@ +options: + loglevel: + default: 1 + type: int + description: Mon and OSD debug level. Max is 20. + source: + type: string + default: + description: | + Optional configuration to support use of additional sources such as: + - ppa:myteam/ppa + - cloud:trusty-proposed/kilo + - http://my.archive.com/ubuntu main + The last option should be used in conjunction with the key configuration + option. + Note that a minimum ceph version of 0.48.2 is required for use with this + charm which is NOT provided by the packages in the main Ubuntu archive + for precise but is provided in the Ubuntu cloud archive. + key: + type: string + default: + description: | + Key ID to import to the apt keyring to support use with arbitrary source + configuration from outside of Launchpad archives or PPA's. + use-syslog: + type: boolean + default: False + description: | + If set to True, supporting services will log to syslog. + ceph-public-network: + type: string + default: + description: | + The IP address and netmask of the public (front-side) network (e.g., + 192.168.0.0/24).
+ If multiple networks are to be used, a space-delimited list of a.b.c.d/x + can be provided. + prefer-ipv6: + type: boolean + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy extension must be + disabled and a non-temporary address must be configured/available on + your network interface. diff --git a/ceph-iscsi/hooks/install b/ceph-iscsi/hooks/install new file mode 120000 index 00000000..25b1f68f --- /dev/null +++ b/ceph-iscsi/hooks/install @@ -0,0 +1 @@ +../src/charm.py \ No newline at end of file diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml new file mode 100644 index 00000000..555ffc95 --- /dev/null +++ b/ceph-iscsi/metadata.yaml @@ -0,0 +1,19 @@ +name: ceph-iscsi +summary: Gateway for provisioning iscsi devices backed by ceph. +maintainer: OpenStack Charmers +description: | + The iSCSI gateway integrates Ceph Storage with the iSCSI standard to + provide a Highly Available (HA) iSCSI target that exports RADOS Block Device + (RBD) images as SCSI disks. +tags: + - openstack + - storage + - misc +series: + - focal +subordinate: false +requires: + ceph-client: + interface: ceph-client +extra-bindings: + public: diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client new file mode 160000 index 00000000..93b4661b --- /dev/null +++ b/ceph-iscsi/mod/interface-ceph-client @@ -0,0 +1 @@ +Subproject commit 93b4661be184753038da626d3a04d3855f948430 diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator new file mode 160000 index 00000000..3a73427d --- /dev/null +++ b/ceph-iscsi/mod/operator @@ -0,0 +1 @@ +Subproject commit 3a73427dee96f49acfe25880924528a3e57834cc diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py new file mode 100755 index 00000000..6b6e7a2a --- /dev/null +++ b/ceph-iscsi/src/charm.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import os +import subprocess +import sys + +sys.path.append('lib') + +from ops.charm import CharmBase +from ops.framework import ( + StoredState, +) +from ops.main import main +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +import charmhelpers.core.host as ch_host +import charmhelpers.core.templating as ch_templating +import interface_ceph_client + + +class CephISCSIGatewayCharm(CharmBase): + state = StoredState() + + PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] + CEPH_CAPABILITIES = [ + "osd", "allow *", + "mon", "allow *", + "mgr", "allow r"] + + def __init__(self, framework, key): + super().__init__(framework, key) + self.framework.observe(self.on.install, self) + self.framework.observe(self.on.ceph_client_relation_joined, self) + self.ceph_client = interface_ceph_client.CephClientRequires( + self, + 'ceph-client') + self.framework.observe(self.ceph_client.on.pools_available, self) + + def on_install(self, event): + apt_update(fatal=True) + apt_install(self.PACKAGES, fatal=True) + + def on_ceph_client_relation_joined(self, event): + self.ceph_client.create_replicated_pool('rbd') + self.ceph_client.request_ceph_permissions( + 'ceph-iscsi', + self.CEPH_CAPABILITIES) + + def on_pools_available(self, event): + ceph_context = { + 'use_syslog': + str(self.framework.model.config['use-syslog']).lower(), + 'loglevel': self.framework.model.config['loglevel'] + } +
ceph_context.update(self.ceph_client.get_pool_data()) + ceph_context['mon_hosts'] = ' '.join(ceph_context['mon_hosts']) + + restart_map = { + '/etc/ceph/ceph.conf': ['rbd-target-api'], + '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'], + '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} + + def daemon_reload_and_restart(service_name): + subprocess.check_call(['systemctl', 'daemon-reload']) + subprocess.check_call(['systemctl', 'restart', service_name]) + + rfuncs = { + 'rbd-target-api': daemon_reload_and_restart} + + @ch_host.restart_on_change(restart_map, restart_functions=rfuncs) + def render_configs(): + for config_file in restart_map.keys(): + ch_templating.render( + os.path.basename(config_file), + config_file, + ceph_context) + render_configs() + + +if __name__ == '__main__': + main(CephISCSIGatewayCharm) diff --git a/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring b/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring new file mode 100644 index 00000000..fe132228 --- /dev/null +++ b/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring @@ -0,0 +1,3 @@ +[client.ceph-iscsi] + key = {{ key }} + diff --git a/ceph-iscsi/templates/ceph.conf b/ceph-iscsi/templates/ceph.conf new file mode 100644 index 00000000..c8b38244 --- /dev/null +++ b/ceph-iscsi/templates/ceph.conf @@ -0,0 +1,15 @@ +############################################################################### +# [ WARNING ] +# configuration file maintained by Juju +# local changes will be overwritten. +############################################################################### +[global] +auth_supported = {{ auth_supported }} +mon host = {{ mon_hosts }} +keyring = /etc/ceph/$cluster.$name.keyring + +[client.ceph-iscsi] +client mount uid = 0 +client mount gid = 0 +log file = /var/log/ceph/ceph-client.iscsi.log + diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg new file mode 100644 index 00000000..26af98f0 --- /dev/null +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -0,0 +1,27 @@ +[config] +# Name of the Ceph storage cluster. A suitable Ceph configuration file allowing +# # access to the Ceph storage cluster from the gateway node is required, if not +# # colocated on an OSD node. +logger_level = DEBUG +cluster_name = ceph +cluster_client_name = client.ceph-iscsi +# +# # Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph +# # directory and reference the filename here +#gateway_keyring = ceph.client.admin.keyring +gateway_keyring = ceph.client.ceph-iscsi.keyring +# +# +# # API settings. +# # The API supports a number of options that allow you to tailor it to your +# # local environment. If you want to run the API under https, you will need to +# # create cert/key files that are compatible for each iSCSI gateway node, that is +# # not locked to a specific node. SSL cert and key files *must* be called +# # 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory +# # on *each* gateway node. With the SSL files in place, you can use 'api_secure = true' +# # to switch to https mode. +# +# # To support the API, the bare minimum settings are: +api_secure = false +# +# diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt new file mode 100644 index 00000000..14b380e4 --- /dev/null +++ b/ceph-iscsi/test-requirements.txt @@ -0,0 +1,13 @@ +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't.
Instead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +# +# Lint and unit test requirements +flake8>=2.2.4,<=2.4.1 +stestr>=2.2.0 +requests>=2.18.4 +charms.reactive +mock>=1.2 +nose>=1.3.7 +coverage>=3.6 +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-iscsi/tests/01-setup-client-apt.sh b/ceph-iscsi/tests/01-setup-client-apt.sh new file mode 100755 index 00000000..65a97474 --- /dev/null +++ b/ceph-iscsi/tests/01-setup-client-apt.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +client="ubuntu/0" + +juju run --unit $client "apt install --yes open-iscsi multipath-tools" +juju run --unit $client "systemctl start iscsi" +juju run --unit $client "systemctl start iscsid" diff --git a/ceph-iscsi/tests/02-setup-gw.sh b/ceph-iscsi/tests/02-setup-gw.sh new file mode 100755 index 00000000..25eeef71 --- /dev/null +++ b/ceph-iscsi/tests/02-setup-gw.sh @@ -0,0 +1,26 @@ +#!/bin/bash -x + +gw1="ceph-iscsi/0" +gw2="ceph-iscsi/1" + +gw1_hostname=$(juju run --unit $gw1 "hostname -f") +gw2_hostname=$(juju run --unit $gw2 "hostname -f") +gw1_ip=$(juju status $gw1 --format=oneline | awk '{print $3}' | tr -d \\n ) +gw2_ip=$(juju status $gw2 --format=oneline | awk '{print $3}' | tr -d \\n ) +client_initiatorname=$(juju run --unit ubuntu/0 "grep -E '^InitiatorName' /etc/iscsi/initiatorname.iscsi") +client_initiatorname=$(echo $client_initiatorname | awk 'BEGIN {FS="="} {print $2}') +echo "!$gw1_hostname!" +echo "!$gw2_hostname!" +echo "!$gw1_ip!" +echo "!$gw2_ip!" +echo "!$client_initiatorname!" + +gw_iqn="iqn.2003-01.com.canonical.iscsi-gw:iscsi-igw" + +juju run --unit $gw1 "gwcli /iscsi-targets/ create $gw_iqn" +juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/gateways create $gw1_hostname $gw1_ip skipchecks=true" +juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/gateways create $gw2_hostname $gw2_ip skipchecks=true" +juju run --unit $gw1 "gwcli /disks create pool=rbd image=disk_1 size=1G" +juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts create ${client_initiatorname}" +juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts/${client_initiatorname} auth username=myiscsiusername password=myiscsipassword" +juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts/${client_initiatorname} disk add rbd/disk_1" diff --git a/ceph-iscsi/tests/03-setup-client-iscsi.sh b/ceph-iscsi/tests/03-setup-client-iscsi.sh new file mode 100755 index 00000000..df0913cf --- /dev/null +++ b/ceph-iscsi/tests/03-setup-client-iscsi.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +client="ubuntu/0" + +gw1="ceph-iscsi/0" + +gw1_ip=$(juju status $gw1 --format=oneline | awk '{print $3}' | tr -d \\n ) + +juju run --unit $client "iscsiadm -m discovery -t st -p $gw1_ip" + +target_name="iqn.2003-01.com.canonical.iscsi-gw:iscsi-igw" + +juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.authmethod --value=CHAP" +juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.username --value=myiscsiusername" +juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.password --value=myiscsipassword" +juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --login" +sleep 5 +juju ssh ubuntu/0 "ls -l /dev/dm-0" diff --git a/ceph-iscsi/tests/deploy.sh b/ceph-iscsi/tests/deploy.sh new file mode 100755 index 00000000..d8e831d1 --- /dev/null +++ b/ceph-iscsi/tests/deploy.sh @@ -0,0
+1 @@ +juju deploy --force ./focal.yaml diff --git a/ceph-iscsi/tests/focal.yaml b/ceph-iscsi/tests/focal.yaml new file mode 100644 index 00000000..d045ad52 --- /dev/null +++ b/ceph-iscsi/tests/focal.yaml @@ -0,0 +1,28 @@ +series: bionic +applications: + ubuntu: + charm: cs:ubuntu + num_units: 1 + ceph-iscsi: + charm: ../ + series: focal + num_units: 2 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-train + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train +relations: +- - ceph-mon:client + - ceph-iscsi:ceph-client +- - ceph-osd:mon + - ceph-mon:osd diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini new file mode 100644 index 00000000..f8f50927 --- /dev/null +++ b/ceph-iscsi/tox.ini @@ -0,0 +1,92 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools + +[tox] +skipsdist = True +envlist = pep8,py3 +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + TERM=linux + LAYER_PATH={toxinidir}/layers + INTERFACE_PATH={toxinidir}/interfaces + JUJU_REPOSITORY={toxinidir}/build +passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY +install_command = + pip install {opts} {packages} +deps = + -r{toxinidir}/requirements.txt + +[testenv:build] +basepython = python3 +commands = + charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . 
+omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[flake8] +# E402 ignore necessary for path append before sys module import in actions +ignore = E402,W504 \ No newline at end of file From a7526b62b20bfab6cb349cd107399a6119021a3a Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 26 Feb 2020 11:28:20 +0100 Subject: [PATCH 1889/2699] Add newline at end of file when writing keyring Change-Id: I6682803c3dc67fd07f9db0073fe8eaa0de29c6d6 Closes-Bug: #1864706 Depends-On: If79977c2ec7588b09ca2e118141c7f3d9cf61767 --- ceph-mon/lib/charms_ceph/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 7c970784..2b8a490d 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1372,6 +1372,7 @@ def create_keyrings(): if not output: # NOTE: key not yet created, raise exception and retry raise Exception + output += '\n' write_file(_client_admin_keyring, output, owner=ceph_user(), group=ceph_user(), perms=0o400) From ead9d8bfb04e1c78e018a275ddf541ba7d19a64b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Feb 2020 14:21:19 +0000 Subject: [PATCH 1890/2699] General fixes and tests --- ceph-iscsi/.gitignore | 1 + ceph-iscsi/src/charm.py | 14 ++++ ceph-iscsi/test-requirements.txt | 14 ++-- ceph-iscsi/tests/01-setup-client-apt.sh | 7 -- ceph-iscsi/tests/02-setup-gw.sh | 26 ------- ceph-iscsi/tests/03-setup-client-iscsi.sh | 18 ----- ceph-iscsi/tests/{ => bundles}/focal.yaml | 2 +- ceph-iscsi/tests/deploy.sh | 1 - ceph-iscsi/tests/tests.yaml | 9 +++ ceph-iscsi/todo.txt | 13 ++++ ceph-iscsi/tox.ini | 84 ++++++----------------- 11 files changed, 66 insertions(+), 123 deletions(-) delete mode 100755 ceph-iscsi/tests/01-setup-client-apt.sh delete mode 100755 ceph-iscsi/tests/02-setup-gw.sh delete mode 100755 ceph-iscsi/tests/03-setup-client-iscsi.sh rename ceph-iscsi/tests/{ => bundles}/focal.yaml (96%) delete mode 100755 ceph-iscsi/tests/deploy.sh create mode 100644 ceph-iscsi/tests/tests.yaml create mode 100644 ceph-iscsi/todo.txt diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore index a65b4177..950798c2 100644 --- a/ceph-iscsi/.gitignore +++ b/ceph-iscsi/.gitignore @@ -1 +1,2 @@ lib +.tox diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 6b6e7a2a..74ce08fa 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -11,6 +11,10 @@ StoredState, ) from ops.main import main +from ops.model import ( + ActiveStatus, + WaitingStatus, +) from charmhelpers.fetch import ( apt_install, apt_update, @@ -31,7 +35,9 @@ class CephISCSIGatewayCharm(CharmBase): def __init__(self, framework, key): super().__init__(framework, key) + self.state.set_default(is_started=False) self.framework.observe(self.on.install, self) + self.framework.observe(self.on.update_status, self) self.framework.observe(self.on.ceph_client_relation_joined, self) self.ceph_client = interface_ceph_client.CephClientRequires( self, @@ -42,6 +48,12 @@ def on_install(self, event): apt_update(fatal=True) apt_install(self.PACKAGES, fatal=True) + def on_update_status(self, event): + if self.state.is_started: + self.model.unit.status = ActiveStatus('Unit is ready') + else: + self.model.unit.status = WaitingStatus('not ready for reasons') + def on_ceph_client_relation_joined(self, event): self.ceph_client.create_replicated_pool('rbd') self.ceph_client.request_ceph_permissions( @@ -77,6 +89,8 @@ def render_configs(): config_file, ceph_context) 
render_configs() + self.state.is_started = True + self.model.unit.status = ActiveStatus('Unit is ready') if __name__ == '__main__': diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index 14b380e4..8d44cd78 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -1,13 +1,13 @@ # This file is managed centrally. If you find the need to modify this as a # one-off, please don't. Instead, consult #openstack-charms and ask about # requirements management in charms via bot-control. Thank you. -# -# Lint and unit test requirements +charm-tools>=2.4.4 +coverage>=3.6 +mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -charms.reactive -mock>=1.2 -nose>=1.3.7 -coverage>=3.6 -git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack +git+https://github.com/gnuoy/zaza.git@force-focal#egg=zaza +git+https://github.com/gnuoy/zaza-openstack-tests.git@ceph-iscsi-tests#egg=zaza.openstack +pytz # workaround for 14.04 pip/tox +pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-iscsi/tests/01-setup-client-apt.sh b/ceph-iscsi/tests/01-setup-client-apt.sh deleted file mode 100755 index 65a97474..00000000 --- a/ceph-iscsi/tests/01-setup-client-apt.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -client="ubuntu/0" - -juju run --unit $client "apt install --yes open-iscsi multipath-tools" -juju run --unit $client "systemctl start iscsi" -juju run --unit $client "systemctl start iscsid" diff --git a/ceph-iscsi/tests/02-setup-gw.sh b/ceph-iscsi/tests/02-setup-gw.sh deleted file mode 100755 index 25eeef71..00000000 --- a/ceph-iscsi/tests/02-setup-gw.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -x - -gw1="ceph-iscsi/0" -gw2="ceph-iscsi/1" - -gw1_hostname=$(juju run --unit $gw1 "hostname -f") -gw2_hostname=$(juju run --unit $gw2 "hostname -f") -gw1_ip=$(juju status $gw1 --format=oneline | awk '{print $3}' | tr -d \\n ) -gw2_ip=$(juju status $gw2 --format=oneline | awk '{print $3}' | tr -d \\n ) -client_initiatorname=$(juju run --unit ubuntu/0 "grep -E '^InitiatorName' /etc/iscsi/initiatorname.iscsi") -client_initiatorname=$(echo $client_initiatorname | awk 'BEGIN {FS="="} {print $2}') -echo "!$gw1_hostname!" -echo "!$gw2_hostname!" -echo "!$gw1_ip!" -echo "!$gw2_ip!" -echo "!$client_initiatorname!"
- -gw_iqn="iqn.2003-01.com.canonical.iscsi-gw:iscsi-igw" - -juju run --unit $gw1 "gwcli /iscsi-targets/ create $gw_iqn" -juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/gateways create $gw1_hostname $gw1_ip skipchecks=true" -juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/gateways create $gw2_hostname $gw2_ip skipchecks=true" -juju run --unit $gw1 "gwcli /disks create pool=rbd image=disk_1 size=1G" -juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts create ${client_initiatorname}" -juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts/${client_initiatorname} auth username=myiscsiusername password=myiscsipassword" -juju run --unit $gw1 "gwcli /iscsi-targets/${gw_iqn}/hosts/${client_initiatorname} disk add rbd/disk_1" diff --git a/ceph-iscsi/tests/03-setup-client-iscsi.sh b/ceph-iscsi/tests/03-setup-client-iscsi.sh deleted file mode 100755 index df0913cf..00000000 --- a/ceph-iscsi/tests/03-setup-client-iscsi.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -client="ubuntu/0" - -gw1="ceph-iscsi/0" - -gw1_ip=$(juju status $gw1 --format=oneline | awk '{print $3}' | tr -d \\n ) - -juju run --unit $client "iscsiadm -m discovery -t st -p $gw1_ip" - -target_name="iqn.2003-01.com.canonical.iscsi-gw:iscsi-igw" - -juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.authmethod --value=CHAP" -juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.username --value=myiscsiusername" -juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --op=update --name node.session.auth.password --value=myiscsipassword" -juju run --unit $client "iscsiadm --mode node --targetname ${target_name} --login" -sleep 5 -juju ssh ubuntu/0 "ls -l /dev/dm-0" diff --git a/ceph-iscsi/tests/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml similarity index 96% rename from ceph-iscsi/tests/focal.yaml rename to ceph-iscsi/tests/bundles/focal.yaml index d045ad52..3e62783b 100644 --- a/ceph-iscsi/tests/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -4,7 +4,7 @@ applications: charm: cs:ubuntu num_units: 1 ceph-iscsi: - charm: ../ + charm: ../../ series: focal num_units: 2 ceph-osd: diff --git a/ceph-iscsi/tests/deploy.sh b/ceph-iscsi/tests/deploy.sh deleted file mode 100755 index d8e831d1..00000000 --- a/ceph-iscsi/tests/deploy.sh +++ /dev/null @@ -1 +0,0 @@ -juju deploy --force ./focal.yaml diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml new file mode 100644 index 00000000..28dd4613 --- /dev/null +++ b/ceph-iscsi/tests/tests.yaml @@ -0,0 +1,9 @@ +charm_name: ceph-iscsi +gate_bundles: + - focal +smoke_bundles: + - focal +configure: + - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup +tests: + - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt new file mode 100644 index 00000000..7facf073 --- /dev/null +++ b/ceph-iscsi/todo.txt @@ -0,0 +1,13 @@ +Confirm zaza tests are working +Remove hardcoded ceph pool name and expose as a config option +Write spec +Fix workload status so it reports missing relations +Implement pause/resume +Add iscsi target create action +Add series upgrade +Write README +Move to openstack-charmers +Refactor ceph broker code in charm helpers +Rewrite ceph-client interface to stop using any relation* commands via charmhelpers +implement source config option +Proper Update Status diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 
f8f50927..9c27bbfa 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -1,12 +1,12 @@ -# Source charm: ./tox.ini +# Source charm (with zaza): ./src/tox.ini # This file is managed centrally by release-tools and should not be modified # within individual charm repos. See the 'global' dir contents for available # choices of tox.ini for OpenStack Charms: # https://github.com/openstack-charmers/release-tools [tox] +envlist = pep8 skipsdist = True -envlist = pep8,py3 # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. @@ -15,78 +15,36 @@ skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 - TERM=linux - LAYER_PATH={toxinidir}/layers - INTERFACE_PATH={toxinidir}/interfaces - JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY +whitelist_externals = juju +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt install_command = pip install {opts} {packages} -deps = - -r{toxinidir}/requirements.txt -[testenv:build] +[testenv:pep8] basepython = python3 -commands = - charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} +deps=charm-tools +commands = charm-proof -[testenv:py3] +[testenv:func-noop] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py36] -basepython = python3.6 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} +commands = + functest-run-suite --help -[testenv:pep8] +[testenv:func] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} src unit_tests +commands = + functest-run-suite --keep-model -[testenv:cover] -# Technique based heavily upon -# https://github.com/openstack/nova/blob/master/tox.ini +[testenv:func-smoke] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -setenv = - {[testenv]setenv} - PYTHON=coverage run commands = - coverage erase - stestr run --slowest {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report + functest-run-suite --keep-model --smoke -[coverage:run] -branch = True -concurrency = multiprocessing -parallel = True -source = - . 
-omit = - .tox/* - */charmhelpers/* - unit_tests/* - -[testenv:venv] +[testenv:func-target] basepython = python3 -commands = {posargs} +commands = + functest-run-suite --keep-model --bundle {posargs} -[flake8] -# E402 ignore necessary for path append before sys module import in actions -ignore = E402,W504 \ No newline at end of file +[testenv:venv] +commands = {posargs} \ No newline at end of file From ba090f81f80289d362b7fb9058cdc9e16797225f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Feb 2020 15:21:52 +0000 Subject: [PATCH 1891/2699] Change pool name --- ceph-iscsi/src/charm.py | 2 +- ceph-iscsi/templates/iscsi-gateway.cfg | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 74ce08fa..8c330d41 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -55,7 +55,7 @@ def on_update_status(self, event): self.model.unit.status = WaitingStatus('not ready for reasons') def on_ceph_client_relation_joined(self, event): - self.ceph_client.create_replicated_pool('rbd') + self.ceph_client.create_replicated_pool('iscsi') self.ceph_client.request_ceph_permissions( 'ceph-iscsi', self.CEPH_CAPABILITIES) diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 26af98f0..d3361ff8 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -5,6 +5,7 @@ logger_level = DEBUG cluster_name = ceph cluster_client_name = client.ceph-iscsi +pool = iscsi # # # Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph # # directory and reference the filename here From 277a19349ab0b00ba841c33c4ebbd0936643da83 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Feb 2020 16:42:41 +0000 Subject: [PATCH 1892/2699] Add pause/resume and a bit of WL status --- ceph-iscsi/actions.yaml | 15 +++++++++++++ ceph-iscsi/src/charm.py | 49 ++++++++++++++++++++++++++++++++++------- 2 files changed, 56 insertions(+), 8 deletions(-) create mode 100644 ceph-iscsi/actions.yaml diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml new file mode 100644 index 00000000..892bd15a --- /dev/null +++ b/ceph-iscsi/actions.yaml @@ -0,0 +1,15 @@ +openstack-upgrade: + description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True. +pause: + description: | + Pause neutron-api services. + If the neutron-api deployment is clustered using the hacluster charm, the + corresponding hacluster unit on the node must first be paused as well. + Not doing so may lead to an interruption of service. +resume: + description: | + Resume neutron-api services. + If the neutron-api deployment is clustered using the hacluster charm, the + corresponding hacluster unit on the node must be resumed as well.
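An aside on patch 1891 above: after that change both the charm and the rendered template hardcode the 'iscsi' pool, and the series' own todo list flags exposing the pool name as a config option. A minimal sketch of what that plumbing could look like, assuming a hypothetical rbd-pool-name option declared in config.yaml (the option name is invented here; the patches themselves keep the literal 'iscsi'):

    # Hypothetical sketch only: 'rbd-pool-name' is not an option defined by
    # these patches; it illustrates the todo item "Remove hardcoded ceph pool
    # name and expose as a config option".
    def on_ceph_client_relation_joined(self, event):
        pool_name = self.framework.model.config.get('rbd-pool-name', 'iscsi')
        # Ask the ceph broker for a replicated pool of the configured name.
        self.ceph_client.create_replicated_pool(pool_name)
        self.ceph_client.request_ceph_permissions(
            'ceph-iscsi', self.CEPH_CAPABILITIES)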
+security-checklist: + description: Validate the running configuration against the OpenStack security guides checklist diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 8c330d41..22981ab4 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -13,12 +13,14 @@ from ops.main import main from ops.model import ( ActiveStatus, + MaintenanceStatus, WaitingStatus, ) from charmhelpers.fetch import ( apt_install, apt_update, ) +import charmhelpers.contrib.openstack.utils as os_utils import charmhelpers.core.host as ch_host import charmhelpers.core.templating as ch_templating import interface_ceph_client @@ -33,9 +35,15 @@ class CephISCSIGatewayCharm(CharmBase): "mon", "allow *", "mgr", "allow r"] + RESTART_MAP = { + '/etc/ceph/ceph.conf': ['rbd-target-api'], + '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'], + '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} + def __init__(self, framework, key): super().__init__(framework, key) self.state.set_default(is_started=False) + self.state.set_default(is_paused=False) self.framework.observe(self.on.install, self) self.framework.observe(self.on.update_status, self) self.framework.observe(self.on.ceph_client_relation_joined, self) @@ -43,17 +51,25 @@ def __init__(self, framework, key): self, 'ceph-client') self.framework.observe(self.ceph_client.on.pools_available, self) + self.framework.observe(self.on.pause_action, self) def on_install(self, event): apt_update(fatal=True) apt_install(self.PACKAGES, fatal=True) - def on_update_status(self, event): + def update_status(self): + if self.state.is_paused: + self.model.unit.status = MaintenanceStatus( + "Paused. Use 'resume' action to resume normal service.") + return if self.state.is_started: self.model.unit.status = ActiveStatus('Unit is ready') else: self.model.unit.status = WaitingStatus('not ready for reasons') + def on_update_status(self, event): + self.update_status() + def on_ceph_client_relation_joined(self, event): self.ceph_client.create_replicated_pool('iscsi') self.ceph_client.request_ceph_permissions( @@ -69,10 +85,6 @@ def on_pools_available(self, event): ceph_context.update(self.ceph_client.get_pool_data()) ceph_context['mon_hosts'] = ' '.join(ceph_context['mon_hosts']) - restart_map = { - '/etc/ceph/ceph.conf': ['rbd-target-api'], - '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'], - '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} def daemon_reload_and_restart(service_name): subprocess.check_call(['systemctl', 'daemon-reload']) @@ -81,7 +93,7 @@ def daemon_reload_and_restart(service_name): rfuncs = { 'rbd-target-api': daemon_reload_and_restart} - @ch_host.restart_on_change(restart_map, restart_functions=rfuncs) + @ch_host.restart_on_change(self.RESTART_MAP, restart_functions=rfuncs) def render_configs(): for config_file in restart_map.keys(): ch_templating.render( @@ -90,8 +102,29 @@ def render_configs(): ceph_context) render_configs() self.state.is_started = True - self.model.unit.status = ActiveStatus('Unit is ready') - + self.update_status() + + def services(self): + _svcs = [] + for svc in self.RESTART_MAP.values(): + _svcs.extend(svc) + return list(set(_svcs)) + + def on_pause_action(self, event): + _, messages = os_utils.manage_payload_services( + 'pause', + services=self.services(), + charm_func=None) + self.state.is_paused = True + self.update_status() + + def on_resume_action(self, event): + _, messages = os_utils.manage_payload_services( + 'resume', + services=self.services(), + charm_func=None) + self.state.is_paused = 
False + self.update_status() if __name__ == '__main__': main(CephISCSIGatewayCharm) From f10c77ec68ed43b82cb97e97b0021ca4818e76d0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 28 Feb 2020 16:37:30 +0000 Subject: [PATCH 1893/2699] Next cut --- ceph-iscsi/README.md | 3 + ceph-iscsi/actions.yaml | 40 ++++- ceph-iscsi/charm-prep.sh | 1 + ceph-iscsi/metadata.yaml | 3 + ceph-iscsi/mod/interface-ceph-client | 2 +- ceph-iscsi/mod/ops_openstack/ops_openstack.py | 74 +++++++++ ceph-iscsi/src/charm.py | 145 +++++++++++------- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 93 +++++++++++ ceph-iscsi/templates/iscsi-gateway.cfg | 4 + ceph-iscsi/todo.txt | 4 + 10 files changed, 304 insertions(+), 65 deletions(-) create mode 100644 ceph-iscsi/mod/ops_openstack/ops_openstack.py create mode 100644 ceph-iscsi/src/interface_ceph_iscsi_peer.py diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 88ae53cc..d10b2017 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -16,3 +16,6 @@ cd test ./02-setup-gw.sh ./03-setup-client-iscsi.sh ``` + +To run the charm tests (tested on OpenStack provider): +tox -e func-smoke diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 892bd15a..533fb782 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -1,15 +1,43 @@ -openstack-upgrade: - description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True. pause: description: | - Pause neutron-api services. - If the neutron-api deployment is clustered using the hacluster charm, the + Pause ceph-iscsi services. + If the ceph-iscsi deployment is clustered using the hacluster charm, the corresponding hacluster unit on the node must first be paused as well. Not doing so may lead to an interruption of service. resume: description: | - Resume neutron-api services. - If the neutron-api deployment is clustered using the hacluster charm, the + Resume ceph-iscsi services. + If the ceph-iscsi deployment is clustered using the hacluster charm, the corresponding hacluster unit on the node must be resumed as well. 
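In operation these two actions are invoked through Juju (for example juju run-action ceph-iscsi/0 pause --wait, assuming Juju 2.x action syntax). Condensed from the diffs above, the handler pattern is simply: derive the payload service list from RESTART_MAP, hand it to charmhelpers, and record the paused state so update_status() reports it. A commented restatement, assuming manage_payload_services stops or starts the listed systemd services as the charmhelpers API documents:

    import charmhelpers.contrib.openstack.utils as os_utils

    def services(self):
        # De-duplicated union of every service named in RESTART_MAP.
        _svcs = []
        for svc in self.RESTART_MAP.values():
            _svcs.extend(svc)
        return list(set(_svcs))

    def on_pause_action(self, event):
        # Stop the payload services and remember the operator's intent;
        # update_status() then reports the paused state.
        os_utils.manage_payload_services('pause', services=self.services())
        self.state.is_paused = True
        self.update_status()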
security-checklist: description: Validate the running configuration against the OpenStack security guides checklist +create-target: + description: "Create a new iSCSI target" + params: + gateway-units: + type: string + default: writeback + description: "Space separated list of gateway units e.g. 'ceph-iscsi/0 ceph-iscsi/1'" + iqn: + type: string + default: writeback + description: "iSCSI Qualified Name" + image-size: + type: string + default: 1G + description: "Target size" + image-name: + type: string + default: disk_1 + description: "Image name" + client-initiatorname: + type: string + default: 1G + description: "The initiator name of the client that will mount the target" + required: + - gateway-units + - iqn + - image-size + - image-name + - client-initiatorname + diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index 4e57d26d..220240f0 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -8,3 +8,4 @@ git submodule init git submodule update (cd lib; ln -s ../mod/operator/ops;) (cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) +(cd lib; ln -s ../mod/ops_openstack/ops_openstack.py;) diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 555ffc95..bb0bb397 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -15,5 +15,8 @@ subordinate: false requires: ceph-client: interface: ceph-client +peers: + cluster: + interface: ceph-iscsi-peer extra-bindings: public: diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client index 93b4661b..cb3557ba 160000 --- a/ceph-iscsi/mod/interface-ceph-client +++ b/ceph-iscsi/mod/interface-ceph-client @@ -1 +1 @@ -Subproject commit 93b4661be184753038da626d3a04d3855f948430 +Subproject commit cb3557ba8aa2997936e19ed876d2b2b962a75868 diff --git a/ceph-iscsi/mod/ops_openstack/ops_openstack.py b/ceph-iscsi/mod/ops_openstack/ops_openstack.py new file mode 100644 index 00000000..061a7390 --- /dev/null +++ b/ceph-iscsi/mod/ops_openstack/ops_openstack.py @@ -0,0 +1,74 @@ +from ops.charm import CharmBase +from ops.framework import ( + StoredState, +) + +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from ops.model import ( + ActiveStatus, + MaintenanceStatus, + WaitingStatus, +) +import charmhelpers.contrib.openstack.utils as os_utils +import logging +logger = logging.getLogger() + +class OSBaseCharm(CharmBase): + state = StoredState() + + PACKAGES = [] + + RESTART_MAP = {} + + def __init__(self, framework, key): + super().__init__(framework, key) + self.state.set_default(is_started=False) + self.state.set_default(is_paused=False) + self.framework.observe(self.on.install, self) + self.framework.observe(self.on.update_status, self) + self.framework.observe(self.on.pause_action, self) + self.framework.observe(self.on.resume_action, self) + + def on_install(self, event): + logging.info("Installing packages") + apt_update(fatal=True) + apt_install(self.PACKAGES, fatal=True) + + def update_status(self): + logging.info("Updating status") + if self.state.is_paused: + self.model.unit.status = MaintenanceStatus( + "Paused. 
Use 'resume' action to resume normal service.") + if self.state.is_started: + self.model.unit.status = ActiveStatus('Unit is ready') + else: + self.model.unit.status = WaitingStatus('Not ready for reasons') + logging.info("Status updated") + + def on_update_status(self, event): + self.update_status() + + def services(self): + _svcs = [] + for svc in self.RESTART_MAP.values(): + _svcs.extend(svc) + return list(set(_svcs)) + + def on_pause_action(self, event): + _, messages = os_utils.manage_payload_services( + 'pause', + services=self.services(), + charm_func=None) + self.state.is_paused = True + self.update_status() + + def on_resume_action(self, event): + _, messages = os_utils.manage_payload_services( + 'resume', + services=self.services(), + charm_func=None) + self.state.is_paused = False + self.update_status() diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 22981ab4..fad227e7 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -1,34 +1,50 @@ #!/usr/bin/env python3 +import logging import os +import socket import subprocess import sys +import string +import secrets sys.path.append('lib') -from ops.charm import CharmBase from ops.framework import ( StoredState, ) from ops.main import main -from ops.model import ( - ActiveStatus, - MaintenanceStatus, - WaitingStatus, -) -from charmhelpers.fetch import ( - apt_install, - apt_update, -) -import charmhelpers.contrib.openstack.utils as os_utils import charmhelpers.core.host as ch_host import charmhelpers.core.templating as ch_templating import interface_ceph_client - - -class CephISCSIGatewayCharm(CharmBase): +import interface_ceph_iscsi_peer + +import ops_openstack + +logger = logging.getLogger() + +class GatewayClient(): + + CREATE_TARGET = "/iscsi-targets/ create {gw_iqn}" + def run(self, path, cmd): + _cmd = ['gwcli', path] + _cmd.extend(cmd.split()) + logging.info(_cmd) + subprocess.check_call(_cmd) + + def create_target(self, gw_iqn): + self.run( + "/iscsi-targets/", + "create {gw_iqn}".format(gw_iqn=gw_iqn)) + + def add_gateway_to_target(self, target, gateway_ip, gateway_fqdn): + self.run( + "/iscsi-targets/{}/gateways/".format(target), + "create {} {}".format(gateway_fqdn, gateway_ip), + ) + +class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): state = StoredState() - PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] CEPH_CAPABILITIES = [ "osd", "allow *", @@ -40,51 +56,81 @@ class CephISCSIGatewayCharm(CharmBase): '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'], '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} + DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" + def __init__(self, framework, key): super().__init__(framework, key) - self.state.set_default(is_started=False) - self.state.set_default(is_paused=False) - self.framework.observe(self.on.install, self) - self.framework.observe(self.on.update_status, self) + self.state.set_default(target_created=False) self.framework.observe(self.on.ceph_client_relation_joined, self) self.ceph_client = interface_ceph_client.CephClientRequires( self, 'ceph-client') self.framework.observe(self.ceph_client.on.pools_available, self) - self.framework.observe(self.on.pause_action, self) - - def on_install(self, event): - apt_update(fatal=True) - apt_install(self.PACKAGES, fatal=True) - - def update_status(self): - if self.state.is_paused: - self.model.unit.status = MaintenanceStatus( - "Paused. 
Use 'resume' action to resume normal service.") + self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( + self, + 'cluster') + self.framework.observe(self.peers.on.has_peers, self) + self.framework.observe(self.peers.on.ready_peers, self) + + def setup_default_target(self): + gw_client = GatewayClient() + gw_client.create_target(self.DEFAULT_TARGET) + gw_client.add_gateway_to_target( + self.DEFAULT_TARGET, + self.peers.cluster_bind_address, + socket.getfqdn()) + for gw_unit, gw_config in self.peers.ready_peer_details.items(): + gw_client.add_gateway_to_target( + self.DEFAULT_TARGET, + gw_config['ip'], + gw_config['fqdn']) + self.state.target_created = True + + def on_ready_peers(self, event): + if not self.model.unit.is_leader(): + logging.info("Leader should do setup") + return + if not self.state.is_started: + logging.info("Cannot perform setup yet, not started") + event.defer() + return + if self.state.target_created: + logging.info("Initial target setup already complete") return - if self.state.is_started: - self.model.unit.status = ActiveStatus('Unit is ready') else: - self.model.unit.status = WaitingStatus('not ready for reasons') + self.setup_default_target() - def on_update_status(self, event): - self.update_status() + def on_has_peers(self, event): + logging.info("Unit has peers") + if self.model.unit.is_leader() and not self.peers.admin_password: + logging.info("Setting admin password") + alphabet = string.ascii_letters + string.digits + password = ''.join(secrets.choice(alphabet) for i in range(8)) + self.peers.set_admin_password(password) def on_ceph_client_relation_joined(self, event): + logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool('iscsi') + logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', self.CEPH_CAPABILITIES) def on_pools_available(self, event): + logging.info("on_pools_available") + if not self.peers.admin_password: + logging.info("Defering setup") + event.defer() + return ceph_context = { 'use_syslog': str(self.framework.model.config['use-syslog']).lower(), - 'loglevel': self.framework.model.config['loglevel'] + 'loglevel': self.framework.model.config['loglevel'], + 'admin_password': self.peers.admin_password, } ceph_context.update(self.ceph_client.get_pool_data()) ceph_context['mon_hosts'] = ' '.join(ceph_context['mon_hosts']) - + ceph_context['gw_hosts'] = ' '.join(sorted(self.peers.peer_addresses)) def daemon_reload_and_restart(service_name): subprocess.check_call(['systemctl', 'daemon-reload']) @@ -95,36 +141,19 @@ def daemon_reload_and_restart(service_name): @ch_host.restart_on_change(self.RESTART_MAP, restart_functions=rfuncs) def render_configs(): - for config_file in restart_map.keys(): + for config_file in self.RESTART_MAP.keys(): ch_templating.render( os.path.basename(config_file), config_file, ceph_context) + logging.info("Rendering config") render_configs() + logging.info("Setting started state") + self.peers.announce_ready() self.state.is_started = True self.update_status() + logging.info("on_pools_available: status updated") - def services(self): - _svcs = [] - for svc in self.RESTART_MAP.values(): - _svcs.extend(svc) - return list(set(_svcs)) - - def on_pause_action(self, event): - _, messages = os_utils.manage_payload_services( - 'pause', - services=self.services(), - charm_func=None) - self.state.is_paused = True - self.update_status() - - def on_resume_action(self, event): - _, messages = os_utils.manage_payload_services( - 'resume', - services=self.services(), 
- charm_func=None) - self.state.is_paused = False - self.update_status() if __name__ == '__main__': main(CephISCSIGatewayCharm) diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py new file mode 100644 index 00000000..c3309262 --- /dev/null +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 + +import logging +import socket + +from ops.framework import ( + StoredState, + EventBase, + EventsBase, + EventSource, + Object) + + +class HasPeersEvent(EventBase): + pass + +class ReadyPeersEvent(EventBase): + pass + + +class CephISCSIGatewayPeerEvents(EventsBase): + has_peers = EventSource(HasPeersEvent) + ready_peers = EventSource(ReadyPeersEvent) + + +class CephISCSIGatewayPeers(Object): + + on = CephISCSIGatewayPeerEvents() + state = StoredState() + PASSWORD_KEY = 'admin_password' + READY_KEY = 'gateway_ready' + FQDN_KEY = 'gateway_fqdn' + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe( + charm.on[relation_name].relation_changed, + self.on_changed) + + def on_changed(self, event): + logging.info("CephISCSIGatewayPeers on_changed") + self.on.has_peers.emit() + if self.ready_peer_details: + self.on.ready_peers.emit() + + def set_admin_password(self, password): + logging.info("Setting admin password") + self.peer_rel.data[self.peer_rel.app][self.PASSWORD_KEY] = password + + def announce_ready(self): + logging.info("announcing ready") + self.peer_rel.data[self.framework.model.unit][self.READY_KEY] = 'True' + self.peer_rel.data[self.framework.model.unit][self.FQDN_KEY] = socket.getfqdn() + + @property + def ready_peer_details(self): + peers = {} + for u in self.peer_rel.units: + if self.peer_rel.data[u].get(self.READY_KEY) == 'True': + peers[u.name] = { + 'fqdn': self.peer_rel.data[u][self.FQDN_KEY], + 'ip': self.peer_rel.data[u]['ingress-address']} + return peers + + @property + def is_joined(self): + return self.peer_rel is not None + + @property + def peer_rel(self): + return self.framework.model.get_relation(self.relation_name) + + @property + def peer_binding(self): + return self.framework.model.get_binding(self.peer_rel) + + @property + def cluster_bind_address(self): + return self.peer_binding.network.bind_address + + @property + def admin_password(self): + # https://github.com/canonical/operator/issues/148 + # return self.peer_rel.data[self.peer_rel.app].get(self.PASSWORD_KEY) + return 'hardcodedpassword' + + @property + def peer_addresses(self): + addresses = [] + for u in self.peer_rel.units: + addresses.append(self.peer_rel.data[u]['ingress-address']) + return addresses diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index d3361ff8..46521ca5 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -24,5 +24,9 @@ gateway_keyring = ceph.client.ceph-iscsi.keyring # # # To support the API, the bear minimum settings are: api_secure = false +api_user = admin +api_password = {{ admin_password }} +api_port = 5000 +trusted_ip_list = {{ gw_hosts }} # # diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index 7facf073..74340f1d 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -11,3 +11,7 @@ Refactor ceph broker code in charm helpers Rewrite ceph-client interface to stop using any relation* commands via charmhelpers implement source config option Proper Update Status +admin password +Certificates interface 
+trusted_ips +security checklist From 8c1312befbf3c97bc1e80941ea51083ca9b61aa0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 29 Feb 2020 12:43:18 +0000 Subject: [PATCH 1894/2699] Add create-target action --- ceph-iscsi/actions.yaml | 9 ++- ceph-iscsi/mod/ops_openstack/ops_openstack.py | 74 ------------------- ceph-iscsi/src/charm.py | 74 +++++++++++++++---- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 17 +++-- ceph-iscsi/todo.txt | 38 +++++----- 5 files changed, 102 insertions(+), 110 deletions(-) delete mode 100644 ceph-iscsi/mod/ops_openstack/ops_openstack.py diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 533fb782..abf6fd3e 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -34,10 +34,17 @@ create-target: type: string default: 1G description: "The initiator name of the client that will mount the target" + client-username: + type: string + description: "The CHAPs username to be created for the client" + client-password: + type: string + description: "The CHAPs password to be created for the client" required: - gateway-units - iqn - image-size - image-name - client-initiatorname - + - client-username + - client-password diff --git a/ceph-iscsi/mod/ops_openstack/ops_openstack.py b/ceph-iscsi/mod/ops_openstack/ops_openstack.py deleted file mode 100644 index 061a7390..00000000 --- a/ceph-iscsi/mod/ops_openstack/ops_openstack.py +++ /dev/null @@ -1,74 +0,0 @@ -from ops.charm import CharmBase -from ops.framework import ( - StoredState, -) - -from charmhelpers.fetch import ( - apt_install, - apt_update, -) -from ops.model import ( - ActiveStatus, - MaintenanceStatus, - WaitingStatus, -) -import charmhelpers.contrib.openstack.utils as os_utils -import logging -logger = logging.getLogger() - -class OSBaseCharm(CharmBase): - state = StoredState() - - PACKAGES = [] - - RESTART_MAP = {} - - def __init__(self, framework, key): - super().__init__(framework, key) - self.state.set_default(is_started=False) - self.state.set_default(is_paused=False) - self.framework.observe(self.on.install, self) - self.framework.observe(self.on.update_status, self) - self.framework.observe(self.on.pause_action, self) - self.framework.observe(self.on.resume_action, self) - - def on_install(self, event): - logging.info("Installing packages") - apt_update(fatal=True) - apt_install(self.PACKAGES, fatal=True) - - def update_status(self): - logging.info("Updating status") - if self.state.is_paused: - self.model.unit.status = MaintenanceStatus( - "Paused. 
Use 'resume' action to resume normal service.") - if self.state.is_started: - self.model.unit.status = ActiveStatus('Unit is ready') - else: - self.model.unit.status = WaitingStatus('Not ready for reasons') - logging.info("Status updated") - - def on_update_status(self, event): - self.update_status() - - def services(self): - _svcs = [] - for svc in self.RESTART_MAP.values(): - _svcs.extend(svc) - return list(set(_svcs)) - - def on_pause_action(self, event): - _, messages = os_utils.manage_payload_services( - 'pause', - services=self.services(), - charm_func=None) - self.state.is_paused = True - self.update_status() - - def on_resume_action(self, event): - _, messages = os_utils.manage_payload_services( - 'resume', - services=self.services(), - charm_func=None) - self.state.is_paused = False - self.update_status() diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index fad227e7..4bc7c9b7 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -24,24 +24,46 @@ logger = logging.getLogger() class GatewayClient(): - - CREATE_TARGET = "/iscsi-targets/ create {gw_iqn}" + def run(self, path, cmd): _cmd = ['gwcli', path] _cmd.extend(cmd.split()) logging.info(_cmd) + print(_cmd) subprocess.check_call(_cmd) - def create_target(self, gw_iqn): + def create_target(self, iqn): self.run( "/iscsi-targets/", - "create {gw_iqn}".format(gw_iqn=gw_iqn)) - - def add_gateway_to_target(self, target, gateway_ip, gateway_fqdn): + "create {}".format(iqn)) + + def add_gateway_to_target(self, iqn, gateway_ip, gateway_fqdn): + self.run( + "/iscsi-targets/{}/gateways/".format(iqn), + "create {} {}".format(gateway_fqdn, gateway_ip)) + + def create_pool(self, pool_name, image_name, image_size): self.run( - "/iscsi-targets/{}/gateways/".format(target), - "create {} {}".format(gateway_fqdn, gateway_ip), - ) + "/disks", + "create pool={} image={} size={}".format( + pool_name, + image_name, + image_size)) + + def add_client_to_target(self, iqn, initiatorname): + self.run( + "/iscsi-targets/{}/hosts/".format(iqn), + "create {}".format(initiatorname)) + + def add_client_auth(self, iqn, initiatorname, username, password): + self.run( + "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), + "auth username={} password={}".format(username, password)) + + def add_disk_to_client(self, iqn, initiatorname, pool_name, image_name): + self.run( + "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), + "disk add {}/{}".format(pool_name, image_name)) class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): state = StoredState() @@ -71,14 +93,40 @@ def __init__(self, framework, key): 'cluster') self.framework.observe(self.peers.on.has_peers, self) self.framework.observe(self.peers.on.ready_peers, self) + self.framework.observe(self.on.create_target_action, self) + + def on_create_target_action(self, event): + gw_client = GatewayClient() + gw_client.create_target(event.params['iqn']) + for gw_unit, gw_config in self.peers.ready_peer_details.items(): + added_gateways = [] + if gw_unit in event.params['gateway-units']: + gw_client.add_gateway_to_target( + event.params['iqn'], + gw_config['ip'], + gw_config['fqdn']) + added_gateways.append(gw_unit) + gw_client.create_pool( + 'iscsi', + event.params['image-name'], + event.params['image-size']) + gw_client.add_client_to_target( + event.params['iqn'], + event.params['client-initiatorname']) + gw_client.add_client_auth( + event.params['iqn'], + event.params['client-initiatorname'], + event.params['client-username'], + event.params['client-password']) + 
gw_client.add_disk_to_client( + event.params['iqn'], + event.params['client-initiatorname'], + 'iscsi', + event.params['image-name']) def setup_default_target(self): gw_client = GatewayClient() gw_client.create_target(self.DEFAULT_TARGET) - gw_client.add_gateway_to_target( - self.DEFAULT_TARGET, - self.peers.cluster_bind_address, - socket.getfqdn()) for gw_unit, gw_config in self.peers.ready_peer_details.items(): gw_client.add_gateway_to_target( self.DEFAULT_TARGET, diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index c3309262..26000ab0 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -51,11 +51,14 @@ def set_admin_password(self, password): def announce_ready(self): logging.info("announcing ready") self.peer_rel.data[self.framework.model.unit][self.READY_KEY] = 'True' - self.peer_rel.data[self.framework.model.unit][self.FQDN_KEY] = socket.getfqdn() + self.peer_rel.data[self.framework.model.unit][self.FQDN_KEY] = self.fqdn @property def ready_peer_details(self): - peers = {} + peers = { + self.framework.model.unit.name: { + 'fqdn': self.fqdn, + 'ip': self.cluster_bind_address}} for u in self.peer_rel.units: if self.peer_rel.data[u].get(self.READY_KEY) == 'True': peers[u.name] = { @@ -63,6 +66,10 @@ def ready_peer_details(self): 'ip': self.peer_rel.data[u]['ingress-address']} return peers + @property + def fqdn(self): + return socket.getfqdn() + @property def is_joined(self): return self.peer_rel is not None @@ -77,7 +84,7 @@ def peer_binding(self): @property def cluster_bind_address(self): - return self.peer_binding.network.bind_address + return str(self.peer_binding.network.bind_address) @property def admin_password(self): @@ -87,7 +94,7 @@ def admin_password(self): @property def peer_addresses(self): - addresses = [] + addresses = [self.cluster_bind_address] for u in self.peer_rel.units: addresses.append(self.peer_rel.data[u]['ingress-address']) - return addresses + return sorted(addresses) diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index 74340f1d..64fc4a7d 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -1,17 +1,21 @@ -Confirm zaza tests are working -Remove hardcoded ceph pool name and expose as a config option -Write spec -Fix workload status so it reports missing relations -Implement pause/resume -Add iscsi target create action -Add series upgrade -Write README -Move to openstack-charmers -Refactor ceph broker code in charm helpers -Rewrite ceph-client interface to stop using any relation* commands via charmhelpers -implement source config option -Proper Update Status -admin password -Certificates interface -trusted_ips -security checklist +Todo +* implement source config option +* Proper Update Status +* Remove hardcoded ceph pool name and expose as a config option +* Write spec +* Fix workload status so it reports missing relations +* Add series upgrade +* Write README +* Move to openstack-charmers +* Refactor ceph broker code in charm helpers +* Rewrite ceph-client interface to stop using any relation* commands via charmhelpers +* Certificates interface +* security checklist +* zaza tests for pause/resume + +Mostly Done +* trusted_ips +* zaza tests for creating and mounting a target +* Implement pause/resume +* Add iscsi target create action +* admin password From 5f3e8886132c0eed8ff79ebbfa30261f13b4880a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 29 Feb 2020 14:48:49 +0000 Subject: [PATCH 1895/2699] Add required relations --- 
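The REQUIRED_RELATIONS list added by this patch is consumed by the ops-openstack base class pulled in as a submodule; that implementation is not reproduced in the diff, so the following is only a plausible sketch of how such a list can surface missing relations in workload status:

    from ops.model import BlockedStatus

    def check_required_relations(charm):
        # Hypothetical helper: block the unit until every relation named in
        # REQUIRED_RELATIONS has been established.
        missing = [rel for rel in charm.REQUIRED_RELATIONS
                   if not charm.framework.model.get_relation(rel)]
        if missing:
            charm.framework.model.unit.status = BlockedStatus(
                'Missing relations: {}'.format(', '.join(missing)))
        return not missing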
ceph-iscsi/.gitmodules | 3 +++ ceph-iscsi/charm-prep.sh | 2 +- ceph-iscsi/mod/ops-openstack | 1 + ceph-iscsi/src/charm.py | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) create mode 160000 ceph-iscsi/mod/ops-openstack diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules index c03af6ad..bb016b77 100644 --- a/ceph-iscsi/.gitmodules +++ b/ceph-iscsi/.gitmodules @@ -4,3 +4,6 @@ [submodule "mod/interface-ceph-client"] path = mod/interface-ceph-client url = https://github.com/gnuoy/oper-interface-ceph-client.git +[submodule "mod/ops-openstack"] + path = mod/ops-openstack + url = https://github.com/gnuoy/ops-openstack.git diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index 220240f0..0bade361 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -8,4 +8,4 @@ git submodule init git submodule update (cd lib; ln -s ../mod/operator/ops;) (cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) -(cd lib; ln -s ../mod/ops_openstack/ops_openstack.py;) +(cd lib; ln -s ../mod/ops-openstack/ops_openstack.py) diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack new file mode 160000 index 00000000..518b4fbd --- /dev/null +++ b/ceph-iscsi/mod/ops-openstack @@ -0,0 +1 @@ +Subproject commit 518b4fbdc4201394192048eaa5b4f9d4ef79c3cc diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 4bc7c9b7..77fa75d6 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -79,6 +79,7 @@ class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" + REQUIRED_RELATIONS = ['ceph-client', 'cluster'] def __init__(self, framework, key): super().__init__(framework, key) From 384f42ec838605dc85e88288c76f8194ddc82d37 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sat, 29 Feb 2020 16:20:37 +0000 Subject: [PATCH 1896/2699] Setup and fix lint errors --- ceph-iscsi/charm-prep.sh | 3 +++ ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 3 ++- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 6 ++++-- ceph-iscsi/todo.txt | 6 +++--- ceph-iscsi/tox.ini | 6 +++--- 7 files changed, 17 insertions(+), 11 deletions(-) diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index 0bade361..0e5c2f48 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -9,3 +9,6 @@ git submodule update (cd lib; ln -s ../mod/operator/ops;) (cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) (cd lib; ln -s ../mod/ops-openstack/ops_openstack.py) +(cd mod/interface-ceph-client; git pull origin master) +(cd mod/operator; git pull origin master) +(cd mod/ops-openstack; git pull origin master) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 3a73427d..67254df6 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 3a73427dee96f49acfe25880924528a3e57834cc +Subproject commit 67254df6458e78e53f0f85ce80c1af3d7cff3205 diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index 518b4fbd..e8903fbe 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit 518b4fbdc4201394192048eaa5b4f9d4ef79c3cc +Subproject commit e8903fbe58fe76d23db9bebe0647fb9707a93460 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 77fa75d6..0585c55e 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -2,7 +2,6 @@ import logging 
import os -import socket import subprocess import sys import string @@ -23,6 +22,7 @@ logger = logging.getLogger() + class GatewayClient(): def run(self, path, cmd): @@ -65,6 +65,7 @@ def add_disk_to_client(self, iqn, initiatorname, pool_name, image_name): "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), "disk add {}/{}".format(pool_name, image_name)) + class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): state = StoredState() PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index 26000ab0..87eae24f 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -14,6 +14,7 @@ class HasPeersEvent(EventBase): pass + class ReadyPeersEvent(EventBase): pass @@ -34,6 +35,7 @@ class CephISCSIGatewayPeers(Object): def __init__(self, charm, relation_name): super().__init__(charm, relation_name) self.relation_name = relation_name + self.this_unit = self.framework.model.unit self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) @@ -50,8 +52,8 @@ def set_admin_password(self, password): def announce_ready(self): logging.info("announcing ready") - self.peer_rel.data[self.framework.model.unit][self.READY_KEY] = 'True' - self.peer_rel.data[self.framework.model.unit][self.FQDN_KEY] = self.fqdn + self.peer_rel.data[self.this_unit][self.READY_KEY] = 'True' + self.peer_rel.data[self.this_unit][self.FQDN_KEY] = self.fqdn @property def ready_peer_details(self): diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index 64fc4a7d..1c937fdd 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -1,9 +1,6 @@ Todo -* implement source config option -* Proper Update Status * Remove hardcoded ceph pool name and expose as a config option * Write spec -* Fix workload status so it reports missing relations * Add series upgrade * Write README * Move to openstack-charmers @@ -19,3 +16,6 @@ Mostly Done * Implement pause/resume * Add iscsi target create action * admin password +* implement source config option +* Proper Update Status +* Fix workload status so it reports missing relations diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 9c27bbfa..9b1151b6 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -23,8 +23,8 @@ install_command = [testenv:pep8] basepython = python3 -deps=charm-tools -commands = charm-proof +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 --ignore=E402 {posargs} src [testenv:func-noop] basepython = python3 @@ -47,4 +47,4 @@ commands = functest-run-suite --keep-model --bundle {posargs} [testenv:venv] -commands = {posargs} \ No newline at end of file +commands = {posargs} From 20ba8c7aec12e8929a21c4281b1772fde5abfbae Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 3 Mar 2020 09:51:14 +0100 Subject: [PATCH 1897/2699] Update charm build requirements Change-Id: I9cb7b90c9865f8f7422f70a9c1cb41e4186ec493 --- ceph-fs/requirements.txt | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 20f335d2..5f2fff3a 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -1,7 +1,10 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. 
+# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools # # Build requirements charm-tools>=2.4.4 +# importlib-resources 1.1.0 removed Python 3.5 support +importlib-resources<1.1.0 simplejson From 9a20bad920c65c5893bfffd040e1bfd0755e0c00 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 3 Mar 2020 09:51:14 +0100 Subject: [PATCH 1898/2699] Update charm build requirements Change-Id: I4e79385937f209aeece09a2902178710eac625a6 --- ceph-rbd-mirror/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index b1d4872c..5f2fff3a 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -5,4 +5,6 @@ # # Build requirements charm-tools>=2.4.4 +# importlib-resources 1.1.0 removed Python 3.5 support +importlib-resources<1.1.0 simplejson From 5cdbdf1ffe7f622ff156fc414f179118a7c71207 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 5 Mar 2020 13:30:21 +0100 Subject: [PATCH 1899/2699] Sync charm-helpers for py38, distro, and other updates Change-Id: I0a21ed8ad4bf740308bd822c9fe36872e78c9ee5 --- ceph-mon/Makefile | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 23 ++-- .../charmhelpers/contrib/openstack/utils.py | 113 ++++++++++++++++-- .../contrib/openstack/vaultlocker.py | 14 ++- .../contrib/storage/linux/ceph.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/hooks/charmhelpers/osplatform.py | 24 +++- ceph-mon/lib/charms_ceph/utils.py | 1 + 8 files changed, 156 insertions(+), 24 deletions(-) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile index 6a179635..c772e4c1 100644 --- a/ceph-mon/Makefile +++ b/ceph-mon/Makefile @@ -1,5 +1,5 @@ #!/usr/bin/make -PYTHON := /usr/bin/env python +PYTHON := /usr/bin/env python3 lint: @tox -e pep8 diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index d89d2cca..f2bb21e9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,7 +17,6 @@ import os import six import shutil -import sys import yaml import zipfile @@ -531,7 +530,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -560,23 +559,25 @@ def maybe_create_directory_for(path, user, group): @contextlib.contextmanager -def _py2_scandir(path): - """provide a py2 implementation of os.scandir if this module ever gets used - in a py2 charm (unlikely). uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for a +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for directory. 
:param path: the path to list the directories for :type path: str - :returns: Generator that provides _P27Direntry objects - :rtype: ContextManager[_P27Direntry] + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] """ for f in os.listdir(path): - yield _P27Direntry(f) + yield _FBDirectory(f) -class _P27Direntry(object): - """Mock a scandir Direntry object with enough to use in +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in clean_policyd_dir_for """ diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 161199c4..5c8f6eff 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -278,7 +278,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -326,7 +326,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), } @@ -555,9 +555,8 @@ def reset_os_release(): _os_rel = None -def os_release(package, base=None, reset_cache=False): - ''' - Returns OpenStack release codename from a cached global. +def os_release(package, base=None, reset_cache=False, source_key=None): + """Returns OpenStack release codename from a cached global. If reset_cache then unset the cached os_release version and return the freshly determined version. @@ -565,7 +564,20 @@ def os_release(package, base=None, reset_cache=False): If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. - ''' + + :param package: Name of package to determine release from + :type package: str + :param base: Fallback codename if endavours to determine from package fail + :type base: Optional[str] + :param reset_cache: Reset any cached codename value + :type reset_cache: bool + :param source_key: Name of source configuration option + (default: 'openstack-origin') + :type source_key: Optional[str] + :returns: OpenStack release codename + :rtype: str + """ + source_key = source_key or 'openstack-origin' if not base: base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel @@ -575,7 +587,7 @@ def os_release(package, base=None, reset_cache=False): return _os_rel _os_rel = ( get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or + get_os_codename_install_source(config(source_key)) or base) return _os_rel @@ -658,6 +670,93 @@ def config_value_changed(option): return current != saved +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service name. 
+ :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. + :rtype: Dict[str, str] + """ + notifications = {} + for rid in relation_ids(rel_name): + for unit in related_units(relid=rid): + ep_changed_json = relation_get( + rid=rid, + unit=unit, + attribute='ep_changed') + if ep_changed_json: + ep_changed = json.loads(ep_changed_json) + for service in service_names: + if ep_changed.get(service): + key = get_endpoint_key(service, rid, unit) + notifications[key] = ep_changed[service] + return notifications + + +def endpoint_changed(service_name, rel_name='identity-service'): + """Whether a new notification has been recieved for an endpoint. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: Whether endpoint has changed + :rtype: bool + """ + changed = False + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + [service_name], + rel_name=rel_name) + for key, nonce in notifications.items(): + if db.get(key) != nonce: + juju_log(('New endpoint change notification found: ' + '{}={}').format(key, nonce), + 'INFO') + changed = True + break + return changed + + +def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): + """Save the enpoint triggers in db so it can be tracked if they changed. + + :param service_names: List of service name. + :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + """ + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + service_names, + rel_name=rel_name) + for key, nonce in notifications.items(): + db.set(key, nonce) + + def save_script_rc(script_path="scripts/scriptrc", **env_vars): """ Write an rc file in the charm-delivered directory containing diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index c162de27..866a2697 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,7 +37,19 @@ def __init__(self, secret_backend=None): ) def __call__(self): - import hvac + try: + import hvac + except ImportError: + # BUG: #1862085 - if the relation is made to vault, but the + # 'encrypt' option is not made, then the charm errors with an + # import warning. This catches that, logs a warning, and returns + # with an empty context. + hookenv.log("VaultKVContext: trying to use hvac pythong module " + "but it's not available. 
Is secrets-stroage relation " + "made, but encrypt option not set?", + level=hookenv.WARNING) + # return an emptry context on hvac import error + return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 104977af..dabfb6c2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1042,7 +1042,7 @@ def filesystem_mounted(fs): def make_filesystem(blk_device, fstype='ext4', timeout=10): """Make a new filesystem on the specified block device.""" count = 0 - e_noent = os.errno.ENOENT + e_noent = errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: log('Gave up waiting on block device %s' % blk_device, diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 1b57e2ce..3edc0687 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,6 +25,7 @@ 'cosmic', 'disco', 'eoan', + 'focal' ) diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py index c7fd1363..78c81af5 100644 --- a/ceph-mon/hooks/charmhelpers/osplatform.py +++ b/ceph-mon/hooks/charmhelpers/osplatform.py @@ -1,4 +1,5 @@ import platform +import os def get_platform(): @@ -9,9 +10,13 @@ def get_platform(): This string is used to decide which platform module should be imported. """ # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: @@ -26,3 +31,16 @@ def get_platform(): else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 2b8a490d..90ebb1b6 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1372,6 +1372,7 @@ def create_keyrings(): if not output: # NOTE: key not yet created, raise exception and retry raise Exception + # NOTE: octopus wants newline at end of file LP: #1864706 output += '\n' write_file(_client_admin_keyring, output, owner=ceph_user(), group=ceph_user(), From 2d3b51be7d76c8e368789cc8b8b320427bb69b0b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 5 Mar 2020 13:30:33 +0100 Subject: [PATCH 1900/2699] Sync charm-helpers for py38, distro, and other updates Change-Id: I9e8848cc6dff2addd4db087740ba5bbc967577cf --- ceph-osd/Makefile | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 23 ++-- .../charmhelpers/contrib/openstack/utils.py | 113 ++++++++++++++++-- .../contrib/storage/linux/ceph.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/osplatform.py | 24 +++- ceph-osd/lib/charms_ceph/utils.py | 2 + 7 files changed, 144 insertions(+), 23 deletions(-) diff --git a/ceph-osd/Makefile b/ceph-osd/Makefile index d06a6904..4e7d3f76 100644 --- a/ceph-osd/Makefile +++ b/ceph-osd/Makefile @@ -1,5 +1,5 @@ #!/usr/bin/make -PYTHON := /usr/bin/env python +PYTHON := /usr/bin/env python3 lint: @tox -e pep8 diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py index d89d2cca..f2bb21e9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,7 +17,6 @@ import os import six import shutil -import sys import yaml import zipfile @@ -531,7 +530,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -560,23 +559,25 @@ def maybe_create_directory_for(path, user, group): @contextlib.contextmanager -def _py2_scandir(path): - """provide a py2 implementation of os.scandir if this module ever gets used - in a py2 charm (unlikely). uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for a +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for directory. 
:param path: the path to list the directories for :type path: str - :returns: Generator that provides _P27Direntry objects - :rtype: ContextManager[_P27Direntry] + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] """ for f in os.listdir(path): - yield _P27Direntry(f) + yield _FBDirectory(f) -class _P27Direntry(object): - """Mock a scandir Direntry object with enough to use in +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in clean_policyd_dir_for """ diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 161199c4..5c8f6eff 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -278,7 +278,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -326,7 +326,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), } @@ -555,9 +555,8 @@ def reset_os_release(): _os_rel = None -def os_release(package, base=None, reset_cache=False): - ''' - Returns OpenStack release codename from a cached global. +def os_release(package, base=None, reset_cache=False, source_key=None): + """Returns OpenStack release codename from a cached global. If reset_cache then unset the cached os_release version and return the freshly determined version. @@ -565,7 +564,20 @@ def os_release(package, base=None, reset_cache=False): If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. - ''' + + :param package: Name of package to determine release from + :type package: str + :param base: Fallback codename if endeavours to determine from package fail + :type base: Optional[str] + :param reset_cache: Reset any cached codename value + :type reset_cache: bool + :param source_key: Name of source configuration option + (default: 'openstack-origin') + :type source_key: Optional[str] + :returns: OpenStack release codename + :rtype: str + """ + source_key = source_key or 'openstack-origin' if not base: base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel @@ -575,7 +587,7 @@ def os_release(package, base=None, reset_cache=False): return _os_rel _os_rel = ( get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or + get_os_codename_install_source(config(source_key)) or base) return _os_rel @@ -658,6 +670,93 @@ def config_value_changed(option): return current != saved + + +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service names. + :type service_names: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. + :rtype: Dict[str, str] + """ + notifications = {} + for rid in relation_ids(rel_name): + for unit in related_units(relid=rid): + ep_changed_json = relation_get( + rid=rid, + unit=unit, + attribute='ep_changed') + if ep_changed_json: + ep_changed = json.loads(ep_changed_json) + for service in service_names: + if ep_changed.get(service): + key = get_endpoint_key(service, rid, unit) + notifications[key] = ep_changed[service] + return notifications + + +def endpoint_changed(service_name, rel_name='identity-service'): + """Whether a new notification has been received for an endpoint. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: Whether endpoint has changed + :rtype: bool + """ + changed = False + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + [service_name], + rel_name=rel_name) + for key, nonce in notifications.items(): + if db.get(key) != nonce: + juju_log(('New endpoint change notification found: ' + '{}={}').format(key, nonce), + 'INFO') + changed = True + break + return changed + + +def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): + """Save the endpoint triggers in db so they can be tracked if they changed. + + :param service_names: List of service names. + :type service_names: List + :param rel_name: Name of the relation to query + :type rel_name: str + """ + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + service_names, + rel_name=rel_name) + for key, nonce in notifications.items(): + db.set(key, nonce) + + def save_script_rc(script_path="scripts/scriptrc", **env_vars): """ Write an rc file in the charm-delivered directory containing diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 104977af..dabfb6c2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1042,7 +1042,7 @@ def filesystem_mounted(fs): def make_filesystem(blk_device, fstype='ext4', timeout=10): """Make a new filesystem on the specified block device.""" count = 0 - e_noent = os.errno.ENOENT + e_noent = errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: log('Gave up waiting on block device %s' % blk_device, diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 1b57e2ce..3edc0687 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,6 +25,7 @@ 'cosmic', 'disco', 'eoan', + 'focal' ) diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py index c7fd1363..78c81af5 100644 --- a/ceph-osd/hooks/charmhelpers/osplatform.py +++ b/ceph-osd/hooks/charmhelpers/osplatform.py @@ -1,4 +1,5 @@ import platform +import os def get_platform(): @@ -9,9 +10,13 @@ This string is used to decide which platform module should be imported. """ # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this.
- tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: @@ -26,3 +31,16 @@ def get_platform(): else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 7c970784..90ebb1b6 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1372,6 +1372,8 @@ def create_keyrings(): if not output: # NOTE: key not yet created, raise exception and retry raise Exception + # NOTE: octopus wants newline at end of file LP: #1864706 + output += '\n' write_file(_client_admin_keyring, output, owner=ceph_user(), group=ceph_user(), perms=0o400) From a84423952ee73b3db857e873eef5933c02816bba Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 5 Mar 2020 13:30:45 +0100 Subject: [PATCH 1901/2699] Sync charm-helpers for py38, distro, and other updates Change-Id: I7964252f9d65b17915ac1d324b186955213a6297 --- ceph-proxy/Makefile | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 23 ++-- .../charmhelpers/contrib/openstack/utils.py | 113 ++++++++++++++++-- .../contrib/storage/linux/ceph.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/osplatform.py | 24 +++- 6 files changed, 142 insertions(+), 23 deletions(-) diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 17ffd298..39458a3d 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -1,5 +1,5 @@ #!/usr/bin/make -PYTHON := /usr/bin/env python +PYTHON := /usr/bin/env python3 lint: @tox -e pep8 diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py index d89d2cca..f2bb21e9 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py @@ -17,7 +17,6 @@ import os import six import shutil -import sys import yaml import zipfile @@ -531,7 +530,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -560,23 +559,25 @@ def maybe_create_directory_for(path, user, group): @contextlib.contextmanager -def _py2_scandir(path): - """provide a py2 implementation of os.scandir if this module ever gets used - in a py2 charm (unlikely). uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for a +def _fallback_scandir(path): + """Fallback os.scandir implementation. 
+ + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for directory. :param path: the path to list the directories for :type path: str - :returns: Generator that provides _P27Direntry objects - :rtype: ContextManager[_P27Direntry] + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] """ for f in os.listdir(path): - yield _P27Direntry(f) + yield _FBDirectory(f) -class _P27Direntry(object): - """Mock a scandir Direntry object with enough to use in +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in clean_policyd_dir_for """ diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 161199c4..5c8f6eff 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -278,7 +278,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -326,7 +326,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), } @@ -555,9 +555,8 @@ def reset_os_release(): _os_rel = None -def os_release(package, base=None, reset_cache=False): - ''' - Returns OpenStack release codename from a cached global. +def os_release(package, base=None, reset_cache=False, source_key=None): + """Returns OpenStack release codename from a cached global. If reset_cache then unset the cached os_release version and return the freshly determined version. @@ -565,7 +564,20 @@ def os_release(package, base=None, reset_cache=False): If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. - ''' + + :param package: Name of package to determine release from + :type package: str + :param base: Fallback codename if endeavours to determine from package fail + :type base: Optional[str] + :param reset_cache: Reset any cached codename value + :type reset_cache: bool + :param source_key: Name of source configuration option + (default: 'openstack-origin') + :type source_key: Optional[str] + :returns: OpenStack release codename + :rtype: str + """ + source_key = source_key or 'openstack-origin' if not base: base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel @@ -575,7 +587,7 @@ def os_release(package, base=None, reset_cache=False): return _os_rel _os_rel = ( get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or + get_os_codename_install_source(config(source_key)) or base) return _os_rel @@ -658,6 +670,93 @@ def config_value_changed(option): return current != saved + + +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service names. + :type service_names: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. + :rtype: Dict[str, str] + """ + notifications = {} + for rid in relation_ids(rel_name): + for unit in related_units(relid=rid): + ep_changed_json = relation_get( + rid=rid, + unit=unit, + attribute='ep_changed') + if ep_changed_json: + ep_changed = json.loads(ep_changed_json) + for service in service_names: + if ep_changed.get(service): + key = get_endpoint_key(service, rid, unit) + notifications[key] = ep_changed[service] + return notifications + + +def endpoint_changed(service_name, rel_name='identity-service'): + """Whether a new notification has been received for an endpoint. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: Whether endpoint has changed + :rtype: bool + """ + changed = False + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + [service_name], + rel_name=rel_name) + for key, nonce in notifications.items(): + if db.get(key) != nonce: + juju_log(('New endpoint change notification found: ' + '{}={}').format(key, nonce), + 'INFO') + changed = True + break + return changed + + +def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): + """Save the endpoint triggers in db so they can be tracked if they changed. + + :param service_names: List of service names.
+ :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + """ + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + service_names, + rel_name=rel_name) + for key, nonce in notifications.items(): + db.set(key, nonce) + + def save_script_rc(script_path="scripts/scriptrc", **env_vars): """ Write an rc file in the charm-delivered directory containing diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 104977af..dabfb6c2 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -1042,7 +1042,7 @@ def filesystem_mounted(fs): def make_filesystem(blk_device, fstype='ext4', timeout=10): """Make a new filesystem on the specified block device.""" count = 0 - e_noent = os.errno.ENOENT + e_noent = errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: log('Gave up waiting on block device %s' % blk_device, diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 1b57e2ce..3edc0687 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -25,6 +25,7 @@ 'cosmic', 'disco', 'eoan', + 'focal' ) diff --git a/ceph-proxy/charmhelpers/osplatform.py b/ceph-proxy/charmhelpers/osplatform.py index c7fd1363..78c81af5 100644 --- a/ceph-proxy/charmhelpers/osplatform.py +++ b/ceph-proxy/charmhelpers/osplatform.py @@ -1,4 +1,5 @@ import platform +import os def get_platform(): @@ -9,9 +10,13 @@ def get_platform(): This string is used to decide which platform module should be imported. """ # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: @@ -26,3 +31,16 @@ def get_platform(): else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] From 3d1a0f02308988ea2311ed75da52bee647994319 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Thu, 5 Mar 2020 13:30:59 +0100 Subject: [PATCH 1902/2699] Sync charm-helpers for py38, distro, and other updates Change-Id: Ic50f48c3b3ba21c65cee07badf398cf3ef222c86 --- ceph-radosgw/Makefile | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 23 ++-- .../charmhelpers/contrib/openstack/utils.py | 113 ++++++++++++++++-- .../contrib/openstack/vaultlocker.py | 14 ++- .../contrib/storage/linux/ceph.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-radosgw/hooks/charmhelpers/osplatform.py | 24 +++- ceph-radosgw/lib/charms_ceph/utils.py | 2 + 8 files changed, 157 insertions(+), 24 deletions(-) diff --git a/ceph-radosgw/Makefile b/ceph-radosgw/Makefile index a0ab412e..03a08092 100644 --- a/ceph-radosgw/Makefile +++ b/ceph-radosgw/Makefile @@ -1,5 +1,5 @@ #!/usr/bin/make -PYTHON := /usr/bin/env python +PYTHON := /usr/bin/env python3 lint: @tox -e pep8 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py index d89d2cca..f2bb21e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py @@ -17,7 +17,6 @@ import os import six import shutil -import sys import yaml import zipfile @@ -531,7 +530,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -560,23 +559,25 @@ def maybe_create_directory_for(path, user, group): @contextlib.contextmanager -def _py2_scandir(path): - """provide a py2 implementation of os.scandir if this module ever gets used - in a py2 charm (unlikely). uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for a +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for directory. 
:param path: the path to list the directories for :type path: str - :returns: Generator that provides _P27Direntry objects - :rtype: ContextManager[_P27Direntry] + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] """ for f in os.listdir(path): - yield _P27Direntry(f) + yield _FBDirectory(f) -class _P27Direntry(object): - """Mock a scandir Direntry object with enough to use in +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in clean_policyd_dir_for """ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 161199c4..5c8f6eff 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -278,7 +278,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -326,7 +326,7 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('17', 'ussuri'), + ('18', 'ussuri'), ]), } @@ -555,9 +555,8 @@ def reset_os_release(): _os_rel = None -def os_release(package, base=None, reset_cache=False): - ''' - Returns OpenStack release codename from a cached global. +def os_release(package, base=None, reset_cache=False, source_key=None): + """Returns OpenStack release codename from a cached global. If reset_cache then unset the cached os_release version and return the freshly determined version. @@ -565,7 +564,20 @@ def os_release(package, base=None, reset_cache=False): If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned. - ''' + + :param package: Name of package to determine release from + :type package: str + :param base: Fallback codename if endeavours to determine from package fail + :type base: Optional[str] + :param reset_cache: Reset any cached codename value + :type reset_cache: bool + :param source_key: Name of source configuration option + (default: 'openstack-origin') + :type source_key: Optional[str] + :returns: OpenStack release codename + :rtype: str + """ + source_key = source_key or 'openstack-origin' if not base: base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel @@ -575,7 +587,7 @@ def os_release(package, base=None, reset_cache=False): return _os_rel _os_rel = ( get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or + get_os_codename_install_source(config(source_key)) or base) return _os_rel @@ -658,6 +670,93 @@ def config_value_changed(option): return current != saved + + +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service names. + :type service_names: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. + :rtype: Dict[str, str] + """ + notifications = {} + for rid in relation_ids(rel_name): + for unit in related_units(relid=rid): + ep_changed_json = relation_get( + rid=rid, + unit=unit, + attribute='ep_changed') + if ep_changed_json: + ep_changed = json.loads(ep_changed_json) + for service in service_names: + if ep_changed.get(service): + key = get_endpoint_key(service, rid, unit) + notifications[key] = ep_changed[service] + return notifications + + +def endpoint_changed(service_name, rel_name='identity-service'): + """Whether a new notification has been received for an endpoint. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: Whether endpoint has changed + :rtype: bool + """ + changed = False + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + [service_name], + rel_name=rel_name) + for key, nonce in notifications.items(): + if db.get(key) != nonce: + juju_log(('New endpoint change notification found: ' + '{}={}').format(key, nonce), + 'INFO') + changed = True + break + return changed + + +def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): + """Save the endpoint triggers in db so they can be tracked if they changed. + + :param service_names: List of service names. + :type service_names: List + :param rel_name: Name of the relation to query + :type rel_name: str + """ + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + service_names, + rel_name=rel_name) + for key, nonce in notifications.items(): + db.set(key, nonce) + + def save_script_rc(script_path="scripts/scriptrc", **env_vars): """ Write an rc file in the charm-delivered directory containing diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py index c162de27..866a2697 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,7 +37,19 @@ def __init__(self, secret_backend=None): ) def __call__(self): - import hvac + try: + import hvac + except ImportError: + # BUG: #1862085 - if the relation is made to vault, but the + # 'encrypt' option is not made, then the charm errors with an + # import warning. This catches that, logs a warning, and returns + # with an empty context. + hookenv.log("VaultKVContext: trying to use hvac python module " + "but it's not available. Is secrets-storage relation " + "made, but encrypt option not set?", + level=hookenv.WARNING) + # return an empty context on hvac import error + return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 104977af..dabfb6c2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1042,7 +1042,7 @@ def filesystem_mounted(fs): def make_filesystem(blk_device, fstype='ext4', timeout=10): """Make a new filesystem on the specified block device.""" count = 0 - e_noent = os.errno.ENOENT + e_noent = errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: log('Gave up waiting on block device %s' % blk_device, diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 1b57e2ce..3edc0687 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,6 +25,7 @@ 'cosmic', 'disco', 'eoan', + 'focal' ) diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index c7fd1363..78c81af5 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -1,4 +1,5 @@ import platform +import os def get_platform(): @@ -9,9 +10,13 @@ This string is used to decide which platform module should be imported. """ # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: @@ -26,3 +31,16 @@ def get_platform(): else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 7c970784..90ebb1b6 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1372,6 +1372,8 @@ def create_keyrings(): if not output: # NOTE: key not yet created, raise exception and retry raise Exception + # NOTE: octopus wants newline at end of file LP: #1864706 + output += '\n' write_file(_client_admin_keyring, output, owner=ceph_user(), group=ceph_user(), perms=0o400) From 520f29dce07a78423c9b2dbcd3022d745a30a189 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 2 Mar 2020 12:22:30 +0100 Subject: [PATCH 1903/2699] Remove storage type specification When storage type is specified, the bundles can only be deployed on a matching substrate.
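With the provider prefix dropped, the osd-devices directive is a bare size constraint that Juju can satisfy with whichever storage provider the current substrate offers. A quick way to sanity-check the reworked stanza (an illustrative PyYAML snippet, not part of the charm or its tests):

import yaml

bundle = yaml.safe_load("""
applications:
  ceph-osd:
    num_units: 3
    storage:
      osd-devices: '10G'
""")
# A bare '10G' leaves the choice of storage provider to Juju instead of
# hard-requiring cinder volumes as the old 'cinder,10G' form did.
assert bundle['applications']['ceph-osd']['storage']['osd-devices'] == '10G'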
Change-Id: If6c79eec024f8397147ae71055c02080ed640c43 --- ceph-mon/tests/bundles/bionic-queens.yaml | 2 +- ceph-mon/tests/bundles/bionic-rocky.yaml | 2 +- ceph-mon/tests/bundles/bionic-stein.yaml | 2 +- ceph-mon/tests/bundles/bionic-train-with-fsid.yaml | 2 +- ceph-mon/tests/bundles/bionic-train.yaml | 2 +- ceph-mon/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-mon/tests/bundles/xenial-mitaka.yaml | 2 +- ceph-mon/tests/bundles/xenial-ocata.yaml | 2 +- ceph-mon/tests/bundles/xenial-pike.yaml | 2 +- ceph-mon/tests/bundles/xenial-queens.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index 8bc49b32..3cd188f1 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' ceph-mon: diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml index 7e165bde..1ea79374 100644 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:bionic-rocky diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml index 56695dd7..e71dcd68 100644 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:bionic-stein diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml index 5729dfe4..418064e2 100644 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/dev/test-non-existent' source: cloud:bionic-train/proposed diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index 1b270900..7535683d 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -5,7 +5,7 @@ applications: num_units: 3 series: bionic storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/dev/test-non-existent' source: cloud:bionic-train diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml index bf56d66f..0fc7c3e0 100644 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:trusty-mitaka diff --git a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml index a9e28cc1..c7a987bc 100644 --- a/ceph-mon/tests/bundles/xenial-mitaka.yaml +++ b/ceph-mon/tests/bundles/xenial-mitaka.yaml @@ -4,7 +4,7 @@ 
applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' ceph-mon: diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml index 6187a500..12d17dd6 100644 --- a/ceph-mon/tests/bundles/xenial-ocata.yaml +++ b/ceph-mon/tests/bundles/xenial-ocata.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-ocata diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml index 8d31f272..bdefea27 100644 --- a/ceph-mon/tests/bundles/xenial-pike.yaml +++ b/ceph-mon/tests/bundles/xenial-pike.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-pike diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml index ab12215e..ad747d3f 100644 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -4,7 +4,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-queens From 64b6ee18019d360ba416fe5ebdd837cc39d21578 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 11 Mar 2020 09:24:40 +0100 Subject: [PATCH 1904/2699] Rebuild for updates to charms.openstack Change-Id: I39e0bcfff2b503c9365753aa1b8fa2223d675c31 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index d9293a5b..39ab9a72 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -d1c16faa-4837-11ea-810e-97e1f362d969 +3fef9e9b-3b33-4c1d-9fde-d2a783bbfa92 From b748afc010e0fce89f473e2857fe2c8fd504d661 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 11 Mar 2020 09:24:40 +0100 Subject: [PATCH 1905/2699] Rebuild for updates to charms.openstack Change-Id: I928d49789f17983389068f0e36af6d618384a122 --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index d9293a5b..39ab9a72 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -d1c16faa-4837-11ea-810e-97e1f362d969 +3fef9e9b-3b33-4c1d-9fde-d2a783bbfa92 From 825b6b6ce6bcc67cf27d81a1e6e3ae14e02d3457 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 12 Mar 2020 10:55:47 +0100 Subject: [PATCH 1906/2699] Unpin flake8, fix lint We need to unpin flake8 to allow linting on Python 3.8 systems. Sync charms.ceph. 
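One class of warnings the unpinned flake8 raises is W605 (invalid escape sequence) for regex patterns written as plain string literals, which is why the synced code below gains r-prefixed raw strings. A standalone illustration of the difference (illustrative code, not taken from the charm):

import re

# Without the r-prefix, '\d' is an invalid string escape: Python 3
# currently downgrades it to a DeprecationWarning, but flake8 reports
# W605 and future interpreters may reject it outright. A raw string
# skips the string-escape pass, so the regex engine sees the pattern
# exactly as written.
pattern = re.compile(r'ceph-(?P<osd_id>\d+)')
match = pattern.search('/var/lib/ceph/osd/ceph-12')
if match:
    print(match.group('osd_id'))  # prints: 12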
Depends-On: Idf2fea27b19cec47ffed9891b518ac7b5b75e405 Change-Id: I91a2133b6d7dc7e59d62dd80779cdc6e77206a20 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 2 +- ceph-radosgw/hooks/utils.py | 2 +- ceph-radosgw/lib/charms_ceph/broker.py | 12 ++++++----- ceph-radosgw/lib/charms_ceph/utils.py | 24 +++++++++++++--------- ceph-radosgw/test-requirements.txt | 2 +- ceph-radosgw/tox.ini | 2 +- 6 files changed, 25 insertions(+), 19 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 958d7eeb..539a5df9 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -134,7 +134,7 @@ def ensure_host_resolvable_v6(hostname): with open(tmp_hosts, 'a+') as fd: lines = fd.readlines() for line in lines: - key = "^%s\s+" % (host_addr) + key = r"^%s\s+" % (host_addr) if re.search(key, line): break else: diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 0db98a11..4e65ae51 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -177,7 +177,7 @@ def check_optional_relations(configs): if relation_ids('ha'): try: get_hacluster_config() - except: + except Exception: return ('blocked', 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index ceda9a85..726f9498 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -160,9 +160,10 @@ def handle_create_erasure_profile(request, service): # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket failure_domain = request.get('failure-domain') name = request.get('name') - k = request.get('k') - m = request.get('m') - l = request.get('l') + # Binary Distribution Matrix (BDM) parameters + bdm_k = request.get('k') + bdm_m = request.get('m') + bdm_l = request.get('l') if failure_domain not in CEPH_BUCKET_TYPES: msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) @@ -171,7 +172,8 @@ create_erasure_profile(service=service, erasure_plugin_name=erasure_type, profile_name=name, failure_domain=failure_domain, - data_chunks=k, coding_chunks=m, locality=l) + data_chunks=bdm_k, coding_chunks=bdm_m, + locality=bdm_l) def handle_add_permissions_to_key(request, service): @@ -556,7 +558,7 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] - if len(validator_params) is 1: + if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) else: diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 90ebb1b6..d2752520 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -637,7 +637,7 @@ def _get_osd_num_from_dirname(dirname): :raises ValueError: if the osd number cannot be parsed from the provided directory name. """ - match = re.search('ceph-(?P<osd_id>\d+)', dirname) + match = re.search(r'ceph-(?P<osd_id>\d+)', dirname) if not match: raise ValueError("dirname not in correct format: {}".format(dirname)) @@ -706,7 +706,7 @@ def get_version(): package = "ceph" try: pkg = cache[package] - except: + except KeyError: # the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation ' \ 'candidate: %s' % package @@ -721,7 +721,7 @@ def get_version(): # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -956,11 +956,11 @@ def start_osds(devices): rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call( - ['ceph-disk', 'activate', dev_or_path]) + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call( + ['ceph-disk', 'activate', dev_or_path]) def udevadm_settle(): @@ -978,6 +978,7 @@ def rescan_osd_devices(): udevadm_settle() + _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' @@ -1002,6 +1003,7 @@ def generate_monitor_secret(): return "{}==".format(res.split('=')[1].strip()) + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -1039,7 +1041,7 @@ def get_osd_bootstrap_key(): # Attempt to get/create a key using the OSD bootstrap profile first key = get_named_key('bootstrap-osd', _osd_bootstrap_caps_profile) - except: + except Exception: # If that fails try with the older style permissions key = get_named_key('bootstrap-osd', _osd_bootstrap_caps) @@ -1063,6 +1065,7 @@ def import_radosgw_key(key): ] subprocess.check_call(cmd) + # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow rw'], @@ -1299,7 +1302,7 @@ def bootstrap_monitor_cluster(secret): path, done, init_marker) - except: + except Exception: raise finally: os.unlink(keyring) @@ -2789,6 +2792,7 @@ def dirs_need_ownership_update(service): # All child directories had the expected ownership return False + # A dict of valid ceph upgrade paths. 
Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 7d9c2587..44b50231 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -10,7 +10,7 @@ charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 20dbbfc5..355780fa 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -111,5 +111,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 exclude = */charmhelpers From 8b022a910ecbf4e29c7b599bab86d1e3089a5bd3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 12 Mar 2020 13:27:12 +0000 Subject: [PATCH 1907/2699] Add adapters --- ceph-iscsi/charm-prep.sh | 1 + ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 63 +++++++++++++++---- .../templates/ceph.client.ceph-iscsi.keyring | 2 +- ceph-iscsi/templates/ceph.conf | 4 +- ceph-iscsi/templates/iscsi-gateway.cfg | 4 +- ceph-iscsi/todo.txt | 1 + 8 files changed, 60 insertions(+), 19 deletions(-) diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index 0e5c2f48..0f29b419 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -9,6 +9,7 @@ git submodule update (cd lib; ln -s ../mod/operator/ops;) (cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) (cd lib; ln -s ../mod/ops-openstack/ops_openstack.py) +(cd lib; ln -s ../mod/ops-openstack/adapters.py) (cd mod/interface-ceph-client; git pull origin master) (cd mod/operator; git pull origin master) (cd mod/ops-openstack; git pull origin master) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 67254df6..6620d0d1 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 67254df6458e78e53f0f85ce80c1af3d7cff3205 +Subproject commit 6620d0d18e82f02401203ab29ea3712f93031989 diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index e8903fbe..3c1ba3c3 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit e8903fbe58fe76d23db9bebe0647fb9707a93460 +Subproject commit 3c1ba3c3f2a25e92ac2d900448fba9e6714fd32c diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 0585c55e..bb41d20d 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -18,11 +18,56 @@ import interface_ceph_client import interface_ceph_iscsi_peer +import adapters import ops_openstack logger = logging.getLogger() +class CephClientAdapter(adapters.OpenStackOperRelationAdapter): + + def __init__(self, relation): + super(CephClientAdapter, self).__init__(relation) + + @property + def mon_hosts(self): + hosts = self.relation.get_relation_data()['mon_hosts'] + return ' '.join(sorted(hosts)) + + @property + def auth_supported(self): + return self.relation.get_relation_data()['auth'] + + @property + def key(self): + return self.relation.get_relation_data()['key'] + + +class PeerAdapter(adapters.OpenStackOperRelationAdapter): + + def __init__(self, relation): + super(PeerAdapter, self).__init__(relation) + + +class GatewayClientPeerAdapter(PeerAdapter): + + def __init__(self, relation): + super(GatewayClientPeerAdapter, self).__init__(relation) + + @property + def 
gw_hosts(self): + hosts = self.relation.peer_addresses + return ' '.join(sorted(hosts)) + + +class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): + + relation_adapters = { + 'ceph-client': CephClientAdapter, + 'cluster': GatewayClientPeerAdapter, + } + + class GatewayClient(): def run(self, path, cmd): @@ -85,14 +130,17 @@ class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): def __init__(self, framework, key): super().__init__(framework, key) self.state.set_default(target_created=False) - self.framework.observe(self.on.ceph_client_relation_joined, self) self.ceph_client = interface_ceph_client.CephClientRequires( self, 'ceph-client') - self.framework.observe(self.ceph_client.on.pools_available, self) self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( self, 'cluster') + self.adapters = CephISCSIGatewayAdapters( + (self.ceph_client, self.peers), + self) + self.framework.observe(self.on.ceph_client_relation_joined, self) + self.framework.observe(self.ceph_client.on.pools_available, self) self.framework.observe(self.peers.on.has_peers, self) self.framework.observe(self.peers.on.ready_peers, self) self.framework.observe(self.on.create_target_action, self) @@ -172,15 +220,6 @@ def on_pools_available(self, event): logging.info("Defering setup") event.defer() return - ceph_context = { - 'use_syslog': - str(self.framework.model.config['use-syslog']).lower(), - 'loglevel': self.framework.model.config['loglevel'], - 'admin_password': self.peers.admin_password, - } - ceph_context.update(self.ceph_client.get_pool_data()) - ceph_context['mon_hosts'] = ' '.join(ceph_context['mon_hosts']) - ceph_context['gw_hosts'] = ' '.join(sorted(self.peers.peer_addresses)) def daemon_reload_and_restart(service_name): subprocess.check_call(['systemctl', 'daemon-reload']) @@ -195,7 +234,7 @@ def render_configs(): ch_templating.render( os.path.basename(config_file), config_file, - ceph_context) + self.adapters) logging.info("Rendering config") render_configs() logging.info("Setting started state") diff --git a/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring b/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring index fe132228..fed64cf1 100644 --- a/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring +++ b/ceph-iscsi/templates/ceph.client.ceph-iscsi.keyring @@ -1,3 +1,3 @@ [client.ceph-iscsi] - key = {{ key }} + key = {{ ceph_client.key }} diff --git a/ceph-iscsi/templates/ceph.conf b/ceph-iscsi/templates/ceph.conf index c8b38244..a27fd58f 100644 --- a/ceph-iscsi/templates/ceph.conf +++ b/ceph-iscsi/templates/ceph.conf @@ -4,8 +4,8 @@ # local changes will be overwritten. 
############################################################################### [global] -auth_supported = {{ auth_supported }} -mon host = {{ mon_hosts }} +auth supported = {{ ceph_client.auth_supported }} +mon host = {{ ceph_client.mon_hosts }} keyring = /etc/ceph/$cluster.$name.keyring [client.ceph-iscsi] diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 46521ca5..9a7e0a2d 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -25,8 +25,8 @@ gateway_keyring = ceph.client.ceph-iscsi.keyring # # To support the API, the bear minimum settings are: api_secure = false api_user = admin -api_password = {{ admin_password }} +api_password = {{ cluster.admin_password }} api_port = 5000 -trusted_ip_list = {{ gw_hosts }} +trusted_ip_list = {{ cluster.gw_hosts }} # # diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index 1c937fdd..f49b36a2 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -9,6 +9,7 @@ Todo * Certificates interface * security checklist * zaza tests for pause/resume +* Ceph haeartbeat settings https://docs.ceph.com/docs/master/rbd/iscsi-requirements/ Mostly Done * trusted_ips From 36cd9de7fe652ddef6acbcaba8caa09069ececb7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 16 Mar 2020 12:04:30 +0000 Subject: [PATCH 1908/2699] Move gwcli code --- ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 65 ++++++++++------------------------ ceph-iscsi/src/gwcli_client.py | 46 ++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 48 deletions(-) create mode 100644 ceph-iscsi/src/gwcli_client.py diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 6620d0d1..04712d98 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 6620d0d18e82f02401203ab29ea3712f93031989 +Subproject commit 04712d98b1fb662f0b34ae840753abacd6073b2e diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index 3c1ba3c3..764fcf1b 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit 3c1ba3c3f2a25e92ac2d900448fba9e6714fd32c +Subproject commit 764fcf1b222bd9be712c1999daffe9adb1da8206 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index bb41d20d..c0e222b0 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -20,6 +20,7 @@ import adapters import ops_openstack +import gwcli_client logger = logging.getLogger() @@ -68,50 +69,8 @@ class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): } -class GatewayClient(): +class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): - def run(self, path, cmd): - _cmd = ['gwcli', path] - _cmd.extend(cmd.split()) - logging.info(_cmd) - print(_cmd) - subprocess.check_call(_cmd) - - def create_target(self, iqn): - self.run( - "/iscsi-targets/", - "create {}".format(iqn)) - - def add_gateway_to_target(self, iqn, gateway_ip, gateway_fqdn): - self.run( - "/iscsi-targets/{}/gateways/".format(iqn), - "create {} {}".format(gateway_fqdn, gateway_ip)) - - def create_pool(self, pool_name, image_name, image_size): - self.run( - "/disks", - "create pool={} image={} size={}".format( - pool_name, - image_name, - image_size)) - - def add_client_to_target(self, iqn, initiatorname): - self.run( - "/iscsi-targets/{}/hosts/".format(iqn), - "create {}".format(initiatorname)) - - def add_client_auth(self, iqn, initiatorname, username, password): - self.run( - 
"/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), - "auth username={} password={}".format(username, password)) - - def add_disk_to_client(self, iqn, initiatorname, pool_name, image_name): - self.run( - "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), - "disk add {}/{}".format(pool_name, image_name)) - - -class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): state = StoredState() PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] CEPH_CAPABILITIES = [ @@ -129,6 +88,7 @@ class CephISCSIGatewayCharm(ops_openstack.OSBaseCharm): def __init__(self, framework, key): super().__init__(framework, key) + logging.info("Using {} class".format(self.release)) self.state.set_default(target_created=False) self.ceph_client = interface_ceph_client.CephClientRequires( self, @@ -146,7 +106,7 @@ def __init__(self, framework, key): self.framework.observe(self.on.create_target_action, self) def on_create_target_action(self, event): - gw_client = GatewayClient() + gw_client = gwcli_client.GatewayClient() gw_client.create_target(event.params['iqn']) for gw_unit, gw_config in self.peers.ready_peer_details.items(): added_gateways = [] @@ -175,7 +135,7 @@ def on_create_target_action(self, event): event.params['image-name']) def setup_default_target(self): - gw_client = GatewayClient() + gw_client = gwcli_client.GatewayClient() gw_client.create_target(self.DEFAULT_TARGET) for gw_unit, gw_config in self.peers.ready_peer_details.items(): gw_client.add_gateway_to_target( @@ -244,5 +204,18 @@ def render_configs(): logging.info("on_pools_available: status updated") +@ops_openstack.charm_class +class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase): + + state = StoredState() + release = 'jewel' + + +@ops_openstack.charm_class +class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase): + + state = StoredState() + release = 'octopus' + if __name__ == '__main__': - main(CephISCSIGatewayCharm) + main(ops_openstack.get_charm_class_for_release()) diff --git a/ceph-iscsi/src/gwcli_client.py b/ceph-iscsi/src/gwcli_client.py new file mode 100644 index 00000000..d22cfb16 --- /dev/null +++ b/ceph-iscsi/src/gwcli_client.py @@ -0,0 +1,46 @@ +import logging +import subprocess + +logger = logging.getLogger() + + +class GatewayClient(): + + def run(self, path, cmd): + _cmd = ['gwcli', path] + _cmd.extend(cmd.split()) + logging.info(_cmd) + subprocess.check_call(_cmd) + + def create_target(self, iqn): + self.run( + "/iscsi-targets/", + "create {}".format(iqn)) + + def add_gateway_to_target(self, iqn, gateway_ip, gateway_fqdn): + self.run( + "/iscsi-targets/{}/gateways/".format(iqn), + "create {} {}".format(gateway_fqdn, gateway_ip)) + + def create_pool(self, pool_name, image_name, image_size): + self.run( + "/disks", + "create pool={} image={} size={}".format( + pool_name, + image_name, + image_size)) + + def add_client_to_target(self, iqn, initiatorname): + self.run( + "/iscsi-targets/{}/hosts/".format(iqn), + "create {}".format(initiatorname)) + + def add_client_auth(self, iqn, initiatorname, username, password): + self.run( + "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), + "auth username={} password={}".format(username, password)) + + def add_disk_to_client(self, iqn, initiatorname, pool_name, image_name): + self.run( + "/iscsi-targets/{}/hosts/{}".format(iqn, initiatorname), + "disk add {}/{}".format(pool_name, image_name)) From 82db75199979ef225164891fd1426de0853b45da Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 16 Mar 2020 12:05:19 +0000 Subject: [PATCH 1909/2699] Update todo --- 
ceph-iscsi/todo.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index f49b36a2..d31171a1 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -1,6 +1,5 @@ Todo * Remove hardcoded ceph pool name and expose as a config option -* Write spec * Add series upgrade * Write README * Move to openstack-charmers @@ -20,3 +19,4 @@ Mostly Done * implement source config option * Proper Update Status * Fix workload status so it reports missing relations +* Write spec From d1b28204d84927dd67b2e8d200af46f02326ec36 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 12 Mar 2020 10:52:35 +0100 Subject: [PATCH 1910/2699] Remove use of admin_token for Keystone V2.0 deployments At present the charm configures the Ceph RADOS GW with the admin_token as credentials when connecting to a deployment with Keystone V2.0 API. We want to move away from that and as such we need to update the charm to configure username, password and project name instead. Change-Id: Idab6a5740a541b922f9dbd65165d0328d747e78e --- ceph-radosgw/hooks/ceph_radosgw_context.py | 7 ---- ceph-radosgw/templates/ceph.conf | 7 ++-- .../unit_tests/test_ceph_radosgw_context.py | 42 ------------------- 3 files changed, 4 insertions(+), 52 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 539a5df9..43dd7534 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -82,13 +82,6 @@ def __call__(self): if not ctxt: return - ctxt['admin_token'] = None - for relid in relation_ids('identity-service'): - for unit in related_units(relid): - if not ctxt.get('admin_token'): - ctxt['admin_token'] = \ - relation_get('admin_token', unit, relid) - if cmp_pkgrevno('radosgw', "10.2.0") >= 0: ctxt['auth_keystone_v3_supported'] = True diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 3d5bc34c..924927f4 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -41,14 +41,15 @@ rgw init timeout = 1200 rgw frontends = civetweb port={{ port }} {% if auth_type == 'keystone' %} rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ -{% if auth_keystone_v3_supported and api_version == '3' -%} -rgw keystone api version = 3 rgw keystone admin user = {{ admin_user }} rgw keystone admin password = {{ admin_password }} +{% if auth_keystone_v3_supported and api_version == '3' -%} +rgw keystone api version = 3 rgw keystone admin domain = {{ admin_domain_name }} rgw keystone admin project = {{ admin_tenant_name }} {% else -%} -rgw keystone admin token = {{ admin_token }} +rgw keystone api version = 2 +rgw keystone admin tenant = {{ admin_tenant_name }} {% endif -%} rgw keystone accepted roles = {{ user_roles }} rgw keystone accepted admin roles = {{ admin_roles }} diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 8c0d79a3..eec5069a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -94,7 +94,6 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') - self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] 
_format_ipv6_addr.return_value = False @@ -119,7 +118,6 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'admin_password': 'adminpass', 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'admin_tenant_name': 'ten', - 'admin_token': 'ubuntutesting', 'admin_user': 'admin', 'api_version': '2.0', 'auth_host': '127.0.0.5', @@ -160,7 +158,6 @@ def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') - self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] _format_ipv6_addr.return_value = False @@ -186,7 +183,6 @@ def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, 'admin_password': 'adminpass', 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'admin_tenant_name': 'ten', - 'admin_token': 'ubuntutesting', 'admin_user': 'admin', 'api_version': '2.0', 'auth_host': '127.0.0.5', @@ -222,7 +218,6 @@ def test_ids_ctxt_missing_admin_domain_id( self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') - self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] _format_ipv6_addr.return_value = False @@ -248,7 +243,6 @@ def test_ids_ctxt_missing_admin_domain_id( 'admin_password': 'adminpass', 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'admin_tenant_name': 'ten', - 'admin_token': 'ubuntutesting', 'admin_user': 'admin', 'api_version': '2.0', 'auth_host': '127.0.0.5', @@ -280,7 +274,6 @@ def test_ids_ctxt_v3( self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') - self.test_relation.set({'admin_token': 'ubuntutesting'}) self.relation_ids.return_value = ['identity-service:5'] self.related_units.return_value = ['keystone/0'] _format_ipv6_addr.return_value = False @@ -311,7 +304,6 @@ def test_ids_ctxt_v3( 'admin_password': 'adminpass', 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', 'admin_tenant_name': 'ten', - 'admin_token': 'ubuntutesting', 'admin_user': 'admin', 'api_version': '3', 'auth_host': '127.0.0.5', @@ -339,40 +331,6 @@ def test_ids_ctxt_luminous(self): self.test_ids_ctxt(jewel_installed=True, cmp_pkgrevno_side_effects=[1, 0]) - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_no_admin_token(self, _log, _rids, _runits, _rget, - _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages): - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('cache-size', '42') - self.test_relation.set({}) - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = 'rid1' - _runits.return_value = 'runit' - 
_ctxt_comp.return_value = True - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - self.assertEqual({}, ids_ctxt()) - @patch.object(charmhelpers.contrib.openstack.context, 'filter_installed_packages', return_value=['absent-pkg']) @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') From f3d97934b368df2196d99383c779461c973493be Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 16 Mar 2020 13:35:16 +0100 Subject: [PATCH 1911/2699] Add S3 endpoint to service catalog Also add functional test of the S3 API. Change-Id: I614a7bd9c294b783a289d8b38eca9520114e9c95 Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/194 --- ceph-radosgw/hooks/hooks.py | 19 +++++-- ceph-radosgw/tests/tests.yaml | 23 +++++---- ceph-radosgw/unit_tests/test_hooks.py | 71 ++++++++++++++++++--------- 3 files changed, 75 insertions(+), 38 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 845714d7..cc6c536c 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -316,12 +316,23 @@ def identity_joined(relid=None): requested_roles = '' if roles: requested_roles = ','.join(roles) if len(roles) > 1 else roles[0] - relation_set(service='swift', - region=config('region'), - public_url=public_url, internal_url=internal_url, - admin_url=admin_url, + relation_set(swift_service='swift', + swift_region=config('region'), + swift_public_url=public_url, + swift_internal_url=internal_url, + swift_admin_url=admin_url, requested_roles=requested_roles, relation_id=relid) + if cmp_pkgrevno('radosgw', '12.2') >= 0: + relation_set(s3_service='s3', + s3_region=config('region'), + s3_public_url='{}:{}/'.format( + canonical_url(CONFIGS, PUBLIC), port), + s3_internal_url='{}:{}/'.format( + canonical_url(CONFIGS, INTERNAL), port), + s3_admin_url='{}:{}/'.format( + canonical_url(CONFIGS, ADMIN), port), + relation_id=relid) @hooks.hook('identity-service-relation-changed') diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 0aca1f4c..e631981a 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,23 +1,26 @@ charm_name: ceph-radosgw gate_bundles: - - bionic-train - - bionic-train-namespaced - - bionic-stein - - bionic-stein-namespaced - - bionic-rocky - - bionic-rocky-namespaced - - bionic-queens - - bionic-queens-namespaced - - xenial-queens + - test-s3api: bionic-train + - test-s3api: bionic-train-namespaced + - test-s3api: bionic-stein + - test-s3api: bionic-stein-namespaced + - test-s3api: bionic-rocky + - test-s3api: bionic-rocky-namespaced + - test-s3api: bionic-queens + - test-s3api: bionic-queens-namespaced + - test-s3api: xenial-queens - xenial-pike - xenial-ocata - xenial-mitaka - xenial-mitaka-namespaced - trusty-mitaka smoke_bundles: - - bionic-train + - test-s3api: bionic-train dev_bundles: - bionic-queens-multisite - bionic-rocky-multisite tests: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest + - test-s3api: + - zaza.openstack.charm_tests.ceph.tests.CephRGWTest + - zaza.openstack.charm_tests.swift.tests.S3APITest diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index eda79f64..96a6c866 100644 --- 
a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -273,14 +273,21 @@ def _test_identify_joined(expected): self.test_config.set('region', 'region1') _leader_get.return_value = 'False' ceph_hooks.identity_joined(relid='rid') - self.relation_set.assert_called_with( - service='swift', - region='region1', - public_url='http://myserv:80/swift/v1', - internal_url='http://myserv:80/swift/v1', - requested_roles=expected, - relation_id='rid', - admin_url='http://myserv:80/swift') + self.relation_set.assert_has_calls([ + call(swift_service='swift', + swift_region='region1', + swift_public_url='http://myserv:80/swift/v1', + swift_internal_url='http://myserv:80/swift/v1', + swift_admin_url='http://myserv:80/swift', + requested_roles=expected, + relation_id='rid'), + call(s3_service='s3', + s3_region='region1', + s3_public_url='http://myserv:80/', + s3_internal_url='http://myserv:80/', + s3_admin_url='http://myserv:80/', + relation_id='rid') + ]) inputs = [{'operator': 'foo', 'admin': 'bar', 'expected': 'foo,bar'}, {'operator': 'foo', 'expected': 'foo'}, @@ -308,14 +315,23 @@ def _test_identify_joined(expected): self.test_config.set('region', 'region1') _leader_get.return_value = 'True' ceph_hooks.identity_joined(relid='rid') - self.relation_set.assert_called_with( - service='swift', - region='region1', - public_url='http://myserv:80/swift/v1/AUTH_$(project_id)s', - internal_url='http://myserv:80/swift/v1/AUTH_$(project_id)s', - requested_roles=expected, - relation_id='rid', - admin_url='http://myserv:80/swift') + self.relation_set.assert_has_calls([ + call(swift_service='swift', + swift_region='region1', + swift_public_url=( + 'http://myserv:80/swift/v1/AUTH_$(project_id)s'), + swift_internal_url=( + 'http://myserv:80/swift/v1/AUTH_$(project_id)s'), + swift_admin_url='http://myserv:80/swift', + requested_roles=expected, + relation_id='rid'), + call(s3_service='s3', + s3_region='region1', + s3_public_url='http://myserv:80/', + s3_internal_url='http://myserv:80/', + s3_admin_url='http://myserv:80/', + relation_id='rid') + ]) inputs = [{'operator': 'foo', 'admin': 'bar', 'expected': 'foo,bar'}, {'operator': 'foo', 'expected': 'foo'}, @@ -341,14 +357,21 @@ def test_identity_joined_public_name(self, _config, _unit_get, _is_clustered.return_value = False _leader_get.return_value = 'False' ceph_hooks.identity_joined(relid='rid') - self.relation_set.assert_called_with( - service='swift', - region='RegionOne', - public_url='http://files.example.com:80/swift/v1', - internal_url='http://myserv:80/swift/v1', - requested_roles='Member,Admin', - relation_id='rid', - admin_url='http://myserv:80/swift') + self.relation_set.assert_has_calls([ + call(swift_service='swift', + swift_region='RegionOne', + swift_public_url='http://files.example.com:80/swift/v1', + swift_internal_url='http://myserv:80/swift/v1', + swift_admin_url='http://myserv:80/swift', + requested_roles='Member,Admin', + relation_id='rid'), + call(s3_service='s3', + s3_region='RegionOne', + s3_public_url='http://files.example.com:80/', + s3_internal_url='http://myserv:80/', + s3_admin_url='http://myserv:80/', + relation_id='rid') + ]) @patch.object(ceph_hooks, 'identity_joined') def test_identity_changed(self, mock_identity_joined): From 323f715572615917c06707b4838b7311a3ed97ed Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 18 Mar 2020 11:30:37 +0000 Subject: [PATCH 1912/2699] Add support for setting pool name --- ceph-iscsi/config.yaml | 5 +++++ ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 7 
++++--- ceph-iscsi/templates/iscsi-gateway.cfg | 2 +- ceph-iscsi/tests/bundles/focal.yaml | 2 ++ ceph-iscsi/todo.txt | 11 ++++++----- 6 files changed, 19 insertions(+), 10 deletions(-) diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index aa944462..291d5d13 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -35,6 +35,11 @@ options: 192.168.0.0/24). If multiple networks are to be used, a space-delimited list of a.b.c.d/x can be provided. + rbd-pool: + type: string + default: iscsi + description: | + RBD pool to use for iscsi backend. prefer-ipv6: type: boolean default: False diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index 764fcf1b..0d650e28 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit 764fcf1b222bd9be712c1999daffe9adb1da8206 +Subproject commit 0d650e28963b69129353e723f9b148c1b7afc437 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index c0e222b0..71853d26 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -117,7 +117,7 @@ def on_create_target_action(self, event): gw_config['fqdn']) added_gateways.append(gw_unit) gw_client.create_pool( - 'iscsi', + self.framework.model.config['rbd-pool'], event.params['image-name'], event.params['image-size']) gw_client.add_client_to_target( @@ -131,7 +131,7 @@ def on_create_target_action(self, event): gw_client.add_disk_to_client( event.params['iqn'], event.params['client-initiatorname'], - 'iscsi', + self.framework.model.config['rbd-pool'], event.params['image-name']) def setup_default_target(self): @@ -168,7 +168,8 @@ def on_has_peers(self, event): def on_ceph_client_relation_joined(self, event): logging.info("Requesting replicated pool") - self.ceph_client.create_replicated_pool('iscsi') + self.ceph_client.create_replicated_pool( + self.framework.model.config['rbd-pool']) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 9a7e0a2d..2fd2aa24 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -5,7 +5,7 @@ logger_level = DEBUG cluster_name = ceph cluster_client_name = client.ceph-iscsi -pool = iscsi +pool = {{ options.rbd_pool }} # # # Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph # # drectory and reference the filename here diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 3e62783b..86ca50fb 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -7,6 +7,8 @@ applications: charm: ../../ series: focal num_units: 2 + options: + rbd-pool: tmbtil ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt index d31171a1..7d24970e 100644 --- a/ceph-iscsi/todo.txt +++ b/ceph-iscsi/todo.txt @@ -1,14 +1,13 @@ Todo -* Remove hardcoded ceph pool name and expose as a config option -* Add series upgrade -* Write README -* Move to openstack-charmers * Refactor ceph broker code in charm helpers * Rewrite ceph-client interface to stop using any relation* commands via charmhelpers * Certificates interface +* Ceph heartbeat settings https://docs.ceph.com/docs/master/rbd/iscsi-requirements/ +* Write README +* Move to openstack-charmers * security checklist * zaza tests for pause/resume -* Ceph haeartbeat settings 
https://docs.ceph.com/docs/master/rbd/iscsi-requirements/
+* remove hardcoded password

Mostly Done
* trusted_ips
* zaza tests for creating nd mounting a target
* Implement pause/resume
@@ -20,3 +19,5 @@ Mostly Done
* implement source config option
* Proper Update Status
* Fix workload status so it reports missing relations
* Write spec
+* Remove hardcoded ceph pool name and expose as a config option
+* Add series upgrade

From 0e7e09f3ad482a0854446b2b6a37d75febaf8469 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Wed, 18 Mar 2020 11:33:04 +0000
Subject: [PATCH 1913/2699] mod/ops-openstack update

---
 ceph-iscsi/mod/ops-openstack | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack
index 0d650e28..e4cfb975 160000
--- a/ceph-iscsi/mod/ops-openstack
+++ b/ceph-iscsi/mod/ops-openstack
@@ -1 +1 @@
-Subproject commit 0d650e28963b69129353e723f9b148c1b7afc437
+Subproject commit e4cfb975aa1ab33f81f683df5b75cd96f47b76de

From f90d57f5131f48c0b6eed8e41f82b5084a8d031f Mon Sep 17 00:00:00 2001
From: Corey Bryant
Date: Tue, 17 Mar 2020 09:27:08 -0400
Subject: [PATCH 1914/2699] Maintain OSD state on upgrade

Sync charms.ceph

Ensure each OSD reaches its pre-restart state before proceeding after
restart. This prevents the charm from finalizing the upgrade prior to
OSDs recovering after upgrade. For example, if the state is 'active'
prior to restart, then it must reach 'active' after restart, at which
point the upgrade will be allowed to complete.

Change-Id: I1067a8cdd1e2b706db07f194eca6fb2efeccb817
Depends-On: https://review.opendev.org/#/c/713743/
Closes-Bug: #1821028
---
 ceph-osd/lib/charms_ceph/broker.py |  12 +--
 ceph-osd/lib/charms_ceph/utils.py  | 140 +++++++++++++++++++++++++----
 2 files changed, 128 insertions(+), 24 deletions(-)

diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py
index ceda9a85..726f9498 100644
--- a/ceph-osd/lib/charms_ceph/broker.py
+++ b/ceph-osd/lib/charms_ceph/broker.py
@@ -160,9 +160,10 @@ def handle_create_erasure_profile(request, service):
    # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
    failure_domain = request.get('failure-domain')
    name = request.get('name')
-    k = request.get('k')
-    m = request.get('m')
-    l = request.get('l')
+    # Erasure profile parameters: k data chunks, m coding chunks, l locality
+    ec_k = request.get('k')
+    ec_m = request.get('m')
+    ec_l = request.get('l')

    if failure_domain not in CEPH_BUCKET_TYPES:
        msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
@@ -171,7 +172,8 @@ def handle_create_erasure_profile(request, service):
    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
                           profile_name=name, failure_domain=failure_domain,
-                           data_chunks=k, coding_chunks=m, locality=l)
+                           data_chunks=ec_k, coding_chunks=ec_m,
+                           locality=ec_l)


 def handle_add_permissions_to_key(request, service):
@@ -556,7 +558,7 @@ def handle_set_pool_value(request, service):
    # Get the validation method
    validator_params = POOL_KEYS[params['key']]
-    if len(validator_params) is 1:
+    if len(validator_params) == 1:
        # Validate that what the user passed is actually legal per Ceph's rules
        validator(params['value'], validator_params[0])
    else:
diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py
index 90ebb1b6..c26a131b 100644
--- a/ceph-osd/lib/charms_ceph/utils.py
+++ b/ceph-osd/lib/charms_ceph/utils.py
@@ -25,6 +25,7 @@
 import time
 import uuid

+from contextlib import contextmanager
 from datetime import datetime

 from charmhelpers.core import hookenv
@@ -175,12 +176,16 @@ def unmounted_disks():
    context = pyudev.Context()
    for device in context.list_devices(DEVTYPE='disk'):
        if device['SUBSYSTEM'] == 'block':
+            if device.device_node is None:
+                continue
+
            matched = False
            for block_type in [u'dm-', u'loop', u'ram', u'nbd']:
                if block_type in device.device_node:
                    matched = True
            if matched:
                continue
+
            disks.append(device.device_node)
    log("Found disks: {}".format(disks))
    return [disk for disk in disks if not is_device_mounted(disk)]
@@ -637,7 +642,7 @@ def _get_osd_num_from_dirname(dirname):
    :raises ValueError: if the osd number cannot be parsed from the provided
                        directory name.
    """
-    match = re.search('ceph-(?P<osd_id>\d+)', dirname)
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
    if not match:
        raise ValueError("dirname not in correct format: {}".format(dirname))
@@ -706,7 +711,7 @@ def get_version():
    package = "ceph"
    try:
        pkg = cache[package]
-    except:
+    except KeyError:
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation ' \
            'candidate: %s' % package
@@ -721,7 +726,7 @@ def get_version():
    # x.y match only for 20XX.X
    # and ignore patch level for other packages
-    match = re.match('^(\d+)\.(\d+)', vers)
+    match = re.match(r'^(\d+)\.(\d+)', vers)

    if match:
        vers = match.group(0)
@@ -956,11 +961,11 @@ def start_osds(devices):
    rescan_osd_devices()
    if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and
            cmp_pkgrevno('ceph', '14.2.0') < 0):
-            # Use ceph-disk activate for directory based OSD's
-            for dev_or_path in devices:
-                if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
-                    subprocess.check_call(
-                        ['ceph-disk', 'activate', dev_or_path])
+        # Use ceph-disk activate for directory based OSD's
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(
+                    ['ceph-disk', 'activate', dev_or_path])


 def udevadm_settle():
@@ -978,6 +983,7 @@ def rescan_osd_devices():

    udevadm_settle()

+
 _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring'
@@ -1002,6 +1008,7 @@ def generate_monitor_secret():

    return "{}==".format(res.split('=')[1].strip())

+
 # OSD caps taken from ceph-create-keys
 _osd_bootstrap_caps = {
    'mon': [
@@ -1039,7 +1046,7 @@ def get_osd_bootstrap_key():
        # Attempt to get/create a key using the OSD bootstrap profile first
        key = get_named_key('bootstrap-osd',
                            _osd_bootstrap_caps_profile)
-    except:
+    except Exception:
        # If that fails try with the older style permissions
        key = get_named_key('bootstrap-osd',
                            _osd_bootstrap_caps)
@@ -1063,6 +1070,7 @@ def import_radosgw_key(key):
        ]
        subprocess.check_call(cmd)

+
 # OSD caps taken from ceph-create-keys
 _radosgw_caps = {
    'mon': ['allow rw'],
@@ -1299,7 +1307,7 @@ def bootstrap_monitor_cluster(secret):
                           path,
                           done,
                           init_marker)
-    except:
+    except Exception:
        raise
    finally:
        os.unlink(keyring)
@@ -2416,10 +2424,11 @@ def upgrade_osd(new_version):
    # way to update the code on the node.
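    # maintain_all_osd_states() (defined further down in this sync) records
    # each OSD's state before the restart and, on exit, waits until every
    # OSD reports that same state again, so the upgrade cannot finalize
    # while OSDs are still recovering.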
if not dirs_need_ownership_update('osd'): log('Restarting all OSDs to load new binaries', DEBUG) - if systemd(): - service_restart('ceph-osd.target') - else: - service_restart('ceph-osd-all') + with maintain_all_osd_states(): + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') return # Need to change the ownership of all directories which are not OSD @@ -2464,11 +2473,12 @@ def _upgrade_single_osd(osd_num, osd_dir): :raises IOError: if an error occurs reading/writing to a file as part of the upgrade process """ - stop_osd(osd_num) - disable_osd(osd_num) - update_owner(osd_dir) - enable_osd(osd_num) - start_osd(osd_num) + with maintain_osd_state(osd_num): + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) def stop_osd(osd_num): @@ -2595,6 +2605,97 @@ def update_owner(path, recurse_dirs=True): secs=elapsed_time.total_seconds(), path=path), DEBUG) +def get_osd_state(osd_num, osd_goal_state=None): + """Get OSD state or loop until OSD state matches OSD goal state. + + If osd_goal_state is None, just return the current OSD state. + If osd_goal_state is not None, loop until the current OSD state matches + the OSD goal state. + + :param osd_num: the osd id to get state for + :param osd_goal_state: (Optional) string indicating state to wait for + Defaults to None + :returns: Returns a str, the OSD state. + :rtype: str + """ + while True: + asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num) + cmd = [ + 'ceph', + 'daemon', + asok, + 'status' + ] + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except (subprocess.CalledProcessError, ValueError) as e: + log("{}".format(e), level=DEBUG) + continue + osd_state = result['state'] + log("OSD {} state: {}, goal state: {}".format( + osd_num, osd_state, osd_goal_state), level=DEBUG) + if not osd_goal_state: + return osd_state + if osd_state == osd_goal_state: + return osd_state + + +def get_all_osd_states(osd_goal_states=None): + """Get all OSD states or loop until all OSD states match OSD goal states. + + If osd_goal_states is None, just return a dictionary of current OSD states. + If osd_goal_states is not None, loop until the current OSD states match + the OSD goal states. + + :param osd_goal_states: (Optional) dict indicating states to wait for + Defaults to None + :returns: Returns a dictionary of current OSD states. + :rtype: dict + """ + osd_states = {} + for osd_num in get_local_osd_ids(): + if not osd_goal_states: + osd_states[osd_num] = get_osd_state(osd_num) + else: + osd_states[osd_num] = get_osd_state( + osd_num, + osd_goal_state=osd_goal_states[osd_num]) + return osd_states + + +@contextmanager +def maintain_osd_state(osd_num): + """Ensure the state of an OSD is maintained. + + Ensures the state of an OSD is the same at the end of a block nested + in a with statement as it was at the beginning of the block. + + :param osd_num: the osd id to maintain state for + """ + osd_state = get_osd_state(osd_num) + try: + yield + finally: + get_osd_state(osd_num, osd_goal_state=osd_state) + + +@contextmanager +def maintain_all_osd_states(): + """Ensure all local OSD states are maintained. + + Ensures the states of all local OSDs are the same at the end of a + block nested in a with statement as they were at the beginning of + the block. 
+    """
+    osd_states = get_all_osd_states()
+    try:
+        yield
+    finally:
+        get_all_osd_states(osd_goal_states=osd_states)
+
+
 def list_pools(client='admin'):
    """This will list the current pools that Ceph has

@@ -2789,6 +2890,7 @@ def dirs_need_ownership_update(service):
    # All child directories had the expected ownership
    return False

+
 # A dict of valid ceph upgrade paths. Mapping is old -> new
 UPGRADE_PATHS = collections.OrderedDict([
    ('firefly', 'hammer'),

From 04ea2865b914a1abfdd9cf172c8b291301cc3b9c Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Fri, 20 Mar 2020 10:18:18 +0100
Subject: [PATCH 1915/2699] Ensure that the admin keyring has mgr capabilities

Change-Id: Icd30d464f3a6c52990c29f595d3e801e23c3c55c
Closes-Bug: #1867795
Depends-On: I5412fa1cd40a27213296532fd160012ec5299b3e
---
 ceph-mon/lib/charms_ceph/utils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py
index 90ebb1b6..8cbcdd5b 100644
--- a/ceph-mon/lib/charms_ceph/utils.py
+++ b/ceph-mon/lib/charms_ceph/utils.py
@@ -1105,6 +1105,7 @@ def get_mds_bootstrap_key():

 admin_caps = collections.OrderedDict([
    ('mds', ['allow *']),
+    ('mgr', ['allow *']),
    ('mon', ['allow *']),
    ('osd', ['allow *'])
 ])

From 2bf680606c51e342d7b8037793022d3fe18df3da Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Mon, 23 Mar 2020 09:36:37 +0100
Subject: [PATCH 1916/2699] Drop import of python-apt

Use compatibility shim provided by c-h instead.

Change-Id: Ic6bbb9f67cb93f1b58f72549ab867b3f80ae9be7
---
 ceph-radosgw/hooks/ceph_rgw.py       |  3 ---
 ceph-radosgw/hooks/utils.py          |  5 +----
 ceph-radosgw/unit_tests/test_ceph.py | 15 +++------------
 3 files changed, 4 insertions(+), 19 deletions(-)

diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py
index dd9304e6..7d48ab8a 100644
--- a/ceph-radosgw/hooks/ceph_rgw.py
+++ b/ceph-radosgw/hooks/ceph_rgw.py
@@ -107,9 +107,6 @@ def _add_light_pool(rq, pool, pg_num, prefix=None):
                   weight=w, group='objects',
                   app_name=CEPH_POOL_APP_NAME)

-    from apt import apt_pkg
-
-    apt_pkg.init()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')

diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index 4e65ae51..6d2ec213 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -51,6 +51,7 @@ from charmhelpers.fetch import (
    apt_cache,
    apt_install,
+    apt_pkg,
    apt_update,
    add_source,
    filter_installed_packages,
@@ -227,9 +228,6 @@ def setup_ipv6():
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

-    from apt import apt_pkg
-    apt_pkg.init()
-
    # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to
    # use trusty-backports otherwise we can use the UCA.
    vc = apt_pkg.version_compare(get_pkg_version('haproxy'), '1.5.3')
@@ -316,7 +314,6 @@ def _pause_resume_helper(f, configs):


 def get_pkg_version(name):
-    from apt import apt_pkg
    pkg = apt_cache()[name]
    version = None
    if pkg.current_ver:
diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py
index de5550d8..6ef85dec 100644
--- a/ceph-radosgw/unit_tests/test_ceph.py
+++ b/ceph-radosgw/unit_tests/test_ceph.py
@@ -12,16 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import sys
-
-from mock import patch, call, MagicMock
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock() -mock_apt.apt_pkg = MagicMock() -sys.modules['apt'] = mock_apt -sys.modules['apt_pkg'] = mock_apt.apt_pkg +from mock import patch, call import ceph_rgw as ceph # noqa import utils # noqa @@ -147,7 +138,7 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, name='objects', permission='rwx') - @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: -1) + @patch.object(utils.apt_pkg, 'version_compare', lambda *args: -1) @patch.object(utils, 'lsb_release', lambda: {'DISTRIB_CODENAME': 'trusty'}) @patch.object(utils, 'add_source') @@ -160,7 +151,7 @@ def test_setup_ipv6_install_backports(self, mock_add_source, self.assertTrue(mock_apt_update.called) self.assertTrue(mock_apt_install.called) - @patch.object(mock_apt.apt_pkg, 'version_compare', lambda *args: 0) + @patch.object(utils.apt_pkg, 'version_compare', lambda *args: 0) @patch.object(utils, 'lsb_release', lambda: {'DISTRIB_CODENAME': 'trusty'}) @patch.object(utils, 'add_source') From 9d3f43c979c1b872033537b48e2913fdc4180c37 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 25 Mar 2020 10:11:43 +0000 Subject: [PATCH 1917/2699] Add support for certificates --- ceph-iscsi/.gitignore | 2 + ceph-iscsi/metadata.yaml | 2 + ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 67 +++++++++-- ceph-iscsi/src/interface_tls_certificates.py | 116 +++++++++++++++++++ ceph-iscsi/templates/iscsi-gateway.cfg | 2 +- ceph-iscsi/tests/bundles/focal.yaml | 14 +++ ceph-iscsi/tests/tests.yaml | 5 + ceph-iscsi/todo.txt | 10 +- 10 files changed, 208 insertions(+), 14 deletions(-) create mode 100644 ceph-iscsi/src/interface_tls_certificates.py diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore index 950798c2..7cd7b847 100644 --- a/ceph-iscsi/.gitignore +++ b/ceph-iscsi/.gitignore @@ -1,2 +1,4 @@ lib .tox +test-decrpyt.py +.swp diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index bb0bb397..5c646604 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -15,6 +15,8 @@ subordinate: false requires: ceph-client: interface: ceph-client + certificates: + interface: tls-certificates peers: cluster: interface: ceph-iscsi-peer diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 04712d98..fabb0335 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 04712d98b1fb662f0b34ae840753abacd6073b2e +Subproject commit fabb0335c4d02c915dd93f754c38c78685ed54b6 diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index e4cfb975..f203311d 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit e4cfb975aa1ab33f81f683df5b75cd96f47b76de +Subproject commit f203311d4664fb68871b1d4a2367f6588fb1af29 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 71853d26..eb24f341 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import socket import logging import os import subprocess @@ -17,12 +18,13 @@ import charmhelpers.core.templating as ch_templating import interface_ceph_client import interface_ceph_iscsi_peer +import interface_tls_certificates import adapters import ops_openstack import gwcli_client -logger = logging.getLogger() +logger = logging.getLogger(__name__) class CephClientAdapter(adapters.OpenStackOperRelationAdapter): @@ -61,11 +63,22 @@ def gw_hosts(self): return ' '.join(sorted(hosts)) +class 
TLSCertificatesAdapter(adapters.OpenStackOperRelationAdapter): + + def __init__(self, relation): + super(TLSCertificatesAdapter, self).__init__(relation) + + @property + def enable_tls(self): + return bool(self.relation.application_certs) + + class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): relation_adapters = { 'ceph-client': CephClientAdapter, 'cluster': GatewayClientPeerAdapter, + 'certificates': TLSCertificatesAdapter, } @@ -90,20 +103,24 @@ def __init__(self, framework, key): super().__init__(framework, key) logging.info("Using {} class".format(self.release)) self.state.set_default(target_created=False) + self.state.set_default(enable_tls=False) self.ceph_client = interface_ceph_client.CephClientRequires( self, 'ceph-client') self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( self, 'cluster') + self.tls = interface_tls_certificates.TlsRequires(self, "certificates") self.adapters = CephISCSIGatewayAdapters( - (self.ceph_client, self.peers), + (self.ceph_client, self.peers, self.tls), self) self.framework.observe(self.on.ceph_client_relation_joined, self) self.framework.observe(self.ceph_client.on.pools_available, self) self.framework.observe(self.peers.on.has_peers, self) self.framework.observe(self.peers.on.ready_peers, self) self.framework.observe(self.on.create_target_action, self) + self.framework.observe(self.on.certificates_relation_joined, self) + self.framework.observe(self.on.certificates_relation_changed, self) def on_create_target_action(self, event): gw_client = gwcli_client.GatewayClient() @@ -117,7 +134,7 @@ def on_create_target_action(self, event): gw_config['fqdn']) added_gateways.append(gw_unit) gw_client.create_pool( - self.framework.model.config['rbd-pool'], + self.model.config['rbd-pool'], event.params['image-name'], event.params['image-size']) gw_client.add_client_to_target( @@ -131,7 +148,7 @@ def on_create_target_action(self, event): gw_client.add_disk_to_client( event.params['iqn'], event.params['client-initiatorname'], - self.framework.model.config['rbd-pool'], + self.model.config['rbd-pool'], event.params['image-name']) def setup_default_target(self): @@ -145,7 +162,7 @@ def setup_default_target(self): self.state.target_created = True def on_ready_peers(self, event): - if not self.model.unit.is_leader(): + if not self.unit.is_leader(): logging.info("Leader should do setup") return if not self.state.is_started: @@ -160,7 +177,7 @@ def on_ready_peers(self, event): def on_has_peers(self, event): logging.info("Unit has peers") - if self.model.unit.is_leader() and not self.peers.admin_password: + if self.unit.is_leader() and not self.peers.admin_password: logging.info("Setting admin password") alphabet = string.ascii_letters + string.digits password = ''.join(secrets.choice(alphabet) for i in range(8)) @@ -169,7 +186,7 @@ def on_has_peers(self, event): def on_ceph_client_relation_joined(self, event): logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( - self.framework.model.config['rbd-pool']) + self.model.config['rbd-pool']) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', @@ -204,6 +221,42 @@ def render_configs(): self.update_status() logging.info("on_pools_available: status updated") + def on_certificates_relation_joined(self, event): + addresses = set() + for binding_name in ['public', 'cluster']: + binding = self.model.get_binding(binding_name) + addresses.add(binding.network.ingress_address) + addresses.add(binding.network.bind_address) + sans = 
[str(s) for s in addresses]
+        sans.append(socket.gethostname())
+        self.tls.request_application_cert(socket.getfqdn(), sans)
+
+    def on_certificates_relation_changed(self, event):
+        app_certs = self.tls.application_certs
+        if not all([self.tls.root_ca_cert, app_certs]):
+            return
+        ca_cert_data = self.tls.root_ca_cert
+        if self.tls.chain:
+            # append the chain so certs signed by an intermediate are trusted
+            ca_cert_data += os.linesep + self.tls.chain
+        pem_data = app_certs['cert'] + os.linesep + app_certs['key']
+        tls_files = {
+            '/etc/ceph/iscsi-gateway.crt': app_certs['cert'],
+            '/etc/ceph/iscsi-gateway.key': app_certs['key'],
+            '/etc/ceph/iscsi-gateway.pem': pem_data,
+            '/usr/local/share/ca-certificates/vault_ca_cert.crt': ca_cert_data}
+        for tls_file, tls_data in tls_files.items():
+            with open(tls_file, 'w') as f:
+                f.write(tls_data)
+        subprocess.check_call(['update-ca-certificates'])
+        cert_out = subprocess.check_output(
+            ('openssl x509 -inform pem -in /etc/ceph/iscsi-gateway.pem '
+             '-pubkey -noout').split())
+        with open('/etc/ceph/iscsi-gateway-pub.key', 'w') as f:
+            f.write(cert_out.decode('UTF-8'))
+        self.state.enable_tls = True
+        self.on_pools_available(event)
+

 @ops_openstack.charm_class
 class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase):
diff --git a/ceph-iscsi/src/interface_tls_certificates.py b/ceph-iscsi/src/interface_tls_certificates.py
new file mode 100644
index 00000000..0e12adb9
--- /dev/null
+++ b/ceph-iscsi/src/interface_tls_certificates.py
@@ -0,0 +1,116 @@
+import json
+from ops.framework import (
+    Object,
+)
+
+
+class TlsRequires(Object):
+    def __init__(self, parent, key):
+        super().__init__(parent, key)
+        self.name = self.relation_name = key
+
+    def request_application_cert(self, cn, sans):
+        """
+        Request an application certificate and key be generated for the
+        given common name (`cn`) and list of alternative names (`sans`).
+
+        This can be called multiple times to request more than one
+        application certificate, although the common names must be unique.
+        If called again with the same common name, it will be ignored.
+        """
+        relations = self.framework.model.relations[self.name]
+        if not relations:
+            return
+        # assume we'll only be connected to one provider
+        relation = relations[0]
+        unit = self.framework.model.unit
+        requests = relation.data[unit].get('application_cert_requests', '{}')
+        requests = json.loads(requests)
+        requests[cn] = {'sans': sans}
+        relation.data[unit]['application_cert_requests'] = json.dumps(
+            requests,
+            sort_keys=True)
+
+    @property
+    def root_ca_cert(self):
+        """
+        Root CA certificate.
+        """
+        # only the leader of the provider should set the CA, or all units
+        # had better agree
+        for relation in self.framework.model.relations[self.name]:
+            for unit in relation.units:
+                if relation.data[unit].get('ca'):
+                    return relation.data[unit].get('ca')
+
+    @property
+    def chain(self):
+        """
+        Chain of intermediate CA certificates.
+        """
+        # only the leader of the provider should set the chain, or all units
+        # had better agree
+        for relation in self.framework.model.relations[self.name]:
+            for unit in relation.units:
+                if relation.data[unit].get('chain'):
+                    return relation.data[unit].get('chain')
+
+    @property
+    def server_certs(self):
+        """
+        List of [Certificate][] instances for all available server certs.
+ """ + unit_name = self.framework.model.unit.name.replace('/', '_') + field = '{}.processed_requests'.format(unit_name) + + for relation in self.framework.model.relations[self.name]: + for unit in relation.units: + if field not in relation.data[unit]: + continue + certs_data = relation.data[unit][field] + if not certs_data: + continue + certs_data = json.loads(certs_data) + if not certs_data: + continue + return list(certs_data.values())[0] + + @property + def client_certs(self): + """ + List of [Certificate][] instances for all available client certs. + """ + unit_name = self.framework.model.unit.name.replace('/', '_') + field = '{}.processed_client_requests'.format(unit_name) + + for relation in self.framework.model.relations[self.name]: + for unit in relation.units: + if field not in relation.data[unit]: + continue + certs_data = relation.data[unit][field] + if not certs_data: + continue + certs_data = json.loads(certs_data) + if not certs_data: + continue + return list(certs_data.values())[0] + + @property + def application_certs(self): + """ + List of [Certificate][] instances for all available application certs. + """ + unit_name = self.framework.model.unit.name.replace('/', '_') + field = '{}.processed_application_requests'.format(unit_name) + + for relation in self.framework.model.relations[self.name]: + for unit in relation.units: + if field not in relation.data[unit]: + continue + certs_data = relation.data[unit][field] + if not certs_data: + continue + certs_data = json.loads(certs_data) + if not certs_data: + continue + return certs_data['app_data'] diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 2fd2aa24..f439fa8d 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -23,7 +23,7 @@ gateway_keyring = ceph.client.ceph-iscsi.keyring # # to switch to https mode. 
# # # To support the API, the bear minimum settings are:
-api_secure = false
+api_secure = {{ certificates.enable_tls }}
 api_user = admin
 api_password = {{ cluster.admin_password }}
 api_port = 5000
diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml
index 86ca50fb..a14902db 100644
--- a/ceph-iscsi/tests/bundles/focal.yaml
+++ b/ceph-iscsi/tests/bundles/focal.yaml
@@ -23,8 +23,22 @@ applications:
     options:
       monitor-count: '3'
       source: cloud:bionic-train
+  vault:
+    num_units: 1
+#    charm: cs:~openstack-charmers-next/vault
+    charm: cs:~gnuoy/vault-28
+  mysql:
+    charm: cs:~openstack-charmers-next/percona-cluster
+    num_units: 1
+    options:
+      innodb-buffer-pool-size: 256M
+      max-connections: 1000
 relations:
 - - ceph-mon:client
   - ceph-iscsi:ceph-client
+- - vault:certificates
+  - ceph-iscsi:certificates
 - - ceph-osd:mon
   - ceph-mon:osd
+- - vault:shared-db
+  - mysql:shared-db
diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml
index 28dd4613..8f5990e2 100644
--- a/ceph-iscsi/tests/tests.yaml
+++ b/ceph-iscsi/tests/tests.yaml
@@ -4,6 +4,11 @@ gate_bundles:
 smoke_bundles:
   - focal
 configure:
+  - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
   - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup
 tests:
   - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest
+target_deploy_status:
+  vault:
+    workload-status: blocked
+    workload-status-message: Vault needs to be initialized
diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt
index 7d24970e..9776b5d9 100644
--- a/ceph-iscsi/todo.txt
+++ b/ceph-iscsi/todo.txt
@@ -1,15 +1,17 @@
 Todo
-* Refactor ceph broker code in charm helpers
-* Rewrite ceph-client interface to stop using any relation* commands via charmhelpers
-* Certificates interface
-* Ceph heartbeat settings https://docs.ceph.com/docs/master/rbd/iscsi-requirements/
 * Write README
 * Move to openstack-charmers
 * security checklist
 * zaza tests for pause/resume
 * remove hardcoded password
+* switch to mod_wsgi
+
+* Refactor ceph broker code in charm helpers
+* Rewrite ceph-client interface to stop using any relation* commands via charmhelpers
+* Ceph heartbeat settings https://docs.ceph.com/docs/master/rbd/iscsi-requirements/

 Mostly Done
+* Certificates interface
 * trusted_ips
 * zaza tests for creating nd mounting a target
 * Implement pause/resume

From 44e7585b9f53ee822cc719a4270f4e930bd2c0f9 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Wed, 1 Apr 2020 12:56:11 +0200
Subject: [PATCH 1918/2699] Be more defensive when checking Vault

When relations are departing during a unit removal, the relation to
Vault can lose enough data
The checks for Vault readiness should be more defensive to ensure that charms can successfully depart their relations Depends-On https://github.com/juju/charm-helpers/pull/445 Closes-Bug: #1868282 Change-Id: I0b6226f0f3500aef7304f8e8b38d06daebfd0c20 --- .../charmhelpers/contrib/openstack/vaultlocker.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 866a2697..4690f6b0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -140,9 +140,16 @@ def vault_relation_complete(backend=None): :ptype backend: string :returns: whether the relation to vault is complete :rtype: bool""" - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False # TODO: contrib a high level unwrap method to hvac that works From 9e69027024d13809e85a8644c27c4971b91e1c02 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 2 Apr 2020 13:49:37 +0000 Subject: [PATCH 1919/2699] Add README and other bits --- ceph-iscsi/README.md | 141 ++++++++++++++++++-- ceph-iscsi/actions.yaml | 13 +- ceph-iscsi/metadata.yaml | 3 + ceph-iscsi/mod/interface-ceph-client | 2 +- ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 46 +++++-- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 4 +- ceph-iscsi/tests/bundles/focal.yaml | 6 +- 9 files changed, 185 insertions(+), 34 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index d10b2017..ab1562ab 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -1,21 +1,138 @@ -Ceph iSCSI Gateway charm -======================== +# Overview -To use, first pull in dependencies: +The charm provides the Ceph iSCSI gateway service. It is intended to be used +in conjunction with the ceph-osd and ceph-mon charms. + +> **Warning**: This charm is in a preview state for testing and should not + be used outside of the lab. + +# Usage + +## Deployment + +When deploying ceph-iscsi ensure that exactly two units of the charm are being +deployed, this will provide multiple data paths to clients. + +> **Note**: Deploying four units is also theoretical possible but has not + been tested. + +A sample `bundle.yaml` file's contents: + +```yaml + series: focal + applications: + ceph-iscsi: + charm: cs:ceph-iscsi + num_units: 2 + ceph-osd: + charm: cs:ceph-osd + num_units: 3 + storage: + osd-devices: /dev/vdb + options: + source: cloud:bionic-train + ceph-mon: + charm: cs:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-train + relations: + - - ceph-mon:client + - ceph-iscsi:ceph-client + - - ceph-osd:mon + - ceph-mon:osd +``` + +> **Important**: Make sure the designated block device passed to the ceph-osd + charms exists and is not currently in use. + +Deploy the bundle: + + juju deploy ./bundle.yaml + + +## Managing Targets + +The charm provides an action for creating a simple target. If more complex +managment of targets is requires then the `gwcli` tool should be used. `gwcli` +is available from the root account on the gateway nodes. 
+```bash
+ $ juju ssh ceph-iscsi/1
+ $ sudo gwcli
+ /> ls
+```
+
+## Actions
+
+This section covers Juju [actions][juju-docs-actions] supported by the charm.
+Actions allow specific operations to be performed on a per-unit basis.
+
+### create-target
+
+Run this action to create an iscsi target.
+
+```bash
+ $ juju run-action ceph-iscsi/0 create-target \
+     image-size=2G \
+     image-name=bob \
+     client-initiatorname=iqn.1993-08.org.debian:01:aaa2299be916 \
+     client-username=usera \
+     client-password=testpass
+ Action queued with id: "28"
+```
+
+The iqn of the created target is returned in the output of the action:
+
+```bash
+ $ juju show-action-output 28
+ UnitId: ceph-iscsi/0
+ results:
+   iqn: iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw
+ status: completed
+ timing:
+   completed: 2020-04-02 13:32:02 +0000 UTC
+   enqueued: 2020-04-02 13:18:42 +0000 UTC
+   started: 2020-04-02 13:18:45 +0000 UTC
+```
+
+### pause
+
+Pause the ceph-iscsi unit. This action will stop the rbd services.
+
+### resume
+
+Resume the ceph-iscsi unit. This action will start the rbd services if paused.
+
+## Network spaces
+
+This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju
+`v.2.0`). This feature optionally allows specific types of the application's
+network traffic to be bound to subnets that the underlying hardware is
+connected to.
+
+> **Note**: Spaces must be configured in the backing cloud prior to deployment.
+
+The ceph-iscsi charm exposes the following traffic types (bindings):
+
+- 'public' (front-side)
+- 'cluster' (back-side)
+
+For example, providing that spaces 'data-space' and 'cluster-space' exist, the
+deploy command above could look like this:
+
+    juju deploy --config ceph-iscsi.yaml -n 2 ceph-iscsi \
+       --bind "public=data-space cluster=cluster-space"
+
+Alternatively, configuration can be provided as part of a bundle:
+
+```yaml
+    ceph-iscsi:
+      charm: cs:ceph-iscsi
+      num_units: 2
+      bindings:
+        public: data-space
+        cluster: cluster-space
+```
-To run the charm tests (tested on OpenStack provider):
-tox -e func-smoke
diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml
index abf6fd3e..17598675 100644
--- a/ceph-iscsi/actions.yaml
+++ b/ceph-iscsi/actions.yaml
@@ -11,20 +11,24 @@ resume:
    corresponding hacluster unit on the node must be resumed as well.
security-checklist:
  description: Validate the running configuration against the OpenStack security guides checklist
+add-trusted-ip:
+  description: "Add IP address that is permitted to talk to API"
+  params:
+    ips:
+      type: string
+      default: ''
+      description: "Space separated list of trusted ips"
 create-target:
  description: "Create a new cache tier"
  params:
    gateway-units:
      type: string
-      default: writeback
      description: "Space seperated list of gateway units eg 'ceph-iscsi/0 ceph-scsi/1'"
    iqn:
      type: string
-      default: writeback
      description: "iSCSI Qualified Name"
    image-size:
      type: string
-      default: 1G
      description: "Target size"
    image-name:
      type: string
@@ -32,7 +36,6 @@ create-target:
      description: "Image name "
    client-initiatorname:
      type: string
-      default: 1G
      description: "The initiator name of the client that will mount the target"
    client-username:
      type: string
@@ -41,8 +44,6 @@ create-target:
      type: string
      description: "The CHAPs password to be created for the client"
  required:
-    - gateway-units
-    - iqn
    - image-size
    - image-name
    - client-initiatorname
diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml
index 5c646604..c809f86c 100644
--- a/ceph-iscsi/metadata.yaml
+++ b/ceph-iscsi/metadata.yaml
@@ -12,6 +12,9 @@ tags:
 series:
 - focal
 subordinate: false
+extra-bindings:
+  public:
+  cluster:
 requires:
  ceph-client:
    interface: ceph-client
diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client
index cb3557ba..6a99f92a 160000
--- a/ceph-iscsi/mod/interface-ceph-client
+++ b/ceph-iscsi/mod/interface-ceph-client
@@ -1 +1 @@
-Subproject commit cb3557ba8aa2997936e19ed876d2b2b962a75868
+Subproject commit 6a99f92ae090aad224044d0862a3da78c7a04a55
diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator
index fabb0335..d259e091 160000
--- a/ceph-iscsi/mod/operator
+++ b/ceph-iscsi/mod/operator
@@ -1 +1 @@
-Subproject commit fabb0335c4d02c915dd93f754c38c78685ed54b6
+Subproject commit d259e0919fc19075b1e3636a5dd3c94ab81fd416
diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack
index f203311d..bef6f216 160000
--- a/ceph-iscsi/mod/ops-openstack
+++ b/ceph-iscsi/mod/ops-openstack
@@ -1 +1 @@
-Subproject commit f203311d4664fb68871b1d4a2367f6588fb1af29
+Subproject commit bef6f2161be12eeb3385aac113a738aecc85d807
diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py
index eb24f341..34e6745f 100755
--- a/ceph-iscsi/src/charm.py
+++ b/ceph-iscsi/src/charm.py
@@ -92,7 +92,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm):
        "mgr", "allow r"]

    RESTART_MAP = {
-        '/etc/ceph/ceph.conf': ['rbd-target-api'],
+        '/etc/ceph/ceph.conf': ['rbd-target-api', 'rbd-target-gw'],
        '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'],
        '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']}

@@ -104,6 +104,7 @@ def __init__(self, framework, key):
        logging.info("Using {} class".format(self.release))
        self.state.set_default(target_created=False)
        self.state.set_default(enable_tls=False)
+        self.state.set_default(additional_trusted_ips=[])
        self.ceph_client = interface_ceph_client.CephClientRequires(
            self,
            'ceph-client')
@@ -119,17 +120,28 @@ def __init__(self, framework, key):
        self.framework.observe(self.peers.on.has_peers, self)
        self.framework.observe(self.peers.on.ready_peers, self)
        self.framework.observe(self.on.create_target_action, self)
+        self.framework.observe(self.on.add_trusted_ip_action, self)
        self.framework.observe(self.on.certificates_relation_joined, self)
        self.framework.observe(self.on.certificates_relation_changed, self)
+        self.framework.observe(self.on.config_changed, self)
+        self.framework.observe(self.on.upgrade_charm, self)
+
+    def on_add_trusted_ip_action(self, event):
+        self.state.additional_trusted_ips.append(event.params['ips'].split(' '))
+        logging.info(self.state.additional_trusted_ips)
+
    def on_create_target_action(self, event):
        gw_client = gwcli_client.GatewayClient()
-        gw_client.create_target(event.params['iqn'])
+        target = event.params.get('iqn', self.DEFAULT_TARGET)
+        gateway_units = event.params.get(
+            'gateway-units',
+            [u for u in self.peers.ready_peer_details.keys()])
+        gw_client.create_target(target)
        for gw_unit, gw_config in self.peers.ready_peer_details.items():
            added_gateways = []
-            if gw_unit in event.params['gateway-units']:
+            if gw_unit in gateway_units:
                gw_client.add_gateway_to_target(
-                    event.params['iqn'],
+                    target,
                    gw_config['ip'],
                    gw_config['fqdn'])
                added_gateways.append(gw_unit)
@@ -138,18 +150,19 @@ def on_create_target_action(self, event):
            event.params['image-name'],
            event.params['image-size'])
        gw_client.add_client_to_target(
-            event.params['iqn'],
+            target,
            event.params['client-initiatorname'])
        gw_client.add_client_auth(
-            event.params['iqn'],
+            target,
            event.params['client-initiatorname'],
            event.params['client-username'],
            event.params['client-password'])
        gw_client.add_disk_to_client(
-            event.params['iqn'],
+            target,
            event.params['client-initiatorname'],
            self.model.config['rbd-pool'],
            event.params['image-name'])
+        event.set_results({'iqn': target})

    def setup_default_target(self):
@@ -173,7 +186,11 @@ def on_ready_peers(self, event):
            logging.info("Initial target setup already complete")
            return
        else:
-            self.setup_default_target()
+            # This appears to race and sometimes runs before the
+            # peer is 100% ready. There is probably little value
+            # in this anyway so may just remove it.
+ # self.setup_default_target() + return def on_has_peers(self, event): logging.info("Unit has peers") @@ -191,6 +208,19 @@ def on_ceph_client_relation_joined(self, event): self.ceph_client.request_ceph_permissions( 'ceph-iscsi', self.CEPH_CAPABILITIES) + self.ceph_client.request_osd_settings({ + 'osd heartbeat grace': 20, + 'osd heartbeat interval': 5}) + + def on_config_changed(self, event): + if self.state.is_started: + self.on_pools_available(event) + self.on_ceph_client_relation_joined(event) + + def on_upgrade_charm(self, event): + if self.state.is_started: + self.on_pools_available(event) + self.on_ceph_client_relation_joined(event) def on_pools_available(self, event): logging.info("on_pools_available") diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index 87eae24f..ca156f36 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -6,7 +6,7 @@ from ops.framework import ( StoredState, EventBase, - EventsBase, + EventSetBase, EventSource, Object) @@ -19,7 +19,7 @@ class ReadyPeersEvent(EventBase): pass -class CephISCSIGatewayPeerEvents(EventsBase): +class CephISCSIGatewayPeerEvents(EventSetBase): has_peers = EventSource(HasPeersEvent) ready_peers = EventSource(ReadyPeersEvent) diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index a14902db..5390b860 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -10,7 +10,7 @@ applications: options: rbd-pool: tmbtil ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: cs:~gnuoy/ceph-osd-5 num_units: 3 storage: osd-devices: 'cinder,10G' @@ -18,7 +18,7 @@ applications: osd-devices: '/dev/test-non-existent' source: cloud:bionic-train ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: cs:~gnuoy/ceph-mon-6 num_units: 3 options: monitor-count: '3' @@ -26,7 +26,7 @@ applications: vault: num_units: 1 # charm: cs:~openstack-charmers-next/vault - charm: cs:~gnuoy/vault-28 + charm: cs:~gnuoy/vault-29 mysql: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 From ca8955fcdddc2b115f3f8e7c3023179e17daa41f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 3 Apr 2020 14:08:33 +0000 Subject: [PATCH 1920/2699] Fix README and make distinction around metadata pool --- ceph-iscsi/README.md | 7 +++++++ ceph-iscsi/actions.yaml | 5 +++++ ceph-iscsi/config.yaml | 4 ++-- ceph-iscsi/src/charm.py | 4 ++-- ceph-iscsi/templates/iscsi-gateway.cfg | 2 +- ceph-iscsi/tests/bundles/focal.yaml | 2 +- 6 files changed, 18 insertions(+), 6 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index ab1562ab..ebff8a2c 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -77,6 +77,7 @@ Run this action to create an iscsi target. 
$ juju run-action ceph-iscsi/0 create-target \ image-size=2G \ image-name=bob \ + pool-name=superssd \ client-initiatorname=iqn.1993-08.org.debian:01:aaa2299be916 \ client-username=usera \ client-password=testpass @@ -136,3 +137,9 @@ Alternatively, configuration can be provided as part of a bundle: cluster: cluster-space ``` + + +[cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide +[juju-docs-spaces]: https://jaas.ai/docs/spaces +[juju-docs-actions]: https://jaas.ai/docs/actions diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 17598675..3f8091af 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -34,6 +34,10 @@ create-target: type: string default: disk_1 description: "Image name " + pool-name: + type: string + default: iscsi + description: "Name of ceph pool to use to back target " client-initiatorname: type: string description: "The initiator name of the client that will mount the target" @@ -44,6 +48,7 @@ create-target: type: string description: "The CHAPs password to be created for the client" required: + - pool-name - image-size - image-name - client-initiatorname diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index 291d5d13..e2416e77 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -35,11 +35,11 @@ options: 192.168.0.0/24). If multiple networks are to be used, a space-delimited list of a.b.c.d/x can be provided. - rbd-pool: + rbd-metadata-pool: type: string default: iscsi description: | - RBD pool to use for iscsi backend. + RBD pool to use to store gateway configuration. prefer-ipv6: type: boolean default: False diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 34e6745f..2c3897e2 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -160,7 +160,7 @@ def on_create_target_action(self, event): gw_client.add_disk_to_client( target, event.params['client-initiatorname'], - self.model.config['rbd-pool'], + self.model.config['pool-name'], event.params['image-name']) event.set_results({'iqn': target}) @@ -203,7 +203,7 @@ def on_has_peers(self, event): def on_ceph_client_relation_joined(self, event): logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( - self.model.config['rbd-pool']) + self.model.config['rbd-metadata-pool']) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index f439fa8d..3c068427 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -5,7 +5,7 @@ logger_level = DEBUG cluster_name = ceph cluster_client_name = client.ceph-iscsi -pool = {{ options.rbd_pool }} +pool = {{ options.rbd_metadata_pool }} # # # Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph # # drectory and reference the filename here diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 5390b860..19b72818 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -8,7 +8,7 @@ applications: series: focal num_units: 2 options: - rbd-pool: tmbtil + rbd-metadata-pool: tmbtil ceph-osd: charm: cs:~gnuoy/ceph-osd-5 num_units: 3 From 26363ae7ba80511c2707d2174651a841b28f5124 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 3 Apr 2020 15:16:07 +0000 Subject: [PATCH 1921/2699] Fix pool-name in action --- ceph-iscsi/src/charm.py | 4 
++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py
index 2c3897e2..caf195ed 100755
--- a/ceph-iscsi/src/charm.py
+++ b/ceph-iscsi/src/charm.py
@@ -146,7 +146,7 @@ def on_create_target_action(self, event):
                    gw_config['fqdn'])
                added_gateways.append(gw_unit)
        gw_client.create_pool(
-            self.model.config['rbd-pool'],
+            event.params['pool-name'],
            event.params['image-name'],
            event.params['image-size'])
        gw_client.add_client_to_target(
@@ -160,7 +160,7 @@ def on_create_target_action(self, event):
        gw_client.add_disk_to_client(
            target,
            event.params['client-initiatorname'],
-            self.model.config['pool-name'],
+            event.params['pool-name'],
            event.params['image-name'])
        event.set_results({'iqn': target})

From 71d4a9640762f8e515623b7655a2c0ad491e2292 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Wed, 8 Apr 2020 07:54:02 +0000
Subject: [PATCH 1922/2699] Update readme

---
 ceph-iscsi/README.md | 72 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md
index ebff8a2c..eb6e5db9 100644
--- a/ceph-iscsi/README.md
+++ b/ceph-iscsi/README.md
@@ -137,9 +137,81 @@ Alternatively, configuration can be provided as part of a bundle:
        cluster: cluster-space
 ```

+# VMWare integration
+
+1. Create ceph pool if required.
+
+   To create a new pool to back the iscsi targets, run the create-pool action
+   from the ceph-mon charm.
+
+```bash
+ $ juju run-action ceph-mon/0 create-pool name=iscsi-targets
+ Action queued with id: "1"
+ $ juju show-action-output 1
+ UnitId: ceph-mon/0
+ results:
+   Stderr: |
+     pool 'iscsi-targets' created
+     set pool 2 size to 3
+     set pool 2 target_size_ratio to 0.1
+     enabled application 'unknown' on pool 'iscsi-targets'
+     set pool 2 pg_autoscale_mode to on
+ status: completed
+ timing:
+   completed: 2020-04-08 06:42:00 +0000 UTC
+   enqueued: 2020-04-08 06:41:38 +0000 UTC
+   started: 2020-04-08 06:41:42 +0000 UTC
+```
+
+2. Collect the initiator name for the adapter.
+
+   From the VMWare admin UI select the `Adapters` tab in the Storage
+   context. Ensure `iSCSI enabled` is set to `Enabled`.
+
+   Click 'Configure iSCSI' and take a note of the `iqn` name.
+
+3. Create iSCSI target.
+
+   Run the action to create a target for VMWare to use.
+
+> **Note**: The username should be more than eight characters and the password
+  between twelve and sixteen characters.
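+
+   For example (the initiator name, CHAP credentials and image values below
+   are all illustrative):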
+ + [cg]: https://docs.openstack.org/charm-guide [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide [juju-docs-spaces]: https://jaas.ai/docs/spaces [juju-docs-actions]: https://jaas.ai/docs/actions +[ceph-vmware]: https://docs.ceph.com/docs/master/rbd/iscsi-initiator-esx/ From 80e1123d652d7a5c6d596eda0b8b27fa93265aa8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 31 Mar 2020 15:40:41 +0200 Subject: [PATCH 1923/2699] Add support for Ceph's prometheus monitoring This adds support for Ceph's prometheus monitoring when Ceph itself has this support Change-Id: I621824c910b0540181d5aca194c6b98e1d3d62d6 Closes-Bug: #1789332 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/214 --- ceph-mon/README.md | 12 +++++ ceph-mon/hooks/ceph_hooks.py | 50 +++++++++++++++++++++ ceph-mon/hooks/prometheus-relation-changed | 1 + ceph-mon/hooks/prometheus-relation-departed | 1 + ceph-mon/hooks/prometheus-relation-joined | 1 + ceph-mon/hooks/utils.py | 27 ++++++++++- ceph-mon/metadata.yaml | 2 + ceph-mon/tests/bundles/bionic-queens.yaml | 5 +++ ceph-mon/tests/bundles/bionic-rocky.yaml | 5 +++ ceph-mon/tests/bundles/bionic-stein.yaml | 5 +++ ceph-mon/tests/bundles/bionic-train.yaml | 5 +++ ceph-mon/tests/bundles/xenial-queens.yaml | 5 +++ ceph-mon/tests/tests.yaml | 1 + ceph-mon/unit_tests/test_ceph_hooks.py | 5 ++- ceph-mon/unit_tests/test_ceph_utils.py | 18 ++++++++ 15 files changed, 141 insertions(+), 2 deletions(-) create mode 120000 ceph-mon/hooks/prometheus-relation-changed create mode 120000 ceph-mon/hooks/prometheus-relation-departed create mode 120000 ceph-mon/hooks/prometheus-relation-joined diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 83828f19..bb87aba7 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -73,6 +73,17 @@ implications of segregating Ceph network traffic. or `ceph-cluster-network` options will continue to honour them. Furthermore, these options override any space bindings, if set. +## Monitoring + +The charm supports Ceph metric monitoring with Prometheus. Add relations to the +[prometheus][prometheus-charm] application in this way: + + juju deploy cs:prometheus2 + juju add-relation ceph-mon prometheus2 + +> **Note**: Prometheus support is available starting with Ceph Luminous + (xenial-queens UCA pocket). + ## Actions This section lists Juju [actions][juju-docs-actions] supported by the charm. @@ -203,3 +214,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. 
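To complement the Monitoring section above: the relation works by enabling Ceph's `prometheus` manager module, which serves metrics on port 9283 (the port this charm publishes to the related application). A rough sketch of verifying the integration by hand, assuming juju 2.x syntax and a unit named `ceph-mon/0`:

```bash
# Check that the prometheus manager module was enabled by the relation.
juju run --unit ceph-mon/0 'ceph mgr module ls'

# Fetch a sample of the metrics the module exposes on its default port.
juju run --unit ceph-mon/0 'curl -s http://localhost:9283/metrics' | head
```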
[ceph-docs-monitors]: https://docs.ceph.com/docs/master/dev/mon-bootstrap [lp-bugs-charm-ceph-mon]: https://bugs.launchpad.net/charm-ceph-mon/+filebug [cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html +[prometheus-charm]: https://jaas.ai/prometheus2 \ No newline at end of file diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 627705fb..91a57a8e 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -33,6 +33,7 @@ DEBUG, ERROR, INFO, + WARNING, config, relation_ids, related_units, @@ -91,7 +92,9 @@ has_rbd_mirrors, get_ceph_osd_releases, execute_post_osd_upgrade_steps, + mgr_disable_module, mgr_enable_module, + is_mgr_module_enabled, ) from charmhelpers.contrib.charmsupport import nrpe @@ -416,6 +419,39 @@ def bootstrap_source_relation_changed(): mon_relation() +@hooks.hook('prometheus-relation-joined', + 'prometheus-relation-changed') +def prometheus_relation(relid=None, unit=None, prometheus_permitted=None, + module_enabled=None): + if not ceph.is_bootstrapped(): + return + if prometheus_permitted is None: + prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0 + if module_enabled is None: + module_enabled = (is_mgr_module_enabled('prometheus') or + mgr_enable_module('prometheus')) + log("checking if prometheus module is enabled") + if prometheus_permitted and module_enabled: + log("Updating prometheus") + addr = get_public_addr() + data = { + 'hostname': format_ipv6_addr(addr) or addr, + 'port': 9283, + } + relation_set(relation_id=relid, + relation_settings=data) + else: + log("Couldn't enable prometheus, but are related. " + "Prometheus is available in Ceph version: {} ; " + "Prometheus Module is enabled: {}".format( + prometheus_permitted, module_enabled), level=WARNING) + + +@hooks.hook('prometheus-relation-departed') +def prometheus_left(): + mgr_disable_module('prometheus') + + @hooks.hook('mon-relation-departed', 'mon-relation-changed', 'leader-settings-changed', @@ -499,11 +535,24 @@ def mon_relation(): notify_radosgws() notify_client() notify_rbd_mirrors() + notify_prometheus() else: log('Not enough mons ({}), punting.' 
.format(len(get_mon_hosts()))) +def notify_prometheus(): + if relation_ids('prometheus') and ceph.is_bootstrapped(): + prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0 + module_enabled = (is_mgr_module_enabled('prometheus') or + mgr_enable_module('prometheus')) + for relid in relation_ids('prometheus'): + for unit in related_units(relid): + prometheus_relation(relid=relid, unit=unit, + prometheus_permitted=prometheus_permitted, + module_enabled=module_enabled) + + def notify_osds(): for relid in relation_ids('osd'): for unit in related_units(relid): @@ -859,6 +908,7 @@ def upgrade_charm(): notify_client() notify_radosgws() notify_rbd_mirrors() + notify_prometheus() @hooks.hook('nrpe-external-master-relation-joined') diff --git a/ceph-mon/hooks/prometheus-relation-changed b/ceph-mon/hooks/prometheus-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/prometheus-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/prometheus-relation-departed b/ceph-mon/hooks/prometheus-relation-departed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/prometheus-relation-departed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/prometheus-relation-joined b/ceph-mon/hooks/prometheus-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/prometheus-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index a6451089..fda4ca0d 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -70,6 +70,17 @@ def enable_pocket(pocket): sources.write(line) +def is_mgr_module_enabled(module): + """Check whether a given manager module is enabled. + + :param module: The module name to check + :type module: str + :returns: Whether the named module is enabled + :rtype: bool + """ + return module in ceph.enabled_manager_modules() + + def mgr_enable_module(module): """Enable a Ceph Manager Module. @@ -78,12 +89,26 @@ def mgr_enable_module(module): :raises: subprocess.CalledProcessError """ - if module not in ceph.enabled_manager_modules(): + if not is_mgr_module_enabled(module): subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module]) return True return False + +def mgr_disable_module(module): + """Disable a Ceph Manager Module.
+ + :param module: The module name to disable + :type module: str + + :raises: subprocess.CalledProcessError + """ + if is_mgr_module_enabled(module): + subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module]) + return True + return False + + @cached def get_unit_hostname(): return socket.gethostname() diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 59814225..08ff8ce8 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -39,6 +39,8 @@ provides: nrpe-external-master: interface: nrpe-external-master scope: container + prometheus: + interface: http requires: bootstrap-source: interface: ceph-bootstrap diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index 8bc49b32..0c4e275c 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -47,6 +47,9 @@ applications: expose: True charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 + prometheus2: + charm: cs:prometheus2 + num_units: 1 relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -88,3 +91,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml index 7e165bde..274d5131 100644 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -61,6 +61,9 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-rocky + prometheus2: + charm: cs:prometheus2 + num_units: 1 relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -102,3 +105,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml index 56695dd7..d809bb2d 100644 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -61,6 +61,9 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-stein + prometheus2: + charm: cs:prometheus2 + num_units: 1 relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -102,3 +105,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index 1b270900..f3a9cba8 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -65,6 +65,9 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train + prometheus2: + charm: cs:prometheus2 + num_units: 1 relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -112,3 +115,5 @@ relations: - keystone - - placement - nova-cloud-controller +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml index ab12215e..9c94e9bf 100644 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -61,6 +61,9 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-queens + prometheus2: + charm: cs:prometheus2 + num_units: 1 relations: - - nova-compute:amqp - rabbitmq-server:amqp @@ -102,3 +105,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service -
glance:image-service +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 8a9b3886..aea60ae0 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -20,3 +20,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest \ No newline at end of file diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 7d4c9dff..e9aa57bd 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -198,6 +198,7 @@ def test_nrpe_dependency_installed(self, mock_config): mocks["apt_install"].assert_called_once_with( ["python-dbus", "lockfile-progs"]) + @patch.object(ceph_hooks, 'notify_prometheus') @patch.object(ceph_hooks, 'notify_rbd_mirrors') @patch.object(ceph_hooks, 'service_pause') @patch.object(ceph_hooks, 'notify_radosgws') @@ -211,7 +212,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_ceph, mock_notify_radosgws, mock_service_pause, - mock_notify_rbd_mirrors): + mock_notify_rbd_mirrors, + mock_notify_prometheus): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] with patch.multiple( @@ -232,6 +234,7 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_notify_client.assert_called_once_with() mock_notify_radosgws.assert_called_once_with() mock_ceph.update_monfs.assert_called_once_with() + mock_notify_prometheus.assert_called_once_with() mock_service_pause.assert_called_with('ceph-create-keys') @patch.object(ceph_hooks, 'mds_relation_joined') diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 3e8d7a72..1be5ea0b 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -39,6 +39,24 @@ def test_has_rbd_mirrors(self, _relation_ids, _related_units): _relation_ids.assert_called_once_with('rbd-mirror') _related_units.assert_called_once_with('arelid') + @mock.patch.object(utils.ceph, 'enabled_manager_modules') + def test_mgr_module_enabled(self, _enabled_modules): + _enabled_modules.return_value = [] + self.assertFalse(utils.is_mgr_module_enabled('test-module')) + + @mock.patch.object(utils.ceph, 'enabled_manager_modules') + def test_mgr_module__is_enabled(self, _enabled_modules): + _enabled_modules.return_value = ['test-module'] + self.assertTrue(utils.is_mgr_module_enabled('test-module')) + + @mock.patch.object(utils.ceph, 'enabled_manager_modules') + @mock.patch.object(utils.subprocess, 'check_call') + def test_mgr_disable_module(self, _call, _enabled_modules): + _enabled_modules.return_value = ['test-module'] + utils.mgr_disable_module('test-module') + _call.assert_called_once_with( + ['ceph', 'mgr', 'module', 'disable', 'test-module']) + @mock.patch.object(utils.ceph, 'enabled_manager_modules') @mock.patch.object(utils.subprocess, 'check_call') def test_mgr_enable_module(self, _call, _enabled_modules): From 815f1ad4050c9d8792cff2229470d1cd4f5baa81 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Apr 2020 08:39:13 +0000 Subject: [PATCH 1924/2699] Unit check and test colocation with osds --- ceph-iscsi/src/charm.py | 11 ++++++ ceph-iscsi/src/interface_ceph_iscsi_peer.py | 9 +++++ ceph-iscsi/tests/bundles/focal.yaml | 37 ++++++++++++++++++--- 3 files changed, 53 insertions(+), 4 deletions(-) 
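The unit-count check introduced by this patch surfaces directly in `juju status`: a deployment whose unit count falls outside `ALLOWED_UNIT_COUNTS` is marked blocked. A sketch of the expected operator experience, with the status text derived from the `BlockedStatus` message in the diff that follows (unit names illustrative):

```bash
# Deploy with the one validated unit count.
juju deploy ./ceph-iscsi -n 2

# Scaling to an untested count should leave the units blocked, e.g.:
#   ceph-iscsi/0  blocked  ...  3 is an invalid unit count
juju add-unit ceph-iscsi
juju status ceph-iscsi
```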
diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index caf195ed..894b9968 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -14,6 +14,7 @@ StoredState, ) from ops.main import main +import ops.model import charmhelpers.core.host as ch_host import charmhelpers.core.templating as ch_templating import interface_ceph_client @@ -98,6 +99,9 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" REQUIRED_RELATIONS = ['ceph-client', 'cluster'] + # Two units have been tested; other counts are probably fine too but + # need validating + ALLOWED_UNIT_COUNTS = [2] def __init__(self, framework, key): super().__init__(framework, key) @@ -287,6 +291,13 @@ def on_certificates_relation_changed(self, event): self.state.enable_tls = True self.on_pools_available(event) + def custom_status_check(self): + if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS: + self.unit.status = ops.model.BlockedStatus( + '{} is an invalid unit count'.format(self.peers.unit_count)) + return False + return True + @ops_openstack.charm_class class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase): diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index ca156f36..0d2067c1 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -100,3 +100,12 @@ def peer_addresses(self): for u in self.peer_rel.units: addresses.append(self.peer_rel.data[u]['ingress-address']) return sorted(addresses) + + @property + def peer_count(self): + return len(self.peer_rel.units) + + @property + def unit_count(self): + return len(self.peer_rel.units) + 1 + diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 19b72818..2d0cac8b 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -1,14 +1,31 @@ -series: bionic +series: focal +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': + '5': + '6': + series: bionic + '7': + series: bionic + '8': applications: ubuntu: charm: cs:ubuntu num_units: 1 + to: + - '8' ceph-iscsi: charm: ../../ - series: focal num_units: 2 options: rbd-metadata-pool: tmbtil + to: + - '0' + - '1' ceph-osd: charm: cs:~gnuoy/ceph-osd-5 num_units: 3 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train + to: + - '0' + - '1' + - '2' ceph-mon: charm: cs:~gnuoy/ceph-mon-6 num_units: 3 options: monitor-count: '3' - source: cloud:bionic-train + to: + - '3' + - '4' + - '5' vault: + series: bionic num_units: 1 # charm: cs:~openstack-charmers-next/vault charm: cs:~gnuoy/vault-29 + to: + - '6' mysql: + series: bionic charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 options: innodb-buffer-pool-size: 256M max-connections: 1000 + to: + - '7' relations: - - ceph-mon:client - ceph-iscsi:ceph-client From d963d5d4fac1c39b0cd66a5c4c7d62dd8f38b5e0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Apr 2020 08:40:23 +0000 Subject: [PATCH 1925/2699] Bump dependencies --- ceph-iscsi/mod/interface-ceph-client | 2 +- ceph-iscsi/mod/operator | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client index 6a99f92a..9b05a712 160000 --- a/ceph-iscsi/mod/interface-ceph-client +++ b/ceph-iscsi/mod/interface-ceph-client @@ -1 +1 @@ -Subproject commit 6a99f92ae090aad224044d0862a3da78c7a04a55
+Subproject commit 9b05a71226ba5b11203871a75e92a9c29d8cbfe6 diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index d259e091..ac86de84 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit d259e0919fc19075b1e3636a5dd3c94ab81fd416 +Subproject commit ac86de84fb60c45bfc974b6e3b7212af2e473974 From a36c17f874f0b4a34ca64af71487c173cf8d3f18 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Apr 2020 09:03:49 +0000 Subject: [PATCH 1926/2699] Bump ops-openstack --- ceph-iscsi/mod/ops-openstack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index bef6f216..58c9f309 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit bef6f2161be12eeb3385aac113a738aecc85d807 +Subproject commit 58c9f3093ecc8c1797272d7a6720d7f8ef2d39aa From 88cf8697ad78881d5a997e6d87adedbfed32f6b2 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Mon, 13 Apr 2020 18:00:16 +0300 Subject: [PATCH 1927/2699] Add unit tests for CephISCSIGatewayPeers Also: EventSetBase got renamed to ObjectEvents in the framework. --- ceph-iscsi/mod/interface-ceph-client | 2 +- ceph-iscsi/mod/operator | 2 +- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 5 +- ceph-iscsi/unit_tests/__init__.py | 0 .../test_interface_ceph_iscsi_peer.py | 168 ++++++++++++++++++ 5 files changed, 172 insertions(+), 5 deletions(-) create mode 100644 ceph-iscsi/unit_tests/__init__.py create mode 100644 ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client index 9b05a712..0a3b553f 160000 --- a/ceph-iscsi/mod/interface-ceph-client +++ b/ceph-iscsi/mod/interface-ceph-client @@ -1 +1 @@ -Subproject commit 9b05a71226ba5b11203871a75e92a9c29d8cbfe6 +Subproject commit 0a3b553fe8e3d3eadcddefee09486213a2f65544 diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index ac86de84..04cb3479 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit ac86de84fb60c45bfc974b6e3b7212af2e473974 +Subproject commit 04cb347938b87776638fdeaa48f6c5c0f115346d diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index 0d2067c1..c82df588 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -6,7 +6,7 @@ from ops.framework import ( StoredState, EventBase, - EventSetBase, + ObjectEvents, EventSource, Object) @@ -19,7 +19,7 @@ class ReadyPeersEvent(EventBase): pass -class CephISCSIGatewayPeerEvents(EventSetBase): +class CephISCSIGatewayPeerEvents(ObjectEvents): has_peers = EventSource(HasPeersEvent) ready_peers = EventSource(ReadyPeersEvent) @@ -108,4 +108,3 @@ def peer_count(self): @property def unit_count(self): return len(self.peer_rel.units) + 1 - diff --git a/ceph-iscsi/unit_tests/__init__.py b/ceph-iscsi/unit_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py new file mode 100644 index 00000000..12f27704 --- /dev/null +++ b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 + +import unittest +import sys + +sys.path.append('lib') # noqa +sys.path.append('src') # noqa + +import interface_ceph_iscsi_peer + +from unittest import mock +from mock import PropertyMock + +from ops import 
framework +from ops.testing import Harness +from ops.charm import CharmBase + +from interface_ceph_iscsi_peer import CephISCSIGatewayPeers, ReadyPeersEvent + + +class TestCephISCSIGatewayPeers(unittest.TestCase): + + def setUp(self): + self.harness = Harness(CharmBase, meta=''' + name: ceph-iscsi + peers: + cluster: + interface: ceph-iscsi-peer + ''') + + @mock.patch.object(CephISCSIGatewayPeers, 'cluster_bind_address', + new_callable=PropertyMock) + @mock.patch('socket.getfqdn') + def test_on_changed(self, _getfqdn, _cluster_bind_address): + our_fqdn = 'ceph-iscsi-0.example' + _getfqdn.return_value = our_fqdn + # TODO: Replace this with calls to the test harness once + # https://github.com/canonical/operator/issues/222 is fixed. + _cluster_bind_address.return_value = '192.0.2.1' + + class TestReceiver(framework.Object): + + def __init__(self, parent, key): + super().__init__(parent, key) + self.observed_events = [] + + def on_ready_peers(self, event): + self.observed_events.append(event) + + self.harness.begin() + self.peers = CephISCSIGatewayPeers(self.harness.charm, 'cluster') + + receiver = TestReceiver(self.harness.framework, 'receiver') + self.harness.framework.observe(self.peers.on.ready_peers, + receiver) + relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1', + { + 'ingress-address': '192.0.2.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + } + ) + self.assertEqual(len(receiver.observed_events), 1) + self.assertIsInstance(receiver.observed_events[0], + ReadyPeersEvent) + + def test_set_admin_password(self): + self.harness.set_leader() + self.harness.begin() + self.peers = CephISCSIGatewayPeers(self.harness.charm, 'cluster') + self.harness.add_relation('cluster', 'ceph-iscsi') + + self.peers.set_admin_password('s3cr3t') + rel_data = self.harness.charm.model.get_relation('cluster').data + our_app = self.harness.charm.app + self.assertEqual(rel_data[our_app]['admin_password'], 's3cr3t') + + @mock.patch('socket.getfqdn') + def test_announce_ready(self, _getfqdn): + our_fqdn = 'ceph-iscsi-0.example' + _getfqdn.return_value = our_fqdn + self.harness.begin() + self.peers = CephISCSIGatewayPeers(self.harness.charm, 'cluster') + self.harness.add_relation('cluster', 'ceph-iscsi') + + self.peers.announce_ready() + rel_data = self.harness.charm.model.get_relation('cluster').data + our_unit = self.harness.charm.unit + self.assertEqual(rel_data[our_unit]['gateway_fqdn'], our_fqdn) + self.assertEqual(rel_data[our_unit]['gateway_ready'], 'True') + + @mock.patch.object(CephISCSIGatewayPeers, 'cluster_bind_address', + new_callable=PropertyMock) + @mock.patch('socket.getfqdn') + def test_ready_peer_details(self, _getfqdn, _cluster_bind_address): + _getfqdn.return_value = 'ceph-iscsi-0.example' + # TODO: Replace this with calls to the test harness once + # https://github.com/canonical/operator/issues/222 is fixed. 
+ _cluster_bind_address.return_value = '192.0.2.1' + + self.harness.begin() + self.peers = CephISCSIGatewayPeers(self.harness.charm, 'cluster') + relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') + + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1', + { + 'ingress-address': '192.0.2.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + } + ) + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/2', + { + 'ingress-address': '192.0.2.3', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-2.example', + } + ) + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/3', + {'ingress-address': '192.0.2.4'} + ) + + self.peers.ready_peer_details + + @mock.patch.object(interface_ceph_iscsi_peer.CephISCSIGatewayPeers, + 'cluster_bind_address', new_callable=PropertyMock) + def test_ready_peer_addresses(self, _cluster_bind_address): + # TODO: Replace this with calls to the test harness once + # https://github.com/canonical/operator/issues/222 is fixed. + _cluster_bind_address.return_value = '192.0.2.1' + + self.harness.begin() + self.peers = CephISCSIGatewayPeers(self.harness.charm, 'cluster') + relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') + + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1', + { + 'ingress-address': '192.0.2.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + } + ) + self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/2', + { + 'ingress-address': '192.0.2.3', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-2.example', + } + ) + self.assertEqual(['192.0.2.1', '192.0.2.2', '192.0.2.3'], + self.peers.peer_addresses) + + +if __name__ == '__main__': + unittest.main() From 596a79cb5e506b0820e8b0735a6026f99ee4e4ec Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Apr 2020 13:33:13 +0000 Subject: [PATCH 1928/2699] bump mod/interface-ceph-client --- ceph-iscsi/mod/interface-ceph-client | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client index 9b05a712..0a3b553f 160000 --- a/ceph-iscsi/mod/interface-ceph-client +++ b/ceph-iscsi/mod/interface-ceph-client @@ -1 +1 @@ -Subproject commit 9b05a71226ba5b11203871a75e92a9c29d8cbfe6 +Subproject commit 0a3b553fe8e3d3eadcddefee09486213a2f65544 From 9915f63bdeb0330da0d203d6f9f10b19f67d93f1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Apr 2020 14:10:30 +0000 Subject: [PATCH 1929/2699] Bump interface-ceph-client --- ceph-iscsi/mod/interface-ceph-client | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client index 0a3b553f..4f84bcad 160000 --- a/ceph-iscsi/mod/interface-ceph-client +++ b/ceph-iscsi/mod/interface-ceph-client @@ -1 +1 @@ -Subproject commit 0a3b553fe8e3d3eadcddefee09486213a2f65544 +Subproject commit 4f84bcad2d4b3ea415b5eccc850c85b9f4fc172e From ab4ff2af5cedb6499fb621fb7240e11a0d3bad59 Mon Sep 17 00:00:00 2001 From: Andrew McLeod Date: Mon, 20 Apr 2020 12:14:19 +0200 Subject: [PATCH 1930/2699] Handle new sysctl exitcode in focal (0 -> 255) ch-sync for focal sysctl modification: When sysctl is run in a container before focal the exitcode is 0. 
In focal, the exitcode is 255 Change-Id: I4cc337d538ca5d2c2e2da488013101bb82074721 Closes-Bug: 1873420 --- ceph-mon/charm-helpers-hooks.yaml | 1 + .../contrib/hahelpers/__init__.py | 13 + .../charmhelpers/contrib/hahelpers/apache.py | 86 ++++ .../charmhelpers/contrib/hahelpers/cluster.py | 451 ++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 306 +++++++++++- .../contrib/openstack/vaultlocker.py | 13 +- .../contrib/storage/linux/ceph.py | 147 +++++- .../contrib/storage/linux/loopback.py | 8 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 46 +- ceph-mon/hooks/charmhelpers/core/sysctl.py | 14 +- 10 files changed, 1062 insertions(+), 23 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index 54f52290..df1e68a5 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -9,6 +9,7 @@ include: - payload.execd - contrib.openstack - contrib.network.ip + - contrib.hahelpers - contrib.openstack: - alternatives - audits diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..2c1e371e --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,86 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os + +from charmhelpers.core import host +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'rb') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..ba34fba0 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,451 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
+""" + +import functools +import subprocess +import os +import time + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + DEBUG, + WARNING, + unit_get, + is_leader as juju_is_leader, + status_set, +) +from charmhelpers.core.host import ( + modulo_distribution, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + +DC_RESOURCE_NAME = 'DC' + + +class HAIncompleteConfig(Exception): + pass + + +class HAIncorrectConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +class CRMDCNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 3. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
+ + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + return public_port - (i * 10) + + +determine_apache_port_single = functools.partial( + determine_apache_port, singlenode_mode=True) + + +def get_hacluster_config(exclude_keys=None): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, os-internal-hostname, + os-admin-hostname, os-public-hostname, os-access-hostname + + param: exclude_keys: list of setting key(s) to be excluded. + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing or incorrect. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', + 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] + conf = {} + for setting in settings: + if exclude_keys and setting in exclude_keys: + continue + + conf[setting] = config_get(setting) + + if not valid_hacluster_config(): + raise HAIncorrectConfig('Insufficient or incorrect config data to ' + 'configure hacluster.') + return conf + + +def valid_hacluster_config(): + ''' + Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname + must be set. + + Note: ha-bindiface and ha-macastport both have defaults and will always + be set. We only care that either vip or dns-ha is set. + + :returns: boolean: valid config returns true. + raises: HAIncompatibileConfig if settings conflict. + raises: HAIncompleteConfig if settings are missing. + ''' + vip = config_get('vip') + dns = config_get('dns-ha') + if not(bool(vip) ^ bool(dns)): + msg = ('HA: Either vip or dns-ha must be set but not both in order to ' + 'use high availability') + status_set('blocked', msg) + raise HAIncorrectConfig(msg) + + # If dns-ha then one of os-*-hostname must be set + if dns: + dns_settings = ['os-internal-hostname', 'os-admin-hostname', + 'os-public-hostname', 'os-access-hostname'] + # At this point it is unknown if one or all of the possible + # network spaces are in HA. Validate at least one is set which is + # the minimum required. + for setting in dns_settings: + if config_get(setting): + log('DNS HA: At least one hostname is set {}: {}' + ''.format(setting, config_get(setting)), + level=DEBUG) + return True + + msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' + 'DNS HA') + status_set('blocked', msg) + raise HAIncompleteConfig(msg) + + log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) + return True + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = unit_get('private-address') + return '%s://%s' % (scheme, addr) + + +def distributed_wait(modulo=None, wait=None, operation_name='operation'): + ''' Distribute operations by waiting based on modulo_distribution + + If modulo and or wait are not set, check config_get for those values. 
+ If config values are not set, default to modulo=3 and wait=30. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') or 3 + if wait is None: + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. + :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. + :rtype: Tuple[List[str], List[int]] + """ + if external_services is None: + external_services = ['haproxy'] + if relation_ids('ha'): + for svc in external_services: + try: + services.remove(svc) + except ValueError: + pass + external_ports = [port_conv_f(p) for p in external_ports] + return services, external_ports diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 5c8f6eff..1cf751c0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -13,7 +13,7 @@ # limitations under the License. # Common python helper functions used for OpenStack charms. 
-from collections import OrderedDict +from collections import OrderedDict, namedtuple from functools import wraps import subprocess @@ -36,15 +36,20 @@ from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( + WL_STATES, action_fail, action_set, config, + expected_peer_units, + expected_related_units, log as juju_log, charm_dir, INFO, ERROR, + metadata, related_units, relation_get, + relation_id, relation_ids, relation_set, status_set, @@ -53,6 +58,7 @@ cached, leader_set, leader_get, + local_unit, ) from charmhelpers.core.strutils import ( @@ -108,6 +114,10 @@ POLICYD_CONFIG_NAME, ) +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -1810,6 +1820,16 @@ def os_application_version_set(package): application_version_set(application_version) +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. + :type check_function: function + """ + state, message = check_function() + status_set(state, message, application_status=True) + + def enable_memcache(source=None, release=None, package=None): """Determine if memcache should be enabled on the local unit @@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None): 'WARN') pass return True in notifications + + +@cached +def container_scoped_relations(): + """Get all the container scoped relations + + :returns: List of relation names + :rtype: List + """ + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_db_ready(use_current_context=False, rel_name=None): + """Check remote database is ready to be used. + + Database relations are expected to provide a list of 'allowed' units to + confirm that the database is ready for use by those units. + + If db relation has provided this information and local unit is a member, + returns True otherwise False. + + :param use_current_context: Whether to limit checks to current hook + context. + :type use_current_context: bool + :param rel_name: Name of relation to check + :type rel_name: string + :returns: Whether remote db is ready. + :rtype: bool + :raises: Exception + """ + key = 'allowed_units' + + rel_name = rel_name or 'shared-db' + this_unit = local_unit() + + if use_current_context: + if relation_id() in relation_ids(rel_name): + rids_units = [(None, None)] + else: + raise Exception("use_current_context=True but not in {} " + "rel hook contexts (currently in {})." + .format(rel_name, relation_id())) + else: + rids_units = [(r_id, u) + for r_id in relation_ids(rel_name) + for u in related_units(r_id)] + + for rid, unit in rids_units: + allowed_units = relation_get(rid=rid, unit=unit, attribute=key) + if allowed_units and this_unit in allowed_units.split(): + juju_log("This unit ({}) is in allowed unit list from {}".format( + this_unit, + unit), 'DEBUG') + return True + + juju_log("This unit was not found in any allowed unit list") + return False + + +def is_expected_scale(peer_relation_name='cluster'): + """Query juju goal-state to determine whether our peer- and dependency- + relations are at the expected scale. 
+ + Useful for deferring per unit per relation housekeeping work until we are + ready to complete it successfully and without unnecessary repetiton. + + Always returns True if version of juju used does not support goal-state. + + :param peer_relation_name: Name of peer relation + :type rel_name: string + :returns: True or False + :rtype: bool + """ + def _get_relation_id(rel_type): + return next((rid for rid in relation_ids(reltype=rel_type)), None) + + Relation = namedtuple('Relation', 'rel_type rel_id') + peer_rid = _get_relation_id(peer_relation_name) + # Units with no peers should still have a peer relation. + if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. 
+ :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WL_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WL_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WL_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WL_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WL_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WL_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WL_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. + + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WL_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. 
+ + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WL_STATES.ACTIVE: + if are_peers_ready(): + return WL_STATES.ACTIVE, 'Application Ready' + else: + return WL_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 866a2697..4690f6b0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -140,9 +140,16 @@ def vault_relation_complete(backend=None): :ptype backend: string :returns: whether the relation to vault is complete :rtype: bool""" - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False # TODO: contrib a high level unwrap method to hvac that works diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index dabfb6c2..eb31b782 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -22,6 +22,7 @@ # Adam Gandelman # +import collections import errno import hashlib import math @@ -93,6 +94,88 @@ DEFAULT_MINIMUM_PGS = 2 +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. 
+ + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. + log(e, level=ERROR) + return + data = { + 'osd-settings': json.dumps(settings, sort_keys=True)} + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + relation_settings=data) + + def validator(value, valid_type, valid_range=None): """ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values @@ -1635,5 +1718,67 @@ def __call__(self): continue ceph_conf[key] = conf[key] - return ceph_conf + + +class CephOSDConfContext(CephConfContext): + """Ceph config (ceph.conf) context. + + Consolidates settings from config-flags via CephConfContext with + settings provided by the mons. The config-flag values are preserved in + conf['osd'], settings from the mons which do not clash with config-flag + settings are in conf['osd_from_client'] and finally settings which do + clash are in conf['osd_from_client_conflict']. Rather than silently drop + the conflicting settings they are provided in the context so they can be + rendered commented out to give some visability to the admin. + """ + + def __init__(self, permitted_sections=None): + super(CephOSDConfContext, self).__init__( + permitted_sections=permitted_sections) + try: + self.settings_from_mons = get_osd_settings('mon') + except OSDSettingConflict: + log( + "OSD settings from mons are inconsistent, ignoring them", + level=WARNING) + self.settings_from_mons = {} + + def filter_osd_from_mon_settings(self): + """Filter settings from client relation against config-flags. + + :returns: A tuple ( + ,config-flag values, + ,client settings which do not conflict with config-flag values, + ,client settings which confilct with config-flag values) + :rtype: (OrderedDict, OrderedDict, OrderedDict) + """ + ceph_conf = super(CephOSDConfContext, self).__call__() + conflicting_entries = {} + clear_entries = {} + for key, value in self.settings_from_mons.items(): + if key in ceph_conf.get('osd', {}): + if ceph_conf['osd'][key] != value: + conflicting_entries[key] = value + else: + clear_entries[key] = value + clear_entries = _order_dict_by_key(clear_entries) + conflicting_entries = _order_dict_by_key(conflicting_entries) + return ceph_conf, clear_entries, conflicting_entries + + def __call__(self): + """Construct OSD config context. + + Standard context with two additional special keys. + osd_from_client_conflict: client settings which confilct with + config-flag values + osd_from_client: settings which do not conflict with config-flag + values + + :returns: OSD config context dict. 
+ :rtype: dict + """ + conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() + conf['osd_from_client_conflict'] = osd_conflict + conf['osd_from_client'] = osd_clear + return conf diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py index 82472ff1..74bab40e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -32,6 +32,10 @@ def loopback_devices(): /dev/loop0: [0807]:961814 (/tmp/my.img) + or: + + /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) + :returns: dict: a dict mapping {loopback_dev: backing_file} ''' loopbacks = {} @@ -39,9 +43,9 @@ def loopback_devices(): output = check_output(cmd) if six.PY3: output = output.decode('utf-8') - devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] + devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] return loopbacks diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 647f6e4b..9e425a88 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ from __future__ import print_function import copy from distutils.version import LooseVersion +from enum import Enum from functools import wraps from collections import namedtuple import glob @@ -57,6 +58,14 @@ 'This may not be compatible with software you are ' 'running in your shell.') + +class WL_STATES(Enum): + ACTIVE = 'active' + BLOCKED = 'blocked' + MAINTENANCE = 'maintenance' + WAITING = 'waiting' + + cache = {} @@ -1088,22 +1097,31 @@ def function_tag(): return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() -def status_set(workload_state, message): +def status_set(workload_state, message, application_status=False): """Set the workload state with a message Use status-set to set the workload state with a message which is visible to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. + assume this is juju < 1.23 and juju-log the message instead. - workload_state -- valid juju workload state. - message -- status update message + workload_state -- valid juju workload state. str or WL_STATES + message -- status update message + application_status -- Whether this is an application state set """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: + # Extract the value if workload_state is an Enum + try: + workload_state = workload_state.value + except AttributeError: + pass + workload_state = workload_state.lower() + if workload_state not in [s.lower() for s in WL_STATES.__members__.keys()]: raise ValueError( '{!r} is not a valid workload state'.format(workload_state) ) - cmd = ['status-set', workload_state, message] + cmd = ['status-set'] + if application_status: + cmd.append('--application') + cmd.extend([workload_state, message]) try: ret = subprocess.call(cmd) if ret == 0: @@ -1526,13 +1544,13 @@ def env_proxy_settings(selected_settings=None): """Get proxy settings from process environment variables. 
Get charm proxy settings from environment variables that correspond to - juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, - see lp:1782236) in a format suitable for passing to an application that - reacts to proxy settings passed as environment variables. Some applications - support lowercase or uppercase notation (e.g. curl), some support only - lowercase (e.g. wget), there are also subjectively rare cases of only - uppercase notation support. no_proxy CIDR and wildcard support also varies - between runtimes and applications as there is no enforced standard. + juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see + lp:1782236) and juju-ftp-proxy in a format suitable for passing to an + application that reacts to proxy settings passed as environment variables. + Some applications support lowercase or uppercase notation (e.g. curl), some + support only lowercase (e.g. wget), there are also subjectively rare cases + of only uppercase notation support. no_proxy CIDR and wildcard support also + varies between runtimes and applications as there is no enforced standard. Some applications may connect to multiple destinations and expose config options that would affect only proxy settings for a specific destination diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py index f1f4a28f..386428d6 100644 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/core/sysctl.py @@ -17,14 +17,17 @@ import yaml -from subprocess import check_call +from subprocess import check_call, CalledProcessError from charmhelpers.core.hookenv import ( log, DEBUG, ERROR, + WARNING, ) +from charmhelpers.core.host import is_container + __author__ = 'Jorge Niedbalski R. ' @@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False): if ignore: call.append("-e") - check_call(call) + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e From 4fa7248aa4655fc6b40ee9e9bbf5c757ebcae33e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 26 Mar 2020 12:32:36 +0000 Subject: [PATCH 1931/2699] Enable focal and ussuri as part of the gate tests Add bionic-ussuri and focal-ussuri (with mysql8 support) bundles. The associated func-test-pr is to disable the /dev/vdb pristine disk check as it doesn't work on focal. 
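A note for readers scanning the large bundles this change adds: "mysql8
support" means that on focal the services no longer relate directly to
percona-cluster. Each consuming application instead gets a mysql-router
subordinate, which in turn relates to a shared mysql-innodb-cluster. A
minimal sketch of the pattern, extracted from the focal-ussuri bundle
below (keystone stands in for any of the consuming services):

    applications:
      keystone:
        charm: cs:~openstack-charmers-next/keystone
        num_units: 1
      keystone-mysql-router:
        charm: cs:~openstack-charmers-next/mysql-router
      mysql-innodb-cluster:
        charm: cs:~openstack-charmers-next/mysql-innodb-cluster
        num_units: 3
    relations:
      - - 'keystone:shared-db'
        - 'keystone-mysql-router:shared-db'
      - - 'keystone-mysql-router:db-router'
        - 'mysql-innodb-cluster:db-router'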
Change-Id: I00b2b739c97dc41b2f987f29e12844e6cf5e921c func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/249 --- ceph-osd/metadata.yaml | 1 + ceph-osd/tests/bundles/bionic-ussuri.yaml | 114 +++++++++++ ceph-osd/tests/bundles/focal-ussuri.yaml | 219 ++++++++++++++++++++++ ceph-osd/tests/tests.yaml | 6 +- 4 files changed, 339 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/tests/bundles/bionic-ussuri.yaml create mode 100644 ceph-osd/tests/bundles/focal-ussuri.yaml diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 4a85edd9..bf0ccf36 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -14,6 +14,7 @@ series: - xenial - bionic - eoan + - focal - trusty description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/tests/bundles/bionic-ussuri.yaml b/ceph-osd/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..b82bc2e5 --- /dev/null +++ b/ceph-osd/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,114 @@ +series: bionic +applications: + ceph-osd: + charm: ceph-osd + num_units: 3 + series: bionic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-ussuri + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-ussuri + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-ussuri + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-ussuri + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-ussuri + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - 
rabbitmq-server:amqp +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller diff --git a/ceph-osd/tests/bundles/focal-ussuri.yaml b/ceph-osd/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..78e4e7de --- /dev/null +++ b/ceph-osd/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,219 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 
'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 7c310acb..cc63b6f4 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-osd gate_bundles: + - focal-ussuri + - bionic-ussuri - bionic-train - bionic-stein - bionic-rocky @@ -12,7 +14,6 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: - - eoan-train configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: @@ -20,3 +21,6 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest +tests_options: + force_deploy: + - focal-ussuri From 435caad299bb903b0d3ec697caf109340d03177d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 27 Mar 2020 11:13:35 +0000 Subject: [PATCH 1932/2699] Pass on osd settings from clients to osd units If a client application has requested particular osd settings then validate them and pass them on to the osd units. 
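As a rough sketch of the end-to-end flow (the client-side relation name
'ceph-client' matches what the ceph-iscsi charm uses later in this
series; settings must come from the OSD_SETTING_WHITELIST introduced
earlier):

    # Client side: publish the requested settings on the relation to
    # ceph-mon as a JSON-encoded dict.
    import json
    from charmhelpers.core.hookenv import relation_ids, relation_set

    requested = {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}
    for rid in relation_ids('ceph-client'):
        relation_set(relation_id=rid,
                     relation_settings={'osd-settings': json.dumps(requested)})

    # ceph-mon side (this change): consolidate and validate the requests
    # from all clients, then forward them on every 'osd' relation.
    send_osd_settings()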
Change-Id: Iad7132b3e6b34038ecd9a61fbb9589b22117396d --- ceph-mon/hooks/ceph_hooks.py | 11 ++++++ .../charmhelpers/contrib/openstack/utils.py | 26 ++++++------- ceph-mon/hooks/charmhelpers/core/hookenv.py | 38 ++++++++++--------- ceph-mon/unit_tests/test_ceph_hooks.py | 10 ++++- ceph-mon/unit_tests/test_status.py | 30 ++++++++++++++- 5 files changed, 80 insertions(+), 35 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 91a57a8e..cace5894 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -80,7 +80,10 @@ from charmhelpers.core.templating import render from charmhelpers.contrib.storage.linux.ceph import ( CephConfContext, + OSD_SETTING_EXCEPTIONS, enable_pg_autoscale, + get_osd_settings, + send_osd_settings, ) from utils import ( add_rbd_mirror_features, @@ -682,6 +685,7 @@ def osd_relation(relid=None, unit=None): notify_radosgws() notify_client() notify_rbd_mirrors() + send_osd_settings() else: log('mon cluster not in quorum - deferring fsid provision') @@ -853,6 +857,7 @@ def admin_relation_joined(relid=None): @hooks.hook('client-relation-changed') @hooks.hook('client-relation-joined') def client_relation(relid=None, unit=None): + send_osd_settings() if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- providing client with keys, processing broker requests') @@ -1020,6 +1025,12 @@ def assess_status(): '``default-rbd-features``') return + try: + get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + status_set('blocked', str(e)) + return + # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): expected_osd_count = config('expected-osd-count') or 3 diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 1cf751c0..e59e0d1e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -36,7 +36,7 @@ from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( - WL_STATES, + WORKLOAD_STATES, action_fail, action_set, config, @@ -1827,7 +1827,7 @@ def os_application_status_set(check_function): :type check_function: function """ state, message = check_function() - status_set(state, message, application_status=True) + status_set(state, message, application=True) def enable_memcache(source=None, release=None, package=None): @@ -2295,7 +2295,7 @@ def check_api_unit_ready(check_db_ready=True): :rtype: (bool, str) """ unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) - return unit_state == WL_STATES.ACTIVE, msg + return unit_state == WORKLOAD_STATES.ACTIVE, msg def get_api_unit_status(check_db_ready=True): @@ -2306,22 +2306,22 @@ def get_api_unit_status(check_db_ready=True): :returns: Workload state and message :rtype: (bool, str) """ - unit_state = WL_STATES.ACTIVE + unit_state = WORKLOAD_STATES.ACTIVE msg = 'Unit is ready' if is_db_maintenance_mode(): - unit_state = WL_STATES.MAINTENANCE + unit_state = WORKLOAD_STATES.MAINTENANCE msg = 'Database in maintenance mode.' elif is_unit_paused_set(): - unit_state = WL_STATES.BLOCKED + unit_state = WORKLOAD_STATES.BLOCKED msg = 'Unit paused.' 
elif check_db_ready and not is_db_ready(): - unit_state = WL_STATES.WAITING + unit_state = WORKLOAD_STATES.WAITING msg = 'Allowed_units list provided but this unit not present' elif not is_db_initialised(): - unit_state = WL_STATES.WAITING + unit_state = WORKLOAD_STATES.WAITING msg = 'Database not initialised' elif not is_expected_scale(): - unit_state = WL_STATES.WAITING + unit_state = WORKLOAD_STATES.WAITING msg = 'Charm and its dependencies not yet at expected scale' juju_log(msg, 'DEBUG') return unit_state, msg @@ -2334,7 +2334,7 @@ def check_api_application_ready(): :rtype: (bool, str) """ app_state, msg = get_api_application_status() - return app_state == WL_STATES.ACTIVE, msg + return app_state == WORKLOAD_STATES.ACTIVE, msg def get_api_application_status(): @@ -2344,9 +2344,9 @@ def get_api_application_status(): :rtype: (bool, str) """ app_state, msg = get_api_unit_status() - if app_state == WL_STATES.ACTIVE: + if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WL_STATES.ACTIVE, 'Application Ready' + return WORKLOAD_STATES.ACTIVE, 'Application Ready' else: - return WL_STATES.WAITING, 'Some units are not ready' + return WORKLOAD_STATES.WAITING, 'Some units are not ready' return app_state, msg diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 9e425a88..d7c37c17 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -59,7 +59,7 @@ 'running in your shell.') -class WL_STATES(Enum): +class WORKLOAD_STATES(Enum): ACTIVE = 'active' BLOCKED = 'blocked' MAINTENANCE = 'maintenance' @@ -1097,31 +1097,33 @@ def function_tag(): return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() -def status_set(workload_state, message, application_status=False): +def status_set(workload_state, message, application=False): """Set the workload state with a message Use status-set to set the workload state with a message which is visible to the user via juju status. If the status-set command is not found then assume this is juju < 1.23 and juju-log the message instead. - workload_state -- valid juju workload state. str or WL_STATES - message -- status update message - application_status -- Whether this is an application state set + workload_state -- valid juju workload state. str or WORKLOAD_STATES + message -- status update message + application -- Whether this is an application state set """ - # Extract the value if workload_state is an Enum - try: - workload_state = workload_state.value - except AttributeError: - pass - workload_state = workload_state.lower() - if workload_state not in [s.lower() for s in WL_STATES.__members__.keys()]: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) + bad_state_msg = '{!r} is not a valid workload state' + + if isinstance(workload_state, str): + try: + # Convert string to enum. 
+ workload_state = WORKLOAD_STATES[workload_state.upper()] + except KeyError: + raise ValueError(bad_state_msg.format(workload_state)) + + if workload_state not in WORKLOAD_STATES: + raise ValueError(bad_state_msg.format(workload_state)) + cmd = ['status-set'] - if application_status: + if application: cmd.append('--application') - cmd.extend([workload_state, message]) + cmd.extend([workload_state.value, message]) try: ret = subprocess.call(cmd) if ret == 0: @@ -1129,7 +1131,7 @@ def status_set(workload_state, message, application_status=False): except OSError as e: if e.errno != errno.ENOENT: raise - log_message = 'status-set failed: {} {}'.format(workload_state, + log_message = 'status-set failed: {} {}'.format(workload_state.value, message) log(log_message, level='INFO') diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index e9aa57bd..50dd5e05 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -407,6 +407,7 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) + @patch.object(ceph_hooks, 'send_osd_settings') @patch.object(ceph_hooks, 'get_rbd_features') @patch.object(ceph_hooks, 'relation_set') @patch.object(ceph_hooks, 'handle_broker_request') @@ -423,7 +424,8 @@ def test_client_relation(self, _config, _handle_broker_request, _relation_set, - _get_rbd_features): + _get_rbd_features, + _send_osd_settings): _remote_service_name.return_value = 'glance' config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = lambda key: config[key] @@ -431,6 +433,7 @@ def test_client_relation(self, _get_rbd_features.return_value = None ceph_hooks.client_relation(relid='rel1', unit='glance/0') _ready_for_service.assert_called_once_with() + _send_osd_settings.assert_called_once_with() _get_public_addr.assert_called_once_with() _get_named_key.assert_called_once_with('glance') _handle_broker_request.assert_called_once_with( @@ -454,6 +457,7 @@ def test_client_relation(self, 'rbd-features': 42, }) + @patch.object(ceph_hooks, 'send_osd_settings') @patch.object(ceph_hooks, 'get_rbd_features') @patch.object(ceph_hooks, 'config') @patch.object(ceph_hooks.ceph, 'get_named_key') @@ -479,7 +483,8 @@ def test_client_relation_non_rel_hook(self, relation_set, get_public_addr, get_named_key, _config, - _get_rbd_features): + _get_rbd_features, + _send_osd_settings): # Check for LP #1738154 ready_for_service.return_value = True process_requests.return_value = 'AOK' @@ -491,6 +496,7 @@ def test_client_relation_non_rel_hook(self, relation_set, _config.side_effect = lambda key: config[key] _get_rbd_features.return_value = None ceph_hooks.client_relation(relid='rel1', unit='glance/0') + _send_osd_settings.assert_called_once_with() relation_set.assert_called_once_with( relation_id='rel1', relation_settings={ diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 8d0bc105..01ba5c45 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -17,6 +17,8 @@ import test_utils +import charmhelpers.contrib.storage.linux.ceph as ch_ceph + # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. 
mock_apt = mock.MagicMock() @@ -81,32 +83,56 @@ def test_assess_status_peers_incomplete(self, _peer_units): self.status_set.assert_called_with('waiting', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'get_osd_settings') @mock.patch.object(hooks, 'has_rbd_mirrors') @mock.patch.object(hooks, 'sufficient_osds') @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_peers_complete_active(self, _peer_units, _sufficient_osds, - _has_rbd_mirrors): + _has_rbd_mirrors, + _get_osd_settings): _peer_units.return_value = ENOUGH_PEERS_COMPLETE _sufficient_osds.return_value = True self.ceph.is_bootstrapped.return_value = True self.ceph.is_quorum.return_value = True _has_rbd_mirrors.return_value = False + _get_osd_settings.return_value = {} hooks.assess_status() self.status_set.assert_called_with('active', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'get_osd_settings') + @mock.patch.object(hooks, 'has_rbd_mirrors') + @mock.patch.object(hooks, 'sufficient_osds') + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_invalid_osd_settings(self, _peer_units, + _sufficient_osds, + _has_rbd_mirrors, + _get_osd_settings): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + _sufficient_osds.return_value = True + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + _has_rbd_mirrors.return_value = False + _get_osd_settings.side_effect = ch_ceph.OSDSettingConflict( + 'conflict in setting foo') + hooks.assess_status() + self.status_set.assert_called_with('blocked', mock.ANY) + + @mock.patch.object(hooks, 'get_osd_settings') @mock.patch.object(hooks, 'has_rbd_mirrors') @mock.patch.object(hooks, 'sufficient_osds') @mock.patch.object(hooks, 'get_peer_units') def test_assess_status_peers_complete_down(self, _peer_units, _sufficient_osds, - _has_rbd_mirrors): + _has_rbd_mirrors, + _get_osd_settings): _peer_units.return_value = ENOUGH_PEERS_COMPLETE _sufficient_osds.return_value = True self.ceph.is_bootstrapped.return_value = False self.ceph.is_quorum.return_value = False _has_rbd_mirrors.return_value = False + _get_osd_settings.return_value = {} hooks.assess_status() self.status_set.assert_called_with('blocked', mock.ANY) self.application_version_set.assert_called_with('10.2.2') From d6e06023bc70666965b2badf01d1bb50f48e1478 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 23 Apr 2020 15:11:34 +0000 Subject: [PATCH 1933/2699] Fix symlinks if they are wrong --- ceph-iscsi/charm-prep.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index 0f29b419..d41ad095 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -1,6 +1,7 @@ #!/bin/bash rm -rf lib/* +rm adapters.py interface_ceph_client.py ops ops_openstack.py pip install -t lib/ git+https://github.com/juju/charm-helpers.git From ea46ce315902944d5103d6bc7e455b90fc9e3bb3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 24 Apr 2020 15:15:03 +0000 Subject: [PATCH 1934/2699] Update bundle --- ceph-iscsi/README.md | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index eb6e5db9..62c03c4f 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -20,23 +20,35 @@ A sample `bundle.yaml` file's contents: ```yaml series: focal + machines: + '0': + '1': + '2': applications: ceph-iscsi: charm: cs:ceph-iscsi num_units: 2 + to: + - lxd:0 + - lxd:1 ceph-osd: 
charm: cs:ceph-osd num_units: 3 storage: osd-devices: /dev/vdb - options: - source: cloud:bionic-train + to: + - '0' + - '1' + - '2' ceph-mon: charm: cs:ceph-mon num_units: 3 options: monitor-count: '3' - source: cloud:bionic-train + to: + - lxd:0 + - lxd:1 + - lxd:2 relations: - - ceph-mon:client - ceph-iscsi:ceph-client From 06e58f86c847e10d0c64f23bcf445c024eaf12d4 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 27 Apr 2020 15:30:12 +0000 Subject: [PATCH 1935/2699] Add unit tests and update tox --- ceph-iscsi/.gitignore | 2 + ceph-iscsi/.stestr.conf | 3 + ceph-iscsi/requirements.txt | 1 + ceph-iscsi/src/charm.py | 10 +- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 3 +- ceph-iscsi/tox.ini | 81 ++++- .../unit_tests/test_cepch_iscsi_charm.py | 321 ++++++++++++++++++ 7 files changed, 407 insertions(+), 14 deletions(-) create mode 100644 ceph-iscsi/.stestr.conf create mode 100644 ceph-iscsi/requirements.txt create mode 100644 ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore index 7cd7b847..1ec16378 100644 --- a/ceph-iscsi/.gitignore +++ b/ceph-iscsi/.gitignore @@ -2,3 +2,5 @@ lib .tox test-decrpyt.py .swp +__pycache__ +.stestr/ diff --git a/ceph-iscsi/.stestr.conf b/ceph-iscsi/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-iscsi/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt new file mode 100644 index 00000000..8299f55a --- /dev/null +++ b/ceph-iscsi/requirements.txt @@ -0,0 +1 @@ +# requirements diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 894b9968..6a4343e2 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -102,6 +102,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): # Two has been tested before is probably fine too but needs # validating ALLOWED_UNIT_COUNTS = [2] + release = 'default' def __init__(self, framework, key): super().__init__(framework, key) @@ -131,9 +132,10 @@ def __init__(self, framework, key): self.framework.observe(self.on.upgrade_charm, self) def on_add_trusted_ip_action(self, event): - self.state.additional_trusted_ips.append(event.params['ips'].split(' ')) + self.state.additional_trusted_ips.append( + event.params['ips'].split(' ')) logging.info(self.state.additional_trusted_ips) - + def on_create_target_action(self, event): gw_client = gwcli_client.GatewayClient() target = event.params.get('iqn', self.DEFAULT_TARGET) @@ -273,13 +275,15 @@ def on_certificates_relation_changed(self, event): # Append chain file so that clients that trust the root CA will # trust certs signed by an intermediate in the chain ca_cert_data = self.tls.root_ca_cert + os.linesep + self.tls.chain + else: + ca_cert_data = self.tls.root_ca_cert pem_data = app_certs['cert'] + os.linesep + app_certs['key'] tls_files = { '/etc/ceph/iscsi-gateway.crt': app_certs['cert'], '/etc/ceph/iscsi-gateway.key': app_certs['key'], '/etc/ceph/iscsi-gateway.pem': pem_data, '/usr/local/share/ca-certificates/vault_ca_cert.crt': ca_cert_data} - for tls_file, tls_data in tls_files.items(): + for tls_file, tls_data in sorted(tls_files.items()): with open(tls_file, 'w') as f: f.write(tls_data) subprocess.check_call(['update-ca-certificates']) diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index c82df588..beeac301 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ 
b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -91,8 +91,7 @@ def cluster_bind_address(self): @property def admin_password(self): # https://github.com/canonical/operator/issues/148 - # return self.peer_rel.data[self.peer_rel.app].get(self.PASSWORD_KEY) - return 'hardcodedpassword' + return self.peer_rel.data[self.peer_rel.app].get(self.PASSWORD_KEY) @property def peer_addresses(self): diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 9b1151b6..7e543b85 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -1,11 +1,14 @@ -# Source charm (with zaza): ./src/tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified # within individual charm repos. See the 'global' dir contents for available # choices of tox.ini for OpenStack Charms: # https://github.com/openstack-charmers/release-tools - +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8 +envlist = pep8,py3 skipsdist = True # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False @@ -15,16 +18,71 @@ skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} whitelist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt -install_command = - pip install {opts} {packages} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = flake8 --ignore=E402 {posargs} src +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . 
+omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} [testenv:func-noop] basepython = python3 @@ -41,10 +99,15 @@ basepython = python3 commands = functest-run-suite --keep-model --smoke +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + [testenv:func-target] basepython = python3 commands = functest-run-suite --keep-model --bundle {posargs} -[testenv:venv] -commands = {posargs} +[flake8] +ignore = E402,E226 diff --git a/ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py b/ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py new file mode 100644 index 00000000..8cc2bad5 --- /dev/null +++ b/ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import unittest +import sys + +sys.path.append('lib') # noqa +sys.path.append('src') # noqa + +from mock import call, patch, MagicMock, ANY + +from ops.testing import Harness, _TestingModelBackend +from ops.model import ( + BlockedStatus, +) +from ops import framework, model + +import charm + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super().setUp() + self.patches = patches + self.obj = obj + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestCephISCSIGatewayCharmBase(CharmTestCase): + + PATCHES = [ + 'ch_templating', + 'gwcli_client', + 'subprocess', + ] + + def setUp(self): + super().setUp(charm, self.PATCHES) + self.harness = Harness( + charm.CephISCSIGatewayCharmBase, + ) + self.gwc = MagicMock() + self.gwcli_client.GatewayClient.return_value = self.gwc + + # BEGIN: Workaround until + # https://github.com/canonical/operator/pull/196 lands + class _TestingOPSModelBackend(_TestingModelBackend): + + def relation_ids(self, relation_name): + return self._relation_ids_map.get(relation_name, []) + + # Hardcoded until network_get is implemented in + # _TestingModelBackend + def network_get(self, endpoint_name, relation_id=None): + network_data = { + 'bind-addresses': [{ + 'interface-name': 'eth0', + 'addresses': [{ + 'cidr': '10.0.0.0/24', + 'value': '10.0.0.10'}]}], + 'ingress-addresses': ['10.0.0.10'], + 'egress-subnets': ['10.0.0.0/24']} + return network_data + + self.harness._backend = _TestingOPSModelBackend( + self.harness._unit_name) + self.harness._model = model.Model( + self.harness._unit_name, + self.harness._meta, + self.harness._backend) + self.harness._framework = framework.Framework( + ":memory:", + self.harness._charm_dir, + self.harness._meta, + self.harness._model) + # END Workaround + + def test_init(self): + self.harness.begin() + self.assertFalse(self.harness.charm.state.target_created) + self.assertFalse(self.harness.charm.state.enable_tls) + 
self.assertEqual(self.harness.charm.state.additional_trusted_ips, []) + + def add_cluster_relation(self): + rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') + self.harness.add_relation_unit( + rel_id, + 'ceph-iscsi/1', + { + 'ingress-address': '10.0.0.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + } + ) + return rel_id + + @patch('socket.getfqdn') + def test_on_create_target_action(self, _getfqdn): + _getfqdn.return_value = 'ceph-iscsi-0.example' + self.add_cluster_relation() + self.harness.begin() + action_event = MagicMock() + action_event.params = { + 'iqn': 'iqn.mock.iscsi-gw:iscsi-igw', + 'gateway-units': 'ceph-iscsi/0 ceph-iscsi/1', + 'pool-name': 'iscsi-pool', + 'image-name': 'disk1', + 'image-size': '5G', + 'client-initiatorname': 'client-initiator', + 'client-username': 'myusername', + 'client-password': 'mypassword'} + self.harness.charm.on_create_target_action(action_event) + self.gwc.add_gateway_to_target.assert_has_calls([ + call( + 'iqn.mock.iscsi-gw:iscsi-igw', + '10.0.0.10', + 'ceph-iscsi-0.example'), + call( + 'iqn.mock.iscsi-gw:iscsi-igw', + '10.0.0.2', + 'ceph-iscsi-1.example')]) + + self.gwc.create_pool.assert_called_once_with( + 'iscsi-pool', + 'disk1', + '5G') + self.gwc.add_client_to_target.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator') + self.gwc.add_client_auth.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator', + 'myusername', + 'mypassword') + self.gwc.add_disk_to_client.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator', + 'iscsi-pool', + 'disk1') + + @patch.object(charm.secrets, 'choice') + def test_on_has_peers(self, _choice): + _choice.return_value = 'r' + self.add_cluster_relation() + self.harness.begin() + self.assertIsNone( + self.harness.charm.peers.admin_password) + self.harness.set_leader() + self.harness.charm.peers.on.has_peers.emit() + self.assertEqual( + self.harness.charm.peers.admin_password, 'rrrrrrrr') + + def test_on_has_peers_not_leader(self): + self.add_cluster_relation() + self.harness.begin() + self.assertIsNone( + self.harness.charm.peers.admin_password) + self.harness.set_leader(False) + self.harness.charm.peers.on.has_peers.emit() + self.assertIsNone( + self.harness.charm.peers.admin_password) + + def test_on_has_peers_existing_password(self): + rel_id = self.add_cluster_relation() + self.harness.update_relation_data( + rel_id, + 'ceph-iscsi', + {'admin_password': 'existing password'}) + self.harness.begin() + self.harness.set_leader() + self.harness.charm.peers.on.has_peers.emit() + self.assertEqual( + self.harness.charm.peers.admin_password, + 'existing password') + + def test_on_ceph_client_relation_joined(self): + rel_id = self.harness.add_relation('ceph-client', 'ceph-mon') + self.harness.update_config( + key_values={'rbd-metadata-pool': 'iscsi-pool'}) + self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'ceph-mon/0', + {'ingress-address': '10.0.0.3'}, + ) + rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') + req_osd_settings = json.loads(rel_data['osd-settings']) + self.assertEqual( + req_osd_settings, + {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}) + req_pool = json.loads(rel_data['broker_req']) + self.assertEqual( + req_pool['ops'], + [{ + 'app-name': None, + 'group': None, + 'group-namespace': None, + 'max-bytes': None, + 'max-objects': None, + 'name': 'iscsi-pool', + 'op': 'create-pool', + 'pg_num': None, + 'replicas': 3, + 'weight': None}, + { + 'client': 
'ceph-iscsi', + 'op': 'set-key-permissions', + 'permissions': [ + 'osd', + 'allow *', + 'mon', + 'allow *', + 'mgr', + 'allow r']}]) + + def test_on_pools_available(self): + rel_id = self.add_cluster_relation() + self.harness.update_relation_data( + rel_id, + 'ceph-iscsi', + {'admin_password': 'existing password', + 'gateway_ready': False}) + self.harness.begin() + self.harness.charm.ceph_client.on.pools_available.emit() + self.ch_templating.render.assert_has_calls([ + call('ceph.conf', '/etc/ceph/ceph.conf', ANY), + call('iscsi-gateway.cfg', '/etc/ceph/iscsi-gateway.cfg', ANY), + call( + 'ceph.client.ceph-iscsi.keyring', + '/etc/ceph/ceph.client.ceph-iscsi.keyring', ANY)]) + self.assertTrue(self.harness.charm.state.is_started) + rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') + self.assertEqual(rel_data['gateway_ready'], 'True') + + @patch('socket.gethostname') + def test_on_certificates_relation_joined(self, _gethostname): + _gethostname.return_value = 'server1' + rel_id = self.harness.add_relation('certificates', 'vault') + self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'vault/0', + {'ingress-address': '10.0.0.3'}, + ) + rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') + self.assertEqual( + rel_data['application_cert_requests'], + '{"server1": {"sans": ["10.0.0.10", "server1"]}}') + + @patch('socket.gethostname') + def test_on_certificates_relation_changed(self, _gethostname): + _gethostname.return_value = 'server1' + self.subprocess.check_output.return_value = b'pubkey' + rel_id = self.harness.add_relation('certificates', 'vault') + self.add_cluster_relation() + self.harness.begin() + with patch('builtins.open', unittest.mock.mock_open()) as _open: + self.harness.add_relation_unit( + rel_id, + 'vault/0', + remote_unit_data={ + 'ceph-iscsi_0.processed_application_requests': + '{"app_data": {"cert": "appcert", "key": "appkey"}}', + 'ca': 'ca'}) + expect_calls = [ + call('/etc/ceph/iscsi-gateway.crt', 'w'), + call('/etc/ceph/iscsi-gateway.key', 'w'), + call('/etc/ceph/iscsi-gateway.pem', 'w'), + call('/usr/local/share/ca-certificates/vault_ca_cert.crt', 'w')] + for open_call in expect_calls: + self.assertIn(open_call, _open.call_args_list) + handle = _open() + handle.write.assert_has_calls([ + call('appcert'), + call('appkey'), + call('appcert\nappkey'), + call('ca'), + call('pubkey')]) + self.subprocess.check_call.assert_called_once_with( + ['update-ca-certificates']) + self.subprocess.check_output.assert_called_once_with( + ['openssl', 'x509', '-inform', 'pem', '-in', + '/etc/ceph/iscsi-gateway.pem', '-pubkey', '-noout']) + self.assertTrue(self.harness.charm.state.enable_tls) + + def test_custom_status_check(self): + self.harness.add_relation('ceph-client', 'ceph-mon') + self.harness.add_relation('cluster', 'ceph-iscsi') + self.harness.begin() + self.harness.charm.on.update_status.emit() + self.assertEqual( + self.harness.charm.unit.status.message, + '1 is an invalid unit count') + self.assertIsInstance( + self.harness.charm.unit.status, + BlockedStatus) From 947feb4e768dde7a09fe3bbdb890df8b47d03c3e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 29 Apr 2020 05:43:29 +0000 Subject: [PATCH 1936/2699] Unit test fixes * Fixed typo in test_ceph_iscsi_charm.py file name * Replaced artifical emit in test_on_has_peers with by adding relation data later in the test. * Updated calls to `add_relation_unit` as the `remote_unit_data` arg has been removed. 
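The operator-framework API change behind the second and third bullets,
in brief (rel_id and the unit name are illustrative):

    # Old Harness API: remote unit data could be supplied when adding
    # the unit.
    harness.add_relation_unit(rel_id, 'ceph-iscsi/1',
                              remote_unit_data={'gateway_ready': 'True'})

    # New Harness API: add the unit first, then publish its data
    # separately; update_relation_data() is what triggers the
    # relation-changed machinery.
    harness.add_relation_unit(rel_id, 'ceph-iscsi/1')
    harness.update_relation_data(rel_id, 'ceph-iscsi/1',
                                 {'gateway_ready': 'True'})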
--- ceph-iscsi/mod/operator | 2 +- ...scsi_charm.py => test_ceph_iscsi_charm.py} | 37 ++++++++++++++----- .../test_interface_ceph_iscsi_peer.py | 36 ++++++++++++------ 3 files changed, 53 insertions(+), 22 deletions(-) rename ceph-iscsi/unit_tests/{test_cepch_iscsi_charm.py => test_ceph_iscsi_charm.py} (92%) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 04cb3479..60c43f81 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 04cb347938b87776638fdeaa48f6c5c0f115346d +Subproject commit 60c43f81e36139ab4044c185247eb27fe389bce6 diff --git a/ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py similarity index 92% rename from ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py rename to ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 8cc2bad5..b3352d01 100644 --- a/ceph-iscsi/unit_tests/test_cepch_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -109,14 +109,16 @@ def test_init(self): def add_cluster_relation(self): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( + rel_id, + 'ceph-iscsi/1') + self.harness.update_relation_data( rel_id, 'ceph-iscsi/1', { 'ingress-address': '10.0.0.2', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-1.example' - } - ) + }) return rel_id @patch('socket.getfqdn') @@ -165,13 +167,23 @@ def test_on_create_target_action(self, _getfqdn): @patch.object(charm.secrets, 'choice') def test_on_has_peers(self, _choice): + rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') _choice.return_value = 'r' - self.add_cluster_relation() self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'ceph-iscsi/1') self.assertIsNone( self.harness.charm.peers.admin_password) self.harness.set_leader() - self.harness.charm.peers.on.has_peers.emit() + self.harness.update_relation_data( + rel_id, + 'ceph-iscsi/1', + { + 'ingress-address': '10.0.0.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + }) self.assertEqual( self.harness.charm.peers.admin_password, 'rrrrrrrr') @@ -204,10 +216,12 @@ def test_on_ceph_client_relation_joined(self): key_values={'rbd-metadata-pool': 'iscsi-pool'}) self.harness.begin() self.harness.add_relation_unit( + rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( rel_id, 'ceph-mon/0', - {'ingress-address': '10.0.0.3'}, - ) + {'ingress-address': '10.0.0.3'}) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') req_osd_settings = json.loads(rel_data['osd-settings']) self.assertEqual( @@ -263,10 +277,12 @@ def test_on_certificates_relation_joined(self, _gethostname): rel_id = self.harness.add_relation('certificates', 'vault') self.harness.begin() self.harness.add_relation_unit( + rel_id, + 'vault/0') + self.harness.update_relation_data( rel_id, 'vault/0', - {'ingress-address': '10.0.0.3'}, - ) + {'ingress-address': '10.0.0.3'}) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual( rel_data['application_cert_requests'], @@ -281,9 +297,12 @@ def test_on_certificates_relation_changed(self, _gethostname): self.harness.begin() with patch('builtins.open', unittest.mock.mock_open()) as _open: self.harness.add_relation_unit( + rel_id, + 'vault/0') + self.harness.update_relation_data( rel_id, 'vault/0', - remote_unit_data={ + { 'ceph-iscsi_0.processed_application_requests': '{"app_data": {"cert": "appcert", "key": "appkey"}}', 'ca': 'ca'}) diff --git a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py 
b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py index 12f27704..f57b8b4c 100644 --- a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py @@ -55,14 +55,16 @@ def on_ready_peers(self, event): receiver) relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/1', { 'ingress-address': '192.0.2.2', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-1.example' - } - ) + }) self.assertEqual(len(receiver.observed_events), 1) self.assertIsInstance(receiver.observed_events[0], ReadyPeersEvent) @@ -106,28 +108,34 @@ def test_ready_peer_details(self, _getfqdn, _cluster_bind_address): relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/1', { 'ingress-address': '192.0.2.2', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-1.example' - } - ) + }) self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/2') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/2', { 'ingress-address': '192.0.2.3', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-2.example', - } - ) + }) self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/3') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/3', - {'ingress-address': '192.0.2.4'} - ) + {'ingress-address': '192.0.2.4'}) self.peers.ready_peer_details @@ -143,23 +151,27 @@ def test_ready_peer_addresses(self, _cluster_bind_address): relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/1') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/1', { 'ingress-address': '192.0.2.2', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-1.example' - } - ) + }) self.harness.add_relation_unit( + relation_id, + 'ceph-iscsi/2') + self.harness.update_relation_data( relation_id, 'ceph-iscsi/2', { 'ingress-address': '192.0.2.3', 'gateway_ready': 'True', 'gateway_fqdn': 'ceph-iscsi-2.example', - } - ) + }) self.assertEqual(['192.0.2.1', '192.0.2.2', '192.0.2.3'], self.peers.peer_addresses) From d3625e34b2492a42c065e57bc1f43c8f3701b8e5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 28 Apr 2020 15:27:37 +0000 Subject: [PATCH 1937/2699] Support charm-helpers from submodule --- ceph-iscsi/.gitignore | 4 ++-- ceph-iscsi/.gitmodules | 3 +++ ceph-iscsi/build-requirements.txt | 2 ++ ceph-iscsi/charm-prep.sh | 1 - ceph-iscsi/lib/README.txt | 1 + ceph-iscsi/mod/charm-helpers | 1 + ceph-iscsi/tox.ini | 40 +++++++++++++++++++++++-------- 7 files changed, 39 insertions(+), 13 deletions(-) create mode 100644 ceph-iscsi/build-requirements.txt create mode 100644 ceph-iscsi/lib/README.txt create mode 160000 ceph-iscsi/mod/charm-helpers diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore index 1ec16378..18cac16c 100644 --- a/ceph-iscsi/.gitignore +++ b/ceph-iscsi/.gitignore @@ -1,6 +1,6 @@ -lib .tox -test-decrpyt.py .swp __pycache__ .stestr/ +lib/* +!lib/README.txt diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules index bb016b77..cc44e0e7 100644 --- a/ceph-iscsi/.gitmodules +++ b/ceph-iscsi/.gitmodules @@ -7,3 +7,6 @@ [submodule "mod/ops-openstack"] path = mod/ops-openstack url = https://github.com/gnuoy/ops-openstack.git +[submodule "mod/charm-helpers"] + path = mod/charm-helpers + url = 
https://github.com/juju/charm-helpers.git diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt new file mode 100644 index 00000000..81ba4155 --- /dev/null +++ b/ceph-iscsi/build-requirements.txt @@ -0,0 +1,2 @@ +mod/charm-helpers +#file:///home/ubuntu/branches/ceph-iscsi/mod/charm-helpers diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh index d41ad095..0f29b419 100755 --- a/ceph-iscsi/charm-prep.sh +++ b/ceph-iscsi/charm-prep.sh @@ -1,7 +1,6 @@ #!/bin/bash rm -rf lib/* -rm adapters.py interface_ceph_client.py ops ops_openstack.py pip install -t lib/ git+https://github.com/juju/charm-helpers.git diff --git a/ceph-iscsi/lib/README.txt b/ceph-iscsi/lib/README.txt new file mode 100644 index 00000000..7931d0d4 --- /dev/null +++ b/ceph-iscsi/lib/README.txt @@ -0,0 +1 @@ +Only generated files should be in here diff --git a/ceph-iscsi/mod/charm-helpers b/ceph-iscsi/mod/charm-helpers new file mode 160000 index 00000000..f3f36f85 --- /dev/null +++ b/ceph-iscsi/mod/charm-helpers @@ -0,0 +1 @@ +Subproject commit f3f36f85f54380a651ba05972e78467ad22468e3 diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 7e543b85..4c3de17f 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -1,12 +1,5 @@ -# Classic charm (with zaza): ./tox.ini -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# TODO: Distill the func test requirements from the lint/unit test -# requirements. They are intertwined. Also, Zaza itself should specify -# all of its own requirements and if it doesn't, fix it there. +# Operator charm (with zaza): tox.ini + [tox] envlist = pep8,py3 skipsdist = True @@ -22,7 +15,9 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = juju +whitelist_externals = + git + ln passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt @@ -84,6 +79,31 @@ omit = basepython = python3 commands = {posargs} +[testenv:build] +basepython = python3 +deps = +commands = + git submodule update --init + pip install -t lib -r build-requirements.txt + ln -f -t lib -s ../mod/operator/ops + ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py + ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py + ln -f -t lib -s ../mod/ops-openstack/adapters.py + +[testenv:update-deps] +basepython = python3 +deps = +commands = + git submodule update --init + git -C mod/operator pull origin master + git -C mod/ops-openstack pull origin master + git -C mod/charm-helpers pull origin master + pip install -t lib -r build-requirements.txt --upgrade + ln -f -t lib -s ../mod/operator/ops + ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py + ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py + ln -f -t lib -s ../mod/ops-openstack/adapters.py + [testenv:func-noop] basepython = python3 commands = From 18ecb8d7a962b1d826afd3a1d811c9a959bbf686 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 29 Apr 2020 09:26:34 +0000 Subject: [PATCH 1938/2699] Add tox targets for managing dependencies --- ceph-iscsi/README.md | 8 ++++++++ ceph-iscsi/build-requirements.txt | 1 - ceph-iscsi/charm-init.sh | 24 ++++++++++++++++++++++++ ceph-iscsi/charm-prep.sh | 15 --------------- ceph-iscsi/mod/operator | 2 +- ceph-iscsi/tox.ini | 18 +++--------------- 6 files 
changed, 36 insertions(+), 32 deletions(-) create mode 100755 ceph-iscsi/charm-init.sh delete mode 100755 ceph-iscsi/charm-prep.sh diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 62c03c4f..3b6730fe 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -219,6 +219,14 @@ Alternatively, configuration can be provided as part of a bundle: > **Warning**: As of the time of writing the workaround to set the CHAP credentials via the esx cli is still needed. +## Development + +The charm needs to pull in its dependencies before it can be deployed. To +pull in the dependency versions that correspond to this version of the +charm, run the `build` tox target. + +To update all dependencies to their latest versions, run the `update-deps` +tox target. diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt index 81ba4155..1b770a72 100644 --- a/ceph-iscsi/build-requirements.txt +++ b/ceph-iscsi/build-requirements.txt @@ -1,2 +1 @@ mod/charm-helpers -#file:///home/ubuntu/branches/ceph-iscsi/mod/charm-helpers diff --git a/ceph-iscsi/charm-init.sh b/ceph-iscsi/charm-init.sh new file mode 100755 index 00000000..64372359 --- /dev/null +++ b/ceph-iscsi/charm-init.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +UPDATE="" +while getopts ":u" opt; do + case $opt in + u) UPDATE=true;; + esac +done + +git submodule update --init + +if [[ -z "$UPDATE" ]]; then + pip install -t lib -r build-requirements.txt +else + git -C mod/operator pull origin master + git -C mod/ops-openstack pull origin master + git -C mod/charm-helpers pull origin master + pip install -t lib -r build-requirements.txt --upgrade +fi + +ln -f -t lib -s ../mod/operator/ops +ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py +ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py +ln -f -t lib -s ../mod/ops-openstack/adapters.py diff --git a/ceph-iscsi/charm-prep.sh b/ceph-iscsi/charm-prep.sh deleted file mode 100755 index 0f29b419..00000000 --- a/ceph-iscsi/charm-prep.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -rm -rf lib/* - -pip install -t lib/ git+https://github.com/juju/charm-helpers.git - -git submodule init -git submodule update -(cd lib; ln -s ../mod/operator/ops;) -(cd lib; ln -s ../mod/interface-ceph-client/interface_ceph_client.py;) -(cd lib; ln -s ../mod/ops-openstack/ops_openstack.py) -(cd lib; ln -s ../mod/ops-openstack/adapters.py) -(cd mod/interface-ceph-client; git pull origin master) -(cd mod/operator; git pull origin master) -(cd mod/ops-openstack; git pull origin master) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 60c43f81..58ff602b 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 60c43f81e36139ab4044c185247eb27fe389bce6 +Subproject commit 58ff602b29763172c39502de0fa64de965cfbea7 diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 4c3de17f..cf9f42d3 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -18,6 +18,7 @@ commands = stestr run --slowest {posargs} whitelist_externals = git ln + charm-init.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt @@ -83,26 +84,13 @@ commands = {posargs} basepython = python3 deps = commands = - git submodule update --init - pip install -t lib -r build-requirements.txt - ln -f -t lib -s ../mod/operator/ops - ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py - ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py - ln -f -t lib -s ../mod/ops-openstack/adapters.py + ./charm-init.sh
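As a minimal sketch of how charm code can consume the lib/ layout that the build target above produces: the symlink targets come from the patch itself, but the sys.path bootstrap and the file location are assumptions for illustration, not something this patch shows.

    # Hypothetical bootstrap at the top of src/charm.py: make the vendored
    # packages and symlinks in lib/ importable before anything else runs.
    import os
    import sys

    _lib = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib')
    sys.path.insert(0, _lib)  # lib/ holds the pip-installed deps and symlinks

    import ops                    # symlinked from mod/operator/ops
    import ops_openstack          # symlinked from mod/ops-openstack/ops_openstack.py
    import interface_ceph_client  # symlinked from mod/interface-ceph-client/
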
[testenv:update-deps] basepython = python3 deps = commands = - git submodule update --init - git -C mod/operator pull origin master - git -C mod/ops-openstack pull origin master - git -C mod/charm-helpers pull origin master - pip install -t lib -r build-requirements.txt --upgrade - ln -f -t lib -s ../mod/operator/ops - ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py - ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py - ln -f -t lib -s ../mod/ops-openstack/adapters.py + ./charm-init.sh -u [testenv:func-noop] basepython = python3 From 7b9a57444a9e83d456846da9320133f7df7a7283 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 26 Mar 2020 13:53:47 +0000 Subject: [PATCH 1939/2699] Enable focal and ussuri as part of the gate tests Add bionic-ussuri and focal-ussuri (with mysql8 support) bundles. Change-Id: I525b13cf92644dc9d0a4c5290e7a951e3d1a653a --- ceph-mon/metadata.yaml | 1 + ceph-mon/tests/bundles/bionic-queens.yaml | 2 +- ceph-mon/tests/bundles/bionic-rocky.yaml | 2 +- ceph-mon/tests/bundles/bionic-stein.yaml | 2 +- .../tests/bundles/bionic-train-with-fsid.yaml | 2 +- ceph-mon/tests/bundles/bionic-train.yaml | 4 +- ceph-mon/tests/bundles/bionic-ussuri.yaml | 119 +++++++++ ceph-mon/tests/bundles/focal-ussuri.yaml | 230 ++++++++++++++++++ ceph-mon/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-mon/tests/bundles/xenial-mitaka.yaml | 2 +- ceph-mon/tests/bundles/xenial-ocata.yaml | 2 +- ceph-mon/tests/bundles/xenial-pike.yaml | 2 +- ceph-mon/tests/bundles/xenial-queens.yaml | 2 +- ceph-mon/tests/tests.yaml | 7 +- 14 files changed, 367 insertions(+), 12 deletions(-) create mode 100644 ceph-mon/tests/bundles/bionic-ussuri.yaml create mode 100644 ceph-mon/tests/bundles/focal-ussuri.yaml diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 08ff8ce8..62f824c2 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - eoan + - focal - trusty peers: mon: diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index 1c543531..d6115de4 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -8,7 +8,7 @@ applications: options: osd-devices: '/srv/ceph /dev/test-non-existent' ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: bionic num_units: 3 options: diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml index b4afdeeb..88a47e10 100644 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:bionic-rocky ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: bionic num_units: 3 options: diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml index 86185f5c..2778b254 100644 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:bionic-stein ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: bionic num_units: 3 options: diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml index 418064e2..40ce0925 100644 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/dev/test-non-existent' source: 
cloud:bionic-train/proposed ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: bionic num_units: 3 options: diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index de4a3c85..de35e827 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 series: bionic storage: @@ -10,7 +10,7 @@ applications: osd-devices: '/dev/test-non-existent' source: cloud:bionic-train ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ../../../ceph-mon num_units: 3 options: monitor-count: '3' diff --git a/ceph-mon/tests/bundles/bionic-ussuri.yaml b/ceph-mon/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..62d50d12 --- /dev/null +++ b/ceph-mon/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,119 @@ +series: bionic +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + series: bionic + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-ussuri + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-ussuri + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-ussuri + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-ussuri + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: cloud:bionic-ussuri + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + prometheus2: + charm: cs:prometheus2 + num_units: 1 +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - nova-compute:image-service + - glance:image-service +- - nova-compute:ceph + - ceph-mon:client +- - keystone:shared-db + - percona-cluster:shared-db +- - glance:shared-db + - percona-cluster:shared-db +- - glance:identity-service + - keystone:identity-service +- - glance:amqp + - rabbitmq-server:amqp +- - glance:ceph + - ceph-mon:client +- - cinder:shared-db + - percona-cluster:shared-db +- - cinder:identity-service + - keystone:identity-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:image-service + - glance:image-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - cinder-ceph:ceph + - ceph-mon:client +- - ceph-osd:mon + - ceph-mon:osd +- - nova-cloud-controller:shared-db + - percona-cluster:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - 
nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:image-service + - glance:image-service +- - placement + - percona-cluster +- - placement + - keystone +- - placement + - nova-cloud-controller +- - ceph-mon:prometheus + - prometheus2:target diff --git a/ceph-mon/tests/bundles/focal-ussuri.yaml b/ceph-mon/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..4e0582ef --- /dev/null +++ b/ceph-mon/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,230 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: + charm: cs:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml index 0fc7c3e0..2e25f9ae 100644 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -13,7 +13,7 @@ applications: constraints: virt-type=kvm ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: trusty num_units: 3 options: diff --git a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml index c7a987bc..ae5967d1 100644 --- a/ceph-mon/tests/bundles/xenial-mitaka.yaml +++ b/ceph-mon/tests/bundles/xenial-mitaka.yaml @@ -8,7 +8,7 @@ applications: options: osd-devices: '/srv/ceph /dev/test-non-existent' ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: xenial num_units: 3 options: diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml index 12d17dd6..8f02a5be 100644 --- a/ceph-mon/tests/bundles/xenial-ocata.yaml +++ b/ceph-mon/tests/bundles/xenial-ocata.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-ocata ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: xenial num_units: 3 options: diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml index bdefea27..63dc3193 100644 --- a/ceph-mon/tests/bundles/xenial-pike.yaml +++ b/ceph-mon/tests/bundles/xenial-pike.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-pike ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: xenial num_units: 3 options: diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml index 1e5789ba..ac31a915 100644 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -9,7 +9,7 @@ applications: osd-devices: '/srv/ceph /dev/test-non-existent' source: cloud:xenial-queens ceph-mon: - charm: ceph-mon + charm: ../../../ceph-mon series: xenial num_units: 3 options: diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index aea60ae0..d92d4fcc 100644 --- 
a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-mon gate_bundles: + - focal-ussuri + - bionic-ussuri - bionic-train - bionic-train-with-fsid - bionic-stein @@ -20,4 +22,7 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest \ No newline at end of file + - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest +tests_options: + force_deploy: + - focal-ussuri From 0471b8b3bfe93a9b98491777fbfaec152d9ce29e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 29 Apr 2020 18:58:22 +0100 Subject: [PATCH 1940/2699] Fix up the test-paths to be more obvious There's "magic" in the zaza package that converts charm names to paths for the charm under test. However, it is poorly understood, and most charms use the relative path. This patchset does that. Overlays complicate that picture, but this charm doesn't use overlays. Change-Id: Ic558c952359a5e5f60125452433d7b0588d0a1c4 --- ceph-osd/tests/bundles/bionic-queens.yaml | 2 +- ceph-osd/tests/bundles/bionic-rocky.yaml | 2 +- ceph-osd/tests/bundles/bionic-stein.yaml | 2 +- ceph-osd/tests/bundles/bionic-train.yaml | 2 +- ceph-osd/tests/bundles/bionic-ussuri.yaml | 2 +- ceph-osd/tests/bundles/eoan-train.yaml | 2 +- ceph-osd/tests/bundles/focal-ussuri.yaml | 2 +- ceph-osd/tests/bundles/trusty-mitaka.yaml | 2 +- ceph-osd/tests/bundles/xenial-mitaka.yaml | 2 +- ceph-osd/tests/bundles/xenial-ocata.yaml | 2 +- ceph-osd/tests/bundles/xenial-pike.yaml | 2 +- ceph-osd/tests/bundles/xenial-queens.yaml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ceph-osd/tests/bundles/bionic-queens.yaml b/ceph-osd/tests/bundles/bionic-queens.yaml index 05be7b11..74c8cc3d 100644 --- a/ceph-osd/tests/bundles/bionic-queens.yaml +++ b/ceph-osd/tests/bundles/bionic-queens.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd series: bionic num_units: 3 storage: diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml index f1385175..8c1a02ca 100644 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: bionic storage: diff --git a/ceph-osd/tests/bundles/bionic-stein.yaml b/ceph-osd/tests/bundles/bionic-stein.yaml index bf822df5..a226ba05 100644 --- a/ceph-osd/tests/bundles/bionic-stein.yaml +++ b/ceph-osd/tests/bundles/bionic-stein.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: bionic storage: diff --git a/ceph-osd/tests/bundles/bionic-train.yaml b/ceph-osd/tests/bundles/bionic-train.yaml index 1b270900..34636498 100644 --- a/ceph-osd/tests/bundles/bionic-train.yaml +++ b/ceph-osd/tests/bundles/bionic-train.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: bionic storage: diff --git a/ceph-osd/tests/bundles/bionic-ussuri.yaml b/ceph-osd/tests/bundles/bionic-ussuri.yaml index b82bc2e5..647de631 100644 --- a/ceph-osd/tests/bundles/bionic-ussuri.yaml +++ b/ceph-osd/tests/bundles/bionic-ussuri.yaml @@ -1,7 +1,7 @@ series: bionic applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: bionic storage: diff 
--git a/ceph-osd/tests/bundles/eoan-train.yaml b/ceph-osd/tests/bundles/eoan-train.yaml index 8539256c..4aab9457 100644 --- a/ceph-osd/tests/bundles/eoan-train.yaml +++ b/ceph-osd/tests/bundles/eoan-train.yaml @@ -1,7 +1,7 @@ series: eoan applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: eoan storage: diff --git a/ceph-osd/tests/bundles/focal-ussuri.yaml b/ceph-osd/tests/bundles/focal-ussuri.yaml index 78e4e7de..ace547bf 100644 --- a/ceph-osd/tests/bundles/focal-ussuri.yaml +++ b/ceph-osd/tests/bundles/focal-ussuri.yaml @@ -50,7 +50,7 @@ applications: - '2' ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 storage: osd-devices: 'cinder,10G' diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml index 8e4b598f..f2d53bec 100644 --- a/ceph-osd/tests/bundles/trusty-mitaka.yaml +++ b/ceph-osd/tests/bundles/trusty-mitaka.yaml @@ -1,7 +1,7 @@ series: trusty applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: trusty storage: diff --git a/ceph-osd/tests/bundles/xenial-mitaka.yaml b/ceph-osd/tests/bundles/xenial-mitaka.yaml index 37760454..845fb18b 100644 --- a/ceph-osd/tests/bundles/xenial-mitaka.yaml +++ b/ceph-osd/tests/bundles/xenial-mitaka.yaml @@ -1,7 +1,7 @@ series: xenial applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: xenial storage: diff --git a/ceph-osd/tests/bundles/xenial-ocata.yaml b/ceph-osd/tests/bundles/xenial-ocata.yaml index 90676783..0985a425 100644 --- a/ceph-osd/tests/bundles/xenial-ocata.yaml +++ b/ceph-osd/tests/bundles/xenial-ocata.yaml @@ -1,7 +1,7 @@ series: xenial applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: xenial storage: diff --git a/ceph-osd/tests/bundles/xenial-pike.yaml b/ceph-osd/tests/bundles/xenial-pike.yaml index 7a5b49cc..0c089bb1 100644 --- a/ceph-osd/tests/bundles/xenial-pike.yaml +++ b/ceph-osd/tests/bundles/xenial-pike.yaml @@ -1,7 +1,7 @@ series: xenial applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: xenial storage: diff --git a/ceph-osd/tests/bundles/xenial-queens.yaml b/ceph-osd/tests/bundles/xenial-queens.yaml index dcc63ced..ba4f5b72 100644 --- a/ceph-osd/tests/bundles/xenial-queens.yaml +++ b/ceph-osd/tests/bundles/xenial-queens.yaml @@ -1,7 +1,7 @@ series: xenial applications: ceph-osd: - charm: ceph-osd + charm: ../../../ceph-osd num_units: 3 series: xenial storage: From 255bc2b64cf649c84086360a3ccc321e9615c2ef Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 30 Apr 2020 15:54:59 +0000 Subject: [PATCH 1941/2699] Update readme --- ceph-iscsi/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 3b6730fe..d424abab 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -29,8 +29,8 @@ A sample `bundle.yaml` file's contents: charm: cs:ceph-iscsi num_units: 2 to: - - lxd:0 - - lxd:1 + - '0' + - '1' ceph-osd: charm: cs:ceph-osd num_units: 3 From df7a9e49d891829396264dc036610cf69626e649 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 30 Apr 2020 16:34:45 +0000 Subject: [PATCH 1942/2699] Drop to earlier framework version due to circular import error --- ceph-iscsi/mod/operator | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 58ff602b..bb9b534e 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ 
-Subproject commit 58ff602b29763172c39502de0fa64de965cfbea7 +Subproject commit bb9b534e68b04286eef78a36ed930815ffb1968d From d191d85ab47c64c048be511f9468488ba67f75d5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 1 May 2020 07:26:47 +0000 Subject: [PATCH 1943/2699] General fixes * Specify minimum Juju version * Stop cluster count block being overwritten on setup * Fix cluster count in peer relation when relation is not ready * Move location of ops-openstack to openstack-charmers --- ceph-iscsi/.gitmodules | 2 +- ceph-iscsi/metadata.yaml | 1 + ceph-iscsi/mod/ops-openstack | 2 +- ceph-iscsi/src/charm.py | 11 +++++++++++ ceph-iscsi/src/interface_ceph_iscsi_peer.py | 7 +++++- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules index cc44e0e7..6386c4f9 100644 --- a/ceph-iscsi/.gitmodules +++ b/ceph-iscsi/.gitmodules @@ -6,7 +6,7 @@ url = https://github.com/gnuoy/oper-interface-ceph-client.git [submodule "mod/ops-openstack"] path = mod/ops-openstack - url = https://github.com/gnuoy/ops-openstack.git + url = https://github.com/openstack-charmers/ops-openstack.git [submodule "mod/charm-helpers"] path = mod/charm-helpers url = https://github.com/juju/charm-helpers.git diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index c809f86c..0f3c7f69 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -12,6 +12,7 @@ tags: series: - focal subordinate: false +min-juju-version: 2.7.6 extra-bindings: public: cluster: diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index 58c9f309..18b8f1fc 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit 58c9f3093ecc8c1797272d7a6720d7f8ef2d39aa +Subproject commit 18b8f1fcfe7bee87217e7d3152e377c2c3e3f2ff diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 6a4343e2..bef471b3 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -131,6 +131,13 @@ def __init__(self, framework, key): self.framework.observe(self.on.config_changed, self) self.framework.observe(self.on.upgrade_charm, self) + def on_install(self, event): + if ch_host.is_container(): + logging.info("Installing into a container is not supported") + self.update_status() + else: + self.install_pkgs() + def on_add_trusted_ip_action(self, event): self.state.additional_trusted_ips.append( event.params['ips'].split(' ')) @@ -296,6 +303,10 @@ def on_certificates_relation_changed(self, event): self.on_pools_available(event) def custom_status_check(self): + if ch_host.is_container(): + self.unit.status = ops.model.BlockedStatus( + 'Charm cannot be deployed into a container') + return False if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS: self.unit.status = ops.model.BlockedStatus( '{} is an invalid unit count'.format(self.peers.unit_count)) diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index beeac301..20eba6d9 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -102,8 +102,11 @@ def peer_addresses(self): @property def peer_count(self): - return len(self.peer_rel.units) + if self.peer_rel: + return len(self.peer_rel.units) + else: + return 0 @property def unit_count(self): - return len(self.peer_rel.units) + 1 + return self.peer_count + 1 From ef402e8a7596fbc596089dcc2d421992b13070a0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 1 May 2020 10:15:26 +0000 Subject: [PATCH 1944/2699] Switch to
all focal func tests --- ceph-iscsi/tests/bundles/focal.yaml | 47 +++++++++++++++-------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 2d0cac8b..cc7d12a4 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -8,16 +8,19 @@ machines: '4': '5': '6': - series: bionic '7': - series: bionic '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M applications: ubuntu: charm: cs:ubuntu num_units: 1 to: - - '8' + - '7' ceph-iscsi: charm: ../../ num_units: 2 @@ -47,27 +50,27 @@ applications: - '4' - '5' vault: - series: bionic num_units: 1 -# charm: cs:~openstack-charmers-next/vault - charm: cs:~gnuoy/vault-29 + charm: cs:~openstack-charmers-next/vault to: - '6' - mysql: - series: bionic - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - innodb-buffer-pool-size: 256M - max-connections: 1000 + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 to: - - '7' + - '8' + - '9' + - '10' + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router relations: -- - ceph-mon:client - - ceph-iscsi:ceph-client -- - vault:certificates - - ceph-iscsi:certificates -- - ceph-osd:mon - - ceph-mon:osd -- - vault:shared-db - - mysql:shared-db + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' From b33646fa9f211142597788151a2d126eda9a6cc8 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 31 Mar 2020 21:05:45 +0100 Subject: [PATCH 1945/2699] Enable focal and ussuri testing for gate This adds bundles for: * bionic-ussuri * focal-ussuri It also adds a force option for the focal-ussuri bundle, as focal is not yet officially supported by Juju.
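The functional change in this commit (in the diff below) gates the package list on the host's Ubuntu release, presumably because btrfs-tools was renamed to btrfs-progs in focal. A condensed sketch of that selection logic, using the same charmhelpers calls the patch itself uses (it assumes charmhelpers is importable on the unit):

    from charmhelpers.core.host import CompareHostReleases, lsb_release

    PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']
    PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']

    def packages_for_host():
        # CompareHostReleases orders Ubuntu codenames, so a simple >=
        # comparison selects the focal-and-later package names.
        release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(release) >= 'focal':
            return PACKAGES_FOCAL
        return PACKAGES
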
Change-Id: Ic0f82df6f1dc76cdba21f23e8b04ff9e12f32181 --- ceph-fs/src/metadata.yaml | 1 + ceph-fs/src/reactive/ceph_fs.py | 12 +- ceph-fs/src/tests/bundles/bionic-ussuri.yaml | 139 ++++++++++++ ceph-fs/src/tests/bundles/focal-ussuri.yaml | 219 +++++++++++++++++++ ceph-fs/src/tests/tests.yaml | 7 +- 5 files changed, 376 insertions(+), 2 deletions(-) create mode 100644 ceph-fs/src/tests/bundles/bionic-ussuri.yaml create mode 100644 ceph-fs/src/tests/bundles/focal-ussuri.yaml diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 1aeb7bcc..f3099745 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - eoan + - focal subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index a4041298..9091e8ad 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -28,6 +28,8 @@ network_get_primary_address, relation_ids, status_set) from charmhelpers.core.host import ( + CompareHostReleases, + lsb_release, service_restart, service) from charmhelpers.contrib.network.ip import ( @@ -42,6 +44,7 @@ from charms.apt import queue_install, add_source PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] +PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] TEMPLATES_DIR = 'templates' VERSION_PACKAGE = 'ceph-common' @@ -57,7 +60,11 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR): @when_not('apt.installed.ceph') def install_ceph_base(): add_source(config('source'), key=config('key')) - queue_install(PACKAGES) + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) >= 'focal': + queue_install(PACKAGES_FOCAL) + else: + queue_install(PACKAGES) @when_not('apt.installed.ceph-mds') @@ -281,4 +288,7 @@ def pre_series_upgrade(): def post_series_upgrade(): """Handler for post-series-upgrade. 
""" + release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(release) >= 'focal': + queue_install(PACKAGES_FOCAL) unitdata.kv().set('charm.vault.series-upgrading', False) diff --git a/ceph-fs/src/tests/bundles/bionic-ussuri.yaml b/ceph-fs/src/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..5eada6c7 --- /dev/null +++ b/ceph-fs/src/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,139 @@ +series: bionic +applications: + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: cloud:bionic-ussuri + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-ussuri + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-ussuri + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-ussuri + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-ussuri + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:bionic-ussuri + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: cloud:bionic-ussuri + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: cloud:bionic-ussuri +relations: +- - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' +- - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-compute:image-service' + - 'glance:image-service' +- - 'nova-compute:ceph' + - 'ceph-mon:client' +- - 'keystone:shared-db' + - 'percona-cluster:shared-db' +- - 'glance:shared-db' + - 'percona-cluster:shared-db' +- - 'glance:identity-service' + - 'keystone:identity-service' +- - 'glance:amqp' + - 'rabbitmq-server:amqp' +- - 'glance:ceph' + - 'ceph-mon:client' +- - 'ceph-osd:mon' + - 'ceph-mon:osd' +- - 'nova-cloud-controller:shared-db' + - 'percona-cluster:shared-db' +- - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' +- - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' +- - 'nova-cloud-controller:image-service' + - 'glance:image-service' +- - 'placement' + - 'percona-cluster' +- - 'placement' + - 'keystone' +- - 'placement' + - 'nova-cloud-controller' +- - 
'neutron-api:shared-db' + - 'percona-cluster:shared-db' +- - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' +- - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' +- - 'neutron-api:identity-service' + - 'keystone:identity-service' +- - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' +- - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' +- - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' +- - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-ussuri.yaml b/ceph-fs/src/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..7f348f1b --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,219 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: 
*openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 71f6a5dc..46773214 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-fs gate_bundles: + - focal-ussuri + - bionic-ussuri - bionic-train - bionic-stein - bionic-rocky @@ -12,7 +14,7 @@ gate_bundles: smoke_bundles: - bionic-stein dev_bundles: - - eoan-train + - bionic-train configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network @@ -21,3 +23,6 @@ configure: - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests +tests_options: + force_deploy: + - focal-ussuri From eb74d5703f00983d1880c38d6c71e5980497a2d1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 23 Apr 2020 13:04:51 +0000 Subject: [PATCH 1946/2699] Apply OSD settings from mons. Apply OSD settings requested by the mons via the juju relation. Add the OSD settings to config too. Before applying the settings config-flags is checked to ensure there is no overlap. 
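Before the patch itself, a compact sketch of the consolidation rule the commit message describes: a requested setting is accepted only if it is whitelisted, and two clients may not request different values for the same key. The names here are illustrative; the real implementation is get_osd_settings in the diff below.

    # Illustrative merge of per-unit OSD setting requests (editor's sketch,
    # not the charm's code): whitelist check first, then conflict check.
    WHITELIST = {'osd heartbeat grace', 'osd heartbeat interval'}

    def consolidate(unit_requests):
        merged = {}
        for request in unit_requests:
            for key, value in request.items():
                if key not in WHITELIST:
                    raise ValueError('setting "{}" is not whitelisted'.format(key))
                if key in merged and merged[key] != value:
                    raise ValueError('conflicting values for "{}"'.format(key))
                merged[key] = value
        # Sorted output keeps the rendered ceph.conf stable across runs.
        return dict(sorted(merged.items()))

    # Two units agreeing on a value is fine; a disagreement raises.
    assert consolidate([{'osd heartbeat grace': '20'},
                        {'osd heartbeat grace': '20'}]) == {'osd heartbeat grace': '20'}
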
Change-Id: Id69222217a1c99d0269831913abdf488791cb572 --- ceph-osd/hooks/ceph_hooks.py | 9 +- .../charmhelpers/contrib/openstack/utils.py | 306 +++++++++++++++++- .../contrib/openstack/vaultlocker.py | 13 +- .../contrib/storage/linux/ceph.py | 147 ++++++++- .../contrib/storage/linux/loopback.py | 8 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 56 ++-- ceph-osd/hooks/charmhelpers/core/sysctl.py | 14 +- ceph-osd/lib/charms_ceph/utils.py | 58 ++++ ceph-osd/templates/ceph.conf | 14 + ceph-osd/unit_tests/test_ceph_hooks.py | 27 +- 10 files changed, 620 insertions(+), 32 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 2479453c..d453b32d 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -93,8 +93,7 @@ format_ipv6_addr, get_relation_ip, ) -from charmhelpers.contrib.storage.linux.ceph import ( - CephConfContext) +import charmhelpers.contrib.storage.linux.ceph as ch_ceph from charmhelpers.contrib.storage.linux.utils import ( is_device_mounted, is_block_device, @@ -435,7 +434,8 @@ def get_ceph_context(upgrading=False): # NOTE(dosaboy): these sections must correspond to what is supported in the # config template. sections = ['global', 'osd'] - cephcontext.update(CephConfContext(permitted_sections=sections)()) + cephcontext.update( + ch_ceph.CephOSDConfContext(permitted_sections=sections)()) return cephcontext @@ -656,6 +656,9 @@ def mon_relation(): import_osd_bootstrap_key(bootstrap_key) import_osd_upgrade_key(upgrade_key) prepare_disks_and_activate() + _, settings, _ = (ch_ceph.CephOSDConfContext() + .filter_osd_from_mon_settings()) + ceph.apply_osd_settings(settings) else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 5c8f6eff..e59e0d1e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -13,7 +13,7 @@ # limitations under the License. # Common python helper functions used for OpenStack charms. -from collections import OrderedDict +from collections import OrderedDict, namedtuple from functools import wraps import subprocess @@ -36,15 +36,20 @@ from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, action_fail, action_set, config, + expected_peer_units, + expected_related_units, log as juju_log, charm_dir, INFO, ERROR, + metadata, related_units, relation_get, + relation_id, relation_ids, relation_set, status_set, @@ -53,6 +58,7 @@ cached, leader_set, leader_get, + local_unit, ) from charmhelpers.core.strutils import ( @@ -108,6 +114,10 @@ POLICYD_CONFIG_NAME, ) +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -1810,6 +1820,16 @@ def os_application_version_set(package): application_version_set(application_version) +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. 
+ :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + def enable_memcache(source=None, release=None, package=None): """Determine if memcache should be enabled on the local unit @@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None): 'WARN') pass return True in notifications + + +@cached +def container_scoped_relations(): + """Get all the container scoped relations + + :returns: List of relation names + :rtype: List + """ + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_db_ready(use_current_context=False, rel_name=None): + """Check remote database is ready to be used. + + Database relations are expected to provide a list of 'allowed' units to + confirm that the database is ready for use by those units. + + If db relation has provided this information and local unit is a member, + returns True otherwise False. + + :param use_current_context: Whether to limit checks to current hook + context. + :type use_current_context: bool + :param rel_name: Name of relation to check + :type rel_name: string + :returns: Whether remote db is ready. + :rtype: bool + :raises: Exception + """ + key = 'allowed_units' + + rel_name = rel_name or 'shared-db' + this_unit = local_unit() + + if use_current_context: + if relation_id() in relation_ids(rel_name): + rids_units = [(None, None)] + else: + raise Exception("use_current_context=True but not in {} " + "rel hook contexts (currently in {})." + .format(rel_name, relation_id())) + else: + rids_units = [(r_id, u) + for r_id in relation_ids(rel_name) + for u in related_units(r_id)] + + for rid, unit in rids_units: + allowed_units = relation_get(rid=rid, unit=unit, attribute=key) + if allowed_units and this_unit in allowed_units.split(): + juju_log("This unit ({}) is in allowed unit list from {}".format( + this_unit, + unit), 'DEBUG') + return True + + juju_log("This unit was not found in any allowed unit list") + return False + + +def is_expected_scale(peer_relation_name='cluster'): + """Query juju goal-state to determine whether our peer- and dependency- + relations are at the expected scale. + + Useful for deferring per unit per relation housekeeping work until we are + ready to complete it successfully and without unnecessary repetition. + + Always returns True if version of juju used does not support goal-state. + + :param peer_relation_name: Name of peer relation + :type peer_relation_name: string + :returns: True or False + :rtype: bool + """ + def _get_relation_id(rel_type): + return next((rid for rid in relation_ids(reltype=rel_type)), None) + + Relation = namedtuple('Relation', 'rel_type rel_id') + peer_rid = _get_relation_id(peer_relation_name) + # Units with no peers should still have a peer relation.
+ if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. + :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). 
A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WORKLOAD_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WORKLOAD_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WORKLOAD_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. + + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. 
+ + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WORKLOAD_STATES.ACTIVE: + if are_peers_ready(): + return WORKLOAD_STATES.ACTIVE, 'Application Ready' + else: + return WORKLOAD_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 866a2697..4690f6b0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -140,9 +140,16 @@ def vault_relation_complete(backend=None): :ptype backend: string :returns: whether the relation to vault is complete :rtype: bool""" - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False # TODO: contrib a high level unwrap method to hvac that works diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index dabfb6c2..eb31b782 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -22,6 +22,7 @@ # Adam Gandelman # +import collections import errno import hashlib import math @@ -93,6 +94,88 @@ DEFAULT_MINIMUM_PGS = 2 +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. 
+ + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. + log(e, level=ERROR) + return + data = { + 'osd-settings': json.dumps(settings, sort_keys=True)} + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + relation_settings=data) + + def validator(value, valid_type, valid_range=None): """ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values @@ -1635,5 +1718,67 @@ def __call__(self): continue ceph_conf[key] = conf[key] - return ceph_conf + + +class CephOSDConfContext(CephConfContext): + """Ceph config (ceph.conf) context. + + Consolidates settings from config-flags via CephConfContext with + settings provided by the mons. The config-flag values are preserved in + conf['osd'], settings from the mons which do not clash with config-flag + settings are in conf['osd_from_client'] and finally settings which do + clash are in conf['osd_from_client_conflict']. Rather than silently drop + the conflicting settings they are provided in the context so they can be + rendered commented out to give some visibility to the admin. + """ + + def __init__(self, permitted_sections=None): + super(CephOSDConfContext, self).__init__( + permitted_sections=permitted_sections) + try: + self.settings_from_mons = get_osd_settings('mon') + except OSDSettingConflict: + log( + "OSD settings from mons are inconsistent, ignoring them", + level=WARNING) + self.settings_from_mons = {} + + def filter_osd_from_mon_settings(self): + """Filter settings from client relation against config-flags. + + :returns: A tuple of (config-flag values, client settings which do + not conflict with config-flag values, client settings which + conflict with config-flag values) + :rtype: (OrderedDict, OrderedDict, OrderedDict) + """ + ceph_conf = super(CephOSDConfContext, self).__call__() + conflicting_entries = {} + clear_entries = {} + for key, value in self.settings_from_mons.items(): + if key in ceph_conf.get('osd', {}): + if ceph_conf['osd'][key] != value: + conflicting_entries[key] = value + else: + clear_entries[key] = value + clear_entries = _order_dict_by_key(clear_entries) + conflicting_entries = _order_dict_by_key(conflicting_entries) + return ceph_conf, clear_entries, conflicting_entries + + def __call__(self): + """Construct OSD config context. + + Standard context with two additional special keys. + osd_from_client_conflict: client settings which conflict with + config-flag values + osd_from_client: settings which do not conflict with config-flag + values + + :returns: OSD config context dict.
+ :rtype: dict + """ + conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() + conf['osd_from_client_conflict'] = osd_conflict + conf['osd_from_client'] = osd_clear + return conf diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py index 82472ff1..74bab40e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -32,6 +32,10 @@ def loopback_devices(): /dev/loop0: [0807]:961814 (/tmp/my.img) + or: + + /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) + :returns: dict: a dict mapping {loopback_dev: backing_file} ''' loopbacks = {} @@ -39,9 +43,9 @@ def loopback_devices(): output = check_output(cmd) if six.PY3: output = output.decode('utf-8') - devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] + devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] return loopbacks diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 647f6e4b..d7c37c17 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ from __future__ import print_function import copy from distutils.version import LooseVersion +from enum import Enum from functools import wraps from collections import namedtuple import glob @@ -57,6 +58,14 @@ 'This may not be compatible with software you are ' 'running in your shell.') + +class WORKLOAD_STATES(Enum): + ACTIVE = 'active' + BLOCKED = 'blocked' + MAINTENANCE = 'maintenance' + WAITING = 'waiting' + + cache = {} @@ -1088,22 +1097,33 @@ def function_tag(): return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() -def status_set(workload_state, message): +def status_set(workload_state, message, application=False): """Set the workload state with a message Use status-set to set the workload state with a message which is visible to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. + assume this is juju < 1.23 and juju-log the message instead. - workload_state -- valid juju workload state. - message -- status update message + workload_state -- valid juju workload state. str or WORKLOAD_STATES + message -- status update message + application -- Whether this is an application state set """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] + bad_state_msg = '{!r} is not a valid workload state' + + if isinstance(workload_state, str): + try: + # Convert string to enum. 
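+ # Illustrative note: status_set('active', msg) and + # status_set(WORKLOAD_STATES.ACTIVE, msg) are equivalent here; + # 'active'.upper() selects WORKLOAD_STATES.ACTIVE and both end + # up invoking ['status-set', 'active', msg].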
+ workload_state = WORKLOAD_STATES[workload_state.upper()] + except KeyError: + raise ValueError(bad_state_msg.format(workload_state)) + + if workload_state not in WORKLOAD_STATES: + raise ValueError(bad_state_msg.format(workload_state)) + + cmd = ['status-set'] + if application: + cmd.append('--application') + cmd.extend([workload_state.value, message]) try: ret = subprocess.call(cmd) if ret == 0: @@ -1111,7 +1131,7 @@ def status_set(workload_state, message): except OSError as e: if e.errno != errno.ENOENT: raise - log_message = 'status-set failed: {} {}'.format(workload_state, + log_message = 'status-set failed: {} {}'.format(workload_state.value, message) log(log_message, level='INFO') @@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None): """Get proxy settings from process environment variables. Get charm proxy settings from environment variables that correspond to - juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, - see lp:1782236) in a format suitable for passing to an application that - reacts to proxy settings passed as environment variables. Some applications - support lowercase or uppercase notation (e.g. curl), some support only - lowercase (e.g. wget), there are also subjectively rare cases of only - uppercase notation support. no_proxy CIDR and wildcard support also varies - between runtimes and applications as there is no enforced standard. + juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2, see + lp:1782236) and juju-ftp-proxy in a format suitable for passing to an + application that reacts to proxy settings passed as environment variables. + Some applications support lowercase or uppercase notation (e.g. curl), some + support only lowercase (e.g. wget), there are also subjectively rare cases + of only uppercase notation support. no_proxy CIDR and wildcard support also + varies between runtimes and applications as there is no enforced standard. Some applications may connect to multiple destinations and expose config options that would affect only proxy settings for a specific destination diff --git a/ceph-osd/hooks/charmhelpers/core/sysctl.py b/ceph-osd/hooks/charmhelpers/core/sysctl.py index f1f4a28f..386428d6 100644 --- a/ceph-osd/hooks/charmhelpers/core/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/core/sysctl.py @@ -17,14 +17,17 @@ import yaml -from subprocess import check_call +from subprocess import check_call, CalledProcessError from charmhelpers.core.hookenv import ( log, DEBUG, ERROR, + WARNING, ) +from charmhelpers.core.host import is_container + __author__ = 'Jorge Niedbalski R.
' @@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False): if ignore: call.append("-e") - check_call(call) + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index c26a131b..a3fd276d 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1113,6 +1113,7 @@ def get_mds_bootstrap_key(): admin_caps = collections.OrderedDict([ ('mds', ['allow *']), + ('mgr', ['allow *']), ('mon', ['allow *']), ('osd', ['allow *']) ]) @@ -2640,6 +2641,7 @@ def get_osd_state(osd_num, osd_goal_state=None): return osd_state if osd_state == osd_goal_state: return osd_state + time.sleep(3) def get_all_osd_states(osd_goal_states=None): @@ -2898,6 +2900,7 @@ def dirs_need_ownership_update(service): ('jewel', 'luminous'), ('luminous', 'mimic'), ('mimic', 'nautilus'), + ('nautilus', 'octopus'), ]) # Map UCA codenames to ceph codenames @@ -2914,6 +2917,7 @@ def dirs_need_ownership_update(service): 'rocky': 'mimic', 'stein': 'mimic', 'train': 'nautilus', + 'ussuri': 'octopus', } @@ -3066,3 +3070,57 @@ def osd_noout(enable): except subprocess.CalledProcessError as e: log(e) raise + + +class OSDConfigSetError(Exception): + """Error occurred applying OSD settings.""" + pass + + +def apply_osd_settings(settings): + """Apply the provided osd settings + + Apply the provided settings to all local OSDs unless settings are already + present. Settings stop being applied on encountering an error. + + :param settings: dict. Dictionary of settings to apply. + :returns: bool. True if commands ran successfully. + :raises: OSDConfigSetError + """ + current_settings = {} + base_cmd = 'ceph daemon osd.{osd_id} config --format=json' + get_cmd = base_cmd + ' get {key}' + set_cmd = base_cmd + ' set {key} {value}' + + def _get_cli_key(key): + return(key.replace(' ', '_')) + # Retrieve the current values to check keys are correct and to make this a + # noop if settings are already applied. + for osd_id in get_local_osd_ids(): + for key, value in sorted(settings.items()): + cli_key = _get_cli_key(key) + cmd = get_cmd.format(osd_id=osd_id, key=cli_key) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error retrieving osd setting: {}".format(out['error']), + level=ERROR) + return False + current_settings[key] = out[cli_key] + settings_diff = { + k: v + for k, v in settings.items() + if str(v) != str(current_settings[k])} + for key, value in sorted(settings_diff.items()): + log("Setting {} to {}".format(key, value), level=DEBUG) + cmd = set_cmd.format( + osd_id=osd_id, + key=_get_cli_key(key), + value=value) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error applying osd setting: {}".format(out['error']), + level=ERROR) + raise OSDConfigSetError + return True diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 2682be01..560e764c 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -88,6 +88,20 @@ osd max backfills = {{ osd_max_backfills }} {%- if osd_recovery_max_active %} osd recovery max active = {{ osd_recovery_max_active }} {%- endif %} + +{% if osd_from_client -%} +# The following are charm-provided options provided via the mon relation.
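+# (A hypothetical rendered example, assuming a client requested the +# whitelisted option via the osd-settings relation key: +# osd heartbeat grace = 20)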
+{% for key in osd_from_client -%} +{{ key }} = {{ osd_from_client[key] }} +{% endfor %} +{% endif %} +{% if osd_from_client_conflict -%} +# The following are charm provided options which conflict with options from +# config-flags. +{% for key in osd_from_client_conflict -%} +# {{ key }} = {{ osd_from_client_conflict[key] }} +{% endfor %} +{% endif %} {% if osd -%} # The following are user-provided options provided via the config-flags charm option. {% for key in osd -%} diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index bc58a9e4..0d11c5f1 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import OrderedDict import copy import unittest @@ -54,6 +55,7 @@ class CephHooksTestCase(unittest.TestCase): def setUp(self): super(CephHooksTestCase, self).setUp() + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -82,6 +84,8 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -93,6 +97,7 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -123,6 +128,8 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -134,6 +141,7 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -163,6 +171,8 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -174,6 +184,7 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -209,6 +220,8 @@ def 
test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -220,6 +233,7 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -232,6 +246,7 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): + self.maxDiff = None config = copy.deepcopy(CHARM_CONFIG) config['bluestore'] = True config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE @@ -252,6 +267,8 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -263,6 +280,7 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -288,11 +306,13 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, - 'osd': {'osd max write size': 1024}, + 'osd': OrderedDict([('osd max write size', 1024)]), 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, @@ -304,6 +324,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @@ -331,11 +352,13 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', 'old_auth': False, - 'osd': {'osd max write size': 1024}, + 'osd': OrderedDict([('osd max write size', 1024)]), 'crush_initial_weight': '0', 'osd_journal_size': 1024, 'osd_max_backfills': 1, 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), 'public_addr': '10.0.0.1', 'short_object_len': True, 'upgrade_in_progress': False, From 55b11cadfaadf91d5194336a0d46033e8bc38511 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Wed, 6 May 2020 13:28:29 +0200 Subject: [PATCH 1947/2699] Pre-freeze 
'make *-sync' Change-Id: Ia4f7043fc7281cea841baf81daded1273c4158f3 --- .../charmhelpers/contrib/openstack/context.py | 751 +++++++++++++++++- .../contrib/openstack/vaultlocker.py | 11 +- 2 files changed, 760 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index bc90804b..335e2d5c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -13,13 +13,17 @@ # limitations under the License. import collections +import copy +import enum import glob +import hashlib import json import math import os import re import socket import time + from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -50,7 +54,8 @@ INFO, ERROR, status_set, - network_get_primary_address + network_get_primary_address, + WARNING, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -110,6 +115,13 @@ ) from charmhelpers.core.unitdata import kv +try: + from sriov_netplan_shim import pci +except ImportError: + # The use of the function and contexts that require the pci module is + # optional. + pass + try: import psutil except ImportError: @@ -263,6 +275,12 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } + # Port is being introduced with LP Bug #1876188 + # but it is not currently required and may not be set in all + # cases, particularly in classic charms. + port = rdata.get('db_port') + if port: + ctxt['database_port'] = port if CompareOpenStackReleases(rel) < 'queens': ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): @@ -2396,3 +2414,734 @@ def get_ovs_use_veth(self): return False else: return _config + + +EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac']) + + +def resolve_pci_from_mapping_config(config_key): + """Resolve local PCI devices from MAC addresses in mapping config. + + Note that this function keeps a record of mac->PCI address lookups + in the local unit db as the devices will disappear from the system + once bound. + + :param config_key: Configuration option key to parse data from + :type config_key: str + :returns: PCI device address to Tuple(entity, mac) map + :rtype: collections.OrderedDict[str,Tuple[str,str]] + """ + devices = pci.PCINetDevices() + resolved_devices = collections.OrderedDict() + db = kv() + # Note that ``parse_data_port_mappings`` returns Dict regardless of input + for mac, entity in parse_data_port_mappings(config(config_key)).items(): + pcidev = devices.get_device_from_mac(mac) + if pcidev: + # NOTE: store mac->pci allocation as post binding + # it disappears from PCIDevices. + db.set(mac, pcidev.pci_address) + db.flush() + + pci_address = db.get(mac) + if pci_address: + resolved_devices[pci_address] = EntityMac(entity, mac) + + return resolved_devices + + +class DPDKDeviceContext(OSContextGenerator): + + def __init__(self, driver_key=None, bridges_key=None, bonds_key=None): + """Initialize DPDKDeviceContext. + + :param driver_key: Key to use when retrieving driver config. + :type driver_key: str + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.driver_key = driver_key or 'dpdk-driver' + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + def __call__(self): + """Populate context.
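+ + A hypothetical example of the result, assuming 'dpdk-driver' is set + to 'vfio-pci' and a single mapped device was resolved: + + {'devices': OrderedDict( + [('0000:01:00.0', EntityMac('br-ex', 'aa:bb:cc:dd:ee:ff'))]), + 'driver': 'vfio-pci'}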
+ + :returns: context + :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] + """ + driver = config(self.driver_key) + if driver is None: + return {} + # Resolve PCI devices for both directly used devices (_bridges) + # and devices for use in dpdk bonds (_bonds) + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return {'devices': pci_devices, + 'driver': driver} + + +class OVSDPDKDeviceContext(OSContextGenerator): + + def __init__(self, bridges_key=None, bonds_key=None): + """Initialize OVSDPDKDeviceContext. + + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + @staticmethod + def _parse_cpu_list(cpulist): + """Parses a linux cpulist for a numa node + + :returns: list of cores + :rtype: List[int] + """ + cores = [] + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + else: + cores.append(int(cpu_range)) + return cores + + def _numa_node_cores(self): + """Get map of numa node -> cpu core + + :returns: map of numa node -> cpu core + :rtype: Dict[str,List[int]] + """ + nodes = {} + node_regex = '/sys/devices/system/node/node*' + for node in glob.glob(node_regex): + index = node.lstrip('/sys/devices/system/node/node') + with open(os.path.join(node, 'cpulist')) as cpulist: + nodes[index] = self._parse_cpu_list(cpulist.read().strip()) + return nodes + + def cpu_mask(self): + """Get hex formatted CPU mask + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit. 
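+ + For example (hypothetical), with dpdk-socket-cores=1 and two NUMA + nodes whose first cores are 0 and 8, the mask is + (1 << 0) | (1 << 8) == 0x101, returned as '0x101'.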
+ :returns: hex formatted CPU mask + :rtype: str + """ + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') + + def socket_memory(self): + """Formatted list of socket memory configuration per NUMA node + + :returns: socket memory configuration per NUMA node + :rtype: str + """ + sm_size = config('dpdk-socket-memory') + node_regex = '/sys/devices/system/node/node*' + mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + if mem_list: + return ','.join(mem_list) + else: + return str(sm_size) + + def devices(self): + """List of PCI devices for use by DPDK + + :returns: List of PCI devices for use by DPDK + :rtype: collections.OrderedDict[str,str] + """ + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return pci_devices + + def _formatted_whitelist(self, flag): + """Flag formatted list of devices to whitelist + + :param flag: flag format to use + :type flag: str + :rtype: str + """ + whitelist = [] + for device in self.devices(): + whitelist.append(flag.format(device=device)) + return ' '.join(whitelist) + + def device_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the old style '-w' flag + + :returns: devices to whitelist prefixed by '-w ' + :rtype: str + """ + return self._formatted_whitelist('-w {device}') + + def pci_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the new style '--pci-whitelist' flag + + :returns: devices to whitelist prefixed by '--pci-whitelist ' + :rtype: str + """ + return self._formatted_whitelist('--pci-whitelist {device}') + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[bool,str]] + """ + ctxt = {} + whitelist = self.device_whitelist() + if whitelist: + ctxt['dpdk_enabled'] = config('enable-dpdk') + ctxt['device_whitelist'] = self.device_whitelist() + ctxt['socket_memory'] = self.socket_memory() + ctxt['cpu_mask'] = self.cpu_mask() + return ctxt + + +class BridgePortInterfaceMap(object): + """Build a map of bridge ports and interfaces from charm configuration. + + NOTE: the handling of this detail in the charm is pre-deprecated. + + The long term goal is for network connectivity detail to be modelled in + the server provisioning layer (such as MAAS) which in turn will provide + a Netplan YAML description that will be used to drive Open vSwitch. + + Until we get to that reality the charm will need to configure this + detail based on application level configuration options. + + There is an established way of mapping interfaces to ports and bridges + in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we + will carry that forward. + + The relationship between bridge, port and interface(s). + +--------+ + | bridge | + +--------+ + | + +----------------+ + | port aka. bond | + +----------------+ + | | + +-+ +-+ + |i| |i| + |n| |n| + |t| |t| + |0| |N| + +-+ +-+ + """ + class interface_type(enum.Enum): + """Supported interface types. + + Supported interface types can be found in the ``iface_types`` column + in the ``Open_vSwitch`` table on a running system. + """ + dpdk = 'dpdk' + internal = 'internal' + system = 'system' + + def __str__(self): + """Return string representation of value. + + :returns: string representation of value.
+ :rtype: str + """ + return self.value + + def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, + global_mtu=None): + """Initialize map. + + :param bridges_key: Name of bridge:interface/port map config key + (default: 'data-port') + :type bridges_key: Optional[str] + :param bonds_key: Name of port-name:interface map config key + (default: 'dpdk-bond-mappings') + :type bonds_key: Optional[str] + :param enable_dpdk_key: Name of DPDK toggle config key + (default: 'enable-dpdk') + :type enable_dpdk_key: Optional[str] + :param global_mtu: Set an MTU on all interfaces at map initialization. + + The default is to have Open vSwitch get this from the underlying + interface as set up by bare metal provisioning. + + Note that you can augment the MTU on an individual interface basis + like this: + + ifdatamap = bpi.get_ifdatamap(bridge, port) + ifdatamap = { + port: { + **ifdata, + **{'mtu-request': my_individual_mtu_map[port]}, + } + for port, ifdata in ifdatamap.items() + } + :type global_mtu: Optional[int] + """ + bridges_key = bridges_key or 'data-port' + bonds_key = bonds_key or 'dpdk-bond-mappings' + enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' + self._map = collections.defaultdict( + lambda: collections.defaultdict(dict)) + self._ifname_mac_map = collections.defaultdict(list) + self._mac_ifname_map = {} + self._mac_pci_address_map = {} + + # First we iterate over the list of physical interfaces visible to the + # system and update interface name to mac and mac to interface name map + for ifname in list_nics(): + if not is_phy_iface(ifname): + continue + mac = get_nic_hwaddr(ifname) + self._ifname_mac_map[ifname] = [mac] + self._mac_ifname_map[mac] = ifname + + # In light of the pre-deprecation notice in the docstring of this + # class we will expose the ability to configure OVS bonds as a + # DPDK-only feature, but generally use the data structures internally. + if config(enable_dpdk_key): + # resolve PCI address of interfaces listed in the bridges and bonds + # charm configuration options. Note that for already bound + # interfaces the helper will retrieve MAC address from the unit + # KV store as the information is no longer available in sysfs.
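+ # (Illustrative: a hypothetical data-port entry of + # 'br-ex:aa:bb:cc:dd:ee:ff' would resolve below to + # {'0000:01:00.0': EntityMac('br-ex', 'aa:bb:cc:dd:ee:ff')}, + # the PCI address coming from the unit KV store once the device + # is bound to DPDK.)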
+ _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items. + + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. 
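+ + Entries land in a nested bridge -> port -> interface map; a + hypothetical DPDK interface would, for example, be recorded as + _map['br-ex']['dpdk-bond0']['dpdk-6ba9128'] = + {'type': 'dpdk', 'pci-address': '0000:01:00.0'}.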
+ + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
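+ + The value is parsed as whitespace-separated entries of the form + bond:mode:lacp:lacp-time, e.g. (hypothetical) + 'dpdk-bond0:balance-slb:off:slow'; empty fields fall back to + DEFAULT_LACP_CONFIG and an entry with no bond name applies to + ALL_BONDS.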
+ + :param config_key: Configuration key to retrieve data from + (default: ``dpdk-bond-config``) + :type config_key: Optional[str] + """ + self.config_key = config_key or 'dpdk-bond-config' + + self.lacp_config = { + self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG) + } + + lacp_config = config(self.config_key) + if lacp_config: + lacp_config_map = lacp_config.split() + for entry in lacp_config_map: + bond, entry = entry.partition(':')[0:3:2] + if not bond: + bond = self.ALL_BONDS + + mode, entry = entry.partition(':')[0:3:2] + if not mode: + mode = self.DEFAULT_LACP_CONFIG['mode'] + assert mode in self.BOND_MODES, \ + "Bond mode {} is invalid".format(mode) + + lacp, entry = entry.partition(':')[0:3:2] + if not lacp: + lacp = self.DEFAULT_LACP_CONFIG['lacp'] + assert lacp in self.BOND_LACP, \ + "Bond lacp {} is invalid".format(lacp) + + lacp_time, entry = entry.partition(':')[0:3:2] + if not lacp_time: + lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time'] + assert lacp_time in self.BOND_LACP_TIME, \ + "Bond lacp-time {} is invalid".format(lacp_time) + + self.lacp_config[bond] = { + 'mode': mode, + 'lacp': lacp, + 'lacp-time': lacp_time + } + + def get_bond_config(self, bond): + """Get the LACP configuration for a bond + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Dict[str,str]] + """ + return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS]) + + def get_ovs_portdata(self, bond): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Union[str,Dict[str,str]]] + """ + bond_config = self.get_bond_config(bond) + return { + 'bond_mode': bond_config['mode'], + 'lacp': bond_config['lacp'], + 'other_config': { + 'lacp-time': bond_config['lacp-time'], + }, + } + + +class SRIOVContext(OSContextGenerator): + """Provide context for configuring SR-IOV devices.""" + + class sriov_config_mode(enum.Enum): + """Mode in which SR-IOV is configured. + + The configuration option identified by the ``numvfs_key`` parameter + is overloaded and defines in which mode the charm should interpret + the other SR-IOV-related configuration options. + """ + auto = 'auto' + blanket = 'blanket' + explicit = 'explicit' + + def _determine_numvfs(self, device, sriov_numvfs): + """Determine number of Virtual Functions (VFs) configured for device. + + :param device: Object describing a PCI Network interface card (NIC). + :type device: sriov_netplan_shim.pci.PCINetDevice + :param sriov_numvfs: Number of VFs requested for blanket configuration. + :type sriov_numvfs: int + :returns: Number of VFs to configure for device + :rtype: Optional[int] + """ + + def _get_capped_numvfs(requested): + """Get a number of VFs that does not exceed individual card limits. + + Depending on the make and model of NIC, the number of VFs supported + varies. Requesting more VFs than a card supports would be a fatal + error, cap the requested number at the total number of VFs each + individual card supports. + + :param requested: Number of VFs requested + :type requested: int + :returns: Number of VFs allowed + :rtype: int + """ + actual = min(int(requested), int(device.sriov_totalvfs)) + if actual < int(requested): + log('Requested VFs ({}) too high for device {}.
Falling back ' + 'to value supprted by device: {}' + .format(requested, device.interface_name, + device.sriov_totalvfs), + level=WARNING) + return actual + + if self._sriov_config_mode == self.sriov_config_mode.auto: + # auto-mode + # + # If device mapping configuration is present, return information + # on cards with mapping. + # + # If no device mapping configuration is present, return information + # for all cards. + # + # The maximum number of VFs supported by card will be used. + if (self._sriov_mapped_devices and + device.interface_name not in self._sriov_mapped_devices): + log('SR-IOV configured in auto mode: No device mapping for {}' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs(device.sriov_totalvfs) + elif self._sriov_config_mode == self.sriov_config_mode.blanket: + # blanket-mode + # + # User has specified a number of VFs that should apply to all + # cards with support for VFs. + return _get_capped_numvfs(sriov_numvfs) + elif self._sriov_config_mode == self.sriov_config_mode.explicit: + # explicit-mode + # + # User has given a list of interface names and associated number of + # VFs + if device.interface_name not in self._sriov_config_devices: + log('SR-IOV configured in explicit mode: No device:numvfs ' + 'pair for device {}, skipping.' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs( + self._sriov_config_devices[device.interface_name]) + else: + raise RuntimeError('This should not be reached') + + def __init__(self, numvfs_key=None, device_mappings_key=None): + """Initialize map from PCI devices and configuration options. + + :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs') + :type numvfs_key: Optional[str] + :param device_mappings_key: Config key for device mappings + (default: 'sriov-device-mappings') + :type device_mappings_key: Optional[str] + :raises: RuntimeError + """ + numvfs_key = numvfs_key or 'sriov-numvfs' + device_mappings_key = device_mappings_key or 'sriov-device-mappings' + + devices = pci.PCINetDevices() + charm_config = config() + sriov_numvfs = charm_config.get(numvfs_key) or '' + sriov_device_mappings = charm_config.get(device_mappings_key) or '' + + # create list of devices from sriov_device_mappings config option + self._sriov_mapped_devices = [ + pair.split(':', 1)[1] + for pair in sriov_device_mappings.split() + ] + + # create map of device:numvfs from sriov_numvfs config option + self._sriov_config_devices = { + ifname: numvfs for ifname, numvfs in ( + pair.split(':', 1) for pair in sriov_numvfs.split() + if ':' in sriov_numvfs) + } + + # determine configuration mode from contents of sriov_numvfs + if sriov_numvfs == 'auto': + self._sriov_config_mode = self.sriov_config_mode.auto + elif sriov_numvfs.isdigit(): + self._sriov_config_mode = self.sriov_config_mode.blanket + elif ':' in sriov_numvfs: + self._sriov_config_mode = self.sriov_config_mode.explicit + else: + raise RuntimeError('Unable to determine mode of SR-IOV ' + 'configuration.') + + self._map = { + device.interface_name: self._determine_numvfs(device, sriov_numvfs) + for device in devices.pci_devices + if device.sriov and + self._determine_numvfs(device, sriov_numvfs) is not None + } + + def __call__(self): + """Provide SR-IOV context. + + :returns: Map interface name: min(configured, max) virtual functions. 
+ Example: + { + 'eth0': 16, + 'eth1': 32, + 'eth2': 64, + } + :rtype: Dict[str,int] + """ + return self._map diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4690f6b0..4ee6c1db 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -163,7 +163,16 @@ def retrieve_secret_id(url, token): :returns: secret_id to use for Vault Access :rtype: str""" import hvac - client = hvac.Client(url=url, token=token) + try: + # hvac 0.10.1 changed default adapter to JSONAdapter + client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) + except AttributeError: + # hvac < 0.6.2 doesn't have adapter but uses the same response interface + client = hvac.Client(url=url, token=token) + else: + # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate + if not isinstance(client.adapter, hvac.adapters.Request): + client.adapter = hvac.adapters.Request(base_uri=url, token=token) response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() From 15f86705be8c223ea1ddf449acc4ca2d6a904c0b Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Wed, 6 May 2020 18:35:22 +0200 Subject: [PATCH 1948/2699] Pre-freeze 'make *-sync' Change-Id: I7f029e37e4743587912c3264a38556caf8e00b3a --- .../charmhelpers/contrib/openstack/context.py | 751 +++++++++++++++++- .../contrib/openstack/vaultlocker.py | 11 +- ceph-mon/lib/charms_ceph/broker.py | 12 +- ceph-mon/lib/charms_ceph/utils.py | 197 ++++- 4 files changed, 945 insertions(+), 26 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index bc90804b..335e2d5c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -13,13 +13,17 @@ # limitations under the License. import collections +import copy +import enum import glob +import hashlib import json import math import os import re import socket import time + from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -50,7 +54,8 @@ INFO, ERROR, status_set, - network_get_primary_address + network_get_primary_address, + WARNING, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -110,6 +115,13 @@ ) from charmhelpers.core.unitdata import kv +try: + from sriov_netplan_shim import pci +except ImportError: + # The use of the function and contexts that require the pci module is + # optional. + pass + try: import psutil except ImportError: @@ -263,6 +275,12 @@ def __call__(self): 'database_password': rdata.get(password_setting), 'database_type': 'mysql+pymysql' } + # Port is being introduced with LP Bug #1876188 + # but it not currently required and may not be set in all + # cases, particularly in classic charms. + port = rdata.get('db_port') + if port: + ctxt['database_port'] = port if CompareOpenStackReleases(rel) < 'queens': ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): @@ -2396,3 +2414,734 @@ def get_ovs_use_veth(self): return False else: return _config + + +EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac']) + + +def resolve_pci_from_mapping_config(config_key): + """Resolve local PCI devices from MAC addresses in mapping config. 
+ + Note that this function keeps a record of mac->PCI address lookups + in the local unit db as the devices will disappear from the system + once bound. + + :param config_key: Configuration option key to parse data from + :type config_key: str + :returns: PCI device address to Tuple(entity, mac) map + :rtype: collections.OrderedDict[str,Tuple[str,str]] + """ + devices = pci.PCINetDevices() + resolved_devices = collections.OrderedDict() + db = kv() + # Note that ``parse_data_port_mappings`` returns Dict regardless of input + for mac, entity in parse_data_port_mappings(config(config_key)).items(): + pcidev = devices.get_device_from_mac(mac) + if pcidev: + # NOTE: store mac->pci allocation as post binding + # it disappears from PCIDevices. + db.set(mac, pcidev.pci_address) + db.flush() + + pci_address = db.get(mac) + if pci_address: + resolved_devices[pci_address] = EntityMac(entity, mac) + + return resolved_devices + + +class DPDKDeviceContext(OSContextGenerator): + + def __init__(self, driver_key=None, bridges_key=None, bonds_key=None): + """Initialize DPDKDeviceContext. + + :param driver_key: Key to use when retrieving driver config. + :type driver_key: str + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.driver_key = driver_key or 'dpdk-driver' + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] + """ + driver = config(self.driver_key) + if driver is None: + return {} + # Resolve PCI devices for both directly used devices (_bridges) + # and devices for use in dpdk bonds (_bonds) + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return {'devices': pci_devices, + 'driver': driver} + + +class OVSDPDKDeviceContext(OSContextGenerator): + + def __init__(self, bridges_key=None, bonds_key=None): + """Initialize OVSDPDKDeviceContext. + + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + @staticmethod + def _parse_cpu_list(cpulist): + """Parses a linux cpulist for a numa node + + :returns: list of cores + :rtype: List[int] + """ + cores = [] + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + else: + cores.append(int(cpu_range)) + return cores + + def _numa_node_cores(self): + """Get map of numa node -> cpu core + + :returns: map of numa node -> cpu core + :rtype: Dict[str,List[int]] + """ + nodes = {} + node_regex = '/sys/devices/system/node/node*' + for node in glob.glob(node_regex): + index = node.lstrip('/sys/devices/system/node/node') + with open(os.path.join(node, 'cpulist')) as cpulist: + nodes[index] = self._parse_cpu_list(cpulist.read().strip()) + return nodes + + def cpu_mask(self): + """Get hex formatted CPU mask + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit.
+ :returns: hex formatted CPU mask + :rtype: str + """ + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') + + def socket_memory(self): + """Formatted list of socket memory configuration per NUMA node + + :returns: socket memory configuration per NUMA node + :rtype: str + """ + sm_size = config('dpdk-socket-memory') + node_regex = '/sys/devices/system/node/node*' + mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + if mem_list: + return ','.join(mem_list) + else: + return str(sm_size) + + def devices(self): + """List of PCI devices for use by DPDK + + :returns: List of PCI devices for use by DPDK + :rtype: collections.OrderedDict[str,str] + """ + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return pci_devices + + def _formatted_whitelist(self, flag): + """Flag formatted list of devices to whitelist + + :param flag: flag format to use + :type flag: str + :rtype: str + """ + whitelist = [] + for device in self.devices(): + whitelist.append(flag.format(device=device)) + return ' '.join(whitelist) + + def device_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the old style '-w' flag + + :returns: devices to whitelist prefixed by '-w ' + :rtype: str + """ + return self._formatted_whitelist('-w {device}') + + def pci_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the new style '--pci-whitelist' flag + + :returns: devices to whitelist prefixed by '--pci-whitelist ' + :rtype: str + """ + return self._formatted_whitelist('--pci-whitelist {device}') + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[bool,str]] + """ + ctxt = {} + whitelist = self.device_whitelist() + if whitelist: + ctxt['dpdk_enabled'] = config('enable-dpdk') + ctxt['device_whitelist'] = self.device_whitelist() + ctxt['socket_memory'] = self.socket_memory() + ctxt['cpu_mask'] = self.cpu_mask() + return ctxt + + +class BridgePortInterfaceMap(object): + """Build a map of bridge ports and interfaces from charm configuration. + + NOTE: the handling of this detail in the charm is pre-deprecated. + + The long term goal is for network connectivity detail to be modelled in + the server provisioning layer (such as MAAS) which in turn will provide + a Netplan YAML description that will be used to drive Open vSwitch. + + Until we get to that reality the charm will need to configure this + detail based on application level configuration options. + + There is an established way of mapping interfaces to ports and bridges + in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we + will carry that forward. + + The relationship between bridge, port and interface(s). + +--------+ + | bridge | + +--------+ + | + +----------------+ + | port aka. bond | + +----------------+ + | | + +-+ +-+ + |i| |i| + |n| |n| + |t| |t| + |0| |N| + +-+ +-+ + """ + class interface_type(enum.Enum): + """Supported interface types. + + Supported interface types can be found in the ``iface_types`` column + in the ``Open_vSwitch`` table on a running system. + """ + dpdk = 'dpdk' + internal = 'internal' + system = 'system' + + def __str__(self): + """Return string representation of value. + + :returns: string representation of value.
+ :rtype: str + """ + return self.value + + def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, + global_mtu=None): + """Initialize map. + + :param bridges_key: Name of bridge:interface/port map config key + (default: 'data-port') + :type bridges_key: Optional[str] + :param bonds_key: Name of port-name:interface map config key + (default: 'dpdk-bond-mappings') + :type bonds_key: Optional[str] + :param enable_dpdk_key: Name of DPDK toggle config key + (default: 'enable-dpdk') + :type enable_dpdk_key: Optional[str] + :param global_mtu: Set an MTU on all interfaces at map initialization. + + The default is to have Open vSwitch get this from the underlying + interface as set up by bare metal provisioning. + + Note that you can augment the MTU on an individual interface basis + like this: + + ifdatamap = bpi.get_ifdatamap(bridge, port) + ifdatamap = { + port: { + **ifdata, + **{'mtu-request': my_individual_mtu_map[port]}, + } + for port, ifdata in ifdatamap.items() + } + :type global_mtu: Optional[int] + """ + bridges_key = bridges_key or 'data-port' + bonds_key = bonds_key or 'dpdk-bond-mappings' + enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' + self._map = collections.defaultdict( + lambda: collections.defaultdict(dict)) + self._ifname_mac_map = collections.defaultdict(list) + self._mac_ifname_map = {} + self._mac_pci_address_map = {} + + # First we iterate over the list of physical interfaces visible to the + # system and update interface name to mac and mac to interface name map + for ifname in list_nics(): + if not is_phy_iface(ifname): + continue + mac = get_nic_hwaddr(ifname) + self._ifname_mac_map[ifname] = [mac] + self._mac_ifname_map[mac] = ifname + + # In light of the pre-deprecation notice in the docstring of this + # class we will expose the ability to configure OVS bonds as a + # DPDK-only feature, but generally use the data structures internally. + if config(enable_dpdk_key): + # resolve PCI address of interfaces listed in the bridges and bonds + # charm configuration options. Note that for already bound + # interfaces the helper will retrieve MAC address from the unit + # KV store as the information is no longer available in sysfs.
+ _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items. + + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. 
+ + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
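# Editor's note: a sketch of the structure get_ifdatamap() above returns for
# a single-interface DPDK port; the names, MTU and PCI address are examples
# only. Note the key rename: the map stores 'mtu-request' internally while
# the OVSDB helpers expect 'mtu_request'.
example_ifdatamap = {
    'dpdk-6154d7c': {
        'type': 'dpdk',
        'mtu_request': '9000',
        'options': {
            'dpdk-devargs': '0000:03:00.0',
        },
    },
}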
+
+ :param config_key: Configuration key to retrieve data from
+ (default: ``dpdk-bond-config``)
+ :type config_key: Optional[str]
+ """
+ self.config_key = config_key or 'dpdk-bond-config'
+
+ self.lacp_config = {
+ self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
+ }
+
+ lacp_config = config(self.config_key)
+ if lacp_config:
+ lacp_config_map = lacp_config.split()
+ for entry in lacp_config_map:
+ bond, entry = entry.partition(':')[0:3:2]
+ if not bond:
+ bond = self.ALL_BONDS
+
+ mode, entry = entry.partition(':')[0:3:2]
+ if not mode:
+ mode = self.DEFAULT_LACP_CONFIG['mode']
+ assert mode in self.BOND_MODES, \
+ "Bond mode {} is invalid".format(mode)
+
+ lacp, entry = entry.partition(':')[0:3:2]
+ if not lacp:
+ lacp = self.DEFAULT_LACP_CONFIG['lacp']
+ assert lacp in self.BOND_LACP, \
+ "Bond lacp {} is invalid".format(lacp)
+
+ lacp_time, entry = entry.partition(':')[0:3:2]
+ if not lacp_time:
+ lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
+ assert lacp_time in self.BOND_LACP_TIME, \
+ "Bond lacp-time {} is invalid".format(lacp_time)
+
+ self.lacp_config[bond] = {
+ 'mode': mode,
+ 'lacp': lacp,
+ 'lacp-time': lacp_time
+ }
+
+ def get_bond_config(self, bond):
+ """Get the LACP configuration for a bond
+
+ :param bond: the bond name
+ :return: a dictionary with the configuration of the bond
+ :rtype: Dict[str,Dict[str,str]]
+ """
+ return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])
+
+ def get_ovs_portdata(self, bond):
+ """Get structure suitable for charmhelpers.contrib.network.ovs helpers.
+
+ :param bond: the bond name
+ :return: a dictionary with the configuration of the bond
+ :rtype: Dict[str,Union[str,Dict[str,str]]]
+ """
+ bond_config = self.get_bond_config(bond)
+ return {
+ 'bond_mode': bond_config['mode'],
+ 'lacp': bond_config['lacp'],
+ 'other_config': {
+ 'lacp-time': bond_config['lacp-time'],
+ },
+ }
+
+
+class SRIOVContext(OSContextGenerator):
+ """Provide context for configuring SR-IOV devices."""
+
+ class sriov_config_mode(enum.Enum):
+ """Mode in which SR-IOV is configured.
+
+ The configuration option identified by the ``numvfs_key`` parameter
+ is overloaded and defines in which mode the charm should interpret
+ the other SR-IOV-related configuration options.
+ """
+ auto = 'auto'
+ blanket = 'blanket'
+ explicit = 'explicit'
+
+ def _determine_numvfs(self, device, sriov_numvfs):
+ """Determine number of Virtual Functions (VFs) configured for device.
+
+ :param device: Object describing a PCI Network interface card (NIC).
+ :type device: sriov_netplan_shim.pci.PCINetDevice
+ :param sriov_numvfs: Number of VFs requested for blanket configuration.
+ :type sriov_numvfs: int
+ :returns: Number of VFs to configure for device
+ :rtype: Optional[int]
+ """
+
+ def _get_capped_numvfs(requested):
+ """Get a number of VFs that does not exceed individual card limits.
+
+ Depending on the make and model of NIC, the number of VFs supported
+ varies. Requesting more VFs than a card supports would be a fatal
+ error, so cap the requested number at the total number of VFs each
+ individual card supports.
+
+ :param requested: Number of VFs requested
+ :type requested: int
+ :returns: Number of VFs allowed
+ :rtype: int
+ """
+ actual = min(int(requested), int(device.sriov_totalvfs))
+ if actual < int(requested):
+ log('Requested VFs ({}) too high for device {}. 
Falling back '
+ 'to value supported by device: {}'
+ .format(requested, device.interface_name,
+ device.sriov_totalvfs),
+ level=WARNING)
+ return actual
+
+ if self._sriov_config_mode == self.sriov_config_mode.auto:
+ # auto-mode
+ #
+ # If device mapping configuration is present, return information
+ # on cards with mapping.
+ #
+ # If no device mapping configuration is present, return information
+ # for all cards.
+ #
+ # The maximum number of VFs supported by card will be used.
+ if (self._sriov_mapped_devices and
+ device.interface_name not in self._sriov_mapped_devices):
+ log('SR-IOV configured in auto mode: No device mapping for {}'
+ .format(device.interface_name),
+ level=DEBUG)
+ return
+ return _get_capped_numvfs(device.sriov_totalvfs)
+ elif self._sriov_config_mode == self.sriov_config_mode.blanket:
+ # blanket-mode
+ #
+ # User has specified a number of VFs that should apply to all
+ # cards with support for VFs.
+ return _get_capped_numvfs(sriov_numvfs)
+ elif self._sriov_config_mode == self.sriov_config_mode.explicit:
+ # explicit-mode
+ #
+ # User has given a list of interface names and associated number of
+ # VFs
+ if device.interface_name not in self._sriov_config_devices:
+ log('SR-IOV configured in explicit mode: No device:numvfs '
+ 'pair for device {}, skipping.'
+ .format(device.interface_name),
+ level=DEBUG)
+ return
+ return _get_capped_numvfs(
+ self._sriov_config_devices[device.interface_name])
+ else:
+ raise RuntimeError('This should not be reached')
+
+ def __init__(self, numvfs_key=None, device_mappings_key=None):
+ """Initialize map from PCI devices and configuration options.
+
+ :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
+ :type numvfs_key: Optional[str]
+ :param device_mappings_key: Config key for device mappings
+ (default: 'sriov-device-mappings')
+ :type device_mappings_key: Optional[str]
+ :raises: RuntimeError
+ """
+ numvfs_key = numvfs_key or 'sriov-numvfs'
+ device_mappings_key = device_mappings_key or 'sriov-device-mappings'
+
+ devices = pci.PCINetDevices()
+ charm_config = config()
+ sriov_numvfs = charm_config.get(numvfs_key) or ''
+ sriov_device_mappings = charm_config.get(device_mappings_key) or ''
+
+ # create list of devices from sriov_device_mappings config option
+ self._sriov_mapped_devices = [
+ pair.split(':', 1)[1]
+ for pair in sriov_device_mappings.split()
+ ]
+
+ # create map of device:numvfs from sriov_numvfs config option
+ self._sriov_config_devices = {
+ ifname: numvfs for ifname, numvfs in (
+ pair.split(':', 1) for pair in sriov_numvfs.split()
+ if ':' in sriov_numvfs)
+ }
+
+ # determine configuration mode from contents of sriov_numvfs
+ if sriov_numvfs == 'auto':
+ self._sriov_config_mode = self.sriov_config_mode.auto
+ elif sriov_numvfs.isdigit():
+ self._sriov_config_mode = self.sriov_config_mode.blanket
+ elif ':' in sriov_numvfs:
+ self._sriov_config_mode = self.sriov_config_mode.explicit
+ else:
+ raise RuntimeError('Unable to determine mode of SR-IOV '
+ 'configuration.')
+
+ self._map = {
+ device.interface_name: self._determine_numvfs(device, sriov_numvfs)
+ for device in devices.pci_devices
+ if device.sriov and
+ self._determine_numvfs(device, sriov_numvfs) is not None
+ }
+
+ def __call__(self):
+ """Provide SR-IOV context.
+
+ :returns: Map interface name: min(configured, max) virtual functions.
+ Example: + { + 'eth0': 16, + 'eth1': 32, + 'eth2': 64, + } + :rtype: Dict[str,int] + """ + return self._map diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4690f6b0..4ee6c1db 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -163,7 +163,16 @@ def retrieve_secret_id(url, token): :returns: secret_id to use for Vault Access :rtype: str""" import hvac - client = hvac.Client(url=url, token=token) + try: + # hvac 0.10.1 changed default adapter to JSONAdapter + client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) + except AttributeError: + # hvac < 0.6.2 doesn't have adapter but uses the same response interface + client = hvac.Client(url=url, token=token) + else: + # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate + if not isinstance(client.adapter, hvac.adapters.Request): + client.adapter = hvac.adapters.Request(base_uri=url, token=token) response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index ceda9a85..726f9498 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -160,9 +160,10 @@ def handle_create_erasure_profile(request, service): # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket failure_domain = request.get('failure-domain') name = request.get('name') - k = request.get('k') - m = request.get('m') - l = request.get('l') + # Binary Distribution Matrix (BDM) parameters + bdm_k = request.get('k') + bdm_m = request.get('m') + bdm_l = request.get('l') if failure_domain not in CEPH_BUCKET_TYPES: msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) @@ -171,7 +172,8 @@ def handle_create_erasure_profile(request, service): create_erasure_profile(service=service, erasure_plugin_name=erasure_type, profile_name=name, failure_domain=failure_domain, - data_chunks=k, coding_chunks=m, locality=l) + data_chunks=bdm_k, coding_chunks=bdm_m, + locality=bdm_l) def handle_add_permissions_to_key(request, service): @@ -556,7 +558,7 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] - if len(validator_params) is 1: + if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) else: diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 8cbcdd5b..a3fd276d 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -25,6 +25,7 @@ import time import uuid +from contextlib import contextmanager from datetime import datetime from charmhelpers.core import hookenv @@ -175,12 +176,16 @@ def unmounted_disks(): context = pyudev.Context() for device in context.list_devices(DEVTYPE='disk'): if device['SUBSYSTEM'] == 'block': + if device.device_node is None: + continue + matched = False for block_type in [u'dm-', u'loop', u'ram', u'nbd']: if block_type in device.device_node: matched = True if matched: continue + disks.append(device.device_node) log("Found disks: {}".format(disks)) return [disk for disk in disks if not is_device_mounted(disk)] @@ -637,7 +642,7 @@ def _get_osd_num_from_dirname(dirname): :raises ValueError: if the osd number cannot be parsed from the provided 
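# Editor's note: the broker.py hunk above replaces "len(validator_params) is 1"
# with "== 1". A short illustration of why identity comparison against integer
# values is unreliable: CPython merely happens to cache small ints, so "is"
# may return False for equal values outside that cache.
a = int('1001')
b = int('1001')
print(a == b)           # True: the values are equal
print(a is b)           # usually False: distinct objects in CPython
print(len(['x']) == 1)  # the portable way to compare a length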
directory name.
 """
- match = re.search('ceph-(?P<osd_id>\d+)', dirname)
+ match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
 
 if not match:
 raise ValueError("dirname not in correct format: {}".format(dirname))
 
@@ -706,7 +711,7 @@ def get_version():
 package = "ceph"
 try:
 pkg = cache[package]
- except:
+ except KeyError:
 # the package is unknown to the current apt cache.
 e = 'Could not determine version of package with no installation ' \
 'candidate: %s' % package
@@ -721,7 +726,7 @@ def get_version():
 
 # x.y match only for 20XX.X
 # and ignore patch level for other packages
- match = re.match('^(\d+)\.(\d+)', vers)
+ match = re.match(r'^(\d+)\.(\d+)', vers)
 if match:
 vers = match.group(0)
@@ -956,11 +961,11 @@ def start_osds(devices):
 rescan_osd_devices()
 if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and
 cmp_pkgrevno('ceph', '14.2.0') < 0):
- # Use ceph-disk activate for directory based OSD's
- for dev_or_path in devices:
- if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
- subprocess.check_call(
- ['ceph-disk', 'activate', dev_or_path])
+ # Use ceph-disk activate for directory based OSD's
+ for dev_or_path in devices:
+ if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+ subprocess.check_call(
+ ['ceph-disk', 'activate', dev_or_path])
 
 def udevadm_settle():
@@ -978,6 +983,7 @@ def rescan_osd_devices():
 udevadm_settle()
 
+
 _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring'
 
@@ -1002,6 +1008,7 @@ def generate_monitor_secret():
 
 return "{}==".format(res.split('=')[1].strip())
 
+
 # OSD caps taken from ceph-create-keys
 _osd_bootstrap_caps = {
 'mon': [
@@ -1039,7 +1046,7 @@ def get_osd_bootstrap_key():
 # Attempt to get/create a key using the OSD bootstrap profile first
 key = get_named_key('bootstrap-osd',
 _osd_bootstrap_caps_profile)
- except:
+ except Exception:
 # If that fails try with the older style permissions
 key = get_named_key('bootstrap-osd',
 _osd_bootstrap_caps)
@@ -1063,6 +1070,7 @@ def import_radosgw_key(key):
 ]
 subprocess.check_call(cmd)
 
+
 # OSD caps taken from ceph-create-keys
 _radosgw_caps = {
 'mon': ['allow rw'],
@@ -1300,7 +1308,7 @@ def bootstrap_monitor_cluster(secret):
 path,
 done,
 init_marker)
- except:
+ except Exception:
 raise
 finally:
 os.unlink(keyring)
@@ -2417,10 +2425,11 @@ def upgrade_osd(new_version):
 # way to update the code on the node.
 if not dirs_need_ownership_update('osd'):
 log('Restarting all OSDs to load new binaries', DEBUG)
- if systemd():
- service_restart('ceph-osd.target')
- else:
- service_restart('ceph-osd-all')
+ with maintain_all_osd_states():
+ if systemd():
+ service_restart('ceph-osd.target')
+ else:
+ service_restart('ceph-osd-all')
 return
 
 # Need to change the ownership of all directories which are not OSD
@@ -2465,11 +2474,12 @@ def _upgrade_single_osd(osd_num, osd_dir):
 :raises IOError: if an error occurs reading/writing to a file as part
 of the upgrade process
 """
- stop_osd(osd_num)
- disable_osd(osd_num)
- update_owner(osd_dir)
- enable_osd(osd_num)
- start_osd(osd_num)
+ with maintain_osd_state(osd_num):
+ stop_osd(osd_num)
+ disable_osd(osd_num)
+ update_owner(osd_dir)
+ enable_osd(osd_num)
+ start_osd(osd_num)
 
 def stop_osd(osd_num):
@@ -2596,6 +2606,98 @@ def update_owner(path, recurse_dirs=True):
 secs=elapsed_time.total_seconds(), path=path), DEBUG)
 
+def get_osd_state(osd_num, osd_goal_state=None):
+ """Get OSD state or loop until OSD state matches OSD goal state.
+
+ If osd_goal_state is None, just return the current OSD state.
+ If osd_goal_state is not None, loop until the current OSD state matches
+ the OSD goal state.
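# Editor's note: the upgrade hunks above wrap OSD restarts in context managers
# that record state first and wait for it to be restored afterwards. A
# distilled, Ceph-free version of that pattern, with stand-in callables:
from contextlib import contextmanager

@contextmanager
def maintain_state(read_state, wait_for_state):
    """Record state on entry; block until it is seen again on exit."""
    saved = read_state()
    try:
        yield saved
    finally:
        wait_for_state(saved)

state = {'value': 'active'}
with maintain_state(lambda: state['value'],
                    lambda goal: None):  # real code polls until goal is seen
    state['value'] = 'restarting'        # e.g. service_restart(...)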
+
+ :param osd_num: the osd id to get state for
+ :param osd_goal_state: (Optional) string indicating state to wait for
+ Defaults to None
+ :returns: Returns a str, the OSD state.
+ :rtype: str
+ """
+ while True:
+ asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num)
+ cmd = [
+ 'ceph',
+ 'daemon',
+ asok,
+ 'status'
+ ]
+ try:
+ result = json.loads(str(subprocess
+ .check_output(cmd)
+ .decode('UTF-8')))
+ except (subprocess.CalledProcessError, ValueError) as e:
+ log("{}".format(e), level=DEBUG)
+ continue
+ osd_state = result['state']
+ log("OSD {} state: {}, goal state: {}".format(
+ osd_num, osd_state, osd_goal_state), level=DEBUG)
+ if not osd_goal_state:
+ return osd_state
+ if osd_state == osd_goal_state:
+ return osd_state
+ time.sleep(3)
+
+
+def get_all_osd_states(osd_goal_states=None):
+ """Get all OSD states or loop until all OSD states match OSD goal states.
+
+ If osd_goal_states is None, just return a dictionary of current OSD states.
+ If osd_goal_states is not None, loop until the current OSD states match
+ the OSD goal states.
+
+ :param osd_goal_states: (Optional) dict indicating states to wait for
+ Defaults to None
+ :returns: Returns a dictionary of current OSD states.
+ :rtype: dict
+ """
+ osd_states = {}
+ for osd_num in get_local_osd_ids():
+ if not osd_goal_states:
+ osd_states[osd_num] = get_osd_state(osd_num)
+ else:
+ osd_states[osd_num] = get_osd_state(
+ osd_num,
+ osd_goal_state=osd_goal_states[osd_num])
+ return osd_states
+
+
+@contextmanager
+def maintain_osd_state(osd_num):
+ """Ensure the state of an OSD is maintained.
+
+ Ensures the state of an OSD is the same at the end of a block nested
+ in a with statement as it was at the beginning of the block.
+
+ :param osd_num: the osd id to maintain state for
+ """
+ osd_state = get_osd_state(osd_num)
+ try:
+ yield
+ finally:
+ get_osd_state(osd_num, osd_goal_state=osd_state)
+
+
+@contextmanager
+def maintain_all_osd_states():
+ """Ensure all local OSD states are maintained.
+
+ Ensures the states of all local OSDs are the same at the end of a
+ block nested in a with statement as they were at the beginning of
+ the block.
+ """
+ osd_states = get_all_osd_states()
+ try:
+ yield
+ finally:
+ get_all_osd_states(osd_goal_states=osd_states)
+
+
 def list_pools(client='admin'):
 """This will list the current pools that Ceph has
 
@@ -2790,6 +2892,7 @@ def dirs_need_ownership_update(service):
 # All child directories had the expected ownership
 return False
 
+
 # A dict of valid ceph upgrade paths. Mapping is old -> new
 UPGRADE_PATHS = collections.OrderedDict([
 ('firefly', 'hammer'),
@@ -2797,6 +2900,7 @@ def dirs_need_ownership_update(service):
 ('jewel', 'luminous'),
 ('luminous', 'mimic'),
 ('mimic', 'nautilus'),
+ ('nautilus', 'octopus'),
 ])
 
 # Map UCA codenames to ceph codenames
@@ -2813,6 +2917,7 @@ def dirs_need_ownership_update(service):
 'rocky': 'mimic',
 'stein': 'mimic',
 'train': 'nautilus',
+ 'ussuri': 'octopus',
 }
 
@@ -2965,3 +3070,57 @@ def osd_noout(enable):
 except subprocess.CalledProcessError as e:
 log(e)
 raise
+
+
+class OSDConfigSetError(Exception):
+ """Error occurred applying OSD settings."""
+ pass
+
+
+def apply_osd_settings(settings):
+ """Applies the provided osd settings
+
+ Apply the provided settings to all local OSDs unless settings are already
+ present. Settings stop being applied on encountering an error.
+
+ :param settings: dict. Dictionary of settings to apply.
+ :returns: bool. True if commands ran successfully.
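# Editor's note: the ordered UPGRADE_PATHS mapping above (old -> new, now
# extended with the nautilus -> octopus hop) can be walked to compute a full
# upgrade chain. The dict below includes only the entries visible in the
# hunks above; a sketch of that traversal:
import collections

UPGRADE_PATHS = collections.OrderedDict([
    ('firefly', 'hammer'),
    ('jewel', 'luminous'),
    ('luminous', 'mimic'),
    ('mimic', 'nautilus'),
    ('nautilus', 'octopus'),
])

def upgrade_chain(release):
    chain = [release]
    while chain[-1] in UPGRADE_PATHS:
        chain.append(UPGRADE_PATHS[chain[-1]])
    return chain

print(upgrade_chain('luminous'))  # ['luminous', 'mimic', 'nautilus', 'octopus']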
+ :raises: OSDConfigSetError
+ """
+ current_settings = {}
+ base_cmd = 'ceph daemon osd.{osd_id} config --format=json'
+ get_cmd = base_cmd + ' get {key}'
+ set_cmd = base_cmd + ' set {key} {value}'
+
+ def _get_cli_key(key):
+ return key.replace(' ', '_')
+ # Retrieve the current values to check keys are correct and to make this a
+ # noop if settings are already applied.
+ for osd_id in get_local_osd_ids():
+ for key, value in sorted(settings.items()):
+ cli_key = _get_cli_key(key)
+ cmd = get_cmd.format(osd_id=osd_id, key=cli_key)
+ out = json.loads(
+ subprocess.check_output(cmd.split()).decode('UTF-8'))
+ if 'error' in out:
+ log("Error retrieving osd setting: {}".format(out['error']),
+ level=ERROR)
+ return False
+ current_settings[key] = out[cli_key]
+ settings_diff = {
+ k: v
+ for k, v in settings.items()
+ if str(v) != str(current_settings[k])}
+ for key, value in sorted(settings_diff.items()):
+ log("Setting {} to {}".format(key, value), level=DEBUG)
+ cmd = set_cmd.format(
+ osd_id=osd_id,
+ key=_get_cli_key(key),
+ value=value)
+ out = json.loads(
+ subprocess.check_output(cmd.split()).decode('UTF-8'))
+ if 'error' in out:
+ log("Error applying osd setting: {}".format(out['error']),
+ level=ERROR)
+ raise OSDConfigSetError
+ return True

From f5a0dd75e7ea46cfe1674b3d6744f1bdafe5845c Mon Sep 17 00:00:00 2001
From: Aurelien Lourot 
Date: Wed, 6 May 2020 18:37:04 +0200
Subject: [PATCH 1949/2699] Pre-freeze 'make *-sync'

Change-Id: I22f57eedd928b3f7d7b27e73b12efd3a5d559933
---
 .../charmhelpers/contrib/openstack/context.py | 751 +++++++++++++++++-
 .../charmhelpers/contrib/openstack/utils.py | 306 ++++++-
 .../contrib/openstack/vaultlocker.py | 24 +-
 .../contrib/storage/linux/ceph.py | 147 +++-
 .../contrib/storage/linux/loopback.py | 8 +-
 .../hooks/charmhelpers/core/hookenv.py | 56 +-
 .../hooks/charmhelpers/core/sysctl.py | 14 +-
 ceph-radosgw/lib/charms_ceph/utils.py | 174 +++-
 8 files changed, 1442 insertions(+), 38 deletions(-)

diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
index bc90804b..335e2d5c 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
@@ -13,13 +13,17 @@
 # limitations under the License.
 
 import collections
+import copy
+import enum
 import glob
+import hashlib
 import json
 import math
 import os
 import re
 import socket
 import time
+
 from base64 import b64decode
 from subprocess import check_call, CalledProcessError
@@ -50,7 +54,8 @@
 INFO,
 ERROR,
 status_set,
- network_get_primary_address
+ network_get_primary_address,
+ WARNING,
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -110,6 +115,13 @@
 )
 from charmhelpers.core.unitdata import kv
 
+try:
+ from sriov_netplan_shim import pci
+except ImportError:
+ # The use of the function and contexts that require the pci module is
+ # optional.
+ pass
+
 try:
 import psutil
 except ImportError:
@@ -263,6 +275,12 @@ def __call__(self):
 'database_password': rdata.get(password_setting),
 'database_type': 'mysql+pymysql'
 }
+ # Port is being introduced with LP Bug #1876188
+ # but it is not currently required and may not be set in all
+ # cases, particularly in classic charms.
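# Editor's note: a hypothetical call into apply_osd_settings() from the
# charms_ceph hunk above. Keys use the space-separated form which
# _get_cli_key() converts to the underscore form the admin socket expects;
# this only does something useful on a unit with local OSDs.
from charms_ceph.utils import apply_osd_settings, OSDConfigSetError

try:
    applied = apply_osd_settings({
        'osd heartbeat grace': 20,
        'osd heartbeat interval': 5,
    })
except OSDConfigSetError:
    applied = False
print('osd settings applied:', applied)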
+ port = rdata.get('db_port')
+ if port:
+ ctxt['database_port'] = port
 if CompareOpenStackReleases(rel) < 'queens':
 ctxt['database_type'] = 'mysql'
 if self.context_complete(ctxt):
@@ -2396,3 +2414,734 @@ def get_ovs_use_veth(self):
 return False
 else:
 return _config
+
+
+EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])
+
+
+def resolve_pci_from_mapping_config(config_key):
+ """Resolve local PCI devices from MAC addresses in mapping config.
+
+ Note that this function keeps record of mac->PCI address lookups
+ in the local unit db as the devices will disappear from the system
+ once bound.
+
+ :param config_key: Configuration option key to parse data from
+ :type config_key: str
+ :returns: PCI device address to Tuple(entity, mac) map
+ :rtype: collections.OrderedDict[str,Tuple[str,str]]
+ """
+ devices = pci.PCINetDevices()
+ resolved_devices = collections.OrderedDict()
+ db = kv()
+ # Note that ``parse_data_port_mappings`` returns Dict regardless of input
+ for mac, entity in parse_data_port_mappings(config(config_key)).items():
+ pcidev = devices.get_device_from_mac(mac)
+ if pcidev:
+ # NOTE: store mac->pci allocation as post binding
+ # it disappears from PCIDevices.
+ db.set(mac, pcidev.pci_address)
+ db.flush()
+
+ pci_address = db.get(mac)
+ if pci_address:
+ resolved_devices[pci_address] = EntityMac(entity, mac)
+
+ return resolved_devices
+
+
+class DPDKDeviceContext(OSContextGenerator):
+
+ def __init__(self, driver_key=None, bridges_key=None, bonds_key=None):
+ """Initialize DPDKDeviceContext.
+
+ :param driver_key: Key to use when retrieving driver config.
+ :type driver_key: str
+ :param bridges_key: Key to use when retrieving bridge config.
+ :type bridges_key: str
+ :param bonds_key: Key to use when retrieving bonds config.
+ :type bonds_key: str
+ """
+ self.driver_key = driver_key or 'dpdk-driver'
+ self.bridges_key = bridges_key or 'data-port'
+ self.bonds_key = bonds_key or 'dpdk-bond-mappings'
+
+ def __call__(self):
+ """Populate context.
+
+ :returns: context
+ :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]]
+ """
+ driver = config(self.driver_key)
+ if driver is None:
+ return {}
+ # Resolve PCI devices for both directly used devices (_bridges)
+ # and devices for use in dpdk bonds (_bonds)
+ pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
+ pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
+ return {'devices': pci_devices,
+ 'driver': driver}
+
+
+class OVSDPDKDeviceContext(OSContextGenerator):
+
+ def __init__(self, bridges_key=None, bonds_key=None):
+ """Initialize OVSDPDKDeviceContext.
+
+ :param bridges_key: Key to use when retrieving bridge config.
+ :type bridges_key: str
+ :param bonds_key: Key to use when retrieving bonds config.
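# Editor's note: a sketch of the mapping handled by
# resolve_pci_from_mapping_config() above. parse_data_port_mappings() comes
# from charmhelpers; here its result is emulated with a literal dict so the
# EntityMac handling can be shown standalone. All values are invented.
import collections

EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])

# e.g. config('data-port') == 'br-ex:52:54:00:aa:bb:cc'
mac_to_entity = {'52:54:00:aa:bb:cc': 'br-ex'}

resolved = collections.OrderedDict()
for mac, entity in mac_to_entity.items():
    pci_address = '0000:03:00.0'  # stand-in for the sysfs/KV store lookup
    resolved[pci_address] = EntityMac(entity, mac)

print(resolved['0000:03:00.0'].entity)  # 'br-ex'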
+ :type bonds_key: str
+ """
+ self.bridges_key = bridges_key or 'data-port'
+ self.bonds_key = bonds_key or 'dpdk-bond-mappings'
+
+ @staticmethod
+ def _parse_cpu_list(cpulist):
+ """Parses a Linux cpulist for a NUMA node
+
+ :returns: list of cores
+ :rtype: List[int]
+ """
+ cores = []
+ ranges = cpulist.split(',')
+ for cpu_range in ranges:
+ if "-" in cpu_range:
+ cpu_min_max = cpu_range.split('-')
+ cores += range(int(cpu_min_max[0]),
+ int(cpu_min_max[1]) + 1)
+ else:
+ cores.append(int(cpu_range))
+ return cores
+
+ def _numa_node_cores(self):
+ """Get map of numa node -> cpu core
+
+ :returns: map of numa node -> cpu core
+ :rtype: Dict[str,List[int]]
+ """
+ nodes = {}
+ node_regex = '/sys/devices/system/node/node*'
+ for node in glob.glob(node_regex):
+ index = node.lstrip('/sys/devices/system/node/node')
+ with open(os.path.join(node, 'cpulist')) as cpulist:
+ nodes[index] = self._parse_cpu_list(cpulist.read().strip())
+ return nodes
+
+ def cpu_mask(self):
+ """Get hex formatted CPU mask
+
+ The mask is based on using the first config:dpdk-socket-cores
+ cores of each NUMA node in the unit.
+ :returns: hex formatted CPU mask
+ :rtype: str
+ """
+ num_cores = config('dpdk-socket-cores')
+ mask = 0
+ for cores in self._numa_node_cores().values():
+ for core in cores[:num_cores]:
+ mask = mask | 1 << core
+ return format(mask, '#04x')
+
+ def socket_memory(self):
+ """Formatted list of socket memory configuration per NUMA node
+
+ :returns: socket memory configuration per NUMA node
+ :rtype: str
+ """
+ sm_size = config('dpdk-socket-memory')
+ node_regex = '/sys/devices/system/node/node*'
+ mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
+ if mem_list:
+ return ','.join(mem_list)
+ else:
+ return str(sm_size)
+
+ def devices(self):
+ """List of PCI devices for use by DPDK
+
+ :returns: List of PCI devices for use by DPDK
+ :rtype: collections.OrderedDict[str,str]
+ """
+ pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
+ pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
+ return pci_devices
+
+ def _formatted_whitelist(self, flag):
+ """Flag formatted list of devices to whitelist
+
+ :param flag: flag format to use
+ :type flag: str
+ :rtype: str
+ """
+ whitelist = []
+ for device in self.devices():
+ whitelist.append(flag.format(device=device))
+ return ' '.join(whitelist)
+
+ def device_whitelist(self):
+ """Formatted list of devices to whitelist for dpdk
+
+ using the old style '-w' flag
+
+ :returns: devices to whitelist prefixed by '-w '
+ :rtype: str
+ """
+ return self._formatted_whitelist('-w {device}')
+
+ def pci_whitelist(self):
+ """Formatted list of devices to whitelist for dpdk
+
+ using the new style '--pci-whitelist' flag
+
+ :returns: devices to whitelist prefixed by '--pci-whitelist '
+ :rtype: str
+ """
+ return self._formatted_whitelist('--pci-whitelist {device}')
+
+ def __call__(self):
+ """Populate context.
+
+ :returns: context
+ :rtype: Dict[str,Union[bool,str]]
+ """
+ ctxt = {}
+ whitelist = self.device_whitelist()
+ if whitelist:
+ ctxt['dpdk_enabled'] = config('enable-dpdk')
+ ctxt['device_whitelist'] = self.device_whitelist()
+ ctxt['socket_memory'] = self.socket_memory()
+ ctxt['cpu_mask'] = self.cpu_mask()
+ return ctxt
+
+
+class BridgePortInterfaceMap(object):
+ """Build a map of bridge ports and interfaces from charm configuration.
+
+ NOTE: the handling of this detail in the charm is pre-deprecated.
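# Editor's note: the mask arithmetic from cpu_mask() above, standalone. With
# two NUMA nodes and dpdk-socket-cores=1 the first core of each node is
# selected; the node layout here is invented for illustration.
numa_nodes = {'0': [0, 1, 2, 3], '1': [4, 5, 6, 7]}
num_cores = 1  # stand-in for config('dpdk-socket-cores')

mask = 0
for cores in numa_nodes.values():
    for core in cores[:num_cores]:
        mask |= 1 << core

print(format(mask, '#04x'))  # 0x11: cores 0 and 4 set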
+
+ The long term goal is for network connectivity detail to be modelled in
+ the server provisioning layer (such as MAAS) which in turn will provide
+ a Netplan YAML description that will be used to drive Open vSwitch.
+
+ Until we get to that reality the charm will need to configure this
+ detail based on application level configuration options.
+
+ There is an established way of mapping interfaces to ports and bridges
+ in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we
+ will carry that forward.
+
+ The relationship between bridge, port and interface(s).
+ +--------+
+ | bridge |
+ +--------+
+ |
+ +----------------+
+ | port aka. bond |
+ +----------------+
+ | |
+ +-+ +-+
+ |i| |i|
+ |n| |n|
+ |t| |t|
+ |0| |N|
+ +-+ +-+
+ """
+ class interface_type(enum.Enum):
+ """Supported interface types.
+
+ Supported interface types can be found in the ``iface_types`` column
+ in the ``Open_vSwitch`` table on a running system.
+ """
+ dpdk = 'dpdk'
+ internal = 'internal'
+ system = 'system'
+
+ def __str__(self):
+ """Return string representation of value.
+
+ :returns: string representation of value.
+ :rtype: str
+ """
+ return self.value
+
+ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None,
+ global_mtu=None):
+ """Initialize map.
+
+ :param bridges_key: Name of bridge:interface/port map config key
+ (default: 'data-port')
+ :type bridges_key: Optional[str]
+ :param bonds_key: Name of port-name:interface map config key
+ (default: 'dpdk-bond-mappings')
+ :type bonds_key: Optional[str]
+ :param enable_dpdk_key: Name of DPDK toggle config key
+ (default: 'enable-dpdk')
+ :type enable_dpdk_key: Optional[str]
+ :param global_mtu: Set a MTU on all interfaces at map initialization.
+
+ The default is to have Open vSwitch get this from the underlying
+ interface as set up by bare metal provisioning.
+
+ Note that you can augment the MTU on an individual interface basis
+ like this:
+
+ ifdatamap = bpi.get_ifdatamap(bridge, port)
+ ifdatamap = {
+ port: {
+ **ifdata,
+ **{'mtu-request': my_individual_mtu_map[port]},
+ }
+ for port, ifdata in ifdatamap.items()
+ }
+ :type global_mtu: Optional[int]
+ """
+ bridges_key = bridges_key or 'data-port'
+ bonds_key = bonds_key or 'dpdk-bond-mappings'
+ enable_dpdk_key = enable_dpdk_key or 'enable-dpdk'
+ self._map = collections.defaultdict(
+ lambda: collections.defaultdict(dict))
+ self._ifname_mac_map = collections.defaultdict(list)
+ self._mac_ifname_map = {}
+ self._mac_pci_address_map = {}
+
+ # First we iterate over the list of physical interfaces visible to the
+ # system and update interface name to mac and mac to interface name map
+ for ifname in list_nics():
+ if not is_phy_iface(ifname):
+ continue
+ mac = get_nic_hwaddr(ifname)
+ self._ifname_mac_map[ifname] = [mac]
+ self._mac_ifname_map[mac] = ifname
+
+ # In light of the pre-deprecation notice in the docstring of this
+ # class we will expose the ability to configure OVS bonds as a
+ # DPDK-only feature, but generally use the data structures internally.
+ if config(enable_dpdk_key):
+ # resolve PCI address of interfaces listed in the bridges and bonds
+ # charm configuration options. Note that for already bound
+ # interfaces the helper will retrieve MAC address from the unit
+ # KV store as the information is no longer available in sysfs.
+ _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items. + + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. 
+ + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
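# Editor's note: a worked example of the dpdk-bond-config format parsed by the
# __init__ that follows. Each whitespace-separated entry is
# bond:mode:lacp:lacp-time, with empty fields falling back to
# DEFAULT_LACP_CONFIG; the bond name here is an example. The [0:3:2] slice
# picks the head and tail of str.partition().
entry = 'dpdk-bond0:balance-slb:off:slow'

bond, rest = entry.partition(':')[0:3:2]      # 'dpdk-bond0', 'balance-slb:off:slow'
mode, rest = rest.partition(':')[0:3:2]       # 'balance-slb', 'off:slow'
lacp, rest = rest.partition(':')[0:3:2]       # 'off', 'slow'
lacp_time, rest = rest.partition(':')[0:3:2]  # 'slow', ''

print(bond, mode, lacp, lacp_time)  # dpdk-bond0 balance-slb off slow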
+
+ :param config_key: Configuration key to retrieve data from
+ (default: ``dpdk-bond-config``)
+ :type config_key: Optional[str]
+ """
+ self.config_key = config_key or 'dpdk-bond-config'
+
+ self.lacp_config = {
+ self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
+ }
+
+ lacp_config = config(self.config_key)
+ if lacp_config:
+ lacp_config_map = lacp_config.split()
+ for entry in lacp_config_map:
+ bond, entry = entry.partition(':')[0:3:2]
+ if not bond:
+ bond = self.ALL_BONDS
+
+ mode, entry = entry.partition(':')[0:3:2]
+ if not mode:
+ mode = self.DEFAULT_LACP_CONFIG['mode']
+ assert mode in self.BOND_MODES, \
+ "Bond mode {} is invalid".format(mode)
+
+ lacp, entry = entry.partition(':')[0:3:2]
+ if not lacp:
+ lacp = self.DEFAULT_LACP_CONFIG['lacp']
+ assert lacp in self.BOND_LACP, \
+ "Bond lacp {} is invalid".format(lacp)
+
+ lacp_time, entry = entry.partition(':')[0:3:2]
+ if not lacp_time:
+ lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
+ assert lacp_time in self.BOND_LACP_TIME, \
+ "Bond lacp-time {} is invalid".format(lacp_time)
+
+ self.lacp_config[bond] = {
+ 'mode': mode,
+ 'lacp': lacp,
+ 'lacp-time': lacp_time
+ }
+
+ def get_bond_config(self, bond):
+ """Get the LACP configuration for a bond
+
+ :param bond: the bond name
+ :return: a dictionary with the configuration of the bond
+ :rtype: Dict[str,Dict[str,str]]
+ """
+ return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])
+
+ def get_ovs_portdata(self, bond):
+ """Get structure suitable for charmhelpers.contrib.network.ovs helpers.
+
+ :param bond: the bond name
+ :return: a dictionary with the configuration of the bond
+ :rtype: Dict[str,Union[str,Dict[str,str]]]
+ """
+ bond_config = self.get_bond_config(bond)
+ return {
+ 'bond_mode': bond_config['mode'],
+ 'lacp': bond_config['lacp'],
+ 'other_config': {
+ 'lacp-time': bond_config['lacp-time'],
+ },
+ }
+
+
+class SRIOVContext(OSContextGenerator):
+ """Provide context for configuring SR-IOV devices."""
+
+ class sriov_config_mode(enum.Enum):
+ """Mode in which SR-IOV is configured.
+
+ The configuration option identified by the ``numvfs_key`` parameter
+ is overloaded and defines in which mode the charm should interpret
+ the other SR-IOV-related configuration options.
+ """
+ auto = 'auto'
+ blanket = 'blanket'
+ explicit = 'explicit'
+
+ def _determine_numvfs(self, device, sriov_numvfs):
+ """Determine number of Virtual Functions (VFs) configured for device.
+
+ :param device: Object describing a PCI Network interface card (NIC).
+ :type device: sriov_netplan_shim.pci.PCINetDevice
+ :param sriov_numvfs: Number of VFs requested for blanket configuration.
+ :type sriov_numvfs: int
+ :returns: Number of VFs to configure for device
+ :rtype: Optional[int]
+ """
+
+ def _get_capped_numvfs(requested):
+ """Get a number of VFs that does not exceed individual card limits.
+
+ Depending on the make and model of NIC, the number of VFs supported
+ varies. Requesting more VFs than a card supports would be a fatal
+ error, so cap the requested number at the total number of VFs each
+ individual card supports.
+
+ :param requested: Number of VFs requested
+ :type requested: int
+ :returns: Number of VFs allowed
+ :rtype: int
+ """
+ actual = min(int(requested), int(device.sriov_totalvfs))
+ if actual < int(requested):
+ log('Requested VFs ({}) too high for device {}. 
Falling back '
+ 'to value supported by device: {}'
+ .format(requested, device.interface_name,
+ device.sriov_totalvfs),
+ level=WARNING)
+ return actual
+
+ if self._sriov_config_mode == self.sriov_config_mode.auto:
+ # auto-mode
+ #
+ # If device mapping configuration is present, return information
+ # on cards with mapping.
+ #
+ # If no device mapping configuration is present, return information
+ # for all cards.
+ #
+ # The maximum number of VFs supported by card will be used.
+ if (self._sriov_mapped_devices and
+ device.interface_name not in self._sriov_mapped_devices):
+ log('SR-IOV configured in auto mode: No device mapping for {}'
+ .format(device.interface_name),
+ level=DEBUG)
+ return
+ return _get_capped_numvfs(device.sriov_totalvfs)
+ elif self._sriov_config_mode == self.sriov_config_mode.blanket:
+ # blanket-mode
+ #
+ # User has specified a number of VFs that should apply to all
+ # cards with support for VFs.
+ return _get_capped_numvfs(sriov_numvfs)
+ elif self._sriov_config_mode == self.sriov_config_mode.explicit:
+ # explicit-mode
+ #
+ # User has given a list of interface names and associated number of
+ # VFs
+ if device.interface_name not in self._sriov_config_devices:
+ log('SR-IOV configured in explicit mode: No device:numvfs '
+ 'pair for device {}, skipping.'
+ .format(device.interface_name),
+ level=DEBUG)
+ return
+ return _get_capped_numvfs(
+ self._sriov_config_devices[device.interface_name])
+ else:
+ raise RuntimeError('This should not be reached')
+
+ def __init__(self, numvfs_key=None, device_mappings_key=None):
+ """Initialize map from PCI devices and configuration options.
+
+ :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
+ :type numvfs_key: Optional[str]
+ :param device_mappings_key: Config key for device mappings
+ (default: 'sriov-device-mappings')
+ :type device_mappings_key: Optional[str]
+ :raises: RuntimeError
+ """
+ numvfs_key = numvfs_key or 'sriov-numvfs'
+ device_mappings_key = device_mappings_key or 'sriov-device-mappings'
+
+ devices = pci.PCINetDevices()
+ charm_config = config()
+ sriov_numvfs = charm_config.get(numvfs_key) or ''
+ sriov_device_mappings = charm_config.get(device_mappings_key) or ''
+
+ # create list of devices from sriov_device_mappings config option
+ self._sriov_mapped_devices = [
+ pair.split(':', 1)[1]
+ for pair in sriov_device_mappings.split()
+ ]
+
+ # create map of device:numvfs from sriov_numvfs config option
+ self._sriov_config_devices = {
+ ifname: numvfs for ifname, numvfs in (
+ pair.split(':', 1) for pair in sriov_numvfs.split()
+ if ':' in sriov_numvfs)
+ }
+
+ # determine configuration mode from contents of sriov_numvfs
+ if sriov_numvfs == 'auto':
+ self._sriov_config_mode = self.sriov_config_mode.auto
+ elif sriov_numvfs.isdigit():
+ self._sriov_config_mode = self.sriov_config_mode.blanket
+ elif ':' in sriov_numvfs:
+ self._sriov_config_mode = self.sriov_config_mode.explicit
+ else:
+ raise RuntimeError('Unable to determine mode of SR-IOV '
+ 'configuration.')
+
+ self._map = {
+ device.interface_name: self._determine_numvfs(device, sriov_numvfs)
+ for device in devices.pci_devices
+ if device.sriov and
+ self._determine_numvfs(device, sriov_numvfs) is not None
+ }
+
+ def __call__(self):
+ """Provide SR-IOV context.
+
+ :returns: Map interface name: min(configured, max) virtual functions.
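# Editor's note: the sriov-numvfs option parsed above is overloaded; a sketch
# of the three mode-detection rules with example values:
def detect_mode(sriov_numvfs):
    if sriov_numvfs == 'auto':
        return 'auto'        # all (or all mapped) cards, max VFs
    elif sriov_numvfs.isdigit():
        return 'blanket'     # same VF count for every card
    elif ':' in sriov_numvfs:
        return 'explicit'    # per-interface 'ifname:numvfs' pairs
    raise RuntimeError('Unable to determine mode of SR-IOV configuration.')

print(detect_mode('auto'))             # auto
print(detect_mode('32'))               # blanket
print(detect_mode('eth0:16 eth1:32'))  # explicit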
+ Example: + { + 'eth0': 16, + 'eth1': 32, + 'eth2': 64, + } + :rtype: Dict[str,int] + """ + return self._map diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 5c8f6eff..e59e0d1e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -13,7 +13,7 @@ # limitations under the License. # Common python helper functions used for OpenStack charms. -from collections import OrderedDict +from collections import OrderedDict, namedtuple from functools import wraps import subprocess @@ -36,15 +36,20 @@ from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, action_fail, action_set, config, + expected_peer_units, + expected_related_units, log as juju_log, charm_dir, INFO, ERROR, + metadata, related_units, relation_get, + relation_id, relation_ids, relation_set, status_set, @@ -53,6 +58,7 @@ cached, leader_set, leader_get, + local_unit, ) from charmhelpers.core.strutils import ( @@ -108,6 +114,10 @@ POLICYD_CONFIG_NAME, ) +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -1810,6 +1820,16 @@ def os_application_version_set(package): application_version_set(application_version) +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. + :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + def enable_memcache(source=None, release=None, package=None): """Determine if memcache should be enabled on the local unit @@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None): 'WARN') pass return True in notifications + + +@cached +def container_scoped_relations(): + """Get all the container scoped relations + + :returns: List of relation names + :rtype: List + """ + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_db_ready(use_current_context=False, rel_name=None): + """Check remote database is ready to be used. + + Database relations are expected to provide a list of 'allowed' units to + confirm that the database is ready for use by those units. + + If db relation has provided this information and local unit is a member, + returns True otherwise False. + + :param use_current_context: Whether to limit checks to current hook + context. + :type use_current_context: bool + :param rel_name: Name of relation to check + :type rel_name: string + :returns: Whether remote db is ready. + :rtype: bool + :raises: Exception + """ + key = 'allowed_units' + + rel_name = rel_name or 'shared-db' + this_unit = local_unit() + + if use_current_context: + if relation_id() in relation_ids(rel_name): + rids_units = [(None, None)] + else: + raise Exception("use_current_context=True but not in {} " + "rel hook contexts (currently in {})." 
+ .format(rel_name, relation_id()))
+ else:
+ rids_units = [(r_id, u)
+ for r_id in relation_ids(rel_name)
+ for u in related_units(r_id)]
+
+ for rid, unit in rids_units:
+ allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
+ if allowed_units and this_unit in allowed_units.split():
+ juju_log("This unit ({}) is in allowed unit list from {}".format(
+ this_unit,
+ unit), 'DEBUG')
+ return True
+
+ juju_log("This unit was not found in any allowed unit list")
+ return False
+
+
+def is_expected_scale(peer_relation_name='cluster'):
+ """Query juju goal-state to determine whether our peer- and dependency-
+ relations are at the expected scale.
+
+ Useful for deferring per unit per relation housekeeping work until we are
+ ready to complete it successfully and without unnecessary repetition.
+
+ Always returns True if version of juju used does not support goal-state.
+
+ :param peer_relation_name: Name of peer relation
+ :type peer_relation_name: string
+ :returns: True or False
+ :rtype: bool
+ """
+ def _get_relation_id(rel_type):
+ return next((rid for rid in relation_ids(reltype=rel_type)), None)
+
+ Relation = namedtuple('Relation', 'rel_type rel_id')
+ peer_rid = _get_relation_id(peer_relation_name)
+ # Units with no peers should still have a peer relation.
+ if not peer_rid:
+ juju_log('Not at expected scale, no peer relation found', 'DEBUG')
+ return False
+ expected_relations = [
+ Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
+ if expect_ha():
+ expected_relations.append(
+ Relation(
+ rel_type='ha',
+ rel_id=_get_relation_id('ha')))
+ juju_log(
+ 'Checking scale of {} relations'.format(
+ ','.join([r.rel_type for r in expected_relations])),
+ 'DEBUG')
+ try:
+ if (len(related_units(relid=peer_rid)) <
+ len(list(expected_peer_units()))):
+ return False
+ for rel in expected_relations:
+ if not rel.rel_id:
+ juju_log(
+ 'Expected to find {} relation, but it is missing'.format(
+ rel.rel_type),
+ 'DEBUG')
+ return False
+ # Goal state returns every unit even for container scoped
+ # relations but the charm only ever has a relation with
+ # the local unit.
+ if rel.rel_type in container_scoped_relations():
+ expected_count = 1
+ else:
+ expected_count = len(
+ list(expected_related_units(reltype=rel.rel_type)))
+ if len(related_units(relid=rel.rel_id)) < expected_count:
+ juju_log(
+ ('Not at expected scale, not enough units on {} '
+ 'relation'.format(rel.rel_type)),
+ 'DEBUG')
+ return False
+ except NotImplementedError:
+ return True
+ juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
+ return True
+
+
+def get_peer_key(unit_name):
+ """Get the peer key for this unit.
+
+ The peer key is the key a unit uses to publish its status down the peer
+ relation
+
+ :param unit_name: Name of unit
+ :type unit_name: string
+ :returns: Peer key for given unit
+ :rtype: string
+ """
+ return 'unit-state-{}'.format(unit_name.replace('/', '-'))
+
+
+UNIT_READY = 'READY'
+UNIT_NOTREADY = 'NOTREADY'
+UNIT_UNKNOWN = 'UNKNOWN'
+UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
+
+
+def inform_peers_unit_state(state, relation_name='cluster'):
+ """Inform peers of the state of this unit.
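# Editor's note: the peer-state key scheme from get_peer_key() above, shown
# standalone with an example unit name. Each unit publishes its own key on
# the peer relation and reads everyone else's to aggregate readiness.
UNIT_READY = 'READY'

def get_peer_key(unit_name):
    return 'unit-state-{}'.format(unit_name.replace('/', '-'))

key = get_peer_key('ceph-radosgw/0')
print({key: UNIT_READY})  # {'unit-state-ceph-radosgw-0': 'READY'}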
+ + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. + :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WORKLOAD_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WORKLOAD_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WORKLOAD_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. 
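# Editor's note: a hypothetical hook-side wiring of the readiness helpers
# above; the import path matches this charmhelpers sync, but the hook
# function and its placement are illustrative only.
from charmhelpers.contrib.openstack.utils import (
    check_api_unit_ready,
    inform_peers_if_ready,
)

def config_changed():
    # ... render configuration, restart services ...
    # Publish READY/NOTREADY on the 'cluster' peer relation so that
    # get_api_application_status() on any unit can aggregate the result.
    inform_peers_if_ready(check_api_unit_ready)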
+ + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. + + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WORKLOAD_STATES.ACTIVE: + if are_peers_ready(): + return WORKLOAD_STATES.ACTIVE, 'Application Ready' + else: + return WORKLOAD_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 866a2697..4ee6c1db 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -140,9 +140,16 @@ def vault_relation_complete(backend=None): :ptype backend: string :returns: whether the relation to vault is complete :rtype: bool""" - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False # TODO: contrib a high level unwrap method to hvac that works @@ -156,7 +163,16 @@ def retrieve_secret_id(url, token): :returns: secret_id to use for Vault Access :rtype: str""" import hvac - client = hvac.Client(url=url, token=token) + try: + # hvac 0.10.1 changed default adapter to JSONAdapter + client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) + except AttributeError: + # hvac < 0.6.2 doesn't have adapter but uses the same response interface + client = hvac.Client(url=url, token=token) + else: + # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate + if not isinstance(client.adapter, hvac.adapters.Request): + client.adapter = hvac.adapters.Request(base_uri=url, token=token) response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index dabfb6c2..eb31b782 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -22,6 +22,7 @@ # Adam Gandelman # +import collections import errno import hashlib import math @@ -93,6 +94,88 @@ DEFAULT_MINIMUM_PGS = 2 +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. 
+    :rtype: collections.OrderedDict
+    """
+    return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
+
+
+def get_osd_settings(relation_name):
+    """Consolidate requested osd settings from all clients.
+
+    Consolidate requested osd settings from all clients. Check that each
+    requested setting is on the whitelist and that it does not conflict with
+    any other requested settings.
+
+    :param relation_name: Name of relation to check for settings.
+    :type relation_name: str
+    :returns: Dictionary of settings
+    :rtype: dict
+
+    :raises: OSDSettingNotAllowed
+    :raises: OSDSettingConflict
+    """
+    rel_ids = relation_ids(relation_name)
+    osd_settings = {}
+    for relid in rel_ids:
+        for unit in related_units(relid):
+            unit_settings = relation_get('osd-settings', unit, relid) or '{}'
+            unit_settings = json.loads(unit_settings)
+            for key, value in unit_settings.items():
+                if key not in OSD_SETTING_WHITELIST:
+                    msg = 'Illegal settings "{}"'.format(key)
+                    raise OSDSettingNotAllowed(msg)
+                if key in osd_settings:
+                    if osd_settings[key] != unit_settings[key]:
+                        msg = 'Conflicting settings for "{}"'.format(key)
+                        raise OSDSettingConflict(msg)
+                else:
+                    osd_settings[key] = value
+    return _order_dict_by_key(osd_settings)
+
+
+def send_osd_settings():
+    """Pass on requested OSD settings to osd units."""
+    try:
+        settings = get_osd_settings('client')
+    except OSD_SETTING_EXCEPTIONS as e:
+        # There is a problem with the settings, not passing them on. Update
+        # status will notify the user.
+        log(e, level=ERROR)
+        return
+    data = {
+        'osd-settings': json.dumps(settings, sort_keys=True)}
+    for relid in relation_ids('osd'):
+        relation_set(relation_id=relid,
+                     relation_settings=data)
+
+
 def validator(value, valid_type, valid_range=None):
     """
     Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
@@ -1635,5 +1718,67 @@ def __call__(self):
                 continue
             ceph_conf[key] = conf[key]
-
         return ceph_conf
+
+
+class CephOSDConfContext(CephConfContext):
+    """Ceph config (ceph.conf) context.
+
+    Consolidates settings from config-flags via CephConfContext with
+    settings provided by the mons. The config-flag values are preserved in
+    conf['osd'], settings from the mons which do not clash with config-flag
+    settings are in conf['osd_from_client'] and finally settings which do
+    clash are in conf['osd_from_client_conflict']. Rather than silently drop
+    the conflicting settings they are provided in the context so they can be
+    rendered commented out to give some visibility to the admin.
+    """
+
+    def __init__(self, permitted_sections=None):
+        super(CephOSDConfContext, self).__init__(
+            permitted_sections=permitted_sections)
+        try:
+            self.settings_from_mons = get_osd_settings('mon')
+        except OSDSettingConflict:
+            log(
+                "OSD settings from mons are inconsistent, ignoring them",
+                level=WARNING)
+            self.settings_from_mons = {}
+
+    def filter_osd_from_mon_settings(self):
+        """Filter settings from client relation against config-flags.
+
+        :returns: A tuple (
+            config-flag values,
+            client settings which do not conflict with config-flag values,
+            client settings which conflict with config-flag values)
+        :rtype: (OrderedDict, OrderedDict, OrderedDict)
+        """
+        ceph_conf = super(CephOSDConfContext, self).__call__()
+        conflicting_entries = {}
+        clear_entries = {}
+        for key, value in self.settings_from_mons.items():
+            if key in ceph_conf.get('osd', {}):
+                if ceph_conf['osd'][key] != value:
+                    conflicting_entries[key] = value
+            else:
+                clear_entries[key] = value
+        clear_entries = _order_dict_by_key(clear_entries)
+        conflicting_entries = _order_dict_by_key(conflicting_entries)
+        return ceph_conf, clear_entries, conflicting_entries
+
+    def __call__(self):
+        """Construct OSD config context.
+
+        Standard context with two additional special keys.
+            osd_from_client_conflict: client settings which conflict with
+                config-flag values
+            osd_from_client: settings which do not conflict with config-flag
+                values
+
+        :returns: OSD config context dict.
+        :rtype: dict
+        """
+        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
+        conf['osd_from_client_conflict'] = osd_conflict
+        conf['osd_from_client'] = osd_clear
+        return conf
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py
index 82472ff1..74bab40e 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py
@@ -32,6 +32,10 @@ def loopback_devices():

         /dev/loop0: [0807]:961814 (/tmp/my.img)

+    or:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
+
     :returns: dict: a dict mapping {loopback_dev: backing_file}
     '''
     loopbacks = {}
@@ -39,9 +43,9 @@ def loopback_devices():
     output = check_output(cmd)
     if six.PY3:
         output = output.decode('utf-8')
-    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
+    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
     for dev, _, f in devs:
-        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
+        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
     return loopbacks

diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
index 647f6e4b..d7c37c17 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py
@@ -21,6 +21,7 @@ from __future__ import print_function
 import copy
 from distutils.version import LooseVersion
+from enum import Enum
 from functools import wraps
 from collections import namedtuple
 import glob
@@ -57,6 +58,14 @@
           'This may not be compatible with software you are '
           'running in your shell.')

+
+class WORKLOAD_STATES(Enum):
+    ACTIVE = 'active'
+    BLOCKED = 'blocked'
+    MAINTENANCE = 'maintenance'
+    WAITING = 'waiting'
+
+
 cache = {}

@@ -1088,22 +1097,33 @@ def function_tag():
     return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()


-def status_set(workload_state, message):
+def status_set(workload_state, message, application=False):
     """Set the workload state with a message

     Use status-set to set the workload state with a message which is visible
     to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message unstead.
+    assume this is juju < 1.23 and juju-log the message instead.

-    workload_state -- valid juju workload state.
-    message        -- status update message
+    workload_state -- valid juju workload state (str or WORKLOAD_STATES)
+    message        -- status update message
+    application    -- whether to set the application status (rather than
+                      the unit status)
     """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
     try:
         ret = subprocess.call(cmd)
         if ret == 0:
@@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
                                                     message)
     log(log_message, level='INFO')

@@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
     """Get proxy settings from process environment variables.

     Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) in a format suitable for passing to an application that
-    reacts to proxy settings passed as environment variables. Some applications
-    support lowercase or uppercase notation (e.g. curl), some support only
-    lowercase (e.g. wget), there are also subjectively rare cases of only
-    uppercase notation support. no_proxy CIDR and wildcard support also varies
-    between runtimes and applications as there is no enforced standard.
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl), some
+    support only lowercase (e.g. wget), there are also subjectively rare cases
+    of only uppercase notation support. no_proxy CIDR and wildcard support also
+    varies between runtimes and applications as there is no enforced standard.

     Some applications may connect to multiple destinations and expose config
     options that would affect only proxy settings for a specific destination
diff --git a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
index f1f4a28f..386428d6 100644
--- a/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
+++ b/ceph-radosgw/hooks/charmhelpers/core/sysctl.py
@@ -17,14 +17,17 @@

 import yaml

-from subprocess import check_call
+from subprocess import check_call, CalledProcessError

 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     ERROR,
+    WARNING,
 )

+from charmhelpers.core.host import is_container
+
 __author__ = 'Jorge Niedbalski R. 
' @@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False): if ignore: call.append("-e") - check_call(call) + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index d2752520..a3fd276d 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -25,6 +25,7 @@ import time import uuid +from contextlib import contextmanager from datetime import datetime from charmhelpers.core import hookenv @@ -175,12 +176,16 @@ def unmounted_disks(): context = pyudev.Context() for device in context.list_devices(DEVTYPE='disk'): if device['SUBSYSTEM'] == 'block': + if device.device_node is None: + continue + matched = False for block_type in [u'dm-', u'loop', u'ram', u'nbd']: if block_type in device.device_node: matched = True if matched: continue + disks.append(device.device_node) log("Found disks: {}".format(disks)) return [disk for disk in disks if not is_device_mounted(disk)] @@ -1108,6 +1113,7 @@ def get_mds_bootstrap_key(): admin_caps = collections.OrderedDict([ ('mds', ['allow *']), + ('mgr', ['allow *']), ('mon', ['allow *']), ('osd', ['allow *']) ]) @@ -2419,10 +2425,11 @@ def upgrade_osd(new_version): # way to update the code on the node. if not dirs_need_ownership_update('osd'): log('Restarting all OSDs to load new binaries', DEBUG) - if systemd(): - service_restart('ceph-osd.target') - else: - service_restart('ceph-osd-all') + with maintain_all_osd_states(): + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') return # Need to change the ownership of all directories which are not OSD @@ -2467,11 +2474,12 @@ def _upgrade_single_osd(osd_num, osd_dir): :raises IOError: if an error occurs reading/writing to a file as part of the upgrade process """ - stop_osd(osd_num) - disable_osd(osd_num) - update_owner(osd_dir) - enable_osd(osd_num) - start_osd(osd_num) + with maintain_osd_state(osd_num): + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) def stop_osd(osd_num): @@ -2598,6 +2606,98 @@ def update_owner(path, recurse_dirs=True): secs=elapsed_time.total_seconds(), path=path), DEBUG) +def get_osd_state(osd_num, osd_goal_state=None): + """Get OSD state or loop until OSD state matches OSD goal state. + + If osd_goal_state is None, just return the current OSD state. + If osd_goal_state is not None, loop until the current OSD state matches + the OSD goal state. + + :param osd_num: the osd id to get state for + :param osd_goal_state: (Optional) string indicating state to wait for + Defaults to None + :returns: Returns a str, the OSD state. 
+    :rtype: str
+    """
+    while True:
+        asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num)
+        cmd = [
+            'ceph',
+            'daemon',
+            asok,
+            'status'
+        ]
+        try:
+            result = json.loads(str(subprocess
+                                    .check_output(cmd)
+                                    .decode('UTF-8')))
+        except (subprocess.CalledProcessError, ValueError) as e:
+            log("{}".format(e), level=DEBUG)
+            continue
+        osd_state = result['state']
+        log("OSD {} state: {}, goal state: {}".format(
+            osd_num, osd_state, osd_goal_state), level=DEBUG)
+        if not osd_goal_state:
+            return osd_state
+        if osd_state == osd_goal_state:
+            return osd_state
+        time.sleep(3)
+
+
+def get_all_osd_states(osd_goal_states=None):
+    """Get all OSD states or loop until all OSD states match OSD goal states.
+
+    If osd_goal_states is None, just return a dictionary of current OSD
+    states. If osd_goal_states is not None, loop until the current OSD
+    states match the OSD goal states.
+
+    :param osd_goal_states: (Optional) dict indicating states to wait for
+                            Defaults to None
+    :returns: Returns a dictionary of current OSD states.
+    :rtype: dict
+    """
+    osd_states = {}
+    for osd_num in get_local_osd_ids():
+        if not osd_goal_states:
+            osd_states[osd_num] = get_osd_state(osd_num)
+        else:
+            osd_states[osd_num] = get_osd_state(
+                osd_num,
+                osd_goal_state=osd_goal_states[osd_num])
+    return osd_states
+
+
+@contextmanager
+def maintain_osd_state(osd_num):
+    """Ensure the state of an OSD is maintained.
+
+    Ensures the state of an OSD is the same at the end of a block nested
+    in a with statement as it was at the beginning of the block.
+
+    :param osd_num: the osd id to maintain state for
+    """
+    osd_state = get_osd_state(osd_num)
+    try:
+        yield
+    finally:
+        get_osd_state(osd_num, osd_goal_state=osd_state)
+
+
+@contextmanager
+def maintain_all_osd_states():
+    """Ensure all local OSD states are maintained.
+
+    Ensures the states of all local OSDs are the same at the end of a
+    block nested in a with statement as they were at the beginning of
+    the block.
+    """
+    osd_states = get_all_osd_states()
+    try:
+        yield
+    finally:
+        get_all_osd_states(osd_goal_states=osd_states)
+
+
 def list_pools(client='admin'):
     """This will list the current pools that Ceph has

@@ -2800,6 +2900,7 @@ def dirs_need_ownership_update(service):
     ('jewel', 'luminous'),
     ('luminous', 'mimic'),
     ('mimic', 'nautilus'),
+    ('nautilus', 'octopus'),
 ])

 # Map UCA codenames to ceph codenames
@@ -2816,6 +2917,7 @@ def dirs_need_ownership_update(service):
     'rocky': 'mimic',
     'stein': 'mimic',
     'train': 'nautilus',
+    'ussuri': 'octopus',
 }

@@ -2968,3 +3070,57 @@ def osd_noout(enable):
     except subprocess.CalledProcessError as e:
         log(e)
         raise
+
+
+class OSDConfigSetError(Exception):
+    """Error occurred applying OSD settings."""
+    pass
+
+
+def apply_osd_settings(settings):
+    """Apply the provided osd settings.
+
+    Apply the provided settings to all local OSDs unless the settings are
+    already present. Settings stop being applied on encountering an error.
+
+    :param settings: dict. Dictionary of settings to apply.
+    :returns: bool. True if commands ran successfully.
+    :raises: OSDConfigSetError
+    """
+    current_settings = {}
+    base_cmd = 'ceph daemon osd.{osd_id} config --format=json'
+    get_cmd = base_cmd + ' get {key}'
+    set_cmd = base_cmd + ' set {key} {value}'
+
+    def _get_cli_key(key):
+        return(key.replace(' ', '_'))
+    # Retrieve the current values to check keys are correct and to make this a
+    # noop if settings are already applied.
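+    # Illustrative shape of the admin-socket round trip performed by the
+    # loop below, assuming a local osd id of 0 and
+    # settings = {'osd heartbeat grace': 20}:
+    #   ceph daemon osd.0 config --format=json get osd_heartbeat_grace
+    #     -> {"osd_heartbeat_grace": "20"}
+    # The corresponding "config set" is only issued when the current value
+    # differs from the requested one.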
+ for osd_id in get_local_osd_ids(): + for key, value in sorted(settings.items()): + cli_key = _get_cli_key(key) + cmd = get_cmd.format(osd_id=osd_id, key=cli_key) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error retrieving osd setting: {}".format(out['error']), + level=ERROR) + return False + current_settings[key] = out[cli_key] + settings_diff = { + k: v + for k, v in settings.items() + if str(v) != str(current_settings[k])} + for key, value in sorted(settings_diff.items()): + log("Setting {} to {}".format(key, value), level=DEBUG) + cmd = set_cmd.format( + osd_id=osd_id, + key=_get_cli_key(key), + value=value) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error applying osd setting: {}".format(out['error']), + level=ERROR) + raise OSDConfigSetError + return True From 6faef85121f366318ec675e378814a938e39112d Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 7 May 2020 11:12:53 +0100 Subject: [PATCH 1950/2699] Remove cinder spec from osd-devices The test specification doesn't need the 'cinder' spec as part of the osd devices, and this patchset brings it in line with the other bundles. Change-Id: If13f38eea33f3d83133396e01eff0b36eb90e82a --- ceph-mon/tests/bundles/bionic-ussuri.yaml | 2 +- ceph-mon/tests/bundles/focal-ussuri.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/tests/bundles/bionic-ussuri.yaml b/ceph-mon/tests/bundles/bionic-ussuri.yaml index 62d50d12..5fc27f81 100644 --- a/ceph-mon/tests/bundles/bionic-ussuri.yaml +++ b/ceph-mon/tests/bundles/bionic-ussuri.yaml @@ -5,7 +5,7 @@ applications: num_units: 3 series: bionic storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: osd-devices: '/dev/test-non-existent' source: cloud:bionic-ussuri diff --git a/ceph-mon/tests/bundles/focal-ussuri.yaml b/ceph-mon/tests/bundles/focal-ussuri.yaml index 4e0582ef..73be35b5 100644 --- a/ceph-mon/tests/bundles/focal-ussuri.yaml +++ b/ceph-mon/tests/bundles/focal-ussuri.yaml @@ -55,7 +55,7 @@ applications: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: '10G' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' From eb6e31909a50fd3a841d469b57251774379c3228 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 8 May 2020 09:12:07 +0000 Subject: [PATCH 1951/2699] Misc fixes --- ceph-iscsi/.gitmodules | 9 +- ceph-iscsi/charm-init.sh | 5 +- ceph-iscsi/mod/charm-helpers | 2 +- ceph-iscsi/mod/interface-ceph-client | 1 - ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-interface-ceph-client | 1 + ceph-iscsi/mod/ops-interface-tls-certificates | 1 + ceph-iscsi/src/charm.py | 287 ++++++++++-------- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 18 +- ceph-iscsi/src/interface_tls_certificates.py | 116 ------- ceph-iscsi/templates/ceph.conf | 2 +- ceph-iscsi/templates/iscsi-gateway.cfg | 24 +- ceph-iscsi/tests/bundles/focal.yaml | 4 +- .../unit_tests/test_ceph_iscsi_charm.py | 149 ++++++--- 14 files changed, 304 insertions(+), 317 deletions(-) delete mode 160000 ceph-iscsi/mod/interface-ceph-client create mode 160000 ceph-iscsi/mod/ops-interface-ceph-client create mode 160000 ceph-iscsi/mod/ops-interface-tls-certificates delete mode 100644 ceph-iscsi/src/interface_tls_certificates.py diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules index 6386c4f9..41ffcfcd 100644 --- a/ceph-iscsi/.gitmodules +++ b/ceph-iscsi/.gitmodules @@ -1,12 +1,15 @@ [submodule 
"mod/operator"] path = mod/operator url = https://github.com/canonical/operator -[submodule "mod/interface-ceph-client"] - path = mod/interface-ceph-client - url = https://github.com/gnuoy/oper-interface-ceph-client.git +[submodule "mod/ops-interface-ceph-client"] + path = mod/ops-interface-ceph-client + url = https://github.com/openstack-charmers/ops-interface-ceph-client.git [submodule "mod/ops-openstack"] path = mod/ops-openstack url = https://github.com/openstack-charmers/ops-openstack.git [submodule "mod/charm-helpers"] path = mod/charm-helpers url = https://github.com/juju/charm-helpers.git +[submodule "mod/ops-interface-tls-certificates"] + path = mod/ops-interface-tls-certificates + url = https://github.com/openstack-charmers/ops-interface-tls-certificates.git diff --git a/ceph-iscsi/charm-init.sh b/ceph-iscsi/charm-init.sh index 64372359..06fa76c9 100755 --- a/ceph-iscsi/charm-init.sh +++ b/ceph-iscsi/charm-init.sh @@ -14,11 +14,14 @@ if [[ -z "$UPDATE" ]]; then else git -C mod/operator pull origin master git -C mod/ops-openstack pull origin master + git -C mod/ops-interface-ceph-client pull origin master +# git -C mod/ops-interface-tls-certificates pull origin master git -C mod/charm-helpers pull origin master pip install -t lib -r build-requirements.txt --upgrade fi ln -f -t lib -s ../mod/operator/ops -ln -f -t lib -s ../mod/interface-ceph-client/interface_ceph_client.py +ln -f -t lib -s ../mod/ops-interface-ceph-client/interface_ceph_client.py ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py ln -f -t lib -s ../mod/ops-openstack/adapters.py +ln -f -t lib -s ../mod/ops-interface-tls-certificates/ca_client.py diff --git a/ceph-iscsi/mod/charm-helpers b/ceph-iscsi/mod/charm-helpers index f3f36f85..b4aa4e33 160000 --- a/ceph-iscsi/mod/charm-helpers +++ b/ceph-iscsi/mod/charm-helpers @@ -1 +1 @@ -Subproject commit f3f36f85f54380a651ba05972e78467ad22468e3 +Subproject commit b4aa4e3398e7406dbf0f76a23f91afa6a72aed1a diff --git a/ceph-iscsi/mod/interface-ceph-client b/ceph-iscsi/mod/interface-ceph-client deleted file mode 160000 index 4f84bcad..00000000 --- a/ceph-iscsi/mod/interface-ceph-client +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 4f84bcad2d4b3ea415b5eccc850c85b9f4fc172e diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index bb9b534e..47af0fc9 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit bb9b534e68b04286eef78a36ed930815ffb1968d +Subproject commit 47af0fc9f86dc5b16e88a83a00377864a1541734 diff --git a/ceph-iscsi/mod/ops-interface-ceph-client b/ceph-iscsi/mod/ops-interface-ceph-client new file mode 160000 index 00000000..30213fa6 --- /dev/null +++ b/ceph-iscsi/mod/ops-interface-ceph-client @@ -0,0 +1 @@ +Subproject commit 30213fa66f979eb93473242297677ea6554984df diff --git a/ceph-iscsi/mod/ops-interface-tls-certificates b/ceph-iscsi/mod/ops-interface-tls-certificates new file mode 160000 index 00000000..d03a251e --- /dev/null +++ b/ceph-iscsi/mod/ops-interface-tls-certificates @@ -0,0 +1 @@ +Subproject commit d03a251e87f02528789af0eb4cce88e471847e68 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index bef471b3..ab76b07c 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -7,11 +7,15 @@ import sys import string import secrets +from pathlib import Path sys.path.append('lib') from ops.framework import ( StoredState, + EventSource, + EventBase, + ObjectEvents, ) from ops.main import main import ops.model @@ -19,12 +23,12 @@ import charmhelpers.core.templating as ch_templating 
import interface_ceph_client import interface_ceph_iscsi_peer -import interface_tls_certificates +import ca_client import adapters import ops_openstack import gwcli_client - +import cryptography.hazmat.primitives.serialization as serialization logger = logging.getLogger(__name__) @@ -71,7 +75,10 @@ def __init__(self, relation): @property def enable_tls(self): - return bool(self.relation.application_certs) + try: + return bool(self.relation.application_certificate) + except ca_client.CAClientError: + return False class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): @@ -92,44 +99,81 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): "mon", "allow *", "mgr", "allow r"] - RESTART_MAP = { - '/etc/ceph/ceph.conf': ['rbd-target-api', 'rbd-target-gw'], - '/etc/ceph/iscsi-gateway.cfg': ['rbd-target-api'], - '/etc/ceph/ceph.client.ceph-iscsi.keyring': ['rbd-target-api']} DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" REQUIRED_RELATIONS = ['ceph-client', 'cluster'] - # Two has been tested before is probably fine too but needs + + # Two has been tested but four is probably fine too but needs # validating ALLOWED_UNIT_COUNTS = [2] + + CEPH_CONFIG_PATH = Path('/etc/ceph') + CEPH_ISCSI_CONFIG_PATH = CEPH_CONFIG_PATH / 'iscsi' + GW_CONF = CEPH_CONFIG_PATH / 'iscsi-gateway.cfg' + CEPH_CONF = CEPH_ISCSI_CONFIG_PATH / 'ceph.conf' + GW_KEYRING = CEPH_ISCSI_CONFIG_PATH / 'ceph.client.ceph-iscsi.keyring' + TLS_KEY_PATH = CEPH_CONFIG_PATH / 'iscsi-gateway.key' + TLS_PUB_KEY_PATH = CEPH_CONFIG_PATH / 'iscsi-gateway-pub.key' + TLS_CERT_PATH = CEPH_CONFIG_PATH / 'iscsi-gateway.crt' + TLS_KEY_AND_CERT_PATH = CEPH_CONFIG_PATH / 'iscsi-gateway.pem' + TLS_CA_CERT_PATH = Path( + '/usr/local/share/ca-certificates/vault_ca_cert.crt') + + GW_SERVICES = ['rbd-target-api', 'rbd-target-gw'] + + RESTART_MAP = { + str(GW_CONF): GW_SERVICES, + str(CEPH_CONF): GW_SERVICES, + str(GW_KEYRING): GW_SERVICES} + release = 'default' def __init__(self, framework, key): super().__init__(framework, key) logging.info("Using {} class".format(self.release)) - self.state.set_default(target_created=False) - self.state.set_default(enable_tls=False) - self.state.set_default(additional_trusted_ips=[]) + self.state.set_default( + target_created=False, + enable_tls=False, + additional_trusted_ips=[]) self.ceph_client = interface_ceph_client.CephClientRequires( self, 'ceph-client') self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( self, 'cluster') - self.tls = interface_tls_certificates.TlsRequires(self, "certificates") + self.ca_client = ca_client.CAClient( + self, + 'certificates') self.adapters = CephISCSIGatewayAdapters( - (self.ceph_client, self.peers, self.tls), + (self.ceph_client, self.peers, self.ca_client), + self) + self.framework.observe( + self.ceph_client.on.broker_available, + self.request_ceph_pool) + self.framework.observe( + self.ceph_client.on.pools_available, + self.render_config) + self.framework.observe( + self.peers.on.has_peers, + self) + self.framework.observe( + self.ca_client.on.tls_app_config_ready, + self.on_tls_app_config_ready) + self.framework.observe( + self.ca_client.on.ca_available, + self.on_ca_available) + self.framework.observe( + self.on.config_changed, + self.render_config) + self.framework.observe( + self.on.upgrade_charm, + self.render_config) + self.framework.observe( + self.on.create_target_action, + self) + self.framework.observe( + self.on.add_trusted_ip_action, self) - self.framework.observe(self.on.ceph_client_relation_joined, self) - 
self.framework.observe(self.ceph_client.on.pools_available, self) - self.framework.observe(self.peers.on.has_peers, self) - self.framework.observe(self.peers.on.ready_peers, self) - self.framework.observe(self.on.create_target_action, self) - self.framework.observe(self.on.add_trusted_ip_action, self) - self.framework.observe(self.on.certificates_relation_joined, self) - self.framework.observe(self.on.certificates_relation_changed, self) - self.framework.observe(self.on.config_changed, self) - self.framework.observe(self.on.upgrade_charm, self) def on_install(self, event): if ch_host.is_container(): @@ -138,72 +182,6 @@ def on_install(self, event): else: self.install_pkgs() - def on_add_trusted_ip_action(self, event): - self.state.additional_trusted_ips.append( - event.params['ips'].split(' ')) - logging.info(self.state.additional_trusted_ips) - - def on_create_target_action(self, event): - gw_client = gwcli_client.GatewayClient() - target = event.params.get('iqn', self.DEFAULT_TARGET) - gateway_units = event.params.get( - 'gateway-units', - [u for u in self.peers.ready_peer_details.keys()]) - gw_client.create_target(target) - for gw_unit, gw_config in self.peers.ready_peer_details.items(): - added_gateways = [] - if gw_unit in gateway_units: - gw_client.add_gateway_to_target( - target, - gw_config['ip'], - gw_config['fqdn']) - added_gateways.append(gw_unit) - gw_client.create_pool( - event.params['pool-name'], - event.params['image-name'], - event.params['image-size']) - gw_client.add_client_to_target( - target, - event.params['client-initiatorname']) - gw_client.add_client_auth( - target, - event.params['client-initiatorname'], - event.params['client-username'], - event.params['client-password']) - gw_client.add_disk_to_client( - target, - event.params['client-initiatorname'], - event.params['pool-name'], - event.params['image-name']) - event.set_results({'iqn': target}) - - def setup_default_target(self): - gw_client = gwcli_client.GatewayClient() - gw_client.create_target(self.DEFAULT_TARGET) - for gw_unit, gw_config in self.peers.ready_peer_details.items(): - gw_client.add_gateway_to_target( - self.DEFAULT_TARGET, - gw_config['ip'], - gw_config['fqdn']) - self.state.target_created = True - - def on_ready_peers(self, event): - if not self.unit.is_leader(): - logging.info("Leader should do setup") - return - if not self.state.is_started: - logging.info("Cannot perform setup yet, not started") - event.defer() - return - if self.state.target_created: - logging.info("Initial target setup already complete") - return - else: - # This appears to race and sometime runs before the - # peer is 100% ready. There is probably little value - # in this anyway so may just remove it. 
- # self.setup_default_target() - return def on_has_peers(self, event): logging.info("Unit has peers") @@ -213,7 +191,7 @@ def on_has_peers(self, event): password = ''.join(secrets.choice(alphabet) for i in range(8)) self.peers.set_admin_password(password) - def on_ceph_client_relation_joined(self, event): + def request_ceph_pool(self, event): logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( self.model.config['rbd-metadata-pool']) @@ -225,22 +203,23 @@ def on_ceph_client_relation_joined(self, event): 'osd heartbeat grace': 20, 'osd heartbeat interval': 5}) - def on_config_changed(self, event): - if self.state.is_started: - self.on_pools_available(event) - self.on_ceph_client_relation_joined(event) - - def on_upgrade_charm(self, event): - if self.state.is_started: - self.on_pools_available(event) - self.on_ceph_client_relation_joined(event) + def refresh_request(self, event): + self.render_config(event) + self.request_ceph_pool(event) - def on_pools_available(self, event): - logging.info("on_pools_available") + def render_config(self, event): if not self.peers.admin_password: logging.info("Defering setup") event.defer() return + if not self.ceph_client.pools_available: + logging.info("Defering setup") + event.defer() + return + + self.CEPH_ISCSI_CONFIG_PATH.mkdir( + exist_ok=True, + mode=0o750) def daemon_reload_and_restart(service_name): subprocess.check_call(['systemctl', 'daemon-reload']) @@ -250,21 +229,21 @@ def daemon_reload_and_restart(service_name): 'rbd-target-api': daemon_reload_and_restart} @ch_host.restart_on_change(self.RESTART_MAP, restart_functions=rfuncs) - def render_configs(): + def _render_configs(): for config_file in self.RESTART_MAP.keys(): ch_templating.render( os.path.basename(config_file), config_file, self.adapters) logging.info("Rendering config") - render_configs() + _render_configs() logging.info("Setting started state") self.peers.announce_ready() self.state.is_started = True self.update_status() logging.info("on_pools_available: status updated") - def on_certificates_relation_joined(self, event): + def on_ca_available(self, event): addresses = set() for binding_name in ['public', 'cluster']: binding = self.model.get_binding(binding_name) @@ -272,35 +251,36 @@ def on_certificates_relation_joined(self, event): addresses.add(binding.network.bind_address) sans = [str(s) for s in addresses] sans.append(socket.gethostname()) - self.tls.request_application_cert(socket.getfqdn(), sans) - - def on_certificates_relation_changed(self, event): - app_certs = self.tls.application_certs - if not all([self.tls.root_ca_cert, app_certs]): - return - if self.tls.chain: - # Append chain file so that clients that trust the root CA will - # trust certs signed by an intermediate in the chain - ca_cert_data = self.tls.root_ca_cert + os.linesep + self.tls.chain - else: - ca_cert_data = self.tls.root_ca_cert - pem_data = app_certs['cert'] + os.linesep + app_certs['key'] - tls_files = { - '/etc/ceph/iscsi-gateway.crt': app_certs['cert'], - '/etc/ceph/iscsi-gateway.key': app_certs['key'], - '/etc/ceph/iscsi-gateway.pem': pem_data, - '/usr/local/share/ca-certificates/vault_ca_cert.crt': ca_cert_data} - for tls_file, tls_data in sorted(tls_files.items()): - with open(tls_file, 'w') as f: - f.write(tls_data) + self.ca_client.request_application_certificate(socket.getfqdn(), sans) + + def on_tls_app_config_ready(self, event): + self.TLS_KEY_PATH.write_bytes( + self.ca_client.application_key.private_bytes( + encoding=serialization.Encoding.PEM, + 
format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption())) + self.TLS_CERT_PATH.write_bytes( + self.ca_client.application_certificate.public_bytes( + encoding=serialization.Encoding.PEM)) + self.TLS_CA_CERT_PATH.write_bytes( + self.ca_client.ca_certificate.public_bytes( + encoding=serialization.Encoding.PEM)) + self.TLS_KEY_AND_CERT_PATH.write_bytes( + self.ca_client.application_certificate.public_bytes( + encoding=serialization.Encoding.PEM) + + b'\n' + + self.ca_client.application_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + ) + self.TLS_PUB_KEY_PATH.write_bytes( + self.ca_client.application_key.public_key().public_bytes( + format=serialization.PublicFormat.SubjectPublicKeyInfo, + encoding=serialization.Encoding.PEM)) subprocess.check_call(['update-ca-certificates']) - cert_out = subprocess.check_output( - ('openssl x509 -inform pem -in /etc/ceph/iscsi-gateway.pem ' - '-pubkey -noout').split()) - with open('/etc/ceph/iscsi-gateway-pub.key', 'w') as f: - f.write(cert_out.decode('UTF-8')) self.state.enable_tls = True - self.on_pools_available(event) + self.refresh_request(event) def custom_status_check(self): if ch_host.is_container(): @@ -313,6 +293,51 @@ def custom_status_check(self): return False return True + # Actions + + def on_add_trusted_ip_action(self, event): + if self.unit.is_leader(): + self.state.additional_trusted_ips = event.params.get('ips') + logging.info(len(self.state.additional_trusted_ips)) + self.peers.set_allowed_ips( + self.state.additional_trusted_ips) + else: + event.fail("Action must be run on leader") + + def on_create_target_action(self, event): + gw_client = gwcli_client.GatewayClient() + target = event.params.get('iqn', self.DEFAULT_TARGET) + gateway_units = event.params.get( + 'gateway-units', + [u for u in self.peers.ready_peer_details.keys()]) + gw_client.create_target(target) + for gw_unit, gw_config in self.peers.ready_peer_details.items(): + added_gateways = [] + if gw_unit in gateway_units: + gw_client.add_gateway_to_target( + target, + gw_config['ip'], + gw_config['fqdn']) + added_gateways.append(gw_unit) + gw_client.create_pool( + event.params['pool-name'], + event.params['image-name'], + event.params['image-size']) + gw_client.add_client_to_target( + target, + event.params['client-initiatorname']) + gw_client.add_client_auth( + target, + event.params['client-initiatorname'], + event.params['client-username'], + event.params['client-password']) + gw_client.add_disk_to_client( + target, + event.params['client-initiatorname'], + event.params['pool-name'], + event.params['image-name']) + event.set_results({'iqn': target}) + @ops_openstack.charm_class class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase): diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index 20eba6d9..e8a9df20 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import json import logging import socket @@ -31,6 +32,7 @@ class CephISCSIGatewayPeers(Object): PASSWORD_KEY = 'admin_password' READY_KEY = 'gateway_ready' FQDN_KEY = 'gateway_fqdn' + ALLOWED_IPS_KEY = 'allowed_ips' def __init__(self, charm, relation_name): super().__init__(charm, relation_name) @@ -50,6 +52,11 @@ def set_admin_password(self, password): logging.info("Setting admin password") 
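         # Only the leader can write app-level relation data; callers guard
         # set_admin_password() with unit.is_leader() (see on_has_peers).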
self.peer_rel.data[self.peer_rel.app][self.PASSWORD_KEY] = password + def set_allowed_ips(self, ips): + logging.info("Setting allowed ips") + ip_str = json.dumps(ips) + self.peer_rel.data[self.peer_rel.app][self.ALLOWED_IPS_KEY] = ip_str + def announce_ready(self): logging.info("announcing ready") self.peer_rel.data[self.this_unit][self.READY_KEY] = 'True' @@ -90,9 +97,18 @@ def cluster_bind_address(self): @property def admin_password(self): - # https://github.com/canonical/operator/issues/148 + if not self.peer_rel: + return None return self.peer_rel.data[self.peer_rel.app].get(self.PASSWORD_KEY) + @property + def allowed_ips(self): + if not self.peer_rel: + return None + ip_str = self.peer_rel.data[self.peer_rel.app].get( + self.ALLOWED_IPS_KEY) + return json.loads(ip_str) + @property def peer_addresses(self): addresses = [self.cluster_bind_address] diff --git a/ceph-iscsi/src/interface_tls_certificates.py b/ceph-iscsi/src/interface_tls_certificates.py deleted file mode 100644 index 0e12adb9..00000000 --- a/ceph-iscsi/src/interface_tls_certificates.py +++ /dev/null @@ -1,116 +0,0 @@ -import json -from ops.framework import ( - Object, -) - - -class TlsRequires(Object): - def __init__(self, parent, key): - super().__init__(parent, key) - self.name = self.relation_name = key - - def request_application_cert(self, cn, sans): - """ - Request a client certificate and key be generated for the given - common name (`cn`) and list of alternative names (`sans`). - - This can be called multiple times to request more than one client - certificate, although the common names must be unique. If called - again with the same common name, it will be ignored. - """ - relations = self.framework.model.relations[self.name] - if not relations: - return - # assume we'll only be connected to one provider - relation = relations[0] - unit = self.framework.model.unit - requests = relation.data[unit].get('application_cert_requests', '{}') - requests = json.loads(requests) - requests[cn] = {'sans': sans} - relation.data[unit]['application_cert_requests'] = json.dumps( - requests, - sort_keys=True) - - @property - def root_ca_cert(self): - """ - Root CA certificate. - """ - # only the leader of the provider should set the CA, or all units - # had better agree - for relation in self.framework.model.relations[self.name]: - for unit in relation.units: - if relation.data[unit].get('ca'): - return relation.data[unit].get('ca') - - @property - def chain(self): - """ - Root CA certificate. - """ - # only the leader of the provider should set the CA, or all units - # had better agree - for relation in self.framework.model.relations[self.name]: - for unit in relation.units: - if relation.data[unit].get('chain'): - return relation.data[unit].get('chain') - - @property - def server_certs(self): - """ - List of [Certificate][] instances for all available server certs. - """ - unit_name = self.framework.model.unit.name.replace('/', '_') - field = '{}.processed_requests'.format(unit_name) - - for relation in self.framework.model.relations[self.name]: - for unit in relation.units: - if field not in relation.data[unit]: - continue - certs_data = relation.data[unit][field] - if not certs_data: - continue - certs_data = json.loads(certs_data) - if not certs_data: - continue - return list(certs_data.values())[0] - - @property - def client_certs(self): - """ - List of [Certificate][] instances for all available client certs. 
- """ - unit_name = self.framework.model.unit.name.replace('/', '_') - field = '{}.processed_client_requests'.format(unit_name) - - for relation in self.framework.model.relations[self.name]: - for unit in relation.units: - if field not in relation.data[unit]: - continue - certs_data = relation.data[unit][field] - if not certs_data: - continue - certs_data = json.loads(certs_data) - if not certs_data: - continue - return list(certs_data.values())[0] - - @property - def application_certs(self): - """ - List of [Certificate][] instances for all available application certs. - """ - unit_name = self.framework.model.unit.name.replace('/', '_') - field = '{}.processed_application_requests'.format(unit_name) - - for relation in self.framework.model.relations[self.name]: - for unit in relation.units: - if field not in relation.data[unit]: - continue - certs_data = relation.data[unit][field] - if not certs_data: - continue - certs_data = json.loads(certs_data) - if not certs_data: - continue - return certs_data['app_data'] diff --git a/ceph-iscsi/templates/ceph.conf b/ceph-iscsi/templates/ceph.conf index a27fd58f..5d0227bc 100644 --- a/ceph-iscsi/templates/ceph.conf +++ b/ceph-iscsi/templates/ceph.conf @@ -6,7 +6,7 @@ [global] auth supported = {{ ceph_client.auth_supported }} mon host = {{ ceph_client.mon_hosts }} -keyring = /etc/ceph/$cluster.$name.keyring +keyring = /etc/ceph/iscsi/$cluster.$name.keyring [client.ceph-iscsi] client mount uid = 0 diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 3c068427..c8db2bb5 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -1,32 +1,14 @@ [config] -# Name of the Ceph storage cluster. A suitable Ceph configuration file allowing -# # access to the Ceph storage cluster from the gateway node is required, if not -# # colocated on an OSD node. logger_level = DEBUG cluster_name = ceph cluster_client_name = client.ceph-iscsi pool = {{ options.rbd_metadata_pool }} -# -# # Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph -# # drectory and reference the filename here -#gateway_keyring = ceph.client.admin.keyring + gateway_keyring = ceph.client.ceph-iscsi.keyring -# -# -# # API settings. -# # The API supports a number of options that allow you to tailor it to your -# # local environment. If you want to run the API under https, you will need to -# # create cert/key files that are compatible for each iSCSI gateway node, that is -# # not locked to a specific node. SSL cert and key files *must* be called -# # 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory -# # on *each* gateway node. With the SSL files in place, you can use 'api_secure = true' -# # to switch to https mode. 
-# -# # To support the API, the bear minimum settings are: +ceph_config_dir = /etc/ceph/iscsi + api_secure = {{ certificates.enable_tls }} api_user = admin api_password = {{ cluster.admin_password }} api_port = 5000 trusted_ip_list = {{ cluster.gw_hosts }} -# -# diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index cc7d12a4..598a5ad1 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -30,7 +30,7 @@ applications: - '0' - '1' ceph-osd: - charm: cs:~gnuoy/ceph-osd-5 + charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 storage: osd-devices: 'cinder,10G' @@ -41,7 +41,7 @@ applications: - '1' - '2' ceph-mon: - charm: cs:~gnuoy/ceph-mon-6 + charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index b3352d01..2f432507 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import json import unittest import sys @@ -31,6 +32,78 @@ import charm +TEST_CA = '''-----BEGIN CERTIFICATE----- +MIIC8TCCAdmgAwIBAgIUIchLT42Gy3QexrQbppgWb+xF2SgwDQYJKoZIhvcNAQEL +BQAwGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MB4XDTIwMDUwNTA5NDIzMVoX +DTIwMDYwNDA5NDIzMlowGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA54oZkgz+xpaM8AKfHTT19lwqvVSr +W3uZiyyiNAWBX+Ru5/5RqQONKmjPqU3Bh966IBxo8hGYsk7MJ3LobvuG6j497SUc +nn4JECm/mOKGeQvSSGnor93ropyWAQDQ3U1JVxV/K4sw2EpwwxfaJAM4L5rVi9EK +TsN23cPI81DKLuDxeXGGDPXMgQuTqfGD74jk6oTpfEHNmQB1Lcj+t+HxQqyoHyo5 +RPNRpntgPAvrF8i1ktJ/EH4GJxSBwm7098JcMgQSif9PHzL0UKehC2mlNX7ljGQ+ +eOLo6XNHYnq6DfxO6c3TbOIYt7VSc8K3IG500/4IzIT3+mtZ3rrM3mQWDwIDAQAB +oy8wLTAaBgNVHREEEzARgg9EaXZpbmVBdXRob3JpdHkwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAQEAfzQSUzfaUv5Q4Eqz2YiWFx2zRYi0mUjYrGf9 +1qcprgpAq7F72+ed3uLGEmMr53+wgL4XdzLnSZwpYRFNBI7/t6hU3kxw9fJC5wMg +LHLdNlNqXAfoGVVTjcWPiQDF6tguccqyE3UWksl+2fncgkkcUpH4IP0AZVYlCsrz +mzs5P3ATpdTE1BZiw4WEiE4+N8ZC7Rcz0icfCEbKJduMkkxpJlvp5LwSsmtrpS3v +IZvomDHx8ypr+byzUTsfbAExdXVpctkG/zLMAi6/ZApO8GlD8ga8BUn2NGfBO5Q8 +28kEjS5DV835Re4hHE6pTC4HEjq0D2r1/4OG7ijt8emO5XPoMg== +-----END CERTIFICATE-----''' + +TEST_APP_CERT = '''-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIUX5lsqmlS3aFLw7+IqSqadI7W1yswDQYJKoZIhvcNAQEL +BQAwRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENlcnRpZmljYXRlIEF1 +dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTAeFw0yMDA1MDUwOTQyMTdaFw0yMTA1 +MDUwODQyNDdaMA4xDDAKBgNVBAMTA2FwcDCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALfmMzGbbShmQGduZImaGsJWd6vGriVwgYlIV60Kb1MLxuLvMyzV +tBseRH1izKgPDEmMRafU9N4DC0jRb+04APBM8QBWEDrrYgRQQSNxlCDVMn4Q4iHO +72FwCqI1HuW0R5J3yik4FkW3Kb8Uq5KDsKWqTLtaBW5X40toi1bkyFTnRZ6/3vmt +9arAfqmZyXlZK3rN+uiznLx8/rYU5umkicNGfDcWI37wjdYvK/tIE79vPom5VhGb +R+rz+hri7JmiaYkzrTWWibyjPNK0aGHa5OUIiFJfAtfyjoT1d/pxwS301BWLicw1 +vSzCJcTwpkzh2EWvuquK2sUjgHNR1qAkGIECAwEAAaOCARMwggEPMA4GA1UdDwEB +/wQEAwIDqDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYE +FL0B0hMaFwG0I0WR4CiOZnrqRHoLMEkGCCsGAQUFBwEBBD0wOzA5BggrBgEFBQcw +AoYtaHR0cDovLzE3Mi4yMC4wLjE5OjgyMDAvdjEvY2hhcm0tcGtpLWxvY2FsL2Nh +MDMGA1UdEQQsMCqCA2FwcIIDYXBwgghhcHB1bml0MYIIYXBwdW5pdDKHBKwAAAGH +BKwAAAIwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovLzE3Mi4yMC4wLjE5OjgyMDAv +djEvY2hhcm0tcGtpLWxvY2FsL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAbf6kIurd +pBs/84YD59bgeytlo8RatUzquwCRgRSv6N81+dYFBHtEVOoLwy/4wJAH2uMSKK+/ 
+C13vTBj/cx+SxWSIccPS0rglwEKhRF/u3n9hrFAL3QMLQPEXAJ5rJtapZ7a8uIWy +bChTMhoL4bApCXG+SH4mbhkD6SWQ1zPgfXD4ZiVtjEVIdyn63/fbNFUfhFKba8BE +wQUYw0yWq0/8ILq/WPyjKBvhSinIauy+ybdzaDMEg0Grq1n0K5l/WyK+t9tQd+UG +cLjamd6EKZ2OvOxZN6/cJlHDY2NKfjGF6KhQ5D2cseYK7dhOQ9AFjUCB/NgIAH9D +8vVp8VJOx6plOw== +-----END CERTIFICATE-----''' + +TEST_APP_KEY = '''-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAt+YzMZttKGZAZ25kiZoawlZ3q8auJXCBiUhXrQpvUwvG4u8z +LNW0Gx5EfWLMqA8MSYxFp9T03gMLSNFv7TgA8EzxAFYQOutiBFBBI3GUINUyfhDi +Ic7vYXAKojUe5bRHknfKKTgWRbcpvxSrkoOwpapMu1oFblfjS2iLVuTIVOdFnr/e ++a31qsB+qZnJeVkres366LOcvHz+thTm6aSJw0Z8NxYjfvCN1i8r+0gTv28+iblW +EZtH6vP6GuLsmaJpiTOtNZaJvKM80rRoYdrk5QiIUl8C1/KOhPV3+nHBLfTUFYuJ +zDW9LMIlxPCmTOHYRa+6q4raxSOAc1HWoCQYgQIDAQABAoIBAD92GUSNNmYyoxcO +aXNy0rktza5hqccRxCHz7Q2yBCjMb53wneBi/vw8vbXnWmjEiKD43zDDtJzIwCQo +4k8ifHBwnNpY2ND8WZ7TcycgEtYhvIL0oJS6LLGbUJAZdMggJnLNE96VlFoKk0V1 +hJ/TAiqpUkF1F1q0yaNEOJGL8fYaI5Mz1pU+rspxS2uURFYGcD78Ouda5Pruwcp3 +A0Sbo+5P0FZRy79zpZbIzlvcS9R7wKuDJExCXXCsoZ+G0BWwTJPsDhkmcuXdS7f3 +3k3VO4Y8rcsOIHtI0Gj38yhO6giDjPeZWmXF6h7+zSWPaZydswTqtyS2BbvUmE3N +t/HYCOECgYEA2AYQZqAeFk5i7Qnb80pG9q1THZOM4V/FQsyfb9Bzw+nANP6LMd3D +tnY7BUNj0vTJVy/wnwFSmryQn3OqsxHYbOaor9xjuCauAGzp/4cj0anTySz0pZiQ +TzVepB35bj8ghRsQ1TO+7FQtMMZQGrNf1i6e3p9+hpKUA6ZwP0OEbpMCgYEA2e5E +Uqqj1u0pnUAeXp/2VbQS4rmxUrRsbdbiyoypNJOp+Olfi2DjQNgji0XDBdTLhDNv +nFtHY7TW4HJrwVAAqBlYKkunf6zGlP3iEGhk7RF1LSyGZXjfLACe7kzqlAx34Ue9 +9ynkesNKeT8kOOCC08llHuInMjfgfN0c7jWYNRsCgYEAgzBrlWd33iQMf9eU89MP +9Y6dA0EwNU5sBX0u9kCpjTjPuV88OTRsPsreXPvoC50NCR3cCzRKbh5F1g/wgn87 +6CbMGsDE7njPAwMhuEThw9pW+72JdWeJfBD1QMXTTNiZbzxYpKGgOPWF3DETRKPa +d8AoSxqhRCiQKwdQ85qVOnECgYAu6dfTY+B5N/ypWVAwVocU0/rsy8ScZTKiQov3 +xmf2ZYNFjhd/TZAeOWkNZishajmVb+0q34tyr09Cad9AchRyG2KbWEXqeisVj8HG +fnKbhhKPcvJLjcWdF1UfP3eP/08fM+508pO4yamSiEEn7Uy8grI9/7koWlb9Cixc +KzVk2QKBgQCdA3eoJHu4nTHRNgcvU3pxbRU4HQV8e+Hiw1tcxjprkACrNVvd7wZS +wULKjMb8z0RZyTBXLdNw3YKYOk/B7e/e9D+Zve4PTEL23Fcdt532x/7hBQ+7o6/4 +7RxsGx5/PXZI0/YKMKk9hsrdMl4/UAd0izvwPCQbB3eisuZYU/i8Jw== +-----END RSA PRIVATE KEY-----''' + class CharmTestCase(unittest.TestCase): @@ -57,6 +130,7 @@ class TestCephISCSIGatewayCharmBase(CharmTestCase): 'ch_templating', 'gwcli_client', 'subprocess', + 'os', ] def setUp(self): @@ -67,15 +141,9 @@ def setUp(self): self.gwc = MagicMock() self.gwcli_client.GatewayClient.return_value = self.gwc - # BEGIN: Workaround until - # https://github.com/canonical/operator/pull/196 lands + # BEGIN: Workaround until network_get is implemented class _TestingOPSModelBackend(_TestingModelBackend): - def relation_ids(self, relation_name): - return self._relation_ids_map.get(relation_name, []) - - # Hardcoded until network_get is implemented in - # _TestingModelBackend def network_get(self, endpoint_name, relation_id=None): network_data = { 'bind-addresses': [{ @@ -88,7 +156,7 @@ def network_get(self, endpoint_name, relation_id=None): return network_data self.harness._backend = _TestingOPSModelBackend( - self.harness._unit_name) + self.harness._unit_name, self.harness._meta) self.harness._model = model.Model( self.harness._unit_name, self.harness._meta, @@ -253,6 +321,8 @@ def test_on_ceph_client_relation_joined(self): 'allow r']}]) def test_on_pools_available(self): + self.os.path.exists.return_value = False + self.os.path.basename = os.path.basename rel_id = self.add_cluster_relation() self.harness.update_relation_data( rel_id, @@ -262,14 +332,16 @@ def test_on_pools_available(self): self.harness.begin() self.harness.charm.ceph_client.on.pools_available.emit() 
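         # Emitting pools_available drives render_config, which renders each
         # file in RESTART_MAP; ceph.conf and the keyring now live under
         # /etc/ceph/iscsi.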
self.ch_templating.render.assert_has_calls([ - call('ceph.conf', '/etc/ceph/ceph.conf', ANY), + call('ceph.conf', '/etc/ceph/iscsi/ceph.conf', ANY), call('iscsi-gateway.cfg', '/etc/ceph/iscsi-gateway.cfg', ANY), call( 'ceph.client.ceph-iscsi.keyring', - '/etc/ceph/ceph.client.ceph-iscsi.keyring', ANY)]) + '/etc/ceph/iscsi/ceph.client.ceph-iscsi.keyring', ANY)], + any_order=True) self.assertTrue(self.harness.charm.state.is_started) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual(rel_data['gateway_ready'], 'True') + self.os.mkdir.assert_called_once_with('/etc/ceph/iscsi', 488) @patch('socket.gethostname') def test_on_certificates_relation_joined(self, _gethostname): @@ -290,41 +362,42 @@ def test_on_certificates_relation_joined(self, _gethostname): @patch('socket.gethostname') def test_on_certificates_relation_changed(self, _gethostname): + mock_TLS_CERT_PATH = MagicMock() + mock_TLS_CA_CERT_PATH = MagicMock() + mock_TLS_KEY_PATH = MagicMock() + mock_KEY_AND_CERT_PATH = MagicMock() + mock_TLS_PUB_KEY_PATH = MagicMock() _gethostname.return_value = 'server1' self.subprocess.check_output.return_value = b'pubkey' rel_id = self.harness.add_relation('certificates', 'vault') self.add_cluster_relation() self.harness.begin() - with patch('builtins.open', unittest.mock.mock_open()) as _open: - self.harness.add_relation_unit( - rel_id, - 'vault/0') - self.harness.update_relation_data( - rel_id, - 'vault/0', - { - 'ceph-iscsi_0.processed_application_requests': - '{"app_data": {"cert": "appcert", "key": "appkey"}}', - 'ca': 'ca'}) - expect_calls = [ - call('/etc/ceph/iscsi-gateway.crt', 'w'), - call('/etc/ceph/iscsi-gateway.key', 'w'), - call('/etc/ceph/iscsi-gateway.pem', 'w'), - call('/usr/local/share/ca-certificates/vault_ca_cert.crt', 'w')] - for open_call in expect_calls: - self.assertIn(open_call, _open.call_args_list) - handle = _open() - handle.write.assert_has_calls([ - call('appcert'), - call('appkey'), - call('appcert\nappkey'), - call('ca'), - call('pubkey')]) + self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH + self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH + self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH + self.harness.charm.TLS_KEY_AND_CERT_PATH = mock_KEY_AND_CERT_PATH + self.harness.charm.TLS_PUB_KEY_PATH = mock_TLS_PUB_KEY_PATH + self.harness.add_relation_unit( + rel_id, + 'vault/0') + rel_data = { + 'app_data': { + 'cert': TEST_APP_CERT, + 'key': TEST_APP_KEY}} + self.harness.update_relation_data( + rel_id, + 'vault/0', + { + 'ceph-iscsi_0.processed_application_requests': json.dumps( + rel_data), + 'ca': TEST_CA}) + mock_TLS_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_CA_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_KEY_PATH.write_bytes.assert_called_once() + mock_KEY_AND_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_PUB_KEY_PATH.write_bytes.assert_called_once() self.subprocess.check_call.assert_called_once_with( ['update-ca-certificates']) - self.subprocess.check_output.assert_called_once_with( - ['openssl', 'x509', '-inform', 'pem', '-in', - '/etc/ceph/iscsi-gateway.pem', '-pubkey', '-noout']) self.assertTrue(self.harness.charm.state.enable_tls) def test_custom_status_check(self): From a7fa38a8e00ce112d73aa265e55a43f39684ca40 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 8 May 2020 09:53:17 +0000 Subject: [PATCH 1952/2699] Update readme --- ceph-iscsi/README.md | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/ceph-iscsi/README.md 
b/ceph-iscsi/README.md
index d424abab..692a6a83 100644
--- a/ceph-iscsi/README.md
+++ b/ceph-iscsi/README.md
@@ -16,6 +16,10 @@ deployed, this will provide multiple data paths to clients.
 > **Note**: Deploying four units is also theoretical possible but has not
   been tested.
 
+The charm cannot be placed in an LXD container. However, it can be
+co-located with the ceph-osd charm. Co-location with other charms is
+likely to be fine but is untested.
+
 A sample `bundle.yaml` file's contents:
 
 ```yaml
@@ -86,29 +90,23 @@ Actions allow specific operations to be performed on a per-unit basis.
 Run this action to create an iscsi target.
 
 ```bash
-    $ juju run-action ceph-iscsi/0 create-target \
+    $ juju run-action --wait ceph-iscsi/0 create-target \
         image-size=2G \
         image-name=bob \
         pool-name=superssd \
        client-initiatorname=iqn.1993-08.org.debian:01:aaa2299be916 \
         client-username=usera \
         client-password=testpass
-    Action queued with id: "28"
-```
-
-If the iqn of the created target is returned in the ouput from the action:
-
-```bash
-    $ juju show-action-output 28
-    UnitId: ceph-iscsi/0
-    results:
-      iqn: iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw
-    status: completed
-    timing:
-      completed: 2020-04-02 13:32:02 +0000 UTC
-      enqueued: 2020-04-02 13:18:42 +0000 UTC
-      started: 2020-04-02 13:18:45 +0000 UTC
-```
+    unit-ceph-iscsi-0:
+      UnitId: ceph-iscsi/0
+      id: "28"
+      results:
+        iqn: iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw
+      status: completed
+      timing:
+        completed: 2020-05-08 09:49:52 +0000 UTC
+        enqueued: 2020-05-08 09:49:36 +0000 UTC
+        started: 2020-05-08 09:49:37 +0000 UTC
 
 ### pause
 
 Pause the ceph-iscsi unit. This action will stop the rbd services.
@@ -157,9 +155,7 @@ Alternatively, configuration can be provided as part of a bundle:
   from the ceph-mon charm.
 
 ```bash
-    $ juju run-action ceph-mon/0 create-pool name=iscsi-targets
-    Action queued with id: "1"
-    $ juju show-action-output 1
+    $ juju run-action --wait ceph-mon/0 create-pool name=iscsi-targets
     UnitId: ceph-mon/0
     results:
       Stderr: |
@@ -190,14 +186,13 @@ Alternatively, configuration can be provided as part of a bundle:
   between twelve and sixteen characters.
```bash - $ juju run-action ceph-iscsi/0 create-target \ + $ juju run-action --wait ceph-iscsi/0 create-target \ client-initiatorname="iqn.1998-01.com.vmware:node-caloric-02f98bac" \ client-username=vmwareclient \ client-password=12to16characters \ image-size=10G \ image-name=disk_1 \ pool-name=iscsi-targets - $ juju show-action-output 2 UnitId: ceph-iscsi/0 results: Stdout: | From 7cba4bfa243193d134515b189f573767eb9c78ca Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 8 May 2020 10:25:50 +0000 Subject: [PATCH 1953/2699] Fix unit tests --- ceph-iscsi/mod/ops-interface-ceph-client | 2 +- ceph-iscsi/src/charm.py | 11 ++++------- ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py | 7 +++++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ceph-iscsi/mod/ops-interface-ceph-client b/ceph-iscsi/mod/ops-interface-ceph-client index 30213fa6..b0463a33 160000 --- a/ceph-iscsi/mod/ops-interface-ceph-client +++ b/ceph-iscsi/mod/ops-interface-ceph-client @@ -1 +1 @@ -Subproject commit 30213fa66f979eb93473242297677ea6554984df +Subproject commit b0463a336386daa26c0abf87943988c23d1f365e diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index ab76b07c..1e22a1cb 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -13,9 +13,6 @@ from ops.framework import ( StoredState, - EventSource, - EventBase, - ObjectEvents, ) from ops.main import main import ops.model @@ -99,7 +96,6 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): "mon", "allow *", "mgr", "allow r"] - DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" REQUIRED_RELATIONS = ['ceph-client', 'cluster'] @@ -182,7 +178,6 @@ def on_install(self, event): else: self.install_pkgs() - def on_has_peers(self, event): logging.info("Unit has peers") if self.unit.is_leader() and not self.peers.admin_password: @@ -210,15 +205,17 @@ def refresh_request(self, event): def render_config(self, event): if not self.peers.admin_password: logging.info("Defering setup") + print("Defering setup admin") event.defer() return if not self.ceph_client.pools_available: + print("Defering setup pools") logging.info("Defering setup") event.defer() return self.CEPH_ISCSI_CONFIG_PATH.mkdir( - exist_ok=True, + exist_ok=True, mode=0o750) def daemon_reload_and_restart(service_name): @@ -280,7 +277,7 @@ def on_tls_app_config_ready(self, event): encoding=serialization.Encoding.PEM)) subprocess.check_call(['update-ca-certificates']) self.state.enable_tls = True - self.refresh_request(event) + self.render_config(event) def custom_status_check(self): if ch_host.is_container(): diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 2f432507..433c848e 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -18,6 +18,7 @@ import json import unittest import sys +from pathlib import Path sys.path.append('lib') # noqa sys.path.append('src') # noqa @@ -330,7 +331,10 @@ def test_on_pools_available(self): {'admin_password': 'existing password', 'gateway_ready': False}) self.harness.begin() - self.harness.charm.ceph_client.on.pools_available.emit() + self.harness.charm.ceph_client.state.pools_available = True + with patch.object(Path, 'mkdir') as mock_mkdir: + self.harness.charm.ceph_client.on.pools_available.emit() + mock_mkdir.assert_called_once_with(exist_ok=True, mode=488) self.ch_templating.render.assert_has_calls([ call('ceph.conf', '/etc/ceph/iscsi/ceph.conf', ANY), call('iscsi-gateway.cfg', 
'/etc/ceph/iscsi-gateway.cfg', ANY), @@ -341,7 +345,6 @@ def test_on_pools_available(self): self.assertTrue(self.harness.charm.state.is_started) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual(rel_data['gateway_ready'], 'True') - self.os.mkdir.assert_called_once_with('/etc/ceph/iscsi', 488) @patch('socket.gethostname') def test_on_certificates_relation_joined(self, _gethostname): From 77bd1469e9f7c4854ebe69e002187d758f71690d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 8 May 2020 10:29:21 +0000 Subject: [PATCH 1954/2699] Fix readme formatting --- ceph-iscsi/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 692a6a83..1d8e1261 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -108,6 +108,7 @@ Run this action to create an iscsi target. enqueued: 2020-05-08 09:49:36 +0000 UTC started: 2020-05-08 09:49:37 +0000 UTC + ### pause Pause the ceph-iscsi unit. This action will stop the rbd services. From 1aa6d7b9990a28165f4c70da1ac6a91b0d2d620e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 11 May 2020 11:22:16 +0000 Subject: [PATCH 1955/2699] Bump deps --- ceph-iscsi/mod/operator | 2 +- ceph-iscsi/mod/ops-interface-ceph-client | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index 47af0fc9..ccf1dce2 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit 47af0fc9f86dc5b16e88a83a00377864a1541734 +Subproject commit ccf1dce276141d1e8641d63382bb6c3055eee731 diff --git a/ceph-iscsi/mod/ops-interface-ceph-client b/ceph-iscsi/mod/ops-interface-ceph-client index b0463a33..088b68f5 160000 --- a/ceph-iscsi/mod/ops-interface-ceph-client +++ b/ceph-iscsi/mod/ops-interface-ceph-client @@ -1 +1 @@ -Subproject commit b0463a336386daa26c0abf87943988c23d1f365e +Subproject commit 088b68f5b36f76ac44056ccabbe93396c76de98d From 8b84722a6ecd91327deb89bb0b84cdc79aba77d8 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 11 May 2020 12:48:11 +0100 Subject: [PATCH 1956/2699] Set sane min pg size with autoscaler Resync charmhelpers to pick up behavioural changes in autoscaler configuration for pools. This ensures that pools whose calculated pg count is less than the default of 32 get a matching minimum set, avoiding autoscaler-driven scale-up to 32 pgs. This avoids overloading the max-pgs-per-osd limit in smaller test clusters.
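The charmhelpers change below implements this by passing `--pg-num-min` at pool-creation time on Nautilus or later. The essence of the flag selection, restated as a sketch (illustrative, not the charmhelpers code itself):

```python
# Restatement of the logic added below: cap the requested minimum at the
# autoscaler default so small pools keep their small calculated pg count.
AUTOSCALER_DEFAULT_PGS = 32


def pool_create_cmd(service, name, pg_num, nautilus_or_later):
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create']
    if nautilus_or_later:
        cmd.append('--pg-num-min={}'.format(
            min(AUTOSCALER_DEFAULT_PGS, pg_num)))
    return cmd + [name, str(pg_num)]


# A pool sized at 8 pgs keeps a minimum of 8 rather than ballooning to 32.
assert '--pg-num-min=8' in pool_create_cmd('admin', 'rbd', 8, True)
```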
Change-Id: Ic34d029dae9c67dbba3e2e502d7c9ac4576fcfa5 Closes-Bug: 1872748 --- .../contrib/storage/linux/ceph.py | 40 +++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index eb31b782..95a0d82a 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -92,6 +92,7 @@ DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 class OsdPostUpgradeError(Exception): @@ -399,16 +400,28 @@ def __init__(self, service, name, pg_num=None, replicas=2, def create(self): if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + try: check_call(cmd) # Set the pool replica size update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -466,10 +479,24 @@ def create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + try: check_call(cmd) try: @@ -478,7 +505,6 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, From c5ecee4dbfc6821802641db3b9cffd147fa3abe3 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 11 May 2020 12:59:34 +0100 Subject: [PATCH 1957/2699] Rebuild for sync charm-helpers for 20.05 release Change-Id: I4367087b56d5f885352e76dace6a38906107f3cd --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 39ab9a72..315feac0 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -3fef9e9b-3b33-4c1d-9fde-d2a783bbfa92 +c17ec0ce-937e-11ea-a49b-8b4b0c8e2b7f From c974e9ddc5828225a6c21ed57cc91280f98960fb Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 11 May 2020 12:59:46 +0100 Subject: [PATCH 1958/2699] Rebuild for sync charm-helpers for 20.05 release Change-Id: Idf08632a2bad878850b9fc79401aeda9d7229602 --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 39ab9a72..315feac0 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -3fef9e9b-3b33-4c1d-9fde-d2a783bbfa92 +c17ec0ce-937e-11ea-a49b-8b4b0c8e2b7f From f6d23837a78bafa0db3877b301d479198fd3779d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 11 May 2020 12:54:33 +0000 Subject: [PATCH 1959/2699] Implement allowed IPs action --- ceph-iscsi/actions.yaml | 6 +++++ ceph-iscsi/src/charm.py | 19 +++++++++++---- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 23 +++++++++++++++---- ceph-iscsi/templates/iscsi-gateway.cfg | 2 +- .../unit_tests/test_ceph_iscsi_charm.py | 1 - 5 files changed, 40 insertions(+), 11 deletions(-) diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 3f8091af..6f918326 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -18,6 +18,12 @@ add-trusted-ip: type: string default: '' description: "Space seperated list of trusted ips" + overwrite: + type: boolean + default: False + description: "If False append IPs to list" + required: + - ips create-target: description: "Create a new cache tier" params: diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 1e22a1cb..949630f5 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -64,6 +64,12 @@ def gw_hosts(self): hosts = self.relation.peer_addresses return ' '.join(sorted(hosts)) + @property + def trusted_ips(self): + ips = self.allowed_ips + ips.extend(self.relation.peer_addresses) + return ' '.join(sorted(ips)) + class TLSCertificatesAdapter(adapters.OpenStackOperRelationAdapter): @@ -129,8 +135,7 @@ def __init__(self, framework, key): logging.info("Using {} class".format(self.release)) self.state.set_default( target_created=False, - enable_tls=False, - additional_trusted_ips=[]) + enable_tls=False) self.ceph_client = interface_ceph_client.CephClientRequires( self, 'ceph-client') @@ -152,6 +157,9 @@ def __init__(self, framework, key): self.framework.observe( self.peers.on.has_peers, self) + self.framework.observe( + self.peers.on.allowed_ips_changed, + self.render_config) self.framework.observe( self.ca_client.on.tls_app_config_ready, self.on_tls_app_config_ready) @@ -294,10 +302,11 @@ def custom_status_check(self): def on_add_trusted_ip_action(self, event): if self.unit.is_leader(): - self.state.additional_trusted_ips = event.params.get('ips') - logging.info(len(self.state.additional_trusted_ips)) + ips = event.params.get('ips').split() self.peers.set_allowed_ips( - self.state.additional_trusted_ips) + ips, + append=not event.params['overwrite']) + self.render_config(event) else: event.fail("Action must be run on leader") diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index e8a9df20..d509762a 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -20,9 +20,14 @@ class 
ReadyPeersEvent(EventBase): pass +class AllowedIpsChangedEvent(EventBase): + pass + + class CephISCSIGatewayPeerEvents(ObjectEvents): has_peers = EventSource(HasPeersEvent) ready_peers = EventSource(ReadyPeersEvent) + allowed_ips_changed = EventSource(AllowedIpsChangedEvent) class CephISCSIGatewayPeers(Object): @@ -38,6 +43,8 @@ def __init__(self, charm, relation_name): super().__init__(charm, relation_name) self.relation_name = relation_name self.this_unit = self.framework.model.unit + self.state.set_default( + allowed_ips=[]) self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) @@ -47,14 +54,22 @@ def on_changed(self, event): self.on.has_peers.emit() if self.ready_peer_details: self.on.ready_peers.emit() + if self.allowed_ips != self.state.allowed_ips: + self.on.allowed_ips_changed.emit() + self.state.allowed_ips = self.allowed_ips def set_admin_password(self, password): logging.info("Setting admin password") self.peer_rel.data[self.peer_rel.app][self.PASSWORD_KEY] = password - def set_allowed_ips(self, ips): - logging.info("Setting allowed ips") - ip_str = json.dumps(ips) + def set_allowed_ips(self, ips, append=True): + logging.info("Setting allowed ips: {}".format(append)) + trusted_ips = [] + if append and self.allowed_ips: + trusted_ips = self.allowed_ips + trusted_ips.extend(ips) + trusted_ips = sorted(list(set(trusted_ips))) + ip_str = json.dumps(trusted_ips) self.peer_rel.data[self.peer_rel.app][self.ALLOWED_IPS_KEY] = ip_str def announce_ready(self): @@ -106,7 +121,7 @@ def allowed_ips(self): if not self.peer_rel: return None ip_str = self.peer_rel.data[self.peer_rel.app].get( - self.ALLOWED_IPS_KEY) + self.ALLOWED_IPS_KEY, '[]') return json.loads(ip_str) @property diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index c8db2bb5..c9f2bba7 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -11,4 +11,4 @@ api_secure = {{ certificates.enable_tls }} api_user = admin api_password = {{ cluster.admin_password }} api_port = 5000 -trusted_ip_list = {{ cluster.gw_hosts }} +trusted_ip_list = {{ cluster.trusted_ips }} diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 433c848e..43ed6615 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -173,7 +173,6 @@ def test_init(self): self.harness.begin() self.assertFalse(self.harness.charm.state.target_created) self.assertFalse(self.harness.charm.state.enable_tls) - self.assertEqual(self.harness.charm.state.additional_trusted_ips, []) def add_cluster_relation(self): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') From 368795a7b76aa9fae57298c83f84a79ebe50eba4 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Wed, 6 May 2020 19:02:47 +0200 Subject: [PATCH 1960/2699] Pre-freeze 'make sync' Change-Id: Ic587b0a7b5f737258fcb069d0978cb7341d51158 --- ceph-proxy/charm-helpers-hooks.yaml | 7 +- .../contrib/hahelpers/__init__.py | 13 + .../charmhelpers/contrib/hahelpers/apache.py | 86 ++++ .../charmhelpers/contrib/hahelpers/cluster.py | 451 ++++++++++++++++++ .../contrib/openstack/ha/__init__.py | 13 + .../contrib/openstack/ha/utils.py | 348 ++++++++++++++ .../charmhelpers/contrib/openstack/ip.py | 197 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 306 +++++++++++- .../contrib/storage/linux/ceph.py | 187 +++++++- .../contrib/storage/linux/loopback.py | 8 +- 
ceph-proxy/charmhelpers/core/hookenv.py | 56 ++- ceph-proxy/charmhelpers/core/sysctl.py | 14 +- ceph-proxy/tests/bundles/bionic-queens.yaml | 3 - ceph-proxy/tests/bundles/bionic-rocky.yaml | 2 - ceph-proxy/tests/bundles/bionic-stein.yaml | 2 - ceph-proxy/tests/bundles/bionic-train.yaml | 2 - ceph-proxy/tests/bundles/trusty-mitaka.yaml | 2 - ceph-proxy/tests/bundles/xenial-mitaka.yaml | 3 - ceph-proxy/tests/bundles/xenial-ocata.yaml | 2 - ceph-proxy/tests/bundles/xenial-pike.yaml | 2 - ceph-proxy/tests/bundles/xenial-queens.yaml | 2 - 21 files changed, 1653 insertions(+), 53 deletions(-) create mode 100644 ceph-proxy/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 ceph-proxy/charmhelpers/contrib/hahelpers/apache.py create mode 100644 ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/ha/__init__.py create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/ip.py diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index af0da178..02a4f8e6 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -12,11 +12,14 @@ include: - lvm - payload.execd - contrib.openstack: - - utils - - exceptions - alternatives + - exceptions + - ha + - ip + - utils - contrib.network.ip - contrib.charmsupport - contrib.hardening|inc=* - contrib.python - contrib.openstack.policyd + - contrib.hahelpers diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/__init__.py b/ceph-proxy/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py b/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..2c1e371e --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,86 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os + +from charmhelpers.core import host +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'rb') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..ba34fba0 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,451 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
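Before moving into the clustering helpers, one detail of `get_cert` above is easy to miss: when a CN is supplied, the relation attribute names are suffixed with it. A small illustration (`cert_attrs` is an illustrative helper, not charmhelpers API):

```python
# Relation keys searched by get_cert: suffixed with the CN when one is
# given, bare otherwise.
def cert_attrs(cn=None):
    if cn:
        return 'ssl_cert_{}'.format(cn), 'ssl_key_{}'.format(cn)
    return 'ssl_cert', 'ssl_key'


assert cert_attrs('keystone') == ('ssl_cert_keystone', 'ssl_key_keystone')
assert cert_attrs() == ('ssl_cert', 'ssl_key')
```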
+""" + +import functools +import subprocess +import os +import time + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + DEBUG, + WARNING, + unit_get, + is_leader as juju_is_leader, + status_set, +) +from charmhelpers.core.host import ( + modulo_distribution, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + +DC_RESOURCE_NAME = 'DC' + + +class HAIncompleteConfig(Exception): + pass + + +class HAIncorrectConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +class CRMDCNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 3. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
+ + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + return public_port - (i * 10) + + +determine_apache_port_single = functools.partial( + determine_apache_port, singlenode_mode=True) + + +def get_hacluster_config(exclude_keys=None): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, os-internal-hostname, + os-admin-hostname, os-public-hostname, os-access-hostname + + param: exclude_keys: list of setting key(s) to be excluded. + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing or incorrect. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', + 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] + conf = {} + for setting in settings: + if exclude_keys and setting in exclude_keys: + continue + + conf[setting] = config_get(setting) + + if not valid_hacluster_config(): + raise HAIncorrectConfig('Insufficient or incorrect config data to ' + 'configure hacluster.') + return conf + + +def valid_hacluster_config(): + ''' + Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname + must be set. + + Note: ha-bindiface and ha-macastport both have defaults and will always + be set. We only care that either vip or dns-ha is set. + + :returns: boolean: valid config returns true. + raises: HAIncompatibileConfig if settings conflict. + raises: HAIncompleteConfig if settings are missing. + ''' + vip = config_get('vip') + dns = config_get('dns-ha') + if not(bool(vip) ^ bool(dns)): + msg = ('HA: Either vip or dns-ha must be set but not both in order to ' + 'use high availability') + status_set('blocked', msg) + raise HAIncorrectConfig(msg) + + # If dns-ha then one of os-*-hostname must be set + if dns: + dns_settings = ['os-internal-hostname', 'os-admin-hostname', + 'os-public-hostname', 'os-access-hostname'] + # At this point it is unknown if one or all of the possible + # network spaces are in HA. Validate at least one is set which is + # the minimum required. + for setting in dns_settings: + if config_get(setting): + log('DNS HA: At least one hostname is set {}: {}' + ''.format(setting, config_get(setting)), + level=DEBUG) + return True + + msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' + 'DNS HA') + status_set('blocked', msg) + raise HAIncompleteConfig(msg) + + log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) + return True + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = unit_get('private-address') + return '%s://%s' % (scheme, addr) + + +def distributed_wait(modulo=None, wait=None, operation_name='operation'): + ''' Distribute operations by waiting based on modulo_distribution + + If modulo and or wait are not set, check config_get for those values. 
+ If config values are not set, default to modulo=3 and wait=30. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') or 3 + if wait is None: + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. + :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. + :rtype: Tuple[List[str], List[int]] + """ + if external_services is None: + external_services = ['haproxy'] + if relation_ids('ha'): + for svc in external_services: + try: + services.remove(svc) + except ValueError: + pass + external_ports = [port_conv_f(p) for p in external_ports] + return services, external_ports diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-proxy/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 00000000..9b088de8 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/ha/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
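`distributed_wait` above delegates the stagger calculation to `modulo_distribution` from `charmhelpers.core.host`. A simplified sketch of that helper, with the unit number taken as a parameter rather than read from `local_unit()`:

```python
# Simplified sketch of modulo_distribution as called by distributed_wait:
# units are staggered into `modulo` groups, each group waiting a further
# `wait` seconds; with non_zero_wait the group that lands on zero waits a
# full cycle instead of rushing ahead.
def modulo_distribution(unit_number, modulo=3, wait=30,
                        non_zero_wait=False):
    calculated_wait = (unit_number % modulo) * wait
    if non_zero_wait and calculated_wait == 0:
        return modulo * wait
    return calculated_wait


# With three groups of 30s: unit 1 waits 30s, unit 2 waits 60s, and
# unit 3 (modulo 0) waits the full 90s rather than 0.
assert [modulo_distribution(n, non_zero_wait=True)
        for n in (1, 2, 3)] == [30, 60, 90]
```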
diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 00000000..a5cbdf53 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,348 @@ +# Copyright 2014-2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import hashlib +import json + +import re + +from charmhelpers.core.hookenv import ( + expected_related_units, + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. 
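A note on the `JSON_ENCODE_OPTIONS` defined above: relation data is exchanged as strings, so the encoding is presumably pinned down (sorted keys, compact separators) to stay byte-stable across runs; the rationale is inferred, the options themselves are from the code above. A quick check of that stability:

```python
# Two dicts with the same items but different insertion order encode to
# the same string under the options used for the hacluster relation.
import json

JSON_ENCODE_OPTIONS = dict(
    sort_keys=True,
    allow_nan=False,
    indent=None,
    separators=(',', ':'),
)

a = {'resources': {'res_foo_haproxy': 'lsb:haproxy'}, 'clones': {}}
b = {'clones': {}, 'resources': {'res_foo_haproxy': 'lsb:haproxy'}}
assert json.dumps(a, **JSON_ENCODE_OPTIONS) == \
    json.dumps(b, **JSON_ENCODE_OPTIONS)
```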
+ + @returns boolean + """ + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. + + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data + @returns dict: json encoded data for use with relation_set + """ + _relation_data = {'resources': {}, 'resource_params': {}} + + if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} + _relation_data['resource_params'] = { + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) + } + _relation_data['init_services'] = {_haproxy_res: 'haproxy'} + _relation_data['clones'] = { + 'cl_{}_haproxy'.format(service): _haproxy_res + } + + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 'os-access-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' + else: + msg = ('Unexpected DNS hostname setting: {}. 
' + 'Cannot determine endpoint_type name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + """ + cluster_config = get_hacluster_config() + vip_group = [] + vips_to_delete = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface, netmask, fallback = get_vip_settings(vip) + + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' + if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. 
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + + vip_group.append(vip_key) + + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + + if len(vip_group) >= 1: + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ip.py b/ceph-proxy/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..723aebc1 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,197 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' +ACCESS = 'access' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. 
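The VIP resource naming in `update_hacluster_vip` above drops the old interface-based keys in favour of a short hash of the address itself, so inconsistent NIC naming across units cannot rename resources. Illustrated (the address is an example value):

```python
# How update_hacluster_vip derives resource keys: a truncated sha1 of the
# VIP, exactly as in the code above.
import hashlib


def vip_key(service, vip):
    return 'res_{}_{}_vip'.format(
        service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])


# e.g. vip_key('cinder', '10.5.100.1') -> 'res_cinder_<7-hex-chars>_vip'
print(vip_key('cinder', '10.5.100.1'))
```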
+ """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured, or a Juju 2.0 extra-binding has been used. + + :param endpoint_type: Network endpoing type + :param override: Accept hostname overrides or not + """ + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] + clustered = is_clustered() + + if clustered and vips: + if net_addr: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except (NotImplementedError, NoNetworkBinding): + # If no net-splits configured and no support for extra + # bindings/network spaces so we expect a single vip + resolved_address = vips[0] + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except (NotImplementedError, NoNetworkBinding): + resolved_address = fallback_addr + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. (net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address + + +def get_vip_in_network(network): + matching_vip = None + vips = config('vip') + if vips: + for vip in vips.split(): + if is_address_in_network(network, vip): + matching_vip = vip + return matching_vip diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 5c8f6eff..e59e0d1e 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -13,7 +13,7 @@ # limitations under the License. 
# Common python helper functions used for OpenStack charms. -from collections import OrderedDict +from collections import OrderedDict, namedtuple from functools import wraps import subprocess @@ -36,15 +36,20 @@ from charmhelpers.core import unitdata from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, action_fail, action_set, config, + expected_peer_units, + expected_related_units, log as juju_log, charm_dir, INFO, ERROR, + metadata, related_units, relation_get, + relation_id, relation_ids, relation_set, status_set, @@ -53,6 +58,7 @@ cached, leader_set, leader_get, + local_unit, ) from charmhelpers.core.strutils import ( @@ -108,6 +114,10 @@ POLICYD_CONFIG_NAME, ) +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -1810,6 +1820,16 @@ def os_application_version_set(package): application_version_set(application_version) +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. + :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + def enable_memcache(source=None, release=None, package=None): """Determine if memcache should be enabled on the local unit @@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None): 'WARN') pass return True in notifications + + +@cached +def container_scoped_relations(): + """Get all the container scoped relations + + :returns: List of relation names + :rtype: List + """ + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_db_ready(use_current_context=False, rel_name=None): + """Check remote database is ready to be used. + + Database relations are expected to provide a list of 'allowed' units to + confirm that the database is ready for use by those units. + + If db relation has provided this information and local unit is a member, + returns True otherwise False. + + :param use_current_context: Whether to limit checks to current hook + context. + :type use_current_context: bool + :param rel_name: Name of relation to check + :type rel_name: string + :returns: Whether remote db is ready. + :rtype: bool + :raises: Exception + """ + key = 'allowed_units' + + rel_name = rel_name or 'shared-db' + this_unit = local_unit() + + if use_current_context: + if relation_id() in relation_ids(rel_name): + rids_units = [(None, None)] + else: + raise Exception("use_current_context=True but not in {} " + "rel hook contexts (currently in {})." + .format(rel_name, relation_id())) + else: + rids_units = [(r_id, u) + for r_id in relation_ids(rel_name) + for u in related_units(r_id)] + + for rid, unit in rids_units: + allowed_units = relation_get(rid=rid, unit=unit, attribute=key) + if allowed_units and this_unit in allowed_units.split(): + juju_log("This unit ({}) is in allowed unit list from {}".format( + this_unit, + unit), 'DEBUG') + return True + + juju_log("This unit was not found in any allowed unit list") + return False + + +def is_expected_scale(peer_relation_name='cluster'): + """Query juju goal-state to determine whether our peer- and dependency- + relations are at the expected scale. 
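The contract that `is_db_ready` above checks is small: the database side publishes a space-separated `allowed_units` list and each consumer looks for itself in it. In miniature (`db_ready_for` is an illustrative helper):

```python
# The allowed_units handshake tested by is_db_ready.
def db_ready_for(this_unit, allowed_units):
    return bool(allowed_units) and this_unit in allowed_units.split()


assert db_ready_for('keystone/0', 'keystone/0 keystone/1')
assert not db_ready_for('keystone/2', 'keystone/0 keystone/1')
```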
+ + Useful for deferring per unit per relation housekeeping work until we are + ready to complete it successfully and without unnecessary repetiton. + + Always returns True if version of juju used does not support goal-state. + + :param peer_relation_name: Name of peer relation + :type rel_name: string + :returns: True or False + :rtype: bool + """ + def _get_relation_id(rel_type): + return next((rid for rid in relation_ids(reltype=rel_type)), None) + + Relation = namedtuple('Relation', 'rel_type rel_id') + peer_rid = _get_relation_id(peer_relation_name) + # Units with no peers should still have a peer relation. + if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. 
+ :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WORKLOAD_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WORKLOAD_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WORKLOAD_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. + + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. 
+ + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WORKLOAD_STATES.ACTIVE: + if are_peers_ready(): + return WORKLOAD_STATES.ACTIVE, 'Application Ready' + else: + return WORKLOAD_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index dabfb6c2..95a0d82a 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -22,6 +22,7 @@ # Adam Gandelman # +import collections import errno import hashlib import math @@ -91,6 +92,89 @@ DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 + + +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. + + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. 
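+        # This happens when a client requests a setting outside
+        # OSD_SETTING_WHITELIST, or when two clients request different
+        # values for the same setting.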
+        log(e, level=ERROR)
+        return
+    data = {
+        'osd-settings': json.dumps(settings, sort_keys=True)}
+    for relid in relation_ids('osd'):
+        relation_set(relation_id=relid,
+                     relation_settings=data)
 
 
 def validator(value, valid_type, valid_range=None):
@@ -316,16 +400,28 @@ def __init__(self, service, name, pg_num=None, replicas=2,
 
     def create(self):
         if not pool_exists(self.service, self.name):
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-                   self.name, str(self.pg_num)]
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                    ),
+                    self.name, str(self.pg_num)
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(self.pg_num)
+                ]
+
             try:
                 check_call(cmd)
                 # Set the pool replica size
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
                 if nautilus_or_later:
                     # Ensure we set the expected pool ratio
                     update_pool(client=self.service,
@@ -383,10 +479,24 @@ def create(self):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-               self.name, str(pgs), str(pgs),
-               'erasure', self.erasure_code_profile]
+        if nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+
         try:
             check_call(cmd)
             try:
@@ -395,7 +505,6 @@ def create(self):
                                 name=self.app_name)
             except CalledProcessError:
                 log('Could not set app name for pool {}'.format(self.name, level=WARNING))
-            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             if nautilus_or_later:
                 # Ensure we set the expected pool ratio
                 update_pool(client=self.service,
@@ -1635,5 +1744,67 @@ def __call__(self):
                 continue
             ceph_conf[key] = conf[key]
-        return ceph_conf
+        return ceph_conf
+
+
+class CephOSDConfContext(CephConfContext):
+    """Ceph config (ceph.conf) context.
+
+    Consolidates settings from config-flags via CephConfContext with
+    settings provided by the mons. The config-flag values are preserved in
+    conf['osd'], settings from the mons which do not clash with config-flag
+    settings are in conf['osd_from_client'] and finally settings which do
+    clash are in conf['osd_from_client_conflict']. Rather than silently
+    dropping the conflicting settings, they are provided in the context so
+    they can be rendered commented out to give some visibility to the admin.
+    """
+
+    def __init__(self, permitted_sections=None):
+        super(CephOSDConfContext, self).__init__(
+            permitted_sections=permitted_sections)
+        try:
+            self.settings_from_mons = get_osd_settings('mon')
+        except OSDSettingConflict:
+            log(
+                "OSD settings from mons are inconsistent, ignoring them",
+                level=WARNING)
+            self.settings_from_mons = {}
+
+    def filter_osd_from_mon_settings(self):
+        """Filter settings from client relation against config-flags.
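+
+        Each mon-provided setting is compared against the config-flag value,
+        if any, for the same key; keys whose values differ are collected
+        separately as conflicts.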
+
+        :returns: A tuple of (config-flag values,
+            client settings which do not conflict with config-flag values,
+            client settings which conflict with config-flag values)
+        :rtype: (OrderedDict, OrderedDict, OrderedDict)
+        """
+        ceph_conf = super(CephOSDConfContext, self).__call__()
+        conflicting_entries = {}
+        clear_entries = {}
+        for key, value in self.settings_from_mons.items():
+            if key in ceph_conf.get('osd', {}):
+                if ceph_conf['osd'][key] != value:
+                    conflicting_entries[key] = value
+            else:
+                clear_entries[key] = value
+        clear_entries = _order_dict_by_key(clear_entries)
+        conflicting_entries = _order_dict_by_key(conflicting_entries)
+        return ceph_conf, clear_entries, conflicting_entries
+
+    def __call__(self):
+        """Construct OSD config context.
+
+        Standard context with two additional special keys.
+            osd_from_client_conflict: client settings which conflict with
+                config-flag values
+            osd_from_client: settings which do not conflict with config-flag
+                values
+
+        :returns: OSD config context dict.
+        :rtype: dict
+        """
+        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
+        conf['osd_from_client_conflict'] = osd_conflict
+        conf['osd_from_client'] = osd_clear
+        return conf
diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py
index 82472ff1..74bab40e 100644
--- a/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py
+++ b/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py
@@ -32,6 +32,10 @@ def loopback_devices():
 
         /dev/loop0: [0807]:961814 (/tmp/my.img)
 
+    or:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
+
     :returns: dict: a dict mapping {loopback_dev: backing_file}
     '''
     loopbacks = {}
@@ -39,9 +43,9 @@ def loopback_devices():
     output = check_output(cmd)
     if six.PY3:
         output = output.decode('utf-8')
-    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
+    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
     for dev, _, f in devs:
-        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
+        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
     return loopbacks
diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py
index 647f6e4b..d7c37c17 100644
--- a/ceph-proxy/charmhelpers/core/hookenv.py
+++ b/ceph-proxy/charmhelpers/core/hookenv.py
@@ -21,6 +21,7 @@ from __future__ import print_function
 import copy
 from distutils.version import LooseVersion
+from enum import Enum
 from functools import wraps
 from collections import namedtuple
 import glob
@@ -57,6 +58,14 @@
                   'This may not be compatible with software you are '
                   'running in your shell.')
 
+
+class WORKLOAD_STATES(Enum):
+    ACTIVE = 'active'
+    BLOCKED = 'blocked'
+    MAINTENANCE = 'maintenance'
+    WAITING = 'waiting'
+
+
 cache = {}
@@ -1088,22 +1097,33 @@ def function_tag():
     return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
 
 
-def status_set(workload_state, message):
+def status_set(workload_state, message, application=False):
    """Set the workload state with a message
 
    Use status-set to set the workload state with a message which is visible
    to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message unstead.
+    assume this is juju < 1.23 and juju-log the message instead.
 
-    workload_state -- valid juju workload state.
-    message -- status update message
+    workload_state -- valid juju workload state.
str or WORKLOAD_STATES
+    message -- status update message
+    application -- Whether this is an application state set
    """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
    try:
        ret = subprocess.call(cmd)
        if ret == 0:
@@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
                                                     message)
    log(log_message, level='INFO')
@@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
    """Get proxy settings from process environment variables.
 
    Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) in a format suitable for passing to an application that
-    reacts to proxy settings passed as environment variables. Some applications
-    support lowercase or uppercase notation (e.g. curl), some support only
-    lowercase (e.g. wget), there are also subjectively rare cases of only
-    uppercase notation support. no_proxy CIDR and wildcard support also varies
-    between runtimes and applications as there is no enforced standard.
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl), some
+    support only lowercase (e.g. wget), there are also subjectively rare cases
+    of only uppercase notation support. no_proxy CIDR and wildcard support also
+    varies between runtimes and applications as there is no enforced standard.
 
    Some applications may connect to multiple destinations and expose config
    options that would affect only proxy settings for a specific destination
diff --git a/ceph-proxy/charmhelpers/core/sysctl.py b/ceph-proxy/charmhelpers/core/sysctl.py
index f1f4a28f..386428d6 100644
--- a/ceph-proxy/charmhelpers/core/sysctl.py
+++ b/ceph-proxy/charmhelpers/core/sysctl.py
@@ -17,14 +17,17 @@
 
 import yaml
 
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
 
 from charmhelpers.core.hookenv import (
    log,
    DEBUG,
    ERROR,
+    WARNING,
 )
 
+from charmhelpers.core.host import is_container
+
 __author__ = 'Jorge Niedbalski R.
' @@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False): if ignore: call.append("-e") - check_call(call) + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e diff --git a/ceph-proxy/tests/bundles/bionic-queens.yaml b/ceph-proxy/tests/bundles/bionic-queens.yaml index 2da0862b..cffef0e3 100644 --- a/ceph-proxy/tests/bundles/bionic-queens.yaml +++ b/ceph-proxy/tests/bundles/bionic-queens.yaml @@ -32,9 +32,6 @@ applications: keystone: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 - options: - admin-password: openstack - admin-token: ubuntutesting constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml index 6050fa48..18fcac89 100644 --- a/ceph-proxy/tests/bundles/bionic-rocky.yaml +++ b/ceph-proxy/tests/bundles/bionic-rocky.yaml @@ -42,8 +42,6 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-rocky - admin-password: openstack - admin-token: ubuntutesting constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml index c37ee8dc..69e093b4 100644 --- a/ceph-proxy/tests/bundles/bionic-stein.yaml +++ b/ceph-proxy/tests/bundles/bionic-stein.yaml @@ -42,8 +42,6 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-stein - admin-password: openstack - admin-token: ubuntutesting constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' diff --git a/ceph-proxy/tests/bundles/bionic-train.yaml b/ceph-proxy/tests/bundles/bionic-train.yaml index 29f0dbeb..dda2126e 100644 --- a/ceph-proxy/tests/bundles/bionic-train.yaml +++ b/ceph-proxy/tests/bundles/bionic-train.yaml @@ -42,8 +42,6 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train - admin-password: openstack - admin-token: ubuntutesting constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml index 61df904f..b9774a6d 100644 --- a/ceph-proxy/tests/bundles/trusty-mitaka.yaml +++ b/ceph-proxy/tests/bundles/trusty-mitaka.yaml @@ -41,8 +41,6 @@ applications: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 options: - admin-password: openstack - admin-token: ubuntutesting openstack-origin: cloud:trusty-mitaka constraints: mem=1024 percona-cluster: diff --git a/ceph-proxy/tests/bundles/xenial-mitaka.yaml b/ceph-proxy/tests/bundles/xenial-mitaka.yaml index 1de24acd..e9982e4d 100644 --- a/ceph-proxy/tests/bundles/xenial-mitaka.yaml +++ b/ceph-proxy/tests/bundles/xenial-mitaka.yaml @@ -35,9 +35,6 @@ applications: keystone: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 - options: - admin-password: openstack - admin-token: ubuntutesting constraints: mem=1024 percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml b/ceph-proxy/tests/bundles/xenial-ocata.yaml index bd1fd236..d2a91ea7 100644 --- a/ceph-proxy/tests/bundles/xenial-ocata.yaml +++ b/ceph-proxy/tests/bundles/xenial-ocata.yaml @@ -42,8 +42,6 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-ocata - admin-password: openstack - admin-token: 
ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'
diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml
index 6b6dd7cb..5883015b 100644
--- a/ceph-proxy/tests/bundles/xenial-pike.yaml
+++ b/ceph-proxy/tests/bundles/xenial-pike.yaml
@@ -41,8 +41,6 @@ applications:
     charm: 'cs:~openstack-charmers-next/keystone'
     num_units: 1
     options:
-      admin-password: openstack
-      admin-token: ubuntutesting
       openstack-origin: cloud:xenial-pike
     constraints: mem=1024
   percona-cluster:
diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml
index 48c298cb..addf5f6c 100644
--- a/ceph-proxy/tests/bundles/xenial-queens.yaml
+++ b/ceph-proxy/tests/bundles/xenial-queens.yaml
@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:xenial-queens
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'

From 95822e949d79ea8f8ab2ebc7c73aced5553a2e3d Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Thu, 2 Apr 2020 16:03:26 +0100
Subject: [PATCH 1961/2699] Add focal-ussuri and bionic-ussuri bundles

This patch adds the following bundles:

* tests/bundles/bionic-ussuri-namespaced.yaml
* tests/bundles/bionic-ussuri.yaml
* tests/bundles/focal-ussuri-namespaced.yaml
* tests/bundles/focal-ussuri.yaml

The focal tests use the force_deploy option in tests.yaml.

The depends-on is to fix very recent lint errors in charms.ceph with
E741 (very short variable names).

Change-Id: I46445da361e37716b65bf941f687cbd6468ed212
Depends-On: Id92956e52cefed6d3d792dd95fe7091788fb2d1b
---
 ceph-radosgw/actions/actions.py               |  2 +-
 ceph-radosgw/lib/charms_ceph/utils.py         |  4 +-
 ceph-radosgw/metadata.yaml                    |  1 +
 .../bundles/bionic-ussuri-namespaced.yaml     | 43 +++++++++
 ceph-radosgw/tests/bundles/bionic-ussuri.yaml | 42 ++++++++
 .../bundles/focal-ussuri-namespaced.yaml      | 96 +++++++++++++++++++
 ceph-radosgw/tests/bundles/focal-ussuri.yaml  | 94 ++++++++++++++++++
 ceph-radosgw/tests/tests.yaml                 |  8 ++
 8 files changed, 287 insertions(+), 3 deletions(-)
 create mode 100644 ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml
 create mode 100644 ceph-radosgw/tests/bundles/bionic-ussuri.yaml
 create mode 100644 ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml
 create mode 100644 ceph-radosgw/tests/bundles/focal-ussuri.yaml

diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py
index c0c23e3b..ddbdde04 100755
--- a/ceph-radosgw/actions/actions.py
+++ b/ceph-radosgw/actions/actions.py
@@ -119,7 +119,7 @@ def tidydefaults(args):
         )
     except subprocess.CalledProcessError as cpe:
         action_fail('Unable delete default zone and zonegroup'
-                    ': {}'.format(zone, cpe.output))
+                    ': {} - {}'.format(zone, cpe.output))
 
 
 # A dictionary of all the defined actions to callables (which take
diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py
index a3fd276d..1152bda9 100644
--- a/ceph-radosgw/lib/charms_ceph/utils.py
+++ b/ceph-radosgw/lib/charms_ceph/utils.py
@@ -1481,11 +1481,11 @@ def get_devices(name):
     :returns: Set(device names), which are strings
     """
     if config(name):
-        devices = [l.strip() for l in config(name).split(' ')]
+        devices = [dev.strip() for dev in config(name).split(' ')]
     else:
         devices = []
     storage_ids = storage_list(name)
-    devices.extend((storage_get('location', s) for s in storage_ids))
+    devices.extend((storage_get('location', sid) for sid in
storage_ids)) devices = filter(os.path.exists, devices) return set(devices) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 5d483d76..40e6099b 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -16,6 +16,7 @@ series: - xenial - bionic - eoan + - focal - trusty extra-bindings: public: diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml new file mode 100644 index 00000000..ffa56dcd --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml @@ -0,0 +1,43 @@ +options: + source: &source cloud:bionic-ussuri +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + namespace-tenants: True + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..80954374 --- /dev/null +++ b/ceph-radosgw/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,42 @@ +options: + source: &source cloud:bionic-ussuri +series: bionic +applications: + ceph-radosgw: + charm: ceph-radosgw + series: bionic + num_units: 1 + options: + source: *source + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source +relations: +- - keystone:shared-db + - percona-cluster:shared-db +- - ceph-osd:mon + - ceph-mon:osd +- - ceph-radosgw:mon + - ceph-mon:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml new file mode 100644 index 00000000..e90e0c91 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml @@ -0,0 +1,96 @@ +options: + source: &source distro + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + diff --git a/ceph-radosgw/tests/bundles/focal-ussuri.yaml b/ceph-radosgw/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..e5003281 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,94 @@ +options: + source: &source distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index e631981a..49b65c19 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,5 +1,9 @@ charm_name: ceph-radosgw gate_bundles: + - test-s3api: focal-ussuri + - test-s3api: focal-ussuri-namespaced + - test-s3api: bionic-ussuri + - test-s3api: bionic-ussuri-namespaced - test-s3api: 
bionic-train - test-s3api: bionic-train-namespaced - test-s3api: bionic-stein @@ -24,3 +28,7 @@ tests: - test-s3api: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - zaza.openstack.charm_tests.swift.tests.S3APITest +tests_options: + force_deploy: + - focal-ussuri + - focal-ussuri-namespaced From 8a6f83b81b783187d2e02b7e6ba3fcbcfd442802 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 12 May 2020 15:58:10 -0400 Subject: [PATCH 1962/2699] Format based on README template Change-Id: I480f69444438720316d472f13e48f8082a376c84 --- ceph-mon/README.md | 147 +++++++++++---------------------------------- 1 file changed, 34 insertions(+), 113 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index bb87aba7..cad0ca7b 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -16,7 +16,7 @@ A cloud with three MON nodes is a typical design whereas three OSD nodes are considered the minimum. For example, to deploy a Ceph cluster consisting of three OSDs and three MONs: - juju deploy --config ceph-osd.yaml -n 3 ceph-osd + juju deploy -n 3 --config ceph-osd.yaml ceph-osd juju deploy --to lxd:0 ceph-mon juju add-unit --to lxd:1 ceph-mon juju add-unit --to lxd:2 ceph-mon @@ -52,7 +52,7 @@ The ceph-mon charm exposes the following Ceph traffic types (bindings): For example, providing that spaces 'data-space' and 'cluster-space' exist, the deploy command above could look like this: - juju deploy --config ceph-mon.yaml -n 3 ceph-mon \ + juju deploy -n 3 --config ceph-mon.yaml ceph-mon \ --bind "public=data-space cluster=cluster-space" Alternatively, configuration can be provided as part of a bundle: @@ -78,7 +78,7 @@ implications of segregating Ceph network traffic. The charm supports Ceph metric monitoring with Prometheus. Add relations to the [prometheus][prometheus-charm] application in this way: - juju deploy cs:prometheus2 + juju deploy prometheus2 juju add-relation ceph-mon prometheus2 > **Note**: Prometheus support is available starting with Ceph Luminous @@ -87,115 +87,36 @@ The charm supports Ceph metric monitoring with Prometheus. Add relations to the ## Actions This section lists Juju [actions][juju-docs-actions] supported by the charm. -Actions allow specific operations to be performed on a per-unit basis. - -### copy-pool - -Copy contents of a pool to a new pool. - -### create-cache-tier - -Create a new cache tier. - -### create-crush-rule - -Create a new replicated CRUSH rule to use on a pool. - -### create-erasure-profile - -Create a new erasure code profile to use on a pool. - -### create-pool - -Create a pool. - -### crushmap-update - -Apply a new CRUSH map definition. - -> **Warning**: This action can break your cluster in unexpected ways if - misused. - -### delete-erasure-profile - -Delete an erasure code profile. - -### delete-pool - -Delete a pool. - -### get-erasure-profile - -Display an erasure code profile. - -### get-health - -Display cluster health. - -### list-erasure-profiles - -List erasure code profiles. - -### list-pools - -List pools. - -### pause-health - -Pause the cluster's health operations. - -### pool-get - -Get a value for a pool. - -### pool-set - -Set a value for a pool. - -### pool-statistics - -Display a pool's utilisation statistics. - -### remove-cache-tier - -Remove a cache tier. - -### remove-pool-snapshot - -Remove a pool's snapshot. - -### rename-pool - -Rename a pool. - -### resume-health - -Resume the cluster's health operations. 
- -### security-checklist - -Validate the running configuration against the OpenStack security guides -checklist. - -### set-noout - -Set the cluster's 'noout' flag. - -### set-pool-max-bytes - -Set a pool's quota for the maximum number of bytes. - -### show-disk-free - -Show disk utilisation by host and OSD. - -### snapshot-pool - -Create a pool snapshot. - -### unset-noout - -Unset the cluster's 'noout' flag. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-mon`. If the charm is not +deployed then see file `actions.yaml`. + +* `copy-pool` +* `create-cache-tier` +* `create-crush-rule` +* `create-erasure-profile` +* `create-pool` +* `crushmap-update` +* `delete-erasure-profile` +* `delete-pool` +* `get-erasure-profile` +* `get-health` +* `list-erasure-profiles` +* `list-pools` +* `pause-health` +* `pool-get` +* `pool-set` +* `pool-statistics` +* `remove-cache-tier` +* `remove-pool-snapshot` +* `rename-pool` +* `resume-health` +* `security-checklist` +* `set-noout` +* `set-pool-max-bytes` +* `show-disk-free` +* `snapshot-pool` +* `unset-noout` # Bugs @@ -214,4 +135,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [ceph-docs-monitors]: https://docs.ceph.com/docs/master/dev/mon-bootstrap [lp-bugs-charm-ceph-mon]: https://bugs.launchpad.net/charm-ceph-mon/+filebug [cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html -[prometheus-charm]: https://jaas.ai/prometheus2 \ No newline at end of file +[prometheus-charm]: https://jaas.ai/prometheus2 From 46034b63c10d79b6eb48fcb0659b5c4ee2c5e396 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 12 May 2020 16:22:18 -0400 Subject: [PATCH 1963/2699] Format based on README template Change-Id: I51163b690cc80158d63ad00cba3d6b6e84833b2e --- ceph-osd/README.md | 59 +++++++++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index f5822ea1..9a767804 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -178,11 +178,24 @@ options `osd-encrypt` and `osd-encrypt-keymanager` for the ceph-osd charm: ## Actions This section covers Juju [actions][juju-docs-actions] supported by the charm. -Actions allow specific operations to be performed on a per-unit basis. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-osd`. If the charm is not +deployed then see file `actions.yaml`. -### osd-out +* `add-disk` +* `blacklist-add-disk` +* `blacklist-remove-disk` +* `list-disks` +* `osd-in` +* `osd-out` +* `security-checklist` +* `zap-disk` -Set as 'out' all OSD volumes on a unit. +## Working with OSDs + +### Set OSDs to 'out' + +Use the `osd-out` action to set all OSD volumes on a unit to 'out'. > **Warning**: This action has the potential of impacting your cluster significantly. The [Ceph documentation][ceph-docs-removing-osds] on this @@ -213,9 +226,9 @@ Example: juju run-action --wait ceph-osd/4 osd-out -### osd-in +### Set OSDs to 'in' -Set as 'in' all OSD volumes on a unit. +Use the `osd-in` action to set all OSD volumes on a unit to 'in'. The `osd-in` action is reciprocal to the `osd-out` action. The OSDs are set to 'in'. 
It is typically used when the `osd-out` action was used in conjunction
@@ -225,12 +238,13 @@ Example:
 
     juju run-action --wait ceph-osd/4 osd-in
 
-### list-disks
+## Working with disks
 
-List disks known to a unit.
+### List disks
 
-The `list-disks` action lists the unit's block devices by categorising them in
-three ways:
+Use the `list-disks` action to list disks known to a unit.
+
+The action lists the unit's block devices by categorising them in three ways:
 
 - `disks`: visible (known by udev), unused (not mounted), and not designated
   as an OSD journal (via the `osd-journal` configuration option)
@@ -244,9 +258,9 @@ Example:
 
     juju run-action --wait ceph-osd/4 list-disks
 
-### add-disk
+### Add a disk
 
-Add a disk to a unit.
+Use the `add-disk` action to add a disk to a unit.
 
 A ceph-osd unit is automatically assigned OSD volumes based on the current
 value of the `osd-devices` application option. The `add-disk` action allows the
@@ -269,15 +283,15 @@ Example:
 
     juju run-action --wait ceph-osd/4 add-disk osd-devices=/dev/vde
 
-### blacklist-add-disk
+### Blacklist a disk
 
-Add a disk to a unit's blacklist.
+Use the `blacklist-add-disk` action to add a disk to a unit's blacklist.
 
-The `blacklist-add-disk` action allows the operator to add disks (that are
-visible to the unit's underlying machine) to the unit's blacklist. A
-blacklisted device will not be initialised as an OSD volume when the value of
-the `osd-devices` application option changes. This action does not prevent a
-device from being activated via the `add-disk` action.
+The action allows the operator to add disks (that are visible to the unit's
+underlying machine) to the unit's blacklist. A blacklisted device will not be
+initialised as an OSD volume when the value of the `osd-devices` application
+option changes. This action does not prevent a device from being activated via
+the `add-disk` action.
 
 Use the `list-disks` action to list the unit's blacklist entries.
 
@@ -296,9 +310,10 @@ Example:
 
     juju run-action --wait ceph-osd/0 \
        blacklist-add-disk osd-devices='/dev/vda /dev/vdf'
 
-### blacklist-remove-disk
+### Un-blacklist a disk
 
-Remove a disk from a unit's blacklist.
+Use the `blacklist-remove-disk` action to remove a disk from a unit's
+blacklist.
 
 **Parameters**
 
@@ -315,9 +330,9 @@ Example:
 
     juju run-action --wait ceph-osd/1 \
        blacklist-remove-disk osd-devices=/dev/vdb
 
-### zap-disk
+### Zap a disk
 
-Purge a unit's disk of all data.
+Use the `zap-disk` action to purge a disk of all data.
 
 In order to prevent unintentional data loss, the charm will not use a disk
 that has existing data already on it. To forcibly make a disk available, the

From 5414223b6bbfeabcfd81cf1dfe571f2aa16d1a5d Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Thu, 2 Apr 2020 15:30:44 +0100
Subject: [PATCH 1964/2699] Add focal-ussuri and bionic-ussuri bundle

This patch adds focal-ussuri and bionic-ussuri bundles to the tests
for the charm.

Also removes 'admin-token' from the tests, as the keystone charm no
longer accepts the option.

Also, for focal, btrfs-tools has been removed and replaced by
btrfs-progs; btrfs-progs is therefore installed on focal instead. See
linked Bug.
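
The package selection ends up along these lines (a condensed view of
the ceph_hooks.py hunk below):

    _release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(_release) >= "focal":
        _packages = ceph.PACKAGES_FOCAL   # includes btrfs-progs
    else:
        _packages = ceph.PACKAGES         # includes btrfs-tools
    apt_install(packages=_packages, fatal=True)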
Closes-Bug: #1871712 Change-Id: Ifc79beebb981571cebc38be6cb6c2748d22816c9 --- ceph-proxy/hooks/ceph.py | 1 + ceph-proxy/hooks/ceph_hooks.py | 12 +- ceph-proxy/metadata.yaml | 1 + ceph-proxy/tests/bundles/bionic-ussuri.yaml | 80 +++++++++++ ceph-proxy/tests/bundles/focal-ussuri.yaml | 148 ++++++++++++++++++++ ceph-proxy/tests/tests.yaml | 7 + 6 files changed, 247 insertions(+), 2 deletions(-) create mode 100644 ceph-proxy/tests/bundles/bionic-ussuri.yaml create mode 100644 ceph-proxy/tests/bundles/focal-ussuri.yaml diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index a85a84ae..214105a8 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -47,6 +47,7 @@ QUORUM = [LEADER, PEON] PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] +PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] def ceph_user(): diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 53568cd9..456c10bc 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -40,8 +40,11 @@ def _add_path(path): service_name, status_set,) from charmhelpers.core.host import ( + cmp_pkgrevno, + CompareHostReleases, + lsb_release, mkdir, - cmp_pkgrevno,) +) from charmhelpers.fetch import ( apt_install, apt_update, @@ -86,7 +89,12 @@ def install(): def package_install(): add_source(config('source'), config('key')) apt_update(fatal=True) - apt_install(packages=ceph.PACKAGES, fatal=True) + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= "focal": + _packages = ceph.PACKAGES_FOCAL + else: + _packages = ceph.PACKAGES + apt_install(packages=_packages, fatal=True) def emit_cephconf(): diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index de628f67..f0e63b93 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -13,6 +13,7 @@ series: - xenial - bionic - eoan + - focal - trusty extra-bindings: public: diff --git a/ceph-proxy/tests/bundles/bionic-ussuri.yaml b/ceph-proxy/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..14ac2aec --- /dev/null +++ b/ceph-proxy/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,80 @@ +series: bionic +applications: + ceph-mon: + charm: 'cs:~openstack-charmers-next/ceph-mon' + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-ussuri + ceph-osd: + charm: 'cs:~openstack-charmers-next/ceph-osd' + num_units: 3 + storage: + osd-devices: 10G + options: + source: cloud:bionic-ussuri + ceph-proxy: + charm: 'ceph-proxy' + num_units: 1 + options: + source: cloud:bionic-ussuri + ceph-radosgw: + charm: 'cs:~openstack-charmers-next/ceph-radosgw' + num_units: 1 + options: + source: cloud:bionic-ussuri + cinder: + charm: 'cs:~openstack-charmers-next/cinder' + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + cinder-ceph: + charm: 'cs:~openstack-charmers-next/cinder-ceph' + options: + restrict-ceph-pools: True + keystone: + charm: 'cs:~openstack-charmers-next/keystone' + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + admin-password: openstack + constraints: mem=1024 + percona-cluster: + charm: 'cs:~openstack-charmers-next/percona-cluster' + num_units: 1 + options: + source: cloud:bionic-ussuri + dataset-size: 50% + max-connections: 1000 + innodb-buffer-pool-size: 256M + root-password: ChangeMe123 + sst-password: ChangeMe123 + constraints: mem=4096 + rabbitmq-server: + 
charm: 'cs:~openstack-charmers-next/rabbitmq-server' + num_units: 1 + constraints: mem=1024 + options: + source: cloud:bionic-ussuri +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + - - 'cinder:shared-db' + - 'percona-cluster:shared-db' + - - 'keystone:shared-db' + - 'percona-cluster:shared-db' + - - 'cinder:identity-service' + - 'keystone:identity-service' + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-ussuri.yaml b/ceph-proxy/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..1a5af635 --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,148 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index b7448ec6..35807d57 100644 --- a/ceph-proxy/tests/tests.yaml +++ 
b/ceph-proxy/tests/tests.yaml
@@ -1,4 +1,5 @@
 charm_name: ceph-proxy
+
 configure:
 - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy
@@ -13,6 +14,8 @@ gate_bundles:
   - bionic-rocky # mimic
   - bionic-stein
   - bionic-train
+  - bionic-ussuri
+  - focal-ussuri
 dev_bundles:
   # Icehouse
   - trusty-icehouse
@@ -41,3 +44,7 @@ target_deploy_status:
   keystone:
     workload-status: active
     workload-status-message: "Unit is ready"
+
+tests_options:
+  force_deploy:
+    - focal-ussuri

From 5d021263f95a541962fb2bd37e064870069a6768 Mon Sep 17 00:00:00 2001
From: James Page
Date: Fri, 15 May 2020 17:00:25 +0100
Subject: [PATCH 1965/2699] Trigger udev rescan if pv_dev disappears

Workaround for a kernel bug in Ubuntu 20.04 LTS.

When using by-dname device paths with MAAS and bcache, the pvcreate
operation results in the by-dname entry for the block device being
deleted. The subsequent vgcreate then fails as the path cannot be
found.

Trigger a rescan of block devices if the pv_dev path does not exist
after the pvcreate operation.

Change-Id: If7e11f6bd1effd2d5fc2dc5abbaba6865104006f
Depends-On: Ifb16c47ae5ff316cbcfc3798de3446a3774fa012
Related-Bug: 1878752
---
 ceph-osd/lib/charms_ceph/utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py
index a3fd276d..a33bc88e 100644
--- a/ceph-osd/lib/charms_ceph/utils.py
+++ b/ceph-osd/lib/charms_ceph/utils.py
@@ -1957,6 +1957,9 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
     vg_name = None
     if not lvm.is_lvm_physical_volume(pv_dev):
         lvm.create_lvm_physical_volume(pv_dev)
+        if not os.path.exists(pv_dev):
+            # NOTE: trigger rescan to work around bug 1878752
+            rescan_osd_devices()
     if shared:
         vg_name = 'ceph-{}-{}'.format(lv_type, str(uuid.uuid4()))

From 0824222ce471aca21e03da2ba5daef17b4a3a495 Mon Sep 17 00:00:00 2001
From: Aurelien Lourot
Date: Mon, 18 May 2020 14:38:37 +0200
Subject: [PATCH 1966/2699] Sync helpers for 20.05

Change-Id: I695760d4319b8c5c01ad0fd54d6c8d7d1d2633f8
---
 ceph-mon/lib/charms_ceph/utils.py | 7 +++++--
 ceph-mon/tox.ini                  | 5 +++++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py
index a3fd276d..e9b289a4 100644
--- a/ceph-mon/lib/charms_ceph/utils.py
+++ b/ceph-mon/lib/charms_ceph/utils.py
@@ -1481,11 +1481,11 @@ def get_devices(name):
     :returns: Set(device names), which are strings
     """
     if config(name):
-        devices = [l.strip() for l in config(name).split(' ')]
+        devices = [dev.strip() for dev in config(name).split(' ')]
     else:
         devices = []
     storage_ids = storage_list(name)
-    devices.extend((storage_get('location', s) for s in storage_ids))
+    devices.extend((storage_get('location', sid) for sid in storage_ids))
     devices = filter(os.path.exists, devices)
     return set(devices)
@@ -1957,6 +1957,9 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
     vg_name = None
     if not lvm.is_lvm_physical_volume(pv_dev):
         lvm.create_lvm_physical_volume(pv_dev)
+        if not os.path.exists(pv_dev):
+            # NOTE: trigger rescan to work around bug 1878752
+            rescan_osd_devices()
     if shared:
         vg_name = 'ceph-{}-{}'.format(lv_type, str(uuid.uuid4()))

diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini
index 20dbbfc5..b835733a 100644
--- a/ceph-mon/tox.ini
+++ b/ceph-mon/tox.ini
@@ -41,6 +41,11 @@ basepython = python3.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
+[testenv:py38]
+basepython = python3.8
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py3]
 basepython = python3
deps = -r{toxinidir}/requirements.txt From ec0f1e50b5a0f38e0fd881c3b2f17c43e7fcf624 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 18 May 2020 14:38:52 +0200 Subject: [PATCH 1967/2699] Sync helpers for 20.05 Change-Id: Ibebef746614a9de92c4e3c9ca109f313562784fc --- .../contrib/storage/linux/ceph.py | 40 +++++++++++++++---- ceph-osd/lib/charms_ceph/utils.py | 4 +- ceph-osd/tox.ini | 5 +++ 3 files changed, 40 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index eb31b782..95a0d82a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -92,6 +92,7 @@ DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 class OsdPostUpgradeError(Exception): @@ -399,16 +400,28 @@ def __init__(self, service, name, pg_num=None, replicas=2, def create(self): if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + try: check_call(cmd) # Set the pool replica size update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -466,10 +479,24 @@ def create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + try: check_call(cmd) try: @@ -478,7 +505,6 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index a33bc88e..e9b289a4 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1481,11 +1481,11 @@ def get_devices(name): :returns: Set(device names), which are strings """ if config(name): - devices = [l.strip() for l in config(name).split(' ')] + devices = [dev.strip() for dev in config(name).split(' ')] else: devices = [] storage_ids = storage_list(name) - devices.extend((storage_get('location', s) for s in storage_ids)) + devices.extend((storage_get('location', sid) for sid in storage_ids)) devices = 
filter(os.path.exists, devices) return set(devices) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 20dbbfc5..b835733a 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -41,6 +41,11 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From f87b61de9225c85c13dfedf17c8e3b1e27a3f1fd Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 18 May 2020 14:39:07 +0200 Subject: [PATCH 1968/2699] Sync helpers for 20.05 Change-Id: Ib0a99f80e6a133950498accc6153b68598f7ad20 --- ceph-proxy/tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 20dbbfc5..b835733a 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -41,6 +41,11 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From b9d3c42babb94b7fef22f3bc4b7f98134a9e3d24 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 18 May 2020 14:39:21 +0200 Subject: [PATCH 1969/2699] Sync helpers for 20.05 Change-Id: Id86f9adb55988f47838ebc5236e2bca2cc384906 --- .../contrib/storage/linux/ceph.py | 40 +++++++++++++++---- ceph-radosgw/lib/charms_ceph/utils.py | 3 ++ ceph-radosgw/test-requirements.txt | 2 +- ceph-radosgw/tox.ini | 7 +++- 4 files changed, 43 insertions(+), 9 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index eb31b782..95a0d82a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -92,6 +92,7 @@ DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 class OsdPostUpgradeError(Exception): @@ -399,16 +400,28 @@ def __init__(self, service, name, pg_num=None, replicas=2, def create(self): if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + try: check_call(cmd) # Set the pool replica size update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -466,10 +479,24 @@ def create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile] + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + 
'--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + try: check_call(cmd) try: @@ -478,7 +505,6 @@ def create(self): name=self.app_name) except CalledProcessError: log('Could not set app name for pool {}'.format(self.name, level=WARNING)) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 1152bda9..e9b289a4 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1957,6 +1957,9 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, vg_name = None if not lvm.is_lvm_physical_volume(pv_dev): lvm.create_lvm_physical_volume(pv_dev) + if not os.path.exists(pv_dev): + # NOTE: trigger rescan to work around bug 1878752 + rescan_osd_devices() if shared: vg_name = 'ceph-{}-{}'.format(lv_type, str(uuid.uuid4())) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 44b50231..7d9c2587 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -10,7 +10,7 @@ charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4 +flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 355780fa..b835733a 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -41,6 +41,11 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -111,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226,W504 +ignore = E402,E226 exclude = */charmhelpers From fd6037b4e38aac82c198a155dc9ff41442102acf Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 19 May 2020 08:18:36 +0000 Subject: [PATCH 1970/2699] Sync charms.ceph Sync charms.ceph to pick up fix for LP #1851869 Change-Id: Ie3027d030c71faec0108fcde7fdd624b29623d49 Closes-Bug: #1851869 Partial-Bug: #1879464 --- ceph-mon/lib/charms_ceph/utils.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index a3fd276d..7a0cb7d4 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1139,8 +1139,9 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd']), + ('mon', ['profile rbd; allow r']), ('osd', ['profile rbd']), + ('mgr', ['allow r']), ]) @@ -1481,11 +1482,11 @@ def get_devices(name): :returns: Set(device names), which are strings """ if config(name): - devices = [l.strip() for l in config(name).split(' ')] + devices = [dev.strip() for dev in config(name).split(' ')] else: devices = [] storage_ids = storage_list(name) - devices.extend((storage_get('location', s) for s in storage_ids)) + devices.extend((storage_get('location', sid) for sid in storage_ids)) devices = filter(os.path.exists, devices) return set(devices) @@ 
-1957,6 +1958,9 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, vg_name = None if not lvm.is_lvm_physical_volume(pv_dev): lvm.create_lvm_physical_volume(pv_dev) + if not os.path.exists(pv_dev): + # NOTE: trigger rescan to work around bug 1878752 + rescan_osd_devices() if shared: vg_name = 'ceph-{}-{}'.format(lv_type, str(uuid.uuid4())) From a77395061496e3db92c160dd2784c4167345e7c4 Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Tue, 19 May 2020 14:36:40 -0500 Subject: [PATCH 1971/2699] Add focal series metadata Change-Id: Iebe13d3b86a07b5eb4c9e174f88931b5b7ea3291 --- ceph-rbd-mirror/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 7de422e9..e50d8022 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -18,6 +18,7 @@ series: - xenial - bionic - eoan + - focal extra-bindings: public: cluster: From 171e9875655376e59167483d02293b9417fbbd56 Mon Sep 17 00:00:00 2001 From: Ponnuvel Palaniyappan Date: Tue, 26 May 2020 13:10:51 +0100 Subject: [PATCH 1972/2699] Remove duplicate key 'extra-bindings' from metadata.yaml Signed-off-by: Ponnuvel Palaniyappan --- ceph-iscsi/metadata.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 0f3c7f69..1c953471 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -24,5 +24,3 @@ requires: peers: cluster: interface: ceph-iscsi-peer -extra-bindings: - public: From 3a7f2b96e487360f78cc9a4e7f52f7e1e7760f19 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 23 Mar 2020 08:26:19 +0100 Subject: [PATCH 1973/2699] Determine default port based on presence of TLS configuration Fix intermittent deployment failure with TLS. Default to TLS in the functional test. The call to ``configure_https`` in identity_changed remains from the time when Keystone provided certificates; remove it. Hold the service down until keys are rendered. Change-Id: Ia16e6200520972c503102d80cda35e36daea82a2 Closes-Bug: #1868387 --- ceph-radosgw/config.yaml | 5 ++- ceph-radosgw/hooks/ceph_radosgw_context.py | 8 ++-- ceph-radosgw/hooks/hooks.py | 41 +++++++++++-------- ceph-radosgw/hooks/utils.py | 16 ++++++++ .../bundles/bionic-queens-namespaced.yaml | 9 ++++ ceph-radosgw/tests/bundles/bionic-queens.yaml | 9 ++++ .../bundles/bionic-rocky-namespaced.yaml | 9 ++++ ceph-radosgw/tests/bundles/bionic-rocky.yaml | 9 ++++ .../bundles/bionic-stein-namespaced.yaml | 9 ++++ ceph-radosgw/tests/bundles/bionic-stein.yaml | 9 ++++ .../bundles/bionic-train-namespaced.yaml | 9 ++++ ceph-radosgw/tests/bundles/bionic-train.yaml | 9 ++++ .../bundles/bionic-ussuri-namespaced.yaml | 9 ++++ ceph-radosgw/tests/bundles/bionic-ussuri.yaml | 9 ++++ .../bundles/focal-ussuri-namespaced.yaml | 21 ++++++++++ ceph-radosgw/tests/bundles/focal-ussuri.yaml | 22 ++++++++++ ceph-radosgw/tests/bundles/xenial-queens.yaml | 9 ++++ ceph-radosgw/tests/tests.yaml | 38 ++++++++++------- .../unit_tests/test_ceph_radosgw_context.py | 2 + .../unit_tests/test_ceph_radosgw_utils.py | 8 ++++ ceph-radosgw/unit_tests/test_hooks.py | 27 ++++++++---- 21 files changed, 243 insertions(+), 44 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index a2791008..2123d97d 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -55,9 +55,12 @@ options: natively supported config in the charm. port: type: int - default: 80 + default: description: | The port that the RADOS Gateway will listen on. + . 
+ The default is 80 when no TLS is configured and 443 when TLS is + configured. prefer-ipv6: type: boolean default: False diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 43dd7534..3c26fa48 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -41,13 +41,15 @@ ) from charmhelpers.contrib.storage.linux.ceph import CephConfContext +import utils + class ApacheSSLContext(context.ApacheSSLContext): interfaces = ['https'] service_namespace = 'ceph-radosgw' def __call__(self): - self.external_ports = [config('port')] + self.external_ports = [utils.listen_port()] return super(ApacheSSLContext, self).__call__() @@ -55,7 +57,7 @@ class HAProxyContext(context.HAProxyContext): def __call__(self): ctxt = super(HAProxyContext, self).__call__() - port = config('port') + port = utils.listen_port() # Apache ports a_cephradosgw_api = determine_apache_port(port, singlenode_mode=True) @@ -183,7 +185,7 @@ def __call__(self): if config('prefer-ipv6'): ensure_host_resolvable_v6(host) - port = determine_api_port(config('port'), singlenode_mode=True) + port = determine_api_port(utils.listen_port(), singlenode_mode=True) if config('prefer-ipv6'): port = "[::]:%s" % (port) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cc6c536c..cefd170b 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -53,10 +53,12 @@ from charmhelpers.core.host import ( cmp_pkgrevno, is_container, + service, + service_pause, service_reload, service_restart, + service_resume, service_stop, - service, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -80,20 +82,21 @@ generate_ha_relation_data, ) from utils import ( - register_configs, - setup_ipv6, - services, assess_status, disable_unused_apache_sites, + listen_port, + multisite_deployment, pause_unit_helper, - resume_unit_helper, + ready_for_service, + register_configs, + request_per_unit_key, restart_map, + restart_nonce_changed, + resume_unit_helper, service_name, + services, + setup_ipv6, systemd_based_radosgw, - request_per_unit_key, - ready_for_service, - restart_nonce_changed, - multisite_deployment, ) from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden @@ -168,6 +171,10 @@ def install(): status_set('maintenance', 'Executing pre-install') execd_preinstall() install_packages() + # hold the service down until we have keys from ceph + log('Disable service "{}" until we have keys for it.' + .format(service_name()), level=DEBUG) + service_pause(service_name()) if not os.path.exists('/etc/ceph'): os.makedirs('/etc/ceph') if is_leader(): @@ -225,7 +232,7 @@ def _config_changed(): update_nrpe_config() - open_port(port=config('port')) + open_port(port=listen_port()) _config_changed() @@ -270,7 +277,6 @@ def _mon_relation(): # host services. update_nrpe_config(checks_to_remove=['radosgw']) - service('enable', service_name()) # NOTE(jamespage): # Multi-site deployments need to defer restart as the # zone is not created until the master relation is @@ -280,7 +286,9 @@ def _mon_relation(): if (not is_unit_paused_set() and new_keyring and not multisite_deployment()): - service_restart(service_name()) + log('Resume service "{}" as we now have keys for it.' 
+ .format(service_name()), level=DEBUG) + service_resume(service_name()) process_multisite_relations() else: @@ -291,7 +299,7 @@ def _mon_relation(): @hooks.hook('gateway-relation-joined') def gateway_relation(): relation_set(hostname=get_relation_ip('gateway-relation'), - port=config('port')) + port=listen_port()) @hooks.hook('identity-service-relation-joined') @@ -300,7 +308,7 @@ def identity_joined(relid=None): log('Integration with keystone requires ceph >= 0.55') sys.exit(1) - port = config('port') + port = listen_port() admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port) if leader_get('namespace_tenants') == 'True': internal_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \ @@ -341,7 +349,6 @@ def identity_changed(relid=None): def _identity_changed(): identity_joined(relid) CONFIGS.write_all() - configure_https() _identity_changed() @@ -492,7 +499,7 @@ def master_relation_joined(relation_id=None): internal_url = '{}:{}'.format( canonical_url(CONFIGS, INTERNAL), - config('port') + listen_port(), ) endpoints = [internal_url] realm = config('realm') @@ -582,7 +589,7 @@ def slave_relation_changed(relation_id=None, unit=None): internal_url = '{}:{}'.format( canonical_url(CONFIGS, INTERNAL), - config('port') + listen_port(), ) endpoints = [internal_url] diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 6d2ec213..13133ac3 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -100,6 +100,22 @@ ]) +def listen_port(): + """Determine port to listen to. + + The value in configuration will be used if specified, otherwise the default + will be determined based on presence of TLS configuration. + + :returns: Port number + :rtype: int + """ + if https(): + default_port = 443 + else: + default_port = 80 + return config('port') or default_port + + def resource_map(): """Dynamically generate a map of resources. 
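As an aside, the port-selection rule the new ``listen_port`` helper encodes is compact enough to sketch standalone. The following is an illustrative sketch only, not part of the patch: plain callables stand in for the charmhelpers config() accessor and the charm's https() predicate, and the assertions mirror the cases exercised by the test_listen_port unit test added later in this patch.

    # Illustrative sketch only -- not part of the patch. 'config' and
    # 'https' are stand-ins for the real charm helpers of those names.
    def listen_port(config, https):
        # An explicitly configured port always wins; otherwise the
        # default follows the presence of TLS configuration.
        default_port = 443 if https() else 80
        return config('port') or default_port

    # Behaviour mirrored from the unit test added in this patch:
    assert listen_port(lambda key: None, lambda: False) == 80
    assert listen_port(lambda key: None, lambda: True) == 443
    assert listen_port(lambda key: 42, lambda: True) == 42
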
diff --git a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml index 9f005463..807d0927 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml @@ -33,6 +33,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -42,3 +45,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-queens.yaml b/ceph-radosgw/tests/bundles/bionic-queens.yaml index 68a12cfa..5538461a 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml index d57a78ad..b78f0c59 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml @@ -33,6 +33,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -42,3 +45,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-rocky.yaml b/ceph-radosgw/tests/bundles/bionic-rocky.yaml index e4d7508b..4210aa84 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml index e9bfd072..461c277f 100644 --- a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml @@ -33,6 +33,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -42,3 +45,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - 
vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-stein.yaml b/ceph-radosgw/tests/bundles/bionic-stein.yaml index 0cb9f50c..58b0d375 100644 --- a/ceph-radosgw/tests/bundles/bionic-stein.yaml +++ b/ceph-radosgw/tests/bundles/bionic-stein.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml index 8773b257..c654ab78 100644 --- a/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-train.yaml b/ceph-radosgw/tests/bundles/bionic-train.yaml index 8858d027..785fdfc3 100644 --- a/ceph-radosgw/tests/bundles/bionic-train.yaml +++ b/ceph-radosgw/tests/bundles/bionic-train.yaml @@ -31,6 +31,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -40,3 +43,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml index ffa56dcd..c5036b63 100644 --- a/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri.yaml index 80954374..fcdd19de 100644 --- a/ceph-radosgw/tests/bundles/bionic-ussuri.yaml +++ b/ceph-radosgw/tests/bundles/bionic-ussuri.yaml @@ -31,6 +31,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -40,3 
+43,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml index e90e0c91..dd3f4591 100644 --- a/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml @@ -20,6 +20,7 @@ machines: '8': '9': '10': + '11': applications: @@ -78,6 +79,15 @@ applications: to: - '10' + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + relations: - - 'keystone:shared-db' @@ -94,3 +104,14 @@ relations: - - 'ceph-radosgw:identity-service' - 'keystone:identity-service' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-ussuri.yaml b/ceph-radosgw/tests/bundles/focal-ussuri.yaml index e5003281..91c3a831 100644 --- a/ceph-radosgw/tests/bundles/focal-ussuri.yaml +++ b/ceph-radosgw/tests/bundles/focal-ussuri.yaml @@ -20,6 +20,7 @@ machines: '8': '9': '10': + '11': applications: @@ -77,6 +78,15 @@ applications: to: - '10' + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + relations: - - 'keystone:shared-db' @@ -92,3 +102,15 @@ relations: - - 'ceph-radosgw:identity-service' - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/xenial-queens.yaml b/ceph-radosgw/tests/bundles/xenial-queens.yaml index 0c3b3ba7..38492821 100644 --- a/ceph-radosgw/tests/bundles/xenial-queens.yaml +++ b/ceph-radosgw/tests/bundles/xenial-queens.yaml @@ -32,6 +32,9 @@ applications: num_units: 1 options: openstack-origin: *source + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 relations: - - keystone:shared-db - percona-cluster:shared-db @@ -41,3 +44,9 @@ relations: - ceph-mon:radosgw - - ceph-radosgw:identity-service - keystone:identity-service +- - vault:shared-db + - percona-cluster:shared-db +- - keystone:certificates + - vault:certificates +- - ceph-radosgw:certificates + - vault:certificates diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 49b65c19..d29f5d83 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,31 +1,39 @@ charm_name: ceph-radosgw gate_bundles: - - test-s3api: focal-ussuri - - test-s3api: focal-ussuri-namespaced - - test-s3api: bionic-ussuri - - test-s3api: bionic-ussuri-namespaced - - test-s3api: bionic-train - - test-s3api: bionic-train-namespaced - - test-s3api: bionic-stein - - test-s3api: bionic-stein-namespaced - - test-s3api: bionic-rocky - - test-s3api: bionic-rocky-namespaced - - test-s3api: bionic-queens - - test-s3api: bionic-queens-namespaced - - test-s3api: xenial-queens + - vault: focal-ussuri + - vault: focal-ussuri-namespaced + - 
vault: bionic-ussuri + - vault: bionic-ussuri-namespaced + - vault: bionic-train + - vault: bionic-train-namespaced + - vault: bionic-stein + - vault: bionic-stein-namespaced + - vault: bionic-rocky + - vault: bionic-rocky-namespaced + - vault: bionic-queens + - vault: bionic-queens-namespaced + - vault: xenial-queens - xenial-pike - xenial-ocata - xenial-mitaka - xenial-mitaka-namespaced - trusty-mitaka smoke_bundles: - - test-s3api: bionic-train + - vault: bionic-ussuri dev_bundles: + - vault: focal-ussuri - bionic-queens-multisite - bionic-rocky-multisite +target_deploy_status: + vault: + workload-status: blocked + workload-status-message: Vault needs to be initialized +configure: + - vault: + - zaza.openstack.charm_tests.vault.setup.auto_initialize tests: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - - test-s3api: + - vault: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - zaza.openstack.charm_tests.swift.tests.S3APITest tests_options: diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index eec5069a..18096f11 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -32,6 +32,7 @@ 'determine_api_port', 'cmp_pkgrevno', 'leader_get', + 'utils', ] @@ -59,6 +60,7 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, _haconfig.side_effect = self.test_config.get _harelation_ids.return_value = [] haproxy_context = context.HAProxyContext() + self.utils.listen_port.return_value = 80 self.determine_api_port.return_value = 70 expect = { 'cephradosgw_bind_port': 70, diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index c398c470..cd878f7a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -300,3 +300,11 @@ def test_multisite_deployment(self): self.assertTrue(utils.multisite_deployment()) self.test_config.set('realm', None) self.assertFalse(utils.multisite_deployment()) + + def test_listen_port(self): + self.https.return_value = False + self.assertEquals(80, utils.listen_port()) + self.https.return_value = True + self.assertEquals(443, utils.listen_port()) + self.test_config.set('port', 42) + self.assertEquals(42, utils.listen_port()) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 96a6c866..6f4634bc 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -37,6 +37,7 @@ 'config', 'cmp_pkgrevno', 'execd_preinstall', + 'listen_port', 'log', 'open_port', 'os', @@ -53,6 +54,8 @@ 'service_reload', 'service_stop', 'service_restart', + 'service_pause', + 'service_resume', 'service', 'service_name', 'socket', @@ -155,6 +158,7 @@ def test_install(self, is_leader, leader_set): self.assertTrue(_install_packages.called) is_leader.assert_called_once() leader_set.assert_called_once_with(namespace_tenants=False) + self.service_pause.assert_called_once_with('radosgw') @patch.object(ceph_hooks, 'leader_set') @patch.object(ceph_hooks, 'is_leader') @@ -167,6 +171,7 @@ def test_install_without_namespacing(self, is_leader, leader_set): self.assertTrue(_install_packages.called) is_leader.assert_called_once() leader_set.assert_called_once_with(namespace_tenants=True) + self.service_pause.assert_called_once_with('radosgw') @patch.object(ceph_hooks, 'certs_joined') @patch.object(ceph_hooks, 'update_nrpe_config') @@ -191,8 +196,7 
@@ def test_mon_relation(self): self.socket.gethostname.return_value = 'testinghostname' ceph_hooks.mon_relation() self.relation_set.assert_not_called() - self.service_restart.assert_called_once_with('radosgw') - self.service.assert_called_once_with('enable', 'radosgw') + self.service_resume.assert_called_once_with('radosgw') _ceph.import_radosgw_key.assert_called_with('seckey', name='rgw.testinghostname') self.CONFIGS.write_all.assert_called_with() @@ -210,8 +214,7 @@ def test_mon_relation_request_key(self): relation_id=None, key_name='rgw.testinghostname' ) - self.service_restart.assert_called_once_with('radosgw') - self.service.assert_called_once_with('enable', 'radosgw') + self.service_resume.assert_called_once_with('radosgw') _ceph.import_radosgw_key.assert_called_with('seckey', name='rgw.testinghostname') self.CONFIGS.write_all.assert_called_with() @@ -224,8 +227,7 @@ def test_mon_relation_nokey(self): self.relation_get.return_value = None ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) - self.service_restart.assert_not_called() - self.service.assert_not_called() + self.service_resume.assert_not_called() self.CONFIGS.write_all.assert_called_with() @patch.object(ceph_hooks, 'send_request_if_needed') @@ -237,14 +239,14 @@ def test_mon_relation_send_broker_request(self, _ceph.import_radosgw_key.return_value = False self.relation_get.return_value = 'seckey' ceph_hooks.mon_relation() - self.service_restart.assert_not_called() - self.service.assert_not_called() + self.service_resume.assert_not_called() self.assertFalse(_ceph.import_radosgw_key.called) self.assertFalse(self.CONFIGS.called) self.assertTrue(mock_send_request_if_needed.called) def test_gateway_relation(self): self.get_relation_ip.return_value = '10.0.0.1' + self.listen_port.return_value = 80 ceph_hooks.gateway_relation() self.relation_set.assert_called_with(hostname='10.0.0.1', port=80) @@ -255,6 +257,7 @@ def test_gateway_relation(self): def test_identity_joined_early_version(self, _config, _leader_get): self.cmp_pkgrevno.return_value = -1 _leader_get.return_value = 'False' + self.listen_port.return_value = 80 ceph_hooks.identity_joined() self.sys.exit.assert_called_with(1) @@ -265,6 +268,8 @@ def test_identity_joined_early_version(self, _config, _leader_get): @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined(self, _config, _resolve_address, _leader_get): + self.listen_port.return_value = 80 + def _test_identify_joined(expected): self.related_units = ['unit/0'] self.cmp_pkgrevno.return_value = 1 @@ -310,6 +315,7 @@ def test_identity_joined_namespaced(self, _config, def _test_identify_joined(expected): self.related_units = ['unit/0'] self.cmp_pkgrevno.return_value = 1 + self.listen_port.return_value = 80 _resolve_address.return_value = 'myserv' _config.side_effect = self.test_config.get self.test_config.set('region', 'region1') @@ -356,6 +362,7 @@ def test_identity_joined_public_name(self, _config, _unit_get, _unit_get.return_value = 'myserv' _is_clustered.return_value = False _leader_get.return_value = 'False' + self.listen_port.return_value = 80 ceph_hooks.identity_joined(relid='rid') self.relation_set.assert_has_calls([ call(swift_service='swift', @@ -521,6 +528,7 @@ class CephRadosMultisiteTests(CharmTestCase): 'relation_set', 'relation_get', 'leader_get', + 'listen_port', 'config', 'is_leader', 'multisite', @@ -574,6 +582,7 @@ def test_master_relation_joined_missing_config(self): def test_master_relation_joined_create_everything(self): for k, v in 
self._complete_config.items(): self.test_config.set(k, v) + self.listen_port.return_value = 80 self.is_leader.return_value = True self.leader_get.side_effect = lambda attr: self._leader_data.get(attr) self.multisite.list_realms.return_value = [] @@ -656,6 +665,7 @@ def test_master_relation_joined_create_nothing(self): def test_master_relation_joined_not_leader(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) + self.listen_port.return_value = 80 self.is_leader.return_value = False self.leader_get.side_effect = lambda attr: self._leader_data.get(attr) ceph_hooks.master_relation_joined('master:1') @@ -698,6 +708,7 @@ def test_slave_relation_changed(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.is_leader.return_value = True + self.listen_port.return_value = 80 self.leader_get.return_value = None self.relation_get.return_value = self._test_relation self.multisite.list_realms.return_value = [] From bc379a5fa7e028277577b7fe9cb685bfcdf206c8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 29 May 2020 08:49:54 +0200 Subject: [PATCH 1974/2699] Add the cinder-ceph to nova-compute relation Change-Id: I7dccc2e77958c0390f94ce60fe64d34faedfc00a Closes-Bug: #1881246 --- ceph-mon/tests/bundles/bionic-queens.yaml | 2 ++ ceph-mon/tests/bundles/bionic-rocky.yaml | 2 ++ ceph-mon/tests/bundles/bionic-stein.yaml | 2 ++ ceph-mon/tests/bundles/bionic-train-with-fsid.yaml | 2 ++ ceph-mon/tests/bundles/bionic-train.yaml | 2 ++ ceph-mon/tests/bundles/bionic-ussuri.yaml | 2 ++ ceph-mon/tests/bundles/focal-ussuri.yaml | 3 +++ ceph-mon/tests/bundles/trusty-mitaka.yaml | 2 ++ ceph-mon/tests/bundles/xenial-mitaka.yaml | 2 ++ ceph-mon/tests/bundles/xenial-ocata.yaml | 2 ++ ceph-mon/tests/bundles/xenial-pike.yaml | 2 ++ ceph-mon/tests/bundles/xenial-queens.yaml | 2 ++ 12 files changed, 25 insertions(+) diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index d6115de4..4f81e82f 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -57,6 +57,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml index 88a47e10..2154c6a3 100644 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -71,6 +71,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml index 2778b254..eeedbed3 100644 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -71,6 +71,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml index 40ce0925..0624b45d 100644 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -73,6 +73,8 @@ relations: - glance:image-service - - nova-compute:ceph - 
ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index de35e827..2f4d65fb 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -75,6 +75,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/bionic-ussuri.yaml b/ceph-mon/tests/bundles/bionic-ussuri.yaml index 5fc27f81..086a077f 100644 --- a/ceph-mon/tests/bundles/bionic-ussuri.yaml +++ b/ceph-mon/tests/bundles/bionic-ussuri.yaml @@ -75,6 +75,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/focal-ussuri.yaml b/ceph-mon/tests/bundles/focal-ussuri.yaml index 73be35b5..227060fd 100644 --- a/ceph-mon/tests/bundles/focal-ussuri.yaml +++ b/ceph-mon/tests/bundles/focal-ussuri.yaml @@ -156,6 +156,9 @@ relations: - - 'nova-compute:ceph' - 'ceph-mon:client' + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + - - 'keystone:shared-db' - 'keystone-mysql-router:shared-db' - - 'keystone-mysql-router:db-router' diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml index 2e25f9ae..c6ae9dc5 100644 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -104,6 +104,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml index ae5967d1..90409212 100644 --- a/ceph-mon/tests/bundles/xenial-mitaka.yaml +++ b/ceph-mon/tests/bundles/xenial-mitaka.yaml @@ -54,6 +54,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml index 8f02a5be..e4ad7e3c 100644 --- a/ceph-mon/tests/bundles/xenial-ocata.yaml +++ b/ceph-mon/tests/bundles/xenial-ocata.yaml @@ -68,6 +68,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml index 63dc3193..2c0e88e7 100644 --- a/ceph-mon/tests/bundles/xenial-pike.yaml +++ b/ceph-mon/tests/bundles/xenial-pike.yaml @@ -68,6 +68,8 @@ relations: - glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml index ac31a915..8e7f7ae1 100644 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -71,6 +71,8 @@ relations: - 
glance:image-service - - nova-compute:ceph - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access - - keystone:shared-db - percona-cluster:shared-db - - glance:shared-db From 34e8aeed68326fba38466a41cdb161d43e4e1df6 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 29 May 2020 14:14:00 -0400 Subject: [PATCH 1975/2699] Apply HA section template to README Change-Id: I06a7b3dafa3fa3c6adcfe2b05ac2ad6cb7183bc4 --- ceph-radosgw/README.md | 80 ++++++++++++++---------------------------- 1 file changed, 27 insertions(+), 53 deletions(-) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 9d95be57..5c08423e 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -1,5 +1,4 @@ -Overview -======== +# Overview Ceph is a distributed storage and network file system designed to provide excellent performance, reliability and scalability. @@ -7,8 +6,7 @@ excellent performance, reliability and scalability. This charm deploys the RADOS Gateway, a S3 and Swift compatible HTTP gateway for online object storage on-top of a ceph cluster. -Usage -===== +## Usage In order to use this charm, it is assumed that you have already deployed a ceph storage cluster using the 'ceph' charm with something like this:: @@ -27,8 +25,7 @@ You can then directly access the RADOS gateway by exposing the service:: The gateway can be accessed over port 80 (as shown in juju status exposed ports). -Access -====== +## Access Note that you will need to login to one of the service units supporting the ceph charm to generate some access credentials:: @@ -39,8 +36,7 @@ ceph charm to generate some access credentials:: For security reasons the ceph-radosgw charm is not set up with appropriate permissions to administer the ceph cluster. -Keystone Integration -==================== +## Keystone Integration Ceph >= 0.55 integrates with Openstack Keystone for authentication of Swift requests. @@ -52,42 +48,19 @@ This is enabled by relating the ceph-radosgw service with keystone:: If you try to relate the radosgw to keystone with an earlier version of ceph the hook will error out to let you know. -HA/Clustering -============= +## High availability -There are two mutually exclusive high availability options: using virtual -IP(s) or DNS. In both cases, a relationship to hacluster is required which -provides the corosync back end HA functionality. +When more than one unit is deployed with the [hacluster][hacluster-charm] +application the charm will bring up an HA active/active cluster. -To use virtual IP(s) the clustered nodes must be on the same subnet such that -the VIP is a valid IP on the subnet for one of the node's interfaces and each -node has an interface in said subnet. The VIP becomes a highly-available API -endpoint. +There are two mutually exclusive high availability options: using virtual IP(s) +or DNS. In both cases the hacluster subordinate charm is used to provide the +Corosync and Pacemaker backend HA functionality. -At a minimum, the config option 'vip' must be set in order to use virtual IP -HA. If multiple networks are being used, a VIP should be provided for each -network, separated by spaces. Optionally, vip_iface or vip_cidr may be -specified. +See [OpenStack high availability][cdg-ha-apps] in the [OpenStack Charms +Deployment Guide][cdg] for details. -To use DNS high availability there are several prerequisites. However, DNS HA -does not require the clustered nodes to be on the same subnet. -Currently the DNS HA feature is only available for MAAS 2.0 or greater -environments. 
MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must -have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s) -must be pre-registered in MAAS before use with DNS HA. - -At a minimum, the config option 'dns-ha' must be set to true and at least one -of 'os-public-hostname', 'os-internal-hostname' or 'os-internal-hostname' must -be set in order to use DNS HA. One or more of the above hostnames may be set. - -The charm will throw an exception in the following circumstances: -If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster -If both 'vip' and 'dns-ha' are set as they are mutually exclusive -If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are -set - -Network Space support -===================== +## Network Space support This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. @@ -111,11 +84,9 @@ NOTE: Spaces must be configured in the underlying provider prior to attempting t NOTE: Existing deployments using os-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. -Multi-Site replication -====================== +## Multi-Site replication -Overview --------- +### Overview This charm supports configuration of native replication between Ceph RADOS gateway deployments. @@ -125,8 +96,7 @@ using cross-model relations. By default either ceph-radosgw deployment will accept write operations. -Deployment ----------- +### Deployment NOTE: example bundles for the us-west and us-east models can be found in the bundles subdirectory of the ceph-radosgw charm. @@ -192,8 +162,7 @@ optionally be tidied using the 'tidydefaults' action: This operation is not reversible. -Failover/Recovery ------------------ +### Failover/Recovery In the event that the site hosting the zone which is the master for metadata (in this example us-east) has an outage, the master metadata zone must be @@ -213,8 +182,7 @@ has completed using the 'promote' action: juju run-action -m us-east --wait rgw-us-east/0 promote -Read/write vs Read-only ------------------------ +### Read/write vs Read-only By default all zones within a deployment will be read/write capable but only the master zone can be used to create new containers. @@ -229,8 +197,7 @@ promoting it to be the current master or by using the 'readwrite' action: juju run-action -m us-east --wait rgw-us-east/0 readwrite -Tenant Namespacing ------------------- +### Tenant Namespacing By default, Ceph Rados Gateway puts all tenant buckets into the same global namespace, disallowing multiple tenants to have buckets with the same name. @@ -248,4 +215,11 @@ namespace under their tenant id, as well as adding the tenant's ID parameter to the Keystone endpoint registration to allow seamless integration with OpenStack. Tenant namespacing cannot be toggled on in an existing installation as it will remove tenant access to existing buckets. Toggling this option on an already -deployed Rados Gateway will have no effect. \ No newline at end of file +deployed Rados Gateway will have no effect. 
+ + + +[hacluster-charm]: https://jaas.ai/hacluster +[cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide +[cdg-ha-apps]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ha.html#ha-applications From 31fb97fe8c7867656b71ae93ecea8aa6e5572418 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 1 Jun 2020 10:27:47 +0100 Subject: [PATCH 1976/2699] Add missing relation to test bundles A recent commit to cinder-ceph means that this application considers itself blocked until it's related to nova-compute. Add this relation to all bundles. Closes-Bug: 1881246 Change-Id: I335af616f9387edecb1a57177a06a0aa8dc51a86 --- ceph-osd/tests/bundles/bionic-queens.yaml | 2 ++ ceph-osd/tests/bundles/bionic-rocky.yaml | 2 ++ ceph-osd/tests/bundles/bionic-stein.yaml | 2 ++ ceph-osd/tests/bundles/bionic-train.yaml | 2 ++ ceph-osd/tests/bundles/bionic-ussuri.yaml | 2 ++ ceph-osd/tests/bundles/eoan-train.yaml | 2 ++ ceph-osd/tests/bundles/focal-ussuri.yaml | 3 +++ ceph-osd/tests/bundles/trusty-mitaka.yaml | 2 ++ ceph-osd/tests/bundles/xenial-mitaka.yaml | 2 ++ ceph-osd/tests/bundles/xenial-ocata.yaml | 2 ++ ceph-osd/tests/bundles/xenial-pike.yaml | 2 ++ ceph-osd/tests/bundles/xenial-queens.yaml | 2 ++ 12 files changed, 25 insertions(+) diff --git a/ceph-osd/tests/bundles/bionic-queens.yaml b/ceph-osd/tests/bundles/bionic-queens.yaml index 74c8cc3d..cb79f356 100644 --- a/ceph-osd/tests/bundles/bionic-queens.yaml +++ b/ceph-osd/tests/bundles/bionic-queens.yaml @@ -88,3 +88,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml index 8c1a02ca..684a8d5e 100644 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ b/ceph-osd/tests/bundles/bionic-rocky.yaml @@ -102,3 +102,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-stein.yaml b/ceph-osd/tests/bundles/bionic-stein.yaml index a226ba05..1332a5ad 100644 --- a/ceph-osd/tests/bundles/bionic-stein.yaml +++ b/ceph-osd/tests/bundles/bionic-stein.yaml @@ -102,3 +102,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-train.yaml b/ceph-osd/tests/bundles/bionic-train.yaml index 34636498..e5263975 100644 --- a/ceph-osd/tests/bundles/bionic-train.yaml +++ b/ceph-osd/tests/bundles/bionic-train.yaml @@ -112,3 +112,5 @@ relations: - keystone - - placement - nova-cloud-controller +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-ussuri.yaml b/ceph-osd/tests/bundles/bionic-ussuri.yaml index 647de631..6a858d4e 100644 --- a/ceph-osd/tests/bundles/bionic-ussuri.yaml +++ b/ceph-osd/tests/bundles/bionic-ussuri.yaml @@ -112,3 +112,5 @@ relations: - keystone - - placement - nova-cloud-controller +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/eoan-train.yaml b/ceph-osd/tests/bundles/eoan-train.yaml index 4aab9457..d728b0b2 100644 --- a/ceph-osd/tests/bundles/eoan-train.yaml +++ b/ceph-osd/tests/bundles/eoan-train.yaml @@ -112,3 +112,5 @@ relations: - keystone - - placement - 
nova-cloud-controller +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/focal-ussuri.yaml b/ceph-osd/tests/bundles/focal-ussuri.yaml index ace547bf..51868c30 100644 --- a/ceph-osd/tests/bundles/focal-ussuri.yaml +++ b/ceph-osd/tests/bundles/focal-ussuri.yaml @@ -217,3 +217,6 @@ relations: - - 'placement' - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml index f2d53bec..555895d3 100644 --- a/ceph-osd/tests/bundles/trusty-mitaka.yaml +++ b/ceph-osd/tests/bundles/trusty-mitaka.yaml @@ -138,3 +138,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-mitaka.yaml b/ceph-osd/tests/bundles/xenial-mitaka.yaml index 845fb18b..4500ce95 100644 --- a/ceph-osd/tests/bundles/xenial-mitaka.yaml +++ b/ceph-osd/tests/bundles/xenial-mitaka.yaml @@ -88,3 +88,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-ocata.yaml b/ceph-osd/tests/bundles/xenial-ocata.yaml index 0985a425..ef2bfb54 100644 --- a/ceph-osd/tests/bundles/xenial-ocata.yaml +++ b/ceph-osd/tests/bundles/xenial-ocata.yaml @@ -102,3 +102,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-pike.yaml b/ceph-osd/tests/bundles/xenial-pike.yaml index 0c089bb1..2874cfe4 100644 --- a/ceph-osd/tests/bundles/xenial-pike.yaml +++ b/ceph-osd/tests/bundles/xenial-pike.yaml @@ -102,3 +102,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-queens.yaml b/ceph-osd/tests/bundles/xenial-queens.yaml index ba4f5b72..8c5a762a 100644 --- a/ceph-osd/tests/bundles/xenial-queens.yaml +++ b/ceph-osd/tests/bundles/xenial-queens.yaml @@ -102,3 +102,5 @@ relations: - nova-compute:cloud-compute - - nova-cloud-controller:image-service - glance:image-service +- - cinder-ceph:ceph-access + - nova-compute:ceph-access From 851b51f31df30cc95d3e208a3eb31fde566b93c3 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 1 Jun 2020 08:41:42 +0100 Subject: [PATCH 1977/2699] Ensure bootstrapped OSD presented to ceph-mon On charm upgrade ensure that the number of bootstrapped OSDs is presented to the ceph-mon application. This ensures that the ceph-mon application does not switch into a 'waiting' state after upgrade from earlier versions of the ceph-* charms. 
Change-Id: If1425ef837a74212f002985f648ac1ecf9257201 Closes-Bug: 1861996 --- ceph-osd/hooks/ceph_hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index d453b32d..51c41c21 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -673,6 +673,9 @@ def upgrade_charm(): install_udev_rules() remap_resolved_targets() maybe_refresh_nrpe_files() + # NOTE(jamespage): https://pad.lv/1861996 + # ensure number of bootstrapped OSD's is presented to ceph-mon + prepare_disks_and_activate() def remap_resolved_targets(): From 4fb6a8b6a899a8a49d7303fd5aeeed94ba56b41b Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Jun 2020 14:22:18 +0100 Subject: [PATCH 1978/2699] Updates for 20.08 cycle start for groovy and libs - Adds groovy to the series in the metadata - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild Change-Id: I2f7aaaa327a82f85e9b90b9369c81db86d848324 --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 8 +++--- ceph-mon/hooks/charmhelpers/fetch/snap.py | 2 +- ceph-mon/metadata.yaml | 26 +++++++++---------- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index e59e0d1e..fbf01561 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -225,7 +225,7 @@ ('train', ['2.22.0', '2.23.0']), ('ussuri', - ['2.24.0']), + ['2.24.0', '2.25.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 95a0d82a..814d5c72 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -432,13 +432,13 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if 'pg_autoscaler' in enabled_manager_modules(): try: enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise @@ -504,7 +504,7 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -515,7 +515,7 @@ def create(self): enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py index 395836c7..fc70aa94 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/snap.py +++ b/ceph-mon/hooks/charmhelpers/fetch/snap.py @@ -69,7 +69,7 @@ def _snap_exec(commands): .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in 
{} seconds.' - .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 62f824c2..254ee4e6 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -2,19 +2,20 @@ name: ceph-mon summary: Highly scalable distributed storage maintainer: OpenStack Charmers description: | - Ceph is a distributed storage and network file system designed to provide - excellent performance, reliability, and scalability. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal - - trusty +- xenial +- bionic +- eoan +- focal +- trusty +- groovy peers: mon: interface: ceph @@ -37,9 +38,6 @@ provides: interface: ceph-radosgw rbd-mirror: interface: ceph-rbd-mirror - nrpe-external-master: - interface: nrpe-external-master - scope: container prometheus: interface: http requires: From 10a3c8655c65bf5d6a141abab4fb7b44c8f8c9c1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Jun 2020 14:22:18 +0100 Subject: [PATCH 1979/2699] Updates for 20.08 cycle start for groovy and libs - Adds groovy to the series in the metadata - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild Change-Id: I56a59d0c4e72a35b7c4ac5d989e0d005fae20946 --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 8 ++--- ceph-osd/hooks/charmhelpers/fetch/snap.py | 2 +- ceph-osd/lib/charms_ceph/utils.py | 3 +- ceph-osd/metadata.yaml | 29 ++++++++++--------- 5 files changed, 23 insertions(+), 21 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index e59e0d1e..fbf01561 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -225,7 +225,7 @@ ('train', ['2.22.0', '2.23.0']), ('ussuri', - ['2.24.0']), + ['2.24.0', '2.25.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 95a0d82a..814d5c72 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -432,13 +432,13 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if 'pg_autoscaler' in enabled_manager_modules(): try: enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise @@ -504,7 +504,7 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -515,7 +515,7 @@ def create(self): 
enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py index 395836c7..fc70aa94 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/snap.py +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -69,7 +69,7 @@ def _snap_exec(commands): .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' - .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index e9b289a4..7a0cb7d4 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1139,8 +1139,9 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd']), + ('mon', ['profile rbd; allow r']), ('osd', ['profile rbd']), + ('mgr', ['allow r']), ]) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index bf0ccf36..4f385ca5 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -6,22 +6,23 @@ provides: interface: nrpe-external-master scope: container tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal - - trusty +- xenial +- bionic +- eoan +- focal +- trusty +- groovy description: | - Ceph is a distributed storage and network file system designed to provide - excellent performance, reliability, and scalability. - . - This charm provides the Ceph OSD personality for expanding storage capacity - within a ceph deployment. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. + . + This charm provides the Ceph OSD personality for expanding storage capacity + within a ceph deployment. extra-bindings: public: cluster: From 0e87fa0200acc3cd647a0b6f28fd6de8508b47e4 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Jun 2020 14:22:18 +0100 Subject: [PATCH 1980/2699] Updates for 20.08 cycle start for groovy and libs - Adds groovy to the series in the metadata - Classic charms: sync charm-helpers. 
- Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild Change-Id: I156b0cecef76601ccc6f3a6714fc5118f6792b2d --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 8 ++--- ceph-radosgw/hooks/charmhelpers/fetch/snap.py | 2 +- ceph-radosgw/lib/charms_ceph/utils.py | 3 +- ceph-radosgw/metadata.yaml | 29 ++++++++++--------- 5 files changed, 23 insertions(+), 21 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index e59e0d1e..fbf01561 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -225,7 +225,7 @@ ('train', ['2.22.0', '2.23.0']), ('ussuri', - ['2.24.0']), + ['2.24.0', '2.25.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 95a0d82a..814d5c72 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -432,13 +432,13 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if 'pg_autoscaler' in enabled_manager_modules(): try: enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise @@ -504,7 +504,7 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -515,7 +515,7 @@ def create(self): enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py index 395836c7..fc70aa94 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -69,7 +69,7 @@ def _snap_exec(commands): .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' 
- .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index e9b289a4..7a0cb7d4 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1139,8 +1139,9 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd']), + ('mon', ['profile rbd; allow r']), ('osd', ['profile rbd']), + ('mgr', ['allow r']), ]) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 40e6099b..9d750889 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -2,22 +2,23 @@ name: ceph-radosgw summary: Highly scalable distributed storage - RADOS HTTP Gateway maintainer: OpenStack Charmers description: | - Ceph is a distributed storage and network file system designed to provide - excellent performance, reliability, and scalability. - . - This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols - for object storage. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. + . + This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols + for object storage. tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal - - trusty +- xenial +- bionic +- eoan +- focal +- trusty +- groovy extra-bindings: public: admin: From ec07659dadf02d33ff111c304ed35d0b5c17e2ee Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 3 Jun 2020 10:38:29 +0200 Subject: [PATCH 1981/2699] The prometheus relation should not use the cluster public address Change-Id: I2a43fc77f1f8bc4c16aeeecb0ba9a37615642d1c --- ceph-mon/hooks/ceph_hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index cace5894..1e45482c 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -75,6 +75,7 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, format_ipv6_addr, + get_relation_ip, ) from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render @@ -436,9 +437,8 @@ def prometheus_relation(relid=None, unit=None, prometheus_permitted=None, log("checking if prometheus module is enabled") if prometheus_permitted and module_enabled: log("Updating prometheus") - addr = get_public_addr() data = { - 'hostname': format_ipv6_addr(addr) or addr, + 'hostname': get_relation_ip('prometheus'), 'port': 9283, } relation_set(relation_id=relid, From a22f49408168292783fabd5557aef45725953a24 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 5 Jun 2020 12:48:04 +0100 Subject: [PATCH 1982/2699] Add glance/nova-compute to bundles This is to resolve the associated bug where cinder-ceph has grown the capability to block if it doesn't have an appropriate relation to nova-compute. 
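
Stepping back to the `log()` corrections repeated across the last two patches (the same hunks land in both ceph-osd and ceph-radosgw): each one moves `level=...` out of `str.format()` and into the `log()` call. The sketch below is illustrative only — `log()` here is a stand-in for the charmhelpers function, not its real implementation — but it shows why the bug never raised an error:

```python
# Stand-in for charmhelpers' log(); illustrative only.
def log(message, level='INFO'):
    print('[{}] {}'.format(level, message))

pool = 'images'

# Buggy form: str.format() silently ignores unknown keyword arguments,
# so 'level' is swallowed and the message logs at the default level.
log('Could not set app name for pool {}'.format(pool, level='WARNING'))

# Fixed form: 'level' reaches log() itself.
log('Could not set app name for pool {}'.format(pool), level='WARNING')
```

Because no exception is raised and only the log level is wrong, the mistake could survive across several charms until this sync caught it.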
Change-Id: I7c47c5d8db06ce2a206eed9f617a2eff86088a9f Closes-Bug: #1881246 --- ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml | 9 +++++++++ .../src/tests/bundles/bionic-rocky-site-a.yaml | 11 +++++++++++ ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml | 11 +++++++++++ ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml | 11 +++++++++++ ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml | 11 +++++++++++ ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml | 11 +++++++++++ 6 files changed, 64 insertions(+) diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml index 8d1c28bd..c3ea69f2 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -29,6 +29,9 @@ applications: num_units: 1 options: openstack-origin: distro + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -86,6 +89,12 @@ relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml index 905212b9..56721c70 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml @@ -29,6 +29,11 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-rocky + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -66,6 +71,12 @@ relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml index 9da10be1..caba8e03 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml @@ -29,6 +29,11 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-rocky + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -86,6 +91,12 @@ relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml index b09dcf8d..aefc4fb5 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml @@ -29,6 +29,11 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-stein + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-stein ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -86,6 +91,12 @@ 
relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml index d29319d1..fbe05e87 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml @@ -30,6 +30,11 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-pike + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-pike ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -87,6 +92,12 @@ relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml index 167e0e70..1fd738ee 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml @@ -30,6 +30,11 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-queens + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-queens ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 @@ -87,6 +92,12 @@ relations: - cinder-ceph - - cinder-ceph - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service - - glance - ceph-mon - - ceph-mon:osd From f53ed810da6e5f611f2ab9ae520c1fdf302bd0cc Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 9 Jun 2020 15:42:21 +0200 Subject: [PATCH 1983/2699] Fix Ceph upgrade issue by porting charm to common framework Move helpers as-is from reactive handler module to charm class. Set default of ``source`` configuration option to 'distro' to be in line with other reactive charms and the expectations of the ``openstack.utils.get_source_and_pgp_key`` function. Unpin flake8. 
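
Before the diff itself, a rough mental model of the flag-driven dispatch this port moves to: handlers declare the reactive flags they require, and the framework runs whichever handlers have their preconditions met. This is a toy imitation for orientation only — the real charms.reactive library has far richer semantics (flag watching, triggers, endpoint flags):

```python
# Toy imitation of charms.reactive's flag-driven dispatch (illustrative).
flags = set()
registry = []

def when(*required):
    """Register a handler that runs only when all named flags are set."""
    def register(fn):
        registry.append((set(required), fn))
        return fn
    return register

@when('ceph-mds.available')
def config_changed():
    print('render ceph.conf and keyring')
    flags.add('config.rendered')

def dispatch():
    for required, fn in registry:
        if required <= flags:
            fn()

flags.add('ceph-mds.available')
dispatch()  # -> render ceph.conf and keyring
```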
Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/313 Depends-On: I6fbde855ba8f83ef5e265bd5b5dfb0d01eae830b Change-Id: I2d8e8b244db935673406d2cce9ef34e7252cb50b Closes-Bug: #1879072 --- ceph-fs/src/config.yaml | 2 +- ceph-fs/src/layer.yaml | 15 +- ceph-fs/src/lib/__init__.py | 13 + ceph-fs/src/lib/charm/__init__.py | 13 + ceph-fs/src/lib/charm/openstack/__init__.py | 13 + ceph-fs/src/lib/charm/openstack/ceph_fs.py | 172 ++++++++++ ceph-fs/src/reactive/ceph_fs.py | 297 ++---------------- ceph-fs/src/templates/ceph.conf | 38 +-- ceph-fs/src/tests/tests.yaml | 1 + ceph-fs/test-requirements.txt | 2 +- ceph-fs/tox.ini | 2 +- ceph-fs/unit_tests/__init__.py | 32 +- .../test_lib_charm_openstack_ceph_fs.py | 82 +++++ ceph-fs/unit_tests/test_reactive_ceph_fs.py | 81 +++++ 14 files changed, 442 insertions(+), 321 deletions(-) create mode 100644 ceph-fs/src/lib/__init__.py create mode 100644 ceph-fs/src/lib/charm/__init__.py create mode 100644 ceph-fs/src/lib/charm/openstack/__init__.py create mode 100644 ceph-fs/src/lib/charm/openstack/ceph_fs.py create mode 100644 ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py create mode 100644 ceph-fs/unit_tests/test_reactive_ceph_fs.py diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index aa944462..85b68c37 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. source: type: string - default: + default: distro description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index e7d3cb84..ae53d22d 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,5 +1,14 @@ -includes: ['layer:basic', 'layer:apt', 'interface:ceph-mds'] +includes: ['layer:ceph', 'interface:ceph-mds'] options: - status: - patch-hookenv: False + basic: + use_venv: True + include_system_packages: False repo: https://git.openstack.org/openstack/charm-ceph-fs +config: + deletes: + - debug + - ssl_ca + - ssl_cert + - ssl_key + - use-internal-endpoints + - verbose diff --git a/ceph-fs/src/lib/__init__.py b/ceph-fs/src/lib/__init__.py new file mode 100644 index 00000000..17dd8e7f --- /dev/null +++ b/ceph-fs/src/lib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/lib/charm/__init__.py b/ceph-fs/src/lib/charm/__init__.py new file mode 100644 index 00000000..17dd8e7f --- /dev/null +++ b/ceph-fs/src/lib/charm/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/lib/charm/openstack/__init__.py b/ceph-fs/src/lib/charm/openstack/__init__.py new file mode 100644 index 00000000..17dd8e7f --- /dev/null +++ b/ceph-fs/src/lib/charm/openstack/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-fs/src/lib/charm/openstack/ceph_fs.py b/ceph-fs/src/lib/charm/openstack/ceph_fs.py new file mode 100644 index 00000000..26e73553 --- /dev/null +++ b/ceph-fs/src/lib/charm/openstack/ceph_fs.py @@ -0,0 +1,172 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import socket + +import dns.resolver + +import charms_openstack.adapters +import charms_openstack.charm +import charms_openstack.plugins + +import charmhelpers.core as ch_core + +# NOTE(fnordahl) theese out of style imports are here to help keeping helpers +# moved from reactive module as-is to make the diff managable. At some point +# in time we should replace them in favor of common helpers that would do the +# same job. 
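
The moved helpers that follow lean on charmhelpers' `cached` decorator. As a rough functional analogue — an assumption of equivalent memoization behaviour, not the real implementation — it behaves much like `functools.lru_cache`, keeping repeated config and network lookups cheap within a single hook run:

```python
# Illustrative stand-in for charmhelpers' @cached: memoize the result so
# repeated lookups within one hook invocation do the work only once.
import functools

@functools.lru_cache(maxsize=None)
def get_public_addr():
    print('expensive network lookup')
    return '192.0.2.42'

get_public_addr()  # performs the lookup
get_public_addr()  # served from the cache; no second lookup
```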
+from charmhelpers.core.hookenv import ( + config, log, cached, DEBUG, unit_get, + network_get_primary_address, + status_set) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv6_addr) + + +charms_openstack.charm.use_defaults('charm.default-select-release') + + +class CephFSCharmConfigurationAdapter( + charms_openstack.adapters.ConfigurationAdapter): + + @property + def hostname(self): + return self.charm_instance.hostname + + @property + def mds_name(self): + return self.charm_instance.hostname + + @property + def networks(self): + return self.charm_instance.get_networks('ceph-public-network') + + @property + def public_addr(self): + if ch_core.hookenv.config('prefer-ipv6'): + return get_ipv6_addr()[0] + else: + return self.charm_instance.get_public_addr() + + +class CephFSCharmRelationAdapters( + charms_openstack.adapters.OpenStackRelationAdapters): + relation_adapters = { + 'ceph-mds': charms_openstack.plugins.CephRelationAdapter, + } + + +class BaseCephFSCharm(charms_openstack.plugins.CephCharm): + abstract_class = True + name = 'ceph-fs' + python_version = 3 + required_relations = ['ceph-mds'] + user = 'ceph' + group = 'ceph' + adapters_class = CephFSCharmRelationAdapters + configuration_class = CephFSCharmConfigurationAdapter + ceph_service_type = charms_openstack.plugins.CephCharm.CephServiceType.mds + ceph_service_name_override = 'mds' + ceph_key_per_unit_name = True + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.services = [ + 'ceph-mds@{}'.format(self.hostname), + ] + self.restart_map = { + '/etc/ceph/ceph.conf': self.services, + } + + # NOTE(fnordahl) moved from reactive handler module, otherwise keeping + # these as-is to make the diff managable. At some point in time we should + # replace them in favor of common helpers that would do the same job. + @staticmethod + def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. + """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] + + @cached + def get_public_addr(self): + if config('ceph-public-network'): + return self.get_network_addrs('ceph-public-network')[0] + + try: + return network_get_primary_address('public') + except NotImplementedError: + log("network-get not supported", DEBUG) + + return self.get_host_ip() + + @cached + @staticmethod + def get_host_ip(hostname=None): + if config('prefer-ipv6'): + return get_ipv6_addr()[0] + + hostname = hostname or unit_get('private-address') + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + # This may throw an NXDOMAIN exception; in which case + # things are badly broken so just let it kill the hook + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + + def get_network_addrs(self, config_opt): + """Get all configured public networks addresses. + + If public network(s) are provided, go through them and return the + addresses we have configured on any of those networks. 
+ """ + addrs = [] + networks = config(config_opt) + if networks: + networks = networks.split() + addrs = [get_address_in_network(n) for n in networks] + addrs = [a for a in addrs if a] + + if not addrs: + if networks: + msg = ("Could not find an address on any of '%s' - resolve " + "this error to retry" % networks) + status_set('blocked', msg) + raise Exception(msg) + else: + return [self.get_host_ip()] + + return addrs + + +class MitakaCephFSCharm(BaseCephFSCharm): + release = 'mitaka' + packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] + + +class UssuriCephFSCharm(BaseCephFSCharm): + release = 'ussuri' + packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 9091e8ad..9c54bdc4 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -12,283 +12,36 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os -import socket -import subprocess - -import dns.resolver - from charms import reactive -from charms.reactive import when, when_not, hook -from charms.reactive.flags import set_flag, clear_flag, is_flag_set -from charmhelpers.core import hookenv -from charmhelpers.core import unitdata -from charmhelpers.core.hookenv import ( - application_version_set, config, log, ERROR, cached, DEBUG, unit_get, - network_get_primary_address, relation_ids, - status_set) -from charmhelpers.core.host import ( - CompareHostReleases, - lsb_release, - service_restart, - service) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv6_addr) - -from charmhelpers.fetch import ( - get_upstream_version, -) -import jinja2 - -from charms.apt import queue_install, add_source - -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] -PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] - -TEMPLATES_DIR = 'templates' -VERSION_PACKAGE = 'ceph-common' - - -def render_template(template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir)) - template = templates.get_template(template_name) - return template.render(context) - - -@when_not('apt.installed.ceph') -def install_ceph_base(): - add_source(config('source'), key=config('key')) - release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) >= 'focal': - queue_install(PACKAGES_FOCAL) - else: - queue_install(PACKAGES) - - -@when_not('apt.installed.ceph-mds') -def install_cephfs(): - queue_install(['ceph-mds']) - - -@when('cephfs.configured') -@when('ceph-mds.pools.available') -@when_not('cephfs.started') -def setup_mds(relation): - service_name = 'ceph-mds@{}'.format(socket.gethostname()) - if service_restart(service_name): - set_flag('cephfs.started') - service('enable', service_name) - application_version_set(get_upstream_version(VERSION_PACKAGE)) - else: - log(message='Error: restarting cpeh-mds', level=ERROR) - clear_flag('cephfs.started') - - -@when('ceph-mds.available') -def config_changed(ceph_client): - charm_ceph_conf = os.path.join(os.sep, - 'etc', - 'ceph', - 'ceph.conf') - key_path = os.path.join(os.sep, - 'var', - 'lib', - 'ceph', - 'mds', - 'ceph-{}'.format(socket.gethostname()) - ) - if not os.path.exists(key_path): - os.makedirs(key_path) - cephx_key = os.path.join(key_path, - 'keyring') - - ceph_context = { - 'fsid': ceph_client.fsid(), - 'auth_supported': ceph_client.auth(), - 'use_syslog': 
str(config('use-syslog')).lower(), - 'mon_hosts': ' '.join(ceph_client.mon_hosts()), - 'loglevel': config('loglevel'), - 'hostname': socket.gethostname(), - 'mds_name': socket.gethostname(), - } - - networks = get_networks('ceph-public-network') - if networks: - ceph_context['ceph_public_network'] = ', '.join(networks) - elif config('prefer-ipv6'): - dynamic_ipv6_address = get_ipv6_addr()[0] - ceph_context['public_addr'] = dynamic_ipv6_address - else: - ceph_context['public_addr'] = get_public_addr() - - try: - with open(charm_ceph_conf, 'w') as ceph_conf: - ceph_conf.write(render_template('ceph.conf', ceph_context)) - except IOError as err: - log("IOError writing ceph.conf: {}".format(err)) - clear_flag('cephfs.configured') - return - - try: - with open(cephx_key, 'w') as key_file: - key_file.write("[mds.{}]\n\tkey = {}\n".format( - socket.gethostname(), - ceph_client.mds_key() - )) - except IOError as err: - log("IOError writing mds-a.keyring: {}".format(err)) - clear_flag('cephfs.configured') - return - set_flag('cephfs.configured') - - -def get_networks(config_opt='ceph-public-network'): - """Get all configured networks from provided config option. - If public network(s) are provided, go through them and return those for - which we have an address configured. - """ - networks = config(config_opt) - if networks: - networks = networks.split() - return [n for n in networks if get_address_in_network(n)] +import charms_openstack.bus +import charms_openstack.charm as charm - return [] +charms_openstack.bus.discover() -@cached -def get_public_addr(): - if config('ceph-public-network'): - return get_network_addrs('ceph-public-network')[0] - try: - return network_get_primary_address('public') - except NotImplementedError: - log("network-get not supported", DEBUG) - - return get_host_ip() - - -@cached -def get_host_ip(hostname=None): - if config('prefer-ipv6'): - return get_ipv6_addr()[0] - - hostname = hostname or unit_get('private-address') - try: - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address - - -def get_network_addrs(config_opt): - """Get all configured public networks addresses. - - If public network(s) are provided, go through them and return the - addresses we have configured on any of those networks. - """ - addrs = [] - networks = config(config_opt) - if networks: - networks = networks.split() - addrs = [get_address_in_network(n) for n in networks] - addrs = [a for a in addrs if a] - - if not addrs: - if networks: - msg = ("Could not find an address on any of '%s' - resolve this " - "error to retry" % networks) - status_set('blocked', msg) - raise Exception(msg) - else: - return [get_host_ip()] - - return addrs - - -def assess_status(): - """Assess status of current unit""" - statuses = set([]) - messages = set([]) - - # Handle Series Upgrade - if unitdata.kv().get('charm.vault.series-upgrading'): - status_set("blocked", - "Ready for do-release-upgrade and reboot. 
" - "Set complete when finished.") - return - - if is_flag_set('cephfs.started'): - (status, message) = log_mds() - statuses.add(status) - messages.add(message) - if 'blocked' in statuses: - status = 'blocked' - elif 'waiting' in statuses: - status = 'waiting' - else: - status = 'active' - message = '; '.join(messages) - status_set(status, message) - - -def get_running_mds(): - """Returns a list of the pids of the current running MDS daemons""" - cmd = ['pgrep', 'ceph-mds'] - try: - result = subprocess.check_output(cmd).decode('utf-8') - return result.split() - except subprocess.CalledProcessError: - return [] - - -def log_mds(): - if len(relation_ids('ceph-mds')) < 1: - return 'blocked', 'Missing relation: monitor' - running_mds = get_running_mds() - if not running_mds: - return 'blocked', 'No MDS detected using current configuration' - else: - return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds)) - - -# Per https://github.com/juju-solutions/charms.reactive/issues/33, -# this module may be imported multiple times so ensure the -# initialization hook is only registered once. I have to piggy back -# onto the namespace of a module imported before reactive discovery -# to do this. -if not hasattr(reactive, '_ceph_log_registered'): - # We need to register this to run every hook, not just during install - # and config-changed, to protect against race conditions. If we don't - # do this, then the config in the hook environment may show updates - # to running hooks well before the config-changed hook has been invoked - # and the intialization provided an opertunity to be run. - hookenv.atexit(assess_status) - reactive._ceph_log_registered = True - - -# Series upgrade hooks are a special case and reacting to the hook directly -# makes sense as we may not want other charm code to run -@hook('pre-series-upgrade') -def pre_series_upgrade(): - """Handler for pre-series-upgrade. - """ - unitdata.kv().set('charm.vault.series-upgrading', True) +charm.use_defaults( + 'charm.installed', + 'config.changed', + 'config.rendered', + 'upgrade-charm', + 'update-status', +) -@hook('post-series-upgrade') -def post_series_upgrade(): - """Handler for post-series-upgrade. 
- """ - release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) >= 'focal': - queue_install(PACKAGES_FOCAL) - unitdata.kv().set('charm.vault.series-upgrading', False) +@reactive.when_none('charm.paused', 'run-default-update-status') +@reactive.when('ceph-mds.available') +def config_changed(): + ceph_mds = reactive.endpoint_from_flag('ceph-mds.available') + with charm.provide_charm_instance() as cephfs_charm: + cephfs_charm.configure_ceph_keyring(ceph_mds.mds_key()) + cephfs_charm.render_with_interfaces([ceph_mds]) + if reactive.is_flag_set('config.changed.source'): + # update system source configuration and check for upgrade + cephfs_charm.install() + cephfs_charm.upgrade_if_available([ceph_mds]) + reactive.clear_flag('config.changed.source') + reactive.set_flag('cephfs.configured') + reactive.set_flag('config.rendered') + cephfs_charm.assess_status() diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf index 9490e8c0..d064e443 100644 --- a/ceph-fs/src/templates/ceph.conf +++ b/ceph-fs/src/templates/ceph.conf @@ -1,24 +1,24 @@ [global] -auth cluster required = {{ auth_supported }} -auth service required = {{ auth_supported }} -auth client required = {{ auth_supported }} +auth cluster required = {{ ceph_mds.auth }} +auth service required = {{ ceph_mds.auth }} +auth client required = {{ ceph_mds.auth }} keyring = /etc/ceph/$cluster.$name.keyring -mon host = {{ mon_hosts }} -fsid = {{ fsid }} - -log to syslog = {{ use_syslog }} -err to syslog = {{ use_syslog }} -clog to syslog = {{ use_syslog }} -mon cluster log to syslog = {{ use_syslog }} -debug mon = {{ loglevel }}/5 -debug osd = {{ loglevel }}/5 - -{% if ceph_public_network %} -public network = {{ ceph_public_network }} +mon host = {{ ceph_mds.monitors }} +fsid = {{ ceph_mds.fsid }} + +log to syslog = {{ options.use_syslog }} +err to syslog = {{ options.use_syslog }} +clog to syslog = {{ options.use_syslog }} +mon cluster log to syslog = {{ options.use_syslog }} +debug mon = {{ options.loglevel }}/5 +debug osd = {{ options.loglevel }}/5 + +{% if options.networks %} +public network = {{ options.networks|join(',') }} {%- endif %} -{%- if public_addr %} -public addr = {{ public_addr }} +{%- if options.public_addr %} +public addr = {{ options.public_addr }} {%- endif %} [client] @@ -27,7 +27,7 @@ log file = /var/log/ceph.log [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring -[mds.{{ mds_name }}] -host = {{ hostname }} +[mds.{{ options.mds_name }}] +host = {{ options.hostname }} diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 46773214..f2b3fde7 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -23,6 +23,7 @@ configure: - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests + - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest tests_options: force_deploy: - focal-ussuri diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 14b380e4..94b97968 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -3,7 +3,7 @@ # requirements management in charms via bot-control. Thank you. 
# # Lint and unit test requirements -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 requests>=2.18.4 charms.reactive diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index f8f50927..5b41c1dd 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -89,4 +89,4 @@ commands = {posargs} [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402,W504 \ No newline at end of file +ignore = E402,W504 diff --git a/ceph-fs/unit_tests/__init__.py b/ceph-fs/unit_tests/__init__.py index 3e9250c6..3265b909 100644 --- a/ceph-fs/unit_tests/__init__.py +++ b/ceph-fs/unit_tests/__init__.py @@ -13,30 +13,14 @@ # limitations under the License. import sys -import mock +import unittest.mock as mock sys.path.append('src') +sys.path.append('src/lib') -apt_pkg = mock.MagicMock() -charmhelpers = mock.MagicMock() -sys.modules['apt_pkg'] = apt_pkg -sys.modules['charmhelpers'] = charmhelpers -sys.modules['charmhelpers.core'] = charmhelpers.core -sys.modules['charmhelpers.core.hookenv'] = charmhelpers.core.hookenv -sys.modules['charmhelpers.core.host'] = charmhelpers.core.host -sys.modules['charmhelpers.core.unitdata'] = charmhelpers.core.unitdata -sys.modules['charmhelpers.core.templating'] = charmhelpers.core.templating -sys.modules['charmhelpers.contrib'] = charmhelpers.contrib -sys.modules['charmhelpers.contrib.openstack'] = charmhelpers.contrib.openstack -sys.modules['charmhelpers.contrib.openstack.utils'] = ( - charmhelpers.contrib.openstack.utils) -sys.modules['charmhelpers.contrib.openstack.templating'] = ( - charmhelpers.contrib.openstack.templating) -sys.modules['charmhelpers.contrib.network'] = charmhelpers.contrib.network -sys.modules['charmhelpers.contrib.network.ip'] = ( - charmhelpers.contrib.network.ip) -sys.modules['charmhelpers.fetch'] = charmhelpers.fetch -sys.modules['charmhelpers.cli'] = charmhelpers.cli -sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers -sys.modules['charmhelpers.contrib.hahelpers.cluster'] = ( - charmhelpers.contrib.hahelpers.cluster) +# Mock out charmhelpers so that we can test without it. +import charms_openstack.test_mocks # noqa +charms_openstack.test_mocks.mock_charmhelpers() + +sys.modules['dns'] = mock.MagicMock() +sys.modules['dns.resolver'] = mock.MagicMock() diff --git a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py new file mode 100644 index 00000000..c3964f35 --- /dev/null +++ b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py @@ -0,0 +1,82 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
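
The rewritten `unit_tests/__init__.py` above swaps a long list of hand-rolled `sys.modules` stubs for `charms_openstack.test_mocks.mock_charmhelpers()`, keeping only the `dns` stubs. A self-contained sketch of the stubbing technique itself: registering a `MagicMock` under a module's name makes any later import resolve to the mock, so tests never need the real library installed.

```python
# Sketch of sys.modules stubbing: imports resolve to the registered mock.
import sys
import unittest.mock as mock

sys.modules['dns'] = mock.MagicMock()
sys.modules['dns.resolver'] = mock.MagicMock()

import dns.resolver  # now the MagicMock registered above

dns.resolver.query('example.com', 'A')  # recorded by the mock; no network I/O
```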
+ +import unittest.mock as mock + +import charms_openstack.test_utils as test_utils + +import charm.openstack.ceph_fs as ceph_fs + + +class TestMitakaCephFsCharm(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release('mitaka') + self.patch('socket.gethostname', name='gethostname') + self.gethostname.return_value = 'somehost' + self.target = ceph_fs.MitakaCephFSCharm() + + def test_packages(self): + # Package list is the only difference between the past version and + # future versions of this charm, see ``TestCephFsCharm`` for the rest + # of the tests + self.assertEquals(self.target.packages, [ + 'ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']) + + +class TestCephFsCharm(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release('ussuri') + self.patch('socket.gethostname', name='gethostname') + self.gethostname.return_value = 'somehost' + self.target = ceph_fs.UssuriCephFSCharm() + + def patch_target(self, attr, return_value=None): + mocked = mock.patch.object(self.target, attr) + self._patches[attr] = mocked + started = mocked.start() + started.return_value = return_value + self._patches_start[attr] = started + setattr(self, attr, started) + + def test___init__(self): + self.assertEquals(self.target.services, [ + 'ceph-mds@somehost']) + self.assertDictEqual(self.target.restart_map, { + '/etc/ceph/ceph.conf': ['ceph-mds@somehost']}) + self.assertEquals(self.target.packages, [ + 'ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']) + + def test_configuration_class(self): + self.assertEquals(self.target.options.hostname, 'somehost') + self.assertEquals(self.target.options.mds_name, 'somehost') + self.patch_target('get_networks') + self.get_networks.return_value = ['fakeaddress'] + self.assertEquals(self.target.options.networks, ['fakeaddress']) + self.patch_object(ceph_fs.ch_core.hookenv, 'config') + self.config.side_effect = lambda x: {'prefer-ipv6': False}.get(x) + self.patch_object(ceph_fs, 'get_ipv6_addr') + self.get_ipv6_addr.return_value = ['2001:db8::fake'] + self.patch_target('get_public_addr') + self.get_public_addr.return_value = '192.0.2.42' + self.assertEquals( + self.target.options.public_addr, + '192.0.2.42') + self.config.side_effect = lambda x: {'prefer-ipv6': True}.get(x) + self.assertEquals( + self.target.options.public_addr, + '2001:db8::fake') diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py new file mode 100644 index 00000000..c210afe0 --- /dev/null +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -0,0 +1,81 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest.mock as mock + +import charm.openstack.ceph_fs as ceph_fs +import reactive.ceph_fs as handlers + +import charms_openstack.test_utils as test_utils + + +class TestRegisteredHooks(test_utils.TestRegisteredHooks): + + def test_hooks(self): + defaults = [ + 'charm.installed', + 'config.changed', + 'config.rendered', + 'upgrade-charm', + 'update-status', + ] + hook_set = { + 'when': { + 'config_changed': ('ceph-mds.available',), + }, + 'when_none': { + 'config_changed': ('charm.paused', + 'run-default-update-status',), + }, + } + # test that the hooks were registered via the reactive.ceph_fs module + self.registered_hooks_test_helper(handlers, hook_set, defaults) + + +class TestCephFSHandlers(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release(ceph_fs.UssuriCephFSCharm.release) + self.target = mock.MagicMock() + self.patch_object(handlers.charm, 'provide_charm_instance', + new=mock.MagicMock()) + self.provide_charm_instance().__enter__.return_value = \ + self.target + self.provide_charm_instance().__exit__.return_value = None + + def test_config_changed(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + self.patch_object(handlers.reactive, 'is_flag_set') + self.patch_object(handlers.reactive, 'clear_flag') + self.patch_object(handlers.reactive, 'set_flag') + ceph_mds = mock.MagicMock() + ceph_mds.mds_key.return_value = 'fakekey' + self.endpoint_from_flag.return_value = ceph_mds + self.is_flag_set.return_value = False + handlers.config_changed() + self.endpoint_from_flag.assert_called_once_with('ceph-mds.available') + self.target.configure_ceph_keyring.assert_called_once_with('fakekey') + self.target.render_with_interfaces.assert_called_once_with([ceph_mds]) + self.is_flag_set.assert_called_once_with('config.changed.source') + self.set_flag.assert_has_calls([ + mock.call('cephfs.configured'), + mock.call('config.rendered'), + ]) + self.target.install.assert_not_called() + self.target.upgrade_if_available.assert_not_called() + self.is_flag_set.return_value = True + handlers.config_changed() + self.target.install.assert_called_once_with() + self.target.upgrade_if_available.assert_called_once_with([ceph_mds]) From 442a56780bb3c14838f2c8415919867fe315167c Mon Sep 17 00:00:00 2001 From: Ponnuvel Palaniyappan Date: Thu, 11 Jun 2020 12:50:29 +0100 Subject: [PATCH 1984/2699] Remove a duplicate key and fix typos in config.yaml Change-Id: Ide6983c8d1b08eb43e5931ba7077c031c46b8ae3 --- ceph-osd/config.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b84fd3ac..66080a9b 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -19,7 +19,7 @@ options: type: string default: description: | - Key ID to import to the apt keyring to support use with arbitary source + Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPA's. use-syslog: type: boolean @@ -222,7 +222,7 @@ options: type: string default: description: | - Cloud instances provide ephermeral storage which is normally mounted + Cloud instances provide ephemeral storage which is normally mounted on /mnt. . Setting this option to the path of the ephemeral mountpoint will force @@ -291,7 +291,6 @@ options: nagios_context: type: string default: "juju" - type: string description: | Used by the nrpe-external-master subordinate charm. 
      A string that will be prepended to instance name to set the hostname

From 05fa40ca37e3664af079dd8ba53f9049c520b63e Mon Sep 17 00:00:00 2001
From: Frode Nordahl
Date: Fri, 26 Jun 2020 14:50:49 +0200
Subject: [PATCH 1985/2699] Rebuild to fix service start while paused

Change-Id: I7495960fe920807cd92419d18e158cae4f23b15b
Closes-Bug: #1885121
---
 ceph-rbd-mirror/rebuild | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild
index 315feac0..19ea6dd7 100644
--- a/ceph-rbd-mirror/rebuild
+++ b/ceph-rbd-mirror/rebuild
@@ -2,4 +2,4 @@
 # when dependencies of the charm change,
 # but nothing in the charm needs to.
 # simply change the uuid to something new
-c17ec0ce-937e-11ea-a49b-8b4b0c8e2b7f
+06fc37a3-f9fe-49d9-92e0-dd7b0f749c1c

From ab0217c2642f834a31e0916138e48ec64d7e7f3e Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Tue, 30 Jun 2020 09:37:14 +0000
Subject: [PATCH 1986/2699] Update deps and fix fallout

Update the git submodules and fix fallout:

* Add ops-interface-tls-certificates back in to deps update script
* Charmhelpers has been switched to using pbr which broke the
  update-deps target. The problem seems to be that pbr cannot detect
  the version of charm-helpers when using a submodule and deploying to
  a local dir. To work around this, set PBR_VERSION manually.
* cluster cannot be set as an extra binding anymore ("ERROR cannot deploy
  bundle: charm "ceph-iscsi" has invalid extra bindings: relation names
  (cluster) cannot be used in extra bindings")
* Charm class should no longer provide a `key` argument in __init__ *1
* framework.observe must now pass the method to be invoked as the
  last argument *2

*1 https://github.com/canonical/operator/blob/master/ops/main.py#L309
*2 https://github.com/canonical/operator/blob/master/ops/framework.py#L553
---
 ceph-iscsi/charm-init.sh                      |  6 +++++-
 ceph-iscsi/metadata.yaml                      |  1 -
 ceph-iscsi/mod/charm-helpers                  |  2 +-
 ceph-iscsi/mod/operator                       |  2 +-
 ceph-iscsi/mod/ops-interface-tls-certificates |  2 +-
 ceph-iscsi/mod/ops-openstack                  |  2 +-
 ceph-iscsi/src/charm.py                       | 10 +++++-----
 7 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/ceph-iscsi/charm-init.sh b/ceph-iscsi/charm-init.sh
index 06fa76c9..e400cde2 100755
--- a/ceph-iscsi/charm-init.sh
+++ b/ceph-iscsi/charm-init.sh
@@ -15,8 +15,12 @@ else
+ export PBR_VERSION=$(cd mod/charm-helpers; git describe --tags) pip install -t lib -r build-requirements.txt --upgrade fi diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 1c953471..e96aab09 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -15,7 +15,6 @@ subordinate: false min-juju-version: 2.7.6 extra-bindings: public: - cluster: requires: ceph-client: interface: ceph-client diff --git a/ceph-iscsi/mod/charm-helpers b/ceph-iscsi/mod/charm-helpers index b4aa4e33..87fc7ee5 160000 --- a/ceph-iscsi/mod/charm-helpers +++ b/ceph-iscsi/mod/charm-helpers @@ -1 +1 @@ -Subproject commit b4aa4e3398e7406dbf0f76a23f91afa6a72aed1a +Subproject commit 87fc7ee50662f14abe55d9fe0d02ec20d128379f diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator index ccf1dce2..59dd0987 160000 --- a/ceph-iscsi/mod/operator +++ b/ceph-iscsi/mod/operator @@ -1 +1 @@ -Subproject commit ccf1dce276141d1e8641d63382bb6c3055eee731 +Subproject commit 59dd09875421668366ffcaff123bec34a0054ec3 diff --git a/ceph-iscsi/mod/ops-interface-tls-certificates b/ceph-iscsi/mod/ops-interface-tls-certificates index d03a251e..f6e6ec1b 160000 --- a/ceph-iscsi/mod/ops-interface-tls-certificates +++ b/ceph-iscsi/mod/ops-interface-tls-certificates @@ -1 +1 @@ -Subproject commit d03a251e87f02528789af0eb4cce88e471847e68 +Subproject commit f6e6ec1b1d6a317aaeb2cb696e3ec7c1a7c3cd09 diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack index 18b8f1fc..460b3898 160000 --- a/ceph-iscsi/mod/ops-openstack +++ b/ceph-iscsi/mod/ops-openstack @@ -1 +1 @@ -Subproject commit 18b8f1fcfe7bee87217e7d3152e377c2c3e3f2ff +Subproject commit 460b389811d25514c7ac280ff7c8b2f7c17dd790 diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 949630f5..ab2f10b2 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -130,8 +130,8 @@ class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): release = 'default' - def __init__(self, framework, key): - super().__init__(framework, key) + def __init__(self, framework): + super().__init__(framework) logging.info("Using {} class".format(self.release)) self.state.set_default( target_created=False, @@ -156,7 +156,7 @@ def __init__(self, framework, key): self.render_config) self.framework.observe( self.peers.on.has_peers, - self) + self.on_has_peers) self.framework.observe( self.peers.on.allowed_ips_changed, self.render_config) @@ -174,10 +174,10 @@ def __init__(self, framework, key): self.render_config) self.framework.observe( self.on.create_target_action, - self) + self.on_create_target_action) self.framework.observe( self.on.add_trusted_ip_action, - self) + self.on_add_trusted_ip_action) def on_install(self, event): if ch_host.is_container(): From dfd881aa2bd47328da07958ae8822497c9ed5432 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 30 Jun 2020 10:52:23 +0000 Subject: [PATCH 1987/2699] Set PBR_VERSION for updates and pinned builds The PBR_VERSION should be set for pinned builds as well as updates. --- ceph-iscsi/charm-init.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-iscsi/charm-init.sh b/ceph-iscsi/charm-init.sh index e400cde2..58e58bdf 100755 --- a/ceph-iscsi/charm-init.sh +++ b/ceph-iscsi/charm-init.sh @@ -9,6 +9,11 @@ done git submodule update --init +# pbr seems unable to detect the current tag when installing +# from a local checkout using a git submodule. To work around this +# manually set the version. 
+export PBR_VERSION=$(cd mod/charm-helpers; git describe --tags) + if [[ -z "$UPDATE" ]]; then pip install -t lib -r build-requirements.txt else @@ -17,10 +22,6 @@ else git -C mod/ops-interface-ceph-client pull origin master git -C mod/ops-interface-tls-certificates pull origin master git -C mod/charm-helpers pull origin master - # pbr seems unable to detect the current tag when installing - # from a local checkout using a git submodule. To work around this - # manually set the version. - export PBR_VERSION=$(cd mod/charm-helpers; git describe --tags) pip install -t lib -r build-requirements.txt --upgrade fi From dd17fcc70afe1c45f508cde955c0f98a62d58ff3 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 1 Jul 2020 18:52:23 +0200 Subject: [PATCH 1988/2699] Remove duplicate requirement Change-Id: I6ecc780ce05ad7e9d392888139f80678e2b343f5 Closes-Bug: #1885935 --- ceph-rbd-mirror/src/wheelhouse.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index 17c12301..606a7f9b 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,3 +1,2 @@ -jinja2 psutil From 8cd400819011b3e561aa51842b2383627f3df7d3 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Wed, 1 Jul 2020 18:00:57 +0200 Subject: [PATCH 1989/2699] Remove duplicate requirement Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/342 Change-Id: I2137edb851fbb7926f372783fa140d89190c813b Closes-Bug: #1885935 --- ceph-fs/src/wheelhouse.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index 56042c3a..e169348c 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -2,5 +2,4 @@ netifaces dnspython3 ceph_api pyxattr -jinja2 psutil From 2944224dbbdc10f72c896020756c07fc44578de3 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 2 Jul 2020 14:56:41 +0100 Subject: [PATCH 1990/2699] Enable passing of integer value for pool-set action Enable passing of an integer to the pool-set function. Due to how juju appears to parse things on the command line, setting type to string causes it to fail to accept '3', "3" or 3 as a string. Only "'3'" works. However, if we remove the type from actions.yaml and do the validation in the charm, any value can be passed. Depends-On: I6081c23af61fd5e872982ff477b0a5cb27141d11 Change-Id: Idf3468d9ae28dafc09c86f08b7f8c6470a665b7a Closes-Bug: #1838650 --- ceph-mon/actions.yaml | 5 ++++- ceph-mon/actions/pool_set.py | 3 ++- ceph-mon/lib/charms_ceph/broker.py | 14 +++++++++++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index f7839d49..0081c6f0 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -256,7 +256,10 @@ pool-set: type: string description: "Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values" value: - type: string + # LP: #1838650 - unfortunately, Juju appears to consider '3' on the + # command line as not being a string, and has to be quoted as "'3'". So, + # we actually let the charm do the verification, and let any value + # through here. 
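
The verification that the comment above delegates to the charm is implemented in `handle_set_pool_value` further down in this patch. A minimal sketch of the coercion step, with names simplified from the patch (`POOL_KEYS` maps a pool key to a tuple whose first element is the expected Python type, so coercing is a single constructor call):

```python
# Minimal sketch of the coercion guard added for bug #1838650.
POOL_KEYS = {'size': (int,)}

def coerce_value(key, value):
    expected_type = POOL_KEYS[key][0]
    try:
        return expected_type(value)  # e.g. int('3') -> 3
    except ValueError:
        raise RuntimeError("Value {} isn't of type {}".format(
            value, expected_type))

print(coerce_value('size', '3'))  # Juju delivers action params as strings
```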
description: "The value to set" required: - key diff --git a/ceph-mon/actions/pool_set.py b/ceph-mon/actions/pool_set.py index 8549fe70..39ee9345 100755 --- a/ceph-mon/actions/pool_set.py +++ b/ceph-mon/actions/pool_set.py @@ -32,7 +32,8 @@ 'value': value} try: - handle_set_pool_value(service='admin', request=request) + # Bug: #1838650 -- force coercion to an int for the value if required. + handle_set_pool_value(service='admin', request=request, coerce=True) except CalledProcessError as e: log(str(e)) action_fail("Setting pool key: {} and value: {} failed with " diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index 726f9498..15552cd8 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -540,11 +540,13 @@ def handle_remove_cache_tier(request, service): pool.remove_cache_tier(cache_pool=cache_pool) -def handle_set_pool_value(request, service): +def handle_set_pool_value(request, service, coerce=False): """Sets an arbitrary pool value. :param request: dict of request operations and params :param service: The ceph client to run the command under. + :param coerce: Try to parse/coerce the value into the correct type. + Used by the action code that only gets Str from Juju :returns: dict. exit-code and reason if not 0 """ # Set arbitrary pool values @@ -558,6 +560,16 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] + # BUG: #1838650 - the function needs to try to coerce the value param to + # the type required for the validator to pass. Note, if this blows, then + # the param isn't parsable to the correct type. + if coerce: + try: + params['value'] = validator_params[0](params['value']) + except ValueError: + raise RuntimeError("Value {} isn't of type {}" + .format(params['value'], validator_params[0])) + # end of BUG: #1838650 if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) From fe3189d915999e8ada25c04eebc6a35f3d970def Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 3 Jul 2020 17:47:03 -0400 Subject: [PATCH 1991/2699] Review README --- ceph-iscsi/README.md | 295 ++++++++++++++++--------------------------- 1 file changed, 109 insertions(+), 186 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 1d8e1261..9a7d2b4c 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -1,233 +1,156 @@ # Overview -The charm provides the Ceph iSCSI gateway service. It is intended to be used -in conjunction with the ceph-osd and ceph-mon charms. +The ceph-iscsi charm deploys the [Ceph iSCSI gateway +service][ceph-iscsi-upstream]. The charm is intended to be used in conjunction +with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms. -> **Warning**: This charm is in a preview state for testing and should not - be used outside of the lab. +> **Warning**: This charm is in a preview state and should not be used in + production. See the [OpenStack Charm Guide][cg-preview-charms] for more + information on preview charms. # Usage +## Configuration + +See file `config.yaml` for the full list of options, along with their +descriptions and default values. + ## Deployment -When deploying ceph-iscsi ensure that exactly two units of the charm are being -deployed, this will provide multiple data paths to clients. - -> **Note**: Deploying four units is also theoretical possible but has not - been tested. 
- -The charm cannot be placed in a lxd container. However, it can be located -with the ceph-osd charms. Co-location with other charms is likely to be -fine but is untested. - -A sample `bundle.yaml` file's contents: - -```yaml - series: focal - machines: - '0': - '1': - '2': - applications: - ceph-iscsi: - charm: cs:ceph-iscsi - num_units: 2 - to: - - '0' - - '1' - ceph-osd: - charm: cs:ceph-osd - num_units: 3 - storage: - osd-devices: /dev/vdb - to: - - '0' - - '1' - - '2' - ceph-mon: - charm: cs:ceph-mon - num_units: 3 - options: - monitor-count: '3' - to: - - lxd:0 - - lxd:1 - - lxd:2 - relations: - - - ceph-mon:client - - ceph-iscsi:ceph-client - - - ceph-osd:mon - - ceph-mon:osd -``` - -> **Important**: Make sure the designated block device passed to the ceph-osd - charms exists and is not currently in use. - -Deploy the bundle: - - juju deploy ./bundle.yaml - - -## Managing Targets - -The charm provides an action for creating a simple target. If more complex -managment of targets is requires then the `gwcli` tool should be used. `gwcli` -is available from the root account on the gateway nodes. - -```bash - $ juju ssh ceph-iscsi/1 - $ sudo gwcli - /> ls -``` +We are assuming a pre-existing Ceph cluster. + +To provide multiple data paths to clients deploy exactly two ceph-iscsi units: + + juju deploy -n 2 cs:~gnuoy/ceph-iscsi-08 + +Then add a relation to the ceph-mon application: + + juju add-relation ceph-iscsi:ceph-client ceph-mon:client + +**Notes**: + +* Deploying four ceph-iscsi units is theoretical possible but it is not an + officially supported configuration. +* The ceph-iscsi application cannot be containerised. +* Co-locating ceph-iscsi with another application is only supported with + ceph-osd, although doing so with other applications may still work. ## Actions This section covers Juju [actions][juju-docs-actions] supported by the charm. Actions allow specific operations to be performed on a per-unit basis. -### create-target +* `add-trusted-ip` +* `create-target` +* `pause` +* `resume` +* `security-checklist` -Run this action to create an iscsi target. +To display action descriptions run `juju actions ceph-iscsi`. If the charm is +not deployed then see file `actions.yaml`. -```bash - $ juju run-action --wait ceph-iscsi/0 create-target \ - image-size=2G \ - image-name=bob \ - pool-name=superssd \ - client-initiatorname=iqn.1993-08.org.debian:01:aaa2299be916 \ - client-username=usera \ - client-password=testpass - unit-ceph-iscsi-0: - UnitId: ceph-iscsi/0 - id: "28" - results: - iqn: iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw - status: completed - timing: - completed: 2020-05-08 09:49:52 +0000 UTC - enqueued: 2020-05-08 09:49:36 +0000 UTC - started: 2020-05-08 09:49:37 +0000 UTC +## iSCSI target management +### Create an iSCSI target -### pause +An iSCSI target can be created easily with the charm's `create-target` action: -Pause the ceph-iscsi unit. This action will stop the rbd services. + juju run-action --wait ceph-iscsi/0 create-target \ + client-initiatorname=iqn.1993-08.org.debian:01:aaa2299be916 \ + client-username=myiscsiusername \ + client-password=myiscsipassword \ + image-size=5G \ + image-name=small \ + pool-name=images -### resume +In the above, all option values are generally user-defined with the exception +of the initiator name (`client-initiatorname`). An iSCSI initiator is +essentially an iSCSI client and so its name is client-dependent. Some +initiators may impose policy on credentials (`client-username` and +`client-password`). 
-Resume the ceph-iscsi unit. This action will start the rbd services if paused. +### The `gwcli` utility -## Network spaces +The management of targets, beyond the target-creation action described above, +can be accomplished via the `gwcli` utility. This CLI tool has its own shell, +and is available from any ceph-iscsi unit: -This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju -`v.2.0`). This feature optionally allows specific types of the application's -network traffic to be bound to subnets that the underlying hardware is -connected to. + juju ssh ceph-iscsi/1 + sudo gwcli + /> help -> **Note**: Spaces must be configured in the backing cloud prior to deployment. +## VMWare integration -The ceph-iscsi charm exposes the following traffic types (bindings): +Ceph can be used to back iSCSI targets for VMWare initiators. -- 'public' (front-side) -- 'cluster' (back-side) +Begin by accessing the VMWare admin web UI. -For example, providing that spaces 'data-space' and 'cluster-space' exist, the -deploy command above could look like this: +These instructions were written using VMWare ESXi 6.7.0. - juju deploy --config ceph-iscsi.yaml -n 2 ceph-iscsi \ - --bind "public=data-space cluster=cluster-space" +### Create a Ceph pool -Alternatively, configuration can be provided as part of a bundle: +If desired, create a Ceph pool to back the VMWare targets with the ceph-mon +charm's `create-pool` action: -```yaml - ceph-iscsi: - charm: cs:ceph-iscsi - num_units: 2 - bindings: - public: data-space - cluster: cluster-space -``` + juju run-action --wait ceph-mon/0 create-pool name=vmware-iscsi -# VMWare integration +### Enable the initiator -1. Create ceph pool if required. +From the web UI select the `Adapters` tab in the `Storage` context. Click +`Configure iSCSI` and enable iSCSI. - To create a new pool to back the iscsi targets run the create-pool action - from the ceph-mon charm. +Take a note of the initiator name, or UID. Here the UID we'll use is +`iqn.1998-01.com.vmware:node-gadomski-6a5e962a`. -```bash - $ juju run-action --wait ceph-mon/0 create-pool name=iscsi-targets - UnitId: ceph-mon/0 - results: - Stderr: | - pool 'iscsi-targets' created - set pool 2 size to 3 - set pool 2 target_size_ratio to 0.1 - enabled application 'unknown' on pool 'iscsi-targets' - set pool 2 pg_autoscale_mode to on - status: completed - timing: - completed: 2020-04-08 06:42:00 +0000 UTC - enqueued: 2020-04-08 06:41:38 +0000 UTC - started: 2020-04-08 06:41:42 +0000 UTC -``` +### Create an iSCSI target -2. Collect the Initiator name for adapter. +With the `create-target` action create a target for VMWare to use. Use the pool +that may have been created previously: - From the VMWare admin UI select the `Adapters` tab in the Storage - context. Ensure `iSCSI enabled` is set to `Enabled`. + juju run-action --wait ceph-iscsi/0 create-target \ + client-initiatorname=iqn.1998-01.com.vmware:node-gadomski-6a5e962a \ + client-username=vmwareclient \ + client-password=12to16characters \ + image-size=5G \ + image-name=disk-1 \ + pool-name=vmware-iscsi - Click 'Configure iSCSI' and take a note of the `iqn` name. +> **Note**: VMWare imposes a policy on credentials. The username should be more + than eight characters and the password between twelve and sixteen characters. -4. Create iSCSI target. +### Add a target to VMWare - Run the action to create a target for VMWare to use. +Follow the [Ceph iSCSI gateway for VMWare][ceph-iscsi-vmware-upstream] +documentation to use the new target. 
Use the (CHAP) username and password +passed to the `create-target` action. -> **Note**: The username should be more than eight characters and the password - between twelve and sixteen characters. +When finished, under the `Devices` tab you should see the created target. To +make more devices available to VMWare simply create more targets (use a +different image name and optionally a different image size). You will need to +`Rescan` and `Refresh` for the new devices to appear. -```bash - $ juju run-action --wait ceph-iscsi/0 create-target \ - client-initiatorname="iqn.1998-01.com.vmware:node-caloric-02f98bac" \ - client-username=vmwareclient \ - client-password=12to16characters \ - image-size=10G \ - image-name=disk_1 \ - pool-name=iscsi-targets - UnitId: ceph-iscsi/0 - results: - Stdout: | - Warning: Could not load preferences file /root/.gwcli/prefs.bin. - iqn: iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw - status: completed - timing: - completed: 2020-04-08 06:58:34 +0000 UTC - enqueued: 2020-04-08 06:58:15 +0000 UTC - started: 2020-04-08 06:58:19 +0000 UTC -``` - -5. Add target to VMWare. - - Follow the [Ceph iSCSI Gateway][ceph-vmware] documentation to use the new - target. Use CHAP username and password provided to the `create-target` - action. - -> **Warning**: As of the time of writing the workaround to set the CHAP - credentials via the esx cli is still needed. - -## Development - -The charm needs to pull in its dependencies before it can be deployed. To -pull in the dependency versions that correspond to this version of the -charm then run the `build` tox target. - -To update all dependencies to their latest versions then run the `update-deps` -tox target. +> **Note**: At the time of writing, the redundant task of setting the + credentials via the ESX CLI is still a necessity. This will require you to + enable SSH under `Manage` > `Services` > `TSM-SSH` > `Actions` (Start). + + +[ceph-mon-charm]: https://jaas.ai/ceph-mon +[ceph-osd-charm]: https://jaas.ai/ceph-osd [cg]: https://docs.openstack.org/charm-guide +[cg-preview-charms]: https://docs.openstack.org/charm-guide/latest/openstack-charms.html#tech-preview-charms-beta [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide -[juju-docs-spaces]: https://jaas.ai/docs/spaces [juju-docs-actions]: https://jaas.ai/docs/actions -[ceph-vmware]: https://docs.ceph.com/docs/master/rbd/iscsi-initiator-esx/ +[ceph-iscsi-upstream]: https://docs.ceph.com/docs/master/rbd/iscsi-overview/ +[ceph-iscsi-vmware-upstream]: https://docs.ceph.com/docs/master/rbd/iscsi-initiator-esx/ +[lp-bugs-charm-ceph-iscsi]: https://bugs.launchpad.net/charm-ceph-iscsi/+filebug From 70d2a60732df0cb7603f4bfd8665212563553d7b Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Mon, 6 Jul 2020 11:17:49 -0400 Subject: [PATCH 1992/2699] Add an admonishment --- ceph-iscsi/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 9a7d2b4c..74cccde9 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -69,6 +69,10 @@ essentially an iSCSI client and so its name is client-dependent. Some initiators may impose policy on credentials (`client-username` and `client-password`). +> **Important**: The underlying machines for the ceph-iscsi units must have + internal name resolution working (i.e. the machines must be able to resolve + each other's hostnames). 
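+
+A quick way to verify this, assuming two deployed units, is to check that each
+unit can resolve the hostname of its peer (a sketch; substitute the actual
+hostnames):
+
+    juju run --unit ceph-iscsi/0 'getent hosts <hostname-of-ceph-iscsi-1>'
+    juju run --unit ceph-iscsi/1 'getent hosts <hostname-of-ceph-iscsi-0>'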
+ ### The `gwcli` utility The management of targets, beyond the target-creation action described above, From a78d81ab37327c7ddffc71ef341147ea8599ffab Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 7 Jul 2020 10:45:17 -0400 Subject: [PATCH 1993/2699] Edits based on review --- ceph-iscsi/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 74cccde9..70f79d63 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -21,7 +21,7 @@ We are assuming a pre-existing Ceph cluster. To provide multiple data paths to clients deploy exactly two ceph-iscsi units: - juju deploy -n 2 cs:~gnuoy/ceph-iscsi-08 + juju deploy -n 2 cs:~openstack-charmers-next/ceph-iscsi Then add a relation to the ceph-mon application: From f19105f45c4e1167cb9aea3827b0b16491c42686 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Jun 2020 14:22:18 +0100 Subject: [PATCH 1994/2699] Updates for 20.08 cycle start for groovy and libs - Adds groovy to the series in the metadata - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild Change-Id: I6772e104b883b79cf85b5c19dc401b431c087878 --- ceph-rbd-mirror/rebuild | 2 +- ceph-rbd-mirror/src/metadata.yaml | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 19ea6dd7..ff2b57ee 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -06fc37a3-f9fe-49d9-92e0-dd7b0f749c1c +918d9792-a4d0-11ea-8a27-a78395c748ed diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index e50d8022..09f956b1 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -10,15 +10,16 @@ description: | NOTE: The charm requires Ceph Luminous or later. tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal +- xenial +- bionic +- eoan +- focal +- groovy extra-bindings: public: cluster: From 82d2bcd7b9f066a2734f53232312e6afc053bc92 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 7 Jul 2020 15:43:58 +0200 Subject: [PATCH 1995/2699] Add Victoria test bundles Change-Id: If4831f159ca42d112eb29f2ef4ea9928e90ed569 --- .../bundles/focal-victoria-namespaced.yaml | 117 ++++++++++++++++++ .../tests/bundles/focal-victoria.yaml | 116 +++++++++++++++++ .../bundles/groovy-victoria-namespaced.yaml | 117 ++++++++++++++++++ .../tests/bundles/groovy-victoria.yaml | 116 +++++++++++++++++ ceph-radosgw/tests/tests.yaml | 9 +- 5 files changed, 472 insertions(+), 3 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-victoria.yaml create mode 100644 ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/groovy-victoria.yaml diff --git a/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml new file mode 100644 index 00000000..9e5c423c --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-victoria.yaml b/ceph-radosgw/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..ddfba9d9 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-victoria.yaml @@ -0,0 +1,116 @@ +options: + source: &source cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml b/ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml new file mode 100644 index 00000000..33cf983b --- /dev/null +++ b/ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source distro + +series: groovy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/groovy-victoria.yaml b/ceph-radosgw/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..b7eaa117 --- /dev/null +++ b/ceph-radosgw/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,116 @@ +options: + source: &source distro + +series: groovy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index d29f5d83..22c8d24f 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -21,7 +21,10 @@ gate_bundles: smoke_bundles: - vault: bionic-ussuri dev_bundles: - - vault: focal-ussuri + - vault: groovy-victoria + - vault: groovy-victoria-namespaced + - vault: focal-victoria + - vault: focal-victoria-namespaced - bionic-queens-multisite - bionic-rocky-multisite target_deploy_status: @@ -38,5 +41,5 @@ tests: - zaza.openstack.charm_tests.swift.tests.S3APITest tests_options: force_deploy: - - focal-ussuri - - focal-ussuri-namespaced + - groovy-victoria + - groovy-victoria-namespaced From 520582d18a8c499a3797859f272ed334ac1611f4 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 8 Jul 2020 15:43:33 +0100 Subject: [PATCH 1996/2699] Fix upgrade bug (luminous) where mgr is not restarted From luminous to mimic, the ceph-mgr isn't restarted, leaving it at luminous, rather than mimic. This change ensures that the ceph-mgr is restarted when the version is luminous or later. 
Related charms.ceph change: I2844736af2de27ce071db859311ac99b9b3057ad

Closes-Bug: #1849874
Change-Id: Id059fbfd46bed4b9f96e85251fa83067696857d5
---
 ceph-mon/lib/charms_ceph/utils.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py
index 7a0cb7d4..1a51e7c5 100644
--- a/ceph-mon/lib/charms_ceph/utils.py
+++ b/ceph-mon/lib/charms_ceph/utils.py
@@ -2183,6 +2183,9 @@ def upgrade_monitor(new_version):
     log("Current ceph version is {}".format(current_version))
     log("Upgrading to: {}".format(new_version))
 
+    # Needed to determine whether to stop/start ceph-mgr
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0
+
     try:
         add_source(config('source'), config('key'))
         apt_update(fatal=True)
@@ -2194,6 +2197,10 @@ def upgrade_monitor(new_version):
     try:
         if systemd():
             service_stop('ceph-mon')
+            log("restarting ceph-mgr.target maybe: {}"
+                .format(luminous_or_later))
+            if luminous_or_later:
+                service_stop('ceph-mgr.target')
         else:
             service_stop('ceph-mon-all')
         apt_install(packages=determine_packages(), fatal=True)
@@ -2217,7 +2224,13 @@ def upgrade_monitor(new_version):
                   perms=0o755)
 
         if systemd():
-            service_start('ceph-mon')
+            service_restart('ceph-mon')
+            log("starting ceph-mgr.target maybe: {}".format(luminous_or_later))
+            if luminous_or_later:
+                # due to BUG: #1849874 we have to force a restart to get it to
+                # drop the previous version of ceph-manager and start the new
+                # one.
+                service_restart('ceph-mgr.target')
         else:
             service_start('ceph-mon-all')
     except subprocess.CalledProcessError as err:

From d823eba01d5e2f5ed5f5d6c46d35fe06829b861e Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Tue, 2 Jun 2020 14:22:18 +0100
Subject: [PATCH 1997/2699] Updates for 20.08 cycle start for groovy and libs

- Adds groovy to the series in the metadata
- Classic charms: sync charm-helpers.
- Classic ceph based charms: also sync charms.ceph
- Reactive charms: trigger a rebuild

Change-Id: I9912fe43e6db71ed81e47175f9f790ed550c9c31
---
 ceph-fs/rebuild           |  2 +-
 ceph-fs/src/metadata.yaml | 17 +++++++++--------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild
index 315feac0..fddb2abd 100644
--- a/ceph-fs/rebuild
+++ b/ceph-fs/rebuild
@@ -2,4 +2,4 @@
 # when dependencies of the charm change,
 # but nothing in the charm needs to.
 # simply change the uuid to something new
-c17ec0ce-937e-11ea-a49b-8b4b0c8e2b7f
+917e1d08-a4d0-11ea-8fb8-332b1d5ca7cc
diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml
index f3099745..c636e3ea 100644
--- a/ceph-fs/src/metadata.yaml
+++ b/ceph-fs/src/metadata.yaml
@@ -5,15 +5,16 @@ description: |
   Ceph is a distributed storage and network file system designed to provide
   excellent performance, reliability, and scalability.
tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal +- xenial +- bionic +- eoan +- focal +- groovy subordinate: false requires: ceph-mds: From 5425f40eb6c9f0af55b43443e86eb9669f797590 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 9 Jul 2020 16:22:39 +0200 Subject: [PATCH 1998/2699] Add Victoria test bundles Change-Id: Ibcb9032c11deb4c1062492a3f690843fad48cc6c --- ceph-fs/src/tests/bundles/focal-victoria.yaml | 219 ++++++++++++++++++ .../src/tests/bundles/groovy-victoria.yaml | 219 ++++++++++++++++++ ceph-fs/src/tests/tests.yaml | 5 +- 3 files changed, 441 insertions(+), 2 deletions(-) create mode 100644 ceph-fs/src/tests/bundles/focal-victoria.yaml create mode 100644 ceph-fs/src/tests/bundles/groovy-victoria.yaml diff --git a/ceph-fs/src/tests/bundles/focal-victoria.yaml b/ceph-fs/src/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..d9309b36 --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-victoria.yaml @@ -0,0 +1,219 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: 
*openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/groovy-victoria.yaml b/ceph-fs/src/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..4cd78fd4 --- /dev/null +++ b/ceph-fs/src/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,219 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series groovy + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + to: + - '3' + + 
ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 
'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index f2b3fde7..e025ace1 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -14,7 +14,8 @@ gate_bundles: smoke_bundles: - bionic-stein dev_bundles: - - bionic-train + - groovy-victoria + - focal-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network @@ -26,4 +27,4 @@ tests: - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest tests_options: force_deploy: - - focal-ussuri + - groovy-victoria From 02b5e7ece9db90f9fda7cd263cc1d8174b1355a7 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 8 Jul 2020 18:10:28 -0400 Subject: [PATCH 1999/2699] Improve README This improvement is part of a wave of polish in preparation for the launch of the Ceph product. Add Configuration, Actions, and Bugs sections. In config.yaml, modernise example values for 'source' and use consistent words with the ceph-osd, ceph-mon, and ceph-fs charms. Change-Id: Iaf45df89db4b471c6379e09881758dae4b723783 --- ceph-fs/src/README.md | 76 +++++++++++++++++++++++++++++------------ ceph-fs/src/config.yaml | 8 ++--- 2 files changed, 59 insertions(+), 25 deletions(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 2061e62d..a097a935 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -1,35 +1,69 @@ -# CephFS Charm - -This charm exists to provide integration of CephFS. - # Overview -Ceph is a distributed storage and network file system designed to provide +[Ceph][ceph-upstream] is a unified, distributed storage system designed for excellent performance, reliability, and scalability. -This charm deploys a Ceph MDS cluster. +The ceph-fs charm deploys the metadata server daemon (MDS) for the Ceph +distributed file system (CephFS). It is used in conjunction with the +[ceph-mon][ceph-mon-charm] and the [ceph-osd][ceph-osd-charm] charms. + +Highly available CephFS is achieved by deploying multiple MDS servers (i.e. +multiple ceph-fs units). + +# Usage + +## Configuration + +This section covers common and/or important configuration options. See file +`config.yaml` for the full list of options, along with their descriptions and +default values. A YAML file (e.g. `ceph-osd.yaml`) is often used to store +configuration options. See the [Juju documentation][juju-docs-config-apps] for +details on configuring applications. + +#### `source` -Usage -===== +The `source` option states the software sources. A common value is an OpenStack +UCA release (e.g. 'cloud:xenial-queens' or 'cloud:bionic-ussuri'). See [Ceph +and the UCA][cloud-archive-ceph]. The underlying host's existing apt sources +will be used if this option is not specified (this behaviour can be explicitly +chosen by using the value of 'distro'). -Boot things up by using: +## Deployment - juju deploy -n 3 ceph-mon - juju deploy -n 3 ceph-osd +We are assuming a pre-existing Ceph cluster. 
-You can then deploy this charm by simply doing: +To deploy a single MDS node: juju deploy ceph-fs - juju add-relation ceph-fs ceph-mon -Once the ceph-mon and osd charms have bootstrapped the cluster, the ceph-mon -charm will notify the ceph-fs charm. +Then add a relation to the ceph-mon application: + + juju add-relation ceph-fs:ceph-mds ceph-mon:mds + +## Actions + +This section lists Juju [actions][juju-docs-actions] supported by the charm. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-fs`. If the charm is not +deployed then see file `actions.yaml`. + +* `get-quota` +* `remove-quota` +* `set-quota` + +# Bugs + +Please report bugs on [Launchpad][lp-bugs-charm-ceph-fs]. -Contact Information -=================== +For general charm questions refer to the OpenStack [Charm Guide][cg]. -## Ceph + -- [Ceph website](http://ceph.com) -- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) -- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) +[cg]: https://docs.openstack.org/charm-guide +[ceph-upstream]: https://ceph.io +[ceph-mon-charm]: https://jaas.ai/ceph-mon +[ceph-osd-charm]: https://jaas.ai/ceph-osd +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[lp-bugs-charm-ceph-fs]: https://bugs.launchpad.net/charm-ceph-fs/+filebug +[cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 85b68c37..0994d0af 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -8,14 +8,14 @@ options: default: distro description: | Optional configuration to support use of additional sources such as: + . - ppa:myteam/ppa - - cloud:trusty-proposed/kilo + - cloud:bionic-ussuri + - cloud:xenial-proposed/queens - http://my.archive.com/ubuntu main + . The last option should be used in conjunction with the key configuration option. - Note that a minimum ceph version of 0.48.2 is required for use with this - charm which is NOT provided by the packages in the main Ubuntu archive - for precise but is provided in the Ubuntu cloud archive. key: type: string default: From f9b7e61871f49b82a06b39962ede89ad2616cdac Mon Sep 17 00:00:00 2001 From: Brett Milford Date: Mon, 6 Jul 2020 15:23:37 +1000 Subject: [PATCH 2000/2699] Warning description for autotune config. Change-Id: Ieaccc18a39d018d120ae8bd6ee62b97f30d90e41 Partial-Bug: #1798794 --- ceph-osd/config.yaml | 7 +++++-- ceph-osd/hooks/ceph_hooks.py | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 66080a9b..1b4240a6 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -317,8 +317,11 @@ options: Enabling this option will attempt to tune your network card sysctls and hard drive settings. This changes hard drive read ahead settings and max_sectors_kb. For the network card this will detect the link speed - and make appropriate sysctl changes. Enabling this option should - generally be safe. + and make appropriate sysctl changes. + WARNING: This option is DEPRECATED and will be removed in the next release. + Exercise caution when enabling this feature; examine and + confirm sysctl values are appropriate for your environment. See + http://pad.lv/1798794 for a full discussion. 
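+      To disable the behaviour explicitly, the option can be set to false,
+      for example: 'juju config ceph-osd autotune=false'.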
   aa-profile-mode:
     type: string
     default: 'disable'
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 51c41c21..a4816d60 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -316,6 +316,8 @@ def install():
     apt_update(fatal=True)
     apt_install(packages=ceph.determine_packages(), fatal=True)
     if config('autotune'):
+        log('The autotune config is deprecated and planned '
+            'for removal in the next release.', level=WARNING)
         tune_network_adapters()
     install_udev_rules()
 
@@ -547,6 +549,8 @@ def prepare_disks_and_activate():
                 config('osd-encrypt-keymanager'))
             # Make it fast!
             if config('autotune'):
+                log('The autotune config is deprecated and planned '
+                    'for removal in the next release.', level=WARNING)
                 ceph.tune_dev(dev)
         ceph.start_osds(get_devices())

From 44f33aeaaffbc5467e20fc126d11c3eb7d9bb2e2 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Sat, 11 Jul 2020 08:43:30 +0000
Subject: [PATCH 2001/2699] Provides unit_name to secrets provider

Pass the unit name to the secrets provider via relation data. The
unit name is not available in CMRs so needs to be passed this way.

Change-Id: I9fa162bbf45144d0827c669fc9b37d7b336366cd
---
 ceph-osd/hooks/ceph_hooks.py           |  2 ++
 ceph-osd/unit_tests/test_ceph_hooks.py | 10 ++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 51c41c21..78280dba 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -40,6 +40,7 @@
     relation_set,
     relations_of_type,
     Hooks,
+    local_unit,
     UnregisteredHookError,
     service_name,
     status_get,
@@ -769,6 +770,7 @@ def secrets_storage_joined(relation_id=None):
                  secret_backend='charm-vaultlocker',
                  isolated=True,
                  access_address=get_relation_ip('secrets-storage'),
+                 unit_name=local_unit(),
                  hostname=socket.gethostname())

diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py
index 0d11c5f1..07b3ab4d 100644
--- a/ceph-osd/unit_tests/test_ceph_hooks.py
+++ b/ceph-osd/unit_tests/test_ceph_hooks.py
@@ -634,6 +634,7 @@ def test_get_bdev_enable_discard(self, mock_config,
             self.assertEqual(ceph_hooks.get_bdev_enable_discard(),
                              expected)
 
+@patch.object(ceph_hooks, 'local_unit')
 @patch.object(ceph_hooks, 'relation_get')
 @patch.object(ceph_hooks, 'relation_set')
 @patch.object(ceph_hooks, 'prepare_disks_and_activate')
@@ -646,7 +647,9 @@ def test_secrets_storage_relation_joined(self,
                                              _get_relation_ip,
                                              _prepare_disks_and_activate,
                                              _relation_set,
-                                             _relation_get):
+                                             _relation_get,
+                                             _local_unit):
+        _local_unit.return_value = 'ceph-osd/0'
         _get_relation_ip.return_value = '10.23.1.2'
         _socket.gethostname.return_value = 'testhost'
         ceph_hooks.secrets_storage_joined()
@@ -656,6 +659,7 @@ def test_secrets_storage_relation_joined(self,
             secret_backend='charm-vaultlocker',
             isolated=True,
             access_address='10.23.1.2',
+            unit_name='ceph-osd/0',
             hostname='testhost'
         )
         _socket.gethostname.assert_called_once_with()
@@ -665,7 +669,9 @@ def test_secrets_storage_relation_changed(self,
                                              _get_relation_ip,
                                              _prepare_disks_and_activate,
                                              _relation_set,
-                                             _relation_get):
+                                             _relation_get,
+                                             _local_unit):
+        _local_unit.return_value = 'ceph-osd/0'
         _relation_get.return_value = None
         ceph_hooks.secrets_storage_changed()
         _prepare_disks_and_activate.assert_called_once_with()

From e8a2c92f30709a530d05161ce90b8f827ba1533f Mon Sep 17 00:00:00 2001
From: James Page
Date: Mon, 13 Jul 2020 08:26:03 +0100
Subject: [PATCH 2002/2699] Switch to using openstack-python3-charm-jobs

Change-Id:
Ibc09bc7951babe0bc18d1c377242b0dbdf32ab89
Depends-On: Ib9ed240aef6680be7c972088ca5337b5740b9c36
---
 ceph-osd/.zuul.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml
index b3037e94..fd20909e 100644
--- a/ceph-osd/.zuul.yaml
+++ b/ceph-osd/.zuul.yaml
@@ -1,5 +1,4 @@
 - project:
     templates:
-      - python35-charm-jobs
-      - openstack-python3-ussuri-jobs
+      - openstack-python3-charm-jobs
       - openstack-cover-jobs

From 7a339dd553d1a01ad11645649ac86efe98d28fc9 Mon Sep 17 00:00:00 2001
From: Peter Matulis
Date: Wed, 8 Jul 2020 14:05:19 -0400
Subject: [PATCH 2003/2699] Improve README

This improvement is part of a wave of polish in preparation for the
launch of the Ceph product.

In config.yaml, modernise example values for 'source' and use
consistent words with the ceph-osd, ceph-mon, and ceph-fs charms. Also
improve description for 'expected-osd-count'.

Change-Id: Ieaf3b9c2700bcf72ebb34b159cf5c9b761b1514d
---
 ceph-mon/README.md   | 73 +++++++++++++++++++++++++++++++++++++++-----
 ceph-mon/config.yaml | 16 +++++-----
 2 files changed, 75 insertions(+), 14 deletions(-)

diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index cad0ca7b..722c8bbf 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -10,6 +10,63 @@ cluster.
 
 # Usage
 
+## Configuration
+
+This section covers common and/or important configuration options. See file
+`config.yaml` for the full list of options, along with their descriptions and
+default values. See the [Juju documentation][juju-docs-config-apps] for details
+on configuring applications.
+
+#### `customize-failure-domain`
+
+The `customize-failure-domain` option determines how a Ceph CRUSH map is
+configured.
+
+A value of 'false' (the default) will lead to a map that will replicate data
+across hosts (implemented as [Ceph bucket type][upstream-ceph-buckets] 'host').
+With a value of 'true' all MAAS-defined zones will be used to generate a map
+that will replicate data across Ceph availability zones (implemented as bucket
+type 'rack').
+
+This option is also supported by the ceph-osd charm. Its value must be the same
+for both charms.
+
+#### `monitor-count`
+
+The `monitor-count` option gives the number of ceph-mon units in the monitor
+sub-cluster (where one ceph-mon unit represents one MON). The default value is
+'3' and is generally a good choice, but it is good practice to set this
+explicitly to avoid a possible race condition during the formation of the
+sub-cluster. To establish quorum and enable partition tolerance an odd number
+of ceph-mon units is required.
+
+> **Important**: A monitor count of less than three is not recommended for
+  production environments. Test environments can use a single ceph-mon unit by
+  setting this option to '1'.
+
+#### `expected-osd-count`
+
+The `expected-osd-count` option states the number of OSDs expected to be
+deployed in the cluster. This value can influence the number of placement
+groups (PGs) to use per pool. The PG calculation is based either on the actual
+number of OSDs or this option's value, whichever is greater. The default value
+is '0', which tells the charm to only consider the actual number of OSDs. If
+the actual number of OSDs is less than three then this option must explicitly
+state that number. Only once a sufficient (or prescribed) number of OSDs has
+been attained will the charm be able to create Ceph pools.
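+
+For example, a test cloud that will eventually have three OSDs could state
+the count explicitly (a sketch):
+
+    juju config ceph-mon expected-osd-count=3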
+ +> **Note**: The inability to create a pool due to an insufficient number of + OSDs will cause any consuming application (characterised by a relation + involving the `ceph-mon:client` endpoint) to remain in the 'waiting' state. + +#### `source` + +The `source` option states the software sources. A common value is an OpenStack +UCA release (e.g. 'cloud:xenial-queens' or 'cloud:bionic-ussuri'). See [Ceph +and the UCA][cloud-archive-ceph]. The underlying host's existing apt sources +will be used if this option is not specified (this behaviour can be explicitly +chosen by using the value of 'distro'). + ## Deployment A cloud with three MON nodes is a typical design whereas three OSD nodes are @@ -17,12 +74,11 @@ considered the minimum. For example, to deploy a Ceph cluster consisting of three OSDs and three MONs: juju deploy -n 3 --config ceph-osd.yaml ceph-osd - juju deploy --to lxd:0 ceph-mon - juju add-unit --to lxd:1 ceph-mon - juju add-unit --to lxd:2 ceph-mon - juju add-relation ceph-osd ceph-mon + juju deploy -n 3 --to lxd:0,lxd:1,lxd:2 ceph-mon + juju add-relation ceph-osd:mon ceph-mon:osd -Here, a containerised MON is running alongside each OSD. +Here, a containerised MON is running alongside each OSD. We've assumed that the +machines spawned in the first command are assigned IDs of 0, 1, and 2. By default, the monitor cluster will not be complete until three ceph-mon units have been deployed. This is to ensure that a quorum is achieved prior to the @@ -46,8 +102,8 @@ connected to. The ceph-mon charm exposes the following Ceph traffic types (bindings): -- 'public' (front-side) -- 'cluster' (back-side) +* 'public' (front-side) +* 'cluster' (back-side) For example, providing that spaces 'data-space' and 'cluster-space' exist, the deploy command above could look like this: @@ -131,8 +187,11 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [ceph-osd-charm]: https://jaas.ai/ceph-osd [juju-docs-actions]: https://jaas.ai/docs/actions [juju-docs-spaces]: https://jaas.ai/docs/spaces +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications [ceph-docs-network-ref]: http://docs.ceph.com/docs/master/rados/configuration/network-config-ref [ceph-docs-monitors]: https://docs.ceph.com/docs/master/dev/mon-bootstrap [lp-bugs-charm-ceph-mon]: https://bugs.launchpad.net/charm-ceph-mon/+filebug [cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html [prometheus-charm]: https://jaas.ai/prometheus2 +[cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA +[upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 55e41380..690f7f77 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -15,7 +15,8 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:xenial-proposed/ocata + - cloud:bionic-ussuri + - cloud:xenial-proposed/queens - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration @@ -106,12 +107,13 @@ options: type: int default: 0 description: | - Number of OSDs expected to be deployed in the cluster. This value is used - for calculating the number of placement groups on pool creation. 
The - number of placement groups for new pools are based on the actual number - of OSDs in the cluster or the expected-osd-count, whichever is greater - A value of 0 will cause the charm to only consider the actual number of - OSDs in the cluster. + The number of OSDs expected to be deployed in the cluster. This value can + influence the number of placement groups (PGs) to use for pools. The PG + calculation is based either on the actual number of OSDs or this option's + value, whichever is greater. The default value is '0', which tells the + charm to only consider the actual number of OSDs. If the actual number of + OSDs is less than three then this option must explicitly state that + number. pgs-per-osd: type: int default: 100 From 65615ab72b1e5355b6021a6224b71f8fa7ff738e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 13 Jul 2020 18:59:03 +0000 Subject: [PATCH 2004/2699] Sync charm-helpers for Victoria/Groovy updates This sync picks up the release and version details for Victoria/Groovy. Change-Id: I8b4c3046101ad41004b2fa5108b5aafed6c75070 --- .../contrib/openstack/amulet/deployment.py | 3 ++ .../contrib/openstack/amulet/utils.py | 1 + .../charmhelpers/contrib/openstack/context.py | 30 +++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 14 +++++++++ ceph-mon/hooks/charmhelpers/core/hookenv.py | 6 ++-- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 8 +++++ ceph-mon/lib/charms_ceph/broker.py | 14 ++++++++- 7 files changed, 73 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index dd3aebe9..94ca079c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -300,6 +300,8 @@ def _get_openstack_release(self): ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, ('focal', None): self.focal_ussuri, + ('focal', 'cloud:focal-victoria'): self.focal_victoria, + ('groovy', None): self.groovy_victoria, } return releases[(self.series, self.openstack)] @@ -319,6 +321,7 @@ def _get_openstack_release_string(self): ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 14864198..63aea1e3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -63,6 +63,7 @@ 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', 'bionic_ussuri', 'focal_ussuri', + 'focal_victoria', 'groovy_victoria', ] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 335e2d5c..42abccf7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -721,6 +721,12 @@ def __call__(self): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) @@ -2714,6 +2720,19 @@ def __init__(self, bridges_key=None, 
bonds_key=None, enable_dpdk_key=None, self._ifname_mac_map[ifname] = [mac] self._mac_ifname_map[mac] = ifname + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of. + if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + # In light of the pre-deprecation notice in the docstring of this # class we will expose the ability to configure OVS bonds as a # DPDK-only feature, but generally use the data structures internally. @@ -2779,6 +2798,17 @@ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, self.add_interface( bridge, portname, ifname, iftype, pci_address, global_mtu) + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user have put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + def __getitem__(self, key): """Provide a Dict-like interface, get value of item. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index fbf01561..f21625d3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -143,6 +143,7 @@ 'stein', 'train', 'ussuri', + 'victoria', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -164,6 +165,7 @@ ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) @@ -186,6 +188,7 @@ ('2019.1', 'stein'), ('2019.2', 'train'), ('2020.1', 'ussuri'), + ('2020.2', 'victoria'), ]) # The ugly duckling - must list releases oldest to newest @@ -226,6 +229,8 @@ ['2.22.0', '2.23.0']), ('ussuri', ['2.24.0', '2.25.0']), + ('victoria', + ['2.25.0']), ]) # >= Liberty version->codename mapping @@ -241,6 +246,7 @@ ('19', 'stein'), ('20', 'train'), ('21', 'ussuri'), + ('22', 'victoria'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -253,6 +259,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -265,6 +272,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -277,6 +285,7 @@ ('15', 'stein'), ('16', 'train'), ('17', 'ussuri'), + ('18', 'victoria'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -289,6 +298,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -301,6 +311,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -313,6 +324,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -325,6 +337,7 @@ ('18', 'stein'), ('19', 'train'), ('20', 'ussuri'), + ('21', 'victoria'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -337,6 +350,7 @@ ('15', 'stein'), ('16', 'train'), 
('18', 'ussuri'), + ('19', 'victoria'), ]), } diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index d7c37c17..db7ce728 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -372,8 +372,10 @@ def load_previous(self, path=None): try: self._prev_dict = json.load(f) except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 3ddaf0dd..33152840 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -190,6 +190,14 @@ 'ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', + # Victoria + 'victoria': 'focal-updates/victoria', + 'focal-victoria': 'focal-updates/victoria', + 'focal-victoria/updates': 'focal-updates/victoria', + 'focal-updates/victoria': 'focal-updates/victoria', + 'victoria/proposed': 'focal-proposed/victoria', + 'focal-victoria/proposed': 'focal-proposed/victoria', + 'focal-proposed/victoria': 'focal-proposed/victoria', } diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index 726f9498..15552cd8 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -540,11 +540,13 @@ def handle_remove_cache_tier(request, service): pool.remove_cache_tier(cache_pool=cache_pool) -def handle_set_pool_value(request, service): +def handle_set_pool_value(request, service, coerce=False): """Sets an arbitrary pool value. :param request: dict of request operations and params :param service: The ceph client to run the command under. + :param coerce: Try to parse/coerce the value into the correct type. + Used by the action code that only gets Str from Juju :returns: dict. exit-code and reason if not 0 """ # Set arbitrary pool values @@ -558,6 +560,16 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] + # BUG: #1838650 - the function needs to try to coerce the value param to + # the type required for the validator to pass. Note, if this blows, then + # the param isn't parsable to the correct type. + if coerce: + try: + params['value'] = validator_params[0](params['value']) + except ValueError: + raise RuntimeError("Value {} isn't of type {}" + .format(params['value'], validator_params[0])) + # end of BUG: #1838650 if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) From c2a59c4f69e06af220aa37e904c730ce5e08b094 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 13 Jul 2020 18:59:12 +0000 Subject: [PATCH 2005/2699] Sync charm-helpers for Victoria/Groovy updates This sync picks up the release and version details for Victoria/Groovy. 
Change-Id: I459b37cb22ac00d8d4fd9983d9a995b475a0207d --- .../contrib/openstack/amulet/deployment.py | 3 ++ .../contrib/openstack/amulet/utils.py | 1 + .../charmhelpers/contrib/openstack/context.py | 30 +++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 14 +++++++++ ceph-osd/hooks/charmhelpers/core/hookenv.py | 6 ++-- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 8 +++++ ceph-osd/lib/charms_ceph/broker.py | 14 ++++++++- ceph-osd/lib/charms_ceph/utils.py | 15 +++++++++- 8 files changed, 87 insertions(+), 4 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index dd3aebe9..94ca079c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -300,6 +300,8 @@ def _get_openstack_release(self): ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, ('focal', None): self.focal_ussuri, + ('focal', 'cloud:focal-victoria'): self.focal_victoria, + ('groovy', None): self.groovy_victoria, } return releases[(self.series, self.openstack)] @@ -319,6 +321,7 @@ def _get_openstack_release_string(self): ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 14864198..63aea1e3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -63,6 +63,7 @@ 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', 'bionic_ussuri', 'focal_ussuri', + 'focal_victoria', 'groovy_victoria', ] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 335e2d5c..42abccf7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -721,6 +721,12 @@ def __call__(self): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) @@ -2714,6 +2720,19 @@ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, self._ifname_mac_map[ifname] = [mac] self._mac_ifname_map[mac] = ifname + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of. + if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + # In light of the pre-deprecation notice in the docstring of this # class we will expose the ability to configure OVS bonds as a # DPDK-only feature, but generally use the data structures internally. 
@@ -2779,6 +2798,17 @@ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, self.add_interface( bridge, portname, ifname, iftype, pci_address, global_mtu) + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user have put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + def __getitem__(self, key): """Provide a Dict-like interface, get value of item. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index fbf01561..f21625d3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -143,6 +143,7 @@ 'stein', 'train', 'ussuri', + 'victoria', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -164,6 +165,7 @@ ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) @@ -186,6 +188,7 @@ ('2019.1', 'stein'), ('2019.2', 'train'), ('2020.1', 'ussuri'), + ('2020.2', 'victoria'), ]) # The ugly duckling - must list releases oldest to newest @@ -226,6 +229,8 @@ ['2.22.0', '2.23.0']), ('ussuri', ['2.24.0', '2.25.0']), + ('victoria', + ['2.25.0']), ]) # >= Liberty version->codename mapping @@ -241,6 +246,7 @@ ('19', 'stein'), ('20', 'train'), ('21', 'ussuri'), + ('22', 'victoria'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -253,6 +259,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -265,6 +272,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -277,6 +285,7 @@ ('15', 'stein'), ('16', 'train'), ('17', 'ussuri'), + ('18', 'victoria'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -289,6 +298,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -301,6 +311,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -313,6 +324,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -325,6 +337,7 @@ ('18', 'stein'), ('19', 'train'), ('20', 'ussuri'), + ('21', 'victoria'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -337,6 +350,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), } diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index d7c37c17..db7ce728 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -372,8 +372,10 @@ def load_previous(self, path=None): try: self._prev_dict = json.load(f) except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 
3ddaf0dd..33152840 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -190,6 +190,14 @@ 'ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', + # Victoria + 'victoria': 'focal-updates/victoria', + 'focal-victoria': 'focal-updates/victoria', + 'focal-victoria/updates': 'focal-updates/victoria', + 'focal-updates/victoria': 'focal-updates/victoria', + 'victoria/proposed': 'focal-proposed/victoria', + 'focal-victoria/proposed': 'focal-proposed/victoria', + 'focal-proposed/victoria': 'focal-proposed/victoria', } diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 726f9498..15552cd8 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -540,11 +540,13 @@ def handle_remove_cache_tier(request, service): pool.remove_cache_tier(cache_pool=cache_pool) -def handle_set_pool_value(request, service): +def handle_set_pool_value(request, service, coerce=False): """Sets an arbitrary pool value. :param request: dict of request operations and params :param service: The ceph client to run the command under. + :param coerce: Try to parse/coerce the value into the correct type. + Used by the action code that only gets Str from Juju :returns: dict. exit-code and reason if not 0 """ # Set arbitrary pool values @@ -558,6 +560,16 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] + # BUG: #1838650 - the function needs to try to coerce the value param to + # the type required for the validator to pass. Note, if this blows, then + # the param isn't parsable to the correct type. + if coerce: + try: + params['value'] = validator_params[0](params['value']) + except ValueError: + raise RuntimeError("Value {} isn't of type {}" + .format(params['value'], validator_params[0])) + # end of BUG: #1838650 if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 7a0cb7d4..1a51e7c5 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -2183,6 +2183,9 @@ def upgrade_monitor(new_version): log("Current ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) + # Needed to determine if whether to stop/start ceph-mgr + luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2197,10 @@ def upgrade_monitor(new_version): try: if systemd(): service_stop('ceph-mon') + log("restarting ceph-mgr.target maybe: {}" + .format(luminous_or_later)) + if luminous_or_later: + service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) @@ -2217,7 +2224,13 @@ def upgrade_monitor(new_version): perms=0o755) if systemd(): - service_start('ceph-mon') + service_restart('ceph-mon') + log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) + if luminous_or_later: + # due to BUG: #1849874 we have to force a restart to get it to + # drop the previous version of ceph-manager and start the new + # one. 
+ service_restart('ceph-mgr.target') else: service_start('ceph-mon-all') except subprocess.CalledProcessError as err: From eaa123a598286f0c44bd8fd77c61aace8d07ae3a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 13 Jul 2020 18:59:24 +0000 Subject: [PATCH 2006/2699] Sync charm-helpers for Victoria/Groovy updates This sync picks up the release and version details for Victoria/Groovy. Change-Id: I0917cc6da9098131eaafd511c0e336141b763cf2 --- .../contrib/openstack/amulet/deployment.py | 3 ++ .../contrib/openstack/amulet/utils.py | 1 + .../charmhelpers/contrib/openstack/context.py | 30 +++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 14 +++++++++ .../hooks/charmhelpers/core/hookenv.py | 6 ++-- .../hooks/charmhelpers/fetch/ubuntu.py | 8 +++++ ceph-radosgw/lib/charms_ceph/broker.py | 14 ++++++++- ceph-radosgw/lib/charms_ceph/utils.py | 15 +++++++++- 8 files changed, 87 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index dd3aebe9..94ca079c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -300,6 +300,8 @@ def _get_openstack_release(self): ('disco', None): self.disco_stein, ('eoan', None): self.eoan_train, ('focal', None): self.focal_ussuri, + ('focal', 'cloud:focal-victoria'): self.focal_victoria, + ('groovy', None): self.groovy_victoria, } return releases[(self.series, self.openstack)] @@ -319,6 +321,7 @@ def _get_openstack_release_string(self): ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 14864198..63aea1e3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -63,6 +63,7 @@ 'bionic_stein', 'disco_stein', 'bionic_train', 'eoan_train', 'bionic_ussuri', 'focal_ussuri', + 'focal_victoria', 'groovy_victoria', ] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 335e2d5c..42abccf7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -721,6 +721,12 @@ def __call__(self): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) @@ -2714,6 +2720,19 @@ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, self._ifname_mac_map[ifname] = [mac] self._mac_ifname_map[mac] = ifname + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of. 
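+ # Illustrative sketch (hypothetical device names, not part of the + # original sync): with eth0 and eth1 enslaved to bond0, this mapping + # ends up roughly as {'eth0': [mac0], 'eth1': [mac1], 'bond0': [mac0, mac1]}.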
+ if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + # In light of the pre-deprecation notice in the docstring of this # class we will expose the ability to configure OVS bonds as a # DPDK-only feature, but generally use the data structures internally. @@ -2779,6 +2798,17 @@ def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, self.add_interface( bridge, portname, ifname, iftype, pci_address, global_mtu) + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user have put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + def __getitem__(self, key): """Provide a Dict-like interface, get value of item. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index fbf01561..f21625d3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -143,6 +143,7 @@ 'stein', 'train', 'ussuri', + 'victoria', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -164,6 +165,7 @@ ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) @@ -186,6 +188,7 @@ ('2019.1', 'stein'), ('2019.2', 'train'), ('2020.1', 'ussuri'), + ('2020.2', 'victoria'), ]) # The ugly duckling - must list releases oldest to newest @@ -226,6 +229,8 @@ ['2.22.0', '2.23.0']), ('ussuri', ['2.24.0', '2.25.0']), + ('victoria', + ['2.25.0']), ]) # >= Liberty version->codename mapping @@ -241,6 +246,7 @@ ('19', 'stein'), ('20', 'train'), ('21', 'ussuri'), + ('22', 'victoria'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -253,6 +259,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -265,6 +272,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -277,6 +285,7 @@ ('15', 'stein'), ('16', 'train'), ('17', 'ussuri'), + ('18', 'victoria'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -289,6 +298,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -301,6 +311,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -313,6 +324,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -325,6 +337,7 @@ ('18', 'stein'), ('19', 'train'), ('20', 'ussuri'), + ('21', 'victoria'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -337,6 +350,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), } diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index d7c37c17..db7ce728 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -372,8 +372,10 @@ def load_previous(self, path=None): try: self._prev_dict = json.load(f) except ValueError as e: - log('Unable to parse previous config data - 
{}'.format(str(e)), - level=ERROR) + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 3ddaf0dd..33152840 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -190,6 +190,14 @@ 'ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', + # Victoria + 'victoria': 'focal-updates/victoria', + 'focal-victoria': 'focal-updates/victoria', + 'focal-victoria/updates': 'focal-updates/victoria', + 'focal-updates/victoria': 'focal-updates/victoria', + 'victoria/proposed': 'focal-proposed/victoria', + 'focal-victoria/proposed': 'focal-proposed/victoria', + 'focal-proposed/victoria': 'focal-proposed/victoria', } diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 726f9498..15552cd8 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -540,11 +540,13 @@ def handle_remove_cache_tier(request, service): pool.remove_cache_tier(cache_pool=cache_pool) -def handle_set_pool_value(request, service): +def handle_set_pool_value(request, service, coerce=False): """Sets an arbitrary pool value. :param request: dict of request operations and params :param service: The ceph client to run the command under. + :param coerce: Try to parse/coerce the value into the correct type. + Used by the action code that only gets Str from Juju :returns: dict. exit-code and reason if not 0 """ # Set arbitrary pool values @@ -558,6 +560,16 @@ def handle_set_pool_value(request, service): # Get the validation method validator_params = POOL_KEYS[params['key']] + # BUG: #1838650 - the function needs to try to coerce the value param to + # the type required for the validator to pass. Note, if this blows, then + # the param isn't parsable to the correct type. 
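+ # Illustrative sketch (assumes POOL_KEYS maps e.g. 'size' to [int]): + # the str '3' received from a Juju action is coerced to int 3, while a + # non-numeric str raises ValueError and becomes the RuntimeError below.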
+ if coerce: + try: + params['value'] = validator_params[0](params['value']) + except ValueError: + raise RuntimeError("Value {} isn't of type {}" + .format(params['value'], validator_params[0])) + # end of BUG: #1838650 if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 7a0cb7d4..1a51e7c5 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -2183,6 +2183,9 @@ def upgrade_monitor(new_version): log("Current ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) + # Needed to determine if whether to stop/start ceph-mgr + luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2197,10 @@ def upgrade_monitor(new_version): try: if systemd(): service_stop('ceph-mon') + log("restarting ceph-mgr.target maybe: {}" + .format(luminous_or_later)) + if luminous_or_later: + service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) @@ -2217,7 +2224,13 @@ def upgrade_monitor(new_version): perms=0o755) if systemd(): - service_start('ceph-mon') + service_restart('ceph-mon') + log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) + if luminous_or_later: + # due to BUG: #1849874 we have to force a restart to get it to + # drop the previous version of ceph-manager and start the new + # one. + service_restart('ceph-mgr.target') else: service_start('ceph-mon-all') except subprocess.CalledProcessError as err: From 7475831d99804b0bdb7ec539e1eb1843ed673c9f Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 14 Jul 2020 11:47:09 +0200 Subject: [PATCH 2007/2699] Add Victoria test bundles Change-Id: Idaacce6069bc40400b13ecff136e4e65382da869 --- ceph-mon/tests/bundles/focal-victoria.yaml | 233 ++++++++++++++++++++ ceph-mon/tests/bundles/groovy-victoria.yaml | 233 ++++++++++++++++++++ ceph-mon/tests/tests.yaml | 4 +- 3 files changed, 469 insertions(+), 1 deletion(-) create mode 100644 ceph-mon/tests/bundles/focal-victoria.yaml create mode 100644 ceph-mon/tests/bundles/groovy-victoria.yaml diff --git a/ceph-mon/tests/bundles/focal-victoria.yaml b/ceph-mon/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..49a7767d --- /dev/null +++ b/ceph-mon/tests/bundles/focal-victoria.yaml @@ -0,0 +1,233 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: + charm: cs:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 
'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/groovy-victoria.yaml b/ceph-mon/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..cef3f244 --- /dev/null +++ b/ceph-mon/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,233 @@ +variables: + openstack-origin: &openstack-origin distro + +series: groovy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: + charm: 
cs:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index d92d4fcc..4bd8868e 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -15,6 +15,8 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: + - groovy-victoria + - focal-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: @@ -25,4 +27,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest tests_options: force_deploy: - - focal-ussuri + - groovy-victoria From 4a94d875a86e125f2621ca58fd0c1b622354dddf Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 14 Jul 2020 11:51:05 +0200 Subject: [PATCH 2008/2699] Add Victoria test bundles Change-Id: I0c0f478bfbc0bc42384d1314338a8ce99846ad19 --- ceph-osd/tests/bundles/focal-victoria.yaml | 222 ++++++++++++++++++++ ceph-osd/tests/bundles/groovy-victoria.yaml | 222 ++++++++++++++++++++ ceph-osd/tests/tests.yaml | 4 +- 3 files changed, 447 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/tests/bundles/focal-victoria.yaml create mode 100644 ceph-osd/tests/bundles/groovy-victoria.yaml diff --git a/ceph-osd/tests/bundles/focal-victoria.yaml b/ceph-osd/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..c7e56257 --- /dev/null +++ b/ceph-osd/tests/bundles/focal-victoria.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 
'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/groovy-victoria.yaml b/ceph-osd/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..1d1b1ffb --- /dev/null +++ b/ceph-osd/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: groovy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' 
+ - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index cc63b6f4..24e242f3 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -14,6 +14,8 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: + - focal-victoria + - groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: @@ -23,4 +25,4 @@ tests: - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest tests_options: force_deploy: - - focal-ussuri + - groovy-victoria From 7368eabaed3a13506ed493eb62adccea49f37595 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 6 Jul 2020 17:13:32 +0100 Subject: [PATCH 2009/2699] Add a progress watchdog for OSD upgrades This patch (in charms.ceph [1], copied here) adds the concept of a watchdog to the upgrade_monitor so that the charm can achieve two objectives: 1. waiting for much longer, while 2. detecting whether the previous node has died / gone away. This is needed for 'large' OSDs where the time to upgrade a node may exceed the current limit of 10 minutes, but also not to wait for 30 minutes on a dead previous node. The watchdog implements two timeouts and an additional 'alive' key from the previous node to indicate that it is still running. Otherwise, functionality is identical.
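An illustrative sketch of the intended interplay follows (not part of the patch; upgrade_steps, write_alive_key, read_alive_key and previous_done are hypothetical stand-ins for the monitor_key_set/monitor_key_get/monitor_key_exists closures used in the code):

    # Node performing the upgrade kicks while it works:
    dog = WatchDog(kick_interval=3 * 60, kick_function=write_alive_key)
    for step in upgrade_steps:
        step()
        dog.kick_the_dog()  # records an 'alive' timestamp at most every 3 min

    # Node waiting on its predecessor waits up to 30 minutes while kicks
    # keep arriving; WatchDogDeadException if kicks stop for over 5 minutes,
    # WatchDogTimeoutException if the overall 30 minute budget is exhausted:
    WatchDog.timed_wait(kicked_at_function=read_alive_key,
                        complete_function=previous_done,
                        wait_time=30 * 60,
                        compatibility_wait_time=10 * 60,
                        max_kick_interval=5 * 60)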
[1] See depends on below Depends-On: Ia450e936c2096f092af3be5a369b7abaf5023b16 Closes-Bug: #1762852 Change-Id: I6204a5ade684f0564c4be2d30df467c75baa6dba --- ceph-osd/lib/charms_ceph/utils.py | 298 +++++++++++++++++++++++++----- ceph-osd/tox.ini | 2 +- 2 files changed, 253 insertions(+), 47 deletions(-) diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 1a51e7c5..72e6b921 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -2169,15 +2169,18 @@ def roll_monitor_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade monitor') -# TODO(jamespage): -# Mimic support will need to ensure that ceph-mgr daemons are also -# restarted during upgrades - probably through use of one of the -# high level systemd targets shipped by the packaging. -def upgrade_monitor(new_version): +# For E731 we can't assign a lambda, therefore, instead pass this. +def noop(): + pass + + +def upgrade_monitor(new_version, kick_function=None): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. """ + if kick_function is None: + kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) @@ -2186,6 +2189,7 @@ def upgrade_monitor(new_version): # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2198,7 @@ def upgrade_monitor(new_version): err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() try: if systemd(): service_stop('ceph-mon') @@ -2204,6 +2209,7 @@ def upgrade_monitor(new_version): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + kick_function() owner = ceph_user() @@ -2217,6 +2223,8 @@ def upgrade_monitor(new_version): group=owner, follow_links=True) + kick_function() + # Ensure that mon directory is user writable hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -2257,13 +2265,22 @@ def lock_and_roll(upgrade_key, service, my_name, version): start_timestamp)) monitor_key_set(upgrade_key, "{}_{}_{}_start".format( service, my_name, version), start_timestamp) + + # alive indication: + alive_function = ( + lambda: monitor_key_set( + upgrade_key, "{}_{}_{}_alive" + .format(service, my_name, version), time.time())) + dog = WatchDog(kick_interval=3 * 60, + kick_function=alive_function) + log("Rolling") # This should be quick if service == 'osd': - upgrade_osd(version) + upgrade_osd(version, kick_function=dog.kick_the_dog) elif service == 'mon': - upgrade_monitor(version) + upgrade_monitor(version, kick_function=dog.kick_the_dog) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -2294,45 +2311,225 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): """ log("Previous node is: {}".format(previous_node)) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. 
- - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( + previous_node_started_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (previous_node_start_time is not None and - ((current_timestamp - (10 * 60)) > - float(previous_node_start_time))): - # NOTE(jamespage): - # Previous node is probably dead as we've been waiting - # for 10 minutes - lets move on and upgrade - log("Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - # NOTE(jamespage) - # Previous node has not started, or started less than - # 10 minutes ago - sleep a random amount of time and - # then check again. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( + "{}_{}_{}_start".format(service, previous_node, version))) + previous_node_finished_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) + "{}_{}_{}_done".format(service, previous_node, version))) + previous_node_alive_time_f = ( + lambda: monitor_key_get( + upgrade_key, + "{}_{}_{}_alive".format(service, previous_node, version))) + + # wait for 30 minutes until the previous node starts. We don't proceed + # unless we get a start condition. + try: + WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) + except WatchDog.WatchDogTimeoutException: + log("Waited for previous node to start for 30 minutes. " + "It didn't start, so it may have a serious issue. Continuing with " + "upgrade of this node.", + level=WARNING) + return + + # keep the time it started from this node's perspective. + previous_node_started_at = time.time() + log("Detected that previous node {} has started. Time now: {}" + .format(previous_node, previous_node_started_at)) + + # Now wait for the node to complete. The node may optionally be kicking + # with the *_alive key, which allows this node to wait longer as it 'knows' + # the other node is proceeding. + try: + WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, + complete_function=previous_node_finished_f, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60) + except WatchDog.WatchDogDeadException: + # previous node was kicking, but timed out; log this condition and move + # on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node started, but has now not ticked for 5 minutes. " + "Waited total of {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." + .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + except WatchDog.WatchDogTimeoutException: + # previous node never kicked, or simply took too long; log this + # condition and move on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node is taking too long; assuming it has died. " + "Waited {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node."
+ .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + + +class WatchDog(object): + """Watch a dog; basically a kickable timer with a timeout between two async + units. + + The idea is that you have an overall timeout and then can kick that timeout + with intermediary hits, with a max time between those kicks allowed. + + Note that this watchdog doesn't rely on the clock of the other side; just + roughly when it detects that the other side started. All timings are based + on the local clock. + + The kicker will not 'kick' more often than a set interval, regardless of + how often the kick_the_dog() function is called. The kicker provides a + function (lambda: -> None) that is called when the kick interval is + reached. + + The waiter calls the static method with a check function + (lambda: -> Boolean) that indicates when the wait should be over and the + maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval. + + So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick + interval, or however long it is expected for the key to propagate and to + allow for other delays. + + There is a compatibility mode where if the other side never kicks, then it + simply waits for the compatibility timer. + """ + + class WatchDogDeadException(Exception): + pass + + class WatchDogTimeoutException(Exception): + pass + + def __init__(self, kick_interval=3 * 60, kick_function=None): + """Initialise a new WatchDog + + :param kick_interval: the interval when this side kicks the other in + seconds. + :type kick_interval: Int + :param kick_function: The function to call that does the kick. + :type kick_function: Callable[] + """ + self.start_time = time.time() + self.last_run_func = None + self.last_kick_at = None + self.kick_interval = kick_interval + self.kick_f = kick_function + + def kick_the_dog(self): + """Might call the kick_function if it's time. + + This function can be called as frequently as needed, but will run the + self.kick_f function after kick_interval seconds have passed. + """ + now = time.time() + if (self.last_run_func is None or + (now - self.last_run_func > self.kick_interval)): + if self.kick_f is not None: + self.kick_f() + self.last_run_func = now + self.last_kick_at = now + + @staticmethod + def wait_until(wait_f, timeout=10 * 60): + """Wait for timeout seconds until the passed function returns True. + + :param wait_f: The function to call that will end the wait. + :type wait_f: Callable[[], Boolean] + :param timeout: The time to wait in seconds. + :type timeout: int + """ + start_time = time.time() + while(not wait_f()): + now = time.time() + if now > start_time + timeout: + raise WatchDog.WatchDogTimeoutException() + wait_time = random.randrange(5, 30) + log('wait_until: waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + + @staticmethod + def timed_wait(kicked_at_function, + complete_function, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60): + """Wait a maximum time with an intermediate 'kick' time. + + This function will wait for max_kick_interval seconds unless the + kicked_at_function() call returns a time that is not older than + max_kick_interval (in seconds). i.e. the other side can signal that it + is still doing things during the max_kick_interval as long as it kicks + at least every max_kick_interval seconds. + + The maximum wait is "wait_time", but the other side must keep kicking + during this period.
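+ + For example (illustrative): a kicker that refreshes its timestamp + every 3 minutes stays within max_kick_interval=5 * 60 and can hold + the wait open up to wait_time; once its timestamps stop for more than + 5 minutes the wait ends with WatchDogDeadException.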
+ + The "compatibility_wait_time" is used if the other side never kicks + (i.e. the kicked_at_function() always returns None). In this case the + function waits up to "compatibility_wait_time". + + Note that the type of the return from the kicked_at_function is an + Optional[str], not a Float. The function will coerce this to a float + for the comparison. This represents the return value of + time.time() at the "other side". It's a string to simplify the + function obtaining the time value from the other side. + + The function raises WatchDogTimeoutException if either the + compatibility_wait_time or the wait_time is exceeded. + + The function raises WatchDogDeadException if the max_kick_interval is + exceeded. + + Note that it is possible that the first kick interval is extended to + compatibility_wait_time if the "other side" doesn't kick immediately. + The best solution is for the other side to kick early and often. + + :param kicked_at_function: The function to call to retrieve the time + that the other side 'kicked' at. None if the other side hasn't + kicked. + :type kicked_at_function: Callable[[], Optional[str]] + :param complete_function: The callable that returns True when done. + :type complete_function: Callable[[], Boolean] + :param wait_time: the maximum time to wait, even with kicks, in + seconds. + :type wait_time: int + :param compatibility_wait_time: The time to wait if no kicks are + received, in seconds. + :type compatibility_wait_time: int + :param max_kick_interval: The maximum time allowed between kicks before + the wait is over, in seconds. + :type max_kick_interval: int + :raises: WatchDog.WatchDogTimeoutException, + WatchDog.WatchDogDeadException + """ + start_time = time.time() + while True: + if complete_function(): + break + # the time when the unit we are waiting for last kicked. + kicked_at = kicked_at_function() + now = time.time() + if kicked_at is None: + # assume other end doesn't do alive kicks + if (now - start_time > compatibility_wait_time): + raise WatchDog.WatchDogTimeoutException() + else: + # other side is participating in kicks; must kick at least + # every 'max_kick_interval' to stay alive. + if (now - float(kicked_at) > max_kick_interval): + raise WatchDog.WatchDogDeadException() + if (now - start_time > wait_time): + raise WatchDog.WatchDogTimeoutException() + delay_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(delay_time)) + time.sleep(delay_time) def get_upgrade_position(osd_sorted_list, match_name): @@ -2412,11 +2609,14 @@ def roll_osd_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade osd') -def upgrade_osd(new_version): +def upgrade_osd(new_version, kick_function=None): """Upgrades the current osd :param new_version: str. The new version to upgrade to """ + if kick_function is None: + kick_function = noop + current_version = get_version() status_set("maintenance", "Upgrading osd") log("Current ceph version is {}".format(current_version)) @@ -2431,10 +2631,13 @@ def upgrade_osd(new_version): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() + try: # Upgrade the packages before restarting the daemons.
status_set('maintenance', 'Upgrading packages to %s' % new_version) apt_install(packages=determine_packages(), fatal=True) + kick_function() # If the upgrade does not need an ownership update of any of the # directories in the osd service directory, then simply restart @@ -2458,13 +2661,16 @@ def upgrade_osd(new_version): os.listdir(CEPH_BASE_DIR)) non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), non_osd_dirs) - for path in non_osd_dirs: + for i, path in enumerate(non_osd_dirs): + if i % 100 == 0: + kick_function() update_owner(path) # Fast service restart wasn't an option because each of the OSD # directories need the ownership updated for all the files on # the OSD. Walk through the OSDs one-by-one upgrading the OSD. for osd_dir in _get_child_dirs(OSD_BASE_DIR): + kick_function() try: osd_num = _get_osd_num_from_dirname(osd_dir) _upgrade_single_osd(osd_num, osd_dir) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index b835733a..8080ba6d 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 exclude = */charmhelpers From 977b4c9085a265e3e252a62d59a29a272872f307 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 22 Jul 2020 11:27:38 +0100 Subject: [PATCH 2010/2699] Align context and interface name for mon relation Ensure that the name of the local endpoint and the context are aligned - 'ceph-radosgw' is the type of the interface and not the name of the local endpoint which is 'mon'. This misnaming causes the charm to sit in blocked state for an extended period of time until the relation data bag is complete, skipping the intermediate waiting/incomplete state. Change-Id: I9d3fab6020817be23f26d410aa59878287dbf5b0 Closes-Bug: 1885144 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 6 +++--- ceph-radosgw/hooks/utils.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 3c26fa48..8115e4e1 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -141,10 +141,10 @@ def ensure_host_resolvable_v6(hostname): class MonContext(context.CephContext): - interfaces = ['ceph-radosgw'] + interfaces = ['mon'] def __call__(self): - if not relation_ids('mon'): + if not relation_ids(self.interfaces[0]): return {} host = socket.gethostname() @@ -154,7 +154,7 @@ def __call__(self): auths = [] fsid = None - for rid in relation_ids('mon'): + for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): fsid = relation_get('fsid', rid=rid, unit=unit) _auth = relation_get('auth', rid=rid, unit=unit) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 13133ac3..17a76f73 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -62,7 +62,7 @@ # The interface is said to be satisfied if anyone of the interfaces in the # list has a complete context. REQUIRED_INTERFACES = { - 'mon': ['ceph-radosgw'], + 'mon': ['mon'], } CEPHRG_HA_RES = 'grp_cephrg_vips' TEMPLATES_DIR = 'templates' From 1ed9578271544d21d3f19ec6c8dab4c61e008e72 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 7 Jul 2020 19:04:19 -0400 Subject: [PATCH 2011/2699] Improve README This improvement is part of a wave of polish in preparation for the launch of the Ceph product. In config.yaml, improve 'osd-journal' option description. 
Also modernise example values for 'source' and use consistent words with the ceph-osd, ceph-mon, and ceph-fs charms. Change-Id: Iefbf57078115181c67b320e0c5b6cbd7dc05ac55 --- ceph-osd/README.md | 101 +++++++++++++++++++++++++++++++------------ ceph-osd/config.yaml | 10 +++-- 2 files changed, 80 insertions(+), 31 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 9a767804..52ee9e09 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -10,11 +10,50 @@ cluster. # Usage -## Storage devices +## Configuration + +This section covers common and/or important configuration options. See file +`config.yaml` for the full list of options, along with their descriptions and +default values. A YAML file (e.g. `ceph-osd.yaml`) is often used to store +configuration options. See the [Juju documentation][juju-docs-config-apps] for +details on configuring applications. + +#### `customize-failure-domain` + +The `customize-failure-domain` option determines how a Ceph CRUSH map is +configured. + +A value of 'false' (the default) will lead to a map that will replicate data +across hosts (implemented as [Ceph bucket type][upstream-ceph-buckets] 'host'). +With a value of 'true' all MAAS-defined zones will be used to generate a map +that will replicate data across Ceph availability zones (implemented as bucket +type 'rack'). + +This option is also supported by the ceph-mon charm. Its value must be the same +for both charms. + +#### `osd-devices` + +The `osd-devices` option lists what block devices can be used for OSDs across +the cluster. See section 'Storage devices' for an elaboration on this +fundamental topic. + +#### `source` + +The `source` option states the software sources. A common value is an OpenStack +UCA release (e.g. 'cloud:xenial-queens' or 'cloud:bionic-ussuri'). See [Ceph +and the UCA][cloud-archive-ceph]. The underlying host's existing apt sources +will be used if this option is not specified (this behaviour can be explicitly +chosen by using the value of 'distro'). + +### Storage devices + +A storage device is destined as an OSD (Object Storage Device). There can be +multiple OSDs per storage node (ceph-osd unit). The list of all possible storage devices for the cluster is defined by the -`osd-devices` option (default value is `/dev/vdb`). Configuration is typically -provided via a YAML file, like `ceph-osd.yaml`. See the following examples: +`osd-devices` option (default value is '/dev/vdb'). The following examples can +be used in the `ceph-osd.yaml` configuration file: 1. Block devices (regular) @@ -65,26 +104,31 @@ detects pre-existing data on a device. In this case the operator can either instruct the charm to ignore the disk (action `blacklist-add-disk`) or to have it purge all data on the disk (action `zap-disk`). +> **Important**: The recommended minimum number of OSDs in the cluster is three + and this is what the ceph-mon charm expects (the cluster will not form with a + lesser number). See option `expected-osd-count` in the ceph-mon charm to + overcome this but beware that going below three is not a supported + configuration. + ## Deployment -A cloud with three MON nodes is a typical design whereas three OSD nodes are +A cloud with three MON nodes is a typical design whereas three OSDs are considered the minimum. 
For example, to deploy a Ceph cluster consisting of -three OSDs and three MONs: +three OSDs (one per ceph-osd unit) and three MONs: - juju deploy --config ceph-osd.yaml -n 3 ceph-osd - juju deploy --to lxd:0 ceph-mon - juju add-unit --to lxd:1 ceph-mon - juju add-unit --to lxd:2 ceph-mon - juju add-relation ceph-osd ceph-mon + juju deploy -n 3 --config ceph-osd.yaml ceph-osd + juju deploy -n 3 --to lxd:0,lxd:1,lxd:2 ceph-mon + juju add-relation ceph-osd:mon ceph-mon:osd -Here, a containerised MON is running alongside each OSD. +Here, a containerised MON is running alongside each storage node. We've assumed +that the machines spawned in the first command are assigned IDs of 0, 1, and 2. > **Note**: Refer to the [Install OpenStack][cdg-install-openstack] page in the OpenStack Charms Deployment Guide for instructions on installing the ceph-osd application for use with OpenStack. For each ceph-osd unit, the ceph-osd charm will scan for all the devices -configured via the `osd-devices` option and attempt to assign to it all the +configured via the `osd-devices` option and attempt to assign to it all of the ones it finds. The cluster's initial pool of available storage is the "sum" of all these assigned devices. @@ -99,8 +143,8 @@ connected to. The ceph-osd charm exposes the following Ceph traffic types (bindings): -- 'public' (front-side) -- 'cluster' (back-side) +* 'public' (front-side) +* 'cluster' (back-side) For example, providing that spaces 'data-space' and 'cluster-space' exist, the deploy command above could look like this: @@ -142,10 +186,10 @@ intended for production. The profiles generated by the charm should **not** be used in the following scenarios: -- On any version of Ubuntu older than 16.04 -- On any version of Ceph older than Luminous -- When OSD journal devices are in use -- When Ceph BlueStore is enabled +* On any version of Ubuntu older than 16.04 +* On any version of Ceph older than Luminous +* When OSD journal devices are in use +* When Ceph BlueStore is enabled ## Block device encryption @@ -246,12 +290,12 @@ Use the `list-disks` action to list disks known to a unit. The action lists the unit's block devices by categorising them in three ways: -- `disks`: visible (known by udev), unused (not mounted), and not designated as +* `disks`: visible (known by udev), unused (not mounted), and not designated as an OSD journal (via the `osd-journal` configuration option) -- `blacklist`: like `disks` but blacklisted (see action `blacklist-add-disk`) +* `blacklist`: like `disks` but blacklisted (see action `blacklist-add-disk`) -- `non-pristine`: like `disks` but not eligible for use due to the presence of +* `non-pristine`: like `disks` but not eligible for use due to the presence of existing data Example: @@ -271,12 +315,12 @@ operator to manually add OSD volumes (for disks that are not listed by -- `osd-devices` (required) +* `osd-devices` (required) A space-separated list of devices to format and initialise as OSD volumes. -- `bucket` +* `bucket` The name of a Ceph bucket to add these devices to. Example: @@ -302,7 +346,7 @@ Use the `list-disks` action to list the unit's blacklist entries. -- `osd-devices` (required) +* `osd-devices` (required) A space-separated list of devices to add to a unit's blacklist. Example: @@ -319,7 +363,7 @@ blacklist. -- `osd-devices` (required) +* `osd-devices` (required) A space-separated list of devices to remove from a unit's blacklist. Each device should have an existing entry in the unit's blacklist. 
Use the @@ -344,12 +388,12 @@ the `add-disk` action. -- `devices` (required) +* `devices` (required) A space-separated list of devices to be recycled. -- `i-really-mean-it` (required) +* `i-really-mean-it` (required) An option that acts as a confirmation for performing the action. Example: @@ -371,7 +415,10 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [juju-docs-storage]: https://jaas.ai/docs/storage [juju-docs-actions]: https://jaas.ai/docs/actions [juju-docs-spaces]: https://jaas.ai/docs/spaces +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications [ceph-docs-removing-osds]: https://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/ [ceph-docs-network-ref]: http://docs.ceph.com/docs/master/rados/configuration/network-config-ref [lp-bugs-charm-ceph-osd]: https://bugs.launchpad.net/charm-ceph-osd/+filebug [cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html +[upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets +[cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 66080a9b..8ab31bb4 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -10,7 +10,8 @@ options: Optional configuration to support use of additional sources such as: . - ppa:myteam/ppa - - cloud:xenial-proposed/ocata + - cloud:bionic-ussuri + - cloud:xenial-proposed/queens - http://my.archive.com/ubuntu main . The last option should be used in conjunction with the key configuration @@ -76,9 +77,10 @@ options: type: string default: description: | - The device to use as a shared journal drive for all OSD's. By default - a journal partition will be created on each OSD volume device for use by - that OSD. + The device to use as a shared journal drive for all OSDs on a node. By + default a journal partition will be created on each OSD volume device for + use by that OSD. The default behaviour is also the fallback for the case + where the specified journal device does not exist on a node. . Only supported with ceph >= 0.48.3. bluestore-wal: From 1a6bc79a6d566101ee33b5f6bcb4f1d5f200334c Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 22 Jul 2020 12:38:48 -0400 Subject: [PATCH 2012/2699] Improve README This slight change is to sync with the ceph-osd README. Change-Id: I9012cac438a899828ee3f376a765c792d184a94a --- ceph-mon/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 722c8bbf..3bec4b07 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -69,16 +69,16 @@ chosen by using the value of 'distro'). ## Deployment -A cloud with three MON nodes is a typical design whereas three OSD nodes are +A cloud with three MON nodes is a typical design whereas three OSDs are considered the minimum. For example, to deploy a Ceph cluster consisting of -three OSDs and three MONs: +three OSDs (one per ceph-osd unit) and three MONs: juju deploy -n 3 --config ceph-osd.yaml ceph-osd juju deploy -n 3 --to lxd:0,lxd:1,lxd:2 ceph-mon juju add-relation ceph-osd:mon ceph-mon:osd -Here, a containerised MON is running alongside each OSD. We've assumed that the -machines spawned in the first command are assigned IDs of 0, 1, and 2. +Here, a containerised MON is running alongside each storage node. We've assumed +that the machines spawned in the first command are assigned IDs of 0, 1, and 2. 
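+
+For reference, the `ceph-osd.yaml` file named in the first command might
+contain (values purely illustrative):
+
+    ceph-osd:
+      osd-devices: /dev/sdb /dev/sdc
+      source: distro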
 By default, the monitor cluster will not be complete until three ceph-mon units
 have been deployed. This is to ensure that a quorum is achieved prior to the

From b195d291b536ee6b3b5bba285aeb3f81c736782b Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Thu, 4 Jun 2020 20:40:08 +0100
Subject: [PATCH 2013/2699] Add glance/nova-compute to bundles

This is to resolve the associated bug where cinder-ceph has grown the
capability to block if it doesn't have an appropriate relation to
nova-compute.

Also remove the trusty-icehouse bundle as it is no longer supported.

Note, on trusty-mitaka, the nova-cloud-controller is needed as otherwise
nova-compute complains and then dies, leaving a "nova-compute service is
not running" status message.

Also, change the detection of the status messages for the radosgw charm
as it has changed during the dev cycle.

Change-Id: I072b79705a6a7dfb0d5cbd16095e6ececb432ec3
Closes-Bug: #1881246
---
 ceph-proxy/tests/bundles/bionic-queens.yaml   | 17 ++++++++-
 ceph-proxy/tests/bundles/bionic-rocky.yaml    | 20 ++++++++++
 ceph-proxy/tests/bundles/bionic-stein.yaml    | 21 +++++++++-
 ceph-proxy/tests/bundles/bionic-train.yaml    | 21 +++++++++-
 ceph-proxy/tests/bundles/bionic-ussuri.yaml   | 20 ++++++++++
 ceph-proxy/tests/bundles/focal-ussuri.yaml    | 38 +++++++++++++++++++
 ceph-proxy/tests/bundles/trusty-icehouse.yaml | 26 -------------
 ceph-proxy/tests/bundles/trusty-mitaka.yaml   | 36 ++++++++++++++++++
 ceph-proxy/tests/bundles/xenial-mitaka.yaml   | 16 ++++++++
 ceph-proxy/tests/bundles/xenial-ocata.yaml    | 20 ++++++++++
 ceph-proxy/tests/bundles/xenial-pike.yaml     | 20 ++++++++++
 ceph-proxy/tests/bundles/xenial-queens.yaml   | 20 ++++++++++
 ceph-proxy/tests/tests.yaml                   |  9 +----
 13 files changed, 248 insertions(+), 36 deletions(-)
 delete mode 100644 ceph-proxy/tests/bundles/trusty-icehouse.yaml

diff --git a/ceph-proxy/tests/bundles/bionic-queens.yaml b/ceph-proxy/tests/bundles/bionic-queens.yaml
index cffef0e3..c6ec26e9 100644
--- a/ceph-proxy/tests/bundles/bionic-queens.yaml
+++ b/ceph-proxy/tests/bundles/bionic-queens.yaml
@@ -25,6 +25,12 @@ applications:
       glance-api-version: 2
       overwrite: "false"
     constraints: mem=2048
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
   cinder-ceph:
     charm: 'cs:~openstack-charmers-next/cinder-ceph'
     options:
@@ -64,4 +70,13 @@ relations:
     - 'cinder:storage-backend'
   - - 'cinder-ceph:ceph'
     - 'ceph-proxy:client'
-
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml
index 18fcac89..a71711c7 100644
--- a/ceph-proxy/tests/bundles/bionic-rocky.yaml
+++ b/ceph-proxy/tests/bundles/bionic-rocky.yaml
@@ -43,6 +43,16 @@ applications:
     options:
       openstack-origin: cloud:bionic-rocky
     constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'
     num_units: 1
@@ -77,3 +87,13 @@ relations:
     - 'cinder:storage-backend'
   - - 'cinder-ceph:ceph'
     - 'ceph-proxy:client'
+  - - 
'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml index 69e093b4..2c1f5359 100644 --- a/ceph-proxy/tests/bundles/bionic-stein.yaml +++ b/ceph-proxy/tests/bundles/bionic-stein.yaml @@ -43,6 +43,16 @@ applications: options: openstack-origin: cloud:bionic-stein constraints: mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-stein percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -77,4 +87,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' - + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-train.yaml b/ceph-proxy/tests/bundles/bionic-train.yaml index dda2126e..fd891bd6 100644 --- a/ceph-proxy/tests/bundles/bionic-train.yaml +++ b/ceph-proxy/tests/bundles/bionic-train.yaml @@ -43,6 +43,16 @@ applications: options: openstack-origin: cloud:bionic-train constraints: mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -77,4 +87,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' - + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-ussuri.yaml b/ceph-proxy/tests/bundles/bionic-ussuri.yaml index 14ac2aec..33e2c0e9 100644 --- a/ceph-proxy/tests/bundles/bionic-ussuri.yaml +++ b/ceph-proxy/tests/bundles/bionic-ussuri.yaml @@ -44,6 +44,16 @@ applications: openstack-origin: cloud:bionic-ussuri admin-password: openstack constraints: mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -78,3 +88,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git 
a/ceph-proxy/tests/bundles/focal-ussuri.yaml b/ceph-proxy/tests/bundles/focal-ussuri.yaml index 1a5af635..d917b1c9 100644 --- a/ceph-proxy/tests/bundles/focal-ussuri.yaml +++ b/ceph-proxy/tests/bundles/focal-ussuri.yaml @@ -23,11 +23,15 @@ machines: '11': '12': '13': + '14': + '15': applications: cinder-mysql-router: charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router keystone-mysql-router: charm: cs:~openstack-charmers-next/mysql-router @@ -117,6 +121,23 @@ applications: to: - '13' + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + relations: - - 'ceph-osd:mon' @@ -146,3 +167,20 @@ relations: - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/trusty-icehouse.yaml b/ceph-proxy/tests/bundles/trusty-icehouse.yaml deleted file mode 100644 index c9c5b8da..00000000 --- a/ceph-proxy/tests/bundles/trusty-icehouse.yaml +++ /dev/null @@ -1,26 +0,0 @@ -series: trusty -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml index b9774a6d..7dbef7a8 100644 --- a/ceph-proxy/tests/bundles/trusty-mitaka.yaml +++ b/ceph-proxy/tests/bundles/trusty-mitaka.yaml @@ -33,6 +33,21 @@ applications: glance-api-version: 2 overwrite: "false" constraints: mem=2048 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + nova-cloud-controller: + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka cinder-ceph: charm: 'cs:~openstack-charmers-next/cinder-ceph' options: @@ -77,3 +92,24 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + - - 'glance:image-service' + - 'nova-cloud-controller:image-service' + - - 'keystone:identity-service' + - 'nova-cloud-controller:identity-service' + - - 'nova-compute:cloud-compute' + - 'nova-cloud-controller:cloud-compute' + - - 
'percona-cluster:shared-db' + - 'nova-cloud-controller:shared-db' + - - 'rabbitmq-server:amqp' + - 'nova-cloud-controller:amqp' + diff --git a/ceph-proxy/tests/bundles/xenial-mitaka.yaml b/ceph-proxy/tests/bundles/xenial-mitaka.yaml index e9982e4d..0c4ee4d4 100644 --- a/ceph-proxy/tests/bundles/xenial-mitaka.yaml +++ b/ceph-proxy/tests/bundles/xenial-mitaka.yaml @@ -32,6 +32,12 @@ applications: charm: 'cs:~openstack-charmers-next/cinder-ceph' options: restrict-ceph-pools: True + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 keystone: charm: 'cs:~openstack-charmers-next/keystone' num_units: 1 @@ -67,3 +73,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml b/ceph-proxy/tests/bundles/xenial-ocata.yaml index d2a91ea7..d7aa8bd7 100644 --- a/ceph-proxy/tests/bundles/xenial-ocata.yaml +++ b/ceph-proxy/tests/bundles/xenial-ocata.yaml @@ -43,6 +43,16 @@ applications: options: openstack-origin: cloud:xenial-ocata constraints: mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -77,3 +87,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml index 5883015b..0c48f986 100644 --- a/ceph-proxy/tests/bundles/xenial-pike.yaml +++ b/ceph-proxy/tests/bundles/xenial-pike.yaml @@ -43,6 +43,16 @@ applications: options: openstack-origin: cloud:xenial-pike constraints: mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-pike percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -77,3 +87,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml index addf5f6c..9fd00d56 100644 --- a/ceph-proxy/tests/bundles/xenial-queens.yaml +++ b/ceph-proxy/tests/bundles/xenial-queens.yaml @@ -43,6 +43,16 @@ applications: options: openstack-origin: cloud:xenial-queens constraints: 
mem=1024 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:xenial-queens percona-cluster: charm: 'cs:~openstack-charmers-next/percona-cluster' num_units: 1 @@ -77,3 +87,13 @@ relations: - 'cinder:storage-backend' - - 'cinder-ceph:ceph' - 'ceph-proxy:client' + - - 'glance:image-service' + - 'nova-compute:image-service' + - - 'glance:identity-service' + - 'keystone:identity-service' + - - 'glance:shared-db' + - 'percona-cluster:shared-db' + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 35807d57..068d9e33 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -19,12 +19,7 @@ gate_bundles: dev_bundles: # Icehouse - trusty-icehouse - - trusty-juno - # Hammer - - trusty-kilo - - trusty-liberty # Jewel - - xenial-newton - xenial-ocata # Pike - xenial-pike @@ -36,8 +31,8 @@ target_deploy_status: workload-status: blocked workload-status-message: Ensure FSID and admin-key are set ceph-radosgw: - workload-status: blocked - workload-status-message: "Missing relations: mon" + workload-status: waiting + workload-status-message: "Incomplete relations: mon" cinder-ceph: workload-status: waiting workload-status-message: "Incomplete relations: ceph" From 2816e181781356d48e0ed855ff16e35822c3bbe2 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Jun 2020 14:22:18 +0100 Subject: [PATCH 2014/2699] Updates for 20.08 cycle start for groovy and libs - Adds groovy to the series in the metadata - Classic charms: sync charm-helpers. 
- Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild Change-Id: I1b49dc4f3e24483d7a64c6d9c2c55634e3b526e0 --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 8 +++---- ceph-proxy/charmhelpers/fetch/snap.py | 2 +- ceph-proxy/metadata.yaml | 23 ++++++++++--------- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index e59e0d1e..fbf01561 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -225,7 +225,7 @@ ('train', ['2.22.0', '2.23.0']), ('ussuri', - ['2.24.0']), + ['2.24.0', '2.25.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 95a0d82a..814d5c72 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -432,13 +432,13 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if 'pg_autoscaler' in enabled_manager_modules(): try: enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise @@ -504,7 +504,7 @@ def create(self): pool=self.name, name=self.app_name) except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + log('Could not set app name for pool {}'.format(self.name), level=WARNING) if nautilus_or_later: # Ensure we set the expected pool ratio update_pool(client=self.service, @@ -515,7 +515,7 @@ def create(self): enable_pg_autoscale(self.service, self.name) except CalledProcessError as e: log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e, level=WARNING)) + self.name, e), level=WARNING) except CalledProcessError: raise diff --git a/ceph-proxy/charmhelpers/fetch/snap.py b/ceph-proxy/charmhelpers/fetch/snap.py index 395836c7..fc70aa94 100644 --- a/ceph-proxy/charmhelpers/fetch/snap.py +++ b/ceph-proxy/charmhelpers/fetch/snap.py @@ -69,7 +69,7 @@ def _snap_exec(commands): .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' - .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN')) + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index f0e63b93..eafcca7a 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -2,19 +2,20 @@ name: ceph-proxy summary: Proxy to Juju external Ceph cluster maintainer: OpenStack Charmers description: | - Ceph is a distributed storage and network file system designed to provide - excellent performance, reliability, and scalability. + Ceph is a distributed storage and network file system designed to provide + excellent performance, reliability, and scalability. 
tags: - - openstack - - storage - - file-servers - - misc +- openstack +- storage +- file-servers +- misc series: - - xenial - - bionic - - eoan - - focal - - trusty +- xenial +- bionic +- eoan +- focal +- trusty +- groovy extra-bindings: public: cluster: From cc19dcafc162cd6843f76c6eb79566b187be36ca Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 13 Jul 2020 18:59:19 +0000 Subject: [PATCH 2015/2699] Sync charm-helpers for Victoria/Groovy updates This sync picks up the release and version details for Victoria/Groovy. Depends-On: https://review.opendev.org/#/c/733686/ Change-Id: I35e74aa488f0f67a9f98ea6807f22d503a1467cc --- ceph-proxy/charmhelpers/contrib/openstack/utils.py | 14 ++++++++++++++ ceph-proxy/charmhelpers/core/hookenv.py | 6 ++++-- ceph-proxy/charmhelpers/fetch/ubuntu.py | 8 ++++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index fbf01561..f21625d3 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -143,6 +143,7 @@ 'stein', 'train', 'ussuri', + 'victoria', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -164,6 +165,7 @@ ('disco', 'stein'), ('eoan', 'train'), ('focal', 'ussuri'), + ('groovy', 'victoria'), ]) @@ -186,6 +188,7 @@ ('2019.1', 'stein'), ('2019.2', 'train'), ('2020.1', 'ussuri'), + ('2020.2', 'victoria'), ]) # The ugly duckling - must list releases oldest to newest @@ -226,6 +229,8 @@ ['2.22.0', '2.23.0']), ('ussuri', ['2.24.0', '2.25.0']), + ('victoria', + ['2.25.0']), ]) # >= Liberty version->codename mapping @@ -241,6 +246,7 @@ ('19', 'stein'), ('20', 'train'), ('21', 'ussuri'), + ('22', 'victoria'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -253,6 +259,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -265,6 +272,7 @@ ('14', 'stein'), ('15', 'train'), ('16', 'ussuri'), + ('17', 'victoria'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -277,6 +285,7 @@ ('15', 'stein'), ('16', 'train'), ('17', 'ussuri'), + ('18', 'victoria'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -289,6 +298,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -301,6 +311,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -313,6 +324,7 @@ ('12', 'stein'), ('13', 'train'), ('14', 'ussuri'), + ('15', 'victoria'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -325,6 +337,7 @@ ('18', 'stein'), ('19', 'train'), ('20', 'ussuri'), + ('21', 'victoria'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -337,6 +350,7 @@ ('15', 'stein'), ('16', 'train'), ('18', 'ussuri'), + ('19', 'victoria'), ]), } diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py index d7c37c17..db7ce728 100644 --- a/ceph-proxy/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -372,8 +372,10 @@ def load_previous(self, path=None): try: self._prev_dict = json.load(f) except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return for k, v in 
copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 3ddaf0dd..33152840 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -190,6 +190,14 @@ 'ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', + # Victoria + 'victoria': 'focal-updates/victoria', + 'focal-victoria': 'focal-updates/victoria', + 'focal-victoria/updates': 'focal-updates/victoria', + 'focal-updates/victoria': 'focal-updates/victoria', + 'victoria/proposed': 'focal-proposed/victoria', + 'focal-victoria/proposed': 'focal-proposed/victoria', + 'focal-proposed/victoria': 'focal-proposed/victoria', } From b00b3ec5d9cc9d2cc325d6320267227e2014d345 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 27 Jul 2020 11:06:46 +0200 Subject: [PATCH 2016/2699] Add Victoria test bundles Change-Id: I45a84f4b252118527a0991a61b526960864dacd7 --- ceph-proxy/tests/bundles/focal-victoria.yaml | 186 ++++++++++++++++++ ceph-proxy/tests/bundles/groovy-victoria.yaml | 186 ++++++++++++++++++ ceph-proxy/tests/tests.yaml | 4 +- 3 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/tests/bundles/focal-victoria.yaml create mode 100644 ceph-proxy/tests/bundles/groovy-victoria.yaml diff --git a/ceph-proxy/tests/bundles/focal-victoria.yaml b/ceph-proxy/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..da9782f4 --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-victoria.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/groovy-victoria.yaml b/ceph-proxy/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..74a29970 --- /dev/null +++ b/ceph-proxy/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin distro + +series: groovy + +comment: +- 
'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 068d9e33..9292c618 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -23,6 +23,8 @@ dev_bundles: - xenial-ocata # Pike - xenial-pike + - focal-victoria + - groovy-victoria 
smoke_bundles: - bionic-train @@ -42,4 +44,4 @@ target_deploy_status: tests_options: force_deploy: - - focal-ussuri + - groovy-victoria From 5e283a6b0e1360d995bd499b2767e893c2cc13f9 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2017/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: If1bf8dbceba3aff96aeee6e5bf710185d1ed9612 --- ceph-fs/rebuild | 2 +- ceph-fs/src/test-requirements.txt | 17 ++++++----------- ceph-fs/src/tox.ini | 2 +- ceph-fs/test-requirements.txt | 18 ++++++++++++++---- ceph-fs/tox.ini | 5 +++++ 5 files changed, 27 insertions(+), 17 deletions(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index fddb2abd..9d7789d7 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -917e1d08-a4d0-11ea-8fb8-332b1d5ca7cc +d99b6438-d02c-11ea-9216-238ea56f93d6 diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 7e9d6093..d3c9be84 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -1,13 +1,8 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. -charm-tools>=2.4.4 -coverage>=3.6 -mock>=1.2 -flake8>=2.2.4,<=2.4.1 -stestr>=2.2.0 -requests>=2.18.4 +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack -pytz # workaround for 14.04 pip/tox -pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 9c27bbfa..07a7adcb 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -47,4 +47,4 @@ commands = functest-run-suite --keep-model --bundle {posargs} [testenv:venv] -commands = {posargs} \ No newline at end of file +commands = {posargs} diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 94b97968..0ab97f6e 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -1,9 +1,10 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools # # Lint and unit test requirements -flake8>=2.2.4 +flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 charms.reactive @@ -11,3 +12,12 @@ mock>=1.2 nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack +# +# Revisit for removal / mock improvement: +netifaces # vault +psycopg2-binary # vault +tenacity # vault +pbr # vault +cryptography # vault, keystone-saml-mellon +lxml # keystone-saml-mellon +hvac # vault, barbican-vault diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 5b41c1dd..afd48f02 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -50,6 +50,11 @@ basepython = python3.7 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/test-requirements.txt From 4c6c71bf65ea0a4b9df4d7cbdfe135458d38f30d Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2018/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: Ifd8c81255770f95980c7fd4117e6f07e44eea2ee --- ceph-mon/hooks/charmhelpers/__init__.py | 10 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 21 +- .../charmhelpers/contrib/openstack/utils.py | 17 +- ceph-mon/hooks/charmhelpers/core/host.py | 18 +- ceph-mon/lib/charms_ceph/utils.py | 298 +++++++++++++++--- ceph-mon/requirements.txt | 6 +- 6 files changed, 303 insertions(+), 67 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index 61ef9071..1f57ed2a 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -49,7 +49,8 @@ def deprecate(warning, date=None, log=None): """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month + + The date which is a string in semi-ISO8660 format indicates the year-month that the function is officially going to be removed. usage: @@ -62,10 +63,11 @@ def contributed_add_source_thing(...): The reason for passing the logging function (log) is so that hookenv.log can be used for a charm if needed. - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the + :param warning: String to indicate what is to be used instead. + :param date: Optional string in YYYY-MM format to indicate when the function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout + :param log: The log function to call in order to log. 
If None, logs to
+        stdout.
+    """
     def wrap(f):
diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index d775861b..14b80d96 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -18,14 +18,14 @@
 # Authors:
 #  Matthew Wedgwood

-import subprocess
-import pwd
+import glob
 import grp
 import os
-import glob
-import shutil
+import pwd
 import re
 import shlex
+import shutil
+import subprocess

 import yaml

 from charmhelpers.core.hookenv import (
@@ -265,6 +265,11 @@ def __init__(self, hostname=None, primary=True):
                 relation_set(relation_id=rid,
                              relation_settings={'primary': self.primary})
         self.remove_check_queue = set()

+    @classmethod
+    def does_nrpe_conf_dir_exist(cls):
+        """Return True if the nrpe_confdir directory exists."""
+        return os.path.isdir(cls.nrpe_confdir)
+
     def add_check(self, *args, **kwargs):
         shortname = None
         if kwargs.get('shortname') is None:
@@ -310,6 +315,12 @@ def write(self):
         nrpe_monitors = {}

         monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+
+        # check that the charm can write to the conf dir. If not, then nagios
+        # probably isn't installed, and we can defer.
+        if not self.does_nrpe_conf_dir_exist():
+            return
+
         for nrpecheck in self.checks:
             nrpecheck.write(self.nagios_context, self.hostname,
                             self.nagios_servicegroups)
@@ -400,7 +411,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
         upstart_init = '/etc/init/%s.conf' % svc
         sysv_init = '/etc/init.d/%s' % svc

-        if host.init_is_systemd():
+        if host.init_is_systemd(service_name=svc):
             nrpe.add_check(
                 shortname=svc,
                 description='process check {%s}' % unit_name,
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
index f21625d3..0aa797c4 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
@@ -2241,10 +2241,13 @@ def inform_peers_unit_state(state, relation_name='cluster'):
     if state not in UNIT_STATES:
         raise ValueError(
             "Setting invalid state {} for unit".format(state))
+    this_unit = local_unit()
     for r_id in relation_ids(relation_name):
+        juju_log('Telling peer behind relation {} that {} is {}'.format(
+            r_id, this_unit, state), 'DEBUG')
         relation_set(relation_id=r_id,
                      relation_settings={
-                         get_peer_key(local_unit()): state})
+                         get_peer_key(this_unit): state})


 def get_peers_unit_state(relation_name='cluster'):
@@ -2276,8 +2279,10 @@ def are_peers_ready(relation_name='cluster'):
     :returns: Whether all units are ready.
:rtype: bool """ - unit_states = get_peers_unit_state(relation_name) - return all(v == UNIT_READY for v in unit_states.values()) + unit_states = get_peers_unit_state(relation_name).values() + juju_log('{} peers are in the following states: {}'.format( + relation_name, unit_states), 'DEBUG') + return all(state == UNIT_READY for state in unit_states) def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): @@ -2360,7 +2365,9 @@ def get_api_application_status(): app_state, msg = get_api_unit_status() if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WORKLOAD_STATES.ACTIVE, 'Application Ready' + msg = 'Application Ready' else: - return WORKLOAD_STATES.WAITING, 'Some units are not ready' + app_state = WORKLOAD_STATES.WAITING + msg = 'Some units are not ready' + juju_log(msg, 'DEBUG') return app_state, msg diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index b33ac906..a785efdf 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -193,7 +193,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): @@ -227,7 +227,7 @@ def service_resume(service_name, init_dir="/etc/init", """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): @@ -257,7 +257,7 @@ def service(action, service_name, **kwargs): :param **kwargs: additional params to be passed to the service command in the form of key=value. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] @@ -281,7 +281,7 @@ def service_running(service_name, **kwargs): units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): @@ -311,8 +311,14 @@ def service_running(service_name, **kwargs): SYSTEMD_SYSTEM = '/run/systemd/system' -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" +def init_is_systemd(service_name=None): + """ + Returns whether the host uses systemd for the specified service. 
+ + @param Optional[str] service_name: specific name of service + """ + if str(service_name).startswith("snap."): + return True if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 1a51e7c5..72e6b921 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -2169,15 +2169,18 @@ def roll_monitor_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade monitor') -# TODO(jamespage): -# Mimic support will need to ensure that ceph-mgr daemons are also -# restarted during upgrades - probably through use of one of the -# high level systemd targets shipped by the packaging. -def upgrade_monitor(new_version): +# For E731 we can't assign a lambda, therefore, instead pass this. +def noop(): + pass + + +def upgrade_monitor(new_version, kick_function=None): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. """ + if kick_function is None: + kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) @@ -2186,6 +2189,7 @@ def upgrade_monitor(new_version): # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2198,7 @@ def upgrade_monitor(new_version): err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() try: if systemd(): service_stop('ceph-mon') @@ -2204,6 +2209,7 @@ def upgrade_monitor(new_version): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + kick_function() owner = ceph_user() @@ -2217,6 +2223,8 @@ def upgrade_monitor(new_version): group=owner, follow_links=True) + kick_function() + # Ensure that mon directory is user writable hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -2257,13 +2265,22 @@ def lock_and_roll(upgrade_key, service, my_name, version): start_timestamp)) monitor_key_set(upgrade_key, "{}_{}_{}_start".format( service, my_name, version), start_timestamp) + + # alive indication: + alive_function = ( + lambda: monitor_key_set( + upgrade_key, "{}_{}_{}_alive" + .format(service, my_name, version), time.time())) + dog = WatchDog(kick_interval=3 * 60, + kick_function=alive_function) + log("Rolling") # This should be quick if service == 'osd': - upgrade_osd(version) + upgrade_osd(version, kick_function=dog.kick_the_dog) elif service == 'mon': - upgrade_monitor(version) + upgrade_monitor(version, kick_function=dog.kick_the_dog) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -2294,45 +2311,225 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): """ log("Previous node is: {}".format(previous_node)) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. 
- - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( + previous_node_started_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (previous_node_start_time is not None and - ((current_timestamp - (10 * 60)) > - float(previous_node_start_time))): - # NOTE(jamespage): - # Previous node is probably dead as we've been waiting - # for 10 minutes - lets move on and upgrade - log("Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - # NOTE(jamespage) - # Previous node has not started, or started less than - # 10 minutes ago - sleep a random amount of time and - # then check again. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( + "{}_{}_{}_start".format(service, previous_node, version))) + previous_node_finished_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) + "{}_{}_{}_done".format(service, previous_node, version))) + previous_node_alive_time_f = ( + lambda: monitor_key_get( + upgrade_key, + "{}_{}_{}_alive".format(service, previous_node, version))) + + # wait for 30 minutes until the previous node starts. We don't proceed + # unless we get a start condition. + try: + WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) + except WatchDog.WatchDogTimeoutException: + log("Waited for previous node to start for 30 minutes. " + "It didn't start, so may have a serious issue. Continuing with " + "upgrade of this node.", + level=WARNING) + return + + # keep the time it started from this nodes' perspective. + previous_node_started_at = time.time() + log("Detected that previous node {} has started. Time now: {}" + .format(previous_node, previous_node_started_at)) + + # Now wait for the node to complete. The node may optionally be kicking + # with the *_alive key, which allows this node to wait longer as it 'knows' + # the other node is proceeding. + try: + WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, + complete_function=previous_node_finished_f, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60) + except WatchDog.WatchDogDeadException: + # previous node was kicking, but timed out; log this condition and move + # on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node started, but has now not ticked for 5 minutes. " + "Waited total of {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." + .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + except WatchDog.WatchDogTimeoutException: + # previous node never kicked, or simply took too long; log this + # condition and move on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node is taking too long; assuming it has died." + "Waited {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." 
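The rewritten wait_on_previous_node() drives everything through three monitor-KV accessors. The key scheme, reconstructed from the calls above (the values shown are illustrative):

    service, node, version = 'osd', 'ceph-osd-1', 'octopus'
    start_key = "{}_{}_{}_start".format(service, node, version)  # set once, when the node begins
    alive_key = "{}_{}_{}_alive".format(service, node, version)  # refreshed by each watchdog kick
    done_key = "{}_{}_{}_done".format(service, node, version)    # set when the node finishes

The waiter blocks for up to 30 minutes on the start key, then hands the alive and done accessors to WatchDog.timed_wait(), whose implementation follows.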
+ .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + + +class WatchDog(object): + """Watch a dog; basically a kickable timer with a timeout between two async + units. + + The idea is that you have an overall timeout and then can kick that timeout + with intermediary hits, with a max time between those kicks allowed. + + Note that this watchdog doesn't rely on the clock of the other side; just + roughly when it detects when the other side started. All timings are based + on the local clock. + + The kicker will not 'kick' more often than a set interval, regardless of + how often the kick_the_dog() function is called. The kicker provides a + function (lambda: -> None) that is called when the kick interval is + reached. + + The waiter calls the static method with a check function + (lambda: -> Boolean) that indicates when the wait should be over and the + maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval. + + So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick + interval, or however long it is expected for the key to propagate and to + allow for other delays. + + There is a compatibility mode where if the otherside never kicks, then it + simply waits for the compatability timer. + """ + + class WatchDogDeadException(Exception): + pass + + class WatchDogTimeoutException(Exception): + pass + + def __init__(self, kick_interval=3 * 60, kick_function=None): + """Initialise a new WatchDog + + :param kick_interval: the interval when this side kicks the other in + seconds. + :type kick_interval: Int + :param kick_function: The function to call that does the kick. + :type kick_function: Callable[] + """ + self.start_time = time.time() + self.last_run_func = None + self.last_kick_at = None + self.kick_interval = kick_interval + self.kick_f = kick_function + + def kick_the_dog(self): + """Might call the kick_function if it's time. + + This function can be called as frequently as needed, but will run the + self.kick_function after kick_interval seconds have passed. + """ + now = time.time() + if (self.last_run_func is None or + (now - self.last_run_func > self.kick_interval)): + if self.kick_f is not None: + self.kick_f() + self.last_run_func = now + self.last_kick_at = now + + @staticmethod + def wait_until(wait_f, timeout=10 * 60): + """Wait for timeout seconds until the passed function return True. + + :param wait_f: The function to call that will end the wait. + :type wait_f: Callable[[], Boolean] + :param timeout: The time to wait in seconds. + :type timeout: int + """ + start_time = time.time() + while(not wait_f()): + now = time.time() + if now > start_time + timeout: + raise WatchDog.WatchDogTimeoutException() + wait_time = random.randrange(5, 30) + log('wait_until: waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + + @staticmethod + def timed_wait(kicked_at_function, + complete_function, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60): + """Wait a maximum time with an intermediate 'kick' time. + + This function will wait for max_kick_interval seconds unless the + kicked_at_function() call returns a time that is not older that + max_kick_interval (in seconds). i.e. the other side can signal that it + is still doing things during the max_kick_interval as long as it kicks + at least every max_kick_interval seconds. + + The maximum wait is "wait_time", but the otherside must keep kicking + during this period. 
+ + The "compatibility_wait_time" is used if the other side never kicks + (i.e. the kicked_at_function() always returns None. In this case the + function wait up to "compatibility_wait_time". + + Note that the type of the return from the kicked_at_function is an + Optional[str], not a Float. The function will coerce this to a float + for the comparison. This represents the return value of + time.time() at the "other side". It's a string to simplify the + function obtaining the time value from the other side. + + The function raises WatchDogTimeoutException if either the + compatibility_wait_time or the wait_time are exceeded. + + The function raises WatchDogDeadException if the max_kick_interval is + exceeded. + + Note that it is possible that the first kick interval is extended to + compatibility_wait_time if the "other side" doesn't kick immediately. + The best solution is for the other side to kick early and often. + + :param kicked_at_function: The function to call to retrieve the time + that the other side 'kicked' at. None if the other side hasn't + kicked. + :type kicked_at_function: Callable[[], Optional[str]] + :param complete_function: The callable that returns True when done. + :type complete_function: Callable[[], Boolean] + :param wait_time: the maximum time to wait, even with kicks, in + seconds. + :type wait_time: int + :param compatibility_wait_time: The time to wait if no kicks are + received, in seconds. + :type compatibility_wait_time: int + :param max_kick_interval: The maximum time allowed between kicks before + the wait is over, in seconds: + :type max_kick_interval: int + :raises: WatchDog.WatchDogTimeoutException, + WatchDog.WatchDogDeadException + """ + start_time = time.time() + while True: + if complete_function(): + break + # the time when the waiting for unit last kicked. + kicked_at = kicked_at_function() + now = time.time() + if kicked_at is None: + # assume other end doesn't do alive kicks + if (now - start_time > compatibility_wait_time): + raise WatchDog.WatchDogTimeoutException() + else: + # other side is participating in kicks; must kick at least + # every 'max_kick_interval' to stay alive. + if (now - float(kicked_at) > max_kick_interval): + raise WatchDog.WatchDogDeadException() + if (now - start_time > wait_time): + raise WatchDog.WatchDogTimeoutException() + delay_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(delay_time)) + time.sleep(delay_time) def get_upgrade_position(osd_sorted_list, match_name): @@ -2412,11 +2609,14 @@ def roll_osd_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade osd') -def upgrade_osd(new_version): +def upgrade_osd(new_version, kick_function=None): """Upgrades the current osd :param new_version: str. The new version to upgrade to """ + if kick_function is None: + kick_function = noop + current_version = get_version() status_set("maintenance", "Upgrading osd") log("Current ceph version is {}".format(current_version)) @@ -2431,10 +2631,13 @@ def upgrade_osd(new_version): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() + try: # Upgrade the packages before restarting the daemons. 
status_set('maintenance', 'Upgrading packages to %s' % new_version) apt_install(packages=determine_packages(), fatal=True) + kick_function() # If the upgrade does not need an ownership update of any of the # directories in the osd service directory, then simply restart @@ -2458,13 +2661,16 @@ def upgrade_osd(new_version): os.listdir(CEPH_BASE_DIR)) non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), non_osd_dirs) - for path in non_osd_dirs: + for i, path in enumerate(non_osd_dirs): + if i % 100 == 0: + kick_function() update_owner(path) # Fast service restart wasn't an option because each of the OSD # directories need the ownership updated for all the files on # the OSD. Walk through the OSDs one-by-one upgrading the OSD. for osd_dir in _get_child_dirs(OSD_BASE_DIR): + kick_function() try: osd_num = _get_osd_num_from_dirname(osd_dir) _upgrade_single_osd(osd_num, osd_dir) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 343beed1..2316401b 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -13,5 +13,9 @@ netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -dnspython>=1.12.0 + +# dnspython 2.0.0 dropped py3.5 support +dnspython<2.0.0; python_version < '3.6' +dnspython; python_version >= '3.6' + psutil>=1.1.1,<2.0.0 From fa0e005175e2ec178f4e1f530ec48efe5aa99125 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2019/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: If07335447789d1521e1cd745bffff90e4900c7c1 --- ceph-osd/hooks/charmhelpers/__init__.py | 10 +++++---- .../charmhelpers/contrib/charmsupport/nrpe.py | 21 ++++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 17 ++++++++++----- ceph-osd/hooks/charmhelpers/core/host.py | 18 ++++++++++------ ceph-osd/requirements.txt | 6 +++++- ceph-osd/tox.ini | 2 +- 6 files changed, 52 insertions(+), 22 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index 61ef9071..1f57ed2a 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -49,7 +49,8 @@ def deprecate(warning, date=None, log=None): """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month + + The date which is a string in semi-ISO8660 format indicates the year-month that the function is officially going to be removed. usage: @@ -62,10 +63,11 @@ def contributed_add_source_thing(...): The reason for passing the logging function (log) is so that hookenv.log can be used for a charm if needed. - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the + :param warning: String to indicate what is to be used instead. + :param date: Optional string in YYYY-MM format to indicate when the function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout + :param log: The log function to call in order to log. 
If None, logs to + stdout """ def wrap(f): diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index d775861b..14b80d96 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,14 +18,14 @@ # Authors: # Matthew Wedgwood -import subprocess -import pwd +import glob import grp import os -import glob -import shutil +import pwd import re import shlex +import shutil +import subprocess import yaml from charmhelpers.core.hookenv import ( @@ -265,6 +265,11 @@ def __init__(self, hostname=None, primary=True): relation_set(relation_id=rid, relation_settings={'primary': self.primary}) self.remove_check_queue = set() + @classmethod + def does_nrpe_conf_dir_exist(cls): + """Return True if th nrpe_confdif directory exists.""" + return os.path.isdir(cls.nrpe_confdir) + def add_check(self, *args, **kwargs): shortname = None if kwargs.get('shortname') is None: @@ -310,6 +315,12 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + + # check that the charm can write to the conf dir. If not, then nagios + # probably isn't installed, and we can defer. + if not self.does_nrpe_conf_dir_exist(): + return + for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname, self.nagios_servicegroups) @@ -400,7 +411,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if host.init_is_systemd(): + if host.init_is_systemd(service_name=svc): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index f21625d3..0aa797c4 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -2241,10 +2241,13 @@ def inform_peers_unit_state(state, relation_name='cluster'): if state not in UNIT_STATES: raise ValueError( "Setting invalid state {} for unit".format(state)) + this_unit = local_unit() for r_id in relation_ids(relation_name): + juju_log('Telling peer behind relation {} that {} is {}'.format( + r_id, this_unit, state), 'DEBUG') relation_set(relation_id=r_id, relation_settings={ - get_peer_key(local_unit()): state}) + get_peer_key(this_unit): state}) def get_peers_unit_state(relation_name='cluster'): @@ -2276,8 +2279,10 @@ def are_peers_ready(relation_name='cluster'): :returns: Whether all units are ready. 
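Looping back to the deprecate() docstring tidied at the top of this patch, a concrete use makes its parameters easier to follow; the decorated helper and the replacement named in the warning are hypothetical, mirroring the usage block the docstring itself shows:

    from charmhelpers import deprecate
    from charmhelpers.core import hookenv

    @deprecate('use charmhelpers.fetch.add_source instead', date='2021-01',
               log=hookenv.log)
    def contributed_add_source_thing(*args, **kwargs):
        """Hypothetical deprecated helper."""

The first call logs the deprecation warning through hookenv.log here (stdout if no logger is passed); per the docstring, the date is informational rather than enforced.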
:rtype: bool """ - unit_states = get_peers_unit_state(relation_name) - return all(v == UNIT_READY for v in unit_states.values()) + unit_states = get_peers_unit_state(relation_name).values() + juju_log('{} peers are in the following states: {}'.format( + relation_name, unit_states), 'DEBUG') + return all(state == UNIT_READY for state in unit_states) def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): @@ -2360,7 +2365,9 @@ def get_api_application_status(): app_state, msg = get_api_unit_status() if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WORKLOAD_STATES.ACTIVE, 'Application Ready' + msg = 'Application Ready' else: - return WORKLOAD_STATES.WAITING, 'Some units are not ready' + app_state = WORKLOAD_STATES.WAITING + msg = 'Some units are not ready' + juju_log(msg, 'DEBUG') return app_state, msg diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index b33ac906..a785efdf 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -193,7 +193,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): @@ -227,7 +227,7 @@ def service_resume(service_name, init_dir="/etc/init", """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): @@ -257,7 +257,7 @@ def service(action, service_name, **kwargs): :param **kwargs: additional params to be passed to the service command in the form of key=value. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] @@ -281,7 +281,7 @@ def service_running(service_name, **kwargs): units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): @@ -311,8 +311,14 @@ def service_running(service_name, **kwargs): SYSTEMD_SYSTEM = '/run/systemd/system' -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" +def init_is_systemd(service_name=None): + """ + Returns whether the host uses systemd for the specified service. 
+ + @param Optional[str] service_name: specific name of service + """ + if str(service_name).startswith("snap."): + return True if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 343beed1..2316401b 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -13,5 +13,9 @@ netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -dnspython>=1.12.0 + +# dnspython 2.0.0 dropped py3.5 support +dnspython<2.0.0; python_version < '3.6' +dnspython; python_version >= '3.6' + psutil>=1.1.1,<2.0.0 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 8080ba6d..b835733a 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226,W504 +ignore = E402,E226 exclude = */charmhelpers From f75f39716c1e40478dc202538b6bd75a61c0dcef Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2020/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: I0003f91a5f451ed3234ae8f4bc273fa4b49f2b51 --- ceph-proxy/charmhelpers/__init__.py | 10 +++++---- .../charmhelpers/contrib/charmsupport/nrpe.py | 21 ++++++++++++++----- .../charmhelpers/contrib/openstack/utils.py | 17 ++++++++++----- ceph-proxy/charmhelpers/core/host.py | 18 ++++++++++------ ceph-proxy/requirements.txt | 6 +++++- 5 files changed, 51 insertions(+), 21 deletions(-) diff --git a/ceph-proxy/charmhelpers/__init__.py b/ceph-proxy/charmhelpers/__init__.py index 61ef9071..1f57ed2a 100644 --- a/ceph-proxy/charmhelpers/__init__.py +++ b/ceph-proxy/charmhelpers/__init__.py @@ -49,7 +49,8 @@ def deprecate(warning, date=None, log=None): """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month + + The date which is a string in semi-ISO8660 format indicates the year-month that the function is officially going to be removed. usage: @@ -62,10 +63,11 @@ def contributed_add_source_thing(...): The reason for passing the logging function (log) is so that hookenv.log can be used for a charm if needed. - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the + :param warning: String to indicate what is to be used instead. + :param date: Optional string in YYYY-MM format to indicate when the function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout + :param log: The log function to call in order to log. 
If None, logs to + stdout """ def wrap(f): diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index d775861b..14b80d96 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,14 +18,14 @@ # Authors: # Matthew Wedgwood -import subprocess -import pwd +import glob import grp import os -import glob -import shutil +import pwd import re import shlex +import shutil +import subprocess import yaml from charmhelpers.core.hookenv import ( @@ -265,6 +265,11 @@ def __init__(self, hostname=None, primary=True): relation_set(relation_id=rid, relation_settings={'primary': self.primary}) self.remove_check_queue = set() + @classmethod + def does_nrpe_conf_dir_exist(cls): + """Return True if th nrpe_confdif directory exists.""" + return os.path.isdir(cls.nrpe_confdir) + def add_check(self, *args, **kwargs): shortname = None if kwargs.get('shortname') is None: @@ -310,6 +315,12 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + + # check that the charm can write to the conf dir. If not, then nagios + # probably isn't installed, and we can defer. + if not self.does_nrpe_conf_dir_exist(): + return + for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname, self.nagios_servicegroups) @@ -400,7 +411,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if host.init_is_systemd(): + if host.init_is_systemd(service_name=svc): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index f21625d3..0aa797c4 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -2241,10 +2241,13 @@ def inform_peers_unit_state(state, relation_name='cluster'): if state not in UNIT_STATES: raise ValueError( "Setting invalid state {} for unit".format(state)) + this_unit = local_unit() for r_id in relation_ids(relation_name): + juju_log('Telling peer behind relation {} that {} is {}'.format( + r_id, this_unit, state), 'DEBUG') relation_set(relation_id=r_id, relation_settings={ - get_peer_key(local_unit()): state}) + get_peer_key(this_unit): state}) def get_peers_unit_state(relation_name='cluster'): @@ -2276,8 +2279,10 @@ def are_peers_ready(relation_name='cluster'): :returns: Whether all units are ready. 
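One subtlety of the are_peers_ready() body repeated below (it applies to every charm receiving this sync): all() over an empty iterable is True, so a unit whose peers have not yet published any state reports "peers ready". A minimal illustration, where UNIT_READY is the module's own constant:

    unit_states = {}.values()   # no peer has published a state yet
    all(state == UNIT_READY for state in unit_states)   # -> True

The new DEBUG line logging the collected states makes that case easy to spot in a deployment that goes active before its peers join.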
:rtype: bool """ - unit_states = get_peers_unit_state(relation_name) - return all(v == UNIT_READY for v in unit_states.values()) + unit_states = get_peers_unit_state(relation_name).values() + juju_log('{} peers are in the following states: {}'.format( + relation_name, unit_states), 'DEBUG') + return all(state == UNIT_READY for state in unit_states) def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): @@ -2360,7 +2365,9 @@ def get_api_application_status(): app_state, msg = get_api_unit_status() if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WORKLOAD_STATES.ACTIVE, 'Application Ready' + msg = 'Application Ready' else: - return WORKLOAD_STATES.WAITING, 'Some units are not ready' + app_state = WORKLOAD_STATES.WAITING + msg = 'Some units are not ready' + juju_log(msg, 'DEBUG') return app_state, msg diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index b33ac906..a785efdf 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -193,7 +193,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): @@ -227,7 +227,7 @@ def service_resume(service_name, init_dir="/etc/init", """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): @@ -257,7 +257,7 @@ def service(action, service_name, **kwargs): :param **kwargs: additional params to be passed to the service command in the form of key=value. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] @@ -281,7 +281,7 @@ def service_running(service_name, **kwargs): units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): @@ -311,8 +311,14 @@ def service_running(service_name, **kwargs): SYSTEMD_SYSTEM = '/run/systemd/system' -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" +def init_is_systemd(service_name=None): + """ + Returns whether the host uses systemd for the specified service. 
+ + @param Optional[str] service_name: specific name of service + """ + if str(service_name).startswith("snap."): + return True if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 343beed1..2316401b 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -13,5 +13,9 @@ netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -dnspython>=1.12.0 + +# dnspython 2.0.0 dropped py3.5 support +dnspython<2.0.0; python_version < '3.6' +dnspython; python_version >= '3.6' + psutil>=1.1.1,<2.0.0 From eb964c1082900cc85cd1c3593623721609dfadc1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2021/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: Iea035f14a61e6c5e300ec567a4b9d3c0921d0a71 --- ceph-radosgw/hooks/charmhelpers/__init__.py | 10 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 21 +- .../templates/openstack_https_frontend | 10 +- .../templates/openstack_https_frontend.conf | 10 +- .../charmhelpers/contrib/openstack/utils.py | 17 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 18 +- ceph-radosgw/lib/charms_ceph/utils.py | 298 +++++++++++++++--- ceph-radosgw/requirements.txt | 6 +- 8 files changed, 319 insertions(+), 71 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index 61ef9071..1f57ed2a 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -49,7 +49,8 @@ def deprecate(warning, date=None, log=None): """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month + + The date which is a string in semi-ISO8660 format indicates the year-month that the function is officially going to be removed. usage: @@ -62,10 +63,11 @@ def contributed_add_source_thing(...): The reason for passing the logging function (log) is so that hookenv.log can be used for a charm if needed. - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the + :param warning: String to indicate what is to be used instead. + :param date: Optional string in YYYY-MM format to indicate when the function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout + :param log: The log function to call in order to log. 
If None, logs to + stdout """ def wrap(f): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index d775861b..14b80d96 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,14 +18,14 @@ # Authors: # Matthew Wedgwood -import subprocess -import pwd +import glob import grp import os -import glob -import shutil +import pwd import re import shlex +import shutil +import subprocess import yaml from charmhelpers.core.hookenv import ( @@ -265,6 +265,11 @@ def __init__(self, hostname=None, primary=True): relation_set(relation_id=rid, relation_settings={'primary': self.primary}) self.remove_check_queue = set() + @classmethod + def does_nrpe_conf_dir_exist(cls): + """Return True if th nrpe_confdif directory exists.""" + return os.path.isdir(cls.nrpe_confdir) + def add_check(self, *args, **kwargs): shortname = None if kwargs.get('shortname') is None: @@ -310,6 +315,12 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + + # check that the charm can write to the conf dir. If not, then nagios + # probably isn't installed, and we can defer. + if not self.does_nrpe_conf_dir_exist(): + return + for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname, self.nagios_servicegroups) @@ -400,7 +411,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if host.init_is_systemd(): + if host.init_is_systemd(service_name=svc): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index f614b3fa..530719e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -6,8 +6,14 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + + # This section is based on Mozilla's recommendation + # as the "intermediate" profile as of July 7th, 2020. 
+ # https://wiki.mozilla.org/Security/Server_Side_TLS + SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 + SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + SSLHonorCipherOrder off + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index f614b3fa..530719e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -6,8 +6,14 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + + # This section is based on Mozilla's recommendation + # as the "intermediate" profile as of July 7th, 2020. + # https://wiki.mozilla.org/Security/Server_Side_TLS + SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 + SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + SSLHonorCipherOrder off + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f21625d3..0aa797c4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -2241,10 +2241,13 @@ def inform_peers_unit_state(state, relation_name='cluster'): if state not in UNIT_STATES: raise ValueError( "Setting invalid state {} for unit".format(state)) + this_unit = local_unit() for r_id in relation_ids(relation_name): + juju_log('Telling peer behind relation {} that {} is {}'.format( + r_id, this_unit, state), 'DEBUG') relation_set(relation_id=r_id, relation_settings={ - get_peer_key(local_unit()): state}) + get_peer_key(this_unit): state}) def get_peers_unit_state(relation_name='cluster'): @@ -2276,8 +2279,10 @@ def are_peers_ready(relation_name='cluster'): :returns: Whether all units are ready. 
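Returning to the openstack_https_frontend templates earlier in this patch: the change drops TLS 1.0/1.1 and the old permissive cipher string in favour of Mozilla's "intermediate" profile. For readers more fluent in Python than Apache directives, a rough equivalent with the standard ssl module (an illustration only; the charm itself just renders the Apache config):

    import ssl

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # "SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1"  ->  TLS 1.2 minimum
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    # Same ECDHE/DHE AES-GCM and ChaCha20 suites as the template
    ctx.set_ciphers(
        'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:'
        'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:'
        'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:'
        'DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384')
    # "SSLHonorCipherOrder off": leave client preference in charge, i.e.
    # do not set ssl.OP_CIPHER_SERVER_PREFERENCE on ctx.options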
:rtype: bool """ - unit_states = get_peers_unit_state(relation_name) - return all(v == UNIT_READY for v in unit_states.values()) + unit_states = get_peers_unit_state(relation_name).values() + juju_log('{} peers are in the following states: {}'.format( + relation_name, unit_states), 'DEBUG') + return all(state == UNIT_READY for state in unit_states) def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): @@ -2360,7 +2365,9 @@ def get_api_application_status(): app_state, msg = get_api_unit_status() if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WORKLOAD_STATES.ACTIVE, 'Application Ready' + msg = 'Application Ready' else: - return WORKLOAD_STATES.WAITING, 'Some units are not ready' + app_state = WORKLOAD_STATES.WAITING + msg = 'Some units are not ready' + juju_log(msg, 'DEBUG') return app_state, msg diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index b33ac906..a785efdf 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -193,7 +193,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): @@ -227,7 +227,7 @@ def service_resume(service_name, init_dir="/etc/init", """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): @@ -257,7 +257,7 @@ def service(action, service_name, **kwargs): :param **kwargs: additional params to be passed to the service command in the form of key=value. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] @@ -281,7 +281,7 @@ def service_running(service_name, **kwargs): units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): @@ -311,8 +311,14 @@ def service_running(service_name, **kwargs): SYSTEMD_SYSTEM = '/run/systemd/system' -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" +def init_is_systemd(service_name=None): + """ + Returns whether the host uses systemd for the specified service. 
+ + @param Optional[str] service_name: specific name of service + """ + if str(service_name).startswith("snap."): + return True if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 1a51e7c5..72e6b921 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -2169,15 +2169,18 @@ def roll_monitor_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade monitor') -# TODO(jamespage): -# Mimic support will need to ensure that ceph-mgr daemons are also -# restarted during upgrades - probably through use of one of the -# high level systemd targets shipped by the packaging. -def upgrade_monitor(new_version): +# For E731 we can't assign a lambda, therefore, instead pass this. +def noop(): + pass + + +def upgrade_monitor(new_version, kick_function=None): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. """ + if kick_function is None: + kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) @@ -2186,6 +2189,7 @@ def upgrade_monitor(new_version): # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2198,7 @@ def upgrade_monitor(new_version): err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() try: if systemd(): service_stop('ceph-mon') @@ -2204,6 +2209,7 @@ def upgrade_monitor(new_version): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + kick_function() owner = ceph_user() @@ -2217,6 +2223,8 @@ def upgrade_monitor(new_version): group=owner, follow_links=True) + kick_function() + # Ensure that mon directory is user writable hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -2257,13 +2265,22 @@ def lock_and_roll(upgrade_key, service, my_name, version): start_timestamp)) monitor_key_set(upgrade_key, "{}_{}_{}_start".format( service, my_name, version), start_timestamp) + + # alive indication: + alive_function = ( + lambda: monitor_key_set( + upgrade_key, "{}_{}_{}_alive" + .format(service, my_name, version), time.time())) + dog = WatchDog(kick_interval=3 * 60, + kick_function=alive_function) + log("Rolling") # This should be quick if service == 'osd': - upgrade_osd(version) + upgrade_osd(version, kick_function=dog.kick_the_dog) elif service == 'mon': - upgrade_monitor(version) + upgrade_monitor(version, kick_function=dog.kick_the_dog) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -2294,45 +2311,225 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): """ log("Previous node is: {}".format(previous_node)) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. 
- - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( + previous_node_started_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (previous_node_start_time is not None and - ((current_timestamp - (10 * 60)) > - float(previous_node_start_time))): - # NOTE(jamespage): - # Previous node is probably dead as we've been waiting - # for 10 minutes - lets move on and upgrade - log("Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - # NOTE(jamespage) - # Previous node has not started, or started less than - # 10 minutes ago - sleep a random amount of time and - # then check again. - wait_time = random.randrange(5, 30) - log('waiting for {} seconds'.format(wait_time)) - time.sleep(wait_time) - previous_node_finished = monitor_key_exists( + "{}_{}_{}_start".format(service, previous_node, version))) + previous_node_finished_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) + "{}_{}_{}_done".format(service, previous_node, version))) + previous_node_alive_time_f = ( + lambda: monitor_key_get( + upgrade_key, + "{}_{}_{}_alive".format(service, previous_node, version))) + + # wait for 30 minutes until the previous node starts. We don't proceed + # unless we get a start condition. + try: + WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) + except WatchDog.WatchDogTimeoutException: + log("Waited for previous node to start for 30 minutes. " + "It didn't start, so may have a serious issue. Continuing with " + "upgrade of this node.", + level=WARNING) + return + + # keep the time it started from this nodes' perspective. + previous_node_started_at = time.time() + log("Detected that previous node {} has started. Time now: {}" + .format(previous_node, previous_node_started_at)) + + # Now wait for the node to complete. The node may optionally be kicking + # with the *_alive key, which allows this node to wait longer as it 'knows' + # the other node is proceeding. + try: + WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, + complete_function=previous_node_finished_f, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60) + except WatchDog.WatchDogDeadException: + # previous node was kicking, but timed out; log this condition and move + # on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node started, but has now not ticked for 5 minutes. " + "Waited total of {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." + .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + except WatchDog.WatchDogTimeoutException: + # previous node never kicked, or simply took too long; log this + # condition and move on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node is taking too long; assuming it has died." + "Waited {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." 
+ .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + + +class WatchDog(object): + """Watch a dog; basically a kickable timer with a timeout between two async + units. + + The idea is that you have an overall timeout and then can kick that timeout + with intermediary hits, with a max time between those kicks allowed. + + Note that this watchdog doesn't rely on the clock of the other side; just + roughly when it detects when the other side started. All timings are based + on the local clock. + + The kicker will not 'kick' more often than a set interval, regardless of + how often the kick_the_dog() function is called. The kicker provides a + function (lambda: -> None) that is called when the kick interval is + reached. + + The waiter calls the static method with a check function + (lambda: -> Boolean) that indicates when the wait should be over and the + maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval. + + So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick + interval, or however long it is expected for the key to propagate and to + allow for other delays. + + There is a compatibility mode where if the otherside never kicks, then it + simply waits for the compatability timer. + """ + + class WatchDogDeadException(Exception): + pass + + class WatchDogTimeoutException(Exception): + pass + + def __init__(self, kick_interval=3 * 60, kick_function=None): + """Initialise a new WatchDog + + :param kick_interval: the interval when this side kicks the other in + seconds. + :type kick_interval: Int + :param kick_function: The function to call that does the kick. + :type kick_function: Callable[] + """ + self.start_time = time.time() + self.last_run_func = None + self.last_kick_at = None + self.kick_interval = kick_interval + self.kick_f = kick_function + + def kick_the_dog(self): + """Might call the kick_function if it's time. + + This function can be called as frequently as needed, but will run the + self.kick_function after kick_interval seconds have passed. + """ + now = time.time() + if (self.last_run_func is None or + (now - self.last_run_func > self.kick_interval)): + if self.kick_f is not None: + self.kick_f() + self.last_run_func = now + self.last_kick_at = now + + @staticmethod + def wait_until(wait_f, timeout=10 * 60): + """Wait for timeout seconds until the passed function return True. + + :param wait_f: The function to call that will end the wait. + :type wait_f: Callable[[], Boolean] + :param timeout: The time to wait in seconds. + :type timeout: int + """ + start_time = time.time() + while(not wait_f()): + now = time.time() + if now > start_time + timeout: + raise WatchDog.WatchDogTimeoutException() + wait_time = random.randrange(5, 30) + log('wait_until: waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + + @staticmethod + def timed_wait(kicked_at_function, + complete_function, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60): + """Wait a maximum time with an intermediate 'kick' time. + + This function will wait for max_kick_interval seconds unless the + kicked_at_function() call returns a time that is not older that + max_kick_interval (in seconds). i.e. the other side can signal that it + is still doing things during the max_kick_interval as long as it kicks + at least every max_kick_interval seconds. + + The maximum wait is "wait_time", but the otherside must keep kicking + during this period. 
+ + The "compatibility_wait_time" is used if the other side never kicks + (i.e. the kicked_at_function() always returns None. In this case the + function wait up to "compatibility_wait_time". + + Note that the type of the return from the kicked_at_function is an + Optional[str], not a Float. The function will coerce this to a float + for the comparison. This represents the return value of + time.time() at the "other side". It's a string to simplify the + function obtaining the time value from the other side. + + The function raises WatchDogTimeoutException if either the + compatibility_wait_time or the wait_time are exceeded. + + The function raises WatchDogDeadException if the max_kick_interval is + exceeded. + + Note that it is possible that the first kick interval is extended to + compatibility_wait_time if the "other side" doesn't kick immediately. + The best solution is for the other side to kick early and often. + + :param kicked_at_function: The function to call to retrieve the time + that the other side 'kicked' at. None if the other side hasn't + kicked. + :type kicked_at_function: Callable[[], Optional[str]] + :param complete_function: The callable that returns True when done. + :type complete_function: Callable[[], Boolean] + :param wait_time: the maximum time to wait, even with kicks, in + seconds. + :type wait_time: int + :param compatibility_wait_time: The time to wait if no kicks are + received, in seconds. + :type compatibility_wait_time: int + :param max_kick_interval: The maximum time allowed between kicks before + the wait is over, in seconds: + :type max_kick_interval: int + :raises: WatchDog.WatchDogTimeoutException, + WatchDog.WatchDogDeadException + """ + start_time = time.time() + while True: + if complete_function(): + break + # the time when the waiting for unit last kicked. + kicked_at = kicked_at_function() + now = time.time() + if kicked_at is None: + # assume other end doesn't do alive kicks + if (now - start_time > compatibility_wait_time): + raise WatchDog.WatchDogTimeoutException() + else: + # other side is participating in kicks; must kick at least + # every 'max_kick_interval' to stay alive. + if (now - float(kicked_at) > max_kick_interval): + raise WatchDog.WatchDogDeadException() + if (now - start_time > wait_time): + raise WatchDog.WatchDogTimeoutException() + delay_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(delay_time)) + time.sleep(delay_time) def get_upgrade_position(osd_sorted_list, match_name): @@ -2412,11 +2609,14 @@ def roll_osd_cluster(new_version, upgrade_key): status_set('blocked', 'failed to upgrade osd') -def upgrade_osd(new_version): +def upgrade_osd(new_version, kick_function=None): """Upgrades the current osd :param new_version: str. The new version to upgrade to """ + if kick_function is None: + kick_function = noop + current_version = get_version() status_set("maintenance", "Upgrading osd") log("Current ceph version is {}".format(current_version)) @@ -2431,10 +2631,13 @@ def upgrade_osd(new_version): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() + try: # Upgrade the packages before restarting the daemons. 
status_set('maintenance', 'Upgrading packages to %s' % new_version) apt_install(packages=determine_packages(), fatal=True) + kick_function() # If the upgrade does not need an ownership update of any of the # directories in the osd service directory, then simply restart @@ -2458,13 +2661,16 @@ def upgrade_osd(new_version): os.listdir(CEPH_BASE_DIR)) non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), non_osd_dirs) - for path in non_osd_dirs: + for i, path in enumerate(non_osd_dirs): + if i % 100 == 0: + kick_function() update_owner(path) # Fast service restart wasn't an option because each of the OSD # directories need the ownership updated for all the files on # the OSD. Walk through the OSDs one-by-one upgrading the OSD. for osd_dir in _get_child_dirs(OSD_BASE_DIR): + kick_function() try: osd_num = _get_osd_num_from_dirname(osd_dir) _upgrade_single_osd(osd_num, osd_dir) diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 343beed1..2316401b 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -13,5 +13,9 @@ netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -dnspython>=1.12.0 + +# dnspython 2.0.0 dropped py3.5 support +dnspython<2.0.0; python_version < '3.6' +dnspython; python_version >= '3.6' + psutil>=1.1.1,<2.0.0 From 09a426781c51f611c669cb386e6b7644ed726b1c Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 27 Jul 2020 20:09:24 +0100 Subject: [PATCH 2022/2699] Release sync for 20.08 - Classic charms: sync charm-helpers. - Classic ceph based charms: also sync charms.ceph - Reactive charms: trigger a rebuild - sync tox.ini - sync requirements.txt and test-requirements.txt Change-Id: I6147ebe469e3227f4d5b59374ab0262255b17a79 --- ceph-rbd-mirror/rebuild | 2 +- ceph-rbd-mirror/tox.ini | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index ff2b57ee..494b96dc 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -918d9792-a4d0-11ea-8a27-a78395c748ed +d9a6ecf4-d02c-11ea-ba04-8b57253ede7e diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 5b41c1dd..afd48f02 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -50,6 +50,11 @@ basepython = python3.7 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/test-requirements.txt From 0fad639c0f08d9ead99fd970f89eb6caa2c0c85e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 31 Jul 2020 15:55:44 +0000 Subject: [PATCH 2023/2699] Switch charm to use charmcraft for building Switch charm to use charmcraft for building, which involved: * Remove git submodules * Define charms runtime requirements in requirements.txt and pin to specific hashes. * Update .gitignore to ignore zip charm created by build process. * ops-openstack and ops interfaces were refactored so src/charm.py was updated accordingly. * Point functional test bundles at zip file. 
* Remove old charm build method (charm-init.sh) --- ceph-iscsi/.gitignore | 5 ++- ceph-iscsi/.gitmodules | 15 --------- ceph-iscsi/build-requirements.txt | 2 +- ceph-iscsi/charm-init.sh | 32 ------------------- ceph-iscsi/hooks/install | 1 - ceph-iscsi/lib/README.txt | 1 - ceph-iscsi/mod/charm-helpers | 1 - ceph-iscsi/mod/operator | 1 - ceph-iscsi/mod/ops-interface-ceph-client | 1 - ceph-iscsi/mod/ops-interface-tls-certificates | 1 - ceph-iscsi/mod/ops-openstack | 1 - ceph-iscsi/requirements.txt | 5 +++ ceph-iscsi/src/charm.py | 28 ++++++++-------- ceph-iscsi/tests/bundles/focal.yaml | 3 +- .../overlays/local-charm-overlay.yaml.j2 | 3 ++ ceph-iscsi/tox.ini | 4 +-- .../unit_tests/test_ceph_iscsi_charm.py | 1 - .../test_interface_ceph_iscsi_peer.py | 2 +- 18 files changed, 33 insertions(+), 74 deletions(-) delete mode 100755 ceph-iscsi/charm-init.sh delete mode 120000 ceph-iscsi/hooks/install delete mode 100644 ceph-iscsi/lib/README.txt delete mode 160000 ceph-iscsi/mod/charm-helpers delete mode 160000 ceph-iscsi/mod/operator delete mode 160000 ceph-iscsi/mod/ops-interface-ceph-client delete mode 160000 ceph-iscsi/mod/ops-interface-tls-certificates delete mode 160000 ceph-iscsi/mod/ops-openstack create mode 100644 ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore index 18cac16c..4e3756b3 100644 --- a/ceph-iscsi/.gitignore +++ b/ceph-iscsi/.gitignore @@ -1,6 +1,9 @@ .tox -.swp +**/*.swp __pycache__ .stestr/ lib/* !lib/README.txt +build +ceph-iscsi.charm + diff --git a/ceph-iscsi/.gitmodules b/ceph-iscsi/.gitmodules index 41ffcfcd..e69de29b 100644 --- a/ceph-iscsi/.gitmodules +++ b/ceph-iscsi/.gitmodules @@ -1,15 +0,0 @@ -[submodule "mod/operator"] - path = mod/operator - url = https://github.com/canonical/operator -[submodule "mod/ops-interface-ceph-client"] - path = mod/ops-interface-ceph-client - url = https://github.com/openstack-charmers/ops-interface-ceph-client.git -[submodule "mod/ops-openstack"] - path = mod/ops-openstack - url = https://github.com/openstack-charmers/ops-openstack.git -[submodule "mod/charm-helpers"] - path = mod/charm-helpers - url = https://github.com/juju/charm-helpers.git -[submodule "mod/ops-interface-tls-certificates"] - path = mod/ops-interface-tls-certificates - url = https://github.com/openstack-charmers/ops-interface-tls-certificates.git diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt index 1b770a72..1c2a91db 100644 --- a/ceph-iscsi/build-requirements.txt +++ b/ceph-iscsi/build-requirements.txt @@ -1 +1 @@ -mod/charm-helpers +charmcraft==0.3.0 diff --git a/ceph-iscsi/charm-init.sh b/ceph-iscsi/charm-init.sh deleted file mode 100755 index 58e58bdf..00000000 --- a/ceph-iscsi/charm-init.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -UPDATE="" -while getopts ":u" opt; do - case $opt in - u) UPDATE=true;; - esac -done - -git submodule update --init - -# pbr seems unable to detect the current tag when installing -# from a local checkout using a git submodule. To work around this -# manually set the version. 
-export PBR_VERSION=$(cd mod/charm-helpers; git describe --tags) - -if [[ -z "$UPDATE" ]]; then - pip install -t lib -r build-requirements.txt -else - git -C mod/operator pull origin master - git -C mod/ops-openstack pull origin master - git -C mod/ops-interface-ceph-client pull origin master - git -C mod/ops-interface-tls-certificates pull origin master - git -C mod/charm-helpers pull origin master - pip install -t lib -r build-requirements.txt --upgrade -fi - -ln -f -t lib -s ../mod/operator/ops -ln -f -t lib -s ../mod/ops-interface-ceph-client/interface_ceph_client.py -ln -f -t lib -s ../mod/ops-openstack/ops_openstack.py -ln -f -t lib -s ../mod/ops-openstack/adapters.py -ln -f -t lib -s ../mod/ops-interface-tls-certificates/ca_client.py diff --git a/ceph-iscsi/hooks/install b/ceph-iscsi/hooks/install deleted file mode 120000 index 25b1f68f..00000000 --- a/ceph-iscsi/hooks/install +++ /dev/null @@ -1 +0,0 @@ -../src/charm.py \ No newline at end of file diff --git a/ceph-iscsi/lib/README.txt b/ceph-iscsi/lib/README.txt deleted file mode 100644 index 7931d0d4..00000000 --- a/ceph-iscsi/lib/README.txt +++ /dev/null @@ -1 +0,0 @@ -Only generated files should be in here diff --git a/ceph-iscsi/mod/charm-helpers b/ceph-iscsi/mod/charm-helpers deleted file mode 160000 index 87fc7ee5..00000000 --- a/ceph-iscsi/mod/charm-helpers +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 87fc7ee50662f14abe55d9fe0d02ec20d128379f diff --git a/ceph-iscsi/mod/operator b/ceph-iscsi/mod/operator deleted file mode 160000 index 59dd0987..00000000 --- a/ceph-iscsi/mod/operator +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 59dd09875421668366ffcaff123bec34a0054ec3 diff --git a/ceph-iscsi/mod/ops-interface-ceph-client b/ceph-iscsi/mod/ops-interface-ceph-client deleted file mode 160000 index 088b68f5..00000000 --- a/ceph-iscsi/mod/ops-interface-ceph-client +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 088b68f5b36f76ac44056ccabbe93396c76de98d diff --git a/ceph-iscsi/mod/ops-interface-tls-certificates b/ceph-iscsi/mod/ops-interface-tls-certificates deleted file mode 160000 index f6e6ec1b..00000000 --- a/ceph-iscsi/mod/ops-interface-tls-certificates +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f6e6ec1b1d6a317aaeb2cb696e3ec7c1a7c3cd09 diff --git a/ceph-iscsi/mod/ops-openstack b/ceph-iscsi/mod/ops-openstack deleted file mode 160000 index 460b3898..00000000 --- a/ceph-iscsi/mod/ops-openstack +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 460b389811d25514c7ac280ff7c8b2f7c17dd790 diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index 8299f55a..30a07978 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -1 +1,6 @@ # requirements +git+https://github.com/juju/charm-helpers.git@87fc7ee5#egg=charmhelpers +git+https://github.com/canonical/operator.git@169794cdd#egg=ops +git+https://github.com/openstack-charmers/ops-interface-ceph-client@cc10f29d4#egg=interface_ceph_client +git+https://github.com/openstack-charmers/ops-openstack@ea51b43e#egg=ops_openstack +git+https://github.com/openstack-charmers/ops-interface-tls-certificates@2ec41b60#egg=ca_client diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index ab2f10b2..53d81c11 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -18,18 +18,18 @@ import ops.model import charmhelpers.core.host as ch_host import charmhelpers.core.templating as ch_templating -import interface_ceph_client +import interface_ceph_client.ceph_client as ceph_client import interface_ceph_iscsi_peer -import ca_client +import 
interface_tls_certificates.ca_client as ca_client -import adapters -import ops_openstack +import ops_openstack.adapters +import ops_openstack.core import gwcli_client import cryptography.hazmat.primitives.serialization as serialization logger = logging.getLogger(__name__) -class CephClientAdapter(adapters.OpenStackOperRelationAdapter): +class CephClientAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter): def __init__(self, relation): super(CephClientAdapter, self).__init__(relation) @@ -48,7 +48,7 @@ def key(self): return self.relation.get_relation_data()['key'] -class PeerAdapter(adapters.OpenStackOperRelationAdapter): +class PeerAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter): def __init__(self, relation): super(PeerAdapter, self).__init__(relation) @@ -71,7 +71,8 @@ def trusted_ips(self): return ' '.join(sorted(ips)) -class TLSCertificatesAdapter(adapters.OpenStackOperRelationAdapter): +class TLSCertificatesAdapter( + ops_openstack.adapters.OpenStackOperRelationAdapter): def __init__(self, relation): super(TLSCertificatesAdapter, self).__init__(relation) @@ -84,7 +85,8 @@ def enable_tls(self): return False -class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): +class CephISCSIGatewayAdapters( + ops_openstack.adapters.OpenStackRelationAdapters): relation_adapters = { 'ceph-client': CephClientAdapter, @@ -93,7 +95,7 @@ class CephISCSIGatewayAdapters(adapters.OpenStackRelationAdapters): } -class CephISCSIGatewayCharmBase(ops_openstack.OSBaseCharm): +class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): state = StoredState() PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] @@ -136,7 +138,7 @@ def __init__(self, framework): self.state.set_default( target_created=False, enable_tls=False) - self.ceph_client = interface_ceph_client.CephClientRequires( + self.ceph_client = ceph_client.CephClientRequires( self, 'ceph-client') self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( @@ -345,18 +347,18 @@ def on_create_target_action(self, event): event.set_results({'iqn': target}) -@ops_openstack.charm_class +@ops_openstack.core.charm_class class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase): state = StoredState() release = 'jewel' -@ops_openstack.charm_class +@ops_openstack.core.charm_class class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase): state = StoredState() release = 'octopus' if __name__ == '__main__': - main(ops_openstack.get_charm_class_for_release()) + main(ops_openstack.core.get_charm_class_for_release()) diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 598a5ad1..3c5b3e00 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -1,3 +1,4 @@ +local_overlay_enabled: False series: focal machines: '0': @@ -22,7 +23,7 @@ applications: to: - '7' ceph-iscsi: - charm: ../../ + charm: ../../ceph-iscsi.charm num_units: 2 options: rbd-metadata-pool: tmbtil diff --git a/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 new file mode 100644 index 00000000..5cbfaf2b --- /dev/null +++ b/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 @@ -0,0 +1,3 @@ +applications: + ceph-iscsi: + charm: ../../ceph-iscsi.charm diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index cf9f42d3..ecb76c40 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -82,9 +82,9 @@ commands = {posargs} [testenv:build] basepython = python3 -deps = +deps = 
-r{toxinidir}/build-requirements.txt commands = - ./charm-init.sh + charmcraft build [testenv:update-deps] basepython = python3 diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 43ed6615..f8561f5d 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -159,7 +159,6 @@ def network_get(self, endpoint_name, relation_id=None): self.harness._backend = _TestingOPSModelBackend( self.harness._unit_name, self.harness._meta) self.harness._model = model.Model( - self.harness._unit_name, self.harness._meta, self.harness._backend) self.harness._framework = framework.Framework( diff --git a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py index f57b8b4c..db00964a 100644 --- a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py @@ -52,7 +52,7 @@ def on_ready_peers(self, event): receiver = TestReceiver(self.harness.framework, 'receiver') self.harness.framework.observe(self.peers.on.ready_peers, - receiver) + receiver.on_ready_peers) relation_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( relation_id, From 890f2072ef9a1e339a37402c96f1f40f88176634 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2020 13:54:51 +0000 Subject: [PATCH 2024/2699] Switch zaza* back to master branch --- ceph-iscsi/test-requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index 8d44cd78..7e9d6093 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -7,7 +7,7 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 -git+https://github.com/gnuoy/zaza.git@force-focal#egg=zaza -git+https://github.com/gnuoy/zaza-openstack-tests.git@ceph-iscsi-tests#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pytz # workaround for 14.04 pip/tox pyudev # for ceph-* charm unit tests (not mocked?) From 68a616b03487e0d9894712163976fe5bd4364565 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 22 Jul 2020 20:37:15 -0400 Subject: [PATCH 2025/2699] Improve README This improvement is part of a wave of polish in preparation for the launch of the Ceph product. Change-Id: Icec5faee1c9e693aa149d571f322e66a0899a14c --- ceph-proxy/README.md | 73 +++++++++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 17 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 7951addc..4a7b4731 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -1,35 +1,74 @@ # Overview -Ceph is a distributed storage and network file system designed to provide +[Ceph][ceph-upstream] is a unified, distributed storage system designed for excellent performance, reliability, and scalability. -This charm allows connecting an existing Ceph deployment with a Juju environment. +The ceph-proxy charm deploys a proxy that acts as a [ceph-mon][ceph-mon-charm] +application for an external Ceph cluster. It joins a non-charmed Ceph cluster +to a Juju model. # Usage -Your config.yaml needs to provide the monitor-hosts and fsid options like below: +## Configuration + +This section covers common and/or important configuration options. 
See file +`config.yaml` for the full list of options, along with their descriptions and +default values. See the [Juju documentation][juju-docs-config-apps] for details +on configuring applications. + +#### `fsid` + +The `fsid` option supplies the UUID of the external cluster. + +#### `admin-key` + +The `admin-key` option supplies the admin Cephx key of the external cluster. + +#### `monitor-hosts` + +The `monitor-hosts` option supplies the network addresses (and ports) of the +Monitors of the external cluster. + +## Deployment + +Let file ``ceph-proxy.yaml`` contain the deployment configuration: -`config.yaml`: ```yaml -ceph-proxy: - monitor-hosts: IP_ADDRESS:PORT IP ADDRESS:PORT - fsid: FSID + ceph-proxy: + fsid: a4f1fb08-c83d-11ea-8f4a-635b3b062931 + admin-key: AQCJvBFfWX+GLhAAln5dFd1rZekcGLyMmy58bQ== + monitor-hosts: '10.246.114.21:6789 10.246.114.22:6789 10.246.114.7:6789' ``` -You must then provide this configuration to the new deployment: `juju deploy ceph-proxy -c config.yaml`. +To deploy: + + juju deploy --config ceph-proxy.yaml ceph-proxy + +Now add relations as you normally would between a ceph-mon application and +another application, except substitute ceph-proxy for ceph-mon. For instance, +to use the external Ceph cluster as the backend for an existing glance +application: + + juju add-relation ceph-proxy:client glance:ceph -This charm noes NOT insert itself between the clusters, but merely makes the external cluster available through Juju's environment by exposing the same relations that the existing ceph charms do. +## Actions -# Contact Information +Many of the ceph-mon charm's actions are supported. See file `config.yaml` for +the full list of options, along with their descriptions and default values. See +the [Juju documentation][juju-docs-config-apps] for details on configuring +applications. -## Authors +# Bugs -- Chris MacNaughton +Please report bugs on [Launchpad][lp-bugs-charm-ceph-proxy]. -Report bugs on [Launchpad](http://bugs.launchpad.net/charm-ceph-proxy/+filebug) +For general charm questions refer to the [OpenStack Charm Guide][cg]. -## Ceph + -- [Ceph website](http://ceph.com) -- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/) -- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph) +[ceph-upstream]: https://ceph.io +[cg]: https://docs.openstack.org/charm-guide +[ceph-mon-charm]: https://jaas.ai/ceph-mon +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[lp-bugs-charm-ceph-proxy]: https://bugs.launchpad.net/charm-ceph-proxy/+filebug From 2881cacbd1021c04152d174d7f09dca43763ee79 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 4 Aug 2020 14:18:59 -0400 Subject: [PATCH 2026/2699] Fix actions section of the README Change-Id: If12cffe0ee061710030613e1a13a0196c7b878e6 --- ceph-proxy/README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index 4a7b4731..ad00a340 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -53,10 +53,8 @@ application: ## Actions -Many of the ceph-mon charm's actions are supported. See file `config.yaml` for -the full list of options, along with their descriptions and default values. See -the [Juju documentation][juju-docs-config-apps] for details on configuring -applications. +Many of the ceph-mon charm's actions are supported. See file `actions.yaml` for +the full list of actions, along with their descriptions. 
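
As a companion to the Deployment section above, here is a minimal sketch for
collecting the three configuration values from the external cluster. It
assumes shell access to a node of the non-charmed cluster that holds an admin
keyring; `ceph fsid` and `ceph auth get-key client.admin` are standard Ceph
CLI commands, while the wrapper function is illustrative only:

```python
# Gather the values that ceph-proxy.yaml needs from the external cluster.
# Run on a host with /etc/ceph/ceph.conf and an admin keyring in place.
import subprocess

def ceph(*args):
    """Run a ceph CLI command and return its trimmed output."""
    return subprocess.check_output(('ceph',) + args).decode().strip()

print('fsid:', ceph('fsid'))                                  # fsid option
print('admin-key:', ceph('auth', 'get-key', 'client.admin'))  # admin-key option
# monitor-hosts is the space-separated 'address:port' list of the
# cluster's monitors, e.g. as listed under 'mon host' in ceph.conf.
```
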
# Bugs From ed6017f27690baad1554467d7aecaf461c8e48ce Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 28 Jul 2020 14:45:39 +0100 Subject: [PATCH 2027/2699] Add support for erasure coding Add support for use of Erasure Coded pools with the Ceph RADOS Gateway. Only the data pool is actually Erasure Coded - all other pools continue to be replicated but have much smaller data footprints. Depends-On: Iec4de19f7b39f0b08158d96c5cc1561b40aefa10 Change-Id: I661639e67853ff471a7d7ddea0e3fc2fcb30fed1 --- ceph-radosgw/.gitignore | 1 + ceph-radosgw/config.yaml | 98 ++ ceph-radosgw/hooks/ceph_rgw.py | 56 +- ceph-radosgw/hooks/charmhelpers/__init__.py | 10 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 21 +- .../charmhelpers/contrib/openstack/context.py | 84 + .../contrib/openstack/templates/ceph.conf | 4 + .../templates/openstack_https_frontend | 10 +- .../templates/openstack_https_frontend.conf | 10 +- .../section-ceph-bluestore-compression | 28 + .../charmhelpers/contrib/openstack/utils.py | 17 +- .../contrib/storage/linux/ceph.py | 1405 ++++++++++++----- ceph-radosgw/hooks/charmhelpers/core/host.py | 18 +- ceph-radosgw/lib/charms_ceph/broker.py | 43 +- ceph-radosgw/lib/charms_ceph/utils.py | 298 +++- ceph-radosgw/unit_tests/test_ceph.py | 78 + 16 files changed, 1666 insertions(+), 515 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index 0e21f066..4030da5b 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -9,3 +9,4 @@ tags .unit-state.db func-results.json .stestr/ +**/__pycache__ diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 2123d97d..3dceefc1 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -121,6 +121,104 @@ options: that once a pool has been created, changes to this setting will be ignored. Setting this value to -1, enables the number of placement groups to be calculated based on the Ceph placement group calculator. + pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include ‘replicated’ + and ‘erasure-coded’. + ec-profile-name: + type: string + default: + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. 
Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range. The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. Larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this applications pool. The following list of + plugins acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: + description: | + EC profile technique used for this applications pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, + ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, + ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ + for shec. + ec-profile-device-class: + type: string + default: + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). 
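
The K+M sizing guidance in the options above is easier to reason about with
concrete numbers. Below is a minimal sketch comparing the raw-storage
overhead and fault tolerance of an erasure-coded data pool with plain
replication; the k=4, m=2 figures are illustrative, not the charm defaults:

```python
# Illustrative comparison of an EC k+m profile with 3-way replication.
# Each object is split into k data chunks plus m coding chunks, and all
# k+m chunks must land in distinct failure domains (hosts or zones),
# hence the rule above that K+M must not exceed the available zones.

def ec_raw_overhead(k, m):
    """Raw bytes stored per byte of user data for an EC k+m profile."""
    return (k + m) / k

# k=4, m=2: needs 6 failure domains, survives loss of any 2 chunks,
# and stores 1.5x the user data.
print(ec_raw_overhead(4, 2))  # 1.5

# 3-way replication: needs 3 failure domains, also survives 2 failures,
# but stores 3.0x the user data.
print(ec_raw_overhead(1, 2))  # 3.0 -- replication as a degenerate k=1 case
```
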
# Keystone integration operator-roles: type: string diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index 7d48ab8a..3aced5bd 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -18,6 +18,7 @@ from charmhelpers.core.hookenv import ( config, + service_name, ) from charmhelpers.core.host import ( @@ -111,18 +112,61 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): replicas = config('ceph-osd-replication-count') prefix = prefix or 'default' - # Buckets likely to contain the most data and therefore # requiring the most PGs heavy = [ '.rgw.buckets.data' ] bucket_weight = config('rgw-buckets-pool-weight') - for pool in heavy: - pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - rq.add_op_create_pool(name=pool, replica_count=replicas, - weight=bucket_weight, group='objects', - app_name=CEPH_POOL_APP_NAME) + + if config('pool-type') == 'erasure-coded': + # General EC plugin config + plugin = config('ec-profile-plugin') + technique = config('ec-profile-technique') + device_class = config('ec-profile-device-class') + bdm_k = config('ec-profile-k') + bdm_m = config('ec-profile-m') + # LRC plugin config + bdm_l = config('ec-profile-locality') + crush_locality = config('ec-profile-crush-locality') + # SHEC plugin config + bdm_c = config('ec-profile-durability-estimator') + # CLAY plugin config + bdm_d = config('ec-profile-helper-chunks') + scalar_mds = config('ec-profile-scalar-mds') + # Profile name + service = service_name() + profile_name = ( + config('ec-profile-name') or "{}-profile".format(service) + ) + rq.add_op_create_erasure_profile( + name=profile_name, + k=bdm_k, m=bdm_m, + lrc_locality=bdm_l, + lrc_crush_locality=crush_locality, + shec_durability_estimator=bdm_c, + clay_helper_chunks=bdm_d, + clay_scalar_mds=scalar_mds, + device_class=device_class, + erasure_type=plugin, + erasure_technique=technique + ) + + for pool in heavy: + pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) + rq.add_op_create_erasure_pool( + name=pool, + erasure_profile=profile_name, + weight=bucket_weight, + group="objects", + app_name=CEPH_POOL_APP_NAME + ) + else: + for pool in heavy: + pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) + rq.add_op_create_pool(name=pool, replica_count=replicas, + weight=bucket_weight, group='objects', + app_name=CEPH_POOL_APP_NAME) # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py b/ceph-radosgw/hooks/charmhelpers/__init__.py index 61ef9071..1f57ed2a 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -49,7 +49,8 @@ def deprecate(warning, date=None, log=None): """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month + + The date which is a string in semi-ISO8660 format indicates the year-month that the function is officially going to be removed. usage: @@ -62,10 +63,11 @@ def contributed_add_source_thing(...): The reason for passing the logging function (log) is so that hookenv.log can be used for a charm if needed. - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the + :param warning: String to indicate what is to be used instead. + :param date: Optional string in YYYY-MM format to indicate when the function will definitely (probably) be removed. 
- :param log: The log function to call to log. If not, logs to stdout + :param log: The log function to call in order to log. If None, logs to + stdout """ def wrap(f): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index d775861b..14b80d96 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,14 +18,14 @@ # Authors: # Matthew Wedgwood -import subprocess -import pwd +import glob import grp import os -import glob -import shutil +import pwd import re import shlex +import shutil +import subprocess import yaml from charmhelpers.core.hookenv import ( @@ -265,6 +265,11 @@ def __init__(self, hostname=None, primary=True): relation_set(relation_id=rid, relation_settings={'primary': self.primary}) self.remove_check_queue = set() + @classmethod + def does_nrpe_conf_dir_exist(cls): + """Return True if th nrpe_confdif directory exists.""" + return os.path.isdir(cls.nrpe_confdir) + def add_check(self, *args, **kwargs): shortname = None if kwargs.get('shortname') is None: @@ -310,6 +315,12 @@ def write(self): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + + # check that the charm can write to the conf dir. If not, then nagios + # probably isn't installed, and we can defer. + if not self.does_nrpe_conf_dir_exist(): + return + for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname, self.nagios_servicegroups) @@ -400,7 +411,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc - if host.init_is_systemd(): + if host.init_is_systemd(service_name=svc): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 42abccf7..0e41a9f3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -29,6 +29,8 @@ import six +import charmhelpers.contrib.storage.linux.ceph as ch_ceph + from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( _config_ini as config_ini ) @@ -56,6 +58,7 @@ status_set, network_get_primary_address, WARNING, + service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -808,6 +811,12 @@ def __call__(self): ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + if config('pool-type') and config('pool-type') == 'erasure-coded': + base_pool_name = config('rbd-pool') or config('rbd-pool-name') + if not base_pool_name: + base_pool_name = service_name() + ctxt['rbd_default_data_pool'] = base_pool_name + if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -3175,3 +3184,78 @@ def __call__(self): :rtype: Dict[str,int] """ return self._map + + +class CephBlueStoreCompressionContext(OSContextGenerator): + """Ceph BlueStore compression options.""" + + # Tuple with Tuples that map configuration option name to CephBrokerRq op + # property name + options = ( + ('bluestore-compression-algorithm', + 'compression-algorithm'), + ('bluestore-compression-mode', + 'compression-mode'), + ('bluestore-compression-required-ratio', + 'compression-required-ratio'), + ('bluestore-compression-min-blob-size', + 'compression-min-blob-size'), + ('bluestore-compression-min-blob-size-hdd', + 
'compression-min-blob-size-hdd'), + ('bluestore-compression-min-blob-size-ssd', + 'compression-min-blob-size-ssd'), + ('bluestore-compression-max-blob-size', + 'compression-max-blob-size'), + ('bluestore-compression-max-blob-size-hdd', + 'compression-max-blob-size-hdd'), + ('bluestore-compression-max-blob-size-ssd', + 'compression-max-blob-size-ssd'), + ) + + def __init__(self): + """Initialize context by loading values from charm config. + + We keep two maps, one suitable for use with CephBrokerRq's and one + suitable for template generation. + """ + charm_config = config() + + # CephBrokerRq op map + self.op = {} + # Context exposed for template generation + self.ctxt = {} + for config_key, op_key in self.options: + value = charm_config.get(config_key) + self.ctxt.update({config_key.replace('-', '_'): value}) + self.op.update({op_key: value}) + + def __call__(self): + """Get context. + + :returns: Context + :rtype: Dict[str,any] + """ + return self.ctxt + + def get_op(self): + """Get values for use in CephBrokerRq op. + + :returns: Context values with CephBrokerRq op property name as key. + :rtype: Dict[str,any] + """ + return self.op + + def validate(self): + """Validate options. + + :raises: AssertionError + """ + # We slip in a dummy name on class instantiation to allow validation of + # the other options. It will not affect further use. + # + # NOTE: once we retire Python 3.5 we can fold this into a in-line + # dictionary comprehension in the call to the initializer. + dummy_op = {'name': 'dummy-name'} + dummy_op.update(self.op) + pool = ch_ceph.BasePool('dummy-service', op=dummy_op) + pool.validate() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index a11ce8ab..c0f22360 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }} {{ key }} = {{ value }} {% endfor -%} {%- endif %} + +{% if rbd_default_data_pool -%} +rbd default data pool = {{ rbd_default_data_pool }} +{% endif %} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index f614b3fa..530719e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -6,8 +6,14 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + + # This section is based on Mozilla's recommendation + # as the "intermediate" profile as of July 7th, 2020. 
+ # https://wiki.mozilla.org/Security/Server_Side_TLS + SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 + SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + SSLHonorCipherOrder off + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index f614b3fa..530719e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -6,8 +6,14 @@ Listen {{ ext_port }} ServerName {{ endpoint }} SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + + # This section is based on Mozilla's recommendation + # as the "intermediate" profile as of July 7th, 2020. + # https://wiki.mozilla.org/Security/Server_Side_TLS + SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 + SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + SSLHonorCipherOrder off + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression new file mode 100644 index 00000000..a6430100 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression @@ -0,0 +1,28 @@ +{# section header omitted as options can belong to multiple sections #} +{% if bluestore_compression_algorithm -%} +bluestore compression algorithm = {{ bluestore_compression_algorithm }} +{% endif -%} +{% if bluestore_compression_mode -%} +bluestore compression mode = {{ bluestore_compression_mode }} +{% endif -%} +{% if bluestore_compression_required_ratio -%} +bluestore compression required ratio = {{ bluestore_compression_required_ratio }} +{% endif -%} +{% if bluestore_compression_min_blob_size -%} +bluestore compression min blob size = {{ bluestore_compression_min_blob_size }} +{% endif -%} +{% if bluestore_compression_min_blob_size_hdd -%} +bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }} +{% endif -%} +{% if bluestore_compression_min_blob_size_ssd -%} +bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }} +{% endif -%} +{% if bluestore_compression_max_blob_size -%} +bluestore compression max blob size = {{ bluestore_compression_max_blob_size }} +{% endif -%} +{% if bluestore_compression_max_blob_size_hdd -%} +bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }} +{% endif -%} +{% if bluestore_compression_max_blob_size_ssd -%} +bluestore compression max blob size ssd = {{ 
bluestore_compression_max_blob_size_ssd }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f21625d3..0aa797c4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -2241,10 +2241,13 @@ def inform_peers_unit_state(state, relation_name='cluster'): if state not in UNIT_STATES: raise ValueError( "Setting invalid state {} for unit".format(state)) + this_unit = local_unit() for r_id in relation_ids(relation_name): + juju_log('Telling peer behind relation {} that {} is {}'.format( + r_id, this_unit, state), 'DEBUG') relation_set(relation_id=r_id, relation_settings={ - get_peer_key(local_unit()): state}) + get_peer_key(this_unit): state}) def get_peers_unit_state(relation_name='cluster'): @@ -2276,8 +2279,10 @@ def are_peers_ready(relation_name='cluster'): :returns: Whether all units are ready. :rtype: bool """ - unit_states = get_peers_unit_state(relation_name) - return all(v == UNIT_READY for v in unit_states.values()) + unit_states = get_peers_unit_state(relation_name).values() + juju_log('{} peers are in the following states: {}'.format( + relation_name, unit_states), 'DEBUG') + return all(state == UNIT_READY for state in unit_states) def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): @@ -2360,7 +2365,9 @@ def get_api_application_status(): app_state, msg = get_api_unit_status() if app_state == WORKLOAD_STATES.ACTIVE: if are_peers_ready(): - return WORKLOAD_STATES.ACTIVE, 'Application Ready' + msg = 'Application Ready' else: - return WORKLOAD_STATES.WAITING, 'Some units are not ready' + app_state = WORKLOAD_STATES.WAITING + msg = 'Some units are not ready' + juju_log(msg, 'DEBUG') return app_state, msg diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 814d5c72..d9d43578 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ check_output, CalledProcessError, ) +from charmhelpers import deprecate from charmhelpers.core.hookenv import ( config, service_name, @@ -178,94 +179,293 @@ def send_osd_settings(): def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + """Helper function for type validation. + + Used to validate these: + https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + Example input: validator(value=1, valid_type=int, valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] - :param value: The value to validate + :param value: The value to validate. + :type value: any :param valid_type: The type that value should be. + :type valid_type: any :param valid_range: A range of values that value can assume. 
- :return: + :type valid_range: Optional[Union[List,Tuple]] + :raises: AssertionError, ValueError """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) + assert isinstance(value, valid_type), ( + "{} is not a {}".format(value, valid_type)) if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) + assert isinstance( + valid_range, list) or isinstance(valid_range, tuple), ( + "valid_range must be of type List or Tuple, " + "was given {} of type {}" + .format(valid_range, type(valid_range))) # If we're dealing with strings if isinstance(value, six.string_types): - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) + assert value in valid_range, ( + "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max else: if len(valid_range) != 2: raise ValueError( - "Invalid valid_range list of {} for {}. " + "Invalid valid_range list of {} for {}. " "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) + assert value >= valid_range[0], ( + "{} is less than minimum allowed value of {}" + .format(value, valid_range[0])) + assert value <= valid_range[1], ( + "{} is greater than maximum allowed value of {}" + .format(value, valid_range[1])) class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. Provides an error message + """A custom exception to inform the caller that a pool creation failed. + + Provides an error message """ def __init__(self, message): super(PoolCreationError, self).__init__(message) -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). +class BasePool(object): + """An object oriented approach to Ceph pool creation. + + This base class is inherited by ReplicatedPool and ErasurePool. Do not call + create() on this base class as it will raise an exception. + + Instantiate a child class and call create(). """ + # Dictionary that maps pool operation properties to Tuples with valid type + # and valid range + op_validation_map = { + 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), + 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), + 'compression-required-ratio': (float, None), + 'compression-min-blob-size': (int, None), + 'compression-min-blob-size-hdd': (int, None), + 'compression-min-blob-size-ssd': (int, None), + 'compression-max-blob-size': (int, None), + 'compression-max-blob-size-hdd': (int, None), + 'compression-max-blob-size-ssd': (int, None), + } - def __init__(self, service, name): + def __init__(self, service, name=None, percent_data=None, app_name=None, + op=None): + """Initialize BasePool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + :param service: The Ceph user name to run commands under. + :type service: str + :param name: Name of pool to operate on. 
+ :type name: str + :param percent_data: The expected pool size in relation to all + available resources in the Ceph cluster. Will be + used to set the ``target_size_ratio`` pool + property. (default: 10.0) + :type percent_data: Optional[float] + :param app_name: Ceph application name, usually one of: + ('cephfs', 'rbd', 'rgw') (default: 'unknown') + :type app_name: Optional[str] + :param op: Broker request Op to compile pool data from. + :type op: Optional[Dict[str,any]] + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.name = name + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + self.op = op or {} + + if op: + # When initializing from op the `name` attribute is required and we + # will fail with KeyError if it is not provided. + self.name = op['name'] + self.percent_data = op.get('weight') + self.app_name = op.get('app-name') + else: + self.name = name + self.percent_data = percent_data + self.app_name = app_name + + # Set defaults for these if they are not provided + self.percent_data = self.percent_data or 10.0 + self.app_name = self.app_name or 'unknown' + + def validate(self): + """Check that value of supplied operation parameters are valid. + + :raises: ValueError + """ + for op_key, op_value in self.op.items(): + if op_key in self.op_validation_map and op_value is not None: + valid_type, valid_range = self.op_validation_map[op_key] + try: + validator(op_value, valid_type, valid_range) + except (AssertionError, ValueError) as e: + # Normalize on ValueError, also add information about which + # variable we had an issue with. + raise ValueError("'{}': {}".format(op_key, str(e))) + + def _create(self): + """Perform the pool creation, method MUST be overridden by child class. + """ + raise NotImplementedError + + def _post_create(self): + """Perform common post pool creation tasks. + + Note that pool properties subject to change during the lifetime of a + pool / deployment should go into the ``update`` method. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + if self.nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool( + client=self.service, + pool=self.name, + settings={ + 'target_size_ratio': str( + self.percent_data / 100.0), + }) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}' + .format(self.name), + level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}' + .format(self.name, e), + level=WARNING) - # Create the pool if it doesn't exist already - # To be implemented by subclasses def create(self): - pass + """Create pool and perform any post pool creation tasks. - def add_cache_tier(self, cache_pool, mode): + To allow for sharing of common code among pool specific classes the + processing has been broken out into the private methods ``_create`` + and ``_post_create``. + + Do not add any pool type specific handling here, that should go into + one of the pool specific classes. """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. 
The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None + if not pool_exists(self.service, self.name): + self.validate() + self._create() + self._post_create() + self.update() + + def set_quota(self): + """Set a quota if requested. + + :raises: CalledProcessError + """ + max_bytes = self.op.get('max-bytes') + max_objects = self.op.get('max-objects') + if max_bytes or max_objects: + set_pool_quota(service=self.service, pool_name=self.name, + max_bytes=max_bytes, max_objects=max_objects) + + def set_compression(self): + """Set compression properties if requested. + + :raises: CalledProcessError + """ + compression_properties = { + key.replace('-', '_'): value + for key, value in self.op.items() + if key in ( + 'compression-algorithm', + 'compression-mode', + 'compression-required-ratio', + 'compression-min-blob-size', + 'compression-min-blob-size-hdd', + 'compression-min-blob-size-ssd', + 'compression-max-blob-size', + 'compression-max-blob-size-hdd', + 'compression-max-blob-size-ssd') and value} + if compression_properties: + update_pool(self.service, self.name, compression_properties) + + def update(self): + """Update properties for an already existing pool. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + self.validate() + self.set_quota() + self.set_compression() + + def add_cache_tier(self, cache_pool, mode): + """Adds a new cache tier to an existing pool. + + :param cache_pool: The cache tier pool name to add. + :type cache_pool: str + :param mode: The caching mode to use for this pool. + valid range = ["readonly", "writeback"] + :type mode: str """ # Check the input types and values validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + validator( + value=mode, valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'add', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, mode, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'set-overlay', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', + ]) def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. - :return: None + """Removes a cache tier from Ceph. + + Flushes all dirty objects from writeback pools and waits for that to + complete. + + :param cache_pool: The cache tier pool name to remove. 
+ :type cache_pool: str """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, 'none' + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool, + ]) elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', @@ -276,9 +476,15 @@ def remove_cache_tier(self, cache_pool): check_call(pool_forward_cmd) # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'rados', '--id', self.service, + '-p', cache_pool, 'cache-flush-evict-all']) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove-overlay', self.name]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool]) def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, device_class=None): @@ -305,19 +511,23 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, selected for the specific rule, rather it is left to the user to tune in the form of 'expected-osd-count' config option. - :param pool_size: int. pool_size is either the number of replicas for + :param pool_size: pool_size is either the number of replicas for replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. the percentage of data that is expected to + :type pool_size: int + :param percent_data: the percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. - :param device_class: str. class of storage to use for basis of pgs + :type percent_data: float + :param device_class: class of storage to use for basis of pgs calculation; ceph supports nvme, ssd and hdd by default based on presence of devices of each type in the deployment. - :return: int. The number of pgs to use. + :type device_class: str + :returns: The number of pgs to use. 
+ :rtype: int """ # Note: This calculation follows the approach that is provided @@ -357,7 +567,8 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return LEGACY_PG_COUNT percent_data /= 100.0 - target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + target_pgs_per_osd = config( + 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size # NOTE: ensure a sane minimum number of PGS otherwise we don't get any @@ -380,147 +591,174 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return int(nearest) -class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0, app_name=None): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - self.percent_data = percent_data - if pg_num: +class Pool(BasePool): + """Compability shim for any descendents external to this library.""" + + @deprecate( + 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') + def __init__(self, service, name): + super(Pool, self).__init__(service, name=name) + + def create(self): + pass + + +class ReplicatedPool(BasePool): + def __init__(self, service, name=None, pg_num=None, replicas=None, + percent_data=None, app_name=None, op=None): + """Initialize ReplicatedPool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param pg_num: Express wish for number of Placement Groups (this value + is subject to validation against a running cluster prior + to use to avoid creating a pool with too many PGs) + :type pg_num: int + :param replicas: Number of copies there should be of each object added + to this replicated pool. + :type replicas: int + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parents initializer + super(ReplicatedPool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # When initializing from op `replicas` is a required attribute, and + # we will fail with KeyError if it is not provided. + self.replicas = op['replicas'] + self.pg_num = op.get('pg_num') + else: + self.replicas = replicas or 2 + self.pg_num = pg_num + + def _create(self): + # Do extra validation on pg_num with data from live cluster + if self.pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. 
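            # For example (illustrative figures): with 100 OSDs, the
            # default 100 pgs-per-osd target and replicas=3, the formula
            # in get_pgs gives (100 * 100 * 1.0) // 3, capping pg_num at
            # roughly 3333 before get_pgs' minimum and rounding rules apply.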
max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) + self.pg_num = min(self.pg_num, max_pgs) else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - if app_name: - self.app_name = app_name + self.pg_num = self.get_pgs(self.replicas, self.percent_data) + + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] else: - self.app_name = 'unknown' + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + check_call(cmd) - def create(self): - if not pool_exists(self.service, self.name): - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num) - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) - ] + def _post_create(self): + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + # Perform other common post pool creation tasks + super(ReplicatedPool, self)._post_create() - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0, app_name=None): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
- erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, pgs) - ), - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] +class ErasurePool(BasePool): + """Default jerasure erasure coded pool.""" - try: - check_call(cmd) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" + def __init__(self, service, name=None, erasure_code_profile=None, + percent_data=None, app_name=None, op=None, + allow_ec_overwrites=False): + """Initialize ReplicatedPool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param erasure_code_profile: EC Profile to use (default: 'default') + :type erasure_code_profile: Optional[str] + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parents initializer + super(ErasurePool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # Note that the different default when initializing from op stems + # from different handling of this in the `charms.ceph` library. + self.erasure_code_profile = op.get('erasure-profile', + 'default-canonical') + else: + # We keep the class default when initialized from keyword arguments + # to not break the API for any other consumers. + self.erasure_code_profile = erasure_code_profile or 'default' + + self.allow_ec_overwrites = allow_ec_overwrites + + def _create(self): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. 
+ erasure_profile = get_erasure_profile(self.service,
+ self.erasure_code_profile)
+
+ # Check for errors
+ if erasure_profile is None:
+ msg = ("Failed to discover erasure profile named "
+ "{}".format(self.erasure_code_profile))
+ log(msg, level=ERROR)
+ raise PoolCreationError(msg)
+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
+ # Error
+ msg = ("Unable to find k (data chunks) or m (coding chunks) "
+ "in erasure profile {}".format(erasure_profile))
+ log(msg, level=ERROR)
+ raise PoolCreationError(msg)
+
+ k = int(erasure_profile['k'])
+ m = int(erasure_profile['m'])
+ pgs = self.get_pgs(k + m, self.percent_data)
+ self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+ # Create it
+ if self.nautilus_or_later:
+ cmd = [
+ 'ceph', '--id', self.service, 'osd', 'pool', 'create',
+ '--pg-num-min={}'.format(
+ min(AUTOSCALER_DEFAULT_PGS, pgs)
+ ),
+ self.name, str(pgs), str(pgs),
+ 'erasure', self.erasure_code_profile
+ ]
+ else:
+ cmd = [
+ 'ceph', '--id', self.service, 'osd', 'pool', 'create',
+ self.name, str(pgs), str(pgs),
+ 'erasure', self.erasure_code_profile
+ ]
+ check_call(cmd)
+
+ def _post_create(self):
+ super(ErasurePool, self)._post_create()
+ if self.allow_ec_overwrites:
+ update_pool(self.service, self.name,
+ {'allow_ec_overwrites': 'true'})
 def enabled_manager_modules():
@@ -541,22 +779,28 @@ def enabled_manager_modules():
 def enable_pg_autoscale(service, pool_name):
- """
- Enable Ceph's PG autoscaler for the specified pool.
+ """Enable Ceph's PG autoscaler for the specified pool.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types. The name of the pool to enable sutoscaling on
- :raise: CalledProcessError if the command fails
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param pool_name: The name of the pool to enable autoscaling on
+ :type pool_name: str
+ :raises: CalledProcessError if the command fails
 """
- check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
+ check_call([
+ 'ceph', '--id', service,
+ 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
 def get_mon_map(service):
- """
- Returns the current monitor map.
- :param service: six.string_types. The Ceph user name to run the command under
- :return: json string. :raise: ValueError if the monmap fails to parse.
- Also raises CalledProcessError if our ceph command fails
+ """Return the current monitor map.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :returns: Dictionary with monitor map data
+ :rtype: Dict[str,any]
+ :raises: ValueError if the monmap fails to parse, CalledProcessError if our
+ ceph command fails.
 """
 try:
 mon_status = check_output(['ceph', '--id', service,
@@ -576,17 +820,16 @@ def get_mon_map(service):
 def hash_monitor_names(service):
- """
+ """Get a sorted list of monitor hashes in ascending order.
+
 Uses the get_mon_map() function to get information about the monitor
- cluster.
- Hash the name of each monitor. Return a sorted list of monitor hashes
- in an ascending order.
- :param service: six.string_types. The Ceph user name to run the command under
- :rtype : dict. json dict of monitor name, ip address and rank
- example: {
- 'name': 'ip-172-31-13-165',
- 'rank': 0,
- 'addr': '172.31.13.165:6789/0'}
+ cluster. Hash the name of each monitor.
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :returns: a sorted list of monitor hashes in an ascending order.
+ :rtype: List[str]
+ :raises: CalledProcessError, ValueError
 """
 try:
 hash_list = []
@@ -603,46 +846,56 @@ def hash_monitor_names(service):
 def monitor_key_delete(service, key):
- """
- Delete a key and value pair from the monitor cluster
- :param service: six.string_types. The Ceph user name to run the command under
+ """Delete a key and value pair from the monitor cluster.
+
 Deletes a key value pair on the monitor cluster.
- :param key: six.string_types. The key to delete.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param key: The key to delete.
+ :type key: str
+ :raises: CalledProcessError
 """
 try:
 check_output(
 ['ceph', '--id', service,
 'config-key', 'del', str(key)])
 except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
+ log("Monitor config-key del failed with message: {}"
+ .format(e.output))
 raise
 def monitor_key_set(service, key, value):
- """
- Sets a key value pair on the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to set.
- :param value: The value to set. This will be converted to a string
- before setting
+ """Set a key value pair on the monitor cluster.
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param key: The key to set.
+ :type key: str
+ :param value: The value to set. This will be coerced into a string.
+ :type value: str
+ :raises: CalledProcessError
 """
 try:
 check_output(
 ['ceph', '--id', service,
 'config-key', 'put', str(key), str(value)])
 except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
+ log("Monitor config-key put failed with message: {}"
+ .format(e.output))
 raise
 def monitor_key_get(service, key):
- """
- Gets the value of an existing key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for.
+ """Get the value of an existing key in the monitor cluster.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param key: The key to search for.
+ :type key: str
 :return: Returns the value of that key or None if not found.
+ :rtype: Optional[str]
 """
 try:
 output = check_output(
@@ -650,19 +903,21 @@ def monitor_key_get(service, key):
 'config-key', 'get', str(key)]).decode('UTF-8')
 return output
 except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}".format(
- e.output))
+ log("Monitor config-key get failed with message: {}"
+ .format(e.output))
 return None
 def monitor_key_exists(service, key):
- """
- Searches for the existence of a key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for
- :return: Returns True if the key exists, False if not and raises an
- exception if an unknown error occurs. :raise: CalledProcessError if
- an unknown error occurs
+ """Search for existence of key in the monitor cluster.
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param key: The key to search for.
+ :type key: str
+ :return: Returns True if the key exists, False if not.
+ :rtype: bool
+ :raises: CalledProcessError if an unknown error occurs.
""" try: check_call( @@ -675,16 +930,20 @@ def monitor_key_exists(service, key): if e.returncode == errno.ENOENT: return False else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) + log("Unknown error from ceph config-get exists: {} {}" + .format(e.returncode, e.output)) raise def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: + """Get an existing erasure code profile if it exists. + + :param service: The Ceph user name to run the command under. + :type service: str + :param name: Name of profile. + :type name: str + :returns: Dictionary with profile data. + :rtype: Optional[Dict[str]] """ try: out = check_output(['ceph', '--id', service, @@ -698,54 +957,61 @@ def get_erasure_profile(service, name): def pool_set(service, pool_name, key, value): + """Sets a value for a RADOS pool in ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to set property on. + :type pool_name: str + :param key: Property key. + :type key: str + :param value: Value, will be coerced into str and shifted to lowercase. + :type value: str + :raises: CalledProcessError """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, - str(value).lower()] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, key, str(value).lower()] + check_call(cmd) def snapshot_pool(service, pool_name, snapshot_name): + """Snapshots a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to snapshot. + :type pool_name: str + :param snapshot_name: Name of snapshot to create. + :type snapshot_name: str + :raises: CalledProcessError """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + check_call(cmd) def remove_pool_snapshot(service, pool_name, snapshot_name): + """Remove a snapshot from a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove snapshot from. + :type pool_name: str + :param snapshot_name: Name of snapshot to remove. + :type snapshot_name: str + :raises: CalledProcessError """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. 
Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
+ cmd = [
+ 'ceph', '--id', service,
+ 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+ check_call(cmd)
 def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
- """
+ """Set byte and/or object count quota on a RADOS pool in Ceph.
+
 :param service: The Ceph user name to run the command under
 :type service: str
 :param pool_name: Name of pool
@@ -756,7 +1022,9 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
 :type max_objects: int
 :raises: subprocess.CalledProcessError
 """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+ cmd = [
+ 'ceph', '--id', service,
+ 'osd', 'pool', 'set-quota', pool_name]
 if max_bytes:
 cmd = cmd + ['max_bytes', str(max_bytes)]
 if max_objects:
@@ -765,119 +1033,216 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
 def remove_pool_quota(service, pool_name):
+ """Remove byte quota on a RADOS pool in Ceph.
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param pool_name: Name of pool to remove quota from.
+ :type pool_name: str
+ :raises: CalledProcessError
 """
- Set a byte quota on a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
+ cmd = [
+ 'ceph', '--id', service,
+ 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+ check_call(cmd)
 def remove_erasure_profile(service, profile_name):
+ """Remove erasure code profile.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param profile_name: Name of profile to remove.
+ :type profile_name: str
+ :raises: CalledProcessError
 """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
- profile_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
+ cmd = [
+ 'ceph', '--id', service,
+ 'osd', 'erasure-code-profile', 'rm', profile_name]
+ check_call(cmd)
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
- failure_domain='host',
+def create_erasure_profile(service, profile_name,
+ erasure_plugin_name='jerasure',
+ failure_domain=None,
 data_chunks=2, coding_chunks=1,
 locality=None, durability_estimator=None,
- device_class=None):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :param erasure_plugin_name: six.string_types
- :param failure_domain: six.string_types.
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
- 'room', 'root', 'row'])
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :param device_class: six.string_types
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+ helper_chunks=None,
+ scalar_mds=None,
+ crush_locality=None,
+ device_class=None,
+ erasure_plugin_technique=None):
+ """Create a new erasure code profile if one does not already exist for it.
+
+ Updates the profile if it exists. Please refer to [0] for more details.
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
- ]
- if locality is not None and durability_estimator is not None:
- raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+ 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param profile_name: Name of profile.
+ :type profile_name: str
+ :param erasure_plugin_name: Erasure code plugin.
+ :type erasure_plugin_name: str
+ :param failure_domain: Failure domain, one of:
+ ('chassis', 'datacenter', 'host', 'osd', 'pdu',
+ 'pod', 'rack', 'region', 'room', 'root', 'row').
+ :type failure_domain: str
+ :param data_chunks: Number of data chunks.
+ :type data_chunks: int
+ :param coding_chunks: Number of coding chunks.
+ :type coding_chunks: int
+ :param locality: Locality.
+ :type locality: int
+ :param durability_estimator: Durability estimator.
+ :type durability_estimator: int
+ :param helper_chunks: Number of helper chunks.
+ :type helper_chunks: int
+ :param device_class: Restrict placement to devices of specific class.
+ :type device_class: str
+ :param scalar_mds: one of ['isa', 'jerasure', 'shec']
+ :type scalar_mds: str
+ :param crush_locality: LRC locality failure domain, one of:
+ ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod',
+ 'rack', 'region', 'room', 'root', 'row') or unset.
+ :type crush_locality: str
+ :param erasure_plugin_technique: Coding technique for EC plugin
+ :type erasure_plugin_technique: str
+ :return: None.
Can raise CalledProcessError, ValueError or AssertionError + """ + plugin_techniques = { + 'jerasure': [ + 'reed_sol_van', + 'reed_sol_r6_op', + 'cauchy_orig', + 'cauchy_good', + 'liberation', + 'blaum_roth', + 'liber8tion' + ], + 'lrc': [], + 'isa': [ + 'reed_sol_van', + 'cauchy', + ], + 'shec': [ + 'single', + 'multiple' + ], + 'clay': [], + } + failure_domains = [ + 'chassis', 'datacenter', + 'host', 'osd', + 'pdu', 'pod', + 'rack', 'region', + 'room', 'root', + 'row', + ] + device_classes = [ + 'ssd', + 'hdd', + 'nvme' + ] + + validator(erasure_plugin_name, six.string_types, + list(plugin_techniques.keys())) + + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin={}'.format(erasure_plugin_name), + 'k={}'.format(str(data_chunks)), + 'm={}'.format(str(coding_chunks)), + ] + + if erasure_plugin_technique: + validator(erasure_plugin_technique, six.string_types, + plugin_techniques[erasure_plugin_name]) + cmd.append('technique={}'.format(erasure_plugin_technique)) luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - # failure_domain changed in luminous - if luminous_or_later: - cmd.append('crush-failure-domain=' + failure_domain) - else: - cmd.append('ruleset-failure-domain=' + failure_domain) + + # Set failure domain from options if not provided in args + if not failure_domain and config('customize-failure-domain'): + # Defaults to 'host' so just need to deal with + # setting 'rack' if feature is enabled + failure_domain = 'rack' + + if failure_domain: + validator(failure_domain, six.string_types, failure_domains) + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain={}'.format(failure_domain)) + else: + cmd.append('ruleset-failure-domain={}'.format(failure_domain)) # device class new in luminous if luminous_or_later and device_class: + validator(device_class, six.string_types, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', level=DEBUG) # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) + if erasure_plugin_name == 'lrc': + # LRC mandatory configuration + if locality: + cmd.append('l={}'.format(str(locality))) + else: + raise ValueError("locality must be provided for lrc plugin") + # LRC optional configuration + if crush_locality: + validator(crush_locality, six.string_types, failure_domains) + cmd.append('crush-locality={}'.format(crush_locality)) + + if erasure_plugin_name == 'shec': + # SHEC optional configuration + if durability_estimator: + cmd.append('c={}'.format((durability_estimator))) + + if erasure_plugin_name == 'clay': + # CLAY optional configuration + if helper_chunks: + cmd.append('d={}'.format(str(helper_chunks))) + if scalar_mds: + cmd.append('scalar-mds={}'.format(scalar_mds)) if erasure_profile_exists(service, profile_name): cmd.append('--force') - try: - check_call(cmd) - except CalledProcessError: - raise + check_call(cmd) def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None + """Rename a Ceph pool from old_name to new_name. 
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param old_name: Name of pool subject to rename.
+ :type old_name: str
+ :param new_name: Name to rename pool to.
+ :type new_name: str
 """
 validator(value=old_name, valid_type=six.string_types)
 validator(value=new_name, valid_type=six.string_types)
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+ cmd = [
+ 'ceph', '--id', service,
+ 'osd', 'pool', 'rename', old_name, new_name]
 check_call(cmd)
 def erasure_profile_exists(service, name):
- """
- Check to see if an Erasure code profile already exists.
- :param service: six.string_types. The Ceph user name to run the command under
- :param name: six.string_types
- :return: int or None
+ """Check to see if an Erasure code profile already exists.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param name: Name of profile to look for.
+ :type name: str
+ :returns: True if it exists, False otherwise.
+ :rtype: bool
 """
 validator(value=name, valid_type=six.string_types)
 try:
@@ -890,11 +1255,14 @@ def erasure_profile_exists(service, name):
 def get_cache_mode(service, pool_name):
- """
- Find the current caching mode of the pool_name given.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: int or None
+ """Find the current caching mode of the pool_name given.
+
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param pool_name: Name of pool.
+ :type pool_name: str
+ :returns: Current cache mode.
+ :rtype: Optional[str]
 """
 validator(value=service, valid_type=six.string_types)
 validator(value=pool_name, valid_type=six.string_types)
@@ -976,17 +1344,23 @@ def create_rbd_image(service, pool, image, sizemb):
 def update_pool(client, pool, settings):
+ """Update pool properties.
+
+ :param client: Client/User-name to authenticate with.
+ :type client: str
+ :param pool: Name of pool to operate on
+ :type pool: str
+ :param settings: Dictionary with key/value pairs to set.
+ :type settings: Dict[str, str]
+ :raises: CalledProcessError
+ """
 cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
 for k, v in six.iteritems(settings):
- cmd.append(k)
- cmd.append(v)
-
- check_call(cmd)
+ check_call(cmd + [k, v])
 def set_app_name_for_pool(client, pool, name):
- """
- Calls `osd pool application enable` for the specified pool name
+ """Calls `osd pool application enable` for the specified pool name.
 :param client: Name of the ceph client to use
 :type client: str
@@ -1043,8 +1417,7 @@ def _keyring_path(service):
 def add_key(service, key):
- """
- Add a key to a keyring.
+ """Add a key to a keyring.
 Creates the keyring if it doesn't already exist.
@@ -1288,13 +1661,33 @@ class CephBrokerRq(object):
 The API is versioned and defaults to version 1.
 """
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
+ def __init__(self, api_version=1, request_id=None, raw_request_data=None):
+ """Initialize CephBrokerRq object.
+
+ Builds a new empty request or rebuilds a request from on-wire JSON
+ data.
+
+ :param api_version: API version for request (default: 1).
+ :type api_version: Optional[int]
+ :param request_id: Unique identifier for request.
+ (default: string representation of generated UUID)
+ :type request_id: Optional[str]
+ :param raw_request_data: JSON-encoded string to build request from.
+ :type raw_request_data: Optional[str]
+ :raises: KeyError
+ """
+ if raw_request_data:
+ request_data = json.loads(raw_request_data)
+ self.api_version = request_data['api-version']
+ self.request_id = request_data['request-id']
+ self.set_ops(request_data['ops'])
 else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
+ self.api_version = api_version
+ if request_id:
+ self.request_id = request_id
+ else:
+ self.request_id = str(uuid.uuid1())
+ self.ops = []
 def add_op(self, op):
 """Add an op if it is not already in the list.
@@ -1336,12 +1729,119 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None,
 group=group, namespace=namespace, app_name=app_name,
 max_bytes=max_bytes, max_objects=max_objects)
+ # Use function parameters and docstring to define types in a compatible
+ # manner.
+ #
+ # NOTE: Our caller should always use a kwarg Dict when calling us so
+ # no need to maintain fixed order/position for parameters. Please keep them
+ # sorted by name when adding new ones.
+ def _partial_build_common_op_create(self,
+ app_name=None,
+ compression_algorithm=None,
+ compression_mode=None,
+ compression_required_ratio=None,
+ compression_min_blob_size=None,
+ compression_min_blob_size_hdd=None,
+ compression_min_blob_size_ssd=None,
+ compression_max_blob_size=None,
+ compression_max_blob_size_hdd=None,
+ compression_max_blob_size_ssd=None,
+ group=None,
+ max_bytes=None,
+ max_objects=None,
+ namespace=None,
+ weight=None):
+ """Build common part of a create pool operation.
+
+ :param app_name: Tag pool with application name. Note that there are
+ certain protocols emerging upstream with regard to
+ meaningful application names to use.
+ Examples are 'rbd' and 'rgw'.
+ :type app_name: Optional[str]
+ :param compression_algorithm: Compressor to use, one of:
+ ('lz4', 'snappy', 'zlib', 'zstd')
+ :type compression_algorithm: Optional[str]
+ :param compression_mode: When to compress data, one of:
+ ('none', 'passive', 'aggressive', 'force')
+ :type compression_mode: Optional[str]
+ :param compression_required_ratio: Minimum compression ratio for data
+ chunks; if the requested ratio is not
+ achieved the compressed version will
+ be thrown away and the original
+ stored.
+ :type compression_required_ratio: Optional[float]
+ :param compression_min_blob_size: Chunks smaller than this are never
+ compressed (unit: bytes).
+ :type compression_min_blob_size: Optional[int]
+ :param compression_min_blob_size_hdd: Chunks smaller than this are not
+ compressed when destined for
+ rotational media (unit: bytes).
+ :type compression_min_blob_size_hdd: Optional[int]
+ :param compression_min_blob_size_ssd: Chunks smaller than this are not
+ compressed when destined for flash
+ media (unit: bytes).
+ :type compression_min_blob_size_ssd: Optional[int]
+ :param compression_max_blob_size: Chunks larger than this are broken
+ into N * compression_max_blob_size
+ chunks before being compressed
+ (unit: bytes).
+ :type compression_max_blob_size: Optional[int]
+ :param compression_max_blob_size_hdd: Chunks larger than this are
+ broken into
+ N * compression_max_blob_size_hdd
+ chunks before being compressed
+ when destined for rotational
+ media (unit: bytes)
+ :type compression_max_blob_size_hdd: Optional[int]
+ :param compression_max_blob_size_ssd: Chunks larger than this are
+ broken into
+ N * compression_max_blob_size_ssd
+ chunks before being compressed
+ when destined for flash media
+ (unit: bytes).
+ :type compression_max_blob_size_ssd: Optional[int] + :param group: Group to add pool to + :type group: Optional[str] + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: Optional[int] + :param max_objects: Maximum objects quota to apply + :type max_objects: Optional[int] + :param namespace: Group namespace + :type namespace: Optional[str] + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: Optional[float] + :returns: Dictionary with kwarg name as key. + :rtype: Dict[str,any] + :raises: AssertionError + """ + return { + 'app-name': app_name, + 'compression-algorithm': compression_algorithm, + 'compression-mode': compression_mode, + 'compression-required-ratio': compression_required_ratio, + 'compression-min-blob-size': compression_min_blob_size, + 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, + 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, + 'compression-max-blob-size': compression_max_blob_size, + 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, + 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, + 'group': group, + 'max-bytes': max_bytes, + 'max-objects': max_objects, + 'group-namespace': namespace, + 'weight': weight, + } + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, - max_objects=None): + **kwargs): """Adds an operation to create a replicated pool. + Refer to docstring for ``_partial_build_common_op_create`` for + documentation of keyword arguments. + :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. @@ -1349,66 +1849,114 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: float - :param group: Group to add pool to - :type group: str - :param namespace: Group namespace - :type namespace: str - :param app_name: (Optional) Tag pool with application name. Note that - there is certain protocols emerging upstream with - regard to meaningful application names to use. - Examples are ``rbd`` and ``rgw``. - :type app_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int + :raises: AssertionError if provided data is of invalid type/range """ - if pg_num and weight: + if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') - self.add_op({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + op = { + 'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + } + op.update(self._partial_build_common_op_create(**kwargs)) + + # Initialize Pool-object to validate type and range of ops. 
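The 'dummy-service' placeholder used next works because of the NOTE in the constructors above: neither __init__ nor validate() touches a live cluster, so the Pool classes double as pure validators. A minimal sketch of the same trick (op values invented, dict trimmed for brevity):

    # Hypothetical op dict of the shape built by add_op_create_replicated_pool():
    op = {'op': 'create-pool', 'name': 'mypool', 'replicas': 3,
          'pg_num': None, 'weight': 42.0, 'app-name': 'rbd'}
    # No ceph command runs here; validate() only type/range-checks fields.
    ReplicatedPool('dummy-service', op=op).validate()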
+ pool = ReplicatedPool('dummy-service', op=op)
+ pool.validate()
+
+ self.add_op(op)
 def add_op_create_erasure_pool(self, name, erasure_profile=None,
- weight=None, group=None, app_name=None,
- max_bytes=None, max_objects=None):
+ allow_ec_overwrites=False, **kwargs):
 """Adds an operation to create an erasure coded pool.
+ Refer to docstring for ``_partial_build_common_op_create`` for
+ documentation of keyword arguments.
+
 :param name: Name of pool to create
 :type name: str
 :param erasure_profile: Name of erasure code profile to use. If not
 set the ceph-mon unit handling the broker
 request will set its default value.
 :type erasure_profile: str
- :param weight: The percentage of data that is expected to be contained
- in the pool from the total available space on the OSDs.
- :type weight: float
- :param group: Group to add pool to
- :type group: str
- :param app_name: (Optional) Tag pool with application name. Note that
- there is certain protocols emerging upstream with
- regard to meaningful application names to use.
- Examples are ``rbd`` and ``rgw``.
- :type app_name: str
- :param max_bytes: Maximum bytes quota to apply
- :type max_bytes: int
- :param max_objects: Maximum objects quota to apply
- :type max_objects: int
+ :param allow_ec_overwrites: allow overwrites of objects in the EC pool
+ :type allow_ec_overwrites: bool
+ :raises: AssertionError if provided data is of invalid type/range
 """
- self.add_op({'op': 'create-pool', 'name': name,
- 'pool-type': 'erasure',
- 'erasure-profile': erasure_profile,
- 'weight': weight,
- 'group': group, 'app-name': app_name,
- 'max-bytes': max_bytes, 'max-objects': max_objects})
+ op = {
+ 'op': 'create-pool',
+ 'name': name,
+ 'pool-type': 'erasure',
+ 'erasure-profile': erasure_profile,
+ 'allow-ec-overwrites': allow_ec_overwrites,
+ }
+ op.update(self._partial_build_common_op_create(**kwargs))
+
+ # Initialize Pool-object to validate type and range of ops.
+ pool = ErasurePool('dummy-service', op)
+ pool.validate()
+
+ self.add_op(op)
+
+ def add_op_create_erasure_profile(self, name,
+ erasure_type='jerasure',
+ erasure_technique=None,
+ k=None, m=None,
+ failure_domain=None,
+ lrc_locality=None,
+ shec_durability_estimator=None,
+ clay_helper_chunks=None,
+ device_class=None,
+ clay_scalar_mds=None,
+ lrc_crush_locality=None):
+ """Adds an operation to create an erasure coding profile.
+
+ :param name: Name of profile to create
+ :type name: str
+ :param erasure_type: Which of the erasure coding plugins should be used
+ :type erasure_type: string
+ :param erasure_technique: EC plugin technique to use
+ :type erasure_technique: string
+ :param k: Number of data chunks
+ :type k: int
+ :param m: Number of coding chunks
+ :type m: int
+ :param lrc_locality: Group the coding and data chunks into sets of size locality
+ (lrc plugin)
+ :type lrc_locality: int
+ :param shec_durability_estimator: The number of parity chunks each of which includes
+ a data chunk in its calculation range (shec plugin)
+ :type shec_durability_estimator: int
+ :param clay_helper_chunks: The number of helper chunks to use for recovery operations
+ (clay plugin)
+ :type clay_helper_chunks: int
+ :param failure_domain: Type of failure domain from Ceph bucket types
+ to be used
+ :type failure_domain: string
+ :param device_class: Device class to use for profile (ssd, hdd)
+ :type device_class: string
+ :param clay_scalar_mds: Plugin to use for CLAY layered construction
+ (jerasure|isa|shec)
+ :type clay_scalar_mds: string
+ :param lrc_crush_locality: Type of crush bucket in which set of chunks
+ defined by lrc_locality will be stored.
+ :type lrc_crush_locality: string
+ """
+ self.add_op({'op': 'create-erasure-profile',
+ 'name': name,
+ 'k': k,
+ 'm': m,
+ 'l': lrc_locality,
+ 'c': shec_durability_estimator,
+ 'd': clay_helper_chunks,
+ 'erasure-type': erasure_type,
+ 'erasure-technique': erasure_technique,
+ 'failure-domain': failure_domain,
+ 'device-class': device_class,
+ 'scalar-mds': clay_scalar_mds,
+ 'crush-locality': lrc_crush_locality})
 def set_ops(self, ops):
 """Set request ops to provided value.
@@ -1522,18 +2070,15 @@ def exit_msg(self):
 def get_previous_request(rid):
 """Return the last ceph broker request sent on a given relation
- @param rid: Relation id to query for request
+ :param rid: Relation id to query for request
+ :type rid: str
+ :returns: CephBrokerRq object or None if relation data not found.
+ :rtype: Optional[CephBrokerRq] """ - request = None broker_req = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request + return CephBrokerRq(raw_request_data=broker_req) def get_request_states(request, relation='ceph'): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index b33ac906..a785efdf 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -193,7 +193,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", stopped = service_stop(service_name, **kwargs) upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('disable', service_name) service('mask', service_name) elif os.path.exists(upstart_file): @@ -227,7 +227,7 @@ def service_resume(service_name, init_dir="/etc/init", """ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): + if init_is_systemd(service_name=service_name): service('unmask', service_name) service('enable', service_name) elif os.path.exists(upstart_file): @@ -257,7 +257,7 @@ def service(action, service_name, **kwargs): :param **kwargs: additional params to be passed to the service command in the form of key=value. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] @@ -281,7 +281,7 @@ def service_running(service_name, **kwargs): units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services. """ - if init_is_systemd(): + if init_is_systemd(service_name=service_name): return service('is-active', service_name) else: if os.path.exists(_UPSTART_CONF.format(service_name)): @@ -311,8 +311,14 @@ def service_running(service_name, **kwargs): SYSTEMD_SYSTEM = '/run/systemd/system' -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" +def init_is_systemd(service_name=None): + """ + Returns whether the host uses systemd for the specified service. + + @param Optional[str] service_name: specific name of service + """ + if str(service_name).startswith("snap."): + return True if lsb_release()['DISTRIB_CODENAME'] == 'trusty': return False return os.path.isdir(SYSTEMD_SYSTEM) diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 15552cd8..d5c83891 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -155,25 +155,47 @@ def handle_create_erasure_profile(request, service): :param service: The ceph client to run the command under. :returns: dict. exit-code and reason if not 0 """ - # "local" | "shec" or it defaults to "jerasure" + # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure" erasure_type = request.get('erasure-type') - # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + # dependent on erasure coding type + erasure_technique = request.get('erasure-technique') + # "host" | "rack" | ... 
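For orientation, the request handled below is the op dict built by CephBrokerRq.add_op_create_erasure_profile() earlier in this patch; a representative payload (field values invented for illustration) would be:

    {'op': 'create-erasure-profile', 'name': 'my-profile',
     'erasure-type': 'shec', 'erasure-technique': 'multiple',
     'k': 4, 'm': 3, 'c': 2, 'l': None, 'd': None,
     'failure-domain': 'rack', 'device-class': 'ssd',
     'scalar-mds': None, 'crush-locality': None}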
 failure_domain = request.get('failure-domain')
 name = request.get('name')
 # Binary Distribution Matrix (BDM) parameters
 bdm_k = request.get('k')
 bdm_m = request.get('m')
+ # LRC parameters
 bdm_l = request.get('l')
-
- if failure_domain not in CEPH_BUCKET_TYPES:
+ crush_locality = request.get('crush-locality')
+ # SHEC parameters
+ bdm_c = request.get('c')
+ # CLAY parameters
+ bdm_d = request.get('d')
+ scalar_mds = request.get('scalar-mds')
+ # Device Class
+ device_class = request.get('device-class')
+
+ if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
 msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
 log(msg, level=ERROR)
 return {'exit-code': 1, 'stderr': msg}
- create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
- profile_name=name, failure_domain=failure_domain,
- data_chunks=bdm_k, coding_chunks=bdm_m,
- locality=bdm_l)
+ create_erasure_profile(service=service,
+ erasure_plugin_name=erasure_type,
+ profile_name=name,
+ failure_domain=failure_domain,
+ data_chunks=bdm_k,
+ coding_chunks=bdm_m,
+ locality=bdm_l,
+ durability_estimator=bdm_c,
+ helper_chunks=bdm_d,
+ scalar_mds=scalar_mds,
+ crush_locality=crush_locality,
+ device_class=device_class,
+ erasure_plugin_technique=erasure_technique)
+
+ return {'exit-code': 0}
 def handle_add_permissions_to_key(request, service):
@@ -387,6 +409,7 @@ def handle_erasure_pool(request, service):
 max_objects = request.get('max-objects')
 weight = request.get('weight')
 group_name = request.get('group')
+ allow_ec_overwrites = request.get('allow-ec-overwrites')
 if erasure_profile is None:
 erasure_profile = "default-canonical"
@@ -416,7 +439,9 @@ def handle_erasure_pool(request, service):
 pool = ErasurePool(service=service, name=pool_name,
 erasure_code_profile=erasure_profile,
- percent_data=weight, app_name=app_name)
+ percent_data=weight,
+ app_name=app_name,
+ allow_ec_overwrites=allow_ec_overwrites)
 # Ok make the erasure pool
 if not pool_exists(service=service, name=pool_name):
 log("Creating pool '{}' (erasure_profile={})"
diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py
index 1a51e7c5..72e6b921 100644
--- a/ceph-radosgw/lib/charms_ceph/utils.py
+++ b/ceph-radosgw/lib/charms_ceph/utils.py
@@ -2169,15 +2169,18 @@ def roll_monitor_cluster(new_version, upgrade_key):
 status_set('blocked', 'failed to upgrade monitor')
-# TODO(jamespage):
-# Mimic support will need to ensure that ceph-mgr daemons are also
-# restarted during upgrades - probably through use of one of the
-# high level systemd targets shipped by the packaging.
-def upgrade_monitor(new_version):
+# For E731 we can't assign a lambda, so pass this function instead.
+def noop():
+ pass
+
+
+def upgrade_monitor(new_version, kick_function=None):
 """Upgrade the current ceph monitor to the new version
 :param new_version: String version to upgrade to.
+ :type new_version: str
+ :param kick_function: Optional function to call to 'kick' a watchdog
+ while the upgrade progresses (defaults to a no-op).
""" + if kick_function is None: + kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") log("Current ceph version is {}".format(current_version)) @@ -2186,6 +2189,7 @@ def upgrade_monitor(new_version): # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) @@ -2194,6 +2198,7 @@ def upgrade_monitor(new_version): err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) + kick_function() try: if systemd(): service_stop('ceph-mon') @@ -2204,6 +2209,7 @@ def upgrade_monitor(new_version): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + kick_function() owner = ceph_user() @@ -2217,6 +2223,8 @@ def upgrade_monitor(new_version): group=owner, follow_links=True) + kick_function() + # Ensure that mon directory is user writable hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -2257,13 +2265,22 @@ def lock_and_roll(upgrade_key, service, my_name, version): start_timestamp)) monitor_key_set(upgrade_key, "{}_{}_{}_start".format( service, my_name, version), start_timestamp) + + # alive indication: + alive_function = ( + lambda: monitor_key_set( + upgrade_key, "{}_{}_{}_alive" + .format(service, my_name, version), time.time())) + dog = WatchDog(kick_interval=3 * 60, + kick_function=alive_function) + log("Rolling") # This should be quick if service == 'osd': - upgrade_osd(version) + upgrade_osd(version, kick_function=dog.kick_the_dog) elif service == 'mon': - upgrade_monitor(version) + upgrade_monitor(version, kick_function=dog.kick_the_dog) else: log("Unknown service {}. Unable to upgrade".format(service), level=ERROR) @@ -2294,45 +2311,225 @@ def wait_on_previous_node(upgrade_key, service, previous_node, version): """ log("Previous node is: {}".format(previous_node)) - previous_node_finished = monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version)) - - while previous_node_finished is False: - log("{} is not finished. Waiting".format(previous_node)) - # Has this node been trying to upgrade for longer than - # 10 minutes? - # If so then move on and consider that node dead. - - # NOTE: This assumes the clusters clocks are somewhat accurate - # If the hosts clock is really far off it may cause it to skip - # the previous node even though it shouldn't. - current_timestamp = time.time() - previous_node_start_time = monitor_key_get( + previous_node_started_f = ( + lambda: monitor_key_exists( upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version)) - if (previous_node_start_time is not None and - ((current_timestamp - (10 * 60)) > - float(previous_node_start_time))): - # NOTE(jamespage): - # Previous node is probably dead as we've been waiting - # for 10 minutes - lets move on and upgrade - log("Waited 10 mins on node {}. current time: {} > " - "previous node start time: {} Moving on".format( - previous_node, - (current_timestamp - (10 * 60)), - previous_node_start_time)) - return - # NOTE(jamespage) - # Previous node has not started, or started less than - # 10 minutes ago - sleep a random amount of time and - # then check again. 
- wait_time = random.randrange(5, 30)
- log('waiting for {} seconds'.format(wait_time))
- time.sleep(wait_time)
- previous_node_finished = monitor_key_exists(
+ "{}_{}_{}_start".format(service, previous_node, version)))
+ previous_node_finished_f = (
+ lambda: monitor_key_exists(
 upgrade_key,
- "{}_{}_{}_done".format(service, previous_node, version))
+ "{}_{}_{}_done".format(service, previous_node, version)))
+ previous_node_alive_time_f = (
+ lambda: monitor_key_get(
+ upgrade_key,
+ "{}_{}_{}_alive".format(service, previous_node, version)))
+
+ # wait for 30 minutes until the previous node starts. We don't proceed
+ # unless we get a start condition.
+ try:
+ WatchDog.wait_until(previous_node_started_f, timeout=30 * 60)
+ except WatchDog.WatchDogTimeoutException:
+ log("Waited for previous node to start for 30 minutes. "
+ "It didn't start, so it may have a serious issue. Continuing with "
+ "upgrade of this node.",
+ level=WARNING)
+ return
+
+ # keep the time it started from this node's perspective.
+ previous_node_started_at = time.time()
+ log("Detected that previous node {} has started. Time now: {}"
+ .format(previous_node, previous_node_started_at))
+
+ # Now wait for the node to complete. The node may optionally be kicking
+ # with the *_alive key, which allows this node to wait longer as it 'knows'
+ # the other node is proceeding.
+ try:
+ WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f,
+ complete_function=previous_node_finished_f,
+ wait_time=30 * 60,
+ compatibility_wait_time=10 * 60,
+ max_kick_interval=5 * 60)
+ except WatchDog.WatchDogDeadException:
+ # previous node was kicking, but timed out; log this condition and move
+ # on.
+ now = time.time()
+ waited = int((now - previous_node_started_at) / 60)
+ log("Previous node started, but has now not ticked for 5 minutes. "
+ "Waited total of {} mins on node {}. current time: {} > "
+ "previous node start time: {}. "
+ "Continuing with upgrade of this node."
+ .format(waited, previous_node, now, previous_node_started_at),
+ level=WARNING)
+ except WatchDog.WatchDogTimeoutException:
+ # previous node never kicked, or simply took too long; log this
+ # condition and move on.
+ now = time.time()
+ waited = int((now - previous_node_started_at) / 60)
+ log("Previous node is taking too long; assuming it has died. "
+ "Waited {} mins on node {}. current time: {} > "
+ "previous node start time: {}. "
+ "Continuing with upgrade of this node."
+ .format(waited, previous_node, now, previous_node_started_at),
+ level=WARNING)
+
+
+class WatchDog(object):
+ """Watch a dog; basically a kickable timer with a timeout between two async
+ units.
+
+ The idea is that you have an overall timeout and then can kick that timeout
+ with intermediary hits, with a max time between those kicks allowed.
+
+ Note that this watchdog doesn't rely on the clock of the other side; it
+ only tracks, roughly, when it detected that the other side started. All
+ timings are based on the local clock.
+
+ The kicker will not 'kick' more often than a set interval, regardless of
+ how often the kick_the_dog() function is called. The kicker provides a
+ function (lambda: -> None) that is called when the kick interval is
+ reached.
+
+ The waiter calls the static method with a check function
+ (lambda: -> Boolean) that indicates when the wait should be over and the
+ maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval.
+
+ So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick
+ interval, or however long it is expected for the key to propagate and to
+ allow for other delays.
+
+ There is a compatibility mode where if the other side never kicks, then it
+ simply waits for the compatibility timer.
+ """
+
+ class WatchDogDeadException(Exception):
+ pass
+
+ class WatchDogTimeoutException(Exception):
+ pass
+
+ def __init__(self, kick_interval=3 * 60, kick_function=None):
+ """Initialise a new WatchDog.
+
+ :param kick_interval: the interval when this side kicks the other in
+ seconds.
+ :type kick_interval: Int
+ :param kick_function: The function to call that does the kick.
+ :type kick_function: Callable[]
+ """
+ self.start_time = time.time()
+ self.last_run_func = None
+ self.last_kick_at = None
+ self.kick_interval = kick_interval
+ self.kick_f = kick_function
+
+ def kick_the_dog(self):
+ """Might call the kick_function if it's time.
+
+ This function can be called as frequently as needed, but will run the
+ self.kick_function after kick_interval seconds have passed.
+ """
+ now = time.time()
+ if (self.last_run_func is None or
+ (now - self.last_run_func > self.kick_interval)):
+ if self.kick_f is not None:
+ self.kick_f()
+ self.last_run_func = now
+ self.last_kick_at = now
+
+ @staticmethod
+ def wait_until(wait_f, timeout=10 * 60):
+ """Wait for timeout seconds until the passed function returns True.
+
+ :param wait_f: The function to call that will end the wait.
+ :type wait_f: Callable[[], Boolean]
+ :param timeout: The time to wait in seconds.
+ :type timeout: int
+ """
+ start_time = time.time()
+ while not wait_f():
+ now = time.time()
+ if now > start_time + timeout:
+ raise WatchDog.WatchDogTimeoutException()
+ wait_time = random.randrange(5, 30)
+ log('wait_until: waiting for {} seconds'.format(wait_time))
+ time.sleep(wait_time)
+
+ @staticmethod
+ def timed_wait(kicked_at_function,
+ complete_function,
+ wait_time=30 * 60,
+ compatibility_wait_time=10 * 60,
+ max_kick_interval=5 * 60):
+ """Wait a maximum time with an intermediate 'kick' time.
+
+ This function will wait for max_kick_interval seconds unless the
+ kicked_at_function() call returns a time that is not older than
+ max_kick_interval (in seconds). i.e. the other side can signal that it
+ is still doing things during the max_kick_interval as long as it kicks
+ at least every max_kick_interval seconds.
+
+ The maximum wait is "wait_time", but the other side must keep kicking
+ during this period.
+
+ The "compatibility_wait_time" is used if the other side never kicks
+ (i.e. the kicked_at_function() always returns None). In this case the
+ function waits up to "compatibility_wait_time".
+
+ Note that the type of the return from the kicked_at_function is an
+ Optional[str], not a Float. The function will coerce this to a float
+ for the comparison. This represents the return value of
+ time.time() at the "other side". It's a string to simplify the
+ function obtaining the time value from the other side.
+
+ The function raises WatchDogTimeoutException if either the
+ compatibility_wait_time or the wait_time are exceeded.
+
+ The function raises WatchDogDeadException if the max_kick_interval is
+ exceeded.
+
+ Note that it is possible that the first kick interval is extended to
+ compatibility_wait_time if the "other side" doesn't kick immediately.
+ The best solution is for the other side to kick early and often.
+
+ :param kicked_at_function: The function to call to retrieve the time
+ that the other side 'kicked' at. None if the other side hasn't
+ kicked.
+ :type kicked_at_function: Callable[[], Optional[str]]
+ :param complete_function: The callable that returns True when done.
+ :type complete_function: Callable[[], Boolean]
+ :param wait_time: the maximum time to wait, even with kicks, in
+ seconds.
+ :type wait_time: int
+ :param compatibility_wait_time: The time to wait if no kicks are
+ received, in seconds.
+ :type compatibility_wait_time: int
+ :param max_kick_interval: The maximum time allowed between kicks before
+ the wait is over, in seconds.
+ :type max_kick_interval: int
+ :raises: WatchDog.WatchDogTimeoutException,
+ WatchDog.WatchDogDeadException
+ """
+ start_time = time.time()
+ while True:
+ if complete_function():
+ break
+ # the time when the unit we are waiting for last kicked.
+ kicked_at = kicked_at_function()
+ now = time.time()
+ if kicked_at is None:
+ # assume other end doesn't do alive kicks
+ if (now - start_time > compatibility_wait_time):
+ raise WatchDog.WatchDogTimeoutException()
+ else:
+ # other side is participating in kicks; must kick at least
+ # every 'max_kick_interval' to stay alive.
+ if (now - float(kicked_at) > max_kick_interval):
+ raise WatchDog.WatchDogDeadException()
+ if (now - start_time > wait_time):
+ raise WatchDog.WatchDogTimeoutException()
+ delay_time = random.randrange(5, 30)
+ log('waiting for {} seconds'.format(delay_time))
+ time.sleep(delay_time)
 def get_upgrade_position(osd_sorted_list, match_name):
@@ -2412,11 +2609,14 @@ def roll_osd_cluster(new_version, upgrade_key):
 status_set('blocked', 'failed to upgrade osd')
-def upgrade_osd(new_version):
+def upgrade_osd(new_version, kick_function=None):
 """Upgrades the current osd
 :param new_version: str. The new version to upgrade to
+ :param kick_function: Optional function to call to 'kick' a watchdog
+ while the upgrade progresses (defaults to a no-op).
 """
+ if kick_function is None:
+ kick_function = noop
+
 current_version = get_version()
 status_set("maintenance", "Upgrading osd")
 log("Current ceph version is {}".format(current_version))
@@ -2431,10 +2631,13 @@ def upgrade_osd(new_version):
 status_set("blocked", "Upgrade to {} failed".format(new_version))
 sys.exit(1)
+ kick_function()
+
 try:
 # Upgrade the packages before restarting the daemons.
 status_set('maintenance', 'Upgrading packages to %s' % new_version)
 apt_install(packages=determine_packages(), fatal=True)
+ kick_function()
 # If the upgrade does not need an ownership update of any of the
 # directories in the osd service directory, then simply restart
@@ -2458,13 +2661,16 @@
 os.listdir(CEPH_BASE_DIR))
 non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x),
 non_osd_dirs)
- for path in non_osd_dirs:
+ for i, path in enumerate(non_osd_dirs):
+ if i % 100 == 0:
+ kick_function()
 update_owner(path)
 # Fast service restart wasn't an option because each of the OSD
 # directories needs the ownership updated for all the files on
 # the OSD. Walk through the OSDs one-by-one upgrading the OSD.
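Pulling the pieces together, the kick plumbing is wired up in lock_and_roll() and consumed in wait_on_previous_node(), both changed earlier in this patch. A condensed sketch (key names shortened for illustration; upgrade_key and version come from the surrounding rolling-upgrade context):

    # Upgrading node: publish liveness while the slow upgrade runs.
    dog = WatchDog(kick_interval=3 * 60,
                   kick_function=lambda: monitor_key_set(
                       upgrade_key, 'osd_node1_ver_alive', time.time()))
    upgrade_osd(version, kick_function=dog.kick_the_dog)

    # Waiting node: tolerate a long upgrade as long as the peer keeps
    # kicking; fall back to the legacy 10 minute timeout if it never does.
    WatchDog.timed_wait(
        kicked_at_function=lambda: monitor_key_get(
            upgrade_key, 'osd_node1_ver_alive'),
        complete_function=lambda: monitor_key_exists(
            upgrade_key, 'osd_node1_ver_done'),
        wait_time=30 * 60,
        compatibility_wait_time=10 * 60,
        max_kick_interval=5 * 60)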
for osd_dir in _get_child_dirs(OSD_BASE_DIR): + kick_function() try: osd_num = _get_osd_num_from_dirname(osd_dir) _upgrade_single_osd(osd_num, osd_dir) diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 6ef85dec..71318a8b 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -24,6 +24,7 @@ 'os', 'subprocess', 'mkdir', + 'service_name', ] @@ -31,6 +32,7 @@ class CephRadosGWCephTests(CharmTestCase): def setUp(self): super(CephRadosGWCephTests, self).setUp(ceph, TO_PATCH) self.config.side_effect = self.test_config.get + self.service_name.return_value = 'ceph-radosgw' def test_import_radosgw_key(self): self.os.path.exists.return_value = False @@ -138,6 +140,82 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, name='objects', permission='rwx') + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_erasure_profile') + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_erasure_pool') + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_request_access_to_group') + @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' + '.add_op_create_pool') + def test_create_rgw_pools_rq_no_prefix_ec(self, mock_broker, + mock_request_access, + mock_request_create_ec_pool, + mock_request_create_ec_profile): + self.test_config.set('rgw-lightweight-pool-pg-num', -1) + self.test_config.set('ceph-osd-replication-count', 3) + self.test_config.set('rgw-buckets-pool-weight', 19) + self.test_config.set('restrict-ceph-pools', True) + self.test_config.set('pool-type', 'erasure-coded') + self.test_config.set('ec-profile-k', 3) + self.test_config.set('ec-profile-m', 9) + self.test_config.set('ec-profile-technique', 'cauchy_good') + ceph.get_create_rgw_pools_rq(prefix=None) + mock_request_create_ec_profile.assert_called_once_with( + name='ceph-radosgw-profile', + k=3, m=9, + lrc_locality=None, + lrc_crush_locality=None, + shec_durability_estimator=None, + clay_helper_chunks=None, + clay_scalar_mds=None, + device_class=None, + erasure_type='jerasure', + erasure_technique='cauchy_good' + ) + mock_request_create_ec_pool.assert_has_calls([ + call(name='default.rgw.buckets.data', + erasure_profile='ceph-radosgw-profile', + weight=19, + group="objects", + app_name='rgw') + ]) + mock_broker.assert_has_calls([ + call(weight=0.10, replica_count=3, name='default.rgw.control', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.data.root', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.gc', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.log', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.intent-log', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.meta', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.usage', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.users.keys', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.users.email', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.users.swift', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.users.uid', + group='objects', app_name='rgw'), + call(weight=1.00, replica_count=3, + 
name='default.rgw.buckets.extra', + group='objects', app_name='rgw'), + call(weight=3.00, replica_count=3, + name='default.rgw.buckets.index', + group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='.rgw.root', + group='objects', app_name='rgw')], + ) + mock_request_access.assert_called_with(key_name='radosgw.gateway', + name='objects', + permission='rwx') + @patch.object(utils.apt_pkg, 'version_compare', lambda *args: -1) @patch.object(utils, 'lsb_release', lambda: {'DISTRIB_CODENAME': 'trusty'}) From 361ed04372c4e7a64090a9b9c2975cbb4f1d62d6 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Sat, 16 May 2020 22:40:48 +0200 Subject: [PATCH 2028/2699] Remove inherited configuration options invalid for charm A side effect of adding a config keyword in layer.yaml is that any deletes done by upstream layers are overridden, so we add the complete set of configuration options inherited that should not be included in the built charm. Change-Id: I91e4e52600e7d632bbcf62402e57ebf69b6a3ea5 Closes-Bug: #1879088 --- ceph-rbd-mirror/src/layer.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ceph-rbd-mirror/src/layer.yaml b/ceph-rbd-mirror/src/layer.yaml index cc2e549b..757f2093 100644 --- a/ceph-rbd-mirror/src/layer.yaml +++ b/ceph-rbd-mirror/src/layer.yaml @@ -7,3 +7,11 @@ options: basic: use_venv: True repo: https://github.com/openstack/charm-ceph-rbd-mirror +config: + deletes: + - debug + - ssl_ca + - ssl_cert + - ssl_key + - use-internal-endpoints + - verbose From 4f2169dce32fff8c38644680ffa58298e7e5bc3b Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 27 Jul 2020 14:56:09 +0100 Subject: [PATCH 2029/2699] Updates for improved EC support Sync charmhelpers and charms.openstack to pickup changes for improved Erasure Coded pool support. Update action code for EC profile creation for extended option support and other charmhelpers changes. Depends-On: I2547933964849f7af1c623b2fbc014fb332839ef Change-Id: Iec4de19f7b39f0b08158d96c5cc1561b40aefa10 --- ceph-mon/actions.yaml | 35 +- ceph-mon/actions/create_erasure_profile.py | 33 +- ceph-mon/actions/create_pool.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 84 + .../contrib/storage/linux/ceph.py | 1405 ++++++++++++----- ceph-mon/lib/charms_ceph/broker.py | 43 +- ceph-mon/unit_tests/test_ceph_ops.py | 43 +- 7 files changed, 1181 insertions(+), 466 deletions(-) diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 0081c6f0..d6e5e36a 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -55,6 +55,7 @@ create-pool: enum: - replicated - erasure + - erasure-coded description: "The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the objects or erasure to get a kind of generalized RAID5 capability." replicas: type: integer @@ -68,6 +69,9 @@ create-pool: type: integer default: 10 description: "The percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased." + allow-ec-overwrites: + type: boolean + description: "Permit overwrites for erasure coded pool types." 
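Erasure coded pools reject partial object overwrites by default, which rules them out as RBD or CephFS data pools; the new allow-ec-overwrites flag surfaces Ceph's per-pool allow_ec_overwrites setting for exactly that case. The action feeds it straight into the charmhelpers ErasurePool class, roughly as in this sketch (pool and profile names are illustrative; the real plumbing is in the create_pool.py hunk further down):

    from charmhelpers.contrib.storage.linux.ceph import ErasurePool

    pool = ErasurePool(service='admin',
                       name='glance-ec-data',
                       erasure_code_profile='myprofile',
                       app_name='rbd',
                       percent_data=20.0,
                       allow_ec_overwrites=True)
    # create() runs 'ceph osd pool create ... erasure <profile>' and then
    # sets allow_ec_overwrites=true on the new pool in _post_create().
    pool.create()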
required: - name additionalProperties: false @@ -111,7 +115,6 @@ create-erasure-profile: description: "The name of the profile" failure-domain: type: string - default: host enum: - chassis - datacenter @@ -133,6 +136,7 @@ create-erasure-profile: - isa - lrc - shec + - clay description: "The erasure plugin to use for this profile. See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details" data-chunks: type: integer @@ -144,10 +148,35 @@ create-erasure-profile: description: "The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions. If there are 2 coding chunks, it means 2 OSDs can be out without losing data." locality-chunks: type: integer - description: "Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 two groups of three are created. Each set can be recovered without reading chunks from another set." + description: "LRC plugin - Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3 two groups of three are created. Each set can be recovered without reading chunks from another set." + crush-locality: + type: string + enum: + - chassis + - datacenter + - host + - osd + - pdu + - pod + - rack + - region + - room + - root + - row + description: "LRC plugin - The type of CRUSH bucket in which each set of chunks defined by locality-chunks will be stored." durability-estimator: type: integer - description: "The number of parity chunks each of which includes each data chunk in its calculation range. The number is used as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data." + description: "SHEC plugin - the number of parity chunks each of which includes each data chunk in its calculation range. The number is used as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data." + helper-chunks: + type: integer + description: "CLAY plugin - number of OSDs requests to send data during recovery of a single chunk." + scalar-mds: + type: string + enum: + - jerasure + - isa + - shec + description: "CLAY plugin - specifies the plugin that is used as a building block in the layered construction." 
device-class: type: string enum: diff --git a/ceph-mon/actions/create_erasure_profile.py b/ceph-mon/actions/create_erasure_profile.py index 73ccfe0b..1cee8f3f 100755 --- a/ceph-mon/actions/create_erasure_profile.py +++ b/ceph-mon/actions/create_erasure_profile.py @@ -28,6 +28,8 @@ def make_erasure_profile(): plugin = action_get("plugin") failure_domain = action_get("failure-domain") device_class = action_get("device-class") + k = action_get("data-chunks") + m = action_get("coding-chunks") # jerasure requires k+m # isa requires k+m @@ -35,8 +37,6 @@ def make_erasure_profile(): # shec requires k+m+c if plugin == "jerasure": - k = action_get("data-chunks") - m = action_get("coding-chunks") try: create_erasure_profile(service='admin', erasure_plugin_name=plugin, @@ -50,8 +50,6 @@ def make_erasure_profile(): action_fail("Create erasure profile failed with " "message: {}".format(str(e))) elif plugin == "isa": - k = action_get("data-chunks") - m = action_get("coding-chunks") try: create_erasure_profile(service='admin', erasure_plugin_name=plugin, @@ -64,10 +62,9 @@ def make_erasure_profile(): log(e) action_fail("Create erasure profile failed with " "message: {}".format(str(e))) - elif plugin == "local": - k = action_get("data-chunks") - m = action_get("coding-chunks") + elif plugin == "lrc": l = action_get("locality-chunks") + crush_locality = action_get('crush-locality') try: create_erasure_profile(service='admin', erasure_plugin_name=plugin, @@ -75,6 +72,7 @@ def make_erasure_profile(): data_chunks=k, coding_chunks=m, locality=l, + crush_locality=crush_locality, failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: @@ -82,8 +80,6 @@ def make_erasure_profile(): action_fail("Create erasure profile failed with " "message: {}".format(str(e))) elif plugin == "shec": - k = action_get("data-chunks") - m = action_get("coding-chunks") c = action_get("durability-estimator") try: create_erasure_profile(service='admin', @@ -98,10 +94,27 @@ def make_erasure_profile(): log(e) action_fail("Create erasure profile failed with " "message: {}".format(str(e))) + elif plugin == "clay": + d = action_get("helper-chunks") + scalar_mds = action_get('scalar-mds') + try: + create_erasure_profile(service='admin', + erasure_plugin_name=plugin, + profile_name=name, + data_chunks=k, + coding_chunks=m, + helper_chunks=d, + scalar_mds=scalar_mds, + failure_domain=failure_domain, + device_class=device_class) + except CalledProcessError as e: + log(e) + action_fail("Create erasure profile failed with " + "message: {}".format(str(e))) else: # Unknown erasure plugin action_fail("Unknown erasure-plugin type of {}. 
" - "Only jerasure, isa, local or shec is " + "Only jerasure, isa, lrc, shec or clay is " "allowed".format(plugin)) diff --git a/ceph-mon/actions/create_pool.py b/ceph-mon/actions/create_pool.py index ae6b00c2..40686c2f 100755 --- a/ceph-mon/actions/create_pool.py +++ b/ceph-mon/actions/create_pool.py @@ -38,13 +38,15 @@ def create_pool(): ) replicated_pool.create() - elif pool_type == "erasure": + elif pool_type in ("erasure", "erasure-coded"): crush_profile_name = action_get("erasure-profile-name") + allow_ec_overwrites = action_get("allow-ec-overwrites") erasure_pool = ErasurePool(name=pool_name, erasure_code_profile=crush_profile_name, service='admin', app_name=app_name, percent_data=float(percent_data), + allow_ec_overwrites=allow_ec_overwrites, ) erasure_pool.create() else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 42abccf7..0e41a9f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -29,6 +29,8 @@ import six +import charmhelpers.contrib.storage.linux.ceph as ch_ceph + from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( _config_ini as config_ini ) @@ -56,6 +58,7 @@ status_set, network_get_primary_address, WARNING, + service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -808,6 +811,12 @@ def __call__(self): ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + if config('pool-type') and config('pool-type') == 'erasure-coded': + base_pool_name = config('rbd-pool') or config('rbd-pool-name') + if not base_pool_name: + base_pool_name = service_name() + ctxt['rbd_default_data_pool'] = base_pool_name + if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -3175,3 +3184,78 @@ def __call__(self): :rtype: Dict[str,int] """ return self._map + + +class CephBlueStoreCompressionContext(OSContextGenerator): + """Ceph BlueStore compression options.""" + + # Tuple with Tuples that map configuration option name to CephBrokerRq op + # property name + options = ( + ('bluestore-compression-algorithm', + 'compression-algorithm'), + ('bluestore-compression-mode', + 'compression-mode'), + ('bluestore-compression-required-ratio', + 'compression-required-ratio'), + ('bluestore-compression-min-blob-size', + 'compression-min-blob-size'), + ('bluestore-compression-min-blob-size-hdd', + 'compression-min-blob-size-hdd'), + ('bluestore-compression-min-blob-size-ssd', + 'compression-min-blob-size-ssd'), + ('bluestore-compression-max-blob-size', + 'compression-max-blob-size'), + ('bluestore-compression-max-blob-size-hdd', + 'compression-max-blob-size-hdd'), + ('bluestore-compression-max-blob-size-ssd', + 'compression-max-blob-size-ssd'), + ) + + def __init__(self): + """Initialize context by loading values from charm config. + + We keep two maps, one suitable for use with CephBrokerRq's and one + suitable for template generation. + """ + charm_config = config() + + # CephBrokerRq op map + self.op = {} + # Context exposed for template generation + self.ctxt = {} + for config_key, op_key in self.options: + value = charm_config.get(config_key) + self.ctxt.update({config_key.replace('-', '_'): value}) + self.op.update({op_key: value}) + + def __call__(self): + """Get context. + + :returns: Context + :rtype: Dict[str,any] + """ + return self.ctxt + + def get_op(self): + """Get values for use in CephBrokerRq op. + + :returns: Context values with CephBrokerRq op property name as key. 
+ :rtype: Dict[str,any] + """ + return self.op + + def validate(self): + """Validate options. + + :raises: AssertionError + """ + # We slip in a dummy name on class instantiation to allow validation of + # the other options. It will not affect further use. + # + # NOTE: once we retire Python 3.5 we can fold this into a in-line + # dictionary comprehension in the call to the initializer. + dummy_op = {'name': 'dummy-name'} + dummy_op.update(self.op) + pool = ch_ceph.BasePool('dummy-service', op=dummy_op) + pool.validate() diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 814d5c72..d9d43578 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ check_output, CalledProcessError, ) +from charmhelpers import deprecate from charmhelpers.core.hookenv import ( config, service_name, @@ -178,94 +179,293 @@ def send_osd_settings(): def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + """Helper function for type validation. + + Used to validate these: + https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + Example input: validator(value=1, valid_type=int, valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] - :param value: The value to validate + :param value: The value to validate. + :type value: any :param valid_type: The type that value should be. + :type valid_type: any :param valid_range: A range of values that value can assume. - :return: + :type valid_range: Optional[Union[List,Tuple]] + :raises: AssertionError, ValueError """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) + assert isinstance(value, valid_type), ( + "{} is not a {}".format(value, valid_type)) if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) + assert isinstance( + valid_range, list) or isinstance(valid_range, tuple), ( + "valid_range must be of type List or Tuple, " + "was given {} of type {}" + .format(valid_range, type(valid_range))) # If we're dealing with strings if isinstance(value, six.string_types): - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) + assert value in valid_range, ( + "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max else: if len(valid_range) != 2: raise ValueError( - "Invalid valid_range list of {} for {}. " + "Invalid valid_range list of {} for {}. " "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) + assert value >= valid_range[0], ( + "{} is less than minimum allowed value of {}" + .format(value, valid_range[0])) + assert value <= valid_range[1], ( + "{} is greater than maximum allowed value of {}" + .format(value, valid_range[1])) class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. 
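The reworked validator() now accepts a tuple as well as a list for valid_range, which the op_validation_map introduced below depends on. Illustrative calls:

    validator(value=1, valid_type=int, valid_range=[0, 2])     # passes
    validator(value='lz4', valid_type=str,
              valid_range=('lz4', 'snappy', 'zlib', 'zstd'))   # passes; tuple ranges are new
    validator(value=3.5, valid_type=float,
              valid_range=[0.0, 1.0])                          # AssertionError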
Provides an error message + """A custom exception to inform the caller that a pool creation failed. + + Provides an error message """ def __init__(self, message): super(PoolCreationError, self).__init__(message) -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). +class BasePool(object): + """An object oriented approach to Ceph pool creation. + + This base class is inherited by ReplicatedPool and ErasurePool. Do not call + create() on this base class as it will raise an exception. + + Instantiate a child class and call create(). """ + # Dictionary that maps pool operation properties to Tuples with valid type + # and valid range + op_validation_map = { + 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), + 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), + 'compression-required-ratio': (float, None), + 'compression-min-blob-size': (int, None), + 'compression-min-blob-size-hdd': (int, None), + 'compression-min-blob-size-ssd': (int, None), + 'compression-max-blob-size': (int, None), + 'compression-max-blob-size-hdd': (int, None), + 'compression-max-blob-size-ssd': (int, None), + } - def __init__(self, service, name): + def __init__(self, service, name=None, percent_data=None, app_name=None, + op=None): + """Initialize BasePool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + :param service: The Ceph user name to run commands under. + :type service: str + :param name: Name of pool to operate on. + :type name: str + :param percent_data: The expected pool size in relation to all + available resources in the Ceph cluster. Will be + used to set the ``target_size_ratio`` pool + property. (default: 10.0) + :type percent_data: Optional[float] + :param app_name: Ceph application name, usually one of: + ('cephfs', 'rbd', 'rgw') (default: 'unknown') + :type app_name: Optional[str] + :param op: Broker request Op to compile pool data from. + :type op: Optional[Dict[str,any]] + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.name = name + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + self.op = op or {} + + if op: + # When initializing from op the `name` attribute is required and we + # will fail with KeyError if it is not provided. + self.name = op['name'] + self.percent_data = op.get('weight') + self.app_name = op.get('app-name') + else: + self.name = name + self.percent_data = percent_data + self.app_name = app_name + + # Set defaults for these if they are not provided + self.percent_data = self.percent_data or 10.0 + self.app_name = self.app_name or 'unknown' + + def validate(self): + """Check that value of supplied operation parameters are valid. + + :raises: ValueError + """ + for op_key, op_value in self.op.items(): + if op_key in self.op_validation_map and op_value is not None: + valid_type, valid_range = self.op_validation_map[op_key] + try: + validator(op_value, valid_type, valid_range) + except (AssertionError, ValueError) as e: + # Normalize on ValueError, also add information about which + # variable we had an issue with. 
+ raise ValueError("'{}': {}".format(op_key, str(e))) + + def _create(self): + """Perform the pool creation, method MUST be overridden by child class. + """ + raise NotImplementedError + + def _post_create(self): + """Perform common post pool creation tasks. + + Note that pool properties subject to change during the lifetime of a + pool / deployment should go into the ``update`` method. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + if self.nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool( + client=self.service, + pool=self.name, + settings={ + 'target_size_ratio': str( + self.percent_data / 100.0), + }) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}' + .format(self.name), + level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}' + .format(self.name, e), + level=WARNING) - # Create the pool if it doesn't exist already - # To be implemented by subclasses def create(self): - pass + """Create pool and perform any post pool creation tasks. - def add_cache_tier(self, cache_pool, mode): + To allow for sharing of common code among pool specific classes the + processing has been broken out into the private methods ``_create`` + and ``_post_create``. + + Do not add any pool type specific handling here, that should go into + one of the pool specific classes. """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None + if not pool_exists(self.service, self.name): + self.validate() + self._create() + self._post_create() + self.update() + + def set_quota(self): + """Set a quota if requested. + + :raises: CalledProcessError + """ + max_bytes = self.op.get('max-bytes') + max_objects = self.op.get('max-objects') + if max_bytes or max_objects: + set_pool_quota(service=self.service, pool_name=self.name, + max_bytes=max_bytes, max_objects=max_objects) + + def set_compression(self): + """Set compression properties if requested. + + :raises: CalledProcessError + """ + compression_properties = { + key.replace('-', '_'): value + for key, value in self.op.items() + if key in ( + 'compression-algorithm', + 'compression-mode', + 'compression-required-ratio', + 'compression-min-blob-size', + 'compression-min-blob-size-hdd', + 'compression-min-blob-size-ssd', + 'compression-max-blob-size', + 'compression-max-blob-size-hdd', + 'compression-max-blob-size-ssd') and value} + if compression_properties: + update_pool(self.service, self.name, compression_properties) + + def update(self): + """Update properties for an already existing pool. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + self.validate() + self.set_quota() + self.set_compression() + + def add_cache_tier(self, cache_pool, mode): + """Adds a new cache tier to an existing pool. + + :param cache_pool: The cache tier pool name to add. + :type cache_pool: str + :param mode: The caching mode to use for this pool. 
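Because a pool object can now be built straight from a broker op, the receiving side can validate and apply a request without hand-mapping each field. A sketch using the op keys defined elsewhere in this patch and the ReplicatedPool subclass that follows; the pool name and quota are illustrative:

    op = {
        'op': 'create-pool',
        'name': 'cinder-ceph',
        'replicas': 3,                  # required when building from an op
        'weight': 40.0,
        'app-name': 'rbd',
        'compression-mode': 'aggressive',
        'max-bytes': 1099511627776,     # 1 TiB
    }
    pool = ReplicatedPool('admin', op=op)
    pool.validate()   # ValueError if e.g. compression-mode is out of range
    # create() is safe to repeat: a missing pool goes through _create() and
    # _post_create(), and update() reapplies quota and compression each time.
    pool.create()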
+ valid range = ["readonly", "writeback"] + :type mode: str """ # Check the input types and values validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + validator( + value=mode, valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'add', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, mode, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'set-overlay', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', + ]) def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. - :return: None + """Removes a cache tier from Ceph. + + Flushes all dirty objects from writeback pools and waits for that to + complete. + + :param cache_pool: The cache tier pool name to remove. + :type cache_pool: str """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, 'none' + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool, + ]) elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', @@ -276,9 +476,15 @@ def remove_cache_tier(self, cache_pool): check_call(pool_forward_cmd) # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'rados', '--id', self.service, + '-p', cache_pool, 'cache-flush-evict-all']) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove-overlay', self.name]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool]) def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, device_class=None): @@ -305,19 +511,23 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, selected for the specific rule, rather it is left to the user to tune in the form of 'expected-osd-count' config option. - :param pool_size: int. pool_size is either the number of replicas for + :param pool_size: pool_size is either the number of replicas for replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. 
the percentage of data that is expected to + :type pool_size: int + :param percent_data: the percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. - :param device_class: str. class of storage to use for basis of pgs + :type percent_data: float + :param device_class: class of storage to use for basis of pgs calculation; ceph supports nvme, ssd and hdd by default based on presence of devices of each type in the deployment. - :return: int. The number of pgs to use. + :type device_class: str + :returns: The number of pgs to use. + :rtype: int """ # Note: This calculation follows the approach that is provided @@ -357,7 +567,8 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return LEGACY_PG_COUNT percent_data /= 100.0 - target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + target_pgs_per_osd = config( + 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size # NOTE: ensure a sane minimum number of PGS otherwise we don't get any @@ -380,147 +591,174 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return int(nearest) -class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0, app_name=None): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - self.percent_data = percent_data - if pg_num: +class Pool(BasePool): + """Compability shim for any descendents external to this library.""" + + @deprecate( + 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') + def __init__(self, service, name): + super(Pool, self).__init__(service, name=name) + + def create(self): + pass + + +class ReplicatedPool(BasePool): + def __init__(self, service, name=None, pg_num=None, replicas=None, + percent_data=None, app_name=None, op=None): + """Initialize ReplicatedPool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param pg_num: Express wish for number of Placement Groups (this value + is subject to validation against a running cluster prior + to use to avoid creating a pool with too many PGs) + :type pg_num: int + :param replicas: Number of copies there should be of each object added + to this replicated pool. + :type replicas: int + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parents initializer + super(ReplicatedPool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # When initializing from op `replicas` is a required attribute, and + # we will fail with KeyError if it is not provided. 
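For intuition on the get_pgs() arithmetic retained above: with 12 OSDs, the default target of 100 placement groups per OSD, three replicas, and a pool expected to hold 40% of the data, the raw figure is (100 * 12 * 0.40) // 3 = 160, which the power-of-two rounding then settles at 128 (160 exceeds 128 by 32, which does not exceed the 25% tolerance that would bump it to 256 in the upstream charmhelpers rounding logic):

    >>> (100 * 12 * 0.40) // 3
    160.0
    >>> 2 ** 7    # nearest power of two within tolerance
    128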
+ self.replicas = op['replicas'] + self.pg_num = op.get('pg_num') + else: + self.replicas = replicas or 2 + self.pg_num = pg_num + + def _create(self): + # Do extra validation on pg_num with data from live cluster + if self.pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) + self.pg_num = min(self.pg_num, max_pgs) else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - if app_name: - self.app_name = app_name + self.pg_num = self.get_pgs(self.replicas, self.percent_data) + + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] else: - self.app_name = 'unknown' + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + check_call(cmd) - def create(self): - if not pool_exists(self.service, self.name): - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num) - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) - ] + def _post_create(self): + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + # Perform other common post pool creation tasks + super(ReplicatedPool, self)._post_create() - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0, app_name=None): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
- erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, pgs) - ), - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] +class ErasurePool(BasePool): + """Default jerasure erasure coded pool.""" - try: - check_call(cmd) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" + def __init__(self, service, name=None, erasure_code_profile=None, + percent_data=None, app_name=None, op=None, + allow_ec_overwrites=False): + """Initialize ReplicatedPool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param erasure_code_profile: EC Profile to use (default: 'default') + :type erasure_code_profile: Optional[str] + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parents initializer + super(ErasurePool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # Note that the different default when initializing from op stems + # from different handling of this in the `charms.ceph` library. + self.erasure_code_profile = op.get('erasure-profile', + 'default-canonical') + else: + # We keep the class default when initialized from keyword arguments + # to not break the API for any other consumers. + self.erasure_code_profile = erasure_code_profile or 'default' + + self.allow_ec_overwrites = allow_ec_overwrites + + def _create(self): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. 
+ erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + check_call(cmd) + + def _post_create(self): + super(ErasurePool, self)._post_create() + if self.allow_ec_overwrites: + update_pool(self.service, self.name, + {'allow_ec_overwrites': 'true'}) def enabled_manager_modules(): @@ -541,22 +779,28 @@ def enabled_manager_modules(): def enable_pg_autoscale(service, pool_name): - """ - Enable Ceph's PG autoscaler for the specified pool. + """Enable Ceph's PG autoscaler for the specified pool. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types. The name of the pool to enable sutoscaling on - :raise: CalledProcessError if the command fails + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: The name of the pool to enable sutoscaling on + :type pool_name: str + :raises: CalledProcessError if the command fails """ - check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + check_call([ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + """Return the current monitor map. + + :param service: The Ceph user name to run the command under + :type service: str + :returns: Dictionary with monitor map data + :rtype: Dict[str,any] + :raises: ValueError if the monmap fails to parse, CalledProcessError if our + ceph command fails. """ try: mon_status = check_output(['ceph', '--id', service, @@ -576,17 +820,16 @@ def get_mon_map(service): def hash_monitor_names(service): - """ + """Get a sorted list of monitor hashes in ascending order. + Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. The Ceph user name to run the command under - :rtype : dict. json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} + cluster. Hash the name of each monitor. + + :param service: The Ceph user name to run the command under. 
+ :type service: str + :returns: a sorted list of monitor hashes in an ascending order. + :rtype : List[str] + :raises: CalledProcessError, ValueError """ try: hash_list = [] @@ -603,46 +846,56 @@ def hash_monitor_names(service): def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command under + """Delete a key and value pair from the monitor cluster. + Deletes a key value pair on the monitor cluster. - :param key: six.string_types. The key to delete. + + :param service: The Ceph user name to run the command under + :type service: str + :param key: The key to delete. + :type key: str + :raises: CalledProcessError """ try: check_output( ['ceph', '--id', service, 'config-key', 'del', str(key)]) except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) + log("Monitor config-key put failed with message: {}" + .format(e.output)) raise def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to set. - :param value: The value to set. This will be converted to a string - before setting + """Set a key value pair on the monitor cluster. + + :param service: The Ceph user name to run the command under. + :type service str + :param key: The key to set. + :type key: str + :param value: The value to set. This will be coerced into a string. + :type value: str + :raises: CalledProcessError """ try: check_output( ['ceph', '--id', service, 'config-key', 'put', str(key), str(value)]) except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) + log("Monitor config-key put failed with message: {}" + .format(e.output)) raise def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for. + """Get the value of an existing key in the monitor cluster. + + :param service: The Ceph user name to run the command under + :type service: str + :param key: The key to search for. + :type key: str :return: Returns the value of that key or None if not found. + :rtype: Optional[str] """ try: output = check_output( @@ -650,19 +903,21 @@ def monitor_key_get(service, key): 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format( - e.output)) + log("Monitor config-key get failed with message: {}" + .format(e.output)) return None def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs + """Search for existence of key in the monitor cluster. + + :param service: The Ceph user name to run the command under. + :type service: str + :param key: The key to search for. + :type key: str + :return: Returns True if the key exists, False if not. + :rtype: bool + :raises: CalledProcessError if an unknown error occurs. 
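The monitor config-key helpers above give charms a small cluster-wide key/value store, and they are also what upgrade liveness kicks ride on. A round trip, with an illustrative key name:

    import time

    monitor_key_set('admin', 'upgrade_ts', str(time.time()))
    if monitor_key_exists('admin', 'upgrade_ts'):
        started = float(monitor_key_get('admin', 'upgrade_ts'))
    monitor_key_delete('admin', 'upgrade_ts')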
""" try: check_call( @@ -675,16 +930,20 @@ def monitor_key_exists(service, key): if e.returncode == errno.ENOENT: return False else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) + log("Unknown error from ceph config-get exists: {} {}" + .format(e.returncode, e.output)) raise def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: + """Get an existing erasure code profile if it exists. + + :param service: The Ceph user name to run the command under. + :type service: str + :param name: Name of profile. + :type name: str + :returns: Dictionary with profile data. + :rtype: Optional[Dict[str]] """ try: out = check_output(['ceph', '--id', service, @@ -698,54 +957,61 @@ def get_erasure_profile(service, name): def pool_set(service, pool_name, key, value): + """Sets a value for a RADOS pool in ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to set property on. + :type pool_name: str + :param key: Property key. + :type key: str + :param value: Value, will be coerced into str and shifted to lowercase. + :type value: str + :raises: CalledProcessError """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, - str(value).lower()] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, key, str(value).lower()] + check_call(cmd) def snapshot_pool(service, pool_name, snapshot_name): + """Snapshots a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to snapshot. + :type pool_name: str + :param snapshot_name: Name of snapshot to create. + :type snapshot_name: str + :raises: CalledProcessError """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + check_call(cmd) def remove_pool_snapshot(service, pool_name, snapshot_name): + """Remove a snapshot from a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove snapshot from. + :type pool_name: str + :param snapshot_name: Name of snapshot to remove. + :type snapshot_name: str + :raises: CalledProcessError """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + check_call(cmd) def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): - """ + """Set byte quota on a RADOS pool in Ceph. + :param service: The Ceph user name to run the command under :type service: str :param pool_name: Name of pool @@ -756,7 +1022,9 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): :type max_objects: int :raises: subprocess.CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name] if max_bytes: cmd = cmd + ['max_bytes', str(max_bytes)] if max_objects: @@ -765,119 +1033,216 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): def remove_pool_quota(service, pool_name): + """Remove byte quota on a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove quota from. + :type pool_name: str + :raises: CalledProcessError """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + check_call(cmd) def remove_erasure_profile(service, profile_name): + """Remove erasure code profile. + + :param service: The Ceph user name to run the command under + :type service: str + :param profile_name: Name of profile to remove. + :type profile_name: str + :raises: CalledProcessError """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'rm', profile_name] + check_call(cmd) -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', - failure_domain='host', +def create_erasure_profile(service, profile_name, + erasure_plugin_name='jerasure', + failure_domain=None, data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None, - device_class=None): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
- 'room', 'root', 'row'])
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :param device_class: six.string_types
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+ helper_chunks=None,
+ scalar_mds=None,
+ crush_locality=None,
+ device_class=None,
+ erasure_plugin_technique=None):
+ """Create a new erasure code profile if one does not already exist for it.
+
+ Updates the profile if it exists. Please refer to [0] for more details.
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
- ]
- if locality is not None and durability_estimator is not None:
- raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+ 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+
+ :param service: The Ceph user name to run the command under.
+ :type service: str
+ :param profile_name: Name of profile.
+ :type profile_name: str
+ :param erasure_plugin_name: Erasure code plugin.
+ :type erasure_plugin_name: str
+ :param failure_domain: Failure domain, one of:
+ ('chassis', 'datacenter', 'host', 'osd', 'pdu',
+ 'pod', 'rack', 'region', 'room', 'root', 'row').
+ :type failure_domain: str
+ :param data_chunks: Number of data chunks.
+ :type data_chunks: int
+ :param coding_chunks: Number of coding chunks.
+ :type coding_chunks: int
+ :param locality: Locality.
+ :type locality: int
+ :param durability_estimator: Durability estimator.
+ :type durability_estimator: int
+ :param helper_chunks: Number of helper chunks.
+ :type helper_chunks: int
+ :param device_class: Restrict placement to devices of specific class.
+ :type device_class: str
+ :param scalar_mds: one of ['isa', 'jerasure', 'shec']
+ :type scalar_mds: str
+ :param crush_locality: LRC locality failure domain, one of:
+ ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod',
+ 'rack', 'region', 'room', 'root', 'row') or unset.
+ :type crush_locality: str
+ :param erasure_plugin_technique: Coding technique for EC plugin
+ :type erasure_plugin_technique: str
+ :return: None.
Can raise CalledProcessError, ValueError or AssertionError + """ + plugin_techniques = { + 'jerasure': [ + 'reed_sol_van', + 'reed_sol_r6_op', + 'cauchy_orig', + 'cauchy_good', + 'liberation', + 'blaum_roth', + 'liber8tion' + ], + 'lrc': [], + 'isa': [ + 'reed_sol_van', + 'cauchy', + ], + 'shec': [ + 'single', + 'multiple' + ], + 'clay': [], + } + failure_domains = [ + 'chassis', 'datacenter', + 'host', 'osd', + 'pdu', 'pod', + 'rack', 'region', + 'room', 'root', + 'row', + ] + device_classes = [ + 'ssd', + 'hdd', + 'nvme' + ] + + validator(erasure_plugin_name, six.string_types, + list(plugin_techniques.keys())) + + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin={}'.format(erasure_plugin_name), + 'k={}'.format(str(data_chunks)), + 'm={}'.format(str(coding_chunks)), + ] + + if erasure_plugin_technique: + validator(erasure_plugin_technique, six.string_types, + plugin_techniques[erasure_plugin_name]) + cmd.append('technique={}'.format(erasure_plugin_technique)) luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - # failure_domain changed in luminous - if luminous_or_later: - cmd.append('crush-failure-domain=' + failure_domain) - else: - cmd.append('ruleset-failure-domain=' + failure_domain) + + # Set failure domain from options if not provided in args + if not failure_domain and config('customize-failure-domain'): + # Defaults to 'host' so just need to deal with + # setting 'rack' if feature is enabled + failure_domain = 'rack' + + if failure_domain: + validator(failure_domain, six.string_types, failure_domains) + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain={}'.format(failure_domain)) + else: + cmd.append('ruleset-failure-domain={}'.format(failure_domain)) # device class new in luminous if luminous_or_later and device_class: + validator(device_class, six.string_types, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', level=DEBUG) # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) + if erasure_plugin_name == 'lrc': + # LRC mandatory configuration + if locality: + cmd.append('l={}'.format(str(locality))) + else: + raise ValueError("locality must be provided for lrc plugin") + # LRC optional configuration + if crush_locality: + validator(crush_locality, six.string_types, failure_domains) + cmd.append('crush-locality={}'.format(crush_locality)) + + if erasure_plugin_name == 'shec': + # SHEC optional configuration + if durability_estimator: + cmd.append('c={}'.format((durability_estimator))) + + if erasure_plugin_name == 'clay': + # CLAY optional configuration + if helper_chunks: + cmd.append('d={}'.format(str(helper_chunks))) + if scalar_mds: + cmd.append('scalar-mds={}'.format(scalar_mds)) if erasure_profile_exists(service, profile_name): cmd.append('--force') - try: - check_call(cmd) - except CalledProcessError: - raise + check_call(cmd) def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None + """Rename a Ceph pool from old_name to new_name. 
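Two illustrative invocations of the reworked create_erasure_profile(): the first mirrors the radosgw unit test earlier in this patch (jerasure with an explicit coding technique), the second exercises the LRC path, which now insists on locality and validates any crush_locality against the same failure-domain list:

    # jerasure with an explicit technique, pinned to SSDs
    create_erasure_profile(service='admin',
                           profile_name='ceph-radosgw-profile',
                           erasure_plugin_name='jerasure',
                           erasure_plugin_technique='cauchy_good',
                           data_chunks=3,
                           coding_chunks=9,
                           device_class='ssd')

    # lrc: locality is mandatory (ValueError otherwise); crush-locality
    # constrains where each locality group of chunks is placed
    create_erasure_profile(service='admin',
                           profile_name='lrc-profile',
                           erasure_plugin_name='lrc',
                           data_chunks=4,
                           coding_chunks=2,
                           locality=3,
                           crush_locality='rack')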
+ + :param service: The Ceph user name to run the command under. + :type service: str + :param old_name: Name of pool subject to rename. + :type old_name: str + :param new_name: Name to rename pool to. + :type new_name: str """ validator(value=old_name, valid_type=six.string_types) validator(value=new_name, valid_type=six.string_types) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command under - :param name: six.string_types - :return: int or None + """Check to see if an Erasure code profile already exists. + + :param service: The Ceph user name to run the command under + :type service: str + :param name: Name of profile to look for. + :type name: str + :returns: True if it exists, False otherwise. + :rtype: bool """ validator(value=name, valid_type=six.string_types) try: @@ -890,11 +1255,14 @@ def erasure_profile_exists(service, name): def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: int or None + """Find the current caching mode of the pool_name given. + + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool. + :type pool_name: str + :returns: Current cache mode. + :rtype: Optional[int] """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) @@ -976,17 +1344,23 @@ def create_rbd_image(service, pool, image, sizemb): def update_pool(client, pool, settings): + """Update pool properties. + + :param client: Client/User-name to authenticate with. + :type client: str + :param pool: Name of pool to operate on + :type pool: str + :param settings: Dictionary with key/value pairs to set. + :type settings: Dict[str, str] + :raises: CalledProcessError + """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) + check_call(cmd + [k, v]) def set_app_name_for_pool(client, pool, name): - """ - Calls `osd pool application enable` for the specified pool name + """Calls `osd pool application enable` for the specified pool name :param client: Name of the ceph client to use :type client: str @@ -1043,8 +1417,7 @@ def _keyring_path(service): def add_key(service, key): - """ - Add a key to a keyring. + """Add a key to a keyring. Creates the keyring if it doesn't already exist. @@ -1288,13 +1661,33 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id + def __init__(self, api_version=1, request_id=None, raw_request_data=None): + """Initialize CephBrokerRq object. + + Builds a new empty request or rebuilds a request from on-wire JSON + data. + + :param api_version: API version for request (default: 1). + :type api_version: Optional[int] + :param request_id: Unique identifier for request. + (default: string representation of generated UUID) + :type request_id: Optional[str] + :param raw_request_data: JSON-encoded string to build request from. 
+ :type raw_request_data: Optional[str] + :raises: KeyError + """ + if raw_request_data: + request_data = json.loads(raw_request_data) + self.api_version = request_data['api-version'] + self.request_id = request_data['request-id'] + self.set_ops(request_data['ops']) else: - self.request_id = str(uuid.uuid1()) - self.ops = [] + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] def add_op(self, op): """Add an op if it is not already in the list. @@ -1336,12 +1729,119 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, group=group, namespace=namespace, app_name=app_name, max_bytes=max_bytes, max_objects=max_objects) + # Use function parameters and docstring to define types in a compatible + # manner. + # + # NOTE: Our caller should always use a kwarg Dict when calling us so + # no need to maintain fixed order/position for parameters. Please keep them + # sorted by name when adding new ones. + def _partial_build_common_op_create(self, + app_name=None, + compression_algorithm=None, + compression_mode=None, + compression_required_ratio=None, + compression_min_blob_size=None, + compression_min_blob_size_hdd=None, + compression_min_blob_size_ssd=None, + compression_max_blob_size=None, + compression_max_blob_size_hdd=None, + compression_max_blob_size_ssd=None, + group=None, + max_bytes=None, + max_objects=None, + namespace=None, + weight=None): + """Build common part of a create pool operation. + + :param app_name: Tag pool with application name. Note that there is + certain protocols emerging upstream with regard to + meaningful application names to use. + Examples are 'rbd' and 'rgw'. + :type app_name: Optional[str] + :param compression_algorithm: Compressor to use, one of: + ('lz4', 'snappy', 'zlib', 'zstd') + :type compression_algorithm: Optional[str] + :param compression_mode: When to compress data, one of: + ('none', 'passive', 'aggressive', 'force') + :type compression_mode: Optional[str] + :param compression_required_ratio: Minimum compression ratio for data + chunk, if the requested ratio is not + achieved the compressed version will + be thrown away and the original + stored. + :type compression_required_ratio: Optional[float] + :param compression_min_blob_size: Chunks smaller than this are never + compressed (unit: bytes). + :type compression_min_blob_size: Optional[int] + :param compression_min_blob_size_hdd: Chunks smaller than this are not + compressed when destined to + rotational media (unit: bytes). + :type compression_min_blob_size_hdd: Optional[int] + :param compression_min_blob_size_ssd: Chunks smaller than this are not + compressed when destined to flash + media (unit: bytes). + :type compression_min_blob_size_ssd: Optional[int] + :param compression_max_blob_size: Chunks larger than this are broken + into N * compression_max_blob_size + chunks before being compressed + (unit: bytes). + :type compression_max_blob_size: Optional[int] + :param compression_max_blob_size_hdd: Chunks larger than this are + broken into + N * compression_max_blob_size_hdd + chunks before being compressed + when destined for rotational + media (unit: bytes) + :type compression_max_blob_size_hdd: Optional[int] + :param compression_max_blob_size_ssd: Chunks larger than this are + broken into + N * compression_max_blob_size_ssd + chunks before being compressed + when destined for flash media + (unit: bytes). 
+ :type compression_max_blob_size_ssd: Optional[int] + :param group: Group to add pool to + :type group: Optional[str] + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: Optional[int] + :param max_objects: Maximum objects quota to apply + :type max_objects: Optional[int] + :param namespace: Group namespace + :type namespace: Optional[str] + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: Optional[float] + :returns: Dictionary with kwarg name as key. + :rtype: Dict[str,any] + :raises: AssertionError + """ + return { + 'app-name': app_name, + 'compression-algorithm': compression_algorithm, + 'compression-mode': compression_mode, + 'compression-required-ratio': compression_required_ratio, + 'compression-min-blob-size': compression_min_blob_size, + 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, + 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, + 'compression-max-blob-size': compression_max_blob_size, + 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, + 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, + 'group': group, + 'max-bytes': max_bytes, + 'max-objects': max_objects, + 'group-namespace': namespace, + 'weight': weight, + } + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, - max_objects=None): + **kwargs): """Adds an operation to create a replicated pool. + Refer to docstring for ``_partial_build_common_op_create`` for + documentation of keyword arguments. + :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. @@ -1349,66 +1849,114 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: float - :param group: Group to add pool to - :type group: str - :param namespace: Group namespace - :type namespace: str - :param app_name: (Optional) Tag pool with application name. Note that - there is certain protocols emerging upstream with - regard to meaningful application names to use. - Examples are ``rbd`` and ``rgw``. - :type app_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int + :raises: AssertionError if provided data is of invalid type/range """ - if pg_num and weight: + if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') - self.add_op({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + op = { + 'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + } + op.update(self._partial_build_common_op_create(**kwargs)) + + # Initialize Pool-object to validate type and range of ops. 
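+        # validate() raises AssertionError if any op value has the wrong
+        # type or is out of range; 'dummy-service' is only a placeholder,
+        # as no Ceph command is executed while the request is being built.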
+        pool = ReplicatedPool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)

     def add_op_create_erasure_pool(self, name, erasure_profile=None,
-                                   weight=None, group=None, app_name=None,
-                                   max_bytes=None, max_objects=None):
+                                   allow_ec_overwrites=False, **kwargs):
         """Adds an operation to create an erasure coded pool.

+        Refer to docstring for ``_partial_build_common_op_create`` for
+        documentation of keyword arguments.
+
         :param name: Name of pool to create
         :type name: str
         :param erasure_profile: Name of erasure code profile to use.  If not
                                 set the ceph-mon unit handling the broker
                                 request will set its default value.
         :type erasure_profile: str
-        :param weight: The percentage of data that is expected to be contained
-                       in the pool from the total available space on the OSDs.
-        :type weight: float
-        :param group: Group to add pool to
-        :type group: str
-        :param app_name: (Optional) Tag pool with application name.  Note that
-                         there is certain protocols emerging upstream with
-                         regard to meaningful application names to use.
-                         Examples are ``rbd`` and ``rgw``.
-        :type app_name: str
-        :param max_bytes: Maximum bytes quota to apply
-        :type max_bytes: int
-        :param max_objects: Maximum objects quota to apply
-        :type max_objects: int
+        :param allow_ec_overwrites: allow overwrites on the EC pool
+        :type allow_ec_overwrites: bool
+        :raises: AssertionError if provided data is of invalid type/range
         """
-        self.add_op({'op': 'create-pool', 'name': name,
-                     'pool-type': 'erasure',
-                     'erasure-profile': erasure_profile,
-                     'weight': weight,
-                     'group': group, 'app-name': app_name,
-                     'max-bytes': max_bytes, 'max-objects': max_objects})
+        op = {
+            'op': 'create-pool',
+            'name': name,
+            'pool-type': 'erasure',
+            'erasure-profile': erasure_profile,
+            'allow-ec-overwrites': allow_ec_overwrites,
+        }
+        op.update(self._partial_build_common_op_create(**kwargs))
+
+        # Initialize Pool-object to validate type and range of ops.
+        pool = ErasurePool('dummy-service', op)
+        pool.validate()
+
+        self.add_op(op)
+
+    def add_op_create_erasure_profile(self, name,
+                                      erasure_type='jerasure',
+                                      erasure_technique=None,
+                                      k=None, m=None,
+                                      failure_domain=None,
+                                      lrc_locality=None,
+                                      shec_durability_estimator=None,
+                                      clay_helper_chunks=None,
+                                      device_class=None,
+                                      clay_scalar_mds=None,
+                                      lrc_crush_locality=None):
+        """Adds an operation to create an erasure coding profile.
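+
+        The ceph-mon unit handling the request passes these parameters on
+        to ``ceph osd erasure-code-profile set``.  A minimal illustrative
+        request (profile name and values are arbitrary)::
+
+            rq = CephBrokerRq()
+            rq.add_op_create_erasure_profile(
+                name='myprofile', erasure_type='jerasure',
+                erasure_technique='reed_sol_van', k=4, m=2)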
+
+        :param name: Name of profile to create
+        :type name: str
+        :param erasure_type: Which of the erasure coding plugins should be used
+        :type erasure_type: string
+        :param erasure_technique: EC plugin technique to use
+        :type erasure_technique: string
+        :param k: Number of data chunks
+        :type k: int
+        :param m: Number of coding chunks
+        :type m: int
+        :param lrc_locality: Group the coding and data chunks into sets of
+                             size locality (lrc plugin)
+        :type lrc_locality: int
+        :param shec_durability_estimator: The number of parity chunks each of
+                                          which includes a data chunk in its
+                                          calculation range (shec plugin)
+        :type shec_durability_estimator: int
+        :param clay_helper_chunks: The number of helper chunks to use for
+                                   recovery operations (clay plugin)
+        :type clay_helper_chunks: int
+        :param failure_domain: Type of failure domain from Ceph bucket types
+                               to be used
+        :type failure_domain: string
+        :param device_class: Device class to use for profile (ssd, hdd)
+        :type device_class: string
+        :param clay_scalar_mds: Plugin to use for CLAY layered construction
+                                (jerasure|isa|shec)
+        :type clay_scalar_mds: string
+        :param lrc_crush_locality: Type of crush bucket in which set of chunks
+                                   defined by lrc_locality will be stored.
+        :type lrc_crush_locality: string
+        """
+        self.add_op({'op': 'create-erasure-profile',
+                     'name': name,
+                     'k': k,
+                     'm': m,
+                     'l': lrc_locality,
+                     'c': shec_durability_estimator,
+                     'd': clay_helper_chunks,
+                     'erasure-type': erasure_type,
+                     'erasure-technique': erasure_technique,
+                     'failure-domain': failure_domain,
+                     'device-class': device_class,
+                     'scalar-mds': clay_scalar_mds,
+                     'crush-locality': lrc_crush_locality})

     def set_ops(self, ops):
         """Set request ops to provided value.
@@ -1522,18 +2070,15 @@ def exit_msg(self):

 def get_previous_request(rid):
     """Return the last ceph broker request sent on a given relation

-    @param rid: Relation id to query for request
+    :param rid: Relation id to query for request
+    :type rid: str
+    :returns: CephBrokerRq object or None if relation data not found.
+    :rtype: Optional[CephBrokerRq]
     """
-    request = None
     broker_req = relation_get(attribute='broker_req', rid=rid,
                               unit=local_unit())
     if broker_req:
-        request_data = json.loads(broker_req)
-        request = CephBrokerRq(api_version=request_data['api-version'],
-                               request_id=request_data['request-id'])
-        request.set_ops(request_data['ops'])
-
-    return request
+        return CephBrokerRq(raw_request_data=broker_req)


 def get_request_states(request, relation='ceph'):
diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py
index 15552cd8..d5c83891 100644
--- a/ceph-mon/lib/charms_ceph/broker.py
+++ b/ceph-mon/lib/charms_ceph/broker.py
@@ -155,25 +155,47 @@ def handle_create_erasure_profile(request, service):
     :param service: The ceph client to run the command under.
     :returns: dict. exit-code and reason if not 0
     """
-    # "local" | "shec" or it defaults to "jerasure"
+    # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure"
    erasure_type = request.get('erasure-type')
-    # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
+    # Technique used, dependent on the erasure coding type
+    erasure_technique = request.get('erasure-technique')
+    # "host" | "rack" | ...
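+    # (any valid Ceph CRUSH bucket type, validated against
+    # CEPH_BUCKET_TYPES below)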
     failure_domain = request.get('failure-domain')
     name = request.get('name')
     # Binary Distribution Matrix (BDM) parameters
     bdm_k = request.get('k')
     bdm_m = request.get('m')
+    # LRC parameters
     bdm_l = request.get('l')
-
-    if failure_domain not in CEPH_BUCKET_TYPES:
+    crush_locality = request.get('crush-locality')
+    # SHEC parameters
+    bdm_c = request.get('c')
+    # CLAY parameters
+    bdm_d = request.get('d')
+    scalar_mds = request.get('scalar-mds')
+    # Device Class
+    device_class = request.get('device-class')
+
+    if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
         msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}

-    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
-                           profile_name=name, failure_domain=failure_domain,
-                           data_chunks=bdm_k, coding_chunks=bdm_m,
-                           locality=bdm_l)
+    create_erasure_profile(service=service,
+                           erasure_plugin_name=erasure_type,
+                           profile_name=name,
+                           failure_domain=failure_domain,
+                           data_chunks=bdm_k,
+                           coding_chunks=bdm_m,
+                           locality=bdm_l,
+                           durability_estimator=bdm_c,
+                           helper_chunks=bdm_d,
+                           scalar_mds=scalar_mds,
+                           crush_locality=crush_locality,
+                           device_class=device_class,
+                           erasure_plugin_technique=erasure_technique)
+
+    return {'exit-code': 0}


 def handle_add_permissions_to_key(request, service):
@@ -387,6 +409,7 @@ def handle_erasure_pool(request, service):
     max_objects = request.get('max-objects')
     weight = request.get('weight')
     group_name = request.get('group')
+    allow_ec_overwrites = request.get('allow-ec-overwrites')

     if erasure_profile is None:
         erasure_profile = "default-canonical"
@@ -416,7 +439,9 @@ def handle_erasure_pool(request, service):

     pool = ErasurePool(service=service, name=pool_name,
                        erasure_code_profile=erasure_profile,
-                       percent_data=weight, app_name=app_name)
+                       percent_data=weight,
+                       app_name=app_name,
+                       allow_ec_overwrites=allow_ec_overwrites)
     # Ok make the erasure pool
     if not pool_exists(service=service, name=pool_name):
         log("Creating pool '{}' (erasure_profile={})"
diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py
index f82dbd09..c2e1e917 100644
--- a/ceph-mon/unit_tests/test_ceph_ops.py
+++ b/ceph-mon/unit_tests/test_ceph_ops.py
@@ -38,13 +38,19 @@ def test_create_erasure_profile(self, mock_create_erasure):
                 'm': 2,
             }]})
         rc = broker.process_requests(req)
-        mock_create_erasure.assert_called_with(service='admin',
-                                               profile_name='foo',
-                                               coding_chunks=2,
-                                               data_chunks=3,
-                                               locality=None,
-                                               failure_domain='rack',
-                                               erasure_plugin_name='jerasure')
+        mock_create_erasure.assert_called_with(
+            service='admin',
+            erasure_plugin_name='jerasure',
+            profile_name='foo',
+            failure_domain='rack',
+            data_chunks=3, coding_chunks=2,
+            locality=None,
+            durability_estimator=None,
+            helper_chunks=None,
+            scalar_mds=None,
+            crush_locality=None,
+            device_class=None,
+            erasure_plugin_technique=None)
         self.assertEqual(json.loads(rc), {'exit-code': 0})

     @patch.object(broker, 'pool_exists')
@@ -103,14 +109,17 @@ def test_process_requests_delete_pool(self,
         mock_delete_pool.assert_called_with(service='admin', name='foo')
         self.assertEqual(json.loads(rc), {'exit-code': 0})

+    @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno')
     @patch.object(broker, 'pool_exists')
     @patch.object(broker.ErasurePool, 'create')
     @patch.object(broker, 'erasure_profile_exists')
     @patch.object(broker, 'log', lambda *args, **kwargs: None)
     def test_process_requests_create_erasure_pool(self, mock_profile_exists,
                                                   mock_erasure_pool,
-
mock_pool_exists): + mock_pool_exists, + mock_cmp_pkgrevno): mock_pool_exists.return_value = False + mock_cmp_pkgrevno.return_value = 1 reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'create-pool', @@ -124,12 +133,15 @@ def test_process_requests_create_erasure_pool(self, mock_profile_exists, mock_erasure_pool.assert_called_with() self.assertEqual(json.loads(rc), {'exit-code': 0}) + @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') @patch.object(broker, 'pool_exists') @patch.object(broker.Pool, 'add_cache_tier') @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_cache_tier(self, mock_pool, - mock_pool_exists): + mock_pool_exists, + mock_cmp_pkgrevno): mock_pool_exists.return_value = True + mock_cmp_pkgrevno.return_value = 1 reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'create-cache-tier', @@ -139,28 +151,32 @@ def test_process_requests_create_cache_tier(self, mock_pool, 'erasure-profile': 'default' }]}) rc = broker.process_requests(reqs) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + mock_pool_exists.assert_any_call(service='admin', name='foo') mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback') - self.assertEqual(json.loads(rc), {'exit-code': 0}) + @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') @patch.object(broker, 'pool_exists') @patch.object(broker.Pool, 'remove_cache_tier') @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_remove_cache_tier(self, mock_pool, - mock_pool_exists): + mock_pool_exists, + mock_cmp_pkgrevno): mock_pool_exists.return_value = True + mock_cmp_pkgrevno.return_value = 1 reqs = json.dumps({'api-version': 1, 'ops': [{ 'op': 'remove-cache-tier', 'hot-pool': 'foo-ssd', }]}) rc = broker.process_requests(reqs) + self.assertEqual(json.loads(rc), {'exit-code': 0}) mock_pool_exists.assert_any_call(service='admin', name='foo-ssd') mock_pool.assert_called_with(cache_pool='foo-ssd') - self.assertEqual(json.loads(rc), {'exit-code': 0}) @patch.object(broker, 'snapshot_pool') @patch.object(broker, 'log', lambda *args, **kwargs: None) @@ -173,10 +189,11 @@ def test_snapshot_pool(self, mock_snapshot_pool): }]}) mock_snapshot_pool.return_value = {'exit-code': 0} rc = broker.process_requests(reqs) + self.assertEqual(json.loads(rc), {'exit-code': 0}) + mock_snapshot_pool.assert_called_with(service='admin', pool_name='foo', snapshot_name='foo-snap1') - self.assertEqual(json.loads(rc), {'exit-code': 0}) @patch.object(broker, 'rename_pool') @patch.object(broker, 'log', lambda *args, **kwargs: None) From ba63f4895791cfce4f1c49b0c8b11dffed045652 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 9 Jul 2020 10:02:16 +0200 Subject: [PATCH 2030/2699] Add focal Victoria and groovy test bundles Change-Id: I743232ff41b651e0aaa9ae2e9681f3c4a9b3ad78 --- .../src/tests/bundles/focal-victoria.yaml | 166 ++++++++++++++++++ .../src/tests/bundles/groovy-victoria.yaml | 166 ++++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 2 + 3 files changed, 334 insertions(+) create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml new file mode 100644 index 00000000..e6ff0fe3 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml @@ -0,0 +1,166 @@ +variables: + 
openstack-origin: &openstack-origin cloud:focal-victoria + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml new file mode 100644 index 00000000..3b72edcd --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml @@ -0,0 +1,166 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series 
groovy + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index f62a849b..1ccbfde3 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -11,6 +11,8 @@ comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. 
dev_bundles: +- groovy-victoria +- focal-victoria - bionic-queens-e2e - bionic-queens-e2e-lxd configure: From ceb9d130c40a10525f4cad78b757e2a9092f7dcb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 18 Aug 2020 08:49:32 +0000 Subject: [PATCH 2031/2699] Update to work with changes to interface. The ceph-mds interface has been updated to work in the same way as the ceph-client interface. This requires some minor updates to the charm so pools are requested when the ceph cluster is first available and the service is configured once the pools have been confirmed to exist. Depends-On: I9f438bb678da1b69d8161390aad2cf58907bc1b5 Change-Id: I83148f73c7f0465ecfadaa9df92e4a53e30813de --- ceph-fs/src/reactive/ceph_fs.py | 13 +++++++++++-- ceph-fs/unit_tests/test_reactive_ceph_fs.py | 9 +++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 9c54bdc4..215d908f 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -13,6 +13,7 @@ # limitations under the License. from charms import reactive +from charmhelpers.core import hookenv import charms_openstack.bus import charms_openstack.charm as charm @@ -31,9 +32,9 @@ @reactive.when_none('charm.paused', 'run-default-update-status') -@reactive.when('ceph-mds.available') +@reactive.when('ceph-mds.pools.available') def config_changed(): - ceph_mds = reactive.endpoint_from_flag('ceph-mds.available') + ceph_mds = reactive.endpoint_from_flag('ceph-mds.pools.available') with charm.provide_charm_instance() as cephfs_charm: cephfs_charm.configure_ceph_keyring(ceph_mds.mds_key()) cephfs_charm.render_with_interfaces([ceph_mds]) @@ -45,3 +46,11 @@ def config_changed(): reactive.set_flag('cephfs.configured') reactive.set_flag('config.rendered') cephfs_charm.assess_status() + + +@reactive.when_not('ceph.create_pool.req.sent') +@reactive.when('ceph-mds.connected') +def storage_ceph_connected(ceph): + ceph.announce_mds_name() + ceph.initialize_mds(hookenv.service_name()) + reactive.set_state('ceph.create_pool.req.sent') diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py index c210afe0..14999591 100644 --- a/ceph-fs/unit_tests/test_reactive_ceph_fs.py +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -32,7 +32,11 @@ def test_hooks(self): ] hook_set = { 'when': { - 'config_changed': ('ceph-mds.available',), + 'config_changed': ('ceph-mds.pools.available',), + 'storage_ceph_connected': ('ceph-mds.connected',), + }, + 'when_not': { + 'storage_ceph_connected': ('ceph.create_pool.req.sent',), }, 'when_none': { 'config_changed': ('charm.paused', @@ -65,7 +69,8 @@ def test_config_changed(self): self.endpoint_from_flag.return_value = ceph_mds self.is_flag_set.return_value = False handlers.config_changed() - self.endpoint_from_flag.assert_called_once_with('ceph-mds.available') + self.endpoint_from_flag.assert_called_once_with( + 'ceph-mds.pools.available') self.target.configure_ceph_keyring.assert_called_once_with('fakekey') self.target.render_with_interfaces.assert_called_once_with([ceph_mds]) self.is_flag_set.assert_called_once_with('config.changed.source') From a7f7b2bb73f14b1e5915288e5df54dabfedaf281 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 4 Aug 2020 12:39:10 +0000 Subject: [PATCH 2032/2699] Add .gitreview and .zuul.yaml This also includes a workaround to a bug in charmcraft which excludes many of the charm files from the build charm. 
Until that is resolved, include ./add-to-archive.py to add additional
files to the built charm.

Change-Id: Ia4d794167485caf1f6c277e0a56c81149b65d70a
---
 ceph-iscsi/.build.manifest        |  1 +
 ceph-iscsi/.gitignore             |  1 -
 ceph-iscsi/.gitreview             |  4 ++++
 ceph-iscsi/.jujuignore            |  8 ++++++++
 ceph-iscsi/.zuul.yaml             |  4 ++++
 ceph-iscsi/build-requirements.txt |  2 +-
 ceph-iscsi/copyright              | 16 ++++++++++++++++
 ceph-iscsi/requirements.txt       |  8 ++++----
 ceph-iscsi/test-requirements.txt  |  2 ++
 ceph-iscsi/tox.ini                | 18 +++++++++---------
 ceph-iscsi/unit_tests/__init__.py | 19 +++++++++++++++++++
 11 files changed, 68 insertions(+), 15 deletions(-)
 create mode 100644 ceph-iscsi/.build.manifest
 create mode 100644 ceph-iscsi/.gitreview
 create mode 100644 ceph-iscsi/.jujuignore
 create mode 100644 ceph-iscsi/.zuul.yaml
 create mode 100644 ceph-iscsi/copyright

diff --git a/ceph-iscsi/.build.manifest b/ceph-iscsi/.build.manifest
new file mode 100644
index 00000000..00b562cd
--- /dev/null
+++ b/ceph-iscsi/.build.manifest
@@ -0,0 +1 @@
+See venv directory
diff --git a/ceph-iscsi/.gitignore b/ceph-iscsi/.gitignore
index 4e3756b3..f3c3e4d8 100644
--- a/ceph-iscsi/.gitignore
+++ b/ceph-iscsi/.gitignore
@@ -6,4 +6,3 @@ lib/*
 !lib/README.txt
 build
 ceph-iscsi.charm
-
diff --git a/ceph-iscsi/.gitreview b/ceph-iscsi/.gitreview
new file mode 100644
index 00000000..bcc8f8ba
--- /dev/null
+++ b/ceph-iscsi/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.opendev.org
+port=29418
+project=openstack/charm-ceph-iscsi
diff --git a/ceph-iscsi/.jujuignore b/ceph-iscsi/.jujuignore
new file mode 100644
index 00000000..ec8dde9b
--- /dev/null
+++ b/ceph-iscsi/.jujuignore
@@ -0,0 +1,8 @@
+.stestr.conf
+.gitmodules
+.gitreview
+.gitignore
+ceph-iscsi.charm
+.zuul.yaml
+.stestr
+unit_tests
diff --git a/ceph-iscsi/.zuul.yaml b/ceph-iscsi/.zuul.yaml
new file mode 100644
index 00000000..fd20909e
--- /dev/null
+++ b/ceph-iscsi/.zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+    templates:
+      - openstack-python3-charm-jobs
+      - openstack-cover-jobs
diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt
index 1c2a91db..96368501 100644
--- a/ceph-iscsi/build-requirements.txt
+++ b/ceph-iscsi/build-requirements.txt
@@ -1 +1 @@
-charmcraft==0.3.0
+git+https://github.com/canonical/charmcraft.git#egg=charmcraft
diff --git a/ceph-iscsi/copyright b/ceph-iscsi/copyright
new file mode 100644
index 00000000..d0b7f44f
--- /dev/null
+++ b/ceph-iscsi/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2020, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index 30a07978..ff622ea3 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -1,6 +1,6 @@ # requirements git+https://github.com/juju/charm-helpers.git@87fc7ee5#egg=charmhelpers -git+https://github.com/canonical/operator.git@169794cdd#egg=ops -git+https://github.com/openstack-charmers/ops-interface-ceph-client@cc10f29d4#egg=interface_ceph_client -git+https://github.com/openstack-charmers/ops-openstack@ea51b43e#egg=ops_openstack -git+https://github.com/openstack-charmers/ops-interface-tls-certificates@2ec41b60#egg=ca_client +git+https://github.com/canonical/operator.git@0.8.0#egg=ops +git+https://opendev.org/openstack/charm-ops-interface-ceph-client@cc10f29d4#egg=interface_ceph_client +git+https://opendev.org/openstack/charm-ops-openstack@ea51b43e#egg=ops_openstack +git+https://opendev.org/openstack/charm-ops-interface-tls-certificates@2ec41b60#egg=ca_client diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index 7e9d6093..da47a9b4 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -7,6 +7,8 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 +# oslo.i18n dropped py35 support +oslo.i18n<4.0.0 git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pytz # workaround for 14.04 pip/tox diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index ecb76c40..fbc6f079 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -17,8 +17,8 @@ install_command = commands = stestr run --slowest {posargs} whitelist_externals = git - ln - charm-init.sh + add-to-archive.py + bash passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt @@ -37,6 +37,11 @@ basepython = python3.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -86,12 +91,6 @@ deps = -r{toxinidir}/build-requirements.txt commands = charmcraft build -[testenv:update-deps] -basepython = python3 -deps = -commands = - ./charm-init.sh -u - [testenv:func-noop] basepython = python3 commands = @@ -118,4 +117,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902 diff --git a/ceph-iscsi/unit_tests/__init__.py b/ceph-iscsi/unit_tests/__init__.py index e69de29b..577ab7e9 100644 --- a/ceph-iscsi/unit_tests/__init__.py +++ b/ceph-iscsi/unit_tests/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import mock + +# Mock out secrets to make py35 happy. 
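+# The stdlib 'secrets' module only exists on Python 3.6+; seeding
+# sys.modules with a MagicMock lets 'import secrets' succeed on 3.5.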
+sys.modules['secrets'] = mock.MagicMock() From 2b4664ffe2a99ab221d0321a58ff2547da4b574b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2020 07:09:19 +0000 Subject: [PATCH 2033/2699] Add erasure-coded pool bundle This change adds a bundle that configures erasure coded pools for glance, cinder-ceph and nova. To verify that the pools are using EC it also adds a test which verifies the pool type. To verify that the nova pool is configured correctly in all scenarious all the bundles have been updated to set libvirt-image-backend to rbd which triggers the nova pool creation. During testing the Prometheus tests were failing which is down to a known bug *1 in the prometheus charm. The bundles have been updated to pin the prometheus2 charm to an earlier version to get round the bug. *1 https://bugs.launchpad.net/charm-prometheus2/+bug/1891942 Change-Id: I7d29aec3ba9924d5677be25137907b1465401f9c --- ceph-mon/tests/bundles/bionic-queens.yaml | 5 +- ceph-mon/tests/bundles/bionic-rocky.yaml | 4 +- ceph-mon/tests/bundles/bionic-stein.yaml | 4 +- .../tests/bundles/bionic-train-with-fsid.yaml | 1 + ceph-mon/tests/bundles/bionic-train.yaml | 4 +- ceph-mon/tests/bundles/bionic-ussuri.yaml | 4 +- ceph-mon/tests/bundles/focal-ussuri-ec.yaml | 269 ++++++++++++++++++ ceph-mon/tests/bundles/focal-ussuri.yaml | 4 +- ceph-mon/tests/bundles/focal-victoria.yaml | 4 +- ceph-mon/tests/bundles/groovy-victoria.yaml | 4 +- ceph-mon/tests/bundles/trusty-mitaka.yaml | 1 + ceph-mon/tests/bundles/xenial-mitaka.yaml | 2 + ceph-mon/tests/bundles/xenial-ocata.yaml | 1 + ceph-mon/tests/bundles/xenial-pike.yaml | 1 + ceph-mon/tests/bundles/xenial-queens.yaml | 4 +- ceph-mon/tests/tests.yaml | 2 + 16 files changed, 305 insertions(+), 9 deletions(-) create mode 100644 ceph-mon/tests/bundles/focal-ussuri-ec.yaml diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml index 4f81e82f..614442e7 100644 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ b/ceph-mon/tests/bundles/bionic-queens.yaml @@ -30,6 +30,8 @@ applications: nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + options: + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance @@ -48,7 +50,8 @@ applications: charm: cs:~openstack-charmers-next/nova-cloud-controller num_units: 1 prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml index 2154c6a3..dde594de 100644 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ b/ceph-mon/tests/bundles/bionic-rocky.yaml @@ -38,6 +38,7 @@ applications: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: + libvirt-image-backend: rbd openstack-origin: cloud:bionic-rocky glance: expose: True @@ -62,7 +63,8 @@ applications: options: openstack-origin: cloud:bionic-rocky prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml index eeedbed3..92f889eb 100644 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ b/ceph-mon/tests/bundles/bionic-stein.yaml @@ -38,6 +38,7 @@ applications: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: + libvirt-image-backend: rbd openstack-origin: cloud:bionic-stein glance: 
expose: True @@ -62,7 +63,8 @@ applications: options: openstack-origin: cloud:bionic-stein prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml index 0624b45d..d41c63b3 100644 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml @@ -39,6 +39,7 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train/proposed + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml index 2f4d65fb..4c24951a 100644 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ b/ceph-mon/tests/bundles/bionic-train.yaml @@ -38,6 +38,7 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-train + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance @@ -66,7 +67,8 @@ applications: options: openstack-origin: cloud:bionic-train prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/bundles/bionic-ussuri.yaml b/ceph-mon/tests/bundles/bionic-ussuri.yaml index 086a077f..bbed4302 100644 --- a/ceph-mon/tests/bundles/bionic-ussuri.yaml +++ b/ceph-mon/tests/bundles/bionic-ussuri.yaml @@ -38,6 +38,7 @@ applications: num_units: 1 options: openstack-origin: cloud:bionic-ussuri + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance @@ -66,7 +67,8 @@ applications: options: openstack-origin: cloud:bionic-ussuri prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/bundles/focal-ussuri-ec.yaml b/ceph-mon/tests/bundles/focal-ussuri-ec.yaml new file mode 100644 index 00000000..8cedf3b2 --- /dev/null +++ b/ceph-mon/tests/bundles/focal-ussuri-ec.yaml @@ -0,0 +1,269 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + '20': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + to: + - '3' + - '4' + - '5' + - '17' + - '18' + - '19' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '20' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 
'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' diff --git a/ceph-mon/tests/bundles/focal-ussuri.yaml b/ceph-mon/tests/bundles/focal-ussuri.yaml index 227060fd..533bee4a 100644 --- a/ceph-mon/tests/bundles/focal-ussuri.yaml +++ b/ceph-mon/tests/bundles/focal-ussuri.yaml @@ -97,6 +97,7 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin + libvirt-image-backend: rbd to: - '11' @@ -141,7 +142,8 @@ applications: - '15' prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 to: - '16' diff --git a/ceph-mon/tests/bundles/focal-victoria.yaml b/ceph-mon/tests/bundles/focal-victoria.yaml index 49a7767d..1bfd8964 100644 --- a/ceph-mon/tests/bundles/focal-victoria.yaml +++ b/ceph-mon/tests/bundles/focal-victoria.yaml @@ -97,6 +97,7 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin + libvirt-image-backend: rbd to: - '11' @@ -141,7 +142,8 @@ applications: - '15' prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 to: - '16' diff --git a/ceph-mon/tests/bundles/groovy-victoria.yaml b/ceph-mon/tests/bundles/groovy-victoria.yaml index cef3f244..49b42437 100644 --- a/ceph-mon/tests/bundles/groovy-victoria.yaml +++ b/ceph-mon/tests/bundles/groovy-victoria.yaml @@ -97,6 +97,7 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin + libvirt-image-backend: rbd to: - '11' @@ -141,7 +142,8 @@ applications: - '15' prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 to: - '16' diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml index c6ae9dc5..63db332e 100644 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ b/ceph-mon/tests/bundles/trusty-mitaka.yaml @@ -59,6 +59,7 @@ applications: num_units: 1 options: openstack-origin: cloud:trusty-mitaka + libvirt-image-backend: rbd # workaround while awaiting release of next version of python-libjuju with # model-constraints support constraints: diff --git 
a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml index 90409212..7cba4099 100644 --- a/ceph-mon/tests/bundles/xenial-mitaka.yaml +++ b/ceph-mon/tests/bundles/xenial-mitaka.yaml @@ -30,6 +30,8 @@ applications: nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + options: + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml index e4ad7e3c..c20d9e39 100644 --- a/ceph-mon/tests/bundles/xenial-ocata.yaml +++ b/ceph-mon/tests/bundles/xenial-ocata.yaml @@ -39,6 +39,7 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-ocata + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml index 2c0e88e7..098691a8 100644 --- a/ceph-mon/tests/bundles/xenial-pike.yaml +++ b/ceph-mon/tests/bundles/xenial-pike.yaml @@ -39,6 +39,7 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-pike + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml index 8e7f7ae1..9bfed07e 100644 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ b/ceph-mon/tests/bundles/xenial-queens.yaml @@ -39,6 +39,7 @@ applications: num_units: 1 options: openstack-origin: cloud:xenial-queens + libvirt-image-backend: rbd glance: expose: True charm: cs:~openstack-charmers-next/glance @@ -62,7 +63,8 @@ applications: options: openstack-origin: cloud:xenial-queens prometheus2: - charm: cs:prometheus2 +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 num_units: 1 relations: - - nova-compute:amqp diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 4bd8868e..b5b2fb45 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-mon gate_bundles: + - focal-ussuri-ec - focal-ussuri - bionic-ussuri - bionic-train @@ -20,6 +21,7 @@ dev_bundles: configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest From f09ca912d8621289d9c69d617745393cb2034934 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Thu, 2 Apr 2020 16:59:07 +0100 Subject: [PATCH 2034/2699] Add focal and ussuri bundles to the charm This patch updates the bundles to include up to focal-ussuri. The focal-ussuri bundle is in the dev bundles as it can't pass at the moment due to LP: #1865754. The bionic-ussuri bundle is in the dev bundles (i.e. not gate) as it fails due to LP: #1892201 Also deal with the related bug where cinder-ceph requires the relation with a nova-compute unit. 
Related-Bug: #1881246 Related-Bug: #1865754 Related-Bug: #1892201 Change-Id: I0a6f1de82ecc601509822277d657485e08dc893d --- .../src/tests/bundles/bionic-train.yaml | 113 +++++++++ .../src/tests/bundles/bionic-ussuri.yaml | 113 +++++++++ .../src/tests/bundles/focal-ussuri.yaml | 228 ++++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 8 +- ceph-rbd-mirror/src/wheelhouse.txt | 1 - 5 files changed, 461 insertions(+), 2 deletions(-) create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml new file mode 100644 index 00000000..071f9d3a --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml @@ -0,0 +1,113 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-train + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-train + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-train + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-train + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-train + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-train + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-train +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml new file mode 100644 index 00000000..7e1ef38c --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml @@ -0,0 +1,113 @@ +series: 
bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-ussuri + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-ussuri + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-ussuri + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-ussuri + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-ussuri + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-ussuri + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-ussuri + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-ussuri +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph + - ceph-mon +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml new file mode 100644 index 00000000..c69ed249 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml @@ -0,0 +1,228 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + '20': + '21': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '3' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '4' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + to: + - '5' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '6' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + to: + - '7' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '8' + - '9' + - '10' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + to: + - '11' + - '12' + - '13' + + ceph-rbd-mirror: + series: focal + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + to: + - '14' + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '15' + - '16' + - '17' + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + to: + - '18' + - '19' + - '20' + + ceph-rbd-mirror-b: + series: focal + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + to: + - '21' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'rabbitmq-server' + - 'cinder' + + - - 'keystone' + - 'cinder' + + - - 'keystone' + - 'glance' + + - - 'cinder' + - 'cinder-ceph' + + - - 'cinder-ceph' + - 'ceph-mon' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance' + - 'ceph-mon' + + - - 'ceph-mon:osd' + - 'ceph-osd:mon' + + - - 'ceph-mon' + - 'ceph-rbd-mirror:ceph-local' + + - - 'ceph-mon' + - 'ceph-rbd-mirror-b:ceph-remote' + + - - 'ceph-mon-b:osd' + - 'ceph-osd-b:mon' + 
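+  # Each rbd-mirror application relates ceph-local to the mons of its own
+  # cluster and ceph-remote to the mons of the opposite cluster, forming
+  # the two-way mirroring topology exercised by these tests.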
+ - - 'ceph-mon-b' + - 'ceph-rbd-mirror-b:ceph-local' + + - - 'ceph-mon-b' + - 'ceph-rbd-mirror:ceph-remote' diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 1ccbfde3..77454602 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,7 +1,8 @@ charm_name: ceph-rbd-mirror smoke_bundles: -- bionic-stein +- bionic-train gate_bundles: +- bionic-train - bionic-stein - bionic-rocky - bionic-queens @@ -15,9 +16,14 @@ dev_bundles: - focal-victoria - bionic-queens-e2e - bionic-queens-e2e-lxd +- bionic-ussuri +- focal-ussuri configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest +tests_options: + force_deploy: + - groovy-victoria diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index 606a7f9b..a4d92cc0 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,2 +1 @@ - psutil From 4b231c475f02a8ba7739073df2f821dde9285a23 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 3 Aug 2020 12:37:54 +0200 Subject: [PATCH 2035/2699] Unpin flake8, fix lint Change-Id: Iab73f1127bfbdf11626727f3044366d2e5745439 --- ceph-mon/actions/create_crush_rule.py | 1 + ceph-mon/actions/create_erasure_profile.py | 4 ++-- ceph-mon/actions/security_checklist.py | 1 + ceph-mon/files/nagios/check_ceph_status.py | 4 ++-- ceph-mon/hooks/ceph_hooks.py | 6 +++--- ceph-mon/test-requirements.txt | 2 +- ceph-mon/tox.ini | 2 +- ceph-mon/unit_tests/test_ceph_ops.py | 2 +- 8 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ceph-mon/actions/create_crush_rule.py b/ceph-mon/actions/create_crush_rule.py index 5fea57ba..207b4f4f 100755 --- a/ceph-mon/actions/create_crush_rule.py +++ b/ceph-mon/actions/create_crush_rule.py @@ -41,5 +41,6 @@ def create_crush_rule(): except subprocess.CalledProcessError as e: hookenv.action_fail(str(e)) + if __name__ == '__main__': create_crush_rule() diff --git a/ceph-mon/actions/create_erasure_profile.py b/ceph-mon/actions/create_erasure_profile.py index 1cee8f3f..5306baa6 100755 --- a/ceph-mon/actions/create_erasure_profile.py +++ b/ceph-mon/actions/create_erasure_profile.py @@ -63,7 +63,7 @@ def make_erasure_profile(): action_fail("Create erasure profile failed with " "message: {}".format(str(e))) elif plugin == "lrc": - l = action_get("locality-chunks") + locality_chunks = action_get("locality-chunks") crush_locality = action_get('crush-locality') try: create_erasure_profile(service='admin', @@ -71,7 +71,7 @@ def make_erasure_profile(): profile_name=name, data_chunks=k, coding_chunks=m, - locality=l, + locality=locality_chunks, crush_locality=crush_locality, failure_domain=failure_domain, device_class=device_class) diff --git a/ceph-mon/actions/security_checklist.py b/ceph-mon/actions/security_checklist.py index 7afb3c1e..23b1caf1 100755 --- a/ceph-mon/actions/security_checklist.py +++ b/ceph-mon/actions/security_checklist.py @@ -42,5 +42,6 @@ def main(): } return audits.action_parse_results(audits.run(config)) + if __name__ == "__main__": sys.exit(main()) diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 844e7f2f..2f49b23b 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ 
b/ceph-mon/files/nagios/check_ceph_status.py @@ -291,7 +291,7 @@ def main(args): EXIT_CODES = {'ok': 0, 'warning': 1, 'critical': 2, 'unknown': 3} exitcode = 'ok' try: - msg = check_ceph_status(args) + check_ceph_status(args) except UnknownError as msg: print(msg) exitcode = 'unknown' @@ -301,7 +301,7 @@ def main(args): except WarnError as msg: print(msg) exitcode = 'warning' - except: + except Exception: print("%s raised unknown exception '%s'" % ('check_ceph_status', sys.exc_info()[0])) print('=' * 60) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 1e45482c..3bda8f21 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -293,9 +293,9 @@ def config_changed(): # unconditionally verify that the fsid and monitor-secret are set now # otherwise we exit until a leader does this. if leader_get('fsid') is None or leader_get('monitor-secret') is None: - log('still waiting for leader to setup keys') - status_set('waiting', 'Waiting for leader to setup keys') - return + log('still waiting for leader to setup keys') + status_set('waiting', 'Waiting for leader to setup keys') + return emit_cephconf() diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 7d9c2587..44b50231 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -10,7 +10,7 @@ charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index b835733a..8080ba6d 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 exclude = */charmhelpers diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index c2e1e917..145aaf92 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -15,7 +15,7 @@ import json import unittest -from mock import ( +from unittest.mock import ( call, patch, ) From cf8a63060e21902b7ed02bde53325cb970a0ceca Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 18 Aug 2020 08:51:02 +0000 Subject: [PATCH 2036/2699] Add Support For Erasure-Coded Pools Add support for erasure coded pools. 1) Pool name of replicated and EC pools can now be set via pool-name config option. 2) Weight of replicated and EC pools can now be set via ceph-pool-weight config option. 3) Charm no longer uses initialize_mds from the ceph-mds interface. This brings the charm in line with ceph-client charms, where each charm explicitly creates the pools it needs. 4) Metadata pool name format is preserved with an underscore rather than a hyphen. Change-Id: I97641c6daeeb2a1a65b081201772c89f6a7f539c --- ceph-fs/src/config.yaml | 128 ++++++++++++++++++++++++++++++++ ceph-fs/src/reactive/ceph_fs.py | 88 +++++++++++++++++++++- 2 files changed, 213 insertions(+), 3 deletions(-) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 0994d0af..c875baab 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -47,3 +47,131 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface.
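+  # The pool and erasure-profile options below mirror those exposed by
+  # the ceph-client charms.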
+ ceph-osd-replication-count: + type: int + default: 3 + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the images rbd pool. Of course, this only + applies if using Ceph as a backend store. Note that once the images + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 5 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately proportioned + to the amount of data expected. For example, if the compute images + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + rbd-pool-name: + default: + type: string + description: | + Optionally specify an existing rbd pool that the charm should map to. + pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include ‘replicated’ + and ‘erasure-coded’. + ec-profile-name: + type: string + default: + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range.
The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. The larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this application's pool. The following plugins + are acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: + description: | + EC profile technique used for this application's pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, + ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, + ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ + for shec. + ec-profile-device-class: + type: string + default: + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 215d908f..03894ce8 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -13,7 +13,9 @@ # limitations under the License. from charms import reactive -from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import ( + service_name, + config) import charms_openstack.bus import charms_openstack.charm as charm @@ -51,6 +53,86 @@ def config_changed(): @reactive.when_not('ceph.create_pool.req.sent') @reactive.when('ceph-mds.connected') def storage_ceph_connected(ceph): - ceph.announce_mds_name() - ceph.initialize_mds(hookenv.service_name()) + ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected') + ceph_mds.announce_mds_name() + service = service_name() + weight = config('ceph-pool-weight') + replicas = config('ceph-osd-replication-count') + + if config('rbd-pool-name'): + pool_name = config('rbd-pool-name') + else: + pool_name = "{}_data".format(service) + + # The '_' rather than '-' in the default pool name + # maintains consistency with previous versions of the + # charm but is inconsistent with ceph-client charms.
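+    # Illustration (hypothetical application name): deploying this charm
+    # as 'ceph-fs' yields the default pool names 'ceph-fs_data' and
+    # 'ceph-fs_metadata'.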
+ metadata_pool_name = ( + config('metadata-pool') or + "{}_metadata".format(service) + ) + # Metadata sizing is approximately 20% of overall data weight + # https://ceph.io/planet/cephfs-ideal-pg-ratio-between-metadata-and-data-pools/ + metadata_weight = weight * 0.20 + # Resize data pool weight to accommodate metadata weight + weight = weight - metadata_weight + + if config('pool-type') == 'erasure-coded': + # General EC plugin config + plugin = config('ec-profile-plugin') + technique = config('ec-profile-technique') + device_class = config('ec-profile-device-class') + bdm_k = config('ec-profile-k') + bdm_m = config('ec-profile-m') + # LRC plugin config + bdm_l = config('ec-profile-locality') + crush_locality = config('ec-profile-crush-locality') + # SHEC plugin config + bdm_c = config('ec-profile-durability-estimator') + # CLAY plugin config + bdm_d = config('ec-profile-helper-chunks') + scalar_mds = config('ec-profile-scalar-mds') + # Profile name + profile_name = ( + config('ec-profile-name') or "{}-profile".format(service) + ) + # Create erasure profile + ceph_mds.create_erasure_profile( + name=profile_name, + k=bdm_k, m=bdm_m, + lrc_locality=bdm_l, + lrc_crush_locality=crush_locality, + shec_durability_estimator=bdm_c, + clay_helper_chunks=bdm_d, + clay_scalar_mds=scalar_mds, + device_class=device_class, + erasure_type=plugin, + erasure_technique=technique + ) + + # Create EC data pool + ceph_mds.create_erasure_pool( + name=pool_name, + erasure_profile=profile_name, + weight=weight, + app_name=ceph_mds.ceph_pool_app_name, + allow_ec_overwrites=True + ) + ceph_mds.create_replicated_pool( + name=metadata_pool_name, + weight=metadata_weight, + app_name=ceph_mds.ceph_pool_app_name + ) + else: + ceph_mds.create_replicated_pool( + name=pool_name, + replicas=replicas, + weight=weight, + app_name=ceph_mds.ceph_pool_app_name) + ceph_mds.create_replicated_pool( + name=metadata_pool_name, + replicas=replicas, + weight=metadata_weight, + app_name=ceph_mds.ceph_pool_app_name) + ceph_mds.request_cephfs(service) reactive.set_state('ceph.create_pool.req.sent') From e5d50de89039248d12aa63cf8a87389524581fd2 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 25 Aug 2020 16:27:16 -0400 Subject: [PATCH 2037/2699] Improve zap-disk action section Also fix an ordered list that was not rendering properly in the Charm Store. Change-Id: Ibd8ea8919e80bf332b6f2509739b5e6055e081dc --- ceph-osd/README.md | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 52ee9e09..77536df7 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -57,32 +57,26 @@ be used in the `ceph-osd.yaml` configuration file: 1. Block devices (regular) -```yaml - ceph-osd: - options: - osd-devices: /dev/vdb /dev/vdc /dev/vdd -``` + ceph-osd: + options: + osd-devices: /dev/vdb /dev/vdc /dev/vdd Each regular block device must be an absolute path to a device node. 2. Block devices (Juju storage) -```yaml - ceph-osd: - storage: - osd-devices: cinder,20G -``` + ceph-osd: + storage: + osd-devices: cinder,20G See the [Juju documentation][juju-docs-storage] for guidance on implementing Juju storage. 3. Directory-backed OSDs -```yaml - ceph-osd: - storage: - osd-devices: /var/tmp/osd-1 -``` + ceph-osd: + storage: + osd-devices: /var/tmp/osd-1 > **Note**: OSD directories can no longer be created starting with Ceph Nautilus. Existing OSD directories will continue to function after an upgrade @@ -398,7 +392,10 @@ the `add-disk` action.
Example: - juju run-action --wait ceph-osd/3 zap-disk i-really-mean-it devices=/dev/vdc + juju run-action --wait ceph-osd/3 zap-disk i-really-mean-it=true devices=/dev/vdc + +> **Note**: The `zap-disk` action cannot be run on a mounted device, an active + Bluestore device, or an encrypted device. # Bugs From 3deac111f0cb5d4ae9a25cdb812d4686875e778c Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 26 Aug 2020 09:17:37 -0400 Subject: [PATCH 2038/2699] Fix ordered list Change-Id: Iffef8787165dfbda35df0ad047f59e4a38288a39 --- ceph-osd/README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 77536df7..22b86500 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -52,31 +52,31 @@ A storage device is destined as an OSD (Object Storage Device). There can be multiple OSDs per storage node (ceph-osd unit). The list of all possible storage devices for the cluster is defined by the -`osd-devices` option (default value is '/dev/vdb'). The following examples can -be used in the `ceph-osd.yaml` configuration file: +`osd-devices` option (default value is '/dev/vdb'). The examples below can be +used in the `ceph-osd.yaml` configuration file. -1. Block devices (regular) +Block devices (regular), - ceph-osd: - options: - osd-devices: /dev/vdb /dev/vdc /dev/vdd + ceph-osd: + options: + osd-devices: /dev/vdb /dev/vdc /dev/vdd Each regular block device must be an absolute path to a device node. -2. Block devices (Juju storage) +Block devices (Juju storage), - ceph-osd: - storage: - osd-devices: cinder,20G + ceph-osd: + storage: + osd-devices: cinder,20G See the [Juju documentation][juju-docs-storage] for guidance on implementing Juju storage. -3. Directory-backed OSDs +Directory-backed OSDs, - ceph-osd: - storage: - osd-devices: /var/tmp/osd-1 + ceph-osd: + storage: + osd-devices: /var/tmp/osd-1 > **Note**: OSD directories can no longer be created starting with Ceph Nautilus. Existing OSD directories will continue to function after an upgrade From 7904773ef19916c261e7b6f4973a62620bc7aa37 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 3 Aug 2020 10:52:42 +0200 Subject: [PATCH 2039/2699] Add BlueStore Compression support Sync in updates from charm-helpers and charms.ceph. Remove unit tests that belong to charms.ceph. Depends-On: Ibec4e3221387199adbc1a920e130975d7b25343c Change-Id: I153c22efb952fc38c5e3d36eed5d85c953e695f7 --- .../contrib/storage/linux/ceph.py | 14 ++-- ceph-mon/lib/charms_ceph/broker.py | 81 +++++++------------ ceph-mon/unit_tests/test_ceph_ops.py | 47 +---------- 3 files changed, 38 insertions(+), 104 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index d9d43578..526b95ad 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -705,12 +705,12 @@ def __init__(self, service, name=None, erasure_code_profile=None, # from different handling of this in the `charms.ceph` library. self.erasure_code_profile = op.get('erasure-profile', 'default-canonical') + self.allow_ec_overwrites = op.get('allow-ec-overwrites') else: # We keep the class default when initialized from keyword arguments # to not break the API for any other consumers.
self.erasure_code_profile = erasure_code_profile or 'default' - - self.allow_ec_overwrites = allow_ec_overwrites + self.allow_ec_overwrites = allow_ec_overwrites def _create(self): # Try to find the erasure profile information in order to properly @@ -1972,12 +1972,14 @@ def request(self): 'request-id': self.request_id}) def _ops_equal(self, other): + keys_to_compare = [ + 'replicas', 'name', 'op', 'pg_num', 'group-permission', + 'object-prefix-permissions', + ] + keys_to_compare += list(self._partial_build_common_op_create().keys()) if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in [ - 'replicas', 'name', 'op', 'pg_num', 'weight', - 'group', 'group-namespace', 'group-permission', - 'object-prefix-permissions']: + for key in keys_to_compare: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else: diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index d5c83891..8f040a5e 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -16,6 +16,7 @@ import json import os +from subprocess import check_call, check_output, CalledProcessError from tempfile import NamedTemporaryFile from charms_ceph.utils import ( @@ -41,18 +42,16 @@ pool_set, remove_pool_snapshot, rename_pool, - set_pool_quota, snapshot_pool, validator, ErasurePool, - Pool, + BasePool, ReplicatedPool, ) # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message -from subprocess import check_call, check_output, CalledProcessError POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] @@ -405,23 +404,11 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') - allow_ec_overwrites = request.get('allow-ec-overwrites') if erasure_profile is None: erasure_profile = "default-canonical" - app_name = request.get('app-name') - - # Check for missing params - if pool_name is None: - msg = "Missing parameter. name is required for the pool" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if group_name: group_namespace = request.get('group-namespace') # Add the pool to the group named "group_name" @@ -437,21 +424,22 @@ def handle_erasure_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = ErasurePool(service=service, name=pool_name, - erasure_code_profile=erasure_profile, - percent_data=weight, - app_name=app_name, - allow_ec_overwrites=allow_ec_overwrites) + try: + pool = ErasurePool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (erasure_profile={})" .format(pool.name, erasure_profile), level=INFO) pool.create() - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. + pool.update() def handle_replicated_pool(request, service): @@ -462,26 +450,19 @@ def handle_replicated_pool(request, service): :returns: dict. 
exit-code and reason if not 0. """ pool_name = request.get('name') - replicas = request.get('replicas') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') # Optional params + # NOTE: Check this against the handling in the Pool classes, reconcile and + # remove. pg_num = request.get('pg_num') + replicas = request.get('replicas') if pg_num: # Cap pg_num to max allowed just in case. osds = get_osds(service) if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - app_name = request.get('app-name') - # Check for missing params - if pool_name is None or replicas is None: - msg = "Missing parameter. name and replicas are required" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + request.update({'pg_num': pg_num}) if group_name: group_namespace = request.get('group-namespace') @@ -490,18 +471,14 @@ def handle_replicated_pool(request, service): group=group_name, namespace=group_namespace) - kwargs = {} - if pg_num: - kwargs['pg_num'] = pg_num - if weight: - kwargs['percent_data'] = weight - if replicas: - kwargs['replicas'] = replicas - if app_name: - kwargs['app_name'] = app_name - - pool = ReplicatedPool(service=service, - name=pool_name, **kwargs) + try: + pool = ReplicatedPool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) @@ -510,10 +487,8 @@ def handle_replicated_pool(request, service): log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. 
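+    # (these currently cover quotas derived from 'max-bytes'/'max-objects'
+    #  and the BlueStore compression settings carried in the request)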
+ pool.update() def handle_create_cache_tier(request, service): @@ -540,7 +515,7 @@ def handle_create_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - p = Pool(service=service, name=storage_pool) + p = BasePool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) @@ -561,7 +536,7 @@ def handle_remove_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = Pool(name=storage_pool, service=service) + pool = BasePool(name=storage_pool, service=service) pool.remove_cache_tier(cache_pool=cache_pool) diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 145aaf92..000ddbad 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -16,7 +16,6 @@ import unittest from unittest.mock import ( - call, patch, ) @@ -53,48 +52,6 @@ def test_create_erasure_profile(self, mock_create_erasure): erasure_plugin_technique=None) self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(broker, 'pool_exists') - @patch.object(broker, 'ReplicatedPool') - @patch.object(broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_create_replicated_pool(self, - mock_replicated_pool, - mock_pool_exists): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'pool-type': 'replicated', - 'name': 'foo', - 'replicas': 3 - }]}) - rc = broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - calls = [call(name=u'foo', service='admin', replicas=3)] - mock_replicated_pool.assert_has_calls(calls) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @patch.object(broker, 'pool_exists') - @patch.object(broker, 'ReplicatedPool') - @patch.object(broker, 'log', lambda *args, **kwargs: None) - def test_process_requests_replicated_pool_weight(self, - mock_replicated_pool, - mock_pool_exists): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'pool-type': 'replicated', - 'name': 'foo', - 'weight': 40.0, - 'replicas': 3 - }]}) - rc = broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - calls = [call(name=u'foo', service='admin', replicas=3, - percent_data=40.0)] - mock_replicated_pool.assert_has_calls(calls) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - @patch.object(broker, 'delete_pool') @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_delete_pool(self, @@ -135,7 +92,7 @@ def test_process_requests_create_erasure_pool(self, mock_profile_exists, @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') @patch.object(broker, 'pool_exists') - @patch.object(broker.Pool, 'add_cache_tier') + @patch.object(broker.BasePool, 'add_cache_tier') @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_create_cache_tier(self, mock_pool, mock_pool_exists, @@ -160,7 +117,7 @@ def test_process_requests_create_cache_tier(self, mock_pool, @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') @patch.object(broker, 'pool_exists') - @patch.object(broker.Pool, 'remove_cache_tier') + @patch.object(broker.BasePool, 'remove_cache_tier') @patch.object(broker, 'log', lambda *args, **kwargs: None) def test_process_requests_remove_cache_tier(self, mock_pool, mock_pool_exists, From 4d04113470f03d0a8ee905cfcd7b4c7eab9afbb2 Mon Sep 17 00:00:00 
2001 From: Frode Nordahl Date: Wed, 26 Aug 2020 16:15:23 +0200 Subject: [PATCH 2040/2699] Unpin flake8, fix lint Change-Id: I7f47c1dac0761101980ebba3f9aab8732cb0d1ce --- ceph-osd/actions/osd_in_out.py | 1 + ceph-osd/actions/security_checklist.py | 1 + ceph-osd/actions/zap_disk.py | 2 +- ceph-osd/files/nagios/check_ceph_osd_services.py | 6 +++--- ceph-osd/files/nagios/check_ceph_status.py | 5 +++-- ceph-osd/files/nagios/collect_ceph_osd_services.py | 4 ++-- ceph-osd/hooks/utils.py | 4 ++-- ceph-osd/test-requirements.txt | 2 +- ceph-osd/tox.ini | 2 +- 9 files changed, 15 insertions(+), 12 deletions(-) diff --git a/ceph-osd/actions/osd_in_out.py b/ceph-osd/actions/osd_in_out.py index f25168ab..844b7875 100755 --- a/ceph-osd/actions/osd_in_out.py +++ b/ceph-osd/actions/osd_in_out.py @@ -63,6 +63,7 @@ def osd_in(args): check_call(cmd) assess_status() + # A dictionary of all the defined actions to callables (which take # parsed arguments). ACTIONS = {"osd-out": osd_out, "osd-in": osd_in} diff --git a/ceph-osd/actions/security_checklist.py b/ceph-osd/actions/security_checklist.py index 3f1e10b2..2013c772 100755 --- a/ceph-osd/actions/security_checklist.py +++ b/ceph-osd/actions/security_checklist.py @@ -43,5 +43,6 @@ def main(): } return audits.action_parse_results(audits.run(config)) + if __name__ == "__main__": sys.exit(main()) diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index f85a48fd..b8a29bb2 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -66,7 +66,7 @@ def zap(): len(failed_devices), ", ".join(failed_devices)) if not_block_devices: - if message is not '': + if len(message): message += "\n\n" message += "{} devices are not block devices: {}".format( len(not_block_devices), diff --git a/ceph-osd/files/nagios/check_ceph_osd_services.py b/ceph-osd/files/nagios/check_ceph_osd_services.py index f53c5fa5..7f53b2d7 100755 --- a/ceph-osd/files/nagios/check_ceph_osd_services.py +++ b/ceph-osd/files/nagios/check_ceph_osd_services.py @@ -53,9 +53,9 @@ def run_main(): # command in the collect phase does fail, and so the start of the line is # 'Failed' state = STATE_OK - for l in lines: - print(l, end='') - if l.startswith('Failed'): + for line in lines: + print(line, end='') + if line.startswith('Failed'): state = STATE_CRITICAL return state diff --git a/ceph-osd/files/nagios/check_ceph_status.py b/ceph-osd/files/nagios/check_ceph_status.py index 358fafd9..843391d7 100755 --- a/ceph-osd/files/nagios/check_ceph_status.py +++ b/ceph-osd/files/nagios/check_ceph_status.py @@ -20,7 +20,8 @@ def check_ceph_status(args): .check_output(["ceph", "status"]) .decode('UTF-8') .split('\n')) - status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1) + status_data = dict( + line.strip().split(' ', 1) for line in lines if len(line) > 1) if ('health' not in status_data or 'monmap' not in status_data or @@ -40,7 +41,7 @@ def check_ceph_status(args): msg += '"' raise nagios_plugin.CriticalError(msg) - osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", + osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up. 
Total: {}, up: {}'.format( diff --git a/ceph-osd/files/nagios/collect_ceph_osd_services.py b/ceph-osd/files/nagios/collect_ceph_osd_services.py index 84764aba..7fa220b5 100755 --- a/ceph-osd/files/nagios/collect_ceph_osd_services.py +++ b/ceph-osd/files/nagios/collect_ceph_osd_services.py @@ -30,8 +30,8 @@ def lsb_release(): """Return /etc/lsb-release in a dict""" d = {} with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') + for el in lsb: + k, v = el.split('=') d[k.strip()] = v.strip() return d diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 912abe30..155c57f7 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -245,7 +245,7 @@ def get_blacklist(): def get_journal_devices(): if config('osd-journal'): - devices = [l.strip() for l in config('osd-journal').split(' ')] + devices = [el.strip() for el in config('osd-journal').split(' ')] else: devices = [] storage_ids = storage_list('osd-journals') @@ -287,6 +287,6 @@ def is_sata30orless(device): result = subprocess.check_output(["/usr/sbin/smartctl", "-i", device]) print(result) for line in str(result).split("\\n"): - if re.match("SATA Version is: *SATA (1\.|2\.|3\.0)", str(line)): + if re.match(r"SATA Version is: *SATA (1\.|2\.|3\.0)", str(line)): return True return False diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 7d9c2587..44b50231 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -10,7 +10,7 @@ charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index b835733a..8080ba6d 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 exclude = */charmhelpers From c7af476b688b97de805cfbe2e2ec2839c6b1d4a2 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 3 Aug 2020 10:48:43 +0200 Subject: [PATCH 2041/2699] Add BlueStore Compression support Sync in updates from charm-helpers and charms.ceph. Depends-On: I153c22efb952fc38c5e3d36eed5d85c953e695f7 Depends-On: Ibec4e3221387199adbc1a920e130975d7b25343c Change-Id: I028440002cdd36be13aaee4a0f50c6a0bca7abda --- ceph-osd/config.yaml | 119 ++ ceph-osd/hooks/ceph_hooks.py | 9 + .../charmhelpers/contrib/openstack/context.py | 77 + .../section-ceph-bluestore-compression | 28 + .../contrib/storage/linux/ceph.py | 1415 ++++++++++++----- ceph-osd/hooks/utils.py | 22 +- ceph-osd/lib/charms_ceph/broker.py | 116 +- ceph-osd/templates/ceph.conf | 1 + ceph-osd/unit_tests/test_ceph_hooks.py | 61 + ceph-osd/unit_tests/test_status.py | 17 +- 10 files changed, 1371 insertions(+), 494 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 7f9094fd..cb3239fa 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -335,3 +335,122 @@ options: cluster as all ceph-osd processes must be restarted as part of changing the apparmor profile enforcement mode. Always test in pre-production before enabling AppArmor on a live cluster. + bluestore-compression-algorithm: + type: string + default: lz4 + description: | + The default compressor to use (if any) if the per-pool property + compression_algorithm is not set. + . 
+ NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-mode: + type: string + default: + description: | + The default policy for using compression if the per-pool property + compression_mode is not set. 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. 'aggressive' means use compression unless clients hint that + data is not compressible. 'force' means use compression under all + circumstances even if the clients hint that the data is not compressible. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-required-ratio: + type: float + default: + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version. The per-pool property `compression-required-ratio` + overrides this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-min-blob-size: + type: int + default: + description: | + Chunks smaller than this are never compressed. The per-pool property + `compression_min_blob_size` overrides this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-min-blob-size-hdd: + type: int + default: + description: | + Default value of bluestore compression min blob size for rotational + media. The per-pool property `compression-min-blob-size-hdd` overrides + this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-min-blob-size-ssd: + type: int + default: + description: | + Default value of bluestore compression min blob size for solid state + media. The per-pool property `compression-min-blob-size-ssd` overrides + this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. 
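+  # Together, the min and max blob size options bound the chunk sizes
+  # that BlueStore will consider for compression.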
+ bluestore-compression-max-blob-size: + type: int + default: + description: | + Chunks larger than this are broken into smaller blobs of at most + bluestore compression max blob size before being compressed. The + per-pool property `compression_max_blob_size` overrides this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-max-blob-size-hdd: + type: int + default: + description: | + Default value of bluestore compression max blob size for rotational + media. The per-pool property `compression-max-blob-size-hdd` overrides + this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. + bluestore-compression-max-blob-size-ssd: + type: int + default: + description: | + Default value of bluestore compression max blob size for solid state + media. The per-pool property `compression-max-blob-size-ssd` overrides + this setting. + . + NOTE: The recommended approach is to adjust this configuration option on + the charm responsible for creating the specific pool you are interested + in tuning. Changing the configuration option on the ceph-osd charm will + affect ALL pools on the OSDs managed by the named application of the + ceph-osd charm in the Juju model. diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index d562872d..b002cd14 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -70,6 +70,7 @@ get_upstream_version, ) from charmhelpers.core.sysctl import create as create_sysctl +import charmhelpers.contrib.openstack.context as ch_context from charmhelpers.contrib.openstack.context import ( AppArmorContext, ) @@ -439,6 +440,8 @@ def get_ceph_context(upgrading=False): sections = ['global', 'osd'] cephcontext.update( ch_ceph.CephOSDConfContext(permitted_sections=sections)()) + cephcontext.update( + ch_context.CephBlueStoreCompressionContext()()) return cephcontext @@ -854,6 +857,12 @@ def assess_status(): except ValueError as ex: status_set('blocked', str(ex)) + try: + bluestore_compression = ch_context.CephBlueStoreCompressionContext() + bluestore_compression.validate() + except ValueError as e: + status_set('blocked', 'Invalid configuration: {}'.format(str(e))) + @hooks.hook('update-status') @harden() diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 42abccf7..b5adbefc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -29,6 +29,8 @@ import six +import charmhelpers.contrib.storage.linux.ceph as ch_ceph + from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( _config_ini as config_ini ) @@ -3175,3 +3177,78 @@ def __call__(self): :rtype: Dict[str,int] """ return self._map + + +class CephBlueStoreCompressionContext(OSContextGenerator): + """Ceph BlueStore compression options.""" + + # Tuple of tuples that map a configuration option name to a CephBrokerRq op + # property name
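+    # (e.g. the charm config option 'bluestore-compression-mode' maps to
+    #  the broker request property 'compression-mode')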
+ options = ( + ('bluestore-compression-algorithm', + 'compression-algorithm'), + ('bluestore-compression-mode', + 'compression-mode'), + ('bluestore-compression-required-ratio', + 'compression-required-ratio'), + ('bluestore-compression-min-blob-size', + 'compression-min-blob-size'), + ('bluestore-compression-min-blob-size-hdd', + 'compression-min-blob-size-hdd'), + ('bluestore-compression-min-blob-size-ssd', + 'compression-min-blob-size-ssd'), + ('bluestore-compression-max-blob-size', + 'compression-max-blob-size'), + ('bluestore-compression-max-blob-size-hdd', + 'compression-max-blob-size-hdd'), + ('bluestore-compression-max-blob-size-ssd', + 'compression-max-blob-size-ssd'), + ) + + def __init__(self): + """Initialize context by loading values from charm config. + + We keep two maps, one suitable for use with CephBrokerRq's and one + suitable for template generation. + """ + charm_config = config() + + # CephBrokerRq op map + self.op = {} + # Context exposed for template generation + self.ctxt = {} + for config_key, op_key in self.options: + value = charm_config.get(config_key) + self.ctxt.update({config_key.replace('-', '_'): value}) + self.op.update({op_key: value}) + + def __call__(self): + """Get context. + + :returns: Context + :rtype: Dict[str,any] + """ + return self.ctxt + + def get_op(self): + """Get values for use in CephBrokerRq op. + + :returns: Context values with CephBrokerRq op property name as key. + :rtype: Dict[str,any] + """ + return self.op + + def validate(self): + """Validate options. + + :raises: AssertionError + """ + # We slip in a dummy name on class instantiation to allow validation of + # the other options. It will not affect further use. + # + # NOTE: once we retire Python 3.5 we can fold this into an in-line + # dictionary comprehension in the call to the initializer.
+ dummy_op = {'name': 'dummy-name'} + dummy_op.update(self.op) + pool = ch_ceph.BasePool('dummy-service', op=dummy_op) + pool.validate() diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression b/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression new file mode 100644 index 00000000..a6430100 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/templates/section-ceph-bluestore-compression @@ -0,0 +1,28 @@ +{# section header omitted as options can belong to multiple sections #} +{% if bluestore_compression_algorithm -%} +bluestore compression algorithm = {{ bluestore_compression_algorithm }} +{% endif -%} +{% if bluestore_compression_mode -%} +bluestore compression mode = {{ bluestore_compression_mode }} +{% endif -%} +{% if bluestore_compression_required_ratio -%} +bluestore compression required ratio = {{ bluestore_compression_required_ratio }} +{% endif -%} +{% if bluestore_compression_min_blob_size -%} +bluestore compression min blob size = {{ bluestore_compression_min_blob_size }} +{% endif -%} +{% if bluestore_compression_min_blob_size_hdd -%} +bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }} +{% endif -%} +{% if bluestore_compression_min_blob_size_ssd -%} +bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }} +{% endif -%} +{% if bluestore_compression_max_blob_size -%} +bluestore compression max blob size = {{ bluestore_compression_max_blob_size }} +{% endif -%} +{% if bluestore_compression_max_blob_size_hdd -%} +bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }} +{% endif -%} +{% if bluestore_compression_max_blob_size_ssd -%} +bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }} +{% endif -%} diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 814d5c72..526b95ad 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ check_output, CalledProcessError, ) +from charmhelpers import deprecate from charmhelpers.core.hookenv import ( config, service_name, @@ -178,94 +179,293 @@ def send_osd_settings(): def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + """Helper function for type validation. + + Used to validate these: + https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + Example input: validator(value=1, valid_type=int, valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] - :param value: The value to validate + :param value: The value to validate. + :type value: any :param valid_type: The type that value should be. + :type valid_type: any :param valid_range: A range of values that value can assume. 
- :return: + :type valid_range: Optional[Union[List,Tuple]] + :raises: AssertionError, ValueError """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) + assert isinstance(value, valid_type), ( + "{} is not a {}".format(value, valid_type)) if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) + assert isinstance( + valid_range, list) or isinstance(valid_range, tuple), ( + "valid_range must be of type List or Tuple, " + "was given {} of type {}" + .format(valid_range, type(valid_range))) # If we're dealing with strings if isinstance(value, six.string_types): - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) + assert value in valid_range, ( + "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max else: if len(valid_range) != 2: raise ValueError( - "Invalid valid_range list of {} for {}. " + "Invalid valid_range list of {} for {}. " "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) + assert value >= valid_range[0], ( + "{} is less than minimum allowed value of {}" + .format(value, valid_range[0])) + assert value <= valid_range[1], ( + "{} is greater than maximum allowed value of {}" + .format(value, valid_range[1])) class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. Provides an error message + """A custom exception to inform the caller that a pool creation failed. + + Provides an error message """ def __init__(self, message): super(PoolCreationError, self).__init__(message) -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). +class BasePool(object): + """An object oriented approach to Ceph pool creation. + + This base class is inherited by ReplicatedPool and ErasurePool. Do not call + create() on this base class as it will raise an exception. + + Instantiate a child class and call create(). """ + # Dictionary that maps pool operation properties to Tuples with valid type + # and valid range + op_validation_map = { + 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), + 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), + 'compression-required-ratio': (float, None), + 'compression-min-blob-size': (int, None), + 'compression-min-blob-size-hdd': (int, None), + 'compression-min-blob-size-ssd': (int, None), + 'compression-max-blob-size': (int, None), + 'compression-max-blob-size-hdd': (int, None), + 'compression-max-blob-size-ssd': (int, None), + } - def __init__(self, service, name): + def __init__(self, service, name=None, percent_data=None, app_name=None, + op=None): + """Initialize BasePool object. + + Pool information is either initialized from individual keyword + arguments or from a individual CephBrokerRq operation Dict. + + :param service: The Ceph user name to run commands under. + :type service: str + :param name: Name of pool to operate on. 
:type name: str + :param percent_data: The expected pool size in relation to all + available resources in the Ceph cluster. Will be + used to set the ``target_size_ratio`` pool + property. (default: 10.0) + :type percent_data: Optional[float] + :param app_name: Ceph application name, usually one of: + ('cephfs', 'rbd', 'rgw') (default: 'unknown') + :type app_name: Optional[str] + :param op: Broker request Op to compile pool data from. + :type op: Optional[Dict[str,any]] + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.name = name + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + self.op = op or {} + + if op: + # When initializing from op the `name` attribute is required and we + # will fail with KeyError if it is not provided. + self.name = op['name'] + self.percent_data = op.get('weight') + self.app_name = op.get('app-name') + else: + self.name = name + self.percent_data = percent_data + self.app_name = app_name + + # Set defaults for these if they are not provided + self.percent_data = self.percent_data or 10.0 + self.app_name = self.app_name or 'unknown' + + def validate(self): + """Check that the values of supplied operation parameters are valid. + + :raises: ValueError + """ + for op_key, op_value in self.op.items(): + if op_key in self.op_validation_map and op_value is not None: + valid_type, valid_range = self.op_validation_map[op_key] + try: + validator(op_value, valid_type, valid_range) + except (AssertionError, ValueError) as e: + # Normalize on ValueError, also add information about which + # variable we had an issue with. + raise ValueError("'{}': {}".format(op_key, str(e))) + + def _create(self): + """Perform the pool creation; this method MUST be overridden by the child class. + """ + raise NotImplementedError + + def _post_create(self): + """Perform common post pool creation tasks. + + Note that pool properties subject to change during the lifetime of a + pool / deployment should go into the ``update`` method. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + if self.nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool( + client=self.service, + pool=self.name, + settings={ + 'target_size_ratio': str( + self.percent_data / 100.0), + }) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}' + .format(self.name), + level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}' + .format(self.name, e), + level=WARNING) - # Create the pool if it doesn't exist already - # To be implemented by subclasses def create(self): - pass + """Create pool and perform any post pool creation tasks. - def add_cache_tier(self, cache_pool, mode): + To allow for sharing of common code among pool specific classes, the + processing has been broken out into the private methods ``_create`` + and ``_post_create``. + + Do not add any pool type specific handling here, that should go into + one of the pool specific classes.
The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None + if not pool_exists(self.service, self.name): + self.validate() + self._create() + self._post_create() + self.update() + + def set_quota(self): + """Set a quota if requested. + + :raises: CalledProcessError + """ + max_bytes = self.op.get('max-bytes') + max_objects = self.op.get('max-objects') + if max_bytes or max_objects: + set_pool_quota(service=self.service, pool_name=self.name, + max_bytes=max_bytes, max_objects=max_objects) + + def set_compression(self): + """Set compression properties if requested. + + :raises: CalledProcessError + """ + compression_properties = { + key.replace('-', '_'): value + for key, value in self.op.items() + if key in ( + 'compression-algorithm', + 'compression-mode', + 'compression-required-ratio', + 'compression-min-blob-size', + 'compression-min-blob-size-hdd', + 'compression-min-blob-size-ssd', + 'compression-max-blob-size', + 'compression-max-blob-size-hdd', + 'compression-max-blob-size-ssd') and value} + if compression_properties: + update_pool(self.service, self.name, compression_properties) + + def update(self): + """Update properties for an already existing pool. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + self.validate() + self.set_quota() + self.set_compression() + + def add_cache_tier(self, cache_pool, mode): + """Adds a new cache tier to an existing pool. + + :param cache_pool: The cache tier pool name to add. + :type cache_pool: str + :param mode: The caching mode to use for this pool. + valid range = ["readonly", "writeback"] + :type mode: str """ # Check the input types and values validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + validator( + value=mode, valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'add', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, mode, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'set-overlay', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', + ]) def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. - :return: None + """Removes a cache tier from Ceph. + + Flushes all dirty objects from writeback pools and waits for that to + complete. + + :param cache_pool: The cache tier pool name to remove. 
+ :type cache_pool: str """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, 'none' + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool, + ]) elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', @@ -276,9 +476,15 @@ def remove_cache_tier(self, cache_pool): check_call(pool_forward_cmd) # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'rados', '--id', self.service, + '-p', cache_pool, 'cache-flush-evict-all']) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove-overlay', self.name]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool]) def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, device_class=None): @@ -305,19 +511,23 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, selected for the specific rule, rather it is left to the user to tune in the form of 'expected-osd-count' config option. - :param pool_size: int. pool_size is either the number of replicas for + :param pool_size: pool_size is either the number of replicas for replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. the percentage of data that is expected to + :type pool_size: int + :param percent_data: the percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. - :param device_class: str. class of storage to use for basis of pgs + :type percent_data: float + :param device_class: class of storage to use for basis of pgs calculation; ceph supports nvme, ssd and hdd by default based on presence of devices of each type in the deployment. - :return: int. The number of pgs to use. + :type device_class: str + :returns: The number of pgs to use. 
+        :rtype: int
         """
         # Note: This calculation follows the approach that is provided
@@ -357,7 +567,8 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
             return LEGACY_PG_COUNT

         percent_data /= 100.0
-        target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
+        target_pgs_per_osd = config(
+            'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
         num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size

         # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
@@ -380,147 +591,174 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
         return int(nearest)


-class ReplicatedPool(Pool):
-    def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0, app_name=None):
-        super(ReplicatedPool, self).__init__(service=service, name=name)
-        self.replicas = replicas
-        self.percent_data = percent_data
-        if pg_num:
+class Pool(BasePool):
+    """Compatibility shim for any descendants external to this library."""
+
+    @deprecate(
+        'The ``Pool`` baseclass has been replaced by the ``BasePool`` class.')
+    def __init__(self, service, name):
+        super(Pool, self).__init__(service, name=name)
+
+    def create(self):
+        pass
+
+
+class ReplicatedPool(BasePool):
+    def __init__(self, service, name=None, pg_num=None, replicas=None,
+                 percent_data=None, app_name=None, op=None):
+        """Initialize ReplicatedPool object.
+
+        Pool information is either initialized from individual keyword
+        arguments or from an individual CephBrokerRq operation Dict.
+
+        Please refer to the docstring of the ``BasePool`` class for
+        documentation of the common parameters.
+
+        :param pg_num: Express wish for number of Placement Groups (this value
+                       is subject to validation against a running cluster prior
+                       to use to avoid creating a pool with too many PGs)
+        :type pg_num: int
+        :param replicas: Number of copies there should be of each object added
+                         to this replicated pool.
+        :type replicas: int
+        :raises: KeyError
+        """
+        # NOTE: Do not perform initialization steps that require live data from
+        # a running cluster here. The *Pool classes may be used for validation.
+
+        # The common parameters are handled in our parent's initializer
+        super(ReplicatedPool, self).__init__(
+            service=service, name=name, percent_data=percent_data,
+            app_name=app_name, op=op)
+
+        if op:
+            # When initializing from op `replicas` is a required attribute, and
+            # we will fail with KeyError if it is not provided.
+            self.replicas = op['replicas']
+            self.pg_num = op.get('pg_num')
+        else:
+            self.replicas = replicas or 2
+            self.pg_num = pg_num
+
+    def _create(self):
+        # Do extra validation on pg_num with data from live cluster
+        if self.pg_num:
            # Since the number of placement groups was specified, ensure
            # that there aren't too many created.
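            # A worked illustration (assumed values, not from a live
            # cluster): with DEFAULT_PGS_PER_OSD_TARGET == 100, 12 OSDs and
            # replicas == 3, get_pgs(3, 100.0) computes
            # (100 * 12 * 1.0) // 3 == 400 before the power-of-two
            # adjustment in the elided part of get_pgs(); the min() below
            # then clamps an over-sized pg_num request to that result.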
max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) + self.pg_num = min(self.pg_num, max_pgs) else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - if app_name: - self.app_name = app_name + self.pg_num = self.get_pgs(self.replicas, self.percent_data) + + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] else: - self.app_name = 'unknown' + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + check_call(cmd) - def create(self): - if not pool_exists(self.service, self.name): - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num) - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) - ] + def _post_create(self): + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + # Perform other common post pool creation tasks + super(ReplicatedPool, self)._post_create() - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0, app_name=None): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
-            erasure_profile = get_erasure_profile(self.service,
-                                                  self.erasure_code_profile)
-
-            # Check for errors
-            if erasure_profile is None:
-                msg = ("Failed to discover erasure profile named "
-                       "{}".format(self.erasure_code_profile))
-                log(msg, level=ERROR)
-                raise PoolCreationError(msg)
-            if 'k' not in erasure_profile or 'm' not in erasure_profile:
-                # Error
-                msg = ("Unable to find k (data chunks) or m (coding chunks) "
-                       "in erasure profile {}".format(erasure_profile))
-                log(msg, level=ERROR)
-                raise PoolCreationError(msg)
-
-            k = int(erasure_profile['k'])
-            m = int(erasure_profile['m'])
-            pgs = self.get_pgs(k + m, self.percent_data)
-            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
-            # Create it
-            if nautilus_or_later:
-                cmd = [
-                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
-                    '--pg-num-min={}'.format(
-                        min(AUTOSCALER_DEFAULT_PGS, pgs)
-                    ),
-                    self.name, str(pgs), str(pgs),
-                    'erasure', self.erasure_code_profile
-                ]
-            else:
-                cmd = [
-                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
-                    self.name, str(pgs), str(pgs),
-                    'erasure', self.erasure_code_profile
-                ]
+class ErasurePool(BasePool):
+    """Default jerasure erasure coded pool."""

-            try:
-                check_call(cmd)
-                try:
-                    set_app_name_for_pool(client=self.service,
-                                          pool=self.name,
-                                          name=self.app_name)
-                except CalledProcessError:
-                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
-                if nautilus_or_later:
-                    # Ensure we set the expected pool ratio
-                    update_pool(client=self.service,
-                                pool=self.name,
-                                settings={'target_size_ratio': str(self.percent_data / 100.0)})
-                if 'pg_autoscaler' in enabled_manager_modules():
-                    try:
-                        enable_pg_autoscale(self.service, self.name)
-                    except CalledProcessError as e:
-                        log('Could not configure auto scaling for pool {}: {}'.format(
-                            self.name, e), level=WARNING)
-            except CalledProcessError:
-                raise
-
-    """Get an existing erasure code profile if it already exists.
-       Returns json formatted output"""
+    def __init__(self, service, name=None, erasure_code_profile=None,
+                 percent_data=None, app_name=None, op=None,
+                 allow_ec_overwrites=False):
+        """Initialize ErasurePool object.
+
+        Pool information is either initialized from individual keyword
+        arguments or from an individual CephBrokerRq operation Dict.
+
+        Please refer to the docstring of the ``BasePool`` class for
+        documentation of the common parameters.
+
+        :param erasure_code_profile: EC Profile to use (default: 'default')
+        :type erasure_code_profile: Optional[str]
+        :param allow_ec_overwrites: Allow overwrites in the EC pool.
+        :type allow_ec_overwrites: bool
+        """
+        # NOTE: Do not perform initialization steps that require live data from
+        # a running cluster here. The *Pool classes may be used for validation.
+
+        # The common parameters are handled in our parent's initializer
+        super(ErasurePool, self).__init__(
+            service=service, name=name, percent_data=percent_data,
+            app_name=app_name, op=op)
+
+        if op:
+            # Note that the different default when initializing from op stems
+            # from different handling of this in the `charms.ceph` library.
+            self.erasure_code_profile = op.get('erasure-profile',
+                                               'default-canonical')
+            self.allow_ec_overwrites = op.get('allow-ec-overwrites')
+        else:
+            # We keep the class default when initialized from keyword arguments
+            # to not break the API for any other consumers.
+            self.erasure_code_profile = erasure_code_profile or 'default'
+            self.allow_ec_overwrites = allow_ec_overwrites

+    def _create(self):
+        # Try to find the erasure profile information in order to properly
+        # size the number of placement groups. The size of an erasure
+        # coded placement group is calculated as k+m.
+        erasure_profile = get_erasure_profile(self.service,
+                                              self.erasure_code_profile)
+
+        # Check for errors
+        if erasure_profile is None:
+            msg = ("Failed to discover erasure profile named "
+                   "{}".format(self.erasure_code_profile))
+            log(msg, level=ERROR)
+            raise PoolCreationError(msg)
+        if 'k' not in erasure_profile or 'm' not in erasure_profile:
+            # Error
+            msg = ("Unable to find k (data chunks) or m (coding chunks) "
+                   "in erasure profile {}".format(erasure_profile))
+            log(msg, level=ERROR)
+            raise PoolCreationError(msg)
+
+        k = int(erasure_profile['k'])
+        m = int(erasure_profile['m'])
+        pgs = self.get_pgs(k + m, self.percent_data)
+        self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+        # Create it
+        if self.nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        check_call(cmd)
+
+    def _post_create(self):
+        super(ErasurePool, self)._post_create()
+        if self.allow_ec_overwrites:
+            update_pool(self.service, self.name,
+                        {'allow_ec_overwrites': 'true'})


 def enabled_manager_modules():
@@ -541,22 +779,28 @@ def enabled_manager_modules():


 def enable_pg_autoscale(service, pool_name):
-    """
-    Enable Ceph's PG autoscaler for the specified pool.
+    """Enable Ceph's PG autoscaler for the specified pool.

-    :param service: six.string_types. The Ceph user name to run the command under
-    :param pool_name: six.string_types. The name of the pool to enable sutoscaling on
-    :raise: CalledProcessError if the command fails
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: The name of the pool to enable autoscaling on
+    :type pool_name: str
+    :raises: CalledProcessError if the command fails
     """
-    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
+    check_call([
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])


 def get_mon_map(service):
-    """
-    Returns the current monitor map.
-    :param service: six.string_types. The Ceph user name to run the command under
-    :return: json string. :raise: ValueError if the monmap fails to parse.
-      Also raises CalledProcessError if our ceph command fails
+    """Return the current monitor map.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :returns: Dictionary with monitor map data
+    :rtype: Dict[str,any]
+    :raises: ValueError if the monmap fails to parse, CalledProcessError if our
+             ceph command fails.
     """
     try:
         mon_status = check_output(['ceph', '--id', service,
@@ -576,17 +820,16 @@ def get_mon_map(service):


 def hash_monitor_names(service):
-    """
+    """Get a sorted list of monitor hashes in ascending order.
+
     Uses the get_mon_map() function to get information about the monitor
-    cluster.
-    Hash the name of each monitor. Return a sorted list of monitor hashes
-    in an ascending order.
-    :param service: six.string_types. The Ceph user name to run the command under
-    :rtype : dict. json dict of monitor name, ip address and rank
-    example: {
-        'name': 'ip-172-31-13-165',
-        'rank': 0,
-        'addr': '172.31.13.165:6789/0'}
+    cluster. Hash the name of each monitor.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :returns: a sorted list of monitor hashes in an ascending order.
+    :rtype: List[str]
+    :raises: CalledProcessError, ValueError
     """
     try:
         hash_list = []
@@ -603,46 +846,56 @@ def hash_monitor_names(service):


 def monitor_key_delete(service, key):
-    """
-    Delete a key and value pair from the monitor cluster
-    :param service: six.string_types. The Ceph user name to run the command under
+    """Delete a key and value pair from the monitor cluster.
+
     Deletes a key value pair on the monitor cluster.
-    :param key: six.string_types. The key to delete.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param key: The key to delete.
+    :type key: str
+    :raises: CalledProcessError
     """
     try:
         check_output(
             ['ceph', '--id', service,
              'config-key', 'del', str(key)])
     except CalledProcessError as e:
-        log("Monitor config-key put failed with message: {}".format(
-            e.output))
+        log("Monitor config-key del failed with message: {}"
+            .format(e.output))
         raise


 def monitor_key_set(service, key, value):
-    """
-    Sets a key value pair on the monitor cluster.
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param key: six.string_types. The key to set.
-    :param value: The value to set. This will be converted to a string
-        before setting
+    """Set a key value pair on the monitor cluster.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param key: The key to set.
+    :type key: str
+    :param value: The value to set. This will be coerced into a string.
+    :type value: str
+    :raises: CalledProcessError
     """
     try:
         check_output(
             ['ceph', '--id', service,
              'config-key', 'put', str(key), str(value)])
     except CalledProcessError as e:
-        log("Monitor config-key put failed with message: {}".format(
-            e.output))
+        log("Monitor config-key put failed with message: {}"
+            .format(e.output))
         raise


 def monitor_key_get(service, key):
-    """
-    Gets the value of an existing key in the monitor cluster.
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param key: six.string_types. The key to search for.
+    """Get the value of an existing key in the monitor cluster.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param key: The key to search for.
+    :type key: str
     :return: Returns the value of that key or None if not found.
+    :rtype: Optional[str]
     """
     try:
         output = check_output(
@@ -650,19 +903,21 @@ def monitor_key_get(service, key):
             'config-key', 'get', str(key)]).decode('UTF-8')
         return output
     except CalledProcessError as e:
-        log("Monitor config-key get failed with message: {}".format(
-            e.output))
+        log("Monitor config-key get failed with message: {}"
+            .format(e.output))
         return None


 def monitor_key_exists(service, key):
-    """
-    Searches for the existence of a key in the monitor cluster.
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param key: six.string_types. The key to search for
-    :return: Returns True if the key exists, False if not and raises an
-        exception if an unknown error occurs. :raise: CalledProcessError if
-        an unknown error occurs
+    """Search for existence of key in the monitor cluster.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param key: The key to search for.
+    :type key: str
+    :return: Returns True if the key exists, False if not.
+ :rtype: bool + :raises: CalledProcessError if an unknown error occurs. """ try: check_call( @@ -675,16 +930,20 @@ def monitor_key_exists(service, key): if e.returncode == errno.ENOENT: return False else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) + log("Unknown error from ceph config-get exists: {} {}" + .format(e.returncode, e.output)) raise def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: + """Get an existing erasure code profile if it exists. + + :param service: The Ceph user name to run the command under. + :type service: str + :param name: Name of profile. + :type name: str + :returns: Dictionary with profile data. + :rtype: Optional[Dict[str]] """ try: out = check_output(['ceph', '--id', service, @@ -698,54 +957,61 @@ def get_erasure_profile(service, name): def pool_set(service, pool_name, key, value): + """Sets a value for a RADOS pool in ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to set property on. + :type pool_name: str + :param key: Property key. + :type key: str + :param value: Value, will be coerced into str and shifted to lowercase. + :type value: str + :raises: CalledProcessError """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, - str(value).lower()] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, key, str(value).lower()] + check_call(cmd) def snapshot_pool(service, pool_name, snapshot_name): + """Snapshots a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to snapshot. + :type pool_name: str + :param snapshot_name: Name of snapshot to create. + :type snapshot_name: str + :raises: CalledProcessError """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + check_call(cmd) def remove_pool_snapshot(service, pool_name, snapshot_name): + """Remove a snapshot from a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove snapshot from. + :type pool_name: str + :param snapshot_name: Name of snapshot to remove. + :type snapshot_name: str + :raises: CalledProcessError """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + check_call(cmd) def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): - """ + """Set byte quota on a RADOS pool in Ceph. + :param service: The Ceph user name to run the command under :type service: str :param pool_name: Name of pool @@ -756,7 +1022,9 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): :type max_objects: int :raises: subprocess.CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name] if max_bytes: cmd = cmd + ['max_bytes', str(max_bytes)] if max_objects: @@ -765,119 +1033,216 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): def remove_pool_quota(service, pool_name): + """Remove byte quota on a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove quota from. + :type pool_name: str + :raises: CalledProcessError """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + check_call(cmd) def remove_erasure_profile(service, profile_name): + """Remove erasure code profile. + + :param service: The Ceph user name to run the command under + :type service: str + :param profile_name: Name of profile to remove. + :type profile_name: str + :raises: CalledProcessError """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'rm', profile_name] + check_call(cmd) -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', - failure_domain='host', +def create_erasure_profile(service, profile_name, + erasure_plugin_name='jerasure', + failure_domain=None, data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None, - device_class=None): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
-                             'room', 'root', 'row'])
-    :param data_chunks: int
-    :param coding_chunks: int
-    :param locality: int
-    :param durability_estimator: int
-    :param device_class: six.string_types
-    :return: None.  Can raise CalledProcessError
-    """
-    # Ensure this failure_domain is allowed by Ceph
-    validator(failure_domain, six.string_types,
-              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+                           helper_chunks=None,
+                           scalar_mds=None,
+                           crush_locality=None,
+                           device_class=None,
+                           erasure_plugin_technique=None):
+    """Create a new erasure code profile if one does not already exist for it.
+
+    Updates the profile if it exists. Please refer to [0] for more details.

-    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
-           ]
-    if locality is not None and durability_estimator is not None:
-        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+    0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param profile_name: Name of profile.
+    :type profile_name: str
+    :param erasure_plugin_name: Erasure code plugin.
+    :type erasure_plugin_name: str
+    :param failure_domain: Failure domain, one of:
+                           ('chassis', 'datacenter', 'host', 'osd', 'pdu',
+                            'pod', 'rack', 'region', 'room', 'root', 'row').
+    :type failure_domain: str
+    :param data_chunks: Number of data chunks.
+    :type data_chunks: int
+    :param coding_chunks: Number of coding chunks.
+    :type coding_chunks: int
+    :param locality: Locality.
+    :type locality: int
+    :param durability_estimator: Durability estimator.
+    :type durability_estimator: int
+    :param helper_chunks: Number of helper chunks (CLAY plugin).
+    :type helper_chunks: int
+    :param device_class: Restrict placement to devices of specific class.
+    :type device_class: str
+    :param scalar_mds: one of ['isa', 'jerasure', 'shec']
+    :type scalar_mds: str
+    :param crush_locality: LRC locality failure domain, one of:
+                           ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod',
+                            'rack', 'region', 'room', 'root', 'row') or unset.
+    :type crush_locality: str
+    :param erasure_plugin_technique: Coding technique for EC plugin
+    :type erasure_plugin_technique: str
+    :return: None.
Can raise CalledProcessError, ValueError or AssertionError + """ + plugin_techniques = { + 'jerasure': [ + 'reed_sol_van', + 'reed_sol_r6_op', + 'cauchy_orig', + 'cauchy_good', + 'liberation', + 'blaum_roth', + 'liber8tion' + ], + 'lrc': [], + 'isa': [ + 'reed_sol_van', + 'cauchy', + ], + 'shec': [ + 'single', + 'multiple' + ], + 'clay': [], + } + failure_domains = [ + 'chassis', 'datacenter', + 'host', 'osd', + 'pdu', 'pod', + 'rack', 'region', + 'room', 'root', + 'row', + ] + device_classes = [ + 'ssd', + 'hdd', + 'nvme' + ] + + validator(erasure_plugin_name, six.string_types, + list(plugin_techniques.keys())) + + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin={}'.format(erasure_plugin_name), + 'k={}'.format(str(data_chunks)), + 'm={}'.format(str(coding_chunks)), + ] + + if erasure_plugin_technique: + validator(erasure_plugin_technique, six.string_types, + plugin_techniques[erasure_plugin_name]) + cmd.append('technique={}'.format(erasure_plugin_technique)) luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - # failure_domain changed in luminous - if luminous_or_later: - cmd.append('crush-failure-domain=' + failure_domain) - else: - cmd.append('ruleset-failure-domain=' + failure_domain) + + # Set failure domain from options if not provided in args + if not failure_domain and config('customize-failure-domain'): + # Defaults to 'host' so just need to deal with + # setting 'rack' if feature is enabled + failure_domain = 'rack' + + if failure_domain: + validator(failure_domain, six.string_types, failure_domains) + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain={}'.format(failure_domain)) + else: + cmd.append('ruleset-failure-domain={}'.format(failure_domain)) # device class new in luminous if luminous_or_later and device_class: + validator(device_class, six.string_types, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', level=DEBUG) # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) + if erasure_plugin_name == 'lrc': + # LRC mandatory configuration + if locality: + cmd.append('l={}'.format(str(locality))) + else: + raise ValueError("locality must be provided for lrc plugin") + # LRC optional configuration + if crush_locality: + validator(crush_locality, six.string_types, failure_domains) + cmd.append('crush-locality={}'.format(crush_locality)) + + if erasure_plugin_name == 'shec': + # SHEC optional configuration + if durability_estimator: + cmd.append('c={}'.format((durability_estimator))) + + if erasure_plugin_name == 'clay': + # CLAY optional configuration + if helper_chunks: + cmd.append('d={}'.format(str(helper_chunks))) + if scalar_mds: + cmd.append('scalar-mds={}'.format(scalar_mds)) if erasure_profile_exists(service, profile_name): cmd.append('--force') - try: - check_call(cmd) - except CalledProcessError: - raise + check_call(cmd) def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None + """Rename a Ceph pool from old_name to new_name. 
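+
+    Example (client and pool names illustrative)::
+
+        rename_pool('admin', 'mypool', 'mypool-archive')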
+ + :param service: The Ceph user name to run the command under. + :type service: str + :param old_name: Name of pool subject to rename. + :type old_name: str + :param new_name: Name to rename pool to. + :type new_name: str """ validator(value=old_name, valid_type=six.string_types) validator(value=new_name, valid_type=six.string_types) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command under - :param name: six.string_types - :return: int or None + """Check to see if an Erasure code profile already exists. + + :param service: The Ceph user name to run the command under + :type service: str + :param name: Name of profile to look for. + :type name: str + :returns: True if it exists, False otherwise. + :rtype: bool """ validator(value=name, valid_type=six.string_types) try: @@ -890,11 +1255,14 @@ def erasure_profile_exists(service, name): def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: int or None + """Find the current caching mode of the pool_name given. + + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool. + :type pool_name: str + :returns: Current cache mode. + :rtype: Optional[int] """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) @@ -976,17 +1344,23 @@ def create_rbd_image(service, pool, image, sizemb): def update_pool(client, pool, settings): + """Update pool properties. + + :param client: Client/User-name to authenticate with. + :type client: str + :param pool: Name of pool to operate on + :type pool: str + :param settings: Dictionary with key/value pairs to set. + :type settings: Dict[str, str] + :raises: CalledProcessError + """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) + check_call(cmd + [k, v]) def set_app_name_for_pool(client, pool, name): - """ - Calls `osd pool application enable` for the specified pool name + """Calls `osd pool application enable` for the specified pool name :param client: Name of the ceph client to use :type client: str @@ -1043,8 +1417,7 @@ def _keyring_path(service): def add_key(service, key): - """ - Add a key to a keyring. + """Add a key to a keyring. Creates the keyring if it doesn't already exist. @@ -1288,13 +1661,33 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id + def __init__(self, api_version=1, request_id=None, raw_request_data=None): + """Initialize CephBrokerRq object. + + Builds a new empty request or rebuilds a request from on-wire JSON + data. + + :param api_version: API version for request (default: 1). + :type api_version: Optional[int] + :param request_id: Unique identifier for request. + (default: string representation of generated UUID) + :type request_id: Optional[str] + :param raw_request_data: JSON-encoded string to build request from. 
+ :type raw_request_data: Optional[str] + :raises: KeyError + """ + if raw_request_data: + request_data = json.loads(raw_request_data) + self.api_version = request_data['api-version'] + self.request_id = request_data['request-id'] + self.set_ops(request_data['ops']) else: - self.request_id = str(uuid.uuid1()) - self.ops = [] + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] def add_op(self, op): """Add an op if it is not already in the list. @@ -1336,12 +1729,119 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, group=group, namespace=namespace, app_name=app_name, max_bytes=max_bytes, max_objects=max_objects) + # Use function parameters and docstring to define types in a compatible + # manner. + # + # NOTE: Our caller should always use a kwarg Dict when calling us so + # no need to maintain fixed order/position for parameters. Please keep them + # sorted by name when adding new ones. + def _partial_build_common_op_create(self, + app_name=None, + compression_algorithm=None, + compression_mode=None, + compression_required_ratio=None, + compression_min_blob_size=None, + compression_min_blob_size_hdd=None, + compression_min_blob_size_ssd=None, + compression_max_blob_size=None, + compression_max_blob_size_hdd=None, + compression_max_blob_size_ssd=None, + group=None, + max_bytes=None, + max_objects=None, + namespace=None, + weight=None): + """Build common part of a create pool operation. + + :param app_name: Tag pool with application name. Note that there is + certain protocols emerging upstream with regard to + meaningful application names to use. + Examples are 'rbd' and 'rgw'. + :type app_name: Optional[str] + :param compression_algorithm: Compressor to use, one of: + ('lz4', 'snappy', 'zlib', 'zstd') + :type compression_algorithm: Optional[str] + :param compression_mode: When to compress data, one of: + ('none', 'passive', 'aggressive', 'force') + :type compression_mode: Optional[str] + :param compression_required_ratio: Minimum compression ratio for data + chunk, if the requested ratio is not + achieved the compressed version will + be thrown away and the original + stored. + :type compression_required_ratio: Optional[float] + :param compression_min_blob_size: Chunks smaller than this are never + compressed (unit: bytes). + :type compression_min_blob_size: Optional[int] + :param compression_min_blob_size_hdd: Chunks smaller than this are not + compressed when destined to + rotational media (unit: bytes). + :type compression_min_blob_size_hdd: Optional[int] + :param compression_min_blob_size_ssd: Chunks smaller than this are not + compressed when destined to flash + media (unit: bytes). + :type compression_min_blob_size_ssd: Optional[int] + :param compression_max_blob_size: Chunks larger than this are broken + into N * compression_max_blob_size + chunks before being compressed + (unit: bytes). + :type compression_max_blob_size: Optional[int] + :param compression_max_blob_size_hdd: Chunks larger than this are + broken into + N * compression_max_blob_size_hdd + chunks before being compressed + when destined for rotational + media (unit: bytes) + :type compression_max_blob_size_hdd: Optional[int] + :param compression_max_blob_size_ssd: Chunks larger than this are + broken into + N * compression_max_blob_size_ssd + chunks before being compressed + when destined for flash media + (unit: bytes). 
+ :type compression_max_blob_size_ssd: Optional[int] + :param group: Group to add pool to + :type group: Optional[str] + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: Optional[int] + :param max_objects: Maximum objects quota to apply + :type max_objects: Optional[int] + :param namespace: Group namespace + :type namespace: Optional[str] + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: Optional[float] + :returns: Dictionary with kwarg name as key. + :rtype: Dict[str,any] + :raises: AssertionError + """ + return { + 'app-name': app_name, + 'compression-algorithm': compression_algorithm, + 'compression-mode': compression_mode, + 'compression-required-ratio': compression_required_ratio, + 'compression-min-blob-size': compression_min_blob_size, + 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, + 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, + 'compression-max-blob-size': compression_max_blob_size, + 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, + 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, + 'group': group, + 'max-bytes': max_bytes, + 'max-objects': max_objects, + 'group-namespace': namespace, + 'weight': weight, + } + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, - max_objects=None): + **kwargs): """Adds an operation to create a replicated pool. + Refer to docstring for ``_partial_build_common_op_create`` for + documentation of keyword arguments. + :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. @@ -1349,66 +1849,114 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: float - :param group: Group to add pool to - :type group: str - :param namespace: Group namespace - :type namespace: str - :param app_name: (Optional) Tag pool with application name. Note that - there is certain protocols emerging upstream with - regard to meaningful application names to use. - Examples are ``rbd`` and ``rgw``. - :type app_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int + :raises: AssertionError if provided data is of invalid type/range """ - if pg_num and weight: + if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') - self.add_op({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + op = { + 'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + } + op.update(self._partial_build_common_op_create(**kwargs)) + + # Initialize Pool-object to validate type and range of ops. 
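+        # For reference, a sketch of how a client charm might build such an
+        # op (names and values illustrative):
+        #
+        #     rq = CephBrokerRq()
+        #     rq.add_op_create_replicated_pool(
+        #         name='glance', replica_count=3, app_name='rbd',
+        #         compression_mode='aggressive')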
+        pool = ReplicatedPool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)

     def add_op_create_erasure_pool(self, name, erasure_profile=None,
-                                   weight=None, group=None, app_name=None,
-                                   max_bytes=None, max_objects=None):
+                                   allow_ec_overwrites=False, **kwargs):
         """Adds an operation to create an erasure coded pool.

+        Refer to docstring for ``_partial_build_common_op_create`` for
+        documentation of keyword arguments.
+
         :param name: Name of pool to create
         :type name: str
         :param erasure_profile: Name of erasure code profile to use. If not
                                 set the ceph-mon unit handling the broker
                                 request will set its default value.
         :type erasure_profile: str
-        :param weight: The percentage of data that is expected to be contained
-                       in the pool from the total available space on the OSDs.
-        :type weight: float
-        :param group: Group to add pool to
-        :type group: str
-        :param app_name: (Optional) Tag pool with application name. Note that
-                         there is certain protocols emerging upstream with
-                         regard to meaningful application names to use.
-                         Examples are ``rbd`` and ``rgw``.
-        :type app_name: str
-        :param max_bytes: Maximum bytes quota to apply
-        :type max_bytes: int
-        :param max_objects: Maximum objects quota to apply
-        :type max_objects: int
+        :param allow_ec_overwrites: allow EC pools to be overwritten
+        :type allow_ec_overwrites: bool
+        :raises: AssertionError if provided data is of invalid type/range
         """
-        self.add_op({'op': 'create-pool', 'name': name,
-                     'pool-type': 'erasure',
-                     'erasure-profile': erasure_profile,
-                     'weight': weight,
-                     'group': group, 'app-name': app_name,
-                     'max-bytes': max_bytes, 'max-objects': max_objects})
+        op = {
+            'op': 'create-pool',
+            'name': name,
+            'pool-type': 'erasure',
+            'erasure-profile': erasure_profile,
+            'allow-ec-overwrites': allow_ec_overwrites,
+        }
+        op.update(self._partial_build_common_op_create(**kwargs))
+
+        # Initialize Pool-object to validate type and range of ops.
+        pool = ErasurePool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)
+
+    def add_op_create_erasure_profile(self, name,
+                                      erasure_type='jerasure',
+                                      erasure_technique=None,
+                                      k=None, m=None,
+                                      failure_domain=None,
+                                      lrc_locality=None,
+                                      shec_durability_estimator=None,
+                                      clay_helper_chunks=None,
+                                      device_class=None,
+                                      clay_scalar_mds=None,
+                                      lrc_crush_locality=None):
+        """Adds an operation to create an erasure coding profile.
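+
+        Example (profile name and parameters illustrative)::
+
+            rq.add_op_create_erasure_profile(
+                name='ec-profile-4-2', erasure_type='jerasure',
+                erasure_technique='reed_sol_van', k=4, m=2,
+                failure_domain='host')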
+
+        :param name: Name of profile to create
+        :type name: str
+        :param erasure_type: Which of the erasure coding plugins should be used
+        :type erasure_type: string
+        :param erasure_technique: EC plugin technique to use
+        :type erasure_technique: string
+        :param k: Number of data chunks
+        :type k: int
+        :param m: Number of coding chunks
+        :type m: int
+        :param lrc_locality: Group the coding and data chunks into sets of size locality
+                             (lrc plugin)
+        :type lrc_locality: int
+        :param shec_durability_estimator: The number of parity chunks each of which includes
+                                          a data chunk in its calculation range (shec plugin)
+        :type shec_durability_estimator: int
+        :param clay_helper_chunks: The number of helper chunks to use for recovery operations
+                                   (clay plugin)
+        :type clay_helper_chunks: int
+        :param failure_domain: Type of failure domain from Ceph bucket types
+                               to be used
+        :type failure_domain: string
+        :param device_class: Device class to use for profile (ssd, hdd)
+        :type device_class: string
+        :param clay_scalar_mds: Plugin to use for CLAY layered construction
+                                (jerasure|isa|shec)
+        :type clay_scalar_mds: string
+        :param lrc_crush_locality: Type of crush bucket in which set of chunks
+                                   defined by lrc_locality will be stored.
+        :type lrc_crush_locality: string
+        """
+        self.add_op({'op': 'create-erasure-profile',
+                     'name': name,
+                     'k': k,
+                     'm': m,
+                     'l': lrc_locality,
+                     'c': shec_durability_estimator,
+                     'd': clay_helper_chunks,
+                     'erasure-type': erasure_type,
+                     'erasure-technique': erasure_technique,
+                     'failure-domain': failure_domain,
+                     'device-class': device_class,
+                     'scalar-mds': clay_scalar_mds,
+                     'crush-locality': lrc_crush_locality})

     def set_ops(self, ops):
         """Set request ops to provided value.
@@ -1424,12 +1972,14 @@ def request(self):
                           'request-id': self.request_id})

     def _ops_equal(self, other):
+        keys_to_compare = [
+            'replicas', 'name', 'op', 'pg_num', 'group-permission',
+            'object-prefix-permissions',
+        ]
+        keys_to_compare += list(self._partial_build_common_op_create().keys())
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                        'replicas', 'name', 'op', 'pg_num', 'weight',
-                        'group', 'group-namespace', 'group-permission',
-                        'object-prefix-permissions']:
+                for key in keys_to_compare:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
@@ -1522,18 +2072,15 @@ def exit_msg(self):

 def get_previous_request(rid):
     """Return the last ceph broker request sent on a given relation.

-    @param rid: Relation id to query for request
+    :param rid: Relation id to query for request
+    :type rid: str
+    :returns: CephBrokerRq object or None if relation data not found.
+    :rtype: Optional[CephBrokerRq]
     """
-    request = None
     broker_req = relation_get(attribute='broker_req', rid=rid,
                               unit=local_unit())
     if broker_req:
-        request_data = json.loads(broker_req)
-        request = CephBrokerRq(api_version=request_data['api-version'],
-                               request_id=request_data['request-id'])
-        request.set_ops(request_data['ops'])
-
-        return request
+        return CephBrokerRq(raw_request_data=broker_req)


 def get_request_states(request, relation='ceph'):
diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py
index 155c57f7..65308187 100644
--- a/ceph-osd/hooks/utils.py
+++ b/ceph-osd/hooks/utils.py
@@ -123,8 +123,28 @@ def import_osd_upgrade_key(key):


 def render_template(template_name, context, template_dir=TEMPLATES_DIR):
+    """Render Jinja2 template.
+ + In addition to the template directory specified by the caller the shared + 'templates' directory in the ``charmhelpers.contrib.openstack`` module will + be searched. + + :param template_name: Name of template file. + :type template_name: str + :param context: Template context. + :type context: Dict[str,any] + :param template_dir: Primary path to search for templates. + (default: contents of the ``TEMPLATES_DIR`` global) + :type template_dir: Optional[str] + :returns: The rendered template + :rtype: str + """ templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir)) + loader=jinja2.ChoiceLoader(( + jinja2.FileSystemLoader(template_dir), + jinja2.PackageLoader('charmhelpers.contrib.openstack', + 'templates'), + ))) template = templates.get_template(template_name) return template.render(context) diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 15552cd8..8f040a5e 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -16,6 +16,7 @@ import json import os +from subprocess import check_call, check_output, CalledProcessError from tempfile import NamedTemporaryFile from charms_ceph.utils import ( @@ -41,18 +42,16 @@ pool_set, remove_pool_snapshot, rename_pool, - set_pool_quota, snapshot_pool, validator, ErasurePool, - Pool, + BasePool, ReplicatedPool, ) # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message -from subprocess import check_call, check_output, CalledProcessError POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] @@ -155,25 +154,47 @@ def handle_create_erasure_profile(request, service): :param service: The ceph client to run the command under. :returns: dict. exit-code and reason if not 0 """ - # "local" | "shec" or it defaults to "jerasure" + # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure" erasure_type = request.get('erasure-type') - # "host" | "rack" or it defaults to "host" # Any valid Ceph bucket + # dependent on erasure coding type + erasure_technique = request.get('erasure-technique') + # "host" | "rack" | ... 
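+    # A hedged example of the op this handler consumes (keys correspond to
+    # the request.get() calls below; values illustrative):
+    #
+    #     {'op': 'create-erasure-profile', 'name': 'ec-profile-4-2',
+    #      'erasure-type': 'shec', 'k': 4, 'm': 2, 'c': 3,
+    #      'failure-domain': 'host'}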
    failure_domain = request.get('failure-domain')
    name = request.get('name')
    # Binary Distribution Matrix (BDM) parameters
    bdm_k = request.get('k')
    bdm_m = request.get('m')
+    # LRC parameters
    bdm_l = request.get('l')
-
-    if failure_domain not in CEPH_BUCKET_TYPES:
+    crush_locality = request.get('crush-locality')
+    # SHEC parameters
+    bdm_c = request.get('c')
+    # CLAY parameters
+    bdm_d = request.get('d')
+    scalar_mds = request.get('scalar-mds')
+    # Device Class
+    device_class = request.get('device-class')
+
+    if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
         msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}

-    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
-                           profile_name=name, failure_domain=failure_domain,
-                           data_chunks=bdm_k, coding_chunks=bdm_m,
-                           locality=bdm_l)
+    create_erasure_profile(service=service,
+                           erasure_plugin_name=erasure_type,
+                           profile_name=name,
+                           failure_domain=failure_domain,
+                           data_chunks=bdm_k,
+                           coding_chunks=bdm_m,
+                           locality=bdm_l,
+                           durability_estimator=bdm_c,
+                           helper_chunks=bdm_d,
+                           scalar_mds=scalar_mds,
+                           crush_locality=crush_locality,
+                           device_class=device_class,
+                           erasure_plugin_technique=erasure_technique)
+
+    return {'exit-code': 0}


 def handle_add_permissions_to_key(request, service):
@@ -383,22 +404,11 @@ def handle_erasure_pool(request, service):
     """
     pool_name = request.get('name')
     erasure_profile = request.get('erasure-profile')
-    max_bytes = request.get('max-bytes')
-    max_objects = request.get('max-objects')
-    weight = request.get('weight')
     group_name = request.get('group')

     if erasure_profile is None:
         erasure_profile = "default-canonical"

-    app_name = request.get('app-name')
-
-    # Check for missing params
-    if pool_name is None:
-        msg = "Missing parameter. name is required for the pool"
-        log(msg, level=ERROR)
-        return {'exit-code': 1, 'stderr': msg}
-
     if group_name:
         group_namespace = request.get('group-namespace')
         # Add the pool to the group named "group_name"
@@ -414,19 +424,22 @@ def handle_erasure_pool(request, service):
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}

-    pool = ErasurePool(service=service, name=pool_name,
-                       erasure_code_profile=erasure_profile,
-                       percent_data=weight, app_name=app_name)
+    try:
+        pool = ErasurePool(service=service,
+                           op=request)
+    except KeyError:
+        msg = "Missing parameter."
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
     # Ok make the erasure pool
     if not pool_exists(service=service, name=pool_name):
         log("Creating pool '{}' (erasure_profile={})"
             .format(pool.name, erasure_profile), level=INFO)
         pool.create()

-    # Set a quota if requested
-    if max_bytes or max_objects:
-        set_pool_quota(service=service, pool_name=pool_name,
-                       max_bytes=max_bytes, max_objects=max_objects)
+    # Set/update properties that are allowed to change after pool creation.
+    pool.update()


 def handle_replicated_pool(request, service):
@@ -437,26 +450,19 @@ def handle_replicated_pool(request, service):
     :returns: dict. exit-code and reason if not 0.
     """
     pool_name = request.get('name')
-    replicas = request.get('replicas')
-    max_bytes = request.get('max-bytes')
-    max_objects = request.get('max-objects')
-    weight = request.get('weight')
     group_name = request.get('group')

     # Optional params
+    # NOTE: Check this against the handling in the Pool classes, reconcile and
+    # remove.
     pg_num = request.get('pg_num')
+    replicas = request.get('replicas')
    if pg_num:
        # Cap pg_num to max allowed just in case.
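        # Illustrative numbers: with 30 OSDs and replicas == 3 the cap below
        # is (30 * 100) // 3 == 1000, so a requested pg_num of 2048 would be
        # reduced to 1000.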
osds = get_osds(service) if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - app_name = request.get('app-name') - # Check for missing params - if pool_name is None or replicas is None: - msg = "Missing parameter. name and replicas are required" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + request.update({'pg_num': pg_num}) if group_name: group_namespace = request.get('group-namespace') @@ -465,18 +471,14 @@ def handle_replicated_pool(request, service): group=group_name, namespace=group_namespace) - kwargs = {} - if pg_num: - kwargs['pg_num'] = pg_num - if weight: - kwargs['percent_data'] = weight - if replicas: - kwargs['replicas'] = replicas - if app_name: - kwargs['app_name'] = app_name - - pool = ReplicatedPool(service=service, - name=pool_name, **kwargs) + try: + pool = ReplicatedPool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) @@ -485,10 +487,8 @@ def handle_replicated_pool(request, service): log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. + pool.update() def handle_create_cache_tier(request, service): @@ -515,7 +515,7 @@ def handle_create_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - p = Pool(service=service, name=storage_pool) + p = BasePool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) @@ -536,7 +536,7 @@ def handle_remove_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = Pool(name=storage_pool, service=service) + pool = BasePool(name=storage_pool, service=service) pool.remove_cache_tier(cache_pool=cache_pool) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 560e764c..1284b516 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -70,6 +70,7 @@ bluestore block wal size = {{ bluestore_block_wal_size }} {% if bluestore_block_db_size -%} bluestore block db size = {{ bluestore_block_db_size }} {%- endif %} +{% include 'section-ceph-bluestore-compression' %} {%- else %} osd journal size = {{ osd_journal_size }} filestore xattr use omap = true diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 07b3ab4d..7f8132cb 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -55,6 +55,8 @@ class CephHooksTestCase(unittest.TestCase): def setUp(self): super(CephHooksTestCase, self).setUp() + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -97,6 +99,8 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) 
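    # NOTE: CephBlueStoreCompressionContext is patched with a nested
    # ``lambda: lambda: {}`` because the hook first instantiates the context
    # class and then calls the instance to render its dict; compare the mock
    # configuration in test_get_ceph_context_bluestore_compression below.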
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -141,6 +145,8 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -184,6 +190,8 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -233,6 +241,8 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -280,6 +290,8 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -324,6 +336,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @patch.object(ceph_hooks, 'get_auth', lambda *args: False) @@ -370,6 +384,53 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext') + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") + @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', + '10.0.0.2']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_bluestore_compression( + self, mock_config, mock_config2, mock_bluestore_compression): + config = copy.deepcopy(CHARM_CONFIG) + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + mock_bluestore_compression().return_value = { + 
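# Here the MagicMock injected by @patch.object stands in for the
# compression context; whatever dict it returns is merged into the
# output of get_ceph_context(), which is what the
# 'fake-bluestore-compression-key' entry asserted below relies on.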
'fake-bluestore-compression-key': 'fake-value'} + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '10.1.0.1', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '10.0.0.1 10.0.0.2', + 'old_auth': False, + 'crush_initial_weight': '0', + 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), + 'public_addr': '10.0.0.1', + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bdev_discard': True, + 'bluestore': False, + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0, + 'fake-bluestore-compression-key': 'fake-value'} + self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'service_restart') @patch.object(ceph_hooks, 'service_reload') diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index c5c080a1..d296eb17 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -15,7 +15,7 @@ import mock import test_utils -from mock import patch +from mock import MagicMock, patch with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -65,6 +65,8 @@ def test_assess_status_monitor_relation_incomplete(self): self.status_set.assert_called_with('waiting', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: MagicMock()) def test_assess_status_monitor_complete_no_disks(self): self.relation_ids.return_value = ['mon:1'] self.related_units.return_value = CEPH_MONS @@ -74,6 +76,8 @@ def test_assess_status_monitor_complete_no_disks(self): self.status_set.assert_called_with('blocked', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: MagicMock()) def test_assess_status_monitor_complete_disks(self): self.relation_ids.return_value = ['mon:1'] self.related_units.return_value = CEPH_MONS @@ -117,3 +121,14 @@ def test_assess_status_monitor_vault_incomplete(self): hooks.assess_status() self.status_set.assert_called_with('waiting', mock.ANY) self.application_version_set.assert_called_with('12.2.4') + + @patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext') + def test_assess_status_invalid_bluestore_compression_options( + self, _bluestore_compression): + self.relation_ids.return_value = ['mon:1'] + self.related_units.return_value = CEPH_MONS + _bluestore_compression().validate.side_effect = ValueError( + 'fake-config is invalid') + hooks.assess_status() + self.status_set.assert_called_with( + 'blocked', 'Invalid configuration: fake-config is invalid') From ada039714f44a0cc2a4c1b05d66ef102ba14a2fe Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 27 Aug 2020 09:52:54 +0000 Subject: [PATCH 2042/2699] Remove dummy manifest Change-Id: I04016c152b02112afac47fe9364b0c494984ff5b --- ceph-iscsi/.build.manifest | 1 - 1 file changed, 1 deletion(-) delete mode 100644 ceph-iscsi/.build.manifest diff --git a/ceph-iscsi/.build.manifest b/ceph-iscsi/.build.manifest deleted file mode 100644 index 00b562cd..00000000 --- a/ceph-iscsi/.build.manifest +++ /dev/null @@ -1 +0,0 @@ -See venv directory From b160947deba339cb362b1d796d1a8548606c8fcd Mon Sep 
17 00:00:00 2001 From: James Page Date: Thu, 27 Aug 2020 11:15:41 +0100 Subject: [PATCH 2043/2699] Tidy configuration options Update descriptions and align metadata-pool configuration option with codebase. Metadata pools are used by CephFS for both replicated and erasure-coded data pools. Change-Id: Iee6ee64e7bd672f9932037bf91670d8f59083aa4 --- ceph-fs/src/config.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index c875baab..f6f48d9c 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -76,7 +76,15 @@ options: default: type: string description: | - Optionally specify an existing rbd pool that cinder should map to. + Name of the data pool to be created/used. If not defined a data pool name + will be generated based on the name of the application. + metadata-pool: + type: string + default: + description: | + Name of the metadata pool to be created/used. If not defined a metadata + pool name will be generated based on the name of the application. + The metadata pool is always replicated, not erasure coded. pool-type: type: string default: replicated @@ -90,14 +98,6 @@ options: Name for the EC profile to be created for the EC pools. If not defined a profile name will be generated based on the name of the pool used by the application. - ec-rbd-metadata-pool: - type: string - default: - description: | - Name of the metadata pool to be created (for RBD use-cases). If not - defined a metadata pool name will be generated based on the name of - the data pool used by the application. The metadata pool is always - replicated, not erasure coded. ec-profile-k: type: int default: 1 From 8db7808e72af19788d1b30cff6a79b5c35085e03 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 6 Aug 2020 06:54:41 +0100 Subject: [PATCH 2044/2699] Use charms.ceph for Ceph broker Drop use of local copy of ceph_broker.py in preference to the centrally maintained copy in charms.ceph. 
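For the hook code the switch is mostly a matter of import paths; a minimal sketch of the idea (the wrapper function and its use are hypothetical, process_requests is the broker entry point carried over from the old local module):

    import json

    # Previously the broker implementation was a private copy next to the
    # hooks:
    #     from ceph_broker import process_requests
    # With the synced library it comes from lib/ instead (see the new
    # ceph-sync Makefile target below):
    from charms_ceph.broker import process_requests

    def handle_broker_request(raw_request):
        # The broker request travels JSON-encoded over the relation;
        # decode it and hand it to the shared implementation, which
        # returns a response carrying an exit-code and any error text.
        return process_requests(json.loads(raw_request))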
Change-Id: I89aa0f9fc7d5d2d480ebabc1cb17a86dcbef21bf --- ceph-proxy/Makefile | 7 + .../contrib/storage/linux/ceph.py | 1415 ++++--- ceph-proxy/hooks/ceph_hooks.py | 4 +- ceph-proxy/hooks/install | 3 +- ceph-proxy/hooks/install_deps | 18 + ceph-proxy/hooks/upgrade-charm | 6 + ceph-proxy/lib/.keep | 3 - ceph-proxy/lib/charms_ceph/__init__.py | 0 .../charms_ceph/broker.py} | 810 ++-- ceph-proxy/lib/charms_ceph/crush_utils.py | 154 + ceph-proxy/lib/charms_ceph/utils.py | 3349 +++++++++++++++++ ceph-proxy/unit_tests/test_ceph_broker.py | 136 - 12 files changed, 5092 insertions(+), 813 deletions(-) create mode 100755 ceph-proxy/hooks/install_deps create mode 100755 ceph-proxy/hooks/upgrade-charm delete mode 100644 ceph-proxy/lib/.keep create mode 100644 ceph-proxy/lib/charms_ceph/__init__.py rename ceph-proxy/{hooks/ceph_broker.py => lib/charms_ceph/broker.py} (53%) create mode 100644 ceph-proxy/lib/charms_ceph/crush_utils.py create mode 100644 ceph-proxy/lib/charms_ceph/utils.py delete mode 100644 ceph-proxy/unit_tests/test_ceph_broker.py diff --git a/ceph-proxy/Makefile b/ceph-proxy/Makefile index 39458a3d..09b701f6 100644 --- a/ceph-proxy/Makefile +++ b/ceph-proxy/Makefile @@ -18,3 +18,10 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + +bin/git_sync.py: + @mkdir -p bin + @wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py + +ceph-sync: bin/git_sync.py + $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 814d5c72..526b95ad 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -39,6 +39,7 @@ check_output, CalledProcessError, ) +from charmhelpers import deprecate from charmhelpers.core.hookenv import ( config, service_name, @@ -178,94 +179,293 @@ def send_osd_settings(): def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + """Helper function for type validation. + + Used to validate these: + https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + Example input: validator(value=1, valid_type=int, valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] - :param value: The value to validate + :param value: The value to validate. + :type value: any :param valid_type: The type that value should be. + :type valid_type: any :param valid_range: A range of values that value can assume. 
- :return: + :type valid_range: Optional[Union[List,Tuple]] + :raises: AssertionError, ValueError """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) + assert isinstance(value, valid_type), ( + "{} is not a {}".format(value, valid_type)) if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) + assert isinstance( + valid_range, list) or isinstance(valid_range, tuple), ( + "valid_range must be of type List or Tuple, " + "was given {} of type {}" + .format(valid_range, type(valid_range))) # If we're dealing with strings if isinstance(value, six.string_types): - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) + assert value in valid_range, ( + "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max else: if len(valid_range) != 2: raise ValueError( - "Invalid valid_range list of {} for {}. " + "Invalid valid_range list of {} for {}. " "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) + assert value >= valid_range[0], ( + "{} is less than minimum allowed value of {}" + .format(value, valid_range[0])) + assert value <= valid_range[1], ( + "{} is greater than maximum allowed value of {}" + .format(value, valid_range[1])) class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. Provides an error message + """A custom exception to inform the caller that a pool creation failed. + + Provides an error message. """ def __init__(self, message): super(PoolCreationError, self).__init__(message) -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). +class BasePool(object): + """An object oriented approach to Ceph pool creation. + + This base class is inherited by ReplicatedPool and ErasurePool. Do not call + create() on this base class as it will raise an exception. + + Instantiate a child class and call create(). """ + # Dictionary that maps pool operation properties to Tuples with valid type + # and valid range + op_validation_map = { + 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), + 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), + 'compression-required-ratio': (float, None), + 'compression-min-blob-size': (int, None), + 'compression-min-blob-size-hdd': (int, None), + 'compression-min-blob-size-ssd': (int, None), + 'compression-max-blob-size': (int, None), + 'compression-max-blob-size-hdd': (int, None), + 'compression-max-blob-size-ssd': (int, None), + } - def __init__(self, service, name): + def __init__(self, service, name=None, percent_data=None, app_name=None, + op=None): + """Initialize BasePool object. + + Pool information is either initialized from individual keyword + arguments or from an individual CephBrokerRq operation Dict. + + :param service: The Ceph user name to run commands under. + :type service: str + :param name: Name of pool to operate on.
+ :type name: str + :param percent_data: The expected pool size in relation to all + available resources in the Ceph cluster. Will be + used to set the ``target_size_ratio`` pool + property. (default: 10.0) + :type percent_data: Optional[float] + :param app_name: Ceph application name, usually one of: + ('cephfs', 'rbd', 'rgw') (default: 'unknown') + :type app_name: Optional[str] + :param op: Broker request Op to compile pool data from. + :type op: Optional[Dict[str,any]] + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.name = name + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + self.op = op or {} + + if op: + # When initializing from op the `name` attribute is required and we + # will fail with KeyError if it is not provided. + self.name = op['name'] + self.percent_data = op.get('weight') + self.app_name = op.get('app-name') + else: + self.name = name + self.percent_data = percent_data + self.app_name = app_name + + # Set defaults for these if they are not provided + self.percent_data = self.percent_data or 10.0 + self.app_name = self.app_name or 'unknown' + + def validate(self): + """Check that value of supplied operation parameters are valid. + + :raises: ValueError + """ + for op_key, op_value in self.op.items(): + if op_key in self.op_validation_map and op_value is not None: + valid_type, valid_range = self.op_validation_map[op_key] + try: + validator(op_value, valid_type, valid_range) + except (AssertionError, ValueError) as e: + # Normalize on ValueError, also add information about which + # variable we had an issue with. + raise ValueError("'{}': {}".format(op_key, str(e))) + + def _create(self): + """Perform the pool creation, method MUST be overridden by child class. + """ + raise NotImplementedError + + def _post_create(self): + """Perform common post pool creation tasks. + + Note that pool properties subject to change during the lifetime of a + pool / deployment should go into the ``update`` method. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + if self.nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool( + client=self.service, + pool=self.name, + settings={ + 'target_size_ratio': str( + self.percent_data / 100.0), + }) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}' + .format(self.name), + level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}' + .format(self.name, e), + level=WARNING) - # Create the pool if it doesn't exist already - # To be implemented by subclasses def create(self): - pass + """Create pool and perform any post pool creation tasks. - def add_cache_tier(self, cache_pool, mode): + To allow for sharing of common code among pool specific classes the + processing has been broken out into the private methods ``_create`` + and ``_post_create``. + + Do not add any pool type specific handling here, that should go into + one of the pool specific classes. """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. 
The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None + if not pool_exists(self.service, self.name): + self.validate() + self._create() + self._post_create() + self.update() + + def set_quota(self): + """Set a quota if requested. + + :raises: CalledProcessError + """ + max_bytes = self.op.get('max-bytes') + max_objects = self.op.get('max-objects') + if max_bytes or max_objects: + set_pool_quota(service=self.service, pool_name=self.name, + max_bytes=max_bytes, max_objects=max_objects) + + def set_compression(self): + """Set compression properties if requested. + + :raises: CalledProcessError + """ + compression_properties = { + key.replace('-', '_'): value + for key, value in self.op.items() + if key in ( + 'compression-algorithm', + 'compression-mode', + 'compression-required-ratio', + 'compression-min-blob-size', + 'compression-min-blob-size-hdd', + 'compression-min-blob-size-ssd', + 'compression-max-blob-size', + 'compression-max-blob-size-hdd', + 'compression-max-blob-size-ssd') and value} + if compression_properties: + update_pool(self.service, self.name, compression_properties) + + def update(self): + """Update properties for an already existing pool. + + Do not add calls for a specific pool type here, those should go into + one of the pool specific classes. + """ + self.validate() + self.set_quota() + self.set_compression() + + def add_cache_tier(self, cache_pool, mode): + """Adds a new cache tier to an existing pool. + + :param cache_pool: The cache tier pool name to add. + :type cache_pool: str + :param mode: The caching mode to use for this pool. + valid range = ["readonly", "writeback"] + :type mode: str """ # Check the input types and values validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + validator( + value=mode, valid_type=six.string_types, + valid_range=["readonly", "writeback"]) + + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'add', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, mode, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'set-overlay', self.name, cache_pool, + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', + ]) def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. - :return: None + """Removes a cache tier from Ceph. + + Flushes all dirty objects from writeback pools and waits for that to + complete. + + :param cache_pool: The cache tier pool name to remove. 
+ :type cache_pool: str """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'cache-mode', cache_pool, 'none' + ]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool, + ]) elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', @@ -276,9 +476,15 @@ def remove_cache_tier(self, cache_pool): check_call(pool_forward_cmd) # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + check_call([ + 'rados', '--id', self.service, + '-p', cache_pool, 'cache-flush-evict-all']) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove-overlay', self.name]) + check_call([ + 'ceph', '--id', self.service, + 'osd', 'tier', 'remove', self.name, cache_pool]) def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, device_class=None): @@ -305,19 +511,23 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, selected for the specific rule, rather it is left to the user to tune in the form of 'expected-osd-count' config option. - :param pool_size: int. pool_size is either the number of replicas for + :param pool_size: pool_size is either the number of replicas for replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. the percentage of data that is expected to + :type pool_size: int + :param percent_data: the percentage of data that is expected to be contained in the pool for the specific OSD set. Default value is to assume 10% of the data is for this pool, which is a relatively low % of the data but allows for the pg_num to be increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. - :param device_class: str. class of storage to use for basis of pgs + :type percent_data: float + :param device_class: class of storage to use for basis of pgs calculation; ceph supports nvme, ssd and hdd by default based on presence of devices of each type in the deployment. - :return: int. The number of pgs to use. + :type device_class: str + :returns: The number of pgs to use. 
+ :rtype: int """ # Note: This calculation follows the approach that is provided @@ -357,7 +567,8 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return LEGACY_PG_COUNT percent_data /= 100.0 - target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + target_pgs_per_osd = config( + 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size # NOTE: ensure a sane minimum number of PGS otherwise we don't get any @@ -380,147 +591,174 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, return int(nearest) -class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0, app_name=None): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - self.percent_data = percent_data - if pg_num: +class Pool(BasePool): + """Compatibility shim for any descendants external to this library.""" + + @deprecate( + 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') + def __init__(self, service, name): + super(Pool, self).__init__(service, name=name) + + def create(self): + pass + + +class ReplicatedPool(BasePool): + def __init__(self, service, name=None, pg_num=None, replicas=None, + percent_data=None, app_name=None, op=None): + """Initialize ReplicatedPool object. + + Pool information is either initialized from individual keyword + arguments or from an individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param pg_num: Express wish for number of Placement Groups (this value + is subject to validation against a running cluster prior + to use to avoid creating a pool with too many PGs) + :type pg_num: int + :param replicas: Number of copies there should be of each object added + to this replicated pool. + :type replicas: int + :raises: KeyError + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parent's initializer + super(ReplicatedPool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # When initializing from op `replicas` is a required attribute, and + # we will fail with KeyError if it is not provided. + self.replicas = op['replicas'] + self.pg_num = op.get('pg_num') + else: + self.replicas = replicas or 2 + self.pg_num = pg_num + + def _create(self): + # Do extra validation on pg_num with data from live cluster + if self.pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created.
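# Illustrative arithmetic (hypothetical deployment): get_pgs() with
# pool_size=3, 30 OSDs, 100 per cent percent_data and the default
# 100 pgs-per-osd target yields (100 * 30 * 1.0) // 3 == 1000, which is
# then rounded to a power of two (1024); the min() below clamps any
# explicitly requested pg_num to that ceiling.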
max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) + self.pg_num = min(self.pg_num, max_pgs) else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - if app_name: - self.app_name = app_name + self.pg_num = self.get_pgs(self.replicas, self.percent_data) + + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] else: - self.app_name = 'unknown' + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + check_call(cmd) - def create(self): - if not pool_exists(self.service, self.name): - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num) - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) - ] + def _post_create(self): + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + # Perform other common post pool creation tasks + super(ReplicatedPool, self)._post_create() - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0, app_name=None): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
- erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, pgs) - ), - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] +class ErasurePool(BasePool): + """Default jerasure erasure coded pool.""" - try: - check_call(cmd) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name), level=WARNING) - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool(client=self.service, - pool=self.name, - settings={'target_size_ratio': str(self.percent_data / 100.0)}) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}'.format( - self.name, e), level=WARNING) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" + def __init__(self, service, name=None, erasure_code_profile=None, + percent_data=None, app_name=None, op=None, + allow_ec_overwrites=False): + """Initialize ErasurePool object. + + Pool information is either initialized from individual keyword + arguments or from an individual CephBrokerRq operation Dict. + + Please refer to the docstring of the ``BasePool`` class for + documentation of the common parameters. + + :param erasure_code_profile: EC Profile to use (default: 'default') + :type erasure_code_profile: Optional[str] + """ + # NOTE: Do not perform initialization steps that require live data from + # a running cluster here. The *Pool classes may be used for validation. + + # The common parameters are handled in our parent's initializer + super(ErasurePool, self).__init__( + service=service, name=name, percent_data=percent_data, + app_name=app_name, op=op) + + if op: + # Note that the different default when initializing from op stems + # from different handling of this in the `charms.ceph` library. + self.erasure_code_profile = op.get('erasure-profile', + 'default-canonical') + self.allow_ec_overwrites = op.get('allow-ec-overwrites') + else: + # We keep the class default when initialized from keyword arguments + # to not break the API for any other consumers. + self.erasure_code_profile = erasure_code_profile or 'default' + self.allow_ec_overwrites = allow_ec_overwrites + + def _create(self): + # Try to find the erasure profile information in order to properly + # size the number of placement groups.
The size of an erasure + # coded placement group is calculated as k+m. + erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) + self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if self.nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + check_call(cmd) + + def _post_create(self): + super(ErasurePool, self)._post_create() + if self.allow_ec_overwrites: + update_pool(self.service, self.name, + {'allow_ec_overwrites': 'true'}) def enabled_manager_modules(): @@ -541,22 +779,28 @@ def enabled_manager_modules(): def enable_pg_autoscale(service, pool_name): - """ - Enable Ceph's PG autoscaler for the specified pool. + """Enable Ceph's PG autoscaler for the specified pool. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types. The name of the pool to enable sutoscaling on - :raise: CalledProcessError if the command fails + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: The name of the pool to enable autoscaling on + :type pool_name: str + :raises: CalledProcessError if the command fails """ - check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + check_call([ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + """Return the current monitor map. + + :param service: The Ceph user name to run the command under + :type service: str + :returns: Dictionary with monitor map data + :rtype: Dict[str,any] + :raises: ValueError if the monmap fails to parse, CalledProcessError if our + ceph command fails. + """ try: mon_status = check_output(['ceph', '--id', service, @@ -576,17 +820,16 @@ def get_mon_map(service): def hash_monitor_names(service): - """ + """Get a sorted list of monitor hashes in ascending order. + Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. The Ceph user name to run the command under - :rtype : dict. json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} + cluster. Hash the name of each monitor.
+ + :param service: The Ceph user name to run the command under. + :type service: str + :returns: a sorted list of monitor hashes in an ascending order. + :rtype: List[str] + :raises: CalledProcessError, ValueError """ try: hash_list = [] @@ -603,46 +846,56 @@ def hash_monitor_names(service): def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command under + """Delete a key and value pair from the monitor cluster. + Deletes a key value pair on the monitor cluster. - :param key: six.string_types. The key to delete. + + :param service: The Ceph user name to run the command under + :type service: str + :param key: The key to delete. + :type key: str + :raises: CalledProcessError """ try: check_output( ['ceph', '--id', service, 'config-key', 'del', str(key)]) except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) + log("Monitor config-key del failed with message: {}" + .format(e.output)) raise def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to set. - :param value: The value to set. This will be converted to a string - before setting + """Set a key value pair on the monitor cluster. + + :param service: The Ceph user name to run the command under. + :type service: str + :param key: The key to set. + :type key: str + :param value: The value to set. This will be coerced into a string. + :type value: str + :raises: CalledProcessError """ try: check_output( ['ceph', '--id', service, 'config-key', 'put', str(key), str(value)]) except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) + log("Monitor config-key put failed with message: {}" + .format(e.output)) raise def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for. + """Get the value of an existing key in the monitor cluster. + + :param service: The Ceph user name to run the command under + :type service: str + :param key: The key to search for. + :type key: str :return: Returns the value of that key or None if not found. + :rtype: Optional[str] """ try: output = check_output( @@ -650,19 +903,21 @@ def monitor_key_get(service, key): 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format( - e.output)) + log("Monitor config-key get failed with message: {}" + .format(e.output)) return None def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs + """Search for existence of key in the monitor cluster. + + :param service: The Ceph user name to run the command under. + :type service: str + :param key: The key to search for. + :type key: str + :return: Returns True if the key exists, False if not.
+ :rtype: bool + :raises: CalledProcessError if an unknown error occurs. """ try: check_call( @@ -675,16 +930,20 @@ def monitor_key_exists(service, key): if e.returncode == errno.ENOENT: return False else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) + log("Unknown error from ceph config-get exists: {} {}" + .format(e.returncode, e.output)) raise def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: + """Get an existing erasure code profile if it exists. + + :param service: The Ceph user name to run the command under. + :type service: str + :param name: Name of profile. + :type name: str + :returns: Dictionary with profile data. + :rtype: Optional[Dict[str]] """ try: out = check_output(['ceph', '--id', service, @@ -698,54 +957,61 @@ def get_erasure_profile(service, name): def pool_set(service, pool_name, key, value): + """Sets a value for a RADOS pool in ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to set property on. + :type pool_name: str + :param key: Property key. + :type key: str + :param value: Value, will be coerced into str and shifted to lowercase. + :type value: str + :raises: CalledProcessError """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, - str(value).lower()] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set', pool_name, key, str(value).lower()] + check_call(cmd) def snapshot_pool(service, pool_name, snapshot_name): + """Snapshots a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to snapshot. + :type pool_name: str + :param snapshot_name: Name of snapshot to create. + :type snapshot_name: str + :raises: CalledProcessError """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + check_call(cmd) def remove_pool_snapshot(service, pool_name, snapshot_name): + """Remove a snapshot from a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove snapshot from. + :type pool_name: str + :param snapshot_name: Name of snapshot to remove. + :type snapshot_name: str + :raises: CalledProcessError """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + check_call(cmd) def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): - """ + """Set byte quota on a RADOS pool in Ceph. + :param service: The Ceph user name to run the command under :type service: str :param pool_name: Name of pool @@ -756,7 +1022,9 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): :type max_objects: int :raises: subprocess.CalledProcessError """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name] if max_bytes: cmd = cmd + ['max_bytes', str(max_bytes)] if max_objects: @@ -765,119 +1033,216 @@ def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): def remove_pool_quota(service, pool_name): + """Remove byte quota on a RADOS pool in Ceph. + + :param service: The Ceph user name to run the command under. + :type service: str + :param pool_name: Name of pool to remove quota from. + :type pool_name: str + :raises: CalledProcessError """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + check_call(cmd) def remove_erasure_profile(service, profile_name): + """Remove erasure code profile. + + :param service: The Ceph user name to run the command under + :type service: str + :param profile_name: Name of profile to remove. + :type profile_name: str + :raises: CalledProcessError """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'rm', profile_name] + check_call(cmd) -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', - failure_domain='host', +def create_erasure_profile(service, profile_name, + erasure_plugin_name='jerasure', + failure_domain=None, data_chunks=2, coding_chunks=1, locality=None, durability_estimator=None, - device_class=None): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. 
One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', - 'room', 'root', 'row']) - :param data_chunks: int - :param coding_chunks: int - :param locality: int - :param durability_estimator: int - :param device_class: six.string_types - :return: None. Can raise CalledProcessError - """ - # Ensure this failure_domain is allowed by Ceph - validator(failure_domain, six.string_types, - ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + helper_chunks=None, + scalar_mds=None, + crush_locality=None, + device_class=None, + erasure_plugin_technique=None): + """Create a new erasure code profile if one does not already exist for it. + + Updates the profile if it exists. Please refer to [0] for more details. - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) - ] - if locality is not None and durability_estimator is not None: - raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + + :param service: The Ceph user name to run the command under. + :type service: str + :param profile_name: Name of profile. + :type profile_name: str + :param erasure_plugin_name: Erasure code plugin. + :type erasure_plugin_name: str + :param failure_domain: Failure domain, one of: + ('chassis', 'datacenter', 'host', 'osd', 'pdu', + 'pod', 'rack', 'region', 'room', 'root', 'row'). + :type failure_domain: str + :param data_chunks: Number of data chunks. + :type data_chunks: int + :param coding_chunks: Number of coding chunks. + :type coding_chunks: int + :param locality: Locality. + :type locality: int + :param durability_estimator: Durability estimator. + :type durability_estimator: int + :param helper_chunks: int + :type helper_chunks: int + :param device_class: Restrict placement to devices of specific class. + :type device_class: str + :param scalar_mds: one of ['isa', 'jerasure', 'shec'] + :type scalar_mds: str + :param crush_locality: LRC locality faulure domain, one of: + ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', + 'rack', 'region', 'room', 'root', 'row') or unset. + :type crush_locaity: str + :param erasure_plugin_technique: Coding technique for EC plugin + :type erasure_plugin_technique: str + :return: None. 
Can raise CalledProcessError, ValueError or AssertionError + """ + plugin_techniques = { + 'jerasure': [ + 'reed_sol_van', + 'reed_sol_r6_op', + 'cauchy_orig', + 'cauchy_good', + 'liberation', + 'blaum_roth', + 'liber8tion' + ], + 'lrc': [], + 'isa': [ + 'reed_sol_van', + 'cauchy', + ], + 'shec': [ + 'single', + 'multiple' + ], + 'clay': [], + } + failure_domains = [ + 'chassis', 'datacenter', + 'host', 'osd', + 'pdu', 'pod', + 'rack', 'region', + 'room', 'root', + 'row', + ] + device_classes = [ + 'ssd', + 'hdd', + 'nvme' + ] + + validator(erasure_plugin_name, six.string_types, + list(plugin_techniques.keys())) + + cmd = [ + 'ceph', '--id', service, + 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin={}'.format(erasure_plugin_name), + 'k={}'.format(str(data_chunks)), + 'm={}'.format(str(coding_chunks)), + ] + + if erasure_plugin_technique: + validator(erasure_plugin_technique, six.string_types, + plugin_techniques[erasure_plugin_name]) + cmd.append('technique={}'.format(erasure_plugin_technique)) luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - # failure_domain changed in luminous - if luminous_or_later: - cmd.append('crush-failure-domain=' + failure_domain) - else: - cmd.append('ruleset-failure-domain=' + failure_domain) + + # Set failure domain from options if not provided in args + if not failure_domain and config('customize-failure-domain'): + # Defaults to 'host' so just need to deal with + # setting 'rack' if feature is enabled + failure_domain = 'rack' + + if failure_domain: + validator(failure_domain, six.string_types, failure_domains) + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain={}'.format(failure_domain)) + else: + cmd.append('ruleset-failure-domain={}'.format(failure_domain)) # device class new in luminous if luminous_or_later and device_class: + validator(device_class, six.string_types, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', level=DEBUG) # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) + if erasure_plugin_name == 'lrc': + # LRC mandatory configuration + if locality: + cmd.append('l={}'.format(str(locality))) + else: + raise ValueError("locality must be provided for lrc plugin") + # LRC optional configuration + if crush_locality: + validator(crush_locality, six.string_types, failure_domains) + cmd.append('crush-locality={}'.format(crush_locality)) + + if erasure_plugin_name == 'shec': + # SHEC optional configuration + if durability_estimator: + cmd.append('c={}'.format((durability_estimator))) + + if erasure_plugin_name == 'clay': + # CLAY optional configuration + if helper_chunks: + cmd.append('d={}'.format(str(helper_chunks))) + if scalar_mds: + cmd.append('scalar-mds={}'.format(scalar_mds)) if erasure_profile_exists(service, profile_name): cmd.append('--force') - try: - check_call(cmd) - except CalledProcessError: - raise + check_call(cmd) def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None + """Rename a Ceph pool from old_name to new_name. 
+ + :param service: The Ceph user name to run the command under. + :type service: str + :param old_name: Name of pool subject to rename. + :type old_name: str + :param new_name: Name to rename pool to. + :type new_name: str """ validator(value=old_name, valid_type=six.string_types) validator(value=new_name, valid_type=six.string_types) - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'rename', old_name, new_name] check_call(cmd) def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command under - :param name: six.string_types - :return: int or None + """Check to see if an Erasure code profile already exists. + + :param service: The Ceph user name to run the command under + :type service: str + :param name: Name of profile to look for. + :type name: str + :returns: True if it exists, False otherwise. + :rtype: bool """ validator(value=name, valid_type=six.string_types) try: @@ -890,11 +1255,14 @@ def erasure_profile_exists(service, name): def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: int or None + """Find the current caching mode of the pool_name given. + + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool. + :type pool_name: str + :returns: Current cache mode. + :rtype: Optional[int] """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) @@ -976,17 +1344,23 @@ def create_rbd_image(service, pool, image, sizemb): def update_pool(client, pool, settings): + """Update pool properties. + + :param client: Client/User-name to authenticate with. + :type client: str + :param pool: Name of pool to operate on + :type pool: str + :param settings: Dictionary with key/value pairs to set. + :type settings: Dict[str, str] + :raises: CalledProcessError + """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) + check_call(cmd + [k, v]) def set_app_name_for_pool(client, pool, name): - """ - Calls `osd pool application enable` for the specified pool name + """Calls `osd pool application enable` for the specified pool name :param client: Name of the ceph client to use :type client: str @@ -1043,8 +1417,7 @@ def _keyring_path(service): def add_key(service, key): - """ - Add a key to a keyring. + """Add a key to a keyring. Creates the keyring if it doesn't already exist. @@ -1288,13 +1661,33 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id + def __init__(self, api_version=1, request_id=None, raw_request_data=None): + """Initialize CephBrokerRq object. + + Builds a new empty request or rebuilds a request from on-wire JSON + data. + + :param api_version: API version for request (default: 1). + :type api_version: Optional[int] + :param request_id: Unique identifier for request. + (default: string representation of generated UUID) + :type request_id: Optional[str] + :param raw_request_data: JSON-encoded string to build request from. 
+ :type raw_request_data: Optional[str] + :raises: KeyError + """ + if raw_request_data: + request_data = json.loads(raw_request_data) + self.api_version = request_data['api-version'] + self.request_id = request_data['request-id'] + self.set_ops(request_data['ops']) else: - self.request_id = str(uuid.uuid1()) - self.ops = [] + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] def add_op(self, op): """Add an op if it is not already in the list. @@ -1336,12 +1729,119 @@ def add_op_create_pool(self, name, replica_count=3, pg_num=None, group=group, namespace=namespace, app_name=app_name, max_bytes=max_bytes, max_objects=max_objects) + # Use function parameters and docstring to define types in a compatible + # manner. + # + # NOTE: Our caller should always use a kwarg Dict when calling us so + # no need to maintain fixed order/position for parameters. Please keep them + # sorted by name when adding new ones. + def _partial_build_common_op_create(self, + app_name=None, + compression_algorithm=None, + compression_mode=None, + compression_required_ratio=None, + compression_min_blob_size=None, + compression_min_blob_size_hdd=None, + compression_min_blob_size_ssd=None, + compression_max_blob_size=None, + compression_max_blob_size_hdd=None, + compression_max_blob_size_ssd=None, + group=None, + max_bytes=None, + max_objects=None, + namespace=None, + weight=None): + """Build common part of a create pool operation. + + :param app_name: Tag pool with application name. Note that there is + certain protocols emerging upstream with regard to + meaningful application names to use. + Examples are 'rbd' and 'rgw'. + :type app_name: Optional[str] + :param compression_algorithm: Compressor to use, one of: + ('lz4', 'snappy', 'zlib', 'zstd') + :type compression_algorithm: Optional[str] + :param compression_mode: When to compress data, one of: + ('none', 'passive', 'aggressive', 'force') + :type compression_mode: Optional[str] + :param compression_required_ratio: Minimum compression ratio for data + chunk, if the requested ratio is not + achieved the compressed version will + be thrown away and the original + stored. + :type compression_required_ratio: Optional[float] + :param compression_min_blob_size: Chunks smaller than this are never + compressed (unit: bytes). + :type compression_min_blob_size: Optional[int] + :param compression_min_blob_size_hdd: Chunks smaller than this are not + compressed when destined to + rotational media (unit: bytes). + :type compression_min_blob_size_hdd: Optional[int] + :param compression_min_blob_size_ssd: Chunks smaller than this are not + compressed when destined to flash + media (unit: bytes). + :type compression_min_blob_size_ssd: Optional[int] + :param compression_max_blob_size: Chunks larger than this are broken + into N * compression_max_blob_size + chunks before being compressed + (unit: bytes). + :type compression_max_blob_size: Optional[int] + :param compression_max_blob_size_hdd: Chunks larger than this are + broken into + N * compression_max_blob_size_hdd + chunks before being compressed + when destined for rotational + media (unit: bytes) + :type compression_max_blob_size_hdd: Optional[int] + :param compression_max_blob_size_ssd: Chunks larger than this are + broken into + N * compression_max_blob_size_ssd + chunks before being compressed + when destined for flash media + (unit: bytes). 
+ :type compression_max_blob_size_ssd: Optional[int] + :param group: Group to add pool to + :type group: Optional[str] + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: Optional[int] + :param max_objects: Maximum objects quota to apply + :type max_objects: Optional[int] + :param namespace: Group namespace + :type namespace: Optional[str] + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: Optional[float] + :returns: Dictionary with kwarg name as key. + :rtype: Dict[str,any] + :raises: AssertionError + """ + return { + 'app-name': app_name, + 'compression-algorithm': compression_algorithm, + 'compression-mode': compression_mode, + 'compression-required-ratio': compression_required_ratio, + 'compression-min-blob-size': compression_min_blob_size, + 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, + 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, + 'compression-max-blob-size': compression_max_blob_size, + 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, + 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, + 'group': group, + 'max-bytes': max_bytes, + 'max-objects': max_objects, + 'group-namespace': namespace, + 'weight': weight, + } + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, - max_objects=None): + **kwargs): """Adds an operation to create a replicated pool. + Refer to docstring for ``_partial_build_common_op_create`` for + documentation of keyword arguments. + :param name: Name of pool to create :type name: str :param replica_count: Number of copies Ceph should keep of your data. @@ -1349,66 +1849,114 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, :param pg_num: Request specific number of Placement Groups to create for pool. :type pg_num: int - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: float - :param group: Group to add pool to - :type group: str - :param namespace: Group namespace - :type namespace: str - :param app_name: (Optional) Tag pool with application name. Note that - there is certain protocols emerging upstream with - regard to meaningful application names to use. - Examples are ``rbd`` and ``rgw``. - :type app_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int + :raises: AssertionError if provided data is of invalid type/range """ - if pg_num and weight: + if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') - self.add_op({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + op = { + 'op': 'create-pool', + 'name': name, + 'replicas': replica_count, + 'pg_num': pg_num, + } + op.update(self._partial_build_common_op_create(**kwargs)) + + # Initialize Pool-object to validate type and range of ops. 
+        pool = ReplicatedPool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)
 
     def add_op_create_erasure_pool(self, name, erasure_profile=None,
-                                   weight=None, group=None, app_name=None,
-                                   max_bytes=None, max_objects=None):
+                                   allow_ec_overwrites=False, **kwargs):
         """Adds an operation to create an erasure coded pool.
 
+        Refer to docstring for ``_partial_build_common_op_create`` for
+        documentation of keyword arguments.
+
         :param name: Name of pool to create
         :type name: str
         :param erasure_profile: Name of erasure code profile to use.  If not
                                 set the ceph-mon unit handling the broker
                                 request will set its default value.
         :type erasure_profile: str
-        :param weight: The percentage of data that is expected to be contained
-                       in the pool from the total available space on the OSDs.
-        :type weight: float
-        :param group: Group to add pool to
-        :type group: str
-        :param app_name: (Optional) Tag pool with application name.  Note that
-                         there is certain protocols emerging upstream with
-                         regard to meaningful application names to use.
-                         Examples are ``rbd`` and ``rgw``.
-        :type app_name: str
-        :param max_bytes: Maximum bytes quota to apply
-        :type max_bytes: int
-        :param max_objects: Maximum objects quota to apply
-        :type max_objects: int
+        :param allow_ec_overwrites: allow EC pools to be overridden
+        :type allow_ec_overwrites: bool
+        :raises: AssertionError if provided data is of invalid type/range
         """
-        self.add_op({'op': 'create-pool', 'name': name,
-                     'pool-type': 'erasure',
-                     'erasure-profile': erasure_profile,
-                     'weight': weight,
-                     'group': group, 'app-name': app_name,
-                     'max-bytes': max_bytes, 'max-objects': max_objects})
+        op = {
+            'op': 'create-pool',
+            'name': name,
+            'pool-type': 'erasure',
+            'erasure-profile': erasure_profile,
+            'allow-ec-overwrites': allow_ec_overwrites,
+        }
+        op.update(self._partial_build_common_op_create(**kwargs))
+
+        # Initialize Pool-object to validate type and range of ops.
+        pool = ErasurePool('dummy-service', op)
+        pool.validate()
+
+        self.add_op(op)
+
+    def add_op_create_erasure_profile(self, name,
+                                      erasure_type='jerasure',
+                                      erasure_technique=None,
+                                      k=None, m=None,
+                                      failure_domain=None,
+                                      lrc_locality=None,
+                                      shec_durability_estimator=None,
+                                      clay_helper_chunks=None,
+                                      device_class=None,
+                                      clay_scalar_mds=None,
+                                      lrc_crush_locality=None):
+        """Adds an operation to create an erasure coding profile.
+
+        :param name: Name of profile to create
+        :type name: str
+        :param erasure_type: Which of the erasure coding plugins should be used
+        :type erasure_type: string
+        :param erasure_technique: EC plugin technique to use
+        :type erasure_technique: string
+        :param k: Number of data chunks
+        :type k: int
+        :param m: Number of coding chunks
+        :type m: int
+        :param lrc_locality: Group the coding and data chunks into sets of size
+                             locality (lrc plugin)
+        :type lrc_locality: int
+        :param shec_durability_estimator: The number of parity chunks each of
+                                          which includes a data chunk in its
+                                          calculation range (shec plugin)
+        :type shec_durability_estimator: int
+        :param clay_helper_chunks: The number of helper chunks to use for
+                                   recovery operations (clay plugin)
+        :type clay_helper_chunks: int
+        :param failure_domain: Type of failure domain from Ceph bucket types
+                               to be used
+        :type failure_domain: string
+        :param device_class: Device class to use for profile (ssd, hdd)
+        :type device_class: string
+        :param clay_scalar_mds: Plugin to use for CLAY layered construction
+                                (jerasure|isa|shec)
+        :type clay_scalar_mds: string
+        :param lrc_crush_locality: Type of crush bucket in which set of chunks
+                                   defined by lrc_locality will be stored.
+        :type lrc_crush_locality: string
+        """
+        self.add_op({'op': 'create-erasure-profile',
+                     'name': name,
+                     'k': k,
+                     'm': m,
+                     'l': lrc_locality,
+                     'c': shec_durability_estimator,
+                     'd': clay_helper_chunks,
+                     'erasure-type': erasure_type,
+                     'erasure-technique': erasure_technique,
+                     'failure-domain': failure_domain,
+                     'device-class': device_class,
+                     'scalar-mds': clay_scalar_mds,
+                     'crush-locality': lrc_crush_locality})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
@@ -1424,12 +1972,14 @@ def request(self):
                            'request-id': self.request_id})
 
     def _ops_equal(self, other):
+        keys_to_compare = [
+            'replicas', 'name', 'op', 'pg_num', 'group-permission',
+            'object-prefix-permissions',
+        ]
+        keys_to_compare += list(self._partial_build_common_op_create().keys())
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                    'replicas', 'name', 'op', 'pg_num', 'weight',
-                    'group', 'group-namespace', 'group-permission',
-                    'object-prefix-permissions']:
+                for key in keys_to_compare:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
@@ -1522,18 +2072,15 @@ def exit_msg(self):
 
 def get_previous_request(rid):
     """Return the last ceph broker request sent on a given relation
 
-    @param rid: Relation id to query for request
+    :param rid: Relation id to query for request
+    :type rid: str
+    :returns: CephBrokerRq object or None if relation data not found.
+ :rtype: Optional[CephBrokerRq] """ - request = None broker_req = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request + return CephBrokerRq(raw_request_data=broker_req) def get_request_states(request, relation='ceph'): diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 456c10bc..0c16d399 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -16,6 +16,7 @@ _path = os.path.dirname(os.path.realpath(__file__)) _root = os.path.abspath(os.path.join(_path, '..')) +_lib = os.path.abspath(os.path.join(_path, '../lib')) def _add_path(path): @@ -24,6 +25,7 @@ def _add_path(path): _add_path(_root) +_add_path(_lib) import ceph from charmhelpers.core.hookenv import ( @@ -63,7 +65,7 @@ def _add_path(path): from charmhelpers.core.templating import render -from ceph_broker import ( +from charms_ceph.broker import ( process_requests ) diff --git a/ceph-proxy/hooks/install b/ceph-proxy/hooks/install index eb058242..869ee204 100755 --- a/ceph-proxy/hooks/install +++ b/ceph-proxy/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') check_and_install() { pkg="${1}-${2}" @@ -17,4 +17,5 @@ for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} done +./hooks/install_deps exec ./hooks/install.real diff --git a/ceph-proxy/hooks/install_deps b/ceph-proxy/hooks/install_deps new file mode 100755 index 00000000..c480f29e --- /dev/null +++ b/ceph-proxy/hooks/install_deps @@ -0,0 +1,18 @@ +#!/bin/bash -e +# Wrapper to ensure that python dependencies are installed before we get into +# the python part of the hook execution + +declare -a DEPS=('dnspython' 'pyudev') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python3" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done diff --git a/ceph-proxy/hooks/upgrade-charm b/ceph-proxy/hooks/upgrade-charm new file mode 100755 index 00000000..c32fb38c --- /dev/null +++ b/ceph-proxy/hooks/upgrade-charm @@ -0,0 +1,6 @@ +#!/bin/bash -e +# Wrapper to ensure that old python bytecode isn't hanging around +# after we upgrade the charm with newer libraries +rm -rf **/*.pyc + +./hooks/install_deps diff --git a/ceph-proxy/lib/.keep b/ceph-proxy/lib/.keep deleted file mode 100644 index f49b91ae..00000000 --- a/ceph-proxy/lib/.keep +++ /dev/null @@ -1,3 +0,0 @@ - This file was created by release-tools to ensure that this empty - directory is preserved in vcs re: lint check definitions in global - tox.ini files. This file can be removed if/when this dir is actually in use. 
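Note: the get_previous_request() rework above leans on the new
``raw_request_data`` constructor path added to CephBrokerRq earlier in this
patch. A minimal sketch of the round trip (not part of the patch; it assumes
the patched charmhelpers library is importable, and the pool name is
illustrative):

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    # Build a request on the client side and serialize it for the relation.
    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(name='glance', replica_count=3,
                                     app_name='rbd')
    wire_data = rq.request  # JSON string: api-version, request-id, ops

    # In a later hook execution, rebuild the request in one step instead of
    # loading the JSON and calling set_ops() by hand.
    rebuilt = CephBrokerRq(raw_request_data=wire_data)
    assert rebuilt.ops == rq.ops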
diff --git a/ceph-proxy/lib/charms_ceph/__init__.py b/ceph-proxy/lib/charms_ceph/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-proxy/hooks/ceph_broker.py b/ceph-proxy/lib/charms_ceph/broker.py similarity index 53% rename from ceph-proxy/hooks/ceph_broker.py rename to ceph-proxy/lib/charms_ceph/broker.py index ec55a67d..8f040a5e 100644 --- a/ceph-proxy/hooks/ceph_broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -1,12 +1,29 @@ -#!/usr/bin/python +# Copyright 2016 Canonical Ltd # -# Copyright 2015 Canonical Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import collections import json -import six -from subprocess import check_call, CalledProcessError +import os +from subprocess import check_call, check_output, CalledProcessError +from tempfile import NamedTemporaryFile + +from charms_ceph.utils import ( + get_cephfs, + get_osd_weight +) +from charms_ceph.crush_utils import Crushmap from charmhelpers.core.hookenv import ( log, @@ -25,18 +42,17 @@ pool_set, remove_pool_snapshot, rename_pool, - set_pool_quota, snapshot_pool, validator, ErasurePool, - Pool, + BasePool, ReplicatedPool, ) - # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message + POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] "size": [int], @@ -51,8 +67,8 @@ "write_fadvise_dontneed": [bool], "noscrub": [bool], "nodeep-scrub": [bool], - "hit_set_type": [six.string_types, ["bloom", "explicit_hash", - "explicit_object"]], + "hit_set_type": [str, ["bloom", "explicit_hash", + "explicit_object"]], "hit_set_count": [int, [1, 1]], "hit_set_period": [int], "hit_set_fpp": [float, [0.0, 1.0]], @@ -64,6 +80,11 @@ "cache_min_flush_age": [int], "cache_min_evict_age": [int], "fast_read": [bool], + "allow_ec_overwrites": [bool], + "compression_mode": [str, ["none", "passive", "aggressive", "force"]], + "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], + "compression_required_ratio": [float, [0.0, 1.0]], + "crush_rule": [str], } CEPH_BUCKET_TYPES = [ @@ -96,6 +117,9 @@ def process_requests(reqs): This is a versioned api. API version must be supplied by the client making the request. + + :param reqs: dict of request parameters. + :returns: dict. exit-code and reason if not 0 """ request_id = reqs.get('request-id') try: @@ -115,7 +139,7 @@ def process_requests(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - msg = ("Missing or invalid api version (%s)" % version) + msg = ("Missing or invalid api version ({})".format(version)) resp = {'exit-code': 1, 'stderr': msg} if request_id: resp['request-id'] = request_id @@ -124,23 +148,251 @@ def process_requests(reqs): def handle_create_erasure_profile(request, service): - # "local" | "shec" or it defaults to "jerasure" + """Create an erasure profile. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0
+    """
+    # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure"
     erasure_type = request.get('erasure-type')
-    # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
+    # dependent on erasure coding type
+    erasure_technique = request.get('erasure-technique')
+    # "host" | "rack" | ...
     failure_domain = request.get('failure-domain')
     name = request.get('name')
-    k = request.get('k')
-    m = request.get('m')
-    l = request.get('l')
-
-    if failure_domain not in CEPH_BUCKET_TYPES:
+    # Binary Distribution Matrix (BDM) parameters
+    bdm_k = request.get('k')
+    bdm_m = request.get('m')
+    # LRC parameters
+    bdm_l = request.get('l')
+    crush_locality = request.get('crush-locality')
+    # SHEC parameters
+    bdm_c = request.get('c')
+    # CLAY parameters
+    bdm_d = request.get('d')
+    scalar_mds = request.get('scalar-mds')
+    # Device Class
+    device_class = request.get('device-class')
+
+    if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
         msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
         log(msg, level=ERROR)
         return {'exit-code': 1, 'stderr': msg}
 
-    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
-                           profile_name=name, failure_domain=failure_domain,
-                           data_chunks=k, coding_chunks=m, locality=l)
+    create_erasure_profile(service=service,
+                           erasure_plugin_name=erasure_type,
+                           profile_name=name,
+                           failure_domain=failure_domain,
+                           data_chunks=bdm_k,
+                           coding_chunks=bdm_m,
+                           locality=bdm_l,
+                           durability_estimator=bdm_c,
+                           helper_chunks=bdm_d,
+                           scalar_mds=scalar_mds,
+                           crush_locality=crush_locality,
+                           device_class=device_class,
+                           erasure_plugin_technique=erasure_technique)
+
+    return {'exit-code': 0}
+
+
+def handle_add_permissions_to_key(request, service):
+    """Groups are defined by the key cephx.groups.(namespace-)?-(name).  This
+    key will contain a dict serialized to JSON with data about the group,
+    including pools and members.
+
+    A group can optionally have a namespace defined that will be used to
+    further restrict pool access.
+    """
+    resp = {'exit-code': 0}
+
+    service_name = request.get('name')
+    group_name = request.get('group')
+    group_namespace = request.get('group-namespace')
+    if group_namespace:
+        group_name = "{}-{}".format(group_namespace, group_name)
+    group = get_group(group_name=group_name)
+    service_obj = get_service_groups(service=service_name,
+                                     namespace=group_namespace)
+    if request.get('object-prefix-permissions'):
+        service_obj['object_prefix_perms'] = request.get(
+            'object-prefix-permissions')
+    log("Service object: {}".format(service_obj), level=DEBUG)
+    permission = request.get('group-permission') or "rwx"
+    if service_name not in group['services']:
+        group['services'].append(service_name)
+        save_group(group=group, group_name=group_name)
+    if permission not in service_obj['group_names']:
+        service_obj['group_names'][permission] = []
+    if group_name not in service_obj['group_names'][permission]:
+        service_obj['group_names'][permission].append(group_name)
+        save_service(service=service_obj, service_name=service_name)
+    service_obj['groups'] = _build_service_groups(service_obj,
+                                                  group_namespace)
+    update_service_permissions(service_name, service_obj, group_namespace)
+
+    return resp
+
+
+def handle_set_key_permissions(request, service):
+    """Ensure the key has the requested permissions."""
+    permissions = request.get('permissions')
+    client = request.get('client')
+    call = ['ceph', '--id', service, 'auth', 'caps',
+            'client.{}'.format(client)] + permissions
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        log("Error updating key capabilities: {}".format(e), level=ERROR)
+
+
+def update_service_permissions(service, service_obj=None, namespace=None):
+    """Update the key permissions for the named client in Ceph"""
+    if not service_obj:
+        service_obj = get_service_groups(service=service, namespace=namespace)
+    permissions = pool_permission_list_for_service(service_obj)
+    call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        log("Error updating key capabilities: {}".format(e))
+
+
+def add_pool_to_group(pool, group, namespace=None):
+    """Add a named pool to a named group"""
+    group_name = group
+    if namespace:
+        group_name = "{}-{}".format(namespace, group_name)
+    group = get_group(group_name=group_name)
+    if pool not in group['pools']:
+        group["pools"].append(pool)
+        save_group(group, group_name=group_name)
+    for service in group['services']:
+        update_service_permissions(service, namespace=namespace)
+
+
+def pool_permission_list_for_service(service):
+    """Build the permission string for Ceph for a given service"""
+    permissions = []
+    permission_types = collections.OrderedDict()
+    for permission, group in sorted(service["group_names"].items()):
+        if permission not in permission_types:
+            permission_types[permission] = []
+        for item in group:
+            permission_types[permission].append(item)
+    for permission, groups in permission_types.items():
+        permission = "allow {}".format(permission)
+        for group in groups:
+            for pool in service['groups'][group].get('pools', []):
+                permissions.append("{} pool={}".format(permission, pool))
+    for permission, prefixes in sorted(
+            service.get("object_prefix_perms", {}).items()):
+        for prefix in prefixes:
+            permissions.append("allow {} object_prefix {}".format(permission,
+                                                                  prefix))
+    return ['mon', 'allow r, allow command "osd blacklist"',
+            'osd', ', '.join(permissions)]
+
+
+def get_service_groups(service, namespace=None):
+    """Services are objects stored with some metadata, they
look like (for a + service named "nova"): + { + group_names: {'rwx': ['images']}, + groups: {} + } + After populating the group, it looks like: + { + group_names: {'rwx': ['images']}, + groups: { + 'images': { + pools: ['glance'], + services: ['nova'] + } + } + } + """ + service_json = monitor_key_get(service='admin', + key="cephx.services.{}".format(service)) + try: + service = json.loads(service_json) + except (TypeError, ValueError): + service = None + if service: + service['groups'] = _build_service_groups(service, namespace) + else: + service = {'group_names': {}, 'groups': {}} + return service + + +def _build_service_groups(service, namespace=None): + """Rebuild the 'groups' dict for a service group + + :returns: dict: dictionary keyed by group name of the following + format: + + { + 'images': { + pools: ['glance'], + services: ['nova', 'glance] + }, + 'vms':{ + pools: ['nova'], + services: ['nova'] + } + } + """ + all_groups = {} + for groups in service['group_names'].values(): + for group in groups: + name = group + if namespace: + name = "{}-{}".format(namespace, name) + all_groups[group] = get_group(group_name=name) + return all_groups + + +def get_group(group_name): + """A group is a structure to hold data about a named group, structured as: + { + pools: ['glance'], + services: ['nova'] + } + """ + group_key = get_group_key(group_name=group_name) + group_json = monitor_key_get(service='admin', key=group_key) + try: + group = json.loads(group_json) + except (TypeError, ValueError): + group = None + if not group: + group = { + 'pools': [], + 'services': [] + } + return group + + +def save_service(service_name, service): + """Persist a service in the monitor cluster""" + service['groups'] = {} + return monitor_key_set(service='admin', + key="cephx.services.{}".format(service_name), + value=json.dumps(service, sort_keys=True)) + + +def save_group(group, group_name): + """Persist a group in the monitor cluster""" + group_key = get_group_key(group_name=group_name) + return monitor_key_set(service='admin', + key=group_key, + value=json.dumps(group, sort_keys=True)) + + +def get_group_key(group_name): + """Build group key""" + return 'cephx.groups.{}'.format(group_name) def handle_erasure_pool(request, service): @@ -152,22 +404,11 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') if erasure_profile is None: erasure_profile = "default-canonical" - app_name = request.get('app-name') - - # Check for missing params - if pool_name is None: - msg = "Missing parameter. name is required for the pool" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if group_name: group_namespace = request.get('group-namespace') # Add the pool to the group named "group_name" @@ -183,19 +424,22 @@ def handle_erasure_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = ErasurePool(service=service, name=pool_name, - erasure_code_profile=erasure_profile, - percent_data=weight, app_name=app_name) + try: + pool = ErasurePool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." 
+ log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (erasure_profile={})" .format(pool.name, erasure_profile), level=INFO) pool.create() - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. + pool.update() def handle_replicated_pool(request, service): @@ -206,26 +450,19 @@ def handle_replicated_pool(request, service): :returns: dict. exit-code and reason if not 0. """ pool_name = request.get('name') - replicas = request.get('replicas') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') # Optional params + # NOTE: Check this against the handling in the Pool classes, reconcile and + # remove. pg_num = request.get('pg_num') + replicas = request.get('replicas') if pg_num: # Cap pg_num to max allowed just in case. osds = get_osds(service) if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - app_name = request.get('app-name') - # Check for missing params - if pool_name is None or replicas is None: - msg = "Missing parameter. name and replicas are required" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + request.update({'pg_num': pg_num}) if group_name: group_namespace = request.get('group-namespace') @@ -234,18 +471,14 @@ def handle_replicated_pool(request, service): group=group_name, namespace=group_namespace) - kwargs = {} - if pg_num: - kwargs['pg_num'] = pg_num - if weight: - kwargs['percent_data'] = weight - if replicas: - kwargs['replicas'] = replicas - if app_name: - kwargs['app_name'] = app_name - - pool = ReplicatedPool(service=service, - name=pool_name, **kwargs) + try: + pool = ReplicatedPool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) @@ -254,13 +487,18 @@ def handle_replicated_pool(request, service): log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. + pool.update() def handle_create_cache_tier(request, service): + """Create a cache tier on a cold pool. Modes supported are + "writeback" and "readonly". + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ # mode = "writeback" | "readonly" storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') @@ -277,11 +515,17 @@ def handle_create_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - p = Pool(service=service, name=storage_pool) + p = BasePool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) def handle_remove_cache_tier(request, service): + """Remove a cache tier from the cold pool. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. 
+ :returns: dict. exit-code and reason if not 0 + """ storage_pool = request.get('cold-pool') cache_pool = request.get('hot-pool') # cache and storage pool must exist first @@ -292,23 +536,41 @@ def handle_remove_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = Pool(name=storage_pool, service=service) + pool = BasePool(name=storage_pool, service=service) pool.remove_cache_tier(cache_pool=cache_pool) -def handle_set_pool_value(request, service): +def handle_set_pool_value(request, service, coerce=False): + """Sets an arbitrary pool value. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :param coerce: Try to parse/coerce the value into the correct type. + Used by the action code that only gets Str from Juju + :returns: dict. exit-code and reason if not 0 + """ # Set arbitrary pool values params = {'pool': request.get('name'), 'key': request.get('key'), 'value': request.get('value')} if params['key'] not in POOL_KEYS: - msg = "Invalid key '%s'" % params['key'] + msg = "Invalid key '{}'".format(params['key']) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} # Get the validation method validator_params = POOL_KEYS[params['key']] - if len(validator_params) is 1: + # BUG: #1838650 - the function needs to try to coerce the value param to + # the type required for the validator to pass. Note, if this blows, then + # the param isn't parsable to the correct type. + if coerce: + try: + params['value'] = validator_params[0](params['value']) + except ValueError: + raise RuntimeError("Value {} isn't of type {}" + .format(params['value'], validator_params[0])) + # end of BUG: #1838650 + if len(validator_params) == 1: # Validate that what the user passed is actually legal per Ceph's rules validator(params['value'], validator_params[0]) else: @@ -320,190 +582,247 @@ def handle_set_pool_value(request, service): value=params['value']) -def handle_add_permissions_to_key(request, service): - """Groups are defined by the key cephx.groups.(namespace-)?-(name). This - key will contain a dict serialized to JSON with data about the group, - including pools and members. +def handle_rgw_regionmap_update(request, service): + """Change the radosgw region map. - A group can optionally have a namespace defined that will be used to - further restrict pool access. + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 """ - resp = {'exit-code': 0} - - service_name = request.get('name') - group_name = request.get('group') - group_namespace = request.get('group-namespace') - if group_namespace: - group_name = "{}-{}".format(group_namespace, group_name) - group = get_group(group_name=group_name) - service_obj = get_service_groups(service=service_name, - namespace=group_namespace) - if request.get('object-prefix-permissions'): - service_obj['object_prefix_perms'] = request.get( - 'object-prefix-permissions') - format("Service object: {}".format(service_obj)) - permission = request.get('group-permission') or "rwx" - if service_name not in group['services']: - group['services'].append(service_name) - save_group(group=group, group_name=group_name) - if permission not in service_obj['group_names']: - service_obj['group_names'][permission] = [] - if group_name not in service_obj['group_names'][permission]: - service_obj['group_names'][permission].append(group_name) - save_service(service=service_obj, service_name=service_name) - service_obj['groups'] = _build_service_groups(service_obj, - group_namespace) - update_service_permissions(service_name, service_obj, group_namespace) - - return resp - - -def add_pool_to_group(pool, group, namespace=None): - """Add a named pool to a named group""" - group_name = group - if namespace: - group_name = "{}-{}".format(namespace, group_name) - group = get_group(group_name=group_name) - if pool not in group['pools']: - group["pools"].append(pool) - save_group(group, group_name=group_name) - for service in group['services']: - update_service_permissions(service, namespace=namespace) - + name = request.get('client-name') + if not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + try: + check_output(['radosgw-admin', + '--id', service, + 'regionmap', 'update', '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} -def pool_permission_list_for_service(service): - """Build the permission string for Ceph for a given service""" - permissions = [] - permission_types = collections.OrderedDict() - for permission, group in sorted(service["group_names"].items()): - if permission not in permission_types: - permission_types[permission] = [] - for item in group: - permission_types[permission].append(item) - for permission, groups in permission_types.items(): - permission = "allow {}".format(permission) - for group in groups: - for pool in service['groups'][group].get('pools', []): - permissions.append("{} pool={}".format(permission, pool)) - for permission, prefixes in sorted( - service.get("object_prefix_perms", {}).items()): - for prefix in prefixes: - permissions.append("allow {} object_prefix {}".format(permission, - prefix)) - return ['mon', 'allow r, allow command "osd blacklist"', - 'osd', ', '.join(permissions)] +def handle_rgw_regionmap_default(request, service): + """Create a radosgw region map. -def update_service_permissions(service, service_obj=None, namespace=None): - """Update the key permissions for the named client in Ceph""" - if not service_obj: - service_obj = get_service_groups(service=service, namespace=namespace) - permissions = pool_permission_list_for_service(service_obj) - call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 + """ + region = request.get('rgw-region') + name = request.get('client-name') + if not region or not name: + msg = "Missing rgw-region or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} try: - check_call(call) - except CalledProcessError as e: - log("Error updating key capabilities: {}".format(e)) - - -def save_service(service_name, service): - """Persist a service in the monitor cluster""" - service['groups'] = {} - return monitor_key_set(service='admin', - key="cephx.services.{}".format(service_name), - value=json.dumps(service, sort_keys=True)) + check_output( + [ + 'radosgw-admin', + '--id', service, + 'regionmap', + 'default', + '--rgw-region', region, + '--name', name]) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_rgw_zone_set(request, service): + """Create a radosgw zone. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + json_file = request.get('zone-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'zone', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) + + +def handle_put_osd_in_bucket(request, service): + """Move an osd into a specified crush bucket. + + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + osd_id = request.get('osd') + target_bucket = request.get('bucket') + if not osd_id or not target_bucket: + msg = "Missing OSD ID or Bucket" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + crushmap = Crushmap() + try: + crushmap.ensure_bucket_is_present(target_bucket) + check_output( + [ + 'ceph', + '--id', service, + 'osd', + 'crush', + 'set', + str(osd_id), + str(get_osd_weight(osd_id)), + "root={}".format(target_bucket) + ] + ) + except Exception as exc: + msg = "Failed to move OSD " \ + "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} -def save_group(group, group_name): - """Persist a group in the monitor cluster""" - group_key = get_group_key(group_name=group_name) - return monitor_key_set(service='admin', - key=group_key, - value=json.dumps(group, sort_keys=True)) +def handle_rgw_create_user(request, service): + """Create a new rados gateway user. -def get_group(group_name): - """A group is a structure to hold data about a named group, structured as: - { - pools: ['glance'], - services: ['nova'] - } + :param request: dict of request operations and params + :param service: The ceph client to run the command under. + :returns: dict. 
exit-code and reason if not 0 """ - group_key = get_group_key(group_name=group_name) - group_json = monitor_key_get(service='admin', key=group_key) + user_id = request.get('rgw-uid') + display_name = request.get('display-name') + name = request.get('client-name') + if not name or not display_name or not user_id: + msg = "Missing client-name, display-name or rgw-uid" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} try: - group = json.loads(group_json) - except (TypeError, ValueError): - group = None - if not group: - group = { - 'pools': [], - 'services': [] - } - return group - + create_output = check_output( + [ + 'radosgw-admin', + '--id', service, + 'user', + 'create', + '--uid', user_id, + '--display-name', display_name, + '--name', name, + '--system' + ] + ) + try: + user_json = json.loads(str(create_output.decode('UTF-8'))) + return {'exit-code': 0, 'user': user_json} + except ValueError as err: + log(err, level=ERROR) + return {'exit-code': 1, 'stderr': err} + + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + + +def handle_create_cephfs(request, service): + """Create a new cephfs. + + :param request: The broker request + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 + """ + cephfs_name = request.get('mds_name') + data_pool = request.get('data_pool') + metadata_pool = request.get('metadata_pool') + # Check if the user params were provided + if not cephfs_name or not data_pool or not metadata_pool: + msg = "Missing mds_name, data_pool or metadata_pool params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} -def get_group_key(group_name): - """Build group key""" - return 'cephx.groups.{}'.format(group_name) + # Sanity check that the required pools exist + if not pool_exists(service=service, name=data_pool): + msg = "CephFS data pool does not exist. Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=metadata_pool): + msg = "CephFS metadata pool does not exist. 
Cannot create CephFS" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if get_cephfs(service=service): + # CephFS new has already been called + log("CephFS already created") + return -def get_service_groups(service, namespace=None): - """Services are objects stored with some metadata, they look like (for a - service named "nova"): - { - group_names: {'rwx': ['images']}, - groups: {} - } - After populating the group, it looks like: - { - group_names: {'rwx': ['images']}, - groups: { - 'images': { - pools: ['glance'], - services: ['nova'] - } - } - } - """ - service_json = monitor_key_get(service='admin', - key="cephx.services.{}".format(service)) + # Finally create CephFS try: - service = json.loads(service_json) - except (TypeError, ValueError): - service = None - if service: - service['groups'] = _build_service_groups(service, namespace) - else: - service = {'group_names': {}, 'groups': {}} - return service - + check_output(["ceph", + '--id', service, + "fs", "new", cephfs_name, + metadata_pool, + data_pool]) + except CalledProcessError as err: + if err.returncode == 22: + log("CephFS already created") + return + else: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} -def _build_service_groups(service, namespace=None): - """Rebuild the 'groups' dict for a service group - :returns: dict: dictionary keyed by group name of the following - format: +def handle_rgw_region_set(request, service): + # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + """Set the rados gateway region. - { - 'images': { - pools: ['glance'], - services: ['nova', 'glance] - }, - 'vms':{ - pools: ['nova'], - services: ['nova'] - } - } + :param request: dict. The broker request. + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0 """ - all_groups = {} - for groups in service['group_names'].values(): - for group in groups: - name = group - if namespace: - name = "{}-{}".format(namespace, name) - all_groups[group] = get_group(group_name=name) - return all_groups + json_file = request.get('region-json') + name = request.get('client-name') + region_name = request.get('region-name') + zone_name = request.get('zone-name') + if not json_file or not name or not region_name or not zone_name: + msg = "Missing json-file or client-name params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + infile = NamedTemporaryFile(delete=False) + with open(infile.name, 'w') as infile_handle: + infile_handle.write(json_file) + try: + check_output( + [ + 'radosgw-admin', + '--id', service, + 'region', + 'set', + '--rgw-zone', zone_name, + '--infile', infile.name, + '--name', name, + ] + ) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + os.unlink(infile.name) def process_requests_v1(reqs): @@ -516,10 +835,10 @@ def process_requests_v1(reqs): operation failed along with an explanation). """ ret = None - log("Processing %s ceph broker requests" % (len(reqs)), level=INFO) + log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) for req in reqs: op = req.get('op') - log("Processing op='%s'" % op, level=DEBUG) + log("Processing op='{}'".format(op), level=DEBUG) # Use admin client since we do not have other client key locations # setup to use them for these operations. 
svc = 'admin' @@ -531,7 +850,8 @@ def process_requests_v1(reqs): ret = handle_erasure_pool(request=req, service=svc) else: ret = handle_replicated_pool(request=req, service=svc) - + elif op == "create-cephfs": + ret = handle_create_cephfs(request=req, service=svc) elif op == "create-cache-tier": ret = handle_create_cache_tier(request=req, service=svc) elif op == "remove-cache-tier": @@ -558,10 +878,24 @@ def process_requests_v1(reqs): snapshot_name=snapshot_name) elif op == "set-pool-value": ret = handle_set_pool_value(request=req, service=svc) + elif op == "rgw-region-set": + ret = handle_rgw_region_set(request=req, service=svc) + elif op == "rgw-zone-set": + ret = handle_rgw_zone_set(request=req, service=svc) + elif op == "rgw-regionmap-update": + ret = handle_rgw_regionmap_update(request=req, service=svc) + elif op == "rgw-regionmap-default": + ret = handle_rgw_regionmap_default(request=req, service=svc) + elif op == "rgw-create-user": + ret = handle_rgw_create_user(request=req, service=svc) + elif op == "move-osd-to-bucket": + ret = handle_put_osd_in_bucket(request=req, service=svc) elif op == "add-permissions-to-key": ret = handle_add_permissions_to_key(request=req, service=svc) + elif op == 'set-key-permissions': + ret = handle_set_key_permissions(request=req, service=svc) else: - msg = "Unknown operation '%s'" % op + msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} diff --git a/ceph-proxy/lib/charms_ceph/crush_utils.py b/ceph-proxy/lib/charms_ceph/crush_utils.py new file mode 100644 index 00000000..8fe09fa4 --- /dev/null +++ b/ceph-proxy/lib/charms_ceph/crush_utils.py @@ -0,0 +1,154 @@ +# Copyright 2014 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
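Note: the dispatch table in process_requests_v1() above maps each broker op
name onto its handler. A minimal sketch of the on-wire JSON for the newly
added "create-cephfs" op (not part of the patch; a client would normally
build this via CephBrokerRq, and all names here are illustrative). Ops are
processed in order, so the pools exist before the filesystem is created:

    import json
    import uuid

    request = json.dumps({
        'api-version': 1,
        'request-id': str(uuid.uuid1()),
        'ops': [
            {'op': 'create-pool', 'name': 'ceph-fs_data', 'replicas': 3},
            {'op': 'create-pool', 'name': 'ceph-fs_metadata', 'replicas': 3},
            {'op': 'create-cephfs', 'mds_name': 'ceph-fs',
             'data_pool': 'ceph-fs_data',
             'metadata_pool': 'ceph-fs_metadata'},
        ],
    })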
+
+import re
+
+from subprocess import check_output, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+CRUSH_BUCKET = """root {name} {{
+    id {id}    # do not change unnecessarily
+    # weight 0.000
+    alg straw2
+    hash 0  # rjenkins1
+}}
+
+rule {name} {{
+    ruleset 0
+    type replicated
+    min_size 1
+    max_size 10
+    step take {name}
+    step chooseleaf firstn 0 type host
+    step emit
+}}"""
+
+# This regular expression looks for a string like:
+# root NAME {
+# id NUMBER
+# so that we can extract NAME and ID from the crushmap
+CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")
+
+# This regular expression looks for ID strings in the crushmap like:
+# id NUMBER
+# so that we can extract the IDs from a crushmap
+CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
+
+
+class Crushmap(object):
+    """An object oriented approach to Ceph crushmap management."""
+
+    def __init__(self):
+        self._crushmap = self.load_crushmap()
+        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
+        buckets = []
+        ids = list(map(
+            lambda x: int(x),
+            re.findall(CRUSHMAP_ID_RE, self._crushmap)))
+        ids = sorted(ids)
+        if roots != []:
+            for root in roots:
+                buckets.append(CRUSHBucket(root[0], root[1], True))
+
+        self._buckets = buckets
+        if ids != []:
+            self._ids = ids
+        else:
+            self._ids = [0]
+
+    def load_crushmap(self):
+        try:
+            # The compiled CRUSH map is binary, so keep it as bytes and
+            # feed it to crushtool on stdin for decompilation.
+            crush = check_output(['ceph', 'osd', 'getcrushmap'])
+            return str(check_output(['crushtool', '-d', '-'],
+                                    input=crush)
+                       .decode('UTF-8'))
+        except CalledProcessError as e:
+            log("Error occurred while loading and decompiling CRUSH map: "
+                "{}".format(e), ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
+    def buckets(self):
+        """Return a list of buckets that are in the Crushmap."""
+        return self._buckets
+
+    def add_bucket(self, bucket_name):
+        """Add a named bucket to Ceph"""
+        new_id = min(self._ids) - 1
+        self._ids.append(new_id)
+        self._buckets.append(CRUSHBucket(bucket_name, new_id))
+
+    def save(self):
+        """Persist Crushmap to Ceph"""
+        try:
+            crushmap = self.build_crushmap()
+            compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o',
+                                     '/dev/stdout'],
+                                    input=crushmap.encode('UTF-8'))
+            ceph_output = str(check_output(['ceph', 'osd', 'setcrushmap', '-i',
+                                            '/dev/stdin'],
+                                           input=compiled)
+                              .decode('UTF-8'))
+            return ceph_output
+        except CalledProcessError as e:
+            log("save error: {}".format(e))
+            raise RuntimeError("Failed to save CRUSH map.")
+ + def build_crushmap(self): + """Modifies the current CRUSH map to include the new buckets""" + tmp_crushmap = self._crushmap + for bucket in self._buckets: + if not bucket.default: + tmp_crushmap = "{}\n\n{}".format( + tmp_crushmap, + Crushmap.bucket_string(bucket.name, bucket.id)) + + return tmp_crushmap + + @staticmethod + def bucket_string(name, id): + return CRUSH_BUCKET.format(name=name, id=id) + + +class CRUSHBucket(object): + """CRUSH bucket description object.""" + + def __init__(self, name, id, default=False): + self.name = name + self.id = int(id) + self.default = default + + def __repr__(self): + return "Bucket {{Name: {name}, ID: {id}}}".format( + name=self.name, id=self.id) + + def __eq__(self, other): + """Override the default Equals behavior""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return NotImplemented + + def __ne__(self, other): + """Define a non-equality test""" + if isinstance(other, self.__class__): + return not self.__eq__(other) + return NotImplemented diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py new file mode 100644 index 00000000..72e6b921 --- /dev/null +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -0,0 +1,3349 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
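Note: a short usage sketch for the Crushmap helper defined in crush_utils.py
above (not part of the patch; the bucket name is illustrative). This is the
same path handle_put_osd_in_bucket() takes via ensure_bucket_is_present():

    from charms_ceph.crush_utils import Crushmap

    crushmap = Crushmap()  # reads and decompiles the cluster's live CRUSH map
    # Adds a root bucket plus a matching replicated rule, recompiles the map
    # and injects it; a no-op if the bucket is already present.
    crushmap.ensure_bucket_is_present('fast-ssd')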
+ +import collections +import glob +import json +import os +import pyudev +import random +import re +import socket +import subprocess +import sys +import time +import uuid + +from contextlib import contextmanager +from datetime import datetime + +from charmhelpers.core import hookenv +from charmhelpers.core import templating +from charmhelpers.core.host import ( + chownr, + cmp_pkgrevno, + lsb_release, + mkdir, + owner, + service_restart, + service_start, + service_stop, + CompareHostReleases, + write_file, +) +from charmhelpers.core.hookenv import ( + cached, + config, + log, + status_set, + DEBUG, + ERROR, + WARNING, + storage_get, + storage_list, +) +from charmhelpers.fetch import ( + apt_cache, + add_source, apt_install, apt_update +) +from charmhelpers.contrib.storage.linux.ceph import ( + get_mon_map, + monitor_key_set, + monitor_key_exists, + monitor_key_get, +) +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + is_device_mounted, +) +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_install_source, +) +from charmhelpers.contrib.storage.linux import lvm +from charmhelpers.core.unitdata import kv + +CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') +OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') +HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') + +LEADER = 'leader' +PEON = 'peon' +QUORUM = [LEADER, PEON] + +PACKAGES = ['ceph', 'gdisk', + 'radosgw', 'xfsprogs', + 'lvm2', 'parted', 'smartmontools'] + +CEPH_KEY_MANAGER = 'ceph' +VAULT_KEY_MANAGER = 'vault' +KEY_MANAGERS = [ + CEPH_KEY_MANAGER, + VAULT_KEY_MANAGER, +] + +LinkSpeed = { + "BASE_10": 10, + "BASE_100": 100, + "BASE_1000": 1000, + "GBASE_10": 10000, + "GBASE_40": 40000, + "GBASE_100": 100000, + "UNKNOWN": None +} + +# Mapping of adapter speed to sysctl settings +NETWORK_ADAPTER_SYSCTLS = { + # 10Gb + LinkSpeed["GBASE_10"]: { + 'net.core.rmem_default': 524287, + 'net.core.wmem_default': 524287, + 'net.core.rmem_max': 524287, + 'net.core.wmem_max': 524287, + 'net.core.optmem_max': 524287, + 'net.core.netdev_max_backlog': 300000, + 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', + 'net.ipv4.tcp_mem': '10000000 10000000 10000000' + }, + # Mellanox 10/40Gb + LinkSpeed["GBASE_40"]: { + 'net.ipv4.tcp_timestamps': 0, + 'net.ipv4.tcp_sack': 1, + 'net.core.netdev_max_backlog': 250000, + 'net.core.rmem_max': 4194304, + 'net.core.wmem_max': 4194304, + 'net.core.rmem_default': 4194304, + 'net.core.wmem_default': 4194304, + 'net.core.optmem_max': 4194304, + 'net.ipv4.tcp_rmem': '4096 87380 4194304', + 'net.ipv4.tcp_wmem': '4096 65536 4194304', + 'net.ipv4.tcp_low_latency': 1, + 'net.ipv4.tcp_adv_win_scale': 1 + } +} + + +class Partition(object): + def __init__(self, name, number, size, start, end, sectors, uuid): + """A block device partition. 
+
+        :param name: Name of block device
+        :param number: Partition number
+        :param size: Capacity of the device
+        :param start: Starting block
+        :param end: Ending block
+        :param sectors: Number of blocks
+        :param uuid: UUID of the partition
+        """
+        self.name = name
+        self.number = number
+        self.size = size
+        self.start = start
+        self.end = end
+        self.sectors = sectors
+        self.uuid = uuid
+
+    def __str__(self):
+        return "number: {} start: {} end: {} sectors: {} size: {} " \
+               "name: {} uuid: {}".format(self.number, self.start,
+                                          self.end,
+                                          self.sectors, self.size,
+                                          self.name, self.uuid)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+def unmounted_disks():
+    """List of unmounted block devices on the current host."""
+    disks = []
+    context = pyudev.Context()
+    for device in context.list_devices(DEVTYPE='disk'):
+        if device['SUBSYSTEM'] == 'block':
+            if device.device_node is None:
+                continue
+
+            matched = False
+            for block_type in [u'dm-', u'loop', u'ram', u'nbd']:
+                if block_type in device.device_node:
+                    matched = True
+            if matched:
+                continue
+
+            disks.append(device.device_node)
+    log("Found disks: {}".format(disks))
+    return [disk for disk in disks if not is_device_mounted(disk)]
+
+
+def save_sysctls(sysctl_dict, save_location):
+    """Persist the sysctls to the hard drive.
+
+    :param sysctl_dict: dict
+    :param save_location: path to save the settings to
+    :raises: IOError if anything goes wrong with writing.
+    """
+    try:
+        # Persist the settings for reboots
+        with open(save_location, "w") as fd:
+            for key, value in sysctl_dict.items():
+                fd.write("{}={}\n".format(key, value))
+
+    except IOError as e:
+        log("Unable to persist sysctl settings to {}. Error {}".format(
+            save_location, e), level=ERROR)
+        raise
+
+
+def tune_nic(network_interface):
+    """This will set optimal sysctls for the particular network adapter.
+
+    :param network_interface: string The network adapter name.
+    """
+    speed = get_link_speed(network_interface)
+    if speed in NETWORK_ADAPTER_SYSCTLS:
+        status_set('maintenance', 'Tuning device {}'.format(
+            network_interface))
+        sysctl_file = os.path.join(
+            os.sep,
+            'etc',
+            'sysctl.d',
+            '51-ceph-osd-charm-{}.conf'.format(network_interface))
+        try:
+            log("Saving sysctl_file: {} values: {}".format(
+                sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]),
+                level=DEBUG)
+            save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed],
+                         save_location=sysctl_file)
+        except IOError as e:
+            log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} "
+                "failed. {}".format(network_interface, e),
+                level=ERROR)
+
+        try:
+            # Apply the settings
+            log("Applying sysctl settings", level=DEBUG)
+            subprocess.check_output(["sysctl", "-p", sysctl_file])
+        except subprocess.CalledProcessError as err:
+            log('sysctl -p {} failed with error {}'.format(sysctl_file,
+                                                           err.output),
+                level=ERROR)
+    else:
+        log("No settings found for network adapter: {}".format(
+            network_interface), level=DEBUG)
+
+
+def get_link_speed(network_interface):
+    """This will find the link speed for a given network device. Returns None
+    if an error occurs.
+
+    :param network_interface: string The network adapter interface.
+ :returns: LinkSpeed + """ + speed_path = os.path.join(os.sep, 'sys', 'class', 'net', + network_interface, 'speed') + # I'm not sure where else we'd check if this doesn't exist + if not os.path.exists(speed_path): + return LinkSpeed["UNKNOWN"] + + try: + with open(speed_path, 'r') as sysfs: + nic_speed = sysfs.readlines() + + # Did we actually read anything? + if not nic_speed: + return LinkSpeed["UNKNOWN"] + + # Try to find a sysctl match for this particular speed + for name, speed in LinkSpeed.items(): + if speed == int(nic_speed[0].strip()): + return speed + # Default to UNKNOWN if we can't find a match + return LinkSpeed["UNKNOWN"] + except IOError as e: + log("Unable to open {path} because of error: {error}".format( + path=speed_path, + error=e), level='error') + return LinkSpeed["UNKNOWN"] + + +def persist_settings(settings_dict): + # Write all settings to /etc/hdparm.conf + """ This will persist the hard drive settings to the /etc/hdparm.conf file + + The settings_dict should be in the form of {"uuid": {"key":"value"}} + + :param settings_dict: dict of settings to save + """ + if not settings_dict: + return + + try: + templating.render(source='hdparm.conf', target=HDPARM_FILE, + context=settings_dict) + except IOError as err: + log("Unable to open {path} because of error: {error}".format( + path=HDPARM_FILE, error=err), level=ERROR) + except Exception as e: + # The templating.render can raise a jinja2 exception if the + # template is not found. Rather than polluting the import + # space of this charm, simply catch Exception + log('Unable to render {path} due to error: {error}'.format( + path=HDPARM_FILE, error=e), level=ERROR) + + +def set_max_sectors_kb(dev_name, max_sectors_size): + """This function sets the max_sectors_kb size of a given block device. + + :param dev_name: Name of the block device to query + :param max_sectors_size: int of the max_sectors_size to save + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + try: + with open(max_sectors_kb_path, 'w') as f: + f.write(max_sectors_size) + except IOError as e: + log('Failed to write max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e), level=ERROR) + + +def get_max_sectors_kb(dev_name): + """This function gets the max_sectors_kb size of a given block device. + + :param dev_name: Name of the block device to query + :returns: int which is either the max_sectors_kb or 0 on error. + """ + max_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_sectors_kb') + + # Read in what Linux has set by default + if os.path.exists(max_sectors_kb_path): + try: + with open(max_sectors_kb_path, 'r') as f: + max_sectors_kb = f.read().strip() + return int(max_sectors_kb) + except IOError as e: + log('Failed to read max_sectors_kb to {}. Error: {}'.format( + max_sectors_kb_path, e), level=ERROR) + # Bail. + return 0 + return 0 + + +def get_max_hw_sectors_kb(dev_name): + """This function gets the max_hw_sectors_kb for a given block device. + + :param dev_name: Name of the block device to query + :returns: int which is either the max_hw_sectors_kb or 0 on error. + """ + max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', + 'max_hw_sectors_kb') + # Read in what the hardware supports + if os.path.exists(max_hw_sectors_kb_path): + try: + with open(max_hw_sectors_kb_path, 'r') as f: + max_hw_sectors_kb = f.read().strip() + return int(max_hw_sectors_kb) + except IOError as e: + log('Failed to read max_hw_sectors_kb to {}. 
Error: {}'.format( + max_hw_sectors_kb_path, e), level=ERROR) + return 0 + return 0 + + +def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): + """This function sets the hard drive read ahead. + + :param dev_name: Name of the block device to set read ahead on. + :param read_ahead_sectors: int How many sectors to read ahead. + """ + try: + # Set the read ahead sectors to 256 + log('Setting read ahead to {} for device {}'.format( + read_ahead_sectors, + dev_name)) + subprocess.check_output(['hdparm', + '-a{}'.format(read_ahead_sectors), + dev_name]) + except subprocess.CalledProcessError as e: + log('hdparm failed with error: {}'.format(e.output), + level=ERROR) + + +def get_block_uuid(block_dev): + """This queries blkid to get the uuid for a block device. + + :param block_dev: Name of the block device to query. + :returns: The UUID of the device or None on Error. + """ + try: + block_info = str(subprocess + .check_output(['blkid', '-o', 'export', block_dev]) + .decode('UTF-8')) + for tag in block_info.split('\n'): + parts = tag.split('=') + if parts[0] == 'UUID': + return parts[1] + return None + except subprocess.CalledProcessError as err: + log('get_block_uuid failed with error: {}'.format(err.output), + level=ERROR) + return None + + +def check_max_sectors(save_settings_dict, + block_dev, + uuid): + """Tune the max_hw_sectors if needed. + + make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at + least 1MB for spinning disks + If the box has a RAID card with cache this could go much bigger. + + :param save_settings_dict: The dict used to persist settings + :param block_dev: A block device name: Example: /dev/sda + :param uuid: The uuid of the block device + """ + dev_name = None + path_parts = os.path.split(block_dev) + if len(path_parts) == 2: + dev_name = path_parts[1] + else: + log('Unable to determine the block device name from path: {}'.format( + block_dev)) + # Play it safe and bail + return + max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) + max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) + + if max_sectors_kb < max_hw_sectors_kb: + # OK we have a situation where the hardware supports more than Linux is + # currently requesting + config_max_sectors_kb = hookenv.config('max-sectors-kb') + if config_max_sectors_kb < max_hw_sectors_kb: + # Set the max_sectors_kb to the config.yaml value if it is less + # than the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, config_max_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid][ + "read_ahead_sect"] = config_max_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=config_max_sectors_kb) + else: + # Set to the max_hw_sectors_kb + log('Setting max_sectors_kb for device {} to {}'.format( + dev_name, max_hw_sectors_kb)) + save_settings_dict[ + "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb + set_max_sectors_kb(dev_name=dev_name, + max_sectors_size=max_hw_sectors_kb) + else: + log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' + 'device: {}'.format(block_dev)) + + +def tune_dev(block_dev): + """Try to make some intelligent decisions with HDD tuning. Future work will + include optimizing SSDs. + + This function will change the read ahead sectors and the max write + sectors for each block device. + + :param block_dev: A block device name: Example: /dev/sda + """ + uuid = get_block_uuid(block_dev) + if uuid is None: + log('block device {} uuid is None. 
Unable to save to ' + 'hdparm.conf'.format(block_dev), level=DEBUG) + return + save_settings_dict = {} + log('Tuning device {}'.format(block_dev)) + status_set('maintenance', 'Tuning device {}'.format(block_dev)) + set_hdd_read_ahead(block_dev) + save_settings_dict["drive_settings"] = {} + save_settings_dict["drive_settings"][uuid] = {} + save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 + + check_max_sectors(block_dev=block_dev, + save_settings_dict=save_settings_dict, + uuid=uuid) + + persist_settings(settings_dict=save_settings_dict) + status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) + + +def ceph_user(): + if get_version() > 1: + return 'ceph' + else: + return "root" + + +class CrushLocation(object): + def __init__(self, + name, + identifier, + host, + rack, + row, + datacenter, + chassis, + root): + self.name = name + self.identifier = identifier + self.host = host + self.rack = rack + self.row = row + self.datacenter = datacenter + self.chassis = chassis + self.root = root + + def __str__(self): + return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ + "chassis :{} root: {}".format(self.name, self.identifier, + self.host, self.rack, self.row, + self.datacenter, self.chassis, + self.root) + + def __eq__(self, other): + return not self.name < other.name and not other.name < self.name + + def __ne__(self, other): + return self.name < other.name or other.name < self.name + + def __gt__(self, other): + return self.name > other.name + + def __ge__(self, other): + return not self.name < other.name + + def __le__(self, other): + return self.name < other.name + + +def get_osd_weight(osd_id): + """Returns the weight of the specified OSD. + + :returns: Float + :raises: ValueError if the monmap fails to parse. + :raises: CalledProcessError if our ceph command fails. + """ + try: + tree = str(subprocess + .check_output(['ceph', 'osd', 'tree', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['nodes']: + return None + for device in json_tree['nodes']: + if device['type'] == 'osd' and device['name'] == osd_id: + return device['crush_weight'] + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e)) + raise + + +def get_osd_tree(service): + """Returns the current osd map in JSON. + + :returns: List. + :raises: ValueError if the monmap fails to parse. + Also raises CalledProcessError if our ceph command fails + """ + try: + tree = str(subprocess + .check_output(['ceph', '--id', service, + 'osd', 'tree', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + crush_list = [] + # Make sure children are present in the json + if not json_tree['nodes']: + return None + host_nodes = [ + node for node in json_tree['nodes'] + if node['type'] == 'host' + ] + for host in host_nodes: + crush_list.append( + CrushLocation( + name=host.get('name'), + identifier=host['id'], + host=host.get('host'), + rack=host.get('rack'), + row=host.get('row'), + datacenter=host.get('datacenter'), + chassis=host.get('chassis'), + root=host.get('root') + ) + ) + return crush_list + except ValueError as v: + log("Unable to parse ceph tree json: {}. 
Error: {}".format( + tree, v)) + raise + except subprocess.CalledProcessError as e: + log("ceph osd tree command failed with message: {}".format( + e)) + raise + + +def _get_child_dirs(path): + """Returns a list of directory names in the specified path. + + :param path: a full path listing of the parent directory to return child + directory names + :returns: list. A list of child directories under the parent directory + :raises: ValueError if the specified path does not exist or is not a + directory, + OSError if an error occurs reading the directory listing + """ + if not os.path.exists(path): + raise ValueError('Specfied path "%s" does not exist' % path) + if not os.path.isdir(path): + raise ValueError('Specified path "%s" is not a directory' % path) + + files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] + return list(filter(os.path.isdir, files_in_dir)) + + +def _get_osd_num_from_dirname(dirname): + """Parses the dirname and returns the OSD id. + + Parses a string in the form of 'ceph-{osd#}' and returns the osd number + from the directory name. + + :param dirname: the directory name to return the OSD number from + :return int: the osd number the directory name corresponds to + :raises ValueError: if the osd number cannot be parsed from the provided + directory name. + """ + match = re.search(r'ceph-(?P\d+)', dirname) + if not match: + raise ValueError("dirname not in correct format: {}".format(dirname)) + + return match.group('osd_id') + + +def get_local_osd_ids(): + """This will list the /var/lib/ceph/osd/* directories and try + to split the ID off of the directory name and return it in + a list. + + :returns: list. A list of osd identifiers + :raises: OSError if something goes wrong with listing the directory. + """ + osd_ids = [] + osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') + if os.path.exists(osd_path): + try: + dirs = os.listdir(osd_path) + for osd_dir in dirs: + osd_id = osd_dir.split('-')[1] + if _is_int(osd_id): + osd_ids.append(osd_id) + except OSError: + raise + return osd_ids + + +def get_local_mon_ids(): + """This will list the /var/lib/ceph/mon/* directories and try + to split the ID off of the directory name and return it in + a list. + + :returns: list. A list of monitor identifiers + :raises: OSError if something goes wrong with listing the directory. + """ + mon_ids = [] + mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') + if os.path.exists(mon_path): + try: + dirs = os.listdir(mon_path) + for mon_dir in dirs: + # Basically this takes everything after ceph- as the monitor ID + match = re.search('ceph-(?P.*)', mon_dir) + if match: + mon_ids.append(match.group('mon_id')) + except OSError: + raise + return mon_ids + + +def _is_int(v): + """Return True if the object v can be turned into an integer.""" + try: + int(v) + return True + except ValueError: + return False + + +def get_version(): + """Derive Ceph release from an installed package.""" + import apt_pkg as apt + + cache = apt_cache() + package = "ceph" + try: + pkg = cache[package] + except KeyError: + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation ' \ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + # package is known, but no version is currently installed. 
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match(r'^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + return float(vers) + + +def error_out(msg): + log("FATAL ERROR: {}".format(msg), + level=ERROR) + sys.exit(1) + + +def is_quorum(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] in QUORUM: + return True + else: + return False + else: + return False + + +def is_leader(): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "mon_status" + ] + if os.path.exists(asok): + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except subprocess.CalledProcessError: + return False + except ValueError: + # Non JSON response from mon_status + return False + if result['state'] == LEADER: + return True + else: + return False + else: + return False + + +def manager_available(): + # if manager daemon isn't on this release, just say it is Fine + if cmp_pkgrevno('ceph', '11.0.0') < 0: + return True + cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] + try: + result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return result['available'] + except subprocess.CalledProcessError as e: + log("'{}' failed: {}".format(" ".join(cmd), str(e))) + return False + except Exception: + return False + + +def wait_for_quorum(): + while not is_quorum(): + log("Waiting for quorum to be reached") + time.sleep(3) + + +def wait_for_manager(): + while not manager_available(): + log("Waiting for manager to be available") + time.sleep(5) + + +def add_bootstrap_hint(peer): + asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) + cmd = [ + "sudo", + "-u", + ceph_user(), + "ceph", + "--admin-daemon", + asok, + "add_bootstrap_peer_hint", + peer + ] + if os.path.exists(asok): + # Ignore any errors for this call + subprocess.call(cmd) + + +DISK_FORMATS = [ + 'xfs', + 'ext4', + 'btrfs' +] + +CEPH_PARTITIONS = [ + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation +] + + +def get_partition_list(dev): + """Lists the partitions of a block device. + + :param dev: Path to a block device. ex: /dev/sda + :returns: Returns a list of Partition objects. 
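+              (one Partition per line of ``partx --raw --noheadings`` output)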
+    :raises: CalledProcessError if partx fails
+    """
+    partitions_list = []
+    try:
+        partitions = get_partitions(dev)
+        # For each line of output
+        for partition in partitions:
+            parts = partition.split()
+            try:
+                partitions_list.append(
+                    Partition(number=parts[0],
+                              start=parts[1],
+                              end=parts[2],
+                              sectors=parts[3],
+                              size=parts[4],
+                              name=parts[5],
+                              uuid=parts[6])
+                )
+            except IndexError:
+                partitions_list.append(
+                    Partition(number=parts[0],
+                              start=parts[1],
+                              end=parts[2],
+                              sectors=parts[3],
+                              size=parts[4],
+                              name="",
+                              uuid=parts[5])
+                )
+
+        return partitions_list
+    except subprocess.CalledProcessError:
+        raise
+
+
+def is_pristine_disk(dev):
+    """
+    Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it
+    is actually all zeros and safe for us to use.
+
+    Existing partitioning tools do not discern between a failure to read from
+    a block device, failure to understand a partition table and the fact that
+    a block device has no partition table. Since we need to be positive about
+    which is which we need to read the device directly and confirm ourselves.
+
+    :param dev: Path to block device
+    :type dev: str
+    :returns: True if all 2048 bytes == 0x0, False otherwise
+    :rtype: bool
+    """
+    want_bytes = 2048
+
+    try:
+        with open(dev, 'rb') as f:
+            data = f.read(want_bytes)
+    except OSError as e:
+        log(e)
+        return False
+
+    read_bytes = len(data)
+    if read_bytes != want_bytes:
+        log('{}: short read, got {} bytes expected {}.'
+            .format(dev, read_bytes, want_bytes), level=WARNING)
+        return False
+
+    return all(byte == 0x0 for byte in data)
+
+
+def is_osd_disk(dev):
+    """Determine whether a block device already carries Ceph OSD data."""
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    if dev in osd_devices:
+        log('Device {} already processed by charm,'
+            ' skipping'.format(dev))
+        return True
+
+    partitions = get_partition_list(dev)
+    for partition in partitions:
+        try:
+            info = str(subprocess
+                       .check_output(['sgdisk', '-i', partition.number, dev])
+                       .decode('UTF-8'))
+            info = info.split("\n")  # IGNORE:E1103
+            for line in info:
+                for ptype in CEPH_PARTITIONS:
+                    sig = 'Partition GUID code: {}'.format(ptype)
+                    if line.startswith(sig):
+                        return True
+        except subprocess.CalledProcessError as e:
+            log("sgdisk inspection of partition {} on {} failed with "
+                "error: {}. 
Skipping".format(partition.minor, dev, e), + level=ERROR) + return False + + +def start_osds(devices): + # Scan for ceph block devices + rescan_osd_devices() + if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and + cmp_pkgrevno('ceph', '14.2.0') < 0): + # Use ceph-disk activate for directory based OSD's + for dev_or_path in devices: + if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): + subprocess.check_call( + ['ceph-disk', 'activate', dev_or_path]) + + +def udevadm_settle(): + cmd = ['udevadm', 'settle'] + subprocess.call(cmd) + + +def rescan_osd_devices(): + cmd = [ + 'udevadm', 'trigger', + '--subsystem-match=block', '--action=add' + ] + + subprocess.call(cmd) + + udevadm_settle() + + +_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' + + +def is_bootstrapped(): + return os.path.exists( + '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) + + +def wait_for_bootstrap(): + while not is_bootstrapped(): + time.sleep(3) + + +def generate_monitor_secret(): + cmd = [ + 'ceph-authtool', + '/dev/stdout', + '--name=mon.', + '--gen-key' + ] + res = str(subprocess.check_output(cmd).decode('UTF-8')) + + return "{}==".format(res.split('=')[1].strip()) + + +# OSD caps taken from ceph-create-keys +_osd_bootstrap_caps = { + 'mon': [ + 'allow command osd create ...', + 'allow command osd crush set ...', + r'allow command auth add * osd allow\ * mon allow\ rwx', + 'allow command mon getmap' + ] +} + +_osd_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-osd' + ] +} + + +def parse_key(raw_key): + # get-or-create appears to have different output depending + # on whether its 'get' or 'create' + # 'create' just returns the key, 'get' is more verbose and + # needs parsing + key = None + if len(raw_key.splitlines()) == 1: + key = raw_key + else: + for element in raw_key.splitlines(): + if 'key' in element: + return element.split(' = ')[1].strip() # IGNORE:E1103 + return key + + +def get_osd_bootstrap_key(): + try: + # Attempt to get/create a key using the OSD bootstrap profile first + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps_profile) + except Exception: + # If that fails try with the older style permissions + key = get_named_key('bootstrap-osd', + _osd_bootstrap_caps) + return key + + +_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" + + +def import_radosgw_key(key): + if not os.path.exists(_radosgw_keyring): + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph-authtool', + _radosgw_keyring, + '--create-keyring', + '--name=client.radosgw.gateway', + '--add-key={}'.format(key) + ] + subprocess.check_call(cmd) + + +# OSD caps taken from ceph-create-keys +_radosgw_caps = { + 'mon': ['allow rw'], + 'osd': ['allow rwx'] +} +_upgrade_caps = { + 'mon': ['allow rwx'] +} + + +def get_radosgw_key(pool_list=None, name=None): + return get_named_key(name=name or 'radosgw.gateway', + caps=_radosgw_caps, + pool_list=pool_list) + + +def get_mds_key(name): + return create_named_keyring(entity='mds', + name=name, + caps=mds_caps) + + +_mds_bootstrap_caps_profile = { + 'mon': [ + 'allow profile bootstrap-mds' + ] +} + + +def get_mds_bootstrap_key(): + return get_named_key('bootstrap-mds', + _mds_bootstrap_caps_profile) + + +_default_caps = collections.OrderedDict([ + ('mon', ['allow r', + 'allow command "osd blacklist"']), + ('osd', ['allow rwx']), +]) + +admin_caps = collections.OrderedDict([ + ('mds', ['allow *']), + ('mgr', ['allow *']), + ('mon', ['allow *']), + ('osd', ['allow *']) +]) + +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', 
['allow']), + ('mon', ['allow rwx']), +]) + +osd_upgrade_caps = collections.OrderedDict([ + ('mon', ['allow command "config-key"', + 'allow command "osd tree"', + 'allow command "config-key list"', + 'allow command "config-key put"', + 'allow command "config-key get"', + 'allow command "config-key exists"', + 'allow command "osd out"', + 'allow command "osd in"', + 'allow command "osd rm"', + 'allow command "auth del"', + ]) +]) + +rbd_mirror_caps = collections.OrderedDict([ + ('mon', ['profile rbd; allow r']), + ('osd', ['profile rbd']), + ('mgr', ['allow r']), +]) + + +def get_rbd_mirror_key(name): + return get_named_key(name=name, caps=rbd_mirror_caps) + + +def create_named_keyring(entity, name, caps=None): + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, + name=name), + ] + for subsystem, subcaps in caps.items(): + cmd.extend([subsystem, '; '.join(subcaps)]) + log("Calling check_output: {}".format(cmd), level=DEBUG) + return (parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip())) # IGNORE:E1103 + + +def get_upgrade_key(): + return get_named_key('upgrade-osd', _upgrade_caps) + + +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + + :param name: String Name of key to get. + :param pool_list: The list of pools to give access to + :param caps: dict of cephx capabilities + :returns: Returns a cephx key + """ + key_name = 'client.{}'.format(name) + try: + # Does the key already exist? + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + # NOTE(jamespage); + # Apply any changes to key capabilities, dealing with + # upgrades which requires new caps for operation. + upgrade_key_caps(key_name, + caps or _default_caps, + pool_list) + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key, time to create it! 
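+        # The command assembled below ends up looking roughly like this
+        # (hostname, key name and caps shown are illustrative only):
+        #   sudo -u ceph ceph --name mon. \
+        #       --keyring /var/lib/ceph/mon/ceph-myhost/keyring \
+        #       auth get-or-create client.myname mon 'allow r' osd 'allow rwx'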
+ log("Creating new key for {}".format(name), level=DEBUG) + caps = caps or _default_caps + cmd = [ + "sudo", + "-u", + ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get-or-create', key_name, + ] + # Add capabilities + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + + log("Calling check_output: {}".format(cmd), level=DEBUG) + return parse_key(str(subprocess + .check_output(cmd) + .decode('UTF-8')) + .strip()) # IGNORE:E1103 + + +def upgrade_key_caps(key, caps, pool_list=None): + """ Upgrade key to have capabilities caps """ + if not is_leader(): + # Not the MON leader OR not clustered + return + cmd = [ + "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key + ] + for subsystem, subcaps in caps.items(): + if subsystem == 'osd': + if pool_list: + # This will output a string similar to: + # "pool=rgw pool=rbd pool=something" + pools = " ".join(['pool={0}'.format(i) for i in pool_list]) + subcaps[0] = subcaps[0] + " " + pools + cmd.extend([subsystem, '; '.join(subcaps)]) + subprocess.check_call(cmd) + + +@cached +def systemd(): + return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' + + +def use_bluestore(): + """Determine whether bluestore should be used for OSD's + + :returns: whether bluestore disk format should be used + :rtype: bool""" + if cmp_pkgrevno('ceph', '12.2.0') < 0: + return False + return config('bluestore') + + +def bootstrap_monitor_cluster(secret): + """Bootstrap local ceph mon into the ceph cluster + + :param secret: cephx secret to use for monitor authentication + :type secret: str + :raises: Exception if ceph mon cannot be bootstrapped + """ + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + done = '{}/done'.format(path) + if systemd(): + init_marker = '{}/systemd'.format(path) + else: + init_marker = '{}/upstart'.format(path) + + keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) + + if os.path.exists(done): + log('bootstrap_monitor_cluster: mon already initialized.') + else: + # Ceph >= 0.61.3 needs this for ceph-mon fs creation + mkdir('/var/run/ceph', owner=ceph_user(), + group=ceph_user(), perms=0o755) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) + # end changes for Ceph >= 0.61.3 + try: + _create_monitor(keyring, + secret, + hostname, + path, + done, + init_marker) + except Exception: + raise + finally: + os.unlink(keyring) + + +def _create_monitor(keyring, secret, hostname, path, done, init_marker): + """Create monitor filesystem and enable and start ceph-mon process + + :param keyring: path to temporary keyring on disk + :type keyring: str + :param secret: cephx secret to use for monitor authentication + :type: secret: str + :param hostname: hostname of the local unit + :type hostname: str + :param path: full path to ceph mon directory + :type path: str + :param done: full path to 'done' marker for ceph mon + :type done: str + :param init_marker: full path to 'init' marker for ceph mon + :type init_marker: str + """ + subprocess.check_call(['ceph-authtool', keyring, + '--create-keyring', '--name=mon.', + '--add-key={}'.format(secret), + '--cap', 'mon', 'allow *']) + subprocess.check_call(['ceph-mon', '--mkfs', + '-i', 
hostname, + '--keyring', keyring]) + chownr('/var/log/ceph', ceph_user(), ceph_user()) + chownr(path, ceph_user(), ceph_user()) + with open(done, 'w'): + pass + with open(init_marker, 'w'): + pass + + if systemd(): + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + subprocess.check_call(['systemctl', 'enable', systemd_unit]) + service_restart(systemd_unit) + else: + service_restart('ceph-mon-all') + + +def create_keyrings(): + """Create keyrings for operation of ceph-mon units + + NOTE: The quorum should be done before to execute this function. + + :raises: Exception if keyrings cannot be created + """ + if cmp_pkgrevno('ceph', '14.0.0') >= 0: + # NOTE(jamespage): At Nautilus, keys are created by the + # monitors automatically and just need + # exporting. + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', 'get', 'client.admin', + ]).decode('UTF-8')).strip() + if not output: + # NOTE: key not yet created, raise exception and retry + raise Exception + # NOTE: octopus wants newline at end of file LP: #1864706 + output += '\n' + write_file(_client_admin_keyring, output, + owner=ceph_user(), group=ceph_user(), + perms=0o400) + else: + # NOTE(jamespage): Later ceph releases require explicit + # call to ceph-create-keys to setup the + # admin keys for the cluster; this command + # will wait for quorum in the cluster before + # returning. + # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older + # ceph releases too. This improves bootstrap + # resilience as the charm will wait for + # presence of peer units before attempting + # to bootstrap. Note that charms deploying + # ceph-mon service should disable running of + # `ceph-create-keys` service in init system. + cmd = ['ceph-create-keys', '--id', socket.gethostname()] + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 + # seconds is not adequate. Increase timeout when + # timeout parameter available. For older releases + # we rely on retry_on_exception decorator. + # LP#1719436 + cmd.extend(['--timeout', '1800']) + subprocess.check_call(cmd) + osstat = os.stat(_client_admin_keyring) + if not osstat.st_size: + # NOTE(fnordahl): Retry will fail as long as this file exists. + # LP#1719436 + os.remove(_client_admin_keyring) + raise Exception + + +def update_monfs(): + hostname = socket.gethostname() + monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + if systemd(): + init_marker = '{}/systemd'.format(monfs) + else: + init_marker = '{}/upstart'.format(monfs) + if os.path.exists(monfs) and not os.path.exists(init_marker): + # Mark mon as managed by upstart so that + # it gets start correctly on reboots + with open(init_marker, 'w'): + pass + + +def get_partitions(dev): + cmd = ['partx', '--raw', '--noheadings', dev] + try: + out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines() + log("get partitions: {}".format(out), level=DEBUG) + return out + except subprocess.CalledProcessError as e: + log("Can't get info for {0}: {1}".format(dev, e.output)) + return [] + + +def get_lvs(dev): + """ + List logical volumes for the provided block device + + :param: dev: Full path to block device. + :raises subprocess.CalledProcessError: in the event that any supporting + operation failed. 
+    :returns: list: List of logical volumes provided by the block device
+    """
+    if not lvm.is_lvm_physical_volume(dev):
+        return []
+    vg_name = lvm.list_lvm_volume_group(dev)
+    return lvm.list_logical_volumes('vg_name={}'.format(vg_name))
+
+
+def find_least_used_utility_device(utility_devices, lvs=False):
+    """
+    Find a utility device which has the smallest number of partitions
+    among other devices in the supplied list.
+
+    :param utility_devices: A list of devices to be used for filestore
+                            journal or bluestore wal or db.
+    :param lvs: flag to indicate whether inspection should be based on
+                LVM LV's
+    :returns: string device name
+    """
+    if lvs:
+        usages = map(lambda a: (len(get_lvs(a)), a), utility_devices)
+    else:
+        usages = map(lambda a: (len(get_partitions(a)), a), utility_devices)
+    least = min(usages, key=lambda t: t[0])
+    return least[1]
+
+
+def get_devices(name):
+    """Merge config and Juju storage based devices
+
+    :param name: The name of the device type, e.g. wal, osd, journal
+    :returns: Set(device names), which are strings
+    """
+    if config(name):
+        devices = [dev.strip() for dev in config(name).split(' ')]
+    else:
+        devices = []
+    storage_ids = storage_list(name)
+    devices.extend((storage_get('location', sid) for sid in storage_ids))
+    devices = filter(os.path.exists, devices)
+
+    return set(devices)
+
+
+def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
+           bluestore=False, key_manager=CEPH_KEY_MANAGER):
+    """Prepare dev for use as a Ceph OSD, dispatching on whether it is a
+    block device (a path under /dev) or a directory."""
+    if dev.startswith('/dev'):
+        osdize_dev(dev, osd_format, osd_journal,
+                   ignore_errors, encrypt,
+                   bluestore, key_manager)
+    else:
+        if cmp_pkgrevno('ceph', '14.0.0') >= 0:
+            log("Directory backed OSDs can not be created on Nautilus",
+                level=WARNING)
+            return
+        osdize_dir(dev, encrypt, bluestore)
+
+
+def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
+               encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER):
+    """
+    Prepare a block device for use as a Ceph OSD
+
+    A block device will only be prepared once during the lifetime
+    of the calling charm unit; future executions will be skipped.
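+
+    Devices that have been prepared are recorded in the charm's unitdata
+    key/value store under the 'osd-devices' key; that record is what makes
+    a repeat call for the same device a no-op.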
+ + :param: dev: Full path to block device to use + :param: osd_format: Format for OSD filesystem + :param: osd_journal: List of block devices to use for OSD journals + :param: ignore_errors: Don't fail in the event of any errors during + processing + :param: encrypt: Encrypt block devices using 'key_manager' + :param: bluestore: Use bluestore native ceph block device format + :param: key_manager: Key management approach for encryption keys + :raises subprocess.CalledProcessError: in the event that any supporting + subprocess operation failed + :raises ValueError: if an invalid key_manager is provided + """ + if key_manager not in KEY_MANAGERS: + raise ValueError('Unsupported key manager: {}'.format(key_manager)) + + db = kv() + osd_devices = db.get('osd-devices', []) + try: + if dev in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(dev)) + return + + if not os.path.exists(dev): + log('Path {} does not exist - bailing'.format(dev)) + return + + if not is_block_device(dev): + log('Path {} is not a block device - bailing'.format(dev)) + return + + if is_osd_disk(dev): + log('Looks like {} is already an' + ' OSD data or journal, skipping.'.format(dev)) + if is_device_mounted(dev): + osd_devices.append(dev) + return + + if is_device_mounted(dev): + log('Looks like {} is in use, skipping.'.format(dev)) + return + + if is_active_bluestore_device(dev): + log('{} is in use as an active bluestore block device,' + ' skipping.'.format(dev)) + osd_devices.append(dev) + return + + if is_mapped_luks_device(dev): + log('{} is a mapped LUKS device,' + ' skipping.'.format(dev)) + return + + if cmp_pkgrevno('ceph', '12.2.4') >= 0: + cmd = _ceph_volume(dev, + osd_journal, + encrypt, + bluestore, + key_manager) + else: + cmd = _ceph_disk(dev, + osd_format, + osd_journal, + encrypt, + bluestore) + + try: + status_set('maintenance', 'Initializing device {}'.format(dev)) + log("osdize cmd: {}".format(cmd)) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + try: + lsblk_output = subprocess.check_output( + ['lsblk', '-P']).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Couldn't get lsblk output: {}".format(e), ERROR) + if ignore_errors: + log('Unable to initialize device: {}'.format(dev), WARNING) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), DEBUG) + else: + log('Unable to initialize device: {}'.format(dev), ERROR) + if lsblk_output: + log('lsblk output: {}'.format(lsblk_output), WARNING) + raise + + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(dev) + finally: + db.set('osd-devices', osd_devices) + db.flush() + + +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): + """ + Prepare a device for usage as a Ceph OSD using ceph-disk + + :param: dev: Full path to use for OSD block device setup, + The function looks up realpath of the device + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption (unsupported) + :param: bluestore: Use bluestore storage for OSD + :returns: list. 
'ceph-disk' command and required parameters for + execution by check_call + """ + cmd = ['ceph-disk', 'prepare'] + + if encrypt: + cmd.append('--dmcrypt') + + if osd_format and not bluestore: + cmd.append('--fs-type') + cmd.append(osd_format) + + # NOTE(jamespage): enable experimental bluestore support + if use_bluestore(): + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) + elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: + cmd.append('--filestore') + + cmd.append(os.path.realpath(dev)) + + if osd_journal: + least_used = find_least_used_utility_device(osd_journal) + cmd.append(least_used) + + return cmd + + +def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, + key_manager=CEPH_KEY_MANAGER): + """ + Prepare and activate a device for usage as a Ceph OSD using ceph-volume. + + This also includes creation of all PV's, VG's and LV's required to + support the initialization of the OSD. + + :param: dev: Full path to use for OSD block device setup + :param: osd_journal: List of block devices to use for OSD journals + :param: encrypt: Use block device encryption + :param: bluestore: Use bluestore storage for OSD + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM operation failed. + :returns: list. 'ceph-volume' command and required parameters for + execution by check_call + """ + cmd = ['ceph-volume', 'lvm', 'create'] + + osd_fsid = str(uuid.uuid4()) + cmd.append('--osd-fsid') + cmd.append(osd_fsid) + + if bluestore: + cmd.append('--bluestore') + main_device_type = 'block' + else: + cmd.append('--filestore') + main_device_type = 'data' + + if encrypt and key_manager == CEPH_KEY_MANAGER: + cmd.append('--dmcrypt') + + # On-disk journal volume creation + if not osd_journal and not bluestore: + journal_lv_type = 'journal' + cmd.append('--journal') + cmd.append(_allocate_logical_volume( + dev=dev, + lv_type=journal_lv_type, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + encrypt=encrypt, + key_manager=key_manager) + ) + + cmd.append('--data') + cmd.append(_allocate_logical_volume(dev=dev, + lv_type=main_device_type, + osd_fsid=osd_fsid, + encrypt=encrypt, + key_manager=key_manager)) + + if bluestore: + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + elif osd_journal: + cmd.append('--journal') + least_used = find_least_used_utility_device(osd_journal, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type='journal', + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size('journal')), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) + + return cmd + + +def _partition_name(dev): + """ + Derive the first partition name for a block device + + :param: dev: Full path to block device. + :returns: str: Full path to first partition on block device. 
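+
+    Example (illustrative)::
+
+        _partition_name('/dev/sdb')      # -> '/dev/sdb1'
+        _partition_name('/dev/nvme0n1')  # -> '/dev/nvme0n1p1'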
+ """ + if dev[-1].isdigit(): + return '{}p1'.format(dev) + else: + return '{}1'.format(dev) + + +def is_active_bluestore_device(dev): + """ + Determine whether provided device is part of an active + bluestore based OSD (as its block component). + + :param: dev: Full path to block device to check for Bluestore usage. + :returns: boolean: indicating whether device is in active use. + """ + if not lvm.is_lvm_physical_volume(dev): + return False + + vg_name = lvm.list_lvm_volume_group(dev) + try: + lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] + except IndexError: + return False + + block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') + for block_candidate in block_symlinks: + if os.path.islink(block_candidate): + target = os.readlink(block_candidate) + if target.endswith(lv_name): + return True + + return False + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + +def get_conf(variable): + """ + Get the value of the given configuration variable from the + cluster. + + :param variable: ceph configuration variable + :returns: str. configured value for provided variable + + """ + return subprocess.check_output([ + 'ceph-osd', + '--show-config-value={}'.format(variable), + '--no-mon-config', + ]).strip() + + +def calculate_volume_size(lv_type): + """ + Determine the configured size for Bluestore DB/WAL or + Filestore Journal devices + + :param lv_type: volume type (db, wal or journal) + :raises KeyError: if invalid lv_type is supplied + :returns: int. Configured size in megabytes for volume type + """ + # lv_type -> ceph configuration option + _config_map = { + 'db': 'bluestore_block_db_size', + 'wal': 'bluestore_block_wal_size', + 'journal': 'osd_journal_size', + } + + # default sizes in MB + _default_size = { + 'db': 1024, + 'wal': 576, + 'journal': 1024, + } + + # conversion of ceph config units to MB + _units = { + 'db': 1048576, # Bytes -> MB + 'wal': 1048576, # Bytes -> MB + 'journal': 1, # Already in MB + } + + configured_size = get_conf(_config_map[lv_type]) + + if configured_size is None or int(configured_size) == 0: + return _default_size[lv_type] + else: + return int(configured_size) / _units[lv_type] + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return subprocess.check_output(cmd).decode('UTF-8').strip() + except subprocess.CalledProcessError: + return None + + +def _initialize_disk(dev, dev_uuid, encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Initialize a raw block device consuming 100% of the avaliable + disk space. + + Function assumes that block device has already been wiped. 
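+
+    When encrypt is True and key_manager is 'vault', the device is first
+    encrypted with vaultlocker and the mapped /dev/mapper/crypt-<uuid>
+    device is returned in place of the raw device.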
+ + :param: dev: path to block device to initialize + :param: dev_uuid: UUID to use for any dm-crypt operations + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: Key management approach for dm-crypt keys + :raises: subprocess.CalledProcessError: if any parted calls fail + :returns: str: Full path to new partition. + """ + use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER + + if use_vaultlocker: + # NOTE(jamespage): Check to see if already initialized as a LUKS + # volume, which indicates this is a shared block + # device for journal, db or wal volumes. + luks_uuid = _luks_uuid(dev) + if luks_uuid: + return '/dev/mapper/crypt-{}'.format(luks_uuid) + + dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) + + if use_vaultlocker and not os.path.exists(dm_crypt): + subprocess.check_call([ + 'vaultlocker', + 'encrypt', + '--uuid', dev_uuid, + dev, + ]) + subprocess.check_call([ + 'dd', + 'if=/dev/zero', + 'of={}'.format(dm_crypt), + 'bs=512', + 'count=1', + ]) + + if use_vaultlocker: + return dm_crypt + else: + return dev + + +def _allocate_logical_volume(dev, lv_type, osd_fsid, + size=None, shared=False, + encrypt=False, + key_manager=CEPH_KEY_MANAGER): + """ + Allocate a logical volume from a block device, ensuring any + required initialization and setup of PV's and VG's to support + the LV. + + :param: dev: path to block device to allocate from. + :param: lv_type: logical volume type to create + (data, block, journal, wal, db) + :param: osd_fsid: UUID of the OSD associate with the LV + :param: size: Size in LVM format for the device; + if unset 100% of VG + :param: shared: Shared volume group (journal, wal, db) + :param: encrypt: Encrypt OSD devices using dm-crypt + :param: key_manager: dm-crypt Key Manager to use + :raises subprocess.CalledProcessError: in the event that any supporting + LVM or parted operation fails. + :returns: str: String in the format 'vg_name/lv_name'. + """ + lv_name = "osd-{}-{}".format(lv_type, osd_fsid) + current_volumes = lvm.list_logical_volumes() + if shared: + dev_uuid = str(uuid.uuid4()) + else: + dev_uuid = osd_fsid + pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) + + vg_name = None + if not lvm.is_lvm_physical_volume(pv_dev): + lvm.create_lvm_physical_volume(pv_dev) + if not os.path.exists(pv_dev): + # NOTE: trigger rescan to work around bug 1878752 + rescan_osd_devices() + if shared: + vg_name = 'ceph-{}-{}'.format(lv_type, + str(uuid.uuid4())) + else: + vg_name = 'ceph-{}'.format(osd_fsid) + lvm.create_lvm_volume_group(vg_name, pv_dev) + else: + vg_name = lvm.list_lvm_volume_group(pv_dev) + + if lv_name not in current_volumes: + lvm.create_logical_volume(lv_name, vg_name, size) + + return "{}/{}".format(vg_name, lv_name) + + +def osdize_dir(path, encrypt=False, bluestore=False): + """Ask ceph-disk to prepare a directory to become an osd. + + :param path: str. The directory to osdize + :param encrypt: bool. 
Should the OSD directory be encrypted at rest
+    :param bluestore: bool. Use bluestore storage for the OSD (requires
+                      ceph >= 10.2.0)
+    :returns: None
+    """
+
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    if path in osd_devices:
+        log('Device {} already processed by charm,'
+            ' skipping'.format(path))
+        return
+
+    for t in ['upstart', 'systemd']:
+        if os.path.exists(os.path.join(path, t)):
+            log('Path {} is already used as an OSD dir - bailing'.format(path))
+            return
+
+    if cmp_pkgrevno('ceph', "0.56.6") < 0:
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        return
+
+    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
+    cmd = [
+        'sudo', '-u', ceph_user(),
+        'ceph-disk',
+        'prepare',
+        '--data-dir',
+        path
+    ]
+    if cmp_pkgrevno('ceph', '0.60') >= 0:
+        if encrypt:
+            cmd.append('--dmcrypt')
+
+    # NOTE(icey): enable experimental bluestore support
+    if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
+        cmd.append('--bluestore')
+    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
+        cmd.append('--filestore')
+    log("osdize dir cmd: {}".format(cmd))
+    subprocess.check_call(cmd)
+
+    # NOTE: Record processing of device only on success to ensure that
+    #       the charm only tries to initialize a device of OSD usage
+    #       once during its lifetime.
+    osd_devices.append(path)
+    db.set('osd-devices', osd_devices)
+    db.flush()
+
+
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+
+def get_running_osds():
+    """Returns a list of the pids of the current running OSD daemons"""
+    cmd = ['pgrep', 'ceph-osd']
+    try:
+        result = str(subprocess.check_output(cmd).decode('UTF-8'))
+        return result.split()
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_cephfs(service):
+    """List the Ceph Filesystems that exist.
+
+    :param service: The service name to run the ceph command under
+    :returns: list. Returns a list of the ceph filesystems
+    """
+    if get_version() < 0.86:
+        # This command wasn't introduced until 0.86 ceph
+        return []
+    try:
+        output = str(subprocess
+                     .check_output(["ceph", '--id', service, "fs", "ls"])
+                     .decode('UTF-8'))
+        if not output:
+            return []
+        """
+        Example subprocess output:
+        'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
+        data pools: [ip-172-31-23-165_data ]\n'
+        output: filesystems: ['ip-172-31-23-165']
+        """
+        filesystems = []
+        for line in output.splitlines():
+            parts = line.split(',')
+            for part in parts:
+                if "name" in part:
+                    filesystems.append(part.split(' ')[1])
+        return filesystems
+    except subprocess.CalledProcessError:
+        return []
+
+
+def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
+    """Wait for all monitors in the cluster to upgrade, or return
+    after a timeout period has expired.
+
+    :param new_version: str of the version to watch
+    :param upgrade_key: the cephx key name to use
+    """
+    done = False
+    start_time = time.time()
+    monitor_list = []
+
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    while not done:
+        try:
+            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
+                "mon", mon, new_version
+            )) for mon in monitor_list)
+            current_time = time.time()
+            if current_time > (start_time + 10 * 60):
+                raise Exception("Timed out waiting for monitors to upgrade")
+            else:
+                # Wait 30 seconds and test again if all monitors are upgraded
+                time.sleep(30)
+        except subprocess.CalledProcessError:
+            raise
+
+
+# Edge cases:
+# 1. 
Previous node dies on upgrade, can we retry? +def roll_monitor_cluster(new_version, upgrade_key): + """This is tricky to get right so here's what we're going to do. + + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous monitor is upgraded yet. + + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_monitor_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + monitor_list = [] + mon_map = get_mon_map('admin') + if mon_map['monmap']['mons']: + for mon in mon_map['monmap']['mons']: + monitor_list.append(mon['name']) + else: + status_set('blocked', 'Unable to get monitor cluster information') + sys.exit(1) + log('monitor_list: {}'.format(monitor_list)) + + # A sorted list of osd unit names + mon_sorted_list = sorted(monitor_list) + + try: + position = mon_sorted_list.index(my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('waiting', + 'Waiting on {} to finish upgrading'.format( + mon_sorted_list[position - 1])) + wait_on_previous_node(upgrade_key=upgrade_key, + service='mon', + previous_node=mon_sorted_list[position - 1], + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='mon', + my_name=my_name, + version=new_version) + # NOTE(jamespage): + # Wait until all monitors have upgraded before bootstrapping + # the ceph-mgr daemons due to use of new mgr keyring profiles + if new_version == 'luminous': + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + bootstrap_manager() + except ValueError: + log("Failed to find {} in list {}.".format( + my_name, mon_sorted_list)) + status_set('blocked', 'failed to upgrade monitor') + + +# For E731 we can't assign a lambda, therefore, instead pass this. +def noop(): + pass + + +def upgrade_monitor(new_version, kick_function=None): + """Upgrade the current ceph monitor to the new version + + :param new_version: String version to upgrade to. 
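+    :param kick_function: Optional callable invoked periodically during the
+                          upgrade to signal liveness to peer units; defaults
+                          to a no-op when not supplied.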
+ """ + if kick_function is None: + kick_function = noop + current_version = get_version() + status_set("maintenance", "Upgrading monitor") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + # Needed to determine if whether to stop/start ceph-mgr + luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 + + kick_function() + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph source failed with message: {}".format( + err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + kick_function() + try: + if systemd(): + service_stop('ceph-mon') + log("restarting ceph-mgr.target maybe: {}" + .format(luminous_or_later)) + if luminous_or_later: + service_stop('ceph-mgr.target') + else: + service_stop('ceph-mon-all') + apt_install(packages=determine_packages(), fatal=True) + kick_function() + + owner = ceph_user() + + # Ensure the files and directories under /var/lib/ceph is chowned + # properly as part of the move to the Jewel release, which moved the + # ceph daemons to running as ceph:ceph instead of root:root. + if new_version == 'jewel': + # Ensure the ownership of Ceph's directories is correct + chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), + owner=owner, + group=owner, + follow_links=True) + + kick_function() + + # Ensure that mon directory is user writable + hostname = socket.gethostname() + path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) + mkdir(path, owner=ceph_user(), group=ceph_user(), + perms=0o755) + + if systemd(): + service_restart('ceph-mon') + log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) + if luminous_or_later: + # due to BUG: #1849874 we have to force a restart to get it to + # drop the previous version of ceph-manager and start the new + # one. + service_restart('ceph-mgr.target') + else: + service_start('ceph-mon-all') + except subprocess.CalledProcessError as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def lock_and_roll(upgrade_key, service, my_name, version): + """Create a lock on the ceph monitor cluster and upgrade. + + :param upgrade_key: str. The cephx key to use + :param service: str. The cephx id to use + :param my_name: str. The current hostname + :param version: str. The version we are upgrading to + """ + start_timestamp = time.time() + + log('monitor_key_set {}_{}_{}_start {}'.format( + service, + my_name, + version, + start_timestamp)) + monitor_key_set(upgrade_key, "{}_{}_{}_start".format( + service, my_name, version), start_timestamp) + + # alive indication: + alive_function = ( + lambda: monitor_key_set( + upgrade_key, "{}_{}_{}_alive" + .format(service, my_name, version), time.time())) + dog = WatchDog(kick_interval=3 * 60, + kick_function=alive_function) + + log("Rolling") + + # This should be quick + if service == 'osd': + upgrade_osd(version, kick_function=dog.kick_the_dog) + elif service == 'mon': + upgrade_monitor(version, kick_function=dog.kick_the_dog) + else: + log("Unknown service {}. 
Unable to upgrade".format(service), + level=ERROR) + log("Done") + + stop_timestamp = time.time() + # Set a key to inform others I am finished + log('monitor_key_set {}_{}_{}_done {}'.format(service, + my_name, + version, + stop_timestamp)) + status_set('maintenance', 'Finishing upgrade') + monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, + my_name, + version), + stop_timestamp) + + +def wait_on_previous_node(upgrade_key, service, previous_node, version): + """A lock that sleeps the current thread while waiting for the previous + node to finish upgrading. + + :param upgrade_key: + :param service: str. the cephx id to use + :param previous_node: str. The name of the previous node to wait on + :param version: str. The version we are upgrading to + :returns: None + """ + log("Previous node is: {}".format(previous_node)) + + previous_node_started_f = ( + lambda: monitor_key_exists( + upgrade_key, + "{}_{}_{}_start".format(service, previous_node, version))) + previous_node_finished_f = ( + lambda: monitor_key_exists( + upgrade_key, + "{}_{}_{}_done".format(service, previous_node, version))) + previous_node_alive_time_f = ( + lambda: monitor_key_get( + upgrade_key, + "{}_{}_{}_alive".format(service, previous_node, version))) + + # wait for 30 minutes until the previous node starts. We don't proceed + # unless we get a start condition. + try: + WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) + except WatchDog.WatchDogTimeoutException: + log("Waited for previous node to start for 30 minutes. " + "It didn't start, so may have a serious issue. Continuing with " + "upgrade of this node.", + level=WARNING) + return + + # keep the time it started from this nodes' perspective. + previous_node_started_at = time.time() + log("Detected that previous node {} has started. Time now: {}" + .format(previous_node, previous_node_started_at)) + + # Now wait for the node to complete. The node may optionally be kicking + # with the *_alive key, which allows this node to wait longer as it 'knows' + # the other node is proceeding. + try: + WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, + complete_function=previous_node_finished_f, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60) + except WatchDog.WatchDogDeadException: + # previous node was kicking, but timed out; log this condition and move + # on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node started, but has now not ticked for 5 minutes. " + "Waited total of {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." + .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + except WatchDog.WatchDogTimeoutException: + # previous node never kicked, or simply took too long; log this + # condition and move on. + now = time.time() + waited = int((now - previous_node_started_at) / 60) + log("Previous node is taking too long; assuming it has died." + "Waited {} mins on node {}. current time: {} > " + "previous node start time: {}. " + "Continuing with upgrade of this node." + .format(waited, previous_node, now, previous_node_started_at), + level=WARNING) + + +class WatchDog(object): + """Watch a dog; basically a kickable timer with a timeout between two async + units. + + The idea is that you have an overall timeout and then can kick that timeout + with intermediary hits, with a max time between those kicks allowed. 
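+
+    Minimal usage sketch (everything except the WatchDog API is
+    illustrative)::
+
+        # On the side doing the work:
+        dog = WatchDog(kick_interval=3 * 60, kick_function=publish_alive)
+        while not work_finished():
+            do_some_work()
+            dog.kick_the_dog()
+
+        # On the side waiting for the work:
+        WatchDog.timed_wait(kicked_at_function=read_alive_key,
+                            complete_function=read_done_key,
+                            wait_time=30 * 60)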
+ + Note that this watchdog doesn't rely on the clock of the other side; just + roughly when it detected that the other side started. All timings are based + on the local clock. + + The kicker will not 'kick' more often than a set interval, regardless of + how often the kick_the_dog() function is called. The kicker provides a + function (lambda: -> None) that is called when the kick interval is + reached. + + The waiter calls the static method with a check function + (lambda: -> Boolean) that indicates when the wait should be over and the + maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval. + + So the waiter calls wait(f, 30, 5) and the kicker sets up a 3 minute kick + interval, or however long it is expected for the key to propagate and to + allow for other delays. + + There is a compatibility mode where if the other side never kicks, then it + simply waits for the compatibility timer. + """ + + class WatchDogDeadException(Exception): + pass + + class WatchDogTimeoutException(Exception): + pass + + def __init__(self, kick_interval=3 * 60, kick_function=None): + """Initialise a new WatchDog. + + :param kick_interval: the interval when this side kicks the other in + seconds. + :type kick_interval: Int + :param kick_function: The function to call that does the kick. + :type kick_function: Callable[[], None] + """ + self.start_time = time.time() + self.last_run_func = None + self.last_kick_at = None + self.kick_interval = kick_interval + self.kick_f = kick_function + + def kick_the_dog(self): + """Might call the kick_function if it's time. + + This function can be called as frequently as needed, but will only run + self.kick_f after kick_interval seconds have passed. + """ + now = time.time() + if (self.last_run_func is None or + (now - self.last_run_func > self.kick_interval)): + if self.kick_f is not None: + self.kick_f() + self.last_run_func = now + self.last_kick_at = now + + @staticmethod + def wait_until(wait_f, timeout=10 * 60): + """Wait for timeout seconds until the passed function returns True. + + :param wait_f: The function to call that will end the wait. + :type wait_f: Callable[[], Boolean] + :param timeout: The time to wait in seconds. + :type timeout: int + """ + start_time = time.time() + while not wait_f(): + now = time.time() + if now > start_time + timeout: + raise WatchDog.WatchDogTimeoutException() + wait_time = random.randrange(5, 30) + log('wait_until: waiting for {} seconds'.format(wait_time)) + time.sleep(wait_time) + + @staticmethod + def timed_wait(kicked_at_function, + complete_function, + wait_time=30 * 60, + compatibility_wait_time=10 * 60, + max_kick_interval=5 * 60): + """Wait a maximum time with an intermediate 'kick' time. + + This function will wait for max_kick_interval seconds unless the + kicked_at_function() call returns a time that is not older than + max_kick_interval (in seconds). i.e. the other side can signal that it + is still doing things during the max_kick_interval as long as it kicks + at least every max_kick_interval seconds. + + The maximum wait is "wait_time", but the other side must keep kicking + during this period. + + The "compatibility_wait_time" is used if the other side never kicks + (i.e. the kicked_at_function() always returns None). In this case the + function waits up to "compatibility_wait_time". + + Note that the type of the return from the kicked_at_function is an + Optional[str], not a float. The function will coerce this to a float + for the comparison. 
This represents the return value of + time.time() at the "other side". It's a string to simplify the + function obtaining the time value from the other side. + + The function raises WatchDogTimeoutException if either the + compatibility_wait_time or the wait_time are exceeded. + + The function raises WatchDogDeadException if the max_kick_interval is + exceeded. + + Note that it is possible that the first kick interval is extended to + compatibility_wait_time if the "other side" doesn't kick immediately. + The best solution is for the other side to kick early and often. + + :param kicked_at_function: The function to call to retrieve the time + that the other side 'kicked' at. None if the other side hasn't + kicked. + :type kicked_at_function: Callable[[], Optional[str]] + :param complete_function: The callable that returns True when done. + :type complete_function: Callable[[], Boolean] + :param wait_time: the maximum time to wait, even with kicks, in + seconds. + :type wait_time: int + :param compatibility_wait_time: The time to wait if no kicks are + received, in seconds. + :type compatibility_wait_time: int + :param max_kick_interval: The maximum time allowed between kicks before + the wait is over, in seconds: + :type max_kick_interval: int + :raises: WatchDog.WatchDogTimeoutException, + WatchDog.WatchDogDeadException + """ + start_time = time.time() + while True: + if complete_function(): + break + # the time when the waiting for unit last kicked. + kicked_at = kicked_at_function() + now = time.time() + if kicked_at is None: + # assume other end doesn't do alive kicks + if (now - start_time > compatibility_wait_time): + raise WatchDog.WatchDogTimeoutException() + else: + # other side is participating in kicks; must kick at least + # every 'max_kick_interval' to stay alive. + if (now - float(kicked_at) > max_kick_interval): + raise WatchDog.WatchDogDeadException() + if (now - start_time > wait_time): + raise WatchDog.WatchDogTimeoutException() + delay_time = random.randrange(5, 30) + log('waiting for {} seconds'.format(delay_time)) + time.sleep(delay_time) + + +def get_upgrade_position(osd_sorted_list, match_name): + """Return the upgrade position for the given osd. + + :param osd_sorted_list: Osds sorted + :type osd_sorted_list: [str] + :param match_name: The osd name to match + :type match_name: str + :returns: The position of the name + :rtype: int + :raises: ValueError if name is not found + """ + for index, item in enumerate(osd_sorted_list): + if item.name == match_name: + return index + raise ValueError("osd name '{}' not found in get_upgrade_position list" + .format(match_name)) + + +# Edge cases: +# 1. Previous node dies on upgrade, can we retry? +# 2. This assumes that the osd failure domain is not set to osd. +# It rolls an entire server at a time. +def roll_osd_cluster(new_version, upgrade_key): + """This is tricky to get right so here's what we're going to do. + + There's 2 possible cases: Either I'm first in line or not. + If I'm not first in line I'll wait a random time between 5-30 seconds + and test to see if the previous osd is upgraded yet. + + TODO: If you're not in the same failure domain it's safe to upgrade + 1. 
Examine all pools and adopt the most strict failure domain policy + Example: Pool 1: Failure domain = rack + Pool 2: Failure domain = host + Pool 3: Failure domain = row + + outcome: Failure domain = host + + :param new_version: str of the version to upgrade to + :param upgrade_key: the cephx key name to use when upgrading + """ + log('roll_osd_cluster called with {}'.format(new_version)) + my_name = socket.gethostname() + osd_tree = get_osd_tree(service=upgrade_key) + # A sorted list of osd unit names + osd_sorted_list = sorted(osd_tree) + log("osd_sorted_list: {}".format(osd_sorted_list)) + + try: + position = get_upgrade_position(osd_sorted_list, my_name) + log("upgrade position: {}".format(position)) + if position == 0: + # I'm first! Roll + # First set a key to inform others I'm about to roll + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + else: + # Check if the previous node has finished + status_set('waiting', + 'Waiting on {} to finish upgrading'.format( + osd_sorted_list[position - 1].name)) + wait_on_previous_node( + upgrade_key=upgrade_key, + service='osd', + previous_node=osd_sorted_list[position - 1].name, + version=new_version) + lock_and_roll(upgrade_key=upgrade_key, + service='osd', + my_name=my_name, + version=new_version) + except ValueError: + log("Failed to find name {} in list {}".format( + my_name, osd_sorted_list)) + status_set('blocked', 'failed to upgrade osd') + + +def upgrade_osd(new_version, kick_function=None): + """Upgrades the current osd + + :param new_version: str. The new version to upgrade to + """ + if kick_function is None: + kick_function = noop + + current_version = get_version() + status_set("maintenance", "Upgrading osd") + log("Current ceph version is {}".format(current_version)) + log("Upgrading to: {}".format(new_version)) + + try: + add_source(config('source'), config('key')) + apt_update(fatal=True) + except subprocess.CalledProcessError as err: + log("Adding the ceph sources failed with message: {}".format( + err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + kick_function() + + try: + # Upgrade the packages before restarting the daemons. + status_set('maintenance', 'Upgrading packages to %s' % new_version) + apt_install(packages=determine_packages(), fatal=True) + kick_function() + + # If the upgrade does not need an ownership update of any of the + # directories in the osd service directory, then simply restart + # all of the OSDs at the same time as this will be the fastest + # way to update the code on the node. + if not dirs_need_ownership_update('osd'): + log('Restarting all OSDs to load new binaries', DEBUG) + with maintain_all_osd_states(): + if systemd(): + service_restart('ceph-osd.target') + else: + service_restart('ceph-osd-all') + return + + # Need to change the ownership of all directories which are not OSD + # directories as well. + # TODO - this should probably be moved to the general upgrade function + # and done before mon/osd. + update_owner(CEPH_BASE_DIR, recurse_dirs=False) + non_osd_dirs = filter(lambda x: not x == 'osd', + os.listdir(CEPH_BASE_DIR)) + non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), + non_osd_dirs) + for i, path in enumerate(non_osd_dirs): + if i % 100 == 0: + kick_function() + update_owner(path) + + # Fast service restart wasn't an option because each of the OSD + # directories need the ownership updated for all the files on + # the OSD. Walk through the OSDs one-by-one upgrading the OSD. 
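 + # Each OSD is taken through stop -> disable -> chown -> enable -> + # start by _upgrade_single_osd() below, so at most one local OSD is + # down at any point while its ownership is being fixed up.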
+ for osd_dir in _get_child_dirs(OSD_BASE_DIR): + kick_function() + try: + osd_num = _get_osd_num_from_dirname(osd_dir) + _upgrade_single_osd(osd_num, osd_dir) + except ValueError as ex: + # Directory could not be parsed - junk directory? + log('Could not parse osd directory %s: %s' % (osd_dir, ex), + WARNING) + continue + + except (subprocess.CalledProcessError, IOError) as err: + log("Stopping ceph and upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + +def _upgrade_single_osd(osd_num, osd_dir): + """Upgrades the single OSD directory. + + :param osd_num: the num of the OSD + :param osd_dir: the directory of the OSD to upgrade + :raises CalledProcessError: if an error occurs in a command issued as part + of the upgrade process + :raises IOError: if an error occurs reading/writing to a file as part + of the upgrade process + """ + with maintain_osd_state(osd_num): + stop_osd(osd_num) + disable_osd(osd_num) + update_owner(osd_dir) + enable_osd(osd_num) + start_osd(osd_num) + + +def stop_osd(osd_num): + """Stops the specified OSD number. + + :param osd_num: the osd number to stop + """ + if systemd(): + service_stop('ceph-osd@{}'.format(osd_num)) + else: + service_stop('ceph-osd', id=osd_num) + + +def start_osd(osd_num): + """Starts the specified OSD number. + + :param osd_num: the osd number to start. + """ + if systemd(): + service_start('ceph-osd@{}'.format(osd_num)) + else: + service_start('ceph-osd', id=osd_num) + + +def disable_osd(osd_num): + """Disables the specified OSD number. + + Ensures that the specified osd will not be automatically started at the + next reboot of the system. Due to differences between init systems, + this method cannot make any guarantees that the specified osd cannot be + started manually. + + :param osd_num: the osd id which should be disabled. + :raises CalledProcessError: if an error occurs invoking the systemd cmd + to disable the OSD + :raises IOError, OSError: if the attempt to read/remove the ready file in + an upstart enabled system fails + """ + if systemd(): + # When running under systemd, the individual ceph-osd daemons run as + # templated units and can be directly addressed by referring to the + # templated service name ceph-osd@. Additionally, systemd + # allows one to disable a specific templated unit by running the + # 'systemctl disable ceph-osd@' command. When disabled, the + # OSD should remain disabled until re-enabled via systemd. + # Note: disabling an already disabled service in systemd returns 0, so + # no need to check whether it is enabled or not. + cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # Neither upstart nor the ceph-osd upstart script provides for + # disabling the starting of an OSD automatically. The specific OSD + # cannot be prevented from running manually, however it can be + # prevented from running automatically on reboot by removing the + # 'ready' file in the OSD's root directory. This is due to the + # ceph-osd-all upstart script checking for the presence of this file + # before starting the OSD. + ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + if os.path.exists(ready_file): + os.unlink(ready_file) + + +def enable_osd(osd_num): + """Enables the specified OSD number. + + Ensures that the specified osd_num will be enabled and ready to start + automatically in the event of a reboot. 
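 + + On systemd systems this enables the templated ceph-osd@ unit; on + upstart systems it recreates the 'ready' marker file that the + ceph-osd-all script checks before starting the OSD.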
+ + :param osd_num: the osd id which should be enabled. + :raises CalledProcessError: if the call to the systemd command issued + fails when enabling the service + :raises IOError: if the attempt to write the ready file in an upstart + enabled system fails + """ + if systemd(): + cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)] + subprocess.check_call(cmd) + else: + # When running on upstart, the OSDs are started via the ceph-osd-all + # upstart script which will only start the osd if it has a 'ready' + # file. Make sure that file exists. + ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), + 'ready') + with open(ready_file, 'w') as f: + f.write('ready') + + # Make sure the correct user owns the file. It shouldn't be necessary + # as the upstart script should run with root privileges, but it's + # better to have all the files matching ownership. + update_owner(ready_file) + + +def update_owner(path, recurse_dirs=True): + """Changes the ownership of the specified path. + + Changes the ownership of the specified path to the new ceph daemon user + using the system's native chown functionality. This may take a while, + so this method will issue a set_status for any changes of ownership which + recurses into directory structures. + + :param path: the path to recursively change ownership for + :param recurse_dirs: boolean indicating whether to recursively change the + ownership of all the files in a path's subtree or to + simply change the ownership of the path. + :raises CalledProcessError: if an error occurs issuing the chown system + command + """ + user = ceph_user() + user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user) + cmd = ['chown', user_group, path] + if os.path.isdir(path) and recurse_dirs: + status_set('maintenance', ('Updating ownership of %s to %s' % + (path, user))) + cmd.insert(1, '-R') + + log('Changing ownership of {path} to {user}'.format( + path=path, user=user_group), DEBUG) + start = datetime.now() + subprocess.check_call(cmd) + elapsed_time = (datetime.now() - start) + + log('Took {secs} seconds to change the ownership of path: {path}'.format( + secs=elapsed_time.total_seconds(), path=path), DEBUG) + + +def get_osd_state(osd_num, osd_goal_state=None): + """Get OSD state or loop until OSD state matches OSD goal state. + + If osd_goal_state is None, just return the current OSD state. + If osd_goal_state is not None, loop until the current OSD state matches + the OSD goal state. + + :param osd_num: the osd id to get state for + :param osd_goal_state: (Optional) string indicating state to wait for + Defaults to None + :returns: Returns a str, the OSD state. + :rtype: str + """ + while True: + asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num) + cmd = [ + 'ceph', + 'daemon', + asok, + 'status' + ] + try: + result = json.loads(str(subprocess + .check_output(cmd) + .decode('UTF-8'))) + except (subprocess.CalledProcessError, ValueError) as e: + log("{}".format(e), level=DEBUG) + continue + osd_state = result['state'] + log("OSD {} state: {}, goal state: {}".format( + osd_num, osd_state, osd_goal_state), level=DEBUG) + if not osd_goal_state: + return osd_state + if osd_state == osd_goal_state: + return osd_state + time.sleep(3) + + +def get_all_osd_states(osd_goal_states=None): + """Get all OSD states or loop until all OSD states match OSD goal states. + + If osd_goal_states is None, just return a dictionary of current OSD states. + If osd_goal_states is not None, loop until the current OSD states match + the OSD goal states. 
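 + + The returned dictionary is keyed by local OSD id; an osd_goal_states + argument is expected to use the same keys, as captured and restored by + maintain_all_osd_states() below.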
+ + :param osd_goal_states: (Optional) dict indicating states to wait for + Defaults to None + :returns: Returns a dictionary of current OSD states. + :rtype: dict + """ + osd_states = {} + for osd_num in get_local_osd_ids(): + if not osd_goal_states: + osd_states[osd_num] = get_osd_state(osd_num) + else: + osd_states[osd_num] = get_osd_state( + osd_num, + osd_goal_state=osd_goal_states[osd_num]) + return osd_states + + +@contextmanager +def maintain_osd_state(osd_num): + """Ensure the state of an OSD is maintained. + + Ensures the state of an OSD is the same at the end of a block nested + in a with statement as it was at the beginning of the block. + + :param osd_num: the osd id to maintain state for + """ + osd_state = get_osd_state(osd_num) + try: + yield + finally: + get_osd_state(osd_num, osd_goal_state=osd_state) + + +@contextmanager +def maintain_all_osd_states(): + """Ensure all local OSD states are maintained. + + Ensures the states of all local OSDs are the same at the end of a + block nested in a with statement as they were at the beginning of + the block. + """ + osd_states = get_all_osd_states() + try: + yield + finally: + get_all_osd_states(osd_goal_states=osd_states) + + +def list_pools(client='admin'): + """List the current pools that Ceph has. + + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Returns a list of available pools. + :rtype: list + :raises: subprocess.CalledProcessError if the subprocess fails to run. + """ + try: + pool_list = [] + pools = subprocess.check_output(['rados', '--id', client, 'lspools'], + universal_newlines=True, + stderr=subprocess.STDOUT) + for pool in pools.splitlines(): + pool_list.append(pool) + return pool_list + except subprocess.CalledProcessError as err: + log("rados lspools failed with error: {}".format(err.output)) + raise + + +def get_pool_param(pool, param, client='admin'): + """Get parameter from pool. + + :param pool: Name of pool to get variable from + :type pool: str + :param param: Name of variable to get + :type param: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Value of variable on pool or None + :rtype: str or None + :raises: subprocess.CalledProcessError + """ + try: + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param], + universal_newlines=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT: option' in cp.output: + return None + raise + if ':' in output: + return output.split(':')[1].lstrip().rstrip() + + +def get_pool_erasure_profile(pool, client='admin'): + """Get erasure code profile for pool. + + :param pool: Name of pool to get variable from + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Erasure code profile of pool or None + :rtype: str or None + :raises: subprocess.CalledProcessError + """ + try: + return get_pool_param(pool, 'erasure_code_profile', client=client) + except subprocess.CalledProcessError as cp: + if cp.returncode == 13 and 'EACCES: pool' in cp.output: + # Not an erasure-coded pool + return None + raise + + +def get_pool_quota(pool, client='admin'): + """Get pool quota. 
+ + :param pool: Name of pool to get variable from + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Dictionary with quota variables + :rtype: dict + :raises: subprocess.CalledProcessError + """ + output = subprocess.check_output( + ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool], + universal_newlines=True, stderr=subprocess.STDOUT) + rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)') + result = {} + for line in output.splitlines(): + m = rc.match(line) + if m: + result.update({'max_{}'.format(m.group(1)): m.group(2)}) + return result + + +def get_pool_applications(pool='', client='admin'): + """Get pool applications. + + :param pool: (Optional) Name of pool to get applications for + Defaults to get for all pools + :type pool: str + :param client: (Optional) client id for ceph key to use + Defaults to ``admin`` + :type client: str + :returns: Dictionary with pool name as key + :rtype: dict + :raises: subprocess.CalledProcessError + """ + + cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get'] + if pool: + cmd.append(pool) + try: + output = subprocess.check_output(cmd, + universal_newlines=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as cp: + if cp.returncode == 2 and 'ENOENT' in cp.output: + return {} + raise + return json.loads(output) + + +def list_pools_detail(): + """Get detailed information about pools. + + Structure: + {'pool_name_1': {'applications': {'application': {}}, + 'parameters': {'pg_num': '42', 'size': '42'}, + 'quota': {'max_bytes': '1000', + 'max_objects': '10'}, + }, + 'pool_name_2': ... + } + + :returns: Dictionary with detailed pool information. + :rtype: dict + :raises: subprocess.CalledProcessError + """ + get_params = ['pg_num', 'size'] + result = {} + applications = get_pool_applications() + for pool in list_pools(): + result[pool] = { + 'applications': applications.get(pool, {}), + 'parameters': {}, + 'quota': get_pool_quota(pool), + } + for param in get_params: + result[pool]['parameters'].update({ + param: get_pool_param(pool, param)}) + erasure_profile = get_pool_erasure_profile(pool) + if erasure_profile: + result[pool]['parameters'].update({ + 'erasure_code_profile': erasure_profile}) + return result + + +def dirs_need_ownership_update(service): + """Determines if directories still need change of ownership. + + Examines the set of directories under the /var/lib/ceph/{service} directory + and determines if they have the correct ownership or not. This is + necessary due to the upgrade from Hammer to Jewel where the daemon user + changes from root:root to ceph:ceph. + + :param service: the name of the service folder to check (e.g. osd, mon) + :returns: boolean. True if the directories need a change of ownership, + False otherwise. + :raises IOError: if an error occurs reading the file stats from one of + the child directories. + :raises OSError: if the specified path does not exist or some other error + """ + expected_owner = expected_group = ceph_user() + path = os.path.join(CEPH_BASE_DIR, service) + for child in _get_child_dirs(path): + curr_owner, curr_group = owner(child) + + if (curr_owner == expected_owner) and (curr_group == expected_group): + continue + + # NOTE(lathiat): when config_changed runs on reboot, the OSD might not + # yet be mounted or started, and the underlying directory the OSD is + # mounted to is expected to be owned by root. So skip the check. This + # may also happen for OSD directories for OSDs that were removed. 
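 + # The 'magic' file is only written once ceph has initialised the OSD + # data directory, so if it is missing the directory is either not yet + # mounted or is left over from a removed OSD.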
+ if (service == 'osd' and + not os.path.exists(os.path.join(child, 'magic'))): + continue + + log('Directory "%s" needs its ownership updated' % child, DEBUG) + return True + + # All child directories had the expected ownership + return False + + +# A dict of valid ceph upgrade paths. Mapping is old -> new +UPGRADE_PATHS = collections.OrderedDict([ + ('firefly', 'hammer'), + ('hammer', 'jewel'), + ('jewel', 'luminous'), + ('luminous', 'mimic'), + ('mimic', 'nautilus'), + ('nautilus', 'octopus'), +]) + +# Map UCA codenames to ceph codenames +UCA_CODENAME_MAP = { + 'icehouse': 'firefly', + 'juno': 'firefly', + 'kilo': 'hammer', + 'liberty': 'hammer', + 'mitaka': 'jewel', + 'newton': 'jewel', + 'ocata': 'jewel', + 'pike': 'luminous', + 'queens': 'luminous', + 'rocky': 'mimic', + 'stein': 'mimic', + 'train': 'nautilus', + 'ussuri': 'octopus', +} + + +def pretty_print_upgrade_paths(): + """Pretty print supported upgrade paths for ceph""" + return ["{} -> {}".format(key, value) + for key, value in UPGRADE_PATHS.items()] + + +def resolve_ceph_version(source): + """Resolves a version of ceph based on source configuration + based on Ubuntu Cloud Archive pockets. + + @param: source: source configuration option of charm + :returns: ceph release codename or None if not resolvable + """ + os_release = get_os_codename_install_source(source) + return UCA_CODENAME_MAP.get(os_release) + + +def get_ceph_pg_stat(): + """Returns the result of ceph pg stat. + + :returns: dict + """ + try: + tree = str(subprocess + .check_output(['ceph', 'pg', 'stat', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + if not json_tree['num_pg_by_state']: + return None + return json_tree + except ValueError as v: + log("Unable to parse ceph pg stat json: {}. Error: {}".format( + tree, v)) + raise + except subprocess.CalledProcessError as e: + log("ceph pg stat command failed with message: {}".format(e)) + raise + + +def get_ceph_health(): + """Returns the health of the cluster from a 'ceph status' + + :returns: dict tree of ceph status + :raises: CalledProcessError if our ceph command fails to get the overall + status, use get_ceph_health()['overall_status']. + """ + try: + tree = str(subprocess + .check_output(['ceph', 'status', '--format=json']) + .decode('UTF-8')) + try: + json_tree = json.loads(tree) + # Make sure children are present in the json + if not json_tree['overall_status']: + return None + + return json_tree + except ValueError as v: + log("Unable to parse ceph tree json: {}. Error: {}".format( + tree, v)) + raise + except subprocess.CalledProcessError as e: + log("ceph status command failed with message: {}".format(e)) + raise + + +def reweight_osd(osd_num, new_weight): + """Changes the crush weight of an OSD to the value specified. + + :param osd_num: the osd id which should be changed + :param new_weight: the new weight for the OSD + :returns: bool. True if output looks right, else false. 
+ :raises CalledProcessError: if an error occurs invoking the ceph command + """ + try: + cmd_result = str(subprocess + .check_output(['ceph', 'osd', 'crush', + 'reweight', "osd.{}".format(osd_num), + new_weight], + stderr=subprocess.STDOUT) + .decode('UTF-8')) + expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( + ID=osd_num) + " to {}".format(new_weight) + log(cmd_result) + if expected_result in cmd_result: + return True + return False + except subprocess.CalledProcessError as e: + log("ceph osd crush reweight command failed" + " with message: {}".format(e)) + raise + + +def determine_packages(): + """Determines packages for installation. + + :returns: list of ceph packages + """ + packages = PACKAGES.copy() + if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': + btrfs_package = 'btrfs-progs' + else: + btrfs_package = 'btrfs-tools' + packages.append(btrfs_package) + return packages + + +def bootstrap_manager(): + """Create the mgr keyring, then enable and start the local ceph-mgr.""" + hostname = socket.gethostname() + path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) + keyring = os.path.join(path, 'keyring') + + if os.path.exists(keyring): + log('bootstrap_manager: mgr already initialized.') + else: + mkdir(path, owner=ceph_user(), group=ceph_user()) + subprocess.check_call(['ceph', 'auth', 'get-or-create', + 'mgr.{}'.format(hostname), 'mon', + 'allow profile mgr', 'osd', 'allow *', + 'mds', 'allow *', '--out-file', + keyring]) + chownr(path, ceph_user(), ceph_user()) + + unit = 'ceph-mgr@{}'.format(hostname) + subprocess.check_call(['systemctl', 'enable', unit]) + service_restart(unit) + + +def osd_noout(enable): + """Sets or unsets the cluster-wide 'noout' flag. + + :param enable: bool. True to set noout, False to unset. + :returns: bool. True if output looks right. + :raises CalledProcessError: if an error occurs invoking the ceph command + """ + operation = { + True: 'set', + False: 'unset', + } + try: + subprocess.check_call(['ceph', '--id', 'admin', + 'osd', operation[enable], + 'noout']) + log('running ceph osd {} noout'.format(operation[enable])) + return True + except subprocess.CalledProcessError as e: + log(e) + raise + + +class OSDConfigSetError(Exception): + """Error occurred applying OSD settings.""" + pass + + +def apply_osd_settings(settings): + """Applies the provided osd settings. + + Apply the provided settings to all local OSDs unless the settings are + already present. Settings stop being applied on encountering an error. + + :param settings: dict. Dictionary of settings to apply. + :returns: bool. True if the commands ran successfully. + :raises: OSDConfigSetError + """ + current_settings = {} + base_cmd = 'ceph daemon osd.{osd_id} config --format=json' + get_cmd = base_cmd + ' get {key}' + set_cmd = base_cmd + ' set {key} {value}' + + def _get_cli_key(key): + return key.replace(' ', '_') + # Retrieve the current values to check keys are correct and to make this a + # no-op if the settings are already applied. 
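 + # Note that keys are handed to the admin socket with spaces replaced + # by underscores by _get_cli_key above, e.g. 'osd heartbeat grace' + # becomes 'osd_heartbeat_grace'.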
+ for osd_id in get_local_osd_ids(): + for key, value in sorted(settings.items()): + cli_key = _get_cli_key(key) + cmd = get_cmd.format(osd_id=osd_id, key=cli_key) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error retrieving osd setting: {}".format(out['error']), + level=ERROR) + return False + current_settings[key] = out[cli_key] + settings_diff = { + k: v + for k, v in settings.items() + if str(v) != str(current_settings[k])} + for key, value in sorted(settings_diff.items()): + log("Setting {} to {}".format(key, value), level=DEBUG) + cmd = set_cmd.format( + osd_id=osd_id, + key=_get_cli_key(key), + value=value) + out = json.loads( + subprocess.check_output(cmd.split()).decode('UTF-8')) + if 'error' in out: + log("Error applying osd setting: {}".format(out['error']), + level=ERROR) + raise OSDConfigSetError + return True diff --git a/ceph-proxy/unit_tests/test_ceph_broker.py b/ceph-proxy/unit_tests/test_ceph_broker.py deleted file mode 100644 index c1be6494..00000000 --- a/ceph-proxy/unit_tests/test_ceph_broker.py +++ /dev/null @@ -1,136 +0,0 @@ -import json -import unittest - -import mock - -import ceph_broker - - -class CephBrokerTestCase(unittest.TestCase): - def setUp(self): - super(CephBrokerTestCase, self).setUp() - - @mock.patch('ceph_broker.log') - def test_process_requests_noop(self, mock_log): - req = json.dumps({'api-version': 1, 'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.log') - def test_process_requests_missing_api_version(self, mock_log): - req = json.dumps({'ops': []}) - rc = ceph_broker.process_requests(req) - self.assertEqual(json.loads(rc), { - 'exit-code': 1, - 'stderr': 'Missing or invalid api version (None)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_version(self, mock_log): - req = json.dumps({'api-version': 2, 'ops': []}) - rc = ceph_broker.process_requests(req) - print("Return: {}".format(rc)) - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': 'Missing or invalid api version (2)'}) - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid(self, mock_log): - reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc), - {'exit-code': 1, - 'stderr': "Unknown operation 'invalid_op'"}) - - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num(self, mock_log, - mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 100}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.get_osds') - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_w_pg_num_capped(self, mock_log, - mock_pool_exists, - mock_replicated_pool, - mock_get_osds): - mock_get_osds.return_value = [0, 1, 2] - 
mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3, - 'pg_num': 300}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - mock_replicated_pool.assert_called_with(service='admin', name='foo', - replicas=3, pg_num=100) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_exists(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = True - reqs = json.dumps({'api-version': 1, - 'ops': [{'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', - name='foo') - self.assertFalse(mock_replicated_pool.create.called) - self.assertEqual(json.loads(rc), {'exit-code': 0}) - - @mock.patch('ceph_broker.ReplicatedPool') - @mock.patch('ceph_broker.pool_exists') - @mock.patch('ceph_broker.log') - def test_process_requests_create_pool_rid(self, mock_log, - mock_pool_exists, - mock_replicated_pool): - mock_pool_exists.return_value = False - reqs = json.dumps({'api-version': 1, - 'request-id': '1ef5aede', - 'ops': [{ - 'op': 'create-pool', - 'name': 'foo', - 'replicas': 3}]}) - rc = ceph_broker.process_requests(reqs) - mock_pool_exists.assert_called_with(service='admin', name='foo') - mock_replicated_pool.assert_called_with(service='admin', - name='foo', - replicas=3) - self.assertEqual(json.loads(rc)['exit-code'], 0) - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') - - @mock.patch('ceph_broker.log') - def test_process_requests_invalid_api_rid(self, mock_log): - reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede', - 'ops': [{'op': 'create-pool'}]}) - rc = ceph_broker.process_requests(reqs) - self.assertEqual(json.loads(rc)['exit-code'], 1) - self.assertEqual(json.loads(rc)['stderr'], - "Missing or invalid api version (0)") - self.assertEqual(json.loads(rc)['request-id'], '1ef5aede') From 6842897866ebffce057fad7a76e7eb8435214c77 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 6 Aug 2020 15:27:01 +0100 Subject: [PATCH 2045/2699] Add support for erasure coded pools Add test bundle for erasure-coding with check to validate that pools are created correctly. Change-Id: Ic6f959b2e598f7f9cfa13bd60fdab62a22e03b59 Func-Test-PR: https://github.com/openstack-charmers/zaza-openstack-tests/pull/396 --- ceph-proxy/tests/bundles/focal-ussuri-ec.yaml | 215 ++++++++++++++++++ ceph-proxy/tests/tests.yaml | 13 +- 2 files changed, 227 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/tests/bundles/focal-ussuri-ec.yaml diff --git a/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml b/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml new file mode 100644 index 00000000..100fe81c --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 9292c618..61324925 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -2,9 +2,14 @@ charm_name: ceph-proxy configure: - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy + - erasure-coded: + - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy tests: - zaza.openstack.charm_tests.ceph.tests.CephProxyTest + - erasure-coded: + - zaza.openstack.charm_tests.ceph.tests.CephProxyTest + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes gate_bundles: - trusty-mitaka # jewel @@ -16,6 +21,8 @@ gate_bundles: - bionic-train - bionic-ussuri - focal-ussuri + - erasure-coded: focal-ussuri-ec + dev_bundles: # Icehouse - trusty-icehouse @@ -25,8 +32,9 @@ dev_bundles: - xenial-pike - focal-victoria - groovy-victoria + smoke_bundles: - - bionic-train + - focal-ussuri target_deploy_status: ceph-proxy: @@ -41,6 +49,9 @@ target_deploy_status: keystone: workload-status: active workload-status-message: "Unit is ready" + nova-compute: + workload-status: waiting + workload-status-message: "Incomplete relations: storage-backend" tests_options: force_deploy: From dca0742d7b98eb203b92b0ef27b13abaed584548 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 28 Aug 2020 14:05:05 +0200 Subject: [PATCH 2046/2699] Add focal-victoria to the test gate Change-Id: I94f2c5035a42ea4dcb0ac1843807525bdaa6d5fc --- ceph-fs/src/tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index e025ace1..51ada0fd 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-fs gate_bundles: + - focal-victoria - focal-ussuri - bionic-ussuri - bionic-train @@ -15,7 +16,6 @@ smoke_bundles: - bionic-stein dev_bundles: - groovy-victoria - - focal-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network From ed3b7fe5bd2b67020a5a370a0f44c1923af30a84 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 28 Aug 2020 14:05:05 +0200 Subject: [PATCH 2047/2699] Add focal-victoria to the test gate Change-Id: I7af92d96eed6cef87677a7ccb761d1c13d91484a --- ceph-mon/tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index b5b2fb45..cbfbc65f 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-mon gate_bundles: + - focal-victoria - focal-ussuri-ec - focal-ussuri - bionic-ussuri @@ -17,7 +18,6 @@ smoke_bundles: - bionic-train dev_bundles: - groovy-victoria - - focal-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From 39fb150434d2bf570dba0c1036dc310a3a42bcfa Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 28 Aug 2020 14:05:05 +0200 Subject: [PATCH 2048/2699] Add focal-victoria to the test gate Change-Id: Iad7b94f2ee4ccb39b290a3009cc29b1648ddd554 --- ceph-osd/tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 24e242f3..5a116be2 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-osd gate_bundles: + 
- focal-victoria - focal-ussuri - bionic-ussuri - bionic-train @@ -14,7 +15,6 @@ gate_bundles: smoke_bundles: - bionic-train dev_bundles: - - focal-victoria - groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image From 2e5000e25c5e8e780b012fb575d932e156409166 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 28 Aug 2020 14:05:05 +0200 Subject: [PATCH 2049/2699] Add focal-victoria to the test gate Change-Id: Ie92d186b3862712a57a013ca6da4dc9bedff49d9 --- ceph-radosgw/tests/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 22c8d24f..acb664c4 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-radosgw gate_bundles: + - vault: focal-victoria + - vault: focal-victoria-namespaced - vault: focal-ussuri - vault: focal-ussuri-namespaced - vault: bionic-ussuri @@ -23,8 +25,6 @@ smoke_bundles: dev_bundles: - vault: groovy-victoria - vault: groovy-victoria-namespaced - - vault: focal-victoria - - vault: focal-victoria-namespaced - bionic-queens-multisite - bionic-rocky-multisite target_deploy_status: From 6fa44002ebbcd0618b0a632178b0f1c14da65560 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 28 Aug 2020 20:45:10 -0400 Subject: [PATCH 2050/2699] Refresh README The removed content was placed in the CDG. Apply README template. Change-Id: Ib5bb2c03b5332d8bd39664e72170fc0683f7e8a8 --- ceph-rbd-mirror/src/README.md | 224 +++++++++------------------------- 1 file changed, 56 insertions(+), 168 deletions(-) diff --git a/ceph-rbd-mirror/src/README.md b/ceph-rbd-mirror/src/README.md index 39ffefa0..672783c9 100644 --- a/ceph-rbd-mirror/src/README.md +++ b/ceph-rbd-mirror/src/README.md @@ -1,218 +1,106 @@ # Overview -The `ceph-rbd-mirror` charm deploys the Ceph `rbd-mirror` daemon and helps -automate remote creation and configuration of mirroring for Ceph pools used for -hosting RBD images. Actions for operator driven failover and fallback of the -RBD image pools are also provided. +[Ceph][ceph-upstream] is a unified, distributed storage system designed for +excellent performance, reliability, and scalability. -> **Note**: The `ceph-rbd-mirror` charm addresses only one specific element in - datacentre redundancy. Refer to [Ceph RADOS Gateway Multisite Replication][ceph-multisite-replication] - and other work to arrive at a complete solution. +The ceph-rbd-mirror charm deploys the Ceph `rbd-mirror` daemon and helps +automate remote creation and configuration of mirroring for Ceph pools used for +hosting RBD images. -For more information on charms and RBD mirroring see the [Ceph RBD Mirroring][ceph-rbd-mirroring] -appendix in the [OpenStack Charms Deployment Guide][charms-deploy-guide]. +> **Note**: RBD mirroring is only one aspect of datacentre redundancy. Refer to + [Ceph RADOS Gateway Multisite Replication][ceph-multisite-replication] and + other work to arrive at a complete solution. -# Functionality +## Functionality The charm has the following major features: -- Support for a maximum of two Ceph clusters. The clusters may reside within a +* Support for a maximum of two Ceph clusters. The clusters may reside within a single model or be contained within two separate models. -- Specifically written for two-way replication. This provides the ability to +* Specifically written for two-way replication. This provides the ability to fail over and fall back to/from a single secondary site. 
Ceph does have support for mirroring to any number of clusters but the charm does not support this. -- Automatically creates and configures (for mirroring) pools in the remote +* Automatically creates and configures (for mirroring) pools in the remote cluster based on any pools in the local cluster that are labelled with the 'rbd' tag. -- Mirroring of whole pools only. Ceph itself has support for the mirroring of +* Mirroring of whole pools only. Ceph itself has support for the mirroring of individual images but the charm does not support this. -- Network space aware. The mirror daemon can be informed about network +* Network space aware. The mirror daemon can be informed about network configuration by binding the `public` and `cluster` endpoints. The daemon will use the network associated with the `cluster` endpoint for mirroring traffic. Other notes on RBD mirroring: -- Supports multiple running instances of the mirror daemon in each cluster. +* Supports multiple running instances of the mirror daemon in each cluster. Doing so allows for the dynamic re-distribution of the mirroring load amongst the daemons. This addresses both high availability and performance concerns. - Leverage this feature by scaling out the `ceph-rbd-mirror` application (i.e. + Leverage this feature by scaling out the ceph-rbd-mirror application (i.e. add more units). -- Requires that every RBD image within each pool is created with the +* Requires that every RBD image within each pool is created with the `journaling` and `exclusive-lock` image features enabled. The charm enables - these features by default and the `ceph-mon` charm will announce them over - the `client` relation when it has units connected to its `rbd-mirror` - endpoint. - -- The feature first appeared in Ceph `v.12.2` (Luminous). - -# Deployment - -It is assumed that the two Ceph clusters have been set up (i.e. `ceph-mon` and -`ceph-osd` charms are deployed and relations added). - -> **Note**: Minimal two-cluster test bundles can be found in the - `src/tests/bundles` subdirectory where both the one-model and two-model - scenarios are featured. - -## Using one model - -Deploy the charm for each cluster, giving each application a name to -distinguish one from the other (site 'a' and site 'b'): - - juju deploy ceph-rbd-mirror ceph-rbd-mirror-a - juju deploy ceph-rbd-mirror ceph-rbd-mirror-b - -Add a relation between the 'ceph-mon' of site 'a' and both the local (site 'a') -and remote (site 'b') units of 'ceph-rbd-mirror': - - juju add-relation ceph-mon-a ceph-rbd-mirror-a:ceph-local - juju add-relation ceph-mon-a ceph-rbd-mirror-b:ceph-remote - -Perform the analogous procedure for the 'ceph-mon' of site 'b': - - juju add-relation ceph-mon-b ceph-rbd-mirror-b:ceph-local - juju add-relation ceph-mon-b ceph-rbd-mirror-a:ceph-remote - -## Using two models - -In model 'site-a', deploy the charm and add the local relation: - - juju switch site-a - juju deploy ceph-rbd-mirror ceph-rbd-mirror-a - juju add-relation ceph-mon-a ceph-rbd-mirror-a:ceph-local - -To create the inter-site relation one must export one of the application -endpoints from the model by means of an "offer". 
Here, we make an offer for -'ceph-rbd-mirror': - - juju offer ceph-rbd-mirror-a:ceph-remote - Application "ceph-rbd-mirror-a" endpoints [ceph-remote] available at "admin/site-a.ceph-rbd-mirror-a" - -Perform the analogous procedure in the other model ('site-b'): - - juju switch site-b - juju deploy ceph-rbd-mirror ceph-rbd-mirror-b - juju add-relation ceph-mon-b ceph-rbd-mirror-b:ceph-local - juju offer ceph-rbd-mirror-b:ceph-remote - application "ceph-rbd-mirror-b" endpoints [ceph-remote] available at "admin/site-b.ceph-rbd-mirror-b" - -Add the *cross model relations* by referring to the offer URLs (included in the -output above) as if they were application endpoints in each respective model. + these features by default and the ceph-mon charm will announce them over the + `client` relation when it has units connected to its `rbd-mirror` endpoint. -For site 'a': - - juju switch site-a - juju add-relation ceph-mon-a admin/site-b.ceph-rbd-mirror-b - -For site 'b': - - juju switch site-b - juju add-relation ceph-mon-b admin/site-a.ceph-rbd-mirror-a +* The feature first appeared in Ceph Luminous (OpenStack Queens). # Usage -Usage procedures covered here touch upon pool creation, failover & fallback, -and recovery. In all cases we presuppose that each cluster resides within a -separate model. - -## Pools - -As of the 19.04 OpenStack Charms release, due to Ceph Luminous, any pool -associated with the RBD application during its creation will automatically be -labelled with the 'rbd' tag. The following occurs together: - -Pool creation ==> RBD application-association ==> 'rbd' tag - -RBD pools can be created by either a supporting charm (through the Ceph broker -protocol) or manually by the operator: - -1. A charm-created pool (e.g. via `glance`) will automatically be detected and -acted upon (i.e. a remote pool will be set up). - -1. A manually-created pool, whether done via the `ceph-mon` application or -through Ceph directly, will require an action to be run on the -`ceph-rbd-mirror` application leader in order for the remote pool to come -online. - -For example, a pool is created manually in site 'a' via `ceph-mon` and then -`ceph-rbd-mirror` (of site 'a') is informed about it: - - juju run-action -m site-a ceph-mon-a/leader --wait create-pool name=mypool app-name=rbd - juju run-action -m site-a ceph-rbd-mirror-a/leader --wait refresh-pools - -## Failover and fallback - -To manage failover and fallback, the `demote` and `promote` actions are applied -to the `ceph-rbd-mirror` application leader. - -Here, we fail over from site 'a' to site 'b' by demoting site 'a' and promoting -site 'b'. The rest of the commands are status checks: - - juju run-action -m site-a ceph-rbd-mirror-a/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror-b/leader --wait status verbose=True - - juju run-action -m site-a ceph-rbd-mirror-a/leader --wait demote - - juju run-action -m site-a ceph-rbd-mirror-a/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror-b/leader --wait status verbose=True - - juju run-action -m site-b ceph-rbd-mirror-b/leader --wait promote - -To fall back to site 'a': - - juju run-action -m site-b ceph-rbd-mirror-b/leader --wait demote - juju run-action -m site-a ceph-rbd-mirror-a/leader --wait promote - -> **Note**: When using Ceph Luminous, the mirror status information may not be - accurate. Specifically, the `entries_behind_master` counter may never get to - `0` even though the image has been fully synchronised. 
+## Configuration -## Recovering from abrupt shutdown +See file `config.yaml` of the built charm (or see the charm in the [Charm +Store][cs-ceph-rbd-mirror]) for the full list of configuration options, along +with their descriptions and default values. See the [Juju +documentation][juju-docs-config-apps] for details on configuring applications. -It is possible that an abrupt shutdown and/or an interruption to communication -channels may lead to a "split-brain" condition. This may cause the mirroring -daemon in each cluster to claim to be the primary. In such cases, the operator -must make a call as to which daemon is correct. Generally speaking, this -means deciding which cluster has the most recent data. +## Deployment -Elect a primary by applying the `demote` and `promote` actions to the -appropriate `ceph-rbd-mirror` leader. After doing so, the `resync-pools` -action must be run on the secondary cluster leader. The `promote` action may -require a force option. +A standard topology consists of two Ceph clusters with each cluster residing in +a separate Juju model. The deployment steps are a fairly involved and are +therefore covered under [Ceph RBD Mirroring][cdg-rbd-mirroring] in the +[OpenStack Charms Deployment Guide][cdg]. -Here, we make site 'a' be the primary by demoting site 'b' and promoting site -'a': +## Actions - juju run-action -m site-b ceph-rbd-mirror/leader --wait demote - juju run-action -m site-a ceph-rbd-mirror/leader --wait promote force=True +This section lists Juju [actions][juju-docs-actions] supported by the charm. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-rbd-mirror`. If the charm is +not deployed then see file `actions.yaml`. - juju run-action -m site-a ceph-rbd-mirror/leader --wait status verbose=True - juju run-action -m site-b ceph-rbd-mirror/leader --wait status verbose=True +* `copy-pool` +* `demote` +* `promote` +* `refresh-pools` +* `resync-pools` +* `status` - juju run-action -m site-b ceph-rbd-mirror/leader --wait resync-pools i-really-mean-it=True +## Operations -> **Note**: When using Ceph Luminous, the mirror state information will not be - accurate after recovering from unclean shutdown. Regardless of the output of - the status information, you will be able to write to images after a forced - promote. +Operational procedures touch upon pool creation, failover & fallback, and +recovering from an abrupt shutdown. These topics are also covered under [Ceph +RBD Mirroring][cdg-rbd-mirroring] in the [OpenStack Charms Deployment +Guide][cdg]. # Bugs -Please report bugs for the `ceph-rbd-mirror` charm on [Launchpad][charm-ceph-rbd-mirror-bugs]. +Please report bugs on [Launchpad][lp-bugs-charm-ceph-rbd-mirror]. -For general questions, refer to the [OpenStack Charm Guide][charms-guide]. +For general charm questions refer to the [OpenStack Charm Guide][cg]. 
+[cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/index.html +[ceph-upstream]: https://ceph.io [ceph-multisite-replication]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html -[ceph-rbd-mirroring]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-rbd-mirror.html -[charms-deploy-guide]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/index.html -[charm-ceph-rbd-mirror-bugs]: https://bugs.launchpad.net/charm-ceph-rbd-mirror/+filebug -[charms-guide]: https://docs.openstack.org/charm-guide/latest/ +[cdg-rbd-mirroring]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-rbd-mirror.html +[lp-bugs-charm-ceph-rbd-mirror]: https://bugs.launchpad.net/charm-ceph-rbd-mirror/+filebug +[juju-docs-actions]: https://jaas.ai/docs/actions +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[cs-ceph-rbd-mirror]: https://jaas.ai/ceph-rbd-mirror From 1e189bbacfeaae4b6190fc46d8ce19594f922af1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 2 Sep 2020 06:52:06 +0000 Subject: [PATCH 2051/2699] Add doc strings Add doc strings and a small amount of tidy-up too. Change-Id: Iadd8bb5c08453b5650bb166d559ef942571931ba --- ceph-iscsi/src/charm.py | 90 +++++++++++++++++++++++++++++------------ 1 file changed, 65 insertions(+), 25 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 53d81c11..2d25fcfc 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -1,5 +1,21 @@ #!/usr/bin/env python3 +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Charm for deploying and maintaining the Ceph iSCSI service.""" + import socket import logging import os @@ -30,42 +46,58 @@ class CephClientAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter): - - def __init__(self, relation): - super(CephClientAdapter, self).__init__(relation) + """Adapter for ceph client interface.""" @property def mon_hosts(self): + """Sorted list of ceph mon addresses. + + :returns: Ceph MON addresses. + :rtype: str + """ hosts = self.relation.get_relation_data()['mon_hosts'] return ' '.join(sorted(hosts)) @property def auth_supported(self): + """Authention type. + + :returns: Authention type + :rtype: str + """ return self.relation.get_relation_data()['auth'] @property def key(self): - return self.relation.get_relation_data()['key'] - - -class PeerAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter): - - def __init__(self, relation): - super(PeerAdapter, self).__init__(relation) + """Key client should use when communicating with Ceph cluster. 
+ :returns: Key + :rtype: str + """ + return self.relation.get_relation_data()['key'] -class GatewayClientPeerAdapter(PeerAdapter): - def __init__(self, relation): - super(GatewayClientPeerAdapter, self).__init__(relation) +class GatewayClientPeerAdapter( + ops_openstack.adapters.OpenStackOperRelationAdapter): + """Adapter for Ceph iSCSI peer interface.""" @property def gw_hosts(self): + """List of peer addresses. + + :returns: Ceph iSCSI peer addresses. + :rtype: str + """ hosts = self.relation.peer_addresses return ' '.join(sorted(hosts)) @property def trusted_ips(self): + """List of IP addresses permitted to use API. + + :returns: Ceph iSCSI trusted ips. + :rtype: str + """ ips = self.allowed_ips ips.extend(self.relation.peer_addresses) return ' '.join(sorted(ips)) @@ -73,12 +105,15 @@ def trusted_ips(self): class TLSCertificatesAdapter( ops_openstack.adapters.OpenStackOperRelationAdapter): - - def __init__(self, relation): - super(TLSCertificatesAdapter, self).__init__(relation) + """Adapter for Ceph TLS Certificates interface.""" @property def enable_tls(self): + """Whether to enable TLS. + + :returns: Whether TLS should be enabled + :rtype: bool + """ try: return bool(self.relation.application_certificate) except ca_client.CAClientError: @@ -87,6 +122,7 @@ def enable_tls(self): class CephISCSIGatewayAdapters( ops_openstack.adapters.OpenStackRelationAdapters): + """Collection of relation adapters.""" relation_adapters = { 'ceph-client': CephClientAdapter, @@ -96,6 +132,7 @@ class CephISCSIGatewayAdapters( class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): + """Ceph iSCSI Base Charm.""" state = StoredState() PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] @@ -133,6 +170,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): release = 'default' def __init__(self, framework): + """Setup adapters and observers.""" super().__init__(framework) logging.info("Using {} class".format(self.release)) self.state.set_default( @@ -182,6 +220,7 @@ def __init__(self, framework): self.on_add_trusted_ip_action) def on_install(self, event): + """Install packages and check substrate is supported.""" if ch_host.is_container(): logging.info("Installing into a container is not supported") self.update_status() @@ -189,6 +228,7 @@ def on_install(self, event): self.install_pkgs() def on_has_peers(self, event): + """Setup and share admin password.""" logging.info("Unit has peers") if self.unit.is_leader() and not self.peers.admin_password: logging.info("Setting admin password") @@ -197,6 +237,7 @@ def on_has_peers(self, event): self.peers.set_admin_password(password) def request_ceph_pool(self, event): + """Request pools from Ceph cluster.""" logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( self.model.config['rbd-metadata-pool']) @@ -209,17 +250,17 @@ def request_ceph_pool(self, event): 'osd heartbeat interval': 5}) def refresh_request(self, event): + """Re-request Ceph pools and render config.""" self.render_config(event) self.request_ceph_pool(event) def render_config(self, event): + """Render config and restart services if config files change.""" if not self.peers.admin_password: logging.info("Defering setup") - print("Defering setup admin") event.defer() return if not self.ceph_client.pools_available: - print("Defering setup pools") logging.info("Defering setup") event.defer() return @@ -251,6 +292,7 @@ def _render_configs(): logging.info("on_pools_available: status updated") def on_ca_available(self, event): + """Request TLS 
certificates.""" addresses = set() for binding_name in ['public', 'cluster']: binding = self.model.get_binding(binding_name) @@ -261,6 +303,7 @@ def on_ca_available(self, event): self.ca_client.request_application_certificate(socket.getfqdn(), sans) def on_tls_app_config_ready(self, event): + """Configure TLS.""" self.TLS_KEY_PATH.write_bytes( self.ca_client.application_key.private_bytes( encoding=serialization.Encoding.PEM, @@ -290,6 +333,7 @@ def on_tls_app_config_ready(self, event): self.render_config(event) def custom_status_check(self): + """Custom update status checks.""" if ch_host.is_container(): self.unit.status = ops.model.BlockedStatus( 'Charm cannot be deployed into a container') @@ -303,6 +347,7 @@ def custom_status_check(self): # Actions def on_add_trusted_ip_action(self, event): + """Add an IP to the allowed list for API access.""" if self.unit.is_leader(): ips = event.params.get('ips').split() self.peers.set_allowed_ips( @@ -313,6 +358,7 @@ def on_add_trusted_ip_action(self, event): event.fail("Action must be run on leader") def on_create_target_action(self, event): + """Create an iSCSI taget.""" gw_client = gwcli_client.GatewayClient() target = event.params.get('iqn', self.DEFAULT_TARGET) gateway_units = event.params.get( @@ -347,15 +393,9 @@ def on_create_target_action(self, event): event.set_results({'iqn': target}) -@ops_openstack.core.charm_class -class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase): - - state = StoredState() - release = 'jewel' - - @ops_openstack.core.charm_class class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase): + """Ceph iSCSI Charm for Octopus.""" state = StoredState() release = 'octopus' From ac8f8476d80cd92bcc564486d1aa47957a8847a8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 2 Sep 2020 08:57:13 +0000 Subject: [PATCH 2052/2699] Avoid mutating self.allowed_ips in adapter Avoid mutating self.allowed_ips` in `GatewayClientPeerAdapter.trusted_ips` Change-Id: I371ba620794d9dc637832a4e02bf12237a63fca7 --- ceph-iscsi/src/charm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 2d25fcfc..7b48bc92 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -16,6 +16,7 @@ """Charm for deploying and maintaining the Ceph iSCSI service.""" +import copy import socket import logging import os @@ -98,7 +99,7 @@ def trusted_ips(self): :returns: Ceph iSCSI trusted ips. :rtype: str """ - ips = self.allowed_ips + ips = copy.deepcopy(self.allowed_ips) ips.extend(self.relation.peer_addresses) return ' '.join(sorted(ips)) From 81822bfc427ebe1f6c091e154d679b70f7d78141 Mon Sep 17 00:00:00 2001 From: Robert Gildein Date: Thu, 3 Sep 2020 09:57:57 +0200 Subject: [PATCH 2053/2699] Fix docstring typo Change-Id: I27869f160d637507115fbf3c193c2f10e55743f7 --- ceph-iscsi/src/charm.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 2d25fcfc..f8f68007 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -60,9 +60,9 @@ def mon_hosts(self): @property def auth_supported(self): - """Authention type. + """Authentication type. 
- :returns: Authention type + :returns: Authentication type :rtype: str """ return self.relation.get_relation_data()['auth'] @@ -358,7 +358,7 @@ def on_add_trusted_ip_action(self, event): event.fail("Action must be run on leader") def on_create_target_action(self, event): - """Create an iSCSI taget.""" + """Create an iSCSI target.""" gw_client = gwcli_client.GatewayClient() target = event.params.get('iqn', self.DEFAULT_TARGET) gateway_units = event.params.get( @@ -400,5 +400,6 @@ class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase): state = StoredState() release = 'octopus' + if __name__ == '__main__': main(ops_openstack.core.get_charm_class_for_release()) From b37bb154d1bf888d6311d9551f4aaf6cdd2552d7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 2 Sep 2020 07:02:13 +0000 Subject: [PATCH 2054/2699] Use '_stored' not 'state' As per issue 7 *1, switch to variable name '_stored' *1 https://github.com/openstack-charmers/charm-ceph-iscsi/issues/7 Depends-On: I52513ce2c25b03f2015835b96cccb3766806bceb Change-Id: Ic37c4ea10ba45d2c2f4c7f37e552d37188e7cc02 --- ceph-iscsi/requirements.txt | 6 +++--- ceph-iscsi/src/charm.py | 10 +++++----- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 8 ++++---- ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py | 10 +++++----- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index ff622ea3..f96906d3 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -1,6 +1,6 @@ # requirements git+https://github.com/juju/charm-helpers.git@87fc7ee5#egg=charmhelpers git+https://github.com/canonical/operator.git@0.8.0#egg=ops -git+https://opendev.org/openstack/charm-ops-interface-ceph-client@cc10f29d4#egg=interface_ceph_client -git+https://opendev.org/openstack/charm-ops-openstack@ea51b43e#egg=ops_openstack -git+https://opendev.org/openstack/charm-ops-interface-tls-certificates@2ec41b60#egg=ca_client +git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack +git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=ca_client diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 2d25fcfc..bc31ad14 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -134,7 +134,7 @@ class CephISCSIGatewayAdapters( class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): """Ceph iSCSI Base Charm.""" - state = StoredState() + _stored = StoredState() PACKAGES = ['ceph-iscsi', 'tcmu-runner', 'ceph-common'] CEPH_CAPABILITIES = [ "osd", "allow *", @@ -173,7 +173,7 @@ def __init__(self, framework): """Setup adapters and observers.""" super().__init__(framework) logging.info("Using {} class".format(self.release)) - self.state.set_default( + self._stored.set_default( target_created=False, enable_tls=False) self.ceph_client = ceph_client.CephClientRequires( @@ -287,7 +287,7 @@ def _render_configs(): _render_configs() logging.info("Setting started state") self.peers.announce_ready() - self.state.is_started = True + self._stored.is_started = True self.update_status() logging.info("on_pools_available: status updated") @@ -329,7 +329,7 @@ def on_tls_app_config_ready(self, event): format=serialization.PublicFormat.SubjectPublicKeyInfo, encoding=serialization.Encoding.PEM)) subprocess.check_call(['update-ca-certificates']) - self.state.enable_tls = True + self._stored.enable_tls = True self.render_config(event) def custom_status_check(self): @@ -397,7 
+397,7 @@ def on_create_target_action(self, event): class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase): """Ceph iSCSI Charm for Octopus.""" - state = StoredState() + _stored = StoredState() release = 'octopus' if __name__ == '__main__': diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index d509762a..b9613698 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -33,7 +33,7 @@ class CephISCSIGatewayPeerEvents(ObjectEvents): class CephISCSIGatewayPeers(Object): on = CephISCSIGatewayPeerEvents() - state = StoredState() + _stored = StoredState() PASSWORD_KEY = 'admin_password' READY_KEY = 'gateway_ready' FQDN_KEY = 'gateway_fqdn' @@ -43,7 +43,7 @@ def __init__(self, charm, relation_name): super().__init__(charm, relation_name) self.relation_name = relation_name self.this_unit = self.framework.model.unit - self.state.set_default( + self._stored.set_default( allowed_ips=[]) self.framework.observe( charm.on[relation_name].relation_changed, @@ -54,9 +54,9 @@ def on_changed(self, event): self.on.has_peers.emit() if self.ready_peer_details: self.on.ready_peers.emit() - if self.allowed_ips != self.state.allowed_ips: + if self.allowed_ips != self._stored.allowed_ips: self.on.allowed_ips_changed.emit() - self.state.allowed_ips = self.allowed_ips + self._stored.allowed_ips = self.allowed_ips def set_admin_password(self, password): logging.info("Setting admin password") diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index f8561f5d..9b2147a1 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -170,8 +170,8 @@ def network_get(self, endpoint_name, relation_id=None): def test_init(self): self.harness.begin() - self.assertFalse(self.harness.charm.state.target_created) - self.assertFalse(self.harness.charm.state.enable_tls) + self.assertFalse(self.harness.charm._stored.target_created) + self.assertFalse(self.harness.charm._stored.enable_tls) def add_cluster_relation(self): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') @@ -329,7 +329,7 @@ def test_on_pools_available(self): {'admin_password': 'existing password', 'gateway_ready': False}) self.harness.begin() - self.harness.charm.ceph_client.state.pools_available = True + self.harness.charm.ceph_client._stored.pools_available = True with patch.object(Path, 'mkdir') as mock_mkdir: self.harness.charm.ceph_client.on.pools_available.emit() mock_mkdir.assert_called_once_with(exist_ok=True, mode=488) @@ -340,7 +340,7 @@ def test_on_pools_available(self): 'ceph.client.ceph-iscsi.keyring', '/etc/ceph/iscsi/ceph.client.ceph-iscsi.keyring', ANY)], any_order=True) - self.assertTrue(self.harness.charm.state.is_started) + self.assertTrue(self.harness.charm._stored.is_started) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual(rel_data['gateway_ready'], 'True') @@ -399,7 +399,7 @@ def test_on_certificates_relation_changed(self, _gethostname): mock_TLS_PUB_KEY_PATH.write_bytes.assert_called_once() self.subprocess.check_call.assert_called_once_with( ['update-ca-certificates']) - self.assertTrue(self.harness.charm.state.enable_tls) + self.assertTrue(self.harness.charm._stored.enable_tls) def test_custom_status_check(self): self.harness.add_relation('ceph-client', 'ceph-mon') From 08d45b7ae62f9dc054867993a7b33568cd85b1a1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 2 Sep 2020 09:21:46 
+0000 Subject: [PATCH 2055/2699] Let the logging module to build the messages As per *1 let the logging module to build the messages for performance and robustness. *1 https://github.com/openstack-charmers/charm-ceph-iscsi/issues/11 Change-Id: I7f72419bb1d0bfde51716368da3b8ed0de470827 --- ceph-iscsi/src/charm.py | 2 +- ceph-iscsi/src/interface_ceph_iscsi_peer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index ff97a962..633b07bf 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -172,7 +172,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): def __init__(self, framework): """Setup adapters and observers.""" super().__init__(framework) - logging.info("Using {} class".format(self.release)) + logging.info("Using %s class", self.release) self._stored.set_default( target_created=False, enable_tls=False) diff --git a/ceph-iscsi/src/interface_ceph_iscsi_peer.py b/ceph-iscsi/src/interface_ceph_iscsi_peer.py index b9613698..a7f7ecbe 100644 --- a/ceph-iscsi/src/interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/src/interface_ceph_iscsi_peer.py @@ -63,13 +63,13 @@ def set_admin_password(self, password): self.peer_rel.data[self.peer_rel.app][self.PASSWORD_KEY] = password def set_allowed_ips(self, ips, append=True): - logging.info("Setting allowed ips: {}".format(append)) trusted_ips = [] if append and self.allowed_ips: trusted_ips = self.allowed_ips trusted_ips.extend(ips) trusted_ips = sorted(list(set(trusted_ips))) ip_str = json.dumps(trusted_ips) + logging.info("Setting allowed ips to: %s", ip_str) self.peer_rel.data[self.peer_rel.app][self.ALLOWED_IPS_KEY] = ip_str def announce_ready(self): From 3470a868f055c47c0eb0357de611b98d62dc12e0 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 4 Sep 2020 16:58:11 +0200 Subject: [PATCH 2056/2699] Ensure that the actions use the configured admin user Change-Id: I357b26fb585d68d2fbe9bbf21bbf9bfc52ac5050 Closes-Bug: #1893790 --- ceph-proxy/actions/create-cache-tier | 10 +++++----- ceph-proxy/actions/create-cache-tier.py | 9 +++++---- ceph-proxy/actions/create-erasure-profile | 12 ++++++------ ceph-proxy/actions/create-pool | 7 ++++--- ceph-proxy/actions/delete-erasure-profile | 4 ++-- ceph-proxy/actions/get-erasure-profile | 4 ++-- ceph-proxy/actions/list-erasure-profiles | 4 ++-- ceph-proxy/actions/list-pools | 4 ++-- ceph-proxy/actions/pool-get | 4 ++-- ceph-proxy/actions/pool-set | 4 ++-- ceph-proxy/actions/pool-statistics | 4 ++-- ceph-proxy/actions/remove-cache-tier | 9 +++++---- ceph-proxy/actions/remove-cache-tier.py | 10 +++++----- ceph-proxy/actions/remove-pool-snapshot | 4 ++-- ceph-proxy/actions/rename-pool | 4 ++-- ceph-proxy/actions/set-pool-max-bytes | 4 ++-- ceph-proxy/actions/snapshot-pool | 4 ++-- 17 files changed, 52 insertions(+), 49 deletions(-) diff --git a/ceph-proxy/actions/create-cache-tier b/ceph-proxy/actions/create-cache-tier index e8170cf2..51a6518c 100755 --- a/ceph-proxy/actions/create-cache-tier +++ b/ceph-proxy/actions/create-cache-tier @@ -6,28 +6,28 @@ import sys sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail def make_cache_tier(): backer_pool = action_get("backer-pool") cache_pool = action_get("cache-pool") cache_mode = action_get("cache-mode") - + user = config('admin-user') # Pre flight checks - if not 
pool_exists('admin', backer_pool): + if not pool_exists(user, backer_pool): log("Please create {} pool before calling create-cache-tier".format( backer_pool)) action_fail("create-cache-tier failed. Backer pool {} must exist " "before calling this".format(backer_pool)) - if not pool_exists('admin', cache_pool): + if not pool_exists(user, cache_pool): log("Please create {} pool before calling create-cache-tier".format( cache_pool)) action_fail("create-cache-tier failed. Cache pool {} must exist " "before calling this".format(cache_pool)) - pool = Pool(service='admin', name=backer_pool) + pool = Pool(service=user, name=backer_pool) try: pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) except CalledProcessError as err: diff --git a/ceph-proxy/actions/create-cache-tier.py b/ceph-proxy/actions/create-cache-tier.py index 928e9418..97a1d1ef 100755 --- a/ceph-proxy/actions/create-cache-tier.py +++ b/ceph-proxy/actions/create-cache-tier.py @@ -18,28 +18,29 @@ def _add_path(path): from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail def make_cache_tier(): backer_pool = action_get("backer-pool") cache_pool = action_get("cache-pool") cache_mode = action_get("cache-mode") + user = config('admin-user') # Pre flight checks - if not pool_exists('admin', backer_pool): + if not pool_exists(user, backer_pool): log("Please create {} pool before calling create-cache-tier".format( backer_pool)) action_fail("create-cache-tier failed. Backer pool {} must exist " "before calling this".format(backer_pool)) - if not pool_exists('admin', cache_pool): + if not pool_exists(user, cache_pool): log("Please create {} pool before calling create-cache-tier".format( cache_pool)) action_fail("create-cache-tier failed. 
Cache pool {} must exist " "before calling this".format(cache_pool)) - pool = Pool(service='admin', name=backer_pool) + pool = Pool(service=user, name=backer_pool) try: pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) except CalledProcessError as err: diff --git a/ceph-proxy/actions/create-erasure-profile b/ceph-proxy/actions/create-erasure-profile index 7400ccd3..016862c8 100755 --- a/ceph-proxy/actions/create-erasure-profile +++ b/ceph-proxy/actions/create-erasure-profile @@ -17,14 +17,14 @@ _add_path(_root) from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail def make_erasure_profile(): name = action_get("name") plugin = action_get("plugin") failure_domain = action_get("failure-domain") - + user = config('admin-user') # jerasure requires k+m # isa requires k+m # local requires k+m+l @@ -34,7 +34,7 @@ def make_erasure_profile(): k = action_get("data-chunks") m = action_get("coding-chunks") try: - create_erasure_profile(service='admin', + create_erasure_profile(service=user, erasure_plugin_name=plugin, profile_name=name, data_chunks=k, @@ -48,7 +48,7 @@ def make_erasure_profile(): k = action_get("data-chunks") m = action_get("coding-chunks") try: - create_erasure_profile(service='admin', + create_erasure_profile(service=user, erasure_plugin_name=plugin, profile_name=name, data_chunks=k, @@ -63,7 +63,7 @@ def make_erasure_profile(): m = action_get("coding-chunks") l = action_get("locality-chunks") try: - create_erasure_profile(service='admin', + create_erasure_profile(service=user, erasure_plugin_name=plugin, profile_name=name, data_chunks=k, @@ -79,7 +79,7 @@ def make_erasure_profile(): m = action_get("coding-chunks") c = action_get("durability-estimator") try: - create_erasure_profile(service='admin', + create_erasure_profile(service=user, erasure_plugin_name=plugin, profile_name=name, data_chunks=k, diff --git a/ceph-proxy/actions/create-pool b/ceph-proxy/actions/create-pool index 0dd0be36..ee6a7798 100755 --- a/ceph-proxy/actions/create-pool +++ b/ceph-proxy/actions/create-pool @@ -15,18 +15,19 @@ _add_path(_hooks) _add_path(_root) from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool def create_pool(): pool_name = action_get("name") pool_type = action_get("pool-type") + user = config('admin-user') try: if pool_type == "replicated": replicas = action_get("replicas") replicated_pool = ReplicatedPool(name=pool_name, - service='admin', + service=user, replicas=replicas) replicated_pool.create() @@ -34,7 +35,7 @@ def create_pool(): crush_profile_name = action_get("erasure-profile-name") erasure_pool = ErasurePool(name=pool_name, erasure_code_profile=crush_profile_name, - service='admin') + service=user) erasure_pool.create() else: log("Unknown pool type of {}. 
Only erasure or replicated is " diff --git a/ceph-proxy/actions/delete-erasure-profile b/ceph-proxy/actions/delete-erasure-profile index 8651d07a..7df8c445 100755 --- a/ceph-proxy/actions/delete-erasure-profile +++ b/ceph-proxy/actions/delete-erasure-profile @@ -18,14 +18,14 @@ _add_path(_hooks) _add_path(_root) from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail def delete_erasure_profile(): name = action_get("name") try: - remove_erasure_profile(service='admin', profile_name=name) + remove_erasure_profile(service=config('admin-user'), profile_name=name) except CalledProcessError as e: action_fail("Remove erasure profile failed with error: {}".format( e.message)) diff --git a/ceph-proxy/actions/get-erasure-profile b/ceph-proxy/actions/get-erasure-profile index 39947bb5..1f6b311d 100755 --- a/ceph-proxy/actions/get-erasure-profile +++ b/ceph-proxy/actions/get-erasure-profile @@ -16,12 +16,12 @@ _add_path(_hooks) _add_path(_root) from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile -from charmhelpers.core.hookenv import action_get, action_set +from charmhelpers.core.hookenv import action_get, action_set, config def make_erasure_profile(): name = action_get("name") - out = get_erasure_profile(service='admin', name=name) + out = get_erasure_profile(service=config('admin-user'), name=name) action_set({'message': out}) diff --git a/ceph-proxy/actions/list-erasure-profiles b/ceph-proxy/actions/list-erasure-profiles index fd0586fb..caaa68c4 100755 --- a/ceph-proxy/actions/list-erasure-profiles +++ b/ceph-proxy/actions/list-erasure-profiles @@ -16,13 +16,13 @@ def _add_path(path): _add_path(_hooks) _add_path(_root) -from charmhelpers.core.hookenv import action_get, log, action_set, action_fail +from charmhelpers.core.hookenv import action_get, log, config, action_set, action_fail if __name__ == '__main__': name = action_get("name") try: out = check_output(['ceph', - '--id', 'admin', + '--id', config('admin-user'), 'osd', 'erasure-code-profile', 'ls']).decode('UTF-8') diff --git a/ceph-proxy/actions/list-pools b/ceph-proxy/actions/list-pools index 67c1aed0..401619cd 100755 --- a/ceph-proxy/actions/list-pools +++ b/ceph-proxy/actions/list-pools @@ -16,11 +16,11 @@ def _add_path(path): _add_path(_hooks) _add_path(_root) -from charmhelpers.core.hookenv import log, action_set, action_fail +from charmhelpers.core.hookenv import log, config, action_set, action_fail if __name__ == '__main__': try: - out = check_output(['ceph', '--id', 'admin', + out = check_output(['ceph', '--id', config('admin-user'), 'osd', 'lspools']).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: diff --git a/ceph-proxy/actions/pool-get b/ceph-proxy/actions/pool-get index 3a42ab4d..f1a5077d 100755 --- a/ceph-proxy/actions/pool-get +++ b/ceph-proxy/actions/pool-get @@ -16,13 +16,13 @@ def _add_path(path): _add_path(_hooks) _add_path(_root) -from charmhelpers.core.hookenv import log, action_set, action_get, action_fail +from charmhelpers.core.hookenv import log, config, action_set, action_get, action_fail if __name__ == '__main__': name = action_get('pool-name') key = action_get('key') try: - out = check_output(['ceph', '--id', 'admin', + out = check_output(['ceph', '--id', config('admin-user'), 'osd', 'pool', 'get', name, key]).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: diff --git 
a/ceph-proxy/actions/pool-set b/ceph-proxy/actions/pool-set index 8963c908..44874eb2 100755 --- a/ceph-proxy/actions/pool-set +++ b/ceph-proxy/actions/pool-set @@ -15,7 +15,7 @@ def _add_path(path): _add_path(_hooks) _add_path(_root) -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from ceph_broker import handle_set_pool_value if __name__ == '__main__': @@ -27,7 +27,7 @@ if __name__ == '__main__': 'value': value} try: - handle_set_pool_value(service='admin', request=request) + handle_set_pool_value(service=config('admin-user'), request=request) except CalledProcessError as e: log(str(e)) action_fail("Setting pool key: {} and value: {} failed with " diff --git a/ceph-proxy/actions/pool-statistics b/ceph-proxy/actions/pool-statistics index 403267f3..56e56a7a 100755 --- a/ceph-proxy/actions/pool-statistics +++ b/ceph-proxy/actions/pool-statistics @@ -15,11 +15,11 @@ _add_path(_hooks) _add_path(_root) from subprocess import check_output, CalledProcessError -from charmhelpers.core.hookenv import log, action_set, action_fail +from charmhelpers.core.hookenv import log, config, action_set, action_fail if __name__ == '__main__': try: - out = check_output(['ceph', '--id', 'admin', + out = check_output(['ceph', '--id', config('admin-user'), 'df']).decode('UTF-8') action_set({'message': out}) except CalledProcessError as e: diff --git a/ceph-proxy/actions/remove-cache-tier b/ceph-proxy/actions/remove-cache-tier index 79db9cf7..215704d3 100755 --- a/ceph-proxy/actions/remove-cache-tier +++ b/ceph-proxy/actions/remove-cache-tier @@ -5,7 +5,7 @@ import sys sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail __author__ = 'chris' @@ -13,21 +13,22 @@ __author__ = 'chris' def delete_cache_tier(): backer_pool = action_get("backer-pool") cache_pool = action_get("cache-pool") + user = config('admin-user') # Pre flight checks - if not pool_exists('admin', backer_pool): + if not pool_exists(user, backer_pool): log("Backer pool {} must exist before calling this".format( backer_pool)) action_fail("remove-cache-tier failed. Backer pool {} must exist " "before calling this".format(backer_pool)) - if not pool_exists('admin', cache_pool): + if not pool_exists(user, cache_pool): log("Cache pool {} must exist before calling this".format( cache_pool)) action_fail("remove-cache-tier failed. 
Cache pool {} must exist " "before calling this".format(cache_pool)) - pool = Pool(service='admin', name=backer_pool) + pool = Pool(service=user, name=backer_pool) try: pool.remove_cache_tier(cache_pool=cache_pool) except CalledProcessError as err: diff --git a/ceph-proxy/actions/remove-cache-tier.py b/ceph-proxy/actions/remove-cache-tier.py index 8c9b9375..a6f8f2b6 100755 --- a/ceph-proxy/actions/remove-cache-tier.py +++ b/ceph-proxy/actions/remove-cache-tier.py @@ -16,7 +16,7 @@ def _add_path(path): _add_path(_root) from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail __author__ = 'chris' @@ -24,21 +24,21 @@ def _add_path(path): def delete_cache_tier(): backer_pool = action_get("backer-pool") cache_pool = action_get("cache-pool") - + user = config('admin-user') # Pre flight checks - if not pool_exists('admin', backer_pool): + if not pool_exists(user, backer_pool): log("Backer pool {} must exist before calling this".format( backer_pool)) action_fail("remove-cache-tier failed. Backer pool {} must exist " "before calling this".format(backer_pool)) - if not pool_exists('admin', cache_pool): + if not pool_exists(user, cache_pool): log("Cache pool {} must exist before calling this".format( cache_pool)) action_fail("remove-cache-tier failed. Cache pool {} must exist " "before calling this".format(cache_pool)) - pool = Pool(service='admin', name=backer_pool) + pool = Pool(service=user, name=backer_pool) try: pool.remove_cache_tier(cache_pool=cache_pool) except CalledProcessError as err: diff --git a/ceph-proxy/actions/remove-pool-snapshot b/ceph-proxy/actions/remove-pool-snapshot index 645ff07f..7569db5c 100755 --- a/ceph-proxy/actions/remove-pool-snapshot +++ b/ceph-proxy/actions/remove-pool-snapshot @@ -15,14 +15,14 @@ _add_path(_hooks) _add_path(_root) from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot if __name__ == '__main__': name = action_get("pool-name") snapname = action_get("snapshot-name") try: - remove_pool_snapshot(service='admin', + remove_pool_snapshot(service=config('admin-user'), pool_name=name, snapshot_name=snapname) except CalledProcessError as e: diff --git a/ceph-proxy/actions/rename-pool b/ceph-proxy/actions/rename-pool index 3301830f..c8508b78 100755 --- a/ceph-proxy/actions/rename-pool +++ b/ceph-proxy/actions/rename-pool @@ -15,14 +15,14 @@ _add_path(_hooks) _add_path(_root) from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from charmhelpers.contrib.storage.linux.ceph import rename_pool if __name__ == '__main__': name = action_get("pool-name") new_name = action_get("new-name") try: - rename_pool(service='admin', old_name=name, new_name=new_name) + rename_pool(service=config('admin-user'), old_name=name, new_name=new_name) except CalledProcessError as e: log(str(e)) action_fail("Renaming pool failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/set-pool-max-bytes b/ceph-proxy/actions/set-pool-max-bytes index c1550d41..91196b3e 100755 --- a/ceph-proxy/actions/set-pool-max-bytes +++ b/ceph-proxy/actions/set-pool-max-bytes @@ -15,14 +15,14 @@ 
_add_path(_hooks) _add_path(_root) from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from charmhelpers.contrib.storage.linux.ceph import set_pool_quota if __name__ == '__main__': max_bytes = action_get("max") name = action_get("pool-name") try: - set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes) + set_pool_quota(service=config('admin-user'), pool_name=name, max_bytes=max_bytes) except CalledProcessError as e: log(str(e)) action_fail("Set pool quota failed with message: {}".format(str(e))) diff --git a/ceph-proxy/actions/snapshot-pool b/ceph-proxy/actions/snapshot-pool index 0191bcc9..3eb6926e 100755 --- a/ceph-proxy/actions/snapshot-pool +++ b/ceph-proxy/actions/snapshot-pool @@ -15,14 +15,14 @@ _add_path(_hooks) _add_path(_root) from subprocess import CalledProcessError -from charmhelpers.core.hookenv import action_get, log, action_fail +from charmhelpers.core.hookenv import action_get, config, log, action_fail from charmhelpers.contrib.storage.linux.ceph import snapshot_pool if __name__ == '__main__': name = action_get("pool-name") snapname = action_get("snapshot-name") try: - snapshot_pool(service='admin', + snapshot_pool(service=config('admin-user'), pool_name=name, snapshot_name=snapname) except CalledProcessError as e: From 24304eb8fb91f06eae652cf6c03882050732be32 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 7 Sep 2020 09:12:42 +0200 Subject: [PATCH 2057/2699] Remove duplicated action files Closes-Bug #1894600 Change-Id: I74c737480c6ad56d5691689caeb84bca7154be38 --- ceph-proxy/actions/create-cache-tier | 17 +++++++- ceph-proxy/actions/create-cache-tier.py | 54 ------------------------- ceph-proxy/actions/remove-cache-tier | 20 ++++++--- ceph-proxy/actions/remove-cache-tier.py | 52 ------------------------ 4 files changed, 30 insertions(+), 113 deletions(-) delete mode 100755 ceph-proxy/actions/create-cache-tier.py delete mode 100755 ceph-proxy/actions/remove-cache-tier.py diff --git a/ceph-proxy/actions/create-cache-tier b/ceph-proxy/actions/create-cache-tier index 51a6518c..97a1d1ef 100755 --- a/ceph-proxy/actions/create-cache-tier +++ b/ceph-proxy/actions/create-cache-tier @@ -1,9 +1,21 @@ -#!/usr/bin/python +#!/usr/bin/env python3 __author__ = 'chris' +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, config, log, action_fail @@ -14,6 +26,7 @@ def make_cache_tier(): cache_pool = action_get("cache-pool") cache_mode = action_get("cache-mode") user = config('admin-user') + # Pre flight checks if not pool_exists(user, backer_pool): log("Please create {} pool before calling create-cache-tier".format( diff --git a/ceph-proxy/actions/create-cache-tier.py b/ceph-proxy/actions/create-cache-tier.py deleted file mode 100755 index 97a1d1ef..00000000 --- a/ceph-proxy/actions/create-cache-tier.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 -__author__ = 'chris' -import os -from subprocess import CalledProcessError -import sys - -_path = os.path.dirname(os.path.realpath(__file__)) 
-_hooks = os.path.abspath(os.path.join(_path, '../hooks')) -_root = os.path.abspath(os.path.join(_path, '..')) - - -def _add_path(path): - if path not in sys.path: - sys.path.insert(1, path) - -_add_path(_hooks) -_add_path(_root) - - -from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, config, log, action_fail - - -def make_cache_tier(): - backer_pool = action_get("backer-pool") - cache_pool = action_get("cache-pool") - cache_mode = action_get("cache-mode") - user = config('admin-user') - - # Pre flight checks - if not pool_exists(user, backer_pool): - log("Please create {} pool before calling create-cache-tier".format( - backer_pool)) - action_fail("create-cache-tier failed. Backer pool {} must exist " - "before calling this".format(backer_pool)) - - if not pool_exists(user, cache_pool): - log("Please create {} pool before calling create-cache-tier".format( - cache_pool)) - action_fail("create-cache-tier failed. Cache pool {} must exist " - "before calling this".format(cache_pool)) - - pool = Pool(service=user, name=backer_pool) - try: - pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) - except CalledProcessError as err: - log("Add cache tier failed with message: {}".format( - err.message)) - action_fail("create-cache-tier failed. Add cache tier failed with " - "message: {}".format(err.message)) - - -if __name__ == '__main__': - make_cache_tier() diff --git a/ceph-proxy/actions/remove-cache-tier b/ceph-proxy/actions/remove-cache-tier index 215704d3..a6f8f2b6 100755 --- a/ceph-proxy/actions/remove-cache-tier +++ b/ceph-proxy/actions/remove-cache-tier @@ -1,8 +1,19 @@ -#!/usr/bin/python +#!/usr/bin/env python3 +import os from subprocess import CalledProcessError import sys -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, config, log, action_fail @@ -14,7 +25,6 @@ def delete_cache_tier(): backer_pool = action_get("backer-pool") cache_pool = action_get("cache-pool") user = config('admin-user') - # Pre flight checks if not pool_exists(user, backer_pool): log("Backer pool {} must exist before calling this".format( @@ -33,9 +43,9 @@ def delete_cache_tier(): pool.remove_cache_tier(cache_pool=cache_pool) except CalledProcessError as err: log("Removing the cache tier failed with message: {}".format( - err.message)) + str(err))) action_fail("remove-cache-tier failed. 
Removing the cache tier failed " - "with message: {}".format(err.message)) + "with message: {}".format(str(err))) if __name__ == '__main__': diff --git a/ceph-proxy/actions/remove-cache-tier.py b/ceph-proxy/actions/remove-cache-tier.py deleted file mode 100755 index a6f8f2b6..00000000 --- a/ceph-proxy/actions/remove-cache-tier.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 -import os -from subprocess import CalledProcessError -import sys - -_path = os.path.dirname(os.path.realpath(__file__)) -_hooks = os.path.abspath(os.path.join(_path, '../hooks')) -_root = os.path.abspath(os.path.join(_path, '..')) - - -def _add_path(path): - if path not in sys.path: - sys.path.insert(1, path) - -_add_path(_hooks) -_add_path(_root) - -from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists -from charmhelpers.core.hookenv import action_get, config, log, action_fail - -__author__ = 'chris' - - -def delete_cache_tier(): - backer_pool = action_get("backer-pool") - cache_pool = action_get("cache-pool") - user = config('admin-user') - # Pre flight checks - if not pool_exists(user, backer_pool): - log("Backer pool {} must exist before calling this".format( - backer_pool)) - action_fail("remove-cache-tier failed. Backer pool {} must exist " - "before calling this".format(backer_pool)) - - if not pool_exists(user, cache_pool): - log("Cache pool {} must exist before calling this".format( - cache_pool)) - action_fail("remove-cache-tier failed. Cache pool {} must exist " - "before calling this".format(cache_pool)) - - pool = Pool(service=user, name=backer_pool) - try: - pool.remove_cache_tier(cache_pool=cache_pool) - except CalledProcessError as err: - log("Removing the cache tier failed with message: {}".format( - str(err))) - action_fail("remove-cache-tier failed. Removing the cache tier failed " - "with message: {}".format(str(err))) - - -if __name__ == '__main__': - delete_cache_tier() From 029bd74db9ee4162fdc489debed1caecf4eed97b Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 28 Aug 2020 14:05:05 +0200 Subject: [PATCH 2058/2699] Add focal-victoria to the test gate Also add erasure coded Victoria bundles Change-Id: Ic809edab3d8fb60cb513bfaddd8f1373d969934e --- .../tests/bundles/focal-victoria-ec.yaml | 215 ++++++++++++++++++ .../tests/bundles/groovy-victoria-ec.yaml | 215 ++++++++++++++++++ ceph-proxy/tests/tests.yaml | 5 +- 3 files changed, 434 insertions(+), 1 deletion(-) create mode 100644 ceph-proxy/tests/bundles/focal-victoria-ec.yaml create mode 100644 ceph-proxy/tests/bundles/groovy-victoria-ec.yaml diff --git a/ceph-proxy/tests/bundles/focal-victoria-ec.yaml b/ceph-proxy/tests/bundles/focal-victoria-ec.yaml new file mode 100644 index 00000000..25f015fd --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-victoria-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/groovy-victoria-ec.yaml b/ceph-proxy/tests/bundles/groovy-victoria-ec.yaml new file mode 100644 index 00000000..b0b04d8f --- /dev/null +++ b/ceph-proxy/tests/bundles/groovy-victoria-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin distro + +series: groovy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 61324925..03536b99 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -22,6 +22,8 @@ gate_bundles: - bionic-ussuri - focal-ussuri - erasure-coded: focal-ussuri-ec + - focal-victoria + - erasure-coded: focal-victoria-ec dev_bundles: # Icehouse @@ -30,8 +32,8 @@ dev_bundles: - xenial-ocata # Pike - xenial-pike - - focal-victoria - groovy-victoria + - erasure-coded: groovy-victoria-ec smoke_bundles: - focal-ussuri @@ -56,3 +58,4 @@ target_deploy_status: tests_options: force_deploy: - groovy-victoria + - groovy-victoria-ec From 8945cc865b2f266e2f9085346815f09e2503459f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 10 Sep 2020 14:38:01 +0000 Subject: [PATCH 2059/2699] Sync charms.ceph to pick up EC fix for Ceph-fs Sync charms.ceph to pick up EC fix for Ceph-fs. This patch enables clients on the mds relation to request additional data pools to be added. Depends-On: I80b7a5cc87d7d53bb55d4d65999a0f9b3cdcb77d Change-Id: I48348ab2b6c8952c5e22008a074a60c1f35be952 --- ceph-mon/lib/charms_ceph/broker.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index 8f040a5e..25427697 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -750,6 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') + extra_pools = request.get('extra_pools', []) metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: @@ -758,14 +759,12 @@ def handle_create_cephfs(request, service): return {'exit-code': 1, 'stderr': msg} # Sanity check that the required pools exist - if not pool_exists(service=service, name=data_pool): - msg = "CephFS data pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if not pool_exists(service=service, name=metadata_pool): - msg = "CephFS metadata pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + for pool_name in [data_pool, metadata_pool] + extra_pools: + if not pool_exists(service=service, name=pool_name): + msg = "CephFS pool {} does not exist. 
Cannot create CephFS".format( + pool_name) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} if get_cephfs(service=service): # CephFS new has already been called @@ -786,6 +785,14 @@ def handle_create_cephfs(request, service): else: log(err.output, level=ERROR) return {'exit-code': 1, 'stderr': err.output} + for pool_name in extra_pools: + cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, + pool_name] + try: + check_output(cmd) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): From 5748956dda1e3a94b3ae3cb329fed061238d763c Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 11 Sep 2020 17:11:24 +0100 Subject: [PATCH 2060/2699] Ensure that assess-status hook doesn't error due to vault The assess-status hook can error out if the vault unit is offline whilst it is verifying the relation. This patchset ensures that it simply logs the error and puts up a warning saying it can't verify the relation. The state will 'fix itself' when vault comes back online. Change-Id: I9cb50be5d9d317c48ec3e3ae3ea8fed0687832dd --- ceph-osd/hooks/ceph_hooks.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index b002cd14..5f4e3ee0 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -23,6 +23,7 @@ import socket import subprocess import sys +import traceback sys.path.append('lib') import charms_ceph.utils as ceph @@ -825,9 +826,15 @@ def assess_status(): if not relation_ids('secrets-storage'): status_set('blocked', 'Missing relation: vault') return - if not vaultlocker.vault_relation_complete(): - status_set('waiting', 'Incomplete relation: vault') - return + try: + if not vaultlocker.vault_relation_complete(): + status_set('waiting', 'Incomplete relation: vault') + return + except Exception as e: + status_set('blocked', "Warning: couldn't verify vault relation") + log("Exception when verifying vault relation - maybe it was " + "offline?:\n{}".format(str(e))) + log("Traceback: {}".format(traceback.format_exc())) # Check for OSD device creation parity i.e. at least some devices # must have been presented and used for this charm to be operational From 94a1ccad4814637765a2f9fe7cf1cf89cbcd3a30 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 11 Sep 2020 13:23:05 +0000 Subject: [PATCH 2061/2699] Support for iSCSI targets backed by an EC pool Add support for creating an iscsi target backed by an EC pool. The associated testing pr adds a test so that an EC backed target is mounted in addition to a replication backed target. This change also includes renaming the config option pool-name to rbd-pool-name to make this charm more consistent with other charms that relate to ceph. 
Change-Id: I5772b4fc2415dd90029c3bde38bc57d490c9e910 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/412 --- ceph-iscsi/actions.yaml | 8 ++- ceph-iscsi/src/charm.py | 22 ++++++- ceph-iscsi/tests/bundles/focal.yaml | 13 ++++- .../unit_tests/test_ceph_iscsi_charm.py | 57 ++++++++++++++++++- 4 files changed, 92 insertions(+), 8 deletions(-) diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 6f918326..47a14749 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -40,10 +40,14 @@ create-target: type: string default: disk_1 description: "Image name " - pool-name: + rbd-pool-name: type: string default: iscsi description: "Name of ceph pool to use to back target " + ec-rbd-metadata-pool: + type: string + default: iscsi + description: "Name of the metadata pool to use with rbd-pool-name if rbd-pool-name is erasure coded." client-initiatorname: type: string description: "The initiator name of the client that will mount the target" @@ -54,7 +58,7 @@ create-target: type: string description: "The CHAPs password to be created for the client" required: - - pool-name + - rbd-pool-name - image-size - image-name - client-initiatorname diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index b492f66e..ce89a0a4 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -365,6 +365,24 @@ def on_create_target_action(self, event): gateway_units = event.params.get( 'gateway-units', [u for u in self.peers.ready_peer_details.keys()]) + if event.params['ec-rbd-metadata-pool']: + # When using erasure-coded pools the image needs to be pre-created + # as the gwcli does not currently handle the creation. + cmd = [ + 'rbd', + '--user', 'ceph-iscsi', + '--conf', str(self.CEPH_CONF), + 'create', + '--size', event.params['image-size'], + '{}/{}'.format( + event.params['ec-rbd-metadata-pool'], + event.params['image-name']), + '--data-pool', event.params['rbd-pool-name']] + logging.info(cmd) + subprocess.check_call(cmd) + target_pool = event.params['ec-rbd-metadata-pool'] + else: + target_pool = event.params['rbd-pool-name'] gw_client.create_target(target) for gw_unit, gw_config in self.peers.ready_peer_details.items(): added_gateways = [] @@ -375,7 +393,7 @@ def on_create_target_action(self, event): gw_config['fqdn']) added_gateways.append(gw_unit) gw_client.create_pool( - event.params['pool-name'], + target_pool, event.params['image-name'], event.params['image-size']) gw_client.add_client_to_target( @@ -389,7 +407,7 @@ def on_create_target_action(self, event): gw_client.add_disk_to_client( target, event.params['client-initiatorname'], - event.params['pool-name'], + target_pool, event.params['image-name']) event.set_results({'iqn': target}) diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 3c5b3e00..a7662356 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -2,7 +2,6 @@ local_overlay_enabled: False series: focal machines: '0': - constraints: mem=3072M '1': '2': '3': @@ -16,12 +15,17 @@ machines: constraints: mem=3072M '10': constraints: mem=3072M + '11': + '12': + '13': + '14': applications: ubuntu: charm: cs:ubuntu - num_units: 1 + num_units: 2 to: - '7' + - '14' ceph-iscsi: charm: ../../ceph-iscsi.charm num_units: 2 @@ -32,7 +36,7 @@ applications: - '1' ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: @@ -41,6 +45,9 @@ applications: - '0' - '1' - '2' + - '11' + - '12' + - 
'13' ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 9b2147a1..04b93a9b 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -197,7 +197,8 @@ def test_on_create_target_action(self, _getfqdn): action_event.params = { 'iqn': 'iqn.mock.iscsi-gw:iscsi-igw', 'gateway-units': 'ceph-iscsi/0 ceph-iscsi/1', - 'pool-name': 'iscsi-pool', + 'rbd-pool-name': 'iscsi-pool', + 'ec-rbd-metadata-pool': '', 'image-name': 'disk1', 'image-size': '5G', 'client-initiatorname': 'client-initiator', @@ -232,6 +233,60 @@ def test_on_create_target_action(self, _getfqdn): 'iscsi-pool', 'disk1') + @patch('socket.getfqdn') + def test_on_create_target_action_ec(self, _getfqdn): + _getfqdn.return_value = 'ceph-iscsi-0.example' + self.add_cluster_relation() + self.harness.begin() + action_event = MagicMock() + action_event.params = { + 'iqn': 'iqn.mock.iscsi-gw:iscsi-igw', + 'gateway-units': 'ceph-iscsi/0 ceph-iscsi/1', + 'rbd-pool-name': 'iscsi-pool', + 'ec-rbd-metadata-pool': 'iscsi-metapool', + 'image-name': 'disk1', + 'image-size': '5G', + 'client-initiatorname': 'client-initiator', + 'client-username': 'myusername', + 'client-password': 'mypassword'} + self.harness.charm.on_create_target_action(action_event) + self.subprocess.check_call.assert_called_once_with( + [ + 'rbd', + '--user', 'ceph-iscsi', + '--conf', '/etc/ceph/iscsi/ceph.conf', + 'create', + '--size', '5G', + 'iscsi-metapool/disk1', + '--data-pool', 'iscsi-pool']) + self.gwc.add_gateway_to_target.assert_has_calls([ + call( + 'iqn.mock.iscsi-gw:iscsi-igw', + '10.0.0.10', + 'ceph-iscsi-0.example'), + call( + 'iqn.mock.iscsi-gw:iscsi-igw', + '10.0.0.2', + 'ceph-iscsi-1.example')]) + + self.gwc.create_pool.assert_called_once_with( + 'iscsi-metapool', + 'disk1', + '5G') + self.gwc.add_client_to_target.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator') + self.gwc.add_client_auth.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator', + 'myusername', + 'mypassword') + self.gwc.add_disk_to_client.assert_called_once_with( + 'iqn.mock.iscsi-gw:iscsi-igw', + 'client-initiator', + 'iscsi-metapool', + 'disk1') + @patch.object(charm.secrets, 'choice') def test_on_has_peers(self, _choice): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') From 922d03d924c26c25aa726b27d07b3b57b3146348 Mon Sep 17 00:00:00 2001 From: Gabriel Adrian Samfira Date: Mon, 7 Sep 2020 13:43:52 +0000 Subject: [PATCH 2062/2699] Add object-store relation Implements the swift-proxy interface. This is needed in order for the glance (or any other) charm to be able to consume RadosGW the same way they would consume Swift. 
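As a sketch, assuming the consuming charm declares a matching 'object-store' endpoint (application names here are illustrative), wiring the two together is then a single relation:

    juju add-relation glance:object-store ceph-radosgw:object-store

The gateway publishes a swift-url setting on the relation, which the consumer can use in place of a Swift proxy endpoint.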
Change-Id: Ia59e1286ca25a71bcdf74be38c9dffb07c5be64f --- ceph-radosgw/hooks/hooks.py | 13 +++++++++++++ ceph-radosgw/hooks/object-store-relation-joined | 1 + ceph-radosgw/metadata.yaml | 2 ++ ceph-radosgw/unit_tests/test_hooks.py | 12 ++++++++++++ 4 files changed, 28 insertions(+) create mode 120000 ceph-radosgw/hooks/object-store-relation-joined
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index cefd170b..b6db243e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -181,6 +181,15 @@ def install(): leader_set(namespace_tenants=config('namespace-tenants')) +@hooks.hook('object-store-relation-joined') +def object_store_joined(relation_id=None): + relation_data = { + 'swift-url': + "{}:{}".format(canonical_url(CONFIGS, INTERNAL), listen_port()) + } + relation_set(relation_id=relation_id, relation_settings=relation_data) + + @hooks.hook('upgrade-charm.real') def upgrade_charm(): if is_leader() and not leader_get('namespace_tenants') == 'True': @@ -225,6 +234,10 @@ def _config_changed(): for r_id in relation_ids('certificates'): certs_joined(r_id) + # Refire object-store relations for VIP/port changes + for r_id in relation_ids('object-store'): + object_store_joined(r_id) + process_multisite_relations() CONFIGS.write_all()
diff --git a/ceph-radosgw/hooks/object-store-relation-joined b/ceph-radosgw/hooks/object-store-relation-joined new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/object-store-relation-joined @@ -0,0 +1 @@ +hooks.py \ No newline at end of file
diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 9d750889..3bf23825 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -43,6 +43,8 @@ provides: interface: http master: interface: radosgw-multisite + object-store: + interface: swift-proxy peers: cluster: interface: swift-ha
diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 6f4634bc..d31bcfec 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -250,6 +250,18 @@ def test_gateway_relation(self): ceph_hooks.gateway_relation() self.relation_set.assert_called_with(hostname='10.0.0.1', port=80) + @patch.object(ceph_hooks, "canonical_url") + def test_object_store_relation(self, _canonical_url): + relation_data = { + "swift-url": "http://radosgw:80" + } + self.listen_port.return_value = 80 + _canonical_url.return_value = "http://radosgw" + ceph_hooks.object_store_joined() + self.relation_set.assert_called_with( + relation_id=None, + relation_settings=relation_data) + @patch.object(ceph_hooks, 'leader_get') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw')
From 8c880ff231cf3bb9ae8318e2eb2fb0178684299d Mon Sep 17 00:00:00 2001 From: Ponnuvel Palaniyappan Date: Tue, 30 Jun 2020 17:27:09 +0100 Subject: [PATCH 2063/2699] Remove chrony if inside a container
When running ceph-mon in containers, best practice is to have chrony/ntp configured and installed on the bare metal and then have the container trust the system clock, as the container should not manage the system clock. The chrony package gets installed automatically as a dependency of other packages; this change removes it. Also contains related changes for charms.ceph.
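One way to sanity-check the result on a containerised monitor would be something like the following (unit name illustrative):

    juju run --unit ceph-mon/0 'dpkg -s chrony'

which should report the package as not installed when the unit runs in a container.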
Change-Id: If8beb28ea5b5e6317180e52c3e32463e472276f4 Closes-Bug: #1852441 Depends-On: Ie3c9c5899c1d46edd21c32868938d3290db321e7 --- ceph-mon/hooks/ceph_hooks.py | 4 ++++ ceph-mon/lib/charms_ceph/utils.py | 28 ++++++++++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 3bda8f21..bcf68696 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -58,6 +58,7 @@ from charmhelpers.fetch import ( apt_install, apt_update, + apt_purge, filter_installed_packages, add_source, get_upstream_version, @@ -160,6 +161,9 @@ def install(): add_source(config('source'), config('key')) apt_update(fatal=True) apt_install(packages=ceph.determine_packages(), fatal=True) + rm_packages = ceph.determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) try: # we defer and explicitly run `ceph-create-keys` from # add_keyring_to_ceph() as part of bootstrap process
diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 72e6b921..53cff539 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -41,6 +41,7 @@ service_stop, CompareHostReleases, write_file, + is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -54,8 +55,12 @@ storage_list, ) from charmhelpers.fetch import ( + add_source, apt_cache, - add_source, apt_install, apt_update + apt_install, + apt_purge, + apt_update, + filter_missing_packages ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -85,6 +90,9 @@ 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] +REMOVE_PACKAGES = [] +CHRONY_PACKAGE = 'chrony' + CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' KEY_MANAGERS = [ @@ -2209,8 +2217,11 @@ def upgrade_monitor(new_version, kick_function=None): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) - kick_function() + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) + kick_function() owner = ceph_user() # Ensure the files and directories under /var/lib/ceph is chowned @@ -3252,6 +3263,19 @@ def determine_packages(): return packages +def determine_packages_to_remove(): + """Determines packages for removal + + :returns: list of packages to be removed + """ + rm_packages = REMOVE_PACKAGES.copy() + if is_container(): + install_list = filter_missing_packages(CHRONY_PACKAGE) + if not install_list: + rm_packages.append(CHRONY_PACKAGE) + return rm_packages + + def bootstrap_manager(): hostname = socket.gethostname() path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname)
From eeb96a3db262bd5fa3d511e5a5d61b218f971ee7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 15 Sep 2020 11:20:23 +0000 Subject: [PATCH 2064/2699] Remove todo list
All actions listed in the todo have been completed except for the mod_wsgi and security checklist items. Bugs have been created for these. Any future omissions should be tracked in launchpad *1 rather than in a local todo file.
*1 https://bugs.launchpad.net/charm-ceph-iscsi Change-Id: Ic3b4a5141ca279801c8ba24ea3d80f437d5e5f75 --- ceph-iscsi/todo.txt | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 ceph-iscsi/todo.txt diff --git a/ceph-iscsi/todo.txt b/ceph-iscsi/todo.txt deleted file mode 100644 index 9776b5d9..00000000 --- a/ceph-iscsi/todo.txt +++ /dev/null @@ -1,25 +0,0 @@ -Todo -* Write README -* Move to openstack-charmers -* security checklist -* zaza tests for pause/resume -* remove hardcoded password -* switch to mod_wsgi - -* Refactor ceph broker code in charm helpers -* Rewrite ceph-client interface to stop using any relation* commands via charmhelpers -* Ceph heartbeat settings https://docs.ceph.com/docs/master/rbd/iscsi-requirements/ - -Mostly Done -* Certificates interface -* trusted_ips -* zaza tests for creating nd mounting a target -* Implement pause/resume -* Add iscsi target create action -* admin password -* implement source config option -* Proper Update Status -* Fix workload status so it reports missing relations -* Write spec -* Remove hardcoded ceph pool name and expose as a config option -* Add series upgrade From 5b1bd895e2cc93981131bbf92beffec80f75bc92 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 15 Sep 2020 11:57:58 +0000 Subject: [PATCH 2065/2699] Remove charmhelper and ops framework pins Change-Id: Ia8d5516722b45f53743f324758ad91dd465bd2ea --- ceph-iscsi/requirements.txt | 4 ++-- ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index f96906d3..5e7ce8e2 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -1,6 +1,6 @@ # requirements -git+https://github.com/juju/charm-helpers.git@87fc7ee5#egg=charmhelpers -git+https://github.com/canonical/operator.git@0.8.0#egg=ops +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers +git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=ca_client diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 9b2147a1..aaa168dd 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -23,7 +23,7 @@ sys.path.append('lib') # noqa sys.path.append('src') # noqa -from mock import call, patch, MagicMock, ANY +from mock import call, patch, MagicMock, ANY, Mock from ops.testing import Harness, _TestingModelBackend from ops.model import ( @@ -31,7 +31,9 @@ ) from ops import framework, model -import charm +with patch('charmhelpers.core.host_factory.ubuntu.cmp_pkgrevno', + Mock(return_value=1)): + import charm TEST_CA = '''-----BEGIN CERTIFICATE----- MIIC8TCCAdmgAwIBAgIUIchLT42Gy3QexrQbppgWb+xF2SgwDQYJKoZIhvcNAQEL @@ -278,6 +280,7 @@ def test_on_has_peers_existing_password(self): 'existing password') def test_on_ceph_client_relation_joined(self): + self.maxDiff = None rel_id = self.harness.add_relation('ceph-client', 'ceph-mon') self.harness.update_config( key_values={'rbd-metadata-pool': 'iscsi-pool'}) @@ -298,6 +301,15 @@ def test_on_ceph_client_relation_joined(self): self.assertEqual( req_pool['ops'], [{ + 'compression-algorithm': None, + 'compression-max-blob-size': None, + 
'compression-max-blob-size-hdd': None, + 'compression-max-blob-size-ssd': None, + 'compression-min-blob-size': None, + 'compression-min-blob-size-hdd': None, + 'compression-min-blob-size-ssd': None, + 'compression-mode': None, + 'compression-required-ratio': None, 'app-name': None, 'group': None, 'group-namespace': None, From 67c03318736c240189453396a709ca14044d704f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 18 Sep 2020 09:15:30 +0000 Subject: [PATCH 2066/2699] Add test using erasure coded pools Change-Id: I38ea010b71df875938636d029ac98bc979c43155 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/422 --- .../tests/bundles/focal-ussuri-ec.yaml | 125 ++++++++++++++++++ ceph-radosgw/tests/tests.yaml | 2 + 2 files changed, 127 insertions(+) create mode 100644 ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml diff --git a/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml b/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml new file mode 100644 index 00000000..740efaed --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml @@ -0,0 +1,125 @@ +options: + source: &source distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + - '12' + - '13' + - '14' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index acb664c4..703a7fd8 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -2,6 +2,7 @@ charm_name: ceph-radosgw gate_bundles: - vault: focal-victoria - vault: focal-victoria-namespaced + - vault: focal-ussuri-ec - vault: focal-ussuri - vault: focal-ussuri-namespaced - vault: bionic-ussuri @@ -39,6 +40,7 @@ 
tests: - vault: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - zaza.openstack.charm_tests.swift.tests.S3APITest + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes tests_options: force_deploy: - groovy-victoria From fd3a4f55f1c8b8bd4475a032f5179a773e17fbb3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 18 Sep 2020 10:58:39 +0000 Subject: [PATCH 2067/2699] Create default rbd pool Create a default replicated or erasure coded pool for iscsi targets. Omitting the pool name when running the create target action will result in the target being backed by the default pool. Change-Id: I1c27fbbe281763ba5bdb369df92ca82b87f70891 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/415 --- ceph-iscsi/actions.yaml | 1 - ceph-iscsi/config.yaml | 130 +++++++++++++++++- ceph-iscsi/src/charm.py | 119 +++++++++++++++- ceph-iscsi/templates/iscsi-gateway.cfg | 2 +- ceph-iscsi/tests/bundles/focal-ec.yaml | 89 ++++++++++++ ceph-iscsi/tests/bundles/focal.yaml | 6 +- ceph-iscsi/tests/tests.yaml | 1 + .../unit_tests/test_ceph_iscsi_charm.py | 48 +++++-- 8 files changed, 370 insertions(+), 26 deletions(-) create mode 100644 ceph-iscsi/tests/bundles/focal-ec.yaml diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml index 47a14749..a474f3c3 100644 --- a/ceph-iscsi/actions.yaml +++ b/ceph-iscsi/actions.yaml @@ -58,7 +58,6 @@ create-target: type: string description: "The CHAPs password to be created for the client" required: - - rbd-pool-name - image-size - image-name - client-initiatorname diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index e2416e77..4edf2aad 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -35,7 +35,7 @@ options: 192.168.0.0/24). If multiple networks are to be used, a space-delimited list of a.b.c.d/x can be provided. - rbd-metadata-pool: + gateway-metadata-pool: type: string default: iscsi description: | @@ -52,3 +52,131 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + ceph-osd-replication-count: + type: int + default: 3 + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the images rbd pool. Of course, this only + applies if using Ceph as a backend store. Note that once the images + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 5 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the compute images + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + rbd-pool-name: + default: + type: string + description: | + Optionally specify an existing pool that gateway should map to. 
+ pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include ‘replicated’ + and ‘erasure-coded’. + ec-profile-name: + type: string + default: + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range. The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. Larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this applications pool. The following list of + plugins acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: + description: | + EC profile technique used for this applications pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, + ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, + ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ + for shec. + ec-profile-device-class: + type: string + default: + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). 
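As a deployment-time sketch of how these options combine (values mirror the new focal-ec test bundle added later in this patch), the erasure-coded path can be selected with:

    juju deploy ./ceph-iscsi.charm ceph-iscsi \
        --config pool-type=erasure-coded \
        --config ec-profile-k=4 \
        --config ec-profile-m=2

Leaving pool-type at its default of 'replicated' keeps the previous behaviour of a single replicated data pool plus the gateway metadata pool.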
diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index ce89a0a4..d9a60231 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -237,11 +237,100 @@ def on_has_peers(self, event): password = ''.join(secrets.choice(alphabet) for i in range(8)) self.peers.set_admin_password(password) + def config_get(self, key): + """Retrieve config option. + + :returns: Value of the corresponding config option or None. + :rtype: Any + """ + return self.model.config.get(key) + + @property + def data_pool_name(self): + """The name of the default rbd data pool to be used by targets. + + :returns: Data pool name. + :rtype: str + """ + if self.config_get('rbd-pool-name'): + pool_name = self.config_get('rbd-pool-name') + else: + pool_name = self.app.name + return pool_name + + @property + def metadata_pool_name(self): + """The name of the default rbd metadata pool to be used by targets. + + :returns: Metadata pool name. + :rtype: str + """ + return (self.config_get('ec-rbd-metadata-pool') or + "{}-metadata".format(self.app.name)) + def request_ceph_pool(self, event): """Request pools from Ceph cluster.""" logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( - self.model.config['rbd-metadata-pool']) + self.config_get('gateway-metadata-pool')) + weight = self.config_get('ceph-pool-weight') + replicas = self.config_get('ceph-osd-replication-count') + if self.config_get('pool-type') == 'erasure-coded': + # General EC plugin config + plugin = self.config_get('ec-profile-plugin') + technique = self.config_get('ec-profile-technique') + device_class = self.config_get('ec-profile-device-class') + bdm_k = self.config_get('ec-profile-k') + bdm_m = self.config_get('ec-profile-m') + # LRC plugin config + bdm_l = self.config_get('ec-profile-locality') + crush_locality = self.config_get('ec-profile-crush-locality') + # SHEC plugin config + bdm_c = self.config_get('ec-profile-durability-estimator') + # CLAY plugin config + bdm_d = self.config_get('ec-profile-helper-chunks') + scalar_mds = self.config_get('ec-profile-scalar-mds') + # Profile name + profile_name = ( + self.config_get('ec-profile-name') or + "{}-profile".format(self.app.name) + ) + # Metadata sizing is approximately 1% of overall data weight + # but is in effect driven by the number of rbd's rather than + # their size - so it can be very lightweight. 
+ metadata_weight = weight * 0.01 + # Resize data pool weight to accomodate metadata weight + weight = weight - metadata_weight + # Create erasure profile + self.ceph_client.create_erasure_profile( + name=profile_name, + k=bdm_k, m=bdm_m, + lrc_locality=bdm_l, + lrc_crush_locality=crush_locality, + shec_durability_estimator=bdm_c, + clay_helper_chunks=bdm_d, + clay_scalar_mds=scalar_mds, + device_class=device_class, + erasure_type=plugin, + erasure_technique=technique + ) + + # Create EC data pool + self.ceph_client.create_erasure_pool( + name=self.data_pool_name, + erasure_profile=profile_name, + weight=weight, + allow_ec_overwrites=True + ) + self.ceph_client.create_replicated_pool( + name=self.metadata_pool_name, + weight=metadata_weight + ) + else: + self.ceph_client.create_replicated_pool( + name=self.data_pool_name, + replicas=replicas, + weight=weight) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', @@ -358,6 +447,22 @@ def on_add_trusted_ip_action(self, event): else: event.fail("Action must be run on leader") + def calculate_target_pools(self, event): + if event.params['ec-rbd-metadata-pool']: + ec_rbd_metadata_pool = event.params['ec-rbd-metadata-pool'] + rbd_pool_name = event.params['rbd-pool-name'] + elif event.params['rbd-pool-name']: + ec_rbd_metadata_pool = None + rbd_pool_name = event.params['rbd-pool-name'] + # Action did not specify pools to derive them from charm config. + elif self.model.config['pool-type'] == 'erasure-coded': + ec_rbd_metadata_pool = self.metadata_pool_name + rbd_pool_name = self.data_pool_name + else: + ec_rbd_metadata_pool = None + rbd_pool_name = self.data_pool_name + return rbd_pool_name, ec_rbd_metadata_pool + def on_create_target_action(self, event): """Create an iSCSI target.""" gw_client = gwcli_client.GatewayClient() @@ -365,7 +470,9 @@ def on_create_target_action(self, event): gateway_units = event.params.get( 'gateway-units', [u for u in self.peers.ready_peer_details.keys()]) - if event.params['ec-rbd-metadata-pool']: + rbd_pool_name, ec_rbd_metadata_pool = self.calculate_target_pools( + event) + if ec_rbd_metadata_pool: # When using erasure-coded pools the image needs to be pre-created # as the gwcli does not currently handle the creation. 
cmd = [ @@ -375,14 +482,14 @@ def on_create_target_action(self, event): 'create', '--size', event.params['image-size'], '{}/{}'.format( - event.params['ec-rbd-metadata-pool'], + ec_rbd_metadata_pool, event.params['image-name']), - '--data-pool', event.params['rbd-pool-name']] + '--data-pool', rbd_pool_name] logging.info(cmd) subprocess.check_call(cmd) - target_pool = event.params['ec-rbd-metadata-pool'] + target_pool = ec_rbd_metadata_pool else: - target_pool = event.params['rbd-pool-name'] + target_pool = rbd_pool_name gw_client.create_target(target) for gw_unit, gw_config in self.peers.ready_peer_details.items(): added_gateways = [] diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index c9f2bba7..89005292 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -2,7 +2,7 @@ logger_level = DEBUG cluster_name = ceph cluster_client_name = client.ceph-iscsi -pool = {{ options.rbd_metadata_pool }} +pool = {{ options.gateway_metadata_pool }} gateway_keyring = ceph.client.ceph-iscsi.keyring ceph_config_dir = /etc/ceph/iscsi diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml new file mode 100644 index 00000000..a5627293 --- /dev/null +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -0,0 +1,89 @@ +local_overlay_enabled: False +series: focal +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: tmbtil + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '0' + - '1' + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + vault: + num_units: 1 + charm: cs:~openstack-charmers-next/vault + to: + - '6' + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index a7662356..61057dd8 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -19,18 +19,20 @@ machines: '12': '13': '14': + '15': applications: ubuntu: charm: cs:ubuntu - num_units: 2 + num_units: 3 to: - '7' - '14' + - '15' ceph-iscsi: charm: ../../ceph-iscsi.charm num_units: 2 options: - rbd-metadata-pool: tmbtil + gateway-metadata-pool: tmbtil to: - '0' - '1' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 8f5990e2..9f8e4bdf 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-iscsi gate_bundles: + - focal-ec - focal smoke_bundles: - 
focal diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 0b78d67d..4adf580c 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -338,7 +338,7 @@ def test_on_ceph_client_relation_joined(self): self.maxDiff = None rel_id = self.harness.add_relation('ceph-client', 'ceph-mon') self.harness.update_config( - key_values={'rbd-metadata-pool': 'iscsi-pool'}) + key_values={'gateway-metadata-pool': 'iscsi-pool'}) self.harness.begin() self.harness.add_relation_unit( rel_id, @@ -366,25 +366,43 @@ def test_on_ceph_client_relation_joined(self): 'compression-mode': None, 'compression-required-ratio': None, 'app-name': None, + 'op': 'create-pool', + 'name': 'iscsi-pool', + 'replicas': 3, + 'pg_num': None, + 'weight': None, 'group': None, 'group-namespace': None, + 'app-name': None, 'max-bytes': None, - 'max-objects': None, - 'name': 'iscsi-pool', + 'max-objects': None}, + { + 'compression-algorithm': None, + 'compression-max-blob-size': None, + 'compression-max-blob-size-hdd': None, + 'compression-max-blob-size-ssd': None, + 'compression-min-blob-size': None, + 'compression-min-blob-size-hdd': None, + 'compression-min-blob-size-ssd': None, + 'compression-mode': None, + 'compression-required-ratio': None, 'op': 'create-pool', + 'name': 'ceph-iscsi', + 'replicas': None, 'pg_num': None, - 'replicas': 3, - 'weight': None}, - { - 'client': 'ceph-iscsi', - 'op': 'set-key-permissions', - 'permissions': [ - 'osd', - 'allow *', - 'mon', - 'allow *', - 'mgr', - 'allow r']}]) + 'weight': None, + 'group': None, + 'group-namespace': None, + 'app-name': None, + 'max-bytes': None, + 'max-objects': None}, + { + 'op': 'set-key-permissions', + 'permissions': [ + 'osd', 'allow *', + 'mon', 'allow *', + 'mgr', 'allow r'], + 'client': 'ceph-iscsi'}]) def test_on_pools_available(self): self.os.path.exists.return_value = False From c20abf677766dd709f7f74966ccb585c621e1f2f Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Mon, 21 Sep 2020 18:40:18 -0400 Subject: [PATCH 2068/2699] Update README Remove content that was placed in the CDG and add a link: https://review.opendev.org/#/c/749089/ Unleash Bugs section. Minor touchups. Leave preview status admonishment a while longer (charm will be promoted to stable in 20.10). Change-Id: I191bfd1f3cdcef7acfe09f93c5474aafce49bf8c --- ceph-iscsi/README.md | 64 +++++--------------------------------------- 1 file changed, 7 insertions(+), 57 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 70f79d63..b671695a 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -31,7 +31,9 @@ Then add a relation to the ceph-mon application: * Deploying four ceph-iscsi units is theoretical possible but it is not an officially supported configuration. + * The ceph-iscsi application cannot be containerised. + * Co-locating ceph-iscsi with another application is only supported with ceph-osd, although doing so with other applications may still work. @@ -85,59 +87,9 @@ and is available from any ceph-iscsi unit: ## VMWare integration -Ceph can be used to back iSCSI targets for VMWare initiators. - -Begin by accessing the VMWare admin web UI. - -These instructions were written using VMWare ESXi 6.7.0. 
- -### Create a Ceph pool - -If desired, create a Ceph pool to back the VMWare targets with the ceph-mon -charm's `create-pool` action: - - juju run-action --wait ceph-mon/0 create-pool name=vmware-iscsi - -### Enable the initiator - -From the web UI select the `Adapters` tab in the `Storage` context. Click -`Configure iSCSI` and enable iSCSI. - -Take a note of the initiator name, or UID. Here the UID we'll use is -`iqn.1998-01.com.vmware:node-gadomski-6a5e962a`. - -### Create an iSCSI target - -With the `create-target` action create a target for VMWare to use. Use the pool -that may have been created previously: - - juju run-action --wait ceph-iscsi/0 create-target \ - client-initiatorname=iqn.1998-01.com.vmware:node-gadomski-6a5e962a \ - client-username=vmwareclient \ - client-password=12to16characters \ - image-size=5G \ - image-name=disk-1 \ - pool-name=vmware-iscsi - -> **Note**: VMWare imposes a policy on credentials. The username should be more - than eight characters and the password between twelve and sixteen characters. - -### Add a target to VMWare - -Follow the [Ceph iSCSI gateway for VMWare][ceph-iscsi-vmware-upstream] -documentation to use the new target. Use the (CHAP) username and password -passed to the `create-target` action. - -When finished, under the `Devices` tab you should see the created target. To -make more devices available to VMWare simply create more targets (use a -different image name and optionally a different image size). You will need to -`Rescan` and `Refresh` for the new devices to appear. - -> **Note**: At the time of writing, the redundant task of setting the - credentials via the ESX CLI is still a necessity. This will require you to - enable SSH under `Manage` > `Services` > `TSM-SSH` > `Actions` (Start). - - - [ceph-mon-charm]: https://jaas.ai/ceph-mon [ceph-osd-charm]: https://jaas.ai/ceph-osd [cg]: https://docs.openstack.org/charm-guide -[cg-preview-charms]: https://docs.openstack.org/charm-guide/latest/openstack-charms.html#tech-preview-charms-beta [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide +[cg-preview-charms]: https://docs.openstack.org/charm-guide/latest/openstack-charms.html#tech-preview-charms-beta +[cdg-ceph-iscsi]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-iscsi.html [juju-docs-actions]: https://jaas.ai/docs/actions [ceph-iscsi-upstream]: https://docs.ceph.com/docs/master/rbd/iscsi-overview/ -[ceph-iscsi-vmware-upstream]: https://docs.ceph.com/docs/master/rbd/iscsi-initiator-esx/ [lp-bugs-charm-ceph-iscsi]: https://bugs.launchpad.net/charm-ceph-iscsi/+filebug From 91cde52d4325c6eee40850861247a6fbecf4a290 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 10 Sep 2020 14:49:10 +0000 Subject: [PATCH 2069/2699] Align EC usage with best practice When using erasure coding three pools are needed. One metadata pool which is replicated, one default data pool which is replicated and one erasure coded pool. Add an additional ec-pool-weight option to allow the erasure coded pool usage to be modelled within the deployment. The consumer of the share will need to use xattrs to ensure that the hierarchy of directories and files are stored in EC pool while leaving the default replicated data pool for file backtraces. 
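As a sketch of the intended client usage (mount point and pool name are examples; the EC data pool created by this change is named 'ec_' plus the data pool name), a directory can be pinned to the erasure-coded pool via a layout xattr:

    setfattr -n ceph.dir.layout.pool -v ec_ceph-fs-data /mnt/cephfs/bulk

New files created under that directory then land in the EC pool, while file backtraces stay in the default replicated data pool.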
Depends-On: Ib19e545fc676fa9e986f3fddcfb92cf99e778a3f Depends-On: I48348ab2b6c8952c5e22008a074a60c1f35be952 Change-Id: I1af386d5e1d031f2f0387c7381af683f948172d9 Closes-Bug: #1895154 --- ceph-fs/src/config.yaml | 19 +- ceph-fs/src/reactive/ceph_fs.py | 16 +- .../src/tests/bundles/focal-ussuri-ec.yaml | 222 ++++++++++++++++++ ceph-fs/src/tests/tests.yaml | 1 + 4 files changed, 254 insertions(+), 4 deletions(-) create mode 100644 ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index f6f48d9c..f8488c4a 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -90,7 +90,24 @@ options: default: replicated description: | Ceph pool type to use for storage - valid values include ‘replicated’ - and ‘erasure-coded’. + and ‘erasure-coded’. Note that if erasure-coded is enabled then data + pools for both replicated and erasure-coded are created - use of the + erasure-coded pool is controlled using xattrs on directories and files. + ec-pool-weight: + type: int + default: 5 + description: | + Defines a relative weighting of the EC pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the compute images + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. 
ec-profile-name: type: string default: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 03894ce8..fcda7053 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -76,6 +76,7 @@ def storage_ceph_connected(ceph): metadata_weight = weight * 0.20 # Resize data pool weight to accomodate metadata weight weight = weight - metadata_weight + extra_pools = [] if config('pool-type') == 'erasure-coded': # General EC plugin config @@ -92,6 +93,8 @@ def storage_ceph_connected(ceph): # CLAY plugin config bdm_d = config('ec-profile-helper-chunks') scalar_mds = config('ec-profile-scalar-mds') + # Weight for EC pool + ec_pool_weight = config('ec-pool-weight') # Profile name profile_name = ( config('ec-profile-name') or "{}-profile".format(service) @@ -111,18 +114,25 @@ def storage_ceph_connected(ceph): ) # Create EC data pool + ec_pool_name = 'ec_{}'.format(pool_name) ceph_mds.create_erasure_pool( - name=pool_name, + name=ec_pool_name, erasure_profile=profile_name, - weight=weight, + weight=ec_pool_weight, app_name=ceph_mds.ceph_pool_app_name, allow_ec_overwrites=True ) + ceph_mds.create_replicated_pool( + name=pool_name, + weight=weight, + app_name=ceph_mds.ceph_pool_app_name + ) ceph_mds.create_replicated_pool( name=metadata_pool_name, weight=metadata_weight, app_name=ceph_mds.ceph_pool_app_name ) + extra_pools = [ec_pool_name] else: ceph_mds.create_replicated_pool( name=pool_name, @@ -134,5 +144,5 @@ def storage_ceph_connected(ceph): replicas=replicas, weight=metadata_weight, app_name=ceph_mds.ceph_pool_app_name) - ceph_mds.request_cephfs(service) + ceph_mds.request_cephfs(service, extra_pools=extra_pools) reactive.set_state('ceph.create_pool.req.sent') diff --git a/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml b/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml new file mode 100644 index 00000000..39d9fed9 --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: 
cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 51ada0fd..dd589508 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,5 
+1,6 @@ charm_name: ceph-fs gate_bundles: + - focal-ussuri-ec - focal-victoria - focal-ussuri - bionic-ussuri From 941653661cc76a0d0a025b5c3833b2f158189180 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 31 Aug 2020 08:52:29 +0200 Subject: [PATCH 2070/2699] Add Ceph BlueStore Compression support Sync c-h and charms.ceph. Unpin flake8 Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/398 Change-Id: Ib068f39d43bade2502bc2d3cbbfb86324a03407a --- ceph-radosgw/config.yaml | 66 ++++ ceph-radosgw/hooks/ceph_rgw.py | 39 ++- .../charmhelpers/contrib/openstack/context.py | 12 + .../contrib/storage/linux/ceph.py | 14 +- ceph-radosgw/hooks/hooks.py | 16 +- ceph-radosgw/hooks/utils.py | 12 +- ceph-radosgw/lib/charms_ceph/broker.py | 81 ++--- ceph-radosgw/test-requirements.txt | 2 +- ceph-radosgw/tests/tests.yaml | 1 + ceph-radosgw/tox.ini | 2 +- ceph-radosgw/unit_tests/test_ceph.py | 296 +++++++++++++----- .../unit_tests/test_ceph_radosgw_utils.py | 2 +- 12 files changed, 396 insertions(+), 147 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 3dceefc1..bb7d4ee3 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -433,3 +433,69 @@ options: This configuration option will not be enabled on a charm upgrade, and cannot be toggled on in an existing installation as it will remove tenant access to existing buckets. + bluestore-compression-algorithm: + type: string + default: + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. + bluestore-compression-required-ratio: + type: float + default: + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. 
+ bluestore-compression-max-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index 3aced5bd..26177f46 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -16,6 +16,8 @@ import os import subprocess +import charmhelpers.contrib.openstack.context as ch_context + from charmhelpers.core.hookenv import ( config, service_name, @@ -118,6 +120,7 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): '.rgw.buckets.data' ] bucket_weight = config('rgw-buckets-pool-weight') + bluestore_compression = ch_context.CephBlueStoreCompressionContext() if config('pool-type') == 'erasure-coded': # General EC plugin config @@ -154,19 +157,35 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - rq.add_op_create_erasure_pool( - name=pool, - erasure_profile=profile_name, - weight=bucket_weight, - group="objects", - app_name=CEPH_POOL_APP_NAME - ) + # NOTE(fnordahl): once we deprecate Python 3.5 support we can do + # the unpacking of the BlueStore compression arguments as part of + # the function arguments. Until then we need to build the dict + # prior to the function call. + kwargs = { + 'name': pool, + 'erasure_profile': profile_name, + 'weight': bucket_weight, + 'group': "objects", + 'app_name': CEPH_POOL_APP_NAME, + } + kwargs.update(bluestore_compression.get_kwargs()) + rq.add_op_create_erasure_pool(**kwargs) else: for pool in heavy: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) - rq.add_op_create_pool(name=pool, replica_count=replicas, - weight=bucket_weight, group='objects', - app_name=CEPH_POOL_APP_NAME) + # NOTE(fnordahl): once we deprecate Python 3.5 support we can do + # the unpacking of the BlueStore compression arguments as part of + # the function arguments. Until then we need to build the dict + # prior to the function call. + kwargs = { + 'name': pool, + 'replica_count': replicas, + 'weight': bucket_weight, + 'group': 'objects', + 'app_name': CEPH_POOL_APP_NAME, + } + kwargs.update(bluestore_compression.get_kwargs()) + rq.add_op_create_replicated_pool(**kwargs) # NOTE: we want these pools to have a smaller pg_num/pgp_num than the # others since they are not expected to contain as much data diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 0e41a9f3..54aed7ff 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -3245,6 +3245,18 @@ def get_op(self): """ return self.op + def get_kwargs(self): + """Get values for use as keyword arguments. + + :returns: Context values with key suitable for use as kwargs to + CephBrokerRq add_op_create_*_pool methods. + :rtype: Dict[str,any] + """ + return { + k.replace('-', '_'): v + for k, v in self.op.items() + } + def validate(self): """Validate options. 
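As a usage sketch for the new options (values illustrative), compression could be enabled on the pools this charm requests with:

    juju config ceph-radosgw \
        bluestore-compression-mode=aggressive \
        bluestore-compression-algorithm=lz4

Invalid combinations are rejected by the validate() call above, surfacing as a blocked workload status via the status-assessment changes later in this patch rather than as a failed hook.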
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index d9d43578..526b95ad 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -705,12 +705,12 @@ def __init__(self, service, name=None, erasure_code_profile=None, # from different handling of this in the `charms.ceph` library. self.erasure_code_profile = op.get('erasure-profile', 'default-canonical') + self.allow_ec_overwrites = op.get('allow-ec-overwrites') else: # We keep the class default when initialized from keyword arguments # to not break the API for any other consumers. self.erasure_code_profile = erasure_code_profile or 'default' - - self.allow_ec_overwrites = allow_ec_overwrites + self.allow_ec_overwrites = allow_ec_overwrites def _create(self): # Try to find the erasure profile information in order to properly @@ -1972,12 +1972,14 @@ def request(self): 'request-id': self.request_id}) def _ops_equal(self, other): + keys_to_compare = [ + 'replicas', 'name', 'op', 'pg_num', 'group-permission', + 'object-prefix-permissions', + ] + keys_to_compare += list(self._partial_build_common_op_create().keys()) if len(self.ops) == len(other.ops): for req_no in range(0, len(self.ops)): - for key in [ - 'replicas', 'name', 'op', 'pg_num', 'weight', - 'group', 'group-namespace', 'group-permission', - 'object-prefix-permissions']: + for key in keys_to_compare: if self.ops[req_no].get(key) != other.ops[req_no].get(key): return False else:
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b6db243e..2e466142 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -258,9 +258,19 @@ def _mon_relation(): if request_per_unit_key(): relation_set(relation_id=rid, key_name=key_name) - # NOTE: prefer zone name if in use over pool-prefix. - rq = ceph.get_create_rgw_pools_rq( - prefix=config('zone') or config('pool-prefix')) + try: + # NOTE: prefer zone name if in use over pool-prefix. + rq = ceph.get_create_rgw_pools_rq( + prefix=config('zone') or config('pool-prefix')) + except ValueError as e: + # The end user has most likely provided an invalid value for + # a configuration option. Just log the traceback here, the + # end user will be notified by assess_status() called at + # the end of the hook execution. + log('Caught ValueError, invalid value provided for ' + 'configuration?: "{}"'.format(str(e)), + level=DEBUG) + return if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all()
diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 17a76f73..c6a6c9b1 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -181,7 +181,7 @@ def get_optional_interfaces(): return optional_interfaces -def check_optional_relations(configs): +def check_optional_config_and_relations(configs): """Check that if we have a relation_id for high availability that we can get the hacluster config. If we can't then we are blocked. This function is called from assess_status/set_os_workload_status as the charm_func and @@ -233,6 +233,14 @@ def check_optional_relations(configs): not all(master_configured)): return ('waiting', 'waiting for configuration of master zone') + + # Check that the provided Ceph BlueStore configuration is valid.
+ try: + bluestore_compression = context.CephBlueStoreCompressionContext() + bluestore_compression.validate() + except ValueError as e: + return ('blocked', 'Invalid configuration: {}'.format(str(e))) + # return 'unknown' as the lowest priority to not clobber an existing # status. return 'unknown', '' @@ -291,7 +299,7 @@ def assess_status_func(configs): required_interfaces.update(get_optional_interfaces()) return make_assess_status_func( configs, required_interfaces, - charm_func=check_optional_relations, + charm_func=check_optional_config_and_relations, services=services(), ports=None) diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index d5c83891..8f040a5e 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -16,6 +16,7 @@ import json import os +from subprocess import check_call, check_output, CalledProcessError from tempfile import NamedTemporaryFile from charms_ceph.utils import ( @@ -41,18 +42,16 @@ pool_set, remove_pool_snapshot, rename_pool, - set_pool_quota, snapshot_pool, validator, ErasurePool, - Pool, + BasePool, ReplicatedPool, ) # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ # This should do a decent job of preventing people from passing in bad values. # It will give a useful error message -from subprocess import check_call, check_output, CalledProcessError POOL_KEYS = { # "Ceph Key Name": [Python type, [Valid Range]] @@ -405,23 +404,11 @@ def handle_erasure_pool(request, service): """ pool_name = request.get('name') erasure_profile = request.get('erasure-profile') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') - allow_ec_overwrites = request.get('allow-ec-overwrites') if erasure_profile is None: erasure_profile = "default-canonical" - app_name = request.get('app-name') - - # Check for missing params - if pool_name is None: - msg = "Missing parameter. name is required for the pool" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if group_name: group_namespace = request.get('group-namespace') # Add the pool to the group named "group_name" @@ -437,21 +424,22 @@ def handle_erasure_pool(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = ErasurePool(service=service, name=pool_name, - erasure_code_profile=erasure_profile, - percent_data=weight, - app_name=app_name, - allow_ec_overwrites=allow_ec_overwrites) + try: + pool = ErasurePool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + # Ok make the erasure pool if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (erasure_profile={})" .format(pool.name, erasure_profile), level=INFO) pool.create() - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. + pool.update() def handle_replicated_pool(request, service): @@ -462,26 +450,19 @@ def handle_replicated_pool(request, service): :returns: dict. exit-code and reason if not 0. 
""" pool_name = request.get('name') - replicas = request.get('replicas') - max_bytes = request.get('max-bytes') - max_objects = request.get('max-objects') - weight = request.get('weight') group_name = request.get('group') # Optional params + # NOTE: Check this against the handling in the Pool classes, reconcile and + # remove. pg_num = request.get('pg_num') + replicas = request.get('replicas') if pg_num: # Cap pg_num to max allowed just in case. osds = get_osds(service) if osds: pg_num = min(pg_num, (len(osds) * 100 // replicas)) - - app_name = request.get('app-name') - # Check for missing params - if pool_name is None or replicas is None: - msg = "Missing parameter. name and replicas are required" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + request.update({'pg_num': pg_num}) if group_name: group_namespace = request.get('group-namespace') @@ -490,18 +471,14 @@ def handle_replicated_pool(request, service): group=group_name, namespace=group_namespace) - kwargs = {} - if pg_num: - kwargs['pg_num'] = pg_num - if weight: - kwargs['percent_data'] = weight - if replicas: - kwargs['replicas'] = replicas - if app_name: - kwargs['app_name'] = app_name - - pool = ReplicatedPool(service=service, - name=pool_name, **kwargs) + try: + pool = ReplicatedPool(service=service, + op=request) + except KeyError: + msg = "Missing parameter." + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + if not pool_exists(service=service, name=pool_name): log("Creating pool '{}' (replicas={})".format(pool.name, replicas), level=INFO) @@ -510,10 +487,8 @@ def handle_replicated_pool(request, service): log("Pool '{}' already exists - skipping create".format(pool.name), level=DEBUG) - # Set a quota if requested - if max_bytes or max_objects: - set_pool_quota(service=service, pool_name=pool_name, - max_bytes=max_bytes, max_objects=max_objects) + # Set/update properties that are allowed to change after pool creation. 
+ pool.update() def handle_create_cache_tier(request, service): @@ -540,7 +515,7 @@ def handle_create_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - p = Pool(service=service, name=storage_pool) + p = BasePool(service=service, name=storage_pool) p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) @@ -561,7 +536,7 @@ def handle_remove_cache_tier(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - pool = Pool(name=storage_pool, service=service) + pool = BasePool(name=storage_pool, service=service) pool.remove_cache_tier(cache_pool=cache_pool) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 7d9c2587..44b50231 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -10,7 +10,7 @@ charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 703a7fd8..36526e8d 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -41,6 +41,7 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - zaza.openstack.charm_tests.swift.tests.S3APITest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes + - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - groovy-victoria diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index b835733a..8080ba6d 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 exclude = */charmhelpers diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 71318a8b..f9dad5d7 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -51,95 +51,236 @@ def test_import_radosgw_key(self): '/etc/ceph/keyring.rados.gateway']) ]) + @patch.object(utils.context, 'CephBlueStoreCompressionContext') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' - '.add_op_create_pool') - def test_create_rgw_pools_rq_with_prefix(self, mock_broker): + '.add_op_create_replicated_pool') + def test_create_rgw_pools_rq_with_prefix( + self, + mock_broker, + mock_bluestore_compression): self.test_config.set('rgw-lightweight-pool-pg-num', 10) self.test_config.set('ceph-osd-replication-count', 3) self.test_config.set('rgw-buckets-pool-weight', 19) ceph.get_create_rgw_pools_rq(prefix='us-east') mock_broker.assert_has_calls([ - call(replica_count=3, weight=19, name='us-east.rgw.buckets.data', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.control', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.data.root', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.gc', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.log', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.intent-log', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.meta', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.usage', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, 
name='us-east.rgw.users.keys', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.users.email', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.users.swift', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.users.uid', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index', - group='objects', app_name='rgw'), - call(pg_num=10, replica_count=3, name='.rgw.root', - group='objects', app_name='rgw')], - ) + call(name='us-east.rgw.buckets.data', replica_count=3, weight=19, + group='objects', app_name='rgw'), + call('us-east.rgw.control', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.data.root', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.gc', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.log', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.intent-log', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.meta', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.usage', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.keys', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.email', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.swift', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.uid', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.buckets.extra', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.buckets.index', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('.rgw.root', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + ]) + + # confirm operation with bluestore compression + mock_broker.reset_mock() + mock_bluestore_compression().get_kwargs.return_value = { + 'compression_mode': 'fake', + } + ceph.get_create_rgw_pools_rq(prefix='us-east') + mock_broker.assert_has_calls([ + call(name='us-east.rgw.buckets.data', replica_count=3, weight=19, + group='objects', app_name='rgw', compression_mode='fake'), + call('us-east.rgw.control', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, 
max_objects=None), + call('us-east.rgw.data.root', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.gc', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.log', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.intent-log', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.meta', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.usage', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.keys', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.email', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.swift', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.users.uid', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.buckets.extra', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('us-east.rgw.buckets.index', replica_count=3, pg_num=10, + weight=None, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('.rgw.root', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + ]) + @patch.object(utils.context, 'CephBlueStoreCompressionContext') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_request_access_to_group') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' - '.add_op_create_pool') - def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, - mock_request_access): + '.add_op_create_replicated_pool') + def test_create_rgw_pools_rq_no_prefix_post_jewel( + self, + mock_broker, + mock_request_access, + mock_bluestore_compression): self.test_config.set('rgw-lightweight-pool-pg-num', -1) self.test_config.set('ceph-osd-replication-count', 3) self.test_config.set('rgw-buckets-pool-weight', 19) self.test_config.set('restrict-ceph-pools', True) ceph.get_create_rgw_pools_rq(prefix=None) mock_broker.assert_has_calls([ - call(replica_count=3, weight=19, name='default.rgw.buckets.data', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.control', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.data.root', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.gc', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.log', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.intent-log', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, 
name='default.rgw.meta', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.usage', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.users.keys', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.users.email', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.users.swift', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='default.rgw.users.uid', - group='objects', app_name='rgw'), - call(weight=1.00, replica_count=3, - name='default.rgw.buckets.extra', - group='objects', app_name='rgw'), - call(weight=3.00, replica_count=3, - name='default.rgw.buckets.index', - group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='.rgw.root', - group='objects', app_name='rgw')], - ) + call(name='default.rgw.buckets.data', replica_count=3, weight=19, + group='objects', app_name='rgw'), + call('default.rgw.control', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.data.root', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.gc', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.log', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.intent-log', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.meta', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.usage', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.keys', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.email', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.swift', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.uid', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.buckets.extra', replica_count=3, pg_num=None, + weight=1.0, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.buckets.index', replica_count=3, pg_num=None, + weight=3.0, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('.rgw.root', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + ]) mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', permission='rwx') + # confirm operation with bluestore compression + mock_broker.reset_mock() + mock_bluestore_compression().get_kwargs.return_value = { + 'compression_mode': 'fake', + } + 
ceph.get_create_rgw_pools_rq(prefix=None) + mock_broker.assert_has_calls([ + call(name='default.rgw.buckets.data', replica_count=3, weight=19, + group='objects', app_name='rgw', compression_mode='fake'), + call('default.rgw.control', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.data.root', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.gc', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.log', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.intent-log', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.meta', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.usage', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.keys', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.email', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.swift', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.users.uid', replica_count=3, pg_num=None, + weight=0.1, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.buckets.extra', replica_count=3, pg_num=None, + weight=1.0, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('default.rgw.buckets.index', replica_count=3, pg_num=None, + weight=3.0, group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + call('.rgw.root', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), + ]) + + @patch.object(utils.context, 'CephBlueStoreCompressionContext') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' '.add_op_create_erasure_profile') @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq' @@ -151,7 +292,8 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel(self, mock_broker, def test_create_rgw_pools_rq_no_prefix_ec(self, mock_broker, mock_request_access, mock_request_create_ec_pool, - mock_request_create_ec_profile): + mock_request_create_ec_profile, + mock_bluestore_compression): self.test_config.set('rgw-lightweight-pool-pg-num', -1) self.test_config.set('ceph-osd-replication-count', 3) self.test_config.set('rgw-buckets-pool-weight', 19) @@ -215,6 +357,20 @@ def test_create_rgw_pools_rq_no_prefix_ec(self, mock_broker, mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', permission='rwx') + # confirm operation with bluestore compression + mock_request_create_ec_pool.reset_mock() + mock_bluestore_compression().get_kwargs.return_value = { + 'compression_mode': 'fake', + } + ceph.get_create_rgw_pools_rq(prefix=None) 
+ mock_request_create_ec_pool.assert_has_calls([ + call(name='default.rgw.buckets.data', + erasure_profile='ceph-radosgw-profile', + weight=19, + group="objects", + app_name='rgw', + compression_mode='fake') + ]) @patch.object(utils.apt_pkg, 'version_compare', lambda *args: -1) @patch.object(utils, 'lsb_release', diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index cd878f7a..60238a7b 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -56,7 +56,7 @@ def test_assess_status(self): self.application_version_set.assert_called_with('10.2.2') @patch.object(utils, 'get_optional_interfaces') - @patch.object(utils, 'check_optional_relations') + @patch.object(utils, 'check_optional_config_and_relations') @patch.object(utils, 'REQUIRED_INTERFACES') @patch.object(utils, 'services') @patch.object(utils, 'make_assess_status_func') From 81e0b49143ab8c313e085df6e7c396660370e1b2 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2071/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync requirements.txt files to sync to standard Change-Id: I9c2419e39899f02e99b07d5dbd0b4cd0f51e03ba --- .../hooks/charmhelpers/contrib/openstack/context.py | 12 ++++++++++++ ceph-mon/lib/charms_ceph/utils.py | 6 +++--- ceph-mon/requirements.txt | 1 + ceph-mon/test-requirements.txt | 1 + ceph-mon/tox.ini | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 0e41a9f3..54aed7ff 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -3245,6 +3245,18 @@ def get_op(self): """ return self.op + def get_kwargs(self): + """Get values for use as keyword arguments. + + :returns: Context values with key suitable for use as kwargs to + CephBrokerRq add_op_create_*_pool methods. + :rtype: Dict[str,any] + """ + return { + k.replace('-', '_'): v + for k, v in self.op.items() + } + def validate(self): """Validate options. diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 53cff539..9da4dc12 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -631,7 +631,7 @@ def _get_child_dirs(path): OSError if an error occurs reading the directory listing """ if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) + raise ValueError('Specified path "%s" does not exist' % path) if not os.path.isdir(path): raise ValueError('Specified path "%s" is not a directory' % path) @@ -2220,8 +2220,8 @@ def upgrade_monitor(new_version, kick_function=None): rm_packages = determine_packages_to_remove() if rm_packages: apt_purge(packages=rm_packages, fatal=True) - kick_function() + owner = ceph_user() # Ensure the files and directories under /var/lib/ceph is chowned @@ -3331,7 +3331,7 @@ def apply_osd_settings(settings): present. Settings stop being applied on encountering an error. :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran succesfully. + :returns: bool. True if commands ran successfully. 
:raises: OSDConfigSetError """ current_settings = {} diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 2316401b..8ba19415 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4
diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 44b50231..56fbf922 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2
diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 8080ba6d..e2d58f59 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226,W504 +ignore = E402,E226,W503,W504 exclude = */charmhelpers
From b8e69a8a510f14cc2d26b2e8e2a98724fe8ec4f1 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Sep 2020 11:46:51 +0100 Subject: [PATCH 2072/2699] Make EC profiles immutable Changing an existing EC profile can have some nasty side effects including crashing OSDs (which is why it's guarded with a --force). Update the ceph helper to log a warning and return if an EC profile already exists, effectively making them immutable and avoiding any related issues. Reconfiguration of a pool would be undertaken using actions: - create new EC profile - create new pool using new EC profile - copy data from old pool to new pool - rename old pool - rename new pool to original pool name This obviously requires an outage in the consuming application. Change-Id: Ifb3825750f0299589f404e06103d79e393d608f3 Closes-Bug: 1897517 --- .../hooks/charmhelpers/contrib/openstack/context.py | 12 ++++++++++++ .../charmhelpers/contrib/storage/linux/ceph.py | 13 +++++++++---- ceph-mon/lib/charms_ceph/utils.py | 6 +++--- 3 files changed, 24 insertions(+), 7 deletions(-)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 0e41a9f3..54aed7ff 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -3245,6 +3245,18 @@ def get_op(self): """ return self.op + def get_kwargs(self): + """Get values for use as keyword arguments. + + :returns: Context values with key suitable for use as kwargs to + CephBrokerRq add_op_create_*_pool methods. + :rtype: Dict[str,any] + """ + return { + k.replace('-', '_'): v + for k, v in self.op.items() + } + def validate(self): """Validate options.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 526b95ad..7882e2ce 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1074,7 +1074,10 @@ def create_erasure_profile(service, profile_name, erasure_plugin_technique=None): """Create a new erasure code profile if one does not already exist for it. - Updates the profile if it exists.
Please refer to [0] for more details. 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -1110,6 +1113,11 @@ def create_erasure_profile(service, profile_name, :type erasure_plugin_technique: str :return: None. Can raise CalledProcessError, ValueError or AssertionError """ + if erasure_profile_exists(service, profile_name): + log('EC profile {} exists, skipping update'.format(profile_name), + level=WARNING) + return + plugin_techniques = { 'jerasure': [ 'reed_sol_van', @@ -1209,9 +1217,6 @@ def create_erasure_profile(service, profile_name, if scalar_mds: cmd.append('scalar-mds={}'.format(scalar_mds)) - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - check_call(cmd)
diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 53cff539..9da4dc12 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -631,7 +631,7 @@ def _get_child_dirs(path): OSError if an error occurs reading the directory listing """ if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) + raise ValueError('Specified path "%s" does not exist' % path) if not os.path.isdir(path): raise ValueError('Specified path "%s" is not a directory' % path) @@ -2220,8 +2220,8 @@ def upgrade_monitor(new_version, kick_function=None): rm_packages = determine_packages_to_remove() if rm_packages: apt_purge(packages=rm_packages, fatal=True) - kick_function() + owner = ceph_user() # Ensure the files and directories under /var/lib/ceph is chowned @@ -3331,7 +3331,7 @@ def apply_osd_settings(settings): present. Settings stop being applied on encountering an error. :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran succesfully. + :returns: bool. True if commands ran successfully. :raises: OSDConfigSetError """ current_settings = {}
From efba5de61c0a38f2b60d10623f7463fc0774c0ed Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 28 Sep 2020 11:50:28 +0100 Subject: [PATCH 2073/2699] Make EC profiles immutable Changing an existing EC profile can have some nasty side effects including crashing OSDs (which is why it's guarded with a --force). Update the ceph helper to log a warning and return if an EC profile already exists, effectively making them immutable and avoiding any related issues. Reconfiguration of a pool would be undertaken using actions: - create new EC profile - create new pool using new EC profile - copy data from old pool to new pool - rename old pool - rename new pool to original pool name This obviously requires an outage in the consuming application. Change-Id: I630f6b6c5e3c6dd252a85cd373d7e204b9e77245 Closes-Bug: 1897517 --- .../charmhelpers/contrib/storage/linux/ceph.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 526b95ad..7882e2ce 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -1074,7 +1074,10 @@ def create_erasure_profile(service, profile_name, erasure_plugin_technique=None): """Create a new erasure code profile if one does not already exist for it. - Updates the profile if it exists.
+ Profiles are considered immutable so will not be updated if the named + profile already exists. + + Please refer to [0] for more details. 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -1110,6 +1113,11 @@ def create_erasure_profile(service, profile_name, :type erasure_plugin_technique: str :return: None. Can raise CalledProcessError, ValueError or AssertionError """ + if erasure_profile_exists(service, profile_name): + log('EC profile {} exists, skipping update'.format(profile_name), + level=WARNING) + return + plugin_techniques = { 'jerasure': [ 'reed_sol_van', @@ -1209,9 +1217,6 @@ def create_erasure_profile(service, profile_name, if scalar_mds: cmd.append('scalar-mds={}'.format(scalar_mds)) - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - check_call(cmd)
From fd7c55f8838c6c26826a103e504285caf3570a38 Mon Sep 17 00:00:00 2001 From: Ioanna Alifieraki Date: Mon, 24 Aug 2020 11:23:25 +0100 Subject: [PATCH 2074/2699] Change file owner so that check_ceph_osd nrpe service can work on CIS hardened environments check_ceph_osd_services.py reads a file under /var/lib/nagios to report Ceph status back to Nagios. This service runs as the nagios user and the file is owned by root. On CIS hardened servers the default umask is set to 027, making the permissions of the file 640 instead of 644. This results in the service not being able to read the file, and the status reported to Nagios is UNKNOWN even though the Ceph status is OK. Closes-Bug: #1879667 Change-Id: Ib67b9a2b86a1c22658aeaf41f8e464072ab1828f --- ceph-osd/files/nagios/collect_ceph_osd_services.py | 11 +++++++++++ ceph-osd/test-requirements.txt | 1 + 2 files changed, 12 insertions(+)
diff --git a/ceph-osd/files/nagios/collect_ceph_osd_services.py b/ceph-osd/files/nagios/collect_ceph_osd_services.py index 84764aba..a01e00a2 100755 --- a/ceph-osd/files/nagios/collect_ceph_osd_services.py +++ b/ceph-osd/files/nagios/collect_ceph_osd_services.py @@ -6,6 +6,7 @@ import os import subprocess +from pwd import getpwnam # fasteners only exists in Bionic, so this will fail on xenial and trusty try: @@ -71,6 +72,16 @@ def do_status(): with open(_tmp_file, 'wt') as f: f.writelines(lines) + # In CIS hardened environments check_ceph_osd_services cannot + # read _tmp_file due to restricted permissions (#LP1879667). + # Changing the owner of the file to nagios solves this problem. + # check_ceph_osd_services.py removes this file, so make + # sure that we change permissions on a file that exists. + nagios_uid = getpwnam('nagios').pw_uid + nagios_gid = getpwnam('nagios').pw_gid + if os.path.isfile(_tmp_file): + os.chown(_tmp_file, nagios_uid, nagios_gid) + def run_main(): # on bionic we can interprocess lock; we don't do it for older platforms
diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 7d9c2587..93a87138 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there.
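The pool reconfiguration sequence listed in the two "Make EC profiles immutable" commit messages above is deliberately left to the operator. A rough sketch of those steps against the Ceph CLI, assuming a suitably privileged cephx id and hypothetical pool/profile names (the new EC profile is presumed to have been created already):

from subprocess import check_call

service = 'admin'           # hypothetical cephx id
pool = 'glance'             # hypothetical pool being reconfigured
new_profile = 'my-new-ec'   # hypothetical, pre-created EC profile

# create new pool using new EC profile
check_call(['ceph', '--id', service, 'osd', 'pool', 'create',
            pool + '.new', '32', '32', 'erasure', new_profile])
# copy data from old pool to new pool (consuming application stopped)
check_call(['rados', '--id', service, 'cppool', pool, pool + '.new'])
# rename old pool out of the way, then give the new pool the original name
check_call(['ceph', '--id', service, 'osd', 'pool', 'rename',
            pool, pool + '.old'])
check_call(['ceph', '--id', service, 'osd', 'pool', 'rename',
            pool + '.new', pool])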
# +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 From 2db226dfee3f303f155d4e2b5c43725b3b5c5e5b Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2075/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync requirements.txt files to sync to standard Change-Id: I512ef8bbb52c08e782c66450b435a40a76b8a532 --- ceph-proxy/files/nagios/check_ceph_status.py | 6 ++-- ceph-proxy/hooks/ceph.py | 10 ++++--- ceph-proxy/lib/charms_ceph/broker.py | 23 +++++++++------ ceph-proxy/lib/charms_ceph/utils.py | 30 ++++++++++++++++++-- ceph-proxy/requirements.txt | 1 + ceph-proxy/test-requirements.txt | 3 +- ceph-proxy/tox.ini | 2 +- ceph-proxy/unit_tests/__init__.py | 1 + 8 files changed, 56 insertions(+), 20 deletions(-) diff --git a/ceph-proxy/files/nagios/check_ceph_status.py b/ceph-proxy/files/nagios/check_ceph_status.py index e7638f0f..c70e6459 100755 --- a/ceph-proxy/files/nagios/check_ceph_status.py +++ b/ceph-proxy/files/nagios/check_ceph_status.py @@ -16,12 +16,12 @@ def check_ceph_status(args): with open(args.status_file, "r") as f: lines = f.readlines() status_data = dict( - l.strip().split(' ', 1) for l in lines if len(l) > 1 + line.strip().split(' ', 1) for line in lines if len(line) > 1 ) else: lines = subprocess.check_output(["ceph", "status"]).split('\n') status_data = dict( - l.strip().split(' ', 1) for l in lines if len(l) > 1 + line.strip().split(' ', 1) for line in lines if len(line) > 1 ) if ('health' not in status_data or @@ -33,7 +33,7 @@ def check_ceph_status(args): msg = 'CRITICAL: ceph health status: "{}"'.format( status_data['health']) raise nagios_plugin.CriticalError(msg) - osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", + osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap']) if osds.group(1) > osds.group(2): # not all OSDs are "up" msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format( diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 214105a8..8ce22a85 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -89,7 +89,7 @@ def get_version(): package = "ceph" try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. 
e = 'Could not determine version of package with no installation ' \ 'candidate: %s' % package @@ -104,7 +104,7 @@ def get_version(): # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -274,6 +274,7 @@ def generate_monitor_secret(): return "{}==".format(res.split('=')[1].strip()) + # OSD caps taken from ceph-create-keys _osd_bootstrap_caps = { 'mon': [ @@ -311,7 +312,7 @@ def get_osd_bootstrap_key(): # Attempt to get/create a key using the OSD bootstrap profile first key = get_named_key('bootstrap-osd', _osd_bootstrap_caps_profile) - except: + except Exception: # If that fails try with the older style permissions key = get_named_key('bootstrap-osd', _osd_bootstrap_caps) @@ -335,6 +336,7 @@ def import_radosgw_key(key): ] subprocess.check_call(cmd) + # OSD caps taken from ceph-create-keys _radosgw_caps = { 'mon': ['allow rw'], @@ -516,7 +518,7 @@ def bootstrap_monitor_cluster(secret): service_restart('ceph-mon') else: service_restart('ceph-mon-all') - except: + except Exception: raise finally: os.unlink(keyring) diff --git a/ceph-proxy/lib/charms_ceph/broker.py b/ceph-proxy/lib/charms_ceph/broker.py index 8f040a5e..25427697 100644 --- a/ceph-proxy/lib/charms_ceph/broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -750,6 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') + extra_pools = request.get('extra_pools', []) metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: @@ -758,14 +759,12 @@ def handle_create_cephfs(request, service): return {'exit-code': 1, 'stderr': msg} # Sanity check that the required pools exist - if not pool_exists(service=service, name=data_pool): - msg = "CephFS data pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if not pool_exists(service=service, name=metadata_pool): - msg = "CephFS metadata pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + for pool_name in [data_pool, metadata_pool] + extra_pools: + if not pool_exists(service=service, name=pool_name): + msg = "CephFS pool {} does not exist. 
Cannot create CephFS".format( + pool_name) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} if get_cephfs(service=service): # CephFS new has already been called @@ -786,6 +785,14 @@ def handle_create_cephfs(request, service): else: log(err.output, level=ERROR) return {'exit-code': 1, 'stderr': err.output} + for pool_name in extra_pools: + cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, + pool_name] + try: + check_output(cmd) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 72e6b921..9da4dc12 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -41,6 +41,7 @@ service_stop, CompareHostReleases, write_file, + is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -54,8 +55,12 @@ storage_list, ) from charmhelpers.fetch import ( + add_source, apt_cache, - add_source, apt_install, apt_update + apt_install, + apt_purge, + apt_update, + filter_missing_packages ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -85,6 +90,9 @@ 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] +REMOVE_PACKAGES = [] +CHRONY_PACKAGE = 'chrony' + CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' KEY_MANAGERS = [ @@ -623,7 +631,7 @@ def _get_child_dirs(path): OSError if an error occurs reading the directory listing """ if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) + raise ValueError('Specified path "%s" does not exist' % path) if not os.path.isdir(path): raise ValueError('Specified path "%s" is not a directory' % path) @@ -2209,6 +2217,9 @@ def upgrade_monitor(new_version, kick_function=None): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) kick_function() owner = ceph_user() @@ -3252,6 +3263,19 @@ def determine_packages(): return packages +def determine_packages_to_remove(): + """Determines packages for removal + + :returns: list of packages to be removed + """ + rm_packages = REMOVE_PACKAGES.copy() + if is_container(): + install_list = filter_missing_packages(CHRONY_PACKAGE) + if not install_list: + rm_packages.append(CHRONY_PACKAGE) + return rm_packages + + def bootstrap_manager(): hostname = socket.gethostname() path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) @@ -3307,7 +3331,7 @@ def apply_osd_settings(settings): present. Settings stop being applied on encountering an error. :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran succesfully. + :returns: bool. True if commands ran successfully. :raises: OSDConfigSetError """ current_settings = {} diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 2316401b..8ba19415 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. 
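The extra_pools handling added to handle_create_cephfs above amounts to one 'fs add_data_pool' call per additional pool once the filesystem exists. A minimal sketch with hypothetical names, mirroring the broker's error handling:

from subprocess import check_output, CalledProcessError

service = 'admin'               # hypothetical cephx id
cephfs_name = 'myfs'            # hypothetical filesystem name
extra_pools = ['myfs-data-ec']  # hypothetical extra data pool(s)

for pool_name in extra_pools:
    cmd = ['ceph', '--id', service, 'fs', 'add_data_pool',
           cephfs_name, pool_name]
    try:
        check_output(cmd)
    except CalledProcessError as err:
        # The broker logs err.output and returns {'exit-code': 1} here.
        raise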
# +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 7d9c2587..56fbf922 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -7,10 +7,11 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index b835733a..e2d58f59 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226 +ignore = E402,E226,W503,W504 exclude = */charmhelpers diff --git a/ceph-proxy/unit_tests/__init__.py b/ceph-proxy/unit_tests/__init__.py index ba8fe96e..34acae80 100644 --- a/ceph-proxy/unit_tests/__init__.py +++ b/ceph-proxy/unit_tests/__init__.py @@ -13,6 +13,7 @@ def _add_path(path): if path not in sys.path: sys.path.insert(1, path) + _add_path(_actions) _add_path(_hooks) _add_path(_charmhelpers) From f1d0dd06d7d89b2de633570a89bbec9b2b8220f3 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2076/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync requirements.txt files to sync to standard Required additional fix: * Also sync section-ceph-bluestore-compression template Change-Id: I621c3a0e4e3594808b4c6ec298ed79aada44bae0 --- ceph-osd/charm-helpers-hooks.yaml | 1 + .../charmhelpers/contrib/openstack/context.py | 19 ++++++++++++ .../contrib/storage/linux/ceph.py | 25 +++++++++++++--- ceph-osd/lib/charms_ceph/broker.py | 23 +++++++++----- ceph-osd/lib/charms_ceph/utils.py | 30 +++++++++++++++++-- ceph-osd/requirements.txt | 1 + ceph-osd/test-requirements.txt | 1 + ceph-osd/tox.ini | 2 +- 8 files changed, 86 insertions(+), 16 deletions(-) diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index 26d981a4..ca383631 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -23,3 +23,4 @@ include: - contrib.charmsupport - contrib.hardening|inc=* - contrib.openstack.policyd + - contrib.openstack.templates|inc=*/section-ceph-bluestore-compression diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index b5adbefc..54aed7ff 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -58,6 +58,7 @@ status_set, network_get_primary_address, WARNING, + service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -810,6 +811,12 @@ def __call__(self): ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + if config('pool-type') and config('pool-type') == 'erasure-coded': + base_pool_name = config('rbd-pool') or config('rbd-pool-name') + if not base_pool_name: + base_pool_name = service_name() + ctxt['rbd_default_data_pool'] = base_pool_name + if not 
os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -3238,6 +3245,18 @@ def get_op(self): """ return self.op + def get_kwargs(self): + """Get values for use as keyword arguments. + + :returns: Context values with key suitable for use as kwargs to + CephBrokerRq add_op_create_*_pool methods. + :rtype: Dict[str,any] + """ + return { + k.replace('-', '_'): v + for k, v in self.op.items() + } + def validate(self): """Validate options. diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 526b95ad..084247a9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -41,6 +41,7 @@ ) from charmhelpers import deprecate from charmhelpers.core.hookenv import ( + application_name, config, service_name, local_unit, @@ -162,6 +163,17 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) +def send_application_name(relid=None): + """Send the application name down the relation. + + :param relid: Relation id to set application name in. + :type relid: str + """ + relation_set( + relation_id=relid, + relation_settings={'application-name': application_name()}) + + def send_osd_settings(): """Pass on requested OSD settings to osd units.""" try: @@ -1074,7 +1086,10 @@ def create_erasure_profile(service, profile_name, erasure_plugin_technique=None): """Create a new erasure code profile if one does not already exist for it. - Updates the profile if it exists. Please refer to [0] for more details. + Profiles are considered immutable so will not be updated if the named + profile already exists. + + Please refer to [0] for more details. 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -1110,6 +1125,11 @@ def create_erasure_profile(service, profile_name, :type erasure_plugin_technique: str :return: None. Can raise CalledProcessError, ValueError or AssertionError """ + if erasure_profile_exists(service, profile_name): + log('EC profile {} exists, skipping update'.format(profile_name), + level=WARNING) + return + plugin_techniques = { 'jerasure': [ 'reed_sol_van', @@ -1209,9 +1229,6 @@ def create_erasure_profile(service, profile_name, if scalar_mds: cmd.append('scalar-mds={}'.format(scalar_mds)) - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - check_call(cmd) diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 8f040a5e..25427697 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -750,6 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') + extra_pools = request.get('extra_pools', []) metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: @@ -758,14 +759,12 @@ def handle_create_cephfs(request, service): return {'exit-code': 1, 'stderr': msg} # Sanity check that the required pools exist - if not pool_exists(service=service, name=data_pool): - msg = "CephFS data pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if not pool_exists(service=service, name=metadata_pool): - msg = "CephFS metadata pool does not exist. 
Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + for pool_name in [data_pool, metadata_pool] + extra_pools: + if not pool_exists(service=service, name=pool_name): + msg = "CephFS pool {} does not exist. Cannot create CephFS".format( + pool_name) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} if get_cephfs(service=service): # CephFS new has already been called @@ -786,6 +785,14 @@ def handle_create_cephfs(request, service): else: log(err.output, level=ERROR) return {'exit-code': 1, 'stderr': err.output} + for pool_name in extra_pools: + cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, + pool_name] + try: + check_output(cmd) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 72e6b921..9da4dc12 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -41,6 +41,7 @@ service_stop, CompareHostReleases, write_file, + is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -54,8 +55,12 @@ storage_list, ) from charmhelpers.fetch import ( + add_source, apt_cache, - add_source, apt_install, apt_update + apt_install, + apt_purge, + apt_update, + filter_missing_packages ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -85,6 +90,9 @@ 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] +REMOVE_PACKAGES = [] +CHRONY_PACKAGE = 'chrony' + CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' KEY_MANAGERS = [ @@ -623,7 +631,7 @@ def _get_child_dirs(path): OSError if an error occurs reading the directory listing """ if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) + raise ValueError('Specified path "%s" does not exist' % path) if not os.path.isdir(path): raise ValueError('Specified path "%s" is not a directory' % path) @@ -2209,6 +2217,9 @@ def upgrade_monitor(new_version, kick_function=None): else: service_stop('ceph-mon-all') apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) kick_function() owner = ceph_user() @@ -3252,6 +3263,19 @@ def determine_packages(): return packages +def determine_packages_to_remove(): + """Determines packages for removal + + :returns: list of packages to be removed + """ + rm_packages = REMOVE_PACKAGES.copy() + if is_container(): + install_list = filter_missing_packages(CHRONY_PACKAGE) + if not install_list: + rm_packages.append(CHRONY_PACKAGE) + return rm_packages + + def bootstrap_manager(): hostname = socket.gethostname() path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) @@ -3307,7 +3331,7 @@ def apply_osd_settings(settings): present. Settings stop being applied on encountering an error. :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran succesfully. + :returns: bool. True if commands ran successfully. :raises: OSDConfigSetError """ current_settings = {} diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 2316401b..8ba19415 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. 
# +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 44b50231..56fbf922 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 8080ba6d..e2d58f59 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226,W504 +ignore = E402,E226,W503,W504 exclude = */charmhelpers From 968190528a5bec3d588cef0fb0220b06a1c545e0 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2077/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync requirements.txt files to sync to standard Change-Id: I3f7c0bce48a37fa71fe9c5445b06892f649243aa --- ceph-fs/rebuild | 2 +- ceph-fs/requirements.txt | 1 + ceph-fs/test-requirements.txt | 3 ++- ceph-fs/tox.ini | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 9d7789d7..328708e7 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -d99b6438-d02c-11ea-9216-238ea56f93d6 +89553aca-0016-11eb-86d5-fb92a6de5c09 diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 5f2fff3a..aaaa3e03 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -3,6 +3,7 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements charm-tools>=2.4.4 # importlib-resources 1.1.0 removed Python 3.5 support diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 0ab97f6e..d078e270 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -3,8 +3,9 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Lint and unit test requirements -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 requests>=2.18.4 charms.reactive diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index afd48f02..c91922e8 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -94,4 +94,4 @@ commands = {posargs} [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402,W504 +ignore = E402,W503,W504 From 6d09171a7d53ea7563f35952d5e47a29c8f530ad Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2078/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync 
requirements.txt files to sync to standard Change-Id: I13a2e11b53e290316919efca90aaa513a1f40975 --- ceph-radosgw/lib/charms_ceph/broker.py | 23 +++++++++++++------- ceph-radosgw/lib/charms_ceph/utils.py | 30 +++++++++++++++++++++++--- ceph-radosgw/requirements.txt | 1 + ceph-radosgw/test-requirements.txt | 1 + ceph-radosgw/tox.ini | 2 +- 5 files changed, 45 insertions(+), 12 deletions(-) diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 8f040a5e..25427697 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -750,6 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') + extra_pools = request.get('extra_pools', []) metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: @@ -758,14 +759,12 @@ def handle_create_cephfs(request, service): return {'exit-code': 1, 'stderr': msg} # Sanity check that the required pools exist - if not pool_exists(service=service, name=data_pool): - msg = "CephFS data pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - if not pool_exists(service=service, name=metadata_pool): - msg = "CephFS metadata pool does not exist. Cannot create CephFS" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + for pool_name in [data_pool, metadata_pool] + extra_pools: + if not pool_exists(service=service, name=pool_name): + msg = "CephFS pool {} does not exist. Cannot create CephFS".format( + pool_name) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} if get_cephfs(service=service): # CephFS new has already been called @@ -786,6 +785,14 @@ def handle_create_cephfs(request, service): else: log(err.output, level=ERROR) return {'exit-code': 1, 'stderr': err.output} + for pool_name in extra_pools: + cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, + pool_name] + try: + check_output(cmd) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} def handle_rgw_region_set(request, service): diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 72e6b921..9da4dc12 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -41,6 +41,7 @@ service_stop, CompareHostReleases, write_file, + is_container, ) from charmhelpers.core.hookenv import ( cached, @@ -54,8 +55,12 @@ storage_list, ) from charmhelpers.fetch import ( + add_source, apt_cache, - add_source, apt_install, apt_update + apt_install, + apt_purge, + apt_update, + filter_missing_packages ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -85,6 +90,9 @@ 'radosgw', 'xfsprogs', 'lvm2', 'parted', 'smartmontools'] +REMOVE_PACKAGES = [] +CHRONY_PACKAGE = 'chrony' + CEPH_KEY_MANAGER = 'ceph' VAULT_KEY_MANAGER = 'vault' KEY_MANAGERS = [ @@ -623,7 +631,7 @@ def _get_child_dirs(path): OSError if an error occurs reading the directory listing """ if not os.path.exists(path): - raise ValueError('Specfied path "%s" does not exist' % path) + raise ValueError('Specified path "%s" does not exist' % path) if not os.path.isdir(path): raise ValueError('Specified path "%s" is not a directory' % path) @@ -2209,6 +2217,9 @@ def upgrade_monitor(new_version, kick_function=None): else: service_stop('ceph-mon-all') 
apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) kick_function() owner = ceph_user() @@ -3252,6 +3263,19 @@ def determine_packages(): return packages +def determine_packages_to_remove(): + """Determines packages for removal + + :returns: list of packages to be removed + """ + rm_packages = REMOVE_PACKAGES.copy() + if is_container(): + install_list = filter_missing_packages(CHRONY_PACKAGE) + if not install_list: + rm_packages.append(CHRONY_PACKAGE) + return rm_packages + + def bootstrap_manager(): hostname = socket.gethostname() path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) @@ -3307,7 +3331,7 @@ def apply_osd_settings(settings): present. Settings stop being applied on encountering an error. :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran succesfully. + :returns: bool. True if commands ran successfully. :raises: OSDConfigSetError """ current_settings = {} diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 2316401b..8ba19415 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 44b50231..56fbf922 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -7,6 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 requests>=2.18.4 mock>=1.2 diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 8080ba6d..e2d58f59 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -116,5 +116,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -ignore = E402,E226,W504 +ignore = E402,E226,W503,W504 exclude = */charmhelpers From 0f845e7a56b1f1f64bb752212492b812d161a978 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 18 Jun 2020 13:08:14 +0000 Subject: [PATCH 2079/2699] Support ceph client over CMRs Support ceph client over CMRs of and only if permit-insecure-cmr config option has been set to true, otherwise go into a blocked state. To support CMR clients try and get client service name from relation data first before falling back to using the remote unit name. Using the remote unit name fails when the clients are connecting via a cross-model relation. The clients side change is here: https://github.com/juju/charm-helpers/pull/481 Change-Id: If9616170b8af9eac309dc6e8edd670fb5cfd8e0f Closes-Bug: #1780712 --- ceph-mon/config.yaml | 7 +++ ceph-mon/hooks/ceph_hooks.py | 82 ++++++++++++++++++++++++-- ceph-mon/unit_tests/test_ceph_hooks.py | 23 +++++++- ceph-mon/unit_tests/test_status.py | 17 ++++++ 4 files changed, 120 insertions(+), 9 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 690f7f77..b3488354 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -293,3 +293,10 @@ options: feature for upgraded clusters, the pg-autotune option should be set to 'true'. 
To disable the autotuner for new clusters, the pg-autotune option should be set to 'false'. + permit-insecure-cmr: + type: boolean + default: False + description: | + The charm does not segregate access to pools from different models properly, + this means that the correct charm settings can result in client model B + having access to the data from model A. diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index bcf68696..ccd5b569 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -46,6 +46,7 @@ Hooks, UnregisteredHookError, service_name, relations_of_type, + relations, status_set, local_unit, application_version_set) @@ -608,6 +609,22 @@ def notify_mons(): relation_settings={'nonce': nonce}) +def get_client_application_name(relid, unit): + """Retrieve client application name from relation data. + + :param relid: Relation ID + :type relid: str + :param unit: Remote unit name + :type unit: str + """ + if not unit: + unit = remote_unit() + app_name = relation_get(rid=relid, unit=unit).get( + 'application-name', + hookenv.remote_service_name(relid=relid)) + return app_name + + def handle_broker_request(relid, unit, add_legacy_response=False, recurse=True): """Retrieve broker request from relation, process, return response data. @@ -635,7 +652,7 @@ def handle_broker_request(relid, unit, add_legacy_response=False, log("Not leader - ignoring broker request", level=DEBUG) else: rsp = process_requests(settings['broker_req']) - unit_id = unit.replace('/', '-') + unit_id = settings.get('unit-name', unit).replace('/', '-') unit_response_key = 'broker-rsp-' + unit_id response.update({unit_response_key: rsp}) if add_legacy_response: @@ -757,6 +774,8 @@ def radosgw_relation(relid=None, unit=None): apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: unit = remote_unit() + if is_unsupported_cmr(unit): + return # NOTE: radosgw needs some usage OSD storage, so defer key # provision until OSD units are detected.
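The hunks above rely on the consuming charm publishing its own identity: a unit arriving over a cross-model relation is anonymised (named like 'remote-<id>/0'), so neither the application nor the unit can be inferred from the remote unit name. A minimal sketch of the client-side counterpart provided by the referenced charm-helpers change follows; the helper name is illustrative, but the relation keys match what the server-side code above reads.

    # Client-side sketch: publish application and unit name so ceph-mon
    # can key broker responses even when this unit appears as
    # 'remote-<id>/N'. The helper name is illustrative; the relation
    # keys ('application-name', 'unit-name') are the ones consumed above.
    from charmhelpers.core.hookenv import (
        application_name,
        local_unit,
        relation_set,
    )


    def announce_client_identity(relid=None):
        relation_set(
            relation_id=relid,
            relation_settings={
                'application-name': application_name(),
                'unit-name': local_unit(),
            })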
@@ -791,6 +810,8 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True): '- providing rbd-mirror client with keys') if not unit: unit = remote_unit() + if is_unsupported_cmr(unit): + return # handle broker requests first to get a updated pool map data = (handle_broker_request(relid, unit, recurse=recurse)) data.update({ @@ -829,6 +850,8 @@ def mds_relation_joined(relid=None, unit=None): rid=relid, unit=unit) if not unit: unit = remote_unit() + if is_unsupported_cmr(unit): + return public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), @@ -843,6 +866,8 @@ def mds_relation_joined(relid=None, unit=None): @hooks.hook('admin-relation-changed') @hooks.hook('admin-relation-joined') def admin_relation_joined(relid=None): + if is_unsupported_cmr(remote_unit()): + return if ceph.is_quorum(): name = relation_get('keyring-name') if name is None: @@ -865,7 +890,7 @@ def client_relation(relid=None, unit=None): if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- providing client with keys, processing broker requests') - service_name = hookenv.remote_service_name(relid=relid) + service_name = get_client_application_name(relid, unit) if not service_name: log('Unable to determine remote service name, deferring ' 'processing of broker requests') @@ -879,6 +904,8 @@ def client_relation(relid=None, unit=None): data['rbd-features'] = rbd_features if not unit: unit = remote_unit() + if is_unsupported_cmr(unit): + return data.update( handle_broker_request(relid, unit, add_legacy_response=True)) relation_set(relation_id=relid, @@ -988,9 +1015,45 @@ def update_nrpe_config(): VERSION_PACKAGE = 'ceph-common' +def is_cmr_unit(unit_name): + '''Is the remote unit connected via a cross model relation. + + :param unit_name: Name of unit + :type unit_name: str + :returns: Whether unit is connected via cmr + :rtype: bool + ''' + return unit_name.startswith('remote-') + + +def is_unsupported_cmr(unit_name): + '''If unit is connected via CMR and if that is supported. + + :param unit_name: Name of unit + :type unit_name: str + :returns: Whether unit is supported + :rtype: bool + ''' + unsupported = False + if unit_name and is_cmr_unit(unit_name): + unsupported = not config('permit-insecure-cmr') + if unsupported: + log("CMR detected and not supported", "ERROR") + return unsupported + + def assess_status(): '''Assess status of current unit''' application_version_set(get_upstream_version(VERSION_PACKAGE)) + if not config('permit-insecure-cmr'): + units = [unit + for rtype in relations() + for relid in relation_ids(reltype=rtype) + for unit in related_units(relid=relid) + if is_cmr_unit(unit)] + if units: + status_set("blocked", "Unsupported CMR relation") + return if is_unit_upgrading_set(): status_set("blocked", "Ready for do-release-upgrade and reboot. 
" @@ -1084,8 +1147,15 @@ def post_series_upgrade(): if __name__ == '__main__': - try: - hooks.execute(sys.argv) - except UnregisteredHookError as e: - log('Unknown hook {} - skipping.'.format(e)) + remote_block = False + remote_unit_name = remote_unit() + if remote_unit_name and is_cmr_unit(remote_unit_name): + remote_block = not config('permit-insecure-cmr') + if remote_block: + log("Not running hook, CMR detected and not supported", "ERROR") + else: + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) assess_status() diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 50dd5e05..b590df41 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -290,6 +290,23 @@ def test_notify_mons(self, mock_relation_ids, mock_related_units, relation_settings={ 'nonce': 'FAKE-UUID'}) + @patch.object(ceph_hooks.hookenv, 'remote_service_name') + @patch.object(ceph_hooks, 'relation_get') + @patch.object(ceph_hooks, 'remote_unit') + def test_get_client_application_name(self, remote_unit, relation_get, + remote_service_name): + relation_get.return_value = { + 'application-name': 'glance'} + remote_unit.return_value = 'glance/0' + self.assertEqual( + ceph_hooks.get_client_application_name('rel:1', None), + 'glance') + relation_get.return_value = {} + remote_service_name.return_value = 'glance' + self.assertEqual( + ceph_hooks.get_client_application_name('rel:1', None), + 'glance') + @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -414,11 +431,11 @@ def test_related_osd_multi_relation(self, @patch.object(ceph_hooks, 'config') @patch.object(ceph_hooks.ceph, 'get_named_key') @patch.object(ceph_hooks, 'get_public_addr') - @patch.object(ceph_hooks.hookenv, 'remote_service_name') + @patch.object(ceph_hooks, 'get_client_application_name') @patch.object(ceph_hooks, 'ready_for_service') def test_client_relation(self, _ready_for_service, - _remote_service_name, + _get_client_application_name, _get_public_addr, _get_named_key, _config, @@ -426,7 +443,7 @@ def test_client_relation(self, _relation_set, _get_rbd_features, _send_osd_settings): - _remote_service_name.return_value = 'glance' + _get_client_application_name.return_value = 'glance' config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = lambda key: config[key] _handle_broker_request.return_value = {} diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 01ba5c45..71b845eb 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -35,6 +35,7 @@ 'config', 'ceph', 'is_relation_made', + 'relations', 'relation_ids', 'relation_get', 'related_units', @@ -184,3 +185,19 @@ def test_no_bootstrap_not_set(self): hooks.assess_status() self.status_set.assert_called_with('blocked', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + + def test_cmr_remote_unit(self): + self.test_config.set('permit-insecure-cmr', False) + self.relations.return_value = ['client'] + self.relation_ids.return_value = ['client:1'] + self.related_units.return_value = ['remote-1'] + hooks.assess_status() + self.status_set.assert_called_with( + 'blocked', + 'Unsupported CMR relation') + self.status_set.reset_mock() + self.test_config.set('permit-insecure-cmr', True) + hooks.assess_status() + self.assertFalse( + mock.call('blocked', 'Unsupported CMR relation') in + 
self.status_set.call_args_list) From 83d4a829ef8e8993cd5af6236d87f9836d2411a6 Mon Sep 17 00:00:00 2001 From: Dan Ardelean Date: Tue, 22 Sep 2020 14:30:19 +0000 Subject: [PATCH 2080/2699] Fix pool creation for single zone setups. Deprecate 'pool-prefix' charm config. Change-Id: I34079d8975d995ea958f219e0516a972d73319f7 Closes-Bug: #1856106 Co-Authored-By: Andrei Bacos --- ceph-radosgw/config.yaml | 15 ++++++------ ceph-radosgw/hooks/hooks.py | 21 +++++++++++++++-- ceph-radosgw/unit_tests/test_actions.py | 4 ++++ .../unit_tests/test_ceph_radosgw_context.py | 8 +++---- ceph-radosgw/unit_tests/test_hooks.py | 23 ++++++++++++++++--- 5 files changed, 55 insertions(+), 16 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index bb7d4ee3..f5ae44a9 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -77,12 +77,13 @@ options: type: string default: description: | - The rados gateway stores objects in many different pools. If you would - like to have multiple rados gateways each pointing to a separate set of - pools set this prefix. The charm will then set up a new set of pools. - If your prefix has a dash in it that will be used to split the prefix - into region and zone. Please read the documentation on federated rados - gateways for more information on region and zone. + DEPRECATED, use zone instead - pool name can be inherited from the zone config + option. The rados gateway stores objects in many different pools. If you + would like to have multiple rados gateways each pointing to a separate + set of pools set this prefix. The charm will then set up a new set of pools. + If your prefix has a dash in it that will be used to split the prefix into + region and zone. Please read the documentation on federated rados gateways + for more information on region and zone. restrict-ceph-pools: type: boolean default: False @@ -414,7 +415,7 @@ options: Name of RADOS Gateway Zone Group to create for multi-site replication. zone: type: string - default: + default: default description: | Name of RADOS Gateway Zone to create for multi-site replication. This option must be specific to the local site e.g. us-west or us-east. diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 2e466142..4f47cebf 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -259,7 +259,6 @@ def _mon_relation(): relation_set(relation_id=rid, key_name=key_name) try: - # NOTE: prefer zone name if in use over pool-prefix. 
rq = ceph.get_create_rgw_pools_rq( prefix=config('zone') or config('pool-prefix')) except ValueError as e: @@ -271,6 +270,7 @@ def _mon_relation(): 'configuration?: "{}"'.format(str(e)), level=DEBUG) return + if is_request_complete(rq, relation='mon'): log('Broker request complete', level=DEBUG) CONFIGS.write_all() @@ -313,7 +313,24 @@ def _mon_relation(): .format(service_name()), level=DEBUG) service_resume(service_name()) - process_multisite_relations() + if multisite_deployment(): + process_multisite_relations() + elif is_leader(): + # In a non multi-site deployment create the + # zone using the default zonegroup and restart the service + internal_url = '{}:{}'.format( + canonical_url(CONFIGS, INTERNAL), + listen_port(), + ) + endpoints = [internal_url] + zonegroup = 'default' + zone = config('zone') + if zone not in multisite.list_zones(): + multisite.create_zone(zone, + endpoints=endpoints, + default=True, master=True, + zonegroup=zonegroup) + service_restart(service_name()) else: send_request_if_needed(rq, relation='mon') _mon_relation() diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index d9eb02c8..b88cd6a0 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -103,6 +103,7 @@ def test_promote(self): self.multisite.update_period.assert_called_once_with() def test_promote_unconfigured(self): + self.test_config.set('zone', None) actions.promote([]) self.action_fail.assert_called_once() @@ -116,6 +117,7 @@ def test_readonly(self): self.multisite.update_period.assert_called_once_with() def test_readonly_unconfigured(self): + self.test_config.set('zone', None) actions.readonly([]) self.action_fail.assert_called_once() @@ -129,6 +131,7 @@ def test_readwrite(self): self.multisite.update_period.assert_called_once_with() def test_readwrite_unconfigured(self): + self.test_config.set('zone', None) actions.readwrite([]) self.action_fail.assert_called_once() @@ -138,5 +141,6 @@ def test_tidydefaults(self): self.multisite.tidy_defaults.assert_called_once_with() def test_tidydefaults_unconfigured(self): + self.test_config.set('zone', None) actions.tidydefaults([]) self.action_fail.assert_called_once() diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 18096f11..cb0c039b 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -385,7 +385,7 @@ def _relation_get(attr, unit, rid): 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, - 'rgw_zone': None, + 'rgw_zone': 'default', 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) @@ -433,7 +433,7 @@ def _relation_get(attr, unit, rid): 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, - 'rgw_zone': None, + 'rgw_zone': 'default', 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) @@ -490,7 +490,7 @@ def _relation_get(attr, unit, rid): 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, - 'rgw_zone': None, + 'rgw_zone': 'default', 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) @@ -529,7 +529,7 @@ def _relation_get(attr, unit, rid): 'port': 70, 'client_radosgw_gateway': {'rgw init timeout': 60}, 'ipv6': False, - 'rgw_zone': None, + 'rgw_zone': 'default', 'fsid': 'testfsid', } self.assertEqual(expect, mon_ctxt()) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 
d31bcfec..0bad5884 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -68,6 +68,7 @@ 'filter_missing_packages', 'ceph_utils', 'multisite_deployment', + 'multisite', ] @@ -189,10 +190,15 @@ def test_config_changed(self, update_nrpe_config, mock_certs_joined): @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) - def test_mon_relation(self): + @patch.object(ceph_hooks, 'is_leader') + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_mon_relation(self, _config, _resolve_address, is_leader): _ceph = self.patch('ceph') _ceph.import_radosgw_key.return_value = True + is_leader.return_value = True self.relation_get.return_value = 'seckey' + self.multisite.list_zones.return_value = [] self.socket.gethostname.return_value = 'testinghostname' ceph_hooks.mon_relation() self.relation_set.assert_not_called() @@ -203,9 +209,14 @@ def test_mon_relation(self): @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) - def test_mon_relation_request_key(self): + @patch.object(ceph_hooks, 'is_leader') + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_mon_relation_request_key(self, _config, + _resolve_address, is_leader): _ceph = self.patch('ceph') _ceph.import_radosgw_key.return_value = True + is_leader.return_value = True self.relation_get.return_value = 'seckey' self.socket.gethostname.return_value = 'testinghostname' self.request_per_unit_key.return_value = True @@ -221,10 +232,15 @@ def test_mon_relation_request_key(self): @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) - def test_mon_relation_nokey(self): + @patch.object(ceph_hooks, 'is_leader') + @patch('charmhelpers.contrib.openstack.ip.resolve_address') + @patch('charmhelpers.contrib.openstack.ip.config') + def test_mon_relation_nokey(self, _config, + _resolve_address, is_leader): _ceph = self.patch('ceph') _ceph.import_radosgw_key.return_value = False self.relation_get.return_value = None + is_leader.return_value = True ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) self.service_resume.assert_not_called() @@ -493,6 +509,7 @@ class MiscMultisiteTests(CharmTestCase): 'slave_relation_changed', 'service_restart', 'service_name', + 'multisite' ] _relation_ids = { From d7147f4a3b1ec46e02825848d4f9726075ce4639 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 6 Oct 2020 13:40:18 +0200 Subject: [PATCH 2081/2699] Add BlueStore Compression support Ceph Bluestore Compression is a post-deploy configurable option and allowing to update the broker request is required. Drop code that gates the sending of pool broker request, the original issue has been fixed in the interface code and it is now safe to call multiple times. Fold Erasure Coding test into regular bundles from Mimic and up to allow testing both EC and BlueStore Compression at the same time without test bundle explosion. 
Unpin flake8 Change-Id: I9b529e61a8832a62f4db12cab8f352d468c8a3ad --- ceph-fs/src/config.yaml | 66 ++++++ ceph-fs/src/reactive/ceph_fs.py | 75 ++++-- ceph-fs/src/tests/bundles/bionic-rocky.yaml | 7 +- ceph-fs/src/tests/bundles/bionic-stein.yaml | 5 +- ceph-fs/src/tests/bundles/bionic-train.yaml | 5 +- ceph-fs/src/tests/bundles/bionic-ussuri.yaml | 5 +- .../src/tests/bundles/focal-ussuri-ec.yaml | 222 ------------------ ceph-fs/src/tests/bundles/focal-ussuri.yaml | 5 +- ceph-fs/src/tests/bundles/focal-victoria.yaml | 5 +- .../src/tests/bundles/groovy-victoria.yaml | 5 +- ceph-fs/src/tests/tests.yaml | 27 ++- ceph-fs/unit_tests/test_reactive_ceph_fs.py | 3 - 12 files changed, 169 insertions(+), 261 deletions(-) delete mode 100644 ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index f8488c4a..675c9b6e 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -192,3 +192,69 @@ options: Device class from CRUSH map to use for placement groups for erasure profile - valid values: ssd, hdd or nvme (or leave unset to not use a device class). + bluestore-compression-algorithm: + type: string + default: + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. + bluestore-compression-required-ratio: + type: float + default: + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. + bluestore-compression-max-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. 
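Each of the new options corresponds directly to a keyword argument on the pool-creation request, as the reactive handler changes below illustrate. A rough sketch of that translation, assuming the charm-helpers config() accessor (the helper name and the abbreviated option list are illustrative, not the actual charms.openstack implementation):

    # Rough sketch: collect any bluestore-compression-* options the
    # operator has set and re-key them with underscores so they can be
    # passed through as keyword arguments to the pool-creation requests.
    # Option list abbreviated; helper name illustrative.
    from charmhelpers.core.hookenv import config

    COMPRESSION_OPTS = (
        'bluestore-compression-algorithm',
        'bluestore-compression-mode',
        'bluestore-compression-required-ratio',
        'bluestore-compression-min-blob-size',
        'bluestore-compression-max-blob-size',
    )


    def bluestore_compression_kwargs():
        return {
            opt.replace('-', '_'): config(opt)
            for opt in COMPRESSION_OPTS
            if config(opt) is not None
        }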
diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index fcda7053..a9bbe94f 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -13,6 +13,9 @@ # limitations under the License. from charms import reactive + +import charmhelpers.core as ch_core + from charmhelpers.core.hookenv import ( service_name, config) @@ -50,7 +53,6 @@ def config_changed(): cephfs_charm.assess_status() -@reactive.when_not('ceph.create_pool.req.sent') @reactive.when('ceph-mds.connected') def storage_ceph_connected(ceph): ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected') @@ -78,6 +80,18 @@ def storage_ceph_connected(ceph): weight = weight - metadata_weight extra_pools = [] + bluestore_compression = None + with charm.provide_charm_instance() as cephfs_charm: + # TODO: move this whole method into the charm class and add to the + # common pool creation logic in charms.openstack. For now we reuse + # the common bluestore compression wrapper here. + try: + bluestore_compression = cephfs_charm._get_bluestore_compression() + except ValueError as e: + ch_core.hookenv.log('Invalid value(s) provided for Ceph BlueStore ' + 'compression: "{}"' + .format(str(e))) + if config('pool-type') == 'erasure-coded': # General EC plugin config plugin = config('ec-profile-plugin') @@ -115,18 +129,34 @@ def storage_ceph_connected(ceph): # Create EC data pool ec_pool_name = 'ec_{}'.format(pool_name) - ceph_mds.create_erasure_pool( - name=ec_pool_name, - erasure_profile=profile_name, - weight=ec_pool_weight, - app_name=ceph_mds.ceph_pool_app_name, - allow_ec_overwrites=True - ) - ceph_mds.create_replicated_pool( - name=pool_name, - weight=weight, - app_name=ceph_mds.ceph_pool_app_name - ) + + # NOTE(fnordahl): once we deprecate Python 3.5 support we can do + # the unpacking of the BlueStore compression arguments as part of + # the function arguments. Until then we need to build the dict + # prior to the function call. + kwargs = { + 'name': ec_pool_name, + 'erasure_profile': profile_name, + 'weight': ec_pool_weight, + 'app_name': ceph_mds.ceph_pool_app_name, + 'allow_ec_overwrites': True, + } + if bluestore_compression: + kwargs.update(bluestore_compression) + ceph_mds.create_erasure_pool(**kwargs) + + # NOTE(fnordahl): once we deprecate Python 3.5 support we can do + # the unpacking of the BlueStore compression arguments as part of + # the function arguments. Until then we need to build the dict + # prior to the function call. + kwargs = { + 'name': pool_name, + 'weight': weight, + 'app_name': ceph_mds.ceph_pool_app_name, + } + if bluestore_compression: + kwargs.update(bluestore_compression) + ceph_mds.create_replicated_pool(**kwargs) ceph_mds.create_replicated_pool( name=metadata_pool_name, weight=metadata_weight, @@ -134,15 +164,22 @@ def storage_ceph_connected(ceph): ) extra_pools = [ec_pool_name] else: - ceph_mds.create_replicated_pool( - name=pool_name, - replicas=replicas, - weight=weight, - app_name=ceph_mds.ceph_pool_app_name) + # NOTE(fnordahl): once we deprecate Python 3.5 support we can do + # the unpacking of the BlueStore compression arguments as part of + # the function arguments. Until then we need to build the dict + # prior to the function call. 
+ kwargs = { + 'name': pool_name, + 'replicas': replicas, + 'weight': weight, + 'app_name': ceph_mds.ceph_pool_app_name, + } + if bluestore_compression: + kwargs.update(bluestore_compression) + ceph_mds.create_replicated_pool(**kwargs) ceph_mds.create_replicated_pool( name=metadata_pool_name, replicas=replicas, weight=metadata_weight, app_name=ceph_mds.ceph_pool_app_name) ceph_mds.request_cephfs(service, extra_pools=extra_pools) - reactive.set_state('ceph.create_pool.req.sent') diff --git a/ceph-fs/src/tests/bundles/bionic-rocky.yaml b/ceph-fs/src/tests/bundles/bionic-rocky.yaml index 90c488c6..222a1aea 100644 --- a/ceph-fs/src/tests/bundles/bionic-rocky.yaml +++ b/ceph-fs/src/tests/bundles/bionic-rocky.yaml @@ -6,9 +6,12 @@ applications: num_units: 1 options: source: cloud:bionic-rocky + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: @@ -124,4 +127,4 @@ relations: - - 'neutron-openvswitch:amqp' - 'rabbitmq-server:amqp' - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' \ No newline at end of file + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/bionic-stein.yaml b/ceph-fs/src/tests/bundles/bionic-stein.yaml index 3b05d7c6..2e59c83e 100644 --- a/ceph-fs/src/tests/bundles/bionic-stein.yaml +++ b/ceph-fs/src/tests/bundles/bionic-stein.yaml @@ -8,9 +8,12 @@ applications: num_units: 1 options: source: *source + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/bundles/bionic-train.yaml b/ceph-fs/src/tests/bundles/bionic-train.yaml index da7feee9..3dfe9e62 100644 --- a/ceph-fs/src/tests/bundles/bionic-train.yaml +++ b/ceph-fs/src/tests/bundles/bionic-train.yaml @@ -6,9 +6,12 @@ applications: num_units: 1 options: source: cloud:bionic-train + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/bundles/bionic-ussuri.yaml b/ceph-fs/src/tests/bundles/bionic-ussuri.yaml index 5eada6c7..b479d667 100644 --- a/ceph-fs/src/tests/bundles/bionic-ussuri.yaml +++ b/ceph-fs/src/tests/bundles/bionic-ussuri.yaml @@ -5,9 +5,12 @@ applications: num_units: 1 options: source: cloud:bionic-ussuri + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml b/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml deleted file mode 100644 index 39d9fed9..00000000 --- a/ceph-fs/src/tests/bundles/focal-ussuri-ec.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - 
neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 
'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-ussuri.yaml b/ceph-fs/src/tests/bundles/focal-ussuri.yaml index 7f348f1b..39d9fed9 100644 --- a/ceph-fs/src/tests/bundles/focal-ussuri.yaml +++ b/ceph-fs/src/tests/bundles/focal-ussuri.yaml @@ -41,12 +41,15 @@ applications: num_units: 1 options: source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 to: - '3' ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/bundles/focal-victoria.yaml b/ceph-fs/src/tests/bundles/focal-victoria.yaml index d9309b36..b23a8d52 100644 --- a/ceph-fs/src/tests/bundles/focal-victoria.yaml +++ b/ceph-fs/src/tests/bundles/focal-victoria.yaml @@ -41,12 +41,15 @@ applications: num_units: 1 options: source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 to: - '3' ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/bundles/groovy-victoria.yaml b/ceph-fs/src/tests/bundles/groovy-victoria.yaml index 4cd78fd4..bdc91e12 100644 --- a/ceph-fs/src/tests/bundles/groovy-victoria.yaml +++ b/ceph-fs/src/tests/bundles/groovy-victoria.yaml @@ -41,12 +41,15 @@ applications: num_units: 1 options: source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 to: - '3' ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 + num_units: 6 storage: osd-devices: 'cinder,10G' options: diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index dd589508..9a379eaf 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,12 +1,11 @@ charm_name: ceph-fs gate_bundles: - - focal-ussuri-ec - - focal-victoria - - focal-ussuri - - bionic-ussuri - - bionic-train - - bionic-stein - - bionic-rocky + - bluestore-compression: focal-victoria + - bluestore-compression: focal-ussuri + - bluestore-compression: bionic-ussuri + - bluestore-compression: bionic-train + - bluestore-compression: bionic-stein + - bluestore-compression: bionic-rocky - bionic-queens - xenial-queens # Xenial-pike is missing because of @@ -14,18 +13,28 @@ gate_bundles: - xenial-ocata - xenial-mitaka smoke_bundles: - - bionic-stein + - bluestore-compression: bionic-stein dev_bundles: - - groovy-victoria + - bluestore-compression: groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network - zaza.openstack.charm_tests.nova.setup.create_flavors - zaza.openstack.charm_tests.nova.setup.manage_ssh_key - 
zaza.openstack.charm_tests.keystone.setup.add_demo_user + - bluestore-compression: + - zaza.openstack.charm_tests.glance.setup.add_lts_image + - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network + - zaza.openstack.charm_tests.nova.setup.create_flavors + - zaza.openstack.charm_tests.nova.setup.manage_ssh_key + - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest + - bluestore-compression: + - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests + - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest + - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - groovy-victoria diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py index 14999591..8b9be2a8 100644 --- a/ceph-fs/unit_tests/test_reactive_ceph_fs.py +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -35,9 +35,6 @@ def test_hooks(self): 'config_changed': ('ceph-mds.pools.available',), 'storage_ceph_connected': ('ceph-mds.connected',), }, - 'when_not': { - 'storage_ceph_connected': ('ceph.create_pool.req.sent',), - }, 'when_none': { 'config_changed': ('charm.paused', 'run-default-update-status',), From 07918586e22005560b603665f1bd41b9ff7ce62d Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 26 Sep 2020 18:27:01 +0100 Subject: [PATCH 2082/2699] Sync libraries & common files prior to freeze * charm-helpers sync for classic charms * charms.ceph sync for ceph charms * rebuild for reactive charms * sync tox.ini files as needed * sync requirements.txt files to sync to standard Add master branch charm-helpers to wheelhouse.txt to support bluestore changes in charms.openstack. Change-Id: I1cb0d616a3d3033d6948677326062e0eb785c2a0 --- ceph-rbd-mirror/rebuild | 2 +- ceph-rbd-mirror/requirements.txt | 1 + ceph-rbd-mirror/src/wheelhouse.txt | 1 + ceph-rbd-mirror/test-requirements.txt | 3 ++- ceph-rbd-mirror/tox.ini | 2 +- 5 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 494b96dc..d1de7d8f 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -d9a6ecf4-d02c-11ea-ba04-8b57253ede7e +8968bf8c-0016-11eb-ba17-fbdeedded6c1 diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index 5f2fff3a..aaaa3e03 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -3,6 +3,7 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements charm-tools>=2.4.4 # importlib-resources 1.1.0 removed Python 3.5 support diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index a4d92cc0..523142ae 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1 +1,2 @@ +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers psutil diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 0ab97f6e..d078e270 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -3,8 +3,9 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Lint and unit test requirements -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 requests>=2.18.4 charms.reactive diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index afd48f02..c91922e8 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -94,4 +94,4 @@ commands = {posargs} [flake8] # E402 ignore necessary for path append before sys module import in actions -ignore = E402,W504 +ignore = E402,W503,W504 From 44fce3814f4aac700f334c181aafe7b0b7438947 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 8 Oct 2020 22:01:35 +0200 Subject: [PATCH 2083/2699] Do not execute handlers in update-status hook Change-Id: I9093f83bd5ef642ad5da7d56543254ca9804b80b --- .../src/reactive/ceph_rbd_mirror_handlers.py | 37 +++++++++++-------- .../test_ceph_rbd_mirror_handlers.py | 13 +++++-- 2 files changed, 32 insertions(+), 18 deletions(-) diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 0b4fd58c..1a145eec 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -30,8 +30,11 @@ 'upgrade-charm') -@reactive.when_all('ceph-local.connected', 'ceph-remote.connected') -@reactive.when_not_all('ceph-local.available', 'ceph-remote.available') +@reactive.when_none('is-update-status-hook', + 'ceph-local.available', + 'ceph-remote.available') +@reactive.when('ceph-local.connected', + 'ceph-remote.connected') def request_keys(): with charm.provide_charm_instance() as charm_instance: for flag in ('ceph-local.connected', 'ceph-remote.connected'): @@ -43,9 +46,10 @@ def request_keys(): charm_instance.assess_status() -@reactive.when('config.changed') -@reactive.when('ceph-local.available') -@reactive.when('ceph-remote.available') +@reactive.when_none('is-update-status-hook') +@reactive.when('config.changed', + 'ceph-local.available', + 'ceph-remote.available') def config_changed(): with charm.provide_charm_instance() as charm_instance: charm_instance.upgrade_if_available([ @@ -55,8 +59,9 @@ def config_changed(): charm_instance.assess_status() -@reactive.when('ceph-local.available') 
-@reactive.when('ceph-remote.available') +@reactive.when_none('is-update-status-hook') +@reactive.when('ceph-local.available', + 'ceph-remote.available') def render_stuff(*args): with charm.provide_charm_instance() as charm_instance: for endpoint in args: @@ -79,10 +84,11 @@ def render_stuff(*args): reactive.set_flag('config.rendered') -@reactive.when('leadership.is_leader') -@reactive.when('refresh.pools') -@reactive.when('ceph-local.available') -@reactive.when('ceph-remote.available') +@reactive.when_none('is-update-status-hook') +@reactive.when('leadership.is_leader', + 'refresh.pools', + 'ceph-local.available', + 'ceph-remote.available') def refresh_pools(): for endpoint in 'ceph-local', 'ceph-remote': endpoint = reactive.endpoint_from_name(endpoint) @@ -90,10 +96,11 @@ def refresh_pools(): reactive.clear_flag('refresh.pools') -@reactive.when('leadership.is_leader') -@reactive.when('config.rendered') -@reactive.when('ceph-local.available') -@reactive.when('ceph-remote.available') +@reactive.when_none('is-update-status-hook') +@reactive.when('leadership.is_leader', + 'config.rendered', + 'ceph-local.available', + 'ceph-remote.available') def configure_pools(): local = reactive.endpoint_from_flag('ceph-local.available') remote = reactive.endpoint_from_flag('ceph-remote.available') diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index 0b2af68c..0599b63e 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -52,15 +52,22 @@ def test_hooks(self): 'ceph-local.available', 'ceph-remote.available', ), - }, - 'when_all': { 'request_keys': ( 'ceph-local.connected', 'ceph-remote.connected', ), }, - 'when_not_all': { + 'when_none': { + 'config_changed': ( + 'is-update-status-hook',), + 'render_stuff': ( + 'is-update-status-hook',), + 'refresh_pools': ( + 'is-update-status-hook',), + 'configure_pools': ( + 'is-update-status-hook',), 'request_keys': ( + 'is-update-status-hook', 'ceph-local.available', 'ceph-remote.available', ), From 655fac89a959877ce8731321ce9bac3a38a6d2bc Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 2 Oct 2020 14:36:20 -0400 Subject: [PATCH 2084/2699] Update README for Ceph EC pools This updates the README for erasure coded Ceph pools for the case of Ceph-based Object storage. The new text should be as similar as possible for all the charms that support configuration options for EC pools. See the below review for the first of these charms whose README has been updated. https://review.opendev.org/#/c/749824/ Remove section on RGW multisite replication as it is already in the CDG. The charm now points there. Add basic README template sections (Actions, Bugs, Configuration). Standardise Overview, Deployment, and Network spaces sections. Improve Access and Keystone integration sections. Change-Id: I3839c1018b9bdf0d6712d3fb2e9f95b633591615 --- ceph-radosgw/README.md | 310 ++++++++++++++++++++--------------------- 1 file changed, 155 insertions(+), 155 deletions(-) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 5c08423e..f207a35a 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -1,225 +1,225 @@ # Overview -Ceph is a distributed storage and network file system designed to provide -excellent performance, reliability and scalability. +[Ceph][ceph-upstream] is a unified, distributed storage system designed for +excellent performance, reliability, and scalability. 
-This charm deploys the RADOS Gateway, a S3 and Swift compatible HTTP gateway -for online object storage on-top of a ceph cluster. +The ceph-radosgw charm deploys the RADOS Gateway, a S3 and Swift compatible +HTTP gateway. The deployment is done within the context of an existing Ceph +cluster. -## Usage +# Usage -In order to use this charm, it is assumed that you have already deployed a ceph -storage cluster using the 'ceph' charm with something like this:: +## Configuration - juju deploy -n 3 --config ceph.yaml ceph +This section covers common and/or important configuration options. See file +`config.yaml` for the full list of options, along with their descriptions and +default values. See the [Juju documentation][juju-docs-config-apps] for details +on configuring applications. -To deploy the RADOS gateway simple do:: +#### `pool-type` - juju deploy ceph-radosgw - juju add-relation ceph-radosgw ceph - -You can then directly access the RADOS gateway by exposing the service:: - - juju expose ceph-radosgw - -The gateway can be accessed over port 80 (as show in juju status exposed -ports). - -## Access - -Note that you will need to login to one of the service units supporting the -ceph charm to generate some access credentials:: - - juju ssh ceph/0 \ - 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"' +The `pool-type` option dictates the storage pool type. See section 'Ceph pool +type' for more information. -For security reasons the ceph-radosgw charm is not set up with appropriate -permissions to administer the ceph cluster. +#### `source` -## Keystone Integration +The `source` option states the software sources. A common value is an OpenStack +UCA release (e.g. 'cloud:xenial-queens' or 'cloud:bionic-ussuri'). See [Ceph +and the UCA][cloud-archive-ceph]. The underlying host's existing apt sources +will be used if this option is not specified (this behaviour can be explicitly +chosen by using the value of 'distro'). -Ceph >= 0.55 integrates with Openstack Keystone for authentication of Swift requests. +## Ceph pool type -This is enabled by relating the ceph-radosgw service with keystone:: +Ceph storage pools can be configured to ensure data resiliency either through +replication or by erasure coding. This charm supports both types via the +`pool-type` configuration option, which can take on the values of 'replicated' +and 'erasure-coded'. The default value is 'replicated'. - juju deploy keystone - juju add-relation keystone ceph-radosgw +For this charm, the pool type will be associated with Object storage. -If you try to relate the radosgw to keystone with an earlier version of ceph the hook -will error out to let you know. - -## High availability - -When more than one unit is deployed with the [hacluster][hacluster-charm] -application the charm will bring up an HA active/active cluster. - -There are two mutually exclusive high availability options: using virtual IP(s) -or DNS. In both cases the hacluster subordinate charm is used to provide the -Corosync and Pacemaker backend HA functionality. +> **Note**: Erasure-coded pools are supported starting with Ceph Luminous. -See [OpenStack high availability][cdg-ha-apps] in the [OpenStack Charms -Deployment Guide][cdg] for details. +### Replicated pools -## Network Space support +Replicated pools use a simple replication strategy in which each written object +is copied, in full, to multiple OSDs within the cluster. 
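+A client charm does not normally create such pools directly; it files a
+broker request with ceph-mon. A hedged sketch using the charm-helpers broker
+API (the pool name is illustrative; replica_count is the knob that the
+`ceph-osd-replication-count` option described below feeds):
+
+    # Hedged sketch: request a replicated pool from ceph-mon via the
+    # broker API rather than creating it directly. Pool name is
+    # illustrative.
+    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq
+
+    rq = CephBrokerRq()
+    rq.add_op_create_replicated_pool(name='example-pool', replica_count=3)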
-This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. +The `ceph-osd-replication-count` option sets the replica count for any object +stored within the rgw pools. Increasing this value increases data resilience at +the cost of consuming more real storage in the Ceph cluster. The default value +is '3'. -API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints. +> **Important**: The `ceph-osd-replication-count` option must be set prior to + adding the relation to the ceph-mon application. Otherwise, the pool's + configuration will need to be set by interfacing with the cluster directly. -To use this feature, use the --bind option when deploying the charm: +### Erasure coded pools - juju deploy ceph-radosgw --bind "public=public-space internal=internal-space admin=admin-space" +Erasure coded pools use a technique that allows for the same resiliency as +replicated pools, yet reduces the amount of space required. Written data is +split into data chunks and error correction chunks, which are both distributed +throughout the cluster. -alternatively these can also be provided as part of a juju native bundle configuration: +> **Note**: Erasure coded pools require more memory and CPU cycles than + replicated pools do. - ceph-radosgw: - charm: cs:ceph-radosgw - num_units: 1 - bindings: - public: public-space - admin: admin-space - internal: internal-space +When using erasure coded pools for Object storage multiple pools will be +created: one erasure coded pool ('rgw.buckets.data' for storing actual RGW +data) and several replicated pools (for storing RGW omap metadata). The +`ceph-osd-replication-count` configuration option only applies to the metadata +(replicated) pools. -NOTE: Spaces must be configured in the underlying provider prior to attempting to use them. +Erasure coded pools can be configured via options whose names begin with the +`ec-` prefix. -NOTE: Existing deployments using os-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +> **Important**: It is strongly recommended to tailor the `ec-profile-k` and + `ec-profile-m` options to the needs of the given environment. These latter + options have default values of '1' and '2' respectively, which result in the + same space requirements as those of a replicated pool. -## Multi-Site replication +See [Ceph Erasure Coding][cdg-ceph-erasure-coding] in the [OpenStack Charms +Deployment Guide][cdg] for more information. -### Overview +## Deployment -This charm supports configuration of native replication between Ceph RADOS -gateway deployments. +To deploy a single RADOS gateway node within an existing Ceph cluster: -This is supported both within a single model and between different models -using cross-model relations. + juju deploy ceph-radosgw + juju add-relation ceph-radosgw:mon ceph-mon:radosgw -By default either ceph-radosgw deployment will accept write operations. +Expose the service: -### Deployment + juju expose ceph-radosgw -NOTE: example bundles for the us-west and us-east models can be found -in the bundles subdirectory of the ceph-radosgw charm. +> **Note**: The `expose` command is only required if the backing cloud blocks + traffic by default. In general, MAAS is the only cloud type that does not + employ firewalling. 
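[Editor's aside: a worked example of the space claim in the `ec-profile-k`/`ec-profile-m` note above. An erasure coded pool stores k data chunks plus m coding chunks per object, so raw usage is (k + m) / k times the logical data size, which is why the defaults of k=1, m=2 cost the same as three replicas.]

```python
# Space arithmetic behind the ec-profile note above.
def ec_space_overhead(k, m):
    return (k + m) / k


print(ec_space_overhead(1, 2))  # 3.0 -- charm defaults, same cost as 3 replicas
print(ec_space_overhead(4, 2))  # 1.5 -- half the raw space of 3x replication
```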
-NOTE: switching from a standalone deployment to a multi-site replicated -deployment is not supported. +The gateway can be accessed over port 80 (as per `juju status ceph-radosgw` +output). -To deploy in this configuration ensure that the following configuration -options are set on the ceph-radosgw charm deployments - in this example -rgw-us-east and rgw-us-west are both instances of the ceph-radosgw charm: +## Multi-site replication - rgw-us-east: - realm: replicated - zonegroup: us - zone: us-east - rgw-us-west: - realm: replicated - zonegroup: us - zone: us-west +The charm supports native replication between multiple RADOS Gateway +deployments. This is documented under [Ceph RADOS Gateway multisite +replication][cdg-ceph-radosgw-multisite] in the [OpenStack Charms Deployment +Guide][cdg]. -When deploying with this configuration the ceph-radosgw applications will -deploy into a blocked state until the master/slave (cross-model) relation -is added. +## Tenant namespacing -Typically each ceph-radosgw deployment will be associated with a separate -ceph cluster at different physical locations - in this example the deployments -are in different models ('us-east' and 'us-west'). +By default, Ceph RADOS Gateway puts all tenant buckets into the same global +namespace, disallowing multiple tenants to have buckets with the same name. +Tenant namespacing can be enabled in this charm by deploying with configuration +like: -One ceph-radosgw application acts as the initial master for the deployment - -setup the master relation endpoint as the provider of the offer for the -cross-model relation: + ceph-radosgw: + charm: cs:ceph-radosgw + num_units: 1 + options: + namespace-tenants: True - juju offer -m us-east rgw-us-east:master +Enabling tenant namespacing will place all tenant buckets into their own +namespace under their tenant id, as well as adding the tenant's ID parameter to +the Keystone endpoint registration to allow seamless integration with OpenStack. +Tenant namespacing cannot be toggled on in an existing installation as it will +remove tenant access to existing buckets. Toggling this option on an already +deployed RADOS Gateway will have no effect. -The cross-model relation offer can then be consumed in the other model and -related to the slave ceph-radosgw application: +## Access - juju consume -m us-west admin/us-east.rgw-us-east - juju add-relation -m us-west rgw-us-west:slave rgw-us-east:master +For security reasons the charm is not designed to administer the Ceph cluster. +A user (e.g. 'ubuntu') for the Ceph Object Gateway service will need to be +created manually: -Once the relation has been added the realm, zonegroup and zone configuration -will be created in the master deployment and then synced to the slave -deployment. + juju ssh ceph-mon/0 'sudo radosgw-admin user create \ + --uid="ubuntu" --display-name="Charmed Ceph"' -The current sync status can be validated from either model: +## Keystone integration (Swift) - juju ssh -m us-east ceph-mon/0 - sudo radosgw-admin sync status - realm 142eb39c-67c4-42b3-9116-1f4ffca23964 (replicated) - zonegroup 7b69f059-425b-44f5-8a21-ade63c2034bd (us) - zone 4ee3bc39-b526-4ac9-a233-64ebeacc4574 (us-east) - metadata sync no sync (zone is master) - data sync source: db876cf0-62a8-4b95-88f4-d0f543136a07 (us-west) - syncing - full sync: 0/128 shards - incremental sync: 128/128 shards - data is caught up with source +Ceph RGW supports Keystone authentication of Swift requests. 
This is enabled +by adding a relation to an existing keystone application: -Once the deployment is complete, the default zone and zonegroup can -optionally be tidied using the 'tidydefaults' action: + juju add-relation ceph-radosgw:identity-service keystone:identity-service - juju run-action -m us-west --unit rgw-us-west/0 tidydefaults +## High availability -This operation is not reversible. +When more than one unit is deployed with the [hacluster][hacluster-charm] +application the charm will bring up an HA active/active cluster. -### Failover/Recovery +There are two mutually exclusive high availability options: using virtual IP(s) +or DNS. In both cases the hacluster subordinate charm is used to provide the +Corosync and Pacemaker backend HA functionality. -In the event that the site hosting the zone which is the master for metadata -(in this example us-east) has an outage, the master metadata zone must be -failed over to the slave site; this operation is performed using the 'promote' -action: +See [OpenStack high availability][cdg-ha-apps] in the [OpenStack Charms +Deployment Guide][cdg] for details. - juju run-action -m us-west --wait rgw-us-west/0 promote +## Network spaces -Once this action has completed, the slave site will be the master for metadata -updates and the deployment will accept new uploads of data. +This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju +`v.2.0`). This feature optionally allows specific types of the application's +network traffic to be bound to subnets that the underlying hardware is +connected to. -Once the failed site has been recovered it will resync and resume as a slave -to the promoted master site (us-west in this example). +> **Note**: Spaces must be configured in the backing cloud prior to deployment. -The master metadata zone can be failed back to its original location once resync -has completed using the 'promote' action: +API endpoints can be bound to distinct network spaces supporting the network +separation of public, internal and admin endpoints. - juju run-action -m us-east --wait rgw-us-east/0 promote +For example, providing that spaces 'public-space', 'internal-space', and +'admin-space' exist, the deploy command above could look like this: -### Read/write vs Read-only + juju deploy ceph-radosgw \ + --bind "public=public-space internal=internal-space admin=admin-space" -By default all zones within a deployment will be read/write capable but only -the master zone can be used to create new containers. +Alternatively, configuration can be provided as part of a bundle: -Non-master zones can optionally be marked as read-only by using the 'readonly' -action: +```yaml + ceph-radosgw: + charm: cs:ceph-radosgw + num_units: 1 + bindings: + public: public-space + internal: internal-space + admin: admin-space +``` - juju run-action -m us-east --wait rgw-us-east/0 readonly +> **Note**: Existing ceph-radosgw units configured with the `os-admin-network`, + `os-internal-network`, `os-public-network`, `os-public-hostname`, + `os-internal-hostname`, or `os-admin-hostname` options will continue to + honour them. Furthermore, these options override any space bindings, if set. -a zone that is currently read-only can be switched to read/write mode by either -promoting it to be the current master or by using the 'readwrite' action: +## Actions - juju run-action -m us-east --wait rgw-us-east/0 readwrite +This section lists Juju [actions][juju-docs-actions] supported by the charm. 
+Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-radosgw`. If the charm is +not deployed then see file `actions.yaml`. -### Tenant Namespacing +* `pause` +* `resume` +* `promote` +* `readonly` +* `readwrite` +* `tidydefaults` -By default, Ceph Rados Gateway puts all tenant buckets into the same global -namespace, disallowing multiple tenants to have buckets with the same name. -Tenant namespacing can be enabled in this charm by deploying with configuration -like: +# Bugs - ceph-radosgw: - charm: cs:ceph-radosgw - num_units: 1 - options: - namespace-tenants: True +Please report bugs on [Launchpad][lp-bugs-charm-ceph-radosgw]. -Enabling tenant namespacing will place all tenant buckets into their own -namespace under their tenant id, as well as adding the tenant's ID parameter to -the Keystone endpoint registration to allow seamless integration with OpenStack. -Tenant namespacing cannot be toggled on in an existing installation as it will -remove tenant access to existing buckets. Toggling this option on an already -deployed Rados Gateway will have no effect. +For general charm questions refer to the OpenStack [Charm Guide][cg]. +[juju-docs-actions]: https://jaas.ai/docs/actions +[ceph-upstream]: https://ceph.io [hacluster-charm]: https://jaas.ai/hacluster [cg]: https://docs.openstack.org/charm-guide [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide [cdg-ha-apps]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ha.html#ha-applications +[cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[cdg-ceph-erasure-coding]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-erasure-coding.html +[lp-bugs-charm-ceph-radosgw]: https://bugs.launchpad.net/charm-ceph-radosgw/+filebug +[juju-docs-spaces]: https://jaas.ai/docs/spaces +[cdg-ceph-radosgw-multisite]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html From 5f717ae6812194ae787f5564967b0cbdece6087a Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 2 Oct 2020 16:03:11 -0400 Subject: [PATCH 2085/2699] Update README for Ceph EC pools This updates the README for erasure coded Ceph pools for the case of CephFS. The new text should be as similar as possible for all the charms that support configuration options for EC pools. See the below review for the first of these charms whose README has been updated. https://review.opendev.org/#/c/749824/ Other minor improvements. Change-Id: Ic6543e3241048591818358c972eadecd6ceab50c --- ceph-fs/src/README.md | 78 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 66 insertions(+), 12 deletions(-) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index a097a935..76b96fdc 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -4,11 +4,8 @@ excellent performance, reliability, and scalability. The ceph-fs charm deploys the metadata server daemon (MDS) for the Ceph -distributed file system (CephFS). It is used in conjunction with the -[ceph-mon][ceph-mon-charm] and the [ceph-osd][ceph-osd-charm] charms. - -Highly available CephFS is achieved by deploying multiple MDS servers (i.e. -multiple ceph-fs units). +distributed file system (CephFS). The deployment is done within the context of +an existing Ceph cluster. # Usage @@ -20,6 +17,11 @@ default values. 
A YAML file (e.g. `ceph-osd.yaml`) is often used to store configuration options. See the [Juju documentation][juju-docs-config-apps] for details on configuring applications. +#### `pool-type` + +The `pool-type` option dictates the storage pool type. See section 'Ceph pool +type' for more information. + #### `source` The `source` option states the software sources. A common value is an OpenStack @@ -28,18 +30,70 @@ and the UCA][cloud-archive-ceph]. The underlying host's existing apt sources will be used if this option is not specified (this behaviour can be explicitly chosen by using the value of 'distro'). -## Deployment +## Ceph pool type -We are assuming a pre-existing Ceph cluster. +Ceph storage pools can be configured to ensure data resiliency either through +replication or by erasure coding. This charm supports both types via the +`pool-type` configuration option, which can take on the values of 'replicated' +and 'erasure-coded'. The default value is 'replicated'. -To deploy a single MDS node: +For this charm, the pool type will be associated with CephFS volumes. - juju deploy ceph-fs +> **Note**: Erasure-coded pools are supported starting with Ceph Luminous. + +### Replicated pools + +Replicated pools use a simple replication strategy in which each written object +is copied, in full, to multiple OSDs within the cluster. + +The `ceph-osd-replication-count` option sets the replica count for any object +stored within the 'ceph-fs-data' cephfs pool. Increasing this value increases +data resilience at the cost of consuming more real storage in the Ceph cluster. +The default value is '3'. + +> **Important**: The `ceph-osd-replication-count` option must be set prior to + adding the relation to the ceph-mon application. Otherwise, the pool's + configuration will need to be set by interfacing with the cluster directly. + +### Erasure coded pools + +Erasure coded pools use a technique that allows for the same resiliency as +replicated pools, yet reduces the amount of space required. Written data is +split into data chunks and error correction chunks, which are both distributed +throughout the cluster. -Then add a relation to the ceph-mon application: +> **Note**: Erasure coded pools require more memory and CPU cycles than + replicated pools do. +When using erasure coded pools for CephFS file systems two pools will be +created: a replicated pool (for storing MDS metadata) and an erasure coded pool +(for storing the data written into a CephFS volume). The +`ceph-osd-replication-count` configuration option only applies to the metadata +(replicated) pool. + +Erasure coded pools can be configured via options whose names begin with the +`ec-` prefix. + +> **Important**: It is strongly recommended to tailor the `ec-profile-k` and + `ec-profile-m` options to the needs of the given environment. These latter + options have default values of '1' and '2' respectively, which result in the + same space requirements as those of a replicated pool. + +See [Ceph Erasure Coding][cdg-ceph-erasure-coding] in the [OpenStack Charms +Deployment Guide][cdg] for more information. + +## Deployment + +To deploy a single MDS node within an existing Ceph cluster: + + juju deploy ceph-fs juju add-relation ceph-fs:ceph-mds ceph-mon:mds +## High availability + +Highly available CephFS is achieved by deploying multiple MDS servers (i.e. +multiple ceph-fs units). + ## Actions This section lists Juju [actions][juju-docs-actions] supported by the charm. 
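[Editor's aside: a hedged sketch of how the two-pool CephFS layout described above could be expressed as broker operations, assuming the erasure-coding helper methods present in contemporary charmhelpers (`add_op_create_erasure_profile`, `add_op_create_erasure_pool`); treat the method names and all pool/profile names as assumptions, not the charm's actual requests.]

```python
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
# An EC profile mirroring the ec-profile-k/ec-profile-m options.
rq.add_op_create_erasure_profile(name='cephfs-ec', k=4, m=2)
# Replicated pool for MDS metadata, erasure coded pool for file data --
# the two-pool layout described above. allow_ec_overwrites is required
# for CephFS data to live on an EC pool.
rq.add_op_create_replicated_pool(name='cephfs-metadata', replica_count=3)
rq.add_op_create_erasure_pool(name='cephfs-data',
                              erasure_profile='cephfs-ec',
                              allow_ec_overwrites=True)
```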
@@ -60,10 +114,10 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide [ceph-upstream]: https://ceph.io -[ceph-mon-charm]: https://jaas.ai/ceph-mon -[ceph-osd-charm]: https://jaas.ai/ceph-osd [juju-docs-actions]: https://jaas.ai/docs/actions [juju-docs-config-apps]: https://juju.is/docs/configuring-applications [lp-bugs-charm-ceph-fs]: https://bugs.launchpad.net/charm-ceph-fs/+filebug [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA +[cdg-ceph-erasure-coding]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-erasure-coding.html From c354cee66a7e6f0e3eba5d7a365d752bf1df3e29 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 12 Oct 2020 11:20:48 +0200 Subject: [PATCH 2086/2699] Batch update to land Ubuntu Groovy support into the charms Cherry-Pick from https://github.com/juju/charm-helpers/commit/09752a15274b1bf0f42b422eb82fcf29fe4082c5 Change-Id: I1baf66021be290985c58a7ea5e484fd80e09b4cf --- ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 3edc0687..a3ec6947 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,7 +25,8 @@ 'cosmic', 'disco', 'eoan', - 'focal' + 'focal', + 'groovy' ) From 6088946dc6464017a22ac6eef937d3d3513b94fe Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 12 Oct 2020 11:21:00 +0200 Subject: [PATCH 2087/2699] Batch update to land Ubuntu Groovy support into the charms Cherry-Pick from https://github.com/juju/charm-helpers/commit/09752a15274b1bf0f42b422eb82fcf29fe4082c5 Change-Id: I19962a89fd54de38d66203c210b4cd9236bf9a19 --- ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 3edc0687..a3ec6947 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,7 +25,8 @@ 'cosmic', 'disco', 'eoan', - 'focal' + 'focal', + 'groovy' ) From 57251d87ac35526bc127b0e7a6356e83c1bf0665 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 12 Oct 2020 11:21:24 +0200 Subject: [PATCH 2088/2699] Batch update to land Ubuntu Groovy support into the charms Cherry-Pick from https://github.com/juju/charm-helpers/commit/09752a15274b1bf0f42b422eb82fcf29fe4082c5 Change-Id: I6f48e3508bc004278eef4ffa275b4d2cfc07112a --- ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 3edc0687..a3ec6947 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -25,7 +25,8 @@ 'cosmic', 'disco', 'eoan', - 'focal' + 'focal', + 'groovy' ) From 152ba9f478277184f672144a146c90d7b82a4189 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Oct 2020 12:16:03 +0000 Subject: [PATCH 2089/2699] Ensure unit-name is sent to ceph-mon When deploying with CMRs ensure 
unit-name is passed to ceph-mon. Change-Id: I6d56f3ec832193463069cb523c5dbb6e56982928 --- ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 526b95ad..738d734d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -2198,6 +2198,7 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()}) def has_broker_rsp(rid=None, unit=None): From e2186a19ae0997cc2d51a28dbbba541e9c0b75d1 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 9 Oct 2020 14:55:50 -0400 Subject: [PATCH 2090/2699] Expose BlueStore Explain how BlueStore vs traditional filesystems are selected. Improve config.yaml correspondingly. Move the 'bluestore' option's location in that file. Remove blank lines for consistency. Change-Id: Iebc21bdcac742a437719afb53f26729abbf8e87f --- ceph-osd/README.md | 18 ++++++++++++++++++ ceph-osd/config.yaml | 30 ++++++++++++++++++------------ 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 22b86500..91e20009 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -18,6 +18,17 @@ default values. A YAML file (e.g. `ceph-osd.yaml`) is often used to store configuration options. See the [Juju documentation][juju-docs-config-apps] for details on configuring applications. +#### `bluestore` + +The `bluestore` option specifies whether the +[BlueStore][upstream-ceph-bluestore] storage backend is used for all OSD +devices. The feature is enabled by default (value 'True'). If set to 'True', +this option overrides the `osd-format` option as BlueStore does not use a +traditional filesystem. + +> **Important**: This option has no effect unless Ceph Luminous (or greater) is + in use. + #### `customize-failure-domain` The `customize-failure-domain` option determines how a Ceph CRUSH map is @@ -38,6 +49,12 @@ The `osd-devices` option lists what block devices can be used for OSDs across the cluster. See section 'Storage devices' for an elaboration on this fundamental topic. +#### `osd-format` + +The `osd-format` option specifies what filesystem to use for all OSD devices +('xfs' or 'ext4'). The default value is 'xfs'. This option only applies when +Ceph Luminous (or greater) is in use and option `bluestore` is set to 'False'. + #### `source` The `source` option states the software sources. A common value is an OpenStack @@ -418,4 +435,5 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. 
[lp-bugs-charm-ceph-osd]: https://bugs.launchpad.net/charm-ceph-osd/+filebug [cdg-install-openstack]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/install-openstack.html [upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets +[upstream-ceph-bluestore]: https://docs.ceph.com/en/latest/rados/configuration/storage-devices/#bluestore [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index cb3239fa..19fd2e3e 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -83,6 +83,15 @@ options: where the specified journal device does not exist on a node. . Only supported with ceph >= 0.48.3. + bluestore: + type: boolean + default: True + description: | + Enable BlueStore storage backend for OSD devices. + . + Only supported with ceph >= 12.2.0. + . + Setting to 'False' will use FileStore as the storage format. bluestore-wal: type: string default: @@ -128,7 +137,6 @@ options: . A default value is not set as it is calculated by ceph-disk (before Luminous) or the charm itself, when ceph-volume is used (Luminous and above). - bluestore-block-db-size: type: int default: 0 @@ -141,24 +149,22 @@ options: . A default value is not set as it is calculated by ceph-disk (before Luminous) or the charm itself, when ceph-volume is used (Luminous and above). - osd-format: type: string default: xfs description: | - Format of filesystem to use for OSD devices; supported formats include: + Format of filesystem to use for OSD devices. Supported formats include: . - xfs (Default >= 0.48.3) - ext4 (Only option < 0.48.3) + xfs (Default with >= ceph 0.48.3) + ext4 (Only option < ceph 0.48.3) btrfs (experimental and not recommended) . - Only supported with ceph >= 0.48.3. - bluestore: - type: boolean - default: True - description: | - Enable bluestore storage format for OSD devices; Only applies for Ceph - Luminous or later. + Only supported with >= ceph 0.48.3. + . + Used with FileStore storage backend. + . + Always applies prior to ceph 12.2.0. Otherwise, only applies when the + "bluestore" option is False. osd-encrypt: type: boolean default: False From 1a4674c92a5cc21f889e11c735de1fdec376ad05 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 14 Oct 2020 10:45:14 -0400 Subject: [PATCH 2091/2699] Update README for stable release Change-Id: Ie2dfda984899b870dd7d3295fcf6b291d50dfe80 --- ceph-iscsi/README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index b671695a..68001da6 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -4,10 +4,6 @@ The ceph-iscsi charm deploys the [Ceph iSCSI gateway service][ceph-iscsi-upstream]. The charm is intended to be used in conjunction with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms. -> **Warning**: This charm is in a preview state and should not be used in - production. See the [OpenStack Charm Guide][cg-preview-charms] for more - information on preview charms. - # Usage ## Configuration From 923ceb06013115b059c4680fef7506c449aeef94 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 14 Oct 2020 16:06:40 +0100 Subject: [PATCH 2092/2699] Rebuild reactive charms to pick up latest charm-helpers release. v0.20.18 has been released and needs incorporating into the reactive charms. 
Change-Id: If81ca0c3fefb20318358e16feb6fea7415a09e83 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 328708e7..0b82e555 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -89553aca-0016-11eb-86d5-fb92a6de5c09 +4fd31136-0e2c-11eb-b79e-a72d45c8e7f5 From 5d0b644cae3dfc18df74a88b506bb82c819a5adb Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 14 Oct 2020 16:06:40 +0100 Subject: [PATCH 2093/2699] Rebuild reactive charms to pick up latest charm-helpers release. v0.20.18 has been released and needs incorporating into the reactive charms. Change-Id: I61d670cdbfe28a02f13045a77b720684e28dd0af --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index d1de7d8f..6d82ba59 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -8968bf8c-0016-11eb-ba17-fbdeedded6c1 +4fdfb6f2-0e2c-11eb-8136-c75ae9682e61 From 8b9654716ab9dbb58a271fd3dc15b006d31f7b63 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 19 Oct 2020 11:05:08 +0200 Subject: [PATCH 2094/2699] Add blurb about Ceph BlueStore compression Change-Id: Ie6a2d1684b5f935017bb37e53f37e8b30a3fa6e9 --- ceph-radosgw/README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index f207a35a..5b864ae9 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -81,6 +81,19 @@ Erasure coded pools can be configured via options whose names begin with the See [Ceph Erasure Coding][cdg-ceph-erasure-coding] in the [OpenStack Charms Deployment Guide][cdg] for more information. +## Ceph BlueStore compression + +This charm supports [BlueStore inline compression][ceph-bluestore-compression] +for its associated Ceph storage pool(s). The feature is enabled by assigning a +compression mode via the `bluestore-compression-mode` configuration option. The +default behaviour is to disable compression. + +The efficiency of compression depends heavily on what type of data is stored +in the pool and the charm provides a set of configuration options to fine tune +the compression behaviour. + +> **Note**: BlueStore compression is supported starting with Ceph Mimic. + ## Deployment To deploy a single RADOS gateway node within an existing Ceph cluster: @@ -223,3 +236,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. 
[lp-bugs-charm-ceph-radosgw]: https://bugs.launchpad.net/charm-ceph-radosgw/+filebug [juju-docs-spaces]: https://jaas.ai/docs/spaces [cdg-ceph-radosgw-multisite]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html +[ceph-bluestore-compression]: https://docs.ceph.com/en/latest/rados/configuration/bluestore-config-ref/#inline-compression From 34c7d19a0825bcf75aa106f7b19cb01f546d3fec Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Tue, 20 Oct 2020 13:18:39 +0200 Subject: [PATCH 2095/2699] Add blurb about Ceph BlueStore compression Change-Id: I9e40bf5a0110655bc6b7ac97b9b5128cbc1c26bd --- ceph-fs/src/README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 76b96fdc..6acd4310 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -82,6 +82,19 @@ Erasure coded pools can be configured via options whose names begin with the See [Ceph Erasure Coding][cdg-ceph-erasure-coding] in the [OpenStack Charms Deployment Guide][cdg] for more information. +## Ceph BlueStore compression + +This charm supports [BlueStore inline compression][ceph-bluestore-compression] +for its associated Ceph storage pool(s). The feature is enabled by assigning a +compression mode via the `bluestore-compression-mode` configuration option. The +default behaviour is to disable compression. + +The efficiency of compression depends heavily on what type of data is stored +in the pool and the charm provides a set of configuration options to fine tune +the compression behaviour. + +> **Note**: BlueStore compression is supported starting with Ceph Mimic. + ## Deployment To deploy a single MDS node within an existing Ceph cluster: @@ -121,3 +134,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [lp-bugs-charm-ceph-fs]: https://bugs.launchpad.net/charm-ceph-fs/+filebug [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA [cdg-ceph-erasure-coding]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-erasure-coding.html +[ceph-bluestore-compression]: https://docs.ceph.com/en/latest/rados/configuration/bluestore-config-ref/#inline-compression From 2a8c26214e33f7678900c5c0c6c38146c8cf8452 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Mon, 12 Oct 2020 13:20:13 +1300 Subject: [PATCH 2096/2699] Add actions to reweight and remove OSDs. Adds action change-osd-weight, to allow changing the weight of an OSD. Adds action purge-osd, to remove an OSD entirely from the crush map. 
Change-Id: Ic1f468c96931a136ba897d4a02b8cd43dfdf056b Closes-bug: #1761048 --- ceph-mon/README.md | 2 + ceph-mon/actions.yaml | 24 +++++ ceph-mon/actions/change-osd-weight | 1 + ceph-mon/actions/change_osd_weight.py | 45 ++++++++++ ceph-mon/actions/purge-osd | 1 + ceph-mon/actions/purge_osd.py | 90 +++++++++++++++++++ .../test_action_change_osd_weight.py | 38 ++++++++ ceph-mon/unit_tests/test_action_purge_osd.py | 74 +++++++++++++++ 8 files changed, 275 insertions(+) create mode 120000 ceph-mon/actions/change-osd-weight create mode 100755 ceph-mon/actions/change_osd_weight.py create mode 120000 ceph-mon/actions/purge-osd create mode 100755 ceph-mon/actions/purge_osd.py create mode 100644 ceph-mon/unit_tests/test_action_change_osd_weight.py create mode 100644 ceph-mon/unit_tests/test_action_purge_osd.py diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 3bec4b07..27af091d 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -147,6 +147,7 @@ Actions allow specific operations to be performed on a per-unit basis. To display action descriptions run `juju actions ceph-mon`. If the charm is not deployed then see file `actions.yaml`. +* `change-osd-weight` * `copy-pool` * `create-cache-tier` * `create-crush-rule` @@ -163,6 +164,7 @@ deployed then see file `actions.yaml`. * `pool-get` * `pool-set` * `pool-statistics` +* `purge-osd` * `remove-cache-tier` * `remove-pool-snapshot` * `rename-pool` diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index d6e5e36a..6dc940a1 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -350,3 +350,27 @@ unset-noout: description: "Unset ceph noout across the cluster." security-checklist: description: Validate the running configuration against the OpenStack security guides checklist +purge-osd: + description: "Removes an OSD from a cluster map, removes its authentication key, removes the OSD from the OSD map. The OSD must have zero weight before running this action, to avoid excessive I/O on the cluster." + params: + osd: + type: integer + description: "ID of the OSD to remove, e.g. for osd.53, supply 53." + i-really-mean-it: + type: boolean + description: "This must be toggled to enable actually performing this action." + required: + - osd + - i-really-mean-it +change-osd-weight: + description: "Set the crush weight of an OSD to the new value supplied." + params: + osd: + type: integer + description: "ID of the OSD to operate on, e.g. for osd.53, supply 53." + weight: + type: number + description: "The new weight of the OSD, must be a decimal number, e.g. 1.04" + required: + - osd + - weight diff --git a/ceph-mon/actions/change-osd-weight b/ceph-mon/actions/change-osd-weight new file mode 120000 index 00000000..07705325 --- /dev/null +++ b/ceph-mon/actions/change-osd-weight @@ -0,0 +1 @@ +change_osd_weight.py \ No newline at end of file diff --git a/ceph-mon/actions/change_osd_weight.py b/ceph-mon/actions/change_osd_weight.py new file mode 100755 index 00000000..9a517349 --- /dev/null +++ b/ceph-mon/actions/change_osd_weight.py @@ -0,0 +1,45 @@ +#! /usr/bin/env python3 +# +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Changes the crush weight of an OSD.""" + +import sys + +sys.path.append("lib") +sys.path.append("hooks") + +from charmhelpers.core.hookenv import function_fail, function_get, log +from charms_ceph.utils import reweight_osd + + +def crush_reweight(osd_num, new_weight): + """Run reweight_osd to change OSD weight.""" + try: + result = reweight_osd(str(osd_num), str(new_weight)) + except Exception as e: + log(e) + function_fail("Reweight failed due to exception") + return + + if not result: + function_fail("Reweight failed to complete") + return + + +if __name__ == "__main__": + osd_num = function_get("osd") + new_weight = function_get("weight") + crush_reweight(osd_num, new_weight) diff --git a/ceph-mon/actions/purge-osd b/ceph-mon/actions/purge-osd new file mode 120000 index 00000000..7ff58b21 --- /dev/null +++ b/ceph-mon/actions/purge-osd @@ -0,0 +1 @@ +purge_osd.py \ No newline at end of file diff --git a/ceph-mon/actions/purge_osd.py b/ceph-mon/actions/purge_osd.py new file mode 100755 index 00000000..29328075 --- /dev/null +++ b/ceph-mon/actions/purge_osd.py @@ -0,0 +1,90 @@ +#! /usr/bin/env python3 +# +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Removes an OSD from a cluster map. + +Runs the ceph osd purge command, or earlier equivalents, removing an OSD from +the cluster map, removes its authentication key, removes the OSD from the OSD +map. +""" + +from subprocess import ( + check_call, + CalledProcessError, +) + +import sys +sys.path.append('lib') +sys.path.append('hooks') + + +from charmhelpers.core.hookenv import ( + function_get, + log, + function_fail +) +from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.contrib.storage.linux import ceph +from charms_ceph.utils import get_osd_weight + + +def purge_osd(osd): + """Run the OSD purge action. 
+ + :param osd: the OSD ID to operate on + """ + svc = 'admin' + osd_str = str(osd) + osd_name = "osd.{}".format(osd_str) + current_osds = ceph.get_osds(svc) + if osd not in current_osds: + function_fail("OSD {} is not in the current list of OSDs".format(osd)) + return + + osd_weight = get_osd_weight(osd_name) + if osd_weight > 0: + function_fail("OSD has weight {}, must have zero weight before " + "this operation".format(osd_weight)) + return + + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + if not function_get('i-really-mean-it'): + function_fail('i-really-mean-it is a required parameter') + return + if luminous_or_later: + cmds = [ + ["ceph", "osd", "out", osd_name], + ['ceph', 'osd', 'purge', osd_str, '--yes-i-really-mean-it'] + ] + else: + cmds = [ + ["ceph", "osd", "out", osd_name], + ["ceph", "osd", "crush", "remove", "osd.{}".format(osd)], + ["ceph", "auth", "del", osd_name], + ['ceph', 'osd', 'rm', osd_str], + ] + for cmd in cmds: + try: + check_call(cmd) + except CalledProcessError as e: + log(e) + function_fail("OSD Purge for OSD {} failed".format(osd)) + return + + +if __name__ == '__main__': + osd = function_get("osd") + purge_osd(osd) diff --git a/ceph-mon/unit_tests/test_action_change_osd_weight.py b/ceph-mon/unit_tests/test_action_change_osd_weight.py new file mode 100644 index 00000000..d3ce3ff4 --- /dev/null +++ b/ceph-mon/unit_tests/test_action_change_osd_weight.py @@ -0,0 +1,38 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for reweight_osd action.""" + +from actions import change_osd_weight as action +from mock import mock +from test_utils import CharmTestCase + + +class ReweightTestCase(CharmTestCase): + """Run tests for action.""" + + def setUp(self): + """Init mocks for test cases.""" + super(ReweightTestCase, self).setUp( + action, ["function_get", "function_fail"] + ) + + @mock.patch("actions.change_osd_weight.reweight_osd") + def test_reweight_osd(self, _reweight_osd): + """Test reweight_osd action has correct calls.""" + _reweight_osd.return_value = True + osd_num = 4 + new_weight = 1.2 + action.crush_reweight(osd_num, new_weight) + print(_reweight_osd.calls) + _reweight_osd.assert_has_calls([mock.call("4", "1.2")]) diff --git a/ceph-mon/unit_tests/test_action_purge_osd.py b/ceph-mon/unit_tests/test_action_purge_osd.py new file mode 100644 index 00000000..64d4f6fd --- /dev/null +++ b/ceph-mon/unit_tests/test_action_purge_osd.py @@ -0,0 +1,74 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for purge_osd action.""" + +from actions import purge_osd as action +from mock import mock +from test_utils import CharmTestCase + + +class PurgeTestCase(CharmTestCase): + """Run tests for action.""" + + def setUp(self): + """Init mocks for test cases.""" + super(PurgeTestCase, self).setUp( + action, ["check_call", "function_get", "function_fail", "open"] + ) + + @mock.patch("actions.purge_osd.get_osd_weight") + @mock.patch("actions.purge_osd.cmp_pkgrevno") + @mock.patch("charmhelpers.contrib.storage.linux.ceph.get_osds") + def test_purge_osd(self, _get_osds, _cmp_pkgrevno, _get_osd_weight): + """Test purge_osd action has correct calls.""" + _get_osds.return_value = [0, 1, 2, 3, 4, 5] + _cmp_pkgrevno.return_value = 1 + _get_osd_weight.return_value = 0 + osd = 4 + action.purge_osd(osd) + cmds = [ + mock.call(["ceph", "osd", "out", "osd.4"]), + mock.call( + ["ceph", "osd", "purge", str(osd), "--yes-i-really-mean-it"] + ), + ] + self.check_call.assert_has_calls(cmds) + + @mock.patch("actions.purge_osd.get_osd_weight") + @mock.patch("actions.purge_osd.cmp_pkgrevno") + @mock.patch("charmhelpers.contrib.storage.linux.ceph.get_osds") + def test_purge_invalid_osd( + self, _get_osds, _cmp_pkgrevno, _get_osd_weight + ): + """Test purge_osd action captures bad OSD string.""" + _get_osds.return_value = [0, 1, 2, 3, 4, 5] + _cmp_pkgrevno.return_value = 1 + _get_osd_weight.return_value = 0 + osd = 99 + action.purge_osd(osd) + self.function_fail.assert_called() + + @mock.patch("actions.purge_osd.get_osd_weight") + @mock.patch("actions.purge_osd.cmp_pkgrevno") + @mock.patch("charmhelpers.contrib.storage.linux.ceph.get_osds") + def test_purge_osd_weight_high( + self, _get_osds, _cmp_pkgrevno, _get_osd_weight + ): + """Test purge_osd action fails when OSD has weight >0.""" + _get_osds.return_value = [0, 1, 2, 3, 4, 5] + _cmp_pkgrevno.return_value = 1 + _get_osd_weight.return_value = 2.5 + osd = "4" + action.purge_osd(osd) + self.function_fail.assert_called() From f31402dcab48af3f417c135f4fcabe539e3f07bb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Oct 2020 17:56:12 +0000 Subject: [PATCH 2097/2699] Ensure the right key is selected Ensure the right key is selected on pre-systemd deploys. Whether to request unit specific keys is already gated on the request_per_unit_key *1 this patch applies the same logic to _key_name for selecting the key. *1 https://github.com/openstack/charm-ceph-radosgw/blob/master/hooks/hooks.py#L258 Also update testing to use cephx auth. 
Change-Id: I92fe75fb7f483cc70b35e48587cf376a16d856a5 Closes-Bug: #1899676 --- ceph-radosgw/hooks/hooks.py | 14 +++++++-- ceph-radosgw/hooks/multisite.py | 29 +++++++++++++++++-- .../bundles/bionic-queens-namespaced.yaml | 1 - ceph-radosgw/tests/bundles/bionic-queens.yaml | 1 - .../bundles/bionic-rocky-namespaced.yaml | 1 - ceph-radosgw/tests/bundles/bionic-rocky.yaml | 1 - .../bundles/bionic-stein-namespaced.yaml | 1 - ceph-radosgw/tests/bundles/bionic-stein.yaml | 1 - ceph-radosgw/tests/bundles/trusty-mitaka.yaml | 1 - .../bundles/xenial-mitaka-namespaced.yaml | 1 - ceph-radosgw/tests/bundles/xenial-mitaka.yaml | 1 - ceph-radosgw/tests/bundles/xenial-ocata.yaml | 1 - ceph-radosgw/tests/bundles/xenial-pike.yaml | 1 - ceph-radosgw/tests/bundles/xenial-queens.yaml | 1 - ceph-radosgw/unit_tests/test_hooks.py | 1 + ceph-radosgw/unit_tests/test_multisite.py | 11 +++++++ 16 files changed, 51 insertions(+), 16 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 4f47cebf..71beace3 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -255,7 +255,9 @@ def mon_relation(rid=None, unit=None): @restart_on_change(restart_map()) def _mon_relation(): key_name = 'rgw.{}'.format(socket.gethostname()) + legacy = True if request_per_unit_key(): + legacy = False relation_set(relation_id=rid, key_name=key_name) try: @@ -315,7 +317,7 @@ def _mon_relation(): if multisite_deployment(): process_multisite_relations() - elif is_leader(): + elif ready_for_service(legacy=legacy) and is_leader(): # In a non multi-site deployment create the # zone using the default zonegroup and restart the service internal_url = '{}:{}'.format( @@ -325,7 +327,15 @@ def _mon_relation(): endpoints = [internal_url] zonegroup = 'default' zone = config('zone') - if zone not in multisite.list_zones(): + if zone == 'default': + # If the requested zone is 'default' then the charm can + # race with radosgw systemd process in creating it. So, + # retry the zone list if it returns an empty list. + existing_zones = multisite.list_zones(retry_on_empty=True) + else: + existing_zones = multisite.list_zones() + log('Existing zones {}'.format(existing_zones), level=DEBUG) + if zone not in existing_zones: multisite.create_zone(zone, endpoints=endpoints, default=True, master=True, diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index a7ddbe9c..52c142e8 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -17,6 +17,7 @@ import functools import subprocess import socket +import utils import charmhelpers.core.hookenv as hookenv import charmhelpers.core.decorators as decorators @@ -48,7 +49,10 @@ def _call(cmd): def _key_name(): """Determine the name of the cephx key for the local unit""" - return 'rgw.{}'.format(socket.gethostname()) + if utils.request_per_unit_key(): + return 'rgw.{}'.format(socket.gethostname()) + else: + return 'radosgw.gateway' def _list(key): @@ -66,6 +70,9 @@ def _list(key): ] try: result = json.loads(_check_output(cmd)) + hookenv.log("Results: {}".format( + result), + level=hookenv.DEBUG) if isinstance(result, dict): return result['{}s'.format(key)] else: @@ -74,9 +81,27 @@ def _list(key): return [] +@decorators.retry_on_exception(num_retries=5, base_delay=3, + exc_type=ValueError) +def list_zones(retry_on_empty=False): + """ + List zones + + :param retry_on_empty: Whether to retry if no zones are returned. 
+ :type retry_on_empty: bool + :return: List of specified entities found + :rtype: list + :raises: ValueError + """ + _zones = _list('zone') + if retry_on_empty and not _zones: + hookenv.log("No zones found", level=hookenv.DEBUG) + raise ValueError("No zones found") + return _zones + + list_realms = functools.partial(_list, 'realm') list_zonegroups = functools.partial(_list, 'zonegroup') -list_zones = functools.partial(_list, 'zone') list_users = functools.partial(_list, 'user') diff --git a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml index 807d0927..a3d4fec3 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml @@ -23,7 +23,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/bionic-queens.yaml b/ceph-radosgw/tests/bundles/bionic-queens.yaml index 5538461a..c7a4393f 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml index b78f0c59..277995f9 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml @@ -23,7 +23,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/bionic-rocky.yaml b/ceph-radosgw/tests/bundles/bionic-rocky.yaml index 4210aa84..292229fd 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml index 461c277f..62e7f7c2 100644 --- a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml @@ -23,7 +23,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/bionic-stein.yaml b/ceph-radosgw/tests/bundles/bionic-stein.yaml index 58b0d375..b9bc6c12 100644 --- a/ceph-radosgw/tests/bundles/bionic-stein.yaml +++ b/ceph-radosgw/tests/bundles/bionic-stein.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml index 119e1d8c..b56a0129 100644 --- a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml +++ b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:trusty/percona-cluster num_units: 1 diff --git 
a/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml index 9fecfdd4..a7af5178 100644 --- a/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml @@ -23,7 +23,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml index 603a7813..92c05a58 100644 --- a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml +++ b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/xenial-ocata.yaml b/ceph-radosgw/tests/bundles/xenial-ocata.yaml index 6c7a7fe3..8b910432 100644 --- a/ceph-radosgw/tests/bundles/xenial-ocata.yaml +++ b/ceph-radosgw/tests/bundles/xenial-ocata.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/xenial-pike.yaml b/ceph-radosgw/tests/bundles/xenial-pike.yaml index 63d31d2f..c9fd580a 100644 --- a/ceph-radosgw/tests/bundles/xenial-pike.yaml +++ b/ceph-radosgw/tests/bundles/xenial-pike.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/tests/bundles/xenial-queens.yaml b/ceph-radosgw/tests/bundles/xenial-queens.yaml index 38492821..e4fc7a5f 100644 --- a/ceph-radosgw/tests/bundles/xenial-queens.yaml +++ b/ceph-radosgw/tests/bundles/xenial-queens.yaml @@ -22,7 +22,6 @@ applications: num_units: 3 options: source: *source - auth-supported: 'none' percona-cluster: charm: cs:~openstack-charmers-next/percona-cluster num_units: 1 diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 0bad5884..eedb504d 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -69,6 +69,7 @@ 'ceph_utils', 'multisite_deployment', 'multisite', + 'ready_for_service', ] diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index f3e5f357..4ea5bcbf 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -31,17 +31,28 @@ class TestMultisiteHelpers(CharmTestCase): 'subprocess', 'socket', 'hookenv', + 'utils', ] def setUp(self): super(TestMultisiteHelpers, self).setUp(multisite, self.TO_PATCH) self.socket.gethostname.return_value = 'testhost' + self.utils.request_per_unit_key.return_value = True def _testdata(self, funcname): return os.path.join(os.path.dirname(__file__), 'testdata', '{}.json'.format(funcname)) + def test___key_name(self): + self.assertEqual( + multisite._key_name(), + 'rgw.testhost') + self.utils.request_per_unit_key.return_value = False + self.assertEqual( + multisite._key_name(), + 'radosgw.gateway') + def test_create_realm(self): with open(self._testdata(whoami()), 'rb') as f: self.subprocess.check_output.return_value = f.read() From fcfb7622fc67ba09f6d7dcb376b62f1b136a0d21 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 22 Oct 2020 14:52:25 +0000 Subject: [PATCH 
2098/2699] Add missing openstack-origin to test bundles Change-Id: Ia2544d044aef0848319284fe2a87fa85d66eb3da Closes-Bug: #1901036 --- ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml | 1 + ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml | 1 + ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml | 1 + ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml | 3 +++ ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml | 3 +++ ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml | 1 + 6 files changed, 10 insertions(+) diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml index 56721c70..de113f1d 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml @@ -21,6 +21,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: cloud:bionic-rocky cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml index caba8e03..fccde4dd 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml @@ -21,6 +21,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: cloud:bionic-rocky cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml index aefc4fb5..46b2d14d 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml @@ -21,6 +21,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: cloud:bionic-stein cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml index 071f9d3a..834975e3 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml @@ -21,6 +21,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: cloud:bionic-train cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 @@ -32,6 +33,8 @@ applications: nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + options: + openstack-origin: cloud:bionic-train ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml index 7e1ef38c..11956c5c 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml @@ -21,6 +21,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: cloud:bionic-ussuri cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 @@ -32,6 +33,8 @@ applications: nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml index e6ff0fe3..8dd138a0 100644 --- a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml +++ 
b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml @@ -48,6 +48,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: *openstack-origin cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph From 7cc8c48cd5073657131713e3ef9407ab7c8a6725 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Thu, 22 Oct 2020 12:55:10 -0400 Subject: [PATCH 2099/2699] Clarify config.yaml re dir-based OSDs Closes-Bug: #1901058 Change-Id: I47a51e17366f3b5cc873b83c5ac92bbd368bf503 --- ceph-osd/config.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 19fd2e3e..b9e5423f 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -62,8 +62,9 @@ options: used across all service units, in addition to any volumes attached via the --storage flag during deployment. . - For ceph >= 0.56.6 these can also be directories instead of devices - the - charm assumes anything not starting with /dev is a directory instead. + For ceph < 14.2.0 (Nautilus) these can also be directories instead of + devices. If the value does not start with "/dev" then it will be + interpreted as a directory. bdev-enable-discard: type: string default: auto From b04976c83887eef1241823468677be3c68606f33 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Oct 2020 09:47:49 +0000 Subject: [PATCH 2100/2699] Add bluestore compression support Add bluestore compression support and update update_status to work with changes in ops_openstack.core. Depends-On: Id04426c564b9413d50c5c28a49bce9511142a801 Depends-On: I3953d28029d6daa6d771617c596a6e75fbacf258 Change-Id: I1941a13fc402ae91d3fc091e3f181ac49e3c2768 --- ceph-iscsi/README.md | 13 ++++ ceph-iscsi/config.yaml | 60 +++++++++++++++++++ ceph-iscsi/src/charm.py | 36 ++++++++--- ceph-iscsi/test-requirements.txt | 1 + ceph-iscsi/tests/bundles/focal-ec.yaml | 2 +- ceph-iscsi/tests/bundles/focal.yaml | 2 +- ceph-iscsi/tests/tests.yaml | 1 + .../unit_tests/test_ceph_iscsi_charm.py | 9 ++- 8 files changed, 113 insertions(+), 11 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 68001da6..4fa38828 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -11,6 +11,19 @@ with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms. See file `config.yaml` for the full list of options, along with their descriptions and default values. +## Ceph BlueStore compression + +This charm supports [BlueStore inline compression][ceph-bluestore-compression] +for its associated Ceph storage pool(s). The feature is enabled by assigning a +compression mode via the `bluestore-compression-mode` configuration option. The +default behaviour is to disable compression. + +The efficiency of compression depends heavily on what type of data is stored +in the pool and the charm provides a set of configuration options to fine tune +the compression behaviour. + +**Note**: BlueStore compression is supported starting with Ceph Mimic. + ## Deployment We are assuming a pre-existing Ceph cluster. diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index 4edf2aad..47127af6 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -180,3 +180,63 @@ options: Device class from CRUSH map to use for placement groups for erasure profile - valid values: ssd, hdd or nvme (or leave unset to not use a device class). + bluestore-compression-algorithm: + type: string + default: + description: | + Compressor to use (if any) for pools requested by this charm. + . 
+ NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. + bluestore-compression-required-ratio: + type: float + default: + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: + description: | + Chunks larger than this are broken into smaller blobs of at most + bluestore compression max blob size before being compressed on pools + requested by this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm.
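To make the intent of these options concrete: options left unset are simply omitted, and the rest translate into keyword arguments on the pool-creation broker request. The sketch below is a minimal, hypothetical rendering of that mapping; the charm itself relies on get_bluestore_compression() from ops_openstack (see the src/charm.py diff that follows), and the helper name here is invented for illustration only.

def bluestore_compression_kwargs(config):
    """Collect the bluestore-compression-* options that were set.

    Returns keyword arguments in create-pool style, e.g.
    {'compression_mode': 'aggressive'}; options left at their empty
    default are dropped so Ceph defaults still apply.
    """
    prefix = 'bluestore-'
    return {
        key[len(prefix):].replace('-', '_'): value
        for key, value in config.items()
        if key.startswith(prefix) and value is not None
    }

# Example: only explicitly configured options are forwarded.
assert bluestore_compression_kwargs({
    'bluestore-compression-mode': 'aggressive',
    'bluestore-compression-algorithm': None,
}) == {'compression_mode': 'aggressive'}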
diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index d9a60231..d7b961b1 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -41,6 +41,7 @@ import ops_openstack.adapters import ops_openstack.core +import ops_openstack.plugins.classes import gwcli_client import cryptography.hazmat.primitives.serialization as serialization logger = logging.getLogger(__name__) @@ -132,7 +133,8 @@ class CephISCSIGatewayAdapters( } -class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): +class CephISCSIGatewayCharmBase( + ops_openstack.plugins.classes.BaseCephClientCharm): """Ceph iSCSI Base Charm.""" _stored = StoredState() @@ -173,6 +175,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm): def __init__(self, framework): """Setup adapters and observers.""" super().__init__(framework) + super().register_status_check(self.custom_status_check) logging.info("Using %s class", self.release) self._stored.set_default( target_created=False, @@ -210,6 +213,9 @@ def __init__(self, framework): self.framework.observe( self.on.config_changed, self.render_config) + self.framework.observe( + self.on.config_changed, + self.request_ceph_pool) self.framework.observe( self.on.upgrade_charm, self.render_config) @@ -270,7 +276,21 @@ def metadata_pool_name(self): def request_ceph_pool(self, event): """Request pools from Ceph cluster.""" + print("request_ceph_pool") + if not self.ceph_client.broker_available: + logging.info("Cannot request ceph setup at this time") + return logging.info("Requesting replicated pool") + try: + bcomp_kwargs = self.get_bluestore_compression() + except ValueError as e: + # The end user has most likely provided an invalid value for + # a configuration option. Just log the traceback here, the + # end user will be notified by assess_status() called at + # the end of the hook execution.
+ logging.warn('Caught ValueError, invalid value provided for ' + 'configuration?: "{}"'.format(str(e))) + return self.ceph_client.create_replicated_pool( self.config_get('gateway-metadata-pool')) weight = self.config_get('ceph-pool-weight') @@ -320,7 +340,8 @@ def request_ceph_pool(self, event): name=self.data_pool_name, erasure_profile=profile_name, weight=weight, - allow_ec_overwrites=True + allow_ec_overwrites=True, + **bcomp_kwargs ) self.ceph_client.create_replicated_pool( name=self.metadata_pool_name, @@ -330,7 +351,8 @@ def request_ceph_pool(self, event): self.ceph_client.create_replicated_pool( name=self.data_pool_name, replicas=replicas, - weight=weight) + weight=weight, + **bcomp_kwargs) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( 'ceph-iscsi', @@ -425,14 +447,12 @@ def on_tls_app_config_ready(self, event): def custom_status_check(self): """Custom update status checks.""" if ch_host.is_container(): - self.unit.status = ops.model.BlockedStatus( + return ops.model.BlockedStatus( 'Charm cannot be deployed into a container') - return False if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS: - self.unit.status = ops.model.BlockedStatus( + return ops.model.BlockedStatus( '{} is an invalid unit count'.format(self.peers.unit_count)) - return False - return True + return ops.model.ActiveStatus() # Actions diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index da47a9b4..358e1bc3 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -7,6 +7,7 @@ mock>=1.2 flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 +psutil # oslo.i18n dropped py35 support oslo.i18n<4.0.0 git+https://github.com/openstack-charmers/zaza.git#egg=zaza diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml index a5627293..1dc9b338 100644 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -32,7 +32,7 @@ applications: charm: ../../ceph-iscsi.charm num_units: 2 options: - gateway-metadata-pool: tmbtil + gateway-metadata-pool: iscsi-foo-metadata pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 61057dd8..a980504b 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -32,7 +32,7 @@ applications: charm: ../../ceph-iscsi.charm num_units: 2 options: - gateway-metadata-pool: tmbtil + gateway-metadata-pool: iscsi-foo-metadata to: - '0' - '1' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 9f8e4bdf..7371f249 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -9,6 +9,7 @@ configure: - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup tests: - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest + - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation target_deploy_status: vault: workload-status: blocked diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 4adf580c..d07f17ae 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -127,6 +127,13 @@ def patch_all(self): setattr(self, method, self.patch(method)) +class _CephISCSIGatewayCharmBase(charm.CephISCSIGatewayCharmBase): + + @staticmethod + def get_bluestore_compression(): + return {} + + class 
TestCephISCSIGatewayCharmBase(CharmTestCase): PATCHES = [ @@ -139,7 +146,7 @@ class TestCephISCSIGatewayCharmBase(CharmTestCase): def setUp(self): super().setUp(charm, self.PATCHES) self.harness = Harness( - charm.CephISCSIGatewayCharmBase, + _CephISCSIGatewayCharmBase, ) self.gwc = MagicMock() self.gwcli_client.GatewayClient.return_value = self.gwc From 21d27f961a9e7631ea31e35c683cdea2cce339a7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 2 Nov 2020 13:41:42 +0000 Subject: [PATCH 2101/2699] Fix an incorrect module name in requirements Change-Id: Icbc894bad7e1a4860671f11bc405699e800e14a3 --- ceph-iscsi/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index 5e7ce8e2..8cfee5a6 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -3,4 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack -git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=ca_client +git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates From efde3be49dbae4e6731f3cc51246cd37f96419f7 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 8 Oct 2020 18:25:46 +0200 Subject: [PATCH 2102/2699] Forward broker requests on rbd-mirror relation To be able to support mirroring of pools with advanced features the RBD Mirror charm needs more information about the intent behind pools requested in a deployment. We solve this by forwarding all the broker requests in the deployment. It is up to the consumer of the rbd-mirror relation to filter the requests and relay the ones eligible for use on a remote cluster. Change-Id: I16196053bee93bdc4e5c62f5467d9e786b047b30 --- ceph-mon/hooks/ceph_hooks.py | 37 ++++++++++++++++++++++- ceph-mon/unit_tests/test_ceph_hooks.py | 42 +++++++++++++++++++++++++- 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ccd5b569..44e41744 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -82,6 +82,7 @@ from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.templating import render from charmhelpers.contrib.storage.linux.ceph import ( + CephBrokerRq, CephConfContext, OSD_SETTING_EXCEPTIONS, enable_pg_autoscale, @@ -625,6 +626,37 @@ def get_client_application_name(relid, unit): return app_name +def retrieve_client_broker_requests(): + """Retrieve broker requests from client-type relations. + + :returns: De-duplicated broker requests. + :rtype: List[CephBrokerRq] + """ + def _get_request(relation_data): + if 'broker_req' in relation_data: + rq = CephBrokerRq(raw_request_data=relation_data['broker_req']) + yield rq.request_id, rq + # Note that empty return from generator produces empty generator and + # not None, ref PEP 479 + return + + # we use a dictionary with request_id as key to deduplicate the list. + # we cannot use the list(set([])) trick here as CephBrokerRq is an + # unhashable type.
We also cannot just pass on the raw request either + # as we need to intelligently compare them to avoid false negatives + # due to reordering of keys + return { + request_id: request + # NOTE(fnordahl): the ``rbd-mirror`` endpoint is omitted here as it is + # typically a consumer of the output of this function + for endpoint in ('client', 'mds', 'radosgw') + for relid in relation_ids(endpoint) + for unit in related_units(relid) + for request_id, request in _get_request( + relation_get(rid=relid, unit=unit)) + }.values() + + def handle_broker_request(relid, unit, add_legacy_response=False, recurse=True): """Retrieve broker request from relation, process, return response data. @@ -817,7 +849,10 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True): data.update({ 'auth': config('auth-supported'), 'ceph-public-address': get_public_addr(), - 'pools': json.dumps(ceph.list_pools_detail(), sort_keys=True) + 'pools': json.dumps(ceph.list_pools_detail(), sort_keys=True), + 'broker_requests': json.dumps( + [rq.request for rq in retrieve_client_broker_requests()], + sort_keys=True), }) cluster_addr = get_cluster_addr() if cluster_addr: diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index b590df41..80eedddd 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -792,6 +792,8 @@ def test_per_unit_radosgw_key(self): class RBDMirrorRelationTestCase(test_utils.CharmTestCase): TO_PATCH = [ + 'related_units', + 'relation_ids', 'relation_get', 'get_cluster_addr', 'get_public_addr', @@ -810,6 +812,14 @@ class RBDMirrorRelationTestCase(test_utils.CharmTestCase): test_key = 'OTQ1MDdiODYtMmZhZi00M2IwLTkzYTgtZWI0MGRhNzdmNzBlCg==' + class FakeCephBrokerRq(object): + + def __init__(self, raw_request_data=None): + if raw_request_data: + self.__dict__ = { + k.replace('-', '_'): v + for k, v in raw_request_data.items()} + def setUp(self): super(RBDMirrorRelationTestCase, self).setUp(ceph_hooks, self.TO_PATCH) self.relation_get.side_effect = self.test_relation.get @@ -823,15 +833,25 @@ def setUp(self): self.get_public_addr.return_value = '198.51.100.10' self.ceph.list_pools_detail.return_value = {'pool': {}} + @patch.object(ceph_hooks, 'retrieve_client_broker_requests') @patch.object(ceph_hooks, 'notify_client') - def test_rbd_mirror_relation(self, _notify_client): + def test_rbd_mirror_relation(self, + _notify_client, + _retrieve_client_broker_requests): self.handle_broker_request.return_value = {} base_relation_settings = { 'auth': self.test_config.get('auth-supported'), 'ceph-public-address': '198.51.100.10', 'ceph-cluster-address': '192.0.2.10', 'pools': json.dumps({'pool': {}}), + 'broker_requests': '["fakejsonstr0", "fakejsonstr1"]', } + _retrieve_client_broker_requests.return_value = [ + self.FakeCephBrokerRq(raw_request_data={ + 'request': 'fakejsonstr0'}), + self.FakeCephBrokerRq(raw_request_data={ + 'request': 'fakejsonstr1'}), + ] ceph_hooks.rbd_mirror_relation('rbd-mirror:51', 'ceph-rbd-mirror/0') self.handle_broker_request.assert_called_with( 'rbd-mirror:51', 'ceph-rbd-mirror/0', recurse=True) @@ -871,3 +891,23 @@ def test_rbd_mirror_relation(self, _notify_client): self.relation_set.assert_called_with( relation_id='rbd-mirror:42', relation_settings=key_relation_settings) + + @patch.object(ceph_hooks, 'CephBrokerRq') + def test_retrieve_client_broker_requests(self, _CephBrokerRq): + self.maxDiff = None + self.relation_ids.side_effect = lambda endpoint: { + 'client': ['ceph-client:0'], + 'mds':
['ceph-client:1'], + 'radosgw': ['ceph-client:2'], + }.get(endpoint) + self.related_units.return_value = ['unit/0', 'unit/1', 'unit/3'] + self.relation_get.side_effect = lambda **kwargs: { + 'ceph-client:0': {'broker_req': {'request-id': 'fakeid0'}}, + 'ceph-client:1': {'broker_req': {'request-id': 'fakeid1'}}, + 'ceph-client:2': {}, + }.get(kwargs['rid'], {}) + + _CephBrokerRq.side_effect = self.FakeCephBrokerRq + + for req in ceph_hooks.retrieve_client_broker_requests(): + self.assertIn(req.request_id, ('fakeid0', 'fakeid1')) From a14eac9ceb289dfa81fd1d5f48d8bf87743a30a3 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 5 Nov 2020 12:43:58 +0100 Subject: [PATCH 2103/2699] Add Groovy to the test gate Change-Id: I273eeb41f9750ddb32ae1ca5933278ac8af5c4b7 --- .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 1 + ceph-osd/lib/charms_ceph/broker.py | 2 +- ceph-osd/lib/charms_ceph/utils.py | 25 +++++++++++++++---- ceph-osd/tests/tests.yaml | 3 +-- 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 0aa797c4..23e4adf5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -230,7 +230,7 @@ ('ussuri', ['2.24.0', '2.25.0']), ('victoria', - ['2.25.0']), + ['2.25.0', '2.26.0']), ]) # >= Liberty version->codename mapping diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 084247a9..0f69631d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -2215,6 +2215,7 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()}) def has_broker_rsp(rid=None, unit=None): diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 25427697..d00baedc 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -750,7 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', []) + extra_pools = request.get('extra_pools', None) or [] metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 9da4dc12..52d380b4 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -2141,6 +2141,8 @@ def roll_monitor_cluster(new_version, upgrade_key): # A sorted list of osd unit names mon_sorted_list = sorted(monitor_list) + # Install packages immediately but defer restarts to when it's our time. + upgrade_monitor(new_version, restart_daemons=False) try: position = mon_sorted_list.index(my_name) log("upgrade position: {}".format(position)) @@ -2182,7 +2184,7 @@ def noop(): pass -def upgrade_monitor(new_version, kick_function=None): +def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. 
@@ -2207,6 +2209,22 @@ def upgrade_monitor(new_version, kick_function=None): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) kick_function() + + try: + apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) + except subprocess.CalledProcessError as err: + log("Upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + if not restart_daemons: + log("Packages upgraded but not restarting daemons yet.") + return + try: if systemd(): service_stop('ceph-mon') @@ -2216,10 +2234,7 @@ def upgrade_monitor(new_version, kick_function=None): service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) + kick_function() owner = ceph_user() diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 5a116be2..1f6354e8 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-osd gate_bundles: + - groovy-victoria - focal-victoria - focal-ussuri - bionic-ussuri @@ -14,8 +15,6 @@ gate_bundles: - trusty-mitaka smoke_bundles: - bionic-train -dev_bundles: - - groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From fc34b0497e433a1547f407ad248d19eee39aef07 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Thu, 8 Oct 2020 21:21:11 +0200 Subject: [PATCH 2104/2699] Add BlueStore Compression support Drop support for forwarding requests for manually created Erasure Coded pools. The initial implementation was based on an incomplete implementation of EC support in the supporting libraries. Make use of the forwarding of broker requests to support both BlueStore Compression and Erasure Coded pools created through the broker request protocol by charms in the source model. There is currently no support for forwarding or influencing these properties for manually created pools.
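In practice this boils down to filtering the forwarded requests to the create-pool operations tagged for RBD use and replaying them against the remote cluster. A hedged usage sketch follows, with invented request data; the method names match the diff below, the pool names are purely illustrative, and the snippet only runs inside a reactive hook context:

import charms_openstack.charm as charm

# Hypothetical broker requests as forwarded by ceph-mon over the relation.
broker_requests = [
    {'api-version': 1,
     'ops': [{'op': 'create-pool', 'name': 'cinder-ceph',
              'app-name': 'rbd'}]},
    {'api-version': 1,
     'ops': [{'op': 'create-pool', 'name': 'gnocchi',
              'app-name': 'notrbd'}]},
]

with charm.provide_charm_instance() as charm_instance:
    # Keep only create-pool ops destined for RBD; the second request is
    # dropped because its app-name does not match.
    rq = charm_instance.collapse_and_filter_broker_requests(
        broker_requests, set(('create-pool',)),
        require_vp={'app-name': 'rbd'})
    # rq now carries the single 'cinder-ceph' op and can be relayed to
    # the remote cluster, as configure_pools() below does via
    # remote.maybe_send_rq(rq).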
Depends-On: I0d4ed457e1d59eabed3340f5dc7d8353d5d66f04 Depends-On: I16196053bee93bdc4e5c62f5467d9e786b047b30 Change-Id: I19f66f8594b9bdada5365582ce98365039235f9d --- ceph-rbd-mirror/.gitignore | 2 + .../lib/charm/openstack/ceph_rbd_mirror.py | 46 +++++++++++ .../src/reactive/ceph_rbd_mirror_handlers.py | 63 ++++++++++----- .../test_ceph_rbd_mirror_handlers.py | 32 +------- ...est_lib_charm_openstack_ceph_rbd_mirror.py | 76 +++++++++++++++++++ 5 files changed, 169 insertions(+), 50 deletions(-) diff --git a/ceph-rbd-mirror/.gitignore b/ceph-rbd-mirror/.gitignore index bcfbbf60..0446b60e 100644 --- a/ceph-rbd-mirror/.gitignore +++ b/ceph-rbd-mirror/.gitignore @@ -5,4 +5,6 @@ build .coverage cover/ +layers/ +interfaces/ *.swp diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 4b3270e1..0eb87cab 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -24,6 +24,7 @@ import charms_openstack.plugins import charmhelpers.core as ch_core +import charmhelpers.contrib.storage.linux.ceph as ch_ceph class CephRBDMirrorCharmRelationAdapters( @@ -156,3 +157,48 @@ def mirror_pool_enable(self, pool): subprocess.check_call(base_cmd + ['peer', 'add', pool, 'client.{}@remote' .format(self.ceph_id)]) + + def pools_in_broker_request(self, rq, ops_to_check=None): + """Extract pool names touched by a broker request. + + :param rq: Ceph Broker Request Object + :type rq: ch_ceph.CephBrokerRq + :param ops_to_check: Set providing which ops to check + :type ops_to_check: Optional[Set[str]] + :returns: Set of pool names + :rtype: Set[str] + """ + assert rq.api_version == 1 + ops_to_check = ops_to_check or set(('create-pool',)) + result_set = set() + for op in rq.ops: + if op['op'] in ops_to_check: + result_set.add(op['name']) + return result_set + + def collapse_and_filter_broker_requests(self, broker_requests, + allowed_ops, require_vp=None): + """Extract allowed ops from broker requests into one collapsed request. 
+ + :param broker_requests: List of broker requests + :type broker_requests: List[ch_ceph.CephBrokerRq] + :param allowed_ops: Set of ops to allow + :type allowed_ops: Set + :param require_vp: Map of required key-value pairs in op + :type require_vp: Optional[Dict[str,any]] + :returns: Collapsed broker request + :rtype: Optional[ch_ceph.CephBrokerRq] + """ + require_vp = require_vp or {} + new_rq = ch_ceph.CephBrokerRq() + for rq in broker_requests: + assert rq['api-version'] == 1 + for op in rq['ops']: + if op['op'] in allowed_ops: + for k, v in require_vp.items(): + if k not in op or op[k] != v: + break + else: + new_rq.add_op(op) + if len(new_rq.ops): + return new_rq diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index 1a145eec..baadf55a 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -18,6 +18,7 @@ import charms_openstack.charm as charm import charmhelpers.core as ch_core +import charmhelpers.contrib.storage.linux.ceph as ch_ceph charms_openstack.bus.discover() @@ -105,26 +106,50 @@ def configure_pools(): local = reactive.endpoint_from_flag('ceph-local.available') remote = reactive.endpoint_from_flag('ceph-remote.available') with charm.provide_charm_instance() as charm_instance: + rq = charm_instance.collapse_and_filter_broker_requests( + local.broker_requests, set(('create-pool',)), + require_vp={'app-name': 'rbd'}) + remote_rq = charm_instance.collapse_and_filter_broker_requests( + remote.broker_requests, set(('create-pool',)), + require_vp={'app-name': 'rbd'}) + pools_in_rq = charm_instance.pools_in_broker_request( + rq) if rq else set() + pools_in_rq |= charm_instance.pools_in_broker_request( + remote_rq) if remote_rq else set() for pool, attrs in charm_instance.eligible_pools(local.pools).items(): if not (charm_instance.mirror_pool_enabled(pool) and charm_instance.mirror_pool_has_peers(pool)): + ch_core.hookenv.log('Enabling mirroring for pool "{}"' + .format(pool), + level=ch_core.hookenv.INFO) charm_instance.mirror_pool_enable(pool) - pg_num = attrs['parameters'].get('pg_num', None) - max_bytes = attrs['quota'].get('max_bytes', None) - max_objects = attrs['quota'].get('max_objects', None) - if 'erasure_code_profile' in attrs['parameters']: - ec_profile = attrs['parameters'].get( - 'erasure_code_profile', None) - remote.create_erasure_pool(pool, - erasure_profile=ec_profile, - pg_num=pg_num, - app_name='rbd', - max_bytes=max_bytes, - max_objects=max_objects) - else: - size = attrs['parameters'].get('size', None) - remote.create_replicated_pool(pool, replicas=size, - pg_num=pg_num, - app_name='rbd', - max_bytes=max_bytes, - max_objects=max_objects) + if (pool not in pools_in_rq and + 'erasure_code_profile' not in attrs['parameters']): + # A pool exists for which there is no broker request, which + # means it is a manually created pool. We will forward creation + # of replicated pools but forwarding of manually created + # Erasure Coded pools is not supported. + pg_num = attrs['parameters'].get('pg_num') + max_bytes = attrs['quota'].get('max_bytes') + max_objects = attrs['quota'].get('max_objects') + size = attrs['parameters'].get('size') + ch_core.hookenv.log('Adding manually created pool "{}" to ' + 'request.'
+ .format(pool), + level=ch_core.hookenv.INFO) + if not rq: + rq = ch_ceph.CephBrokerRq() + rq.add_op_create_replicated_pool( + pool, + replica_count=size if not size else int(size), + pg_num=pg_num if not pg_num else int(pg_num), + app_name='rbd', + max_bytes=max_bytes if not max_bytes else int(max_bytes), + max_objects=max_objects if not max_objects else int( + max_objects), + ) + ch_core.hookenv.log('Request for evaluation: "{}"' + .format(rq), + level=ch_core.hookenv.DEBUG) + if rq: + remote.maybe_send_rq(rq) diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index 0599b63e..1b692a54 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -181,34 +181,4 @@ def test_configure_pools(self): 'cinder-ceph') self.crm_charm.mirror_pool_enable.assert_called_once_with( 'cinder-ceph') - endpoint_remote.create_replicated_pool.assert_called_once_with( - 'cinder-ceph', replicas=3, pg_num=42, app_name='rbd', - max_bytes=1024, max_objects=51) - self.assertFalse(endpoint_remote.create_erasure_pool.called) - self.endpoint_from_flag.side_effect = [endpoint_local, - endpoint_remote] - self.crm_charm.mirror_pool_enabled.return_value = True - self.crm_charm.mirror_pool_has_peers.return_value = True - self.crm_charm.mirror_pool_enabled.reset_mock() - self.crm_charm.mirror_pool_enable.reset_mock() - handlers.configure_pools() - self.crm_charm.mirror_pool_enabled.assert_called_once_with( - 'cinder-ceph') - self.crm_charm.mirror_pool_has_peers.assert_called_once_with( - 'cinder-ceph') - self.assertFalse(self.crm_charm.mirror_pool_enable.called) - endpoint_local.pools = { - 'cinder-ceph': { - 'applications': {'rbd': {}}, - 'parameters': {'pg_num': 42, 'erasure_code_profile': 'prof'}, - 'quota': {'max_bytes': 1024, 'max_objects': 51}, - }, - } - self.endpoint_from_flag.side_effect = [endpoint_local, - endpoint_remote] - endpoint_remote.create_replicated_pool.reset_mock() - self.crm_charm.eligible_pools.return_value = endpoint_local.pools - handlers.configure_pools() - endpoint_remote.create_erasure_pool.assert_called_once_with( - 'cinder-ceph', erasure_profile='prof', pg_num=42, app_name='rbd', - max_bytes=1024, max_objects=51) + endpoint_remote.maybe_send_rq.assert_called_once_with(mock.ANY) diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 93d02ba3..fee11651 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -116,3 +116,79 @@ def test_mirror_pool_has_peers(self): 'mode': 'pool', 'peers': []} self.assertFalse(crmc.mirror_pool_has_peers('apool')) + + def test_pools_in_broker_request(self): + rq = mock.MagicMock() + rq.api_version = 1 + rq.ops = [{'op': 'create-pool', 'name': 'fakepool'}] + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + self.assertIn('fakepool', crmc.pools_in_broker_request(rq)) + + def test_collapse_and_filter_broker_requests(self): + self.patch_object(ceph_rbd_mirror.ch_ceph, 'CephBrokerRq') + + class FakeCephBrokerRq(object): + + def __init__(self): + self.ops = [] + + def add_op(self, op): + self.ops.append(op) + + self.CephBrokerRq.side_effect = FakeCephBrokerRq + + broker_requests = [ + { + 'api-version': 1, + 'ops': [ + { + 'op': 'create-pool', + 'name': 'pool-rq0', + 'app-name': 'rbd', + }, + ] + }, + 
{ + 'api-version': 1, + 'ops': [ + { + 'op': 'create-pool', + 'name': 'pool-rq1', + 'app-name': 'notrbd', + }, + ] + }, + { + 'api-version': 1, + 'ops': [ + { + 'op': 'create-pool', + 'name': 'pool-rq2', + 'app-name': 'rbd', + 'someotherkey': 'value', + }, + ] + }, + ] + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + rq = crmc.collapse_and_filter_broker_requests( + broker_requests, + set(('create-pool',)), + require_vp={'app-name': 'rbd'}) + self.assertDictEqual( + rq.ops[0], + {'app-name': 'rbd', 'name': 'pool-rq0', 'op': 'create-pool'}) + self.assertDictEqual( + rq.ops[1], + {'app-name': 'rbd', 'name': 'pool-rq2', 'op': 'create-pool', + 'someotherkey': 'value'}) + self.assertTrue(len(rq.ops) == 2) + rq = crmc.collapse_and_filter_broker_requests( + broker_requests, + set(('create-pool',)), + require_vp={'app-name': 'rbd', 'someotherkey': 'value'}) + self.assertDictEqual( + rq.ops[0], + {'app-name': 'rbd', 'name': 'pool-rq2', 'op': 'create-pool', + 'someotherkey': 'value'}) + self.assertTrue(len(rq.ops) == 1) From 4eefc7153ec4140939e13b197591007af04e5a5f Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 5 Nov 2020 12:43:42 +0100 Subject: [PATCH 2105/2699] Add Groovy to the test gate Also sync libraries Change-Id: I4e9d276edde7fb46ebf6b641edb3ad5df86cd040 --- .../contrib/openstack/cert_utils.py | 124 +++++++++++++----- .../charmhelpers/contrib/openstack/ip.py | 16 +++ .../charmhelpers/contrib/openstack/utils.py | 2 +- .../contrib/storage/linux/ceph.py | 13 ++ ceph-mon/lib/charms_ceph/broker.py | 2 +- ceph-mon/lib/charms_ceph/utils.py | 25 +++- ceph-mon/tests/tests.yaml | 3 +- 7 files changed, 146 insertions(+), 39 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index b494af64..1eb21542 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -34,12 +34,14 @@ WARNING, ) from charmhelpers.contrib.openstack.ip import ( - ADMIN, resolve_address, get_vip_in_network, - INTERNAL, - PUBLIC, - ADDRESS_MAP) + ADDRESS_MAP, + get_default_api_bindings, +) +from charmhelpers.contrib.network.ip import ( + get_relation_ip, +) from charmhelpers.core.host import ( mkdir, @@ -113,44 +115,118 @@ def get_request(self): return req -def get_certificate_request(json_encode=True): - """Generate a certificatee requests based on the network confioguration +def get_certificate_request(json_encode=True, bindings=None): + """Generate certificate requests based on the network configuration
+ :rtype: Union[dict, json] """ + if bindings: + # Add default API bindings to bindings list + bindings = set(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() req = CertRequest(json_encode=json_encode) req.add_hostname_cn() # Add os-hostname entries - for net_type in [INTERNAL, ADMIN, PUBLIC]: - net_config = config(ADDRESS_MAP[net_type]['override']) + _sans = get_certificate_sans() + + # Handle specific hostnames per binding + for binding in bindings: + hostname_override = config(ADDRESS_MAP[binding]['override']) try: - net_addr = resolve_address(endpoint_type=net_type) + net_addr = resolve_address(endpoint_type=binding) ip = network_get_primary_address( - ADDRESS_MAP[net_type]['binding']) + ADDRESS_MAP[binding]['binding']) addresses = [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) - if net_config: + # Add hostname certificate request + if hostname_override: req.add_entry( - net_type, - net_config, + binding, + hostname_override, addresses) - else: - # There is network address with no corresponding hostname. - # Add the ip to the hostname cert to allow for this. - req.add_hostname_cn_ip(addresses) + # Remove hostname specific addresses from _sans + for addr in addresses: + try: + _sans.remove(addr) + except (ValueError, KeyError): + pass + except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " - "local address found".format(net_type), WARNING) + "local address found".format(binding), WARNING) + # Gurantee all SANs are covered + # These are network addresses with no corresponding hostname. + # Add the ips to the hostname cert to allow for this. + req.add_hostname_cn_ip(_sans) return req.get_request() +def get_certificate_sans(bindings=None): + """Get all possible IP addresses for certificate SANs. + """ + _sans = [unit_get('private-address')] + if bindings: + # Add default API bindings to bindings list + bindings = set(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + + for binding in bindings: + # Check for config override + try: + net_config = config(ADDRESS_MAP[binding]['config']) + except KeyError: + # There is no configuration network for this binding name + net_config = None + # Using resolve_address is likely redundant. Keeping it here in + # case there is an edge case it handles. 
+ net_addr = resolve_address(endpoint_type=binding) + ip = get_relation_ip(binding, cidr_network=net_config) + _sans = _sans + [net_addr, ip] + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + _sans.append(vip) + return set(_sans) + + def create_ip_cert_links(ssl_dir, custom_hostname_link=None): """Create symlinks for SAN records :param ssl_dir: str Directory to create symlinks in :param custom_hostname_link: str Additional link to be created """ + + # This includes the hostname cert and any specific bindng certs: + # admin, internal, public + req = get_certificate_request(json_encode=False)["cert_requests"] + # Specific certs + for cert_req in req.keys(): + requested_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(cert_req)) + requested_key = os.path.join( + ssl_dir, + 'key_{}'.format(cert_req)) + for addr in req[cert_req]['sans']: + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(requested_cert) and not os.path.isfile(cert): + os.symlink(requested_cert, cert) + os.symlink(requested_key, key) + + # Handle custom hostnames hostname = get_hostname(unit_get('private-address')) hostname_cert = os.path.join( ssl_dir, @@ -158,18 +234,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): hostname_key = os.path.join( ssl_dir, 'key_{}'.format(hostname)) - # Add links to hostname cert, used if os-hostname vars not set - for net_type in [INTERNAL, ADMIN, PUBLIC]: - try: - addr = resolve_address(endpoint_type=net_type) - cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) - key = os.path.join(ssl_dir, 'key_{}'.format(addr)) - if os.path.isfile(hostname_cert) and not os.path.isfile(cert): - os.symlink(hostname_cert, cert) - os.symlink(hostname_key, key) - except NoNetworkBinding: - log("Skipping creating cert symlink for ip in {} space, no " - "local address found".format(net_type), WARNING) if custom_hostname_link: custom_cert = os.path.join( ssl_dir, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index 723aebc1..89cf276d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -33,6 +33,7 @@ ADMIN = 'admin' ACCESS = 'access' +# TODO: reconcile 'int' vs 'internal' binding names ADDRESS_MAP = { PUBLIC: { 'binding': 'public', @@ -58,6 +59,14 @@ 'fallback': 'private-address', 'override': 'os-access-hostname', }, + # Note (thedac) bridge to begin the reconciliation between 'int' vs + # 'internal' binding names + 'internal': { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, } @@ -195,3 +204,10 @@ def get_vip_in_network(network): if is_address_in_network(network, vip): matching_vip = vip return matching_vip + + +def get_default_api_bindings(): + _default_bindings = [] + for binding in [INTERNAL, ADMIN, PUBLIC]: + _default_bindings.append(ADDRESS_MAP[binding]['binding']) + return _default_bindings diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 0aa797c4..23e4adf5 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -230,7 +230,7 @@ ('ussuri', ['2.24.0', '2.25.0']), ('victoria', - ['2.25.0']), + ['2.25.0', '2.26.0']), ]) # >= Liberty version->codename mapping diff --git 
a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 7882e2ce..0f69631d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -41,6 +41,7 @@ ) from charmhelpers import deprecate from charmhelpers.core.hookenv import ( + application_name, config, service_name, local_unit, @@ -162,6 +163,17 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) +def send_application_name(relid=None): + """Send the application name down the relation. + + :param relid: Relation id to set application name in. + :type relid: str + """ + relation_set( + relation_id=relid, + relation_settings={'application-name': application_name()}) + + def send_osd_settings(): """Pass on requested OSD settings to osd units.""" try: @@ -2203,6 +2215,7 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()}) def has_broker_rsp(rid=None, unit=None): diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index 25427697..d00baedc 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -750,7 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', []) + extra_pools = request.get('extra_pools', None) or [] metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 9da4dc12..52d380b4 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -2141,6 +2141,8 @@ def roll_monitor_cluster(new_version, upgrade_key): # A sorted list of osd unit names mon_sorted_list = sorted(monitor_list) + # Install packages immediately but defer restarts to when it's our time. + upgrade_monitor(new_version, restart_daemons=False) try: position = mon_sorted_list.index(my_name) log("upgrade position: {}".format(position)) @@ -2182,7 +2184,7 @@ def noop(): pass -def upgrade_monitor(new_version, kick_function=None): +def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. 
@@ -2207,6 +2209,22 @@ def upgrade_monitor(new_version, kick_function=None): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) kick_function() + + try: + apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) + except subprocess.CalledProcessError as err: + log("Upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + if not restart_daemons: + log("Packages upgraded but not restarting daemons yet.") + return + try: if systemd(): service_stop('ceph-mon') @@ -2216,10 +2234,7 @@ def upgrade_monitor(new_version, kick_function=None): service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) + kick_function() owner = ceph_user() diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index cbfbc65f..0d91d962 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-mon gate_bundles: + - groovy-victoria - focal-victoria - focal-ussuri-ec - focal-ussuri @@ -16,8 +17,6 @@ gate_bundles: - trusty-mitaka smoke_bundles: - bionic-train -dev_bundles: - - groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From 96c6e9bad1f0d8eddf3bddaf6e0c4a4122d5ba66 Mon Sep 17 00:00:00 2001 From: Ponnuvel Palaniyappan Date: Thu, 12 Nov 2020 14:06:53 +0000 Subject: [PATCH 2106/2699] Sync charm-helpers to pick up the commit: https://github.com/juju/charm-helpers/commit/09208c2a2c691435e178b2b481d043f687b7e61b Change-Id: I4ced679e814c3b58595a9bc123ce0d402b1c1811 Signed-off-by: Ponnuvel Palaniyappan --- ceph-osd/hooks/charmhelpers/core/host.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index a785efdf..87993699 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -19,6 +19,7 @@ # Nick Moffitt # Matthew Wedgwood +import errno import os import re import pwd @@ -677,7 +678,7 @@ def check_hash(path, checksum, hash_type='md5'): :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, + Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum @@ -889,7 +890,7 @@ def get_nic_hwaddr(nic): def chdir(directory): """Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. + run commands from a specified directory. :param str directory: The directory path to change to for this context. """ @@ -924,9 +925,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: + try: chown(full, uid, gid) + except (IOError, OSError) as e: + # Intended to ignore "file not found". 
Catching both to be + # compatible with both Python 2.7 and 3.x. + if e.errno == errno.ENOENT: + pass def lchownr(path, owner, group): From b1d98bd0c3d806bdf5af8a4115cd6dc0f048f1eb Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 17 Nov 2020 09:49:14 -0500 Subject: [PATCH 2107/2699] Refresh README Change-Id: I3e815acd79ee298395e5dfce1c06d4478d8c8a2d --- ceph-iscsi/README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 4fa38828..9a6f4610 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -22,7 +22,7 @@ The efficiency of compression depends heavily on what type of data is stored in the pool and the charm provides a set of configuration options to fine tune the compression behaviour. -**Note**: BlueStore compression is supported starting with Ceph Mimic. +> **Note**: BlueStore compression is supported starting with Ceph Mimic. ## Deployment @@ -30,7 +30,7 @@ We are assuming a pre-existing Ceph cluster. To provide multiple data paths to clients deploy exactly two ceph-iscsi units: - juju deploy -n 2 cs:~openstack-charmers-next/ceph-iscsi + juju deploy -n 2 ceph-iscsi Then add a relation to the ceph-mon application: @@ -49,7 +49,9 @@ Then add a relation to the ceph-mon application: ## Actions This section covers Juju [actions][juju-docs-actions] supported by the charm. -Actions allow specific operations to be performed on a per-unit basis. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions ceph-iscsi`. If the charm is not +deployed then see file `actions.yaml`. * `add-trusted-ip` * `create-target` @@ -94,9 +96,9 @@ and is available from any ceph-iscsi unit: sudo gwcli /> help -## VMWare integration +## VMware integration -Ceph can be used to back iSCSI targets for VMWare initiators. This is +Ceph can be used to back iSCSI targets for VMware initiators. This is documented under [Ceph iSCSI][cdg-ceph-iscsi] in the [OpenStack Charms Deployment Guide][cdg]. From 7280e6d52d1045ab259395c53405c1dba47253a0 Mon Sep 17 00:00:00 2001 From: Marius Oprin Date: Thu, 19 Nov 2020 13:48:24 +0200 Subject: [PATCH 2108/2699] Sync charmhelpers Recent charmhelpers change forwards the broker requests to the ceph-rbd-mirror with information about the RBD mirroring mode. 
This is needed for Cinder Ceph Replication spec Change-Id: I1d2b5351574a8741e55a8e6482d0c4a168562050 Co-authored-by: Ionut Balutiou --- .../hooks/charmhelpers/contrib/network/ip.py | 3 +- .../charmhelpers/contrib/openstack/utils.py | 21 +++++++++- .../contrib/storage/linux/ceph.py | 6 +++ .../hooks/charmhelpers/core/decorators.py | 38 +++++++++++++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 24 ++++++++---- 5 files changed, 81 insertions(+), 11 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index b13277bb..63e91cca 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') + out = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 23e4adf5..f4c76214 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -18,6 +18,7 @@ import subprocess import json +import operator import os import sys import re @@ -33,7 +34,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import unitdata +from charmhelpers.core import decorators, unitdata from charmhelpers.core.hookenv import ( WORKLOAD_STATES, @@ -1295,7 +1296,7 @@ def _check_listening_on_ports_list(ports): Returns a list of ports being listened to and a list of the booleans. - @param ports: LIST or port numbers. + @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ ports_open = [port_has_listener('0.0.0.0', p) for p in ports] @@ -1564,6 +1565,21 @@ def manage_payload_services(action, services=None, charm_func=None): return success, messages +def make_wait_for_ports_barrier(ports, retry_count=5): + """Make a function to wait for port shutdowns. + + Create a function which closes over the provided ports. The function will + retry probing ports until they are closed or the retry count has been reached. 
+ + """ + @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1) + def retry_port_check(): + _, ports_states = _check_listening_on_ports_list(ports) + juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG") + return any(ports_states) + return retry_port_check + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1599,6 +1615,7 @@ def pause_unit(assess_status_func, services=None, ports=None, services=services, charm_func=charm_func) set_unit_paused() + if assess_status_func: message = assess_status_func() if message: diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0f69631d..d1c61754 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -268,6 +268,7 @@ class BasePool(object): 'compression-max-blob-size': (int, None), 'compression-max-blob-size-hdd': (int, None), 'compression-max-blob-size-ssd': (int, None), + 'rbd-mirroring-mode': (str, ('image', 'pool')) } def __init__(self, service, name=None, percent_data=None, app_name=None, @@ -1767,6 +1768,7 @@ def _partial_build_common_op_create(self, max_bytes=None, max_objects=None, namespace=None, + rbd_mirroring_mode='pool', weight=None): """Build common part of a create pool operation. @@ -1825,6 +1827,9 @@ def _partial_build_common_op_create(self, :type max_objects: Optional[int] :param namespace: Group namespace :type namespace: Optional[str] + :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD + mirroring is enabled. + :type rbd_mirroring_mode: Optional[str] :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. Used to calculate number of Placement Groups to create @@ -1849,6 +1854,7 @@ def _partial_build_common_op_create(self, 'max-bytes': max_bytes, 'max-objects': max_objects, 'group-namespace': namespace, + 'rbd-mirroring-mode': rbd_mirroring_mode, 'weight': weight, } diff --git a/ceph-mon/hooks/charmhelpers/core/decorators.py b/ceph-mon/hooks/charmhelpers/core/decorators.py index 6ad41ee4..e7e95d17 100644 --- a/ceph-mon/hooks/charmhelpers/core/decorators.py +++ b/ceph-mon/hooks/charmhelpers/core/decorators.py @@ -53,3 +53,41 @@ def _retry_on_exception_inner_2(*args, **kwargs): return _retry_on_exception_inner_2 return _retry_on_exception_inner_1 + + +def retry_on_predicate(num_retries, predicate_fun, base_delay=0): + """Retry based on return value + + The return value of the decorated function is passed to the given predicate_fun. If the + result of the predicate is False, retry the decorated function up to num_retries times + + An exponential backoff up to base_delay^num_retries seconds can be introduced by setting + base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay + + :param num_retries: Max. 
number of retries to perform + :type num_retries: int + :param predicate_fun: Predicate function to determine if a retry is necessary + :type predicate_fun: callable + :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay) + :type base_delay: float + """ + def _retry_on_pred_inner_1(f): + def _retry_on_pred_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + delay = base_delay + while True: + result = f(*args, **kwargs) + if predicate_fun(result) or retries <= 0: + return result + delay *= multiplier + multiplier += 1 + log("Result {}, retrying '{}' {} more times (delay={})".format( + result, f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_pred_inner_2 + + return _retry_on_pred_inner_1 diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index a785efdf..697a5f4b 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -19,6 +19,7 @@ # Nick Moffitt # Matthew Wedgwood +import errno import os import re import pwd @@ -677,7 +678,7 @@ def check_hash(path, checksum, hash_type='md5'): :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, + Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum @@ -825,7 +826,8 @@ def list_nics(nic_type=None): if nic_type: for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') ip_output = ip_output.split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: @@ -841,7 +843,8 @@ def list_nics(nic_type=None): interfaces.append(iface) else: cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') ip_output = (line.strip() for line in ip_output if line) key = re.compile(r'^[0-9]+:\s+(.+):') @@ -865,7 +868,8 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -877,7 +881,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -889,7 +893,7 @@ def get_nic_hwaddr(nic): def chdir(directory): """Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. + run commands from a specified directory. :param str directory: The directory path to change to for this context. 
""" @@ -924,9 +928,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: + try: chown(full, uid, gid) + except (IOError, OSError) as e: + # Intended to ignore "file not found". Catching both to be + # compatible with both Python 2.7 and 3.x. + if e.errno == errno.ENOENT: + pass def lchownr(path, owner, group): From 1004cbca2e70e717e6d7ee4fa7a60f959f81bd7f Mon Sep 17 00:00:00 2001 From: Robert Gildein Date: Thu, 15 Oct 2020 17:51:21 +0200 Subject: [PATCH 2109/2699] add function to check if device exists - if the device does not exists, the action failed with message: '/dev/: Device does not exists.' Closes-Bug: #1885336 Change-Id: I417c0074fe64c2afceecdfb09b8673930087f65f --- ceph-osd/actions/zap_disk.py | 26 ++++++++--- ceph-osd/unit_tests/test_actions_zap_disk.py | 47 +++++++++++++++++++- 2 files changed, 67 insertions(+), 6 deletions(-) diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index b8a29bb2..1002c742 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -31,15 +31,26 @@ from charms_ceph.utils import is_mapped_luks_device +class ZapDiskError(Exception): + pass + + def get_devices(): """Parse 'devices' action parameter, returns list.""" - devices = [] + devices, errors = [], [] + for path in hookenv.action_get('devices').split(' '): path = path.strip() if not os.path.isabs(path): - hookenv.action_fail('{}: Not absolute path.'.format(path)) - raise - devices.append(path) + errors.append('{}: Not absolute path.'.format(path)) + elif not os.path.exists(path): + errors.append('{}: Device does not exist.'.format(path)) + else: + devices.append(path) + + if errors: + raise ZapDiskError(", ".join(errors)) + return devices @@ -50,7 +61,12 @@ def zap(): failed_devices = [] not_block_devices = [] - devices = get_devices() + try: + devices = get_devices() + except ZapDiskError as error: + hookenv.action_fail("Failed due to: {}".format(error)) + return + for device in devices: if not is_block_device(device): not_block_devices.append(device) diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py index 61266e3a..21a6ffcc 100644 --- a/ceph-osd/unit_tests/test_actions_zap_disk.py +++ b/ceph-osd/unit_tests/test_actions_zap_disk.py @@ -35,6 +35,7 @@ def setUp(self): self.kv.return_value = self.kv self.hookenv.local_unit.return_value = "ceph-osd-test/0" + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_authorized_zap_single_disk(self, _zap_disk): @@ -44,6 +45,7 @@ def side_effect(arg): 'devices': '/dev/vdb', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.kv.get.return_value = ['/dev/vdb', '/dev/vdz'] zap_disk.zap() @@ -57,6 +59,7 @@ def side_effect(arg): "osd-devices=\"/dev/vdb\"" }) + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_authorized_zap_multiple_disks(self, _zap_disk): @@ -66,6 +69,7 @@ def side_effect(arg): 'devices': '/dev/vdb /dev/vdc', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.kv.get.return_value = ['/dev/vdb', '/dev/vdz'] zap_disk.zap() @@ -82,15 +86,17 @@ def side_effect(arg): "osd-devices=\"/dev/vdb /dev/vdc\"" }) + 
@mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_wont_zap_non_block_device(self, - _zap_disk,): + _zap_disk): """Will not zap a disk that isn't a block device""" def side_effect(arg): return { 'devices': '/dev/vdb', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.is_block_device.return_value = False zap_disk.zap() @@ -98,6 +104,7 @@ def side_effect(arg): self.hookenv.action_fail.assert_called_with( "1 devices are not block devices: /dev/vdb") + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_wont_zap_mounted_block_device(self, _zap_disk): @@ -107,6 +114,7 @@ def side_effect(arg): 'devices': '/dev/vdb', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.is_device_mounted.return_value = True zap_disk.zap() @@ -114,6 +122,7 @@ def side_effect(arg): self.hookenv.action_fail.assert_called_with( "1 devices are mounted: /dev/vdb") + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_wont_zap__mounted_bluestore_device(self, _zap_disk): @@ -123,6 +132,7 @@ def side_effect(arg): 'devices': '/dev/vdb', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.is_active_bluestore_device.return_value = True zap_disk.zap() @@ -130,6 +140,7 @@ def side_effect(arg): self.hookenv.action_fail.assert_called_with( "1 devices are mounted: /dev/vdb") + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_wont_zap__mapped_luks_device(self, _zap_disk): """Will not zap a disk that has a LUKS header""" @@ -138,6 +149,7 @@ def side_effect(arg): 'devices': '/dev/vdb', 'i-really-mean-it': True, }.get(arg) + self.hookenv.action_get.side_effect = side_effect self.is_active_bluestore_device.return_value = False self.is_mapped_luks_device.return_value = True @@ -146,6 +158,7 @@ def side_effect(arg): self.hookenv.action_fail.assert_called_with( "1 devices are mounted: /dev/vdb") + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) @mock.patch.object(zap_disk, 'zap_disk') def test_zap_luks_not_mapped(self, _zap_disk): """Will zap disk with extra config set""" @@ -170,3 +183,35 @@ def side_effect(arg): "run-action ceph-osd-test/0 add-disk " "osd-devices=\"/dev/vdb\"" }) + + @mock.patch.object(zap_disk, 'zap_disk') + def test_wont_zap_non_existent_device(self, _zap_disk): + """Won't zap non-existent disk""" + def side_effect(arg): + return { + 'devices': '/dev/not-valid-disk', + 'i-really-mean-it': True, + }.get(arg) + + self.hookenv.action_get.side_effect = side_effect + zap_disk.zap() + _zap_disk.assert_not_called() + self.hookenv.action_fail.assert_called_with( + 'Failed due to: /dev/not-valid-disk: Device does not exist.') + self.hookenv.action_set.assert_not_called() + + @mock.patch.object(zap_disk, 'zap_disk') + def test_wont_zap_not_abs_path(self, _zap_disk): + """Won't zap not absolute path""" + def side_effect(arg): + return { + 'devices': 'not-absolute', + 'i-really-mean-it': True, + }.get(arg) + + self.hookenv.action_get.side_effect = side_effect + zap_disk.zap() + _zap_disk.assert_not_called() + self.hookenv.action_fail.assert_called_with( + 'Failed due to: not-absolute: Not absolute path.') + self.hookenv.action_set.assert_not_called() From eebf5bb60e19f9563eb7b649f6985c64b6de5ce8 Mon Sep 17 00:00:00 2001 From: Aurelien 
Lourot Date: Thu, 5 Nov 2020 12:43:25 +0100 Subject: [PATCH 2110/2699] Add Groovy to the test gate Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/468 Change-Id: Ifaa7b5b3ecd419e3e5c361bfdcfc4a2a0ea0332d --- ceph-fs/src/tests/tests.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 9a379eaf..df29c443 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,5 +1,6 @@ charm_name: ceph-fs gate_bundles: + - bluestore-compression: groovy-victoria - bluestore-compression: focal-victoria - bluestore-compression: focal-ussuri - bluestore-compression: bionic-ussuri @@ -14,8 +15,6 @@ gate_bundles: - xenial-mitaka smoke_bundles: - bluestore-compression: bionic-stein -dev_bundles: - - bluestore-compression: groovy-victoria configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network From aedda23571bba3201fbb487d53f710a3aafe959f Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 5 Nov 2020 12:44:31 +0100 Subject: [PATCH 2111/2699] Fix haproxy not running on fresh deployments Also add Groovy to the test gate and sync static libraries. Change-Id: I04ca79487085d9088811bc0ffd5f0981db50c42e Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/468 Closes-Bug: #1904411 --- .../hooks/charmhelpers/contrib/network/ip.py | 3 +- .../contrib/openstack/cert_utils.py | 124 +++++++++++++----- .../charmhelpers/contrib/openstack/ip.py | 16 +++ .../openstack/templates/section-placement | 1 + .../charmhelpers/contrib/openstack/utils.py | 23 +++- .../contrib/storage/linux/ceph.py | 31 ++++- .../hooks/charmhelpers/core/decorators.py | 38 ++++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 24 ++-- ceph-radosgw/hooks/hooks.py | 11 +- ceph-radosgw/hooks/utils.py | 8 ++ ceph-radosgw/lib/charms_ceph/broker.py | 2 +- ceph-radosgw/lib/charms_ceph/utils.py | 25 +++- ceph-radosgw/tests/tests.yaml | 4 +- 13 files changed, 254 insertions(+), 56 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index b13277bb..63e91cca 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') + out = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index b494af64..1eb21542 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -34,12 +34,14 @@ WARNING, ) from charmhelpers.contrib.openstack.ip import ( - ADMIN, resolve_address, get_vip_in_network, - INTERNAL, - PUBLIC, - ADDRESS_MAP) + ADDRESS_MAP, + get_default_api_bindings, +) +from charmhelpers.contrib.network.ip import ( + get_relation_ip, +) from charmhelpers.core.host import ( mkdir, @@ -113,44 +115,118 @@ def get_request(self): return req -def get_certificate_request(json_encode=True): - """Generate a 
certificatee requests based on the network confioguration +def get_certificate_request(json_encode=True, bindings=None): + """Generate a certificate requests based on the network configuration + :param json_encode: Encode request in JSON or not. Used for setting + directly on a relation. + :type json_encode: boolean + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings + :returns: CertRequest request as dictionary or JSON string. + :rtype: Union[dict, json] """ + if bindings: + # Add default API bindings to bindings list + bindings = set(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() req = CertRequest(json_encode=json_encode) req.add_hostname_cn() # Add os-hostname entries - for net_type in [INTERNAL, ADMIN, PUBLIC]: - net_config = config(ADDRESS_MAP[net_type]['override']) + _sans = get_certificate_sans() + + # Handle specific hostnames per binding + for binding in bindings: + hostname_override = config(ADDRESS_MAP[binding]['override']) try: - net_addr = resolve_address(endpoint_type=net_type) + net_addr = resolve_address(endpoint_type=binding) ip = network_get_primary_address( - ADDRESS_MAP[net_type]['binding']) + ADDRESS_MAP[binding]['binding']) addresses = [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) - if net_config: + # Add hostname certificate request + if hostname_override: req.add_entry( - net_type, - net_config, + binding, + hostname_override, addresses) - else: - # There is network address with no corresponding hostname. - # Add the ip to the hostname cert to allow for this. - req.add_hostname_cn_ip(addresses) + # Remove hostname specific addresses from _sans + for addr in addresses: + try: + _sans.remove(addr) + except (ValueError, KeyError): + pass + except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " - "local address found".format(net_type), WARNING) + "local address found".format(binding), WARNING) + # Gurantee all SANs are covered + # These are network addresses with no corresponding hostname. + # Add the ips to the hostname cert to allow for this. + req.add_hostname_cn_ip(_sans) return req.get_request() +def get_certificate_sans(bindings=None): + """Get all possible IP addresses for certificate SANs. + """ + _sans = [unit_get('private-address')] + if bindings: + # Add default API bindings to bindings list + bindings = set(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + + for binding in bindings: + # Check for config override + try: + net_config = config(ADDRESS_MAP[binding]['config']) + except KeyError: + # There is no configuration network for this binding name + net_config = None + # Using resolve_address is likely redundant. Keeping it here in + # case there is an edge case it handles. 
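The bindings parameter introduced here lets a charm request certificates covering extra Juju bindings on top of the default API set, while any address left without a hostname override falls back into the CN's IP SANs. A hedged sketch of driving it from hook code; this only works inside a real hook environment, and 'internal' relies on the ADDRESS_MAP entry added later in this same patch:

    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request,
    )

    # Default API bindings plus the new 'internal' alias; json_encode=False
    # returns the dict form instead of a JSON string for a relation.
    req = get_certificate_request(json_encode=False, bindings=['internal'])

    # Same shape create_ip_cert_links() consumes: one entry per requested
    # CN, each listing the SANs the certificate should cover.
    for cn, spec in req['cert_requests'].items():
        print(cn, spec['sans'])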
+ net_addr = resolve_address(endpoint_type=binding) + ip = get_relation_ip(binding, cidr_network=net_config) + _sans = _sans + [net_addr, ip] + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + _sans.append(vip) + return set(_sans) + + def create_ip_cert_links(ssl_dir, custom_hostname_link=None): """Create symlinks for SAN records :param ssl_dir: str Directory to create symlinks in :param custom_hostname_link: str Additional link to be created """ + + # This includes the hostname cert and any specific bindng certs: + # admin, internal, public + req = get_certificate_request(json_encode=False)["cert_requests"] + # Specific certs + for cert_req in req.keys(): + requested_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(cert_req)) + requested_key = os.path.join( + ssl_dir, + 'key_{}'.format(cert_req)) + for addr in req[cert_req]['sans']: + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(requested_cert) and not os.path.isfile(cert): + os.symlink(requested_cert, cert) + os.symlink(requested_key, key) + + # Handle custom hostnames hostname = get_hostname(unit_get('private-address')) hostname_cert = os.path.join( ssl_dir, @@ -158,18 +234,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): hostname_key = os.path.join( ssl_dir, 'key_{}'.format(hostname)) - # Add links to hostname cert, used if os-hostname vars not set - for net_type in [INTERNAL, ADMIN, PUBLIC]: - try: - addr = resolve_address(endpoint_type=net_type) - cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) - key = os.path.join(ssl_dir, 'key_{}'.format(addr)) - if os.path.isfile(hostname_cert) and not os.path.isfile(cert): - os.symlink(hostname_cert, cert) - os.symlink(hostname_key, key) - except NoNetworkBinding: - log("Skipping creating cert symlink for ip in {} space, no " - "local address found".format(net_type), WARNING) if custom_hostname_link: custom_cert = os.path.join( ssl_dir, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 723aebc1..89cf276d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -33,6 +33,7 @@ ADMIN = 'admin' ACCESS = 'access' +# TODO: reconcile 'int' vs 'internal' binding names ADDRESS_MAP = { PUBLIC: { 'binding': 'public', @@ -58,6 +59,14 @@ 'fallback': 'private-address', 'override': 'os-access-hostname', }, + # Note (thedac) bridge to begin the reconciliation between 'int' vs + # 'internal' binding names + 'internal': { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, } @@ -195,3 +204,10 @@ def get_vip_in_network(network): if is_address_in_network(network, vip): matching_vip = vip return matching_vip + + +def get_default_api_bindings(): + _default_bindings = [] + for binding in [INTERNAL, ADMIN, PUBLIC]: + _default_bindings.append(ADDRESS_MAP[binding]['binding']) + return _default_bindings diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement index 97724bdb..8c224ec9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-placement @@ -15,5 +15,6 @@ password = {{ admin_password }} {% endif -%} {% if region -%} os_region_name = {{ 
region }} +region_name = {{ region }} {% endif -%} randomize_allocation_candidates = true diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 0aa797c4..f4c76214 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -18,6 +18,7 @@ import subprocess import json +import operator import os import sys import re @@ -33,7 +34,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import unitdata +from charmhelpers.core import decorators, unitdata from charmhelpers.core.hookenv import ( WORKLOAD_STATES, @@ -230,7 +231,7 @@ ('ussuri', ['2.24.0', '2.25.0']), ('victoria', - ['2.25.0']), + ['2.25.0', '2.26.0']), ]) # >= Liberty version->codename mapping @@ -1295,7 +1296,7 @@ def _check_listening_on_ports_list(ports): Returns a list of ports being listened to and a list of the booleans. - @param ports: LIST or port numbers. + @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ ports_open = [port_has_listener('0.0.0.0', p) for p in ports] @@ -1564,6 +1565,21 @@ def manage_payload_services(action, services=None, charm_func=None): return success, messages +def make_wait_for_ports_barrier(ports, retry_count=5): + """Make a function to wait for port shutdowns. + + Create a function which closes over the provided ports. The function will + retry probing ports until they are closed or the retry count has been reached. + + """ + @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1) + def retry_port_check(): + _, ports_states = _check_listening_on_ports_list(ports) + juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG") + return any(ports_states) + return retry_port_check + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1599,6 +1615,7 @@ def pause_unit(assess_status_func, services=None, ports=None, services=services, charm_func=charm_func) set_unit_paused() + if assess_status_func: message = assess_status_func() if message: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 738d734d..d1c61754 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -41,6 +41,7 @@ ) from charmhelpers import deprecate from charmhelpers.core.hookenv import ( + application_name, config, service_name, local_unit, @@ -162,6 +163,17 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) +def send_application_name(relid=None): + """Send the application name down the relation. + + :param relid: Relation id to set application name in. 
+ :type relid: str + """ + relation_set( + relation_id=relid, + relation_settings={'application-name': application_name()}) + + def send_osd_settings(): """Pass on requested OSD settings to osd units.""" try: @@ -256,6 +268,7 @@ class BasePool(object): 'compression-max-blob-size': (int, None), 'compression-max-blob-size-hdd': (int, None), 'compression-max-blob-size-ssd': (int, None), + 'rbd-mirroring-mode': (str, ('image', 'pool')) } def __init__(self, service, name=None, percent_data=None, app_name=None, @@ -1074,7 +1087,10 @@ def create_erasure_profile(service, profile_name, erasure_plugin_technique=None): """Create a new erasure code profile if one does not already exist for it. - Updates the profile if it exists. Please refer to [0] for more details. + Profiles are considered immutable so will not be updated if the named + profile already exists. + + Please refer to [0] for more details. 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -1110,6 +1126,11 @@ def create_erasure_profile(service, profile_name, :type erasure_plugin_technique: str :return: None. Can raise CalledProcessError, ValueError or AssertionError """ + if erasure_profile_exists(service, profile_name): + log('EC profile {} exists, skipping update'.format(profile_name), + level=WARNING) + return + plugin_techniques = { 'jerasure': [ 'reed_sol_van', @@ -1209,9 +1230,6 @@ def create_erasure_profile(service, profile_name, if scalar_mds: cmd.append('scalar-mds={}'.format(scalar_mds)) - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - check_call(cmd) @@ -1750,6 +1768,7 @@ def _partial_build_common_op_create(self, max_bytes=None, max_objects=None, namespace=None, + rbd_mirroring_mode='pool', weight=None): """Build common part of a create pool operation. @@ -1808,6 +1827,9 @@ def _partial_build_common_op_create(self, :type max_objects: Optional[int] :param namespace: Group namespace :type namespace: Optional[str] + :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD + mirroring is enabled. + :type rbd_mirroring_mode: Optional[str] :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. Used to calculate number of Placement Groups to create @@ -1832,6 +1854,7 @@ def _partial_build_common_op_create(self, 'max-bytes': max_bytes, 'max-objects': max_objects, 'group-namespace': namespace, + 'rbd-mirroring-mode': rbd_mirroring_mode, 'weight': weight, } diff --git a/ceph-radosgw/hooks/charmhelpers/core/decorators.py b/ceph-radosgw/hooks/charmhelpers/core/decorators.py index 6ad41ee4..e7e95d17 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/decorators.py +++ b/ceph-radosgw/hooks/charmhelpers/core/decorators.py @@ -53,3 +53,41 @@ def _retry_on_exception_inner_2(*args, **kwargs): return _retry_on_exception_inner_2 return _retry_on_exception_inner_1 + + +def retry_on_predicate(num_retries, predicate_fun, base_delay=0): + """Retry based on return value + + The return value of the decorated function is passed to the given predicate_fun. If the + result of the predicate is False, retry the decorated function up to num_retries times + + An exponential backoff up to base_delay^num_retries seconds can be introduced by setting + base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay + + :param num_retries: Max. 
number of retries to perform + :type num_retries: int + :param predicate_fun: Predicate function to determine if a retry is necessary + :type predicate_fun: callable + :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay) + :type base_delay: float + """ + def _retry_on_pred_inner_1(f): + def _retry_on_pred_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + delay = base_delay + while True: + result = f(*args, **kwargs) + if predicate_fun(result) or retries <= 0: + return result + delay *= multiplier + multiplier += 1 + log("Result {}, retrying '{}' {} more times (delay={})".format( + result, f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_pred_inner_2 + + return _retry_on_pred_inner_1 diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index a785efdf..697a5f4b 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -19,6 +19,7 @@ # Nick Moffitt # Matthew Wedgwood +import errno import os import re import pwd @@ -677,7 +678,7 @@ def check_hash(path, checksum, hash_type='md5'): :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, + Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum @@ -825,7 +826,8 @@ def list_nics(nic_type=None): if nic_type: for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') ip_output = ip_output.split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: @@ -841,7 +843,8 @@ def list_nics(nic_type=None): interfaces.append(iface) else: cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') ip_output = (line.strip() for line in ip_output if line) key = re.compile(r'^[0-9]+:\s+(.+):') @@ -865,7 +868,8 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -877,7 +881,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -889,7 +893,7 @@ def get_nic_hwaddr(nic): def chdir(directory): """Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. + run commands from a specified directory. :param str directory: The directory path to change to for this context. 
""" @@ -924,9 +928,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: + try: chown(full, uid, gid) + except (IOError, OSError) as e: + # Intended to ignore "file not found". Catching both to be + # compatible with both Python 2.7 and 3.x. + if e.errno == errno.ENOENT: + pass def lchownr(path, owner, group): diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 71beace3..3e94a27f 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -156,14 +156,21 @@ def install_packages(): ) if pkgs: status_set('maintenance', 'Installing radosgw packages') + if ('apache2' in pkgs): + # NOTE(lourot): Apache's default config makes it listen on port 80, + # which will prevent HAProxy from listening on that same port. We + # use Apache in this setup however for SSL (different port). We + # need to let Apache free port 80 before we can install HAProxy + # otherwise HAProxy will crash. See lp:1904411 + log('Installing Apache') + apt_install(['apache2'], fatal=True) + disable_unused_apache_sites() apt_install(pkgs, fatal=True) pkgs = filter_missing_packages(APACHE_PACKAGES) if pkgs: apt_purge(pkgs) - disable_unused_apache_sites() - @hooks.hook('install.real') @harden() diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index c6a6c9b1..f05d190b 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -28,6 +28,7 @@ application_version_set, config, leader_get, + log, ) from charmhelpers.contrib.openstack import ( context, @@ -47,6 +48,8 @@ lsb_release, CompareHostReleases, init_is_systemd, + service, + service_running, ) from charmhelpers.fetch import ( apt_cache, @@ -349,6 +352,7 @@ def disable_unused_apache_sites(): """Ensure that unused apache configurations are disabled to prevent them from conflicting with the charm-provided version. 
""" + log('Disabling unused Apache sites') for apache_site_file in UNUSED_APACHE_SITE_FILES: apache_site = apache_site_file.split('/')[-1].split('.')[0] if os.path.exists(apache_site_file): @@ -362,6 +366,10 @@ def disable_unused_apache_sites(): with open(APACHE_PORTS_FILE, 'w') as ports: ports.write("") + if service_running('apache2'): + log('Restarting Apache') + service('restart', 'apache2') + def systemd_based_radosgw(): """Determine if install should use systemd based radosgw instances""" diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 25427697..d00baedc 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -750,7 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', []) + extra_pools = request.get('extra_pools', None) or [] metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 9da4dc12..52d380b4 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -2141,6 +2141,8 @@ def roll_monitor_cluster(new_version, upgrade_key): # A sorted list of osd unit names mon_sorted_list = sorted(monitor_list) + # Install packages immediately but defer restarts to when it's our time. + upgrade_monitor(new_version, restart_daemons=False) try: position = mon_sorted_list.index(my_name) log("upgrade position: {}".format(position)) @@ -2182,7 +2184,7 @@ def noop(): pass -def upgrade_monitor(new_version, kick_function=None): +def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. 
@@ -2207,6 +2209,22 @@ def upgrade_monitor(new_version, kick_function=None): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) kick_function() + + try: + apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) + except subprocess.CalledProcessError as err: + log("Upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + if not restart_daemons: + log("Packages upgraded but not restarting daemons yet.") + return + try: if systemd(): service_stop('ceph-mon') @@ -2216,10 +2234,7 @@ def upgrade_monitor(new_version, kick_function=None): service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) + kick_function() owner = ceph_user() diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 36526e8d..3c9e4c14 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-radosgw gate_bundles: + - vault: groovy-victoria + - vault: groovy-victoria-namespaced - vault: focal-victoria - vault: focal-victoria-namespaced - vault: focal-ussuri-ec @@ -24,8 +26,6 @@ gate_bundles: smoke_bundles: - vault: bionic-ussuri dev_bundles: - - vault: groovy-victoria - - vault: groovy-victoria-namespaced - bionic-queens-multisite - bionic-rocky-multisite target_deploy_status: From 305332510c3000bd81fdf28a93e2d47354dc12fd Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 5 Nov 2020 12:44:16 +0100 Subject: [PATCH 2112/2699] Add Groovy to the test gate Also sync libraries Change-Id: I60d6b713c152c14b5af37b5c87308c72408801e3 --- .../charmhelpers/contrib/hahelpers/apache.py | 6 ++- ceph-proxy/charmhelpers/contrib/network/ip.py | 3 +- .../charmhelpers/contrib/openstack/ip.py | 16 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 23 +++++++++-- .../contrib/storage/linux/ceph.py | 19 ++++++++++ ceph-proxy/charmhelpers/core/decorators.py | 38 +++++++++++++++++++ ceph-proxy/charmhelpers/core/host.py | 27 ++++++++----- .../charmhelpers/core/host_factory/ubuntu.py | 3 +- ceph-proxy/lib/charms_ceph/broker.py | 2 +- ceph-proxy/lib/charms_ceph/utils.py | 25 +++++++++--- ceph-proxy/tests/tests.yaml | 4 +- 11 files changed, 143 insertions(+), 23 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py b/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py index 2c1e371e..a54702bc 100644 --- a/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/apache.py @@ -34,6 +34,10 @@ INFO, ) +# This file contains the CA cert from the charms ssl_ca configuration +# option, in future the file name should be updated reflect that. 
+CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' + def get_cert(cn=None): # TODO: deal with multiple https endpoints via charm config @@ -83,4 +87,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') + host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index b13277bb..63e91cca 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') + out = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ip.py b/ceph-proxy/charmhelpers/contrib/openstack/ip.py index 723aebc1..89cf276d 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/ip.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/ip.py @@ -33,6 +33,7 @@ ADMIN = 'admin' ACCESS = 'access' +# TODO: reconcile 'int' vs 'internal' binding names ADDRESS_MAP = { PUBLIC: { 'binding': 'public', @@ -58,6 +59,14 @@ 'fallback': 'private-address', 'override': 'os-access-hostname', }, + # Note (thedac) bridge to begin the reconciliation between 'int' vs + # 'internal' binding names + 'internal': { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, } @@ -195,3 +204,10 @@ def get_vip_in_network(network): if is_address_in_network(network, vip): matching_vip = vip return matching_vip + + +def get_default_api_bindings(): + _default_bindings = [] + for binding in [INTERNAL, ADMIN, PUBLIC]: + _default_bindings.append(ADDRESS_MAP[binding]['binding']) + return _default_bindings diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 0aa797c4..f4c76214 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -18,6 +18,7 @@ import subprocess import json +import operator import os import sys import re @@ -33,7 +34,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import unitdata +from charmhelpers.core import decorators, unitdata from charmhelpers.core.hookenv import ( WORKLOAD_STATES, @@ -230,7 +231,7 @@ ('ussuri', ['2.24.0', '2.25.0']), ('victoria', - ['2.25.0']), + ['2.25.0', '2.26.0']), ]) # >= Liberty version->codename mapping @@ -1295,7 +1296,7 @@ def _check_listening_on_ports_list(ports): Returns a list of ports being listened to and a list of the booleans. - @param ports: LIST or port numbers. + @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ ports_open = [port_has_listener('0.0.0.0', p) for p in ports] @@ -1564,6 +1565,21 @@ def manage_payload_services(action, services=None, charm_func=None): return success, messages +def make_wait_for_ports_barrier(ports, retry_count=5): + """Make a function to wait for port shutdowns. + + Create a function which closes over the provided ports. The function will + retry probing ports until they are closed or the retry count has been reached. 
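make_wait_for_ports_barrier() closes over the port list and returns a zero-argument callable; with operator.not_ as the predicate, the probe keeps retrying while any port is still listening, so a truthy final result means something never released its socket. A short usage sketch, assuming a charm context where the services were just stopped:

    from charmhelpers.contrib.openstack.utils import (
        make_wait_for_ports_barrier,
    )

    # Probe the service ports after a stop request; retried up to 5 times
    # with a small growing delay while anything is still listening.
    wait_for_shutdown = make_wait_for_ports_barrier([8070, 8080])
    if wait_for_shutdown():
        # Still truthy after all retries: at least one port stayed open.
        print('ports still open, shutdown incomplete')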
+ + """ + @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1) + def retry_port_check(): + _, ports_states = _check_listening_on_ports_list(ports) + juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG") + return any(ports_states) + return retry_port_check + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1599,6 +1615,7 @@ def pause_unit(assess_status_func, services=None, ports=None, services=services, charm_func=charm_func) set_unit_paused() + if assess_status_func: message = assess_status_func() if message: diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 7882e2ce..d1c61754 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -41,6 +41,7 @@ ) from charmhelpers import deprecate from charmhelpers.core.hookenv import ( + application_name, config, service_name, local_unit, @@ -162,6 +163,17 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) +def send_application_name(relid=None): + """Send the application name down the relation. + + :param relid: Relation id to set application name in. + :type relid: str + """ + relation_set( + relation_id=relid, + relation_settings={'application-name': application_name()}) + + def send_osd_settings(): """Pass on requested OSD settings to osd units.""" try: @@ -256,6 +268,7 @@ class BasePool(object): 'compression-max-blob-size': (int, None), 'compression-max-blob-size-hdd': (int, None), 'compression-max-blob-size-ssd': (int, None), + 'rbd-mirroring-mode': (str, ('image', 'pool')) } def __init__(self, service, name=None, percent_data=None, app_name=None, @@ -1755,6 +1768,7 @@ def _partial_build_common_op_create(self, max_bytes=None, max_objects=None, namespace=None, + rbd_mirroring_mode='pool', weight=None): """Build common part of a create pool operation. @@ -1813,6 +1827,9 @@ def _partial_build_common_op_create(self, :type max_objects: Optional[int] :param namespace: Group namespace :type namespace: Optional[str] + :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD + mirroring is enabled. + :type rbd_mirroring_mode: Optional[str] :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. 
Used to calculate number of Placement Groups to create @@ -1837,6 +1854,7 @@ def _partial_build_common_op_create(self, 'max-bytes': max_bytes, 'max-objects': max_objects, 'group-namespace': namespace, + 'rbd-mirroring-mode': rbd_mirroring_mode, 'weight': weight, } @@ -2203,6 +2221,7 @@ def send_request_if_needed(request, relation='ceph'): for rid in relation_ids(relation): log('Sending request {}'.format(request.request_id), level=DEBUG) relation_set(relation_id=rid, broker_req=request.request) + relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()}) def has_broker_rsp(rid=None, unit=None): diff --git a/ceph-proxy/charmhelpers/core/decorators.py b/ceph-proxy/charmhelpers/core/decorators.py index 6ad41ee4..e7e95d17 100644 --- a/ceph-proxy/charmhelpers/core/decorators.py +++ b/ceph-proxy/charmhelpers/core/decorators.py @@ -53,3 +53,41 @@ def _retry_on_exception_inner_2(*args, **kwargs): return _retry_on_exception_inner_2 return _retry_on_exception_inner_1 + + +def retry_on_predicate(num_retries, predicate_fun, base_delay=0): + """Retry based on return value + + The return value of the decorated function is passed to the given predicate_fun. If the + result of the predicate is False, retry the decorated function up to num_retries times + + An exponential backoff up to base_delay^num_retries seconds can be introduced by setting + base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay + + :param num_retries: Max. number of retries to perform + :type num_retries: int + :param predicate_fun: Predicate function to determine if a retry is necessary + :type predicate_fun: callable + :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay) + :type base_delay: float + """ + def _retry_on_pred_inner_1(f): + def _retry_on_pred_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + delay = base_delay + while True: + result = f(*args, **kwargs) + if predicate_fun(result) or retries <= 0: + return result + delay *= multiplier + multiplier += 1 + log("Result {}, retrying '{}' {} more times (delay={})".format( + result, f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_pred_inner_2 + + return _retry_on_pred_inner_1 diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index a785efdf..f826f6fe 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -19,6 +19,7 @@ # Nick Moffitt # Matthew Wedgwood +import errno import os import re import pwd @@ -59,6 +60,7 @@ ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' +CA_CERT_DIR = '/usr/local/share/ca-certificates' def service_start(service_name, **kwargs): @@ -677,7 +679,7 @@ def check_hash(path, checksum, hash_type='md5'): :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, + Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. 
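With 'rbd-mirroring-mode' folded into the common create-pool op (and constrained to 'image' or 'pool' by the BasePool op map), a broker request now announces the pool's mirroring mode alongside the quota and weight keys. A trimmed sketch of the dict tail _partial_build_common_op_create() assembles, with invented values:

    # Tail of the common create-pool broker op after this sync; the
    # leading name/app-name/PG fields are omitted here for brevity.
    op_common_tail = {
        'max-bytes': None,
        'max-objects': None,
        'group-namespace': None,
        'rbd-mirroring-mode': 'image',  # default is 'pool'
        'weight': 40,                   # invented percentage
    }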
:raises ChecksumError: If the file fails the checksum @@ -825,7 +827,8 @@ def list_nics(nic_type=None): if nic_type: for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') ip_output = ip_output.split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: @@ -841,7 +844,8 @@ def list_nics(nic_type=None): interfaces.append(iface) else: cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') ip_output = (line.strip() for line in ip_output if line) key = re.compile(r'^[0-9]+:\s+(.+):') @@ -865,7 +869,8 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -877,7 +882,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -889,7 +894,7 @@ def get_nic_hwaddr(nic): def chdir(directory): """Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. + run commands from a specified directory. :param str directory: The directory path to change to for this context. """ @@ -924,9 +929,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): for root, dirs, files in os.walk(path, followlinks=follow_links): for name in dirs + files: full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: + try: chown(full, uid, gid) + except (IOError, OSError) as e: + # Intended to ignore "file not found". Catching both to be + # compatible with both Python 2.7 and 3.x. 
+ if e.errno == errno.ENOENT: + pass def lchownr(path, owner, group): @@ -1074,7 +1083,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 3edc0687..a3ec6947 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -25,7 +25,8 @@ 'cosmic', 'disco', 'eoan', - 'focal' + 'focal', + 'groovy' ) diff --git a/ceph-proxy/lib/charms_ceph/broker.py b/ceph-proxy/lib/charms_ceph/broker.py index 25427697..d00baedc 100644 --- a/ceph-proxy/lib/charms_ceph/broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -750,7 +750,7 @@ def handle_create_cephfs(request, service): """ cephfs_name = request.get('mds_name') data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', []) + extra_pools = request.get('extra_pools', None) or [] metadata_pool = request.get('metadata_pool') # Check if the user params were provided if not cephfs_name or not data_pool or not metadata_pool: diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 9da4dc12..52d380b4 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -2141,6 +2141,8 @@ def roll_monitor_cluster(new_version, upgrade_key): # A sorted list of osd unit names mon_sorted_list = sorted(monitor_list) + # Install packages immediately but defer restarts to when it's our time. + upgrade_monitor(new_version, restart_daemons=False) try: position = mon_sorted_list.index(my_name) log("upgrade position: {}".format(position)) @@ -2182,7 +2184,7 @@ def noop(): pass -def upgrade_monitor(new_version, kick_function=None): +def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): """Upgrade the current ceph monitor to the new version :param new_version: String version to upgrade to. 
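The one-line broker change from request.get('extra_pools', []) to request.get('extra_pools', None) or [] closes a subtle gap: dict.get() only applies its default when the key is absent, so a request that explicitly carried extra_pools: null used to hand None to the pool loop. A tiny illustration:

    request = {'extra_pools': None}  # key present, value explicitly null

    old_style = request.get('extra_pools', [])          # -> None
    new_style = request.get('extra_pools', None) or []  # -> []

    assert old_style is None
    assert new_style == []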
@@ -2207,6 +2209,22 @@ def upgrade_monitor(new_version, kick_function=None): status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) kick_function() + + try: + apt_install(packages=determine_packages(), fatal=True) + rm_packages = determine_packages_to_remove() + if rm_packages: + apt_purge(packages=rm_packages, fatal=True) + except subprocess.CalledProcessError as err: + log("Upgrading packages failed " + "with message: {}".format(err)) + status_set("blocked", "Upgrade to {} failed".format(new_version)) + sys.exit(1) + + if not restart_daemons: + log("Packages upgraded but not restarting daemons yet.") + return + try: if systemd(): service_stop('ceph-mon') @@ -2216,10 +2234,7 @@ def upgrade_monitor(new_version, kick_function=None): service_stop('ceph-mgr.target') else: service_stop('ceph-mon-all') - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) + kick_function() owner = ceph_user() diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 03536b99..97ff13d4 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -24,6 +24,8 @@ gate_bundles: - erasure-coded: focal-ussuri-ec - focal-victoria - erasure-coded: focal-victoria-ec + - groovy-victoria + - erasure-coded: groovy-victoria-ec dev_bundles: # Icehouse @@ -32,8 +34,6 @@ dev_bundles: - xenial-ocata # Pike - xenial-pike - - groovy-victoria - - erasure-coded: groovy-victoria-ec smoke_bundles: - focal-ussuri From 4774c9c5117778cc211e1f4fdca944f4cbc2ccb2 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:15:41 +0100 Subject: [PATCH 2113/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 Change-Id: Ide8ec614917d57bd89600686b80d1f10ebbb1cd2 --- ceph-fs/requirements.txt | 19 +++++++++++++++++-- ceph-fs/src/test-requirements.txt | 7 +++++++ ceph-fs/src/tox.ini | 13 ++++++++++++- ceph-fs/test-requirements.txt | 23 ++++++++++++++++++++++- 4 files changed, 58 insertions(+), 4 deletions(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index aaaa3e03..a4d3eff0 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -3,9 +3,24 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +# NOTE(lourot): This might look like a duplication of test-requirements.txt but +# some tox targets use only test-requirements.txt whereas charm-build uses only +# requirements.txt setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 + # Build requirements charm-tools>=2.4.4 -# importlib-resources 1.1.0 removed Python 3.5 support -importlib-resources<1.1.0 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + simplejson + +# Newer versions use keywords that didn't exist in python 3.5 yet (e.g. 
+# "ModuleNotFoundError") +# NOTE(lourot): This might look like a duplication of test-requirements.txt but +# some tox targets use only test-requirements.txt whereas charm-build uses only +# requirements.txt +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index d3c9be84..520681e1 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -3,6 +3,13 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +# pep8 requirements +charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index 07a7adcb..e7630475 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -11,6 +11,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -23,7 +35,6 @@ install_command = [testenv:pep8] basepython = python3 -deps=charm-tools commands = charm-proof [testenv:func-noop] diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index d078e270..8ab24b2e 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -6,10 +6,31 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Lint and unit test requirements flake8>=2.2.4 + stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' + requests>=2.18.4 charms.reactive -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
+# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack From 43d122df2799a4e916f08c02b8d9a24453c27e82 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:15:58 +0100 Subject: [PATCH 2114/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 Change-Id: I9f52fe90d44a95446d8a005d6cb1c98ea73a70e0 --- ceph-mon/requirements.txt | 6 ++++-- ceph-mon/test-requirements.txt | 33 ++++++++++++++++++++++++++++++++- ceph-mon/tox.ini | 12 ++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 8ba19415..360ecbaa 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -7,11 +7,13 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 -netaddr>=0.7.12,!=0.7.16 + +# Strange import error with newer netaddr: +netaddr>0.7.16,<0.8.0 + Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 56fbf922..1aa96356 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -9,11 +9,42 @@ # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + requests>=2.18.4 -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. +# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + flake8>=2.2.4 stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' + coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack + +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +tempest<24.0.0;python_version<'3.6' + +croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index e2d58f59..ab9593f3 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -14,6 +14,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} From caef543d8b09097c51af44d3a8dc543028a1c70d Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:16:15 +0100 Subject: [PATCH 2115/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 Change-Id: I750963a14adbe775f723c973db023456f193f752 --- ceph-osd/requirements.txt | 6 ++++-- ceph-osd/test-requirements.txt | 33 ++++++++++++++++++++++++++++++++- ceph-osd/tox.ini | 12 ++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 8ba19415..360ecbaa 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -7,11 +7,13 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 -netaddr>=0.7.12,!=0.7.16 + +# Strange import error with newer netaddr: +netaddr>0.7.16,<0.8.0 + Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 56fbf922..1aa96356 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -9,11 +9,42 @@ # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + requests>=2.18.4 -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. +# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + flake8>=2.2.4 stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. 
"ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' + coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack + +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +tempest<24.0.0;python_version<'3.6' + +croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index e2d58f59..ab9593f3 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -14,6 +14,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} From fc7fa10c10ea8c883d5047718dac4adc56833359 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:16:33 +0100 Subject: [PATCH 2116/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 Change-Id: I12b3c45b45e0b1d6db16902c81663cbe2ef84d17 --- ceph-proxy/requirements.txt | 6 ++++-- ceph-proxy/test-requirements.txt | 33 +++++++++++++++++++++++++++++++- ceph-proxy/tox.ini | 12 ++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 8ba19415..360ecbaa 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -7,11 +7,13 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 -netaddr>=0.7.12,!=0.7.16 + +# Strange import error with newer netaddr: +netaddr>0.7.16,<0.8.0 + Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 56fbf922..1aa96356 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -9,11 +9,42 @@ # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + requests>=2.18.4 -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
+# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + flake8>=2.2.4 stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' + coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack + +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +tempest<24.0.0;python_version<'3.6' + +croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index e2d58f59..ab9593f3 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -14,6 +14,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} From 4a8f5d1345d9832f7d09d845f2baa62ed9bfb448 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:17:06 +0100 Subject: [PATCH 2117/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 Change-Id: I45ff5385a905bd5f6aa635d0920de5326abb7b24 --- ceph-rbd-mirror/requirements.txt | 19 +++++++++++++++++-- ceph-rbd-mirror/src/test-requirements.txt | 7 +++++++ ceph-rbd-mirror/src/tox.ini | 13 ++++++++++++- ceph-rbd-mirror/test-requirements.txt | 23 ++++++++++++++++++++++- 4 files changed, 58 insertions(+), 4 deletions(-) diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index aaaa3e03..a4d3eff0 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -3,9 +3,24 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +# NOTE(lourot): This might look like a duplication of test-requirements.txt but +# some tox targets use only test-requirements.txt whereas charm-build uses only +# requirements.txt setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 + # Build requirements charm-tools>=2.4.4 -# importlib-resources 1.1.0 removed Python 3.5 support -importlib-resources<1.1.0 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# 
published +keyring<21 + simplejson + +# Newer versions use keywords that didn't exist in python 3.5 yet (e.g. +# "ModuleNotFoundError") +# NOTE(lourot): This might look like a duplication of test-requirements.txt but +# some tox targets use only test-requirements.txt whereas charm-build uses only +# requirements.txt +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index d3c9be84..520681e1 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -3,6 +3,13 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +# pep8 requirements +charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index 07a7adcb..e7630475 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -11,6 +11,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -23,7 +35,6 @@ install_command = [testenv:pep8] basepython = python3 -deps=charm-tools commands = charm-proof [testenv:func-noop] diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index d078e270..8ab24b2e 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -6,10 +6,31 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Lint and unit test requirements flake8>=2.2.4 + stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' + requests>=2.18.4 charms.reactive -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
+# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack From bdbad804c8b629cf30c2ca6bab2900979c11a98a Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 10 Dec 2020 22:16:49 +0100 Subject: [PATCH 2118/2699] Sync release-tools Sync release-tools boilerplate containing these changes in order to pin pip < 20.3: https://github.com/openstack-charmers/release-tools/pull/125 https://github.com/openstack-charmers/release-tools/pull/126 https://github.com/openstack-charmers/release-tools/pull/127 https://github.com/openstack-charmers/release-tools/pull/129 Change-Id: Ie7ecbc84ddea41d7d4907f2e39fb723b3a8da549 --- ceph-radosgw/requirements.txt | 6 +++-- ceph-radosgw/test-requirements.txt | 35 +++++++++++++++++++++++++++++- ceph-radosgw/tox.ini | 12 ++++++++++ 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 8ba19415..360ecbaa 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -7,11 +7,13 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 pbr>=1.8.0,<1.9.0 simplejson>=2.2.0 netifaces>=0.10.4 -netaddr>=0.7.12,!=0.7.16 + +# Strange import error with newer netaddr: +netaddr>0.7.16,<0.8.0 + Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 56fbf922..9aea716b 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -9,11 +9,44 @@ # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 charm-tools>=2.4.4 + +# Workaround until https://github.com/juju/charm-tools/pull/589 gets +# published +keyring<21 + requests>=2.18.4 -mock>=1.2 + +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. +# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + flake8>=2.2.4 stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. 
"ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' + coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack + +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +tempest<24.0.0;python_version<'3.6' + +croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index e2d58f59..ab9593f3 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -14,6 +14,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} From 8a8e90848933cc23b3b6c50482087fcebf13692e Mon Sep 17 00:00:00 2001 From: Ionut Balutoiu Date: Tue, 27 Oct 2020 16:59:07 +0000 Subject: [PATCH 2119/2699] Handle RBD mirroring mode set in the relation Change-Id: I423eb38f5197879c5f8f7999acb11ece3d26a6a4 Co-authored-by: Marius Oprin Signed-off-by: Marius Oprin --- .../lib/charm/openstack/ceph_rbd_mirror.py | 34 +++++++++-- .../src/reactive/ceph_rbd_mirror_handlers.py | 10 +++- .../test_ceph_rbd_mirror_handlers.py | 18 ++++-- ...est_lib_charm_openstack_ceph_rbd_mirror.py | 57 ++++++++++++++++++- 4 files changed, 106 insertions(+), 13 deletions(-) diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index 0eb87cab..c20ea48d 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -126,8 +126,8 @@ def _mirror_pool_info(self, pool): universal_newlines=True) return json.loads(output) - def mirror_pool_enabled(self, pool): - return self._mirror_pool_info(pool).get('mode', None) == 'pool' + def mirror_pool_enabled(self, pool, mode='pool'): + return self._mirror_pool_info(pool).get('mode', None) == mode def mirror_pool_has_peers(self, pool): return len(self._mirror_pool_info(pool).get('peers', [])) > 0 @@ -151,9 +151,9 @@ def mirror_pools_summary(self, pools): stats['image_states'][state] += value return stats - def mirror_pool_enable(self, pool): + def mirror_pool_enable(self, pool, mode='pool'): base_cmd = ['rbd', '--id', self.ceph_id, 'mirror', 'pool'] - subprocess.check_call(base_cmd + ['enable', pool, 'pool']) + subprocess.check_call(base_cmd + ['enable', pool, mode]) subprocess.check_call(base_cmd + ['peer', 'add', pool, 
'client.{}@remote' .format(self.ceph_id)]) @@ -176,6 +176,32 @@ def pools_in_broker_request(self, rq, ops_to_check=None): result_set.add(op['name']) return result_set + def pool_mirroring_mode(self, pool, broker_requests=[]): + """Get the Ceph RBD mirroring mode for the pool. + + Checks if the pool RBD mirroring mode was explicitly set as part of + the 'create-pool' operation into any of the given broker requests. + If this is true, its value is returned, otherwise the default 'pool' + mirroring mode is used. + + :param pool: Pool name + :type pool: str + :param broker_requests: List of broker requests + :type broker_requests: List[ch_ceph.CephBrokerRq] + :returns: Ceph RBD mirroring mode + :rtype: str + """ + default_mirroring_mode = 'pool' + for rq in broker_requests: + if not rq: + continue + assert rq.api_version == 1 + for op in rq.ops: + if op['op'] == 'create-pool' and op['name'] == pool: + return op.get( + 'rbd-mirroring-mode', default_mirroring_mode) + return default_mirroring_mode + def collapse_and_filter_broker_requests(self, broker_requests, allowed_ops, require_vp=None): """Extract allowed ops from broker requests into one collapsed request. diff --git a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py index baadf55a..dcf24cc8 100644 --- a/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/src/reactive/ceph_rbd_mirror_handlers.py @@ -117,12 +117,16 @@ def configure_pools(): pools_in_rq |= charm_instance.pools_in_broker_request( remote_rq) if remote_rq else set() for pool, attrs in charm_instance.eligible_pools(local.pools).items(): - if not (charm_instance.mirror_pool_enabled(pool) and - charm_instance.mirror_pool_has_peers(pool)): + pool_mirroring_mode = charm_instance.pool_mirroring_mode( + pool, [rq, remote_rq]) + mirroring_enabled = charm_instance.mirror_pool_enabled( + pool, pool_mirroring_mode) + has_peers = charm_instance.mirror_pool_has_peers(pool) + if not (mirroring_enabled and has_peers): ch_core.hookenv.log('Enabling mirroring for pool "{}"' .format(pool), level=ch_core.hookenv.INFO) - charm_instance.mirror_pool_enable(pool) + charm_instance.mirror_pool_enable(pool, pool_mirroring_mode) if (pool not in pools_in_rq and 'erasure_code_profile' not in attrs['parameters']): # A pool exists that there is no broker request for which means diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index 1b692a54..a5bf162c 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -157,11 +157,17 @@ def test_configure_pools(self): self.patch_object(handlers.reactive, 'endpoint_from_flag') endpoint_local = mock.MagicMock() endpoint_remote = mock.MagicMock() + self.crm_charm.collapse_and_filter_broker_requests.side_effect = [ + endpoint_local, endpoint_remote] endpoint_local.endpoint_name = 'ceph-local' endpoint_local.pools = { 'cinder-ceph': { 'applications': {'rbd': {}}, - 'parameters': {'pg_num': 42, 'size': 3}, + 'parameters': { + 'pg_num': 42, + 'size': 3, + 'rbd-mirroring-mode': 'pool' + }, 'quota': {'max_bytes': 1024, 'max_objects': 51}, }, } @@ -170,6 +176,8 @@ def test_configure_pools(self): endpoint_remote] self.crm_charm.eligible_pools.return_value = endpoint_local.pools self.crm_charm.mirror_pool_enabled.return_value = False + self.crm_charm.pool_mirroring_mode.return_value = 'pool' + handlers.configure_pools() 
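+        # The assertions below verify that the mirroring mode resolved from
+        # the broker requests is threaded through to mirror_pool_enabled()
+        # and mirror_pool_enable().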
self.endpoint_from_flag.assert_has_calls([ mock.call('ceph-local.available'), @@ -177,8 +185,10 @@ def test_configure_pools(self): ]) self.crm_charm.eligible_pools.assert_called_once_with( endpoint_local.pools) + self.crm_charm.pool_mirroring_mode.assert_called_once_with( + 'cinder-ceph', [endpoint_local, endpoint_remote]) self.crm_charm.mirror_pool_enabled.assert_called_once_with( - 'cinder-ceph') + 'cinder-ceph', 'pool') self.crm_charm.mirror_pool_enable.assert_called_once_with( - 'cinder-ceph') - endpoint_remote.maybe_send_rq.assert_called_once_with(mock.ANY) + 'cinder-ceph', 'pool') + endpoint_remote.maybe_send_rq.assert_called_once_with(endpoint_local) diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index fee11651..739e65ba 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -14,6 +14,7 @@ import collections import mock +import json import subprocess import charms_openstack.test_utils as test_utils @@ -92,10 +93,10 @@ def test_mirror_pool_enabled(self): 'client.rbd-mirror.juju-c50b1a-zaza-4ce96f1e7e43-12'}] } crmc._mirror_pool_info = _mirror_pool_info - self.assertTrue(crmc.mirror_pool_enabled('apool')) + self.assertTrue(crmc.mirror_pool_enabled('apool', mode='pool')) _mirror_pool_info.assert_called_once_with('apool') _mirror_pool_info.return_value = {'mode': 'disabled'} - self.assertFalse(crmc.mirror_pool_enabled('apool')) + self.assertFalse(crmc.mirror_pool_enabled('apool', mode='pool')) def test_mirror_pool_has_peers(self): self.patch_object(ceph_rbd_mirror.socket, 'gethostname') @@ -192,3 +193,55 @@ def add_op(self, op): {'app-name': 'rbd', 'name': 'pool-rq2', 'op': 'create-pool', 'someotherkey': 'value'}) self.assertTrue(len(rq.ops) == 1) + + def test_pool_mirroring_mode(self): + self.patch_object(ceph_rbd_mirror.ch_ceph, 'CephBrokerRq') + + class FakeCephBrokerRq(object): + def __init__(self, raw_request_data=None): + request_data = json.loads(raw_request_data) + self.api_version = request_data['api-version'] + self.request_id = request_data['request-id'] + self.set_ops(request_data['ops']) + + def set_ops(self, ops): + self.ops = ops + + def add_op(self, op): + self.ops.append(op) + + self.CephBrokerRq.side_effect = FakeCephBrokerRq + + brq1_data = json.dumps({ + 'api-version': 1, + 'request-id': 'broker_rq1', + 'ops': [ + { + 'op': 'create-pool', + 'name': 'pool-rq0', + 'app-name': 'rbd-pool', + 'rbd-mirroring-mode': 'pool' + }, + ] + }) + brq2_data = json.dumps({ + 'api-version': 1, + 'request-id': 'broker_rq2', + 'ops': [ + { + 'op': 'create-pool', + 'name': 'pool-rq1', + 'app-name': 'rbd-image', + 'rbd-mirroring-mode': 'image' + }, + ] + }) + + brq1 = self.CephBrokerRq(raw_request_data=brq1_data) + brq2 = self.CephBrokerRq(raw_request_data=brq2_data) + broker_requests = [brq1, brq2, None] + crmc = ceph_rbd_mirror.CephRBDMirrorCharm() + rq0 = crmc.pool_mirroring_mode('pool-rq0', broker_requests) + self.assertEqual('pool', rq0) + rq1 = crmc.pool_mirroring_mode('pool-rq1', broker_requests) + self.assertEqual('image', rq1) From 92f4ba4898bfd05ee7f17e4c8aa5ae91a3c304df Mon Sep 17 00:00:00 2001 From: Martin Kalcok Date: Thu, 8 Oct 2020 15:20:40 +0200 Subject: [PATCH 2120/2699] Added `start` and `stop` actions for management of ceph OSDs Change-Id: If8b83ab06364903548c5841487034bc1bb9aaf0c Closes-Bug: #1477731 func-test-pr: 
https://github.com/openstack-charmers/zaza-openstack-tests/pull/473 --- ceph-osd/README.md | 21 ++ ceph-osd/actions.yaml | 20 ++ ceph-osd/actions/service.py | 192 +++++++++++++++++ ceph-osd/actions/start | 1 + ceph-osd/actions/stop | 1 + ceph-osd/tests/tests.yaml | 1 + ceph-osd/unit_tests/test_actions_service.py | 223 ++++++++++++++++++++ 7 files changed, 459 insertions(+) create mode 100755 ceph-osd/actions/service.py create mode 120000 ceph-osd/actions/start create mode 120000 ceph-osd/actions/stop create mode 100644 ceph-osd/unit_tests/test_actions_service.py diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 91e20009..c5601921 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -244,6 +244,8 @@ deployed then see file `actions.yaml`. * `osd-in` * `osd-out` * `security-checklist` +* `start` +* `stop` * `zap-disk` ## Working with OSDs @@ -293,6 +295,25 @@ Example: juju run-action --wait ceph-osd/4 osd-in +### Managing ceph OSDs + +Use the `stop` and `start` actions to manage ceph OSD services within the unit. +Both actions take one parameter, `osds`, which should contain comma-separated +numerical IDs of `ceph-osd` services or the keyword `all`. + +Example: + + # stop ceph-osd@0 and ceph-osd@1 + juju run-action --wait ceph-osd/0 stop osds=0,1 + # start all ceph-osd services on the unit + juju run-action --wait ceph-osd/0 start osds=all + + > **Note**: Stopping ceph-osd services will put the unit into the blocked + state. + + > **Important**: This action is not available on Trusty due to reliance on + systemd. + ## Working with disks ### List disks diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 880a922d..1674a08d 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -84,5 +84,25 @@ zap-disk: required: - devices - i-really-mean-it +start: + description: | + \ + Start OSD by ID + Documentation: https://jaas.ai/ceph-osd/ + params: + osds: + description: A comma-separated list of OSD IDs to start (or keyword 'all') + required: + - osds +stop: + description: | + \ + Stop OSD by ID + Documentation: https://jaas.ai/ceph-osd/ + params: + osds: + description: A comma-separated list of OSD IDs to stop (or keyword 'all') + required: + - osds security-checklist: description: Validate the running configuration against the OpenStack security guides checklist diff --git a/ceph-osd/actions/service.py b/ceph-osd/actions/service.py new file mode 100755 index 00000000..1b8fe1ff --- /dev/null +++ b/ceph-osd/actions/service.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +# +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import sys +import shutil +import subprocess + + +sys.path.append('lib') +sys.path.append('hooks') + +from charmhelpers.core.hookenv import ( + function_fail, + function_get, + log, + WARNING, +) +from ceph_hooks import assess_status + +START = 'start' +STOP = 'stop' + +ALL = 'all' + + +def systemctl_execute(action, services): + """ + Execute `systemctl` action on specified services. 
+
+    Action can be either 'start' or 'stop' (defined by global constants
+    START, STOP). Parameter `services` is a list of service names on which
+    the action will be executed. If the parameter `services` contains the
+    constant ALL, the action will be executed on all ceph-osd services.
+
+    :param action: Action to be executed (start or stop)
+    :type action: str
+    :param services: List of services to be targeted by the action
+    :type services: list[str]
+    :return: None
+    """
+    if ALL in services:
+        cmd = ['systemctl', action, 'ceph-osd.target']
+    else:
+        cmd = ['systemctl', action] + services
+    subprocess.check_call(cmd, timeout=300)
+
+
+def osd_ids_to_service_names(osd_ids):
+    """
+    Transform a set of OSD IDs into the list of respective service names.
+
+    Example:
+    >>> osd_ids_to_service_names({0,1})
+    ['ceph-osd@0.service', 'ceph-osd@1.service']
+
+    :param osd_ids: Set of OSD IDs to be converted
+    :type osd_ids: set[str | int]
+    :return: List of service names
+    :rtype: list[str]
+    """
+    service_list = []
+    for id_ in osd_ids:
+        if id_ == ALL:
+            service_list.append(ALL)
+        else:
+            service_list.append("ceph-osd@{}.service".format(id_))
+    return service_list
+
+
+def check_service_is_present(service_list):
+    """
+    Check that every service from the `service_list` parameter exists
+    on the system. Raises RuntimeError if any service is missing.
+
+    :param service_list: List of systemd services
+    :type service_list: list[str]
+    :raises RuntimeError: if any service is missing
+    """
+    if ALL in service_list:
+        return
+
+    service_list_cmd = ['systemctl', 'list-units', '--full',
+                        '--all', '--no-pager', '-t', 'service']
+    present_services = subprocess.run(service_list_cmd,
+                                      stdout=subprocess.PIPE,
+                                      timeout=30).stdout.decode('utf-8')
+
+    missing_services = []
+    for service_name in service_list:
+        if service_name not in present_services:
+            missing_services.append(service_name)
+
+    if missing_services:
+        raise RuntimeError('Some services are not present on this '
+                           'unit: {}'.format(missing_services))
+
+
+def parse_arguments():
+    """
+    Fetch action arguments and parse them from a comma-separated list into
+    a set of OSD IDs.
+
+    :return: Set of OSD IDs
+    :rtype: set(str)
+    """
+    raw_arg = function_get('osds')
+
+    if raw_arg is None:
+        raise RuntimeError('Action argument "osds" is missing')
+    args = set()
+
+    # convert OSD IDs from the user's input into the set
+    for osd_id in str(raw_arg).split(','):
+        args.add(osd_id.strip())
+
+    if ALL in args and len(args) != 1:
+        args = {ALL}
+        log('keyword "all" was found in "osds" argument.
Dropping other ' + 'explicitly defined OSD IDs', WARNING) + + return args + + +def execute_action(action): + """Core implementation of the 'start'/'stop' actions + + :param action: Either START or STOP (see global constants) + :return: None + """ + if action not in (START, STOP): + raise RuntimeError('Unknown action "{}"'.format(action)) + + osds = parse_arguments() + services = osd_ids_to_service_names(osds) + + check_service_is_present(services) + + systemctl_execute(action, services) + + assess_status() + + +def stop(): + """Shortcut to execute 'stop' action""" + execute_action(STOP) + + +def start(): + """Shortcut to execute 'start' action""" + execute_action(START) + + +ACTIONS = {'stop': stop, + 'start': start, + } + + +def main(args): + action_name = os.path.basename(args.pop(0)) + try: + action = ACTIONS[action_name] + except KeyError: + s = "Action {} undefined".format(action_name) + function_fail(s) + return + else: + try: + log("Running action '{}'.".format(action_name)) + if shutil.which('systemctl') is None: + raise RuntimeError("This action requires systemd") + action() + except Exception as e: + function_fail("Action '{}' failed: {}".format(action_name, str(e))) + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/ceph-osd/actions/start b/ceph-osd/actions/start new file mode 120000 index 00000000..12afe70c --- /dev/null +++ b/ceph-osd/actions/start @@ -0,0 +1 @@ +service.py \ No newline at end of file diff --git a/ceph-osd/actions/stop b/ceph-osd/actions/stop new file mode 120000 index 00000000..12afe70c --- /dev/null +++ b/ceph-osd/actions/stop @@ -0,0 +1 @@ +service.py \ No newline at end of file diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 1f6354e8..02c1a6bc 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -22,6 +22,7 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest tests_options: force_deploy: - groovy-victoria diff --git a/ceph-osd/unit_tests/test_actions_service.py b/ceph-osd/unit_tests/test_actions_service.py new file mode 100644 index 00000000..daa64e8d --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_service.py @@ -0,0 +1,223 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
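+# These unit tests exercise actions/service.py in isolation: systemctl
+# invocation, OSD ID parsing and the service-presence check are verified
+# against mocked subprocess, shutil and hookenv helpers, so no real
+# systemd is needed to run them.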
+ +import mock +from contextlib import contextmanager +from copy import copy + +from actions import service + +from test_utils import CharmTestCase + + +class CompletedProcessMock: + def __init__(self, stdout=b'', stderr=b''): + self.stdout = stdout + self.stderr = stderr + + +class ServiceActionTests(CharmTestCase): + _PRESENT_SERVICES = [ + "ceph-osd@0.service", + "ceph-osd@1.service", + "ceph-osd@2.service", + ] + + _TARGET_ALL = 'ceph-osd.target' + + _CHECK_CALL_TIMEOUT = 300 + + def __init__(self, methodName='runTest'): + super(ServiceActionTests, self).__init__(methodName) + self._func_args = {'osds': None} + + def setUp(self, obj=None, patches=None): + super(ServiceActionTests, self).setUp( + service, + ['subprocess', 'function_fail', 'function_get', + 'log', 'assess_status', 'shutil'] + ) + present_services = '\n'.join(self._PRESENT_SERVICES).encode('utf-8') + + self.shutil.which.return_value = '/bin/systemctl' + self.subprocess.check_call.return_value = None + self.function_get.side_effect = self.function_get_side_effect + self.subprocess.run.return_value = CompletedProcessMock( + stdout=present_services) + + def function_get_side_effect(self, arg): + return self._func_args.get(arg) + + @contextmanager + def func_call_arguments(self, osds=None): + default = copy(self._func_args) + try: + self._func_args = {'osds': osds} + yield + finally: + self._func_args = copy(default) + + def assert_action_start_fail(self, msg): + self.assert_function_fail(service.START, msg) + + def assert_action_stop_fail(self, msg): + self.assert_function_fail(service.STOP, msg) + + def assert_function_fail(self, action, msg): + expected_error = "Action '{}' failed: {}".format(action, msg) + self.function_fail.assert_called_with(expected_error) + + @staticmethod + def call_action_start(): + service.main(['start']) + + @staticmethod + def call_action_stop(): + service.main(['stop']) + + def test_systemctl_execute_all(self): + action = 'start' + services = service.ALL + + expected_call = mock.call(['systemctl', action, self._TARGET_ALL], + timeout=self._CHECK_CALL_TIMEOUT) + + service.systemctl_execute(action, services) + + self.subprocess.check_call.assert_has_calls([expected_call]) + + def systemctl_execute_specific(self): + action = 'start' + services = ['ceph-osd@1.service', 'ceph-osd@2.service'] + + systemctl_call = ['systemctl', action] + services + expected_call = mock.call(systemctl_call, + timeout=self._CHECK_CALL_TIMEOUT) + + service.systemctl_execute(action, services) + + self.subprocess.check_call.assert_has_calls([expected_call]) + + def test_id_translation(self): + service_ids = {1, service.ALL, 2} + expected_names = [ + 'ceph-osd@1.service', + service.ALL, + 'ceph-osd@2.service', + ] + service_names = service.osd_ids_to_service_names(service_ids) + self.assertEqual(sorted(service_names), sorted(expected_names)) + + def test_skip_service_presence_check(self): + service_list = [service.ALL] + + service.check_service_is_present(service_list) + + self.subprocess.run.assert_not_called() + + def test_raise_all_missing_services(self): + missing_service_id = '99,100' + missing_list = [] + for id_ in missing_service_id.split(','): + missing_list.append("ceph-osd@{}.service".format(id_)) + + service_list_cmd = ['systemctl', 'list-units', '--full', '--all', + '--no-pager', '-t', 'service'] + + err_msg = 'Some services are not present on this ' \ + 'unit: {}'.format(missing_list) + + with self.assertRaises(RuntimeError, msg=err_msg): + service.check_service_is_present(missing_list) + + 
self.subprocess.run.assert_called_with(service_list_cmd, + stdout=self.subprocess.PIPE, + timeout=30) + + def test_raise_on_missing_arguments(self): + err_msg = 'Action argument "osds" is missing' + with self.func_call_arguments(osds=None): + with self.assertRaises(RuntimeError, msg=err_msg): + service.parse_arguments() + + def test_parse_service_ids(self): + raw = '1,2,3' + expected_ids = {'1', '2', '3'} + + with self.func_call_arguments(osds=raw): + parsed = service.parse_arguments() + self.assertEqual(parsed, expected_ids) + + def test_parse_service_ids_with_all(self): + raw = '1,2,all' + expected_id = {service.ALL} + + with self.func_call_arguments(osds=raw): + parsed = service.parse_arguments() + self.assertEqual(parsed, expected_id) + + def test_fail_execute_unknown_action(self): + action = 'foo' + err_msg = 'Unknown action "{}"'.format(action) + with self.assertRaises(RuntimeError, msg=err_msg): + service.execute_action(action) + + @mock.patch.object(service, 'systemctl_execute') + def test_execute_action(self, _): + with self.func_call_arguments(osds=service.ALL): + service.execute_action(service.START) + service.systemctl_execute.assert_called_with(service.START, + [service.ALL]) + + service.execute_action(service.STOP) + service.systemctl_execute.assert_called_with(service.STOP, + [service.ALL]) + + @mock.patch.object(service, 'execute_action') + def test_action_stop(self, execute_action): + self.call_action_stop() + execute_action.assert_called_with(service.STOP) + + @mock.patch.object(service, 'execute_action') + def test_action_start(self, execute_action): + self.call_action_start() + execute_action.assert_called_with(service.START) + + def test_actions_requires_systemd(self): + """Actions will fail if systemd is not present on the system""" + self.shutil.which.return_value = None + expected_error = 'This action requires systemd' + with self.func_call_arguments(osds='all'): + self.call_action_start() + self.assert_action_start_fail(expected_error) + + self.call_action_stop() + self.assert_action_stop_fail(expected_error) + + self.subprocess.check_call.assert_not_called() + + def test_unknown_action(self): + action = 'foo' + err_msg = 'Action {} undefined'.format(action) + service.main([action]) + self.function_fail.assert_called_with(err_msg) + + @mock.patch.object(service, 'execute_action') + def test_action_failure(self, start_function): + err_msg = 'Test Error' + service.execute_action.side_effect = RuntimeError(err_msg) + + self.call_action_start() + + self.assert_action_start_fail(err_msg) From 1a54a7b72dda46d719e18fdad86161f5563ac398 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 30 Nov 2020 11:31:45 +0100 Subject: [PATCH 2121/2699] Fix race condition in default zone creation Change-Id: I241b83f748b36aad645d0296acb73d9b654ca60a Closes-Bug: #1905985 --- ceph-radosgw/hooks/hooks.py | 38 ++++++++++++++++------- ceph-radosgw/hooks/multisite.py | 2 +- ceph-radosgw/unit_tests/test_hooks.py | 13 +++++++- ceph-radosgw/unit_tests/test_multisite.py | 16 +++++----- 4 files changed, 47 insertions(+), 22 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 3e94a27f..0371b785 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -324,7 +324,8 @@ def _mon_relation(): if multisite_deployment(): process_multisite_relations() - elif ready_for_service(legacy=legacy) and is_leader(): + elif (ready_for_service(legacy=legacy) and is_leader() and + 'mon' in CONFIGS.complete_contexts()): # In a non multi-site deployment 
create the # zone using the default zonegroup and restart the service internal_url = '{}:{}'.format( @@ -334,19 +335,32 @@ def _mon_relation(): endpoints = [internal_url] zonegroup = 'default' zone = config('zone') - if zone == 'default': - # If the requested zone is 'default' then the charm can - # race with radosgw systemd process in creating it. So, - # retry the zone list if it returns an empty list. - existing_zones = multisite.list_zones(retry_on_empty=True) - else: - existing_zones = multisite.list_zones() + existing_zones = multisite.list_zones() log('Existing zones {}'.format(existing_zones), level=DEBUG) if zone not in existing_zones: - multisite.create_zone(zone, - endpoints=endpoints, - default=True, master=True, - zonegroup=zonegroup) + log("Zone '{}' doesn't exist, creating".format(zone)) + try: + multisite.create_zone(zone, + endpoints=endpoints, + default=True, master=True, + zonegroup=zonegroup) + except subprocess.CalledProcessError as e: + if 'File exists' in e.stderr.decode('UTF-8'): + # NOTE(lourot): may have been created in the + # background by the Rados Gateway daemon, see + # lp:1856106 + log("Zone '{}' existed already after all".format( + zone)) + else: + raise + + existing_zones = multisite.list_zones(retry_on_empty=True) + log('Existing zones {}'.format(existing_zones), + level=DEBUG) + if zone not in existing_zones: + raise RuntimeError("Could not create zone '{}'".format( + zone)) + service_restart(service_name()) else: send_request_if_needed(rq, relation='mon') diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index 52c142e8..18722423 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -30,7 +30,7 @@ def _check_output(cmd): """Logging wrapper for subprocess.check_ouput""" hookenv.log("Executing: {}".format(' '.join(cmd)), level=hookenv.DEBUG) - return subprocess.check_output(cmd).decode('UTF-8') + return subprocess.check_output(cmd, stderr=subprocess.PIPE).decode('UTF-8') @decorators.retry_on_exception(num_retries=5, base_delay=3, diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index eedb504d..988b235a 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -199,7 +199,10 @@ def test_mon_relation(self, _config, _resolve_address, is_leader): _ceph.import_radosgw_key.return_value = True is_leader.return_value = True self.relation_get.return_value = 'seckey' - self.multisite.list_zones.return_value = [] + self.multisite.list_zones.side_effect = [ + [], # at first the default zone doesn't exist, then... + ['default'], # ... it got created + ] self.socket.gethostname.return_value = 'testinghostname' ceph_hooks.mon_relation() self.relation_set.assert_not_called() @@ -219,6 +222,10 @@ def test_mon_relation_request_key(self, _config, _ceph.import_radosgw_key.return_value = True is_leader.return_value = True self.relation_get.return_value = 'seckey' + self.multisite.list_zones.side_effect = [ + [], # at first the default zone doesn't exist, then... + ['default'], # ... it got created + ] self.socket.gethostname.return_value = 'testinghostname' self.request_per_unit_key.return_value = True ceph_hooks.mon_relation() @@ -242,6 +249,10 @@ def test_mon_relation_nokey(self, _config, _ceph.import_radosgw_key.return_value = False self.relation_get.return_value = None is_leader.return_value = True + self.multisite.list_zones.side_effect = [ + [], # at first the default zone doesn't exist, then... + ['default'], # ... 
it got created + ] ceph_hooks.mon_relation() self.assertFalse(_ceph.import_radosgw_key.called) self.service_resume.assert_not_called() diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 4ea5bcbf..8825cedb 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -62,7 +62,7 @@ def test_create_realm(self): 'radosgw-admin', '--id=rgw.testhost', 'realm', 'create', '--rgw-realm=beedata', '--default' - ]) + ], stderr=mock.ANY) def test_list_realms(self): with open(self._testdata(whoami()), 'rb') as f: @@ -97,7 +97,7 @@ def test_create_zonegroup(self): '--rgw-realm=beedata', '--default', '--master' - ]) + ], stderr=mock.ANY) def test_list_zonegroups(self): with open(self._testdata(whoami()), 'rb') as f: @@ -128,7 +128,7 @@ def test_create_zone(self): '--access-key=mykey', '--secret=mypassword', '--read-only=0', - ]) + ], stderr=mock.ANY) def test_modify_zone(self): multisite.modify_zone( @@ -145,7 +145,7 @@ def test_modify_zone(self): '--endpoints=http://localhost:80,https://localhost:443', '--access-key=mykey', '--secret=secret', '--read-only=1', - ]) + ], stderr=mock.ANY) def test_modify_zone_promote_master(self): multisite.modify_zone( @@ -160,7 +160,7 @@ def test_modify_zone_promote_master(self): '--master', '--default', '--read-only=0', - ]) + ], stderr=mock.ANY) def test_modify_zone_partial_credentials(self): multisite.modify_zone( @@ -174,7 +174,7 @@ def test_modify_zone_partial_credentials(self): '--rgw-zone=brundall-east', '--endpoints=http://localhost:80,https://localhost:443', '--read-only=0', - ]) + ], stderr=mock.ANY) def test_list_zones(self): with open(self._testdata(whoami()), 'rb') as f: @@ -234,7 +234,7 @@ def test_pull_realm(self): 'realm', 'pull', '--url=http://master:80', '--access-key=testkey', '--secret=testsecret', - ]) + ], stderr=mock.ANY) def test_pull_period(self): multisite.pull_period(url='http://master:80', @@ -245,4 +245,4 @@ def test_pull_period(self): 'period', 'pull', '--url=http://master:80', '--access-key=testkey', '--secret=testsecret', - ]) + ], stderr=mock.ANY) From 6baf99d23b95db01c3f0aea58949d06418391df6 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 15 Dec 2020 13:34:18 -0500 Subject: [PATCH 2122/2699] More tolerant ceph client relation unit test Made unit tests more tolerant. After recent work on the cinder-ceph replication, a new rbd-mirroring-mode attribute has been added to the create-pool broker requests. 
Change-Id: I6d28291ea111978b26567836c1608e65391c199c Co-authored-by: Aurelien Lourot --- .../unit_tests/test_ceph_iscsi_charm.py | 62 +++---------------- 1 file changed, 9 insertions(+), 53 deletions(-) diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index d07f17ae..9d2a6415 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -355,61 +355,17 @@ def test_on_ceph_client_relation_joined(self): 'ceph-mon/0', {'ingress-address': '10.0.0.3'}) rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') - req_osd_settings = json.loads(rel_data['osd-settings']) + actual_req_osd_settings = json.loads(rel_data['osd-settings']) self.assertEqual( - req_osd_settings, + actual_req_osd_settings, {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}) - req_pool = json.loads(rel_data['broker_req']) - self.assertEqual( - req_pool['ops'], - [{ - 'compression-algorithm': None, - 'compression-max-blob-size': None, - 'compression-max-blob-size-hdd': None, - 'compression-max-blob-size-ssd': None, - 'compression-min-blob-size': None, - 'compression-min-blob-size-hdd': None, - 'compression-min-blob-size-ssd': None, - 'compression-mode': None, - 'compression-required-ratio': None, - 'app-name': None, - 'op': 'create-pool', - 'name': 'iscsi-pool', - 'replicas': 3, - 'pg_num': None, - 'weight': None, - 'group': None, - 'group-namespace': None, - 'app-name': None, - 'max-bytes': None, - 'max-objects': None}, - { - 'compression-algorithm': None, - 'compression-max-blob-size': None, - 'compression-max-blob-size-hdd': None, - 'compression-max-blob-size-ssd': None, - 'compression-min-blob-size': None, - 'compression-min-blob-size-hdd': None, - 'compression-min-blob-size-ssd': None, - 'compression-mode': None, - 'compression-required-ratio': None, - 'op': 'create-pool', - 'name': 'ceph-iscsi', - 'replicas': None, - 'pg_num': None, - 'weight': None, - 'group': None, - 'group-namespace': None, - 'app-name': None, - 'max-bytes': None, - 'max-objects': None}, - { - 'op': 'set-key-permissions', - 'permissions': [ - 'osd', 'allow *', - 'mon', 'allow *', - 'mgr', 'allow r'], - 'client': 'ceph-iscsi'}]) + actual_req_pool_ops = json.loads(rel_data['broker_req'])['ops'] + self.assertEqual(actual_req_pool_ops[0]['op'], 'create-pool') + self.assertEqual(actual_req_pool_ops[0]['name'], 'iscsi-pool') + self.assertEqual(actual_req_pool_ops[1]['op'], 'create-pool') + self.assertEqual(actual_req_pool_ops[1]['name'], 'ceph-iscsi') + self.assertEqual(actual_req_pool_ops[2]['op'], 'set-key-permissions') + self.assertEqual(actual_req_pool_ops[2]['client'], 'ceph-iscsi') def test_on_pools_available(self): self.os.path.exists.return_value = False From 014fa3fef3c2761d4a175833c6243ee9663c7c98 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 15 Dec 2020 13:34:18 -0500 Subject: [PATCH 2123/2699] Fix VMware links in README again Change-Id: I4aed8f5c25ad98312cb5701c774ad50cf4af7929 --- ceph-iscsi/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 9a6f4610..66136595 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -99,8 +99,8 @@ and is available from any ceph-iscsi unit: ## VMware integration Ceph can be used to back iSCSI targets for VMware initiators. This is -documented under [Ceph iSCSI][cdg-ceph-iscsi] in the [OpenStack Charms -Deployment Guide][cdg]. 
+documented under [VMware integration][ceph-docs-vmware-integration] in the +[Charmed Ceph documentation][ceph-docs]. # Bugs @@ -114,8 +114,8 @@ For general charm questions refer to the [OpenStack Charm Guide][cg]. [ceph-osd-charm]: https://jaas.ai/ceph-osd [cg]: https://docs.openstack.org/charm-guide [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide -[cg-preview-charms]: https://docs.openstack.org/charm-guide/latest/openstack-charms.html#tech-preview-charms-beta -[cdg-ceph-iscsi]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-iscsi.html +[ceph-docs-vmware-integration]: https://ubuntu.com/ceph/docs/integration-vmware +[ceph-docs]: https://ubuntu.com/ceph/docs [juju-docs-actions]: https://jaas.ai/docs/actions [ceph-iscsi-upstream]: https://docs.ceph.com/docs/master/rbd/iscsi-overview/ [lp-bugs-charm-ceph-iscsi]: https://bugs.launchpad.net/charm-ceph-iscsi/+filebug From c2d95b97f361d48ed808e5840897bd3c6ed5e416 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 5 Nov 2020 11:46:28 +0100 Subject: [PATCH 2124/2699] Stop leaking secrets in the logs Change-Id: I9cb75ddaa8334a09727610889ca32a9a415481fd --- ceph-iscsi/README.md | 2 +- ceph-iscsi/src/gwcli_client.py | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 9a6f4610..9bf5672f 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -38,7 +38,7 @@ Then add a relation to the ceph-mon application: **Notes**: -* Deploying four ceph-iscsi units is theoretical possible but it is not an +* Deploying four ceph-iscsi units is theoretically possible but it is not an officially supported configuration. * The ceph-iscsi application cannot be containerised. diff --git a/ceph-iscsi/src/gwcli_client.py b/ceph-iscsi/src/gwcli_client.py index d22cfb16..6dcae253 100644 --- a/ceph-iscsi/src/gwcli_client.py +++ b/ceph-iscsi/src/gwcli_client.py @@ -8,9 +8,23 @@ class GatewayClient(): def run(self, path, cmd): _cmd = ['gwcli', path] + # NOTE(lourot): we don't print the full command here as it might + # contain secrets. + logging.info(' '.join(_cmd) + ' ...') _cmd.extend(cmd.split()) - logging.info(_cmd) - subprocess.check_call(_cmd) + + error_msg = None + try: + subprocess.check_output(_cmd, stderr=subprocess.PIPE) + except subprocess.CalledProcessError as e: + error_msg = 'gwcli failed with {}'.format(e.returncode) + logging.error(error_msg) + logging.error('stdout: {}'.format(e.stdout)) + logging.error('stderr: {}'.format(e.stderr)) + + if error_msg: + # NOTE(lourot): we re-raise another free-of-secrets exception: + raise RuntimeError(error_msg) def create_target(self, iqn): self.run( From ac159892f382add8c35459e791306d1507383206 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 6 Nov 2020 14:54:15 +0000 Subject: [PATCH 2125/2699] Permit Four Gateways Allow clusters of 2 or 4 units. 
Change-Id: I8850f0998f360c36c7af3e5e430129cd8063309c func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/456 --- ceph-iscsi/src/charm.py | 4 +--- ceph-iscsi/tests/bundles/focal.yaml | 6 +++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index d7b961b1..84a4fa9c 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -147,9 +147,7 @@ class CephISCSIGatewayCharmBase( DEFAULT_TARGET = "iqn.2003-01.com.ubuntu.iscsi-gw:iscsi-igw" REQUIRED_RELATIONS = ['ceph-client', 'cluster'] - # Two has been tested but four is probably fine too but needs - # validating - ALLOWED_UNIT_COUNTS = [2] + ALLOWED_UNIT_COUNTS = [2, 4] CEPH_CONFIG_PATH = Path('/etc/ceph') CEPH_ISCSI_CONFIG_PATH = CEPH_CONFIG_PATH / 'iscsi' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index a980504b..2ee6e3cd 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -20,6 +20,8 @@ machines: '13': '14': '15': + '16': + '17': applications: ubuntu: charm: cs:ubuntu @@ -30,12 +32,14 @@ applications: - '15' ceph-iscsi: charm: ../../ceph-iscsi.charm - num_units: 2 + num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata to: - '0' - '1' + - '16' + - '17' ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 6 From 3077b4cf0f34ff9b509fe658f3185b5ea26cbbac Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 10:54:49 +0100 Subject: [PATCH 2126/2699] Fix ambiguous relation cinder-ceph/ceph-mon could now refer to: - ceph/client - ceph-replication-device/client - juju-info/juju-info Change-Id: Id39640c0084992284a976c3cb02233e3ae6e20fb --- ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml | 4 ++-- ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml | 4 ++-- 13 files changed, 26 insertions(+), 26 deletions(-) diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml index eb0fb912..40b8daba 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml @@ -165,8 +165,8 @@ relations: - cinder - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - ceph-mon:osd - ceph-osd:mon - - ceph-mon diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml index eb0fb912..40b8daba 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml @@ -165,8 +165,8 @@ relations: - cinder - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - ceph-mon:osd - ceph-osd:mon - - 
ceph-mon diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml index c3ea69f2..b9ffc25b 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml @@ -87,8 +87,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml index de113f1d..ab033ec5 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml @@ -70,8 +70,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml index fccde4dd..2c6ba1d2 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml @@ -90,8 +90,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml index 46b2d14d..8ededfd5 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml @@ -90,8 +90,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml index 834975e3..7d3b949f 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml @@ -92,8 +92,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml index 11956c5c..a62d70fd 100644 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml @@ -92,8 +92,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml index c69ed249..8294b095 100644 --- a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml @@ -194,8 +194,8 @@ relations: - - 'cinder' - 'cinder-ceph' - - - 'cinder-ceph' - - 'ceph-mon' + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' - - 'nova-compute:ceph-access' - 'cinder-ceph:ceph-access' diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml index 8dd138a0..77a6e1fa 100644 --- 
a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml @@ -139,8 +139,8 @@ relations: - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access diff --git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml index 3b72edcd..f0eb067a 100644 --- a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml @@ -138,8 +138,8 @@ relations: - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml index fbe05e87..71c45d1c 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml @@ -90,8 +90,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml index 1fd738ee..47a34acb 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml @@ -90,8 +90,8 @@ relations: - glance - - cinder - cinder-ceph -- - cinder-ceph - - ceph-mon +- - cinder-ceph:ceph + - ceph-mon:client - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp From 5fda1f3429e23156e01ecc6dbd813da2c4114594 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 13 Oct 2020 13:49:34 +0000 Subject: [PATCH 2127/2699] Process requests for radosgw per unit keys Process requests for radosgw per unit keys. Ideally the charm would move to using charms.ceph.utils for the get_radosgw_key method but this does not work atm. 
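A minimal sketch of the per-unit key selection described above (assumptions:
relation data arrives as a plain dict and get_key stands in for
ceph.get_radosgw_key; the real hook reads 'key_name' with relation_get, as
the diff below shows):

    def radosgw_key_data(relation_data, get_key):
        # New-style units publish 'key_name' and receive a key named
        # after it; old-style units fall back to the shared global key.
        key_name = relation_data.get('key_name')
        if key_name:
            return {'{}_key'.format(key_name): get_key(name=key_name)}
        return {'radosgw_key': get_key(name='radosgw.gateway')}

    # Usage with a stub key generator ('rgw-unit-0' is a made-up name):
    stub = lambda name: 'secret-for-' + name
    assert radosgw_key_data({'key_name': 'rgw-unit-0'}, stub) == \
        {'rgw-unit-0_key': 'secret-for-rgw-unit-0'}
    assert radosgw_key_data({}, stub) == \
        {'radosgw_key': 'secret-for-radosgw.gateway'}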
I have raised bug #1899643 to cover deduplicating hooks.ceph v charms.ceph.* Change-Id: I00f5a58f127baa1f7878b0ddb31b4fa009d9651e Closes-Bug: #1899634 --- ceph-proxy/hooks/ceph.py | 4 ++-- ceph-proxy/hooks/ceph_hooks.py | 10 +++++++++- ceph-proxy/tests/tests.yaml | 6 ++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 8ce22a85..d6213a70 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -347,8 +347,8 @@ def import_radosgw_key(key): } -def get_radosgw_key(): - return get_named_key('radosgw.gateway', _radosgw_caps) +def get_radosgw_key(name='radosgw.gateway'): + return get_named_key(name, _radosgw_caps) _default_caps = collections.OrderedDict([ diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 0c16d399..0c72f294 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -176,10 +176,18 @@ def radosgw_relation(relid=None, unit=None): ceph_addrs = config('monitor-hosts') data = { 'fsid': config('fsid'), - 'radosgw_key': ceph.get_radosgw_key(), 'auth': config('auth-supported'), 'ceph-public-address': ceph_addrs, } + key_name = relation_get('key_name', unit=unit, rid=relid) + if key_name: + # New style, per unit keys + data['{}_key'.format(key_name)] = ( + ceph.get_radosgw_key(name=key_name) + ) + else: + # Old style global radosgw key + data['radosgw_key'] = ceph.get_radosgw_key() settings = relation_get(rid=relid, unit=unit) or {} """Process broker request(s).""" diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 03536b99..a510c89e 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -54,6 +54,12 @@ target_deploy_status: nova-compute: workload-status: waiting workload-status-message: "Incomplete relations: storage-backend" + cinder-ceph: + workload-status: waiting + workload-status-message: "Ceph broker request incomplete" + glance: + workload-status: waiting + workload-status-message: "Incomplete relations: storage-backend" tests_options: force_deploy: From d435d025bbb8b9f8c1782fff7650d610460e5b23 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:51:00 +0100 Subject: [PATCH 2128/2699] Remove Disco and Eoan bits Change-Id: I6cff076fd856020a4a438c9729fa7ac4e7a89a0f --- ceph-fs/src/metadata.yaml | 1 - ceph-fs/src/tests/bundles/disco-stein.yaml | 112 ------------------ ceph-fs/src/tests/bundles/eoan-train.yaml | 127 --------------------- 3 files changed, 240 deletions(-) delete mode 100644 ceph-fs/src/tests/bundles/disco-stein.yaml delete mode 100644 ceph-fs/src/tests/bundles/eoan-train.yaml diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index c636e3ea..fc444e36 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic -- eoan - focal - groovy subordinate: false diff --git a/ceph-fs/src/tests/bundles/disco-stein.yaml b/ceph-fs/src/tests/bundles/disco-stein.yaml deleted file mode 100644 index 97974817..00000000 --- a/ceph-fs/src/tests/bundles/disco-stein.yaml +++ /dev/null @@ -1,112 +0,0 @@ -series: disco -applications: - ceph-fs: - charm: ceph-fs - series: disco - num_units: 1 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - percona-cluster: - charm: 
cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/eoan-train.yaml b/ceph-fs/src/tests/bundles/eoan-train.yaml deleted file mode 100644 index 0062b631..00000000 --- a/ceph-fs/src/tests/bundles/eoan-train.yaml +++ /dev/null @@ -1,127 +0,0 @@ -series: eoan -applications: - ceph-fs: - charm: ceph-fs - series: eoan - num_units: 1 - options: - source: cloud:eoan-train/proposed - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:eoan-train/proposed - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:eoan-train/proposed - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:eoan-train/proposed - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: 
cloud:eoan-train/proposed - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:eoan-train/proposed - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:eoan-train/proposed - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:eoan-train/proposed - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:eoan-train/proposed - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:eoan-train/proposed - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:eoan-train -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' From ed4cc6398b8dc4c6a0781137dac885d8f7e2c388 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:51:18 +0100 Subject: [PATCH 2129/2699] Remove Disco and Eoan bits Change-Id: I41e5210d29029adb1993d507eef8b085b08e9741 --- ceph-mon/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 254ee4e6..8a9343c9 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic -- eoan - focal - trusty - groovy From efbbdb70709cb8bd721b48403dbb3ae51b77a3a3 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:51:55 +0100 Subject: [PATCH 2130/2699] Remove Disco and Eoan bits Change-Id: Id6642e0d3773d318c20b74b229786921f1b00dbf --- ceph-osd/metadata.yaml | 1 - ceph-osd/tests/bundles/eoan-train.yaml | 116 ------------------------- 2 files changed, 117 deletions(-) delete mode 100644 ceph-osd/tests/bundles/eoan-train.yaml diff --git a/ceph-osd/metadata.yaml 
b/ceph-osd/metadata.yaml index 4f385ca5..76e841fe 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,7 +13,6 @@ tags: series: - xenial - bionic -- eoan - focal - trusty - groovy diff --git a/ceph-osd/tests/bundles/eoan-train.yaml b/ceph-osd/tests/bundles/eoan-train.yaml deleted file mode 100644 index d728b0b2..00000000 --- a/ceph-osd/tests/bundles/eoan-train.yaml +++ /dev/null @@ -1,116 +0,0 @@ -series: eoan -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: eoan - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:eoan-train - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:eoan-train - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:eoan-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:eoan-train - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:eoan-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:eoan-train - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:eoan-train - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:eoan-train - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:eoan-train - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:eoan-train -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - cinder-ceph:ceph-access - - nova-compute:ceph-access From 0814380ff3de2b29fe6fa223b1996591e16e203b Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:52:14 +0100 Subject: [PATCH 2131/2699] Remove Disco and Eoan bits Change-Id: I836636ab55d53429862d344d79c7ff5617313adb --- ceph-proxy/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git 
a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index eafcca7a..a614e64d 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,7 +12,6 @@ tags: series: - xenial - bionic -- eoan - focal - trusty - groovy From 8ca07f576c0ac67aacfcba0f3f85d8c8d44b6f67 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:52:32 +0100 Subject: [PATCH 2132/2699] Remove Disco and Eoan bits Change-Id: I2b9b621dc023150d171a2e911ccff1a02d459229 --- ceph-radosgw/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 3bf23825..a106435b 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,7 +15,6 @@ tags: series: - xenial - bionic -- eoan - focal - trusty - groovy From a5cd57037a86b28804c0b3989bdba2ecfaa51ec9 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2133/2699] Updates for testing period for 20.01 release * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Ieb501893e211e442398a03b338072705a9d8b51a --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++- .../charmhelpers/contrib/hahelpers/apache.py | 6 +- .../contrib/hardening/audits/apache.py | 5 + .../contrib/openstack/cert_utils.py | 113 ++++++++++++++---- .../charmhelpers/contrib/openstack/context.py | 44 ++++++- .../charmhelpers/contrib/openstack/utils.py | 30 +++++ ceph-mon/hooks/charmhelpers/core/host.py | 3 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 2 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 47 +++++++- ceph-mon/test-requirements.txt | 2 + 10 files changed, 238 insertions(+), 30 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 14b80d96..c87cf489 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -139,10 +139,11 @@ class Check(object): """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} +{service_config_overrides} }} """) - def __init__(self, shortname, description, check_cmd): + def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): @@ -155,6 +156,7 @@ def __init__(self, shortname, description, check_cmd): # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) + self.max_check_attempts = max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) @@ -216,12 +218,19 @@ def write_service_config(self, nagios_context, hostname, nagios_servicegroups): self._remove_service_files() + if self.max_check_attempts: + service_config_overrides = ' max_check_attempts {}'.format( + self.max_check_attempts + ) # Note indentation is here rather than in the template to avoid trailing spaces + else: + service_config_overrides = '' # empty string to avoid printing 'None' templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 
'command': self.command, + 'service_config_overrides': service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) @@ -327,6 +336,11 @@ def write(self): nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } + # If we were passed max_check_attempts, add that to the relation data + try: + nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts + except AttributeError: + pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py index 2c1e371e..a54702bc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -34,6 +34,10 @@ INFO, ) +# This file contains the CA cert from the charms ssl_ca configuration +# option, in future the file name should be updated reflect that. +CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' + def get_cert(cn=None): # TODO: deal with multiple https endpoints via charm config @@ -83,4 +87,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') + host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py index 04825f5a..c1537625 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -98,3 +98,8 @@ def _disable_module(module): def _restart_apache(): """Restarts the apache process""" subprocess.check_output(['service', 'apache2', 'restart']) + + @staticmethod + def is_ssl_enabled(): + """Check if SSL module is enabled or not""" + return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 1eb21542..fc36d0f1 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -16,6 +16,7 @@ import os import json +from base64 import b64decode from charmhelpers.contrib.network.ip import ( get_hostname, @@ -28,10 +29,12 @@ related_units, relation_get, relation_ids, + remote_service_name, unit_get, NoNetworkBinding, log, WARNING, + INFO, ) from charmhelpers.contrib.openstack.ip import ( resolve_address, @@ -44,12 +47,14 @@ ) from charmhelpers.core.host import ( + CA_CERT_DIR, + install_ca_cert, mkdir, write_file, ) from charmhelpers.contrib.hahelpers.apache import ( - install_ca_cert + CONFIG_CA_CERT_FILE, ) @@ -129,38 +134,46 @@ def get_certificate_request(json_encode=True, bindings=None): """ if bindings: # Add default API bindings to bindings list - bindings = set(bindings + get_default_api_bindings()) + bindings = list(bindings + get_default_api_bindings()) else: # Use default API bindings bindings = get_default_api_bindings() req = CertRequest(json_encode=json_encode) req.add_hostname_cn() # Add os-hostname entries - _sans = get_certificate_sans() + _sans = get_certificate_sans(bindings=bindings) # Handle specific hostnames per binding for binding in bindings: - hostname_override = config(ADDRESS_MAP[binding]['override']) try: - net_addr = 
resolve_address(endpoint_type=binding) - ip = network_get_primary_address( - ADDRESS_MAP[binding]['binding']) + hostname_override = config(ADDRESS_MAP[binding]['override']) + except KeyError: + hostname_override = None + try: + try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None + ip = network_get_primary_address(binding) addresses = [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) + + # Clear any Nones or duplicates + addresses = list(set([i for i in addresses if i])) # Add hostname certificate request if hostname_override: req.add_entry( binding, hostname_override, addresses) - # Remove hostname specific addresses from _sans - for addr in addresses: - try: - _sans.remove(addr) - except (ValueError, KeyError): - pass + # Remove hostname specific addresses from _sans + for addr in addresses: + try: + _sans.remove(addr) + except (ValueError, KeyError): + pass except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " @@ -174,11 +187,17 @@ def get_certificate_request(json_encode=True, bindings=None): def get_certificate_sans(bindings=None): """Get all possible IP addresses for certificate SANs. + + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings + :returns: List of binding string names + :rtype: List[str] """ _sans = [unit_get('private-address')] if bindings: # Add default API bindings to bindings list - bindings = set(bindings + get_default_api_bindings()) + bindings = list(bindings + get_default_api_bindings()) else: # Use default API bindings bindings = get_default_api_bindings() @@ -192,25 +211,39 @@ def get_certificate_sans(bindings=None): net_config = None # Using resolve_address is likely redundant. Keeping it here in # case there is an edge case it handles. - net_addr = resolve_address(endpoint_type=binding) + try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None ip = get_relation_ip(binding, cidr_network=net_config) _sans = _sans + [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: _sans.append(vip) - return set(_sans) + # Clear any Nones and duplicates + return list(set([i for i in _sans if i])) -def create_ip_cert_links(ssl_dir, custom_hostname_link=None): +def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): """Create symlinks for SAN records :param ssl_dir: str Directory to create symlinks in :param custom_hostname_link: str Additional link to be created + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + # This includes the hostname cert and any specific bindng certs: # admin, internal, public - req = get_certificate_request(json_encode=False)["cert_requests"] + req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"] # Specific certs for cert_req in req.keys(): requested_cert = os.path.join( @@ -274,8 +307,35 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def _manage_ca_certs(ca, cert_relation_id): + """Manage CA certs. + + :param ca: CA Certificate from certificate relation. 
+ :type ca: str + :param cert_relation_id: Relation id providing the certs + :type cert_relation_id: str + """ + config_ssl_ca = config('ssl_ca') + config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE) + if config_ssl_ca: + log("Installing CA certificate from charm ssl_ca config to {}".format( + config_cert_file), INFO) + install_ca_cert( + b64decode(config_ssl_ca).rstrip(), + name=CONFIG_CA_CERT_FILE) + elif os.path.exists(config_cert_file): + log("Removing CA certificate {}".format(config_cert_file), INFO) + os.remove(config_cert_file) + log("Installing CA certificate from certificate relation", INFO) + install_ca_cert( + ca.encode(), + name='{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id))) + + def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None, user='root', group='root'): + custom_hostname_link=None, user='root', group='root', + bindings=None): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. @@ -286,9 +346,19 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings :returns: True if certificates processed for local unit or False :rtype: bool """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) mkdir(path=ssl_dir) @@ -298,11 +368,12 @@ def process_certificates(service_name, relation_id, unit, ca = data.get('ca') if certs: certs = json.loads(certs) - install_ca_cert(ca.encode()) + _manage_ca_certs(ca, relation_id) install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, - custom_hostname_link=custom_hostname_link) + custom_hostname_link=custom_hostname_link, + bindings=bindings) return True return False diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 54aed7ff..6255dac0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1534,8 +1534,23 @@ def __call__(self): ctxt[k][section] = config_list else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) - return ctxt + if self.context_complete(ctxt): + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + else: + return {} + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if not ctxt.get('sections'): + return False + return super(SubordinateConfigContext, self).context_complete(ctxt) class LogLevelContext(OSContextGenerator): @@ -3050,6 +3065,9 @@ class sriov_config_mode(enum.Enum): blanket = 'blanket' explicit = 'explicit' + PCIDeviceNumVFs = collections.namedtuple( + 'PCIDeviceNumVFs', ['device', 'numvfs']) + def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. 
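The PCIDeviceNumVFs namedtuple introduced above re-keys the SR-IOV map by
PCI address while the old interface-name view is kept for compatibility (the
next hunk adds a get_map property for the new view). A self-contained sketch
of the two views, with a stub Device standing in for charmhelpers' PCI
device objects (the address and interface name are examples only):

    import collections

    PCIDeviceNumVFs = collections.namedtuple('PCIDeviceNumVFs',
                                             ['device', 'numvfs'])
    # Stub standing in for the real charmhelpers PCI device object.
    Device = collections.namedtuple('Device',
                                    ['pci_address', 'interface_name'])

    dev = Device('0000:81:00.0', 'ens3f0')
    pci_map = {dev.pci_address: PCIDeviceNumVFs(dev, 32)}

    # New get_map-style view: PCI address -> (device, numvfs).
    assert pci_map['0000:81:00.0'].numvfs == 32
    # Backward-compatible __call__-style view: interface name -> numvfs.
    legacy = {v.device.interface_name: v.numvfs for v in pci_map.values()}
    assert legacy == {'ens3f0': 32}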
@@ -3165,14 +3183,15 @@ def __init__(self, numvfs_key=None, device_mappings_key=None): 'configuration.') self._map = { - device.interface_name: self._determine_numvfs(device, sriov_numvfs) + device.pci_address: self.PCIDeviceNumVFs( + device, self._determine_numvfs(device, sriov_numvfs)) for device in devices.pci_devices if device.sriov and self._determine_numvfs(device, sriov_numvfs) is not None } def __call__(self): - """Provide SR-IOV context. + """Provide backward compatible SR-IOV context. :returns: Map interface name: min(configured, max) virtual functions. Example: @@ -3183,6 +3202,23 @@ def __call__(self): } :rtype: Dict[str,int] """ + return { + pcidnvfs.device.interface_name: pcidnvfs.numvfs + for _, pcidnvfs in self._map.items() + } + + @property + def get_map(self): + """Provide map of configured SR-IOV capable PCI devices. + + :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs. + Example: + { + '0000:81:00.0': self.PCIDeviceNumVFs(, 32), + '0000:81:00.1': self.PCIDeviceNumVFs(, 32), + } + :rtype: Dict[str, self.PCIDeviceNumVFs] + """ return self._map diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index f4c76214..f27aa6c9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -90,13 +90,16 @@ service_start, restart_on_change_helper, ) + from charmhelpers.fetch import ( apt_cache, + apt_install, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, GPGKeyError, get_upstream_version, + filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, ) @@ -480,9 +483,14 @@ def get_swift_codename(version): return None +@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + codename = get_installed_os_version() + if codename: + return codename + if snap_install_requested(): cmd = ['snap', 'list', package] try: @@ -570,6 +578,28 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) +def get_installed_os_version(): + apt_install(filter_installed_packages(['openstack-release']), fatal=False) + print("OpenStack Release: {}".format(openstack_release())) + return openstack_release().get('OPENSTACK_CODENAME') + + +@cached +def openstack_release(): + """Return /etc/os-release in a dict.""" + d = {} + try: + with open('/etc/openstack-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + except FileNotFoundError: + pass + return d + + # Module local cache variable for the os_release. 
_os_rel = None diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 697a5f4b..f826f6fe 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -60,6 +60,7 @@ ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' +CA_CERT_DIR = '/usr/local/share/ca-certificates' def service_start(service_name, **kwargs): @@ -1082,7 +1083,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 33152840..b5953019 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -646,7 +646,7 @@ def _add_apt_repository(spec): # passed as environment variables (See lp:1433761). This is not the case # LTS and non-LTS releases below bionic. _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https'])) + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 929a75d7..a2fbe0e5 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -129,7 +129,7 @@ def _dpkg_list(self, packages): else: data = line.split(None, 4) status = data.pop(0) - if status != 'ii': + if status not in ('ii', 'hi'): continue pkg = {} pkg.update({k.lower(): v for k, v in zip(headings, data)}) @@ -265,3 +265,48 @@ def version_compare(a, b): raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' 'less than each other.') + + +class PkgVersion(): + """Allow package versions to be compared. + + For example:: + + >>> import charmhelpers.fetch as fetch + >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < + ... fetch.apt_pkg.PkgVersion('2:20.5.0')) + True + >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), + ... fetch.apt_pkg.PkgVersion('2:21.4.0'), + ... 
fetch.apt_pkg.PkgVersion('2:17.4.0')] + >>> pkgs.sort() + >>> pkgs + [2:17.4.0, 2:20.4.0, 2:21.4.0] + """ + + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return version_compare(self.version, other.version) == -1 + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __gt__(self, other): + return version_compare(self.version, other.version) == 1 + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __eq__(self, other): + return version_compare(self.version, other.version) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return self.version + + def __hash__(self): + return hash(repr(self)) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 1aa96356..9aea716b 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -37,6 +37,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) From e97f40116cc35cae63746b1c96fa0e239c6b201c Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2134/2699] Updates for testing period for 20.01 release * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Iffb92ef062f307bc9b2b27e217431b2fc122acfc --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +- .../charmhelpers/contrib/hahelpers/apache.py | 6 +- .../contrib/hardening/audits/apache.py | 5 + .../hooks/charmhelpers/contrib/network/ip.py | 3 +- .../contrib/openstack/cert_utils.py | 207 +++++++++++++++--- .../charmhelpers/contrib/openstack/context.py | 44 +++- .../charmhelpers/contrib/openstack/ip.py | 16 ++ .../charmhelpers/contrib/openstack/utils.py | 51 ++++- .../contrib/storage/linux/ceph.py | 6 + .../hooks/charmhelpers/core/decorators.py | 38 ++++ ceph-osd/hooks/charmhelpers/core/host.py | 14 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 2 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 47 +++- ceph-osd/test-requirements.txt | 2 + 14 files changed, 405 insertions(+), 52 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 14b80d96..c87cf489 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -139,10 +139,11 @@ class Check(object): """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} +{service_config_overrides} }} """) - def __init__(self, shortname, description, check_cmd): + def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): @@ -155,6 +156,7 @@ def __init__(self, shortname, description, check_cmd): # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) + self.max_check_attempts = 
max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) @@ -216,12 +218,19 @@ def write_service_config(self, nagios_context, hostname, nagios_servicegroups): self._remove_service_files() + if self.max_check_attempts: + service_config_overrides = ' max_check_attempts {}'.format( + self.max_check_attempts + ) # Note indentation is here rather than in the template to avoid trailing spaces + else: + service_config_overrides = '' # empty string to avoid printing 'None' templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, + 'service_config_overrides': service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) @@ -327,6 +336,11 @@ def write(self): nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } + # If we were passed max_check_attempts, add that to the relation data + try: + nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts + except AttributeError: + pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py index 2c1e371e..a54702bc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -34,6 +34,10 @@ INFO, ) +# This file contains the CA cert from the charms ssl_ca configuration +# option, in future the file name should be updated reflect that. 
+CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' + def get_cert(cn=None): # TODO: deal with multiple https endpoints via charm config @@ -83,4 +87,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') + host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py index 04825f5a..c1537625 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -98,3 +98,8 @@ def _disable_module(module): def _restart_apache(): """Restarts the apache process""" subprocess.check_output(['service', 'apache2', 'restart']) + + @staticmethod + def is_ssl_enabled(): + """Check if SSL module is enabled or not""" + return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index b13277bb..63e91cca 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') + out = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") else: diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index b494af64..fc36d0f1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -16,6 +16,7 @@ import os import json +from base64 import b64decode from charmhelpers.contrib.network.ip import ( get_hostname, @@ -28,26 +29,32 @@ related_units, relation_get, relation_ids, + remote_service_name, unit_get, NoNetworkBinding, log, WARNING, + INFO, ) from charmhelpers.contrib.openstack.ip import ( - ADMIN, resolve_address, get_vip_in_network, - INTERNAL, - PUBLIC, - ADDRESS_MAP) + ADDRESS_MAP, + get_default_api_bindings, +) +from charmhelpers.contrib.network.ip import ( + get_relation_ip, +) from charmhelpers.core.host import ( + CA_CERT_DIR, + install_ca_cert, mkdir, write_file, ) from charmhelpers.contrib.hahelpers.apache import ( - install_ca_cert + CONFIG_CA_CERT_FILE, ) @@ -113,44 +120,146 @@ def get_request(self): return req -def get_certificate_request(json_encode=True): - """Generate a certificatee requests based on the network confioguration +def get_certificate_request(json_encode=True, bindings=None): + """Generate a certificate requests based on the network configuration + :param json_encode: Encode request in JSON or not. Used for setting + directly on a relation. + :type json_encode: boolean + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings + :returns: CertRequest request as dictionary or JSON string. 
+ :rtype: Union[dict, json] """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() req = CertRequest(json_encode=json_encode) req.add_hostname_cn() # Add os-hostname entries - for net_type in [INTERNAL, ADMIN, PUBLIC]: - net_config = config(ADDRESS_MAP[net_type]['override']) + _sans = get_certificate_sans(bindings=bindings) + + # Handle specific hostnames per binding + for binding in bindings: try: - net_addr = resolve_address(endpoint_type=net_type) - ip = network_get_primary_address( - ADDRESS_MAP[net_type]['binding']) + hostname_override = config(ADDRESS_MAP[binding]['override']) + except KeyError: + hostname_override = None + try: + try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None + ip = network_get_primary_address(binding) addresses = [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) - if net_config: + + # Clear any Nones or duplicates + addresses = list(set([i for i in addresses if i])) + # Add hostname certificate request + if hostname_override: req.add_entry( - net_type, - net_config, + binding, + hostname_override, addresses) - else: - # There is network address with no corresponding hostname. - # Add the ip to the hostname cert to allow for this. - req.add_hostname_cn_ip(addresses) + # Remove hostname specific addresses from _sans + for addr in addresses: + try: + _sans.remove(addr) + except (ValueError, KeyError): + pass + except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " - "local address found".format(net_type), WARNING) + "local address found".format(binding), WARNING) + # Gurantee all SANs are covered + # These are network addresses with no corresponding hostname. + # Add the ips to the hostname cert to allow for this. + req.add_hostname_cn_ip(_sans) return req.get_request() -def create_ip_cert_links(ssl_dir, custom_hostname_link=None): +def get_certificate_sans(bindings=None): + """Get all possible IP addresses for certificate SANs. + + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings + :returns: List of binding string names + :rtype: List[str] + """ + _sans = [unit_get('private-address')] + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + + for binding in bindings: + # Check for config override + try: + net_config = config(ADDRESS_MAP[binding]['config']) + except KeyError: + # There is no configuration network for this binding name + net_config = None + # Using resolve_address is likely redundant. Keeping it here in + # case there is an edge case it handles. 
+ try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None + ip = get_relation_ip(binding, cidr_network=net_config) + _sans = _sans + [net_addr, ip] + vip = get_vip_in_network(resolve_network_cidr(ip)) + if vip: + _sans.append(vip) + # Clear any Nones and duplicates + return list(set([i for i in _sans if i])) + + +def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): """Create symlinks for SAN records :param ssl_dir: str Directory to create symlinks in :param custom_hostname_link: str Additional link to be created + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings """ + + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + + # This includes the hostname cert and any specific bindng certs: + # admin, internal, public + req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"] + # Specific certs + for cert_req in req.keys(): + requested_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(cert_req)) + requested_key = os.path.join( + ssl_dir, + 'key_{}'.format(cert_req)) + for addr in req[cert_req]['sans']: + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(requested_cert) and not os.path.isfile(cert): + os.symlink(requested_cert, cert) + os.symlink(requested_key, key) + + # Handle custom hostnames hostname = get_hostname(unit_get('private-address')) hostname_cert = os.path.join( ssl_dir, @@ -158,18 +267,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): hostname_key = os.path.join( ssl_dir, 'key_{}'.format(hostname)) - # Add links to hostname cert, used if os-hostname vars not set - for net_type in [INTERNAL, ADMIN, PUBLIC]: - try: - addr = resolve_address(endpoint_type=net_type) - cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) - key = os.path.join(ssl_dir, 'key_{}'.format(addr)) - if os.path.isfile(hostname_cert) and not os.path.isfile(cert): - os.symlink(hostname_cert, cert) - os.symlink(hostname_key, key) - except NoNetworkBinding: - log("Skipping creating cert symlink for ip in {} space, no " - "local address found".format(net_type), WARNING) if custom_hostname_link: custom_cert = os.path.join( ssl_dir, @@ -210,8 +307,35 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def _manage_ca_certs(ca, cert_relation_id): + """Manage CA certs. + + :param ca: CA Certificate from certificate relation. 
+ :type ca: str + :param cert_relation_id: Relation id providing the certs + :type cert_relation_id: str + """ + config_ssl_ca = config('ssl_ca') + config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE) + if config_ssl_ca: + log("Installing CA certificate from charm ssl_ca config to {}".format( + config_cert_file), INFO) + install_ca_cert( + b64decode(config_ssl_ca).rstrip(), + name=CONFIG_CA_CERT_FILE) + elif os.path.exists(config_cert_file): + log("Removing CA certificate {}".format(config_cert_file), INFO) + os.remove(config_cert_file) + log("Installing CA certificate from certificate relation", INFO) + install_ca_cert( + ca.encode(), + name='{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id))) + + def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None, user='root', group='root'): + custom_hostname_link=None, user='root', group='root', + bindings=None): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. @@ -222,9 +346,19 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings :returns: True if certificates processed for local unit or False :rtype: bool """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) mkdir(path=ssl_dir) @@ -234,11 +368,12 @@ def process_certificates(service_name, relation_id, unit, ca = data.get('ca') if certs: certs = json.loads(certs) - install_ca_cert(ca.encode()) + _manage_ca_certs(ca, relation_id) install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, - custom_hostname_link=custom_hostname_link) + custom_hostname_link=custom_hostname_link, + bindings=bindings) return True return False diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 54aed7ff..6255dac0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1534,8 +1534,23 @@ def __call__(self): ctxt[k][section] = config_list else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) - return ctxt + if self.context_complete(ctxt): + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + else: + return {} + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if not ctxt.get('sections'): + return False + return super(SubordinateConfigContext, self).context_complete(ctxt) class LogLevelContext(OSContextGenerator): @@ -3050,6 +3065,9 @@ class sriov_config_mode(enum.Enum): blanket = 'blanket' explicit = 'explicit' + PCIDeviceNumVFs = collections.namedtuple( + 'PCIDeviceNumVFs', ['device', 'numvfs']) + def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. 
@@ -3165,14 +3183,15 @@ def __init__(self, numvfs_key=None, device_mappings_key=None): 'configuration.') self._map = { - device.interface_name: self._determine_numvfs(device, sriov_numvfs) + device.pci_address: self.PCIDeviceNumVFs( + device, self._determine_numvfs(device, sriov_numvfs)) for device in devices.pci_devices if device.sriov and self._determine_numvfs(device, sriov_numvfs) is not None } def __call__(self): - """Provide SR-IOV context. + """Provide backward compatible SR-IOV context. :returns: Map interface name: min(configured, max) virtual functions. Example: @@ -3183,6 +3202,23 @@ def __call__(self): } :rtype: Dict[str,int] """ + return { + pcidnvfs.device.interface_name: pcidnvfs.numvfs + for _, pcidnvfs in self._map.items() + } + + @property + def get_map(self): + """Provide map of configured SR-IOV capable PCI devices. + + :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs. + Example: + { + '0000:81:00.0': self.PCIDeviceNumVFs(, 32), + '0000:81:00.1': self.PCIDeviceNumVFs(, 32), + } + :rtype: Dict[str, self.PCIDeviceNumVFs] + """ return self._map diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index 723aebc1..89cf276d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -33,6 +33,7 @@ ADMIN = 'admin' ACCESS = 'access' +# TODO: reconcile 'int' vs 'internal' binding names ADDRESS_MAP = { PUBLIC: { 'binding': 'public', @@ -58,6 +59,14 @@ 'fallback': 'private-address', 'override': 'os-access-hostname', }, + # Note (thedac) bridge to begin the reconciliation between 'int' vs + # 'internal' binding names + 'internal': { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, } @@ -195,3 +204,10 @@ def get_vip_in_network(network): if is_address_in_network(network, vip): matching_vip = vip return matching_vip + + +def get_default_api_bindings(): + _default_bindings = [] + for binding in [INTERNAL, ADMIN, PUBLIC]: + _default_bindings.append(ADDRESS_MAP[binding]['binding']) + return _default_bindings diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 23e4adf5..f27aa6c9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -18,6 +18,7 @@ import subprocess import json +import operator import os import sys import re @@ -33,7 +34,7 @@ from charmhelpers.contrib.network import ip -from charmhelpers.core import unitdata +from charmhelpers.core import decorators, unitdata from charmhelpers.core.hookenv import ( WORKLOAD_STATES, @@ -89,13 +90,16 @@ service_start, restart_on_change_helper, ) + from charmhelpers.fetch import ( apt_cache, + apt_install, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, GPGKeyError, get_upstream_version, + filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, ) @@ -479,9 +483,14 @@ def get_swift_codename(version): return None +@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + codename = get_installed_os_version() + if codename: + return codename + if snap_install_requested(): cmd = ['snap', 'list', package] try: @@ -569,6 +578,28 @@ def 
get_os_version_package(pkg, fatal=True): # error_out(e) +def get_installed_os_version(): + apt_install(filter_installed_packages(['openstack-release']), fatal=False) + print("OpenStack Release: {}".format(openstack_release())) + return openstack_release().get('OPENSTACK_CODENAME') + + +@cached +def openstack_release(): + """Return /etc/os-release in a dict.""" + d = {} + try: + with open('/etc/openstack-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + except FileNotFoundError: + pass + return d + + # Module local cache variable for the os_release. _os_rel = None @@ -1295,7 +1326,7 @@ def _check_listening_on_ports_list(ports): Returns a list of ports being listened to and a list of the booleans. - @param ports: LIST or port numbers. + @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ ports_open = [port_has_listener('0.0.0.0', p) for p in ports] @@ -1564,6 +1595,21 @@ def manage_payload_services(action, services=None, charm_func=None): return success, messages +def make_wait_for_ports_barrier(ports, retry_count=5): + """Make a function to wait for port shutdowns. + + Create a function which closes over the provided ports. The function will + retry probing ports until they are closed or the retry count has been reached. + + """ + @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1) + def retry_port_check(): + _, ports_states = _check_listening_on_ports_list(ports) + juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG") + return any(ports_states) + return retry_port_check + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1599,6 +1645,7 @@ def pause_unit(assess_status_func, services=None, ports=None, services=services, charm_func=charm_func) set_unit_paused() + if assess_status_func: message = assess_status_func() if message: diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 0f69631d..d1c61754 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -268,6 +268,7 @@ class BasePool(object): 'compression-max-blob-size': (int, None), 'compression-max-blob-size-hdd': (int, None), 'compression-max-blob-size-ssd': (int, None), + 'rbd-mirroring-mode': (str, ('image', 'pool')) } def __init__(self, service, name=None, percent_data=None, app_name=None, @@ -1767,6 +1768,7 @@ def _partial_build_common_op_create(self, max_bytes=None, max_objects=None, namespace=None, + rbd_mirroring_mode='pool', weight=None): """Build common part of a create pool operation. @@ -1825,6 +1827,9 @@ def _partial_build_common_op_create(self, :type max_objects: Optional[int] :param namespace: Group namespace :type namespace: Optional[str] + :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD + mirroring is enabled. + :type rbd_mirroring_mode: Optional[str] :param weight: The percentage of data that is expected to be contained in the pool from the total available space on the OSDs. 
Used to calculate number of Placement Groups to create @@ -1849,6 +1854,7 @@ def _partial_build_common_op_create(self, 'max-bytes': max_bytes, 'max-objects': max_objects, 'group-namespace': namespace, + 'rbd-mirroring-mode': rbd_mirroring_mode, 'weight': weight, } diff --git a/ceph-osd/hooks/charmhelpers/core/decorators.py b/ceph-osd/hooks/charmhelpers/core/decorators.py index 6ad41ee4..e7e95d17 100644 --- a/ceph-osd/hooks/charmhelpers/core/decorators.py +++ b/ceph-osd/hooks/charmhelpers/core/decorators.py @@ -53,3 +53,41 @@ def _retry_on_exception_inner_2(*args, **kwargs): return _retry_on_exception_inner_2 return _retry_on_exception_inner_1 + + +def retry_on_predicate(num_retries, predicate_fun, base_delay=0): + """Retry based on return value + + The return value of the decorated function is passed to the given predicate_fun. If the + result of the predicate is False, retry the decorated function up to num_retries times + + An exponential backoff up to base_delay^num_retries seconds can be introduced by setting + base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay + + :param num_retries: Max. number of retries to perform + :type num_retries: int + :param predicate_fun: Predicate function to determine if a retry is necessary + :type predicate_fun: callable + :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay) + :type base_delay: float + """ + def _retry_on_pred_inner_1(f): + def _retry_on_pred_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + delay = base_delay + while True: + result = f(*args, **kwargs) + if predicate_fun(result) or retries <= 0: + return result + delay *= multiplier + multiplier += 1 + log("Result {}, retrying '{}' {} more times (delay={})".format( + result, f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_pred_inner_2 + + return _retry_on_pred_inner_1 diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 87993699..f826f6fe 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -60,6 +60,7 @@ ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' +CA_CERT_DIR = '/usr/local/share/ca-certificates' def service_start(service_name, **kwargs): @@ -826,7 +827,8 @@ def list_nics(nic_type=None): if nic_type: for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace') ip_output = ip_output.split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: @@ -842,7 +844,8 @@ def list_nics(nic_type=None): interfaces.append(iface) else: cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') ip_output = (line.strip() for line in ip_output if line) key = re.compile(r'^[0-9]+:\s+(.+):') @@ -866,7 +869,8 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = subprocess.check_output( + cmd).decode('UTF-8', errors='replace').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -878,7 +882,7 @@ def get_nic_mtu(nic): 
def get_nic_hwaddr(nic): """Return the Media Access Control (MAC) for a network interface.""" cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -1079,7 +1083,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 33152840..b5953019 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -646,7 +646,7 @@ def _add_apt_repository(spec): # passed as environment variables (See lp:1433761). This is not the case # LTS and non-LTS releases below bionic. _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https'])) + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 929a75d7..a2fbe0e5 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -129,7 +129,7 @@ def _dpkg_list(self, packages): else: data = line.split(None, 4) status = data.pop(0) - if status != 'ii': + if status not in ('ii', 'hi'): continue pkg = {} pkg.update({k.lower(): v for k, v in zip(headings, data)}) @@ -265,3 +265,48 @@ def version_compare(a, b): raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' 'less than each other.') + + +class PkgVersion(): + """Allow package versions to be compared. + + For example:: + + >>> import charmhelpers.fetch as fetch + >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < + ... fetch.apt_pkg.PkgVersion('2:20.5.0')) + True + >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), + ... fetch.apt_pkg.PkgVersion('2:21.4.0'), + ... 
fetch.apt_pkg.PkgVersion('2:17.4.0')] + >>> pkgs.sort() + >>> pkgs + [2:17.4.0, 2:20.4.0, 2:21.4.0] + """ + + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return version_compare(self.version, other.version) == -1 + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __gt__(self, other): + return version_compare(self.version, other.version) == 1 + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __eq__(self, other): + return version_compare(self.version, other.version) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return self.version + + def __hash__(self): + return hash(repr(self)) diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 1aa96356..9aea716b 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -37,6 +37,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) From d9281d3d900d01f052d8661c4da51b7073a043b7 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2135/2699] Updates for testing period for 20.01 release * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Ic916d3ee88ec30e89323d777f8efe81e79001dd5 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++- .../charmhelpers/contrib/hahelpers/apache.py | 6 +- .../contrib/hardening/audits/apache.py | 5 + .../contrib/openstack/cert_utils.py | 113 ++++++++++++++---- .../charmhelpers/contrib/openstack/context.py | 44 ++++++- .../charmhelpers/contrib/openstack/utils.py | 30 +++++ ceph-radosgw/hooks/charmhelpers/core/host.py | 3 +- .../hooks/charmhelpers/fetch/ubuntu.py | 2 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 47 +++++++- 9 files changed, 236 insertions(+), 30 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 14b80d96..c87cf489 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -139,10 +139,11 @@ class Check(object): """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} +{service_config_overrides} }} """) - def __init__(self, shortname, description, check_cmd): + def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): @@ -155,6 +156,7 @@ def __init__(self, shortname, description, check_cmd): # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) + self.max_check_attempts = max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) @@ -216,12 +218,19 @@ def write_service_config(self, nagios_context, hostname, 
nagios_servicegroups): self._remove_service_files() + if self.max_check_attempts: + service_config_overrides = ' max_check_attempts {}'.format( + self.max_check_attempts + ) # Note indentation is here rather than in the template to avoid trailing spaces + else: + service_config_overrides = '' # empty string to avoid printing 'None' templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, + 'service_config_overrides': service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) @@ -327,6 +336,11 @@ def write(self): nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } + # If we were passed max_check_attempts, add that to the relation data + try: + nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts + except AttributeError: + pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py index 2c1e371e..a54702bc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -34,6 +34,10 @@ INFO, ) +# This file contains the CA cert from the charms ssl_ca configuration +# option, in future the file name should be updated reflect that. +CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' + def get_cert(cn=None): # TODO: deal with multiple https endpoints via charm config @@ -83,4 +87,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') + host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py index 04825f5a..c1537625 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -98,3 +98,8 @@ def _disable_module(module): def _restart_apache(): """Restarts the apache process""" subprocess.check_output(['service', 'apache2', 'restart']) + + @staticmethod + def is_ssl_enabled(): + """Check if SSL module is enabled or not""" + return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 1eb21542..fc36d0f1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -16,6 +16,7 @@ import os import json +from base64 import b64decode from charmhelpers.contrib.network.ip import ( get_hostname, @@ -28,10 +29,12 @@ related_units, relation_get, relation_ids, + remote_service_name, unit_get, NoNetworkBinding, log, WARNING, + INFO, ) from charmhelpers.contrib.openstack.ip import ( resolve_address, @@ -44,12 +47,14 @@ ) from charmhelpers.core.host import ( + CA_CERT_DIR, + install_ca_cert, mkdir, write_file, ) from charmhelpers.contrib.hahelpers.apache import ( - install_ca_cert + CONFIG_CA_CERT_FILE, ) @@ -129,38 +134,46 @@ def get_certificate_request(json_encode=True, bindings=None): """ if bindings: # Add default API bindings to bindings 
list - bindings = set(bindings + get_default_api_bindings()) + bindings = list(bindings + get_default_api_bindings()) else: # Use default API bindings bindings = get_default_api_bindings() req = CertRequest(json_encode=json_encode) req.add_hostname_cn() # Add os-hostname entries - _sans = get_certificate_sans() + _sans = get_certificate_sans(bindings=bindings) # Handle specific hostnames per binding for binding in bindings: - hostname_override = config(ADDRESS_MAP[binding]['override']) try: - net_addr = resolve_address(endpoint_type=binding) - ip = network_get_primary_address( - ADDRESS_MAP[binding]['binding']) + hostname_override = config(ADDRESS_MAP[binding]['override']) + except KeyError: + hostname_override = None + try: + try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None + ip = network_get_primary_address(binding) addresses = [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) + + # Clear any Nones or duplicates + addresses = list(set([i for i in addresses if i])) # Add hostname certificate request if hostname_override: req.add_entry( binding, hostname_override, addresses) - # Remove hostname specific addresses from _sans - for addr in addresses: - try: - _sans.remove(addr) - except (ValueError, KeyError): - pass + # Remove hostname specific addresses from _sans + for addr in addresses: + try: + _sans.remove(addr) + except (ValueError, KeyError): + pass except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " @@ -174,11 +187,17 @@ def get_certificate_request(json_encode=True, bindings=None): def get_certificate_sans(bindings=None): """Get all possible IP addresses for certificate SANs. + + :param bindings: List of bindings to check in addition to default api + bindings. + :type bindings: list of strings + :returns: List of binding string names + :rtype: List[str] """ _sans = [unit_get('private-address')] if bindings: # Add default API bindings to bindings list - bindings = set(bindings + get_default_api_bindings()) + bindings = list(bindings + get_default_api_bindings()) else: # Use default API bindings bindings = get_default_api_bindings() @@ -192,25 +211,39 @@ def get_certificate_sans(bindings=None): net_config = None # Using resolve_address is likely redundant. Keeping it here in # case there is an edge case it handles. - net_addr = resolve_address(endpoint_type=binding) + try: + net_addr = resolve_address(endpoint_type=binding) + except KeyError: + net_addr = None ip = get_relation_ip(binding, cidr_network=net_config) _sans = _sans + [net_addr, ip] vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: _sans.append(vip) - return set(_sans) + # Clear any Nones and duplicates + return list(set([i for i in _sans if i])) -def create_ip_cert_links(ssl_dir, custom_hostname_link=None): +def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): """Create symlinks for SAN records :param ssl_dir: str Directory to create symlinks in :param custom_hostname_link: str Additional link to be created + :param bindings: List of bindings to check in addition to default api + bindings. 
+ :type bindings: list of strings """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + # This includes the hostname cert and any specific bindng certs: # admin, internal, public - req = get_certificate_request(json_encode=False)["cert_requests"] + req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"] # Specific certs for cert_req in req.keys(): requested_cert = os.path.join( @@ -274,8 +307,35 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def _manage_ca_certs(ca, cert_relation_id): + """Manage CA certs. + + :param ca: CA Certificate from certificate relation. + :type ca: str + :param cert_relation_id: Relation id providing the certs + :type cert_relation_id: str + """ + config_ssl_ca = config('ssl_ca') + config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE) + if config_ssl_ca: + log("Installing CA certificate from charm ssl_ca config to {}".format( + config_cert_file), INFO) + install_ca_cert( + b64decode(config_ssl_ca).rstrip(), + name=CONFIG_CA_CERT_FILE) + elif os.path.exists(config_cert_file): + log("Removing CA certificate {}".format(config_cert_file), INFO) + os.remove(config_cert_file) + log("Installing CA certificate from certificate relation", INFO) + install_ca_cert( + ca.encode(), + name='{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id))) + + def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None, user='root', group='root'): + custom_hostname_link=None, user='root', group='root', + bindings=None): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. @@ -286,9 +346,19 @@ def process_certificates(service_name, relation_id, unit, :type user: str :param group: (Optional) Group of certificate files. Defaults to 'root' :type group: str + :param bindings: List of bindings to check in addition to default api + bindings. 
+ :type bindings: list of strings :returns: True if certificates processed for local unit or False :rtype: bool """ + if bindings: + # Add default API bindings to bindings list + bindings = list(bindings + get_default_api_bindings()) + else: + # Use default API bindings + bindings = get_default_api_bindings() + data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) mkdir(path=ssl_dir) @@ -298,11 +368,12 @@ def process_certificates(service_name, relation_id, unit, ca = data.get('ca') if certs: certs = json.loads(certs) - install_ca_cert(ca.encode()) + _manage_ca_certs(ca, relation_id) install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, - custom_hostname_link=custom_hostname_link) + custom_hostname_link=custom_hostname_link, + bindings=bindings) return True return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 54aed7ff..6255dac0 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1534,8 +1534,23 @@ def __call__(self): ctxt[k][section] = config_list else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) - return ctxt + if self.context_complete(ctxt): + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + else: + return {} + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if not ctxt.get('sections'): + return False + return super(SubordinateConfigContext, self).context_complete(ctxt) class LogLevelContext(OSContextGenerator): @@ -3050,6 +3065,9 @@ class sriov_config_mode(enum.Enum): blanket = 'blanket' explicit = 'explicit' + PCIDeviceNumVFs = collections.namedtuple( + 'PCIDeviceNumVFs', ['device', 'numvfs']) + def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. @@ -3165,14 +3183,15 @@ def __init__(self, numvfs_key=None, device_mappings_key=None): 'configuration.') self._map = { - device.interface_name: self._determine_numvfs(device, sriov_numvfs) + device.pci_address: self.PCIDeviceNumVFs( + device, self._determine_numvfs(device, sriov_numvfs)) for device in devices.pci_devices if device.sriov and self._determine_numvfs(device, sriov_numvfs) is not None } def __call__(self): - """Provide SR-IOV context. + """Provide backward compatible SR-IOV context. :returns: Map interface name: min(configured, max) virtual functions. Example: @@ -3183,6 +3202,23 @@ def __call__(self): } :rtype: Dict[str,int] """ + return { + pcidnvfs.device.interface_name: pcidnvfs.numvfs + for _, pcidnvfs in self._map.items() + } + + @property + def get_map(self): + """Provide map of configured SR-IOV capable PCI devices. + + :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs. 
+ Example: + { + '0000:81:00.0': self.PCIDeviceNumVFs(, 32), + '0000:81:00.1': self.PCIDeviceNumVFs(, 32), + } + :rtype: Dict[str, self.PCIDeviceNumVFs] + """ return self._map diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f4c76214..f27aa6c9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -90,13 +90,16 @@ service_start, restart_on_change_helper, ) + from charmhelpers.fetch import ( apt_cache, + apt_install, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, GPGKeyError, get_upstream_version, + filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, ) @@ -480,9 +483,14 @@ def get_swift_codename(version): return None +@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + codename = get_installed_os_version() + if codename: + return codename + if snap_install_requested(): cmd = ['snap', 'list', package] try: @@ -570,6 +578,28 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) +def get_installed_os_version(): + apt_install(filter_installed_packages(['openstack-release']), fatal=False) + print("OpenStack Release: {}".format(openstack_release())) + return openstack_release().get('OPENSTACK_CODENAME') + + +@cached +def openstack_release(): + """Return /etc/os-release in a dict.""" + d = {} + try: + with open('/etc/openstack-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + except FileNotFoundError: + pass + return d + + # Module local cache variable for the os_release. _os_rel = None diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 697a5f4b..f826f6fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -60,6 +60,7 @@ ) # flake8: noqa -- ignore F401 for this import UPDATEDB_PATH = '/etc/updatedb.conf' +CA_CERT_DIR = '/usr/local/share/ca-certificates' def service_start(service_name, **kwargs): @@ -1082,7 +1083,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 33152840..b5953019 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -646,7 +646,7 @@ def _add_apt_repository(spec): # passed as environment variables (See lp:1433761). This is not the case # LTS and non-LTS releases below bionic. 
_run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https'])) + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 929a75d7..a2fbe0e5 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -129,7 +129,7 @@ def _dpkg_list(self, packages): else: data = line.split(None, 4) status = data.pop(0) - if status != 'ii': + if status not in ('ii', 'hi'): continue pkg = {} pkg.update({k.lower(): v for k, v in zip(headings, data)}) @@ -265,3 +265,48 @@ def version_compare(a, b): raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' 'less than each other.') + + +class PkgVersion(): + """Allow package versions to be compared. + + For example:: + + >>> import charmhelpers.fetch as fetch + >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < + ... fetch.apt_pkg.PkgVersion('2:20.5.0')) + True + >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), + ... fetch.apt_pkg.PkgVersion('2:21.4.0'), + ... fetch.apt_pkg.PkgVersion('2:17.4.0')] + >>> pkgs.sort() + >>> pkgs + [2:17.4.0, 2:20.4.0, 2:21.4.0] + """ + + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return version_compare(self.version, other.version) == -1 + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __gt__(self, other): + return version_compare(self.version, other.version) == 1 + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __eq__(self, other): + return version_compare(self.version, other.version) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return self.version + + def __hash__(self): + return hash(repr(self)) From 7c96341ede52e8395c7bc78be73c434c08b48b0f Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 12 Jan 2021 13:52:49 +0100 Subject: [PATCH 2136/2699] Remove Disco and Eoan bits Change-Id: I81abbe4446a8bb702a8c05815cb253aa3389254d --- ceph-rbd-mirror/src/metadata.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 09f956b1..e9c96ecd 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -17,7 +17,6 @@ tags: series: - xenial - bionic -- eoan - focal - groovy extra-bindings: From 8b3645e088e123afaed3ab8b32a3bb15fbaff8c0 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2137/2699] Updates for testing period for 20.01 release * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: I73cdbc4f9a444cb3bc5e9daa4d1cc038b5ed26c5 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++++++- .../contrib/hardening/audits/apache.py | 5 ++ .../charmhelpers/contrib/openstack/utils.py | 30 ++++++++++++ ceph-proxy/charmhelpers/fetch/ubuntu.py | 2 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 47 ++++++++++++++++++- ceph-proxy/test-requirements.txt | 2 + 6 files changed, 99 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py 
b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index 14b80d96..c87cf489 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -139,10 +139,11 @@ class Check(object): """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} +{service_config_overrides} }} """) - def __init__(self, shortname, description, check_cmd): + def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): @@ -155,6 +156,7 @@ def __init__(self, shortname, description, check_cmd): # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) + self.max_check_attempts = max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) @@ -216,12 +218,19 @@ def write_service_config(self, nagios_context, hostname, nagios_servicegroups): self._remove_service_files() + if self.max_check_attempts: + service_config_overrides = ' max_check_attempts {}'.format( + self.max_check_attempts + ) # Note indentation is here rather than in the template to avoid trailing spaces + else: + service_config_overrides = '' # empty string to avoid printing 'None' templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, + 'service_config_overrides': service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) @@ -327,6 +336,11 @@ def write(self): nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } + # If we were passed max_check_attempts, add that to the relation data + try: + nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts + except AttributeError: + pass # update-status hooks are configured to firing every 5 minutes by # default. 
When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py index 04825f5a..c1537625 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py @@ -98,3 +98,8 @@ def _disable_module(module): def _restart_apache(): """Restarts the apache process""" subprocess.check_output(['service', 'apache2', 'restart']) + + @staticmethod + def is_ssl_enabled(): + """Check if SSL module is enabled or not""" + return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index f4c76214..f27aa6c9 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -90,13 +90,16 @@ service_start, restart_on_change_helper, ) + from charmhelpers.fetch import ( apt_cache, + apt_install, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, GPGKeyError, get_upstream_version, + filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, ) @@ -480,9 +483,14 @@ def get_swift_codename(version): return None +@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + codename = get_installed_os_version() + if codename: + return codename + if snap_install_requested(): cmd = ['snap', 'list', package] try: @@ -570,6 +578,28 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) +def get_installed_os_version(): + apt_install(filter_installed_packages(['openstack-release']), fatal=False) + print("OpenStack Release: {}".format(openstack_release())) + return openstack_release().get('OPENSTACK_CODENAME') + + +@cached +def openstack_release(): + """Return /etc/os-release in a dict.""" + d = {} + try: + with open('/etc/openstack-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + except FileNotFoundError: + pass + return d + + # Module local cache variable for the os_release. _os_rel = None diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 33152840..b5953019 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -646,7 +646,7 @@ def _add_apt_repository(spec): # passed as environment variables (See lp:1433761). This is not the case # LTS and non-LTS releases below bionic. 
_run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https'])) + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index 929a75d7..a2fbe0e5 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -129,7 +129,7 @@ def _dpkg_list(self, packages): else: data = line.split(None, 4) status = data.pop(0) - if status != 'ii': + if status not in ('ii', 'hi'): continue pkg = {} pkg.update({k.lower(): v for k, v in zip(headings, data)}) @@ -265,3 +265,48 @@ def version_compare(a, b): raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' 'less than each other.') + + +class PkgVersion(): + """Allow package versions to be compared. + + For example:: + + >>> import charmhelpers.fetch as fetch + >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < + ... fetch.apt_pkg.PkgVersion('2:20.5.0')) + True + >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), + ... fetch.apt_pkg.PkgVersion('2:21.4.0'), + ... fetch.apt_pkg.PkgVersion('2:17.4.0')] + >>> pkgs.sort() + >>> pkgs + [2:17.4.0, 2:20.4.0, 2:21.4.0] + """ + + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return version_compare(self.version, other.version) == -1 + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __gt__(self, other): + return version_compare(self.version, other.version) == 1 + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __eq__(self, other): + return version_compare(self.version, other.version) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return self.version + + def __hash__(self): + return hash(repr(self)) diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 1aa96356..9aea716b 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -37,6 +37,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) From 25a67689a73c78cad833462a685777a31eb59ea5 Mon Sep 17 00:00:00 2001 From: Jarred Wilson Date: Thu, 14 Jan 2021 14:07:07 -0500 Subject: [PATCH 2138/2699] Enable object versioning for a container This patch adds the config option rgw-swift-versioning-enabled boolean that enables swift versioning for the ceph-backed storage solution. This uses X-Versions-Location as it is the only header that radosgw interprets. closes-bug: #1910679 Change-Id: I5b42c34882b46e96f4cc92d91ec441a4bdfd76f6 --- ceph-radosgw/config.yaml | 9 +++++++++ ceph-radosgw/hooks/ceph_radosgw_context.py | 1 + ceph-radosgw/templates/ceph.conf | 1 + ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 4 ++++ 4 files changed, 15 insertions(+) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index f5ae44a9..d64d20f0 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -500,3 +500,12 @@ options: description: | Value of bluestore compression max blob size for solid state media on pools requested by this charm. 
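A brief usage sketch for the new option defined in the hunk below — assuming it lands as written here, and using the charm's default application name:

    juju config ceph-radosgw rgw-swift-versioning-enabled=true

Once the config-changed hook has re-rendered ceph.conf, radosgw honours the X-Versions-Location header on Swift containers.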
+ rgw-swift-versioning-enabled:
+ type: boolean
+ default: False
+ description: |
+ If True, Swift object versioning will be enabled for radosgw.
+
+ NOTE: X-Versions-Location is the only versioning-related header that
+ radosgw interprets. X-History-Location, supported by native OpenStack
+ Swift, is currently not supported by radosgw.
diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py
index 8115e4e1..a43fd603 100644
--- a/ceph-radosgw/hooks/ceph_radosgw_context.py
+++ b/ceph-radosgw/hooks/ceph_radosgw_context.py
@@ -207,6 +207,7 @@ def __call__(self):
# not available externally). ~tribaal
'unit_public_ip': unit_public_ip(),
'fsid': fsid,
+ 'rgw_swift_versioning': config('rgw-swift-versioning-enabled'),
}
# NOTE(dosaboy): these sections must correspond to what is supported in
diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf
index 924927f4..ce3e7d5b 100644
--- a/ceph-radosgw/templates/ceph.conf
+++ b/ceph-radosgw/templates/ceph.conf
@@ -14,6 +14,7 @@ debug rgw = {{ loglevel }}/5
{% if ipv6 -%}
ms bind ipv6 = true
{% endif %}
+rgw swift versioning enabled = {{ rgw_swift_versioning }}
{% if global -%}
# The following are user-provided options provided via the config-flags charm option.
# User-provided [global] section config
diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
index cb0c039b..faf73bd1 100644
--- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
+++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
@@ -387,6 +387,7 @@ def _relation_get(attr, unit, rid):
'ipv6': False,
'rgw_zone': 'default',
'fsid': 'testfsid',
+ 'rgw_swift_versioning': False,
}
self.assertEqual(expect, mon_ctxt())
self.assertFalse(mock_ensure_rsv_v6.called)
@@ -435,6 +436,7 @@ def _relation_get(attr, unit, rid):
'ipv6': False,
'rgw_zone': 'default',
'fsid': 'testfsid',
+ 'rgw_swift_versioning': False,
}
self.assertEqual(expect, mon_ctxt())
self.assertFalse(mock_ensure_rsv_v6.called)
@@ -492,6 +494,7 @@ def _relation_get(attr, unit, rid):
'ipv6': False,
'rgw_zone': 'default',
'fsid': 'testfsid',
+ 'rgw_swift_versioning': False,
}
self.assertEqual(expect, mon_ctxt())
@@ -531,6 +534,7 @@ def _relation_get(attr, unit, rid):
'ipv6': False,
'rgw_zone': 'default',
'fsid': 'testfsid',
+ 'rgw_swift_versioning': False,
}
self.assertEqual(expect, mon_ctxt())

From 720fa642ebbdffaaad0b45d622023742dba57e79 Mon Sep 17 00:00:00 2001
From: Robert Gildein
Date: Fri, 8 Jan 2021 15:29:39 +0100
Subject: [PATCH 2139/2699] Add `osds` argument to the osd-in/osd-out action

The `osds` parameter has been copied from the start/stop actions to preserve the same semantics. osd-in/osd-out now require a comma-separated list of OSD IDs as an argument and do nothing without one (previously, the change was applied to all local OSDs). All OSDs can still be taken in or out at once by passing the keyword `all` as the `osds` parameter.
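A brief invocation sketch, assuming Juju 2.x run-action syntax; the unit name ceph-osd/0 is illustrative:

    juju run-action ceph-osd/0 osd-out osds=1,2
    juju run-action ceph-osd/0 osd-in osds=all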
Closes-Bug: #1910150 Change-Id: I0275f015e2d0bbbb661d2b7dea59c320ba6c021c --- ceph-osd/actions.yaml | 10 ++ ceph-osd/actions/osd_in_out.py | 118 +++++++++++++----- ceph-osd/actions/service.py | 33 +---- ceph-osd/hooks/utils.py | 28 ++++- .../unit_tests/test_actions_osd_out_in.py | 100 ++++++++++++--- ceph-osd/unit_tests/test_actions_service.py | 52 ++------ ceph-osd/unit_tests/test_ceph_utils.py | 23 ++++ 7 files changed, 244 insertions(+), 120 deletions(-) diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 1674a08d..cc59e781 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -9,11 +9,21 @@ osd-out: \ USE WITH CAUTION - Mark unit OSDs as 'out'. Documentation: https://jaas.ai/ceph-osd/ + params: + osds: + description: A comma-separated list of OSD IDs to start (or keyword 'all') + required: + - osds osd-in: description: | \ Set the local osd units in the charm to 'in'. Documentation: https://jaas.ai/ceph-osd/ + params: + osds: + description: A comma-separated list of OSD IDs to start (or keyword 'all') + required: + - osds list-disks: description: | \ diff --git a/ceph-osd/actions/osd_in_out.py b/ceph-osd/actions/osd_in_out.py index 844b7875..8b579a98 100755 --- a/ceph-osd/actions/osd_in_out.py +++ b/ceph-osd/actions/osd_in_out.py @@ -18,52 +18,114 @@ import os import sys -from subprocess import check_call + +from subprocess import check_output, STDOUT sys.path.append('lib') sys.path.append('hooks') from charmhelpers.core.hookenv import ( - action_fail, + function_fail, + function_set, + log, + ERROR, ) from charms_ceph.utils import get_local_osd_ids from ceph_hooks import assess_status +from utils import parse_osds_arguments, ALL +IN = "in" +OUT = "out" -def osd_out(args): - """Pause the ceph-osd units on the local machine only. - Optionally uses the 'osd-number' from juju action param to only osd_out a - specific osd. +def check_osd_id(osds): + """Check ceph OSDs existence. - @raises CalledProcessError if the ceph commands fails. - @raises OSError if it can't get the local osd ids. + :param osds: list of osds IDs + :type osds: set + :returns: list of osds IDs present on the local machine and + list of failed osds IDs + :rtype: Tuple[set, set] + :raises OSError: if the unit can't get the local osd ids """ - for local_id in get_local_osd_ids(): - cmd = [ - 'ceph', - '--id', 'osd-upgrade', - 'osd', 'out', str(local_id)] - check_call(cmd) - assess_status() + all_local_osd = get_local_osd_ids() + if ALL in osds: + return set(all_local_osd), set() + + failed_osds = osds.difference(all_local_osd) + if failed_osds: + log("Ceph OSDs not present: {}".format(", ".join(failed_osds)), + level=ERROR) + + return osds, failed_osds + + +def ceph_osd_upgrade(action, osd_id): + """Execute ceph osd-upgrade command. + + :param action: action type IN/OUT + :type action: str + :param osd_id: osd ID + :type osd_id: str + :returns: output message + :rtype: str + :raises subprocess.CalledProcessError: if the ceph commands fails + """ + cmd = ["ceph", "--id", "osd-upgrade", "osd", action, osd_id] + output = check_output(cmd, stderr=STDOUT).decode("utf-8") + + log("ceph-osd {osd_id} was updated by the action osd-{action} with " + "output: {output}".format(osd_id=osd_id, action=action, output=output)) + + return output -def osd_in(args): - """Resume the ceph-osd units on this local machine only +def osd_in_out(action): + """Pause/Resume the ceph OSDs unit ont the local machine only. - @raises subprocess.CalledProcessError should the osd units fails to osd_in. 
- @raises OSError if the unit can't get the local osd ids + :param action: Either IN or OUT (see global constants) + :type action: string + :raises RuntimeError: if a supported action is not used + :raises subprocess.CalledProcessError: if the ceph commands fails + :raises OSError: if the unit can't get the local osd ids """ - for local_id in get_local_osd_ids(): - cmd = [ - 'ceph', - '--id', 'osd-upgrade', - 'osd', 'in', str(local_id)] - check_call(cmd) + if action not in (IN, OUT): + raise RuntimeError("Unknown action \"{}\"".format(action)) + + osds = parse_osds_arguments() + osds, failed_osds = check_osd_id(osds) + + if failed_osds: + function_fail("invalid ceph OSD device id: " + "{}".format(",".join(failed_osds))) + return + + outputs = [] + for osd_id in osds: + output = ceph_osd_upgrade(action, str(osd_id)) + outputs.append(output) + + function_set({ + "message": "osd-{action} action was successfully executed for ceph " + "OSD devices [{osds}]".format(action=action, + osds=",".join(osds)), + "outputs": os.linesep.join(outputs) + }) + assess_status() +def osd_in(): + """Shortcut to execute 'osd_in' action""" + osd_in_out(IN) + + +def osd_out(): + """Shortcut to execute 'osd_out' action""" + osd_in_out(OUT) + + # A dictionary of all the defined actions to callables (which take # parsed arguments). ACTIONS = {"osd-out": osd_out, "osd-in": osd_in} @@ -75,13 +137,13 @@ def main(args): action = ACTIONS[action_name] except KeyError: s = "Action {} undefined".format(action_name) - action_fail(s) + function_fail(s) return s else: try: - action(args) + action() except Exception as e: - action_fail("Action {} failed: {}".format(action_name, str(e))) + function_fail("Action {} failed: {}".format(action_name, str(e))) if __name__ == "__main__": diff --git a/ceph-osd/actions/service.py b/ceph-osd/actions/service.py index 1b8fe1ff..1768930d 100755 --- a/ceph-osd/actions/service.py +++ b/ceph-osd/actions/service.py @@ -26,17 +26,14 @@ from charmhelpers.core.hookenv import ( function_fail, - function_get, log, - WARNING, ) from ceph_hooks import assess_status +from utils import parse_osds_arguments, ALL START = 'start' STOP = 'stop' -ALL = 'all' - def systemctl_execute(action, services): """ @@ -110,32 +107,6 @@ def check_service_is_present(service_list): 'unit: {}'.format(missing_services)) -def parse_arguments(): - """ - Fetch action arguments and parse them from comma separated list to - the set of OSD IDs - - :return: Set of OSD IDs - :rtype: set(str) - """ - raw_arg = function_get('osds') - - if raw_arg is None: - raise RuntimeError('Action argument "osds" is missing') - args = set() - - # convert OSD IDs from user's input into the set - for osd_id in str(raw_arg).split(','): - args.add(osd_id.strip()) - - if ALL in args and len(args) != 1: - args = {ALL} - log('keyword "all" was found in "osds" argument. 
Dropping other ' - 'explicitly defined OSD IDs', WARNING) - - return args - - def execute_action(action): """Core implementation of the 'start'/'stop' actions @@ -145,7 +116,7 @@ def execute_action(action): if action not in (START, STOP): raise RuntimeError('Unknown action "{}"'.format(action)) - osds = parse_arguments() + osds = parse_osds_arguments() services = osd_ids_to_service_names(osds) check_service_is_present(services) diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 65308187..8ac8ff1b 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -32,6 +32,7 @@ status_set, storage_get, storage_list, + function_get, ) from charmhelpers.core import unitdata from charmhelpers.fetch import ( @@ -49,7 +50,7 @@ get_ipv6_addr ) - +ALL = "all" # string value representing all "OSD devices" TEMPLATES_DIR = 'templates' try: @@ -310,3 +311,28 @@ def is_sata30orless(device): if re.match(r"SATA Version is: *SATA (1\.|2\.|3\.0)", str(line)): return True return False + + +def parse_osds_arguments(): + """Parse OSD IDs from action `osds` argument. + + Fetch action arguments and parse them from comma separated list to + the set of OSD IDs. + + :return: Set of OSD IDs + :rtype: set(str) + """ + raw_arg = function_get("osds") + + if raw_arg is None: + raise RuntimeError("Action argument \"osds\" is missing") + + # convert OSD IDs from user's input into the set + args = {osd_id.strip() for osd_id in str(raw_arg).split(',')} + + if ALL in args and len(args) != 1: + args = {ALL} + log("keyword \"all\" was found in \"osds\" argument. Dropping other " + "explicitly defined OSD IDs", WARNING) + + return args diff --git a/ceph-osd/unit_tests/test_actions_osd_out_in.py b/ceph-osd/unit_tests/test_actions_osd_out_in.py index f8b3546c..b14a014e 100644 --- a/ceph-osd/unit_tests/test_actions_osd_out_in.py +++ b/ceph-osd/unit_tests/test_actions_osd_out_in.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import mock +import subprocess import sys @@ -23,46 +23,106 @@ import osd_in_out as actions +def mock_check_output(cmd, **kwargs): + action, osd_id = cmd[-2:] # get the last two arguments from cmd + return "marked {} osd.{}. 
\n".format(action, osd_id).encode("utf-8") + + class OSDOutTestCase(CharmTestCase): def setUp(self): super(OSDOutTestCase, self).setUp( - actions, ["check_call", + actions, ["check_output", "get_local_osd_ids", - "assess_status"]) + "assess_status", + "parse_osds_arguments", + "function_fail", + "function_set"]) + + self.check_output.side_effect = mock_check_output def test_osd_out(self): - self.get_local_osd_ids.return_value = [5] - actions.osd_out([]) - cmd = ['ceph', '--id', - 'osd-upgrade', 'osd', 'out', '5'] - self.check_call.assert_called_once_with(cmd) + self.get_local_osd_ids.return_value = ["5", "6", "7"] + self.parse_osds_arguments.return_value = {"5"} + actions.osd_out() + self.check_output.assert_called_once_with( + ["ceph", "--id", "osd-upgrade", "osd", "out", "5"], + stderr=subprocess.STDOUT + ) + self.assess_status.assert_called_once_with() + + def test_osd_out_all(self): + self.get_local_osd_ids.return_value = ["5", "6", "7"] + self.parse_osds_arguments.return_value = {"all"} + actions.osd_out() + self.check_output.assert_has_calls( + [mock.call( + ["ceph", "--id", "osd-upgrade", "osd", "out", i], + stderr=subprocess.STDOUT + ) for i in set(["5", "6", "7"])]) self.assess_status.assert_called_once_with() + def test_osd_out_not_local(self): + self.get_local_osd_ids.return_value = ["5"] + self.parse_osds_arguments.return_value = {"6", "7", "8"} + actions.osd_out() + self.check_output.assert_not_called() + self.function_fail.assert_called_once_with( + "invalid ceph OSD device id: " + "{}".format(",".join(set(["6", "7", "8"])))) + self.assess_status.assert_not_called() + class OSDInTestCase(CharmTestCase): def setUp(self): super(OSDInTestCase, self).setUp( - actions, ["check_call", + actions, ["check_output", "get_local_osd_ids", - "assess_status"]) + "assess_status", + "parse_osds_arguments", + "function_fail", + "function_set"]) + + self.check_output.side_effect = mock_check_output def test_osd_in(self): - self.get_local_osd_ids.return_value = [5] - actions.osd_in([]) - cmd = ['ceph', '--id', - 'osd-upgrade', 'osd', 'in', '5'] - self.check_call.assert_called_once_with(cmd) + self.get_local_osd_ids.return_value = ["5", "6", "7"] + self.parse_osds_arguments.return_value = {"5"} + actions.osd_in() + self.check_output.assert_called_once_with( + ["ceph", "--id", "osd-upgrade", "osd", "in", "5"], + stderr=subprocess.STDOUT + ) self.assess_status.assert_called_once_with() + def test_osd_in_all(self): + self.get_local_osd_ids.return_value = ["5", "6", "7"] + self.parse_osds_arguments.return_value = {"all"} + actions.osd_in() + self.check_output.assert_has_calls( + [mock.call( + ["ceph", "--id", "osd-upgrade", "osd", "in", i], + stderr=subprocess.STDOUT + ) for i in set(["5", "6", "7"])]) + self.assess_status.assert_called_once_with() + + def test_osd_in_not_local(self): + self.get_local_osd_ids.return_value = ["5"] + self.parse_osds_arguments.return_value = {"6"} + actions.osd_in() + self.check_output.assert_not_called() + self.function_fail.assert_called_once_with( + "invalid ceph OSD device id: 6") + self.assess_status.assert_not_called() + class MainTestCase(CharmTestCase): def setUp(self): - super(MainTestCase, self).setUp(actions, ["action_fail"]) + super(MainTestCase, self).setUp(actions, ["function_fail"]) def test_invokes_action(self): dummy_calls = [] - def dummy_action(args): + def dummy_action(): dummy_calls.append(True) with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): @@ -75,12 +135,12 @@ def test_unknown_action(self): self.assertEqual("Action foo undefined", 
exit_string) def test_failing_action(self): - """Actions which traceback trigger action_fail() calls.""" + """Actions which traceback trigger function_fail() calls.""" dummy_calls = [] - self.action_fail.side_effect = dummy_calls.append + self.function_fail.side_effect = dummy_calls.append - def dummy_action(args): + def dummy_action(): raise ValueError("uh oh") with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}): diff --git a/ceph-osd/unit_tests/test_actions_service.py b/ceph-osd/unit_tests/test_actions_service.py index daa64e8d..57deafd3 100644 --- a/ceph-osd/unit_tests/test_actions_service.py +++ b/ceph-osd/unit_tests/test_actions_service.py @@ -14,9 +14,9 @@ import mock from contextlib import contextmanager -from copy import copy from actions import service +from hooks import utils from test_utils import CharmTestCase @@ -40,33 +40,27 @@ class ServiceActionTests(CharmTestCase): def __init__(self, methodName='runTest'): super(ServiceActionTests, self).__init__(methodName) - self._func_args = {'osds': None} def setUp(self, obj=None, patches=None): super(ServiceActionTests, self).setUp( service, - ['subprocess', 'function_fail', 'function_get', + ['subprocess', 'function_fail', 'log', 'assess_status', 'shutil'] ) present_services = '\n'.join(self._PRESENT_SERVICES).encode('utf-8') self.shutil.which.return_value = '/bin/systemctl' self.subprocess.check_call.return_value = None - self.function_get.side_effect = self.function_get_side_effect self.subprocess.run.return_value = CompletedProcessMock( stdout=present_services) - def function_get_side_effect(self, arg): - return self._func_args.get(arg) - @contextmanager def func_call_arguments(self, osds=None): - default = copy(self._func_args) - try: + with mock.patch("utils.function_get") as mock_function_get: self._func_args = {'osds': osds} + mock_function_get.side_effect = \ + lambda arg: self._func_args.get(arg) yield - finally: - self._func_args = copy(default) def assert_action_start_fail(self, msg): self.assert_function_fail(service.START, msg) @@ -88,7 +82,7 @@ def call_action_stop(): def test_systemctl_execute_all(self): action = 'start' - services = service.ALL + services = utils.ALL expected_call = mock.call(['systemctl', action, self._TARGET_ALL], timeout=self._CHECK_CALL_TIMEOUT) @@ -110,17 +104,17 @@ def systemctl_execute_specific(self): self.subprocess.check_call.assert_has_calls([expected_call]) def test_id_translation(self): - service_ids = {1, service.ALL, 2} + service_ids = {1, utils.ALL, 2} expected_names = [ 'ceph-osd@1.service', - service.ALL, + utils.ALL, 'ceph-osd@2.service', ] service_names = service.osd_ids_to_service_names(service_ids) self.assertEqual(sorted(service_names), sorted(expected_names)) def test_skip_service_presence_check(self): - service_list = [service.ALL] + service_list = [utils.ALL] service.check_service_is_present(service_list) @@ -145,28 +139,6 @@ def test_raise_all_missing_services(self): stdout=self.subprocess.PIPE, timeout=30) - def test_raise_on_missing_arguments(self): - err_msg = 'Action argument "osds" is missing' - with self.func_call_arguments(osds=None): - with self.assertRaises(RuntimeError, msg=err_msg): - service.parse_arguments() - - def test_parse_service_ids(self): - raw = '1,2,3' - expected_ids = {'1', '2', '3'} - - with self.func_call_arguments(osds=raw): - parsed = service.parse_arguments() - self.assertEqual(parsed, expected_ids) - - def test_parse_service_ids_with_all(self): - raw = '1,2,all' - expected_id = {service.ALL} - - with self.func_call_arguments(osds=raw): - 
parsed = service.parse_arguments() - self.assertEqual(parsed, expected_id) - def test_fail_execute_unknown_action(self): action = 'foo' err_msg = 'Unknown action "{}"'.format(action) @@ -175,14 +147,14 @@ def test_fail_execute_unknown_action(self): @mock.patch.object(service, 'systemctl_execute') def test_execute_action(self, _): - with self.func_call_arguments(osds=service.ALL): + with self.func_call_arguments(osds=utils.ALL): service.execute_action(service.START) service.systemctl_execute.assert_called_with(service.START, - [service.ALL]) + [utils.ALL]) service.execute_action(service.STOP) service.systemctl_execute.assert_called_with(service.STOP, - [service.ALL]) + [utils.ALL]) @mock.patch.object(service, 'execute_action') def test_action_stop(self, execute_action): diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index 88dfefe4..1172f1a7 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -115,3 +115,26 @@ def test_is_sata30orless_sata26(self, mock_subprocess_check_output): ret = utils.is_sata30orless('/dev/sda') mock_subprocess_check_output.assert_called() self.assertEqual(ret, True) + + @patch.object(utils, "function_get") + def test_raise_on_missing_arguments(self, mock_function_get): + mock_function_get.return_value = None + err_msg = "Action argument \"osds\" is missing" + with self.assertRaises(RuntimeError, msg=err_msg): + utils.parse_osds_arguments() + + @patch.object(utils, "function_get") + def test_parse_service_ids(self, mock_function_get): + mock_function_get.return_value = "1,2,3" + expected_ids = {"1", "2", "3"} + + parsed = utils.parse_osds_arguments() + self.assertEqual(parsed, expected_ids) + + @patch.object(utils, "function_get") + def test_parse_service_ids_with_all(self, mock_function_get): + mock_function_get.return_value = "1,2,all" + expected_id = {utils.ALL} + + parsed = utils.parse_osds_arguments() + self.assertEqual(parsed, expected_id) From abdd0acce1eb8008f5a5f5b1a00f6b458d096422 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2140/2699] Updates for testing period for 20.01 release * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: If51c83b121eb90308d2e87ca06c73f826a5ce359 --- ceph-rbd-mirror/rebuild | 2 +- ceph-rbd-mirror/src/wheelhouse.txt | 2 ++ ceph-rbd-mirror/test-requirements.txt | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 6d82ba59..8a4ed038 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
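The ceph-osd refactor above moves the parsing of the "osds" action argument out of actions/service.py and into hooks/utils.py, which is why the relocated tests patch utils.function_get directly. A minimal sketch of the helper those tests exercise, consistent with their assertions (the import path and exact messages here are assumptions, not the charm's verbatim source):

    from charmhelpers.core.hookenv import function_get  # assumed location

    ALL = 'all'  # sentinel meaning "every ceph-osd service on this unit"


    def parse_osds_arguments():
        """Parse the comma-separated 'osds' action argument into a set of IDs."""
        raw = function_get('osds')
        if raw is None:
            raise RuntimeError('Action argument "osds" is missing')
        osd_ids = {part.strip() for part in str(raw).split(',') if part.strip()}
        # The 'all' keyword anywhere in the list trumps individual IDs.
        if ALL in osd_ids:
            return {ALL}
        return osd_ids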
# simply change the uuid to something new -4fdfb6f2-0e2c-11eb-8136-c75ae9682e61 +6f7c822e-5418-11eb-abb6-5f5809a3265d diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index 523142ae..fac0be8d 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,2 +1,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers psutil + +git+https://opendev.org/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 8ab24b2e..3f085244 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -22,6 +22,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive From 7626c710f862e17dfd0554e0ae1c07343589b45f Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2141/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Ife8c24cefab36793f2e3f0b08e6a828504d4380f --- ceph-fs/rebuild | 2 +- ceph-fs/src/wheelhouse.txt | 4 ++++ ceph-fs/test-requirements.txt | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 0b82e555..4f90aa27 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
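The ;python_version<'3.6' suffixes on the debtcollector and oslo.utils pins above are PEP 508 environment markers: pip applies each pin only when the running interpreter matches, so the lines are inert on modern Pythons and only constrain the legacy 3.5 test environments. The evaluation can be demonstrated with the packaging library (not part of these charms, shown purely for illustration):

    from packaging.markers import Marker

    marker = Marker("python_version < '3.6'")
    print(marker.evaluate())                           # False on a current interpreter
    print(marker.evaluate({'python_version': '3.5'}))  # True, so the pin applies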
# simply change the uuid to something new -4fd31136-0e2c-11eb-b79e-a72d45c8e7f5 +82b1d4ea-5745-11eb-a4e0-97d1b49696a4 diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index e169348c..732aa8fc 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -3,3 +3,7 @@ dnspython3 ceph_api pyxattr psutil + +git+https://opendev.org/openstack/charms.openstack.git#egg=charms.openstack + +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 8ab24b2e..3f085244 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -22,6 +22,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive From b4bc47cf6ff861a1ffcc858178f606bb7a4572e1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2142/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: I40621954e3108f73b79257755aa518646e52c88f --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++++++- .../contrib/hardening/audits/apache.py | 5 ++ .../charmhelpers/contrib/openstack/ip.py | 21 ++++++++- .../charmhelpers/contrib/openstack/utils.py | 30 ++++++++++++ ceph-proxy/charmhelpers/fetch/ubuntu.py | 2 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 47 ++++++++++++++++++- ceph-proxy/test-requirements.txt | 2 + 7 files changed, 119 insertions(+), 4 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index 14b80d96..c87cf489 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -139,10 +139,11 @@ class Check(object): """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} +{service_config_overrides} }} """) - def __init__(self, shortname, description, check_cmd): + def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): @@ -155,6 +156,7 @@ def __init__(self, shortname, description, check_cmd): # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) + self.max_check_attempts = max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) @@ -216,12 +218,19 @@ def write_service_config(self, nagios_context, hostname, nagios_servicegroups): self._remove_service_files() + if self.max_check_attempts: + service_config_overrides = ' max_check_attempts {}'.format( + self.max_check_attempts + ) # Note indentation is here rather than in the template to avoid trailing spaces + else: + service_config_overrides = '' # empty string to avoid printing 'None' templ_vars = { 'nagios_hostname': 
hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, + 'service_config_overrides': service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) @@ -327,6 +336,11 @@ def write(self): nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } + # If we were passed max_check_attempts, add that to the relation data + try: + nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts + except AttributeError: + pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py index 04825f5a..c1537625 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py @@ -98,3 +98,8 @@ def _disable_module(module): def _restart_apache(): """Restarts the apache process""" subprocess.check_output(['service', 'apache2', 'restart']) + + @staticmethod + def is_ssl_enabled(): + """Check if SSL module is enabled or not""" + return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ip.py b/ceph-proxy/charmhelpers/contrib/openstack/ip.py index 89cf276d..65573300 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/ip.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/ip.py @@ -123,6 +123,25 @@ def _get_address_override(endpoint_type=PUBLIC): return addr_override.format(service_name=service_name()) +def local_address(unit_get_fallback='public-address'): + """Return a network address for this unit. + + Attempt to retrieve a 'default' IP address for this unit + from network-get. If this is running with an old version of Juju then + fallback to unit_get. + + :param unit_get_fallback: Either 'public-address' or 'private-address'. + Only used with old versions of Juju. + :type unit_get_fallback: str + :returns: IP Address + :rtype: str + """ + try: + return network_get_primary_address('juju-info') + except NotImplementedError: + return unit_get(unit_get_fallback) + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. 
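The nrpe.py change in this sync threads a new max_check_attempts option through Check.__init__, the rendered Nagios service stanza, and the monitors relation data, letting a charm ask Nagios to retry a flapping check before alerting. Assuming NRPE.add_check forwards its keyword arguments to Check as it does in charm-helpers, and with an illustrative check name, charm-side usage would look roughly like:

    from charmhelpers.contrib.charmsupport.nrpe import NRPE

    nrpe = NRPE(hostname='juju-ceph-proxy-0')  # hostname is illustrative
    nrpe.add_check(
        shortname='ceph_health',
        description='Verify Ceph reports HEALTH_OK',
        check_cmd='check_ceph_health.py',
        max_check_attempts=3,  # retry twice more before raising an alert
    )
    nrpe.write()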
@@ -176,7 +195,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: - fallback_addr = unit_get(net_fallback) + fallback_addr = local_address(unit_get_fallback=net_fallback) if net_addr: resolved_address = get_address_in_network(net_addr, fallback_addr) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index f4c76214..f27aa6c9 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -90,13 +90,16 @@ service_start, restart_on_change_helper, ) + from charmhelpers.fetch import ( apt_cache, + apt_install, import_key as fetch_import_key, add_source as fetch_add_source, SourceConfigError, GPGKeyError, get_upstream_version, + filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, ) @@ -480,9 +483,14 @@ def get_swift_codename(version): return None +@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + codename = get_installed_os_version() + if codename: + return codename + if snap_install_requested(): cmd = ['snap', 'list', package] try: @@ -570,6 +578,28 @@ def get_os_version_package(pkg, fatal=True): # error_out(e) +def get_installed_os_version(): + apt_install(filter_installed_packages(['openstack-release']), fatal=False) + print("OpenStack Release: {}".format(openstack_release())) + return openstack_release().get('OPENSTACK_CODENAME') + + +@cached +def openstack_release(): + """Return /etc/os-release in a dict.""" + d = {} + try: + with open('/etc/openstack-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + except FileNotFoundError: + pass + return d + + # Module local cache variable for the os_release. _os_rel = None diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 33152840..b5953019 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -646,7 +646,7 @@ def _add_apt_repository(spec): # passed as environment variables (See lp:1433761). This is not the case # LTS and non-LTS releases below bionic. _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https'])) + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index 929a75d7..a2fbe0e5 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -129,7 +129,7 @@ def _dpkg_list(self, packages): else: data = line.split(None, 4) status = data.pop(0) - if status != 'ii': + if status not in ('ii', 'hi'): continue pkg = {} pkg.update({k.lower(): v for k, v in zip(headings, data)}) @@ -265,3 +265,48 @@ def version_compare(a, b): raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' 'less than each other.') + + +class PkgVersion(): + """Allow package versions to be compared. + + For example:: + + >>> import charmhelpers.fetch as fetch + >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < + ... 
fetch.apt_pkg.PkgVersion('2:20.5.0')) + True + >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), + ... fetch.apt_pkg.PkgVersion('2:21.4.0'), + ... fetch.apt_pkg.PkgVersion('2:17.4.0')] + >>> pkgs.sort() + >>> pkgs + [2:17.4.0, 2:20.4.0, 2:21.4.0] + """ + + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return version_compare(self.version, other.version) == -1 + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __gt__(self, other): + return version_compare(self.version, other.version) == 1 + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + def __eq__(self, other): + return version_compare(self.version, other.version) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return self.version + + def __hash__(self): + return hash(repr(self)) diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 1aa96356..9aea716b 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -37,6 +37,8 @@ importlib-resources<3.0.0; python_version < '3.6' # dropped support for python 3.5: osprofiler<2.7.0;python_version<'3.6' stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) From 6e5b762bf671f0c3fad704e3fd695993f5a03a09 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2143/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Ib761eb90cbc87620c990add6da7f4184e7dcb453 --- .../contrib/openstack/cert_utils.py | 8 +++---- .../charmhelpers/contrib/openstack/context.py | 8 +++---- .../charmhelpers/contrib/openstack/ip.py | 21 ++++++++++++++++++- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index fc36d0f1..24867497 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -30,7 +30,6 @@ relation_get, relation_ids, remote_service_name, - unit_get, NoNetworkBinding, log, WARNING, @@ -41,6 +40,7 @@ get_vip_in_network, ADDRESS_MAP, get_default_api_bindings, + local_address, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -81,7 +81,7 @@ def add_entry(self, net_type, cn, addresses): def add_hostname_cn(self): """Add a request for the hostname of the machine""" - ip = unit_get('private-address') + ip = local_address(unit_get_fallback='private-address') addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units @@ -194,7 +194,7 @@ def get_certificate_sans(bindings=None): :returns: List of binding string names :rtype: List[str] """ - _sans = [unit_get('private-address')] + _sans = [local_address(unit_get_fallback='private-address')] if bindings: # Add default API bindings to bindings list bindings = list(bindings 
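The PkgVersion class added to ubuntu_apt_pkg above is a thin rich-comparison wrapper around version_compare, so Debian version strings, epochs included, can be ordered with plain Python operators. Building on the doctest embedded in the class itself:

    import charmhelpers.fetch as fetch

    versions = ['12.2.13', '15.2.8', '2:20.4.0']  # sample version strings
    newest = max(fetch.apt_pkg.PkgVersion(v) for v in versions)
    print(newest)  # 2:20.4.0, since the '2:' epoch outranks un-epoched versions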
+ get_default_api_bindings()) @@ -260,7 +260,7 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): os.symlink(requested_key, key) # Handle custom hostnames - hostname = get_hostname(unit_get('private-address')) + hostname = get_hostname(local_address(unit_get_fallback='private-address')) hostname_cert = os.path.join( ssl_dir, 'cert_{}'.format(hostname)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 6255dac0..c242d18d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -49,7 +49,6 @@ relation_ids, related_units, relation_set, - unit_get, unit_private_ip, charm_name, DEBUG, @@ -98,6 +97,7 @@ ADMIN, PUBLIC, ADDRESS_MAP, + local_address, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -247,7 +247,7 @@ def __call__(self): hostname_key = "hostname" access_hostname = get_address_in_network( access_network, - unit_get('private-address')) + local_address(unit_get_fallback='private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -1088,7 +1088,7 @@ def get_network_addresses(self): # NOTE(jamespage): Fallback must always be private address # as this is used to bind services on the # local unit. - fallback = unit_get("private-address") + fallback = local_address(unit_get_fallback="private-address") if net_config: addr = get_address_in_network(net_config, fallback) @@ -1260,7 +1260,7 @@ def neutron_ctxt(self): if is_clustered(): host = config('vip') else: - host = unit_get('private-address') + host = local_address(unit_get_fallback='private-address') ctxt = {'network_manager': self.network_manager, 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index 89cf276d..65573300 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -123,6 +123,25 @@ def _get_address_override(endpoint_type=PUBLIC): return addr_override.format(service_name=service_name()) +def local_address(unit_get_fallback='public-address'): + """Return a network address for this unit. + + Attempt to retrieve a 'default' IP address for this unit + from network-get. If this is running with an old version of Juju then + fallback to unit_get. + + :param unit_get_fallback: Either 'public-address' or 'private-address'. + Only used with old versions of Juju. + :type unit_get_fallback: str + :returns: IP Address + :rtype: str + """ + try: + return network_get_primary_address('juju-info') + except NotImplementedError: + return unit_get(unit_get_fallback) + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. 
@@ -176,7 +195,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: - fallback_addr = unit_get(net_fallback) + fallback_addr = local_address(unit_get_fallback=net_fallback) if net_addr: resolved_address = get_address_in_network(net_addr, fallback_addr) From c35473d671cf4540a1122a47fd3602579a40a3f2 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2144/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers * Remove mocked out unit_get as it's no longer present in charm-helpers sync. Change-Id: I72fc602ca3f8546da39e0da52b3144ab372b8d90 --- .../contrib/openstack/cert_utils.py | 8 +++---- .../charmhelpers/contrib/openstack/context.py | 8 +++---- .../charmhelpers/contrib/openstack/ip.py | 21 ++++++++++++++++++- .../unit_tests/test_ceph_radosgw_context.py | 5 +---- 4 files changed, 29 insertions(+), 13 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index fc36d0f1..24867497 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -30,7 +30,6 @@ relation_get, relation_ids, remote_service_name, - unit_get, NoNetworkBinding, log, WARNING, @@ -41,6 +40,7 @@ get_vip_in_network, ADDRESS_MAP, get_default_api_bindings, + local_address, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -81,7 +81,7 @@ def add_entry(self, net_type, cn, addresses): def add_hostname_cn(self): """Add a request for the hostname of the machine""" - ip = unit_get('private-address') + ip = local_address(unit_get_fallback='private-address') addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units @@ -194,7 +194,7 @@ def get_certificate_sans(bindings=None): :returns: List of binding string names :rtype: List[str] """ - _sans = [unit_get('private-address')] + _sans = [local_address(unit_get_fallback='private-address')] if bindings: # Add default API bindings to bindings list bindings = list(bindings + get_default_api_bindings()) @@ -260,7 +260,7 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): os.symlink(requested_key, key) # Handle custom hostnames - hostname = get_hostname(unit_get('private-address')) + hostname = get_hostname(local_address(unit_get_fallback='private-address')) hostname_cert = os.path.join( ssl_dir, 'cert_{}'.format(hostname)) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 6255dac0..c242d18d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -49,7 +49,6 @@ relation_ids, related_units, relation_set, - unit_get, unit_private_ip, charm_name, DEBUG, @@ -98,6 +97,7 @@ ADMIN, PUBLIC, ADDRESS_MAP, + local_address, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -247,7 +247,7 @@ def 
__call__(self): hostname_key = "hostname" access_hostname = get_address_in_network( access_network, - unit_get('private-address')) + local_address(unit_get_fallback='private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -1088,7 +1088,7 @@ def get_network_addresses(self): # NOTE(jamespage): Fallback must always be private address # as this is used to bind services on the # local unit. - fallback = unit_get("private-address") + fallback = local_address(unit_get_fallback="private-address") if net_config: addr = get_address_in_network(net_config, fallback) @@ -1260,7 +1260,7 @@ def neutron_ctxt(self): if is_clustered(): host = config('vip') else: - host = unit_get('private-address') + host = local_address(unit_get_fallback='private-address') ctxt = {'network_manager': self.network_manager, 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 89cf276d..65573300 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -123,6 +123,25 @@ def _get_address_override(endpoint_type=PUBLIC): return addr_override.format(service_name=service_name()) +def local_address(unit_get_fallback='public-address'): + """Return a network address for this unit. + + Attempt to retrieve a 'default' IP address for this unit + from network-get. If this is running with an old version of Juju then + fallback to unit_get. + + :param unit_get_fallback: Either 'public-address' or 'private-address'. + Only used with old versions of Juju. + :type unit_get_fallback: str + :returns: IP Address + :rtype: str + """ + try: + return network_get_primary_address('juju-info') + except NotImplementedError: + return unit_get(unit_get_fallback) + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. 
@@ -176,7 +195,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: - fallback_addr = unit_get(net_fallback) + fallback_addr = local_address(unit_get_fallback=net_fallback) if net_addr: resolved_address = get_address_in_network(net_addr, fallback_addr) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index cb0c039b..5117bcd7 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -45,16 +45,13 @@ def setUp(self): @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') - @patch('charmhelpers.contrib.openstack.context.unit_get') @patch('charmhelpers.contrib.openstack.context.local_unit') @patch('charmhelpers.contrib.openstack.context.config') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch('charmhelpers.contrib.openstack.context.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, - _ctxtconfig, _local_unit, _unit_get, _mkdir, - _get_relation_ip): - _unit_get.return_value = '10.0.0.10' + _ctxtconfig, _local_unit, _mkdir, _get_relation_ip): _get_relation_ip.return_value = '10.0.0.10' _ctxtconfig.side_effect = self.test_config.get _haconfig.side_effect = self.test_config.get From 82429341f5bf1590ca4dcf8c5246c80c397ef33d Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 12 Jan 2021 15:24:12 +0000 Subject: [PATCH 2145/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: I0cf6408ad65a46a7eb0dbcaf461cc67b13d31172 --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 8a4ed038..06e8054a 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. 
# simply change the uuid to something new -6f7c822e-5418-11eb-abb6-5f5809a3265d +82c053f8-5745-11eb-b798-b32b668d6814 From 1eeb1cc9270cd13c0a9f3acbc2c2d6c305f870ed Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 15 Jan 2021 15:57:01 +0000 Subject: [PATCH 2146/2699] Updates for testing period for 20.01 release Includes updates to charmhelpers/charms.openstack for cert_utils and unit-get for the install hook error on Juju 2.9 * charm-helpers sync for classic charms * rebuild for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure master branch for charms.openstack - ensure master branch for charm-helpers Change-Id: Ie6146da90c4adc38e817e644e1328ad6c41d678f --- .../contrib/openstack/cert_utils.py | 8 +++---- .../charmhelpers/contrib/openstack/context.py | 8 +++---- .../charmhelpers/contrib/openstack/ip.py | 21 ++++++++++++++++++- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index fc36d0f1..24867497 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -30,7 +30,6 @@ relation_get, relation_ids, remote_service_name, - unit_get, NoNetworkBinding, log, WARNING, @@ -41,6 +40,7 @@ get_vip_in_network, ADDRESS_MAP, get_default_api_bindings, + local_address, ) from charmhelpers.contrib.network.ip import ( get_relation_ip, @@ -81,7 +81,7 @@ def add_entry(self, net_type, cn, addresses): def add_hostname_cn(self): """Add a request for the hostname of the machine""" - ip = unit_get('private-address') + ip = local_address(unit_get_fallback='private-address') addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units @@ -194,7 +194,7 @@ def get_certificate_sans(bindings=None): :returns: List of binding string names :rtype: List[str] """ - _sans = [unit_get('private-address')] + _sans = [local_address(unit_get_fallback='private-address')] if bindings: # Add default API bindings to bindings list bindings = list(bindings + get_default_api_bindings()) @@ -260,7 +260,7 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): os.symlink(requested_key, key) # Handle custom hostnames - hostname = get_hostname(unit_get('private-address')) + hostname = get_hostname(local_address(unit_get_fallback='private-address')) hostname_cert = os.path.join( ssl_dir, 'cert_{}'.format(hostname)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 6255dac0..c242d18d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -49,7 +49,6 @@ relation_ids, related_units, relation_set, - unit_get, unit_private_ip, charm_name, DEBUG, @@ -98,6 +97,7 @@ ADMIN, PUBLIC, ADDRESS_MAP, + local_address, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ -247,7 +247,7 @@ def __call__(self): hostname_key = "hostname" access_hostname = get_address_in_network( access_network, - unit_get('private-address')) + local_address(unit_get_fallback='private-address')) set_hostname = relation_get(attribute=hostname_key, unit=local_unit()) if set_hostname != access_hostname: @@ -1088,7 +1088,7 @@ def get_network_addresses(self): # NOTE(jamespage): Fallback must always be 
private address # as this is used to bind services on the # local unit. - fallback = unit_get("private-address") + fallback = local_address(unit_get_fallback="private-address") if net_config: addr = get_address_in_network(net_config, fallback) @@ -1260,7 +1260,7 @@ def neutron_ctxt(self): if is_clustered(): host = config('vip') else: - host = unit_get('private-address') + host = local_address(unit_get_fallback='private-address') ctxt = {'network_manager': self.network_manager, 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index 89cf276d..65573300 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -123,6 +123,25 @@ def _get_address_override(endpoint_type=PUBLIC): return addr_override.format(service_name=service_name()) +def local_address(unit_get_fallback='public-address'): + """Return a network address for this unit. + + Attempt to retrieve a 'default' IP address for this unit + from network-get. If this is running with an old version of Juju then + fallback to unit_get. + + :param unit_get_fallback: Either 'public-address' or 'private-address'. + Only used with old versions of Juju. + :type unit_get_fallback: str + :returns: IP Address + :rtype: str + """ + try: + return network_get_primary_address('juju-info') + except NotImplementedError: + return unit_get(unit_get_fallback) + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. @@ -176,7 +195,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: - fallback_addr = unit_get(net_fallback) + fallback_addr = local_address(unit_get_fallback=net_fallback) if net_addr: resolved_address = get_address_in_network(net_addr, fallback_addr) From 92ef7607627064a40d4a8bf24b18245beba26d55 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 19 Jan 2021 17:29:59 +0100 Subject: [PATCH 2147/2699] Add Groovy to the metadata Change-Id: I7a15ca1c12b3cee723afbcd3677719cd4729ed10 --- ceph-iscsi/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index e96aab09..e42d44b7 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -11,6 +11,7 @@ tags: - misc series: - focal + - groovy subordinate: false min-juju-version: 2.7.6 extra-bindings: From 9fb28fdae82d79c0fd38effe97858d14493e2a43 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 20 Jan 2021 12:20:00 +0000 Subject: [PATCH 2148/2699] Hotfix charmhelpers sync for local_address() fix The network-get --primary-address juju-info fails on pre-2.8.? versions of juju. This results in a NoNetworkBinding error. Fallback to unit_get() if that occurs for local_address(). Change-Id: I39648aa65299c77abe5790e5ac3cc29541142d46 --- ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py index 65573300..b8c94c56 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py @@ -130,6 +130,9 @@ def local_address(unit_get_fallback='public-address'): from network-get. If this is running with an old version of Juju then fallback to unit_get. 
+ Note on juju < 2.9 the binding to juju-info may not exist, so fall back to + the unit-get. + :param unit_get_fallback: Either 'public-address' or 'private-address'. Only used with old versions of Juju. :type unit_get_fallback: str @@ -138,7 +141,7 @@ def local_address(unit_get_fallback='public-address'): """ try: return network_get_primary_address('juju-info') - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): return unit_get(unit_get_fallback) From 3b8cf18cb8a734c16a637826568ea44b18800e5b Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 20 Jan 2021 12:20:16 +0000 Subject: [PATCH 2149/2699] Hotfix charmhelpers sync for local_address() fix The network-get --primary-address juju-info fails on pre-2.8.? versions of juju. This results in a NoNetworkBinding error. Fallback to unit_get() if that occurs for local_address(). Change-Id: I3a7d4e5093f15a1fb32310522229d0b4ebd61c59 --- ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index 65573300..b8c94c56 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -130,6 +130,9 @@ def local_address(unit_get_fallback='public-address'): from network-get. If this is running with an old version of Juju then fallback to unit_get. + Note on juju < 2.9 the binding to juju-info may not exist, so fall back to + the unit-get. + :param unit_get_fallback: Either 'public-address' or 'private-address'. Only used with old versions of Juju. :type unit_get_fallback: str @@ -138,7 +141,7 @@ def local_address(unit_get_fallback='public-address'): """ try: return network_get_primary_address('juju-info') - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): return unit_get(unit_get_fallback) From 4dda7368b425392b47e747f3c09992281ede7817 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 20 Jan 2021 12:20:31 +0000 Subject: [PATCH 2150/2699] Hotfix charmhelpers sync for local_address() fix The network-get --primary-address juju-info fails on pre-2.8.? versions of juju. This results in a NoNetworkBinding error. Fallback to unit_get() if that occurs for local_address(). Change-Id: I309ef298fecfe8d5cb2fd4c3322c3d734094f5d4 --- ceph-proxy/charmhelpers/contrib/openstack/ip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ip.py b/ceph-proxy/charmhelpers/contrib/openstack/ip.py index 65573300..b8c94c56 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/ip.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/ip.py @@ -130,6 +130,9 @@ def local_address(unit_get_fallback='public-address'): from network-get. If this is running with an old version of Juju then fallback to unit_get. + Note on juju < 2.9 the binding to juju-info may not exist, so fall back to + the unit-get. + :param unit_get_fallback: Either 'public-address' or 'private-address'. Only used with old versions of Juju. 
:type unit_get_fallback: str @@ -138,7 +141,7 @@ def local_address(unit_get_fallback='public-address'): """ try: return network_get_primary_address('juju-info') - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): return unit_get(unit_get_fallback)
From ed35c8b7bb48cc940cb2216dc60dd423af8ad618 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 20 Jan 2021 12:20:55 +0000 Subject: [PATCH 2151/2699] Hotfix charmhelpers sync for local_address() fix The network-get --primary-address juju-info fails on pre-2.8.? versions of juju. This results in a NoNetworkBinding error. Fallback to unit_get() if that occurs for local_address(). Change-Id: I442967e1a616fe85426aa3eff9517b7615c83dd4 --- ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index 65573300..b8c94c56 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -130,6 +130,9 @@ def local_address(unit_get_fallback='public-address'): from network-get. If this is running with an old version of Juju then fallback to unit_get. + Note on juju < 2.9 the binding to juju-info may not exist, so fall back to + the unit-get. + :param unit_get_fallback: Either 'public-address' or 'private-address'. Only used with old versions of Juju. :type unit_get_fallback: str @@ -138,7 +141,7 @@ def local_address(unit_get_fallback='public-address'): """ try: return network_get_primary_address('juju-info') - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): return unit_get(unit_get_fallback)
From 911ab34bdfa04e49a8920586c091c14b432754eb Mon Sep 17 00:00:00 2001 From: David Ames Date: Mon, 1 Feb 2021 14:07:06 -0800 Subject: [PATCH 2152/2699] Rebuild with updated charm-tools 2.8.2 Validate the newest version of charm-tools: 2.8.2 https://github.com/juju/charm-tools/pull/598 Change-Id: I06992f94471e0f4f5b699dc6e96af2a64c29bac7 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 4f90aa27..efddec9a 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -82b1d4ea-5745-11eb-a4e0-97d1b49696a4 +be82de82-64d9-11eb-94b9-ff8b2804e655
From fa90fca3e44f09a99e737cbb961b1f6e4b3cec70 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Feb 2021 17:05:27 +0000 Subject: [PATCH 2153/2699] Rebuild to use charm-tools pre 2.8 release Reverting to an earlier version of charm-tools to resolve some building issues seen with latest 2.8.2 version.
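The hotfix above is the same one-line change synced into ceph-mon, ceph-osd, ceph-proxy and ceph-radosgw: local_address() now also catches NoNetworkBinding, so on Juju versions without a juju-info binding it degrades to unit_get() instead of raising. A sketch of exercising that fallback in isolation, assuming the names are importable from the module as the diffs suggest:

    from unittest import mock

    from charmhelpers.contrib.openstack import ip

    with mock.patch.object(ip, 'network_get_primary_address',
                           side_effect=ip.NoNetworkBinding), \
            mock.patch.object(ip, 'unit_get',
                              return_value='10.5.0.7') as unit_get:
        addr = ip.local_address(unit_get_fallback='private-address')

    assert addr == '10.5.0.7'
    unit_get.assert_called_once_with('private-address')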
Change-Id: I573a2dfb313501576ebe64af717497e7abcd828f --- ceph-fs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index a4d3eff0..1b0df5e1 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -9,7 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements -charm-tools>=2.4.4 +charm-tools>=2.4.4,<2.8 # Workaround until https://github.com/juju/charm-tools/pull/589 gets # published From d082e24eb514464c87d49cb34c75a855e3c850bc Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Feb 2021 17:05:56 +0000 Subject: [PATCH 2154/2699] Rebuild to use charm-tools pre 2.8 release Reverting to an earlier version of charm-tools to resolve some building issues seen with latest 2.8.2 version. Change-Id: I791e637ac6242eac8a7a2cb06ce6a65d31bdbaa9 --- ceph-rbd-mirror/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index a4d3eff0..1b0df5e1 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -9,7 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements -charm-tools>=2.4.4 +charm-tools>=2.4.4,<2.8 # Workaround until https://github.com/juju/charm-tools/pull/589 gets # published From 12d56d4d93ec71ca95aac97afb8e2b56dca8a7c7 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Wed, 10 Feb 2021 21:43:42 +0100 Subject: [PATCH 2155/2699] Pin pip < 20.3 Change-Id: I27af334ab6e66646a0fbb181a1436cf399c9bef7 --- ceph-iscsi/tox.ini | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index fbc6f079..775ea578 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -7,6 +7,18 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -24,8 +36,8 @@ deps = -r{toxinidir}/test-requirements.txt [testenv:py35] basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +# python3.5 is irrelevant on a focal+ charm. 
+commands = /bin/true [testenv:py36] basepython = python3.6 From 54a4d996f175a9f7b1c84a58d2a80ccc74045c3f Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 5 Feb 2021 14:59:19 -0500 Subject: [PATCH 2156/2699] Update README Add a relation to the keystone application Small driveby improvements Add new Documentation section template Change-Id: I19fb3637ef6afe671cbc93d2f8c974e9c0ea1481 --- ceph-radosgw/README.md | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 5b864ae9..d5299108 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -96,11 +96,17 @@ the compression behaviour. ## Deployment -To deploy a single RADOS gateway node within an existing Ceph cluster: +Ceph RADOS Gateway is often containerised. Here a single unit is deployed to a +new container on machine '1' within an existing Ceph cluster: - juju deploy ceph-radosgw + juju deploy --to lxd:1 ceph-radosgw juju add-relation ceph-radosgw:mon ceph-mon:radosgw +If the RADOS Gateway is being integrated into OpenStack then a relation to the +keystone application is needed: + + juju add-relation ceph-radosgw:identity-service keystone:identity-service + Expose the service: juju expose ceph-radosgw @@ -109,7 +115,7 @@ Expose the service: traffic by default. In general, MAAS is the only cloud type that does not employ firewalling. -The gateway can be accessed over port 80 (as per `juju status ceph-radosgw` +The Gateway can be accessed over port 80 (as per `juju status ceph-radosgw` output). ## Multi-site replication @@ -210,18 +216,24 @@ display action descriptions run `juju actions ceph-radosgw`. If the charm is not deployed then see file `actions.yaml`. * `pause` -* `resume` * `promote` * `readonly` * `readwrite` +* `resume` * `tidydefaults` +# Documentation + +The OpenStack Charms project maintains two documentation guides: + +* [OpenStack Charm Guide][cg]: for project information, including development + and support notes +* [OpenStack Charms Deployment Guide][cdg]: for charm usage information + # Bugs Please report bugs on [Launchpad][lp-bugs-charm-ceph-radosgw]. -For general charm questions refer to the OpenStack [Charm Guide][cg]. - [juju-docs-actions]: https://jaas.ai/docs/actions From d17906ab07d5aea711fd0b85562f79fc30c76f13 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 17 Feb 2021 12:06:05 +0000 Subject: [PATCH 2157/2699] Updates to pin charm-tools to 2.8.3 * Update requirements.txt to pin charm-tools to 2.8.3 * Update tox.ini to change the build parameters. * This upgrades from <2.7 which adds reproducible charms * Bug was fixed that controlled the default output directory doesn't get 'builds' appended. The tox change puts it back so that stable & master both build to the same directory. This may be reviewed in the future. 
Change-Id: Ie689cc96f5458c3622cb3752f2dffd5cf8e5499d --- ceph-fs/requirements.txt | 2 +- ceph-fs/tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 1b0df5e1..46b4e990 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -9,7 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements -charm-tools>=2.4.4,<2.8 +charm-tools==2.8.3 # Workaround until https://github.com/juju/charm-tools/pull/589 gets # published diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index c91922e8..ce79fa16 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -28,7 +28,7 @@ deps = [testenv:build] basepython = python3 commands = - charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + charm-build --log-level DEBUG -o {toxinidir}/build/builds src {posargs} [testenv:py3] basepython = python3 From 3eed65214b6e9b4beb03228244f970a0a52b9550 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 17 Feb 2021 12:06:05 +0000 Subject: [PATCH 2158/2699] Updates to pin charm-tools to 2.8.3 * Update requirements.txt to pin charm-tools to 2.8.3 * Update tox.ini to change the build parameters. * This upgrades from <2.7 which adds reproducible charms * Bug was fixed that controlled the default output directory doesn't get 'builds' appended. The tox change puts it back so that stable & master both build to the same directory. This may be reviewed in the future. Change-Id: If47116b4cd9ef9b857ad1c9366631ba1b0562ce2 --- ceph-rbd-mirror/requirements.txt | 2 +- ceph-rbd-mirror/tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index 1b0df5e1..46b4e990 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -9,7 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements -charm-tools>=2.4.4,<2.8 +charm-tools==2.8.3 # Workaround until https://github.com/juju/charm-tools/pull/589 gets # published diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index c91922e8..ce79fa16 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -28,7 +28,7 @@ deps = [testenv:build] basepython = python3 commands = - charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + charm-build --log-level DEBUG -o {toxinidir}/build/builds src {posargs} [testenv:py3] basepython = python3 From 2bc9ecfe1b34df66aee57f2491a3f053f6e7476a Mon Sep 17 00:00:00 2001 From: Pedro Date: Fri, 20 Jul 2018 22:26:34 +0200 Subject: [PATCH 2159/2699] Block same broker_req from running twice Goal is to avoid broker request retrials for every hook called on ceph-mon Change-Id: Iefd8d305409ba97804795eef9a5add81103d05a2 Closes-Bug: #1773910 Partial-Bug: #1913992 --- ceph-mon/hooks/ceph_hooks.py | 114 ++++++++++++++++++++----- ceph-mon/unit_tests/test_ceph_hooks.py | 65 +++++++++++++- 2 files changed, 152 insertions(+), 27 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 44e41744..8af31cae 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -591,6 +591,45 @@ def notify_client(): mds_relation_joined(relid=relid, unit=unit) +def req_already_treated(request_id, relid, req_unit): + """Check if broker request already handled. 
+ + The local relation data holds all the broker request/responses that + are handled as a dictionary. There will be a single entry for each + unit that makes a broker request in the form of broker-rsp-<unit name>: + {request-id: <id>, ..}. Verify if request_id exists in the relation + data broker response for the requested unit. + + :param request_id: Request ID + :type request_id: str + :param relid: Relation ID + :type relid: str + :param req_unit: Requested unit name + :type req_unit: str + :returns: Whether request is already handled + :rtype: bool + """ + status = relation_get(rid=relid, unit=local_unit()) + response_key = 'broker-rsp-' + req_unit.replace('/', '-') + if not status.get(response_key): + return False + data = None + # relation_get returns the value of response key as a dict or json + # encoded string + if isinstance(status[response_key], str): + try: + data = json.loads(status[response_key]) + except (TypeError, json.decoder.JSONDecodeError): + log('Not able to decode broker response for relid {} requested ' + 'unit {}'.format(relid, req_unit), level=WARNING) + return False + else: + data = status[response_key] + if data['request-id'] == request_id: + return True + return False + + def notify_mons(): """Update a nonce on the ``mon`` relation. @@ -675,36 +714,65 @@ def handle_broker_request(relid, unit, add_legacy_response=False, :returns: Dictionary of response data ready for use with relation_set. :rtype: dict """ + def _get_broker_req_id(request): + if isinstance(request, str): + try: + req_key = json.loads(request)['request-id'] + except (TypeError, json.decoder.JSONDecodeError): + log('Not able to decode request id for broker request {}'. + format(request), + level=WARNING) + req_key = None + else: + req_key = request['request-id'] + + return req_key + response = {} if not unit: unit = remote_unit() settings = relation_get(rid=relid, unit=unit) if 'broker_req' in settings: + broker_req_id = _get_broker_req_id(settings['broker_req']) + if broker_req_id is None: + return {} + if not ceph.is_leader(): - log("Not leader - ignoring broker request", level=DEBUG) - else: - rsp = process_requests(settings['broker_req']) - unit_id = settings.get('unit-name', unit).replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id - response.update({unit_response_key: rsp}) - if add_legacy_response: - response.update({'broker_rsp': rsp}) - - if relation_ids('rbd-mirror'): - # NOTE(fnordahl): juju relation level data candidate - # notify mons to flag that the other mon units should update - # their ``rbd-mirror`` relations with information about new - # pools. - log('Notifying peers after processing broker request.', + log("Not leader - ignoring broker request {}".format( + broker_req_id), + level=DEBUG) + return {} + + if req_already_treated(broker_req_id, relid, unit): + log("Ignoring already executed broker request {}".format( + broker_req_id), + level=DEBUG) + return {} + + rsp = process_requests(settings['broker_req']) + unit_id = settings.get('unit-name', unit).replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + response.update({unit_response_key: rsp}) + if add_legacy_response: + response.update({'broker_rsp': rsp}) + + if relation_ids('rbd-mirror'): + # NOTE(fnordahl): juju relation level data candidate + # notify mons to flag that the other mon units should update + # their ``rbd-mirror`` relations with information about new + # pools.
+ log('Notifying peers after processing broker request {}.'.format( + broker_req_id), + level=DEBUG) + notify_mons() + + if recurse: + # update ``rbd-mirror`` relations for this unit with + # information about new pools. + log('Notifying this units rbd-mirror relations after ' + 'processing broker request {}.'.format(broker_req_id), level=DEBUG) - notify_mons() - - if recurse: - # update ``rbd-mirror`` relations for this unit with - # information about new pools. - log('Notifying this units rbd-mirror relations after ' - 'processing broker request.', level=DEBUG) - notify_rbd_mirrors() + notify_rbd_mirrors() return response diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 80eedddd..97fbcde4 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -474,6 +474,7 @@ def test_client_relation(self, 'rbd-features': 42, }) + @patch.object(ceph_hooks, 'req_already_treated') @patch.object(ceph_hooks, 'send_osd_settings') @patch.object(ceph_hooks, 'get_rbd_features') @patch.object(ceph_hooks, 'config') @@ -501,17 +502,19 @@ def test_client_relation_non_rel_hook(self, relation_set, get_named_key, _config, _get_rbd_features, - _send_osd_settings): + _send_osd_settings, + req_already_treated): # Check for LP #1738154 ready_for_service.return_value = True process_requests.return_value = 'AOK' is_leader.return_value = True - relation_get.return_value = {'broker_req': 'req'} + relation_get.return_value = {'broker_req': '{"request-id": "req"}'} remote_unit.return_value = None is_quorum.return_value = True config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = lambda key: config[key] _get_rbd_features.return_value = None + req_already_treated.return_value = False ceph_hooks.client_relation(relid='rel1', unit='glance/0') _send_osd_settings.assert_called_once_with() relation_set.assert_called_once_with( @@ -534,6 +537,18 @@ def test_client_relation_non_rel_hook(self, relation_set, 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) + # Verify relation_set when broker request is already treated + relation_set.reset_mock() + req_already_treated.return_value = True + ceph_hooks.client_relation(relid='rel1', unit='glance/0') + relation_set.assert_called_once_with( + relation_id='rel1', + relation_settings={ + 'key': get_named_key(), + 'auth': False, + 'ceph-public-address': get_public_addr()}) + + @patch.object(ceph_hooks, 'req_already_treated') @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'notify_mons') @patch.object(ceph_hooks, 'notify_rbd_mirrors') @@ -546,13 +561,17 @@ def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, mock_broker_process_requests, mock_notify_rbd_mirrors, mock_notify_mons, - mock_relation_ids): + mock_relation_ids, + req_already_treated): mock_remote_unit.return_value = 'glance/0' + req_already_treated.return_value = False ceph_hooks.handle_broker_request('rel1', None) mock_remote_unit.assert_called_once_with() mock_relation_get.assert_called_once_with(rid='rel1', unit='glance/0') mock_relation_get.reset_mock() - mock_relation_get.return_value = {'broker_req': 'FAKE-REQUEST'} + mock_relation_get.return_value = { + 'broker_req': '{"request-id": "FAKE-REQUEST"}' + } mock_broker_process_requests.return_value = 'AOK' self.assertEqual( ceph_hooks.handle_broker_request('rel1', 'glance/0'), @@ -570,6 +589,44 @@ def test_handle_broker_request(self, mock_remote_unit, mock_relation_get, self.assertFalse(mock_notify_rbd_mirrors.called) 
        mock_notify_mons.assert_called_once_with()

+    @patch.object(ceph_hooks, 'local_unit')
+    @patch.object(ceph_hooks, 'relation_get')
+    @patch.object(ceph_hooks.ceph, 'is_leader')
+    @patch.object(ceph_hooks, 'process_requests')
+    def test_multi_broker_req_ignored_on_rel(self, process_requests,
+                                             is_leader,
+                                             relation_get,
+                                             local_unit):
+        is_leader.return_value = True
+        relation_get.side_effect = [{'broker_req': {'request-id': '1'}},
+                                    {'broker-rsp-glance-0':
+                                     {"request-id": "1"}}]
+        local_unit.return_value = "mon/0"
+        ceph_hooks.handle_broker_request(relid='rel1',
+                                         unit='glance/0',
+                                         recurse=False)
+        process_requests.assert_not_called()
+
+    @patch.object(ceph_hooks, 'relation_ids')
+    @patch.object(ceph_hooks, 'local_unit')
+    @patch.object(ceph_hooks, 'relation_get')
+    @patch.object(ceph_hooks.ceph, 'is_leader')
+    @patch.object(ceph_hooks, 'process_requests')
+    def test_multi_broker_req_handled_on_rel(self, process_requests,
+                                             is_leader,
+                                             relation_get,
+                                             local_unit,
+                                             _relation_ids):
+        is_leader.return_value = True
+        relation_get.side_effect = [{'broker_req': {'request-id': '2'}},
+                                    {'broker-rsp-glance-0':
+                                     {"request-id": "1"}}]
+        local_unit.return_value = "mon/0"
+        ceph_hooks.handle_broker_request(relid='rel1',
+                                         unit='glance/0',
+                                         recurse=False)
+        process_requests.assert_called_once_with({'request-id': '2'})
+

 class BootstrapSourceTestCase(test_utils.CharmTestCase):

From 203e2c7c443a6aad44d5aed2ea3aea5b2503c537 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Mon, 15 Feb 2021 09:10:39 +0100
Subject: [PATCH 2160/2699] Add new osci.yaml

This change is preparatory to migrating Ubuntu OpenStack CI from
Jenkins to Zuul

Change-Id: I16e82e0f295a9c0f6f21e4ff343e2f1afda1d82d
---
 ceph-osd/osci.yaml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 ceph-osd/osci.yaml

diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml
new file mode 100644
index 00000000..1abe9784
--- /dev/null
+++ b/ceph-osd/osci.yaml
@@ -0,0 +1,4 @@
+- project:
+    templates:
+      - charm-unit-jobs
+      - charm-functional-jobs
\ No newline at end of file

From 4395d156f2b71088daff2cef58004d8dfb8ec419 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Fri, 13 Nov 2020 15:22:38 +0100
Subject: [PATCH 2161/2699] Change case of default member role for Keystone

As a part of the 20.05 charms release, the bootstrap process for
Keystone led to changing the case of the member role. This work was
performed in https://review.opendev.org/#/c/712040/ so this change
aligns ceph-radosgw with that new default.

Change-Id: I116bb1def1b6bc8c111f30018598673da4dfdb5d
Closes-Bug: #1904183
---
 ceph-radosgw/config.yaml              | 2 +-
 ceph-radosgw/unit_tests/test_hooks.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml
index f5ae44a9..258f6e8f 100644
--- a/ceph-radosgw/config.yaml
+++ b/ceph-radosgw/config.yaml
@@ -223,7 +223,7 @@ options:
   # Keystone integration
   operator-roles:
     type: string
-    default: "Member"
+    default: "Member,member"
     description: |
       Comma-separated list of Swift operator roles; used when
       integrating with OpenStack Keystone.
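An aside on the operator-roles change above: Swift-style operator checks compare role names as literal strings, so a cloud bootstrapped with the new lower-case "member" role fails authorization against the old "Member"-only default. A minimal sketch of that matching behaviour, for illustration only (not the radosgw implementation):

    # Case-sensitive role matching; the combined default "Member,member"
    # keeps both pre- and post-20.05 Keystone bootstraps working.
    OPERATOR_ROLES = [r.strip() for r in "Member,member".split(",")]

    def is_operator(user_roles):
        return any(role in OPERATOR_ROLES for role in user_roles)

    assert is_operator(["member"])       # new Keystone bootstrap default
    assert is_operator(["Member"])       # old default
    assert not is_operator(["reader"])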
diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 988b235a..7d9e9cfe 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -410,7 +410,7 @@ def test_identity_joined_public_name(self, _config, _unit_get, swift_public_url='http://files.example.com:80/swift/v1', swift_internal_url='http://myserv:80/swift/v1', swift_admin_url='http://myserv:80/swift', - requested_roles='Member,Admin', + requested_roles='Member,member,Admin', relation_id='rid'), call(s3_service='s3', s3_region='RegionOne', From 071d764c2f3ada9c362f874892987da7c0aa94e1 Mon Sep 17 00:00:00 2001 From: Nicolas Bock Date: Thu, 18 Feb 2021 13:08:38 -0700 Subject: [PATCH 2162/2699] Fix wording for `osd-out` action The `osd-out` action stops osds. Signed-off-by: Nicolas Bock Change-Id: Idc23129a8ec39bc5451c9fae57b2045537b77777 --- ceph-osd/README.md | 4 ++-- ceph-osd/actions.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index c5601921..d8727bf9 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -281,7 +281,7 @@ completely (e.g. the storage hardware is reaching EOL). Example: - juju run-action --wait ceph-osd/4 osd-out + juju run-action --wait ceph-osd/4 osd-out osds=osd.0,osd.1 ### Set OSDs to 'in' @@ -293,7 +293,7 @@ with the cluster 'noout' flag. Example: - juju run-action --wait ceph-osd/4 osd-in + juju run-action --wait ceph-osd/4 osd-in osds=all ### Managing ceph OSDs diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index cc59e781..6d6667fa 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -11,7 +11,7 @@ osd-out: Documentation: https://jaas.ai/ceph-osd/ params: osds: - description: A comma-separated list of OSD IDs to start (or keyword 'all') + description: A comma-separated list of OSD IDs to stop (or keyword 'all') required: - osds osd-in: From 5dcca8cc2d08000502fa3a3e77e66c79523a2e8e Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:42 -0700 Subject: [PATCH 2163/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. Change-Id: Ifa837d3fec22a331bd650d20d630ab3700b76e62 --- ceph-fs/src/tests/tests.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index df29c443..1a1da437 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -8,11 +8,12 @@ gate_bundles: - bluestore-compression: bionic-stein - bluestore-compression: bionic-rocky - bionic-queens - - xenial-queens + - xenial-mitaka +dev_bundles: + - xenial-ocata # Xenial-pike is missing because of # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - - xenial-ocata - - xenial-mitaka + - xenial-queens smoke_bundles: - bluestore-compression: bionic-stein configure: From caa05c0e79d190cf60c26299d813878d165156e3 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:43 -0700 Subject: [PATCH 2164/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. 
Change-Id: If1380a8c92469b9705eb80d2d5c31d70529c1f18 --- ceph-mon/tests/tests.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 0d91d962..de2b01c0 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -8,13 +8,14 @@ gate_bundles: - bionic-train - bionic-train-with-fsid - bionic-stein - - bionic-rocky - bionic-queens - - xenial-queens - - xenial-pike - - xenial-ocata - xenial-mitaka +dev_bundles: - trusty-mitaka + - xenial-ocata + - xenial-pike + - xenial-queens + - bionic-rocky smoke_bundles: - bionic-train configure: From 8368ec4231ef96fb32ddd81c7b598fab8ffa9b69 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:43 -0700 Subject: [PATCH 2165/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. Change-Id: Ie8649989231dbf3477d39785276b4e47c7a0f2e4 --- ceph-osd/tests/tests.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 02c1a6bc..63baecd9 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -6,13 +6,14 @@ gate_bundles: - bionic-ussuri - bionic-train - bionic-stein - - bionic-rocky - bionic-queens - - xenial-queens - - xenial-pike - - xenial-ocata - xenial-mitaka +dev_bundles: - trusty-mitaka + - xenial-ocata + - xenial-pike + - xenial-queens + - bionic-rocky smoke_bundles: - bionic-train configure: From e901d40d193cd5227f4ea4420b2dbab7a993e910 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:45 -0700 Subject: [PATCH 2166/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. Change-Id: Ic1f970e727b8d738c859ddeb619929c9ce795c9f --- ceph-rbd-mirror/src/tests/tests.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 77454602..dad4b246 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -4,10 +4,7 @@ smoke_bundles: gate_bundles: - bionic-train - bionic-stein -- bionic-rocky - bionic-queens -- xenial-queens -- xenial-pike comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. @@ -16,8 +13,11 @@ dev_bundles: - focal-victoria - bionic-queens-e2e - bionic-queens-e2e-lxd +- bionic-rocky - bionic-ussuri - focal-ussuri +- xenial-queens +- xenial-pike configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: From 6fe08200668539ee86590dfad694ed1edfc3e33d Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:44 -0700 Subject: [PATCH 2167/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. 
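The same gate-pruning pattern repeats across patches 2163 through 2168; the effect is that only bundles under gate_bundles run in the voting gate, while dev_bundles stay available for on-demand runs. A rough sketch of how a test runner might select bundles from this layout (assumed miniature structure; zaza's real logic is more involved):

    import yaml

    # Assumed miniature of the tests.yaml layout these patches reshuffle.
    TESTS_YAML = ("gate_bundles: [bionic-train, bionic-stein]\n"
                  "dev_bundles: [xenial-ocata, xenial-queens]\n"
                  "smoke_bundles: [bionic-train]\n")

    def bundles(run_type):
        # Voting gate runs gate_bundles; dev_bundles are manual-only.
        return yaml.safe_load(TESTS_YAML).get(run_type + '_bundles', [])

    assert bundles('dev') == ['xenial-ocata', 'xenial-queens']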
Change-Id: I49d9d833b8cabd49b4caf3300066b593d354845c --- ceph-radosgw/tests/tests.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 3c9e4c14..7602108e 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -13,21 +13,21 @@ gate_bundles: - vault: bionic-train-namespaced - vault: bionic-stein - vault: bionic-stein-namespaced - - vault: bionic-rocky - - vault: bionic-rocky-namespaced - vault: bionic-queens - vault: bionic-queens-namespaced - - vault: xenial-queens - - xenial-pike - - xenial-ocata - xenial-mitaka - xenial-mitaka-namespaced - - trusty-mitaka smoke_bundles: - vault: bionic-ussuri dev_bundles: + - trusty-mitaka + - xenial-ocata + - xenial-pike + - vault: xenial-queens - bionic-queens-multisite - bionic-rocky-multisite + - vault: bionic-rocky + - vault: bionic-rocky-namespaced target_deploy_status: vault: workload-status: blocked From 7096428861aeccaffc850679d553fa00db924db5 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 19 Feb 2021 16:32:44 -0700 Subject: [PATCH 2168/2699] Prune gate tests Prune gate tests and move tests to dev_bundles. Change-Id: Id415916aaa3e89caafa25064ad5a2109c8d9ee57 --- ceph-proxy/tests/tests.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 0c7eb8a6..9849fca1 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -12,11 +12,8 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes gate_bundles: - - trusty-mitaka # jewel - xenial-mitaka # jewel - - xenial-queens # luminous - bionic-queens # luminous - - bionic-rocky # mimic - bionic-stein - bionic-train - bionic-ussuri @@ -31,9 +28,12 @@ dev_bundles: # Icehouse - trusty-icehouse # Jewel + - trusty-mitaka - xenial-ocata # Pike - xenial-pike + - xenial-queens # luminous + - bionic-rocky # mimic smoke_bundles: - focal-ussuri From 3105a0161e14434b3f85429cde01d09893a1dae8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 15 Feb 2021 09:10:39 +0100 Subject: [PATCH 2169/2699] Add new osci.yaml This change is preparatory to migration Ubuntu OpenStack CI from Jenkins to Zuul Change-Id: I16e82e0f295a9c0f6f21e4ff343e2f1afda1d82d --- ceph-proxy/osci.yaml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 ceph-proxy/osci.yaml diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml new file mode 100644 index 00000000..c63b99b5 --- /dev/null +++ b/ceph-proxy/osci.yaml @@ -0,0 +1,34 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - bionic-queens # luminous + - bionic-stein + - bionic-train + - bionic-ussuri + - focal-ussuri + - focal-ussuri-ec + - focal-victoria + - focal-victoria-ec + - groovy-victoria + - groovy-victoria-ec +- job: + name: focal-ussuri-ec + parent: func-target + dependencies: &smoke-jobs + - bionic-ussuri + vars: + tox_extra_args: erasure-coded:focal-ussuri-ec +- job: + name: focal-victoria-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:focal-victoria-ec +- job: + name: groovy-victoria-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:groovy-victoria-ec \ No newline at end of file From 87bf08a7bdbc0a6bb72a96418f0cd3f798c6ff88 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 6 Jan 2021 10:11:34 +0100 Subject: [PATCH 2170/2699] Add new osci.yaml This change is preparatory to migration 
Ubuntu OpenStack CI from Jenkins to Zuul Change-Id: I559aa794935acd63a0456d4ccbb33c9205a54fda --- ceph-fs/osci.yaml | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 ceph-fs/osci.yaml diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml new file mode 100644 index 00000000..4761d776 --- /dev/null +++ b/ceph-fs/osci.yaml @@ -0,0 +1,72 @@ +- job: + name: bionic-queens_local + parent: bionic-queens + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 +- job: + name: groovy-victoria-bluestore + parent: func-target + dependencies: &smoke-jobs + - bionic-queens_local + vars: + tox_extra_args: bluestore-compression:groovy-victoria +- job: + name: xenial-mitaka_local + parent: xenial-mitaka + dependencies: *smoke-jobs +- job: + name: focal-victoria-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:focal-victoria +- job: + name: focal-ussuri-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:focal-ussuri +- job: + name: bionic-ussuri-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:bionic-ussuri +- job: + name: bionic-train-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:bionic-train +- job: + name: bionic-stein-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:bionic-stein + +- project: + templates: + - charm-unit-jobs + # using overridden, shorter functional list because the charm does + # already in the tests.yaml + # - charm-functional-jobs + check: + jobs: + - bionic-queens_local + # Xenial-pike is missing because of + # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 + - xenial-mitaka_local + - groovy-victoria-bluestore + - focal-victoria-bluestore + - focal-ussuri-bluestore + - bionic-ussuri-bluestore + - bionic-train-bluestore + - bionic-stein-bluestore + vars: + needs_charm_build: true + charm_build_name: ceph-fs \ No newline at end of file From ac2ff0368e48ef925bec7cf589e6ed968fc3bd8c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 25 Feb 2021 10:29:44 +0100 Subject: [PATCH 2171/2699] Remove reference to ntp package This is motivated by the fact that NTP will be managed by operators through the NTP charm. In the other Ceph charms, NTP / Chrony are removed by the charm if the charm is in a container because the ceph-base package will install it; however, this is unneceessary for the radosgw package because it doesn't depend on the ceph-common package. 
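For contrast, the container-aware pattern the other Ceph charms use, and which this patch drops from ceph-radosgw, looks roughly like this (a simplified sketch; in the charms the container check comes from charmhelpers' is_container()):

    PACKAGES = ['haproxy', 'ntp', 'radosgw', 'apache2']

    def packages_to_install(in_container):
        # Inside a container, time sync is the host's (or the NTP
        # charm's) job, so ntp is dropped from the install list.
        pkgs = list(PACKAGES)
        if in_container:
            pkgs.remove('ntp')
        return pkgs

    assert 'ntp' not in packages_to_install(in_container=True)
    assert 'ntp' in packages_to_install(in_container=False)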
Change-Id: I53ab9ece631bbaeed6e3cebba4ffbbcb1e32193d Closes-Bug: #1915351 --- ceph-radosgw/hooks/hooks.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 0371b785..bb833538 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -52,7 +52,6 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.core.host import ( cmp_pkgrevno, - is_container, service, service_pause, service_reload, @@ -111,7 +110,6 @@ PACKAGES = [ 'haproxy', - 'ntp', 'radosgw', 'apache2' ] @@ -145,9 +143,6 @@ def install_packages(): add_source(c.get('source'), c.get('key')) apt_update(fatal=True) - if is_container(): - PACKAGES.remove('ntp') - # NOTE: just use full package list if we're in an upgrade # config-changed execution pkgs = ( From 94b77b377943a18e5c09327399a57680872fbbe8 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 2 Mar 2021 12:56:11 -0500 Subject: [PATCH 2172/2699] Review action usage sections in README * Correct and improve in/out and stop/start actions * Make examples for all actions more consistent * Add link to Charmed Ceph documentation * Add a bug reference for the zap-disk action * Small miscellaenous improvements Change-Id: Iafa3cfce4c5b3eff599b662bfe2cfc3d9183cad3 --- ceph-osd/README.md | 94 +++++++++++++++++++++++++++++----------------- 1 file changed, 59 insertions(+), 35 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index d8727bf9..5404838a 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -234,8 +234,8 @@ options `osd-encrypt` and `osd-encrypt-keymanager` for the ceph-osd charm: This section covers Juju [actions][juju-docs-actions] supported by the charm. Actions allow specific operations to be performed on a per-unit basis. To -display action descriptions run `juju actions ceph-osd`. If the charm is not -deployed then see file `actions.yaml`. +display action descriptions run `juju actions --schema ceph-osd`. If the charm +is not deployed then see file `actions.yaml`. * `add-disk` * `blacklist-add-disk` @@ -252,23 +252,22 @@ deployed then see file `actions.yaml`. ### Set OSDs to 'out' -Use the `osd-out` action to set all OSD volumes on a unit to 'out'. +Use the `osd-out` action to set OSD volumes on a unit to 'out'. > **Warning**: This action has the potential of impacting your cluster significantly. The [Ceph documentation][ceph-docs-removing-osds] on this topic is considered essential reading. -The `osd-out` action sets **all** OSDs on the unit as 'out'. Unless the cluster -itself is set to 'noout' this action will cause Ceph to rebalance data by -migrating PGs out of the unit's OSDs and onto OSDs available on other units. -The impact is twofold: +Unless the cluster itself is set to 'noout' this action will cause Ceph to +rebalance data by migrating PGs out of the affected OSDs and onto OSDs +available on other units. The impact is twofold: 1. The available space on the remaining OSDs is reduced. Not only is there less space for future workloads but there is a danger of exceeding the cluster's storage capacity. 1. The traffic and CPU load on the cluster is increased. -> **Note**: It has been reported that setting OSDs as 'out' may cause some PGs +> **Note**: It has been reported that setting OSDs to 'out' may cause some PGs to get stuck in the 'active+remapped' state. This is an upstream issue. 
The [ceph-mon][ceph-mon-charm] charm has an action called `set-noout` that sets @@ -279,40 +278,47 @@ whether the OSDs are being paused temporarily (e.g. the underlying machine is scheduled for maintenance) or whether they are being removed from the cluster completely (e.g. the storage hardware is reaching EOL). -Example: +Examples: + # Set OSDs '0' and '1' to 'out' on unit `ceph-osd/4` juju run-action --wait ceph-osd/4 osd-out osds=osd.0,osd.1 + # Set all OSDs to 'out' on unit `ceph-osd/2` + juju run-action --wait ceph-osd/2 osd-out osds=all + ### Set OSDs to 'in' -Use the `osd-in` action to set all OSD volumes on a unit to 'in'. +Use the `osd-in` action to set OSD volumes on a unit to 'in'. The `osd-in` action is reciprocal to the `osd-out` action. The OSDs are set to 'in'. It is typically used when the `osd-out` action was used in conjunction with the cluster 'noout' flag. -Example: +Examples: - juju run-action --wait ceph-osd/4 osd-in osds=all + # Set OSDs '0' and '1' to 'in' on unit `ceph-osd/4` + juju run-action --wait ceph-osd/4 osd-in osds=osd.0,osd.1 -### Managing ceph OSDs + # Set all OSDs to 'in' on unit `ceph-osd/2` + juju run-action --wait ceph-osd/2 osd-in osds=all -Use the `stop` and `start` actions to manage ceph OSD services within the unit. -Both actions take one parameter, `osds`, which should contain comma-separated -numerical IDs of `ceph-osd` services or the keyword `all`. +### Stop and start OSDs -Example: +Use the `stop` and `start` actions to stop and start OSD daemons on a unit. + +> **Important**: These actions are not available on the 'trusty' series due to + the reliance on `systemd`. + +Examples: + + # Stop services 'ceph-osd@0' and 'ceph-osd@1' on unit `ceph-osd/4` + juju run-action --wait ceph-osd/4 stop osds=0,1 - # stop ceph-osd@0 and ceph-osd@1 - juju run-action --wait ceph-osd/0 stop osds=0,1 - # start all ceph-osd services on the unit - juju run-action --wait ceph-osd/0 start osds=all + # Start all ceph-osd services on unit `ceph-osd/2` + juju run-action --wait ceph-osd/2 start osds=all - > **Note**: Stopping ceph-osd services will put the unit into the blocked - state. - - > **Important**: This action is not available on Trusty due to reliance on - systemd. +> **Note**: Stopping an OSD daemon will put the associated unit into a blocked + state. ## Working with disks @@ -332,6 +338,7 @@ The action lists the unit's block devices by categorising them in three ways: Example: + # List disks on unit `ceph-osd/4` juju run-action --wait ceph-osd/4 list-disks ### Add a disk @@ -357,6 +364,7 @@ operator to manually add OSD volumes (for disks that are not listed by Example: + # Add disk /dev/vde on unit `ceph-osd/4` juju run-action --wait ceph-osd/4 add-disk osd-devices=/dev/vde ### Blacklist a disk @@ -383,6 +391,7 @@ Use the `list-disks` action to list the unit's blacklist entries. Example: + # Blacklist disks /dev/vda and /dev/vdf on unit `ceph-osd/0` juju run-action --wait ceph-osd/0 \ blacklist-add-disk osd-devices='/dev/vda /dev/vdf' @@ -403,6 +412,7 @@ Each device should have an existing entry in the unit's blacklist. Use the Example: + # Un-blacklist disk /dev/vdb on unit `ceph-osd/1` juju run-action --wait ceph-osd/1 \ blacklist-remove-disk osd-devices=/dev/vdb @@ -411,10 +421,10 @@ Example: Use the `zap-disk` action to purge a disk of all data. In order to prevent unintentional data loss, the charm will not use a disk that -has existing data already on it. To forcibly make a disk available, the -`zap-disk` action can be used. 
Due to the destructive nature of this action the -`i-really-mean-it` option must be passed. This action is normally followed by -the `add-disk` action. +contains data. To forcibly make a disk available, the `zap-disk` action can be +used. Due to the destructive nature of this action the `i-really-mean-it` +option must be passed. This action is normally followed by the `add-disk` +action. **Parameters** @@ -426,27 +436,40 @@ the `add-disk` action. * `i-really-mean-it` (required) - An option that acts as a confirmation for performing the action. + A boolean option for confirming the action. Example: - juju run-action --wait ceph-osd/3 zap-disk i-really-mean-it=true devices=/dev/vdc + # Zap disk /dev/vdc on unit `ceph-osd/3` + juju run-action --wait ceph-osd/3 \ + zap-disk i-really-mean-it=true devices=/dev/vdc > **Note**: The `zap-disk` action cannot be run on a mounted device, an active - Bluestore device, or an encrypted device. + BlueStore device, or an encrypted device. There are also issues with + LVM-backed volumes (see [LP #1858519][lp-bug-1858519]). + +# Documentation + +The OpenStack Charms project maintains two documentation guides: + +* [OpenStack Charm Guide][cg]: for project information, including development + and support notes +* [OpenStack Charms Deployment Guide][cdg]: for charm usage information + +See also the [Charmed Ceph documentation][charmed-ceph-docs]. # Bugs Please report bugs on [Launchpad][lp-bugs-charm-ceph-osd]. -For general charm questions refer to the OpenStack [Charm Guide][cg]. - [ceph-upstream]: https://ceph.io [cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide [ceph-mon-charm]: https://jaas.ai/ceph-mon [vault-charm]: https://jaas.ai/vault +[charmed-ceph-docs]: https://ubuntu.com/ceph/docs [juju-docs-storage]: https://jaas.ai/docs/storage [juju-docs-actions]: https://jaas.ai/docs/actions [juju-docs-spaces]: https://jaas.ai/docs/spaces @@ -458,3 +481,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. [upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets [upstream-ceph-bluestore]: https://docs.ceph.com/en/latest/rados/configuration/storage-devices/#bluestore [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA +[lp-bug-1858519]: https://bugs.launchpad.net/charm-ceph-osd/+bug/1858519 From 13075827614a11a6acbd5283aec2559c5ad07718 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2173/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). Change-Id: Ide508d37e11b37d2c4d19942fbeef9655390fae9 --- ceph-fs/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index fc444e36..ca022a0a 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -14,6 +14,7 @@ series: - bionic - focal - groovy +- hirsute subordinate: false requires: ceph-mds: From 0f492ac5ec2f61c660d51e7c367f203bd6efa1d1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2174/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). 
Change-Id: Ib160d6ff913702486c9a9d81252d0129e8da79a9 --- ceph-mon/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 8a9343c9..3ec5a622 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -13,8 +13,8 @@ series: - xenial - bionic - focal -- trusty - groovy +- hirsute peers: mon: interface: ceph From 62877ec5a47d3398b42cfa53ecccb5d39ba3480f Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2175/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). Change-Id: Ie74ae1fde538b048a07a067ec67b6b3b26f22de6 --- ceph-osd/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 76e841fe..8fa6e056 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -14,8 +14,8 @@ series: - xenial - bionic - focal -- trusty - groovy +- hirsute description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From f466a7da5103c6889ca8f0e064f112092f02b920 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2176/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). Change-Id: I744188799db42ea9960ea35c1e83e773c6a2ee36 --- ceph-proxy/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index a614e64d..4defc7ae 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -13,8 +13,8 @@ series: - xenial - bionic - focal -- trusty - groovy +- hirsute extra-bindings: public: cluster: From 3594c897b5744c584b4133f715d8d546e9f64ecc Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2177/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). Change-Id: If18475dd9a99c4a286bb2f5913f3ec4f2447c4f4 --- ceph-radosgw/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a106435b..d54e017c 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -16,8 +16,8 @@ series: - xenial - bionic - focal -- trusty - groovy +- hirsute extra-bindings: public: admin: From 64bc97b7295b7a637ac0f9843b49a929f3f0629e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 2 Mar 2021 20:50:25 +0000 Subject: [PATCH 2178/2699] Add hirsute and remove trusty from metadata.yaml This update adds the new hirsute Ubuntu release (21.04) and removes trusty support (14.04 which is EOL at 21.04). 
Change-Id: I7fce00f968b540d0a5824a2df262f0195bae8739 --- ceph-rbd-mirror/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index e9c96ecd..20f9742b 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -19,6 +19,7 @@ series: - bionic - focal - groovy +- hirsute extra-bindings: public: cluster: From a57fbed7c8b7bea706fe8b6f9c96eda36c15f1a6 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 8 Mar 2021 19:21:47 +0000 Subject: [PATCH 2179/2699] Handle requests if they have errored When checking if a request is a duplicate handle case where the request has errored and does not have an id. Closes-Bug: #1918143 Change-Id: I314b3658b57fdfaa77f4c1a0c1139e6b7dd4b1c4 --- ceph-mon/hooks/ceph_hooks.py | 2 +- ceph-mon/unit_tests/test_ceph_hooks.py | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 8af31cae..60c021dc 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -625,7 +625,7 @@ def req_already_treated(request_id, relid, req_unit): return False else: data = status[response_key] - if data['request-id'] == request_id: + if data.get('request-id') == request_id: return True return False diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 97fbcde4..52ac16b2 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -627,6 +627,31 @@ def test_multi_broker_req_handled_on_rel(self, process_requests, recurse=False) process_requests.assert_called_once_with({'request-id': '2'}) + @patch.object(ceph_hooks, 'relation_ids') + @patch.object(ceph_hooks, 'local_unit') + @patch.object(ceph_hooks, 'relation_get') + @patch.object(ceph_hooks.ceph, 'is_leader') + @patch.object(ceph_hooks, 'process_requests') + def test_multi_broker_req_handled_on_rel_errored(self, process_requests, + is_leader, + relation_get, + local_unit, + _relation_ids): + is_leader.return_value = True + relation_get.side_effect = [ + { + 'broker_req': {'request-id': '2'}}, + { + 'broker-rsp-glance-0': { + 'exit-code': 1, + 'stderr': 'Unexpected error'}}] + + local_unit.return_value = "mon/0" + ceph_hooks.handle_broker_request(relid='rel1', + unit='glance/0', + recurse=False) + process_requests.assert_called_once_with({'request-id': '2'}) + class BootstrapSourceTestCase(test_utils.CharmTestCase): From 70a23fe9fda26417cb0553f515d7e39e26453d53 Mon Sep 17 00:00:00 2001 From: Ionut Balutoiu Date: Fri, 22 Jan 2021 18:11:57 +0200 Subject: [PATCH 2180/2699] Cinder Ceph Replication tests * Add optional parameter `pools` to the Juju actions: `resync-pools`, `status`, `demote`, and `promote`. * Add `bionic-train-image-mirroring` to the `gate_bundles`. 
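Before the replication test changes below, it is worth condensing what the broker-request deduplication and the errored-request fix in patch 2179 add up to: a stored response may arrive as a dict or as a JSON string, and an errored response carries no 'request-id' at all, hence the switch to data.get(). A self-contained sketch of that check (not the charm code itself):

    import json

    def already_treated(request_id, response_blob):
        # Responses arrive as dicts or JSON strings; errored responses
        # (exit-code/stderr only) lack 'request-id', so .get() is used.
        if isinstance(response_blob, str):
            try:
                response_blob = json.loads(response_blob)
            except (TypeError, json.JSONDecodeError):
                return False
        return response_blob.get('request-id') == request_id

    assert already_treated('1', '{"request-id": "1"}')
    assert not already_treated('2', {'exit-code': 1, 'stderr': 'Unexpected error'})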
Change-Id: I9df77be628718bb60a1bdc65078c94d0b7bda9d4 Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/486 --- ceph-rbd-mirror/src/actions.yaml | 29 ++- ceph-rbd-mirror/src/actions/actions.py | 22 +- .../bundles/bionic-train-image-mirroring.yaml | 120 +++++++++ .../bionic-ussuri-image-mirroring.yaml | 120 +++++++++ .../bundles/focal-ussuri-image-mirroring.yaml | 233 ++++++++++++++++++ .../focal-victoria-image-mirroring.yaml | 171 +++++++++++++ .../groovy-victoria-image-mirroring.yaml | 171 +++++++++++++ .../src/tests/bundles/groovy-victoria.yaml | 1 + ceph-rbd-mirror/src/tests/tests.yaml | 6 + ceph-rbd-mirror/unit_tests/test_actions.py | 46 +++- 10 files changed, 909 insertions(+), 10 deletions(-) create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml diff --git a/ceph-rbd-mirror/src/actions.yaml b/ceph-rbd-mirror/src/actions.yaml index 972de6cb..dccfc81e 100644 --- a/ceph-rbd-mirror/src/actions.yaml +++ b/ceph-rbd-mirror/src/actions.yaml @@ -1,15 +1,25 @@ demote: description: | - Demote all primary images within all pools to non-primary. + Demote all primary images within given pools to non-primary. params: force: type: boolean + pools: + type: string + description: | + Comma-separated list of pools to demote. If this is not set, all the + pools will be demoted. promote: description: | - Promote all non-primary images within all pools to primary. + Promote all non-primary images within given pools to primary. params: force: type: boolean + pools: + type: string + description: | + Comma-separated list of pools to promote. If this is not set, all the + pools will be promoted. refresh-pools: description: | \ @@ -19,13 +29,19 @@ refresh-pools: resync-pools: description: | \ - USE WITH CAUTION - Force image resync for all images in pools on local - Ceph endpoint. + USE WITH CAUTION - Force image resync for all images in the given + pools on local Ceph endpoint. params: i-really-mean-it: type: boolean description: | This must be set to true to perform the action + pools: + type: string + description: | + Comma-separated list of pools to resync from the local Ceph endpoint. + If this is not set, all the pools from the local Ceph endpoint will + be resynced. required: - i-really-mean-it status: @@ -41,3 +57,8 @@ status: - plain - json - xml + pools: + type: string + description: | + Comma-separated list of pools to include in the status. If this is + not set, all the pools will be included. 
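The actions.py hunk that follows implements the parameter described above; stripped to its essentials, the parsing contract is as below (a sketch mirroring the new get_pools helper, with the fall-back to all eligible pools handled by the callers):

    def parse_pools(value):
        # An unset/empty parameter means "no explicit selection"; the
        # caller then falls back to every pool eligible for mirroring.
        if value:
            return [p.strip() for p in value.split(',')]
        return None

    assert parse_pools('apool, bpool') == ['apool', 'bpool']
    assert parse_pools(None) is None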
diff --git a/ceph-rbd-mirror/src/actions/actions.py b/ceph-rbd-mirror/src/actions/actions.py index caa48888..92465f77 100755 --- a/ceph-rbd-mirror/src/actions/actions.py +++ b/ceph-rbd-mirror/src/actions/actions.py @@ -40,12 +40,22 @@ charms_openstack.bus.discover() +def get_pools(): + """Get the list of pools given as parameter to perform the actions on.""" + pools = ch_core.hookenv.action_get('pools') + if pools: + return [p.strip() for p in pools.split(',')] + return None + + def rbd_mirror_action(args): """Perform RBD command on pools in local Ceph endpoint.""" action_name = os.path.basename(args[0]) with charms_openstack.charm.provide_charm_instance() as charm: ceph_local = reactive.endpoint_from_name('ceph-local') - pools = charm.eligible_pools(ceph_local.pools) + pools = get_pools() + if not pools: + pools = charm.eligible_pools(ceph_local.pools) result = {} cmd = ['rbd', '--id', charm.ceph_id, 'mirror', 'pool', action_name] if ch_core.hookenv.action_get('force'): @@ -103,7 +113,9 @@ def resync_pools(args): return with charms_openstack.charm.provide_charm_instance() as charm: ceph_local = reactive.endpoint_from_name('ceph-local') - pools = charm.eligible_pools(ceph_local.pools) + pools = get_pools() + if not pools: + pools = charm.eligible_pools(ceph_local.pools) result = collections.defaultdict(dict) for pool in pools: # list images in pool @@ -112,6 +124,12 @@ def resync_pools(args): '-p', pool, 'ls'], universal_newlines=True) images = json.loads(output) for image in images: + output = subprocess.check_output( + ['rbd', '--id', charm.ceph_id, '--format', 'json', 'info', + '{}/{}'.format(pool, image)], universal_newlines=True) + image_info = json.loads(output) + if image_info['mirroring']['state'] == 'disabled': + continue output = subprocess.check_output( ['rbd', '--id', charm.ceph_id, 'mirror', 'image', 'resync', '{}/{}'.format(pool, image)], universal_newlines=True) diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml new file mode 100644 index 00000000..80b210c0 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml @@ -0,0 +1,120 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-train + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-train + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-train + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: cloud:bionic-train + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-train + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-train + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-train + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 
+ options: + source: cloud:bionic-train + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-train + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-train + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-train +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - glance +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml new file mode 100644 index 00000000..3e95360a --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml @@ -0,0 +1,120 @@ +series: bionic +applications: + mysql: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + source: cloud:bionic-ussuri + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: cloud:bionic-ussuri + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: cloud:bionic-ussuri + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: cloud:bionic-ussuri + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-ussuri + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-ussuri + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-ussuri + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: cloud:bionic-ussuri + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: cloud:bionic-ussuri + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + ceph-rbd-mirror-b: + series: bionic + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: cloud:bionic-ussuri +relations: +- - mysql + - keystone +- - mysql + - cinder +- - mysql + - glance +- - rabbitmq-server + - cinder +- - keystone + - cinder +- - keystone + - 
glance +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml new file mode 100644 index 00000000..ea148e3d --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml @@ -0,0 +1,233 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + '20': + '21': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '3' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '4' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + to: + - '5' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '6' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + to: + - '7' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '8' + - '9' + - '10' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + to: + - '11' + - '12' + - '13' + + ceph-rbd-mirror: + series: focal + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + to: + - '14' + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '15' + - '16' + - '17' + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + #bluestore: False + #use-direct-io: False + storage: + osd-devices: '10G' + to: + - '18' + - '19' + - '20' + + ceph-rbd-mirror-b: + series: focal + charm: ../../../ceph-rbd-mirror + num_units: 1 + 
options: + source: *openstack-origin + to: + - '21' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'rabbitmq-server' + - 'cinder' + + - - 'keystone' + - 'cinder' + + - - 'keystone' + - 'glance' + + - - 'cinder' + - 'cinder-ceph' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'cinder-ceph:ceph-replication-device' + - 'ceph-mon-b:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance' + - 'ceph-mon' + + - - 'ceph-mon:osd' + - 'ceph-osd:mon' + + - - 'ceph-mon' + - 'ceph-rbd-mirror:ceph-local' + + - - 'ceph-mon' + - 'ceph-rbd-mirror-b:ceph-remote' + + - - 'ceph-mon-b:osd' + - 'ceph-osd-b:mon' + + - - 'ceph-mon-b' + - 'ceph-rbd-mirror-b:ceph-local' + + - - 'ceph-mon-b' + - 'ceph-rbd-mirror:ceph-remote' diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml new file mode 100644 index 00000000..2d8d4337 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml @@ -0,0 +1,171 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-victoria + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: 
*openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml new file mode 100644 index 00000000..90ad5b04 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml @@ -0,0 +1,171 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series groovy + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: 
*openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml index f0eb067a..8c1453ac 100644 --- a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml @@ -48,6 +48,7 @@ applications: options: block-device: None glance-api-version: 2 + openstack-origin: *openstack-origin cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index dad4b246..abbdd2b2 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -3,6 +3,7 @@ smoke_bundles: - bionic-train gate_bundles: - bionic-train +- bionic-train-image-mirroring - bionic-stein - bionic-queens comment: | @@ -10,12 +11,17 @@ comment: | the functional tests. dev_bundles: - groovy-victoria +- groovy-victoria-image-mirroring - focal-victoria +- focal-victoria-image-mirroring - bionic-queens-e2e - bionic-queens-e2e-lxd - bionic-rocky - bionic-ussuri +# This is a dev bundle because we hit https://bugs.launchpad.net/charm-ceph-rbd-mirror/+bug/1892201. 
+- bionic-ussuri-image-mirroring - focal-ussuri +- focal-ussuri-image-mirroring - xenial-queens - xenial-pike configure: diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index 1ec4bcc7..d2349bf5 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -55,6 +55,7 @@ def test_rbd_mirror_action(self): self.endpoint_from_name.assert_called_once_with('ceph-local') self.crm_charm.eligible_pools.assert_called_once_with(endpoint.pools) self.action_get.assert_has_calls([ + mock.call('pools'), mock.call('force'), mock.call('verbose'), mock.call('format'), @@ -76,7 +77,7 @@ def test_rbd_mirror_action(self): sorted(self.action_set.call_args[0][0]['output'].split('\n')), ['apool: Promoted 0 mirrored images', 'bpool: Promoted 0 mirrored images']) - self.action_get.side_effect = [True, True, False] + self.action_get.side_effect = [None, True, True, False] self.check_output.reset_mock() actions.rbd_mirror_action(['promote']) self.check_output.assert_has_calls([ @@ -90,6 +91,21 @@ def test_rbd_mirror_action(self): universal_newlines=True), ], any_order=True) self.action_get.assert_has_calls([ + mock.call('pools'), + mock.call('force'), + mock.call('verbose'), + mock.call('format'), + ]) + self.action_get.side_effect = ['apool', True, True, False] + self.check_output.reset_mock() + actions.rbd_mirror_action(['promote']) + self.check_output.assert_called_once_with( + ['rbd', '--id', 'acephid', 'mirror', 'pool', 'promote', + '--force', '--verbose', 'apool'], + stderr=actions.subprocess.STDOUT, + universal_newlines=True) + self.action_get.assert_has_calls([ + mock.call('pools'), mock.call('force'), mock.call('verbose'), mock.call('format'), @@ -123,16 +139,38 @@ def test_resync_pools(self): self.endpoint_from_name.return_value = endpoint self.crm_charm.eligible_pools.return_value = endpoint.pools self.crm_charm.ceph_id = 'acephid' - self.action_get.return_value = False + self.action_get.side_effect = [False, None] actions.resync_pools([]) + self.action_get.assert_has_calls([ + mock.call('i-really-mean-it'), + ]) self.assertFalse(self.check_output.called) self.assertFalse(self.action_set.called) - self.action_get.return_value = True + self.action_get.side_effect = [True, 'bpool'] + self.check_output.return_value = json.dumps([]) + actions.resync_pools([]) + self.action_get.assert_has_calls([ + mock.call('i-really-mean-it'), + mock.call('pools'), + ]) + self.check_output.assert_called_once_with( + ['rbd', '--id', 'acephid', '--format', 'json', + '-p', 'bpool', 'ls'], + universal_newlines=True) + self.action_set.assert_called_once_with({'output': ''}) + self.action_get.side_effect = [True, None] self.check_output.side_effect = [ - json.dumps(['imagea']), + json.dumps(['imagea', 'imageb']), + json.dumps({'mirroring': {'state': 'enabled'}}), 'resync flagged for imagea\n', + json.dumps({'mirroring': {'state': 'disabled'}}), ] + self.check_output.reset_mock() actions.resync_pools([]) + self.action_get.assert_has_calls([ + mock.call('i-really-mean-it'), + mock.call('pools'), + ]) self.assertEquals( sorted(self.action_set.call_args[0][0]['output'].split('\n')), ['apool/imagea: resync flagged for imagea']) From b1a3422b134a35ebf44dde03b1a97673fdb5cbb5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 16 Mar 2021 10:38:45 +0100 Subject: [PATCH 2181/2699] Update get_mon_hosts to not specify mon ports Update get_mon_hosts to avoid specifying the port number for the MON daemons; we use the default so this is not 
required, and at Nautilus the MON daemons run both v1 and v2 messenger ports. Specifying the port in the ceph.conf file disables the v2 messenger port which is not the desired behaviour on upgrade or new installation. Change-Id: I5333cc85f755771733fe09e963d5523fd93c16d2 Closes-Bug: #1917742 --- ceph-osd/hooks/ceph_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index d562872d..c84068c8 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -583,7 +583,7 @@ def get_mon_hosts(): relid)) if addr: - hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr)) + hosts.append('{}'.format(format_ipv6_addr(addr) or addr)) return sorted(hosts) From e0069b5c7fb535571692053b2896db9a635029ff Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 15 Feb 2021 09:10:39 +0100 Subject: [PATCH 2182/2699] Add new osci.yaml This change is preparatory to migration Ubuntu OpenStack CI from Jenkins to Zuul Change-Id: I16e82e0f295a9c0f6f21e4ff343e2f1afda1d82d --- ceph-radosgw/osci.yaml | 129 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 ceph-radosgw/osci.yaml diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml new file mode 100644 index 00000000..7f04a868 --- /dev/null +++ b/ceph-radosgw/osci.yaml @@ -0,0 +1,129 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - vault-groovy-victoria_rgw + - vault-groovy-victoria-namespaced + - vault-focal-victoria_rgw + - vault-focal-victoria-namespaced + - vault-focal-ussuri-ec + - vault-focal-ussuri_rgw + - vault-focal-ussuri-namespaced + - vault-bionic-ussuri + - vault-bionic-ussuri-namespaced + - vault-bionic-train + - vault-bionic-train-namespaced + - vault-bionic-stein + - vault-bionic-stein-namespaced + - vault-bionic-queens + - vault-bionic-queens-namespaced + - xenial-mitaka_rgw + - xenial-mitaka-namespaced + +- job: + name: vault-bionic-ussuri + parent: func-target + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 + vars: + tox_extra_args: vault:bionic-ussuri +- job: + name: vault-groovy-victoria_rgw + parent: func-target + dependencies: &smoke-jobs + - vault-bionic-ussuri + vars: + tox_extra_args: vault:groovy-victoria +- job: + name: vault-groovy-victoria-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:groovy-victoria-namespaced +- job: + name: vault-focal-victoria_rgw + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-victoria +- job: + name: vault-focal-victoria-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-victoria-namespaced +- job: + name: vault-focal-ussuri-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-ussuri-ec +- job: + name: vault-focal-ussuri_rgw + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-ussuri +- job: + name: vault-focal-ussuri-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-ussuri-namespaced +- job: + name: vault-bionic-ussuri-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-ussuri-namespaced +- job: + name: vault-bionic-train + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-train +- job: + name: vault-bionic-train-namespaced + parent: func-target + dependencies: 
*smoke-jobs + vars: + tox_extra_args: vault:bionic-train-namespaced +- job: + name: vault-bionic-stein + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-stein +- job: + name: vault-bionic-stein-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-stein-namespaced +- job: + name: vault-bionic-queens + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-queens +- job: + name: vault-bionic-queens-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:bionic-queens-namespaced +- job: + name: xenial-mitaka_rgw + parent: xenial-mitaka + dependencies: *smoke-jobs +- job: + name: xenial-mitaka-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: xenial-mitaka-namespaced \ No newline at end of file From fc77ceaf221c318bfcce8ae75c3149e59061aab7 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 15 Feb 2021 09:10:39 +0100 Subject: [PATCH 2183/2699] Add new osci.yaml This change is preparatory to migration Ubuntu OpenStack CI from Jenkins to Zuul Change-Id: I16e82e0f295a9c0f6f21e4ff343e2f1afda1d82d --- ceph-rbd-mirror/osci.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 ceph-rbd-mirror/osci.yaml diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml new file mode 100644 index 00000000..e7ae6cc0 --- /dev/null +++ b/ceph-rbd-mirror/osci.yaml @@ -0,0 +1,30 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - bionic-train_ceph-rbd-mirror + - bionic-stein_ceph-rbd-mirror + - bionic-queens_ceph-rbd-mirror + vars: + needs_charm_build: true + charm_build_name: ceph-rbd-mirror + +- job: + name: bionic-train_ceph-rbd-mirror + parent: bionic-train + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 +- job: + name: bionic-queens_ceph-rbd-mirror + parent: bionic-queens + dependencies: &smoke-jobs + - bionic-train_ceph-rbd-mirror +- job: + name: bionic-stein_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs \ No newline at end of file From 01a09da1d351566114095aae17e46eecd507da83 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 24 Mar 2021 08:29:12 +0100 Subject: [PATCH 2184/2699] Add bindep to install required test dependencies Change-Id: I245a718152454921d9d7959ef2465e38735a772d --- ceph-mon/bindep.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 ceph-mon/bindep.txt diff --git a/ceph-mon/bindep.txt b/ceph-mon/bindep.txt new file mode 100644 index 00000000..ba2ccb4b --- /dev/null +++ b/ceph-mon/bindep.txt @@ -0,0 +1,4 @@ +libxml2-dev [platform:dpkg test] +libxslt1-dev [platform:dpkg test] +build-essential [platform:dpkg test] +zlib1g-dev [platform:dpkg test] From 798ca49b2f836fd8e0808fc73de039e073a8e91a Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Wed, 24 Mar 2021 10:26:49 -0300 Subject: [PATCH 2185/2699] Close previously opened ports on port config change When the charm config option `port` is changed, the previously opened port is not closed. This leads to leaks of open ports (potential security issue), and long ports field on status after tests: Test: $ juju config ceph-radosgw port=1111 $ juju config ceph-radosgw port=2222 $ juju config ceph-radosgw port=3333 $ juju status ceph-radosgw ... Unit Workload Agent Machine Public address Ports Message ceph-radosgw/1* blocked idle 3 10.5.2.210 80/tcp,1111/tcp,2222/tcp,3333/tcp Missing relations: mon ... 
$ juju run --unit ceph-radosgw/1 'opened-ports' 80/tcp 1111/tcp 2222/tcp 3333/tcp Patched: $ juju run --unit ceph-radosgw/1 'opened-ports' 80/tcp 1111/tcp 1234/tcp 2222/tcp 3333/tcp 33331/tcp 33332/tcp 33334/tcp $ juju config ceph-radosgw port=33335 $ juju run --unit ceph-radosgw/1 'opened-ports' 33335/tcp $ juju status ceph-radosgw ... Unit Workload Agent Machine Public address Ports Message ceph-radosgw/1* blocked idle 3 10.5.2.210 33335/tcp Missing relations: mon @ unit log 2021-03-24 13:20:51 INFO juju-log Closed port 80 in favor of port 33335 2021-03-24 13:20:51 INFO juju-log Closed port 1111 in favor of port 33335 2021-03-24 13:20:51 INFO juju-log Closed port 1234 in favor of port 33335 2021-03-24 13:20:51 INFO juju-log Closed port 2222 in favor of port 33335 2021-03-24 13:20:52 INFO juju-log Closed port 3333 in favor of port 33335 2021-03-24 13:20:52 INFO juju-log Closed port 33331 in favor of port 33335 2021-03-24 13:20:52 INFO juju-log Closed port 33332 in favor of port 33335 2021-03-24 13:20:52 INFO juju-log Closed port 33334 in favor of port 33335 Signed-off-by: Mauricio Faria de Oliveira Closes-bug: #1921131 Change-Id: I5ac4b66137faffee82ae0f1e13718f21274f1f56 --- ceph-radosgw/hooks/hooks.py | 11 ++++++++++- ceph-radosgw/unit_tests/test_hooks.py | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index bb833538..c9e5bf4e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -32,6 +32,8 @@ related_units, config, open_port, + opened_ports, + close_port, relation_set, log, DEBUG, @@ -247,7 +249,14 @@ def _config_changed(): update_nrpe_config() - open_port(port=listen_port()) + port = listen_port() + open_port(port) + for opened_port in opened_ports(): + opened_port_number = opened_port.split('/')[0] + if opened_port_number != port: + close_port(opened_port_number) + log('Closed port %s in favor of port %s' % + (opened_port_number, port)) _config_changed() diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 7d9e9cfe..796070dc 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -40,6 +40,7 @@ 'listen_port', 'log', 'open_port', + 'opened_ports', 'os', 'relation_ids', 'relation_set', From d343c80a3cde5873216dcf6dfaf8d89a264983e9 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 14 Dec 2020 10:33:41 +0100 Subject: [PATCH 2186/2699] Add new osci.yaml This change is preparatory to migration Ubuntu OpenStack CI from Jenkins to Zuul Change-Id: Ia40af37310f73a25f609c067bcac678bb00064c2 --- ceph-mon/osci.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 ceph-mon/osci.yaml diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml new file mode 100644 index 00000000..5a117058 --- /dev/null +++ b/ceph-mon/osci.yaml @@ -0,0 +1,22 @@ +- project: + templates: + - charm-unit-jobs + - charm-functional-jobs + check: + jobs: + - focal-ussuri-ec-ceph-mon + - bionic-train-with-fsid + +- job: + name: focal-ussuri-ec-ceph-mon + parent: func-target + dependencies: &smoke-jobs + - bionic-ussuri + vars: + tox_extra_args: focal-ussuri-ec +- job: + name: bionic-train-with-fsid + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bionic-train-with-fsid From c80fd9b02b95ad1c3e459e80bc7eeb75b4375cf4 Mon Sep 17 00:00:00 2001 From: Hemanth Nakkina Date: Tue, 2 Feb 2021 14:16:29 +0530 Subject: [PATCH 2187/2699] optimizations to reduce charm upgrade time charm upgrades 
take longer for ceph-mon to reach the idle state when there are larger numbers of OSDs and ceph clients, whenever there is a change in osd-relation data. In these cases osd_relation triggers notify_client(), which takes a significant amount of time because client_relation() is executed for every related unit on each client relation. Some of the function calls and ceph commands can instead be executed once per relation, or once per notify_client() call, rather than once for every related unit. ceph.get_named_key() is currently executed for every related unit, and each execution is slow because it runs a minimum of two ceph commands. This patch reduces the number of calls to ceph.get_named_key() to once per relation. Partial-Bug: #1913992 Change-Id: Ic455cd7c4876efafee221bc6e7a5ec61fee5643f --- ceph-mon/hooks/ceph_hooks.py | 87 +++++++++++++++++++----- ceph-mon/unit_tests/test_ceph_hooks.py | 94 +++++++++++++++++++++----- 2 files changed, 147 insertions(+), 34 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 60c021dc..381c887e 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -580,10 +580,72 @@ def notify_rbd_mirrors(): rbd_mirror_relation(relid=relid, unit=unit, recurse=False) +def _get_ceph_info_from_configs(): + """Create dictionary of ceph information required to set client relation. + + :returns: Dictionary of ceph configurations needed for client relation + :rtype: dict + """ + public_addr = get_public_addr() + rbd_features = get_rbd_features() + data = { + 'auth': config('auth-supported'), + 'ceph-public-address': public_addr + } + if rbd_features: + data['rbd-features'] = rbd_features + return data + + +def _handle_client_relation(relid, unit, data=None): + """Handle broker request and set the relation data + + :param relid: Relation ID + :type relid: str + :param unit: Unit name + :type unit: str + :param data: Initial relation data + :type data: dict + """ + if data is None: + data = {} + + if is_unsupported_cmr(unit): + return + data.update( + handle_broker_request(relid, unit, add_legacy_response=True)) + relation_set(relation_id=relid, relation_settings=data) + + def notify_client(): + send_osd_settings() + if not ready_for_service(): + log("mon cluster is not in quorum, skipping notify_client", + level=WARNING) + return + for relid in relation_ids('client'): + data = _get_ceph_info_from_configs() + + service_name = None + # Loop through all related units until client application name is found + # This is done in a separate loop to avoid calling ceph to retrieve named + # key for every unit for unit in related_units(relid): - client_relation(relid, unit) + service_name = get_client_application_name(relid, unit) + if service_name: + data.update({'key': ceph.get_named_key(service_name)}) + break + + if not service_name: + log('Unable to determine remote service name, deferring processing' + ' of broker requests for relation {} '.format(relid)) + # continue with next relid + continue + + for unit in related_units(relid): + _handle_client_relation(relid, unit, data) + for relid in relation_ids('admin'): admin_relation_joined(relid) for relid in relation_ids('mds'): @@ -993,26 +1055,17 @@ def client_relation(relid=None, unit=None): if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- providing client with keys, processing broker requests') + if not unit: + unit = remote_unit() service_name = get_client_application_name(relid, unit) if not service_name: log('Unable
to determine remote service name, deferring ' - 'processing of broker requests') + 'processing of broker requests for relation {} ' + 'remote unit {}'.format(relid, unit)) return - public_addr = get_public_addr() - data = {'key': ceph.get_named_key(service_name), - 'auth': config('auth-supported'), - 'ceph-public-address': public_addr} - rbd_features = get_rbd_features() - if rbd_features: - data['rbd-features'] = rbd_features - if not unit: - unit = remote_unit() - if is_unsupported_cmr(unit): - return - data.update( - handle_broker_request(relid, unit, add_legacy_response=True)) - relation_set(relation_id=relid, - relation_settings=data) + data = _get_ceph_info_from_configs() + data.update({'key': ceph.get_named_key(service_name)}) + _handle_client_relation(relid, unit, data) @hooks.hook('upgrade-charm.real') diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 52ac16b2..99a712c3 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -237,28 +237,81 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mock_notify_prometheus.assert_called_once_with() mock_service_pause.assert_called_with('ceph-create-keys') + @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'mds_relation_joined') @patch.object(ceph_hooks, 'admin_relation_joined') - @patch.object(ceph_hooks, 'client_relation') + @patch.object(ceph_hooks, 'relation_set') + @patch.object(ceph_hooks, 'handle_broker_request') + @patch.object(ceph_hooks, 'config') @patch.object(ceph_hooks, 'related_units') + @patch.object(ceph_hooks.ceph, 'get_named_key') + @patch.object(ceph_hooks.hookenv, 'remote_service_name') @patch.object(ceph_hooks, 'relation_ids') - def test_notify_client(self, mock_relation_ids, mock_related_units, - mock_client_relation, - mock_admin_relation_joined, - mock_mds_relation_joined): - mock_relation_ids.return_value = ['arelid'] - mock_related_units.return_value = ['aunit'] + @patch.object(ceph_hooks.ceph, 'is_leader') + @patch.object(ceph_hooks, 'get_rbd_features') + @patch.object(ceph_hooks, 'get_public_addr') + @patch.object(ceph_hooks, 'ready_for_service') + @patch.object(ceph_hooks, 'send_osd_settings') + def test_notify_client(self, + _send_osd_settings, + _ready_for_service, + _get_public_addr, + _get_rbd_features, + _is_leader, + _relation_ids, + _remote_service_name, + _get_named_key, + _related_units, + _config, + _handle_broker_request, + _relation_set, + _admin_relation_joined, + _mds_relation_joined, + _relation_get): + _relation_ids.return_value = ['arelid'] + _related_units.return_value = ['aunit/0'] + _relation_get.return_value = {'application-name': 'aunit'} + _remote_service_name.return_value = 'aunit' + _is_leader.return_value = True + config = copy.deepcopy(CHARM_CONFIG) + _config.side_effect = lambda key: config[key] + _handle_broker_request.return_value = {} + _get_rbd_features.return_value = None + ceph_hooks.notify_client() - mock_relation_ids.assert_has_calls([ - call('client'), + _send_osd_settings.assert_called_once_with() + _ready_for_service.assert_called_once_with() + _get_public_addr.assert_called_once_with() + _get_named_key.assert_called_once_with('aunit') + _handle_broker_request.assert_called_once_with( + 'arelid', 'aunit/0', add_legacy_response=True) + _relation_set.assert_called_once_with( + relation_id='arelid', + relation_settings={ + 'key': _get_named_key(), + 'auth': False, + 'ceph-public-address': _get_public_addr() + }) + + _relation_ids.assert_has_calls([ 
call('admin'), call('mds'), ]) - mock_related_units.assert_called_with('arelid') - mock_client_relation.assert_called_once_with('arelid', 'aunit') - mock_admin_relation_joined.assert_called_once_with('arelid') - mock_mds_relation_joined.assert_called_once_with(relid='arelid', - unit='aunit') + _admin_relation_joined.assert_called_once_with('arelid') + _mds_relation_joined.assert_called_once_with(relid='arelid', + unit='aunit/0') + + _get_rbd_features.return_value = 42 + _relation_set.reset_mock() + ceph_hooks.notify_client() + _relation_set.assert_called_once_with( + relation_id='arelid', + relation_settings={ + 'key': _get_named_key(), + 'auth': False, + 'ceph-public-address': _get_public_addr(), + 'rbd-features': 42, + }) @patch.object(ceph_hooks, 'rbd_mirror_relation') @patch.object(ceph_hooks, 'related_units') @@ -307,6 +360,7 @@ def test_get_client_application_name(self, remote_unit, relation_get, ceph_hooks.get_client_application_name('rel:1', None), 'glance') + @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -323,12 +377,14 @@ def test_config_changed_no_autotune(self, create_sysctl, emit_ceph_conf, mgr_enable_module, - list_pools): + list_pools, + notify_client): relations_of_type.return_value = False self.test_config.set('pg-autotune', 'false') ceph_hooks.config_changed() mgr_enable_module.assert_not_called() + @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'monitor_key_set') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @@ -348,7 +404,9 @@ def test_config_changed_with_autotune(self, create_sysctl, emit_ceph_conf, mgr_enable_module, - list_pools, monitor_key_set): + list_pools, + monitor_key_set, + notify_client): relations_of_type.return_value = False cmp_pkgrevno.return_value = 1 self.test_config.set('pg-autotune', 'true') @@ -356,6 +414,7 @@ def test_config_changed_with_autotune(self, mgr_enable_module.assert_called_once_with('pg_autoscaler') monitor_key_set.assert_called_once_with('admin', 'autotune', 'true') + @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -374,7 +433,8 @@ def test_config_changed_with_default_autotune(self, create_sysctl, emit_ceph_conf, mgr_enable_module, - list_pools): + list_pools, + notify_client): relations_of_type.return_value = False cmp_pkgrevno.return_value = 1 self.test_config.set('pg-autotune', 'auto') From 49b4ffc7ed459edfccd220209b631a62e9235ba6 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:01 +0100 Subject: [PATCH 2188/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers Change-Id: I7de7b61d63aef57c3242631b969d5bb54fe76ab1 --- ceph-mon/charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 4 +- .../contrib/openstack/amulet/utils.py | 1 + .../contrib/openstack/cert_utils.py | 27 +- .../charmhelpers/contrib/openstack/context.py | 16 +- .../contrib/openstack/deferred_events.py | 410 ++++++++++++++++++ .../contrib/openstack/exceptions.py | 5 + 
.../openstack/files/policy_rc_d_script.py | 196 +++++++++ .../contrib/openstack/policy_rcd.py | 173 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 291 ++++++++++++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 20 + ceph-mon/hooks/charmhelpers/core/host.py | 236 ++++++++-- .../charmhelpers/core/host_factory/ubuntu.py | 12 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 1 + ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 88 +++- ceph-mon/lib/charms_ceph/utils.py | 22 +- ceph-mon/test-requirements.txt | 4 +- 17 files changed, 1401 insertions(+), 107 deletions(-) create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py create mode 100755 ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py create mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index df1e68a5..d0632d3f 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers +repo: https://github.com/juju/charm-helpers@stable/21.04 destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index c87cf489..e4cb06bc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -337,10 +337,8 @@ def write(self): "command": nrpecheck.command, } # If we were passed max_check_attempts, add that to the relation data - try: + if nrpecheck.max_check_attempts is not None: nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts - except AttributeError: - pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 63aea1e3..0a14af7e 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -42,6 +42,7 @@ import swiftclient from charmhelpers.core.decorators import retry_on_exception + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 24867497..703fc6ef 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -47,7 +47,7 @@ ) from charmhelpers.core.host import ( - CA_CERT_DIR, + ca_cert_absolute_path, install_ca_cert, mkdir, write_file, @@ -307,6 +307,26 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def get_cert_relation_ca_name(cert_relation_id=None): + """Determine CA certificate name as provided by relation. + + The filename on disk depends on the name chosen for the application on the + providing end of the certificates relation. 
+ + :param cert_relation_id: (Optional) Relation id providing the certs + :type cert_relation_id: str + :returns: CA certificate filename without path nor extension + :rtype: str + """ + if cert_relation_id is None: + try: + cert_relation_id = relation_ids('certificates')[0] + except IndexError: + return '' + return '{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id)) + + def _manage_ca_certs(ca, cert_relation_id): """Manage CA certs. @@ -316,7 +336,7 @@ def _manage_ca_certs(ca, cert_relation_id): :type cert_relation_id: str """ config_ssl_ca = config('ssl_ca') - config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE) + config_cert_file = ca_cert_absolute_path(CONFIG_CA_CERT_FILE) if config_ssl_ca: log("Installing CA certificate from charm ssl_ca config to {}".format( config_cert_file), INFO) @@ -329,8 +349,7 @@ def _manage_ca_certs(ca, cert_relation_id): log("Installing CA certificate from certificate relation", INFO) install_ca_cert( ca.encode(), - name='{}_juju_ca_cert'.format( - remote_service_name(relid=cert_relation_id))) + name=get_cert_relation_ca_name(cert_relation_id)) def process_certificates(service_name, relation_id, unit, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index c242d18d..b67dafda 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -74,7 +74,6 @@ pwgen, lsb_release, CompareHostReleases, - is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -1596,16 +1595,21 @@ def _calculate_workers(): @returns int: number of worker processes to use ''' - multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + multiplier = config('worker-multiplier') + + # distinguish an empty config and an explicit config as 0.0 + if multiplier is None: + multiplier = DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) - if multiplier > 0 and count == 0: + if count <= 0: + # assign at least one worker count = 1 - if config('worker-multiplier') is None and is_container(): + if config('worker-multiplier') is None: # NOTE(jamespage): Limit unconfigured worker-multiplier # to MAX_DEFAULT_WORKERS to avoid insane - # worker configuration in LXD containers - # on large servers + # worker configuration on large servers # Reference: https://pad.lv/1665270 count = min(count, MAX_DEFAULT_WORKERS) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py new file mode 100644 index 00000000..fd073a04 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -0,0 +1,410 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing deferred service events. + +This module is used to manage deferred service events from both charm actions +and package actions. 
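For reference, a minimal illustrative sketch of the on-disk record format this module reads and writes; the field values are taken from the deferred_events() docstring below, the temporary path is illustrative, and PyYAML is assumed (the module itself imports yaml):

import tempfile

import yaml

# Shape of a *.deferred record as written by save_event() and read by
# read_event_file().
record = {
    'timestamp': 1614328743,
    'service': 'openvswitch-switch',
    'action': 'restart',
    'reason': 'Pkg update',
    'policy_requestor_type': 'charm',
    'policy_requestor_name': 'neutron-openvswitch',
}

with tempfile.NamedTemporaryFile(
        'w', suffix='.deferred', delete=False) as f:
    yaml.dump(record, f)
    path = f.name

# read_event_file() performs essentially this load before building a
# ServiceEvent from the resulting dict.
with open(path) as f:
    loaded = yaml.safe_load(f)
print(loaded['service'], loaded['action'], loaded['reason'])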
+""" + +import datetime +import glob +import yaml +import os +import time +import uuid + +import charmhelpers.contrib.openstack.policy_rcd as policy_rcd +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.unitdata as unitdata + +import subprocess + + +# Deferred events generated from the charm are stored along side those +# generated from packaging. +DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR + + +class ServiceEvent(): + + def __init__(self, timestamp, service, reason, action, + policy_requestor_name=None, policy_requestor_type=None): + self.timestamp = timestamp + self.service = service + self.reason = reason + self.action = action + if not policy_requestor_name: + self.policy_requestor_name = hookenv.service_name() + if not policy_requestor_type: + self.policy_requestor_type = 'charm' + + def __eq__(self, other): + for attr in vars(self): + if getattr(self, attr) != getattr(other, attr): + return False + return True + + def matching_request(self, other): + for attr in ['service', 'action', 'reason']: + if getattr(self, attr) != getattr(other, attr): + return False + return True + + @classmethod + def from_dict(cls, data): + return cls( + data['timestamp'], + data['service'], + data['reason'], + data['action'], + data.get('policy_requestor_name'), + data.get('policy_requestor_type')) + + +def deferred_events_files(): + """Deferred event files + + Deferred event files that were generated by service_name() policy. + + :returns: Deferred event files + :rtype: List[str] + """ + return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + +def read_event_file(file_name): + """Read a file and return the corresponding objects. + + :param file_name: Name of file to read. + :type file_name: str + :returns: ServiceEvent from file. + :rtype: ServiceEvent + """ + with open(file_name, 'r') as f: + contents = yaml.safe_load(f) + event = ServiceEvent( + contents['timestamp'], + contents['service'], + contents['reason'], + contents['action']) + return event + + +def deferred_events(): + """Get list of deferred events. + + List of deferred events. Events are represented by dicts of the form: + + { + action: restart, + policy_requestor_name: neutron-openvswitch, + policy_requestor_type: charm, + reason: 'Pkg update', + service: openvswitch-switch, + time: 1614328743} + + :returns: List of deferred events. + :rtype: List[ServiceEvent] + """ + events = [] + for defer_file in deferred_events_files(): + events.append((defer_file, read_event_file(defer_file))) + return events + + +def duplicate_event_files(event): + """Get list of event files that have equivalent deferred events. + + :param event: Event to compare + :type event: ServiceEvent + :returns: List of event files + :rtype: List[str] + """ + duplicates = [] + for event_file, existing_event in deferred_events(): + if event.matching_request(existing_event): + duplicates.append(event_file) + return duplicates + + +def get_event_record_file(policy_requestor_type, policy_requestor_name): + """Generate filename for storing a new event. 
+ + :param policy_requestor_type: System that blocked event + :type policy_requestor_type: str + :param policy_requestor_name: Name of application that blocked event + :type policy_requestor_name: str + :returns: File name + :rtype: str + """ + file_name = '{}/{}-{}-{}.deferred'.format( + DEFERRED_EVENTS_DIR, + policy_requestor_type, + policy_requestor_name, + uuid.uuid1()) + return file_name + + +def save_event(event): + """Write deferred events to backend. + + :param event: Event to save + :type event: ServiceEvent + """ + requestor_name = hookenv.service_name() + requestor_type = 'charm' + init_policy_log_dir() + if duplicate_event_files(event): + hookenv.log( + "Not writing new event, existing event found. {} {} {}".format( + event.service, + event.action, + event.reason), + level="DEBUG") + else: + record_file = get_event_record_file( + policy_requestor_type=requestor_type, + policy_requestor_name=requestor_name) + + with open(record_file, 'w') as f: + data = { + 'timestamp': event.timestamp, + 'service': event.service, + 'action': event.action, + 'reason': event.reason, + 'policy_requestor_type': requestor_type, + 'policy_requestor_name': requestor_name} + yaml.dump(data, f) + + +def clear_deferred_events(svcs, action): + """Remove any outstanding deferred events. + + Remove a deferred event if its service is in the services list and its + action matches. + + :param svcs: List of services to remove. + :type svcs: List[str] + :param action: Action to remove + :type action: str + """ + # XXX This function is not currently processing the action. It needs to + # match the action and also take account of try-restart and the + # equivalence of stop-start and restart. + for defer_file in deferred_events_files(): + deferred_event = read_event_file(defer_file) + if deferred_event.service in svcs: + os.remove(defer_file) + + +def init_policy_log_dir(): + """Ensure directory to store events exists.""" + if not os.path.exists(DEFERRED_EVENTS_DIR): + os.mkdir(DEFERRED_EVENTS_DIR) + + +def get_deferred_events(): + """Return a list of deferred events requested by the charm and packages. + + :returns: List of deferred events + :rtype: List[ServiceEvent] + """ + events = [] + for _, event in deferred_events(): + events.append(event) + return events + + +def get_deferred_restarts(): + """List of deferred restart events requested by the charm and packages. + + :returns: List of deferred restarts + :rtype: List[ServiceEvent] + """ + return [e for e in get_deferred_events() if e.action == 'restart'] + + +def clear_deferred_restarts(services): + """Clear deferred restart events targeted at `services`. + + :param services: Services with deferred actions to clear. + :type services: List[str] + """ + clear_deferred_events(services, 'restart') + + +def process_svc_restart(service): + """Respond to a service restart having occurred. + + :param service: Service that the action was performed against. + :type service: str + """ + clear_deferred_restarts([service]) + + +def is_restart_permitted(): + """Check whether restarts are permitted. + + :returns: Whether restarts are permitted + :rtype: bool + """ + if hookenv.config('enable-auto-restarts') is None: + return True + return hookenv.config('enable-auto-restarts') + + +def check_and_record_restart_request(service, changed_files): + """Check if restarts are permitted; if not, log the request. + + :param service: Service to be restarted + :type service: str + :param changed_files: Files that have changed to trigger restarts.
+ :type changed_files: List[str] + :returns: Whether restarts are permitted + :rtype: bool + """ + changed_files = sorted(list(set(changed_files))) + permitted = is_restart_permitted() + if not permitted: + save_event(ServiceEvent( + timestamp=round(time.time()), + service=service, + reason='File(s) changed: {}'.format( + ', '.join(changed_files)), + action='restart')) + return permitted + + +def deferrable_svc_restart(service, reason=None): + """Restarts service if permitted, if not defer it. + + :param service: Service to be restarted + :type service: str + :param reason: Reason for restart + :type reason: Union[str, None] + """ + if is_restart_permitted(): + host.service_restart(service) + else: + save_event(ServiceEvent( + timestamp=round(time.time()), + service=service, + reason=reason, + action='restart')) + + +def configure_deferred_restarts(services): + """Setup deferred restarts. + + :param services: Services to block restarts of. + :type services: List[str] + """ + policy_rcd.install_policy_rcd() + if is_restart_permitted(): + policy_rcd.remove_policy_file() + else: + blocked_actions = ['stop', 'restart', 'try-restart'] + for svc in services: + policy_rcd.add_policy_block(svc, blocked_actions) + + +def get_service_start_time(service): + """Find point in time when the systemd unit transitioned to active state. + + :param service: Services to check timetsamp of. + :type service: str + """ + start_time = None + out = subprocess.check_output( + [ + 'systemctl', + 'show', + service, + '--property=ActiveEnterTimestamp']) + str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '') + if str_time: + start_time = datetime.datetime.strptime( + str_time, + '%a %Y-%m-%d %H:%M:%S %Z') + return start_time + + +def check_restart_timestamps(): + """Check deferred restarts against systemd units start time. + + Check if a service has a deferred event and clear it if it has been + subsequently restarted. + """ + for event in get_deferred_restarts(): + start_time = get_service_start_time(event.service) + deferred_restart_time = datetime.datetime.fromtimestamp( + event.timestamp) + if start_time and start_time < deferred_restart_time: + hookenv.log( + ("Restart still required, {} was started at {}, restart was " + "requested after that at {}").format( + event.service, + start_time, + deferred_restart_time), + level='DEBUG') + else: + clear_deferred_restarts([event.service]) + + +def set_deferred_hook(hookname): + """Record that a hook has been deferred. + + :param hookname: Name of hook that was deferred. + :type hookname: str + """ + with unitdata.HookData()() as t: + kv = t[0] + deferred_hooks = kv.get('deferred-hooks', []) + if hookname not in deferred_hooks: + deferred_hooks.append(hookname) + kv.set('deferred-hooks', sorted(list(set(deferred_hooks)))) + + +def get_deferred_hooks(): + """Get a list of deferred hooks. + + :returns: List of hook names. + :rtype: List[str] + """ + with unitdata.HookData()() as t: + kv = t[0] + return kv.get('deferred-hooks', []) + + +def clear_deferred_hooks(): + """Clear any deferred hooks.""" + with unitdata.HookData()() as t: + kv = t[0] + kv.set('deferred-hooks', []) + + +def clear_deferred_hook(hookname): + """Clear a specific deferred hooks. + + :param hookname: Name of hook to remove. 
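Taken together, a minimal sketch of how a charm might drive the helpers above; it assumes a real Juju hook environment, and the service name and reason are purely illustrative:

import charmhelpers.contrib.openstack.deferred_events as deferred_events

# Block packaging-driven stop/restart of the payload services unless
# auto-restarts are enabled (see configure_deferred_restarts above).
deferred_events.configure_deferred_restarts(['ceph-mon'])

# Restart immediately if permitted, otherwise record a deferred event.
deferred_events.deferrable_svc_restart('ceph-mon', reason='ceph.conf changed')

# Later (e.g. from update-status), drop events for services that have
# since restarted and report anything still pending.
deferred_events.check_restart_timestamps()
for event in deferred_events.get_deferred_restarts():
    print(event.service, event.reason)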
+ :type hookname: str + """ + with unitdata.HookData()() as t: + kv = t[0] + deferred_hooks = kv.get('deferred-hooks', []) + if hookname in deferred_hooks: + deferred_hooks.remove(hookname) + kv.set('deferred-hooks', deferred_hooks) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py index f85ae4f4..b2330637 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -19,3 +19,8 @@ class OSContextError(Exception): This exception is principally used in contrib.openstack.context """ pass + + +class ServiceActionError(Exception): + """Raised when a service action (stop/start/etc.) failed.""" + pass diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py new file mode 100755 index 00000000..344a7662 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 + +"""This script is an implementation of policy-rc.d + +For further information on policy-rc.d see *1 + +*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt +""" +import collections +import glob +import os +import logging +import sys +import time +import uuid +import yaml + + +SystemPolicy = collections.namedtuple( + 'SystemPolicy', + [ + 'policy_requestor_name', + 'policy_requestor_type', + 'service', + 'blocked_actions']) + +DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d' +DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d' + + +def read_policy_file(policy_file): + """Return system policies from given file. + + :param policy_file: Name of file to read. + :type policy_file: str + :returns: Policy + :rtype: List[SystemPolicy] + """ + policies = [] + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + for service, actions in policy['blocked_actions'].items(): + service = service.replace('.service', '') + policies.append(SystemPolicy( + policy_requestor_name=policy['policy_requestor_name'], + policy_requestor_type=policy['policy_requestor_type'], + service=service, + blocked_actions=actions)) + return policies + + +def get_policies(policy_config_dir): + """Return all system policies in policy_config_dir. + + :param policy_config_dir: Directory to read policy files from. + :type policy_config_dir: str + :returns: Policy + :rtype: List[SystemPolicy] + """ + _policy = [] + for f in glob.glob('{}/*.policy'.format(policy_config_dir)): + _policy.extend(read_policy_file(f)) + return _policy + + +def record_blocked_action(service, action, blocking_policies, policy_log_dir): + """Record that an action was requested but denied. + + :param service: Service that was blocked + :type service: str + :param action: Action that was blocked. + :type action: str + :param blocking_policies: Policies that blocked the action on the service. + :type blocking_policies: List[SystemPolicy] + :param policy_log_dir: Directory to place the blocking action record.
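For reference, a minimal sketch of the /etc/policy-rc.d/*.policy format that read_policy_file() above parses; the service name here is illustrative and PyYAML is assumed:

import yaml

policy_text = """
policy_requestor_name: ceph-mon
policy_requestor_type: charm
blocked_actions:
  ceph-mon.service: [restart, stop, try-restart]
"""

policy = yaml.safe_load(policy_text)
for service, actions in policy['blocked_actions'].items():
    # read_policy_file() strips the .service suffix in the same way.
    print(service.replace('.service', ''), '->', sorted(actions))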
+ :type policy_log_dir: str + """ + if not os.path.exists(policy_log_dir): + os.mkdir(policy_log_dir) + seconds = round(time.time()) + for policy in blocking_policies: + if not os.path.exists(policy_log_dir): + os.mkdir(policy_log_dir) + file_name = '{}/{}-{}-{}.deferred'.format( + policy_log_dir, + policy.policy_requestor_type, + policy.policy_requestor_name, + uuid.uuid1()) + with open(file_name, 'w') as f: + data = { + 'timestamp': seconds, + 'service': service, + 'action': action, + 'reason': 'Package update', + 'policy_requestor_type': policy.policy_requestor_type, + 'policy_requestor_name': policy.policy_requestor_name} + yaml.dump(data, f) + + +def get_blocking_policies(service, action, policy_config_dir): + """Record that an action was requested but deniedl + + :param service: Service that action is requested against. + :type service: str + :param action: Action that is requested. + :type action: str + :param policy_config_dir: Directory that stores policy files. + :type policy_config_dir: str + :returns: Policies + :rtype: List[SystemPolicy] + """ + service = service.replace('.service', '') + blocking_policies = [ + policy + for policy in get_policies(policy_config_dir) + if policy.service == service and action in policy.blocked_actions] + return blocking_policies + + +def process_action_request(service, action, policy_config_dir, policy_log_dir): + """Take the requested action against service and check if it is permitted. + + :param service: Service that action is requested against. + :type service: str + :param action: Action that is requested. + :type action: str + :param policy_config_dir: Directory that stores policy files. + :type policy_config_dir: str + :param policy_log_dir: Directory that stores policy files. + :type policy_log_dir: str + :returns: Tuple of whether the action is permitted and explanation. + :rtype: (boolean, str) + """ + blocking_policies = get_blocking_policies( + service, + action, + policy_config_dir) + if blocking_policies: + policy_msg = [ + '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name) + for p in sorted(blocking_policies)] + message = '{} of {} blocked by {}'.format( + action, + service, + ', '.join(policy_msg)) + record_blocked_action( + service, + action, + blocking_policies, + policy_log_dir) + action_permitted = False + else: + message = "Permitting {} {}".format(service, action) + action_permitted = True + return action_permitted, message + + +def main(): + logging.basicConfig( + filename='/var/log/policy-rc.d.log', + level=logging.DEBUG, + format='%(asctime)s %(message)s') + + service = sys.argv[1] + action = sys.argv[2] + + permitted, message = process_action_request( + service, + action, + DEFAULT_POLICY_CONFIG_DIR, + DEFAULT_POLICY_LOG_DIR) + logging.info(message) + + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + # Exit status codes: + # 0 - action allowed + # 1 - unknown action (therefore, undefined policy) + # 100 - unknown initscript id + # 101 - action forbidden by policy + # 102 - subsystem error + # 103 - syntax error + # 104 - [reserved] + # 105 - behaviour uncertain, policy undefined. + # 106 - action not allowed. Use the returned fallback actions + # (which are implied to be "allowed") instead. 
+ + if permitted: + return 0 + else: + return 101 + + +if __name__ == "__main__": + rc = main() + sys.exit(rc) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py new file mode 100644 index 00000000..ecffbc68 --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py @@ -0,0 +1,173 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing policy-rc.d script and associated files. + +This module manages the installation of /usr/sbin/policy-rc.d, the +policy files and the event files. When a package update occurs the +packaging system calls: + +policy-rc.d [options] + +The return code of the script determines if the packaging system +will perform that action on the given service. The policy-rc.d +implementation installed by this module checks if an action is +permitted by checking policy files placed in /etc/policy-rc.d. +If a policy file exists which denies the requested action then +this is recorded in an event file which is placed in +/var/lib/policy-rc.d. +""" + +import os +import shutil +import tempfile +import yaml + +import charmhelpers.contrib.openstack.files as os_files +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host + +POLICY_HEADER = """# Managed by juju\n""" +POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' +POLICY_CONFIG_DIR = '/etc/policy-rc.d' + + +def get_policy_file_name(): + """Get the name of the policy file for this application. + + :returns: Policy file name + :rtype: str + """ + application_name = hookenv.service_name() + return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) + + +def read_default_policy_file(): + """Return the policy file. + + A policy is in the form: + blocked_actions: + neutron-dhcp-agent: [restart, stop, try-restart] + neutron-l3-agent: [restart, stop, try-restart] + neutron-metadata-agent: [restart, stop, try-restart] + neutron-openvswitch-agent: [restart, stop, try-restart] + openvswitch-switch: [restart, stop, try-restart] + ovs-vswitchd: [restart, stop, try-restart] + ovs-vswitchd-dpdk: [restart, stop, try-restart] + ovsdb-server: [restart, stop, try-restart] + policy_requestor_name: neutron-openvswitch + policy_requestor_type: charm + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = {} + policy_file = get_policy_file_name() + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + return policy + + +def write_policy_file(policy_file, policy): + """Write policy to disk. 
+ + :param policy_file: Name of policy file + :type policy_file: str + :param policy: Policy + :type policy: Dict[str, Union[str, Dict[str, List[str]]]] + """ + with tempfile.NamedTemporaryFile('w', delete=False) as f: + f.write(POLICY_HEADER) + yaml.dump(policy, f) + tmp_file_name = f.name + shutil.move(tmp_file_name, policy_file) + + +def remove_policy_file(): + """Remove policy file.""" + try: + os.remove(get_policy_file_name()) + except FileNotFoundError: + pass + + +def install_policy_rcd(): + """Install policy-rc.d components.""" + source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) + policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( + hookenv.service_name()) + host.mkdir(os.path.dirname(policy_rcd_exec)) + shutil.copy2( + '{}/policy_rc_d_script.py'.format(source_file_dir), + policy_rcd_exec) + # policy-rc.d must be installed via the alternatives system: + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + if not os.path.exists('/usr/sbin/policy-rc.d'): + alternatives.install_alternative( + 'policy-rc.d', + '/usr/sbin/policy-rc.d', + policy_rcd_exec) + host.mkdir(POLICY_CONFIG_DIR) + + +def get_default_policy(): + """Return the default policy structure. + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = { + 'policy_requestor_name': hookenv.service_name(), + 'policy_requestor_type': 'charm', + 'blocked_actions': {}} + return policy + + +def add_policy_block(service, blocked_actions): + """Update a policy file with new list of actions. + + :param service: Service name + :type service: str + :param blocked_actions: Action to block + :type blocked_actions: List[str] + """ + policy = read_default_policy_file() or get_default_policy() + policy_file = get_policy_file_name() + if policy['blocked_actions'].get(service): + policy['blocked_actions'][service].extend(blocked_actions) + else: + policy['blocked_actions'][service] = blocked_actions + policy['blocked_actions'][service] = sorted( + list(set(policy['blocked_actions'][service]))) + write_policy_file(policy_file, policy) + + +def remove_policy_block(service, unblocked_actions): + """Remove list of actions from policy file. + + :param service: Service name + :type service: str + :param unblocked_actions: Action to unblock + :type unblocked_actions: List[str] + """ + policy_file = get_policy_file_name() + policy = read_default_policy_file() + for action in unblocked_actions: + try: + policy['blocked_actions'][service].remove(action) + except (KeyError, ValueError): + continue + write_policy_file(policy_file, policy) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index f27aa6c9..2ad8ab94 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -14,7 +14,7 @@ # Common python helper functions used for OpenStack charms. 
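A minimal sketch of how configure_deferred_restarts() in deferred_events.py drives this module; it needs a Juju hook environment, so it is illustrative only, and the service names are hypothetical:

import charmhelpers.contrib.openstack.policy_rcd as policy_rcd

# Install /usr/sbin/policy-rc.d via the alternatives system, then block
# packaging-driven service actions for the charm's payload.
policy_rcd.install_policy_rcd()
for svc in ['ceph-mon', 'ceph-mgr']:
    policy_rcd.add_policy_block(svc, ['stop', 'restart', 'try-restart'])

# When auto-restarts are re-enabled, the policy file is simply removed.
policy_rcd.remove_policy_file()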
from collections import OrderedDict, namedtuple -from functools import wraps +from functools import partial, wraps import subprocess import json @@ -36,9 +36,12 @@ from charmhelpers.core import decorators, unitdata +import charmhelpers.contrib.openstack.deferred_events as deferred_events + from charmhelpers.core.hookenv import ( WORKLOAD_STATES, action_fail, + action_get, action_set, config, expected_peer_units, @@ -112,7 +115,7 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError from charmhelpers.contrib.openstack.policyd import ( policyd_status_message_prefix, POLICYD_CONFIG_NAME, @@ -148,6 +151,7 @@ 'train', 'ussuri', 'victoria', + 'wallaby', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -170,6 +174,7 @@ ('eoan', 'train'), ('focal', 'ussuri'), ('groovy', 'victoria'), + ('hirsute', 'wallaby'), ]) @@ -193,6 +198,7 @@ ('2019.2', 'train'), ('2020.1', 'ussuri'), ('2020.2', 'victoria'), + ('2021.1', 'wallaby'), ]) # The ugly duckling - must list releases oldest to newest @@ -301,8 +307,8 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), + ('18', 'ussuri'), # Note this was actually 17.0 - 18.3 + ('19', 'victoria'), # Note this is really 18.6 ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -483,9 +489,26 @@ def get_swift_codename(version): return None -@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' + """Derive OpenStack release codename from an installed package. + + Initially, see if the openstack-release pkg is available (by trying to + install it) and use it instead. + + If it isn't then it falls back to the existing method of checking the + version of the package passed and then resolving the version from that + using lookup tables. + + Note: if possible, charms should use get_installed_os_version() to + determine the version of the "openstack-release" pkg. + + :param package: the package to test for version information. + :type package: str + :param fatal: If True (default), then die via error_out() + :type fatal: bool + :returns: the OpenStack release codename (e.g. ussuri) + :rtype: str + """ codename = get_installed_os_version() if codename: @@ -579,8 +602,22 @@ def get_os_version_package(pkg, fatal=True): def get_installed_os_version(): - apt_install(filter_installed_packages(['openstack-release']), fatal=False) - print("OpenStack Release: {}".format(openstack_release())) + """Determine the OpenStack release code name from openstack-release pkg. + + This uses the "openstack-release" pkg (if it exists) to return the + OpenStack release codename (e.g. usurri, mitaka, ocata, etc.) + + Note, it caches the result so that it is only done once per hook. 
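To illustrate the lookup these version tables support, a standalone sketch built from a subset of the 'nova-common' rows above; the helper name is hypothetical:

from collections import OrderedDict

# Subset of PACKAGE_CODENAMES['nova-common'], caveats included.
NOVA_CODENAMES = OrderedDict([
    ('16', 'train'),
    ('18', 'ussuri'),    # actually 17.0 - 18.3
    ('19', 'victoria'),  # really 18.6
])


def codename_from_version(version):
    """Map a package version such as '19.0.0' to a release codename."""
    return NOVA_CODENAMES.get(version.split('.')[0])


print(codename_from_version('19.0.0'))  # -> victoria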
+ + :returns: the OpenStack release codename, if available + :rtype: Optional[str] + """ + @cached + def _do_install(): + apt_install(filter_installed_packages(['openstack-release']), + fatal=False, quiet=True) + + _do_install() return openstack_release().get('OPENSTACK_CODENAME') @@ -1052,6 +1089,18 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + deferred_restarts = list(set( + [e.service for e in deferred_events.get_deferred_restarts()])) + if deferred_restarts: + svc_msg = "Services queued for restart: {}".format( + ', '.join(sorted(deferred_restarts))) + message = "{}. {}".format(message, svc_msg) + deferred_hooks = deferred_events.get_deferred_hooks() + if deferred_hooks: + svc_msg = "Hooks skipped due to disabled auto restarts: {}".format( + ', '.join(sorted(deferred_hooks))) + message = "{}. {}".format(message, svc_msg) + except Exception: pass @@ -1536,6 +1585,33 @@ def is_unit_paused_set(): return False +def is_hook_allowed(hookname, check_deferred_restarts=True): + """Check if hook can run. + + :param hookname: Name of hook to check. + :type hookname: str + :param check_deferred_restarts: Whether to check deferred restarts. + :type check_deferred_restarts: bool + """ + permitted = True + reasons = [] + if is_unit_paused_set(): + reasons.append( + "Unit is paused or upgrading. Skipping {}".format(hookname)) + permitted = False + + if check_deferred_restarts: + if deferred_events.is_restart_permitted(): + permitted = True + deferred_events.clear_deferred_hook(hookname) + else: + if not config().changed('enable-auto-restarts'): + deferred_events.set_deferred_hook(hookname) + reasons.append("auto restarts are disabled") + permitted = False + return permitted, " and ".join(reasons) + + def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. @@ -1696,6 +1772,43 @@ def resume_unit(assess_status_func, services=None, ports=None, raise Exception("Couldn't resume: {}".format("; ".join(messages))) +def restart_services_action(services=None, when_all_stopped_func=None, + deferred_only=None): + """Manage a service restart request via charm action. + + :param services: Services to be restarted + :type services: List[str] + :param when_all_stopped_func: Function to call when all services are + stopped. + :type when_all_stopped_func: Callable[] + :param deferred_only: Only restart services which have a deferred restart + event. + :type deferred_only: bool + """ + if services and deferred_only: + raise ValueError( + "services and deferred_only are mutually exclusive") + if deferred_only: + services = list(set( + [a.service for a in deferred_events.get_deferred_restarts()])) + _, messages = manage_payload_services( + 'stop', + services=services, + charm_func=when_all_stopped_func) + if messages: + raise ServiceActionError( + "Error processing service stop request: {}".format( + "; ".join(messages))) + _, messages = manage_payload_services( + 'start', + services=services) + if messages: + raise ServiceActionError( + "Error processing service start request: {}".format( + "; ".join(messages))) + deferred_events.clear_deferred_restarts(services) + + def make_assess_status_func(*args, **kwargs): """Creates an assess_status_func() suitable for handing to pause_unit() and resume_unit().
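A minimal sketch of wiring restart_services_action() into a charm's restart-services action; it needs a Juju hook environment, and the 'deferred-only' action parameter is hypothetical:

from charmhelpers.contrib.openstack.exceptions import ServiceActionError
from charmhelpers.contrib.openstack.utils import restart_services_action
from charmhelpers.core.hookenv import action_fail, action_get


def restart_services():
    try:
        if action_get('deferred-only'):
            # Only restart services that have a queued deferred restart.
            restart_services_action(deferred_only=True)
        else:
            restart_services_action(services=['ceph-mon'])
    except (ValueError, ServiceActionError) as e:
        # ValueError: both services and deferred_only were supplied;
        # ServiceActionError: a stop or start request failed.
        action_fail(str(e))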
@@ -1717,7 +1830,10 @@ def _assess_status_func():


 def pausable_restart_on_change(restart_map, stopstart=False,
-                               restart_functions=None):
+                               restart_functions=None,
+                               can_restart_now_f=None,
+                               post_svc_restart_f=None,
+                               pre_restarts_wait_f=None):
     """A restart_on_change decorator that checks to see if the unit is
     paused. If it is paused then the decorated function doesn't fire.

@@ -1743,11 +1859,28 @@ def some_hook(...):
     function won't be called if the decorated function is never called.  Note,
     retains backwards compatibility for passing a non-callable dictionary.

-    @param f: the function to decorate
-    @param restart_map: (optionally callable, which then returns the
-        restart_map) the restart map {conf_file: [services]}
-    @param stopstart: DEFAULT false; whether to stop, start or just restart
-    @returns decorator to use a restart_on_change with pausability
+    :param f: function to decorate.
+    :type f: Callable
+    :param restart_map: either the restart map {conf_file: [services]}, or a
+                        callable which returns the restart map
+    :type restart_map: Union[Callable[[], Dict[str, List[str]]],
+                             Dict[str, List[str]]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
+    :returns: decorator to use a restart_on_change with pausability
+    :rtype: decorator
     """
     def wrap(f):
         # py27 compatible nonlocal variable.  When py3 only, replace with
@@ -1763,8 +1896,13 @@ def wrapped_f(*args, **kwargs):
                 if callable(restart_map) else restart_map
             # otherwise, normal restart_on_change functionality
             return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
-                stopstart, restart_functions)
+                (lambda: f(*args, **kwargs)),
+                __restart_map_cache['cache'],
+                stopstart,
+                restart_functions,
+                can_restart_now_f,
+                post_svc_restart_f,
+                pre_restarts_wait_f)
         return wrapped_f
     return wrap

@@ -2145,6 +2283,23 @@ def container_scoped_relations():
     return relations


+def container_scoped_relation_get(attribute=None):
+    """Get relation data from all container scoped relations.
+
+    :param attribute: Name of attribute to get
+    :type attribute: Optional[str]
+    :returns: Iterator with relation data
+    :rtype: Iterator[Optional[any]]
+    """
+    for endpoint_name in container_scoped_relations():
+        for rid in relation_ids(endpoint_name):
+            for unit in related_units(rid):
+                yield relation_get(
+                    attribute=attribute,
+                    unit=unit,
+                    rid=rid)
+
+
 def is_db_ready(use_current_context=False, rel_name=None):
     """Check remote database is ready to be used.

@@ -2418,3 +2573,107 @@ def get_api_application_status():
         msg = 'Some units are not ready'
         juju_log(msg, 'DEBUG')
     return app_state, msg
+
+
+def sequence_status_check_functions(*functions):
+    """Sequence the functions passed so that they all get a chance to run as
+    the charm status check functions.
+
+    :param *functions: a list of functions that return (state, message)
+    :type *functions: List[Callable[[OSConfigRender], (str, str)]]
+    :returns: the Callable that takes configs and returns (state, message)
+    :rtype: Callable[[OSConfigRender], (str, str)]
+    """
+    def _inner_sequenced_functions(configs):
+        state, message = 'unknown', ''
+        for f in functions:
+            new_state, new_message = f(configs)
+            state = workload_state_compare(state, new_state)
+            if message:
+                message = "{}, {}".format(message, new_message)
+            else:
+                message = new_message
+        return state, message

+    return _inner_sequenced_functions
+
+
+SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge'])
+
+
+def get_subordinate_release_packages(os_release, package_type='deb'):
+    """Iterate over subordinate relations and get package information.
+
+    :param os_release: OpenStack release to look for
+    :type os_release: str
+    :param package_type: Package type (one of 'deb' or 'snap')
+    :type package_type: str
+    :returns: Packages to install and packages to purge or None
+    :rtype: SubordinatePackages[set,set]
+    """
+    install = set()
+    purge = set()
+
+    for rdata in container_scoped_relation_get('releases-packages-map'):
+        rp_map = json.loads(rdata or '{}')
+        # The map provided by subordinate has OpenStack release name as key.
+        # Find package information from subordinate matching requested release
+        # or the most recent release prior to requested release by sorting the
+        # keys in reverse order. This follows established patterns in our
+        # charms for templates and reactive charm implementations, i.e. as
+        # long as nothing has changed the definitions for the prior OpenStack
+        # release are still valid.
+        for release in sorted(rp_map.keys(), reverse=True):
+            if (CompareOpenStackReleases(release) <= os_release and
+                    package_type in rp_map[release]):
+                for name, container in (
+                        ('install', install),
+                        ('purge', purge)):
+                    for pkg in rp_map[release][package_type].get(name, []):
+                        container.add(pkg)
+                break
+    return SubordinatePackages(install, purge)
+
+
+os_restart_on_change = partial(
+    pausable_restart_on_change,
+    can_restart_now_f=deferred_events.check_and_record_restart_request,
+    post_svc_restart_f=deferred_events.process_svc_restart)
+
+
+def restart_services_action_helper(all_services):
+    """Helper to run the restart-services action.
+
+    NOTE: all_services lists every service that could be restarted;
+    depending on the action arguments only a subset of these may
+    actually be restarted.
+
+    :param all_services: All services that could be restarted
+    :type all_services: List[str]
+    """
+    deferred_only = action_get("deferred-only")
+    services = action_get("services")
+    if services:
+        services = services.split()
+    else:
+        services = all_services
+    if deferred_only:
+        restart_services_action(deferred_only=True)
+    else:
+        restart_services_action(services=services)
+
+
+def show_deferred_events_action_helper():
+    """Helper to run the show-deferred-restarts action."""
+    restarts = []
+    for event in deferred_events.get_deferred_events():
+        restarts.append('{} {} {}'.format(
+            str(event.timestamp),
+            event.service.ljust(40),
+            event.reason))
+    restarts.sort()
+    output = {
+        'restarts': restarts,
+        'hooks': deferred_events.get_deferred_hooks()}
+    action_set({'output': "{}".format(
+        yaml.dump(output, default_flow_style=False))})
diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py
index db7ce728..778aa4b6 100644
--- a/ceph-mon/hooks/charmhelpers/core/hookenv.py
+++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py
@@ -226,6 +226,17 @@ def relation_id(relation_name=None, service_or_unit=None):
         raise ValueError('Must specify neither or both of relation_name and service_or_unit')


+def departing_unit():
+    """The departing unit for the current relation hook.
+
+    Available since juju 2.8.
+
+    :returns: the departing unit, or None if the information isn't available.
+    :rtype: Optional[str]
+    """
+    return os.environ.get('JUJU_DEPARTING_UNIT', None)
+
+
 def local_unit():
     """Local unit ID"""
     return os.environ['JUJU_UNIT_NAME']
@@ -1611,3 +1622,12 @@ def _contains_range(addresses):
         addresses.startswith(".") or
         ",." in addresses or
         " ." in addresses)
+
+
+def is_subordinate():
+    """Check whether charm is subordinate in unit metadata.
+
+    :returns: True if unit is subordinate, False otherwise.
+    :rtype: bool
+    """
+    return metadata().get('subordinate') is True
diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py
index f826f6fe..d25e6c59 100644
--- a/ceph-mon/hooks/charmhelpers/core/host.py
+++ b/ceph-mon/hooks/charmhelpers/core/host.py
@@ -34,7 +34,7 @@
 import six

 from contextlib import contextmanager
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict

 from .hookenv import log, INFO, DEBUG, local_unit, charm_name
 from .fstab import Fstab
 from charmhelpers.osplatform import get_platform
@@ -694,74 +694,223 @@ class ChecksumError(ValueError):
     pass


-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
-    """Restart services based on configuration files changing
+class restart_on_change(object):
+    """Decorator and context manager to handle restarts.

-    This function is used a decorator, for example::
+    Usage:

-        @restart_on_change({
-            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
-            '/etc/apache/sites-enabled/*': [ 'apache2' ]
-            })
-        def config_changed():
-            pass  # your code here
+       @restart_on_change(restart_map, ...)
+       def function_that_might_trigger_a_restart(...):
+           ...

-    In this example, the cinder-api and cinder-volume services
-    would be restarted if /etc/ceph/ceph.conf is changed by the
-    ceph_client_changed function. The apache2 service would be
-    restarted if any file matching the pattern got changed, created
-    or removed. Standard wildcards are supported, see documentation
-    for the 'glob' module for more information.
+    Or:
+
-    @param restart_map: {path_file_name: [service_name, ...]
-    @param stopstart: DEFAULT false; whether to stop, start OR restart
-    @param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    @returns result from decorated function
+        with restart_on_change(restart_map, ...):
+            do_stuff_that_might_trigger_a_restart()
+            ...
     """
-    def wrap(f):
+
+    def __init__(self, restart_map, stopstart=False, restart_functions=None,
+                 can_restart_now_f=None, post_svc_restart_f=None,
+                 pre_restarts_wait_f=None):
+        """
+        :param restart_map: {file: [service, ...]}
+        :type restart_map: Dict[str, List[str,]]
+        :param stopstart: whether to stop, start or restart a service
+        :type stopstart: boolean
+        :param restart_functions: nonstandard functions to use to restart
+                                  services {svc: func, ...}
+        :type restart_functions: Dict[str, Callable[[str], None]]
+        :param can_restart_now_f: A function used to check if the restart is
+                                  permitted.
+        :type can_restart_now_f: Callable[[str, List[str]], boolean]
+        :param post_svc_restart_f: A function run after a service has
+                                   restarted.
+        :type post_svc_restart_f: Callable[[str], None]
+        :param pre_restarts_wait_f: A function called before any restarts.
+        :type pre_restarts_wait_f: Callable[None, None]
+        """
+        self.restart_map = restart_map
+        self.stopstart = stopstart
+        self.restart_functions = restart_functions
+        self.can_restart_now_f = can_restart_now_f
+        self.post_svc_restart_f = post_svc_restart_f
+        self.pre_restarts_wait_f = pre_restarts_wait_f
+
+    def __call__(self, f):
+        """Work like a decorator.
+
+        Returns a wrapped function that performs the restart if triggered.
+
+        :param f: The function that is being wrapped.
+        :type f: Callable[[Any], Any]
+        :returns: the wrapped function
+        :rtype: Callable[[Any], Any]
+        """
         @functools.wraps(f)
         def wrapped_f(*args, **kwargs):
             return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)), restart_map, stopstart,
-                restart_functions)
+                (lambda: f(*args, **kwargs)),
+                self.restart_map,
+                stopstart=self.stopstart,
+                restart_functions=self.restart_functions,
+                can_restart_now_f=self.can_restart_now_f,
+                post_svc_restart_f=self.post_svc_restart_f,
+                pre_restarts_wait_f=self.pre_restarts_wait_f)
         return wrapped_f
-    return wrap
+
+    def __enter__(self):
+        """Enter the runtime context related to this object. """
+        self.checksums = _pre_restart_on_change_helper(self.restart_map)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Exit the runtime context related to this object.
+
+        The parameters describe the exception that caused the context to be
+        exited. If the context was exited without an exception, all three
+        arguments will be None.
+        """
+        if exc_type is None:
+            _post_restart_on_change_helper(
+                self.checksums,
+                self.restart_map,
+                stopstart=self.stopstart,
+                restart_functions=self.restart_functions,
+                can_restart_now_f=self.can_restart_now_f,
+                post_svc_restart_f=self.post_svc_restart_f,
+                pre_restarts_wait_f=self.pre_restarts_wait_f)
+        # All is good, so return False; any exceptions will propagate.
+        return False


 def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
-                             restart_functions=None):
+                             restart_functions=None,
+                             can_restart_now_f=None,
+                             post_svc_restart_f=None,
+                             pre_restarts_wait_f=None):
     """Helper function to perform the restart_on_change function.

     This is provided for decorators to restart services if files described
     in the restart_map have changed after an invocation of lambda_f().

-    @param lambda_f: function to call.
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop, start or restart a service
-    @param restart_functions: nonstandard functions to use to restart services
+    This function allows a number of helper functions to be passed.
+
+    `restart_functions` is a map with a service as the key and the
+    corresponding value being the function to call to restart the service. For
+    example if `restart_functions={'some-service': my_restart_func}` then
+    `my_restart_func` should be a function which takes one argument which is
+    the name of the service to be restarted.
+
+    `can_restart_now_f` is a function which checks that a restart is permitted.
+    It should return a bool which indicates if a restart is allowed and should
+    take a service name (str) and a list of changed files (List[str]) as
+    arguments.
+
+    `post_svc_restart_f` is a function which runs after a service has been
+    restarted. It takes the service name that was restarted as an argument.
+
+    `pre_restarts_wait_f` is a function which is called before any restarts
+    occur. The use case for this is an application which wants to try and
+    stagger restarts between units.
+
+    :param lambda_f: function to call.
+    :type lambda_f: Callable[[], ANY]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
                               {svc: func, ...}
-    @returns result of lambda_f()
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
+    :returns: result of lambda_f()
+    :rtype: ANY
+    """
+    checksums = _pre_restart_on_change_helper(restart_map)
+    r = lambda_f()
+    _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart,
+                                   restart_functions,
+                                   can_restart_now_f,
+                                   post_svc_restart_f,
+                                   pre_restarts_wait_f)
+    return r
+
+
+def _pre_restart_on_change_helper(restart_map):
+    """Take a snapshot of file hashes.
+
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :returns: Dictionary of file paths and the files checksum.
+    :rtype: Dict[str, str]
+    """
+    return {path: path_hash(path) for path in restart_map}
+
+
+def _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart=False,
+                                   restart_functions=None,
+                                   can_restart_now_f=None,
+                                   post_svc_restart_f=None,
+                                   pre_restarts_wait_f=None):
+    """Check whether files have changed.
+
+    :param checksums: Dictionary of file paths and the files checksum.
+    :type checksums: Dict[str, str]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
     """
     if restart_functions is None:
         restart_functions = {}
-    checksums = {path: path_hash(path) for path in restart_map}
-    r = lambda_f()
+    changed_files = defaultdict(list)
+    restarts = []
     # create a list of lists of the services to restart
-    restarts = [restart_map[path]
-                for path in restart_map
-                if path_hash(path) != checksums[path]]
+    for path, services in restart_map.items():
+        if path_hash(path) != checksums[path]:
+            restarts.append(services)
+            for svc in services:
+                changed_files[svc].append(path)
     # create a flat list of ordered services without duplicates from lists
     services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
     if services_list:
+        if pre_restarts_wait_f:
+            pre_restarts_wait_f()
         actions = ('stop', 'start') if stopstart else ('restart',)
         for service_name in services_list:
+            if can_restart_now_f:
+                if not can_restart_now_f(service_name,
+                                         changed_files[service_name]):
+                    continue
             if service_name in restart_functions:
                 restart_functions[service_name](service_name)
             else:
                 for action in actions:
                     service(action, service_name)
-    return r
+            if post_svc_restart_f:
+                post_svc_restart_f(service_name)


 def pwgen(length=None):
@@ -1068,6 +1217,17 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     return calculated_wait_time


+def ca_cert_absolute_path(basename_without_extension):
+    """Returns absolute path to CA certificate.
+
+    :param basename_without_extension: Filename without extension
+    :type basename_without_extension: str
+    :returns: Absolute full path
+    :rtype: str
+    """
+    return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension)
+
+
 def install_ca_cert(ca_cert, name=None):
     """
     Install the given cert as a trusted CA.
@@ -1083,7 +1243,7 @@ def install_ca_cert(ca_cert, name=None):
         ca_cert = ca_cert.encode('utf8')
     if not name:
         name = 'juju-{}'.format(charm_name())
-    cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name)
+    cert_file = ca_cert_absolute_path(name)
     new_hash = hashlib.md5(ca_cert).hexdigest()
     if file_hash(cert_file) == new_hash:
         return
diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py
index a3ec6947..7ee8a6ed 100644
--- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -96,12 +96,14 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
""" - from charmhelpers.fetch import apt_pkg + from charmhelpers.fetch import apt_pkg, get_installed_version if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + current_ver = get_installed_version(package) + else: + pkg = pkgcache[package] + current_ver = pkg.current_ver + + return apt_pkg.version_compare(current_ver.ver_str, revno) @cached diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 0cc7fc85..5b689f5b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -105,6 +105,7 @@ def base_url(self, url): get_upstream_version = fetch.get_upstream_version apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env + get_installed_version = fetch.get_installed_version elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index b5953019..b38edcc1 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -13,6 +13,7 @@ # limitations under the License. from collections import OrderedDict +import os import platform import re import six @@ -20,6 +21,7 @@ import sys import time +from charmhelpers import deprecate from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( @@ -198,6 +200,14 @@ 'victoria/proposed': 'focal-proposed/victoria', 'focal-victoria/proposed': 'focal-proposed/victoria', 'focal-proposed/victoria': 'focal-proposed/victoria', + # Wallaby + 'wallaby': 'focal-updates/wallaby', + 'focal-wallaby': 'focal-updates/wallaby', + 'focal-wallaby/updates': 'focal-updates/wallaby', + 'focal-updates/wallaby': 'focal-updates/wallaby', + 'wallaby/proposed': 'focal-proposed/wallaby', + 'focal-wallaby/proposed': 'focal-proposed/wallaby', + 'focal-proposed/wallaby': 'focal-proposed/wallaby', } @@ -251,13 +261,19 @@ def apt_cache(*_, **__): # Detect this situation, log a warning and make the call to # ``apt_pkg.init()`` to avoid the consumer Python interpreter from # crashing with a segmentation fault. - log('Support for use of upstream ``apt_pkg`` module in conjunction' - 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + @deprecate( + 'Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', + date=None, log=lambda x: log(x, level=WARNING)) + def one_shot_log(): + pass + + one_shot_log() sys.modules['apt_pkg'].init() return ubuntu_apt_pkg.Cache() -def apt_install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False, quiet=False): """Install one or more packages. :param packages: Package(s) to install @@ -267,6 +283,8 @@ def apt_install(packages, options=None, fatal=False): :param fatal: Whether the command's output should be checked and retried. 
     :type fatal: bool
+    :param quiet: if True, suppress the log message to stdout/stderr
+    :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
     if options is None:
@@ -279,9 +297,10 @@ def apt_install(packages, options=None, fatal=False):
         cmd.append(packages)
     else:
         cmd.extend(packages)
-    log("Installing {} with options: {}".format(packages,
-                                                options))
-    _run_apt_command(cmd, fatal)
+    if not quiet:
+        log("Installing {} with options: {}"
+            .format(packages, options))
+    _run_apt_command(cmd, fatal, quiet=quiet)


 def apt_upgrade(options=None, fatal=False, dist=False):
@@ -639,14 +658,17 @@ def _add_apt_repository(spec):
     :param spec: the parameter to pass to add_apt_repository
     :type spec: str
     """
+    series = get_distrib_codename()
     if '{series}' in spec:
-        series = get_distrib_codename()
         spec = spec.replace('{series}', series)
     # software-properties package for bionic properly reacts to proxy settings
-    # passed as environment variables (See lp:1433761). This is not the case
-    # LTS and non-LTS releases below bionic.
-    _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https', 'http']))
+    # set via apt.conf (see lp:1433761), however this is not the case for LTS
+    # and non-LTS releases before bionic.
+    if series in ('trusty', 'xenial'):
+        _run_with_retries(['add-apt-repository', '--yes', spec],
+                          cmd_env=env_proxy_settings(['https', 'http']))
+    else:
+        _run_with_retries(['add-apt-repository', '--yes', spec])


 def _add_cloud_pocket(pocket):
@@ -723,7 +745,7 @@ def _verify_is_ubuntu_rel(release, os_release):

 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
-                      retry_message="", cmd_env=None):
+                      retry_message="", cmd_env=None, quiet=False):
     """Run a command and retry until success or max_retries is reached.

     :param cmd: The apt command to run.
@@ -738,11 +760,20 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     :type retry_message: str
     :param: cmd_env: Environment variables to add to the command run.
     :type cmd_env: Option[None, Dict[str, str]]
+    :param quiet: if True, silence the output of the command from stdout and
+        stderr
+    :type quiet: bool
     """
     env = get_apt_dpkg_env()
     if cmd_env:
         env.update(cmd_env)

+    kwargs = {}
+    if quiet:
+        devnull = os.devnull if six.PY2 else subprocess.DEVNULL
+        kwargs['stdout'] = devnull
+        kwargs['stderr'] = devnull
+
     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
     retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
@@ -753,7 +784,7 @@
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            result = subprocess.check_call(cmd, env=env)
+            result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
@@ -763,7 +794,7 @@
         time.sleep(CMD_RETRY_DELAY)


-def _run_apt_command(cmd, fatal=False):
+def _run_apt_command(cmd, fatal=False, quiet=False):
     """Run an apt command with optional retries.

     :param cmd: The apt command to run.
     :param fatal: Whether the command's output should be checked and
                   retried.
:type fatal: bool + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ if fatal: _run_with_retries( cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock") + retry_message="Couldn't acquire DPKG lock", + quiet=quiet) else: - subprocess.call(cmd, env=get_apt_dpkg_env()) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) def get_upstream_version(package): @@ -799,6 +839,22 @@ def get_upstream_version(package): return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) +def get_installed_version(package): + """Determine installed version of a package + + @returns None (if not installed) or the installed version as + Version object + """ + cache = apt_cache() + dpkg_result = cache._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + + if installed_version: + current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) + return current_ver + + def get_apt_dpkg_env(): """Get environment suitable for execution of APT and DPKG tools. diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 52d380b4..e5c38793 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -56,11 +56,11 @@ ) from charmhelpers.fetch import ( add_source, - apt_cache, apt_install, apt_purge, apt_update, - filter_missing_packages + filter_missing_packages, + get_installed_version ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -497,10 +497,7 @@ def tune_dev(block_dev): def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" + return 'ceph' class CrushLocation(object): @@ -715,22 +712,15 @@ def get_version(): """Derive Ceph release from an installed package.""" import apt_pkg as apt - cache = apt_cache() package = "ceph" - try: - pkg = cache[package] - except KeyError: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - if not pkg.current_ver: + current_ver = get_installed_version(package) + if not current_ver: # package is known, but no version is currently installed. 
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.upstream_version(pkg.current_ver.ver_str) + vers = apt.upstream_version(current_ver.ver_str) # x.y match only for 20XX.X # and ignore patch level for other packages diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 9aea716b..394e4d37 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From cbdd593b566b2460a4d3ce87c4169a713fed01b1 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:02 +0100 Subject: [PATCH 2189/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers Change-Id: I6c46959aa659454d28880e375e3488058227dca7 --- ceph-radosgw/charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 4 +- .../contrib/openstack/amulet/utils.py | 1 + .../contrib/openstack/cert_utils.py | 27 +- .../charmhelpers/contrib/openstack/context.py | 16 +- .../contrib/openstack/deferred_events.py | 410 ++++++++++++++++++ .../contrib/openstack/exceptions.py | 5 + .../openstack/files/policy_rc_d_script.py | 196 +++++++++ .../contrib/openstack/policy_rcd.py | 173 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 291 ++++++++++++- .../hooks/charmhelpers/core/hookenv.py | 20 + ceph-radosgw/hooks/charmhelpers/core/host.py | 236 ++++++++-- .../charmhelpers/core/host_factory/ubuntu.py | 12 +- .../hooks/charmhelpers/fetch/__init__.py | 1 + .../hooks/charmhelpers/fetch/ubuntu.py | 88 +++- ceph-radosgw/lib/charms_ceph/utils.py | 22 +- ceph-radosgw/test-requirements.txt | 4 +- 17 files changed, 1401 insertions(+), 107 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py create mode 100755 ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/policy_rcd.py diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index fa9cd645..9d1a4980 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers +repo: https://github.com/juju/charm-helpers@stable/21.04 destination: hooks/charmhelpers include: - core diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index c87cf489..e4cb06bc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -337,10 +337,8 @@ def write(self): "command": 
nrpecheck.command, } # If we were passed max_check_attempts, add that to the relation data - try: + if nrpecheck.max_check_attempts is not None: nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts - except AttributeError: - pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 63aea1e3..0a14af7e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -42,6 +42,7 @@ import swiftclient from charmhelpers.core.decorators import retry_on_exception + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 24867497..703fc6ef 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -47,7 +47,7 @@ ) from charmhelpers.core.host import ( - CA_CERT_DIR, + ca_cert_absolute_path, install_ca_cert, mkdir, write_file, @@ -307,6 +307,26 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def get_cert_relation_ca_name(cert_relation_id=None): + """Determine CA certificate name as provided by relation. + + The filename on disk depends on the name chosen for the application on the + providing end of the certificates relation. + + :param cert_relation_id: (Optional) Relation id providing the certs + :type cert_relation_id: str + :returns: CA certificate filename without path nor extension + :rtype: str + """ + if cert_relation_id is None: + try: + cert_relation_id = relation_ids('certificates')[0] + except IndexError: + return '' + return '{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id)) + + def _manage_ca_certs(ca, cert_relation_id): """Manage CA certs. 
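Combined with ca_cert_absolute_path() added to core/host.py in this sync, a charm can resolve the on-disk location of the relation-provided CA certificate. A minimal sketch, assuming a hypothetical provider application named "vault" on the certificates relation (the directory prefix comes from CA_CERT_DIR):

    from charmhelpers.contrib.openstack.cert_utils import get_cert_relation_ca_name
    from charmhelpers.core.host import ca_cert_absolute_path

    # With a provider application named "vault" this resolves to
    # <CA_CERT_DIR>/vault_juju_ca_cert.crt.
    ca_path = ca_cert_absolute_path(get_cert_relation_ca_name())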
@@ -316,7 +336,7 @@ def _manage_ca_certs(ca, cert_relation_id):
     :type cert_relation_id: str
     """
     config_ssl_ca = config('ssl_ca')
-    config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE)
+    config_cert_file = ca_cert_absolute_path(CONFIG_CA_CERT_FILE)
     if config_ssl_ca:
         log("Installing CA certificate from charm ssl_ca config to {}".format(
             config_cert_file), INFO)
@@ -329,8 +349,7 @@ def _manage_ca_certs(ca, cert_relation_id):
         log("Installing CA certificate from certificate relation", INFO)
         install_ca_cert(
             ca.encode(),
-            name='{}_juju_ca_cert'.format(
-                remote_service_name(relid=cert_relation_id)))
+            name=get_cert_relation_ca_name(cert_relation_id))


 def process_certificates(service_name, relation_id, unit,
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
index c242d18d..b67dafda 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py
@@ -74,7 +74,6 @@
     pwgen,
     lsb_release,
     CompareHostReleases,
-    is_container,
 )
 from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
@@ -1596,16 +1595,21 @@ def _calculate_workers():

     @returns int: number of worker processes to use
     '''
-    multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
+    multiplier = config('worker-multiplier')
+
+    # distinguish between an unset option (None) and one explicitly set to 0.0
+    if multiplier is None:
+        multiplier = DEFAULT_MULTIPLIER
+
     count = int(_num_cpus() * multiplier)
-    if multiplier > 0 and count == 0:
+    if count <= 0:
+        # assign at least one worker
         count = 1

-    if config('worker-multiplier') is None and is_container():
+    if config('worker-multiplier') is None:
         # NOTE(jamespage): Limit unconfigured worker-multiplier
         #                  to MAX_DEFAULT_WORKERS to avoid insane
-        #                  worker configuration in LXD containers
-        #                  on large servers
+        #                  worker configuration on large servers
         #                  Reference: https://pad.lv/1665270
         count = min(count, MAX_DEFAULT_WORKERS)

diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py
new file mode 100644
index 00000000..fd073a04
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py
@@ -0,0 +1,410 @@
+# Copyright 2021 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for managing deferred service events.
+
+This module is used to manage deferred service events from both charm actions
+and package actions.
+"""
+
+import datetime
+import glob
+import yaml
+import os
+import time
+import uuid
+
+import charmhelpers.contrib.openstack.policy_rcd as policy_rcd
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.unitdata as unitdata
+
+import subprocess
+
+
+# Deferred events generated from the charm are stored alongside those
+# generated from packaging.
+DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR
+
+
+class ServiceEvent():
+
+    def __init__(self, timestamp, service, reason, action,
+                 policy_requestor_name=None, policy_requestor_type=None):
+        self.timestamp = timestamp
+        self.service = service
+        self.reason = reason
+        self.action = action
+        # Default the requestor to this charm if one is not supplied.
+        self.policy_requestor_name = (
+            policy_requestor_name or hookenv.service_name())
+        self.policy_requestor_type = policy_requestor_type or 'charm'
+
+    def __eq__(self, other):
+        for attr in vars(self):
+            if getattr(self, attr) != getattr(other, attr):
+                return False
+        return True
+
+    def matching_request(self, other):
+        for attr in ['service', 'action', 'reason']:
+            if getattr(self, attr) != getattr(other, attr):
+                return False
+        return True
+
+    @classmethod
+    def from_dict(cls, data):
+        return cls(
+            data['timestamp'],
+            data['service'],
+            data['reason'],
+            data['action'],
+            data.get('policy_requestor_name'),
+            data.get('policy_requestor_type'))
+
+
+def deferred_events_files():
+    """Deferred event files
+
+    Deferred event files that were generated by the service_name() policy.
+
+    :returns: Deferred event files
+    :rtype: List[str]
+    """
+    return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
+
+
+def read_event_file(file_name):
+    """Read a file and return the corresponding objects.
+
+    :param file_name: Name of file to read.
+    :type file_name: str
+    :returns: ServiceEvent from file.
+    :rtype: ServiceEvent
+    """
+    with open(file_name, 'r') as f:
+        contents = yaml.safe_load(f)
+    event = ServiceEvent(
+        contents['timestamp'],
+        contents['service'],
+        contents['reason'],
+        contents['action'])
+    return event
+
+
+def deferred_events():
+    """Get list of deferred events.
+
+    List of deferred events. Events are represented by dicts of the form:
+
+    {
+        action: restart,
+        policy_requestor_name: neutron-openvswitch,
+        policy_requestor_type: charm,
+        reason: 'Pkg update',
+        service: openvswitch-switch,
+        timestamp: 1614328743}
+
+    :returns: List of deferred events.
+    :rtype: List[ServiceEvent]
+    """
+    events = []
+    for defer_file in deferred_events_files():
+        events.append((defer_file, read_event_file(defer_file)))
+    return events
+
+
+def duplicate_event_files(event):
+    """Get list of event files that have equivalent deferred events.
+
+    :param event: Event to compare
+    :type event: ServiceEvent
+    :returns: List of event files
+    :rtype: List[str]
+    """
+    duplicates = []
+    for event_file, existing_event in deferred_events():
+        if event.matching_request(existing_event):
+            duplicates.append(event_file)
+    return duplicates
+
+
+def get_event_record_file(policy_requestor_type, policy_requestor_name):
+    """Generate filename for storing a new event.
+
+    :param policy_requestor_type: System that blocked event
+    :type policy_requestor_type: str
+    :param policy_requestor_name: Name of application that blocked event
+    :type policy_requestor_name: str
+    :returns: File name
+    :rtype: str
+    """
+    file_name = '{}/{}-{}-{}.deferred'.format(
+        DEFERRED_EVENTS_DIR,
+        policy_requestor_type,
+        policy_requestor_name,
+        uuid.uuid1())
+    return file_name
+
+
+def save_event(event):
+    """Write deferred events to backend.
+
+    :param event: Event to save
+    :type event: ServiceEvent
+    """
+    requestor_name = hookenv.service_name()
+    requestor_type = 'charm'
+    init_policy_log_dir()
+    if duplicate_event_files(event):
+        hookenv.log(
+            "Not writing new event, existing event found. {} {} {}".format(
+                event.service,
+                event.action,
+                event.reason),
+            level="DEBUG")
+    else:
+        record_file = get_event_record_file(
+            policy_requestor_type=requestor_type,
+            policy_requestor_name=requestor_name)
+
+        with open(record_file, 'w') as f:
+            data = {
+                'timestamp': event.timestamp,
+                'service': event.service,
+                'action': event.action,
+                'reason': event.reason,
+                'policy_requestor_type': requestor_type,
+                'policy_requestor_name': requestor_name}
+            yaml.dump(data, f)
+
+
+def clear_deferred_events(svcs, action):
+    """Remove any outstanding deferred events.
+
+    Remove a deferred event if its service is in the services list and its
+    action matches.
+
+    :param svcs: List of services to remove.
+    :type svcs: List[str]
+    :param action: Action to remove
+    :type action: str
+    """
+    # XXX This function is not currently processing the action. It needs to
+    #     match the action and also take account of try-restart and the
+    #     equivalence of stop-start and restart.
+    for defer_file in deferred_events_files():
+        deferred_event = read_event_file(defer_file)
+        if deferred_event.service in svcs:
+            os.remove(defer_file)
+
+
+def init_policy_log_dir():
+    """Ensure directory to store events exists."""
+    if not os.path.exists(DEFERRED_EVENTS_DIR):
+        os.mkdir(DEFERRED_EVENTS_DIR)
+
+
+def get_deferred_events():
+    """Return a list of deferred events requested by the charm and packages.
+
+    :returns: List of deferred events
+    :rtype: List[ServiceEvent]
+    """
+    events = []
+    for _, event in deferred_events():
+        events.append(event)
+    return events
+
+
+def get_deferred_restarts():
+    """List of deferred restart events requested by the charm and packages.
+
+    :returns: List of deferred restarts
+    :rtype: List[ServiceEvent]
+    """
+    return [e for e in get_deferred_events() if e.action == 'restart']
+
+
+def clear_deferred_restarts(services):
+    """Clear deferred restart events targeted at `services`.
+
+    :param services: Services with deferred actions to clear.
+    :type services: List[str]
+    """
+    clear_deferred_events(services, 'restart')
+
+
+def process_svc_restart(service):
+    """Respond to a service restart having occurred.
+
+    :param service: Service that the action was performed against.
+    :type service: str
+    """
+    clear_deferred_restarts([service])
+
+
+def is_restart_permitted():
+    """Check whether restarts are permitted.
+
+    :returns: Whether restarts are permitted
+    :rtype: bool
+    """
+    if hookenv.config('enable-auto-restarts') is None:
+        return True
+    return hookenv.config('enable-auto-restarts')
+
+
+def check_and_record_restart_request(service, changed_files):
+    """Check if restarts are permitted; if they are not, log the request.
+
+    :param service: Service to be restarted
+    :type service: str
+    :param changed_files: Files that have changed to trigger restarts.
+    :type changed_files: List[str]
+    :returns: Whether restarts are permitted
+    :rtype: bool
+    """
+    changed_files = sorted(list(set(changed_files)))
+    permitted = is_restart_permitted()
+    if not permitted:
+        save_event(ServiceEvent(
+            timestamp=round(time.time()),
+            service=service,
+            reason='File(s) changed: {}'.format(
+                ', '.join(changed_files)),
+            action='restart'))
+    return permitted
+
+
+def deferrable_svc_restart(service, reason=None):
+    """Restart the service if permitted; if not, defer it.
+
+    :param service: Service to be restarted
+    :type service: str
+    :param reason: Reason for restart
+    :type reason: Union[str, None]
+    """
+    if is_restart_permitted():
+        host.service_restart(service)
+    else:
+        save_event(ServiceEvent(
+            timestamp=round(time.time()),
+            service=service,
+            reason=reason,
+            action='restart'))
+
+
+def configure_deferred_restarts(services):
+    """Setup deferred restarts.
+
+    :param services: Services to block restarts of.
+    :type services: List[str]
+    """
+    policy_rcd.install_policy_rcd()
+    if is_restart_permitted():
+        policy_rcd.remove_policy_file()
+    else:
+        blocked_actions = ['stop', 'restart', 'try-restart']
+        for svc in services:
+            policy_rcd.add_policy_block(svc, blocked_actions)
+
+
+def get_service_start_time(service):
+    """Find point in time when the systemd unit transitioned to active state.
+
+    :param service: Service to check the start timestamp of.
+    :type service: str
+    :returns: Time the unit entered the active state, or None if that
+              information is unavailable.
+    :rtype: Optional[datetime.datetime]
+    """
+    start_time = None
+    out = subprocess.check_output(
+        [
+            'systemctl',
+            'show',
+            service,
+            '--property=ActiveEnterTimestamp'])
+    str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '')
+    if str_time:
+        start_time = datetime.datetime.strptime(
+            str_time,
+            '%a %Y-%m-%d %H:%M:%S %Z')
+    return start_time
+
+
+def check_restart_timestamps():
+    """Check deferred restarts against systemd units start time.
+
+    Check if a service has a deferred event and clear it if it has been
+    subsequently restarted.
+    """
+    for event in get_deferred_restarts():
+        start_time = get_service_start_time(event.service)
+        deferred_restart_time = datetime.datetime.fromtimestamp(
+            event.timestamp)
+        if start_time and start_time < deferred_restart_time:
+            hookenv.log(
+                ("Restart still required, {} was started at {}, restart was "
+                 "requested after that at {}").format(
+                    event.service,
+                    start_time,
+                    deferred_restart_time),
+                level='DEBUG')
+        else:
+            clear_deferred_restarts([event.service])
+
+
+def set_deferred_hook(hookname):
+    """Record that a hook has been deferred.
+
+    :param hookname: Name of hook that was deferred.
+    :type hookname: str
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        deferred_hooks = kv.get('deferred-hooks', [])
+        if hookname not in deferred_hooks:
+            deferred_hooks.append(hookname)
+            kv.set('deferred-hooks', sorted(list(set(deferred_hooks))))
+
+
+def get_deferred_hooks():
+    """Get a list of deferred hooks.
+
+    :returns: List of hook names.
+    :rtype: List[str]
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        return kv.get('deferred-hooks', [])
+
+
+def clear_deferred_hooks():
+    """Clear any deferred hooks."""
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('deferred-hooks', [])
+
+
+def clear_deferred_hook(hookname):
+    """Clear a specific deferred hook.
+
+    :param hookname: Name of hook to remove.
+    :type hookname: str
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        deferred_hooks = kv.get('deferred-hooks', [])
+        if hookname in deferred_hooks:
+            deferred_hooks.remove(hookname)
+            kv.set('deferred-hooks', deferred_hooks)
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py
index f85ae4f4..b2330637 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/exceptions.py
@@ -19,3 +19,8 @@ class OSContextError(Exception):
     This exception is principally used in contrib.openstack.context
     """
     pass
+
+
+class ServiceActionError(Exception):
+    """Raised when a service action (stop/start/etc.) failed."""
+    pass
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
new file mode 100755
index 00000000..344a7662
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+
+"""This script is an implementation of policy-rc.d
+
+For further information on policy-rc.d see *1
+
+*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+"""
+import collections
+import glob
+import os
+import logging
+import sys
+import time
+import uuid
+import yaml
+
+
+SystemPolicy = collections.namedtuple(
+    'SystemPolicy',
+    [
+        'policy_requestor_name',
+        'policy_requestor_type',
+        'service',
+        'blocked_actions'])
+
+DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d'
+DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d'
+
+
+def read_policy_file(policy_file):
+    """Return system policies from given file.
+
+    :param policy_file: Name of file to read.
+    :type policy_file: str
+    :returns: Policies
+    :rtype: List[SystemPolicy]
+    """
+    policies = []
+    if os.path.exists(policy_file):
+        with open(policy_file, 'r') as f:
+            policy = yaml.safe_load(f)
+        for service, actions in policy['blocked_actions'].items():
+            service = service.replace('.service', '')
+            policies.append(SystemPolicy(
+                policy_requestor_name=policy['policy_requestor_name'],
+                policy_requestor_type=policy['policy_requestor_type'],
+                service=service,
+                blocked_actions=actions))
+    return policies
+
+
+def get_policies(policy_config_dir):
+    """Return all system policies in policy_config_dir.
+
+    :param policy_config_dir: Directory to read policy files from.
+    :type policy_config_dir: str
+    :returns: Policies
+    :rtype: List[SystemPolicy]
+    """
+    _policy = []
+    for f in glob.glob('{}/*.policy'.format(policy_config_dir)):
+        _policy.extend(read_policy_file(f))
+    return _policy
+
+
+def record_blocked_action(service, action, blocking_policies, policy_log_dir):
+    """Record that an action was requested but denied.
+
+    :param service: Service that was blocked
+    :type service: str
+    :param action: Action that was blocked.
+    :type action: str
+    :param blocking_policies: Policies that blocked the action on the service.
+    :type blocking_policies: List[SystemPolicy]
+    :param policy_log_dir: Directory to place the blocking action record.
+    :type policy_log_dir: str
+    """
+    if not os.path.exists(policy_log_dir):
+        os.mkdir(policy_log_dir)
+    seconds = round(time.time())
+    for policy in blocking_policies:
+        file_name = '{}/{}-{}-{}.deferred'.format(
+            policy_log_dir,
+            policy.policy_requestor_type,
+            policy.policy_requestor_name,
+            uuid.uuid1())
+        with open(file_name, 'w') as f:
+            data = {
+                'timestamp': seconds,
+                'service': service,
+                'action': action,
+                'reason': 'Package update',
+                'policy_requestor_type': policy.policy_requestor_type,
+                'policy_requestor_name': policy.policy_requestor_name}
+            yaml.dump(data, f)
+
+
+def get_blocking_policies(service, action, policy_config_dir):
+    """Return the policies that block the requested action on the service.
+
+    :param service: Service that action is requested against.
+    :type service: str
+    :param action: Action that is requested.
+    :type action: str
+    :param policy_config_dir: Directory that stores policy files.
+    :type policy_config_dir: str
+    :returns: Policies
+    :rtype: List[SystemPolicy]
+    """
+    service = service.replace('.service', '')
+    blocking_policies = [
+        policy
+        for policy in get_policies(policy_config_dir)
+        if policy.service == service and action in policy.blocked_actions]
+    return blocking_policies
+
+
+def process_action_request(service, action, policy_config_dir, policy_log_dir):
+    """Take the requested action against service and check if it is permitted.
+
+    :param service: Service that action is requested against.
+    :type service: str
+    :param action: Action that is requested.
+    :type action: str
+    :param policy_config_dir: Directory that stores policy files.
+    :type policy_config_dir: str
+    :param policy_log_dir: Directory that stores deferred event records.
+    :type policy_log_dir: str
+    :returns: Tuple of whether the action is permitted and explanation.
+    :rtype: (boolean, str)
+    """
+    blocking_policies = get_blocking_policies(
+        service,
+        action,
+        policy_config_dir)
+    if blocking_policies:
+        policy_msg = [
+            '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name)
+            for p in sorted(blocking_policies)]
+        message = '{} of {} blocked by {}'.format(
+            action,
+            service,
+            ', '.join(policy_msg))
+        record_blocked_action(
+            service,
+            action,
+            blocking_policies,
+            policy_log_dir)
+        action_permitted = False
+    else:
+        message = "Permitting {} {}".format(service, action)
+        action_permitted = True
+    return action_permitted, message
+
+
+def main():
+    logging.basicConfig(
+        filename='/var/log/policy-rc.d.log',
+        level=logging.DEBUG,
+        format='%(asctime)s %(message)s')
+
+    service = sys.argv[1]
+    action = sys.argv[2]
+
+    permitted, message = process_action_request(
+        service,
+        action,
+        DEFAULT_POLICY_CONFIG_DIR,
+        DEFAULT_POLICY_LOG_DIR)
+    logging.info(message)
+
+    # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+    # Exit status codes:
+    #  0 - action allowed
+    #  1 - unknown action (therefore, undefined policy)
+    #  100 - unknown initscript id
+    #  101 - action forbidden by policy
+    #  102 - subsystem error
+    #  103 - syntax error
+    #  104 - [reserved]
+    #  105 - behaviour uncertain, policy undefined.
+    #  106 - action not allowed. Use the returned fallback actions
+    #        (which are implied to be "allowed") instead.
+ + if permitted: + return 0 + else: + return 101 + + +if __name__ == "__main__": + rc = main() + sys.exit(rc) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policy_rcd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policy_rcd.py new file mode 100644 index 00000000..ecffbc68 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policy_rcd.py @@ -0,0 +1,173 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing policy-rc.d script and associated files. + +This module manages the installation of /usr/sbin/policy-rc.d, the +policy files and the event files. When a package update occurs the +packaging system calls: + +policy-rc.d [options] + +The return code of the script determines if the packaging system +will perform that action on the given service. The policy-rc.d +implementation installed by this module checks if an action is +permitted by checking policy files placed in /etc/policy-rc.d. +If a policy file exists which denies the requested action then +this is recorded in an event file which is placed in +/var/lib/policy-rc.d. +""" + +import os +import shutil +import tempfile +import yaml + +import charmhelpers.contrib.openstack.files as os_files +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host + +POLICY_HEADER = """# Managed by juju\n""" +POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' +POLICY_CONFIG_DIR = '/etc/policy-rc.d' + + +def get_policy_file_name(): + """Get the name of the policy file for this application. + + :returns: Policy file name + :rtype: str + """ + application_name = hookenv.service_name() + return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) + + +def read_default_policy_file(): + """Return the policy file. + + A policy is in the form: + blocked_actions: + neutron-dhcp-agent: [restart, stop, try-restart] + neutron-l3-agent: [restart, stop, try-restart] + neutron-metadata-agent: [restart, stop, try-restart] + neutron-openvswitch-agent: [restart, stop, try-restart] + openvswitch-switch: [restart, stop, try-restart] + ovs-vswitchd: [restart, stop, try-restart] + ovs-vswitchd-dpdk: [restart, stop, try-restart] + ovsdb-server: [restart, stop, try-restart] + policy_requestor_name: neutron-openvswitch + policy_requestor_type: charm + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = {} + policy_file = get_policy_file_name() + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + return policy + + +def write_policy_file(policy_file, policy): + """Write policy to disk. 
+ + :param policy_file: Name of policy file + :type policy_file: str + :param policy: Policy + :type policy: Dict[str, Union[str, Dict[str, List[str]]]] + """ + with tempfile.NamedTemporaryFile('w', delete=False) as f: + f.write(POLICY_HEADER) + yaml.dump(policy, f) + tmp_file_name = f.name + shutil.move(tmp_file_name, policy_file) + + +def remove_policy_file(): + """Remove policy file.""" + try: + os.remove(get_policy_file_name()) + except FileNotFoundError: + pass + + +def install_policy_rcd(): + """Install policy-rc.d components.""" + source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) + policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( + hookenv.service_name()) + host.mkdir(os.path.dirname(policy_rcd_exec)) + shutil.copy2( + '{}/policy_rc_d_script.py'.format(source_file_dir), + policy_rcd_exec) + # policy-rc.d must be installed via the alternatives system: + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + if not os.path.exists('/usr/sbin/policy-rc.d'): + alternatives.install_alternative( + 'policy-rc.d', + '/usr/sbin/policy-rc.d', + policy_rcd_exec) + host.mkdir(POLICY_CONFIG_DIR) + + +def get_default_policy(): + """Return the default policy structure. + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = { + 'policy_requestor_name': hookenv.service_name(), + 'policy_requestor_type': 'charm', + 'blocked_actions': {}} + return policy + + +def add_policy_block(service, blocked_actions): + """Update a policy file with new list of actions. + + :param service: Service name + :type service: str + :param blocked_actions: Action to block + :type blocked_actions: List[str] + """ + policy = read_default_policy_file() or get_default_policy() + policy_file = get_policy_file_name() + if policy['blocked_actions'].get(service): + policy['blocked_actions'][service].extend(blocked_actions) + else: + policy['blocked_actions'][service] = blocked_actions + policy['blocked_actions'][service] = sorted( + list(set(policy['blocked_actions'][service]))) + write_policy_file(policy_file, policy) + + +def remove_policy_block(service, unblocked_actions): + """Remove list of actions from policy file. + + :param service: Service name + :type service: str + :param unblocked_actions: Action to unblock + :type unblocked_actions: List[str] + """ + policy_file = get_policy_file_name() + policy = read_default_policy_file() + for action in unblocked_actions: + try: + policy['blocked_actions'][service].remove(action) + except (KeyError, ValueError): + continue + write_policy_file(policy_file, policy) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index f27aa6c9..2ad8ab94 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -14,7 +14,7 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict, namedtuple -from functools import wraps +from functools import partial, wraps import subprocess import json @@ -36,9 +36,12 @@ from charmhelpers.core import decorators, unitdata +import charmhelpers.contrib.openstack.deferred_events as deferred_events + from charmhelpers.core.hookenv import ( WORKLOAD_STATES, action_fail, + action_get, action_set, config, expected_peer_units, @@ -112,7 +115,7 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError from charmhelpers.contrib.openstack.policyd import ( policyd_status_message_prefix, POLICYD_CONFIG_NAME, @@ -148,6 +151,7 @@ 'train', 'ussuri', 'victoria', + 'wallaby', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -170,6 +174,7 @@ ('eoan', 'train'), ('focal', 'ussuri'), ('groovy', 'victoria'), + ('hirsute', 'wallaby'), ]) @@ -193,6 +198,7 @@ ('2019.2', 'train'), ('2020.1', 'ussuri'), ('2020.2', 'victoria'), + ('2021.1', 'wallaby'), ]) # The ugly duckling - must list releases oldest to newest @@ -301,8 +307,8 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), + ('18', 'ussuri'), # Note this was actually 17.0 - 18.3 + ('19', 'victoria'), # Note this is really 18.6 ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -483,9 +489,26 @@ def get_swift_codename(version): return None -@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' + """Derive OpenStack release codename from an installed package. + + Initially, see if the openstack-release pkg is available (by trying to + install it) and use it instead. + + If it isn't then it falls back to the existing method of checking the + version of the package passed and then resolving the version from that + using lookup tables. + + Note: if possible, charms should use get_installed_os_version() to + determine the version of the "openstack-release" pkg. + + :param package: the package to test for version information. + :type package: str + :param fatal: If True (default), then die via error_out() + :type fatal: bool + :returns: the OpenStack release codename (e.g. ussuri) + :rtype: str + """ codename = get_installed_os_version() if codename: @@ -579,8 +602,22 @@ def get_os_version_package(pkg, fatal=True): def get_installed_os_version(): - apt_install(filter_installed_packages(['openstack-release']), fatal=False) - print("OpenStack Release: {}".format(openstack_release())) + """Determine the OpenStack release code name from openstack-release pkg. + + This uses the "openstack-release" pkg (if it exists) to return the + OpenStack release codename (e.g. usurri, mitaka, ocata, etc.) + + Note, it caches the result so that it is only done once per hook. 
+ + :returns: the OpenStack release codename, if available + :rtype: Optional[str] + """ + @cached + def _do_install(): + apt_install(filter_installed_packages(['openstack-release']), + fatal=False, quiet=True) + + _do_install() return openstack_release().get('OPENSTACK_CODENAME') @@ -1052,6 +1089,18 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + deferred_restarts = list(set( + [e.service for e in deferred_events.get_deferred_restarts()])) + if deferred_restarts: + svc_msg = "Services queued for restart: {}".format( + ', '.join(sorted(deferred_restarts))) + message = "{}. {}".format(message, svc_msg) + deferred_hooks = deferred_events.get_deferred_hooks() + if deferred_hooks: + svc_msg = "Hooks skipped due to disabled auto restarts: {}".format( + ', '.join(sorted(deferred_hooks))) + message = "{}. {}".format(message, svc_msg) + except Exception: pass @@ -1536,6 +1585,33 @@ def is_unit_paused_set(): return False +def is_hook_allowed(hookname, check_deferred_restarts=True): + """Check if hook can run. + + :param hookname: Name of hook to check.. + :type hookname: str + :param check_deferred_restarts: Whether to check deferred restarts. + :type check_deferred_restarts: bool + """ + permitted = True + reasons = [] + if is_unit_paused_set(): + reasons.append( + "Unit is pause or upgrading. Skipping {}".format(hookname)) + permitted = False + + if check_deferred_restarts: + if deferred_events.is_restart_permitted(): + permitted = True + deferred_events.clear_deferred_hook(hookname) + else: + if not config().changed('enable-auto-restarts'): + deferred_events.set_deferred_hook(hookname) + reasons.append("auto restarts are disabled") + permitted = False + return permitted, " and ".join(reasons) + + def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. @@ -1696,6 +1772,43 @@ def resume_unit(assess_status_func, services=None, ports=None, raise Exception("Couldn't resume: {}".format("; ".join(messages))) +def restart_services_action(services=None, when_all_stopped_func=None, + deferred_only=None): + """Manage a service restart request via charm action. + + :param services: Services to be restarted + :type model_name: List[str] + :param when_all_stopped_func: Function to call when all services are + stopped. + :type when_all_stopped_func: Callable[] + :param model_name: Only restart services which have a deferred restart + event. + :type model_name: bool + """ + if services and deferred_only: + raise ValueError( + "services and deferred_only are mutually exclusive") + if deferred_only: + services = list(set( + [a.service for a in deferred_events.get_deferred_restarts()])) + _, messages = manage_payload_services( + 'stop', + services=services, + charm_func=when_all_stopped_func) + if messages: + raise ServiceActionError( + "Error processing service stop request: {}".format( + "; ".join(messages))) + _, messages = manage_payload_services( + 'start', + services=services) + if messages: + raise ServiceActionError( + "Error processing service start request: {}".format( + "; ".join(messages))) + deferred_events.clear_deferred_restarts(services) + + def make_assess_status_func(*args, **kwargs): """Creates an assess_status_func() suitable for handing to pause_unit() and resume_unit(). 
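A sketch of how a charm hook might consume the new is_hook_allowed() helper; the hook body and log message are illustrative, only the helper itself comes from this patch:

    from charmhelpers.contrib.openstack.utils import is_hook_allowed
    from charmhelpers.core.hookenv import log

    def config_changed():
        permitted, reason = is_hook_allowed('config-changed')
        if not permitted:
            # The skipped hook is recorded via set_deferred_hook() and the
            # reason later surfaces in the unit's workload status message.
            log('Skipping config-changed: {}'.format(reason))
            return
        # ... normal hook processing ...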
@@ -1717,7 +1830,10 @@ def _assess_status_func(): def pausable_restart_on_change(restart_map, stopstart=False, - restart_functions=None): + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): """A restart_on_change decorator that checks to see if the unit is paused. If it is paused then the decorated function doesn't fire. @@ -1743,11 +1859,28 @@ def some_hook(...): function won't be called if the decorated function is never called. Note, retains backwards compatibility for passing a non-callable dictionary. - @param f: the function to decorate - @param restart_map: (optionally callable, which then returns the - restart_map) the restart map {conf_file: [services]} - @param stopstart: DEFAULT false; whether to stop, start or just restart - @returns decorator to use a restart_on_change with pausability + :param f: function to decorate. + :type f: Callable + :param restart_map: Optionally callable, which then returns the restart_map or + the restart map {conf_file: [services]} + :type restart_map: Union[Callable[[],], Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + :returns: decorator to use a restart_on_change with pausability + :rtype: decorator + + """ def wrap(f): # py27 compatible nonlocal variable. When py3 only, replace with @@ -1763,8 +1896,13 @@ def wrapped_f(*args, **kwargs): if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], - stopstart, restart_functions) + (lambda: f(*args, **kwargs)), + __restart_map_cache['cache'], + stopstart, + restart_functions, + can_restart_now_f, + post_svc_restart_f, + pre_restarts_wait_f) return wrapped_f return wrap @@ -2145,6 +2283,23 @@ def container_scoped_relations(): return relations +def container_scoped_relation_get(attribute=None): + """Get relation data from all container scoped relations. + + :param attribute: Name of attribute to get + :type attribute: Optional[str] + :returns: Iterator with relation data + :rtype: Iterator[Optional[any]] + """ + for endpoint_name in container_scoped_relations(): + for rid in relation_ids(endpoint_name): + for unit in related_units(rid): + yield relation_get( + attribute=attribute, + unit=unit, + rid=rid) + + def is_db_ready(use_current_context=False, rel_name=None): """Check remote database is ready to be used. @@ -2418,3 +2573,107 @@ def get_api_application_status(): msg = 'Some units are not ready' juju_log(msg, 'DEBUG') return app_state, msg + + +def sequence_status_check_functions(*functions): + """Sequence the functions passed so that they all get a chance to run as + the charm status check functions. 
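A sketch of iterating the new container_scoped_relation_get(); the 'releases-packages-map' attribute mirrors its use by get_subordinate_release_packages() below, and the JSON handling is an assumption about how subordinates publish the data:

    import json
    from charmhelpers.contrib.openstack.utils import (
        container_scoped_relation_get)

    def subordinate_release_maps():
        maps = []
        for rdata in container_scoped_relation_get('releases-packages-map'):
            if rdata:  # units that have not set the attribute yield None
                maps.append(json.loads(rdata))
        return maps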
+ + :param *functions: a list of functions that return (state, message) + :type *functions: List[Callable[[OSConfigRender], (str, str)]] + :returns: the Callable that takes configs and returns (state, message) + :rtype: Callable[[OSConfigRender], (str, str)] + """ + def _inner_sequenced_functions(configs): + state, message = 'unknown', '' + for f in functions: + new_state, new_message = f(configs) + state = workload_state_compare(state, new_state) + if message: + message = "{}, {}".format(message, new_message) + else: + message = new_message + return state, message + + return _inner_sequenced_functions + + +SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge']) + + +def get_subordinate_release_packages(os_release, package_type='deb'): + """Iterate over subordinate relations and get package information. + + :param os_release: OpenStack release to look for + :type os_release: str + :param package_type: Package type (one of 'deb' or 'snap') + :type package_type: str + :returns: Packages to install and packages to purge or None + :rtype: SubordinatePackages[set,set] + """ + install = set() + purge = set() + + for rdata in container_scoped_relation_get('releases-packages-map'): + rp_map = json.loads(rdata or '{}') + # The map provided by subordinate has OpenStack release name as key. + # Find package information from subordinate matching requested release + # or the most recent release prior to requested release by sorting the + # keys in reverse order. This follows established patterns in our + # charms for templates and reactive charm implementations, i.e. as long + # as nothing has changed the definitions for the prior OpenStack + # release is still valid. + for release in sorted(rp_map.keys(), reverse=True): + if (CompareOpenStackReleases(release) <= os_release and + package_type in rp_map[release]): + for name, container in ( + ('install', install), + ('purge', purge)): + for pkg in rp_map[release][package_type].get(name, []): + container.add(pkg) + break + return SubordinatePackages(install, purge) + + +os_restart_on_change = partial( + pausable_restart_on_change, + can_restart_now_f=deferred_events.check_and_record_restart_request, + post_svc_restart_f=deferred_events.process_svc_restart) + + +def restart_services_action_helper(all_services): + """Helper to run the restart-services action. + + NOTE: all_services is all services that could be restarted but + depending on the action arguments it may be a subset of + these that are actually restarted. 
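A minimal sketch of composing status checks with sequence_status_check_functions(); both check functions are hypothetical and simply return (state, message) tuples as the helper expects:

    from charmhelpers.contrib.openstack.utils import (
        sequence_status_check_functions)

    def check_config(configs):        # hypothetical check
        return 'active', 'config ok'

    def check_services(configs):      # hypothetical check
        return 'blocked', 'service down'

    combined = sequence_status_check_functions(check_config, check_services)
    # combined(configs) evaluates both checks: the worse workload state wins
    # (via workload_state_compare) and the messages are comma-joined, e.g.
    # ('blocked', 'config ok, service down').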
+ + :param all_services: All services that could be restarted + :type all_services: List[str] + """ + deferred_only = action_get("deferred-only") + services = action_get("services") + if services: + services = services.split() + else: + services = all_services + if deferred_only: + restart_services_action(deferred_only=True) + else: + restart_services_action(services=services) + + +def show_deferred_events_action_helper(): + """Helper to run the show-deferred-restarts action.""" + restarts = [] + for event in deferred_events.get_deferred_events(): + restarts.append('{} {} {}'.format( + str(event.timestamp), + event.service.ljust(40), + event.reason)) + restarts.sort() + output = { + 'restarts': restarts, + 'hooks': deferred_events.get_deferred_hooks()} + action_set({'output': "{}".format( + yaml.dump(output, default_flow_style=False))}) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index db7ce728..778aa4b6 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -226,6 +226,17 @@ def relation_id(relation_name=None, service_or_unit=None): raise ValueError('Must specify neither or both of relation_name and service_or_unit') +def departing_unit(): + """The departing unit for the current relation hook. + + Available since juju 2.8. + + :returns: the departing unit, or None if the information isn't available. + :rtype: Optional[str] + """ + return os.environ.get('JUJU_DEPARTING_UNIT', None) + + def local_unit(): """Local unit ID""" return os.environ['JUJU_UNIT_NAME'] @@ -1611,3 +1622,12 @@ def _contains_range(addresses): addresses.startswith(".") or ",." in addresses or " ." in addresses) + + +def is_subordinate(): + """Check whether charm is subordinate in unit metadata. + + :returns: True if unit is subordniate, False otherwise. + :rtype: bool + """ + return metadata().get('subordinate') is True diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index f826f6fe..d25e6c59 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ import six from contextlib import contextmanager -from collections import OrderedDict +from collections import OrderedDict, defaultdict from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -694,74 +694,223 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing +class restart_on_change(object): + """Decorator and context manager to handle restarts. - This function is used a decorator, for example:: + Usage: - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here + @restart_on_change(restart_map, ...) + def function_that_might_trigger_a_restart(...) + ... - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. + Or: - @param restart_map: {path_file_name: [service_name, ...] 
- @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function + with restart_on_change(restart_map, ...): + do_stuff_that_might_trigger_a_restart() + ... """ - def wrap(f): + + def __init__(self, restart_map, stopstart=False, restart_functions=None, + can_restart_now_f=None, post_svc_restart_f=None, + pre_restarts_wait_f=None): + """ + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart + services {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + """ + self.restart_map = restart_map + self.stopstart = stopstart + self.restart_functions = restart_functions + self.can_restart_now_f = can_restart_now_f + self.post_svc_restart_f = post_svc_restart_f + self.pre_restarts_wait_f = pre_restarts_wait_f + + def __call__(self, f): + """Work like a decorator. + + Returns a wrapped function that performs the restart if triggered. + + :param f: The function that is being wrapped. + :type f: Callable[[Any], Any] + :returns: the wrapped function + :rtype: Callable[[Any], Any] + """ @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) return wrapped_f - return wrap + + def __enter__(self): + """Enter the runtime context related to this object. """ + self.checksums = _pre_restart_on_change_helper(self.restart_map) + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the runtime context related to this object. + + The parameters describe the exception that caused the context to be + exited. If the context was exited without an exception, all three + arguments will be None. + """ + if exc_type is None: + _post_restart_on_change_helper( + self.checksums, + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) + # All is good, so return False; any exceptions will propagate. + return False def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described in the restart_map have changed after an invocation of lambda_f(). - @param lambda_f: function to call. 
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop, start or restart a service
-    @param restart_functions: nonstandard functions to use to restart services
+    This function allows a number of helper functions to be passed.
+
+    `restart_functions` is a map with a service as the key and the
+    corresponding value being the function to call to restart the service. For
+    example if `restart_functions={'some-service': my_restart_func}` then
+    `my_restart_func` should be a function which takes one argument which is
+    the service name to be restarted.
+
+    `can_restart_now_f` is a function which checks that a restart is permitted.
+    It should return a bool which indicates if a restart is allowed and should
+    take a service name (str) and a list of changed files (List[str]) as
+    arguments.
+
+    `post_svc_restart_f` is a function which runs after a service has been
+    restarted. It takes the service name that was restarted as an argument.
+
+    `pre_restarts_wait_f` is a function which is called before any restarts
+    occur. The use case for this is an application which wants to try and
+    stagger restarts between units.
+
+    :param lambda_f: function to call.
+    :type lambda_f: Callable[[], ANY]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
         {svc: func, ...}
-    @returns result of lambda_f()
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
+    :returns: result of lambda_f()
+    :rtype: ANY
+    """
+    checksums = _pre_restart_on_change_helper(restart_map)
+    r = lambda_f()
+    _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart,
+                                   restart_functions,
+                                   can_restart_now_f,
+                                   post_svc_restart_f,
+                                   pre_restarts_wait_f)
+    return r
+
+
+def _pre_restart_on_change_helper(restart_map):
+    """Take a snapshot of file hashes.
+
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :returns: Dictionary of file paths and the files checksum.
+    :rtype: Dict[str, str]
+    """
+    return {path: path_hash(path) for path in restart_map}
+
+
+def _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart=False,
+                                   restart_functions=None,
+                                   can_restart_now_f=None,
+                                   post_svc_restart_f=None,
+                                   pre_restarts_wait_f=None):
+    """Check whether files have changed.
+
+    :param checksums: Dictionary of file paths and the files checksum.
+    :type checksums: Dict[str, str]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
+        {svc: func, ...}
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
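To see the reworked restart_on_change in use, a sketch of both the decorator and context-manager forms with the new deferred-events hooks; the restart map is an illustrative assumption:

    from charmhelpers.core.host import restart_on_change
    import charmhelpers.contrib.openstack.deferred_events as deferred_events

    RESTART_MAP = {'/etc/ceph/ceph.conf': ['radosgw']}  # hypothetical map

    @restart_on_change(
        RESTART_MAP,
        can_restart_now_f=deferred_events.check_and_record_restart_request,
        post_svc_restart_f=deferred_events.process_svc_restart)
    def config_changed():
        pass  # render config files; restarts fire only if hashes changed

    def update_conf():
        # Context-manager form: checksums are taken on __enter__ and
        # compared on __exit__ (only when no exception was raised).
        with restart_on_change(RESTART_MAP):
            pass  # write /etc/ceph/ceph.conf here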
+ :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() + changed_files = defaultdict(list) + restarts = [] # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] + for path, services in restart_map.items(): + if path_hash(path) != checksums[path]: + restarts.append(services) + for svc in services: + changed_files[svc].append(path) # create a flat list of ordered services without duplicates from lists services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: + if pre_restarts_wait_f: + pre_restarts_wait_f() actions = ('stop', 'start') if stopstart else ('restart',) for service_name in services_list: + if can_restart_now_f: + if not can_restart_now_f(service_name, + changed_files[service_name]): + continue if service_name in restart_functions: restart_functions[service_name](service_name) else: for action in actions: service(action, service_name) - return r + if post_svc_restart_f: + post_svc_restart_f(service_name) def pwgen(length=None): @@ -1068,6 +1217,17 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return calculated_wait_time +def ca_cert_absolute_path(basename_without_extension): + """Returns absolute path to CA certificate. + + :param basename_without_extension: Filename without extension + :type basename_without_extension: str + :returns: Absolute full path + :rtype: str + """ + return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension) + + def install_ca_cert(ca_cert, name=None): """ Install the given cert as a trusted CA. @@ -1083,7 +1243,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) + cert_file = ca_cert_absolute_path(name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index a3ec6947..7ee8a6ed 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -96,12 +96,14 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. 
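The cmp_pkgrevno() change here swaps the full apt cache for get_installed_version() when no pkgcache is supplied; its contract is unchanged, as in this sketch (package name and version are illustrative):

    from charmhelpers.core.host import cmp_pkgrevno

    # Returns >0, 0 or <0, like apt_pkg.version_compare().
    if cmp_pkgrevno('ceph-common', '12.2.0') >= 0:
        pass  # installed ceph-common is at least 12.2.0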
""" - from charmhelpers.fetch import apt_pkg + from charmhelpers.fetch import apt_pkg, get_installed_version if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + current_ver = get_installed_version(package) + else: + pkg = pkgcache[package] + current_ver = pkg.current_ver + + return apt_pkg.version_compare(current_ver.ver_str, revno) @cached diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 0cc7fc85..5b689f5b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -105,6 +105,7 @@ def base_url(self, url): get_upstream_version = fetch.get_upstream_version apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env + get_installed_version = fetch.get_installed_version elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index b5953019..b38edcc1 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -13,6 +13,7 @@ # limitations under the License. from collections import OrderedDict +import os import platform import re import six @@ -20,6 +21,7 @@ import sys import time +from charmhelpers import deprecate from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( @@ -198,6 +200,14 @@ 'victoria/proposed': 'focal-proposed/victoria', 'focal-victoria/proposed': 'focal-proposed/victoria', 'focal-proposed/victoria': 'focal-proposed/victoria', + # Wallaby + 'wallaby': 'focal-updates/wallaby', + 'focal-wallaby': 'focal-updates/wallaby', + 'focal-wallaby/updates': 'focal-updates/wallaby', + 'focal-updates/wallaby': 'focal-updates/wallaby', + 'wallaby/proposed': 'focal-proposed/wallaby', + 'focal-wallaby/proposed': 'focal-proposed/wallaby', + 'focal-proposed/wallaby': 'focal-proposed/wallaby', } @@ -251,13 +261,19 @@ def apt_cache(*_, **__): # Detect this situation, log a warning and make the call to # ``apt_pkg.init()`` to avoid the consumer Python interpreter from # crashing with a segmentation fault. - log('Support for use of upstream ``apt_pkg`` module in conjunction' - 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + @deprecate( + 'Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', + date=None, log=lambda x: log(x, level=WARNING)) + def one_shot_log(): + pass + + one_shot_log() sys.modules['apt_pkg'].init() return ubuntu_apt_pkg.Cache() -def apt_install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False, quiet=False): """Install one or more packages. :param packages: Package(s) to install @@ -267,6 +283,8 @@ def apt_install(packages, options=None, fatal=False): :param fatal: Whether the command's output should be checked and retried. 
:type fatal: bool + :param quiet: if True (default), supress log message to stdout/stderr + :type quiet: bool :raises: subprocess.CalledProcessError """ if options is None: @@ -279,9 +297,10 @@ def apt_install(packages, options=None, fatal=False): cmd.append(packages) else: cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) + if not quiet: + log("Installing {} with options: {}" + .format(packages, options)) + _run_apt_command(cmd, fatal, quiet=quiet) def apt_upgrade(options=None, fatal=False, dist=False): @@ -639,14 +658,17 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ + series = get_distrib_codename() if '{series}' in spec: - series = get_distrib_codename() spec = spec.replace('{series}', series) # software-properties package for bionic properly reacts to proxy settings - # passed as environment variables (See lp:1433761). This is not the case - # LTS and non-LTS releases below bionic. - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) + # set via apt.conf (see lp:1433761), however this is not the case for LTS + # and non-LTS releases before bionic. + if series in ('trusty', 'xenial'): + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) + else: + _run_with_retries(['add-apt-repository', '--yes', spec]) def _add_cloud_pocket(pocket): @@ -723,7 +745,7 @@ def _verify_is_ubuntu_rel(release, os_release): def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), - retry_message="", cmd_env=None): + retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. :param cmd: The apt command to run. @@ -738,11 +760,20 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), :type retry_message: str :param: cmd_env: Environment variables to add to the command run. :type cmd_env: Option[None, Dict[str, str]] + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ env = get_apt_dpkg_env() if cmd_env: env.update(cmd_env) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) @@ -753,7 +784,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - result = subprocess.check_call(cmd, env=env) + result = subprocess.check_call(cmd, env=env, **kwargs) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -763,7 +794,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), time.sleep(CMD_RETRY_DELAY) -def _run_apt_command(cmd, fatal=False): +def _run_apt_command(cmd, fatal=False, quiet=False): """Run an apt command with optional retries. :param cmd: The apt command to run. @@ -771,13 +802,22 @@ def _run_apt_command(cmd, fatal=False): :param fatal: Whether the command's output should be checked and retried. 
:type fatal: bool + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ if fatal: _run_with_retries( cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock") + retry_message="Couldn't acquire DPKG lock", + quiet=quiet) else: - subprocess.call(cmd, env=get_apt_dpkg_env()) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) def get_upstream_version(package): @@ -799,6 +839,22 @@ def get_upstream_version(package): return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) +def get_installed_version(package): + """Determine installed version of a package + + @returns None (if not installed) or the installed version as + Version object + """ + cache = apt_cache() + dpkg_result = cache._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + + if installed_version: + current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) + return current_ver + + def get_apt_dpkg_env(): """Get environment suitable for execution of APT and DPKG tools. diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 52d380b4..e5c38793 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -56,11 +56,11 @@ ) from charmhelpers.fetch import ( add_source, - apt_cache, apt_install, apt_purge, apt_update, - filter_missing_packages + filter_missing_packages, + get_installed_version ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -497,10 +497,7 @@ def tune_dev(block_dev): def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" + return 'ceph' class CrushLocation(object): @@ -715,22 +712,15 @@ def get_version(): """Derive Ceph release from an installed package.""" import apt_pkg as apt - cache = apt_cache() package = "ceph" - try: - pkg = cache[package] - except KeyError: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - if not pkg.current_ver: + current_ver = get_installed_version(package) + if not current_ver: # package is known, but no version is currently installed. 
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.upstream_version(pkg.current_ver.ver_str) + vers = apt.upstream_version(current_ver.ver_str) # x.y match only for 20XX.X # and ignore patch level for other packages diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 9aea716b..394e4d37 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From 7c88fc7df3171f32100b613043b8a97aca264aa9 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:02 +0100 Subject: [PATCH 2190/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers Change-Id: I388df195df1294f0fc92bcf06e49e1a207fc6510 --- ceph-rbd-mirror/src/build.lock | 196 ++++++++++++++++++++++ ceph-rbd-mirror/src/test-requirements.txt | 4 +- ceph-rbd-mirror/src/wheelhouse.txt | 4 +- ceph-rbd-mirror/test-requirements.txt | 2 +- ceph-rbd-mirror/tox.ini | 7 +- 5 files changed, 207 insertions(+), 6 deletions(-) create mode 100644 ceph-rbd-mirror/src/build.lock diff --git a/ceph-rbd-mirror/src/build.lock b/ceph-rbd-mirror/src/build.lock new file mode 100644 index 00000000..6f2031db --- /dev/null +++ b/ceph-rbd-mirror/src/build.lock @@ -0,0 +1,196 @@ +{ + "locks": [ + { + "type": "layer", + "item": "layer:leadership", + "url": "https://git.launchpad.net/layer-leadership", + "vcs": null, + "branch": "refs/heads/master", + "commit": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f" + }, + { + "type": "layer", + "item": "layer:options", + "url": "https://github.com/juju-solutions/layer-options.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" + }, + { + "type": "layer", + "item": "layer:basic", + "url": "https://github.com/juju-solutions/layer-basic.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "623e69c7b432456fd4364f6e1835424fd6b5425e" + }, + { + "type": "layer", + "item": "layer:openstack", + "url": "https://github.com/openstack/charm-layer-openstack", + "vcs": null, + "branch": "refs/heads/master", + "commit": "ba152d41b4a1109073d335415f43c4248109e7c7" + }, + { + "type": "layer", + "item": "layer:ceph", + "url": "https://github.com/openstack/charm-layer-ceph.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" + }, + { + "type": "layer", + "item": "ceph-rbd-mirror", + "url": null, + "vcs": null, + "branch": "refs/heads/master", + "commit": "7656e0878da9461cc4af2e7bc8faaa8b7842de03" + }, + { + "type": "layer", + "item": "interface:tls-certificates", + "url": 
"https://github.com/juju-solutions/interface-tls-certificates", + "vcs": null, + "branch": "refs/heads/master", + "commit": "d9850016d930a6d507b9fd45e2598d327922b140" + }, + { + "type": "layer", + "item": "interface:ceph-rbd-mirror", + "url": "https://github.com/openstack/charm-interface-ceph-rbd-mirror.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "1bb6e20b349e4573ba47863003382fa5375ce6e9" + }, + { + "type": "layer", + "item": "interface:nrpe-external-master", + "url": "https://github.com/cmars/nrpe-external-master-interface", + "vcs": null, + "branch": "refs/heads/master", + "commit": "2e0e1fdea6d83b55078200aacb537d60013ec5bc" + }, + { + "type": "python_module", + "package": "charms.openstack", + "url": "git+https://opendev.org/openstack/charms.openstack.git", + "branch": "refs/heads/stable/21.04", + "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", + "vcs": "git" + }, + { + "type": "python_module", + "package": "netaddr", + "vcs": null, + "version": "0.7.19" + }, + { + "type": "python_module", + "package": "pbr", + "vcs": null, + "version": "5.5.1" + }, + { + "type": "python_module", + "package": "setuptools", + "vcs": null, + "version": "41.6.0" + }, + { + "type": "python_module", + "package": "Jinja2", + "vcs": null, + "version": "2.10.1" + }, + { + "type": "python_module", + "package": "pip", + "vcs": null, + "version": "18.1" + }, + { + "type": "python_module", + "package": "pyaml", + "vcs": null, + "version": "20.4.0" + }, + { + "type": "python_module", + "package": "dnspython", + "vcs": null, + "version": "1.16.0" + }, + { + "type": "python_module", + "package": "MarkupSafe", + "vcs": null, + "version": "1.1.1" + }, + { + "type": "python_module", + "package": "charmhelpers", + "url": "git+https://github.com/juju/charm-helpers.git", + "branch": "refs/heads/stable/21.04", + "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", + "vcs": "git" + }, + { + "type": "python_module", + "package": "wheel", + "vcs": null, + "version": "0.33.6" + }, + { + "type": "python_module", + "package": "charms.ceph", + "url": "git+https://github.com/openstack/charms.ceph.git", + "branch": "refs/heads/master", + "version": "9bfe43ee654d016d7f09ede406c45674821f2866", + "vcs": "git" + }, + { + "type": "python_module", + "package": "charms.reactive", + "vcs": null, + "version": "1.4.1" + }, + { + "type": "python_module", + "package": "Tempita", + "vcs": null, + "version": "0.5.2" + }, + { + "type": "python_module", + "package": "PyYAML", + "vcs": null, + "version": "5.2" + }, + { + "type": "python_module", + "package": "setuptools_scm", + "vcs": null, + "version": "1.17.0" + }, + { + "type": "python_module", + "package": "psutil", + "vcs": null, + "version": "5.8.0" + }, + { + "type": "python_module", + "package": "netifaces", + "vcs": null, + "version": "0.10.9" + }, + { + "type": "python_module", + "package": "six", + "vcs": null, + "version": "1.15.0" + } + ] +} \ No newline at end of file diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index 520681e1..eb4844b8 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -11,5 +11,5 @@ charm-tools>=2.4.4 keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) 
-git+https://github.com/openstack-charmers/zaza.git#egg=zaza -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index fac0be8d..aa678af8 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,4 +1,4 @@ -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers +git+https://github.com/juju/charm-helpers.git@stable/21.04#egg=charmhelpers psutil -git+https://opendev.org/openstack/charms.openstack.git#egg=charms.openstack +git+https://opendev.org/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 3f085244..16bbc27d 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -35,7 +35,7 @@ mock>=1.2; python_version >= '3.6' nose>=1.3.7 coverage>=3.6 -git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack # # Revisit for removal / mock improvement: netifaces # vault diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index ce79fa16..391b2af8 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -28,7 +28,12 @@ deps = [testenv:build] basepython = python3 commands = - charm-build --log-level DEBUG -o {toxinidir}/build/builds src {posargs} + charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} + +[testenv:add-build-lock-file] +basepython = python3 +commands = + charm-build --log-level DEBUG --write-lock-file -o {toxinidir}/build/builds src {posargs} [testenv:py3] basepython = python3 From 80383dca060b97ba7c344dc30c5a9dba8f036ed5 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:02 +0100 Subject: [PATCH 2191/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers Change-Id: I493cb80145fe91163d011bc78f30e84f317142de --- ceph-proxy/charm-helpers-hooks.yaml | 5 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 4 +- .../contrib/openstack/deferred_events.py | 410 ++++++++++++++++++ .../contrib/openstack/exceptions.py | 5 + .../contrib/openstack/files/__init__.py | 16 + .../openstack/files/policy_rc_d_script.py | 196 +++++++++ .../contrib/openstack/policy_rcd.py | 173 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 291 ++++++++++++- ceph-proxy/charmhelpers/core/hookenv.py | 20 + ceph-proxy/charmhelpers/core/host.py | 236 ++++++++-- .../charmhelpers/core/host_factory/ubuntu.py | 12 +- ceph-proxy/charmhelpers/fetch/__init__.py | 1 + ceph-proxy/charmhelpers/fetch/ubuntu.py | 88 +++- ceph-proxy/lib/charms_ceph/utils.py | 22 +- ceph-proxy/test-requirements.txt | 4 +- 15 files changed, 1386 insertions(+), 97 deletions(-) create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/files/__init__.py create mode 100755 
ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py create mode 100644 ceph-proxy/charmhelpers/contrib/openstack/policy_rcd.py diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 02a4f8e6..205ad8a6 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers +repo: https://github.com/juju/charm-helpers@stable/21.04 destination: charmhelpers include: - core @@ -13,9 +13,12 @@ include: - payload.execd - contrib.openstack: - alternatives + - deferred_events - exceptions + - files - ha - ip + - policy_rcd - utils - contrib.network.ip - contrib.charmsupport diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index c87cf489..e4cb06bc 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -337,10 +337,8 @@ def write(self): "command": nrpecheck.command, } # If we were passed max_check_attempts, add that to the relation data - try: + if nrpecheck.max_check_attempts is not None: nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts - except AttributeError: - pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py new file mode 100644 index 00000000..fd073a04 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py @@ -0,0 +1,410 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing deferred service events. + +This module is used to manage deferred service events from both charm actions +and package actions. +""" + +import datetime +import glob +import yaml +import os +import time +import uuid + +import charmhelpers.contrib.openstack.policy_rcd as policy_rcd +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.unitdata as unitdata + +import subprocess + + +# Deferred events generated from the charm are stored along side those +# generated from packaging. 
+DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR + + +class ServiceEvent(): + + def __init__(self, timestamp, service, reason, action, + policy_requestor_name=None, policy_requestor_type=None): + self.timestamp = timestamp + self.service = service + self.reason = reason + self.action = action + if not policy_requestor_name: + self.policy_requestor_name = hookenv.service_name() + if not policy_requestor_type: + self.policy_requestor_type = 'charm' + + def __eq__(self, other): + for attr in vars(self): + if getattr(self, attr) != getattr(other, attr): + return False + return True + + def matching_request(self, other): + for attr in ['service', 'action', 'reason']: + if getattr(self, attr) != getattr(other, attr): + return False + return True + + @classmethod + def from_dict(cls, data): + return cls( + data['timestamp'], + data['service'], + data['reason'], + data['action'], + data.get('policy_requestor_name'), + data.get('policy_requestor_type')) + + +def deferred_events_files(): + """Deferred event files + + Deferred event files that were generated by service_name() policy. + + :returns: Deferred event files + :rtype: List[str] + """ + return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + +def read_event_file(file_name): + """Read a file and return the corresponding objects. + + :param file_name: Name of file to read. + :type file_name: str + :returns: ServiceEvent from file. + :rtype: ServiceEvent + """ + with open(file_name, 'r') as f: + contents = yaml.safe_load(f) + event = ServiceEvent( + contents['timestamp'], + contents['service'], + contents['reason'], + contents['action']) + return event + + +def deferred_events(): + """Get list of deferred events. + + List of deferred events. Events are represented by dicts of the form: + + { + action: restart, + policy_requestor_name: neutron-openvswitch, + policy_requestor_type: charm, + reason: 'Pkg update', + service: openvswitch-switch, + time: 1614328743} + + :returns: List of deferred events. + :rtype: List[ServiceEvent] + """ + events = [] + for defer_file in deferred_events_files(): + events.append((defer_file, read_event_file(defer_file))) + return events + + +def duplicate_event_files(event): + """Get list of event files that have equivalent deferred events. + + :param event: Event to compare + :type event: ServiceEvent + :returns: List of event files + :rtype: List[str] + """ + duplicates = [] + for event_file, existing_event in deferred_events(): + if event.matching_request(existing_event): + duplicates.append(event_file) + return duplicates + + +def get_event_record_file(policy_requestor_type, policy_requestor_name): + """Generate filename for storing a new event. + + :param policy_requestor_type: System that blocked event + :type policy_requestor_type: str + :param policy_requestor_name: Name of application that blocked event + :type policy_requestor_name: str + :returns: File name + :rtype: str + """ + file_name = '{}/{}-{}-{}.deferred'.format( + DEFERRED_EVENTS_DIR, + policy_requestor_type, + policy_requestor_name, + uuid.uuid1()) + return file_name + + +def save_event(event): + """Write deferred events to backend. + + :param event: Event to save + :type event: ServiceEvent + """ + requestor_name = hookenv.service_name() + requestor_type = 'charm' + init_policy_log_dir() + if duplicate_event_files(event): + hookenv.log( + "Not writing new event, existing event found. 
{} {} {}".format(
+                event.service,
+                event.action,
+                event.reason),
+            level="DEBUG")
+    else:
+        record_file = get_event_record_file(
+            policy_requestor_type=requestor_type,
+            policy_requestor_name=requestor_name)
+
+        with open(record_file, 'w') as f:
+            data = {
+                'timestamp': event.timestamp,
+                'service': event.service,
+                'action': event.action,
+                'reason': event.reason,
+                'policy_requestor_type': requestor_type,
+                'policy_requestor_name': requestor_name}
+            yaml.dump(data, f)
+
+
+def clear_deferred_events(svcs, action):
+    """Remove any outstanding deferred events.
+
+    Remove a deferred event if its service is in the services list and its
+    action matches.
+
+    :param svcs: List of services to remove.
+    :type svcs: List[str]
+    :param action: Action to remove
+    :type action: str
+    """
+    # XXX This function is not currently processing the action. It needs to
+    #     match the action and also take account of try-restart and the
+    #     equivalence of stop-start and restart.
+    for defer_file in deferred_events_files():
+        deferred_event = read_event_file(defer_file)
+        if deferred_event.service in svcs:
+            os.remove(defer_file)
+
+
+def init_policy_log_dir():
+    """Ensure directory to store events exists."""
+    if not os.path.exists(DEFERRED_EVENTS_DIR):
+        os.mkdir(DEFERRED_EVENTS_DIR)
+
+
+def get_deferred_events():
+    """Return a list of deferred events requested by the charm and packages.
+
+    :returns: List of deferred events
+    :rtype: List[ServiceEvent]
+    """
+    events = []
+    for _, event in deferred_events():
+        events.append(event)
+    return events
+
+
+def get_deferred_restarts():
+    """List of deferred restart events requested by the charm and packages.
+
+    :returns: List of deferred restarts
+    :rtype: List[ServiceEvent]
+    """
+    return [e for e in get_deferred_events() if e.action == 'restart']
+
+
+def clear_deferred_restarts(services):
+    """Clear deferred restart events targeted at `services`.
+
+    :param services: Services with deferred actions to clear.
+    :type services: List[str]
+    """
+    clear_deferred_events(services, 'restart')
+
+
+def process_svc_restart(service):
+    """Respond to a service restart having occurred.
+
+    :param service: Service that the action was performed against.
+    :type service: str
+    """
+    clear_deferred_restarts([service])
+
+
+def is_restart_permitted():
+    """Check whether restarts are permitted.
+
+    :returns: Whether restarts are permitted
+    :rtype: bool
+    """
+    if hookenv.config('enable-auto-restarts') is None:
+        return True
+    return hookenv.config('enable-auto-restarts')
+
+
+def check_and_record_restart_request(service, changed_files):
+    """Check if restarts are permitted, if they are not log the request.
+
+    :param service: Service to be restarted
+    :type service: str
+    :param changed_files: Files that have changed to trigger restarts.
+    :type changed_files: List[str]
+    :returns: Whether restarts are permitted
+    :rtype: bool
+    """
+    changed_files = sorted(list(set(changed_files)))
+    permitted = is_restart_permitted()
+    if not permitted:
+        save_event(ServiceEvent(
+            timestamp=round(time.time()),
+            service=service,
+            reason='File(s) changed: {}'.format(
+                ', '.join(changed_files)),
+            action='restart'))
+    return permitted
+
+
+def deferrable_svc_restart(service, reason=None):
+    """Restart the service if permitted; if not, defer it.
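A sketch of the deferred-restart lifecycle these helpers implement; the service name and reason are illustrative:

    import charmhelpers.contrib.openstack.deferred_events as deferred_events

    # Restart immediately if 'enable-auto-restarts' permits it, otherwise
    # record a *.deferred event file under /var/lib/policy-rc.d.
    deferred_events.deferrable_svc_restart('radosgw',
                                           reason='ceph.conf changed')

    # Later (e.g. in update-status), report and clear outstanding events.
    for event in deferred_events.get_deferred_restarts():
        print('{} restart deferred: {}'.format(event.service, event.reason))
    deferred_events.clear_deferred_restarts(['radosgw'])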
+ + :param service: Service to be restarted + :type service: str + :param reason: Reason for restart + :type reason: Union[str, None] + """ + if is_restart_permitted(): + host.service_restart(service) + else: + save_event(ServiceEvent( + timestamp=round(time.time()), + service=service, + reason=reason, + action='restart')) + + +def configure_deferred_restarts(services): + """Setup deferred restarts. + + :param services: Services to block restarts of. + :type services: List[str] + """ + policy_rcd.install_policy_rcd() + if is_restart_permitted(): + policy_rcd.remove_policy_file() + else: + blocked_actions = ['stop', 'restart', 'try-restart'] + for svc in services: + policy_rcd.add_policy_block(svc, blocked_actions) + + +def get_service_start_time(service): + """Find point in time when the systemd unit transitioned to active state. + + :param service: Services to check timetsamp of. + :type service: str + """ + start_time = None + out = subprocess.check_output( + [ + 'systemctl', + 'show', + service, + '--property=ActiveEnterTimestamp']) + str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '') + if str_time: + start_time = datetime.datetime.strptime( + str_time, + '%a %Y-%m-%d %H:%M:%S %Z') + return start_time + + +def check_restart_timestamps(): + """Check deferred restarts against systemd units start time. + + Check if a service has a deferred event and clear it if it has been + subsequently restarted. + """ + for event in get_deferred_restarts(): + start_time = get_service_start_time(event.service) + deferred_restart_time = datetime.datetime.fromtimestamp( + event.timestamp) + if start_time and start_time < deferred_restart_time: + hookenv.log( + ("Restart still required, {} was started at {}, restart was " + "requested after that at {}").format( + event.service, + start_time, + deferred_restart_time), + level='DEBUG') + else: + clear_deferred_restarts([event.service]) + + +def set_deferred_hook(hookname): + """Record that a hook has been deferred. + + :param hookname: Name of hook that was deferred. + :type hookname: str + """ + with unitdata.HookData()() as t: + kv = t[0] + deferred_hooks = kv.get('deferred-hooks', []) + if hookname not in deferred_hooks: + deferred_hooks.append(hookname) + kv.set('deferred-hooks', sorted(list(set(deferred_hooks)))) + + +def get_deferred_hooks(): + """Get a list of deferred hooks. + + :returns: List of hook names. + :rtype: List[str] + """ + with unitdata.HookData()() as t: + kv = t[0] + return kv.get('deferred-hooks', []) + + +def clear_deferred_hooks(): + """Clear any deferred hooks.""" + with unitdata.HookData()() as t: + kv = t[0] + kv.set('deferred-hooks', []) + + +def clear_deferred_hook(hookname): + """Clear a specific deferred hooks. + + :param hookname: Name of hook to remove. 
+    :type hookname: str
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        deferred_hooks = kv.get('deferred-hooks', [])
+        if hookname in deferred_hooks:
+            deferred_hooks.remove(hookname)
+            kv.set('deferred-hooks', deferred_hooks)
diff --git a/ceph-proxy/charmhelpers/contrib/openstack/exceptions.py b/ceph-proxy/charmhelpers/contrib/openstack/exceptions.py
index f85ae4f4..b2330637 100644
--- a/ceph-proxy/charmhelpers/contrib/openstack/exceptions.py
+++ b/ceph-proxy/charmhelpers/contrib/openstack/exceptions.py
@@ -19,3 +19,8 @@ class OSContextError(Exception):
     This exception is principally used in contrib.openstack.context
     """
     pass
+
+
+class ServiceActionError(Exception):
+    """Raised when a service action (stop/start, etc.) failed."""
+    pass
diff --git a/ceph-proxy/charmhelpers/contrib/openstack/files/__init__.py b/ceph-proxy/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 00000000..9df5f746
--- /dev/null
+++ b/ceph-proxy/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
new file mode 100755
index 00000000..344a7662
--- /dev/null
+++ b/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+
+"""This script is an implementation of policy-rc.d
+
+For further information on policy-rc.d see *1
+
+*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+"""
+import collections
+import glob
+import os
+import logging
+import sys
+import time
+import uuid
+import yaml
+
+
+SystemPolicy = collections.namedtuple(
+    'SystemPolicy',
+    [
+        'policy_requestor_name',
+        'policy_requestor_type',
+        'service',
+        'blocked_actions'])
+
+DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d'
+DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d'
+
+
+def read_policy_file(policy_file):
+    """Return system policies from given file.
+
+    :param policy_file: Name of file to read.
+    :type policy_file: str
+    :returns: Policy
+    :rtype: List[SystemPolicy]
+    """
+    policies = []
+    if os.path.exists(policy_file):
+        with open(policy_file, 'r') as f:
+            policy = yaml.safe_load(f)
+            for service, actions in policy['blocked_actions'].items():
+                service = service.replace('.service', '')
+                policies.append(SystemPolicy(
+                    policy_requestor_name=policy['policy_requestor_name'],
+                    policy_requestor_type=policy['policy_requestor_type'],
+                    service=service,
+                    blocked_actions=actions))
+    return policies
+
+
+def get_policies(policy_config_dir):
+    """Return all system policies in policy_config_dir.
+
+    :param policy_config_dir: Directory to read policy files from.
+    :type policy_config_dir: str
+    :returns: Policy
+    :rtype: List[SystemPolicy]
+    """
+    _policy = []
+    for f in glob.glob('{}/*.policy'.format(policy_config_dir)):
+        _policy.extend(read_policy_file(f))
+    return _policy
+
+
+def record_blocked_action(service, action, blocking_policies, policy_log_dir):
+    """Record that an action was requested but denied.
+
+    :param service: Service that was blocked
+    :type service: str
+    :param action: Action that was blocked.
+    :type action: str
+    :param blocking_policies: Policies that blocked the action on the service.
+    :type blocking_policies: List[SystemPolicy]
+    :param policy_log_dir: Directory to place the blocking action record.
+    :type policy_log_dir: str
+    """
+    if not os.path.exists(policy_log_dir):
+        os.mkdir(policy_log_dir)
+    seconds = round(time.time())
+    for policy in blocking_policies:
+        file_name = '{}/{}-{}-{}.deferred'.format(
+            policy_log_dir,
+            policy.policy_requestor_type,
+            policy.policy_requestor_name,
+            uuid.uuid1())
+        with open(file_name, 'w') as f:
+            data = {
+                'timestamp': seconds,
+                'service': service,
+                'action': action,
+                'reason': 'Package update',
+                'policy_requestor_type': policy.policy_requestor_type,
+                'policy_requestor_name': policy.policy_requestor_name}
+            yaml.dump(data, f)
+
+
+def get_blocking_policies(service, action, policy_config_dir):
+    """Return the policies that block the requested action on the service.
+
+    :param service: Service that action is requested against.
+    :type service: str
+    :param action: Action that is requested.
+    :type action: str
+    :param policy_config_dir: Directory that stores policy files.
+    :type policy_config_dir: str
+    :returns: Policies
+    :rtype: List[SystemPolicy]
+    """
+    service = service.replace('.service', '')
+    blocking_policies = [
+        policy
+        for policy in get_policies(policy_config_dir)
+        if policy.service == service and action in policy.blocked_actions]
+    return blocking_policies
+
+
+def process_action_request(service, action, policy_config_dir, policy_log_dir):
+    """Take the requested action against service and check if it is permitted.
+
+    :param service: Service that action is requested against.
+    :type service: str
+    :param action: Action that is requested.
+    :type action: str
+    :param policy_config_dir: Directory that stores policy files.
+    :type policy_config_dir: str
+    :param policy_log_dir: Directory that stores deferred event records.
+    :type policy_log_dir: str
+    :returns: Tuple of whether the action is permitted and explanation.
+ :rtype: (boolean, str) + """ + blocking_policies = get_blocking_policies( + service, + action, + policy_config_dir) + if blocking_policies: + policy_msg = [ + '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name) + for p in sorted(blocking_policies)] + message = '{} of {} blocked by {}'.format( + action, + service, + ', '.join(policy_msg)) + record_blocked_action( + service, + action, + blocking_policies, + policy_log_dir) + action_permitted = False + else: + message = "Permitting {} {}".format(service, action) + action_permitted = True + return action_permitted, message + + +def main(): + logging.basicConfig( + filename='/var/log/policy-rc.d.log', + level=logging.DEBUG, + format='%(asctime)s %(message)s') + + service = sys.argv[1] + action = sys.argv[2] + + permitted, message = process_action_request( + service, + action, + DEFAULT_POLICY_CONFIG_DIR, + DEFAULT_POLICY_LOG_DIR) + logging.info(message) + + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + # Exit status codes: + # 0 - action allowed + # 1 - unknown action (therefore, undefined policy) + # 100 - unknown initscript id + # 101 - action forbidden by policy + # 102 - subsystem error + # 103 - syntax error + # 104 - [reserved] + # 105 - behaviour uncertain, policy undefined. + # 106 - action not allowed. Use the returned fallback actions + # (which are implied to be "allowed") instead. + + if permitted: + return 0 + else: + return 101 + + +if __name__ == "__main__": + rc = main() + sys.exit(rc) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policy_rcd.py b/ceph-proxy/charmhelpers/contrib/openstack/policy_rcd.py new file mode 100644 index 00000000..ecffbc68 --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/policy_rcd.py @@ -0,0 +1,173 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing policy-rc.d script and associated files. + +This module manages the installation of /usr/sbin/policy-rc.d, the +policy files and the event files. When a package update occurs the +packaging system calls: + +policy-rc.d [options] + +The return code of the script determines if the packaging system +will perform that action on the given service. The policy-rc.d +implementation installed by this module checks if an action is +permitted by checking policy files placed in /etc/policy-rc.d. +If a policy file exists which denies the requested action then +this is recorded in an event file which is placed in +/var/lib/policy-rc.d. +""" + +import os +import shutil +import tempfile +import yaml + +import charmhelpers.contrib.openstack.files as os_files +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host + +POLICY_HEADER = """# Managed by juju\n""" +POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' +POLICY_CONFIG_DIR = '/etc/policy-rc.d' + + +def get_policy_file_name(): + """Get the name of the policy file for this application. 
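+
+    For example, a unit of an application named 'ceph-proxy' would use
+    '/etc/policy-rc.d/charm-ceph-proxy.policy' (an illustrative name; the
+    actual value is derived from hookenv.service_name()).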
+ + :returns: Policy file name + :rtype: str + """ + application_name = hookenv.service_name() + return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) + + +def read_default_policy_file(): + """Return the policy file. + + A policy is in the form: + blocked_actions: + neutron-dhcp-agent: [restart, stop, try-restart] + neutron-l3-agent: [restart, stop, try-restart] + neutron-metadata-agent: [restart, stop, try-restart] + neutron-openvswitch-agent: [restart, stop, try-restart] + openvswitch-switch: [restart, stop, try-restart] + ovs-vswitchd: [restart, stop, try-restart] + ovs-vswitchd-dpdk: [restart, stop, try-restart] + ovsdb-server: [restart, stop, try-restart] + policy_requestor_name: neutron-openvswitch + policy_requestor_type: charm + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = {} + policy_file = get_policy_file_name() + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + return policy + + +def write_policy_file(policy_file, policy): + """Write policy to disk. + + :param policy_file: Name of policy file + :type policy_file: str + :param policy: Policy + :type policy: Dict[str, Union[str, Dict[str, List[str]]]] + """ + with tempfile.NamedTemporaryFile('w', delete=False) as f: + f.write(POLICY_HEADER) + yaml.dump(policy, f) + tmp_file_name = f.name + shutil.move(tmp_file_name, policy_file) + + +def remove_policy_file(): + """Remove policy file.""" + try: + os.remove(get_policy_file_name()) + except FileNotFoundError: + pass + + +def install_policy_rcd(): + """Install policy-rc.d components.""" + source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) + policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( + hookenv.service_name()) + host.mkdir(os.path.dirname(policy_rcd_exec)) + shutil.copy2( + '{}/policy_rc_d_script.py'.format(source_file_dir), + policy_rcd_exec) + # policy-rc.d must be installed via the alternatives system: + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + if not os.path.exists('/usr/sbin/policy-rc.d'): + alternatives.install_alternative( + 'policy-rc.d', + '/usr/sbin/policy-rc.d', + policy_rcd_exec) + host.mkdir(POLICY_CONFIG_DIR) + + +def get_default_policy(): + """Return the default policy structure. + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = { + 'policy_requestor_name': hookenv.service_name(), + 'policy_requestor_type': 'charm', + 'blocked_actions': {}} + return policy + + +def add_policy_block(service, blocked_actions): + """Update a policy file with new list of actions. + + :param service: Service name + :type service: str + :param blocked_actions: Action to block + :type blocked_actions: List[str] + """ + policy = read_default_policy_file() or get_default_policy() + policy_file = get_policy_file_name() + if policy['blocked_actions'].get(service): + policy['blocked_actions'][service].extend(blocked_actions) + else: + policy['blocked_actions'][service] = blocked_actions + policy['blocked_actions'][service] = sorted( + list(set(policy['blocked_actions'][service]))) + write_policy_file(policy_file, policy) + + +def remove_policy_block(service, unblocked_actions): + """Remove list of actions from policy file. 
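+
+    Example (an illustrative sketch; the service name and actions are
+    placeholders)::
+
+        remove_policy_block('neutron-dhcp-agent', ['stop', 'restart'])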
+ + :param service: Service name + :type service: str + :param unblocked_actions: Action to unblock + :type unblocked_actions: List[str] + """ + policy_file = get_policy_file_name() + policy = read_default_policy_file() + for action in unblocked_actions: + try: + policy['blocked_actions'][service].remove(action) + except (KeyError, ValueError): + continue + write_policy_file(policy_file, policy) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index f27aa6c9..2ad8ab94 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -14,7 +14,7 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict, namedtuple -from functools import wraps +from functools import partial, wraps import subprocess import json @@ -36,9 +36,12 @@ from charmhelpers.core import decorators, unitdata +import charmhelpers.contrib.openstack.deferred_events as deferred_events + from charmhelpers.core.hookenv import ( WORKLOAD_STATES, action_fail, + action_get, action_set, config, expected_peer_units, @@ -112,7 +115,7 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError from charmhelpers.contrib.openstack.policyd import ( policyd_status_message_prefix, POLICYD_CONFIG_NAME, @@ -148,6 +151,7 @@ 'train', 'ussuri', 'victoria', + 'wallaby', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -170,6 +174,7 @@ ('eoan', 'train'), ('focal', 'ussuri'), ('groovy', 'victoria'), + ('hirsute', 'wallaby'), ]) @@ -193,6 +198,7 @@ ('2019.2', 'train'), ('2020.1', 'ussuri'), ('2020.2', 'victoria'), + ('2021.1', 'wallaby'), ]) # The ugly duckling - must list releases oldest to newest @@ -301,8 +307,8 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), + ('18', 'ussuri'), # Note this was actually 17.0 - 18.3 + ('19', 'victoria'), # Note this is really 18.6 ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -483,9 +489,26 @@ def get_swift_codename(version): return None -@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' + """Derive OpenStack release codename from an installed package. + + Initially, see if the openstack-release pkg is available (by trying to + install it) and use it instead. + + If it isn't then it falls back to the existing method of checking the + version of the package passed and then resolving the version from that + using lookup tables. + + Note: if possible, charms should use get_installed_os_version() to + determine the version of the "openstack-release" pkg. + + :param package: the package to test for version information. + :type package: str + :param fatal: If True (default), then die via error_out() + :type fatal: bool + :returns: the OpenStack release codename (e.g. 
ussuri)
+    :rtype: str
+    """
     codename = get_installed_os_version()
     if codename:
@@ -579,8 +602,22 @@ def get_os_version_package(pkg, fatal=True):
 
 
 def get_installed_os_version():
-    apt_install(filter_installed_packages(['openstack-release']), fatal=False)
-    print("OpenStack Release: {}".format(openstack_release()))
+    """Determine the OpenStack release code name from openstack-release pkg.
+
+    This uses the "openstack-release" pkg (if it exists) to return the
+    OpenStack release codename (e.g. ussuri, mitaka, ocata, etc.)
+
+    Note, it caches the result so that it is only done once per hook.
+
+    :returns: the OpenStack release codename, if available
+    :rtype: Optional[str]
+    """
+    @cached
+    def _do_install():
+        apt_install(filter_installed_packages(['openstack-release']),
+                    fatal=False, quiet=True)
+
+    _do_install()
     return openstack_release().get('OPENSTACK_CODENAME')
 
 
@@ -1052,6 +1089,18 @@ def _determine_os_workload_status(
     try:
         if config(POLICYD_CONFIG_NAME):
             message = "{} {}".format(policyd_status_message_prefix(), message)
+        deferred_restarts = list(set(
+            [e.service for e in deferred_events.get_deferred_restarts()]))
+        if deferred_restarts:
+            svc_msg = "Services queued for restart: {}".format(
+                ', '.join(sorted(deferred_restarts)))
+            message = "{}. {}".format(message, svc_msg)
+        deferred_hooks = deferred_events.get_deferred_hooks()
+        if deferred_hooks:
+            svc_msg = "Hooks skipped due to disabled auto restarts: {}".format(
+                ', '.join(sorted(deferred_hooks)))
+            message = "{}. {}".format(message, svc_msg)
+
     except Exception:
         pass
 
@@ -1536,6 +1585,33 @@ def is_unit_paused_set():
         return False
 
 
+def is_hook_allowed(hookname, check_deferred_restarts=True):
+    """Check if hook can run.
+
+    :param hookname: Name of hook to check.
+    :type hookname: str
+    :param check_deferred_restarts: Whether to check deferred restarts.
+    :type check_deferred_restarts: bool
+    """
+    permitted = True
+    reasons = []
+    if is_unit_paused_set():
+        reasons.append(
+            "Unit is paused or upgrading. Skipping {}".format(hookname))
+        permitted = False
+
+    if check_deferred_restarts:
+        if deferred_events.is_restart_permitted():
+            permitted = True
+            deferred_events.clear_deferred_hook(hookname)
+        else:
+            if not config().changed('enable-auto-restarts'):
+                deferred_events.set_deferred_hook(hookname)
+            reasons.append("auto restarts are disabled")
+            permitted = False
+    return permitted, " and ".join(reasons)
+
+
 def manage_payload_services(action, services=None, charm_func=None):
     """Run an action against all services.
 
@@ -1696,6 +1772,43 @@ def resume_unit(assess_status_func, services=None, ports=None,
         raise Exception("Couldn't resume: {}".format("; ".join(messages)))
 
 
+def restart_services_action(services=None, when_all_stopped_func=None,
+                            deferred_only=None):
+    """Manage a service restart request via charm action.
+
+    :param services: Services to be restarted
+    :type services: List[str]
+    :param when_all_stopped_func: Function to call when all services are
+                                  stopped.
+    :type when_all_stopped_func: Callable[]
+    :param deferred_only: Only restart services which have a deferred
+                          restart event.
+    :type deferred_only: bool
+    """
+    if services and deferred_only:
+        raise ValueError(
+            "services and deferred_only are mutually exclusive")
+    if deferred_only:
+        services = list(set(
+            [a.service for a in deferred_events.get_deferred_restarts()]))
+    _, messages = manage_payload_services(
+        'stop',
+        services=services,
+        charm_func=when_all_stopped_func)
+    if messages:
+        raise ServiceActionError(
+            "Error processing service stop request: {}".format(
+                "; ".join(messages)))
+    _, messages = manage_payload_services(
+        'start',
+        services=services)
+    if messages:
+        raise ServiceActionError(
+            "Error processing service start request: {}".format(
+                "; ".join(messages)))
+    deferred_events.clear_deferred_restarts(services)
+
+
 def make_assess_status_func(*args, **kwargs):
     """Creates an assess_status_func() suitable for handing to pause_unit()
     and resume_unit().
@@ -1717,7 +1830,10 @@ def _assess_status_func():
 
 
 def pausable_restart_on_change(restart_map, stopstart=False,
-                               restart_functions=None):
+                               restart_functions=None,
+                               can_restart_now_f=None,
+                               post_svc_restart_f=None,
+                               pre_restarts_wait_f=None):
     """A restart_on_change decorator that checks to see if the unit is
     paused. If it is paused then the decorated function doesn't fire.
@@ -1743,11 +1859,28 @@ def some_hook(...):
     function won't be called if the decorated function is never called.  Note,
     retains backwards compatibility for passing a non-callable dictionary.
 
-    @param f: the function to decorate
-    @param restart_map: (optionally callable, which then returns the
-        restart_map) the restart map {conf_file: [services]}
-    @param stopstart: DEFAULT false; whether to stop, start or just restart
-    @returns decorator to use a restart_on_change with pausability
+    :param f: function to decorate.
+    :type f: Callable
+    :param restart_map: The restart map {conf_file: [services]}, or a
+                        callable which returns such a restart map.
+    :type restart_map: Union[Callable[[],], Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
+    :returns: decorator to use a restart_on_change with pausability
+    :rtype: decorator
+
     """
     def wrap(f):
         # py27 compatible nonlocal variable.  When py3 only, replace with
@@ -1763,8 +1896,13 @@ def wrapped_f(*args, **kwargs):
                 if callable(restart_map) else restart_map
             # otherwise, normal restart_on_change functionality
             return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
-                stopstart, restart_functions)
+                (lambda: f(*args, **kwargs)),
+                __restart_map_cache['cache'],
+                stopstart,
+                restart_functions,
+                can_restart_now_f,
+                post_svc_restart_f,
+                pre_restarts_wait_f)
         return wrapped_f
     return wrap
@@ -2145,6 +2283,23 @@ def container_scoped_relations():
     return relations
 
 
+def container_scoped_relation_get(attribute=None):
+    """Get relation data from all container scoped relations.
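+
+    Example (an illustrative sketch; mirrors the use in
+    get_subordinate_release_packages() below)::
+
+        for rdata in container_scoped_relation_get('releases-packages-map'):
+            rp_map = json.loads(rdata or '{}')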
+ + :param attribute: Name of attribute to get + :type attribute: Optional[str] + :returns: Iterator with relation data + :rtype: Iterator[Optional[any]] + """ + for endpoint_name in container_scoped_relations(): + for rid in relation_ids(endpoint_name): + for unit in related_units(rid): + yield relation_get( + attribute=attribute, + unit=unit, + rid=rid) + + def is_db_ready(use_current_context=False, rel_name=None): """Check remote database is ready to be used. @@ -2418,3 +2573,107 @@ def get_api_application_status(): msg = 'Some units are not ready' juju_log(msg, 'DEBUG') return app_state, msg + + +def sequence_status_check_functions(*functions): + """Sequence the functions passed so that they all get a chance to run as + the charm status check functions. + + :param *functions: a list of functions that return (state, message) + :type *functions: List[Callable[[OSConfigRender], (str, str)]] + :returns: the Callable that takes configs and returns (state, message) + :rtype: Callable[[OSConfigRender], (str, str)] + """ + def _inner_sequenced_functions(configs): + state, message = 'unknown', '' + for f in functions: + new_state, new_message = f(configs) + state = workload_state_compare(state, new_state) + if message: + message = "{}, {}".format(message, new_message) + else: + message = new_message + return state, message + + return _inner_sequenced_functions + + +SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge']) + + +def get_subordinate_release_packages(os_release, package_type='deb'): + """Iterate over subordinate relations and get package information. + + :param os_release: OpenStack release to look for + :type os_release: str + :param package_type: Package type (one of 'deb' or 'snap') + :type package_type: str + :returns: Packages to install and packages to purge or None + :rtype: SubordinatePackages[set,set] + """ + install = set() + purge = set() + + for rdata in container_scoped_relation_get('releases-packages-map'): + rp_map = json.loads(rdata or '{}') + # The map provided by subordinate has OpenStack release name as key. + # Find package information from subordinate matching requested release + # or the most recent release prior to requested release by sorting the + # keys in reverse order. This follows established patterns in our + # charms for templates and reactive charm implementations, i.e. as long + # as nothing has changed the definitions for the prior OpenStack + # release is still valid. + for release in sorted(rp_map.keys(), reverse=True): + if (CompareOpenStackReleases(release) <= os_release and + package_type in rp_map[release]): + for name, container in ( + ('install', install), + ('purge', purge)): + for pkg in rp_map[release][package_type].get(name, []): + container.add(pkg) + break + return SubordinatePackages(install, purge) + + +os_restart_on_change = partial( + pausable_restart_on_change, + can_restart_now_f=deferred_events.check_and_record_restart_request, + post_svc_restart_f=deferred_events.process_svc_restart) + + +def restart_services_action_helper(all_services): + """Helper to run the restart-services action. + + NOTE: all_services is all services that could be restarted but + depending on the action arguments it may be a subset of + these that are actually restarted. 
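+
+    Example (an illustrative sketch; the service names are placeholders)::
+
+        restart_services_action_helper(['apache2', 'haproxy'])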
+
+    :param all_services: All services that could be restarted
+    :type all_services: List[str]
+    """
+    deferred_only = action_get("deferred-only")
+    services = action_get("services")
+    if services:
+        services = services.split()
+    else:
+        services = all_services
+    if deferred_only:
+        restart_services_action(deferred_only=True)
+    else:
+        restart_services_action(services=services)
+
+
+def show_deferred_events_action_helper():
+    """Helper to run the show-deferred-restarts action."""
+    restarts = []
+    for event in deferred_events.get_deferred_events():
+        restarts.append('{} {} {}'.format(
+            str(event.timestamp),
+            event.service.ljust(40),
+            event.reason))
+    restarts.sort()
+    output = {
+        'restarts': restarts,
+        'hooks': deferred_events.get_deferred_hooks()}
+    action_set({'output': "{}".format(
+        yaml.dump(output, default_flow_style=False))})
diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py
index db7ce728..778aa4b6 100644
--- a/ceph-proxy/charmhelpers/core/hookenv.py
+++ b/ceph-proxy/charmhelpers/core/hookenv.py
@@ -226,6 +226,17 @@ def relation_id(relation_name=None, service_or_unit=None):
         raise ValueError('Must specify neither or both of relation_name and service_or_unit')
 
 
+def departing_unit():
+    """The departing unit for the current relation hook.
+
+    Available since juju 2.8.
+
+    :returns: the departing unit, or None if the information isn't available.
+    :rtype: Optional[str]
+    """
+    return os.environ.get('JUJU_DEPARTING_UNIT', None)
+
+
 def local_unit():
     """Local unit ID"""
     return os.environ['JUJU_UNIT_NAME']
@@ -1611,3 +1622,12 @@ def _contains_range(addresses):
         addresses.startswith(".") or
         ",." in addresses or
         " ." in addresses)
+
+
+def is_subordinate():
+    """Check whether charm is subordinate in unit metadata.
+
+    :returns: True if unit is subordinate, False otherwise.
+    :rtype: bool
+    """
+    return metadata().get('subordinate') is True
diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py
index f826f6fe..d25e6c59 100644
--- a/ceph-proxy/charmhelpers/core/host.py
+++ b/ceph-proxy/charmhelpers/core/host.py
@@ -34,7 +34,7 @@
 import six
 
 from contextlib import contextmanager
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
 
 from .hookenv import log, INFO, DEBUG, local_unit, charm_name
 from .fstab import Fstab
 from charmhelpers.osplatform import get_platform
@@ -694,74 +694,223 @@ class ChecksumError(ValueError):
     pass
 
 
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
-    """Restart services based on configuration files changing
+class restart_on_change(object):
+    """Decorator and context manager to handle restarts.
 
-    This function is used a decorator, for example::
+    Usage:
 
-        @restart_on_change({
-            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
-            '/etc/apache/sites-enabled/*': [ 'apache2' ]
-            })
-        def config_changed():
-            pass  # your code here
+       @restart_on_change(restart_map, ...)
+       def function_that_might_trigger_a_restart(...)
+           ...
 
-    In this example, the cinder-api and cinder-volume services
-    would be restarted if /etc/ceph/ceph.conf is changed by the
-    ceph_client_changed function. The apache2 service would be
-    restarted if any file matching the pattern got changed, created
-    or removed. Standard wildcards are supported, see documentation
-    for the 'glob' module for more information.
+    Or:
 
-    @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function + with restart_on_change(restart_map, ...): + do_stuff_that_might_trigger_a_restart() + ... """ - def wrap(f): + + def __init__(self, restart_map, stopstart=False, restart_functions=None, + can_restart_now_f=None, post_svc_restart_f=None, + pre_restarts_wait_f=None): + """ + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart + services {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + """ + self.restart_map = restart_map + self.stopstart = stopstart + self.restart_functions = restart_functions + self.can_restart_now_f = can_restart_now_f + self.post_svc_restart_f = post_svc_restart_f + self.pre_restarts_wait_f = pre_restarts_wait_f + + def __call__(self, f): + """Work like a decorator. + + Returns a wrapped function that performs the restart if triggered. + + :param f: The function that is being wrapped. + :type f: Callable[[Any], Any] + :returns: the wrapped function + :rtype: Callable[[Any], Any] + """ @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) return wrapped_f - return wrap + + def __enter__(self): + """Enter the runtime context related to this object. """ + self.checksums = _pre_restart_on_change_helper(self.restart_map) + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the runtime context related to this object. + + The parameters describe the exception that caused the context to be + exited. If the context was exited without an exception, all three + arguments will be None. + """ + if exc_type is None: + _post_restart_on_change_helper( + self.checksums, + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) + # All is good, so return False; any exceptions will propagate. + return False def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described in the restart_map have changed after an invocation of lambda_f(). - @param lambda_f: function to call. 
-    @param restart_map: {file: [service, ...]}
-    @param stopstart: whether to stop, start or restart a service
-    @param restart_functions: nonstandard functions to use to restart services
+    This function allows for a number of helper functions to be passed.
+
+    `restart_functions` is a map with a service as the key and the
+    corresponding value being the function to call to restart the service. For
+    example if `restart_functions={'some-service': my_restart_func}` then
+    `my_restart_func` should be a function which takes one argument which is
+    the service name to be restarted.
+
+    `can_restart_now_f` is a function which checks that a restart is permitted.
+    It should return a bool which indicates if a restart is allowed and should
+    take a service name (str) and a list of changed files (List[str]) as
+    arguments.
+
+    `post_svc_restart_f` is a function which runs after a service has been
+    restarted. It takes the service name that was restarted as an argument.
+
+    `pre_restarts_wait_f` is a function which is called before any restarts
+    occur. The use case for this is an application which wants to try and
+    stagger restarts between units.
+
+    :param lambda_f: function to call.
+    :type lambda_f: Callable[[], ANY]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
                               {svc: func, ...}
-    @returns result of lambda_f()
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+    :type post_svc_restart_f: Callable[[str], None]
+    :param pre_restarts_wait_f: A function called before any restarts.
+    :type pre_restarts_wait_f: Callable[None, None]
+    :returns: result of lambda_f()
+    :rtype: ANY
     """
+    checksums = _pre_restart_on_change_helper(restart_map)
+    r = lambda_f()
+    _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart,
+                                   restart_functions,
+                                   can_restart_now_f,
+                                   post_svc_restart_f,
+                                   pre_restarts_wait_f)
+    return r
+
+
+def _pre_restart_on_change_helper(restart_map):
+    """Take a snapshot of file hashes.
+
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :returns: Dictionary of file paths and each file's checksum.
+    :rtype: Dict[str, str]
+    """
+    return {path: path_hash(path) for path in restart_map}
+
+
+def _post_restart_on_change_helper(checksums,
+                                   restart_map,
+                                   stopstart=False,
+                                   restart_functions=None,
+                                   can_restart_now_f=None,
+                                   post_svc_restart_f=None,
+                                   pre_restarts_wait_f=None):
+    """Check whether files have changed.
+
+    :param checksums: Dictionary of file paths and each file's checksum.
+    :type checksums: Dict[str, str]
+    :param restart_map: {file: [service, ...]}
+    :type restart_map: Dict[str, List[str,]]
+    :param stopstart: whether to stop, start or restart a service
+    :type stopstart: boolean
+    :param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    :type restart_functions: Dict[str, Callable[[str], None]]
+    :param can_restart_now_f: A function used to check if the restart is
+                              permitted.
+    :type can_restart_now_f: Callable[[str, List[str]], boolean]
+    :param post_svc_restart_f: A function run after a service has
+                               restarted.
+ :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() + changed_files = defaultdict(list) + restarts = [] # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] + for path, services in restart_map.items(): + if path_hash(path) != checksums[path]: + restarts.append(services) + for svc in services: + changed_files[svc].append(path) # create a flat list of ordered services without duplicates from lists services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: + if pre_restarts_wait_f: + pre_restarts_wait_f() actions = ('stop', 'start') if stopstart else ('restart',) for service_name in services_list: + if can_restart_now_f: + if not can_restart_now_f(service_name, + changed_files[service_name]): + continue if service_name in restart_functions: restart_functions[service_name](service_name) else: for action in actions: service(action, service_name) - return r + if post_svc_restart_f: + post_svc_restart_f(service_name) def pwgen(length=None): @@ -1068,6 +1217,17 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return calculated_wait_time +def ca_cert_absolute_path(basename_without_extension): + """Returns absolute path to CA certificate. + + :param basename_without_extension: Filename without extension + :type basename_without_extension: str + :returns: Absolute full path + :rtype: str + """ + return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension) + + def install_ca_cert(ca_cert, name=None): """ Install the given cert as a trusted CA. @@ -1083,7 +1243,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) + cert_file = ca_cert_absolute_path(name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index a3ec6947..7ee8a6ed 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -96,12 +96,14 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. 
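
    Example (an illustrative sketch; the package and revno are
    placeholders)::

        if cmp_pkgrevno('ceph', '12.2.0') >= 0:
            # installed ceph is 12.2.0 or newer
            pass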
""" - from charmhelpers.fetch import apt_pkg + from charmhelpers.fetch import apt_pkg, get_installed_version if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + current_ver = get_installed_version(package) + else: + pkg = pkgcache[package] + current_ver = pkg.current_ver + + return apt_pkg.version_compare(current_ver.ver_str, revno) @cached diff --git a/ceph-proxy/charmhelpers/fetch/__init__.py b/ceph-proxy/charmhelpers/fetch/__init__.py index 0cc7fc85..5b689f5b 100644 --- a/ceph-proxy/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/charmhelpers/fetch/__init__.py @@ -105,6 +105,7 @@ def base_url(self, url): get_upstream_version = fetch.get_upstream_version apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env + get_installed_version = fetch.get_installed_version elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index b5953019..b38edcc1 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -13,6 +13,7 @@ # limitations under the License. from collections import OrderedDict +import os import platform import re import six @@ -20,6 +21,7 @@ import sys import time +from charmhelpers import deprecate from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( @@ -198,6 +200,14 @@ 'victoria/proposed': 'focal-proposed/victoria', 'focal-victoria/proposed': 'focal-proposed/victoria', 'focal-proposed/victoria': 'focal-proposed/victoria', + # Wallaby + 'wallaby': 'focal-updates/wallaby', + 'focal-wallaby': 'focal-updates/wallaby', + 'focal-wallaby/updates': 'focal-updates/wallaby', + 'focal-updates/wallaby': 'focal-updates/wallaby', + 'wallaby/proposed': 'focal-proposed/wallaby', + 'focal-wallaby/proposed': 'focal-proposed/wallaby', + 'focal-proposed/wallaby': 'focal-proposed/wallaby', } @@ -251,13 +261,19 @@ def apt_cache(*_, **__): # Detect this situation, log a warning and make the call to # ``apt_pkg.init()`` to avoid the consumer Python interpreter from # crashing with a segmentation fault. - log('Support for use of upstream ``apt_pkg`` module in conjunction' - 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + @deprecate( + 'Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', + date=None, log=lambda x: log(x, level=WARNING)) + def one_shot_log(): + pass + + one_shot_log() sys.modules['apt_pkg'].init() return ubuntu_apt_pkg.Cache() -def apt_install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False, quiet=False): """Install one or more packages. :param packages: Package(s) to install @@ -267,6 +283,8 @@ def apt_install(packages, options=None, fatal=False): :param fatal: Whether the command's output should be checked and retried. 
     :type fatal: bool
+    :param quiet: if True, suppress log messages to stdout/stderr
+    :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
     if options is None:
@@ -279,9 +297,10 @@ def apt_install(packages, options=None, fatal=False):
         cmd.append(packages)
     else:
         cmd.extend(packages)
-    log("Installing {} with options: {}".format(packages,
-                                                options))
-    _run_apt_command(cmd, fatal)
+    if not quiet:
+        log("Installing {} with options: {}"
+            .format(packages, options))
+    _run_apt_command(cmd, fatal, quiet=quiet)
 
 
 def apt_upgrade(options=None, fatal=False, dist=False):
@@ -639,14 +658,17 @@ def _add_apt_repository(spec):
     :param spec: the parameter to pass to add_apt_repository
     :type spec: str
     """
+    series = get_distrib_codename()
     if '{series}' in spec:
-        series = get_distrib_codename()
         spec = spec.replace('{series}', series)
     # software-properties package for bionic properly reacts to proxy settings
-    # passed as environment variables (See lp:1433761). This is not the case
-    # LTS and non-LTS releases below bionic.
-    _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https', 'http']))
+    # set via apt.conf (see lp:1433761), however this is not the case for LTS
+    # and non-LTS releases before bionic.
+    if series in ('trusty', 'xenial'):
+        _run_with_retries(['add-apt-repository', '--yes', spec],
+                          cmd_env=env_proxy_settings(['https', 'http']))
+    else:
+        _run_with_retries(['add-apt-repository', '--yes', spec])
 
 
 def _add_cloud_pocket(pocket):
@@ -723,7 +745,7 @@ def _verify_is_ubuntu_rel(release, os_release):
 
 
 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
-                      retry_message="", cmd_env=None):
+                      retry_message="", cmd_env=None, quiet=False):
     """Run a command and retry until success or max_retries is reached.
 
     :param cmd: The apt command to run.
@@ -738,11 +760,20 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     :type retry_message: str
     :param: cmd_env: Environment variables to add to the command run.
     :type cmd_env: Option[None, Dict[str, str]]
+    :param quiet: if True, silence the output of the command from stdout and
+                  stderr
+    :type quiet: bool
     """
     env = get_apt_dpkg_env()
     if cmd_env:
         env.update(cmd_env)
 
+    kwargs = {}
+    if quiet:
+        devnull = os.devnull if six.PY2 else subprocess.DEVNULL
+        kwargs['stdout'] = devnull
+        kwargs['stderr'] = devnull
+
     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
     retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
@@ -753,7 +784,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            result = subprocess.check_call(cmd, env=env)
+            result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
@@ -763,7 +794,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
             time.sleep(CMD_RETRY_DELAY)
 
 
-def _run_apt_command(cmd, fatal=False):
+def _run_apt_command(cmd, fatal=False, quiet=False):
     """Run an apt command with optional retries.
 
     :param cmd: The apt command to run.
@@ -771,13 +802,22 @@ def _run_apt_command(cmd, fatal=False):
     :param fatal: Whether the command's output should be checked and
                   retried.
:type fatal: bool + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ if fatal: _run_with_retries( cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock") + retry_message="Couldn't acquire DPKG lock", + quiet=quiet) else: - subprocess.call(cmd, env=get_apt_dpkg_env()) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) def get_upstream_version(package): @@ -799,6 +839,22 @@ def get_upstream_version(package): return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) +def get_installed_version(package): + """Determine installed version of a package + + @returns None (if not installed) or the installed version as + Version object + """ + cache = apt_cache() + dpkg_result = cache._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + + if installed_version: + current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) + return current_ver + + def get_apt_dpkg_env(): """Get environment suitable for execution of APT and DPKG tools. diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 52d380b4..e5c38793 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -56,11 +56,11 @@ ) from charmhelpers.fetch import ( add_source, - apt_cache, apt_install, apt_purge, apt_update, - filter_missing_packages + filter_missing_packages, + get_installed_version ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -497,10 +497,7 @@ def tune_dev(block_dev): def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" + return 'ceph' class CrushLocation(object): @@ -715,22 +712,15 @@ def get_version(): """Derive Ceph release from an installed package.""" import apt_pkg as apt - cache = apt_cache() package = "ceph" - try: - pkg = cache[package] - except KeyError: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - if not pkg.current_ver: + current_ver = get_installed_version(package) + if not current_ver: # package is known, but no version is currently installed. 
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.upstream_version(pkg.current_ver.ver_str) + vers = apt.upstream_version(current_ver.ver_str) # x.y match only for 20XX.X # and ignore patch level for other packages diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 9aea716b..394e4d37 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From 63c754637ff859a4c8cace1b779a13cd9d371da3 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Wed, 24 Mar 2021 10:25:54 -0300 Subject: [PATCH 2192/2699] Remove endpoint settings without service prefix on config-changed Older charms pass endpoint data with the legacy method, without service prefix (e.g., `admin_url` instead of `swift_admin_url`.) After charm upgrade the endpoint data is set in the new method, with service prefix, however the legacy endpoint data is still there as it has not been removed. The keystone charms checks first for the legacy method, and if it's found, the new method is ignored and any endpoint changes made on the new charm (e.g., port) are not implemented. So make sure to remove the legacy endpoint settings from the relation, so the keystone charm can pick up eg, port changes, and even set up the s3 endpoint after charm upgrades between the legacy method and the new method. Simplied test-case: - Old charm: $ juju deploy cs:ceph-radosgw-285 # + keystone/percona-cluster $ openstack endpoint list --service swift | ... | http://10.5.2.210:80/swift | $ juju config ceph-radosgw port=1111 $ openstack endpoint list --service swift | ... | http://10.5.2.210:1111/swift | - New charm: $ juju upgrade-charm ceph-radosgw $ juju config ceph-radosgw port=2222 unit-keystone-0: 12:37:16 INFO unit.keystone/0.juju-log identity-service:6: {'admin_url': 'http://10.5.2.210:1111/swift', ... 'swift_admin_url': 'http://10.5.2.210:2222/swift', 'service': 'swift', ...} $ openstack endpoint list --service swift | ... | http://10.5.2.210:1111/swift | - Patched charm: $ juju upgrade-charm --path ~/charm-ceph-radosgw ceph-radosgw $ juju config ceph-radosgw port=3333 ... unit-keystone-0: 12:40:46 INFO unit.keystone/0.juju-log identity-service:6: endpoint: s3 {'admin_url': 'http://10.5.2.210:3333/', ..., 'service': 's3'} endpoint: swift {'admin_url': 'http://10.5.2.210:3333/swift', ..., 'service': 'swift'} $ openstack endpoint list --service swift | ... | http://10.5.2.210:3333/swift | $ openstack endpoint list --service s3 | ... 
| http://10.5.2.210:3333/ | Signed-off-by: Mauricio Faria de Oliveira Closes-bug: #1887722 Change-Id: Iaf3005b6507914004b6c9dcbb77957e0230fb4f4 --- ceph-radosgw/hooks/hooks.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index c9e5bf4e..b4ebcc8b 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -399,6 +399,14 @@ def identity_joined(relid=None): requested_roles = '' if roles: requested_roles = ','.join(roles) if len(roles) > 1 else roles[0] + # remove stale settings without service prefix left by old charms, + # which cause the keystone charm to ignore new settings w/ prefix. + relation_set(service='', + region='', + public_url='', + internal_url='', + admin_url='', + relation_id=relid) relation_set(swift_service='swift', swift_region=config('region'), swift_public_url=public_url, From 6b6f79d41cc47a903260c0de3966b9af7c00be9f Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:01 +0100 Subject: [PATCH 2193/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers * Manual fix to build.lock to work around bug [1] [1] https://github.com/juju/charm-tools/issues/603 Change-Id: I6be25d404436647a19e18231ccd0985dd49bb858 --- ceph-fs/src/build.lock | 198 ++++++++++++++++++++++++++++++ ceph-fs/src/test-requirements.txt | 4 +- ceph-fs/src/wheelhouse.txt | 4 +- ceph-fs/test-requirements.txt | 2 +- ceph-fs/tox.ini | 7 +- 5 files changed, 209 insertions(+), 6 deletions(-) create mode 100644 ceph-fs/src/build.lock diff --git a/ceph-fs/src/build.lock b/ceph-fs/src/build.lock new file mode 100644 index 00000000..cb26d297 --- /dev/null +++ b/ceph-fs/src/build.lock @@ -0,0 +1,198 @@ +{ + "locks": [ + { + "type": "layer", + "item": "layer:options", + "url": "https://github.com/juju-solutions/layer-options.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" + }, + { + "type": "layer", + "item": "layer:basic", + "url": "https://github.com/juju-solutions/layer-basic.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "623e69c7b432456fd4364f6e1835424fd6b5425e" + }, + { + "type": "layer", + "item": "layer:openstack", + "url": "https://github.com/openstack/charm-layer-openstack", + "vcs": null, + "branch": "refs/heads/master", + "commit": "ba152d41b4a1109073d335415f43c4248109e7c7" + }, + { + "type": "layer", + "item": "layer:ceph", + "url": "https://github.com/openstack/charm-layer-ceph.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" + }, + { + "type": "layer", + "item": "ceph-fs", + "url": null, + "vcs": null, + "branch": "refs/heads/master", + "commit": "c8feba48a592445f20ed247e45866077c01a114d" + }, + { + "type": "layer", + "item": "interface:tls-certificates", + "url": "https://github.com/juju-solutions/interface-tls-certificates", + "vcs": null, + "branch": "refs/heads/master", + "commit": "d9850016d930a6d507b9fd45e2598d327922b140" + }, + { + "type": "layer", + "item": "interface:ceph-mds", + "url": "https://opendev.org/openstack/charm-interface-ceph-client.git", + "vcs": null, + "branch": "refs/heads/master", + "commit": "72245e1d002fb9c65c9574d65b5952275b3411fb" + }, + { + 
"type": "python_module", + "package": "dnspython", + "vcs": null, + "version": "1.15.0" + }, + { + "type": "python_module", + "package": "charms.openstack", + "url": "git+https://opendev.org/openstack/charms.openstack.git", + "branch": "refs/heads/stable/21.04", + "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", + "vcs": "git" + }, + { + "type": "python_module", + "package": "netaddr", + "vcs": null, + "version": "0.7.19" + }, + { + "type": "python_module", + "package": "ceph_api", + "vcs": null, + "version": "0.4.0" + }, + { + "type": "python_module", + "package": "dnspython3", + "vcs": null, + "version": "1.15.0" + }, + { + "type": "python_module", + "package": "pbr", + "vcs": null, + "version": "5.5.1" + }, + { + "type": "python_module", + "package": "setuptools", + "vcs": null, + "version": "41.6.0" + }, + { + "type": "python_module", + "package": "pyxattr", + "vcs": null, + "version": "0.7.2" + }, + { + "type": "python_module", + "package": "Jinja2", + "vcs": null, + "version": "2.10.1" + }, + { + "type": "python_module", + "package": "pip", + "vcs": null, + "version": "18.1" + }, + { + "type": "python_module", + "package": "pyaml", + "vcs": null, + "version": "20.4.0" + }, + { + "type": "python_module", + "package": "MarkupSafe", + "vcs": null, + "version": "1.1.1" + }, + { + "type": "python_module", + "package": "charmhelpers", + "url": "git+https://github.com/juju/charm-helpers.git", + "branch": "refs/heads/stable/21.04", + "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", + "vcs": "git" + }, + { + "type": "python_module", + "package": "wheel", + "vcs": null, + "version": "0.33.6" + }, + { + "type": "python_module", + "package": "charms.ceph", + "url": "git+https://github.com/openstack/charms.ceph.git", + "branch": "refs/heads/master", + "version": "9bfe43ee654d016d7f09ede406c45674821f2866", + "vcs": "git" + }, + { + "type": "python_module", + "package": "charms.reactive", + "vcs": null, + "version": "1.4.1" + }, + { + "type": "python_module", + "package": "Tempita", + "vcs": null, + "version": "0.5.2" + }, + { + "type": "python_module", + "package": "PyYAML", + "vcs": null, + "version": "5.2" + }, + { + "type": "python_module", + "package": "setuptools_scm", + "vcs": null, + "version": "1.17.0" + }, + { + "type": "python_module", + "package": "psutil", + "vcs": null, + "version": "5.8.0" + }, + { + "type": "python_module", + "package": "netifaces", + "vcs": null, + "version": "0.10.9" + }, + { + "type": "python_module", + "package": "six", + "vcs": null, + "version": "1.15.0" + } + ] +} diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 520681e1..eb4844b8 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -11,5 +11,5 @@ charm-tools>=2.4.4 keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) 
-git+https://github.com/openstack-charmers/zaza.git#egg=zaza -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index 732aa8fc..200a2c0a 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -4,6 +4,6 @@ ceph_api pyxattr psutil -git+https://opendev.org/openstack/charms.openstack.git#egg=charms.openstack +git+https://opendev.org/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack -git+https://github.com/juju/charm-helpers.git#egg=charmhelpers +git+https://github.com/juju/charm-helpers.git@stable/21.04#egg=charmhelpers diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 3f085244..16bbc27d 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -35,7 +35,7 @@ mock>=1.2; python_version >= '3.6' nose>=1.3.7 coverage>=3.6 -git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack # # Revisit for removal / mock improvement: netifaces # vault diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index ce79fa16..391b2af8 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -28,7 +28,12 @@ deps = [testenv:build] basepython = python3 commands = - charm-build --log-level DEBUG -o {toxinidir}/build/builds src {posargs} + charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} + +[testenv:add-build-lock-file] +basepython = python3 +commands = + charm-build --log-level DEBUG --write-lock-file -o {toxinidir}/build/builds src {posargs} [testenv:py3] basepython = python3 From 0badc7eedfde49d657c66dc90edc2cb4f74e9444 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sat, 3 Apr 2021 20:18:02 +0100 Subject: [PATCH 2194/2699] 21.04 libraries freeze for charms on master branch * charm-helpers sync for classic charms * build.lock file for reactive charms * ensure tox.ini is from release-tools * ensure requirements.txt files are from release-tools * On reactive charms: - ensure stable/21.04 branch for charms.openstack - ensure stable/21.04 branch for charm-helpers Change-Id: I773d5f9f699af1a2ea7be543c4e58e0f7bc4433a --- ceph-osd/charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 4 +- .../contrib/openstack/amulet/utils.py | 1 + .../contrib/openstack/cert_utils.py | 27 +- .../charmhelpers/contrib/openstack/context.py | 16 +- .../contrib/openstack/deferred_events.py | 410 ++++++++++++++++++ .../contrib/openstack/exceptions.py | 5 + .../openstack/files/policy_rc_d_script.py | 196 +++++++++ .../contrib/openstack/policy_rcd.py | 173 ++++++++ .../charmhelpers/contrib/openstack/utils.py | 291 ++++++++++++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 20 + ceph-osd/hooks/charmhelpers/core/host.py | 236 ++++++++-- .../charmhelpers/core/host_factory/ubuntu.py | 12 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 1 + ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 88 +++- ceph-osd/lib/charms_ceph/utils.py | 22 +- ceph-osd/test-requirements.txt | 4 +- 17 files changed, 1401 insertions(+), 107 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py create mode 100755 ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py create mode 
100644 ceph-osd/hooks/charmhelpers/contrib/openstack/policy_rcd.py diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index ca383631..bad497a2 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers +repo: https://github.com/juju/charm-helpers@stable/21.04 destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index c87cf489..e4cb06bc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -337,10 +337,8 @@ def write(self): "command": nrpecheck.command, } # If we were passed max_check_attempts, add that to the relation data - try: + if nrpecheck.max_check_attempts is not None: nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts - except AttributeError: - pass # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 63aea1e3..0a14af7e 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -42,6 +42,7 @@ import swiftclient from charmhelpers.core.decorators import retry_on_exception + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 24867497..703fc6ef 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -47,7 +47,7 @@ ) from charmhelpers.core.host import ( - CA_CERT_DIR, + ca_cert_absolute_path, install_ca_cert, mkdir, write_file, @@ -307,6 +307,26 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): content=bundle['key'], perms=0o640) +def get_cert_relation_ca_name(cert_relation_id=None): + """Determine CA certificate name as provided by relation. + + The filename on disk depends on the name chosen for the application on the + providing end of the certificates relation. + + :param cert_relation_id: (Optional) Relation id providing the certs + :type cert_relation_id: str + :returns: CA certificate filename without path nor extension + :rtype: str + """ + if cert_relation_id is None: + try: + cert_relation_id = relation_ids('certificates')[0] + except IndexError: + return '' + return '{}_juju_ca_cert'.format( + remote_service_name(relid=cert_relation_id)) + + def _manage_ca_certs(ca, cert_relation_id): """Manage CA certs. 
@@ -316,7 +336,7 @@ def _manage_ca_certs(ca, cert_relation_id): :type cert_relation_id: str """ config_ssl_ca = config('ssl_ca') - config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE) + config_cert_file = ca_cert_absolute_path(CONFIG_CA_CERT_FILE) if config_ssl_ca: log("Installing CA certificate from charm ssl_ca config to {}".format( config_cert_file), INFO) @@ -329,8 +349,7 @@ def _manage_ca_certs(ca, cert_relation_id): log("Installing CA certificate from certificate relation", INFO) install_ca_cert( ca.encode(), - name='{}_juju_ca_cert'.format( - remote_service_name(relid=cert_relation_id))) + name=get_cert_relation_ca_name(cert_relation_id)) def process_certificates(service_name, relation_id, unit, diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index c242d18d..b67dafda 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -74,7 +74,6 @@ pwgen, lsb_release, CompareHostReleases, - is_container, ) from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, @@ -1596,16 +1595,21 @@ def _calculate_workers(): @returns int: number of worker processes to use ''' - multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + multiplier = config('worker-multiplier') + + # distinguish an empty config and an explicit config as 0.0 + if multiplier is None: + multiplier = DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) - if multiplier > 0 and count == 0: + if count <= 0: + # assign at least one worker count = 1 - if config('worker-multiplier') is None and is_container(): + if config('worker-multiplier') is None: # NOTE(jamespage): Limit unconfigured worker-multiplier # to MAX_DEFAULT_WORKERS to avoid insane - # worker configuration in LXD containers - # on large servers + # worker configuration on large servers # Reference: https://pad.lv/1665270 count = min(count, MAX_DEFAULT_WORKERS) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py new file mode 100644 index 00000000..fd073a04 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -0,0 +1,410 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing deferred service events. + +This module is used to manage deferred service events from both charm actions +and package actions. +""" + +import datetime +import glob +import yaml +import os +import time +import uuid + +import charmhelpers.contrib.openstack.policy_rcd as policy_rcd +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.unitdata as unitdata + +import subprocess + + +# Deferred events generated from the charm are stored along side those +# generated from packaging. 
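Stepping back to the _calculate_workers() change earlier in this patch: the corrected arithmetic is easier to see in isolation. A standalone sketch, with the charm-helpers constants inlined as assumed values:

    # Assumed values for DEFAULT_MULTIPLIER and MAX_DEFAULT_WORKERS; the
    # real constants live in charmhelpers.contrib.openstack.context.
    DEFAULT_MULTIPLIER = 4
    MAX_DEFAULT_WORKERS = 4

    def calculate_workers(num_cpus, multiplier=None):
        configured = multiplier is not None  # None means unset in config
        if not configured:
            multiplier = DEFAULT_MULTIPLIER
        count = int(num_cpus * multiplier)
        if count <= 0:
            count = 1  # always assign at least one worker
        if not configured:
            count = min(count, MAX_DEFAULT_WORKERS)  # cap only the default
        return count

    # calculate_workers(48)      -> 4   (default multiplier, then capped)
    # calculate_workers(48, 0.5) -> 24  (explicit multiplier, no cap)
    # calculate_workers(4, 0.0)  -> 1   (explicit 0.0 still gets one worker)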
+DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR + + +class ServiceEvent(): + + def __init__(self, timestamp, service, reason, action, + policy_requestor_name=None, policy_requestor_type=None): + self.timestamp = timestamp + self.service = service + self.reason = reason + self.action = action + if not policy_requestor_name: + self.policy_requestor_name = hookenv.service_name() + if not policy_requestor_type: + self.policy_requestor_type = 'charm' + + def __eq__(self, other): + for attr in vars(self): + if getattr(self, attr) != getattr(other, attr): + return False + return True + + def matching_request(self, other): + for attr in ['service', 'action', 'reason']: + if getattr(self, attr) != getattr(other, attr): + return False + return True + + @classmethod + def from_dict(cls, data): + return cls( + data['timestamp'], + data['service'], + data['reason'], + data['action'], + data.get('policy_requestor_name'), + data.get('policy_requestor_type')) + + +def deferred_events_files(): + """Deferred event files + + Deferred event files that were generated by service_name() policy. + + :returns: Deferred event files + :rtype: List[str] + """ + return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + +def read_event_file(file_name): + """Read a file and return the corresponding objects. + + :param file_name: Name of file to read. + :type file_name: str + :returns: ServiceEvent from file. + :rtype: ServiceEvent + """ + with open(file_name, 'r') as f: + contents = yaml.safe_load(f) + event = ServiceEvent( + contents['timestamp'], + contents['service'], + contents['reason'], + contents['action']) + return event + + +def deferred_events(): + """Get list of deferred events. + + List of deferred events. Events are represented by dicts of the form: + + { + action: restart, + policy_requestor_name: neutron-openvswitch, + policy_requestor_type: charm, + reason: 'Pkg update', + service: openvswitch-switch, + time: 1614328743} + + :returns: List of deferred events. + :rtype: List[ServiceEvent] + """ + events = [] + for defer_file in deferred_events_files(): + events.append((defer_file, read_event_file(defer_file))) + return events + + +def duplicate_event_files(event): + """Get list of event files that have equivalent deferred events. + + :param event: Event to compare + :type event: ServiceEvent + :returns: List of event files + :rtype: List[str] + """ + duplicates = [] + for event_file, existing_event in deferred_events(): + if event.matching_request(existing_event): + duplicates.append(event_file) + return duplicates + + +def get_event_record_file(policy_requestor_type, policy_requestor_name): + """Generate filename for storing a new event. + + :param policy_requestor_type: System that blocked event + :type policy_requestor_type: str + :param policy_requestor_name: Name of application that blocked event + :type policy_requestor_name: str + :returns: File name + :rtype: str + """ + file_name = '{}/{}-{}-{}.deferred'.format( + DEFERRED_EVENTS_DIR, + policy_requestor_type, + policy_requestor_name, + uuid.uuid1()) + return file_name + + +def save_event(event): + """Write deferred events to backend. + + :param event: Event to save + :type event: ServiceEvent + """ + requestor_name = hookenv.service_name() + requestor_type = 'charm' + init_policy_log_dir() + if duplicate_event_files(event): + hookenv.log( + "Not writing new event, existing event found. 
{} {} {}".format( + event.service, + event.action, + event.reason), + level="DEBUG") + else: + record_file = get_event_record_file( + policy_requestor_type=requestor_type, + policy_requestor_name=requestor_name) + + with open(record_file, 'w') as f: + data = { + 'timestamp': event.timestamp, + 'service': event.service, + 'action': event.action, + 'reason': event.reason, + 'policy_requestor_type': requestor_type, + 'policy_requestor_name': requestor_name} + yaml.dump(data, f) + + +def clear_deferred_events(svcs, action): + """Remove any outstanding deferred events. + + Remove a deferred event if its service is in the services list and its + action matches. + + :param svcs: List of services to remove. + :type svcs: List[str] + :param action: Action to remove + :type action: str + """ + # XXX This function is not currently processing the action. It needs to + # match the action and also take account of try-restart and the + # equivalnce of stop-start and restart. + for defer_file in deferred_events_files(): + deferred_event = read_event_file(defer_file) + if deferred_event.service in svcs: + os.remove(defer_file) + + +def init_policy_log_dir(): + """Ensure directory to store events exists.""" + if not os.path.exists(DEFERRED_EVENTS_DIR): + os.mkdir(DEFERRED_EVENTS_DIR) + + +def get_deferred_events(): + """Return a list of deferred events requested by the charm and packages. + + :returns: List of deferred events + :rtype: List[ServiceEvent] + """ + events = [] + for _, event in deferred_events(): + events.append(event) + return events + + +def get_deferred_restarts(): + """List of deferred restart events requested by the charm and packages. + + :returns: List of deferred restarts + :rtype: List[ServiceEvent] + """ + return [e for e in get_deferred_events() if e.action == 'restart'] + + +def clear_deferred_restarts(services): + """Clear deferred restart events targetted at `services`. + + :param services: Services with deferred actions to clear. + :type services: List[str] + """ + clear_deferred_events(services, 'restart') + + +def process_svc_restart(service): + """Respond to a service restart having occured. + + :param service: Services that the action was performed against. + :type service: str + """ + clear_deferred_restarts([service]) + + +def is_restart_permitted(): + """Check whether restarts are permitted. + + :returns: Whether restarts are permitted + :rtype: bool + """ + if hookenv.config('enable-auto-restarts') is None: + return True + return hookenv.config('enable-auto-restarts') + + +def check_and_record_restart_request(service, changed_files): + """Check if restarts are permitted, if they are not log the request. + + :param service: Service to be restarted + :type service: str + :param changed_files: Files that have changed to trigger restarts. + :type changed_files: List[str] + :returns: Whether restarts are permitted + :rtype: bool + """ + changed_files = sorted(list(set(changed_files))) + permitted = is_restart_permitted() + if not permitted: + save_event(ServiceEvent( + timestamp=round(time.time()), + service=service, + reason='File(s) changed: {}'.format( + ', '.join(changed_files)), + action='restart')) + return permitted + + +def deferrable_svc_restart(service, reason=None): + """Restarts service if permitted, if not defer it. 
+ + :param service: Service to be restarted + :type service: str + :param reason: Reason for restart + :type reason: Union[str, None] + """ + if is_restart_permitted(): + host.service_restart(service) + else: + save_event(ServiceEvent( + timestamp=round(time.time()), + service=service, + reason=reason, + action='restart')) + + +def configure_deferred_restarts(services): + """Set up deferred restarts. + + :param services: Services to block restarts of. + :type services: List[str] + """ + policy_rcd.install_policy_rcd() + if is_restart_permitted(): + policy_rcd.remove_policy_file() + else: + blocked_actions = ['stop', 'restart', 'try-restart'] + for svc in services: + policy_rcd.add_policy_block(svc, blocked_actions) + + +def get_service_start_time(service): + """Find the point in time when the systemd unit transitioned to active state. + + :param service: Service to check the timestamp of. + :type service: str + """ + start_time = None + out = subprocess.check_output( + [ + 'systemctl', + 'show', + service, + '--property=ActiveEnterTimestamp']) + str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '') + if str_time: + start_time = datetime.datetime.strptime( + str_time, + '%a %Y-%m-%d %H:%M:%S %Z') + return start_time + + +def check_restart_timestamps(): + """Check deferred restarts against systemd unit start times. + + Check if a service has a deferred event and clear it if it has been + subsequently restarted. + """ + for event in get_deferred_restarts(): + start_time = get_service_start_time(event.service) + deferred_restart_time = datetime.datetime.fromtimestamp( + event.timestamp) + if start_time and start_time < deferred_restart_time: + hookenv.log( + ("Restart still required, {} was started at {}, restart was " + "requested after that at {}").format( + event.service, + start_time, + deferred_restart_time), + level='DEBUG') + else: + clear_deferred_restarts([event.service]) + + +def set_deferred_hook(hookname): + """Record that a hook has been deferred. + + :param hookname: Name of hook that was deferred. + :type hookname: str + """ + with unitdata.HookData()() as t: + kv = t[0] + deferred_hooks = kv.get('deferred-hooks', []) + if hookname not in deferred_hooks: + deferred_hooks.append(hookname) + kv.set('deferred-hooks', sorted(list(set(deferred_hooks)))) + + +def get_deferred_hooks(): + """Get a list of deferred hooks. + + :returns: List of hook names. + :rtype: List[str] + """ + with unitdata.HookData()() as t: + kv = t[0] + return kv.get('deferred-hooks', []) + + +def clear_deferred_hooks(): + """Clear any deferred hooks.""" + with unitdata.HookData()() as t: + kv = t[0] + kv.set('deferred-hooks', []) + + +def clear_deferred_hook(hookname): + """Clear a specific deferred hook. + + :param hookname: Name of hook to remove. 
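Taken together, the helpers above give a charm a compact pattern for honouring an enable-auto-restarts style option. A hedged sketch of hook-side usage (the service name and reason are illustrative):

    import charmhelpers.contrib.openstack.deferred_events as deferred_events

    SERVICES = ['ceph-osd']

    def config_changed():
        # Install or refresh policy-rc.d blocks when restarts are disabled.
        deferred_events.configure_deferred_restarts(SERVICES)
        # ... render configuration files here ...
        for svc in SERVICES:
            # Restart now if permitted, otherwise record a deferred event.
            deferred_events.deferrable_svc_restart(svc, reason='Config change')
        # Drop deferred events already satisfied by a later manual restart.
        deferred_events.check_restart_timestamps()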
+ :type hookname: str + """ + with unitdata.HookData()() as t: + kv = t[0] + deferred_hooks = kv.get('deferred-hooks', []) + if hookname in deferred_hooks: + deferred_hooks.remove(hookname) + kv.set('deferred-hooks', deferred_hooks) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py index f85ae4f4..b2330637 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/exceptions.py @@ -19,3 +19,8 @@ class OSContextError(Exception): This exception is principally used in contrib.openstack.context """ pass + + +class ServiceActionError(Exception): + """Raised when a service action (stop/start/etc.) failed.""" + pass diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py new file mode 100755 index 00000000..344a7662 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 + +"""This script is an implementation of policy-rc.d + +For further information on policy-rc.d see *1 + +*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt +""" +import collections +import glob +import os +import logging +import sys +import time +import uuid +import yaml + + +SystemPolicy = collections.namedtuple( + 'SystemPolicy', + [ + 'policy_requestor_name', + 'policy_requestor_type', + 'service', + 'blocked_actions']) + +DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d' +DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d' + + +def read_policy_file(policy_file): + """Return system policies from the given file. + + :param policy_file: Name of file to read. + :type policy_file: str + :returns: Policy + :rtype: List[SystemPolicy] + """ + policies = [] + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + for service, actions in policy['blocked_actions'].items(): + service = service.replace('.service', '') + policies.append(SystemPolicy( + policy_requestor_name=policy['policy_requestor_name'], + policy_requestor_type=policy['policy_requestor_type'], + service=service, + blocked_actions=actions)) + return policies + + +def get_policies(policy_config_dir): + """Return all system policies in policy_config_dir. + + :param policy_config_dir: Directory to read policy files from. + :type policy_config_dir: str + :returns: Policy + :rtype: List[SystemPolicy] + """ + _policy = [] + for f in glob.glob('{}/*.policy'.format(policy_config_dir)): + _policy.extend(read_policy_file(f)) + return _policy + + +def record_blocked_action(service, action, blocking_policies, policy_log_dir): + """Record that an action was requested but denied. + + :param service: Service that was blocked + :type service: str + :param action: Action that was blocked. + :type action: str + :param blocking_policies: Policies that blocked the action on the service. + :type blocking_policies: List[SystemPolicy] + :param policy_log_dir: Directory to place the blocking action record. 
+ :type policy_log_dir: str + """ + if not os.path.exists(policy_log_dir): + os.mkdir(policy_log_dir) + seconds = round(time.time()) + for policy in blocking_policies: + if not os.path.exists(policy_log_dir): + os.mkdir(policy_log_dir) + file_name = '{}/{}-{}-{}.deferred'.format( + policy_log_dir, + policy.policy_requestor_type, + policy.policy_requestor_name, + uuid.uuid1()) + with open(file_name, 'w') as f: + data = { + 'timestamp': seconds, + 'service': service, + 'action': action, + 'reason': 'Package update', + 'policy_requestor_type': policy.policy_requestor_type, + 'policy_requestor_name': policy.policy_requestor_name} + yaml.dump(data, f) + + +def get_blocking_policies(service, action, policy_config_dir): + """Return the policies that block the requested action on the service. + + :param service: Service that action is requested against. + :type service: str + :param action: Action that is requested. + :type action: str + :param policy_config_dir: Directory that stores policy files. + :type policy_config_dir: str + :returns: Policies + :rtype: List[SystemPolicy] + """ + service = service.replace('.service', '') + blocking_policies = [ + policy + for policy in get_policies(policy_config_dir) + if policy.service == service and action in policy.blocked_actions] + return blocking_policies + + +def process_action_request(service, action, policy_config_dir, policy_log_dir): + """Take the requested action against service and check if it is permitted. + + :param service: Service that action is requested against. + :type service: str + :param action: Action that is requested. + :type action: str + :param policy_config_dir: Directory that stores policy files. + :type policy_config_dir: str + :param policy_log_dir: Directory that stores deferred event records. + :type policy_log_dir: str + :returns: Tuple of whether the action is permitted and explanation. + :rtype: (boolean, str) + """ + blocking_policies = get_blocking_policies( + service, + action, + policy_config_dir) + if blocking_policies: + policy_msg = [ + '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name) + for p in sorted(blocking_policies)] + message = '{} of {} blocked by {}'.format( + action, + service, + ', '.join(policy_msg)) + record_blocked_action( + service, + action, + blocking_policies, + policy_log_dir) + action_permitted = False + else: + message = "Permitting {} {}".format(service, action) + action_permitted = True + return action_permitted, message + + +def main(): + logging.basicConfig( + filename='/var/log/policy-rc.d.log', + level=logging.DEBUG, + format='%(asctime)s %(message)s') + + service = sys.argv[1] + action = sys.argv[2] + + permitted, message = process_action_request( + service, + action, + DEFAULT_POLICY_CONFIG_DIR, + DEFAULT_POLICY_LOG_DIR) + logging.info(message) + + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + # Exit status codes: + # 0 - action allowed + # 1 - unknown action (therefore, undefined policy) + # 100 - unknown initscript id + # 101 - action forbidden by policy + # 102 - subsystem error + # 103 - syntax error + # 104 - [reserved] + # 105 - behaviour uncertain, policy undefined. + # 106 - action not allowed. Use the returned fallback actions + # (which are implied to be "allowed") instead. 
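Exit code 101 is the one that drives deferrals. An installed policy can be exercised by hand the same way invoke-rc.d does during package maintenance; a sketch (the service name is illustrative):

    import subprocess

    # 0 means the action is allowed; 101 means a policy file blocked it and
    # a .deferred record was written under /var/lib/policy-rc.d.
    rc = subprocess.call(['/usr/sbin/policy-rc.d', 'ceph-osd', 'restart'])
    print('permitted' if rc == 0 else 'blocked (rc={})'.format(rc))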
+ + if permitted: + return 0 + else: + return 101 + + +if __name__ == "__main__": + rc = main() + sys.exit(rc) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policy_rcd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policy_rcd.py new file mode 100644 index 00000000..ecffbc68 --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policy_rcd.py @@ -0,0 +1,173 @@ +# Copyright 2021 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for managing policy-rc.d script and associated files. + +This module manages the installation of /usr/sbin/policy-rc.d, the +policy files and the event files. When a package update occurs the +packaging system calls: + +policy-rc.d [options] + +The return code of the script determines if the packaging system +will perform that action on the given service. The policy-rc.d +implementation installed by this module checks if an action is +permitted by checking policy files placed in /etc/policy-rc.d. +If a policy file exists which denies the requested action then +this is recorded in an event file which is placed in +/var/lib/policy-rc.d. +""" + +import os +import shutil +import tempfile +import yaml + +import charmhelpers.contrib.openstack.files as os_files +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host + +POLICY_HEADER = """# Managed by juju\n""" +POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' +POLICY_CONFIG_DIR = '/etc/policy-rc.d' + + +def get_policy_file_name(): + """Get the name of the policy file for this application. + + :returns: Policy file name + :rtype: str + """ + application_name = hookenv.service_name() + return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) + + +def read_default_policy_file(): + """Return the policy file. + + A policy is in the form: + blocked_actions: + neutron-dhcp-agent: [restart, stop, try-restart] + neutron-l3-agent: [restart, stop, try-restart] + neutron-metadata-agent: [restart, stop, try-restart] + neutron-openvswitch-agent: [restart, stop, try-restart] + openvswitch-switch: [restart, stop, try-restart] + ovs-vswitchd: [restart, stop, try-restart] + ovs-vswitchd-dpdk: [restart, stop, try-restart] + ovsdb-server: [restart, stop, try-restart] + policy_requestor_name: neutron-openvswitch + policy_requestor_type: charm + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = {} + policy_file = get_policy_file_name() + if os.path.exists(policy_file): + with open(policy_file, 'r') as f: + policy = yaml.safe_load(f) + return policy + + +def write_policy_file(policy_file, policy): + """Write policy to disk. 
+ + :param policy_file: Name of policy file + :type policy_file: str + :param policy: Policy + :type policy: Dict[str, Union[str, Dict[str, List[str]]]] + """ + with tempfile.NamedTemporaryFile('w', delete=False) as f: + f.write(POLICY_HEADER) + yaml.dump(policy, f) + tmp_file_name = f.name + shutil.move(tmp_file_name, policy_file) + + +def remove_policy_file(): + """Remove policy file.""" + try: + os.remove(get_policy_file_name()) + except FileNotFoundError: + pass + + +def install_policy_rcd(): + """Install policy-rc.d components.""" + source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) + policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( + hookenv.service_name()) + host.mkdir(os.path.dirname(policy_rcd_exec)) + shutil.copy2( + '{}/policy_rc_d_script.py'.format(source_file_dir), + policy_rcd_exec) + # policy-rc.d must be installed via the alternatives system: + # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt + if not os.path.exists('/usr/sbin/policy-rc.d'): + alternatives.install_alternative( + 'policy-rc.d', + '/usr/sbin/policy-rc.d', + policy_rcd_exec) + host.mkdir(POLICY_CONFIG_DIR) + + +def get_default_policy(): + """Return the default policy structure. + + :returns: Policy + :rtype: Dict[str, Union[str, Dict[str, List[str]]] + """ + policy = { + 'policy_requestor_name': hookenv.service_name(), + 'policy_requestor_type': 'charm', + 'blocked_actions': {}} + return policy + + +def add_policy_block(service, blocked_actions): + """Update a policy file with new list of actions. + + :param service: Service name + :type service: str + :param blocked_actions: Action to block + :type blocked_actions: List[str] + """ + policy = read_default_policy_file() or get_default_policy() + policy_file = get_policy_file_name() + if policy['blocked_actions'].get(service): + policy['blocked_actions'][service].extend(blocked_actions) + else: + policy['blocked_actions'][service] = blocked_actions + policy['blocked_actions'][service] = sorted( + list(set(policy['blocked_actions'][service]))) + write_policy_file(policy_file, policy) + + +def remove_policy_block(service, unblocked_actions): + """Remove list of actions from policy file. + + :param service: Service name + :type service: str + :param unblocked_actions: Action to unblock + :type unblocked_actions: List[str] + """ + policy_file = get_policy_file_name() + policy = read_default_policy_file() + for action in unblocked_actions: + try: + policy['blocked_actions'][service].remove(action) + except (KeyError, ValueError): + continue + write_policy_file(policy_file, policy) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index f27aa6c9..2ad8ab94 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -14,7 +14,7 @@ # Common python helper functions used for OpenStack charms. 
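In practice the policy_rcd module above is driven by three calls. A sketch (service and actions are illustrative):

    import charmhelpers.contrib.openstack.policy_rcd as policy_rcd

    # Install the policy-rc.d script and register it via alternatives.
    policy_rcd.install_policy_rcd()
    # Block packaging-driven service actions for this application.
    policy_rcd.add_policy_block('ceph-osd', ['stop', 'restart', 'try-restart'])
    # ... package upgrades now receive exit code 101 for those actions ...
    policy_rcd.remove_policy_block('ceph-osd',
                                   ['stop', 'restart', 'try-restart'])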
from collections import OrderedDict, namedtuple -from functools import wraps +from functools import partial, wraps import subprocess import json @@ -36,9 +36,12 @@ from charmhelpers.core import decorators, unitdata +import charmhelpers.contrib.openstack.deferred_events as deferred_events + from charmhelpers.core.hookenv import ( WORKLOAD_STATES, action_fail, + action_get, action_set, config, expected_peer_units, @@ -112,7 +115,7 @@ from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError from charmhelpers.contrib.openstack.policyd import ( policyd_status_message_prefix, POLICYD_CONFIG_NAME, @@ -148,6 +151,7 @@ 'train', 'ussuri', 'victoria', + 'wallaby', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -170,6 +174,7 @@ ('eoan', 'train'), ('focal', 'ussuri'), ('groovy', 'victoria'), + ('hirsute', 'wallaby'), ]) @@ -193,6 +198,7 @@ ('2019.2', 'train'), ('2020.1', 'ussuri'), ('2020.2', 'victoria'), + ('2021.1', 'wallaby'), ]) # The ugly duckling - must list releases oldest to newest @@ -301,8 +307,8 @@ ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), + ('18', 'ussuri'), # Note this was actually 17.0 - 18.3 + ('19', 'victoria'), # Note this is really 18.6 ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -483,9 +489,26 @@ def get_swift_codename(version): return None -@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log) def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' + """Derive OpenStack release codename from an installed package. + + Initially, see if the openstack-release pkg is available (by trying to + install it) and use it instead. + + If it isn't then it falls back to the existing method of checking the + version of the package passed and then resolving the version from that + using lookup tables. + + Note: if possible, charms should use get_installed_os_version() to + determine the version of the "openstack-release" pkg. + + :param package: the package to test for version information. + :type package: str + :param fatal: If True (default), then die via error_out() + :type fatal: bool + :returns: the OpenStack release codename (e.g. ussuri) + :rtype: str + """ codename = get_installed_os_version() if codename: @@ -579,8 +602,22 @@ def get_os_version_package(pkg, fatal=True): def get_installed_os_version(): - apt_install(filter_installed_packages(['openstack-release']), fatal=False) - print("OpenStack Release: {}".format(openstack_release())) + """Determine the OpenStack release code name from openstack-release pkg. + + This uses the "openstack-release" pkg (if it exists) to return the + OpenStack release codename (e.g. usurri, mitaka, ocata, etc.) + + Note, it caches the result so that it is only done once per hook. 
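A sketch of combining the two lookups described above, preferring the openstack-release package and falling back to payload-package inspection (the package name is illustrative):

    from charmhelpers.contrib.openstack.utils import (
        get_installed_os_version,
        get_os_codename_package,
    )

    # get_installed_os_version() returns None when openstack-release is
    # unavailable, so fall back to version-table lookup on a payload package.
    codename = (get_installed_os_version() or
                get_os_codename_package('radosgw', fatal=False))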
+ + :returns: the OpenStack release codename, if available + :rtype: Optional[str] + """ + @cached + def _do_install(): + apt_install(filter_installed_packages(['openstack-release']), + fatal=False, quiet=True) + + _do_install() return openstack_release().get('OPENSTACK_CODENAME') @@ -1052,6 +1089,18 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + deferred_restarts = list(set( + [e.service for e in deferred_events.get_deferred_restarts()])) + if deferred_restarts: + svc_msg = "Services queued for restart: {}".format( + ', '.join(sorted(deferred_restarts))) + message = "{}. {}".format(message, svc_msg) + deferred_hooks = deferred_events.get_deferred_hooks() + if deferred_hooks: + svc_msg = "Hooks skipped due to disabled auto restarts: {}".format( + ', '.join(sorted(deferred_hooks))) + message = "{}. {}".format(message, svc_msg) + except Exception: pass @@ -1536,6 +1585,33 @@ def is_unit_paused_set(): return False + +def is_hook_allowed(hookname, check_deferred_restarts=True): + """Check if hook can run. + + :param hookname: Name of hook to check. + :type hookname: str + :param check_deferred_restarts: Whether to check deferred restarts. + :type check_deferred_restarts: bool + :returns: Tuple of whether the hook is permitted and the reason(s) if not. + :rtype: (bool, str) + """ + permitted = True + reasons = [] + if is_unit_paused_set(): + reasons.append( + "Unit is paused or upgrading. Skipping {}".format(hookname)) + permitted = False + + if check_deferred_restarts: + if deferred_events.is_restart_permitted(): + permitted = True + deferred_events.clear_deferred_hook(hookname) + else: + if not config().changed('enable-auto-restarts'): + deferred_events.set_deferred_hook(hookname) + reasons.append("auto restarts are disabled") + permitted = False + return permitted, " and ".join(reasons) + + def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. @@ -1696,6 +1772,43 @@ def resume_unit(assess_status_func, services=None, ports=None, raise Exception("Couldn't resume: {}".format("; ".join(messages))) + +def restart_services_action(services=None, when_all_stopped_func=None, + deferred_only=None): + """Manage a service restart request via charm action. + + :param services: Services to be restarted + :type services: List[str] + :param when_all_stopped_func: Function to call when all services are + stopped. + :type when_all_stopped_func: Callable[] + :param deferred_only: Only restart services which have a deferred restart + event. + :type deferred_only: bool + """ + if services and deferred_only: + raise ValueError( + "services and deferred_only are mutually exclusive") + if deferred_only: + services = list(set( + [a.service for a in deferred_events.get_deferred_restarts()])) + _, messages = manage_payload_services( + 'stop', + services=services, + charm_func=when_all_stopped_func) + if messages: + raise ServiceActionError( + "Error processing service stop request: {}".format( + "; ".join(messages))) + _, messages = manage_payload_services( + 'start', + services=services) + if messages: + raise ServiceActionError( + "Error processing service start request: {}".format( + "; ".join(messages))) + deferred_events.clear_deferred_restarts(services) + + def make_assess_status_func(*args, **kwargs): """Creates an assess_status_func() suitable for handing to pause_unit() and resume_unit(). 
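A sketch of gating a hook on is_hook_allowed() from above (the hook name is illustrative):

    from charmhelpers.core.hookenv import log
    from charmhelpers.contrib.openstack.utils import is_hook_allowed

    def config_changed():
        permitted, reason = is_hook_allowed('config-changed')
        if not permitted:
            # The hook has been recorded as deferred; skip quietly.
            log('Skipping config-changed: {}'.format(reason), level='WARNING')
            return
        # ... normal hook processing ...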
@@ -1717,7 +1830,10 @@ def _assess_status_func(): def pausable_restart_on_change(restart_map, stopstart=False, - restart_functions=None): + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): """A restart_on_change decorator that checks to see if the unit is paused. If it is paused then the decorated function doesn't fire. @@ -1743,11 +1859,28 @@ def some_hook(...): function won't be called if the decorated function is never called. Note, retains backwards compatibility for passing a non-callable dictionary. - @param f: the function to decorate - @param restart_map: (optionally callable, which then returns the - restart_map) the restart map {conf_file: [services]} - @param stopstart: DEFAULT false; whether to stop, start or just restart - @returns decorator to use a restart_on_change with pausability + :param f: function to decorate. + :type f: Callable + :param restart_map: Optionally callable, which then returns the restart_map or + the restart map {conf_file: [services]} + :type restart_map: Union[Callable[[],], Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + :returns: decorator to use a restart_on_change with pausability + :rtype: decorator + + """ def wrap(f): # py27 compatible nonlocal variable. When py3 only, replace with @@ -1763,8 +1896,13 @@ def wrapped_f(*args, **kwargs): if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], - stopstart, restart_functions) + (lambda: f(*args, **kwargs)), + __restart_map_cache['cache'], + stopstart, + restart_functions, + can_restart_now_f, + post_svc_restart_f, + pre_restarts_wait_f) return wrapped_f return wrap @@ -2145,6 +2283,23 @@ def container_scoped_relations(): return relations +def container_scoped_relation_get(attribute=None): + """Get relation data from all container scoped relations. + + :param attribute: Name of attribute to get + :type attribute: Optional[str] + :returns: Iterator with relation data + :rtype: Iterator[Optional[any]] + """ + for endpoint_name in container_scoped_relations(): + for rid in relation_ids(endpoint_name): + for unit in related_units(rid): + yield relation_get( + attribute=attribute, + unit=unit, + rid=rid) + + def is_db_ready(use_current_context=False, rel_name=None): """Check remote database is ready to be used. @@ -2418,3 +2573,107 @@ def get_api_application_status(): msg = 'Some units are not ready' juju_log(msg, 'DEBUG') return app_state, msg + + +def sequence_status_check_functions(*functions): + """Sequence the functions passed so that they all get a chance to run as + the charm status check functions. 
+ + :param *functions: a list of functions that return (state, message) + :type *functions: List[Callable[[OSConfigRender], (str, str)]] + :returns: the Callable that takes configs and returns (state, message) + :rtype: Callable[[OSConfigRender], (str, str)] + """ + def _inner_sequenced_functions(configs): + state, message = 'unknown', '' + for f in functions: + new_state, new_message = f(configs) + state = workload_state_compare(state, new_state) + if message: + message = "{}, {}".format(message, new_message) + else: + message = new_message + return state, message + + return _inner_sequenced_functions + + +SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge']) + + +def get_subordinate_release_packages(os_release, package_type='deb'): + """Iterate over subordinate relations and get package information. + + :param os_release: OpenStack release to look for + :type os_release: str + :param package_type: Package type (one of 'deb' or 'snap') + :type package_type: str + :returns: Packages to install and packages to purge or None + :rtype: SubordinatePackages[set,set] + """ + install = set() + purge = set() + + for rdata in container_scoped_relation_get('releases-packages-map'): + rp_map = json.loads(rdata or '{}') + # The map provided by subordinate has OpenStack release name as key. + # Find package information from subordinate matching requested release + # or the most recent release prior to requested release by sorting the + # keys in reverse order. This follows established patterns in our + # charms for templates and reactive charm implementations, i.e. as long + # as nothing has changed the definitions for the prior OpenStack + # release is still valid. + for release in sorted(rp_map.keys(), reverse=True): + if (CompareOpenStackReleases(release) <= os_release and + package_type in rp_map[release]): + for name, container in ( + ('install', install), + ('purge', purge)): + for pkg in rp_map[release][package_type].get(name, []): + container.add(pkg) + break + return SubordinatePackages(install, purge) + + +os_restart_on_change = partial( + pausable_restart_on_change, + can_restart_now_f=deferred_events.check_and_record_restart_request, + post_svc_restart_f=deferred_events.process_svc_restart) + + +def restart_services_action_helper(all_services): + """Helper to run the restart-services action. + + NOTE: all_services is all services that could be restarted but + depending on the action arguments it may be a subset of + these that are actually restarted. 
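os_restart_on_change above composes the deferred-event checks into the familiar decorator. A sketch of charm-side use (paths and services are illustrative):

    from charmhelpers.contrib.openstack.utils import os_restart_on_change

    @os_restart_on_change({'/etc/ceph/ceph.conf': ['ceph-osd']})
    def render_configs():
        # Write out templates; a changed ceph.conf restarts ceph-osd, or
        # records a deferred restart when auto restarts are disabled.
        pass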
+ + :param all_services: All services that could be restarted + :type all_services: List[str] + """ + deferred_only = action_get("deferred-only") + services = action_get("services") + if services: + services = services.split() + else: + services = all_services + if deferred_only: + restart_services_action(deferred_only=True) + else: + restart_services_action(services=services) + + +def show_deferred_events_action_helper(): + """Helper to run the show-deferred-restarts action.""" + restarts = [] + for event in deferred_events.get_deferred_events(): + restarts.append('{} {} {}'.format( + str(event.timestamp), + event.service.ljust(40), + event.reason)) + restarts.sort() + output = { + 'restarts': restarts, + 'hooks': deferred_events.get_deferred_hooks()} + action_set({'output': "{}".format( + yaml.dump(output, default_flow_style=False))}) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index db7ce728..778aa4b6 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -226,6 +226,17 @@ def relation_id(relation_name=None, service_or_unit=None): raise ValueError('Must specify neither or both of relation_name and service_or_unit') +def departing_unit(): + """The departing unit for the current relation hook. + + Available since juju 2.8. + + :returns: the departing unit, or None if the information isn't available. + :rtype: Optional[str] + """ + return os.environ.get('JUJU_DEPARTING_UNIT', None) + + def local_unit(): """Local unit ID""" return os.environ['JUJU_UNIT_NAME'] @@ -1611,3 +1622,12 @@ def _contains_range(addresses): addresses.startswith(".") or ",." in addresses or " ." in addresses) + + +def is_subordinate(): + """Check whether charm is subordinate in unit metadata. + + :returns: True if unit is subordniate, False otherwise. + :rtype: bool + """ + return metadata().get('subordinate') is True diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index f826f6fe..d25e6c59 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ import six from contextlib import contextmanager -from collections import OrderedDict +from collections import OrderedDict, defaultdict from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -694,74 +694,223 @@ class ChecksumError(ValueError): pass -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing +class restart_on_change(object): + """Decorator and context manager to handle restarts. - This function is used a decorator, for example:: + Usage: - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here + @restart_on_change(restart_map, ...) + def function_that_might_trigger_a_restart(...) + ... - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. + Or: - @param restart_map: {path_file_name: [service_name, ...] 
- @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function + with restart_on_change(restart_map, ...): + do_stuff_that_might_trigger_a_restart() + ... """ - def wrap(f): + + def __init__(self, restart_map, stopstart=False, restart_functions=None, + can_restart_now_f=None, post_svc_restart_f=None, + pre_restarts_wait_f=None): + """ + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart + services {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + """ + self.restart_map = restart_map + self.stopstart = stopstart + self.restart_functions = restart_functions + self.can_restart_now_f = can_restart_now_f + self.post_svc_restart_f = post_svc_restart_f + self.pre_restarts_wait_f = pre_restarts_wait_f + + def __call__(self, f): + """Work like a decorator. + + Returns a wrapped function that performs the restart if triggered. + + :param f: The function that is being wrapped. + :type f: Callable[[Any], Any] + :returns: the wrapped function + :rtype: Callable[[Any], Any] + """ @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) return wrapped_f - return wrap + + def __enter__(self): + """Enter the runtime context related to this object. """ + self.checksums = _pre_restart_on_change_helper(self.restart_map) + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the runtime context related to this object. + + The parameters describe the exception that caused the context to be + exited. If the context was exited without an exception, all three + arguments will be None. + """ + if exc_type is None: + _post_restart_on_change_helper( + self.checksums, + self.restart_map, + stopstart=self.stopstart, + restart_functions=self.restart_functions, + can_restart_now_f=self.can_restart_now_f, + post_svc_restart_f=self.post_svc_restart_f, + pre_restarts_wait_f=self.pre_restarts_wait_f) + # All is good, so return False; any exceptions will propagate. + return False def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): """Helper function to perform the restart_on_change function. This is provided for decorators to restart services if files described in the restart_map have changed after an invocation of lambda_f(). - @param lambda_f: function to call. 
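The restart_on_change class above keeps the decorator behaviour but also works as a context manager. A sketch (file and service names are illustrative):

    from charmhelpers.core.host import restart_on_change

    def write_config(path, content):
        with open(path, 'w') as f:
            f.write(content)

    # On exit, any checksum change to ceph.conf triggers a ceph-osd restart.
    with restart_on_change({'/etc/ceph/ceph.conf': ['ceph-osd']}):
        write_config('/etc/ceph/ceph.conf', '[global]\n')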
- @param restart_map: {file: [service, ...]} - @param stopstart: whether to stop, start or restart a service - @param restart_functions: nonstandard functions to use to restart services + This functions allows for a number of helper functions to be passed. + + `restart_functions` is a map with a service as the key and the + corresponding value being the function to call to restart the service. For + example if `restart_functions={'some-service': my_restart_func}` then + `my_restart_func` should a function which takes one argument which is the + service name to be retstarted. + + `can_restart_now_f` is a function which checks that a restart is permitted. + It should return a bool which indicates if a restart is allowed and should + take a service name (str) and a list of changed files (List[str]) as + arguments. + + `post_svc_restart_f` is a function which runs after a service has been + restarted. It takes the service name that was restarted as an argument. + + `pre_restarts_wait_f` is a function which is called before any restarts + occur. The use case for this is an application which wants to try and + stagger restarts between units. + + :param lambda_f: function to call. + :type lambda_f: Callable[[], ANY] + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart services {svc: func, ...} - @returns result of lambda_f() + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. + :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] + :returns: result of lambda_f() + :rtype: ANY + """ + checksums = _pre_restart_on_change_helper(restart_map) + r = lambda_f() + _post_restart_on_change_helper(checksums, + restart_map, + stopstart, + restart_functions, + can_restart_now_f, + post_svc_restart_f, + pre_restarts_wait_f) + return r + + +def _pre_restart_on_change_helper(restart_map): + """Take a snapshot of file hashes. + + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :returns: Dictionary of file paths and the files checksum. + :rtype: Dict[str, str] + """ + return {path: path_hash(path) for path in restart_map} + + +def _post_restart_on_change_helper(checksums, + restart_map, + stopstart=False, + restart_functions=None, + can_restart_now_f=None, + post_svc_restart_f=None, + pre_restarts_wait_f=None): + """Check whether files have changed. + + :param checksums: Dictionary of file paths and the files checksum. + :type checksums: Dict[str, str] + :param restart_map: {file: [service, ...]} + :type restart_map: Dict[str, List[str,]] + :param stopstart: whether to stop, start or restart a service + :type stopstart: booleean + :param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + :type restart_functions: Dict[str, Callable[[str], None]] + :param can_restart_now_f: A function used to check if the restart is + permitted. + :type can_restart_now_f: Callable[[str, List[str]], boolean] + :param post_svc_restart_f: A function run after a service has + restarted. 
+ :type post_svc_restart_f: Callable[[str], None] + :param pre_restarts_wait_f: A function callled before any restarts. + :type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() + changed_files = defaultdict(list) + restarts = [] # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] + for path, services in restart_map.items(): + if path_hash(path) != checksums[path]: + restarts.append(services) + for svc in services: + changed_files[svc].append(path) # create a flat list of ordered services without duplicates from lists services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) if services_list: + if pre_restarts_wait_f: + pre_restarts_wait_f() actions = ('stop', 'start') if stopstart else ('restart',) for service_name in services_list: + if can_restart_now_f: + if not can_restart_now_f(service_name, + changed_files[service_name]): + continue if service_name in restart_functions: restart_functions[service_name](service_name) else: for action in actions: service(action, service_name) - return r + if post_svc_restart_f: + post_svc_restart_f(service_name) def pwgen(length=None): @@ -1068,6 +1217,17 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): return calculated_wait_time +def ca_cert_absolute_path(basename_without_extension): + """Returns absolute path to CA certificate. + + :param basename_without_extension: Filename without extension + :type basename_without_extension: str + :returns: Absolute full path + :rtype: str + """ + return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension) + + def install_ca_cert(ca_cert, name=None): """ Install the given cert as a trusted CA. @@ -1083,7 +1243,7 @@ def install_ca_cert(ca_cert, name=None): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) - cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name) + cert_file = ca_cert_absolute_path(name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index a3ec6947..7ee8a6ed 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -96,12 +96,14 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. 
""" - from charmhelpers.fetch import apt_pkg + from charmhelpers.fetch import apt_pkg, get_installed_version if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + current_ver = get_installed_version(package) + else: + pkg = pkgcache[package] + current_ver = pkg.current_ver + + return apt_pkg.version_compare(current_ver.ver_str, revno) @cached diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 0cc7fc85..5b689f5b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -105,6 +105,7 @@ def base_url(self, url): get_upstream_version = fetch.get_upstream_version apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env + get_installed_version = fetch.get_installed_version elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index b5953019..b38edcc1 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -13,6 +13,7 @@ # limitations under the License. from collections import OrderedDict +import os import platform import re import six @@ -20,6 +21,7 @@ import sys import time +from charmhelpers import deprecate from charmhelpers.core.host import get_distrib_codename, get_system_env from charmhelpers.core.hookenv import ( @@ -198,6 +200,14 @@ 'victoria/proposed': 'focal-proposed/victoria', 'focal-victoria/proposed': 'focal-proposed/victoria', 'focal-proposed/victoria': 'focal-proposed/victoria', + # Wallaby + 'wallaby': 'focal-updates/wallaby', + 'focal-wallaby': 'focal-updates/wallaby', + 'focal-wallaby/updates': 'focal-updates/wallaby', + 'focal-updates/wallaby': 'focal-updates/wallaby', + 'wallaby/proposed': 'focal-proposed/wallaby', + 'focal-wallaby/proposed': 'focal-proposed/wallaby', + 'focal-proposed/wallaby': 'focal-proposed/wallaby', } @@ -251,13 +261,19 @@ def apt_cache(*_, **__): # Detect this situation, log a warning and make the call to # ``apt_pkg.init()`` to avoid the consumer Python interpreter from # crashing with a segmentation fault. - log('Support for use of upstream ``apt_pkg`` module in conjunction' - 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + @deprecate( + 'Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', + date=None, log=lambda x: log(x, level=WARNING)) + def one_shot_log(): + pass + + one_shot_log() sys.modules['apt_pkg'].init() return ubuntu_apt_pkg.Cache() -def apt_install(packages, options=None, fatal=False): +def apt_install(packages, options=None, fatal=False, quiet=False): """Install one or more packages. :param packages: Package(s) to install @@ -267,6 +283,8 @@ def apt_install(packages, options=None, fatal=False): :param fatal: Whether the command's output should be checked and retried. 
:type fatal: bool + :param quiet: if True, suppress the log message to stdout/stderr + :type quiet: bool :raises: subprocess.CalledProcessError """ if options is None: @@ -279,9 +297,10 @@ cmd.append(packages) else: cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) + if not quiet: + log("Installing {} with options: {}" + .format(packages, options)) + _run_apt_command(cmd, fatal, quiet=quiet) def apt_upgrade(options=None, fatal=False, dist=False): @@ -639,14 +658,17 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ + series = get_distrib_codename() if '{series}' in spec: - series = get_distrib_codename() spec = spec.replace('{series}', series) # software-properties package for bionic properly reacts to proxy settings - # passed as environment variables (See lp:1433761). This is not the case - # LTS and non-LTS releases below bionic. - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) + # set via apt.conf (see lp:1433761); however, this is not the case for LTS + # and non-LTS releases before bionic. + if series in ('trusty', 'xenial'): + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) + else: + _run_with_retries(['add-apt-repository', '--yes', spec]) def _add_cloud_pocket(pocket): @@ -723,7 +745,7 @@ def _verify_is_ubuntu_rel(release, os_release): def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), - retry_message="", cmd_env=None): + retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. :param cmd: The apt command to run. @@ -738,11 +760,20 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), :type retry_message: str :param cmd_env: Environment variables to add to the command run. :type cmd_env: Option[None, Dict[str, str]] + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ env = get_apt_dpkg_env() if cmd_env: env.update(cmd_env) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) @@ -753,7 +784,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - result = subprocess.check_call(cmd, env=env) + result = subprocess.check_call(cmd, env=env, **kwargs) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -763,7 +794,7 @@ time.sleep(CMD_RETRY_DELAY) -def _run_apt_command(cmd, fatal=False): +def _run_apt_command(cmd, fatal=False, quiet=False): """Run an apt command with optional retries. :param cmd: The apt command to run. @@ -771,13 +802,22 @@ :param fatal: Whether the command's output should be checked and retried.
:type fatal: bool + :param quiet: if True, silence the output of the command from stdout and + stderr + :type quiet: bool """ if fatal: _run_with_retries( cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock") + retry_message="Couldn't acquire DPKG lock", + quiet=quiet) else: - subprocess.call(cmd, env=get_apt_dpkg_env()) + kwargs = {} + if quiet: + devnull = os.devnull if six.PY2 else subprocess.DEVNULL + kwargs['stdout'] = devnull + kwargs['stderr'] = devnull + subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) def get_upstream_version(package): @@ -799,6 +839,22 @@ def get_upstream_version(package): return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) +def get_installed_version(package): + """Determine installed version of a package. + + :returns: None (if not installed) or the installed version as + a Version object. + """ + cache = apt_cache() + dpkg_result = cache._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + + if installed_version: + current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) + return current_ver + + def get_apt_dpkg_env(): """Get environment suitable for execution of APT and DPKG tools. diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 52d380b4..e5c38793 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -56,11 +56,11 @@ ) from charmhelpers.fetch import ( add_source, - apt_cache, apt_install, apt_purge, apt_update, - filter_missing_packages + filter_missing_packages, + get_installed_version ) from charmhelpers.contrib.storage.linux.ceph import ( get_mon_map, @@ -497,10 +497,7 @@ def tune_dev(block_dev): def ceph_user(): - if get_version() > 1: - return 'ceph' - else: - return "root" + return 'ceph' class CrushLocation(object): @@ -715,22 +712,15 @@ def get_version(): """Derive Ceph release from an installed package.""" import apt_pkg as apt - cache = apt_cache() package = "ceph" - try: - pkg = cache[package] - except KeyError: - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation ' \ - 'candidate: %s' % package - error_out(e) - if not pkg.current_ver: + current_ver = get_installed_version(package) + if not current_ver: # package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.upstream_version(pkg.current_ver.ver_str) + vers = apt.upstream_version(current_ver.ver_str) # x.y match only for 20XX.X # and ignore patch level for other packages diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 9aea716b..394e4d37 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From fe4a956f749fc9eafdc389e79ecc5d072bd6ed4f Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 9 Apr 2021 20:22:11 +0100 Subject: [PATCH 2195/2699] Update for libraries sync 21.04 * Updated build.lock generated on xenial * Modified wheelhouse.txt to pick up charms.ceph stable/21.04 branch Change-Id: Ifdf2620bb3c529fbdfa8bcd3f11b85428f32e3b3 --- ceph-fs/src/build.lock | 190 ++++++++++++++++++------------------- ceph-fs/src/wheelhouse.txt | 2 + 2 files changed, 97 insertions(+), 95 deletions(-) diff --git a/ceph-fs/src/build.lock b/ceph-fs/src/build.lock index cb26d297..1b94cdf8 100644 --- a/ceph-fs/src/build.lock +++ b/ceph-fs/src/build.lock @@ -1,198 +1,198 @@ { "locks": [ { - "type": "layer", "item": "layer:options", - "url": "https://github.com/juju-solutions/layer-options.git", "vcs": null, - "branch": "refs/heads/master", + "url": "https://github.com/juju-solutions/layer-options.git", + "branch": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", + "type": "layer", "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" }, { - "type": "layer", "item": "layer:basic", - "url": "https://github.com/juju-solutions/layer-basic.git", "vcs": null, - "branch": "refs/heads/master", + "url": "https://github.com/juju-solutions/layer-basic.git", + "branch": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "type": "layer", "commit": "623e69c7b432456fd4364f6e1835424fd6b5425e" }, { - "type": "layer", "item": "layer:openstack", - "url": "https://github.com/openstack/charm-layer-openstack", "vcs": null, - "branch": "refs/heads/master", + "url": "https://github.com/openstack/charm-layer-openstack", + "branch": "ba152d41b4a1109073d335415f43c4248109e7c7", + "type": "layer", "commit": "ba152d41b4a1109073d335415f43c4248109e7c7" }, { - "type": "layer", "item": "layer:ceph", - "url": "https://github.com/openstack/charm-layer-ceph.git", "vcs": null, - "branch": "refs/heads/master", + "url": "https://github.com/openstack/charm-layer-ceph.git", + "branch": "17d40abd8d9ec3b8c32756ca981c80c4733c016f", + "type": "layer", "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" }, { - "type": "layer", "item": "ceph-fs", - "url": null, "vcs": null, - "branch": "refs/heads/master", - "commit": "c8feba48a592445f20ed247e45866077c01a114d" + "url": null, + "branch": "46797fc9869d86352e513aa4f7a26c0d9ab3c6ec", + "type": "layer", + "commit": "46797fc9869d86352e513aa4f7a26c0d9ab3c6ec" }, { - "type": "layer", "item": "interface:tls-certificates", - "url": 
"https://github.com/juju-solutions/interface-tls-certificates", "vcs": null, - "branch": "refs/heads/master", + "url": "https://github.com/juju-solutions/interface-tls-certificates", + "branch": "d9850016d930a6d507b9fd45e2598d327922b140", + "type": "layer", "commit": "d9850016d930a6d507b9fd45e2598d327922b140" }, { - "type": "layer", "item": "interface:ceph-mds", - "url": "https://opendev.org/openstack/charm-interface-ceph-client.git", "vcs": null, - "branch": "refs/heads/master", + "url": "https://opendev.org/openstack/charm-interface-ceph-client.git", + "branch": "72245e1d002fb9c65c9574d65b5952275b3411fb", + "type": "layer", "commit": "72245e1d002fb9c65c9574d65b5952275b3411fb" }, { - "type": "python_module", - "package": "dnspython", "vcs": null, - "version": "1.15.0" - }, - { "type": "python_module", - "package": "charms.openstack", - "url": "git+https://opendev.org/openstack/charms.openstack.git", - "branch": "refs/heads/stable/21.04", - "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", - "vcs": "git" + "package": "charms.reactive", + "version": "1.4.1" }, { - "type": "python_module", - "package": "netaddr", "vcs": null, - "version": "0.7.19" + "type": "python_module", + "package": "netifaces", + "version": "0.10.9" }, { - "type": "python_module", - "package": "ceph_api", "vcs": null, - "version": "0.4.0" + "type": "python_module", + "package": "PyYAML", + "version": "5.2" }, { - "type": "python_module", - "package": "dnspython3", "vcs": null, - "version": "1.15.0" + "type": "python_module", + "package": "psutil", + "version": "5.8.0" }, { - "type": "python_module", - "package": "pbr", "vcs": null, - "version": "5.5.1" + "type": "python_module", + "package": "pyaml", + "version": "20.4.0" }, { - "type": "python_module", - "package": "setuptools", "vcs": null, - "version": "41.6.0" + "type": "python_module", + "package": "netaddr", + "version": "0.7.19" }, { + "vcs": null, "type": "python_module", "package": "pyxattr", - "vcs": null, "version": "0.7.2" }, { - "type": "python_module", - "package": "Jinja2", "vcs": null, - "version": "2.10.1" - }, - { "type": "python_module", - "package": "pip", - "vcs": null, - "version": "18.1" - }, - { - "type": "python_module", - "package": "pyaml", - "vcs": null, - "version": "20.4.0" + "package": "dnspython3", + "version": "1.15.0" }, { - "type": "python_module", - "package": "MarkupSafe", "vcs": null, - "version": "1.1.1" - }, - { "type": "python_module", - "package": "charmhelpers", - "url": "git+https://github.com/juju/charm-helpers.git", - "branch": "refs/heads/stable/21.04", - "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", - "vcs": "git" + "package": "setuptools", + "version": "41.6.0" }, { + "vcs": null, "type": "python_module", "package": "wheel", - "vcs": null, "version": "0.33.6" }, { - "type": "python_module", - "package": "charms.ceph", - "url": "git+https://github.com/openstack/charms.ceph.git", - "branch": "refs/heads/master", - "version": "9bfe43ee654d016d7f09ede406c45674821f2866", - "vcs": "git" + "vcs": "git", + "url": "git+https://opendev.org/openstack/charms.openstack.git", + "package": "charms.openstack", + "branch": "stable/21.04", + "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", + "type": "python_module" }, { + "vcs": null, "type": "python_module", - "package": "charms.reactive", + "package": "six", + "version": "1.15.0" + }, + { "vcs": null, - "version": "1.4.1" + "type": "python_module", + "package": "MarkupSafe", + "version": "1.1.1" }, { + "vcs": null, "type": "python_module", "package": "Tempita", - 
"vcs": null, "version": "0.5.2" }, { - "type": "python_module", - "package": "PyYAML", "vcs": null, - "version": "5.2" + "type": "python_module", + "package": "pbr", + "version": "5.5.1" }, { + "vcs": null, "type": "python_module", "package": "setuptools_scm", - "vcs": null, "version": "1.17.0" }, { - "type": "python_module", - "package": "psutil", "vcs": null, - "version": "5.8.0" + "type": "python_module", + "package": "dnspython", + "version": "1.15.0" }, { - "type": "python_module", - "package": "netifaces", "vcs": null, - "version": "0.10.9" + "type": "python_module", + "package": "pip", + "version": "18.1" }, { + "vcs": null, "type": "python_module", - "package": "six", + "package": "Jinja2", + "version": "2.10.1" + }, + { + "vcs": "git", + "url": "git+https://github.com/juju/charm-helpers.git", + "package": "charmhelpers", + "branch": "stable/21.04", + "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", + "type": "python_module" + }, + { + "vcs": "git", + "url": "git+https://opendev.org/openstack/charms.ceph.git", + "package": "charms.ceph", + "branch": "stable/21.04", + "version": "9bfe43ee654d016d7f09ede406c45674821f2866", + "type": "python_module" + }, + { "vcs": null, - "version": "1.15.0" + "type": "python_module", + "package": "ceph_api", + "version": "0.4.0" } ] -} +} \ No newline at end of file diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index 200a2c0a..433a8469 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -6,4 +6,6 @@ psutil git+https://opendev.org/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack +git+https://opendev.org/openstack/charms.ceph.git@stable/21.04#egg=charms.ceph + git+https://github.com/juju/charm-helpers.git@stable/21.04#egg=charmhelpers From ac5a5418b0f3bdc0d2a7f55631367149bad8c6f4 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 3 Mar 2021 20:15:48 +0000 Subject: [PATCH 2196/2699] Test bundles for focal-wallaby and hirsute-wallaby These are the test bundles (and any associated changes) for focal-wallaby and hirsute-wallaby support. Sync charm-helpers Change-Id: I9f495e3ce85913c9f63201c9effa9b4c5932343d --- ceph-osd/osci.yaml | 2 +- ceph-osd/tests/bundles/focal-wallaby.yaml | 222 ++++++++++++++++++++ ceph-osd/tests/bundles/hirsute-wallaby.yaml | 222 ++++++++++++++++++++ ceph-osd/tests/tests.yaml | 5 +- 4 files changed, 449 insertions(+), 2 deletions(-) create mode 100644 ceph-osd/tests/bundles/focal-wallaby.yaml create mode 100644 ceph-osd/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 1abe9784..6a399fa9 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -1,4 +1,4 @@ - project: templates: - charm-unit-jobs - - charm-functional-jobs \ No newline at end of file + - charm-functional-jobs diff --git a/ceph-osd/tests/bundles/focal-wallaby.yaml b/ceph-osd/tests/bundles/focal-wallaby.yaml new file mode 100644 index 00000000..2de66d2f --- /dev/null +++ b/ceph-osd/tests/bundles/focal-wallaby.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 
'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/hirsute-wallaby.yaml b/ceph-osd/tests/bundles/hirsute-wallaby.yaml new file mode 100644 index 00000000..333aae02 --- /dev/null +++ b/ceph-osd/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 
'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 63baecd9..912c0d9f 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,6 +1,8 @@ charm_name: ceph-osd gate_bundles: + - hirsute-wallaby - groovy-victoria + - focal-wallaby - focal-victoria - focal-ussuri - bionic-ussuri @@ -26,4 +28,5 @@ tests: - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest tests_options: force_deploy: - - groovy-victoria + - hirsute-wallaby + - trusty-mitaka From 696c5a24b99952c2998cb8f8ebc72795704752c0 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sun, 11 Apr 2021 16:45:53 +0100 Subject: [PATCH 2197/2699] 21.04 sync - add 'hirsute' in UBUNTU_RELEASES The 'hirsute' key in c-h/core/host_factory/ubuntu.py: UBUNTU_RELEASES had been missed out, and is needed for hirsute support in many of the charms. This sync is to add just that key. See also [1] Note that this sync is only for classic charms. 
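As a sketch of why the key matters (illustrative, not part of the sync itself): charm code orders release codenames via this tuple, so comparisons against a codename missing from UBUNTU_RELEASES fail, e.g.

    from charmhelpers.core.host import (
        CompareHostReleases,
        get_distrib_codename,
    )

    # fails on a hirsute unit until 'hirsute' is in UBUNTU_RELEASES
    if CompareHostReleases(get_distrib_codename()) >= 'groovy':
        pass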
[1] https://github.com/juju/charm-helpers/pull/598 Change-Id: I89019b188806981b786a9fa9a292195b52078193 --- ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 7ee8a6ed..5aa4196d 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -26,7 +26,8 @@ 'disco', 'eoan', 'focal', - 'groovy' + 'groovy', + 'hirsute', ) From d499065c0fc39ffc867ab5afde3c2da0a08ddc3b Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sun, 11 Apr 2021 16:45:53 +0100 Subject: [PATCH 2198/2699] 21.04 sync - add 'hirsute' in UBUNTU_RELEASES The 'hirsute' key in c-h/core/host_factory/ubuntu.py: UBUNTU_RELEASES had been missed out, and is needed for hirsute support in many of the charms. This sync is to add just that key. See also [1] Note that this sync is only for classic charms. [1] https://github.com/juju/charm-helpers/pull/598 Change-Id: I66384babf597072499799c0b3aaff00fedbe69a0 --- ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 7ee8a6ed..5aa4196d 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -26,7 +26,8 @@ 'disco', 'eoan', 'focal', - 'groovy' + 'groovy', + 'hirsute', ) From 27f88df128f7bcc15fccd21ed2627ea362a7570a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sun, 11 Apr 2021 16:45:53 +0100 Subject: [PATCH 2199/2699] 21.04 sync - add 'hirsute' in UBUNTU_RELEASES The 'hirsute' key in c-h/core/host_factory/ubuntu.py: UBUNTU_RELEASES had been missed out, and is needed for hirsute support in many of the charms. This sync is to add just that key. See also [1] Note that this sync is only for classic charms. [1] https://github.com/juju/charm-helpers/pull/598 Change-Id: I75107c4fd927f95eb3778b65a2172b0268390feb --- ceph-proxy/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 7ee8a6ed..5aa4196d 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -26,7 +26,8 @@ 'disco', 'eoan', 'focal', - 'groovy' + 'groovy', + 'hirsute', ) From ff886f000a08191abe783c74774fc1af7397bd00 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Sun, 11 Apr 2021 16:45:53 +0100 Subject: [PATCH 2200/2699] 21.04 sync - add 'hirsute' in UBUNTU_RELEASES The 'hirsute' key in c-h/core/host_factory/ubuntu.py: UBUNTU_RELEASES had been missed out, and is needed for hirsute support in many of the charms. This sync is to add just that key. See also [1] Note that this sync is only for classic charms. 
[1] https://github.com/juju/charm-helpers/pull/598 Change-Id: I8544b62b2c7e5f38488f564af57dbe815638bf32 --- ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 7ee8a6ed..5aa4196d 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -26,7 +26,8 @@ 'disco', 'eoan', 'focal', - 'groovy' + 'groovy', + 'hirsute', ) From 6a3424f47c6febf1d35a15bded9bd519897e4ca8 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Mon, 5 Apr 2021 12:29:21 -0300 Subject: [PATCH 2201/2699] Ensure identical types for comparing port number/string Fix an oversight and regression in commit 798ca49b2f83 ("Close previously opened ports on port config change"). The comparison between an integer and a string (returned by .split()) is always different and thus when upgrading the charm 'port 80' is closed. Make sure the types are set to str. Right now it should only be needed for port and not opened_port_number; but let's future proof both sides of the comparison. (Update: using str() vs int() as apparently int() might fail but str() should always work no matter what it got; thanks, Alex Kavanagh!) Before: $ juju run --unit ceph-radosgw/0 opened-ports 80/tcp $ juju upgrade-charm --path . ceph-radosgw $ juju run --unit ceph-radosgw/0 opened-ports $ @ log: 2021-04-05 15:08:04 INFO juju-log Closed port 80 in favor of port 80 $ python3 -q >>> x=80 >>> y='80/tcp' >>> z=y.split('/')[0] >>> z '80' >>> x 80 >>> x != z True >>> x=str(x) >>> x != z False After: $ juju run --unit ceph-radosgw/1 opened-ports 80/tcp $ juju upgrade-charm --path . ceph-radosgw $ juju run --unit ceph-radosgw/1 opened-ports 80/tcp Signed-off-by: Mauricio Faria de Oliveira Change-Id: I2bcdfec1459ea45d8f57b850b7fd935c360cc7c1 --- ceph-radosgw/hooks/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b4ebcc8b..040a71e2 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -253,7 +253,7 @@ def _config_changed(): open_port(port) for opened_port in opened_ports(): opened_port_number = opened_port.split('/')[0] - if opened_port_number != port: + if str(opened_port_number) != str(port): close_port(opened_port_number) log('Closed port %s in favor of port %s' % (opened_port_number, port)) From f359648832d75c728f3ec884e896a415227e020a Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 13 Apr 2021 10:43:39 +0100 Subject: [PATCH 2202/2699] Add otp pool to broker request Ceph RADOS gateway >= Mimic has an additional metadata pool (otp). 
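For illustration only (a sketch mirroring the unit-test expectations added below; default zone, replicated pools), the extra op requested of ceph-mon amounts to something like:

    rq.add_op_create_pool('default.rgw.otp', replica_count=3,
                          pg_num=None, weight=0.1, group='objects',
                          namespace=None, app_name='rgw',
                          max_bytes=None, max_objects=None)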
Add this to the broker request to ensure that it's created correctly by the ceph-mon application rather than being auto-created by the radosgw application. Change-Id: I5e9b4e449bd1bc300225d223329bb62f3a381705 Closes-Bug: 1921453 --- ceph-radosgw/hooks/ceph_rgw.py | 1 + ceph-radosgw/unit_tests/test_ceph.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index 26177f46..463c281f 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -196,6 +196,7 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): '.rgw.log', '.rgw.intent-log', '.rgw.meta', + '.rgw.otp', '.rgw.usage', '.rgw.users.keys', '.rgw.users.email', diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index f9dad5d7..7c4e36a3 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -83,6 +83,9 @@ def test_create_rgw_pools_rq_with_prefix( call('us-east.rgw.meta', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), + call('us-east.rgw.otp', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), call('us-east.rgw.usage', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), @@ -136,6 +139,9 @@ def test_create_rgw_pools_rq_with_prefix( call('us-east.rgw.meta', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), + call('us-east.rgw.otp', replica_count=3, pg_num=10, weight=None, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), call('us-east.rgw.usage', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), @@ -198,6 +204,9 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel( call('default.rgw.meta', replica_count=3, pg_num=None, weight=0.1, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), + call('default.rgw.otp', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), call('default.rgw.usage', replica_count=3, pg_num=None, weight=0.1, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), @@ -254,6 +263,9 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel( call('default.rgw.meta', replica_count=3, pg_num=None, weight=0.1, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), + call('default.rgw.otp', replica_count=3, pg_num=None, weight=0.1, + group='objects', namespace=None, app_name='rgw', + max_bytes=None, max_objects=None), call('default.rgw.usage', replica_count=3, pg_num=None, weight=0.1, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), @@ -335,6 +347,8 @@ def test_create_rgw_pools_rq_no_prefix_ec(self, mock_broker, group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.meta', group='objects', app_name='rgw'), + call(weight=0.10, replica_count=3, name='default.rgw.otp', + group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.usage', group='objects', app_name='rgw'), call(weight=0.10, replica_count=3, name='default.rgw.users.keys', From e5232ae5bcb0af38469489df5e112e38fd098f25 Mon Sep 17
00:00:00 2001 From: Alex Kavanagh Date: Wed, 3 Mar 2021 20:27:54 +0000 Subject: [PATCH 2203/2699] Test bundles for focal-wallaby and hirsute-wallaby These are the test bundles (and any associated changes) for focal-wallaby and hirsute-wallaby support. The hirsute-wallaby test is disabled (moved to dev) due to [1], as the bundle may reference a reactive charm. [1] https://github.com/juju-solutions/layer-basic/issues/194 Sync charm-helpers Change-Id: I01125ce328091db86c757d5ee14e3f3eaa913d73 --- ceph-proxy/osci.yaml | 18 +- .../tests/bundles/focal-wallaby-ec.yaml | 215 ++++++++++++++++++ ceph-proxy/tests/bundles/focal-wallaby.yaml | 186 +++++++++++++++ .../tests/bundles/hirsute-wallaby-ec.yaml | 215 ++++++++++++++++++ ceph-proxy/tests/bundles/hirsute-wallaby.yaml | 186 +++++++++++++++ ceph-proxy/tests/tests.yaml | 8 +- 6 files changed, 825 insertions(+), 3 deletions(-) create mode 100644 ceph-proxy/tests/bundles/focal-wallaby-ec.yaml create mode 100644 ceph-proxy/tests/bundles/focal-wallaby.yaml create mode 100644 ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml create mode 100644 ceph-proxy/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index c63b99b5..44772406 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -11,8 +11,12 @@ - focal-ussuri-ec - focal-victoria - focal-victoria-ec + - focal-wallaby + - focal-wallaby-ec - groovy-victoria - groovy-victoria-ec + - hirsute-wallaby + - hirsute-wallaby-ec - job: name: focal-ussuri-ec parent: func-target @@ -26,9 +30,21 @@ dependencies: *smoke-jobs vars: tox_extra_args: erasure-coded:focal-victoria-ec +- job: + name: focal-wallaby-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:focal-wallaby-ec - job: name: groovy-victoria-ec parent: func-target dependencies: *smoke-jobs vars: - tox_extra_args: erasure-coded:groovy-victoria-ec \ No newline at end of file + tox_extra_args: erasure-coded:groovy-victoria-ec +- job: + name: hirsute-wallaby-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:hirsute-wallaby-ec diff --git a/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml b/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml new file mode 100644 index 00000000..cd693777 --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: focal + +comment: +- 'machines section to decide order of deployment.
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-wallaby.yaml b/ceph-proxy/tests/bundles/focal-wallaby.yaml new file mode 100644 index 00000000..203f1249 --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-wallaby.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 
'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml b/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml new file mode 100644 index 00000000..8cce6d30 --- /dev/null +++ b/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 
'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/hirsute-wallaby.yaml b/ceph-proxy/tests/bundles/hirsute-wallaby.yaml new file mode 100644 index 00000000..43b5a1f7 --- /dev/null +++ b/ceph-proxy/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 
'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 9849fca1..5187892e 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -21,6 +21,8 @@ gate_bundles: - erasure-coded: focal-ussuri-ec - focal-victoria - erasure-coded: focal-victoria-ec + - focal-wallaby + - erasure-coded: focal-wallaby-ec - groovy-victoria - erasure-coded: groovy-victoria-ec @@ -34,6 +36,8 @@ dev_bundles: - xenial-pike - xenial-queens # luminous - bionic-rocky # mimic + - hirsute-wallaby + - erasure-coded: hirsute-wallaby-ec smoke_bundles: - focal-ussuri @@ -63,5 +67,5 @@ target_deploy_status: tests_options: force_deploy: - - groovy-victoria - - groovy-victoria-ec + - hirsute-wallaby + - hirsute-wallaby-ec From 78dea0676ce7be4326d94bd017e5343d0601db2e Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 16 Apr 2021 10:37:23 +0100 Subject: [PATCH 2204/2699] Disable hirsute-wallaby test The hirsute-wallaby test is disabled (moved to dev) due to [1], as the bundle may reference a reactive charm. [1] https://github.com/juju-solutions/layer-basic/issues/194 Change-Id: Iee55ac0b05064f0ca73c5cd32df3cb63ebc8f5d0 --- ceph-osd/tests/tests.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 912c0d9f..4d8ca8b9 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,6 +1,6 @@ charm_name: ceph-osd + gate_bundles: - - hirsute-wallaby - groovy-victoria - focal-wallaby - focal-victoria @@ -10,22 +10,28 @@ gate_bundles: - bionic-stein - bionic-queens - xenial-mitaka + dev_bundles: - trusty-mitaka - xenial-ocata - xenial-pike - xenial-queens - bionic-rocky + - hirsute-wallaby + smoke_bundles: - bionic-train + configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image + tests: - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephRelationTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest + tests_options: force_deploy: - hirsute-wallaby From 61cae1d8bc49eb09ad993a64bc6f2aed686414bd Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2205/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch.
Change-Id: I89267c171f245fbc92ad67545e97d99f48f798e2 --- ceph-fs/src/build.lock | 198 ------------------------------ ceph-fs/src/test-requirements.txt | 4 +- ceph-fs/src/wheelhouse.txt | 6 +- ceph-fs/test-requirements.txt | 2 +- 4 files changed, 6 insertions(+), 204 deletions(-) delete mode 100644 ceph-fs/src/build.lock diff --git a/ceph-fs/src/build.lock b/ceph-fs/src/build.lock deleted file mode 100644 index 1b94cdf8..00000000 --- a/ceph-fs/src/build.lock +++ /dev/null @@ -1,198 +0,0 @@ -{ - "locks": [ - { - "item": "layer:options", - "vcs": null, - "url": "https://github.com/juju-solutions/layer-options.git", - "branch": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", - "type": "layer", - "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" - }, - { - "item": "layer:basic", - "vcs": null, - "url": "https://github.com/juju-solutions/layer-basic.git", - "branch": "623e69c7b432456fd4364f6e1835424fd6b5425e", - "type": "layer", - "commit": "623e69c7b432456fd4364f6e1835424fd6b5425e" - }, - { - "item": "layer:openstack", - "vcs": null, - "url": "https://github.com/openstack/charm-layer-openstack", - "branch": "ba152d41b4a1109073d335415f43c4248109e7c7", - "type": "layer", - "commit": "ba152d41b4a1109073d335415f43c4248109e7c7" - }, - { - "item": "layer:ceph", - "vcs": null, - "url": "https://github.com/openstack/charm-layer-ceph.git", - "branch": "17d40abd8d9ec3b8c32756ca981c80c4733c016f", - "type": "layer", - "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" - }, - { - "item": "ceph-fs", - "vcs": null, - "url": null, - "branch": "46797fc9869d86352e513aa4f7a26c0d9ab3c6ec", - "type": "layer", - "commit": "46797fc9869d86352e513aa4f7a26c0d9ab3c6ec" - }, - { - "item": "interface:tls-certificates", - "vcs": null, - "url": "https://github.com/juju-solutions/interface-tls-certificates", - "branch": "d9850016d930a6d507b9fd45e2598d327922b140", - "type": "layer", - "commit": "d9850016d930a6d507b9fd45e2598d327922b140" - }, - { - "item": "interface:ceph-mds", - "vcs": null, - "url": "https://opendev.org/openstack/charm-interface-ceph-client.git", - "branch": "72245e1d002fb9c65c9574d65b5952275b3411fb", - "type": "layer", - "commit": "72245e1d002fb9c65c9574d65b5952275b3411fb" - }, - { - "vcs": null, - "type": "python_module", - "package": "charms.reactive", - "version": "1.4.1" - }, - { - "vcs": null, - "type": "python_module", - "package": "netifaces", - "version": "0.10.9" - }, - { - "vcs": null, - "type": "python_module", - "package": "PyYAML", - "version": "5.2" - }, - { - "vcs": null, - "type": "python_module", - "package": "psutil", - "version": "5.8.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "pyaml", - "version": "20.4.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "netaddr", - "version": "0.7.19" - }, - { - "vcs": null, - "type": "python_module", - "package": "pyxattr", - "version": "0.7.2" - }, - { - "vcs": null, - "type": "python_module", - "package": "dnspython3", - "version": "1.15.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "setuptools", - "version": "41.6.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "wheel", - "version": "0.33.6" - }, - { - "vcs": "git", - "url": "git+https://opendev.org/openstack/charms.openstack.git", - "package": "charms.openstack", - "branch": "stable/21.04", - "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", - "type": "python_module" - }, - { - "vcs": null, - "type": "python_module", - "package": "six", - "version": "1.15.0" - }, - { - "vcs": null, - "type": "python_module", - 
"package": "MarkupSafe", - "version": "1.1.1" - }, - { - "vcs": null, - "type": "python_module", - "package": "Tempita", - "version": "0.5.2" - }, - { - "vcs": null, - "type": "python_module", - "package": "pbr", - "version": "5.5.1" - }, - { - "vcs": null, - "type": "python_module", - "package": "setuptools_scm", - "version": "1.17.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "dnspython", - "version": "1.15.0" - }, - { - "vcs": null, - "type": "python_module", - "package": "pip", - "version": "18.1" - }, - { - "vcs": null, - "type": "python_module", - "package": "Jinja2", - "version": "2.10.1" - }, - { - "vcs": "git", - "url": "git+https://github.com/juju/charm-helpers.git", - "package": "charmhelpers", - "branch": "stable/21.04", - "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", - "type": "python_module" - }, - { - "vcs": "git", - "url": "git+https://opendev.org/openstack/charms.ceph.git", - "package": "charms.ceph", - "branch": "stable/21.04", - "version": "9bfe43ee654d016d7f09ede406c45674821f2866", - "type": "python_module" - }, - { - "vcs": null, - "type": "python_module", - "package": "ceph_api", - "version": "0.4.0" - } - ] -} \ No newline at end of file diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index eb4844b8..520681e1 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -11,5 +11,5 @@ charm-tools>=2.4.4 keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-fs/src/wheelhouse.txt b/ceph-fs/src/wheelhouse.txt index 433a8469..303c3962 100644 --- a/ceph-fs/src/wheelhouse.txt +++ b/ceph-fs/src/wheelhouse.txt @@ -4,8 +4,8 @@ ceph_api pyxattr psutil -git+https://opendev.org/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack -git+https://opendev.org/openstack/charms.ceph.git@stable/21.04#egg=charms.ceph +git+https://github.com/openstack/charms.ceph.git#egg=charms.ceph -git+https://github.com/juju/charm-helpers.git@stable/21.04#egg=charmhelpers +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 16bbc27d..3f085244 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -35,7 +35,7 @@ mock>=1.2; python_version >= '3.6' nose>=1.3.7 coverage>=3.6 -git+https://github.com/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack # # Revisit for removal / mock improvement: netifaces # vault From e79b7a34064d2064fd7cb7d21a5ff648df2f7cc0 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2206/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch. 
Change-Id: Idf99f5b51c66285d384e25797db029513367736c --- ceph-mon/charm-helpers-hooks.yaml | 2 +- ceph-mon/test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml index d0632d3f..df1e68a5 100644 --- a/ceph-mon/charm-helpers-hooks.yaml +++ b/ceph-mon/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers@stable/21.04 +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 394e4d37..9aea716b 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From 0c2bdec737558b119b04f385415e6a4a9b2e188a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2207/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch. Change-Id: I77b76a839d4bfcf5fe84fa3d995911586bf23476 --- ceph-osd/charm-helpers-hooks.yaml | 2 +- ceph-osd/test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index bad497a2..ca383631 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers@stable/21.04 +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 394e4d37..9aea716b 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From 652d3c731d53a4f7ee8a75724d8f6e806d94c374 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2208/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch. 
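Note that the ceph-mon and ceph-osd changes touch charm-helpers-hooks.yaml rather than a wheelhouse: for these classic (non-reactive) charms, charm-helpers is vendored into the charm tree by a sync tool driven by that file, so the stable/master pin lives in its repo key. A sketch of the file's overall shape, with an illustrative, trimmed include list (the real lists are longer):

    repo: https://github.com/juju/charm-helpers   # master; append @stable/NN.NN to pin a release
    destination: hooks/charmhelpers               # where the synced modules are vendored
    include:                                      # modules to copy, e.g.:
      - core
      - contrib.storage.linux.ceph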
Change-Id: I191a4ac0c66f2e5d36cc941d5eb2809408a89320 --- ceph-proxy/charm-helpers-hooks.yaml | 2 +- ceph-proxy/test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-proxy/charm-helpers-hooks.yaml b/ceph-proxy/charm-helpers-hooks.yaml index 205ad8a6..b7a1428b 100644 --- a/ceph-proxy/charm-helpers-hooks.yaml +++ b/ceph-proxy/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers@stable/21.04 +repo: https://github.com/juju/charm-helpers destination: charmhelpers include: - core diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 394e4d37..9aea716b 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From 64ff35beae4a355b89cfe992edb0c521c79f35be Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2209/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch. Change-Id: I4346d715d47b77e862ecf75d6ab6f00e1dcab752 --- ceph-radosgw/charm-helpers-hooks.yaml | 2 +- ceph-radosgw/test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index 9d1a4980..fa9cd645 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers@stable/21.04 +repo: https://github.com/juju/charm-helpers destination: hooks/charmhelpers include: - core diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 394e4d37..9aea716b 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -42,8 +42,8 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza;python_version>='3.0' -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' From d6bbfbee15b1d618e3c3f90032ffd5bede6431c7 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 3 May 2021 15:33:46 +0100 Subject: [PATCH 2210/2699] Updates to flip all libraries back to master This patchset updates all the requirements for charms.openstack, charm-helpers, charms.ceph, zaza and zaza-openstack-tests back to master branch. 
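One pattern worth calling out in the test bundles throughout this series is the two-hop database wiring used with mysql-innodb-cluster: each consuming application relates to its own subordinate mysql-router, and only the router relates to the cluster. Condensed from the bundles above (a sketch; keystone stands in for any consumer):

    applications:
      keystone-mysql-router:
        charm: cs:~openstack-charmers-next/mysql-router   # subordinate, no units of its own
      mysql-innodb-cluster:
        charm: cs:~openstack-charmers-next/mysql-innodb-cluster
        num_units: 3
    relations:
      - - 'keystone:shared-db'                 # application -> its router
        - 'keystone-mysql-router:shared-db'
      - - 'keystone-mysql-router:db-router'    # router -> the cluster
        - 'mysql-innodb-cluster:db-router'

This is why every bundle in the series declares one *-mysql-router application per database consumer (cinder, glance, keystone, and so on).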
Change-Id: If26550f2a6aca4f2e7b8a532e924f640d967950f --- ceph-rbd-mirror/src/build.lock | 196 ---------------------- ceph-rbd-mirror/src/test-requirements.txt | 4 +- ceph-rbd-mirror/src/wheelhouse.txt | 4 +- ceph-rbd-mirror/test-requirements.txt | 2 +- 4 files changed, 5 insertions(+), 201 deletions(-) delete mode 100644 ceph-rbd-mirror/src/build.lock diff --git a/ceph-rbd-mirror/src/build.lock b/ceph-rbd-mirror/src/build.lock deleted file mode 100644 index 6f2031db..00000000 --- a/ceph-rbd-mirror/src/build.lock +++ /dev/null @@ -1,196 +0,0 @@ -{ - "locks": [ - { - "type": "layer", - "item": "layer:leadership", - "url": "https://git.launchpad.net/layer-leadership", - "vcs": null, - "branch": "refs/heads/master", - "commit": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f" - }, - { - "type": "layer", - "item": "layer:options", - "url": "https://github.com/juju-solutions/layer-options.git", - "vcs": null, - "branch": "refs/heads/master", - "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" - }, - { - "type": "layer", - "item": "layer:basic", - "url": "https://github.com/juju-solutions/layer-basic.git", - "vcs": null, - "branch": "refs/heads/master", - "commit": "623e69c7b432456fd4364f6e1835424fd6b5425e" - }, - { - "type": "layer", - "item": "layer:openstack", - "url": "https://github.com/openstack/charm-layer-openstack", - "vcs": null, - "branch": "refs/heads/master", - "commit": "ba152d41b4a1109073d335415f43c4248109e7c7" - }, - { - "type": "layer", - "item": "layer:ceph", - "url": "https://github.com/openstack/charm-layer-ceph.git", - "vcs": null, - "branch": "refs/heads/master", - "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" - }, - { - "type": "layer", - "item": "ceph-rbd-mirror", - "url": null, - "vcs": null, - "branch": "refs/heads/master", - "commit": "7656e0878da9461cc4af2e7bc8faaa8b7842de03" - }, - { - "type": "layer", - "item": "interface:tls-certificates", - "url": "https://github.com/juju-solutions/interface-tls-certificates", - "vcs": null, - "branch": "refs/heads/master", - "commit": "d9850016d930a6d507b9fd45e2598d327922b140" - }, - { - "type": "layer", - "item": "interface:ceph-rbd-mirror", - "url": "https://github.com/openstack/charm-interface-ceph-rbd-mirror.git", - "vcs": null, - "branch": "refs/heads/master", - "commit": "1bb6e20b349e4573ba47863003382fa5375ce6e9" - }, - { - "type": "layer", - "item": "interface:nrpe-external-master", - "url": "https://github.com/cmars/nrpe-external-master-interface", - "vcs": null, - "branch": "refs/heads/master", - "commit": "2e0e1fdea6d83b55078200aacb537d60013ec5bc" - }, - { - "type": "python_module", - "package": "charms.openstack", - "url": "git+https://opendev.org/openstack/charms.openstack.git", - "branch": "refs/heads/stable/21.04", - "version": "bcd0c9b4b4a19d4a4125e0a6a3f808a843a74fa1", - "vcs": "git" - }, - { - "type": "python_module", - "package": "netaddr", - "vcs": null, - "version": "0.7.19" - }, - { - "type": "python_module", - "package": "pbr", - "vcs": null, - "version": "5.5.1" - }, - { - "type": "python_module", - "package": "setuptools", - "vcs": null, - "version": "41.6.0" - }, - { - "type": "python_module", - "package": "Jinja2", - "vcs": null, - "version": "2.10.1" - }, - { - "type": "python_module", - "package": "pip", - "vcs": null, - "version": "18.1" - }, - { - "type": "python_module", - "package": "pyaml", - "vcs": null, - "version": "20.4.0" - }, - { - "type": "python_module", - "package": "dnspython", - "vcs": null, - "version": "1.16.0" - }, - { - "type": "python_module", - "package": "MarkupSafe", - "vcs": 
null, - "version": "1.1.1" - }, - { - "type": "python_module", - "package": "charmhelpers", - "url": "git+https://github.com/juju/charm-helpers.git", - "branch": "refs/heads/stable/21.04", - "version": "8c48d2914b0e7396a2392c3933e2d7f321643ae6", - "vcs": "git" - }, - { - "type": "python_module", - "package": "wheel", - "vcs": null, - "version": "0.33.6" - }, - { - "type": "python_module", - "package": "charms.ceph", - "url": "git+https://github.com/openstack/charms.ceph.git", - "branch": "refs/heads/master", - "version": "9bfe43ee654d016d7f09ede406c45674821f2866", - "vcs": "git" - }, - { - "type": "python_module", - "package": "charms.reactive", - "vcs": null, - "version": "1.4.1" - }, - { - "type": "python_module", - "package": "Tempita", - "vcs": null, - "version": "0.5.2" - }, - { - "type": "python_module", - "package": "PyYAML", - "vcs": null, - "version": "5.2" - }, - { - "type": "python_module", - "package": "setuptools_scm", - "vcs": null, - "version": "1.17.0" - }, - { - "type": "python_module", - "package": "psutil", - "vcs": null, - "version": "5.8.0" - }, - { - "type": "python_module", - "package": "netifaces", - "vcs": null, - "version": "0.10.9" - }, - { - "type": "python_module", - "package": "six", - "vcs": null, - "version": "1.15.0" - } - ] -} \ No newline at end of file diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index eb4844b8..520681e1 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -11,5 +11,5 @@ charm-tools>=2.4.4 keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) -git+https://github.com/openstack-charmers/zaza.git@stable/21.04#egg=zaza -git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/21.04#egg=zaza.openstack +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index aa678af8..10f9a4e5 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,4 +1,4 @@ -git+https://github.com/juju/charm-helpers.git@stable/21.04#egg=charmhelpers +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers psutil -git+https://opendev.org/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 16bbc27d..3f085244 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -35,7 +35,7 @@ mock>=1.2; python_version >= '3.6' nose>=1.3.7 coverage>=3.6 -git+https://github.com/openstack/charms.openstack.git@stable/21.04#egg=charms.openstack +git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack # # Revisit for removal / mock improvement: netifaces # vault From 13803c6ba0ed1088864a30795ecfb040beeb05dc Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 3 Mar 2021 19:54:10 +0000 Subject: [PATCH 2211/2699] Test bundles for focal-wallaby and hirsute-wallaby These are the test bundles (and any associated changes) for focal-wallaby and hirsute-wallaby support. The hirsute-wallaby test is disabled (moved to dev) due to [1].
[1] https://github.com/juju-solutions/layer-basic/issues/194 Change-Id: I108ce673e31500d1e3c2b2d9f0530bae6ece486a --- ceph-fs/osci.yaml | 18 +- ceph-fs/src/tests/bundles/focal-wallaby.yaml | 222 ++++++++++++++++++ .../src/tests/bundles/hirsute-wallaby.yaml | 222 ++++++++++++++++++ ceph-fs/src/tests/tests.yaml | 6 +- 4 files changed, 464 insertions(+), 4 deletions(-) create mode 100644 ceph-fs/src/tests/bundles/focal-wallaby.yaml create mode 100644 ceph-fs/src/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 4761d776..534ed802 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -8,16 +8,28 @@ - tox-py37 - tox-py38 - job: - name: groovy-victoria-bluestore + name: hirsute-wallaby-bluestore parent: func-target dependencies: &smoke-jobs - bionic-queens_local + vars: + tox_extra_args: bluestore-compression:hirsute-wallaby +- job: + name: groovy-victoria-bluestore + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: bluestore-compression:groovy-victoria - job: name: xenial-mitaka_local parent: xenial-mitaka dependencies: *smoke-jobs +- job: + name: focal-wallaby-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:focal-wallaby - job: name: focal-victoria-bluestore parent: func-target @@ -61,7 +73,9 @@ # Xenial-pike is missing because of # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - xenial-mitaka_local + - hirsute-wallaby-bluestore - groovy-victoria-bluestore + - focal-wallaby-bluestore - focal-victoria-bluestore - focal-ussuri-bluestore - bionic-ussuri-bluestore @@ -69,4 +83,4 @@ - bionic-stein-bluestore vars: needs_charm_build: true - charm_build_name: ceph-fs \ No newline at end of file + charm_build_name: ceph-fs diff --git a/ceph-fs/src/tests/bundles/focal-wallaby.yaml b/ceph-fs/src/tests/bundles/focal-wallaby.yaml new file mode 100644 index 00000000..4134e10e --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-wallaby.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: 
*openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml b/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml new file mode 100644 
index 00000000..400be963 --- /dev/null +++ b/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series hirsute + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 
'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 1a1da437..809f9126 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,6 +1,7 @@ charm_name: ceph-fs gate_bundles: - bluestore-compression: groovy-victoria + - bluestore-compression: focal-wallaby - bluestore-compression: focal-victoria - bluestore-compression: focal-ussuri - bluestore-compression: bionic-ussuri @@ -10,12 +11,13 @@ gate_bundles: - bionic-queens - xenial-mitaka dev_bundles: + - bluestore-compression: hirsute-wallaby - xenial-ocata # Xenial-pike is missing because of # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - xenial-queens smoke_bundles: - - bluestore-compression: bionic-stein + - bluestore-compression: focal-ussuri configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network @@ -37,4 +39,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - - groovy-victoria + - hirsute-wallaby From 0615cf202c43c310cfe2414480ba4adfdc11d3e2 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 3 Mar 2021 20:33:52 +0000 Subject: [PATCH 2212/2699] Test bundles for focal-wallaby and hirsute-wallaby These are the test bundles (and any associated changes) for focal-wallaby and hirsute-wallaby support. Sync libraries. The hirsute-wallaby test is disabled (moved to dev) due to [1], as the bundle may reference a reactive charm.
[1] https://github.com/juju-solutions/layer-basic/issues/194 Change-Id: I99ee073206c7fad4897bf7b7bb767cf143b40d70 --- ceph-radosgw/osci.yaml | 32 ++++- .../bundles/focal-wallaby-namespaced.yaml | 117 ++++++++++++++++++ ceph-radosgw/tests/bundles/focal-wallaby.yaml | 116 +++++++++++++++++ .../bundles/hirsute-wallaby-namespaced.yaml | 117 ++++++++++++++++++ .../tests/bundles/hirsute-wallaby.yaml | 116 +++++++++++++++++ ceph-radosgw/tests/tests.yaml | 17 ++- 6 files changed, 510 insertions(+), 5 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-wallaby.yaml create mode 100644 ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 7f04a868..64b884b0 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -3,8 +3,12 @@ - charm-unit-jobs check: jobs: + - vault-hirsute-wallaby_rgw + - vault-hirsute-wallaby-namespaced - vault-groovy-victoria_rgw - vault-groovy-victoria-namespaced + - vault-focal-wallaby_rgw + - vault-focal-wallaby-namespaced - vault-focal-victoria_rgw - vault-focal-victoria-namespaced - vault-focal-ussuri-ec @@ -33,10 +37,22 @@ vars: tox_extra_args: vault:bionic-ussuri - job: - name: vault-groovy-victoria_rgw + name: vault-hirsute-wallaby_rgw parent: func-target dependencies: &smoke-jobs - vault-bionic-ussuri + vars: + tox_extra_args: vault:hirsute-wallaby +- job: + name: vault-hirsute-wallaby-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:hirsute-wallaby-namespaced +- job: + name: vault-groovy-victoria_rgw + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: vault:groovy-victoria - job: @@ -45,6 +61,18 @@ dependencies: *smoke-jobs vars: tox_extra_args: vault:groovy-victoria-namespaced +- job: + name: vault-focal-wallaby_rgw + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-wallaby +- job: + name: vault-focal-wallaby-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-wallaby-namespaced - job: name: vault-focal-victoria_rgw parent: func-target @@ -126,4 +154,4 @@ parent: func-target dependencies: *smoke-jobs vars: - tox_extra_args: xenial-mitaka-namespaced \ No newline at end of file + tox_extra_args: xenial-mitaka-namespaced diff --git a/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml new file mode 100644 index 00000000..cd11eda5 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source cloud:focal-wallaby + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-wallaby.yaml b/ceph-radosgw/tests/bundles/focal-wallaby.yaml new file mode 100644 index 00000000..83f231e9 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-wallaby.yaml @@ -0,0 +1,116 @@ +options: + source: &source cloud:focal-wallaby + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml b/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml new file mode 100644 index 00000000..daaac21d --- /dev/null +++ b/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml b/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml new file mode 100644 index 00000000..5ec99eb6 --- /dev/null +++ b/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,116 @@ +options: + source: &source distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 7602108e..d4145091 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,7 +1,10 @@ charm_name: ceph-radosgw + gate_bundles: - vault: groovy-victoria - vault: groovy-victoria-namespaced + - vault: focal-wallaby + - vault: focal-wallaby-namespaced - vault: focal-victoria - vault: focal-victoria-namespaced - vault: focal-ussuri-ec @@ -17,8 +20,10 @@ gate_bundles: - vault: bionic-queens-namespaced - xenial-mitaka - xenial-mitaka-namespaced + smoke_bundles: - - vault: bionic-ussuri + - vault: focal-ussuri + dev_bundles: - trusty-mitaka - xenial-ocata @@ -28,13 +33,18 @@ dev_bundles: - bionic-rocky-multisite - vault: bionic-rocky - vault: bionic-rocky-namespaced + - vault: hirsute-wallaby + - vault: hirsute-wallaby-namespaced + target_deploy_status: vault: workload-status: blocked workload-status-message: Vault needs to be initialized + configure: - vault: - zaza.openstack.charm_tests.vault.setup.auto_initialize + tests: - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - vault: @@ -42,7 +52,8 @@ tests: - zaza.openstack.charm_tests.swift.tests.S3APITest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation + tests_options: force_deploy: - - groovy-victoria - - groovy-victoria-namespaced + - hirsute-wallaby + - hirsute-wallaby-namespaced From 9485e07086395fffb99ca61ea2ce43b031fad375 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 3 Mar 2021 20:38:55 +0000 Subject: [PATCH 2213/2699] Test bundles for focal-wallaby and hirsute-wallaby These are the test bundles (and any associated changes) for focal-wallaby and hirsute-wallaby 
support. Change-Id: I53d0b42b43731ac06d96932f3dcf72c116f360f3 --- .../focal-wallaby-image-mirroring.yaml | 171 ++++++++++++++++++ .../src/tests/bundles/focal-wallaby.yaml | 167 +++++++++++++++++ .../hirsute-wallaby-image-mirroring.yaml | 171 ++++++++++++++++++ .../src/tests/bundles/hirsute-wallaby.yaml | 166 +++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 6 +- 5 files changed, 680 insertions(+), 1 deletion(-) create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml new file mode 100644 index 00000000..b28a8244 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml @@ -0,0 +1,171 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db 
+- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml new file mode 100644 index 00000000..8e82a187 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml @@ -0,0 +1,167 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-wallaby + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - 
glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml new file mode 100644 index 00000000..61413d37 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml @@ -0,0 +1,171 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series hirsute + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + options: + rbd-mirroring-mode: image + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - 
keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client +- - cinder-ceph:ceph-replication-device + - ceph-mon-b:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml new file mode 100644 index 00000000..e2e10c5f --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,166 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series hirsute + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + num_units: 0 + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + + ceph-osd-b: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: /opt + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- 
- keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - keystone + - cinder +- - keystone + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index abbdd2b2..bb6fa201 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -10,8 +10,12 @@ comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. dev_bundles: +- hirsute-wallaby +- hirsute-wallaby-image-mirroring - groovy-victoria - groovy-victoria-image-mirroring +- focal-wallaby +- focal-wallaby-image-mirroring - focal-victoria - focal-victoria-image-mirroring - bionic-queens-e2e @@ -32,4 +36,4 @@ tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest tests_options: force_deploy: - - groovy-victoria + - hirsute-wallaby From 7470a0ae5b439e33d84a25c48fd55d03f73777be Mon Sep 17 00:00:00 2001 From: Cornellius Metto Date: Wed, 12 May 2021 10:44:31 +0300 Subject: [PATCH 2214/2699] Add configuration options for disk usage alerting thresholds The ceph cluster degrades to HEALTH_{WARN|ERR} when the following default thresholds are breached: mon data avail warn = 30 mon data avail crit = 5 - These default thresholds can be too conservative for some deployments, and it may be desirable to change them. - A specific common scenario is when ceph-mon units run in LXD containers, which report the disk usage of the underlying host. The underlying host may have its own monitoring with its own thresholds, which can lead to duplicate or conflicting alerts. Closes-Bug: #1890777 Change-Id: I13e35be71697b98b19260970bcf9812a43ef9369 --- ceph-mon/config.yaml | 14 ++++++++++++++ ceph-mon/hooks/ceph_hooks.py | 2 ++ ceph-mon/templates/ceph.conf | 2 ++ ceph-mon/unit_tests/test_ceph_hooks.py | 10 ++++++++++ 4 files changed, 28 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index b3488354..ba6f5a2e 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -103,6 +103,20 @@ options: A space-separated list of ceph mon hosts to use. This field is only used to migrate an existing cluster to a juju-managed solution and should otherwise be left unset. + monitor-data-available-warning: + type: int + default: 30 + description: | + Raise HEALTH_WARN status when the filesystem that houses a monitor's data + store reports that its available capacity is less than or equal to this + percentage. + monitor-data-available-critical: + type: int + default: 5 + description: | + Raise HEALTH_ERR status when the filesystem that houses a monitor's data + store reports that its available capacity is less than or equal to this + percentage.
expected-osd-count: type: int default: 0 diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 381c887e..dd027627 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -192,6 +192,8 @@ def get_ceph_context(): 'ceph_cluster_network': cluster_network, 'loglevel': config('loglevel'), 'dio': str(config('use-direct-io')).lower(), + 'mon_data_avail_warn': int(config('monitor-data-available-warning')), + 'mon_data_avail_crit': int(config('monitor-data-available-critical')), } if config('prefer-ipv6'): diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 6f74d524..3ad2bc07 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -60,6 +60,8 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring mon pg warn max object skew = 0 {% endif %} +mon data avail warn = {{ mon_data_avail_warn }} +mon data avail crit = {{ mon_data_avail_crit }} [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 99a712c3..ee4e8b4b 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -43,6 +43,8 @@ 'use-direct-io': True, 'osd-format': 'ext4', 'monitor-hosts': '', + 'monitor-data-available-warning': 30, + 'monitor-data-available-critical': 5, 'prefer-ipv6': False, 'default-rbd-features': None, 'nagios_degraded_thresh': '1', @@ -84,6 +86,8 @@ def test_get_ceph_context(self, mock_config, mock_config2, 'fsid': '1234', 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', + 'mon_data_avail_warn': 30, + 'mon_data_avail_crit': 5, 'old_auth': False, 'public_addr': '10.0.0.1', 'use_syslog': 'true'} @@ -114,6 +118,8 @@ def test_get_ceph_context_rbd_features(self, mock_config, mock_config2, 'fsid': '1234', 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', + 'mon_data_avail_warn': 30, + 'mon_data_avail_crit': 5, 'old_auth': False, 'public_addr': '10.0.0.1', 'use_syslog': 'true', @@ -145,6 +151,8 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2, 'fsid': '1234', 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', + 'mon_data_avail_warn': 30, + 'mon_data_avail_crit': 5, 'old_auth': False, 'mon': {'mon sync max retries': 10}, 'public_addr': '10.0.0.1', @@ -178,6 +186,8 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'fsid': '1234', 'loglevel': 1, 'mon_hosts': '10.0.0.1 10.0.0.2', + 'mon_data_avail_warn': 30, + 'mon_data_avail_crit': 5, 'old_auth': False, 'mon': {'mon sync max retries': 10}, 'public_addr': '10.0.0.1', From d77901a9e7a4d9b2e8da5ddaabe80231afc96094 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 08:45:35 -0400 Subject: [PATCH 2215/2699] c-h sync - restore proxy env vars for add-apt-repository Change-Id: Ic2b610be597fb6b84e8c8bb48409a4f080447474 --- ceph-osd/hooks/charmhelpers/cli/__init__.py | 11 +++++++++-- .../contrib/openstack/deferred_events.py | 12 +++++++++--- .../hooks/charmhelpers/contrib/openstack/utils.py | 7 ++++++- ceph-osd/hooks/charmhelpers/core/hookenv.py | 15 +++++++++++---- ceph-osd/hooks/charmhelpers/core/services/base.py | 9 +++++++-- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 12 +++--------- 6 files changed, 45 insertions(+), 21 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py index 389b490f..74ea7295 100644 --- a/ceph-osd/hooks/charmhelpers/cli/__init__.py +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -16,6 +16,7 @@ import argparse 
import sys +import six from six.moves import zip import charmhelpers.core.unitdata @@ -148,7 +149,10 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - argspec = inspect.getargspec(arguments.func) + if six.PY2: + argspec = inspect.getargspec(arguments.func) + else: + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -173,7 +177,10 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - argspec = inspect.getargspec(func) + if six.PY2: + argspec = inspect.getargspec(func) + else: + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py index fd073a04..8765ee31 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -46,9 +46,13 @@ def __init__(self, timestamp, service, reason, action, self.service = service self.reason = reason self.action = action - if not policy_requestor_name: + if policy_requestor_name: + self.policy_requestor_name = policy_requestor_name + else: self.policy_requestor_name = hookenv.service_name() - if not policy_requestor_type: + if policy_requestor_type: + self.policy_requestor_type = policy_requestor_type + else: self.policy_requestor_type = 'charm' def __eq__(self, other): @@ -99,7 +103,9 @@ def read_event_file(file_name): contents['timestamp'], contents['service'], contents['reason'], - contents['action']) + contents['action'], + policy_requestor_name=contents.get('policy_requestor_name'), + policy_requestor_type=contents.get('policy_requestor_type')) return event diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 2ad8ab94..1656bd43 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -56,6 +56,7 @@ relation_id, relation_ids, relation_set, + service_name as ch_service_name, status_set, hook_name, application_version_set, @@ -1089,8 +1090,12 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + # Get deferred restarts events that have been triggered by a policy + # written by this charm. 
deferred_restarts = list(set( - [e.service for e in deferred_events.get_deferred_restarts()])) + [e.service + for e in deferred_events.get_deferred_restarts() + if e.policy_requestor_name == ch_service_name()])) if deferred_restarts: svc_msg = "Services queued for restart: {}".format( ', '.join(sorted(deferred_restarts))) diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 778aa4b6..47eebb51 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -468,15 +468,20 @@ def config(scope=None): @cached -def relation_get(attribute=None, unit=None, rid=None): +def relation_get(attribute=None, unit=None, rid=None, app=None): """Get relation information""" _args = ['relation-get', '--format=json'] + if app is not None: + if unit is not None: + raise ValueError("Cannot use both 'unit' and 'app'") + _args.append('--app') if rid: _args.append('-r') _args.append(rid) _args.append(attribute or '-') - if unit: - _args.append(unit) + # unit or application name + if unit or app: + _args.append(unit or app) try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: @@ -487,12 +492,14 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings=None, **kwargs): +def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] accepts_file = "--file" in subprocess.check_output( relation_cmd_line + ["--help"], universal_newlines=True) + if app: + relation_cmd_line.append('--app') if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) settings = relation_settings.copy() diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index 179ad4f0..9f880290 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -14,9 +14,11 @@ import os import json -from inspect import getargspec +import inspect from collections import Iterable, OrderedDict +import six + from charmhelpers.core import host from charmhelpers.core import hookenv @@ -169,7 +171,10 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) + if six.PY2: + argspec = inspect.getargspec(provider.provide_data) + else: + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index b38edcc1..812a11a2 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -658,17 +658,11 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ - series = get_distrib_codename() if '{series}' in spec: + series = get_distrib_codename() spec = spec.replace('{series}', series) - # software-properties package for bionic properly reacts to proxy settings - # set via apt.conf (see lp:1433761), however this is not the case for LTS - # and non-LTS releases before bionic. 
- if series in ('trusty', 'xenial'): - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) - else: - _run_with_retries(['add-apt-repository', '--yes', spec]) + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): From 0fc3ff4ad44de4a6c6a839a957ccea13796165d8 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 08:45:41 -0400 Subject: [PATCH 2216/2699] c-h sync - restore proxy env vars for add-apt-repository Change-Id: I29bbef00f15c349e29d594d2d7d59086b860338a --- ceph-proxy/charmhelpers/cli/__init__.py | 11 +++++++++-- .../contrib/openstack/deferred_events.py | 12 +++++++++--- .../charmhelpers/contrib/openstack/utils.py | 7 ++++++- ceph-proxy/charmhelpers/core/hookenv.py | 15 +++++++++++---- ceph-proxy/charmhelpers/core/services/base.py | 9 +++++++-- ceph-proxy/charmhelpers/fetch/ubuntu.py | 12 +++--------- 6 files changed, 45 insertions(+), 21 deletions(-) diff --git a/ceph-proxy/charmhelpers/cli/__init__.py b/ceph-proxy/charmhelpers/cli/__init__.py index 389b490f..74ea7295 100644 --- a/ceph-proxy/charmhelpers/cli/__init__.py +++ b/ceph-proxy/charmhelpers/cli/__init__.py @@ -16,6 +16,7 @@ import argparse import sys +import six from six.moves import zip import charmhelpers.core.unitdata @@ -148,7 +149,10 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - argspec = inspect.getargspec(arguments.func) + if six.PY2: + argspec = inspect.getargspec(arguments.func) + else: + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -173,7 +177,10 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - argspec = inspect.getargspec(func) + if six.PY2: + argspec = inspect.getargspec(func) + else: + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py index fd073a04..8765ee31 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py @@ -46,9 +46,13 @@ def __init__(self, timestamp, service, reason, action, self.service = service self.reason = reason self.action = action - if not policy_requestor_name: + if policy_requestor_name: + self.policy_requestor_name = policy_requestor_name + else: self.policy_requestor_name = hookenv.service_name() - if not policy_requestor_type: + if policy_requestor_type: + self.policy_requestor_type = policy_requestor_type + else: self.policy_requestor_type = 'charm' def __eq__(self, other): @@ -99,7 +103,9 @@ def read_event_file(file_name): contents['timestamp'], contents['service'], contents['reason'], - contents['action']) + contents['action'], + policy_requestor_name=contents.get('policy_requestor_name'), + policy_requestor_type=contents.get('policy_requestor_type')) return event diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 2ad8ab94..1656bd43 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py 
+++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -56,6 +56,7 @@ relation_id, relation_ids, relation_set, + service_name as ch_service_name, status_set, hook_name, application_version_set, @@ -1089,8 +1090,12 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + # Get deferred restarts events that have been triggered by a policy + # written by this charm. deferred_restarts = list(set( - [e.service for e in deferred_events.get_deferred_restarts()])) + [e.service + for e in deferred_events.get_deferred_restarts() + if e.policy_requestor_name == ch_service_name()])) if deferred_restarts: svc_msg = "Services queued for restart: {}".format( ', '.join(sorted(deferred_restarts))) diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py index 778aa4b6..47eebb51 100644 --- a/ceph-proxy/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -468,15 +468,20 @@ def config(scope=None): @cached -def relation_get(attribute=None, unit=None, rid=None): +def relation_get(attribute=None, unit=None, rid=None, app=None): """Get relation information""" _args = ['relation-get', '--format=json'] + if app is not None: + if unit is not None: + raise ValueError("Cannot use both 'unit' and 'app'") + _args.append('--app') if rid: _args.append('-r') _args.append(rid) _args.append(attribute or '-') - if unit: - _args.append(unit) + # unit or application name + if unit or app: + _args.append(unit or app) try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: @@ -487,12 +492,14 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings=None, **kwargs): +def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] accepts_file = "--file" in subprocess.check_output( relation_cmd_line + ["--help"], universal_newlines=True) + if app: + relation_cmd_line.append('--app') if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) settings = relation_settings.copy() diff --git a/ceph-proxy/charmhelpers/core/services/base.py b/ceph-proxy/charmhelpers/core/services/base.py index 179ad4f0..9f880290 100644 --- a/ceph-proxy/charmhelpers/core/services/base.py +++ b/ceph-proxy/charmhelpers/core/services/base.py @@ -14,9 +14,11 @@ import os import json -from inspect import getargspec +import inspect from collections import Iterable, OrderedDict +import six + from charmhelpers.core import host from charmhelpers.core import hookenv @@ -169,7 +171,10 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) + if six.PY2: + argspec = inspect.getargspec(provider.provide_data) + else: + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index b38edcc1..812a11a2 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -658,17 +658,11 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ - series = get_distrib_codename() if '{series}' in spec: 
+ series = get_distrib_codename() spec = spec.replace('{series}', series) - # software-properties package for bionic properly reacts to proxy settings - # set via apt.conf (see lp:1433761), however this is not the case for LTS - # and non-LTS releases before bionic. - if series in ('trusty', 'xenial'): - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) - else: - _run_with_retries(['add-apt-repository', '--yes', spec]) + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): From 3b61ee03faeeda9772eb24bdff36f88ea7ea1c2e Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 08:45:48 -0400 Subject: [PATCH 2217/2699] c-h sync - restore proxy env vars for add-apt-repository Change-Id: I083014f4886f2c7df643ca67a324bdc2f476cf81 --- ceph-radosgw/hooks/charmhelpers/cli/__init__.py | 11 +++++++++-- .../contrib/openstack/deferred_events.py | 12 +++++++++--- .../hooks/charmhelpers/contrib/openstack/utils.py | 7 ++++++- ceph-radosgw/hooks/charmhelpers/core/hookenv.py | 15 +++++++++++---- .../hooks/charmhelpers/core/services/base.py | 9 +++++++-- ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py | 12 +++--------- 6 files changed, 45 insertions(+), 21 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py index 389b490f..74ea7295 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -16,6 +16,7 @@ import argparse import sys +import six from six.moves import zip import charmhelpers.core.unitdata @@ -148,7 +149,10 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." 
arguments = self.argument_parser.parse_args() - argspec = inspect.getargspec(arguments.func) + if six.PY2: + argspec = inspect.getargspec(arguments.func) + else: + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -173,7 +177,10 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - argspec = inspect.getargspec(func) + if six.PY2: + argspec = inspect.getargspec(func) + else: + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py index fd073a04..8765ee31 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -46,9 +46,13 @@ def __init__(self, timestamp, service, reason, action, self.service = service self.reason = reason self.action = action - if not policy_requestor_name: + if policy_requestor_name: + self.policy_requestor_name = policy_requestor_name + else: self.policy_requestor_name = hookenv.service_name() - if not policy_requestor_type: + if policy_requestor_type: + self.policy_requestor_type = policy_requestor_type + else: self.policy_requestor_type = 'charm' def __eq__(self, other): @@ -99,7 +103,9 @@ def read_event_file(file_name): contents['timestamp'], contents['service'], contents['reason'], - contents['action']) + contents['action'], + policy_requestor_name=contents.get('policy_requestor_name'), + policy_requestor_type=contents.get('policy_requestor_type')) return event diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 2ad8ab94..1656bd43 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -56,6 +56,7 @@ relation_id, relation_ids, relation_set, + service_name as ch_service_name, status_set, hook_name, application_version_set, @@ -1089,8 +1090,12 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + # Get deferred restarts events that have been triggered by a policy + # written by this charm. 
deferred_restarts = list(set( - [e.service for e in deferred_events.get_deferred_restarts()])) + [e.service + for e in deferred_events.get_deferred_restarts() + if e.policy_requestor_name == ch_service_name()])) if deferred_restarts: svc_msg = "Services queued for restart: {}".format( ', '.join(sorted(deferred_restarts))) diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 778aa4b6..47eebb51 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -468,15 +468,20 @@ def config(scope=None): @cached -def relation_get(attribute=None, unit=None, rid=None): +def relation_get(attribute=None, unit=None, rid=None, app=None): """Get relation information""" _args = ['relation-get', '--format=json'] + if app is not None: + if unit is not None: + raise ValueError("Cannot use both 'unit' and 'app'") + _args.append('--app') if rid: _args.append('-r') _args.append(rid) _args.append(attribute or '-') - if unit: - _args.append(unit) + # unit or application name + if unit or app: + _args.append(unit or app) try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: @@ -487,12 +492,14 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings=None, **kwargs): +def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] accepts_file = "--file" in subprocess.check_output( relation_cmd_line + ["--help"], universal_newlines=True) + if app: + relation_cmd_line.append('--app') if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) settings = relation_settings.copy() diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index 179ad4f0..9f880290 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -14,9 +14,11 @@ import os import json -from inspect import getargspec +import inspect from collections import Iterable, OrderedDict +import six + from charmhelpers.core import host from charmhelpers.core import hookenv @@ -169,7 +171,10 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) + if six.PY2: + argspec = inspect.getargspec(provider.provide_data) + else: + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index b38edcc1..812a11a2 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -658,17 +658,11 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ - series = get_distrib_codename() if '{series}' in spec: + series = get_distrib_codename() spec = spec.replace('{series}', series) - # software-properties package for bionic properly reacts to proxy settings - # set via apt.conf (see lp:1433761), however this is not the case for LTS - # and non-LTS releases before bionic. 
- if series in ('trusty', 'xenial'): - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) - else: - _run_with_retries(['add-apt-repository', '--yes', spec]) + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): From 5fc5d88022eace5ed8699ae7a1a17cb0d781f788 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 09:08:07 -0400 Subject: [PATCH 2218/2699] rebuild - restore c-h proxy env vars for add-apt-repository Change-Id: I53017bdb31cc7cfbe9e11e0d856fbd065ba34563 --- ceph-fs/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index efddec9a..12717b9a 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -be82de82-64d9-11eb-94b9-ff8b2804e655 +f9c4c260-b3eb-11eb-b396-1786d65111bf From 4953d27f361fc97c574fd8b773dbef39207325bc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 09:08:14 -0400 Subject: [PATCH 2219/2699] rebuild - restore c-h proxy env vars for add-apt-repository Change-Id: I538683f77566b3719dfd0c6d7b4782bfa72be0d4 --- ceph-rbd-mirror/rebuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-rbd-mirror/rebuild b/ceph-rbd-mirror/rebuild index 06e8054a..cccb7641 100644 --- a/ceph-rbd-mirror/rebuild +++ b/ceph-rbd-mirror/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to. # simply change the uuid to something new -82c053f8-5745-11eb-b798-b32b668d6814 +f9d3918c-b3eb-11eb-a947-c3dd9a34b317 From fa83114ff6314daa879c40441111dcd75429e716 Mon Sep 17 00:00:00 2001 From: Hemanth Nakkina Date: Fri, 21 May 2021 11:52:35 +0530 Subject: [PATCH 2220/2699] set rgw keystone revocation interval to 0 Ceph RGW checks the revocation list every 600 seconds. This is not required for non-PKI tokens, and PKI tokens were removed in the OpenStack Pike release, so the checks only produce unnecessary logs in both ceph and keystone. Set the rgw keystone revocation interval to 0 in ceph conf. This parameter was also removed upstream in Ceph Octopus, so ensure it is not added from that release onwards.
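For illustration, a minimal sketch of the resulting behaviour (Octopus ships radosgw as version 15.x, and charm-helpers' cmp_pkgrevno() returns a negative, zero or positive value when comparing the installed package version against the one given):

    # version gate from this patch: only pre-Octopus gateways get the option
    if cmp_pkgrevno('radosgw', '15.0.0') < 0:
        # the ceph.conf template then renders:
        #   rgw keystone revocation interval = 0
        # on Octopus and later the line is omitted entirely
        ctxt['keystone_revocation_parameter_supported'] = True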
Closes-Bug: #1758982 Change-Id: Iaeb10dc25bb52df9dd3746ecf4fe5859d4efd459 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 2 ++ ceph-radosgw/templates/ceph.conf | 3 +++ .../unit_tests/test_ceph_radosgw_context.py | 22 ++++++++++++++----- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 8115e4e1..ea608fd2 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -92,6 +92,8 @@ def __call__(self): ctxt.pop('admin_domain_id') ctxt['auth_type'] = 'keystone' + if cmp_pkgrevno('radosgw', '15.0.0') < 0: + ctxt['keystone_revocation_parameter_supported'] = True if cmp_pkgrevno('radosgw', "11.0.0") >= 0: ctxt['user_roles'] = config('operator-roles') ctxt['admin_roles'] = config('admin-roles') diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 924927f4..74ca348b 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -54,6 +54,9 @@ rgw keystone admin tenant = {{ admin_tenant_name }} rgw keystone accepted roles = {{ user_roles }} rgw keystone accepted admin roles = {{ admin_roles }} rgw keystone token cache size = {{ cache_size }} +{% if keystone_revocation_parameter_supported -%} +rgw keystone revocation interval = 0 +{% endif -%} rgw s3 auth use keystone = true rgw s3 auth order = local, external {% if namespace_tenants %} diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 5117bcd7..c7c5629c 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -89,7 +89,7 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, jewel_installed=False, cmp_pkgrevno_side_effects=None): self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects if cmp_pkgrevno_side_effects - else [-1, -1]) + else [-1, 1, -1]) self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') @@ -129,11 +129,13 @@ def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, 'service_port': 9876, 'service_protocol': 'http', } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] >= 0: + if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: expect['user_roles'] = 'Babel' expect['admin_roles'] = 'Dart' else: expect['user_roles'] = 'Babel,Dart' + if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0: + expect['keystone_revocation_parameter_supported'] = True if jewel_installed: expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) @@ -153,7 +155,7 @@ def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, cmp_pkgrevno_side_effects=None): self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects if cmp_pkgrevno_side_effects - else [-1, -1]) + else [-1, 1, -1]) self.test_config.set('operator-roles', 'Babel') self.test_config.set('admin-roles', 'Dart') self.test_config.set('cache-size', '42') @@ -194,11 +196,13 @@ def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, 'service_port': 9876, 'service_protocol': 'http', } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] >= 0: + if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: expect['user_roles'] = 'Babel' expect['admin_roles'] = 'Dart' else: expect['user_roles'] = 'Babel,Dart' + if cmp_pkgrevno_side_effects and 
cmp_pkgrevno_side_effects[1] < 0: + expect['keystone_revocation_parameter_supported'] = True if jewel_installed: expect['auth_keystone_v3_supported'] = True self.assertEqual(expect, ids_ctxt()) @@ -250,6 +254,7 @@ def test_ids_ctxt_missing_admin_domain_id( 'auth_type': 'keystone', 'namespace_tenants': False, 'cache_size': '42', + 'keystone_revocation_parameter_supported': True, 'service_host': '127.0.0.4', 'service_port': 9876, 'service_protocol': 'http', @@ -311,6 +316,7 @@ def test_ids_ctxt_v3( 'auth_type': 'keystone', 'namespace_tenants': False, 'cache_size': '42', + 'keystone_revocation_parameter_supported': True, 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', 'service_host': '127.0.0.4', 'service_port': 9876, @@ -324,11 +330,15 @@ def test_ids_ctxt_v3( def test_ids_ctxt_jewel(self): self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[0, -1]) + cmp_pkgrevno_side_effects=[0, 1, -1]) def test_ids_ctxt_luminous(self): self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[1, 0]) + cmp_pkgrevno_side_effects=[1, 1, 0]) + + def test_ids_ctxt_octopus(self): + self.test_ids_ctxt(jewel_installed=True, + cmp_pkgrevno_side_effects=[1, -1, 0]) @patch.object(charmhelpers.contrib.openstack.context, 'filter_installed_packages', return_value=['absent-pkg']) From de8410e7f28d7bce55adaeb73b477c68142acada Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 24 May 2021 13:17:49 +0100 Subject: [PATCH 2221/2699] Filter luks devices from pristine checks update-status needs to account for devices that are used as bluestore devices where the device is mapped to a luks dev. Change-Id: I92e67a5d62eb50dbf5da430d03a6b7ff5485ce46 Closes-Bug: #1929417 --- ceph-osd/hooks/ceph_hooks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 5f4e3ee0..d8f38332 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -532,6 +532,9 @@ def prepare_disks_and_activate(): # filter osd-devices that are active bluestore devices devices = [dev for dev in devices if not ceph.is_active_bluestore_device(dev)] + # filter osd-devices that are used as dmcrypt devices + devices = [dev for dev in devices + if not ceph.is_mapped_luks_device(dev)] log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG) if not all(ceph.is_pristine_disk(dev) for dev in devices): @@ -852,7 +855,8 @@ def assess_status(): osd_journals = get_journal_devices() for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)): if (not ceph.is_active_bluestore_device(dev) and - not ceph.is_pristine_disk(dev)): + not ceph.is_pristine_disk(dev) and + not ceph.is_mapped_luks_device(dev)): pristine = False break if pristine: From 86564c4fc64cd42900bd5723264dd6600f0b9a34 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:07:00 +0200 Subject: [PATCH 2222/2699] Add impish to metadata.yaml Change-Id: I5d36922ca668e9a160b3bdbbb213cf9a3a87f2c3 --- ceph-iscsi/metadata.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index e42d44b7..e75a35d6 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -12,6 +12,8 @@ tags: series: - focal - groovy + - hirsute + - impish subordinate: false min-juju-version: 2.7.6 extra-bindings: From 086cb708efb4bd33c477a58ef4593a74ffe8d957 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:08:15 +0200 Subject: [PATCH 2223/2699] Add impish to metadata.yaml 
Change-Id: I48019237ab6419a9a5782c382bf8c3f64c59c067 --- ceph-osd/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 8fa6e056..530b7672 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -16,6 +16,7 @@ series: - focal - groovy - hirsute +- impish description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From 3406d8ceda483d5df643c60f430294f180b3c047 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 13 May 2021 08:45:28 -0400 Subject: [PATCH 2224/2699] c-h sync - restore proxy env vars for add-apt-repository Change-Id: Id4886deff01dbf04861d8815d6816f13b0c6b735 --- ceph-mon/hooks/charmhelpers/cli/__init__.py | 11 +++++++++-- .../contrib/openstack/deferred_events.py | 12 +++++++++--- .../hooks/charmhelpers/contrib/openstack/utils.py | 7 ++++++- ceph-mon/hooks/charmhelpers/core/hookenv.py | 15 +++++++++++---- ceph-mon/hooks/charmhelpers/core/services/base.py | 9 +++++++-- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 12 +++--------- 6 files changed, 45 insertions(+), 21 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 389b490f..74ea7295 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -16,6 +16,7 @@ import argparse import sys +import six from six.moves import zip import charmhelpers.core.unitdata @@ -148,7 +149,10 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - argspec = inspect.getargspec(arguments.func) + if six.PY2: + argspec = inspect.getargspec(arguments.func) + else: + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -173,7 +177,10 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - argspec = inspect.getargspec(func) + if six.PY2: + argspec = inspect.getargspec(func) + else: + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py index fd073a04..8765ee31 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -46,9 +46,13 @@ def __init__(self, timestamp, service, reason, action, self.service = service self.reason = reason self.action = action - if not policy_requestor_name: + if policy_requestor_name: + self.policy_requestor_name = policy_requestor_name + else: self.policy_requestor_name = hookenv.service_name() - if not policy_requestor_type: + if policy_requestor_type: + self.policy_requestor_type = policy_requestor_type + else: self.policy_requestor_type = 'charm' def __eq__(self, other): @@ -99,7 +103,9 @@ def read_event_file(file_name): contents['timestamp'], contents['service'], contents['reason'], - contents['action']) + contents['action'], + policy_requestor_name=contents.get('policy_requestor_name'), + policy_requestor_type=contents.get('policy_requestor_type')) return event diff --git 
a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 2ad8ab94..1656bd43 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -56,6 +56,7 @@ relation_id, relation_ids, relation_set, + service_name as ch_service_name, status_set, hook_name, application_version_set, @@ -1089,8 +1090,12 @@ def _determine_os_workload_status( try: if config(POLICYD_CONFIG_NAME): message = "{} {}".format(policyd_status_message_prefix(), message) + # Get deferred restarts events that have been triggered by a policy + # written by this charm. deferred_restarts = list(set( - [e.service for e in deferred_events.get_deferred_restarts()])) + [e.service + for e in deferred_events.get_deferred_restarts() + if e.policy_requestor_name == ch_service_name()])) if deferred_restarts: svc_msg = "Services queued for restart: {}".format( ', '.join(sorted(deferred_restarts))) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 778aa4b6..47eebb51 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -468,15 +468,20 @@ def config(scope=None): @cached -def relation_get(attribute=None, unit=None, rid=None): +def relation_get(attribute=None, unit=None, rid=None, app=None): """Get relation information""" _args = ['relation-get', '--format=json'] + if app is not None: + if unit is not None: + raise ValueError("Cannot use both 'unit' and 'app'") + _args.append('--app') if rid: _args.append('-r') _args.append(rid) _args.append(attribute or '-') - if unit: - _args.append(unit) + # unit or application name + if unit or app: + _args.append(unit or app) try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: @@ -487,12 +492,14 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings=None, **kwargs): +def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] accepts_file = "--file" in subprocess.check_output( relation_cmd_line + ["--help"], universal_newlines=True) + if app: + relation_cmd_line.append('--app') if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) settings = relation_settings.copy() diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index 179ad4f0..9f880290 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -14,9 +14,11 @@ import os import json -from inspect import getargspec +import inspect from collections import Iterable, OrderedDict +import six + from charmhelpers.core import host from charmhelpers.core import hookenv @@ -169,7 +171,10 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) + if six.PY2: + argspec = inspect.getargspec(provider.provide_data) + else: + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index b38edcc1..812a11a2 100644 --- 
a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -658,17 +658,11 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository :type spec: str """ - series = get_distrib_codename() if '{series}' in spec: + series = get_distrib_codename() spec = spec.replace('{series}', series) - # software-properties package for bionic properly reacts to proxy settings - # set via apt.conf (see lp:1433761), however this is not the case for LTS - # and non-LTS releases before bionic. - if series in ('trusty', 'xenial'): - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) - else: - _run_with_retries(['add-apt-repository', '--yes', spec]) + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https', 'http'])) def _add_cloud_pocket(pocket): From a7ac36b8f40b379065b3186d36bc6e1c6868344a Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Tue, 27 Apr 2021 13:57:01 +0200 Subject: [PATCH 2225/2699] Fix Zaza's expected Ubuntu workload status The Ubuntu charm doesn't show 'Ready' anymore, which was breaking the gate. Change-Id: If57eeb8e0f4eeffdaa4d406395826b92379203c7 --- ceph-iscsi/tests/tests.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 7371f249..d44445d9 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -11,6 +11,9 @@ tests: - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation target_deploy_status: + ubuntu: + workload-status: active + workload-status-message: '' vault: workload-status: blocked workload-status-message: Vault needs to be initialized From 6cfa90ac8f1a74b3a96ba339c74d79b56249e4cb Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 9 Jun 2021 16:09:13 +0100 Subject: [PATCH 2226/2699] Pin charmcraft to 0.10.2 Charmcraft 1.0+ breaks py35 compatibility by introducing f-strings. As the charms are still being built on a xenial builder, we currently need to pin this for the builds. 
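For reference, a minimal reproduction of the incompatibility (a hypothetical one-liner, not taken from the charmcraft source): f-strings only parse on Python >= 3.6, while a xenial builder runs python3.5, so any module containing one fails before it can even be imported:

    # on xenial's interpreter this fails at parse time
    python3.5 -c 'print(f"hello")'   # SyntaxError: invalid syntax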
Closes-Bug: #1931436 Change-Id: I88f1a2ef629db4bd3add8c346b946b66cf3b08ed --- ceph-iscsi/build-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt index 96368501..271d8955 100644 --- a/ceph-iscsi/build-requirements.txt +++ b/ceph-iscsi/build-requirements.txt @@ -1 +1 @@ -git+https://github.com/canonical/charmcraft.git#egg=charmcraft +git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft From 1e98ed9cca2ac9a87788b7760bf0b9e6df4c6883 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:08:54 +0200 Subject: [PATCH 2227/2699] Add impish to metadata.yaml Change-Id: If0bc6f21f8139186f0d279a4a38d1522c295c127 --- ceph-radosgw/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index d54e017c..afbe5862 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -18,6 +18,7 @@ series: - focal - groovy - hirsute +- impish extra-bindings: public: admin: From ea69c1e7ade355a78d6a19c706d4f667b6680f3f Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:09:17 +0200 Subject: [PATCH 2228/2699] Add impish to metadata.yaml Change-Id: I12925554fa90c97e8d6bdee46ad953cd7c516b7f --- ceph-rbd-mirror/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 20f9742b..48680792 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -20,6 +20,7 @@ series: - focal - groovy - hirsute +- impish extra-bindings: public: cluster: From 4252c5198c51ada2065aeca4378d83084a36f0dd Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:08:36 +0200 Subject: [PATCH 2229/2699] Add impish to metadata.yaml Change-Id: I20614430b45cc1919cd21b20c84854a891b4f0ec --- ceph-proxy/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 4defc7ae..5c25af59 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -15,6 +15,7 @@ series: - focal - groovy - hirsute +- impish extra-bindings: public: cluster: From 208a4597a7c3ed589788c262446f8fdaf72b89fa Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:06:26 +0200 Subject: [PATCH 2230/2699] Add impish to metadata.yaml Change-Id: I6d93ffc924e760a497c5dc74b59a665f888412f0 --- ceph-fs/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index ca022a0a..bebfe94f 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -15,6 +15,7 @@ series: - focal - groovy - hirsute +- impish subordinate: false requires: ceph-mds: From 80b2b16c35c86ba7967b84e995448e7acac6f38f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 24 Jun 2021 13:14:57 +0000 Subject: [PATCH 2231/2699] Initial cut of ceph-dashboard charm --- ceph-dashboard/.flake8 | 9 + ceph-dashboard/.gitignore | 8 + ceph-dashboard/.jujuignore | 3 + ceph-dashboard/.stestr.conf | 3 + ceph-dashboard/LICENSE | 202 +++++++++++ ceph-dashboard/README.md | 27 ++ ceph-dashboard/actions.yaml | 14 + ceph-dashboard/config.yaml | 10 + ceph-dashboard/metadata.yaml | 27 ++ ceph-dashboard/requirements-dev.txt | 3 + ceph-dashboard/requirements.txt | 5 + ceph-dashboard/src/charm.py | 192 ++++++++++ ceph-dashboard/src/interface_api_endpoints.py | 64 ++++ ceph-dashboard/src/interface_dashboard.py | 43 +++
ceph-dashboard/test-requirements.txt | 17 + ceph-dashboard/tests/__init__.py | 0 ceph-dashboard/tests/bundles/focal.yaml | 39 ++ .../overlays/local-charm-overlay.yaml.j2 | 3 + ceph-dashboard/tests/tests.yaml | 13 + ceph-dashboard/tox.ini | 134 +++++++ ceph-dashboard/unit_tests/__init__.py | 19 + .../unit_tests/test_ceph_dashboard_charm.py | 338 ++++++++++++++++++ .../test_interface_api_endpoints.py | 159 ++++++++ .../unit_tests/test_interface_dashboard.py | 98 +++++ 24 files changed, 1430 insertions(+) create mode 100644 ceph-dashboard/.flake8 create mode 100644 ceph-dashboard/.gitignore create mode 100644 ceph-dashboard/.jujuignore create mode 100644 ceph-dashboard/.stestr.conf create mode 100644 ceph-dashboard/LICENSE create mode 100644 ceph-dashboard/README.md create mode 100644 ceph-dashboard/actions.yaml create mode 100644 ceph-dashboard/config.yaml create mode 100644 ceph-dashboard/metadata.yaml create mode 100644 ceph-dashboard/requirements-dev.txt create mode 100644 ceph-dashboard/requirements.txt create mode 100755 ceph-dashboard/src/charm.py create mode 100644 ceph-dashboard/src/interface_api_endpoints.py create mode 100644 ceph-dashboard/src/interface_dashboard.py create mode 100644 ceph-dashboard/test-requirements.txt create mode 100644 ceph-dashboard/tests/__init__.py create mode 100644 ceph-dashboard/tests/bundles/focal.yaml create mode 100644 ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 create mode 100644 ceph-dashboard/tests/tests.yaml create mode 100644 ceph-dashboard/tox.ini create mode 100644 ceph-dashboard/unit_tests/__init__.py create mode 100644 ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py create mode 100644 ceph-dashboard/unit_tests/test_interface_api_endpoints.py create mode 100644 ceph-dashboard/unit_tests/test_interface_dashboard.py diff --git a/ceph-dashboard/.flake8 b/ceph-dashboard/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/ceph-dashboard/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/ceph-dashboard/.gitignore b/ceph-dashboard/.gitignore new file mode 100644 index 00000000..0c9e80f2 --- /dev/null +++ b/ceph-dashboard/.gitignore @@ -0,0 +1,8 @@ +.tox +**/*.swp +__pycache__ +.stestr/ +lib/* +!lib/README.txt +build +*.charm diff --git a/ceph-dashboard/.jujuignore b/ceph-dashboard/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/ceph-dashboard/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/ceph-dashboard/.stestr.conf b/ceph-dashboard/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-dashboard/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-dashboard/LICENSE b/ceph-dashboard/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-dashboard/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md
new file mode 100644
index 00000000..b0dc8461
--- /dev/null
+++ b/ceph-dashboard/README.md
@@ -0,0 +1,27 @@
+# Overview
+
+The ceph-dashboard charm configures the [Ceph Dashboard][ceph-dashboard].
+The charm is intended to be used in conjunction with the
+[ceph-mon][ceph-mon-charm] charm.
+
+# Usage
+
+## Configuration
+
+See file `config.yaml` for the full list of options, along with their
+descriptions and default values.
+
+## Deployment
+
+A pre-existing Ceph cluster is assumed.
+
+Deploy ceph-dashboard as a subordinate to the ceph-mon charm:
+
+    juju deploy ceph-dashboard
+    juju relate ceph-dashboard ceph-mon
+
+
+
+
+[ceph-dashboard]: https://docs.ceph.com/en/latest/mgr/dashboard/
+[ceph-mon-charm]: https://jaas.ai/ceph-mon
diff --git a/ceph-dashboard/actions.yaml b/ceph-dashboard/actions.yaml
new file mode 100644
index 00000000..c76df649
--- /dev/null
+++ b/ceph-dashboard/actions.yaml
@@ -0,0 +1,14 @@
+# Copyright 2021 Canonical
+# See LICENSE file for licensing details.
+
+add-user:
+  description: add a dashboard user
+  params:
+    username:
+      description: Name of user to create
+      type: string
+      default: ""
+    role:
+      description: Role to give user
+      type: string
+      default: ""
diff --git a/ceph-dashboard/config.yaml b/ceph-dashboard/config.yaml
new file mode 100644
index 00000000..d4bed64c
--- /dev/null
+++ b/ceph-dashboard/config.yaml
@@ -0,0 +1,10 @@
+# Copyright 2021 Canonical
+# See LICENSE file for licensing details.
+
+options:
+  public-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the public endpoints created for the
+      dashboard
diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml
new file mode 100644
index 00000000..a052461b
--- /dev/null
+++ b/ceph-dashboard/metadata.yaml
@@ -0,0 +1,27 @@
+# Copyright 2021 Canonical
+# See LICENSE file for licensing details.
+name: ceph-dashboard
+display-name: Ceph Dashboard
+maintainer: OpenStack Charmers
+summary: Enable dashboard for Ceph
+description: |
+  Enable the Ceph Dashboard on the ceph-mon units
+tags:
+- openstack
+- storage
+- backup
+extra-bindings:
+  public:
+subordinate: true
+series:
+- focal
+- groovy
+requires:
+  dashboard:
+    interface: ceph-dashboard
+    scope: container
+  certificates:
+    interface: tls-certificates
+  loadbalancer:
+    interface: api-endpoints
+
diff --git a/ceph-dashboard/requirements-dev.txt b/ceph-dashboard/requirements-dev.txt
new file mode 100644
index 00000000..4f2a3f5b
--- /dev/null
+++ b/ceph-dashboard/requirements-dev.txt
@@ -0,0 +1,3 @@
+-r requirements.txt
+coverage
+flake8
diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt
new file mode 100644
index 00000000..56cbcc9d
--- /dev/null
+++ b/ceph-dashboard/requirements.txt
@@ -0,0 +1,5 @@
+ops >= 1.2.0
+git+https://github.com/openstack/charms.ceph#egg=charms_ceph
+git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack
+#git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates
+git+https://github.com/gnuoy/ops-interface-tls-certificates@no-exception-for-inflight-request#egg=interface_tls_certificates
diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py
new file mode 100755
index 00000000..63be231c
--- /dev/null
+++ b/ceph-dashboard/src/charm.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical
+# See LICENSE file for licensing details.
+#
+# Learn more at: https://juju.is/docs/sdk
+
+"""Charm for the Ceph Dashboard."""
+
+import logging
+import tempfile
+
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, BlockedStatus, StatusBase
+from ops.charm import ActionEvent
+import interface_tls_certificates.ca_client as ca_client
+import re
+import secrets
+import socket
+import string
+import subprocess
+import ops_openstack.plugins.classes
+import interface_dashboard
+import interface_api_endpoints
+import cryptography.hazmat.primitives.serialization as serialization
+import charms_ceph.utils as ceph_utils
+
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+class CephDashboardCharm(ops_openstack.core.OSBaseCharm):
+    """Ceph Dashboard charm."""
+
+    _stored = StoredState()
+    PACKAGES = ['ceph-mgr-dashboard']
+    CEPH_CONFIG_PATH = Path('/etc/ceph')
+    TLS_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.key'
+    TLS_PUB_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard-pub.key'
+    TLS_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.crt'
+    TLS_KEY_AND_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.pem'
+    TLS_CA_CERT_PATH = Path(
+        '/usr/local/share/ca-certificates/vault_ca_cert_dashboard.crt')
+    TLS_PORT = 8443
+
+    def __init__(self, *args) -> None:
+        """Setup adapters and observers."""
+        super().__init__(*args)
+        super().register_status_check(self.check_dashboard)
+        self.mon = interface_dashboard.CephDashboardRequires(
+            self,
+            'dashboard')
+        self.ca_client = ca_client.CAClient(
+            self,
+            'certificates')
+        self.framework.observe(
+            self.mon.on.mon_ready,
+            self._configure_dashboard)
+        self.framework.observe(
+            self.ca_client.on.ca_available,
+            self._on_ca_available)
+        self.framework.observe(
+            self.ca_client.on.tls_server_config_ready,
+            self._on_tls_server_config_ready)
+        self.framework.observe(self.on.add_user_action, self._add_user_action)
+        self.ingress = interface_api_endpoints.APIEndpointsRequires(
+            self,
+            'loadbalancer',
+            {
+                'endpoints': [{
+                    'service-type': 'ceph-dashboard',
+                    'frontend-port': self.TLS_PORT,
+                    'backend-port': self.TLS_PORT,
+                    'backend-ip': self._get_bind_ip(),
+                    'check-type': 'httpd'}]})
+        self._stored.set_default(is_started=False)
+
+    def _on_ca_available(self, _) -> None:
+        """Request TLS certificates."""
+        addresses = set()
+        for binding_name in ['public']:
+            binding = self.model.get_binding(binding_name)
+            addresses.add(binding.network.ingress_address)
+            addresses.add(binding.network.bind_address)
+        sans = [str(s) for s in addresses]
+        sans.append(socket.gethostname())
+        if self.config.get('public-hostname'):
+            sans.append(self.config.get('public-hostname'))
+        self.ca_client.request_server_certificate(socket.getfqdn(), sans)
+
+    def check_dashboard(self) -> StatusBase:
+        """Check status of dashboard"""
+        self._stored.is_started = ceph_utils.is_dashboard_enabled()
+        if self._stored.is_started:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            result = sock.connect_ex((self._get_bind_ip(), self.TLS_PORT))
+            if result == 0:
+                return ActiveStatus()
+            else:
+                return BlockedStatus(
+                    'Dashboard not responding')
+        else:
+            return BlockedStatus(
+                'Dashboard is not enabled')
+
+    def kick_dashboard(self) -> None:
+        """Disable and re-enable dashboard"""
+        ceph_utils.mgr_disable_dashboard()
+        ceph_utils.mgr_enable_dashboard()
+
+    def _configure_dashboard(self, _) -> None:
+        """Configure dashboard"""
+        if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled():
+            ceph_utils.mgr_enable_dashboard()
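+        # Each unit publishes the address it serves the dashboard on;
+        # the mgr dashboard module reads the per-hostname server_addr key.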
+        ceph_utils.mgr_config_set(
+            'mgr/dashboard/{hostname}/server_addr'.format(
+                hostname=socket.gethostname()),
+            str(self._get_bind_ip()))
+        self.update_status()
+
+    def _get_bind_ip(self) -> str:
+        """Return the IP to bind the dashboard to"""
+        binding = self.model.get_binding('public')
+        return str(binding.network.ingress_address)
+
+    def _on_tls_server_config_ready(self, _) -> None:
+        """Configure TLS."""
+        self.TLS_KEY_PATH.write_bytes(
+            self.ca_client.server_key.private_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PrivateFormat.TraditionalOpenSSL,
+                encryption_algorithm=serialization.NoEncryption()))
+        self.TLS_CERT_PATH.write_bytes(
+            self.ca_client.server_certificate.public_bytes(
+                encoding=serialization.Encoding.PEM))
+        self.TLS_CA_CERT_PATH.write_bytes(
+            self.ca_client.ca_certificate.public_bytes(
+                encoding=serialization.Encoding.PEM) +
+            self.ca_client.root_ca_chain.public_bytes(
+                encoding=serialization.Encoding.PEM))
+
+        hostname = socket.gethostname()
+        subprocess.check_call(['update-ca-certificates'])
+        ceph_utils.dashboard_set_ssl_certificate(
+            self.TLS_CERT_PATH,
+            hostname=hostname)
+        ceph_utils.dashboard_set_ssl_certificate_key(
+            self.TLS_KEY_PATH,
+            hostname=hostname)
+        if self.unit.is_leader():
+            ceph_utils.mgr_config_set(
+                'mgr/dashboard/standby_behaviour',
+                'redirect')
+            ceph_utils.mgr_config_set(
+                'mgr/dashboard/ssl',
+                'true')
+            # Also set the ssl artifacts without the hostname; this appears
+            # to be required even though they aren't used.
+            ceph_utils.dashboard_set_ssl_certificate(
+                self.TLS_CERT_PATH)
+            ceph_utils.dashboard_set_ssl_certificate_key(
+                self.TLS_KEY_PATH)
+        self.kick_dashboard()
+
+    def _gen_user_password(self, length: int = 8) -> str:
+        """Generate a password"""
+        alphabet = string.ascii_letters + string.digits
+        return ''.join(secrets.choice(alphabet) for i in range(length))
+
+    def _add_user_action(self, event: ActionEvent) -> None:
+        """Create a user"""
+        username = event.params["username"]
+        role = event.params["role"]
+        if not all([username, role]):
+            event.fail("Config missing")
+        else:
+            password = self._gen_user_password()
+            with tempfile.NamedTemporaryFile(mode='w', delete=True) as fp:
+                fp.write(password)
+                fp.flush()
+                cmd_out = subprocess.check_output([
+                    'ceph', 'dashboard', 'ac-user-create', '--enabled',
+                    '-i', fp.name, username, role]).decode('UTF-8')
+                if re.match('User.*already exists', cmd_out):
+                    event.fail("User already exists")
+                else:
+                    event.set_results({"password": password})
+
+if __name__ == "__main__":
+    main(CephDashboardCharm)
diff --git a/ceph-dashboard/src/interface_api_endpoints.py b/ceph-dashboard/src/interface_api_endpoints.py
new file mode 100644
index 00000000..8908e441
--- /dev/null
+++ b/ceph-dashboard/src/interface_api_endpoints.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+
+import json
+
+from ops.framework import (
+    StoredState,
+    EventBase,
+    ObjectEvents,
+    EventSource,
+    Object)
+
+
+class EndpointDataEvent(EventBase):
+    pass
+
+
+class APIEndpointsEvents(ObjectEvents):
+    ep_ready = EventSource(EndpointDataEvent)
+
+
+class APIEndpointsRequires(Object):
+
+    on = APIEndpointsEvents()
+    _stored = StoredState()
+
+    def __init__(self, charm, relation_name, config_dict):
+        super().__init__(charm, relation_name)
+        self.config_dict = config_dict
+        self.relation_name = relation_name
+        self.framework.observe(
+            charm.on[self.relation_name].relation_changed,
+            self._on_relation_changed)
+
+    def _on_relation_changed(self, event):
+        """Handle the relation-changed event."""
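+        # Publish this unit's endpoint configuration so the remote
+        # loadbalancer application can create frontends for it.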
+        event.relation.data[self.model.unit]['endpoints'] = json.dumps(
+            self.config_dict['endpoints'])
+
+    def update_config(self, config_dict):
+        """Allow for updates to relation."""
+        self.config_dict = config_dict
+        relation = self.model.get_relation(self.relation_name)
+        if relation:
+            relation.data[self.model.unit]['endpoints'] = json.dumps(
+                self.config_dict['endpoints'])
+
+
+class APIEndpointsProvides(Object):
+
+    on = APIEndpointsEvents()
+    _stored = StoredState()
+
+    def __init__(self, charm):
+        super().__init__(charm, "loadbalancer")
+        # Observe the relation-changed hook event and bind
+        # self._on_relation_changed() to handle the event.
+        self.framework.observe(
+            charm.on["loadbalancer"].relation_changed,
+            self._on_relation_changed)
+        self.charm = charm
+
+    def _on_relation_changed(self, event):
+        """Handle a change to the loadbalancer relation."""
+        self.on.ep_ready.emit()
diff --git a/ceph-dashboard/src/interface_dashboard.py b/ceph-dashboard/src/interface_dashboard.py
new file mode 100644
index 00000000..19183f7d
--- /dev/null
+++ b/ceph-dashboard/src/interface_dashboard.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+import logging
+
+from ops.framework import (
+    StoredState,
+    EventBase,
+    ObjectEvents,
+    EventSource,
+    Object)
+
+
+class MonReadyEvent(EventBase):
+    pass
+
+
+class CephDashboardEvents(ObjectEvents):
+    mon_ready = EventSource(MonReadyEvent)
+
+
+class CephDashboardRequires(Object):
+
+    on = CephDashboardEvents()
+    _stored = StoredState()
+    READY_KEY = 'mon-ready'
+
+    def __init__(self, charm, relation_name):
+        super().__init__(charm, relation_name)
+        self.relation_name = relation_name
+        self.framework.observe(
+            charm.on[relation_name].relation_changed,
+            self.on_changed)
+
+    def on_changed(self, event):
+        logging.debug("CephDashboardRequires on_changed")
+        for u in self.dashboard_relation.units:
+            if self.dashboard_relation.data[u].get(self.READY_KEY) == 'True':
+                logging.debug("Emitting mon ready")
+                self.on.mon_ready.emit()
+
+    @property
+    def dashboard_relation(self):
+        return self.framework.model.get_relation(self.relation_name)
diff --git a/ceph-dashboard/test-requirements.txt b/ceph-dashboard/test-requirements.txt
new file mode 100644
index 00000000..8057d2c6
--- /dev/null
+++ b/ceph-dashboard/test-requirements.txt
@@ -0,0 +1,17 @@
+# This file is managed centrally. If you find the need to modify this as a
+# one-off, please don't. Instead, consult #openstack-charms and ask about
+# requirements management in charms via bot-control. Thank you.
+charm-tools>=2.4.4
+coverage>=3.6
+mock>=1.2
+flake8>=2.2.4,<=2.4.1
+pyflakes==2.1.1
+stestr>=2.2.0
+requests>=2.18.4
+psutil
+# oslo.i18n dropped py35 support
+oslo.i18n<4.0.0
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+pytz  # workaround for 14.04 pip/tox
+pyudev  # for ceph-* charm unit tests (not mocked?)
diff --git a/ceph-dashboard/tests/__init__.py b/ceph-dashboard/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml
new file mode 100644
index 00000000..64c88694
--- /dev/null
+++ b/ceph-dashboard/tests/bundles/focal.yaml
@@ -0,0 +1,39 @@
+local_overlay_enabled: False
+series: focal
+applications:
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 6
+    storage:
+      osd-devices: 'cinder,10G'
+    options:
+      osd-devices: '/dev/test-non-existent'
+  ceph-mon:
+    charm: cs:~gnuoy/ceph-mon-26
+    num_units: 3
+    options:
+      monitor-count: '3'
+  vault:
+    num_units: 1
+    charm: cs:~openstack-charmers-next/vault
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster-79
+    constraints: mem=3072M
+    num_units: 3
+  vault-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  ceph-dashboard:
+    charm: ../../ceph-dashboard.charm
+    options:
+      public-hostname: 'ceph-dashboard.zaza.local'
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'vault:shared-db'
+    - 'vault-mysql-router:shared-db'
+  - - 'vault-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'ceph-dashboard:dashboard'
+    - 'ceph-mon:dashboard'
+  - - 'ceph-dashboard:certificates'
+    - 'vault:certificates'
diff --git a/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2
new file mode 100644
index 00000000..64c91cf5
--- /dev/null
+++ b/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2
@@ -0,0 +1,3 @@
+applications:
+  ceph-dashboard:
+    charm: ../../ceph-dashboard.charm
diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml
new file mode 100644
index 00000000..63e365f4
--- /dev/null
+++ b/ceph-dashboard/tests/tests.yaml
@@ -0,0 +1,13 @@
+charm_name: ceph-dashboard
+gate_bundles:
+  - focal
+smoke_bundles:
+  - focal
+configure:
+  - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
+tests:
+  - zaza.openstack.charm_tests.ceph.dashboard.tests.CephDashboardTest
+target_deploy_status:
+  vault:
+    workload-status: blocked
+    workload-status-message: Vault needs to be initialized
diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini
new file mode 100644
index 00000000..3220044c
--- /dev/null
+++ b/ceph-dashboard/tox.ini
@@ -0,0 +1,134 @@
+# Operator charm (with zaza): tox.ini
+
+[tox]
+envlist = pep8,py3
+skipsdist = True
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE: Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +whitelist_externals = + git + add-to-archive.py + bash + charmcraft +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py35] +basepython = python3.5 +# python3.5 is irrelevant on a focal+ charm. +commands = /bin/true + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = +commands = + charmcraft build + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902 diff --git a/ceph-dashboard/unit_tests/__init__.py b/ceph-dashboard/unit_tests/__init__.py new file mode 100644 index 00000000..577ab7e9 --- /dev/null +++ b/ceph-dashboard/unit_tests/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import mock + +# Mock out secrets to make py35 happy. +sys.modules['secrets'] = mock.MagicMock() diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py new file mode 100644 index 00000000..c6447e61 --- /dev/null +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import sys + +sys.path.append('lib') # noqa +sys.path.append('src') # noqa + +from mock import call, patch, MagicMock + +from ops.testing import Harness, _TestingModelBackend +from ops.model import ( + ActiveStatus, + BlockedStatus, +) +from ops import framework, model +import charm + +TEST_CA = '''-----BEGIN CERTIFICATE----- +MIIC8TCCAdmgAwIBAgIUAK1dgpjTc850TgQx6y3W1brByOwwDQYJKoZIhvcNAQEL +BQAwGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MB4XDTIxMDYyMTExNTg1OFoX +DTIxMDcyMTExNTg1OVowGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA08gO8TDPARVhfVLOkYYRvCU1Rviv +RYmy+ptA82XIHO1HvAuLQ8x/4bGxE+IMKSNIl+DIF9TMdmOCvKOBgRKsoOibZNfW +MJIeQwff/8LMFWReAjOxcf9Bu2EqOqkLmUV72FU+Weta8r2kuFhgryqvz1rZeZzQ +jP6OsscoY2FVt/TnvUL5cCOSTpKuQLSr8pDms3OuFIyhFkUinpGbgJQ83xQO1tRh +MGiA87lahsLECTKXsLPyFMMPZ/QQuoDmuUHNkR2deOLcYRSWIBy23PctuV893gbM +2sFTprWo1PKXSmFUd3lg6G5wSM2XRQAP81CTA3Hp8Fj5XCpOHa4HFQLxDwIDAQAB +oy8wLTAaBgNVHREEEzARgg9EaXZpbmVBdXRob3JpdHkwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAQEAKsrUnYBJyyEIPXkWaemR5vmp0G+V6Xz3KvPB +hLYKRONMba8xFwrjRv7b0DNAws8TcXXOKtRtJWbnSIMGhfVESF6ohqEdn+J1crXs +2RpJgyF2u+l6gg9Sg2ngYMQYBkzjAHYTroO/itI4AWLPLHpgygzz8ho6ykWpDoxJ +QfrrtHCl90zweYDhl4g2joIOJSZdd36+Nx9f2guItRMN87EZy1mOrKs94HlW9jwj +mAfiGaYhgFn4JH2jVcZu4wVJErh4Z0A3UNNyOq4zlAq8pHa/54jerHTDB49UQbaI +vZ5PsZhTZLy3FImSbe25xMUZNTt/2MMjsQwSjwiQuxLSuicJAA== +-----END CERTIFICATE-----''' + +TEST_CERT = '''-----BEGIN CERTIFICATE----- +MIIEdjCCA16gAwIBAgIUPmsr+BnLb6Yy22Zg6hkXn1B6KZcwDQYJKoZIhvcNAQEL +BQAwRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENlcnRpZmljYXRlIEF1 +dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTAeFw0yMTA2MjExMTU4MzNaFw0yMjA2 +MjExMDU5MDJaMD4xPDA6BgNVBAMTM2p1anUtOGMzOTI5LXphemEtZWZjMDU2ZjE2 +NmNkLTAucHJvamVjdC5zZXJ2ZXJzdGFjazCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANW0NkSLH53M2Aok6lxN4qSSUDTnIWeuKsemLp7FwZn6zN7fRa4V +utuGWbeYahdSIY6AG3w5opCyijM/+L4+HWoY5BWGFPj/U5V4CDF9jOerNDcoxKDy ++h+CbJ324xJrCBOjMyW8wqK/lzCadQzy6DymOtK0RBJNHXsXiGWta7UMFo2AZcqM +8OkOd0HkBeDM90dzTRSuy3pvqNBKmpwG4Hmg/ESh7VuobuHTtkD2/sGEVMGoXm7Q +qk6Yf8POzNqdPoHzvY40uZWqL3OwedGWDrnNbH4sTYb1xB7fwBthvs+LNPUDzRXA 
+NOYlKsfRrsiH9ELyMWUfarKXxg+7JelBIdECAwEAAaOCAWMwggFfMA4GA1UdDwEB +/wQEAwIDqDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYE +FEpYZVtgGevbnUrzWsjAXZix5zgzMEoGCCsGAQUFBwEBBD4wPDA6BggrBgEFBQcw +AoYuaHR0cDovLzE3Mi4yMC4wLjExOTo4MjAwL3YxL2NoYXJtLXBraS1sb2NhbC9j +YTCBgAYDVR0RBHkwd4IZY2VwaC1kYXNoYm9hcmQuemF6YS5sb2NhbIIfanVqdS04 +YzM5MjktemF6YS1lZmMwNTZmMTY2Y2QtMIIzanVqdS04YzM5MjktemF6YS1lZmMw +NTZmMTY2Y2QtMC5wcm9qZWN0LnNlcnZlcnN0YWNrhwSsFAD9MEAGA1UdHwQ5MDcw +NaAzoDGGL2h0dHA6Ly8xNzIuMjAuMC4xMTk6ODIwMC92MS9jaGFybS1wa2ktbG9j +YWwvY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQBRUsmnc5fnNh1TSO1hVdpYBo6SRqdN +VPuG3EV6QYPGnqadzGTr3uREUyZdkOUu4nhqDONMTdlfCwg744AIlY+eo2tpiNEp +GOeFV0qZOiGRq7q2kllCTYCnh7hKCTCSN17o9QDTCL6w46cmH5OXo84BHkozdBiO +cHPQ+uJ/VZaRCuOIlVS4Y4vTDB0LpNX2nHC/tMYL0zA5+pu+N6e8OWcCgKwObdh5 +38iuimYbbwv2QWBD+4eQUbxY0+TXlhdg42Um41N8BVdPapNAQRXIHrZJC5P6fXqX +uoZ6TvbI2U0GSfpjScPP5D2F6tWK7/3nbA8bPLUJ1MKDofBVtrlA4PIH +-----END CERTIFICATE-----''' + +TEST_KEY = '''-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA1bQ2RIsfnczYCiTqXE3ipJJQNOchZ64qx6YunsXBmfrM3t9F +rhW624ZZt5hqF1IhjoAbfDmikLKKMz/4vj4dahjkFYYU+P9TlXgIMX2M56s0NyjE +oPL6H4JsnfbjEmsIE6MzJbzCor+XMJp1DPLoPKY60rREEk0dexeIZa1rtQwWjYBl +yozw6Q53QeQF4Mz3R3NNFK7Lem+o0EqanAbgeaD8RKHtW6hu4dO2QPb+wYRUwahe +btCqTph/w87M2p0+gfO9jjS5laovc7B50ZYOuc1sfixNhvXEHt/AG2G+z4s09QPN +FcA05iUqx9GuyIf0QvIxZR9qspfGD7sl6UEh0QIDAQABAoIBAAHqAk5s3JSiQBEf +MYYwIGaO9O70XwU5tyJgp6w+YzSI3Yrlfw9HHIxY0LbnQ5P/5VMMbLKZJY6cOsao +vQafMc5AeNKEh+2PA+Wj1Jb04+0zSF1yHQjABGOB3I0xp+kDUmgynwOohCnHA4io +6YF7L39TkdVPTgjH7gqrNEqM2hkeBWg1LY5QARDtz6Nj10LRtpQXjx/zwfGfzV2c +TGpO8ArfPLS+a7LAJ+E+iSgDUX272Fd7DYAv7xRcRe8991umpqFzbY8FDigLWEdd +3muWnRsJjricYM+2OO0QO8fyKhWCE31Dvc0xMLgrSTWoZAl8t7/WxyowevuVAm5o +oclYFU0CgYEA4M6seEB/neaqAWMIshwIcwZWaLy7oQAQagjXbKohSAXNlYqgTuv7 +glk0P6uzeQOu0ejipwga6mQIc093WSzpG1sdT4bBysHS0b44Gx/6Cv0Jf6hmJGcU +wNo3XV8b0rHZ+KWDCfr1dUjxCA9rR2fOTJniCh9Ng28cyhrFyZ6HaUcCgYEA81sj +Z3ATs2uMxZePmGMWxOJqbQ+bHaoE+UG1dTQIVO//MmanJm3+o4ciH46D2QRWkYha +4Eqb5wnPKCQjun8JDpwgkLkd0EGGG4uJ6E6YqL3I0+cs5lwMWJ9M3oOaFGGoFAoP +V9lgz5f3yVdSChoubklS4KLeCiAojW/qX1rrKCcCgYEAuALz0YqZ6xm/1lrF52Ri +1iQ93oV934854FFUZDHuBBIb8WgDSBaJTGzQA737rfaBxngl7isIPQucjyZgvrGw +LSArocjgH6L/eYeGTU2jUhNFDyU8Vle5+RGld9w93fyOOqTf2e99s379LGfSnCQw +DSt4hmiQ/iCZJCU9+Ia2uEkCgYAGsPjWPUStaEWkoTg3jnHv0/HtMcKoHCaq292r +bVTVUQwJTL1H1zprMKoFiBuj+fSPZ9pn1GVZAvIJPoUk+Z08I5rZn91r/oE7fKi8 +FH0qFp3RBcg8RUepoCey7pdr/AttEaG+XqHE037isF33HSUtryJyPsgwKxYyXWNq +X8ubfQKBgBwIpk7N754lN0i6V08Dadz0BlpfFYGO/ZfTmvVrPUxwehogtvpGnjhO +xPs1epK65/vHbBtaUDExayOEIvVhVWcnaXdx3z1aw/Hr29NlOi62x4g/RRSloLZH +08UCW9F5C8Ian6kglB5bPrZiJxcmssj7vSA+O6k9BjsO+ebaSRgk +-----END RSA PRIVATE KEY-----''' + +TEST_CHAIN = '''-----BEGIN CERTIFICATE----- +MIIDADCCAeigAwIBAgIUN93XI0mOu3wkX5YureWnMImedUMwDQYJKoZIhvcNAQEL +BQAwGjEYMBYGA1UEAwwPRGl2aW5lQXV0aG9yaXR5MB4XDTIxMDYyMzEwMzcwMFoX +DTMyMDYwNjEwMzcwMFowRTFDMEEGA1UEAxM6VmF1bHQgSW50ZXJtZWRpYXRlIENl +cnRpZmljYXRlIEF1dGhvcml0eSAoY2hhcm0tcGtpLWxvY2FsKTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL1t5WYd7IVsfT5d4uztBhOPBA0EtrKw81Fe +Rp2TNdPUkkKSQxOYKV6F1ndyD88Nxx1mcxwi8U28b1azTNVaPRjSLxyDCOD0L5qk +LaFqppTWv8vLcjjlp6Ed3BLXoVMThWwMxJm/VSPuEXnWN5GrMR97Ae8vmnlrYDTF +re67j0zjDPhkyevVQ5+pLeZ/saQtNNeal1qzfWMPDQK0COfXolXmlmZGzhap742e +x4gE6alyYYrpTPA6CL9NbGhNovuz/LJvHN8fIdfw3jX+GW+yy312xDG+67PCW342 +VDrPcG+Vq/BhEPwL3blYgbmtNPDQ1plWJqoPqoJzbCxLesXZHP8CAwEAAaMTMBEw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEARv1bBEgwlDG3PuhF +Zt5kIeDLEnjFH2STz4LLERZXdKhTzuaV08QvYr+cL8XHi4Sop5BDkAuQq8mVC/xj 
+7DoW/Lb9SnxfsCIu6ugwKLfJ2El6r23kDzTauIaovDYNSEo21yBYALsFZjzMJotJ +XLpLklASTAdMmLP703hcgKgY8yxzS3WEXA9jekmn6z0y3+UZjIF5W9dW9gaQk0Eg +vsLN7xzG9TmQfk1OHUj7y+cEbYr0M3Jdif/gG8Kl2SuaYUmvU6leA5+oZVF/Inle +jdSckxCCd1rbvGd60AY5azD1pAuazijwW9Y9Icv2tS5oZI/4MN7YJEssj/ZLjEA7 +Alm0ZQ== +-----END CERTIFICATE-----''' + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super().setUp() + self.patches = patches + self.obj = obj + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class _CephDashboardCharm(charm.CephDashboardCharm): + + def _get_bind_ip(self): + return '10.0.0.10' + + +class TestCephDashboardCharmBase(CharmTestCase): + + PATCHES = [ + 'ceph_utils', + 'socket', + 'subprocess' + ] + + def setUp(self): + super().setUp(charm, self.PATCHES) + self.harness = Harness( + _CephDashboardCharm, + ) + + # BEGIN: Workaround until network_get is implemented + class _TestingOPSModelBackend(_TestingModelBackend): + + def network_get(self, endpoint_name, relation_id=None): + network_data = { + 'bind-addresses': [{ + 'interface-name': 'eth0', + 'addresses': [{ + 'cidr': '10.0.0.0/24', + 'value': '10.0.0.10'}]}], + 'ingress-addresses': ['10.0.0.10'], + 'egress-subnets': ['10.0.0.0/24']} + return network_data + + self.harness._backend = _TestingOPSModelBackend( + self.harness._unit_name, self.harness._meta) + self.harness._model = model.Model( + self.harness._meta, + self.harness._backend) + self.harness._framework = framework.Framework( + ":memory:", + self.harness._charm_dir, + self.harness._meta, + self.harness._model) + # END Workaround + self.socket.gethostname.return_value = 'server1' + self.socket.getfqdn.return_value = 'server1.local' + + def test_init(self): + self.harness.begin() + self.assertFalse(self.harness.charm._stored.is_started) + + def test__on_ca_available(self): + rel_id = self.harness.add_relation('certificates', 'vault') + self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'vault/0') + self.harness.update_relation_data( + rel_id, + 'vault/0', + {'ingress-address': '10.0.0.3'}) + rel_data = self.harness.get_relation_data(rel_id, 'ceph-dashboard/0') + self.assertEqual( + rel_data['cert_requests'], + '{"server1.local": {"sans": ["10.0.0.10", "server1"]}}') + + def test_check_dashboard(self): + socket_mock = MagicMock() + self.socket.socket.return_value = socket_mock + socket_mock.connect_ex.return_value = 0 + self.ceph_utils.is_dashboard_enabled.return_value = True + self.harness.begin() + self.assertEqual( + self.harness.charm.check_dashboard(), + ActiveStatus()) + + socket_mock.connect_ex.return_value = 1 + self.assertEqual( + self.harness.charm.check_dashboard(), + BlockedStatus('Dashboard not responding')) + + socket_mock.connect_ex.return_value = 0 + self.ceph_utils.is_dashboard_enabled.return_value = False + self.assertEqual( + self.harness.charm.check_dashboard(), + BlockedStatus('Dashboard is not enabled')) + + def test_kick_dashboard(self): + self.harness.begin() + self.harness.charm.kick_dashboard() + self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() + self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + + def test__configure_dashboard(self): + self.harness.begin() + + self.ceph_utils.is_dashboard_enabled.return_value = True + self.harness.set_leader(False) + self.harness.charm._configure_dashboard(None) + 
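+        # A non-leader unit must not enable the dashboard, but it still
+        # publishes its own server_addr.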
self.assertFalse(self.ceph_utils.mgr_enable_dashboard.called) + self.ceph_utils.mgr_config_set.assert_called_once_with( + 'mgr/dashboard/server1/server_addr', + '10.0.0.10') + + self.ceph_utils.mgr_config_set.reset_mock() + self.ceph_utils.is_dashboard_enabled.return_value = True + self.harness.set_leader() + self.harness.charm._configure_dashboard(None) + self.assertFalse(self.ceph_utils.mgr_enable_dashboard.called) + self.ceph_utils.mgr_config_set.assert_called_once_with( + 'mgr/dashboard/server1/server_addr', + '10.0.0.10') + + self.ceph_utils.mgr_config_set.reset_mock() + self.ceph_utils.is_dashboard_enabled.return_value = False + self.harness.set_leader() + self.harness.charm._configure_dashboard(None) + self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + self.ceph_utils.mgr_config_set.assert_called_once_with( + 'mgr/dashboard/server1/server_addr', + '10.0.0.10') + + def test__get_bind_ip(self): + self.harness.begin() + self.assertEqual( + self.harness.charm._get_bind_ip(), + '10.0.0.10') + + @patch('socket.gethostname') + def test__on_tls_server_config_ready(self, _gethostname): + mock_TLS_KEY_PATH = MagicMock() + mock_TLS_CERT_PATH = MagicMock() + mock_TLS_CA_CERT_PATH = MagicMock() + _gethostname.return_value = 'server1' + rel_id = self.harness.add_relation('certificates', 'vault') + self.harness.begin() + self.harness.set_leader() + self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH + self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH + self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH + self.harness.add_relation_unit( + rel_id, + 'vault/0') + self.harness.update_relation_data( + rel_id, + 'vault/0', + { + 'ceph-dashboard_0.server.cert': TEST_CERT, + 'ceph-dashboard_0.server.key': TEST_KEY, + 'chain': TEST_CHAIN, + 'ca': TEST_CA}) + mock_TLS_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_CA_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_KEY_PATH.write_bytes.assert_called_once() + self.subprocess.check_call.assert_called_once_with( + ['update-ca-certificates']) + self.ceph_utils.dashboard_set_ssl_certificate.assert_has_calls([ + call(mock_TLS_CERT_PATH, hostname='server1'), + call(mock_TLS_CERT_PATH)]) + self.ceph_utils.dashboard_set_ssl_certificate_key.assert_has_calls([ + call(mock_TLS_KEY_PATH, hostname='server1'), + call(mock_TLS_KEY_PATH)]) + self.ceph_utils.mgr_config_set.assert_has_calls([ + call('mgr/dashboard/standby_behaviour', 'redirect'), + call('mgr/dashboard/ssl', 'true')]) + self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() + self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + + @patch.object(charm.secrets, 'choice') + def test__gen_user_password(self, _choice): + self.harness.begin() + _choice.return_value = 'r' + self.assertEqual( + self.harness.charm._gen_user_password(), + 'rrrrrrrr') + + @patch.object(charm.tempfile, 'NamedTemporaryFile') + @patch.object(charm.secrets, 'choice') + def test__add_user_action(self, _choice, _NTFile): + self.subprocess.check_output.return_value = b'' + _NTFile.return_value.__enter__.return_value.name = 'tempfilename' + _choice.return_value = 'r' + self.harness.begin() + action_event = MagicMock() + action_event.params = { + 'username': 'auser', + 'role': 'administrator'} + self.harness.charm._add_user_action(action_event) + self.subprocess.check_output.assert_called_once_with( + ['ceph', 'dashboard', 'ac-user-create', '--enabled', + '-i', 'tempfilename', 'auser', 'administrator']) diff --git a/ceph-dashboard/unit_tests/test_interface_api_endpoints.py 
b/ceph-dashboard/unit_tests/test_interface_api_endpoints.py new file mode 100644 index 00000000..69f576d0 --- /dev/null +++ b/ceph-dashboard/unit_tests/test_interface_api_endpoints.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import json +import unittest +import sys +sys.path.append('lib') # noqa +sys.path.append('src') # noqa +from ops.testing import Harness +from ops.charm import CharmBase +import interface_api_endpoints + + +class TestAPIEndpointsRequires(unittest.TestCase): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.seen_events = [] + self.ingress = interface_api_endpoints.APIEndpointsRequires( + self, + 'loadbalancer', + { + 'endpoints': [{ + 'service-type': 'ceph-dashboard', + 'frontend-port': 8443, + 'backend-port': 8443, + 'backend-ip': '10.0.0.10', + 'check-type': 'httpd'}]}) + + def setUp(self): + super().setUp() + self.harness = Harness( + self.MyCharm, + meta=''' +name: my-charm +requires: + loadbalancer: + interface: api-endpoints +''' + ) + self.eps = [{ + 'service-type': 'ceph-dashboard', + 'frontend-port': 8443, + 'backend-port': 8443, + 'backend-ip': '10.0.0.10', + 'check-type': 'httpd'}] + + def add_loadbalancer_relation(self): + rel_id = self.harness.add_relation( + 'loadbalancer', + 'service-loadbalancer') + self.harness.add_relation_unit( + rel_id, + 'service-loadbalancer/0') + self.harness.update_relation_data( + rel_id, + 'service-loadbalancer/0', + {'ingress-address': '10.0.0.3'}) + return rel_id + + def test_init(self): + self.harness.begin() + self.assertEqual( + self.harness.charm.ingress.config_dict, + {'endpoints': self.eps}) + self.assertEqual( + self.harness.charm.ingress.relation_name, + 'loadbalancer') + + def test__on_relation_changed(self): + self.harness.begin() + rel_id = self.add_loadbalancer_relation() + rel_data = self.harness.get_relation_data( + rel_id, + 'my-charm/0') + self.assertEqual( + rel_data['endpoints'], + json.dumps(self.eps)) + + def test_update_config(self): + self.harness.begin() + rel_id = self.add_loadbalancer_relation() + new_eps = copy.deepcopy(self.eps) + new_eps.append({ + 'service-type': 'ceph-dashboard', + 'frontend-port': 9443, + 'backend-port': 9443, + 'backend-ip': '10.0.0.10', + 'check-type': 'https'}) + self.harness.charm.ingress.update_config( + {'endpoints': new_eps}) + rel_data = self.harness.get_relation_data( + rel_id, + 'my-charm/0') + self.assertEqual( + rel_data['endpoints'], + json.dumps(new_eps)) + + +class TestAPIEndpointsProvides(unittest.TestCase): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.seen_events = [] + self.api_eps = interface_api_endpoints.APIEndpointsProvides(self) + self.framework.observe( + self.api_eps.on.ep_ready, + self._log_event) + + def _log_event(self, event): + self.seen_events.append(type(event).__name__) + + def setUp(self): + super().setUp() + self.harness = Harness( + self.MyCharm, + meta=''' 
+name: my-charm
+provides:
+  loadbalancer:
+    interface: api-endpoints
+'''
+        )
+
+    def test_on_changed(self):
+        self.harness.begin()
+        # No EndpointDataEvent as relation is absent
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            [])
+        rel_id = self.harness.add_relation('loadbalancer', 'ceph-dashboard')
+        self.harness.add_relation_unit(
+            rel_id,
+            'ceph-dashboard/0')
+        self.harness.update_relation_data(
+            rel_id,
+            'ceph-dashboard/0',
+            {'ingress-address': '10.0.0.3'})
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            ['EndpointDataEvent'])
diff --git a/ceph-dashboard/unit_tests/test_interface_dashboard.py b/ceph-dashboard/unit_tests/test_interface_dashboard.py
new file mode 100644
index 00000000..9e18777b
--- /dev/null
+++ b/ceph-dashboard/unit_tests/test_interface_dashboard.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import sys
+sys.path.append('lib')  # noqa
+sys.path.append('src')  # noqa
+from ops.testing import Harness
+from ops.charm import CharmBase, CharmMeta
+import interface_dashboard
+
+
+class MyCharm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.meta = CharmMeta.from_yaml(metadata='''
+name: my-charm
+requires:
+  dashboard:
+    interface: ceph-dashboard
+    scope: container
+''')
+
+        self.seen_events = []
+        self.mon = interface_dashboard.CephDashboardRequires(
+            self,
+            'dashboard')
+
+        self.framework.observe(
+            self.mon.on.mon_ready,
+            self._log_event)
+
+    def _log_event(self, event):
+        self.seen_events.append(type(event).__name__)
+
+
+class TestCephDashboardRequires(unittest.TestCase):
+
+    def setUp(self):
+        super().setUp()
+        self.harness = Harness(
+            MyCharm,
+        )
+
+    def add_dashboard_relation(self):
+        rel_id = self.harness.add_relation('dashboard', 'ceph-mon')
+        self.harness.add_relation_unit(
+            rel_id,
+            'ceph-mon/0')
+        return rel_id
+
+    def test_relation_name(self):
+        self.harness.begin()
+        self.assertEqual(
+            self.harness.charm.mon.relation_name,
+            'dashboard')
+
+    def test_dashboard_relation(self):
+        self.harness.begin()
+        self.assertIsNone(
+            self.harness.charm.mon.dashboard_relation)
+        rel_id = self.add_dashboard_relation()
+        self.assertEqual(
+            self.harness.charm.mon.dashboard_relation.id,
+            rel_id)
+
+    def test_on_changed(self):
+        self.harness.begin()
+        # No MonReadyEvent as relation is absent
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            [])
+        rel_id = self.add_dashboard_relation()
+        # No MonReadyEvent as ceph-mon has not declared it is ready.
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            [])
+        self.harness.update_relation_data(
+            rel_id,
+            'ceph-mon/0',
+            {'mon-ready': 'True'})
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            ['MonReadyEvent'])

From 08ef16a4a1b97e8baee346c8bbef3f92f08d51f9 Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Wed, 3 Mar 2021 20:13:04 +0000
Subject: [PATCH 2232/2699] Test bundles for focal-wallaby and hirsute-wallaby

These are the test bundles (and any associated changes) for
focal-wallaby and hirsute-wallaby support.

The hirsute-wallaby test is disabled (moved to dev) due to [1], as the
bundle may reference a reactive charm.

[1] https://github.com/juju-solutions/layer-basic/issues/194

Sync charm-helpers.

Also pin the prometheus2 charm in bundles to the focal series as it
does not support groovy or hirsute.

Change-Id: I75602720f8e30be647aa8a1227d565c692ff8170
Co-authored-by: Aurelien Lourot
---
 ceph-mon/tests/bundles/focal-wallaby.yaml   | 235 +++++++++++++++++++
 ceph-mon/tests/bundles/groovy-victoria.yaml |   2 +
 ceph-mon/tests/bundles/hirsute-wallaby.yaml | 237 ++++++++++++++++++++
 ceph-mon/tests/tests.yaml                   |  11 +-
 4 files changed, 484 insertions(+), 1 deletion(-)
 create mode 100644 ceph-mon/tests/bundles/focal-wallaby.yaml
 create mode 100644 ceph-mon/tests/bundles/hirsute-wallaby.yaml

diff --git a/ceph-mon/tests/bundles/focal-wallaby.yaml b/ceph-mon/tests/bundles/focal-wallaby.yaml
new file mode 100644
index 00000000..21ce4e6f
--- /dev/null
+++ b/ceph-mon/tests/bundles/focal-wallaby.yaml
@@ -0,0 +1,235 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-wallaby
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+
+
+applications:
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  nova-cloud-controller-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  placement-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    storage:
+      osd-devices: '10G'
+    options:
+      source: *openstack-origin
+      osd-devices: '/dev/test-non-existent'
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-mon:
+    charm: ../../../ceph-mon
+    num_units: 3
+    options:
+      source: *openstack-origin
+      monitor-count: '3'
+    to:
+      - '6'
+      - '7'
+      - '8'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  keystone:
+    expose: True
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '10'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      libvirt-image-backend: rbd
+    to:
+      - '11'
+
+  glance:
+    expose: True
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '12'
+
+  cinder:
+    expose: True
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/groovy-victoria.yaml b/ceph-mon/tests/bundles/groovy-victoria.yaml index 49b42437..5321a42c 100644 --- a/ceph-mon/tests/bundles/groovy-victoria.yaml +++ b/ceph-mon/tests/bundles/groovy-victoria.yaml @@ -26,6 +26,7 @@ machines: '14': '15': '16': + series: focal applications: @@ -145,6 +146,7 @@ applications: # Pin prometheus2 charm version Bug #1891942 charm: cs:prometheus2-18 num_units: 1 + series: focal to: - '16' diff --git a/ceph-mon/tests/bundles/hirsute-wallaby.yaml b/ceph-mon/tests/bundles/hirsute-wallaby.yaml new file mode 100644 index 00000000..17ee2e0a --- /dev/null +++ b/ceph-mon/tests/bundles/hirsute-wallaby.yaml @@ -0,0 +1,237 @@ +variables: + openstack-origin: &openstack-origin distro + +series: hirsute + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + series: focal + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + series: focal + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 
'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index de2b01c0..9b41ba55 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,6 +1,8 @@ charm_name: ceph-mon + gate_bundles: - groovy-victoria + - focal-wallaby - focal-victoria - focal-ussuri-ec - focal-ussuri @@ -10,16 +12,21 @@ gate_bundles: - bionic-stein - bionic-queens - xenial-mitaka + dev_bundles: - trusty-mitaka - xenial-ocata - xenial-pike - xenial-queens - bionic-rocky + - hirsute-wallaby + smoke_bundles: - bionic-train + configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image + tests: - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest @@ -27,6 +34,8 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest + tests_options: force_deploy: - - groovy-victoria + - trusty-mitaka + - hirsute-wallaby From 5db2d018dfba0ae16f763f12132dd3f155ad28a4 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 3 Jun 2021 11:07:47 +0200 Subject: [PATCH 2233/2699] Add impish to metadata.yaml Change-Id: I1651a644ae99228d94de224aef110f6a1835d635 --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 3ec5a622..5c9e306d 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -15,6 +15,7 @@ series: - focal - groovy - hirsute +- impish peers: mon: interface: ceph From d4b374b9af02ef266d30e4d2a7612710051fc98d Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 5 May 2021 13:56:57 +0100 Subject: [PATCH 2234/2699] Enable support for beast frontend Introduce support for the beast web frontend for the Ceph RADOS Gateway which brings improvements to speed and scalability. Default behaviour is changed in that for Octopus and later (aside from some unsupported architectures) beast is enabled by default; for older releases civetweb is still used. This may be overridden using the 'http-frontend' configuration option which accepts either 'beast' or 'civetweb' as valid values. 'beast' is only supported with Ceph Mimic or later. 
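As a minimal sketch of the selection rule just described (illustrative Python only, not the charm's code; the real implementation is resolve_http_frontend() in the diff below, which compares radosgw package versions rather than taking release majors as arguments):

    def pick_frontend(radosgw_major, machine_arch):
        # Beast is the default from Octopus (15) onward, except on architectures
        # where Octopus lacks beast builds; Pacific (16) supports it everywhere.
        unsupported_octopus_archs = ('s390x', 'riscv64')
        if radosgw_major >= 16:
            return 'beast'
        if radosgw_major == 15 and machine_arch not in unsupported_octopus_archs:
            return 'beast'
        return 'civetweb'

For example, pick_frontend(15, 's390x') resolves to 'civetweb' while pick_frontend(16, 's390x') resolves to 'beast'.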
Closes-Bug: 1865396 Change-Id: Ib73e58e21219eca611cd4293da69bf80040f5803 --- ceph-radosgw/config.yaml | 7 ++ ceph-radosgw/hooks/ceph_radosgw_context.py | 68 ++++++++++++++++++- ceph-radosgw/hooks/hooks.py | 7 +- ceph-radosgw/templates/ceph.conf | 2 +- .../unit_tests/test_ceph_radosgw_context.py | 68 ++++++++++++++++++- 5 files changed, 148 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index a77c5208..422cc903 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -509,3 +509,10 @@ options: NOTE: X-Versions-Location is the only versioning-related header that radosgw interprets. X-History-Location, supported by native OpenStack Swift, is currently not supported by radosgw. + http-frontend: + type: string + default: + description: | + Frontend HTTP engine to use for the Ceph RADOS Gateway; For Octopus and + later this defaults to 'beast' and for older releases (and on architectures + where beast is not supported) 'civetweb'. diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 9f6063d1..197556f6 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -23,10 +23,14 @@ determine_api_port, determine_apache_port, ) -from charmhelpers.core.host import cmp_pkgrevno +from charmhelpers.core.host import ( + cmp_pkgrevno, + arch, +) from charmhelpers.core.hookenv import ( DEBUG, WARNING, + ERROR, config, log, related_units, @@ -44,6 +48,12 @@ import utils +BEAST_FRONTEND = 'beast' +CIVETWEB_FRONTEND = 'civetweb' +SUPPORTED_FRONTENDS = (BEAST_FRONTEND, CIVETWEB_FRONTEND) +UNSUPPORTED_BEAST_ARCHS = ('s390x', 'riscv64') + + class ApacheSSLContext(context.ApacheSSLContext): interfaces = ['https'] service_namespace = 'ceph-radosgw' @@ -142,6 +152,55 @@ def ensure_host_resolvable_v6(hostname): shutil.rmtree(dtmp) +def resolve_http_frontend(): + """Automatically determine the HTTP frontend configuration + + Determines the best HTTP frontend configuration based + on the Ceph release in use and the architecture of the + machine being used. + + :returns http frontend configuration to use. 
+ :rtype: str + """ + octopus_or_later = cmp_pkgrevno('radosgw', '15.2.0') >= 0 + pacific_or_later = cmp_pkgrevno('radosgw', '16.2.0') >= 0 + if octopus_or_later: + # Pacific or later supports beast on all architectures + # but octopus does not support s390x or riscv64 + if not pacific_or_later and arch() in UNSUPPORTED_BEAST_ARCHS: + return CIVETWEB_FRONTEND + else: + return BEAST_FRONTEND + return CIVETWEB_FRONTEND + + +def validate_http_frontend(frontend_config): + """Validate HTTP frontend configuration + + :param frontend_config: user provided config value + :type: str + :raises: ValueError if the provided config is not valid + """ + mimic_or_later = cmp_pkgrevno('radosgw', '13.2.0') >= 0 + pacific_or_later = cmp_pkgrevno('radosgw', '16.2.0') >= 0 + if frontend_config not in SUPPORTED_FRONTENDS: + e = ('Please provide either civetweb or beast for ' + 'http-frontend configuration') + log(e, level=ERROR) + raise ValueError(e) + if frontend_config == BEAST_FRONTEND: + if not mimic_or_later: + e = ('Use of the beast HTTP frontend requires Ceph ' + 'mimic or later.') + log(e, level=ERROR) + raise ValueError(e) + if not pacific_or_later and arch() in UNSUPPORTED_BEAST_ARCHS: + e = ('Use of the beast HTTP frontend on {} requires Ceph ' + 'pacific or later.'.format(arch())) + log(e, level=ERROR) + raise ValueError(e) + + class MonContext(context.CephContext): interfaces = ['mon'] @@ -191,6 +250,12 @@ def __call__(self): if config('prefer-ipv6'): port = "[::]:%s" % (port) + http_frontend = config('http-frontend') + if not http_frontend: + http_frontend = resolve_http_frontend() + else: + validate_http_frontend(http_frontend) + mon_hosts.sort() ctxt = { 'auth_supported': auth, @@ -210,6 +275,7 @@ def __call__(self): 'unit_public_ip': unit_public_ip(), 'fsid': fsid, 'rgw_swift_versioning': config('rgw-swift-versioning-enabled'), + 'frontend': http_frontend, } # NOTE(dosaboy): these sections must correspond to what is supported in diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 040a71e2..abc30460 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -42,6 +42,7 @@ is_leader, leader_set, leader_get, + WORKLOAD_STATES, ) from charmhelpers.fetch import ( apt_update, @@ -747,4 +748,8 @@ def process_multisite_relations(): hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) - assess_status(CONFIGS) + except ValueError as e: + # Handle any invalid configuration values + status_set(WORKLOAD_STATES.BLOCKED, str(e)) + else: + assess_status(CONFIGS) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index c255d362..29b5e26b 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -39,7 +39,7 @@ rgw_zone = {{ rgw_zone }} {% endif %} rgw init timeout = 1200 -rgw frontends = civetweb port={{ port }} +rgw frontends = {{ frontend }} port={{ port }} {% if auth_type == 'keystone' %} rgw keystone url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/ rgw keystone admin user = {{ admin_user }} diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 480f208c..6985571e 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -17,6 +17,7 @@ import ceph_radosgw_context as context import charmhelpers import charmhelpers.contrib.storage.linux.ceph as ceph +import charmhelpers.fetch as fetch from test_utils import 
CharmTestCase @@ -27,6 +28,7 @@ 'relation_ids', 'related_units', 'cmp_pkgrevno', + 'arch', 'socket', 'unit_public_ip', 'determine_api_port', @@ -42,6 +44,7 @@ def setUp(self): self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get self.cmp_pkgrevno.return_value = 1 + self.arch.return_value = 'amd64' @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') @@ -356,7 +359,8 @@ def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) self.config.side_effect = self.test_config.get self.unit_public_ip.return_value = '10.255.255.255' - self.cmp_pkgrevno.return_value = 1 + self.cmp_pkgrevno.side_effect = lambda *args: 1 + self.arch.return_value = 'amd64' @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @@ -395,6 +399,7 @@ def _relation_get(attr, unit, rid): 'rgw_zone': 'default', 'fsid': 'testfsid', 'rgw_swift_versioning': False, + 'frontend': 'beast', } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -444,6 +449,7 @@ def _relation_get(attr, unit, rid): 'rgw_zone': 'default', 'fsid': 'testfsid', 'rgw_swift_versioning': False, + 'frontend': 'beast', } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -502,6 +508,7 @@ def _relation_get(attr, unit, rid): 'rgw_zone': 'default', 'fsid': 'testfsid', 'rgw_swift_versioning': False, + 'frontend': 'beast', } self.assertEqual(expect, mon_ctxt()) @@ -542,9 +549,68 @@ def _relation_get(attr, unit, rid): 'rgw_zone': 'default', 'fsid': 'testfsid', 'rgw_swift_versioning': False, + 'frontend': 'beast', } self.assertEqual(expect, mon_ctxt()) + def test_resolve_http_frontend(self): + _test_version = '12.2.0' + + def _compare_version(package, version): + return fetch.apt_pkg.version_compare( + _test_version, version + ) + + # Older releases, default and invalid configuration + self.cmp_pkgrevno.side_effect = _compare_version + self.assertEqual('civetweb', context.resolve_http_frontend()) + + # Default for Octopus but not Pacific + _test_version = '15.2.0' + self.assertEqual('beast', context.resolve_http_frontend()) + + self.arch.return_value = 's390x' + self.assertEqual('civetweb', context.resolve_http_frontend()) + + # Default for Pacific and later + _test_version = '16.2.0' + self.assertEqual('beast', context.resolve_http_frontend()) + self.arch.return_value = 'amd64' + self.assertEqual('beast', context.resolve_http_frontend()) + + def test_validate_http_frontend(self): + _test_version = '12.2.0' + + def _compare_version(package, version): + return fetch.apt_pkg.version_compare( + _test_version, version + ) + + self.cmp_pkgrevno.side_effect = _compare_version + + # Invalid configuration option + with self.assertRaises(ValueError): + context.validate_http_frontend('foobar') + + # beast config but ceph pre mimic + with self.assertRaises(ValueError): + context.validate_http_frontend('beast') + + # Mimic with valid configuration + _test_version = '13.2.0' + context.validate_http_frontend('beast') + context.validate_http_frontend('civetweb') + + # beast config on unsupported s390x/octopus + _test_version = '15.2.0' + self.arch.return_value = 's390x' + with self.assertRaises(ValueError): + context.validate_http_frontend('beast') + + # beast config on s390x/pacific + _test_version = '16.2.0' + context.validate_http_frontend('beast') + class ApacheContextTest(CharmTestCase): From db7ed1421beb4725db31a70f8d159a9f0d7a86d9 Mon Sep 17 
00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:51:58 +0200 Subject: [PATCH 2235/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: I1eb0e1b428c1b850326202af03747826f2029336 --- ceph-fs/requirements.txt | 4 ---- ceph-fs/src/test-requirements.txt | 6 ------ ceph-fs/test-requirements.txt | 12 ++++++++---- ceph-fs/tox.ini | 3 ++- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 46b4e990..b786b428 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -11,10 +11,6 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb # Build requirements charm-tools==2.8.3 -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 - simplejson # Newer versions use keywords that didn't exist in python 3.5 yet (e.g. diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 520681e1..e7710236 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -3,12 +3,6 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # -# pep8 requirements -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 3f085244..af069e1b 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -4,8 +4,6 @@ # https://github.com/openstack-charmers/release-tools # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -# Lint and unit test requirements -flake8>=2.2.4 stestr>=2.2.0 @@ -38,10 +36,16 @@ coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack # # Revisit for removal / mock improvement: +# +# NOTE(lourot): newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# netifaces # vault psycopg2-binary # vault tenacity # vault -pbr # vault -cryptography # vault, keystone-saml-mellon +pbr==5.6.0 # vault +cryptography<3.4 # vault, keystone-saml-mellon lxml # keystone-saml-mellon hvac # vault, barbican-vault diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 391b2af8..5c818017 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -62,7 +62,8 @@ commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} src unit_tests [testenv:cover] From 14deea8ada0c041cd337834f8dd9cc24374af761 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:52:07 +0200 Subject: [PATCH 2236/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. 
https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: I012eea8b5f7211fd4302e22bccd518f3b80481c1 --- ceph-mon/requirements.txt | 2 +- ceph-mon/test-requirements.txt | 8 +------- ceph-mon/tox.ini | 4 ++-- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index 360ecbaa..ead6e89a 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -7,7 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -pbr>=1.8.0,<1.9.0 +pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 9aea716b..dba2c767 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -8,11 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 requests>=2.18.4 @@ -21,7 +16,6 @@ requests>=2.18.4 mock>=1.2,<4.0.0; python_version < '3.6' mock>=1.2; python_version >= '3.6' -flake8>=2.2.4 stestr>=2.2.0 # Dependency of stestr. Workaround for @@ -42,7 +36,7 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index ab9593f3..9ba3f9fe 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -65,8 +65,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From 8aa138fc6cd43997a4978319aaa8457c3f8884f5 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:52:17 +0200 Subject: [PATCH 2237/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: Ic2f5c854e7d6f2200941e828c1665bce0970f02b --- ceph-osd/requirements.txt | 2 +- ceph-osd/test-requirements.txt | 8 +------- ceph-osd/tox.ini | 4 ++-- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 360ecbaa..ead6e89a 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -7,7 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -pbr>=1.8.0,<1.9.0 +pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 9aea716b..dba2c767 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -8,11 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. 
# setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 requests>=2.18.4 @@ -21,7 +16,6 @@ requests>=2.18.4 mock>=1.2,<4.0.0; python_version < '3.6' mock>=1.2; python_version >= '3.6' -flake8>=2.2.4 stestr>=2.2.0 # Dependency of stestr. Workaround for @@ -42,7 +36,7 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index ab9593f3..9ba3f9fe 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -65,8 +65,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From 1d48cb5db588f7eb1357554720b59c025b2946c8 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:52:27 +0200 Subject: [PATCH 2238/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: Ie7f197c3d3ce821a2272844029d071dc5a715de4 --- ceph-proxy/requirements.txt | 2 +- ceph-proxy/test-requirements.txt | 8 +------- ceph-proxy/tox.ini | 4 ++-- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index 360ecbaa..ead6e89a 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -7,7 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -pbr>=1.8.0,<1.9.0 +pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 9aea716b..dba2c767 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -8,11 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 requests>=2.18.4 @@ -21,7 +16,6 @@ requests>=2.18.4 mock>=1.2,<4.0.0; python_version < '3.6' mock>=1.2; python_version >= '3.6' -flake8>=2.2.4 stestr>=2.2.0 # Dependency of stestr. 
Workaround for @@ -42,7 +36,7 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index ab9593f3..9ba3f9fe 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -65,8 +65,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From 527b0393cc098222715849aea623514c23ea2f2a Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:52:36 +0200 Subject: [PATCH 2239/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: I1db2d63e04b6d5de2338057d76ecf651fe9cf668 --- ceph-radosgw/requirements.txt | 2 +- ceph-radosgw/test-requirements.txt | 8 +------- ceph-radosgw/tox.ini | 4 ++-- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 360ecbaa..ead6e89a 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -7,7 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -pbr>=1.8.0,<1.9.0 +pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 9aea716b..dba2c767 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -8,11 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 requests>=2.18.4 @@ -21,7 +16,6 @@ requests>=2.18.4 mock>=1.2,<4.0.0; python_version < '3.6' mock>=1.2; python_version >= '3.6' -flake8>=2.2.4 stestr>=2.2.0 # Dependency of stestr. 
Workaround for @@ -42,7 +36,7 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index ab9593f3..9ba3f9fe 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -65,8 +65,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From a33952ace9c24ee3f1305dc6e4939d4622813fbc Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Thu, 22 Jul 2021 14:52:46 +0200 Subject: [PATCH 2240/2699] Sync release-tools In order to fix ResolutionImpossible on tox invocation. https://github.com/openstack-charmers/release-tools/issues/151 https://github.com/openstack-charmers/release-tools/pull/152 Change-Id: I3e5148aabf2dbeb71b94de9c63d0d8ada29293fc --- ceph-rbd-mirror/requirements.txt | 4 ---- ceph-rbd-mirror/src/test-requirements.txt | 6 ------ ceph-rbd-mirror/test-requirements.txt | 12 ++++++++---- ceph-rbd-mirror/tox.ini | 3 ++- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index 46b4e990..b786b428 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -11,10 +11,6 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb # Build requirements charm-tools==2.8.3 -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 - simplejson # Newer versions use keywords that didn't exist in python 3.5 yet (e.g. diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index 520681e1..e7710236 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -3,12 +3,6 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # -# pep8 requirements -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) 
git+https://github.com/openstack-charmers/zaza.git#egg=zaza diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 3f085244..af069e1b 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -4,8 +4,6 @@ # https://github.com/openstack-charmers/release-tools # setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -# Lint and unit test requirements -flake8>=2.2.4 stestr>=2.2.0 @@ -38,10 +36,16 @@ coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack # # Revisit for removal / mock improvement: +# +# NOTE(lourot): newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# netifaces # vault psycopg2-binary # vault tenacity # vault -pbr # vault -cryptography # vault, keystone-saml-mellon +pbr==5.6.0 # vault +cryptography<3.4 # vault, keystone-saml-mellon lxml # keystone-saml-mellon hvac # vault, barbican-vault diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 391b2af8..5c818017 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -62,7 +62,8 @@ commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} src unit_tests [testenv:cover] From 48cfa166722f8d3629cf4673a47608617e2c28c9 Mon Sep 17 00:00:00 2001 From: David Ames Date: Thu, 22 Jul 2021 14:06:29 -0700 Subject: [PATCH 2241/2699] Update catalog entry on addition of certificates Guarantee that the object-store URL is updated when the certificates relation is completed. Sync release-tools tox and requirements Change-Id: I4ca967f2c5c5eedfc56969785fcf23e4063d2a78 --- ceph-radosgw/hooks/hooks.py | 2 ++ ceph-radosgw/requirements.txt | 2 +- ceph-radosgw/test-requirements.txt | 8 +------- ceph-radosgw/tox.ini | 4 ++-- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index abc30460..988f25d1 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -573,6 +573,8 @@ def _certs_changed(): process_certificates('ceph-radosgw', relation_id, unit) configure_https() _certs_changed() + for r_id in relation_ids('identity-service'): + identity_joined(relid=r_id) @hooks.hook('master-relation-joined') diff --git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index 360ecbaa..ead6e89a 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -7,7 +7,7 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # -pbr>=1.8.0,<1.9.0 +pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 9aea716b..dba2c767 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -8,11 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. 
# setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -charm-tools>=2.4.4 - -# Workaround until https://github.com/juju/charm-tools/pull/589 gets -# published -keyring<21 requests>=2.18.4 @@ -21,7 +16,6 @@ requests>=2.18.4 mock>=1.2,<4.0.0; python_version < '3.6' mock>=1.2; python_version >= '3.6' -flake8>=2.2.4 stestr>=2.2.0 # Dependency of stestr. Workaround for @@ -42,7 +36,7 @@ oslo.utils<=3.41.0;python_version<'3.6' coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index ab9593f3..9ba3f9fe 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -65,8 +65,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = flake8==3.9.2 + charm-tools==2.8.3 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From 794a0eac31f4b4e3125d3d98e4bc0753ec284cec Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 20 Jul 2021 06:16:34 -0700 Subject: [PATCH 2242/2699] Restart radosgw services on upgrade When radosgw packages are upgraded, the radosgw service needs to be restarted by the charm. Check to see that packages were installed on the upgrade path and if so, restart the radosgw service. Change-Id: I61055ea4605a9a7c490c18f611d0eb583c617ce3 Closes-Bug: #1906707 --- ceph-radosgw/hooks/hooks.py | 19 ++++++++++++++++++- ceph-radosgw/unit_tests/test_hooks.py | 21 +++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index abc30460..92a66d0e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -141,6 +141,15 @@ def upgrade_available(): def install_packages(): + """Installs necessary packages for the ceph-radosgw service. + + Calling this method when the source config value has changed + will cause an upgrade of ceph packages to be performed. + + :returns: whether packages were installed or not + :rtype: boolean + """ + pkgs_installed = False c = config() if c.changed('source') or c.changed('key'): add_source(c.get('source'), c.get('key')) @@ -164,11 +173,14 @@ def install_packages(): apt_install(['apache2'], fatal=True) disable_unused_apache_sites() apt_install(pkgs, fatal=True) + pkgs_installed = True pkgs = filter_missing_packages(APACHE_PACKAGES) if pkgs: apt_purge(pkgs) + return pkgs_installed + @hooks.hook('install.real') @harden() @@ -212,7 +224,12 @@ def _config_changed(): log("Unit is pause or upgrading. Skipping config_changed", "WARN") return - install_packages() + # NOTE(wolsen) if an upgrade has been applied, then the radosgw + # service needs to be restarted as the package doesn't do it by + # itself. See LP#1906707 + if install_packages(): + log("Packages have been installed/upgraded... 
restarting", "INFO") + service_restart(service_name()) if config('prefer-ipv6'): status_set('maintenance', 'configuring ipv6') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 796070dc..77de0467 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -190,6 +190,27 @@ def test_config_changed(self, update_nrpe_config, mock_certs_joined): update_nrpe_config.assert_called_with() mock_certs_joined.assert_called_once_with('certificates:1') + @patch.object(ceph_hooks, 'service_name') + @patch.object(ceph_hooks, 'service_restart') + @patch.object(ceph_hooks, 'certs_joined') + @patch.object(ceph_hooks, 'update_nrpe_config') + def test_config_changed_upgrade(self, update_nrpe_config, + mock_certs_joined, mock_service_restart, + mock_service_name): + _install_packages = self.patch('install_packages') + _install_packages.return_value = True + mock_service_name.return_value = 'radosgw@localhost' + _relations = { + 'certificates': ['certificates:1'] + } + self.relation_ids.side_effect = lambda name: _relations.get(name, []) + ceph_hooks.config_changed() + self.assertTrue(_install_packages.called) + self.CONFIGS.write_all.assert_called_with() + update_nrpe_config.assert_called_with() + mock_certs_joined.assert_called_once_with('certificates:1') + mock_service_restart.assert_called_once_with('radosgw@localhost') + @patch.object(ceph_hooks, 'is_request_complete', lambda *args, **kwargs: True) @patch.object(ceph_hooks, 'is_leader') From 52b35bb64fbbe8abd3b315d8db2d6dbf6119273b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 5 Aug 2021 12:00:09 -0500 Subject: [PATCH 2243/2699] The MonContext can be complete when not all mons have provided an fsid When the MonContext becomes incomplete during regular operation from, for example, the replacement of an existing mon unit due to failure, Ceph Radosgw shoud be able to continue while the new mon bootstraps itself into the cluster. By ensuring that the context can complete with one of the mons not reporting an FSID, the remaining members of the monitor cluster can support the continuing functioning of RadosGW. 
Closes-Bug: #1938919 Change-Id: I293224f46d06cc427b2d3c8f4ae65366ed06909e --- ceph-radosgw/hooks/ceph_radosgw_context.py | 3 +- .../unit_tests/test_ceph_radosgw_context.py | 41 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 197556f6..38975a48 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -217,7 +217,8 @@ def __call__(self): for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): - fsid = relation_get('fsid', rid=rid, unit=unit) + if fsid is None: + fsid = relation_get('fsid', rid=rid, unit=unit) _auth = relation_get('auth', rid=rid, unit=unit) if _auth: auths.append(_auth) diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 6985571e..aff7e712 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -611,6 +611,47 @@ def _compare_version(package, version): _test_version = '16.2.0' context.validate_http_frontend('beast') + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') + def test_ctxt_inconsistent_fsids(self): + self.socket.gethostname.return_value = 'testhost' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + fsids = ['testfsid', 'testfsid', None] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return 'cephx' + elif attr == 'rgw.testhost_key': + return 'testkey' + elif attr == 'fsid': + return fsids.pop() + + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.determine_api_port.return_value = 70 + expect = { + 'auth_supported': 'cephx', + 'hostname': 'testhost', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', + 'old_auth': False, + 'systemd_rgw': True, + 'unit_public_ip': '10.255.255.255', + 'use_syslog': 'false', + 'loglevel': 1, + 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, + 'ipv6': False, + 'rgw_zone': 'default', + 'fsid': 'testfsid', + 'rgw_swift_versioning': False, + 'frontend': 'beast', + } + self.assertEqual(expect, mon_ctxt()) + class ApacheContextTest(CharmTestCase): From 36612fd09ef1d8444a1d0107b486b46411c71b21 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 5 Aug 2021 13:53:49 -0500 Subject: [PATCH 2244/2699] Only check for expected-osd-count until it has been met Checking for enough OSDs to be presented by OSD units is necessary during deploy time to ensure that clients can correctly connect and perform their operations; however, checking is useless post-deploy and can be harmful during replacement operations. This change introduces a restriction that this check should only be done on the leader, as well as ensuring that it short-circuits the check after the check passes. 
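A minimal sketch of the short-circuit described above, assuming is_leader, leader_get, leader_set and enough_osds are passed in to stand for the Juju leadership helpers and the expected-osd-count check (the charm's actual signatures differ):

    def osds_ready(is_leader, leader_get, leader_set, enough_osds):
        # The leader runs the expensive OSD count check only until it first
        # passes, then records the result in leader storage; afterwards every
        # unit simply trusts the recorded flag.
        if is_leader():
            if leader_get('bootstrapped-osds') is None:
                if not enough_osds():
                    return False
                leader_set({'bootstrapped-osds': True})
            return True
        return leader_get('bootstrapped-osds') is not None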
Closes-Bug: #1938970 Change-Id: Ie285bbc34692964acb35315f866fe617b0ef1305 --- ceph-mon/hooks/ceph_hooks.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index dd027627..9e4cfad7 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -925,9 +925,15 @@ def ready_for_service(): if not ceph.is_quorum(): log('mon cluster is not in quorum', level=DEBUG) return False - if not sufficient_osds(config('expected-osd-count') or 3): - log('insufficient osds bootstrapped', level=DEBUG) - return False + if is_leader(): + if leader_get('bootstrapped-osds') is None and \ + not sufficient_osds(config('expected-osd-count') or 3): + log('insufficient osds bootstrapped', level=DEBUG) + return False + leader_set({'bootstrapped-osds': True}) + else: + if leader_get('bootstrapped-osds') is None: + return False return True From e52fb7ee706c9b10d4b5100016311b7c8ebcb3a4 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 2 Aug 2021 14:29:42 -0300 Subject: [PATCH 2245/2699] Implement the 'list-inconsistent-objs' action Previously, if we wanted to list the inconsistent objects per PG, we needed to call 'ceph health detail' to get the list of inconsistent PGs and then 'rados list-inconsistent-obj $PG' for each PG. This patch aims to implement a single action that does all that and formats it as a pretty JSON. Closes-Bug: #1931751 Change-Id: I05bf90ff274e4f1b7ee9e278d62894b68ba2e787 --- ceph-mon/README.md | 1 + ceph-mon/actions.yaml | 12 +++ ceph-mon/actions/list-inconsistent-objs | 1 + ceph-mon/actions/list_inconsistent_objs.py | 91 +++++++++++++++++++ .../test_action_list_inconsistent.py | 89 ++++++++++++++++++ 5 files changed, 194 insertions(+) create mode 120000 ceph-mon/actions/list-inconsistent-objs create mode 100755 ceph-mon/actions/list_inconsistent_objs.py create mode 100644 ceph-mon/unit_tests/test_action_list_inconsistent.py diff --git a/ceph-mon/README.md b/ceph-mon/README.md index 27af091d..0a12d23d 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -159,6 +159,7 @@ deployed then see file `actions.yaml`. 
* `get-erasure-profile` * `get-health` * `list-erasure-profiles` +* `list-inconsistent-objs` * `list-pools` * `pause-health` * `pool-get` diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 6dc940a1..7069c079 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -208,6 +208,18 @@ delete-erasure-profile: list-erasure-profiles: description: "List the names of all erasure code profiles" additionalProperties: false +list-inconsistent-objs: + description: "List the names of the inconsistent objects per PG" + params: + format: + type: string + enum: + - json + - yaml + - text + default: text + description: "The output format, either json, yaml or text (default)" + additionalProperties: false list-pools: description: "List your cluster's pools" additionalProperties: false diff --git a/ceph-mon/actions/list-inconsistent-objs b/ceph-mon/actions/list-inconsistent-objs new file mode 120000 index 00000000..e6aa6390 --- /dev/null +++ b/ceph-mon/actions/list-inconsistent-objs @@ -0,0 +1 @@ +list_inconsistent_objs.py \ No newline at end of file diff --git a/ceph-mon/actions/list_inconsistent_objs.py b/ceph-mon/actions/list_inconsistent_objs.py new file mode 100755 index 00000000..6d8de5d0 --- /dev/null +++ b/ceph-mon/actions/list_inconsistent_objs.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import re +import sys +from subprocess import check_output, CalledProcessError +import yaml + +sys.path.append('hooks') + +from charmhelpers.core.hookenv import function_fail, function_get, \ + function_set, log + + +VALID_FORMATS = ('text', 'json', 'yaml') + + +def get_health_detail(): + return check_output(['ceph', 'health', 'detail']).decode('UTF-8') + + +def get_rados_inconsistent(pg): + return check_output(['rados', 'list-inconsistent-obj', pg]).decode('UTF-8') + + +def get_inconsistent_objs(): + # For the call to 'ceph health detail' we are interested in + # lines with the form: + # pg $PG is ...inconsistent... + rx = re.compile('pg (\\S+) .+inconsistent') + out = get_health_detail() + msg = {} # Maps PG -> object name list.
+ + for line in out.split('\n'): + res = rx.search(line) + if res is None: + continue + + pg = res.groups()[0] + out = get_rados_inconsistent(pg) + js = json.loads(out) + inconsistents = js.get('inconsistents') + + if not inconsistents: + continue + + msg.setdefault(pg, []).extend(x['object']['name'] + for x in inconsistents) + + return msg + + +def text_format(obj): + ret = '' + for pg, objs in obj.items(): + ret += '{}: {}'.format(pg, ','.join(objs)) + return ret + + +if __name__ == '__main__': + try: + fmt = function_get('format') + if fmt and fmt not in VALID_FORMATS: + function_fail('Unknown format specified: {}'.format(fmt)) + else: + msg = get_inconsistent_objs() + if fmt == 'yaml': + msg = yaml.dump(msg) + elif fmt == 'json': + msg = json.dumps(msg, indent=4, sort_keys=True) + else: + msg = text_format(msg) + function_set({'message': msg}) + except CalledProcessError as e: + log(e) + function_fail("Listing inconsistent objects failed with error {}" + .format(str(e))) diff --git a/ceph-mon/unit_tests/test_action_list_inconsistent.py b/ceph-mon/unit_tests/test_action_list_inconsistent.py new file mode 100644 index 00000000..6f006ce6 --- /dev/null +++ b/ceph-mon/unit_tests/test_action_list_inconsistent.py @@ -0,0 +1,89 @@ +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the list_inconsistent_objs action.""" + +from actions import list_inconsistent_objs as action +from mock import mock +from test_utils import CharmTestCase + + +class ListInconsistentTestCase(CharmTestCase): + """Run tests for the action.""" + + def setUp(self): + """Init mocks for test cases.""" + super(ListInconsistentTestCase, self).setUp( + action, ["get_health_detail", "get_rados_inconsistent"] + ) + + @mock.patch("actions.list_inconsistent_objs.get_rados_inconsistent") + @mock.patch("actions.list_inconsistent_objs.get_health_detail") + def test_inconsistent_empty( + self, _get_health_detail, _get_rados_inconsistent + ): + """Test that the returned object is empty.""" + _get_health_detail.return_value = "nothing to see here" + _get_rados_inconsistent.return_value = """ + {"epoch": 0, "inconsistents": {1: 1}} + """ + ret = action.get_inconsistent_objs() + _get_health_detail.assert_called_once() + _get_rados_inconsistent.assert_not_called() + self.assertEqual(len(ret), 0) + self.assertEqual('', action.text_format(ret)) + + @mock.patch("actions.list_inconsistent_objs.get_rados_inconsistent") + @mock.patch("actions.list_inconsistent_objs.get_health_detail") + def test_inconsistent_entry( + self, _get_health_detail, _get_rados_inconsistent + ): + """Test that expected PG is in the returned value.""" + pg_id = '3.9' + _get_health_detail.return_value = """ + pg 2.1 is active + pg {} is active+inconsistent+clean + """.format(pg_id) + + _get_rados_inconsistent.return_value = """{ + "epoch": 95, + "inconsistents": [ { "errors": [ "size_mismatch" ], + "object": { "locator": "", "name": "testfile", + "nspace": "", "snap": "head" }, + "shards": [ { "data_digest": "0xa3ba020a", + "errors": [ "size_mismatch" ], + "omap_digest": "0xffffffff", + "osd": 0, "size": 21 }, + { "data_digest": "0xa3ba020a", + "errors": [ "size_mismatch" ], + "omap_digest": "0xffffffff", + "osd": 1, "size": 22 }, + { "data_digest": "0xa3ba020a", + "errors": [], + "omap_digest": "0xffffffff", + "osd": 2, "size": 23 } + ]}] + }""" + + ret = action.get_inconsistent_objs() + _get_health_detail.assert_called_once() + _get_rados_inconsistent.assert_called() + self.assertNotEqual(len(ret), 0) + self.assertIn(pg_id, ret) + + js = action.json.loads(_get_rados_inconsistent.return_value) + obj_name = js["inconsistents"][0]["object"]["name"] + + self.assertIn(obj_name, ret[pg_id]) + self.assertEqual(action.text_format(ret), + '{}: {}'.format(pg_id, obj_name)) From fd3f8caaac641c4a96861ca3cdcb0952e001e332 Mon Sep 17 00:00:00 2001 From: likui Date: Mon, 9 Aug 2021 20:19:22 +0800 Subject: [PATCH 2246/2699] Changed minversion in tox to 3.18.0 The patch bumps min version of tox to 3.18.0 in order to replace tox's whitelist_externals by allowlist_externals option: https://github.com/tox-dev/tox/blob/master/docs/changelog.rst#v3180-2020-07-23 Change-Id: I708abb9d0ff0d42379f8c9003f10e22afac2b33f --- ceph-mon/tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 9ba3f9fe..c6b36975 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -25,7 +25,7 @@ skip_missing_interpreters = False requires = pip < 20.3 virtualenv < 20.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -34,7 +34,7 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = juju +allowlist_externals = 
juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt From 204b4c89cdb7962a906f8a5a7a6e528b18db6adb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 9 Aug 2021 15:58:00 +0000 Subject: [PATCH 2247/2699] osci, zuul, gitreview and Pin charmcraft to 0.10.2 Add osci config, zuul config, git review and pin charmcraft. The charmcraft pin is to maintain support for building out of a venv. Change-Id: I0d11ce7dc7e7640c963d3807fcd91b64541c167d --- ceph-dashboard/.gitreview | 4 ++++ ceph-dashboard/.zuul.yaml | 4 ++++ ceph-dashboard/build-requirements.txt | 1 + ceph-dashboard/osci.yaml | 10 ++++++++++ ceph-dashboard/tests/tests.yaml | 3 +++ ceph-dashboard/tox.ini | 2 +- 6 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 ceph-dashboard/.gitreview create mode 100644 ceph-dashboard/.zuul.yaml create mode 100644 ceph-dashboard/build-requirements.txt create mode 100644 ceph-dashboard/osci.yaml diff --git a/ceph-dashboard/.gitreview b/ceph-dashboard/.gitreview new file mode 100644 index 00000000..1d6df895 --- /dev/null +++ b/ceph-dashboard/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.opendev.org +port=29418 +project=openstack/charm-ceph-dashboard diff --git a/ceph-dashboard/.zuul.yaml b/ceph-dashboard/.zuul.yaml new file mode 100644 index 00000000..fd20909e --- /dev/null +++ b/ceph-dashboard/.zuul.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - openstack-python3-charm-jobs + - openstack-cover-jobs diff --git a/ceph-dashboard/build-requirements.txt b/ceph-dashboard/build-requirements.txt new file mode 100644 index 00000000..271d8955 --- /dev/null +++ b/ceph-dashboard/build-requirements.txt @@ -0,0 +1 @@ +git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml new file mode 100644 index 00000000..854e706a --- /dev/null +++ b/ceph-dashboard/osci.yaml @@ -0,0 +1,10 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - focal + vars: + needs_charm_build: true + charm_build_name: ceph-dashboard + build_type: charmcraft diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 63e365f4..b6df5e85 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -11,3 +11,6 @@ target_deploy_status: vault: workload-status: blocked workload-status-message: Vault needs to be initialized + ceph-dashboard: + workload-status: blocked + workload-status-message-prefix: Dashboard not responding diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 3220044c..31301b80 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -100,7 +100,7 @@ commands = {posargs} [testenv:build] basepython = python3 -deps = +deps = -r{toxinidir}/build-requirements.txt commands = charmcraft build From 7e45ee64f259cffea48324c3a884a8139f9207c5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 15 Jun 2021 08:52:57 +0000 Subject: [PATCH 2248/2699] Add support for dashboard relation Add support for the dashboard relation. The relation enables the mons to signal to the dashboard that the cluster is
Change-Id: I279142d386a8bf369c0b9dff3b7be9d65f314bf5 --- ceph-mon/hooks/ceph_hooks.py | 18 ++++++++++++++++++ ceph-mon/hooks/dashboard-relation-joined | 1 + ceph-mon/metadata.yaml | 2 ++ ceph-mon/unit_tests/test_ceph_hooks.py | 18 ++++++++++++++++++ 4 files changed, 39 insertions(+) create mode 120000 ceph-mon/hooks/dashboard-relation-joined diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 9e4cfad7..fd10ff84 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -324,6 +324,9 @@ def config_changed(): status_set('maintenance', 'Bootstrapping single Ceph MGR') ceph.bootstrap_manager() + for relid in relation_ids('dashboard'): + dashboard_relation(relid) + # Update client relations notify_client() @@ -871,6 +874,10 @@ def osd_relation(relid=None, unit=None): notify_client() notify_rbd_mirrors() send_osd_settings() + + for relid in relation_ids('dashboard'): + dashboard_relation(relid) + else: log('mon cluster not in quorum - deferring fsid provision') @@ -937,6 +944,17 @@ def ready_for_service(): return True +@hooks.hook('dashboard-relation-joined') +def dashboard_relation(relid=None): + """Inform dashboard that mons are ready""" + if not ready_for_service(): + log("mon cluster is not in quorum, dashboard notification skipped", + level=WARNING) + return + + relation_set(relation_id=relid, relation_settings={'mon-ready': True}) + + @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): diff --git a/ceph-mon/hooks/dashboard-relation-joined b/ceph-mon/hooks/dashboard-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-mon/hooks/dashboard-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 5c9e306d..f9aedfbb 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -40,6 +40,8 @@ provides: interface: ceph-rbd-mirror prometheus: interface: http + dashboard: + interface: ceph-dashboard requires: bootstrap-source: interface: ceph-bootstrap diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index ee4e8b4b..f5679a16 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -353,6 +353,24 @@ def test_notify_mons(self, mock_relation_ids, mock_related_units, relation_settings={ 'nonce': 'FAKE-UUID'}) + @patch.object(ceph_hooks, 'relation_set') + @patch.object(ceph_hooks, 'ready_for_service') + def test_dashboard_relation(self, ready_for_service, relation_set): + ready_for_service.return_value = True + ceph_hooks.dashboard_relation() + relation_set.assert_called_once_with( + relation_id=None, + relation_settings={'mon-ready': True}) + relation_set.reset_mock() + ceph_hooks.dashboard_relation('rid1') + relation_set.assert_called_once_with( + relation_id='rid1', + relation_settings={'mon-ready': True}) + ready_for_service.return_value = False + relation_set.reset_mock() + ceph_hooks.dashboard_relation() + self.assertFalse(relation_set.called) + @patch.object(ceph_hooks.hookenv, 'remote_service_name') @patch.object(ceph_hooks, 'relation_get') @patch.object(ceph_hooks, 'remote_unit') From 55d6c7cec382317de52d9dc6fe9bfcd28f4da794 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 19 Aug 2021 13:22:57 +0000 Subject: [PATCH 2249/2699] Watch for config changed and gate on mons Setup observer for config-changed events and get configuring the dashboard on all the mons reporting 
ready. Change-Id: I6a90d7afde2266e2dfa535d05e022a544914124a --- ceph-dashboard/src/charm.py | 6 +++++ ceph-dashboard/src/interface_dashboard.py | 19 +++++++++++---- ceph-dashboard/tests/bundles/focal.yaml | 2 +- .../unit_tests/test_ceph_dashboard_charm.py | 12 +++++++++- .../unit_tests/test_interface_dashboard.py | 23 +++++++++++++++++++ 5 files changed, 56 insertions(+), 6 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 63be231c..15deae7a 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -48,6 +48,9 @@ def __init__(self, *args) -> None: """Setup adapters and observers.""" super().__init__(*args) super().register_status_check(self.check_dashboard) + self.framework.observe( + self.on.config_changed, + self._configure_dashboard) self.mon = interface_dashboard.CephDashboardRequires( self, 'dashboard') @@ -112,6 +115,9 @@ def kick_dashboard(self) -> None: def _configure_dashboard(self, _) -> None: """Configure dashboard""" + if not self.mon.mons_ready: + logging.info("Not configuring dashboard, mons not ready") + return if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled(): ceph_utils.mgr_enable_dashboard() ceph_utils.mgr_config_set( diff --git a/ceph-dashboard/src/interface_dashboard.py b/ceph-dashboard/src/interface_dashboard.py index 19183f7d..8381aa1d 100644 --- a/ceph-dashboard/src/interface_dashboard.py +++ b/ceph-dashboard/src/interface_dashboard.py @@ -31,12 +31,23 @@ def __init__(self, charm, relation_name): charm.on[relation_name].relation_changed, self.on_changed) + @property + def mons_ready(self) -> bool: + """Check that all mons have reported ready.""" + ready = False + if self.dashboard_relation: + # There will only be one unit as this is a subordinate relation. 
+ for unit in self.dashboard_relation.units: + unit_data = self.dashboard_relation.data[unit] + if unit_data.get(self.READY_KEY) == 'True': + ready = True + return ready + def on_changed(self, event): + """Emit mon_ready if mons are ready.""" logging.debug("CephDashboardRequires on_changed") - for u in self.dashboard_relation.units: - if self.dashboard_relation.data[u].get(self.READY_KEY) == 'True': - logging.debug("Emitting mon ready") - self.on.mon_ready.emit() + if self.mons_ready: + self.on.mon_ready.emit() @property def dashboard_relation(self): diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 64c88694..02c7573e 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -9,7 +9,7 @@ applications: options: osd-devices: '/dev/test-non-existent' ceph-mon: - charm: cs:~gnuoy/ceph-mon-26 + charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: monitor-count: '3' diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index c6447e61..c61f2324 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -239,9 +239,19 @@ def test_kick_dashboard(self): self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() def test__configure_dashboard(self): + self.ceph_utils.is_dashboard_enabled.return_value = True + rel_id = self.harness.add_relation('dashboard', 'ceph-mon') self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( + rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) - self.ceph_utils.is_dashboard_enabled.return_value = True + self.ceph_utils.mgr_config_set.reset_mock() self.harness.set_leader(False) self.harness.charm._configure_dashboard(None) self.assertFalse(self.ceph_utils.mgr_enable_dashboard.called) diff --git a/ceph-dashboard/unit_tests/test_interface_dashboard.py b/ceph-dashboard/unit_tests/test_interface_dashboard.py index 9e18777b..0d7b4e03 100644 --- a/ceph-dashboard/unit_tests/test_interface_dashboard.py +++ b/ceph-dashboard/unit_tests/test_interface_dashboard.py @@ -96,3 +96,26 @@ def test_on_changed(self): self.assertEqual( self.harness.charm.seen_events, ['MonReadyEvent']) + self.assertTrue( + self.harness.charm.mon.mons_ready) + + def test_on_changed_not_ready_unit(self): + self.harness.begin() + # No MonReadyEvent as relation is absent + self.assertEqual( + self.harness.charm.seen_events, + []) + rel_id = self.add_dashboard_relation() + # No MonReadyEvent as ceph-mon has not declared it is ready. + self.assertEqual( + self.harness.charm.seen_events, + []) + self.harness.update_relation_data( + rel_id, + 'ceph-mon/0', + {}) + self.assertEqual( + self.harness.charm.seen_events, + []) + self.assertFalse( + self.harness.charm.mon.mons_ready) From 294364817e6eb5db120ae00e62b6cf94ee2a3cb0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 12 Aug 2021 10:01:27 +0000 Subject: [PATCH 2250/2699] Add config for password policy, audit and motd This change takes a number of dashboard configuration options which are managed via `ceph dashboard key value` and exposes them as charm config options.
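As an illustration of that mapping (a minimal, hypothetical sketch rather than the charm code itself; the subcommand name is one that appears in the diff below, and set_dashboard_option is an illustrative helper):

    # Sketch: each exposed charm option becomes a
    # `ceph dashboard <subcommand> <value>` call run on the unit.
    # Assumes the ceph CLI is available on the host.
    import subprocess

    def set_dashboard_option(subcommand, value):
        cmd = ['ceph', 'dashboard', subcommand, str(value)]
        # check_output raises CalledProcessError on a non-zero exit
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    # e.g. the charm option audit-api-enabled=True would translate to:
    # set_dashboard_option('set-audit-api-enabled', True)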
Change-Id: I92e0948e36f4156686c908f86b5e2398b504a742 --- ceph-dashboard/config.yaml | 70 ++++++++++ ceph-dashboard/src/charm.py | 123 ++++++++++++++++++ .../unit_tests/test_ceph_dashboard_charm.py | 87 +++++++++++-- 3 files changed, 267 insertions(+), 13 deletions(-) diff --git a/ceph-dashboard/config.yaml b/ceph-dashboard/config.yaml index d4bed64c..2690da13 100644 --- a/ceph-dashboard/config.yaml +++ b/ceph-dashboard/config.yaml @@ -2,9 +2,79 @@ # See LICENSE file for licensing details. options: + debug: + type: boolean + default: False + description: | + Control debug mode. It is recommended that debug be disabled in + production deployments. public-hostname: type: string default: description: | The hostname or address of the public endpoints created for the dashboard + enable-password-policy: + type: boolean + default: True + description: Enable password policy + password-policy-check-length: + type: boolean + default: True + description: | + Reject password if it is shorter than password-policy-min-length + password-policy-check-oldpwd: + type: boolean + default: True + description: Reject password if it matches the previous password. + password-policy-check-username: + type: boolean + default: True + description: Reject password if username is included in password. + password-policy-check-exclusion-list: + type: boolean + default: True + description: Reject password if it contains a word from a forbidden list. + password-policy-check-complexity: + type: boolean + default: True + description: | + Check password meets a complexity score of password-policy-min-complexity. + See https://docs.ceph.com/en/latest/mgr/dashboard/#password-policy + password-policy-check-sequential-chars: + type: boolean + default: True + description: | + Reject password if it contains a sequence of sequential characters, e.g. + a password containing '123' or 'efg' would be rejected. + password-policy-check-repetitive-chars: + type: boolean + default: True + description: | + Reject password if it contains consecutive repeating characters. + password-policy-min-length: + type: int + default: 8 + description: Set minimum password length. + password-policy-min-complexity: + type: int + default: 10 + description: | + Set minimum password complexity score. + See https://docs.ceph.com/en/latest/mgr/dashboard/#password-policy + audit-api-enabled: + type: boolean + default: False + description: | + Log requests made to the dashboard REST API to the Ceph audit log. + audit-api-log-payload: + type: boolean + default: True + description: | + Include payload in Ceph audit logs. audit-api-enabled must be set to True + to enable this. + motd: + type: string + default: "" + description: | + Message of the day settings. Should be in the format "severity|expires|message". Set to "" to disable.
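Of the options above, motd is the only one that packs several fields into a single value; a sketch of the conversion the charm performs (mirroring the MOTDOption class and the unit test expectations in the diffs that follow):

    # "severity|expires|message" splits into the argument list for
    # `ceph dashboard motd`; an empty value clears the message instead.
    def motd_args(value):
        return value.split('|') if value else ['clear']

    assert motd_args('warning|5w|enough is enough') == [
        'warning', '5w', 'enough is enough']
    assert motd_args('') == ['clear']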
diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 15deae7a..0fb219fd 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -13,6 +13,8 @@ from ops.main import main from ops.model import ActiveStatus, BlockedStatus, StatusBase from ops.charm import ActionEvent +from typing import List, Union + import interface_tls_certificates.ca_client as ca_client import re import secrets @@ -24,6 +26,7 @@ import interface_api_endpoints import cryptography.hazmat.primitives.serialization as serialization import charms_ceph.utils as ceph_utils +import charmhelpers.core.host as ch_host from pathlib import Path @@ -44,6 +47,98 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): '/usr/local/share/ca-certificates/vault_ca_cert_dashboard.crt') TLS_PORT = 8443 + class CharmCephOption(): + """Manage a charm option to ceph command to manage that option""" + + def __init__(self, charm_option_name, ceph_option_name, + min_version=None): + self.charm_option_name = charm_option_name + self.ceph_option_name = ceph_option_name + self.min_version = min_version + + def is_supported(self) -> bool: + """Is the option supported on this unit""" + if self.min_version: + return self.minimum_supported(self.min_version) + return True + + def minimum_supported(self, supported_version: str) -> bool: + """Check if installed Ceph release is >= to supported_version""" + return ch_host.cmp_pkgrevno('ceph-common', supported_version) < 1 + + def convert_option(self, value: Union[bool, str, int]) -> List[str]: + """Convert a value to the corresponding value part of the ceph + dashboard command""" + return [str(value)] + + def ceph_command(self, value: List[str]) -> List[str]: + """Shell command to set option to desired value""" + cmd = ['ceph', 'dashboard', self.ceph_option_name] + cmd.extend(self.convert_option(value)) + return cmd + + class DebugOption(CharmCephOption): + + def convert_option(self, value): + """Convert charm True/False to enable/disable""" + if value: + return ['enable'] + else: + return ['disable'] + + class MOTDOption(CharmCephOption): + + def convert_option(self, value): + """Split motd charm option into ['severity', 'time', 'message']""" + if value: + return value.split('|') + else: + return ['clear'] + + CHARM_TO_CEPH_OPTIONS = [ + DebugOption('debug', 'debug'), + CharmCephOption( + 'enable-password-policy', + 'set-pwd-policy-enabled'), + CharmCephOption( + 'password-policy-check-length', + 'set-pwd-policy-check-length-enabled'), + CharmCephOption( + 'password-policy-check-oldpwd', + 'set-pwd-policy-check-oldpwd-enabled'), + CharmCephOption( + 'password-policy-check-username', + 'set-pwd-policy-check-username-enabled'), + CharmCephOption( + 'password-policy-check-exclusion-list', + 'set-pwd-policy-check-exclusion-list-enabled'), + CharmCephOption( + 'password-policy-check-complexity', + 'set-pwd-policy-check-complexity-enabled'), + CharmCephOption( + 'password-policy-check-sequential-chars', + 'set-pwd-policy-check-sequential-chars-enabled'), + CharmCephOption( + 'password-policy-check-repetitive-chars', + 'set-pwd-policy-check-repetitive-chars-enabled'), + CharmCephOption( + 'password-policy-min-length', + 'set-pwd-policy-min-length'), + CharmCephOption( + 'password-policy-min-complexity', + 'set-pwd-policy-min-complexity'), + CharmCephOption( + 'audit-api-enabled', + 'set-audit-api-enabled'), + CharmCephOption( + 'audit-api-log-payload', + 'set-audit-api-log-payload'), + MOTDOption( + 'motd', + 'motd', + min_version='15.2.14') + ] + def __init__(self, 
*args) -> None: """Setup adapters and observers.""" super().__init__(*args) @@ -113,6 +208,33 @@ def kick_dashboard(self) -> None: ceph_utils.mgr_disable_dashboard() ceph_utils.mgr_enable_dashboard() + def _run_cmd(self, cmd: List[str]) -> None: + """Run command in subprocess + + `cmd` The command to run + """ + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + logging.exception("Command failed: {}".format(exc.output)) + + def _apply_ceph_config_from_charm_config(self) -> None: + """Read charm config and apply settings to dashboard config""" + for option in self.CHARM_TO_CEPH_OPTIONS: + try: + value = self.config[option.charm_option_name] + except KeyError: + logging.error( + "Unknown charm option {}, skipping".format( + option.charm_option_name)) + continue + if option.is_supported(): + self._run_cmd(option.ceph_command(value)) + else: + logging.warning( + "Skipping charm option {}, not supported".format( + option.charm_option_name)) + def _configure_dashboard(self, _) -> None: """Configure dashboard""" if not self.mon.mons_ready: @@ -120,6 +242,7 @@ def _configure_dashboard(self, _) -> None: return if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled(): ceph_utils.mgr_enable_dashboard() + self._apply_ceph_config_from_charm_config() ceph_utils.mgr_config_set( 'mgr/dashboard/{hostname}/server_addr'.format( hostname=socket.gethostname()), diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index c61f2324..23fcdb7d 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -155,12 +155,19 @@ class TestCephDashboardCharmBase(CharmTestCase): PATCHES = [ 'ceph_utils', 'socket', - 'subprocess' + 'subprocess', + 'ch_host', ] def setUp(self): super().setUp(charm, self.PATCHES) - self.harness = Harness( + self.harness = self.get_harness() + + self.socket.gethostname.return_value = 'server1' + self.socket.getfqdn.return_value = 'server1.local' + + def get_harness(self): + _harness = Harness( _CephDashboardCharm, ) @@ -178,24 +185,78 @@ def network_get(self, endpoint_name, relation_id=None): 'egress-subnets': ['10.0.0.0/24']} return network_data - self.harness._backend = _TestingOPSModelBackend( - self.harness._unit_name, self.harness._meta) - self.harness._model = model.Model( - self.harness._meta, - self.harness._backend) - self.harness._framework = framework.Framework( + _harness._backend = _TestingOPSModelBackend( + _harness._unit_name, _harness._meta) + _harness._model = model.Model( + _harness._meta, + _harness._backend) + _harness._framework = framework.Framework( ":memory:", - self.harness._charm_dir, - self.harness._meta, - self.harness._model) + _harness._charm_dir, + _harness._meta, + _harness._model) # END Workaround - self.socket.gethostname.return_value = 'server1' - self.socket.getfqdn.return_value = 'server1.local' + return _harness def test_init(self): self.harness.begin() self.assertFalse(self.harness.charm._stored.is_started) + def test_charm_config(self): + self.ceph_utils.is_dashboard_enabled.return_value = True + self.ch_host.cmp_pkgrevno.return_value = 0 + basic_boolean = [ + ('enable-password-policy', 'set-pwd-policy-enabled'), + ('password-policy-check-length', + 'set-pwd-policy-check-length-enabled'), + ('password-policy-check-oldpwd', + 'set-pwd-policy-check-oldpwd-enabled'), + ('password-policy-check-username', + 'set-pwd-policy-check-username-enabled'), + 
('password-policy-check-exclusion-list', + 'set-pwd-policy-check-exclusion-list-enabled'), + ('password-policy-check-complexity', + 'set-pwd-policy-check-complexity-enabled'), + ('password-policy-check-sequential-chars', + 'set-pwd-policy-check-sequential-chars-enabled'), + ('password-policy-check-repetitive-chars', + 'set-pwd-policy-check-repetitive-chars-enabled'), + ('audit-api-enabled', + 'set-audit-api-enabled'), + ('audit-api-log-payload', + 'set-audit-api-log-payload')] + expect = [] + for charm_option, ceph_option in basic_boolean: + expect.append((charm_option, True, [ceph_option, 'True'])) + expect.append((charm_option, False, [ceph_option, 'False'])) + expect.extend([ + ('debug', True, ['debug', 'enable']), + ('debug', False, ['debug', 'disable'])]) + expect.extend([ + ('motd', 'warning|5w|enough is enough', ['motd', 'warning', '5w', + 'enough is enough']), + ('motd', '', ['motd', 'clear'])]) + base_cmd = ['ceph', 'dashboard'] + for charm_option, charm_value, expected_options in expect: + _harness = self.get_harness() + rel_id = _harness.add_relation('dashboard', 'ceph-mon') + _harness.add_relation_unit( + rel_id, + 'ceph-mon/0') + _harness.update_relation_data( + rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) + _harness.begin() + expected_cmd = base_cmd + expected_options + self.subprocess.check_output.reset_mock() + _harness.update_config( + key_values={charm_option: charm_value}) + self.subprocess.check_output.assert_called_once_with( + expected_cmd, + stderr=self.subprocess.STDOUT) + def test__on_ca_available(self): rel_id = self.harness.add_relation('certificates', 'vault') self.harness.begin() From e3672a2a28dc6f9423f3138f08b622f96cb8ca7a Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 12 Aug 2021 16:37:08 +0000 Subject: [PATCH 2251/2699] Support user supplied certs Change-Id: I0bc72bb0e4c907fa042e9e4d4154b9541247ff15 --- ceph-dashboard/config.yaml | 18 +++++ ceph-dashboard/src/charm.py | 65 +++++++++++++++---- .../unit_tests/test_ceph_dashboard_charm.py | 58 +++++++++++++++-- 3 files changed, 123 insertions(+), 18 deletions(-) diff --git a/ceph-dashboard/config.yaml b/ceph-dashboard/config.yaml index 2690da13..a2883bda 100644 --- a/ceph-dashboard/config.yaml +++ b/ceph-dashboard/config.yaml @@ -78,3 +78,21 @@ options: default: "" description: | Message of the day settings. Should be in the format "severity|expires|message". Set to "" to disable. + ssl_cert: + type: string + default: + description: | + SSL certificate to install and use for API ports. Setting this value + and ssl_key will enable reverse proxying, point Neutron's entry in the + Keystone catalog to use https, and override any certificate and key + issued by Keystone (if it is configured to do so). + ssl_key: + type: string + default: + description: SSL key to use with certificate specified as ssl_cert. + ssl_ca: + type: string + default: + description: | + SSL CA to use with the certificate and key provided - this is only + required if you are providing a privately signed ssl_cert and ssl_key. 
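The ssl_cert, ssl_key and ssl_ca values are expected to carry base64-encoded PEM data, matching the decoding done in _get_tls_from_config in the diff that follows; a minimal round-trip sketch (the PEM bytes here are a placeholder, not a real certificate):

    # Charm config transports PEM material base64-encoded; the charm
    # decodes it back to bytes before writing the key and cert to disk.
    import base64

    pem = b'-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n'
    encoded = base64.b64encode(pem)          # value supplied as ssl_cert
    assert base64.b64decode(encoded) == pem  # bytes the charm recovers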
diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 0fb219fd..a413ada6 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -13,8 +13,9 @@ from ops.main import main from ops.model import ActiveStatus, BlockedStatus, StatusBase from ops.charm import ActionEvent -from typing import List, Union +from typing import List, Union, Tuple +import base64 import interface_tls_certificates.ca_client as ca_client import re import secrets @@ -32,6 +33,8 @@ logger = logging.getLogger(__name__) +TLS_Config = Tuple[Union[bytes, None], Union[bytes, None], Union[bytes, None]] + class CephDashboardCharm(ops_openstack.core.OSBaseCharm): """Ceph Dashboard charm.""" @@ -160,7 +163,7 @@ def __init__(self, *args) -> None: self._on_ca_available) self.framework.observe( self.ca_client.on.tls_server_config_ready, - self._on_tls_server_config_ready) + self._configure_dashboard) self.framework.observe(self.on.add_user_action, self._add_user_action) self.ingress = interface_api_endpoints.APIEndpointsRequires( self, @@ -243,6 +246,7 @@ def _configure_dashboard(self, _) -> None: if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled(): ceph_utils.mgr_enable_dashboard() self._apply_ceph_config_from_charm_config() + self._configure_tls() ceph_utils.mgr_config_set( 'mgr/dashboard/{hostname}/server_addr'.format( hostname=socket.gethostname()), @@ -254,24 +258,57 @@ def _get_bind_ip(self) -> str: binding = self.model.get_binding('public') return str(binding.network.ingress_address) - def _on_tls_server_config_ready(self, _) -> None: - """Configure TLS.""" - self.TLS_KEY_PATH.write_bytes( - self.ca_client.server_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption())) - self.TLS_CERT_PATH.write_bytes( - self.ca_client.server_certificate.public_bytes( - encoding=serialization.Encoding.PEM)) - self.TLS_CA_CERT_PATH.write_bytes( + def _get_tls_from_config(self) -> TLS_Config: + """Extract TLS config from charm config.""" + raw_key = self.config.get("ssl_key") + raw_cert = self.config.get("ssl_cert") + raw_ca_cert = self.config.get("ssl_ca") + if not (raw_key and raw_cert): + return None, None, None + key = base64.b64decode(raw_key) + cert = base64.b64decode(raw_cert) + if raw_ca_cert: + ca_cert = base64.b64decode(raw_ca_cert) + else: + ca_cert = None + return key, cert, ca_cert + + def _get_tls_from_relation(self) -> TLS_Config: + """Extract TLS config from certificates relation.""" + if not self.ca_client.is_server_cert_ready: + return None, None, None + key = self.ca_client.server_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + cert = self.ca_client.server_certificate.public_bytes( + encoding=serialization.Encoding.PEM) + ca_cert = ( self.ca_client.ca_certificate.public_bytes( encoding=serialization.Encoding.PEM) + self.ca_client.root_ca_chain.public_bytes( encoding=serialization.Encoding.PEM)) + return key, cert, ca_cert + + def _configure_tls(self) -> None: + """Configure TLS.""" + logging.debug("Attempting to collect TLS config from relation") + key, cert, ca_cert = self._get_tls_from_relation() + if not (key and cert): + logging.debug("Attempting to collect TLS config from charm " + "config") + key, cert, ca_cert = self._get_tls_from_config() + if not (key and cert): + logging.warn( + "Not configuring TLS, not all data present") +
return + self.TLS_KEY_PATH.write_bytes(key) + self.TLS_CERT_PATH.write_bytes(cert) + if ca_cert: + self.TLS_CA_CERT_PATH.write_bytes(ca_cert) + subprocess.check_call(['update-ca-certificates']) hostname = socket.gethostname() - subprocess.check_call(['update-ca-certificates']) ceph_utils.dashboard_set_ssl_certificate( self.TLS_CERT_PATH, hostname=hostname) diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 23fcdb7d..2414b1a0 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 import unittest import sys @@ -345,22 +346,32 @@ def test__get_bind_ip(self): '10.0.0.10') @patch('socket.gethostname') - def test__on_tls_server_config_ready(self, _gethostname): + def test_certificates_relation(self, _gethostname): + self.ceph_utils.is_dashboard_enabled.return_value = True mock_TLS_KEY_PATH = MagicMock() mock_TLS_CERT_PATH = MagicMock() mock_TLS_CA_CERT_PATH = MagicMock() _gethostname.return_value = 'server1' - rel_id = self.harness.add_relation('certificates', 'vault') + cert_rel_id = self.harness.add_relation('certificates', 'vault') + dash_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') self.harness.begin() self.harness.set_leader() self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH self.harness.add_relation_unit( - rel_id, + dash_rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( + dash_rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) + self.harness.add_relation_unit( + cert_rel_id, 'vault/0') self.harness.update_relation_data( - rel_id, + cert_rel_id, 'vault/0', { 'ceph-dashboard_0.server.cert': TEST_CERT, @@ -384,6 +395,45 @@ def test__on_tls_server_config_ready(self, _gethostname): self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + def test_certificates_from_config(self): + self.ceph_utils.is_dashboard_enabled.return_value = True + mock_TLS_KEY_PATH = MagicMock() + mock_TLS_CERT_PATH = MagicMock() + mock_TLS_CA_CERT_PATH = MagicMock() + dash_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') + self.harness.begin() + self.harness.set_leader() + self.harness.add_relation_unit( + dash_rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( + dash_rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) + self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH + self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH + self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH + self.subprocess.check_call.reset_mock() + self.harness.update_config( + key_values={ + 'ssl_key': base64.b64encode(TEST_KEY.encode("utf-8")), + 'ssl_cert': base64.b64encode(TEST_CERT.encode("utf-8")), + 'ssl_ca': base64.b64encode(TEST_CA.encode("utf-8"))}) + self.subprocess.check_call.assert_called_once_with( + ['update-ca-certificates']) + self.ceph_utils.dashboard_set_ssl_certificate.assert_has_calls([ + call(mock_TLS_CERT_PATH, hostname='server1'), + call(mock_TLS_CERT_PATH)]) + self.ceph_utils.dashboard_set_ssl_certificate_key.assert_has_calls([ + call(mock_TLS_KEY_PATH, hostname='server1'), + call(mock_TLS_KEY_PATH)]) + self.ceph_utils.mgr_config_set.assert_has_calls([ + call('mgr/dashboard/standby_behaviour', 
'redirect'), + call('mgr/dashboard/ssl', 'true')]) + self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() + self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + @patch.object(charm.secrets, 'choice') def test__gen_user_password(self, _choice): self.harness.begin() From f8dbdff0cc2619f8ca5580b7a6b6f6c1ac380bd1 Mon Sep 17 00:00:00 2001 From: Nikhil Kshirsagar Date: Fri, 13 Aug 2021 16:59:42 +0530 Subject: [PATCH 2252/2699] Do not zap a disk if it is used by lvm2 If the disk being zapped is used by lvm (if it contains the lvm label and hasn't been pvremove'd) it's safer to simply bail out of zapping it than to attempt teardown through a forced pvremove, because the disk being zapped might in fact be in use by some LV. Closes-Bug: 1858519 Change-Id: I111475c5a4584a3e367c604ab51ce2ef3789ff7f --- ceph-osd/actions/zap_disk.py | 14 +++++++++++-- ceph-osd/unit_tests/test_actions_zap_disk.py | 21 ++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index 1002c742..22733449 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -29,6 +29,8 @@ from charmhelpers.core.unitdata import kv from charms_ceph.utils import is_active_bluestore_device from charms_ceph.utils import is_mapped_luks_device +from charmhelpers.contrib.storage.linux.lvm import is_lvm_physical_volume +from charmhelpers.core.hookenv import log class ZapDiskError(Exception): @@ -61,6 +63,7 @@ def zap(): failed_devices = [] not_block_devices = [] + lvm_devices = [] try: devices = get_devices() except ZapDiskError as error: @@ -68,6 +71,8 @@ def zap(): return for device in devices: + if is_lvm_physical_volume(device): + lvm_devices.append(device) if not is_block_device(device): not_block_devices.append(device) if (is_device_mounted(device) or @@ -75,10 +80,15 @@ def zap(): is_mapped_luks_device(device)): failed_devices.append(device) - if failed_devices or not_block_devices: + if lvm_devices or failed_devices or not_block_devices: message = "" + if lvm_devices: + log('Cannot zap a device used by lvm') + message = "{} devices are lvm devices: {}".format( + len(lvm_devices), + ", ".join(lvm_devices)) if failed_devices: - message = "{} devices are mounted: {}".format( + message += "{} devices are mounted: {}".format( len(failed_devices), ", ".join(failed_devices)) if not_block_devices: diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py index 21a6ffcc..375b026f 100644 --- a/ceph-osd/unit_tests/test_actions_zap_disk.py +++ b/ceph-osd/unit_tests/test_actions_zap_disk.py @@ -27,11 +27,13 @@ def setUp(self): 'is_device_mounted', 'is_active_bluestore_device', 'is_mapped_luks_device', + 'is_lvm_physical_volume', 'kv']) self.is_device_mounted.return_value = False self.is_block_device.return_value = True self.is_active_bluestore_device.return_value = False self.is_mapped_luks_device.return_value = False + self.is_lvm_physical_volume.return_value = False self.kv.return_value = self.kv self.hookenv.local_unit.return_value = "ceph-osd-test/0" @@ -215,3 +217,22 @@ def side_effect(arg): self.hookenv.action_fail.assert_called_with( 'Failed due to: not-absolute: Not absolute path.') self.hookenv.action_set.assert_not_called() + + @mock.patch('os.path.exists', mock.MagicMock(return_value=True)) + @mock.patch.object(zap_disk, 'zap_disk') + def test_wont_zap_lvm_device(self, _zap_disk): + """Won't zap lvm disk""" + def side_effect(arg): + return { + 'devices': '/dev/vdb',
'i-really-mean-it': True, + }.get(arg) + + self.hookenv.action_get.side_effect = side_effect + self.is_lvm_physical_volume.return_value = True + + zap_disk.zap() + _zap_disk.assert_not_called() + self.hookenv.action_fail.assert_called_with( + '1 devices are lvm devices: /dev/vdb') + self.hookenv.action_set.assert_not_called() From 929ac976a766e875f9b83e917c27deea937cf293 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 25 Aug 2021 14:56:13 +0000 Subject: [PATCH 2253/2699] Add TLS checks Add checks that TLS data has been supplied and refactor the charm's checks to make them easier to extend. Also a drive-by fix to minimum_supported to fix the comparison of ceph versions. Change-Id: I2f27529da53c7d482d64dff4c9aaf3b0a08369b4 --- ceph-dashboard/requirements.txt | 1 + ceph-dashboard/src/charm.py | 54 ++++++++++++++----- ceph-dashboard/tests/tests.yaml | 2 +- ceph-dashboard/unit_tests/__init__.py | 5 ++ .../unit_tests/test_ceph_dashboard_charm.py | 9 ++++ 5 files changed, 57 insertions(+), 14 deletions(-) diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt index 56cbcc9d..f0aa163d 100644 --- a/ceph-dashboard/requirements.txt +++ b/ceph-dashboard/requirements.txt @@ -1,4 +1,5 @@ ops >= 1.2.0 +tenacity git+https://github.com/openstack/charms.ceph#egg=charms_ceph git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack #git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index a413ada6..42b3d707 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -22,6 +22,7 @@ import socket import string import subprocess +import tenacity import ops_openstack.plugins.classes import interface_dashboard import interface_api_endpoints @@ -67,7 +68,7 @@ def is_supported(self) -> bool: def minimum_supported(self, supported_version: str) -> bool: """Check if installed Ceph release is >= to supported_version""" - return ch_host.cmp_pkgrevno('ceph-common', supported_version) < 1 + return ch_host.cmp_pkgrevno('ceph-common', supported_version) >= 0 def convert_option(self, value: Union[bool, str, int]) -> List[str]: """Convert a value to the corresponding value part of the ceph @@ -190,20 +191,46 @@ def _on_ca_available(self, _) -> None: sans.append(self.config.get('public-hostname')) self.ca_client.request_server_certificate(socket.getfqdn(), sans) + def _check_for_certs(self) -> bool: + """Check that charm has TLS data it needs""" + # Check charm config for TLS data + key, cert, _ = self._get_tls_from_config() + if key and cert: + return True + # Check relation for TLS data + try: + self.ca_client.server_key + return True + except ca_client.CAClientError: + return False + + def _check_dashboard_responding(self) -> bool: + """Check the dashboard port is open""" + + @tenacity.retry(wait=tenacity.wait_fixed(2), + stop=tenacity.stop_after_attempt(30), reraise=True) + def _check_port(ip, port): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = sock.connect_ex((ip, port)) + assert result == 0 + + try: + _check_port(self._get_bind_ip(), self.TLS_PORT) + return True + except AssertionError: + return False + def check_dashboard(self) -> StatusBase: """Check status of dashboard""" - self._stored.is_started = ceph_utils.is_dashboard_enabled() - if self._stored.is_started: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - result = sock.connect_ex((self._get_bind_ip(), self.TLS_PORT)) - if result == 0: - return
ActiveStatus() - else: - return BlockedStatus( - 'Dashboard not responding') - else: - return BlockedStatus( - 'Dashboard is not enabled') + checks = [ + (ceph_utils.is_dashboard_enabled, 'Dashboard is not enabled'), + (self._check_for_certs, ('No certificates found. Please add a ' + 'certifcates relation or provide via ' + 'charm config')), + (self._check_dashboard_responding, 'Dashboard not responding')] + for check_f, msg in checks: + if not check_f(): + return BlockedStatus(msg) return ActiveStatus() def kick_dashboard(self) -> None: diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index b6df5e85..e1e1bda3 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -13,4 +13,4 @@ target_deploy_status: workload-status-message: Vault needs to be initialized ceph-dashboard: workload-status: blocked - workload-status-message-prefix: Dashboard not responding + workload-status-message-prefix: No certificates found diff --git a/ceph-dashboard/unit_tests/__init__.py b/ceph-dashboard/unit_tests/__init__.py index 577ab7e9..adce0b68 100644 --- a/ceph-dashboard/unit_tests/__init__.py +++ b/ceph-dashboard/unit_tests/__init__.py @@ -17,3 +17,8 @@ # Mock out secrets to make py35 happy. sys.modules['secrets'] = mock.MagicMock() + +# Tenacity decorators need to be mocked before import +tenacity = mock.MagicMock() +tenacity.retry.side_effect = lambda *args, **kwargs: lambda x: x +sys.modules['tenacity'] = tenacity diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 2414b1a0..fbeb8d3f 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -279,6 +279,15 @@ def test_check_dashboard(self): socket_mock.connect_ex.return_value = 0 self.ceph_utils.is_dashboard_enabled.return_value = True self.harness.begin() + self.assertEqual( + self.harness.charm.check_dashboard(), + BlockedStatus('No certificates found. Please add a certifcates ' + 'relation or provide via charm config')) + self.harness.update_config( + key_values={ + 'ssl_key': base64.b64encode(TEST_KEY.encode("utf-8")), + 'ssl_cert': base64.b64encode(TEST_CERT.encode("utf-8")), + 'ssl_ca': base64.b64encode(TEST_CA.encode("utf-8"))}) self.assertEqual( self.harness.charm.check_dashboard(), ActiveStatus()) From c89e9b1e0946d41fd7611dd844ec604ecfbdadec Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 26 Aug 2021 09:32:22 +0000 Subject: [PATCH 2254/2699] Add support for embedded graphs and alertmanager This patch consists of: * New ops interface grafana_dashboard based on the existing reactive interface * New ops http interface. * Dashboard registers grafana dashboards with grafana. These dashboards are taken from upstream ceph *1. Ideally the charm would pick these up from ceph packaging but they are currently not included. With the exception of host-details.json and hosts-overview.json the dashboards are unchanged. host-details.json and hosts-overview.json both needed changes to make them compatible with the metrics telegraf is sending. * alertmanager-service and prometheus relations using the http ops interface allowing their api endpoints to be registered with the dashboard, as sketched below.
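A sketch of how a service endpoint published on the http interface becomes a dashboard API host setting (hostname and port here are illustrative; the real values come from the relation data):

    # Sketch: hostname/port from the http interface are joined into the
    # URL handed to `ceph dashboard set-alertmanager-api-host` or
    # `set-prometheus-api-host`.
    def service_url(ep_data):
        return 'http://{}:{}'.format(ep_data['hostname'], ep_data['port'])

    assert service_url({'hostname': '10.0.0.5', 'port': '9093'}) == \
        'http://10.0.0.5:9093'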
NOTE: ceph-mon has an existing relation with Prometheus ( ceph-mon:prometheus <-> prometheus:target ) but prometheus does not publish its api endpoint on that relation. So, the dashboard adds a prometheus:website <-> ceph-dashboard:prometheus relation. This allows the dashboard to call set-prometheus-api-host. *1 https://github.com/ceph/ceph/tree/master/monitoring/grafana/dashboards Change-Id: Ic4522cc601895c9a79489df985a6e81fa70fb9e5 --- ceph-dashboard/README.md | 48 + ceph-dashboard/config.yaml | 5 + ceph-dashboard/metadata.yaml | 8 +- ceph-dashboard/src/charm.py | 63 + .../src/dashboards/ceph-cluster.json | 1229 +++++++++++++ .../src/dashboards/cephfs-overview.json | 309 ++++ .../src/dashboards/host-details.json | 1269 +++++++++++++ .../src/dashboards/hosts-overview.json | 852 +++++++++ .../src/dashboards/osd-device-details.json | 800 +++++++++ .../src/dashboards/osds-overview.json | 876 +++++++++ .../src/dashboards/pool-detail.json | 665 +++++++ .../src/dashboards/pool-overview.json | 1564 +++++++++++++++++ .../src/dashboards/radosgw-detail.json | 491 ++++++ .../src/dashboards/radosgw-overview.json | 630 +++++++ .../src/dashboards/radosgw-sync-overview.json | 440 +++++ .../src/dashboards/rbd-details.json | 409 +++++ .../src/dashboards/rbd-overview.json | 685 ++++++++ .../src/interface_grafana_dashboard.py | 126 ++ ceph-dashboard/src/interface_http.py | 67 + ceph-dashboard/tests/bundles/focal.yaml | 45 +- .../tests/bundles/overlays/focal.yaml.j2 | 4 + ceph-dashboard/tests/tests.yaml | 18 +- .../unit_tests/test_ceph_dashboard_charm.py | 21 + .../test_interface_api_endpoints.py | 2 +- .../test_interface_grafana_dashboard.py | 182 ++ .../unit_tests/test_interface_http.py | 97 + 26 files changed, 10898 insertions(+), 7 deletions(-) create mode 100644 ceph-dashboard/src/dashboards/ceph-cluster.json create mode 100644 ceph-dashboard/src/dashboards/cephfs-overview.json create mode 100644 ceph-dashboard/src/dashboards/host-details.json create mode 100644 ceph-dashboard/src/dashboards/hosts-overview.json create mode 100644 ceph-dashboard/src/dashboards/osd-device-details.json create mode 100644 ceph-dashboard/src/dashboards/osds-overview.json create mode 100644 ceph-dashboard/src/dashboards/pool-detail.json create mode 100644 ceph-dashboard/src/dashboards/pool-overview.json create mode 100644 ceph-dashboard/src/dashboards/radosgw-detail.json create mode 100644 ceph-dashboard/src/dashboards/radosgw-overview.json create mode 100644 ceph-dashboard/src/dashboards/radosgw-sync-overview.json create mode 100644 ceph-dashboard/src/dashboards/rbd-details.json create mode 100644 ceph-dashboard/src/dashboards/rbd-overview.json create mode 100644 ceph-dashboard/src/interface_grafana_dashboard.py create mode 100644 ceph-dashboard/src/interface_http.py create mode 100644 ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 create mode 100644 ceph-dashboard/unit_tests/test_interface_grafana_dashboard.py create mode 100644 ceph-dashboard/unit_tests/test_interface_http.py diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index b0dc8461..27edc762 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -20,6 +20,54 @@ Deploy the ceph-dashboard as a subordinate to the ceph-mon charm. juju deploy ceph-dashboard juju relate ceph-dashboard ceph-mon +## Embedded Grafana Dashboards + +To enable the embedded grafana dashboards within the Ceph dashboard +some additional relations are needed.
+ + juju relate ceph-dashboard:grafana-dashboard grafana:dashboards + juju relate ceph-dashboard:prometheus prometheus:website + juju relate ceph-mon:prometheus prometheus:target + juju relate ceph-osd:juju-info telegraf:juju-info + juju relate ceph-mon:juju-info telegraf:juju-info + +Grafana, Telegraf and Prometheus should be related in the standard way + + juju relate grafana:grafana-source prometheus:grafana-source + juju relate telegraf:prometheus-client prometheus:target + juju relate telegraf:dashboards grafana:dashboards + +Grafana must be using https, so either supply a certificate and key via +the ssl\_\* charm config options or add a vault relation. + + juju relate grafana:certificates vault:certificates + +Grafana should be set with the following charm options: + + juju config grafana anonymous=True + juju config grafana allow_embedding=True + juju config grafana install_plugins="https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip" + +Telegraf should be set with the following charm options: + + juju config telegraf hostname="{host}" + +NOTE: That is "{host}" verbatim, nothing needs to be substituted. + + +Currently the dashboard cannot autodetect the api endpoint of the grafana +service, so at the end of the deployment run the following: + + juju config ceph-dashboard grafana-api-url="https://:3000" + +## Enabling Prometheus Alerting + +To enable Prometheus alerting, add the following relations: + + juju relate ceph-dashboard:prometheus prometheus:website + juju relate ceph-mon:prometheus prometheus:target + juju relate ceph-dashboard:alertmanager-service prometheus-alertmanager:alertmanager-service + juju relate prometheus:alertmanager-service prometheus-alertmanager:alertmanager-service diff --git a/ceph-dashboard/config.yaml b/ceph-dashboard/config.yaml index a2883bda..8fcbd1f8 100644 --- a/ceph-dashboard/config.yaml +++ b/ceph-dashboard/config.yaml @@ -8,6 +8,11 @@ options: description: | Control debug mode. It is recommended that debug be disabled in production deployments. + grafana-api-url: + type: string + default: + description: | + URL of the grafana api. The url must use https.
public-hostname: type: string default: description: | The hostname or address of the public endpoints created for the dashboard diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index a052461b..40964bfa 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -24,4 +24,10 @@ requires: interface: tls-certificates loadbalancer: interface: api-endpoints - + alertmanager-service: + interface: http + prometheus: + interface: http +provides: + grafana-dashboard: + interface: grafana-dashboard diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 42b3d707..85f38a79 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -6,6 +6,7 @@ """Charm for the Ceph Dashboard.""" +import json import logging import tempfile @@ -26,6 +27,8 @@ import ops_openstack.plugins.classes import interface_dashboard import interface_api_endpoints +import interface_grafana_dashboard +import interface_http import cryptography.hazmat.primitives.serialization as serialization import charms_ceph.utils as ceph_utils import charmhelpers.core.host as ch_host @@ -50,6 +53,7 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): TLS_CA_CERT_PATH = Path( '/usr/local/share/ca-certificates/vault_ca_cert_dashboard.crt') TLS_PORT = 8443 + DASH_DIR = Path('src/dashboards') class CharmCephOption(): """Manage a charm option to ceph command to manage that option""" @@ -176,8 +180,36 @@ def __init__(self, *args) -> None: 'backend-port': self.TLS_PORT, 'backend-ip': self._get_bind_ip(), 'check-type': 'httpd'}]}) + self.grafana_dashboard = \ + interface_grafana_dashboard.GrafanaDashboardProvides( + self, + 'grafana-dashboard') + self.alertmanager = interface_http.HTTPRequires( + self, + 'alertmanager-service') + self.prometheus = interface_http.HTTPRequires( + self, + 'prometheus') + self.framework.observe( + self.grafana_dashboard.on.dash_ready, + self._configure_dashboard) + self.framework.observe( + self.alertmanager.on.http_ready, + self._configure_dashboard) + self.framework.observe( + self.prometheus.on.http_ready, + self._configure_dashboard) self._stored.set_default(is_started=False) + def _register_dashboards(self) -> None: + """Register all dashboards with grafana""" + for dash_file in self.DASH_DIR.glob("*.json"): + self.grafana_dashboard.register_dashboard( + dash_file.stem, + json.loads(dash_file.read_text())) + logging.info( + "register_grafana_dashboard: {}".format(dash_file)) + def _on_ca_available(self, _) -> None: """Request TLS certificates.""" addresses = set() @@ -220,6 +252,13 @@ def _check_port(ip, port): except AssertionError: return False + def _check_grafana_config(self) -> bool: + """Check that grafana-api-url is set if grafana is in use.""" + if self.grafana_dashboard.dashboard_relation: + return bool(self.config.get('grafana-api-url')) + else: + return True + def check_dashboard(self) -> StatusBase: """Check status of dashboard""" checks = [ (ceph_utils.is_dashboard_enabled, 'Dashboard is not enabled'), (self._check_for_certs, ('No certificates found.
Please add a ' 'certifcates relation or provide via ' 'charm config')), + (self._check_grafana_config, 'Charm config option grafana-api-url ' + 'not set'), (self._check_dashboard_responding, 'Dashboard not responding')] for check_f, msg in checks: if not check_f(): @@ -278,6 +319,28 @@ def _configure_dashboard(self, _) -> None: 'mgr/dashboard/{hostname}/server_addr'.format( hostname=socket.gethostname()), str(self._get_bind_ip())) + if self.unit.is_leader(): + grafana_ep = self.config.get('grafana-api-url') + if grafana_ep: + self._run_cmd([ + 'ceph', 'dashboard', 'set-grafana-api-url', grafana_ep]) + alertmanager_conn = self.alertmanager.get_service_ep_data() + if alertmanager_conn: + alertmanager_ep = 'http://{}:{}'.format( + alertmanager_conn['hostname'], + alertmanager_conn['port']) + self._run_cmd([ + 'ceph', 'dashboard', 'set-alertmanager-api-host', + alertmanager_ep]) + prometheus_conn = self.prometheus.get_service_ep_data() + if prometheus_conn: + prometheus_ep = 'http://{}:{}'.format( + prometheus_conn['hostname'], + prometheus_conn['port']) + self._run_cmd([ + 'ceph', 'dashboard', 'set-prometheus-api-host', + prometheus_ep]) + self._register_dashboards() self._stored.is_started = True self.update_status() diff --git a/ceph-dashboard/src/dashboards/ceph-cluster.json b/ceph-dashboard/src/dashboards/ceph-cluster.json new file mode 100644 index 00000000..61a425d0 --- /dev/null +++ b/ceph-dashboard/src/dashboards/ceph-cluster.json @@ -0,0 +1,1229 @@ +{ + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "vonage-status-panel", + "name": "Status Panel", + "version": "1.0.8" + } + ], + "annotations": { + "list": [] + }, + "description": "Ceph cluster overview", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1525415495309, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "rgba(50, 128, 45, 0.9)", + "rgba(237, 129, 40, 0.9)", + "rgb(255, 0, 0)" + ], + "datasource": "$datasource", + "editable": false, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 0 + }, + "hideTimeOverride": true, + "id": 21, + "interval": "1m", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "ceph_health_status", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "refId": "A", + "step": 60 + } + ], + "thresholds": "1,2", + "timeFrom": null, + "title": "Health Status", + "transparent": false, + "type": 
"singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "OK", + "value": "0" + }, + { + "op": "=", + "text": "WARN", + "value": "1" + }, + { + "op": "=", + "text": "ERR", + "value": "2" + } + ], + "valueName": "current" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgb(255, 0, 0)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 0, + "datasource": "$datasource", + "displayName": "", + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 0 + }, + "id": 43, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [], + "targets": [ + { + "aggregation": "Last", + "alias": "All", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_osd_metadata)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "All", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "In", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osds_in)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "In", + "refId": "B", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Out", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_in == bool 0)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Out", + "refId": "C", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1 + }, + { + "aggregation": "Last", + "alias": "Up", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_up)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Up", + "refId": "D", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Down", + "crit": 2, + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_up == bool 0)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Down", + "refId": "E", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1 + } + ], + "title": "OSDs", + "type": "vonage-status-panel" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 2, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 4, + "y": 0 + }, + "id": 47, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": 
"null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes_used)/sum(ceph_osd_stat_bytes)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used", + "refId": "A" + } + ], + "thresholds": "70,80", + "title": "Capacity used", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 53, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Active", + "color": "#508642", + "fill": 1, + "stack": "A" + }, + { + "alias": "Total", + "color": "#f9e2d2" + }, + { + "alias": "Degraded", + "color": "#eab839" + }, + { + "alias": "Undersized", + "color": "#f9934e" + }, + { + "alias": "Inconsistent", + "color": "#e24d42" + }, + { + "alias": "Down", + "color": "#bf1b00" + }, + { + "alias": "Inactive", + "color": "#bf1b00", + "fill": 4, + "linewidth": 0, + "stack": "A" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(ceph_pg_total)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "A" + }, + { + "expr": "sum(ceph_pg_active)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Active", + "refId": "B" + }, + { + "expr": "sum(ceph_pg_total - ceph_pg_active)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Inactive", + "refId": "G" + }, + { + "expr": "sum(ceph_pg_undersized)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Undersized", + "refId": "F" + }, + { + "expr": "sum(ceph_pg_degraded)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Degraded", + "refId": "C" + }, + { + "expr": "sum(ceph_pg_inconsistent)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Inconsistent", + "refId": "D" + }, + { + "expr": "sum(ceph_pg_down)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Down", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "PG States", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + 
"nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Avg Apply Latency", + "color": "#7eb26d" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "quantile(0.95, ceph_osd_apply_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Apply Latency P_95", + "refId": "A" + }, + { + "expr": "quantile(0.95, ceph_osd_commit_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Commit Latency P_95", + "refId": "B" + }, + { + "expr": "avg(ceph_osd_apply_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Avg Apply Latency", + "refId": "C" + }, + { + "expr": "avg(ceph_osd_commit_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Avg Commit Latency", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "OSD Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "clusterName": "", + "colorMode": "Panel", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 1, + "datasource": "$datasource", + "displayName": "", + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 3 + }, + "id": 41, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [], + "targets": [ + { + "aggregation": "Last", + "alias": "In Quorum", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_mon_quorum_status)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "In Quorum", + "refId": "A", + "units": "none", + "valueHandler": "Text Only" + }, + { + "aggregation": "Last", + "alias": "Total", + "crit": 1, + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mon_quorum_status)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "B", + "units": "none", + "valueHandler": "Text Only", + "warn": 2 + }, + { + "aggregation": "Last", + "alias": "MONs out of Quorum", + "crit": 1.6, + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Annotation", + "displayValueWithAlias": "Never", + "expr": "count(ceph_mon_quorum_status) / sum(ceph_mon_quorum_status)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MONs out of Quorum", + "refId": "C", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1.1 + } + ], + "title": "Monitors", + "type": "vonage-status-panel" + }, + { + "colorMode": "Disabled", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + 
"cornerRadius": 0, + "datasource": "$datasource", + "displayName": "", + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 3 + }, + "id": 68, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [], + "targets": [ + { + "aggregation": "Last", + "alias": "Clients", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "ceph_mds_server_handle_client_session", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Clients", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + } + ], + "title": "Client connections", + "type": "vonage-status-panel" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 0.5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(ceph_osd_op_w_in_bytes[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "A" + }, + { + "expr": "sum(irate(ceph_osd_op_r_out_bytes[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Cluster I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 62, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(deriv(ceph_pool_stored[1m]))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "In-/Egress", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": " Egress (-) / Ingress (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + 
"max": null, + "min": null, + "show": false + } + ] + }, + { + "cards": { + "cardPadding": null, + "cardRound": 1 + }, + "color": { + "cardColor": "rgb(0, 254, 255)", + "colorScale": "sqrt", + "colorScheme": "interpolateBlues", + "exponent": 0.5, + "min": null, + "mode": "spectrum" + }, + "dataFormat": "timeseries", + "datasource": "$datasource", + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 15 + }, + "heatmap": {}, + "highlightCards": true, + "id": 55, + "legend": { + "show": true + }, + "links": [], + "span": 12, + "targets": [ + { + "expr": "ceph_osd_stat_bytes_used / ceph_osd_stat_bytes", + "format": "time_series", + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "Util (%)", + "refId": "A", + "step": 60 + } + ], + "timeFrom": null, + "title": "OSD Capacity Utilization", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": 2, + "format": "percentunit", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketNumber": null, + "yBucketSize": null + }, + { + "cards": { + "cardPadding": null, + "cardRound": 1 + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateBlues", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "timeseries", + "datasource": "$datasource", + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 15 + }, + "heatmap": {}, + "highlightCards": true, + "id": 59, + "legend": { + "show": true + }, + "links": [], + "targets": [ + { + "expr": "ceph_osd_numpg", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "#PGs", + "refId": "A" + } + ], + "title": "PGs per OSD", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "none", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 64, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(ceph_osd_recovery_ops[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Op/s", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Recovery Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": "Recovery Ops/s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "30s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph", + "cluster" + ], + "templating": { + "list": [ + 
{ + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "auto": true, + "auto_count": 10, + "auto_min": "1m", + "current": { + "text": "auto", + "value": "$__auto_interval_interval" + }, + "datasource": null, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "1m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "refresh": 2, + "type": "interval" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph - Cluster", + "version": 13 + } diff --git a/ceph-dashboard/src/dashboards/cephfs-overview.json b/ceph-dashboard/src/dashboards/cephfs-overview.json new file mode 100644 index 00000000..57922f55 --- /dev/null +++ b/ceph-dashboard/src/dashboards/cephfs-overview.json @@ -0,0 +1,309 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1557392920097, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "title": "MDS Performance", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Ops", + "refId": "A" + }, + { + "expr": "sum(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"})", + "format": "time_series", + 
"intervalFactor": 1, + "legendFormat": "Write Ops", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "MDS Workload - $mds_servers", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "Reads(-) / Writes (+)", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ceph_daemon}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Request Load - $mds_servers", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "Client Requests", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "MDS Server", + "multi": false, + "name": "mds_servers", + "options": [], + "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MDS Performance", + "uid": "tbO9LAiZz", + "version": 2 +} diff --git a/ceph-dashboard/src/dashboards/host-details.json b/ceph-dashboard/src/dashboards/host-details.json new file mode 100644 index 00000000..46fd31a7 --- /dev/null +++ b/ceph-dashboard/src/dashboards/host-details.json @@ -0,0 +1,1269 @@ +{ + "__inputs": [], + 
"__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1615564911000, + "links": [], + "panels": [ + { + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 16, + "title": "$ceph_hosts System Overview", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 1 + }, + "height": "160", + "id": 1, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": "", + "minSpan": 4, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 40, + "textEditor": true + } + ], + "thresholds": "", + "title": "OSDs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + "interrupt": "#447EBC", + "steal": "#6D1F62", + "system": "#890F02", + "user": "#3F6833", + "wait": "#C15C17" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Shows the CPU breakdown. 
When multiple servers are selected, only the first host's cpu data is shown", + "fill": 1, + "gridPos": { + "h": 10, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_user{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "user - {{ host }}", + "refId": "A", + "step": 2 + }, + { + "expr": "cpu_usage_iowait{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "iowait - {{ host }}", + "refId": "C", + "step": 2 + }, + { + "expr": "cpu_usage_nice{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "nice - {{ host }}", + "refId": "D", + "step": 2 + }, + { + "expr": "cpu_usage_softirq{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "softirq - {{ host }}", + "refId": "E", + "step": 2 + }, + { + "expr": "cpu_usage_irq{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "irq - {{ host }}", + "refId": "F", + "step": 2 + }, + { + "expr": "cpu_usage_system{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "system - {{ host }}", + "refId": "G", + "step": 2 + }, + { + "expr": "cpu_usage_idle{cpu=\"cpu-total\", host='$ceph_hosts'}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "idle - {{ host }}", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "% Utilization", + "logBase": 1, + "max": "100", + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Available": "#508642", + "Free": "#508642", + "Total": "#bf1b00", + "Used": "#bf1b00", + "total": "#bf1b00", + "used": "#0a50a1" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 10, + "w": 6, + "x": 9, + "y": 1 + }, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "color": "#bf1b00", + "fill": 0, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + 
{ + "expr": "mem_used{host='$ceph_hosts'}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "used", + "refId": "D" + }, + { + "expr": "mem_free{host='$ceph_hosts'}", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "Free", + "refId": "A" + }, + { + "expr": "mem_buffered{host='$ceph_hosts'} + mem_cached{host='$ceph_hosts'}", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "buffers/cache", + "refId": "C" + }, + { + "expr": "mem_total{host='$ceph_hosts'}", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "RAM used", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", + "fill": 0, + "gridPos": { + "h": 10, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (device) (\n irate(net_bytes_recv{host='$ceph_hosts',device!=\"lo\"}[1m])\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A", + "step": 10, + "textEditor": true + }, + { + "expr": "sum by (device) (\n irate(net_bytes_sent{host='$ceph_hosts',device!=\"lo\"}[1m])\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network Load", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 1 + }, + "hideTimeOverride": true, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(net_drop_in{host='$ceph_hosts'}[1m])", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "irate(net_drop_out{host='$ceph_hosts'}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network drop rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "Each OSD consists of a Journal/WAL partition and a data partition. The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 6 + }, + "height": "160", + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": "", + "minSpan": 4, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 40, + "textEditor": true + } + ], + "thresholds": "", + "title": "Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 6 + }, + "hideTimeOverride": true, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*tx/", + 
"transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(net_err_in{host='$ceph_hosts'}[1m])", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "irate(net_err_out{host='$ceph_hosts'}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network error rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "OSD Disk Performance Statistics", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For any OSD devices on the host, this chart shows the iops per physical device. Each device is shown by it's name and corresponding OSD id value", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 12 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace((irate(diskio_writes{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) writes", + "refId": "A", + "step": 10, + "textEditor": true + }, + { + "expr": "label_replace(label_replace((irate(diskio_reads{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) reads", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk IOPS", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": 
null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. Each device is shown by device name, and corresponding OSD id", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 12 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*read/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr" : "label_replace(label_replace((irate(diskio_write_bytes[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) write", + "refId": "B" + }, + { + "expr" : "label_replace(label_replace((irate(diskio_read_bytes[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) read", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Throughput by Disk", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For OSD hosts, this chart shows the latency at the physical drive. 
Each drive is shown by device name, with its corresponding OSD id", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 21 + }, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace((irate(diskio_weighted_io_time{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts I/O Queued RQs * RQ time Waiting", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "Weighted I/O time", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 21 + }, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxPerRow": 2, + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace((irate(diskio_io_time{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$ceph_hosts I/O Queue existence time", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "I/O time", + "logBase": 1, + "max":
null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Hostname", + "multi": false, + "name": "ceph_hosts", + "options": [], + "query": "label_values(node_scrape_collector_success, instance) ", + "refresh": 1, + "regex": "([^.:]*).*", + "skipUrlSync": false, + "sort": 3, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Host Details", + "uid": "rtOg0AiWz", + "version": 4 +} diff --git a/ceph-dashboard/src/dashboards/hosts-overview.json b/ceph-dashboard/src/dashboards/hosts-overview.json new file mode 100644 index 00000000..28e6707b --- /dev/null +++ b/ceph-dashboard/src/dashboards/hosts-overview.json @@ -0,0 +1,852 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1557393917915, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 5, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (hostname) (ceph_osd_metadata))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "OSD Hosts", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + 
"valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", + "decimals": 2, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 4, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(\n 1 - (\n avg by(instance) \n (cpu_usage_idle{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100)))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG CPU Busy", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", + "decimals": 2, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg (((mem_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) - (\n (mem_free{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_buffered{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (mem_slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (mem_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG RAM Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + 
"colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "IOPS Load at the device as reported by the OS on all OSD hosts", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 12, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ((irate(diskio_reads{instance=~\"($osd_hosts|$mds_hosts).*\"}[5m])) + \n(irate(diskio_writes{instance=~\"($osd_hosts|$mds_hosts).*\"}[5m])))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Physical IOPS", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average Disk utilization for all OSD data devices (i.e. excludes journal/WAL)", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 16, + "y": 0 + }, + "id": 20, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr" : "avg (label_replace(label_replace((irate(diskio_io_time[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{instance=~\"($osd_hosts|$mds_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG Disk Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "description": "Total send/receive network load across all hosts in the ceph 
cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 18, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (\n irate(net_bytes_recv{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\nsum (\n irate(net_bytes_sent{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Network Load", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show the top 10 busiest hosts by cpu", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10,100 * (\n 1 - (\n avg by(instance) \n (cpu_usage_idle{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100))))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Busy - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percent", + "label": null, + "logBase": 1, + "max": "100", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Top 10 hosts by network load", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "topk(10, (sum by(instance) (\n (\n irate(net_bytes_recv{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\n (\n irate(net_bytes_sent{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ))\n )\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Load - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": "", + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "osd_hosts", + "options": [], + "query": "label_values(ceph_disk_occupation, exported_instance)", + "refresh": 1, + "regex": "([^.]*).*", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "ceph", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "mon_hosts", + "options": [], + "query": "label_values(ceph_mon_metadata, ceph_daemon)", + "refresh": 1, + "regex": "mon.(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "mds_hosts", + "options": [], + "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "refresh": 1, + "regex": "mds.(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_hosts", + "options": [], + "query": "label_values(ceph_rgw_qlen, ceph_daemon)", + "refresh": 1, + "regex": "rgw.(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Host Overview", + "uid": "y0KGL0iZz", + "version": 3 +} diff --git a/ceph-dashboard/src/dashboards/osd-device-details.json 
b/ceph-dashboard/src/dashboards/osd-device-details.json new file mode 100644 index 00000000..eefb5912 --- /dev/null +++ b/ceph-dashboard/src/dashboards/osd-device-details.json @@ -0,0 +1,800 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1557395861896, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 14, + "panels": [], + "title": "OSD Performance", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "read", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "write", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$osd Latency", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])", + "format": 
"time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W IOPS", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Read Bytes", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Bytes", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write Bytes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W Bytes", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 12, + "panels": [], + "title": "Physical Device Performance", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 11 + }, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "format": 
"time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Reads", + "refId": "A" + }, + { + "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Latency for $osd", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 11 + }, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Writes", + "refId": "A" + }, + { + "expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Reads", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W IOPS for $osd", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + 
"gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 11 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Reads", + "refId": "A" + }, + { + "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W Bytes for $osd", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 11 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Util% for $osd", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "OSD", + "multi": false, + "name": "osd", + "options": [], + "query": "label_values(ceph_osd_metadata,ceph_daemon)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "OSD device details", + "uid": "CrAHE0iZz", + "version": 3 +} diff --git a/ceph-dashboard/src/dashboards/osds-overview.json b/ceph-dashboard/src/dashboards/osds-overview.json new file mode 100644 index 00000000..4b91df9e --- /dev/null +++ b/ceph-dashboard/src/dashboards/osds-overview.json @@ -0,0 +1,876 @@ +{ + + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "grafana-piechart-panel", + "name": "Pie Chart", + "version": "1.3.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1538083987689, + "links": [], + "panels": [ + { + "aliasColors": { + "@95%ile": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 12, + "legend": { + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG read", + "refId": "A" + }, + { + "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX read", + "refId": "B" + }, + { + "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)", + "format": "time_series", + "intervalFactor": 
1, + "legendFormat": "@95%ile", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "OSD Read Latencies", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "columns": [], + "datasource": "$datasource", + "description": "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 15, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "ceph_daemon", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "none" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Highest READ Latencies", + "transform": "table", + "type": "table" + }, + { + "aliasColors": { + "@95%ile write": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 8, + "x": 12, + "y": 0 + }, + "id": 13, + "legend": { + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG write", + "refId": "A" + }, + { + "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX write", + "refId": "B" + }, + { + "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "@95%ile write", + "refId": "C" + } + 
], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "OSD Write Latencies", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "columns": [], + "datasource": "$datasource", + "description": "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 16, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "ceph_daemon", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "none" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Highest WRITE Latencies", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "$datasource", + "fontSize": "80%", + "format": "none", + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 8 + }, + "id": 2, + "interval": null, + "legend": { + "show": true, + "values": true + }, + "legendType": "Under graph", + "links": [], + "maxDataPoints": 3, + "nullPointMode": "connected", + "pieType": "pie", + "strokeWidth": 1, + "targets": [ + { + "expr": "count by (device_class) (ceph_osd_metadata)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device_class}}", + "refId": "A" + } + ], + "title": "OSD Types Summary", + "type": "grafana-piechart-panel", + "valueName": "current" + }, + { + "aliasColors": { + "Non-Encrypted": "#E5AC0E" + }, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "$datasource", + "fontSize": "80%", + "format": "none", + "gridPos": { + "h": 8, + "w": 4, + "x": 4, + "y": 8 + }, + "height": "200px", + "hideTimeOverride": true, + "id": 4, + "interval": null, + "legend": { + "percentage": false, + "show": true, + "values": true + }, + "legendType": "Under graph", + "links": [], + "maxDataPoints": "1", + "minSpan": 4, + "nullPointMode": "connected", + "pieType": "pie", + "strokeWidth": 1, + 
"targets": [ + { + "expr": "count(ceph_bluefs_wal_total_bytes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "bluestore", + "refId": "A", + "step": 240 + }, + { + "expr": "count(ceph_osd_metadata) - count(ceph_bluefs_wal_total_bytes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "filestore", + "refId": "B", + "step": 240 + }, + { + "expr": "absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "filestore", + "refId": "C", + "step": 240 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Objectstore Types", + "type": "grafana-piechart-panel", + "valueName": "current" + }, + { + "aliasColors": {}, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": "0.05" + }, + "datasource": "$datasource", + "description": "The pie chart shows the various OSD sizes used within the cluster", + "fontSize": "80%", + "format": "none", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 8 + }, + "height": "220", + "hideTimeOverride": true, + "id": 8, + "interval": null, + "legend": { + "header": "", + "percentage": false, + "show": true, + "sideWidth": null, + "sortDesc": true, + "values": true + }, + "legendType": "Under graph", + "links": [], + "maxDataPoints": "", + "minSpan": 6, + "nullPointMode": "connected", + "pieType": "pie", + "strokeWidth": "1", + "targets": [ + { + "expr": "count(ceph_osd_stat_bytes < 1099511627776)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<1 TB", + "refId": "A", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<2 TB", + "refId": "B", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<3TB", + "refId": "C", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<4TB", + "refId": "D", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<6TB", + "refId": "E", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<8TB", + "refId": "F", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<10TB", + "refId": "G", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<12TB", + "refId": "H", + "step": 2 + }, + { + "expr": "count(ceph_osd_stat_bytes >= 13194139533312)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "12TB+", + "refId": "I", + "step": 2 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Size Summary", + "type": "grafana-piechart-panel", + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Each bar indicates the number of OSD's that have a PG count in a specific range as shown on the x axis.", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, 
+ "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_osd_numpg\n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "PGs per OSD", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Distribution of PGs per OSD", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 20, + "mode": "histogram", + "name": null, + "show": true, + "values": [ + "total" + ] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "# of OSDs", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 20, + "panels": [], + "title": "R/W Profile", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show the read/write workload profile overtime", + "fill": 1, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_pool_rd[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_pool_wr[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Read/Write Profile", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" 
+ ] + }, + "timezone": "", + "title": "OSD Overview", + "uid": "lo02I1Aiz", + "version": 3 +} diff --git a/ceph-dashboard/src/dashboards/pool-detail.json b/ceph-dashboard/src/dashboards/pool-detail.json new file mode 100644 index 00000000..dd6bc392 --- /dev/null +++ b/ceph-dashboard/src/dashboards/pool-detail.json @@ -0,0 +1,665 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1551858875941, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 2, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 0 + }, + "id": 12, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": ".7,.8", + "title": "Capacity used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Time till pool is full assuming the average fill rate of the last 6 hours", + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 7, + "y": 0 + }, + "id": 14, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + 
"expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Time till full", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "inf", + "value": "null" + }, + { + "op": "=", + "text": "inf", + "value": "N/A" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Objects per second", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Object Ingress/Egress", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": "Objects out(-) / in(+) ", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "B" + }, + { + "expr": "irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "iops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "A" + }, + { + "expr": "irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Number of Objects", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Objects", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Objects", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus admin.virt1.home.fajerski.name:9090", + "value": "Prometheus admin.virt1.home.fajerski.name:9090" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Pool Name", + "multi": false, + "name": "pool_name", + "options": [], + "query": "label_values(ceph_pool_metadata,name)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pool Details", + "uid": "-xyV8KCiz", + "version": 1 +} diff --git a/ceph-dashboard/src/dashboards/pool-overview.json b/ceph-dashboard/src/dashboards/pool-overview.json new file mode 100644 index 00000000..c405f607 --- /dev/null +++ b/ceph-dashboard/src/dashboards/pool-overview.json @@ -0,0 +1,1564 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1617656284287, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 21, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Pools", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Count of the 
pools that have compression enabled", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 3, + "y": 0 + }, + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "pluginVersion": "6.7.4", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata{compression_mode!=\"none\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Pools with Compression", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "Total raw capacity available to the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 6, + "y": 0 + }, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes)", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Total Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Total raw capacity consumed by user data and associated overheads (metadata + redundancy)", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 25, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + 
"postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_bytes_used)", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Raw Capacity Consumed", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current", + "decimals": 2 + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "Total of client data stored in the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 0 + }, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_stored)", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Logical Stored ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "description": "A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 15, + "y": 0 + }, + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)", + "interval": "", + "legendFormat": "", + 
"refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Compression Savings", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 18, + "y": 0 + }, + "id": 17, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100", + "format": "table", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Compression Eligibility", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. 
It does not account for data written that was ineligible for compression (too small, or compression yield too low)", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 15, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "80%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Compression Factor", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 5, + "links": [], + "maxPerRow": 3, + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 5, + "desc": true + }, + "styles": [ + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Time", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "instance", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "job", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Pool Name", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Pool ID", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "pool_id", + "thresholds": [], + "type": "hidden", + "unit": "none" + }, + { + "alias": "Compression Factor", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 1, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "none" + }, 
+ { + "alias": "% Used", + "align": "auto", + "colorMode": "value", + "colors": [ + "rgb(0, 0, 0)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [ + "70", + "85" + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Usable Free", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Compression Eligibility", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percent" + }, + { + "alias": "Compression Savings", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 1, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Growth (5d)", + "align": "auto", + "colorMode": "value", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [ + "0", + "0" + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "IOPS", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "none" + }, + { + "alias": "Bandwidth", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value #H", + "thresholds": [], + "type": "number", + "unit": "Bps" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "__name__", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "type", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "compression_mode", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Type", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 
45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "description", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Stored", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 1, + "mappingType": 1, + "pattern": "Value #J", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #I", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Compression", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #K", + "thresholds": [], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "ON", + "value": "1" + } + ] + } + ], + "targets": [ + { + "expr": "(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D" + }, + { + "expr": "ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "J" + }, + { + "expr": "ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + }, + { + "expr": "delta(ceph_pool_stored[5d])", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "F" + }, + { + "expr": "ceph_pool_metadata", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "I" + }, + { + "expr": "ceph_pool_metadata{compression_mode!=\"none\"}", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "K" + }, + { + "expr": "(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "C" + }, + { + "expr": "(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "E" + }, + { + "expr": "rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "G" + }, + { + "expr": "rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "H" + }, + { + "expr": "", + "interval": "", + "legendFormat": "", + "refId": "L" + } + ], + "title": "Pool Overview", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "This chart shows the sum of read and write IOPS from all clients by pool", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxPerRow": 2, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}} ", + "refId": "F" + }, + { + "expr": "topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "format": "time_series", + "hide": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}} - write", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top $topk Client IOPS by Pool", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "IOPS", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "The chart shows the sum of read and write bytes from all clients, by pool", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "maxPerRow": 2, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A", + "textEditor": true + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top $topk Client Bandwidth by Pool", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Throughput", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Historical view of capacity usage, to help identify growth and trends in pool consumption", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata", + "interval": "", + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": "14d", + "timeRegions": [ + { + "colorMode": "background6", + "fill": true, + "fillColor": "rgba(234, 112, 112, 0.12)", + "line": false, + "lineColor": "rgba(237, 46, 24, 0.60)", + "op": "time" + } + ], + "timeShift": null, + "title": "Pool Capacity Usage (RAW)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "bytes", + "label": "Capacity Used", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "15s", + "schemaVersion": 22, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Dashboard1", + "value": "Dashboard1" + }, + "hide": 0, + "includeAll": false, + "label": "Data Source", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "text": "15", + "value": "15" + }, + "hide": 0, + "label": "Top K", + "name": "topk", + "options": [ + { + "text": "15", + "value": "15" + } + ], + "query": "15", + "skipUrlSync": false, + "type": "textbox" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pools Overview", + "uid": "z99hzWtmk", + "variables": { + "list": [] + }, + "version": 10 +} diff --git a/ceph-dashboard/src/dashboards/radosgw-detail.json b/ceph-dashboard/src/dashboards/radosgw-detail.json new file mode 100644 index 00000000..bf5b16b8 --- /dev/null +++ b/ceph-dashboard/src/dashboards/radosgw-detail.json @@ -0,0 +1,491 @@ +{ + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "grafana-piechart-panel", + "name": "Pie Chart", + "version": "1.3.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { 
+ "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1534386250869, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "RGW Host Detail : $rgw_servers", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 34, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUT {{ceph_daemon}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$rgw_servers GET/PUT Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 7, + "x": 6, + "y": 1 + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Bandwidth by HTTP Operation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "bytes", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + "GETs": "#7eb26d", + "Other": "#447ebc", + "PUTs": "#eab839", + "Requests": "#3f2b5b", + "Requests Failed": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 7, + "x": 13, + "y": 1 + }, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Requests Failed {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "D" + }, + { + "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other {{ceph_daemon}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "HTTP Request Breakdown", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "Failures": "#bf1b00", + "GETs": "#7eb26d", + "Other (HEAD,POST,DELETE)": "#447ebc", + "PUTs": "#eab839" + }, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "$datasource", + "fontSize": "80%", + "format": "none", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 23, + "interval": null, + "legend": { + "show": true, + "values": true + }, + "legendType": "Under graph", + "links": [], + "maxDataPoints": 3, + "nullPointMode": "connected", + "pieType": "pie", + "strokeWidth": 1, + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Failures {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": 
"rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", + "refId": "D" + } + ], + "title": "Workload Breakdown", + "type": "grafana-piechart-panel", + "valueName": "current" + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_servers", + "options": [], + "query": "label_values(ceph_rgw_req, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Instance Detail", + "uid": "x5ARzZtmk", + "version": 2 +} diff --git a/ceph-dashboard/src/dashboards/radosgw-overview.json b/ceph-dashboard/src/dashboards/radosgw-overview.json new file mode 100644 index 00000000..487d736b --- /dev/null +++ b/ceph-dashboard/src/dashboards/radosgw-overview.json @@ -0,0 +1,630 @@ +{ + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1534386107523, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "title": "RGW Overview - All Gateways", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 29, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET AVG", + "refId": "A" + }, + { + "expr": 
"rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUT AVG", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Average GET/PUT Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7, + "x": 8, + "y": 1 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests/sec by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts", + "fill": 1, + "gridPos": { + "h": 7, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 31, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "GET Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": 
"s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": false + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Total bytes transferred in/out of all radosgw instances within the cluster", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_rgw_get_b[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs", + "refId": "A" + }, + { + "expr": "sum(rate(ceph_rgw_put_b[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth Consumed by Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Total bytes transferred in/out through get/put operations, by radosgw instance", + "fill": 1, + "gridPos": { + "h": 6, + "w": 7, + "x": 8, + "y": 8 + }, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts", + 
"fill": 1, + "gridPos": { + "h": 6, + "w": 6, + "x": 15, + "y": 8 + }, + "id": 32, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "PUT Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": false + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_servers", + "options": [], + "query": "label_values(ceph_rgw_req, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Overview", + "uid": "WAkugZpiz", + "version": 2 +} diff --git a/ceph-dashboard/src/dashboards/radosgw-sync-overview.json b/ceph-dashboard/src/dashboards/radosgw-sync-overview.json new file mode 100644 index 00000000..e9136d78 --- /dev/null +++ b/ceph-dashboard/src/dashboards/radosgw-sync-overview.json @@ -0,0 +1,440 @@ +{ + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1534386107523, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 1, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Replication (throughput) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "unit": "bytes", + "format": "Bps", + "decimals": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7.4, + "x": 8.3, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Replication (objects) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "decimals": null, + "label": "Objects/s", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum[30s]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Polling Request Latency from Source Zone", + "tooltip": { + "shared": true, + "sort": 
0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "unit": "s", + "format": "ms", + "decimals": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Unsuccessful Object Replications from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "decimals": null, + "label": "Count/s", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "15s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_servers", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Sync Overview", + "uid": "rgw-sync-overview", + "version": 2 +} diff --git a/ceph-dashboard/src/dashboards/rbd-details.json b/ceph-dashboard/src/dashboards/rbd-details.json new file mode 100644 index 00000000..59932a5e --- /dev/null +++ b/ceph-dashboard/src/dashboards/rbd-details.json @@ -0,0 +1,409 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.3.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": 
"Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Detailed Performance of RBD Images (IOPS/Throughput/Latency)", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1584428820779, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": true, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": true, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "fill": 1, + "gridPos": { + "h": 9, + "w": 
8, + "x": 16, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": true, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "label": null, + "name": "Datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$Datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "Pool", + "options": [], + "query": "label_values(pool)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$Datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "Image", + "options": [], + "query": "label_values(image)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Details", + "uid": "YhCYGcuZz", + "version": 7 +} diff --git a/ceph-dashboard/src/dashboards/rbd-overview.json b/ceph-dashboard/src/dashboards/rbd-overview.json new file mode 100644 index 00000000..eb15fbcb --- /dev/null +++ b/ceph-dashboard/src/dashboards/rbd-overview.json @@ -0,0 +1,685 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.4.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": 
"5.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1547242766440, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_ops[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_ops[30s])))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_bytes[30s])))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_bytes[30s])))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": 
null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hideTimeOverride": false, + "id": 12, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [], + "type": "string", + "unit": "short", + "valueMaps": [] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "IOPS", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "iops" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Highest IOPS", + "transform": "table", + "type": "table" + }, + 
{ + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "id": 10, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Throughput", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "Bps" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "Highest Throughput", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "id": 14, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "ns" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, 
image, namespace)\n)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "Highest Latency", + "transform": "table", + "type": "table" + } + ], + "refresh": "30s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Overview", + "uid": "41FrpeUiz", + "version": 8 +} diff --git a/ceph-dashboard/src/interface_grafana_dashboard.py b/ceph-dashboard/src/interface_grafana_dashboard.py new file mode 100644 index 00000000..e93c024e --- /dev/null +++ b/ceph-dashboard/src/interface_grafana_dashboard.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +import copy +import json +import hashlib +import logging +from uuid import uuid4 +from typing import List + +from ops.charm import RelationChangedEvent +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object) + + +class GrafanaDashboardEvent(EventBase): + pass + + +class GrafanaDashboardEvents(ObjectEvents): + dash_ready = EventSource(GrafanaDashboardEvent) + + +class GrafanaDashboardProvides(Object): + + on = GrafanaDashboardEvents() + _stored = StoredState() + + def __init__(self, charm: str, relation_name: str) -> None: + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe( + charm.on[self.relation_name].relation_changed, + self._on_relation_changed) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle the relation-changed event.""" + self.on.dash_ready.emit() + + def get_requests_by_name(self, name: str, relation: str) -> List[dict]: + """Get a list of requests on the relation matching the given name + + Check the relation data this unit has set on the given relation, + for requests with a matching name and return them. + """ + requests = [] + for k, v in relation.data[self.model.unit].items(): + if k.startswith('request'): + request = json.loads(v) + if request.get('name') == name: + requests.append(request) + return requests + + def get_request_key(self, request_id: str) -> str: + """Return the juju relation key for a given request_id""" + return 'request_{}'.format(request_id) + + def get_request_id(self, name: str, relation: str, digest: str) -> str: + """Return the request id for a request with the given name and digest + + Look for an existing request which has a matching name and digest; if + there is one, return the request id of that request. If no matching + request is found then generate a new request id.
+ """ + logging.debug("Checking for existing request for {}".format(name)) + for request in self.get_requests_by_name(name, relation): + if request.get('dashboard', {}).get('digest') == digest: + logging.debug("Found existing dashboard request") + request_id = request.get('request_id') + break + else: + logging.debug("Generating new request_id") + request_id = str(uuid4()) + return request_id + + def clear_old_requests(self, name: str, relation: str, + digest: str) -> None: + """Remove requests with matching name but different digest""" + old_requests = [] + for request in self.get_requests_by_name(name, relation): + if request.get('dashboard', {}).get('digest') != digest: + old_requests.append(request.get('request_id')) + for request_id in old_requests: + logging.debug("Actually Removing {}".format(request_id)) + rq_key = self.get_request_key(request_id) + relation.data[self.model.unit][rq_key] = '' + + def register_dashboard(self, name: str, dashboard: str): + """ + Request a dashboard to be imported. + + :param name: Name of dashboard. Informational only, so that you can + tell which dashboard request this was, e.g. to check for success or + failure. + :param dashboard: Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized JSON.) + """ + + _dashboard = copy.deepcopy(dashboard) + # In this interface the request id for a job name is preserved. + if self.dashboard_relation: + digest = hashlib.md5( + json.dumps(_dashboard).encode("utf8")).hexdigest() + _dashboard["digest"] = digest + _dashboard["source_model"] = self.model.name + request_id = self.get_request_id(name, self.dashboard_relation, + _dashboard.get('digest')) + rq_key = self.get_request_key(request_id) + self.dashboard_relation.data[self.model.unit][rq_key] = json.dumps( + { + 'request_id': request_id, + 'name': name, + 'dashboard': _dashboard, + }, + sort_keys=True) + self.clear_old_requests( + name, + self.dashboard_relation, + _dashboard.get('digest')) + + @property + def dashboard_relation(self): + return self.model.get_relation(self.relation_name) diff --git a/ceph-dashboard/src/interface_http.py b/ceph-dashboard/src/interface_http.py new file mode 100644 index 00000000..7ec2ed4c --- /dev/null +++ b/ceph-dashboard/src/interface_http.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +import logging +from typing import Dict, Union + +from ops.charm import RelationChangedEvent +from ops.model import Relation + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object) + + +class HTTPEvent(EventBase): + pass + + +class HTTPEvents(ObjectEvents): + http_ready = EventSource(HTTPEvent) + + +class HTTPRequires(Object): + + on = HTTPEvents() + _stored = StoredState() + required_keys = {'hostname', 'port'} + + def __init__(self, charm: str, relation_name: str) -> None: + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe( + charm.on[relation_name].relation_changed, + self.on_changed) + + def on_changed(self, event: RelationChangedEvent) -> None: + """Handle the relation-changed event + + When the relation changes check the relation data from the remote + units to see if all the keys needed are present.""" + logging.debug("http on_changed") + if self.http_relation: + for u in self.http_relation.units: + rel_data = self.http_relation.data[u] + if self.required_keys.issubset(set(rel_data.keys())): + self.on.http_ready.emit() + + def get_service_ep_data(self) -> Union[Dict[str, str], None]: + 
"""Return endpoint data for accessing the remote service. + + Return endpoint data for accessing the remote service. If the relation + or required keys are missing then return None""" + logging.debug("http on_changed") + if self.http_relation: + for u in self.http_relation.units: + rel_data = self.http_relation.data[u] + if self.required_keys.issubset( + set(self.http_relation.data[u].keys())): + return {'hostname': rel_data['hostname'], + 'port': rel_data['port']} + + @property + def http_relation(self) -> Union[Relation, None]: + """The relation matching self.relation_name if it exists""" + return self.model.get_relation(self.relation_name) diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 02c7573e..9effd131 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -17,7 +17,7 @@ applications: num_units: 1 charm: cs:~openstack-charmers-next/vault mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster-79 + charm: cs:~openstack-charmers-next/mysql-innodb-cluster constraints: mem=3072M num_units: 3 vault-mysql-router: @@ -26,6 +26,27 @@ applications: charm: ../../ceph-dashboard.charm options: public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: cs:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # Octrober 2021 + charm: cs:~llama-charmers-next/grafana + num_units: 1 + options: + anonymous: True + install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: cs:prometheus-alertmanager + num_units: 1 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -37,3 +58,25 @@ relations: - 'ceph-mon:dashboard' - - 'ceph-dashboard:certificates' - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' diff --git a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 new file mode 100644 index 00000000..3248e700 --- /dev/null +++ b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 @@ -0,0 +1,4 @@ +applications: + grafana: + options: + http_proxy: '{{ TEST_HTTP_PROXY }}' diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index e1e1bda3..23df3087 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -7,10 +7,20 @@ configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation tests: - zaza.openstack.charm_tests.ceph.dashboard.tests.CephDashboardTest + - 
zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest target_deploy_status: - vault: - workload-status: blocked - workload-status-message: Vault needs to be initialized ceph-dashboard: workload-status: blocked - workload-status-message-prefix: No certificates found + workload-status-message-regex: "No certificates found|Charm config option" + vault: + workload-status: blocked + workload-status-message-prefix: Vault needs to be initialized + grafana: + workload-status: active + workload-status-message-prefix: Started + prometheus2: + workload-status: active + workload-status-message-prefix: Ready + telegraf: + workload-status: active + workload-status-message-prefix: Monitoring diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index fbeb8d3f..6d8dff9e 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -168,6 +168,7 @@ def setUp(self): self.socket.getfqdn.return_value = 'server1.local' def get_harness(self): + initial_config = {'grafana-api-url': None} _harness = Harness( _CephDashboardCharm, ) @@ -197,6 +198,7 @@ def network_get(self, endpoint_name, relation_id=None): _harness._meta, _harness._model) # END Workaround + _harness.update_config(initial_config) return _harness def test_init(self): @@ -303,6 +305,25 @@ def test_check_dashboard(self): self.harness.charm.check_dashboard(), BlockedStatus('Dashboard is not enabled')) + def test_check_dashboard_grafana(self): + socket_mock = MagicMock() + self.socket.socket.return_value = socket_mock + socket_mock.connect_ex.return_value = 0 + self.ceph_utils.is_dashboard_enabled.return_value = True + rel_id = self.harness.add_relation('grafana-dashboard', 'grafana') + self.harness.begin() + self.harness.add_relation_unit( + rel_id, + 'grafana/0') + self.harness.update_config( + key_values={ + 'ssl_key': base64.b64encode(TEST_KEY.encode("utf-8")), + 'ssl_cert': base64.b64encode(TEST_CERT.encode("utf-8")), + 'ssl_ca': base64.b64encode(TEST_CA.encode("utf-8"))}) + self.assertEqual( + self.harness.charm.check_dashboard(), + BlockedStatus('Charm config option grafana-api-url not set')) + def test_kick_dashboard(self): self.harness.begin() self.harness.charm.kick_dashboard() diff --git a/ceph-dashboard/unit_tests/test_interface_api_endpoints.py b/ceph-dashboard/unit_tests/test_interface_api_endpoints.py index 69f576d0..63e876bb 100644 --- a/ceph-dashboard/unit_tests/test_interface_api_endpoints.py +++ b/ceph-dashboard/unit_tests/test_interface_api_endpoints.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2020 Canonical Ltd. +# Copyright 2021 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ceph-dashboard/unit_tests/test_interface_grafana_dashboard.py b/ceph-dashboard/unit_tests/test_interface_grafana_dashboard.py new file mode 100644 index 00000000..89bf0d7c --- /dev/null +++ b/ceph-dashboard/unit_tests/test_interface_grafana_dashboard.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import json +import unittest +import sys +sys.path.append('lib') # noqa +sys.path.append('src') # noqa +from ops.testing import Harness +from ops.charm import CharmBase +import interface_grafana_dashboard + + +class TestGrafanaDashboardProvides(unittest.TestCase): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.seen_events = [] + self.grafana_dashboard = \ + interface_grafana_dashboard.GrafanaDashboardProvides( + self, + 'grafana-dashboard') + self.seen_events = [] + + self.framework.observe( + self.grafana_dashboard.on.dash_ready, + self._log_event) + + def _log_event(self, event): + self.seen_events.append(type(event).__name__) + + def setUp(self): + super().setUp() + self.harness = Harness( + self.MyCharm, + meta=''' +name: my-charm +provides: + grafana-dashboard: + interface: grafana-dashboard +''' + ) + + def add_grafana_dashboard_relation(self): + rel_id = self.harness.add_relation( + 'grafana-dashboard', + 'grafana') + self.harness.add_relation_unit( + rel_id, + 'grafana/0') + self.harness.update_relation_data( + rel_id, + 'grafana/0', + {'ingress-address': '10.0.0.3'}) + return rel_id + + def test_init(self): + self.harness.begin() + self.assertEqual( + self.harness.charm.grafana_dashboard.relation_name, + 'grafana-dashboard') + + def test_on_changed(self): + self.harness.begin() + # No GrafanaDashboardEvent as relation is absent + self.assertEqual( + self.harness.charm.seen_events, + []) + self.add_grafana_dashboard_relation() + self.assertEqual( + self.harness.charm.seen_events, + ['GrafanaDashboardEvent']) + + def get_requests_on_relation(self, rel_data): + requests = {k: v for k, v in rel_data.items() + if k.startswith('request')} + return requests + + def test_register_dashboard(self): + self.harness.begin() + rel_id = self.add_grafana_dashboard_relation() + dashboard = { + 'uid': '123', + 'foo': 'ba1'} + digest = hashlib.md5(json.dumps(dashboard).encode("utf8")).hexdigest() + self.harness.charm.grafana_dashboard.register_dashboard( + 'my-dash.json', + dashboard) + rel_data = self.harness.get_relation_data( + rel_id, + 'my-charm/0') + requests = self.get_requests_on_relation(rel_data) + self.assertEqual( + len(requests), + 1) + key = list(requests.keys())[0] + expect = { + "dashboard": { + "digest": digest, + "foo": "ba1", + "source_model": None, # Model name appears as None in testing + # harness + "uid": "123"}, + "name": "my-dash.json", + "request_id": key.replace("request_", "")} + self.assertEqual( + requests[key], + json.dumps(expect)) + # Register the same dashboard again + self.harness.charm.grafana_dashboard.register_dashboard( + 'my-dash.json', + dashboard) + # Check the relation data is unchanged + requests = self.get_requests_on_relation(rel_data) + self.assertEqual( + len(requests), + 1) + new_key = list(requests.keys())[0] + # A duplicate was registered so the key should be unchanged. 
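+        # register_dashboard hashes json.dumps(dashboard) with md5, so
+        # an identical dict produces an identical digest and
+        # get_request_id re-uses the existing request_id instead of
+        # minting a new one.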
+ self.assertEqual( + new_key, + key) + expect = { + "dashboard": { + "digest": digest, + "foo": "ba1", + "source_model": None, # Model name appears as None in testing + # harness + "uid": "123"}, + "name": "my-dash.json", + "request_id": new_key.replace("request_", "")} + self.assertEqual( + requests[new_key], + json.dumps(expect)) + # Update an existing dashboard with a new version. This should create + # a new request and remove the old one. + updated_dashboard = { + 'uid': '123', + 'foo': 'ba2'} + updated_digest = hashlib.md5( + json.dumps(updated_dashboard).encode("utf8")).hexdigest() + self.harness.charm.grafana_dashboard.register_dashboard( + 'my-dash.json', + updated_dashboard) + rel_data = self.harness.get_relation_data( + rel_id, + 'my-charm/0') + requests = self.get_requests_on_relation(rel_data) + # The old request should have been removed so there is still just one + # key. + self.assertEqual( + len(requests), + 1) + updated_key = list(requests.keys())[0] + expect = { + "dashboard": { + "digest": updated_digest, + "foo": "ba2", + "source_model": None, # Model name appears as None in testing + # harness + "uid": "123"}, + "name": "my-dash.json", + "request_id": updated_key.replace("request_", "")} + self.assertEqual( + requests[updated_key], + json.dumps(expect)) diff --git a/ceph-dashboard/unit_tests/test_interface_http.py b/ceph-dashboard/unit_tests/test_interface_http.py new file mode 100644 index 00000000..9f9e4a3b --- /dev/null +++ b/ceph-dashboard/unit_tests/test_interface_http.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
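+
+# These tests drive HTTPRequires through ops.testing.Harness: a
+# 'prometheus' relation is added, remote unit data is filled in, and
+# the http_ready event is expected once both 'hostname' and 'port' are
+# present.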
+ +import unittest +import sys +sys.path.append('lib') # noqa +sys.path.append('src') # noqa +from ops.testing import Harness +from ops.charm import CharmBase, CharmMeta +import interface_http + + +class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.framework.meta = CharmMeta.from_yaml(metadata=''' +name: my-charm +requires: + prometheus: + interface: http +''') + + self.seen_events = [] + self.prometheus = interface_http.HTTPRequires( + self, + 'prometheus') + self.framework.observe( + self.prometheus.on.http_ready, + self._log_event) + + def _log_event(self, event): + self.seen_events.append(type(event).__name__) + + +class TestHTTPRequires(unittest.TestCase): + + def setUp(self): + super().setUp() + self.harness = Harness( + MyCharm, + ) + + def add_http_relation(self): + rel_id = self.harness.add_relation('prometheus', 'prometheus') + self.harness.add_relation_unit( + rel_id, + 'prometheus/0') + return rel_id + + def test_relation_name(self): + self.harness.begin() + self.assertEqual( + self.harness.charm.prometheus.relation_name, + 'prometheus') + + def test_http_ready_event(self): + self.harness.begin() + rel_id = self.add_http_relation() + self.assertEqual( + self.harness.charm.seen_events, + []) + self.harness.update_relation_data( + rel_id, + 'prometheus/0', + { + 'hostname': 'promhost', + 'port': 3000}) + self.assertEqual( + self.harness.charm.seen_events, + ['HTTPEvent']) + + def test_get_service_ep_data(self): + self.harness.begin() + rel_id = self.add_http_relation() + self.harness.update_relation_data( + rel_id, + 'prometheus/0', + { + 'hostname': 'promhost', + 'port': 3000}) + self.assertEqual( + self.harness.charm.prometheus.get_service_ep_data(), + {'hostname': 'promhost', 'port': 3000}) From 9714bf0f56a41dec16adbf8db75e12e6d8193378 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 31 Aug 2021 18:43:59 +0000 Subject: [PATCH 2255/2699] Use standard name for CA cert Use standard name for CA cert and add config step to ensure that tests do not proceed until all dashboard units are ready func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/628 Change-Id: I13953c41eeef8196715a05dbfcc1573ae40b40de --- ceph-dashboard/src/charm.py | 9 ++++++--- ceph-dashboard/tests/tests.yaml | 1 + ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py | 10 +++++----- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 85f38a79..bed3f58d 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -50,8 +50,9 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): TLS_PUB_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard-pub.key' TLS_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.crt' TLS_KEY_AND_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.pem' - TLS_CA_CERT_PATH = Path( - '/usr/local/share/ca-certificates/vault_ca_cert_dashboard.crt') + TLS_CA_CERT_DIR = Path('/usr/local/share/ca-certificates') + TLS_VAULT_CA_CERT_PATH = TLS_CA_CERT_DIR / 'vault_juju_ca_cert.crt' + TLS_CHARM_CA_CERT_PATH = TLS_CA_CERT_DIR / 'charm_config_juju_ca_cert.crt' TLS_PORT = 8443 DASH_DIR = Path('src/dashboards') @@ -385,10 +386,12 @@ def _configure_tls(self) -> None: """Configure TLS.""" logging.debug("Attempting to collect TLS config from relation") key, cert, ca_cert = self._get_tls_from_relation() + ca_cert_path = self.TLS_VAULT_CA_CERT_PATH if not (key and cert): logging.debug("Attempting to collect TLS config from charm " "config") key, cert, ca_cert =
self._get_tls_from_config() + ca_cert_path = self.TLS_CHARM_CA_CERT_PATH if not (key and cert): logging.warn( "Not configuring TLS, not all data present") @@ -396,7 +399,7 @@ def _configure_tls(self) -> None: self.TLS_KEY_PATH.write_bytes(key) self.TLS_CERT_PATH.write_bytes(cert) if ca_cert: - self.TLS_CA_CERT_PATH.write_bytes(ca_cert) + ca_cert_path.write_bytes(ca_cert) subprocess.check_call(['update-ca-certificates']) hostname = socket.gethostname() diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 23df3087..15110b59 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -5,6 +5,7 @@ smoke_bundles: - focal configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation + - zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert tests: - zaza.openstack.charm_tests.ceph.dashboard.tests.CephDashboardTest - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 6d8dff9e..b835c0ad 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -380,14 +380,14 @@ def test_certificates_relation(self, _gethostname): self.ceph_utils.is_dashboard_enabled.return_value = True mock_TLS_KEY_PATH = MagicMock() mock_TLS_CERT_PATH = MagicMock() - mock_TLS_CA_CERT_PATH = MagicMock() + mock_TLS_VAULT_CA_CERT_PATH = MagicMock() _gethostname.return_value = 'server1' cert_rel_id = self.harness.add_relation('certificates', 'vault') dash_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') self.harness.begin() self.harness.set_leader() self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH - self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH + self.harness.charm.TLS_VAULT_CA_CERT_PATH = mock_TLS_VAULT_CA_CERT_PATH self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH self.harness.add_relation_unit( dash_rel_id, @@ -409,7 +409,7 @@ def test_certificates_relation(self, _gethostname): 'chain': TEST_CHAIN, 'ca': TEST_CA}) mock_TLS_CERT_PATH.write_bytes.assert_called_once() - mock_TLS_CA_CERT_PATH.write_bytes.assert_called_once() + mock_TLS_VAULT_CA_CERT_PATH.write_bytes.assert_called_once() mock_TLS_KEY_PATH.write_bytes.assert_called_once() self.subprocess.check_call.assert_called_once_with( ['update-ca-certificates']) @@ -429,7 +429,7 @@ def test_certificates_from_config(self): self.ceph_utils.is_dashboard_enabled.return_value = True mock_TLS_KEY_PATH = MagicMock() mock_TLS_CERT_PATH = MagicMock() - mock_TLS_CA_CERT_PATH = MagicMock() + mock_TLS_CHARM_CA_CERT_PATH = MagicMock() dash_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') self.harness.begin() self.harness.set_leader() @@ -442,7 +442,7 @@ def test_certificates_from_config(self): { 'mon-ready': 'True'}) self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH - self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH + self.harness.charm.TLS_CHARM_CA_CERT_PATH = mock_TLS_CHARM_CA_CERT_PATH self.harness.charm.TLS_KEY_PATH = mock_TLS_KEY_PATH self.subprocess.check_call.reset_mock() self.harness.update_config( From 82401f6663cbcbc8d27990eeccd70c33a4ab2c72 Mon Sep 17 00:00:00 2001 From: Dmitrii Shcherbakov Date: Wed, 1 Sep 2021 23:19:53 +0300 Subject: [PATCH 2256/2699] Notify more relations when cluster is bootstrapped Currently mon_relation only calls notify_rbd_mirrors when the cluster is already bootstrapped which leads to broker requests not 
being handled for other relations in some cases. The change also moves the bootstrap attempt code into a separate function and adds unit tests for mon_relation to cover different branches for various inputs. Closes-Bug: #1942224 Change-Id: Id9b611d128acb7d49a9a9ad9c096b232fefd6c68 --- ceph-mon/hooks/ceph_hooks.py | 139 +++++++++++++------------ ceph-mon/unit_tests/test_ceph_hooks.py | 77 ++++++++++++++ 2 files changed, 151 insertions(+), 65 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index fd10ff84..f2e97c02 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -482,79 +482,88 @@ def mon_relation(): if ceph.is_bootstrapped(): # The ceph-mon unit chosen for handling broker requests is based on # internal Ceph MON leadership and not Juju leadership. To update - # the rbd-mirror relation on all ceph-mon units after pool creation + # the relations on all ceph-mon units after pool creation # the unit handling the broker request will update a nonce on the # mon relation. - notify_rbd_mirrors() + notify_relations() else: - status_set('maintenance', 'Bootstrapping MON cluster') - # the following call raises an exception - # if it can't add the keyring - try: - ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) - except FileNotFoundError as e: # NOQA -- PEP8 is still PY2 - log("Couldn't bootstrap the monitor yet: {}".format(str(e))) - exit(0) - ceph.wait_for_bootstrap() - ceph.wait_for_quorum() - ceph.create_keyrings() - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - status_set('maintenance', 'Bootstrapping Ceph MGR') - ceph.bootstrap_manager() - if ceph.monitor_key_exists('admin', 'autotune'): - autotune = ceph.monitor_key_get('admin', 'autotune') - else: - ceph.wait_for_manager() - autotune = config('pg-autotune') - if (cmp_pkgrevno('ceph', '14.2.0') >= 0 and - (autotune == 'true' or - autotune == 'auto')): - ceph.monitor_key_set('admin', 'autotune', 'true') - else: - ceph.monitor_key_set('admin', 'autotune', 'false') - if ceph.monitor_key_get('admin', 'autotune') == 'true': - try: - mgr_enable_module('pg_autoscaler') - except subprocess.CalledProcessError: - log("Failed to initialize autoscaler, it must be " - "initialized on the last monitor", level='info') - # If we can and want to - if is_leader() and config('customize-failure-domain'): - # But only if the environment supports it - if os.environ.get('JUJU_AVAILABILITY_ZONE'): - cmds = [ - "ceph osd getcrushmap -o /tmp/crush.map", - "crushtool -d /tmp/crush.map| " - "sed 's/step chooseleaf firstn 0 type host/step " - "chooseleaf firstn 0 type rack/' > " - "/tmp/crush.decompiled", - "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", - "crushtool -i /tmp/crush.map --test", - "ceph osd setcrushmap -i /tmp/crush.map" - ] - for cmd in cmds: - try: - subprocess.check_call(cmd, shell=True) - except subprocess.CalledProcessError as e: - log("Failed to modify crush map:", level='error') - log("Cmd: {}".format(cmd), level='error') - log("Error: {}".format(e.output), level='error') - break - else: - log( - "Your Juju environment doesn't" - "have support for Availability Zones" - ) - notify_osds() - notify_radosgws() - notify_client() - notify_rbd_mirrors() - notify_prometheus() + if attempt_mon_cluster_bootstrap(): + notify_relations() else: log('Not enough mons ({}), punting.' 
.format(len(get_mon_hosts()))) +def attempt_mon_cluster_bootstrap(): + status_set('maintenance', 'Bootstrapping MON cluster') + # the following call raises an exception + # if it can't add the keyring + try: + ceph.bootstrap_monitor_cluster(leader_get('monitor-secret')) + except FileNotFoundError as e: # NOQA -- PEP8 is still PY2 + log("Couldn't bootstrap the monitor yet: {}".format(str(e))) + return False + ceph.wait_for_bootstrap() + ceph.wait_for_quorum() + ceph.create_keyrings() + if cmp_pkgrevno('ceph', '12.0.0') >= 0: + status_set('maintenance', 'Bootstrapping Ceph MGR') + ceph.bootstrap_manager() + if ceph.monitor_key_exists('admin', 'autotune'): + autotune = ceph.monitor_key_get('admin', 'autotune') + else: + ceph.wait_for_manager() + autotune = config('pg-autotune') + if (cmp_pkgrevno('ceph', '14.2.0') >= 0 and + (autotune == 'true' or + autotune == 'auto')): + ceph.monitor_key_set('admin', 'autotune', 'true') + else: + ceph.monitor_key_set('admin', 'autotune', 'false') + if ceph.monitor_key_get('admin', 'autotune') == 'true': + try: + mgr_enable_module('pg_autoscaler') + except subprocess.CalledProcessError: + log("Failed to initialize autoscaler, it must be " + "initialized on the last monitor", level='info') + # If we can and want to + if is_leader() and config('customize-failure-domain'): + # But only if the environment supports it + if os.environ.get('JUJU_AVAILABILITY_ZONE'): + cmds = [ + "ceph osd getcrushmap -o /tmp/crush.map", + "crushtool -d /tmp/crush.map| " + "sed 's/step chooseleaf firstn 0 type host/step " + "chooseleaf firstn 0 type rack/' > " + "/tmp/crush.decompiled", + "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", + "crushtool -i /tmp/crush.map --test", + "ceph osd setcrushmap -i /tmp/crush.map" + ] + for cmd in cmds: + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + log("Failed to modify crush map:", level='error') + log("Cmd: {}".format(cmd), level='error') + log("Error: {}".format(e.output), level='error') + break + else: + log( + "Your Juju environment doesn't" + "have support for Availability Zones" + ) + return True + + +def notify_relations(): + notify_osds() + notify_radosgws() + notify_client() + notify_rbd_mirrors() + notify_prometheus() + + def notify_prometheus(): if relation_ids('prometheus') and ceph.is_bootstrapped(): prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0 diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index f5679a16..8a97de27 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -470,6 +470,83 @@ def test_config_changed_with_default_autotune(self, mgr_enable_module.assert_not_called() +class CephMonRelationTestCase(test_utils.CharmTestCase): + + def setUp(self): + super(CephMonRelationTestCase, self).setUp(ceph_hooks, [ + 'config', + 'is_leader', + 'is_relation_made', + 'leader_get', + 'leader_set', + 'log', + 'relation_ids', + 'related_units', + 'relation_get', + 'relations_of_type', + 'status_set', + 'get_mon_hosts', + 'notify_relations', + 'emit_cephconf', + ]) + self.config.side_effect = self.test_config.get + self.leader_get.side_effect = self.test_leader_settings.get + self.leader_set.side_effect = self.test_leader_settings.set + self.relation_get.side_effect = self.test_relation.get + self.test_config.set('monitor-count', 3) + self.test_leader_settings.set({'monitor-secret': '42'}) + self.get_mon_hosts.return_value = ['foo', 'bar', 'baz'] + + @patch.object(ceph_hooks.ceph, 
'is_bootstrapped') + def test_mon_relation_bootstrapped(self, _is_bootstrapped): + _is_bootstrapped.return_value = True + ceph_hooks.mon_relation() + self.notify_relations.assert_called_with() + + @patch.object(ceph_hooks, 'attempt_mon_cluster_bootstrap') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + def test_mon_relation_attempt_bootstrap_success(self, _is_bootstrapped, + _attempt_bootstrap): + _is_bootstrapped.return_value = False + _attempt_bootstrap.return_value = True + ceph_hooks.mon_relation() + self.notify_relations.assert_called_with() + + @patch.object(ceph_hooks, 'attempt_mon_cluster_bootstrap') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + def test_mon_relation_attempt_bootstrap_failure(self, _is_bootstrapped, + _attempt_bootstrap): + _is_bootstrapped.return_value = False + _attempt_bootstrap.return_value = False + ceph_hooks.mon_relation() + self.notify_relations.assert_not_called() + + @patch.object(ceph_hooks, 'attempt_mon_cluster_bootstrap') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + def test_mon_relation_no_enough_mons(self, _is_bootstrapped, + _attempt_bootstrap): + _is_bootstrapped.return_value = False + _attempt_bootstrap.return_value = False + self.get_mon_hosts.return_value = ['foo', 'bar'] + ceph_hooks.mon_relation() + self.notify_relations.assert_not_called() + self.log.assert_called_once_with('Not enough mons (2), punting.') + + @patch.object(ceph_hooks, 'attempt_mon_cluster_bootstrap') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + def test_mon_relation_no_secret(self, _is_bootstrapped, + _attempt_bootstrap): + _is_bootstrapped.return_value = False + _attempt_bootstrap.return_value = False + self.get_mon_hosts.return_value = ['foo', 'bar'] + self.test_leader_settings.set({'monitor-secret': None}) + ceph_hooks.mon_relation() + self.notify_relations.assert_not_called() + _attempt_bootstrap.assert_not_called() + self.log.assert_called_once_with( + 'still waiting for leader to setup keys') + + class RelatedUnitsTestCase(unittest.TestCase): _units = { From 60d0caa30a2afd8ee84323bdd4f44ae07732fe5a Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 30 Aug 2021 18:13:43 -0300 Subject: [PATCH 2257/2699] Only consider mounted OSD directories When gathering the list of local OSD ids, the charm would consider the entries under '/var/lib/ceph/osd/ceph-XXX' where 'XXX" was the OSD id. However, if an entry under that directory isn't mounted, then the OSD that would represent that entry should be discarded, as it's no longer active. This patchset thus filters those entries by looking for them in the mount points. 
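In outline, the filtering amounts to the following (a sketch only, reusing the filesystem_mounted helper from charms_ceph.utils that the diff below also uses; the function name here is illustrative):

    import os

    from charms_ceph.utils import filesystem_mounted

    def mounted_osd_ids(osd_path='/var/lib/ceph/osd'):
        # Keep an OSD id only when its ceph-<id> directory is an active
        # mount point.
        for osd_dir in os.listdir(osd_path):
            osd_id = osd_dir.split('-')[-1]
            if osd_id.isdigit() and filesystem_mounted(
                    os.path.join(osd_path, osd_dir)):
                yield osd_id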
Closes-Bug: #1934938 Change-Id: I69c6356e450cc0c96de4afe571b438d4a2ea5177 --- ceph-osd/lib/charms_ceph/crush_utils.py | 6 +- ceph-osd/lib/charms_ceph/utils.py | 247 ++++++++++++++---- .../unit_tests/test_actions_osd_out_in.py | 17 ++ 3 files changed, 223 insertions(+), 47 deletions(-) diff --git a/ceph-osd/lib/charms_ceph/crush_utils.py b/ceph-osd/lib/charms_ceph/crush_utils.py index 8fe09fa4..37084bf1 100644 --- a/ceph-osd/lib/charms_ceph/crush_utils.py +++ b/ceph-osd/lib/charms_ceph/crush_utils.py @@ -79,9 +79,9 @@ def load_crushmap(self): stdin=crush.stdout) .decode('UTF-8')) except CalledProcessError as e: - log("Error occured while loading and decompiling CRUSH map:" + log("Error occurred while loading and decompiling CRUSH map:" "{}".format(e), ERROR) - raise "Failed to read CRUSH map" + raise def ensure_bucket_is_present(self, bucket_name): if bucket_name not in [bucket.name for bucket in self.buckets()]: @@ -111,7 +111,7 @@ def save(self): return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) - raise "Failed to save CRUSH map." + raise def build_crushmap(self): """Modifies the current CRUSH map to include the new buckets""" diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index e5c38793..9b7299dd 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -14,6 +14,7 @@ import collections import glob +import itertools import json import os import pyudev @@ -24,6 +25,7 @@ import sys import time import uuid +import functools from contextlib import contextmanager from datetime import datetime @@ -501,30 +503,33 @@ def ceph_user(): class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name + def __init__(self, identifier, name, osd="", host="", chassis="", + rack="", row="", pdu="", pod="", room="", + datacenter="", zone="", region="", root=""): self.identifier = identifier + self.name = name + self.osd = osd self.host = host + self.chassis = chassis self.rack = rack self.row = row + self.pdu = pdu + self.pod = pod + self.room = room self.datacenter = datacenter - self.chassis = chassis + self.zone = zone + self.region = region self.root = root def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) + return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ + "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ + "region: {} root: {}".format(self.name, self.identifier, + self.osd, self.host, self.chassis, + self.rack, self.row, self.pdu, + self.pod, self.room, + self.datacenter, self.zone, + self.region, self.root) def __eq__(self, other): return not self.name < other.name and not other.name < self.name @@ -571,10 +576,53 @@ def get_osd_weight(osd_id): raise +def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): + """Get all nodes of the desired type, with all their attributes. + + These attributes can be direct or inherited from ancestors. 
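+
+    For example, with lookup_type='host', a host that sits under a
+    root -> rack -> host hierarchy comes back as a single dict that
+    carries its own 'name' and 'identifier' plus the inherited 'rack'
+    and 'root' names.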
+ """ + attribute_dict = {node['type']: node['name']} + if node['type'] == lookup_type: + attribute_dict['name'] = node['name'] + attribute_dict['identifier'] = node['id'] + return [attribute_dict] + elif not node.get('children'): + return [attribute_dict] + else: + descendant_attribute_dicts = [ + _filter_nodes_and_set_attributes(node_lookup_map[node_id], + node_lookup_map, lookup_type) + for node_id in node.get('children', []) + ] + return [dict(attribute_dict, **descendant_attribute_dict) + for descendant_attribute_dict + in itertools.chain.from_iterable(descendant_attribute_dicts)] + + +def _flatten_roots(nodes, lookup_type='host'): + """Get a flattened list of nodes of the desired type. + + :param nodes: list of nodes defined as a dictionary of attributes and + children + :type nodes: List[Dict[int, Any]] + :param lookup_type: type of searched node + :type lookup_type: str + :returns: flattened list of nodes + :rtype: List[Dict[str, Any]] + """ + lookup_map = {node['id']: node for node in nodes} + root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, + lookup_type) + for node in nodes if node['type'] == 'root'] + # get a flattened list of roots. + return list(itertools.chain.from_iterable(root_attributes_dicts)) + + def get_osd_tree(service): """Returns the current osd map in JSON. :returns: List. + :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. Also raises CalledProcessError if our ceph command fails """ @@ -585,35 +633,14 @@ def get_osd_tree(service): .decode('UTF-8')) try: json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - host_nodes = [ - node for node in json_tree['nodes'] - if node['type'] == 'host' - ] - for host in host_nodes: - crush_list.append( - CrushLocation( - name=host.get('name'), - identifier=host['id'], - host=host.get('host'), - rack=host.get('rack'), - row=host.get('row'), - datacenter=host.get('datacenter'), - chassis=host.get('chassis'), - root=host.get('root') - ) - ) - return crush_list + roots = _flatten_roots(json_tree["nodes"]) + return [CrushLocation(**host) for host in roots] except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) + log("ceph osd tree command failed with message: {}".format(e)) raise @@ -669,7 +696,9 @@ def get_local_osd_ids(): dirs = os.listdir(osd_path) for osd_dir in dirs: osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): + if (_is_int(osd_id) and + filesystem_mounted(os.path.join( + os.sep, osd_path, osd_dir))): osd_ids.append(osd_id) except OSError: raise @@ -3271,13 +3300,14 @@ def determine_packages(): def determine_packages_to_remove(): """Determines packages for removal + Note: if in a container, then the CHRONY_PACKAGE is removed. + :returns: list of packages to be removed + :rtype: List[str] """ rm_packages = REMOVE_PACKAGES.copy() if is_container(): - install_list = filter_missing_packages(CHRONY_PACKAGE) - if not install_list: - rm_packages.append(CHRONY_PACKAGE) + rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE])) return rm_packages @@ -3376,3 +3406,132 @@ def _get_cli_key(key): level=ERROR) raise OSDConfigSetError return True + + +def enabled_manager_modules(): + """Return a list of enabled manager modules. 
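+
+    Shells out to 'ceph mgr module ls' and returns the
+    'enabled_modules' list from its JSON output; failures are logged
+    and an empty list is returned.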
+ + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def is_mgr_module_enabled(module): + """Is a given manager module enabled. + + :param module: + :type module: str + :returns: Whether the named module is enabled + :rtype: bool + """ + return module in enabled_manager_modules() + + +is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard') + + +def mgr_enable_module(module): + """Enable a Ceph Manager Module. + + :param module: The module name to enable + :type module: str + + :raises: subprocess.CalledProcessError + """ + if not is_mgr_module_enabled(module): + subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module]) + return True + return False + + +mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard') + + +def mgr_disable_module(module): + """Enable a Ceph Manager Module. + + :param module: The module name to enable + :type module: str + + :raises: subprocess.CalledProcessError + """ + if is_mgr_module_enabled(module): + subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module]) + return True + return False + + +mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard') + + +def ceph_config_set(name, value, who): + """Set a ceph config option + + :param name: key to set + :type name: str + :param value: value corresponding to key + :type value: str + :param who: Config area the key is associated with (e.g. 'dashboard') + :type who: str + + :raises: subprocess.CalledProcessError + """ + subprocess.check_call(['ceph', 'config', 'set', who, name, value]) + + +mgr_config_set = functools.partial(ceph_config_set, who='mgr') + + +def ceph_config_get(name, who): + """Retrieve the value of a ceph config option + + :param name: key to lookup + :type name: str + :param who: Config area the key is associated with (e.g. 'dashboard') + :type who: str + :returns: Value associated with key + :rtype: str + :raises: subprocess.CalledProcessError + """ + return subprocess.check_output( + ['ceph', 'config', 'get', who, name]).decode('UTF-8') + + +mgr_config_get = functools.partial(ceph_config_get, who='mgr') + + +def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None): + """Set SSL dashboard config option. + + :param path: Path to file + :type path: str + :param artifact_name: Option name for setting the artifact + :type artifact_name: str + :param hostname: If hostname is set artifact will only be associated with + the dashboard on that host. 
+ :type hostname: str + :raises: subprocess.CalledProcessError + """ + cmd = ['ceph', 'dashboard', artifact_name] + if hostname: + cmd.append(hostname) + cmd.extend(['-i', path]) + log(cmd, level=DEBUG) + subprocess.check_call(cmd) + + +dashboard_set_ssl_certificate = functools.partial( + _dashboard_set_ssl_artifact, + artifact_name='set-ssl-certificate') + + +dashboard_set_ssl_certificate_key = functools.partial( + _dashboard_set_ssl_artifact, + artifact_name='set-ssl-certificate-key') diff --git a/ceph-osd/unit_tests/test_actions_osd_out_in.py b/ceph-osd/unit_tests/test_actions_osd_out_in.py index b14a014e..9c0a3790 100644 --- a/ceph-osd/unit_tests/test_actions_osd_out_in.py +++ b/ceph-osd/unit_tests/test_actions_osd_out_in.py @@ -115,6 +115,23 @@ def test_osd_in_not_local(self): self.assess_status.assert_not_called() +class OSDMountTestCase(CharmTestCase): + def setUp(self): + super(OSDMountTestCase, self).setUp(actions, []) + + @mock.patch('os.path.exists') + @mock.patch('os.listdir') + @mock.patch('charms_ceph.utils.filesystem_mounted') + def test_mounted_osds(self, fs_mounted, listdir, exists): + exists.return_value = True + listdir.return_value = [ + '/var/lib/ceph/osd/ceph-1', '/var/lib/ceph/osd/ceph-2'] + fs_mounted.side_effect = lambda x: x == listdir.return_value[0] + osds = actions.get_local_osd_ids() + self.assertIn(listdir.return_value[0][-1], osds) + self.assertNotIn(listdir.return_value[1][-1], osds) + + class MainTestCase(CharmTestCase): def setUp(self): super(MainTestCase, self).setUp(actions, ["function_fail"]) From 5fd6cb88c6e990468ece18daf63c0e2eefc80fcf Mon Sep 17 00:00:00 2001 From: Robert Gildein Date: Tue, 2 Mar 2021 11:14:37 +0100 Subject: [PATCH 2258/2699] Add an action to provide information about AZ The 'get-availability-zone' action will get information about an availability zone that will contain information about the CRUSH structure. Specifically 'rack' and 'row'. Closes-Bug: #1911006 Change-Id: I99ebbef5f23d6efe3c848b089c7f2b0d26ad0077 --- ceph-osd/README.md | 1 + ceph-osd/actions.yaml | 15 ++ ceph-osd/actions/get-availability-zone | 1 + ceph-osd/actions/get_availability_zone.py | 136 ++++++++++++++++++ ceph-osd/hooks/install | 2 +- ceph-osd/unit_tests/__init__.py | 4 + .../test_actions_get_availability_zone.py | 119 +++++++++++++++ 7 files changed, 277 insertions(+), 1 deletion(-) create mode 120000 ceph-osd/actions/get-availability-zone create mode 100755 ceph-osd/actions/get_availability_zone.py create mode 100644 ceph-osd/unit_tests/test_actions_get_availability_zone.py diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 5404838a..b1840933 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -240,6 +240,7 @@ is not deployed then see file `actions.yaml`. * `add-disk` * `blacklist-add-disk` * `blacklist-remove-disk` +* `get-availability-zone` * `list-disks` * `osd-in` * `osd-out` diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 6d6667fa..7c405bad 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -116,3 +116,18 @@ stop: - osds security-checklist: description: Validate the running configuration against the OpenStack security guides checklist +get-availability-zone: + description: | + Obtain information about the availability zone, which will contain information about the CRUSH + structure. Specifically 'rack' and 'row'. + params: + format: + type: string + default: text + enum: + - text + - json + description: Specify output format (text|json).
+ show-all: + type: boolean + description: Option to view information for all units. Default is 'false'. diff --git a/ceph-osd/actions/get-availability-zone b/ceph-osd/actions/get-availability-zone new file mode 120000 index 00000000..47227f6f --- /dev/null +++ b/ceph-osd/actions/get-availability-zone @@ -0,0 +1 @@ +get_availability_zone.py \ No newline at end of file diff --git a/ceph-osd/actions/get_availability_zone.py b/ceph-osd/actions/get_availability_zone.py new file mode 100755 index 00000000..bcbeade6 --- /dev/null +++ b/ceph-osd/actions/get_availability_zone.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import sys + +from tabulate import tabulate + +sys.path.append("hooks") +sys.path.append("lib") + +from charms_ceph.utils import get_osd_tree +from charmhelpers.core import hookenv +from utils import get_unit_hostname + + +CRUSH_MAP_HIERARCHY = [ + "root", # 10 + "region", # 9 + "datacenter", # 8 + "room", # 7 + "pod", # 6 + "pdu", # 5 + "row", # 4 + "rack", # 3 + "chassis", # 2 + "host", # 1 + "osd", # 0 +] + + +def _get_human_readable(availability_zones): + """Get human readable table format. + + :param availability_zones: information about the availability zone + :type availability_zones: Dict[str, Dict[str, str]] + :returns: formatted data as table + :rtype: str + """ + data = availability_zones.get( + "all-units", {get_unit_hostname(): availability_zones["unit"]} + ) + data = [[unit, *crush_map.values()] for unit, crush_map in data.items()] + return tabulate( + data, tablefmt="grid", headers=["unit", *CRUSH_MAP_HIERARCHY] + ) + + +def _get_crush_map(crush_location): + """Get Crush Map hierarchy from CrushLocation. + + :param crush_location: CrushLocation from function get_osd_tree + :type crush_location: charms_ceph.utils.CrushLocation + :returns: dictionary containing the Crush Map hierarchy, where + the keys are according to the defined types of the + Ceph Map Hierarchy + :rtype: Dict[str, str] + """ + return { + crush_map_type: getattr(crush_location, crush_map_type) + for crush_map_type in CRUSH_MAP_HIERARCHY + if getattr(crush_location, crush_map_type, None) + } + + +def get_availability_zones(show_all=False): + """Get information about the availability zones. + + Returns a dictionary with the current unit's crush map under the + 'unit' key and, if the action was executed with the parameter + show-all, the crush maps of all units under 'all-units'.
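+
+    For example, with show-all the result resembles:
+
+        {"unit": {"root": "default", "host": "juju-ceph-0"},
+         "all-units": {"juju-ceph-0": {"root": "default",
+                                       "host": "juju-ceph-0"}, ...}}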
+ + :param show_all: define whether the result should contain AZ information + for all units + :type show_all: bool + :returns: {"unit": , + "all-units": {: }} + :rtype: Dict[str, Dict[str, str]] + """ + results = {"unit": {}, "all-units": {}} + osd_tree = get_osd_tree(service="osd-upgrade") + + this_unit_host = get_unit_hostname() + for crush_location in osd_tree: + crush_map = _get_crush_map(crush_location) + if this_unit_host == crush_location.name: + results["unit"] = crush_map + + results["all-units"][crush_location.name] = crush_map + + if not show_all: + results.pop("all-units") + + return results + + +def format_availability_zones(availability_zones, human_readable=True): + """Format availability zones to action output format.""" + if human_readable: + return _get_human_readable(availability_zones) + + return json.dumps(availability_zones) + + +def main(): + try: + show_all = hookenv.action_get("show-all") + human_readable = hookenv.action_get("format") == "text" + availability_zones = get_availability_zones(show_all) + if not availability_zones["unit"]: + hookenv.log( + "Availability zone information for current unit not found.", + hookenv.DEBUG + ) + + formatted_azs = format_availability_zones(availability_zones, + human_readable) + hookenv.action_set({"availability-zone": formatted_azs}) + except Exception as error: + hookenv.action_fail("Action failed: {}".format(str(error))) + + +if __name__ == "__main__": + main() diff --git a/ceph-osd/hooks/install b/ceph-osd/hooks/install index 8aded7f5..0064ac5f 100755 --- a/ceph-osd/hooks/install +++ b/ceph-osd/hooks/install @@ -2,7 +2,7 @@ # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. -declare -a DEPS=('apt' 'pip' 'yaml') +declare -a DEPS=('apt' 'pip' 'yaml' 'tabulate') check_and_install() { pkg="${1}-${2}" diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index 633fa7da..b8024d8a 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -13,7 +13,11 @@ # limitations under the License. import sys +from unittest.mock import MagicMock + sys.path.append('hooks') sys.path.append('lib') sys.path.append('actions') sys.path.append('unit_tests') + +sys.modules["tabulate"] = MagicMock() diff --git a/ceph-osd/unit_tests/test_actions_get_availability_zone.py b/ceph-osd/unit_tests/test_actions_get_availability_zone.py new file mode 100644 index 00000000..cec7b477 --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_get_availability_zone.py @@ -0,0 +1,119 @@ +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
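+
+# Note: unit_tests/__init__.py (see above) replaces the 'tabulate'
+# module with a MagicMock, so these tests control tabulate.return_value
+# rather than exercising the real library.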
+import json + +from actions import get_availability_zone +from lib.charms_ceph.utils import CrushLocation + +from test_utils import CharmTestCase + + +TABULATE_OUTPUT = """ ++-------------+---------+-------------+ +| unit | root | region | ++=============+=========+=============+ +| juju-ceph-0 | default | juju-ceph-0 | ++-------------+---------+-------------+ +| juju-ceph-1 | default | juju-ceph-1 | ++-------------+---------+-------------+ +| juju-ceph-2 | default | juju-ceph-2 | ++-------------+---------+-------------+ +""" + +AVAILABILITY_ZONES = { + "unit": {"root": "default", "host": "juju-ceph-0"}, + "all-units": { + "juju-ceph-0": {"root": "default", "host": "juju-ceph-0"}, + "juju-ceph-1": {"root": "default", "host": "juju-ceph-1"}, + "juju-ceph-2": {"root": "default", "host": "juju-ceph-2"} + } +} + + +class GetAvailabilityZoneActionTests(CharmTestCase): + def setUp(self): + super(GetAvailabilityZoneActionTests, self).setUp( + get_availability_zone, + ["get_osd_tree", "get_unit_hostname", "tabulate"] + ) + self.tabulate.return_value = TABULATE_OUTPUT + self.get_unit_hostname.return_value = "juju-ceph-0" + + def test_get_human_readable(self): + """Test formatting as human readable.""" + table = get_availability_zone._get_human_readable(AVAILABILITY_ZONES) + self.assertTrue(table == TABULATE_OUTPUT) + + def test_get_crush_map(self): + """Test get Crush Map hierarchy from CrushLocation.""" + crush_location = CrushLocation( + name="test", identifier="t1", host="test", rack=None, row=None, + datacenter=None, chassis=None, root="default") + crush_map = get_availability_zone._get_crush_map(crush_location) + self.assertDictEqual(crush_map, {"root": "default", "host": "test"}) + + crush_location = CrushLocation( + name="test", identifier="t1", host="test", rack="AZ", + row="customAZ", datacenter=None, chassis=None, root="default") + crush_map = get_availability_zone._get_crush_map(crush_location) + self.assertDictEqual(crush_map, {"root": "default", "row": "customAZ", + "rack": "AZ", "host": "test"}) + + def test_get_availability_zones(self): + """Test function to get information about availability zones.""" + self.get_unit_hostname.return_value = "test_1" + self.get_osd_tree.return_value = [ + CrushLocation(name="test_1", identifier="t1", host="test_1", + rack="AZ1", row="AZ", datacenter=None, + chassis=None, root="default"), + CrushLocation(name="test_2", identifier="t2", host="test_2", + rack="AZ1", row="AZ", datacenter=None, + chassis=None, root="default"), + CrushLocation(name="test_3", identifier="t3", host="test_3", + rack="AZ2", row="AZ", datacenter=None, + chassis=None, root="default"), + CrushLocation(name="test_4", identifier="t4", host="test_4", + rack="AZ2", row="AZ", datacenter=None, + chassis=None, root="default"), + ] + results = get_availability_zone.get_availability_zones() + + self.assertDictEqual(results, { + "unit": dict(root="default", row="AZ", rack="AZ1", host="test_1")}) + + results = get_availability_zone.get_availability_zones(show_all=True) + self.assertDictEqual(results, { + "unit": dict(root="default", row="AZ", rack="AZ1", host="test_1"), + "all-units": { + "test_1": dict(root="default", row="AZ", rack="AZ1", + host="test_1"), + "test_2": dict(root="default", row="AZ", rack="AZ1", + host="test_2"), + "test_3": dict(root="default", row="AZ", rack="AZ2", + host="test_3"), + "test_4": dict(root="default", row="AZ", rack="AZ2", + host="test_4"), + }}) + + def test_format_availability_zones(self): + """Test function to formatted availability zones.""" + # human 
readable format + results_table = get_availability_zone.format_availability_zones( + AVAILABILITY_ZONES, True) + self.assertEqual(results_table, TABULATE_OUTPUT) + + # json format + results_json = get_availability_zone.format_availability_zones( + AVAILABILITY_ZONES, False) + self.assertDictEqual(json.loads(results_json), AVAILABILITY_ZONES) From 330e584a9dfaaf96627028602def5fcd068f2de8 Mon Sep 17 00:00:00 2001 From: David Negreira Date: Fri, 3 Sep 2021 14:41:30 +0200 Subject: [PATCH 2259/2699] Add accepted formats on 'key' configuration Add a bit more information on config.yaml about the type of keys that can be passed as a parameter to the 'key' configuration. Signed-off-by: David Negreira Change-Id: Ieeb0f598ca9a7188f81619c2b4fe88af14f260fd Closes-Bug: #1942605 --- ceph-osd/config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index b9e5423f..e501c232 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -22,6 +22,8 @@ options: description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPA's. + The accepted formats should be a GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. use-syslog: type: boolean default: False From 50917d0d8bd2436c916bf0dd7ee129ce72707f1f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 24 Aug 2021 16:56:03 +0000 Subject: [PATCH 2260/2699] Add radosgw-user relation Add a radosgw-user relation to allow charms to request a user. The requesting charm should supply the 'system-role' key in the app relation data bag to indicate whether the requested user should be a system user. This charm creates the user if it does not exist or looks up the users credentials if it does. The username and credentials are then passed back to the requestor via the app relation data bag. The units radosgw url and daemon id are also passed back this time using the unit relation data bag. 
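A minimal sketch of the requirer side of this interface (class and method names here are illustrative, not part of any charm; only the relation keys 'system-role', 'uid', 'access-key', 'secret-key', 'internal-url' and 'daemon-id' come from this patch):

    from ops.framework import Object

    class RadosGWUserRequires(Object):

        def __init__(self, charm, relation_name='radosgw-user'):
            super().__init__(charm, relation_name)
            self.relation_name = relation_name

        def request_user(self, system_role=False):
            relation = self.model.get_relation(self.relation_name)
            if relation and self.model.unit.is_leader():
                # The provider reads 'system-role' from the app data bag.
                relation.data[self.model.app]['system-role'] = str(
                    system_role).lower()

        def credentials(self):
            relation = self.model.get_relation(self.relation_name)
            if relation and relation.app:
                data = relation.data[relation.app]
                if 'access-key' in data:
                    return (data['uid'], data['access-key'],
                            data['secret-key'])

        def endpoints(self):
            relation = self.model.get_relation(self.relation_name)
            if relation:
                # Each radosgw unit publishes its own endpoint details.
                return [(relation.data[u].get('internal-url'),
                         relation.data[u].get('daemon-id'))
                        for u in relation.units]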
Change-Id: Ieff1943b02f490559ccd245f60b744fb76a5d832 --- ceph-radosgw/hooks/hooks.py | 97 +++++++++++++++++++ ceph-radosgw/hooks/multisite.py | 57 ++++++++++- .../hooks/radosgw-user-relation-changed | 1 + .../hooks/radosgw-user-relation-departed | 1 + ceph-radosgw/metadata.yaml | 2 + ceph-radosgw/unit_tests/test_hooks.py | 51 ++++++++++ 6 files changed, 205 insertions(+), 4 deletions(-) create mode 120000 ceph-radosgw/hooks/radosgw-user-relation-changed create mode 120000 ceph-radosgw/hooks/radosgw-user-relation-departed diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index abc30460..36f99340 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -28,6 +28,7 @@ from charmhelpers.core.hookenv import ( relation_get, + relation_id as ch_relation_id, relation_ids, related_units, config, @@ -42,8 +43,10 @@ is_leader, leader_set, leader_get, + remote_service_name, WORKLOAD_STATES, ) +from charmhelpers.core.strutils import bool_from_string from charmhelpers.fetch import ( apt_update, apt_install, @@ -243,6 +246,9 @@ def _config_changed(): for r_id in relation_ids('object-store'): object_store_joined(r_id) + for r_id in relation_ids('radosgw-user'): + radosgw_user_changed(r_id) + process_multisite_relations() CONFIGS.write_all() @@ -367,6 +373,10 @@ def _mon_relation(): zone)) service_restart(service_name()) + + for r_id in relation_ids('radosgw-user'): + radosgw_user_changed(r_id) + else: send_request_if_needed(rq, relation='mon') _mon_relation() @@ -575,6 +585,91 @@ def _certs_changed(): _certs_changed() +def get_radosgw_username(r_id): + """Generate a username based on a relation id""" + gw_user = 'juju-' + r_id.replace(":", "-") + return gw_user + + +def get_radosgw_system_username(r_id): + """Generate a username for a system user based on a relation id""" + gw_user = get_radosgw_username(r_id) + # There is no way to switch a user from being a system user to a + # non-system user, so add the '-system' suffix to ensure there is + # no clash if the user request is updated in the future. + gw_user = gw_user + "-system" + return gw_user + + +@hooks.hook('radosgw-user-relation-departed') +def radosgw_user_departed(): + # If there are no related units then the last unit + # is currently departing. + if not related_units(): + r_id = ch_relation_id() + for user in [get_radosgw_system_username(r_id), + get_radosgw_username(r_id)]: + multisite.suspend_user(user) + + +@hooks.hook('radosgw-user-relation-changed') +def radosgw_user_changed(relation_id=None): + if not ready_for_service(legacy=False): + log('unit not ready, deferring radosgw_user configuration') + return + if relation_id: + r_ids = [relation_id] + else: + r_ids = relation_ids('radosgw-user') + # The leader manages the users and sets the credentials using the + # the application relation data bag. 
+ if is_leader(): + for r_id in r_ids: + remote_app = remote_service_name(r_id) + relation_data = relation_get( + rid=r_id, + app=remote_app) + if 'system-role' not in relation_data: + log('system-role not in relation data, cannot create user', + level=DEBUG) + return + system_user = bool_from_string( + relation_data.get('system-role', 'false')) + if system_user: + gw_user = get_radosgw_system_username(r_id) + # If there is a pre-existing non-system user then ensure it is + # suspended + multisite.suspend_user(get_radosgw_username(r_id)) + else: + gw_user = get_radosgw_username(r_id) + # If there is a pre-existing system user then ensure it is + # suspended + multisite.suspend_user(get_radosgw_system_username(r_id)) + if gw_user in multisite.list_users(): + (access_key, secret_key) = multisite.get_user_creds(gw_user) + else: + (access_key, secret_key) = multisite.create_user( + gw_user, + system_user=system_user) + relation_set( + app=remote_app, + relation_id=r_id, + relation_settings={ + 'uid': gw_user, + 'access-key': access_key, + 'secret-key': secret_key}) + # Each unit publishes its own endpoint data and daemon id using the + # unit relation data bag. + for r_id in r_ids: + relation_set( + relation_id=r_id, + relation_settings={ + 'internal-url': "{}:{}".format( + canonical_url(CONFIGS, INTERNAL), + listen_port()), + 'daemon-id': socket.gethostname()}) + + @hooks.hook('master-relation-joined') def master_relation_joined(relation_id=None): if not ready_for_service(legacy=False): @@ -732,6 +827,8 @@ def leader_settings_changed(): if not is_leader(): for r_id in relation_ids('master'): master_relation_joined(r_id) + for r_id in relation_ids('radosgw-user'): + radosgw_user_changed(r_id) def process_multisite_relations(): diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index 18722423..df2638a3 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -316,12 +316,48 @@ def tidy_defaults(): update_period() -def create_system_user(username): +def get_user_creds(username): + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'user', 'info', + '--uid={}'.format(username) + ] + result = json.loads(_check_output(cmd)) + return (result['keys'][0]['access_key'], + result['keys'][0]['secret_key']) + + +def suspend_user(username): """ - Create a RADOS Gateway system use for sync usage + Suspend a RADOS Gateway user :param username: username of user to create :type username: str + """ + if username not in list_users(): + hookenv.log( + "Cannot suspended user {}. 
User not found.".format(username), + level=hookenv.DEBUG) + return + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'user', 'suspend', + '--uid={}'.format(username) + ] + _check_output(cmd) + hookenv.log( + "Suspended user {}".format(username), + level=hookenv.DEBUG) + + +def create_user(username, system_user=False): + """ + Create a RADOS Gateway user + + :param username: username of user to create + :type username: str + :param system_user: Whether to grant system user role + :type system_user: bool :return: access key and secret :rtype: (str, str) """ @@ -329,9 +365,10 @@ def create_system_user(username): RGW_ADMIN, '--id={}'.format(_key_name()), 'user', 'create', '--uid={}'.format(username), - '--display-name=Synchronization User', - '--system', + '--display-name=Synchronization User' ] + if system_user: + cmd.append('--system') try: result = json.loads(_check_output(cmd)) return (result['keys'][0]['access_key'], @@ -340,6 +377,18 @@ def create_system_user(username): return (None, None) +def create_system_user(username): + """ + Create a RADOS Gateway system user + + :param username: username of user to create + :type username: str + :return: access key and secret + :rtype: (str, str) + """ + create_user(username, system_user=True) + + def pull_realm(url, access_key, secret): """ Pull in a RADOS Gateway Realm from a master RGW instance diff --git a/ceph-radosgw/hooks/radosgw-user-relation-changed b/ceph-radosgw/hooks/radosgw-user-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/radosgw-user-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/radosgw-user-relation-departed b/ceph-radosgw/hooks/radosgw-user-relation-departed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/radosgw-user-relation-departed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index afbe5862..4d3b216b 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -45,6 +45,8 @@ provides: interface: radosgw-multisite object-store: interface: swift-proxy + radosgw-user: + interface: radosgw-user peers: cluster: interface: swift-ha diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 796070dc..ebb17c5d 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -46,6 +46,7 @@ 'relation_set', 'relation_get', 'related_units', + 'remote_service_name', 'status_set', 'subprocess', 'sys', @@ -509,6 +510,56 @@ def test_certs_changed(self, mock_configure_https): ) mock_configure_https.assert_called_once_with() + @patch.object(ceph_hooks, 'canonical_url') + @patch.object(ceph_hooks, 'is_leader') + def test_radosgw_user_changed(self, is_leader, canonical_url): + relation_data = { + 'radosgw-user:3': {'system-role': 'false'}, + 'radosgw-user:5': {'system-role': 'true'}} + user = { + 'juju-radosgw-user-3': ('access1', 'key1'), + 'juju-radosgw-user-5-system': ('access2', 'key2')} + self.ready_for_service.return_value = True + is_leader.return_value = True + self.remote_service_name.return_value = 'ceph-dashboard' + canonical_url.return_value = 'http://radosgw' + self.listen_port.return_value = 80 + self.socket.gethostname.return_value = 'testinghostname' + self.relation_ids.return_value = relation_data.keys() + self.relation_get.side_effect = lambda rid, app: relation_data[rid] + self.multisite.list_users.return_value = 
['juju-radosgw-user-3']
+        self.multisite.get_user_creds.side_effect = lambda u: user[u]
+        self.multisite.create_user.side_effect = lambda u, system_user: user[u]
+        ceph_hooks.radosgw_user_changed()
+        expected = [
+            call(
+                app='ceph-dashboard',
+                relation_id='radosgw-user:3',
+                relation_settings={
+                    'uid': 'juju-radosgw-user-3',
+                    'access-key': 'access1',
+                    'secret-key': 'key1'}),
+            call(
+                app='ceph-dashboard',
+                relation_id='radosgw-user:5',
+                relation_settings={
+                    'uid': 'juju-radosgw-user-5-system',
+                    'access-key': 'access2',
+                    'secret-key': 'key2'}),
+            call(
+                relation_id='radosgw-user:3',
+                relation_settings={
+                    'internal-url': 'http://radosgw:80',
+                    'daemon-id': 'testinghostname'}),
+            call(
+                relation_id='radosgw-user:5',
+                relation_settings={
+                    'internal-url': 'http://radosgw:80',
+                    'daemon-id': 'testinghostname'})]
+        self.relation_set.assert_has_calls(
+            expected,
+            any_order=True)
+

 class MiscMultisiteTests(CharmTestCase):

From 682b31cb26954f1ad6620c1e249deab267b243d2 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Wed, 1 Sep 2021 07:52:37 +0000
Subject: [PATCH 2261/2699] Add support for object gateway integration

To add support for object gateways, this change adds the radosgw-user
interface. The focal bundle has one rados gateway application, as more
than one is not supported on Octopus. The hirsute bundle has two rados
gateway applications but does not contain the LMA applications, as
telegraf is not supported on hirsute.

Depends-On: Ieff1943b02f490559ccd245f60b744fb76a5d832
Change-Id: I38939b9938a5ba2ed6e3fb489f66a255f7aa8fe4
---
 ceph-dashboard/README.md                      |   8 +
 ceph-dashboard/metadata.yaml                  |   3 +
 ceph-dashboard/osci.yaml                      |  21 ++-
 ceph-dashboard/src/charm.py                   |  72 +++++++-
 ceph-dashboard/src/interface_radosgw_user.py  |  76 ++++++++
 ceph-dashboard/tests/bundles/focal.yaml       |   9 +
 ceph-dashboard/tests/bundles/hirsute.yaml     |  63 +++++++
 ceph-dashboard/tests/tests.yaml               |   6 +-
 .../unit_tests/test_ceph_dashboard_charm.py   | 145 ++++++++++++++-
 .../unit_tests/test_interface_radosgw_user.py | 169 ++++++++++++++++++
 10 files changed, 567 insertions(+), 5 deletions(-)
 create mode 100644 ceph-dashboard/src/interface_radosgw_user.py
 create mode 100644 ceph-dashboard/tests/bundles/hirsute.yaml
 create mode 100644 ceph-dashboard/unit_tests/test_interface_radosgw_user.py

diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md
index 27edc762..0055f2d1 100644
--- a/ceph-dashboard/README.md
+++ b/ceph-dashboard/README.md
@@ -69,6 +69,14 @@ To enable Prometheus alerting, add the following relations:
     juju relate ceph-dashboard:alertmanager-service prometheus-alertmanager:alertmanager-service
     juju relate prometheus:alertmanager-service prometheus-alertmanager:alertmanager-service
 
+## Object Gateway
+
+To enable object gateway management, add the following relation:
+
+    juju relate ceph-dashboard:radosgw-dashboard ceph-radosgw:radosgw-user
+
+NOTE: On Octopus or earlier, the dashboard can only be related to one ceph-radosgw application.
+ [ceph-dashboard]: https://docs.ceph.com/en/latest/mgr/dashboard/ diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index 40964bfa..d66e9fda 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -16,6 +16,7 @@ subordinate: true series: - focal - groovy +- hirsute requires: dashboard: interface: ceph-dashboard @@ -28,6 +29,8 @@ requires: interface: http prometheus: interface: http + radosgw-dashboard: + interface: radosgw-user provides: grafana-dashboard: interface: grafana-dashboard diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 854e706a..019527dc 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -3,8 +3,27 @@ - charm-unit-jobs check: jobs: - - focal + - focal-octopus + - hirsute-pacific vars: needs_charm_build: true charm_build_name: ceph-dashboard build_type: charmcraft +- job: + name: focal-octopus + parent: func-target + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 + vars: + tox_extra_args: focal +- job: + name: hirsute-pacific + parent: func-target + dependencies: &smoke-jobs + - focal-octopus + vars: + tox_extra_args: hirsute diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index bed3f58d..3f6ea485 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -29,6 +29,7 @@ import interface_api_endpoints import interface_grafana_dashboard import interface_http +import interface_radosgw_user import cryptography.hazmat.primitives.serialization as serialization import charms_ceph.utils as ceph_utils import charmhelpers.core.host as ch_host @@ -161,6 +162,10 @@ def __init__(self, *args) -> None: self.ca_client = ca_client.CAClient( self, 'certificates') + self.radosgw_user = interface_radosgw_user.RadosGWUserRequires( + self, + 'radosgw-dashboard', + request_system_role=True) self.framework.observe( self.mon.on.mon_ready, self._configure_dashboard) @@ -170,6 +175,9 @@ def __init__(self, *args) -> None: self.framework.observe( self.ca_client.on.tls_server_config_ready, self._configure_dashboard) + self.framework.observe( + self.radosgw_user.on.gw_user_ready, + self._configure_dashboard) self.framework.observe(self.on.add_user_action, self._add_user_action) self.ingress = interface_api_endpoints.APIEndpointsRequires( self, @@ -211,6 +219,50 @@ def _register_dashboards(self) -> None: logging.info( "register_grafana_dashboard: {}".format(dash_file)) + def _update_legacy_radosgw_creds(self, access_key: str, + secret_key: str) -> None: + """Update dashboard db with access & secret key for rados gateways. + + This method uses the legacy format which only supports one gateway. 
+ """ + self._apply_file_setting('set-rgw-api-access-key', access_key) + self._apply_file_setting('set-rgw-api-secret-key', secret_key) + + def _update_multi_radosgw_creds(self, creds: str) -> None: + """Update dashboard db with access & secret key for rados gateway.""" + access_keys = {c['daemon_id']: c['access_key'] for c in creds} + secret_keys = {c['daemon_id']: c['secret_key'] for c in creds} + self._apply_file_setting( + 'set-rgw-api-access-key', + json.dumps(access_keys)) + self._apply_file_setting( + 'set-rgw-api-secret-key', + json.dumps(secret_keys)) + + def _support_multiple_gateways(self) -> bool: + """Check if version of dashboard supports multiple rados gateways""" + return ch_host.cmp_pkgrevno('ceph-common', '16.0') > 0 + + def _manage_radosgw(self) -> None: + """Register rados gateways in dashboard db""" + if self.unit.is_leader(): + creds = self.radosgw_user.get_user_creds() + if len(creds) < 1: + logging.info("No object gateway creds found") + return + if self._support_multiple_gateways(): + self._update_multi_radosgw_creds(creds) + else: + if len(creds) > 1: + logging.error( + "Cannot enable object gateway support. Ceph release " + "does not support multiple object gateways in the " + "dashboard") + else: + self._update_legacy_radosgw_creds( + creds[0]['access_key'], + creds[0]['secret_key']) + def _on_ca_available(self, _) -> None: """Request TLS certificates.""" addresses = set() @@ -280,16 +332,31 @@ def kick_dashboard(self) -> None: ceph_utils.mgr_disable_dashboard() ceph_utils.mgr_enable_dashboard() - def _run_cmd(self, cmd: List[str]) -> None: + def _run_cmd(self, cmd: List[str]) -> str: """Run command in subprocess `cmd` The command to run """ try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + return output.decode('UTF-8') except subprocess.CalledProcessError as exc: logging.exception("Command failed: {}".format(exc.output)) + def _apply_setting(self, ceph_setting: str, value: List[str]) -> str: + """Apply a dashboard setting""" + cmd = ['ceph', 'dashboard', ceph_setting] + cmd.extend(value) + return self._run_cmd(cmd) + + def _apply_file_setting(self, ceph_setting: str, + file_contents: str) -> str: + """Apply a setting via a file""" + with tempfile.NamedTemporaryFile(mode='w', delete=True) as _file: + _file.write(file_contents) + _file.flush() + return self._apply_setting(ceph_setting, ['-i', _file.name]) + def _apply_ceph_config_from_charm_config(self) -> None: """Read charm config and apply settings to dashboard config""" for option in self.CHARM_TO_CEPH_OPTIONS: @@ -342,6 +409,7 @@ def _configure_dashboard(self, _) -> None: 'ceph', 'dashboard', 'set-prometheus-api-host', prometheus_ep]) self._register_dashboards() + self._manage_radosgw() self._stored.is_started = True self.update_status() diff --git a/ceph-dashboard/src/interface_radosgw_user.py b/ceph-dashboard/src/interface_radosgw_user.py new file mode 100644 index 00000000..16278666 --- /dev/null +++ b/ceph-dashboard/src/interface_radosgw_user.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object) + + +class RadosGWUserEvent(EventBase): + pass + + +class RadosGWUserEvents(ObjectEvents): + gw_user_ready = EventSource(RadosGWUserEvent) + + +class RadosGWUserRequires(Object): + + on = RadosGWUserEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name, request_system_role=False): + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.request_system_role = request_system_role + self.framework.observe( + charm.on[self.relation_name].relation_joined, + self.request_user) + self.framework.observe( + charm.on[self.relation_name].relation_changed, + self._on_relation_changed) + + def request_user(self, event): + if self.model.unit.is_leader(): + for relation in self.framework.model.relations[self.relation_name]: + relation.data[self.model.app]['system-role'] = json.dumps( + self.request_system_role) + + def get_user_creds(self): + creds = [] + for relation in self.framework.model.relations[self.relation_name]: + app_data = relation.data[relation.app] + for unit in relation.units: + unit_data = relation.data[unit] + cred_data = { + 'access_key': app_data.get('access-key'), + 'secret_key': app_data.get('secret-key'), + 'uid': app_data.get('uid'), + 'daemon_id': unit_data.get('daemon-id')} + if all(cred_data.values()): + creds.append(cred_data) + creds = sorted(creds, key=lambda k: k['daemon_id']) + return creds + + def _on_relation_changed(self, event): + """Handle the relation-changed event.""" + if self.get_user_creds(): + self.on.gw_user_ready.emit() diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 9effd131..726dffdc 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -47,6 +47,9 @@ applications: prometheus-alertmanager: charm: cs:prometheus-alertmanager num_units: 1 + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 3 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -80,3 +83,9 @@ relations: - 'prometheus:website' - - 'prometheus:alertmanager-service' - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' diff --git a/ceph-dashboard/tests/bundles/hirsute.yaml b/ceph-dashboard/tests/bundles/hirsute.yaml new file mode 100644 index 00000000..ce0cee56 --- /dev/null +++ b/ceph-dashboard/tests/bundles/hirsute.yaml @@ -0,0 +1,63 @@ +local_overlay_enabled: False +series: hirsute +applications: + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + vault: + num_units: 1 + charm: cs:~openstack-charmers-next/vault + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + constraints: mem=3072M 
+ num_units: 3 + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + ceph-radosgw-east: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 3 + options: + pool-prefix: east + region: east + ceph-radosgw-west: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 3 + options: + pool-prefix: west + region: west +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-radosgw-east:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw-east:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw-east:radosgw-user' + - - 'ceph-radosgw-west:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw-west:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw-west:radosgw-user' diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 15110b59..2e6ee879 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -1,6 +1,7 @@ charm_name: ceph-dasboard gate_bundles: - focal + - hirsute smoke_bundles: - focal configure: @@ -12,7 +13,7 @@ tests: target_deploy_status: ceph-dashboard: workload-status: blocked - workload-status-message-regex: "No certificates found|Charm config option" + workload-status-message-regex: "No certificates found|Charm config option|Unit is ready" vault: workload-status: blocked workload-status-message-prefix: Vault needs to be initialized @@ -25,3 +26,6 @@ target_deploy_status: telegraf: workload-status: active workload-status-message-prefix: Monitoring +tests_options: + force_deploy: + - hirsute diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index b835c0ad..c0dcd0c0 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -21,7 +21,7 @@ sys.path.append('lib') # noqa sys.path.append('src') # noqa -from mock import call, patch, MagicMock +from mock import ANY, call, patch, MagicMock from ops.testing import Harness, _TestingModelBackend from ops.model import ( @@ -155,6 +155,7 @@ class TestCephDashboardCharmBase(CharmTestCase): PATCHES = [ 'ceph_utils', + 'ch_host', 'socket', 'subprocess', 'ch_host', @@ -464,6 +465,148 @@ def test_certificates_from_config(self): self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() + def test_rados_gateway(self): + self.ceph_utils.is_dashboard_enabled.return_value = True + self.ch_host.cmp_pkgrevno.return_value = 1 + mon_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') + rel_id = self.harness.add_relation('radosgw-dashboard', 'ceph-radosgw') + self.harness.begin() + self.harness.set_leader() + self.harness.add_relation_unit( + mon_rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( + mon_rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) + self.harness.add_relation_unit( + rel_id, + 'ceph-radosgw/0') + self.harness.add_relation_unit( + rel_id, + 'ceph-radosgw/1') + self.harness.update_relation_data( + rel_id, + 'ceph-radosgw/0', + { + 'daemon-id': 
'juju-80416c-zaza-7af97ef8a776-3'}) + self.harness.update_relation_data( + rel_id, + 'ceph-radosgw/1', + { + 'daemon-id': 'juju-80416c-zaza-7af97ef8a776-4'}) + self.harness.update_relation_data( + rel_id, + 'ceph-radosgw', + { + 'access-key': 'XNUZVPL364U0BL1OXWJZ', + 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', + 'uid': 'radosgw-user-9'}) + self.subprocess.check_output.assert_has_calls([ + call(['ceph', 'dashboard', 'set-rgw-api-access-key', '-i', ANY], + stderr=self.subprocess.STDOUT), + call().decode('UTF-8'), + call(['ceph', 'dashboard', 'set-rgw-api-secret-key', '-i', ANY], + stderr=self.subprocess.STDOUT), + call().decode('UTF-8'), + ]) + + def test_rados_gateway_multi_relations_pacific(self): + self.ceph_utils.is_dashboard_enabled.return_value = True + self.ch_host.cmp_pkgrevno.return_value = 1 + rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') + rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') + mon_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') + self.harness.begin() + self.harness.set_leader() + self.harness.add_relation_unit( + mon_rel_id, + 'ceph-mon/0') + self.harness.update_relation_data( + mon_rel_id, + 'ceph-mon/0', + { + 'mon-ready': 'True'}) + self.harness.add_relation_unit( + rel_id1, + 'ceph-eu/0') + self.harness.add_relation_unit( + rel_id2, + 'ceph-us/0') + self.harness.update_relation_data( + rel_id1, + 'ceph-eu/0', + { + 'daemon-id': 'juju-80416c-zaza-7af97ef8a776-3'}) + self.harness.update_relation_data( + rel_id2, + 'ceph-us/0', + { + 'daemon-id': 'juju-dddddd-zaza-sdfsfsfs-4'}) + self.harness.update_relation_data( + rel_id1, + 'ceph-eu', + { + 'access-key': 'XNUZVPL364U0BL1OXWJZ', + 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', + 'uid': 'radosgw-user-9'}) + self.subprocess.check_output.reset_mock() + self.harness.update_relation_data( + rel_id2, + 'ceph-us', + { + 'access-key': 'JGHKJGDKJGJGJHGYYYYM', + 'secret-key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44', + 'uid': 'radosgw-user-10'}) + self.subprocess.check_output.assert_has_calls([ + call(['ceph', 'dashboard', 'set-rgw-api-access-key', '-i', ANY], + stderr=self.subprocess.STDOUT), + call().decode('UTF-8'), + call(['ceph', 'dashboard', 'set-rgw-api-secret-key', '-i', ANY], + stderr=self.subprocess.STDOUT), + call().decode('UTF-8'), + ]) + + def test_rados_gateway_multi_relations_octopus(self): + self.ch_host.cmp_pkgrevno.return_value = -1 + rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') + rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') + self.harness.begin() + self.harness.set_leader() + self.harness.add_relation_unit( + rel_id1, + 'ceph-eu/0') + self.harness.add_relation_unit( + rel_id2, + 'ceph-us/0') + self.harness.update_relation_data( + rel_id1, + 'ceph-eu/0', + { + 'daemon-id': 'juju-80416c-zaza-7af97ef8a776-3'}) + self.harness.update_relation_data( + rel_id2, + 'ceph-us/0', + { + 'daemon-id': 'juju-dddddd-zaza-sdfsfsfs-4'}) + self.harness.update_relation_data( + rel_id1, + 'ceph-eu', + { + 'access-key': 'XNUZVPL364U0BL1OXWJZ', + 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', + 'uid': 'radosgw-user-9'}) + self.subprocess.check_output.reset_mock() + self.harness.update_relation_data( + rel_id2, + 'ceph-us', + { + 'access-key': 'JGHKJGDKJGJGJHGYYYYM', + 'secret-key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44', + 'uid': 'radosgw-user-10'}) + self.assertFalse(self.subprocess.check_output.called) + @patch.object(charm.secrets, 'choice') def test__gen_user_password(self, _choice): 
self.harness.begin() diff --git a/ceph-dashboard/unit_tests/test_interface_radosgw_user.py b/ceph-dashboard/unit_tests/test_interface_radosgw_user.py new file mode 100644 index 00000000..06645a19 --- /dev/null +++ b/ceph-dashboard/unit_tests/test_interface_radosgw_user.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import sys +sys.path.append('lib') # noqa +sys.path.append('src') # noqa +from ops.testing import Harness +from ops.charm import CharmBase +import interface_radosgw_user + + +class TestRadosGWUserRequires(unittest.TestCase): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.seen_events = [] + self.radosgw_user = interface_radosgw_user.RadosGWUserRequires( + self, + 'radosgw-dashboard') + + self.framework.observe( + self.radosgw_user.on.gw_user_ready, + self._log_event) + + def _log_event(self, event): + self.seen_events.append(type(event).__name__) + + def setUp(self): + super().setUp() + self.harness = Harness( + self.MyCharm, + meta=''' +name: my-charm +requires: + radosgw-dashboard: + interface: radosgw-user +''' + ) + + def test_init(self): + self.harness.begin() + self.assertEqual( + self.harness.charm.radosgw_user.relation_name, + 'radosgw-dashboard') + + def test_add_radosgw_dashboard_relation(self): + rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') + rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') + self.harness.begin() + self.assertEqual( + self.harness.charm.seen_events, + []) + self.harness.set_leader() + self.harness.add_relation_unit( + rel_id1, + 'ceph-eu/0') + self.harness.add_relation_unit( + rel_id1, + 'ceph-eu/1') + self.harness.add_relation_unit( + rel_id2, + 'ceph-us/0') + self.harness.add_relation_unit( + rel_id2, + 'ceph-us/1') + self.harness.update_relation_data( + rel_id1, + 'ceph-eu/0', + { + 'daemon-id': 'juju-80416c-zaza-7af97ef8a776-3'}) + self.harness.update_relation_data( + rel_id1, + 'ceph-eu/1', + { + 'daemon-id': 'juju-80416c-zaza-7af97ef8a776-4'}) + self.harness.update_relation_data( + rel_id2, + 'ceph-us/0', + { + 'daemon-id': 'juju-dddddd-zaza-sdfsfsfs-4'}) + self.harness.update_relation_data( + rel_id2, + 'ceph-us/1', + { + 'daemon-id': 'juju-dddddd-zaza-sdfsfsfs-5'}) + self.harness.update_relation_data( + rel_id1, + 'ceph-eu', + { + 'access-key': 'XNUZVPL364U0BL1OXWJZ', + 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', + 'uid': 'radosgw-user-9'}) + self.assertEqual( + self.harness.charm.seen_events, + ['RadosGWUserEvent']) + self.harness.update_relation_data( + rel_id2, + 'ceph-us', + { + 'access-key': 'JGHKJGDKJGJGJHGYYYYM', + 'secret-key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44', + 'uid': 'radosgw-user-10'}) + self.assertEqual( + self.harness.charm.radosgw_user.get_user_creds(), + [ + { + 'access_key': 'XNUZVPL364U0BL1OXWJZ', + 'daemon_id': 'juju-80416c-zaza-7af97ef8a776-3', + 'secret_key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', + 'uid': 
'radosgw-user-9'},
+            {
+                'access_key': 'XNUZVPL364U0BL1OXWJZ',
+                'daemon_id': 'juju-80416c-zaza-7af97ef8a776-4',
+                'secret_key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo',
+                'uid': 'radosgw-user-9'},
+            {
+                'access_key': 'JGHKJGDKJGJGJHGYYYYM',
+                'daemon_id': 'juju-dddddd-zaza-sdfsfsfs-4',
+                'secret_key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44',
+                'uid': 'radosgw-user-10'},
+            {
+                'access_key': 'JGHKJGDKJGJGJHGYYYYM',
+                'daemon_id': 'juju-dddddd-zaza-sdfsfsfs-5',
+                'secret_key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44',
+                'uid': 'radosgw-user-10'}])
+
+    def test_add_radosgw_dashboard_relation_missing_data(self):
+        rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu')
+        self.harness.begin()
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            [])
+        self.harness.set_leader()
+        self.harness.add_relation_unit(
+            rel_id1,
+            'ceph-eu/0')
+        self.harness.update_relation_data(
+            rel_id1,
+            'ceph-eu/0',
+            {
+                'daemon-id': 'juju-80416c-zaza-7af97ef8a776-3'})
+        self.harness.update_relation_data(
+            rel_id1,
+            'ceph-eu',
+            {
+                'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo',
+                'uid': 'radosgw-user-9'})
+        self.assertEqual(
+            self.harness.charm.radosgw_user.get_user_creds(),
+            [])
+        self.assertEqual(
+            self.harness.charm.seen_events,
+            [])

From f7c54372889ca28a641d30f592ee91dffd3a27d7 Mon Sep 17 00:00:00 2001
From: Garrett Thompson
Date: Tue, 7 Sep 2021 08:03:30 -0600
Subject: [PATCH 2262/2699] Force the groovy-victoria bundle to deploy.

Groovy Gorilla (20.10) is now EOL, and functional tests will fail
because it is no longer supported. This moves the bundle to the
force section, allowing tests to be run.

Change-Id: I71e69346c288bcae492bb66be8b67feb4892012c
---
 ceph-mon/tests/tests.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml
index 9b41ba55..2aafffa7 100644
--- a/ceph-mon/tests/tests.yaml
+++ b/ceph-mon/tests/tests.yaml
@@ -39,3 +39,4 @@ tests_options:
   force_deploy:
     - trusty-mitaka
     - hirsute-wallaby
+    - groovy-victoria

From e3ffd240d3e027d01f42884509b893a1c7070c80 Mon Sep 17 00:00:00 2001
From: Garrett Thompson
Date: Tue, 7 Sep 2021 14:28:30 -0600
Subject: [PATCH 2263/2699] Change noout to be a CRITICAL alert instead of
 WARNING.

When the noout flag is set in a Ceph cluster, the Nagios check
currently marks this as a warning (like Ceph itself). However,
setting it to CRITICAL will raise visibility and indicate to the
operator that this should be a temporary state.
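
As a condensed sketch of the resulting behaviour (simplified from
check_ceph_status.py; the 'noout flag(s) set' message matches the
sample JSON added below, while the function name is illustrative):

    import re

    def noout_severity(summaries):
        # With this change, a noout summary is escalated from WARNING
        # (Ceph's own severity for it) to CRITICAL.
        for status in summaries:
            if re.match('noout flag', status):
                return 'CRITICAL'
        return 'OK'

    print(noout_severity(['noout flag(s) set']))  # -> CRITICAL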
Closes-Bug: 1926551 Change-Id: I9831cfea3f63e82fbc8bfebc938a9795b69111c7 --- ceph-mon/files/nagios/check_ceph_status.py | 4 + ceph-mon/unit_tests/ceph_noout.json | 206 ++++++++++++++++++ ceph-mon/unit_tests/ceph_noout_luminous.json | 102 +++++++++ ceph-mon/unit_tests/test_check_ceph_status.py | 24 ++ 4 files changed, 336 insertions(+) create mode 100644 ceph-mon/unit_tests/ceph_noout.json create mode 100644 ceph-mon/unit_tests/ceph_noout_luminous.json diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 2f49b23b..045bcac5 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -200,6 +200,10 @@ def check_ceph_status(args): if args.raise_nodeepscrub: if re.match("nodeep-scrub flag", status): status_critical = True + # Check if noout is set + if re.match("noout flag", status): + status_critical = True + status_msg.append("noout flag is set") if overall_status == 'HEALTH_CRITICAL' or \ overall_status == 'HEALTH_ERR': # HEALTH_ERR, report critical diff --git a/ceph-mon/unit_tests/ceph_noout.json b/ceph-mon/unit_tests/ceph_noout.json new file mode 100644 index 00000000..3a57eb2b --- /dev/null +++ b/ceph-mon/unit_tests/ceph_noout.json @@ -0,0 +1,206 @@ +{ + "health": { + "health": { + "health_services": [ + { + "mons": [ + { + "name": "juju-c62a41-21-lxd-0", + "kb_total": 334602320, + "kb_used": 2127960, + "kb_avail": 315454468, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:09.932189", + "store_stats": { + "bytes_total": 34880542, + "bytes_sst": 0, + "bytes_log": 1647123, + "bytes_misc": 33233419, + "last_updated": "0.000000" + }, + "health": "HEALTH_WARN" + }, + { + "name": "juju-c62a41-24-lxd-0", + "kb_total": 334602320, + "kb_used": 2128116, + "kb_avail": 315454312, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:16.418007", + "store_stats": { + "bytes_total": 36811676, + "bytes_sst": 0, + "bytes_log": 3574345, + "bytes_misc": 33237331, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-25-lxd-0", + "kb_total": 334602320, + "kb_used": 2128860, + "kb_avail": 315453568, + "avail_percent": 94, + "last_updated": "2018-11-08 09:47:21.198816", + "store_stats": { + "bytes_total": 37388424, + "bytes_sst": 0, + "bytes_log": 4151569, + "bytes_misc": 33236855, + "last_updated": "0.000000" + }, + "health": "HEALTH_OK" + } + ] + } + ] + }, + "timechecks": { + "epoch": 14, + "round": 4480, + "round_status": "finished", + "mons": [ + { + "name": "juju-c62a41-21-lxd-0", + "skew": 0.000000, + "latency": 0.000000, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-24-lxd-0", + "skew": 0.000282, + "latency": 0.000989, + "health": "HEALTH_OK" + }, + { + "name": "juju-c62a41-25-lxd-0", + "skew": -0.001223, + "latency": 0.000776, + "health": "HEALTH_OK" + } + ] + }, + "summary": [ + { + "severity": "HEALTH_WARN", + "summary": "noout flag(s) set" + }, + { + "severity": "HEALTH_WARN", + "summary": "19 pgs backfill_wait" + }, + { + "severity": "HEALTH_WARN", + "summary": "4 pgs backfilling" + }, + { + "severity": "HEALTH_WARN", + "summary": "1 pgs peering" + }, + { + "severity": "HEALTH_WARN", + "summary": "24 pgs stuck unclean" + }, + { + "severity": "HEALTH_WARN", + "summary": "recovery 17386\/112794 objects misplaced (15.414%)" + }, + { + "severity": "HEALTH_WARN", + "summary": "pool pool1 has many more objects per pg than average (too few pgs?)" + }, + { + "severity": "HEALTH_WARN", + "summary": "nodeep-scrub flag(s) set" + } + ], + 
"overall_status": "HEALTH_WARN", + "detail": [] + }, + "fsid": "66af7af5-2f60-4e0e-94dc-49f49bd37284", + "election_epoch": 14, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "juju-c62a41-21-lxd-0", + "juju-c62a41-24-lxd-0", + "juju-c62a41-25-lxd-0" + ], + "monmap": { + "epoch": 2, + "fsid": "66af7af5-2f60-4e0e-94dc-49f49bd37284", + "modified": "2018-10-31 15:37:56.902830", + "created": "2018-10-31 15:37:40.288870", + "mons": [ + { + "rank": 0, + "name": "juju-c62a41-21-lxd-0", + "addr": "100.84.195.4:6789\/0" + }, + { + "rank": 1, + "name": "juju-c62a41-24-lxd-0", + "addr": "100.84.196.4:6789\/0" + }, + { + "rank": 2, + "name": "juju-c62a41-25-lxd-0", + "addr": "100.84.196.5:6789\/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 316, + "num_osds": 48, + "num_up_osds": 48, + "num_in_osds": 48, + "full": false, + "nearfull": false, + "num_remapped_pgs": 22 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 3448 + }, + { + "state_name": "active+remapped+wait_backfill", + "count": 19 + }, + { + "state_name": "active+remapped+backfilling", + "count": 4 + }, + { + "state_name": "peering", + "count": 1 + } + ], + "version": 141480, + "num_pgs": 3472, + "data_bytes": 157009583781, + "bytes_used": 487185850368, + "bytes_avail": 75282911256576, + "bytes_total": 75770097106944, + "misplaced_objects": 17386, + "misplaced_total": 112794, + "misplaced_ratio": 0.154139, + "recovering_objects_per_sec": 436, + "recovering_bytes_per_sec": 1832614589, + "recovering_keys_per_sec": 0, + "num_objects_recovered": 446, + "num_bytes_recovered": 1870659584, + "num_keys_recovered": 0 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + } +} diff --git a/ceph-mon/unit_tests/ceph_noout_luminous.json b/ceph-mon/unit_tests/ceph_noout_luminous.json new file mode 100644 index 00000000..4658af8a --- /dev/null +++ b/ceph-mon/unit_tests/ceph_noout_luminous.json @@ -0,0 +1,102 @@ +{ + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "health": { + "checks": { + "OSDMAP_FLAGS": { + "severity": "HEALTH_WARN", + "summary": { + "message": "noout flag(s) set" + } + } + }, + "status": "HEALTH_WARN" + }, + "election_epoch": 5, + "quorum": [ + 0 + ], + "quorum_names": [ + "juju-460e0f-11" + ], + "monmap": { + "epoch": 1, + "fsid": "b03a2900-e297-11e8-a7db-00163ed10659", + "modified": "2018-11-07 14:17:12.324408", + "created": "2018-11-07 14:17:12.324408", + "features": { + "persistent": [ + "kraken", + "luminous" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "juju-460e0f-11", + "addr": "192.168.100.81:6789/0", + "public_addr": "192.168.100.81:6789/0" + } + ] + }, + "osdmap": { + "osdmap": { + "epoch": 518, + "num_osds": 9, + "num_up_osds": 9, + "num_in_osds": 9, + "full": false, + "nearfull": false, + "num_remapped_pgs": 0 + } + }, + "pgmap": { + "pgs_by_state": [ + { + "state_name": "active+clean", + "count": 128 + } + ], + "num_pgs": 128, + "num_pools": 1, + "num_objects": 14896, + "data_bytes": 62440603919, + "bytes_used": 14225776640, + "bytes_avail": 9450938368, + "bytes_total": 23676715008 + }, + "fsmap": { + "epoch": 1, + "by_rank": [] + }, + "mgrmap": { + "epoch": 5, + "active_gid": 14097, + "active_name": "juju-460e0f-11", + "active_addr": "192.168.100.81:6800/204", + "available": true, + "standbys": [], + "modules": [ + "balancer", + "restful", + "status" + ], + "available_modules": [ + "balancer", + "dashboard", + "influx", + "localpool", + "prometheus", + "restful", + "selftest", + "status", + "zabbix" + ], + "services": {} + }, + "servicemap": { + 
"epoch": 1, + "modified": "0.000000", + "services": {} + } +} diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index 377976e3..b7b06155 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -120,6 +120,17 @@ def test_health_crit_deepscrub(self, mock_ceph_version, mock_subprocess): self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, pre-luminous, noout + @patch('check_ceph_status.get_ceph_version') + def test_health_crit_noout(self, mock_ceph_version, mock_subprocess): + mock_ceph_version.return_value = [10, 2, 9] + with open('unit_tests/ceph_noout.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + # All OK, luminous @patch('check_ceph_status.get_ceph_version') def test_health_ok_luminous(self, mock_ceph_version, mock_subprocess): @@ -209,6 +220,19 @@ def test_health_crit_deepscrub_luminous(self, self.assertRaises(check_ceph_status.CriticalError, lambda: check_ceph_status.check_ceph_status(args)) + # Error, luminous, noout + @patch('check_ceph_status.get_ceph_version') + def test_health_crit_noout_luminous(self, + mock_ceph_version, + mock_subprocess): + mock_ceph_version.return_value = [12, 2, 0] + with open('unit_tests/ceph_noout_luminous.json') as f: + tree = f.read() + mock_subprocess.return_value = tree.encode('UTF-8') + args = check_ceph_status.parse_args("") + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + # Additional Ok, luminous, deepscrub @patch('check_ceph_status.get_ceph_version') def test_additional_ok_deepscrub_luminous(self, From ae09ab9cc1678691761a763df0bec7406fc5f8d0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 9 Sep 2021 08:14:18 +0000 Subject: [PATCH 2264/2699] Increase password complexity Use upper and lower case letter as well as digits for characters in password, also increase password length. 
Change-Id: I2739166a95a09b4363eebe6b8cc6aac8e87f3c2d
---
 ceph-dashboard/src/charm.py                            | 6 ++++--
 ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py
index bed3f58d..9a2cc02b 100755
--- a/ceph-dashboard/src/charm.py
+++ b/ceph-dashboard/src/charm.py
@@ -424,9 +424,11 @@ def _configure_tls(self) -> None:
                 self.TLS_KEY_PATH)
         self.kick_dashboard()
 
-    def _gen_user_password(self, length: int = 8) -> str:
+    def _gen_user_password(self, length: int = 12) -> str:
         """Generate a password"""
-        alphabet = string.ascii_letters + string.digits
+        alphabet = (
+            string.ascii_lowercase + string.ascii_uppercase + string.digits)
+
         return ''.join(secrets.choice(alphabet) for i in range(length))
 
     def _add_user_action(self, event: ActionEvent) -> None:
diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py
index b835c0ad..14d226a0 100644
--- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py
+++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py
@@ -470,7 +470,7 @@ def test__gen_user_password(self, _choice):
         _choice.return_value = 'r'
         self.assertEqual(
             self.harness.charm._gen_user_password(),
-            'rrrrrrrr')
+            'rrrrrrrrrrrr')

From 7b0d416b750a95ae97ff43879a0908d47cccbb7e Mon Sep 17 00:00:00 2001
From: Robert Gildein
Date: Fri, 19 Mar 2021 14:26:06 +0100
Subject: [PATCH 2265/2699] Add format option to "list-pools" action

These changes provide more detailed output for the "list-pools"
action. The default action output has not changed
("<pool-name>, <pool-name>, ..."), but passing the "format=json"
parameter returns a list of pools with details about each pool.
The list of pools (with or without details) is parsed from
`ceph osd dump`.

Closes-Bug: #1920135
Change-Id: I6e2b834628312ed458527420ca83052d29bd2b9a
---
 ceph-mon/README.md                            |  26 ++++
 ceph-mon/actions.yaml                         |   9 ++
 ceph-mon/actions/list_pools.py                |  56 +++++++--
 ceph-mon/unit_tests/test_action_list_pools.py | 115 ++++++++++++++++++
 4 files changed, 198 insertions(+), 8 deletions(-)
 create mode 100644 ceph-mon/unit_tests/test_action_list_pools.py

diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index 27af091d..b5b30a8d 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -176,6 +176,31 @@ deployed then see file `actions.yaml`.
 * `snapshot-pool`
 * `unset-noout`
 
+## Presenting the list of Ceph pools with details
+
+The following example returns the list of pools with details: `id`, `name`,
+`size` and `min_size`.
+The [jq][jq] utility is used to parse the action output in JSON format.
+
+    juju run-action --wait ceph-mon/leader list-pools format=json \
+    --format json | jq '.[].results.message | fromjson | .[]
+    | {pool:.pool, name:.pool_name, size:.size, min_size:.min_size}'
+
+Sample output:
+
+    {
+      "pool": 1,
+      "name": "test",
+      "size": 3,
+      "min_size": 2
+    }
+    {
+      "pool": 2,
+      "name": "test2",
+      "size": 3,
+      "min_size": 2
+    }
+
 # Bugs
 
 Please report bugs on [Launchpad][lp-bugs-charm-ceph-mon].
@@ -197,3 +222,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg].
[prometheus-charm]: https://jaas.ai/prometheus2 [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA [upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets +[jq]: https://stedolan.github.io/jq/ diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 6dc940a1..fe36463f 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -210,6 +210,15 @@ list-erasure-profiles: additionalProperties: false list-pools: description: "List your cluster's pools" + params: + format: + type: string + default: text + enum: + - text + - text-full + - json + description: "Specify output format (text|text-full|json). The formats `text-full` and `json` provide the same level of details." additionalProperties: false set-pool-max-bytes: description: "Set pool quotas for the maximum number of bytes." diff --git a/ceph-mon/actions/list_pools.py b/ceph-mon/actions/list_pools.py index aa4ca745..10c05611 100755 --- a/ceph-mon/actions/list_pools.py +++ b/ceph-mon/actions/list_pools.py @@ -13,19 +13,59 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import json +import os import sys from subprocess import check_output, CalledProcessError -sys.path.append('hooks') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, "../hooks")) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_hooks) + + +from charmhelpers.core.hookenv import ( + log, + function_fail, + function_get, + function_set +) -from charmhelpers.core.hookenv import log, action_set, action_fail -if __name__ == '__main__': +def get_list_pools(output_format="text"): + """Get list of Ceph pools. + + :param output_format: specify output format + :type output_format: str + :returns: joined list of string or + dump list of pools with details + :rtype: str + """ + if output_format == "text": + return check_output(["ceph", "--id", "admin", "osd", + "lspools"]).decode("UTF-8") + + ceph_osd_dump = check_output(["ceph", "--id", "admin", "osd", "dump", + "--format=json"]).decode("UTF-8") + pools = json.loads(ceph_osd_dump).get("pools", []) + return json.dumps(pools, + indent=2 if output_format == "text-full" else None) + + +def main(): try: - out = check_output(['ceph', '--id', 'admin', - 'osd', 'lspools']).decode('UTF-8') - action_set({'message': out}) + list_pools = get_list_pools(function_get("format")) + function_set({"message": list_pools}) except CalledProcessError as e: log(e) - action_fail("List pools failed with error: {}".format(str(e))) + function_fail("List pools failed with error: {}".format(str(e))) + + +if __name__ == "__main__": + main() diff --git a/ceph-mon/unit_tests/test_action_list_pools.py b/ceph-mon/unit_tests/test_action_list_pools.py new file mode 100644 index 00000000..f42ba439 --- /dev/null +++ b/ceph-mon/unit_tests/test_action_list_pools.py @@ -0,0 +1,115 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import json + +from actions import list_pools +from test_utils import CharmTestCase + + +class ListPoolsTestCase(CharmTestCase): + ceph_osd_dump = b""" + {"epoch": 19, "fsid": "90e7e074-8263-11eb-9c5c-fa163eee3d70", "created": + "2021-03-11 12:16:36.284078", "modified": "2021-03-18 10:41:23.173546", + "flags": "sortbitwise,recovery_deletes,purged_snapdirs", "crush_version": + 7, "full_ratio": 0.95, "backfillfull_ratio": 0.9, "nearfull_ratio": 0.85, + "cluster_snapshot": "", "pool_max": 2, "max_osd": 3, + "require_min_compat_client": "jewel", "min_compat_client": "jewel", + "require_osd_release": "luminous", "pools": [{"pool": 1, "pool_name": + "test", "flags": 1, "flags_names": "hashpspool", "type": 1, "size": 3, + "min_size": 2, "crush_rule": 0, "object_hash": 2, "pg_num": 8, + "pg_placement_num": 8, "crash_replay_interval": 0, "last_change": "16", + "last_force_op_resend": "0", "last_force_op_resend_preluminous": "0", + "auid": 0, "snap_mode": "selfmanaged", "snap_seq": 0, "snap_epoch": 0, + "pool_snaps": [], "removed_snaps": "[]", "quota_max_bytes": 0, + "quota_max_objects": 0, "tiers": [], "tier_of": -1, "read_tier": -1, + "write_tier": -1, "cache_mode": "none", "target_max_bytes": 0, + "target_max_objects": 0, "cache_target_dirty_ratio_micro": 400000, + "cache_target_dirty_high_ratio_micro": 600000, + "cache_target_full_ratio_micro": 800000, "cache_min_flush_age": 0, + "cache_min_evict_age": 0, "erasure_code_profile": "", "hit_set_params": + {"type": "none"}, "hit_set_period": 0, "hit_set_count": 0, + "use_gmt_hitset": true, "min_read_recency_for_promote": 0, + "min_write_recency_for_promote": 0, "hit_set_grade_decay_rate": 0, + "hit_set_search_last_n": 0, "grade_table": [], "stripe_width": 0, + "expected_num_objects": 0, "fast_read": false, "options": {}, + "application_metadata": {"unknown": {}}}, {"pool": 2, "pool_name": + "test2", "flags": 1, "flags_names": "hashpspool", "type": 1, "size": 3, + "min_size": 2, "crush_rule": 0, "object_hash": 2, "pg_num": 8, + "pg_placement_num": 8, "crash_replay_interval": 0, "last_change": "19", + "last_force_op_resend": "0", "last_force_op_resend_preluminous": "0", + "auid": 0, "snap_mode": "selfmanaged", "snap_seq": 0, "snap_epoch": 0, + "pool_snaps": [], "removed_snaps": "[]", "quota_max_bytes": 0, + "quota_max_objects": 0, "tiers": [], "tier_of": -1, "read_tier": -1, + "write_tier": -1, "cache_mode": "none", "target_max_bytes": 0, + "target_max_objects": 0, "cache_target_dirty_ratio_micro": 400000, + "cache_target_dirty_high_ratio_micro": 600000, + "cache_target_full_ratio_micro": 800000, "cache_min_flush_age": 0, + "cache_min_evict_age": 0, "erasure_code_profile": "", "hit_set_params": + {"type": "none"}, "hit_set_period": 0, "hit_set_count": 0, + "use_gmt_hitset": true, "min_read_recency_for_promote": 0, + "min_write_recency_for_promote": 0, "hit_set_grade_decay_rate": 0, + "hit_set_search_last_n": 0, "grade_table": [], "stripe_width": 0, + "expected_num_objects": 0, "fast_read": false, "options": {}, + "application_metadata": {"unknown": {}}}], "osds": [{"osd": 0, "uuid": + "52755316-e15b-430f-82f6-e98f2800f979", "up": 1, "in": 1, "weight": 1.0, + "primary_affinity": 1.0, "last_clean_begin": 0, "last_clean_end": 0, + "up_from": 5, "up_thru": 17, "down_at": 0, "lost_at": 0, "public_addr": + "10.5.0.21:6800/19211", "cluster_addr": "10.5.0.21:6801/19211", + "heartbeat_back_addr": "10.5.0.21:6802/19211", "heartbeat_front_addr": + 
"10.5.0.21:6803/19211", "state": ["exists", "up"]}, {"osd": 1, "uuid": + "ac221f5d-0e99-468a-b3fd-8b3e47dcd8e3", "up": 1, "in": 1, "weight": 1.0, + "primary_affinity": 1.0, "last_clean_begin": 0, "last_clean_end": 0, + "up_from": 9, "up_thru": 17, "down_at": 0, "lost_at": 0, "public_addr": + "10.5.0.5:6800/19128", "cluster_addr": "10.5.0.5:6801/19128", + "heartbeat_back_addr": "10.5.0.5:6802/19128", "heartbeat_front_addr": + "10.5.0.5:6803/19128", "state": ["exists", "up"]}, {"osd": 2, "uuid": + "1e379cd3-0fb2-4645-a574-5096dc8e6f11", "up": 1, "in": 1, "weight": 1.0, + "primary_affinity": 1.0, "last_clean_begin": 0, "last_clean_end": 0, + "up_from": 13, "up_thru": 17, "down_at": 0, "lost_at": 0, "public_addr": + "10.5.0.51:6800/19302", "cluster_addr": "10.5.0.51:6801/19302", + "heartbeat_back_addr": "10.5.0.51:6802/19302", "heartbeat_front_addr": + "10.5.0.51:6803/19302", "state": ["exists", "up"]}], "osd_xinfo": + [{"osd": 0, "down_stamp": "0.000000", "laggy_probability": 0.0, + "laggy_interval": 0, "features": 4611087853746454523, "old_weight": 0}, + {"osd": 1, "down_stamp": "0.000000", "laggy_probability": 0.0, + "laggy_interval": 0, "features": 4611087853746454523, "old_weight": 0}, + {"osd": 2, "down_stamp": "0.000000", "laggy_probability": 0.0, + "laggy_interval": 0, "features": 4611087853746454523, "old_weight": 0}], + "pg_upmap": [], "pg_upmap_items": [], "pg_temp": [], "primary_temp": [], + "blacklist": {}, "erasure_code_profiles": {"default": {"k": "2", "m": "1", + "plugin": "jerasure", "technique": "reed_sol_van"}}}""" + + def setUp(self): + super(ListPoolsTestCase, self).setUp( + list_pools, ["check_output", "function_fail", "function_get", + "function_set"]) + self.function_get.return_value = "json" # format=json + self.check_output.return_value = self.ceph_osd_dump + + def test_getting_list_pools_without_details(self): + """Test getting list of pools without details.""" + self.function_get.return_value = "text" + self.check_output.return_value = b"1 test,2 test2" + list_pools.main() + self.function_get.assert_called_once_with("format") + self.function_set.assert_called_once_with( + {"message": "1 test,2 test2"}) + + def test_getting_list_pools_with_details(self): + """Test getting list of pools with details.""" + list_pools.main() + self.function_get.assert_called_once_with("format") + pools = json.loads(self.function_set.call_args.args[0]["message"]) + self.assertEqual(pools[0]["pool"], 1) + self.assertEqual(pools[0]["size"], 3) + self.assertEqual(pools[0]["min_size"], 2) From 8398c92b5e263f12536e2915244a37796aa92860 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 9 Sep 2021 16:01:27 +0000 Subject: [PATCH 2266/2699] Add ceph-fs to bundles Change-Id: Ifb4d1b7192c9076cf1e966ab9c728380a160fe2e --- ceph-dashboard/tests/bundles/focal.yaml | 5 +++++ ceph-dashboard/tests/bundles/hirsute.yaml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 726dffdc..215364d0 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -50,6 +50,9 @@ applications: ceph-radosgw: charm: cs:~openstack-charmers-next/ceph-radosgw num_units: 3 + ceph-fs: + charm: cs:~openstack-charmers-next/ceph-fs + num_units: 1 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -89,3 +92,5 @@ relations: - 'vault:certificates' - - 'ceph-dashboard:radosgw-dashboard' - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' diff --git 
a/ceph-dashboard/tests/bundles/hirsute.yaml b/ceph-dashboard/tests/bundles/hirsute.yaml index ce0cee56..215e38e1 100644 --- a/ceph-dashboard/tests/bundles/hirsute.yaml +++ b/ceph-dashboard/tests/bundles/hirsute.yaml @@ -38,6 +38,9 @@ applications: options: pool-prefix: west region: west + ceph-fs: + charm: cs:~openstack-charmers-next/ceph-fs + num_units: 1 relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -61,3 +64,5 @@ relations: - 'vault:certificates' - - 'ceph-dashboard:radosgw-dashboard' - 'ceph-radosgw-west:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' From 044dbda01d0c5c212cef6d8c70ac4fe6138c0d38 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 20 Aug 2021 14:18:43 +0000 Subject: [PATCH 2267/2699] Add Ceph dashboard support This change adds an admin-access interface to pass the admin credentials and api endpoint details to another charm. The dashboard charm uses this information to register the iscsi gateways with the dashboard. Change-Id: I5336ecab6a08e71dda22ba0347b8bee2f5683c3f --- ceph-iscsi/metadata.yaml | 3 + ceph-iscsi/osci.yaml | 29 +++++++ ceph-iscsi/requirements.txt | 1 + ceph-iscsi/src/charm.py | 48 ++++++++++- ceph-iscsi/templates/iscsi-gateway.cfg | 4 + .../unit_tests/test_ceph_iscsi_charm.py | 86 +++++++++++++++---- 6 files changed, 148 insertions(+), 23 deletions(-) create mode 100644 ceph-iscsi/osci.yaml diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index e75a35d6..4edc3bd5 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -23,6 +23,9 @@ requires: interface: ceph-client certificates: interface: tls-certificates +provides: + admin-access: + interface: ceph-iscsi-admin-access peers: cluster: interface: ceph-iscsi-peer diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml new file mode 100644 index 00000000..d55a58f8 --- /dev/null +++ b/ceph-iscsi/osci.yaml @@ -0,0 +1,29 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - ceph-iscsi-focal-octopus + - ceph-iscsi-focal-octopus-ec + vars: + needs_charm_build: true + charm_build_name: ceph-iscsi + build_type: charmcraft +- job: + name: ceph-iscsi-focal-octopus + parent: func-target + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 + vars: + tox_extra_args: focal +- job: + name: ceph-iscsi-focal-octopus-ec + parent: func-target + dependencies: &smoke-jobs + - ceph-iscsi-focal-octopus + vars: + tox_extra_args: focal-ec diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index 8cfee5a6..160abfd1 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -4,3 +4,4 @@ git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates +git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 84a4fa9c..c5eb3fdd 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -17,12 +17,12 @@ """Charm for deploying and maintaining the Ceph iSCSI service.""" import copy -import socket import logging import os import subprocess import sys import string +import socket import secrets from pathlib import Path @@ -36,6 +36,7 @@ import charmhelpers.core.host as ch_host import charmhelpers.core.templating
as ch_templating import interface_ceph_client.ceph_client as ceph_client +import interface_ceph_iscsi_admin_access.admin_access as admin_access import interface_ceph_iscsi_peer import interface_tls_certificates.ca_client as ca_client @@ -102,7 +103,20 @@ def trusted_ips(self): """ ips = copy.deepcopy(self.allowed_ips) ips.extend(self.relation.peer_addresses) - return ' '.join(sorted(ips)) + return ','.join(sorted(ips)) + + +class AdminAccessAdapter( + ops_openstack.adapters.OpenStackOperRelationAdapter): + + @property + def trusted_ips(self): + """List of IP addresses permitted to use API. + + :returns: Ceph iSCSI clients + :rtype: str + """ + return ','.join(sorted(self.relation.client_addresses)) class TLSCertificatesAdapter( @@ -130,6 +144,7 @@ class CephISCSIGatewayAdapters( 'ceph-client': CephClientAdapter, 'cluster': GatewayClientPeerAdapter, 'certificates': TLSCertificatesAdapter, + 'admin-access': AdminAccessAdapter, } @@ -184,12 +199,19 @@ def __init__(self, framework): self.peers = interface_ceph_iscsi_peer.CephISCSIGatewayPeers( self, 'cluster') + self.admin_access = \ + admin_access.CephISCSIAdminAccessProvides( + self, + 'admin-access') self.ca_client = ca_client.CAClient( self, 'certificates') self.adapters = CephISCSIGatewayAdapters( - (self.ceph_client, self.peers, self.ca_client), + (self.ceph_client, self.peers, self.ca_client, self.admin_access), self) + self.framework.observe( + self.admin_access.on.admin_access_request, + self.publish_admin_access_info) self.framework.observe( self.ceph_client.on.broker_available, self.request_ceph_pool) @@ -240,6 +262,7 @@ def on_has_peers(self, event): alphabet = string.ascii_letters + string.digits password = ''.join(secrets.choice(alphabet) for i in range(8)) self.peers.set_admin_password(password) + self.publish_admin_access_info(event) def config_get(self, key): """Retrieve config option. @@ -274,7 +297,6 @@ def metadata_pool_name(self): def request_ceph_pool(self, event): """Request pools from Ceph cluster.""" - print("request_ceph_pool") if not self.ceph_client.broker_available: logging.info("Cannot request ceph setup at this time") return @@ -440,8 +462,26 @@ def on_tls_app_config_ready(self, event): encoding=serialization.Encoding.PEM)) subprocess.check_call(['update-ca-certificates']) self._stored.enable_tls = True + # Endpoint has switch to TLS, need to inform users. 
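# For orientation, publish_admin_access_info (defined just below) is what
# pushes the endpoint details out; the resulting admin-access relation data
# is roughly shaped as follows. This is a sketch based on the unit tests
# further down in this patch; all values are illustrative:
#   unit data: {'host': '10.0.0.10', 'name': 'ceph-iscsi-0.example',
#               'port': '5000', 'scheme': 'http'}
#   app data:  {'username': 'admin', 'password': '<generated password>'}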
+ self.publish_admin_access_info(event) self.render_config(event) + def publish_admin_access_info(self, event): + """Publish creds and endpoint to related charms""" + if not self.peers.admin_password: + logging.info("Defering setup") + event.defer() + return + if self._stored.enable_tls: + scheme = 'https' + else: + scheme = 'http' + self.admin_access.publish_gateway( + socket.getfqdn(), + 'admin', + self.peers.admin_password, + scheme) + def custom_status_check(self): """Custom update status checks.""" if ch_host.is_container(): diff --git a/ceph-iscsi/templates/iscsi-gateway.cfg b/ceph-iscsi/templates/iscsi-gateway.cfg index 89005292..eb7d0e02 100644 --- a/ceph-iscsi/templates/iscsi-gateway.cfg +++ b/ceph-iscsi/templates/iscsi-gateway.cfg @@ -11,4 +11,8 @@ api_secure = {{ certificates.enable_tls }} api_user = admin api_password = {{ cluster.admin_password }} api_port = 5000 +{% if admin_access.trusted_ips -%} +trusted_ip_list = {{ cluster.trusted_ips }},{{ admin_access.trusted_ips }} +{% else -%} trusted_ip_list = {{ cluster.trusted_ips }} +{% endif -%} diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 9d2a6415..9a37bac6 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -139,8 +139,10 @@ class TestCephISCSIGatewayCharmBase(CharmTestCase): PATCHES = [ 'ch_templating', 'gwcli_client', - 'subprocess', 'os', + 'secrets', + 'socket', + 'subprocess', ] def setUp(self): @@ -148,6 +150,12 @@ def setUp(self): self.harness = Harness( _CephISCSIGatewayCharmBase, ) + self.test_hostname = 'server1' + self.socket.gethostname.return_value = self.test_hostname + self.test_fqdn = self.test_hostname + '.foo' + self.socket.getfqdn.return_value = self.test_fqdn + self.secrets.choice.return_value = 'r' + self.test_admin_password = 'rrrrrrrr' self.gwc = MagicMock() self.gwcli_client.GatewayClient.return_value = self.gwc @@ -182,7 +190,7 @@ def test_init(self): self.assertFalse(self.harness.charm._stored.target_created) self.assertFalse(self.harness.charm._stored.enable_tls) - def add_cluster_relation(self): + def add_base_cluster_relation(self): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') self.harness.add_relation_unit( rel_id, @@ -197,10 +205,33 @@ def add_cluster_relation(self): }) return rel_id + def complete_cluster_relation(self, rel_id): + self.harness.update_relation_data( + rel_id, + 'ceph-iscsi/1', + { + 'ingress-address': '10.0.0.2', + 'gateway_ready': 'True', + 'gateway_fqdn': 'ceph-iscsi-1.example' + }) + + def add_admin_access_relation(self): + rel_id = self.harness.add_relation('admin-access', 'ceph-dashboard') + self.harness.add_relation_unit( + rel_id, + 'ceph-dashboard/0') + self.harness.update_relation_data( + rel_id, + 'ceph-dashboard/0', + { + 'ingress-address': '10.0.0.2', + }) + return rel_id + @patch('socket.getfqdn') def test_on_create_target_action(self, _getfqdn): _getfqdn.return_value = 'ceph-iscsi-0.example' - self.add_cluster_relation() + self.add_base_cluster_relation() self.harness.begin() action_event = MagicMock() action_event.params = { @@ -245,7 +276,7 @@ def test_on_create_target_action(self, _getfqdn): @patch('socket.getfqdn') def test_on_create_target_action_ec(self, _getfqdn): _getfqdn.return_value = 'ceph-iscsi-0.example' - self.add_cluster_relation() + self.add_base_cluster_relation() self.harness.begin() action_event = MagicMock() action_event.params = { @@ -296,10 +327,8 @@ def test_on_create_target_action_ec(self, 
_getfqdn): 'iscsi-metapool', 'disk1') - @patch.object(charm.secrets, 'choice') - def test_on_has_peers(self, _choice): + def test_on_has_peers(self): rel_id = self.harness.add_relation('cluster', 'ceph-iscsi') - _choice.return_value = 'r' self.harness.begin() self.harness.add_relation_unit( rel_id, @@ -316,10 +345,10 @@ def test_on_has_peers(self, _choice): 'gateway_fqdn': 'ceph-iscsi-1.example' }) self.assertEqual( - self.harness.charm.peers.admin_password, 'rrrrrrrr') + self.harness.charm.peers.admin_password, self.test_admin_password) def test_on_has_peers_not_leader(self): - self.add_cluster_relation() + self.add_base_cluster_relation() self.harness.begin() self.assertIsNone( self.harness.charm.peers.admin_password) @@ -329,7 +358,7 @@ def test_on_has_peers_not_leader(self): self.harness.charm.peers.admin_password) def test_on_has_peers_existing_password(self): - rel_id = self.add_cluster_relation() + rel_id = self.add_base_cluster_relation() self.harness.update_relation_data( rel_id, 'ceph-iscsi', @@ -370,7 +399,7 @@ def test_on_ceph_client_relation_joined(self): def test_on_pools_available(self): self.os.path.exists.return_value = False self.os.path.basename = os.path.basename - rel_id = self.add_cluster_relation() + rel_id = self.add_base_cluster_relation() self.harness.update_relation_data( rel_id, 'ceph-iscsi', @@ -392,9 +421,7 @@ def test_on_pools_available(self): rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual(rel_data['gateway_ready'], 'True') - @patch('socket.gethostname') - def test_on_certificates_relation_joined(self, _gethostname): - _gethostname.return_value = 'server1' + def test_on_certificates_relation_joined(self): rel_id = self.harness.add_relation('certificates', 'vault') self.harness.begin() self.harness.add_relation_unit( @@ -407,19 +434,17 @@ def test_on_certificates_relation_joined(self, _gethostname): rel_data = self.harness.get_relation_data(rel_id, 'ceph-iscsi/0') self.assertEqual( rel_data['application_cert_requests'], - '{"server1": {"sans": ["10.0.0.10", "server1"]}}') + '{"server1.foo": {"sans": ["10.0.0.10", "server1"]}}') - @patch('socket.gethostname') - def test_on_certificates_relation_changed(self, _gethostname): + def test_on_certificates_relation_changed(self): mock_TLS_CERT_PATH = MagicMock() mock_TLS_CA_CERT_PATH = MagicMock() mock_TLS_KEY_PATH = MagicMock() mock_KEY_AND_CERT_PATH = MagicMock() mock_TLS_PUB_KEY_PATH = MagicMock() - _gethostname.return_value = 'server1' self.subprocess.check_output.return_value = b'pubkey' rel_id = self.harness.add_relation('certificates', 'vault') - self.add_cluster_relation() + self.add_base_cluster_relation() self.harness.begin() self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH self.harness.charm.TLS_CA_CERT_PATH = mock_TLS_CA_CERT_PATH @@ -460,3 +485,26 @@ def test_custom_status_check(self): self.assertIsInstance( self.harness.charm.unit.status, BlockedStatus) + + def test_publish_admin_access_info(self): + cluster_rel_id = self.add_base_cluster_relation() + admin_access_rel_id = self.add_admin_access_relation() + self.harness.begin() + self.harness.set_leader() + self.complete_cluster_relation(cluster_rel_id) + self.assertEqual( + self.harness.get_relation_data( + admin_access_rel_id, + 'ceph-iscsi/0'), + { + 'host': '10.0.0.10', + 'name': self.test_fqdn, + 'port': '5000', + 'scheme': 'http'}) + self.assertEqual( + self.harness.get_relation_data( + admin_access_rel_id, + 'ceph-iscsi'), + { + 'password': self.test_admin_password, + 'username': 'admin'}) From 
5e4b3212a19874a3260aa905a8c7f31ae247cc91 Mon Sep 17 00:00:00 2001 From: Eric Chen Date: Tue, 7 Sep 2021 05:56:03 +0000 Subject: [PATCH 2268/2699] Remove ntp installation in ceph-fs. Inside a container it is not necessary to install ntp, and on bare metal we now expect operators to use a subordinate ntp charm instead. Therefore, remove the ntp installation from ceph-fs. Closes-Bug: #1935013 Change-Id: I48f1a6a1ee6153bbc86cc9acf9302d8eb9d7db43 --- ceph-fs/src/lib/charm/openstack/ceph_fs.py | 4 ++-- ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-fs/src/lib/charm/openstack/ceph_fs.py b/ceph-fs/src/lib/charm/openstack/ceph_fs.py index 26e73553..a9433c44 100644 --- a/ceph-fs/src/lib/charm/openstack/ceph_fs.py +++ b/ceph-fs/src/lib/charm/openstack/ceph_fs.py @@ -164,9 +164,9 @@ def get_network_addrs(self, config_opt): class MitakaCephFSCharm(BaseCephFSCharm): release = 'mitaka' - packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] + packages = ['ceph-mds', 'gdisk', 'btrfs-tools', 'xfsprogs'] class UssuriCephFSCharm(BaseCephFSCharm): release = 'ussuri' - packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] + packages = ['ceph-mds', 'gdisk', 'btrfs-progs', 'xfsprogs'] diff --git a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py index c3964f35..8cf4faf9 100644 --- a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py +++ b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py @@ -33,7 +33,7 @@ def test_packages(self): # future versions of this charm, see ``TestCephFsCharm`` for the rest # of the tests self.assertEquals(self.target.packages, [ - 'ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']) + 'ceph-mds', 'gdisk', 'btrfs-tools', 'xfsprogs']) class TestCephFsCharm(test_utils.PatchHelper): @@ -59,7 +59,7 @@ def test___init__(self): self.assertDictEqual(self.target.restart_map, { '/etc/ceph/ceph.conf': ['ceph-mds@somehost']}) self.assertEquals(self.target.packages, [ - 'ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']) + 'ceph-mds', 'gdisk', 'btrfs-progs', 'xfsprogs']) def test_configuration_class(self): self.assertEquals(self.target.options.hostname, 'somehost') From d2516ba9ee864f9f43a19d3d871a5268cc677523 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 9 Sep 2021 12:31:29 +0000 Subject: [PATCH 2269/2699] Add delete user action Change-Id: Iff88333ecdbdc3ece3a9053470d239e260030612 --- ceph-dashboard/actions.yaml | 7 +++++++ ceph-dashboard/src/charm.py | 13 +++++++++++++ .../unit_tests/test_ceph_dashboard_charm.py | 11 +++++++++++ 3 files changed, 31 insertions(+) diff --git a/ceph-dashboard/actions.yaml b/ceph-dashboard/actions.yaml index c76df649..df585b55 100644 --- a/ceph-dashboard/actions.yaml +++ b/ceph-dashboard/actions.yaml @@ -12,3 +12,10 @@ add-user: description: Role to give user type: string default: "" +delete-user: + description: Delete a dashboard user + params: + username: + description: Name of user to delete + type: string + default: "" diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index d4597f3f..75ea0db8 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -179,6 +179,9 @@ def __init__(self, *args) -> None: self.radosgw_user.on.gw_user_ready, self._configure_dashboard) self.framework.observe(self.on.add_user_action, self._add_user_action) + self.framework.observe( + self.on.delete_user_action, + self._delete_user_action)
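# With the handler registered above, the new action can be invoked per unit
# in the usual way. A usage sketch (unit name and username illustrative):
#   juju run-action --wait ceph-dashboard/0 delete-user username=auser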
self.ingress = interface_api_endpoints.APIEndpointsRequires( self, 'loadbalancer', @@ -518,5 +521,15 @@ def _add_user_action(self, event: ActionEvent) -> None: else: event.set_results({"password": password}) + def _delete_user_action(self, event: ActionEvent) -> None: + """Delete a user""" + username = event.params["username"] + try: + self._run_cmd(['ceph', 'dashboard', 'ac-user-delete', username]) + event.set_results({"message": "User {} deleted".format(username)}) + except subprocess.CalledProcessError as exc: + event.fail(exc.output) + + if __name__ == "__main__": main(CephDashboardCharm) diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 77c47d64..1332ea13 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -630,3 +630,14 @@ def test__add_user_action(self, _choice, _NTFile): self.subprocess.check_output.assert_called_once_with( ['ceph', 'dashboard', 'ac-user-create', '--enabled', '-i', 'tempfilename', 'auser', 'administrator']) + + def test__delete_user_action(self): + self.subprocess.check_output.return_value = b'' + self.harness.begin() + action_event = MagicMock() + action_event.params = { + 'username': 'auser'} + self.harness.charm._delete_user_action(action_event) + self.subprocess.check_output.assert_called_once_with( + ['ceph', 'dashboard', 'ac-user-delete', 'auser'], + stderr=self.subprocess.STDOUT) From fcdee5c71a0ed774feb190bc2c8529c21c65987d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Sep 2021 10:04:54 +0000 Subject: [PATCH 2270/2699] Add copyright file Change-Id: Ic987f0775e441064251859680f9da01b6f78fe60 --- ceph-dashboard/copyright | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 ceph-dashboard/copyright diff --git a/ceph-dashboard/copyright b/ceph-dashboard/copyright new file mode 100644 index 00000000..d0b7f44f --- /dev/null +++ b/ceph-dashboard/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2020, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From c32bb5e4a9050f18e71900c23527485b66c293d9 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 20 Aug 2021 14:19:11 +0000 Subject: [PATCH 2271/2699] Support iscsi ceph-iscsi does not support wallaby yet so the hirsute bundle is unchanged. 
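Under the hood the dashboard charm feeds each gateway's API URL to the
dashboard's own registration command (see _update_iscsigw_creds in the diff
below). A minimal sketch of the equivalent manual step, with illustrative
credentials and hostname:

    echo -n 'http://admin:s3cr3t@ceph-iscsi-0.example:5000' > /tmp/gw
    ceph dashboard iscsi-gateway-add -i /tmp/gw ceph-iscsi-0.example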
Change-Id: I9db6557b396e86180070cbbf29f14c7814fcd357 --- ceph-dashboard/metadata.yaml | 2 ++ ceph-dashboard/requirements.txt | 1 + ceph-dashboard/src/charm.py | 38 +++++++++++++++++++++++-- ceph-dashboard/tests/bundles/focal.yaml | 11 +++++++ 4 files changed, 50 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index d66e9fda..fcbb1ddb 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -31,6 +31,8 @@ requires: interface: http radosgw-dashboard: interface: radosgw-user + iscsi-dashboard: + interface: ceph-iscsi-admin-access provides: grafana-dashboard: interface: grafana-dashboard diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt index f0aa163d..03eb941c 100644 --- a/ceph-dashboard/requirements.txt +++ b/ceph-dashboard/requirements.txt @@ -4,3 +4,4 @@ git+https://github.com/openstack/charms.ceph#egg=charms_ceph git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack #git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates git+https://github.com/gnuoy/ops-interface-tls-certificates@no-exception-for-inflight-request#egg=interface_tls_certificates +git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index d4597f3f..57fc1741 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -25,6 +25,7 @@ import subprocess import tenacity import ops_openstack.plugins.classes +import interface_ceph_iscsi_admin_access.admin_access as admin_access import interface_dashboard import interface_api_endpoints import interface_grafana_dashboard @@ -166,6 +167,9 @@ def __init__(self, *args) -> None: self, 'radosgw-dashboard', request_system_role=True) + self.iscsi_user = admin_access.CephISCSIAdminAccessRequires( + self, + 'iscsi-dashboard') self.framework.observe( self.mon.on.mon_ready, self._configure_dashboard) @@ -178,6 +182,9 @@ def __init__(self, *args) -> None: self.framework.observe( self.radosgw_user.on.gw_user_ready, self._configure_dashboard) + self.framework.observe( + self.iscsi_user.on.admin_access_ready, + self._configure_dashboard) self.framework.observe(self.on.add_user_action, self._add_user_action) self.ingress = interface_api_endpoints.APIEndpointsRequires( self, @@ -350,12 +357,16 @@ def _apply_setting(self, ceph_setting: str, value: List[str]) -> str: return self._run_cmd(cmd) def _apply_file_setting(self, ceph_setting: str, - file_contents: str) -> str: + file_contents: str, + extra_args: List[str] = None) -> None: """Apply a setting via a file""" with tempfile.NamedTemporaryFile(mode='w', delete=True) as _file: _file.write(file_contents) _file.flush() - return self._apply_setting(ceph_setting, ['-i', _file.name]) + settings = ['-i', _file.name] + if extra_args: + settings.extend(extra_args) + self._apply_setting(ceph_setting, settings) def _apply_ceph_config_from_charm_config(self) -> None: """Read charm config and apply settings to dashboard config""" @@ -410,6 +421,7 @@ def _configure_dashboard(self, _) -> None: prometheus_ep]) self._register_dashboards() self._manage_radosgw() + self._manage_iscsigw() self._stored.is_started = True self.update_status() @@ -450,6 +462,28 @@ def _get_tls_from_relation(self) -> TLS_Config: encoding=serialization.Encoding.PEM)) return key, cert, ca_cert + def _update_iscsigw_creds(self, creds): + self._apply_file_setting( + 
'iscsi-gateway-add', + '{}://{}:{}@{}:{}'.format( + creds['scheme'], + creds['username'], + creds['password'], + creds['host'], + creds['port']), + [creds['name']]) + + def _manage_iscsigw(self) -> None: + """Register iscsi gateways in dashboard db""" + if self.unit.is_leader(): + creds = self.iscsi_user.get_user_creds() + if len(creds) < 1: + logging.info("No iscsi gateway creds found") + return + else: + for c in creds: + self._update_iscsigw_creds(c) + def _configure_tls(self) -> None: """Configure TLS.""" logging.debug("Attempting to collect TLS config from relation") diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 215364d0..8a06e61f 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -53,6 +53,11 @@ applications: ceph-fs: charm: cs:~openstack-charmers-next/ceph-fs num_units: 1 + ceph-iscsi: + charm: cs:~openstack-charmers-next/ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -94,3 +99,9 @@ relations: - 'ceph-radosgw:radosgw-user' - - 'ceph-mon:mds' - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' From c70b68ad017fc5728ad014d74e445290605486be Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Sep 2021 16:03:42 +0000 Subject: [PATCH 2272/2699] LB relation updates * Ensure vip passed back from lb is included in certificate request * Pull interface from its own repo Change-Id: Ib9fa865c115d54591483db6679dbeb645b3e353f --- ceph-dashboard/metadata.yaml | 2 +- ceph-dashboard/requirements.txt | 1 + ceph-dashboard/src/charm.py | 49 ++++-- ceph-dashboard/src/interface_api_endpoints.py | 64 ------- .../unit_tests/test_ceph_dashboard_charm.py | 38 +++++ .../test_interface_api_endpoints.py | 159 ------------------ 6 files changed, 77 insertions(+), 236 deletions(-) delete mode 100644 ceph-dashboard/src/interface_api_endpoints.py delete mode 100644 ceph-dashboard/unit_tests/test_interface_api_endpoints.py diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index fcbb1ddb..0011610f 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -24,7 +24,7 @@ requires: certificates: interface: tls-certificates loadbalancer: - interface: api-endpoints + interface: openstack-loadbalancer alertmanager-service: interface: http prometheus: diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt index 03eb941c..3b6834cc 100644 --- a/ceph-dashboard/requirements.txt +++ b/ceph-dashboard/requirements.txt @@ -5,3 +5,4 @@ git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack #git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates git+https://github.com/gnuoy/ops-interface-tls-certificates@no-exception-for-inflight-request#egg=interface_tls_certificates git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access +git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 409c0ce3..87bf3fc3 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -18,6 +18,7 @@ import base64 import interface_tls_certificates.ca_client as ca_client +import
interface_openstack_loadbalancer.loadbalancer as ops_lb_interface import re import secrets import socket @@ -27,7 +28,6 @@ import ops_openstack.plugins.classes import interface_ceph_iscsi_admin_access.admin_access as admin_access import interface_dashboard -import interface_api_endpoints import interface_grafana_dashboard import interface_http import interface_radosgw_user @@ -57,6 +57,7 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): TLS_CHARM_CA_CERT_PATH = TLS_CA_CERT_DIR / 'charm_config_juju_ca_cert.crt' TLS_PORT = 8443 DASH_DIR = Path('src/dashboards') + LB_SERVICE_NAME = "ceph-dashboard" class CharmCephOption(): """Manage a charm option to ceph command to manage that option""" @@ -175,7 +176,7 @@ def __init__(self, *args) -> None: self._configure_dashboard) self.framework.observe( self.ca_client.on.ca_available, - self._on_ca_available) + self._configure_dashboard) self.framework.observe( self.ca_client.on.tls_server_config_ready, self._configure_dashboard) @@ -189,16 +190,9 @@ def __init__(self, *args) -> None: self.framework.observe( self.on.delete_user_action, self._delete_user_action) - self.ingress = interface_api_endpoints.APIEndpointsRequires( + self.ingress = ops_lb_interface.OSLoadbalancerRequires( self, - 'loadbalancer', - { - 'endpoints': [{ - 'service-type': 'ceph-dashboard', - 'frontend-port': self.TLS_PORT, - 'backend-port': self.TLS_PORT, - 'backend-ip': self._get_bind_ip(), - 'check-type': 'httpd'}]}) + 'loadbalancer') self.grafana_dashboard = \ interface_grafana_dashboard.GrafanaDashboardProvides( self, @@ -218,8 +212,23 @@ def __init__(self, *args) -> None: self.framework.observe( self.prometheus.on.http_ready, self._configure_dashboard) + self.framework.observe( + self.ingress.on.lb_relation_ready, + self._request_loadbalancer) + self.framework.observe( + self.ingress.on.lb_configured, + self._configure_dashboard) self._stored.set_default(is_started=False) + def _request_loadbalancer(self, _) -> None: + """Send request to create loadbalancer""" + self.ingress.request_loadbalancer( + self.LB_SERVICE_NAME, + self.TLS_PORT, + self.TLS_PORT, + self._get_bind_ip(), + 'httpd') + def _register_dashboards(self) -> None: """Register all dashboards with grafana""" for dash_file in self.DASH_DIR.glob("*.json"): @@ -273,9 +282,24 @@ def _manage_radosgw(self) -> None: creds[0]['access_key'], creds[0]['secret_key']) - def _on_ca_available(self, _) -> None: + def request_certificates(self) -> None: """Request TLS certificates.""" + if not self.ca_client.is_joined: + logging.debug( + "Cannot request certificates, relation not present.") + return addresses = set() + if self.ingress.relations: + lb_response = self.ingress.get_frontend_data() + if lb_response: + lb_config = lb_response[self.LB_SERVICE_NAME] + addresses.update( + [i for d in lb_config.values() for i in d['ip']]) + else: + logging.debug( + ("Defering certificate request until loadbalancer has " + "responded.")) + return for binding_name in ['public']: binding = self.model.get_binding(binding_name) addresses.add(binding.network.ingress_address) @@ -390,6 +414,7 @@ def _apply_ceph_config_from_charm_config(self) -> None: def _configure_dashboard(self, _) -> None: """Configure dashboard""" + self.request_certificates() if not self.mon.mons_ready: logging.info("Not configuring dashboard, mons not ready") return diff --git a/ceph-dashboard/src/interface_api_endpoints.py b/ceph-dashboard/src/interface_api_endpoints.py deleted file mode 100644 index 8908e441..00000000 --- 
a/ceph-dashboard/src/interface_api_endpoints.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 - -import json - -from ops.framework import ( - StoredState, - EventBase, - ObjectEvents, - EventSource, - Object) - - -class EndpointDataEvent(EventBase): - pass - - -class APIEndpointsEvents(ObjectEvents): - ep_ready = EventSource(EndpointDataEvent) - - -class APIEndpointsRequires(Object): - - on = APIEndpointsEvents() - _stored = StoredState() - - def __init__(self, charm, relation_name, config_dict): - super().__init__(charm, relation_name) - self.config_dict = config_dict - self.relation_name = relation_name - self.framework.observe( - charm.on[self.relation_name].relation_changed, - self._on_relation_changed) - - def _on_relation_changed(self, event): - """Handle the relation-changed event.""" - event.relation.data[self.model.unit]['endpoints'] = json.dumps( - self.config_dict['endpoints']) - - def update_config(self, config_dict): - """Allow for updates to relation.""" - self.config_dict = config_dict - relation = self.model.get_relation(self.relation_name) - if relation: - relation.data[self.model.unit]['endpoints'] = json.dumps( - self.config_dict['endpoints']) - - -class APIEndpointsProvides(Object): - - on = APIEndpointsEvents() - _stored = StoredState() - - def __init__(self, charm): - super().__init__(charm, "loadbalancer") - # Observe the relation-changed hook event and bind - # self.on_relation_changed() to handle the event. - self.framework.observe( - charm.on["loadbalancer"].relation_changed, - self._on_relation_changed) - self.charm = charm - - def _on_relation_changed(self, event): - """Handle a change to the loadbalancer relation.""" - self.on.ep_ready.emit() diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 1332ea13..b58825ff 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -15,6 +15,7 @@ # limitations under the License. import base64 +import json import unittest import sys @@ -385,6 +386,9 @@ def test_certificates_relation(self, _gethostname): _gethostname.return_value = 'server1' cert_rel_id = self.harness.add_relation('certificates', 'vault') dash_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') + lb_rel_id = self.harness.add_relation( + 'loadbalancer', + 'openstack-loadbalancer') self.harness.begin() self.harness.set_leader() self.harness.charm.TLS_CERT_PATH = mock_TLS_CERT_PATH @@ -401,6 +405,40 @@ def test_certificates_relation(self, _gethostname): self.harness.add_relation_unit( cert_rel_id, 'vault/0') + self.harness.add_relation_unit( + lb_rel_id, + 'openstack-loadbalancer/0') + # If lb relation is present but has not responded then certs should + # not have been requested yet. 
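# For reference, the request sent by _request_loadbalancer earlier in this
# patch is roughly:
#   ingress.request_loadbalancer('ceph-dashboard', 8443, 8443,
#                                '10.0.0.10', 'httpd')
# using LB_SERVICE_NAME and TLS_PORT from the charm; the bind address shown
# here is illustrative.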
+ self.assertEqual( + self.harness.get_relation_data( + cert_rel_id, + 'ceph-dashboard/0'), + {}) + self.harness.update_relation_data( + lb_rel_id, + 'openstack-loadbalancer', + { + 'frontends': json.dumps( + { + 'ceph-dashboard': { + 'admin': { + 'ip': ['10.20.0.101'], + 'port': 8443, + 'protocol': 'http'}, + 'internal': { + 'ip': ['10.30.0.101'], + 'port': 8443, + 'protocol': 'http'}, + 'public': { + 'ip': ['10.10.0.101'], + 'port': 8443, + 'protocol': 'http'}}})}) + self.assertNotEqual( + self.harness.get_relation_data( + cert_rel_id, + 'ceph-dashboard/0'), + {}) self.harness.update_relation_data( cert_rel_id, 'vault/0', diff --git a/ceph-dashboard/unit_tests/test_interface_api_endpoints.py b/ceph-dashboard/unit_tests/test_interface_api_endpoints.py deleted file mode 100644 index 63e876bb..00000000 --- a/ceph-dashboard/unit_tests/test_interface_api_endpoints.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Canonical Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import json -import unittest -import sys -sys.path.append('lib') # noqa -sys.path.append('src') # noqa -from ops.testing import Harness -from ops.charm import CharmBase -import interface_api_endpoints - - -class TestAPIEndpointsRequires(unittest.TestCase): - - class MyCharm(CharmBase): - - def __init__(self, *args): - super().__init__(*args) - self.seen_events = [] - self.ingress = interface_api_endpoints.APIEndpointsRequires( - self, - 'loadbalancer', - { - 'endpoints': [{ - 'service-type': 'ceph-dashboard', - 'frontend-port': 8443, - 'backend-port': 8443, - 'backend-ip': '10.0.0.10', - 'check-type': 'httpd'}]}) - - def setUp(self): - super().setUp() - self.harness = Harness( - self.MyCharm, - meta=''' -name: my-charm -requires: - loadbalancer: - interface: api-endpoints -''' - ) - self.eps = [{ - 'service-type': 'ceph-dashboard', - 'frontend-port': 8443, - 'backend-port': 8443, - 'backend-ip': '10.0.0.10', - 'check-type': 'httpd'}] - - def add_loadbalancer_relation(self): - rel_id = self.harness.add_relation( - 'loadbalancer', - 'service-loadbalancer') - self.harness.add_relation_unit( - rel_id, - 'service-loadbalancer/0') - self.harness.update_relation_data( - rel_id, - 'service-loadbalancer/0', - {'ingress-address': '10.0.0.3'}) - return rel_id - - def test_init(self): - self.harness.begin() - self.assertEqual( - self.harness.charm.ingress.config_dict, - {'endpoints': self.eps}) - self.assertEqual( - self.harness.charm.ingress.relation_name, - 'loadbalancer') - - def test__on_relation_changed(self): - self.harness.begin() - rel_id = self.add_loadbalancer_relation() - rel_data = self.harness.get_relation_data( - rel_id, - 'my-charm/0') - self.assertEqual( - rel_data['endpoints'], - json.dumps(self.eps)) - - def test_update_config(self): - self.harness.begin() - rel_id = self.add_loadbalancer_relation() - new_eps = copy.deepcopy(self.eps) - new_eps.append({ - 'service-type': 'ceph-dashboard', - 'frontend-port': 9443, - 'backend-port': 9443, - 'backend-ip': '10.0.0.10', - 
'check-type': 'https'}) - self.harness.charm.ingress.update_config( - {'endpoints': new_eps}) - rel_data = self.harness.get_relation_data( - rel_id, - 'my-charm/0') - self.assertEqual( - rel_data['endpoints'], - json.dumps(new_eps)) - - -class TestAPIEndpointsProvides(unittest.TestCase): - - class MyCharm(CharmBase): - - def __init__(self, *args): - super().__init__(*args) - self.seen_events = [] - self.api_eps = interface_api_endpoints.APIEndpointsProvides(self) - self.framework.observe( - self.api_eps.on.ep_ready, - self._log_event) - - def _log_event(self, event): - self.seen_events.append(type(event).__name__) - - def setUp(self): - super().setUp() - self.harness = Harness( - self.MyCharm, - meta=''' -name: my-charm -provides: - loadbalancer: - interface: api-endpoints -''' - ) - - def test_on_changed(self): - self.harness.begin() - # No MonReadyEvent as relation is absent - self.assertEqual( - self.harness.charm.seen_events, - []) - rel_id = self.harness.add_relation('loadbalancer', 'ceph-dashboard') - self.harness.add_relation_unit( - rel_id, - 'ceph-dashboard/0') - self.harness.update_relation_data( - rel_id, - 'ceph-dashboard/0', - {'ingress-address': '10.0.0.3'}) - self.assertEqual( - self.harness.charm.seen_events, - ['EndpointDataEvent']) From 28d248242848a25952d9509beef08d40f2cd93cf Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 9 Sep 2021 12:39:13 +0000 Subject: [PATCH 2273/2699] Update README to explain tls setup Change-Id: Icf9d398606ec5bfdbf2a11caf909a61b8515b863 --- ceph-dashboard/README.md | 122 +++++++++++++++++++++++++++++++-------- 1 file changed, 97 insertions(+), 25 deletions(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index 27edc762..1288df01 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -1,15 +1,37 @@ # Overview -The ceph-dashboard configures the [Ceph Dashboard][ceph-dashboard-upstream]. -The charm is intended to be used in conjunction with the -[ceph-mon][ceph-mon-charm] charm. +The ceph-dashboard charm deploys the [Ceph Dashboard][upstream-ceph-dashboard], +a built-in web-based Ceph management and monitoring application. # Usage ## Configuration -See file `config.yaml` for the full list of options, along with their -descriptions and default values. +This section covers common and/or important configuration options. See file +`config.yaml` for the full list of options, along with their descriptions and +default values. See the [Juju documentation][juju-docs-config-apps] for details +on configuring applications. + +#### `grafana-api-url` + +Sets the url of the grafana api when using embedded graphs. See +[Embedded Grafana Dashboards](#Embedded-Grafana-Dashboards) + +#### `public-hostname` + +Sets the hostname or address of the public endpoint used to access +the dashboard. + +#### `enable-password-policy` + +Sets whether certain password restrictions are enforced when a user +is created or changes their password. + +#### `password-*` + +There are a number of `password-*` options which impose constraints +on which passwords can be used. These options are ignored unless +`enable-password-policy` is set to `True`. ## Deployment @@ -18,42 +40,64 @@ We are assuming a pre-existing Ceph cluster. Deploy the ceph-dashboard as a subordinate to the ceph-mon charm. juju deploy ceph-dashboard - juju relate ceph-dashboard ceph-mon + juju add-relation ceph-dashboard:dashboard ceph-mon:dashboard + + +TLS is a requirement for this charm. 
Enable it by adding a relation to the +vault application: + + juju add-relation ceph-dashboard:certificates vault:certificates + +See [Managing TLS certificates][cdg-tls] in the +[OpenStack Charms Deployment Guide][cdg] for more information on TLS. + +> **Note**: This charm also supports TLS configuration via charm options + `ssl_cert`, `ssl_key`, and `ssl_ca`. + ## Embedded Grafana Dashboards To enable the embedded grafana dashboards within the Ceph dashboard some additional relations are needed. - juju relate ceph-dashboard:grafana-dashboard grafana:dashboards - juju relate ceph-dashboard:prometheus prometheus:website - juju relate ceph-mon:prometheus prometheus:target - juju relate ceph-osd:juju-info telegraf:juju-info - juju relate ceph-mon:juju-info telegraf:juju-info + juju add-relation ceph-dashboard:grafana-dashboard grafana:dashboards + juju add-relation ceph-dashboard:prometheus prometheus:website + juju add-relation ceph-mon:prometheus prometheus:target + juju add-relation ceph-osd:juju-info telegraf:juju-info + juju add-relation ceph-mon:juju-info telegraf:juju-info Grafana, Telegraf and Prometheus should be related in the standard way - juju relate grafana:grafana-source prometheus:grafana-source - juju relate telegraf:prometheus-client prometheus:target - juju relate telegraf:dashboards grafana:dashboards + juju add-relation grafana:grafana-source prometheus:grafana-source + juju add-relation telegraf:prometheus-client prometheus:target + juju add-relation telegraf:dashboards grafana:dashboards -Grafana must be using https so either supply a certificates and key via -the ssl\_\* charm config options or add a vault relation. - juju deploy grafana:certificates vault:certificates +When Grafana is integrated with the Ceph Dashboard it requires TLS, so +add a relation to Vault (the grafana charm also supports TLS configuration via +ssl\_\* charm options): + + juju add-relation grafana:certificates vault:certificates Grafana should be set with the following charm options: juju config grafana anonymous=True juju config grafana allow_embedding=True + +The grafana charm also requires the vonage-status-panel and +grafana-piechart-panel plugins. The Grafana charm `install_plugins` +config option should be set to include URLs from which these plugins +can be downloaded. They are currently available from +https://storage.googleapis.com/plugins-community. For example: + juju config grafana install_plugins="https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip" Telegraf should be set with the following charm options: juju config telegraf hostname="{host}" -NOTE: That is "{host}" verbatim, nothing needs to be substituted. - +> **Note**: The above command is to be invoked verbatim; no substitution is +required. 
Currently the dashboard cannot autodetect the api endpoint of the grafana service, so at the end of the deployment run the following: @@ -64,12 +108,40 @@ service, so at the end of the deployment run the following: To enable Prometheus alerting, add the following relations: - juju relate ceph-dashboard:prometheus prometheus:website - juju relate ceph-mon:prometheus prometheus:target - juju relate ceph-dashboard:alertmanager-service prometheus-alertmanager:alertmanager-service - juju relate prometheus:alertmanager-service prometheus-alertmanager:alertmanager-service + juju add-relation ceph-dashboard:prometheus prometheus:website + juju add-relation ceph-mon:prometheus prometheus:target + juju add-relation ceph-dashboard:alertmanager-service prometheus-alertmanager:alertmanager-service + juju add-relation prometheus:alertmanager-service prometheus-alertmanager:alertmanager-service + +## Actions + +This section lists Juju [actions][juju-docs-actions] supported by the charm. +Actions allow specific operations to be performed on a per-unit basis. To +display action descriptions run `juju actions --schema add-user`. If the charm +is not deployed then see file `actions.yaml`. + +* `add-user` +* `delete-user` + +# Documentation + +The OpenStack Charms project maintains two documentation guides: + +* [OpenStack Charm Guide][cg]: for project information, including development + and support notes +* [OpenStack Charms Deployment Guide][cdg]: for charm usage information + + +# Bugs + +Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. -[ceph-dashboard]: https://docs.ceph.com/en/latest/mgr/dashboard/ -[ceph-mon-charm]: https://jaas.ai/ceph-mon +[juju-docs-actions]: https://juju.is/docs/working-with-actions +[juju-docs-config-apps]: https://juju.is/docs/configuring-applications +[upstream-ceph-dashboard]: https://docs.ceph.com/en/latest/mgr/dashboard/ +[cg]: https://docs.openstack.org/charm-guide +[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide +[cdg-tls]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-certificate-management.html +[lp-bugs-charm-ceph-dashboard]: https://bugs.launchpad.net/charm-ceph-dashboard From 846b21e8b17215b15fd3781e57461d6a3f7e49ec Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 21 Sep 2021 13:12:31 +0100 Subject: [PATCH 2274/2699] Add xena bundles - add non-voting focal-xena bundle - add non-voting impish-xena bundle - rebuild to pick up charm-helpers changes - update tox/pip.sh to ensure setuptools<50.0.0 Change-Id: I1a8c12dc00e05d5d01606f68eb151d86acf61815 --- ceph-fs/osci.yaml | 20 +- ceph-fs/pip.sh | 18 ++ ceph-fs/rebuild | 2 +- ceph-fs/src/tests/bundles/focal-xena.yaml | 222 +++++++++++++++++++++ ceph-fs/src/tests/bundles/impish-xena.yaml | 222 +++++++++++++++++++++ ceph-fs/src/tests/tests.yaml | 2 + ceph-fs/src/tox.ini | 4 +- ceph-fs/tox.ini | 17 +- 8 files changed, 501 insertions(+), 6 deletions(-) create mode 100755 ceph-fs/pip.sh create mode 100644 ceph-fs/src/tests/bundles/focal-xena.yaml create mode 100644 ceph-fs/src/tests/bundles/impish-xena.yaml diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 534ed802..683aa103 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -1,17 +1,23 @@ - job: name: bionic-queens_local parent: bionic-queens - dependencies: + dependencies: - osci-lint - tox-py35 - tox-py36 - tox-py37 - tox-py38 - job: - name: hirsute-wallaby-bluestore + name: impish-xena-bluestore parent: func-target dependencies: &smoke-jobs - bionic-queens_local + vars: +
tox_extra_args: bluestore-compression:impish-xena +- job: + name: hirsute-wallaby-bluestore + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: bluestore-compression:hirsute-wallaby - job: @@ -30,6 +36,12 @@ dependencies: *smoke-jobs vars: tox_extra_args: bluestore-compression:focal-wallaby +- job: + name: focal-xena-bluestore + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: bluestore-compression:focal-xena - job: name: focal-victoria-bluestore parent: func-target @@ -73,8 +85,12 @@ # Xenial-pike is missing because of # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - xenial-mitaka_local + - impish-xena-bluestore: + voting: false - hirsute-wallaby-bluestore - groovy-victoria-bluestore + - focal-xena-bluestore: + voting: false - focal-wallaby-bluestore - focal-victoria-bluestore - focal-ussuri-bluestore diff --git a/ceph-fs/pip.sh b/ceph-fs/pip.sh new file mode 100755 index 00000000..9a7e6b09 --- /dev/null +++ b/ceph-fs/pip.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# setuptools 58.0 dropped the support for use_2to3=true which is needed to
+# install blessings (an indirect dependency of charm-tools).
+#
+# More details on the behavior of tox and virtualenv creation can be found at
+# https://github.com/tox-dev/tox/issues/448
+#
+# This script is a wrapper to force the use of the pinned versions early in the
+# process when the virtualenv was created and upgraded before installing the
+# dependencies declared in the target.
+pip install 'pip<20.3' 'setuptools<50.0.0' +pip "$@" diff --git a/ceph-fs/rebuild b/ceph-fs/rebuild index 12717b9a..f7d381af 100644 --- a/ceph-fs/rebuild +++ b/ceph-fs/rebuild @@ -2,4 +2,4 @@ # when dependencies of the charm change, # but nothing in the charm needs to.
# simply change the uuid to something new -f9c4c260-b3eb-11eb-b396-1786d65111bf +53a974a8-1178-11ec-a86e-07dd4090d760 diff --git a/ceph-fs/src/tests/bundles/focal-xena.yaml b/ceph-fs/src/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..08d5372d --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-xena.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-xena + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 
'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/impish-xena.yaml b/ceph-fs/src/tests/bundles/impish-xena.yaml new file mode 100644 index 00000000..ebce11e7 --- /dev/null +++ b/ceph-fs/src/tests/bundles/impish-xena.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: &series impish + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + 
charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 809f9126..e38bbe87 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ 
-11,6 +11,7 @@ gate_bundles: - bionic-queens - xenial-mitaka dev_bundles: + - bluestore-compression: impish-xena - bluestore-compression: hirsute-wallaby - xenial-ocata # Xenial-pike is missing because of @@ -40,3 +41,4 @@ tests: tests_options: force_deploy: - hirsute-wallaby + - impish-xena diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index e7630475..b40d2952 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -22,12 +22,12 @@ skip_missing_interpreters = False requires = pip < 20.3 virtualenv < 20.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 -whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt install_command = diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 5c818017..22159df2 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -11,6 +11,21 @@ envlist = pep8,py3 sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools<50.0.0 + +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -21,7 +36,7 @@ setenv = VIRTUAL_ENV={envdir} JUJU_REPOSITORY={toxinidir}/build passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} deps = -r{toxinidir}/requirements.txt From 3753bbfd063f7d1f726c64b0b9805bea67e4080a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 21 Sep 2021 14:11:16 +0100 Subject: [PATCH 2275/2699] Add xena bundles - add non-voting focal-xena bundle - add non-voting impish-xena bundle - charm-helpers sync for new charm-helpers changes - update tox/pip.sh to ensure setuptools<50.0.0 Change-Id: If511b7fee8cf676b6ba7017aa60fe916ac9a26d9 --- .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 387 ---- .../contrib/openstack/amulet/utils.py | 1595 ----------------- .../charmhelpers/contrib/openstack/context.py | 70 +- .../charmhelpers/contrib/openstack/policyd.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 53 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/hooks/charmhelpers/core/strutils.py | 9 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 2 + .../charmhelpers/fetch/python/packages.py | 6 +- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 152 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 2 +- ceph-mon/hooks/charmhelpers/osplatform.py | 3 + ceph-mon/lib/charms_ceph/crush_utils.py | 6 +- ceph-mon/lib/charms_ceph/utils.py | 247 ++- ceph-mon/osci.yaml | 4 + ceph-mon/pip.sh | 18 + ceph-mon/tests/bundles/focal-xena.yaml | 235 +++ ceph-mon/tests/bundles/impish-xena.yaml | 237 +++ ceph-mon/tests/tests.yaml | 5 +- ceph-mon/tox.ini | 9 +- 21 files changed, 944 insertions(+), 2112 deletions(-) delete mode 100644 
ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100755 ceph-mon/pip.sh create mode 100644 ceph-mon/tests/bundles/focal-xena.yaml create mode 100644 ceph-mon/tests/bundles/impish-xena.yaml diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 94ca079c..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. 
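Charm test suites consumed OpenStackAmuletDeployment by subclassing it and declaring their topology. A hypothetical minimal subclass, matching the __init__ signature that follows, is sketched here for orientation only; real charm test classes varied widely:

    class CephBasicDeployment(OpenStackAmuletDeployment):
        # Hypothetical sketch; name and defaults are illustrative.
        def __init__(self, series=None, openstack=None, source=None,
                     stable=False):
            super(CephBasicDeployment, self).__init__(series, openstack,
                                                      source, stable)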
- """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
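The use_source/no_origin handling described here amounts to a three-way dispatch on charm name. A standalone sketch of that dispatch, with the set contents abbreviated (the full lists appear in the method body below) and the helper name hypothetical:

    # Which config key, if any, carries the package origin for a charm.
    USE_SOURCE = {'ceph', 'ceph-osd', 'ceph-mon', 'rabbitmq-server',
                  'percona-cluster'}          # charms that take 'source'
    NO_ORIGIN = {'hacluster', 'neutron-openvswitch', 'cinder-ceph',
                 'nrpe'}                      # mostly subordinates

    def origin_config(charm_name, origin):
        """Return the config dict pointing charm_name at origin."""
        if charm_name in NO_ORIGIN:
            return {}
        key = 'source' if charm_name in USE_SOURCE else 'openstack-origin'
        return {key: origin}

    # e.g. origin_config('ceph-mon', 'cloud:focal-xena') -> {'source': ...}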
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt', - 'ceilometer-agent'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. 
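The wait just described is, at heart, a poll-until-regex-matches loop. A minimal standalone sketch of the regex form (the set form for "at least one unit" is omitted), where get_unit_messages is a stand-in for the sentry plumbing and is assumed to return {unit_name: status_message}:

    import re
    import time

    def wait_for_messages(get_unit_messages, message=None, timeout=1800):
        """Poll unit workload messages until every one matches message."""
        if message is None:
            # Default match: any status containing 'ready', case-insensitive.
            message = re.compile('.*ready.*', re.IGNORECASE)
        deadline = time.time() + timeout
        while time.time() < deadline:
            if all(message.search(m) for m in get_unit_messages().values()):
                return
            time.sleep(4)  # poll interval is arbitrary for this sketch
        raise TimeoutError('units never matched expected status')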
- """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' - ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('bionic', 'cloud:bionic-stein'): self.bionic_stein, - ('bionic', 'cloud:bionic-train'): self.bionic_train, - ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, - ('cosmic', None): self.cosmic_rocky, - ('disco', None): self.disco_stein, - ('eoan', None): self.eoan_train, - ('focal', None): self.focal_ussuri, - ('focal', 'cloud:focal-victoria'): self.focal_victoria, - ('groovy', None): self.groovy_victoria, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
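The release enum trick above works because OPENSTACK_RELEASES_PAIRS is ordered by OpenStack release rather than by Ubuntu series, so a plain list index serves as a comparable version number. Roughly:

    OPENSTACK_RELEASES_PAIRS = [
        'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', 'trusty_mitaka',
        'xenial_mitaka',  # ...continues in release order to 'groovy_victoria'
    ]
    RELEASE_INDEX = {pair: i for i, pair in enumerate(OPENSTACK_RELEASES_PAIRS)}

    # Checks like "release <= trusty_mitaka" are then integer comparisons:
    assert RELEASE_INDEX['trusty_kilo'] < RELEASE_INDEX['trusty_mitaka']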
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_percona_service_entry(self, memory_constraint=None): - """Return a amulet service entry for percona cluster. - - :param memory_constraint: Override the default memory constraint - in the service entry. - :type memory_constraint: str - :returns: Amulet service entry. - :rtype: dict - """ - memory_constraint = memory_constraint or '3072M' - svc_entry = { - 'name': 'percona-cluster', - 'constraints': {'mem': memory_constraint}} - if self._get_openstack_release() <= self.trusty_mitaka: - svc_entry['location'] = 'cs:trusty/percona-cluster' - return svc_entry - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 0a14af7e..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1595 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', - 'xenial_newton', 'yakkety_newton', - 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', - 'xenial_queens', 'bionic_queens', - 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein', - 'bionic_train', 'eoan_train', - 'bionic_ussuri', 'focal_ussuri', - 'focal_victoria', 'groovy_victoria', -] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
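validate_endpoint_data above dispatches to a v2 or v3 validator, and both ultimately lean on the same subset check, in which an expected value may be a literal or a predicate such as self.not_null. _validate_dict_data itself lives in the base amulet helpers and is not shown in this patch; an approximate standalone sketch:

    def validate_dict_data(expected, actual):
        """Return an error string, or None when actual satisfies expected.

        Values in expected may be literals, or callables applied as
        predicates to the corresponding value in actual.
        """
        for key, want in expected.items():
            if key not in actual:
                return 'missing key: {}'.format(key)
            got = actual[key]
            if callable(want):
                if not want(got):
                    return '{}: predicate rejected {!r}'.format(key, got)
            elif got != want:
                return '{}: {!r} != {!r}'.format(key, got, want)
        return None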
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - """ - self.log.warn("Endpoint ID and Region ID validation is limited to not " - "null checks after v2 to v3 conversion") - for svc in ep_data.keys(): - assert len(ep_data[svc]) == 1, "Unknown data format" - svc_ep_data = ep_data[svc][0] - ep_data[svc] = [ - { - 'url': svc_ep_data['adminURL'], - 'interface': 'admin', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['publicURL'], - 'interface': 'public', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['internalURL'], - 'interface': 'internal', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}] - return ep_data - - def validate_svc_catalog_endpoint_data(self, expected, actual, - openstack_release=None): - """Validate service catalog endpoint data. Pick the correct validator - for the OpenStack version. Expected data should be in the v2 format: - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - - """ - validation_function = self.validate_v2_svc_catalog_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_svc_catalog_endpoint_data - expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) - return validation_function(expected, actual) - - def validate_v2_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. - - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. 
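The sort-then-compare step this docstring describes, reduced to its core (a sketch, not the helper's exact code):

    def sort_by_interface(endpoints):
        """Order one service's endpoints deterministically by interface."""
        return sorted(endpoints, key=lambda ep: ep['interface'])

    def pairwise(expected, actual):
        """Yield matched (expected, actual) endpoint pairs for comparison."""
        return zip(sort_by_interface(expected), sort_by_interface(actual))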
- """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. 
- """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. - """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - 
admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone, force_v1_client=False): - 
"""Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if not force_v1_client and keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def glance_create_image(self, glance, image_name, image_url, - download_dir='tests', - hypervisor_type=None, - disk_format='qcow2', - architecture='x86_64', - container_format='bare'): - """Download an image and upload it to glance, validate its status - and return an image object pointer. KVM defaults, can override for - LXD. 
- - :param glance: pointer to authenticated glance api connection - :param image_name: display name for new image - :param image_url: url to retrieve - :param download_dir: directory to store downloaded image file - :param hypervisor_type: glance image hypervisor property - :param disk_format: glance image disk format - :param architecture: glance image architecture property - :param container_format: glance image container format - :returns: glance image pointer - """ - self.log.debug('Creating glance image ({}) from ' - '{}...'.format(image_name, image_url)) - - # Download image - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - abs_file_name = os.path.join(download_dir, image_name) - if not os.path.exists(abs_file_name): - opener.retrieve(image_url, abs_file_name) - - # Create glance image - glance_properties = { - 'architecture': architecture, - } - if hypervisor_type: - glance_properties['hypervisor_type'] = hypervisor_type - # Create glance image - if float(glance.version) < 2.0: - with open(abs_file_name) as f: - image = glance.images.create( - name=image_name, - is_public=True, - disk_format=disk_format, - container_format=container_format, - properties=glance_properties, - data=f) - else: - image = glance.images.create( - name=image_name, - visibility="public", - disk_format=disk_format, - container_format=container_format) - glance.images.upload(image.id, open(abs_file_name, 'rb')) - glance.images.update(image.id, **glance_properties) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == container_format \ - and val_img_dfmt == disk_format: - self.log.debug(msg_attr) - else: - msg = ('Image validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def create_cirros_image(self, glance, image_name, hypervisor_type=None): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. 
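glance_create_image above (and the deprecated create_cirros_image here) would typically be driven from a test along these lines; u is assumed to be an OpenStackAmuletUtils instance, glance an authenticated client, and the image name and cirros version are illustrative only, though the URL follows the pattern create_cirros_image builds:

    image = u.glance_create_image(
        glance,
        image_name='cirros-test',
        image_url='http://download.cirros-cloud.net/0.4.0/'
                  'cirros-0.4.0-x86_64-disk.img')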
- - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :param hypervisor_type: glance image hypervisor property - :returns: glance image pointer - """ - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'glance_create_image instead of ' - 'create_cirros_image.') - - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Get cirros image URL - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - f.close() - - return self.glance_create_image( - glance, - image_name, - cirros_url, - hypervisor_type=hypervisor_type) - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. 
- - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. - - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. 
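Both delete_resource above and resource_reaches_status here poll on a four-second tick, so max_wait is effectively a retry budget of max_wait/4 attempts. Typical usage from a test, with u again an OpenStackAmuletUtils instance:

    # Wait for a new cinder volume, then delete it and confirm removal.
    assert u.resource_reaches_status(cinder.volumes, vol_id,
                                     expected_stat='available',
                                     msg='volume status wait')
    assert u.delete_resource(cinder.volumes, vol_id, msg='cinder volume')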
- - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. 
The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. - """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. 
- - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. - - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. 
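
The string-chop approach used by get_rmq_cluster_running_nodes above can be exercised on its own against a fabricated cluster_status string:

    import json

    str_stat = "{running_nodes,['rabbit@host0','rabbit@host1']}, {partitions,[]}"
    pos_start = str_stat.find("{running_nodes,") + 15
    pos_end = str_stat.find("]},", pos_start) + 1
    run_nodes = json.loads(str_stat[pos_start:pos_end].replace("'", '"'))
    assert run_nodes == ['rabbit@host0', 'rabbit@host1']
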
- - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. 
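
The confirm loops in configure_rmq_ssl_on/off above follow a generic poll-until-clean pattern, sketched here standalone (names are illustrative):

    import time

    def confirm(validate, max_wait=60):
        tries = 0
        ret = validate()
        while ret and tries < (max_wait / 4):
            time.sleep(4)
            ret = validate()
            tries += 1
        return ret  # None on success, an error message on timeout
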
- - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. 
- """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. {} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index b67dafda..57b03537 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -25,7 +25,10 @@ import time from base64 import b64decode -from subprocess import check_call, CalledProcessError +from subprocess import ( + check_call, + check_output, + CalledProcessError) import six @@ -453,18 +456,24 @@ def __call__(self): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host + int_host = 
rdata.get('internal_host') + int_host = format_ipv6_addr(int_host) or int_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + int_protocol = rdata.get('internal_protocol') or 'http' api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), + 'internal_host': int_host, + 'internal_port': rdata.get('internal_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), 'admin_password': rdata.get('service_password'), 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, + 'internal_protocol': int_protocol, 'api_version': api_version}) if float(api_version) > 2: @@ -1781,6 +1790,10 @@ def __call__(self): 'rel_key': 'enable-port-forwarding', 'default': False, }, + 'enable_fwaas': { + 'rel_key': 'enable-fwaas', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1815,6 +1828,11 @@ def __call__(self): if ctxt['enable_port_forwarding']: l3_extension_plugins.append('port_forwarding') + if ctxt['enable_fwaas']: + l3_extension_plugins.append('fwaas_v2') + if ctxt['enable_nfg_logging']: + l3_extension_plugins.append('fwaas_v2_log') + ctxt['l3_extension_plugins'] = l3_extension_plugins return ctxt @@ -2379,6 +2397,12 @@ def __call__(self): ctxt['enable_metadata_network'] = True ctxt['enable_isolated_metadata'] = True + ctxt['append_ovs_config'] = False + cmp_release = CompareOpenStackReleases( + os_release('neutron-common', base='icehouse')) + if cmp_release >= 'queens' and config('enable-dpdk'): + ctxt['append_ovs_config'] = True + return ctxt @staticmethod @@ -2570,22 +2594,48 @@ def cpu_mask(self): :returns: hex formatted CPU mask :rtype: str """ - num_cores = config('dpdk-socket-cores') - mask = 0 + return self.cpu_masks()['dpdk_lcore_mask'] + + def cpu_masks(self): + """Get hex formatted CPU masks + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit, followed by the + next config:pmd-socket-cores + + :returns: Dict of hex formatted CPU masks + :rtype: Dict[str, str] + """ + num_lcores = config('dpdk-socket-cores') + pmd_cores = config('pmd-socket-cores') + lcore_mask = 0 + pmd_mask = 0 for cores in self._numa_node_cores().values(): - for core in cores[:num_cores]: - mask = mask | 1 << core - return format(mask, '#04x') + for core in cores[:num_lcores]: + lcore_mask = lcore_mask | 1 << core + for core in cores[num_lcores:][:pmd_cores]: + pmd_mask = pmd_mask | 1 << core + return { + 'pmd_cpu_mask': format(pmd_mask, '#04x'), + 'dpdk_lcore_mask': format(lcore_mask, '#04x')} def socket_memory(self): - """Formatted list of socket memory configuration per NUMA node + """Formatted list of socket memory configuration per socket. - :returns: socket memory configuration per NUMA node + :returns: socket memory configuration per socket. :rtype: str """ + lscpu_out = check_output( + ['lscpu', '-p=socket']).decode('UTF-8').strip() + sockets = set() + for line in lscpu_out.split('\n'): + try: + sockets.add(int(line)) + except ValueError: + # lscpu output is headed by comments so ignore them. 
+ pass sm_size = config('dpdk-socket-memory') - node_regex = '/sys/devices/system/node/node*' - mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + mem_list = [str(sm_size) for _ in sockets] if mem_list: return ','.join(mem_list) else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index f2bb21e9..e003c1f3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): """This function is designed to be called from the config changed hook. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 1656bd43..008a8ec0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -106,6 +106,8 @@ filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, + OPENSTACK_RELEASES, + UBUNTU_OPENSTACK_RELEASE, ) from charmhelpers.fetch.snap import ( @@ -132,54 +134,9 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), -]) - - OPENSTACK_CODENAMES = OrderedDict([ + # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version + # number. This just means the i-th version of the year yyyy. 
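
The mask arithmetic in the cpu_masks() change above can be illustrated with a hypothetical two-node NUMA layout (the two core counts stand in for the dpdk-socket-cores and pmd-socket-cores config options):

    numa_cores = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
    dpdk_socket_cores, pmd_socket_cores = 1, 2
    lcore_mask = pmd_mask = 0
    for cores in numa_cores.values():
        for core in cores[:dpdk_socket_cores]:
            lcore_mask |= 1 << core
        for core in cores[dpdk_socket_cores:][:pmd_socket_cores]:
            pmd_mask |= 1 << core
    assert format(lcore_mask, '#04x') == '0x11'  # cores 0 and 4
    assert format(pmd_mask, '#04x') == '0x66'    # cores 1, 2, 5 and 6
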
('2011.2', 'diablo'), ('2012.1', 'essex'), ('2012.2', 'folsom'), @@ -200,6 +157,8 @@ ('2020.1', 'ussuri'), ('2020.2', 'victoria'), ('2021.1', 'wallaby'), + ('2021.2', 'xena'), + ('2022.1', 'yoga'), ]) # The ugly duckling - must list releases oldest to newest diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index 5aa4196d..e710c0e0 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -28,6 +28,7 @@ 'focal', 'groovy', 'hirsute', + 'impish', ) diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index e8df0452..28c6b3f5 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -18,8 +18,11 @@ import six import re +TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} +FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} -def bool_from_string(value): + +def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): """Interpret string value as boolean. Returns True if value translates to True otherwise False. @@ -32,9 +35,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't', 'on']: + if value in truthy_strings: return True - elif value in ['n', 'no', 'false', 'f', 'off']: + elif value in falsey_strings or assume_false: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 5b689f5b..30228790 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -106,6 +106,8 @@ def base_url(self, url): apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env get_installed_version = fetch.get_installed_version + OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES + UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py index 6e95028b..b4f470ef 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py @@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" if six.PY2: apt_install('python-virtualenv') + extra_flags = [] else: - apt_install('python3-virtualenv') + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path @@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None): venv_path = os.path.join(charm_dir(), 'venv') if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) + subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 812a11a2..c9433c12 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -208,12 +208,79 @@ 'wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby', + # Xena + 'xena': 'focal-updates/xena', + 'focal-xena': 'focal-updates/xena', + 'focal-xena/updates': 'focal-updates/xena', + 
'focal-updates/xena': 'focal-updates/xena',
+    'xena/proposed': 'focal-proposed/xena',
+    'focal-xena/proposed': 'focal-proposed/xena',
+    'focal-proposed/xena': 'focal-proposed/xena',
+    # Yoga
+    'yoga': 'focal-updates/yoga',
+    'focal-yoga': 'focal-updates/yoga',
+    'focal-yoga/updates': 'focal-updates/yoga',
+    'focal-updates/yoga': 'focal-updates/yoga',
+    'yoga/proposed': 'focal-proposed/yoga',
+    'focal-yoga/proposed': 'focal-proposed/yoga',
+    'focal-proposed/yoga': 'focal-proposed/yoga',
 }

+OPENSTACK_RELEASES = (
+    'diablo',
+    'essex',
+    'folsom',
+    'grizzly',
+    'havana',
+    'icehouse',
+    'juno',
+    'kilo',
+    'liberty',
+    'mitaka',
+    'newton',
+    'ocata',
+    'pike',
+    'queens',
+    'rocky',
+    'stein',
+    'train',
+    'ussuri',
+    'victoria',
+    'wallaby',
+    'xena',
+    'yoga',
+)
+
+
+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
+    ('oneiric', 'diablo'),
+    ('precise', 'essex'),
+    ('quantal', 'folsom'),
+    ('raring', 'grizzly'),
+    ('saucy', 'havana'),
+    ('trusty', 'icehouse'),
+    ('utopic', 'juno'),
+    ('vivid', 'kilo'),
+    ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
+    ('yakkety', 'newton'),
+    ('zesty', 'ocata'),
+    ('artful', 'pike'),
+    ('bionic', 'queens'),
+    ('cosmic', 'rocky'),
+    ('disco', 'stein'),
+    ('eoan', 'train'),
+    ('focal', 'ussuri'),
+    ('groovy', 'victoria'),
+    ('hirsute', 'wallaby'),
+    ('impish', 'xena'),
+])
+
+
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
 CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
-CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.
+CMD_RETRY_COUNT = 10  # Retry a failing fatal command X times.


 def filter_installed_packages(packages):
@@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False):
         with be used. If staging is NOT used then the cloud archive [3] will be
         added, and the 'ubuntu-cloud-keyring' package will be added for the
         current distro.
+      '<openstack-release>': translate to cloud:<release> based on the current
+        distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri'
+        or 'distro'.
+      '<openstack-release>/proposed': as above, but for proposed.

     Otherwise the source is not recognised and this is logged to the juju log.
     However, no error is raised, unless sys_error_on_exit is True.
@@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
     @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
         valid pocket in CLOUD_ARCHIVE_POCKETS
     """
+    # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
+    # the list in contrib.openstack.utils as it might not be included in
+    # classic charms and would break everything. Having OpenStack specific
+    # code in this file is a bit of an antipattern, anyway.
+ os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES)) + _mapping = OrderedDict([ (r"^distro$", lambda: None), # This is a NOP (r"^(?:proposed|distro-proposed)$", _add_proposed), @@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), + (r"^{}\/proposed$".format(os_versions_regex), + _add_bare_openstack_proposed), + (r"^{}$".format(os_versions_regex), _add_bare_openstack), ]) if source is None: source = '' @@ -662,7 +742,8 @@ def _add_apt_repository(spec): series = get_distrib_codename() spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http'])) + cmd_env=env_proxy_settings(['https', 'http', 'no_proxy']) + ) def _add_cloud_pocket(pocket): @@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release): 'version ({})'.format(release, os_release, ubuntu_rel)) +def _add_bare_openstack(openstack_release): + """Add cloud or distro based on the release given. + + The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri + or 'distro' depending on whether the ubuntu release is bionic or focal. + + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + # TODO(ajkavanagh) - surely this means we should be removing cloud archives + # if they exist? + __add_bare_helper(openstack_release, "{}-{}", lambda: None) + + +def _add_bare_openstack_proposed(openstack_release): + """Add cloud of distro but with proposed. + + The spec given is, say, 'ussuri' but this could apply + cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the + ubuntu release is bionic or focal. + + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) + + +def __add_bare_helper(openstack_release, pocket_format, final_function): + """Helper for _add_bare_openstack[_proposed] + + The bulk of the work between the two functions is exactly the same except + for the pocket format and the function that is run if it's the distro + version. + + :param openstack_release: the OpenStack codename. e.g. ussuri + :type openstack_release: str + :param pocket_format: the pocket formatter string to construct a pocket str + from the openstack_release and the current ubuntu version. + :type pocket_format: str + :param final_function: the function to call if it is the distro version. + :type final_function: Callable + :raises SourceConfigError on error + """ + ubuntu_version = get_distrib_codename() + possible_pocket = pocket_format.format(ubuntu_version, openstack_release) + if possible_pocket in CLOUD_ARCHIVE_POCKETS: + _add_cloud_pocket(possible_pocket) + return + # Otherwise it's almost certainly the distro version; verify that it + # exists. 
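
The new bare-release patterns registered in _mapping above behave like this (regex built from a subset of OPENSTACK_RELEASES for brevity):

    import re

    os_versions_regex = "({})".format("|".join(('ussuri', 'xena', 'yoga')))
    assert re.match(r"^{}$".format(os_versions_regex), 'xena')
    assert re.match(r"^{}\/proposed$".format(os_versions_regex),
                    'yoga/proposed')
    assert not re.match(r"^{}$".format(os_versions_regex), 'cloud:focal-xena')
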
+    try:
+        assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release
+    except KeyError:
+        raise SourceConfigError(
+            "Invalid ubuntu version {} isn't known to this library"
+            .format(ubuntu_version))
+    except AssertionError:
+        raise SourceConfigError(
+            'Invalid OpenStack release specified: {} for ubuntu version {}'
+            .format(openstack_release, ubuntu_version))
+    final_function()
+
+
 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT,
                       retry_exitcodes=(1,), retry_message="",
                       cmd_env=None, quiet=False):
     """Run a command and retry until success or max_retries is reached.
diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
index a2fbe0e5..1f9bc73a 100644
--- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
+++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -264,7 +264,7 @@ def version_compare(a, b):
     else:
         raise RuntimeError('Unable to compare "{}" and "{}", according to '
                            'our logic they are neither greater, equal nor '
-                           'less than each other.')
+                           'less than each other.'.format(a, b))


 class PkgVersion():
diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py
index 78c81af5..1ace468f 100644
--- a/ceph-mon/hooks/charmhelpers/osplatform.py
+++ b/ceph-mon/hooks/charmhelpers/osplatform.py
@@ -28,6 +28,9 @@ def get_platform():
     elif "elementary" in current_platform:
         # ElementaryOS fails to run tests locally without this.
         return "ubuntu"
+    elif "Pop!_OS" in current_platform:
+        # Pop!_OS also fails to run tests locally without this.
+        return "ubuntu"
     else:
         raise RuntimeError("This module is not supported on {}."
                            .format(current_platform))
diff --git a/ceph-mon/lib/charms_ceph/crush_utils.py b/ceph-mon/lib/charms_ceph/crush_utils.py
index 8fe09fa4..37084bf1 100644
--- a/ceph-mon/lib/charms_ceph/crush_utils.py
+++ b/ceph-mon/lib/charms_ceph/crush_utils.py
@@ -79,9 +79,9 @@ def load_crushmap(self):
                 stdin=crush.stdout)
                 .decode('UTF-8'))
         except CalledProcessError as e:
-            log("Error occured while loading and decompiling CRUSH map:"
+            log("Error occurred while loading and decompiling CRUSH map:"
                 "{}".format(e), ERROR)
-            raise "Failed to read CRUSH map"
+            raise

     def ensure_bucket_is_present(self, bucket_name):
         if bucket_name not in [bucket.name for bucket in self.buckets()]:
@@ -111,7 +111,7 @@ def save(self):
             return ceph_output
         except CalledProcessError as e:
             log("save error: {}".format(e))
-            raise "Failed to save CRUSH map."
+ raise def build_crushmap(self): """Modifies the current CRUSH map to include the new buckets""" diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index e5c38793..9b7299dd 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -14,6 +14,7 @@ import collections import glob +import itertools import json import os import pyudev @@ -24,6 +25,7 @@ import sys import time import uuid +import functools from contextlib import contextmanager from datetime import datetime @@ -501,30 +503,33 @@ def ceph_user(): class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name + def __init__(self, identifier, name, osd="", host="", chassis="", + rack="", row="", pdu="", pod="", room="", + datacenter="", zone="", region="", root=""): self.identifier = identifier + self.name = name + self.osd = osd self.host = host + self.chassis = chassis self.rack = rack self.row = row + self.pdu = pdu + self.pod = pod + self.room = room self.datacenter = datacenter - self.chassis = chassis + self.zone = zone + self.region = region self.root = root def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) + return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ + "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ + "region: {} root: {}".format(self.name, self.identifier, + self.osd, self.host, self.chassis, + self.rack, self.row, self.pdu, + self.pod, self.room, + self.datacenter, self.zone, + self.region, self.root) def __eq__(self, other): return not self.name < other.name and not other.name < self.name @@ -571,10 +576,53 @@ def get_osd_weight(osd_id): raise +def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): + """Get all nodes of the desired type, with all their attributes. + + These attributes can be direct or inherited from ancestors. + """ + attribute_dict = {node['type']: node['name']} + if node['type'] == lookup_type: + attribute_dict['name'] = node['name'] + attribute_dict['identifier'] = node['id'] + return [attribute_dict] + elif not node.get('children'): + return [attribute_dict] + else: + descendant_attribute_dicts = [ + _filter_nodes_and_set_attributes(node_lookup_map[node_id], + node_lookup_map, lookup_type) + for node_id in node.get('children', []) + ] + return [dict(attribute_dict, **descendant_attribute_dict) + for descendant_attribute_dict + in itertools.chain.from_iterable(descendant_attribute_dicts)] + + +def _flatten_roots(nodes, lookup_type='host'): + """Get a flattened list of nodes of the desired type. + + :param nodes: list of nodes defined as a dictionary of attributes and + children + :type nodes: List[Dict[int, Any]] + :param lookup_type: type of searched node + :type lookup_type: str + :returns: flattened list of nodes + :rtype: List[Dict[str, Any]] + """ + lookup_map = {node['id']: node for node in nodes} + root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, + lookup_type) + for node in nodes if node['type'] == 'root'] + # get a flattened list of roots. + return list(itertools.chain.from_iterable(root_attributes_dicts)) + + def get_osd_tree(service): """Returns the current osd map in JSON. :returns: List. + :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. 
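
To make the recursion in _filter_nodes_and_set_attributes/_flatten_roots above concrete, a toy `ceph osd tree` node list (ids and names made up) flattens as follows:

    nodes = [
        {'id': -1, 'name': 'default', 'type': 'root', 'children': [-2]},
        {'id': -2, 'name': 'node1', 'type': 'host', 'children': [0]},
        {'id': 0, 'name': 'osd.0', 'type': 'osd'},
    ]
    # _flatten_roots(nodes) returns the hosts with inherited ancestry:
    # [{'root': 'default', 'host': 'node1',
    #   'name': 'node1', 'identifier': -2}]
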
Also raises CalledProcessError if our ceph command fails
    """
@@ -585,35 +633,14 @@ def get_osd_tree(service):
                .decode('UTF-8'))
     try:
         json_tree = json.loads(tree)
-        crush_list = []
-        # Make sure children are present in the json
-        if not json_tree['nodes']:
-            return None
-        host_nodes = [
-            node for node in json_tree['nodes']
-            if node['type'] == 'host'
-        ]
-        for host in host_nodes:
-            crush_list.append(
-                CrushLocation(
-                    name=host.get('name'),
-                    identifier=host['id'],
-                    host=host.get('host'),
-                    rack=host.get('rack'),
-                    row=host.get('row'),
-                    datacenter=host.get('datacenter'),
-                    chassis=host.get('chassis'),
-                    root=host.get('root')
-                )
-            )
-        return crush_list
+        roots = _flatten_roots(json_tree["nodes"])
+        return [CrushLocation(**host) for host in roots]
     except ValueError as v:
         log("Unable to parse ceph tree json: {}. Error: {}".format(
             tree, v))
         raise
     except subprocess.CalledProcessError as e:
-        log("ceph osd tree command failed with message: {}".format(
-            e))
+        log("ceph osd tree command failed with message: {}".format(e))
         raise


@@ -669,7 +696,9 @@ def get_local_osd_ids():
         dirs = os.listdir(osd_path)
         for osd_dir in dirs:
             osd_id = osd_dir.split('-')[1]
-            if _is_int(osd_id):
+            if (_is_int(osd_id) and
+                    filesystem_mounted(os.path.join(
+                        os.sep, osd_path, osd_dir))):
                 osd_ids.append(osd_id)
     except OSError:
         raise
@@ -3271,13 +3300,14 @@ def determine_packages():

 def determine_packages_to_remove():
     """Determines packages for removal

+    Note: if in a container, then the CHRONY_PACKAGE is removed.
+
     :returns: list of packages to be removed
+    :rtype: List[str]
     """
     rm_packages = REMOVE_PACKAGES.copy()
     if is_container():
-        install_list = filter_missing_packages(CHRONY_PACKAGE)
-        if not install_list:
-            rm_packages.append(CHRONY_PACKAGE)
+        rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE]))
     return rm_packages

@@ -3376,3 +3406,132 @@ def _get_cli_key(key):
             level=ERROR)
         raise OSDConfigSetError
     return True
+
+
+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def is_mgr_module_enabled(module):
+    """Is a given manager module enabled.
+
+    :param module: The module name to check
+    :type module: str
+    :returns: Whether the named module is enabled
+    :rtype: bool
+    """
+    return module in enabled_manager_modules()
+
+
+is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
+
+
+def mgr_enable_module(module):
+    """Enable a Ceph Manager Module.
+
+    :param module: The module name to enable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if not is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
+        return True
+    return False
+
+
+mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
+
+
+def mgr_disable_module(module):
+    """Disable a Ceph Manager Module.
+
+    :param module: The module name to disable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
+        return True
+    return False
+
+
+mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
+
+
+def ceph_config_set(name, value, who):
+    """Set a ceph config option
+
+    :param name: key to set
+    :type name: str
+    :param value: value corresponding to key
+    :type value: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    subprocess.check_call(['ceph', 'config', 'set', who, name, value])
+
+
+mgr_config_set = functools.partial(ceph_config_set, who='mgr')
+
+
+def ceph_config_get(name, who):
+    """Retrieve the value of a ceph config option
+
+    :param name: key to lookup
+    :type name: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+    :returns: Value associated with key
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    return subprocess.check_output(
+        ['ceph', 'config', 'get', who, name]).decode('UTF-8')
+
+
+mgr_config_get = functools.partial(ceph_config_get, who='mgr')
+
+
+def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
+    """Set SSL dashboard config option.
+
+    :param path: Path to file
+    :type path: str
+    :param artifact_name: Option name for setting the artifact
+    :type artifact_name: str
+    :param hostname: If hostname is set artifact will only be associated with
+                     the dashboard on that host.
+    :type hostname: str
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ceph', 'dashboard', artifact_name]
+    if hostname:
+        cmd.append(hostname)
+    cmd.extend(['-i', path])
+    log(cmd, level=DEBUG)
+    subprocess.check_call(cmd)
+
+
+dashboard_set_ssl_certificate = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate')
+
+
+dashboard_set_ssl_certificate_key = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate-key')
diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml
index 5a117058..8465bf75 100644
--- a/ceph-mon/osci.yaml
+++ b/ceph-mon/osci.yaml
@@ -6,6 +6,10 @@
     jobs:
       - focal-ussuri-ec-ceph-mon
       - bionic-train-with-fsid
+      - focal-xena:
+          voting: false
+      - impish-xena:
+          voting: false

 - job:
     name: focal-ussuri-ec-ceph-mon
diff --git a/ceph-mon/pip.sh b/ceph-mon/pip.sh
new file mode 100755
index 00000000..9a7e6b09
--- /dev/null
+++ b/ceph-mon/pip.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.  See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# setuptools 58.0 dropped the support for use_2to3=true which is needed to
+# install blessings (an indirect dependency of charm-tools).
+#
+# More details on the behavior of tox and virtualenv creation can be found at
+# https://github.com/tox-dev/tox/issues/448
+#
+# This script is a wrapper to force the use of the pinned versions early in
+# the process when the virtualenv was created and upgraded before installing
+# the dependencies declared in the target.
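
A hypothetical caller of the functools.partial helpers added to charms_ceph.utils above might look like this (assumes a unit with the ceph CLI available; the config key is illustrative):

    from charms_ceph.utils import mgr_enable_dashboard, mgr_config_set

    if mgr_enable_dashboard():  # True only if it was previously disabled
        mgr_config_set('mgr/dashboard/ssl', 'false')
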
+pip install 'pip<20.3' 'setuptools<50.0.0' +pip "$@" diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..991293f5 --- /dev/null +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -0,0 +1,235 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 
'rabbitmq-server:amqp'
+
+  - - 'glance:ceph'
+    - 'ceph-mon:client'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:image-service'
+    - 'glance:image-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-mon:client'
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'nova-cloud-controller:shared-db'
+    - 'nova-cloud-controller-mysql-router:shared-db'
+  - - 'nova-cloud-controller-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-cloud-controller:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'nova-cloud-controller:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-cloud-controller:cloud-compute'
+    - 'nova-compute:cloud-compute'
+
+  - - 'nova-cloud-controller:image-service'
+    - 'glance:image-service'
+
+  - - 'placement:shared-db'
+    - 'placement-mysql-router:shared-db'
+  - - 'placement-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'placement'
+    - 'keystone'
+
+  - - 'placement'
+    - 'nova-cloud-controller'
+
+  - - 'ceph-mon:prometheus'
+    - 'prometheus2:target'
diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml
new file mode 100644
index 00000000..df5c37a8
--- /dev/null
+++ b/ceph-mon/tests/bundles/impish-xena.yaml
@@ -0,0 +1,237 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
+series: impish
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+    series: focal
+
+
+applications:
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  nova-cloud-controller-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  placement-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    storage:
+      osd-devices: '10G'
+    options:
+      source: *openstack-origin
+      osd-devices: '/dev/test-non-existent'
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-mon:
+    charm: ../../../ceph-mon
+    num_units: 3
+    options:
+      source: *openstack-origin
+      monitor-count: '3'
+    to:
+      - '6'
+      - '7'
+      - '8'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  keystone:
+    expose: True
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '10'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      libvirt-image-backend: rbd
+    to:
+      - '11'
+
+  glance:
+    expose: True
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '12'
+
+  cinder:
+    expose: True
+    charm: cs:~openstack-charmers-next/cinder
+    num_units:
1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + series: focal + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 2aafffa7..2ea97e57 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -2,6 +2,7 @@ charm_name: ceph-mon gate_bundles: - groovy-victoria + - focal-xena - focal-wallaby - focal-victoria - focal-ussuri-ec @@ -20,6 +21,7 @@ dev_bundles: - xenial-queens - bionic-rocky - hirsute-wallaby + - impish-xena smoke_bundles: - bionic-train @@ -38,5 +40,6 @@ tests: tests_options: force_deploy: - trusty-mitaka - - hirsute-wallaby - groovy-victoria + - hirsute-wallaby + - impish-xena diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index c6b36975..ba4fd5b6 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -22,8 +22,11 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 +requires = + pip < 20.3 + 
virtualenv < 20.0 + setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.18.0 @@ -32,7 +35,7 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* From 821c99443ee2f81499014c90bae1496ad4c39746 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 21 Sep 2021 14:31:40 +0100 Subject: [PATCH 2276/2699] Add xena bundles - add non-voting focal-xena bundle - add non-voting impish-xena bundle - rebuild to pick up charm-helpers changes - update tox/pip.sh to ensure setuptools<50.0.0 Change-Id: Idd5275cb2440ee712dae62b1ef4ba5a6d846135d --- .../charmhelpers/contrib/charmsupport/nrpe.py | 5 +- .../contrib/charmsupport/volumes.py | 4 +- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +- .../hardening/host/templates/login.defs | 4 +- .../charmhelpers/contrib/hardening/utils.py | 4 +- ceph-proxy/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/deferred_events.py | 4 +- .../openstack/files/policy_rc_d_script.py | 2 +- .../charmhelpers/contrib/openstack/policyd.py | 6 +- .../charmhelpers/contrib/openstack/utils.py | 71 ++--- .../contrib/storage/linux/ceph.py | 15 +- .../charmhelpers/contrib/storage/linux/lvm.py | 4 +- ceph-proxy/charmhelpers/core/hookenv.py | 11 +- ceph-proxy/charmhelpers/core/host.py | 12 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/core/strutils.py | 9 +- ceph-proxy/charmhelpers/core/unitdata.py | 6 +- ceph-proxy/charmhelpers/fetch/__init__.py | 6 +- .../charmhelpers/fetch/python/packages.py | 10 +- ceph-proxy/charmhelpers/fetch/snap.py | 4 +- ceph-proxy/charmhelpers/fetch/ubuntu.py | 166 +++++++++++- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 6 +- ceph-proxy/charmhelpers/osplatform.py | 3 + ceph-proxy/lib/charms_ceph/crush_utils.py | 6 +- ceph-proxy/lib/charms_ceph/utils.py | 247 ++++++++++++++---- ceph-proxy/osci.yaml | 15 ++ ceph-proxy/pip.sh | 18 ++ ceph-proxy/tests/bundles/focal-xena-ec.yaml | 215 +++++++++++++++ ceph-proxy/tests/bundles/focal-xena.yaml | 186 +++++++++++++ ceph-proxy/tests/bundles/impish-xena-ec.yaml | 215 +++++++++++++++ ceph-proxy/tests/bundles/impish-xena.yaml | 186 +++++++++++++ ceph-proxy/tests/tests.yaml | 6 + ceph-proxy/tox.ini | 13 +- 33 files changed, 1293 insertions(+), 177 deletions(-) create mode 100755 ceph-proxy/pip.sh create mode 100644 ceph-proxy/tests/bundles/focal-xena-ec.yaml create mode 100644 ceph-proxy/tests/bundles/focal-xena.yaml create mode 100644 ceph-proxy/tests/bundles/impish-xena-ec.yaml create mode 100644 ceph-proxy/tests/bundles/impish-xena.yaml diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index e4cb06bc..8d1753c3 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2012-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. """Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. 
# # Authors: # Matthew Wedgwood @@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name): def remove_deprecated_check(nrpe, deprecated_services): """ - Remove checks fro deprecated services in list + Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py b/ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py index 7ea43f08..f7c6fbdc 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/volumes.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ type: boolean default: true description: > - If false, a volume is mounted as sepecified in "volume-map" + If false, a volume is mounted as specified in "volume-map" If true, ephemeral storage will be used, meaning that log data will only exist as long as the machine. YOU HAVE BEEN WARNED. volume-map: diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py index ba34fba0..f0b629a2 100644 --- a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +86,7 @@ def is_elected_leader(resource): 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In + determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit. """ try: @@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports, Return only the services and corresponding ports that are managed by this charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsability for stopping and starting + is because this charm passes responsibility for stopping and starting haproxy to hacluster. Similarly, if a relation with hacluster exists then the ports returned by diff --git a/ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs index db137d6d..7d107637 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs +++ b/ceph-proxy/charmhelpers/contrib/hardening/host/templates/login.defs @@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }} # # Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built +# overridden by PAM, since the default pam_unix module has it's own built # in of 3 retries. However, this is a safe fallback in case you are using # an authentication module that does not enforce PAM_MAXTRIES. # @@ -235,7 +235,7 @@ USERGROUPS_ENAB yes # # Instead of the real user shell, the program specified by this parameter # will be launched, although its visible name (argv[0]) will be the shell's. 
-# The program may do whatever it wants (logging, additional authentification, +# The program may do whatever it wants (logging, additional authentication, # banner, ...) before running the actual shell. # # FAKE_SHELL /bin/fakeshell diff --git a/ceph-proxy/charmhelpers/contrib/hardening/utils.py b/ceph-proxy/charmhelpers/contrib/hardening/utils.py index ff7485c2..56afa4b6 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/utils.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Limited. +# Copyright 2016-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules): def _apply_overrides(settings, overrides, schema): - """Get overrides config overlayed onto modules defaults. + """Get overrides config overlaid onto modules defaults. :param modules: require stack modules config. :returns: dictionary of modules config with user overrides applied. diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index 63e91cca..b356d64c 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None): @returns IPv6 or IPv4 address """ # Select the interface address first - # For possible use as a fallback bellow with get_address_in_network + # For possible use as a fallback below with get_address_in_network try: # Get the interface specific IP address = network_get_primary_address(interface) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py index 8765ee31..94eacf6c 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py @@ -244,7 +244,7 @@ def get_deferred_restarts(): def clear_deferred_restarts(services): - """Clear deferred restart events targetted at `services`. + """Clear deferred restart events targeted at `services`. :param services: Services with deferred actions to clear. :type services: List[str] @@ -253,7 +253,7 @@ def clear_deferred_restarts(services): def process_svc_restart(service): - """Respond to a service restart having occured. + """Respond to a service restart having occurred. :param service: Services that the action was performed against. 
:type service: str diff --git a/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py index 344a7662..431e972b 100755 --- a/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""This script is an implemenation of policy-rc.d +"""This script is an implementation of policy-rc.d For further information on policy-rc.d see *1 diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py index f2bb21e9..6fa06f26 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ The functions should be called from the install and upgrade hooks in the charm. The `maybe_do_policyd_overrides_on_config_changed` function is designed to be called on the config-changed hook, in that it does an additional check to -ensure that an already overriden policy.d in an upgrade or install hooks isn't +ensure that an already overridden policy.d in an upgrade or install hooks isn't repeated. In order the *enable* this functionality, the charm's install, config_changed, @@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): """This function is designed to be called from the config changed hook. diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 1656bd43..d5d301e6 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -106,6 +106,8 @@ filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, + OPENSTACK_RELEASES, + UBUNTU_OPENSTACK_RELEASE, ) from charmhelpers.fetch.snap import ( @@ -132,54 +134,9 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), -]) - - OPENSTACK_CODENAMES = OrderedDict([ + # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version + # number. This just means the i-th version of the year yyyy. ('2011.2', 'diablo'), ('2012.1', 'essex'), ('2012.2', 'folsom'), @@ -200,6 +157,8 @@ ('2020.1', 'ussuri'), ('2020.2', 'victoria'), ('2021.1', 'wallaby'), + ('2021.2', 'xena'), + ('2022.1', 'yoga'), ]) # The ugly duckling - must list releases oldest to newest @@ -701,7 +660,7 @@ def import_key(keyid): def get_source_and_pgp_key(source_and_key): """Look for a pgp key ID or ascii-armor key in the given input. - :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + :param source_and_key: String, "source_spec|keyid" where '|keyid' is optional. :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id if there was no '|' in the source_and_key string. @@ -721,7 +680,7 @@ def configure_installation_source(source_plus_key): The functionality is provided by charmhelpers.fetch.add_source() The difference between the two functions is that add_source() signature requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation + optional key by appending '|' to the end of the source specification 'source'. Another difference from add_source() is that the function calls sys.exit(1) @@ -808,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'): def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been recieved for an endpoint. + """Whether a new notification has been received for an endpoint. :param service_name: Service name eg nova, neutron, placement etc :type service_name: str @@ -834,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'): def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the enpoint triggers in db so it can be tracked if they changed. + """Save the endpoint triggers in db so it can be tracked if they changed. :param service_names: List of service name. 
:type service_name: List @@ -1502,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None): if remote_service: trigger['remote-service'] = remote_service for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using + # This subordinate can be related to two separate services using # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is + # the principle is connected down the relation we think it is if related_units(relid=rid): relation_set(relation_id=rid, relation_settings=trigger, @@ -1621,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. An optional charm_func() can be called. It should raise an Exception to - indicate that the function failed. If it was succesfull it should return + indicate that the function failed. If it was successful it should return None or an optional message. The signature for charm_func is: @@ -1880,7 +1839,7 @@ def some_hook(...): :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: decorator to use a restart_on_change with pausability :rtype: decorator diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index d1c61754..3eb46d70 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Copyright 2012 Canonical Ltd. -# # This file is sourced from lp:openstack-charm-helpers # # Authors: @@ -605,7 +602,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, class Pool(BasePool): - """Compability shim for any descendents external to this library.""" + """Compatibility shim for any descendents external to this library.""" @deprecate( 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') @@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" + """Determine whether a filesystem is already mounted.""" return fs in [f for f, m in mounts()] @@ -1904,7 +1901,7 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, set the ceph-mon unit handling the broker request will set its default value. 
:type erasure_profile: str - :param allow_ec_overwrites: allow EC pools to be overriden + :param allow_ec_overwrites: allow EC pools to be overridden :type allow_ec_overwrites: bool :raises: AssertionError if provided data is of invalid type/range """ @@ -1949,7 +1946,7 @@ def add_op_create_erasure_profile(self, name, :param lrc_locality: Group the coding and data chunks into sets of size locality (lrc plugin) :type lrc_locality: int - :param durability_estimator: The number of parity chuncks each of which includes + :param durability_estimator: The number of parity chunks each of which includes a data chunk in its calculation range (shec plugin) :type durability_estimator: int :param helper_chunks: The number of helper chunks to use for recovery operations @@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext): settings are in conf['osd_from_client'] and finally settings which do clash are in conf['osd_from_client_conflict']. Rather than silently drop the conflicting settings they are provided in the context so they can be - rendered commented out to give some visability to the admin. + rendered commented out to give some visibility to the admin. """ def __init__(self, permitted_sections=None): diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py index c8bde692..d0a57211 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ ################################################## def deactivate_lvm_volume_group(block_device): ''' - Deactivate any volume gruop associated with an LVM physical volume. + Deactivate any volume group associated with an LVM physical volume. :param block_device: str: Full path to LVM physical volume ''' diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py index 47eebb51..e94247a2 100644 --- a/ceph-proxy/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2013-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. "Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. # # Authors: # Charm Helpers Developers @@ -610,7 +609,7 @@ def expected_related_units(reltype=None): relation_type())) :param reltype: Relation type to list data for, default is to list data for - the realtion type we are currently executing a hook for. + the relation type we are currently executing a hook for. :type reltype: str :returns: iterator :rtype: types.GeneratorType @@ -627,7 +626,7 @@ def expected_related_units(reltype=None): @cached def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" + """Get the json representation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None): def _contains_range(addresses): """Check for cidr or wildcard domain in a string. 
- Given a string comprising a comma seperated list of ip addresses + Given a string comprising a comma separated list of ip addresses and domain names, determine whether the string contains IP ranges or wildcard domains. - :param addresses: comma seperated list of domains and ip addresses. + :param addresses: comma separated list of domains and ip addresses. :type addresses: str """ return ( diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index d25e6c59..994ec8a0 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service. + Re-enable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir @@ -727,7 +727,7 @@ def __init__(self, restart_map, stopstart=False, restart_functions=None, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] """ self.restart_map = restart_map @@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: result of lambda_f() :rtype: ANY @@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. 
:type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: @@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums, def pwgen(length=None): - """Generate a random pasword.""" + """Generate a random password.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 5aa4196d..e710c0e0 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -28,6 +28,7 @@ 'focal', 'groovy', 'hirsute', + 'impish', ) diff --git a/ceph-proxy/charmhelpers/core/strutils.py b/ceph-proxy/charmhelpers/core/strutils.py index e8df0452..28c6b3f5 100644 --- a/ceph-proxy/charmhelpers/core/strutils.py +++ b/ceph-proxy/charmhelpers/core/strutils.py @@ -18,8 +18,11 @@ import six import re +TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} +FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} -def bool_from_string(value): + +def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): """Interpret string value as boolean. Returns True if value translates to True otherwise False. @@ -32,9 +35,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't', 'on']: + if value in truthy_strings: return True - elif value in ['n', 'no', 'false', 'f', 'off']: + elif value in falsey_strings or assume_false: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-proxy/charmhelpers/core/unitdata.py b/ceph-proxy/charmhelpers/core/unitdata.py index ab554327..d9b8d0b0 100644 --- a/ceph-proxy/charmhelpers/core/unitdata.py +++ b/ceph-proxy/charmhelpers/core/unitdata.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) @@ -449,7 +449,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) diff --git a/ceph-proxy/charmhelpers/fetch/__init__.py b/ceph-proxy/charmhelpers/fetch/__init__.py index 5b689f5b..9497ee05 100644 --- a/ceph-proxy/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/charmhelpers/fetch/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -106,6 +106,8 @@ def base_url(self, url): apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env get_installed_version = fetch.get_installed_version + OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES + UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE elif __platform__ == "centos": yum_search = fetch.yum_search @@ -203,7 +205,7 @@ def plugins(fetch_handlers=None): classname) plugin_list.append(handler_class()) except NotImplementedError: - # Skip missing plugins so that they can be ommitted from + # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( handler_name)) diff --git a/ceph-proxy/charmhelpers/fetch/python/packages.py b/ceph-proxy/charmhelpers/fetch/python/packages.py index 6e95028b..60048354 100644 --- a/ceph-proxy/charmhelpers/fetch/python/packages.py +++ b/ceph-proxy/charmhelpers/fetch/python/packages.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. + """Overridden pip_execute() to stop sys.path being changed. The act of importing main from the pip module seems to cause add wheels from the /usr/share/python-wheels which are installed by various tools. @@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" if six.PY2: apt_install('python-virtualenv') + extra_flags = [] else: - apt_install('python3-virtualenv') + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path @@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None): venv_path = os.path.join(charm_dir(), 'venv') if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) + subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/ceph-proxy/charmhelpers/fetch/snap.py b/ceph-proxy/charmhelpers/fetch/snap.py index fc70aa94..36d6bce9 100644 --- a/ceph-proxy/charmhelpers/fetch/snap.py +++ b/ceph-proxy/charmhelpers/fetch/snap.py @@ -1,4 +1,4 @@ -# Copyright 2014-2017 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ def _snap_exec(commands): retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( - 'Could not aquire lock after {} attempts' + 'Could not acquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 812a11a2..6c7cf6fc 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
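The hunk below extends CLOUD_ARCHIVE_POCKETS with the xena and yoga aliases. For orientation, add_source() resolves a 'cloud:...' source spec by looking the pocket name up in that table before enabling the archive and installing 'ubuntu-cloud-keyring'. A minimal sketch of just the lookup step (illustrative only; resolve_cloud_pocket is a hypothetical name, and the real helper also writes the apt source entry, which is omitted here):

    # Abridged copy of the mapping added in the hunk below (illustrative).
    CLOUD_ARCHIVE_POCKETS = {
        'xena': 'focal-updates/xena',
        'focal-xena': 'focal-updates/xena',
        'xena/proposed': 'focal-proposed/xena',
        'focal-xena/proposed': 'focal-proposed/xena',
    }


    def resolve_cloud_pocket(spec):
        # Strip the 'cloud:' prefix, then consult the table; unknown
        # pockets are a configuration error.
        pocket = spec[len('cloud:'):] if spec.startswith('cloud:') else spec
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise ValueError('unknown cloud archive pocket: {}'.format(pocket))
        return CLOUD_ARCHIVE_POCKETS[pocket]


    assert resolve_cloud_pocket('cloud:focal-xena') == 'focal-updates/xena'
    assert resolve_cloud_pocket('cloud:xena/proposed') == 'focal-proposed/xena'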
@@ -208,12 +208,79 @@ 'wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby', + # Xena + 'xena': 'focal-updates/xena', + 'focal-xena': 'focal-updates/xena', + 'focal-xena/updates': 'focal-updates/xena', + 'focal-updates/xena': 'focal-updates/xena', + 'xena/proposed': 'focal-proposed/xena', + 'focal-xena/proposed': 'focal-proposed/xena', + 'focal-proposed/xena': 'focal-proposed/xena', + # Yoga + 'yoga': 'focal-updates/yoga', + 'focal-yoga': 'focal-updates/yoga', + 'focal-yoga/updates': 'focal-updates/yoga', + 'focal-updates/yoga': 'focal-updates/yoga', + 'yoga/proposed': 'focal-proposed/yoga', + 'focal-yoga/proposed': 'focal-proposed/yoga', + 'focal-proposed/yoga': 'focal-proposed/yoga', } +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', + 'victoria', + 'wallaby', + 'xena', + 'yoga', +) + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ('groovy', 'victoria'), + ('hirsute', 'wallaby'), + ('impish', 'xena'), +]) + + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -246,9 +313,9 @@ def filter_missing_packages(packages): def apt_cache(*_, **__): """Shim returning an object simulating the apt_pkg Cache. - :param _: Accept arguments for compability, not used. + :param _: Accept arguments for compatibility, not used. :type _: any - :param __: Accept keyword arguments for compability, not used. + :param __: Accept keyword arguments for compatibility, not used. :type __: any :returns:Object used to interrogate the system apt and dpkg databases. :rtype:ubuntu_apt_pkg.Cache @@ -283,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :param fatal: Whether the command's output should be checked and retried. :type fatal: bool - :param quiet: if True (default), supress log message to stdout/stderr + :param quiet: if True (default), suppress log message to stdout/stderr :type quiet: bool :raises: subprocess.CalledProcessError """ @@ -397,7 +464,7 @@ def import_key(key): A Radix64 format keyid is also supported for backwards compatibility. In this case Ubuntu keyserver will be queried for a key via HTTPS by its keyid. This method - is less preferrable because https proxy servers may + is less preferable because https proxy servers may require traffic decryption which is equivalent to a man-in-the-middle attack (a proxy server impersonates keyserver TLS certificates and has to be explicitly @@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False): with be used. If staging is NOT used then the cloud archive [3] will be added, and the 'ubuntu-cloud-keyring' package will be added for the current distro. 
+      '<openstack-release>': translate to cloud:<series>-<openstack-release>
+          based on the current distro version (i.e. for 'ussuri' this will
+          either be 'bionic-ussuri' or 'distro').
+      '<openstack-release>/proposed': as above, but for proposed.

    Otherwise the source is not recognised and this is logged to the juju log.
    However, no error is raised, unless sys_error_on_exit is True.
@@ -592,7 +663,7 @@
       id may also be used, but be aware that only insecure protocols are
       available to retrieve the actual public key from a public keyserver
       placing your Juju environment at risk. ppa and cloud archive keys
-      are securely added automtically, so sould not be provided.
+      are securely added automatically, so should not be provided.

    @param fail_invalid: (boolean) if True, then the function raises a
    SourceConfigError if there is no matching installation source.

    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
    valid pocket in CLOUD_ARCHIVE_POCKETS
@@ -600,6 +671,12 @@
+    # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
+    # the list in contrib.openstack.utils as it might not be included in
+    # classic charms and would break everything. Having OpenStack specific
+    # code in this file is a bit of an antipattern, anyway.
+    os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
+
    _mapping = OrderedDict([
        (r"^distro$", lambda: None),  # This is a NOP
        (r"^(?:proposed|distro-proposed)$", _add_proposed),
@@ -609,6 +686,9 @@
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^{}\/proposed$".format(os_versions_regex),
+         _add_bare_openstack_proposed),
+        (r"^{}$".format(os_versions_regex), _add_bare_openstack),
    ])
    if source is None:
        source = ''
@@ -640,7 +720,7 @@
    Uses get_distrib_codename to determine the correct stanza for
    the deb line.

-    For intel architecutres PROPOSED_POCKET is used for the release, but for
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
    other architectures PROPOSED_PORTS_POCKET is used for the release.
    """
    release = get_distrib_codename()
@@ -662,7 +742,8 @@
    series = get_distrib_codename()
    spec = spec.replace('{series}', series)
    _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https', 'http']))
+                      cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
+                      )


 def _add_cloud_pocket(pocket):
@@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
                  'version ({})'.format(release, os_release, ubuntu_rel))


+def _add_bare_openstack(openstack_release):
+    """Add cloud or distro based on the release given.
+
+    The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
+    or 'distro' depending on whether the ubuntu release is bionic or focal.
+
+    :param openstack_release: the OpenStack codename to determine the release
+        for.
+    :type openstack_release: str
+    :raises: SourceConfigError
+    """
+    # TODO(ajkavanagh) - surely this means we should be removing cloud archives
+    # if they exist?
+    __add_bare_helper(openstack_release, "{}-{}", lambda: None)
+
+
+def _add_bare_openstack_proposed(openstack_release):
+    """Add cloud or distro, but with proposed.
+
+    The spec given is, say, 'ussuri' but this could apply
+    cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
+    ubuntu release is bionic or focal.
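+
+    For example (illustrative only): on a bionic unit, 'ussuri/proposed'
+    would resolve to the 'bionic-ussuri/proposed' cloud archive pocket,
+    while on a focal unit it would fall back to enabling the distro
+    -proposed pocket via _add_proposed().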
+ + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) + + +def __add_bare_helper(openstack_release, pocket_format, final_function): + """Helper for _add_bare_openstack[_proposed] + + The bulk of the work between the two functions is exactly the same except + for the pocket format and the function that is run if it's the distro + version. + + :param openstack_release: the OpenStack codename. e.g. ussuri + :type openstack_release: str + :param pocket_format: the pocket formatter string to construct a pocket str + from the openstack_release and the current ubuntu version. + :type pocket_format: str + :param final_function: the function to call if it is the distro version. + :type final_function: Callable + :raises SourceConfigError on error + """ + ubuntu_version = get_distrib_codename() + possible_pocket = pocket_format.format(ubuntu_version, openstack_release) + if possible_pocket in CLOUD_ARCHIVE_POCKETS: + _add_cloud_pocket(possible_pocket) + return + # Otherwise it's almost certainly the distro version; verify that it + # exists. + try: + assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release + except KeyError: + raise SourceConfigError( + "Invalid ubuntu version {} isn't known to this library" + .format(ubuntu_version)) + except AssertionError: + raise SourceConfigError( + 'Invalid OpenStack release specified: {} for Ubuntu version {}' + .format(openstack_release, ubuntu_version)) + final_function() + + def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index a2fbe0e5..436e1776 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -209,7 +209,7 @@ def _populate(self): def init(): - """Compability shim that does nothing.""" + """Compatibility shim that does nothing.""" pass @@ -264,7 +264,7 @@ def version_compare(a, b): else: raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' - 'less than each other.') + 'less than each other.'.format(a, b)) class PkgVersion(): diff --git a/ceph-proxy/charmhelpers/osplatform.py b/ceph-proxy/charmhelpers/osplatform.py index 78c81af5..1ace468f 100644 --- a/ceph-proxy/charmhelpers/osplatform.py +++ b/ceph-proxy/charmhelpers/osplatform.py @@ -28,6 +28,9 @@ def get_platform(): elif "elementary" in current_platform: # ElementaryOS fails to run tests locally without this. return "ubuntu" + elif "Pop!_OS" in current_platform: + # Pop!_OS also fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-proxy/lib/charms_ceph/crush_utils.py b/ceph-proxy/lib/charms_ceph/crush_utils.py index 8fe09fa4..37084bf1 100644 --- a/ceph-proxy/lib/charms_ceph/crush_utils.py +++ b/ceph-proxy/lib/charms_ceph/crush_utils.py @@ -79,9 +79,9 @@ def load_crushmap(self): stdin=crush.stdout) .decode('UTF-8')) except CalledProcessError as e: - log("Error occured while loading and decompiling CRUSH map:" + log("Error occurred while loading and decompiling CRUSH map:" "{}".format(e), ERROR) - raise "Failed to read CRUSH map" + raise def ensure_bucket_is_present(self, bucket_name): if bucket_name not in [bucket.name for bucket in self.buckets()]: @@ -111,7 +111,7 @@ def save(self): return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) - raise "Failed to save CRUSH map." + raise def build_crushmap(self): """Modifies the current CRUSH map to include the new buckets""" diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index e5c38793..9b7299dd 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -14,6 +14,7 @@ import collections import glob +import itertools import json import os import pyudev @@ -24,6 +25,7 @@ import sys import time import uuid +import functools from contextlib import contextmanager from datetime import datetime @@ -501,30 +503,33 @@ def ceph_user(): class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name + def __init__(self, identifier, name, osd="", host="", chassis="", + rack="", row="", pdu="", pod="", room="", + datacenter="", zone="", region="", root=""): self.identifier = identifier + self.name = name + self.osd = osd self.host = host + self.chassis = chassis self.rack = rack self.row = row + self.pdu = pdu + self.pod = pod + self.room = room self.datacenter = datacenter - self.chassis = chassis + self.zone = zone + self.region = region self.root = root def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) + return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ + "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ + "region: {} root: {}".format(self.name, self.identifier, + self.osd, self.host, self.chassis, + self.rack, self.row, self.pdu, + self.pod, self.room, + self.datacenter, self.zone, + self.region, self.root) def __eq__(self, other): return not self.name < other.name and not other.name < self.name @@ -571,10 +576,53 @@ def get_osd_weight(osd_id): raise +def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): + """Get all nodes of the desired type, with all their attributes. + + These attributes can be direct or inherited from ancestors. 
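+
+    For example (hypothetical data), with lookup_type='host' a root node
+    {'id': -1, 'type': 'root', 'name': 'default', 'children': [-2]} whose
+    child is {'id': -2, 'type': 'host', 'name': 'node-1'} flattens to
+    [{'root': 'default', 'host': 'node-1', 'name': 'node-1',
+      'identifier': -2}].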
+ """ + attribute_dict = {node['type']: node['name']} + if node['type'] == lookup_type: + attribute_dict['name'] = node['name'] + attribute_dict['identifier'] = node['id'] + return [attribute_dict] + elif not node.get('children'): + return [attribute_dict] + else: + descendant_attribute_dicts = [ + _filter_nodes_and_set_attributes(node_lookup_map[node_id], + node_lookup_map, lookup_type) + for node_id in node.get('children', []) + ] + return [dict(attribute_dict, **descendant_attribute_dict) + for descendant_attribute_dict + in itertools.chain.from_iterable(descendant_attribute_dicts)] + + +def _flatten_roots(nodes, lookup_type='host'): + """Get a flattened list of nodes of the desired type. + + :param nodes: list of nodes defined as a dictionary of attributes and + children + :type nodes: List[Dict[int, Any]] + :param lookup_type: type of searched node + :type lookup_type: str + :returns: flattened list of nodes + :rtype: List[Dict[str, Any]] + """ + lookup_map = {node['id']: node for node in nodes} + root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, + lookup_type) + for node in nodes if node['type'] == 'root'] + # get a flattened list of roots. + return list(itertools.chain.from_iterable(root_attributes_dicts)) + + def get_osd_tree(service): """Returns the current osd map in JSON. :returns: List. + :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. Also raises CalledProcessError if our ceph command fails """ @@ -585,35 +633,14 @@ def get_osd_tree(service): .decode('UTF-8')) try: json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - host_nodes = [ - node for node in json_tree['nodes'] - if node['type'] == 'host' - ] - for host in host_nodes: - crush_list.append( - CrushLocation( - name=host.get('name'), - identifier=host['id'], - host=host.get('host'), - rack=host.get('rack'), - row=host.get('row'), - datacenter=host.get('datacenter'), - chassis=host.get('chassis'), - root=host.get('root') - ) - ) - return crush_list + roots = _flatten_roots(json_tree["nodes"]) + return [CrushLocation(**host) for host in roots] except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) + log("ceph osd tree command failed with message: {}".format(e)) raise @@ -669,7 +696,9 @@ def get_local_osd_ids(): dirs = os.listdir(osd_path) for osd_dir in dirs: osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): + if (_is_int(osd_id) and + filesystem_mounted(os.path.join( + os.sep, osd_path, osd_dir))): osd_ids.append(osd_id) except OSError: raise @@ -3271,13 +3300,14 @@ def determine_packages(): def determine_packages_to_remove(): """Determines packages for removal + Note: if in a container, then the CHRONY_PACKAGE is removed. + :returns: list of packages to be removed + :rtype: List[str] """ rm_packages = REMOVE_PACKAGES.copy() if is_container(): - install_list = filter_missing_packages(CHRONY_PACKAGE) - if not install_list: - rm_packages.append(CHRONY_PACKAGE) + rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE])) return rm_packages @@ -3376,3 +3406,132 @@ def _get_cli_key(key): level=ERROR) raise OSDConfigSetError return True + + +def enabled_manager_modules(): + """Return a list of enabled manager modules. 
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def is_mgr_module_enabled(module):
+    """Is a given manager module enabled.
+
+    :param module: The module name to check
+    :type module: str
+    :returns: Whether the named module is enabled
+    :rtype: bool
+    """
+    return module in enabled_manager_modules()
+
+
+is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
+
+
+def mgr_enable_module(module):
+    """Enable a Ceph Manager Module.
+
+    :param module: The module name to enable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if not is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
+        return True
+    return False
+
+
+mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
+
+
+def mgr_disable_module(module):
+    """Disable a Ceph Manager Module.
+
+    :param module: The module name to disable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
+        return True
+    return False
+
+
+mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
+
+
+def ceph_config_set(name, value, who):
+    """Set a ceph config option
+
+    :param name: key to set
+    :type name: str
+    :param value: value corresponding to key
+    :type value: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    subprocess.check_call(['ceph', 'config', 'set', who, name, value])
+
+
+mgr_config_set = functools.partial(ceph_config_set, who='mgr')
+
+
+def ceph_config_get(name, who):
+    """Retrieve the value of a ceph config option
+
+    :param name: key to lookup
+    :type name: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+    :returns: Value associated with key
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    return subprocess.check_output(
+        ['ceph', 'config', 'get', who, name]).decode('UTF-8')
+
+
+mgr_config_get = functools.partial(ceph_config_get, who='mgr')
+
+
+def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
+    """Set SSL dashboard config option.
+
+    :param path: Path to file
+    :type path: str
+    :param artifact_name: Option name for setting the artifact
+    :type artifact_name: str
+    :param hostname: If hostname is set, the artifact will only be associated
+                     with the dashboard on that host.
+    :type hostname: str
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ceph', 'dashboard', artifact_name]
+    if hostname:
+        cmd.append(hostname)
+    cmd.extend(['-i', path])
+    log(cmd, level=DEBUG)
+    subprocess.check_call(cmd)
+
+
+dashboard_set_ssl_certificate = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate')
+
+
+dashboard_set_ssl_certificate_key = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate-key')
diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml
index 44772406..b70ac138 100644
--- a/ceph-proxy/osci.yaml
+++ b/ceph-proxy/osci.yaml
@@ -13,10 +13,18 @@
         - focal-victoria-ec
         - focal-wallaby
         - focal-wallaby-ec
+        - focal-xena:
+            voting: false
+        - focal-xena-ec:
+            voting: false
         - groovy-victoria
         - groovy-victoria-ec
         - hirsute-wallaby
         - hirsute-wallaby-ec
+        - impish-xena:
+            voting: false
+        - impish-xena-ec:
+            voting: false
- job:
    name: focal-ussuri-ec
    parent: func-target
@@ -48,3 +57,9 @@
    dependencies: *smoke-jobs
    vars:
      tox_extra_args: erasure-coded:hirsute-wallaby-ec
+- job:
+    name: impish-xena-ec
+    parent: func-target
+    dependencies: *smoke-jobs
+    vars:
+      tox_extra_args: erasure-coded:impish-xena-ec
diff --git a/ceph-proxy/pip.sh b/ceph-proxy/pip.sh
new file mode 100755
index 00000000..9a7e6b09
--- /dev/null
+++ b/ceph-proxy/pip.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# setuptools 58.0 dropped the support for use_2to3=true which is needed to
+# install blessings (an indirect dependency of charm-tools).
+#
+# More details on the behavior of tox and virtualenv creation can be found at
+# https://github.com/tox-dev/tox/issues/448
+#
+# This script is a wrapper to force the use of the pinned versions early in
+# the process, when the virtualenv is created and upgraded, before installing
+# the dependencies declared in the target.
+pip install 'pip<20.3' 'setuptools<50.0.0'
+pip "$@"
diff --git a/ceph-proxy/tests/bundles/focal-xena-ec.yaml b/ceph-proxy/tests/bundles/focal-xena-ec.yaml
new file mode 100644
index 00000000..f53e21e8
--- /dev/null
+++ b/ceph-proxy/tests/bundles/focal-xena-ec.yaml
@@ -0,0 +1,215 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-xena
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment.
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-xena.yaml b/ceph-proxy/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..225a9489 --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-xena.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - 
- 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/impish-xena-ec.yaml b/ceph-proxy/tests/bundles/impish-xena-ec.yaml new file mode 100644 index 00000000..ad864ab9 --- /dev/null +++ b/ceph-proxy/tests/bundles/impish-xena-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin distro + +series: impish + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 
'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/impish-xena.yaml b/ceph-proxy/tests/bundles/impish-xena.yaml new file mode 100644 index 00000000..56508086 --- /dev/null +++ b/ceph-proxy/tests/bundles/impish-xena.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin distro + +series: impish + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 
'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 5187892e..a2220a87 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -23,6 +23,8 @@ gate_bundles: - erasure-coded: focal-victoria-ec - focal-wallaby - erasure-coded: focal-wallaby-ec + - focal-xena + - erasure-coded: focal-xena-ec - groovy-victoria - erasure-coded: groovy-victoria-ec @@ -38,6 +40,8 @@ dev_bundles: - bionic-rocky # mimic - hirsute-wallaby - erasure-coded: hirsute-wallaby-ec + - impish-xena + - erasure-coded: impish-xena-ec smoke_bundles: - focal-ussuri @@ -69,3 +73,5 @@ tests_options: force_deploy: - hirsute-wallaby - hirsute-wallaby-ec + - impish-xena + - impish-xena-ec diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 9ba3f9fe..ba4fd5b6 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -22,19 +22,22 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt From cb7fafc9c18f537a2f2ad575334d856d4cce806a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 21 Sep 2021 14:18:45 +0100 Subject: [PATCH 2277/2699] Add xena bundles - add non-voting focal-xena bundle - add non-voting impish-xena bundle - rebuild to pick up charm-helpers changes - update tox/pip.sh to ensure setuptools<50.0.0 Change-Id: Iaaef29dfd8e682121dc0256e5cf3d97293cf84b3 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 5 +- .../contrib/charmsupport/volumes.py | 4 +- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +- .../hardening/host/templates/login.defs | 4 +- .../charmhelpers/contrib/hardening/utils.py | 4 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../contrib/openstack/amulet/__init__.py | 13 - .../contrib/openstack/amulet/deployment.py | 387 ---- .../contrib/openstack/amulet/utils.py | 1595 ----------------- .../contrib/openstack/cert_utils.py | 12 +- .../charmhelpers/contrib/openstack/context.py | 78 +- .../contrib/openstack/deferred_events.py | 4 +- 
.../openstack/files/policy_rc_d_script.py | 2 +- .../charmhelpers/contrib/openstack/neutron.py | 6 +- .../charmhelpers/contrib/openstack/policyd.py | 6 +- .../charmhelpers/contrib/openstack/utils.py | 71 +- .../contrib/openstack/vaultlocker.py | 4 +- .../contrib/storage/linux/ceph.py | 15 +- .../charmhelpers/contrib/storage/linux/lvm.py | 4 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 11 +- ceph-osd/hooks/charmhelpers/core/host.py | 12 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/core/strutils.py | 9 +- ceph-osd/hooks/charmhelpers/core/unitdata.py | 6 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 6 +- .../charmhelpers/fetch/python/packages.py | 10 +- ceph-osd/hooks/charmhelpers/fetch/snap.py | 4 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 166 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 6 +- ceph-osd/hooks/charmhelpers/osplatform.py | 3 + ceph-osd/osci.yaml | 6 + ceph-osd/pip.sh | 18 + ceph-osd/tests/bundles/focal-xena.yaml | 222 +++ ceph-osd/tests/bundles/impish-xena.yaml | 222 +++ ceph-osd/tests/tests.yaml | 6 +- ceph-osd/tox.ini | 13 +- 36 files changed, 794 insertions(+), 2151 deletions(-) delete mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100755 ceph-osd/pip.sh create mode 100644 ceph-osd/tests/bundles/focal-xena.yaml create mode 100644 ceph-osd/tests/bundles/impish-xena.yaml diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e4cb06bc..8d1753c3 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2012-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. """Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. # # Authors: # Matthew Wedgwood @@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name): def remove_deprecated_check(nrpe, deprecated_services): """ - Remove checks fro deprecated services in list + Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py index 7ea43f08..f7c6fbdc 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ type: boolean default: true description: > - If false, a volume is mounted as sepecified in "volume-map" + If false, a volume is mounted as specified in "volume-map" If true, ephemeral storage will be used, meaning that log data will only exist as long as the machine. YOU HAVE BEEN WARNED. 
volume-map: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index ba34fba0..f0b629a2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +86,7 @@ def is_elected_leader(resource): 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In + determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit. """ try: @@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports, Return only the services and corresponding ports that are managed by this charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsability for stopping and starting + is because this charm passes responsibility for stopping and starting haproxy to hacluster. Similarly, if a relation with hacluster exists then the ports returned by diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs index db137d6d..7d107637 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }} # # Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built +# overridden by PAM, since the default pam_unix module has it's own built # in of 3 retries. However, this is a safe fallback in case you are using # an authentication module that does not enforce PAM_MAXTRIES. # @@ -235,7 +235,7 @@ USERGROUPS_ENAB yes # # Instead of the real user shell, the program specified by this parameter # will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, +# The program may do whatever it wants (logging, additional authentication, # banner, ...) before running the actual shell. # # FAKE_SHELL /bin/fakeshell diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py index ff7485c2..56afa4b6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Limited. +# Copyright 2016-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules): def _apply_overrides(settings, overrides, schema): - """Get overrides config overlayed onto modules defaults. + """Get overrides config overlaid onto modules defaults. :param modules: require stack modules config. :returns: dictionary of modules config with user overrides applied. 
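The is_elected_leader change above is typo-only, but the rule its docstring describes is worth spelling out: when the charm is not part of a corosync cluster, leadership falls back to the alive unit with the lowest unit number, i.e. the oldest surviving unit. A minimal sketch of that fallback rule (the "app/N" unit-name format and the peer list are assumptions for illustration, not the actual charmhelpers implementation):

    # Sketch of the "lowest unit number wins" fallback described in the
    # is_elected_leader docstring. Unit names are assumed to follow
    # Juju's "application/N" form.
    def oldest_surviving_unit(alive_units):
        """Return the unit with the lowest unit number."""
        return min(alive_units, key=lambda unit: int(unit.split('/')[-1]))

    assert oldest_surviving_unit(['ceph-mon/2', 'ceph-mon/0']) == 'ceph-mon/0'
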
diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index 63e91cca..b356d64c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None): @returns IPv6 or IPv4 address """ # Select the interface address first - # For possible use as a fallback bellow with get_address_in_network + # For possible use as a fallback below with get_address_in_network try: # Get the interface specific IP address = network_get_primary_address(interface) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 94ca079c..00000000 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. 
- """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt', - 'ceilometer-agent'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. 
- """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' - ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('bionic', 'cloud:bionic-stein'): self.bionic_stein, - ('bionic', 'cloud:bionic-train'): self.bionic_train, - ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, - ('cosmic', None): self.cosmic_rocky, - ('disco', None): self.disco_stein, - ('eoan', None): self.eoan_train, - ('focal', None): self.focal_ussuri, - ('focal', 'cloud:focal-victoria'): self.focal_victoria, - ('groovy', None): self.groovy_victoria, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_percona_service_entry(self, memory_constraint=None): - """Return a amulet service entry for percona cluster. - - :param memory_constraint: Override the default memory constraint - in the service entry. - :type memory_constraint: str - :returns: Amulet service entry. - :rtype: dict - """ - memory_constraint = memory_constraint or '3072M' - svc_entry = { - 'name': 'percona-cluster', - 'constraints': {'mem': memory_constraint}} - if self._get_openstack_release() <= self.trusty_mitaka: - svc_entry['location'] = 'cs:trusty/percona-cluster' - return svc_entry - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 0a14af7e..00000000 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1595 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', - 'xenial_newton', 'yakkety_newton', - 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', - 'xenial_queens', 'bionic_queens', - 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein', - 'bionic_train', 'eoan_train', - 'bionic_ussuri', 'focal_ussuri', - 'focal_victoria', 'groovy_victoria', -] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - """ - self.log.warn("Endpoint ID and Region ID validation is limited to not " - "null checks after v2 to v3 conversion") - for svc in ep_data.keys(): - assert len(ep_data[svc]) == 1, "Unknown data format" - svc_ep_data = ep_data[svc][0] - ep_data[svc] = [ - { - 'url': svc_ep_data['adminURL'], - 'interface': 'admin', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['publicURL'], - 'interface': 'public', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['internalURL'], - 'interface': 'internal', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}] - return ep_data - - def validate_svc_catalog_endpoint_data(self, expected, actual, - openstack_release=None): - """Validate service catalog endpoint data. Pick the correct validator - for the OpenStack version. Expected data should be in the v2 format: - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - - """ - validation_function = self.validate_v2_svc_catalog_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_svc_catalog_endpoint_data - expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) - return validation_function(expected, actual) - - def validate_v2_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. - - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. 
- """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. 
- """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. - """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - 
admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone, force_v1_client=False): - 
"""Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if not force_v1_client and keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def glance_create_image(self, glance, image_name, image_url, - download_dir='tests', - hypervisor_type=None, - disk_format='qcow2', - architecture='x86_64', - container_format='bare'): - """Download an image and upload it to glance, validate its status - and return an image object pointer. KVM defaults, can override for - LXD. 
- - :param glance: pointer to authenticated glance api connection - :param image_name: display name for new image - :param image_url: url to retrieve - :param download_dir: directory to store downloaded image file - :param hypervisor_type: glance image hypervisor property - :param disk_format: glance image disk format - :param architecture: glance image architecture property - :param container_format: glance image container format - :returns: glance image pointer - """ - self.log.debug('Creating glance image ({}) from ' - '{}...'.format(image_name, image_url)) - - # Download image - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - abs_file_name = os.path.join(download_dir, image_name) - if not os.path.exists(abs_file_name): - opener.retrieve(image_url, abs_file_name) - - # Create glance image - glance_properties = { - 'architecture': architecture, - } - if hypervisor_type: - glance_properties['hypervisor_type'] = hypervisor_type - # Create glance image - if float(glance.version) < 2.0: - with open(abs_file_name) as f: - image = glance.images.create( - name=image_name, - is_public=True, - disk_format=disk_format, - container_format=container_format, - properties=glance_properties, - data=f) - else: - image = glance.images.create( - name=image_name, - visibility="public", - disk_format=disk_format, - container_format=container_format) - glance.images.upload(image.id, open(abs_file_name, 'rb')) - glance.images.update(image.id, **glance_properties) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == container_format \ - and val_img_dfmt == disk_format: - self.log.debug(msg_attr) - else: - msg = ('Image validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def create_cirros_image(self, glance, image_name, hypervisor_type=None): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. 
- - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :param hypervisor_type: glance image hypervisor property - :returns: glance image pointer - """ - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'glance_create_image instead of ' - 'create_cirros_image.') - - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Get cirros image URL - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - f.close() - - return self.glance_create_image( - glance, - image_name, - cirros_url, - hypervisor_type=hypervisor_type) - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. 
- - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. - - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. 
- - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. 
The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. - """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. 
- - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. - - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. 
- - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. 
- """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. {} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 703fc6ef..5c961c58 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2018 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Common python helper functions used for OpenStack charm certificats. +# Common python helper functions used for OpenStack charm certificates. import os import json @@ -71,7 +71,7 @@ def __init__(self, json_encode=True): def add_entry(self, net_type, cn, addresses): """Add a request to the batch - :param net_type: str netwrok space name request is for + :param net_type: str network space name request is for :param cn: str Canonical Name for certificate :param addresses: [] List of addresses to be used as SANs """ @@ -85,7 +85,7 @@ def add_hostname_cn(self): addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units - # cert has the approriate vip in the SAN list + # cert has the appropriate vip in the SAN list vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) @@ -178,7 +178,7 @@ def get_certificate_request(json_encode=True, bindings=None): except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " "local address found".format(binding), WARNING) - # Gurantee all SANs are covered + # Guarantee all SANs are covered # These are network addresses with no corresponding hostname. # Add the ips to the hostname cert to allow for this. req.add_hostname_cn_ip(_sans) @@ -357,7 +357,7 @@ def process_certificates(service_name, relation_id, unit, bindings=None): """Process the certificates supplied down the relation - :param service_name: str Name of service the certifcates are for. + :param service_name: str Name of service the certificates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index b67dafda..54081f0c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
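
The cert_utils fixes above are spelling corrections, but the docstrings they touch describe a real pattern: certificate requests are batched per Canonical Name, each CN accumulating the SAN addresses found per network binding, and the batch is serialised for the certificates relation. The following is a toy standalone sketch of that batching idea only; TinyCertRequest and its JSON layout are hypothetical stand-ins, not the real CertRequest class or its relation payload format.

import json


class TinyCertRequest(object):
    # Toy stand-in for cert_utils.CertRequest: collect SANs per CN,
    # then render the whole batch as JSON for a relation payload.
    def __init__(self):
        self.entries = {}

    def add_entry(self, net_type, cn, addresses):
        # net_type selects the network binding in the real helper;
        # it is unused in this sketch.
        self.entries.setdefault(cn, set()).update(addresses)

    def get_request(self):
        return json.dumps({cn: {'sans': sorted(sans)}
                           for cn, sans in self.entries.items()},
                          sort_keys=True)


req = TinyCertRequest()
req.add_entry('internal', 'unit0.example.com', ['10.0.0.10'])
req.add_entry('public', 'unit0.example.com', ['203.0.113.10'])
print(req.get_request())
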
@@ -25,7 +25,10 @@ import time from base64 import b64decode -from subprocess import check_call, CalledProcessError +from subprocess import ( + check_call, + check_output, + CalledProcessError) import six @@ -453,18 +456,24 @@ def __call__(self): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host + int_host = rdata.get('internal_host') + int_host = format_ipv6_addr(int_host) or int_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + int_protocol = rdata.get('internal_protocol') or 'http' api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), + 'internal_host': int_host, + 'internal_port': rdata.get('internal_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), 'admin_password': rdata.get('service_password'), 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, + 'internal_protocol': int_protocol, 'api_version': api_version}) if float(api_version) > 2: @@ -1358,7 +1367,7 @@ def resolve_ports(self, ports): mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) for entry in ports: if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address + # NIC is in known NICs and does NOT have an IP address if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: # If the nic is part of a bridge then don't use it if is_bridge_member(hwaddr_to_nic[entry]): @@ -1781,6 +1790,10 @@ def __call__(self): 'rel_key': 'enable-port-forwarding', 'default': False, }, + 'enable_fwaas': { + 'rel_key': 'enable-fwaas', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1815,6 +1828,11 @@ def __call__(self): if ctxt['enable_port_forwarding']: l3_extension_plugins.append('port_forwarding') + if ctxt['enable_fwaas']: + l3_extension_plugins.append('fwaas_v2') + if ctxt['enable_nfg_logging']: + l3_extension_plugins.append('fwaas_v2_log') + ctxt['l3_extension_plugins'] = l3_extension_plugins return ctxt @@ -2379,6 +2397,12 @@ def __call__(self): ctxt['enable_metadata_network'] = True ctxt['enable_isolated_metadata'] = True + ctxt['append_ovs_config'] = False + cmp_release = CompareOpenStackReleases( + os_release('neutron-common', base='icehouse')) + if cmp_release >= 'queens' and config('enable-dpdk'): + ctxt['append_ovs_config'] = True + return ctxt @staticmethod @@ -2570,22 +2594,48 @@ def cpu_mask(self): :returns: hex formatted CPU mask :rtype: str """ - num_cores = config('dpdk-socket-cores') - mask = 0 + return self.cpu_masks()['dpdk_lcore_mask'] + + def cpu_masks(self): + """Get hex formatted CPU masks + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit, followed by the + next config:pmd-socket-cores + + :returns: Dict of hex formatted CPU masks + :rtype: Dict[str, str] + """ + num_lcores = config('dpdk-socket-cores') + pmd_cores = config('pmd-socket-cores') + lcore_mask = 0 + pmd_mask = 0 for cores in self._numa_node_cores().values(): - for core in cores[:num_cores]: - mask = mask | 1 << core - return format(mask, '#04x') + for core in cores[:num_lcores]: + lcore_mask = lcore_mask | 1 << core + for core in cores[num_lcores:][:pmd_cores]: + pmd_mask = pmd_mask | 1 << core + return { + 'pmd_cpu_mask': format(pmd_mask, '#04x'), + 
'dpdk_lcore_mask': format(lcore_mask, '#04x')} def socket_memory(self): - """Formatted list of socket memory configuration per NUMA node + """Formatted list of socket memory configuration per socket. - :returns: socket memory configuration per NUMA node + :returns: socket memory configuration per socket. :rtype: str """ + lscpu_out = check_output( + ['lscpu', '-p=socket']).decode('UTF-8').strip() + sockets = set() + for line in lscpu_out.split('\n'): + try: + sockets.add(int(line)) + except ValueError: + # lscpu output is headed by comments so ignore them. + pass sm_size = config('dpdk-socket-memory') - node_regex = '/sys/devices/system/node/node*' - mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + mem_list = [str(sm_size) for _ in sockets] if mem_list: return ','.join(mem_list) else: @@ -2650,7 +2700,7 @@ def __call__(self): class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interaces from charm configuration. + """Build a map of bridge ports and interfaces from charm configuration. NOTE: the handling of this detail in the charm is pre-deprecated. @@ -3099,7 +3149,7 @@ def _get_capped_numvfs(requested): actual = min(int(requested), int(device.sriov_totalvfs)) if actual < int(requested): log('Requested VFs ({}) too high for device {}. Falling back ' - 'to value supprted by device: {}' + 'to value supported by device: {}' .format(requested, device.interface_name, device.sriov_totalvfs), level=WARNING) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py index 8765ee31..94eacf6c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -244,7 +244,7 @@ def get_deferred_restarts(): def clear_deferred_restarts(services): - """Clear deferred restart events targetted at `services`. + """Clear deferred restart events targeted at `services`. :param services: Services with deferred actions to clear. :type services: List[str] @@ -253,7 +253,7 @@ def clear_deferred_restarts(services): def process_svc_restart(service): - """Respond to a service restart having occured. + """Respond to a service restart having occurred. :param service: Services that the action was performed against. :type service: str diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py index 344a7662..431e972b 100755 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""This script is an implemenation of policy-rc.d +"""This script is an implementation of policy-rc.d For further information on policy-rc.d see *1 diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index fb5607f3..b41314cb 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Various utilies for dealing with Neutron and the renaming from Quantum. +# Various utilities for dealing with Neutron and the renaming from Quantum. import six from subprocess import check_output @@ -251,7 +251,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): def network_manager(): ''' Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, + that require compatibility (eg, deploying H with network-manager=quantum, upgrading from G). ''' release = os_release('nova-common') diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py index f2bb21e9..6fa06f26 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ The functions should be called from the install and upgrade hooks in the charm. The `maybe_do_policyd_overrides_on_config_changed` function is designed to be called on the config-changed hook, in that it does an additional check to -ensure that an already overriden policy.d in an upgrade or install hooks isn't +ensure that an already overridden policy.d in an upgrade or install hooks isn't repeated. In order the *enable* this functionality, the charm's install, config_changed, @@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): """This function is designed to be called from the config changed hook. diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 1656bd43..d5d301e6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
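
The socket_memory() rework a little further up (in context.py) stops globbing /sys NUMA node directories and instead derives the socket list by parsing `lscpu -p=socket` output, skipping its comment header. A standalone sketch of that parsing, fed a captured sample instead of shelling out; the sample text and the 1024 MB figure are illustrative, not values from the patch.

# Parse `lscpu -p=socket`-style output into a per-socket DPDK
# socket-memory string, ignoring the comment header lines.
SAMPLE_LSCPU = """# The following is the parsable format.
# Socket
0
0
1
1"""


def socket_memory_string(lscpu_out, sm_size=1024):
    sockets = set()
    for line in lscpu_out.strip().split('\n'):
        try:
            sockets.add(int(line))
        except ValueError:
            # lscpu output is headed by comments; ignore them.
            pass
    # one memory allocation per discovered socket
    return ','.join(str(sm_size) for _ in sockets)


print(socket_memory_string(SAMPLE_LSCPU))  # two sockets -> '1024,1024'
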
@@ -106,6 +106,8 @@ filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, + OPENSTACK_RELEASES, + UBUNTU_OPENSTACK_RELEASE, ) from charmhelpers.fetch.snap import ( @@ -132,54 +134,9 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), -]) - - OPENSTACK_CODENAMES = OrderedDict([ + # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version + # number. This just means the i-th version of the year yyyy. ('2011.2', 'diablo'), ('2012.1', 'essex'), ('2012.2', 'folsom'), @@ -200,6 +157,8 @@ ('2020.1', 'ussuri'), ('2020.2', 'victoria'), ('2021.1', 'wallaby'), + ('2021.2', 'xena'), + ('2022.1', 'yoga'), ]) # The ugly duckling - must list releases oldest to newest @@ -701,7 +660,7 @@ def import_key(keyid): def get_source_and_pgp_key(source_and_key): """Look for a pgp key ID or ascii-armor key in the given input. - :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + :param source_and_key: String, "source_spec|keyid" where '|keyid' is optional. :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id if there was no '|' in the source_and_key string. @@ -721,7 +680,7 @@ def configure_installation_source(source_plus_key): The functionality is provided by charmhelpers.fetch.add_source() The difference between the two functions is that add_source() signature requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation + optional key by appending '|' to the end of the source specification 'source'. Another difference from add_source() is that the function calls sys.exit(1) @@ -808,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'): def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been recieved for an endpoint. + """Whether a new notification has been received for an endpoint. :param service_name: Service name eg nova, neutron, placement etc :type service_name: str @@ -834,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'): def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the enpoint triggers in db so it can be tracked if they changed. + """Save the endpoint triggers in db so it can be tracked if they changed. :param service_names: List of service name. 
:type service_name: List @@ -1502,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None): if remote_service: trigger['remote-service'] = remote_service for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using + # This subordinate can be related to two separate services using # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is + # the principle is connected down the relation we think it is if related_units(relid=rid): relation_set(relation_id=rid, relation_settings=trigger, @@ -1621,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. An optional charm_func() can be called. It should raise an Exception to - indicate that the function failed. If it was succesfull it should return + indicate that the function failed. If it was successful it should return None or an optional message. The signature for charm_func is: @@ -1880,7 +1839,7 @@ def some_hook(...): :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: decorator to use a restart_on_change with pausability :rtype: decorator diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4ee6c1db..e5418c39 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -1,4 +1,4 @@ -# Copyright 2018 Canonical Limited. +# Copyright 2018-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ def __call__(self): "but it's not available. Is secrets-stroage relation " "made, but encrypt option not set?", level=hookenv.WARNING) - # return an emptry context on hvac import error + # return an empty context on hvac import error return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index d1c61754..3eb46d70 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Copyright 2012 Canonical Ltd. 
-# # This file is sourced from lp:openstack-charm-helpers # # Authors: @@ -605,7 +602,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, class Pool(BasePool): - """Compability shim for any descendents external to this library.""" + """Compatibility shim for any descendents external to this library.""" @deprecate( 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') @@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" + """Determine whether a filesystem is already mounted.""" return fs in [f for f, m in mounts()] @@ -1904,7 +1901,7 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, set the ceph-mon unit handling the broker request will set its default value. :type erasure_profile: str - :param allow_ec_overwrites: allow EC pools to be overriden + :param allow_ec_overwrites: allow EC pools to be overridden :type allow_ec_overwrites: bool :raises: AssertionError if provided data is of invalid type/range """ @@ -1949,7 +1946,7 @@ def add_op_create_erasure_profile(self, name, :param lrc_locality: Group the coding and data chunks into sets of size locality (lrc plugin) :type lrc_locality: int - :param durability_estimator: The number of parity chuncks each of which includes + :param durability_estimator: The number of parity chunks each of which includes a data chunk in its calculation range (shec plugin) :type durability_estimator: int :param helper_chunks: The number of helper chunks to use for recovery operations @@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext): settings are in conf['osd_from_client'] and finally settings which do clash are in conf['osd_from_client_conflict']. Rather than silently drop the conflicting settings they are provided in the context so they can be - rendered commented out to give some visability to the admin. + rendered commented out to give some visibility to the admin. """ def __init__(self, permitted_sections=None): diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index c8bde692..d0a57211 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ ################################################## def deactivate_lvm_volume_group(block_device): ''' - Deactivate any volume gruop associated with an LVM physical volume. + Deactivate any volume group associated with an LVM physical volume. :param block_device: str: Full path to LVM physical volume ''' diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index 47eebb51..e94247a2 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2013-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. "Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
# # Authors: # Charm Helpers Developers @@ -610,7 +609,7 @@ def expected_related_units(reltype=None): relation_type())) :param reltype: Relation type to list data for, default is to list data for - the realtion type we are currently executing a hook for. + the relation type we are currently executing a hook for. :type reltype: str :returns: iterator :rtype: types.GeneratorType @@ -627,7 +626,7 @@ def expected_related_units(reltype=None): @cached def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" + """Get the json representation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None): def _contains_range(addresses): """Check for cidr or wildcard domain in a string. - Given a string comprising a comma seperated list of ip addresses + Given a string comprising a comma separated list of ip addresses and domain names, determine whether the string contains IP ranges or wildcard domains. - :param addresses: comma seperated list of domains and ip addresses. + :param addresses: comma separated list of domains and ip addresses. :type addresses: str """ return ( diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index d25e6c59..994ec8a0 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service. + Re-enable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir @@ -727,7 +727,7 @@ def __init__(self, restart_map, stopstart=False, restart_functions=None, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] """ self.restart_map = restart_map @@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: result of lambda_f() :rtype: ANY @@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. 
:type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: @@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums, def pwgen(length=None): - """Generate a random pasword.""" + """Generate a random password.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 5aa4196d..e710c0e0 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -28,6 +28,7 @@ 'focal', 'groovy', 'hirsute', + 'impish', ) diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index e8df0452..28c6b3f5 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -18,8 +18,11 @@ import six import re +TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} +FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} -def bool_from_string(value): + +def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): """Interpret string value as boolean. Returns True if value translates to True otherwise False. @@ -32,9 +35,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't', 'on']: + if value in truthy_strings: return True - elif value in ['n', 'no', 'false', 'f', 'off']: + elif value in falsey_strings or assume_false: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index ab554327..d9b8d0b0 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) @@ -449,7 +449,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 5b689f5b..9497ee05 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
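
The strutils change above turns the hard-coded truthy/falsey lists into overridable keyword arguments and adds an assume_false fallback. A standalone mirror of the new logic (minus charm-helpers' six string-type check) so the keywords can be exercised without charm-helpers installed:

TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'}


def bool_from_string(value, truthy_strings=TRUTHY_STRINGS,
                     falsey_strings=FALSEY_STRINGS, assume_false=False):
    # Interpret a string as a boolean; unknown values raise unless
    # assume_false says to treat them as False.
    value = value.strip().lower()
    if value in truthy_strings:
        return True
    elif value in falsey_strings or assume_false:
        return False
    raise ValueError(
        "Unable to interpret string value '%s' as boolean" % (value,))


print(bool_from_string('Yes'))                                  # True
print(bool_from_string('enabled', truthy_strings={'enabled'}))  # True
print(bool_from_string('maybe', assume_false=True))             # False
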
@@ -106,6 +106,8 @@ def base_url(self, url): apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env get_installed_version = fetch.get_installed_version + OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES + UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE elif __platform__ == "centos": yum_search = fetch.yum_search @@ -203,7 +205,7 @@ def plugins(fetch_handlers=None): classname) plugin_list.append(handler_class()) except NotImplementedError: - # Skip missing plugins so that they can be ommitted from + # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( handler_name)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/packages.py b/ceph-osd/hooks/charmhelpers/fetch/python/packages.py index 6e95028b..60048354 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-osd/hooks/charmhelpers/fetch/python/packages.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. + """Overridden pip_execute() to stop sys.path being changed. The act of importing main from the pip module seems to cause add wheels from the /usr/share/python-wheels which are installed by various tools. @@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" if six.PY2: apt_install('python-virtualenv') + extra_flags = [] else: - apt_install('python3-virtualenv') + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path @@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None): venv_path = os.path.join(charm_dir(), 'venv') if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) + subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py index fc70aa94..36d6bce9 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/snap.py +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -1,4 +1,4 @@ -# Copyright 2014-2017 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ def _snap_exec(commands): retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( - 'Could not aquire lock after {} attempts' + 'Could not acquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 812a11a2..6c7cf6fc 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
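
The snap.py hunk above only fixes the wording of the lock error, but the surrounding _snap_exec loop illustrates a useful pattern: retry a command while it fails with the "couldn't acquire lock" exit code, giving up after a fixed budget. A simplified standalone sketch; the exit code, delay, and retry count here are illustrative stand-ins, not snapd's real constants.

import subprocess
import time

LOCK_EXIT_CODE = 1   # assumed exit code signalling lock contention
RETRY_COUNT = 5      # assumed retry budget
RETRY_DELAY = 10     # assumed seconds between attempts


def run_with_lock_retry(cmd):
    # Retry cmd while it exits with LOCK_EXIT_CODE; re-raise any
    # other failure immediately.
    attempts = 0
    while True:
        try:
            return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            if e.returncode != LOCK_EXIT_CODE:
                raise
            attempts += 1
            if attempts > RETRY_COUNT:
                raise RuntimeError('Could not acquire lock after {} '
                                   'attempts'.format(RETRY_COUNT))
            time.sleep(RETRY_DELAY)
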
@@ -208,12 +208,79 @@ 'wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby', + # Xena + 'xena': 'focal-updates/xena', + 'focal-xena': 'focal-updates/xena', + 'focal-xena/updates': 'focal-updates/xena', + 'focal-updates/xena': 'focal-updates/xena', + 'xena/proposed': 'focal-proposed/xena', + 'focal-xena/proposed': 'focal-proposed/xena', + 'focal-proposed/xena': 'focal-proposed/xena', + # Yoga + 'yoga': 'focal-updates/yoga', + 'focal-yoga': 'focal-updates/yoga', + 'focal-yoga/updates': 'focal-updates/yoga', + 'focal-updates/yoga': 'focal-updates/yoga', + 'yoga/proposed': 'focal-proposed/yoga', + 'focal-yoga/proposed': 'focal-proposed/yoga', + 'focal-proposed/yoga': 'focal-proposed/yoga', } +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', + 'victoria', + 'wallaby', + 'xena', + 'yoga', +) + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ('groovy', 'victoria'), + ('hirsute', 'wallaby'), + ('impish', 'xena'), +]) + + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -246,9 +313,9 @@ def filter_missing_packages(packages): def apt_cache(*_, **__): """Shim returning an object simulating the apt_pkg Cache. - :param _: Accept arguments for compability, not used. + :param _: Accept arguments for compatibility, not used. :type _: any - :param __: Accept keyword arguments for compability, not used. + :param __: Accept keyword arguments for compatibility, not used. :type __: any :returns:Object used to interrogate the system apt and dpkg databases. :rtype:ubuntu_apt_pkg.Cache @@ -283,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :param fatal: Whether the command's output should be checked and retried. :type fatal: bool - :param quiet: if True (default), supress log message to stdout/stderr + :param quiet: if True (default), suppress log message to stdout/stderr :type quiet: bool :raises: subprocess.CalledProcessError """ @@ -397,7 +464,7 @@ def import_key(key): A Radix64 format keyid is also supported for backwards compatibility. In this case Ubuntu keyserver will be queried for a key via HTTPS by its keyid. This method - is less preferrable because https proxy servers may + is less preferable because https proxy servers may require traffic decryption which is equivalent to a man-in-the-middle attack (a proxy server impersonates keyserver TLS certificates and has to be explicitly @@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False): with be used. If staging is NOT used then the cloud archive [3] will be added, and the 'ubuntu-cloud-keyring' package will be added for the current distro. 
+      '<openstack-version>': translate to cloud:<release> based on the current
+         distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri'
+         or 'distro').
+      '<openstack-version>/proposed': as above, but for proposed.

      Otherwise the source is not recognised and this is logged to the juju log.
      However, no error is raised, unless sys_error_on_exit is True.

@@ -592,7 +663,7 @@
      id may also be used, but be aware that only insecure protocols are
      available to retrieve the actual public key from a public keyserver
      placing your Juju environment at risk. ppa and cloud archive keys
-      are securely added automtically, so sould not be provided.
+      are securely added automatically, so should not be provided.

    @param fail_invalid: (boolean) if True, then the function raises a
      SourceConfigError if there is no matching installation source.

@@ -600,6 +671,12 @@
    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
      valid pocket in CLOUD_ARCHIVE_POCKETS
    """
+    # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
+    # the list in contrib.openstack.utils as it might not be included in
+    # classic charms and would break everything. Having OpenStack specific
+    # code in this file is a bit of an antipattern, anyway.
+    os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
+
    _mapping = OrderedDict([
        (r"^distro$", lambda: None),  # This is a NOP
        (r"^(?:proposed|distro-proposed)$", _add_proposed),
@@ -609,6 +686,9 @@
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^{}\/proposed$".format(os_versions_regex),
+         _add_bare_openstack_proposed),
+        (r"^{}$".format(os_versions_regex), _add_bare_openstack),
    ])
    if source is None:
        source = ''
@@ -640,7 +720,7 @@ def _add_proposed():
    Uses get_distrib_codename to determine the correct stanza for
    the deb line.

-    For intel architecutres PROPOSED_POCKET is used for the release, but for
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
    other architectures PROPOSED_PORTS_POCKET is used for the release.
    """
    release = get_distrib_codename()
@@ -662,7 +742,8 @@ def _add_apt_repository(spec):
    series = get_distrib_codename()
    spec = spec.replace('{series}', series)
    _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https', 'http']))
+                      cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
+                      )


def _add_cloud_pocket(pocket):
@@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
                                'version ({})'.format(release, os_release,
                                                      ubuntu_rel))


+def _add_bare_openstack(openstack_release):
+    """Add cloud or distro based on the release given.
+
+    The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
+    or 'distro' depending on whether the ubuntu release is bionic or focal.
+
+    :param openstack_release: the OpenStack codename to determine the release
+        for.
+    :type openstack_release: str
+    :raises: SourceConfigError
+    """
+    # TODO(ajkavanagh) - surely this means we should be removing cloud archives
+    # if they exist?
+    __add_bare_helper(openstack_release, "{}-{}", lambda: None)
+
+
+def _add_bare_openstack_proposed(openstack_release):
+    """Add cloud or distro but with proposed.
+
+    The spec given is, say, 'ussuri' but this could apply
+    cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
+    ubuntu release is bionic or focal.
+ + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) + + +def __add_bare_helper(openstack_release, pocket_format, final_function): + """Helper for _add_bare_openstack[_proposed] + + The bulk of the work between the two functions is exactly the same except + for the pocket format and the function that is run if it's the distro + version. + + :param openstack_release: the OpenStack codename. e.g. ussuri + :type openstack_release: str + :param pocket_format: the pocket formatter string to construct a pocket str + from the openstack_release and the current ubuntu version. + :type pocket_format: str + :param final_function: the function to call if it is the distro version. + :type final_function: Callable + :raises SourceConfigError on error + """ + ubuntu_version = get_distrib_codename() + possible_pocket = pocket_format.format(ubuntu_version, openstack_release) + if possible_pocket in CLOUD_ARCHIVE_POCKETS: + _add_cloud_pocket(possible_pocket) + return + # Otherwise it's almost certainly the distro version; verify that it + # exists. + try: + assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release + except KeyError: + raise SourceConfigError( + "Invalid ubuntu version {} isn't known to this library" + .format(ubuntu_version)) + except AssertionError: + raise SourceConfigError( + 'Invalid OpenStack release specified: {} for Ubuntu version {}' + .format(openstack_release, ubuntu_version)) + final_function() + + def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index a2fbe0e5..436e1776 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -209,7 +209,7 @@ def _populate(self): def init(): - """Compability shim that does nothing.""" + """Compatibility shim that does nothing.""" pass @@ -264,7 +264,7 @@ def version_compare(a, b): else: raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' - 'less than each other.') + 'less than each other.'.format(a, b)) class PkgVersion(): diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py index 78c81af5..1ace468f 100644 --- a/ceph-osd/hooks/charmhelpers/osplatform.py +++ b/ceph-osd/hooks/charmhelpers/osplatform.py @@ -28,6 +28,9 @@ def get_platform(): elif "elementary" in current_platform: # ElementaryOS fails to run tests locally without this. return "ubuntu" + elif "Pop!_OS" in current_platform: + # Pop!_OS also fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 6a399fa9..4e322d5e 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -2,3 +2,9 @@ templates: - charm-unit-jobs - charm-functional-jobs + check: + jobs: + - focal-xena: + voting: false + - impish-xena: + voting: false diff --git a/ceph-osd/pip.sh b/ceph-osd/pip.sh new file mode 100755 index 00000000..9a7e6b09 --- /dev/null +++ b/ceph-osd/pip.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# setuptools 58.0 dropped the support for use_2to3=true which is needed to +# install blessings (an indirect dependency of charm-tools). +# +# More details on the behavior of tox and virtualenv creation can be found at +# https://github.com/tox-dev/tox/issues/448 +# +# This script is a wrapper to force the use of the pinned versions early in the +# process when the virtualenv was created and upgraded before installing the +# dependencies declared in the target. +pip install 'pip<20.3' 'setuptools<50.0.0' +pip "$@" diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..932d2bb6 --- /dev/null +++ b/ceph-osd/tests/bundles/focal-xena.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + 
glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/impish-xena.yaml b/ceph-osd/tests/bundles/impish-xena.yaml new file mode 100644 index 00000000..639ff3d5 --- /dev/null +++ b/ceph-osd/tests/bundles/impish-xena.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin distro + +series: impish + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 
'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 4d8ca8b9..1938e14b 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -2,6 +2,7 @@ charm_name: ceph-osd gate_bundles: - groovy-victoria + - focal-xena - focal-wallaby - focal-victoria - focal-ussuri @@ -18,6 +19,7 @@ dev_bundles: - xenial-queens - bionic-rocky - hirsute-wallaby + - impish-xena smoke_bundles: - bionic-train @@ -34,5 +36,7 @@ tests: tests_options: force_deploy: - - hirsute-wallaby - trusty-mitaka + - groovy-victoria + - hirsute-wallaby + - impish-xena diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 9ba3f9fe..ba4fd5b6 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -22,19 +22,22 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt From c057641a3e4afe3e3df073f68b0877f1cb5ee6f1 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Fri, 5 Mar 2021 17:01:14 +1300 Subject: [PATCH 2278/2699] Add get-quorum-status action Adds a new get-quorum-status action to return some distilled info from 'ceph quorum_status', primarily for verification of which mon units are online. Partial-Bug: #1917690 Change-Id: I608832d849ee3e4f5d150082c328b63c6ab43de7 --- ceph-mon/actions.yaml | 10 ++++++ ceph-mon/actions/ceph_ops.py | 29 +++++++++++++++- ceph-mon/actions/get-quorum-status | 1 + ceph-mon/actions/get_quorum_status.py | 37 ++++++++++++++++++++ ceph-mon/unit_tests/test_actions_mon.py | 45 +++++++++++++++++++++++-- 5 files changed, 119 insertions(+), 3 deletions(-) create mode 120000 ceph-mon/actions/get-quorum-status create mode 100755 ceph-mon/actions/get_quorum_status.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index e93645f6..c1ca254c 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -395,3 +395,13 @@ change-osd-weight: required: - osd - weight +get-quorum-status: + description: "Return lists of the known mons, and online mons, to determine if there is quorum." + params: + format: + type: string + default: text + enum: + - text + - json + description: Specify output format (text|json). 
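For reference, once this change lands the new action can be invoked against any ceph-mon unit in the usual Juju way. This is a usage sketch: the unit name is illustrative, and the format parameter may be omitted to get the default text output.

    juju run-action --wait ceph-mon/0 get-quorum-status format=json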
diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index 875fe88d..5cc7b13a 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json from subprocess import CalledProcessError, check_output import sys sys.path.append('hooks') -from charmhelpers.core.hookenv import action_get, action_fail +from charmhelpers.core.hookenv import ( + action_get, + action_fail, +) from charmhelpers.contrib.storage.linux.ceph import pool_set, \ set_pool_quota, snapshot_pool, remove_pool_snapshot @@ -143,3 +147,26 @@ def snapshot_ceph_pool(): snapshot_pool(service='ceph', pool_name=pool_name, snapshot_name=snapshot_name) + + +def get_quorum_status(format_type="text"): + """ + Return the output of 'ceph quorum_status'. + + On error, function_fail() is called with the exception info. + """ + ceph_output = check_output(['ceph', 'quorum_status'], + timeout=60).decode("utf-8") + ceph_output_json = json.loads(ceph_output) + + if format_type == "json": + return {"message": json.dumps(ceph_output_json)} + else: + return { + "election-epoch": ceph_output_json.get("election_epoch"), + "quorum-age": ceph_output_json.get("quorum_age"), + "quorum-leader-name": ceph_output_json.get("quorum_leader_name", + "unknown"), + "quorum-names": ", ".join(ceph_output_json.get("quorum_names", + [])), + } diff --git a/ceph-mon/actions/get-quorum-status b/ceph-mon/actions/get-quorum-status new file mode 120000 index 00000000..2ec9f01b --- /dev/null +++ b/ceph-mon/actions/get-quorum-status @@ -0,0 +1 @@ +get_quorum_status.py \ No newline at end of file diff --git a/ceph-mon/actions/get_quorum_status.py b/ceph-mon/actions/get_quorum_status.py new file mode 100755 index 00000000..31f04890 --- /dev/null +++ b/ceph-mon/actions/get_quorum_status.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run action to collect Ceph quorum_status output.""" +import json +import sys + +from subprocess import CalledProcessError + +sys.path.append('hooks') + +from ceph_ops import get_quorum_status +from charmhelpers.core.hookenv import function_fail, function_get, function_set + +if __name__ == "__main__": + """Run action to collect Ceph quorum_status output.""" + try: + function_set(get_quorum_status(function_get("format"))) + except CalledProcessError as error: + function_fail("Failed to run ceph quorum_status, {}".format(error)) + except (json.decoder.JSONDecodeError, KeyError) as error: + function_fail( + "Failed to parse ceph quorum_status output. {}".format(error) + ) diff --git a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py index a4425aa1..fb749bf9 100644 --- a/ceph-mon/unit_tests/test_actions_mon.py +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -10,9 +10,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. - -from mock import mock +import json import sys +from mock import mock from test_utils import CharmTestCase @@ -52,3 +52,44 @@ def test_get_health(self): actions.get_health() cmd = ['ceph', 'health'] self.check_output.assert_called_once_with(cmd) + + @mock.patch('socket.gethostname') + def test_get_quorum_status(self, mock_hostname): + mock_hostname.return_value = 'mockhost' + cmd_out = ( + '{"election_epoch":4,"quorum":[0,1,2],"quorum_names":["juju-18410c' + '-zaza-b7061340ed19-1","juju-18410c-zaza-b7061340ed19-0","juju-184' + '10c-zaza-b7061340ed19-2"],"quorum_leader_name":"juju-18410c-zaza-' + 'b7061340ed19-1","quorum_age":97785,"monmap":{"epoch":1,"fsid":"4f' + '9dd22a-1b71-11ec-a02a-fa163ee765d3","modified":"2021-09-22 06:51:' + '10.975225","created":"2021-09-22 06:51:10.975225","min_mon_releas' + 'e":14,"min_mon_release_name":"nautilus","features":{"persistent":' + '["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional' + '":[]},"mons":[{"rank":0,"name":"juju-18410c-zaza-b7061340ed19-1",' + '"public_addrs":{"addrvec":[{"type":"v2","addr":"10.5.0.122:3300",' + '"nonce":0},{"type":"v1","addr":"10.5.0.122:6789","nonce":0}]},"ad' + 'dr":"10.5.0.122:6789/0","public_addr":"10.5.0.122:6789/0"},{"rank' + '":1,"name":"juju-18410c-zaza-b7061340ed19-0","public_addrs":{"add' + 'rvec":[{"type":"v2","addr":"10.5.2.239:3300","nonce":0},{"type":"' + 'v1","addr":"10.5.2.239:6789","nonce":0}]},"addr":"10.5.2.239:6789' + '/0","public_addr":"10.5.2.239:6789/0"},{"rank":2,"name":"juju-184' + '10c-zaza-b7061340ed19-2","public_addrs":{"addrvec":[{"type":"v2",' + '"addr":"10.5.3.201:3300","nonce":0},{"type":"v1","addr":"10.5.3.2' + '01:6789","nonce":0}]},"addr":"10.5.3.201:6789/0","public_addr":"1' + '0.5.3.201:6789/0"}]}}' + ) + self.check_output.return_value = cmd_out.encode() + + result = actions.get_quorum_status() + self.assertDictEqual(result, { + "election-epoch": 4, + "quorum-age": 97785, + "quorum-names": "juju-18410c-zaza-b7061340ed19-1, " + "juju-18410c-zaza-b7061340ed19-0, " + "juju-18410c-zaza-b7061340ed19-2", + "quorum-leader-name": "juju-18410c-zaza-b7061340ed19-1", + }) + + result = actions.get_quorum_status(format_type="json") + self.assertDictEqual(json.loads(result["message"]), + json.loads(cmd_out)) From 62c52a6209edad2c15f974d2fdc960f8cbc98d23 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Mon, 27 Sep 2021 14:26:09 -0400 Subject: [PATCH 2279/2699] Fix up README There was a section out of place at the very bottom. I took the opportunity to include various improvements throughout. Change-Id: I58e1394c6cb7371945111ba34506d02e2723f767 --- ceph-dashboard/README.md | 73 ++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index 6b9007a0..2d81704d 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -14,8 +14,8 @@ on configuring applications. #### `grafana-api-url` -Sets the url of the grafana api when using embedded graphs. See -[Embedded Grafana Dashboards](#Embedded-Grafana-Dashboards) +Sets the URL of the Grafana API when using embedded graphs. See +[Embedded Grafana dashboards][anchor-grafana-dashboards]. #### `public-hostname` @@ -29,9 +29,9 @@ is created or changes their password. #### `password-*` -There are a number of `password-*` options which impose constraints -on which passwords can be used. 
These options are ignored unless -`enable-password-policy` is set to `True`. +There are a number of `password-*` options which impose constraints on which +passwords can be used. These options are ignored unless +`enable-password-policy` is set to 'True'. ## Deployment @@ -42,7 +42,6 @@ Deploy the ceph-dashboard as a subordinate to the ceph-mon charm. juju deploy ceph-dashboard juju add-relation ceph-dashboard:dashboard ceph-mon:dashboard - TLS is a requirement for this charm. Enable it by adding a relation to the vault application: @@ -54,11 +53,11 @@ See [Managing TLS certificates][cdg-tls] in the > **Note**: This charm also supports TLS configuration via charm options `ssl_cert`, `ssl_key`, and `ssl_ca`. +## Embedded Grafana dashboards -## Embedded Grafana Dashboards - -To enable the embedded grafana dashboards within the Ceph dashboard -some additional relations are needed. +To embed Grafana dashboards within the Ceph dashboard some additional relations +are required (Grafana, Telegraf, and Prometheus are assumed to be +pre-existing): juju add-relation ceph-dashboard:grafana-dashboard grafana:dashboards juju add-relation ceph-dashboard:prometheus prometheus:website @@ -66,59 +65,69 @@ some additional relations are needed. juju add-relation ceph-osd:juju-info telegraf:juju-info juju add-relation ceph-mon:juju-info telegraf:juju-info -Grafana, Telegraf and Prometheus should be related in the standard way +Grafana, Telegraf, and Prometheus should be related in the standard way: juju add-relation grafana:grafana-source prometheus:grafana-source juju add-relation telegraf:prometheus-client prometheus:target juju add-relation telegraf:dashboards grafana:dashboards - When Grafana is integrated with the Ceph Dashboard it requires TLS, so add a relation to Vault (the grafana charm also supports TLS configuration via -ssl\_\* charm options): +`ssl_*` charm options): juju add-relation grafana:certificates vault:certificates -Grafana should be set with the following charm options: +Grafana should be configured with the following charm options: juju config grafana anonymous=True juju config grafana allow_embedding=True The grafana charm also requires the vonage-status-panel and -grafana-piechart-panel plugins. The Grafana charm `install_plugins` -config option should be set to include URLs from which these plugins -can be downloaded. They are currently available from -https://storage.googleapis.com/plugins-community. For example: +grafana-piechart-panel plugins. The `install_plugins` configuration option +should be set to include URLs from which these plugins can be downloaded. They +are currently available from https://storage.googleapis.com/plugins-community. +For example: juju config grafana install_plugins="https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip" -Telegraf should be set with the following charm options: +Telegraf should be configured with the following charm option: juju config telegraf hostname="{host}" > **Note**: The above command is to be invoked verbatim; no substitution is -required. + required. -Currently the dashboard cannot autodect the api endpoint of the grafana -service, so the end of the deployment run the following: +Currently the dashboard does not autodetect the API endpoint of the Grafana +service. 
It needs to be provided via a configuration option: - juju config ceph-dashboard grafana-api-url="https://<IP of grafana unit>:3000" + juju config ceph-dashboard grafana-api-url="https://<IP of grafana unit>:3000" -## Enabling Prometheus Alerting +## Prometheus alerting -To enable Prometheus alerting, add the following relations: +To enable alerting for an existing Prometheus service add the following +relations: juju add-relation ceph-dashboard:prometheus prometheus:website juju add-relation ceph-mon:prometheus prometheus:target juju add-relation ceph-dashboard:alertmanager-service prometheus-alertmanager:alertmanager-service juju add-relation prometheus:alertmanager-service prometheus-alertmanager:alertmanager-service +## Ceph Object storage + +To enable Object storage management of an existing Ceph RADOS Gateway service +add the following relation: + + juju relate ceph-dashboard:radosgw-dashboard ceph-radosgw:radosgw-user + +> **Note**: For Ceph versions older than Pacific the dashboard can only be + related to a single ceph-radosgw application. + ## Actions This section lists Juju [actions][juju-docs-actions] supported by the charm. Actions allow specific operations to be performed on a per-unit basis. To -display action descriptions run `juju actions --schema add-user`. If the charm -is not deployed then see file `actions.yaml`. +display action descriptions run `juju actions --schema ceph-dashboard`. If the +charm is not deployed then see file `actions.yaml`. * `add-user` * `delete-user` @@ -131,19 +140,10 @@ The OpenStack Charms project maintains two documentation guides: and support notes * [OpenStack Charms Deployment Guide][cdg]: for charm usage information - # Bugs Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. -## Object Gateway - -To enable object gateway management add the following relation: - - juju relate ceph-dashboard:radosgw-dashboard ceph-radosgw:radosgw-user - -NOTE: On Octopus or earlier the dashboard can only be related to one ceph-radosgw application.
- [juju-docs-actions]: https://juju.is/docs/working-with-actions @@ -153,3 +153,4 @@ NOTE: On Octopus or earlier the dashboard can only be related to one ceph-radosg [cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide [cdg-tls]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-certificate-management.html [lp-bugs-charm-ceph-dashboard]: https://bugs.launchpad.net/charm-ceph-dashboard +[anchor-grafana-dashboards]: #embedded-grafana-dashboards From 4640498c3b00b7e879b166c6ec52c0b3db1198f6 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 28 Sep 2021 14:53:03 +0100 Subject: [PATCH 2280/2699] charm-helpers sync for 21-10 Change-Id: I7e6c2303ae2eed691475a1c5209c27b9173e2bf2 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 5 ++--- .../contrib/charmsupport/volumes.py | 4 ++-- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +++--- .../hardening/host/templates/login.defs | 4 ++-- .../charmhelpers/contrib/hardening/utils.py | 4 ++-- .../hooks/charmhelpers/contrib/network/ip.py | 4 ++-- .../contrib/openstack/cert_utils.py | 12 ++++++------ .../charmhelpers/contrib/openstack/context.py | 8 ++++---- .../contrib/openstack/deferred_events.py | 4 ++-- .../openstack/files/policy_rc_d_script.py | 2 +- .../charmhelpers/contrib/openstack/neutron.py | 6 +++--- .../charmhelpers/contrib/openstack/policyd.py | 4 ++-- .../charmhelpers/contrib/openstack/utils.py | 18 +++++++++--------- .../contrib/openstack/vaultlocker.py | 4 ++-- .../charmhelpers/contrib/storage/linux/ceph.py | 15 ++++++--------- .../charmhelpers/contrib/storage/linux/lvm.py | 4 ++-- ceph-mon/hooks/charmhelpers/core/hookenv.py | 11 +++++------ ceph-mon/hooks/charmhelpers/core/host.py | 12 ++++++------ ceph-mon/hooks/charmhelpers/core/unitdata.py | 6 +++--- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 4 ++-- .../charmhelpers/fetch/python/packages.py | 4 ++-- ceph-mon/hooks/charmhelpers/fetch/snap.py | 4 ++-- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 16 ++++++++-------- .../hooks/charmhelpers/fetch/ubuntu_apt_pkg.py | 4 ++-- ceph-mon/osci.yaml | 3 +++ 25 files changed, 83 insertions(+), 85 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e4cb06bc..8d1753c3 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2012-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. """Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. # # Authors: # Matthew Wedgwood @@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name): def remove_deprecated_check(nrpe, deprecated_services): """ - Remove checks fro deprecated services in list + Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py index 7ea43f08..f7c6fbdc 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ type: boolean default: true description: > - If false, a volume is mounted as sepecified in "volume-map" + If false, a volume is mounted as specified in "volume-map" If true, ephemeral storage will be used, meaning that log data will only exist as long as the machine. YOU HAVE BEEN WARNED. volume-map: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py index ba34fba0..f0b629a2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +86,7 @@ def is_elected_leader(resource): 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In + determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit. """ try: @@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports, Return only the services and corresponding ports that are managed by this charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsability for stopping and starting + is because this charm passes responsibility for stopping and starting haproxy to hacluster. Similarly, if a relation with hacluster exists then the ports returned by diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs index db137d6d..7d107637 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }} # # Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built +# overridden by PAM, since the default pam_unix module has it's own built # in of 3 retries. However, this is a safe fallback in case you are using # an authentication module that does not enforce PAM_MAXTRIES. # @@ -235,7 +235,7 @@ USERGROUPS_ENAB yes # # Instead of the real user shell, the program specified by this parameter # will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, +# The program may do whatever it wants (logging, additional authentication, # banner, ...) before running the actual shell. # # FAKE_SHELL /bin/fakeshell diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py index ff7485c2..56afa4b6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Limited. +# Copyright 2016-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules): def _apply_overrides(settings, overrides, schema): - """Get overrides config overlayed onto modules defaults. + """Get overrides config overlaid onto modules defaults. :param modules: require stack modules config. :returns: dictionary of modules config with user overrides applied. diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index 63e91cca..b356d64c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None): @returns IPv6 or IPv4 address """ # Select the interface address first - # For possible use as a fallback bellow with get_address_in_network + # For possible use as a fallback below with get_address_in_network try: # Get the interface specific IP address = network_get_primary_address(interface) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py index 703fc6ef..5c961c58 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2018 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Common python helper functions used for OpenStack charm certificats. +# Common python helper functions used for OpenStack charm certificates. import os import json @@ -71,7 +71,7 @@ def __init__(self, json_encode=True): def add_entry(self, net_type, cn, addresses): """Add a request to the batch - :param net_type: str netwrok space name request is for + :param net_type: str network space name request is for :param cn: str Canonical Name for certificate :param addresses: [] List of addresses to be used as SANs """ @@ -85,7 +85,7 @@ def add_hostname_cn(self): addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units - # cert has the approriate vip in the SAN list + # cert has the appropriate vip in the SAN list vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) @@ -178,7 +178,7 @@ def get_certificate_request(json_encode=True, bindings=None): except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " "local address found".format(binding), WARNING) - # Gurantee all SANs are covered + # Guarantee all SANs are covered # These are network addresses with no corresponding hostname. # Add the ips to the hostname cert to allow for this. req.add_hostname_cn_ip(_sans) @@ -357,7 +357,7 @@ def process_certificates(service_name, relation_id, unit, bindings=None): """Process the certificates supplied down the relation - :param service_name: str Name of service the certifcates are for. + :param service_name: str Name of service the certificates are for. 
:param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 57b03537..54081f0c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1367,7 +1367,7 @@ def resolve_ports(self, ports): mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) for entry in ports: if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address + # NIC is in known NICs and does NOT have an IP address if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: # If the nic is part of a bridge then don't use it if is_bridge_member(hwaddr_to_nic[entry]): @@ -2700,7 +2700,7 @@ def __call__(self): class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interaces from charm configuration. + """Build a map of bridge ports and interfaces from charm configuration. NOTE: the handling of this detail in the charm is pre-deprecated. @@ -3149,7 +3149,7 @@ def _get_capped_numvfs(requested): actual = min(int(requested), int(device.sriov_totalvfs)) if actual < int(requested): log('Requested VFs ({}) too high for device {}. Falling back ' - 'to value supprted by device: {}' + 'to value supported by device: {}' .format(requested, device.interface_name, device.sriov_totalvfs), level=WARNING) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py index 8765ee31..94eacf6c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -244,7 +244,7 @@ def get_deferred_restarts(): def clear_deferred_restarts(services): - """Clear deferred restart events targetted at `services`. + """Clear deferred restart events targeted at `services`. :param services: Services with deferred actions to clear. :type services: List[str] @@ -253,7 +253,7 @@ def clear_deferred_restarts(services): def process_svc_restart(service): - """Respond to a service restart having occured. + """Respond to a service restart having occurred. :param service: Services that the action was performed against. :type service: str diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py index 344a7662..431e972b 100755 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""This script is an implemenation of policy-rc.d +"""This script is an implementation of policy-rc.d For further information on policy-rc.d see *1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index fb5607f3..b41314cb 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. 
+# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Various utilies for dealing with Neutron and the renaming from Quantum. +# Various utilities for dealing with Neutron and the renaming from Quantum. import six from subprocess import check_output @@ -251,7 +251,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): def network_manager(): ''' Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, + that require compatibility (eg, deploying H with network-manager=quantum, upgrading from G). ''' release = os_release('nova-common') diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index e003c1f3..6fa06f26 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ The functions should be called from the install and upgrade hooks in the charm. The `maybe_do_policyd_overrides_on_config_changed` function is designed to be called on the config-changed hook, in that it does an additional check to -ensure that an already overriden policy.d in an upgrade or install hooks isn't +ensure that an already overridden policy.d in an upgrade or install hooks isn't repeated. In order the *enable* this functionality, the charm's install, config_changed, diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index 008a8ec0..d5d301e6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -660,7 +660,7 @@ def import_key(keyid): def get_source_and_pgp_key(source_and_key): """Look for a pgp key ID or ascii-armor key in the given input. - :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + :param source_and_key: String, "source_spec|keyid" where '|keyid' is optional. :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id if there was no '|' in the source_and_key string. @@ -680,7 +680,7 @@ def configure_installation_source(source_plus_key): The functionality is provided by charmhelpers.fetch.add_source() The difference between the two functions is that add_source() signature requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation + optional key by appending '|' to the end of the source specification 'source'. 
Another difference from add_source() is that the function calls sys.exit(1) @@ -767,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'): def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been recieved for an endpoint. + """Whether a new notification has been received for an endpoint. :param service_name: Service name eg nova, neutron, placement etc :type service_name: str @@ -793,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'): def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the enpoint triggers in db so it can be tracked if they changed. + """Save the endpoint triggers in db so it can be tracked if they changed. :param service_names: List of service name. :type service_name: List @@ -1461,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None): if remote_service: trigger['remote-service'] = remote_service for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using + # This subordinate can be related to two separate services using # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is + # the principle is connected down the relation we think it is if related_units(relid=rid): relation_set(relation_id=rid, relation_settings=trigger, @@ -1580,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. An optional charm_func() can be called. It should raise an Exception to - indicate that the function failed. If it was succesfull it should return + indicate that the function failed. If it was successful it should return None or an optional message. The signature for charm_func is: @@ -1839,7 +1839,7 @@ def some_hook(...): :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: decorator to use a restart_on_change with pausability :rtype: decorator diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4ee6c1db..e5418c39 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -1,4 +1,4 @@ -# Copyright 2018 Canonical Limited. +# Copyright 2018-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ def __call__(self): "but it's not available. Is secrets-stroage relation " "made, but encrypt option not set?", level=hookenv.WARNING) - # return an emptry context on hvac import error + # return an empty context on hvac import error return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index d1c61754..3eb46d70 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Copyright 2012 Canonical Ltd. -# # This file is sourced from lp:openstack-charm-helpers # # Authors: @@ -605,7 +602,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, class Pool(BasePool): - """Compability shim for any descendents external to this library.""" + """Compatibility shim for any descendents external to this library.""" @deprecate( 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') @@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" + """Determine whether a filesystem is already mounted.""" return fs in [f for f, m in mounts()] @@ -1904,7 +1901,7 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, set the ceph-mon unit handling the broker request will set its default value. :type erasure_profile: str - :param allow_ec_overwrites: allow EC pools to be overriden + :param allow_ec_overwrites: allow EC pools to be overridden :type allow_ec_overwrites: bool :raises: AssertionError if provided data is of invalid type/range """ @@ -1949,7 +1946,7 @@ def add_op_create_erasure_profile(self, name, :param lrc_locality: Group the coding and data chunks into sets of size locality (lrc plugin) :type lrc_locality: int - :param durability_estimator: The number of parity chuncks each of which includes + :param durability_estimator: The number of parity chunks each of which includes a data chunk in its calculation range (shec plugin) :type durability_estimator: int :param helper_chunks: The number of helper chunks to use for recovery operations @@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext): settings are in conf['osd_from_client'] and finally settings which do clash are in conf['osd_from_client_conflict']. Rather than silently drop the conflicting settings they are provided in the context so they can be - rendered commented out to give some visability to the admin. + rendered commented out to give some visibility to the admin. """ def __init__(self, permitted_sections=None): diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py index c8bde692..d0a57211 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ ################################################## def deactivate_lvm_volume_group(block_device): ''' - Deactivate any volume gruop associated with an LVM physical volume. + Deactivate any volume group associated with an LVM physical volume. :param block_device: str: Full path to LVM physical volume ''' diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index 47eebb51..e94247a2 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2013-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. "Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. # # Authors: # Charm Helpers Developers @@ -610,7 +609,7 @@ def expected_related_units(reltype=None): relation_type())) :param reltype: Relation type to list data for, default is to list data for - the realtion type we are currently executing a hook for. + the relation type we are currently executing a hook for. :type reltype: str :returns: iterator :rtype: types.GeneratorType @@ -627,7 +626,7 @@ def expected_related_units(reltype=None): @cached def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" + """Get the json representation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None): def _contains_range(addresses): """Check for cidr or wildcard domain in a string. - Given a string comprising a comma seperated list of ip addresses + Given a string comprising a comma separated list of ip addresses and domain names, determine whether the string contains IP ranges or wildcard domains. - :param addresses: comma seperated list of domains and ip addresses. + :param addresses: comma separated list of domains and ip addresses. :type addresses: str """ return ( diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index d25e6c59..994ec8a0 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service. + Re-enable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir @@ -727,7 +727,7 @@ def __init__(self, restart_map, stopstart=False, restart_functions=None, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] """ self.restart_map = restart_map @@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: result of lambda_f() :rtype: ANY @@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. 
:type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: @@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums, def pwgen(length=None): - """Generate a random pasword.""" + """Generate a random password.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py index ab554327..d9b8d0b0 100644 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ b/ceph-mon/hooks/charmhelpers/core/unitdata.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) @@ -449,7 +449,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 30228790..9497ee05 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -205,7 +205,7 @@ def plugins(fetch_handlers=None): classname) plugin_list.append(handler_class()) except NotImplementedError: - # Skip missing plugins so that they can be ommitted from + # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( handler_name)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py index b4f470ef..60048354 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. + """Overridden pip_execute() to stop sys.path being changed. The act of importing main from the pip module seems to cause add wheels from the /usr/share/python-wheels which are installed by various tools. diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py index fc70aa94..36d6bce9 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/snap.py +++ b/ceph-mon/hooks/charmhelpers/fetch/snap.py @@ -1,4 +1,4 @@ -# Copyright 2014-2017 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ def _snap_exec(commands): retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( - 'Could not aquire lock after {} attempts' + 'Could not acquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index c9433c12..6c7cf6fc 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -313,9 +313,9 @@ def filter_missing_packages(packages): def apt_cache(*_, **__): """Shim returning an object simulating the apt_pkg Cache. - :param _: Accept arguments for compability, not used. + :param _: Accept arguments for compatibility, not used. :type _: any - :param __: Accept keyword arguments for compability, not used. + :param __: Accept keyword arguments for compatibility, not used. :type __: any :returns:Object used to interrogate the system apt and dpkg databases. :rtype:ubuntu_apt_pkg.Cache @@ -350,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :param fatal: Whether the command's output should be checked and retried. :type fatal: bool - :param quiet: if True (default), supress log message to stdout/stderr + :param quiet: if True (default), suppress log message to stdout/stderr :type quiet: bool :raises: subprocess.CalledProcessError """ @@ -464,7 +464,7 @@ def import_key(key): A Radix64 format keyid is also supported for backwards compatibility. In this case Ubuntu keyserver will be queried for a key via HTTPS by its keyid. This method - is less preferrable because https proxy servers may + is less preferable because https proxy servers may require traffic decryption which is equivalent to a man-in-the-middle attack (a proxy server impersonates keyserver TLS certificates and has to be explicitly @@ -663,7 +663,7 @@ def add_source(source, key=None, fail_invalid=False): id may also be used, but be aware that only insecure protocols are available to retrieve the actual public key from a public keyserver placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. + are securely added automatically, so should not be provided. @param fail_invalid: (boolean) if True, then the function raises a SourceConfigError is there is no matching installation source. @@ -720,7 +720,7 @@ def _add_proposed(): Uses get_distrib_codename to determine the correct stanza for the deb line. - For intel architecutres PROPOSED_POCKET is used for the release, but for + For Intel architectures PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. 
""" release = get_distrib_codename() @@ -881,7 +881,7 @@ def __add_bare_helper(openstack_release, pocket_format, final_function): .format(ubuntu_version)) except AssertionError: raise SourceConfigError( - 'Invalid OpenStack release specificed: {} for ubuntu version {}' + 'Invalid OpenStack release specified: {} for Ubuntu version {}' .format(openstack_release, ubuntu_version)) final_function() diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 1f9bc73a..436e1776 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -209,7 +209,7 @@ def _populate(self): def init(): - """Compability shim that does nothing.""" + """Compatibility shim that does nothing.""" pass diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 8465bf75..2c5b5942 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -6,6 +6,9 @@ jobs: - focal-ussuri-ec-ceph-mon - bionic-train-with-fsid + # Disabling victoria due to https://github.com/openstack-charmers/zaza-openstack-tests/issues/647 + - focal-victoria: + voting: false - focal-xena: voting: false - impish-xena: From a8417ed01aeefdaf72972e7f2d747bdfcc6e0a80 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 8 Sep 2021 17:16:36 -0300 Subject: [PATCH 2281/2699] Add balancer module support for 'upmap' This allows the user to change the configuration parameter 'balancer-mode' via Juju in order to set the balancer mode for Ceph. Change-Id: I60dbd5f163e0c9d004275eff65db7ada41ad2660 Closes-Bug: #1888914 --- ceph-mon/config.yaml | 6 +++++ ceph-mon/hooks/ceph_hooks.py | 2 ++ ceph-mon/hooks/utils.py | 18 ++++++++++++++ ceph-mon/unit_tests/test_ceph_hooks.py | 4 +++ ceph-mon/unit_tests/test_ceph_utils.py | 34 ++++++++++++++++++++++++++ 5 files changed, 64 insertions(+) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index ba6f5a2e..9cbfaf95 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -314,3 +314,9 @@ options: The charm does not segregate access to pools from different models properly, this means that the correct charm settings can result with client model B having access to the data from model A. + balancer-mode: + type: string + default: + description: | + The balancer mode used by the Ceph manager. Can only be set for Luminous or + later versions, and only when the balancer module is enabled. 
diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index f2e97c02..ea05a470 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -102,6 +102,7 @@
     mgr_disable_module,
     mgr_enable_module,
     is_mgr_module_enabled,
+    set_balancer_mode,
 )
 
 from charmhelpers.contrib.charmsupport import nrpe
@@ -245,6 +246,7 @@ def config_changed():
         assert_charm_supports_ipv6()
 
     check_for_upgrade()
+    set_balancer_mode(config('balancer-mode'))
 
     log('Monitor hosts are ' + repr(get_mon_hosts()))
 
diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py
index fda4ca0d..3e29c7c9 100644
--- a/ceph-mon/hooks/utils.py
+++ b/ceph-mon/hooks/utils.py
@@ -38,6 +38,7 @@
 from charmhelpers.core.host import (
     lsb_release,
     CompareHostReleases,
+    cmp_pkgrevno,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
@@ -109,6 +110,23 @@ def mgr_disable_module(module):
     return False
 
 
+def set_balancer_mode(mode):
+    '''Set the balancer mode used by the Ceph manager.'''
+    if not mode:
+        return
+    elif cmp_pkgrevno('ceph-common', '12.0.0') < 0:
+        log('Luminous or later is required to set the balancer mode')
+        return
+    elif not is_mgr_module_enabled('balancer'):
+        log("Balancer module is disabled")
+        return
+
+    try:
+        subprocess.check_call(['ceph', 'balancer', 'mode', mode])
+    except subprocess.CalledProcessError:
+        log('Failed to set balancer mode to {}'.format(mode), level='ERROR')
+
+
 @cached
 def get_unit_hostname():
     return socket.gethostname()
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index 8a97de27..707ee52c 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -409,6 +409,7 @@ def test_config_changed_no_autotune(self, notify_client):
         relations_of_type.return_value = False
         self.test_config.set('pg-autotune', 'false')
+        self.test_config.set('balancer-mode', '')
 
         ceph_hooks.config_changed()
 
         mgr_enable_module.assert_not_called()
@@ -438,6 +439,7 @@ def test_config_changed_with_autotune(self,
         relations_of_type.return_value = False
         cmp_pkgrevno.return_value = 1
         self.test_config.set('pg-autotune', 'true')
+        self.test_config.set('balancer-mode', '')
         ceph_hooks.config_changed()
         mgr_enable_module.assert_called_once_with('pg_autoscaler')
         monitor_key_set.assert_called_once_with('admin', 'autotune', 'true')
@@ -466,6 +468,7 @@ def test_config_changed_with_default_autotune(self,
         relations_of_type.return_value = False
         cmp_pkgrevno.return_value = 1
         self.test_config.set('pg-autotune', 'auto')
+        self.test_config.set('balancer-mode', '')
         ceph_hooks.config_changed()
 
         mgr_enable_module.assert_not_called()
@@ -928,6 +931,7 @@ def test_config_changed_no_bootstrap_changed(self,
         self.relations_of_type.return_value = []
         self.is_relation_made.return_value = True
         self.test_config.set_changed('no-bootstrap', True)
+        self.test_config.set('balancer-mode', '')
         ceph_hooks.config_changed()
 
         bootstrap_source_rel_changed.assert_called_once()
diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py
index 1be5ea0b..0ea4c626 100644
--- a/ceph-mon/unit_tests/test_ceph_utils.py
+++ b/ceph-mon/unit_tests/test_ceph_utils.py
@@ -354,3 +354,37 @@ def test_is_required_osd_release_json_loads_error(self, loads,
 
         with self.assertRaises(utils.OsdPostUpgradeError):
             utils._is_required_osd_release(release)
+
+    @mock.patch.object(utils.subprocess, 'check_call')
+    @mock.patch.object(utils, 'is_mgr_module_enabled')
+    @mock.patch.object(utils, 'cmp_pkgrevno')
+    def test_balancer_mode(self,
+                           cmp_pkgrevno,
+                           is_mgr_module_enabled,
+                           check_call):
+        cmp_pkgrevno.return_value = 0
+        is_mgr_module_enabled.return_value = True
+        utils.set_balancer_mode('upmap')
+        check_call.assert_called_with(['ceph', 'balancer', 'mode',
+                                       'upmap'])
+
+    @mock.patch.object(utils.subprocess, 'check_call')
+    @mock.patch.object(utils, 'cmp_pkgrevno')
+    def test_balancer_mode_before_luminous(self,
+                                           cmp_pkgrevno,
+                                           check_call):
+        cmp_pkgrevno.return_value = -1
+        utils.set_balancer_mode('upmap')
+        check_call.assert_not_called()
+
+    @mock.patch.object(utils.subprocess, 'check_call')
+    @mock.patch.object(utils, 'is_mgr_module_enabled')
+    @mock.patch.object(utils, 'cmp_pkgrevno')
+    def test_balancer_mode_no_balancer(self,
+                                       cmp_pkgrevno,
+                                       is_mgr_module_enabled,
+                                       check_call):
+        cmp_pkgrevno.return_value = 0
+        is_mgr_module_enabled.return_value = False
+        utils.set_balancer_mode('upmap')
+        check_call.assert_not_called()

From c18a6c3e130deae4dfb357672947c9064d6a9dd8 Mon Sep 17 00:00:00 2001
From: Alex Kavanagh
Date: Tue, 21 Sep 2021 14:43:15 +0100
Subject: [PATCH 2282/2699] Add xena bundles

- add non-voting focal-xena bundle
- add non-voting impish-xena bundle
- rebuild to pick up charm-helpers changes
- update tox/pip.sh to ensure setuptools<50.0.0
- Remove redundant (and failing) IdentityContext tests
- Remove EOL groovy-* gate tests.

Change-Id: I32c8195ff76164de565e6af7c329645be40769f1
Co-authored-by: Aurelien Lourot
---
 .../charmhelpers/contrib/charmsupport/nrpe.py |    5 +-
 .../contrib/charmsupport/volumes.py           |    4 +-
 .../charmhelpers/contrib/hahelpers/cluster.py |    6 +-
 .../hardening/host/templates/login.defs       |    4 +-
 .../charmhelpers/contrib/hardening/utils.py   |    4 +-
 .../hooks/charmhelpers/contrib/network/ip.py  |    4 +-
 .../contrib/openstack/amulet/__init__.py      |   13 -
 .../contrib/openstack/amulet/deployment.py    |  387 ----
 .../contrib/openstack/amulet/utils.py         | 1595 -----------------
 .../contrib/openstack/cert_utils.py           |   12 +-
 .../charmhelpers/contrib/openstack/context.py |   78 +-
 .../contrib/openstack/deferred_events.py      |    4 +-
 .../openstack/files/policy_rc_d_script.py     |    2 +-
 .../charmhelpers/contrib/openstack/neutron.py |    6 +-
 .../charmhelpers/contrib/openstack/policyd.py |    6 +-
 .../contrib/openstack/templates/haproxy.cfg   |   16 +-
 .../templates/wsgi-openstack-api.conf         |    6 +-
 .../templates/wsgi-openstack-metadata.conf    |    6 +-
 .../charmhelpers/contrib/openstack/utils.py   |   71 +-
 .../contrib/openstack/vaultlocker.py          |    4 +-
 .../contrib/storage/linux/ceph.py             |   15 +-
 .../charmhelpers/contrib/storage/linux/lvm.py |    4 +-
 .../hooks/charmhelpers/core/hookenv.py        |   11 +-
 ceph-radosgw/hooks/charmhelpers/core/host.py  |   12 +-
 .../charmhelpers/core/host_factory/ubuntu.py  |    1 +
 .../hooks/charmhelpers/core/strutils.py       |    9 +-
 .../hooks/charmhelpers/core/unitdata.py       |    6 +-
 .../hooks/charmhelpers/fetch/__init__.py      |    6 +-
 .../charmhelpers/fetch/python/packages.py     |   10 +-
 ceph-radosgw/hooks/charmhelpers/fetch/snap.py |    4 +-
 .../hooks/charmhelpers/fetch/ubuntu.py        |  166 +-
 .../charmhelpers/fetch/ubuntu_apt_pkg.py      |    6 +-
 ceph-radosgw/hooks/charmhelpers/osplatform.py |    3 +
 ceph-radosgw/lib/charms_ceph/crush_utils.py   |    6 +-
 ceph-radosgw/lib/charms_ceph/utils.py         |  247 ++-
 ceph-radosgw/osci.yaml                        |   32 +-
 ceph-radosgw/pip.sh                           |   18 +
 .../tests/bundles/focal-xena-namespaced.yaml  |  117 ++
 ceph-radosgw/tests/bundles/focal-xena.yaml    |  116 ++
 .../tests/bundles/impish-xena-namespaced.yaml |  117 ++
 ceph-radosgw/tests/bundles/impish-xena.yaml   |  116 ++
 ceph-radosgw/tests/tests.yaml                 |   12 +-
 ceph-radosgw/tox.ini                          |   13 +-
 .../unit_tests/test_ceph_radosgw_context.py   | 
285 --- 44 files changed, 1066 insertions(+), 2499 deletions(-) delete mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py delete mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py delete mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100755 ceph-radosgw/pip.sh create mode 100644 ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-xena.yaml create mode 100644 ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/impish-xena.yaml diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index e4cb06bc..8d1753c3 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2012-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. """Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. # # Authors: # Matthew Wedgwood @@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name): def remove_deprecated_check(nrpe, deprecated_services): """ - Remove checks fro deprecated services in list + Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py index 7ea43f08..f7c6fbdc 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ type: boolean default: true description: > - If false, a volume is mounted as sepecified in "volume-map" + If false, a volume is mounted as specified in "volume-map" If true, ephemeral storage will be used, meaning that log data will only exist as long as the machine. YOU HAVE BEEN WARNED. volume-map: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index ba34fba0..f0b629a2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -86,7 +86,7 @@ def is_elected_leader(resource): 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In + determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit. 
""" try: @@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports, Return only the services and corresponding ports that are managed by this charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsability for stopping and starting + is because this charm passes responsibility for stopping and starting haproxy to hacluster. Similarly, if a relation with hacluster exists then the ports returned by diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs index db137d6d..7d107637 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/templates/login.defs @@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }} # # Max number of login retries if password is bad. This will most likely be -# overriden by PAM, since the default pam_unix module has it's own built +# overridden by PAM, since the default pam_unix module has it's own built # in of 3 retries. However, this is a safe fallback in case you are using # an authentication module that does not enforce PAM_MAXTRIES. # @@ -235,7 +235,7 @@ USERGROUPS_ENAB yes # # Instead of the real user shell, the program specified by this parameter # will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentification, +# The program may do whatever it wants (logging, additional authentication, # banner, ...) before running the actual shell. # # FAKE_SHELL /bin/fakeshell diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py index ff7485c2..56afa4b6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Limited. +# Copyright 2016-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules): def _apply_overrides(settings, overrides, schema): - """Get overrides config overlayed onto modules defaults. + """Get overrides config overlaid onto modules defaults. :param modules: require stack modules config. :returns: dictionary of modules config with user overrides applied. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index 63e91cca..b356d64c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None): @returns IPv6 or IPv4 address """ # Select the interface address first - # For possible use as a fallback bellow with get_address_in_network + # For possible use as a fallback below with get_address_in_network try: # Get the interface specific IP address = network_get_primary_address(interface) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 94ca079c..00000000 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. 
- """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt', - 'ceilometer-agent'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. 
- """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' - ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('bionic', 'cloud:bionic-stein'): self.bionic_stein, - ('bionic', 'cloud:bionic-train'): self.bionic_train, - ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, - ('cosmic', None): self.cosmic_rocky, - ('disco', None): self.disco_stein, - ('eoan', None): self.eoan_train, - ('focal', None): self.focal_ussuri, - ('focal', 'cloud:focal-victoria'): self.focal_victoria, - ('groovy', None): self.groovy_victoria, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_percona_service_entry(self, memory_constraint=None): - """Return a amulet service entry for percona cluster. - - :param memory_constraint: Override the default memory constraint - in the service entry. - :type memory_constraint: str - :returns: Amulet service entry. - :rtype: dict - """ - memory_constraint = memory_constraint or '3072M' - svc_entry = { - 'name': 'percona-cluster', - 'constraints': {'mem': memory_constraint}} - if self._get_openstack_release() <= self.trusty_mitaka: - svc_entry['location'] = 'cs:trusty/percona-cluster' - return svc_entry - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index 0a14af7e..00000000 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1595 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1 as glance_client -import glanceclient.v2 as glance_clientv2 -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', - 'xenial_newton', 'yakkety_newton', - 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', - 'xenial_queens', 'bionic_queens', - 'bionic_rocky', 'cosmic_rocky', - 'bionic_stein', 'disco_stein', - 'bionic_train', 'eoan_train', - 'bionic_ussuri', 'focal_ussuri', - 'focal_victoria', 'groovy_victoria', -] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - """ - self.log.warn("Endpoint ID and Region ID validation is limited to not " - "null checks after v2 to v3 conversion") - for svc in ep_data.keys(): - assert len(ep_data[svc]) == 1, "Unknown data format" - svc_ep_data = ep_data[svc][0] - ep_data[svc] = [ - { - 'url': svc_ep_data['adminURL'], - 'interface': 'admin', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['publicURL'], - 'interface': 'public', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}, - { - 'url': svc_ep_data['internalURL'], - 'interface': 'internal', - 'region': svc_ep_data['region'], - 'region_id': self.not_null, - 'id': self.not_null}] - return ep_data - - def validate_svc_catalog_endpoint_data(self, expected, actual, - openstack_release=None): - """Validate service catalog endpoint data. Pick the correct validator - for the OpenStack version. Expected data should be in the v2 format: - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL, - 'internalURL': internalURL - }], - 'service_name2': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. - 'publicURL': publicURL, - 'internalURL': internalURL - }], - } - - """ - validation_function = self.validate_v2_svc_catalog_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_svc_catalog_endpoint_data - expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) - return validation_function(expected, actual) - - def validate_v2_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. - - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. 
- """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. 
- """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. - """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - 
admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone, force_v1_client=False): - 
"""Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if not force_v1_client and keystone.session: - return glance_clientv2.Client("2", session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def glance_create_image(self, glance, image_name, image_url, - download_dir='tests', - hypervisor_type=None, - disk_format='qcow2', - architecture='x86_64', - container_format='bare'): - """Download an image and upload it to glance, validate its status - and return an image object pointer. KVM defaults, can override for - LXD. 
- - :param glance: pointer to authenticated glance api connection - :param image_name: display name for new image - :param image_url: url to retrieve - :param download_dir: directory to store downloaded image file - :param hypervisor_type: glance image hypervisor property - :param disk_format: glance image disk format - :param architecture: glance image architecture property - :param container_format: glance image container format - :returns: glance image pointer - """ - self.log.debug('Creating glance image ({}) from ' - '{}...'.format(image_name, image_url)) - - # Download image - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - abs_file_name = os.path.join(download_dir, image_name) - if not os.path.exists(abs_file_name): - opener.retrieve(image_url, abs_file_name) - - # Create glance image - glance_properties = { - 'architecture': architecture, - } - if hypervisor_type: - glance_properties['hypervisor_type'] = hypervisor_type - # Create glance image - if float(glance.version) < 2.0: - with open(abs_file_name) as f: - image = glance.images.create( - name=image_name, - is_public=True, - disk_format=disk_format, - container_format=container_format, - properties=glance_properties, - data=f) - else: - image = glance.images.create( - name=image_name, - visibility="public", - disk_format=disk_format, - container_format=container_format) - glance.images.upload(image.id, open(abs_file_name, 'rb')) - glance.images.update(image.id, **glance_properties) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - - if float(glance.version) < 2.0: - val_img_pub = glance.images.get(img_id).is_public - else: - val_img_pub = glance.images.get(img_id).visibility == "public" - - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == container_format \ - and val_img_dfmt == disk_format: - self.log.debug(msg_attr) - else: - msg = ('Image validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def create_cirros_image(self, glance, image_name, hypervisor_type=None): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. 
- - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :param hypervisor_type: glance image hypervisor property - :returns: glance image pointer - """ - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'glance_create_image instead of ' - 'create_cirros_image.') - - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Get cirros image URL - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - f.close() - - return self.glance_create_image( - glance, - image_name, - cirros_url, - hypervisor_type=hypervisor_type) - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. 
- - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. - - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. - - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. 
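            For illustration, a typical call of this poller, assuming `u` is
            an instance of this utility class and `img_id` is a glance image
            id obtained earlier in the test (a usage sketch only):

                ret = u.resource_reaches_status(glance.images, img_id,
                                                expected_stat='active',
                                                msg='Image status wait')
                if not ret:
                    amulet.raise_status(amulet.FAIL,
                                        msg='Image never became active.')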
- - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # For mimic ceph osd lspools output - output = output.replace("\n", ",") - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. 
The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. - """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. 
- - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. - - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. 
- - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. 
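            A publish/get round trip, as a sketch of intended usage only
            (queue name and payload are invented; `u` is assumed to be an
            instance of this utility class and `unit` a rmq sentry unit):

                u.publish_amqp_message_by_unit(unit, 'test message',
                                               queue='test')
                body = u.get_amqp_message_by_unit(unit, queue='test')
                assert body == 'test message'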
- """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. {} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 703fc6ef..5c961c58 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2018 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Common python helper functions used for OpenStack charm certificats. +# Common python helper functions used for OpenStack charm certificates. import os import json @@ -71,7 +71,7 @@ def __init__(self, json_encode=True): def add_entry(self, net_type, cn, addresses): """Add a request to the batch - :param net_type: str netwrok space name request is for + :param net_type: str network space name request is for :param cn: str Canonical Name for certificate :param addresses: [] List of addresses to be used as SANs """ @@ -85,7 +85,7 @@ def add_hostname_cn(self): addresses = [ip] # If a vip is being used without os-hostname config or # network spaces then we need to ensure the local units - # cert has the approriate vip in the SAN list + # cert has the appropriate vip in the SAN list vip = get_vip_in_network(resolve_network_cidr(ip)) if vip: addresses.append(vip) @@ -178,7 +178,7 @@ def get_certificate_request(json_encode=True, bindings=None): except NoNetworkBinding: log("Skipping request for certificate for ip in {} space, no " "local address found".format(binding), WARNING) - # Gurantee all SANs are covered + # Guarantee all SANs are covered # These are network addresses with no corresponding hostname. # Add the ips to the hostname cert to allow for this. req.add_hostname_cn_ip(_sans) @@ -357,7 +357,7 @@ def process_certificates(service_name, relation_id, unit, bindings=None): """Process the certificates supplied down the relation - :param service_name: str Name of service the certifcates are for. + :param service_name: str Name of service the certificates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index b67dafda..54081f0c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
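The cert_utils request-batching helpers patched just above collect one SAN
list per network space and serialise the result for the certificates
relation. A minimal usage sketch, assuming the batching class is named
CertRequest (only its methods are visible in the hunks) and with invented
hostnames and addresses:

    # Build a batch with one entry for the 'internal' space.
    req = CertRequest(json_encode=False)
    req.add_entry(net_type='internal',         # network space name
                  cn='radosgw.example.com',    # Canonical Name for the cert
                  addresses=['10.5.0.20'])     # addresses used as SANs
    # get_certificate_request() assembles such entries for all bindings
    # and, per the fix above, guarantees all SANs are covered.
    request = get_certificate_request(json_encode=True)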
@@ -25,7 +25,10 @@ import time from base64 import b64decode -from subprocess import check_call, CalledProcessError +from subprocess import ( + check_call, + check_output, + CalledProcessError) import six @@ -453,18 +456,24 @@ def __call__(self): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host + int_host = rdata.get('internal_host') + int_host = format_ipv6_addr(int_host) or int_host svc_protocol = rdata.get('service_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http' + int_protocol = rdata.get('internal_protocol') or 'http' api_version = rdata.get('api_version') or '2.0' ctxt.update({'service_port': rdata.get('service_port'), 'service_host': serv_host, 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), + 'internal_host': int_host, + 'internal_port': rdata.get('internal_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), 'admin_password': rdata.get('service_password'), 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, + 'internal_protocol': int_protocol, 'api_version': api_version}) if float(api_version) > 2: @@ -1358,7 +1367,7 @@ def resolve_ports(self, ports): mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) for entry in ports: if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address + # NIC is in known NICs and does NOT have an IP address if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: # If the nic is part of a bridge then don't use it if is_bridge_member(hwaddr_to_nic[entry]): @@ -1781,6 +1790,10 @@ def __call__(self): 'rel_key': 'enable-port-forwarding', 'default': False, }, + 'enable_fwaas': { + 'rel_key': 'enable-fwaas', + 'default': False, + }, 'global_physnet_mtu': { 'rel_key': 'global-physnet-mtu', 'default': 1500, @@ -1815,6 +1828,11 @@ def __call__(self): if ctxt['enable_port_forwarding']: l3_extension_plugins.append('port_forwarding') + if ctxt['enable_fwaas']: + l3_extension_plugins.append('fwaas_v2') + if ctxt['enable_nfg_logging']: + l3_extension_plugins.append('fwaas_v2_log') + ctxt['l3_extension_plugins'] = l3_extension_plugins return ctxt @@ -2379,6 +2397,12 @@ def __call__(self): ctxt['enable_metadata_network'] = True ctxt['enable_isolated_metadata'] = True + ctxt['append_ovs_config'] = False + cmp_release = CompareOpenStackReleases( + os_release('neutron-common', base='icehouse')) + if cmp_release >= 'queens' and config('enable-dpdk'): + ctxt['append_ovs_config'] = True + return ctxt @staticmethod @@ -2570,22 +2594,48 @@ def cpu_mask(self): :returns: hex formatted CPU mask :rtype: str """ - num_cores = config('dpdk-socket-cores') - mask = 0 + return self.cpu_masks()['dpdk_lcore_mask'] + + def cpu_masks(self): + """Get hex formatted CPU masks + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit, followed by the + next config:pmd-socket-cores + + :returns: Dict of hex formatted CPU masks + :rtype: Dict[str, str] + """ + num_lcores = config('dpdk-socket-cores') + pmd_cores = config('pmd-socket-cores') + lcore_mask = 0 + pmd_mask = 0 for cores in self._numa_node_cores().values(): - for core in cores[:num_cores]: - mask = mask | 1 << core - return format(mask, '#04x') + for core in cores[:num_lcores]: + lcore_mask = lcore_mask | 1 << core + for core in cores[num_lcores:][:pmd_cores]: + pmd_mask = pmd_mask | 1 << core + return { + 'pmd_cpu_mask': format(pmd_mask, '#04x'), + 
'dpdk_lcore_mask': format(lcore_mask, '#04x')} def socket_memory(self): - """Formatted list of socket memory configuration per NUMA node + """Formatted list of socket memory configuration per socket. - :returns: socket memory configuration per NUMA node + :returns: socket memory configuration per socket. :rtype: str """ + lscpu_out = check_output( + ['lscpu', '-p=socket']).decode('UTF-8').strip() + sockets = set() + for line in lscpu_out.split('\n'): + try: + sockets.add(int(line)) + except ValueError: + # lscpu output is headed by comments so ignore them. + pass sm_size = config('dpdk-socket-memory') - node_regex = '/sys/devices/system/node/node*' - mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + mem_list = [str(sm_size) for _ in sockets] if mem_list: return ','.join(mem_list) else: @@ -2650,7 +2700,7 @@ def __call__(self): class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interaces from charm configuration. + """Build a map of bridge ports and interfaces from charm configuration. NOTE: the handling of this detail in the charm is pre-deprecated. @@ -3099,7 +3149,7 @@ def _get_capped_numvfs(requested): actual = min(int(requested), int(device.sriov_totalvfs)) if actual < int(requested): log('Requested VFs ({}) too high for device {}. Falling back ' - 'to value supprted by device: {}' + 'to value supported by device: {}' .format(requested, device.interface_name, device.sriov_totalvfs), level=WARNING) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py index 8765ee31..94eacf6c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -244,7 +244,7 @@ def get_deferred_restarts(): def clear_deferred_restarts(services): - """Clear deferred restart events targetted at `services`. + """Clear deferred restart events targeted at `services`. :param services: Services with deferred actions to clear. :type services: List[str] @@ -253,7 +253,7 @@ def clear_deferred_restarts(services): def process_svc_restart(service): - """Respond to a service restart having occured. + """Respond to a service restart having occurred. :param service: Services that the action was performed against. :type service: str diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py index 344a7662..431e972b 100755 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""This script is an implemenation of policy-rc.d +"""This script is an implementation of policy-rc.d For further information on policy-rc.d see *1 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index fb5607f3..b41314cb 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
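To make the mask arithmetic in the cpu_masks() helper patched earlier
concrete, here is a self-contained sketch with an invented two-node NUMA
layout (dpdk-socket-cores=1, pmd-socket-cores=2):

    # First core of each node feeds the lcore mask, the next two feed
    # the PMD mask, mirroring the slicing in cpu_masks().
    numa_node_cores = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
    num_lcores, pmd_cores = 1, 2
    lcore_mask = pmd_mask = 0
    for cores in numa_node_cores.values():
        for core in cores[:num_lcores]:
            lcore_mask |= 1 << core
        for core in cores[num_lcores:][:pmd_cores]:
            pmd_mask |= 1 << core
    print(format(lcore_mask, '#04x'))   # 0x11 (cores 0 and 4)
    print(format(pmd_mask, '#04x'))     # 0x66 (cores 1, 2, 5 and 6)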
@@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Various utilies for dealing with Neutron and the renaming from Quantum. +# Various utilities for dealing with Neutron and the renaming from Quantum. import six from subprocess import check_output @@ -251,7 +251,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): def network_manager(): ''' Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, + that require compatibility (eg, deploying H with network-manager=quantum, upgrading from G). ''' release = os_release('nova-common') diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py index f2bb21e9..6fa06f26 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ The functions should be called from the install and upgrade hooks in the charm. The `maybe_do_policyd_overrides_on_config_changed` function is designed to be called on the config-changed hook, in that it does an additional check to -ensure that an already overriden policy.d in an upgrade or install hooks isn't +ensure that an already overridden policy.d in an upgrade or install hooks isn't repeated. In order the *enable* this functionality, the charm's install, config_changed, @@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): """This function is designed to be called from the config changed hook. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index d36af2aa..875e1393 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -1,10 +1,22 @@ global - log /var/lib/haproxy/dev/log local0 - log /var/lib/haproxy/dev/log local1 notice + # NOTE: on startup haproxy chroot's to /var/lib/haproxy. + # + # Unfortunately the program will open some files prior to the call to + # chroot never to reopen them, and some after. So looking at the on-disk + # layout of haproxy resources you will find some resources relative to / + # such as the admin socket, and some relative to /var/lib/haproxy such as + # the log socket. + # + # The logging socket is (re-)opened after the chroot and must be relative + # to /var/lib/haproxy. + log /dev/log local0 + log /dev/log local1 notice maxconn 20000 user haproxy group haproxy spread-checks 0 + # The admin socket is opened prior to the chroot never to be reopened, so + # it lives outside the chroot directory in the filesystem. 
stats socket /var/run/haproxy/admin.sock mode 600 level admin stats timeout 2m diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index 23b62a38..b9ca3963 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -15,7 +15,7 @@ Listen {{ public_port }} {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} @@ -41,7 +41,7 @@ Listen {{ public_port }} {% if admin_port -%} WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} @@ -67,7 +67,7 @@ Listen {{ public_port }} {% if public_port -%} WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-public WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf index 23b62a38..b9ca3963 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -15,7 +15,7 @@ Listen {{ public_port }} {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} @@ -41,7 +41,7 @@ Listen {{ public_port }} {% if admin_port -%} WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-admin WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} @@ -67,7 +67,7 @@ Listen {{ public_port }} {% if public_port -%} WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ - display-name=%{GROUP} + display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8 WSGIProcessGroup {{ service_name }}-public WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 1656bd43..d5d301e6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -106,6 +106,8 @@ filter_installed_packages, filter_missing_packages, ubuntu_apt_pkg as apt, + OPENSTACK_RELEASES, + UBUNTU_OPENSTACK_RELEASE, ) from charmhelpers.fetch.snap import ( @@ -132,54 +134,9 @@ DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), -]) - - OPENSTACK_CODENAMES = OrderedDict([ + # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version + # number. This just means the i-th version of the year yyyy. ('2011.2', 'diablo'), ('2012.1', 'essex'), ('2012.2', 'folsom'), @@ -200,6 +157,8 @@ ('2020.1', 'ussuri'), ('2020.2', 'victoria'), ('2021.1', 'wallaby'), + ('2021.2', 'xena'), + ('2022.1', 'yoga'), ]) # The ugly duckling - must list releases oldest to newest @@ -701,7 +660,7 @@ def import_key(keyid): def get_source_and_pgp_key(source_and_key): """Look for a pgp key ID or ascii-armor key in the given input. - :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + :param source_and_key: String, "source_spec|keyid" where '|keyid' is optional. :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id if there was no '|' in the source_and_key string. @@ -721,7 +680,7 @@ def configure_installation_source(source_plus_key): The functionality is provided by charmhelpers.fetch.add_source() The difference between the two functions is that add_source() signature requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation + optional key by appending '|' to the end of the source specification 'source'. Another difference from add_source() is that the function calls sys.exit(1) @@ -808,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'): def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been recieved for an endpoint. + """Whether a new notification has been received for an endpoint. :param service_name: Service name eg nova, neutron, placement etc :type service_name: str @@ -834,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'): def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the enpoint triggers in db so it can be tracked if they changed. + """Save the endpoint triggers in db so it can be tracked if they changed. :param service_names: List of service name. 
:type service_name: List @@ -1502,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None): if remote_service: trigger['remote-service'] = remote_service for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using + # This subordinate can be related to two separate services using # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is + # the principle is connected down the relation we think it is if related_units(relid=rid): relation_set(relation_id=rid, relation_settings=trigger, @@ -1621,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None): """Run an action against all services. An optional charm_func() can be called. It should raise an Exception to - indicate that the function failed. If it was succesfull it should return + indicate that the function failed. If it was successful it should return None or an optional message. The signature for charm_func is: @@ -1880,7 +1839,7 @@ def some_hook(...): :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: decorator to use a restart_on_change with pausability :rtype: decorator diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py index 4ee6c1db..e5418c39 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -1,4 +1,4 @@ -# Copyright 2018 Canonical Limited. +# Copyright 2018-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ def __call__(self): "but it's not available. Is secrets-stroage relation " "made, but encrypt option not set?", level=hookenv.WARNING) - # return an emptry context on hvac import error + # return an empty context on hvac import error return {} ctxt = {} # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index d1c61754..3eb46d70 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Copyright 2012 Canonical Ltd. 
-# # This file is sourced from lp:openstack-charm-helpers # # Authors: @@ -605,7 +602,7 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, class Pool(BasePool): - """Compability shim for any descendents external to this library.""" + """Compatibility shim for any descendents external to this library.""" @deprecate( 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') @@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" + """Determine whether a filesystem is already mounted.""" return fs in [f for f, m in mounts()] @@ -1904,7 +1901,7 @@ def add_op_create_erasure_pool(self, name, erasure_profile=None, set the ceph-mon unit handling the broker request will set its default value. :type erasure_profile: str - :param allow_ec_overwrites: allow EC pools to be overriden + :param allow_ec_overwrites: allow EC pools to be overridden :type allow_ec_overwrites: bool :raises: AssertionError if provided data is of invalid type/range """ @@ -1949,7 +1946,7 @@ def add_op_create_erasure_profile(self, name, :param lrc_locality: Group the coding and data chunks into sets of size locality (lrc plugin) :type lrc_locality: int - :param durability_estimator: The number of parity chuncks each of which includes + :param durability_estimator: The number of parity chunks each of which includes a data chunk in its calculation range (shec plugin) :type durability_estimator: int :param helper_chunks: The number of helper chunks to use for recovery operations @@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext): settings are in conf['osd_from_client'] and finally settings which do clash are in conf['osd_from_client_conflict']. Rather than silently drop the conflicting settings they are provided in the context so they can be - rendered commented out to give some visability to the admin. + rendered commented out to give some visibility to the admin. """ def __init__(self, permitted_sections=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index c8bde692..d0a57211 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ ################################################## def deactivate_lvm_volume_group(block_device): ''' - Deactivate any volume gruop associated with an LVM physical volume. + Deactivate any volume group associated with an LVM physical volume. :param block_device: str: Full path to LVM physical volume ''' diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index 47eebb51..e94247a2 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2013-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ # limitations under the License. "Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
# # Authors: # Charm Helpers Developers @@ -610,7 +609,7 @@ def expected_related_units(reltype=None): relation_type())) :param reltype: Relation type to list data for, default is to list data for - the realtion type we are currently executing a hook for. + the relation type we are currently executing a hook for. :type reltype: str :returns: iterator :rtype: types.GeneratorType @@ -627,7 +626,7 @@ def expected_related_units(reltype=None): @cached def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" + """Get the json representation of a unit's relation""" unit = unit or remote_unit() relation = relation_get(unit=unit, rid=rid) for key in relation: @@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None): def _contains_range(addresses): """Check for cidr or wildcard domain in a string. - Given a string comprising a comma seperated list of ip addresses + Given a string comprising a comma separated list of ip addresses and domain names, determine whether the string contains IP ranges or wildcard domains. - :param addresses: comma seperated list of domains and ip addresses. + :param addresses: comma separated list of domains and ip addresses. :type addresses: str """ return ( diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index d25e6c59..994ec8a0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs): """Resume a system service. - Reenable starting again at boot. Start the service. + Re-enable starting again at boot. Start the service. :param service_name: the name of the service to resume :param init_dir: the path to the init dir @@ -727,7 +727,7 @@ def __init__(self, restart_map, stopstart=False, restart_functions=None, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] """ self.restart_map = restart_map @@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. :type pre_restarts_wait_f: Callable[None, None] :returns: result of lambda_f() :rtype: ANY @@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums, :param post_svc_restart_f: A function run after a service has restarted. :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function callled before any restarts. + :param pre_restarts_wait_f: A function called before any restarts. 
:type pre_restarts_wait_f: Callable[None, None] """ if restart_functions is None: @@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums, def pwgen(length=None): - """Generate a random pasword.""" + """Generate a random password.""" if length is None: # A random length is ok to use a weak PRNG length = random.choice(range(35, 45)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 5aa4196d..e710c0e0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -28,6 +28,7 @@ 'focal', 'groovy', 'hirsute', + 'impish', ) diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index e8df0452..28c6b3f5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -18,8 +18,11 @@ import six import re +TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} +FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} -def bool_from_string(value): + +def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): """Interpret string value as boolean. Returns True if value translates to True otherwise False. @@ -32,9 +35,9 @@ def bool_from_string(value): value = value.strip().lower() - if value in ['y', 'yes', 'true', 't', 'on']: + if value in truthy_strings: return True - elif value in ['n', 'no', 'false', 'f', 'off']: + elif value in falsey_strings or assume_false: return False msg = "Unable to interpret string value '%s' as boolean" % (value) diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index ab554327..d9b8d0b0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) @@ -449,7 +449,7 @@ def config_changed(): 'previous value', prev, 'current value', cur) - # Get some unit specific bookeeping + # Get some unit specific bookkeeping if not db.get('pkg_key'): key = urllib.urlopen('https://example.com/pkg_key').read() db.set('pkg_key', key) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 5b689f5b..9497ee05 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
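Note: the ``strutils`` hunk above parameterises the truthy/falsey vocabularies and adds ``assume_false``. A usage sketch against the new signature (the values here are illustrative, not from the patch):

    from charmhelpers.core.strutils import bool_from_string

    bool_from_string('Yes')    # True, via the default truthy set
    bool_from_string('off')    # False, via the default falsey set

    # A config option that uses enabled/disabled instead:
    bool_from_string('enabled',
                     truthy_strings={'enabled'},
                     falsey_strings={'disabled'})   # True

    # With assume_false=True an unrecognised string returns False
    # instead of raising an error.
    bool_from_string('maybe', assume_false=True)    # False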
@@ -106,6 +106,8 @@ def base_url(self, url): apt_pkg = fetch.ubuntu_apt_pkg get_apt_dpkg_env = fetch.get_apt_dpkg_env get_installed_version = fetch.get_installed_version + OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES + UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE elif __platform__ == "centos": yum_search = fetch.yum_search @@ -203,7 +205,7 @@ def plugins(fetch_handlers=None): classname) plugin_list.append(handler_class()) except NotImplementedError: - # Skip missing plugins so that they can be ommitted from + # Skip missing plugins so that they can be omitted from # installation if desired log("FetchHandler {} not found, skipping plugin".format( handler_name)) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py index 6e95028b..60048354 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. + """Overridden pip_execute() to stop sys.path being changed. The act of importing main from the pip module seems to cause add wheels from the /usr/share/python-wheels which are installed by various tools. @@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" if six.PY2: apt_install('python-virtualenv') + extra_flags = [] else: - apt_install('python3-virtualenv') + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path @@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None): venv_path = os.path.join(charm_dir(), 'venv') if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) + subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py index fc70aa94..36d6bce9 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -1,4 +1,4 @@ -# Copyright 2014-2017 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ def _snap_exec(commands): retry_count += + 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: raise CouldNotAcquireLockException( - 'Could not aquire lock after {} attempts' + 'Could not acquire lock after {} attempts' .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode log('Snap failed to acquire lock, trying again in {} seconds.' diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 812a11a2..6c7cf6fc 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2021 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
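Note: the ``snap.py`` hunk above sits inside a retry loop that waits for snapd to release its lock. A self-contained sketch of that pattern, with illustrative constants rather than the module's real values:

    import subprocess
    import time

    SNAP_NO_LOCK = 1               # illustrative "lock held" return code
    SNAP_NO_LOCK_RETRY_COUNT = 5
    SNAP_NO_LOCK_RETRY_DELAY = 10  # seconds


    def snap_exec(command):
        """Run a snap command (e.g. ['install', 'core']), retrying while
        snapd's lock is held."""
        retry_count = 0
        while True:
            try:
                return subprocess.check_call(['snap'] + command)
            except subprocess.CalledProcessError as e:
                if e.returncode != SNAP_NO_LOCK:
                    raise
                retry_count += 1
                if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                    raise RuntimeError(
                        'Could not acquire lock after {} attempts'
                        .format(SNAP_NO_LOCK_RETRY_COUNT))
                time.sleep(SNAP_NO_LOCK_RETRY_DELAY)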
@@ -208,12 +208,79 @@ 'wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby', + # Xena + 'xena': 'focal-updates/xena', + 'focal-xena': 'focal-updates/xena', + 'focal-xena/updates': 'focal-updates/xena', + 'focal-updates/xena': 'focal-updates/xena', + 'xena/proposed': 'focal-proposed/xena', + 'focal-xena/proposed': 'focal-proposed/xena', + 'focal-proposed/xena': 'focal-proposed/xena', + # Yoga + 'yoga': 'focal-updates/yoga', + 'focal-yoga': 'focal-updates/yoga', + 'focal-yoga/updates': 'focal-updates/yoga', + 'focal-updates/yoga': 'focal-updates/yoga', + 'yoga/proposed': 'focal-proposed/yoga', + 'focal-yoga/proposed': 'focal-proposed/yoga', + 'focal-proposed/yoga': 'focal-proposed/yoga', } +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', + 'victoria', + 'wallaby', + 'xena', + 'yoga', +) + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ('groovy', 'victoria'), + ('hirsute', 'wallaby'), + ('impish', 'xena'), +]) + + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -246,9 +313,9 @@ def filter_missing_packages(packages): def apt_cache(*_, **__): """Shim returning an object simulating the apt_pkg Cache. - :param _: Accept arguments for compability, not used. + :param _: Accept arguments for compatibility, not used. :type _: any - :param __: Accept keyword arguments for compability, not used. + :param __: Accept keyword arguments for compatibility, not used. :type __: any :returns:Object used to interrogate the system apt and dpkg databases. :rtype:ubuntu_apt_pkg.Cache @@ -283,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :param fatal: Whether the command's output should be checked and retried. :type fatal: bool - :param quiet: if True (default), supress log message to stdout/stderr + :param quiet: if True (default), suppress log message to stdout/stderr :type quiet: bool :raises: subprocess.CalledProcessError """ @@ -397,7 +464,7 @@ def import_key(key): A Radix64 format keyid is also supported for backwards compatibility. In this case Ubuntu keyserver will be queried for a key via HTTPS by its keyid. This method - is less preferrable because https proxy servers may + is less preferable because https proxy servers may require traffic decryption which is equivalent to a man-in-the-middle attack (a proxy server impersonates keyserver TLS certificates and has to be explicitly @@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False): with be used. If staging is NOT used then the cloud archive [3] will be added, and the 'ubuntu-cloud-keyring' package will be added for the current distro. 
+        '<openstack-release>': translate to cloud:<series>-<openstack-release>
+          based on the current distro version (i.e. for 'ussuri' this will
+          either be 'bionic-ussuri' or 'distro').
+        '<openstack-release>/proposed': as above, but for proposed.

    Otherwise the source is not recognised and this is logged to the juju log.
    However, no error is raised, unless sys_error_on_exit is True.

@@ -592,7 +663,7 @@ def add_source(source, key=None, fail_invalid=False):
        id may also be used, but be aware that only insecure protocols are
        available to retrieve the actual public key from a public keyserver
        placing your Juju environment at risk. ppa and cloud archive keys
-       are securely added automtically, so sould not be provided.
+       are securely added automatically, so should not be provided.

    @param fail_invalid: (boolean) if True, then the function raises a
    SourceConfigError if there is no matching installation source.

@@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
    valid pocket in CLOUD_ARCHIVE_POCKETS
    """
+    # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
+    # the list in contrib.openstack.utils as it might not be included in
+    # classic charms and would break everything. Having OpenStack specific
+    # code in this file is a bit of an antipattern, anyway.
+    os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
+
    _mapping = OrderedDict([
        (r"^distro$", lambda: None),  # This is a NOP
        (r"^(?:proposed|distro-proposed)$", _add_proposed),
@@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False):
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^{}\/proposed$".format(os_versions_regex),
+         _add_bare_openstack_proposed),
+        (r"^{}$".format(os_versions_regex), _add_bare_openstack),
    ])
    if source is None:
        source = ''
@@ -640,7 +720,7 @@ def _add_proposed():
    Uses get_distrib_codename to determine the correct stanza for
    the deb line.

-    For intel architecutres PROPOSED_POCKET is used for the release, but for
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
    other architectures PROPOSED_PORTS_POCKET is used for the release.
    """
    release = get_distrib_codename()
@@ -662,7 +742,8 @@ def _add_apt_repository(spec):
    series = get_distrib_codename()
    spec = spec.replace('{series}', series)
    _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https', 'http']))
+                      cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
+                      )


def _add_cloud_pocket(pocket):
@@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
                         'version ({})'.format(release, os_release, ubuntu_rel))


+def _add_bare_openstack(openstack_release):
+    """Add cloud or distro based on the release given.
+
+    The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
+    or 'distro' depending on whether the ubuntu release is bionic or focal.
+
+    :param openstack_release: the OpenStack codename to determine the release
+        for.
+    :type openstack_release: str
+    :raises: SourceConfigError
+    """
+    # TODO(ajkavanagh) - surely this means we should be removing cloud archives
+    # if they exist?
+    __add_bare_helper(openstack_release, "{}-{}", lambda: None)
+
+
+def _add_bare_openstack_proposed(openstack_release):
+    """Add cloud or distro, but with proposed.
+
+    The spec given is, say, 'ussuri' but this could apply
+    cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
+    ubuntu release is bionic or focal.
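Note: taken together, ``_add_bare_openstack`` and ``_add_bare_openstack_proposed`` resolve a bare OpenStack codename against the running Ubuntu series. A self-contained sketch of that decision, using trimmed, illustrative copies of the tables defined earlier in this file:

    CLOUD_ARCHIVE_POCKETS = {'bionic-ussuri': 'bionic-updates/ussuri',
                             'focal-xena': 'focal-updates/xena'}
    UBUNTU_OPENSTACK_RELEASE = {'bionic': 'queens', 'focal': 'ussuri'}


    def resolve_bare_release(ubuntu_version, openstack_release):
        """Mirror __add_bare_helper's choice, minus the apt side effects."""
        pocket = '{}-{}'.format(ubuntu_version, openstack_release)
        if pocket in CLOUD_ARCHIVE_POCKETS:
            return 'cloud:{}'.format(pocket)   # add the UCA pocket
        if UBUNTU_OPENSTACK_RELEASE.get(ubuntu_version) == openstack_release:
            return 'distro'                    # ships in the main archive
        raise ValueError('no source for {} on {}'.format(
            openstack_release, ubuntu_version))


    assert resolve_bare_release('bionic', 'ussuri') == 'cloud:bionic-ussuri'
    assert resolve_bare_release('focal', 'ussuri') == 'distro'
    assert resolve_bare_release('focal', 'xena') == 'cloud:focal-xena'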
+ + :param openstack_release: the OpenStack codename to determine the release + for. + :type openstack_release: str + :raises: SourceConfigError + """ + __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) + + +def __add_bare_helper(openstack_release, pocket_format, final_function): + """Helper for _add_bare_openstack[_proposed] + + The bulk of the work between the two functions is exactly the same except + for the pocket format and the function that is run if it's the distro + version. + + :param openstack_release: the OpenStack codename. e.g. ussuri + :type openstack_release: str + :param pocket_format: the pocket formatter string to construct a pocket str + from the openstack_release and the current ubuntu version. + :type pocket_format: str + :param final_function: the function to call if it is the distro version. + :type final_function: Callable + :raises SourceConfigError on error + """ + ubuntu_version = get_distrib_codename() + possible_pocket = pocket_format.format(ubuntu_version, openstack_release) + if possible_pocket in CLOUD_ARCHIVE_POCKETS: + _add_cloud_pocket(possible_pocket) + return + # Otherwise it's almost certainly the distro version; verify that it + # exists. + try: + assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release + except KeyError: + raise SourceConfigError( + "Invalid ubuntu version {} isn't known to this library" + .format(ubuntu_version)) + except AssertionError: + raise SourceConfigError( + 'Invalid OpenStack release specified: {} for Ubuntu version {}' + .format(openstack_release, ubuntu_version)) + final_function() + + def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None, quiet=False): """Run a command and retry until success or max_retries is reached. diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index a2fbe0e5..436e1776 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -1,4 +1,4 @@ -# Copyright 2019 Canonical Ltd +# Copyright 2019-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -209,7 +209,7 @@ def _populate(self): def init(): - """Compability shim that does nothing.""" + """Compatibility shim that does nothing.""" pass @@ -264,7 +264,7 @@ def version_compare(a, b): else: raise RuntimeError('Unable to compare "{}" and "{}", according to ' 'our logic they are neither greater, equal nor ' - 'less than each other.') + 'less than each other.'.format(a, b)) class PkgVersion(): diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index 78c81af5..1ace468f 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -28,6 +28,9 @@ def get_platform(): elif "elementary" in current_platform: # ElementaryOS fails to run tests locally without this. return "ubuntu" + elif "Pop!_OS" in current_platform: + # Pop!_OS also fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." 
.format(current_platform)) diff --git a/ceph-radosgw/lib/charms_ceph/crush_utils.py b/ceph-radosgw/lib/charms_ceph/crush_utils.py index 8fe09fa4..37084bf1 100644 --- a/ceph-radosgw/lib/charms_ceph/crush_utils.py +++ b/ceph-radosgw/lib/charms_ceph/crush_utils.py @@ -79,9 +79,9 @@ def load_crushmap(self): stdin=crush.stdout) .decode('UTF-8')) except CalledProcessError as e: - log("Error occured while loading and decompiling CRUSH map:" + log("Error occurred while loading and decompiling CRUSH map:" "{}".format(e), ERROR) - raise "Failed to read CRUSH map" + raise def ensure_bucket_is_present(self, bucket_name): if bucket_name not in [bucket.name for bucket in self.buckets()]: @@ -111,7 +111,7 @@ def save(self): return ceph_output except CalledProcessError as e: log("save error: {}".format(e)) - raise "Failed to save CRUSH map." + raise def build_crushmap(self): """Modifies the current CRUSH map to include the new buckets""" diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index e5c38793..9b7299dd 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -14,6 +14,7 @@ import collections import glob +import itertools import json import os import pyudev @@ -24,6 +25,7 @@ import sys import time import uuid +import functools from contextlib import contextmanager from datetime import datetime @@ -501,30 +503,33 @@ def ceph_user(): class CrushLocation(object): - def __init__(self, - name, - identifier, - host, - rack, - row, - datacenter, - chassis, - root): - self.name = name + def __init__(self, identifier, name, osd="", host="", chassis="", + rack="", row="", pdu="", pod="", room="", + datacenter="", zone="", region="", root=""): self.identifier = identifier + self.name = name + self.osd = osd self.host = host + self.chassis = chassis self.rack = rack self.row = row + self.pdu = pdu + self.pod = pod + self.room = room self.datacenter = datacenter - self.chassis = chassis + self.zone = zone + self.region = region self.root = root def __str__(self): - return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \ - "chassis :{} root: {}".format(self.name, self.identifier, - self.host, self.rack, self.row, - self.datacenter, self.chassis, - self.root) + return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ + "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ + "region: {} root: {}".format(self.name, self.identifier, + self.osd, self.host, self.chassis, + self.rack, self.row, self.pdu, + self.pod, self.room, + self.datacenter, self.zone, + self.region, self.root) def __eq__(self, other): return not self.name < other.name and not other.name < self.name @@ -571,10 +576,53 @@ def get_osd_weight(osd_id): raise +def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): + """Get all nodes of the desired type, with all their attributes. + + These attributes can be direct or inherited from ancestors. 
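Note: the recursion being introduced here walks the CRUSH tree from each root and folds ancestor attributes into every matching node. A worked example on a toy ``ceph osd tree --format=json`` fragment (ids and names illustrative):

    nodes = [
        {'id': -1, 'name': 'default', 'type': 'root', 'children': [-2]},
        {'id': -2, 'name': 'rack1', 'type': 'rack', 'children': [-3]},
        {'id': -3, 'name': 'node1', 'type': 'host', 'children': [0]},
        {'id': 0, 'name': 'osd.0', 'type': 'osd'},
    ]

    # _flatten_roots(nodes) stops descending at 'host' entries and returns
    # one dict per host carrying everything inherited on the way down,
    # roughly:
    #   [{'root': 'default', 'rack': 'rack1', 'host': 'node1',
    #     'name': 'node1', 'identifier': -3}]
    # get_osd_tree() then builds a CrushLocation(**host) from each dict.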
+ """ + attribute_dict = {node['type']: node['name']} + if node['type'] == lookup_type: + attribute_dict['name'] = node['name'] + attribute_dict['identifier'] = node['id'] + return [attribute_dict] + elif not node.get('children'): + return [attribute_dict] + else: + descendant_attribute_dicts = [ + _filter_nodes_and_set_attributes(node_lookup_map[node_id], + node_lookup_map, lookup_type) + for node_id in node.get('children', []) + ] + return [dict(attribute_dict, **descendant_attribute_dict) + for descendant_attribute_dict + in itertools.chain.from_iterable(descendant_attribute_dicts)] + + +def _flatten_roots(nodes, lookup_type='host'): + """Get a flattened list of nodes of the desired type. + + :param nodes: list of nodes defined as a dictionary of attributes and + children + :type nodes: List[Dict[int, Any]] + :param lookup_type: type of searched node + :type lookup_type: str + :returns: flattened list of nodes + :rtype: List[Dict[str, Any]] + """ + lookup_map = {node['id']: node for node in nodes} + root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, + lookup_type) + for node in nodes if node['type'] == 'root'] + # get a flattened list of roots. + return list(itertools.chain.from_iterable(root_attributes_dicts)) + + def get_osd_tree(service): """Returns the current osd map in JSON. :returns: List. + :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. Also raises CalledProcessError if our ceph command fails """ @@ -585,35 +633,14 @@ def get_osd_tree(service): .decode('UTF-8')) try: json_tree = json.loads(tree) - crush_list = [] - # Make sure children are present in the json - if not json_tree['nodes']: - return None - host_nodes = [ - node for node in json_tree['nodes'] - if node['type'] == 'host' - ] - for host in host_nodes: - crush_list.append( - CrushLocation( - name=host.get('name'), - identifier=host['id'], - host=host.get('host'), - rack=host.get('rack'), - row=host.get('row'), - datacenter=host.get('datacenter'), - chassis=host.get('chassis'), - root=host.get('root') - ) - ) - return crush_list + roots = _flatten_roots(json_tree["nodes"]) + return [CrushLocation(**host) for host in roots] except ValueError as v: log("Unable to parse ceph tree json: {}. Error: {}".format( tree, v)) raise except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) + log("ceph osd tree command failed with message: {}".format(e)) raise @@ -669,7 +696,9 @@ def get_local_osd_ids(): dirs = os.listdir(osd_path) for osd_dir in dirs: osd_id = osd_dir.split('-')[1] - if _is_int(osd_id): + if (_is_int(osd_id) and + filesystem_mounted(os.path.join( + os.sep, osd_path, osd_dir))): osd_ids.append(osd_id) except OSError: raise @@ -3271,13 +3300,14 @@ def determine_packages(): def determine_packages_to_remove(): """Determines packages for removal + Note: if in a container, then the CHRONY_PACKAGE is removed. + :returns: list of packages to be removed + :rtype: List[str] """ rm_packages = REMOVE_PACKAGES.copy() if is_container(): - install_list = filter_missing_packages(CHRONY_PACKAGE) - if not install_list: - rm_packages.append(CHRONY_PACKAGE) + rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE])) return rm_packages @@ -3376,3 +3406,132 @@ def _get_cli_key(key): level=ERROR) raise OSDConfigSetError return True + + +def enabled_manager_modules(): + """Return a list of enabled manager modules. 
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def is_mgr_module_enabled(module):
+    """Is a given manager module enabled.
+
+    :param module: The module name to check
+    :type module: str
+    :returns: Whether the named module is enabled
+    :rtype: bool
+    """
+    return module in enabled_manager_modules()
+
+
+is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
+
+
+def mgr_enable_module(module):
+    """Enable a Ceph Manager Module.
+
+    :param module: The module name to enable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if not is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
+        return True
+    return False
+
+
+mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
+
+
+def mgr_disable_module(module):
+    """Disable a Ceph Manager Module.
+
+    :param module: The module name to disable
+    :type module: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    if is_mgr_module_enabled(module):
+        subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
+        return True
+    return False
+
+
+mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
+
+
+def ceph_config_set(name, value, who):
+    """Set a ceph config option
+
+    :param name: key to set
+    :type name: str
+    :param value: value corresponding to key
+    :type value: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+
+    :raises: subprocess.CalledProcessError
+    """
+    subprocess.check_call(['ceph', 'config', 'set', who, name, value])
+
+
+mgr_config_set = functools.partial(ceph_config_set, who='mgr')
+
+
+def ceph_config_get(name, who):
+    """Retrieve the value of a ceph config option
+
+    :param name: key to lookup
+    :type name: str
+    :param who: Config area the key is associated with (e.g. 'dashboard')
+    :type who: str
+    :returns: Value associated with key
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    return subprocess.check_output(
+        ['ceph', 'config', 'get', who, name]).decode('UTF-8')
+
+
+mgr_config_get = functools.partial(ceph_config_get, who='mgr')
+
+
+def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
+    """Set SSL dashboard config option.
+
+    :param path: Path to file
+    :type path: str
+    :param artifact_name: Option name for setting the artifact
+    :type artifact_name: str
+    :param hostname: If hostname is set, the artifact will only be associated
+                     with the dashboard on that host.
+    :type hostname: str
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ceph', 'dashboard', artifact_name]
+    if hostname:
+        cmd.append(hostname)
+    cmd.extend(['-i', path])
+    log(cmd, level=DEBUG)
+    subprocess.check_call(cmd)
+
+
+dashboard_set_ssl_certificate = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate')
+
+
+dashboard_set_ssl_certificate_key = functools.partial(
+    _dashboard_set_ssl_artifact,
+    artifact_name='set-ssl-certificate-key')
diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml
index 64b884b0..cc369b42 100644
--- a/ceph-radosgw/osci.yaml
+++ b/ceph-radosgw/osci.yaml
@@ -3,10 +3,16 @@
     - charm-unit-jobs
     check:
       jobs:
+        - vault-impish-xena_rgw:
+            voting: false
+        - vault-impish-xena-namespaced:
+            voting: false
         - vault-hirsute-wallaby_rgw
         - vault-hirsute-wallaby-namespaced
-        - vault-groovy-victoria_rgw
-        - vault-groovy-victoria-namespaced
+        - vault-focal-xena_rgw:
+            voting: false
+        - vault-focal-xena-namespaced:
+            voting: false
         - vault-focal-wallaby_rgw
         - vault-focal-wallaby-namespaced
         - vault-focal-victoria_rgw
@@ -37,10 +43,22 @@
     vars:
       tox_extra_args: vault:bionic-ussuri
 - job:
-    name: vault-hirsute-wallaby_rgw
+    name: vault-impish-xena_rgw
     parent: func-target
     dependencies: &smoke-jobs
       - vault-bionic-ussuri
+    vars:
+      tox_extra_args: vault:impish-xena
+- job:
+    name: vault-impish-xena-namespaced
+    parent: func-target
+    dependencies: *smoke-jobs
+    vars:
+      tox_extra_args: vault:impish-xena-namespaced
+- job:
+    name: vault-hirsute-wallaby_rgw
+    parent: func-target
+    dependencies: *smoke-jobs
     vars:
       tox_extra_args: vault:hirsute-wallaby
 - job:
@@ -50,17 +68,17 @@
     vars:
       tox_extra_args: vault:hirsute-wallaby-namespaced
 - job:
-    name: vault-groovy-victoria_rgw
+    name: vault-focal-xena_rgw
     parent: func-target
     dependencies: *smoke-jobs
     vars:
-      tox_extra_args: vault:groovy-victoria
+      tox_extra_args: vault:focal-xena
 - job:
-    name: vault-groovy-victoria-namespaced
+    name: vault-focal-xena-namespaced
     parent: func-target
     dependencies: *smoke-jobs
     vars:
-      tox_extra_args: vault:groovy-victoria-namespaced
+      tox_extra_args: vault:focal-xena-namespaced
 - job:
     name: vault-focal-wallaby_rgw
     parent: func-target
diff --git a/ceph-radosgw/pip.sh b/ceph-radosgw/pip.sh
new file mode 100755
index 00000000..9a7e6b09
--- /dev/null
+++ b/ceph-radosgw/pip.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# setuptools 58.0 dropped support for use_2to3=true, which is needed to
+# install blessings (an indirect dependency of charm-tools).
+#
+# More details on the behavior of tox and virtualenv creation can be found at
+# https://github.com/tox-dev/tox/issues/448
+#
+# This script is a wrapper to force the use of the pinned versions early in
+# the process, when the virtualenv is created and upgraded, before installing
+# the dependencies declared in the target.
+pip install 'pip<20.3' 'setuptools<50.0.0'
+pip "$@"
diff --git a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml
new file mode 100644
index 00000000..ad973e41
--- /dev/null
+++ b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml
@@ -0,0 +1,117 @@
+options:
+  source: &source cloud:focal-xena
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment.
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-xena.yaml b/ceph-radosgw/tests/bundles/focal-xena.yaml new file mode 100644 index 00000000..5a590e0c --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-xena.yaml @@ -0,0 +1,116 @@ +options: + source: &source cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml new file mode 100644 index 00000000..a748f555 --- /dev/null +++ b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source distro + +series: impish + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/impish-xena.yaml b/ceph-radosgw/tests/bundles/impish-xena.yaml new file mode 100644 index 00000000..49d34ea0 --- /dev/null +++ b/ceph-radosgw/tests/bundles/impish-xena.yaml @@ -0,0 +1,116 @@ +options: + source: &source distro + +series: impish + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index d4145091..c6c3ddeb 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,8 +1,8 @@ charm_name: ceph-radosgw gate_bundles: - - vault: groovy-victoria - - vault: groovy-victoria-namespaced + - vault: focal-xena + - vault: focal-xena-namespaced - vault: focal-wallaby - vault: focal-wallaby-namespaced - vault: focal-victoria @@ -33,8 +33,12 @@ dev_bundles: - bionic-rocky-multisite - vault: bionic-rocky - vault: bionic-rocky-namespaced + - vault: groovy-victoria + - vault: groovy-victoria-namespaced - vault: hirsute-wallaby - vault: hirsute-wallaby-namespaced + - vault: impish-xena + - vault: impish-xena-namespaced target_deploy_status: vault: @@ -57,3 +61,7 @@ tests_options: force_deploy: - hirsute-wallaby - hirsute-wallaby-namespaced + - groovy-victoria + - groovy-victoria-namespaced + - impish-xena + - impish-xena-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 9ba3f9fe..ba4fd5b6 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -22,19 +22,22 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} commands = 
stestr run --slowest {posargs} -whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index aff7e712..3f0decd2 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -15,7 +15,6 @@ from mock import patch import ceph_radosgw_context as context -import charmhelpers import charmhelpers.contrib.storage.linux.ceph as ceph import charmhelpers.fetch as fetch @@ -69,290 +68,6 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, self.assertEqual(expect, haproxy_context()) -class IdentityServiceContextTest(CharmTestCase): - - def setUp(self): - super(IdentityServiceContextTest, self).setUp(context, TO_PATCH) - self.relation_get.side_effect = self.test_relation.get - self.config.side_effect = self.test_config.get - self.maxDiff = None - self.cmp_pkgrevno.return_value = 1 - self.leader_get.return_value = 'False' - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, - _format_ipv6_addr, _filter_installed_packages, - jewel_installed=False, cmp_pkgrevno_side_effects=None): - self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects - if cmp_pkgrevno_side_effects - else [-1, 1, -1]) - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = 'rid1' - _runits.return_value = 'runit' - _ctxt_comp.return_value = True - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: - expect['user_roles'] = 'Babel' - expect['admin_roles'] = 'Dart' - else: - expect['user_roles'] = 'Babel,Dart' - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0: - expect['keystone_revocation_parameter_supported'] = True - if jewel_installed: - 
expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget, - _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, - jewel_installed=False, - cmp_pkgrevno_side_effects=None): - self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects - if cmp_pkgrevno_side_effects - else [-1, 1, -1]) - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = 'rid1' - _runits.return_value = 'runit' - _ctxt_comp.return_value = True - self.leader_get.return_value = 'True' - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': True, - 'cache_size': '42', - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - } - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0: - expect['user_roles'] = 'Babel' - expect['admin_roles'] = 'Dart' - else: - expect['user_roles'] = 'Babel,Dart' - if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0: - expect['keystone_revocation_parameter_supported'] = True - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_missing_admin_domain_id( - self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, jewel_installed=False): - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - 
self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = ['rid1'] - _runits.return_value = ['runit'] - _ctxt_comp.return_value = True - self.cmp_pkgrevno.return_value = -1 - if jewel_installed: - self.cmp_pkgrevno.return_value = 0 - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '2.0', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'keystone_revocation_parameter_supported': True, - 'service_host': '127.0.0.4', - 'service_port': 9876, - 'service_protocol': 'http', - 'user_roles': 'Babel,Dart', - } - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') - @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') - @patch.object(charmhelpers.contrib.openstack.context, 'related_units') - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_v3( - self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr, - _filter_installed_packages, jewel_installed=False): - self.test_config.set('operator-roles', 'Babel') - self.test_config.set('admin-roles', 'Dart') - self.test_config.set('cache-size', '42') - self.relation_ids.return_value = ['identity-service:5'] - self.related_units.return_value = ['keystone/0'] - _format_ipv6_addr.return_value = False - _rids.return_value = ['rid1'] - _runits.return_value = ['runit'] - _ctxt_comp.return_value = True - self.cmp_pkgrevno.return_value = -1 - if jewel_installed: - self.cmp_pkgrevno.return_value = 0 - id_data = { - 'service_port': 9876, - 'service_host': '127.0.0.4', - 'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'service_domain': 'service_domain', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'service_tenant': 'ten', - 'service_username': 'admin', - 'service_password': 'adminpass', - 'api_version': '3', - } - _rget.return_value = id_data - ids_ctxt = context.IdentityServiceContext() - expect = { - 'admin_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'admin_domain_name': 'service_domain', - 'admin_password': 'adminpass', - 'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'admin_tenant_name': 'ten', - 'admin_user': 'admin', - 'api_version': '3', - 'auth_host': '127.0.0.5', - 'auth_port': 5432, - 'auth_protocol': 'http', - 'auth_type': 'keystone', - 'namespace_tenants': False, - 'cache_size': '42', - 'keystone_revocation_parameter_supported': True, - 'service_domain_id': '8e50f28a556911e8aaeed33789425d23', - 'service_host': '127.0.0.4', - 'service_port': 
9876, - 'service_project_id': '2852107b8f8f473aaf0d769c7bbcf86b', - 'service_protocol': 'http', - 'user_roles': 'Babel,Dart', - } - if jewel_installed: - expect['auth_keystone_v3_supported'] = True - self.assertEqual(expect, ids_ctxt()) - - def test_ids_ctxt_jewel(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[0, 1, -1]) - - def test_ids_ctxt_luminous(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[1, 1, 0]) - - def test_ids_ctxt_octopus(self): - self.test_ids_ctxt(jewel_installed=True, - cmp_pkgrevno_side_effects=[1, -1, 0]) - - @patch.object(charmhelpers.contrib.openstack.context, - 'filter_installed_packages', return_value=['absent-pkg']) - @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') - @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_no_rels(self, _log, _rids, _filter_installed_packages): - _rids.return_value = [] - ids_ctxt = context.IdentityServiceContext() - self.assertEqual(ids_ctxt(), None) - - class MonContextTest(CharmTestCase): def setUp(self): From a55f5f28dc02ea0fd78a3a58295b53d031cab1d0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 30 Sep 2021 16:39:34 +0000 Subject: [PATCH 2283/2699] Support multiple gateway units Fix bug which was causing rgw not to be added to the dashboard when there were multiple units of the rgw application on Octopus. func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/646 Change-Id: I971e70afe8233982c0774b34a73fd6de70d3a0d8 --- ceph-dashboard/src/charm.py | 7 +++++-- ceph-dashboard/tests/tests.yaml | 1 + ceph-dashboard/tox.ini | 4 ++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 87bf3fc3..8d6faeee 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -266,13 +266,16 @@ def _manage_radosgw(self) -> None: """Register rados gateways in dashboard db""" if self.unit.is_leader(): creds = self.radosgw_user.get_user_creds() - if len(creds) < 1: + cred_count = len(set([ + (c['access_key'], c['secret_key']) + for c in creds])) + if cred_count < 1: logging.info("No object gateway creds found") return if self._support_multiple_gateways(): self._update_multi_radosgw_creds(creds) else: - if len(creds) > 1: + if cred_count > 1: logging.error( "Cannot enable object gateway support. 
Ceph release "
                    "does not support multiple object gateways in the "
diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml
index 2e6ee879..e18b2244 100644
--- a/ceph-dashboard/tests/tests.yaml
+++ b/ceph-dashboard/tests/tests.yaml
@@ -7,6 +7,7 @@ smoke_bundles:
 configure:
   - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
   - zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert
+  - zaza.openstack.charm_tests.ceph.dashboard.setup.set_grafana_url
 tests:
   - zaza.openstack.charm_tests.ceph.dashboard.tests.CephDashboardTest
   - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest
diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini
index 31301b80..4ca4d73a 100644
--- a/ceph-dashboard/tox.ini
+++ b/ceph-dashboard/tox.ini
@@ -32,6 +32,8 @@ whitelist_externals =
     add-to-archive.py
     bash
     charmcraft
+    ls
+    pwd
 passenv = HOME TERM CS_* OS_* TEST_*
 deps = -r{toxinidir}/test-requirements.txt
@@ -127,6 +129,8 @@ commands =
 [testenv:func-target]
 basepython = python3
 commands =
+    pwd
+    ls -l
     functest-run-suite --keep-model --bundle {posargs}

 [flake8]
From 0755ad0614526d48a5c1f243d256a3ea5472c39a Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 30 Sep 2021 09:13:46 +0000
Subject: [PATCH 2284/2699] Points deps at opendev

Change-Id: I56f88305d31e934315760e9bc7687c49bac63965
---
 ceph-dashboard/requirements.txt | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt
index 3b6834cc..eafe2d30 100644
--- a/ceph-dashboard/requirements.txt
+++ b/ceph-dashboard/requirements.txt
@@ -2,7 +2,6 @@ ops >= 1.2.0
 tenacity
 git+https://github.com/openstack/charms.ceph#egg=charms_ceph
 git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack
-#git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates
-git+https://github.com/gnuoy/ops-interface-tls-certificates@no-exception-for-inflight-request#egg=interface_tls_certificates
+git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates
 git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access
 git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer
From 48cd09e40e3847b65c8e164ceebce106fbaa21df Mon Sep 17 00:00:00 2001
From: Felipe Reyes
Date: Fri, 8 Oct 2021 15:35:30 -0300
Subject: [PATCH 2285/2699] Clear the default value for osd-devices

Using /dev/vdb as the default can conflict with Juju storage when devices
are attached dynamically as OSDs and journals: Juju may attach a volume
that is meant to be used as a journal as the first disk, making the two
device lists overlap.

Change-Id: I97c7657a82ea463aa090fc6266a4988c8c6bfeb4
Closes-Bug: #1946504
---
 ceph-osd/README.md   | 4 ++--
 ceph-osd/config.yaml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ceph-osd/README.md b/ceph-osd/README.md
index b1840933..4139bb33 100644
--- a/ceph-osd/README.md
+++ b/ceph-osd/README.md
@@ -69,8 +69,8 @@ A storage device is destined as an OSD (Object Storage Device). There can be
 multiple OSDs per storage node (ceph-osd unit).

 The list of all possible storage devices for the cluster is defined by the
-`osd-devices` option (default value is '/dev/vdb'). The below examples can be
-used in the `ceph-osd.yaml` configuration file.
+`osd-devices` option.
The below examples can be used in the `ceph-osd.yaml` +configuration file. Block devices (regular), diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index e501c232..b7aa2f72 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -56,7 +56,7 @@ options: natively supported config in the charm. osd-devices: type: string - default: /dev/vdb + default: description: | The devices to format and set up as OSD volumes. . From 75eee5812872dfc9f06b991a517a84ac743a4885 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 7 Oct 2021 05:38:49 +0000 Subject: [PATCH 2286/2699] Fix graphs to use dns name dns_name appears to be a more reliable key for the graphs as instance may return the IP or the hostname which breaks the host match Change-Id: Ie2b890a1eddd62aeb28d4e0261b6626976bb9c14 --- ceph-dashboard/build-requirements.txt | 1 + .../src/dashboards/hosts-overview.json | 18 +++++++++--------- ceph-dashboard/test-requirements.txt | 1 + 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/ceph-dashboard/build-requirements.txt b/ceph-dashboard/build-requirements.txt index 271d8955..38b1a777 100644 --- a/ceph-dashboard/build-requirements.txt +++ b/ceph-dashboard/build-requirements.txt @@ -1 +1,2 @@ +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft diff --git a/ceph-dashboard/src/dashboards/hosts-overview.json b/ceph-dashboard/src/dashboards/hosts-overview.json index 28e6707b..9c1a3729 100644 --- a/ceph-dashboard/src/dashboards/hosts-overview.json +++ b/ceph-dashboard/src/dashboards/hosts-overview.json @@ -184,7 +184,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg(\n 1 - (\n avg by(instance) \n (cpu_usage_idle{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100)))", + "expr": "avg(\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100)))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -267,7 +267,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg (((mem_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) - (\n (mem_free{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_buffered{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (mem_slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (mem_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}))", + "expr": "avg (((mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) - (\n (mem_free{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_cached{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_buffered{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (mem_slab{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -349,7 +349,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum ((irate(diskio_reads{instance=~\"($osd_hosts|$mds_hosts).*\"}[5m])) + \n(irate(diskio_writes{instance=~\"($osd_hosts|$mds_hosts).*\"}[5m])))", + "expr": "sum ((irate(diskio_reads{dns_name=~\"($osd_hosts|$mds_hosts).*\"}[5m])) + \n(irate(diskio_writes{dns_name=~\"($osd_hosts|$mds_hosts).*\"}[5m])))", "format": 
"time_series", "instant": true, "intervalFactor": 1, @@ -431,7 +431,7 @@ "tableColumn": "", "targets": [ { - "expr" : "avg (label_replace(label_replace((irate(diskio_io_time[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{instance=~\"($osd_hosts|$mds_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"))", + "expr" : "avg (label_replace(label_replace((irate(diskio_io_time[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{dns_name=~\"($osd_hosts|$mds_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -514,7 +514,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum (\n irate(net_bytes_recv{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\nsum (\n irate(net_bytes_sent{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]))", + "expr": "sum (\n irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\nsum (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -572,10 +572,10 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,100 * (\n 1 - (\n avg by(instance) \n (cpu_usage_idle{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100))))", + "expr": "topk(10,100 * (\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100))))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{instance}}", + "legendFormat": "{{dns_name}}", "refId": "A" } ], @@ -659,10 +659,10 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, (sum by(instance) (\n (\n irate(net_bytes_recv{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\n (\n irate(net_bytes_sent{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ))\n )\n)", + "expr": "topk(10, (sum by(dns_name) (\n (\n irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\n (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ))\n )\n)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{instance}}", + "legendFormat": "{{dns_name}}", "refId": "A" } ], diff --git a/ceph-dashboard/test-requirements.txt b/ceph-dashboard/test-requirements.txt index 8057d2c6..fb837fb3 100644 --- a/ceph-dashboard/test-requirements.txt +++ b/ceph-dashboard/test-requirements.txt @@ -15,3 +15,4 @@ git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pytz # workaround for 14.04 pip/tox pyudev # for ceph-* charm unit tests (not mocked?) +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
From dbaded3d36faef6b9ccb6a1e8bd54da6f8ddb7b9 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 12 Oct 2021 21:50:07 -0400 Subject: [PATCH 2287/2699] Add note about Grafana certificate Some environments may experience failure as Grafana graphs will silently fail to display due to an unverified TLS certificate for the Grafana server. Add a note about this and how to overcome it. Change-Id: I29ec8d63a50b31268129f7717f4931fc75d53d59 --- ceph-dashboard/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index 2d81704d..f28cfba8 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -77,6 +77,12 @@ add a relation to Vault (the grafana charm also supports TLS configuration via juju add-relation grafana:certificates vault:certificates +> **Important**: Ceph Dashboard will (silently) fail to display Grafana output + if the client browser cannot validate the Grafana server's TLS certificate. + Either ensure the signing CA certificate is known to the browser or, if in a + testing environment, contact the Grafana dashboard directly and have the + browser accept the unverified certificate. + Grafana should be configured with the following charm options: juju config grafana anonymous=True From e909eee59876fe2043351472de9a042181cead75 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 27 Oct 2021 08:34:44 +0000 Subject: [PATCH 2288/2699] Download grafana plugins locally Rather than have test failures due to the vagaries of the internet, download the grafana plugins locally. Change-Id: I8101ec2b1bbf0fdfacadd43684abe96bcc3afa75 --- ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 index 3248e700..3539227a 100644 --- a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 +++ b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 @@ -2,3 +2,4 @@ applications: grafana: options: http_proxy: '{{ TEST_HTTP_PROXY }}' + install_plugins: {{ TEST_GRAFANA_PLUGIN_VONAGE_URL }},{{ TEST_GRAFANA_PLUGIN_PIECHART_URL }} From 7bd0d04f09b73beec9602240eacf6fffa5dc17c9 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 26 Oct 2021 13:02:38 +0000 Subject: [PATCH 2289/2699] Fix exception when chain is missing Handle the situation where a chain file has not been passed along the certificates relation. Co-Authored-By: Billy Olsen Closes-Bug: #1948809 Change-Id: I2665ec0f7a7b2b7059899a5a937728c892593daf --- ceph-dashboard/src/charm.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 8d6faeee..650dcc23 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -486,11 +486,22 @@ def _get_tls_from_relation(self) -> TLS_Config: encryption_algorithm=serialization.NoEncryption()) cert = self.ca_client.server_certificate.public_bytes( encoding=serialization.Encoding.PEM) + try: + root_ca_chain = self.ca_client.root_ca_chain.public_bytes( + encoding=serialization.Encoding.PEM + ) + except ca_client.CAClientError: + # A root ca chain is not always available. If configured to just + # use vault with self-signed certificates, you will not get a ca + # chain. Instead, you will get a CAClientError being raised. For + # now, use a bytes() object for the root_ca_chain as it shouldn't + # cause problems and if a ca_cert_chain comes later, then it will + # get updated. 
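+ # (Since bytes() is just the empty byte-string b'', the concatenation
+ # that builds ca_cert below reduces to the CA certificate alone until
+ # a chain is available.)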
+ root_ca_chain = bytes() ca_cert = ( self.ca_client.ca_certificate.public_bytes( encoding=serialization.Encoding.PEM) + - self.ca_client.root_ca_chain.public_bytes( - encoding=serialization.Encoding.PEM)) + root_ca_chain) return key, cert, ca_cert def _update_iscsigw_creds(self, creds): From f2c83957c070d2d5b9ed057535d2cede596b8950 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 27 Oct 2021 08:20:56 +0000 Subject: [PATCH 2290/2699] Add support for bionic Change-Id: I319a85eb3dc4c995b959a085c56dad563e793c37 --- ceph-dashboard/metadata.yaml | 1 + ceph-dashboard/osci.yaml | 9 +- ceph-dashboard/requirements.txt | 1 + .../tests/bundles/bionic-octopus.yaml | 101 ++++++++++++++++++ .../bundles/overlays/bionic-octopus.yaml.j2 | 5 + ceph-dashboard/tests/tests.yaml | 1 + 6 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 ceph-dashboard/tests/bundles/bionic-octopus.yaml create mode 100644 ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index 0011610f..ea47f3e2 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -14,6 +14,7 @@ extra-bindings: public: subordinate: true series: +- bionic - focal - groovy - hirsute diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 019527dc..46cf1eca 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -3,6 +3,7 @@ - charm-unit-jobs check: jobs: + - bionic-octopus - focal-octopus - hirsute-pacific vars: @@ -21,9 +22,15 @@ vars: tox_extra_args: focal - job: - name: hirsute-pacific + name: bionic-octopus parent: func-target dependencies: &smoke-jobs - focal-octopus + vars: + tox_extra_args: bionic-octopus +- job: + name: hirsute-pacific + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: hirsute diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt index eafe2d30..991628aa 100644 --- a/ceph-dashboard/requirements.txt +++ b/ceph-dashboard/requirements.txt @@ -1,3 +1,4 @@ +importlib-resources ops >= 1.2.0 tenacity git+https://github.com/openstack/charms.ceph#egg=charms_ceph diff --git a/ceph-dashboard/tests/bundles/bionic-octopus.yaml b/ceph-dashboard/tests/bundles/bionic-octopus.yaml new file mode 100644 index 00000000..9982e4e6 --- /dev/null +++ b/ceph-dashboard/tests/bundles/bionic-octopus.yaml @@ -0,0 +1,101 @@ +local_overlay_enabled: False +series: bionic +applications: + percona-cluster: + charm: cs:~openstack-charmers-next/percona-cluster + num_units: 1 + options: + dataset-size: 25% + max-connections: 1000 + source: cloud:bionic-ussuri + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: cloud:bionic-ussuri + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: cloud:bionic-ussuri + vault: + num_units: 1 + charm: cs:~openstack-charmers-next/vault + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: cs:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # October 2021 + charm: cs:~llama-charmers-next/grafana + num_units: 1 + options: + anonymous: True + install_plugins: 
https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: cs:prometheus-alertmanager + num_units: 1 + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 3 + options: + source: cloud:bionic-ussuri + ceph-fs: + charm: cs:~openstack-charmers-next/ceph-fs + num_units: 1 + options: + source: cloud:bionic-ussuri +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'percona-cluster:shared-db' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 new file mode 100644 index 00000000..3539227a --- /dev/null +++ b/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 @@ -0,0 +1,5 @@ +applications: + grafana: + options: + http_proxy: '{{ TEST_HTTP_PROXY }}' + install_plugins: {{ TEST_GRAFANA_PLUGIN_VONAGE_URL }},{{ TEST_GRAFANA_PLUGIN_PIECHART_URL }} diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index e18b2244..8434950f 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -1,6 +1,7 @@ charm_name: ceph-dasboard gate_bundles: - focal + - bionic-octopus - hirsute smoke_bundles: - focal From 568b5304ff1a1c9d16b5937d4fb51f2b95524273 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Wed, 27 Oct 2021 13:29:52 -0400 Subject: [PATCH 2291/2699] Improve README Some essential bits were missing from the initial 21.10 release. It would be great if this change could be backported. Change-Id: Ibd616cdf9a08fa990fd2c49024c88f5529e24128 --- ceph-dashboard/README.md | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index f28cfba8..96123f44 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -1,7 +1,11 @@ # Overview The ceph-dashboard charm deploys the [Ceph Dashboard][upstream-ceph-dashboard], -a built-in web-based Ceph management and monitoring application. +a built-in web-based Ceph management and monitoring application. 
It works in +conjunction with the [openstack-loadbalancer][loadbalancer-charm] charm, which +in turn utilises the [hacluster][hacluster-charm] charm. + +> **Note**: The ceph-dashboard charm is currently in tech-preview. # Usage @@ -53,6 +57,34 @@ See [Managing TLS certificates][cdg-tls] in the > **Note**: This charm also supports TLS configuration via charm options `ssl_cert`, `ssl_key`, and `ssl_ca`. +### Load balancer + +The dashboard is accessed via a load balancer using VIPs and implemented via +the openstack-loadbalancer and hacluster charms: + + juju deploy -n 3 --config vip=10.5.20.200 cs:~openstack-charmers/openstack-loadbalancer + juju deploy hacluster openstack-loadbalancer-hacluster + juju add-relation openstack-loadbalancer:ha openstack-loadbalancer-hacluster:ha + +Now add a relation between the openstack-loadbalancer and ceph-dashboard +applications: + + juju add-relation ceph-dashboard:loadbalancer openstack-loadbalancer:loadbalancer + +### Dashboard user + +Credentials are needed to log in to the dashboard. Set these up by applying an +action to any ceph-dashboard unit. For example, to create an administrator user +called 'admin': + + juju run-action --wait ceph-dashboard/0 add-user username=admin role=administrator + +The command's output will include a generated password. + +The dashboard can then be accessed on the configured VIP and on port 8443: + +https://10.5.20.200:8443 + ## Embedded Grafana dashboards To embed Grafana dashboards within the Ceph dashboard some additional relations @@ -123,7 +155,7 @@ relations: To enable Object storage management of an existing Ceph RADOS Gateway service add the following relation: - juju relate ceph-dashboard:radosgw-dashboard ceph-radosgw:radosgw-user + juju add-relation ceph-dashboard:radosgw-dashboard ceph-radosgw:radosgw-user > **Note**: For Ceph versions older than Pacific the dashboard can only be related to a single ceph-radosgw application. @@ -160,3 +192,5 @@ Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. 
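Putting the new sections together: once the VIP, port, and dashboard user
exist, a quick reachability check can be run from any client that can reach
the VIP. A hedged sketch using the README's own example values (`-k` skips
certificate verification and only belongs in the unverified-certificate test
scenario described earlier):

    curl -k https://10.5.20.200:8443
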
[cdg-tls]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-certificate-management.html [lp-bugs-charm-ceph-dashboard]: https://bugs.launchpad.net/charm-ceph-dashboard [anchor-grafana-dashboards]: #embedded-grafana-dashboards +[loadbalancer-charm]: https://jaas.ai/u/openstack-charmers/openstack-loadbalancer +[hacluster-charm]: https://jaas.ai/hacluster From 63339394b88a5652f80ca9df335b49e311d151ae Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:39 -0400 Subject: [PATCH 2292/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * pin pyparsing for aodhclient * pin cffi for py35 * add non-voting focal-yoga bundle * add non-voting jammy-yoga bundle * add series metadata for jammy * switch xena bundles to voting * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata and tests Change-Id: I3d5670334344384fb8be5855926f2c9441f793ba --- ceph-rbd-mirror/osci.yaml | 50 ++++++++- ceph-rbd-mirror/pip.sh | 18 ++++ ceph-rbd-mirror/requirements.txt | 1 + ceph-rbd-mirror/src/metadata.yaml | 1 - .../{xenial-pike.yaml => focal-xena.yaml} | 101 +++++++++++++----- .../{xenial-queens.yaml => focal-yoga.yaml} | 101 +++++++++++++----- ...{groovy-victoria.yaml => impish-xena.yaml} | 3 +- ...a-image-mirroring.yaml => jammy-yoga.yaml} | 7 +- ceph-rbd-mirror/src/tests/tests.yaml | 47 ++++---- ceph-rbd-mirror/src/tox.ini | 4 +- ceph-rbd-mirror/test-requirements.txt | 2 + ceph-rbd-mirror/tox.ini | 17 ++- 12 files changed, 272 insertions(+), 80 deletions(-) create mode 100755 ceph-rbd-mirror/pip.sh rename ceph-rbd-mirror/src/tests/bundles/{xenial-pike.yaml => focal-xena.yaml} (57%) rename ceph-rbd-mirror/src/tests/bundles/{xenial-queens.yaml => focal-yoga.yaml} (57%) rename ceph-rbd-mirror/src/tests/bundles/{groovy-victoria.yaml => impish-xena.yaml} (98%) rename ceph-rbd-mirror/src/tests/bundles/{groovy-victoria-image-mirroring.yaml => jammy-yoga.yaml} (95%) diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index e7ae6cc0..1713f491 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -6,6 +6,18 @@ - bionic-train_ceph-rbd-mirror - bionic-stein_ceph-rbd-mirror - bionic-queens_ceph-rbd-mirror + - bionic-ussuri_ceph-rbd-mirror + - focal-ussuri_ceph-rbd-mirror + - focal-victoria_ceph-rbd-mirror + - focal-wallaby_ceph-rbd-mirror + - focal-xena_ceph-rbd-mirror + - focal-yoga_ceph-rbd-mirror: + voting: false + - hirsute-wallaby_ceph-rbd-mirror + - impish-xena_ceph-rbd-mirror: + voting: false + - jammy-yoga_ceph-rbd-mirror: + voting: false vars: needs_charm_build: true charm_build_name: ceph-rbd-mirror @@ -27,4 +39,40 @@ - job: name: bionic-stein_ceph-rbd-mirror parent: bionic-stein - dependencies: *smoke-jobs \ No newline at end of file + dependencies: *smoke-jobs +- job: + name: bionic-ussuri_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: focal-ussuri_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: focal-victoria_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: focal-wallaby_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: focal-xena_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: focal-yoga_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: hirsute-wallaby_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs +- job: + name: impish-xena_ceph-rbd-mirror + parent: 
bionic-stein + dependencies: *smoke-jobs +- job: + name: jammy-yoga_ceph-rbd-mirror + parent: bionic-stein + dependencies: *smoke-jobs diff --git a/ceph-rbd-mirror/pip.sh b/ceph-rbd-mirror/pip.sh new file mode 100755 index 00000000..9a7e6b09 --- /dev/null +++ b/ceph-rbd-mirror/pip.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# setuptools 58.0 dropped the support for use_2to3=true which is needed to +# install blessings (an indirect dependency of charm-tools). +# +# More details on the beahvior of tox and virtualenv creation can be found at +# https://github.com/tox-dev/tox/issues/448 +# +# This script is wrapper to force the use of the pinned versions early in the +# process when the virtualenv was created and upgraded before installing the +# depedencies declared in the target. +pip install 'pip<20.3' 'setuptools<50.0.0' +pip "$@" diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index b786b428..a68620f6 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -9,6 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. charm-tools==2.8.3 simplejson diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 48680792..f141caf0 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -15,7 +15,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml similarity index 57% rename from ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml rename to ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml index 71c45d1c..345cc604 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-pike.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml @@ -1,111 +1,164 @@ -series: xenial +variables: + openstack-origin: &openstack-origin cloud:focal-xena + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 options: - source: cloud:xenial-pike + source: *openstack-origin + to: + - '0' + - '1' + - '2' + keystone: charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - openstack-origin: cloud:xenial-pike + openstack-origin: *openstack-origin + rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:xenial-pike + source: *openstack-origin + cinder: charm: cs:~openstack-charmers-next/cinder num_units: 1 options: block-device: None glance-api-version: 2 - openstack-origin: cloud:xenial-pike + openstack-origin: *openstack-origin + cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 + 
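+  # (The *-mysql-router applications above are subordinate proxies: each
+  # carries its principal's shared-db relation over to mysql-innodb-cluster
+  # through the db-router pairings wired up in the relations section below.)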
glance: charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:xenial-pike + openstack-origin: *openstack-origin + nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: - openstack-origin: cloud:xenial-pike + openstack-origin: *openstack-origin + ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 - source: cloud:xenial-pike + source: *openstack-origin + ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: - source: cloud:xenial-pike + source: *openstack-origin bluestore: False use-direct-io: False osd-devices: /opt + ceph-rbd-mirror: - series: xenial + series: *series charm: ../../../ceph-rbd-mirror num_units: 1 options: - source: cloud:xenial-pike + source: *openstack-origin + ceph-mon-b: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 - source: cloud:xenial-pike + source: *openstack-origin + ceph-osd-b: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: - source: cloud:xenial-pike + source: *openstack-origin bluestore: False use-direct-io: False osd-devices: /opt + ceph-rbd-mirror-b: - series: xenial + series: *series charm: ../../../ceph-rbd-mirror num_units: 1 options: - source: cloud:xenial-pike + source: *openstack-origin + relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + - - rabbitmq-server - cinder + - - keystone - cinder - - keystone - glance + - - cinder - cinder-ceph - - cinder-ceph:ceph - ceph-mon:client + - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp - rabbitmq-server:amqp + - - glance:image-service - nova-compute:image-service - - glance - ceph-mon + - - ceph-mon:osd - ceph-osd:mon - - ceph-mon - ceph-rbd-mirror:ceph-local - - ceph-mon - ceph-rbd-mirror-b:ceph-remote + - - ceph-mon-b:osd - ceph-osd-b:mon - - ceph-mon-b diff --git a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml similarity index 57% rename from ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml rename to ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml index 47a34acb..5d2a4ed6 100644 --- a/ceph-rbd-mirror/src/tests/bundles/xenial-queens.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml @@ -1,111 +1,164 @@ -series: xenial +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 options: - source: cloud:xenial-queens + source: *openstack-origin + to: + - '0' + - '1' + - '2' + keystone: charm: cs:~openstack-charmers-next/keystone num_units: 1 options: - 
openstack-origin: cloud:xenial-queens + openstack-origin: *openstack-origin + rabbitmq-server: charm: cs:~openstack-charmers-next/rabbitmq-server num_units: 1 options: - source: cloud:xenial-queens + source: *openstack-origin + cinder: charm: cs:~openstack-charmers-next/cinder num_units: 1 options: block-device: None glance-api-version: 2 - openstack-origin: cloud:xenial-queens + openstack-origin: *openstack-origin + cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 + glance: charm: cs:~openstack-charmers-next/glance num_units: 1 options: - openstack-origin: cloud:xenial-queens + openstack-origin: *openstack-origin + nova-compute: charm: cs:~openstack-charmers-next/nova-compute num_units: 1 options: - openstack-origin: cloud:xenial-queens + openstack-origin: *openstack-origin + ceph-mon: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 - source: cloud:xenial-queens + source: *openstack-origin + ceph-osd: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: - source: cloud:xenial-queens + source: *openstack-origin bluestore: False use-direct-io: False osd-devices: /opt + ceph-rbd-mirror: - series: xenial + series: *series charm: ../../../ceph-rbd-mirror num_units: 1 options: - source: cloud:xenial-queens + source: *openstack-origin + ceph-mon-b: charm: cs:~openstack-charmers-next/ceph-mon num_units: 3 options: expected-osd-count: 3 - source: cloud:xenial-queens + source: *openstack-origin + ceph-osd-b: charm: cs:~openstack-charmers-next/ceph-osd num_units: 3 options: - source: cloud:xenial-queens + source: *openstack-origin bluestore: False use-direct-io: False osd-devices: /opt + ceph-rbd-mirror-b: - series: xenial + series: *series charm: ../../../ceph-rbd-mirror num_units: 1 options: - source: cloud:xenial-queens + source: *openstack-origin + relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + - - rabbitmq-server - cinder + - - keystone - cinder - - keystone - glance + - - cinder - cinder-ceph - - cinder-ceph:ceph - ceph-mon:client + - - nova-compute:ceph-access - cinder-ceph:ceph-access - - nova-compute:amqp - rabbitmq-server:amqp + - - glance:image-service - nova-compute:image-service - - glance - ceph-mon + - - ceph-mon:osd - ceph-osd:mon - - ceph-mon - ceph-rbd-mirror:ceph-local - - ceph-mon - ceph-rbd-mirror-b:ceph-remote + - - ceph-mon-b:osd - ceph-osd-b:mon - - ceph-mon-b diff --git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml similarity index 98% rename from ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml rename to ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml index 8c1453ac..40aa1461 100644 --- a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: &series groovy +series: &series impish machines: '0': @@ -48,7 +48,6 @@ applications: options: block-device: None glance-api-version: 2 - openstack-origin: *openstack-origin cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph diff 
--git a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml similarity index 95% rename from ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml rename to ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml index 90ad5b04..17507a07 100644 --- a/ceph-rbd-mirror/src/tests/bundles/groovy-victoria-image-mirroring.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: &series groovy +series: &series jammy machines: '0': @@ -48,13 +48,10 @@ applications: options: block-device: None glance-api-version: 2 - openstack-origin: *openstack-origin cinder-ceph: charm: cs:~openstack-charmers-next/cinder-ceph num_units: 0 - options: - rbd-mirroring-mode: image glance: charm: cs:~openstack-charmers-next/glance @@ -143,8 +140,6 @@ relations: - cinder-ceph - - cinder-ceph:ceph - ceph-mon:client -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client - - nova-compute:ceph-access - cinder-ceph:ceph-access diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index bb6fa201..bd170e77 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,33 +1,37 @@ charm_name: ceph-rbd-mirror smoke_bundles: -- bionic-train +- focal-ussuri gate_bundles: -- bionic-train -- bionic-train-image-mirroring -- bionic-stein - bionic-queens +- bionic-queens-e2e +- bionic-queens-e2e-lxd +- bionic-stein +- bionic-ussuri +- focal-ussuri +- focal-ussuri-image-mirroring +- focal-victoria +- focal-victoria-image-mirroring +- focal-wallaby +- focal-wallaby-image-mirroring +- focal-xena +- focal-xena-image-mirroring +- hirsute-wallaby +- hirsute-wallaby-image-mirroring +- impish-xena +- impish-xena-image-mirroring comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. dev_bundles: -- hirsute-wallaby -- hirsute-wallaby-image-mirroring -- groovy-victoria -- groovy-victoria-image-mirroring -- focal-wallaby -- focal-wallaby-image-mirroring -- focal-victoria -- focal-victoria-image-mirroring -- bionic-queens-e2e -- bionic-queens-e2e-lxd - bionic-rocky -- bionic-ussuri +- bionic-train +- bionic-train-image-mirroring # This is a dev bundle because we hit https://bugs.launchpad.net/charm-ceph-rbd-mirror/+bug/1892201. 
- bionic-ussuri-image-mirroring -- focal-ussuri -- focal-ussuri-image-mirroring -- xenial-queens -- xenial-pike +- focal-yoga +- focal-yoga-image-mirroring +- jammy-yoga +- jammy-yoga-image-mirroring configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: @@ -37,3 +41,8 @@ tests: tests_options: force_deploy: - hirsute-wallaby + - hirsute-wallaby-image-mirroring + - impish-xena + - impish-xena-image-mirroring + - jammy-yoga + - jammy-yoga-image-mirroring diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index e7630475..b40d2952 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -22,12 +22,12 @@ skip_missing_interpreters = False requires = pip < 20.3 virtualenv < 20.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 -whitelist_externals = juju +allowlist_externals = juju passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt install_command = diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index af069e1b..bb1307f5 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -3,6 +3,8 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 stestr>=2.2.0 diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 5c818017..22159df2 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -11,6 +11,21 @@ envlist = pep8,py3 sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = + pip < 20.3 + virtualenv < 20.0 + setuptools<50.0.0 + +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -21,7 +36,7 @@ setenv = VIRTUAL_ENV={envdir} JUJU_REPOSITORY={toxinidir}/build passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = - pip install {opts} {packages} + {toxinidir}/pip.sh install {opts} {packages} deps = -r{toxinidir}/requirements.txt From f8cf67351c7740fc0ca6ee83520ac5617ff3f641 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:38 -0400 Subject: [PATCH 2293/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * sync from release-tools * switch to release-specific zosci functional tests * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata/tests * drop py35 and add py39 * charms.ceph sync Change-Id: I214c0517b223da5fce9e942269fd8703422d1a2b --- .../charmhelpers/contrib/openstack/utils.py | 23 +- .../contrib/storage/linux/ceph.py | 11 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-mon/lib/charms_ceph/utils.py | 6 + ceph-mon/metadata.yaml | 1 - ceph-mon/osci.yaml | 17 +- ceph-mon/test-requirements.txt | 2 + ceph-mon/tests/bundles/focal-yoga.yaml | 235 ++++++++++++++++++ .../{groovy-victoria.yaml => jammy-yoga.yaml} | 2 +- ceph-mon/tests/bundles/trusty-mitaka.yaml | 143 ----------- ceph-mon/tests/bundles/xenial-mitaka.yaml | 94 ------- ceph-mon/tests/bundles/xenial-ocata.yaml | 107 -------- ceph-mon/tests/bundles/xenial-pike.yaml | 107 -------- ceph-mon/tests/bundles/xenial-queens.yaml | 113 --------- ceph-mon/tests/tests.yaml | 35 ++- ceph-mon/tox.ini | 5 + 17 files changed, 301 insertions(+), 602 deletions(-) create mode 100644 ceph-mon/tests/bundles/focal-yoga.yaml rename ceph-mon/tests/bundles/{groovy-victoria.yaml => jammy-yoga.yaml} (99%) delete mode 100644 ceph-mon/tests/bundles/trusty-mitaka.yaml delete mode 100644 ceph-mon/tests/bundles/xenial-mitaka.yaml delete mode 100644 ceph-mon/tests/bundles/xenial-ocata.yaml delete mode 100644 ceph-mon/tests/bundles/xenial-pike.yaml delete mode 100644 ceph-mon/tests/bundles/xenial-queens.yaml diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py index d5d301e6..9cc96d60 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py @@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs): +def do_action_openstack_upgrade(package, upgrade_callback, configs, + force_upgrade=False): """Perform action-managed OpenStack upgrade. 
Upgrades packages to the configured openstack-origin version and sets @@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): @param package: package name for determining if upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class + @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package): + if openstack_upgrade_available(package) or force_upgrade: if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') @@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'): return SubordinatePackages(install, purge) +def get_subordinate_services(): + """Iterate over subordinate relations and get service information. + + In a similar fashion as with get_subordinate_release_packages(), + principle charms can retrieve a list of services advertised by their + subordinate charms. This is useful to know about subordinate services when + pausing, resuming or upgrading a principle unit. + + :returns: Name of all services advertised by all subordinates + :rtype: Set[str] + """ + services = set() + for rdata in container_scoped_relation_get('services'): + services |= set(json.loads(rdata or '[]')) + return services + + os_restart_on_change = partial( pausable_restart_on_change, can_restart_now_f=deferred_events.check_and_record_restart_request, diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 3eb46d70..c70aeb20 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -294,7 +294,6 @@ def __init__(self, service, name=None, percent_data=None, app_name=None, # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 self.op = op or {} if op: @@ -341,7 +340,8 @@ def _post_create(self): Do not add calls for a specific pool type here, those should go into one of the pool specific classes. 
""" - if self.nautilus_or_later: + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: # Ensure we set the expected pool ratio update_pool( client=self.service, @@ -660,8 +660,9 @@ def _create(self): else: self.pg_num = self.get_pgs(self.replicas, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( @@ -745,9 +746,9 @@ def _create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py index e710c0e0..0906c5c0 100644 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -29,6 +29,7 @@ 'groovy', 'hirsute', 'impish', + 'jammy', ) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index 6c7cf6fc..cf8328f0 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -275,6 +275,7 @@ ('groovy', 'victoria'), ('hirsute', 'wallaby'), ('impish', 'xena'), + ('jammy', 'yoga'), ]) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 9b7299dd..025ab866 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -3169,6 +3169,8 @@ def dirs_need_ownership_update(service): ('luminous', 'mimic'), ('mimic', 'nautilus'), ('nautilus', 'octopus'), + ('octopus', 'pacific'), + ('pacific', 'quincy'), ]) # Map UCA codenames to ceph codenames @@ -3186,6 +3188,10 @@ def dirs_need_ownership_update(service): 'stein': 'mimic', 'train': 'nautilus', 'ussuri': 'octopus', + 'victoria': 'octopus', + 'wallaby': 'pacific', + 'xena': 'pacific', + 'yoga': 'quincy', } diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index f9aedfbb..4543b1b1 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -10,7 +10,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 2c5b5942..baacc7d5 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -1,18 +1,17 @@ - project: templates: - - charm-unit-jobs - - charm-functional-jobs + - charm-yoga-unit-jobs + - charm-yoga-functional-jobs + - charm-xena-functional-jobs + - charm-wallaby-functional-jobs + - charm-victoria-functional-jobs + - charm-ussuri-functional-jobs + - charm-stein-functional-jobs + - charm-queens-functional-jobs check: jobs: - focal-ussuri-ec-ceph-mon - bionic-train-with-fsid - # Disabling victoria due to https://github.com/openstack-charmers/zaza-openstack-tests/issues/647 - - focal-victoria: - voting: false - - focal-xena: - voting: false - - impish-xena: - voting: false - job: name: focal-ussuri-ec-ceph-mon diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index dba2c767..f853625d 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -7,6 +7,8 @@ # requirements. They are intertwined. 
Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..74d5fc5d --- /dev/null +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -0,0 +1,235 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ../../../ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + prometheus2: +# Pin prometheus2 charm version Bug #1891942 + charm: cs:prometheus2-18 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 
'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/groovy-victoria.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml similarity index 99% rename from ceph-mon/tests/bundles/groovy-victoria.yaml rename to ceph-mon/tests/bundles/jammy-yoga.yaml index 5321a42c..6f0a35a0 100644 --- a/ceph-mon/tests/bundles/groovy-victoria.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' diff --git a/ceph-mon/tests/bundles/trusty-mitaka.yaml b/ceph-mon/tests/bundles/trusty-mitaka.yaml deleted file mode 100644 index 63db332e..00000000 --- a/ceph-mon/tests/bundles/trusty-mitaka.yaml +++ /dev/null @@ -1,143 +0,0 @@ -series: trusty -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - ceph-mon: - charm: ../../../ceph-mon - series: trusty - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - percona-cluster: - charm: cs:trusty/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - libvirt-image-backend: rbd - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service 
- - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-mitaka.yaml b/ceph-mon/tests/bundles/xenial-mitaka.yaml deleted file mode 100644 index 7cba4099..00000000 --- a/ceph-mon/tests/bundles/xenial-mitaka.yaml +++ /dev/null @@ -1,94 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: ../../../ceph-mon - series: xenial - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-ocata.yaml b/ceph-mon/tests/bundles/xenial-ocata.yaml deleted file mode 100644 index c20d9e39..00000000 --- a/ceph-mon/tests/bundles/xenial-ocata.yaml +++ /dev/null @@ -1,107 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' 
- source: cloud:xenial-ocata - ceph-mon: - charm: ../../../ceph-mon - series: xenial - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-ocata - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-ocata - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-ocata - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-ocata - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-pike.yaml b/ceph-mon/tests/bundles/xenial-pike.yaml deleted file mode 100644 index 098691a8..00000000 --- a/ceph-mon/tests/bundles/xenial-pike.yaml +++ /dev/null @@ -1,107 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-pike - ceph-mon: - charm: ../../../ceph-mon - series: xenial - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-pike - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-pike - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-pike - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - nova-compute: - charm: 
cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-pike - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:xenial-pike -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-mon/tests/bundles/xenial-queens.yaml b/ceph-mon/tests/bundles/xenial-queens.yaml deleted file mode 100644 index 9bfed07e..00000000 --- a/ceph-mon/tests/bundles/xenial-queens.yaml +++ /dev/null @@ -1,113 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-queens - ceph-mon: - charm: ../../../ceph-mon - series: xenial - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-queens - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-queens - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-queens - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-queens - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - 
openstack-origin: cloud:xenial-queens - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 2ea97e57..303ddfa6 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,30 +1,26 @@ charm_name: ceph-mon gate_bundles: - - groovy-victoria - - focal-xena - - focal-wallaby - - focal-victoria - - focal-ussuri-ec - - focal-ussuri - - bionic-ussuri - - bionic-train - - bionic-train-with-fsid - - bionic-stein - bionic-queens - - xenial-mitaka + - bionic-stein + - bionic-ussuri + - focal-ussuri + - focal-ussuri-ec + - focal-victoria + - focal-wallaby + - focal-xena + - hirsute-wallaby + - impish-xena dev_bundles: - - trusty-mitaka - - xenial-ocata - - xenial-pike - - xenial-queens - bionic-rocky - - hirsute-wallaby - - impish-xena + - bionic-train + - bionic-train-with-fsid + - focal-yoga + - jammy-yoga smoke_bundles: - - bionic-train + - focal-ussuri configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -39,7 +35,6 @@ tests: tests_options: force_deploy: - - trusty-mitaka - - groovy-victoria - hirsute-wallaby - impish-xena + - jammy-yoga diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index ba4fd5b6..86d1e904 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -61,6 +61,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 77d5b68a7640779331aa44cd6cb7ad5cb336f4b2 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:39 -0400 Subject: [PATCH 2294/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * sync from release-tools * switch to release-specific zosci functional tests * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata/tests * drop py35 and add py39 * charms.ceph sync Change-Id: I4d517c40a4450ef1395dd0240513eeebc069384c --- .../charmhelpers/contrib/openstack/utils.py | 23 +- .../contrib/storage/linux/ceph.py | 11 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-osd/lib/charms_ceph/utils.py | 6 
+ ceph-osd/metadata.yaml | 1 - ceph-osd/osci.yaml | 16 +- ceph-osd/test-requirements.txt | 2 + ceph-osd/tests/bundles/focal-yoga.yaml | 222 ++++++++++++++++++ .../{groovy-victoria.yaml => jammy-yoga.yaml} | 2 +- ceph-osd/tests/bundles/trusty-mitaka.yaml | 142 ----------- ceph-osd/tests/bundles/xenial-mitaka.yaml | 92 -------- ceph-osd/tests/bundles/xenial-ocata.yaml | 106 --------- ceph-osd/tests/bundles/xenial-pike.yaml | 106 --------- ceph-osd/tests/bundles/xenial-queens.yaml | 106 --------- ceph-osd/tests/tests.yaml | 31 +-- ceph-osd/tox.ini | 5 + 17 files changed, 286 insertions(+), 587 deletions(-) create mode 100644 ceph-osd/tests/bundles/focal-yoga.yaml rename ceph-osd/tests/bundles/{groovy-victoria.yaml => jammy-yoga.yaml} (99%) delete mode 100644 ceph-osd/tests/bundles/trusty-mitaka.yaml delete mode 100644 ceph-osd/tests/bundles/xenial-mitaka.yaml delete mode 100644 ceph-osd/tests/bundles/xenial-ocata.yaml delete mode 100644 ceph-osd/tests/bundles/xenial-pike.yaml delete mode 100644 ceph-osd/tests/bundles/xenial-queens.yaml diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index d5d301e6..9cc96d60 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs): +def do_action_openstack_upgrade(package, upgrade_callback, configs, + force_upgrade=False): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets @@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): @param package: package name for determining if upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class + @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package): + if openstack_upgrade_available(package) or force_upgrade: if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') @@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'): return SubordinatePackages(install, purge) +def get_subordinate_services(): + """Iterate over subordinate relations and get service information. + + In a similar fashion as with get_subordinate_release_packages(), + principle charms can retrieve a list of services advertised by their + subordinate charms. This is useful to know about subordinate services when + pausing, resuming or upgrading a principle unit. 
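As a rough sketch of how a principal charm might consume the helper being added here when pausing a unit; it assumes subordinates publish a JSON-encoded "services" list on their container-scoped relation (which is what the function reads) and that service_pause from charmhelpers.core.host is the desired way to stop them:

    # Hypothetical pause handler for a principal unit; the service list
    # passed in ('ceph-osd') is an illustrative assumption.
    from charmhelpers.contrib.openstack.utils import get_subordinate_services
    from charmhelpers.core.host import service_pause

    def pause_unit_services(own_services):
        # Union the principal's own services with everything advertised
        # by subordinate charms, so the whole unit goes quiet.
        for svc in set(own_services) | get_subordinate_services():
            service_pause(svc)

    pause_unit_services(['ceph-osd'])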
+ + :returns: Name of all services advertised by all subordinates + :rtype: Set[str] + """ + services = set() + for rdata in container_scoped_relation_get('services'): + services |= set(json.loads(rdata or '[]')) + return services + + os_restart_on_change = partial( pausable_restart_on_change, can_restart_now_f=deferred_events.check_and_record_restart_request, diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 3eb46d70..c70aeb20 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -294,7 +294,6 @@ def __init__(self, service, name=None, percent_data=None, app_name=None, # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 self.op = op or {} if op: @@ -341,7 +340,8 @@ def _post_create(self): Do not add calls for a specific pool type here, those should go into one of the pool specific classes. """ - if self.nautilus_or_later: + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: # Ensure we set the expected pool ratio update_pool( client=self.service, @@ -660,8 +660,9 @@ def _create(self): else: self.pg_num = self.get_pgs(self.replicas, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( @@ -745,9 +746,9 @@ def _create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index e710c0e0..0906c5c0 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -29,6 +29,7 @@ 'groovy', 'hirsute', 'impish', + 'jammy', ) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 6c7cf6fc..cf8328f0 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -275,6 +275,7 @@ ('groovy', 'victoria'), ('hirsute', 'wallaby'), ('impish', 'xena'), + ('jammy', 'yoga'), ]) diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 9b7299dd..025ab866 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -3169,6 +3169,8 @@ def dirs_need_ownership_update(service): ('luminous', 'mimic'), ('mimic', 'nautilus'), ('nautilus', 'octopus'), + ('octopus', 'pacific'), + ('pacific', 'quincy'), ]) # Map UCA codenames to ceph codenames @@ -3186,6 +3188,10 @@ def dirs_need_ownership_update(service): 'stein': 'mimic', 'train': 'nautilus', 'ussuri': 'octopus', + 'victoria': 'octopus', + 'wallaby': 'pacific', + 'xena': 'pacific', + 'yoga': 'quincy', } diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 530b7672..7069f780 100644 --- 
a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -11,7 +11,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 4e322d5e..0b2a0aa5 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -1,10 +1,10 @@ - project: templates: - - charm-unit-jobs - - charm-functional-jobs - check: - jobs: - - focal-xena: - voting: false - - impish-xena: - voting: false + - charm-yoga-unit-jobs + - charm-yoga-functional-jobs + - charm-xena-functional-jobs + - charm-wallaby-functional-jobs + - charm-victoria-functional-jobs + - charm-ussuri-functional-jobs + - charm-stein-functional-jobs + - charm-queens-functional-jobs diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index dba2c767..f853625d 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -7,6 +7,8 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..7beef444 --- /dev/null +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-osd: + charm: ../../../ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + cinder: + expose: True + charm: cs:~openstack-charmers-next/cinder + 
num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/groovy-victoria.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml similarity index 99% rename from ceph-osd/tests/bundles/groovy-victoria.yaml rename to ceph-osd/tests/bundles/jammy-yoga.yaml index 1d1b1ffb..5a6895cc 100644 --- a/ceph-osd/tests/bundles/groovy-victoria.yaml +++ b/ceph-osd/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' diff --git a/ceph-osd/tests/bundles/trusty-mitaka.yaml b/ceph-osd/tests/bundles/trusty-mitaka.yaml deleted file mode 100644 index 555895d3..00000000 --- a/ceph-osd/tests/bundles/trusty-mitaka.yaml +++ /dev/null @@ -1,142 +0,0 @@ -series: trusty -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: trusty - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - percona-cluster: - charm: cs:trusty/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - # workaround while awaiting release of next version of python-libjuju with - # model-constraints support - constraints: - virt-type=kvm -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - 
cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-mitaka.yaml b/ceph-osd/tests/bundles/xenial-mitaka.yaml deleted file mode 100644 index 4500ce95..00000000 --- a/ceph-osd/tests/bundles/xenial-mitaka.yaml +++ /dev/null @@ -1,92 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: xenial - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-ocata.yaml b/ceph-osd/tests/bundles/xenial-ocata.yaml deleted file mode 100644 index ef2bfb54..00000000 --- a/ceph-osd/tests/bundles/xenial-ocata.yaml +++ /dev/null @@ -1,106 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: xenial - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-ocata - ceph-mon: - 
charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-ocata - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-ocata - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-ocata - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-ocata - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-pike.yaml b/ceph-osd/tests/bundles/xenial-pike.yaml deleted file mode 100644 index 2874cfe4..00000000 --- a/ceph-osd/tests/bundles/xenial-pike.yaml +++ /dev/null @@ -1,106 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: xenial - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-pike - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-pike - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-pike - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-pike - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: 
cloud:xenial-pike - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-pike - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:xenial-pike -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/xenial-queens.yaml b/ceph-osd/tests/bundles/xenial-queens.yaml deleted file mode 100644 index 8c5a762a..00000000 --- a/ceph-osd/tests/bundles/xenial-queens.yaml +++ /dev/null @@ -1,106 +0,0 @@ -series: xenial -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: xenial - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-queens - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:xenial-queens - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-queens - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-queens - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:xenial-queens - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:xenial-queens -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - 
glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 1938e14b..de4d04a8 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,28 +1,24 @@ charm_name: ceph-osd gate_bundles: - - groovy-victoria - - focal-xena - - focal-wallaby - - focal-victoria - - focal-ussuri - - bionic-ussuri - - bionic-train - - bionic-stein - bionic-queens - - xenial-mitaka + - bionic-stein + - bionic-ussuri + - focal-ussuri + - focal-victoria + - focal-wallaby + - focal-xena + - hirsute-wallaby + - impish-xena dev_bundles: - - trusty-mitaka - - xenial-ocata - - xenial-pike - - xenial-queens - bionic-rocky - - hirsute-wallaby - - impish-xena + - bionic-train + - focal-yoga + - jammy-yoga smoke_bundles: - - bionic-train + - focal-ussuri configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -36,7 +32,6 @@ tests: tests_options: force_deploy: - - trusty-mitaka - - groovy-victoria - hirsute-wallaby - impish-xena + - jammy-yoga diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index ba4fd5b6..86d1e904 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -61,6 +61,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From dba51c14c928730bc12f1bd244f18b76d0ec22d3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 17 Nov 2021 14:26:52 -0500 Subject: [PATCH 2295/2699] Switch to yoga unit jobs and add py39 Change-Id: I94a2ecc844a5aff7b4bc40dc62699b5098618f92 --- ceph-rbd-mirror/osci.yaml | 5 ++--- ceph-rbd-mirror/tox.ini | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index 1713f491..12d7f88c 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -1,6 +1,6 @@ - project: templates: - - charm-unit-jobs + - charm-yoga-unit-jobs check: jobs: - bionic-train_ceph-rbd-mirror @@ -27,10 +27,9 @@ parent: bionic-train dependencies: - osci-lint - - tox-py35 - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 - job: name: bionic-queens_ceph-rbd-mirror parent: bionic-queens diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 22159df2..faf6092e 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -75,6 +75,11 @@ basepython = python3.8 deps = -r{toxinidir}/test-requirements.txt commands = stestr 
run --slowest {posargs} +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + [testenv:pep8] basepython = python3 deps = flake8==3.9.2 From ea0cb60d3abfb73afaaf5a5339f1578a3560b83c Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:39 -0400 Subject: [PATCH 2296/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * sync from release-tools * switch to release-specific zosci functional tests * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata/tests * drop py35 and add py39 * charms.ceph sync Change-Id: I2dda45edafeee8173a9fcb174f3dc18718d664e3 --- .../charmhelpers/contrib/openstack/utils.py | 23 +- .../contrib/storage/linux/ceph.py | 11 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/fetch/ubuntu.py | 1 + ceph-proxy/lib/charms_ceph/utils.py | 6 + ceph-proxy/metadata.yaml | 1 - ceph-proxy/osci.yaml | 45 ++-- ceph-proxy/test-requirements.txt | 2 + ceph-proxy/tests/bundles/focal-yoga-ec.yaml | 215 ++++++++++++++++++ ceph-proxy/tests/bundles/focal-yoga.yaml | 186 +++++++++++++++ ...vy-victoria-ec.yaml => jammy-yoga-ec.yaml} | 2 +- .../{groovy-victoria.yaml => jammy-yoga.yaml} | 2 +- ceph-proxy/tests/bundles/trusty-mitaka.yaml | 115 ---------- ceph-proxy/tests/bundles/xenial-mitaka.yaml | 85 ------- ceph-proxy/tests/bundles/xenial-ocata.yaml | 99 -------- ceph-proxy/tests/bundles/xenial-pike.yaml | 99 -------- ceph-proxy/tests/bundles/xenial-queens.yaml | 99 -------- ceph-proxy/tests/tests.yaml | 27 +-- ceph-proxy/tox.ini | 5 + 19 files changed, 482 insertions(+), 542 deletions(-) create mode 100644 ceph-proxy/tests/bundles/focal-yoga-ec.yaml create mode 100644 ceph-proxy/tests/bundles/focal-yoga.yaml rename ceph-proxy/tests/bundles/{groovy-victoria-ec.yaml => jammy-yoga-ec.yaml} (99%) rename ceph-proxy/tests/bundles/{groovy-victoria.yaml => jammy-yoga.yaml} (99%) delete mode 100644 ceph-proxy/tests/bundles/trusty-mitaka.yaml delete mode 100644 ceph-proxy/tests/bundles/xenial-mitaka.yaml delete mode 100644 ceph-proxy/tests/bundles/xenial-ocata.yaml delete mode 100644 ceph-proxy/tests/bundles/xenial-pike.yaml delete mode 100644 ceph-proxy/tests/bundles/xenial-queens.yaml diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index d5d301e6..9cc96d60 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs): +def do_action_openstack_upgrade(package, upgrade_callback, configs, + force_upgrade=False): """Perform action-managed OpenStack upgrade. 
Upgrades packages to the configured openstack-origin version and sets @@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): @param package: package name for determining if upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class + @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package): + if openstack_upgrade_available(package) or force_upgrade: if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') @@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'): return SubordinatePackages(install, purge) +def get_subordinate_services(): + """Iterate over subordinate relations and get service information. + + In a similar fashion as with get_subordinate_release_packages(), + principle charms can retrieve a list of services advertised by their + subordinate charms. This is useful to know about subordinate services when + pausing, resuming or upgrading a principle unit. + + :returns: Name of all services advertised by all subordinates + :rtype: Set[str] + """ + services = set() + for rdata in container_scoped_relation_get('services'): + services |= set(json.loads(rdata or '[]')) + return services + + os_restart_on_change = partial( pausable_restart_on_change, can_restart_now_f=deferred_events.check_and_record_restart_request, diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 3eb46d70..c70aeb20 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -294,7 +294,6 @@ def __init__(self, service, name=None, percent_data=None, app_name=None, # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 self.op = op or {} if op: @@ -341,7 +340,8 @@ def _post_create(self): Do not add calls for a specific pool type here, those should go into one of the pool specific classes. 
""" - if self.nautilus_or_later: + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: # Ensure we set the expected pool ratio update_pool( client=self.service, @@ -660,8 +660,9 @@ def _create(self): else: self.pg_num = self.get_pgs(self.replicas, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( @@ -745,9 +746,9 @@ def _create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index e710c0e0..0906c5c0 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -29,6 +29,7 @@ 'groovy', 'hirsute', 'impish', + 'jammy', ) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 6c7cf6fc..cf8328f0 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -275,6 +275,7 @@ ('groovy', 'victoria'), ('hirsute', 'wallaby'), ('impish', 'xena'), + ('jammy', 'yoga'), ]) diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 9b7299dd..025ab866 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -3169,6 +3169,8 @@ def dirs_need_ownership_update(service): ('luminous', 'mimic'), ('mimic', 'nautilus'), ('nautilus', 'octopus'), + ('octopus', 'pacific'), + ('pacific', 'quincy'), ]) # Map UCA codenames to ceph codenames @@ -3186,6 +3188,10 @@ def dirs_need_ownership_update(service): 'stein': 'mimic', 'train': 'nautilus', 'ussuri': 'octopus', + 'victoria': 'octopus', + 'wallaby': 'pacific', + 'xena': 'pacific', + 'yoga': 'quincy', } diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 5c25af59..3eeb1b52 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -10,7 +10,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index b70ac138..335b04d7 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -1,31 +1,26 @@ - project: templates: - - charm-unit-jobs + - charm-yoga-unit-jobs + - charm-yoga-functional-jobs + - charm-xena-functional-jobs + - charm-wallaby-functional-jobs + - charm-victoria-functional-jobs + - charm-ussuri-functional-jobs + - charm-stein-functional-jobs + - charm-queens-functional-jobs check: jobs: - - bionic-queens # luminous - - bionic-stein - - bionic-train - - bionic-ussuri - - focal-ussuri - focal-ussuri-ec - - focal-victoria - focal-victoria-ec - - focal-wallaby - focal-wallaby-ec - - focal-xena: - voting: false - - focal-wallaby-ec: + - focal-xena-ec + - focal-yoga-ec: voting: false - - groovy-victoria - - groovy-victoria-ec - - hirsute-wallaby - hirsute-wallaby-ec - - impish-xena: - voting: false - impish-xena-ec: voting: false - - hirsute-wallaby-ec + - jammy-yoga-ec: + voting: false - job: name: focal-ussuri-ec parent: func-target @@ -46,11 +41,17 @@ 
vars: tox_extra_args: erasure-coded:focal-wallaby-ec - job: - name: groovy-victoria-ec + name: focal-xena-ec parent: func-target dependencies: *smoke-jobs vars: - tox_extra_args: erasure-coded:groovy-victoria-ec + tox_extra_args: erasure-coded:focal-xena-ec +- job: + name: focal-yoga-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:focal-yoga-ec - job: name: hirsute-wallaby-ec parent: func-target @@ -63,3 +64,9 @@ dependencies: *smoke-jobs vars: tox_extra_args: erasure-coded:impish-xena-ec +- job: + name: jammy-yoga-ec + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: erasure-coded:jammy-yoga-ec diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index dba2c767..f853625d 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -7,6 +7,8 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 diff --git a/ceph-proxy/tests/bundles/focal-yoga-ec.yaml b/ceph-proxy/tests/bundles/focal-yoga-ec.yaml new file mode 100644 index 00000000..e60614da --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-yoga-ec.yaml @@ -0,0 +1,215 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + + keystone: + charm: 
cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-yoga.yaml b/ceph-proxy/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..f4d6fb6d --- /dev/null +++ b/ceph-proxy/tests/bundles/focal-yoga.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + + ceph-proxy: + charm: ceph-proxy + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: cs:~openstack-charmers-next/ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + + cinder: + charm: cs:~openstack-charmers-next/cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + + cinder-ceph: + charm: cs:~openstack-charmers-next/cinder-ceph + options: + restrict-ceph-pools: True + + keystone: + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + + glance: + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/groovy-victoria-ec.yaml b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml similarity index 99% rename from ceph-proxy/tests/bundles/groovy-victoria-ec.yaml rename to ceph-proxy/tests/bundles/jammy-yoga-ec.yaml index b0b04d8f..5ea74baa 100644 --- 
a/ceph-proxy/tests/bundles/groovy-victoria-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-proxy/tests/bundles/groovy-victoria.yaml b/ceph-proxy/tests/bundles/jammy-yoga.yaml similarity index 99% rename from ceph-proxy/tests/bundles/groovy-victoria.yaml rename to ceph-proxy/tests/bundles/jammy-yoga.yaml index 74a29970..178f679a 100644 --- a/ceph-proxy/tests/bundles/groovy-victoria.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-proxy/tests/bundles/trusty-mitaka.yaml b/ceph-proxy/tests/bundles/trusty-mitaka.yaml deleted file mode 100644 index 7dbef7a8..00000000 --- a/ceph-proxy/tests/bundles/trusty-mitaka.yaml +++ /dev/null @@ -1,115 +0,0 @@ -series: trusty -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:trusty-mitaka - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:trusty-mitaka - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:trusty-mitaka - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: trusty-mitaka - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - nova-cloud-controller: - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:trusty-mitaka - constraints: mem=1024 - percona-cluster: - charm: 'cs:trusty/percona-cluster' - num_units: 1 - options: - source: cloud:trusty-mitaka - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:trusty-mitaka -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 
'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - 'glance:image-service' - - 'nova-cloud-controller:image-service' - - - 'keystone:identity-service' - - 'nova-cloud-controller:identity-service' - - - 'nova-compute:cloud-compute' - - 'nova-cloud-controller:cloud-compute' - - - 'percona-cluster:shared-db' - - 'nova-cloud-controller:shared-db' - - - 'rabbitmq-server:amqp' - - 'nova-cloud-controller:amqp' - diff --git a/ceph-proxy/tests/bundles/xenial-mitaka.yaml b/ceph-proxy/tests/bundles/xenial-mitaka.yaml deleted file mode 100644 index 0c4ee4d4..00000000 --- a/ceph-proxy/tests/bundles/xenial-mitaka.yaml +++ /dev/null @@ -1,85 +0,0 @@ -series: xenial -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - constraints: mem=1024 - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-ocata.yaml b/ceph-proxy/tests/bundles/xenial-ocata.yaml deleted file mode 100644 index d7aa8bd7..00000000 --- a/ceph-proxy/tests/bundles/xenial-ocata.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: xenial -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:xenial-ocata - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:xenial-ocata - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:xenial-ocata - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: xenial-ocata - cinder: - charm: 
'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:xenial-ocata - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:xenial-ocata -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-pike.yaml b/ceph-proxy/tests/bundles/xenial-pike.yaml deleted file mode 100644 index 0c48f986..00000000 --- a/ceph-proxy/tests/bundles/xenial-pike.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:xenial-pike - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:xenial-pike - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:xenial-pike - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: xenial-pike - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:xenial-pike - dataset-size: 50% - max-connections: 1000 - 
innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:xenial-pike -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/xenial-queens.yaml b/ceph-proxy/tests/bundles/xenial-queens.yaml deleted file mode 100644 index 9fd00d56..00000000 --- a/ceph-proxy/tests/bundles/xenial-queens.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: xenial -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:xenial-queens - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:xenial-queens - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:xenial-queens - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:xenial-queens - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:xenial-queens - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:xenial-queens -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 
'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index a2220a87..cefa87df 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -12,10 +12,8 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes gate_bundles: - - xenial-mitaka # jewel - - bionic-queens # luminous + - bionic-queens - bionic-stein - - bionic-train - bionic-ussuri - focal-ussuri - erasure-coded: focal-ussuri-ec @@ -25,24 +23,19 @@ gate_bundles: - erasure-coded: focal-wallaby-ec - focal-xena - erasure-coded: focal-xena-ec - - groovy-victoria - - erasure-coded: groovy-victoria-ec - -dev_bundles: - # Icehouse - - trusty-icehouse - # Jewel - - trusty-mitaka - - xenial-ocata - # Pike - - xenial-pike - - xenial-queens # luminous - - bionic-rocky # mimic - hirsute-wallaby - erasure-coded: hirsute-wallaby-ec - impish-xena - erasure-coded: impish-xena-ec +dev_bundles: + - bionic-rocky # mimic + - bionic-train + - focal-yoga + - erasure-coded: focal-yoga-ec + - jammy-yoga + - erasure-coded: jammy-yoga-ec + smoke_bundles: - focal-ussuri @@ -75,3 +68,5 @@ tests_options: - hirsute-wallaby-ec - impish-xena - impish-xena-ec + - jammy-yoga + - jammy-yoga-ec diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index ba4fd5b6..86d1e904 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -61,6 +61,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 21aae90e70317365a1f2fa56026b6174b7fbcc83 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:38 -0400 Subject: [PATCH 2297/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * sync from release-tools * switch to release-specific zosci functional tests * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata/tests * drop py35 and add py39 * drop bluestore model alias to simplify osci.yaml Change-Id: I1df6bb74fd96d934aa97cce18307a3227b165882 --- ceph-fs/osci.yaml | 105 +-------- ceph-fs/requirements.txt | 1 + ceph-fs/src/metadata.yaml | 1 - ceph-fs/src/tests/bundles/focal-yoga.yaml | 222 ++++++++++++++++++ .../{groovy-victoria.yaml => jammy-yoga.yaml} | 2 +- ceph-fs/src/tests/bundles/xenial-mitaka.yaml | 112 --------- ceph-fs/src/tests/bundles/xenial-ocata.yaml | 127 ---------- ceph-fs/src/tests/bundles/xenial-pike.yaml | 123 ---------- ceph-fs/src/tests/bundles/xenial-queens.yaml | 127 ---------- ceph-fs/src/tests/tests.yaml | 41 ++-- ceph-fs/test-requirements.txt | 2 + ceph-fs/tox.ini | 5 + 12 files changed, 254 insertions(+), 614 deletions(-) create mode 100644 ceph-fs/src/tests/bundles/focal-yoga.yaml rename ceph-fs/src/tests/bundles/{groovy-victoria.yaml => jammy-yoga.yaml} (99%) delete mode 100644 ceph-fs/src/tests/bundles/xenial-mitaka.yaml delete mode 100644 ceph-fs/src/tests/bundles/xenial-ocata.yaml delete mode 100644 ceph-fs/src/tests/bundles/xenial-pike.yaml delete mode 100644 ceph-fs/src/tests/bundles/xenial-queens.yaml diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 683aa103..6569e1e2 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -1,102 +1,13 @@ -- job: - name: bionic-queens_local - parent: bionic-queens - dependencies: - - osci-lint - - 
tox-py35 - - tox-py36 - - tox-py37 - - tox-py38 -- job: - name: impish-xena-bluestore - parent: func-target - dependencies: &smoke-jobs - - bionic-queens_local - vars: - tox_extra_args: bluestore-compression:impish-xena -- job: - name: hirsute-wallaby-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:hirsute-wallaby -- job: - name: groovy-victoria-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:groovy-victoria -- job: - name: xenial-mitaka_local - parent: xenial-mitaka - dependencies: *smoke-jobs -- job: - name: focal-wallaby-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:focal-wallaby -- job: - name: focal-xena-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:focal-xena -- job: - name: focal-victoria-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:focal-victoria -- job: - name: focal-ussuri-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:focal-ussuri -- job: - name: bionic-ussuri-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:bionic-ussuri -- job: - name: bionic-train-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:bionic-train -- job: - name: bionic-stein-bluestore - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bluestore-compression:bionic-stein - - project: templates: - - charm-unit-jobs - # using overridden, shorter functional list because the charm does - # already in the tests.yaml - # - charm-functional-jobs - check: - jobs: - - bionic-queens_local - # Xenial-pike is missing because of - # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - - xenial-mitaka_local - - impish-xena-bluestore: - voting: false - - hirsute-wallaby-bluestore - - groovy-victoria-bluestore - - focal-xena-bluestore: - voting: false - - focal-wallaby-bluestore - - focal-victoria-bluestore - - focal-ussuri-bluestore - - bionic-ussuri-bluestore - - bionic-train-bluestore - - bionic-stein-bluestore + - charm-yoga-unit-jobs + - charm-yoga-functional-jobs + - charm-xena-functional-jobs + - charm-wallaby-functional-jobs + - charm-victoria-functional-jobs + - charm-ussuri-functional-jobs + - charm-stein-functional-jobs + - charm-queens-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-fs diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index b786b428..a68620f6 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -9,6 +9,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # Build requirements +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
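The qualifier on the cffi pin above is a PEP 508 environment marker, evaluated per interpreter at install time. A minimal sketch of that evaluation, assuming the 'packaging' library (the implementation pip vendors) is available; the marker string is taken straight from the pin:

    # The same marker pip evaluates on the cffi line; True only on
    # Python 3.5 and older, exactly the interpreters that cffi 1.15.0
    # dropped, so the ==1.14.6 pin applies only there.
    from packaging.markers import Marker

    marker = Marker("python_version < '3.6'")
    print(marker.evaluate())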
charm-tools==2.8.3 simplejson diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index bebfe94f..4fc58d1d 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -10,7 +10,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-fs/src/tests/bundles/focal-yoga.yaml b/ceph-fs/src/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..7e5bf103 --- /dev/null +++ b/ceph-fs/src/tests/bundles/focal-yoga.yaml @@ -0,0 +1,222 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: &series focal + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + '3': + + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + glance-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + nova-cloud-controller-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + placement-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + neutron-api-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + ceph-fs: + charm: ceph-fs + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + + rabbitmq-server: + charm: cs:~openstack-charmers-next/rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + + glance: + expose: True + charm: cs:~openstack-charmers-next/glance + num_units: 1 + options: + openstack-origin: *openstack-origin + + nova-cloud-controller: + expose: True + charm: cs:~openstack-charmers-next/nova-cloud-controller + num_units: 1 + options: + network-manager: Neutron + openstack-origin: *openstack-origin + + nova-compute: + charm: cs:~openstack-charmers-next/nova-compute + num_units: 2 + constraints: mem=8G + options: + config-flags: default_ephemeral_format=ext4 + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: *openstack-origin + + placement: + charm: cs:~openstack-charmers-next/placement + num_units: 1 + options: + openstack-origin: *openstack-origin + + neutron-api: + charm: cs:~openstack-charmers-next/neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + flat-network-providers: physnet1 + neutron-security-groups: true + openstack-origin: *openstack-origin + + neutron-openvswitch: + charm: cs:~openstack-charmers-next/neutron-openvswitch + + neutron-gateway: + charm: cs:~openstack-charmers-next/neutron-gateway + num_units: 1 + options: + bridge-mappings: physnet1:br-ex + openstack-origin: *openstack-origin + +relations: + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 
'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:neutron-plugin-api' + - 'neutron-gateway:neutron-plugin-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-openvswitch:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/groovy-victoria.yaml b/ceph-fs/src/tests/bundles/jammy-yoga.yaml similarity index 99% rename from ceph-fs/src/tests/bundles/groovy-victoria.yaml rename to ceph-fs/src/tests/bundles/jammy-yoga.yaml index bdc91e12..35764860 100644 --- a/ceph-fs/src/tests/bundles/groovy-victoria.yaml +++ b/ceph-fs/src/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: &series groovy +series: &series jammy machines: '0': diff --git a/ceph-fs/src/tests/bundles/xenial-mitaka.yaml b/ceph-fs/src/tests/bundles/xenial-mitaka.yaml deleted file mode 100644 index 71486c14..00000000 --- a/ceph-fs/src/tests/bundles/xenial-mitaka.yaml +++ /dev/null @@ -1,112 +0,0 @@ -series: xenial -applications: - ceph-fs: - charm: ceph-fs - series: xenial - num_units: 1 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: 
- network-manager: Neutron - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-ocata.yaml b/ceph-fs/src/tests/bundles/xenial-ocata.yaml deleted file mode 100644 index ee645f3e..00000000 --- a/ceph-fs/src/tests/bundles/xenial-ocata.yaml +++ /dev/null @@ -1,127 +0,0 @@ -series: xenial -applications: - ceph-fs: - charm: ceph-fs - series: xenial - num_units: 1 - options: - source: cloud:xenial-ocata - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-ocata - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:xenial-ocata - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-ocata - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-ocata - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-ocata - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: cloud:xenial-ocata - nova-compute: - 
charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:xenial-ocata - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:xenial-ocata - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:xenial-ocata -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-pike.yaml b/ceph-fs/src/tests/bundles/xenial-pike.yaml deleted file mode 100644 index da172d31..00000000 --- a/ceph-fs/src/tests/bundles/xenial-pike.yaml +++ /dev/null @@ -1,123 +0,0 @@ -series: xenial -applications: - ceph-fs: - charm: ceph-fs - series: xenial - num_units: 1 - options: - source: cloud:xenial-pike - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-pike - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:xenial-pike - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-pike - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-pike - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-pike - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron 
- openstack-origin: cloud:xenial-pike - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G root-disk=20G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:xenial-pike - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:xenial-pike - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:xenial-pike -relations: -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/xenial-queens.yaml b/ceph-fs/src/tests/bundles/xenial-queens.yaml deleted file mode 100644 index dc0cc9bc..00000000 --- a/ceph-fs/src/tests/bundles/xenial-queens.yaml +++ /dev/null @@ -1,127 +0,0 @@ -series: xenial -applications: - ceph-fs: - charm: ceph-fs - series: xenial - num_units: 1 - options: - source: cloud:xenial-queens - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:xenial-queens - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:xenial-queens - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:xenial-queens - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:xenial-queens - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:xenial-queens - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: 
Neutron - openstack-origin: cloud:xenial-queens - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:xenial-queens - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:xenial-queens - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:xenial-queens -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index e38bbe87..ba64ac30 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,44 +1,33 @@ charm_name: ceph-fs gate_bundles: - - bluestore-compression: groovy-victoria - - bluestore-compression: focal-wallaby - - bluestore-compression: focal-victoria - - bluestore-compression: focal-ussuri - - bluestore-compression: bionic-ussuri - - bluestore-compression: bionic-train - - bluestore-compression: bionic-stein - - bluestore-compression: bionic-rocky - bionic-queens - - xenial-mitaka + - bionic-stein + - bionic-ussuri + - focal-ussuri + - focal-victoria + - focal-wallaby + - focal-xena + - hirsute-wallaby + - impish-xena dev_bundles: - - bluestore-compression: impish-xena - - bluestore-compression: hirsute-wallaby - - xenial-ocata - # Xenial-pike is missing because of - # https://bugs.launchpad.net/charm-nova-compute/+bug/1862624 - - xenial-queens + - bionic-rocky + - bionic-train + - focal-yoga + - jammy-yoga smoke_bundles: - - bluestore-compression: focal-ussuri + - focal-ussuri configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network - zaza.openstack.charm_tests.nova.setup.create_flavors - 
zaza.openstack.charm_tests.nova.setup.manage_ssh_key - zaza.openstack.charm_tests.keystone.setup.add_demo_user - - bluestore-compression: - - zaza.openstack.charm_tests.glance.setup.add_lts_image - - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network - - zaza.openstack.charm_tests.nova.setup.create_flavors - - zaza.openstack.charm_tests.nova.setup.manage_ssh_key - - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - - bluestore-compression: - - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation + - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - hirsute-wallaby - impish-xena + - jammy-yoga diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index af069e1b..bb1307f5 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -3,6 +3,8 @@ # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 stestr>=2.2.0 diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 22159df2..faf6092e 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -75,6 +75,11 @@ basepython = python3.8 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} + [testenv:pep8] basepython = python3 deps = flake8==3.9.2 From 5e41b9426b23cc3b99a772b112a0209c1f85be13 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Fri, 19 Nov 2021 17:17:34 -0500 Subject: [PATCH 2298/2699] Use openstack-charmers namespace when deploying The charm will not deploy without specifying the 'openstack-charmers' namespace as the charm is not promulgated at this time. It would be good for this change to be backported. Change-Id: I930e37d99f00b2b57cd644ca24d0f9b57bdfc66f --- ceph-dashboard/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index 96123f44..e8287784 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -41,9 +41,9 @@ passwords can be used. These options are ignored unless We are assuming a pre-existing Ceph cluster. -Deploy the ceph-dashboard as a subordinate to the ceph-mon charm. +Deploy ceph-dashboard as a subordinate to the ceph-mon charm: - juju deploy ceph-dashboard + juju deploy cs:~openstack-charmers/ceph-dashboard juju add-relation ceph-dashboard:dashboard ceph-mon:dashboard TLS is a requirement for this charm. 
Enable it by adding a relation to the From 66be55d9777c8ad05521719f88b382f7d6055d07 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 29 Oct 2021 17:00:39 -0400 Subject: [PATCH 2299/2699] Add yoga bundles and release-tool syncs * charm-helpers sync for classic charms * sync from release-tools * switch to release-specific zosci functional tests * run focal-ussuri as smoke tests * remove trusty, xenial, and groovy metadata/tests * drop py35 and add py39 * charms.ceph sync Change-Id: I8b0ac822cdf37d70ac39f1b115f95a448afb624d --- .../charmhelpers/contrib/openstack/utils.py | 23 +++- .../contrib/storage/linux/ceph.py | 11 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/fetch/ubuntu.py | 1 + ceph-radosgw/lib/charms_ceph/utils.py | 6 + ceph-radosgw/metadata.yaml | 1 - ceph-radosgw/osci.yaml | 97 ++++++++------- ceph-radosgw/test-requirements.txt | 2 + .../tests/bundles/focal-yoga-namespaced.yaml | 117 ++++++++++++++++++ ceph-radosgw/tests/bundles/focal-yoga.yaml | 116 +++++++++++++++++ ...spaced.yaml => jammy-yoga-namespaced.yaml} | 2 +- .../{groovy-victoria.yaml => jammy-yoga.yaml} | 2 +- ceph-radosgw/tests/bundles/trusty-mitaka.yaml | 42 ------- .../bundles/xenial-mitaka-namespaced.yaml | 43 ------- ceph-radosgw/tests/bundles/xenial-mitaka.yaml | 42 ------- ceph-radosgw/tests/bundles/xenial-ocata.yaml | 42 ------- ceph-radosgw/tests/bundles/xenial-pike.yaml | 42 ------- ceph-radosgw/tests/bundles/xenial-queens.yaml | 51 -------- ceph-radosgw/tests/tests.yaml | 54 ++++---- ceph-radosgw/tox.ini | 5 + 20 files changed, 352 insertions(+), 348 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-yoga.yaml rename ceph-radosgw/tests/bundles/{groovy-victoria-namespaced.yaml => jammy-yoga-namespaced.yaml} (99%) rename ceph-radosgw/tests/bundles/{groovy-victoria.yaml => jammy-yoga.yaml} (99%) delete mode 100644 ceph-radosgw/tests/bundles/trusty-mitaka.yaml delete mode 100644 ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/xenial-mitaka.yaml delete mode 100644 ceph-radosgw/tests/bundles/xenial-ocata.yaml delete mode 100644 ceph-radosgw/tests/bundles/xenial-pike.yaml delete mode 100644 ceph-radosgw/tests/bundles/xenial-queens.yaml diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index d5d301e6..9cc96d60 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs): +def do_action_openstack_upgrade(package, upgrade_callback, configs, + force_upgrade=False): """Perform action-managed OpenStack upgrade. 
Upgrades packages to the configured openstack-origin version and sets @@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): @param package: package name for determining if upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class + @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package): + if openstack_upgrade_available(package) or force_upgrade: if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') @@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'): return SubordinatePackages(install, purge) +def get_subordinate_services(): + """Iterate over subordinate relations and get service information. + + In a similar fashion as with get_subordinate_release_packages(), + principle charms can retrieve a list of services advertised by their + subordinate charms. This is useful to know about subordinate services when + pausing, resuming or upgrading a principle unit. + + :returns: Name of all services advertised by all subordinates + :rtype: Set[str] + """ + services = set() + for rdata in container_scoped_relation_get('services'): + services |= set(json.loads(rdata or '[]')) + return services + + os_restart_on_change = partial( pausable_restart_on_change, can_restart_now_f=deferred_events.check_and_record_restart_request, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 3eb46d70..c70aeb20 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -294,7 +294,6 @@ def __init__(self, service, name=None, percent_data=None, app_name=None, # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. self.service = service - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 self.op = op or {} if op: @@ -341,7 +340,8 @@ def _post_create(self): Do not add calls for a specific pool type here, those should go into one of the pool specific classes. 
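The force_upgrade flag and the new get_subordinate_services() helper above are meant to be driven from a principal charm. A hedged sketch of such a caller; register_configs() and do_openstack_upgrade() are assumed charm-provided helpers rather than charm-helpers API, and 'radosgw' is only an example package name:

    # Sketch only. The charmhelpers imports match the module patched
    # above; register_configs() and do_openstack_upgrade() are assumed
    # to be defined by the charm itself.
    from charmhelpers.contrib.openstack.utils import (
        do_action_openstack_upgrade,
        get_subordinate_services,
    )
    from charmhelpers.core.host import service_pause

    def openstack_upgrade():
        configs = register_configs()  # charm-specific OSConfigRenderer
        # force_upgrade=True requests the dist-upgrade even when no new
        # OpenStack release is detected for the package.
        do_action_openstack_upgrade('radosgw', do_openstack_upgrade,
                                    configs, force_upgrade=False)

    def pause_all(principal_services):
        # Stop the principal's services plus any services advertised by
        # subordinate charms over container-scoped relations.
        for svc in set(principal_services) | get_subordinate_services():
            service_pause(svc)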
""" - if self.nautilus_or_later: + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: # Ensure we set the expected pool ratio update_pool( client=self.service, @@ -660,8 +660,9 @@ def _create(self): else: self.pg_num = self.get_pgs(self.replicas, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( @@ -745,9 +746,9 @@ def _create(self): k = int(erasure_profile['k']) m = int(erasure_profile['m']) pgs = self.get_pgs(k + m, self.percent_data) - self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 # Create it - if self.nautilus_or_later: + if nautilus_or_later: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', '--pg-num-min={}'.format( diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index e710c0e0..0906c5c0 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -29,6 +29,7 @@ 'groovy', 'hirsute', 'impish', + 'jammy', ) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 6c7cf6fc..cf8328f0 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -275,6 +275,7 @@ ('groovy', 'victoria'), ('hirsute', 'wallaby'), ('impish', 'xena'), + ('jammy', 'yoga'), ]) diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 9b7299dd..025ab866 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -3169,6 +3169,8 @@ def dirs_need_ownership_update(service): ('luminous', 'mimic'), ('mimic', 'nautilus'), ('nautilus', 'octopus'), + ('octopus', 'pacific'), + ('pacific', 'quincy'), ]) # Map UCA codenames to ceph codenames @@ -3186,6 +3188,10 @@ def dirs_need_ownership_update(service): 'stein': 'mimic', 'train': 'nautilus', 'ussuri': 'octopus', + 'victoria': 'octopus', + 'wallaby': 'pacific', + 'xena': 'pacific', + 'yoga': 'quincy', } diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 4d3b216b..4399dcc1 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -13,7 +13,6 @@ tags: - file-servers - misc series: -- xenial - bionic - focal - groovy diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index cc369b42..046cdf01 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -1,52 +1,65 @@ - project: templates: - - charm-unit-jobs + - charm-yoga-unit-jobs check: jobs: - - vault-impish-xena_rgw: + - vault-bionic-queens + - vault-bionic-queens-namespaced + - vault-bionic-stein + - vault-bionic-stein-namespaced + - vault-bionic-ussuri + - vault-bionic-ussuri-namespaced + - vault-focal-ussuri-ec + - vault-focal-ussuri_rgw + - vault-focal-ussuri-namespaced + - vault-focal-victoria_rgw + - vault-focal-victoria-namespaced + - vault-focal-wallaby_rgw + - vault-focal-wallaby-namespaced + - vault-focal-xena_rgw + - vault-focal-xena-namespaced + - vault-focal-yoga_rgw: voting: false - - vault-impish-xena-namespaced: + - vault-focal-yoga-namespaced: voting: false - vault-hirsute-wallaby_rgw - vault-hirsute-wallaby-namespaced - - vault-focal-xena_rgw: + - vault-impish-xena_rgw: 
voting: false - - vault-focal-xena-namespaced: + - vault-impish-xena-namespaced: + voting: false + - vault-jammy-yoga_rgw: + voting: false + - vault-jammy-yoga-namespaced: voting: false - - vault-focal-wallaby_rgw - - vault-focal-wallaby-namespaced - - vault-focal-victoria_rgw - - vault-focal-victoria-namespaced - - vault-focal-ussuri-ec - - vault-focal-ussuri_rgw - - vault-focal-ussuri-namespaced - - vault-bionic-ussuri - - vault-bionic-ussuri-namespaced - - vault-bionic-train - - vault-bionic-train-namespaced - - vault-bionic-stein - - vault-bionic-stein-namespaced - - vault-bionic-queens - - vault-bionic-queens-namespaced - - xenial-mitaka_rgw - - xenial-mitaka-namespaced - job: name: vault-bionic-ussuri parent: func-target dependencies: - osci-lint - - tox-py35 - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 vars: tox_extra_args: vault:bionic-ussuri - job: - name: vault-impish-xena_rgw + name: vault-jammy-yoga_rgw parent: func-target dependencies: &smoke-jobs - vault-bionic-ussuri + vars: + tox_extra_args: vault:jammy-yoga +- job: + name: vault-jammy-yoga-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:jammy-yoga-namespaced +- job: + name: vault-impish-xena_rgw + parent: func-target + dependencies: *smoke-jobs vars: tox_extra_args: vault:impish-xena - job: @@ -67,6 +80,18 @@ dependencies: *smoke-jobs vars: tox_extra_args: vault:hirsute-wallaby-namespaced +- job: + name: vault-focal-yoga_rgw + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-yoga +- job: + name: vault-focal-yoga-namespaced + parent: func-target + dependencies: *smoke-jobs + vars: + tox_extra_args: vault:focal-yoga-namespaced - job: name: vault-focal-xena_rgw parent: func-target @@ -127,18 +152,6 @@ dependencies: *smoke-jobs vars: tox_extra_args: vault:bionic-ussuri-namespaced -- job: - name: vault-bionic-train - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-train -- job: - name: vault-bionic-train-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-train-namespaced - job: name: vault-bionic-stein parent: func-target @@ -163,13 +176,3 @@ dependencies: *smoke-jobs vars: tox_extra_args: vault:bionic-queens-namespaced -- job: - name: xenial-mitaka_rgw - parent: xenial-mitaka - dependencies: *smoke-jobs -- job: - name: xenial-mitaka-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: xenial-mitaka-namespaced diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index dba2c767..f853625d 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -7,6 +7,8 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml new file mode 100644 index 00000000..61d3ad80 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml @@ -0,0 +1,117 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..b39bec22 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga.yaml @@ -0,0 +1,116 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + mysql-innodb-cluster: + charm: cs:~openstack-charmers-next/mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + + ceph-radosgw: + charm: ceph-radosgw + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + + keystone: + expose: True + charm: cs:~openstack-charmers-next/keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + + vault-mysql-router: + charm: cs:~openstack-charmers-next/mysql-router + + vault: + charm: cs:~openstack-charmers-next/vault + num_units: 1 + to: + - '11' + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml similarity index 99% rename from ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml rename to ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml index 33cf983b..078cd2b7 100644 --- a/ceph-radosgw/tests/bundles/groovy-victoria-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml @@ -1,7 +1,7 @@ options: source: &source distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/groovy-victoria.yaml b/ceph-radosgw/tests/bundles/jammy-yoga.yaml similarity index 99% rename from ceph-radosgw/tests/bundles/groovy-victoria.yaml rename to ceph-radosgw/tests/bundles/jammy-yoga.yaml index b7eaa117..adbf5831 100644 --- a/ceph-radosgw/tests/bundles/groovy-victoria.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga.yaml @@ -1,7 +1,7 @@ options: source: &source distro -series: groovy +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml b/ceph-radosgw/tests/bundles/trusty-mitaka.yaml deleted file mode 100644 index b56a0129..00000000 --- a/ceph-radosgw/tests/bundles/trusty-mitaka.yaml +++ /dev/null @@ -1,42 +0,0 @@ -options: - source: &source cloud:trusty-mitaka -series: trusty -applications: - ceph-radosgw: - charm: ceph-radosgw - series: trusty - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:trusty/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml deleted file mode 100644 index a7af5178..00000000 --- a/ceph-radosgw/tests/bundles/xenial-mitaka-namespaced.yaml +++ /dev/null @@ -1,43 +0,0 @@ -options: - source: &source distro -series: xenial -applications: - ceph-radosgw: - charm: ceph-radosgw - series: xenial - num_units: 1 - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml b/ceph-radosgw/tests/bundles/xenial-mitaka.yaml deleted file mode 100644 index 92c05a58..00000000 --- a/ceph-radosgw/tests/bundles/xenial-mitaka.yaml +++ /dev/null @@ -1,42 +0,0 @@ -options: - source: &source distro -series: xenial -applications: - ceph-radosgw: - charm: ceph-radosgw - series: xenial - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git 
a/ceph-radosgw/tests/bundles/xenial-ocata.yaml b/ceph-radosgw/tests/bundles/xenial-ocata.yaml deleted file mode 100644 index 8b910432..00000000 --- a/ceph-radosgw/tests/bundles/xenial-ocata.yaml +++ /dev/null @@ -1,42 +0,0 @@ -options: - source: &source cloud:xenial-ocata -series: xenial -applications: - ceph-radosgw: - charm: ceph-radosgw - series: xenial - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-pike.yaml b/ceph-radosgw/tests/bundles/xenial-pike.yaml deleted file mode 100644 index c9fd580a..00000000 --- a/ceph-radosgw/tests/bundles/xenial-pike.yaml +++ /dev/null @@ -1,42 +0,0 @@ -options: - source: &source cloud:xenial-pike -series: xenial -applications: - ceph-radosgw: - charm: ceph-radosgw - series: xenial - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service diff --git a/ceph-radosgw/tests/bundles/xenial-queens.yaml b/ceph-radosgw/tests/bundles/xenial-queens.yaml deleted file mode 100644 index e4fc7a5f..00000000 --- a/ceph-radosgw/tests/bundles/xenial-queens.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source cloud:xenial-queens -series: xenial -applications: - ceph-radosgw: - charm: ceph-radosgw - series: xenial - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - 
percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index c6c3ddeb..29448f20 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,44 +1,40 @@ charm_name: ceph-radosgw gate_bundles: - - vault: focal-xena - - vault: focal-xena-namespaced - - vault: focal-wallaby - - vault: focal-wallaby-namespaced - - vault: focal-victoria - - vault: focal-victoria-namespaced - - vault: focal-ussuri-ec - - vault: focal-ussuri - - vault: focal-ussuri-namespaced - - vault: bionic-ussuri - - vault: bionic-ussuri-namespaced - - vault: bionic-train - - vault: bionic-train-namespaced - - vault: bionic-stein - - vault: bionic-stein-namespaced - vault: bionic-queens - vault: bionic-queens-namespaced - - xenial-mitaka - - xenial-mitaka-namespaced + - vault: bionic-stein + - vault: bionic-stein-namespaced + - vault: bionic-ussuri + - vault: bionic-ussuri-namespaced + - vault: focal-ussuri + - vault: focal-ussuri-ec + - vault: focal-ussuri-namespaced + - vault: focal-victoria + - vault: focal-victoria-namespaced + - vault: focal-wallaby + - vault: focal-wallaby-namespaced + - vault: focal-xena + - vault: focal-xena-namespaced + - vault: hirsute-wallaby + - vault: hirsute-wallaby-namespaced + - vault: impish-xena + - vault: impish-xena-namespaced smoke_bundles: - vault: focal-ussuri dev_bundles: - - trusty-mitaka - - xenial-ocata - - xenial-pike - - vault: xenial-queens - bionic-queens-multisite - bionic-rocky-multisite - vault: bionic-rocky - vault: bionic-rocky-namespaced - - vault: groovy-victoria - - vault: groovy-victoria-namespaced - - vault: hirsute-wallaby - - vault: hirsute-wallaby-namespaced - - vault: impish-xena - - vault: impish-xena-namespaced + - vault: bionic-train + - vault: bionic-train-namespaced + - vault: focal-yoga + - vault: focal-yoga-namespaced + - vault: jammy-yoga + - vault: jammy-yoga-namespaced target_deploy_status: vault: @@ -61,7 +57,7 @@ tests_options: force_deploy: - hirsute-wallaby - hirsute-wallaby-namespaced - - groovy-victoria - - groovy-victoria-namespaced - impish-xena - impish-xena-namespaced + - jammy-yoga + - jammy-yoga-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index ba4fd5b6..86d1e904 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -61,6 +61,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 747cf736fd324349d2099f2ac94115ec9bb7fb99 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 26 Nov 2021 12:31:43 +0000 Subject: [PATCH 2300/2699] Fix get_mon_map() for octopus and later The "ceph mon_status" command seems to have disappeared on octopus and later, and is replaced by "ceph quorum_status". This changes the get_mon_map() function to detect the underlying ceph version and do the right thing. Note that the fix is actually in charm-helpers, and this has been manually synced into the charm [1]. 
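For reference, cmp_pkgrevno() compares the version of an installed package against a given version string and returns a negative, zero or positive value, cmp()-style. The gate this change introduces therefore boils down to the following minimal standalone sketch (an illustration only, assuming ceph-common is installed on the unit; the authoritative code is the synced charm-helpers hunk below):

    from charmhelpers.core.host import cmp_pkgrevno

    def mon_status_command():
        # Octopus ships as ceph-common 15.x, so >= 0 means Octopus or later,
        # where "ceph quorum_status" replaces "ceph mon_status".
        if cmp_pkgrevno('ceph-common', '15.0.0') >= 0:
            return 'quorum_status'
        return 'mon_status'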
[1] https://github.com/juju/charm-helpers/pull/659 Change-Id: I59cf6fc19cf2a91b0aef37059cdb0ed37379b5cb Closes-Bug: #1951094 --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index c70aeb20..9a34e4b0 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -814,8 +814,10 @@ def get_mon_map(service): ceph command fails. """ try: + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status_cmd, '--format=json']) if six.PY3: mon_status = mon_status.decode('UTF-8') try: From 74b8644104e787b2f462e9c4fad237897f47f792 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 26 Nov 2021 13:28:05 +0100 Subject: [PATCH 2301/2699] Initial commit --- ceph-nfs/.flake8 | 9 + ceph-nfs/.gitignore | 8 + ceph-nfs/.jujuignore | 3 + ceph-nfs/.stestr.conf | 3 + ceph-nfs/CONTRIBUTING.md | 34 +++ ceph-nfs/LICENSE | 202 ++++++++++++++++++ ceph-nfs/README.md | 24 +++ ceph-nfs/actions.yaml | 16 ++ ceph-nfs/build-requirements.txt | 1 + ceph-nfs/charmcraft.yaml | 10 + ceph-nfs/config.yaml | 14 ++ ceph-nfs/metadata.yaml | 23 ++ ceph-nfs/osci.yaml | 29 +++ ceph-nfs/requirements-dev.txt | 3 + ceph-nfs/requirements.txt | 5 + ceph-nfs/src/charm.py | 140 ++++++++++++ .../templates/ceph.client.ceph-nfs.keyring | 3 + ceph-nfs/templates/ceph.conf | 15 ++ ceph-nfs/test-requirements.txt | 16 ++ ceph-nfs/tests/bundles/focal-octopus.yaml | 27 +++ ceph-nfs/tests/tests.yaml | 12 ++ ceph-nfs/tox.ini | 133 ++++++++++++ ceph-nfs/unit_tests/__init__.py | 0 ceph-nfs/unit_tests/test_charm.py | 72 +++++++ 24 files changed, 802 insertions(+) create mode 100644 ceph-nfs/.flake8 create mode 100644 ceph-nfs/.gitignore create mode 100644 ceph-nfs/.jujuignore create mode 100644 ceph-nfs/.stestr.conf create mode 100644 ceph-nfs/CONTRIBUTING.md create mode 100644 ceph-nfs/LICENSE create mode 100644 ceph-nfs/README.md create mode 100644 ceph-nfs/actions.yaml create mode 100644 ceph-nfs/build-requirements.txt create mode 100644 ceph-nfs/charmcraft.yaml create mode 100644 ceph-nfs/config.yaml create mode 100644 ceph-nfs/metadata.yaml create mode 100644 ceph-nfs/osci.yaml create mode 100644 ceph-nfs/requirements-dev.txt create mode 100644 ceph-nfs/requirements.txt create mode 100755 ceph-nfs/src/charm.py create mode 100644 ceph-nfs/templates/ceph.client.ceph-nfs.keyring create mode 100644 ceph-nfs/templates/ceph.conf create mode 100644 ceph-nfs/test-requirements.txt create mode 100644 ceph-nfs/tests/bundles/focal-octopus.yaml create mode 100644 ceph-nfs/tests/tests.yaml create mode 100644 ceph-nfs/tox.ini create mode 100644 ceph-nfs/unit_tests/__init__.py create mode 100644 ceph-nfs/unit_tests/test_charm.py diff --git a/ceph-nfs/.flake8 b/ceph-nfs/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/ceph-nfs/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/ceph-nfs/.gitignore b/ceph-nfs/.gitignore new file mode 100644 index 00000000..f3c3e4d8 --- /dev/null +++ b/ceph-nfs/.gitignore @@ -0,0 +1,8 @@ +.tox +**/*.swp +__pycache__ +.stestr/ +lib/* +!lib/README.txt +build +ceph-iscsi.charm diff 
--git a/ceph-nfs/.jujuignore b/ceph-nfs/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/ceph-nfs/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/ceph-nfs/.stestr.conf b/ceph-nfs/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/ceph-nfs/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/ceph-nfs/CONTRIBUTING.md b/ceph-nfs/CONTRIBUTING.md new file mode 100644 index 00000000..61ef5c87 --- /dev/null +++ b/ceph-nfs/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# ceph-nfs + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/ceph-nfs/LICENSE b/ceph-nfs/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-nfs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md new file mode 100644 index 00000000..0276d994 --- /dev/null +++ b/ceph-nfs/README.md @@ -0,0 +1,24 @@ +# ceph-nfs + +## Description + +TODO: Describe your charm in a few paragraphs of Markdown + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Relations + +TODO: Provide any relations which are provided or required by your charm + +## OCI Images + +TODO: Include a link to the default image your charm uses + +## Contributing + +Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines +on enhancements to this charm following best practice guidelines, and +`CONTRIBUTING.md` for developer guidance. diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml new file mode 100644 index 00000000..b38973f5 --- /dev/null +++ b/ceph-nfs/actions.yaml @@ -0,0 +1,16 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. 
+# +# TEMPLATE-TODO: change this example to suit your needs. +# If you don't need actions, you can remove the file entirely. +# It ties in to the example _on_fortune_action handler in src/charm.py +# +# Learn more about actions at: https://juju.is/docs/sdk/actions + +fortune: + description: Returns a pithy phrase. + params: + fail: + description: "Fail with this message" + type: string + default: "" diff --git a/ceph-nfs/build-requirements.txt b/ceph-nfs/build-requirements.txt new file mode 100644 index 00000000..271d8955 --- /dev/null +++ b/ceph-nfs/build-requirements.txt @@ -0,0 +1 @@ +git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/ceph-nfs/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml new file mode 100644 index 00000000..edb967c4 --- /dev/null +++ b/ceph-nfs/config.yaml @@ -0,0 +1,14 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. +# +# TEMPLATE-TODO: change this example to suit your needs. +# If you don't need a config, you can remove the file entirely. +# It ties in to the example _on_config_changed handler in src/charm.py +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + thing: + default: 🎁 + description: A thing used by the charm. + type: string diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml new file mode 100644 index 00000000..16d85320 --- /dev/null +++ b/ceph-nfs/metadata.yaml @@ -0,0 +1,23 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details.
+ +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: ceph-nfs +display-name: | + TEMPLATE-TODO: fill out a display name for the Charmcraft store +description: | + TEMPLATE-TODO: fill out the charm's description +summary: | + TEMPLATE-TODO: fill out the charm's summary + +# TEMPLATE-TODO: replace with containers for your workload (delete for non-k8s) +containers: + httpbin: + resource: httpbin-image + +# TEMPLATE-TODO: each container defined above must specify an oci-image resource +resources: + httpbin-image: + type: oci-image + description: OCI image for httpbin (kennethreitz/httpbin) diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml new file mode 100644 index 00000000..d55941bd --- /dev/null +++ b/ceph-nfs/osci.yaml @@ -0,0 +1,29 @@ +- project: + templates: + - charm-unit-jobs + check: + jobs: + - octopus + - pacific + vars: + needs_charm_build: true + charm_build_name: ceph-iscsi + build_type: charmcraft +- job: + name: focal-octopus + parent: func-target + dependencies: + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 + vars: + tox_extra_args: focal-octopus +- job: + name: focal-pacific + parent: func-target + dependencies: &smoke-jobs + - focal-octopus + vars: + tox_extra_args: focal-pacific diff --git a/ceph-nfs/requirements-dev.txt b/ceph-nfs/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/ceph-nfs/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt new file mode 100644 index 00000000..37368f68 --- /dev/null +++ b/ceph-nfs/requirements.txt @@ -0,0 +1,5 @@ +# requirements +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers +git+https://github.com/canonical/operator.git#egg=ops +git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py new file mode 100755 index 00000000..4e5866a1 --- /dev/null +++ b/ceph-nfs/src/charm.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. + +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging + +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ActiveStatus + +import ops_openstack.adapters +import ops_openstack.core +import ops_openstack.plugins.classes + +logger = logging.getLogger(__name__) + + +class CephClientAdapter(ops_openstack.adapters.OpenStackOperRelationAdapter): + """Adapter for ceph client interface.""" + + @property + def mon_hosts(self): + """Sorted list of ceph mon addresses. + + :returns: Ceph MON addresses. + :rtype: str + """ + hosts = self.relation.get_relation_data()['mon_hosts'] + return ' '.join(sorted(hosts)) + + @property + def auth_supported(self): + """Authentication type. + + :returns: Authentication type + :rtype: str + """ + return self.relation.get_relation_data()['auth'] + + @property + def key(self): + """Key client should use when communicating with Ceph cluster. 
+ + :returns: Key + :rtype: str + """ + return self.relation.get_relation_data()['key'] + + +class CephNfsCharm(CharmBase): + """Charm the service.""" + + _stored = StoredState() + + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.httpbin_pebble_ready, self._on_httpbin_pebble_ready) + self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.fortune_action, self._on_fortune_action) + self._stored.set_default(things=[]) + + def _on_httpbin_pebble_ready(self, event): + """Define and start a workload using the Pebble API. + + TEMPLATE-TODO: change this example to suit your needs. + You'll need to specify the right entrypoint and environment + configuration for your specific workload. Tip: you can see the + standard entrypoint of an existing container using docker inspect + + Learn more about Pebble layers at https://github.com/canonical/pebble + """ + # Get a reference the container attribute on the PebbleReadyEvent + container = event.workload + # Define an initial Pebble layer configuration + pebble_layer = { + "summary": "httpbin layer", + "description": "pebble config layer for httpbin", + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": self.model.config["thing"]}, + } + }, + } + # Add intial Pebble config layer using the Pebble API + container.add_layer("httpbin", pebble_layer, combine=True) + # Autostart any services that were defined with startup: enabled + container.autostart() + # Learn more about statuses in the SDK docs: + # https://juju.is/docs/sdk/constructs#heading--statuses + self.unit.status = ActiveStatus() + + def _on_config_changed(self, _): + """Just an example to show how to deal with changed configuration. + + TEMPLATE-TODO: change this example to suit your needs. + If you don't need to handle config, you can remove this method, + the hook created in __init__.py for it, the corresponding test, + and the config.py file. + + Learn more about config at https://juju.is/docs/sdk/config + """ + current = self.config["thing"] + if current not in self._stored.things: + logger.debug("found a new thing: %r", current) + self._stored.things.append(current) + + def _on_fortune_action(self, event): + """Just an example to show how to receive actions. + + TEMPLATE-TODO: change this example to suit your needs. + If you don't need to handle actions, you can remove this method, + the hook created in __init__.py for it, the corresponding test, + and the actions.py file. 
+ + Learn more about actions at https://juju.is/docs/sdk/actions + """ + fail = event.params["fail"] + if fail: + event.fail(fail) + else: + event.set_results({"fortune": "A bug in the code is worth two in the documentation."}) + + +if __name__ == "__main__": + main(CephNfsCharm) diff --git a/ceph-nfs/templates/ceph.client.ceph-nfs.keyring b/ceph-nfs/templates/ceph.client.ceph-nfs.keyring new file mode 100644 index 00000000..8ad51ff3 --- /dev/null +++ b/ceph-nfs/templates/ceph.client.ceph-nfs.keyring @@ -0,0 +1,3 @@ +[client.ceph-nfs] + key = {{ ceph_client.key }} + diff --git a/ceph-nfs/templates/ceph.conf b/ceph-nfs/templates/ceph.conf new file mode 100644 index 00000000..dfd7431c --- /dev/null +++ b/ceph-nfs/templates/ceph.conf @@ -0,0 +1,15 @@ +############################################################################### +# [ WARNING ] +# configuration file maintained by Juju +# local changes will be overwritten. +############################################################################### +[global] +auth supported = {{ ceph_client.auth_supported }} +mon host = {{ ceph_client.mon_hosts }} +keyring = /etc/ceph/nfs/$cluster.$name.keyring + +[client.ceph-nfs] +client mount uid = 0 +client mount gid = 0 +log file = /var/log/ceph/ceph-client.nfs.log + diff --git a/ceph-nfs/test-requirements.txt b/ceph-nfs/test-requirements.txt new file mode 100644 index 00000000..73db69bb --- /dev/null +++ b/ceph-nfs/test-requirements.txt @@ -0,0 +1,16 @@ +# This file is managed centrally. If you find the need to modify this as a +# one-off, please don't. Instead, consult #openstack-charms and ask about +# requirements management in charms via bot-control. Thank you. +charm-tools>=2.4.4 +coverage>=3.6 +mock>=1.2 +flake8>=2.2.4 +stestr>=2.2.0 +requests>=2.18.4 +psutil +# oslo.i18n dropped py35 support +oslo.i18n<4.0.0 +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack +pytz # workaround for 14.04 pip/tox +pyudev # for ceph-* charm unit tests (not mocked?)
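To make the template plumbing above concrete: once the ceph-client relation is complete, the CephClientAdapter properties (auth_supported, mon_hosts, key) are substituted into these templates. Below is a minimal sketch of that rendering, using plain Jinja2 rather than the charmhelpers.core.templating wrapper the charm actually calls, and with invented relation values standing in for real cluster data:

    import jinja2

    # Invented stand-ins for what the ceph-client relation would provide.
    ceph_client = {
        'auth_supported': 'cephx',
        'mon_hosts': '10.0.0.11 10.0.0.12 10.0.0.13',  # sorted, space-joined
        'key': 'AQD...placeholder-key...==',           # not a real key
    }

    template = jinja2.Template(
        '[global]\n'
        'auth supported = {{ ceph_client.auth_supported }}\n'
        'mon host = {{ ceph_client.mon_hosts }}\n'
        'keyring = /etc/ceph/nfs/$cluster.$name.keyring\n')
    print(template.render(ceph_client=ceph_client))

The charm gets the same effect from ch_templating.render(), which looks each template up by file name under templates/ and passes the adapter collection as the rendering context.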
diff --git a/ceph-nfs/tests/bundles/focal-octopus.yaml b/ceph-nfs/tests/bundles/focal-octopus.yaml new file mode 100644 index 00000000..bf8426aa --- /dev/null +++ b/ceph-nfs/tests/bundles/focal-octopus.yaml @@ -0,0 +1,27 @@ +local_overlay_enabled: False +series: focal +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + ceph-osd: + charm: cs:~openstack-charmers-next/ceph-osd + num_units: 3 + storage: + osd-devices: '2,10G' + options: + osd-devices: '/dev/test-non-existent' + ceph-mon: + charm: cs:~openstack-charmers-next/ceph-mon + num_units: 3 + options: + monitor-count: '3' + excpected-osd-count: 6 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' \ No newline at end of file diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml new file mode 100644 index 00000000..3b0373c6 --- /dev/null +++ b/ceph-nfs/tests/tests.yaml @@ -0,0 +1,12 @@ +charm_name: ceph-nfs +gate_bundles: + - focal-octopus + - focal-pacific +smoke_bundles: + - focal-octopus +configure: [] +tests: [] +target_deploy_status: + ubuntu: + workload-status: active + workload-status-message: '' diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini new file mode 100644 index 00000000..775ea578 --- /dev/null +++ b/ceph-nfs/tox.ini @@ -0,0 +1,133 @@ +# Operator charm (with zaza): tox.ini + +[tox] +envlist = pep8,py3 +skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +whitelist_externals = + git + add-to-archive.py + bash +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py35] +basepython = python3.5 +# python3.5 is irrelevant on a focal+ charm.
+commands = /bin/true + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft build + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902 diff --git a/ceph-nfs/unit_tests/__init__.py b/ceph-nfs/unit_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-nfs/unit_tests/test_charm.py b/ceph-nfs/unit_tests/test_charm.py new file mode 100644 index 00000000..2df5aa87 --- /dev/null +++ b/ceph-nfs/unit_tests/test_charm.py @@ -0,0 +1,72 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + + +import unittest +import sys + +sys.path.append('lib') # noqa +sys.path.append('src') # noqa + +from unittest.mock import Mock + +from charm import CephNfsCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(CephNfsCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service("httpbin") + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) From 851ecc4aff7772840c0f21dbd4872826c93f65d2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 30 Nov 2021 14:37:32 +0100 Subject: [PATCH 2302/2699] Add in Ceph client handling, fix up lint and tests --- ceph-nfs/actions.yaml | 14 +- ceph-nfs/charmcraft.yaml | 10 - ceph-nfs/config.yaml | 31 +++ ceph-nfs/metadata.yaml | 41 ++-- ceph-nfs/src/charm.py | 247 +++++++++++++++------ ceph-nfs/templates/ceph.conf | 8 +- ceph-nfs/templates/ganesha.conf | 186 ++++++++++++++++ ceph-nfs/tests/bundles/focal-octopus.yaml | 2 +- ceph-nfs/unit_tests/test_ceph_nfs_charm.py | 67 ++++++ ceph-nfs/unit_tests/test_charm.py | 72 ------ 10 files changed, 493 insertions(+), 185 deletions(-) delete mode 100644 ceph-nfs/charmcraft.yaml create mode 100644 ceph-nfs/templates/ganesha.conf create mode 100644 ceph-nfs/unit_tests/test_ceph_nfs_charm.py delete mode 100644 ceph-nfs/unit_tests/test_charm.py diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index b38973f5..1b55fb09 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -7,10 +7,10 @@ # # Learn more about actions at: https://juju.is/docs/sdk/actions -fortune: - description: Returns a pithy phrase. - params: - fail: - description: "Fail with this message" - type: string - default: "" +# fortune: +# description: Returns a pithy phrase.
+# params: +# fail: +# description: "Fail with this message" +# type: string +# default: "" diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml deleted file mode 100644 index 048d4544..00000000 --- a/ceph-nfs/charmcraft.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Learn more about charmcraft.yaml configuration at: -# https://juju.is/docs/sdk/charmcraft-config -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "20.04" - run-on: - - name: "ubuntu" - channel: "20.04" diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index edb967c4..af036c5f 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -12,3 +12,34 @@ options: default: 🎠description: A thing used by the charm. type: string + ceph-osd-replication-count: + type: int + default: 3 + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the images rbd pool. Of course, this only + applies if using Ceph as a backend store. Note that once the images + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 5 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the compute images + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + rbd-pool-name: + default: + type: string + description: | + Optionally specify an existing pool that shares should map to. Defaults + to the application's name. diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 16d85320..fe73c794 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -1,23 +1,22 @@ -# Copyright 2021 OpenStack Charmers -# See LICENSE file for licensing details. - -# For a complete list of supported options, see: -# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 name: ceph-nfs -display-name: | - TEMPLATE-TODO: fill out a display name for the Charmcraft store +summary: Gateway for provisioning NFS shares backed by ceph. +maintainer: OpenStack Charmers description: | - TEMPLATE-TODO: fill out the charm's description -summary: | - TEMPLATE-TODO: fill out the charm's summary - -# TEMPLATE-TODO: replace with containers for your workload (delete for non-k8s) -containers: - httpbin: - resource: httpbin-image - -# TEMPLATE-TODO: each container defined above must specify an oci-image resource -resources: - httpbin-image: - type: oci-image - description: OCI image for httpbin (kennethreitz/httpbin) + The NFS gateway is provided by NFS-Ganesha and provides NFS shares + that are backed by CephFS. 
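To put rough numbers on the ceph-pool-weight description above: the Ceph broker sizes a pool's placement-group count from the OSD count, a per-OSD PG budget, the requested weight and the replica count. The function below is only an approximation of that arithmetic, assuming the conventional target of about 100 PGs per OSD and power-of-two rounding; the real calculation in the charms' Ceph broker applies further caps and corner cases:

    def estimate_pg_num(num_osds, percent_data, replicas, pgs_per_osd=100):
        # This pool's share of the cluster-wide PG budget, spread across
        # its replicas.
        raw = num_osds * pgs_per_osd * (percent_data / 100.0) / replicas
        # Round down to the nearest power of two, with a floor of 2.
        pg_num = 2
        while pg_num * 2 <= raw:
            pg_num *= 2
        return pg_num

    # Charm defaults (weight 5, replication 3) on a six-OSD test cluster:
    print(estimate_pg_num(6, 5, 3))   # -> 8
    # The same defaults on a 60-OSD cluster:
    print(estimate_pg_num(60, 5, 3))  # -> 64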
+tags: + - openstack + - storage + - misc +series: + - focal + - groovy + - hirsute + - impish +subordinate: false +min-juju-version: 2.7.6 +extra-bindings: + public: +requires: + ceph-client: + interface: ceph-client diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 4e5866a1..d3e62f36 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -13,11 +13,18 @@ """ import logging +import os +from pathlib import Path +import subprocess from ops.charm import CharmBase from ops.framework import StoredState from ops.main import main -from ops.model import ActiveStatus +# from ops.model import ActiveStatus + +import charmhelpers.core.host as ch_host +import charmhelpers.core.templating as ch_templating +import interface_ceph_client.ceph_client as ceph_client import ops_openstack.adapters import ops_openstack.core @@ -58,83 +65,183 @@ def key(self): return self.relation.get_relation_data()['key'] -class CephNfsCharm(CharmBase): - """Charm the service.""" - - _stored = StoredState() +class CephNFSAdapters( + ops_openstack.adapters.OpenStackRelationAdapters): + """Collection of relation adapters.""" - def __init__(self, *args): - super().__init__(*args) - self.framework.observe(self.on.httpbin_pebble_ready, self._on_httpbin_pebble_ready) - self.framework.observe(self.on.config_changed, self._on_config_changed) - self.framework.observe(self.on.fortune_action, self._on_fortune_action) - self._stored.set_default(things=[]) + relation_adapters = { + 'ceph-client': CephClientAdapter, + } - def _on_httpbin_pebble_ready(self, event): - """Define and start a workload using the Pebble API. - TEMPLATE-TODO: change this example to suit your needs. - You'll need to specify the right entrypoint and environment - configuration for your specific workload. Tip: you can see the - standard entrypoint of an existing container using docker inspect +class CephNfsCharm(CharmBase): + """Ceph NFS Base Charm.""" - Learn more about Pebble layers at https://github.com/canonical/pebble - """ - # Get a reference the container attribute on the PebbleReadyEvent - container = event.workload - # Define an initial Pebble layer configuration - pebble_layer = { - "summary": "httpbin layer", - "description": "pebble config layer for httpbin", - "services": { - "httpbin": { - "override": "replace", - "summary": "httpbin", - "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", - "startup": "enabled", - "environment": {"thing": self.model.config["thing"]}, - } - }, - } - # Add intial Pebble config layer using the Pebble API - container.add_layer("httpbin", pebble_layer, combine=True) - # Autostart any services that were defined with startup: enabled - container.autostart() - # Learn more about statuses in the SDK docs: - # https://juju.is/docs/sdk/constructs#heading--statuses - self.unit.status = ActiveStatus() - - def _on_config_changed(self, _): - """Just an example to show how to deal with changed configuration. - - TEMPLATE-TODO: change this example to suit your needs. - If you don't need to handle config, you can remove this method, - the hook created in __init__.py for it, the corresponding test, - and the config.py file. 
- - Learn more about config at https://juju.is/docs/sdk/config + _stored = StoredState() + PACKAGES = ['nfs-ganesha', 'ceph-common'] + + CEPH_CAPABILITIES = [ + "mds", "allow *", + "osd", "allow rw", + "mon", "allow r, " + "allow command \"auth del\", " + "allow command \"auth caps\", " + "allow command \"auth get\", " + "allow command \"auth get-or-create\""] + + REQUIRED_RELATIONS = ['ceph-client', 'cluster'] + + CEPH_CONFIG_PATH = Path('/etc/ceph') + GANESHA_CONFIG_PATH = Path('/etc/ganesha') + + CEPH_GANESHA_CONFIG_PATH = CEPH_CONFIG_PATH / 'ganesha' + CEPH_CONF = CEPH_GANESHA_CONFIG_PATH / 'ceph.conf' + GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.client.ceph-ganesha.keyring' + GANESHA_CONF = GANESHA_CONFIG_PATH / 'ganesha.conf' + + SERVICES = ['nfs-ganesha'] + + RESTART_MAP = { + str(GANESHA_CONF): SERVICES, + str(CEPH_CONF): SERVICES, + str(GANESHA_KEYRING): SERVICES} + + release = 'default' + + def __init__(self, framework): + super().__init__(framework) + # super().register_status_check(self.custom_status_check) + logging.info("Using %s class", self.release) + self._stored.set_default( + is_started=False, + ) + self.ceph_client = ceph_client.CephClientRequires( + self, + 'ceph-client') + self.adapters = CephNFSAdapters( + (self.ceph_client,), + self) + self.framework.observe( + self.ceph_client.on.broker_available, + self.request_ceph_pool) + self.framework.observe( + self.ceph_client.on.pools_available, + self.render_config) + self.framework.observe( + self.on.config_changed, + self.request_ceph_pool) + self.framework.observe( + self.on.upgrade_charm, + self.render_config) + + def config_get(self, key): + """Retrieve config option. + + :returns: Value of the corresponding config option or None. + :rtype: Any """ - current = self.config["thing"] - if current not in self._stored.things: - logger.debug("found a new thing: %r", current) - self._stored.things.append(current) + return self.model.config.get(key) - def _on_fortune_action(self, event): - """Just an example to show how to receive actions. - - TEMPLATE-TODO: change this example to suit your needs. - If you don't need to handle actions, you can remove this method, - the hook created in __init__.py for it, the corresponding test, - and the actions.py file. + @property + def pool_name(self): + """The name of the default rbd data pool to be used for shares. - Learn more about actions at https://juju.is/docs/sdk/actions + :returns: Data pool name. + :rtype: str """ - fail = event.params["fail"] - if fail: - event.fail(fail) + if self.config_get('rbd-pool-name'): + pool_name = self.config_get('rbd-pool-name') else: - event.set_results({"fortune": "A bug in the code is worth two in the documentation."}) + pool_name = self.app.name + return pool_name + + @property + def client_name(self): + return self.app.name + + def request_ceph_pool(self, event): + """Request pools from Ceph cluster.""" + if not self.ceph_client.broker_available: + logging.info("Cannot request ceph setup at this time") + return + try: + bcomp_kwargs = self.get_bluestore_compression() + except ValueError as e: + # The end user has most likely provided a invalid value for + # a configuration option. Just log the traceback here, the + # end user will be notified by assess_status() called at + # the end of the hook execution. 
+ logging.warn('Caught ValueError, invalid value provided for ' + 'configuration?: "{}"'.format(str(e))) + return + weight = self.config_get('ceph-pool-weight') + replicas = self.config_get('ceph-osd-replication-count') + + logging.info("Requesting replicated pool") + self.ceph_client.create_replicated_pool( + name=self.pool_name, + replicas=replicas, + weight=weight, + **bcomp_kwargs) + logging.info("Requesting permissions") + self.ceph_client.request_ceph_permissions( + self.client_name, + self.CEPH_CAPABILITIES) + + def refresh_request(self, event): + """Re-request Ceph pools and render config.""" + self.render_config(event) + self.request_ceph_pool(event) + + def render_config(self, event): + """Render config and restart services if config files change.""" + if not self.ceph_client.pools_available: + logging.info("Deferring setup") + event.defer() + return + + self.CEPH_GANESHA_CONFIG_PATH.mkdir( + exist_ok=True, + mode=0o750) + + def daemon_reload_and_restart(service_name): + subprocess.check_call(['systemctl', 'daemon-reload']) + subprocess.check_call(['systemctl', 'restart', service_name]) + + rfuncs = {} + + @ch_host.restart_on_change(self.RESTART_MAP, restart_functions=rfuncs) + def _render_configs(): + for config_file in self.RESTART_MAP.keys(): + ch_templating.render( + os.path.basename(config_file), + config_file, + self.adapters) + logging.info("Rendering config") + _render_configs() + logging.info("Setting started state") + self._stored.is_started = True + self.update_status() + logging.info("on_pools_available: status updated") + + # def custom_status_check(self): + # """Custom update status checks.""" + # if ch_host.is_container(): + # return ops.model.BlockedStatus( + # 'Charm cannot be deployed into a container') + # if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS: + # return ops.model.BlockedStatus( + # '{} is an invalid unit count'.format(self.peers.unit_count)) + # return ops.model.ActiveStatus() + + +@ops_openstack.core.charm_class +class CephNFSCharmOcto(CephNfsCharm): + """Ceph NFS Charm for Octopus.""" + + _stored = StoredState() + release = 'octopus' -if __name__ == "__main__": - main(CephNfsCharm) +if __name__ == '__main__': + main(ops_openstack.core.get_charm_class_for_release()) diff --git a/ceph-nfs/templates/ceph.conf b/ceph-nfs/templates/ceph.conf index dfd7431c..6bedad17 100644 --- a/ceph-nfs/templates/ceph.conf +++ b/ceph-nfs/templates/ceph.conf @@ -6,10 +6,10 @@ [global] auth supported = {{ ceph_client.auth_supported }} mon host = {{ ceph_client.mon_hosts }} -keyring = /etc/ceph/nfs/$cluster.$name.keyring +keyring = /etc/ceph/{{ options.application_name }}/$cluster.$name.keyring -[client.ceph-nfs] +[client.{{ options.application_name }}] client mount uid = 0 client mount gid = 0 -log file = /var/log/ceph/ceph-client.nfs.log - +log file = /var/log/ceph/ceph-client.{{ options.application_name }}.log + diff --git a/ceph-nfs/templates/ganesha.conf b/ceph-nfs/templates/ganesha.conf new file mode 100644 index 00000000..6ab0e4a5 --- /dev/null +++ b/ceph-nfs/templates/ganesha.conf @@ -0,0 +1,186 @@ +# The following is copied from the Ganesha source examples: +# https://github.com/nfs-ganesha/nfs-ganesha/blob/576e3bafccb6da5c7ea18d7099013f7494ce8d2c/src/config_samples/ceph.conf +# +# It is possible to use FSAL_CEPH to provide an NFS gateway to CephFS. The +# following sample config should be useful as a starting point for +# configuration.
This basic configuration is suitable for a standalone NFS +# server, or an active/passive configuration managed by some sort of clustering +# software (e.g. pacemaker, docker, etc.). +# +# Note too that it is also possible to put a config file in RADOS, and give +# ganesha a rados URL from which to fetch it. For instance, if the config +# file is stored in a RADOS pool called "nfs-ganesha", in a namespace called +# "ganesha-namespace" with an object name of "ganesha-config": +# +# %url rados://nfs-ganesha/ganesha-namespace/ganesha-config +# +# If we only export cephfs (or RGW), store the configs and recovery data in +# RADOS, and mandate NFSv4.1+ for access, we can avoid any sort of local +# storage, and ganesha can run as an unprivileged user (even inside a +# locked-down container). +# + +NFS_CORE_PARAM +{ + # Ganesha can lift the NFS grace period early if NLM is disabled. + Enable_NLM = false; + + # rquotad doesn't add any value here. CephFS doesn't support per-uid + # quotas anyway. + Enable_RQUOTA = false; + + # In this configuration, we're just exporting NFSv4. In practice, it's + # best to use NFSv4.1+ to get the benefit of sessions. + Protocols = 4; +} + +NFSv4 +{ + # Modern versions of libcephfs have delegation support, though they + # are not currently recommended in clustered configurations. They are + # disabled by default but can be reenabled for singleton or + # active/passive configurations. + # Delegations = false; + + # One can use any recovery backend with this configuration, but being + # able to store it in RADOS is a nice feature that makes it easy to + # migrate the daemon to another host. + # + # For a single-node or active/passive configuration, rados_ng driver + # is preferred. For active/active clustered configurations, the + # rados_cluster backend can be used instead. See the + # ganesha-rados-grace manpage for more information. + RecoveryBackend = rados_cluster; + + # NFSv4.0 clients do not send a RECLAIM_COMPLETE, so we end up having + # to wait out the entire grace period if there are any. Avoid them. + Minor_Versions = 1,2; +} + +# The libcephfs client will aggressively cache information while it +# can, so there is little benefit to ganesha actively caching the same +# objects. Doing so can also hurt cache coherency. Here, we disable +# as much attribute and directory caching as we can. +MDCACHE { + # Size the dirent cache down as small as possible. + Dir_Chunk = 0; +} + +EXPORT +{ + # Unique export ID number for this export + Export_ID=100; + + # We're only interested in NFSv4 in this configuration + Protocols = 4; + + # NFSv4 does not allow UDP transport + Transports = TCP; + + # + # Path into the cephfs tree. + # + # Note that FSAL_CEPH does not support subtree checking, so there is + # no way to validate that a filehandle presented by a client is + # reachable via an exported subtree. + # + # For that reason, we just export "/" here. + Path = /; + + # + # The pseudoroot path. This is where the export will appear in the + # NFS pseudoroot namespace. + # + Pseudo = /cephfs_a/; + + # We want to be able to read and write + Access_Type = RW; + + # Time out attribute cache entries immediately + Attr_Expiration_Time = 0; + + # Enable read delegations? libcephfs v13.0.1 and later allow the + # ceph client to set a delegation. While it's possible to allow RW + # delegations it's not recommended to enable them until ganesha + # acquires CB_GETATTR support. 
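The FSAL block below names a cephx User_Id that must exist before ganesha can start. In the charm, that account is requested through the ceph-client broker with the CEPH_CAPABILITIES listed earlier; outside the charm, the equivalent manual step would look roughly like this (a hedged sketch, not the charm's code path):

    import subprocess

    def ensure_ganesha_user(name='ceph-ganesha'):
        # 'ceph auth get-or-create' is idempotent: it returns the
        # existing key if the client already exists.
        return subprocess.check_output([
            'ceph', 'auth', 'get-or-create', 'client.{}'.format(name),
            'mds', 'allow *',
            'osd', 'allow rw',
            'mon', 'allow r',
        ]).decode('UTF-8')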
+ # + # Note too that delegations may not be safe in clustered + # configurations, so it's probably best to just disable them until + # this problem is resolved: + # + # http://tracker.ceph.com/issues/24802 + # + # Delegations = R; + + # NFS servers usually decide to "squash" incoming requests from the + # root user to a "nobody" user. It's possible to disable that, but for + # now, we leave it enabled. + # Squash = root; + + FSAL { + # FSAL_CEPH export + Name = CEPH; + + # + # Ceph filesystems have a name string associated with them, and + # modern versions of libcephfs can mount them based on the + # name. The default is to mount the default filesystem in the + # cluster (usually the first one created). + # + # Filesystem = "cephfs_a"; + + # + # Ceph clusters have their own authentication scheme (cephx). + # Ganesha acts as a cephfs client. This is the client username + # to use. This user will need to be created before running + # ganesha. + # + # Typically ceph clients have a name like "client.foo". This + # setting should not contain the "client." prefix. + # + # See: + # + # http://docs.ceph.com/docs/jewel/rados/operations/user-management/ + # + # The default is to set this to NULL, which means that the + # userid is set to the default in libcephfs (which is + # typically "admin"). + # + User_Id = "{{ client_name }}"; + + # + # Key to use for the session (if any). If not set, it uses the + # normal search path for cephx keyring files to find a key: + # + # Secret_Access_Key = "YOUR SECRET KEY HERE"; + } +} + +# The below were taken from the Manila docs at +# https://docs.openstack.org/manila/queens/contributor/ganesha.html + +# To read exports from RADOS objects +RADOS_URLS { + ceph_conf = "/etc/ceph/ganesha/ceph.conf"; + userid = "{{ client_name }}"; +} + +%url rados://{{ pool_name }}/ganesha-export-index +# To store client recovery data in the same RADOS pool + +RADOS_KV { + ceph_conf = "/etc/ceph/ganesha/ceph.conf"; + userid = "{{ client_name }}"; + pool = {{ pool_name }}; +} + +# Config block for FSAL_CEPH +CEPH +{ + # Path to a ceph.conf file for this ceph cluster. + # Ceph_Conf = /etc/ceph/ceph.conf; + + # User file-creation mask. These bits will be masked off from the unix + # permissions on newly-created inodes. + # umask = 0; +} diff --git a/ceph-nfs/tests/bundles/focal-octopus.yaml b/ceph-nfs/tests/bundles/focal-octopus.yaml index bf8426aa..d49b9e34 100644 --- a/ceph-nfs/tests/bundles/focal-octopus.yaml +++ b/ceph-nfs/tests/bundles/focal-octopus.yaml @@ -19,7 +19,7 @@ applications: num_units: 3 options: monitor-count: '3' - excpected-osd-count: 6 + expected-osd-count: 6 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' diff --git a/ceph-nfs/unit_tests/test_ceph_nfs_charm.py b/ceph-nfs/unit_tests/test_ceph_nfs_charm.py new file mode 100644 index 00000000..c9893159 --- /dev/null +++ b/ceph-nfs/unit_tests/test_ceph_nfs_charm.py @@ -0,0 +1,67 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. 
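The RADOS_URLS and RADOS_KV blocks in the template above expect an export index object to exist in the pool before ganesha starts. A hypothetical helper using the python3-rados bindings could seed it; the object and pool names below simply follow the template and are not taken from the charm:

    import rados

    def ensure_export_index(pool_name, client_name='ceph-ganesha'):
        # Create an empty ganesha-export-index object so that
        # %url rados://<pool>/ganesha-export-index resolves.
        cluster = rados.Rados(conffile='/etc/ceph/ganesha/ceph.conf',
                              name='client.{}'.format(client_name))
        cluster.connect()
        try:
            ioctx = cluster.open_ioctx(pool_name)
            try:
                ioctx.write_full('ganesha-export-index', b'')
            finally:
                ioctx.close()
        finally:
            cluster.shutdown()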
+# +# Learn more about testing at: https://juju.is/docs/sdk/testing + + +import unittest +import sys + +sys.path.append('lib') # noqa +sys.path.append('src') # noqa + +from unittest.mock import patch, Mock + +from charm import CephNfsCharm +# from ops.model import ActiveStatus +from ops.testing import Harness + +with patch('charmhelpers.core.host_factory.ubuntu.cmp_pkgrevno', + Mock(return_value=1)): + import charm + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super().setUp() + self.patches = patches + self.obj = obj + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class _CephNfsCharm(CephNfsCharm): + + @staticmethod + def get_bluestore_compression(): + return {} + + +class TestCephNFSCharmBase(CharmTestCase): + + PATCHES = [ + 'ch_templating', + 'os', + 'subprocess', + ] + + def setUp(self): + super().setUp(charm, self.PATCHES) + self.harness = Harness( + _CephNfsCharm, + ) + self.addCleanup(self.harness.cleanup) + + def test_init(self): + self.harness.begin() + self.assertFalse(self.harness.charm._stored.is_started) diff --git a/ceph-nfs/unit_tests/test_charm.py b/ceph-nfs/unit_tests/test_charm.py deleted file mode 100644 index 2df5aa87..00000000 --- a/ceph-nfs/unit_tests/test_charm.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2021 OpenStack Charmers -# See LICENSE file for licensing details. -# -# Learn more about testing at: https://juju.is/docs/sdk/testing - - -import unittest -import sys - -sys.path.append('lib') # noqa -sys.path.append('src') # noqa - -from unittest.mock import Mock - -from charm import CephNfsCharm -from ops.model import ActiveStatus -from ops.testing import Harness - - -class TestCharm(unittest.TestCase): - def setUp(self): - self.harness = Harness(CephNfsCharm) - self.addCleanup(self.harness.cleanup) - self.harness.begin() - - def test_config_changed(self): - self.assertEqual(list(self.harness.charm._stored.things), []) - self.harness.update_config({"thing": "foo"}) - self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) - - def test_action(self): - # the harness doesn't (yet!) 
help much with actions themselves - action_event = Mock(params={"fail": ""}) - self.harness.charm._on_fortune_action(action_event) - - self.assertTrue(action_event.set_results.called) - - def test_action_fail(self): - action_event = Mock(params={"fail": "fail this"}) - self.harness.charm._on_fortune_action(action_event) - - self.assertEqual(action_event.fail.call_args, [("fail this",)]) - - def test_httpbin_pebble_ready(self): - # Check the initial Pebble plan is empty - initial_plan = self.harness.get_container_pebble_plan("httpbin") - self.assertEqual(initial_plan.to_yaml(), "{}\n") - # Expected plan after Pebble ready with default config - expected_plan = { - "services": { - "httpbin": { - "override": "replace", - "summary": "httpbin", - "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", - "startup": "enabled", - "environment": {"thing": "ðŸŽ"}, - } - }, - } - # Get the httpbin container from the model - container = self.harness.model.unit.get_container("httpbin") - # Emit the PebbleReadyEvent carrying the httpbin container - self.harness.charm.on.httpbin_pebble_ready.emit(container) - # Get the plan now we've run PebbleReady - updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() - # Check we've got the plan we expected - self.assertEqual(expected_plan, updated_plan) - # Check the service was started - service = self.harness.model.unit.get_container("httpbin").get_service("httpbin") - self.assertTrue(service.is_running()) - # Ensure we set an ActiveStatus with no message - self.assertEqual(self.harness.model.unit.status, ActiveStatus()) From 42580a097c7fdbe1cf8ea616d1d34026fa9c9f83 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Thu, 28 Oct 2021 15:19:42 -0400 Subject: [PATCH 2303/2699] Fix README, extraneous options line Change-Id: I8e5b48de0662c99fffca56a9a48111e75d9bc543 --- ceph-osd/README.md | 11 ++++------- ceph-osd/requirements.txt | 3 +++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 4139bb33..ea2d5798 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -75,8 +75,7 @@ configuration file. Block devices (regular), ceph-osd: - options: - osd-devices: /dev/vdb /dev/vdc /dev/vdd + osd-devices: /dev/vdb /dev/vdc /dev/vdd Each regular block device must be an absolute path to a device node. @@ -210,8 +209,7 @@ Jewel, set option `osd-encrypt` for the ceph-osd charm: ```yaml ceph-osd: - options: - osd-encrypt: True + osd-encrypt: True ``` Here, dm-crypt keys are stored in the MON sub-cluster. @@ -222,9 +220,8 @@ options `osd-encrypt` and `osd-encrypt-keymanager` for the ceph-osd charm: ```yaml ceph-osd: - options: - osd-encrypt: True - osd-encrypt-keymanager: vault + osd-encrypt: True + osd-encrypt-keymanager: vault ``` > **Important**: Post deployment configuration will only affect block devices diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index ead6e89a..10d37185 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -22,3 +22,6 @@ dnspython<2.0.0; python_version < '3.6' dnspython; python_version >= '3.6' psutil>=1.1.1,<2.0.0 + +# cffi 1.15.0 drops support for py35 +cffi==1.14.6; python_version < '3.6' From af1efcc116d254e0983254a1763770a892bc93ac Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 10 Nov 2021 12:23:20 +0000 Subject: [PATCH 2304/2699] Fix create_system_user so it returns creds Fix the create_system_user method so it returns the access_key and secret when a user is created. 
This patch also includes the following changes: * Improve logging of multisite methods to help with debugging issues. * Fix multisite relations in bundles. Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/667 Closes-Bug: #1950329 Change-Id: I0528fe7f4a89c69f2790a0e472f6f43e23c2de19 --- ceph-radosgw/hooks/hooks.py | 26 +++++++++++++ ceph-radosgw/hooks/multisite.py | 2 +- ceph-radosgw/test-requirements.txt | 2 + .../bundles/bionic-queens-multisite.yaml | 4 +- .../tests/bundles/bionic-rocky-multisite.yaml | 4 +- ceph-radosgw/unit_tests/test_multisite.py | 39 +++++++++++++++++++ .../testdata/test_create_system_user.json | 38 ++++++++++++++++++ .../unit_tests/testdata/test_create_user.json | 38 ++++++++++++++++++ 8 files changed, 148 insertions(+), 5 deletions(-) create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_system_user.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_user.json diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index b10a0076..34290210 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -707,6 +707,10 @@ def master_relation_joined(relation_id=None): secret = leader_get('secret') if not all((realm, zonegroup, zone)): + log('Cannot setup multisite configuration, required config is ' + 'missing. realm, zonegroup and zone charm config options must all ' + 'be set', + 'WARN') return relation_set(relation_id=relation_id, @@ -717,9 +721,12 @@ def master_relation_joined(relation_id=None): secret=secret) if not is_leader(): + log('Cannot setup multisite configuration, this unit is not the ' + 'leader') return if not leader_get('restart_nonce'): + log('No restart_nonce found') # NOTE(jamespage): # This is an ugly kludge to force creation of the required data # items in the .rgw.root pool prior to the radosgw process being @@ -730,10 +737,12 @@ def master_relation_joined(relation_id=None): mutation = False if realm not in multisite.list_realms(): + log('Realm {} not found, creating now'.format(realm)) multisite.create_realm(realm, default=True) mutation = True if zonegroup not in multisite.list_zonegroups(): + log('Zonegroup {} not found, creating now'.format(zonegroup)) multisite.create_zonegroup(zonegroup, endpoints=endpoints, default=True, master=True, @@ -741,6 +750,7 @@ def master_relation_joined(relation_id=None): mutation = True if zone not in multisite.list_zones(): + log('Zone {} not found, creating now'.format(zone)) multisite.create_zone(zone, endpoints=endpoints, default=True, master=True, @@ -748,6 +758,7 @@ def master_relation_joined(relation_id=None): mutation = True if MULTISITE_SYSTEM_USER not in multisite.list_users(): + log('User {} not found, creating now'.format(MULTISITE_SYSTEM_USER)) access_key, secret = multisite.create_system_user( MULTISITE_SYSTEM_USER ) @@ -759,9 +770,14 @@ def master_relation_joined(relation_id=None): mutation = True if mutation: + log( + 'Mutation detected. 
Restarting {}.'.format(service_name()), + 'INFO') multisite.update_period() service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) + else: + log('No mutation detected.', 'INFO') relation_set(relation_id=relation_id, access_key=access_key, @@ -771,6 +787,8 @@ def master_relation_joined(relation_id=None): @hooks.hook('slave-relation-changed') def slave_relation_changed(relation_id=None, unit=None): if not is_leader(): + log('Cannot setup multisite configuration, this unit is not the ' + 'leader') return if not ready_for_service(legacy=False): log('unit not ready, deferring multisite configuration') @@ -801,6 +819,7 @@ def slave_relation_changed(relation_id=None, unit=None): return if not leader_get('restart_nonce'): + log('No restart_nonce found') # NOTE(jamespage): # This is an ugly kludge to force creation of the required data # items in the .rgw.root pool prior to the radosgw process being @@ -811,6 +830,7 @@ def slave_relation_changed(relation_id=None, unit=None): mutation = False if realm not in multisite.list_realms(): + log('Realm {} not found, pulling now'.format(realm)) multisite.pull_realm(url=master_data['url'], access_key=master_data['access_key'], secret=master_data['secret']) @@ -821,6 +841,7 @@ def slave_relation_changed(relation_id=None, unit=None): mutation = True if zone not in multisite.list_zones(): + log('Zone {} not found, creating now'.format(zone)) multisite.create_zone(zone, endpoints=endpoints, default=False, master=False, @@ -830,9 +851,14 @@ def slave_relation_changed(relation_id=None, unit=None): mutation = True if mutation: + log( + 'Mutation detected. Restarting {}.'.format(service_name()), + 'INFO') multisite.update_period() service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) + else: + log('No mutation detected.', 'INFO') @hooks.hook('leader-settings-changed') diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index df2638a3..4bf35a86 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -386,7 +386,7 @@ def create_system_user(username): :return: access key and secret :rtype: (str, str) """ - create_user(username, system_user=True) + return create_user(username, system_user=True) def pull_realm(url, access_key, secret): diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index dba2c767..f853625d 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -7,6 +7,8 @@ # requirements. They are intertwined. Also, Zaza itself should specify # all of its own requirements and if it doesn't, fix it there. # +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
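The one-line multisite.py fix above works because create_user() already parses the JSON that radosgw-admin prints and returns the first key pair; create_system_user() simply failed to pass that result along. A simplified sketch of the helper (the real one lives in hooks/multisite.py):

    import json
    import subprocess

    def create_user(username, system_user=False):
        cmd = ['radosgw-admin', 'user', 'create',
               '--uid={}'.format(username),
               '--display-name=Synchronization User']
        if system_user:
            cmd.append('--system')
        result = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
        keys = result.get('keys', [])
        if keys:
            return keys[0]['access_key'], keys[0]['secret_key']
        return None, None

The unit tests that follow exercise exactly this path by feeding the canned testdata JSON into a mocked subprocess.check_output.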
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 diff --git a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml index ca5686c9..c3ca68bc 100644 --- a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml +++ b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml @@ -73,5 +73,5 @@ relations: - west-ceph-mon:radosgw - - slave-ceph-radosgw:identity-service - keystone:identity-service -- - slave-ceph-radosgw:master - - ceph-radosgw:slave +- - slave-ceph-radosgw:slave + - ceph-radosgw:master diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml index 97eb3f32..82f4b1fa 100644 --- a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml +++ b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml @@ -73,5 +73,5 @@ relations: - west-ceph-mon:radosgw - - slave-ceph-radosgw:identity-service - keystone:identity-service -- - slave-ceph-radosgw:master - - ceph-radosgw:slave +- - slave-ceph-radosgw:slave + - ceph-radosgw:master diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 8825cedb..07cd07bb 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -78,6 +78,45 @@ def test_set_default_zone(self): '--rgw-realm=newrealm' ]) + def test_create_user(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + access_key, secret = multisite.create_user( + 'mrbees', + ) + self.assertEqual( + access_key, + '41JJQK1HN2NAE5DEZUF9') + self.assertEqual( + secret, + '1qhCgxmUDAJI9saFAVdvUTG5MzMjlpMxr5agaaa4') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'user', 'create', + '--uid=mrbees', + '--display-name=Synchronization User', + ], stderr=mock.ANY) + + def test_create_system_user(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + access_key, secret = multisite.create_system_user( + 'mrbees', + ) + self.assertEqual( + access_key, + '41JJQK1HN2NAE5DEZUF9') + self.assertEqual( + secret, + '1qhCgxmUDAJI9saFAVdvUTG5MzMjlpMxr5agaaa4') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'user', 'create', + '--uid=mrbees', + '--display-name=Synchronization User', + '--system' + ], stderr=mock.ANY) + def test_create_zonegroup(self): with open(self._testdata(whoami()), 'rb') as f: self.subprocess.check_output.return_value = f.read() diff --git a/ceph-radosgw/unit_tests/testdata/test_create_system_user.json b/ceph-radosgw/unit_tests/testdata/test_create_system_user.json new file mode 100644 index 00000000..83be1d99 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_system_user.json @@ -0,0 +1,38 @@ +{ + "auid": 0, + "bucket_quota": { + "check_on_raw": false, + "enabled": false, + "max_objects": -1, + "max_size": -1, + "max_size_kb": 0 + }, + "caps": [], + "default_placement": "", + "display_name": "Synchronization User", + "email": "", + "keys": [ + { + "access_key": "41JJQK1HN2NAE5DEZUF9", + "secret_key": "1qhCgxmUDAJI9saFAVdvUTG5MzMjlpMxr5agaaa4", + "user": "mrbees" + } + ], + "max_buckets": 1000, + "op_mask": "read, write, delete", + "placement_tags": [], + "subusers": [], + "suspended": 0, + "swift_keys": [], + "system": "true", + "temp_url_keys": [], + "type": "rgw", + "user_id": "mrbees", 
+ "user_quota": { + "check_on_raw": false, + "enabled": false, + "max_objects": -1, + "max_size": -1, + "max_size_kb": 0 + } +} diff --git a/ceph-radosgw/unit_tests/testdata/test_create_user.json b/ceph-radosgw/unit_tests/testdata/test_create_user.json new file mode 100644 index 00000000..83be1d99 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_user.json @@ -0,0 +1,38 @@ +{ + "auid": 0, + "bucket_quota": { + "check_on_raw": false, + "enabled": false, + "max_objects": -1, + "max_size": -1, + "max_size_kb": 0 + }, + "caps": [], + "default_placement": "", + "display_name": "Synchronization User", + "email": "", + "keys": [ + { + "access_key": "41JJQK1HN2NAE5DEZUF9", + "secret_key": "1qhCgxmUDAJI9saFAVdvUTG5MzMjlpMxr5agaaa4", + "user": "mrbees" + } + ], + "max_buckets": 1000, + "op_mask": "read, write, delete", + "placement_tags": [], + "subusers": [], + "suspended": 0, + "swift_keys": [], + "system": "true", + "temp_url_keys": [], + "type": "rgw", + "user_id": "mrbees", + "user_quota": { + "check_on_raw": false, + "enabled": false, + "max_objects": -1, + "max_size": -1, + "max_size_kb": 0 + } +} From dd03e065bb987cea2951f0f1164aa730a1a7bd5b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 10 Dec 2021 15:03:39 +0100 Subject: [PATCH 2305/2699] Stop setting default capabilities in get_named_key. When get_named_key is called on a keyring that has customised capabilities, it overwrites the capabilities to the default capabilities. Depends-On: I777650ae2194609a95e5cfb101e343824b3d0a34 Change-Id: I2930b32943fb75778cf3b56bb7f1d2a9f9c7c454 --- ceph-mon/lib/charms_ceph/utils.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 025ab866..4f7ae865 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None): 'get', key_name, ]).decode('UTF-8')).strip() - # NOTE(jamespage); - # Apply any changes to key capabilities, dealing with - # upgrades which requires new caps for operation. - upgrade_key_caps(key_name, - caps or _default_caps, - pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! From 206ce4ed59da1c132aac3f5a9130a13161bec0b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 8 Jun 2020 22:32:14 +0200 Subject: [PATCH 2306/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Note that https://github.com/openstack/charms.openstack is used during tests and he need `mock`, unfortunatelly it doesn't declare `mock` in its requirements so it retrieve mock from other charm project (cross dependency). So we depend on charms.openstack first and when Ib1ed5b598a52375e29e247db9ab4786df5b6d142 will be merged then CI will pass without errors. 
Depends-On: Ib1ed5b598a52375e29e247db9ab4786df5b6d142 Change-Id: Ib658c7f61fe4aceafc1919e366d24ce81ec1dd63 --- ceph-osd/.zuul.yaml | 2 +- ceph-osd/test-requirements.txt | 6 +----- ceph-osd/unit_tests/test_actions_add_disk.py | 2 +- ceph-osd/unit_tests/test_actions_blacklist.py | 2 +- ceph-osd/unit_tests/test_actions_osd_out_in.py | 5 +++-- ceph-osd/unit_tests/test_actions_service.py | 2 +- ceph-osd/unit_tests/test_actions_zap_disk.py | 2 +- ceph-osd/unit_tests/test_ceph_hooks.py | 2 +- ceph-osd/unit_tests/test_ceph_utils.py | 2 +- ceph-osd/unit_tests/test_config.py | 2 +- ceph-osd/unit_tests/test_status.py | 4 ++-- ceph-osd/unit_tests/test_tuning.py | 2 +- ceph-osd/unit_tests/test_upgrade.py | 2 +- ceph-osd/unit_tests/test_utils.py | 2 +- 14 files changed, 17 insertions(+), 20 deletions(-) diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index fd20909e..0eed1965 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-jobs + - openstack-python3-ussuri-jobs - openstack-cover-jobs diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index f853625d..7ce4d17c 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -13,11 +13,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb requests>=2.18.4 -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - +flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 # Dependency of stestr. Workaround for diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index 0fdef53a..e29b99ef 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from actions import add_disk diff --git a/ceph-osd/unit_tests/test_actions_blacklist.py b/ceph-osd/unit_tests/test_actions_blacklist.py index a74e96fd..9484ccd5 100644 --- a/ceph-osd/unit_tests/test_actions_blacklist.py +++ b/ceph-osd/unit_tests/test_actions_blacklist.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from charmhelpers.core import hookenv diff --git a/ceph-osd/unit_tests/test_actions_osd_out_in.py b/ceph-osd/unit_tests/test_actions_osd_out_in.py index 9c0a3790..5808adfe 100644 --- a/ceph-osd/unit_tests/test_actions_osd_out_in.py +++ b/ceph-osd/unit_tests/test_actions_osd_out_in.py @@ -11,11 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import mock -import subprocess +import subprocess import sys +from unittest import mock + from test_utils import CharmTestCase sys.path.append('hooks') diff --git a/ceph-osd/unit_tests/test_actions_service.py b/ceph-osd/unit_tests/test_actions_service.py index 57deafd3..5c3af854 100644 --- a/ceph-osd/unit_tests/test_actions_service.py +++ b/ceph-osd/unit_tests/test_actions_service.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import mock +import unittest.mock as mock from contextlib import contextmanager from actions import service diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py index 375b026f..00ce17f3 100644 --- a/ceph-osd/unit_tests/test_actions_zap_disk.py +++ b/ceph-osd/unit_tests/test_actions_zap_disk.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from actions import zap_disk diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 7f8132cb..5d13f86c 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -17,7 +17,7 @@ import copy import unittest -from mock import patch, MagicMock, call +from unittest.mock import patch, MagicMock, call import charmhelpers.contrib.storage.linux.ceph as ceph diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index 1172f1a7..722558b2 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -15,7 +15,7 @@ import unittest -from mock import patch +from unittest.mock import patch with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: diff --git a/ceph-osd/unit_tests/test_config.py b/ceph-osd/unit_tests/test_config.py index fe5c1094..574a3021 100644 --- a/ceph-osd/unit_tests/test_config.py +++ b/ceph-osd/unit_tests/test_config.py @@ -18,7 +18,7 @@ import sys import test_utils -from mock import patch, MagicMock +from unittest.mock import patch, MagicMock # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index d296eb17..433f92e6 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import mock +from unittest import mock import test_utils -from mock import MagicMock, patch +from unittest.mock import MagicMock, patch with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: diff --git a/ceph-osd/unit_tests/test_tuning.py b/ceph-osd/unit_tests/test_tuning.py index bb0631f4..92e61d52 100644 --- a/ceph-osd/unit_tests/test_tuning.py +++ b/ceph-osd/unit_tests/test_tuning.py @@ -1,5 +1,5 @@ __author__ = 'Chris Holcombe ' -from mock import patch, call +from unittest.mock import patch, call import test_utils import charms_ceph.utils as ceph diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index efc2800e..dd3f223b 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -1,4 +1,4 @@ -from mock import call, patch +from unittest.mock import call, patch from test_utils import CharmTestCase from ceph_hooks import check_for_upgrade, notify_mon_of_upgrade diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index 639552e2..b8d41361 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -19,7 +19,7 @@ import yaml from contextlib import contextmanager -from mock import patch, MagicMock +from unittest.mock import patch, MagicMock # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. From b0d658fe740ab96008c4e1d85df62aabc23b7073 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 8 Jun 2020 22:33:10 +0200 Subject: [PATCH 2307/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Note that https://github.com/openstack/charms.openstack is used during tests and he need `mock`, unfortunatelly it doesn't declare `mock` in its requirements so it retrieve mock from other charm project (cross dependency). So we depend on charms.openstack first and when Ib1ed5b598a52375e29e247db9ab4786df5b6d142 will be merged then CI will pass without errors. Depends-On: Ib1ed5b598a52375e29e247db9ab4786df5b6d142 Change-Id: I33cd69aca44f48af2766c92ede46ff12367160c5 --- ceph-proxy/.zuul.yaml | 1 - ceph-proxy/test-requirements.txt | 6 ------ ceph-proxy/unit_tests/test_ceph.py | 2 +- ceph-proxy/unit_tests/test_ceph_hooks.py | 2 +- ceph-proxy/unit_tests/test_utils.py | 2 +- 5 files changed, 3 insertions(+), 10 deletions(-) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 34184f1d..fd189e2f 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python35-charm-jobs - openstack-python3-ussuri-jobs diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index f853625d..0d12fc2a 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -12,12 +12,6 @@ cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 - -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - stestr>=2.2.0 # Dependency of stestr. 
Workaround for diff --git a/ceph-proxy/unit_tests/test_ceph.py b/ceph-proxy/unit_tests/test_ceph.py index bd8d7097..211c61b6 100644 --- a/ceph-proxy/unit_tests/test_ceph.py +++ b/ceph-proxy/unit_tests/test_ceph.py @@ -2,7 +2,7 @@ import subprocess import unittest -import mock +from unittest import mock import ceph diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index 63dccdb5..2af966a1 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -1,4 +1,4 @@ -import mock +from unittest import mock import sys # python-apt is not installed as part of test-requirements but is imported by diff --git a/ceph-proxy/unit_tests/test_utils.py b/ceph-proxy/unit_tests/test_utils.py index ed0e7a1e..372b2d8a 100644 --- a/ceph-proxy/unit_tests/test_utils.py +++ b/ceph-proxy/unit_tests/test_utils.py @@ -4,7 +4,7 @@ import yaml from contextlib import contextmanager -from mock import patch, MagicMock +from unittest.mock import patch, MagicMock def load_config(): From 80b14ead68ff9f478c54fcf15086ca98824f4e04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 8 Jun 2020 22:30:42 +0200 Subject: [PATCH 2308/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Note that https://github.com/openstack/charms.openstack is used during tests and he need `mock`, unfortunatelly it doesn't declare `mock` in its requirements so it retrieve mock from other charm project (cross dependency). So we depend on charms.openstack first and when Ib1ed5b598a52375e29e247db9ab4786df5b6d142 will be merged then CI will pass without errors. Depends-On: Ib1ed5b598a52375e29e247db9ab4786df5b6d142 Change-Id: If34c7c237f705b7b78261ccd0fc4a1655663300a --- ceph-fs/.zuul.yaml | 2 -- ceph-fs/test-requirements.txt | 5 ----- ceph-fs/unit_tests/test_actions.py | 2 +- ceph-fs/unit_tests/test_utils.py | 2 +- 4 files changed, 2 insertions(+), 9 deletions(-) diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 650658d7..0eed1965 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -1,6 +1,4 @@ - project: templates: - - python35-charm-jobs - - openstack-python35-jobs - openstack-python3-ussuri-jobs - openstack-cover-jobs diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index bb1307f5..208032f0 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -28,11 +28,6 @@ oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
-# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-fs/unit_tests/test_actions.py b/ceph-fs/unit_tests/test_actions.py index 347556e6..21649054 100644 --- a/ceph-fs/unit_tests/test_actions.py +++ b/ceph-fs/unit_tests/test_actions.py @@ -2,7 +2,7 @@ sys.path.append('src/actions') import unittest -from mock import patch, call, Mock +from unittest.mock import patch, call, Mock __author__ = 'Chris Holcombe ' diff --git a/ceph-fs/unit_tests/test_utils.py b/ceph-fs/unit_tests/test_utils.py index 781901e4..06d0b072 100644 --- a/ceph-fs/unit_tests/test_utils.py +++ b/ceph-fs/unit_tests/test_utils.py @@ -17,7 +17,7 @@ import os import yaml -from mock import patch +from unittest.mock import patch def load_config(): From f9f12f5438311afd29de37602a8dd479fe213743 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 8 Jun 2020 22:35:04 +0200 Subject: [PATCH 2309/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Note that https://github.com/openstack/charms.openstack is used during tests and he need `mock`, unfortunatelly it doesn't declare `mock` in its requirements so it retrieve mock from other charm project (cross dependency). So we depend on charms.openstack first and when Ib1ed5b598a52375e29e247db9ab4786df5b6d142 will be merged then CI will pass without errors. Depends-On: Ib1ed5b598a52375e29e247db9ab4786df5b6d142 Change-Id: I6cf34358b919d4ab568a329395207676bd2e531d --- ceph-rbd-mirror/.zuul.yaml | 1 - ceph-rbd-mirror/test-requirements.txt | 5 ----- ceph-rbd-mirror/unit_tests/test_actions.py | 2 +- ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py | 2 +- .../unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py | 2 +- 5 files changed, 3 insertions(+), 9 deletions(-) diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml index 34184f1d..fd189e2f 100644 --- a/ceph-rbd-mirror/.zuul.yaml +++ b/ceph-rbd-mirror/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python35-charm-jobs - openstack-python3-ussuri-jobs diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index bb1307f5..208032f0 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -28,11 +28,6 @@ oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
-# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index d2349bf5..270eb1a3 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -14,7 +14,7 @@ import collections import json -import mock +from unittest import mock import sys sys.modules['charms.layer'] = mock.MagicMock() diff --git a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py index a5bf162c..3d5c5b28 100644 --- a/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py +++ b/ceph-rbd-mirror/unit_tests/test_ceph_rbd_mirror_handlers.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import charm.openstack.ceph_rbd_mirror as crm import reactive.ceph_rbd_mirror_handlers as handlers diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index 739e65ba..e0c16452 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -13,8 +13,8 @@ # limitations under the License. import collections -import mock import json +from unittest import mock import subprocess import charms_openstack.test_utils as test_utils From bce230e8be6c375c634b8250f54635ed703ba1a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 8 Jun 2020 22:34:09 +0200 Subject: [PATCH 2310/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Note that https://github.com/openstack/charms.openstack is used during tests and he need `mock`, unfortunatelly it doesn't declare `mock` in its requirements so it retrieve mock from other charm project (cross dependency). So we depend on charms.openstack first and when Ib1ed5b598a52375e29e247db9ab4786df5b6d142 will be merged then CI will pass without errors. 
Depends-On: Ib1ed5b598a52375e29e247db9ab4786df5b6d142 Change-Id: If352ea32d18cd3d1d8bc5577a32c0397e1cb7e93 --- ceph-radosgw/.zuul.yaml | 1 - ceph-radosgw/test-requirements.txt | 5 ----- ceph-radosgw/unit_tests/test_actions.py | 4 ++-- ceph-radosgw/unit_tests/test_ceph.py | 2 +- ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 2 +- ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py | 2 +- ceph-radosgw/unit_tests/test_hooks.py | 2 +- ceph-radosgw/unit_tests/test_multisite.py | 2 +- ceph-radosgw/unit_tests/test_utils.py | 2 +- 9 files changed, 8 insertions(+), 14 deletions(-) diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index 34184f1d..fd189e2f 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python35-charm-jobs - openstack-python3-ussuri-jobs diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index f853625d..0aabe171 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -13,11 +13,6 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb requests>=2.18.4 -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - stestr>=2.2.0 # Dependency of stestr. Workaround for diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index b88cd6a0..795da2a6 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock -from mock import patch +from unittest import mock +from unittest.mock import patch from test_utils import CharmTestCase diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 7c4e36a3..28e73402 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import patch, call +from unittest.mock import patch, call import ceph_rgw as ceph # noqa import utils # noqa diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 3f0decd2..3da43c0a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import patch +from unittest.mock import patch import ceph_radosgw_context as context import charmhelpers.contrib.storage.linux.ceph as ceph diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 60238a7b..9d39758f 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import ( +from unittest.mock import ( patch, MagicMock, ) diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 1d7c3730..87f37abc 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import ( +from unittest.mock import ( patch, call, MagicMock, ANY ) diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 07cd07bb..6234a8dc 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -14,7 +14,7 @@ import inspect import os -import mock +from unittest import mock import multisite diff --git a/ceph-radosgw/unit_tests/test_utils.py b/ceph-radosgw/unit_tests/test_utils.py index 5be80ec7..a3feee41 100644 --- a/ceph-radosgw/unit_tests/test_utils.py +++ b/ceph-radosgw/unit_tests/test_utils.py @@ -17,7 +17,7 @@ import unittest import yaml -from mock import patch +from unittest.mock import patch def load_config(): From 421d27855dbe6f2c274770e1dcc1e9aff06e3c1c Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 16 Dec 2021 09:20:28 +0000 Subject: [PATCH 2311/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. Change-Id: Idffdcf1153821c3d9514f3410e5609ea8c99fe74 --- ceph-mon/.zuul.yaml | 1 - ceph-mon/test-requirements.txt | 5 ----- ceph-mon/unit_tests/test_action_change_osd_weight.py | 2 +- ceph-mon/unit_tests/test_action_list_inconsistent.py | 2 +- ceph-mon/unit_tests/test_action_list_pools.py | 12 ++++++++---- ceph-mon/unit_tests/test_action_purge_osd.py | 2 +- ceph-mon/unit_tests/test_actions_mon.py | 2 +- ceph-mon/unit_tests/test_ceph_actions.py | 2 +- ceph-mon/unit_tests/test_ceph_hooks.py | 2 +- ceph-mon/unit_tests/test_ceph_utils.py | 4 ++-- ceph-mon/unit_tests/test_check_ceph_status.py | 2 +- ceph-mon/unit_tests/test_status.py | 2 +- ceph-mon/unit_tests/test_upgrade.py | 2 +- ceph-mon/unit_tests/test_utils.py | 2 +- 14 files changed, 20 insertions(+), 22 deletions(-) diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index b3037e94..0eed1965 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,5 +1,4 @@ - project: templates: - - python35-charm-jobs - openstack-python3-ussuri-jobs - openstack-cover-jobs diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index f853625d..0aabe171 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -13,11 +13,6 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb requests>=2.18.4 -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' - stestr>=2.2.0 # Dependency of stestr. 
Workaround for diff --git a/ceph-mon/unit_tests/test_action_change_osd_weight.py b/ceph-mon/unit_tests/test_action_change_osd_weight.py index d3ce3ff4..fbe26bd7 100644 --- a/ceph-mon/unit_tests/test_action_change_osd_weight.py +++ b/ceph-mon/unit_tests/test_action_change_osd_weight.py @@ -14,7 +14,7 @@ """Tests for reweight_osd action.""" from actions import change_osd_weight as action -from mock import mock +import unittest.mock as mock from test_utils import CharmTestCase diff --git a/ceph-mon/unit_tests/test_action_list_inconsistent.py b/ceph-mon/unit_tests/test_action_list_inconsistent.py index 6f006ce6..6a3694cf 100644 --- a/ceph-mon/unit_tests/test_action_list_inconsistent.py +++ b/ceph-mon/unit_tests/test_action_list_inconsistent.py @@ -14,7 +14,7 @@ """Tests for the list_inconsistent_objs action.""" from actions import list_inconsistent_objs as action -from mock import mock +import unittest.mock as mock from test_utils import CharmTestCase diff --git a/ceph-mon/unit_tests/test_action_list_pools.py b/ceph-mon/unit_tests/test_action_list_pools.py index f42ba439..2491085d 100644 --- a/ceph-mon/unit_tests/test_action_list_pools.py +++ b/ceph-mon/unit_tests/test_action_list_pools.py @@ -107,9 +107,13 @@ def test_getting_list_pools_without_details(self): def test_getting_list_pools_with_details(self): """Test getting list of pools with details.""" + self.pools = None + + def _function_set(message): + self.pools = json.loads(message['message']) + self.function_set.side_effect = _function_set list_pools.main() self.function_get.assert_called_once_with("format") - pools = json.loads(self.function_set.call_args.args[0]["message"]) - self.assertEqual(pools[0]["pool"], 1) - self.assertEqual(pools[0]["size"], 3) - self.assertEqual(pools[0]["min_size"], 2) + self.assertEqual(self.pools[0]["pool"], 1) + self.assertEqual(self.pools[0]["size"], 3) + self.assertEqual(self.pools[0]["min_size"], 2) diff --git a/ceph-mon/unit_tests/test_action_purge_osd.py b/ceph-mon/unit_tests/test_action_purge_osd.py index 64d4f6fd..2146f9e8 100644 --- a/ceph-mon/unit_tests/test_action_purge_osd.py +++ b/ceph-mon/unit_tests/test_action_purge_osd.py @@ -14,7 +14,7 @@ """Tests for purge_osd action.""" from actions import purge_osd as action -from mock import mock +import unittest.mock as mock from test_utils import CharmTestCase diff --git a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py index fb749bf9..edbb4561 100644 --- a/ceph-mon/unit_tests/test_actions_mon.py +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -12,7 +12,7 @@ # limitations under the License. import json import sys -from mock import mock +import unittest.mock as mock from test_utils import CharmTestCase diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index c54830fa..81f31e7c 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import mock +import unittest.mock as mock import subprocess import test_utils diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 707ee52c..59a58a52 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -3,7 +3,7 @@ import unittest import sys -from mock import patch, MagicMock, DEFAULT, call +from unittest.mock import patch, MagicMock, DEFAULT, call # python-apt is not installed as part of test-requirements but is imported by # some charmhelpers modules so create a fake import. diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 0ea4c626..4332c71a 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +import unittest.mock as mock import test_utils @@ -250,7 +250,7 @@ def test_set_require_osd_release_success(self, log, check_call): def test_set_require_osd_release_raise_call_error(self, log, check_call): release = 'luminous' check_call.side_effect = utils.subprocess.CalledProcessError( - 0, mock.mock.MagicMock() + 0, mock.MagicMock() ) expected_call = mock.call( ['ceph', 'osd', 'require-osd-release', release] diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index b7b06155..5342ce55 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -16,7 +16,7 @@ import os import sys -from mock import patch +from unittest.mock import patch # import the module we want to test os.sys.path.insert(1, os.path.join(sys.path[0], 'files/nagios')) diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index 71b845eb..ff181e16 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +import unittest.mock as mock import sys import test_utils diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index f784f7cb..f60bb43b 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -1,4 +1,4 @@ -from mock import patch +from unittest.mock import patch from ceph_hooks import check_for_upgrade from test_utils import CharmTestCase diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py index 83fe5ae2..ce139de5 100644 --- a/ceph-mon/unit_tests/test_utils.py +++ b/ceph-mon/unit_tests/test_utils.py @@ -17,7 +17,7 @@ import os import yaml -from mock import patch +from unittest.mock import patch def load_config(): From f5845159cca4954faa9a38dfc421a257f2894f3e Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 16 Dec 2021 09:26:42 +0000 Subject: [PATCH 2312/2699] Use unittest.mock instead of mock The mock third party library was needed for mock support in py2 runtimes. Since we now only support py36 and later, we can use the standard lib unittest.mock module instead. 
Change-Id: If62bed2598c42b861a11ceab21e9f1ab0c0cf9dd --- ceph-dashboard/.zuul.yaml | 2 +- ceph-dashboard/osci.yaml | 5 ++--- ceph-dashboard/test-requirements.txt | 1 - ceph-dashboard/tox.ini | 5 +++++ ceph-dashboard/unit_tests/__init__.py | 2 +- ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py | 2 +- 6 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ceph-dashboard/.zuul.yaml b/ceph-dashboard/.zuul.yaml index fd20909e..0eed1965 100644 --- a/ceph-dashboard/.zuul.yaml +++ b/ceph-dashboard/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-jobs + - openstack-python3-ussuri-jobs - openstack-cover-jobs diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 46cf1eca..2088973f 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -1,6 +1,6 @@ - project: templates: - - charm-unit-jobs + - charm-yoga-unit-jobs check: jobs: - bionic-octopus @@ -15,10 +15,9 @@ parent: func-target dependencies: - osci-lint - - tox-py35 - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 vars: tox_extra_args: focal - job: diff --git a/ceph-dashboard/test-requirements.txt b/ceph-dashboard/test-requirements.txt index fb837fb3..90c36911 100644 --- a/ceph-dashboard/test-requirements.txt +++ b/ceph-dashboard/test-requirements.txt @@ -3,7 +3,6 @@ # requirements management in charms via bot-control. Thank you. charm-tools>=2.4.4 coverage>=3.6 -mock>=1.2 flake8>=2.2.4,<=2.4.1 pyflakes==2.1.1 stestr>=2.2.0 diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 4ca4d73a..79ddb1aa 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -57,6 +57,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt diff --git a/ceph-dashboard/unit_tests/__init__.py b/ceph-dashboard/unit_tests/__init__.py index adce0b68..59620d3a 100644 --- a/ceph-dashboard/unit_tests/__init__.py +++ b/ceph-dashboard/unit_tests/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. import sys -import mock +import unittest.mock as mock # Mock out secrets to make py35 happy. sys.modules['secrets'] = mock.MagicMock() diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index b58825ff..5542120e 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -22,7 +22,7 @@ sys.path.append('lib') # noqa sys.path.append('src') # noqa -from mock import ANY, call, patch, MagicMock +from unittest.mock import ANY, call, patch, MagicMock from ops.testing import Harness, _TestingModelBackend from ops.model import ( From 5b5c15bb3d7969bf5612396c96570518aedd7508 Mon Sep 17 00:00:00 2001 From: James Troup Date: Tue, 16 Nov 2021 21:57:48 +0000 Subject: [PATCH 2313/2699] Spelling fixes from codespell with additional consistency cleanups. Includes a resync of charms.ceph. 
Change-Id: I281fe17acaea826b79bfa902a8d9e7a8b59482a9 --- ceph-osd/hooks/ceph_hooks.py | 14 +-- ceph-osd/lib/charms_ceph/utils.py | 190 +++++++++++++++--------------- ceph-osd/unit_tests/test_utils.py | 4 +- 3 files changed, 101 insertions(+), 107 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 0e49e049..fb30f221 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2016 Canonical Ltd +# Copyright 2016-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -198,7 +198,7 @@ def tune_network_adapters(): def aa_profile_changed(service_name='ceph-osd-all'): """ - Reload AA profie and restart OSD processes. + Reload AA profile and restart OSD processes. """ log("Loading new AppArmor profile") service_reload('apparmor') @@ -372,7 +372,7 @@ def get_ceph_context(upgrading=False): """Returns the current context dictionary for generating ceph.conf :param upgrading: bool - determines if the context is invoked as - part of an upgrade proedure Setting this to true + part of an upgrade procedure. Setting this to true causes settings useful during an upgrade to be defined in the ceph.conf file """ @@ -471,7 +471,7 @@ def config_changed(): # Check if an upgrade was requested check_for_upgrade() - # Pre-flight checks + # Preflight checks if config('osd-format') not in ceph.DISK_FORMATS: log('Invalid OSD disk format configuration specified', level=ERROR) sys.exit(1) @@ -745,9 +745,9 @@ def update_nrpe_config(): # whether ceph is okay, the check_systemd.py or 'status ceph-osd' still # needs to be called with the contents of ../osd/ceph-*/whoami files. To # get around this conundrum, instead a cron.d job that runs as root will - # perform the checks every minute, and write to a tempory file the results, - # and the nrpe check will grep this file and error out (return 2) if the - # first 3 characters of a line are not 'OK:'. + # perform the checks every minute, and write to a temporary file the + # results, and the nrpe check will grep this file and error out (return 2) + # if the first 3 characters of a line are not 'OK:'. cmd = ('MAILTO=""\n' '* * * * * root ' diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 025ab866..de917a08 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1,4 +1,4 @@ -# Copyright 2017 Canonical Ltd +# Copyright 2017-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -293,7 +293,7 @@ def get_link_speed(network_interface): def persist_settings(settings_dict): # Write all settings to /etc/hdparm.conf - """ This will persist the hard drive settings to the /etc/hdparm.conf file + """This will persist the hard drive settings to the /etc/hdparm.conf file The settings_dict should be in the form of {"uuid": {"key":"value"}} @@ -552,7 +552,7 @@ def get_osd_weight(osd_id): :returns: Float :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our ceph command fails. + :raises: CalledProcessError if our Ceph command fails. 
""" try: tree = str(subprocess @@ -560,7 +560,7 @@ def get_osd_weight(osd_id): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['nodes']: return None for device in json_tree['nodes']: @@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'): def get_osd_tree(service): - """Returns the current osd map in JSON. + """Returns the current OSD map in JSON. :returns: List. :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + Also raises CalledProcessError if our Ceph command fails """ try: tree = str(subprocess @@ -666,12 +666,12 @@ def _get_child_dirs(path): def _get_osd_num_from_dirname(dirname): """Parses the dirname and returns the OSD id. - Parses a string in the form of 'ceph-{osd#}' and returns the osd number + Parses a string in the form of 'ceph-{osd#}' and returns the OSD number from the directory name. :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided + :return int: the OSD number the directory name corresponds to + :raises ValueError: if the OSD number cannot be parsed from the provided directory name. """ match = re.search(r'ceph-(?P\d+)', dirname) @@ -686,7 +686,7 @@ def get_local_osd_ids(): to split the ID off of the directory name and return it in a list. - :returns: list. A list of osd identifiers + :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. """ osd_ids = [] @@ -875,12 +875,12 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation ] @@ -984,7 +984,7 @@ def is_osd_disk(dev): def start_osds(devices): - # Scan for ceph block devices + # Scan for Ceph block devices rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): @@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None): 'get', key_name, ]).decode('UTF-8')).strip() - # NOTE(jamespage); - # Apply any changes to key capabilities, dealing with - # upgrades which requires new caps for operation. - upgrade_key_caps(key_name, - caps or _default_caps, - pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! 
@@ -1270,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None): def upgrade_key_caps(key, caps, pool_list=None): - """ Upgrade key to have capabilities caps """ + """Upgrade key to have capabilities caps""" if not is_leader(): # Not the MON leader OR not clustered return @@ -1304,11 +1298,11 @@ def use_bluestore(): def bootstrap_monitor_cluster(secret): - """Bootstrap local ceph mon into the ceph cluster + """Bootstrap local Ceph mon into the Ceph cluster :param secret: cephx secret to use for monitor authentication :type secret: str - :raises: Exception if ceph mon cannot be bootstrapped + :raises: Exception if Ceph mon cannot be bootstrapped """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1351,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): :type: secret: str :param hostname: hostname of the local unit :type hostname: str - :param path: full path to ceph mon directory + :param path: full path to Ceph mon directory :type path: str - :param done: full path to 'done' marker for ceph mon + :param done: full path to 'done' marker for Ceph mon :type done: str - :param init_marker: full path to 'init' marker for ceph mon + :param init_marker: full path to 'init' marker for Ceph mon :type init_marker: str """ subprocess.check_call(['ceph-authtool', keyring, @@ -1415,13 +1409,13 @@ def create_keyrings(): owner=ceph_user(), group=ceph_user(), perms=0o400) else: - # NOTE(jamespage): Later ceph releases require explicit + # NOTE(jamespage): Later Ceph releases require explicit # call to ceph-create-keys to setup the # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # ceph releases too. This improves bootstrap + # Ceph releases too. This improves bootstrap # resilience as the charm will wait for # presence of peer units before attempting # to bootstrap. Note that charms deploying @@ -1503,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False): def get_devices(name): - """ Merge config and juju storage based devices + """Merge config and Juju storage based devices - :name: THe name of the device type, eg: wal, osd, journal + :name: The name of the device type, e.g.: wal, osd, journal :returns: Set(device names), which are strings """ if config(name): @@ -1547,7 +1541,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native ceph block device format + :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1840,7 +1834,7 @@ def get_conf(variable): Get the value of the given configuration variable from the cluster. - :param variable: ceph configuration variable + :param variable: Ceph configuration variable :returns: str. configured value for provided variable """ @@ -1860,7 +1854,7 @@ def calculate_volume_size(lv_type): :raises KeyError: if invalid lv_type is supplied :returns: int. 
Configured size in megabytes for volume type """ - # lv_type -> ceph configuration option + # lv_type -> Ceph configuration option _config_map = { 'db': 'bluestore_block_db_size', 'wal': 'bluestore_block_wal_size', @@ -1874,7 +1868,7 @@ def calculate_volume_size(lv_type): 'journal': 1024, } - # conversion of ceph config units to MB + # conversion of Ceph config units to MB _units = { 'db': 1048576, # Bytes -> MB 'wal': 1048576, # Bytes -> MB @@ -1907,7 +1901,7 @@ def _luks_uuid(dev): def _initialize_disk(dev, dev_uuid, encrypt=False, key_manager=CEPH_KEY_MANAGER): """ - Initialize a raw block device consuming 100% of the avaliable + Initialize a raw block device consuming 100% of the available disk space. Function assumes that block device has already been wiped. @@ -2004,7 +1998,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an osd. + """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize :param encrypt: bool. Should the OSD directory be encrypted at rest @@ -2074,11 +2068,11 @@ def get_running_osds(): def get_cephfs(service): """List the Ceph Filesystems that exist. - :param service: The service name to run the ceph command under - :returns: list. Returns a list of the ceph filesystems + :param service: The service name to run the Ceph command under + :returns: list. Returns a list of the Ceph filesystems """ if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph + # This command wasn't introduced until 0.86 Ceph return [] try: output = str(subprocess @@ -2157,7 +2151,7 @@ def roll_monitor_cluster(new_version, upgrade_key): sys.exit(1) log('monitor_list: {}'.format(monitor_list)) - # A sorted list of osd unit names + # A sorted list of OSD unit names mon_sorted_list = sorted(monitor_list) # Install packages immediately but defer restarts to when it's our time. @@ -2204,7 +2198,7 @@ def noop(): def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current ceph monitor to the new version + """Upgrade the current Ceph monitor to the new version :param new_version: String version to upgrade to. """ @@ -2212,7 +2206,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) # Needed to determine if whether to stop/start ceph-mgr @@ -2223,7 +2217,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the Ceph source failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2294,7 +2288,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the ceph monitor cluster and upgrade. + """Create a lock on the Ceph monitor cluster and upgrade. :param upgrade_key: str. The cephx key to use :param service: str. The cephx id to use @@ -2443,7 +2437,7 @@ class WatchDog(object): allow for other delays. 
There is a compatibility mode where if the otherside never kicks, then it - simply waits for the compatability timer. + simply waits for the compatibility timer. """ class WatchDogDeadException(Exception): @@ -2578,11 +2572,11 @@ def timed_wait(kicked_at_function, def get_upgrade_position(osd_sorted_list, match_name): - """Return the upgrade position for the given osd. + """Return the upgrade position for the given OSD. - :param osd_sorted_list: Osds sorted + :param osd_sorted_list: OSDs sorted :type osd_sorted_list: [str] - :param match_name: The osd name to match + :param match_name: The OSD name to match :type match_name: str :returns: The position of the name :rtype: int @@ -2591,20 +2585,20 @@ def get_upgrade_position(osd_sorted_list, match_name): for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" + raise ValueError("OSD name '{}' not found in get_upgrade_position list" .format(match_name)) # Edge cases: # 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. +# 2. This assumes that the OSD failure domain is not set to OSD. # It rolls an entire server at a time. def roll_osd_cluster(new_version, upgrade_key): """This is tricky to get right so here's what we're going to do. There's 2 possible cases: Either I'm first in line or not. If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. + and test to see if the previous OSD is upgraded yet. TODO: If you're not in the same failure domain it's safe to upgrade 1. Examine all pools and adopt the most strict failure domain policy @@ -2620,7 +2614,7 @@ def roll_osd_cluster(new_version, upgrade_key): log('roll_osd_cluster called with {}'.format(new_version)) my_name = socket.gethostname() osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names + # A sorted list of OSD unit names osd_sorted_list = sorted(osd_tree) log("osd_sorted_list: {}".format(osd_sorted_list)) @@ -2655,7 +2649,7 @@ def roll_osd_cluster(new_version, upgrade_key): def upgrade_osd(new_version, kick_function=None): - """Upgrades the current osd + """Upgrades the current OSD :param new_version: str. The new version to upgrade to """ @@ -2663,15 +2657,15 @@ def upgrade_osd(new_version, kick_function=None): kick_function = noop current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) + status_set("maintenance", "Upgrading OSD") + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( + log("Adding the Ceph sources failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2685,7 +2679,7 @@ def upgrade_osd(new_version, kick_function=None): kick_function() # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart + # directories in the OSD service directory, then simply restart # all of the OSDs at the same time as this will be the fastest # way to update the code on the node. 
if not dirs_need_ownership_update('osd'): @@ -2700,7 +2694,7 @@ def upgrade_osd(new_version, kick_function=None): # Need to change the ownership of all directories which are not OSD # directories as well. # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. + # and done before mon/OSD. update_owner(CEPH_BASE_DIR, recurse_dirs=False) non_osd_dirs = filter(lambda x: not x == 'osd', os.listdir(CEPH_BASE_DIR)) @@ -2721,12 +2715,12 @@ def upgrade_osd(new_version, kick_function=None): _upgrade_single_osd(osd_num, osd_dir) except ValueError as ex: # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), + log('Could not parse OSD directory %s: %s' % (osd_dir, ex), WARNING) continue except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " + log("Stopping Ceph and upgrading packages failed " "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2753,7 +2747,7 @@ def _upgrade_single_osd(osd_num, osd_dir): def stop_osd(osd_num): """Stops the specified OSD number. - :param osd_num: the osd number to stop + :param osd_num: the OSD number to stop """ if systemd(): service_stop('ceph-osd@{}'.format(osd_num)) @@ -2764,7 +2758,7 @@ def stop_osd(osd_num): def start_osd(osd_num): """Starts the specified OSD number. - :param osd_num: the osd number to start. + :param osd_num: the OSD number to start. """ if systemd(): service_start('ceph-osd@{}'.format(osd_num)) @@ -2775,12 +2769,12 @@ def start_osd(osd_num): def disable_osd(osd_num): """Disables the specified OSD number. - Ensures that the specified osd will not be automatically started at the + Ensures that the specified OSD will not be automatically started at the next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be + this method cannot make any guarantees that the specified OSD cannot be started manually. - :param osd_num: the osd id which should be disabled. + :param osd_num: the OSD id which should be disabled. :raises CalledProcessError: if an error occurs invoking the systemd cmd to disable the OSD :raises IOError, OSError: if the attempt to read/remove the ready file in @@ -2820,7 +2814,7 @@ def enable_osd(osd_num): :param osd_num: the osd id which should be enabled. :raises CalledProcessError: if the call to the systemd command issued fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart + :raises IOError: if the attempt to write the ready file in an upstart enabled system fails """ if systemd(): @@ -2828,7 +2822,7 @@ def enable_osd(osd_num): subprocess.check_call(cmd) else: # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' + # upstart script which will only start the OSD if it has a 'ready' # file. Make sure that file exists. ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), 'ready') @@ -2881,7 +2875,7 @@ def get_osd_state(osd_num, osd_goal_state=None): If osd_goal_state is not None, loop until the current OSD state matches the OSD goal state. - :param osd_num: the osd id to get state for + :param osd_num: the OSD id to get state for :param osd_goal_state: (Optional) string indicating state to wait for Defaults to None :returns: Returns a str, the OSD state. 
@@ -2942,7 +2936,7 @@ def maintain_osd_state(osd_num): Ensures the state of an OSD is the same at the end of a block nested in a with statement as it was at the beginning of the block. - :param osd_num: the osd id to maintain state for + :param osd_num: the OSD id to maintain state for """ osd_state = get_osd_state(osd_num) try: @@ -2969,9 +2963,9 @@ def maintain_all_osd_states(): def list_pools(client='admin'): """This will list the current pools that Ceph has - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Returns a list of available pools. :rtype: list :raises: subprocess.CalledProcessError if the subprocess fails to run. @@ -2996,9 +2990,9 @@ def get_pool_param(pool, param, client='admin'): :type pool: str :param param: Name of variable to get :type param: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Value of variable on pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3020,9 +3014,9 @@ def get_pool_erasure_profile(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Erasure code profile of pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3041,9 +3035,9 @@ def get_pool_quota(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with quota variables :rtype: dict :raises: subprocess.CalledProcessError @@ -3066,9 +3060,9 @@ def get_pool_applications(pool='', client='admin'): :param pool: (Optional) Name of pool to get applications for Defaults to get for all pools :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with pool name as key :rtype: dict :raises: subprocess.CalledProcessError @@ -3131,7 +3125,7 @@ def dirs_need_ownership_update(service): necessary due to the upgrade from Hammer to Jewel where the daemon user changes from root: to ceph:. - :param service: the name of the service folder to check (e.g. osd, mon) + :param service: the name of the service folder to check (e.g. OSD, mon) :returns: boolean. True if the directories need a change of ownership, False otherwise. :raises IOError: if an error occurs reading the file stats from one of @@ -3161,7 +3155,7 @@ def dirs_need_ownership_update(service): return False -# A dict of valid ceph upgrade paths. Mapping is old -> new +# A dict of valid Ceph upgrade paths. 
Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), ('hammer', 'jewel'), @@ -3173,7 +3167,7 @@ def dirs_need_ownership_update(service): ('pacific', 'quincy'), ]) -# Map UCA codenames to ceph codenames +# Map UCA codenames to Ceph codenames UCA_CODENAME_MAP = { 'icehouse': 'firefly', 'juno': 'firefly', @@ -3196,24 +3190,24 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for ceph""" + """Pretty print supported upgrade paths for Ceph""" return ["{} -> {}".format(key, value) for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): - """Resolves a version of ceph based on source configuration + """Resolves a version of Ceph based on source configuration based on Ubuntu Cloud Archive pockets. @param: source: source configuration option of charm - :returns: ceph release codename or None if not resolvable + :returns: Ceph release codename or None if not resolvable """ os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) def get_ceph_pg_stat(): - """Returns the result of ceph pg stat. + """Returns the result of 'ceph pg stat'. :returns: dict """ @@ -3248,7 +3242,7 @@ def get_ceph_health(): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['overall_status']: return None @@ -3265,7 +3259,7 @@ def get_ceph_health(): def reweight_osd(osd_num, new_weight): """Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed + :param osd_num: the OSD id which should be changed :param new_weight: the new weight for the OSD :returns: bool. True if output looks right, else false. :raises CalledProcessError: if an error occurs invoking the systemd cmd @@ -3292,7 +3286,7 @@ def reweight_osd(osd_num, new_weight): def determine_packages(): """Determines packages for installation. - :returns: list of ceph packages + :returns: list of Ceph packages """ packages = PACKAGES.copy() if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': @@ -3361,12 +3355,12 @@ def osd_noout(enable): class OSDConfigSetError(Exception): - """Error occured applying OSD settings.""" + """Error occurred applying OSD settings.""" pass def apply_osd_settings(settings): - """Applies the provided osd settings + """Applies the provided OSD settings Apply the provided settings to all local OSD unless settings are already present. Settings stop being applied on encountering an error. 
@@ -3391,7 +3385,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error retrieving osd setting: {}".format(out['error']), + log("Error retrieving OSD setting: {}".format(out['error']), level=ERROR) return False current_settings[key] = out[cli_key] @@ -3408,7 +3402,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error applying osd setting: {}".format(out['error']), + log("Error applying OSD setting: {}".format(out['error']), level=ERROR) raise OSDConfigSetError return True @@ -3478,7 +3472,7 @@ def mgr_disable_module(module): def ceph_config_set(name, value, who): - """Set a ceph config option + """Set a Ceph config option :param name: key to set :type name: str @@ -3496,7 +3490,7 @@ def ceph_config_set(name, value, who): def ceph_config_get(name, who): - """Retrieve the value of a ceph config option + """Retrieve the value of a Ceph config option :param name: key to lookup :type name: str diff --git a/ceph-osd/unit_tests/test_utils.py b/ceph-osd/unit_tests/test_utils.py index b8d41361..7d4531b2 100644 --- a/ceph-osd/unit_tests/test_utils.py +++ b/ceph-osd/unit_tests/test_utils.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Ltd +# Copyright 2016-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ def load_config(): ''' - Walk backwords from __file__ looking for config.yaml, load and return the + Walk backwards from __file__ looking for config.yaml, load and return the 'options' section' ''' config = None From d800cb59c6f08243d3a26559b880c937c5fb4f2d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 5 Jan 2022 20:10:57 +0100 Subject: [PATCH 2314/2699] Update to get ganesha setup. This change also includes a new helper class to actually create CephFS shares that are then provided by Ganesha. --- ceph-nfs/.gitignore | 3 +- ceph-nfs/config.yaml | 25 ++- ceph-nfs/metadata.yaml | 4 +- ceph-nfs/requirements.txt | 2 +- ceph-nfs/src/charm.py | 109 ++++++++++--- ceph-nfs/src/ganesha.py | 153 ++++++++++++++++++ ceph-nfs/src/interface_ceph_nfs_peer.py | 51 ++++++ .../templates/ceph.client.ceph-nfs.keyring | 3 - ceph-nfs/templates/ceph.conf | 8 +- ceph-nfs/templates/ceph.keyring | 3 + ceph-nfs/templates/ganesha.conf | 102 +----------- ceph-nfs/tests/bundles/focal-octopus.yaml | 8 +- ceph-nfs/tests/tests.yaml | 2 +- 13 files changed, 332 insertions(+), 141 deletions(-) create mode 100644 ceph-nfs/src/ganesha.py create mode 100644 ceph-nfs/src/interface_ceph_nfs_peer.py delete mode 100644 ceph-nfs/templates/ceph.client.ceph-nfs.keyring create mode 100644 ceph-nfs/templates/ceph.keyring diff --git a/ceph-nfs/.gitignore b/ceph-nfs/.gitignore index f3c3e4d8..166180b1 100644 --- a/ceph-nfs/.gitignore +++ b/ceph-nfs/.gitignore @@ -4,5 +4,4 @@ __pycache__ .stestr/ lib/* !lib/README.txt -build -ceph-iscsi.charm +*.charm diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index af036c5f..9a6b9862 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -8,10 +8,25 @@ # Learn more about config at: https://juju.is/docs/sdk/config options: - thing: - default: 🎠- description: A thing used by the charm. 
source: type: string default: ppa:chris.macnaughton/focal-ussuri description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa - cloud:trusty-proposed/kilo - http://my.archive.com/ubuntu main The last option should be used in conjunction with the key configuration option. Note that a minimum ceph version of 0.48.2 is required for use with this charm which is NOT provided by the packages in the main Ubuntu archive for precise but is provided in the Ubuntu cloud archive. key: type: string default: description: | Key ID to import to the apt keyring to support use with arbitrary source configuration from outside of Launchpad archives or PPAs. ceph-osd-replication-count: type: int default: 3 @@ -41,5 +56,5 @@ options: default: type: string description: | - Optionally specify an existing pool that shares should map to. Defaults - to the application's name. + Optionally specify an existing pool that Ganesha should store recovery + data into. Defaults to the application's name. diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index fe73c794..94c0284d 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -5,7 +5,6 @@ description: | The NFS gateway is provided by NFS-Ganesha and provides NFS shares that are backed by CephFS. tags: - - openstack - storage - misc series: @@ -20,3 +19,6 @@ extra-bindings: requires: ceph-client: interface: ceph-client +peers: + cluster: + interface: ceph-nfs-peer \ No newline at end of file diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt index 37368f68..5d99db30 100644 --- a/ceph-nfs/requirements.txt +++ b/ceph-nfs/requirements.txt @@ -2,4 +2,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client -git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack +git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index d3e62f36..f53de8ea 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -15,9 +15,9 @@ import logging import os from pathlib import Path +import socket import subprocess -from ops.charm import CharmBase from ops.framework import StoredState from ops.main import main # from ops.model import ActiveStatus @@ -25,6 +25,9 @@ import charmhelpers.core.host as ch_host import charmhelpers.core.templating as ch_templating import interface_ceph_client.ceph_client as ceph_client +import interface_ceph_nfs_peer +# TODO: Add the below class functionality to action / relations +# from ganesha import GaneshaNfs import ops_openstack.adapters import ops_openstack.core @@ -65,6 +68,32 @@ def key(self): return self.relation.get_relation_data()['key'] +class CephNFSContext(object): + """Adapter for ceph NFS config.""" + + name = 'ceph_nfs' + + def __init__(self, charm_instance): + self.charm_instance = charm_instance + + @property + def pool_name(self): + """The name of the default rbd data pool to be used for shares. + + :returns: Data pool name. 
+ :rtype: str + """ + return self.charm_instance.config_get('rbd-pool-name', self.charm_instance.app.name) + + @property + def client_name(self): + return self.charm_instance.app.name + + @property + def hostname(self): + return socket.gethostname() + + class CephNFSAdapters( ops_openstack.adapters.OpenStackRelationAdapters): """Collection of relation adapters.""" @@ -74,13 +103,14 @@ class CephNFSAdapters( } -class CephNfsCharm(CharmBase): +class CephNfsCharm( + ops_openstack.plugins.classes.BaseCephClientCharm): """Ceph NFS Base Charm.""" - _stored = StoredState() - PACKAGES = ['nfs-ganesha', 'ceph-common'] + PACKAGES = ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace', 'ceph-common'] CEPH_CAPABILITIES = [ + "mgr", "allow rw", "mds", "allow *", "osd", "allow rw", "mon", "allow r, " @@ -89,14 +119,14 @@ class CephNfsCharm(CharmBase): "allow command \"auth get\", " "allow command \"auth get-or-create\""] - REQUIRED_RELATIONS = ['ceph-client', 'cluster'] + REQUIRED_RELATIONS = ['ceph-client'] CEPH_CONFIG_PATH = Path('/etc/ceph') GANESHA_CONFIG_PATH = Path('/etc/ganesha') CEPH_GANESHA_CONFIG_PATH = CEPH_CONFIG_PATH / 'ganesha' - CEPH_CONF = CEPH_GANESHA_CONFIG_PATH / 'ceph.conf' - GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.client.ceph-ganesha.keyring' + CEPH_CONF = CEPH_CONFIG_PATH / 'ceph.conf' + GANESHA_KEYRING = CEPH_GANESHA_CONFIG_PATH / 'ceph.keyring' GANESHA_CONF = GANESHA_CONFIG_PATH / 'ganesha.conf' SERVICES = ['nfs-ganesha'] @@ -114,13 +144,18 @@ def __init__(self, framework): logging.info("Using %s class", self.release) self._stored.set_default( is_started=False, + is_cluster_setup=False ) self.ceph_client = ceph_client.CephClientRequires( self, 'ceph-client') + self.peers = interface_ceph_nfs_peer.CephNfsPeers( + self, + 'cluster') self.adapters = CephNFSAdapters( - (self.ceph_client,), - self) + (self.ceph_client, self.peers), + contexts=(CephNFSContext(self),), + charm_instance=self) self.framework.observe( self.ceph_client.on.broker_available, self.request_ceph_pool) @@ -133,14 +168,20 @@ def __init__(self, framework): self.framework.observe( self.on.upgrade_charm, self.render_config) + self.framework.observe( + self.ceph_client.on.pools_available, + self.setup_ganesha), + self.framework.observe( + self.peers.on.pool_initialised, + self.on_pool_initialised) - def config_get(self, key): + def config_get(self, key, default=None): """Retrieve config option. :returns: Value of the corresponding config option or None. :rtype: Any """ - return self.model.config.get(key) + return self.model.config.get(key, default) @property def pool_name(self): @@ -149,11 +190,7 @@ def pool_name(self): :returns: Data pool name. 
:rtype: str """ - if self.config_get('rbd-pool-name'): - pool_name = self.config_get('rbd-pool-name') - else: - pool_name = self.app.name - return pool_name + return self.config_get('rbd-pool-name', self.app.name) @property def client_name(self): @@ -180,6 +217,7 @@ def request_ceph_pool(self, event): logging.info("Requesting replicated pool") self.ceph_client.create_replicated_pool( name=self.pool_name, + app_name='ganesha', replicas=replicas, weight=weight, **bcomp_kwargs) @@ -200,7 +238,7 @@ def render_config(self, event): event.defer() return - self.CEPH_GANESHA_PATH.mkdir( + self.CEPH_GANESHA_CONFIG_PATH.mkdir( exist_ok=True, mode=0o750) @@ -223,16 +261,35 @@ def _render_configs(): self._stored.is_started = True self.update_status() logging.info("on_pools_available: status updated") + if not self._stored.is_cluster_setup: + subprocess.check_call([ + 'ganesha-rados-grace', '--userid', self.client_name, + '--cephconf', '/etc/ceph/ganesha/ceph.conf', '--pool', self.pool_name, + 'add', socket.gethostname()]) + self._stored.is_cluster_setup = True + + def setup_ganesha(self, event): + if not self.model.unit.is_leader(): + return + cmd = [ + 'rados', '-p', self.pool_name, + '-c', '/etc/ceph/ganesha/ceph.conf', + '--id', self.client_name, + 'put', 'ganesha-export-index', '/dev/null' + ] + try: + subprocess.check_call(cmd) + self.peers.pool_initialised() + except subprocess.CalledProcessError: + logging.error("Failed to setup ganesha index object") + event.defer() - # def custom_status_check(self): - # """Custom update status checks.""" - # if ch_host.is_container(): - # return ops.model.BlockedStatus( - # 'Charm cannot be deployed into a container') - # if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS: - # return ops.model.BlockedStatus( - # '{} is an invalid unit count'.format(self.peers.unit_count)) - # return ops.model.ActiveStatus() + def on_pool_initialised(self, event): + try: + subprocess.check_call(['systemctl', 'restart', 'nfs-ganesha']) + except subprocess.CalledProcessError: + logging.error("Failed to restart nfs-ganesha") + event.defer() @ops_openstack.core.charm_class diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py new file mode 100644 index 00000000..b7e03642 --- /dev/null +++ b/ceph-nfs/src/ganesha.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. + +import json +import logging +import subprocess +import tempfile +import uuid + +logger = logging.getLogger(__name__) + + +# TODO: Add ACL with client IPs +# TODO: Add ACL with kerberos +GANESHA_EXPORT_TEMPLATE = """EXPORT {{ + # Each EXPORT must have a unique Export_Id. + Export_Id = {id}; + + # The directory in the exported file system this export + # is rooted on. 
+ Path = '{path}'; + + # FSAL, Ganesha's module component + FSAL {{ + # FSAL name + Name = "Ceph"; + User_Id = "{user_id}"; + Secret_Access_Key = "{secret_key}"; + }} + + # Path of export in the NFSv4 pseudo filesystem + Pseudo = '{path}'; + + SecType = "sys"; + CLIENT {{ + Access_Type = "rw"; + Clients = 0.0.0.0; + }} + # User id squashing, one of None, Root, All + Squash = "None"; +}} +""" + + +class GaneshaNfs(object): + + export_index = "ganesha-export-index" + export_counter = "ganesha-export-counter" + + def __init__(self, client_name, ceph_pool): + self.client_name = client_name + self.name = str(uuid.uuid4()) + self.ceph_pool = ceph_pool + self.access_id = 'ganesha-{}'.format(self.name) + + def create_share(self): + self.export_path = self._create_cephfs_share() + export_id = self._get_next_export_id() + export_template = GANESHA_EXPORT_TEMPLATE.format( + id=export_id, + path=self.export_path, + user_id=self.access_id, + secret_key=self._ceph_auth_key(), + ) + logging.debug("Export template:: \n{}".format(export_template)) + tmp_file = self._tmpfile(export_template) + self.rados_put('ganesha-export-{}'.format(export_id), tmp_file.name) + self._ganesha_add_export(self.export_path, tmp_file.name) + + def _ganesha_add_export(self, export_path, tmp_path): + return self._dbus_send( + 'ExportMgr', 'AddExport', + 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) + + def _dbus_send(self, section, action, *args): + cmd = [ + 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', + '/org/ganesha/nfsd/{}'.format(section), + 'org.ganesha.nfsd.exportmgr.{}'.format(action)] + [*args] + logging.debug("About to call: {}".format(cmd)) + return subprocess.check_output(cmd) + + def _create_cephfs_share(self): + """Create and authorise a CephFS share. 
+ + :returns: export path + :rtype: union[str, bool] + """ + try: + self._ceph_subvolume_command('create', 'ceph-fs', self.name) + except subprocess.CalledProcessError: + logging.error("failed to create subvolume") + return False + + try: + self._ceph_subvolume_command( + 'authorize', 'ceph-fs', self.name, + 'ganesha-{name}'.format(name=self.name)) + except subprocess.CalledProcessError: + logging.error("failed to authorize subvolume") + return False + + try: + output = self._ceph_subvolume_command('getpath', 'ceph-fs', self.name) + return output.decode('utf-8').strip() + except subprocess.CalledProcessError: + logging.error("failed to get path") + return False + + def _ceph_subvolume_command(self, *cmd): + return self._ceph_fs_command('subvolume', *cmd) + + def _ceph_fs_command(self, *cmd): + return self._ceph_command('fs', *cmd) + + def _ceph_auth_key(self): + output = self._ceph_command( + 'auth', 'get', 'client.{}'.format(self.access_id), '--format=json') + return json.loads(output.decode('UTF-8'))[0]['key'] + + def _ceph_command(self, *cmd): + cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ganesha/ceph.conf"] + [*cmd] + return subprocess.check_output(cmd) + + def _get_next_export_id(self): + next_id = int(self.rados_get(self.export_counter)) + file = self._tmpfile(next_id + 1) + self.rados_put(self.export_counter, file.name) + return next_id + + def _tmpfile(self, value): + file = tempfile.NamedTemporaryFile(mode='w+') + file.write(str(value)) + file.seek(0) + return file + + def rados_get(self, name): + cmd = [ + 'rados', '-p', self.ceph_pool, '--id', self.client_name, + 'get', name, '/dev/stdout' + ] + logging.debug("About to call: {}".format(cmd)) + output = subprocess.check_output(cmd) + return output.decode('utf-8') + + def rados_put(self, name, source): + cmd = [ + 'rados', '-p', self.ceph_pool, '--id', self.client_name, + 'put', name, source + ] + logging.debug("About to call: {}".format(cmd)) + subprocess.check_call(cmd) diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py new file mode 100644 index 00000000..87669739 --- /dev/null +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 + +# import json +import logging +# import socket + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object) + + +class PoolInitialisedEvent(EventBase): + pass + + +class CephNfsPeerEvents(ObjectEvents): + pool_initialised = EventSource(PoolInitialisedEvent) + + +class CephNfsPeers(Object): + + on = CephNfsPeerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.this_unit = self.framework.model.unit + self._stored.set_default( + pool_initialised=False) + self.framework.observe( + charm.on[relation_name].relation_changed, + self.on_changed) + + def on_changed(self, event): + logging.info("CephNfsPeers on_changed") + if self.pool_initialised == 'True' and not self._stored.pool_initialised: + self.on.pool_initialised.emit() + self._stored.pool_initialised = True + + def pool_initialised(self): + logging.info("Setting pool initialised") + self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True' + self.on.pool_initialised.emit() + + @property + def peer_rel(self): + return self.framework.model.get_relation(self.relation_name) diff --git a/ceph-nfs/templates/ceph.client.ceph-nfs.keyring b/ceph-nfs/templates/ceph.client.ceph-nfs.keyring deleted file mode 
100644 index 8ad51ff3..00000000 --- a/ceph-nfs/templates/ceph.client.ceph-nfs.keyring +++ /dev/null @@ -1,3 +0,0 @@ -[client.ceph-nfs] - key = {{ ceph_client.key }} - diff --git a/ceph-nfs/templates/ceph.conf b/ceph-nfs/templates/ceph.conf index 6bedad17..d64755f4 100644 --- a/ceph-nfs/templates/ceph.conf +++ b/ceph-nfs/templates/ceph.conf @@ -6,10 +6,10 @@ [global] auth supported = {{ ceph_client.auth_supported }} mon host = {{ ceph_client.mon_hosts }} -keyring = /etc/ceph/{{ options.application_name }}/$cluster.$name.keyring +keyring = /etc/ceph/ganesha/$cluster.keyring -[client.{{ options.application_name }}] +[client.{{ ceph_nfs.client_name }}] client mount uid = 0 client mount gid = 0 -log file = /var/log/ceph/ceph-client.{{ options.application_name }}.log -{% endif -%} +log file = /var/log/ceph/ceph-client.{{ ceph_nfs.client_name }}.log + diff --git a/ceph-nfs/templates/ceph.keyring b/ceph-nfs/templates/ceph.keyring new file mode 100644 index 00000000..2bfa3209 --- /dev/null +++ b/ceph-nfs/templates/ceph.keyring @@ -0,0 +1,3 @@ +[client.{{ ceph_nfs.client_name }}] + key = {{ ceph_client.key }} + diff --git a/ceph-nfs/templates/ganesha.conf b/ceph-nfs/templates/ganesha.conf index 6ab0e4a5..a75d9c73 100644 --- a/ceph-nfs/templates/ganesha.conf +++ b/ceph-nfs/templates/ganesha.conf @@ -66,112 +66,20 @@ MDCACHE { Dir_Chunk = 0; } -EXPORT -{ - # Unique export ID number for this export - Export_ID=100; - - # We're only interested in NFSv4 in this configuration - Protocols = 4; - - # NFSv4 does not allow UDP transport - Transports = TCP; - - # - # Path into the cephfs tree. - # - # Note that FSAL_CEPH does not support subtree checking, so there is - # no way to validate that a filehandle presented by a client is - # reachable via an exported subtree. - # - # For that reason, we just export "/" here. - Path = /; - - # - # The pseudoroot path. This is where the export will appear in the - # NFS pseudoroot namespace. - # - Pseudo = /cephfs_a/; - - # We want to be able to read and write - Access_Type = RW; - - # Time out attribute cache entries immediately - Attr_Expiration_Time = 0; - - # Enable read delegations? libcephfs v13.0.1 and later allow the - # ceph client to set a delegation. While it's possible to allow RW - # delegations it's not recommended to enable them until ganesha - # acquires CB_GETATTR support. - # - # Note too that delegations may not be safe in clustered - # configurations, so it's probably best to just disable them until - # this problem is resolved: - # - # http://tracker.ceph.com/issues/24802 - # - # Delegations = R; - - # NFS servers usually decide to "squash" incoming requests from the - # root user to a "nobody" user. It's possible to disable that, but for - # now, we leave it enabled. - # Squash = root; - - FSAL { - # FSAL_CEPH export - Name = CEPH; - - # - # Ceph filesystems have a name string associated with them, and - # modern versions of libcephfs can mount them based on the - # name. The default is to mount the default filesystem in the - # cluster (usually the first one created). - # - # Filesystem = "cephfs_a"; - - # - # Ceph clusters have their own authentication scheme (cephx). - # Ganesha acts as a cephfs client. This is the client username - # to use. This user will need to be created before running - # ganesha. - # - # Typically ceph clients have a name like "client.foo". This - # setting should not contain the "client." prefix. 
- # - # See: - # - # http://docs.ceph.com/docs/jewel/rados/operations/user-management/ - # - # The default is to set this to NULL, which means that the - # userid is set to the default in libcephfs (which is - # typically "admin"). - # - User_Id = "{{ client_name }}"; - - # - # Key to use for the session (if any). If not set, it uses the - # normal search path for cephx keyring files to find a key: - # - # Secret_Access_Key = "YOUR SECRET KEY HERE"; - } -} - -# The below were taken from the Manila docs at -# https://docs.openstack.org/manila/queens/contributor/ganesha.html - # To read exports from RADOS objects RADOS_URLS { ceph_conf = "/etc/ceph/ganesha/ceph.conf"; - userid = "{{ client_name }}"; + userid = "{{ ceph_nfs.client_name }}"; } -%url rados://{{ pool_name }}/ganesha-export-index +%url rados://{{ ceph_nfs.pool_name }}/ganesha-export-index # To store client recovery data in the same RADOS pool RADOS_KV { ceph_conf = "/etc/ceph/ganesha/ceph.conf"; - userid = "{{ client_name }}"; - pool = {{ pool_name }}; + userid = "{{ ceph_nfs.client_name }}"; + pool = "{{ ceph_nfs.pool_name }}"; + nodeid = "{{ ceph_nfs.hostname }}"; } # Config block for FSAL_CEPH diff --git a/ceph-nfs/tests/bundles/focal-octopus.yaml b/ceph-nfs/tests/bundles/focal-octopus.yaml index d49b9e34..b08c4cfa 100644 --- a/ceph-nfs/tests/bundles/focal-octopus.yaml +++ b/ceph-nfs/tests/bundles/focal-octopus.yaml @@ -20,8 +20,14 @@ applications: options: monitor-count: '3' expected-osd-count: 6 + ceph-fs: + charm: cs:~openstack-charmers-next/ceph-fs + num_units: 1 + relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' - - 'ceph-osd:mon' - - 'ceph-mon:osd' \ No newline at end of file + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 3b0373c6..0e35d54f 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -9,4 +9,4 @@ tests: [] target_deploy_status: ubuntu: workload-status: active - workload-status-message: '' + workload-status-message-prefix: '' From 0cb08194f6ba9e39290d5810730ccdb9993182b5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 5 Jan 2022 13:37:41 -0600 Subject: [PATCH 2315/2699] Add docstrings and frame for IP ACL --- ceph-nfs/src/ganesha.py | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index b7e03642..a295a5b3 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -11,7 +11,6 @@ logger = logging.getLogger(__name__) -# TODO: Add ACL with client IPs # TODO: Add ACL with kerberos GANESHA_EXPORT_TEMPLATE = """EXPORT {{ # Each EXPORT must have a unique Export_Id. 
@@ -35,7 +34,7 @@ SecType = "sys"; CLIENT {{ Access_Type = "rw"; - Clients = 0.0.0.0; + Clients = {clients} }} # User id squashing, one of None, Root, All Squash = "None"; @@ -55,6 +54,7 @@ def __init__(self, client_name, ceph_pool): self.access_id = 'ganesha-{}'.format(self.name) def create_share(self): + """Create a CephFS Share and export it via Ganesha""" self.export_path = self._create_cephfs_share() export_id = self._get_next_export_id() export_template = GANESHA_EXPORT_TEMPLATE.format( @@ -62,6 +62,7 @@ def create_share(self): path=self.export_path, user_id=self.access_id, secret_key=self._ceph_auth_key(), + clients='0.0.0.0' ) logging.debug("Export template:: \n{}".format(export_template)) tmp_file = self._tmpfile(export_template) @@ -69,11 +70,13 @@ def create_share(self): self._ganesha_add_export(self.export_path, tmp_file.name) def _ganesha_add_export(self, export_path, tmp_path): + """Add a configured NFS export to Ganesha""" return self._dbus_send( 'ExportMgr', 'AddExport', 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) def _dbus_send(self, section, action, *args): + """Send a command to Ganesha via Dbus""" cmd = [ 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', '/org/ganesha/nfsd/{}'.format(section), @@ -109,21 +112,34 @@ def _create_cephfs_share(self): return False def _ceph_subvolume_command(self, *cmd): + """Run a ceph fs subvolume command""" return self._ceph_fs_command('subvolume', *cmd) def _ceph_fs_command(self, *cmd): + """Run a ceph fs command""" return self._ceph_command('fs', *cmd) def _ceph_auth_key(self): + """Retrieve the CephX key associated with this id + + :returns: The access key + :rtype: str + """ output = self._ceph_command( 'auth', 'get', 'client.{}'.format(self.access_id), '--format=json') return json.loads(output.decode('UTF-8'))[0]['key'] def _ceph_command(self, *cmd): + """Run a ceph command""" cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ganesha/ceph.conf"] + [*cmd] return subprocess.check_output(cmd) def _get_next_export_id(self): + """Retrieve the next available export ID, and update the rados key + + :returns: The export ID + :rtype: str + """ next_id = int(self.rados_get(self.export_counter)) file = self._tmpfile(next_id + 1) self.rados_put(self.export_counter, file.name) @@ -136,6 +152,13 @@ def _tmpfile(self, value): return file def rados_get(self, name): + """Retrieve the content of the RADOS object with a given name + + :param name: Name of the RADOS object to retrieve + + :returns: Contents of the RADOS object + :rtype: str + """ cmd = [ 'rados', '-p', self.ceph_pool, '--id', self.client_name, 'get', name, '/dev/stdout' @@ -145,6 +168,13 @@ def rados_get(self, name): return output.decode('utf-8') def rados_put(self, name, source): + """Store the contents of the source file in a named RADOS object. + + :param name: Name of the RADOS object to retrieve + :param source: Path to a file to upload to RADOS. 
+ + :returns: None + """ cmd = [ 'rados', '-p', self.ceph_pool, '--id', self.client_name, 'put', name, source From cc3da39f4f73f4509ff153bb35ca30497c75fc1e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 5 Jan 2022 16:07:36 -0600 Subject: [PATCH 2316/2699] add reload nonce, and fix up some setup bits --- ceph-nfs/actions.yaml | 22 ++++++++-------------- ceph-nfs/src/charm.py | 22 ++++++++++++++++++++-- ceph-nfs/src/ganesha.py | 24 ++++++++++++++++-------- ceph-nfs/src/interface_ceph_nfs_peer.py | 14 +++++++++++++- ceph-nfs/templates/ganesha.conf | 4 ++-- 5 files changed, 59 insertions(+), 27 deletions(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 1b55fb09..3e15c292 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -1,16 +1,10 @@ -# Copyright 2021 OpenStack Charmers +# Copyright 2022 Canonical # See LICENSE file for licensing details. -# -# TEMPLATE-TODO: change this example to suit your needs. -# If you don't need actions, you can remove the file entirely. -# It ties in to the example _on_fortune_action handler in src/charm.py -# -# Learn more about actions at: https://juju.is/docs/sdk/actions -# fortune: -# description: Returns a pithy phrase. -# params: -# fail: -# description: "Fail with this message" -# type: string -# default: "" +create-share: + description: Create a new CephFS Backed NFS export + params: + allowed-ips: + description: IP Addresses to grant Read/Write access to + type: string + default: "0.0.0.0" \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index f53de8ea..9682a764 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -17,6 +17,7 @@ from pathlib import Path import socket import subprocess +import tempfile from ops.framework import StoredState from ops.main import main @@ -174,6 +175,9 @@ def __init__(self, framework): self.framework.observe( self.peers.on.pool_initialised, self.on_pool_initialised) + self.framework.observe( + self.peers.on.reload_nonce, + self.on_reload_nonce) def config_get(self, key, default=None): """Retrieve config option. 
@@ -264,7 +268,7 @@ def _render_configs(): if not self._stored.is_cluster_setup: subprocess.check_call([ 'ganesha-rados-grace', '--userid', self.client_name, - '--cephconf', '/etc/ceph/ganesha/ceph.conf', '--pool', self.pool_name, + '--cephconf', self.CEPH_CONF, '--pool', self.pool_name, 'add', socket.gethostname()]) self._stored.is_cluster_setup = True @@ -273,11 +277,21 @@ def setup_ganesha(self, event): return cmd = [ 'rados', '-p', self.pool_name, - '-c', '/etc/ceph/ganesha/ceph.conf', + '-c', self.CEPH_CONF, '--id', self.client_name, 'put', 'ganesha-export-index', '/dev/null' ] try: + subprocess.check_call(cmd) + counter = tempfile.NamedTemporaryFile('w+') + counter.write('1000') + counter.seek(0) + cmd = [ + 'rados', '-p', self.pool_name, + '-c', self.CEPH_CONF, + '--id', self.client_name, + 'put', 'ganesha-export-counter', counter.name + ] subprocess.check_call(cmd) self.peers.pool_initialised() except subprocess.CalledProcessError: @@ -291,6 +305,10 @@ def on_pool_initialised(self, event): logging.error("Failed torestart nfs-ganesha") event.defer() + def on_reload_nonce(self, _event): + logging.info("Reloading Ganesha after nonce triggered reload") + subprocess.call(['killall', '-HUP', 'ganesha.nfsd']) + @ops_openstack.core.charm_class class CephNFSCharmOcto(CephNfsCharm): diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index a295a5b3..daa4268e 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -34,7 +34,7 @@ SecType = "sys"; CLIENT {{ Access_Type = "rw"; - Clients = {clients} + Clients = {clients}; }} # User id squashing, one of None, Root, All Squash = "None"; @@ -64,10 +64,12 @@ def create_share(self): secret_key=self._ceph_auth_key(), clients='0.0.0.0' ) - logging.debug("Export template:: \n{}".format(export_template)) + logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) - self.rados_put('ganesha-export-{}'.format(export_id), tmp_file.name) + self._rados_put('ganesha-export-{}'.format(export_id), tmp_file.name) self._ganesha_add_export(self.export_path, tmp_file.name) + self._add_share_to_index(export_id) + return self.export_path def _ganesha_add_export(self, export_path, tmp_path): """Add a configured NFS export to Ganesha""" @@ -131,7 +133,7 @@ def _ceph_auth_key(self): def _ceph_command(self, *cmd): """Run a ceph command""" - cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ganesha/ceph.conf"] + [*cmd] + cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd] return subprocess.check_output(cmd) def _get_next_export_id(self): @@ -140,9 +142,9 @@ def _get_next_export_id(self): :returns: The export ID :rtype: str """ - next_id = int(self.rados_get(self.export_counter)) + next_id = int(self._rados_get(self.export_counter)) file = self._tmpfile(next_id + 1) - self.rados_put(self.export_counter, file.name) + self._rados_put(self.export_counter, file.name) return next_id def _tmpfile(self, value): @@ -151,7 +153,7 @@ def _tmpfile(self, value): file.seek(0) return file - def rados_get(self, name): + def _rados_get(self, name): """Retrieve the content of the RADOS object with a given name :param name: Name of the RADOS object to retrieve @@ -167,7 +169,7 @@ def rados_get(self, name): output = subprocess.check_output(cmd) return output.decode('utf-8') - def rados_put(self, name, source): + def _rados_put(self, name, source): """Store the contents of the source file in a named RADOS object. 
:param name: Name of the RADOS object to retrieve @@ -181,3 +183,9 @@ def rados_put(self, name, source): ] logging.debug("About to call: {}".format(cmd)) subprocess.check_call(cmd) + + def _add_share_to_index(self, export_id): + index = self._rados_get(self.export_index) + index += '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) + tmpfile = self._tmpfile(index) + self._rados_put(self.export_index, tmpfile.name) \ No newline at end of file diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index 87669739..ff10c5e3 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -3,6 +3,7 @@ # import json import logging # import socket +import uuid from ops.framework import ( StoredState, @@ -15,9 +16,12 @@ class PoolInitialisedEvent(EventBase): pass +class ReloadNonceEvent(EventBase): + pass class CephNfsPeerEvents(ObjectEvents): pool_initialised = EventSource(PoolInitialisedEvent) + reload_nonce = EventSource(ReloadNonceEvent) class CephNfsPeers(Object): @@ -30,7 +34,8 @@ def __init__(self, charm, relation_name): self.relation_name = relation_name self.this_unit = self.framework.model.unit self._stored.set_default( - pool_initialised=False) + pool_initialised=False, + reload_nonce=None) self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) @@ -40,12 +45,19 @@ def on_changed(self, event): if self.pool_initialised == 'True' and not self._stored.pool_initialised: self.on.pool_initialised.emit() self._stored.pool_initialised = True + if self._stored.reload_nonce != self.reload_nonce(): + self.on.reload_nonce.emit() + self._stored.reload_nonce = self.reload_nonce() def pool_initialised(self): logging.info("Setting pool initialised") self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True' self.on.pool_initialised.emit() + def trigger_reload(self): + self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = uuid.uuid4() + self.on.reload_nonce.emit() + @property def peer_rel(self): return self.framework.model.get_relation(self.relation_name) diff --git a/ceph-nfs/templates/ganesha.conf b/ceph-nfs/templates/ganesha.conf index a75d9c73..6e55f3f0 100644 --- a/ceph-nfs/templates/ganesha.conf +++ b/ceph-nfs/templates/ganesha.conf @@ -68,7 +68,7 @@ MDCACHE { # To read exports from RADOS objects RADOS_URLS { - ceph_conf = "/etc/ceph/ganesha/ceph.conf"; + ceph_conf = "/etc/ceph/ceph.conf"; userid = "{{ ceph_nfs.client_name }}"; } @@ -76,7 +76,7 @@ RADOS_URLS { # To store client recovery data in the same RADOS pool RADOS_KV { - ceph_conf = "/etc/ceph/ganesha/ceph.conf"; + ceph_conf = "/etc/ceph/ceph.conf"; userid = "{{ ceph_nfs.client_name }}"; pool = "{{ ceph_nfs.pool_name }}"; nodeid = "{{ ceph_nfs.hostname }}"; From 5005c055244895c1bd6fdf2b5a317845fbb65d4c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Jan 2022 12:57:36 -0600 Subject: [PATCH 2317/2699] Add action to create a new share --- ceph-nfs/actions.yaml | 4 +++- ceph-nfs/config.yaml | 6 ++++++ ceph-nfs/metadata.yaml | 3 +++ ceph-nfs/src/charm.py | 28 +++++++++++++++++++++---- ceph-nfs/src/ganesha.py | 2 +- ceph-nfs/src/interface_ceph_nfs_peer.py | 16 ++++++++++---- 6 files changed, 49 insertions(+), 10 deletions(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 3e15c292..4b55f8fd 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -7,4 +7,6 @@ create-share: allowed-ips: description: IP Addresses to grant Read/Write access to type: string - default: "0.0.0.0" \ 
No newline at end of file + default: "0.0.0.0" + # TODO: CephFS Share name +# TODO: Update, delete share \ No newline at end of file diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index 9a6b9862..3c53bb4d 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -58,3 +58,9 @@ options: description: | Optionally specify an existing pool that Ganesha should store recovery data into. Defaults to the application's name. + vip: + type: string + default: + description: | + VIP to associate with this service. This VIP will only be functional + with a relation to the hacluster charm. \ No newline at end of file diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 94c0284d..4128ea92 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -19,6 +19,9 @@ extra-bindings: requires: ceph-client: interface: ceph-client + hacluster: + interface: hacluster + scope: container peers: cluster: interface: ceph-nfs-peer \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 9682a764..9caafe14 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -28,7 +28,7 @@ import interface_ceph_client.ceph_client as ceph_client import interface_ceph_nfs_peer # TODO: Add the below class functionaity to action / relations -# from ganesha import GaneshaNfs +from ganesha import GaneshaNfs import ops_openstack.adapters import ops_openstack.core @@ -178,6 +178,10 @@ def __init__(self, framework): self.framework.observe( self.peers.on.reload_nonce, self.on_reload_nonce) + # Actions + self.framework.observe( + self.on.create_share_action, + self.create_share_action) def config_get(self, key, default=None): """Retrieve config option. @@ -265,14 +269,14 @@ def _render_configs(): self._stored.is_started = True self.update_status() logging.info("on_pools_available: status updated") + + def setup_ganesha(self, event): if not self._stored.is_cluster_setup: subprocess.check_call([ 'ganesha-rados-grace', '--userid', self.client_name, '--cephconf', self.CEPH_CONF, '--pool', self.pool_name, 'add', socket.gethostname()]) self._stored.is_cluster_setup = True - - def setup_ganesha(self, event): if not self.model.unit.is_leader(): return cmd = [ @@ -293,7 +297,7 @@ def setup_ganesha(self, event): 'put', 'ganesha-export-counter', counter.name ] subprocess.check_call(cmd) - self.peers.pool_initialised() + self.peers.initialised_pool() except subprocess.CalledProcessError: logging.error("Failed to setup ganesha index object") event.defer() @@ -309,6 +313,22 @@ def on_reload_nonce(self, _event): logging.info("Reloading Ganesha after nonce triggered reload") subprocess.call(['killall', '-HUP', 'ganesha.nfsd']) + def access_address(self) -> str: + """Return the IP to advertise Ganesha on""" + binding = self.model.get_binding('public') + if self.model.get_relation('hacluster'): + return self.config_get('vip') + else: + return str(binding.network.ingress_address) + + def create_share_action(self, event): + if not self.model.unit.is_leader(): + event.fail("Share creation needs to be run from the application leader") + return + client = GaneshaNfs(self.client_name, self.pool_name) + export_path = client.create_share() + self.peers.trigger_reload() + event.set_results({"message": "Share created", "path": export_path, "ip": self.access_address()}) @ops_openstack.core.charm_class class CephNFSCharmOcto(CephNfsCharm): diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index daa4268e..d3f9ad02 100644 --- a/ceph-nfs/src/ganesha.py +++ 
b/ceph-nfs/src/ganesha.py @@ -134,7 +134,7 @@ def _ceph_auth_key(self): def _ceph_command(self, *cmd): """Run a ceph command""" cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd] - return subprocess.check_output(cmd) + return subprocess.check_output(cmd, stderr=subprocess.DEVNULL) def _get_next_export_id(self): """Retrieve the next available export ID, and update the rados key diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index ff10c5e3..e38325d3 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -45,19 +45,27 @@ def on_changed(self, event): if self.pool_initialised == 'True' and not self._stored.pool_initialised: self.on.pool_initialised.emit() self._stored.pool_initialised = True - if self._stored.reload_nonce != self.reload_nonce(): + if self._stored.reload_nonce != self.reload_nonce: self.on.reload_nonce.emit() - self._stored.reload_nonce = self.reload_nonce() + self._stored.reload_nonce = self.reload_nonce - def pool_initialised(self): + def initialised_pool(self): logging.info("Setting pool initialised") self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True' self.on.pool_initialised.emit() def trigger_reload(self): - self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = uuid.uuid4() + self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = str(uuid.uuid4()) self.on.reload_nonce.emit() + @property + def pool_initialised(self): + return self.peer_rel.data[self.peer_rel.app].get('pool_initialised') + + @property + def reload_nonce(self): + return self.peer_rel.data[self.peer_rel.app].get('reload_nonce') + @property def peer_rel(self): return self.framework.model.get_relation(self.relation_name) From e6ab130a7faeaca37b63f526fc2e1ae7c507c497 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Jan 2022 15:35:28 -0600 Subject: [PATCH 2318/2699] refactor ganesha.py and add size controls to new shares --- ceph-nfs/actions.yaml | 10 +++++++- ceph-nfs/src/charm.py | 3 ++- ceph-nfs/src/ganesha.py | 52 +++++++++++++++++++++++++++++------------ 3 files changed, 48 insertions(+), 17 deletions(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 4b55f8fd..c822315b 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -5,8 +5,16 @@ create-share: description: Create a new CephFS Backed NFS export params: allowed-ips: - description: IP Addresses to grant Read/Write access to + description: | + IP Addresses to grant Read/Write access to. The default allows + read/write access to any address that can access this application. type: string default: "0.0.0.0" size: + description: | + Size in gigabytes of the share. When unset, the share will not be + restricted in size.
+ type: integer + default: # TODO: CephFS Share name # TODO: Update, delete share \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 9caafe14..ac377aab 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -325,8 +325,9 @@ def create_share_action(self, event): if not self.model.unit.is_leader(): event.fail("Share creation needs to be run from the application leader") return + share_size = event.params.get('size') client = GaneshaNfs(self.client_name, self.pool_name) - export_path = client.create_share() + export_path = client.create_share(size=share_size) self.peers.trigger_reload() event.set_results({"message": "Share created", "path": export_path, "ip": self.access_address()}) diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index d3f9ad02..4e9e6251 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -49,19 +49,26 @@ class GaneshaNfs(object): def __init__(self, client_name, ceph_pool): self.client_name = client_name - self.name = str(uuid.uuid4()) self.ceph_pool = ceph_pool - self.access_id = 'ganesha-{}'.format(self.name) - def create_share(self): - """Create a CephFS Share and export it via Ganesha""" - self.export_path = self._create_cephfs_share() + def create_share(self, name=None, size=None): + """Create a CephFS Share and export it via Ganesha + + :param name: String name of the share to create + :param size: Int size in gigabytes of the share to create + """ + if name is None: + name = str(uuid.uuid4()) + if size is not None: + size_in_bytes = size * 1024 * 1024 + access_id = 'ganesha-{}'.format(name) + self.export_path = self._create_cephfs_share(name, size_in_bytes) export_id = self._get_next_export_id() export_template = GANESHA_EXPORT_TEMPLATE.format( id=export_id, path=self.export_path, - user_id=self.access_id, - secret_key=self._ceph_auth_key(), + user_id=access_id, + secret_key=self._ceph_auth_key(access_id), clients='0.0.0.0' ) logging.debug("Export template::\n{}".format(export_template)) @@ -71,6 +78,15 @@ def create_share(self): tmp_file = self._tmpfile(export_template) self._rados_put('ganesha-export-{}'.format(export_id), tmp_file.name) self._ganesha_add_export(self.export_path, tmp_file.name) self._add_share_to_index(export_id) return self.export_path + def list_shares(self): + pass + + def get_share(self, id): + pass + + def update_share(self, id): + pass + def _ganesha_add_export(self, export_path, tmp_path): """Add a configured NFS export to Ganesha""" return self._dbus_send( @@ -86,28 +102,34 @@ def _dbus_send(self, section, action, *args): logging.debug("About to call: {}".format(cmd)) return subprocess.check_output(cmd) - def _create_cephfs_share(self): + def _create_cephfs_share(self, name, size_in_bytes=None): """Create and authorise a CephFS share.
+ :param name: String name of the share to create + :param size_in_bytes: Integer size in bytes of the share to create + + :returns: export path + :rtype: union[str, bool] """ try: - self._ceph_subvolume_command('create', 'ceph-fs', self.name) + if size_in_bytes is not None: + self._ceph_subvolume_command('create', 'ceph-fs', name, str(size_in_bytes)) + else: + self._ceph_subvolume_command('create', 'ceph-fs', name) except subprocess.CalledProcessError: logging.error("failed to create subvolume") return False try: self._ceph_subvolume_command( - 'authorize', 'ceph-fs', self.name, - 'ganesha-{name}'.format(name=self.name)) + 'authorize', 'ceph-fs', name, + 'ganesha-{name}'.format(name=name)) except subprocess.CalledProcessError: logging.error("failed to authorize subvolume") return False try: - output = self._ceph_subvolume_command('getpath', 'ceph-fs', self.name) + output = self._ceph_subvolume_command('getpath', 'ceph-fs', name) return output.decode('utf-8').strip() except subprocess.CalledProcessError: logging.error("failed to get path") @@ -121,14 +143,14 @@ def _ceph_fs_command(self, *cmd): """Run a ceph fs command""" return self._ceph_command('fs', *cmd) - def _ceph_auth_key(self): + def _ceph_auth_key(self, access_id): """Retrieve the CephX key associated with this id :returns: The access key :rtype: str """ output = self._ceph_command( - 'auth', 'get', 'client.{}'.format(self.access_id), '--format=json') + 'auth', 'get', 'client.{}'.format(access_id), '--format=json') return json.loads(output.decode('UTF-8'))[0]['key'] def _ceph_command(self, *cmd): @@ -188,4 +210,4 @@ def _add_share_to_index(self, export_id): index = self._rados_get(self.export_index) index += '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) tmpfile = self._tmpfile(index) - self._rados_put(self.export_index, tmpfile.name) \ No newline at end of file + self._rados_put(self.export_index, tmpfile.name) From d4d7d523ebe5a5a0f2d7019a183d30b94f513682 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Jan 2022 15:59:59 -0600 Subject: [PATCH 2319/2699] Cleanup a unit when it is departing --- ceph-nfs/src/charm.py | 10 ++++++++++ ceph-nfs/src/interface_ceph_nfs_peer.py | 13 +++++++++++++ 2 files changed, 23 insertions(+) diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index ac377aab..ad7b8e7a 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -175,6 +175,9 @@ def __init__(self, framework): self.framework.observe( self.peers.on.pool_initialised, self.on_pool_initialised) + self.framework.observe( + self.peers.on.departing, + self.on_departing) self.framework.observe( self.peers.on.reload_nonce, self.on_reload_nonce) @@ -270,6 +273,13 @@ def _render_configs(): self.update_status() logging.info("on_pools_available: status updated") + def on_departing(self, event): + subprocess.check_call([ + 'ganesha-rados-grace', '--userid', self.client_name, + '--cephconf', self.CEPH_CONF, '--pool', self.pool_name, + 'remove', socket.gethostname()]) + self._stored.is_cluster_setup = False + def setup_ganesha(self, event): if not self._stored.is_cluster_setup: subprocess.check_call([ diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index e38325d3..72f4cf9b 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -2,6 +2,7 @@ # import json import logging +import os # import socket import uuid from ops.framework import ( StoredState, @@ -19,9 +20,13 @@ class PoolInitialisedEvent(EventBase): class ReloadNonceEvent(EventBase): pass +class 
DepartedEvent(EventBase): + pass + class CephNfsPeerEvents(ObjectEvents): pool_initialised = EventSource(PoolInitialisedEvent) reload_nonce = EventSource(ReloadNonceEvent) + departing = EventSource(DepartedEvent) class CephNfsPeers(Object): @@ -39,6 +44,9 @@ def __init__(self, charm, relation_name): self.framework.observe( charm.on[relation_name].relation_changed, self.on_changed) + self.framework.observe( + charm.on[relation_name].relation_departed, + self.on_departed) def on_changed(self, event): logging.info("CephNfsPeers on_changed") @@ -49,6 +57,11 @@ def on_changed(self, event): self.on.reload_nonce.emit() self._stored.reload_nonce = self.reload_nonce + def on_departed(self, event): + logging.warning("CephNfsPeers on_departed") + if this_unit.name == os.getenv('JUJU_DEPARTING_UNIT'): + self.on.departing.emit() + def initialised_pool(self): logging.info("Setting pool initialised") self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True' From 33c9d013b268a84d405a7c075cc54953ac8164c4 Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Thu, 13 Jan 2022 07:42:39 +1030 Subject: [PATCH 2320/2699] Display information if missing OSD relation When ceph-mon is blocked on waiting for enough OSDs to be available, it will display a message to that effect. But this is misleading if ceph-mon has not been related to ceph-osd. So if the two are not related, and ceph-mon is waiting for OSDs, then display a message about the relation missing. Closes-Bug: #1886558 Change-Id: Ic5ee9d33d2bb874af7fc7c325773f88c5661fcc6 --- ceph-mon/hooks/ceph_hooks.py | 2 ++ ceph-mon/unit_tests/test_status.py | 48 ++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ea05a470..557903a7 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -1296,6 +1296,8 @@ def assess_status(): expected_osd_count = config('expected-osd-count') or 3 if sufficient_osds(expected_osd_count): status_set('active', 'Unit is ready and clustered') + elif not relation_ids('osd'): + status_set('blocked', 'Missing relation: OSD') else: status_set( 'waiting', diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py index ff181e16..fffe17ff 100644 --- a/ceph-mon/unit_tests/test_status.py +++ b/ceph-mon/unit_tests/test_status.py @@ -102,6 +102,54 @@ def test_assess_status_peers_complete_active(self, _peer_units, self.status_set.assert_called_with('active', mock.ANY) self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'relation_ids') + @mock.patch.object(hooks, 'get_osd_settings') + @mock.patch.object(hooks, 'has_rbd_mirrors') + @mock.patch.object(hooks, 'sufficient_osds') + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_no_osd_relation( + self, + _peer_units, + _sufficient_osds, + _has_rbd_mirrors, + _get_osd_settings, + _relation_ids + ): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + _sufficient_osds.return_value = False + _relation_ids.return_value = [] + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + _has_rbd_mirrors.return_value = False + _get_osd_settings.return_value = {} + hooks.assess_status() + self.status_set.assert_called_with('blocked', 'Missing relation: OSD') + self.application_version_set.assert_called_with('10.2.2') + + @mock.patch.object(hooks, 'relation_ids') + @mock.patch.object(hooks, 'get_osd_settings') + @mock.patch.object(hooks, 'has_rbd_mirrors') + @mock.patch.object(hooks, 
'sufficient_osds') + @mock.patch.object(hooks, 'get_peer_units') + def test_assess_status_osd_relation_but_insufficient_osds( + self, + _peer_units, + _sufficient_osds, + _has_rbd_mirrors, + _get_osd_settings, + _relation_ids + ): + _peer_units.return_value = ENOUGH_PEERS_COMPLETE + _sufficient_osds.return_value = False + _relation_ids.return_value = ['osd:1'] + self.ceph.is_bootstrapped.return_value = True + self.ceph.is_quorum.return_value = True + _has_rbd_mirrors.return_value = False + _get_osd_settings.return_value = {} + hooks.assess_status() + self.status_set.assert_called_with('waiting', mock.ANY) + self.application_version_set.assert_called_with('10.2.2') + @mock.patch.object(hooks, 'get_osd_settings') @mock.patch.object(hooks, 'has_rbd_mirrors') @mock.patch.object(hooks, 'sufficient_osds') From 7b25e949dfe7676b42459544cf1012835cd80ff8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 26 Jan 2022 14:33:28 +0100 Subject: [PATCH 2321/2699] Add action to list existing shares This change also adds typing information for all library functionality. --- ceph-nfs/actions.yaml | 15 ++- ceph-nfs/src/charm.py | 26 ++++- ceph-nfs/src/ganesha.py | 125 +++++++++++++++++++----- ceph-nfs/src/interface_ceph_nfs_peer.py | 11 ++- ceph-nfs/tests/nfs_ganesha.py | 125 ++++++++++++++++++++++++ ceph-nfs/tests/tests.yaml | 3 +- ceph-nfs/unit_tests/test_ganesha.py | 43 ++++++++ 7 files changed, 315 insertions(+), 33 deletions(-) create mode 100644 ceph-nfs/tests/nfs_ganesha.py create mode 100644 ceph-nfs/unit_tests/test_ganesha.py diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index c822315b..3533a365 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -6,15 +6,22 @@ create-share: params: allowed-ips: description: | - IP Addresses to grant Read/Write access to. The default allows - read/write access to any address that can access this application. + Comma separated list of IP Addresses to grant Read/Write access to. + The default allows read/write access to any address that can access + this application. type: string - default: "0.0.0.0" + default: "0.0.0.0/0" size: description: | Size in gigabytes of the share. When unset, the share will not be restricted in size. type: integer default: - # TODO: CephFS Share name + name: + description: | + Name of the share that will be exported. + type: string + default: +list-shares: + description: List all shares that this application is managing # TODO: Update, delete share \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index ad7b8e7a..0026eae5 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -185,6 +185,9 @@ def __init__(self, framework): self.framework.observe( self.on.create_share_action, self.create_share_action) + self.framework.observe( + self.on.list_shares_action, + self.list_shares_action) def config_get(self, key, default=None): """Retrieve config option.
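Editorial note: the observer wiring above depends on how the ops framework names action events. An action declared as "create-share" in actions.yaml surfaces on the charm as self.on.create_share_action: hyphens become underscores and an "_action" suffix is appended. A tiny sketch of that mapping:

    def action_event_name(action_name: str) -> str:
        # ops exposes each actions.yaml entry as an event attribute named
        # '<action>_action', with '-' replaced by '_'.
        return action_name.replace('-', '_') + '_action'

    assert action_event_name('create-share') == 'create_share_action'
    assert action_event_name('list-shares') == 'list_shares_action'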
@@ -254,6 +257,7 @@ def render_config(self, event): mode=0o750) def daemon_reload_and_restart(service_name): + logging.debug("restarting {} after config change".format(service_name)) subprocess.check_call(['systemctl', 'daemon-reload']) subprocess.check_call(['systemctl', 'restart', service_name]) @@ -274,6 +278,7 @@ def _render_configs(): logging.info("on_pools_available: status updated") def on_departing(self, event): + logging.debug("Removing this unit from Ganesha cluster") subprocess.check_call([ 'ganesha-rados-grace', '--userid', self.client_name, '--cephconf', self.CEPH_CONF, '--pool', self.pool_name, @@ -296,10 +301,12 @@ def setup_ganesha(self, event): 'put', 'ganesha-export-index', '/dev/null' ] try: + logging.debug("Creating ganesha-export-index in Ceph") subprocess.check_call(cmd) counter = tempfile.NamedTemporaryFile('w+') counter.write('1000') counter.seek(0) + logging.debug("Creating ganesha-export-counter in Ceph") cmd = [ 'rados', '-p', self.pool_name, '-c', self.CEPH_CONF, @@ -314,6 +321,7 @@ def setup_ganesha(self, event): def on_pool_initialised(self, event): try: + logging.debug("Restarting Ganesha after pool initialisation") subprocess.check_call(['systemctl', 'restart', 'nfs-ganesha']) except subprocess.CalledProcessError: logging.error("Failed to restart nfs-ganesha") @@ -336,10 +344,24 @@ def create_share_action(self, event): if not self.model.unit.is_leader(): event.fail("Share creation needs to be run from the application leader") return share_size = event.params.get('size') + name = event.params.get('name') + allowed_ips = event.params.get('allowed-ips') + allowed_ips = [ip.strip() for ip in allowed_ips.split(',')] client = GaneshaNfs(self.client_name, self.pool_name) - export_path = client.create_share(size=share_size) + export_path = client.create_share(size=share_size, name=name, access_ips=allowed_ips) self.peers.trigger_reload() - event.set_results({"message": "Share created", "path": export_path, "ip": self.access_address()}) + event.set_results({ + "message": "Share created", + "path": export_path, + "ip": self.access_address()}) + + def list_shares_action(self, event): + client = GaneshaNfs(self.client_name, self.pool_name) + exports = client.list_shares() + event.set_results({ + "exports": [{"id": export.export_id, "name": export.name} for export in exports] + }) + @ops_openstack.core.charm_class class CephNFSCharmOcto(CephNfsCharm): diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 4e9e6251..e352fd44 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -5,6 +5,7 @@ import json import logging import subprocess +from typing import List, Optional import tempfile import uuid @@ -12,7 +13,8 @@ # TODO: Add ACL with kerberos -GANESHA_EXPORT_TEMPLATE = """EXPORT {{ +GANESHA_EXPORT_TEMPLATE = """## This export is managed by the CephNFS charm ## +EXPORT {{ # Each EXPORT must have a unique Export_Id.
Export_Id = {id}; @@ -42,6 +44,56 @@ """ +class Export(object): + """Object that encodes and decodes Ganesha export blocks""" + def __init__(self, export_id: int, path: str, + user_id: str, access_key: str, clients: List[str], + name: Optional[str] = None): + self.export_id = export_id + self.path = path + self.user_id = user_id + self.access_key = access_key + self.clients = clients + if '0.0.0.0/0' in self.clients: + self.clients[self.clients.index('0.0.0.0/0')] = '0.0.0.0' + if self.path: + self.name = self.path.split('/')[-2] + + def from_export(export: str) -> 'Export': + if not export.startswith('## This export is managed by the CephNFS charm ##'): + raise RuntimeError('This export is not managed by the CephNFS charm.') + clients = [] + strip_chars = " ;'\"" + for line in [line.strip() for line in export.splitlines()]: + if line.startswith('Export_Id'): + export_id = int(line.split('=', 1)[1].strip(strip_chars)) + if line.startswith('Path'): + path = line.split('=', 1)[1].strip(strip_chars) + if line.startswith('User_Id'): + user_id = line.split('=', 1)[1].strip(strip_chars) + if line.startswith('Secret_Access_Key'): + access_key = line.split('=', 1)[1].strip(strip_chars) + if line.startswith('Clients'): + clients = line.split('=', 1)[1].strip(strip_chars) + clients = clients.split(', ') + return Export( + export_id=export_id, + path=path, + user_id=user_id, + access_key=access_key, + clients=clients + ) + + def to_export(self) -> str: + return GANESHA_EXPORT_TEMPLATE.format( + id=self.export_id, + path=self.path, + user_id=self.user_id, + secret_key=self.access_key, + clients=', '.join(self.clients) + ) + + class GaneshaNfs(object): export_index = "ganesha-export-index" @@ -51,26 +103,36 @@ def __init__(self, client_name, ceph_pool): self.client_name = client_name self.ceph_pool = ceph_pool - def create_share(self, name=None, size=None): + def create_share(self, name: str = None, size: int = None, + access_ips: List[str] = None) -> str: """Create a CephFS Share and export it via Ganesha :param name: String name of the share to create :param size: Int size in gigabytes of the share to create + + :returns: Path to the export """ if name is None: name = str(uuid.uuid4()) + else: + existing_shares = [share for share in self.list_shares() if share.name == name] + if existing_shares: + return existing_shares[0].path if size is not None: size_in_bytes = size * 1024 * 1024 + if access_ips is None: + access_ips = ['0.0.0.0/0'] access_id = 'ganesha-{}'.format(name) self.export_path = self._create_cephfs_share(name, size_in_bytes) export_id = self._get_next_export_id() - export_template = GANESHA_EXPORT_TEMPLATE.format( - id=export_id, + export = Export( + export_id=export_id, path=self.export_path, user_id=access_id, - secret_key=self._ceph_auth_key(access_id), - clients='0.0.0.0' + access_key=self._ceph_auth_key(access_id), + clients=access_ips ) + export_template = export.to_export() logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) self._rados_put('ganesha-export-{}'.format(export_id), tmp_file.name) @@ -78,8 +140,23 @@ def create_share(self, name=None, size=None): self._add_share_to_index(export_id) return self.export_path - def list_shares(self): - pass + def list_shares(self) -> List[Export]: + share_urls = [ + url.replace('%url rados://{}/'.format(self.ceph_pool), '') + for url + in self._rados_get('ganesha-export-index').splitlines()] + exports_raw = [ + self._rados_get(url) + for url in share_urls + if url.strip() + ] + exports = 
[] + for export_raw in exports_raw: + try: + exports.append(Export.from_export(export_raw)) + except RuntimeError: + logging.warning("Encountered an independently created export") + return exports def get_share(self, id): pass @@ -87,13 +164,13 @@ def get_share(self, id): def update_share(self, id): pass - def _ganesha_add_export(self, export_path, tmp_path): + def _ganesha_add_export(self, export_path: str, tmp_path: str): """Add a configured NFS export to Ganesha""" return self._dbus_send( 'ExportMgr', 'AddExport', 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) - def _dbus_send(self, section, action, *args): + def _dbus_send(self, section: str, action: str, *args): """Send a command to Ganesha via Dbus""" cmd = [ 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', @@ -102,7 +179,7 @@ def _dbus_send(self, section: str, action: str, *args): logging.debug("About to call: {}".format(cmd)) return subprocess.check_output(cmd) - def _create_cephfs_share(self, name, size_in_bytes=None): + def _create_cephfs_share(self, name: str, size_in_bytes: int = None): """Create and authorise a CephFS share. :param name: String name of the share to create @@ -135,15 +212,15 @@ def _create_cephfs_share(self, name, size_in_bytes=None): - def _ceph_subvolume_command(self, *cmd): + def _ceph_subvolume_command(self, *cmd: List[str]) -> subprocess.CompletedProcess: """Run a ceph fs subvolume command""" return self._ceph_fs_command('subvolume', *cmd) - def _ceph_fs_command(self, *cmd): + def _ceph_fs_command(self, *cmd: List[str]) -> subprocess.CompletedProcess: """Run a ceph fs command""" return self._ceph_command('fs', *cmd) - def _ceph_auth_key(self, access_id): + def _ceph_auth_key(self, access_id: str) -> str: """Retrieve the CephX key associated with this id :returns: The access key @@ -153,12 +230,12 @@ def _ceph_auth_key(self, access_id): 'auth', 'get', 'client.{}'.format(access_id), '--format=json') return json.loads(output.decode('UTF-8'))[0]['key'] - def _ceph_command(self, *cmd): + def _ceph_command(self, *cmd: List[str]) -> subprocess.CompletedProcess: """Run a ceph command""" cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd] return subprocess.check_output(cmd, stderr=subprocess.DEVNULL) - def _get_next_export_id(self): + def _get_next_export_id(self) -> int: """Retrieve the next available export ID, and update the rados key :returns: The export ID @@ -169,15 +246,15 @@ def _get_next_export_id(self): self._rados_put(self.export_counter, file.name) return next_id - def _tmpfile(self, value): + def _tmpfile(self, value: str) -> tempfile._TemporaryFileWrapper: file = tempfile.NamedTemporaryFile(mode='w+') file.write(str(value)) file.seek(0) return file - def _rados_get(self, name): + def _rados_get(self, name: str) -> str: """Retrieve the content of the RADOS object with a given name - + :param name: Name of the RADOS object to retrieve :returns: Contents of the RADOS object @@ -191,12 +268,12 @@ def _rados_get(self, name): output = subprocess.check_output(cmd) return output.decode('utf-8') - def _rados_put(self, name, source): + def _rados_put(self, name: str, source: str): """Store the contents of the source file in a named RADOS object.
- + :param name: Name of the RADOS object to retrieve :param source: Path to a file to upload to RADOS. - + :returns: None """ cmd = [ @@ -206,8 +283,8 @@ def _rados_put(self, name, source): ] logging.debug("About to call: {}".format(cmd)) subprocess.check_call(cmd) - def _add_share_to_index(self, export_id): + def _add_share_to_index(self, export_id: int): index = self._rados_get(self.export_index) - index += '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) + index += '\n%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) tmpfile = self._tmpfile(index) self._rados_put(self.export_index, tmpfile.name) diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index 72f4cf9b..08ff5469 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -17,12 +17,15 @@ class PoolInitialisedEvent(EventBase): pass + class ReloadNonceEvent(EventBase): pass + class DepartedEvent(EventBase): pass + class CephNfsPeerEvents(ObjectEvents): pool_initialised = EventSource(PoolInitialisedEvent) reload_nonce = EventSource(ReloadNonceEvent) departing = EventSource(DepartedEvent) @@ -50,16 +53,20 @@ def __init__(self, charm, relation_name): def on_changed(self, event): logging.info("CephNfsPeers on_changed") + logging.debug('pool_initialised: {}'.format(self.pool_initialised)) if self.pool_initialised == 'True' and not self._stored.pool_initialised: + logging.info("emitting pool initialised") self.on.pool_initialised.emit() - self._stored.pool_initialised = True + self._stored.pool_initialised = True + logging.debug('reload_nonce: {}'.format(self.reload_nonce)) if self._stored.reload_nonce != self.reload_nonce: + logging.info("emitting reload nonce") self.on.reload_nonce.emit() self._stored.reload_nonce = self.reload_nonce def on_departed(self, event): logging.warning("CephNfsPeers on_departed") - if this_unit.name == os.getenv('JUJU_DEPARTING_UNIT'): + if self.this_unit.name == os.getenv('JUJU_DEPARTING_UNIT'): self.on.departing.emit() def initialised_pool(self): diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py new file mode 100644 index 00000000..44dfc389 --- /dev/null +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -0,0 +1,125 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
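Editorial note: the functional test added below leans on the tenacity library to ride out slow Ganesha/NFS startup, using both the @tenacity.retry decorator and the less familiar iterator form. A condensed, hedged sketch of the iterator form (the flaky() body is a placeholder; the tenacity calls mirror the parameters used in the test):

    import tenacity

    def flaky():
        ...  # e.g. an ssh-driven mount that can fail while ganesha settles

    for attempt in tenacity.Retrying(
            stop=tenacity.stop_after_attempt(5),
            wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)):
        with attempt:
            # Each failed call is retried with exponential backoff; the
            # final failure is re-raised once the attempts are exhausted.
            flaky()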
+ +"""Encapsulate ``Ceph NFS`` testing.""" + +import logging +import subprocess +import tenacity +from typing import Dict +import unittest +import yaml +import zaza +import zaza.utilities.installers + + +class NfsGaneshaTest(unittest.TestCase): + mount_dir = '/mnt/test' + share_protocol = 'nfs' + mounts_share = False + + def tearDown(self): + if self.mounts_share: + try: + zaza.utilities.generic.run_via_ssh( + unit_name='ubuntu/0', + cmd='sudo umount /mnt/test && sudo rmdir /mnt/test') + zaza.utilities.generic.run_via_ssh( + unit_name='ubuntu/1', + cmd='sudo umount /mnt/test && sudo rmdir /mnt/test') + except subprocess.CalledProcessError: + logging.warning("Failed to cleanup mounts") + + def _create_share(self, name: str, size: int = 10) -> Dict[str, str]: + action = zaza.model.run_action_on_leader( + 'ceph-nfs', + 'create-share', + action_params={ + 'name': name, + 'size': size, + }) + self.assertEqual(action.status, 'completed') + results = action.results + logging.debug("Action results: {}".format(results)) + return results + + def _mount_share(self, unit_name: str, share_ip: str, export_path: str): + ssh_cmd = ( + 'sudo mkdir -p {0} && ' + 'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format( + self.mount_dir, + self.share_protocol, + share_ip, + export_path)) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)): + with attempt: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + + def _install_dependencies(self, unit: str): + logging.debug("About to install nfs-common on {}".format(unit)) + zaza.utilities.generic.run_via_ssh( + unit_name=unit, + cmd='sudo apt-get install -yq nfs-common') + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _write_testing_file_on_instance(self, instance_name: str): + zaza.utilities.generic.run_via_ssh( + unit_name=instance_name, + cmd='echo "test" | sudo tee {}/test'.format(self.mount_dir)) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _verify_testing_file_on_instance(self, instance_name: str): + run_with_juju_ssh = zaza.utilities.installers.make_juju_ssh_fn('ubuntu/1', sudo=True) + output = run_with_juju_ssh( + 'sudo cat {}/test'.format(self.mount_dir)) + logging.info("Verification output: {}".format(output)) + self.assertEqual('test\r\n', output) + + def test_create_share(self): + for unit in ['0', '1']: + self._install_dependencies('ubuntu/{}'.format(unit)) + logging.info("Creating a share") + share = self._create_share('test_ganesha_share') + export_path = share['path'] + ip = share['ip'] + logging.info("Mounting share on ubuntu units") + self.mounts_share = True + self._mount_share('ubuntu/0', ip, export_path) + self._mount_share('ubuntu/1', ip, export_path) + logging.info("writing to the share on ubuntu/0") + self._write_testing_file_on_instance('ubuntu/0') + logging.info("reading from the share on ubuntu/1") + self._verify_testing_file_on_instance('ubuntu/1') + + def test_list_shares(self): + self._create_share('test_ganesha_list_share') + action = zaza.model.run_action_on_leader( + 'ceph-nfs', + 'list-shares', + action_params={}) + self.assertEqual(action.status, 'completed') + results = action.results + logging.debug("Action results: {}".format(results)) + logging.debug("exports: {}".format(results['exports'])) + exports = yaml.safe_load(results['exports']) + 
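+ # Editor's note: Juju serialises action result values to strings, so
+ # the yaml.safe_load() above is what turns the raw 'exports' value back
+ # into a native list before the share names are checked.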
self.assertIn('test_ganesha_list_share', [export['name'] for export in exports]) diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 0e35d54f..4052a341 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -5,7 +5,8 @@ gate_bundles: smoke_bundles: - focal-octopus configure: [] -tests: [] +tests: + - tests.nfs_ganesha.NfsGaneshaTest target_deploy_status: ubuntu: workload-status: active diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py new file mode 100644 index 00000000..39c625ae --- /dev/null +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -0,0 +1,43 @@ +import unittest +import ganesha + + +EXAMPLE_EXPORT = """## This export is managed by the CephNFS charm ## +EXPORT { + # Each EXPORT must have a unique Export_Id. + Export_Id = 1000; + + # The directory in the exported file system this export + # is rooted on. + Path = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950'; + + # FSAL, Ganesha's module component + FSAL { + # FSAL name + Name = "Ceph"; + User_Id = "ganesha-test_ganesha_share"; + Secret_Access_Key = "AQCT9+9h4cwJOxAAue2fFvvGTWziUiR9koCHEw=="; + } + + # Path of export in the NFSv4 pseudo filesystem + Pseudo = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950'; + + SecType = "sys"; + CLIENT { + Access_Type = "rw"; + Clients = 0.0.0.0; + } + # User id squashing, one of None, Root, All + Squash = "None"; +} +""" + + +class ExportTest(unittest.TestCase): + + def test_parser(self): + export = ganesha.Export.from_export(EXAMPLE_EXPORT) + self.assertEqual(export.export_id, 1000) + self.assertEqual(export.clients, ['0.0.0.0']) + self.assertEqual(export.to_export(), EXAMPLE_EXPORT) + self.assertEqual(export.name, 'test_ganesha_share') From a35e42e22d90b85db6d2af69ba0a38995868ca44 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 26 Jan 2022 17:07:55 +0100 Subject: [PATCH 2322/2699] Migrate to a more fully-formed export parser --- ceph-nfs/src/ganesha.py | 67 ++++------ ceph-nfs/src/manager.py | 200 ++++++++++++++++++++++++++++ ceph-nfs/tox.ini | 2 +- ceph-nfs/unit_tests/test_ganesha.py | 3 +- 4 files changed, 227 insertions(+), 45 deletions(-) create mode 100644 ceph-nfs/src/manager.py diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index e352fd44..906d2a83 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -4,8 +4,9 @@ import json import logging +import manager import subprocess -from typing import List, Optional +from typing import Dict, List, Optional import tempfile import uuid @@ -13,7 +14,7 @@ # TODO: Add ACL with kerberos -GANESHA_EXPORT_TEMPLATE = """## This export is managed by the CephNFS charm ## +GANESHA_EXPORT_TEMPLATE = """ EXPORT {{ # Each EXPORT must have a unique Export_Id. 
Export_Id = {id}; @@ -46,52 +47,34 @@ class Export(object): """Object that encodes and decodes Ganesha export blocks""" - def __init__(self, export_id: int, path: str, - user_id: str, access_key: str, clients: List[str], - name: Optional[str] = None): - self.export_id = export_id - self.path = path - self.user_id = user_id - self.access_key = access_key - self.clients = clients - if '0.0.0.0/0' in self.clients: - self.clients[self.clients.index('0.0.0.0/0')] = '0.0.0.0' + def __init__(self, export_options: Optional[Dict] = None): + if export_options is None: + export_options = {} + self.export_options = export_options if self.path: self.name = self.path.split('/')[-2] def from_export(export: str) -> 'Export': - if not export.startswith('## This export is managed by the CephNFS charm ##'): - raise RuntimeError('This export is not managed by the CephNFS charm.') - clients = [] - strip_chars = " ;'\"" - for line in [line.strip() for line in export.splitlines()]: - if line.startswith('Export_Id'): - export_id = int(line.split('=', 1)[1].strip(strip_chars)) - if line.startswith('Path'): - path = line.split('=', 1)[1].strip(strip_chars) - if line.startswith('User_Id'): - user_id = line.split('=', 1)[1].strip(strip_chars) - if line.startswith('Secret_Access_Key'): - access_key = line.split('=', 1)[1].strip(strip_chars) - if line.startswith('Clients'): - clients = line.split('=', 1)[1].strip(strip_chars) - clients = clients.split(', ') - return Export( - export_id=export_id, - path=path, - user_id=user_id, - access_key=access_key, - clients=clients - ) + return Export(export_options=manager.parseconf(export)) def to_export(self) -> str: - return GANESHA_EXPORT_TEMPLATE.format( - id=self.export_id, - path=self.path, - user_id=self.user_id, - secret_key=self.access_key, - clients=', '.join(self.clients) - ) + return manager.mkconf(self.export_options) + + @property + def export(self): + return self.export_options['EXPORT'] + + @property + def clients(self): + return self.export['CLIENT'] + + @property + def export_id(self): + return self.export['Export_Id'] + + @property + def path(self): + return self.export['Path'] class GaneshaNfs(object): diff --git a/ceph-nfs/src/manager.py b/ceph-nfs/src/manager.py new file mode 100644 index 00000000..fd625ed8 --- /dev/null +++ b/ceph-nfs/src/manager.py @@ -0,0 +1,200 @@ +# Copyright (c) 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# The contents of this file were copied, almost straight, from +# https://github.com/openstack/manila/blob/a3aaea91494665a25bdccebf69d9e85e8475983d/manila/share/drivers/ganesha/manager.py#L205 +# +# The key differences are the lack of other Ganesha control code +# and the removal of oslo's JSON helpers.
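Editorial note: a quick round-trip illustration of the two helpers this module provides, worked out from the code that follows (abbreviated; the exact whitespace mkconf() emits differs):

    parseconf('EXPORT { Export_Id = 1000; Path = "/v"; }')
    # -> {'EXPORT': {'Export_Id': 1000, 'Path': '/v'}}

    print(mkconf({'EXPORT': {'Export_Id': 1000}}))
    # EXPORT {
    #     Export_Id = 1000;
    # }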
+ + +import io +import json +import re +import sys + + +IWIDTH = 4 + + +def _conf2json(conf): + """Convert Ganesha config to JSON.""" + + # tokenize config string + token_list = [io.StringIO()] + state = { + 'in_quote': False, + 'in_comment': False, + 'escape': False, + } + + cbk = [] + for char in conf: + if state['in_quote']: + if not state['escape']: + if char == '"': + state['in_quote'] = False + cbk.append(lambda: token_list.append(io.StringIO())) + elif char == '\\': + cbk.append(lambda: state.update({'escape': True})) + else: + if char == "#": + state['in_comment'] = True + if state['in_comment']: + if char == "\n": + state['in_comment'] = False + else: + if char == '"': + token_list.append(io.StringIO()) + state['in_quote'] = True + state['escape'] = False + if not state['in_comment']: + token_list[-1].write(char) + while cbk: + cbk.pop(0)() + + if state['in_quote']: + raise RuntimeError("Unterminated quoted string") + + # jsonify tokens + js_token_list = ["{"] + for tok in token_list: + tok = tok.getvalue() + + if tok[0] == '"': + js_token_list.append(tok) + continue + + for pat, s in [ + # add omitted "=" signs to block openings + (r'([^=\s])\s*{', '\\1={'), + # delete trailing semicolons in blocks + (r';\s*}', '}'), + # add omitted semicolons after blocks + (r'}\s*([^}\s])', '};\\1'), + # separate syntactically significant characters + (r'([;{}=])', ' \\1 ')]: + tok = re.sub(pat, s, tok) + + # map tokens to JSON equivalents + for word in tok.split(): + if word == "=": + word = ":" + elif word == ";": + word = ',' + elif (word in ['{', '}'] or + re.search(r'\A-?[1-9]\d*(\.\d+)?\Z', word)): + pass + else: + word = json.dumps(word) + js_token_list.append(word) + js_token_list.append("}") + + # group quoted strings + token_grp_list = [] + for tok in js_token_list: + if tok[0] == '"': + if not (token_grp_list and isinstance(token_grp_list[-1], list)): + token_grp_list.append([]) + token_grp_list[-1].append(tok) + else: + token_grp_list.append(tok) + + # process quoted string groups by joining them + js_token_list2 = [] + for x in token_grp_list: + if isinstance(x, list): + x = ''.join(['"'] + [tok[1:-1] for tok in x] + ['"']) + js_token_list2.append(x) + + return ''.join(js_token_list2) + + +def _dump_to_conf(confdict, out=sys.stdout, indent=0): + """Output confdict in Ganesha config format.""" + if isinstance(confdict, dict): + for k, v in confdict.items(): + if v is None: + continue + if isinstance(v, dict): + out.write(' ' * (indent * IWIDTH) + k + ' ') + out.write("{\n") + _dump_to_conf(v, out, indent + 1) + out.write(' ' * (indent * IWIDTH) + '}') + elif isinstance(v, list): + for item in v: + out.write(' ' * (indent * IWIDTH) + k + ' ') + out.write("{\n") + _dump_to_conf(item, out, indent + 1) + out.write(' ' * (indent * IWIDTH) + '}\n') + # The 'CLIENTS' Ganesha string option is an exception in that it's + # string value can't be enclosed within quotes as can be done for + # other string options in a valid Ganesha conf file. + elif k.upper() == 'CLIENTS': + out.write(' ' * (indent * IWIDTH) + k + ' = ' + v + ';') + else: + out.write(' ' * (indent * IWIDTH) + k + ' ') + out.write('= ') + _dump_to_conf(v, out, indent) + out.write(';') + out.write('\n') + else: + dj = json.dumps(confdict) + out.write(dj) + + +def parseconf(conf): + """Parse Ganesha config. + Both native format and JSON are supported. + Convert config to a (nested) dictionary. + """ + def list_to_dict(src_list): + # Convert a list of key-value pairs stored as tuples to a dict. 
+ # For tuples with identical keys, preserve all the values in a + # list. e.g., argument [('k', 'v1'), ('k', 'v2')] to function + # returns {'k': ['v1', 'v2']}. + dst_dict = {} + for i in src_list: + if isinstance(i, tuple): + k, v = i + if isinstance(v, list): + v = list_to_dict(v) + if k in dst_dict: + dst_dict[k] = [dst_dict[k]] + dst_dict[k].append(v) + else: + dst_dict[k] = v + return dst_dict + + try: + # allow config to be specified in JSON -- + # for sake of people who might feel Ganesha config foreign. + d = json.loads(conf) + except ValueError: + # Customize JSON decoder to convert Ganesha config to a list + # of key-value pairs stored as tuples. This allows multiple + # occurrences of a config block to be later converted to a + # dict key-value pair, with block name being the key and a + # list of block contents being the value. + li = json.loads(_conf2json(conf), object_pairs_hook=lambda x: x) + d = list_to_dict(li) + return d + + +def mkconf(confdict): + """Create Ganesha config string from confdict.""" + s = io.StringIO() + _dump_to_conf(confdict, s) + return s.getvalue() diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini index 775ea578..52928f32 100644 --- a/ceph-nfs/tox.ini +++ b/ceph-nfs/tox.ini @@ -130,4 +130,4 @@ commands = [flake8] # Ignore E902 because the unit_tests directory is missing in the built charm. -ignore = E402,E226,E902 +ignore = E402,E226,E902,W504 diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py index 39c625ae..19bdb4df 100644 --- a/ceph-nfs/unit_tests/test_ganesha.py +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -38,6 +38,5 @@ class ExportTest(unittest.TestCase): def test_parser(self): export = ganesha.Export.from_export(EXAMPLE_EXPORT) self.assertEqual(export.export_id, 1000) - self.assertEqual(export.clients, ['0.0.0.0']) - self.assertEqual(export.to_export(), EXAMPLE_EXPORT) + self.assertEqual(export.clients, {'Access_Type': 'rw', 'Clients': '0.0.0.0'}) self.assertEqual(export.name, 'test_ganesha_share') From 34a863f99f5610935b44011def4a1561a737644e Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Thu, 27 Jan 2022 12:11:08 +1030 Subject: [PATCH 2323/2699] Fix typo in functional test definition impist -> impish; this should fix the functional tests for impish-xena. Change-Id: Ifaac1f371119d7430b6274f9d299fbe0288bac43 --- ceph-mon/tests/bundles/impish-xena.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml index df5c37a8..920683b7 100644 --- a/ceph-mon/tests/bundles/impish-xena.yaml +++ b/ceph-mon/tests/bundles/impish-xena.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: impist +series: impish comment: - 'machines section to decide order of deployment. database sooner = faster' From a8fe85d19e5c6a98f6e9291c40ce3cf62bee6c25 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 25 Jan 2022 18:23:43 -0300 Subject: [PATCH 2324/2699] Create a new key to support OSD disk removal This patchset creates a new key that permits ceph-mon units to execute a set of commands that is needed to properly implement full disk removal from within ceph-osd units. 
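Editorial note: for readers unfamiliar with cephx capabilities, the caps dict this patch passes to get_named_key (shown in the diff below) maps an entity ('mon' here) to a list of capability clauses, and each 'allow command "X"' clause whitelists exactly that monitor command for the generated key. A hypothetical helper (illustrative only, not charm code) showing how such a dict flattens into the entity/capability pairs that 'ceph auth get-or-create' expects:

    def caps_to_auth_args(caps: dict) -> list:
        # {'mon': ['allow command "osd purge"', ...]} becomes
        # ['mon', 'allow command "osd purge", ...'] argument pairs.
        args = []
        for entity, rules in sorted(caps.items()):
            args.extend([entity, ', '.join(rules)])
        return args

    caps = {'mon': ['allow command "osd safe-to-destroy"',
                    'allow command "osd purge"']}
    assert caps_to_auth_args(caps) == [
        'mon',
        'allow command "osd safe-to-destroy", allow command "osd purge"',
    ]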
Change-Id: Ib959e81833eb2094d02c7bdd507b1c8b7fbcd3db --- ceph-mon/hooks/ceph_hooks.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ea05a470..457b4e0c 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -868,6 +868,15 @@ def osd_relation(relid=None, unit=None): 'ceph-public-address': public_addr, 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', caps=ceph.osd_upgrade_caps), + 'osd_disk_removal_key': ceph.get_named_key( + 'osd-removal', + caps={'mon': [ + 'allow command "osd safe-to-destroy"', + 'allow command "osd crush reweight"', + 'allow command "osd purge"', + 'allow command "osd destroy"', + ]} + ) } data.update(handle_broker_request(relid, unit)) @@ -1134,10 +1143,7 @@ def upgrade_charm(): # NOTE(jamespage): # Reprocess broker requests to ensure that any cephx # key permission changes are applied - notify_client() - notify_radosgws() - notify_rbd_mirrors() - notify_prometheus() + notify_relations() @hooks.hook('nrpe-external-master-relation-joined') From d256a7fcc402775a45410da293ced5e1fc748033 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 25 Jan 2022 19:05:13 +0000 Subject: [PATCH 2325/2699] Migrate charm to charmhub latest/edge track Change-Id: I8ee68aa5d714b2052f846862b9db265956b43830 --- ceph-fs/charmcraft.yaml | 13 + ceph-fs/metadata.yaml | 1 + ceph-fs/osci.yaml | 10 +- ceph-fs/src/metadata.yaml | 3 - ceph-fs/src/tests/bundles/bionic-queens.yaml | 112 --------- ceph-fs/src/tests/bundles/bionic-rocky.yaml | 130 ---------- ceph-fs/src/tests/bundles/bionic-stein.yaml | 128 ---------- ceph-fs/src/tests/bundles/bionic-train.yaml | 141 ----------- ceph-fs/src/tests/bundles/bionic-ussuri.yaml | 142 ----------- ceph-fs/src/tests/bundles/focal-ussuri.yaml | 222 ------------------ ceph-fs/src/tests/bundles/focal-victoria.yaml | 222 ------------------ ceph-fs/src/tests/bundles/focal-wallaby.yaml | 222 ------------------ ceph-fs/src/tests/bundles/focal-xena.yaml | 51 ++-- ceph-fs/src/tests/bundles/focal-yoga.yaml | 51 ++-- .../src/tests/bundles/hirsute-wallaby.yaml | 222 ------------------ ceph-fs/src/tests/bundles/impish-xena.yaml | 51 ++-- ceph-fs/src/tests/bundles/jammy-yoga.yaml | 51 ++-- ceph-fs/src/tests/tests.yaml | 12 +- 18 files changed, 154 insertions(+), 1630 deletions(-) create mode 100644 ceph-fs/charmcraft.yaml create mode 120000 ceph-fs/metadata.yaml delete mode 100644 ceph-fs/src/tests/bundles/bionic-queens.yaml delete mode 100644 ceph-fs/src/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-fs/src/tests/bundles/bionic-stein.yaml delete mode 100644 ceph-fs/src/tests/bundles/bionic-train.yaml delete mode 100644 ceph-fs/src/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-fs/src/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-fs/src/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-fs/src/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-fs/src/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml new file mode 100644 index 00000000..a850351d --- /dev/null +++ b/ceph-fs/charmcraft.yaml @@ -0,0 +1,13 @@ +type: charm + +parts: + charm: + source: src/ + plugin: reactive + build-snaps: [charm] + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-fs/metadata.yaml b/ceph-fs/metadata.yaml new file mode 120000 index 00000000..07686838 --- /dev/null +++ b/ceph-fs/metadata.yaml @@ -0,0 +1 @@ +src/metadata.yaml \ No newline at end 
of file diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 6569e1e2..551746b2 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -1,13 +1,9 @@ - project: templates: - - charm-yoga-unit-jobs - - charm-yoga-functional-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-xena-functional-jobs - - charm-wallaby-functional-jobs - - charm-victoria-functional-jobs - - charm-ussuri-functional-jobs - - charm-stein-functional-jobs - - charm-queens-functional-jobs + - charm-yoga-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-fs diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 4fc58d1d..01727358 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -10,10 +10,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish subordinate: false requires: diff --git a/ceph-fs/src/tests/bundles/bionic-queens.yaml b/ceph-fs/src/tests/bundles/bionic-queens.yaml deleted file mode 100644 index fc6301f2..00000000 --- a/ceph-fs/src/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,112 +0,0 @@ -series: bionic -applications: - ceph-fs: - charm: ceph-fs - series: bionic - num_units: 1 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 
'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' \ No newline at end of file diff --git a/ceph-fs/src/tests/bundles/bionic-rocky.yaml b/ceph-fs/src/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index 222a1aea..00000000 --- a/ceph-fs/src/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,130 +0,0 @@ -series: bionic -applications: - ceph-fs: - charm: ceph-fs - series: bionic - num_units: 1 - options: - source: cloud:bionic-rocky - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-rocky - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-rocky - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-rocky - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-rocky - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:bionic-rocky - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: cloud:bionic-rocky - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:bionic-rocky - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:bionic-rocky -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - 
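Several of the bundles in this series configure ceph-fs with pool-type: erasure-coded, ec-profile-k: 4 and ec-profile-m: 2. With k data chunks and m coding chunks per object, the pool tolerates the loss of any m OSDs and consumes (k + m) / k times the raw space, as the quick check below shows; this 1.5x overhead (versus 3x for the default three-way replication) is also why the erasure-coded bundles deploy six ceph-osd units where the replicated bionic-queens bundle needed only three.

    # Raw-space cost of the erasure-coded profile used in these bundles.
    k, m = 4, 2                        # ec-profile-k, ec-profile-m
    ec_overhead = (k + m) / k          # 1.5x raw space per byte stored
    replication_overhead = 3.0         # default replicated pool, size=3
    print(ec_overhead, replication_overhead)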
nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/bionic-stein.yaml b/ceph-fs/src/tests/bundles/bionic-stein.yaml deleted file mode 100644 index 2e59c83e..00000000 --- a/ceph-fs/src/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,128 +0,0 @@ -series: bionic -options: - source: &source cloud:bionic-stein -applications: - ceph-fs: - charm: ceph-fs - series: bionic - num_units: 1 - options: - source: *source - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - source: *source - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: *source - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *source - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *source - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *source - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - constraints: mem=8G root-disk=60G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *source - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *source - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *source -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - 
nova-cloud-controller:image-service - - glance:image-service -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/bionic-train.yaml b/ceph-fs/src/tests/bundles/bionic-train.yaml deleted file mode 100644 index 3dfe9e62..00000000 --- a/ceph-fs/src/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,141 +0,0 @@ -series: bionic -applications: - ceph-fs: - charm: ceph-fs - series: bionic - num_units: 1 - options: - source: cloud:bionic-train - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-train - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: cloud:bionic-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:bionic-train - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-train - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:bionic-train - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:bionic-train -relations: -- - ceph-mon:mds - - ceph-fs:ceph-mds -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - 
nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/bionic-ussuri.yaml b/ceph-fs/src/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index b479d667..00000000 --- a/ceph-fs/src/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,142 +0,0 @@ -series: bionic -applications: - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: cloud:bionic-ussuri - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-ussuri - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-ussuri - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-ussuri - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-ussuri - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: cloud:bionic-ussuri - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: cloud:bionic-ussuri - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: cloud:bionic-ussuri -relations: -- - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' -- - 
'nova-compute:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-compute:image-service' - - 'glance:image-service' -- - 'nova-compute:ceph' - - 'ceph-mon:client' -- - 'keystone:shared-db' - - 'percona-cluster:shared-db' -- - 'glance:shared-db' - - 'percona-cluster:shared-db' -- - 'glance:identity-service' - - 'keystone:identity-service' -- - 'glance:amqp' - - 'rabbitmq-server:amqp' -- - 'glance:ceph' - - 'ceph-mon:client' -- - 'ceph-osd:mon' - - 'ceph-mon:osd' -- - 'nova-cloud-controller:shared-db' - - 'percona-cluster:shared-db' -- - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' -- - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' -- - 'nova-cloud-controller:image-service' - - 'glance:image-service' -- - 'placement' - - 'percona-cluster' -- - 'placement' - - 'keystone' -- - 'placement' - - 'nova-cloud-controller' -- - 'neutron-api:shared-db' - - 'percona-cluster:shared-db' -- - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' -- - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' -- - 'neutron-api:identity-service' - - 'keystone:identity-service' -- - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' -- - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' -- - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' -- - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-ussuri.yaml b/ceph-fs/src/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index 39d9fed9..00000000 --- a/ceph-fs/src/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-cloud-controller: - expose: True - charm: 
cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-victoria.yaml b/ceph-fs/src/tests/bundles/focal-victoria.yaml deleted file mode 100644 index b23a8d52..00000000 --- a/ceph-fs/src/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: &series focal 
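A structural difference between the deleted bionic-era bundles and the focal-era ones: from focal-ussuri onward the single percona-cluster database is replaced by a three-unit mysql-innodb-cluster, and each consumer (keystone, glance, nova-cloud-controller, placement, neutron-api) reaches it through its own subordinate mysql-router, so every service's shared-db relation now terminates at the matching <service>-mysql-router:shared-db endpoint, which in turn joins mysql-innodb-cluster:db-router.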
- -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 
'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-wallaby.yaml b/ceph-fs/src/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 4134e10e..00000000 --- a/ceph-fs/src/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - 
constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-xena.yaml b/ceph-fs/src/tests/bundles/focal-xena.yaml index 08d5372d..a5370c13 100644 --- a/ceph-fs/src/tests/bundles/focal-xena.yaml +++ b/ceph-fs/src/tests/bundles/focal-xena.yaml @@ -16,18 +16,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: 
cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -35,6 +40,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-fs: charm: ceph-fs @@ -48,51 +54,57 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' source: *openstack-origin + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: network-manager: Neutron openstack-origin: *openstack-origin + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 2 constraints: mem=8G options: @@ -101,15 +113,17 @@ applications: enable-resize: true migration-auth-type: ssh openstack-origin: *openstack-origin + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge neutron-api: - charm: cs:~openstack-charmers-next/neutron-api + charm: ch:neutron-api num_units: 1 options: manage-neutron-plugin-legacy-mode: true @@ -117,16 +131,19 @@ applications: flat-network-providers: physnet1 neutron-security-groups: true openstack-origin: *openstack-origin + channel: latest/edge neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch + charm: ch:neutron-openvswitch + channel: latest/edge neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway + charm: ch:neutron-gateway num_units: 1 options: bridge-mappings: physnet1:br-ex openstack-origin: *openstack-origin + channel: latest/edge relations: diff --git a/ceph-fs/src/tests/bundles/focal-yoga.yaml b/ceph-fs/src/tests/bundles/focal-yoga.yaml index 7e5bf103..a0edf3b5 100644 --- a/ceph-fs/src/tests/bundles/focal-yoga.yaml +++ b/ceph-fs/src/tests/bundles/focal-yoga.yaml @@ -16,18 +16,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: 
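The focal-xena hunks above show the mechanical part of the charmhub migration, and the focal-yoga, impish-xena and jammy-yoga bundles receive the identical treatment: each charm source flips from the charm store namespace (cs:~openstack-charmers-next/<name>) to its charmhub name (ch:<name>), and because charmhub delivers charms from channels rather than a single stream, every store-sourced application gains an explicit channel: latest/edge. Only ceph-fs itself keeps a bare charm: ceph-fs reference, since the test deploys the locally built artifact rather than a published charm.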
cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -35,6 +40,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-fs: charm: ceph-fs @@ -48,51 +54,57 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' source: *openstack-origin + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: network-manager: Neutron openstack-origin: *openstack-origin + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 2 constraints: mem=8G options: @@ -101,15 +113,17 @@ applications: enable-resize: true migration-auth-type: ssh openstack-origin: *openstack-origin + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge neutron-api: - charm: cs:~openstack-charmers-next/neutron-api + charm: ch:neutron-api num_units: 1 options: manage-neutron-plugin-legacy-mode: true @@ -117,16 +131,19 @@ applications: flat-network-providers: physnet1 neutron-security-groups: true openstack-origin: *openstack-origin + channel: latest/edge neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch + charm: ch:neutron-openvswitch + channel: latest/edge neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway + charm: ch:neutron-gateway num_units: 1 options: bridge-mappings: physnet1:br-ex openstack-origin: *openstack-origin + channel: latest/edge relations: diff --git a/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml b/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index 400be963..00000000 --- a/ceph-fs/src/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: &series hirsute - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - neutron-api-mysql-router: - charm: 
cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-fs: - charm: ceph-fs - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - - neutron-api: - charm: cs:~openstack-charmers-next/neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - - neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - 
- - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/impish-xena.yaml b/ceph-fs/src/tests/bundles/impish-xena.yaml index ebce11e7..ee6a5ede 100644 --- a/ceph-fs/src/tests/bundles/impish-xena.yaml +++ b/ceph-fs/src/tests/bundles/impish-xena.yaml @@ -16,18 +16,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -35,6 +40,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-fs: charm: ceph-fs @@ -48,51 +54,57 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' source: *openstack-origin + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: network-manager: Neutron openstack-origin: *openstack-origin + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 2 constraints: mem=8G options: @@ -101,15 +113,17 @@ applications: enable-resize: true migration-auth-type: ssh openstack-origin: *openstack-origin + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin + channel: 
latest/edge neutron-api: - charm: cs:~openstack-charmers-next/neutron-api + charm: ch:neutron-api num_units: 1 options: manage-neutron-plugin-legacy-mode: true @@ -117,16 +131,19 @@ applications: flat-network-providers: physnet1 neutron-security-groups: true openstack-origin: *openstack-origin + channel: latest/edge neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch + charm: ch:neutron-openvswitch + channel: latest/edge neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway + charm: ch:neutron-gateway num_units: 1 options: bridge-mappings: physnet1:br-ex openstack-origin: *openstack-origin + channel: latest/edge relations: diff --git a/ceph-fs/src/tests/bundles/jammy-yoga.yaml b/ceph-fs/src/tests/bundles/jammy-yoga.yaml index 35764860..e3fa43e4 100644 --- a/ceph-fs/src/tests/bundles/jammy-yoga.yaml +++ b/ceph-fs/src/tests/bundles/jammy-yoga.yaml @@ -16,18 +16,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge neutron-api-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -35,6 +40,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-fs: charm: ceph-fs @@ -48,51 +54,57 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' source: *openstack-origin + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: network-manager: Neutron openstack-origin: *openstack-origin + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 2 constraints: mem=8G options: @@ -101,15 +113,17 @@ applications: enable-resize: true migration-auth-type: ssh openstack-origin: *openstack-origin + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin + channel: latest/edge neutron-api: - charm: cs:~openstack-charmers-next/neutron-api + charm: ch:neutron-api num_units: 1 options: 
manage-neutron-plugin-legacy-mode: true @@ -117,16 +131,19 @@ applications: flat-network-providers: physnet1 neutron-security-groups: true openstack-origin: *openstack-origin + channel: latest/edge neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch + charm: ch:neutron-openvswitch + channel: latest/edge neutron-gateway: - charm: cs:~openstack-charmers-next/neutron-gateway + charm: ch:neutron-gateway num_units: 1 options: bridge-mappings: physnet1:br-ex openstack-origin: *openstack-origin + channel: latest/edge relations: diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index ba64ac30..8d0a4d2f 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,21 +1,12 @@ charm_name: ceph-fs gate_bundles: - - bionic-queens - - bionic-stein - - bionic-ussuri - - focal-ussuri - - focal-victoria - - focal-wallaby - focal-xena - - hirsute-wallaby - impish-xena dev_bundles: - - bionic-rocky - - bionic-train - focal-yoga - jammy-yoga smoke_bundles: - - focal-ussuri + - focal-xena configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network @@ -28,6 +19,5 @@ tests: - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - - hirsute-wallaby - impish-xena - jammy-yoga From 95b898fb3e6d4a00cefa977108dfa2a58f1da05a Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 26 Jan 2022 12:13:16 +0000 Subject: [PATCH 2326/2699] Migrate charm to charmhub latest/edge track Change-Id: If9f1d3c100e80fb356cd28698a917b7a0f1955f0 --- ceph-osd/charmcraft.yaml | 27 +++ ceph-osd/metadata.yaml | 3 - ceph-osd/osci.yaml | 10 +- ceph-osd/tests/bundles/bionic-queens.yaml | 92 -------- ceph-osd/tests/bundles/bionic-rocky.yaml | 106 ---------- ceph-osd/tests/bundles/bionic-stein.yaml | 106 ---------- ceph-osd/tests/bundles/bionic-train.yaml | 116 ---------- ceph-osd/tests/bundles/bionic-ussuri.yaml | 116 ---------- ceph-osd/tests/bundles/focal-ussuri.yaml | 222 -------------------- ceph-osd/tests/bundles/focal-victoria.yaml | 222 -------------------- ceph-osd/tests/bundles/focal-wallaby.yaml | 222 -------------------- ceph-osd/tests/bundles/focal-xena.yaml | 45 ++-- ceph-osd/tests/bundles/focal-yoga.yaml | 45 ++-- ceph-osd/tests/bundles/hirsute-wallaby.yaml | 222 -------------------- ceph-osd/tests/bundles/impish-xena.yaml | 45 ++-- ceph-osd/tests/bundles/jammy-yoga.yaml | 45 ++-- ceph-osd/tests/tests.yaml | 12 +- 17 files changed, 151 insertions(+), 1505 deletions(-) create mode 100644 ceph-osd/charmcraft.yaml delete mode 100644 ceph-osd/tests/bundles/bionic-queens.yaml delete mode 100644 ceph-osd/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-osd/tests/bundles/bionic-stein.yaml delete mode 100644 ceph-osd/tests/bundles/bionic-train.yaml delete mode 100644 ceph-osd/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-osd/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-osd/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-osd/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-osd/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml new file mode 100644 index 00000000..ba84f314 --- /dev/null +++ b/ceph-osd/charmcraft.yaml @@ -0,0 +1,27 @@ +type: charm + +parts: + charm: + plugin: dump + source: . 
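The tests.yaml above drives zaza, the functional test runner used by these charms: gate_bundles are deployed for every proposed change, dev_bundles only on request, and smoke_bundles by the short smoke run (functest-run-suite with its smoke option), while force_deploy names bundles whose series is newer than the tooling officially supports and so must be deployed with the series check overridden. After the trim, the gate matches the osci.yaml change earlier: only the xena and yoga combinations remain.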
+ prime: + - actions/* + - files/* + - hooks/* + - lib/* + - templates/* + - actions.yaml + - config.yaml + - copyright + - hardening.yaml + - icon.svg + - LICENSE + - Makefile + - metadata.yaml + - README.md + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 7069f780..97bacc8d 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -11,10 +11,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish description: | Ceph is a distributed storage and network file system designed to provide diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 0b2a0aa5..da6cd318 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -1,10 +1,6 @@ - project: templates: - - charm-yoga-unit-jobs - - charm-yoga-functional-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-xena-functional-jobs - - charm-wallaby-functional-jobs - - charm-victoria-functional-jobs - - charm-ussuri-functional-jobs - - charm-stein-functional-jobs - - charm-queens-functional-jobs + - charm-yoga-functional-jobs diff --git a/ceph-osd/tests/bundles/bionic-queens.yaml b/ceph-osd/tests/bundles/bionic-queens.yaml deleted file mode 100644 index cb79f356..00000000 --- a/ceph-osd/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,92 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: ../../../ceph-osd - series: bionic - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:keystone - num_units: 1 - nova-compute: - charm: cs:nova-compute - num_units: 1 - glance: - expose: True - charm: cs:glance - num_units: 1 - cinder: - expose: True - charm: cs:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:nova-cloud-controller - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-rocky.yaml b/ceph-osd/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index 
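The two charmcraft.yaml files in this pair of commits differ deliberately: ceph-fs is a reactive charm, so its part builds from src/ with plugin: reactive, while ceph-osd is a classic hooks charm, so plugin: dump simply copies the files named in the prime list (hooks, actions, templates and so on) into the artifact unchanged. Both declare a single ubuntu 20.04/amd64 base. A typical local workflow would be charmcraft pack followed by juju deploy of the resulting .charm file, whose exact name (for example ceph-osd_ubuntu-20.04-amd64.charm) depends on the charmcraft version -- general charmcraft behaviour, not something these patches encode.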
684a8d5e..00000000 --- a/ceph-osd/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,106 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-rocky - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:bionic-rocky - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-rocky - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-rocky - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-rocky - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-stein.yaml b/ceph-osd/tests/bundles/bionic-stein.yaml deleted file mode 100644 index 1332a5ad..00000000 --- a/ceph-osd/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,106 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-stein - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:bionic-stein - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-stein - rabbitmq-server: - charm: 
cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-stein - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-stein - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-stein -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-train.yaml b/ceph-osd/tests/bundles/bionic-train.yaml deleted file mode 100644 index e5263975..00000000 --- a/ceph-osd/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,116 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-train - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-train - cinder-ceph: - charm: 
cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-train - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-train -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/bionic-ussuri.yaml b/ceph-osd/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index 6a858d4e..00000000 --- a/ceph-osd/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,116 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-ussuri - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-ussuri - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-ussuri - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-ussuri - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-ussuri - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - 
ceph-mon:client -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - cinder-ceph:ceph-access - - nova-compute:ceph-access diff --git a/ceph-osd/tests/bundles/focal-ussuri.yaml b/ceph-osd/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index 51868c30..00000000 --- a/ceph-osd/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: 
cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/focal-victoria.yaml b/ceph-osd/tests/bundles/focal-victoria.yaml deleted file mode 100644 index c7e56257..00000000 --- a/ceph-osd/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 
'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/focal-wallaby.yaml b/ceph-osd/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 2de66d2f..00000000 --- a/ceph-osd/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 
'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml index 932d2bb6..12b55e65 100644 --- a/ceph-osd/tests/bundles/focal-xena.yaml +++ b/ceph-osd/tests/bundles/focal-xena.yaml @@ -29,18 +29,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -48,6 +53,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: charm: ../../../ceph-osd @@ -63,7 +69,7 @@ applications: - '5' ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -72,44 +78,49 @@ applications: - '6' - '7' - '8' + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute 
num_units: 1 options: openstack-origin: *openstack-origin to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -117,26 +128,30 @@ applications: glance-api-version: '2' to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml index 7beef444..11bac0f8 100644 --- a/ceph-osd/tests/bundles/focal-yoga.yaml +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -29,18 +29,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -48,6 +53,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: charm: ../../../ceph-osd @@ -63,7 +69,7 @@ applications: - '5' ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -72,44 +78,49 @@ applications: - '6' - '7' - '8' + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -117,26 +128,30 @@ applications: glance-api-version: '2' to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: 
cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/hirsute-wallaby.yaml b/ceph-osd/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index 333aae02..00000000 --- a/ceph-osd/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,222 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: ../../../ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 
'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/impish-xena.yaml b/ceph-osd/tests/bundles/impish-xena.yaml index 639ff3d5..aaf65aac 100644 --- a/ceph-osd/tests/bundles/impish-xena.yaml +++ b/ceph-osd/tests/bundles/impish-xena.yaml @@ -29,18 +29,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -48,6 +53,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: charm: ../../../ceph-osd @@ -63,7 +69,7 @@ applications: - '5' ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -72,44 +78,49 @@ applications: - '6' - '7' - '8' + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - 
charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -117,26 +128,30 @@ applications: glance-api-version: '2' to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/jammy-yoga.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml index 5a6895cc..2374923c 100644 --- a/ceph-osd/tests/bundles/jammy-yoga.yaml +++ b/ceph-osd/tests/bundles/jammy-yoga.yaml @@ -29,18 +29,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -48,6 +53,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: charm: ../../../ceph-osd @@ -63,7 +69,7 @@ applications: - '5' ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -72,44 +78,49 @@ applications: - '6' - '7' - '8' + channel: latest/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -117,26 +128,30 @@ applications: glance-api-version: '2' to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - 
'15' + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index de4d04a8..4780f66e 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,24 +1,15 @@ charm_name: ceph-osd gate_bundles: - - bionic-queens - - bionic-stein - - bionic-ussuri - - focal-ussuri - - focal-victoria - - focal-wallaby - focal-xena - - hirsute-wallaby - impish-xena dev_bundles: - - bionic-rocky - - bionic-train - focal-yoga - jammy-yoga smoke_bundles: - - focal-ussuri + - focal-xena configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -32,6 +23,5 @@ tests: tests_options: force_deploy: - - hirsute-wallaby - impish-xena - jammy-yoga From fa02a6fa78327cf02d645a3816bf0b3c23a32542 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 26 Jan 2022 12:21:07 +0000 Subject: [PATCH 2327/2699] Migrate charm to charmhub latest/edge track Change-Id: I68d0db36dd156d52c9f5f4b0ab07cdd5eb45c1b0 --- ceph-radosgw/charmcraft.yaml | 27 ++++ ceph-radosgw/metadata.yaml | 3 - ceph-radosgw/osci.yaml | 133 +++--------------- .../bundles/bionic-queens-multisite.yaml | 77 ---------- .../bundles/bionic-queens-namespaced.yaml | 52 ------- ceph-radosgw/tests/bundles/bionic-queens.yaml | 51 ------- .../tests/bundles/bionic-rocky-multisite.yaml | 77 ---------- .../bundles/bionic-rocky-namespaced.yaml | 52 ------- ceph-radosgw/tests/bundles/bionic-rocky.yaml | 51 ------- .../bundles/bionic-stein-namespaced.yaml | 52 ------- ceph-radosgw/tests/bundles/bionic-stein.yaml | 51 ------- .../bundles/bionic-train-namespaced.yaml | 52 ------- ceph-radosgw/tests/bundles/bionic-train.yaml | 51 ------- .../bundles/bionic-ussuri-namespaced.yaml | 52 ------- ceph-radosgw/tests/bundles/bionic-ussuri.yaml | 51 ------- .../tests/bundles/focal-ussuri-ec.yaml | 125 ---------------- .../bundles/focal-ussuri-namespaced.yaml | 117 --------------- ceph-radosgw/tests/bundles/focal-ussuri.yaml | 116 --------------- .../bundles/focal-victoria-namespaced.yaml | 117 --------------- .../tests/bundles/focal-victoria.yaml | 116 --------------- .../bundles/focal-wallaby-namespaced.yaml | 117 --------------- ceph-radosgw/tests/bundles/focal-wallaby.yaml | 116 --------------- .../tests/bundles/focal-xena-namespaced.yaml | 21 ++- ceph-radosgw/tests/bundles/focal-xena.yaml | 21 ++- .../tests/bundles/focal-yoga-namespaced.yaml | 21 ++- ceph-radosgw/tests/bundles/focal-yoga.yaml | 21 ++- .../bundles/hirsute-wallaby-namespaced.yaml | 117 --------------- .../tests/bundles/hirsute-wallaby.yaml | 116 --------------- .../tests/bundles/impish-xena-namespaced.yaml | 21 ++- ceph-radosgw/tests/bundles/impish-xena.yaml | 21 ++- .../tests/bundles/jammy-yoga-namespaced.yaml | 21 ++- ceph-radosgw/tests/bundles/jammy-yoga.yaml | 21 ++- ceph-radosgw/tests/tests.yaml | 25 +--- 33 files changed, 157 insertions(+), 1925 deletions(-) create mode 100644 ceph-radosgw/charmcraft.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-queens.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-stein.yaml delete mode 100644 
ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-train.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml new file mode 100644 index 00000000..ba84f314 --- /dev/null +++ b/ceph-radosgw/charmcraft.yaml @@ -0,0 +1,27 @@ +type: charm + +parts: + charm: + plugin: dump + source: . + prime: + - actions/* + - files/* + - hooks/* + - lib/* + - templates/* + - actions.yaml + - config.yaml + - copyright + - hardening.yaml + - icon.svg + - LICENSE + - Makefile + - metadata.yaml + - README.md + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 4399dcc1..d7dd0827 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -13,10 +13,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish extra-bindings: public: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 046cdf01..a661541e 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -1,29 +1,15 @@ - project: templates: - - charm-yoga-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 check: jobs: - - vault-bionic-queens - - vault-bionic-queens-namespaced - - vault-bionic-stein - - vault-bionic-stein-namespaced - - vault-bionic-ussuri - - vault-bionic-ussuri-namespaced - - vault-focal-ussuri-ec - - vault-focal-ussuri_rgw - - vault-focal-ussuri-namespaced - - vault-focal-victoria_rgw - - vault-focal-victoria-namespaced - - vault-focal-wallaby_rgw - - vault-focal-wallaby-namespaced - vault-focal-xena_rgw - vault-focal-xena-namespaced - vault-focal-yoga_rgw: voting: false - vault-focal-yoga-namespaced: voting: false - - vault-hirsute-wallaby_rgw - - vault-hirsute-wallaby-namespaced - vault-impish-xena_rgw: voting: false - vault-impish-xena-namespaced: @@ -34,145 +20,60 @@ voting: false - job: - name: vault-bionic-ussuri + name: vault-jammy-yoga_rgw parent: func-target dependencies: - osci-lint - - tox-py36 - tox-py38 - tox-py39 - vars: - tox_extra_args: vault:bionic-ussuri -- job: - name: vault-jammy-yoga_rgw - parent: func-target - dependencies: &smoke-jobs - - vault-bionic-ussuri vars: tox_extra_args: vault:jammy-yoga - job: name: vault-jammy-yoga-namespaced parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:jammy-yoga-namespaced - job: name: vault-impish-xena_rgw parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:impish-xena - job: name: vault-impish-xena-namespaced parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: 
vault:impish-xena-namespaced -- job: - name: vault-hirsute-wallaby_rgw - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:hirsute-wallaby -- job: - name: vault-hirsute-wallaby-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:hirsute-wallaby-namespaced - job: name: vault-focal-yoga_rgw parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:focal-yoga - job: name: vault-focal-yoga-namespaced parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:focal-yoga-namespaced - job: name: vault-focal-xena_rgw parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:focal-xena - job: name: vault-focal-xena-namespaced parent: func-target - dependencies: *smoke-jobs + dependencies: + - vault-jammy-yoga_rgw vars: tox_extra_args: vault:focal-xena-namespaced -- job: - name: vault-focal-wallaby_rgw - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-wallaby -- job: - name: vault-focal-wallaby-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-wallaby-namespaced -- job: - name: vault-focal-victoria_rgw - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-victoria -- job: - name: vault-focal-victoria-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-victoria-namespaced -- job: - name: vault-focal-ussuri-ec - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-ussuri-ec -- job: - name: vault-focal-ussuri_rgw - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-ussuri -- job: - name: vault-focal-ussuri-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:focal-ussuri-namespaced -- job: - name: vault-bionic-ussuri-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-ussuri-namespaced -- job: - name: vault-bionic-stein - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-stein -- job: - name: vault-bionic-stein-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-stein-namespaced -- job: - name: vault-bionic-queens - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-queens -- job: - name: vault-bionic-queens-namespaced - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: vault:bionic-queens-namespaced diff --git a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml deleted file mode 100644 index c3ca68bc..00000000 --- a/ceph-radosgw/tests/bundles/bionic-queens-multisite.yaml +++ /dev/null @@ -1,77 +0,0 @@ -options: - source: &source distro -series: bionic -applications: - ceph-radosgw: - series: bionic - charm: ../../../ceph-radosgw - num_units: 1 - options: - source: *source - realm: testrealm - zonegroup: testzonegroup - zone: east-1 - region: east-1 - east-ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - east-ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - 
slave-ceph-radosgw: - series: bionic - charm: ../../../ceph-radosgw - num_units: 1 - options: - source: *source - realm: testrealm - zonegroup: testzonegroup - zone: west-1 - region: west-1 - west-ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - west-ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - region: "east-1 west-1" -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - east-ceph-osd:mon - - east-ceph-mon:osd -- - ceph-radosgw:mon - - east-ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - west-ceph-osd:mon - - west-ceph-mon:osd -- - slave-ceph-radosgw:mon - - west-ceph-mon:radosgw -- - slave-ceph-radosgw:identity-service - - keystone:identity-service -- - slave-ceph-radosgw:slave - - ceph-radosgw:master diff --git a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml deleted file mode 100644 index a3d4fec3..00000000 --- a/ceph-radosgw/tests/bundles/bionic-queens-namespaced.yaml +++ /dev/null @@ -1,52 +0,0 @@ -options: - source: &source distro -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - series: bionic - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-queens.yaml b/ceph-radosgw/tests/bundles/bionic-queens.yaml deleted file mode 100644 index c7a4393f..00000000 --- a/ceph-radosgw/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source distro -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - series: bionic - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: 
cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml deleted file mode 100644 index 82f4b1fa..00000000 --- a/ceph-radosgw/tests/bundles/bionic-rocky-multisite.yaml +++ /dev/null @@ -1,77 +0,0 @@ -options: - source: &source cloud:bionic-rocky -series: bionic -applications: - ceph-radosgw: - series: bionic - charm: ../../../ceph-radosgw - num_units: 1 - options: - source: *source - realm: testrealm - zonegroup: testzonegroup - zone: east-1 - region: east-1 - east-ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - east-ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - slave-ceph-radosgw: - series: bionic - charm: ../../../ceph-radosgw - num_units: 1 - options: - source: *source - realm: testrealm - zonegroup: testzonegroup - zone: west-1 - region: west-1 - west-ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - west-ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - region: "east-1 west-1" -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - east-ceph-osd:mon - - east-ceph-mon:osd -- - ceph-radosgw:mon - - east-ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - west-ceph-osd:mon - - west-ceph-mon:osd -- - slave-ceph-radosgw:mon - - west-ceph-mon:radosgw -- - slave-ceph-radosgw:identity-service - - keystone:identity-service -- - slave-ceph-radosgw:slave - - ceph-radosgw:master diff --git a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml deleted file mode 100644 index 277995f9..00000000 --- a/ceph-radosgw/tests/bundles/bionic-rocky-namespaced.yaml +++ /dev/null @@ -1,52 +0,0 @@ -options: - source: &source cloud:bionic-rocky -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - 
ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-rocky.yaml b/ceph-radosgw/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index 292229fd..00000000 --- a/ceph-radosgw/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source cloud:bionic-rocky -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml deleted file mode 100644 index 62e7f7c2..00000000 --- a/ceph-radosgw/tests/bundles/bionic-stein-namespaced.yaml +++ /dev/null @@ -1,52 +0,0 @@ -options: - source: &source cloud:bionic-stein -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-stein.yaml b/ceph-radosgw/tests/bundles/bionic-stein.yaml deleted file mode 100644 index b9bc6c12..00000000 --- a/ceph-radosgw/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source cloud:bionic-stein -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - ceph-osd: - charm: 
cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml deleted file mode 100644 index c654ab78..00000000 --- a/ceph-radosgw/tests/bundles/bionic-train-namespaced.yaml +++ /dev/null @@ -1,52 +0,0 @@ -options: - source: &source cloud:bionic-train -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-train.yaml b/ceph-radosgw/tests/bundles/bionic-train.yaml deleted file mode 100644 index 785fdfc3..00000000 --- a/ceph-radosgw/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source cloud:bionic-train -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - 
ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml deleted file mode 100644 index c5036b63..00000000 --- a/ceph-radosgw/tests/bundles/bionic-ussuri-namespaced.yaml +++ /dev/null @@ -1,52 +0,0 @@ -options: - source: &source cloud:bionic-ussuri -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - namespace-tenants: True - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/bionic-ussuri.yaml b/ceph-radosgw/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index fcdd19de..00000000 --- a/ceph-radosgw/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,51 +0,0 @@ -options: - source: &source cloud:bionic-ussuri -series: bionic -applications: - ceph-radosgw: - charm: ceph-radosgw - series: bionic - num_units: 1 - options: - source: *source - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 -relations: -- - keystone:shared-db - - percona-cluster:shared-db -- - ceph-osd:mon - - ceph-mon:osd -- - ceph-radosgw:mon - - ceph-mon:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - vault:shared-db - - percona-cluster:shared-db -- - keystone:certificates - - vault:certificates -- - ceph-radosgw:certificates - - vault:certificates diff --git a/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml b/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml deleted file mode 100644 index 740efaed..00000000 --- a/ceph-radosgw/tests/bundles/focal-ussuri-ec.yaml +++ /dev/null @@ -1,125 +0,0 @@ -options: - source: &source distro - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - '12' - - '13' - - '14' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml deleted file mode 100644 index dd3f4591..00000000 --- a/ceph-radosgw/tests/bundles/focal-ussuri-namespaced.yaml +++ /dev/null @@ -1,117 +0,0 @@ -options: - source: &source distro - -series: focal - -comment: -- 'machines section to decide order of deployment. 
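
The erasure-coded variant above is also the reason this one bundle carries six ceph-osd units instead of the usual three. With ec-profile-k: 4 and ec-profile-m: 2, every object is cut into 4 data chunks plus 2 coding chunks, and with the default host failure domain each chunk must land on a separate OSD host, so at least k + m = 6 units are needed. The arithmetic against plain 3-way replication works out roughly as:

    raw storage per usable byte:
      erasure 4+2:  (k + m) / k = 6 / 4 = 1.5x
      replica 3:    3x
    chunk/copy losses tolerated without data loss:
      erasure 4+2:  m = 2
      replica 3:    2

The same k=4/m=2 profile is applied per consuming service, with the client charms picking different plugins (jerasure for glance, isa for nova-compute, lrc for cinder-ceph), so a single deployment exercises several EC back ends at once.
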
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-ussuri.yaml b/ceph-radosgw/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index 91c3a831..00000000 --- a/ceph-radosgw/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,116 +0,0 @@ -options: - source: &source distro - -series: focal - -comment: -- 'machines section to decide order of deployment. 
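
The -namespaced bundles being removed are identical to their base bundles except for a single ceph-radosgw option, namespace-tenants, which maps each Keystone tenant onto its own RADOS Gateway tenant rather than placing every user in one global namespace. The whole delta is:

    ceph-radosgw:
      charm: ceph-radosgw
      num_units: 1
      options:
        source: *source
        namespace-tenants: True   # the only change from the base bundle
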
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml deleted file mode 100644 index 9e5c423c..00000000 --- a/ceph-radosgw/tests/bundles/focal-victoria-namespaced.yaml +++ /dev/null @@ -1,117 +0,0 @@ -options: - source: &source cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-victoria.yaml b/ceph-radosgw/tests/bundles/focal-victoria.yaml deleted file mode 100644 index ddfba9d9..00000000 --- a/ceph-radosgw/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,116 +0,0 @@ -options: - source: &source cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml deleted file mode 100644 index cd11eda5..00000000 --- a/ceph-radosgw/tests/bundles/focal-wallaby-namespaced.yaml +++ /dev/null @@ -1,117 +0,0 @@ -options: - source: &source cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-wallaby.yaml b/ceph-radosgw/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 83f231e9..00000000 --- a/ceph-radosgw/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,116 +0,0 @@ -options: - source: &source cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml index ad973e41..3adb5dcb 100644 --- a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -47,7 +49,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -59,9 +61,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -69,24 +72,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/focal-xena.yaml b/ceph-radosgw/tests/bundles/focal-xena.yaml index 5a590e0c..49bb7845 100644 --- a/ceph-radosgw/tests/bundles/focal-xena.yaml +++ 
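
From this point the series switches from deletions to the actual migration edits, and every surviving bundle (focal-xena, focal-yoga, impish-xena, jammy-yoga, each with its namespaced twin) receives the same mechanical substitution: charm store URLs become Charmhub names, and since Charmhub publishes into channels, every migrated application also gains an explicit channel. Shown once, since the identical hunk repeats for every supporting charm:

    # before: charm store
    ceph-osd:
      charm: cs:~openstack-charmers-next/ceph-osd
    # after: charmhub, tracking latest/edge
    ceph-osd:
      charm: ch:ceph-osd
      channel: latest/edge

The charm under test itself (charm: ceph-radosgw) is deliberately left alone: it is deployed from the local build, so it takes neither a ch: prefix nor a channel.
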
b/ceph-radosgw/tests/bundles/focal-xena.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -46,7 +48,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -58,9 +60,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -68,24 +71,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml index 61d3ad80..c377403f 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -47,7 +49,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -59,9 +61,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -69,24 +72,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml index b39bec22..e1ce28f8 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -46,7 +48,7 @@ applications: - '3' ceph-osd: - 
charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -58,9 +60,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -68,24 +71,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml b/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml deleted file mode 100644 index daaac21d..00000000 --- a/ceph-radosgw/tests/bundles/hirsute-wallaby-namespaced.yaml +++ /dev/null @@ -1,117 +0,0 @@ -options: - source: &source distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml b/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index 5ec99eb6..00000000 --- a/ceph-radosgw/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,116 +0,0 @@ -options: - source: &source distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - - ceph-radosgw: - charm: ceph-radosgw - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - vault: - charm: cs:~openstack-charmers-next/vault - num_units: 1 - to: - - '11' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml index a748f555..374aa34c 100644 --- a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -47,7 +49,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -59,9 +61,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -69,24 +72,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/impish-xena.yaml b/ceph-radosgw/tests/bundles/impish-xena.yaml index 49d34ea0..26dd2da9 100644 --- a/ceph-radosgw/tests/bundles/impish-xena.yaml +++ 
b/ceph-radosgw/tests/bundles/impish-xena.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -46,7 +48,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -58,9 +60,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -68,24 +71,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml index 078cd2b7..ce018839 100644 --- a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -47,7 +49,7 @@ applications: - '3' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -59,9 +61,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -69,24 +72,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/bundles/jammy-yoga.yaml b/ceph-radosgw/tests/bundles/jammy-yoga.yaml index adbf5831..63d1133a 100644 --- a/ceph-radosgw/tests/bundles/jammy-yoga.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga.yaml @@ -25,10 +25,11 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *source @@ -36,6 +37,7 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-radosgw: charm: ceph-radosgw @@ -46,7 +48,7 @@ applications: - '3' ceph-osd: - 
charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 constraints: "mem=2048" storage: @@ -58,9 +60,10 @@ applications: - '4' - '5' - '6' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: source: *source @@ -68,24 +71,28 @@ applications: - '7' - '8' - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *source to: - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge vault: - charm: cs:~openstack-charmers-next/vault + charm: ch:vault num_units: 1 to: - '11' + channel: latest/edge relations: diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 29448f20..06d627b5 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,36 +1,15 @@ charm_name: ceph-radosgw gate_bundles: - - vault: bionic-queens - - vault: bionic-queens-namespaced - - vault: bionic-stein - - vault: bionic-stein-namespaced - - vault: bionic-ussuri - - vault: bionic-ussuri-namespaced - - vault: focal-ussuri - - vault: focal-ussuri-ec - - vault: focal-ussuri-namespaced - - vault: focal-victoria - - vault: focal-victoria-namespaced - - vault: focal-wallaby - - vault: focal-wallaby-namespaced - vault: focal-xena - vault: focal-xena-namespaced - - vault: hirsute-wallaby - - vault: hirsute-wallaby-namespaced - vault: impish-xena - vault: impish-xena-namespaced smoke_bundles: - - vault: focal-ussuri + - vault: focal-xena dev_bundles: - - bionic-queens-multisite - - bionic-rocky-multisite - - vault: bionic-rocky - - vault: bionic-rocky-namespaced - - vault: bionic-train - - vault: bionic-train-namespaced - vault: focal-yoga - vault: focal-yoga-namespaced - vault: jammy-yoga @@ -55,8 +34,6 @@ tests: tests_options: force_deploy: - - hirsute-wallaby - - hirsute-wallaby-namespaced - impish-xena - impish-xena-namespaced - jammy-yoga From 14134713b538eaba54b75b1caf19d054617e29a7 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 26 Jan 2022 11:18:03 +0000 Subject: [PATCH 2328/2699] Migrate charm to charmhub latest/edge track Change-Id: I82c5640230149a227183b67aff5ce14e147cd55e Co-authored-by: Aurelien Lourot --- ceph-mon/charmcraft.yaml | 27 ++ ceph-mon/metadata.yaml | 3 - ceph-mon/osci.yaml | 28 +- ceph-mon/tests/bundles/bionic-queens.yaml | 100 ------- ceph-mon/tests/bundles/bionic-rocky.yaml | 113 -------- ceph-mon/tests/bundles/bionic-stein.yaml | 113 -------- .../tests/bundles/bionic-train-with-fsid.yaml | 118 -------- ceph-mon/tests/bundles/bionic-train.yaml | 123 -------- ceph-mon/tests/bundles/bionic-ussuri.yaml | 123 -------- ceph-mon/tests/bundles/focal-ussuri-ec.yaml | 269 ------------------ ceph-mon/tests/bundles/focal-ussuri.yaml | 235 --------------- ceph-mon/tests/bundles/focal-victoria.yaml | 235 --------------- ceph-mon/tests/bundles/focal-wallaby.yaml | 235 --------------- ceph-mon/tests/bundles/focal-xena.yaml | 45 ++- ceph-mon/tests/bundles/focal-yoga.yaml | 45 ++- ceph-mon/tests/bundles/hirsute-wallaby.yaml | 237 --------------- ceph-mon/tests/bundles/impish-xena.yaml | 45 ++- ceph-mon/tests/bundles/jammy-yoga.yaml | 45 ++- ceph-mon/tests/tests.yaml | 14 +- 19 files changed, 151 insertions(+), 2002 deletions(-) create mode 100644 ceph-mon/charmcraft.yaml delete mode 100644 ceph-mon/tests/bundles/bionic-queens.yaml delete mode 100644 
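
The new ceph-mon/charmcraft.yaml added here is what makes the charm buildable with Charmhub tooling (charmcraft pack). Nothing is compiled: the dump plugin copies the source tree as-is, the prime list selects which paths actually end up inside the .charm artifact, and the bases stanza pins building and running to Ubuntu 20.04 on amd64. An annotated excerpt of the file introduced above:

    parts:
      charm:
        plugin: dump       # copy the listed sources verbatim; no build step
        source: .
        prime:             # only these paths are shipped in the .charm
          - hooks/*
          - lib/*
          - metadata.yaml
    bases:
      - name: ubuntu
        channel: "20.04"   # build and run target; a single base, amd64 only
        architectures:
          - amd64

This is consistent with the series list trimmed from metadata.yaml in the same patch: with bionic, groovy and hirsute dropped, only focal and impish remain as supported series.
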
ceph-mon/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-mon/tests/bundles/bionic-stein.yaml delete mode 100644 ceph-mon/tests/bundles/bionic-train-with-fsid.yaml delete mode 100644 ceph-mon/tests/bundles/bionic-train.yaml delete mode 100644 ceph-mon/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-mon/tests/bundles/focal-ussuri-ec.yaml delete mode 100644 ceph-mon/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-mon/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-mon/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-mon/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml new file mode 100644 index 00000000..ba84f314 --- /dev/null +++ b/ceph-mon/charmcraft.yaml @@ -0,0 +1,27 @@ +type: charm + +parts: + charm: + plugin: dump + source: . + prime: + - actions/* + - files/* + - hooks/* + - lib/* + - templates/* + - actions.yaml + - config.yaml + - copyright + - hardening.yaml + - icon.svg + - LICENSE + - Makefile + - metadata.yaml + - README.md + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 4543b1b1..1b7039b6 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -10,10 +10,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish peers: mon: diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index baacc7d5..da6cd318 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -1,28 +1,6 @@ - project: templates: - - charm-yoga-unit-jobs - - charm-yoga-functional-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-xena-functional-jobs - - charm-wallaby-functional-jobs - - charm-victoria-functional-jobs - - charm-ussuri-functional-jobs - - charm-stein-functional-jobs - - charm-queens-functional-jobs - check: - jobs: - - focal-ussuri-ec-ceph-mon - - bionic-train-with-fsid - -- job: - name: focal-ussuri-ec-ceph-mon - parent: func-target - dependencies: &smoke-jobs - - bionic-ussuri - vars: - tox_extra_args: focal-ussuri-ec -- job: - name: bionic-train-with-fsid - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: bionic-train-with-fsid + - charm-yoga-functional-jobs diff --git a/ceph-mon/tests/bundles/bionic-queens.yaml b/ceph-mon/tests/bundles/bionic-queens.yaml deleted file mode 100644 index 614442e7..00000000 --- a/ceph-mon/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,100 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - ceph-mon: - charm: ../../../ceph-mon - series: bionic - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - 
nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-rocky.yaml b/ceph-mon/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index dde594de..00000000 --- a/ceph-mon/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,113 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-rocky - ceph-mon: - charm: ../../../ceph-mon - series: bionic - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:bionic-rocky - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-rocky - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-rocky - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - libvirt-image-backend: rbd - openstack-origin: cloud:bionic-rocky - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-rocky - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - 
percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-stein.yaml b/ceph-mon/tests/bundles/bionic-stein.yaml deleted file mode 100644 index 92f889eb..00000000 --- a/ceph-mon/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,113 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/srv/ceph /dev/test-non-existent' - source: cloud:bionic-stein - ceph-mon: - charm: ../../../ceph-mon - series: bionic - num_units: 3 - options: - monitor-count: '3' - auth-supported: 'none' - source: cloud:bionic-stein - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-stein - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-stein - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - libvirt-image-backend: rbd - openstack-origin: cloud:bionic-stein - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-stein - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - 
nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml b/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml deleted file mode 100644 index d41c63b3..00000000 --- a/ceph-mon/tests/bundles/bionic-train-with-fsid.yaml +++ /dev/null @@ -1,118 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train/proposed - ceph-mon: - charm: ../../../ceph-mon - series: bionic - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-train/proposed - fsid: 3930914c-4fc5-4720-8975-b7bf554f647c - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-train/proposed - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train/proposed - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train/proposed - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train/proposed - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train/proposed - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-train/proposed - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-train/proposed - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-train -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - 
placement - - keystone -- - placement - - nova-cloud-controller diff --git a/ceph-mon/tests/bundles/bionic-train.yaml b/ceph-mon/tests/bundles/bionic-train.yaml deleted file mode 100644 index 4c24951a..00000000 --- a/ceph-mon/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,123 +0,0 @@ -series: bionic -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: '10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-train - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-train - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-train - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-train - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-train - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/bundles/bionic-ussuri.yaml b/ceph-mon/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index bbed4302..00000000 --- a/ceph-mon/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,123 +0,0 @@ -series: bionic 
-applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - series: bionic - storage: - osd-devices: '10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-ussuri - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-ussuri - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-ussuri - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-ussuri - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - libvirt-image-backend: rbd - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: cloud:bionic-ussuri - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:image-service - - glance:image-service -- - nova-compute:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - keystone:shared-db - - percona-cluster:shared-db -- - glance:shared-db - - percona-cluster:shared-db -- - glance:identity-service - - keystone:identity-service -- - glance:amqp - - rabbitmq-server:amqp -- - glance:ceph - - ceph-mon:client -- - cinder:shared-db - - percona-cluster:shared-db -- - cinder:identity-service - - keystone:identity-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:image-service - - glance:image-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-osd:mon - - ceph-mon:osd -- - nova-cloud-controller:shared-db - - percona-cluster:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:image-service - - glance:image-service -- - placement - - percona-cluster -- - placement - - keystone -- - placement - - nova-cloud-controller -- - ceph-mon:prometheus - - prometheus2:target diff --git a/ceph-mon/tests/bundles/focal-ussuri-ec.yaml b/ceph-mon/tests/bundles/focal-ussuri-ec.yaml deleted file mode 100644 index 8cedf3b2..00000000 --- a/ceph-mon/tests/bundles/focal-ussuri-ec.yaml +++ /dev/null @@ -1,269 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - '19': - '20': - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - '17' - - '18' - - '19' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '20' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 
'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' diff --git a/ceph-mon/tests/bundles/focal-ussuri.yaml b/ceph-mon/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index 533bee4a..00000000 --- a/ceph-mon/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,235 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 
'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/focal-victoria.yaml b/ceph-mon/tests/bundles/focal-victoria.yaml deleted file mode 100644 index 1bfd8964..00000000 --- a/ceph-mon/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,235 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: 
cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/focal-wallaby.yaml b/ceph-mon/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 21ce4e6f..00000000 --- a/ceph-mon/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,235 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 
'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index 991293f5..82602a31 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -31,18 +31,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -50,9 +55,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: '10G' @@ -63,6 +69,7 @@ applications: - '3' - '4' - '5' + channel: latest/edge ceph-mon: charm: ../../../ceph-mon @@ -76,43 +83,47 @@ applications: - '8' rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: 'None' @@ -120,26 +131,30 @@ applications: openstack-origin: *openstack-origin to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: 
latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 74d5fc5d..1b264c44 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -31,18 +31,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -50,9 +55,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: '10G' @@ -63,6 +69,7 @@ applications: - '3' - '4' - '5' + channel: latest/edge ceph-mon: charm: ../../../ceph-mon @@ -76,43 +83,47 @@ applications: - '8' rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: 'None' @@ -120,26 +131,30 @@ applications: openstack-origin: *openstack-origin to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/hirsute-wallaby.yaml b/ceph-mon/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index 17ee2e0a..00000000 --- a/ceph-mon/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,237 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - series: focal - - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ../../../ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - keystone: - expose: True - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - - glance: - expose: True - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - - cinder: - expose: True - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - - nova-cloud-controller: - expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - placement: - charm: cs:~openstack-charmers-next/placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - series: focal - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 
'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml index df5c37a8..90685e52 100644 --- a/ceph-mon/tests/bundles/impish-xena.yaml +++ b/ceph-mon/tests/bundles/impish-xena.yaml @@ -32,18 +32,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -51,9 +56,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: '10G' @@ -64,6 +70,7 @@ applications: - '3' - '4' - '5' + channel: latest/edge ceph-mon: charm: ../../../ceph-mon @@ -77,43 +84,47 @@ applications: - '8' rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: 'None' @@ -121,26 +132,30 @@ applications: openstack-origin: *openstack-origin to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: 
*openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index 6f0a35a0..0b98bc77 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -32,18 +32,23 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge nova-cloud-controller-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge placement-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -51,9 +56,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: '10G' @@ -64,6 +70,7 @@ applications: - '3' - '4' - '5' + channel: latest/edge ceph-mon: charm: ../../../ceph-mon @@ -77,43 +84,47 @@ applications: - '8' rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin to: - '9' + channel: latest/edge keystone: expose: True - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin to: - '10' + channel: latest/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - '11' + channel: latest/edge glance: expose: True - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '12' + channel: latest/edge cinder: expose: True - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: 'None' @@ -121,26 +132,30 @@ applications: openstack-origin: *openstack-origin to: - '13' + channel: latest/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph + channel: latest/edge nova-cloud-controller: expose: True - charm: cs:~openstack-charmers-next/nova-cloud-controller + charm: ch:nova-cloud-controller num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: latest/edge placement: - charm: cs:~openstack-charmers-next/placement + charm: ch:placement num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: latest/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 303ddfa6..453bcaad 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,26 +1,15 @@ charm_name: ceph-mon gate_bundles: - - bionic-queens - - bionic-stein - - bionic-ussuri - - focal-ussuri - - focal-ussuri-ec - - focal-victoria - - focal-wallaby - focal-xena - - 
hirsute-wallaby - impish-xena dev_bundles: - - bionic-rocky - - bionic-train - - bionic-train-with-fsid - focal-yoga - jammy-yoga smoke_bundles: - - focal-ussuri + - focal-xena configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -35,6 +24,5 @@ tests: tests_options: force_deploy: - - hirsute-wallaby - impish-xena - jammy-yoga From 62cd3065e42620d4a2dc209ea48d4ae721062d14 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 28 Jan 2022 13:52:07 +0100 Subject: [PATCH 2329/2699] Add support for deleting a share --- ceph-nfs/actions.yaml | 11 ++++ ceph-nfs/src/charm.py | 16 ++++++ ceph-nfs/src/ganesha.py | 96 +++++++++++++++++++++++++++++++---- ceph-nfs/tests/__init__.py | 0 ceph-nfs/tests/nfs_ganesha.py | 9 ++++ 5 files changed, 123 insertions(+), 9 deletions(-) create mode 100644 ceph-nfs/tests/__init__.py diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 3533a365..44a55ae8 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -22,6 +22,17 @@ create-share: Name of the share that will be exported. type: string default: +delete-share: + description: | + Delete a CephFS backed NFS export. Note that this does not delete + the backing CephFS share. + params: + name: + description: | + Name of the share that will be deleted. If this share doesn't + exist then this action will have no effect. + type: string + default: list-shares: description: List all shares that this application is managing # TODO: Update, delete share \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 0026eae5..231f7489 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -188,6 +188,10 @@ def __init__(self, framework): self.framework.observe( self.on.list_shares_action, self.list_shares_action) + self.framework.observe( + self.on.delete_share_action, + self.delete_share_action + ) def config_get(self, key, default=None): """Retrieve config option.
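
For anyone exercising this change end to end, the new delete-share action can be driven with the same zaza API that the functional test added below uses. A minimal sketch, assuming a deployed ceph-nfs application and an existing share; the share name 'test-share' is illustrative, not a fixture shipped with the charm:

    import zaza.model

    # Run the new delete-share action on the ceph-nfs leader and check
    # that it completed; 'test-share' is an example share name.
    action = zaza.model.run_action_on_leader(
        'ceph-nfs',
        'delete-share',
        action_params={'name': 'test-share'},
    )
    assert action.status == 'completed'
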
@@ -362,6 +366,18 @@ def list_shares_action(self, event): "exports": [{"id": export.export_id, "name": export.name} for export in exports] }) + def delete_share_action(self, event): + if not self.model.unit.is_leader(): + event.fail("Share deletion needs to be run from the application leader") + return + client = GaneshaNfs(self.client_name, self.pool_name) + name = event.params.get('name') + client.delete_share(name) + self.peers.trigger_reload() + event.set_results({ + "message": "Share deleted", + }) + @ops_openstack.core.charm_class class CephNFSCharmOcto(CephNfsCharm): diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 906d2a83..86d42556 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -47,6 +47,9 @@ class Export(object): """Object that encodes and decodes Ganesha export blocks""" + + name = None + def __init__(self, export_options: Optional[Dict] = None): if export_options is None: export_options = {} @@ -104,16 +107,35 @@ def create_share(self, name: str = None, size: int = None, if size is not None: size_in_bytes = size * 1024 * 1024 if access_ips is None: - access_ips = ['0.0.0.0/0'] + access_ips = ['0.0.0.0'] + # Ganesha deals with networks just fine, except when the network is + # 0.0.0.0/0, then it has to be 0.0.0.0 which works as expected :-/ + if '0.0.0.0/0' in access_ips: + access_ips[access_ips.index('0.0.0.0/0')] = '0.0.0.0' + access_id = 'ganesha-{}'.format(name) self.export_path = self._create_cephfs_share(name, size_in_bytes) export_id = self._get_next_export_id() export = Export( - export_id=export_id, - path=self.export_path, - user_id=access_id, - access_key=self._ceph_auth_key(access_id), - clients=access_ips + { + 'EXPORT': { + 'Export_Id': export_id, + 'Path': self.export_path, + 'FSAL': { + 'Name': 'Ceph', + 'User_Id': access_id, + 'Secret_Access_Key': self._ceph_auth_key(access_id) + }, + 'Pseudo': self.export_path, + 'Squash': 'None', + 'CLIENT': [ + { + 'Access_Type': 'RW', + 'Clients': ', '.join(access_ips), + } + ] + } + } ) export_template = export.to_export() logging.debug("Export template::\n{}".format(export_template)) @@ -141,6 +163,19 @@ def list_shares(self) -> List[Export]: logging.warning("Encountered an independently created export") return exports + def delete_share(self, name: str): + share = [share for share in self.list_shares() if share.name == name] + if share: + share = share[0] + else: + return + logging.info("About to remove export {} ({})".format(share.name, share.export_id)) + self._ganesha_remove_export(share.export_id) + logging.debug("Removing export from index") + self._remove_share_from_index(share.export_id) + logging.debug("Removing export file from RADOS") + self._rados_rm('ganesha-export-{}'.format(share.export_id)) + def get_share(self, id): pass @@ -153,6 +188,13 @@ def _ganesha_add_export(self, export_path: str, tmp_path: str): 'ExportMgr', 'AddExport', 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) + def _ganesha_remove_export(self, share_id: int): + """Remove a configured NFS export from Ganesha""" + self._dbus_send( + 'ExportMgr', + 'RemoveExport', + "uint16:{}".format(share_id)) + def _dbus_send(self, section: str, action: str, *args): """Send a command to Ganesha via Dbus""" cmd = [ @@ -266,8 +308,44 @@ def _rados_put(self, name: str, source: str): logging.debug("About to call: {}".format(cmd)) subprocess.check_call(cmd) + def _rados_rm(self, name: str): + """Remove a named RADOS object.
+ + :param name: Name of the RADOS object to remove + + :returns: None + """ + cmd = [ + 'rados', '-p', self.ceph_pool, '--id', self.client_name, + 'rm', name + ] + logging.debug("About to call: {}".format(cmd)) + subprocess.check_call(cmd) + def _add_share_to_index(self, export_id: int): - index = self._rados_get(self.export_index) - index += '\n%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) - tmpfile = self._tmpfile(index) + """Add an export RADOS object's URL to the RADOS URL index.""" + index_data = self._rados_get(self.export_index) + url = '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) + rados_urls = index_data.split('\n') + if url not in rados_urls: + rados_urls.append(url) + tmpfile = self._tmpfile('\n'.join(rados_urls)) + self._rados_put(self.export_index, tmpfile.name) + + def _remove_share_from_index(self, export_id: int): + """Remove an export RADOS object's URL from the RADOS URL index.""" + index_data = self._rados_get(self.export_index) + if not index_data: + return + + unwanted_url = "%url rados://{0}/{1}".format( + self.ceph_pool, + 'ganesha-export-{}'.format(export_id)) + logging.debug("Looking for '{}' in index".format(unwanted_url)) + rados_urls = index_data.split('\n') + logging.debug("Index URLs: {}".format(rados_urls)) + index = [url.strip() for url in rados_urls if url != unwanted_url] + logging.debug("Index URLs without unwanted: {}".format(index)) + tmpfile = self._tmpfile('\n'.join(index)) self._rados_put(self.export_index, tmpfile.name) diff --git a/ceph-nfs/tests/__init__.py b/ceph-nfs/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 44dfc389..86d8cfb2 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -28,6 +28,7 @@ class NfsGaneshaTest(unittest.TestCase): mount_dir = '/mnt/test' share_protocol = 'nfs' mounts_share = False + created_share = None def tearDown(self): if self.mounts_share: @@ -40,6 +41,13 @@ def tearDown(self): cmd='sudo umount /mnt/test && sudo rmdir /mnt/test') except subprocess.CalledProcessError: logging.warning("Failed to clean up mounts") + if self.created_share: + zaza.model.run_action_on_leader( + 'ceph-nfs', + 'delete-share', + action_params={ + 'name': self.created_share, + }) def _create_share(self, name: str, size: int = 10) -> Dict[str, str]: action = zaza.model.run_action_on_leader( @@ -50,6 +58,7 @@ def _create_share(self, name: str, size: int = 10) -> Dict[str, str]: 'size': size, }) self.assertEqual(action.status, 'completed') + self.created_share = name results = action.results logging.debug("Action results: {}".format(results)) return results From 481d99e4ccbdc98a9898713c045d2f010fe329d0 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 1 Feb 2022 19:35:15 +0000 Subject: [PATCH 2330/2699] Update to build using charmcraft Due to a build problem with the reactive plugin, this change falls back on overriding the steps and doing a manual build, but it also ensures the CI system builds the charm using charmcraft. Changes: - add a build-requirements.txt - modify charmcraft.yaml - modify osci.yaml -> indicate build with charmcraft - modify tox.ini -> tox -e build does charmcraft build/rename -> tox -e build-reactive does the reactive build - modify bundles to use the .charm artifact in tests.
and fix deprecation warning re: prefix - tox inception to enable tox -e func-test in the CI Change-Id: I1bbdebb22fcd6cac4ceae74d90d70a8d94f3c769 --- ceph-fs/build-requirements.txt | 7 ++++++ ceph-fs/charmcraft.yaml | 18 +++++++++++++--- ceph-fs/osci.yaml | 1 + ceph-fs/rename.sh | 13 +++++++++++ ceph-fs/src/test-requirements.txt | 3 +++ ceph-fs/src/tests/bundles/focal-xena.yaml | 5 ++++- ceph-fs/src/tests/bundles/focal-yoga.yaml | 4 +++- ceph-fs/src/tests/bundles/impish-xena.yaml | 4 +++- ceph-fs/src/tests/bundles/jammy-yoga.yaml | 4 +++- ceph-fs/test-requirements.txt | 6 ++++++ ceph-fs/tox.ini | 25 ++++++++++++++++++++++ 11 files changed, 83 insertions(+), 7 deletions(-) create mode 100644 ceph-fs/build-requirements.txt create mode 100755 ceph-fs/rename.sh diff --git a/ceph-fs/build-requirements.txt b/ceph-fs/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-fs/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index a850351d..49682169 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -2,9 +2,21 @@ type: charm parts: charm: - source: src/ - plugin: reactive - build-snaps: [charm] + build-packages: + - tox + - git + - python3-dev + override-build: | + apt-get install ca-certificates -y + tox -e build-reactive + override-stage: | + echo "Copying charm to staging area: $CHARMCRAFT_STAGE" + NAME=$(ls $CHARMCRAFT_PART_BUILD/build/builds) + cp -r $CHARMCRAFT_PART_BUILD/build/builds/$NAME/* $CHARMCRAFT_STAGE/ + override-prime: | + # For some reason, the normal priming chokes on the fact that there's a + # hooks directory. + cp -r $CHARMCRAFT_STAGE/* . bases: - name: ubuntu diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 551746b2..e84a9f3e 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -7,3 +7,4 @@ vars: needs_charm_build: true charm_build_name: ceph-fs + build_type: charmcraft diff --git a/ceph-fs/rename.sh b/ceph-fs/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-fs/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index e7710236..9c7afb7f 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -4,6 +4,9 @@ # https://github.com/openstack-charmers/release-tools # +# Need tox to be available from tox... inception yes, but it's a workaround for now +tox + # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-fs/src/tests/bundles/focal-xena.yaml b/ceph-fs/src/tests/bundles/focal-xena.yaml index a5370c13..6de4b967 100644 --- a/ceph-fs/src/tests/bundles/focal-xena.yaml +++ b/ceph-fs/src/tests/bundles/focal-xena.yaml @@ -1,6 +1,9 @@ variables: openstack-origin: &openstack-origin cloud:focal-xena +local_overlay_enabled: False + + series: &series focal machines: @@ -43,7 +46,7 @@ applications: channel: latest/edge ceph-fs: - charm: ceph-fs + charm: ../../../ceph-fs.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-fs/src/tests/bundles/focal-yoga.yaml b/ceph-fs/src/tests/bundles/focal-yoga.yaml index a0edf3b5..b6177072 100644 --- a/ceph-fs/src/tests/bundles/focal-yoga.yaml +++ b/ceph-fs/src/tests/bundles/focal-yoga.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin cloud:focal-yoga +local_overlay_enabled: False + series: &series focal machines: @@ -43,7 +45,7 @@ applications: channel: latest/edge ceph-fs: - charm: ceph-fs + charm: ../../../ceph-fs.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-fs/src/tests/bundles/impish-xena.yaml b/ceph-fs/src/tests/bundles/impish-xena.yaml index ee6a5ede..fd96fc12 100644 --- a/ceph-fs/src/tests/bundles/impish-xena.yaml +++ b/ceph-fs/src/tests/bundles/impish-xena.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin distro +local_overlay_enabled: False + series: &series impish machines: @@ -43,7 +45,7 @@ applications: channel: latest/edge ceph-fs: - charm: ceph-fs + charm: ../../../ceph-fs.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-fs/src/tests/bundles/jammy-yoga.yaml b/ceph-fs/src/tests/bundles/jammy-yoga.yaml index e3fa43e4..f49df651 100644 --- a/ceph-fs/src/tests/bundles/jammy-yoga.yaml +++ b/ceph-fs/src/tests/bundles/jammy-yoga.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin distro +local_overlay_enabled: False + series: &series jammy machines: @@ -43,7 +45,7 @@ applications: channel: latest/edge ceph-fs: - charm: ceph-fs + charm: ../../../ceph-fs.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index 208032f0..a11a7d07 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -28,6 +28,11 @@ oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
+# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack @@ -46,3 +51,4 @@ pbr==5.6.0 # vault cryptography<3.4 # vault, keystone-saml-mellon lxml # keystone-saml-mellon hvac # vault, barbican-vault +psutil # cinder-lvm diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index faf6092e..2d60b8a4 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -37,11 +37,24 @@ setenv = VIRTUAL_ENV={envdir} passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = {toxinidir}/pip.sh install {opts} {packages} +allowlist_externals = + charmcraft + bash + tox + rename.sh deps = -r{toxinidir}/requirements.txt [testenv:build] basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + +[testenv:build-reactive] +basepython = python3 commands = charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} @@ -86,6 +99,18 @@ deps = flake8==3.9.2 charm-tools==2.8.3 commands = flake8 {posargs} src unit_tests +[testenv:func-target] +# Hack to get functional tests working in the charmcraft +# world. We should fix this. +basepython = python3 +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/src/test-requirements.txt +changedir = {toxinidir}/src +commands = + bash -c "if [ ! -f ../*.charm ]; then echo 'Charm does not exist. Run tox -e build'; exit 1; fi" + tox --version + tox -e func-target {posargs} + [testenv:cover] # Technique based heavily upon # https://github.com/openstack/nova/blob/master/tox.ini From 54cbc3e214344f6ad04e9e30c7e0452cdab91827 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 2 Feb 2022 10:00:50 +0100 Subject: [PATCH 2331/2699] Add support for granting and removing access --- ceph-nfs/.gitignore | 1 + ceph-nfs/README.md | 4 +- ceph-nfs/actions.yaml | 30 ++++++++ ceph-nfs/src/charm.py | 80 ++++++++++++++----- ceph-nfs/src/ganesha.py | 115 +++++++++++++++++++--------- ceph-nfs/tests/nfs_ganesha.py | 2 +- ceph-nfs/unit_tests/test_ganesha.py | 37 ++++++++- 7 files changed, 209 insertions(+), 60 deletions(-) diff --git a/ceph-nfs/.gitignore b/ceph-nfs/.gitignore index 166180b1..3d40c3d6 100644 --- a/ceph-nfs/.gitignore +++ b/ceph-nfs/.gitignore @@ -5,3 +5,4 @@ __pycache__ lib/* !lib/README.txt *.charm +.vscode/settings.json diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index 0276d994..9f5a9089 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -2,13 +2,13 @@ ## Description -TODO: Describe your charm in a few paragraphs of Markdown +CephNFS is a charm designed to enable management of NFS shares backed +by CephFS. It supports Ceph Pacific and above. ## Usage TODO: Provide high-level usage, such as required config or relations - ## Relations TODO: Provide any relations which are provided or required by your charm diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 44a55ae8..a2a9877d 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -22,6 +22,36 @@ create-share: Name of the share that will be exported. type: string default: +grant-access: + description: | + Grant the specified client access to a share. 
+  params:
+    name:
+      description: Name of the share
+      type: string
+      default:
+    client:
+      description: IP address or network to change access for
+      type: string
+      default:
+    mode:
+      description: Access mode to grant
+      type: string
+      default: "rw"
+
+revoke-access:
+  description: |
+    Revoke the specified client's access to a share.
+  params:
+    name:
+      description: Name of the share
+      type: string
+      default:
+    client:
+      description: IP address or network to change access for
+      type: string
+      default:
+
 delete-share:
   description: |
     Delete a CephFS Backed NFS export. Note that this does not delete
diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py
index 231f7489..0698b045 100755
--- a/ceph-nfs/src/charm.py
+++ b/ceph-nfs/src/charm.py
@@ -192,6 +192,14 @@ def __init__(self, framework):
             self.on.delete_share_action,
             self.delete_share_action
         )
+        self.framework.observe(
+            self.on.grant_access_action,
+            self.grant_access_action
+        )
+        self.framework.observe(
+            self.on.revoke_access_action,
+            self.revoke_access_action
+        )
 
     def config_get(self, key, default=None):
         """Retrieve config option.
@@ -304,24 +312,25 @@ def setup_ganesha(self, event):
             '--id', self.client_name,
             'put', 'ganesha-export-index', '/dev/null'
         ]
-        try:
-            logging.debug("Creating ganesha-export-index in Ceph")
-            subprocess.check_call(cmd)
-            counter = tempfile.NamedTemporaryFile('w+')
-            counter.write('1000')
-            counter.seek(0)
-            logging.debug("Creating ganesha-export-counter in Ceph")
-            cmd = [
-                'rados', '-p', self.pool_name,
-                '-c', self.CEPH_CONF,
-                '--id', self.client_name,
-                'put', 'ganesha-export-counter', counter.name
-            ]
-            subprocess.check_call(cmd)
-            self.peers.initialised_pool()
-        except subprocess.CalledProcessError:
-            logging.error("Failed to setup ganesha index object")
-            event.defer()
+        if not self.peers.pool_initialised:
+            try:
+                logging.debug("Creating ganesha-export-index in Ceph")
+                subprocess.check_call(cmd)
+                counter = tempfile.NamedTemporaryFile('w+')
+                counter.write('1000')
+                counter.seek(0)
+                logging.debug("Creating ganesha-export-counter in Ceph")
+                cmd = [
+                    'rados', '-p', self.pool_name,
+                    '-c', self.CEPH_CONF,
+                    '--id', self.client_name,
+                    'put', 'ganesha-export-counter', counter.name
+                ]
+                subprocess.check_call(cmd)
+                self.peers.initialised_pool()
+            except subprocess.CalledProcessError:
+                logging.error("Failed to setup ganesha index object")
+                event.defer()
 
     def on_pool_initialised(self, event):
         try:
@@ -378,6 +387,42 @@ def delete_share_action(self, event):
             "message": "Share deleted",
         })
 
+    def grant_access_action(self, event):
+        if not self.model.unit.is_leader():
+            event.fail("Granting access needs to be run from the application leader")
+            return
+        client = GaneshaNfs(self.client_name, self.pool_name)
+        name = event.params.get('name')
+        address = event.params.get('client')
+        mode = event.params.get('mode')
+        if mode not in ['r', 'rw']:
+            event.fail('Mode must be either r (read) or rw (read/write)')
+            return
+        res = client.grant_access(name, address, mode)
+        if res is not None:
+            event.fail(res)
+            return
+        self.peers.trigger_reload()
+        event.set_results({
+            "message": "Access granted",
+        })
+
+    def revoke_access_action(self, event):
+        if not self.model.unit.is_leader():
+            event.fail("Revoking access needs to be run from the application leader")
+            return
+        client = GaneshaNfs(self.client_name, self.pool_name)
+        name = event.params.get('name')
+        address = event.params.get('client')
+        res = client.revoke_access(name, address)
+        if res is not None:
+            event.fail(res)
+            return
+        self.peers.trigger_reload()
+        event.set_results({
+            "message": "Access revoked",
+        })
+
 
 @ops_openstack.core.charm_class
 class CephNFSCharmOcto(CephNfsCharm):
diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py
index 86d42556..3b120177 100644
--- a/ceph-nfs/src/ganesha.py
+++ b/ceph-nfs/src/ganesha.py
@@ -14,35 +14,6 @@
 # TODO: Add ACL with kerberos
 
-GANESHA_EXPORT_TEMPLATE = """
-EXPORT {{
-    # Each EXPORT must have a unique Export_Id.
-    Export_Id = {id};
-
-    # The directory in the exported file system this export
-    # is rooted on.
-    Path = '{path}';
-
-    # FSAL, Ganesha's module component
-    FSAL {{
-        # FSAL name
-        Name = "Ceph";
-        User_Id = "{user_id}";
-        Secret_Access_Key = "{secret_key}";
-    }}
-
-    # Path of export in the NFSv4 pseudo filesystem
-    Pseudo = '{path}';
-
-    SecType = "sys";
-    CLIENT {{
-        Access_Type = "rw";
-        Clients = {clients};
-    }}
-    # User id squashing, one of None, Root, All
-    Squash = "None";
-}}
-"""
 
 class Export(object):
@@ -53,9 +24,13 @@ class Export(object):
     def __init__(self, export_options: Optional[Dict] = None):
         if export_options is None:
             export_options = {}
+        if isinstance(export_options, Export):
+            raise RuntimeError('export_options must be a dictionary')
         self.export_options = export_options
         if self.path:
             self.name = self.path.split('/')[-2]
+        if not isinstance(self.export_options['EXPORT']['CLIENT'], list):
+            self.export_options['EXPORT']['CLIENT'] = [self.export_options['EXPORT']['CLIENT']]
 
     def from_export(export: str) -> 'Export':
         return Export(export_options=manager.parseconf(export))
@@ -68,16 +43,50 @@ def export(self):
         return self.export_options['EXPORT']
 
     @property
-    def clients(self):
-        return self.export['CLIENT']
+    def clients(self) -> List[Dict[str, str]]:
+        return self.export_options['EXPORT']['CLIENT']
 
     @property
-    def export_id(self):
-        return self.export['Export_Id']
+    def clients_by_mode(self):
+        clients_by_mode = {'r': [], 'rw': []}
+        for client in self.clients:
+            if client['Access_Type'].lower() == 'r':
+                clients_by_mode['r'] += [s.strip() for s in client['Clients'].split(',')]
+            elif client['Access_Type'].lower() == 'rw':
+                clients_by_mode['rw'] += [s.strip() for s in client['Clients'].split(',')]
+            else:
+                raise RuntimeError("Invalid access type")
+        return clients_by_mode
+
+    @property
+    def export_id(self) -> int:
+        return int(self.export_options['EXPORT']['Export_Id'])
 
     @property
-    def path(self):
-        return self.export['Path']
+    def path(self) -> str:
+        return self.export_options['EXPORT']['Path']
+
+    def add_client(self, client: str, mode: str):
+        if mode not in ['r', 'rw']:
+            return 'Mode must be either r (read) or rw (read/write)'
+        clients_by_mode = self.clients_by_mode
+        if client not in clients_by_mode[mode.lower()]:
+            clients_by_mode[mode.lower()].append(client)
+        self.export_options['EXPORT']['CLIENT'] = []
+        for (mode, clients) in clients_by_mode.items():
+            if clients:
+                self.export_options['EXPORT']['CLIENT'].append(
+                    {'Access_Type': mode, 'Clients': ', '.join(clients)})
+
+    def remove_client(self, client: str):
+        clients_by_mode = self.clients_by_mode
+        for (mode, clients) in clients_by_mode.items():
+            clients_by_mode[mode] = [old_client for old_client in clients if old_client != client]
+        self.export_options['EXPORT']['CLIENT'] = []
+        for (mode, clients) in clients_by_mode.items():
+            if clients:
+                self.export_options['EXPORT']['CLIENT'].append(
+                    {'Access_Type': mode, 'Clients': ', '.join(clients)})
 
 
 class GaneshaNfs(object):
@@ -176,15 +185,39 @@ def delete_share(self, name: str):
         logging.debug("Removing export file from RADOS")
self._rados_rm('ganesha-export-{}'.format(share.export_id)) - def get_share(self, id): - pass + def grant_access(self, name: str, client: str, mode: str) -> Optional[str]: + share = self.get_share(name) + if share is None: + return 'Share does not exist' + share.add_client(client, mode) + export_template = share.to_export() + logging.debug("Export template::\n{}".format(export_template)) + tmp_file = self._tmpfile(export_template) + self._rados_put('ganesha-export-{}'.format(share.export_id), tmp_file.name) + self._ganesha_update_export(share.export_id, tmp_file.name) + + def revoke_access(self, name: str, client: str): + share = self.get_share(name) + if share is None: + return 'Share does not exist' + share.remove_client(client) + export_template = share.to_export() + logging.debug("Export template::\n{}".format(export_template)) + tmp_file = self._tmpfile(export_template) + self._rados_put('ganesha-export-{}'.format(share.export_id), tmp_file.name) + self._ganesha_update_export(share.export_id, tmp_file.name) + + def get_share(self, name: str) -> Optional[Export]: + share = [share for share in self.list_shares() if share.name == name] + if share: + return share[0] def update_share(self, id): pass def _ganesha_add_export(self, export_path: str, tmp_path: str): """Add a configured NFS export to Ganesha""" - return self._dbus_send( + self._dbus_send( 'ExportMgr', 'AddExport', 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) @@ -195,6 +228,12 @@ def _ganesha_remove_export(self, share_id: int): 'RemoveExport', "uint16:{}".format(share_id)) + def _ganesha_update_export(self, share_id: int, tmp_path: str): + """Update a configured NFS export in Ganesha""" + self._dbus_send( + 'ExportMgr', 'UpdateExport', + 'string:{}'.format(tmp_path), 'string:EXPORT(Export_Id={})'.format(share_id)) + def _dbus_send(self, section: str, action: str, *args): """Send a command to Ganesha via Dbus""" cmd = [ diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 86d8cfb2..037382ef 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -79,6 +79,7 @@ def _mount_share(self, unit_name: str, share_ip: str, export_path: str): zaza.utilities.generic.run_via_ssh( unit_name=unit_name, cmd=ssh_cmd) + self.mounts_share = True def _install_dependencies(self, unit: str): logging.debug("About to install nfs-common on {}".format(unit)) @@ -112,7 +113,6 @@ def test_create_share(self): export_path = share['path'] ip = share['ip'] logging.info("Mounting share on ubuntu units") - self.mounts_share = True self._mount_share('ubuntu/0', ip, export_path) self._mount_share('ubuntu/1', ip, export_path) logging.info("writing to the share on ubuntu/0") diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py index 19bdb4df..a1354ffe 100644 --- a/ceph-nfs/unit_tests/test_ganesha.py +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -38,5 +38,40 @@ class ExportTest(unittest.TestCase): def test_parser(self): export = ganesha.Export.from_export(EXAMPLE_EXPORT) self.assertEqual(export.export_id, 1000) - self.assertEqual(export.clients, {'Access_Type': 'rw', 'Clients': '0.0.0.0'}) + self.assertEqual(export.clients, [{'Access_Type': 'rw', 'Clients': '0.0.0.0'}]) self.assertEqual(export.name, 'test_ganesha_share') + + def test_add_client(self): + export = ganesha.Export.from_export(EXAMPLE_EXPORT) + export.add_client('10.0.0.0/8', 'rw') + self.assertEqual( + export.clients, + [{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}]) + # adding 
again shouldn't duplicate export + export.add_client('10.0.0.0/8', 'rw') + self.assertEqual( + export.clients, + [{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}]) + + export.add_client('192.168.0.0/16', 'r') + self.assertEqual( + export.clients, + [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, + {'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}, + ]) + + def test_remove_client(self): + export = ganesha.Export.from_export(EXAMPLE_EXPORT) + export.add_client('10.0.0.0/8', 'rw') + export.add_client('192.168.0.0/16', 'r') + self.assertEqual( + export.clients, + [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, + {'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}, + ]) + export.remove_client('0.0.0.0') + self.assertEqual( + export.clients, + [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, + {'Access_Type': 'rw', 'Clients': '10.0.0.0/8'}, + ]) From c7d10589b396600e09cf29f1d3d2721f4b22506f Mon Sep 17 00:00:00 2001 From: Cornellius Metto Date: Mon, 7 Feb 2022 13:04:35 +0300 Subject: [PATCH 2332/2699] Charmhelpers Sync: https health checks for HAProxy Change-Id: I4848168b5a45c3430dec58f786d4453e40539361 --- ceph-radosgw/hooks/charmhelpers/__init__.py | 17 +-- .../hooks/charmhelpers/cli/__init__.py | 13 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 40 +++++- .../charmhelpers/contrib/hahelpers/cluster.py | 15 +- .../contrib/hardening/apache/checks/config.py | 5 +- .../contrib/hardening/audits/apache.py | 8 +- .../contrib/hardening/audits/apt.py | 5 +- .../contrib/hardening/audits/file.py | 3 +- .../charmhelpers/contrib/hardening/harden.py | 13 +- .../contrib/hardening/host/checks/login.py | 4 +- .../contrib/hardening/host/checks/sysctl.py | 7 +- .../contrib/hardening/mysql/checks/config.py | 7 +- .../contrib/hardening/templating.py | 6 +- .../charmhelpers/contrib/hardening/utils.py | 3 +- .../hooks/charmhelpers/contrib/network/ip.py | 23 +--- .../charmhelpers/contrib/openstack/context.py | 46 +++---- .../files/check_deferred_restarts.py | 128 ++++++++++++++++++ .../contrib/openstack/keystone.py | 12 +- .../charmhelpers/contrib/openstack/neutron.py | 10 +- .../charmhelpers/contrib/openstack/policyd.py | 46 +------ .../contrib/openstack/templates/haproxy.cfg | 4 + .../templates/openstack_https_frontend | 2 + .../templates/openstack_https_frontend.conf | 2 + .../templates/wsgi-openstack-api.conf | 6 + .../templates/wsgi-openstack-metadata.conf | 6 + .../contrib/openstack/templating.py | 27 ++-- .../charmhelpers/contrib/openstack/utils.py | 110 +++++++++------ .../hooks/charmhelpers/contrib/python.py | 2 - .../contrib/storage/linux/ceph.py | 69 ++++------ .../contrib/storage/linux/loopback.py | 10 +- .../hooks/charmhelpers/core/hookenv.py | 81 ++++++----- ceph-radosgw/hooks/charmhelpers/core/host.py | 12 +- .../hooks/charmhelpers/core/services/base.py | 7 +- .../charmhelpers/core/services/helpers.py | 4 +- .../hooks/charmhelpers/core/strutils.py | 9 +- .../hooks/charmhelpers/core/templating.py | 11 +- .../hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 29 +--- .../hooks/charmhelpers/fetch/centos.py | 7 +- .../hooks/charmhelpers/fetch/python/debug.py | 2 - .../charmhelpers/fetch/python/packages.py | 14 +- .../hooks/charmhelpers/fetch/ubuntu.py | 37 ++--- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 25 +++- 43 files changed, 465 insertions(+), 432 deletions(-) create mode 100755 ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py diff --git a/ceph-radosgw/hooks/charmhelpers/__init__.py 
b/ceph-radosgw/hooks/charmhelpers/__init__.py index 1f57ed2a..ddf30450 100644 --- a/ceph-radosgw/hooks/charmhelpers/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/__init__.py @@ -14,30 +14,15 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - import functools import inspect import subprocess -import sys -try: - import six # NOQA:F401 -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py index 74ea7295..2b0c4b7a 100644 --- a/ceph-radosgw/hooks/charmhelpers/cli/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/cli/__init__.py @@ -16,9 +16,6 @@ import argparse import sys -import six -from six.moves import zip - import charmhelpers.core.unitdata @@ -149,10 +146,7 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - if six.PY2: - argspec = inspect.getargspec(arguments.func) - else: - argspec = inspect.getfullargspec(arguments.func) + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -177,10 +171,7 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - if six.PY2: - argspec = inspect.getargspec(func) - else: - argspec = inspect.getfullargspec(func) + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8d1753c3..bad7a533 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,6 +28,7 @@ import yaml from charmhelpers.core.hookenv import ( + application_name, config, hook_name, local_unit, @@ -174,7 +175,8 @@ def _locate_cmd(self, check_cmd): if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - command += " " + " ".join(parts[1:]) + safe_args = [shlex.quote(arg) for arg in parts[1:]] + command += " " + " ".join(safe_args) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) + + +def add_deferred_restarts_check(nrpe): + """ + Add NRPE check for services with deferred restarts. 
+ + :param NRPE nrpe: NRPE object to add check to + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Adding deferred restarts nrpe check: {}'.format(shortname)) + nrpe.add_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) + + +def remove_deferred_restarts_check(nrpe): + """ + Remove NRPE check for services with deferred service restarts. + + :param NRPE nrpe: NRPE object to remove check from + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Removing deferred restarts nrpe check: {}'.format(shortname)) + nrpe.remove_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index f0b629a2..146beba6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -32,8 +32,6 @@ from socket import gethostname as get_unit_hostname -import six - from charmhelpers.core.hookenv import ( log, relation_ids, @@ -125,16 +123,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) + # - partition with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError: status = None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 341da9ee..e81a5f0b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,7 +14,6 @@ import os import re -import six import subprocess @@ -95,9 +94,7 @@ def __call__(self): settings = utils.get_settings('apache') ctxt = settings['hardening'] - out = subprocess.check_output(['apache2', '-v']) - if six.PY3: - out = out.decode('utf-8') + out = subprocess.check_output(['apache2', '-v']).decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py 
b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py index c1537625..31db8f62 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,8 +15,6 @@ import re import subprocess -import six - from charmhelpers.core.hookenv import ( log, INFO, @@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, six.string_types): + elif isinstance(modules, str): self.modules = [modules] else: self.modules = modules @@ -68,9 +66,7 @@ def ensure_compliance(self): @staticmethod def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']) - if six.PY3: - output = output.decode('utf-8') + output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py index cad7bf73..1b22925b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # required for external apt import -from six import string_types - from charmhelpers.fetch import ( apt_cache, apt_purge @@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): self.pkgs = pkgs.split() else: self.pkgs = pkgs diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py index 257c6351..84cc2494 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -23,7 +23,6 @@ check_call, ) from traceback import format_exc -from six import string_types from stat import ( S_ISGID, S_ISUID @@ -63,7 +62,7 @@ def __init__(self, paths, always_comply=False, *args, **kwargs): """ super(BaseFileAudit, self).__init__(*args, **kwargs) self.always_comply = always_comply - if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + if isinstance(paths, str) or not hasattr(paths, '__iter__'): self.paths = [paths] else: self.paths = paths diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py index 63f21b9c..45ad076d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/harden.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from collections import OrderedDict from charmhelpers.core.hookenv import ( @@ -53,18 +51,17 @@ def harden(overrides=None): overrides = [] def _harden_inner1(f): - # As this has to be py2.7 compat, we can't use nonlocal. Use a trick - # to capture the dictionary that can then be updated. 
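The comment being deleted just above documented the old Python 2 limitation: an inner function could read but not rebind a variable from its enclosing scope, so a mutable dict was used as a workaround. For contrast, a standalone sketch of both forms (the decorator names are illustrative, not part of charm-helpers):

    # Py2-era workaround: mutate a dict, because the closure cannot rebind.
    def log_once_py2_style(f):
        _logged = {'done': False}

        def wrapper(*args, **kwargs):
            if not _logged['done']:
                print('first call of', f.__name__)
                _logged['done'] = True
            return f(*args, **kwargs)
        return wrapper

    # Py3 form adopted by this patch: rebind the enclosing name via nonlocal.
    def log_once_py3_style(f):
        _logged = False

        def wrapper(*args, **kwargs):
            nonlocal _logged
            if not _logged:
                print('first call of', f.__name__)
                _logged = True
            return f(*args, **kwargs)
        return wrapper
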
- _logged = {'done': False} + _logged = False def _harden_inner2(*args, **kwargs): # knock out hardening via a config var; normally it won't get # disabled. + nonlocal _logged if _DISABLE_HARDENING_FOR_UNIT_TEST: return f(*args, **kwargs) - if not _logged['done']: + if not _logged: log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged['done'] = True + _logged = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), @@ -74,7 +71,7 @@ def _harden_inner2(*args, **kwargs): if enabled: modules_to_run = [] # modules will always be performed in the following order - for module, func in six.iteritems(RUN_CATALOG): + for module, func in RUN_CATALOG.items(): if module in enabled: enabled.remove(module) modules_to_run.append(func) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py index fe2bc6ef..fd500c8b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types - from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR from charmhelpers.contrib.hardening import utils @@ -41,7 +39,7 @@ def __call__(self): # a string assume it to be octal and turn it into an octal # string. umask = settings['environment']['umask'] - if not isinstance(umask, string_types): + if not isinstance(umask, str): umask = '%s' % oct(umask) ctxt = { diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index f1ea5813..8a57d83d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -15,7 +15,6 @@ import os import platform import re -import six import subprocess from charmhelpers.core.hookenv import ( @@ -183,9 +182,9 @@ def __call__(self): ctxt['sysctl'][key] = d[2] or None - # Translate for python3 - return {'sysctl_settings': - [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]} + return { + 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] + } class SysctlConf(TemplatedFile): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index a79f33b7..8bf9f36c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
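The bulk of this sync is the same mechanical six removal, file after file. As a compact reference, the shims being dropped map onto plain Python 3 like this (a standalone illustration, not part of the patch; the sample values are made up):

    # Reference: six shims removed throughout this sync and their
    # plain Python 3 equivalents.
    d = {'fsid': 'abc', 'osd-devices': '/dev/sdb'}

    # six.iteritems(d) / six.itervalues(d) / six.iterkeys(d)
    assert list(d.items()) == [('fsid', 'abc'), ('osd-devices', '/dev/sdb')]
    assert list(d.values()) == ['abc', '/dev/sdb']

    # isinstance(x, six.string_types)  ->  isinstance(x, str)
    assert isinstance('cloud:focal-yoga', str)

    # six.text_type(out, 'utf-8')  ->  out.decode('utf-8'),
    # e.g. for subprocess.check_output() results.
    assert b'ceph --version'.decode('utf-8') == 'ceph --version'

    # six.PY2 / six.PY3 branches collapse to the py3-only path.
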
-import six import subprocess from charmhelpers.core.hookenv import ( @@ -82,6 +81,6 @@ class MySQLConfContext(object): """ def __call__(self): settings = utils.get_settings('mysql') - # Translate for python3 - return {'mysql_settings': - [(k, v) for k, v in six.iteritems(settings['security'])]} + return { + 'mysql_settings': [(k, v) for k, v in settings['security'].items()] + } diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py index 5b6765f7..4dee5465 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import six from charmhelpers.core.hookenv import ( log, @@ -27,10 +26,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py index 56afa4b6..f93851a9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardening/utils.py @@ -16,7 +16,6 @@ import grp import os import pwd -import six import yaml from charmhelpers.core.hookenv import ( @@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema): :returns: dictionary of modules config with user overrides applied. """ if overrides: - for k, v in six.iteritems(overrides): + for k, v in overrides.items(): if k in schema: if schema[k] is None: settings[k] = v diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index b356d64c..de56584d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 54081f0c..8522641b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -30,8 +30,6 @@ check_output, CalledProcessError) -import six - import charmhelpers.contrib.storage.linux.ceph as ch_ceph from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( @@ -130,10 +128,7 @@ try: import psutil except ImportError: - if six.PY2: - apt_install('python-psutil', fatal=True) - else: - apt_install('python3-psutil', fatal=True) + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -150,10 +145,7 @@ def ensure_packages(packages): def context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) + _missing = [k for k, v in ctxt.items() if v is None or v == ''] if _missing: log('Missing required data: %s' % ' '.join(_missing), level=INFO) @@ -180,7 +172,7 @@ def context_complete(self, ctxt): # Fresh start self.complete = False self.missing_data = [] - for k, v in six.iteritems(ctxt): + for k, v in ctxt.items(): if v is None or v == '': if k not in self.missing_data: self.missing_data.append(k) @@ -1111,10 +1103,14 @@ def get_network_addresses(self): endpoint = resolve_address(net_type) addresses.append((addr, endpoint)) - return sorted(set(addresses)) + # Log the set of addresses to have a trail log and capture if tuples + # change over time in the same unit (LP: #1952414). + sorted_addresses = sorted(set(addresses)) + log('get_network_addresses: {}'.format(sorted_addresses)) + return sorted_addresses def __call__(self): - if isinstance(self.external_ports, six.string_types): + if isinstance(self.external_ports, str): self.external_ports = [self.external_ports] if not self.external_ports or not https(): @@ -1531,9 +1527,9 @@ def __call__(self): continue sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): + for k, v in sub_config.items(): if k == 'sections': - for section, config_list in six.iteritems(v): + for section, config_list in v.items(): log("adding section '%s'" % (section), level=DEBUG) if ctxt[k].get(section): @@ -1887,8 +1883,11 @@ def __call__(self): normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {normalized[port]: bridge for port, bridge in - six.iteritems(portmap) if port in normalized.keys()} + return { + normalized[port]: bridge + for port, bridge in portmap.items() + if port in normalized.keys() + } return None @@ -2291,15 +2290,10 @@ def _get_canonical_name(self, name=None): name = name or socket.gethostname() fqdn = '' - if six.PY2: - exc = socket.error - else: - exc = OSError - try: addrs = socket.getaddrinfo( name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) - except exc: + except OSError: pass else: for addr in addrs: @@ -2416,12 +2410,12 @@ def get_existing_ovs_use_veth(): existing_ovs_use_veth = None # If there is a dhcp_agent.ini file read the current setting if os.path.isfile(DHCP_AGENT_INI): - # config_ini does the right thing and returns None if the setting is - # commented. + # config_ini does the right thing and returns None if the setting + # is commented. 
        existing_ovs_use_veth = (
            config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
     # Convert to Bool if necessary
-    if isinstance(existing_ovs_use_veth, six.string_types):
+    if isinstance(existing_ovs_use_veth, str):
         return bool_from_string(existing_ovs_use_veth)
     return existing_ovs_use_veth
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py
new file mode 100755
index 00000000..5f392b3c
--- /dev/null
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python3

+# Copyright 2014-2022 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Checks for services with deferred restarts.
+
+This Nagios check will parse /var/lib/policy-rc.d/
+to find any restarts that are currently deferred.
+"""
+
+import argparse
+import glob
+import sys
+import yaml
+
+
+DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'
+
+
+def get_deferred_events():
+    """Return a list of deferred events dicts from policy-rc.d files.
+
+    Events are read from DEFERRED_EVENTS_DIR and are of the form:
+    {
+        action: restart,
+        policy_requestor_name: rabbitmq-server,
+        policy_requestor_type: charm,
+        reason: 'Pkg update',
+        service: rabbitmq-server,
+        time: 1614328743
+    }
+
+    :raises OSError: Raised in case of a system error while reading a policy file
+    :raises yaml.YAMLError: Raised if parsing a policy file fails
+
+    :returns: List of deferred event dictionaries
+    :rtype: list
+    """
+    deferred_events_files = glob.glob(
+        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
+
+    deferred_events = []
+    for event_file in deferred_events_files:
+        with open(event_file, 'r') as f:
+            event = yaml.safe_load(f)
+            deferred_events.append(event)
+
+    return deferred_events
+
+
+def get_deferred_restart_services(application=None):
+    """Returns a list of services with deferred restarts.
+
+    :param str application: Name of the application that blocked the service restart.
+                            If application is None, all services with deferred restarts
+                            are returned. Services which are blocked by a non-charm
+                            requestor are always returned.
+
+    :raises OSError: Raised in case of a system error while reading a policy file
+    :raises yaml.YAMLError: Raised if parsing a policy file fails
+
+    :returns: List of services with deferred restarts belonging to application.
+ :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py index d7e02ccd..5775aa44 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/keystone.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright 2017 Canonical Ltd # @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack.context import IdentityServiceContext from charmhelpers.core.hookenv import ( @@ -117,10 +115,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient.auth.identity import v2 from keystoneclient import session except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v2_0 import client from keystoneclient.auth.identity import v2 @@ -151,10 +146,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient import session from keystoneclient.auth.identity import v3 except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v3 import client from keystoneclient.auth import token_endpoint diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py index b41314cb..47772467 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,6 @@ # Various utilities for dealing with Neutron and the renaming from Quantum. 
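The event files consumed by check_deferred_restarts.py, added above, are small YAML documents dropped into /var/lib/policy-rc.d by the deferred-restart machinery. A runnable sketch of producing one and evaluating it the way the check does (the demo directory and service name are made up; the field names follow the docstring above):

    # Sketch: write one deferred-restart event, then evaluate it like the
    # NRPE check does.
    import glob
    import os

    import yaml

    events_dir = '/tmp/policy-rc.d-demo'   # real checks read /var/lib/policy-rc.d
    os.makedirs(events_dir, exist_ok=True)

    event = {
        'action': 'restart',
        'policy_requestor_type': 'charm',
        'policy_requestor_name': 'ceph-radosgw',
        'reason': 'Pkg update',
        'service': 'radosgw',
        'time': 1614328743,
    }
    with open(os.path.join(events_dir, 'demo.deferred'), 'w') as f:
        yaml.safe_dump(event, f)

    # The check globs *.deferred and flags any event whose action is restart.
    deferred = []
    for path in glob.glob('{}/*.deferred'.format(events_dir)):
        with open(path) as f:
            deferred.append(yaml.safe_load(f))
    services = sorted({e['service'] for e in deferred
                       if e['action'] == 'restart'})
    if services:
        print('CRITICAL: Restarts are deferred for services: {}.'.format(
            ', '.join(services)))
    else:
        print('OK: No deferred service restarts.')
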
-import six from subprocess import check_output from charmhelpers.core.hookenv import ( @@ -349,11 +348,4 @@ def parse_vlan_range_mappings(mappings): Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) - if not _mappings: - return {} - - mappings = {} - for p, r in six.iteritems(_mappings): - mappings[p] = tuple(r.split(':')) - - return mappings + return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py index 6fa06f26..767943c2 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ def __str__(self): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. - - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. 
completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 875e1393..626ecbab 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -82,7 +82,11 @@ backend {{ service }}_{{ frontend }} {% endif -%} {% endif -%} {% for unit, address in frontends[frontend]['backends'].items() -%} + {% if https -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check check-ssl verify none + {% else -%} server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endif -%} {% endfor %} {% endfor -%} {% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index 530719e9..6ed869a5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -22,6 +22,8 @@ Listen {{ ext_port }} ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on RequestHeader set X-Forwarded-Proto "https" + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 {% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index 530719e9..6ed869a5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -22,6 +22,8 @@ Listen {{ ext_port }} ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on RequestHeader set X-Forwarded-Proto "https" + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 {% endfor -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index b9ca3963..6c4e37e4 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -20,6 +20,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" @@ -46,6 +48,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" @@ -72,6 +76,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf index b9ca3963..6c4e37e4 100644 --- 
a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -20,6 +20,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" @@ -46,6 +48,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ admin_script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" @@ -72,6 +76,8 @@ Listen {{ public_port }} WSGIScriptAlias / {{ public_script }} WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 = 2.4> ErrorLogFormat "%{cu}t %M" diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py index 050f8af5..3b7c6a9f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templating.py @@ -14,8 +14,6 @@ import os -import six - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, @@ -29,10 +27,7 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -62,7 +57,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in six.itervalues(OPENSTACK_CODENAMES)] + for rel in OPENSTACK_CODENAMES.values()] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, @@ -225,10 +220,7 @@ def __init__(self, templates_dir, openstack_release): # if this code is running, the object is created pre-install hook. # jinja2 shouldn't get touched until the module is reloaded on next # hook execution, with proper jinja2 bits successfully imported. - if six.PY2: - apt_install('python-jinja2') - else: - apt_install('python3-jinja2') + apt_install('python3-jinja2') def register(self, config_file, contexts, config_template=None): """ @@ -318,9 +310,7 @@ def write(self, config_file): log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException - _out = self.render(config_file) - if six.PY3: - _out = _out.encode('UTF-8') + _out = self.render(config_file).encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) @@ -331,7 +321,8 @@ def write_all(self): """ Write out all registered config files. """ - [self.write(k) for k in six.iterkeys(self.templates)] + for k in self.templates.keys(): + self.write(k) def set_release(self, openstack_release): """ @@ -347,8 +338,8 @@ def complete_contexts(self): Returns a list of context interfaces that yield a complete context. 
        '''
        interfaces = []
-        [interfaces.extend(i.complete_contexts())
-         for i in six.itervalues(self.templates)]
+        for i in self.templates.values():
+            interfaces.extend(i.complete_contexts())
         return interfaces
 
     def get_incomplete_context_data(self, interfaces):
@@ -360,7 +351,7 @@ def get_incomplete_context_data(self, interfaces):
         '''
         incomplete_context_data = {}
 
-        for i in six.itervalues(self.templates):
+        for i in self.templates.values():
             for context in i.contexts:
                 for interface in interfaces:
                     related = False
diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
index 9cc96d60..c8747c16 100644
--- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py
@@ -25,7 +25,6 @@
 import itertools
 import functools
 
-import six
 import traceback
 import uuid
 import yaml
@@ -362,6 +361,8 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
+    if src in OPENSTACK_RELEASES:
+        return src
     if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -401,7 +402,7 @@ def get_os_codename_version(vers):
 
 def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     '''Determine OpenStack version number from codename.'''
-    for k, v in six.iteritems(version_map):
+    for k, v in version_map.items():
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -411,7 +412,7 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
 
 def get_os_version_codename_swift(codename):
     '''Determine OpenStack version number of swift from codename.'''
-    for k, v in six.iteritems(SWIFT_CODENAMES):
+    for k, v in SWIFT_CODENAMES.items():
         if k == codename:
             return v[-1]
     e = 'Could not derive swift version for '\
@@ -421,17 +423,17 @@ def get_os_version_codename_swift(codename):
 
 def get_swift_codename(version):
     '''Determine OpenStack codename that corresponds to swift version.'''
-    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v]
     if len(codenames) > 1:
         # If more than one release codename contains this version we determine
         # the actual codename based on the highest available install source.
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. 
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) + else: + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. + + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def some_hook(...): """ def wrap(f): - # py27 compatible nonlocal variable. 
When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/python.py b/ceph-radosgw/hooks/charmhelpers/contrib/python.py index 84cba8c4..fcded680 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/python.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index c70aeb20..244b7af9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ def add_cache_tier(self, cache_pool, mode): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -779,9 +778,7 @@ def enabled_manager_modules(): """ cmd = ['ceph', 'mgr', 'module', 'ls'] try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -814,10 +811,10 @@ def get_mon_map(service): ceph command fails. 
""" try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -959,9 +956,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1164,8 +1159,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1176,7 +1170,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1189,7 +1183,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1198,7 +1192,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1213,7 +1207,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1241,8 +1235,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1260,7 +1254,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1280,12 +1274,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1299,9 +1291,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1320,13 +1311,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1343,9 +1332,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1371,7 +1358,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1509,9 +1496,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py index 74bab40e..04daea29 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ check_output, ) -import six - ################################################## # loopback device helpers. 
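The loopback hunk below, like most hunks in this change, reduces to the same two Python-3-only idioms: decoding `check_output()` inline and iterating dicts directly. A minimal standalone sketch of both (the command and dict are illustrative, not lifted from any single hunk):

```python
import subprocess

# Decode check_output() inline -- replaces the old pattern of checking
# six.PY3 and conditionally decoding the returned bytes.
# (Assumes a Linux host where `losetup` is available.)
output = subprocess.check_output(['losetup', '-a']).decode('utf-8')
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']

# Iterate a dict directly -- replaces six.iteritems(settings).
settings = {'size': '10', 'compression': 'lz4'}
for key, value in settings.items():
    print('{}={}'.format(key, value))
```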
@@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py index e94247a2..370c3e8f 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/hookenv.py +++ b/ceph-radosgw/hooks/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not None: return _cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. 
+ :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. + """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. - - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. 
if not cmd_exists('function-fail'): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 994ec8a0..2b0a36fb 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -263,7 +262,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +288,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +563,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +966,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1080,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". 
if e.errno == errno.ENOENT: pass diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index 9f880290..7c37c65c 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py index 3e6e30d2..5bf62dd5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ def __init__(self, *args): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ def read_context(self, file_name): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/ceph-radosgw/hooks/charmhelpers/core/strutils.py b/ceph-radosgw/hooks/charmhelpers/core/strutils.py index 28c6b3f5..31366871 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/strutils.py +++ b/ceph-radosgw/hooks/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/ceph-radosgw/hooks/charmhelpers/core/templating.py b/ceph-radosgw/hooks/charmhelpers/core/templating.py index 9014015c..cb0213dc 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/templating.py +++ b/ceph-radosgw/hooks/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. 
- Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py index 9497ee05..1283f25b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index d25587ad..2cb2e88b 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + 
algorithms = hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/centos.py b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py index a91dcff0..f8492018 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/centos.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py index 757135ee..dd5cca80 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py b/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py index 60048354..93f1fa3f 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index cf8328f0..e6f8a0ad 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. 
from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. 
Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e1776..6da355fd 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ def __getitem__(self, package): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ def __getitem__(self, package): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like From 4bc7b89b973e530d76b55ea195434ea00722e65d Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 7 Feb 2022 15:23:19 +0100 Subject: [PATCH 2333/2699] Migrate to charmhub Change-Id: I8b998968aecf8f6d2485d4f5153211ceeac81b27 --- ceph-dashboard/build-requirements.txt | 7 +- ceph-dashboard/charmcraft.yaml | 27 +++++ ceph-dashboard/osci.yaml | 20 +--- ceph-dashboard/rename.sh | 13 +++ ceph-dashboard/tests/README.md | 19 ++++ .../tests/bundles/bionic-octopus.yaml | 101 ------------------ ceph-dashboard/tests/bundles/focal.yaml | 26 +++-- ceph-dashboard/tests/bundles/hirsute.yaml | 68 ------------ .../bundles/overlays/bionic-octopus.yaml.j2 | 5 - ceph-dashboard/tests/tests.yaml | 5 - ceph-dashboard/tox.ini | 11 +- 11 files changed, 94 insertions(+), 208 deletions(-) create mode 100644 ceph-dashboard/charmcraft.yaml create mode 100755 ceph-dashboard/rename.sh create mode 100644 ceph-dashboard/tests/README.md delete mode 100644 ceph-dashboard/tests/bundles/bionic-octopus.yaml delete mode 100644 ceph-dashboard/tests/bundles/hirsute.yaml delete mode 100644 ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 diff --git a/ceph-dashboard/build-requirements.txt b/ceph-dashboard/build-requirements.txt index 38b1a777..b6d2452f 100644 --- a/ceph-dashboard/build-requirements.txt +++ b/ceph-dashboard/build-requirements.txt @@ -1,2 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
-git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml new file mode 100644 index 00000000..72933212 --- /dev/null +++ b/ceph-dashboard/charmcraft.yaml @@ -0,0 +1,27 @@ +type: charm + +parts: + charm: + after: + - update-certificates + charm-python-packages: + # NOTE(lourot): see + # * https://github.com/canonical/charmcraft/issues/551 + # * https://github.com/canonical/charmcraft/issues/632 + - setuptools < 58 + build-packages: + - git + + update-certificates: + plugin: nil + # See https://github.com/canonical/charmcraft/issues/658 + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 2088973f..6502b8ab 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -1,11 +1,10 @@ - project: templates: - - charm-yoga-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 check: jobs: - - bionic-octopus - focal-octopus - - hirsute-pacific vars: needs_charm_build: true charm_build_name: ceph-dashboard @@ -14,22 +13,9 @@ name: focal-octopus parent: func-target dependencies: + - charm-build - osci-lint - - tox-py36 - tox-py38 - tox-py39 vars: tox_extra_args: focal -- job: - name: bionic-octopus - parent: func-target - dependencies: &smoke-jobs - - focal-octopus - vars: - tox_extra_args: bionic-octopus -- job: - name: hirsute-pacific - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: hirsute diff --git a/ceph-dashboard/rename.sh b/ceph-dashboard/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-dashboard/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-dashboard/tests/README.md b/ceph-dashboard/tests/README.md new file mode 100644 index 00000000..31363eee --- /dev/null +++ b/ceph-dashboard/tests/README.md @@ -0,0 +1,19 @@ +# Overview + +This directory provides Zaza test definitions and bundles to verify basic +deployment functionality from the perspective of this charm, its requirements +and its features, as exercised in a subset of the full OpenStack deployment +test bundle topology. + +Run the smoke tests with: + +```bash +cd ../ +tox -e build +cd tests/ +tox -e func-smoke +``` + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](https://docs.openstack.org/charm-guide/latest/reference/testing.html#functional-testing) +section of the OpenStack Charm Guide. 
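rename.sh above exists because charmcraft writes a versioned artifact (typically something like `ceph-dashboard_ubuntu-20.04-amd64.charm`) while the test bundles expect a stable `../../<charm>.charm` path. A rough Python equivalent of the script, for readers tracing the build flow (a hypothetical helper with the same behaviour, not part of the patch):

```python
import glob
import os
import re

def rename_charm(osci_path='osci.yaml'):
    # Mirror the script's `grep charm_build_name | awk '{print $2}'`.
    with open(osci_path) as f:
        match = re.search(r'charm_build_name:\s*(\S+)', f.read())
    charm = match.group(1)
    target = '{}.charm'.format(charm)
    # "Removing bad downloaded charm maybe?" -- drop any stale artifact.
    if os.path.exists(target):
        os.remove(target)
    # Move the versioned build output onto the stable name.
    built = glob.glob('{}_*.charm'.format(charm))[0]
    os.rename(built, target)
```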
diff --git a/ceph-dashboard/tests/bundles/bionic-octopus.yaml b/ceph-dashboard/tests/bundles/bionic-octopus.yaml deleted file mode 100644 index 9982e4e6..00000000 --- a/ceph-dashboard/tests/bundles/bionic-octopus.yaml +++ /dev/null @@ -1,101 +0,0 @@ -local_overlay_enabled: False -series: bionic -applications: - percona-cluster: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - dataset-size: 25% - max-connections: 1000 - source: cloud:bionic-ussuri - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: cloud:bionic-ussuri - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: cloud:bionic-ussuri - vault: - num_units: 1 - charm: cs:~openstack-charmers-next/vault - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: cs:prometheus2 - num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # Octrober 2021 - charm: cs:~llama-charmers-next/grafana - num_units: 1 - options: - anonymous: True - install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip - install_method: snap - allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" - prometheus-alertmanager: - charm: cs:prometheus-alertmanager - num_units: 1 - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 3 - options: - source: cloud:bionic-ussuri - ceph-fs: - charm: cs:~openstack-charmers-next/ceph-fs - num_units: 1 - options: - source: cloud:bionic-ussuri -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'percona-cluster:shared-db' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 8a06e61f..34a95591 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -2,26 +2,31 @@ local_overlay_enabled: False series: focal applications: ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + channel: latest/edge 
ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' + channel: latest/edge vault: num_units: 1 - charm: cs:~openstack-charmers-next/vault + charm: ch:vault + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -32,7 +37,7 @@ applications: grafana: # SSL and allow_embedding are not released into cs:grafana yet, due # Octrober 2021 - charm: cs:~llama-charmers-next/grafana + charm: ch:grafana num_units: 1 options: anonymous: True @@ -48,16 +53,19 @@ applications: charm: cs:prometheus-alertmanager num_units: 1 ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 3 + channel: latest/edge ceph-fs: - charm: cs:~openstack-charmers-next/ceph-fs + charm: ch:ceph-fs num_units: 1 + channel: latest/edge ceph-iscsi: - charm: cs:~openstack-charmers-next/ceph-iscsi + charm: ch:ceph-iscsi num_units: 2 options: gateway-metadata-pool: iscsi-foo-metadata + channel: latest/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-dashboard/tests/bundles/hirsute.yaml b/ceph-dashboard/tests/bundles/hirsute.yaml deleted file mode 100644 index 215e38e1..00000000 --- a/ceph-dashboard/tests/bundles/hirsute.yaml +++ /dev/null @@ -1,68 +0,0 @@ -local_overlay_enabled: False -series: hirsute -applications: - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - monitor-count: '3' - vault: - num_units: 1 - charm: cs:~openstack-charmers-next/vault - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - ceph-radosgw-east: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 3 - options: - pool-prefix: east - region: east - ceph-radosgw-west: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 3 - options: - pool-prefix: west - region: west - ceph-fs: - charm: cs:~openstack-charmers-next/ceph-fs - num_units: 1 -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-radosgw-east:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw-east:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw-east:radosgw-user' - - - 'ceph-radosgw-west:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw-west:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw-west:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' diff --git a/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2 deleted file mode 100644 index 3539227a..00000000 --- 
a/ceph-dashboard/tests/bundles/overlays/bionic-octopus.yaml.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-applications:
-  grafana:
-    options:
-      http_proxy: '{{ TEST_HTTP_PROXY }}'
-      install_plugins: {{ TEST_GRAFANA_PLUGIN_VONAGE_URL }},{{ TEST_GRAFANA_PLUGIN_PIECHART_URL }}
diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml
index 8434950f..163d0e94 100644
--- a/ceph-dashboard/tests/tests.yaml
+++ b/ceph-dashboard/tests/tests.yaml
@@ -1,8 +1,6 @@
 charm_name: ceph-dasboard
 gate_bundles:
   - focal
-  - bionic-octopus
-  - hirsute
 smoke_bundles:
   - focal
 configure:
@@ -28,6 +26,3 @@ target_deploy_status:
   telegraf:
     workload-status: active
     workload-status-message-prefix: Monitoring
-tests_options:
-  force_deploy:
-    - hirsute
diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini
index 79ddb1aa..f49c50d8 100644
--- a/ceph-dashboard/tox.ini
+++ b/ceph-dashboard/tox.ini
@@ -15,8 +15,12 @@ skip_missing_interpreters = False
 # * It is also necessary to pin virtualenv as a newer virtualenv would still
 #   lead to fetching the latest pip in the func* tox targets, see
 #   https://stackoverflow.com/a/38133283
+# * It is necessary to declare setuptools as a dependency otherwise tox will
+#   fail very early at not being able to load it. The version pinning is in
+#   line with `pip.sh`.
 requires = pip < 20.3
            virtualenv < 20.0
+           setuptools < 50.0.0
 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
 minversion = 3.2.0
@@ -27,11 +31,12 @@ setenv = VIRTUAL_ENV={envdir}
 install_command =
   pip install {opts} {packages}
 commands = stestr run --slowest {posargs}
-whitelist_externals =
+allowlist_externals =
   git
   add-to-archive.py
   bash
   charmcraft
+  rename.sh
   ls
   pwd
 passenv = HOME TERM CS_* OS_* TEST_*
@@ -109,7 +114,9 @@ commands = {posargs}
 basepython = python3
 deps = -r{toxinidir}/build-requirements.txt
 commands =
-    charmcraft build
+    charmcraft clean
+    charmcraft -v build
+    {toxinidir}/rename.sh

 [testenv:func-noop]
 basepython = python3

From 04eb5534ae0a91f97d924860876125bd4f94cfa4 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 10 Feb 2022 09:30:44 +0100
Subject: [PATCH 2334/2699] Add basic info to README

---
 ceph-nfs/README.md | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md
index 9f5a9089..23e80084 100644
--- a/ceph-nfs/README.md
+++ b/ceph-nfs/README.md
@@ -7,15 +7,28 @@ by CephFS. It supports Ceph Pacific and above.

 ## Usage

-TODO: Provide high-level usage, such as required config or relations
+CephNFS provides an additional service when deployed with Ceph and CephFS.
+It should be related to CephMon:

-## Relations
+    juju add-relation ceph-nfs:ceph-client ceph-mon:client

-TODO: Provide any relations which are provided or required by your charm
+Once all relations have settled, it is possible to create a new export:
+
+    juju run-action --wait ceph-nfs/0 create-share name=test-share size=10 allowed-ips=10.0.0.0/24
+
+The above command creates an NFS share that is 10GB in size, and is
+accessible from any machine in the 10.0.0.0-10.0.0.255 network space. To
+grant access to a new network address, the `grant-access` action should be
+used:

-## OCI Images
+    juju run-action --wait ceph-nfs/0 grant-access name=test-share allowed-ips=192.168.0.10 mode=r

-TODO: Include a link to the default image your charm uses
+This command grants read-only access to the named share to a specific
+address: `192.168.0.10`.

+ +## Relations + +TODO: Provide any relations which are provided or required by your charm ## Contributing From 88979bef99cbff902b686d405909e5bc8811a330 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 10 Feb 2022 17:29:59 +0100 Subject: [PATCH 2335/2699] Fix OSCI voting on xena Change-Id: I2d1eaa2d0d69bfc5d72d8350a7f94f6a76698644 --- ceph-radosgw/osci.yaml | 50 ++++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index a661541e..9f041901 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -18,62 +18,70 @@ voting: false - vault-jammy-yoga-namespaced: voting: false - - job: - name: vault-jammy-yoga_rgw + name: vault-focal-xena_rgw + parent: func-target + dependencies: + - osci-lint + - tox-py38 + - tox-py39 + vars: + tox_extra_args: vault:focal-xena +- job: + name: vault-focal-xena-namespaced parent: func-target dependencies: - osci-lint - tox-py38 - tox-py39 + vars: + tox_extra_args: vault:focal-xena-namespaced + +- job: + name: vault-jammy-yoga_rgw + parent: func-target + dependencies: + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:jammy-yoga - job: name: vault-jammy-yoga-namespaced parent: func-target dependencies: - - vault-jammy-yoga_rgw + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:jammy-yoga-namespaced - job: name: vault-impish-xena_rgw parent: func-target dependencies: - - vault-jammy-yoga_rgw + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:impish-xena - job: name: vault-impish-xena-namespaced parent: func-target dependencies: - - vault-jammy-yoga_rgw + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:impish-xena-namespaced - job: name: vault-focal-yoga_rgw parent: func-target dependencies: - - vault-jammy-yoga_rgw + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:focal-yoga - job: name: vault-focal-yoga-namespaced parent: func-target dependencies: - - vault-jammy-yoga_rgw + - vault-focal-xena_rgw + - vault-focal-xena-namespaced vars: tox_extra_args: vault:focal-yoga-namespaced -- job: - name: vault-focal-xena_rgw - parent: func-target - dependencies: - - vault-jammy-yoga_rgw - vars: - tox_extra_args: vault:focal-xena -- job: - name: vault-focal-xena-namespaced - parent: func-target - dependencies: - - vault-jammy-yoga_rgw - vars: - tox_extra_args: vault:focal-xena-namespaced From f32f631082425f817562515de63ddfc31ed6edf6 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 10 Feb 2022 09:31:53 +0100 Subject: [PATCH 2336/2699] Add support for the openstack-loadbalancer charm When CephNFS is related to the openstack-loadbalancer, all NFS connections are load-balanced through a TCP loadbalancer running with a VIP and passed back, round-robin, to the nfs-ganesha servers providing service. 
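The diff below implements this by asking the openstack-loadbalancer charm for a TCP frontend on the NFS port and then preferring the returned VIP when advertising Ganesha. In outline (a paraphrase using names from the patch, not the verbatim code):

```python
LB_SERVICE_NAME = 'nfs-ganesha'
NFS_PORT = 2049

def request_frontend(ingress, bind_ip):
    # Ask the loadbalancer charm for a TCP frontend on the NFS port,
    # forwarding to this unit's bind address.
    ingress.request_loadbalancer(
        LB_SERVICE_NAME, NFS_PORT, NFS_PORT, bind_ip, 'tcp')

def advertised_address(ingress, unit_ip):
    # Prefer the VIP handed back by the loadbalancer; fall back to the
    # unit's own ingress address when no loadbalancer is related.
    response = ingress.get_frontend_data()
    if response:
        config = response[LB_SERVICE_NAME]
        return [ip for d in config.values() for ip in d['ip']][0]
    return unit_ip
```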
--- ceph-nfs/metadata.yaml | 5 +-- ceph-nfs/osci.yaml | 14 ++----- ceph-nfs/requirements.txt | 1 + ceph-nfs/src/charm.py | 41 +++++++++++++++++-- ceph-nfs/test-requirements.txt | 1 - ...{focal-octopus.yaml => focal-pacific.yaml} | 21 +++++++--- .../bundles/overlays/focal-pacific.yaml.j2 | 4 ++ 7 files changed, 64 insertions(+), 23 deletions(-) rename ceph-nfs/tests/bundles/{focal-octopus.yaml => focal-pacific.yaml} (58%) create mode 100644 ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 4128ea92..04ccef9c 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -19,9 +19,8 @@ extra-bindings: requires: ceph-client: interface: ceph-client - hacluster: - interface: hacluster - scope: container + loadbalancer: + interface: openstack-loadbalancer peers: cluster: interface: ceph-nfs-peer \ No newline at end of file diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index d55941bd..e5ee05d6 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -3,14 +3,13 @@ - charm-unit-jobs check: jobs: - - octopus - - pacific + - focal-pacific vars: needs_charm_build: true - charm_build_name: ceph-iscsi + charm_build_name: ceph-nfs build_type: charmcraft - job: - name: focal-octopus + name: focal-pacific parent: func-target dependencies: - osci-lint @@ -18,12 +17,5 @@ - tox-py36 - tox-py37 - tox-py38 - vars: - tox_extra_args: focal-octopus -- job: - name: focal-pacific - parent: func-target - dependencies: &smoke-jobs - - focal-octopus vars: tox_extra_args: focal-pacific diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt index 5d99db30..ea346e73 100644 --- a/ceph-nfs/requirements.txt +++ b/ceph-nfs/requirements.txt @@ -3,3 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack +git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 0698b045..0301222d 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -27,6 +27,9 @@ import charmhelpers.core.templating as ch_templating import interface_ceph_client.ceph_client as ceph_client import interface_ceph_nfs_peer + +import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface + # TODO: Add the below class functionaity to action / relations from ganesha import GaneshaNfs @@ -132,6 +135,9 @@ class CephNfsCharm( SERVICES = ['nfs-ganesha'] + LB_SERVICE_NAME = "nfs-ganesha" + NFS_PORT = 2049 + RESTART_MAP = { str(GANESHA_CONF): SERVICES, str(CEPH_CONF): SERVICES, @@ -153,6 +159,9 @@ def __init__(self, framework): self.peers = interface_ceph_nfs_peer.CephNfsPeers( self, 'cluster') + self.ingress = ops_lb_interface.OSLoadbalancerRequires( + self, + 'loadbalancer') self.adapters = CephNFSAdapters( (self.ceph_client, self.peers), contexts=(CephNFSContext(self),), @@ -181,6 +190,12 @@ def __init__(self, framework): self.framework.observe( self.peers.on.reload_nonce, self.on_reload_nonce) + self.framework.observe( + self.ingress.on.lb_relation_ready, + self._request_loadbalancer) + self.framework.observe( + self.ingress.on.lb_configured, + self.render_config) # Actions self.framework.observe( self.on.create_share_action, @@ -201,6 +216,20 @@ 
def __init__(self, framework): self.revoke_access_action ) + def _request_loadbalancer(self, _) -> None: + """Send request to create loadbalancer""" + self.ingress.request_loadbalancer( + self.LB_SERVICE_NAME, + self.NFS_PORT, + self.NFS_PORT, + self._get_bind_ip(), + 'tcp') + + def _get_bind_ip(self) -> str: + """Return the IP to bind the dashboard to""" + binding = self.model.get_binding('public') + return str(binding.network.ingress_address) + def config_get(self, key, default=None): """Retrieve config option. @@ -347,10 +376,16 @@ def on_reload_nonce(self, _event): def access_address(self) -> str: """Return the IP to advertise Ganesha on""" binding = self.model.get_binding('public') - if self.model.get_relation('hacluster'): - return self.config_get('vip') + ingress_address = str(binding.network.ingress_address) + if self.ingress.relations: + lb_response = self.ingress.get_frontend_data() + if lb_response: + lb_config = lb_response[self.LB_SERVICE_NAME] + return [i for d in lb_config.values() for i in d['ip']][0] + else: + return ingress_address else: - return str(binding.network.ingress_address) + return ingress_address def create_share_action(self, event): if not self.model.unit.is_leader(): diff --git a/ceph-nfs/test-requirements.txt b/ceph-nfs/test-requirements.txt index 73db69bb..4e84afc2 100644 --- a/ceph-nfs/test-requirements.txt +++ b/ceph-nfs/test-requirements.txt @@ -13,4 +13,3 @@ oslo.i18n<4.0.0 git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pytz # workaround for 14.04 pip/tox -pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-nfs/tests/bundles/focal-octopus.yaml b/ceph-nfs/tests/bundles/focal-pacific.yaml similarity index 58% rename from ceph-nfs/tests/bundles/focal-octopus.yaml rename to ceph-nfs/tests/bundles/focal-pacific.yaml index b08c4cfa..0f2718ee 100644 --- a/ceph-nfs/tests/bundles/focal-octopus.yaml +++ b/ceph-nfs/tests/bundles/focal-pacific.yaml @@ -8,22 +8,29 @@ applications: charm: ../../ceph-nfs.charm num_units: 2 ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: '2,10G' options: - osd-devices: '/dev/test-non-existent' + source: cloud:focal-wallaby ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' expected-osd-count: 6 + source: cloud:focal-wallaby ceph-fs: - charm: cs:~openstack-charmers-next/ceph-fs + charm: ch:ceph-fs num_units: 1 - + loadbalancer: + charm: ch:openstack-loadbalancer + num_units: 3 + hacluster: + charm: ch:hacluster + options: + cluster_count: 3 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' @@ -31,3 +38,7 @@ relations: - 'ceph-mon:osd' - - 'ceph-fs' - 'ceph-mon' + - - ceph-nfs + - loadbalancer + - - 'loadbalancer:ha' + - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 b/ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 new file mode 100644 index 00000000..fa52dfc1 --- /dev/null +++ b/ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 @@ -0,0 +1,4 @@ +applications: + loadbalancer: + options: + vip: '{{ TEST_VIP00 }}' From f4b4d65ae72219b213b9f0c8abc00f69771082a3 Mon Sep 17 00:00:00 2001 From: Aqsa Malik Date: Fri, 4 Feb 2022 13:06:53 +0100 Subject: [PATCH 2337/2699] Add profile-name parameter in create-pool action This change adds a profile name parameter in the create-pool action that allows a replicated pool to be created with a CRUSH profile 
other than the default replicated_rule. Closes-Bug: #1905573 Change-Id: Ib21ded8f4a977b4a2d57c6b6b4bb82721b12c4ea --- ceph-mon/actions/create_pool.py | 2 ++ .../contrib/storage/linux/ceph.py | 27 +++++++++++++++---- ceph-mon/unit_tests/test_ceph_ops.py | 21 +++++++++++++++ 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/ceph-mon/actions/create_pool.py b/ceph-mon/actions/create_pool.py index 40686c2f..f8faee1f 100755 --- a/ceph-mon/actions/create_pool.py +++ b/ceph-mon/actions/create_pool.py @@ -30,10 +30,12 @@ def create_pool(): try: if pool_type == "replicated": replicas = action_get("replicas") + crush_profile_name = action_get("profile-name") replicated_pool = ReplicatedPool(name=pool_name, service='admin', replicas=replicas, app_name=app_name, + profile_name=crush_profile_name, percent_data=float(percent_data), ) replicated_pool.create() diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 9a34e4b0..369699fd 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -615,7 +615,8 @@ def create(self): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,6 +633,8 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, to this replicated pool. :type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -646,11 +649,20 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, # we will fail with KeyError if it is not provided. self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile', profile_name) else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -668,12 +680,12 @@ def _create(self): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -692,7 +704,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. 
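For orientation, the command list built in the _create() hunk above maps onto a single Ceph CLI invocation once the CRUSH rule name is appended; a minimal sketch with illustrative values (the pool name and pg count are not from the patch, and 32 stands in for min(AUTOSCALER_DEFAULT_PGS, pg_num) as computed in the hunk):

    # Equivalent CLI for ReplicatedPool._create() with service='admin',
    # pg_num=64 and the default profile; the rule name is the new final argument.
    ceph --id admin osd pool create --pg-num-min=32 mypool 64 replicated_rule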
@@ -1859,7 +1871,7 @@ def _partial_build_common_op_create(self, } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1873,6 +1885,10 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. + :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1882,6 +1898,7 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/ceph-mon/unit_tests/test_ceph_ops.py b/ceph-mon/unit_tests/test_ceph_ops.py index 000ddbad..25e095e4 100644 --- a/ceph-mon/unit_tests/test_ceph_ops.py +++ b/ceph-mon/unit_tests/test_ceph_ops.py @@ -66,6 +66,27 @@ def test_process_requests_delete_pool(self, mock_delete_pool.assert_called_with(service='admin', name='foo') self.assertEqual(json.loads(rc), {'exit-code': 0}) + @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') + @patch.object(broker, 'pool_exists') + @patch.object(broker.ReplicatedPool, 'create') + @patch.object(broker, 'log', lambda *args, **kwargs: None) + def test_process_requests_create_replicated_pool(self, + mock_replicated_pool, + mock_pool_exists, + mock_cmp_pkgrevno): + mock_pool_exists.return_value = False + mock_cmp_pkgrevno.return_value = 1 + reqs = json.dumps({'api-version': 1, + 'ops': [{ + 'op': 'create-pool', + 'name': 'foo', + 'replicas': 3 + }]}) + rc = broker.process_requests(reqs) + mock_pool_exists.assert_called_with(service='admin', name='foo') + mock_replicated_pool.assert_called_with() + self.assertEqual(json.loads(rc), {'exit-code': 0}) + @patch('charmhelpers.contrib.storage.linux.ceph.cmp_pkgrevno') @patch.object(broker, 'pool_exists') @patch.object(broker.ErasurePool, 'create') From 5fa60cdd1c794aa0be93ed88bf00e570619cd193 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 15 Feb 2022 15:42:48 +0000 Subject: [PATCH 2338/2699] Update to classic charms to build using charmcraft in CI This update is to ensure that the Zuul Canonical CI builds the charm before functional tests and ensure that that artifact is used for the functional tests. This is to try to ensure that the charm that gets landed to the charmhub is the same charm that was tested.
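Concretely, the new tox target added below chains the snap-installed charmcraft with the rename helper; a minimal sketch of the resulting local workflow, assuming the charmcraft snap is present on the host as the notes in build-requirements.txt require:

    # Run from the charm directory, e.g. ceph-radosgw/:
    tox -e build    # runs: charmcraft clean; charmcraft -v build; ./rename.sh
    # rename.sh collapses the arch-suffixed artifact, for example
    # ceph-radosgw_ubuntu-20.04-amd64.charm -> ceph-radosgw.charm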
Change-Id: I56e04707036b994c52cc9cfcb19e6c37cd41309e --- ceph-radosgw/.gitignore | 1 + ceph-radosgw/build-requirements.txt | 7 +++++++ ceph-radosgw/osci.yaml | 5 +++++ ceph-radosgw/rename.sh | 13 +++++++++++++ .../tests/bundles/focal-xena-namespaced.yaml | 2 +- ceph-radosgw/tests/bundles/focal-xena.yaml | 2 +- .../tests/bundles/focal-yoga-namespaced.yaml | 2 +- ceph-radosgw/tests/bundles/focal-yoga.yaml | 2 +- .../tests/bundles/impish-xena-namespaced.yaml | 2 +- ceph-radosgw/tests/bundles/impish-xena.yaml | 2 +- .../tests/bundles/jammy-yoga-namespaced.yaml | 2 +- ceph-radosgw/tests/bundles/jammy-yoga.yaml | 2 +- ceph-radosgw/tox.ini | 12 +++++++++++- 13 files changed, 45 insertions(+), 9 deletions(-) create mode 100644 ceph-radosgw/build-requirements.txt create mode 100755 ceph-radosgw/rename.sh diff --git a/ceph-radosgw/.gitignore b/ceph-radosgw/.gitignore index 4030da5b..813cc4a8 100644 --- a/ceph-radosgw/.gitignore +++ b/ceph-radosgw/.gitignore @@ -4,6 +4,7 @@ bin .tox tags *.sw[nop] +*.charm *.pyc .idea .unit-state.db diff --git a/ceph-radosgw/build-requirements.txt b/ceph-radosgw/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-radosgw/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 9f041901..55a22c68 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -18,11 +18,16 @@ voting: false - vault-jammy-yoga-namespaced: voting: false + vars: + needs_charm_build: true + charm_build_name: ceph-radosgw + build_type: charmcraft - job: name: vault-focal-xena_rgw parent: func-target dependencies: - osci-lint + - charm-build - tox-py38 - tox-py39 vars: diff --git a/ceph-radosgw/rename.sh b/ceph-radosgw/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-radosgw/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." 
+mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml index 3adb5dcb..022d5620 100644 --- a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/focal-xena.yaml b/ceph-radosgw/tests/bundles/focal-xena.yaml index 49bb7845..e0a1e1c9 100644 --- a/ceph-radosgw/tests/bundles/focal-xena.yaml +++ b/ceph-radosgw/tests/bundles/focal-xena.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml index c377403f..7d05aa82 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml index e1ce28f8..697a9be8 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml index 374aa34c..8e94f9a0 100644 --- a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/impish-xena.yaml b/ceph-radosgw/tests/bundles/impish-xena.yaml index 26dd2da9..e26477e8 100644 --- a/ceph-radosgw/tests/bundles/impish-xena.yaml +++ b/ceph-radosgw/tests/bundles/impish-xena.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml index ce018839..64629ae1 100644 --- a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/jammy-yoga.yaml b/ceph-radosgw/tests/bundles/jammy-yoga.yaml index 63d1133a..45ae1af8 100644 --- a/ceph-radosgw/tests/bundles/jammy-yoga.yaml +++ b/ceph-radosgw/tests/bundles/jammy-yoga.yaml @@ -40,7 +40,7 @@ applications: channel: latest/edge ceph-radosgw: - charm: ceph-radosgw + charm: ../../ceph-radosgw.charm num_units: 1 options: source: *source diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 86d1e904..81fd2492 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -37,10 +37,20 @@ setenv = VIRTUAL_ENV={envdir} 
install_command = {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} -allowlist_externals = juju +allowlist_externals = + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt From 408db29d616af09a56bb619cdf9a9d5ad3c909bb Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 15 Feb 2022 15:13:49 +0000 Subject: [PATCH 2339/2699] Update to classic charms to build using charmcraft in CI This update is to ensure that the Zuul Canonical CI builds the charm before functional tests and ensure that that artifact is used for the functional tests. This is to try to ensure that the charm that gets landed to the charmhub is the same charm that was tested. Change-Id: I546438f1af6b3f779aa01e9ddd8c8ae7c3b7d063 --- ceph-mon/.gitignore | 1 + ceph-mon/build-requirements.txt | 7 ++++++ ceph-mon/osci.yaml | 4 ++++ ceph-mon/rename.sh | 13 ++++++++++ ceph-mon/tests/bundles/focal-xena.yaml | 32 ++++++++++++------------- ceph-mon/tests/bundles/focal-yoga.yaml | 32 ++++++++++++------------- ceph-mon/tests/bundles/impish-xena.yaml | 32 ++++++++++++------------- ceph-mon/tests/bundles/jammy-yoga.yaml | 32 ++++++++++++------------- ceph-mon/tox.ini | 12 +++++++++- 9 files changed, 100 insertions(+), 65 deletions(-) create mode 100644 ceph-mon/build-requirements.txt create mode 100755 ceph-mon/rename.sh diff --git a/ceph-mon/.gitignore b/ceph-mon/.gitignore index b7e47dbe..901e8bd5 100644 --- a/ceph-mon/.gitignore +++ b/ceph-mon/.gitignore @@ -4,6 +4,7 @@ bin .testrepository .tox *.sw[nop] +*.charm .idea *.pyc func-results.json diff --git a/ceph-mon/build-requirements.txt b/ceph-mon/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-mon/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index da6cd318..2234cbeb 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -4,3 +4,7 @@ - charm-unit-jobs-py39 - charm-xena-functional-jobs - charm-yoga-functional-jobs + vars: + needs_charm_build: true + charm_build_name: ceph-mon + build_type: charmcraft diff --git a/ceph-mon/rename.sh b/ceph-mon/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-mon/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here."
+mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index 82602a31..e82685eb 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -32,19 +32,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -55,7 +55,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: stable/yoga ceph-osd: charm: ch:ceph-osd @@ -69,10 +69,10 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: stable/yoga ceph-mon: - charm: ../../../ceph-mon + charm: ../../ceph-mon.charm num_units: 3 options: source: *openstack-origin @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '9' - channel: latest/edge + channel: stable/yoga keystone: expose: True @@ -99,7 +99,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: stable/yoga nova-compute: charm: ch:nova-compute @@ -109,7 +109,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: latest/edge + channel: stable/yoga glance: expose: True @@ -119,7 +119,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: stable/yoga cinder: expose: True @@ -131,11 +131,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: latest/edge + channel: stable/yoga cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: stable/yoga nova-cloud-controller: expose: True @@ -145,7 +145,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: stable/yoga placement: charm: ch:placement @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: stable/yoga prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 1b264c44..1934967d 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -32,19 +32,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -55,7 +55,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: stable/yoga ceph-osd: charm: ch:ceph-osd @@ -69,10 +69,10 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: stable/yoga ceph-mon: - charm: ../../../ceph-mon + charm: ../../ceph-mon.charm num_units: 3 options: source: *openstack-origin @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '9' - channel: latest/edge + channel: stable/yoga keystone: 
expose: True @@ -99,7 +99,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: stable/yoga nova-compute: charm: ch:nova-compute @@ -109,7 +109,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: latest/edge + channel: stable/yoga glance: expose: True @@ -119,7 +119,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: stable/yoga cinder: expose: True @@ -131,11 +131,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: latest/edge + channel: stable/yoga cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: stable/yoga nova-cloud-controller: expose: True @@ -145,7 +145,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: stable/yoga placement: charm: ch:placement @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: stable/yoga prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml index bc43d2da..670fe090 100644 --- a/ceph-mon/tests/bundles/impish-xena.yaml +++ b/ceph-mon/tests/bundles/impish-xena.yaml @@ -33,19 +33,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -56,7 +56,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: stable/yoga ceph-osd: charm: ch:ceph-osd @@ -70,10 +70,10 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: stable/yoga ceph-mon: - charm: ../../../ceph-mon + charm: ../../ceph-mon.charm num_units: 3 options: source: *openstack-origin @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '9' - channel: latest/edge + channel: stable/yoga keystone: expose: True @@ -100,7 +100,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: stable/yoga nova-compute: charm: ch:nova-compute @@ -110,7 +110,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: latest/edge + channel: stable/yoga glance: expose: True @@ -120,7 +120,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: stable/yoga cinder: expose: True @@ -132,11 +132,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: latest/edge + channel: stable/yoga cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: stable/yoga nova-cloud-controller: expose: True @@ -146,7 +146,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: stable/yoga placement: charm: ch:placement @@ -155,7 +155,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: stable/yoga prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index 0b98bc77..00f207ac 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ 
b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -33,19 +33,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: stable/yoga mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -56,7 +56,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: stable/yoga ceph-osd: charm: ch:ceph-osd @@ -70,10 +70,10 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: stable/yoga ceph-mon: - charm: ../../../ceph-mon + charm: ../../ceph-mon.charm num_units: 3 options: source: *openstack-origin @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '9' - channel: latest/edge + channel: stable/yoga keystone: expose: True @@ -100,7 +100,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: stable/yoga nova-compute: charm: ch:nova-compute @@ -110,7 +110,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: stable/yoga glance: expose: True @@ -120,7 +120,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: stable/yoga cinder: expose: True @@ -132,11 +132,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: latest/edge + channel: stable/yoga cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: stable/yoga nova-cloud-controller: expose: True @@ -146,7 +146,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: stable/yoga placement: charm: ch:placement @@ -155,7 +155,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: stable/yoga prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 86d1e904..81fd2492 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -37,10 +37,20 @@ setenv = VIRTUAL_ENV={envdir} install_command = {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} -allowlist_externals = juju +allowlist_externals = + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt From 4151a085717f99b2169e7a9427d85224c8ae0bb2 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Tue, 15 Feb 2022 15:27:51 +0000 Subject: [PATCH 2340/2699] Update to classic charms to build using charmcraft in CI This update is to ensure that the Zuul Canonical CI builds the charm before functional tests and ensure that that artifact is used for the functional tests. This is to try to ensure that the charm that gets landed to the charmhub is the same charm that was tested.
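The bundle edits below then consume that artifact directly (../../ceph-osd.charm) instead of the source checkout; a quick way to exercise the same flow locally (the deploy line is illustrative, not part of the change):

    tox -e build                  # leaves ./ceph-osd.charm via rename.sh
    ls -l ceph-osd.charm          # the path the test bundles now reference
    juju deploy ./ceph-osd.charm --storage osd-devices=cinder,10G  # illustrative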
Change-Id: I83118e15ff91480370182b404b3d3b7d24b5c67c --- ceph-osd/.gitignore | 1 + ceph-osd/build-requirements.txt | 7 +++++++ ceph-osd/osci.yaml | 4 ++++ ceph-osd/rename.sh | 13 +++++++++++++ ceph-osd/requirements.txt | 3 --- ceph-osd/test-requirements.txt | 1 - ceph-osd/tests/bundles/focal-xena.yaml | 18 +++++++++--------- ceph-osd/tests/bundles/focal-yoga.yaml | 18 +++++++++--------- ceph-osd/tests/bundles/impish-xena.yaml | 18 +++++++++--------- ceph-osd/tests/bundles/jammy-yoga.yaml | 18 +++++++++--------- ceph-osd/tox.ini | 12 +++++++++++- 11 files changed, 72 insertions(+), 41 deletions(-) create mode 100644 ceph-osd/build-requirements.txt create mode 100755 ceph-osd/rename.sh diff --git a/ceph-osd/.gitignore b/ceph-osd/.gitignore index 53bc7bb1..b7937771 100644 --- a/ceph-osd/.gitignore +++ b/ceph-osd/.gitignore @@ -5,6 +5,7 @@ .stestr bin *.sw[nop] +*.charm *.pyc .unit-state.db .idea diff --git a/ceph-osd/build-requirements.txt b/ceph-osd/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-osd/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index da6cd318..2326d25c 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -4,3 +4,7 @@ - charm-unit-jobs-py39 - charm-xena-functional-jobs - charm-yoga-functional-jobs + vars: + needs_charm_build: true + charm_build_name: ceph-osd + build_type: charmcraft diff --git a/ceph-osd/rename.sh b/ceph-osd/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-osd/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index 10d37185..ead6e89a 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -22,6 +22,3 @@ dnspython<2.0.0; python_version < '3.6' dnspython; python_version >= '3.6' psutil>=1.1.1,<2.0.0 - -# cffi 1.15.0 drops support for py35 -cffi==1.14.6; python_version < '3.6' diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 7ce4d17c..0aabe171 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -13,7 +13,6 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb requests>=2.18.4 -flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 # Dependency of stestr. 
Workaround for diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml index 12b55e65..5fb13313 100644 --- a/ceph-osd/tests/bundles/focal-xena.yaml +++ b/ceph-osd/tests/bundles/focal-xena.yaml @@ -56,7 +56,7 @@ applications: channel: latest/edge ceph-osd: - charm: ../../../ceph-osd + charm: ../../ceph-osd.charm num_units: 3 storage: osd-devices: 'cinder,10G' @@ -78,7 +78,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: quincy/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -97,7 +97,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -106,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: yoga/edge glance: expose: True @@ -116,7 +116,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: yoga/edge cinder: expose: True @@ -128,11 +128,11 @@ applications: glance-api-version: '2' to: - '13' - channel: latest/edge + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: yoga/edge nova-cloud-controller: expose: True @@ -142,7 +142,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: yoga/edge placement: charm: ch:placement @@ -151,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: yoga/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml index 11bac0f8..4a8d004c 100644 --- a/ceph-osd/tests/bundles/focal-yoga.yaml +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -56,7 +56,7 @@ applications: channel: latest/edge ceph-osd: - charm: ../../../ceph-osd + charm: ../../ceph-osd.charm num_units: 3 storage: osd-devices: 'cinder,10G' @@ -78,7 +78,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: quincy/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -97,7 +97,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -106,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: yoga/edge glance: expose: True @@ -116,7 +116,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: yoga/edge cinder: expose: True @@ -128,11 +128,11 @@ applications: glance-api-version: '2' to: - '13' - channel: latest/edge + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: yoga/edge nova-cloud-controller: expose: True @@ -142,7 +142,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: yoga/edge placement: charm: ch:placement @@ -151,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: yoga/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/impish-xena.yaml b/ceph-osd/tests/bundles/impish-xena.yaml index aaf65aac..f49d208b 100644 --- a/ceph-osd/tests/bundles/impish-xena.yaml +++ b/ceph-osd/tests/bundles/impish-xena.yaml @@ -56,7 +56,7 @@ applications: channel: latest/edge ceph-osd: - charm: ../../../ceph-osd + charm: ../../ceph-osd.charm num_units: 3 storage: osd-devices: 'cinder,10G' @@ -78,7 +78,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: quincy/edge rabbitmq-server: 
charm: ch:rabbitmq-server @@ -97,7 +97,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -106,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: yoga/edge glance: expose: True @@ -116,7 +116,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: yoga/edge cinder: expose: True @@ -128,11 +128,11 @@ applications: glance-api-version: '2' to: - '13' - channel: latest/edge + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: yoga/edge nova-cloud-controller: expose: True @@ -142,7 +142,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: yoga/edge placement: charm: ch:placement @@ -151,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: yoga/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/jammy-yoga.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml index 2374923c..77c5d22c 100644 --- a/ceph-osd/tests/bundles/jammy-yoga.yaml +++ b/ceph-osd/tests/bundles/jammy-yoga.yaml @@ -56,7 +56,7 @@ applications: channel: latest/edge ceph-osd: - charm: ../../../ceph-osd + charm: ../../ceph-osd.charm num_units: 3 storage: osd-devices: 'cinder,10G' @@ -78,7 +78,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: quincy/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -97,7 +97,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -106,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: yoga/edge glance: expose: True @@ -116,7 +116,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: yoga/edge cinder: expose: True @@ -128,11 +128,11 @@ applications: glance-api-version: '2' to: - '13' - channel: latest/edge + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: yoga/edge nova-cloud-controller: expose: True @@ -142,7 +142,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: yoga/edge placement: charm: ch:placement @@ -151,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: yoga/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 86d1e904..81fd2492 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -37,10 +37,20 @@ setenv = VIRTUAL_ENV={envdir} install_command = {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} -allowlist_externals = juju +allowlist_externals = + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt From c07b39431ddf2abbb48b46681180a9e70a821ba9 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 26 Jan 2022 12:17:56 +0000 Subject: [PATCH 2341/2699] Update to classic charms to build using charmcraft in CI This update is to ensure that the Zuul Canonical CI builds the charm before functional tests and ensure that that 
artifact is used for the functional tests. This is to try to ensure that the charm that gets landed to the charmhub is the same charm that was tested. Change-Id: I17cb0cfe88be075d79d94edcdd2cea845bf78b8f Co-authored-by: Aurelien Lourot --- ceph-proxy/.gitignore | 1 + ceph-proxy/LICENSE | 202 ++++++++++++++++ ceph-proxy/build-requirements.txt | 7 +++++ ceph-proxy/charmcraft.yaml | 28 +++ ceph-proxy/metadata.yaml | 3 - ceph-proxy/osci.yaml | 54 ++--- ceph-proxy/rename.sh | 13 ++ ceph-proxy/test-requirements.txt | 1 + ceph-proxy/tests/bundles/bionic-queens.yaml | 82 ------- ceph-proxy/tests/bundles/bionic-rocky.yaml | 99 -------- ceph-proxy/tests/bundles/bionic-stein.yaml | 99 -------- ceph-proxy/tests/bundles/bionic-train.yaml | 99 -------- ceph-proxy/tests/bundles/bionic-ussuri.yaml | 100 -------- ceph-proxy/tests/bundles/focal-ussuri-ec.yaml | 215 ------------------ ceph-proxy/tests/bundles/focal-ussuri.yaml | 186 --------------- .../tests/bundles/focal-victoria-ec.yaml | 215 ------------------ ceph-proxy/tests/bundles/focal-victoria.yaml | 186 --------------- .../tests/bundles/focal-wallaby-ec.yaml | 215 ------------------ ceph-proxy/tests/bundles/focal-wallaby.yaml | 186 --------------- ceph-proxy/tests/bundles/focal-xena-ec.yaml | 41 ++-- ceph-proxy/tests/bundles/focal-xena.yaml | 41 ++-- ceph-proxy/tests/bundles/focal-yoga-ec.yaml | 41 ++-- ceph-proxy/tests/bundles/focal-yoga.yaml | 41 ++-- .../tests/bundles/hirsute-wallaby-ec.yaml | 215 ------------------ ceph-proxy/tests/bundles/hirsute-wallaby.yaml | 186 --------------- ceph-proxy/tests/bundles/impish-xena-ec.yaml | 41 ++-- ceph-proxy/tests/bundles/impish-xena.yaml | 41 ++-- ceph-proxy/tests/bundles/jammy-yoga-ec.yaml | 41 ++-- ceph-proxy/tests/bundles/jammy-yoga.yaml | 41 ++-- ceph-proxy/tests/tests.yaml | 20 +- ceph-proxy/tox.ini | 12 +- 31 files changed, 496 insertions(+), 2256 deletions(-) create mode 100644 ceph-proxy/LICENSE create mode 100644 ceph-proxy/build-requirements.txt create mode 100644 ceph-proxy/charmcraft.yaml create mode 100755 ceph-proxy/rename.sh delete mode 100644 ceph-proxy/tests/bundles/bionic-queens.yaml delete mode 100644 ceph-proxy/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-proxy/tests/bundles/bionic-stein.yaml delete mode 100644 ceph-proxy/tests/bundles/bionic-train.yaml delete mode 100644 ceph-proxy/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-ussuri-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-victoria-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-wallaby-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-proxy/.gitignore b/ceph-proxy/.gitignore index 9e552b19..b81658ef 100644 --- a/ceph-proxy/.gitignore +++ b/ceph-proxy/.gitignore @@ -3,6 +3,7 @@ bin .testrepository .tox *.sw[nop] +*.charm *.pyc .unit-state.db .stestr diff --git a/ceph-proxy/LICENSE b/ceph-proxy/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/ceph-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
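The charmcraft.yaml added below packs the charm with the dump plugin and an explicit prime list, so only the named paths end up in the artifact; since a .charm file is a zip archive, the payload can be sanity-checked after a build (a sketch, not part of the patch):

    tox -e build
    unzip -l ceph-proxy.charm | grep -E 'hooks/|metadata.yaml|config.yaml'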
diff --git a/ceph-proxy/build-requirements.txt b/ceph-proxy/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-proxy/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml new file mode 100644 index 00000000..11d5f7cd --- /dev/null +++ b/ceph-proxy/charmcraft.yaml @@ -0,0 +1,28 @@ +type: charm + +parts: + charm: + plugin: dump + source: . + prime: + - actions/* + - charmhelpers/* + - files/* + - hooks/* + - lib/* + - templates/* + - actions.yaml + - config.yaml + - copyright + - hardening.yaml + - icon.svg + - LICENSE + - Makefile + - metadata.yaml + - README.md + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 3eeb1b52..34e02da9 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -10,10 +10,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish extra-bindings: public: diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index 335b04d7..9b8d20da 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -1,72 +1,50 @@ - project: templates: - - charm-yoga-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-yoga-functional-jobs - charm-xena-functional-jobs - - charm-wallaby-functional-jobs - - charm-victoria-functional-jobs - - charm-ussuri-functional-jobs - - charm-stein-functional-jobs - - charm-queens-functional-jobs check: jobs: - - focal-ussuri-ec - - focal-victoria-ec - - focal-wallaby-ec - focal-xena-ec - focal-yoga-ec: voting: false - - hirsute-wallaby-ec - impish-xena-ec: voting: false - jammy-yoga-ec: voting: false -- job: - name: focal-ussuri-ec - parent: func-target - dependencies: &smoke-jobs - - bionic-ussuri - vars: - tox_extra_args: erasure-coded:focal-ussuri-ec -- job: - name: focal-victoria-ec - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: erasure-coded:focal-victoria-ec -- job: - name: focal-wallaby-ec - parent: func-target - dependencies: *smoke-jobs vars: - tox_extra_args: erasure-coded:focal-wallaby-ec + needs_charm_build: true + charm_build_name: ceph-proxy + build_type: charmcraft - job: name: focal-xena-ec parent: func-target - dependencies: *smoke-jobs + dependencies: + - osci-lint + - charm-build + - tox-py38 + - tox-py39 vars: tox_extra_args: erasure-coded:focal-xena-ec - job: name: focal-yoga-ec parent: func-target - dependencies: *smoke-jobs + dependencies: + - focal-xena-ec vars: tox_extra_args: erasure-coded:focal-yoga-ec -- job: - name: hirsute-wallaby-ec - parent: func-target - dependencies: *smoke-jobs - vars: - tox_extra_args: erasure-coded:hirsute-wallaby-ec - job: name: impish-xena-ec parent: func-target - dependencies: *smoke-jobs + dependencies: + - focal-xena-ec vars: tox_extra_args: erasure-coded:impish-xena-ec - job: name: jammy-yoga-ec parent: func-target - dependencies: *smoke-jobs + dependencies: + - focal-xena-ec vars: tox_extra_args: erasure-coded:jammy-yoga-ec diff --git a/ceph-proxy/rename.sh b/ceph-proxy/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ 
b/ceph-proxy/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 0d12fc2a..0aabe171 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -12,6 +12,7 @@ cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 + stestr>=2.2.0 # Dependency of stestr. Workaround for diff --git a/ceph-proxy/tests/bundles/bionic-queens.yaml b/ceph-proxy/tests/bundles/bionic-queens.yaml deleted file mode 100644 index c6ec26e9..00000000 --- a/ceph-proxy/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,82 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - constraints: mem=1024 - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-rocky.yaml b/ceph-proxy/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index a71711c7..00000000 --- a/ceph-proxy/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-rocky - ceph-osd: - charm: 
'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:bionic-rocky - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:bionic-rocky - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:bionic-rocky - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:bionic-rocky - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:bionic-rocky -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-stein.yaml b/ceph-proxy/tests/bundles/bionic-stein.yaml deleted file mode 100644 index 2c1f5359..00000000 --- a/ceph-proxy/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-stein - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:bionic-stein - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:bionic-stein - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:bionic-stein - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - constraints: mem=1024 - glance: - charm: 
cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:bionic-stein - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:bionic-stein -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-train.yaml b/ceph-proxy/tests/bundles/bionic-train.yaml deleted file mode 100644 index fd891bd6..00000000 --- a/ceph-proxy/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,99 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-train - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:bionic-train - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:bionic-train - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:bionic-train - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:bionic-train - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:bionic-train - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:bionic-train - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:bionic-train -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' 
- - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/bionic-ussuri.yaml b/ceph-proxy/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index 33e2c0e9..00000000 --- a/ceph-proxy/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,100 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: 'cs:~openstack-charmers-next/ceph-mon' - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-ussuri - ceph-osd: - charm: 'cs:~openstack-charmers-next/ceph-osd' - num_units: 3 - storage: - osd-devices: 10G - options: - source: cloud:bionic-ussuri - ceph-proxy: - charm: 'ceph-proxy' - num_units: 1 - options: - source: cloud:bionic-ussuri - ceph-radosgw: - charm: 'cs:~openstack-charmers-next/ceph-radosgw' - num_units: 1 - options: - source: cloud:bionic-ussuri - cinder: - charm: 'cs:~openstack-charmers-next/cinder' - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - cinder-ceph: - charm: 'cs:~openstack-charmers-next/cinder-ceph' - options: - restrict-ceph-pools: True - keystone: - charm: 'cs:~openstack-charmers-next/keystone' - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - admin-password: openstack - constraints: mem=1024 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - percona-cluster: - charm: 'cs:~openstack-charmers-next/percona-cluster' - num_units: 1 - options: - source: cloud:bionic-ussuri - dataset-size: 50% - max-connections: 1000 - innodb-buffer-pool-size: 256M - root-password: ChangeMe123 - sst-password: ChangeMe123 - constraints: mem=4096 - rabbitmq-server: - charm: 'cs:~openstack-charmers-next/rabbitmq-server' - num_units: 1 - constraints: mem=1024 - options: - source: cloud:bionic-ussuri -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - 'cinder:shared-db' - - 'percona-cluster:shared-db' - - - 'keystone:shared-db' - - 'percona-cluster:shared-db' - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - 'glance:image-service' - - 'nova-compute:image-service' - - - 'glance:identity-service' - - 'keystone:identity-service' - - - 'glance:shared-db' - - 'percona-cluster:shared-db' - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml b/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml deleted file mode 100644 index 100fe81c..00000000 --- a/ceph-proxy/tests/bundles/focal-ussuri-ec.yaml +++ /dev/null @@ -1,215 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro 
- -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 
'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-ussuri.yaml b/ceph-proxy/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index d917b1c9..00000000 --- a/ceph-proxy/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,186 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 
'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/focal-victoria-ec.yaml b/ceph-proxy/tests/bundles/focal-victoria-ec.yaml deleted file mode 100644 index 25f015fd..00000000 --- a/ceph-proxy/tests/bundles/focal-victoria-ec.yaml +++ /dev/null @@ -1,215 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 
'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-victoria.yaml b/ceph-proxy/tests/bundles/focal-victoria.yaml deleted file mode 100644 index da9782f4..00000000 --- a/ceph-proxy/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,186 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: 
*openstack-origin - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml b/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml deleted file mode 100644 index cd693777..00000000 --- a/ceph-proxy/tests/bundles/focal-wallaby-ec.yaml +++ /dev/null @@ -1,215 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - 
options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/focal-wallaby.yaml b/ceph-proxy/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 203f1249..00000000 --- a/ceph-proxy/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,186 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/focal-xena-ec.yaml b/ceph-proxy/tests/bundles/focal-xena-ec.yaml index f53e21e8..d5632805 100644 --- a/ceph-proxy/tests/bundles/focal-xena-ec.yaml +++ b/ceph-proxy/tests/bundles/focal-xena-ec.yaml @@ -32,14 +32,17 @@ machines: applications: cinder-mysql-router: - charm: 
cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -47,9 +50,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -58,9 +62,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 10G @@ -73,9 +78,10 @@ applications: - '16' - '17' - '18' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -83,7 +89,7 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin @@ -92,9 +98,10 @@ applications: ec-profile-m: 2 to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -105,9 +112,10 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True pool-type: erasure-coded @@ -115,9 +123,10 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -125,18 +134,20 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin @@ -146,9 +157,10 @@ applications: ec-profile-plugin: jerasure to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin @@ -159,6 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-xena.yaml b/ceph-proxy/tests/bundles/focal-xena.yaml index 225a9489..f9b5c376 100644 --- a/ceph-proxy/tests/bundles/focal-xena.yaml +++ b/ceph-proxy/tests/bundles/focal-xena.yaml @@ -29,14 +29,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -44,9 +47,10 @@ applications: - '0' - '1' - '2' + channel: 
latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -55,9 +59,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: 10G @@ -67,9 +72,10 @@ applications: - '6' - '7' - '8' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -77,15 +83,16 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -96,14 +103,16 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -111,31 +120,35 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-yoga-ec.yaml b/ceph-proxy/tests/bundles/focal-yoga-ec.yaml index e60614da..c9cd5b4e 100644 --- a/ceph-proxy/tests/bundles/focal-yoga-ec.yaml +++ b/ceph-proxy/tests/bundles/focal-yoga-ec.yaml @@ -32,14 +32,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -47,9 +50,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -58,9 +62,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 10G @@ -73,9 +78,10 @@ applications: - '16' - '17' - '18' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -83,7 +89,7 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin @@ -92,9 +98,10 @@ applications: ec-profile-m: 2 to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: 
ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -105,9 +112,10 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True pool-type: erasure-coded @@ -115,9 +123,10 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -125,18 +134,20 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin @@ -146,9 +157,10 @@ applications: ec-profile-plugin: jerasure to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin @@ -159,6 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-yoga.yaml b/ceph-proxy/tests/bundles/focal-yoga.yaml index f4d6fb6d..b6315472 100644 --- a/ceph-proxy/tests/bundles/focal-yoga.yaml +++ b/ceph-proxy/tests/bundles/focal-yoga.yaml @@ -29,14 +29,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -44,9 +47,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -55,9 +59,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: 10G @@ -67,9 +72,10 @@ applications: - '6' - '7' - '8' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -77,15 +83,16 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -96,14 +103,16 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -111,31 +120,35 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + 
charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml b/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml deleted file mode 100644 index 8cce6d30..00000000 --- a/ceph-proxy/tests/bundles/hirsute-wallaby-ec.yaml +++ /dev/null @@ -1,215 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - - -relations: - - - - 
'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/hirsute-wallaby.yaml b/ceph-proxy/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index 43b5a1f7..00000000 --- a/ceph-proxy/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,186 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: hirsute - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - ceph-proxy: - charm: ceph-proxy - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - options: - restrict-ceph-pools: True - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - - nova-compute: - 
charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/impish-xena-ec.yaml b/ceph-proxy/tests/bundles/impish-xena-ec.yaml index ad864ab9..ff4096f6 100644 --- a/ceph-proxy/tests/bundles/impish-xena-ec.yaml +++ b/ceph-proxy/tests/bundles/impish-xena-ec.yaml @@ -32,14 +32,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -47,9 +50,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -58,9 +62,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 10G @@ -73,9 +78,10 @@ applications: - '16' - '17' - '18' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -83,7 +89,7 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin @@ -92,9 +98,10 @@ applications: ec-profile-m: 2 to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -105,9 +112,10 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True pool-type: erasure-coded @@ -115,9 +123,10 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -125,18 +134,20 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + 
charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin @@ -146,9 +157,10 @@ applications: ec-profile-plugin: jerasure to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin @@ -159,6 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/impish-xena.yaml b/ceph-proxy/tests/bundles/impish-xena.yaml index 56508086..0710d61b 100644 --- a/ceph-proxy/tests/bundles/impish-xena.yaml +++ b/ceph-proxy/tests/bundles/impish-xena.yaml @@ -29,14 +29,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -44,9 +47,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -55,9 +59,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: 10G @@ -67,9 +72,10 @@ applications: - '6' - '7' - '8' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -77,15 +83,16 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -96,14 +103,16 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -111,31 +120,35 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml index 5ea74baa..26ae8716 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml @@ 
-32,14 +32,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -47,9 +50,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -58,9 +62,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 10G @@ -73,9 +78,10 @@ applications: - '16' - '17' - '18' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -83,7 +89,7 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin @@ -92,9 +98,10 @@ applications: ec-profile-m: 2 to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -105,9 +112,10 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True pool-type: erasure-coded @@ -115,9 +123,10 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -125,18 +134,20 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin @@ -146,9 +157,10 @@ applications: ec-profile-plugin: jerasure to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin @@ -159,6 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/bundles/jammy-yoga.yaml b/ceph-proxy/tests/bundles/jammy-yoga.yaml index 178f679a..0a5a4b19 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga.yaml @@ -29,14 +29,17 @@ machines: applications: cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: 
*openstack-origin @@ -44,9 +47,10 @@ applications: - '0' - '1' - '2' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 @@ -55,9 +59,10 @@ applications: - '3' - '4' - '5' + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 storage: osd-devices: 10G @@ -67,9 +72,10 @@ applications: - '6' - '7' - '8' + channel: quincy/edge ceph-proxy: - charm: ceph-proxy + charm: ../../ceph-proxy.charm num_units: 1 options: source: *openstack-origin @@ -77,15 +83,16 @@ applications: - '9' ceph-radosgw: - charm: cs:~openstack-charmers-next/ceph-radosgw + charm: ch:ceph-radosgw num_units: 1 options: source: *openstack-origin to: - '10' + channel: quincy/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: openstack-origin: *openstack-origin @@ -96,14 +103,16 @@ applications: constraints: mem=2048 to: - '11' + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph options: restrict-ceph-pools: True + channel: yoga/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin @@ -111,31 +120,35 @@ applications: constraints: mem=1024 to: - '12' + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 options: source: *openstack-origin to: - '13' + channel: latest/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin to: - '14' + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin to: - '15' + channel: yoga/edge relations: diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index cefa87df..691fcc44 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -12,32 +12,19 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes gate_bundles: - - bionic-queens - - bionic-stein - - bionic-ussuri - - focal-ussuri - - erasure-coded: focal-ussuri-ec - - focal-victoria - - erasure-coded: focal-victoria-ec - - focal-wallaby - - erasure-coded: focal-wallaby-ec - focal-xena - erasure-coded: focal-xena-ec - - hirsute-wallaby - - erasure-coded: hirsute-wallaby-ec - impish-xena - erasure-coded: impish-xena-ec dev_bundles: - - bionic-rocky # mimic - - bionic-train - focal-yoga - erasure-coded: focal-yoga-ec - jammy-yoga - erasure-coded: jammy-yoga-ec smoke_bundles: - - focal-ussuri + - focal-xena target_deploy_status: ceph-proxy: @@ -46,9 +33,6 @@ target_deploy_status: ceph-radosgw: workload-status: waiting workload-status-message: "Incomplete relations: mon" - cinder-ceph: - workload-status: waiting - workload-status-message: "Incomplete relations: ceph" keystone: workload-status: active workload-status-message: "Unit is ready" @@ -64,8 +48,6 @@ target_deploy_status: tests_options: force_deploy: - - hirsute-wallaby - - hirsute-wallaby-ec - impish-xena - impish-xena-ec - jammy-yoga diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 86d1e904..81fd2492 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -37,10 +37,20 @@ setenv = VIRTUAL_ENV={envdir} install_command = {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} 
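# With the [testenv:build] target added below, the locally built artifact
# that the updated test bundles deploy as ../../ceph-proxy.charm would be
# produced with (a hedged sketch; the exact output file name depends on
# what rename.sh actually does after charmcraft builds the charm):
#
#   tox -e build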
-allowlist_externals = juju +allowlist_externals = + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + [testenv:py35] basepython = python3.5 deps = -r{toxinidir}/requirements.txt From decdbaed0531bf67b60ae0f4ea21846b9b47b7d2 Mon Sep 17 00:00:00 2001 From: Cornellius Metto Date: Thu, 11 Nov 2021 16:03:13 +0000 Subject: [PATCH 2342/2699] Enable HAProxy HTTP Health Checks Ceph radosgw supports [0] the swift health check endpoint "/swift/healthcheck". This change adds the haproxy configuration [1] necessary to take the response of "GET /swift/healthcheck" into account when determining the health of a radosgw service. For testing, I verified that: - HAProxy starts and responds to requests normally with this configuration. - Servers with status != 2xx or 3xx are removed from the backend. - Servers that take too long to respond are also removed from the backend. The default timeout value is 2s. [0] https://tracker.ceph.com/issues/11682 [1] https://www.haproxy.com/documentation/hapee/2-0r1/onepage/#4.2-option%20httpchk Closes-Bug: 1946280 Change-Id: I82634255ca3423fec3fc15c1e714dcb31db5da7a --- ceph-radosgw/hooks/ceph_radosgw_context.py | 13 ++++++++++++- .../unit_tests/test_ceph_radosgw_context.py | 6 +++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 38975a48..556ae038 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -22,6 +22,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_api_port, determine_apache_port, + https, ) from charmhelpers.core.host import ( cmp_pkgrevno, @@ -68,12 +69,13 @@ class HAProxyContext(context.HAProxyContext): def __call__(self): ctxt = super(HAProxyContext, self).__call__() port = utils.listen_port() + service = 'cephradosgw-server' # Apache ports a_cephradosgw_api = determine_apache_port(port, singlenode_mode=True) port_mapping = { - 'cephradosgw-server': [port, a_cephradosgw_api] + service: [port, a_cephradosgw_api] } ctxt['cephradosgw_bind_port'] = determine_api_port( @@ -82,7 +84,16 @@ def __call__(self): ) # for haproxy.conf + backend_options = { + service: [{ + 'option': 'httpchk GET /swift/healthcheck', + }] + } + ctxt['service_ports'] = port_mapping + ctxt['backend_options'] = backend_options + ctxt['https'] = https() + return ctxt diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 3da43c0a..ac631371 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -63,7 +63,11 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, self.determine_api_port.return_value = 70 expect = { 'cephradosgw_bind_port': 70, - 'service_ports': {'cephradosgw-server': [80, 70]} + 'service_ports': {'cephradosgw-server': [80, 70]}, + 'backend_options': {'cephradosgw-server': [{ + 'option': 'httpchk GET /swift/healthcheck', + }]}, + 'https': False } self.assertEqual(expect, haproxy_context()) From dc34269ddc902f3dd010c00af6e5cd71a53647f6 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 19 Nov 2021 18:24:39 -0300 Subject: [PATCH 2343/2699] Enhance the 'add-disk' action for disk replacement As part of the task to improve 
disk replacement, the 'add-disk' action needs some changes. This includes: - Creating 'bcache' devices to accelerate disk access. - Creating caching partitions of a specified size. - Recycling previously deactivated OSD ids. Change-Id: Id5027f30d51c23d2be4c34f82867d65a50b35137 Depends-On: I43d0a0bc11664c37532c0117711affc93c9d1ad1 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/675 --- ceph-osd/actions.yaml | 18 ++ ceph-osd/actions/add_disk.py | 123 +++++++++- ceph-osd/hooks/utils.py | 228 +++++++++++++++++++ ceph-osd/lib/charms_ceph/utils.py | 16 +- ceph-osd/metadata.yaml | 5 + ceph-osd/test-requirements.txt | 2 + ceph-osd/unit_tests/test_actions_add_disk.py | 48 +++- ceph-osd/unit_tests/test_ceph_utils.py | 122 +++++++++- 8 files changed, 546 insertions(+), 16 deletions(-) diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 7c405bad..f4233aed 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -41,6 +41,24 @@ add-disk: bucket: type: string description: The name of the bucket in Ceph to add these devices into + osd-ids: + type: string + description: | + The OSD ids to recycle. If specified, the number of elements in this + list must be the same as the number of 'osd-devices'. + cache-devices: + type: string + description: | + A list of devices to act as caching devices for 'bcache', using the + 'osd-devices' as backing. If the number of elements in this list is + less than the number of 'osd-devices', then the caching ones will be + distributed in a round-robin fashion. + partition-size: + type: integer + description: | + The size of the partitions to create for the caching devices. If left + unspecified, then the full size of the devices will be split evenly + across partitions. required: - osd-devices blacklist-add-disk: diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index b725c9b0..6f2f9819 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -23,20 +23,51 @@ import charmhelpers.contrib.storage.linux.ceph as ch_ceph import charmhelpers.core.hookenv as hookenv +from charmhelpers.core.hookenv import function_fail from charmhelpers.core.unitdata import kv +from utils import (PartitionIter, device_size, DeviceError) import ceph_hooks import charms_ceph.utils -def add_device(request, device_path, bucket=None): - charms_ceph.utils.osdize(device_path, hookenv.config('osd-format'), +def add_device(request, device_path, bucket=None, + osd_id=None, part_iter=None): + """Add a new device to be used by the OSD unit. + + :param request: A broker request to notify monitors of changes. + :type request: CephBrokerRq + + :param device_path: The absolute path to the device to be added. + :type device_path: str + + :param bucket: The bucket name in ceph to add the device into, or None. + :type bucket: Option[str, None] + + :param osd_id: The OSD Id to use, or None. + :type osd_id: Option[str, None] + + :param part_iter: The partition iterator that will create partitions on + demand, to service bcache creation, or None, if no + partitions need to be created. 
+ :type part_iter: Option[PartitionIter, None] + """ + if part_iter is not None: + effective_dev = part_iter.create_bcache(device_path) + if not effective_dev: + raise DeviceError( + 'Failed to create bcache for device {}'.format(device_path)) + else: + effective_dev = device_path + + charms_ceph.utils.osdize(effective_dev, hookenv.config('osd-format'), ceph_hooks.get_journal_devices(), hookenv.config('ignore-device-errors'), hookenv.config('osd-encrypt'), hookenv.config('bluestore'), - hookenv.config('osd-encrypt-keymanager')) + hookenv.config('osd-encrypt-keymanager'), + osd_id) # Make it fast! if hookenv.config('autotune'): charms_ceph.utils.tune_dev(device_path) @@ -63,9 +94,10 @@ def add_device(request, device_path, bucket=None): return request -def get_devices(): +def get_devices(key): + """Get a list of the devices passed for this action, for a key.""" devices = [] - for path in hookenv.action_get('osd-devices').split(' '): + for path in (hookenv.action_get(key) or '').split(): path = path.strip() if os.path.isabs(path): devices.append(path) @@ -73,10 +105,83 @@ def get_devices(): return devices +def cache_storage(): + """Return a list of Juju storage for caches.""" + cache_ids = hookenv.storage_list('cache-devices') + return [hookenv.storage_get('location', cid) for cid in cache_ids] + + +def validate_osd_id(osd_id): + """Test that an OSD id is actually valid.""" + if isinstance(osd_id, str): + if osd_id.startswith('osd.'): + osd_id = osd_id[4:] + try: + return int(osd_id) >= 0 + except ValueError: + return False + elif isinstance(osd_id, int): + return osd_id >= 0 + return False + + +def validate_partition_size(psize, devices, caches): + """Test that the cache devices have enough room.""" + sizes = [device_size(cache) for cache in caches] + n_caches = len(caches) + for idx in range(len(devices)): + cache_idx = idx % n_caches + prev = sizes[cache_idx] - psize + if prev < 0: + function_fail('''Cache device {} does not have enough + room to provide {} {}GB partitions'''.format( + caches[cache_idx], (idx + 1) // n_caches, psize)) + sys.exit(1) + sizes[cache_idx] = prev + + if __name__ == "__main__": request = ch_ceph.CephBrokerRq() - for dev in get_devices(): - request = add_device(request=request, - device_path=dev, - bucket=hookenv.action_get("bucket")) + devices = get_devices('osd-devices') + caches = get_devices('cache-devices') or cache_storage() + if caches: + psize = hookenv.action_get('partition-size') + if psize: + validate_partition_size(psize, devices, caches) + + part_iter = PartitionIter(caches, psize, devices) + else: + part_iter = None + + osd_ids = hookenv.action_get('osd-ids') + if osd_ids: + # Validate number and format for OSD ids. 
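# A hedged usage sketch for the new action parameters declared in
# actions.yaml above; the unit name and device paths are illustrative
# placeholders, not values taken from this change:
#
#   juju run-action ceph-osd/0 add-disk \
#       osd-devices='/dev/vdb /dev/vdc' \
#       cache-devices='/dev/nvme0n1' \
#       partition-size=10 \
#       osd-ids='osd.1 osd.2'
#
# Each entry in osd-ids pairs positionally with an osd-device, which is
# what the length and format checks that follow enforce.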
+ osd_ids = osd_ids.split() + if len(osd_ids) != len(devices): + function_fail('The number of osd-ids and osd-devices must match') + sys.exit(1) + for osd_id in osd_ids: + if not validate_osd_id(osd_id): + function_fail('Invalid OSD ID passed: {}'.format(osd_id)) + sys.exit(1) + else: + osd_ids = [None] * len(devices) + + errors = [] + for dev, osd_id in zip(devices, osd_ids): + try: + request = add_device(request=request, + device_path=dev, + bucket=hookenv.action_get("bucket"), + osd_id=osd_id, part_iter=part_iter) + except Exception: + errors.append(dev) + ch_ceph.send_request_if_needed(request, relation='mon') + if errors: + if part_iter is not None: + for error in errors: + part_iter.cleanup(error) + + function_fail('Failed to add devices: {}'.format(','.join(errors))) + sys.exit(1) diff --git a/ceph-osd/hooks/utils.py index 8ac8ff1b..26f5f836 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import re import os import socket import subprocess import sys +import time sys.path.append('lib') import charms_ceph.utils as ceph @@ -336,3 +338,229 @@ def parse_osds_arguments(): "explicitly defined OSD IDs", WARNING) return args + + +class DeviceError(Exception): + + """Exception type used to signal errors raised by calling + external commands that manipulate devices. + """ + pass + + +def _check_output(args): + try: + return subprocess.check_output(args).decode('UTF-8') + except subprocess.CalledProcessError as e: + raise DeviceError(str(e)) + + +def _check_call(args): + try: + return subprocess.check_call(args) + except subprocess.CalledProcessError as e: + raise DeviceError(str(e)) + + +def setup_bcache(backing, cache): + """Create a bcache device out of the backing storage and caching device. + + :param backing: The path to the backing device. + :type backing: str + + :param cache: The path to the caching device. + :type cache: str + + :returns: The full path of the newly created bcache device. + :rtype: str + """ + _check_call(['sudo', 'make-bcache', '-B', backing, + '-C', cache, '--writeback']) + + def bcache_name(dev): + rv = _check_output(['lsblk', '-p', '-b', cache, '-J', '-o', 'NAME']) + for x in json.loads(rv)['blockdevices'][0].get('children', []): + if x['name'] != dev: + return x['name'] + + for _ in range(100): + rv = bcache_name(cache) + if rv is not None: + return rv + + # Tell the kernel to refresh the partitions. + time.sleep(0.3) + _check_call(['sudo', 'partprobe']) + + +def get_partition_names(dev): + """Given a raw device, return a set of the partitions it contains. + + :param dev: The path to the device. + :type dev: str + + :returns: A set with the partitions of the passed device. + :rtype: set[str] + """ + rv = _check_output(['lsblk', '-b', dev, '-J', '-p', '-o', 'NAME']) + rv = json.loads(rv)['blockdevices'][0].get('children', {}) + return set(x['name'] for x in rv) + + +def create_partition(cache, size, n_iter): + """Create a partition of a specific size in a device. If needed, + make sure the device has a GPT ready. + + :param cache: The path to the caching device from which to create + the partition. + :type cache: str + + :param size: The size (in GB) of the partition to create. + :type size: int + + :param n_iter: The iteration number. If zero, this function will + also create the GPT on the caching device. + :type n_iter: int + + :returns: The full path of the newly created partition.
+ :rtype: str + """ + if not n_iter: + # In our first iteration, make sure the device has a GPT. + _check_call(['sudo', 'parted', '-s', cache, 'mklabel', 'gpt']) + prev_partitions = get_partition_names(cache) + cmd = ['sudo', 'parted', '-s', cache, 'mkpart', 'primary', + str(n_iter * size) + 'GB', str((n_iter + 1) * size) + 'GB'] + + _check_call(cmd) + for _ in range(100): + ret = get_partition_names(cache) - prev_partitions + if ret: + return next(iter(ret)) + + time.sleep(0.3) + _check_call(['sudo', 'partprobe']) + + raise DeviceError('Failed to create partition') + + +def device_size(dev): + """Compute the size of a device, in GB. + + :param dev: The full path to the device. + :type dev: str + + :returns: The size in GB of the specified device. + :rtype: float + """ + ret = _check_output(['lsblk', '-b', '-d', dev, '-J', '-o', 'SIZE']) + ret = int(json.loads(ret)['blockdevices'][0]['size']) + return ret / (1024 * 1024 * 1024) # Return size in GB. + + +def bcache_remove(bcache, cache_dev): + """Remove a bcache kernel device, given its caching device. + + :param bcache: The path of the bcache device. + :type bcache: str + + :param cache_dev: The caching device used for the bcache name. + :type cache_dev: str + """ + rv = _check_output(['sudo', 'bcache-super-show', cache_dev]) + uuid = None + # Fetch the UUID for the caching device. + for line in rv.split('\n'): + idx = line.find('cset.uuid') + if idx >= 0: + uuid = line[idx + 9:].strip() + break + else: + return + bcache_name = bcache[bcache.rfind('/') + 1:] + with open('/sys/block/{}/bcache/stop'.format(bcache_name), 'wb') as f: + f.write(b'1') + with open('/sys/fs/bcache/{}/stop'.format(uuid), 'wb') as f: + f.write(b'1') + + +def wipe_disk(dev): + """Destroy all data in a specific device, including partition tables.""" + _check_call(['sudo', 'wipefs', '-a', dev]) + + +class PartitionIter: + + """Class used to create partitions iteratively. + + Objects of this type are used to create partitions out of + the specified cache devices, either with a specific size, + or with a size proportional to what is needed.""" + + def __init__(self, caches, psize, devices): + """Construct a partition iterator. + + :param caches: The list of cache devices to use. + :type caches: iterable + + :param psize: The size of the partitions (in GB), or None + :type psize: Option[int, None] + + :param devices: The backing devices. Only used to get their length. + :type devices: iterable + """ + self.caches = [[cache, 0] for cache in caches] + self.idx = 0 + if not psize: + factor = min(1.0, len(caches) / len(devices)) + self.psize = [factor * device_size(cache) for cache in caches] + else: + self.psize = psize + self.created = {} + + def __iter__(self): + return self + + def __next__(self): + """Return a newly created partition. + + The object keeps track of the currently used caching device, + so upon creating a new partition, it will move to the next one, + distributing the load among them in a round-robin fashion. + """ + cache, n_iter = self.caches[self.idx] + size = self.psize + if not isinstance(size, (int, float)): + size = self.psize[self.idx] + + self.caches[self.idx][1] += 1 + self.idx = (self.idx + 1) % len(self.caches) + log('Creating partition in device {} of size {}'.format(cache, size)) + return create_partition(cache, size, n_iter) + + def create_bcache(self, backing): + """Create a bcache device, using the internal caching device, + and an external backing one. + + :param backing: The path to the backing device.
+ :type backing: str + + :returns: The name for the newly created bcache device. + :rtype: str + """ + cache = next(self) + ret = setup_bcache(backing, cache) + if ret is not None: + self.created[backing] = (ret, cache) + log('Bcache device created: {}'.format(cache)) + return ret + + def cleanup(self, device): + args = self.created.get(device) + if not args: + return + + try: + bcache_remove(*args) + except DeviceError: + log('Failed to cleanup bcache device: {}'.format(args[0])) diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index de917a08..643f2e03 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1514,11 +1514,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER): + bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager) + bluestore, key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1528,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1593,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, bluestore, - key_manager) + key_manager, + osd_id) else: cmd = _ceph_disk(dev, osd_format, @@ -1677,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): + key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1689,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use + :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 
'ceph-volume' command and required parameters for @@ -1710,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') + if osd_id is not None: + cmd.extend(['--osd-id', str(osd_id)]) + # On-disk journal volume creation if not osd_journal and not bluestore: journal_lv_type = 'journal' diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 7069f780..cbefb053 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -48,3 +48,8 @@ storage: type: block multiple: range: 0- + cache-devices: + type: block + multiple: + range: 0- + minimum-size: 10G diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 7ce4d17c..cb6913c5 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -42,3 +42,5 @@ git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests +pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index e29b99ef..dd2bb64d 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -54,4 +54,50 @@ def fake_config(key): self.hookenv.relation_set.assert_has_calls([call]) mock_osdize.assert_has_calls([mock.call('/dev/myosddev', None, '', True, True, True, - True)]) + True, None)]) + + piter = add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev']) + mock_create_bcache = mock.MagicMock(side_effect=lambda b: b) + with mock.patch.object(add_disk.PartitionIter, 'create_bcache', + mock_create_bcache) as mock_call: + add_disk.add_device(request, '/dev/myosddev', part_iter=piter) + mock_call.assert_called() + + mock_create_bcache.side_effect = lambda b: None + with mock.patch.object(add_disk.PartitionIter, 'create_bcache', + mock_create_bcache) as mock_call: + with self.assertRaises(add_disk.DeviceError): + add_disk.add_device(request, '/dev/myosddev', part_iter=piter) + + def test_get_devices(self): + self.hookenv.action_get.return_value = '/dev/foo bar' + rv = add_disk.get_devices('') + self.assertEqual(rv, ['/dev/foo']) + self.hookenv.action_get.return_value = None + rv = add_disk.get_devices('') + self.assertEqual(rv, []) + + @mock.patch.object(add_disk, 'device_size') + @mock.patch.object(add_disk, 'function_fail') + def test_validate_psize(self, function_fail, device_size): + caches = {'cache1': 100, 'cache2': 200} + device_size.side_effect = lambda c: caches[c] + function_fail.return_value = None + with self.assertRaises(SystemExit): + add_disk.validate_partition_size( + 60, ['a', 'b', 'c'], list(caches.keys())) + self.assertIsNone(add_disk.validate_partition_size( + 60, ['a', 'b'], list(caches.keys()))) + + def test_cache_storage(self): + self.hookenv.storage_list.return_value = [{'location': 'a', 'key': 1}, + {'location': 'b'}] + self.hookenv.storage_get.side_effect = lambda k, elem: elem.get(k) + rv = add_disk.cache_storage() + self.assertEqual(['a', 'b'], rv) + + def test_validate_osd_id(self): + for elem in ('osd.1', '1', 0, 113): + self.assertTrue(add_disk.validate_osd_id(elem)) + for elem in ('osd.-1', '-3', '???', -100, 3.4, {}): + self.assertFalse(add_disk.validate_osd_id(elem)) diff --git 
a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index 722558b2..f338eb3a 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -15,7 +15,7 @@ import unittest -from unittest.mock import patch +from unittest.mock import patch, mock_open with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -138,3 +138,123 @@ def test_parse_service_ids_with_all(self, mock_function_get): parsed = utils.parse_osds_arguments() self.assertEqual(parsed, expected_id) + + @patch('subprocess.check_call') + @patch('subprocess.check_output') + def test_setup_bcache(self, check_output, check_call): + check_output.return_value = b''' + { + "blockdevices": [ + {"name":"/dev/nvme0n1", + "children": [ + {"name":"/dev/bcache0"} + ] + } + ] + } + ''' + self.assertEqual(utils.setup_bcache('', ''), '/dev/bcache0') + + @patch('subprocess.check_output') + def test_get_partition_names(self, check_output): + check_output.return_value = b''' + { + "blockdevices": [ + {"name":"/dev/sdd", + "children": [ + {"name":"/dev/sdd1"} + ] + } + ] + } + ''' + partitions = utils.get_partition_names('') + self.assertEqual(partitions, set(['/dev/sdd1'])) + # Check for a raw device with no partitions. + check_output.return_value = b''' + {"blockdevices": [{"name":"/dev/sdd"}]} + ''' + self.assertEqual(set(), utils.get_partition_names('')) + + @patch.object(utils, 'get_partition_names') + @patch('subprocess.check_call') + def test_create_partition(self, check_call, get_partition_names): + first_call = True + + def gpn(dev): + nonlocal first_call + if first_call: + first_call = False + return set() + return set(['/dev/nvm0n1p1']) + get_partition_names.side_effect = gpn + partition_name = utils.create_partition('/dev/nvm0n1', 101, 0) + self.assertEqual(partition_name, '/dev/nvm0n1p1') + args = check_call.call_args[0][0] + self.assertIn('/dev/nvm0n1', args) + self.assertIn('101GB', args) + + @patch('subprocess.check_output') + def test_device_size(self, check_output): + check_output.return_value = b''' + { + "blockdevices": [{"size":800166076416}] + } + ''' + self.assertEqual(745, int(utils.device_size(''))) + + @patch('subprocess.check_output') + def test_bcache_remove(self, check_output): + check_output.return_value = b''' + sb.magic ok + sb.first_sector 8 [match] + sb.csum 63F23B706BA0FE6A [match] + sb.version 3 [cache device] + dev.label (empty) + dev.uuid ca4ce5e1-4cf3-4330-b1c9-2c735b14cd0b + dev.sectors_per_block 1 + dev.sectors_per_bucket 1024 + dev.cache.first_sector 1024 + dev.cache.cache_sectors 1562822656 + dev.cache.total_sectors 1562823680 + dev.cache.ordered yes + dev.cache.discard no + dev.cache.pos 0 + dev.cache.replacement 0 [lru] + cset.uuid 424242 + ''' + mo = mock_open() + with patch('builtins.open', mo): + utils.bcache_remove('/dev/bcache0', '/dev/nvme0n1p1') + mo.assert_any_call('/sys/block/bcache0/bcache/stop', 'wb') + mo.assert_any_call('/sys/fs/bcache/424242/stop', 'wb') + + @patch.object(utils, 'create_partition') + @patch.object(utils, 'setup_bcache') + def test_partition_iter(self, setup_bcache, create_partition): + create_partition.side_effect = \ + lambda c, s, n: c + '|' + str(s) + '|' + str(n) + setup_bcache.side_effect = lambda *args: args + piter = utils.PartitionIter(['/dev/nvm0n1', '/dev/nvm0n2'], + 200, ['dev1', 'dev2', 'dev3']) + piter.create_bcache('dev1') + setup_bcache.assert_called_with('dev1', '/dev/nvm0n1|200|0') + setup_bcache.reset_mock() + 
piter.create_bcache('dev2') + setup_bcache.assert_called_with('dev2', '/dev/nvm0n2|200|0') + piter.create_bcache('dev3') + setup_bcache.assert_called_with('dev3', '/dev/nvm0n1|200|1') + + @patch.object(utils, 'device_size') + @patch.object(utils, 'create_partition') + @patch.object(utils, 'setup_bcache') + def test_partition_iter_no_size(self, setup_bcache, create_partition, + device_size): + device_size.return_value = 300 + piter = utils.PartitionIter(['/dev/nvm0n1'], 0, + ['dev1', 'dev2', 'dev3']) + create_partition.side_effect = lambda c, sz, g: sz + + # 300GB across 3 devices, i.e: 100 for each. + self.assertEqual(100, next(piter)) + self.assertEqual(100, next(piter)) From 4c6835c0cc2ecf321b5ff3cb4540bbde19256179 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 18 Feb 2022 12:25:37 -0500 Subject: [PATCH 2344/2699] Fix handling of profile-name The current code: self.profile_name = op.get('crush-profile', profile_name) will only default to profile_name if the 'crush-profile' key doesn't exist in the op dictionary. If the 'crush-profile' key exists and is set to None, the default profile_name is not used. This change will use the default profile_name in both cases. A full charm-helpers sync is done here. Closes-Bug: #1960622 Change-Id: If9749e16eadfab5523d06c82f3899a83b8c6fdc1 --- ceph-mon/hooks/charmhelpers/__init__.py | 17 +-- ceph-mon/hooks/charmhelpers/cli/__init__.py | 13 +-- .../charmhelpers/contrib/charmsupport/nrpe.py | 40 ++++++- .../charmhelpers/contrib/hahelpers/cluster.py | 15 +-- .../contrib/hardening/apache/checks/config.py | 5 +- .../contrib/hardening/audits/apache.py | 8 +- .../contrib/hardening/audits/apt.py | 5 +- .../contrib/hardening/audits/file.py | 3 +- .../charmhelpers/contrib/hardening/harden.py | 13 +-- .../contrib/hardening/host/checks/login.py | 4 +- .../contrib/hardening/host/checks/sysctl.py | 7 +- .../contrib/hardening/mysql/checks/config.py | 7 +- .../contrib/hardening/templating.py | 6 +- .../charmhelpers/contrib/hardening/utils.py | 3 +- .../hooks/charmhelpers/contrib/network/ip.py | 23 +--- .../charmhelpers/contrib/openstack/context.py | 46 ++++---- .../contrib/openstack/keystone.py | 12 +- .../charmhelpers/contrib/openstack/neutron.py | 10 +- .../charmhelpers/contrib/openstack/policyd.py | 46 +------- .../contrib/openstack/templating.py | 27 ++--- .../charmhelpers/contrib/openstack/utils.py | 110 +++++++++++------- .../contrib/storage/linux/ceph.py | 69 +++++------ .../contrib/storage/linux/loopback.py | 10 +- ceph-mon/hooks/charmhelpers/core/hookenv.py | 81 +++++++------ ceph-mon/hooks/charmhelpers/core/host.py | 12 +- .../hooks/charmhelpers/core/services/base.py | 7 +- .../charmhelpers/core/services/helpers.py | 4 +- ceph-mon/hooks/charmhelpers/core/strutils.py | 9 +- .../hooks/charmhelpers/core/templating.py | 11 +- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 29 ++--- ceph-mon/hooks/charmhelpers/fetch/centos.py | 7 +- .../hooks/charmhelpers/fetch/python/debug.py | 2 - .../charmhelpers/fetch/python/packages.py | 14 +-- ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 37 +++--- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 25 +++- 36 files changed, 316 insertions(+), 431 deletions(-) diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py index 1f57ed2a..ddf30450 100644 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ b/ceph-mon/hooks/charmhelpers/__init__.py @@ -14,30 +14,15 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only 
standard libraries. -from __future__ import print_function -from __future__ import absolute_import - import functools import inspect import subprocess -import sys -try: - import six # NOQA:F401 -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py index 74ea7295..2b0c4b7a 100644 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ b/ceph-mon/hooks/charmhelpers/cli/__init__.py @@ -16,9 +16,6 @@ import argparse import sys -import six -from six.moves import zip - import charmhelpers.core.unitdata @@ -149,10 +146,7 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - if six.PY2: - argspec = inspect.getargspec(arguments.func) - else: - argspec = inspect.getfullargspec(arguments.func) + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -177,10 +171,7 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - if six.PY2: - argspec = inspect.getargspec(func) - else: - argspec = inspect.getfullargspec(func) + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8d1753c3..bad7a533 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,6 +28,7 @@ import yaml from charmhelpers.core.hookenv import ( + application_name, config, hook_name, local_unit, @@ -174,7 +175,8 @@ def _locate_cmd(self, check_cmd): if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - command += " " + " ".join(parts[1:]) + safe_args = [shlex.quote(arg) for arg in parts[1:]] + command += " " + " ".join(safe_args) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) + + +def add_deferred_restarts_check(nrpe): + """ + Add NRPE check for services with deferred restarts. 
+ + :param NRPE nrpe: NRPE object to add check to + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Adding deferred restarts nrpe check: {}'.format(shortname)) + nrpe.add_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) + + +def remove_deferred_restarts_check(nrpe): + """ + Remove NRPE check for services with deferred service restarts. + + :param NRPE nrpe: NRPE object to remove check from + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Removing deferred restarts nrpe check: {}'.format(shortname)) + nrpe.remove_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py index f0b629a2..146beba6 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -32,8 +32,6 @@ from socket import gethostname as get_unit_hostname -import six - from charmhelpers.core.hookenv import ( log, relation_ids, @@ -125,16 +123,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) + # - partition with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError: status = None diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 341da9ee..e81a5f0b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,7 +14,6 @@ import os import re -import six import subprocess @@ -95,9 +94,7 @@ def __call__(self): settings = utils.get_settings('apache') ctxt = settings['hardening'] - out = subprocess.check_output(['apache2', '-v']) - if six.PY3: - out = out.decode('utf-8') + out = subprocess.check_output(['apache2', '-v']).decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py index c1537625..31db8f62 100644 
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,8 +15,6 @@ import re import subprocess -import six - from charmhelpers.core.hookenv import ( log, INFO, @@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, six.string_types): + elif isinstance(modules, str): self.modules = [modules] else: self.modules = modules @@ -68,9 +66,7 @@ def ensure_compliance(self): @staticmethod def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']) - if six.PY3: - output = output.decode('utf-8') + output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py index cad7bf73..1b22925b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # required for external apt import -from six import string_types - from charmhelpers.fetch import ( apt_cache, apt_purge @@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): self.pkgs = pkgs.split() else: self.pkgs = pkgs diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py index 257c6351..84cc2494 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -23,7 +23,6 @@ check_call, ) from traceback import format_exc -from six import string_types from stat import ( S_ISGID, S_ISUID @@ -63,7 +62,7 @@ def __init__(self, paths, always_comply=False, *args, **kwargs): """ super(BaseFileAudit, self).__init__(*args, **kwargs) self.always_comply = always_comply - if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + if isinstance(paths, str) or not hasattr(paths, '__iter__'): self.paths = [paths] else: self.paths = paths diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py index 63f21b9c..45ad076d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from collections import OrderedDict from charmhelpers.core.hookenv import ( @@ -53,18 +51,17 @@ def harden(overrides=None): overrides = [] def _harden_inner1(f): - # As this has to be py2.7 compat, we can't use nonlocal. Use a trick - # to capture the dictionary that can then be updated. - _logged = {'done': False} + _logged = False def _harden_inner2(*args, **kwargs): # knock out hardening via a config var; normally it won't get # disabled. 
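# For context, a minimal standalone sketch of the closure pattern the
# following hunk adopts now that py2 compatibility is gone (names here
# are illustrative, not the charm-helpers source):
#
#     def once(f):
#         done = False
#         def wrapper(*args, **kwargs):
#             nonlocal done  # py3-only; replaces the mutable-dict trick
#             if not done:
#                 done = True
#                 return f(*args, **kwargs)
#         return wrapper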
+ nonlocal _logged if _DISABLE_HARDENING_FOR_UNIT_TEST: return f(*args, **kwargs) - if not _logged['done']: + if not _logged: log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged['done'] = True + _logged = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), @@ -74,7 +71,7 @@ def _harden_inner2(*args, **kwargs): if enabled: modules_to_run = [] # modules will always be performed in the following order - for module, func in six.iteritems(RUN_CATALOG): + for module, func in RUN_CATALOG.items(): if module in enabled: enabled.remove(module) modules_to_run.append(func) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py index fe2bc6ef..fd500c8b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types - from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR from charmhelpers.contrib.hardening import utils @@ -41,7 +39,7 @@ def __call__(self): # a string assume it to be octal and turn it into an octal # string. umask = settings['environment']['umask'] - if not isinstance(umask, string_types): + if not isinstance(umask, str): umask = '%s' % oct(umask) ctxt = { diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index f1ea5813..8a57d83d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -15,7 +15,6 @@ import os import platform import re -import six import subprocess from charmhelpers.core.hookenv import ( @@ -183,9 +182,9 @@ def __call__(self): ctxt['sysctl'][key] = d[2] or None - # Translate for python3 - return {'sysctl_settings': - [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]} + return { + 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] + } class SysctlConf(TemplatedFile): diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index a79f33b7..8bf9f36c 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import subprocess from charmhelpers.core.hookenv import ( @@ -82,6 +81,6 @@ class MySQLConfContext(object): """ def __call__(self): settings = utils.get_settings('mysql') - # Translate for python3 - return {'mysql_settings': - [(k, v) for k, v in six.iteritems(settings['security'])]} + return { + 'mysql_settings': [(k, v) for k, v in settings['security'].items()] + } diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py index 5b6765f7..4dee5465 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os -import six from charmhelpers.core.hookenv import ( log, @@ -27,10 +26,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py index 56afa4b6..f93851a9 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py @@ -16,7 +16,6 @@ import grp import os import pwd -import six import yaml from charmhelpers.core.hookenv import ( @@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema): :returns: dictionary of modules config with user overrides applied. """ if overrides: - for k, v in six.iteritems(overrides): + for k, v in overrides.items(): if k in schema: if schema[k] is None: settings[k] = v diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py index b356d64c..de56584d 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 54081f0c..8522641b 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -30,8 +30,6 @@ check_output, CalledProcessError) -import six - import charmhelpers.contrib.storage.linux.ceph as ch_ceph from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( @@ -130,10 +128,7 @@ try: import psutil except ImportError: - if six.PY2: - apt_install('python-psutil', fatal=True) - else: - apt_install('python3-psutil', fatal=True) + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -150,10 +145,7 @@ def ensure_packages(packages): def 
context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) + _missing = [k for k, v in ctxt.items() if v is None or v == ''] if _missing: log('Missing required data: %s' % ' '.join(_missing), level=INFO) @@ -180,7 +172,7 @@ def context_complete(self, ctxt): # Fresh start self.complete = False self.missing_data = [] - for k, v in six.iteritems(ctxt): + for k, v in ctxt.items(): if v is None or v == '': if k not in self.missing_data: self.missing_data.append(k) @@ -1111,10 +1103,14 @@ def get_network_addresses(self): endpoint = resolve_address(net_type) addresses.append((addr, endpoint)) - return sorted(set(addresses)) + # Log the set of addresses to have a trail log and capture if tuples + # change over time in the same unit (LP: #1952414). + sorted_addresses = sorted(set(addresses)) + log('get_network_addresses: {}'.format(sorted_addresses)) + return sorted_addresses def __call__(self): - if isinstance(self.external_ports, six.string_types): + if isinstance(self.external_ports, str): self.external_ports = [self.external_ports] if not self.external_ports or not https(): @@ -1531,9 +1527,9 @@ def __call__(self): continue sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): + for k, v in sub_config.items(): if k == 'sections': - for section, config_list in six.iteritems(v): + for section, config_list in v.items(): log("adding section '%s'" % (section), level=DEBUG) if ctxt[k].get(section): @@ -1887,8 +1883,11 @@ def __call__(self): normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {normalized[port]: bridge for port, bridge in - six.iteritems(portmap) if port in normalized.keys()} + return { + normalized[port]: bridge + for port, bridge in portmap.items() + if port in normalized.keys() + } return None @@ -2291,15 +2290,10 @@ def _get_canonical_name(self, name=None): name = name or socket.gethostname() fqdn = '' - if six.PY2: - exc = socket.error - else: - exc = OSError - try: addrs = socket.getaddrinfo( name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) - except exc: + except OSError: pass else: for addr in addrs: @@ -2416,12 +2410,12 @@ def get_existing_ovs_use_veth(): existing_ovs_use_veth = None # If there is a dhcp_agent.ini file read the current setting if os.path.isfile(DHCP_AGENT_INI): - # config_ini does the right thing and returns None if the setting is - # commented. + # config_ini does the right thing and returns None if the setting + # is commented. existing_ovs_use_veth = ( config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) # Convert to Bool if necessary - if isinstance(existing_ovs_use_veth, six.string_types): + if isinstance(existing_ovs_use_veth, str): return bool_from_string(existing_ovs_use_veth) return existing_ovs_use_veth diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py index d7e02ccd..5775aa44 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright 2017 Canonical Ltd # @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack.context import IdentityServiceContext from charmhelpers.core.hookenv import ( @@ -117,10 +115,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient.auth.identity import v2 from keystoneclient import session except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v2_0 import client from keystoneclient.auth.identity import v2 @@ -151,10 +146,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient import session from keystoneclient.auth.identity import v3 except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v3 import client from keystoneclient.auth import token_endpoint diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py index b41314cb..47772467 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,6 @@ # Various utilities for dealing with Neutron and the renaming from Quantum. -import six from subprocess import check_output from charmhelpers.core.hookenv import ( @@ -349,11 +348,4 @@ def parse_vlan_range_mappings(mappings): Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) - if not _mappings: - return {} - - mappings = {} - for p, r in six.iteritems(_mappings): - mappings[p] = tuple(r.split(':')) - - return mappings + return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py index 6fa06f26..767943c2 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ def __str__(self): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." 
.format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. - - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py index 050f8af5..3b7c6a9f 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py @@ -14,8 +14,6 @@ import os -import six - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, @@ -29,10 +27,7 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -62,7 +57,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in six.itervalues(OPENSTACK_CODENAMES)] + for rel in OPENSTACK_CODENAMES.values()] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' 
              % templates_dir,
@@ -225,10 +220,7 @@ def __init__(self, templates_dir, openstack_release):
             # if this code is running, the object is created pre-install hook.
             # jinja2 shouldn't get touched until the module is reloaded on next
             # hook execution, with proper jinja2 bits successfully imported.
-            if six.PY2:
-                apt_install('python-jinja2')
-            else:
-                apt_install('python3-jinja2')
+            apt_install('python3-jinja2')
 
     def register(self, config_file, contexts, config_template=None):
         """
@@ -318,9 +310,7 @@ def write(self, config_file):
             log('Config not registered: %s' % config_file, level=ERROR)
             raise OSConfigException
 
-        _out = self.render(config_file)
-        if six.PY3:
-            _out = _out.encode('UTF-8')
+        _out = self.render(config_file).encode('UTF-8')
 
         with open(config_file, 'wb') as out:
             out.write(_out)
@@ -331,7 +321,8 @@ def write_all(self):
         """
         Write out all registered config files.
         """
-        [self.write(k) for k in six.iterkeys(self.templates)]
+        for k in self.templates.keys():
+            self.write(k)
 
     def set_release(self, openstack_release):
         """
@@ -347,8 +338,8 @@ def complete_contexts(self):
         Returns a list of context interfaces that yield a complete context.
         '''
         interfaces = []
-        [interfaces.extend(i.complete_contexts())
-         for i in six.itervalues(self.templates)]
+        for i in self.templates.values():
+            interfaces.extend(i.complete_contexts())
         return interfaces
 
     def get_incomplete_context_data(self, interfaces):
@@ -360,7 +351,7 @@ def get_incomplete_context_data(self, interfaces):
         '''
        incomplete_context_data = {}
 
-        for i in six.itervalues(self.templates):
+        for i in self.templates.values():
             for context in i.contexts:
                 for interface in interfaces:
                     related = False
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
index 9cc96d60..c8747c16 100644
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py
@@ -25,7 +25,6 @@
 import itertools
 import functools
 
-import six
 import traceback
 import uuid
 import yaml
@@ -362,6 +361,8 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
+    if src in OPENSTACK_RELEASES:
+        return src
     if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -401,7 +402,7 @@ def get_os_codename_version(vers):
 
 def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     '''Determine OpenStack version number from codename.'''
-    for k, v in six.iteritems(version_map):
+    for k, v in version_map.items():
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -411,7 +412,7 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
 
 def get_os_version_codename_swift(codename):
     '''Determine OpenStack version number of swift from codename.'''
-    for k, v in six.iteritems(SWIFT_CODENAMES):
+    for k, v in SWIFT_CODENAMES.items():
         if k == codename:
             return v[-1]
     e = 'Could not derive swift version for '\
@@ -421,17 +422,17 @@ def get_os_version_codename_swift(codename):
 
 def get_swift_codename(version):
     '''Determine OpenStack codename that corresponds to swift version.'''
-    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v]
     if len(codenames) > 1:
         # If more than one release codename contains this version we determine
         # the actual codename based on the highest available install source.
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. 
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) + else: + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. + + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def some_hook(...): """ def wrap(f): - # py27 compatible nonlocal variable. 
When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 369699fd..2e70a351 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ def add_cache_tier(self, cache_pool, mode): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -649,7 +648,7 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, # we will fail with KeyError if it is not provided. 
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') - self.profile_name = op.get('crush-profile', profile_name) + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num @@ -791,9 +790,7 @@ def enabled_manager_modules(): """ cmd = ['ceph', 'mgr', 'module', 'ls'] try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -828,10 +825,8 @@ def get_mon_map(service): try: octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' - mon_status = check_output(['ceph', '--id', service, - mon_status_cmd, '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -973,9 +968,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1178,8 +1171,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1190,7 +1182,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1203,7 +1195,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1212,7 +1204,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1227,7 +1219,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1255,8 +1247,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. 
:type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1274,7 +1266,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1294,12 +1286,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. :rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1313,9 +1303,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1334,13 +1323,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1357,9 +1344,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1385,7 +1370,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1523,9 +1508,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py index 74bab40e..04daea29 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ check_output, ) -import six - ################################################## # loopback device helpers. 
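[Editor's note] Nearly every hunk in the storage/linux/ceph.py diff above collapses the same four-line PY3 guard into one chained call: on Python 3, check_output() always returns bytes, so the result is decoded inline instead of behind an "if six.PY3:" test. A minimal standalone sketch of the resulting idiom, assuming a working 'rados' binary and keyring (the command mirrors pool_exists() above; the helper name is ours):

    import subprocess

    def list_pools(service):
        # Python 3 only: decode the bytes from check_output() inline,
        # where the removed code wrapped this step in "if six.PY3:".
        out = subprocess.check_output(
            ['rados', '--id', service, 'lspools']).decode('utf-8')
        return out.split()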
@@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py index e94247a2..370c3e8f 100644 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ b/ceph-mon/hooks/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not None: return _cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. + :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. 
+ """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. - - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. 
if not cmd_exists('function-fail'): diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 994ec8a0..2b0a36fb 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -263,7 +262,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +288,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +563,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +966,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1080,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". 
if e.errno == errno.ENOENT: pass diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py index 9f880290..7c37c65c 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ b/ceph-mon/hooks/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py index 3e6e30d2..5bf62dd5 100644 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-mon/hooks/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ def __init__(self, *args): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ def read_context(self, file_name): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py index 28c6b3f5..31366871 100644 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ b/ceph-mon/hooks/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py index 9014015c..cb0213dc 100644 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ b/ceph-mon/hooks/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. 
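[Editor's note] The yaml.load() to yaml.safe_load() changes in core/services/helpers.py above are a behavioural fix as well as a cleanup: safe_load() refuses to construct arbitrary Python objects from tags such as !!python/object, which is the right posture for charm-supplied YAML. A short sketch of the distinction, with an illustrative file name:

    import yaml

    # safe_load() restricts input to plain YAML types (mappings, lists,
    # scalars). A bare yaml.load() without an explicit Loader could
    # instantiate arbitrary objects from attacker-controlled YAML.
    with open('config.yaml') as fp:
        options = yaml.safe_load(fp).get('options', {})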
- Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py index 9497ee05..1283f25b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-mon/hooks/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py index d25587ad..2cb2e88b 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + algorithms = 
hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/ceph-mon/hooks/charmhelpers/fetch/centos.py b/ceph-mon/hooks/charmhelpers/fetch/centos.py index a91dcff0..f8492018 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/centos.py +++ b/ceph-mon/hooks/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/debug.py b/ceph-mon/hooks/charmhelpers/fetch/python/debug.py index 757135ee..dd5cca80 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/python/debug.py +++ b/ceph-mon/hooks/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py index 60048354..93f1fa3f 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py index cf8328f0..e6f8a0ad 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. 
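[Editor's note] fetch/centos.py here and fetch/ubuntu.py below share one contract for package arguments: a bare string names a single package, and anything else is treated as an iterable of names, with isinstance(packages, str) now replacing the old six.string_types check. A reduced sketch of that pattern (the helper name is ours):

    def extend_with_packages(cmd, packages):
        # A single package name may be passed as a plain string;
        # anything else is assumed to be an iterable of names.
        if isinstance(packages, str):
            cmd.append(packages)
        else:
            cmd.extend(packages)
        return cmd

    # e.g. extend_with_packages(['yum', '--assumeyes', 'install'], 'ceph')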
from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. 
Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e1776..6da355fd 100644 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ def __getitem__(self, package): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ def __getitem__(self, package): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like From b874a1adf28894c18bf93fea58a8d86f5ba5987c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 24 Feb 2022 11:41:11 +0100 Subject: [PATCH 2345/2699] Ceph Quincy dropped support for the Civetweb http frontend. Change-Id: I2428cd34110fbc8f7775eb79fe70c34a4eafe3eb --- ceph-radosgw/config.yaml | 3 ++- ceph-radosgw/hooks/ceph_radosgw_context.py | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 422cc903..a7beb36c 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -515,4 +515,5 @@ options: description: | Frontend HTTP engine to use for the Ceph RADOS Gateway; For Octopus and later this defaults to 'beast' and for older releases (and on architectures - where beast is not supported) 'civetweb'. + where beast is not supported) 'civetweb'. Civetweb support is removed at + Ceph Quincy. diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index 38975a48..1f1fdb55 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -183,6 +183,7 @@ def validate_http_frontend(frontend_config): """ mimic_or_later = cmp_pkgrevno('radosgw', '13.2.0') >= 0 pacific_or_later = cmp_pkgrevno('radosgw', '16.2.0') >= 0 + quincy_or_later = cmp_pkgrevno('radosgw', '17.0.0') >= 0 if frontend_config not in SUPPORTED_FRONTENDS: e = ('Please provide either civetweb or beast for ' 'http-frontend configuration') @@ -199,6 +200,10 @@ def validate_http_frontend(frontend_config): 'pacific or later.'.format(arch())) log(e, level=ERROR) raise ValueError(e) + if frontend_config == CIVETWEB_FRONTEND and quincy_or_later: + e = 'Civetweb frontend is not supported after Ceph Pacific.' + log(e, level=ERROR) + raise ValueError(e) class MonContext(context.CephContext): From 2e77537d873ac41b115e2a767d91316c4579d558 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 25 Feb 2022 11:09:30 +0100 Subject: [PATCH 2346/2699] Ensure we restart the correct systemd unit on upgrade. Closes-Bug: #1962296 Depends-On: I7acabfcb28b5faee3d4a6018595d24914db552b2 Change-Id: I8dbd66981f6f668db67454315036ad5557c14c81 --- ceph-mon/lib/charms_ceph/utils.py | 239 +++++++++++++++++------------- 1 file changed, 139 insertions(+), 100 deletions(-) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 4f7ae865..a22462ec 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1,4 +1,4 @@ -# Copyright 2017 Canonical Ltd +# Copyright 2017-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
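[Editor's note] The radosgw change in PATCH 2345 gates each HTTP frontend on the radosgw package version rather than a release name: Beast needs Mimic (13.2.0) or later, and Civetweb disappears at Quincy (17.0.0). A reduced sketch of the same gating, with the package comparison stubbed as a plain version tuple (the real charm calls cmp_pkgrevno() and also carries an architecture check omitted here):

    SUPPORTED_FRONTENDS = ('civetweb', 'beast')

    def validate_http_frontend(frontend_config, radosgw_version):
        # radosgw_version is a (major, minor, patch) tuple standing in
        # for the charm's cmp_pkgrevno('radosgw', ...) comparisons.
        mimic_or_later = radosgw_version >= (13, 2, 0)
        quincy_or_later = radosgw_version >= (17, 0, 0)
        if frontend_config not in SUPPORTED_FRONTENDS:
            raise ValueError('Please provide either civetweb or beast '
                             'for http-frontend configuration')
        if frontend_config == 'beast' and not mimic_or_later:
            raise ValueError('Beast frontend requires Ceph Mimic or later')
        if frontend_config == 'civetweb' and quincy_or_later:
            raise ValueError('Civetweb frontend is not supported '
                             'after Ceph Pacific')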
@@ -293,7 +293,7 @@ def get_link_speed(network_interface): def persist_settings(settings_dict): # Write all settings to /etc/hdparm.conf - """ This will persist the hard drive settings to the /etc/hdparm.conf file + """This will persist the hard drive settings to the /etc/hdparm.conf file The settings_dict should be in the form of {"uuid": {"key":"value"}} @@ -552,7 +552,7 @@ def get_osd_weight(osd_id): :returns: Float :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our ceph command fails. + :raises: CalledProcessError if our Ceph command fails. """ try: tree = str(subprocess @@ -560,7 +560,7 @@ def get_osd_weight(osd_id): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['nodes']: return None for device in json_tree['nodes']: @@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'): def get_osd_tree(service): - """Returns the current osd map in JSON. + """Returns the current OSD map in JSON. :returns: List. :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + Also raises CalledProcessError if our Ceph command fails """ try: tree = str(subprocess @@ -666,12 +666,12 @@ def _get_child_dirs(path): def _get_osd_num_from_dirname(dirname): """Parses the dirname and returns the OSD id. - Parses a string in the form of 'ceph-{osd#}' and returns the osd number + Parses a string in the form of 'ceph-{osd#}' and returns the OSD number from the directory name. :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided + :return int: the OSD number the directory name corresponds to + :raises ValueError: if the OSD number cannot be parsed from the provided directory name. """ match = re.search(r'ceph-(?P\d+)', dirname) @@ -686,7 +686,7 @@ def get_local_osd_ids(): to split the ID off of the directory name and return it in a list. - :returns: list. A list of osd identifiers + :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. 
""" osd_ids = [] @@ -875,12 +875,12 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation ] @@ -984,7 +984,7 @@ def is_osd_disk(dev): def start_osds(devices): - # Scan for ceph block devices + # Scan for Ceph block devices rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): @@ -1264,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None): def upgrade_key_caps(key, caps, pool_list=None): - """ Upgrade key to have capabilities caps """ + """Upgrade key to have capabilities caps""" if not is_leader(): # Not the MON leader OR not clustered return @@ -1298,11 +1298,11 @@ def use_bluestore(): def bootstrap_monitor_cluster(secret): - """Bootstrap local ceph mon into the ceph cluster + """Bootstrap local Ceph mon into the Ceph cluster :param secret: cephx secret to use for monitor authentication :type secret: str - :raises: Exception if ceph mon cannot be bootstrapped + :raises: Exception if Ceph mon cannot be bootstrapped """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1345,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): :type: secret: str :param hostname: hostname of the local unit :type hostname: str - :param path: full path to ceph mon directory + :param path: full path to Ceph mon directory :type path: str - :param done: full path to 'done' marker for ceph mon + :param done: full path to 'done' marker for Ceph mon :type done: str - :param init_marker: full path to 'init' marker for ceph mon + :param init_marker: full path to 'init' marker for Ceph mon :type init_marker: str """ subprocess.check_call(['ceph-authtool', keyring, @@ -1409,13 +1409,13 @@ def create_keyrings(): owner=ceph_user(), group=ceph_user(), perms=0o400) else: - # NOTE(jamespage): Later ceph releases require explicit + # NOTE(jamespage): Later Ceph releases require explicit # call to ceph-create-keys to setup the # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # ceph releases too. This improves bootstrap + # Ceph releases too. This improves bootstrap # resilience as the charm will wait for # presence of peer units before attempting # to bootstrap. 
Note that charms deploying @@ -1497,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False): def get_devices(name): - """ Merge config and juju storage based devices + """Merge config and Juju storage based devices - :name: THe name of the device type, eg: wal, osd, journal + :name: The name of the device type, e.g.: wal, osd, journal :returns: Set(device names), which are strings """ if config(name): @@ -1514,11 +1514,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER): + bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager) + bluestore, key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1528,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1541,7 +1542,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native ceph block device format + :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1593,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, bluestore, - key_manager) + key_manager, + osd_id) else: cmd = _ceph_disk(dev, osd_format, @@ -1677,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): + key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1689,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use + :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 'ceph-volume' command and required parameters for @@ -1710,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') + if osd_id is not None: + cmd.extend(['--osd-id', str(osd_id)]) + # On-disk journal volume creation if not osd_journal and not bluestore: journal_lv_type = 'journal' @@ -1834,7 +1840,7 @@ def get_conf(variable): Get the value of the given configuration variable from the cluster. - :param variable: ceph configuration variable + :param variable: Ceph configuration variable :returns: str. configured value for provided variable """ @@ -1854,7 +1860,7 @@ def calculate_volume_size(lv_type): :raises KeyError: if invalid lv_type is supplied :returns: int. 
Configured size in megabytes for volume type """ - # lv_type -> ceph configuration option + # lv_type -> Ceph configuration option _config_map = { 'db': 'bluestore_block_db_size', 'wal': 'bluestore_block_wal_size', @@ -1868,7 +1874,7 @@ def calculate_volume_size(lv_type): 'journal': 1024, } - # conversion of ceph config units to MB + # conversion of Ceph config units to MB _units = { 'db': 1048576, # Bytes -> MB 'wal': 1048576, # Bytes -> MB @@ -1901,7 +1907,7 @@ def _luks_uuid(dev): def _initialize_disk(dev, dev_uuid, encrypt=False, key_manager=CEPH_KEY_MANAGER): """ - Initialize a raw block device consuming 100% of the avaliable + Initialize a raw block device consuming 100% of the available disk space. Function assumes that block device has already been wiped. @@ -1998,7 +2004,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an osd. + """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize :param encrypt: bool. Should the OSD directory be encrypted at rest @@ -2068,11 +2074,11 @@ def get_running_osds(): def get_cephfs(service): """List the Ceph Filesystems that exist. - :param service: The service name to run the ceph command under - :returns: list. Returns a list of the ceph filesystems + :param service: The service name to run the Ceph command under + :returns: list. Returns a list of the Ceph filesystems """ if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph + # This command wasn't introduced until 0.86 Ceph return [] try: output = str(subprocess @@ -2151,7 +2157,7 @@ def roll_monitor_cluster(new_version, upgrade_key): sys.exit(1) log('monitor_list: {}'.format(monitor_list)) - # A sorted list of osd unit names + # A sorted list of OSD unit names mon_sorted_list = sorted(monitor_list) # Install packages immediately but defer restarts to when it's our time. @@ -2186,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key): wait_for_all_monitors_to_upgrade(new_version=new_version, upgrade_key=upgrade_key) bootstrap_manager() + + # NOTE(jmcvaughn): + # Nautilus and later binaries use msgr2 by default, but existing + # clusters that have been upgraded from pre-Nautilus will not + # automatically have msgr2 enabled. Without this, Ceph will show + # a warning only (with no impact to operations), but newly added units + # will not be able to join the cluster. Therefore, we ensure it is + # enabled on upgrade for all versions including and after Nautilus + # (to cater for previous charm versions that will not have done this). + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 + if nautilus_or_later: + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + enable_msgr2() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) @@ -2198,7 +2218,7 @@ def noop(): def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current ceph monitor to the new version + """Upgrade the current Ceph monitor to the new version :param new_version: String version to upgrade to. 
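[Editor's note] The bug fixed by PATCH 2346 comes down to systemd unit naming: from Nautilus onward Ceph runs each monitor under a templated unit, so stopping or restarting the bare 'ceph-mon' name no longer touches the running daemon; the charm must address ceph-mon@<hostname> instead. The selection logic from upgrade_monitor() in isolation (nautilus_or_later stands in for the cmp_pkgrevno('ceph-common', '14.0.0') >= 0 check):

    import socket

    def mon_systemd_unit(nautilus_or_later):
        # Nautilus and later install templated per-daemon units;
        # earlier releases use the plain 'ceph-mon' service name.
        if nautilus_or_later:
            return 'ceph-mon@{}'.format(socket.gethostname())
        return 'ceph-mon'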
""" @@ -2206,18 +2226,19 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - + # Needed to differentiate between systemd unit names + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the Ceph source failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2240,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): try: if systemd(): - service_stop('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_stop(systemd_unit) log("restarting ceph-mgr.target maybe: {}" .format(luminous_or_later)) if luminous_or_later: @@ -2271,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): perms=0o755) if systemd(): - service_restart('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_restart(systemd_unit) log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) if luminous_or_later: # due to BUG: #1849874 we have to force a restart to get it to @@ -2288,7 +2317,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the ceph monitor cluster and upgrade. + """Create a lock on the Ceph monitor cluster and upgrade. :param upgrade_key: str. The cephx key to use :param service: str. The cephx id to use @@ -2437,7 +2466,7 @@ class WatchDog(object): allow for other delays. There is a compatibility mode where if the otherside never kicks, then it - simply waits for the compatability timer. + simply waits for the compatibility timer. """ class WatchDogDeadException(Exception): @@ -2572,11 +2601,11 @@ def timed_wait(kicked_at_function, def get_upgrade_position(osd_sorted_list, match_name): - """Return the upgrade position for the given osd. + """Return the upgrade position for the given OSD. - :param osd_sorted_list: Osds sorted + :param osd_sorted_list: OSDs sorted :type osd_sorted_list: [str] - :param match_name: The osd name to match + :param match_name: The OSD name to match :type match_name: str :returns: The position of the name :rtype: int @@ -2585,20 +2614,20 @@ def get_upgrade_position(osd_sorted_list, match_name): for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" + raise ValueError("OSD name '{}' not found in get_upgrade_position list" .format(match_name)) # Edge cases: # 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. +# 2. This assumes that the OSD failure domain is not set to OSD. # It rolls an entire server at a time. 
def roll_osd_cluster(new_version, upgrade_key): """This is tricky to get right so here's what we're going to do. There's 2 possible cases: Either I'm first in line or not. If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. + and test to see if the previous OSD is upgraded yet. TODO: If you're not in the same failure domain it's safe to upgrade 1. Examine all pools and adopt the most strict failure domain policy @@ -2614,7 +2643,7 @@ def roll_osd_cluster(new_version, upgrade_key): log('roll_osd_cluster called with {}'.format(new_version)) my_name = socket.gethostname() osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names + # A sorted list of OSD unit names osd_sorted_list = sorted(osd_tree) log("osd_sorted_list: {}".format(osd_sorted_list)) @@ -2649,7 +2678,7 @@ def roll_osd_cluster(new_version, upgrade_key): def upgrade_osd(new_version, kick_function=None): - """Upgrades the current osd + """Upgrades the current OSD :param new_version: str. The new version to upgrade to """ @@ -2657,15 +2686,15 @@ def upgrade_osd(new_version, kick_function=None): kick_function = noop current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) + status_set("maintenance", "Upgrading OSD") + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( + log("Adding the Ceph sources failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2679,7 +2708,7 @@ def upgrade_osd(new_version, kick_function=None): kick_function() # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart + # directories in the OSD service directory, then simply restart # all of the OSDs at the same time as this will be the fastest # way to update the code on the node. if not dirs_need_ownership_update('osd'): @@ -2694,7 +2723,7 @@ def upgrade_osd(new_version, kick_function=None): # Need to change the ownership of all directories which are not OSD # directories as well. # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. + # and done before mon/OSD. update_owner(CEPH_BASE_DIR, recurse_dirs=False) non_osd_dirs = filter(lambda x: not x == 'osd', os.listdir(CEPH_BASE_DIR)) @@ -2715,12 +2744,12 @@ def upgrade_osd(new_version, kick_function=None): _upgrade_single_osd(osd_num, osd_dir) except ValueError as ex: # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), + log('Could not parse OSD directory %s: %s' % (osd_dir, ex), WARNING) continue except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " + log("Stopping Ceph and upgrading packages failed " "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2747,7 +2776,7 @@ def _upgrade_single_osd(osd_num, osd_dir): def stop_osd(osd_num): """Stops the specified OSD number. 
- :param osd_num: the osd number to stop + :param osd_num: the OSD number to stop """ if systemd(): service_stop('ceph-osd@{}'.format(osd_num)) @@ -2758,7 +2787,7 @@ def stop_osd(osd_num): def start_osd(osd_num): """Starts the specified OSD number. - :param osd_num: the osd number to start. + :param osd_num: the OSD number to start. """ if systemd(): service_start('ceph-osd@{}'.format(osd_num)) @@ -2769,12 +2798,12 @@ def start_osd(osd_num): def disable_osd(osd_num): """Disables the specified OSD number. - Ensures that the specified osd will not be automatically started at the + Ensures that the specified OSD will not be automatically started at the next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be + this method cannot make any guarantees that the specified OSD cannot be started manually. - :param osd_num: the osd id which should be disabled. + :param osd_num: the OSD id which should be disabled. :raises CalledProcessError: if an error occurs invoking the systemd cmd to disable the OSD :raises IOError, OSError: if the attempt to read/remove the ready file in @@ -2814,7 +2843,7 @@ def enable_osd(osd_num): :param osd_num: the osd id which should be enabled. :raises CalledProcessError: if the call to the systemd command issued fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart + :raises IOError: if the attempt to write the ready file in an upstart enabled system fails """ if systemd(): @@ -2822,7 +2851,7 @@ def enable_osd(osd_num): subprocess.check_call(cmd) else: # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' + # upstart script which will only start the OSD if it has a 'ready' # file. Make sure that file exists. ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), 'ready') @@ -2875,7 +2904,7 @@ def get_osd_state(osd_num, osd_goal_state=None): If osd_goal_state is not None, loop until the current OSD state matches the OSD goal state. - :param osd_num: the osd id to get state for + :param osd_num: the OSD id to get state for :param osd_goal_state: (Optional) string indicating state to wait for Defaults to None :returns: Returns a str, the OSD state. @@ -2936,7 +2965,7 @@ def maintain_osd_state(osd_num): Ensures the state of an OSD is the same at the end of a block nested in a with statement as it was at the beginning of the block. - :param osd_num: the osd id to maintain state for + :param osd_num: the OSD id to maintain state for """ osd_state = get_osd_state(osd_num) try: @@ -2963,9 +2992,9 @@ def maintain_all_osd_states(): def list_pools(client='admin'): """This will list the current pools that Ceph has - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Returns a list of available pools. :rtype: list :raises: subprocess.CalledProcessError if the subprocess fails to run. 
@@ -2990,9 +3019,9 @@ def get_pool_param(pool, param, client='admin'): :type pool: str :param param: Name of variable to get :type param: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Value of variable on pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3014,9 +3043,9 @@ def get_pool_erasure_profile(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Erasure code profile of pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3035,9 +3064,9 @@ def get_pool_quota(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with quota variables :rtype: dict :raises: subprocess.CalledProcessError @@ -3060,9 +3089,9 @@ def get_pool_applications(pool='', client='admin'): :param pool: (Optional) Name of pool to get applications for Defaults to get for all pools :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with pool name as key :rtype: dict :raises: subprocess.CalledProcessError @@ -3125,7 +3154,7 @@ def dirs_need_ownership_update(service): necessary due to the upgrade from Hammer to Jewel where the daemon user changes from root: to ceph:. - :param service: the name of the service folder to check (e.g. osd, mon) + :param service: the name of the service folder to check (e.g. OSD, mon) :returns: boolean. True if the directories need a change of ownership, False otherwise. :raises IOError: if an error occurs reading the file stats from one of @@ -3155,7 +3184,7 @@ def dirs_need_ownership_update(service): return False -# A dict of valid ceph upgrade paths. Mapping is old -> new +# A dict of valid Ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), ('hammer', 'jewel'), @@ -3167,7 +3196,7 @@ def dirs_need_ownership_update(service): ('pacific', 'quincy'), ]) -# Map UCA codenames to ceph codenames +# Map UCA codenames to Ceph codenames UCA_CODENAME_MAP = { 'icehouse': 'firefly', 'juno': 'firefly', @@ -3190,24 +3219,24 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for ceph""" + """Pretty print supported upgrade paths for Ceph""" return ["{} -> {}".format(key, value) for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): - """Resolves a version of ceph based on source configuration + """Resolves a version of Ceph based on source configuration based on Ubuntu Cloud Archive pockets. @param: source: source configuration option of charm - :returns: ceph release codename or None if not resolvable + :returns: Ceph release codename or None if not resolvable """ os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) def get_ceph_pg_stat(): - """Returns the result of ceph pg stat. + """Returns the result of 'ceph pg stat'. 
:returns: dict """ @@ -3242,7 +3271,7 @@ def get_ceph_health(): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['overall_status']: return None @@ -3259,7 +3288,7 @@ def get_ceph_health(): def reweight_osd(osd_num, new_weight): """Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed + :param osd_num: the OSD id which should be changed :param new_weight: the new weight for the OSD :returns: bool. True if output looks right, else false. :raises CalledProcessError: if an error occurs invoking the systemd cmd @@ -3286,7 +3315,7 @@ def reweight_osd(osd_num, new_weight): def determine_packages(): """Determines packages for installation. - :returns: list of ceph packages + :returns: list of Ceph packages """ packages = PACKAGES.copy() if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': @@ -3332,6 +3361,16 @@ def bootstrap_manager(): service_restart(unit) +def enable_msgr2(): + """ + Enables msgr2 + + :raises: subprocess.CalledProcessError if the command fails + """ + cmd = ['ceph', 'mon', 'enable-msgr2'] + subprocess.check_call(cmd) + + def osd_noout(enable): """Sets or unsets 'noout' @@ -3355,12 +3394,12 @@ def osd_noout(enable): class OSDConfigSetError(Exception): - """Error occured applying OSD settings.""" + """Error occurred applying OSD settings.""" pass def apply_osd_settings(settings): - """Applies the provided osd settings + """Applies the provided OSD settings Apply the provided settings to all local OSD unless settings are already present. Settings stop being applied on encountering an error. @@ -3385,7 +3424,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error retrieving osd setting: {}".format(out['error']), + log("Error retrieving OSD setting: {}".format(out['error']), level=ERROR) return False current_settings[key] = out[cli_key] @@ -3402,7 +3441,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error applying osd setting: {}".format(out['error']), + log("Error applying OSD setting: {}".format(out['error']), level=ERROR) raise OSDConfigSetError return True @@ -3472,7 +3511,7 @@ def mgr_disable_module(module): def ceph_config_set(name, value, who): - """Set a ceph config option + """Set a Ceph config option :param name: key to set :type name: str @@ -3490,7 +3529,7 @@ def ceph_config_set(name, value, who): def ceph_config_get(name, who): - """Retrieve the value of a ceph config option + """Retrieve the value of a Ceph config option :param name: key to lookup :type name: str From bd87cf18300643cd06718e93c269c4ba1424130e Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Fri, 25 Feb 2022 12:18:47 +0100 Subject: [PATCH 2347/2699] Remove charmcraft workaround and build on all archs Change-Id: I0ba367e4bb181aee76621b3ae3a38dade3b2e8e1 --- ceph-dashboard/charmcraft.yaml | 20 ++++++++++++++------ ceph-dashboard/tests/README.md | 1 - 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml index 72933212..ff59b025 100644 --- a/ceph-dashboard/charmcraft.yaml +++ b/ceph-dashboard/charmcraft.yaml @@ -7,8 +7,7 @@ parts: charm-python-packages: # NOTE(lourot): see # * https://github.com/canonical/charmcraft/issues/551 - # * https://github.com/canonical/charmcraft/issues/632 - - 
setuptools < 58 + - setuptools build-packages: - git @@ -21,7 +20,16 @@ parts: update-ca-certificates bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + - s390x + - ppc64el + - arm64 + run-on: + - name: ubuntu + channel: "20.04" + - name: ubuntu + channel: "21.10" diff --git a/ceph-dashboard/tests/README.md b/ceph-dashboard/tests/README.md index 31363eee..d002a1e4 100644 --- a/ceph-dashboard/tests/README.md +++ b/ceph-dashboard/tests/README.md @@ -10,7 +10,6 @@ Run the smoke tests with: ```bash cd ../ tox -e build -cd tests/ tox -e func-smoke ``` From 4e032422d13b4ed88d3c4870c986baf0d2712d78 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 4 Mar 2022 10:40:16 +0100 Subject: [PATCH 2348/2699] Update the channel names for quincy channels Update the channel for quincy channels; the channels were specified as stable/yoga which causes Juju to pick the stable charm. Fix this and point it at the right track and channel - quincy/edge in this case. Change-Id: I5d68c0c15f3a7ab5ff80ddda0d376d9031cbdb13 --- ceph-mon/tests/bundles/focal-xena.yaml | 30 ++++++++++++------------- ceph-mon/tests/bundles/focal-yoga.yaml | 30 ++++++++++++------------- ceph-mon/tests/bundles/impish-xena.yaml | 30 ++++++++++++------------- ceph-mon/tests/bundles/jammy-yoga.yaml | 30 ++++++++++++------------- 4 files changed, 60 insertions(+), 60 deletions(-) diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index e82685eb..5c0b9967 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -32,19 +32,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge glance-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge cinder-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge placement-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -55,7 +55,7 @@ applications: - '0' - '1' - '2' - channel: stable/yoga + channel: 8.0.19/edge ceph-osd: charm: ch:ceph-osd @@ -69,7 +69,7 @@ applications: - '3' - '4' - '5' - channel: stable/yoga + channel: quincy/edge ceph-mon: charm: ../../ceph-mon.charm @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '9' - channel: stable/yoga + channel: 3.9/edge keystone: expose: True @@ -99,7 +99,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: stable/yoga + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -109,7 +109,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: stable/yoga + channel: yoga/edge glance: expose: True @@ -119,7 +119,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: stable/yoga + channel: yoga/edge cinder: expose: True @@ -131,11 +131,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: stable/yoga + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: stable/yoga + channel: yoga/edge nova-cloud-controller: expose: True @@ -145,7 +145,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: stable/yoga + channel: yoga/edge placement: charm: ch:placement @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: 
stable/yoga + channel: yoga/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 1934967d..8599ac9f 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -32,19 +32,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge glance-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge cinder-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge placement-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -55,7 +55,7 @@ applications: - '0' - '1' - '2' - channel: stable/yoga + channel: 8.0.19/edge ceph-osd: charm: ch:ceph-osd @@ -69,7 +69,7 @@ applications: - '3' - '4' - '5' - channel: stable/yoga + channel: quincy/edge ceph-mon: charm: ../../ceph-mon.charm @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '9' - channel: stable/yoga + channel: 3.9/edge keystone: expose: True @@ -99,7 +99,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: stable/yoga + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -109,7 +109,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: stable/yoga + channel: yoga/edge glance: expose: True @@ -119,7 +119,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: stable/yoga + channel: yoga/edge cinder: expose: True @@ -131,11 +131,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: stable/yoga + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: stable/yoga + channel: yoga/edge nova-cloud-controller: expose: True @@ -145,7 +145,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: stable/yoga + channel: yoga/edge placement: charm: ch:placement @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: stable/yoga + channel: yoga/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml index 670fe090..08c59e30 100644 --- a/ceph-mon/tests/bundles/impish-xena.yaml +++ b/ceph-mon/tests/bundles/impish-xena.yaml @@ -33,19 +33,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge glance-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge cinder-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge placement-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -56,7 +56,7 @@ applications: - '0' - '1' - '2' - channel: stable/yoga + channel: 8.0.19/edge ceph-osd: charm: ch:ceph-osd @@ -70,7 +70,7 @@ applications: - '3' - '4' - '5' - channel: stable/yoga + channel: quincy/edge ceph-mon: charm: ../../ceph-mon.charm @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '9' - channel: stable/yoga + channel: 3.9/edge keystone: expose: True @@ -100,7 +100,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: stable/yoga + 
channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -110,7 +110,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: stable/yoga + channel: yoga/edge glance: expose: True @@ -120,7 +120,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: stable/yoga + channel: yoga/edge cinder: expose: True @@ -132,11 +132,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: stable/yoga + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: stable/yoga + channel: yoga/edge nova-cloud-controller: expose: True @@ -146,7 +146,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: stable/yoga + channel: yoga/edge placement: charm: ch:placement @@ -155,7 +155,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: stable/yoga + channel: yoga/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index 00f207ac..969abd16 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -33,19 +33,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge glance-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge cinder-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge placement-mysql-router: charm: ch:mysql-router - channel: stable/yoga + channel: 8.0.19/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -56,7 +56,7 @@ applications: - '0' - '1' - '2' - channel: stable/yoga + channel: 8.0.19/edge ceph-osd: charm: ch:ceph-osd @@ -70,7 +70,7 @@ applications: - '3' - '4' - '5' - channel: stable/yoga + channel: quincy/edge ceph-mon: charm: ../../ceph-mon.charm @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '9' - channel: stable/yoga + channel: 3.9/edge keystone: expose: True @@ -100,7 +100,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: stable/yoga + channel: yoga/edge nova-compute: charm: ch:nova-compute @@ -110,7 +110,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: stable/yoga + channel: yoga/edge glance: expose: True @@ -120,7 +120,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: stable/yoga + channel: yoga/edge cinder: expose: True @@ -132,11 +132,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: stable/yoga + channel: yoga/edge cinder-ceph: charm: ch:cinder-ceph - channel: stable/yoga + channel: yoga/edge nova-cloud-controller: expose: True @@ -146,7 +146,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: stable/yoga + channel: yoga/edge placement: charm: ch:placement @@ -155,7 +155,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: stable/yoga + channel: yoga/edge prometheus2: # Pin prometheus2 charm version Bug #1891942 From 3bde32290f9fc803228e7004ef1f1cb8ac65b77a Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 23 Feb 2022 19:47:35 -0300 Subject: [PATCH 2349/2699] Enhance the permissions for the removal key In addition to the enabled commands, we also need the OSD command 'safe-to-stop' to fully implement OSD removal. 
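As a sketch of what the broadened key allows (the `osd-removal` cephx id and the `osd ok-to-stop` command are taken from the hunk below; the OSD id is a placeholder), a holder of the key can now ask the cluster directly:

    ceph --id osd-removal osd ok-to-stop <osd-id>

A zero exit status means the OSD can be stopped without making placement groups unavailable.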
Change-Id: I4ff51182148d25f07f5f2de2342cc970ffc1b7d9 --- ceph-mon/hooks/ceph_hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index 927ab8cf..ad880697 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -875,6 +875,7 @@ def osd_relation(relid=None, unit=None): 'allow command "osd crush reweight"', 'allow command "osd purge"', 'allow command "osd destroy"', + 'allow command "osd ok-to-stop"', ]} ) } From a261f7b2fbe172cdb9d70e7d40e406b321ff555e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 9 Mar 2022 14:08:18 +0100 Subject: [PATCH 2350/2699] Ensure that tests work and bundles can accommodate charmhub sources --- ceph-nfs/.gitignore | 1 + ceph-nfs/config.yaml | 2 +- ceph-nfs/src/charm.py | 11 ++-- ceph-nfs/src/ganesha.py | 5 +- ceph-nfs/tests/bundles/focal-pacific.yaml | 13 +++- ceph-nfs/tests/bundles/focal-quincy.yaml | 53 ++++++++++++++++ ...ic.yaml.j2 => local-charm-overlay.yaml.j2} | 0 ceph-nfs/tests/nfs_ganesha.py | 60 +++++++++++++++---- ceph-nfs/tests/tests.yaml | 4 +- 9 files changed, 126 insertions(+), 23 deletions(-) create mode 100644 ceph-nfs/tests/bundles/focal-quincy.yaml rename ceph-nfs/tests/bundles/overlays/{focal-pacific.yaml.j2 => local-charm-overlay.yaml.j2} (100%) diff --git a/ceph-nfs/.gitignore b/ceph-nfs/.gitignore index 3d40c3d6..8beafeb3 100644 --- a/ceph-nfs/.gitignore +++ b/ceph-nfs/.gitignore @@ -6,3 +6,4 @@ lib/* !lib/README.txt *.charm .vscode/settings.json +build diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index 3c53bb4d..2f373e28 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -10,7 +10,7 @@ options: source: type: string - default: ppa:chris.macnaughton/focal-ussuri + default: proposed description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 0301222d..acc5efc2 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -397,6 +397,9 @@ def create_share_action(self, event): allowed_ips = [ip.strip() for ip in allowed_ips.split(',')] client = GaneshaNfs(self.client_name, self.pool_name) export_path = client.create_share(size=share_size, name=name, access_ips=allowed_ips) + if not export_path: + event.fail("Failed to create share, check the log for more details") + return self.peers.trigger_reload() event.set_results({ "message": "Share created", @@ -430,8 +433,8 @@ def grant_access_action(self, event): name = event.params.get('name') address = event.params.get('client') mode = event.params.get('mode') - if mode not in ['r', 'rw']: - event.fail('Mode must be either r (read) or rw (read/write)') + if mode not in ['R', 'RW']: + event.fail('Mode must be either R (read) or RW (read/write)') res = client.grant_access(name, address, mode) if res is not None: event.fail(res) @@ -459,8 +462,8 @@ def revoke_access_action(self, event): @ops_openstack.core.charm_class -class CephNFSCharmOcto(CephNfsCharm): - """Ceph iSCSI Charm for Octopus.""" +class CephNFSCharmPacific(CephNfsCharm): + """Ceph NFS Charm for Pacific.""" _stored = StoredState() release = 'octopus' diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 3b120177..9d9a5618 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -123,7 +123,10 @@ def create_share(self, name: str = None, size: int = None, access_ips[access_ips.index('0.0.0.0/0')] = '0.0.0.0' access_id = 'ganesha-{}'.format(name) - self.export_path = 
self._create_cephfs_share(name, size_in_bytes) + path = self._create_cephfs_share(name, size_in_bytes) + if not path: + return + self.export_path = path export_id = self._get_next_export_id() export = Export( { diff --git a/ceph-nfs/tests/bundles/focal-pacific.yaml b/ceph-nfs/tests/bundles/focal-pacific.yaml index 0f2718ee..0032189c 100644 --- a/ceph-nfs/tests/bundles/focal-pacific.yaml +++ b/ceph-nfs/tests/bundles/focal-pacific.yaml @@ -7,8 +7,11 @@ applications: ceph-nfs: charm: ../../ceph-nfs.charm num_units: 2 + options: + source: proposed ceph-osd: charm: ch:ceph-osd + channel: pacific/edge num_units: 3 storage: osd-devices: '2,10G' @@ -16,6 +19,7 @@ applications: source: cloud:focal-wallaby ceph-mon: charm: ch:ceph-mon + channel: pacific/edge num_units: 3 options: monitor-count: '3' @@ -23,12 +27,17 @@ applications: source: cloud:focal-wallaby ceph-fs: charm: ch:ceph-fs - num_units: 1 + channel: pacific/edge + num_units: 2 + options: + source: cloud:focal-wallaby loadbalancer: - charm: ch:openstack-loadbalancer + charm: ../../../charm-openstack-loadbalancer/openstack-loadbalancer.charm + channel: edge num_units: 3 hacluster: charm: ch:hacluster + channel: 2.0.3/edge options: cluster_count: 3 relations: diff --git a/ceph-nfs/tests/bundles/focal-quincy.yaml b/ceph-nfs/tests/bundles/focal-quincy.yaml new file mode 100644 index 00000000..9cddefd0 --- /dev/null +++ b/ceph-nfs/tests/bundles/focal-quincy.yaml @@ -0,0 +1,53 @@ +local_overlay_enabled: False +series: focal +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + options: + source: proposed + ceph-osd: + charm: ch:ceph-osd + channel: quincy/edge + num_units: 3 + storage: + osd-devices: '2,10G' + options: + source: cloud:focal-yoga + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + monitor-count: '3' + expected-osd-count: 6 + source: cloud:focal-yoga + ceph-fs: + charm: ch:ceph-fs + channel: quincy/edge + num_units: 2 + options: + source: cloud:focal-yoga + loadbalancer: + charm: ../../../charm-openstack-loadbalancer/openstack-loadbalancer.charm + # channel: edge + num_units: 3 + hacluster: + charm: ch:hacluster + channel: 2.0.3/edge + options: + cluster_count: 3 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' + - - ceph-nfs + - loadbalancer + - - 'loadbalancer:ha' + - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 b/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 similarity index 100% rename from ceph-nfs/tests/bundles/overlays/focal-pacific.yaml.j2 rename to ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 037382ef..2545876d 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -17,6 +17,7 @@ import logging import subprocess import tenacity +import time from typing import Dict import unittest import yaml @@ -49,13 +50,15 @@ def tearDown(self): 'name': self.created_share, }) - def _create_share(self, name: str, size: int = 10) -> Dict[str, str]: + def _create_share(self, name: str, size: int = 10, + access_ip: str = '0.0.0.0') -> Dict[str, str]: action = zaza.model.run_action_on_leader( 'ceph-nfs', 'create-share', action_params={ 'name': name, 'size': size, + 'allowed-ips': access_ip, }) self.assertEqual(action.status, 'completed') self.created_share = name @@ -63,7 +66,18 @@ def 
_create_share(self, name: str, size: int = 10) -> Dict[str, str]: logging.debug("Action results: {}".format(results)) return results - def _mount_share(self, unit_name: str, share_ip: str, export_path: str): + def _grant_access(self, share_name: str, access_ip: str, access_mode: str): + action = zaza.model.run_action_on_leader( + 'ceph-nfs', + 'grant-access', + action_params={ + 'name': share_name, + 'client': access_ip, + 'mode': access_mode, + }) + self.assertEqual(action.status, 'completed') + + def _mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: bool = True): ssh_cmd = ( 'sudo mkdir -p {0} && ' 'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format( @@ -71,14 +85,18 @@ def _mount_share(self, unit_name: str, share_ip: str, export_path: str): self.share_protocol, share_ip, export_path)) - - for attempt in tenacity.Retrying( - stop=tenacity.stop_after_attempt(5), - wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)): - with attempt: - zaza.utilities.generic.run_via_ssh( - unit_name=unit_name, - cmd=ssh_cmd) + if retry: + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)): + with attempt: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + else: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) self.mounts_share = True def _install_dependencies(self, unit: str): @@ -106,17 +124,33 @@ def _verify_testing_file_on_instance(self, instance_name: str): self.assertEqual('test\r\n', output) def test_create_share(self): - for unit in ['0', '1']: - self._install_dependencies('ubuntu/{}'.format(unit)) logging.info("Creating a share") + # Todo - enable ACL testing + # ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) + # ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) + # share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) share = self._create_share('test_ganesha_share') + zaza.model.wait_for_application_states(states={ + 'ubuntu': { + "workload-status-message-regex": "^$", + } + }) + for unit in ['0', '1']: + self._install_dependencies('ubuntu/{}'.format(unit)) export_path = share['path'] ip = share['ip'] logging.info("Mounting share on ubuntu units") self._mount_share('ubuntu/0', ip, export_path) - self._mount_share('ubuntu/1', ip, export_path) logging.info("writing to the share on ubuntu/0") self._write_testing_file_on_instance('ubuntu/0') + # Todo - enable ACL testing + # try: + # self._mount_share('ubuntu/1', ip, export_path, retry=False) + # self.fail('Mounting should not have succeeded') + # except: # noqa: E722 + # pass + # self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip, access_mode='RW') + self._mount_share('ubuntu/1', ip, export_path) logging.info("reading from the share on ubuntu/1") self._verify_testing_file_on_instance('ubuntu/1') diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 4052a341..37d4d29b 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -1,9 +1,9 @@ charm_name: ceph-nfs gate_bundles: - - focal-octopuc + - focal-quincy - focal-pacific smoke_bundles: - - focal-octopus + - focal-pacific configure: [] tests: - tests.nfs_ganesha.NfsGaneshaTest From d89602885a71623429a5b48a8ef4594593a0e727 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Thu, 10 Mar 2022 20:29:46 -0300 Subject: [PATCH 2351/2699] Add 'mgr' permissions in removal key 
These additional changes are needed for the 'ok-to-stop' and 'safe-to-destroy' commands to work correctly within OSD units and prevent them from hanging indefinitely. Change-Id: Ic0e1933bcba76126717f439dd5175d1fe835a807 --- ceph-mon/hooks/ceph_hooks.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ad880697..fc2c7676 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -870,13 +870,15 @@ def osd_relation(relid=None, unit=None): caps=ceph.osd_upgrade_caps), 'osd_disk_removal_key': ceph.get_named_key( 'osd-removal', - caps={'mon': [ - 'allow command "osd safe-to-destroy"', - 'allow command "osd crush reweight"', - 'allow command "osd purge"', - 'allow command "osd destroy"', - 'allow command "osd ok-to-stop"', - ]} + caps={ + 'mgr': ['allow r'], + 'mon': [ + 'allow r', + 'allow command "osd crush reweight"', + 'allow command "osd purge"', + 'allow command "osd destroy"', + ] + } ) } From 5e70e9a044c91ce2bde76c6d7c14fb66a9617744 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 11 Mar 2022 08:28:39 +0100 Subject: [PATCH 2352/2699] Point openstack-loadbalancer at charmhub --- ceph-nfs/tests/bundles/focal-pacific.yaml | 4 ++-- ceph-nfs/tests/bundles/focal-quincy.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-nfs/tests/bundles/focal-pacific.yaml b/ceph-nfs/tests/bundles/focal-pacific.yaml index 0032189c..655107a5 100644 --- a/ceph-nfs/tests/bundles/focal-pacific.yaml +++ b/ceph-nfs/tests/bundles/focal-pacific.yaml @@ -32,8 +32,8 @@ applications: options: source: cloud:focal-wallaby loadbalancer: - charm: ../../../charm-openstack-loadbalancer/openstack-loadbalancer.charm - channel: edge + charm: ch:openstack-loadbalancer + channel: latest/edge num_units: 3 hacluster: charm: ch:hacluster diff --git a/ceph-nfs/tests/bundles/focal-quincy.yaml b/ceph-nfs/tests/bundles/focal-quincy.yaml index 9cddefd0..9cd77a27 100644 --- a/ceph-nfs/tests/bundles/focal-quincy.yaml +++ b/ceph-nfs/tests/bundles/focal-quincy.yaml @@ -32,8 +32,8 @@ applications: options: source: cloud:focal-yoga loadbalancer: - charm: ../../../charm-openstack-loadbalancer/openstack-loadbalancer.charm - # channel: edge + charm: ch:openstack-loadbalancer + channel: latest/edge num_units: 3 hacluster: charm: ch:hacluster From 0f2c64d502cc4df220b8cedb27005ce99ea4bec4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 11 Mar 2022 08:28:49 +0100 Subject: [PATCH 2353/2699] Add HA note to readme --- ceph-nfs/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index 23e80084..418ce82c 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -26,6 +26,16 @@ used: This command has granted read-only access to the named share to a specific address: `192.168.0.1`. +## High Availability + +To gain high availability for NFS shares, it is necessary to scale ceph-nfs and relate it to a loadbalancer charm: + + juju add-unit ceph-nfs + juju deploy openstack-loadbalancer loadbalancer --config vip=10.5.0.100 + juju add-relation ceph-nfs loadbalancer + +Once everything settles, your shares will be accessible over the loadbalancer's vip (`10.5.0.100` in this example), and connections will load-balance across backends. 
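+Mounting through the VIP then works like any other NFS mount. As a sketch
+(the address is the example VIP above; replace the placeholder with the
+export path reported by the `create-share` action):
+
+    sudo mount -t nfs -o nfsvers=4.1,proto=tcp 10.5.0.100:<export-path> /mnt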
+ ## Relations TODO: Provide any relations which are provided or required by your charm From 99460416d80e8c91a0b97a6a616125b0fabf7fc9 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 15 Mar 2022 10:05:58 +0100 Subject: [PATCH 2354/2699] Add note about share deletion --- ceph-nfs/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index 418ce82c..1010965c 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -26,6 +26,10 @@ used: This command has granted read-only access to the named share to a specific address: `192.168.0.1`. +It is possible to delete the created share with: + + juju run-action --wait ceph-nfs/0 delete-share name=test-share + ## High Availability To gain high availability for NFS shares, it is necessary to scale ceph-nfs and relate it to a loadbalancer charm: From 627bf5ed4921b5525a0cc27e00df29c5fff339ca Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 15 Mar 2022 18:31:54 +0000 Subject: [PATCH 2355/2699] Drop local-charm-overlay.yaml.j2 and add ZAZA_FEATURE_BUG472=1 The overlay duplicates what is already in focal.yaml, and it is causing the gate deployment to fail as follows: ERROR cannot deploy bundle: the provided bundle has the following errors: charm path in application "ceph-dashboard" does not exist: /ceph-dashboard.charm Additionally this sets ZAZA_FEATURE_BUG472=1 for test runs as deployments cannot get a public address otherwise: https://github.com/openstack-charmers/zaza/issues/472 Depends-On: https://review.opendev.org/c/openstack/charm-ceph-dashboard/+/830967 Change-Id: I2b65cbb9c8d5c8b978b1474ac216a527bea09cb5 --- .../tests/bundles/overlays/local-charm-overlay.yaml.j2 | 3 --- ceph-dashboard/tox.ini | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 diff --git a/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 deleted file mode 100644 index 64c91cf5..00000000 --- a/ceph-dashboard/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ /dev/null @@ -1,3 +0,0 @@ -applications: - ceph-dashboard: - charm: ../../ceph-dashboard.charm diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index f49c50d8..5926de88 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -28,6 +28,7 @@ minversion = 3.2.0 setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} + ZAZA_FEATURE_BUG472=1 install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} From e3a79b711561746841e8e30b2543402f3d4befc1 Mon Sep 17 00:00:00 2001 From: Aurelien Lourot Date: Mon, 7 Feb 2022 15:24:07 +0100 Subject: [PATCH 2356/2699] Migrate to charmhub Change-Id: I2caffbca2b6a1a6661831c6771fe4b7d45deddd3 --- ceph-iscsi/build-requirements.txt | 8 +++++- ceph-iscsi/charmcraft.yaml | 27 +++++++++++++++++++ ceph-iscsi/osci.yaml | 8 +++--- ceph-iscsi/rename.sh | 13 +++++++++ ceph-iscsi/tests/README.md | 19 +++++++++++++ ceph-iscsi/tests/bundles/focal-ec.yaml | 17 ++++++++---- ceph-iscsi/tests/bundles/focal.yaml | 17 ++++++++---- .../overlays/local-charm-overlay.yaml.j2 | 3 --- ceph-iscsi/tests/tests.yaml | 4 +-- ceph-iscsi/tox.ini | 17 ++++++++++-- .../unit_tests/test_ceph_iscsi_charm.py | 14 +++------- 11 files changed, 115 insertions(+), 32 deletions(-) create mode 100644 ceph-iscsi/charmcraft.yaml create mode 100755 ceph-iscsi/rename.sh create mode 100644 ceph-iscsi/tests/README.md delete mode 100644 
ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt index 271d8955..b6d2452f 100644 --- a/ceph-iscsi/build-requirements.txt +++ b/ceph-iscsi/build-requirements.txt @@ -1 +1,7 @@ -git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml new file mode 100644 index 00000000..72933212 --- /dev/null +++ b/ceph-iscsi/charmcraft.yaml @@ -0,0 +1,27 @@ +type: charm + +parts: + charm: + after: + - update-certificates + charm-python-packages: + # NOTE(lourot): see + # * https://github.com/canonical/charmcraft/issues/551 + # * https://github.com/canonical/charmcraft/issues/632 + - setuptools < 58 + build-packages: + - git + + update-certificates: + plugin: nil + # See https://github.com/canonical/charmcraft/issues/658 + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index d55a58f8..04fd9c79 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -1,6 +1,7 @@ - project: templates: - - charm-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 check: jobs: - ceph-iscsi-focal-octopus @@ -13,11 +14,10 @@ name: ceph-iscsi-focal-octopus parent: func-target dependencies: + - charm-build - osci-lint - - tox-py35 - - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 vars: tox_extra_args: focal - job: diff --git a/ceph-iscsi/rename.sh b/ceph-iscsi/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-iscsi/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-iscsi/tests/README.md b/ceph-iscsi/tests/README.md new file mode 100644 index 00000000..31363eee --- /dev/null +++ b/ceph-iscsi/tests/README.md @@ -0,0 +1,19 @@ +# Overview + +This directory provides Zaza test definitions and bundles to verify basic +deployment functionality from the perspective of this charm, its requirements +and its features, as exercised in a subset of the full OpenStack deployment +test bundle topology. + +Run the smoke tests with: + +```bash +cd ../ +tox -e build +cd tests/ +tox -e func-smoke +``` + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](https://docs.openstack.org/charm-guide/latest/reference/testing.html#functional-testing) +section of the OpenStack Charm Guide. 
diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml index 1dc9b338..a381c85d 100644 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -36,11 +36,13 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + # Use proposed until fix for #1883112 is backported + source: distro-proposed to: - '0' - '1' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' @@ -53,8 +55,9 @@ applications: - '11' - '12' - '13' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -62,20 +65,24 @@ applications: - '3' - '4' - '5' + channel: latest/edge vault: num_units: 1 - charm: cs:~openstack-charmers-next/vault + charm: ch:vault to: - '6' + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 to: - '8' - '9' - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge relations: - - 'ceph-mon:client' - 'ceph-iscsi:ceph-client' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 2ee6e3cd..ee42dbf4 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -35,13 +35,15 @@ applications: num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata + # Use proposed until fix for #1883112 is backported + source: distro-proposed to: - '0' - '1' - '16' - '17' ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 6 storage: osd-devices: 'cinder,10G' @@ -54,8 +56,9 @@ applications: - '11' - '12' - '13' + channel: latest/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' @@ -63,20 +66,24 @@ applications: - '3' - '4' - '5' + channel: latest/edge vault: num_units: 1 - charm: cs:~openstack-charmers-next/vault + charm: ch:vault to: - '6' + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 to: - '8' - '9' - '10' + channel: latest/edge vault-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge relations: - - 'ceph-mon:client' - 'ceph-iscsi:ceph-client' diff --git a/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 deleted file mode 100644 index 5cbfaf2b..00000000 --- a/ceph-iscsi/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ /dev/null @@ -1,3 +0,0 @@ -applications: - ceph-iscsi: - charm: ../../ceph-iscsi.charm diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index d44445d9..8ab33a0b 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -13,7 +13,7 @@ tests: target_deploy_status: ubuntu: workload-status: active - workload-status-message: '' + workload-status-message-prefix: '' vault: workload-status: blocked - workload-status-message: Vault needs to be initialized + workload-status-message-prefix: Vault needs to be initialized diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 775ea578..6081150b 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -15,8 +15,12 @@ skip_missing_interpreters = False # * It is also 
necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 +# * It is necessary to declare setuptools as a dependency otherwise tox will +# fail very early at not being able to load it. The version pinning is in +# line with `pip.sh`. requires = pip < 20.3 virtualenv < 20.0 + setuptools < 50.0.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.2.0 @@ -27,10 +31,12 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = +allowlist_externals = git add-to-archive.py bash + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt @@ -54,6 +60,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -101,7 +112,9 @@ commands = {posargs} basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = - charmcraft build + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh [testenv:func-noop] basepython = python3 diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 9a37bac6..850e9997 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -195,14 +195,6 @@ def add_base_cluster_relation(self): self.harness.add_relation_unit( rel_id, 'ceph-iscsi/1') - self.harness.update_relation_data( - rel_id, - 'ceph-iscsi/1', - { - 'ingress-address': '10.0.0.2', - 'gateway_ready': 'True', - 'gateway_fqdn': 'ceph-iscsi-1.example' - }) return rel_id def complete_cluster_relation(self, rel_id): @@ -231,7 +223,8 @@ def add_admin_access_relation(self): @patch('socket.getfqdn') def test_on_create_target_action(self, _getfqdn): _getfqdn.return_value = 'ceph-iscsi-0.example' - self.add_base_cluster_relation() + cluster_rel_id = self.add_base_cluster_relation() + self.complete_cluster_relation(cluster_rel_id) self.harness.begin() action_event = MagicMock() action_event.params = { @@ -276,7 +269,8 @@ def test_on_create_target_action(self, _getfqdn): @patch('socket.getfqdn') def test_on_create_target_action_ec(self, _getfqdn): _getfqdn.return_value = 'ceph-iscsi-0.example' - self.add_base_cluster_relation() + cluster_rel_id = self.add_base_cluster_relation() + self.complete_cluster_relation(cluster_rel_id) self.harness.begin() action_event = MagicMock() action_event.params = { From d03f2f2fd1088a17e2f9e2bebe180a26b29d8aa4 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 16 Mar 2022 16:18:36 -0300 Subject: [PATCH 2357/2699] Update osd-removal permissions for Ceph Pacific For Luminous, read permissions for the mgr were enough, but for Pacific and beyond, we need broader permissions. 
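As a hedged way to confirm the result once this lands (assuming the key is registered under the usual client.osd-removal name):

    ceph auth get client.osd-removal

The output should now include a `caps mgr = "allow *"` line alongside the mon command allowances.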
Change-Id: If9f3934d299a9d118832f54dd88afc920adce959 --- ceph-mon/hooks/ceph_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index fc2c7676..ec437984 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -871,7 +871,7 @@ def osd_relation(relid=None, unit=None): 'osd_disk_removal_key': ceph.get_named_key( 'osd-removal', caps={ - 'mgr': ['allow r'], + 'mgr': ['allow *'], 'mon': [ 'allow r', 'allow command "osd crush reweight"', From deddcda7930dff2939d97e8f61b66444516c621d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Mar 2022 12:27:20 +0100 Subject: [PATCH 2358/2699] Migrate to haproxy interface for VIPs --- ceph-nfs/actions.yaml | 4 - ceph-nfs/config.yaml | 6 +- ceph-nfs/metadata.yaml | 5 +- ceph-nfs/requirements.txt | 2 +- ceph-nfs/src/charm.py | 74 +++++++++++-------- ceph-nfs/src/ganesha.py | 12 +-- ceph-nfs/tests/bundles/focal-pacific.yaml | 10 +-- ceph-nfs/tests/bundles/focal-quincy.yaml | 10 +-- .../overlays/local-charm-overlay.yaml.j2 | 2 +- ceph-nfs/tests/nfs_ganesha.py | 27 ++++--- 10 files changed, 76 insertions(+), 76 deletions(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index a2a9877d..adef1463 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -34,10 +34,6 @@ grant-access: description: IP address or network to change access for type: string default: - mode: - description: Access mode to grant - type: string - default: "RW" revoke-access: description: | diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index 2f373e28..a02c7a10 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -62,5 +62,7 @@ options: type: string default: description: | - VIP to associate with this service. This VIP will only be functional - with a relation to the hacluster charm. \ No newline at end of file + Virtual IP(s) to use to front API services in HA configuration. + . + If multiple networks are being used, a VIP should be provided for each + network, separated by spaces. 
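For illustration of the reworked option (the addresses are hypothetical), one VIP per network is supplied as a single space-separated value:

    juju config ceph-nfs vip='10.5.0.100 10.6.0.100'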
diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 04ccef9c..6f402dc9 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -19,8 +19,9 @@ extra-bindings: requires: ceph-client: interface: ceph-client - loadbalancer: - interface: openstack-loadbalancer + ha: + interface: hacluster + scope: container peers: cluster: interface: ceph-nfs-peer \ No newline at end of file diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt index ea346e73..cda466ad 100644 --- a/ceph-nfs/requirements.txt +++ b/ceph-nfs/requirements.txt @@ -3,4 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack -git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer +git+https://opendev.org/openstack/charm-interface-hacluster#egg=interface_hacluster diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index acc5efc2..e505ecfa 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -12,6 +12,7 @@ https://discourse.charmhub.io/t/4208 """ +import ipaddress import logging import os from pathlib import Path @@ -28,7 +29,7 @@ import interface_ceph_client.ceph_client as ceph_client import interface_ceph_nfs_peer -import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface +import interface_hacluster.ops_ha_interface as ops_ha_interface # TODO: Add the below class functionaity to action / relations from ganesha import GaneshaNfs @@ -159,9 +160,8 @@ def __init__(self, framework): self.peers = interface_ceph_nfs_peer.CephNfsPeers( self, 'cluster') - self.ingress = ops_lb_interface.OSLoadbalancerRequires( - self, - 'loadbalancer') + self.ha = ops_ha_interface.HAServiceRequires(self, 'ha') + self.adapters = CephNFSAdapters( (self.ceph_client, self.peers), contexts=(CephNFSContext(self),), @@ -191,11 +191,8 @@ def __init__(self, framework): self.peers.on.reload_nonce, self.on_reload_nonce) self.framework.observe( - self.ingress.on.lb_relation_ready, - self._request_loadbalancer) - self.framework.observe( - self.ingress.on.lb_configured, - self.render_config) + self.ha.on.ha_ready, + self._configure_hacluster) # Actions self.framework.observe( self.on.create_share_action, @@ -216,15 +213,6 @@ def __init__(self, framework): self.revoke_access_action ) - def _request_loadbalancer(self, _) -> None: - """Send request to create loadbalancer""" - self.ingress.request_loadbalancer( - self.LB_SERVICE_NAME, - self.NFS_PORT, - self.NFS_PORT, - self._get_bind_ip(), - 'tcp') - def _get_bind_ip(self) -> str: """Return the IP to bind the dashboard to""" binding = self.model.get_binding('public') @@ -361,6 +349,18 @@ def setup_ganesha(self, event): logging.error("Failed to setup ganesha index object") event.defer() + def _configure_hacluster(self, _): + vip_config = self.config.get('vip') + if not vip_config: + logging.warn("Cannot setup vips, vip config missing") + return + for vip in vip_config.split(): + self.ha.add_vip('vip', vip) + self.ha.add_systemd_service('ganesha-systemd', 'nfs-ganesha') + self.ha.add_colocation( + self.model.app.name, 'ALWAYS', ['ganesha-vip', 'ganesha-systemd']) + self.ha.bind_resources() + def on_pool_initialised(self, event): try: logging.debug("Restarting Ganesha after pool initialisation") @@ -373,19 +373,34 
@@ def on_reload_nonce(self, _event): logging.info("Reloading Ganesha after nonce triggered reload") subprocess.call(['killall', '-HUP', 'ganesha.nfsd']) + def _get_binding_subnet_map(self): + bindings = {} + for binding_name in self.meta.extra_bindings.keys(): + network = self.model.get_binding(binding_name).network + bindings[binding_name] = [i.subnet for i in network.interfaces] + return bindings + + @property + def vips(self): + return self.config.get('vip').split() + + def _get_space_vip_mapping(self): + bindings = {} + for binding_name, subnets in self._get_binding_subnet_map().items(): + bindings[binding_name] = [ + vip + for subnet in subnets + for vip in self.vips + if ipaddress.ip_address(vip) in subnet] + return bindings + def access_address(self) -> str: """Return the IP to advertise Ganesha on""" binding = self.model.get_binding('public') ingress_address = str(binding.network.ingress_address) - if self.ingress.relations: - lb_response = self.ingress.get_frontend_data() - if lb_response: - lb_config = lb_response[self.LB_SERVICE_NAME] - return [i for d in lb_config.values() for i in d['ip']][0] - else: - return ingress_address - else: - return ingress_address + # Try to get the VIP for the public binding, fall back to ingress on it + return self._get_space_vip_mapping().get( + 'public', [ingress_address])[0] def create_share_action(self, event): if not self.model.unit.is_leader(): @@ -432,10 +447,7 @@ def grant_access_action(self, event): client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') address = event.params.get('client') - mode = event.params.get('mode') - if mode not in ['R', 'RW']: - event.fail('Mode must be either R (read) or RW (read/write)') - res = client.grant_access(name, address, mode) + res = client.grant_access(name, address) if res is not None: event.fail(res) return diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 9d9a5618..196cf827 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -66,15 +66,17 @@ def export_id(self) -> int: def path(self) -> str: return self.export_options['EXPORT']['Path'] - def add_client(self, client: str, mode: str): - if mode not in ['r', 'rw']: - return 'Mode must be either r (read) or rw (read/write)' + def add_client(self, client: str): + mode = "rw" clients_by_mode = self.clients_by_mode + logging.info(f"About to add {client} to {clients_by_mode}") if client not in clients_by_mode[mode.lower()]: clients_by_mode[mode.lower()].append(client) + logging.info(f"new clients_by_mode: to {clients_by_mode}") self.export_options['EXPORT']['CLIENT'] = [] for (mode, clients) in clients_by_mode.items(): if clients: + logging.info(f"Adding {clients} to self.export_options") self.export_options['EXPORT']['CLIENT'].append( {'Access_Type': mode, 'Clients': ', '.join(clients)}) @@ -188,11 +190,11 @@ def delete_share(self, name: str): logging.debug("Removing export file from RADOS") self._rados_rm('ganesha-export-{}'.format(share.export_id)) - def grant_access(self, name: str, client: str, mode: str) -> Optional[str]: + def grant_access(self, name: str, client: str) -> Optional[str]: share = self.get_share(name) if share is None: return 'Share does not exist' - share.add_client(client, mode) + share.add_client(client) export_template = share.to_export() logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) diff --git a/ceph-nfs/tests/bundles/focal-pacific.yaml b/ceph-nfs/tests/bundles/focal-pacific.yaml index 655107a5..faad3034 
100644 --- a/ceph-nfs/tests/bundles/focal-pacific.yaml +++ b/ceph-nfs/tests/bundles/focal-pacific.yaml @@ -31,15 +31,11 @@ applications: num_units: 2 options: source: cloud:focal-wallaby - loadbalancer: - charm: ch:openstack-loadbalancer - channel: latest/edge - num_units: 3 hacluster: charm: ch:hacluster channel: 2.0.3/edge options: - cluster_count: 3 + cluster_count: 2 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' @@ -47,7 +43,5 @@ relations: - 'ceph-mon:osd' - - 'ceph-fs' - 'ceph-mon' - - - ceph-nfs - - loadbalancer - - - 'loadbalancer:ha' + - - 'ceph-nfs:ha' - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/focal-quincy.yaml b/ceph-nfs/tests/bundles/focal-quincy.yaml index 9cd77a27..07c78965 100644 --- a/ceph-nfs/tests/bundles/focal-quincy.yaml +++ b/ceph-nfs/tests/bundles/focal-quincy.yaml @@ -31,15 +31,11 @@ applications: num_units: 2 options: source: cloud:focal-yoga - loadbalancer: - charm: ch:openstack-loadbalancer - channel: latest/edge - num_units: 3 hacluster: charm: ch:hacluster channel: 2.0.3/edge options: - cluster_count: 3 + cluster_count: 2 relations: - - 'ceph-mon:client' - 'ceph-nfs:ceph-client' @@ -47,7 +43,5 @@ relations: - 'ceph-mon:osd' - - 'ceph-fs' - 'ceph-mon' - - - ceph-nfs - - loadbalancer - - - 'loadbalancer:ha' + - - 'ceph-nfs:ha' - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 index fa52dfc1..a8bed22b 100644 --- a/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ b/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 @@ -1,4 +1,4 @@ applications: - loadbalancer: + ceph-nfs: options: vip: '{{ TEST_VIP00 }}' diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 2545876d..577d1094 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -66,18 +66,18 @@ def _create_share(self, name: str, size: int = 10, logging.debug("Action results: {}".format(results)) return results - def _grant_access(self, share_name: str, access_ip: str, access_mode: str): + def _grant_access(self, share_name: str, access_ip: str): action = zaza.model.run_action_on_leader( 'ceph-nfs', 'grant-access', action_params={ 'name': share_name, 'client': access_ip, - 'mode': access_mode, }) self.assertEqual(action.status, 'completed') def _mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: bool = True): + self._install_dependencies(unit_name) ssh_cmd = ( 'sudo mkdir -p {0} && ' 'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format( @@ -126,17 +126,15 @@ def _verify_testing_file_on_instance(self, instance_name: str): def test_create_share(self): logging.info("Creating a share") # Todo - enable ACL testing - # ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) - # ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) - # share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) - share = self._create_share('test_ganesha_share') + ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) + ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) + share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) + # share = self._create_share('test_ganesha_share') zaza.model.wait_for_application_states(states={ 'ubuntu': { "workload-status-message-regex": "^$", } }) - for unit in ['0', '1']: - 
self._install_dependencies('ubuntu/{}'.format(unit)) export_path = share['path'] ip = share['ip'] logging.info("Mounting share on ubuntu units") @@ -144,12 +142,13 @@ def test_create_share(self): logging.info("writing to the share on ubuntu/0") self._write_testing_file_on_instance('ubuntu/0') # Todo - enable ACL testing - # try: - # self._mount_share('ubuntu/1', ip, export_path, retry=False) - # self.fail('Mounting should not have succeeded') - # except: # noqa: E722 - # pass - # self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip, access_mode='RW') + try: + self._mount_share('ubuntu/1', ip, export_path, retry=False) + self.fail('Mounting should not have succeeded') + except: # noqa: E722 + pass + self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip) + self._mount_share('ubuntu/1', ip, export_path) logging.info("reading from the share on ubuntu/1") self._verify_testing_file_on_instance('ubuntu/1') From 631cb8e409c9efc571e293e2d7da886b85060f1e Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 21 Mar 2022 15:00:12 +0100 Subject: [PATCH 2359/2699] ensure that we can delete shares in cephfs --- ceph-nfs/actions.yaml | 4 ++++ ceph-nfs/src/charm.py | 3 ++- ceph-nfs/src/ganesha.py | 14 +++++++++++++- ceph-nfs/tests/nfs_ganesha.py | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index adef1463..0845fa1f 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -59,6 +59,10 @@ delete-share: exist then this action will have no effect. type: string default: + purge: + type: boolean + default: False + description: Delete the backing CephFS share as well. list-shares: description: List all shares that this application is managing # TODO: Update, delete share \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index e505ecfa..bc600351 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -434,7 +434,8 @@ def delete_share_action(self, event): return client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') - client.delete_share(name) + purge = event.params.get('purge') + client.delete_share(name, purge=purge) self.peers.trigger_reload() event.set_results({ "message": "Share deleted", diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 196cf827..b55a2396 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -177,7 +177,7 @@ def list_shares(self) -> List[Export]: logging.warning("Encountered an independently created export") return exports - def delete_share(self, name: str): + def delete_share(self, name: str, purge=False): share = [share for share in self.list_shares() if share.name == name] if share: share = share[0] @@ -189,6 +189,8 @@ def delete_share(self, name: str): self._remove_share_from_index(share.export_id) logging.debug("Removing export file from RADOS") self._rados_rm('ganesha-export-{}'.format(share.export_id)) + if purge: + self._delete_cephfs_share(name) def grant_access(self, name: str, client: str) -> Optional[str]: share = self.get_share(name) @@ -248,6 +250,16 @@ def _dbus_send(self, section: str, action: str, *args): logging.debug("About to call: {}".format(cmd)) return subprocess.check_output(cmd) + def _delete_cephfs_share(self, name: str): + """Delete a CephFS share. 
+ + :param name: String name of the share to delete + """ + self._ceph_subvolume_command( + 'deauthorize', 'ceph-fs', name, + 'ganesha-{name}'.format(name=name)) + self._ceph_subvolume_command('rm', 'ceph-fs', name) + diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 577d1094..a1bbf76a 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -48,6 +48,7 @@ def tearDown(self): 'delete-share', action_params={ 'name': self.created_share, + 'purge': True }) def _create_share(self, name: str, size: int = 10, From 3b3edbb23a85dd00665710e4e6c8a431d97352d2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 22 Mar 2022 08:05:34 +0100 Subject: [PATCH 2360/2699] Add support for resizing a CephFS share This also protects against resizing the share to smaller than the in-use value of the share. --- ceph-nfs/actions.yaml | 12 ++++++++++++ ceph-nfs/src/charm.py | 16 +++++++++++++++- ceph-nfs/src/ganesha.py | 4 ++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/ceph-nfs/actions.yaml b/ceph-nfs/actions.yaml index 0845fa1f..ccb3e744 100644 --- a/ceph-nfs/actions.yaml +++ b/ceph-nfs/actions.yaml @@ -34,6 +34,18 @@ grant-access: description: IP address or network to change access for type: string default: +resize-share: + description: | + Resize a specified share. + params: + name: + description: Name of the share + type: string + default: + size: + description: What size (GB) should the share be + type: integer + default: revoke-access: description: | diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index bc600351..f43556d1 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -200,6 +200,9 @@ def __init__(self, framework): self.framework.observe( self.on.list_shares_action, self.list_shares_action) + self.framework.observe( + self.on.resize_share_action, + self.resize_share_action) self.framework.observe( self.on.delete_share_action, self.delete_share_action @@ -470,7 +473,18 @@ def revoke_access_action(self, event): return self.peers.trigger_reload() event.set_results({ - "message": "Acess revoked", + "message": "Access revoked", }) + + def resize_share_action(self, event): + name = event.params.get('name') + size = event.params.get('size') + if size is None: + event.fail("Size must be set") + client = GaneshaNfs(self.client_name, self.pool_name) + client.resize_share(name=name, size=size) + event.set_results({ + "message": f"{name} is now {size}GB", + }) diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index b55a2396..34a87bd3 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -177,6 +177,10 @@ def list_shares(self) -> List[Export]: logging.warning("Encountered an independently created export") return exports + def resize_share(self, name: str, size: int): + size_in_bytes = size * 1024 * 1024 * 1024 + self._ceph_subvolume_command('resize', 'ceph-fs', name, str(size_in_bytes), '--no_shrink') + def delete_share(self, name: str, purge=False): share = [share for share in self.list_shares() if share.name == name] if share: From 51277d9b85d5a61b6a60c6fc71967bfbf3d0ad00 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 22 Mar 2022 09:04:16 +0100 Subject: [PATCH 2361/2699] Remove functionality for auth-supported Closes-Bug: #1841445 Change-Id: I394d025ff5c0b4a73c6683d67b0949484a5924a1 --- ceph-mon/config.yaml | 12 +++++++++--- ceph-mon/hooks/ceph_hooks.py | 14
+++++++------- ceph-mon/unit_tests/test_ceph_hooks.py | 22 +++++++++++----------- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 9cbfaf95..9b910b35 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -65,10 +65,16 @@ options: type: string default: cephx description: | - Which authentication flavour to use. + [DEPRECATED] Which authentication flavour to use. . - Valid options are "cephx" and "none". If "none" is specified, - keys will still be created and deployed so that it can be + This option no longer has any effect. It's insecure and breaks expected + Ceph functionality when assigned to None. The charm now ignores the + option and always sets auth to cephx. + . + Original description: + . + [DEPRECATED] Valid options are "cephx" and "none". If "none" is + specified, keys will still be created and deployed so that it can be enabled later. monitor-secret: type: string diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index ec437984..826c5d4b 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -184,7 +184,7 @@ def get_ceph_context(): cluster_network = ', '.join(networks) cephcontext = { - 'auth_supported': config('auth-supported'), + 'auth_supported': 'cephx', 'mon_hosts': config('monitor-hosts') or ' '.join(get_mon_hosts()), 'fsid': leader_get('fsid'), 'old_auth': cmp_pkgrevno('ceph', "0.51") < 0, @@ -605,7 +605,7 @@ def _get_ceph_info_from_configs(): public_addr = get_public_addr() rbd_features = get_rbd_features() data = { - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr } if rbd_features: @@ -864,7 +864,7 @@ def osd_relation(relid=None, unit=None): data = { 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr, 'osd_upgrade_key': ceph.get_named_key('osd-upgrade', caps=ceph.osd_upgrade_caps), @@ -996,7 +996,7 @@ def radosgw_relation(relid=None, unit=None): public_addr = get_public_addr() data = { 'fsid': leader_get('fsid'), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr, } key_name = relation_get('key_name', unit=unit, rid=relid) @@ -1026,7 +1026,7 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True): # handle broker requests first to get a updated pool map data = (handle_broker_request(relid, unit, recurse=recurse)) data.update({ - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': get_public_addr(), 'pools': json.dumps(ceph.list_pools_detail(), sort_keys=True), 'broker_requests': json.dumps( @@ -1071,7 +1071,7 @@ def mds_relation_joined(relid=None, unit=None): 'fsid': leader_get('fsid'), '{}_mds_key'.format(mds_name): ceph.get_mds_key(name=mds_name), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'ceph-public-address': public_addr} data.update(handle_broker_request(relid, unit)) relation_set(relation_id=relid, relation_settings=data) @@ -1090,7 +1090,7 @@ def admin_relation_joined(relid=None): mon_hosts = config('monitor-hosts') or ' '.join(get_mon_hosts()) data = {'key': ceph.get_named_key(name=name, caps=ceph.admin_caps), 'fsid': leader_get('fsid'), - 'auth': config('auth-supported'), + 'auth': 'cephx', 'mon_hosts': mon_hosts, } relation_set(relation_id=relid, diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 59a58a52..dc5ff7a2 100644 --- 
a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -78,7 +78,7 @@ def test_get_ceph_context(self, mock_config, mock_config2, mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() - expected = {'auth_supported': False, + expected = {'auth_supported': 'cephx', 'ceph_cluster_network': '', 'ceph_public_network': '', 'cluster_addr': '10.1.0.1', @@ -110,7 +110,7 @@ def test_get_ceph_context_rbd_features(self, mock_config, mock_config2, mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() - expected = {'auth_supported': False, + expected = {'auth_supported': 'cephx', 'ceph_cluster_network': '', 'ceph_public_network': '', 'cluster_addr': '10.1.0.1', @@ -143,7 +143,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2, mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() - expected = {'auth_supported': False, + expected = {'auth_supported': 'cephx', 'ceph_cluster_network': '', 'ceph_public_network': '', 'cluster_addr': '10.1.0.1', @@ -178,7 +178,7 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] ctxt = ceph_hooks.get_ceph_context() - expected = {'auth_supported': False, + expected = {'auth_supported': 'cephx', 'ceph_cluster_network': '', 'ceph_public_network': '', 'cluster_addr': '10.1.0.1', @@ -299,7 +299,7 @@ def test_notify_client(self, relation_id='arelid', relation_settings={ 'key': _get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': _get_public_addr() }) @@ -318,7 +318,7 @@ def test_notify_client(self, relation_id='arelid', relation_settings={ 'key': _get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': _get_public_addr(), 'rbd-features': 42, }) @@ -627,7 +627,7 @@ def test_client_relation(self, relation_id='rel1', relation_settings={ 'key': _get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': _get_public_addr() }) _get_rbd_features.return_value = 42 @@ -637,7 +637,7 @@ def test_client_relation(self, relation_id='rel1', relation_settings={ 'key': _get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': _get_public_addr(), 'rbd-features': 42, }) @@ -689,7 +689,7 @@ def test_client_relation_non_rel_hook(self, relation_set, relation_id='rel1', relation_settings={ 'key': get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': get_public_addr(), 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) @@ -700,7 +700,7 @@ def test_client_relation_non_rel_hook(self, relation_set, relation_id=None, relation_settings={ 'key': get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': get_public_addr(), 'broker-rsp-glance-0': 'AOK', 'broker_rsp': 'AOK'}) @@ -713,7 +713,7 @@ def test_client_relation_non_rel_hook(self, relation_set, relation_id='rel1', relation_settings={ 'key': get_named_key(), - 'auth': False, + 'auth': 'cephx', 'ceph-public-address': get_public_addr()}) @patch.object(ceph_hooks, 'req_already_treated') From 9c9ead57cd452c26ebadd8a492b00fbfcba2c43d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 23 Mar 2022 14:06:12 +0100 Subject: [PATCH 2362/2699] Update readme with ACL and VIP updates --- ceph-nfs/README.md | 11 
++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index 1010965c..bae2b9f2 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -21,9 +21,9 @@ accessible from any machine in the 10.0.0.0-10.0.0.255 network space. To grant access to a new network address, the `grant-access` action should be used: - juju run-action --wait ceph-nfs/0 grant-access name=test-share allowed-ips=192.168.0.10 mode=r + juju run-action --wait ceph-nfs/0 grant-access name=test-share allowed-ips=192.168.0.10 -This command has granted read-only access to the named share to a specific +This command has granted access to the named share to a specific address: `192.168.0.1`. It is possible to delete the created share with: @@ -34,9 +34,10 @@ It is possible to delete the created share with: To gain high availability for NFS shares, it is necessary to scale ceph-nfs and relate it to a loadbalancer charm: - juju add-unit ceph-nfs - juju deploy openstack-loadbalancer loadbalancer --config vip=10.5.0.100 - juju add-relation ceph-nfs loadbalancer + juju add-unit ceph-nfs -n 2 + juju config ceph-nfs vip=10.5.0.100 + juju deploy hacluster + juju add-relation ceph-nfs hacluster Once everything settles, your shares will be accessible over the loadbalancer's vip (`10.5.0.100` in this example), and connections will load-balance across backends. From 97977a16bcfc21ea5e47824713571ca618483635 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 25 Mar 2022 13:53:33 +0100 Subject: [PATCH 2363/2699] Migrate to charmhub handling --- ceph-nfs/build-requirements.txt | 8 +++++++- ceph-nfs/charmcraft.yaml | 26 ++++++++++++++++++++++++++ ceph-nfs/osci.yaml | 13 +++++++++++++ ceph-nfs/rename.sh | 13 +++++++++++++ ceph-nfs/tox.ini | 19 ++++++++++++++++--- 5 files changed, 75 insertions(+), 4 deletions(-) create mode 100644 ceph-nfs/charmcraft.yaml create mode 100755 ceph-nfs/rename.sh diff --git a/ceph-nfs/build-requirements.txt b/ceph-nfs/build-requirements.txt index 271d8955..0fbd084b 100644 --- a/ceph-nfs/build-requirements.txt +++ b/ceph-nfs/build-requirements.txt @@ -1 +1,7 @@ -git+https://github.com/canonical/charmcraft.git@0.10.2#egg=charmcraft +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it spins up a +# container and copies the system's charmcraft snap into it. So the +# charmcraft snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1
\ No newline at end of file diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml new file mode 100644 index 00000000..c8792009 --- /dev/null +++ b/ceph-nfs/charmcraft.yaml @@ -0,0 +1,26 @@ +type: charm + +parts: + charm: + after: + - update-certificates + charm-python-packages: + # NOTE(lourot): see + # * https://github.com/canonical/charmcraft/issues/551 + # * https://github.com/canonical/charmcraft/issues/632 + - setuptools < 58 + build-packages: + - git + + update-certificates: + plugin: nil + # See https://github.com/canonical/charmcraft/issues/658 + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 \ No newline at end of file diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index e5ee05d6..7f46aa73 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -12,6 +12,7 @@ name: focal-pacific parent: func-target dependencies: + charm-build - osci-lint - tox-py35 - tox-py36 @@ -19,3 +20,15 @@ - tox-py38 vars: tox_extra_args: focal-pacific +- job: + name: focal-quincy + parent: func-target + dependencies: + - charm-build + - osci-lint + - tox-py35 + - tox-py36 + - tox-py37 + - tox-py38 + vars: + tox_extra_args: focal-quincy \ No newline at end of file diff --git a/ceph-nfs/rename.sh b/ceph-nfs/rename.sh new file mode 100755 index 00000000..283a01bf --- /dev/null +++ b/ceph-nfs/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm \ No newline at end of file diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini index 52928f32..09f49807 100644 --- a/ceph-nfs/tox.ini +++ b/ceph-nfs/tox.ini @@ -15,8 +15,12 @@ skip_missing_interpreters = False # * It is also necessary to pin virtualenv as a newer virtualenv would still # lead to fetching the latest pip in the func* tox targets, see # https://stackoverflow.com/a/38133283 +# * It is necessary to declare setuptools as a dependency otherwise tox will +# fail very early at not being able to load it. The version pinning is in +# line with `pip.sh`. requires = pip < 20.3 virtualenv < 20.0 + setuptools < 50.0.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.2.0 @@ -27,10 +31,12 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} -whitelist_externals = +allowlist_externals = git add-to-archive.py bash + charmcraft + rename.sh passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/test-requirements.txt @@ -54,6 +60,11 @@ basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -101,7 +112,9 @@ commands = {posargs} basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = - charmcraft build + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh [testenv:func-noop] basepython = python3 @@ -130,4 +143,4 @@ commands = [flake8] # Ignore E902 because the unit_tests directory is missing in the built charm. 
-ignore = E402,E226,E902,W504 +ignore = E402,E226,E902 \ No newline at end of file From 1414f7a5e247c63021322ee7665b6f3b12719742 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 25 Mar 2022 13:57:24 +0100 Subject: [PATCH 2364/2699] Improve consistency This change does a couple of things to improve the consistency of the code: - Replace all uses of Nfs with NFS - Remove repeated instantiation of the GaneshaNFS object - Make name a property of the Export --- ceph-nfs/src/charm.py | 30 ++++++++++++------------- ceph-nfs/src/ganesha.py | 11 ++++----- ceph-nfs/src/interface_ceph_nfs_peer.py | 10 ++++----- 3 files changed, 25 insertions(+), 26 deletions(-) diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index f43556d1..77904182 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -32,7 +32,7 @@ import interface_hacluster.ops_ha_interface as ops_ha_interface # TODO: Add the below class functionaity to action / relations -from ganesha import GaneshaNfs +from ganesha import GaneshaNFS import ops_openstack.adapters import ops_openstack.core @@ -108,7 +108,7 @@ class CephNFSAdapters( } -class CephNfsCharm( +class CephNFSCharm( ops_openstack.plugins.classes.BaseCephClientCharm): """Ceph NFS Base Charm.""" @@ -157,7 +157,7 @@ def __init__(self, framework): self.ceph_client = ceph_client.CephClientRequires( self, 'ceph-client') - self.peers = interface_ceph_nfs_peer.CephNfsPeers( + self.peers = interface_ceph_nfs_peer.CephNFSPeers( self, 'cluster') self.ha = ops_ha_interface.HAServiceRequires(self, 'ha') @@ -242,6 +242,10 @@ def pool_name(self): def client_name(self): return self.app.name + @property + def ganesha_client(self): + return GaneshaNFS(self.client_name, self.pool_name) + def request_ceph_pool(self, event): """Request pools from Ceph cluster.""" if not self.ceph_client.broker_available: @@ -413,8 +417,7 @@ def create_share_action(self, event): name = event.params.get('name') allowed_ips = event.params.get('allowed-ips') allowed_ips = [ip.strip() for ip in allowed_ips.split(',')] - client = GaneshaNfs(self.client_name, self.pool_name) - export_path = client.create_share(size=share_size, name=name, access_ips=allowed_ips) + export_path = self.ganesha_client.create_share(size=share_size, name=name, access_ips=allowed_ips) if not export_path: event.fail("Failed to create share, check the log for more details") return @@ -425,8 +428,7 @@ def create_share_action(self, event): "ip": self.access_address()}) def list_shares_action(self, event): - client = GaneshaNfs(self.client_name, self.pool_name) - exports = client.list_shares() + exports = self.ganesha_client.list_shares() event.set_results({ "exports": [{"id": export.export_id, "name": export.name} for export in exports] }) @@ -435,10 +437,9 @@ def delete_share_action(self, event): if not self.model.unit.is_leader(): event.fail("Share creation needs to be run from the application leader") return - client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') purge = event.params.get('purge') - client.delete_share(name, purge=purge) + self.ganesha_client.delete_share(name, purge=purge) self.peers.trigger_reload() event.set_results({ "message": "Share deleted", @@ -448,10 +449,9 @@ def grant_access_action(self, event): if not self.model.unit.is_leader(): event.fail("Share creation needs to be run from the application leader") return - client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') address = event.params.get('client') - res = client.grant_access(name,
address) + res = self.ganesha_client.grant_access(name, address) if res is not None: event.fail(res) return @@ -464,10 +464,9 @@ def revoke_access_action(self, event): if not self.model.unit.is_leader(): event.fail("Share creation needs to be run from the application leader") return - client = GaneshaNfs(self.client_name, self.pool_name) name = event.params.get('name') address = event.params.get('client') - res = client.revoke_access(name, address) + res = self.ganesha_client.revoke_access(name, address) if res is not None: event.fail(res) return @@ -481,15 +480,14 @@ def resize_share_action(self, event): size = event.params.get('size') if size is None: event.fail("Size must be set") - client = GaneshaNfs(self.client_name, self.pool_name) - client.resize_share(name=name, size=size) + self.ganesha_client.resize_share(name=name, size=size) event.set_results({ "message": f"{name} is now {size}GB", }) @ops_openstack.core.charm_class -class CephNFSCharmPacific(CephNfsCharm): +class CephNFSCharmPacific(CephNFSCharm): """Ceph iSCSI Charm for Pacific.""" _stored = StoredState() diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 34a87bd3..f8560921 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -19,16 +19,12 @@ class Export(object): """Object that encodes and decodes Ganesha export blocks""" - name = None - def __init__(self, export_options: Optional[Dict] = None): if export_options is None: export_options = {} if isinstance(export_options, Export): raise RuntimeError('export_options must be a dictionary') self.export_options = export_options - if self.path: - self.name = self.path.split('/')[-2] if not isinstance(self.export_options['EXPORT']['CLIENT'], list): self.export_options['EXPORT']['CLIENT'] = [self.export_options['EXPORT']['CLIENT']] @@ -38,6 +34,11 @@ def from_export(export: str) -> 'Export': def to_export(self) -> str: return manager.mkconf(self.export_options) + @property + def name(self): + if self.path: + return self.path.split('/')[-2] + @property def export(self): return self.export_options['EXPORT'] @@ -91,7 +92,7 @@ def remove_client(self, client: str): {'Access_Type': mode, 'Clients': ', '.join(clients)}) -class GaneshaNfs(object): +class GaneshaNFS(object): export_index = "ganesha-export-index" export_counter = "ganesha-export-counter" diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index 08ff5469..8e371b3c 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -26,15 +26,15 @@ class DepartedEvent(EventBase): pass -class CephNfsPeerEvents(ObjectEvents): +class CephNFSPeerEvents(ObjectEvents): pool_initialised = EventSource(PoolInitialisedEvent) reload_nonce = EventSource(ReloadNonceEvent) departing = EventSource(DepartedEvent) -class CephNfsPeers(Object): +class CephNFSPeers(Object): - on = CephNfsPeerEvents() + on = CephNFSPeerEvents() _stored = StoredState() def __init__(self, charm, relation_name): @@ -52,7 +52,7 @@ def __init__(self, charm, relation_name): self.on_departed) def on_changed(self, event): - logging.info("CephNfsPeers on_changed") + logging.info("CephNFSPeers on_changed") logging.debug('pool_initialised: {}'.format(self.pool_initialised)) if self.pool_initialised == 'True' and not self._stored.pool_initialised: logging.info("emiting pool initialised") @@ -65,7 +65,7 @@ def on_changed(self, event): self._stored.reload_nonce = self.reload_nonce def on_departed(self, event): - logging.warning("CephNfsPeers on_departed") + 
logging.warning("CephNFSPeers on_departed") if self.this_unit.name == os.getenv('JUJU_DEPARTING_UNIT'): self.on.departing.emit() From b0fc5c318d09c179dea766a849a2588c7c907440 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 28 Mar 2022 12:59:10 +0200 Subject: [PATCH 2365/2699] Update for lint and unit test errors --- ceph-nfs/.flake8 | 2 ++ ceph-nfs/src/charm.py | 3 ++- ceph-nfs/src/ganesha.py | 5 ++-- ceph-nfs/tests/nfs_ganesha.py | 1 - ceph-nfs/unit_tests/test_ceph_nfs_charm.py | 6 ++--- ceph-nfs/unit_tests/test_ganesha.py | 29 +++++++++++----------- 6 files changed, 24 insertions(+), 22 deletions(-) diff --git a/ceph-nfs/.flake8 b/ceph-nfs/.flake8 index 8ef84fcd..293e63b1 100644 --- a/ceph-nfs/.flake8 +++ b/ceph-nfs/.flake8 @@ -7,3 +7,5 @@ exclude: build dist *.egg_info + # Excluded because it is imported almost verbatim from Manila + src/manager.py \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 77904182..126ed3a0 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -417,7 +417,8 @@ def create_share_action(self, event): name = event.params.get('name') allowed_ips = event.params.get('allowed-ips') allowed_ips = [ip.strip() for ip in allowed_ips.split(',')] - export_path = self.ganesha_client.create_share(size=share_size, name=name, access_ips=allowed_ips) + export_path = self.ganesha_client.create_share( + size=share_size, name=name, access_ips=allowed_ips) if not export_path: event.fail("Failed to create share, check the log for more details") return diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index f8560921..9997488a 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -93,7 +93,6 @@ def remove_client(self, client: str): class GaneshaNFS(object): - export_index = "ganesha-export-index" export_counter = "ganesha-export-counter" @@ -261,8 +260,8 @@ def _delete_cephfs_share(self, name: str): :param name: String name of the share to create """ self._ceph_subvolume_command( - 'deauthorize', 'ceph-fs', name, - 'ganesha-{name}'.format(name=name)) + 'deauthorize', 'ceph-fs', name, + 'ganesha-{name}'.format(name=name)) self._ceph_subvolume_command('rm', 'ceph-fs', name) def _create_cephfs_share(self, name: str, size_in_bytes: int = None): diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index a1bbf76a..42bd962f 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -17,7 +17,6 @@ import logging import subprocess import tenacity -import time from typing import Dict import unittest import yaml diff --git a/ceph-nfs/unit_tests/test_ceph_nfs_charm.py b/ceph-nfs/unit_tests/test_ceph_nfs_charm.py index c9893159..6b92a603 100644 --- a/ceph-nfs/unit_tests/test_ceph_nfs_charm.py +++ b/ceph-nfs/unit_tests/test_ceph_nfs_charm.py @@ -12,7 +12,7 @@ from unittest.mock import patch, Mock -from charm import CephNfsCharm +from charm import CephNFSCharm # from ops.model import ActiveStatus from ops.testing import Harness @@ -40,7 +40,7 @@ def patch_all(self): setattr(self, method, self.patch(method)) -class _CephNfsCharm(CephNfsCharm): +class _CephNFSCharm(CephNFSCharm): @staticmethod def get_bluestore_compression(): @@ -58,7 +58,7 @@ class TestCephNFSCharmBase(CharmTestCase): def setUp(self): super().setUp(charm, self.PATCHES) self.harness = Harness( - _CephNfsCharm, + _CephNFSCharm, ) self.addCleanup(self.harness.cleanup) diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py index a1354ffe..2ad40088 100644 --- 
a/ceph-nfs/unit_tests/test_ganesha.py +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -43,35 +43,36 @@ def test_parser(self): def test_add_client(self): export = ganesha.Export.from_export(EXAMPLE_EXPORT) - export.add_client('10.0.0.0/8', 'rw') + export.add_client('10.0.0.0/8') self.assertEqual( export.clients, [{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}]) # adding again shouldn't duplicate export - export.add_client('10.0.0.0/8', 'rw') + export.add_client('10.0.0.0/8') self.assertEqual( export.clients, [{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}]) - export.add_client('192.168.0.0/16', 'r') + export.add_client('192.168.0.0/16') self.assertEqual( export.clients, - [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, - {'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}, - ]) + [{ + 'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16' + }]) def test_remove_client(self): export = ganesha.Export.from_export(EXAMPLE_EXPORT) - export.add_client('10.0.0.0/8', 'rw') - export.add_client('192.168.0.0/16', 'r') + export.add_client('10.0.0.0/8') + export.add_client('192.168.0.0/16') self.assertEqual( export.clients, - [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, - {'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}, - ]) + [{ + 'Access_Type': 'rw', + 'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16' + }]) export.remove_client('0.0.0.0') self.assertEqual( export.clients, - [{'Access_Type': 'r', 'Clients': '192.168.0.0/16'}, - {'Access_Type': 'rw', 'Clients': '10.0.0.0/8'}, - ]) + [ + {'Access_Type': 'rw', 'Clients': '10.0.0.0/8, 192.168.0.0/16'}, + ]) From 981043750a37aa0864079762c5f37b48f8f4ba60 Mon Sep 17 00:00:00 2001 From: Robert Gildein Date: Tue, 29 Mar 2022 16:21:56 +0200 Subject: [PATCH 2366/2699] Add list-crush-rules action This action provides a list of crush rules defined in CEPH clusters. Closes-bug: #1957458 Change-Id: I2a5fdae776e00d869a624e1107ab42cf69bb2f50 --- ceph-mon/actions.yaml | 12 ++ ceph-mon/actions/list-crush-rules | 1 + ceph-mon/actions/list_crush_rules.py | 76 +++++++++ .../test_action_change_osd_weight.py | 1 - .../test_action_list_crush_rules.py | 155 ++++++++++++++++++ 5 files changed, 244 insertions(+), 1 deletion(-) create mode 120000 ceph-mon/actions/list-crush-rules create mode 100755 ceph-mon/actions/list_crush_rules.py create mode 100644 ceph-mon/unit_tests/test_action_list_crush_rules.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index c1ca254c..a33eaa63 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -405,3 +405,15 @@ get-quorum-status: - text - json description: Specify output format (text|json). +list-crush-rules: + description: "List CEPH crush rules" + params: + format: + type: string + enum: + - json + - yaml + - text + default: text + description: "The output format, either json, yaml or text (default)" + additionalProperties: false diff --git a/ceph-mon/actions/list-crush-rules b/ceph-mon/actions/list-crush-rules new file mode 120000 index 00000000..30736b0d --- /dev/null +++ b/ceph-mon/actions/list-crush-rules @@ -0,0 +1 @@ +list_crush_rules.py \ No newline at end of file diff --git a/ceph-mon/actions/list_crush_rules.py b/ceph-mon/actions/list_crush_rules.py new file mode 100755 index 00000000..a28fcc2b --- /dev/null +++ b/ceph-mon/actions/list_crush_rules.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import sys +import yaml +from subprocess import check_output, CalledProcessError + +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, "../hooks")) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_hooks) + + +from charmhelpers.core.hookenv import ( + ERROR, + log, + function_fail, + function_get, + function_set +) + + +def get_list_crush_rules(output_format="text"): + """Get list of Ceph crush rules. + + :param output_format: specify output format + :type output_format: str + :returns: text: list of tuple (<rule_id>, <rule_name>) or + yaml: list of crush rules in yaml format + json: list of crush rules in json format + :rtype: str + """ + crush_rules = check_output(["ceph", "--id", "admin", "osd", "crush", + "rule", "dump", "-f", "json"]).decode("UTF-8") + crush_rules = json.loads(crush_rules) + + if output_format == "text": + return ",".join(["({}, {})".format(rule["rule_id"], rule["rule_name"]) + for rule in crush_rules]) + elif output_format == "yaml": + return yaml.dump(crush_rules) + else: + return json.dumps(crush_rules) + + +def main(): + try: + list_crush_rules = get_list_crush_rules(function_get("format")) + function_set({"message": list_crush_rules}) + except CalledProcessError as error: + log(error, ERROR) + function_fail("List crush rules failed with error: {}".format(error)) + + +if __name__ == "__main__": + main() diff --git a/ceph-mon/unit_tests/test_action_change_osd_weight.py b/ceph-mon/unit_tests/test_action_change_osd_weight.py index fbe26bd7..e0bff653 100644 --- a/ceph-mon/unit_tests/test_action_change_osd_weight.py +++ b/ceph-mon/unit_tests/test_action_change_osd_weight.py @@ -34,5 +34,4 @@ def test_reweight_osd(self, _reweight_osd): osd_num = 4 new_weight = 1.2 action.crush_reweight(osd_num, new_weight) - print(_reweight_osd.calls) _reweight_osd.assert_has_calls([mock.call("4", "1.2")]) diff --git a/ceph-mon/unit_tests/test_action_list_crush_rules.py b/ceph-mon/unit_tests/test_action_list_crush_rules.py new file mode 100644 index 00000000..87f52ecb --- /dev/null +++ b/ceph-mon/unit_tests/test_action_list_crush_rules.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+import json +import yaml + +from actions import list_crush_rules +from test_utils import CharmTestCase + + +class ListCrushRulesTestCase(CharmTestCase): + ceph_osd_crush_rule_dump = b""" + [ + { + "rule_id": 0, + "rule_name": "replicated_rule", + "ruleset": 0, + "type": 1, + "min_size": 1, + "max_size": 10, + "steps": [ + { + "op": "take", + "item": -1, + "item_name": "default" + }, + { + "op": "chooseleaf_firstn", + "num": 0, + "type": "host" + }, + { + "op": "emit" + } + ] + }, + { + "rule_id": 1, + "rule_name": "test-host", + "ruleset": 1, + "type": 1, + "min_size": 1, + "max_size": 10, + "steps": [ + { + "op": "take", + "item": -1, + "item_name": "default" + }, + { + "op": "chooseleaf_firstn", + "num": 0, + "type": "host" + }, + { + "op": "emit" + } + ] + }, + { + "rule_id": 2, + "rule_name": "test-chassis", + "ruleset": 2, + "type": 1, + "min_size": 1, + "max_size": 10, + "steps": [ + { + "op": "take", + "item": -1, + "item_name": "default" + }, + { + "op": "chooseleaf_firstn", + "num": 0, + "type": "chassis" + }, + { + "op": "emit" + } + ] + }, + { + "rule_id": 3, + "rule_name": "test-rack-hdd", + "ruleset": 3, + "type": 1, + "min_size": 1, + "max_size": 10, + "steps": [ + { + "op": "take", + "item": -2, + "item_name": "default~hdd" + }, + { + "op": "chooseleaf_firstn", + "num": 0, + "type": "rack" + }, + { + "op": "emit" + } + ] + } + ] + """ + + def setUp(self): + super(ListCrushRulesTestCase, self).setUp( + list_crush_rules, ["check_output", "function_fail", "function_get", + "function_set"]) + self.function_get.return_value = "json" # format=json + self.check_output.return_value = self.ceph_osd_crush_rule_dump + + def test_getting_list_crush_rules_text_format(self): + """Test getting list of crush rules in text format.""" + self.function_get.return_value = "text" + list_crush_rules.main() + self.function_get.assert_called_once_with("format") + self.function_set.assert_called_once_with( + {"message": "(0, replicated_rule),(1, test-host)," + "(2, test-chassis),(3, test-rack-hdd)"}) + + def test_getting_list_crush_rules_json_format(self): + """Test getting list of crush rules in json format.""" + crush_rules = self.ceph_osd_crush_rule_dump.decode("UTF-8") + crush_rules = json.loads(crush_rules) + self.function_get.return_value = "json" + list_crush_rules.main() + self.function_get.assert_called_once_with("format") + self.function_set.assert_called_once_with( + {"message": json.dumps(crush_rules)}) + + def test_getting_list_crush_rules_yaml_format(self): + """Test getting list of crush rules in yaml format.""" + crush_rules = self.ceph_osd_crush_rule_dump.decode("UTF-8") + crush_rules = json.loads(crush_rules) + self.function_get.return_value = "yaml" + list_crush_rules.main() + self.function_get.assert_called_once_with("format") + self.function_set.assert_called_once_with( + {"message": yaml.dump(crush_rules)}) From cc52141bd6ae64d4e8b8050e4858c218660a2518 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 30 Mar 2022 13:35:04 +0100 Subject: [PATCH 2367/2699] Resolve issue with mod_proxy decoding The Ceph RADOS Gateway uses some unusual URI's for multisite replication; ensure that mod_proxy passes the 'raw' URI down to the radosgw http endpoint so that client and server side signatures continue to match. This seems quite Ceph specific so the template is specialised into the charm rather than updated in charm-helpers. 
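To make the failure mode concrete, here is an illustrative sketch (the bucket instance name is invented, not taken from a real capture). Multisite sync requests address objects whose names embed a colon, and the requesting gateway signs the percent-encoded form of the path:

    # Path as signed and sent by the requesting gateway (illustrative):
    GET /admin/metadata/bucket.instance/mybucket%3Aa5632c6c-2f24-4575.1
    # Without 'nocanon', mod_proxy canonicalises the path before proxying,
    # so radosgw verifies the signature against a different string:
    GET /admin/metadata/bucket.instance/mybucket:a5632c6c-2f24-4575.1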
Change-Id: Iede49ba8904500076d53388345e154a3ed18e761 Closes-Bug: 1966669 --- .../templates/openstack_https_frontend.conf | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 ceph-radosgw/templates/openstack_https_frontend.conf diff --git a/ceph-radosgw/templates/openstack_https_frontend.conf b/ceph-radosgw/templates/openstack_https_frontend.conf new file mode 100644 index 00000000..6463f415 --- /dev/null +++ b/ceph-radosgw/templates/openstack_https_frontend.conf @@ -0,0 +1,37 @@ +{% if endpoints -%} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} +<VirtualHost {{ address }}:{{ ext }}> + ServerName {{ endpoint }} + SSLEngine on + + # This section is based on Mozilla's recommendation + # as the "intermediate" profile as of July 7th, 2020. + # https://wiki.mozilla.org/Security/Server_Side_TLS + SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 + SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + SSLHonorCipherOrder off + + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 + SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + ProxyPass / http://localhost:{{ int }}/ nocanon + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + RequestHeader set X-Forwarded-Proto "https" + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 +</VirtualHost> +{% endfor -%} +<Proxy *> + Order deny,allow + Allow from all +</Proxy> +<Location /> + Order allow,deny + Allow from all +</Location> +{% endif -%} From 8bc24e217c4c68818711dee5072ddb300c3516ce Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 29 Mar 2022 11:44:05 +0200 Subject: [PATCH 2368/2699] Updates to enable jammy and finalise charmcraft builds - Add 22.04 to charmcraft.yaml - Update metadata to include jammy - Remove impish from metadata - ensure that the source is yoga Change-Id: Ibb93704c6d66f522cf112ad115b3a294d7a1eb03 --- ceph-osd/.zuul.yaml | 4 ++-- ceph-osd/charmcraft.yaml | 24 ++++++++++++++++++++---- ceph-osd/config.yaml | 2 +- ceph-osd/metadata.yaml | 2 +- ceph-osd/osci.yaml | 3 +-- ceph-osd/tox.ini | 15 +++++----------- 6 files changed, 30 insertions(+), 20 deletions(-) diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index 0eed1965..1ffc530a 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-ussuri-jobs - - openstack-cover-jobs + - openstack-python3-charm-yoga-jobs + - openstack-cover-jobs \ No newline at end of file diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index ba84f314..d160fc85 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -21,7 +21,23 @@ parts: - README.md bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on:
level. Max is 20. source: type: string - default: + default: yoga description: | Optional configuration to support use of additional sources such as: . diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 5034e854..42aee0be 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -12,7 +12,7 @@ tags: - misc series: - focal -- impish +- jammy description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 2326d25c..3b1c1591 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -1,10 +1,9 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 - charm-xena-functional-jobs - charm-yoga-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-osd - build_type: charmcraft + build_type: charmcraft \ No newline at end of file diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 81fd2492..1b8f281b 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -51,21 +51,11 @@ commands = charmcraft -v build {toxinidir}/rename.sh -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - [testenv:py38] basepython = python3.8 deps = -r{toxinidir}/requirements.txt @@ -76,6 +66,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 79a8d432b118025723a42379aaaeb7f76b1d4fbd Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 19 Nov 2021 18:24:39 -0300 Subject: [PATCH 2369/2699] Implement the 'remove-disk' action This new action allows users to either purge an OSD, or remove it, opening up the possibility of recycling the previous OSD id. In addition, this action will clean up any bcache devices that were created in previous steps. Change-Id: If3566031ba3f02dac0bc86938dcf9e85a66a66f0 Depends-On: Ib959e81833eb2094d02c7bdd507b1c8b7fbcd3db func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/683 --- ceph-osd/actions.yaml | 36 ++ ceph-osd/actions/add_disk.py | 13 +- ceph-osd/actions/remove-disk | 1 + ceph-osd/actions/remove_disk.py | 358 ++++++++++++++++++ ceph-osd/hooks/ceph_hooks.py | 4 + ceph-osd/hooks/utils.py | 197 ++++++++-- ceph-osd/lib/charms_ceph/utils.py | 4 + ceph-osd/templates/ceph.conf | 3 + ceph-osd/unit_tests/test_actions_add_disk.py | 6 +- .../unit_tests/test_actions_remove_disk.py | 136 +++++++ ceph-osd/unit_tests/test_ceph_utils.py | 110 +++++- 11 files changed, 817 insertions(+), 51 deletions(-) create mode 120000 ceph-osd/actions/remove-disk create mode 100755 ceph-osd/actions/remove_disk.py create mode 100644 ceph-osd/unit_tests/test_actions_remove_disk.py diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index f4233aed..56a22c6c 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -149,3 +149,39 @@ get-availability-zone: show-all: type: boolean description: Option to view information for all units. Default is 'false'. 
+remove-disk:
+  description: |
+    Remove disks from Ceph, producing a report afterwards that indicates
+    to the user how to replace them in the closest way possible.
+  params:
+    osd-devices:
+      type: string
+      description: A space-separated list of devices to remove
+    osd-ids:
+      type: string
+      description: |
+        A space-separated list of OSD ids to remove. This parameter is mutually
+        exclusive with the parameter 'osd-devices'.
+    purge:
+      type: boolean
+      description: |
+        Whether to fully purge the OSD or let the id be available for reuse.
+      default: false
+    timeout:
+      type: integer
+      description: |
+        The time in minutes to wait for the OSD to be safe to remove.
+      default: 5
+    force:
+      type: boolean
+      description: |
+        Whether to forcefully remove the OSD even if it's determined to be
+        unsafe to destroy.
+      default: false
+    format:
+      type: string
+      enum:
+        - text
+        - json
+      default: text
+      description: The output format returned for the command.
diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py
index 6f2f9819..57d49fcf 100755
--- a/ceph-osd/actions/add_disk.py
+++ b/ceph-osd/actions/add_disk.py
@@ -61,6 +61,9 @@ def add_device(request, device_path, bucket=None,
     else:
         effective_dev = device_path
 
+    if osd_id is not None and osd_id.startswith('osd.'):
+        osd_id = osd_id[4:]
+
     charms_ceph.utils.osdize(effective_dev, hookenv.config('osd-format'),
                              ceph_hooks.get_journal_devices(),
                              hookenv.config('ignore-device-errors'),
@@ -91,6 +94,14 @@ def add_device(request, device_path, bucket=None,
         }
     )
 
+    if part_iter is not None:
+        # Update the alias map so we can refer to an OSD via the original
+        # device instead of the newly created cache name.
+        aliases = db.get('osd-aliases', {})
+        aliases[device_path] = effective_dev
+        db.set('osd-aliases', aliases)
+        db.flush()
+
     return request
 
 
@@ -183,5 +194,5 @@ def validate_partition_size(psize, devices, caches):
 
     for error in errors:
         part_iter.cleanup(error)
-    function_fail('Failed to add devices: {}', ','.join(errors))
+    function_fail('Failed to add devices: {}'.format(','.join(errors)))
     sys.exit(1)
diff --git a/ceph-osd/actions/remove-disk b/ceph-osd/actions/remove-disk
new file mode 120000
index 00000000..29934df0
--- /dev/null
+++ b/ceph-osd/actions/remove-disk
@@ -0,0 +1 @@
+./remove_disk.py
\ No newline at end of file
diff --git a/ceph-osd/actions/remove_disk.py b/ceph-osd/actions/remove_disk.py
new file mode 100755
index 00000000..7a48cba1
--- /dev/null
+++ b/ceph-osd/actions/remove_disk.py
@@ -0,0 +1,358 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
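+
+# Usage sketch (hypothetical unit and device names; the parameters are the
+# ones declared in actions.yaml above):
+#
+#   juju run-action ceph-osd/0 remove-disk osd-devices=/dev/vdb purge=true
+#   juju run-action ceph-osd/0 remove-disk osd-ids="osd.3 osd.4" format=json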
+
+import datetime
+import errno
+import json
+from math import ceil
+import subprocess
+import sys
+import time
+
+sys.path.append('lib')
+sys.path.append('hooks')
+
+import charmhelpers.core.hookenv as hookenv
+from charmhelpers.core.hookenv import function_fail
+
+import charms_ceph.utils
+from charmhelpers.core.unitdata import kv
+from utils import (get_bcache_names, bcache_remove, device_size,
+                   get_parent_device, remove_lvm, wipefs_safely)
+
+
+def normalize_osd_id(osd_id):
+    """Make sure an OSD id has the form 'osd.<id>'.
+
+    :param osd_id: The OSD id, either a string or the integer ID.
+    :type osd_id: Option[int, str]
+
+    :returns: A string with the form 'osd.<id>'.
+    :rtype: str
+    """
+    if not isinstance(osd_id, str) or not osd_id.startswith('osd.'):
+        osd_id = 'osd.' + str(osd_id)
+    return osd_id
+
+
+def get_device_map():
+    """Get a list of (osd-id, device-path) pairs for every device that
+    is in use by a local OSD.
+
+    :returns: A list of OSD ids and devices.
+    :rtype: list[dict['id', 'path']]
+    """
+    ret = []
+    vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',
+                                     '--format=json'])
+    for osd_id, data in json.loads(vlist.decode('utf8')).items():
+        osd_id = normalize_osd_id(osd_id)
+        for elem in data:
+            for device in elem['devices']:
+                ret.append({'id': osd_id, 'path': device})
+    return ret
+
+
+def map_device_to_id(dev_map, device):
+    """Get the OSD id for a device or bcache name.
+
+    :param dev_map: A map with the same form as that returned by
+                    the function 'get_device_map'.
+    :type dev_map: list[dict['id', 'path']]
+
+    :param device: The path to the device.
+    :type device: str
+
+    :returns: The OSD id in use by the device, if any.
+    :rtype: Option[None, str]
+    """
+    for elem in dev_map:
+        if device == elem['path']:
+            return elem['id']
+
+
+def map_id_to_device(dev_map, osd_id):
+    """Get the device path for an OSD id.
+
+    :param dev_map: A map with the same form as that returned by
+                    the function 'get_device_map'.
+    :type dev_map: list[dict['id', 'path']]
+
+    :param osd_id: The OSD id to check against.
+    :type osd_id: str
+
+    :returns: The device path being used by the OSD id, if any.
+    :rtype: Option[None, str]
+    """
+    for elem in dev_map:
+        if elem['id'] == osd_id:
+            return elem['path']
+
+
+def safe_to_destroy(osd_id):
+    """Test whether an OSD id is safe to destroy per the Ceph cluster."""
+    ret = subprocess.call(['ceph', '--id', 'osd-removal',
+                           'osd', 'safe-to-destroy', osd_id])
+    return ret == 0
+
+
+def safe_to_stop(osd_id):
+    """Test whether an OSD is safe to stop."""
+    ret = subprocess.call(['ceph', '--id', 'osd-removal',
+                           'osd', 'ok-to-stop', osd_id])
+    return ret == 0
+
+
+def reweight_osd(osd_id):
+    """Set the weight of the OSD id to zero."""
+    subprocess.check_call(['ceph', '--id', 'osd-removal',
+                           'osd', 'crush', 'reweight', osd_id, '0'])
+
+
+def destroy(osd_id, purge=False):
+    """Destroy or purge an OSD id."""
+    for _ in range(10):
+        # We might get here before the OSD is marked as down. As such,
+        # retry if the error code is EBUSY.
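+        # (The loop body below retries the destroy/purge call with a short
+        # sleep while Ceph reports EBUSY, and re-raises any other error.)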
+ try: + subprocess.check_call(['ceph', '--id', 'osd-removal', 'osd', + 'purge' if purge else 'destroy', + osd_id, '--yes-i-really-mean-it']) + return + except subprocess.CalledProcessError as e: + if e.returncode != errno.EBUSY: + raise + time.sleep(0.1) + + +class RemoveException(Exception): + """Exception type used to notify of errors for this action.""" + pass + + +class ActionOSD: + + """Class used to encapsulate all the needed information to + perform OSD removal.""" + + def __init__(self, dev_map, dev=None, osd_id=None, aliases={}): + """Construct an action-OSD. + + :param dev_map: A map with the same form as that returned by + the function 'get_device_map'. + :type dev_map: list[dict['id', 'path']] + + :param dev: The device being used by an OSD. + :type dev: Option[None, str] + + :param osd_id: The OSD id. + :type osd_id: Option[None, int, str] + """ + if dev is not None: + if osd_id is not None: + raise RemoveException( + 'osd-ids and osd-devices are mutually exclusive') + elif dev in aliases: + self.alias = dev + self.device = aliases.get(dev) + else: + self.device, self.alias = dev, None + + self.osd_id = map_device_to_id(dev_map, self.device) + self.bcache_backing, self.bcache_caching = \ + get_bcache_names(self.device) + if self.osd_id is None: + raise RemoveException('Device {} is not being used' + .format(self.device)) + else: + self.alias = None + self.osd_id = normalize_osd_id(osd_id) + self.device = map_id_to_device(dev_map, self.osd_id) + if self.device is None: + raise RemoveException('Invalid osd ID: {}'.format(self.osd_id)) + + self.bcache_backing, self.bcache_caching = \ + get_bcache_names(self.device) + + self.report = {} # maps device -> actions. + + @property + def osd_device(self): + return self.bcache_backing or self.device + + def remove(self, purge, timeout, force): + """Remove the OSD from the cluster. + + :param purge: Whether to purge or just destroy the OSD. + :type purge: bool + + :param timeout: The number of minutes to wait for until the OSD + is safe to destroy. + :type timeout: int + + :param force: Whether to proceed with OSD removal, even when + it's not safe to do so. + :type force: bool + """ + # Set the CRUSH weight to 0. + hookenv.log('Reweighting OSD', hookenv.DEBUG) + reweight_osd(self.osd_id) + + # Ensure that the OSD is safe to stop and destroy. + end = (datetime.datetime.now() + + datetime.timedelta(seconds=timeout * 60)) + safe_stop, safe_destroy = False, False + + while True: + if not safe_stop and safe_to_stop(self.osd_id): + safe_stop = True + if not safe_destroy and safe_to_destroy(self.osd_id): + safe_destroy = True + + if safe_stop and safe_destroy: + break + + curr = datetime.datetime.now() + if curr >= end: + if force: + hookenv.log( + 'OSD not safe to destroy, but "force" was specified', + hookenv.DEBUG) + break + + raise RemoveException( + 'timed out waiting for an OSD to be safe to destroy') + time.sleep(min(1, (end - curr).total_seconds())) + + # Stop the OSD service. + hookenv.log('Stopping the OSD service', hookenv.DEBUG) + charms_ceph.utils.stop_osd(self.osd_id[4:]) + + # Remove the OSD from the cluster. + hookenv.log('Destroying the OSD', hookenv.DEBUG) + destroy(self.osd_id, purge) + report = self.report.setdefault(self.osd_device, + {'osd-ids': self.osd_id}) + + if self.bcache_backing: + # Remove anything related to bcache. 
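+            # The caching device and its size (in GB, rounded up) are
+            # recorded in the report so a later 'add-disk' call can recreate
+            # an equivalent bcache layout.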
+            size = int(ceil(device_size(self.bcache_caching)))
+            caching = get_parent_device(self.bcache_caching)
+            report.update({'cache-devices': caching, 'partition-size': size})
+            bcache_remove(self.device, self.bcache_backing,
+                          self.bcache_caching)
+        else:
+            remove_lvm(self.device)
+            wipefs_safely(self.device)
+
+
+def make_same_length(l1, l2):
+    """Make sure 2 lists have the same length, padding out with None's."""
+    ln = max(len(l1), len(l2))
+    l1.extend([None] * (ln - len(l1)))
+    l2.extend([None] * (ln - len(l2)))
+
+
+def write_report(report, ftype):
+    """Generate a report on how to re-establish the removed disks
+    as part of the cluster again, then set the 'message' attribute to
+    either a JSON object or a textual representation.
+
+    :param report: The initial, raw report from the 'ActionOSD' objects.
+    :type report: dict
+
+    :param ftype: Either 'text' or 'json'; specifies the type of report
+    :type ftype: Enum['text', 'json']
+    """
+    if ftype == 'text':
+        msg = '{} disks have been removed\n'.format(len(report))
+        msg += 'To replace them, run:\n'
+        for device, action_args in report.items():
+            args = json.dumps(action_args, separators=(' ', '='))
+            args = args.replace('{', '').replace('}', '').replace('"', '')
+            msg += 'juju run-action {} add-disk {} {}'.format(
+                hookenv.local_unit(), 'osd-devices=' + device, args)
+    else:
+        msg = json.dumps(report)
+
+    hookenv.action_set({'message': msg})
+
+
+def get_list(key):
+    """Retrieve the action arguments based on the key as a list."""
+    ret = hookenv.action_get(key)
+    return ret.split() if ret else []
+
+
+def advertise_osd_count(count):
+    """Let the Ceph-mon know of the updated OSD number."""
+    for relid in hookenv.relation_ids('mon'):
+        hookenv.relation_set(
+            relation_id=relid,
+            relation_settings={'bootstrapped-osds': count}
+        )
+
+
+def main():
+    osd_ids = get_list('osd-ids')
+    osd_devs = get_list('osd-devices')
+    purge = hookenv.action_get('purge')
+    force = hookenv.action_get('force')
+    timeout = hookenv.action_get('timeout')
+
+    if timeout <= 0:
+        function_fail('timeout must be > 0')
+        sys.exit(1)
+    elif not osd_ids and not osd_devs:
+        function_fail('One of osd-ids or osd-devices must be provided')
+        sys.exit(1)
+
+    make_same_length(osd_ids, osd_devs)
+    errors = []
+    report = {}
+    dev_map = get_device_map()
+    charm_devices = kv().get('osd-devices', [])
+    aliases = kv().get('osd-aliases', {})
+
+    for dev, osd_id in zip(osd_devs, osd_ids):
+        try:
+            action_osd = ActionOSD(dev_map, dev=dev, osd_id=osd_id,
+                                   aliases=aliases)
+            if action_osd.device not in charm_devices:
+                errors.append('Device {} not being used by Ceph'
+                              .format(action_osd.device))
+                continue
+            action_osd.remove(purge, timeout, force)
+            charm_devices.remove(action_osd.device)
+            if action_osd.alias:
+                aliases.pop(action_osd.alias)
+            report.update(action_osd.report)
+        except RemoveException as e:
+            errors.append(str(e))
+
+    kv().set('osd-devices', charm_devices)
+    kv().set('osd-aliases', aliases)
+    kv().flush()
+    advertise_osd_count(len(charm_devices))
+    write_report(report, hookenv.action_get('format'))
+
+    if errors:
+        function_fail('Failed to remove devices: {}'.format(','.join(errors)))
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index fb30f221..7c03190b 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -79,6 +79,7 @@
     is_osd_bootstrap_ready,
     import_osd_bootstrap_key,
     import_osd_upgrade_key,
+    import_osd_removal_key,
     get_host_ip,
     get_networks,
    assert_charm_supports_ipv6,
@@ -662,11 +663,14 @@ def get_bdev_enable_discard():
 def mon_relation():
     bootstrap_key = relation_get('osd_bootstrap_key')
     upgrade_key = relation_get('osd_upgrade_key')
+    removal_key = relation_get('osd_disk_removal_key')
     if get_fsid() and get_auth() and bootstrap_key:
         log('mon has provided conf- scanning disks')
         emit_cephconf()
         import_osd_bootstrap_key(bootstrap_key)
         import_osd_upgrade_key(upgrade_key)
+        if removal_key:
+            import_osd_removal_key(removal_key)
         prepare_disks_and_activate()
         _, settings, _ = (ch_ceph.CephOSDConfContext()
                           .filter_osd_from_mon_settings())
diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py
index 26f5f836..44f96c62 100644
--- a/ceph-osd/hooks/utils.py
+++ b/ceph-osd/hooks/utils.py
@@ -72,6 +72,7 @@
 
 _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring"
 _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring"
+_removal_keyring = "/var/lib/ceph/osd/ceph.client.osd-removal.keyring"
 
 
 def is_osd_bootstrap_ready():
@@ -83,27 +84,31 @@ def is_osd_bootstrap_ready():
     return os.path.exists(_bootstrap_keyring)
 
 
-def import_osd_bootstrap_key(key):
-    """
-    Ensure that the osd-bootstrap keyring is setup.
-
-    :param key: The cephx key to add to the bootstrap keyring
-    :type key: str
-    :raises: subprocess.CalledProcessError"""
-    if not os.path.exists(_bootstrap_keyring):
+def _import_key(key, path, name):
+    if not os.path.exists(path):
         cmd = [
-            "sudo",
-            "-u",
+            'sudo',
+            '-u',
             ceph.ceph_user(),
             'ceph-authtool',
-            _bootstrap_keyring,
+            path,
             '--create-keyring',
-            '--name=client.bootstrap-osd',
+            '--name={}'.format(name),
            '--add-key={}'.format(key)
         ]
         subprocess.check_call(cmd)
 
 
+def import_osd_bootstrap_key(key):
+    """
+    Ensure that the osd-bootstrap keyring is setup.
+
+    :param key: The cephx key to add to the bootstrap keyring
+    :type key: str
+    :raises: subprocess.CalledProcessError"""
+    _import_key(key, _bootstrap_keyring, 'client.bootstrap-osd')
+
+
 def import_osd_upgrade_key(key):
     """
     Ensure that the osd-upgrade keyring is setup.
@@ -111,18 +116,17 @@ def import_osd_upgrade_key(key):
     :param key: The cephx key to add to the upgrade keyring
     :type key: str
     :raises: subprocess.CalledProcessError"""
-    if not os.path.exists(_upgrade_keyring):
-        cmd = [
-            "sudo",
-            "-u",
-            ceph.ceph_user(),
-            'ceph-authtool',
-            _upgrade_keyring,
-            '--create-keyring',
-            '--name=client.osd-upgrade',
-            '--add-key={}'.format(key)
-        ]
-        subprocess.check_call(cmd)
+    _import_key(key, _upgrade_keyring, 'client.osd-upgrade')
+
+
+def import_osd_removal_key(key):
+    """
+    Ensure that the osd-removal keyring is setup.
+
+    :param key: The cephx key to add to the removal keyring
+    :type key: str
+    :raises: subprocess.CalledProcessError"""
+    _import_key(key, _removal_keyring, 'client.osd-removal')
 
 
 def render_template(template_name, context, template_dir=TEMPLATES_DIR):
@@ -348,16 +352,16 @@ class DeviceError(Exception):
     pass
 
 
-def _check_output(args):
+def _check_output(args, **kwargs):
     try:
-        return subprocess.check_output(args).decode('UTF-8')
+        return subprocess.check_output(args, **kwargs).decode('UTF-8')
     except subprocess.CalledProcessError as e:
         raise DeviceError(str(e))
 
 
-def _check_call(args):
+def _check_call(args, **kwargs):
     try:
-        return subprocess.check_call(args)
+        return subprocess.check_call(args, **kwargs)
     except subprocess.CalledProcessError as e:
         raise DeviceError(str(e))
 
@@ -458,16 +462,37 @@ def device_size(dev):
     return ret / (1024 * 1024 * 1024)  # Return size in GB.
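+
+# The helpers below tear down the LVM and bcache state that 'ceph-volume'
+# and the add-disk action create on top of a raw device (see remove_lvm
+# and the reworked bcache_remove that follow).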
-def bcache_remove(bcache, cache_dev):
+def remove_lvm(device):
+    """Remove any physical and logical volumes associated to a device."""
+    vgs = []
+    try:
+        rv = _check_output(['sudo', 'pvdisplay', device])
+    except DeviceError:
+        # Assume no physical volumes.
+        return
+
+    for line in rv.splitlines():
+        line = line.strip()
+        if line.startswith('VG Name'):
+            vgs.append(line.split()[2])
+    if vgs:
+        _check_call(['sudo', 'vgremove', '-y'] + vgs)
+    _check_call(['sudo', 'pvremove', '-y', device])
+
+
+def bcache_remove(bcache, backing, caching):
     """Remove a bcache kernel device, given its backing and caching devices.
 
     :param bcache: The path of the bcache device.
     :type bcache: str
 
-    :param cache_dev: The caching device used for the bcache name.
-    :type cache_dev: str
+    :param backing: The backing device for bcache
+    :type backing: str
+
+    :param caching: The caching device for bcache
+    :type caching: str
     """
-    rv = _check_output(['sudo', 'bcache-super-show', cache_dev])
+    rv = _check_output(['sudo', 'bcache-super-show', backing])
     uuid = None
     # Fetch the UUID for the caching device.
     for line in rv.split('\n'):
@@ -478,15 +503,47 @@
     else:
         return
     bcache_name = bcache[bcache.rfind('/') + 1:]
-    with open('/sys/block/{}/bcache/stop'.format(bcache_name), 'wb') as f:
-        f.write(b'1')
-    with open('/sys/fs/bcache/{}/stop'.format(uuid), 'wb') as f:
-        f.write(b'1')
 
+    def write_one(path):
+        os.system('echo 1 | sudo tee {}'.format(path))
+
+    # The command ceph-volume typically creates PV's and VG's for the
+    # OSD device. Remove them now before deleting the bcache.
+    remove_lvm(bcache)
+
+    # NOTE: We *must* do the following steps in this order. For
+    # kernels 4.x and prior, not doing so will cause the bcache device
+    # to be undeletable.
+    # In addition, we have to use 'sudo tee' as done above, since
+    # writing the sysfs files directly can cause permission issues in
+    # some implementations.
+    write_one('/sys/block/{}/bcache/detach'.format(bcache_name))
+    write_one('/sys/block/{}/bcache/stop'.format(bcache_name))
+    write_one('/sys/fs/bcache/{}/stop'.format(uuid))
+
+    # We wipe the bcache signatures here because the bcache tools will not
+    # create the devices otherwise. There is a 'force' option, but it's not
+    # always available, so we do the portable thing here.
+    wipefs_safely(backing)
+    wipefs_safely(caching)
 
-def wipe_disk(dev):
+
+def wipe_disk(dev, timeout=None):
     """Destroy all data in a specific device, including partition tables."""
-    _check_call(['sudo', 'wipefs', '-a', dev])
+    _check_call(['sudo', 'wipefs', '-a', dev], timeout=timeout)
+
+
+def wipefs_safely(dev):
+    """Try to wipe a device's signatures, tolerating transient failures."""
+    for _ in range(10):
+        try:
+            wipe_disk(dev, 1)
+            return
+        except DeviceError:
+            time.sleep(0.3)
+        except subprocess.TimeoutExpired:
+            # If this command times out, then it's likely because
+            # the disk is dead, so give up.
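+            # (wipe_disk above runs wipefs with a 1-second timeout, so a
+            # hung device surfaces here as TimeoutExpired.)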
+ return + raise DeviceError('Failed to wipe bcache device: {}'.format(dev)) class PartitionIter: @@ -556,11 +613,71 @@ def create_bcache(self, backing): return ret def cleanup(self, device): + """Destroy any created partitions and bcache names for a device.""" args = self.created.get(device) if not args: return + bcache, caching = args try: - bcache_remove(*args) + bcache_remove(bcache, device, caching) except DeviceError: - log('Failed to cleanup bcache device: {}'.format(args[0])) + log('Failed to cleanup bcache device: {}'.format(bcache)) + + +def _device_suffix(dev): + ix = dev.rfind('/') + if ix >= 0: + dev = dev[ix + 1:] + return dev + + +def get_bcache_names(dev): + """Return the backing and caching devices for a bcache device, + in that specific order. + + :param dev: The path to the bcache device, i.e: /dev/bcache0 + :type dev: str + + :returns: A tuple with the backing and caching devices. + :rtype: list[Option[None, str], Option[None, str]] + """ + if dev is None: + return None, None + + dev_name = _device_suffix(dev) + bcache_path = '/sys/block/{}/slaves'.format(dev_name) + if (not os.path.exists('/sys/block/{}/bcache'.format(dev_name)) or + not os.path.exists(bcache_path)): + return None, None + + cache = os.listdir(bcache_path) + if len(cache) < 2: + return None, None + + backing = '/dev/' + cache[0] + caching = '/dev/' + cache[1] + out = _check_output(['sudo', 'bcache-super-show', backing]) + if 'backing device' not in out: + return caching, backing + return backing, caching + + +def get_parent_device(dev): + """Return the device's parent, assuming if it's a block device.""" + try: + rv = subprocess.check_output(['lsblk', '-as', dev, '-J']) + rv = json.loads(rv.decode('UTF-8')) + except subprocess.CalledProcessError: + return dev + + children = rv.get('blockdevices', []) + if not children: + return dev + + children = children[0].get('children', []) + for child in children: + if 'children' not in child: + return '/dev/' + child['name'] + + return dev diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 643f2e03..429b8900 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1162,6 +1162,10 @@ def get_mds_bootstrap_key(): 'allow command "osd in"', 'allow command "osd rm"', 'allow command "auth del"', + 'allow command "osd safe-to-destroy"', + 'allow command "osd crush reweight"', + 'allow command "osd purge"', + 'allow command "osd destroy"', ]) ]) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 1284b516..782a231d 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -51,6 +51,9 @@ enable experimental unrecoverable data corrupting features = bluestore rocksdb [client.osd-upgrade] keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring +[client.osd-removal] +keyring = /var/lib/ceph/osd/ceph.client.osd-removal.keyring + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index dd2bb64d..1d06394f 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -44,7 +44,7 @@ def fake_config(key): db = mock.MagicMock() self.kv.return_value = db - db.get.return_value = ['/dev/myosddev'] + db.get.side_effect = {'osd-devices': ['/dev/myosddev']}.get request = {'ops': []} add_disk.add_device(request, '/dev/myosddev') @@ -57,11 +57,13 @@ def fake_config(key): True, None)]) piter = 
add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev']) - mock_create_bcache = mock.MagicMock(side_effect=lambda b: b) + mock_create_bcache = mock.MagicMock(side_effect=lambda b: '/dev/cache') with mock.patch.object(add_disk.PartitionIter, 'create_bcache', mock_create_bcache) as mock_call: add_disk.add_device(request, '/dev/myosddev', part_iter=piter) mock_call.assert_called() + db.set.assert_called_with('osd-aliases', + {'/dev/myosddev': '/dev/cache'}) mock_create_bcache.side_effect = lambda b: None with mock.patch.object(add_disk.PartitionIter, 'create_bcache', diff --git a/ceph-osd/unit_tests/test_actions_remove_disk.py b/ceph-osd/unit_tests/test_actions_remove_disk.py new file mode 100644 index 00000000..369d3f1f --- /dev/null +++ b/ceph-osd/unit_tests/test_actions_remove_disk.py @@ -0,0 +1,136 @@ +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import mock + +from actions import remove_disk + +from test_utils import CharmTestCase + + +class RemoveDiskActionTests(CharmTestCase): + + @mock.patch.object(remove_disk.subprocess, 'check_output') + def test_get_device_map(self, check_output): + check_output.return_value = b''' +{ + "1": [{"devices": ["/dev/sdx1"]}], + "2": [{"devices": ["/dev/sdc2", "/dev/sdc3"]}] +} + ''' + rv = remove_disk.get_device_map() + self.assertEqual(rv[0]['path'], '/dev/sdx1') + self.assertEqual(rv[1]['id'], rv[2]['id']) + + def test_normalize_osd_id(self): + self.assertEqual('osd.1', remove_disk.normalize_osd_id(1)) + self.assertEqual('osd.2', remove_disk.normalize_osd_id('osd.2')) + self.assertEqual('osd.3', remove_disk.normalize_osd_id('3')) + + def test_map_device_id(self): + dev_map = [ + {'id': 'osd.1', 'path': '/dev/sdc1'}, + {'id': 'osd.2', 'path': '/dev/sdd2'}, + {'id': 'osd.2', 'path': '/dev/sdx3'} + ] + self.assertEqual( + 'osd.1', + remove_disk.map_device_to_id(dev_map, '/dev/sdc1')) + self.assertIsNone( + remove_disk.map_device_to_id(dev_map, '/dev/sdx4')) + + self.assertEqual( + '/dev/sdd2', + remove_disk.map_id_to_device(dev_map, 'osd.2')) + self.assertIsNone( + remove_disk.map_id_to_device(dev_map, 'osd.3')) + + @mock.patch.object(remove_disk, 'get_bcache_names') + def test_action_osd_constructor(self, bcache_names): + bcache_names.return_value = ('bcache0', '/dev/bcache0') + dev_map = [ + {'path': '/dev/sdx1', 'id': 'osd.1'} + ] + with self.assertRaises(remove_disk.RemoveException): + remove_disk.ActionOSD(dev_map, dev='/dev/sdx1', osd_id='osd.1') + obj = remove_disk.ActionOSD(dev_map, dev='/dev/sdx1') + self.assertEqual(obj.osd_id, 'osd.1') + obj = remove_disk.ActionOSD(dev_map, osd_id='1') + self.assertEqual(obj.device, '/dev/sdx1') + + @mock.patch.object(remove_disk, 'device_size') + @mock.patch.object(remove_disk.charms_ceph.utils, 'stop_osd') + @mock.patch.object(remove_disk, 'bcache_remove') + @mock.patch.object(remove_disk.subprocess, 'call') + @mock.patch.object(remove_disk.subprocess, 'check_call') + @mock.patch.object(remove_disk, 'get_bcache_names') + def test_action_osd_remove(self, 
get_bcache_names, check_call, + call, bcache_remove, stop_osd, device_size): + call.return_value = 0 + get_bcache_names.return_value = ('/dev/backing', '/dev/caching') + device_size.side_effect = lambda x: 1 if x == '/dev/caching' else 0 + dev_map = [ + {'path': '/dev/bcache0', 'id': 'osd.1'} + ] + prefix_args = ['ceph', '--id', 'osd-removal'] + obj = remove_disk.ActionOSD(dev_map, osd_id='1') + + obj.remove(True, 1, True) + call.assert_any_call(prefix_args + ['osd', 'safe-to-destroy', 'osd.1']) + check_call.assert_any_call(prefix_args + ['osd', 'purge', 'osd.1', + '--yes-i-really-mean-it']) + check_call.assert_any_call(prefix_args + ['osd', 'crush', 'reweight', + 'osd.1', '0']) + bcache_remove.assert_called_with( + '/dev/bcache0', '/dev/backing', '/dev/caching') + report = obj.report + self.assertIn('/dev/backing', report) + report = report['/dev/backing'] + self.assertIn('osd-ids', report) + self.assertIn('osd.1', report['osd-ids']) + self.assertIn('cache-devices', report) + self.assertIn('partition-size', report) + self.assertEqual('/dev/caching', report['cache-devices']) + self.assertEqual(1, report['partition-size']) + + # Test the timeout check. + with self.assertRaises(remove_disk.RemoveException): + call.return_value = 1 + obj.remove(False, 0, False) + + @mock.patch.object(remove_disk.hookenv, 'local_unit') + @mock.patch.object(remove_disk.hookenv, 'action_set') + def test_write_report(self, action_set, local_unit): + output = {} + local_unit.return_value = 'ceph-osd/0' + action_set.side_effect = lambda x: output.update(x) + report = {'dev@': {'osd-ids': 'osd.1', 'cache-devices': 'cache@', + 'partition-size': 5}} + remove_disk.write_report(report, 'text') + self.assertIn('message', output) + msg = output['message'] + self.assertIn('juju run-action ceph-osd/0 add-disk', msg) + self.assertIn('osd-devices=dev@', msg) + self.assertIn('osd-ids=osd.1', msg) + self.assertIn('cache-devices=cache@', msg) + self.assertIn('partition-size=5', msg) + + def test_make_same_length(self): + l1, l2 = [1], [] + remove_disk.make_same_length(l1, l2) + self.assertEqual(len(l1), len(l2)) + self.assertIsNone(l2[0]) + prev_len = len(l1) + remove_disk.make_same_length(l1, l2) + self.assertEqual(len(l1), prev_len) diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index f338eb3a..f0fbabd6 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -15,7 +15,7 @@ import unittest -from unittest.mock import patch, mock_open +from unittest.mock import patch with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -204,7 +204,10 @@ def test_device_size(self, check_output): self.assertEqual(745, int(utils.device_size(''))) @patch('subprocess.check_output') - def test_bcache_remove(self, check_output): + @patch.object(utils, 'remove_lvm') + @patch.object(utils, 'wipe_disk') + @patch('os.system') + def test_bcache_remove(self, system, wipe_disk, remove_lvm, check_output): check_output.return_value = b''' sb.magic ok sb.first_sector 8 [match] @@ -223,15 +226,93 @@ def test_bcache_remove(self, check_output): dev.cache.replacement 0 [lru] cset.uuid 424242 ''' - mo = mock_open() - with patch('builtins.open', mo): - utils.bcache_remove('/dev/bcache0', '/dev/nvme0n1p1') - mo.assert_any_call('/sys/block/bcache0/bcache/stop', 'wb') - mo.assert_any_call('/sys/fs/bcache/424242/stop', 'wb') + utils.bcache_remove('/dev/bcache0', 'backing', 'caching') + 
system.assert_any_call( + 'echo 1 | sudo tee /sys/block/bcache0/bcache/detach') + system.assert_any_call( + 'echo 1 | sudo tee /sys/block/bcache0/bcache/stop') + system.assert_any_call( + 'echo 1 | sudo tee /sys/fs/bcache/424242/stop') + wipe_disk.assert_any_call('backing', 1) + wipe_disk.assert_any_call('caching', 1) + @patch('os.listdir') + @patch('os.path.exists') + @patch('subprocess.check_output') + def test_get_bcache_names(self, check_output, exists, listdir): + exists.return_value = True + check_output.return_value = b''' +sb.magic ok +sb.first_sector 8 [match] +sb.csum A71D96D4364343BF [match] +sb.version 1 [backing device] + +dev.label (empty) +dev.uuid cca84a86-3f68-4ffb-8be1-4449c9fb29a8 +dev.sectors_per_block 1 +dev.sectors_per_bucket 1024 +dev.data.first_sector 16 +dev.data.cache_mode 1 [writeback] +dev.data.cache_state 1 [clean] + +cset.uuid 57add9da-e5de-47c6-8f39-3e16aafb8d31 + ''' + listdir.return_value = ['backing', 'caching'] + values = utils.get_bcache_names('/dev/bcache0') + self.assertEqual(2, len(values)) + self.assertEqual(values[0], '/dev/backing') + check_output.return_value = b''' +sb.magic ok +sb.first_sector 8 [match] +sb.csum 6802E76075FF7B77 [match] +sb.version 3 [cache device] + +dev.label (empty) +dev.uuid fb6e9d06-12e2-46ca-b8fd-797ecec1a126 +dev.sectors_per_block 1 +dev.sectors_per_bucket 1024 +dev.cache.first_sector 1024 +dev.cache.cache_sectors 10238976 +dev.cache.total_sectors 10240000 +dev.cache.ordered yes +dev.cache.discard no +dev.cache.pos 0 +dev.cache.replacement 0 [lru] + +cset.uuid 57add9da-e5de-47c6-8f39-3e16aafb8d31 + ''' + values = utils.get_bcache_names('/dev/bcache0') + self.assertEqual(values[0], '/dev/caching') + + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_remove_lvm(self, check_call, check_output): + check_output.return_value = b''' +--- Physical volume --- + PV Name /dev/bcache0 + VG Name ceph-1 + VG Name ceph-2 + ''' + utils.remove_lvm('/dev/bcache0') + check_call.assert_any_call( + ['sudo', 'vgremove', '-y', 'ceph-1', 'ceph-2']) + check_call.assert_any_call(['sudo', 'pvremove', '-y', '/dev/bcache0']) + + check_call.reset_mock() + + def just_raise(*args): + raise utils.DeviceError() + + check_output.side_effect = just_raise + utils.remove_lvm('') + check_call.assert_not_called() + + @patch.object(utils, 'wipe_disk') + @patch.object(utils, 'bcache_remove') @patch.object(utils, 'create_partition') @patch.object(utils, 'setup_bcache') - def test_partition_iter(self, setup_bcache, create_partition): + def test_partition_iter(self, setup_bcache, create_partition, + bcache_remove, wipe_disk): create_partition.side_effect = \ lambda c, s, n: c + '|' + str(s) + '|' + str(n) setup_bcache.side_effect = lambda *args: args @@ -239,6 +320,8 @@ def test_partition_iter(self, setup_bcache, create_partition): 200, ['dev1', 'dev2', 'dev3']) piter.create_bcache('dev1') setup_bcache.assert_called_with('dev1', '/dev/nvm0n1|200|0') + piter.cleanup('dev1') + bcache_remove.assert_called() setup_bcache.mock_reset() piter.create_bcache('dev2') setup_bcache.assert_called_with('dev2', '/dev/nvm0n2|200|0') @@ -258,3 +341,14 @@ def test_partition_iter_no_size(self, setup_bcache, create_partition, # 300GB across 3 devices, i.e: 100 for each. 
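+        # (Each next() call yields the computed per-device size in GB.)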
self.assertEqual(100, next(piter)) self.assertEqual(100, next(piter)) + + @patch.object(utils.subprocess, 'check_output') + def test_parent_device(self, check_output): + check_output.return_value = b''' +{"blockdevices": [ + {"name": "loop1p1", + "children": [ + {"name": "loop1"}] + }] +}''' + self.assertEqual(utils.get_parent_device('/dev/loop1p1'), '/dev/loop1') From 04617bc33d2fd3b6cfad13dc5723f1e354b79423 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 1 Apr 2022 13:00:06 +0200 Subject: [PATCH 2370/2699] Resolve type change in Ceph Quincy for enabled_manager_modules Change-Id: I4f81391e51312ec5795e3a3b840b2461e48cb3c4 --- ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2e70a351..1b20b8fe 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -789,6 +789,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: From d6eb091d11a4203a3e7b343485b75c9637138a40 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 7 Apr 2022 09:07:37 +0200 Subject: [PATCH 2371/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata - Default source is yoga Change-Id: Ic3a9837af9cb2ad4851c8dfb00de589f43618975 --- ceph-iscsi/.zuul.yaml | 2 +- ceph-iscsi/charmcraft.yaml | 16 +++-- ceph-iscsi/config.yaml | 2 +- ceph-iscsi/metadata.yaml | 4 +- ceph-iscsi/tests/bundles/jammy-ec.yaml | 94 +++++++++++++++++++++++++ ceph-iscsi/tests/bundles/jammy.yaml | 95 ++++++++++++++++++++++++++ ceph-iscsi/tests/tests.yaml | 3 + ceph-iscsi/tox.ini | 5 ++ 8 files changed, 212 insertions(+), 9 deletions(-) create mode 100644 ceph-iscsi/tests/bundles/jammy-ec.yaml create mode 100644 ceph-iscsi/tests/bundles/jammy.yaml diff --git a/ceph-iscsi/.zuul.yaml b/ceph-iscsi/.zuul.yaml index fd20909e..7ffc71cb 100644 --- a/ceph-iscsi/.zuul.yaml +++ b/ceph-iscsi/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-jobs + - openstack-python3-charm-yoga-jobs - openstack-cover-jobs diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 72933212..23219175 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -21,7 +21,15 @@ parts: update-ca-certificates bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index 47127af6..ba1120f1 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. 
source: type: string - default: + default: yoga description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 4edc3bd5..04c1bde3 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -11,9 +11,7 @@ tags: - misc series: - focal - - groovy - - hirsute - - impish + - jammy subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-iscsi/tests/bundles/jammy-ec.yaml b/ceph-iscsi/tests/bundles/jammy-ec.yaml new file mode 100644 index 00000000..61132188 --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy-ec.yaml @@ -0,0 +1,94 @@ +local_overlay_enabled: False +series: jammy +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '0' + - '1' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy.yaml b/ceph-iscsi/tests/bundles/jammy.yaml new file mode 100644 index 00000000..11fac588 --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy.yaml @@ -0,0 +1,95 @@ +local_overlay_enabled: False +series: jammy +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': + '16': + '17': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 4 + options: + gateway-metadata-pool: iscsi-foo-metadata + to: + - '0' + - '1' + - '16' + - '17' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 
'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 8ab33a0b..6f0ce1bd 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -4,6 +4,9 @@ gate_bundles: - focal smoke_bundles: - focal +dev_bundles: + - jammy-ec + - jammy configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 6081150b..c50ea0f3 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -65,6 +65,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 272c381a0377b0c868c41207b39d33bdb800bfbc Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 7 Apr 2022 09:07:37 +0200 Subject: [PATCH 2372/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata - Default source is yoga - Charmhelpers and charms.ceph sync Change-Id: I1e963b2d18aaa8ec61fb5d255f23508879f08e83 --- ceph-proxy/.zuul.yaml | 2 +- ceph-proxy/charmcraft.yaml | 16 +- ceph-proxy/charmhelpers/__init__.py | 17 +- ceph-proxy/charmhelpers/cli/__init__.py | 13 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 40 ++- .../charmhelpers/contrib/hahelpers/cluster.py | 15 +- .../contrib/hardening/apache/checks/config.py | 5 +- .../contrib/hardening/audits/apache.py | 8 +- .../contrib/hardening/audits/apt.py | 5 +- .../contrib/hardening/audits/file.py | 3 +- .../charmhelpers/contrib/hardening/harden.py | 13 +- .../contrib/hardening/host/checks/login.py | 4 +- .../contrib/hardening/host/checks/sysctl.py | 7 +- .../contrib/hardening/mysql/checks/config.py | 7 +- .../contrib/hardening/templating.py | 6 +- .../charmhelpers/contrib/hardening/utils.py | 3 +- ceph-proxy/charmhelpers/contrib/network/ip.py | 23 +- .../files/check_deferred_restarts.py | 128 +++++++++ .../charmhelpers/contrib/openstack/policyd.py | 46 +--- .../charmhelpers/contrib/openstack/utils.py | 110 +++++--- ceph-proxy/charmhelpers/contrib/python.py | 2 - .../contrib/storage/linux/ceph.py | 99 +++---- .../contrib/storage/linux/loopback.py | 10 +- ceph-proxy/charmhelpers/core/hookenv.py | 81 +++--- ceph-proxy/charmhelpers/core/host.py | 41 ++- ceph-proxy/charmhelpers/core/services/base.py | 7 +- .../charmhelpers/core/services/helpers.py | 4 +- ceph-proxy/charmhelpers/core/strutils.py | 9 +- ceph-proxy/charmhelpers/core/templating.py | 11 +- ceph-proxy/charmhelpers/fetch/__init__.py | 10 +- ceph-proxy/charmhelpers/fetch/archiveurl.py | 29 +-- ceph-proxy/charmhelpers/fetch/centos.py | 7 +- ceph-proxy/charmhelpers/fetch/python/debug.py | 2 - .../charmhelpers/fetch/python/packages.py | 14 +- ceph-proxy/charmhelpers/fetch/ubuntu.py | 37 +-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 25 +- ceph-proxy/lib/charms_ceph/utils.py | 245 ++++++++++-------- ceph-proxy/metadata.yaml | 2 +- ceph-proxy/tests/bundles/impish-xena-ec.yaml | 228 ---------------- ceph-proxy/tests/bundles/impish-xena.yaml | 199 -------------- ceph-proxy/tests/tests.yaml | 4 - 
41 files changed, 619 insertions(+), 918 deletions(-) create mode 100755 ceph-proxy/charmhelpers/contrib/openstack/files/check_deferred_restarts.py delete mode 100644 ceph-proxy/tests/bundles/impish-xena-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/impish-xena.yaml diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index fd189e2f..7dd3db96 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 11d5f7cd..68b9a010 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -22,7 +22,15 @@ parts: - README.md bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-proxy/charmhelpers/__init__.py b/ceph-proxy/charmhelpers/__init__.py index 1f57ed2a..ddf30450 100644 --- a/ceph-proxy/charmhelpers/__init__.py +++ b/ceph-proxy/charmhelpers/__init__.py @@ -14,30 +14,15 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - import functools import inspect import subprocess -import sys -try: - import six # NOQA:F401 -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/ceph-proxy/charmhelpers/cli/__init__.py b/ceph-proxy/charmhelpers/cli/__init__.py index 74ea7295..2b0c4b7a 100644 --- a/ceph-proxy/charmhelpers/cli/__init__.py +++ b/ceph-proxy/charmhelpers/cli/__init__.py @@ -16,9 +16,6 @@ import argparse import sys -import six -from six.moves import zip - import charmhelpers.core.unitdata @@ -149,10 +146,7 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." 
arguments = self.argument_parser.parse_args() - if six.PY2: - argspec = inspect.getargspec(arguments.func) - else: - argspec = inspect.getfullargspec(arguments.func) + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -177,10 +171,7 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - if six.PY2: - argspec = inspect.getargspec(func) - else: - argspec = inspect.getfullargspec(func) + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index 8d1753c3..bad7a533 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,6 +28,7 @@ import yaml from charmhelpers.core.hookenv import ( + application_name, config, hook_name, local_unit, @@ -174,7 +175,8 @@ def _locate_cmd(self, check_cmd): if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - command += " " + " ".join(parts[1:]) + safe_args = [shlex.quote(arg) for arg in parts[1:]] + command += " " + " ".join(safe_args) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) + + +def add_deferred_restarts_check(nrpe): + """ + Add NRPE check for services with deferred restarts. + + :param NRPE nrpe: NRPE object to add check to + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Adding deferred restarts nrpe check: {}'.format(shortname)) + nrpe.add_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) + + +def remove_deferred_restarts_check(nrpe): + """ + Remove NRPE check for services with deferred service restarts. 
+ + :param NRPE nrpe: NRPE object to remove check from + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Removing deferred restarts nrpe check: {}'.format(shortname)) + nrpe.remove_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py index f0b629a2..146beba6 100644 --- a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py @@ -32,8 +32,6 @@ from socket import gethostname as get_unit_hostname -import six - from charmhelpers.core.hookenv import ( log, relation_ids, @@ -125,16 +123,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) + # - partition with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError: status = None diff --git a/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py index 341da9ee..e81a5f0b 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,7 +14,6 @@ import os import re -import six import subprocess @@ -95,9 +94,7 @@ def __call__(self): settings = utils.get_settings('apache') ctxt = settings['hardening'] - out = subprocess.check_output(['apache2', '-v']) - if six.PY3: - out = out.decode('utf-8') + out = subprocess.check_output(['apache2', '-v']).decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py index c1537625..31db8f62 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apache.py @@ -15,8 +15,6 @@ import re import subprocess -import six - from charmhelpers.core.hookenv import ( log, INFO, @@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, six.string_types): + elif isinstance(modules, str): self.modules = [modules] else: self.modules = modules @@ -68,9 +66,7 @@ def ensure_compliance(self): @staticmethod def _get_loaded_modules(): """Returns the 
modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']) - if six.PY3: - output = output.decode('utf-8') + output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py index cad7bf73..1b22925b 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/apt.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # required for external apt import -from six import string_types - from charmhelpers.fetch import ( apt_cache, apt_purge @@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): self.pkgs = pkgs.split() else: self.pkgs = pkgs diff --git a/ceph-proxy/charmhelpers/contrib/hardening/audits/file.py b/ceph-proxy/charmhelpers/contrib/hardening/audits/file.py index 257c6351..84cc2494 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/audits/file.py @@ -23,7 +23,6 @@ check_call, ) from traceback import format_exc -from six import string_types from stat import ( S_ISGID, S_ISUID @@ -63,7 +62,7 @@ def __init__(self, paths, always_comply=False, *args, **kwargs): """ super(BaseFileAudit, self).__init__(*args, **kwargs) self.always_comply = always_comply - if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + if isinstance(paths, str) or not hasattr(paths, '__iter__'): self.paths = [paths] else: self.paths = paths diff --git a/ceph-proxy/charmhelpers/contrib/hardening/harden.py b/ceph-proxy/charmhelpers/contrib/hardening/harden.py index 63f21b9c..45ad076d 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/harden.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/harden.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from collections import OrderedDict from charmhelpers.core.hookenv import ( @@ -53,18 +51,17 @@ def harden(overrides=None): overrides = [] def _harden_inner1(f): - # As this has to be py2.7 compat, we can't use nonlocal. Use a trick - # to capture the dictionary that can then be updated. - _logged = {'done': False} + _logged = False def _harden_inner2(*args, **kwargs): # knock out hardening via a config var; normally it won't get # disabled. 
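+        # ('nonlocal' is available on Python 3; the mutable-dict trick it
+        # replaces was only needed while this code had to run on Python 2.)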
+ nonlocal _logged if _DISABLE_HARDENING_FOR_UNIT_TEST: return f(*args, **kwargs) - if not _logged['done']: + if not _logged: log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged['done'] = True + _logged = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), @@ -74,7 +71,7 @@ def _harden_inner2(*args, **kwargs): if enabled: modules_to_run = [] # modules will always be performed in the following order - for module, func in six.iteritems(RUN_CATALOG): + for module, func in RUN_CATALOG.items(): if module in enabled: enabled.remove(module) modules_to_run.append(func) diff --git a/ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py index fe2bc6ef..fd500c8b 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/login.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types - from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR from charmhelpers.contrib.hardening import utils @@ -41,7 +39,7 @@ def __call__(self): # a string assume it to be octal and turn it into an octal # string. umask = settings['environment']['umask'] - if not isinstance(umask, string_types): + if not isinstance(umask, str): umask = '%s' % oct(umask) ctxt = { diff --git a/ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py index f1ea5813..8a57d83d 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -15,7 +15,6 @@ import os import platform import re -import six import subprocess from charmhelpers.core.hookenv import ( @@ -183,9 +182,9 @@ def __call__(self): ctxt['sysctl'][key] = d[2] or None - # Translate for python3 - return {'sysctl_settings': - [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]} + return { + 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] + } class SysctlConf(TemplatedFile): diff --git a/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py index a79f33b7..8bf9f36c 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import subprocess from charmhelpers.core.hookenv import ( @@ -82,6 +81,6 @@ class MySQLConfContext(object): """ def __call__(self): settings = utils.get_settings('mysql') - # Translate for python3 - return {'mysql_settings': - [(k, v) for k, v in six.iteritems(settings['security'])]} + return { + 'mysql_settings': [(k, v) for k, v in settings['security'].items()] + } diff --git a/ceph-proxy/charmhelpers/contrib/hardening/templating.py b/ceph-proxy/charmhelpers/contrib/hardening/templating.py index 5b6765f7..4dee5465 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/templating.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/templating.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os -import six from charmhelpers.core.hookenv import ( log, @@ -27,10 +26,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-proxy/charmhelpers/contrib/hardening/utils.py b/ceph-proxy/charmhelpers/contrib/hardening/utils.py index 56afa4b6..f93851a9 100644 --- a/ceph-proxy/charmhelpers/contrib/hardening/utils.py +++ b/ceph-proxy/charmhelpers/contrib/hardening/utils.py @@ -16,7 +16,6 @@ import grp import os import pwd -import six import yaml from charmhelpers.core.hookenv import ( @@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema): :returns: dictionary of modules config with user overrides applied. """ if overrides: - for k, v in six.iteritems(overrides): + for k, v in overrides.items(): if k in schema: if schema[k] is None: settings[k] = v diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index b356d64c..de56584d 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/ceph-proxy/charmhelpers/contrib/openstack/files/check_deferred_restarts.py new file mode 100755 index 00000000..5f392b3c --- /dev/null +++ b/ceph-proxy/charmhelpers/contrib/openstack/files/check_deferred_restarts.py @@ -0,0 +1,128 @@ +#!/usr/bin/python3 + +# Copyright 2014-2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
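# The import-or-install fallbacks above now target only the python3-*
# packages. The general shape of that lazy-dependency pattern, sketched here
# with netifaces exactly as the diff uses it (assumes a charm environment
# where charmhelpers.fetch is importable):
try:
    import netifaces
except ImportError:
    from charmhelpers.fetch import apt_install, apt_update
    apt_update(fatal=True)
    apt_install('python3-netifaces', fatal=True)
    import netifaces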
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Checks for services with deferred restarts.
+
+This Nagios check will parse /var/lib/policy-rc.d/
+to find any restarts that are currently deferred.
+"""
+
+import argparse
+import glob
+import sys
+import yaml
+
+
+DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'
+
+
+def get_deferred_events():
+    """Return a list of deferred event dicts from policy-rc.d files.
+
+    Events are read from DEFERRED_EVENTS_DIR and are of the form:
+    {
+        action: restart,
+        policy_requestor_name: rabbitmq-server,
+        policy_requestor_type: charm,
+        reason: 'Pkg update',
+        service: rabbitmq-server,
+        time: 1614328743
+    }
+
+    :raises OSError: Raised in case of a system error while reading a policy file
+    :raises yaml.YAMLError: Raised if parsing a policy file fails
+
+    :returns: List of deferred event dictionaries
+    :rtype: list
+    """
+    deferred_events_files = glob.glob(
+        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
+
+    deferred_events = []
+    for event_file in deferred_events_files:
+        with open(event_file, 'r') as f:
+            event = yaml.safe_load(f)
+            deferred_events.append(event)
+
+    return deferred_events
+
+
+def get_deferred_restart_services(application=None):
+    """Returns a list of services with deferred restarts.
+
+    :param str application: Name of the application that blocked the service restart.
+                            If application is None, all services with deferred restarts
+                            are returned. Services which are blocked by a non-charm
+                            requestor are always returned.
+
+    :raises OSError: Raised in case of a system error while reading a policy file
+    :raises yaml.YAMLError: Raised if parsing a policy file fails
+
+    :returns: List of services with deferred restarts belonging to application.
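# A policy-rc.d deferred-event file is plain YAML, so get_deferred_events()
# above reduces to a glob plus yaml.safe_load(). Standalone illustration
# using the event shape from the docstring:
import yaml

sample = """
action: restart
policy_requestor_name: rabbitmq-server
policy_requestor_type: charm
reason: 'Pkg update'
service: rabbitmq-server
time: 1614328743
"""
event = yaml.safe_load(sample)
assert event['action'] == 'restart'
assert event['service'] == 'rabbitmq-server'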
+ :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py index 6fa06f26..767943c2 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ def __str__(self): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. 
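# os.scandir() is always available on Python 3.5+, so the _fallback_scandir
# shim below can simply be deleted; DirEntry objects expose .path and
# .is_dir() directly. Minimal standalone sketch of the sweep performed by
# clean_policyd_dir_for() (keep_paths handling only, no deletion):
import os

def list_removable(path, keep_paths=()):
    removable = []
    for direntry in os.scandir(path):
        if direntry.path in keep_paths:
            continue
        removable.append((direntry.path, direntry.is_dir()))
    return removable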
if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. - - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 9cc96d60..c8747c16 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -25,7 +25,6 @@ import itertools import functools -import six import traceback import uuid import yaml @@ -362,6 +361,8 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel + if src in OPENSTACK_RELEASES: + return src if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -401,7 +402,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(version_map): + for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -411,7 +412,8 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): def get_os_version_codename_swift(codename): '''Determine OpenStack version number of swift from codename.''' - for k, v in six.iteritems(SWIFT_CODENAMES): + # for k, v in six.iteritems(SWIFT_CODENAMES): + for k, v in SWIFT_CODENAMES.items(): if k == codename: return v[-1] e = 'Could not derive swift version for '\ @@ -421,17 +423,17 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. 
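# get_os_codename_install_source() above now short-circuits when the source
# string is already an OpenStack release name. A simplified guard showing
# the intent; the release tuple is truncated and the cloud-archive parsing
# is reduced to the common case:
OPENSTACK_RELEASES = ('queens', 'rocky', 'stein', 'train', 'ussuri')

def codename_from_source(src):
    if src is None:
        return ''
    if src in OPENSTACK_RELEASES:
        return src  # e.g. openstack-origin='ussuri'
    if src.startswith('cloud:'):
        return src.split('-')[-1]  # e.g. 'cloud:focal-ussuri'
    return ''

assert codename_from_source('ussuri') == 'ussuri'
assert codename_from_source('cloud:focal-ussuri') == 'ussuri'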
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. 
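# The rename of _ows_check_services_running() above keeps the old name as a
# thin alias decorated with charmhelpers' deprecate(), so existing callers
# keep working while logging a warning. Generic sketch of that pattern
# (the function names here are placeholders):
from charmhelpers import deprecate
from charmhelpers.core.hookenv import log as juju_log


def check_everything(services, ports):
    """New public entry point."""
    return None


@deprecate("use check_everything() instead", "2022-05", log=juju_log)
def _check_everything(services, ports):
    return check_everything(services, ports)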
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) + else: + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. + + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def some_hook(...): """ def wrap(f): - # py27 compatible nonlocal variable. 
When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/ceph-proxy/charmhelpers/contrib/python.py b/ceph-proxy/charmhelpers/contrib/python.py index 84cba8c4..fcded680 100644 --- a/ceph-proxy/charmhelpers/contrib/python.py +++ b/ceph-proxy/charmhelpers/contrib/python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index c70aeb20..1b20b8fe 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ def add_cache_tier(self, cache_pool, mode): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -615,7 +614,8 @@ def create(self): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,6 +632,8 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, to this replicated pool. :type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -646,11 +648,20 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, # we will fail with KeyError if it is not provided. 
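# With six gone, validator() takes the plain str type, which reads naturally
# at call sites such as add_cache_tier() above. A condensed standalone
# version of validator()'s string path, for illustration only:
def validator(value, valid_type, valid_range=None):
    assert isinstance(value, valid_type), (
        "{} is not of type {}".format(value, valid_type))
    if valid_range is not None and isinstance(value, str):
        assert value in valid_range, (
            "{} is not in the list {}".format(value, valid_range))

validator(value='hot-pool', valid_type=str)
validator(value='writeback', valid_type=str,
          valid_range=["readonly", "writeback"])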
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -668,12 +679,12 @@ def _create(self): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -692,7 +703,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. @@ -778,10 +789,11 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -814,10 +826,10 @@ def get_mon_map(service): ceph command fails. 
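# From Quincy (ceph-common >= 17.1.0) 'ceph mgr module ls' must be asked for
# JSON explicitly, as done above. A hedged sketch of consuming that output;
# the 'enabled_modules'/'always_on_modules' keys reflect the quincy layout
# and may differ on other releases:
import json
import subprocess

def enabled_mgr_modules():
    out = subprocess.check_output(
        ['ceph', 'mgr', 'module', 'ls', '--format=json']).decode('utf-8')
    data = json.loads(out)
    return data.get('always_on_modules', []) + data.get('enabled_modules', [])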
""" try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -959,9 +971,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1164,8 +1174,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1176,7 +1185,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1189,7 +1198,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1198,7 +1207,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1213,7 +1222,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1241,8 +1250,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1260,7 +1269,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1280,12 +1289,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1299,9 +1306,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1320,13 +1326,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1343,9 +1347,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1371,7 +1373,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1509,9 +1511,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False @@ -1857,7 +1857,7 @@ def _partial_build_common_op_create(self, } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1871,6 +1871,10 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. 
+ :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1880,6 +1884,7 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py b/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py index 74bab40e..04daea29 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ check_output, ) -import six - ################################################## # loopback device helpers. @@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/ceph-proxy/charmhelpers/core/hookenv.py b/ceph-proxy/charmhelpers/core/hookenv.py index e94247a2..370c3e8f 100644 --- a/ceph-proxy/charmhelpers/core/hookenv.py +++ b/ceph-proxy/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not None: return 
_cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. + :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. + """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. - - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. 
deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. if not cmd_exists('function-fail'): diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index 994ec8a0..ad2cab46 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -115,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_enable('ceph-osd', id=4) + + :param service_name: the name of the service to enable + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('enable', service_name, **kwargs) + + def service_restart(service_name, **kwargs): """Restart a system service. @@ -135,7 +161,7 @@ def service_restart(service_name, **kwargs): :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). 
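# The new service_enable() helper rounds out service_start()/service_stop()
# and service_restart() by issuing the init system's 'enable' action.
# Illustrative calls (unit names are examples):
from charmhelpers.core.host import service_enable, service_restart

service_enable('ceph-osd', id=4)  # upstart-style instance parameter
service_enable('chrony')          # systemd: extra kwargs are ignored
service_restart('chrony')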
""" @@ -263,7 +289,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +315,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +590,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +993,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1107,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". if e.errno == errno.ENOENT: pass diff --git a/ceph-proxy/charmhelpers/core/services/base.py b/ceph-proxy/charmhelpers/core/services/base.py index 9f880290..7c37c65c 100644 --- a/ceph-proxy/charmhelpers/core/services/base.py +++ b/ceph-proxy/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-proxy/charmhelpers/core/services/helpers.py b/ceph-proxy/charmhelpers/core/services/helpers.py index 3e6e30d2..5bf62dd5 100644 --- a/ceph-proxy/charmhelpers/core/services/helpers.py +++ b/ceph-proxy/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ def __init__(self, *args): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ def read_context(self, file_name): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/ceph-proxy/charmhelpers/core/strutils.py b/ceph-proxy/charmhelpers/core/strutils.py index 28c6b3f5..31366871 100644 --- a/ceph-proxy/charmhelpers/core/strutils.py +++ b/ceph-proxy/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/ceph-proxy/charmhelpers/core/templating.py b/ceph-proxy/charmhelpers/core/templating.py index 9014015c..cb0213dc 100644 --- a/ceph-proxy/charmhelpers/core/templating.py +++ b/ceph-proxy/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-proxy/charmhelpers/fetch/__init__.py b/ceph-proxy/charmhelpers/fetch/__init__.py index 9497ee05..1283f25b 100644 --- a/ceph-proxy/charmhelpers/fetch/__init__.py +++ b/ceph-proxy/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. 
Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/ceph-proxy/charmhelpers/fetch/archiveurl.py b/ceph-proxy/charmhelpers/fetch/archiveurl.py index d25587ad..2cb2e88b 100644 --- a/ceph-proxy/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + algorithms = hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/ceph-proxy/charmhelpers/fetch/centos.py b/ceph-proxy/charmhelpers/fetch/centos.py index a91dcff0..f8492018 100644 --- a/ceph-proxy/charmhelpers/fetch/centos.py +++ b/ceph-proxy/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/ceph-proxy/charmhelpers/fetch/python/debug.py b/ceph-proxy/charmhelpers/fetch/python/debug.py index 
757135ee..dd5cca80 100644 --- a/ceph-proxy/charmhelpers/fetch/python/debug.py +++ b/ceph-proxy/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/ceph-proxy/charmhelpers/fetch/python/packages.py b/ceph-proxy/charmhelpers/fetch/python/packages.py index 60048354..93f1fa3f 100644 --- a/ceph-proxy/charmhelpers/fetch/python/packages.py +++ b/ceph-proxy/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index cf8328f0..e6f8a0ad 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' 
in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e1776..6da355fd 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ def __getitem__(self, package): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ def __getitem__(self, package): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
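# The quiet-mode plumbing above is simpler on Python 3: subprocess.DEVNULL
# replaces the old open-os.devnull handling. Minimal sketch:
import subprocess

def run_quiet(cmd):
    """Run cmd, discarding stdout and stderr; returns the exit code."""
    return subprocess.call(
        cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)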
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 025ab866..a22462ec 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -1,4 +1,4 @@ -# Copyright 2017 Canonical Ltd +# Copyright 2017-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -293,7 +293,7 @@ def get_link_speed(network_interface): def persist_settings(settings_dict): # Write all settings to /etc/hdparm.conf - """ This will persist the hard drive settings to the /etc/hdparm.conf file + """This will persist the hard drive settings to the /etc/hdparm.conf file The settings_dict should be in the form of {"uuid": {"key":"value"}} @@ -552,7 +552,7 @@ def get_osd_weight(osd_id): :returns: Float :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our ceph command fails. + :raises: CalledProcessError if our Ceph command fails. """ try: tree = str(subprocess @@ -560,7 +560,7 @@ def get_osd_weight(osd_id): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['nodes']: return None for device in json_tree['nodes']: @@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'): def get_osd_tree(service): - """Returns the current osd map in JSON. + """Returns the current OSD map in JSON. :returns: List. :rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + Also raises CalledProcessError if our Ceph command fails """ try: tree = str(subprocess @@ -666,12 +666,12 @@ def _get_child_dirs(path): def _get_osd_num_from_dirname(dirname): """Parses the dirname and returns the OSD id. - Parses a string in the form of 'ceph-{osd#}' and returns the osd number + Parses a string in the form of 'ceph-{osd#}' and returns the OSD number from the directory name. :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided + :return int: the OSD number the directory name corresponds to + :raises ValueError: if the OSD number cannot be parsed from the provided directory name. """ match = re.search(r'ceph-(?P\d+)', dirname) @@ -686,7 +686,7 @@ def get_local_osd_ids(): to split the ID off of the directory name and return it in a list. - :returns: list. A list of osd identifiers + :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. 
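# Usage sketch for the now-public dpkg_list(), matching the wildcard example
# in the docstring above (the package pattern is illustrative):
from charmhelpers.fetch import apt_cache

installed = apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*'])
for name, info in installed.items():
    print(name, info.get('version'), info.get('architecture'))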
""" osd_ids = [] @@ -875,12 +875,12 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation ] @@ -984,7 +984,7 @@ def is_osd_disk(dev): def start_osds(devices): - # Scan for ceph block devices + # Scan for Ceph block devices rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): @@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None): 'get', key_name, ]).decode('UTF-8')).strip() - # NOTE(jamespage); - # Apply any changes to key capabilities, dealing with - # upgrades which requires new caps for operation. - upgrade_key_caps(key_name, - caps or _default_caps, - pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! @@ -1270,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None): def upgrade_key_caps(key, caps, pool_list=None): - """ Upgrade key to have capabilities caps """ + """Upgrade key to have capabilities caps""" if not is_leader(): # Not the MON leader OR not clustered return @@ -1304,11 +1298,11 @@ def use_bluestore(): def bootstrap_monitor_cluster(secret): - """Bootstrap local ceph mon into the ceph cluster + """Bootstrap local Ceph mon into the Ceph cluster :param secret: cephx secret to use for monitor authentication :type secret: str - :raises: Exception if ceph mon cannot be bootstrapped + :raises: Exception if Ceph mon cannot be bootstrapped """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1351,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): :type: secret: str :param hostname: hostname of the local unit :type hostname: str - :param path: full path to ceph mon directory + :param path: full path to Ceph mon directory :type path: str - :param done: full path to 'done' marker for ceph mon + :param done: full path to 'done' marker for Ceph mon :type done: str - :param init_marker: full path to 'init' marker for ceph mon + :param init_marker: full path to 'init' marker for Ceph mon :type init_marker: str """ subprocess.check_call(['ceph-authtool', keyring, @@ -1415,13 +1409,13 @@ def create_keyrings(): owner=ceph_user(), group=ceph_user(), perms=0o400) else: - # NOTE(jamespage): Later ceph releases require explicit + # NOTE(jamespage): Later Ceph releases require explicit # call to ceph-create-keys to setup the # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # ceph releases too. This improves bootstrap + # Ceph releases too. This improves bootstrap # resilience as the charm will wait for # presence of peer units before attempting # to bootstrap. 
Note that charms deploying @@ -1503,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False): def get_devices(name): - """ Merge config and juju storage based devices + """Merge config and Juju storage based devices - :name: THe name of the device type, eg: wal, osd, journal + :name: The name of the device type, e.g.: wal, osd, journal :returns: Set(device names), which are strings """ if config(name): @@ -1520,11 +1514,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER): + bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager) + bluestore, key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1534,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1547,7 +1542,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native ceph block device format + :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1599,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, bluestore, - key_manager) + key_manager, + osd_id) else: cmd = _ceph_disk(dev, osd_format, @@ -1683,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): + key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1695,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use + :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 'ceph-volume' command and required parameters for @@ -1716,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') + if osd_id is not None: + cmd.extend(['--osd-id', str(osd_id)]) + # On-disk journal volume creation if not osd_journal and not bluestore: journal_lv_type = 'journal' @@ -1840,7 +1840,7 @@ def get_conf(variable): Get the value of the given configuration variable from the cluster. - :param variable: ceph configuration variable + :param variable: Ceph configuration variable :returns: str. configured value for provided variable """ @@ -1860,7 +1860,7 @@ def calculate_volume_size(lv_type): :raises KeyError: if invalid lv_type is supplied :returns: int. 
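To make the new osd_id plumbing concrete, here is a hedged reduction of the command assembly; the base invocation and flag set are illustrative rather than copied from the charm, and only the --osd-id handling mirrors the hunk above:

    def build_ceph_volume_cmd(dev, bluestore=True, encrypt=False, osd_id=None):
        # Assemble an illustrative ceph-volume call; passing --osd-id
        # recycles an existing OSD id instead of letting Ceph allocate a
        # fresh one, as the patch above does.
        cmd = ['ceph-volume', 'lvm', 'create', '--data', dev]
        if bluestore:
            cmd.append('--bluestore')
        if encrypt:
            cmd.append('--dmcrypt')
        if osd_id is not None:
            cmd.extend(['--osd-id', str(osd_id)])
        return cmd

    assert '--osd-id' in build_ceph_volume_cmd('/dev/sdb', osd_id=7)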
Configured size in megabytes for volume type """ - # lv_type -> ceph configuration option + # lv_type -> Ceph configuration option _config_map = { 'db': 'bluestore_block_db_size', 'wal': 'bluestore_block_wal_size', @@ -1874,7 +1874,7 @@ def calculate_volume_size(lv_type): 'journal': 1024, } - # conversion of ceph config units to MB + # conversion of Ceph config units to MB _units = { 'db': 1048576, # Bytes -> MB 'wal': 1048576, # Bytes -> MB @@ -1907,7 +1907,7 @@ def _luks_uuid(dev): def _initialize_disk(dev, dev_uuid, encrypt=False, key_manager=CEPH_KEY_MANAGER): """ - Initialize a raw block device consuming 100% of the avaliable + Initialize a raw block device consuming 100% of the available disk space. Function assumes that block device has already been wiped. @@ -2004,7 +2004,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an osd. + """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize :param encrypt: bool. Should the OSD directory be encrypted at rest @@ -2074,11 +2074,11 @@ def get_running_osds(): def get_cephfs(service): """List the Ceph Filesystems that exist. - :param service: The service name to run the ceph command under - :returns: list. Returns a list of the ceph filesystems + :param service: The service name to run the Ceph command under + :returns: list. Returns a list of the Ceph filesystems """ if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph + # This command wasn't introduced until 0.86 Ceph return [] try: output = str(subprocess @@ -2157,7 +2157,7 @@ def roll_monitor_cluster(new_version, upgrade_key): sys.exit(1) log('monitor_list: {}'.format(monitor_list)) - # A sorted list of osd unit names + # A sorted list of OSD unit names mon_sorted_list = sorted(monitor_list) # Install packages immediately but defer restarts to when it's our time. @@ -2192,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key): wait_for_all_monitors_to_upgrade(new_version=new_version, upgrade_key=upgrade_key) bootstrap_manager() + + # NOTE(jmcvaughn): + # Nautilus and later binaries use msgr2 by default, but existing + # clusters that have been upgraded from pre-Nautilus will not + # automatically have msgr2 enabled. Without this, Ceph will show + # a warning only (with no impact to operations), but newly added units + # will not be able to join the cluster. Therefore, we ensure it is + # enabled on upgrade for all versions including and after Nautilus + # (to cater for previous charm versions that will not have done this). + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 + if nautilus_or_later: + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + enable_msgr2() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) @@ -2204,7 +2218,7 @@ def noop(): def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current ceph monitor to the new version + """Upgrade the current Ceph monitor to the new version :param new_version: String version to upgrade to. 
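The msgr2 note above compresses into a small post-upgrade step. A sketch, with the charm's cmp_pkgrevno check replaced by a naive major-version comparison purely for illustration:

    import subprocess

    def maybe_enable_msgr2(ceph_common_version):
        # msgr2 is available from Nautilus (14.x) onwards; 'ceph mon
        # enable-msgr2' is safe to repeat, so running it on every upgrade
        # also covers clusters upgraded before the charm performed it.
        if int(ceph_common_version.split('.')[0]) >= 14:
            subprocess.check_call(['ceph', 'mon', 'enable-msgr2'])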
""" @@ -2212,18 +2226,19 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - + # Needed to differentiate between systemd unit names + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the Ceph source failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2246,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): try: if systemd(): - service_stop('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_stop(systemd_unit) log("restarting ceph-mgr.target maybe: {}" .format(luminous_or_later)) if luminous_or_later: @@ -2277,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): perms=0o755) if systemd(): - service_restart('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_restart(systemd_unit) log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) if luminous_or_later: # due to BUG: #1849874 we have to force a restart to get it to @@ -2294,7 +2317,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the ceph monitor cluster and upgrade. + """Create a lock on the Ceph monitor cluster and upgrade. :param upgrade_key: str. The cephx key to use :param service: str. The cephx id to use @@ -2443,7 +2466,7 @@ class WatchDog(object): allow for other delays. There is a compatibility mode where if the otherside never kicks, then it - simply waits for the compatability timer. + simply waits for the compatibility timer. """ class WatchDogDeadException(Exception): @@ -2578,11 +2601,11 @@ def timed_wait(kicked_at_function, def get_upgrade_position(osd_sorted_list, match_name): - """Return the upgrade position for the given osd. + """Return the upgrade position for the given OSD. - :param osd_sorted_list: Osds sorted + :param osd_sorted_list: OSDs sorted :type osd_sorted_list: [str] - :param match_name: The osd name to match + :param match_name: The OSD name to match :type match_name: str :returns: The position of the name :rtype: int @@ -2591,20 +2614,20 @@ def get_upgrade_position(osd_sorted_list, match_name): for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" + raise ValueError("OSD name '{}' not found in get_upgrade_position list" .format(match_name)) # Edge cases: # 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. +# 2. This assumes that the OSD failure domain is not set to OSD. # It rolls an entire server at a time. 
def roll_osd_cluster(new_version, upgrade_key): """This is tricky to get right so here's what we're going to do. There's 2 possible cases: Either I'm first in line or not. If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. + and test to see if the previous OSD is upgraded yet. TODO: If you're not in the same failure domain it's safe to upgrade 1. Examine all pools and adopt the most strict failure domain policy @@ -2620,7 +2643,7 @@ def roll_osd_cluster(new_version, upgrade_key): log('roll_osd_cluster called with {}'.format(new_version)) my_name = socket.gethostname() osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names + # A sorted list of OSD unit names osd_sorted_list = sorted(osd_tree) log("osd_sorted_list: {}".format(osd_sorted_list)) @@ -2655,7 +2678,7 @@ def roll_osd_cluster(new_version, upgrade_key): def upgrade_osd(new_version, kick_function=None): - """Upgrades the current osd + """Upgrades the current OSD :param new_version: str. The new version to upgrade to """ @@ -2663,15 +2686,15 @@ def upgrade_osd(new_version, kick_function=None): kick_function = noop current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) + status_set("maintenance", "Upgrading OSD") + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( + log("Adding the Ceph sources failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2685,7 +2708,7 @@ def upgrade_osd(new_version, kick_function=None): kick_function() # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart + # directories in the OSD service directory, then simply restart # all of the OSDs at the same time as this will be the fastest # way to update the code on the node. if not dirs_need_ownership_update('osd'): @@ -2700,7 +2723,7 @@ def upgrade_osd(new_version, kick_function=None): # Need to change the ownership of all directories which are not OSD # directories as well. # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. + # and done before mon/OSD. update_owner(CEPH_BASE_DIR, recurse_dirs=False) non_osd_dirs = filter(lambda x: not x == 'osd', os.listdir(CEPH_BASE_DIR)) @@ -2721,12 +2744,12 @@ def upgrade_osd(new_version, kick_function=None): _upgrade_single_osd(osd_num, osd_dir) except ValueError as ex: # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), + log('Could not parse OSD directory %s: %s' % (osd_dir, ex), WARNING) continue except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " + log("Stopping Ceph and upgrading packages failed " "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2753,7 +2776,7 @@ def _upgrade_single_osd(osd_num, osd_dir): def stop_osd(osd_num): """Stops the specified OSD number. 
- :param osd_num: the osd number to stop + :param osd_num: the OSD number to stop """ if systemd(): service_stop('ceph-osd@{}'.format(osd_num)) @@ -2764,7 +2787,7 @@ def stop_osd(osd_num): def start_osd(osd_num): """Starts the specified OSD number. - :param osd_num: the osd number to start. + :param osd_num: the OSD number to start. """ if systemd(): service_start('ceph-osd@{}'.format(osd_num)) @@ -2775,12 +2798,12 @@ def start_osd(osd_num): def disable_osd(osd_num): """Disables the specified OSD number. - Ensures that the specified osd will not be automatically started at the + Ensures that the specified OSD will not be automatically started at the next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be + this method cannot make any guarantees that the specified OSD cannot be started manually. - :param osd_num: the osd id which should be disabled. + :param osd_num: the OSD id which should be disabled. :raises CalledProcessError: if an error occurs invoking the systemd cmd to disable the OSD :raises IOError, OSError: if the attempt to read/remove the ready file in @@ -2820,7 +2843,7 @@ def enable_osd(osd_num): :param osd_num: the osd id which should be enabled. :raises CalledProcessError: if the call to the systemd command issued fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart + :raises IOError: if the attempt to write the ready file in an upstart enabled system fails """ if systemd(): @@ -2828,7 +2851,7 @@ def enable_osd(osd_num): subprocess.check_call(cmd) else: # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' + # upstart script which will only start the OSD if it has a 'ready' # file. Make sure that file exists. ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), 'ready') @@ -2881,7 +2904,7 @@ def get_osd_state(osd_num, osd_goal_state=None): If osd_goal_state is not None, loop until the current OSD state matches the OSD goal state. - :param osd_num: the osd id to get state for + :param osd_num: the OSD id to get state for :param osd_goal_state: (Optional) string indicating state to wait for Defaults to None :returns: Returns a str, the OSD state. @@ -2942,7 +2965,7 @@ def maintain_osd_state(osd_num): Ensures the state of an OSD is the same at the end of a block nested in a with statement as it was at the beginning of the block. - :param osd_num: the osd id to maintain state for + :param osd_num: the OSD id to maintain state for """ osd_state = get_osd_state(osd_num) try: @@ -2969,9 +2992,9 @@ def maintain_all_osd_states(): def list_pools(client='admin'): """This will list the current pools that Ceph has - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Returns a list of available pools. :rtype: list :raises: subprocess.CalledProcessError if the subprocess fails to run. 
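Several helpers in this region share one pattern: shell out to the Ceph CLI as a named client and parse the JSON it returns. A minimal hedged example of that pattern; the exact command list_pools wraps may differ:

    import json
    import subprocess

    def list_pool_names(client='admin'):
        # Ask the cluster for its pools in JSON and return only the names.
        out = subprocess.check_output(
            ['ceph', '--id', client, 'osd', 'lspools', '--format=json'])
        return [pool['poolname'] for pool in json.loads(out.decode('UTF-8'))]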
@@ -2996,9 +3019,9 @@ def get_pool_param(pool, param, client='admin'): :type pool: str :param param: Name of variable to get :type param: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Value of variable on pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3020,9 +3043,9 @@ def get_pool_erasure_profile(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Erasure code profile of pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3041,9 +3064,9 @@ def get_pool_quota(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with quota variables :rtype: dict :raises: subprocess.CalledProcessError @@ -3066,9 +3089,9 @@ def get_pool_applications(pool='', client='admin'): :param pool: (Optional) Name of pool to get applications for Defaults to get for all pools :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with pool name as key :rtype: dict :raises: subprocess.CalledProcessError @@ -3131,7 +3154,7 @@ def dirs_need_ownership_update(service): necessary due to the upgrade from Hammer to Jewel where the daemon user changes from root: to ceph:. - :param service: the name of the service folder to check (e.g. osd, mon) + :param service: the name of the service folder to check (e.g. OSD, mon) :returns: boolean. True if the directories need a change of ownership, False otherwise. :raises IOError: if an error occurs reading the file stats from one of @@ -3161,7 +3184,7 @@ def dirs_need_ownership_update(service): return False -# A dict of valid ceph upgrade paths. Mapping is old -> new +# A dict of valid Ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), ('hammer', 'jewel'), @@ -3173,7 +3196,7 @@ def dirs_need_ownership_update(service): ('pacific', 'quincy'), ]) -# Map UCA codenames to ceph codenames +# Map UCA codenames to Ceph codenames UCA_CODENAME_MAP = { 'icehouse': 'firefly', 'juno': 'firefly', @@ -3196,24 +3219,24 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for ceph""" + """Pretty print supported upgrade paths for Ceph""" return ["{} -> {}".format(key, value) for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): - """Resolves a version of ceph based on source configuration + """Resolves a version of Ceph based on source configuration based on Ubuntu Cloud Archive pockets. @param: source: source configuration option of charm - :returns: ceph release codename or None if not resolvable + :returns: Ceph release codename or None if not resolvable """ os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) def get_ceph_pg_stat(): - """Returns the result of ceph pg stat. + """Returns the result of 'ceph pg stat'. 
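The UPGRADE_PATHS table earlier in this hunk is a plain old-to-new mapping, so the full hop sequence from any release falls out of a simple walk. A short sketch with the mapping abbreviated to the entries visible above:

    import collections

    # Abbreviated copy of the mapping above; intermediate releases elided.
    UPGRADE_PATHS = collections.OrderedDict([
        ('firefly', 'hammer'),
        ('hammer', 'jewel'),
        ('pacific', 'quincy'),
    ])

    def upgrade_chain(release):
        # Follow old -> new links until no further hop is defined.
        chain = [release]
        while chain[-1] in UPGRADE_PATHS:
            chain.append(UPGRADE_PATHS[chain[-1]])
        return chain

    assert upgrade_chain('firefly') == ['firefly', 'hammer', 'jewel']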
:returns: dict """ @@ -3248,7 +3271,7 @@ def get_ceph_health(): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['overall_status']: return None @@ -3265,7 +3288,7 @@ def get_ceph_health(): def reweight_osd(osd_num, new_weight): """Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed + :param osd_num: the OSD id which should be changed :param new_weight: the new weight for the OSD :returns: bool. True if output looks right, else false. :raises CalledProcessError: if an error occurs invoking the systemd cmd @@ -3292,7 +3315,7 @@ def reweight_osd(osd_num, new_weight): def determine_packages(): """Determines packages for installation. - :returns: list of ceph packages + :returns: list of Ceph packages """ packages = PACKAGES.copy() if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': @@ -3338,6 +3361,16 @@ def bootstrap_manager(): service_restart(unit) +def enable_msgr2(): + """ + Enables msgr2 + + :raises: subprocess.CalledProcessError if the command fails + """ + cmd = ['ceph', 'mon', 'enable-msgr2'] + subprocess.check_call(cmd) + + def osd_noout(enable): """Sets or unsets 'noout' @@ -3361,12 +3394,12 @@ def osd_noout(enable): class OSDConfigSetError(Exception): - """Error occured applying OSD settings.""" + """Error occurred applying OSD settings.""" pass def apply_osd_settings(settings): - """Applies the provided osd settings + """Applies the provided OSD settings Apply the provided settings to all local OSD unless settings are already present. Settings stop being applied on encountering an error. @@ -3391,7 +3424,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error retrieving osd setting: {}".format(out['error']), + log("Error retrieving OSD setting: {}".format(out['error']), level=ERROR) return False current_settings[key] = out[cli_key] @@ -3408,7 +3441,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error applying osd setting: {}".format(out['error']), + log("Error applying OSD setting: {}".format(out['error']), level=ERROR) raise OSDConfigSetError return True @@ -3478,7 +3511,7 @@ def mgr_disable_module(module): def ceph_config_set(name, value, who): - """Set a ceph config option + """Set a Ceph config option :param name: key to set :type name: str @@ -3496,7 +3529,7 @@ def ceph_config_set(name, value, who): def ceph_config_get(name, who): - """Retrieve the value of a ceph config option + """Retrieve the value of a Ceph config option :param name: key to lookup :type name: str diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 34e02da9..3ffef3fd 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,7 +11,7 @@ tags: - misc series: - focal -- impish +- jammy extra-bindings: public: cluster: diff --git a/ceph-proxy/tests/bundles/impish-xena-ec.yaml b/ceph-proxy/tests/bundles/impish-xena-ec.yaml deleted file mode 100644 index ff4096f6..00000000 --- a/ceph-proxy/tests/bundles/impish-xena-ec.yaml +++ /dev/null @@ -1,228 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - channel: quincy/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - channel: quincy/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - channel: yoga/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - channel: yoga/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 
'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/impish-xena.yaml b/ceph-proxy/tests/bundles/impish-xena.yaml deleted file mode 100644 index 0710d61b..00000000 --- a/ceph-proxy/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,199 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - channel: quincy/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - channel: yoga/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 
'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 691fcc44..fba0805b 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -14,8 +14,6 @@ tests: gate_bundles: - focal-xena - erasure-coded: focal-xena-ec - - impish-xena - - erasure-coded: impish-xena-ec dev_bundles: - focal-yoga @@ -48,7 +46,5 @@ target_deploy_status: tests_options: force_deploy: - - impish-xena - - impish-xena-ec - jammy-yoga - jammy-yoga-ec From 52d8976436d0da2b08f636dd9a26f234005ea366 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 5 Apr 2022 11:52:25 -0700 Subject: [PATCH 2373/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata - Default source is yoga - Resync charmhelpers and charms.ceph Change-Id: Ib62d7f882f22146419dfe920045b73452f9af2cb --- ceph-mon/charmcraft.yaml | 17 +- ceph-mon/config.yaml | 2 +- .../charmhelpers/contrib/openstack/context.py | 18 +- .../files/check_deferred_restarts.py | 128 +++++++++ ceph-mon/hooks/charmhelpers/core/host.py | 29 +- ceph-mon/metadata.yaml | 2 +- ceph-mon/osci.yaml | 2 +- ceph-mon/test-requirements.txt | 7 + ceph-mon/tests/bundles/impish-xena.yaml | 252 ------------------ ceph-mon/tests/tests.yaml | 6 +- ceph-mon/tox.ini | 10 +- 11 files changed, 197 insertions(+), 276 deletions(-) create mode 100755 ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py delete mode 100644 ceph-mon/tests/bundles/impish-xena.yaml diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index ba84f314..b3a85236 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -21,7 +21,16 @@ parts: - README.md bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] + diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 9b910b35..c2509362 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -10,7 +10,7 @@ options: If set to True, supporting services will log to syslog. source: type: string - default: + default: yoga description: | Optional configuration to support use of additional sources such as: . diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py index 8522641b..32c69ff7 100644 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py @@ -118,12 +118,7 @@ ) from charmhelpers.core.unitdata import kv -try: - from sriov_netplan_shim import pci -except ImportError: - # The use of the function and contexts that require the pci module is - # optional. 
- pass +from charmhelpers.contrib.hardware import pci try: import psutil @@ -426,6 +421,9 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): ('password', ctxt.get('admin_password', '')), ('signing_dir', ctxt.get('signing_dir', '')),)) + if ctxt.get('service_type'): + c.update((('service_type', ctxt.get('service_type')),)) + return c def __call__(self): @@ -468,6 +466,9 @@ def __call__(self): 'internal_protocol': int_protocol, 'api_version': api_version}) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({ 'admin_domain_name': rdata.get('service_domain'), @@ -539,6 +540,9 @@ def __call__(self): 'api_version': api_version }) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({'admin_domain_name': rdata.get('domain')}) @@ -3120,7 +3124,7 @@ def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. :param device: Object describing a PCI Network interface card (NIC)/ - :type device: sriov_netplan_shim.pci.PCINetDevice + :type device: contrib.hardware.pci.PCINetDevice :param sriov_numvfs: Number of VFs requested for blanket configuration. :type sriov_numvfs: int :returns: Number of VFs to configure for device diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py new file mode 100755 index 00000000..5f392b3c --- /dev/null +++ b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py @@ -0,0 +1,128 @@ +#!/usr/bin/python3 + +# Copyright 2014-2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Checks for services with deferred restarts. + +This Nagios check will parse /var/lib/policy-rd.d/ +to find any restarts that are currently deferred. +""" + +import argparse +import glob +import sys +import yaml + + +DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' + + +def get_deferred_events(): + """Return a list of deferred events dicts from policy-rc.d files. + + Events are read from DEFERRED_EVENTS_DIR and are of the form: + { + action: restart, + policy_requestor_name: rabbitmq-server, + policy_requestor_type: charm, + reason: 'Pkg update', + service: rabbitmq-server, + time: 1614328743 + } + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of deferred event dictionaries + :rtype: list + """ + deferred_events_files = glob.glob( + '{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + deferred_events = [] + for event_file in deferred_events_files: + with open(event_file, 'r') as f: + event = yaml.safe_load(f) + deferred_events.append(event) + + return deferred_events + + +def get_deferred_restart_services(application=None): + """Returns a list of services with deferred restarts. 
+ + :param str application: Name of the application that blocked the service restart. + If application is None, all services with deferred restarts + are returned. Services which are blocked by a non-charm + requestor are always returned. + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of services with deferred restarts belonging to application. + :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py index 2b0a36fb..ad2cab46 100644 --- a/ceph-mon/hooks/charmhelpers/core/host.py +++ b/ceph-mon/hooks/charmhelpers/core/host.py @@ -114,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_enable('ceph-osd', id=4) + + :param service_name: the name of the service to enable + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). 
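Besides running as a Nagios plugin, check_deferred_restarts.py can be driven as a library. A hedged example, assuming the file is importable under that module name:

    # Assumes check_deferred_restarts.py is on PYTHONPATH under this name.
    from check_deferred_restarts import get_deferred_restart_services

    # Restrict the report to restarts deferred by (or not attributable to)
    # a single charm, as the --application flag does.
    services = get_deferred_restart_services(application='rabbitmq-server')
    if services:
        print('CRITICAL: Restarts are deferred for: {}'.format(', '.join(services)))
    else:
        print('OK: No deferred service restarts.')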
+ """ + return service('enable', service_name, **kwargs) + + def service_restart(service_name, **kwargs): """Restart a system service. @@ -134,7 +161,7 @@ def service_restart(service_name, **kwargs): :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). """ diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 1b7039b6..cbc8e4d6 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -11,7 +11,7 @@ tags: - misc series: - focal -- impish +- jammy peers: mon: interface: ceph diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 2234cbeb..f2ffe001 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -1,7 +1,7 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 - charm-xena-functional-jobs - charm-yoga-functional-jobs vars: diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 0aabe171..e9401604 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -11,6 +11,13 @@ pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 + requests>=2.18.4 stestr>=2.2.0 diff --git a/ceph-mon/tests/bundles/impish-xena.yaml b/ceph-mon/tests/bundles/impish-xena.yaml deleted file mode 100644 index 08c59e30..00000000 --- a/ceph-mon/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,252 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - series: focal - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: 8.0.19/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-mon: - charm: ../../ceph-mon.charm - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 - num_units: 1 - series: focal - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - 
- - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 453bcaad..9246aa2d 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -2,14 +2,13 @@ charm_name: ceph-mon gate_bundles: - focal-xena - - impish-xena + - focal-yoga dev_bundles: - - focal-yoga - jammy-yoga smoke_bundles: - - focal-xena + - focal-yoga configure: - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -24,5 +23,4 @@ tests: tests_options: force_deploy: - - impish-xena - jammy-yoga diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 81fd2492..f4e8a47c 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -51,11 +51,6 @@ commands = charmcraft -v build {toxinidir}/rename.sh -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt @@ -76,6 +71,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 2bc52b647b12e7af3e12af4d8bc4898130282520 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 28 Mar 2022 15:15:49 -0300 Subject: [PATCH 2374/2699] Add SAML support to ceph-dashboard This patchset adds support to setup authentication via the SAML protocol for the ceph-dashboard. Change-Id: I96c0d856d173a76739a6d2a9d4ad4811d3d196c3 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/741 --- ceph-dashboard/config.yaml | 20 +++ ceph-dashboard/metadata.yaml | 1 + ceph-dashboard/src/charm.py | 39 +++++- ceph-dashboard/tests/bundles/focal-yoga.yaml | 122 ++++++++++++++++++ .../unit_tests/test_ceph_dashboard_charm.py | 28 ++++ 5 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 ceph-dashboard/tests/bundles/focal-yoga.yaml diff --git a/ceph-dashboard/config.yaml b/ceph-dashboard/config.yaml index 8fcbd1f8..1460d023 100644 --- a/ceph-dashboard/config.yaml +++ b/ceph-dashboard/config.yaml @@ -83,6 +83,26 @@ options: default: "" description: | Message of the day settings. Should be in the format "severity|expires|message". Set to "" to disable. + saml-base-url: + type: string + default: "" + description: | + The base URL from where the Ceph dashboard is accessed. Must support the SAML protocol. + saml-idp-metadata: + type: string + default: "" + description: | + URL that points to the IdP metadata XML. Can be remote or local. 
+ saml-username-attribute: + type: string + default: "" + description: | + The attribute that is used to get the username from the authentication response. + saml-idp-entity-id: + type: string + default: "uid" + description: | + Unique ID to disambiguate when more than one entity id exists on the IdP metadata. ssl_cert: type: string default: diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index ea47f3e2..a35e9cea 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -18,6 +18,7 @@ series: - focal - groovy - hirsute +- jammy requires: dashboard: interface: ceph-dashboard diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 650dcc23..a3d3a2f7 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -14,6 +14,7 @@ from ops.main import main from ops.model import ActiveStatus, BlockedStatus, StatusBase from ops.charm import ActionEvent +from ops_openstack.core import charm_class, get_charm_class_for_release from typing import List, Union, Tuple import base64 @@ -425,6 +426,7 @@ def _configure_dashboard(self, _) -> None: ceph_utils.mgr_enable_dashboard() self._apply_ceph_config_from_charm_config() self._configure_tls() + self._configure_saml() ceph_utils.mgr_config_set( 'mgr/dashboard/{hostname}/server_addr'.format( hostname=socket.gethostname()), @@ -568,6 +570,26 @@ def _configure_tls(self) -> None: self.TLS_KEY_PATH) self.kick_dashboard() + def _configure_saml(self) -> None: + if 'python3-onelogin-saml2' not in self.PACKAGES: + return + + base_url = self.config.get('saml-base-url') + idp_metadata = self.config.get('saml-idp-metadata') + if not base_url or not idp_metadata: + return + + cmd = ['ceph', 'dashboard', 'sso', 'setup', 'saml2', + base_url, idp_metadata] + username_attr = self.config.get('saml-username-attribute') + if username_attr: + cmd.append(username_attr) + idp_entity_id = self.config.get('saml-idp-entity-id') + if idp_entity_id: + cmd.append(idp_entity_id) + + self._run_cmd(cmd) + def _gen_user_password(self, length: int = 12) -> str: """Generate a password""" alphabet = ( @@ -604,5 +626,20 @@ def _delete_user_action(self, event: ActionEvent) -> None: event.fail(exc.output) +@charm_class +class CephDashboardCharmOctopus(CephDashboardCharm): + + _stored = StoredState() + release = 'octopus' + + +@charm_class +class CephDashboardCharmQuincy(CephDashboardCharm): + + _stored = StoredState() + PACKAGES = ['ceph-mgr-dashboard', 'python3-onelogin-saml2'] + release = 'quincy' + + if __name__ == "__main__": - main(CephDashboardCharm) + main(get_charm_class_for_release()) diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..d0b688c7 --- /dev/null +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -0,0 +1,122 @@ +local_overlay_enabled: False +series: focal +variables: + openstack-origin: &openstack-origin cloud:focal-yoga +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: quincy/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: quincy/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + options: + source: *openstack-origin + channel: latest/edge + vault-mysql-router: + charm: 
ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + source: *openstack-origin + prometheus: + charm: cs:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # October 2021 + charm: ch:grafana + num_units: 1 + options: + anonymous: True + install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: cs:prometheus-alertmanager + num_units: 1 + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: latest/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: latest/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: latest/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 5542120e..96c749c4 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -679,3 +679,31 @@ def test__delete_user_action(self): self.subprocess.check_output.assert_called_once_with( ['ceph', 'dashboard', 'ac-user-delete', 'auser'], stderr=self.subprocess.STDOUT) + + def test_saml(self): + self.subprocess.check_output.return_value = b'' + self.harness.begin() + self.harness.charm.PACKAGES.append('python3-onelogin-saml2') + self.harness.charm._configure_saml() + self.subprocess.check_output.assert_not_called() + + base_url = 'https://saml-base' + idp_meta = 'file://idp.xml' + username_attr = 'uid' + entity_id = 'some_id' + + self.harness.update_config( + key_values={ + 'saml-base-url': base_url, + 'saml-idp-metadata': idp_meta, + 'saml-username-attribute': username_attr, + 'saml-idp-entity-id': 
entity_id, + } + ) + + self.harness.charm._configure_saml() + self.subprocess.check_output.assert_called_with( + ['ceph', 'dashboard', 'sso', 'setup', 'saml2', + base_url, idp_meta, username_attr, entity_id], + stderr=ANY + ) From 3effc6f60cf20a827b688b4025164d88f60c3885 Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Tue, 5 Apr 2022 12:41:44 -0700 Subject: [PATCH 2375/2699] Updates for jammy quincy and finalise charmcraft builds - Update charmcraft.yaml for arch builds - Remove impish-xena tests - Update osci.yaml to include py3.10 default job - Resync charmhelpers and charms.ceph Change-Id: I41abace773d8926eaa00076dd83f52849c96150d --- ceph-osd/charm-helpers-hooks.yaml | 1 + ceph-osd/charmcraft.yaml | 14 +- ceph-osd/hooks/charmhelpers/__init__.py | 17 +- ceph-osd/hooks/charmhelpers/cli/__init__.py | 13 +- .../charmhelpers/contrib/charmsupport/nrpe.py | 40 ++- .../charmhelpers/contrib/hahelpers/cluster.py | 15 +- .../contrib/hardening/apache/checks/config.py | 5 +- .../contrib/hardening/audits/apache.py | 8 +- .../contrib/hardening/audits/apt.py | 5 +- .../contrib/hardening/audits/file.py | 3 +- .../charmhelpers/contrib/hardening/harden.py | 13 +- .../contrib/hardening/host/checks/login.py | 4 +- .../contrib/hardening/host/checks/sysctl.py | 7 +- .../contrib/hardening/mysql/checks/config.py | 7 +- .../contrib/hardening/templating.py | 6 +- .../charmhelpers/contrib/hardening/utils.py | 3 +- .../charmhelpers/contrib/hardware/__init__.py | 13 + .../charmhelpers/contrib/hardware/pci.py | 288 ++++++++++++++++++ .../hooks/charmhelpers/contrib/network/ip.py | 23 +- .../charmhelpers/contrib/openstack/context.py | 64 ++-- .../files/check_deferred_restarts.py | 128 ++++++++ .../contrib/openstack/keystone.py | 12 +- .../charmhelpers/contrib/openstack/neutron.py | 10 +- .../charmhelpers/contrib/openstack/policyd.py | 46 +-- .../contrib/openstack/templating.py | 27 +- .../charmhelpers/contrib/openstack/utils.py | 110 ++++--- ceph-osd/hooks/charmhelpers/contrib/python.py | 2 - .../contrib/storage/linux/ceph.py | 99 +++--- .../contrib/storage/linux/loopback.py | 10 +- ceph-osd/hooks/charmhelpers/core/hookenv.py | 81 +++-- ceph-osd/hooks/charmhelpers/core/host.py | 41 ++- .../hooks/charmhelpers/core/services/base.py | 7 +- .../charmhelpers/core/services/helpers.py | 4 +- ceph-osd/hooks/charmhelpers/core/strutils.py | 9 +- .../hooks/charmhelpers/core/templating.py | 11 +- ceph-osd/hooks/charmhelpers/fetch/__init__.py | 10 +- .../hooks/charmhelpers/fetch/archiveurl.py | 29 +- ceph-osd/hooks/charmhelpers/fetch/centos.py | 7 +- .../hooks/charmhelpers/fetch/python/debug.py | 2 - .../charmhelpers/fetch/python/packages.py | 14 +- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 37 +-- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 25 +- ceph-osd/lib/charms_ceph/utils.py | 43 ++- ceph-osd/osci.yaml | 3 +- ceph-osd/tests/bundles/impish-xena.yaml | 237 -------------- ceph-osd/tests/tests.yaml | 4 +- 46 files changed, 853 insertions(+), 704 deletions(-) create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardware/__init__.py create mode 100644 ceph-osd/hooks/charmhelpers/contrib/hardware/pci.py create mode 100755 ceph-osd/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py delete mode 100644 ceph-osd/tests/bundles/impish-xena.yaml diff --git a/ceph-osd/charm-helpers-hooks.yaml b/ceph-osd/charm-helpers-hooks.yaml index ca383631..da52e9c8 100644 --- a/ceph-osd/charm-helpers-hooks.yaml +++ b/ceph-osd/charm-helpers-hooks.yaml @@ -22,5 +22,6 @@ include: - utils - contrib.charmsupport - 
contrib.hardening|inc=* + - contrib.hardware - contrib.openstack.policyd - contrib.openstack.templates|inc=*/section-ceph-bluestore-compression diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index d160fc85..4190b63d 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -26,18 +26,10 @@ bases: channel: "20.04" architectures: - amd64 - - s390x - - ppc64el - - arm64 - - name: ubuntu - channel: "22.04" - architectures: - - amd64 - - s390x - - ppc64el - - arm64 run-on: - name: ubuntu channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.04" \ No newline at end of file + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-osd/hooks/charmhelpers/__init__.py b/ceph-osd/hooks/charmhelpers/__init__.py index 1f57ed2a..ddf30450 100644 --- a/ceph-osd/hooks/charmhelpers/__init__.py +++ b/ceph-osd/hooks/charmhelpers/__init__.py @@ -14,30 +14,15 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - import functools import inspect import subprocess -import sys -try: - import six # NOQA:F401 -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/ceph-osd/hooks/charmhelpers/cli/__init__.py b/ceph-osd/hooks/charmhelpers/cli/__init__.py index 74ea7295..2b0c4b7a 100644 --- a/ceph-osd/hooks/charmhelpers/cli/__init__.py +++ b/ceph-osd/hooks/charmhelpers/cli/__init__.py @@ -16,9 +16,6 @@ import argparse import sys -import six -from six.moves import zip - import charmhelpers.core.unitdata @@ -149,10 +146,7 @@ def wrapper(decorated): def run(self): "Run cli, processing arguments and executing subcommands." 
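+        # py3-only now: inspect.getfullargspec replaces the removed
+        # six.PY2 branch.  For a handler like f(a, b, flag=False) it
+        # yields args=['a', 'b', 'flag'] and defaults=(False,), which the
+        # loop below walks to build the positional argument list.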
arguments = self.argument_parser.parse_args() - if six.PY2: - argspec = inspect.getargspec(arguments.func) - else: - argspec = inspect.getfullargspec(arguments.func) + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -177,10 +171,7 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - if six.PY2: - argspec = inspect.getargspec(func) - else: - argspec = inspect.getfullargspec(func) + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8d1753c3..bad7a533 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,6 +28,7 @@ import yaml from charmhelpers.core.hookenv import ( + application_name, config, hook_name, local_unit, @@ -174,7 +175,8 @@ def _locate_cmd(self, check_cmd): if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - command += " " + " ".join(parts[1:]) + safe_args = [shlex.quote(arg) for arg in parts[1:]] + command += " " + " ".join(safe_args) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) + + +def add_deferred_restarts_check(nrpe): + """ + Add NRPE check for services with deferred restarts. + + :param NRPE nrpe: NRPE object to add check to + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Adding deferred restarts nrpe check: {}'.format(shortname)) + nrpe.add_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) + + +def remove_deferred_restarts_check(nrpe): + """ + Remove NRPE check for services with deferred service restarts. 
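+
+    Mirrors add_deferred_restarts_check() above; both are typically
+    invoked from a charm's nrpe-external-master hooks, e.g. (sketch,
+    hook wiring assumed):
+
+        checks = NRPE()
+        add_deferred_restarts_check(checks)
+        checks.write()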
+ + :param NRPE nrpe: NRPE object to remove check from + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Removing deferred restarts nrpe check: {}'.format(shortname)) + nrpe.remove_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index f0b629a2..146beba6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -32,8 +32,6 @@ from socket import gethostname as get_unit_hostname -import six - from charmhelpers.core.hookenv import ( log, relation_ids, @@ -125,16 +123,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) + # - partition with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError: status = None diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py index 341da9ee..e81a5f0b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,7 +14,6 @@ import os import re -import six import subprocess @@ -95,9 +94,7 @@ def __call__(self): settings = utils.get_settings('apache') ctxt = settings['hardening'] - out = subprocess.check_output(['apache2', '-v']) - if six.PY3: - out = out.decode('utf-8') + out = subprocess.check_output(['apache2', '-v']).decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py index c1537625..31db8f62 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -15,8 +15,6 @@ import re import subprocess -import six - from charmhelpers.core.hookenv import ( log, INFO, @@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, six.string_types): + elif isinstance(modules, str): self.modules = [modules] else: self.modules = modules @@ -68,9 +66,7 @@ def ensure_compliance(self): 
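+    # (check_output() returns bytes on py3, hence the explicit
+    # .decode('utf-8') now used at each call site in these hunks.)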
@staticmethod def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']) - if six.PY3: - output = output.decode('utf-8') + output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py index cad7bf73..1b22925b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/apt.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # required for external apt import -from six import string_types - from charmhelpers.fetch import ( apt_cache, apt_purge @@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): self.pkgs = pkgs.split() else: self.pkgs = pkgs diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py index 257c6351..84cc2494 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/audits/file.py @@ -23,7 +23,6 @@ check_call, ) from traceback import format_exc -from six import string_types from stat import ( S_ISGID, S_ISUID @@ -63,7 +62,7 @@ def __init__(self, paths, always_comply=False, *args, **kwargs): """ super(BaseFileAudit, self).__init__(*args, **kwargs) self.always_comply = always_comply - if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + if isinstance(paths, str) or not hasattr(paths, '__iter__'): self.paths = [paths] else: self.paths = paths diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py index 63f21b9c..45ad076d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/harden.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from collections import OrderedDict from charmhelpers.core.hookenv import ( @@ -53,18 +51,17 @@ def harden(overrides=None): overrides = [] def _harden_inner1(f): - # As this has to be py2.7 compat, we can't use nonlocal. Use a trick - # to capture the dictionary that can then be updated. - _logged = {'done': False} + _logged = False def _harden_inner2(*args, **kwargs): # knock out hardening via a config var; normally it won't get # disabled. 
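+            # 'nonlocal' rebinds _logged in the enclosing _harden_inner1
+            # scope -- the py3 idiom that replaces the mutable-dict
+            # workaround deleted above.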
+ nonlocal _logged if _DISABLE_HARDENING_FOR_UNIT_TEST: return f(*args, **kwargs) - if not _logged['done']: + if not _logged: log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged['done'] = True + _logged = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), @@ -74,7 +71,7 @@ def _harden_inner2(*args, **kwargs): if enabled: modules_to_run = [] # modules will always be performed in the following order - for module, func in six.iteritems(RUN_CATALOG): + for module, func in RUN_CATALOG.items(): if module in enabled: enabled.remove(module) modules_to_run.append(func) diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py index fe2bc6ef..fd500c8b 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/login.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types - from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR from charmhelpers.contrib.hardening import utils @@ -41,7 +39,7 @@ def __call__(self): # a string assume it to be octal and turn it into an octal # string. umask = settings['environment']['umask'] - if not isinstance(umask, string_types): + if not isinstance(umask, str): umask = '%s' % oct(umask) ctxt = { diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py index f1ea5813..8a57d83d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -15,7 +15,6 @@ import os import platform import re -import six import subprocess from charmhelpers.core.hookenv import ( @@ -183,9 +182,9 @@ def __call__(self): ctxt['sysctl'][key] = d[2] or None - # Translate for python3 - return {'sysctl_settings': - [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]} + return { + 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] + } class SysctlConf(TemplatedFile): diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py index a79f33b7..8bf9f36c 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import subprocess from charmhelpers.core.hookenv import ( @@ -82,6 +81,6 @@ class MySQLConfContext(object): """ def __call__(self): settings = utils.get_settings('mysql') - # Translate for python3 - return {'mysql_settings': - [(k, v) for k, v in six.iteritems(settings['security'])]} + return { + 'mysql_settings': [(k, v) for k, v in settings['security'].items()] + } diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py index 5b6765f7..4dee5465 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/templating.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os -import six from charmhelpers.core.hookenv import ( log, @@ -27,10 +26,7 @@ from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py index 56afa4b6..f93851a9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hardening/utils.py @@ -16,7 +16,6 @@ import grp import os import pwd -import six import yaml from charmhelpers.core.hookenv import ( @@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema): :returns: dictionary of modules config with user overrides applied. """ if overrides: - for k, v in six.iteritems(overrides): + for k, v in overrides.items(): if k in schema: if schema[k] is None: settings[k] = v diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardware/__init__.py b/ceph-osd/hooks/charmhelpers/contrib/hardware/__init__.py new file mode 100644 index 00000000..474a8f3b --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardware/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-osd/hooks/charmhelpers/contrib/hardware/pci.py b/ceph-osd/hooks/charmhelpers/contrib/hardware/pci.py new file mode 100644 index 00000000..f6b1789a --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/hardware/pci.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# +# Copyright 2016-2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
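+
+# Example of the canonicalisation performed by format_pci_addr() below
+# (illustrative values): "0:3:1.0" becomes "0000:03:01.0"; an address
+# that is already fully qualified passes through unchanged.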
+ +import glob +import itertools +import logging +import os +import re +import shlex +import subprocess +import typing + + +def format_pci_addr(pci_addr: str) -> str: + """Format a PCI address with 0 fill for parts + + :param: pci_addr: unformatted PCI address + :type: str + :returns: formatted PCI address + :rtype: str + """ + domain, bus, slot_func = pci_addr.split(":") + slot, func = slot_func.split(".") + return "{}:{}:{}.{}".format( + domain.zfill(4), bus.zfill(2), slot.zfill(2), func + ) + + +def get_sysnet_interfaces_and_macs() -> list: + """Catalog interface information from local system + + each device dict contains: + + interface: logical name + mac_address: MAC address + pci_address: PCI address + state: Current interface state (up/down) + sriov: Boolean indicating whether interface is an SR-IOV + capable device. + sriov_totalvfs: Total VF capacity of device + sriov_numvfs: Configured VF capacity of device + + :returns: array of dict objects containing details of each interface + :rtype: list + """ + net_devs = [] + for sdir in itertools.chain( + glob.glob("/sys/bus/pci/devices/*/net/../"), + glob.glob("/sys/bus/pci/devices/*/virtio*/net/../")): + fq_path = os.path.realpath(sdir) + path = fq_path.split("/") + if "virtio" in path[-1]: + pci_address = path[-2] + else: + pci_address = path[-1] + ifname = get_sysnet_interface(sdir) + if not ifname: + logging.warn("Unable to determine interface name for PCI " + "device {}".format(pci_address)) + continue + device = { + "interface": ifname, + "mac_address": get_sysnet_mac(sdir, ifname), + "pci_address": pci_address, + "state": get_sysnet_device_state(sdir, ifname), + "sriov": is_sriov(sdir), + } + if device["sriov"]: + device["sriov_totalvfs"] = get_sriov_totalvfs(sdir) + device["sriov_numvfs"] = get_sriov_numvfs(sdir) + net_devs.append(device) + + return net_devs + + +def get_sysnet_mac(sysdir: str, ifname: str) -> str: + """Determine MAC address for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: MAC address of device + :rtype: str + """ + mac_addr_file = os.path.join(sysdir, "net", ifname, "address") + with open(mac_addr_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def get_sysnet_device_state(sysdir: str, ifname: str) -> str: + """Read operational state of a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: current device state + :rtype: str + """ + state_file = os.path.join(sysdir, "net", ifname, "operstate") + with open(state_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def is_sriov(sysdir: str) -> bool: + """Determine whether a device is SR-IOV capable + + :param: sysdir: path to device /sys directory + :type: str + :returns: whether device is SR-IOV capable or not + :rtype: bool + """ + return os.path.exists(os.path.join(sysdir, "sriov_totalvfs")) + + +def get_sriov_totalvfs(sysdir: str) -> int: + """Read total VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device supports + :rtype: int + """ + sriov_totalvfs_file = os.path.join(sysdir, "sriov_totalvfs") + with open(sriov_totalvfs_file, "r") as f: + read_data = f.read() + return int(read_data.strip()) + + +def get_sriov_numvfs(sysdir: str) -> int: + """Read configured VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device is configured with + :rtype: int + """ + sriov_numvfs_file = os.path.join(sysdir, "sriov_numvfs") 
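+    # sriov_numvfs reflects the operator-configured VF count; it reads
+    # 0 when SR-IOV is supported but no VFs have been allocated yet.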
+ with open(sriov_numvfs_file, "r") as f: + read_data = f.read() + return int(read_data.strip()) + + +# https://github.com/libvirt/libvirt/commit/5b1c525b1f3608156884aed0dc5e925306c1e260 +PF_PHYS_PORT_NAME_REGEX = re.compile(r"(p[0-9]+$)|(p[0-9]+s[0-9]+$)", + re.IGNORECASE) + + +def _phys_port_name_is_pf(sysnetdir: str) -> typing.Optional[bool]: + try: + with open(os.path.join(sysnetdir, "phys_port_name"), "r") as fin: + return (PF_PHYS_PORT_NAME_REGEX.match(fin.read().strip()) + is not None) + except OSError: + return + + +def get_sysnet_interface(sysdir: str) -> typing.Optional[str]: + sysnetdir = os.path.join(sysdir, "net") + netdevs = os.listdir(sysnetdir) + # Return early in case the PCI device only has one netdev + if len(netdevs) == 1: + return netdevs[0] + + # When a PCI device has multiple netdevs we need to figure out which one + # represents the PF + for netdev in netdevs: + if _phys_port_name_is_pf(os.path.join(sysnetdir, netdev)): + return netdev + + +def get_pci_ethernet_addresses() -> list: + """Generate list of PCI addresses for all network adapters + + :returns: list of PCI addresses + :rtype: list + """ + cmd = ["lspci", "-m", "-D"] + lspci_output = subprocess.check_output(cmd).decode("UTF-8") + pci_addresses = [] + for line in lspci_output.split("\n"): + columns = shlex.split(line) + if len(columns) > 1 and columns[1] == "Ethernet controller": + pci_address = columns[0] + pci_addresses.append(format_pci_addr(pci_address)) + return pci_addresses + + +class PCINetDevice(object): + def __init__(self, pci_address): + self.pci_address = pci_address + self.interface_name = None + self.mac_address = None + self.state = None + self.sriov = False + self.sriov_totalvfs = None + self.sriov_numvfs = None + self.update_attributes() + + def update_attributes(self): + self.update_interface_info() + + def update_interface_info(self): + net_devices = get_sysnet_interfaces_and_macs() + for interface in net_devices: + if self.pci_address == interface["pci_address"]: + self.interface_name = interface["interface"] + self.mac_address = interface["mac_address"] + self.state = interface["state"] + self.sriov = interface["sriov"] + if self.sriov: + self.sriov_totalvfs = interface["sriov_totalvfs"] + self.sriov_numvfs = interface["sriov_numvfs"] + + def _set_sriov_numvfs(self, numvfs: int): + sdevice = os.path.join( + "/sys/bus/pci/devices", self.pci_address, "sriov_numvfs" + ) + with open(sdevice, "w") as sh: + sh.write(str(numvfs)) + self.update_attributes() + + def set_sriov_numvfs(self, numvfs: int) -> bool: + """Set the number of VF devices for a SR-IOV PF + + Assuming the device is an SR-IOV device, this function will attempt + to change the number of VF's created by the PF. + + @param numvfs: integer to set the current number of VF's to + @returns boolean indicating whether any changes where made + """ + if self.sriov and numvfs != self.sriov_numvfs: + # NOTE(fnordahl): run-time change of numvfs is disallowed + # without resetting to 0 first. 
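+            # Hence the two writes below: zero first, then the
+            # requested count, through the same sysfs attribute.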
+ self._set_sriov_numvfs(0) + self._set_sriov_numvfs(numvfs) + return True + return False + + +class PCINetDevices(object): + def __init__(self): + self.pci_devices = [ + PCINetDevice(dev) for dev in get_pci_ethernet_addresses() + ] + + def update_devices(self): + for pcidev in self.pci_devices: + pcidev.update_attributes() + + def get_macs(self) -> list: + macs = [] + for pcidev in self.pci_devices: + if pcidev.mac_address: + macs.append(pcidev.mac_address) + return macs + + def get_device_from_mac(self, mac: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.mac_address == mac: + return pcidev + return None + + def get_device_from_pci_address(self, pci_addr: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.pci_address == pci_addr: + return pcidev + return None + + def get_device_from_interface_name( + self, interface_name: str + ) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.interface_name == interface_name: + return pcidev + return None diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index b356d64c..de56584d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 54081f0c..32c69ff7 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -30,8 +30,6 @@ check_output, CalledProcessError) -import six - import charmhelpers.contrib.storage.linux.ceph as ch_ceph from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( @@ -120,20 +118,12 @@ ) from charmhelpers.core.unitdata import kv -try: - from sriov_netplan_shim import pci -except ImportError: - # The use of the function and contexts that require the pci module is - # optional. 
- pass +from charmhelpers.contrib.hardware import pci try: import psutil except ImportError: - if six.PY2: - apt_install('python-psutil', fatal=True) - else: - apt_install('python3-psutil', fatal=True) + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -150,10 +140,7 @@ def ensure_packages(packages): def context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) + _missing = [k for k, v in ctxt.items() if v is None or v == ''] if _missing: log('Missing required data: %s' % ' '.join(_missing), level=INFO) @@ -180,7 +167,7 @@ def context_complete(self, ctxt): # Fresh start self.complete = False self.missing_data = [] - for k, v in six.iteritems(ctxt): + for k, v in ctxt.items(): if v is None or v == '': if k not in self.missing_data: self.missing_data.append(k) @@ -434,6 +421,9 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): ('password', ctxt.get('admin_password', '')), ('signing_dir', ctxt.get('signing_dir', '')),)) + if ctxt.get('service_type'): + c.update((('service_type', ctxt.get('service_type')),)) + return c def __call__(self): @@ -476,6 +466,9 @@ def __call__(self): 'internal_protocol': int_protocol, 'api_version': api_version}) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({ 'admin_domain_name': rdata.get('service_domain'), @@ -547,6 +540,9 @@ def __call__(self): 'api_version': api_version }) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({'admin_domain_name': rdata.get('domain')}) @@ -1111,10 +1107,14 @@ def get_network_addresses(self): endpoint = resolve_address(net_type) addresses.append((addr, endpoint)) - return sorted(set(addresses)) + # Log the set of addresses to have a trail log and capture if tuples + # change over time in the same unit (LP: #1952414). 
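+        # (set() dedupes repeated (addr, endpoint) tuples; sorting keeps
+        # the logged value stable between hook executions.)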
+ sorted_addresses = sorted(set(addresses)) + log('get_network_addresses: {}'.format(sorted_addresses)) + return sorted_addresses def __call__(self): - if isinstance(self.external_ports, six.string_types): + if isinstance(self.external_ports, str): self.external_ports = [self.external_ports] if not self.external_ports or not https(): @@ -1531,9 +1531,9 @@ def __call__(self): continue sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): + for k, v in sub_config.items(): if k == 'sections': - for section, config_list in six.iteritems(v): + for section, config_list in v.items(): log("adding section '%s'" % (section), level=DEBUG) if ctxt[k].get(section): @@ -1887,8 +1887,11 @@ def __call__(self): normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {normalized[port]: bridge for port, bridge in - six.iteritems(portmap) if port in normalized.keys()} + return { + normalized[port]: bridge + for port, bridge in portmap.items() + if port in normalized.keys() + } return None @@ -2291,15 +2294,10 @@ def _get_canonical_name(self, name=None): name = name or socket.gethostname() fqdn = '' - if six.PY2: - exc = socket.error - else: - exc = OSError - try: addrs = socket.getaddrinfo( name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) - except exc: + except OSError: pass else: for addr in addrs: @@ -2416,12 +2414,12 @@ def get_existing_ovs_use_veth(): existing_ovs_use_veth = None # If there is a dhcp_agent.ini file read the current setting if os.path.isfile(DHCP_AGENT_INI): - # config_ini does the right thing and returns None if the setting is - # commented. + # config_ini does the right thing and returns None if the setting + # is commented. existing_ovs_use_veth = ( config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) # Convert to Bool if necessary - if isinstance(existing_ovs_use_veth, six.string_types): + if isinstance(existing_ovs_use_veth, str): return bool_from_string(existing_ovs_use_veth) return existing_ovs_use_veth @@ -3126,7 +3124,7 @@ def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. :param device: Object describing a PCI Network interface card (NIC)/ - :type device: sriov_netplan_shim.pci.PCINetDevice + :type device: contrib.hardware.pci.PCINetDevice :param sriov_numvfs: Number of VFs requested for blanket configuration. :type sriov_numvfs: int :returns: Number of VFs to configure for device diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py new file mode 100755 index 00000000..5f392b3c --- /dev/null +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py @@ -0,0 +1,128 @@ +#!/usr/bin/python3 + +# Copyright 2014-2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Checks for services with deferred restarts. 
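+
+Exit status follows Nagios plugin conventions: 0 with "OK: No deferred
+service restarts." when nothing is pending, 1 with a CRITICAL line
+listing the affected services otherwise.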
+ +This Nagios check will parse /var/lib/policy-rd.d/ +to find any restarts that are currently deferred. +""" + +import argparse +import glob +import sys +import yaml + + +DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' + + +def get_deferred_events(): + """Return a list of deferred events dicts from policy-rc.d files. + + Events are read from DEFERRED_EVENTS_DIR and are of the form: + { + action: restart, + policy_requestor_name: rabbitmq-server, + policy_requestor_type: charm, + reason: 'Pkg update', + service: rabbitmq-server, + time: 1614328743 + } + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of deferred event dictionaries + :rtype: list + """ + deferred_events_files = glob.glob( + '{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + deferred_events = [] + for event_file in deferred_events_files: + with open(event_file, 'r') as f: + event = yaml.safe_load(f) + deferred_events.append(event) + + return deferred_events + + +def get_deferred_restart_services(application=None): + """Returns a list of services with deferred restarts. + + :param str application: Name of the application that blocked the service restart. + If application is None, all services with deferred restarts + are returned. Services which are blocked by a non-charm + requestor are always returned. + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of services with deferred restarts belonging to application. + :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py index d7e02ccd..5775aa44 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/keystone.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright 2017 Canonical Ltd # @@ -14,7 +13,6 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -import six from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack.context import IdentityServiceContext from charmhelpers.core.hookenv import ( @@ -117,10 +115,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient.auth.identity import v2 from keystoneclient import session except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v2_0 import client from keystoneclient.auth.identity import v2 @@ -151,10 +146,7 @@ def __init__(self, endpoint, **kwargs): from keystoneclient import session from keystoneclient.auth.identity import v3 except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v3 import client from keystoneclient.auth import token_endpoint diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py index b41314cb..47772467 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,6 @@ # Various utilities for dealing with Neutron and the renaming from Quantum. -import six from subprocess import check_output from charmhelpers.core.hookenv import ( @@ -349,11 +348,4 @@ def parse_vlan_range_mappings(mappings): Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) - if not _mappings: - return {} - - mappings = {} - for p, r in six.iteritems(_mappings): - mappings[p] = tuple(r.split(':')) - - return mappings + return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py index 6fa06f26..767943c2 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ def __str__(self): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." 
.format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. - - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py index 050f8af5..3b7c6a9f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/templating.py @@ -14,8 +14,6 @@ import os -import six - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, @@ -29,10 +27,7 @@ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions @@ -62,7 +57,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in six.itervalues(OPENSTACK_CODENAMES)] + for rel in OPENSTACK_CODENAMES.values()] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' 
% templates_dir, @@ -225,10 +220,7 @@ def __init__(self, templates_dir, openstack_release): # if this code is running, the object is created pre-install hook. # jinja2 shouldn't get touched until the module is reloaded on next # hook execution, with proper jinja2 bits successfully imported. - if six.PY2: - apt_install('python-jinja2') - else: - apt_install('python3-jinja2') + apt_install('python3-jinja2') def register(self, config_file, contexts, config_template=None): """ @@ -318,9 +310,7 @@ def write(self, config_file): log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException - _out = self.render(config_file) - if six.PY3: - _out = _out.encode('UTF-8') + _out = self.render(config_file).encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) @@ -331,7 +321,8 @@ def write_all(self): """ Write out all registered config files. """ - [self.write(k) for k in six.iterkeys(self.templates)] + for k in self.templates.keys(): + self.write(k) def set_release(self, openstack_release): """ @@ -347,8 +338,8 @@ def complete_contexts(self): Returns a list of context interfaces that yield a complete context. ''' interfaces = [] - [interfaces.extend(i.complete_contexts()) - for i in six.itervalues(self.templates)] + for i in self.templates.values(): + interfaces.extend(i.complete_contexts()) return interfaces def get_incomplete_context_data(self, interfaces): @@ -360,7 +351,7 @@ def get_incomplete_context_data(self, interfaces): ''' incomplete_context_data = {} - for i in six.itervalues(self.templates): + for i in self.templates.values(): for context in i.contexts: for interface in interfaces: related = False diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 9cc96d60..c8747c16 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -25,7 +25,6 @@ import itertools import functools -import six import traceback import uuid import yaml @@ -362,6 +361,8 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel + if src in OPENSTACK_RELEASES: + return src if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -401,7 +402,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(version_map): + for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -411,7 +412,8 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): def get_os_version_codename_swift(codename): '''Determine OpenStack version number of swift from codename.''' - for k, v in six.iteritems(SWIFT_CODENAMES): + # for k, v in six.iteritems(SWIFT_CODENAMES): + for k, v in SWIFT_CODENAMES.items(): if k == codename: return v[-1] e = 'Could not derive swift version for '\ @@ -421,17 +423,17 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. 
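+        # (SWIFT_CODENAMES is ordered oldest to newest, so reversed()
+        # tries the newest matching release first.)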
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. 
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) + else: + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. + + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def some_hook(...): """ def wrap(f): - # py27 compatible nonlocal variable. 
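do_action_openstack_upgrade() and the new do_action_package_upgrade() are deliberately complementary: the former only acts when openstack_upgrade_available() is true, the latter only when it is false, so a charm can expose both actions without them racing. A toy illustration with the availability check stubbed out:

    def openstack_upgrade_available(package):
        return False  # stub; the real check compares origin vs installed

    def do_openstack_upgrade(package, callback):
        if openstack_upgrade_available(package):
            callback()
            return True
        return False

    def do_package_upgrade(package, callback):
        if not openstack_upgrade_available(package):
            callback()
            return True
        return False

    ran = []
    assert not do_openstack_upgrade('ceph-common', lambda: ran.append('os'))
    assert do_package_upgrade('ceph-common', lambda: ran.append('pkg'))
    assert ran == ['pkg']  # exactly one of the two performs work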
When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/ceph-osd/hooks/charmhelpers/contrib/python.py b/ceph-osd/hooks/charmhelpers/contrib/python.py index 84cba8c4..fcded680 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/python.py +++ b/ceph-osd/hooks/charmhelpers/contrib/python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index c70aeb20..1b20b8fe 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ def add_cache_tier(self, cache_pool, mode): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -615,7 +614,8 @@ def create(self): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,6 +632,8 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, to this replicated pool. :type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -646,11 +648,20 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, # we will fail with KeyError if it is not provided. 
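The restart_map cache above is the canonical nonlocal rewrite: the py27-era trick of mutating a one-element dict gives way to rebinding a closure variable directly. A runnable sketch of the idiom:

    import functools

    def with_cached_restart_map(restart_map):
        def wrap(f):
            cache = None  # rebound via nonlocal; computed at most once

            @functools.wraps(f)
            def wrapped(*args, **kwargs):
                nonlocal cache
                if cache is None:
                    cache = (restart_map() if callable(restart_map)
                             else restart_map)
                    print('computed restart map:', cache)
                return f(*args, **kwargs)
            return wrapped
        return wrap

    @with_cached_restart_map(lambda: {'/etc/ceph/ceph.conf': ['ceph-osd']})
    def config_changed():
        pass

    config_changed()  # computes and caches
    config_changed()  # silent reuse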
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -668,12 +679,12 @@ def _create(self): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -692,7 +703,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. @@ -778,10 +789,11 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -814,10 +826,10 @@ def get_mon_map(service): ceph command fails. 
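With the profile_name plumbing above, ReplicatedPool._create() appends the CRUSH rule name to the create call, so a pool lands on its intended rule (for example one pinned to a device class) from the outset. A sketch of the resulting command, with pg_num assumed precomputed and AUTOSCALER_DEFAULT_PGS standing in for the charm constant:

    AUTOSCALER_DEFAULT_PGS = 32  # assumption: mirrors the charm constant

    def replicated_pool_create_cmd(service, name, pg_num,
                                   profile_name='replicated_rule'):
        return ['ceph', '--id', service, 'osd', 'pool', 'create',
                '--pg-num-min={}'.format(min(AUTOSCALER_DEFAULT_PGS, pg_num)),
                name, str(pg_num), profile_name]

    print(replicated_pool_create_cmd('admin', 'rbd', 64))
    # ['ceph', '--id', 'admin', 'osd', 'pool', 'create',
    #  '--pg-num-min=32', 'rbd', '64', 'replicated_rule']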
""" try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -959,9 +971,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1164,8 +1174,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1176,7 +1185,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1189,7 +1198,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1198,7 +1207,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1213,7 +1222,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1241,8 +1250,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1260,7 +1269,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1280,12 +1289,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1299,9 +1306,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1320,13 +1326,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1343,9 +1347,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1371,7 +1373,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1509,9 +1511,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False @@ -1857,7 +1857,7 @@ def _partial_build_common_op_create(self, } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1871,6 +1871,10 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. 
+ :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1880,6 +1884,7 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py index 74bab40e..04daea29 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ check_output, ) -import six - ################################################## # loopback device helpers. @@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/ceph-osd/hooks/charmhelpers/core/hookenv.py b/ceph-osd/hooks/charmhelpers/core/hookenv.py index e94247a2..370c3e8f 100644 --- a/ceph-osd/hooks/charmhelpers/core/hookenv.py +++ b/ceph-osd/hooks/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not 
None: return _cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. + :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. + """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. - - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. 
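_relation_set_accepts_file() turns a per-call subprocess probe into a once-per-hook one; the juju hook tools cannot change underneath a running hook, so caching the --help scrape is safe. The same effect with the stdlib, as a sketch (only meaningful inside a Juju hook environment):

    import functools
    import subprocess

    @functools.lru_cache(maxsize=None)
    def relation_set_accepts_file():
        # probed once per process; later calls hit the cache
        return '--file' in subprocess.check_output(
            ['relation-set', '--help'], universal_newlines=True)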
deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. if not cmd_exists('function-fail'): diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 994ec8a0..ad2cab46 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -115,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_enable('ceph-osd', id=4) + + :param service_name: the name of the service to enable + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('enable', service_name, **kwargs) + + def service_restart(service_name, **kwargs): """Restart a system service. @@ -135,7 +161,7 @@ def service_restart(service_name, **kwargs): :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). 
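The new service_enable() helper in host.py rounds out the service_* family. A minimal systemd-only rendition for illustration; the charmhelpers version also covers upstart/sysv and forwards extra key=value kwargs where the init system accepts them:

    import subprocess

    def service_enable_sketch(service_name):
        # e.g. service_enable_sketch('ceph-osd')
        return subprocess.call(['systemctl', 'enable', service_name]) == 0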
""" @@ -263,7 +289,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +315,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +590,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +993,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1107,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". if e.errno == errno.ENOENT: pass diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index 9f880290..7c37c65c 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ def provide_data(self): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/ceph-osd/hooks/charmhelpers/core/services/helpers.py b/ceph-osd/hooks/charmhelpers/core/services/helpers.py index 3e6e30d2..5bf62dd5 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/helpers.py +++ b/ceph-osd/hooks/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ def __init__(self, *args): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ def read_context(self, file_name): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/ceph-osd/hooks/charmhelpers/core/strutils.py b/ceph-osd/hooks/charmhelpers/core/strutils.py index 28c6b3f5..31366871 100644 --- a/ceph-osd/hooks/charmhelpers/core/strutils.py +++ 
b/ceph-osd/hooks/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/ceph-osd/hooks/charmhelpers/core/templating.py b/ceph-osd/hooks/charmhelpers/core/templating.py index 9014015c..cb0213dc 100644 --- a/ceph-osd/hooks/charmhelpers/core/templating.py +++ b/ceph-osd/hooks/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/ceph-osd/hooks/charmhelpers/fetch/__init__.py b/ceph-osd/hooks/charmhelpers/fetch/__init__.py index 9497ee05..1283f25b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/__init__.py +++ b/ceph-osd/hooks/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. 
Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index d25587ad..2cb2e88b 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ def install(self, source, dest=None, checksum=None, hash_type='sha1'): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + algorithms = hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/ceph-osd/hooks/charmhelpers/fetch/centos.py b/ceph-osd/hooks/charmhelpers/fetch/centos.py index a91dcff0..f8492018 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/centos.py +++ b/ceph-osd/hooks/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/debug.py 
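splituser() and splitpasswd() in archiveurl.py keep their regex behaviour; only the six-era docstrings go away. For reference, the behaviour they preserve:

    import re

    def splituser(host):
        # 'user:pass@host' -> ('user:pass', 'host'); no '@' -> (None, host)
        match = re.compile('^(.*)@(.*)$').match(host)
        if match:
            return match.group(1, 2)
        return None, host

    def splitpasswd(user):
        # 'user:pass' -> ('user', 'pass'); no ':' -> (user, None)
        match = re.compile('^([^:]*):(.*)$', re.S).match(user)
        if match:
            return match.group(1, 2)
        return user, None

    print(splituser('jenkins:secret@archive.example.com'))
    # ('jenkins:secret', 'archive.example.com')
    print(splitpasswd('jenkins:secret'))
    # ('jenkins', 'secret')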
b/ceph-osd/hooks/charmhelpers/fetch/python/debug.py index 757135ee..dd5cca80 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/python/debug.py +++ b/ceph-osd/hooks/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/ceph-osd/hooks/charmhelpers/fetch/python/packages.py b/ceph-osd/hooks/charmhelpers/fetch/python/packages.py index 60048354..93f1fa3f 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/python/packages.py +++ b/ceph-osd/hooks/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index cf8328f0..e6f8a0ad 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' 
in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e1776..6da355fd 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ def __getitem__(self, package): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ def __getitem__(self, package): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
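Both GPG helpers above decode only what is text: _get_keyid_by_gpg_key() decodes stdout and stderr, while _dearmor_gpg_key() leaves stdout alone because dearmored key material is binary. A condensed sketch of the latter:

    import subprocess

    def dearmor_gpg_key(key_asc: bytes) -> bytes:
        ps = subprocess.Popen(['gpg', '--dearmor'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              stdin=subprocess.PIPE)
        out, err = ps.communicate(input=key_asc)
        # only stderr is decoded; stdout is binary key material
        if 'gpg: no valid OpenPGP data found.' in err.decode('utf-8'):
            raise ValueError('Invalid GPG key material provided')
        return out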
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 429b8900..a22462ec 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1162,10 +1162,6 @@ def get_mds_bootstrap_key(): 'allow command "osd in"', 'allow command "osd rm"', 'allow command "auth del"', - 'allow command "osd safe-to-destroy"', - 'allow command "osd crush reweight"', - 'allow command "osd purge"', - 'allow command "osd destroy"', ]) ]) @@ -2196,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key): wait_for_all_monitors_to_upgrade(new_version=new_version, upgrade_key=upgrade_key) bootstrap_manager() + + # NOTE(jmcvaughn): + # Nautilus and later binaries use msgr2 by default, but existing + # clusters that have been upgraded from pre-Nautilus will not + # automatically have msgr2 enabled. Without this, Ceph will show + # a warning only (with no impact to operations), but newly added units + # will not be able to join the cluster. Therefore, we ensure it is + # enabled on upgrade for all versions including and after Nautilus + # (to cater for previous charm versions that will not have done this). + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 + if nautilus_or_later: + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + enable_msgr2() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) @@ -2221,7 +2231,8 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - + # Needed to differentiate between systemd unit names + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 kick_function() try: add_source(config('source'), config('key')) @@ -2250,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): try: if systemd(): - service_stop('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_stop(systemd_unit) log("restarting ceph-mgr.target maybe: {}" .format(luminous_or_later)) if luminous_or_later: @@ -2281,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): perms=0o755) if systemd(): - service_restart('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_restart(systemd_unit) log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) if luminous_or_later: # due to BUG: #1849874 we have to force a restart to get it to @@ -3342,6 +3361,16 @@ def bootstrap_manager(): service_restart(unit) +def enable_msgr2(): + """ + Enables msgr2 + + :raises: subprocess.CalledProcessError if the command fails + """ + cmd = ['ceph', 'mon', 'enable-msgr2'] + subprocess.check_call(cmd) + + def osd_noout(enable): """Sets or unsets 'noout' diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 
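The roll_monitor_cluster()/upgrade_monitor() changes gate two behaviours on Nautilus: mon daemons are addressed by per-host systemd units, and msgr2 is switched on after the roll (a no-op when already enabled, required for clusters upgraded from pre-Nautilus). Condensed:

    import socket
    import subprocess

    def post_upgrade_mon_steps(nautilus_or_later):
        # nautilus_or_later stands in for
        # cmp_pkgrevno('ceph-common', '14.0.0') >= 0
        unit = ('ceph-mon@{}'.format(socket.gethostname())
                if nautilus_or_later else 'ceph-mon')
        subprocess.call(['systemctl', 'restart', unit])
        if nautilus_or_later:
            # harmless if msgr2 is already on
            subprocess.check_call(['ceph', 'mon', 'enable-msgr2'])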
3b1c1591..f366b6c4 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -1,9 +1,10 @@ - project: templates: - charm-unit-jobs-py38 + - charm-unit-jobs-py310 - charm-xena-functional-jobs - charm-yoga-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-osd - build_type: charmcraft \ No newline at end of file + build_type: charmcraft diff --git a/ceph-osd/tests/bundles/impish-xena.yaml b/ceph-osd/tests/bundles/impish-xena.yaml deleted file mode 100644 index f49d208b..00000000 --- a/ceph-osd/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,237 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-osd: - charm: ../../ceph-osd.charm - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 
'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 4780f66e..f0cc660e 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -2,10 +2,9 @@ charm_name: ceph-osd gate_bundles: - focal-xena - - impish-xena + - focal-yoga dev_bundles: - - focal-yoga - jammy-yoga smoke_bundles: @@ -23,5 +22,4 @@ tests: tests_options: force_deploy: - - impish-xena - jammy-yoga From f74dc6dcd30ae16ec4d94c3ae827badbc072867d Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 7 Apr 2022 09:07:37 +0200 Subject: [PATCH 2376/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata - Default source is yoga - Charmhelpers and charms.ceph sync Change-Id: I39f091db8ef8f18c0a40d4e46d54dfc964c03d70 --- ceph-radosgw/.zuul.yaml | 2 +- ceph-radosgw/charm-helpers-hooks.yaml | 1 + ceph-radosgw/charmcraft.yaml | 16 +- ceph-radosgw/config.yaml | 2 +- .../charmhelpers/contrib/hardware/__init__.py | 13 + .../charmhelpers/contrib/hardware/pci.py | 288 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 18 +- .../templates/section-keystone-authtoken | 3 + .../section-keystone-authtoken-mitaka | 3 + .../contrib/storage/linux/ceph.py | 30 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 29 +- ceph-radosgw/lib/charms_ceph/utils.py | 245 ++++++++------- ceph-radosgw/metadata.yaml | 2 +- ceph-radosgw/osci.yaml | 20 -- .../tests/bundles/impish-xena-namespaced.yaml | 124 -------- ceph-radosgw/tests/bundles/impish-xena.yaml | 123 -------- ceph-radosgw/tests/tests.yaml | 4 - ceph-radosgw/tox.ini | 5 + 18 files changed, 531 insertions(+), 397 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardware/__init__.py create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/hardware/pci.py delete mode 100644 ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/impish-xena.yaml diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index fd189e2f..7dd3db96 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,3 +1,3 @@ - project: 
templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs diff --git a/ceph-radosgw/charm-helpers-hooks.yaml b/ceph-radosgw/charm-helpers-hooks.yaml index fa9cd645..03ff1064 100644 --- a/ceph-radosgw/charm-helpers-hooks.yaml +++ b/ceph-radosgw/charm-helpers-hooks.yaml @@ -15,4 +15,5 @@ include: - contrib.openstack|inc=* - contrib.charmsupport - contrib.hardening|inc=* + - contrib.hardware - contrib.openstack.policyd diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index ba84f314..dca60a09 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -21,7 +21,15 @@ parts: - README.md bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index a7beb36c..ef901fce 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: + default: yoga description: | Optional repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardware/__init__.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardware/__init__.py new file mode 100644 index 00000000..474a8f3b --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardware/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hardware/pci.py b/ceph-radosgw/hooks/charmhelpers/contrib/hardware/pci.py new file mode 100644 index 00000000..f6b1789a --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hardware/pci.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# +# Copyright 2016-2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
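The new contrib.hardware.pci module below replaces the optional sriov_netplan_shim import in the OpenStack context code. Its helpers are consumed roughly like this (a sketch; the functions are defined in the hunks that follow):

    from charmhelpers.contrib.hardware import pci

    print(pci.format_pci_addr('0:3:0.1'))   # -> '0000:03:00.1'
    for dev in pci.get_sysnet_interfaces_and_macs():
        print(dev['interface'], dev['pci_address'], dev['state'],
              dev['sriov'])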
+ +import glob +import itertools +import logging +import os +import re +import shlex +import subprocess +import typing + + +def format_pci_addr(pci_addr: str) -> str: + """Format a PCI address with 0 fill for parts + + :param: pci_addr: unformatted PCI address + :type: str + :returns: formatted PCI address + :rtype: str + """ + domain, bus, slot_func = pci_addr.split(":") + slot, func = slot_func.split(".") + return "{}:{}:{}.{}".format( + domain.zfill(4), bus.zfill(2), slot.zfill(2), func + ) + + +def get_sysnet_interfaces_and_macs() -> list: + """Catalog interface information from local system + + each device dict contains: + + interface: logical name + mac_address: MAC address + pci_address: PCI address + state: Current interface state (up/down) + sriov: Boolean indicating whether interface is an SR-IOV + capable device. + sriov_totalvfs: Total VF capacity of device + sriov_numvfs: Configured VF capacity of device + + :returns: array of dict objects containing details of each interface + :rtype: list + """ + net_devs = [] + for sdir in itertools.chain( + glob.glob("/sys/bus/pci/devices/*/net/../"), + glob.glob("/sys/bus/pci/devices/*/virtio*/net/../")): + fq_path = os.path.realpath(sdir) + path = fq_path.split("/") + if "virtio" in path[-1]: + pci_address = path[-2] + else: + pci_address = path[-1] + ifname = get_sysnet_interface(sdir) + if not ifname: + logging.warn("Unable to determine interface name for PCI " + "device {}".format(pci_address)) + continue + device = { + "interface": ifname, + "mac_address": get_sysnet_mac(sdir, ifname), + "pci_address": pci_address, + "state": get_sysnet_device_state(sdir, ifname), + "sriov": is_sriov(sdir), + } + if device["sriov"]: + device["sriov_totalvfs"] = get_sriov_totalvfs(sdir) + device["sriov_numvfs"] = get_sriov_numvfs(sdir) + net_devs.append(device) + + return net_devs + + +def get_sysnet_mac(sysdir: str, ifname: str) -> str: + """Determine MAC address for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: MAC address of device + :rtype: str + """ + mac_addr_file = os.path.join(sysdir, "net", ifname, "address") + with open(mac_addr_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def get_sysnet_device_state(sysdir: str, ifname: str) -> str: + """Read operational state of a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: current device state + :rtype: str + """ + state_file = os.path.join(sysdir, "net", ifname, "operstate") + with open(state_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def is_sriov(sysdir: str) -> bool: + """Determine whether a device is SR-IOV capable + + :param: sysdir: path to device /sys directory + :type: str + :returns: whether device is SR-IOV capable or not + :rtype: bool + """ + return os.path.exists(os.path.join(sysdir, "sriov_totalvfs")) + + +def get_sriov_totalvfs(sysdir: str) -> int: + """Read total VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device supports + :rtype: int + """ + sriov_totalvfs_file = os.path.join(sysdir, "sriov_totalvfs") + with open(sriov_totalvfs_file, "r") as f: + read_data = f.read() + return int(read_data.strip()) + + +def get_sriov_numvfs(sysdir: str) -> int: + """Read configured VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device is configured with + :rtype: int + """ + sriov_numvfs_file = os.path.join(sysdir, "sriov_numvfs") 
+    with open(sriov_numvfs_file, "r") as f:
+        read_data = f.read()
+    return int(read_data.strip())
+
+
+# https://github.com/libvirt/libvirt/commit/5b1c525b1f3608156884aed0dc5e925306c1e260
+PF_PHYS_PORT_NAME_REGEX = re.compile(r"(p[0-9]+$)|(p[0-9]+s[0-9]+$)",
+                                     re.IGNORECASE)
+
+
+def _phys_port_name_is_pf(sysnetdir: str) -> typing.Optional[bool]:
+    """Check whether phys_port_name marks the netdev as the PF.
+
+    Returns None when the attribute cannot be read.
+    """
+    try:
+        with open(os.path.join(sysnetdir, "phys_port_name"), "r") as fin:
+            return (PF_PHYS_PORT_NAME_REGEX.match(fin.read().strip())
+                    is not None)
+    except OSError:
+        return
+
+
+def get_sysnet_interface(sysdir: str) -> typing.Optional[str]:
+    """Determine the netdev name for a PCI device, preferring the PF.
+
+    :param: sysdir: path to device /sys directory
+    :type: str
+    :returns: interface name, or None if it cannot be determined
+    :rtype: Optional[str]
+    """
+    sysnetdir = os.path.join(sysdir, "net")
+    netdevs = os.listdir(sysnetdir)
+    # Return early in case the PCI device only has one netdev
+    if len(netdevs) == 1:
+        return netdevs[0]
+
+    # When a PCI device has multiple netdevs we need to figure out which one
+    # represents the PF
+    for netdev in netdevs:
+        if _phys_port_name_is_pf(os.path.join(sysnetdir, netdev)):
+            return netdev
+
+
+def get_pci_ethernet_addresses() -> list:
+    """Generate list of PCI addresses for all network adapters
+
+    :returns: list of PCI addresses
+    :rtype: list
+    """
+    cmd = ["lspci", "-m", "-D"]
+    lspci_output = subprocess.check_output(cmd).decode("UTF-8")
+    pci_addresses = []
+    for line in lspci_output.split("\n"):
+        columns = shlex.split(line)
+        if len(columns) > 1 and columns[1] == "Ethernet controller":
+            pci_address = columns[0]
+            pci_addresses.append(format_pci_addr(pci_address))
+    return pci_addresses
+
+
+class PCINetDevice(object):
+    def __init__(self, pci_address):
+        self.pci_address = pci_address
+        self.interface_name = None
+        self.mac_address = None
+        self.state = None
+        self.sriov = False
+        self.sriov_totalvfs = None
+        self.sriov_numvfs = None
+        self.update_attributes()
+
+    def update_attributes(self):
+        self.update_interface_info()
+
+    def update_interface_info(self):
+        net_devices = get_sysnet_interfaces_and_macs()
+        for interface in net_devices:
+            if self.pci_address == interface["pci_address"]:
+                self.interface_name = interface["interface"]
+                self.mac_address = interface["mac_address"]
+                self.state = interface["state"]
+                self.sriov = interface["sriov"]
+                if self.sriov:
+                    self.sriov_totalvfs = interface["sriov_totalvfs"]
+                    self.sriov_numvfs = interface["sriov_numvfs"]
+
+    def _set_sriov_numvfs(self, numvfs: int):
+        sdevice = os.path.join(
+            "/sys/bus/pci/devices", self.pci_address, "sriov_numvfs"
+        )
+        with open(sdevice, "w") as sh:
+            sh.write(str(numvfs))
+        self.update_attributes()
+
+    def set_sriov_numvfs(self, numvfs: int) -> bool:
+        """Set the number of VF devices for an SR-IOV PF
+
+        Assuming the device is an SR-IOV device, this function will attempt
+        to change the number of VFs created by the PF.
+
+        @param numvfs: integer to set the current number of VFs to
+        @returns boolean indicating whether any changes were made
+        """
+        if self.sriov and numvfs != self.sriov_numvfs:
+            # NOTE(fnordahl): run-time change of numvfs is disallowed
+            # without resetting to 0 first.
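+            # The kernel refuses a direct change between two non-zero
+            # values; writing 0 first and then the requested count is the
+            # only supported sequence.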
+ self._set_sriov_numvfs(0) + self._set_sriov_numvfs(numvfs) + return True + return False + + +class PCINetDevices(object): + def __init__(self): + self.pci_devices = [ + PCINetDevice(dev) for dev in get_pci_ethernet_addresses() + ] + + def update_devices(self): + for pcidev in self.pci_devices: + pcidev.update_attributes() + + def get_macs(self) -> list: + macs = [] + for pcidev in self.pci_devices: + if pcidev.mac_address: + macs.append(pcidev.mac_address) + return macs + + def get_device_from_mac(self, mac: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.mac_address == mac: + return pcidev + return None + + def get_device_from_pci_address(self, pci_addr: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.pci_address == pci_addr: + return pcidev + return None + + def get_device_from_interface_name( + self, interface_name: str + ) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.interface_name == interface_name: + return pcidev + return None diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 8522641b..32c69ff7 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -118,12 +118,7 @@ ) from charmhelpers.core.unitdata import kv -try: - from sriov_netplan_shim import pci -except ImportError: - # The use of the function and contexts that require the pci module is - # optional. - pass +from charmhelpers.contrib.hardware import pci try: import psutil @@ -426,6 +421,9 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): ('password', ctxt.get('admin_password', '')), ('signing_dir', ctxt.get('signing_dir', '')),)) + if ctxt.get('service_type'): + c.update((('service_type', ctxt.get('service_type')),)) + return c def __call__(self): @@ -468,6 +466,9 @@ def __call__(self): 'internal_protocol': int_protocol, 'api_version': api_version}) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({ 'admin_domain_name': rdata.get('service_domain'), @@ -539,6 +540,9 @@ def __call__(self): 'api_version': api_version }) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({'admin_domain_name': rdata.get('domain')}) @@ -3120,7 +3124,7 @@ def _determine_numvfs(self, device, sriov_numvfs): """Determine number of Virtual Functions (VFs) configured for device. :param device: Object describing a PCI Network interface card (NIC)/ - :type device: sriov_netplan_shim.pci.PCINetDevice + :type device: contrib.hardware.pci.PCINetDevice :param sriov_numvfs: Number of VFs requested for blanket configuration. 
:type sriov_numvfs: int :returns: Number of VFs to configure for device diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 5dcebe7c..c9b01528 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -9,4 +9,7 @@ project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} signing_dir = {{ signing_dir }} +{% if service_type -%} +service_type = {{ service_type }} +{% endif -%} {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index c281868b..14c25b4d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -6,6 +6,9 @@ auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 project_domain_name = {{ admin_domain_name }} user_domain_name = {{ admin_domain_name }} +{% if service_type -%} +service_type = {{ service_type }} +{% endif -%} {% else -%} auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 244b7af9..1b20b8fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -614,7 +614,8 @@ def create(self): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -631,6 +632,8 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, to this replicated pool. :type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -645,11 +648,20 @@ def __init__(self, service, name=None, pg_num=None, replicas=None, # we will fail with KeyError if it is not provided. 
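# The optional 'crush-profile' key selects the CRUSH rule for the pool;
# fall back to the profile_name argument ('replicated_rule' by default)
# when the broker request does not set it.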
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -667,12 +679,12 @@ def _create(self): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -691,7 +703,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. @@ -777,6 +789,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: @@ -1842,7 +1857,7 @@ def _partial_build_common_op_create(self, } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1856,6 +1871,10 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. + :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1865,6 +1884,7 @@ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 2b0a36fb..ad2cab46 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -114,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). 
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which daemon instance should be acted on. The
+    following example enables the ceph-osd service for instance id=4:
+
+    service_enable('ceph-osd', id=4)
+
+    :param service_name: the name of the service to enable
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    return service('enable', service_name, **kwargs)
+
+
 def service_restart(service_name, **kwargs):
     """Restart a system service.

@@ -134,7 +161,7 @@ def service_restart(service_name, **kwargs):
     :param service_name: the name of the service to restart
     :param **kwargs: additional parameters to pass to the init system when
                      managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
+                     parameters to the init system's commandline. kwargs
                      are ignored for init systems not allowing additional
                      parameters via the commandline (systemd).
     """
diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py
index 025ab866..a22462ec 100644
--- a/ceph-radosgw/lib/charms_ceph/utils.py
+++ b/ceph-radosgw/lib/charms_ceph/utils.py
@@ -1,4 +1,4 @@
-# Copyright 2017 Canonical Ltd
+# Copyright 2017-2021 Canonical Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -293,7 +293,7 @@ def get_link_speed(network_interface):

 def persist_settings(settings_dict):
     # Write all settings to /etc/hdparm.conf
-    """ This will persist the hard drive settings to the /etc/hdparm.conf file
+    """This will persist the hard drive settings to the /etc/hdparm.conf file

     The settings_dict should be in the form of {"uuid": {"key":"value"}}

@@ -552,7 +552,7 @@ def get_osd_weight(osd_id):

     :returns: Float
     :raises: ValueError if the monmap fails to parse.
-    :raises: CalledProcessError if our ceph command fails.
+    :raises: CalledProcessError if our Ceph command fails.
     """
     try:
         tree = str(subprocess
@@ -560,7 +560,7 @@ def get_osd_weight(osd_id):
                    .decode('UTF-8'))
         try:
             json_tree = json.loads(tree)
-            # Make sure children are present in the json
+            # Make sure children are present in the JSON
             if not json_tree['nodes']:
                 return None
             for device in json_tree['nodes']:
@@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'):

 def get_osd_tree(service):
-    """Returns the current osd map in JSON.
+    """Returns the current OSD map in JSON.

     :returns: List.
     :rtype: List[CrushLocation]
     :raises: ValueError if the monmap fails to parse.
-             Also raises CalledProcessError if our ceph command fails
+             Also raises CalledProcessError if our Ceph command fails
     """
     try:
         tree = str(subprocess
@@ -666,12 +666,12 @@ def _get_child_dirs(path):

 def _get_osd_num_from_dirname(dirname):
     """Parses the dirname and returns the OSD id.

-    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    Parses a string in the form of 'ceph-{osd#}' and returns the OSD number
     from the directory name.
:param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided + :return int: the OSD number the directory name corresponds to + :raises ValueError: if the OSD number cannot be parsed from the provided directory name. """ match = re.search(r'ceph-(?P\d+)', dirname) @@ -686,7 +686,7 @@ def get_local_osd_ids(): to split the ID off of the directory name and return it in a list. - :returns: list. A list of osd identifiers + :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. """ osd_ids = [] @@ -875,12 +875,12 @@ def add_bootstrap_hint(peer): ] CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation ] @@ -984,7 +984,7 @@ def is_osd_disk(dev): def start_osds(devices): - # Scan for ceph block devices + # Scan for Ceph block devices rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): @@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None): 'get', key_name, ]).decode('UTF-8')).strip() - # NOTE(jamespage); - # Apply any changes to key capabilities, dealing with - # upgrades which requires new caps for operation. - upgrade_key_caps(key_name, - caps or _default_caps, - pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! 
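# (the key is generated below via 'ceph auth get-or-create')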
@@ -1270,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None): def upgrade_key_caps(key, caps, pool_list=None): - """ Upgrade key to have capabilities caps """ + """Upgrade key to have capabilities caps""" if not is_leader(): # Not the MON leader OR not clustered return @@ -1304,11 +1298,11 @@ def use_bluestore(): def bootstrap_monitor_cluster(secret): - """Bootstrap local ceph mon into the ceph cluster + """Bootstrap local Ceph mon into the Ceph cluster :param secret: cephx secret to use for monitor authentication :type secret: str - :raises: Exception if ceph mon cannot be bootstrapped + :raises: Exception if Ceph mon cannot be bootstrapped """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1351,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): :type: secret: str :param hostname: hostname of the local unit :type hostname: str - :param path: full path to ceph mon directory + :param path: full path to Ceph mon directory :type path: str - :param done: full path to 'done' marker for ceph mon + :param done: full path to 'done' marker for Ceph mon :type done: str - :param init_marker: full path to 'init' marker for ceph mon + :param init_marker: full path to 'init' marker for Ceph mon :type init_marker: str """ subprocess.check_call(['ceph-authtool', keyring, @@ -1415,13 +1409,13 @@ def create_keyrings(): owner=ceph_user(), group=ceph_user(), perms=0o400) else: - # NOTE(jamespage): Later ceph releases require explicit + # NOTE(jamespage): Later Ceph releases require explicit # call to ceph-create-keys to setup the # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # ceph releases too. This improves bootstrap + # Ceph releases too. This improves bootstrap # resilience as the charm will wait for # presence of peer units before attempting # to bootstrap. 
Note that charms deploying @@ -1503,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False): def get_devices(name): - """ Merge config and juju storage based devices + """Merge config and Juju storage based devices - :name: THe name of the device type, eg: wal, osd, journal + :name: The name of the device type, e.g.: wal, osd, journal :returns: Set(device names), which are strings """ if config(name): @@ -1520,11 +1514,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER): + bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager) + bluestore, key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1534,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1547,7 +1542,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native ceph block device format + :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1599,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, bluestore, - key_manager) + key_manager, + osd_id) else: cmd = _ceph_disk(dev, osd_format, @@ -1683,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): + key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1695,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use + :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 'ceph-volume' command and required parameters for @@ -1716,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') + if osd_id is not None: + cmd.extend(['--osd-id', str(osd_id)]) + # On-disk journal volume creation if not osd_journal and not bluestore: journal_lv_type = 'journal' @@ -1840,7 +1840,7 @@ def get_conf(variable): Get the value of the given configuration variable from the cluster. - :param variable: ceph configuration variable + :param variable: Ceph configuration variable :returns: str. configured value for provided variable """ @@ -1860,7 +1860,7 @@ def calculate_volume_size(lv_type): :raises KeyError: if invalid lv_type is supplied :returns: int. 
Configured size in megabytes for volume type """ - # lv_type -> ceph configuration option + # lv_type -> Ceph configuration option _config_map = { 'db': 'bluestore_block_db_size', 'wal': 'bluestore_block_wal_size', @@ -1874,7 +1874,7 @@ def calculate_volume_size(lv_type): 'journal': 1024, } - # conversion of ceph config units to MB + # conversion of Ceph config units to MB _units = { 'db': 1048576, # Bytes -> MB 'wal': 1048576, # Bytes -> MB @@ -1907,7 +1907,7 @@ def _luks_uuid(dev): def _initialize_disk(dev, dev_uuid, encrypt=False, key_manager=CEPH_KEY_MANAGER): """ - Initialize a raw block device consuming 100% of the avaliable + Initialize a raw block device consuming 100% of the available disk space. Function assumes that block device has already been wiped. @@ -2004,7 +2004,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an osd. + """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize :param encrypt: bool. Should the OSD directory be encrypted at rest @@ -2074,11 +2074,11 @@ def get_running_osds(): def get_cephfs(service): """List the Ceph Filesystems that exist. - :param service: The service name to run the ceph command under - :returns: list. Returns a list of the ceph filesystems + :param service: The service name to run the Ceph command under + :returns: list. Returns a list of the Ceph filesystems """ if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph + # This command wasn't introduced until 0.86 Ceph return [] try: output = str(subprocess @@ -2157,7 +2157,7 @@ def roll_monitor_cluster(new_version, upgrade_key): sys.exit(1) log('monitor_list: {}'.format(monitor_list)) - # A sorted list of osd unit names + # A sorted list of OSD unit names mon_sorted_list = sorted(monitor_list) # Install packages immediately but defer restarts to when it's our time. @@ -2192,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key): wait_for_all_monitors_to_upgrade(new_version=new_version, upgrade_key=upgrade_key) bootstrap_manager() + + # NOTE(jmcvaughn): + # Nautilus and later binaries use msgr2 by default, but existing + # clusters that have been upgraded from pre-Nautilus will not + # automatically have msgr2 enabled. Without this, Ceph will show + # a warning only (with no impact to operations), but newly added units + # will not be able to join the cluster. Therefore, we ensure it is + # enabled on upgrade for all versions including and after Nautilus + # (to cater for previous charm versions that will not have done this). + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 + if nautilus_or_later: + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + enable_msgr2() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) @@ -2204,7 +2218,7 @@ def noop(): def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current ceph monitor to the new version + """Upgrade the current Ceph monitor to the new version :param new_version: String version to upgrade to. 
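:param kick_function: optional callback invoked at intervals during the
    upgrade to signal liveness; defaults to a no-op
:param restart_daemons: whether to restart the monitor daemons as part of
    the upgrade; when False only the packages are upgraded and restarts
    are left to the caller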
""" @@ -2212,18 +2226,19 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - + # Needed to differentiate between systemd unit names + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the Ceph source failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2246,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): try: if systemd(): - service_stop('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_stop(systemd_unit) log("restarting ceph-mgr.target maybe: {}" .format(luminous_or_later)) if luminous_or_later: @@ -2277,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): perms=0o755) if systemd(): - service_restart('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_restart(systemd_unit) log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) if luminous_or_later: # due to BUG: #1849874 we have to force a restart to get it to @@ -2294,7 +2317,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the ceph monitor cluster and upgrade. + """Create a lock on the Ceph monitor cluster and upgrade. :param upgrade_key: str. The cephx key to use :param service: str. The cephx id to use @@ -2443,7 +2466,7 @@ class WatchDog(object): allow for other delays. There is a compatibility mode where if the otherside never kicks, then it - simply waits for the compatability timer. + simply waits for the compatibility timer. """ class WatchDogDeadException(Exception): @@ -2578,11 +2601,11 @@ def timed_wait(kicked_at_function, def get_upgrade_position(osd_sorted_list, match_name): - """Return the upgrade position for the given osd. + """Return the upgrade position for the given OSD. - :param osd_sorted_list: Osds sorted + :param osd_sorted_list: OSDs sorted :type osd_sorted_list: [str] - :param match_name: The osd name to match + :param match_name: The OSD name to match :type match_name: str :returns: The position of the name :rtype: int @@ -2591,20 +2614,20 @@ def get_upgrade_position(osd_sorted_list, match_name): for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" + raise ValueError("OSD name '{}' not found in get_upgrade_position list" .format(match_name)) # Edge cases: # 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. +# 2. This assumes that the OSD failure domain is not set to OSD. # It rolls an entire server at a time. 
def roll_osd_cluster(new_version, upgrade_key): """This is tricky to get right so here's what we're going to do. There's 2 possible cases: Either I'm first in line or not. If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. + and test to see if the previous OSD is upgraded yet. TODO: If you're not in the same failure domain it's safe to upgrade 1. Examine all pools and adopt the most strict failure domain policy @@ -2620,7 +2643,7 @@ def roll_osd_cluster(new_version, upgrade_key): log('roll_osd_cluster called with {}'.format(new_version)) my_name = socket.gethostname() osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names + # A sorted list of OSD unit names osd_sorted_list = sorted(osd_tree) log("osd_sorted_list: {}".format(osd_sorted_list)) @@ -2655,7 +2678,7 @@ def roll_osd_cluster(new_version, upgrade_key): def upgrade_osd(new_version, kick_function=None): - """Upgrades the current osd + """Upgrades the current OSD :param new_version: str. The new version to upgrade to """ @@ -2663,15 +2686,15 @@ def upgrade_osd(new_version, kick_function=None): kick_function = noop current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) + status_set("maintenance", "Upgrading OSD") + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( + log("Adding the Ceph sources failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2685,7 +2708,7 @@ def upgrade_osd(new_version, kick_function=None): kick_function() # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart + # directories in the OSD service directory, then simply restart # all of the OSDs at the same time as this will be the fastest # way to update the code on the node. if not dirs_need_ownership_update('osd'): @@ -2700,7 +2723,7 @@ def upgrade_osd(new_version, kick_function=None): # Need to change the ownership of all directories which are not OSD # directories as well. # TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. + # and done before mon/OSD. update_owner(CEPH_BASE_DIR, recurse_dirs=False) non_osd_dirs = filter(lambda x: not x == 'osd', os.listdir(CEPH_BASE_DIR)) @@ -2721,12 +2744,12 @@ def upgrade_osd(new_version, kick_function=None): _upgrade_single_osd(osd_num, osd_dir) except ValueError as ex: # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), + log('Could not parse OSD directory %s: %s' % (osd_dir, ex), WARNING) continue except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " + log("Stopping Ceph and upgrading packages failed " "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2753,7 +2776,7 @@ def _upgrade_single_osd(osd_num, osd_dir): def stop_osd(osd_num): """Stops the specified OSD number. 
- :param osd_num: the osd number to stop + :param osd_num: the OSD number to stop """ if systemd(): service_stop('ceph-osd@{}'.format(osd_num)) @@ -2764,7 +2787,7 @@ def stop_osd(osd_num): def start_osd(osd_num): """Starts the specified OSD number. - :param osd_num: the osd number to start. + :param osd_num: the OSD number to start. """ if systemd(): service_start('ceph-osd@{}'.format(osd_num)) @@ -2775,12 +2798,12 @@ def start_osd(osd_num): def disable_osd(osd_num): """Disables the specified OSD number. - Ensures that the specified osd will not be automatically started at the + Ensures that the specified OSD will not be automatically started at the next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be + this method cannot make any guarantees that the specified OSD cannot be started manually. - :param osd_num: the osd id which should be disabled. + :param osd_num: the OSD id which should be disabled. :raises CalledProcessError: if an error occurs invoking the systemd cmd to disable the OSD :raises IOError, OSError: if the attempt to read/remove the ready file in @@ -2820,7 +2843,7 @@ def enable_osd(osd_num): :param osd_num: the osd id which should be enabled. :raises CalledProcessError: if the call to the systemd command issued fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart + :raises IOError: if the attempt to write the ready file in an upstart enabled system fails """ if systemd(): @@ -2828,7 +2851,7 @@ def enable_osd(osd_num): subprocess.check_call(cmd) else: # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' + # upstart script which will only start the OSD if it has a 'ready' # file. Make sure that file exists. ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), 'ready') @@ -2881,7 +2904,7 @@ def get_osd_state(osd_num, osd_goal_state=None): If osd_goal_state is not None, loop until the current OSD state matches the OSD goal state. - :param osd_num: the osd id to get state for + :param osd_num: the OSD id to get state for :param osd_goal_state: (Optional) string indicating state to wait for Defaults to None :returns: Returns a str, the OSD state. @@ -2942,7 +2965,7 @@ def maintain_osd_state(osd_num): Ensures the state of an OSD is the same at the end of a block nested in a with statement as it was at the beginning of the block. - :param osd_num: the osd id to maintain state for + :param osd_num: the OSD id to maintain state for """ osd_state = get_osd_state(osd_num) try: @@ -2969,9 +2992,9 @@ def maintain_all_osd_states(): def list_pools(client='admin'): """This will list the current pools that Ceph has - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Returns a list of available pools. :rtype: list :raises: subprocess.CalledProcessError if the subprocess fails to run. 
@@ -2996,9 +3019,9 @@ def get_pool_param(pool, param, client='admin'): :type pool: str :param param: Name of variable to get :type param: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Value of variable on pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3020,9 +3043,9 @@ def get_pool_erasure_profile(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Erasure code profile of pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3041,9 +3064,9 @@ def get_pool_quota(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with quota variables :rtype: dict :raises: subprocess.CalledProcessError @@ -3066,9 +3089,9 @@ def get_pool_applications(pool='', client='admin'): :param pool: (Optional) Name of pool to get applications for Defaults to get for all pools :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with pool name as key :rtype: dict :raises: subprocess.CalledProcessError @@ -3131,7 +3154,7 @@ def dirs_need_ownership_update(service): necessary due to the upgrade from Hammer to Jewel where the daemon user changes from root: to ceph:. - :param service: the name of the service folder to check (e.g. osd, mon) + :param service: the name of the service folder to check (e.g. OSD, mon) :returns: boolean. True if the directories need a change of ownership, False otherwise. :raises IOError: if an error occurs reading the file stats from one of @@ -3161,7 +3184,7 @@ def dirs_need_ownership_update(service): return False -# A dict of valid ceph upgrade paths. Mapping is old -> new +# A dict of valid Ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), ('hammer', 'jewel'), @@ -3173,7 +3196,7 @@ def dirs_need_ownership_update(service): ('pacific', 'quincy'), ]) -# Map UCA codenames to ceph codenames +# Map UCA codenames to Ceph codenames UCA_CODENAME_MAP = { 'icehouse': 'firefly', 'juno': 'firefly', @@ -3196,24 +3219,24 @@ def dirs_need_ownership_update(service): def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for ceph""" + """Pretty print supported upgrade paths for Ceph""" return ["{} -> {}".format(key, value) for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): - """Resolves a version of ceph based on source configuration + """Resolves a version of Ceph based on source configuration based on Ubuntu Cloud Archive pockets. @param: source: source configuration option of charm - :returns: ceph release codename or None if not resolvable + :returns: Ceph release codename or None if not resolvable """ os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) def get_ceph_pg_stat(): - """Returns the result of ceph pg stat. + """Returns the result of 'ceph pg stat'. 
:returns: dict """ @@ -3248,7 +3271,7 @@ def get_ceph_health(): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['overall_status']: return None @@ -3265,7 +3288,7 @@ def get_ceph_health(): def reweight_osd(osd_num, new_weight): """Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed + :param osd_num: the OSD id which should be changed :param new_weight: the new weight for the OSD :returns: bool. True if output looks right, else false. :raises CalledProcessError: if an error occurs invoking the systemd cmd @@ -3292,7 +3315,7 @@ def reweight_osd(osd_num, new_weight): def determine_packages(): """Determines packages for installation. - :returns: list of ceph packages + :returns: list of Ceph packages """ packages = PACKAGES.copy() if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': @@ -3338,6 +3361,16 @@ def bootstrap_manager(): service_restart(unit) +def enable_msgr2(): + """ + Enables msgr2 + + :raises: subprocess.CalledProcessError if the command fails + """ + cmd = ['ceph', 'mon', 'enable-msgr2'] + subprocess.check_call(cmd) + + def osd_noout(enable): """Sets or unsets 'noout' @@ -3361,12 +3394,12 @@ def osd_noout(enable): class OSDConfigSetError(Exception): - """Error occured applying OSD settings.""" + """Error occurred applying OSD settings.""" pass def apply_osd_settings(settings): - """Applies the provided osd settings + """Applies the provided OSD settings Apply the provided settings to all local OSD unless settings are already present. Settings stop being applied on encountering an error. @@ -3391,7 +3424,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error retrieving osd setting: {}".format(out['error']), + log("Error retrieving OSD setting: {}".format(out['error']), level=ERROR) return False current_settings[key] = out[cli_key] @@ -3408,7 +3441,7 @@ def _get_cli_key(key): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error applying osd setting: {}".format(out['error']), + log("Error applying OSD setting: {}".format(out['error']), level=ERROR) raise OSDConfigSetError return True @@ -3478,7 +3511,7 @@ def mgr_disable_module(module): def ceph_config_set(name, value, who): - """Set a ceph config option + """Set a Ceph config option :param name: key to set :type name: str @@ -3496,7 +3529,7 @@ def ceph_config_set(name, value, who): def ceph_config_get(name, who): - """Retrieve the value of a ceph config option + """Retrieve the value of a Ceph config option :param name: key to lookup :type name: str diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index d7dd0827..cabe88dd 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -14,7 +14,7 @@ tags: - misc series: - focal -- impish +- jammy extra-bindings: public: admin: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 55a22c68..64c9d049 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -10,10 +10,6 @@ voting: false - vault-focal-yoga-namespaced: voting: false - - vault-impish-xena_rgw: - voting: false - - vault-impish-xena-namespaced: - voting: false - vault-jammy-yoga_rgw: voting: false - vault-jammy-yoga-namespaced: @@ -58,22 +54,6 @@ - vault-focal-xena-namespaced vars: tox_extra_args: vault:jammy-yoga-namespaced -- job: - name: 
vault-impish-xena_rgw - parent: func-target - dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced - vars: - tox_extra_args: vault:impish-xena -- job: - name: vault-impish-xena-namespaced - parent: func-target - dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced - vars: - tox_extra_args: vault:impish-xena-namespaced - job: name: vault-focal-yoga_rgw parent: func-target diff --git a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml deleted file mode 100644 index 8e94f9a0..00000000 --- a/ceph-radosgw/tests/bundles/impish-xena-namespaced.yaml +++ /dev/null @@ -1,124 +0,0 @@ -options: - source: &source distro - -series: impish - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/impish-xena.yaml b/ceph-radosgw/tests/bundles/impish-xena.yaml deleted file mode 100644 index e26477e8..00000000 --- a/ceph-radosgw/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,123 +0,0 @@ -options: - source: &source distro - -series: impish - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 06d627b5..7f797427 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -3,8 +3,6 @@ charm_name: ceph-radosgw gate_bundles: - vault: focal-xena - vault: focal-xena-namespaced - - vault: impish-xena - - vault: impish-xena-namespaced smoke_bundles: - vault: focal-xena @@ -34,7 +32,5 @@ tests: tests_options: force_deploy: - - impish-xena - - impish-xena-namespaced - jammy-yoga - jammy-yoga-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 81fd2492..acbcb1f1 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -76,6 +76,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 343249f53a9d43f58146e24c81b9183a2d752d17 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 7 Apr 2022 09:07:37 +0200 Subject: [PATCH 2377/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata Change-Id: Ic16efea3f834c2e085c51caa302aa379e3133a7e --- ceph-dashboard/.zuul.yaml | 2 +- ceph-dashboard/charmcraft.yaml | 7 +- ceph-dashboard/metadata.yaml | 3 - ceph-dashboard/tests/bundles/jammy.yaml | 115 ++++++++++++++++++++++++ ceph-dashboard/tests/tests.yaml | 2 + ceph-dashboard/tox.ini | 5 ++ 6 files changed, 126 insertions(+), 8 deletions(-) create mode 100644 ceph-dashboard/tests/bundles/jammy.yaml diff --git a/ceph-dashboard/.zuul.yaml 
b/ceph-dashboard/.zuul.yaml index 0eed1965..7ffc71cb 100644 --- a/ceph-dashboard/.zuul.yaml +++ b/ceph-dashboard/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs - openstack-cover-jobs diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml index ff59b025..a77199e1 100644 --- a/ceph-dashboard/charmcraft.yaml +++ b/ceph-dashboard/charmcraft.yaml @@ -25,11 +25,10 @@ bases: channel: "20.04" architectures: - amd64 - - s390x - - ppc64el - - arm64 run-on: - name: ubuntu channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "21.10" + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index a35e9cea..8e6db9f8 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -14,10 +14,7 @@ extra-bindings: public: subordinate: true series: -- bionic - focal -- groovy -- hirsute - jammy requires: dashboard: diff --git a/ceph-dashboard/tests/bundles/jammy.yaml b/ceph-dashboard/tests/bundles/jammy.yaml new file mode 100644 index 00000000..fb195ed4 --- /dev/null +++ b/ceph-dashboard/tests/bundles/jammy.yaml @@ -0,0 +1,115 @@ +local_overlay_enabled: False +series: jammy +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: cs:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # Octrober 2021 + charm: ch:grafana + num_units: 1 + options: + anonymous: True + install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: cs:prometheus-alertmanager + num_units: 1 + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: latest/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: latest/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: latest/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 
'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 163d0e94..415df2cf 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -3,6 +3,8 @@ gate_bundles: - focal smoke_bundles: - focal +dev_bundles: + - jammy configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 5926de88..cb9fdfba 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -68,6 +68,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From bf00017b15ec9ce8b0627bd1388fef3d87267623 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Wed, 26 Jan 2022 12:32:05 +0000 Subject: [PATCH 2378/2699] Update to build using charmcraft Due to a build problem with the reactive plugin, this change falls back on overriding the steps and doing a manual build, but it also ensures the CI system builds the charm using charmcraft. Changes: - add a build-requirements.txt - modify charmcraft.yaml - modify osci.yaml -> indicate build with charmcraft - modify tox.ini -> tox -e build does charmcraft build/rename -> tox -e build-reactive does the reactive build - modify bundles to use the .charm artifact in tests. and fix deprecation warning re: prefix - tox inception to enable tox -e func-test in the CI This change also switches away from directory backed OSD devices in the test bundles, as they are not supported anymore. 
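
The resulting build flow, sketched from the bullets above and the new
charmcraft.yaml/rename.sh below (exact tox target bodies live in tox.ini):

    tox -e build            # drives `charmcraft pack`, then rename.sh
    tox -e build-reactive   # manual reactive build; invoked inside the
                            # build container by charmcraft.yaml's
                            # override-build step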
Change-Id: I57d1b47afbbeef211bb777fdbd0b4a091a021c19 Co-authored-by: Aurelien Lourot --- ceph-rbd-mirror/build-requirements.txt | 7 + ceph-rbd-mirror/charmcraft.yaml | 25 ++ ceph-rbd-mirror/metadata.yaml | 1 + ceph-rbd-mirror/osci.yaml | 74 +----- ceph-rbd-mirror/rename.sh | 13 + ceph-rbd-mirror/src/metadata.yaml | 3 - ceph-rbd-mirror/src/test-requirements.txt | 3 + .../tests/bundles/bionic-queens-e2e-lxd.yaml | 207 ---------------- .../src/tests/bundles/bionic-queens-e2e.yaml | 207 ---------------- .../src/tests/bundles/bionic-queens.yaml | 111 --------- .../tests/bundles/bionic-rocky-site-a.yaml | 86 ------- .../tests/bundles/bionic-rocky-site-b.yaml | 27 -- .../src/tests/bundles/bionic-rocky.yaml | 114 --------- .../src/tests/bundles/bionic-stein.yaml | 114 --------- .../bundles/bionic-train-image-mirroring.yaml | 120 --------- .../src/tests/bundles/bionic-train.yaml | 116 --------- .../bionic-ussuri-image-mirroring.yaml | 120 --------- .../src/tests/bundles/bionic-ussuri.yaml | 116 --------- .../bundles/focal-ussuri-image-mirroring.yaml | 233 ------------------ .../src/tests/bundles/focal-ussuri.yaml | 228 ----------------- .../focal-victoria-image-mirroring.yaml | 171 ------------- .../src/tests/bundles/focal-victoria.yaml | 167 ------------- .../focal-wallaby-image-mirroring.yaml | 171 ------------- .../src/tests/bundles/focal-wallaby.yaml | 167 ------------- .../src/tests/bundles/focal-xena.yaml | 56 +++-- .../src/tests/bundles/focal-yoga.yaml | 56 +++-- .../hirsute-wallaby-image-mirroring.yaml | 171 ------------- .../src/tests/bundles/hirsute-wallaby.yaml | 166 ------------- .../src/tests/bundles/impish-xena.yaml | 56 +++-- .../src/tests/bundles/jammy-yoga.yaml | 56 +++-- ceph-rbd-mirror/src/tests/tests.yaml | 23 +- ceph-rbd-mirror/test-requirements.txt | 6 + ceph-rbd-mirror/tox.ini | 25 ++ 33 files changed, 241 insertions(+), 2975 deletions(-) create mode 100644 ceph-rbd-mirror/build-requirements.txt create mode 100644 ceph-rbd-mirror/charmcraft.yaml create mode 120000 ceph-rbd-mirror/metadata.yaml create mode 100755 ceph-rbd-mirror/rename.sh delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml delete mode 100644 
ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml diff --git a/ceph-rbd-mirror/build-requirements.txt b/ceph-rbd-mirror/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-rbd-mirror/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml new file mode 100644 index 00000000..49682169 --- /dev/null +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -0,0 +1,25 @@ +type: charm + +parts: + charm: + build-packages: + - tox + - git + - python3-dev + override-build: | + apt-get install ca-certificates -y + tox -e build-reactive + override-stage: | + echo "Copying charm to staging area: $CHARMCRAFT_STAGE" + NAME=$(ls $CHARMCRAFT_PART_BUILD/build/builds) + cp -r $CHARMCRAFT_PART_BUILD/build/builds/$NAME/* $CHARMCRAFT_STAGE/ + override-prime: | + # For some reason, the normal priming chokes on the fact that there's a + # hooks directory. + cp -r $CHARMCRAFT_STAGE/* . + +bases: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 diff --git a/ceph-rbd-mirror/metadata.yaml b/ceph-rbd-mirror/metadata.yaml new file mode 120000 index 00000000..07686838 --- /dev/null +++ b/ceph-rbd-mirror/metadata.yaml @@ -0,0 +1 @@ +src/metadata.yaml \ No newline at end of file diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index 12d7f88c..4f039701 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -1,77 +1,17 @@ - project: templates: - - charm-yoga-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 check: jobs: - - bionic-train_ceph-rbd-mirror - - bionic-stein_ceph-rbd-mirror - - bionic-queens_ceph-rbd-mirror - - bionic-ussuri_ceph-rbd-mirror - - focal-ussuri_ceph-rbd-mirror - - focal-victoria_ceph-rbd-mirror - - focal-wallaby_ceph-rbd-mirror - - focal-xena_ceph-rbd-mirror - - focal-yoga_ceph-rbd-mirror: + - focal-xena + - focal-yoga: voting: false - - hirsute-wallaby_ceph-rbd-mirror - - impish-xena_ceph-rbd-mirror: + - impish-xena: voting: false - - jammy-yoga_ceph-rbd-mirror: + - jammy-yoga: voting: false vars: needs_charm_build: true charm_build_name: ceph-rbd-mirror - -- job: - name: bionic-train_ceph-rbd-mirror - parent: bionic-train - dependencies: - - osci-lint - - tox-py36 - - tox-py38 - - tox-py39 -- job: - name: bionic-queens_ceph-rbd-mirror - parent: bionic-queens - dependencies: &smoke-jobs - - bionic-train_ceph-rbd-mirror -- job: - name: bionic-stein_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: bionic-ussuri_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: focal-ussuri_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: focal-victoria_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: focal-wallaby_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: focal-xena_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: focal-yoga_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: hirsute-wallaby_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: 
impish-xena_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs -- job: - name: jammy-yoga_ceph-rbd-mirror - parent: bionic-stein - dependencies: *smoke-jobs + build_type: charmcraft diff --git a/ceph-rbd-mirror/rename.sh b/ceph-rbd-mirror/rename.sh new file mode 100755 index 00000000..d0c35c97 --- /dev/null +++ b/ceph-rbd-mirror/rename.sh @@ -0,0 +1,13 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index f141caf0..a07cf96b 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -15,10 +15,7 @@ tags: - file-servers - misc series: -- bionic - focal -- groovy -- hirsute - impish extra-bindings: public: diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index e7710236..9c7afb7f 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -4,6 +4,9 @@ # https://github.com/openstack-charmers/release-tools # +# Need tox to be available from tox... inception yes, but its a workaround for now +tox + # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml deleted file mode 100644 index 40b8daba..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e-lxd.yaml +++ /dev/null @@ -1,207 +0,0 @@ -series: bionic -machines: - '0': - constraints: mem=16G - series: bionic - '1': - constraints: mem=16G - series: bionic - '2': - constraints: mem=16G - series: bionic - '3': - constraints: mem=16G - series: bionic - '4': - constraints: mem=16G - series: bionic - '5': - constraints: mem=16G - series: bionic - '6': - constraints: mem=16G - series: bionic - '7': - constraints: mem=16G - series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:0 - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:1 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:2 - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - bindings: - '': libvirt-maas - to: - - lxd:0 - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:1 - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - num_units: 0 - nova-cloud-controller: - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:2 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - to: - - 0 - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - bindings: - public: 
libvirt-maas - cluster: libvirt-default - to: - - lxd:0 - - lxd:1 - - lxd:2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - osd-devices: /dev/vdb - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 0 - - 1 - - 2 - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 3 - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - lxd:4 - - lxd:5 - - lxd:6 - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - osd-devices: /dev/vdb - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 4 - - 5 - - 6 - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 7 -relations: -- - mysql - - keystone -- - mysql - - cinder -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote -- - mysql:shared-db - - nova-cloud-controller:shared-db -- - keystone:identity-service - - nova-cloud-controller:identity-service -- - rabbitmq-server:amqp - - nova-cloud-controller:amqp -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:cloud-compute - - nova-cloud-controller:cloud-compute -- - glance:identity-service - - keystone:identity-service -- - glance:shared-db - - mysql:shared-db -- - glance:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - neutron-openvswitch:neutron-plugin - - nova-compute:neutron-plugin -- - neutron-openvswitch:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml deleted file mode 100644 index 40b8daba..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens-e2e.yaml +++ /dev/null @@ -1,207 +0,0 @@ -series: bionic -machines: - '0': - constraints: mem=16G - series: bionic - '1': - constraints: mem=16G - series: bionic - '2': - constraints: mem=16G - series: bionic - '3': - constraints: mem=16G - series: bionic - '4': - constraints: mem=16G - series: bionic - '5': - constraints: mem=16G - series: bionic - '6': - constraints: mem=16G - series: bionic - '7': - constraints: mem=16G - series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:0 - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:1 - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:2 - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - bindings: - '': libvirt-maas - to: - - lxd:0 - cinder-ceph: - charm: 
cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:1 - neutron-openvswitch: - charm: cs:~openstack-charmers-next/neutron-openvswitch - num_units: 0 - nova-cloud-controller: - charm: cs:~openstack-charmers-next/nova-cloud-controller - num_units: 1 - bindings: - '': libvirt-maas - to: - - lxd:2 - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - to: - - 0 - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - lxd:0 - - lxd:1 - - lxd:2 - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - osd-devices: /dev/vdb - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 0 - - 1 - - 2 - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 3 - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - lxd:4 - - lxd:5 - - lxd:6 - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - osd-devices: /dev/vdb - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 4 - - 5 - - 6 - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - bindings: - public: libvirt-maas - cluster: libvirt-default - to: - - 7 -relations: -- - mysql - - keystone -- - mysql - - cinder -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote -- - mysql:shared-db - - nova-cloud-controller:shared-db -- - keystone:identity-service - - nova-cloud-controller:identity-service -- - rabbitmq-server:amqp - - nova-cloud-controller:amqp -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - nova-compute:cloud-compute - - nova-cloud-controller:cloud-compute -- - glance:identity-service - - keystone:identity-service -- - glance:shared-db - - mysql:shared-db -- - glance:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - neutron-openvswitch:neutron-plugin - - nova-compute:neutron-plugin -- - neutron-openvswitch:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:image-service - - glance:image-service diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml deleted file mode 100644 index b9ffc25b..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-queens.yaml +++ /dev/null @@ -1,111 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: distro - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: distro - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 
1 - options: - source: distro - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: distro - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: distro - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: distro - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: distro -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml deleted file mode 100644 index ab033ec5..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-a.yaml +++ /dev/null @@ -1,86 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-rocky - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-rocky - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-rocky - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-rocky - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-rocky - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-rocky -relations: -- - mysql - - 
keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml deleted file mode 100644 index 2f377961..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky-site-b.yaml +++ /dev/null @@ -1,27 +0,0 @@ -series: bionic -applications: - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-rocky - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-rocky - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-rocky -relations: -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml deleted file mode 100644 index 2c6ba1d2..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-rocky.yaml +++ /dev/null @@ -1,114 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-rocky - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-rocky - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-rocky - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-rocky - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-rocky - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-rocky - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-rocky - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-rocky - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-rocky - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-rocky -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph 
- - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml deleted file mode 100644 index 8ededfd5..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-stein.yaml +++ /dev/null @@ -1,114 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-stein - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-stein - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-stein - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-stein - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-stein - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-stein - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-stein - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-stein - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-stein - bluestore: False - use-direct-io: False - osd-devices: /opt - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-stein -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml deleted file mode 100644 index 80b210c0..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-train-image-mirroring.yaml +++ /dev/null @@ -1,120 +0,0 @@ -series: bionic -applications: - mysql: - charm: 
cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-train - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-train - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-train - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-train - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-train - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-train - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-train - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-train -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml deleted file mode 100644 index 7d3b949f..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-train.yaml +++ /dev/null @@ -1,116 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-train - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-train - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-train - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-train - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-train 
- nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-train - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-train - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-train - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-train - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-train - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-train - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-train -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml deleted file mode 100644 index 3e95360a..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri-image-mirroring.yaml +++ /dev/null @@ -1,120 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-ussuri - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-ussuri - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-ussuri - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-ussuri - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-ussuri - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-ussuri - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: 
cloud:bionic-ussuri - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-ussuri - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-ussuri -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client diff --git a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml deleted file mode 100644 index a62d70fd..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/bionic-ussuri.yaml +++ /dev/null @@ -1,116 +0,0 @@ -series: bionic -applications: - mysql: - charm: cs:~openstack-charmers-next/percona-cluster - num_units: 1 - options: - source: cloud:bionic-ussuri - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: cloud:bionic-ussuri - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: cloud:bionic-ussuri - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: cloud:bionic-ussuri - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-ussuri - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-ussuri - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-ussuri - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: cloud:bionic-ussuri - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: cloud:bionic-ussuri - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - ceph-rbd-mirror-b: - series: bionic - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: cloud:bionic-ussuri -relations: -- - mysql - - keystone -- - mysql - - cinder -- - mysql - - glance -- - rabbitmq-server - - cinder -- - keystone - - cinder -- - keystone - - glance -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp -- - glance:image-service - - nova-compute:image-service 
-- - glance - - ceph-mon -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml deleted file mode 100644 index ea148e3d..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri-image-mirroring.yaml +++ /dev/null @@ -1,233 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - '19': - '20': - '21': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '3' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '4' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - to: - - '5' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '6' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - to: - - '7' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '8' - - '9' - - '10' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - to: - - '11' - - '12' - - '13' - - ceph-rbd-mirror: - series: focal - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - to: - - '14' - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '15' - - '16' - - '17' - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - to: - - '18' - - '19' - - '20' - - ceph-rbd-mirror-b: - series: focal - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - to: - - '21' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 
'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'rabbitmq-server' - - 'cinder' - - - - 'keystone' - - 'cinder' - - - - 'keystone' - - 'glance' - - - - 'cinder' - - 'cinder-ceph' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'cinder-ceph:ceph-replication-device' - - 'ceph-mon-b:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance' - - 'ceph-mon' - - - - 'ceph-mon:osd' - - 'ceph-osd:mon' - - - - 'ceph-mon' - - 'ceph-rbd-mirror:ceph-local' - - - - 'ceph-mon' - - 'ceph-rbd-mirror-b:ceph-remote' - - - - 'ceph-mon-b:osd' - - 'ceph-osd-b:mon' - - - - 'ceph-mon-b' - - 'ceph-rbd-mirror-b:ceph-local' - - - - 'ceph-mon-b' - - 'ceph-rbd-mirror:ceph-remote' diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml deleted file mode 100644 index 8294b095..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-ussuri.yaml +++ /dev/null @@ -1,228 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - '19': - '20': - '21': - -applications: - - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '3' - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '4' - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - to: - - '5' - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '6' - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - to: - - '7' - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '8' - - '9' - - '10' - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - to: - - '11' - - '12' - - '13' - - ceph-rbd-mirror: - series: focal - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - to: - - '14' - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '15' - - '16' - - '17' - - 
ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - #bluestore: False - #use-direct-io: False - storage: - osd-devices: '10G' - to: - - '18' - - '19' - - '20' - - ceph-rbd-mirror-b: - series: focal - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - to: - - '21' - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'rabbitmq-server' - - 'cinder' - - - - 'keystone' - - 'cinder' - - - - 'keystone' - - 'glance' - - - - 'cinder' - - 'cinder-ceph' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance' - - 'ceph-mon' - - - - 'ceph-mon:osd' - - 'ceph-osd:mon' - - - - 'ceph-mon' - - 'ceph-rbd-mirror:ceph-local' - - - - 'ceph-mon' - - 'ceph-rbd-mirror-b:ceph-remote' - - - - 'ceph-mon-b:osd' - - 'ceph-osd-b:mon' - - - - 'ceph-mon-b' - - 'ceph-rbd-mirror-b:ceph-local' - - - - 'ceph-mon-b' - - 'ceph-rbd-mirror:ceph-remote' diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml deleted file mode 100644 index 2d8d4337..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-victoria-image-mirroring.yaml +++ /dev/null @@ -1,171 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - 
ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml deleted file mode 100644 index 77a6e1fa..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-victoria.yaml +++ /dev/null @@ -1,167 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-victoria - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: 
False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml deleted file mode 100644 index b28a8244..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby-image-mirroring.yaml +++ /dev/null @@ -1,171 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd: - charm: 
cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml deleted file mode 100644 index 8e82a187..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-wallaby.yaml +++ /dev/null @@ -1,167 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-wallaby - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - 
source: *openstack-origin - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml index 345cc604..3460aaeb 100644 --- a/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin cloud:focal-xena +local_overlay_enabled: False + series: &series focal machines: @@ -14,14 +16,17 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -29,85 +34,100 @@ applications: - '0' - '1' - '2' + channel: latest/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: None glance-api-version: 2 openstack-origin: *openstack-origin + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph num_units: 0 + channel: yoga/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance 
num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror-b: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml index 5d2a4ed6..2564d9c7 100644 --- a/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin cloud:focal-yoga +local_overlay_enabled: False + series: &series focal machines: @@ -14,14 +16,17 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -29,85 +34,100 @@ applications: - '0' - '1' - '2' + channel: latest/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: None glance-api-version: 2 openstack-origin: *openstack-origin + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph num_units: 0 + channel: yoga/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon 
num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror-b: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml deleted file mode 100644 index 61413d37..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby-image-mirroring.yaml +++ /dev/null @@ -1,171 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: &series hirsute - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - options: - rbd-mirroring-mode: image - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: 
*openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client -- - cinder-ceph:ceph-replication-device - - ceph-mon-b:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml b/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml deleted file mode 100644 index e2e10c5f..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/hirsute-wallaby.yaml +++ /dev/null @@ -1,166 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: &series hirsute - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router - - mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - - keystone: - charm: cs:~openstack-charmers-next/keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - - rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - - cinder: - charm: cs:~openstack-charmers-next/cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - - cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph - num_units: 0 - - glance: - charm: cs:~openstack-charmers-next/glance - num_units: 1 - options: - openstack-origin: *openstack-origin - - nova-compute: - charm: cs:~openstack-charmers-next/nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - - ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - - ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd - num_units: 3 - options: - 
source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: /opt - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml b/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml index 40aa1461..ddf2861f 100644 --- a/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin distro +local_overlay_enabled: False + series: &series impish machines: @@ -14,14 +16,17 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -29,84 +34,99 @@ applications: - '0' - '1' - '2' + channel: latest/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: None glance-api-version: 2 + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph num_units: 0 + channel: yoga/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False 
use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror-b: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml index 17507a07..a3fd3f80 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml @@ -1,6 +1,8 @@ variables: openstack-origin: &openstack-origin distro +local_overlay_enabled: False + series: &series jammy machines: @@ -14,14 +16,17 @@ machines: applications: keystone-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge glance-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge cinder-mysql-router: - charm: cs:~openstack-charmers-next/mysql-router + charm: ch:mysql-router + channel: latest/edge mysql-innodb-cluster: - charm: cs:~openstack-charmers-next/mysql-innodb-cluster + charm: ch:mysql-innodb-cluster num_units: 3 options: source: *openstack-origin @@ -29,84 +34,99 @@ applications: - '0' - '1' - '2' + channel: latest/edge keystone: - charm: cs:~openstack-charmers-next/keystone + charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge rabbitmq-server: - charm: cs:~openstack-charmers-next/rabbitmq-server + charm: ch:rabbitmq-server num_units: 1 options: source: *openstack-origin + channel: latest/edge cinder: - charm: cs:~openstack-charmers-next/cinder + charm: ch:cinder num_units: 1 options: block-device: None glance-api-version: 2 + channel: yoga/edge cinder-ceph: - charm: cs:~openstack-charmers-next/cinder-ceph + charm: ch:cinder-ceph num_units: 0 + channel: yoga/edge glance: - charm: cs:~openstack-charmers-next/glance + charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge nova-compute: - charm: cs:~openstack-charmers-next/nova-compute + charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin + channel: yoga/edge ceph-mon: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin ceph-mon-b: - charm: cs:~openstack-charmers-next/ceph-mon + charm: ch:ceph-mon num_units: 3 options: 
expected-osd-count: 3 source: *openstack-origin + channel: quincy/edge ceph-osd-b: - charm: cs:~openstack-charmers-next/ceph-osd + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'cinder,10G' options: source: *openstack-origin bluestore: False use-direct-io: False - osd-devices: /opt + osd-devices: '/dev/test-non-existent' + channel: quincy/edge ceph-rbd-mirror-b: series: *series - charm: ../../../ceph-rbd-mirror + charm: ../../../ceph-rbd-mirror.charm num_units: 1 options: source: *openstack-origin diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index bd170e77..117d828c 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,38 +1,21 @@ charm_name: ceph-rbd-mirror smoke_bundles: -- focal-ussuri +- focal-xena gate_bundles: -- bionic-queens -- bionic-queens-e2e -- bionic-queens-e2e-lxd -- bionic-stein -- bionic-ussuri -- focal-ussuri -- focal-ussuri-image-mirroring -- focal-victoria -- focal-victoria-image-mirroring -- focal-wallaby -- focal-wallaby-image-mirroring - focal-xena - focal-xena-image-mirroring -- hirsute-wallaby -- hirsute-wallaby-image-mirroring - impish-xena - impish-xena-image-mirroring comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. dev_bundles: -- bionic-rocky -- bionic-train -- bionic-train-image-mirroring -# This is a dev bundle because we hit https://bugs.launchpad.net/charm-ceph-rbd-mirror/+bug/1892201. -- bionic-ussuri-image-mirroring - focal-yoga - focal-yoga-image-mirroring - jammy-yoga - jammy-yoga-image-mirroring configure: +- zaza.openstack.charm_tests.glance.setup.add_cirros_image - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest @@ -40,8 +23,6 @@ tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest tests_options: force_deploy: - - hirsute-wallaby - - hirsute-wallaby-image-mirroring - impish-xena - impish-xena-image-mirroring - jammy-yoga diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index 208032f0..a11a7d07 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -28,6 +28,11 @@ oslo.utils<=3.41.0;python_version<'3.6' requests>=2.18.4 charms.reactive +# Newer mock seems to have some syntax which is newer than python3.5 (e.g. 
+# f'{something}' +mock>=1.2,<4.0.0; python_version < '3.6' +mock>=1.2; python_version >= '3.6' + nose>=1.3.7 coverage>=3.6 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack @@ -46,3 +51,4 @@ pbr==5.6.0 # vault cryptography<3.4 # vault, keystone-saml-mellon lxml # keystone-saml-mellon hvac # vault, barbican-vault +psutil # cinder-lvm diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index faf6092e..2d60b8a4 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -37,11 +37,24 @@ setenv = VIRTUAL_ENV={envdir} passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY install_command = {toxinidir}/pip.sh install {opts} {packages} +allowlist_externals = + charmcraft + bash + tox + rename.sh deps = -r{toxinidir}/requirements.txt [testenv:build] basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v build + {toxinidir}/rename.sh + +[testenv:build-reactive] +basepython = python3 commands = charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} @@ -86,6 +99,18 @@ deps = flake8==3.9.2 charm-tools==2.8.3 commands = flake8 {posargs} src unit_tests +[testenv:func-target] +# Hack to get functional tests working in the charmcraft +# world. We should fix this. +basepython = python3 +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/src/test-requirements.txt +changedir = {toxinidir}/src +commands = + bash -c "if [ ! -f ../*.charm ]; then echo 'Charm does not exist. Run tox -e build'; exit 1; fi" + tox --version + tox -e func-target {posargs} + [testenv:cover] # Technique based heavily upon # https://github.com/openstack/nova/blob/master/tox.ini From e153843b58d2602c5bb6d73070d989b69a355cc1 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 12 Apr 2022 22:05:33 -0300 Subject: [PATCH 2379/2699] Update the charm to use the latest changes in charms.ceph Change-Id: I7aee1d27021e259367d6fe88002f996ab62a61c3 Closes-Bug: #1968369 --- ceph-mon/lib/charms_ceph/broker.py | 3 ++- ceph-mon/lib/charms_ceph/utils.py | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py index d00baedc..90b536fb 100644 --- a/ceph-mon/lib/charms_ceph/broker.py +++ b/ceph-mon/lib/charms_ceph/broker.py @@ -291,7 +291,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ['mon', 'allow r, allow command "osd blacklist"', + return ['mon', ('allow r, allow command "osd blacklist"' + ', allow command "osd blocklist"'), 'osd', ', '.join(permissions)] diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index a22462ec..5e76e6be 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1134,7 +1134,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ ('mon', ['allow r', - 'allow command "osd blacklist"']), + 'allow command "osd blacklist"', + 'allow command "osd blocklist"']), ('osd', ['allow rwx']), ]) @@ -3453,6 +3454,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError as e: From 4e8c851e2b96c4581e634977a1a7225e101145b7 Mon Sep 17 
00:00:00 2001 From: Chris MacNaughton Date: Wed, 13 Apr 2022 16:16:43 +0200 Subject: [PATCH 2380/2699] Updates for jammy enablement - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Drop impish bundles and OSCI testing - Add jammy metadata - Default source is yoga Change-Id: I2aaa39d6f780c415db54b28eaf445732247d7d47 --- ceph-fs/.zuul.yaml | 2 +- ceph-fs/charmcraft.yaml | 16 +- ceph-fs/src/config.yaml | 2 +- ceph-fs/src/metadata.yaml | 2 +- ceph-fs/src/tests/bundles/impish-xena.yaml | 241 --------------------- ceph-fs/src/tests/tests.yaml | 2 - 6 files changed, 15 insertions(+), 250 deletions(-) delete mode 100644 ceph-fs/src/tests/bundles/impish-xena.yaml diff --git a/ceph-fs/.zuul.yaml b/ceph-fs/.zuul.yaml index 0eed1965..7ffc71cb 100644 --- a/ceph-fs/.zuul.yaml +++ b/ceph-fs/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs - openstack-cover-jobs diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 49682169..883fc9b4 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -19,7 +19,15 @@ parts: cp -r $CHARMCRAFT_STAGE/* . bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 675c9b6e..3c4fa28a 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. source: type: string - default: distro + default: yoga description: | Optional configuration to support use of additional sources such as: . 
diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 01727358..1bdda048 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -11,7 +11,7 @@ tags: - misc series: - focal -- impish +- jammy subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/bundles/impish-xena.yaml b/ceph-fs/src/tests/bundles/impish-xena.yaml deleted file mode 100644 index fd96fc12..00000000 --- a/ceph-fs/src/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,241 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -local_overlay_enabled: False - -series: &series impish - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - neutron-api-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-fs: - charm: ../../../ceph-fs.charm - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-api: - charm: ch:neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-openvswitch: - charm: ch:neutron-openvswitch - channel: latest/edge - - neutron-gateway: - charm: ch:neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - channel: latest/edge - -relations: - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 
'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 8d0a4d2f..d0bcbc69 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,7 +1,6 @@ charm_name: ceph-fs gate_bundles: - focal-xena - - impish-wallaby dev_bundles: - focal-yoga - jammy-yoga @@ -19,5 +18,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation tests_options: force_deploy: - - impish-xena - jammy-yoga From 5020d9c82608f3beddfbe69f52a0a861ecf4d5c2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 10 Feb 2022 15:52:23 +0100 Subject: [PATCH 2381/2699] Multisite replication should use public, rather than internal, networks Closes-Bug: #1960520 Change-Id: Ie2954a9a59acbc384c18c901e2d324ee003d7108 --- ceph-radosgw/hooks/hooks.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 34290210..9f2fed18 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -695,11 +695,11 @@ def master_relation_joined(relation_id=None): log('unit not ready, deferring multisite configuration') return - internal_url = '{}:{}'.format( - canonical_url(CONFIGS, INTERNAL), + public_url = '{}:{}'.format( + canonical_url(CONFIGS, PUBLIC), listen_port(), ) - endpoints = [internal_url] + endpoints = [public_url] realm = config('realm') zonegroup = config('zonegroup') zone = config('zone') @@ -803,11 +803,11 @@ def slave_relation_changed(relation_id=None, unit=None): log("Defer processing until master RGW has provided required data") return - internal_url = '{}:{}'.format( - 
canonical_url(CONFIGS, INTERNAL), + public_url = '{}:{}'.format( + canonical_url(CONFIGS, PUBLIC), listen_port(), ) - endpoints = [internal_url] + endpoints = [public_url] realm = config('realm') zonegroup = config('zonegroup') From f40d05a53819bf80066f485acb9392ccabf0eee5 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 18 Apr 2022 20:59:54 +0100 Subject: [PATCH 2382/2699] Add *.charm to gitignore This patch adds *.charm to the .gitignore to ensure that any built artifacts are ignored. Change-Id: I4c79baf0802b2daa6aa9e9435ac9c99b605fc844 --- ceph-fs/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/.gitignore b/ceph-fs/.gitignore index 018b2708..231d85f4 100644 --- a/ceph-fs/.gitignore +++ b/ceph-fs/.gitignore @@ -7,3 +7,4 @@ __pycache__ *.pyc .idea .stestr +*.charm From e8e5b1670294217705d9f5dfdde735458e0d19cd Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Mon, 18 Apr 2022 20:59:54 +0100 Subject: [PATCH 2383/2699] Add *.charm to gitignore This patch adds *.charm to the .gitignore to ensure that any built artifacts are ignored. Change-Id: I8a3ee884ad82d935165c725b38c01cdd25656f49 --- ceph-rbd-mirror/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-rbd-mirror/.gitignore b/ceph-rbd-mirror/.gitignore index 0446b60e..a7eb42f3 100644 --- a/ceph-rbd-mirror/.gitignore +++ b/ceph-rbd-mirror/.gitignore @@ -8,3 +8,4 @@ cover/ layers/ interfaces/ *.swp +*.charm From 210bf55b5bbf870b2b88505fa97df7742a30e07a Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 21 Apr 2022 07:53:35 +0100 Subject: [PATCH 2384/2699] Resync charms.ceph Pickup changes for cephx key permissions for rbd-mirror charm. Change-Id: I01a878f7bbf244d1db79991382a18dc23176d9b6 Closes-Bug: 1879749 --- ceph-mon/lib/charms_ceph/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py index 5e76e6be..e6adcb82 100644 --- a/ceph-mon/lib/charms_ceph/utils.py +++ b/ceph-mon/lib/charms_ceph/utils.py @@ -1167,7 +1167,10 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd; allow r']), + ('mon', ['allow profile rbd-mirror-peer', + 'allow command "service dump"', + 'allow command "service status"' + ]), ('osd', ['profile rbd']), ('mgr', ['allow r']), ]) From b14c69fd5d4e5a86aa7e9fbc744306f3d6cb90f4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 20 Apr 2022 07:44:10 +0200 Subject: [PATCH 2385/2699] Updates for opendev & jammy enablement - Add .zuul.yaml to run gate testing - charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs] - Refresh tox targets - Add jammy metadata - Default source is yoga - fix typo in ganesha-client property Change-Id: Iddf3e97078bc397ace4995f53417837445579fa3 --- ceph-nfs/.gitreview | 4 ++++ ceph-nfs/.zuul.yaml | 4 ++++ ceph-nfs/charmcraft.yaml | 17 +++++++++++++---- ceph-nfs/config.yaml | 2 +- ceph-nfs/metadata.yaml | 4 +--- ceph-nfs/osci.yaml | 13 +++++-------- ceph-nfs/src/charm.py | 2 +- ceph-nfs/tests/nfs_ganesha.py | 4 ++-- ceph-nfs/tox.ini | 5 +++++ 9 files changed, 36 insertions(+), 19 deletions(-) create mode 100644 ceph-nfs/.gitreview create mode 100644 ceph-nfs/.zuul.yaml diff --git a/ceph-nfs/.gitreview b/ceph-nfs/.gitreview new file mode 100644 index 00000000..1156baba --- /dev/null +++ b/ceph-nfs/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.opendev.org +port=29418 +project=openstack/charm-ceph-nfs.git diff --git a/ceph-nfs/.zuul.yaml b/ceph-nfs/.zuul.yaml new file mode 100644 index 00000000..1ffc530a 
--- /dev/null +++ b/ceph-nfs/.zuul.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - openstack-python3-charm-yoga-jobs + - openstack-cover-jobs \ No newline at end of file diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml index c8792009..23219175 100644 --- a/ceph-nfs/charmcraft.yaml +++ b/ceph-nfs/charmcraft.yaml @@ -19,8 +19,17 @@ parts: apt update apt install -y ca-certificates update-ca-certificates + bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 \ No newline at end of file + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index a02c7a10..00c53bcb 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -10,7 +10,7 @@ options: source: type: string - default: proposed + default: yoga description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 6f402dc9..051ed1f9 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -9,9 +9,7 @@ tags: - misc series: - focal - - groovy - - hirsute - - impish + - jammy subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index 7f46aa73..3cb71493 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -1,6 +1,7 @@ - project: templates: - - charm-unit-jobs + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 check: jobs: - focal-pacific @@ -12,12 +13,10 @@ name: focal-pacific parent: func-target dependencies: - charm-build + - charm-build - osci-lint - - tox-py35 - - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 vars: tox_extra_args: focal-pacific - job: @@ -26,9 +25,7 @@ dependencies: - charm-build - osci-lint - - tox-py35 - - tox-py36 - - tox-py37 - tox-py38 + - tox-py39 vars: tox_extra_args: focal-quincy \ No newline at end of file diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 126ed3a0..256e498f 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -244,7 +244,7 @@ def client_name(self): @property def ganesha_client(self): - GaneshaNFS(self.client_name, self.pool_name) + return GaneshaNFS(self.client_name, self.pool_name) def request_ceph_pool(self, event): """Request pools from Ceph cluster.""" diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 42bd962f..100c2215 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -111,7 +111,7 @@ def _install_dependencies(self, unit: str): def _write_testing_file_on_instance(self, instance_name: str): zaza.utilities.generic.run_via_ssh( unit_name=instance_name, - cmd='echo "test" | sudo tee {}/test'.format(self.mount_dir)) + cmd='echo -n "test" | sudo tee {}/test'.format(self.mount_dir)) @tenacity.retry( stop=tenacity.stop_after_attempt(5), @@ -121,7 +121,7 @@ def _verify_testing_file_on_instance(self, instance_name: str): output = run_with_juju_ssh( 'sudo cat {}/test'.format(self.mount_dir)) logging.info("Verification output: {}".format(output)) - self.assertEqual('test\r\n', output) + self.assertEqual('test', output.strip()) def test_create_share(self): logging.info("Creating a share") diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini index 09f49807..c278ef61 100644 --- a/ceph-nfs/tox.ini +++ b/ceph-nfs/tox.ini @@ -65,6 
+65,11 @@ basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt From 2302d061aaa89ac325b357fc51964964f21447bf Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 29 Apr 2022 18:05:44 -0300 Subject: [PATCH 2386/2699] Fix ceph-dashboard SAML tests This patchset fixes a couple of issues with ceph-dashboard's SAML support, mostly with outdated bundle configuration. Change-Id: I6c3f8ffdf10d42f9a5280d7e429a7ae64612a139 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/762 --- ceph-dashboard/osci.yaml | 16 +++------- ceph-dashboard/src/charm.py | 26 +++------------ ceph-dashboard/tests/bundles/focal-yoga.yaml | 5 ++- .../bundles/{jammy.yaml => jammy-yoga.yaml} | 32 ++++++++++--------- 4 files changed, 27 insertions(+), 52 deletions(-) rename ceph-dashboard/tests/bundles/{jammy.yaml => jammy-yoga.yaml} (86%) diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 6502b8ab..41c40d95 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -1,21 +1,13 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 check: jobs: - - focal-octopus + - focal-yoga + - jammy-yoga: + voting: false vars: needs_charm_build: true charm_build_name: ceph-dashboard build_type: charmcraft -- job: - name: focal-octopus - parent: func-target - dependencies: - - charm-build - - osci-lint - - tox-py38 - - tox-py39 - vars: - tox_extra_args: focal diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index a3d3a2f7..a85b2df3 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -14,7 +14,6 @@ from ops.main import main from ops.model import ActiveStatus, BlockedStatus, StatusBase from ops.charm import ActionEvent -from ops_openstack.core import charm_class, get_charm_class_for_release from typing import List, Union, Tuple import base64 @@ -47,7 +46,7 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): """Ceph Dashboard charm.""" _stored = StoredState() - PACKAGES = ['ceph-mgr-dashboard'] + PACKAGES = ['ceph-mgr-dashboard', 'python3-onelogin-saml2'] CEPH_CONFIG_PATH = Path('/etc/ceph') TLS_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.key' TLS_PUB_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard-pub.key' @@ -426,7 +425,6 @@ def _configure_dashboard(self, _) -> None: ceph_utils.mgr_enable_dashboard() self._apply_ceph_config_from_charm_config() self._configure_tls() - self._configure_saml() ceph_utils.mgr_config_set( 'mgr/dashboard/{hostname}/server_addr'.format( hostname=socket.gethostname()), @@ -452,6 +450,8 @@ def _configure_dashboard(self, _) -> None: self._run_cmd([ 'ceph', 'dashboard', 'set-prometheus-api-host', prometheus_ep]) + self._configure_saml() + self._register_dashboards() self._manage_radosgw() self._manage_iscsigw() @@ -571,9 +571,6 @@ def _configure_tls(self) -> None: self.kick_dashboard() def _configure_saml(self) -> None: - if 'python3-onelogin-saml2' not in self.PACKAGES: - return - base_url = self.config.get('saml-base-url') idp_metadata = self.config.get('saml-idp-metadata') if not base_url or not idp_metadata: @@ -626,20 +623,5 @@ def _delete_user_action(self, event: ActionEvent) -> None: event.fail(exc.output) -@charm_class -class CephDashboardCharmOctopus(CephDashboardCharm): - - _stored = StoredState() - release = 
'octopus' - - -@charm_class -class CephDashboardCharmQuincy(CephDashboardCharm): - - _stored = StoredState() - PACKAGES = ['ceph-mgr-dashboard', 'python3-onelogin-saml2'] - release = 'quincy' - - if __name__ == "__main__": - main(get_charm_class_for_release()) + main(CephDashboardCharm) diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml index d0b688c7..54037750 100644 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -37,9 +37,8 @@ applications: charm: ../../ceph-dashboard.charm options: public-hostname: 'ceph-dashboard.zaza.local' - source: *openstack-origin prometheus: - charm: cs:prometheus2 + charm: ch:prometheus2 num_units: 1 grafana: # SSL and allow_embedding are not released into cs:grafana yet, due @@ -57,7 +56,7 @@ applications: options: hostname: "{host}" prometheus-alertmanager: - charm: cs:prometheus-alertmanager + charm: ch:prometheus-alertmanager num_units: 1 ceph-radosgw: charm: ch:ceph-radosgw diff --git a/ceph-dashboard/tests/bundles/jammy.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml similarity index 86% rename from ceph-dashboard/tests/bundles/jammy.yaml rename to ceph-dashboard/tests/bundles/jammy-yoga.yaml index fb195ed4..00404066 100644 --- a/ceph-dashboard/tests/bundles/jammy.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -32,8 +32,9 @@ applications: options: public-hostname: 'ceph-dashboard.zaza.local' prometheus: - charm: cs:prometheus2 + charm: ch:prometheus2 num_units: 1 + series: focal grafana: # SSL and allow_embedding are not released into cs:grafana yet, due # Octrober 2021 @@ -44,14 +45,15 @@ applications: install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip install_method: snap allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" + #telegraf: + # charm: telegraf + # channel: stable + # options: + # hostname: "{host}" prometheus-alertmanager: - charm: cs:prometheus-alertmanager + charm: ch:prometheus-alertmanager num_units: 1 + series: focal ceph-radosgw: charm: ch:ceph-radosgw num_units: 3 @@ -83,14 +85,14 @@ relations: - 'prometheus:grafana-source' - - 'grafana:certificates' - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' + #- - 'ceph-osd:juju-info' + #- 'telegraf:juju-info' + #- - 'ceph-mon:juju-info' + # - 'telegraf:juju-info' + #- - 'telegraf:prometheus-client' + # - 'prometheus:target' + #- - 'telegraf:dashboards' + # - 'grafana:dashboards' - - 'ceph-dashboard:grafana-dashboard' - 'grafana:dashboards' - - 'ceph-dashboard:alertmanager-service' From 20f339d6ece70d35e378dd0bad3559ad96f3380e Mon Sep 17 00:00:00 2001 From: Billy Olsen Date: Fri, 13 May 2022 11:40:39 -0700 Subject: [PATCH 2387/2699] Don't bootstrap osds on mon-relation-departed hook The charm attempts to bootstrap OSDs on both the mon-relation-changed and the mon-relation-departed hooks. There is no logical reason that the OSDs should be bootstrapped in the -departed hook. 
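For illustration, a minimal sketch of the resulting hook wiring, assuming the charmhelpers Hooks dispatcher that ceph_hooks.py already uses; after the change the handler is registered for -changed only, so a departing mon unit can no longer re-trigger OSD bootstrapping:

import sys
from charmhelpers.core.hookenv import Hooks

hooks = Hooks()

@hooks.hook('mon-relation-changed')  # 'mon-relation-departed' deliberately dropped
def mon_relation():
    # bootstrap OSDs only when new mon data (bootstrap/upgrade keys) arrives
    pass

if __name__ == '__main__':
    hooks.execute(sys.argv)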
Change-Id: I79a790291b0e361d2748d6bed8c989d16ad36daf Closes-Bug: #1885195 --- ceph-osd/hooks/ceph_hooks.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 7c03190b..1e31de0c 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -658,8 +658,7 @@ def get_bdev_enable_discard(): "bdev-enable-discard: %s") % bdev_enable_discard) -@hooks.hook('mon-relation-changed', - 'mon-relation-departed') +@hooks.hook('mon-relation-changed') def mon_relation(): bootstrap_key = relation_get('osd_bootstrap_key') upgrade_key = relation_get('osd_upgrade_key') From 9346bb8fd07a9af2684da24cff42ab935b648f8f Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 13 May 2022 16:08:52 +0200 Subject: [PATCH 2388/2699] Replace cs with ch links on prometheus Change-Id: I60d9fe2068b562d05e421fd1ab2914c9d5f00a25 --- ceph-mon/tests/bundles/focal-xena.yaml | 3 +-- ceph-mon/tests/bundles/focal-yoga.yaml | 3 +-- ceph-mon/tests/bundles/jammy-yoga.yaml | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index 5c0b9967..017bedcb 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -157,8 +157,7 @@ applications: channel: yoga/edge prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 + charm: ch:prometheus2 num_units: 1 to: - '16' diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 8599ac9f..823cebdc 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -157,8 +157,7 @@ applications: channel: yoga/edge prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 + charm: ch:prometheus2 num_units: 1 to: - '16' diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index 969abd16..a359c3b5 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -158,8 +158,7 @@ applications: channel: yoga/edge prometheus2: -# Pin prometheus2 charm version Bug #1891942 - charm: cs:prometheus2-18 + charm: ch:prometheus2 num_units: 1 series: focal to: From ec7ef032462a5819d6c25cd19efeb64d4240f48d Mon Sep 17 00:00:00 2001 From: Ethan Myers Date: Fri, 13 May 2022 14:14:29 -0600 Subject: [PATCH 2389/2699] Add a config option for relaxed s3 bucket names. Closes-Bug: #1926498 Change-Id: I4b329f3327a0e91ccd9f65841cc5d62736918a85 --- ceph-radosgw/config.yaml | 10 ++++++++++ ceph-radosgw/hooks/ceph_radosgw_context.py | 1 + ceph-radosgw/templates/ceph.conf | 1 + ceph-radosgw/unit_tests/test_ceph_radosgw_context.py | 5 +++++ 4 files changed, 17 insertions(+) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index ef901fce..4160b6f8 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -122,6 +122,16 @@ options: that once a pool has been created, changes to this setting will be ignored. Setting this value to -1, enables the number of placement groups to be calculated based on the Ceph placement group calculator. + relaxed-s3-bucket-names: + type: boolean + default: false + description: | + Enables relaxed S3 bucket names rules for US region buckets. 
This + allows for bucket names with any combination of letters, numbers, + periods, dashes and underscores up to 255 characters long, as long + as bucket names are unique and not formatted as IP addresses. + + https://docs.ceph.com/en/latest/radosgw/s3/bucketops/ pool-type: type: string default: replicated diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index cc7e55c9..ea1924b7 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -292,6 +292,7 @@ def __call__(self): 'unit_public_ip': unit_public_ip(), 'fsid': fsid, 'rgw_swift_versioning': config('rgw-swift-versioning-enabled'), + 'relaxed_s3_bucket_names': config('relaxed-s3-bucket-names'), 'frontend': http_frontend, } diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 29b5e26b..d049eba6 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -15,6 +15,7 @@ debug rgw = {{ loglevel }}/5 ms bind ipv6 = true {% endif %} rgw swift versioning enabled = {{ rgw_swift_versioning }} +rgw relaxed s3 bucket names = {{ relaxed_s3_bucket_names }} {% if global -%} # The following are user-provided options provided via the config-flags charm option. # User-provided [global] section config diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index ac631371..bc067c75 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -119,6 +119,7 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', + 'relaxed_s3_bucket_names': False } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -169,6 +170,7 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', + 'relaxed_s3_bucket_names': False, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -228,6 +230,7 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', + 'relaxed_s3_bucket_names': False, } self.assertEqual(expect, mon_ctxt()) @@ -269,6 +272,7 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', + 'relaxed_s3_bucket_names': False, } self.assertEqual(expect, mon_ctxt()) @@ -368,6 +372,7 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', + 'relaxed_s3_bucket_names': False, } self.assertEqual(expect, mon_ctxt()) From 7a518189cdbdbf500d3025acea5fa0d4fe0cc8a5 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 27 May 2022 17:32:31 -0300 Subject: [PATCH 2390/2699] Add the 'zonegroup' and 'realm' keys to ceph.conf file This patchset adds these 2 additional keys to the ceph.conf file, which are used in multisite configurations when present. 
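As a quick sketch of the template behaviour this adds ('zonegroup1' and 'realmX' are just the sample values from the unit tests below; the real guards live in templates/ceph.conf):

from jinja2 import Template

# Each key is rendered only when the matching charm option is set,
# mirroring the {% if rgw_zonegroup %} / {% if rgw_realm %} guards
# added to the ceph.conf template.
fragment = Template(
    "{% if rgw_zonegroup %}rgw_zonegroup = {{ rgw_zonegroup }}\n{% endif %}"
    "{% if rgw_realm %}rgw_realm = {{ rgw_realm }}\n{% endif %}"
)
print(fragment.render(rgw_zonegroup='zonegroup1', rgw_realm='realmX'))
# rgw_zonegroup = zonegroup1
# rgw_realm = realmX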
Change-Id: I51ca46bbb3479cb73ec4d9966208ed794f0ed774 Closes-Bug: #1975857 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 2 ++ ceph-radosgw/templates/ceph.conf | 8 ++++++++ ceph-radosgw/test-requirements.txt | 3 ++- .../unit_tests/test_ceph_radosgw_context.py | 14 +++++++++++++- 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index ea1924b7..ae2345d5 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -308,6 +308,8 @@ def __call__(self): # Multi-site Zone configuration is optional, # so add after assessment ctxt['rgw_zone'] = config('zone') + ctxt['rgw_zonegroup'] = config('zonegroup') + ctxt['rgw_realm'] = config('realm') return ctxt return {} diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index d049eba6..d728ac03 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -39,6 +39,14 @@ log file = /var/log/ceph/radosgw.log rgw_zone = {{ rgw_zone }} {% endif %} +{% if rgw_zonegroup -%} +rgw_zonegroup = {{ rgw_zonegroup }} +{% endif %} + +{% if rgw_realm -%} +rgw_realm = {{ rgw_realm }} +{% endif %} + rgw init timeout = 1200 rgw frontends = {{ frontend }} port={{ port }} {% if auth_type == 'keystone' %} diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 0aabe171..45508cd0 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -37,7 +37,8 @@ git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' +tempest<30.0.0;python_version<'3.8' and python_version>='3.6' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index bc067c75..9cad52e4 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -80,6 +80,8 @@ def setUp(self): self.unit_public_ip.return_value = '10.255.255.255' self.cmp_pkgrevno.side_effect = lambda *args: 1 self.arch.return_value = 'amd64' + self.test_config.set('zonegroup', 'zonegroup1') + self.test_config.set('realm', 'realmX') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @@ -119,7 +121,9 @@ def _relation_get(attr, unit, rid): 'fsid': 'testfsid', 'rgw_swift_versioning': False, 'frontend': 'beast', - 'relaxed_s3_bucket_names': False + 'relaxed_s3_bucket_names': False, + 'rgw_zonegroup': 'zonegroup1', + 'rgw_realm': 'realmX' } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -171,6 +175,8 @@ def _relation_get(attr, unit, rid): 'rgw_swift_versioning': False, 'frontend': 'beast', 'relaxed_s3_bucket_names': False, + 'rgw_zonegroup': 'zonegroup1', + 'rgw_realm': 'realmX' } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -231,6 +237,8 @@ def _relation_get(attr, unit, rid): 'rgw_swift_versioning': False, 'frontend': 'beast', 'relaxed_s3_bucket_names': False, + 'rgw_zonegroup': 'zonegroup1', + 'rgw_realm': 'realmX' } self.assertEqual(expect, mon_ctxt()) @@ -273,6 
+281,8 @@ def _relation_get(attr, unit, rid): 'rgw_swift_versioning': False, 'frontend': 'beast', 'relaxed_s3_bucket_names': False, + 'rgw_zonegroup': 'zonegroup1', + 'rgw_realm': 'realmX' } self.assertEqual(expect, mon_ctxt()) From deefefc340c788855507ac33f0f365aebfd0992b Mon Sep 17 00:00:00 2001 From: Ethan Myers Date: Thu, 19 May 2022 09:44:34 -0600 Subject: [PATCH 2391/2699] Enable charm to configure mds cache options. Closes-Bug: #1891409 Func-Test-PR: https://github.com/openstack-charmers/zaza-openstack-tests/pull/774 Change-Id: If2bdd5c0f2afa1843e686cf69570a50901c85875 --- ceph-fs/src/config.yaml | 21 +++++++++++++++++++ ceph-fs/src/lib/charm/openstack/ceph_fs.py | 11 ++++++++++ ceph-fs/src/templates/ceph.conf | 3 +++ .../test_lib_charm_openstack_ceph_fs.py | 9 ++++++++ 4 files changed, 44 insertions(+) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 3c4fa28a..f2af93ac 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -258,3 +258,24 @@ options: description: | Value of bluestore compression max blob size for solid state media on pools requested by this charm. + mds-cache-memory-limit: + type: string + default: 4Gi + description: | + Set the maximum size of Metadata Server (MDS) cache, in bytes. The MDS + will try to stay under this value by (1 - mds_cache_reservation) as a + percent. This is not a hard limit. + mds-cache-reservation: + type: float + default: 0.05 + description: | + The cache reservation for the MDS cache to maintain. The MDS will try + to stay under this value as a percent by (1 - mds_cache_reservation) + as a percent. + mds-health-cache-threshold: + type: float + default: 1.5 + description: | + If the MDS exceeds the cache size specified in mds-cache-memory-limit, + this parameter sets the memory limit, as a percentage of + mds_cache_reservation, that triggers a health warning.
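A worked example of how the three options interact, using the defaults above (a sketch only; the actual accounting is done by ceph-mds itself):

GiB = 1024 ** 3
limit = 4 * GiB                     # mds-cache-memory-limit = 4Gi
reservation = 0.05                  # mds-cache-reservation
threshold = 1.5                     # mds-health-cache-threshold

target = limit * (1 - reservation)  # MDS aims to stay below ~3.8 GiB
warn_above = limit * threshold      # above 6 GiB a health warning fires
print(target / GiB, warn_above / GiB)  # 3.8 6.0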
diff --git a/ceph-fs/src/lib/charm/openstack/ceph_fs.py b/ceph-fs/src/lib/charm/openstack/ceph_fs.py index a9433c44..99e891ff 100644 --- a/ceph-fs/src/lib/charm/openstack/ceph_fs.py +++ b/ceph-fs/src/lib/charm/openstack/ceph_fs.py @@ -53,6 +53,10 @@ def mds_name(self): def networks(self): return self.charm_instance.get_networks('ceph-public-network') + @property + def mds_cache(self): + return self.charm_instance.get_mds_cache() + @property def public_addr(self): if ch_core.hookenv.config('prefer-ipv6'): @@ -119,6 +123,13 @@ def get_public_addr(self): return self.get_host_ip() + def get_mds_cache(self): + return {'mds-cache-memory-limit': config('mds-cache-memory-limit'), + 'mds-cache-reservation': config('mds-cache-reservation'), + 'mds-health-cache-threshold': + config('mds-health-cache-threshold') + } + @cached @staticmethod def get_host_ip(hostname=None): diff --git a/ceph-fs/src/templates/ceph.conf b/ceph-fs/src/templates/ceph.conf index d064e443..cd2725cc 100644 --- a/ceph-fs/src/templates/ceph.conf +++ b/ceph-fs/src/templates/ceph.conf @@ -26,6 +26,9 @@ log file = /var/log/ceph.log [mds] keyring = /var/lib/ceph/mds/$cluster-$id/keyring +mds cache memory limit = {{ options.mds_cache_memory_limit }} +mds cache reservation = {{ options.mds_cache_reservation }} +mds health cache threshold = {{ options.mds_health_cache_threshold }} [mds.{{ options.mds_name }}] host = {{ options.hostname }} diff --git a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py index 8cf4faf9..6873aaef 100644 --- a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py +++ b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py @@ -80,3 +80,12 @@ def test_configuration_class(self): self.assertEquals( self.target.options.public_addr, '2001:db8::fake') + self.patch_target('get_mds_cache') + self.get_mds_cache.return_value = { + 'mds-cache-memory-limit': '4Gi', + 'mds-cache-reservation': 0.05, + 'mds-health-cache-threshold': 1.5} + self.assertEquals(self.target.options.mds_cache, { + 'mds-cache-memory-limit': '4Gi', + 'mds-cache-reservation': 0.05, + 'mds-health-cache-threshold': 1.5}) From 232660ab13efec5d05c7e73d44e0901a2a517d19 Mon Sep 17 00:00:00 2001 From: peppepetra86 Date: Mon, 6 Jun 2022 11:43:16 +0200 Subject: [PATCH 2392/2699] Improve OSD device details dashboard Add osd's status and utilization to the dashboard. 
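The added panels are driven by plain Prometheus metrics (ceph_osd_up, ceph_osd_in, and a byte-usage ratio for the gauge). As a rough way to sanity-check the same expressions outside Grafana, they can be evaluated against the Prometheus HTTP API; a sketch, with a placeholder endpoint:

    import json
    import urllib.parse
    import urllib.request

    PROM = "http://prometheus.example:9090"  # placeholder endpoint

    def instant_query(expr):
        """Evaluate an instant PromQL query and return the result vector."""
        url = "{}/api/v1/query?{}".format(
            PROM, urllib.parse.urlencode({"query": expr}))
        with urllib.request.urlopen(url) as resp:
            return json.load(resp)["data"]["result"]

    osd = "osd.0"
    # Same expressions as the Status and Utilization panels below.
    up = instant_query('ceph_osd_up{{ceph_daemon=~"{}"}}'.format(osd))
    util = instant_query(
        '(ceph_osd_stat_bytes_used{{ceph_daemon="{0}"}}'
        '/ceph_osd_stat_bytes{{ceph_daemon="{0}"}})*100'.format(osd))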
Closes-Bug: 1977501 Change-Id: Ic18ff5dc76a4ed4f4343a38f57c046a209e0bc96 --- .../src/dashboards/osd-device-details.json | 521 +++++++++++++++++- 1 file changed, 513 insertions(+), 8 deletions(-) diff --git a/ceph-dashboard/src/dashboards/osd-device-details.json b/ceph-dashboard/src/dashboards/osd-device-details.json index eefb5912..f4f4cbe9 100644 --- a/ceph-dashboard/src/dashboards/osd-device-details.json +++ b/ceph-dashboard/src/dashboards/osd-device-details.json @@ -36,12 +36,425 @@ "panels": [ { "collapsed": false, + "datasource": null, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "id": 16, + "panels": [], + "title": "Status $osd", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "prometheus - Juju generated source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "0", + "id": 1, + "text": "DOWN", + "to": "0.99", + "type": 2, + "value": "0" + }, + { + "from": "0.99", + "id": 2, + "text": "UP", + "to": "1", + "type": 2, + "value": "1" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 1 + }, + "id": 18, + "interval": null, + "links": [], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": { + "valueSize": 30 + }, + "textMode": "value" + }, + "pluginVersion": "7.4.1", + "targets": [ + { + "$$hashKey": "object:484", + "aggregation": "Last", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "Never", + "expr": "ceph_osd_up{ceph_daemon=~\"$osd\"}", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "", + "transparent": true, + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "prometheus - Juju generated source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "0", + "id": 1, + "text": "OUT", + "to": "0.99", + "type": 2, + "value": "0" + }, + { + "from": "0.99", + "id": 2, + "text": "IN", + "to": "1", + "type": 2, + "value": "1" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 1 + }, + "id": 19, + "interval": null, + "links": [], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": { + "valueSize": 30 + }, + "textMode": "value" + }, + "pluginVersion": "7.4.1", + "targets": [ + { + "$$hashKey": "object:484", + "aggregation": "Last", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "Never", + "expr": "ceph_osd_in{ceph_daemon=~\"$osd\"}", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": 
"randomWalk", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 21, + "panels": [], + "title": "Utilization $osd", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 18, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_osd_numpg{ceph_daemon=~\"$osd\"}", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Num PGs in $osd", + "queryType": "randomWalk", + "refId": "A" + }, + { + "expr": "avg(ceph_osd_numpg)", + "hide": false, + "interval": "", + "legendFormat": "Average Number of PGs in the Cluster", + "refId": "B" + } + ], + "thresholds": [ + { + "$$hashKey": "object:702", + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": null, + "yaxis": "right" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "PGs", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:94", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:95", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "prometheus - Juju generated source", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 60 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 6, + "x": 18, + "y": 5 + }, + "id": 25, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.4.1", + "targets": [ + { + "expr": "(ceph_osd_stat_bytes_used{ceph_daemon=\"$osd\"}/ceph_osd_stat_bytes{ceph_daemon=\"$osd\"})*100", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Utilization", + "type": "gauge" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, "id": 14, 
"panels": [], "title": "OSD Performance", @@ -53,13 +466,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 0, - "y": 1 + "y": 16 }, + "hiddenSeries": false, "id": 2, "legend": { "avg": false, @@ -74,7 +495,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -105,6 +530,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "$osd Latency", "tooltip": { @@ -149,13 +575,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 6, - "y": 1 + "y": 16 }, + "hiddenSeries": false, "id": 8, "legend": { "avg": false, @@ -170,7 +604,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -201,6 +639,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "$osd R/W IOPS", "tooltip": { @@ -245,13 +684,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 12, - "y": 1 + "y": 16 }, + "hiddenSeries": false, "id": 7, "legend": { "avg": false, @@ -266,7 +713,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -297,6 +748,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "$osd R/W Bytes", "tooltip": { @@ -337,11 +789,12 @@ }, { "collapsed": false, + "datasource": null, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 10 + "y": 25 }, "id": 12, "panels": [], @@ -354,13 +807,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 0, - "y": 11 + "y": 26 }, + "hiddenSeries": false, "id": 9, "legend": { "avg": false, @@ -375,7 +836,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -406,6 +871,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Physical Device Latency for $osd", "tooltip": { @@ -450,13 +916,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 6, - "y": 11 + "y": 26 }, + "hiddenSeries": false, "id": 5, "legend": { "avg": false, @@ -471,7 +945,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -502,6 +980,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], 
"timeShift": null, "title": "Physical Device R/W IOPS for $osd", "tooltip": { @@ -546,13 +1025,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 12, - "y": 11 + "y": 26 }, + "hiddenSeries": false, "id": 10, "legend": { "avg": false, @@ -567,7 +1054,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -598,6 +1089,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Physical Device R/W Bytes for $osd", "tooltip": { @@ -642,13 +1134,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 6, "x": 18, - "y": 11 + "y": 26 }, + "hiddenSeries": false, "id": 4, "legend": { "avg": false, @@ -663,7 +1163,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -682,6 +1186,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Physical Device Util% for $osd", "tooltip": { From 8b0e7f62bde9bb65bfa3fa2eea458a8168368e23 Mon Sep 17 00:00:00 2001 From: peppepetra86 Date: Mon, 6 Jun 2022 13:03:49 +0200 Subject: [PATCH 2393/2699] Improve Ceph cluster dashboard Add IOPS and Objects in the cluster to the dashboard. Closes-Bug: 1977504 Change-Id: Idfa6c4066b6852b3beb95e2c83823372eea2d9ae --- .../src/dashboards/ceph-cluster.json | 392 +++++++++++++++++- 1 file changed, 383 insertions(+), 9 deletions(-) diff --git a/ceph-dashboard/src/dashboards/ceph-cluster.json b/ceph-dashboard/src/dashboards/ceph-cluster.json index 61a425d0..d683696e 100644 --- a/ceph-dashboard/src/dashboards/ceph-cluster.json +++ b/ceph-dashboard/src/dashboards/ceph-cluster.json @@ -54,6 +54,12 @@ "datasource": "$datasource", "editable": false, "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -119,7 +125,6 @@ "thresholds": "1,2", "timeFrom": null, "title": "Health Status", - "transparent": false, "type": "singlestat", "valueFontSize": "50%", "valueMaps": [ @@ -152,6 +157,12 @@ "cornerRadius": 0, "datasource": "$datasource", "displayName": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "flipCard": false, "flipTime": 5, "fontFormat": "Regular", @@ -262,6 +273,12 @@ ], "datasource": "$datasource", "decimals": 2, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "percentunit", "gauge": { "maxValue": 100, @@ -339,13 +356,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 6, "w": 8, "x": 8, "y": 0 }, + "hiddenSeries": false, "id": 53, "legend": { "avg": false, @@ -360,7 +385,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -455,6 +484,7 @@ ], "thresholds": [], "timeFrom": null, + 
"timeRegions": [], "timeShift": null, "title": "PG States", "tooltip": { @@ -487,7 +517,11 @@ "min": null, "show": false } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, @@ -495,13 +529,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 6, "w": 8, "x": 16, "y": 0 }, + "hiddenSeries": false, "id": 66, "legend": { "avg": false, @@ -516,7 +558,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -561,6 +607,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "OSD Latencies", "tooltip": { @@ -593,7 +640,11 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "clusterName": "", @@ -607,6 +658,12 @@ "cornerRadius": 1, "datasource": "$datasource", "displayName": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "flipCard": false, "flipTime": 5, "fontFormat": "Regular", @@ -688,6 +745,12 @@ "cornerRadius": 0, "datasource": "$datasource", "displayName": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "flipCard": false, "flipTime": 5, "fontFormat": "Regular", @@ -729,13 +792,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, "y": 6 }, + "hiddenSeries": false, "id": 45, "legend": { "avg": false, @@ -750,12 +821,17 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 0.5, "points": false, "renderer": "flot", "seriesOverrides": [ { + "$$hashKey": "object:271", "alias": "Reads", "transform": "negative-Y" } @@ -767,6 +843,7 @@ { "expr": "sum(irate(ceph_osd_op_w_in_bytes[1m]))", "format": "time_series", + "interval": "", "intervalFactor": 1, "legendFormat": "Writes", "refId": "A" @@ -781,6 +858,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Cluster I/O", "tooltip": { @@ -798,6 +876,7 @@ }, "yaxes": [ { + "$$hashKey": "object:278", "format": "Bps", "label": "Read (-) / Write (+)", "logBase": 1, @@ -806,6 +885,7 @@ "show": true }, { + "$$hashKey": "object:279", "format": "short", "label": null, "logBase": 1, @@ -813,7 +893,11 @@ "min": null, "show": false } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, @@ -821,13 +905,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, "y": 6 }, + "hiddenSeries": false, "id": 62, "legend": { "avg": false, @@ -842,7 +934,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -860,6 +956,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "In-/Egress", "tooltip": { @@ -892,7 +989,249 @@ "min": null, "show": false } - ] + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 15 + }, + "hiddenSeries": false, + "id": 70, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_osd_op_w[1m]))", + "interval": "", + "legendFormat": "Write", + "queryType": "randomWalk", + "refId": "A" + }, + { + "expr": "sum(irate(ceph_osd_op_r[1m]))", + "hide": false, + "interval": "", + "legendFormat": "Read", + "queryType": "randomWalk", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:184", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:185", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Degraded": "orange", + "Misplaced": "yellow", + "Unfound": "red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 15 + }, + "hiddenSeries": false, + "id": 71, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(ceph_pool_objects)", + "interval": "", + "legendFormat": "Total", + "queryType": "randomWalk", + "refId": "A" + }, + { + "expr": "ceph_num_objects_degraded", + "hide": false, + "interval": "", + "legendFormat": "Degraded", + "queryType": "randomWalk", + "refId": "B" + }, + { + "expr": "ceph_num_objects_misplaced", + "hide": false, + "interval": "", + "legendFormat": "Misplaced", + "queryType": "randomWalk", + "refId": "C" + }, + { + "expr": "ceph_num_objects_unfound", + "hide": false, + "interval": "", + "legendFormat": "Unfound", + "queryType": "randomWalk", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Objects in the cluster", + "tooltip": { + "shared": true, + "sort": 
0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:184", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:185", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "cards": { @@ -909,19 +1248,27 @@ }, "dataFormat": "timeseries", "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { "h": 9, "w": 6, "x": 0, - "y": 15 + "y": 26 }, "heatmap": {}, + "hideZeroBuckets": false, "highlightCards": true, "id": 55, "legend": { "show": true }, "links": [], + "reverseYBuckets": false, "span": 12, "targets": [ { @@ -955,6 +1302,7 @@ "show": true, "splitFactor": null }, + "yBucketBound": "auto", "yBucketNumber": null, "yBucketSize": null }, @@ -972,19 +1320,27 @@ }, "dataFormat": "timeseries", "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { "h": 9, "w": 6, "x": 6, - "y": 15 + "y": 26 }, "heatmap": {}, + "hideZeroBuckets": false, "highlightCards": true, "id": 59, "legend": { "show": true }, "links": [], + "reverseYBuckets": false, "targets": [ { "expr": "ceph_osd_numpg", @@ -1014,6 +1370,7 @@ "show": true, "splitFactor": null }, + "yBucketBound": "auto", "yBucketNumber": null, "yBucketSize": null }, @@ -1023,13 +1380,21 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 15 + "y": 26 }, + "hiddenSeries": false, "id": 64, "legend": { "avg": false, @@ -1044,7 +1409,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.4.1", "pointradius": 5, "points": false, "renderer": "flot", @@ -1063,6 +1432,7 @@ ], "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, "title": "Recovery Rate", "tooltip": { @@ -1095,7 +1465,11 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "refresh": "30s", From de43d4519abafeae0564ab91c9bde075bdde9e25 Mon Sep 17 00:00:00 2001 From: Juan Pablo Norena Date: Wed, 18 May 2022 12:36:59 +0000 Subject: [PATCH 2394/2699] Add get-or-create-user and delete-user actions for ceph auth The get-or-create-user action allows to create and get user, with its mon and osd capabilities, and retrieve the related keyring. The delete-user action allows to delete users. 
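The get-or-create-user action returns the keyring as JSON in its "message" result, in the shape produced by `ceph auth get-or-create ... --format=json`. A minimal sketch of consuming that payload (the key below is a stand-in, not a real credential):

    import json

    message = '''[{"entity": "client.sandbox",
                   "key": "AQ-example-key==",
                   "caps": {"mon": "allow rw", "osd": "allow rw"}}]'''

    record = json.loads(message)[0]
    # Render a keyring stanza in the format Ceph clients expect.
    keyring = "[{}]\n    key = {}\n".format(record["entity"], record["key"])
    print(keyring)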
Closes-Bug: 1899215
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/765
Change-Id: I2bd148e442990b6ff978624023bd85a741c6259a
---
 ceph-mon/actions.yaml                         | 22 +++++++
 ceph-mon/actions/delete-user                  |  1 +
 ceph-mon/actions/delete_user.py               | 43 +++++++++++
 ceph-mon/actions/get-or-create-user           |  1 +
 ceph-mon/actions/get_or_create_user.py        | 63 +++++++++++++++++++
 ceph-mon/test-requirements.txt                |  3 +-
 .../unit_tests/test_action_delete_user.py     | 39 ++++++++++++
 .../test_action_get_or_create_user.py         | 57 +++++++++++++++++
 8 files changed, 228 insertions(+), 1 deletion(-)
 create mode 120000 ceph-mon/actions/delete-user
 create mode 100755 ceph-mon/actions/delete_user.py
 create mode 120000 ceph-mon/actions/get-or-create-user
 create mode 100755 ceph-mon/actions/get_or_create_user.py
 create mode 100644 ceph-mon/unit_tests/test_action_delete_user.py
 create mode 100644 ceph-mon/unit_tests/test_action_get_or_create_user.py

diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml
index a33eaa63..5f7fcdf7 100644
--- a/ceph-mon/actions.yaml
+++ b/ceph-mon/actions.yaml
@@ -417,3 +417,25 @@ list-crush-rules:
       default: text
       description: "The output format, either json, yaml or text (default)"
   additionalProperties: false
+get-or-create-user:
+  description: "Get or create a user and its capabilities."
+  params:
+    username:
+      type: string
+      description: "User ID to get or create."
+    mon-caps:
+      type: string
+      default: allow rw
+      description: "Monitor capabilities include r, w, x access settings or profile {name}."
+    osd-caps:
+      type: string
+      default: allow rw
+      description: "OSD capabilities include r, w, x, class-read, class-write access settings or profile {name}."
+  required: [username]
+delete-user:
+  description: "Delete a user."
+  params:
+    username:
+      type: string
+      description: "User ID to delete."
+  required: [username]
\ No newline at end of file

diff --git a/ceph-mon/actions/delete-user b/ceph-mon/actions/delete-user
new file mode 120000
index 00000000..f55bc90f
--- /dev/null
+++ b/ceph-mon/actions/delete-user
@@ -0,0 +1 @@
+delete_user.py
\ No newline at end of file

diff --git a/ceph-mon/actions/delete_user.py b/ceph-mon/actions/delete_user.py
new file mode 100755
index 00000000..93c6016c
--- /dev/null
+++ b/ceph-mon/actions/delete_user.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+sys.path.append('hooks')
+from charmhelpers.core.hookenv import action_get, action_fail, action_set, log
+from subprocess import CalledProcessError, check_output, STDOUT
+
+
+def delete_user():
+    username = action_get("username")
+    client = "client.{}".format(username)
+    try:
+        log(f'Attempting to delete credentials for entity {client}.')
+        output = check_output(['ceph', 'auth', 'del', client],
+                              stderr=STDOUT).decode("utf-8")
+        return output
+    except CalledProcessError as e:
+        log(f'Failed to delete credentials for entity {client}.')
+        action_fail("User deletion failed because of a failed process. 
" + "Ret Code: {} Message: {}".format(e.returncode, str(e))) + + +def main(): + action_set({"message": delete_user()}) + + +if __name__ == "__main__": + main() diff --git a/ceph-mon/actions/get-or-create-user b/ceph-mon/actions/get-or-create-user new file mode 120000 index 00000000..0060cdb0 --- /dev/null +++ b/ceph-mon/actions/get-or-create-user @@ -0,0 +1 @@ +get_or_create_user.py \ No newline at end of file diff --git a/ceph-mon/actions/get_or_create_user.py b/ceph-mon/actions/get_or_create_user.py new file mode 100755 index 00000000..000855d7 --- /dev/null +++ b/ceph-mon/actions/get_or_create_user.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import json + +sys.path.append("hooks") +from charmhelpers.core.hookenv import action_get, action_fail, action_set, log +from subprocess import CalledProcessError, check_output + + +def get_or_create_user(): + username = action_get("username") + client = "client.{}".format(username) + try: + log(f'Attempting to retrieve existing credentials for entity {client}') + keyring = json.loads( + check_output(["ceph", "auth", "get", client, + "--format=json"]).decode("utf-8") + ) + log(f'Found existing credentials for entity {client}') + return json.dumps(keyring, indent=2) + except CalledProcessError: + log(f'Credentials for entity {client} not found') + pass + try: + log(f'Attempting to create new credentials for entity {client}') + mon_caps = action_get("mon-caps") + osd_caps = action_get("osd-caps") + log(f'with the following mon capabilities: {mon_caps},') + log(f'and osd capabilities: {osd_caps}.') + keyring = json.loads( + check_output(["ceph", "auth", "get-or-create", + client, "mon", mon_caps, "osd", osd_caps, + "--format=json"]).decode("utf-8") + ) + log(f'New credentials for entity {client} created') + return json.dumps(keyring, indent=2) + except CalledProcessError as e: + log(f'Failed to get or create credentials for entity {client}.') + action_fail("User creation failed because of a failed process. 
" + "Ret Code: {} Message: {}".format(e.returncode, str(e))) + + +def main(): + action_set({"message": get_or_create_user()}) + + +if __name__ == "__main__": + main() diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index e9401604..d515cae9 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -44,7 +44,8 @@ git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' +tempest<31.0.0;python_version<'3.8' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-mon/unit_tests/test_action_delete_user.py b/ceph-mon/unit_tests/test_action_delete_user.py new file mode 100644 index 00000000..74c66201 --- /dev/null +++ b/ceph-mon/unit_tests/test_action_delete_user.py @@ -0,0 +1,39 @@ +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for delete_user action.""" + +from actions import delete_user +from test_utils import CharmTestCase + + +class DeleteUserTestCase(CharmTestCase): + _stderr = b"""updated""" + + def setUp(self): + super(DeleteUserTestCase, self).setUp( + delete_user, ["check_output", "action_get", "action_fail", + "action_set", "log"]) + self.action_get.return_value = "sandbox" # username=sandbox + self.check_output.return_value = self._stderr + + def test_delete_user(self): + """Test getting status updated.""" + self.user = None + + def _action_set(message): + self.user = message["message"] + self.action_set.side_effect = _action_set + delete_user.main() + self.action_get.assert_called_once_with("username") + self.assertEqual(self.user, "updated") diff --git a/ceph-mon/unit_tests/test_action_get_or_create_user.py b/ceph-mon/unit_tests/test_action_get_or_create_user.py new file mode 100644 index 00000000..03127acb --- /dev/null +++ b/ceph-mon/unit_tests/test_action_get_or_create_user.py @@ -0,0 +1,57 @@ +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for get_or_create_user action.""" + +import json + +from actions import get_or_create_user +from test_utils import CharmTestCase + + +class GetOrCreateUserTestCase(CharmTestCase): + _keyring = b""" + [ + { + "entity": "client.sandbox", + "key": "AQCnGXxiOkueGBAAsWX27MV8PNwuyMhPSzSCPg==", + "caps": { + "mon": "allow r", + "osd": "allow r" + } + } + ]""" + + def setUp(self): + super(GetOrCreateUserTestCase, self).setUp( + get_or_create_user, ["check_output", "action_get", "action_fail", + "action_set", "log"]) + self.action_get.return_value = "sandbox" # username=sandbox + self.check_output.return_value = self._keyring + + def test_get_or_create_user(self): + """Test getting resulting keyring.""" + self.user = None + + def _action_set(message): + self.user = json.loads(message["message"]) + self.action_set.side_effect = _action_set + get_or_create_user.main() + self.action_get.assert_called_once_with("username") + self.assertEqual(self.user[0]["entity"], "client.sandbox") + self.assertEqual( + self.user[0]["key"], + "AQCnGXxiOkueGBAAsWX27MV8PNwuyMhPSzSCPg==" + ) + self.assertEqual(self.user[0]["caps"]["mon"], "allow r") + self.assertEqual(self.user[0]["caps"]["osd"], "allow r") From abd75e7f51b67eb17f4499a2a0c6a98675396b00 Mon Sep 17 00:00:00 2001 From: "Chi Wai, Chan" Date: Wed, 22 Jun 2022 15:44:53 +0800 Subject: [PATCH 2395/2699] Fixed typo in a function comment. --check_osds_down --> --check_num_osds Change-Id: Ic5938cc5f12606ff0cc67df988b95ecf673b6c5f --- ceph-mon/files/nagios/check_ceph_status.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 045bcac5..11e32595 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -175,7 +175,7 @@ def check_ceph_status(args): print(message_all_ok) return message_all_ok - # if it is just --check_osds_down, deal with it and ignore overall health + # if it is just --check_num_osds, deal with it and ignore overall health if args.check_num_osds: osdmap = status_data['osdmap']['osdmap'] num_osds = osdmap['num_osds'] From 3f11d79e1f6376b1a9212ef3b1bbfa2cc600c5fb Mon Sep 17 00:00:00 2001 From: Connor Chamberlain Date: Fri, 25 Feb 2022 08:33:10 -0700 Subject: [PATCH 2396/2699] Added safe-pg-repair action This action automatically repairs inconsistent placement groups which are caused by read errors. PGs are repaired using `ceph pg repair `. Action is only taken if on of a PG's shards has a "read_error", and no action will be taken if any additional errors are found. No action will be taken if multiple "read_errors" are found. This action is intended to be safe to run in all contexts. Closes-Bug: #1923218 Change-Id: I903dfe02aa3b7c67414e3d0d9b57f4042d301830 --- ceph-mon/actions.yaml | 4 +- ceph-mon/actions/pg-repair | 1 + ceph-mon/actions/pg_repair.py | 202 +++++++++++++ ceph-mon/unit_tests/test_action_pg_repair.py | 280 +++++++++++++++++++ 4 files changed, 486 insertions(+), 1 deletion(-) create mode 120000 ceph-mon/actions/pg-repair create mode 100755 ceph-mon/actions/pg_repair.py create mode 100644 ceph-mon/unit_tests/test_action_pg_repair.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 5f7fcdf7..0ab20b5c 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -438,4 +438,6 @@ delete-user: username: type: string description: "User ID to delete." 
- required: [username] \ No newline at end of file + required: [username] +pg-repair: + description: "Repair inconsistent placement groups, if safe to do so." diff --git a/ceph-mon/actions/pg-repair b/ceph-mon/actions/pg-repair new file mode 120000 index 00000000..e60c9660 --- /dev/null +++ b/ceph-mon/actions/pg-repair @@ -0,0 +1 @@ +pg_repair.py \ No newline at end of file diff --git a/ceph-mon/actions/pg_repair.py b/ceph-mon/actions/pg_repair.py new file mode 100755 index 00000000..6dd17ecc --- /dev/null +++ b/ceph-mon/actions/pg_repair.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import sys +from subprocess import check_output, CalledProcessError + +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, "../hooks")) +_lib = os.path.abspath(os.path.join(_path, "../lib")) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_hooks) +_add_path(_lib) + + +from charmhelpers.core.hookenv import ( + log, + function_fail, + function_set, +) +from charms_ceph.utils import list_pools + + +def get_rados_inconsistent_objs(pg): + """Get all inconsistent objects for a given placement group. + + :param pg: Name of a placement group + :type pg: str + :return: list of inconsistent objects + :rtype: list[str] + """ + return json.loads( + check_output( + ["rados", "list-inconsistent-obj", pg, "--format=json-pretty"] + ).decode("UTF-8") + ) + + +def get_rados_inconsistent_pgs(pool): + """Get all inconsistent placement groups for a given pool. + + :param pool: Name of a Ceph pool + :type pool: str + :returns: list of inconsistent placement group IDs + :rtype: list[str] + """ + return json.loads( + check_output(["rados", "list-inconsistent-pg", pool]).decode("UTF-8") + ) + + +def get_inconsistent_pgs(ceph_pools): + """Get all inconsistent placement groups for a list of pools. + + :param ceph_pools: List of names of Ceph pools + :type ceph_pools: list[str] + :returns: list of inconsistent placement group IDs as a set + :rtype: set[str] + """ + inconsistent_pgs = set() + for pool in ceph_pools: + inconsistent_pgs.update(get_rados_inconsistent_pgs(pool)) + return inconsistent_pgs + + +def get_safe_pg_repairs(inconsistent_pgs): + """Filters inconsistent placement groups for ones that are safe to repair. + + :param inconsistent_pgs: List of inconsistent placement groups + :type inconsistent_pgs: list[str] + :returns: list of safely repairable placement groups as a set + :rtype: set[str] + """ + return {pg for pg in inconsistent_pgs if is_pg_safe_to_repair(pg)} + + +def is_pg_safe_to_repair(pg): + """Determines if a placement group is safe to repair. + + :param pg: Name of an inconsistent placement group + :type pg: str + :returns: placement group is safe to repair + :rtype: bool + """ + # Additional tests for known safe cases can be added here. 
+ return has_read_error_only(pg) + + +def has_read_error_only(pg): + """Determines if an inconsistent placement group is caused by a read error. + Returns False if no read errors are found, or if any errors other than read + errors are found. + + :param pg: ID of an inconsistent placement group + :type pg: str + :returns: placement group is safe to repair + :rtype: bool + """ + rados_inconsistent_objs = get_rados_inconsistent_objs(pg) + read_error_found = False + for inconsistent in rados_inconsistent_objs.get("inconsistents", []): + for shard in inconsistent.get("shards", []): + errors = shard.get("errors", []) + if errors == ["read_error"]: + if read_error_found: + return False + read_error_found = True + continue + elif errors: + # Error other than "read_error" detected + return False + return read_error_found + + +def perform_pg_repairs(pgs): + """Runs `ceph pg repair` on a group of placement groups. + All placement groups provided should be confirmed as safe prior to using + this method. + + :param pgs: List of safe-to-repair placement groups + :type pg: list[str] + """ + for pg in pgs: + log("Repairing ceph placement group {}".format(pg)) + check_output(["ceph", "pg", "repair", pg]) + + +def pg_repair(): + """Repair all inconsistent placement groups caused by read errors.""" + ceph_pools = list_pools() + if not ceph_pools: + msg = "No Ceph pools found." + log(msg) + function_set(msg) + return + + # Get inconsistent placement groups + inconsistent_pgs = get_inconsistent_pgs(ceph_pools) + if not inconsistent_pgs: + msg = "No inconsistent placement groups found." + log(msg) + function_set(msg) + return + + # Filter for known safe cases + safe_pg_repairs = get_safe_pg_repairs(inconsistent_pgs) + unsafe_pg_repairs = inconsistent_pgs.difference(safe_pg_repairs) + + # Perform safe placement group repairs + if unsafe_pg_repairs: + log( + "Ignoring unsafe placement group repairs: {}".format( + unsafe_pg_repairs + ) + ) + if safe_pg_repairs: + log("Safe placement group repairs found: {}".format(safe_pg_repairs)) + perform_pg_repairs(safe_pg_repairs) + function_set( + { + "message": "placement groups repaired: {}".format( + sorted(safe_pg_repairs) + ) + } + ) + else: + msg = "No safe placement group repairs found." + log(msg) + function_set(msg) + + +def main(): + try: + pg_repair() + except CalledProcessError as e: + log(e) + function_fail( + "Safe placement group repair failed with error: {}".format(str(e)) + ) + + +if __name__ == "__main__": + main() diff --git a/ceph-mon/unit_tests/test_action_pg_repair.py b/ceph-mon/unit_tests/test_action_pg_repair.py new file mode 100644 index 00000000..258c6103 --- /dev/null +++ b/ceph-mon/unit_tests/test_action_pg_repair.py @@ -0,0 +1,280 @@ +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the pg_repair action.""" + +from actions import pg_repair as action +import unittest.mock as mock +from test_utils import CharmTestCase +import json + + +class PlacementGroupRepairTestCase(CharmTestCase): + """Run tests for the action.""" + + def setUp(self): + """Init mocks for test cases.""" + super(PlacementGroupRepairTestCase, self).setUp( + action, + [ + "function_fail", + "function_set", + "get_rados_inconsistent_objs", + "get_rados_inconsistent_pgs", + ], + ) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_pgs") + def test_get_inconsistent_pgs(self, _rados_inc_pgs): + """Test collection of all inconsistent placement groups.""" + _rados_inc_pgs.side_effect = (["1.a", "2.b"], ["2.b", "3.c"], []) + ceph_pools = ["testPool0", "testPool1", "testPool2"] + result = action.get_inconsistent_pgs(ceph_pools) + self.assertEqual(result, {"1.a", "2.b", "3.c"}) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + def test_safe_case_detection(self, _rados_inc_objs): + """Test that safe case is detected.""" + _rados_inc_objs.return_value = rados_inc_obj_output_safe() + result = action.is_pg_safe_to_repair("") + self.assertTrue(result) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + def test_unsafe_case_detection_extra_erros(self, _rados_inc_objs): + """Test that the unsafe case of extra errors is detected.""" + _rados_inc_objs.return_value = rados_inc_obj_output_extra_errors() + result = action.is_pg_safe_to_repair("") + self.assertFalse(result) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + def test_unsafe_case_detection_multiple_read_errors(self, _rados_inc_objs): + """Test that the unsafe case of multiple read errors is detected.""" + _rados_inc_objs.return_value = ( + rados_inc_obj_output_multiple_read_errors() + ) + result = action.is_pg_safe_to_repair("") + self.assertFalse(result) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + def test_get_safe_pg_repair(self, _rados_inc_objs): + _rados_inc_objs.side_effect = ( + rados_inc_obj_output_safe(), + rados_inc_obj_output_extra_errors(), + rados_inc_obj_output_multiple_read_errors(), + ) + inconsistent_pgs = ("3.1f2", "12.ab3", "16.222") + result = action.get_safe_pg_repairs(inconsistent_pgs) + self.assertEqual(result, {"3.1f2"}) + + @mock.patch("actions.pg_repair.list_pools") + def test_pg_repair_no_ceph_pools(self, _list_pools): + """Test action fails when no Ceph pools found.""" + _list_pools.return_value = [] + action.pg_repair() + msg = "No Ceph pools found." + self.function_set.assert_called_once_with(msg) + + @mock.patch("actions.pg_repair.get_inconsistent_pgs") + @mock.patch("actions.pg_repair.list_pools") + def test_pg_repair_no_inconsistent_pgs(self, _list_pools, _get_inc_pgs): + _list_pools.return_value = ["testPool"] + _get_inc_pgs.return_value = [] + action.pg_repair() + msg = "No inconsistent placement groups found." 
+ self.function_set.assert_called_once_with(msg) + + @mock.patch("actions.pg_repair.check_output") + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + @mock.patch("actions.pg_repair.get_rados_inconsistent_pgs") + @mock.patch("actions.pg_repair.list_pools") + def test_pg_repair_safe_case( + self, _list_pools, _rados_inc_pgs, _rados_inc_objs, _check_output + ): + """Test action succeeds with one read error.""" + _list_pools.return_value = ["testPool"] + _rados_inc_pgs.return_value = {"16.abf", "12.bd4"} + _rados_inc_objs.return_value = rados_inc_obj_output_safe() + _check_output.return_value = b"" + action.pg_repair() + self.function_set.assert_called_once_with( + {"message": "placement groups repaired: ['12.bd4', '16.abf']"} + ) + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + @mock.patch("actions.pg_repair.get_rados_inconsistent_pgs") + @mock.patch("actions.pg_repair.list_pools") + def test_pg_repair_extra_errors( + self, _list_pools, _rados_inc_pgs, _rados_inc_objs + ): + """Test action fails with errors other than read errors.""" + _list_pools.return_value = ["testPool"] + _rados_inc_pgs.return_value = {"16.abf", "12.bd4"} + _rados_inc_objs.return_value = rados_inc_obj_output_extra_errors() + action.pg_repair() + self.function_set.assert_called_once() + + @mock.patch("actions.pg_repair.get_rados_inconsistent_objs") + @mock.patch("actions.pg_repair.get_rados_inconsistent_pgs") + @mock.patch("actions.pg_repair.list_pools") + def test_pg_repair_multiple_read_errors( + self, _list_pools, _rados_inc_pgs, _rados_inc_objs + ): + """Test action fails with multiple read errors.""" + _list_pools.return_value = ["testPool"] + _rados_inc_pgs.return_value = {"16.abf", "12.bd4"} + _rados_inc_objs.return_value = ( + rados_inc_obj_output_multiple_read_errors() + ) + action.pg_repair() + self.function_set.assert_called_once() + + +def rados_inc_obj_output_safe(): + return json.loads("""{ + "epoch": 873, + "inconsistents": [ + { + "object": { + "data": "nothing to see here" + }, + "errors": [], + "union_shard_errors": [ + "read_error" + ], + "selected_object_info": { + "data": "nothing to see here" + }, + "shards": [ + { + "osd": 53, + "primary": true, + "errors": [ + "read_error" + ], + "size": 4046848 + }, + { + "osd": 56, + "primary": false, + "errors": [], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + }, + { + "osd": 128, + "primary": false, + "errors": [], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + } + ] + } + ] + }""") + + +def rados_inc_obj_output_extra_errors(): + return json.loads("""{ + "epoch": 873, + "inconsistents": [ + { + "object": { + "data": "nothing to see here" + }, + "errors": [], + "union_shard_errors": [ + "read_error" + ], + "selected_object_info": { + "data": "nothing to see here" + }, + "shards": [ + { + "osd": 53, + "primary": true, + "errors": [ + "read_error", + "some_other_error" + ], + "size": 4046848 + }, + { + "osd": 56, + "primary": false, + "errors": [], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + }, + { + "osd": 128, + "primary": false, + "errors": [], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + } + ] + } + ] + }""") + + +def rados_inc_obj_output_multiple_read_errors(): + return json.loads("""{ + "epoch": 873, + "inconsistents": [ + { + "object": { + "data": "nothing to see here" + }, + "errors": [], + "union_shard_errors": [ + "read_error" + ], + "selected_object_info": { + "data": "nothing 
to see here" + }, + "shards": [ + { + "osd": 53, + "primary": true, + "errors": [ + "read_error" + ], + "size": 4046848 + }, + { + "osd": 56, + "primary": false, + "errors": [ + "read_error" + ], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + }, + { + "osd": 128, + "primary": false, + "errors": [], + "size": 4046848, + "omap_digest": "0xffffffff", + "data_digest": "0xb86056e7" + } + ] + } + ] + }""") From 84683303e8f1288f51273073674b117cdb39e318 Mon Sep 17 00:00:00 2001 From: Gokhan Cetinkaya Date: Fri, 15 Jul 2022 01:46:58 +0000 Subject: [PATCH 2397/2699] Docfix objecs -> objects Change-Id: I48d0ff007dfd6c23c7005e8d6afee8bf6b9a7d8c --- ceph-mon/actions.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 0ab20b5c..301413b7 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -209,7 +209,7 @@ list-erasure-profiles: description: "List the names of all erasure code profiles" additionalProperties: false list-inconsistent-objs: - description: "List the names of the inconsistent objecs per PG" + description: "List the names of the inconsistent objects per PG" params: format: type: string From c32f4675c0ff825f170b50b99eb59454c0a208a3 Mon Sep 17 00:00:00 2001 From: Hicham El Gharbi Date: Tue, 19 Jul 2022 12:18:06 +0200 Subject: [PATCH 2398/2699] Create NRPE check to verify ceph daemons versions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This NRPE check confirms if the versions of cluster daemons are divergent. WARN - any minor version diverged WARN – any versions are 1 release behind the mon CRIT – any versions are 2 releases behind the mon CRIT – any versions releases are head the mon A juju action is also provided 'get-versions-report' which provide to users, a quick way to see daemons versions running on cluster hosts. 
Closes-Bug: #1943628 Change-Id: I41b5c8576dc9cf885fa813a93e6d51e8804eb9d8 --- ceph-mon/actions.yaml | 2 + ceph-mon/actions/ceph_ops.py | 51 +++++++++++ ceph-mon/actions/get-versions-report | 1 + ceph-mon/actions/get_versions_report.py | 26 ++++++ ceph-mon/files/nagios/check_ceph_status.py | 75 ++++++++++++++++ ceph-mon/hooks/ceph_hooks.py | 8 ++ ceph-mon/unit_tests/ceph_ls_node.json | 35 ++++++++ .../unit_tests/ceph_versions_alligned.json | 15 ++++ .../unit_tests/ceph_versions_diverged.json | 19 +++++ ceph-mon/unit_tests/test_actions_mon.py | 40 +++++++++ ceph-mon/unit_tests/test_check_ceph_status.py | 85 +++++++++++++++++++ 11 files changed, 357 insertions(+) create mode 120000 ceph-mon/actions/get-versions-report create mode 100755 ceph-mon/actions/get_versions_report.py create mode 100644 ceph-mon/unit_tests/ceph_ls_node.json create mode 100644 ceph-mon/unit_tests/ceph_versions_alligned.json create mode 100644 ceph-mon/unit_tests/ceph_versions_diverged.json diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 301413b7..9655a527 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -4,6 +4,8 @@ resume-health: description: "Resume ceph health operations across the entire ceph cluster" get-health: description: "Output the current cluster health reported by `ceph health`" +get-versions-report: + description: "Outputs running daemon versions for all cluster members" create-cache-tier: description: "Create a new cache tier" params: diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index 5cc7b13a..0e6eb7ac 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -26,6 +26,11 @@ set_pool_quota, snapshot_pool, remove_pool_snapshot +class CephReportError(Exception): + """This indicates a critical error.""" + pass + + def list_pools(): """Return a list of all Ceph pools.""" try: @@ -35,6 +40,52 @@ def list_pools(): action_fail(str(e)) +def get_versions_report(): + """ + Return a mapping of hosts and their related ceph daemon versions. + + On error, raise a CephReportError. + """ + report = dict() + try: + output = check_output(['ceph', 'node', 'ls']).decode('UTF-8') + except CalledProcessError as e: + action_fail(str(e)) + raise(CephReportError("Getting nodes list fail")) + nodes_list = json.loads(output) + + # osd versions + for osd_host, osds in nodes_list['osd'].items(): + report.setdefault(osd_host, []) + for osd in osds: + try: + output = check_output(['ceph', 'tell', + "osd.{}".format(osd), + 'version']).decode('UTF-8') + except CalledProcessError: + raise( + CephReportError("Getting osd.{} version fail".format(osd)) + ) + report[osd_host].append(json.loads(output)['version']) + + # mon versions + for mon_host, mons in nodes_list['mon'].items(): + report.setdefault(mon_host, []) + for mon in mons: + try: + output = check_output(['ceph', 'tell', + "mon.{}".format(mon), + 'version']).decode('UTF-8') + except CalledProcessError as e: + action_fail(str(e)) + raise( + CephReportError("Getting mon.{} version fail".format(mon)) + ) + report[mon_host].append(json.loads(output)['version']) + + return json.dumps(report, indent=4) + + def get_health(): """ Returns the output of 'ceph health'. 
diff --git a/ceph-mon/actions/get-versions-report b/ceph-mon/actions/get-versions-report new file mode 120000 index 00000000..b50dd0fe --- /dev/null +++ b/ceph-mon/actions/get-versions-report @@ -0,0 +1 @@ +get_versions_report.py \ No newline at end of file diff --git a/ceph-mon/actions/get_versions_report.py b/ceph-mon/actions/get_versions_report.py new file mode 100755 index 00000000..d1ea92b6 --- /dev/null +++ b/ceph-mon/actions/get_versions_report.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ceph_ops import get_versions_report, CephReportError +from charmhelpers.core.hookenv import log, action_set, action_fail + +if __name__ == '__main__': + try: + action_set({'message': get_versions_report()}) + except CephReportError as e: + log(e) + action_fail( + "get versions report failed with message: {}".format(str(e))) diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 11e32595..074efec5 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -86,6 +86,32 @@ def get_ceph_version(): return out_version +def get_daemons_versions(): + """ + Uses CLI to get the ceph versions + + :returns: set containing tuple of integers, + all the differents versions encountered in the cluster + :raises: UnknownError + """ + try: + tree = subprocess.check_output(['ceph', + 'versions']).decode('UTF-8') + except subprocess.CalledProcessError as e: + raise UnknownError( + "UNKNOWN: could not determine OSDs versions, error: {}".format(e)) + ceph_versions = json.loads(tree) + # ceph version command return a json output + # containing version of all daemons connected to the cluster + # here we parse the overall field, + # to get a set of all versions seen by the cluster + daemons_versions = set(map( + lambda x: tuple(int(i) for i in + x.split(' ')[2].split('.')), + ceph_versions['overall'].keys())) + return daemons_versions + + def get_status_and_messages(status_data): """ Used to get general status of a Ceph cluster as well as a list of @@ -135,6 +161,50 @@ def check_ceph_status(args): """ status_critical = False + # if it is just --check_daemons_versions_consistency, + # deal with it and ignore overall health + if args.check_daemons_versions_consistency: + daemons_versions = get_daemons_versions() + # we check that the osds have same versions + num_of_versions = len(daemons_versions) + if num_of_versions == 1: + message_ok = "OK: All versions alligned" + return message_ok + else: + # version diverged + # we check if major release are the same + # by parsing version number in the daemon_version set + # and keeping major version number or coverting the minor + # version number if major version is 0 + num_of_releases = set(map(lambda x: x[0], daemons_versions)) + if len(num_of_releases) == 1: + msg = 'WARNING: Components minor versions diverged.' 
+                msg += ' Run get-versions-report to know more'
+                raise WarnError(msg)
+            else:
+                # Releases diverged
+                major, _minor, _patch = get_ceph_version()
+                release_versions_diff = list(map(lambda x: major - x,
+                                                 num_of_releases))
+                if max(release_versions_diff) >= 2:
+                    msg = "CRITICAL: A component is " \
+                          "{} release(s) behind the mon" \
+                          ". Run get-versions-report to know more".format(
+                              max(release_versions_diff))
+                    raise CriticalError(msg)
+                if min(release_versions_diff) <= -1:
+                    msg = "CRITICAL: A component is " \
+                          "{} release(s) ahead of the mon" \
+                          ". Run get-versions-report to know more".format(
+                              abs(min(release_versions_diff)))
+                    raise CriticalError(msg)
+                if max(release_versions_diff) == 1:
+                    msg = "WARNING: A component is " \
+                          "{} release behind the mon" \
+                          ". Run get-versions-report to know more".format(
+                              max(release_versions_diff))
+                    raise WarnError(msg)
+
     if args.status_file:
         check_file_freshness(args.status_file)
         with open(args.status_file) as f:
@@ -287,6 +357,11 @@ def parse_args(args):
                         dest='check_num_osds',
                         default=False, action='store_true',
                         help="Check whether all OSDs are up and in")
+    parser.add_argument('--check_daemons_versions_consistency',
+                        dest='check_daemons_versions_consistency',
+                        default=False,
+                        action='store_true',
+                        help="Check that all ceph daemon versions are aligned")
     return parser.parse_args(args)

diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py
index 826c5d4b..a6cd1e32 100755
--- a/ceph-mon/hooks/ceph_hooks.py
+++ b/ceph-mon/hooks/ceph_hooks.py
@@ -1211,6 +1211,14 @@ def update_nrpe_config():
         description='Check whether all OSDs are up and in',
         check_cmd=check_cmd
     )
+    if is_leader():
+        check_cmd = 'check_ceph_status.py -f {}' \
+            ' --check_daemons_versions_consistency'.format(STATUS_FILE)
+        nrpe_setup.add_check(
+            shortname='ceph_daemons_versions',
+            description='Check whether all ceph daemon versions are aligned',
+            check_cmd=check_cmd
+        )
     nrpe_setup.write()

diff --git a/ceph-mon/unit_tests/ceph_ls_node.json b/ceph-mon/unit_tests/ceph_ls_node.json
new file mode 100644
index 00000000..556cf2e3
--- /dev/null
+++ b/ceph-mon/unit_tests/ceph_ls_node.json
@@ -0,0 +1,35 @@
+{
+    "mon": {
+        "juju-c8b0a2-3-lxd-0": [
+            "juju-c8b0a2-3-lxd-0"
+        ],
+        "juju-c8b0a2-4-lxd-0": [
+            "juju-c8b0a2-4-lxd-0"
+        ],
+        "juju-c8b0a2-5-lxd-0": [
+            "juju-c8b0a2-5-lxd-0"
+        ]
+    },
+    "osd": {
+        "aware-bee": [
+            1
+        ],
+        "grand-ape": [
+            0
+        ],
+        "lucky-muskox": [
+            2
+        ]
+    },
+    "mgr": {
+        "juju-c8b0a2-3-lxd-0": [
+            "juju-c8b0a2-3-lxd-0"
+        ],
+        "juju-c8b0a2-4-lxd-0": [
+            "juju-c8b0a2-4-lxd-0"
+        ],
+        "juju-c8b0a2-5-lxd-0": [
+            "juju-c8b0a2-5-lxd-0"
+        ]
+    }
+}
diff --git a/ceph-mon/unit_tests/ceph_versions_alligned.json b/ceph-mon/unit_tests/ceph_versions_alligned.json
new file mode 100644
index 00000000..3acae499
--- /dev/null
+++ b/ceph-mon/unit_tests/ceph_versions_alligned.json
@@ -0,0 +1,15 @@
+{
+    "mon": {
+        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3
+    },
+    "mgr": {
+        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3
+    },
+    "osd": {
+        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 2
+    },
+    "mds": {},
+    "overall": {
+        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 8
+    }
+}
diff --git a/ceph-mon/unit_tests/ceph_versions_diverged.json b/ceph-mon/unit_tests/ceph_versions_diverged.json
new file mode 100644
index 00000000..4dd5c5af
--- /dev/null
+++ b/ceph-mon/unit_tests/ceph_versions_diverged.json
@@ -0,0 +1,19 @@
+{
+    "mon": {
+        "ceph version 15.2.16
(d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 1, + "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 + }, + "mgr": { + "ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 3 + }, + "osd": { + "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3, + "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 + }, + "mds": {}, + "overall": { + "ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 4, + "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3, + "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 + } +} diff --git a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py index edbb4561..ff54db0f 100644 --- a/ceph-mon/unit_tests/test_actions_mon.py +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -13,6 +13,7 @@ import json import sys import unittest.mock as mock +from subprocess import CalledProcessError from test_utils import CharmTestCase @@ -53,6 +54,45 @@ def test_get_health(self): cmd = ['ceph', 'health'] self.check_output.assert_called_once_with(cmd) + def test_get_version_report_ok(self): + def _call_rslt(): + with open('unit_tests/ceph_ls_node.json') as f: + tree = f.read() + yield tree.encode('UTF-8') + while True: + yield ('{' + ' "version": "16.2.7",' + ' "release": "pacific",' + ' "release_type": "stable"' + '}').encode('UTF-8') + self.check_output.side_effect = _call_rslt() + result = actions.get_versions_report() + self.assertEqual('{\n' + ' "aware-bee": [\n' + ' "16.2.7"\n' + ' ],\n' + ' "grand-ape": [\n' + ' "16.2.7"\n' + ' ],\n' + ' "lucky-muskox": [\n' + ' "16.2.7"\n' + ' ],\n' + ' "juju-c8b0a2-3-lxd-0": [\n' + ' "16.2.7"\n' + ' ],\n' + ' "juju-c8b0a2-4-lxd-0": [\n' + ' "16.2.7"\n' + ' ],\n' + ' "juju-c8b0a2-5-lxd-0": [\n' + ' "16.2.7"\n' + ' ]\n' + '}', result) + + def test_get_version_report_fail(self): + self.check_output.side_effect = CalledProcessError(1, 'ceph node ls') + self.assertRaises(actions.CephReportError, + lambda: actions.get_versions_report()) + @mock.patch('socket.gethostname') def test_get_quorum_status(self, mock_hostname): mock_hostname.return_value = 'mockhost' diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index 5342ce55..e6984884 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -17,6 +17,7 @@ import sys from unittest.mock import patch +from subprocess import CalledProcessError # import the module we want to test os.sys.path.insert(1, os.path.join(sys.path[0], 'files/nagios')) @@ -25,6 +26,90 @@ @patch('subprocess.check_output') class NagiosTestCase(unittest.TestCase): + def test_get_daemons_versions_alligned(self, mock_subprocess): + with open('unit_tests/ceph_versions_alligned.json', 'rb') as f: + mock_subprocess.return_value = f.read() + osds_versions = check_ceph_status.get_daemons_versions() + self.assertEqual(osds_versions, set([(16, 2, 7)])) + + def test_get_daemons_versions_diverged(self, mock_subprocess): + with open('unit_tests/ceph_versions_diverged.json', 'rb') as f: + mock_subprocess.return_value = f.read() + osds_versions = check_ceph_status.get_daemons_versions() + self.assertEqual(osds_versions, set([(16, 2, 7), (17, 2, 0), + (15, 2, 16)])) + + def test_get_daemons_versions_exeption(self, mock_subprocess): + mock_subprocess.side_effect = 
CalledProcessError(1, 'ceph versions') + self.assertRaises(check_ceph_status.UnknownError, + lambda: check_ceph_status.get_daemons_versions()) + + # Version Alligned + @patch('check_ceph_status.get_daemons_versions') + def test_versions_alligned(self, mock_daemons_versions, mock_subprocess): + mock_subprocess.return_value = 'ceph version 16.2.7 ' \ + '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(16, 2, 7)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + check_output = check_ceph_status.check_ceph_status(args) + self.assertRegex(check_output, r"^OK: All versions alligned$") + + # Minor version diverged + @patch('check_ceph_status.get_daemons_versions') + def test_min_versions_diverged(self, mock_daemons_versions, + mock_subprocess): + mock_subprocess.return_value = 'ceph version 16.2.7 ' \ + '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(16, 2, 7), (16, 1, 7)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Major version ahead + @patch('check_ceph_status.get_daemons_versions') + def test_one_version_ahead(self, mock_daemons_versions, mock_subprocess): + mock_subprocess.return_value = 'ceph version 16.2.7 ' \ + '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(16, 2, 7), (17, 2, 0)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Two major version ahead + @patch('check_ceph_status.get_daemons_versions') + def test_two_version_ahead(self, mock_daemons_versions, mock_subprocess): + mock_subprocess.return_value = 'ceph version 15.2.16 ' \ + '(d46a73d6d0a67a79558054a3a5a72cb561724974)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(15, 2, 16), (17, 2, 0)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Major version behind + @patch('check_ceph_status.get_daemons_versions') + def test_version_behind(self, mock_daemons_versions, mock_subprocess): + mock_subprocess.return_value = 'ceph version 16.2.7 ' \ + '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(15, 2, 16), (16, 2, 7)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + self.assertRaises(check_ceph_status.WarnError, + lambda: check_ceph_status.check_ceph_status(args)) + + # Two major version behind + @patch('check_ceph_status.get_daemons_versions') + def test_two_version_behind(self, mock_daemons_versions, mock_subprocess): + mock_subprocess.return_value = 'ceph version 17.2.0 ' \ + '(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e)'.encode('UTF-8') + mock_daemons_versions.return_value = set([(15, 2, 16), (17, 2, 0)]) + args = check_ceph_status.parse_args([ + '--check_daemons_versions_consistency']) + self.assertRaises(check_ceph_status.CriticalError, + lambda: check_ceph_status.check_ceph_status(args)) def test_get_ceph_version(self, mock_subprocess): mock_subprocess.return_value = 'ceph version 10.2.9 ' \ From 701568ca2fcf014f9b4acbbce78c3c8fb1ace11d Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: 
Wed, 20 Jul 2022 18:12:59 -0400
Subject: [PATCH 2399/2699] Update deployment commands

Update the README deployment commands by removing the cs: prefix and
the Charm Store openstack-charmers namespace.

Change-Id: I6204397161c2770acc759dd310a7ba3e3eb4d685
---
 ceph-dashboard/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md
index e8287784..dbc301c5 100644
--- a/ceph-dashboard/README.md
+++ b/ceph-dashboard/README.md
@@ -43,7 +43,7 @@ We are assuming a pre-existing Ceph cluster.

 Deploy ceph-dashboard as a subordinate to the ceph-mon charm:

-    juju deploy cs:~openstack-charmers/ceph-dashboard
+    juju deploy ceph-dashboard
     juju add-relation ceph-dashboard:dashboard ceph-mon:dashboard

 TLS is a requirement for this charm. Enable it by adding a relation to the
@@ -62,7 +62,7 @@ See [Managing TLS certificates][cdg-tls] in the
 The dashboard is accessed via a load balancer using VIPs and implemented
 via the openstack-loadbalancer and hacluster charms:

-    juju deploy -n 3 --config vip=10.5.20.200 cs:~openstack-charmers/openstack-loadbalancer
+    juju deploy -n 3 --config vip=10.5.20.200 openstack-loadbalancer
     juju deploy hacluster openstack-loadbalancer-hacluster
     juju add-relation openstack-loadbalancer:ha openstack-loadbalancer-hacluster:ha

From 27a34edf84d9f9846943fe9ebcd58b0ac6fd6cc0 Mon Sep 17 00:00:00 2001
From: utkarshbhatthere
Date: Tue, 5 Jul 2022 17:27:36 +0530
Subject: [PATCH 2400/2699] Adds support for migration to a multi-site system.

1.) Currently, multi-site can only be configured when the system is
deployed from scratch. Migration works by renaming the existing
Zone/Zonegroups (Z/ZG) to the Juju config values on the primary site
before the secondary site pulls the realm data, and then renaming and
configuring the secondary zone accordingly. During migration:

2.) If multiple Z/ZG not matching the config values are present at the
primary site, the leader unit will block and prompt use of
'force-enable-multisite', which renames and configures the selected
Z/ZG according to the multisite config values.

3.) If the site being added as a secondary already contains buckets,
the unit will block and prompt the operator to purge all such buckets
before proceeding.
Closes-Bug: #1959837 Change-Id: I01a4c1c4551c797f0a32951dfbde8a1a4126c2d6 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/840 --- ceph-radosgw/actions.yaml | 9 + ceph-radosgw/actions/actions.py | 108 ++++- ceph-radosgw/actions/force-enable-multisite | 1 + ceph-radosgw/hooks/ceph_radosgw_context.py | 2 +- ceph-radosgw/hooks/hooks.py | 57 ++- ceph-radosgw/hooks/multisite.py | 379 +++++++++++++++++- ceph-radosgw/hooks/utils.py | 94 +++-- ceph-radosgw/osci.yaml | 29 ++ .../tests/bundles/focal-xena-multisite.yaml | 98 +++++ .../tests/bundles/focal-yoga-multisite.yaml | 99 +++++ .../tests/bundles/jammy-yoga-multisite.yaml | 99 +++++ ceph-radosgw/tests/tests.yaml | 6 +- ceph-radosgw/unit_tests/test_actions.py | 11 +- ceph-radosgw/unit_tests/test_hooks.py | 5 +- ceph-radosgw/unit_tests/test_multisite.py | 169 ++++++++ 15 files changed, 1116 insertions(+), 50 deletions(-) create mode 120000 ceph-radosgw/actions/force-enable-multisite create mode 100644 ceph-radosgw/tests/bundles/focal-xena-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml index d76f76bb..90a05216 100644 --- a/ceph-radosgw/actions.yaml +++ b/ceph-radosgw/actions.yaml @@ -10,3 +10,12 @@ readwrite: description: Mark the zone associated with the local units as read/write (multi-site). tidydefaults: description: Delete default zone and zonegroup configuration (multi-site). +force-enable-multisite: + description: Reconfigure provided Zone and Zonegroup for migration to multisite. + params: + zone: + type: string + description: Existing Zone to be reconfigured as the 'zone' config value. + zonegroup: + type: string + description: Existing Zonegroup to be reconfigured as the 'zonegroup' config value. 
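With the action defined above, resolving a blocked primary site is a
single invocation on the leader unit. A sketch, assuming the Juju 2.x
action syntax and example names ('existing-zone' and
'existing-zonegroup' stand in for whatever the blocked status reports):

    juju run-action ceph-radosgw/0 force-enable-multisite \
        zone=existing-zone zonegroup=existing-zonegroup --wait

The action renames the selected zone and zonegroup to the charm's
configured 'zone' and 'zonegroup' values, reconfigures them as master,
and restarts the gateway, as implemented in actions.py below.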
diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py
index ddbdde04..db0aa548 100755
--- a/ceph-radosgw/actions/actions.py
+++ b/ceph-radosgw/actions/actions.py
@@ -17,6 +17,7 @@
 import os
 import subprocess
 import sys
+import uuid

 sys.path.append('hooks/')

@@ -25,12 +26,27 @@
 from charmhelpers.core.hookenv import (
     action_fail,
     config,
+    is_leader,
+    leader_set,
     action_set,
+    action_get,
+    log,
+    ERROR,
+    DEBUG,
+)
+from charmhelpers.contrib.openstack.ip import (
+    canonical_url,
+    PUBLIC,
 )
 from utils import (
     pause_unit_helper,
     resume_unit_helper,
     register_configs,
+    listen_port,
+    service_name,
+)
+from charmhelpers.core.host import (
+    service_restart,
 )

@@ -50,13 +66,19 @@ def resume(args):

 def promote(args):
     """Promote zone associated with local RGW units to master/default"""
     zone = config('zone')
+    zonegroup = config('zonegroup')
+    if not is_leader():
+        action_fail('This action can only be executed on leader unit.')
+        return
     if not zone:
         action_fail('No zone configuration set, not promoting')
         return
     try:
         multisite.modify_zone(zone,
                               default=True, master=True)
-        multisite.update_period()
+        multisite.update_period(zonegroup=zonegroup, zone=zone)
+        leader_set(restart_nonce=str(uuid.uuid4()))
+        service_restart(service_name())
         action_set(
             values={'message': 'zone:{} promoted to '
                     'master/default'.format(zone)}
@@ -122,6 +144,89 @@ def tidydefaults(args):
                     ': {} - {}'.format(zone, cpe.output))


+def force_enable_multisite(args):
+    """Configure provided zone and zonegroup according to multisite config
+
+    In a situation where multiple zones or zonegroups are configured on the
+    primary site, the decision of which pair to use in the multisite system
+    is taken through this action. It takes the provided parameters (zone
+    name and zonegroup name) and renames/modifies them appropriately.
+    """
+    public_url = '{}:{}'.format(
+        canonical_url(register_configs(), PUBLIC),
+        listen_port(),
+    )
+    current_zone = action_get("zone")
+    current_zonegroup = action_get("zonegroup")
+    endpoints = [public_url]
+    realm = config('realm')
+    new_zone = config('zone')
+    new_zonegroup = config('zonegroup')
+
+    log("zone:{}, zonegroup:{}, endpoints:{}, realm:{}, new_zone:{}, "
+        "new_zonegroup:{}".format(
+            current_zone, current_zonegroup, endpoints,
+            realm, new_zone, new_zonegroup
+        ), level=DEBUG)
+
+    if not is_leader():
+        action_fail('This action can only be executed on leader unit.')
+        return
+
+    if not all((realm, new_zonegroup, new_zone)):
+        action_fail("Missing required charm configurations realm({}), "
+                    "zonegroup({}) and zone({}).".format(
+                        realm, new_zonegroup, new_zone
+                    ))
+        return
+
+    if current_zone not in multisite.list_zones():
+        action_fail('Provided zone {} does not exist.'.format(current_zone))
+        return
+
+    if current_zonegroup not in multisite.list_zonegroups():
+        action_fail('Provided zonegroup {} does not exist.'
+                    .format(current_zonegroup))
+        return
+
+    try:
+        # Rename chosen zonegroup/zone as per charm config value.
+        rename_result = multisite.rename_multisite_config(
+            [current_zonegroup],
+            new_zonegroup,
+            [current_zone], new_zone
+        )
+        if rename_result is None:
+            action_fail('Failed to rename zone {} or zonegroup {}.'
+                        .format(current_zone, current_zonegroup))
+            return
+
+        # Configure zonegroup/zone as master for multisite.
+        modify_result = multisite.modify_multisite_config(
+            new_zone, new_zonegroup,
+            realm=realm,
+            endpoints=endpoints
+        )
+        if modify_result is None:
+            action_fail('Failed to configure zonegroup {} or zone {}.'
+ .format(new_zonegroup, new_zone)) + return + + leader_set(restart_nonce=str(uuid.uuid4())) + service_restart(service_name()) + action_set( + values={ + 'message': 'Multisite Configuration Resolved' + } + ) + except subprocess.CalledProcessError as cpe: + message = "Failed to configure zone ({}) and zonegroup ({})".format( + current_zone, current_zonegroup + ) + log(message, level=ERROR) + action_fail(message + " : {}".format(cpe.output)) + + # A dictionary of all the defined actions to callables (which take # parsed arguments). ACTIONS = { @@ -131,6 +236,7 @@ def tidydefaults(args): "readonly": readonly, "readwrite": readwrite, "tidydefaults": tidydefaults, + "force-enable-multisite": force_enable_multisite, } diff --git a/ceph-radosgw/actions/force-enable-multisite b/ceph-radosgw/actions/force-enable-multisite new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/force-enable-multisite @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index ae2345d5..be991ca1 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -305,7 +305,7 @@ def __call__(self): ctxt.update(user_provided) if self.context_complete(ctxt): - # Multi-site Zone configuration is optional, + # Multi-site zone configuration is optional, # so add after assessment ctxt['rgw_zone'] = config('zone') ctxt['rgw_zonegroup'] = config('zonegroup') diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 9f2fed18..2f563098 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -27,6 +27,7 @@ import multisite from charmhelpers.core.hookenv import ( + ERROR, relation_get, relation_id as ch_relation_id, relation_ids, @@ -366,7 +367,7 @@ def _mon_relation(): existing_zones = multisite.list_zones() log('Existing zones {}'.format(existing_zones), level=DEBUG) if zone not in existing_zones: - log("Zone '{}' doesn't exist, creating".format(zone)) + log("zone '{}' doesn't exist, creating".format(zone)) try: multisite.create_zone(zone, endpoints=endpoints, @@ -377,7 +378,7 @@ def _mon_relation(): # NOTE(lourot): may have been created in the # background by the Rados Gateway daemon, see # lp:1856106 - log("Zone '{}' existed already after all".format( + log("zone '{}' existed already after all".format( zone)) else: raise @@ -741,8 +742,43 @@ def master_relation_joined(relation_id=None): multisite.create_realm(realm, default=True) mutation = True + # Migration if master site has buckets configured. + # Migration involves renaming existing zone/zongroups such that existing + # buckets and their objects can be preserved on the master site. + if multisite.check_cluster_has_buckets() is True: + log('Migrating to multisite with zone ({}) and zonegroup ({})' + .format(zone, zonegroup), level=DEBUG) + zones = multisite.list_zones() + zonegroups = multisite.list_zonegroups() + + if (len(zonegroups) > 1) and (zonegroup not in zonegroups): + log('Multiple zonegroups found {}, aborting.' + .format(zonegroups), level=ERROR) + return + + if (len(zones) > 1) and (zone not in zones): + log('Multiple zones found {}, aborting.' 
+ .format(zones), level=ERROR) + return + + rename_result = multisite.rename_multisite_config( + zonegroups, zonegroup, + zones, zone + ) + if rename_result is None: + return + + modify_result = multisite.modify_multisite_config( + zone, zonegroup, + endpoints=endpoints, + realm=realm + ) + if modify_result is None: + return + mutation = True + if zonegroup not in multisite.list_zonegroups(): - log('Zonegroup {} not found, creating now'.format(zonegroup)) + log('zonegroup {} not found, creating now'.format(zonegroup)) multisite.create_zonegroup(zonegroup, endpoints=endpoints, default=True, master=True, @@ -750,7 +786,7 @@ def master_relation_joined(relation_id=None): mutation = True if zone not in multisite.list_zones(): - log('Zone {} not found, creating now'.format(zone)) + log('zone {} not found, creating now'.format(zone)) multisite.create_zone(zone, endpoints=endpoints, default=True, master=True, @@ -773,7 +809,7 @@ def master_relation_joined(relation_id=None): log( 'Mutation detected. Restarting {}.'.format(service_name()), 'INFO') - multisite.update_period() + multisite.update_period(zonegroup=zonegroup, zone=zone) service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) else: @@ -829,6 +865,13 @@ def slave_relation_changed(relation_id=None, unit=None): mutation = False + # NOTE(utkarshbhatthere): + # A site with existing data can create inconsistencies when added as a + # secondary site for RGW. Hence it must be pristine. + if multisite.check_cluster_has_buckets(): + log("Non-Pristine site can't be used as secondary", level=ERROR) + return + if realm not in multisite.list_realms(): log('Realm {} not found, pulling now'.format(realm)) multisite.pull_realm(url=master_data['url'], @@ -841,7 +884,7 @@ def slave_relation_changed(relation_id=None, unit=None): mutation = True if zone not in multisite.list_zones(): - log('Zone {} not found, creating now'.format(zone)) + log('zone {} not found, creating now'.format(zone)) multisite.create_zone(zone, endpoints=endpoints, default=False, master=False, @@ -854,7 +897,7 @@ def slave_relation_changed(relation_id=None, unit=None): log( 'Mutation detected. Restarting {}.'.format(service_name()), 'INFO') - multisite.update_period() + multisite.update_period(zonegroup=zonegroup, zone=zone) service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) else: diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index 4bf35a86..5815cba7 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -25,7 +25,7 @@ RGW_ADMIN = 'radosgw-admin' -@decorators.retry_on_exception(num_retries=5, base_delay=3, +@decorators.retry_on_exception(num_retries=10, base_delay=5, exc_type=subprocess.CalledProcessError) def _check_output(cmd): """Logging wrapper for subprocess.check_ouput""" @@ -105,6 +105,32 @@ def list_zones(retry_on_empty=False): list_users = functools.partial(_list, 'user') +def list_buckets(zone, zonegroup): + """List Buckets served under the provided zone and zonegroup pair. + + :param zonegroup: Parent zonegroup. + :type zonegroup: str + :param zone: Parent zone. 
+ :type zone: str + :returns: List of buckets found + :rtype: list + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'bucket', 'list', + '--rgw-zone={}'.format(zone), + '--rgw-zonegroup={}'.format(zonegroup), + ] + try: + return json.loads(_check_output(cmd)) + except subprocess.CalledProcessError: + hookenv.log("Bucket queried for incorrect zone({})-zonegroup({}) " + "pair".format(zone, zonegroup), level=hookenv.ERROR) + return None + except TypeError: + return None + + def create_realm(name, default=False): """ Create a new RADOS Gateway Realm. @@ -146,7 +172,7 @@ def set_default_realm(name): def create_zonegroup(name, endpoints, default=False, master=False, realm=None): """ - Create a new RADOS Gateway Zone Group + Create a new RADOS Gateway zone Group :param name: name of zonegroup to create :type name: str @@ -179,10 +205,49 @@ def create_zonegroup(name, endpoints, default=False, master=False, realm=None): return None +def modify_zonegroup(name, endpoints=None, default=False, + master=False, realm=None): + """Modify an existing RADOS Gateway zonegroup + + An empty list of endpoints would cause NO-CHANGE in the configured + endpoints for the zonegroup. + + :param name: name of zonegroup to modify + :type name: str + :param endpoints: list of URLs to endpoints for zonegroup + :type endpoints: list[str] + :param default: set zonegroup as the default zonegroup + :type default: boolean + :param master: set zonegroup as the master zonegroup + :type master: boolean + :param realm: realm name for provided zonegroup + :type realm: str + :return: zonegroup configuration + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'zonegroup', 'modify', + '--rgw-zonegroup={}'.format(name), + ] + if realm: + cmd.append('--rgw-realm={}'.format(realm)) + if endpoints: + cmd.append('--endpoints={}'.format(','.join(endpoints))) + if default: + cmd.append('--default') + if master: + cmd.append('--master') + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + def create_zone(name, endpoints, default=False, master=False, zonegroup=None, access_key=None, secret=None, readonly=False): """ - Create a new RADOS Gateway Zone + Create a new RADOS Gateway zone :param name: name of zone to create :type name: str @@ -226,9 +291,9 @@ def create_zone(name, endpoints, default=False, master=False, zonegroup=None, def modify_zone(name, endpoints=None, default=False, master=False, - access_key=None, secret=None, readonly=False): - """ - Modify an existing RADOS Gateway zone + access_key=None, secret=None, readonly=False, + realm=None, zonegroup=None): + """Modify an existing RADOS Gateway zone :param name: name of zone to create :type name: str @@ -243,7 +308,11 @@ def modify_zone(name, endpoints=None, default=False, master=False, :param secret: secret to use with access-key for the zone :type secret: str :param readonly: set zone as read only - :type: readonly: boolean + :type readonly: boolean + :param realm: realm to use for zone + :type realm: str + :param zonegroup: zonegroup to use for zone + :type zonegroup: str :return: zone configuration :rtype: dict """ @@ -252,6 +321,10 @@ def modify_zone(name, endpoints=None, default=False, master=False, 'zone', 'modify', '--rgw-zone={}'.format(name), ] + if realm: + cmd.append('--rgw-realm={}'.format(realm)) + if zonegroup: + cmd.append('--rgw-zonegroup={}'.format(zonegroup)) if endpoints: cmd.append('--endpoints={}'.format(','.join(endpoints))) if access_key and secret: @@ -268,14 +341,24 @@ def modify_zone(name, 
endpoints=None, default=False, master=False, return None -def update_period(fatal=True): - """ - Update RADOS Gateway configuration period +def update_period(fatal=True, zonegroup=None, zone=None): + """Update RADOS Gateway configuration period + + :param fatal: In failure case, whether CalledProcessError is to be raised. + :type fatal: boolean + :param zonegroup: zonegroup name + :type zonegroup: str + :param zone: zone name + :type zone: str """ cmd = [ RGW_ADMIN, '--id={}'.format(_key_name()), 'period', 'update', '--commit' ] + if zonegroup is not None: + cmd.append('--rgw-zonegroup={}'.format(zonegroup)) + if zone is not None: + cmd.append('--rgw-zone={}'.format(zone)) if fatal: _check_call(cmd) else: @@ -439,3 +522,279 @@ def pull_period(url, access_key, secret): return json.loads(_check_output(cmd)) except TypeError: return None + + +def rename_zone(name, new_name, zonegroup): + """Rename an existing RADOS Gateway zone + + If the command execution succeeds, 0 is returned, otherwise + None is returned to the caller. + + :param name: current name for the zone being renamed + :type name: str + :param new_name: new name for the zone being renamed + :type new_name: str + :rtype: int + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'zone', 'rename', + '--rgw-zone={}'.format(name), + '--zone-new-name={}'.format(new_name), + '--rgw-zonegroup={}'.format(zonegroup) + ] + result = _call(cmd) + return 0 if result == 0 else None + + +def rename_zonegroup(name, new_name): + """Rename an existing RADOS Gateway zonegroup + + If the command execution succeeds, 0 is returned, otherwise + None is returned to the caller. + + :param name: current name for the zonegroup being renamed + :type name: str + :param new_name: new name for the zonegroup being renamed + :type new_name: str + :rtype: int + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'zonegroup', 'rename', + '--rgw-zonegroup={}'.format(name), + '--zonegroup-new-name={}'.format(new_name), + ] + result = _call(cmd) + return 0 if result == 0 else None + + +def get_zonegroup_info(zonegroup): + """Fetch detailed info for the provided zonegroup + + :param zonegroup: zonegroup Name for detailed query + :type zonegroup: str + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'zonegroup', 'get', + '--rgw-zonegroup={}'.format(zonegroup), + ] + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def get_sync_status(): + """ + Get sync status + :returns: Sync Status Report from radosgw-admin + :rtype: str + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'status', + ] + try: + return _check_output(cmd) + except subprocess.CalledProcessError: + hookenv.log("Failed to fetch sync status", level=hookenv.ERROR) + return None + + +def is_multisite_configured(zone, zonegroup): + """Check if system is already multisite configured + + Checks if zone and zonegroup are configured appropriately and + remote data sync source is detected in sync status + + :rtype: Boolean + """ + if zone not in list_zones(): + hookenv.log("No local zone found with name ({})".format(zonegroup), + level=hookenv.ERROR) + return False + + if zonegroup not in list_zonegroups(): + hookenv.log("No zonegroup found with name ({})".format(zonegroup), + level=hookenv.ERROR) + return False + + sync_status = get_sync_status() + if sync_status is not None: + return ('data sync source:' in sync_status) + + return False + + +def get_local_zone(zonegroup): + """Get local zone to provided parent 
zonegroup. + + In multisite systems, zonegroup contains both local and remote zone info + this method is used to fetch the zone local to querying site. + + :param zonegroup: parent zonegroup name. + :type zonegroup: str + :returns: tuple with parent zonegroup and local zone name + :rtype: tuple + """ + local_zones = list_zones() + zonegroup_info = get_zonegroup_info(zonegroup) + + if zonegroup_info is None: + hookenv.log("Failed to fetch zonegroup ({}) info".format(zonegroup), + level=hookenv.ERROR) + return None + + # zonegroup info always contains self name and zones list so fetching + # directly is safe. + master_zonegroup = zonegroup_info['name'] + for zone_info in zonegroup_info['zones']: + zone = zone_info['name'] + if zone in local_zones: + return zone, master_zonegroup + + hookenv.log( + "No local zone configured for zonegroup ({})".format(zonegroup), + level=hookenv.ERROR + ) + return None + + +def rename_multisite_config(zonegroups, new_zonegroup_name, + zones, new_zone_name): + """Rename zone and zonegroup to provided new names. + + If zone list (zones) or zonegroup list (zonegroups) contain 1 element + rename the only element present in the list to provided (new_) value. + + :param zonegroups: List of zonegroups available at site. + :type zonegroups: list[str] + :param new_zonegroup_name: Desired new name for master zonegroup. + :type new_zonegroup_name: str + :param zones: List of zones available at site. + :type zones: list[str] + :param new_zonegroup_name: Desired new name for master zone. + :type new_zonegroup_name: str + + :return: Whether any of the zone or zonegroup is renamed. + :rtype: Boolean + """ + mutation = False + if (len(zonegroups) == 1) and (len(zones) == 1): + if new_zonegroup_name not in zonegroups: + result = rename_zonegroup(zonegroups[0], new_zonegroup_name) + if result is None: + hookenv.log( + "Failed renaming zonegroup from {} to {}" + .format(zonegroups[0], new_zonegroup_name), + level=hookenv.ERROR + ) + return None + mutation = True + + if new_zone_name not in zones: + result = rename_zone(zones[0], new_zone_name, new_zonegroup_name) + if result is None: + hookenv.log( + "Failed renaming zone from {} to {}" + .format(zones[0], new_zone_name), level=hookenv.ERROR + ) + return None + mutation = True + + if mutation: + hookenv.log("Renamed zonegroup {} to {}, and zone {} to {}".format( + zonegroups[0], new_zonegroup_name, + zones[0], new_zone_name)) + return True + + return False + + +def modify_multisite_config(zone, zonegroup, endpoints=None, realm=None): + """Configure zone and zonegroup as master for multisite system. 
+
+    :param zonegroup: zonegroup name being configured for multisite
+    :type zonegroup: str
+    :param zone: zone name being configured for multisite
+    :type zone: str
+    :param endpoints: list of URLs to RGW endpoints
+    :type endpoints: list[str]
+    :param realm: realm to use for multisite
+    :type realm: str
+    :rtype: Boolean
+    """
+    if modify_zonegroup(zonegroup, endpoints=endpoints, default=True,
+                        master=True, realm=realm) is None:
+        hookenv.log(
+            "Failed configuring zonegroup {}".format(zonegroup),
+            level=hookenv.ERROR
+        )
+        return None
+
+    if modify_zone(zone, endpoints=endpoints, default=True,
+                   master=True, zonegroup=zonegroup, realm=realm) is None:
+        hookenv.log(
+            "Failed configuring zone {}".format(zone), level=hookenv.ERROR
+        )
+        return None
+
+    update_period(zonegroup=zonegroup, zone=zone)
+    hookenv.log("Configured zonegroup {}, and zone {} for multisite".format(
+        zonegroup, zone))
+    return True
+
+
+def check_zone_has_buckets(zone, zonegroup):
+    """Checks whether the provided zone-zonegroup pair contains any bucket.
+
+    :param zone: zone name to query buckets in.
+    :type zone: str
+    :param zonegroup: Parent zonegroup of zone.
+    :type zonegroup: str
+    :rtype: Boolean
+    """
+    buckets = list_buckets(zone, zonegroup)
+    if buckets is not None:
+        return (len(buckets) > 0)
+    hookenv.log(
+        "Failed to query buckets for zone {} zonegroup {}"
+        .format(zone, zonegroup),
+        level=hookenv.WARNING
+    )
+    return False
+
+
+def check_zonegroup_has_buckets(zonegroup):
+    """Checks whether any bucket exists in the master zone of a zonegroup.
+
+    :param zonegroup: zonegroup name to query buckets in.
+    :type zonegroup: str
+    :rtype: Boolean
+    """
+    # NOTE(utkarshbhatthere): sometimes querying against a particular
+    # zonegroup results in info of an entirely different zonegroup, thus to
+    # prevent a query against an incorrect pair in such cases, both zone and
+    # zonegroup names are taken from zonegroup info.
+    local_zone_info = get_local_zone(zonegroup)
+    if local_zone_info is None:
+        # No master zone is configured for the zonegroup.
+        hookenv.log("No master zone configured for zonegroup {}"
+                    .format(zonegroup), level=hookenv.WARNING)
+        return False
+    master_zone, master_zonegroup = local_zone_info
+    return check_zone_has_buckets(master_zone, master_zonegroup)
+
+
+def check_cluster_has_buckets():
+    """Iteratively check if ANY zonegroup has buckets on the cluster.
+
+    :rtype: Boolean
+    """
+    for zonegroup in list_zonegroups():
+        if check_zonegroup_has_buckets(zonegroup):
+            return True
+    return False
diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index f05d190b..00695154 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -20,6 +20,7 @@ from copy import deepcopy

 import ceph_radosgw_context
+import multisite

 from charmhelpers.core.hookenv import (
     relation_get,
@@ -184,6 +185,14 @@ def get_optional_interfaces():
     return optional_interfaces


+def get_zones_zonegroups():
+    """Get a tuple with lists of zones and zonegroups existing on site
+
+    :rtype: tuple
+    """
+    return multisite.list_zones(), multisite.list_zonegroups()
+
+
 def check_optional_config_and_relations(configs):
     """Check that if we have a relation_id for high availability that we
     can get the hacluster config. If we can't then we are blocked. This function
This function @@ -201,41 +210,72 @@ def check_optional_config_and_relations(configs): return ('blocked', 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') - # NOTE: misc multi-site relation and config checks + multisite_config = (config('realm'), config('zonegroup'), config('zone')) - if relation_ids('master') or relation_ids('slave'): + master_configured = (leader_get('access_key'), + leader_get('secret'), + leader_get('restart_nonce')) + + # Any realm or zonegroup config is present, multisite checks can be done. + if (config('realm') or config('zonegroup')): + # All of Realm, zonegroup, and zone must be configured. if not all(multisite_config): return ('blocked', 'multi-site configuration incomplete ' '(realm={realm}, zonegroup={zonegroup}' ', zone={zone})'.format(**config())) - if (all(multisite_config) and not - (relation_ids('master') or relation_ids('slave'))): - return ('blocked', - 'multi-site configuration but master/slave ' - 'relation missing') - if (all(multisite_config) and relation_ids('slave')): - multisite_ready = False - for rid in relation_ids('slave'): - for unit in related_units(rid): - if relation_get('url', unit=unit, rid=rid): - multisite_ready = True - continue - if not multisite_ready: - return ('waiting', - 'multi-site master relation incomplete') - master_configured = ( - leader_get('access_key'), - leader_get('secret'), - leader_get('restart_nonce'), - ) - if (all(multisite_config) and - relation_ids('master') and - not all(master_configured)): - return ('waiting', - 'waiting for configuration of master zone') + + # Master/Slave Relation should be configured. + if not (relation_ids('master') or relation_ids('slave')): + return ('blocked', + 'multi-site configuration but master/slave ' + 'relation missing') + + # Primary site status check + if relation_ids('master'): + # Migration: The system is not multisite already. + if not multisite.is_multisite_configured(config('zone'), + config('zonegroup')): + if multisite.check_cluster_has_buckets(): + zones, zonegroups = get_zones_zonegroups() + status_msg = "Multiple zone or zonegroup configured, " \ + "use action 'config-multisite-values' to " \ + "resolve." + if (len(zonegroups) > 1 and + config('zonegroup') not in zonegroups): + return('blocked', status_msg) + + if len(zones) > 1 and config('zone') not in zones: + return('blocked', status_msg) + + if not all(master_configured): + return ('blocked', "Failure in Multisite migration, " + "Refer to Logs.") + # Non-Migration scenario. + if not all(master_configured): + return ('waiting', + 'waiting for configuration of master zone') + + # Secondary site status check + if relation_ids('slave'): + # Migration: The system is not multisite already. + if not multisite.is_multisite_configured(config('zone'), + config('zonegroup')): + if multisite.check_cluster_has_buckets(): + return ('blocked', + "Non-Pristine RGW site can't be used as secondary") + + multisite_ready = False + for rid in relation_ids('slave'): + for unit in related_units(rid): + if relation_get('url', unit=unit, rid=rid): + multisite_ready = True + continue + if not multisite_ready: + return ('waiting', + 'multi-site master relation incomplete') # Check that provided Ceph BlueStoe configuration is valid. 
try: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 64c9d049..9d065483 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -4,12 +4,17 @@ - charm-unit-jobs-py39 check: jobs: + - focal-xena-multisite - vault-focal-xena_rgw - vault-focal-xena-namespaced + - focal-yoga-multisite: + voting: false - vault-focal-yoga_rgw: voting: false - vault-focal-yoga-namespaced: voting: false + - jammy-yoga-multisite: + voting: false - vault-jammy-yoga_rgw: voting: false - vault-jammy-yoga-namespaced: @@ -18,6 +23,16 @@ needs_charm_build: true charm_build_name: ceph-radosgw build_type: charmcraft +- job: + name: focal-xena-multisite + parent: func-target + dependencies: + - osci-lint + - charm-build + - tox-py38 + - tox-py39 + vars: + tox_extra_args: focal-xena-multisite - job: name: vault-focal-xena_rgw parent: func-target @@ -38,6 +53,13 @@ vars: tox_extra_args: vault:focal-xena-namespaced +- job: + name: jammy-yoga-multisite + parent: func-target + dependencies: + - focal-xena-multisite + vars: + tox_extra_args: jammy-yoga-multisite - job: name: vault-jammy-yoga_rgw parent: func-target @@ -54,6 +76,13 @@ - vault-focal-xena-namespaced vars: tox_extra_args: vault:jammy-yoga-namespaced +- job: + name: focal-yoga-multisite + parent: func-target + dependencies: + - focal-xena-multisite + vars: + tox_extra_args: focal-yoga-multisite - job: name: vault-focal-yoga_rgw parent: func-target diff --git a/ceph-radosgw/tests/bundles/focal-xena-multisite.yaml b/ceph-radosgw/tests/bundles/focal-xena-multisite.yaml new file mode 100644 index 00000000..2de95cca --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-xena-multisite.yaml @@ -0,0 +1,98 @@ +options: + source: &source cloud:focal-xena + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' diff --git a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml new file mode 100644 index 00000000..8c1a1cfd --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml @@ -0,0 +1,99 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml new file mode 100644 index 00000000..2536b5ad --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml @@ -0,0 +1,99 @@ +options: + source: &source distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 7f797427..76f71201 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,13 +1,17 @@ charm_name: ceph-radosgw gate_bundles: + - focal-xena-multisite - vault: focal-xena - vault: focal-xena-namespaced smoke_bundles: + - focal-xena-multisite - vault: focal-xena dev_bundles: + - focal-yoga-multisite + - jammy-yoga-multisite - vault: focal-yoga - vault: focal-yoga-namespaced - vault: jammy-yoga @@ -16,7 +20,7 @@ dev_bundles: target_deploy_status: vault: workload-status: blocked - workload-status-message: Vault needs to be initialized + workload-status-message-prefix: Vault needs to be initialized configure: - vault: diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index 795da2a6..cebb77bb 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -85,6 +85,9 @@ class MultisiteActionsTestCase(CharmTestCase): 'action_set', 'multisite', 'config', + 'is_leader', + 'leader_set', + 'service_name', ] def setUp(self): @@ -93,17 +96,23 @@ def setUp(self): self.config.side_effect = self.test_config.get def test_promote(self): + self.is_leader.return_value = True self.test_config.set('zone', 'testzone') + self.test_config.set('zonegroup', 'testzonegroup') actions.promote([]) self.multisite.modify_zone.assert_called_once_with( 'testzone', default=True, master=True, ) - self.multisite.update_period.assert_called_once_with() + self.multisite.update_period.assert_called_once_with( + zonegroup='testzonegroup', zone='testzone' + ) def test_promote_unconfigured(self): + self.is_leader.return_value = True self.test_config.set('zone', None) + self.test_config.set('zonegroup', None) actions.promote([]) self.action_fail.assert_called_once() diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 87f37abc..416ea84f 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -740,7 +740,7 @@ def test_master_relation_joined_create_everything(self): ) 
self.multisite.update_period.assert_has_calls([ call(fatal=False), - call(), + call(zonegroup='testzonegroup', zone='testzone'), ]) self.service_restart.assert_called_once_with('rgw@hostname') self.leader_set.assert_has_calls([ @@ -827,6 +827,7 @@ def test_slave_relation_changed(self): self.relation_get.return_value = self._test_relation self.multisite.list_realms.return_value = [] self.multisite.list_zones.return_value = [] + self.multisite.check_cluster_has_buckets.return_value = False ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') self.config.assert_has_calls([ call('realm'), @@ -857,7 +858,7 @@ def test_slave_relation_changed(self): ) self.multisite.update_period.assert_has_calls([ call(fatal=False), - call(), + call(zonegroup='testzonegroup', zone='testzone2'), ]) self.service_restart.assert_called_once() self.leader_set.assert_called_once_with(restart_nonce=ANY) diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 6234a8dc..5374e422 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -25,6 +25,19 @@ def whoami(): return inspect.stack()[1][3] +def get_zonegroup_stub(): + # populate dummy zone info + zone = {} + zone['id'] = "test_zone_id" + zone['name'] = "test_zone" + + # populate dummy zonegroup info + zonegroup = {} + zonegroup['name'] = "test_zonegroup" + zonegroup['zones'] = [zone] + return zonegroup + + class TestMultisiteHelpers(CharmTestCase): TO_PATCH = [ @@ -285,3 +298,159 @@ def test_pull_period(self): '--url=http://master:80', '--access-key=testkey', '--secret=testsecret', ], stderr=mock.ANY) + + def test_list_buckets(self): + self.subprocess.CalledProcessError = BaseException + multisite.list_buckets('default', 'default') + self.subprocess.check_output.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'bucket', 'list', '--rgw-zone=default', + '--rgw-zonegroup=default' + ], stderr=mock.ANY) + + def test_rename_zonegroup(self): + multisite.rename_zonegroup('default', 'test_zone_group') + self.subprocess.call.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'rename', '--rgw-zonegroup=default', + '--zonegroup-new-name=test_zone_group' + ]) + + def test_rename_zone(self): + multisite.rename_zone('default', 'test_zone', 'test_zone_group') + self.subprocess.call.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'rename', '--rgw-zone=default', + '--zone-new-name=test_zone', + '--rgw-zonegroup=test_zone_group' + ]) + + def test_get_zonegroup(self): + multisite.get_zonegroup_info('test_zone') + self.subprocess.check_output.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'get', '--rgw-zonegroup=test_zone' + ], stderr=mock.ANY) + + def test_modify_zonegroup_migrate(self): + multisite.modify_zonegroup('test_zonegroup', + endpoints=['http://localhost:80'], + default=True, master=True, + realm='test_realm') + self.subprocess.check_output.assert_called_once_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'modify', + '--rgw-zonegroup=test_zonegroup', '--rgw-realm=test_realm', + '--endpoints=http://localhost:80', '--default', '--master', + ], stderr=mock.ANY) + + def test_modify_zone_migrate(self): + multisite.modify_zone('test_zone', default=True, master=True, + endpoints=['http://localhost:80'], + zonegroup='test_zonegroup', realm='test_realm') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'modify', + 
'--rgw-zone=test_zone', '--rgw-realm=test_realm', + '--rgw-zonegroup=test_zonegroup', + '--endpoints=http://localhost:80', + '--master', '--default', '--read-only=0', + ], stderr=mock.ANY) + + @mock.patch.object(multisite, 'list_zones') + @mock.patch.object(multisite, 'get_zonegroup_info') + def test_get_local_zone(self, mock_get_zonegroup_info, mock_list_zones): + mock_get_zonegroup_info.return_value = get_zonegroup_stub() + mock_list_zones.return_value = ['test_zone'] + zone, _zonegroup = multisite.get_local_zone('test_zonegroup') + self.assertEqual( + zone, + 'test_zone' + ) + + def test_rename_multisite_config_zonegroup_fail(self): + self.assertEqual( + multisite.rename_multisite_config( + ['default'], 'test_zonegroup', + ['default'], 'test_zone' + ), + None + ) + + self.subprocess.call.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'rename', '--rgw-zonegroup=default', + '--zonegroup-new-name=test_zonegroup' + ]) + + def test_modify_multisite_config_zonegroup_fail(self): + self.assertEqual( + multisite.modify_multisite_config( + 'test_zone', 'test_zonegroup', + endpoints=['http://localhost:80'], + realm='test_realm' + ), + None + ) + + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'modify', '--rgw-zonegroup=test_zonegroup', + '--rgw-realm=test_realm', + '--endpoints=http://localhost:80', '--default', + '--master', + ], stderr=mock.ANY) + + @mock.patch.object(multisite, 'modify_zonegroup') + def test_modify_multisite_config_zone_fail(self, mock_modify_zonegroup): + mock_modify_zonegroup.return_value = True + self.assertEqual( + multisite.modify_multisite_config( + 'test_zone', 'test_zonegroup', + endpoints=['http://localhost:80'], + realm='test_realm' + ), + None + ) + + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'modify', + '--rgw-zone=test_zone', + '--rgw-realm=test_realm', + '--rgw-zonegroup=test_zonegroup', + '--endpoints=http://localhost:80', + '--master', '--default', '--read-only=0', + ], stderr=mock.ANY) + + @mock.patch.object(multisite, 'rename_zonegroup') + def test_rename_multisite_config_zone_fail(self, mock_rename_zonegroup): + mock_rename_zonegroup.return_value = True + self.assertEqual( + multisite.rename_multisite_config( + ['default'], 'test_zonegroup', + ['default'], 'test_zone' + ), + None + ) + + self.subprocess.call.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'rename', '--rgw-zone=default', + '--zone-new-name=test_zone', + '--rgw-zonegroup=test_zonegroup', + ]) + + @mock.patch.object(multisite, 'list_zonegroups') + @mock.patch.object(multisite, 'get_local_zone') + @mock.patch.object(multisite, 'list_buckets') + def test_check_zone_has_buckets(self, mock_list_zonegroups, + mock_get_local_zone, + mock_list_buckets): + mock_list_zonegroups.return_value = ['test_zonegroup'] + mock_get_local_zone.return_value = 'test_zone', 'test_zonegroup' + mock_list_buckets.return_value = ['test_bucket_1', 'test_bucket_2'] + self.assertEqual( + multisite.check_cluster_has_buckets(), + True + ) From f267ea8ab892a6b03cc2d2014735050bbc3d6980 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Mon, 8 Aug 2022 18:57:23 +0530 Subject: [PATCH 2401/2699] Fixes currently present PEP8 issues Change-Id: Idff27970dc0d41288444fc3ed18585e3f3a1a0ad --- ceph-nfs/src/charm.py | 28 ++++++++---- ceph-nfs/src/ganesha.py | 57 ++++++++++++++++++------- ceph-nfs/src/interface_ceph_nfs_peer.py | 7 ++- ceph-nfs/src/manager.py | 4 +- 
ceph-nfs/tests/nfs_ganesha.py | 21 ++++++--- ceph-nfs/unit_tests/test_ganesha.py | 10 +++-- 6 files changed, 89 insertions(+), 38 deletions(-) diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 256e498f..01caa849 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -88,7 +88,9 @@ def pool_name(self): :returns: Data pool name. :rtype: str """ - return self.charm_instance.config_get('rbd-pool-name', self.charm_instance.app.name) + return self.charm_instance.config_get( + 'rbd-pool-name', self.charm_instance.app.name + ) @property def client_name(self): @@ -293,7 +295,8 @@ def render_config(self, event): mode=0o750) def daemon_reload_and_restart(service_name): - logging.debug("restarting {} after config change".format(service_name)) + logging.debug("restarting {} after config change" + .format(service_name)) subprocess.check_call(['systemctl', 'daemon-reload']) subprocess.check_call(['systemctl', 'restart', service_name]) @@ -411,7 +414,8 @@ def access_address(self) -> str: def create_share_action(self, event): if not self.model.unit.is_leader(): - event.fail("Share creation needs to be run from the application leader") + event.fail("Share creation needs to be run " + "from the application leader") return share_size = event.params.get('size') name = event.params.get('name') @@ -420,7 +424,8 @@ def create_share_action(self, event): export_path = self.ganesha_client.create_share( size=share_size, name=name, access_ips=allowed_ips) if not export_path: - event.fail("Failed to create share, check the log for more details") + event.fail("Failed to create share, check the " + "log for more details") return self.peers.trigger_reload() event.set_results({ @@ -431,12 +436,17 @@ def create_share_action(self, event): def list_shares_action(self, event): exports = self.ganesha_client.list_shares() event.set_results({ - "exports": [{"id": export.export_id, "name": export.name} for export in exports] + "exports": [ + { + "id": export.export_id, "name": export.name + } for export in exports + ] }) def delete_share_action(self, event): if not self.model.unit.is_leader(): - event.fail("Share creation needs to be run from the application leader") + event.fail("Share creation needs to be run " + "from the application leader") return name = event.params.get('name') purge = event.params.get('purge') @@ -448,7 +458,8 @@ def delete_share_action(self, event): def grant_access_action(self, event): if not self.model.unit.is_leader(): - event.fail("Share creation needs to be run from the application leader") + event.fail("Share creation needs to be run " + "from the application leader") return name = event.params.get('name') address = event.params.get('client') @@ -463,7 +474,8 @@ def grant_access_action(self, event): def revoke_access_action(self, event): if not self.model.unit.is_leader(): - event.fail("Share creation needs to be run from the application leader") + event.fail("Share creation needs to be run " + "from the application leader") return name = event.params.get('name') address = event.params.get('client') diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 9997488a..5c54c11a 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -26,7 +26,9 @@ def __init__(self, export_options: Optional[Dict] = None): raise RuntimeError('export_options must be a dictionary') self.export_options = export_options if not isinstance(self.export_options['EXPORT']['CLIENT'], list): - self.export_options['EXPORT']['CLIENT'] = [self.export_options['EXPORT']['CLIENT']] + 
self.export_options['EXPORT']['CLIENT'] = [ + self.export_options['EXPORT']['CLIENT'] + ] def from_export(export: str) -> 'Export': return Export(export_options=manager.parseconf(export)) @@ -52,9 +54,13 @@ def clients_by_mode(self): clients_by_mode = {'r': [], 'rw': []} for client in self.clients: if client['Access_Type'].lower() == 'r': - clients_by_mode['r'] += [s.strip() for s in client['Clients'].split(',')] + clients_by_mode['r'] += [ + s.strip() for s in client['Clients'].split(',') + ] elif client['Access_Type'].lower() == 'rw': - clients_by_mode['rw'] += [s.strip() for s in client['Clients'].split(',')] + clients_by_mode['rw'] += [ + s.strip() for s in client['Clients'].split(',') + ] else: raise RuntimeError("Invalid access type") return clients_by_mode @@ -84,7 +90,9 @@ def add_client(self, client: str): def remove_client(self, client: str): clients_by_mode = self.clients_by_mode for (mode, clients) in clients_by_mode.items(): - clients_by_mode[mode] = [old_client for old_client in clients if old_client != client] + clients_by_mode[mode] = [ + old_client for old_client in clients if old_client != client + ] self.export_options['EXPORT']['CLIENT'] = [] for (mode, clients) in clients_by_mode.items(): if clients: @@ -112,7 +120,9 @@ def create_share(self, name: str = None, size: int = None, if name is None: name = str(uuid.uuid4()) else: - existing_shares = [share for share in self.list_shares() if share.name == name] + existing_shares = [ + share for share in self.list_shares() if share.name == name + ] if existing_shares: return existing_shares[0].path if size is not None: @@ -179,7 +189,8 @@ def list_shares(self) -> List[Export]: def resize_share(self, name: str, size: int): size_in_bytes = size * 1024 * 1024 - self._ceph_subvolume_command('resize', 'ceph-fs', name, str(size_in_bytes), '--no_shrink') + self._ceph_subvolume_command('resize', 'ceph-fs', name, + str(size_in_bytes), '--no_shrink') def delete_share(self, name: str, purge=False): share = [share for share in self.list_shares() if share.name == name] @@ -187,7 +198,8 @@ def delete_share(self, name: str, purge=False): share = share[0] else: return - logging.info("About to remove export {} ({})".format(share.name, share.export_id)) + logging.info("About to remove export {} ({})" + .format(share.name, share.export_id)) self._ganesha_remove_export(share.export_id) logging.debug("Removing export from index") self._remove_share_from_index(share.export_id) @@ -204,7 +216,8 @@ def grant_access(self, name: str, client: str) -> Optional[str]: export_template = share.to_export() logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) - self._rados_put('ganesha-export-{}'.format(share.export_id), tmp_file.name) + self._rados_put('ganesha-export-{}'.format(share.export_id), + tmp_file.name) self._ganesha_update_export(share.export_id, tmp_file.name) def revoke_access(self, name: str, client: str): @@ -215,7 +228,8 @@ def revoke_access(self, name: str, client: str): export_template = share.to_export() logging.debug("Export template::\n{}".format(export_template)) tmp_file = self._tmpfile(export_template) - self._rados_put('ganesha-export-{}'.format(share.export_id), tmp_file.name) + self._rados_put('ganesha-export-{}'.format(share.export_id), + tmp_file.name) self._ganesha_update_export(share.export_id, tmp_file.name) def get_share(self, name: str) -> Optional[Export]: @@ -230,7 +244,8 @@ def _ganesha_add_export(self, export_path: str, tmp_path: str): """Add a configured NFS export to 
Ganesha""" self._dbus_send( 'ExportMgr', 'AddExport', - 'string:{}'.format(tmp_path), 'string:EXPORT(Path={})'.format(export_path)) + 'string:{}'.format(tmp_path), + 'string:EXPORT(Path={})'.format(export_path)) def _ganesha_remove_export(self, share_id: int): """Remove a configured NFS export from Ganesha""" @@ -243,12 +258,14 @@ def _ganesha_update_export(self, share_id: int, tmp_path: str): """Update a configured NFS export in Ganesha""" self._dbus_send( 'ExportMgr', 'UpdateExport', - 'string:{}'.format(tmp_path), 'string:EXPORT(Export_Id={})'.format(share_id)) + 'string:{}'.format(tmp_path), + 'string:EXPORT(Export_Id={})'.format(share_id)) def _dbus_send(self, section: str, action: str, *args): """Send a command to Ganesha via Dbus""" cmd = [ - 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', + 'dbus-send', '--print-reply', '--system', + '--dest=org.ganesha.nfsd', '/org/ganesha/nfsd/{}'.format(section), 'org.ganesha.nfsd.exportmgr.{}'.format(action)] + [*args] logging.debug("About to call: {}".format(cmd)) @@ -275,7 +292,8 @@ def _create_cephfs_share(self, name: str, size_in_bytes: int = None): """ try: if size_in_bytes is not None: - self._ceph_subvolume_command('create', 'ceph-fs', name, str(size_in_bytes)) + self._ceph_subvolume_command('create', 'ceph-fs', + name, str(size_in_bytes)) else: self._ceph_subvolume_command('create', 'ceph-fs', name) except subprocess.CalledProcessError: @@ -297,7 +315,9 @@ def _create_cephfs_share(self, name: str, size_in_bytes: int = None): logging.error("failed to get path") return False - def _ceph_subvolume_command(self, *cmd: List[str]) -> subprocess.CompletedProcess: + def _ceph_subvolume_command( + self, *cmd: List[str] + ) -> subprocess.CompletedProcess: """Run a ceph fs subvolume command""" return self._ceph_fs_command('subvolume', *cmd) @@ -317,7 +337,10 @@ def _ceph_auth_key(self, access_id: str) -> str: def _ceph_command(self, *cmd: List[str]) -> subprocess.CompletedProcess: """Run a ceph command""" - cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd] + cmd = [ + "ceph", "--id", self.client_name, + "--conf=/etc/ceph/ceph.conf" + ] + [*cmd] return subprocess.check_output(cmd, stderr=subprocess.DEVNULL) def _get_next_export_id(self) -> int: @@ -386,7 +409,9 @@ def _rados_rm(self, name: str): def _add_share_to_index(self, export_id: int): """Add an export RADOS object's URL to the RADOS URL index.""" index_data = self._rados_get(self.export_index) - url = '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id) + url = '%url rados://{}/ganesha-export-{}'.format( + self.ceph_pool, export_id + ) rados_urls = index_data.split('\n') if url not in rados_urls: rados_urls.append(url) diff --git a/ceph-nfs/src/interface_ceph_nfs_peer.py b/ceph-nfs/src/interface_ceph_nfs_peer.py index 8e371b3c..f00d54aa 100644 --- a/ceph-nfs/src/interface_ceph_nfs_peer.py +++ b/ceph-nfs/src/interface_ceph_nfs_peer.py @@ -54,7 +54,8 @@ def __init__(self, charm, relation_name): def on_changed(self, event): logging.info("CephNFSPeers on_changed") logging.debug('pool_initialised: {}'.format(self.pool_initialised)) - if self.pool_initialised == 'True' and not self._stored.pool_initialised: + if self.pool_initialised == 'True' and \ + not self._stored.pool_initialised: logging.info("emiting pool initialised") self.on.pool_initialised.emit() self._stored.pool_initialised = True @@ -75,7 +76,9 @@ def initialised_pool(self): self.on.pool_initialised.emit() def trigger_reload(self): - 
self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = str(uuid.uuid4()) + self.peer_rel.data[ + self.peer_rel.app + ]['reload_nonce'] = str(uuid.uuid4()) self.on.reload_nonce.emit() @property diff --git a/ceph-nfs/src/manager.py b/ceph-nfs/src/manager.py index fd625ed8..c21578b9 100644 --- a/ceph-nfs/src/manager.py +++ b/ceph-nfs/src/manager.py @@ -94,8 +94,8 @@ def _conf2json(conf): word = ":" elif word == ";": word = ',' - elif (word in ['{', '}'] or - re.search(r'\A-?[1-9]\d*(\.\d+)?\Z', word)): + elif word in ['{', '}'] or \ + re.search(r'\A-?[1-9]\d*(\.\d+)?\Z', word): pass else: word = json.dumps(word) diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 100c2215..845700b4 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -76,7 +76,8 @@ def _grant_access(self, share_name: str, access_ip: str): }) self.assertEqual(action.status, 'completed') - def _mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: bool = True): + def _mount_share(self, unit_name: str, share_ip: str, + export_path: str, retry: bool = True): self._install_dependencies(unit_name) ssh_cmd = ( 'sudo mkdir -p {0} && ' @@ -88,7 +89,8 @@ def _mount_share(self, unit_name: str, share_ip: str, export_path: str, retry: b if retry: for attempt in tenacity.Retrying( stop=tenacity.stop_after_attempt(5), - wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)): + wait=tenacity.wait_exponential(multiplier=3, + min=2, max=10)): with attempt: zaza.utilities.generic.run_via_ssh( unit_name=unit_name, @@ -117,7 +119,9 @@ def _write_testing_file_on_instance(self, instance_name: str): stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) def _verify_testing_file_on_instance(self, instance_name: str): - run_with_juju_ssh = zaza.utilities.installers.make_juju_ssh_fn('ubuntu/1', sudo=True) + run_with_juju_ssh = zaza.utilities.installers.make_juju_ssh_fn( + 'ubuntu/1', sudo=True + ) output = run_with_juju_ssh( 'sudo cat {}/test'.format(self.mount_dir)) logging.info("Verification output: {}".format(output)) @@ -126,8 +130,12 @@ def _verify_testing_file_on_instance(self, instance_name: str): def test_create_share(self): logging.info("Creating a share") # Todo - enable ACL testing - ubuntu_0_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/0')) - ubuntu_1_ip = zaza.model.get_unit_public_address(zaza.model.get_unit_from_name('ubuntu/1')) + ubuntu_0_ip = zaza.model.get_unit_public_address( + zaza.model.get_unit_from_name('ubuntu/0') + ) + ubuntu_1_ip = zaza.model.get_unit_public_address( + zaza.model.get_unit_from_name('ubuntu/1') + ) share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) # share = self._create_share('test_ganesha_share') zaza.model.wait_for_application_states(states={ @@ -164,4 +172,5 @@ def test_list_shares(self): logging.debug("Action results: {}".format(results)) logging.debug("exports: {}".format(results['exports'])) exports = yaml.safe_load(results['exports']) - self.assertIn('test_ganesha_list_share', [export['name'] for export in exports]) + self.assertIn('test_ganesha_list_share', + [export['name'] for export in exports]) diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py index 2ad40088..ba2dc19c 100644 --- a/ceph-nfs/unit_tests/test_ganesha.py +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -9,7 +9,7 @@ # The directory in the exported file system this export # is rooted on. 
- Path = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950'; + Path = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c'; # FSAL, Ganesha's module component FSAL { @@ -20,7 +20,7 @@ } # Path of export in the NFSv4 pseudo filesystem - Pseudo = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950'; + Pseudo = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c'; SecType = "sys"; CLIENT { @@ -38,7 +38,8 @@ class ExportTest(unittest.TestCase): def test_parser(self): export = ganesha.Export.from_export(EXAMPLE_EXPORT) self.assertEqual(export.export_id, 1000) - self.assertEqual(export.clients, [{'Access_Type': 'rw', 'Clients': '0.0.0.0'}]) + self.assertEqual(export.clients, + [{'Access_Type': 'rw', 'Clients': '0.0.0.0'}]) self.assertEqual(export.name, 'test_ganesha_share') def test_add_client(self): @@ -57,7 +58,8 @@ def test_add_client(self): self.assertEqual( export.clients, [{ - 'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16' + 'Access_Type': 'rw', + 'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16' }]) def test_remove_client(self): From 260c88fb5940e6f26b6803bd70cb0a6f11cf140b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 31 May 2022 22:54:43 +0200 Subject: [PATCH 2402/2699] update readme with mentioned relations and bug link Change-Id: Ic2cbe864a4998762fcabcd36d4be026690c707ea --- ceph-nfs/README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index bae2b9f2..0887907c 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -43,10 +43,15 @@ Once everything settles, your shares will be accessible over the loadbalancer's ## Relations -TODO: Provide any relations which are provided or required by your charm +Ceph-NFS consumes the ceph-client relation from the ceph-mon charm. -## Contributing +# Bugs -Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines -on enhancements to this charm following best practice guidelines, and -`CONTRIBUTING.md` for developer guidance. +Please report bugs on [Launchpad][lp-bugs-charm-ceph-fs]. + +For general charm questions refer to the OpenStack [Charm Guide][cg]. + + + +[lp-bugs-charm-ceph-fs]: https://bugs.launchpad.net/charm-ceph-fs/+filebug +[cg]: https://docs.openstack.org/charm-guide From 67b94eb622592c99ce6592f8555e07e6c4923555 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 27 Jul 2022 19:49:12 -0300 Subject: [PATCH 2403/2699] Test upgrade path This patchset rearranges the tests so that the upgrade path is properly tested, with the tests that aren't in main moved into a different class. 
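The pattern is: deploy ceph-mon from its published channel, run the functional suite once, upgrade the application in place to the locally built charm, then run the same suite again so that any regression introduced by the local build shows up in the second pass. An "upgrade to path" step ultimately boils down to a juju refresh against the local artifact; a minimal Python sketch of that idea, assuming only that the juju CLI is available (upgrade_to_local_charm is an illustrative name, not zaza's actual API):

    import subprocess

    def upgrade_to_local_charm(application: str, charm_path: str) -> None:
        """Upgrade a deployed application to a locally built charm."""
        # 'juju refresh <app> --path <file>' swaps the running charm
        # for the archive on disk instead of a store revision.
        subprocess.check_call(
            ['juju', 'refresh', application, '--path', charm_path])

    upgrade_to_local_charm('ceph-mon', './ceph-mon.charm')

Running the identical test list before and after the refresh is what gives the upgrade path real coverage.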
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/849 Change-Id: I5ca38134115f82ac6dff5f559757df68a6c0cd24 --- ceph-mon/tests/bundles/focal-xena.yaml | 3 +- ceph-mon/tests/bundles/focal-yoga.yaml | 3 +- ceph-mon/tests/bundles/jammy-yoga.yaml | 3 +- ceph-mon/tests/bundles/local-focal-yoga.yaml | 249 +++++++++++++++++++ ceph-mon/tests/tests.yaml | 11 +- 5 files changed, 265 insertions(+), 4 deletions(-) create mode 100644 ceph-mon/tests/bundles/local-focal-yoga.yaml diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index 017bedcb..592d7eaf 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -72,7 +72,8 @@ applications: channel: quincy/edge ceph-mon: - charm: ../../ceph-mon.charm + charm: ch:ceph-mon + channel: quincy/edge num_units: 3 options: source: *openstack-origin diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 823cebdc..3e75ee5b 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -72,7 +72,8 @@ applications: channel: quincy/edge ceph-mon: - charm: ../../ceph-mon.charm + charm: ch:ceph-mon + channel: quincy/edge num_units: 3 options: source: *openstack-origin diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index a359c3b5..28932094 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -73,7 +73,8 @@ applications: channel: quincy/edge ceph-mon: - charm: ../../ceph-mon.charm + charm: ch:ceph-mon + channel: quincy/edge num_units: 3 options: source: *openstack-origin diff --git a/ceph-mon/tests/bundles/local-focal-yoga.yaml b/ceph-mon/tests/bundles/local-focal-yoga.yaml new file mode 100644 index 00000000..823cebdc --- /dev/null +++ b/ceph-mon/tests/bundles/local-focal-yoga.yaml @@ -0,0 +1,249 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + channel: quincy/edge + + ceph-mon: + charm: ../../ceph-mon.charm + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + + prometheus2: + charm: ch:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 
'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 9246aa2d..2df4cb85 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -20,7 +20,16 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - + # Charm upgrade, then re-run tests + - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-mon + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest + # Tests from quincy. + - zaza.openstack.charm_tests.ceph.tests.CephAuthTest tests_options: force_deploy: - jammy-yoga From efdf9ff36770e0dfa4d0068f2153e0bb1c8b0228 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 9 Aug 2022 09:35:10 -0400 Subject: [PATCH 2404/2699] Remove force-deploy for Jammy Additionally, Move jammy-yoga out of dev-bundles Change-Id: I8cc90044fde5b9da4013a805aaa62b6ccf8a5250 --- ceph-mon/tests/tests.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 2df4cb85..06b51cc4 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -3,8 +3,6 @@ charm_name: ceph-mon gate_bundles: - focal-xena - focal-yoga - -dev_bundles: - jammy-yoga smoke_bundles: @@ -30,6 +28,3 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest # Tests from quincy. 
- zaza.openstack.charm_tests.ceph.tests.CephAuthTest -tests_options: - force_deploy: - - jammy-yoga From dcd69afc2552d7ebe86868fa915af5ab598e7296 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 9 Aug 2022 14:52:13 -0400 Subject: [PATCH 2405/2699] Remove force-deploy for Jammy Additionally, Move jammy-yoga out of dev-bundles Change-Id: I759ed6b716c91f28bed2b9a1010e01e39c138114 --- ceph-osd/tests/tests.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index f0cc660e..e9490a78 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -3,8 +3,6 @@ charm_name: ceph-osd gate_bundles: - focal-xena - focal-yoga - -dev_bundles: - jammy-yoga smoke_bundles: @@ -19,7 +17,3 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest - -tests_options: - force_deploy: - - jammy-yoga From 292c04bb7ddde786ff2f3d84e39d4b680975cb67 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 9 Aug 2022 14:53:50 -0400 Subject: [PATCH 2406/2699] Remove force-deploy for Jammy Additionally, Move jammy-yoga out of dev-bundles Change-Id: Ibcfee61d595e8966ce0d17cc09983da870cec278 --- ceph-fs/src/tests/tests.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index d0bcbc69..42aec0ba 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,7 +1,6 @@ charm_name: ceph-fs gate_bundles: - focal-xena -dev_bundles: - focal-yoga - jammy-yoga smoke_bundles: @@ -16,6 +15,4 @@ tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation -tests_options: - force_deploy: - - jammy-yoga + From 0d1a8efc90b3a0a0ed55e31ddd5ced5b1db097d2 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 9 Aug 2022 15:18:11 -0400 Subject: [PATCH 2407/2699] Remove force-deploy for Jammy Additionally, Move jammy-yoga out of dev-bundles and remove Impish test. 
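For context on these removals: a bundle listed under tests_options/force_deploy is deployed with Juju's --force flag, which skips the check that every charm declares the target series as supported. With jammy now supported by the charms under test, the override is unnecessary and jammy-yoga can sit directly in gate_bundles, as the hunks above show. A rough sketch of the behaviour being dropped, assuming a subprocess-based deploy step (deploy_bundle is an illustrative helper, not zaza's real API):

    import subprocess

    def deploy_bundle(bundle_path: str, force: bool = False) -> None:
        """Deploy a test bundle, optionally forcing an unsupported series."""
        cmd = ['juju', 'deploy', bundle_path]
        if force:
            # '--force' bypasses the supported-series check; this is the
            # escape hatch that force_deploy bundles used to rely on.
            cmd.append('--force')
        subprocess.check_call(cmd)

    deploy_bundle('./tests/bundles/jammy-yoga.yaml')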
Change-Id: Ie82b0f3d3d19a3cd07be133f64a70445e974ad2b --- ceph-proxy/osci.yaml | 15 ++------------- ceph-proxy/tests/tests.yaml | 7 ------- 2 files changed, 2 insertions(+), 20 deletions(-)
diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index 9b8d20da..d08a33e9 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -7,12 +7,8 @@ check: jobs: - focal-xena-ec - - focal-yoga-ec: - voting: false - - impish-xena-ec: - voting: false - - jammy-yoga-ec: - voting: false + - focal-yoga-ec + - jammy-yoga-ec vars: needs_charm_build: true charm_build_name: ceph-proxy @@ -34,13 +30,6 @@ - focal-xena-ec vars: tox_extra_args: erasure-coded:focal-yoga-ec -- job: - name: impish-xena-ec - parent: func-target - dependencies: - - focal-xena-ec - vars: - tox_extra_args: erasure-coded:impish-xena-ec - job: name: jammy-yoga-ec parent: func-target
diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index fba0805b..75fe4043 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -14,8 +14,6 @@ tests: gate_bundles: - focal-xena - erasure-coded: focal-xena-ec - -dev_bundles: - focal-yoga - erasure-coded: focal-yoga-ec - jammy-yoga @@ -43,8 +41,3 @@ target_deploy_status: glance: workload-status: waiting workload-status-message: "Incomplete relations: storage-backend" - -tests_options: - force_deploy: - - jammy-yoga - - jammy-yoga-ec
From a42af26066572111a09b8157ff020f8417c93424 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 15 Aug 2022 09:32:50 -0400 Subject: [PATCH 2408/2699] Make jammy voting
This change also enabled jammy-proposed to ensure we can get an updated python3-cheroot until it's released into -updates.
Change-Id: I2325f7ddf2c0015a054b8d2d1c97f1054b619149 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/857 --- ceph-dashboard/osci.yaml | 6 +----- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 2 ++ ceph-dashboard/tests/tests.yaml | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 41c40d95..ffe2619e 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -2,11 +2,7 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - check: - jobs: - - focal-yoga - - jammy-yoga: - voting: false + - charm-yoga-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-dashboard
diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index 00404066..81635bb8 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -14,6 +14,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: distro-proposed channel: latest/edge vault: num_units: 1 @@ -40,6 +41,7 @@ # October 2021 charm: ch:grafana num_units: 1 + series: focal options: anonymous: True install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip
diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 415df2cf..f19bf66a 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -1,10 +1,9 @@ charm_name: ceph-dashboard gate_bundles: - focal + - jammy smoke_bundles: - focal -dev_bundles: - - jammy configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation -
zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert From 0f179a3c2e7a9d1c5923c63cc113fb2f2a80cace Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 19 Aug 2021 16:05:34 -0500 Subject: [PATCH 2409/2699] Disable insecure global-id reclamation Closes-Bug: #1929262 Change-Id: Id9f4cfdd70bab0090b66cbc8aeb258936cbf909e --- ceph-mon/hooks/ceph_hooks.py | 6 ++++-- ceph-mon/hooks/utils.py | 19 +++++++++++++++++++ ceph-mon/unit_tests/test_ceph_hooks.py | 1 + ceph-mon/unit_tests/test_ceph_utils.py | 12 ++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/hooks/ceph_hooks.py index a6cd1e32..1cc4c4bd 100755 --- a/ceph-mon/hooks/ceph_hooks.py +++ b/ceph-mon/hooks/ceph_hooks.py @@ -103,6 +103,7 @@ mgr_enable_module, is_mgr_module_enabled, set_balancer_mode, + try_disable_insecure_reclaim, ) from charmhelpers.contrib.charmsupport import nrpe @@ -325,10 +326,9 @@ def config_changed(): if cmp_pkgrevno('ceph', '12.0.0') >= 0: status_set('maintenance', 'Bootstrapping single Ceph MGR') ceph.bootstrap_manager() - + try_disable_insecure_reclaim() for relid in relation_ids('dashboard'): dashboard_relation(relid) - # Update client relations notify_client() @@ -528,6 +528,8 @@ def attempt_mon_cluster_bootstrap(): except subprocess.CalledProcessError: log("Failed to initialize autoscaler, it must be " "initialized on the last monitor", level='info') + + try_disable_insecure_reclaim() # If we can and want to if is_leader() and config('customize-failure-domain'): # But only if the environment supports it diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/hooks/utils.py index 3e29c7c9..2781eefd 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/hooks/utils.py @@ -23,6 +23,7 @@ cached, config, goal_state, + is_leader, log, network_get_primary_address, related_units, @@ -296,6 +297,24 @@ def get_ceph_osd_releases(): return list(ceph_osd_releases) +def try_disable_insecure_reclaim(): + """Disable insecure global-id reclaim on supported versions. + + This function will disable insecure global-id reclaim on versions + of ceph that are supported. Running this on a healthy cluster or + a cluster that doesn't support the option won't have any effect. + """ + if is_leader(): + try: + subprocess.check_call([ + 'ceph', '--id', 'admin', + 'config', 'set', 'mon', + 'auth_allow_insecure_global_id_reclaim', 'false']) + except subprocess.CalledProcessError as e: + log("Could not disable insecure reclaim: {}".format(e), + level='ERROR') + + def execute_post_osd_upgrade_steps(ceph_osd_release): """Executes post-upgrade steps. 
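Because 'ceph config set' writes to the monitors' central configuration database, the setting only needs to be applied once per cluster, which is why try_disable_insecure_reclaim() is gated on is_leader() rather than executed by every unit. The value can be read back from any node with the standard 'ceph config get' command; a small companion sketch (the helper name is illustrative, not part of the charm):

    import subprocess

    def insecure_reclaim_allowed() -> bool:
        """Return True if the cluster still permits insecure global-id reclaim."""
        out = subprocess.check_output(
            ['ceph', '--id', 'admin', 'config', 'get', 'mon',
             'auth_allow_insecure_global_id_reclaim'])
        return out.decode().strip().lower() == 'true'

Leaving the option at its permissive default keeps the cluster tolerant of clients that reuse cephx global_ids; turning it off means unpatched clients are refused outright, which is the safer posture the linked bug asks for.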
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index dc5ff7a2..a2c653ce 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -32,6 +32,7 @@ 'relation_get', 'relations_of_type', 'status_set', + 'try_disable_insecure_reclaim', ] CHARM_CONFIG = {'config-flags': '', diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 4332c71a..9a82ff1d 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -388,3 +388,15 @@ def test_balancer_mode_no_balancer(self, is_mgr_module_enabled.return_value = False utils.set_balancer_mode('upmap') check_call.assert_not_called() + + @mock.patch.object(utils.subprocess, 'check_call') + @mock.patch.object(utils, 'is_leader') + def test_disable_insecure_reclaim(self, + is_leader, + check_call): + is_leader.return_value = True + utils.try_disable_insecure_reclaim() + check_call.assert_called_once_with([ + 'ceph', '--id', 'admin', + 'config', 'set', 'mon', + 'auth_allow_insecure_global_id_reclaim', 'false']) From e72fb4f5f85bb95d1b58ef2b35eeedd25d913f4a Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 11 May 2022 17:39:53 -0300 Subject: [PATCH 2410/2699] First rewrite of ceph-mon with operator framework This patchset implements the first rewrite of the charm using the operator framework by simply calling into the hooks. This change also includes functional validation about charm upgrades from the previous stable to the locally built charm. Fix tempest breakage for python < 3.8 Co-authored-by: Chris MacNaughton Change-Id: I61308bb2900134ea163d9e92444066a3cb0de43d func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/849 --- ceph-mon/Makefile | 34 - ceph-mon/TODO | 6 - ceph-mon/actions/__init__.py | 3 - ceph-mon/actions/ceph_ops.py | 3 - ceph-mon/actions/change_osd_weight.py | 5 - ceph-mon/actions/copy_pool.py | 3 - ceph-mon/actions/create_cache_tier.py | 3 - ceph-mon/actions/create_crush_rule.py | 3 - ceph-mon/actions/create_erasure_profile.py | 3 - ceph-mon/actions/create_pool.py | 3 - ceph-mon/actions/crushmap_update.py | 1 + ceph-mon/actions/delete_erasure_profile.py | 3 - ceph-mon/actions/delete_pool.py | 3 - ceph-mon/actions/delete_user.py | 3 - ceph-mon/actions/get_erasure_profile.py | 4 - ceph-mon/actions/get_or_create_user.py | 2 - ceph-mon/actions/get_quorum_status.py | 3 - ceph-mon/actions/list_crush_rules.py | 15 +- ceph-mon/actions/list_erasure_profiles.py | 3 - ceph-mon/actions/list_inconsistent_objs.py | 3 - ceph-mon/actions/list_pools.py | 15 +- ceph-mon/actions/pg_repair.py | 16 +- ceph-mon/actions/pool_get.py | 3 - ceph-mon/actions/pool_set.py | 4 - ceph-mon/actions/pool_statistics.py | 3 - ceph-mon/actions/purge_osd.py | 6 +- ceph-mon/actions/remove_cache_tier.py | 3 - ceph-mon/actions/remove_pool_snapshot.py | 3 - ceph-mon/actions/rename_pool.py | 3 - ceph-mon/actions/security_checklist.py | 2 - ceph-mon/actions/set_noout.py | 3 - ceph-mon/actions/set_pool_max_bytes.py | 3 - ceph-mon/actions/show_disk_free.py | 3 - ceph-mon/actions/snapshot_pool.py | 3 - ceph-mon/actions/unset_noout.py | 3 - ceph-mon/charm-helpers-hooks.yaml | 21 - ceph-mon/charmcraft.yaml | 28 +- ceph-mon/hooks/__init__.py | 13 - ceph-mon/hooks/admin-relation-changed | 1 - ceph-mon/hooks/admin-relation-joined | 1 - .../hooks/bootstrap-source-relation-changed | 1 - .../hooks/bootstrap-source-relation-departed | 1 - ceph-mon/hooks/charmhelpers/__init__.py | 84 - 
ceph-mon/hooks/charmhelpers/cli/__init__.py | 187 - ceph-mon/hooks/charmhelpers/cli/benchmark.py | 34 - ceph-mon/hooks/charmhelpers/cli/commands.py | 30 - ceph-mon/hooks/charmhelpers/cli/hookenv.py | 21 - ceph-mon/hooks/charmhelpers/cli/host.py | 29 - ceph-mon/hooks/charmhelpers/cli/unitdata.py | 46 - .../hooks/charmhelpers/contrib/__init__.py | 13 - .../contrib/charmsupport/__init__.py | 13 - .../charmhelpers/contrib/charmsupport/nrpe.py | 560 --- .../contrib/charmsupport/volumes.py | 173 - .../contrib/hahelpers/__init__.py | 13 - .../charmhelpers/contrib/hahelpers/apache.py | 90 - .../charmhelpers/contrib/hahelpers/cluster.py | 448 --- .../contrib/hardening/README.hardening.md | 38 - .../contrib/hardening/__init__.py | 13 - .../contrib/hardening/apache/__init__.py | 17 - .../hardening/apache/checks/__init__.py | 29 - .../contrib/hardening/apache/checks/config.py | 101 - .../apache/templates/99-hardening.conf | 32 - .../hardening/apache/templates/__init__.py | 0 .../hardening/apache/templates/alias.conf | 31 - .../contrib/hardening/audits/__init__.py | 54 - .../contrib/hardening/audits/apache.py | 101 - .../contrib/hardening/audits/apt.py | 101 - .../contrib/hardening/audits/file.py | 549 --- .../contrib/hardening/defaults/__init__.py | 0 .../contrib/hardening/defaults/apache.yaml | 16 - .../hardening/defaults/apache.yaml.schema | 12 - .../contrib/hardening/defaults/mysql.yaml | 38 - .../hardening/defaults/mysql.yaml.schema | 15 - .../contrib/hardening/defaults/os.yaml | 68 - .../contrib/hardening/defaults/os.yaml.schema | 43 - .../contrib/hardening/defaults/ssh.yaml | 49 - .../hardening/defaults/ssh.yaml.schema | 42 - .../charmhelpers/contrib/hardening/harden.py | 93 - .../contrib/hardening/host/__init__.py | 17 - .../contrib/hardening/host/checks/__init__.py | 48 - .../contrib/hardening/host/checks/apt.py | 37 - .../contrib/hardening/host/checks/limits.py | 53 - .../contrib/hardening/host/checks/login.py | 63 - .../hardening/host/checks/minimize_access.py | 50 - .../contrib/hardening/host/checks/pam.py | 132 - .../contrib/hardening/host/checks/profile.py | 49 - .../hardening/host/checks/securetty.py | 37 - .../hardening/host/checks/suid_sgid.py | 129 - .../contrib/hardening/host/checks/sysctl.py | 208 - .../hardening/host/templates/10.hardcore.conf | 8 - .../hardening/host/templates/99-hardening.sh | 5 - .../host/templates/99-juju-hardening.conf | 7 - .../hardening/host/templates/__init__.py | 0 .../hardening/host/templates/login.defs | 349 -- .../contrib/hardening/host/templates/modules | 117 - .../hardening/host/templates/passwdqc.conf | 11 - .../host/templates/pinerolo_profile.sh | 8 - .../hardening/host/templates/securetty | 11 - .../contrib/hardening/host/templates/tally2 | 14 - .../contrib/hardening/mysql/__init__.py | 17 - .../hardening/mysql/checks/__init__.py | 29 - .../contrib/hardening/mysql/checks/config.py | 86 - .../hardening/mysql/templates/__init__.py | 0 .../hardening/mysql/templates/hardening.cnf | 12 - .../contrib/hardening/ssh/__init__.py | 17 - .../contrib/hardening/ssh/checks/__init__.py | 29 - .../contrib/hardening/ssh/checks/config.py | 435 -- .../hardening/ssh/templates/__init__.py | 0 .../hardening/ssh/templates/ssh_config | 70 - .../hardening/ssh/templates/sshd_config | 159 - .../contrib/hardening/templating.py | 69 - .../charmhelpers/contrib/hardening/utils.py | 154 - .../charmhelpers/contrib/network/__init__.py | 13 - .../hooks/charmhelpers/contrib/network/ip.py | 590 --- .../contrib/openstack/__init__.py | 13 - .../contrib/openstack/alternatives.py | 44 - 
.../contrib/openstack/audits/__init__.py | 212 - .../audits/openstack_security_guide.py | 270 -- .../contrib/openstack/cert_utils.py | 443 -- .../charmhelpers/contrib/openstack/context.py | 3361 ---------------- .../contrib/openstack/deferred_events.py | 416 -- .../contrib/openstack/exceptions.py | 26 - .../contrib/openstack/files/__init__.py | 16 - .../files/check_deferred_restarts.py | 128 - .../openstack/files/policy_rc_d_script.py | 196 - .../contrib/openstack/ha/__init__.py | 13 - .../contrib/openstack/ha/utils.py | 348 -- .../charmhelpers/contrib/openstack/ip.py | 235 -- .../contrib/openstack/keystone.py | 170 - .../charmhelpers/contrib/openstack/neutron.py | 351 -- .../contrib/openstack/policy_rcd.py | 173 - .../charmhelpers/contrib/openstack/policyd.py | 763 ---- .../contrib/openstack/ssh_migrations.py | 412 -- .../contrib/openstack/templates/__init__.py | 16 - .../contrib/openstack/templating.py | 370 -- .../charmhelpers/contrib/openstack/utils.py | 2694 ------------- .../contrib/openstack/vaultlocker.py | 179 - .../charmhelpers/contrib/storage/__init__.py | 13 - .../contrib/storage/linux/__init__.py | 13 - .../contrib/storage/linux/bcache.py | 74 - .../contrib/storage/linux/ceph.py | 2384 ----------- .../contrib/storage/linux/loopback.py | 88 - .../charmhelpers/contrib/storage/linux/lvm.py | 182 - .../contrib/storage/linux/utils.py | 128 - ceph-mon/hooks/charmhelpers/core/__init__.py | 13 - .../hooks/charmhelpers/core/decorators.py | 93 - ceph-mon/hooks/charmhelpers/core/files.py | 43 - ceph-mon/hooks/charmhelpers/core/fstab.py | 132 - ceph-mon/hooks/charmhelpers/core/hookenv.py | 1636 -------- ceph-mon/hooks/charmhelpers/core/host.py | 1304 ------ .../core/host_factory/__init__.py | 0 .../charmhelpers/core/host_factory/centos.py | 72 - .../charmhelpers/core/host_factory/ubuntu.py | 122 - ceph-mon/hooks/charmhelpers/core/hugepage.py | 69 - ceph-mon/hooks/charmhelpers/core/kernel.py | 72 - .../core/kernel_factory/__init__.py | 0 .../core/kernel_factory/centos.py | 17 - .../core/kernel_factory/ubuntu.py | 13 - .../charmhelpers/core/services/__init__.py | 16 - .../hooks/charmhelpers/core/services/base.py | 362 -- .../charmhelpers/core/services/helpers.py | 290 -- ceph-mon/hooks/charmhelpers/core/strutils.py | 131 - ceph-mon/hooks/charmhelpers/core/sysctl.py | 75 - .../hooks/charmhelpers/core/templating.py | 88 - ceph-mon/hooks/charmhelpers/core/unitdata.py | 525 --- ceph-mon/hooks/charmhelpers/fetch/__init__.py | 208 - .../hooks/charmhelpers/fetch/archiveurl.py | 150 - ceph-mon/hooks/charmhelpers/fetch/bzrurl.py | 76 - ceph-mon/hooks/charmhelpers/fetch/centos.py | 170 - ceph-mon/hooks/charmhelpers/fetch/giturl.py | 69 - .../charmhelpers/fetch/python/__init__.py | 13 - .../hooks/charmhelpers/fetch/python/debug.py | 52 - .../charmhelpers/fetch/python/packages.py | 148 - .../hooks/charmhelpers/fetch/python/rpdb.py | 56 - .../charmhelpers/fetch/python/version.py | 32 - ceph-mon/hooks/charmhelpers/fetch/snap.py | 150 - ceph-mon/hooks/charmhelpers/fetch/ubuntu.py | 1003 ----- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 335 -- ceph-mon/hooks/charmhelpers/osplatform.py | 49 - .../hooks/charmhelpers/payload/__init__.py | 15 - ceph-mon/hooks/charmhelpers/payload/execd.py | 65 - ceph-mon/hooks/client-relation-changed | 1 - ceph-mon/hooks/client-relation-joined | 1 - ceph-mon/hooks/config-changed | 1 - ceph-mon/hooks/dashboard-relation-joined | 1 - ceph-mon/hooks/install | 21 - ceph-mon/hooks/install.real | 1 - ceph-mon/hooks/install_deps | 18 - ceph-mon/hooks/leader-settings-changed | 1 - 
ceph-mon/hooks/mds-relation-changed | 1 - ceph-mon/hooks/mds-relation-joined | 1 - ceph-mon/hooks/mon-relation-changed | 1 - ceph-mon/hooks/mon-relation-departed | 1 - ceph-mon/hooks/mon-relation-joined | 1 - .../nrpe-external-master-relation-changed | 1 - .../nrpe-external-master-relation-joined | 1 - ceph-mon/hooks/osd-relation-changed | 1 - ceph-mon/hooks/osd-relation-joined | 1 - ceph-mon/hooks/post-series-upgrade | 1 - ceph-mon/hooks/pre-series-upgrade | 1 - ceph-mon/hooks/prometheus-relation-changed | 1 - ceph-mon/hooks/prometheus-relation-departed | 1 - ceph-mon/hooks/prometheus-relation-joined | 1 - ceph-mon/hooks/radosgw-relation-changed | 1 - ceph-mon/hooks/radosgw-relation-joined | 1 - ceph-mon/hooks/rbd-mirror-relation-changed | 1 - ceph-mon/hooks/rbd-mirror-relation-joined | 1 - ceph-mon/hooks/start | 1 - ceph-mon/hooks/stop | 1 - ceph-mon/hooks/update-status | 1 - ceph-mon/hooks/upgrade-charm | 7 - ceph-mon/hooks/upgrade-charm.real | 1 - ceph-mon/lib/charms_ceph/__init__.py | 0 ceph-mon/lib/charms_ceph/broker.py | 913 ----- ceph-mon/lib/charms_ceph/crush_utils.py | 154 - ceph-mon/lib/charms_ceph/utils.py | 3583 ----------------- ceph-mon/osci.yaml | 12 + ceph-mon/requirements.txt | 38 +- ceph-mon/{hooks => src}/ceph_hooks.py | 0 ceph-mon/src/charm.py | 134 + ceph-mon/{hooks => src}/utils.py | 12 +- ceph-mon/test-requirements.txt | 2 +- ceph-mon/tests/tests.yaml | 10 +- ceph-mon/tox.ini | 2 +- ceph-mon/unit_tests/__init__.py | 1 + ceph-mon/unit_tests/test_ceph_utils.py | 2 +- 226 files changed, 195 insertions(+), 32598 deletions(-) delete mode 100644 ceph-mon/Makefile delete mode 100644 ceph-mon/TODO delete mode 100644 ceph-mon/charm-helpers-hooks.yaml delete mode 100644 ceph-mon/hooks/__init__.py delete mode 120000 ceph-mon/hooks/admin-relation-changed delete mode 120000 ceph-mon/hooks/admin-relation-joined delete mode 120000 ceph-mon/hooks/bootstrap-source-relation-changed delete mode 120000 ceph-mon/hooks/bootstrap-source-relation-departed delete mode 100644 ceph-mon/hooks/charmhelpers/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/benchmark.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/commands.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/hookenv.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/host.py delete mode 100644 ceph-mon/hooks/charmhelpers/cli/unitdata.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf delete mode 100644 
ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py delete mode 100644 
ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/network/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/network/ip.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/context.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py delete mode 100755 ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py delete mode 100755 ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py delete mode 100644 ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/decorators.py delete mode 
100644 ceph-mon/hooks/charmhelpers/core/files.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/fstab.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/hookenv.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/host.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/centos.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/hugepage.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/kernel.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/services/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/services/base.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/services/helpers.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/strutils.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/sysctl.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/templating.py delete mode 100644 ceph-mon/hooks/charmhelpers/core/unitdata.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/archiveurl.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/bzrurl.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/centos.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/giturl.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/debug.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/packages.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/python/version.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/snap.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/ubuntu.py delete mode 100644 ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py delete mode 100644 ceph-mon/hooks/charmhelpers/osplatform.py delete mode 100644 ceph-mon/hooks/charmhelpers/payload/__init__.py delete mode 100644 ceph-mon/hooks/charmhelpers/payload/execd.py delete mode 120000 ceph-mon/hooks/client-relation-changed delete mode 120000 ceph-mon/hooks/client-relation-joined delete mode 120000 ceph-mon/hooks/config-changed delete mode 120000 ceph-mon/hooks/dashboard-relation-joined delete mode 100755 ceph-mon/hooks/install delete mode 120000 ceph-mon/hooks/install.real delete mode 100755 ceph-mon/hooks/install_deps delete mode 120000 ceph-mon/hooks/leader-settings-changed delete mode 120000 ceph-mon/hooks/mds-relation-changed delete mode 120000 ceph-mon/hooks/mds-relation-joined delete mode 120000 ceph-mon/hooks/mon-relation-changed delete mode 120000 ceph-mon/hooks/mon-relation-departed delete mode 120000 ceph-mon/hooks/mon-relation-joined delete mode 120000 ceph-mon/hooks/nrpe-external-master-relation-changed delete mode 120000 ceph-mon/hooks/nrpe-external-master-relation-joined delete mode 120000 ceph-mon/hooks/osd-relation-changed delete mode 120000 ceph-mon/hooks/osd-relation-joined delete mode 120000 ceph-mon/hooks/post-series-upgrade delete mode 120000 ceph-mon/hooks/pre-series-upgrade delete mode 120000 ceph-mon/hooks/prometheus-relation-changed delete mode 120000 ceph-mon/hooks/prometheus-relation-departed delete mode 120000 
ceph-mon/hooks/prometheus-relation-joined delete mode 120000 ceph-mon/hooks/radosgw-relation-changed delete mode 120000 ceph-mon/hooks/radosgw-relation-joined delete mode 120000 ceph-mon/hooks/rbd-mirror-relation-changed delete mode 120000 ceph-mon/hooks/rbd-mirror-relation-joined delete mode 120000 ceph-mon/hooks/start delete mode 120000 ceph-mon/hooks/stop delete mode 120000 ceph-mon/hooks/update-status delete mode 100755 ceph-mon/hooks/upgrade-charm delete mode 120000 ceph-mon/hooks/upgrade-charm.real delete mode 100644 ceph-mon/lib/charms_ceph/__init__.py delete mode 100644 ceph-mon/lib/charms_ceph/broker.py delete mode 100644 ceph-mon/lib/charms_ceph/crush_utils.py delete mode 100644 ceph-mon/lib/charms_ceph/utils.py rename ceph-mon/{hooks => src}/ceph_hooks.py (100%) create mode 100755 ceph-mon/src/charm.py rename ceph-mon/{hooks => src}/utils.py (98%) diff --git a/ceph-mon/Makefile b/ceph-mon/Makefile deleted file mode 100644 index c772e4c1..00000000 --- a/ceph-mon/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/make -PYTHON := /usr/bin/env python3 - -lint: - @tox -e pep8 - -test: - @echo Starting unit tests... - @tox -e py27 - -functional_test: - @echo Starting Amulet tests... - @tox -e func27 - -bin/charm_helpers_sync.py: - @mkdir -p bin - @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py - - -bin/git_sync.py: - @mkdir -p bin - @wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py - -ch-sync: bin/charm_helpers_sync.py - $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - -ceph-sync: bin/git_sync.py - $(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git - -sync: ch-sync - -publish: lint test - bzr push lp:charms/ceph-mon - bzr push lp:charms/trusty/ceph-mon diff --git a/ceph-mon/TODO b/ceph-mon/TODO deleted file mode 100644 index 22e0889d..00000000 --- a/ceph-mon/TODO +++ /dev/null @@ -1,6 +0,0 @@ -Ceph Charm -========== - - * fix tunables (http://tracker.newdream.net/issues/2210) - * more than 192 PGs - * fixup data placement in crush to be host not osd driven diff --git a/ceph-mon/actions/__init__.py b/ceph-mon/actions/__init__.py index b7fe4e1b..9b088de8 100644 --- a/ceph-mon/actions/__init__.py +++ b/ceph-mon/actions/__init__.py @@ -11,6 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -import sys -sys.path.append('hooks') diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index 0e6eb7ac..a71c6869 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -14,9 +14,6 @@ import json from subprocess import CalledProcessError, check_output -import sys - -sys.path.append('hooks') from charmhelpers.core.hookenv import ( action_get, diff --git a/ceph-mon/actions/change_osd_weight.py b/ceph-mon/actions/change_osd_weight.py index 9a517349..1732f010 100755 --- a/ceph-mon/actions/change_osd_weight.py +++ b/ceph-mon/actions/change_osd_weight.py @@ -16,11 +16,6 @@ """Changes the crush weight of an OSD.""" -import sys - -sys.path.append("lib") -sys.path.append("hooks") - from charmhelpers.core.hookenv import function_fail, function_get, log from charms_ceph.utils import reweight_osd diff --git a/ceph-mon/actions/copy_pool.py b/ceph-mon/actions/copy_pool.py index 5112cf70..84723c8a 100755 --- a/ceph-mon/actions/copy_pool.py +++ b/ceph-mon/actions/copy_pool.py @@ -14,11 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import subprocess -sys.path.append('hooks') - import charmhelpers.core.hookenv as hookenv diff --git a/ceph-mon/actions/create_cache_tier.py b/ceph-mon/actions/create_cache_tier.py index 0ef212ed..cc68257e 100755 --- a/ceph-mon/actions/create_cache_tier.py +++ b/ceph-mon/actions/create_cache_tier.py @@ -15,9 +15,6 @@ # limitations under the License. from subprocess import CalledProcessError -import sys - -sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-mon/actions/create_crush_rule.py b/ceph-mon/actions/create_crush_rule.py index 207b4f4f..65781132 100755 --- a/ceph-mon/actions/create_crush_rule.py +++ b/ceph-mon/actions/create_crush_rule.py @@ -14,11 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import subprocess -sys.path.append('hooks') - import charmhelpers.core.hookenv as hookenv diff --git a/ceph-mon/actions/create_erasure_profile.py b/ceph-mon/actions/create_erasure_profile.py index 5306baa6..40673d7e 100755 --- a/ceph-mon/actions/create_erasure_profile.py +++ b/ceph-mon/actions/create_erasure_profile.py @@ -15,9 +15,6 @@ # limitations under the License. from subprocess import CalledProcessError -import sys - -sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-mon/actions/create_pool.py b/ceph-mon/actions/create_pool.py index f8faee1f..7b9582e2 100755 --- a/ceph-mon/actions/create_pool.py +++ b/ceph-mon/actions/create_pool.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool diff --git a/ceph-mon/actions/crushmap_update.py b/ceph-mon/actions/crushmap_update.py index c4aa13f0..fbe188fc 100755 --- a/ceph-mon/actions/crushmap_update.py +++ b/ceph-mon/actions/crushmap_update.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + import base64 from charmhelpers.core.hookenv import action_get, action_fail from subprocess import check_output, CalledProcessError, PIPE, Popen diff --git a/ceph-mon/actions/delete_erasure_profile.py b/ceph-mon/actions/delete_erasure_profile.py index 17dc2ef5..748ce5a6 100755 --- a/ceph-mon/actions/delete_erasure_profile.py +++ b/ceph-mon/actions/delete_erasure_profile.py @@ -17,9 +17,6 @@ from subprocess import CalledProcessError __author__ = 'chris' -import sys - -sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-mon/actions/delete_pool.py b/ceph-mon/actions/delete_pool.py index d05078da..3d7460e3 100755 --- a/ceph-mon/actions/delete_pool.py +++ b/ceph-mon/actions/delete_pool.py @@ -14,11 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import subprocess -sys.path.append('hooks') - from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-mon/actions/delete_user.py b/ceph-mon/actions/delete_user.py index 93c6016c..4dc8283b 100755 --- a/ceph-mon/actions/delete_user.py +++ b/ceph-mon/actions/delete_user.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from charmhelpers.core.hookenv import action_get, action_fail, action_set, log from subprocess import CalledProcessError, check_output, STDOUT diff --git a/ceph-mon/actions/get_erasure_profile.py b/ceph-mon/actions/get_erasure_profile.py index a259e748..9038f2b0 100755 --- a/ceph-mon/actions/get_erasure_profile.py +++ b/ceph-mon/actions/get_erasure_profile.py @@ -14,10 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') - from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile from charmhelpers.core.hookenv import action_get, action_set diff --git a/ceph-mon/actions/get_or_create_user.py b/ceph-mon/actions/get_or_create_user.py index 000855d7..a841dd66 100755 --- a/ceph-mon/actions/get_or_create_user.py +++ b/ceph-mon/actions/get_or_create_user.py @@ -14,10 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import json -sys.path.append("hooks") from charmhelpers.core.hookenv import action_get, action_fail, action_set, log from subprocess import CalledProcessError, check_output diff --git a/ceph-mon/actions/get_quorum_status.py b/ceph-mon/actions/get_quorum_status.py index 31f04890..a537bce3 100755 --- a/ceph-mon/actions/get_quorum_status.py +++ b/ceph-mon/actions/get_quorum_status.py @@ -16,12 +16,9 @@ """Run action to collect Ceph quorum_status output.""" import json -import sys from subprocess import CalledProcessError -sys.path.append('hooks') - from ceph_ops import get_quorum_status from charmhelpers.core.hookenv import function_fail, function_get, function_set diff --git a/ceph-mon/actions/list_crush_rules.py b/ceph-mon/actions/list_crush_rules.py index a28fcc2b..6f57cc45 100755 --- a/ceph-mon/actions/list_crush_rules.py +++ b/ceph-mon/actions/list_crush_rules.py @@ -13,24 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + import json -import os -import sys import yaml from subprocess import check_output, CalledProcessError -_path = os.path.dirname(os.path.realpath(__file__)) -_hooks = os.path.abspath(os.path.join(_path, "../hooks")) - - -def _add_path(path): - if path not in sys.path: - sys.path.insert(1, path) - - -_add_path(_hooks) - - from charmhelpers.core.hookenv import ( ERROR, log, diff --git a/ceph-mon/actions/list_erasure_profiles.py b/ceph-mon/actions/list_erasure_profiles.py index c26804ec..2c067583 100755 --- a/ceph-mon/actions/list_erasure_profiles.py +++ b/ceph-mon/actions/list_erasure_profiles.py @@ -14,11 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys from subprocess import check_output, CalledProcessError -sys.path.append('hooks') - from charmhelpers.core.hookenv import action_get, log, action_set, action_fail if __name__ == '__main__': diff --git a/ceph-mon/actions/list_inconsistent_objs.py b/ceph-mon/actions/list_inconsistent_objs.py index 6d8de5d0..5112166b 100755 --- a/ceph-mon/actions/list_inconsistent_objs.py +++ b/ceph-mon/actions/list_inconsistent_objs.py @@ -16,12 +16,9 @@ import json import re -import sys from subprocess import check_output, CalledProcessError import yaml -sys.path.append('hooks') - from charmhelpers.core.hookenv import function_fail, function_get, \ function_set, log diff --git a/ceph-mon/actions/list_pools.py b/ceph-mon/actions/list_pools.py index 10c05611..4c1384a9 100755 --- a/ceph-mon/actions/list_pools.py +++ b/ceph-mon/actions/list_pools.py @@ -13,23 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import json -import os -import sys from subprocess import check_output, CalledProcessError -_path = os.path.dirname(os.path.realpath(__file__)) -_hooks = os.path.abspath(os.path.join(_path, "../hooks")) - - -def _add_path(path): - if path not in sys.path: - sys.path.insert(1, path) - - -_add_path(_hooks) - - from charmhelpers.core.hookenv import ( log, function_fail, diff --git a/ceph-mon/actions/pg_repair.py b/ceph-mon/actions/pg_repair.py index 6dd17ecc..be440f5e 100755 --- a/ceph-mon/actions/pg_repair.py +++ b/ceph-mon/actions/pg_repair.py @@ -13,24 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import json -import os -import sys from subprocess import check_output, CalledProcessError -_path = os.path.dirname(os.path.realpath(__file__)) -_hooks = os.path.abspath(os.path.join(_path, "../hooks")) -_lib = os.path.abspath(os.path.join(_path, "../lib")) - - -def _add_path(path): - if path not in sys.path: - sys.path.insert(1, path) - - -_add_path(_hooks) -_add_path(_lib) - from charmhelpers.core.hookenv import ( log, diff --git a/ceph-mon/actions/pool_get.py b/ceph-mon/actions/pool_get.py index 5073d8c3..b139d0dc 100755 --- a/ceph-mon/actions/pool_get.py +++ b/ceph-mon/actions/pool_get.py @@ -14,11 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
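
Editor's note: the action-script hunks above and below all delete the same import bootstrap. Because the charm's Python libraries lived only under hooks/ (and sometimes lib/), every action had to graft those directories onto sys.path before charmhelpers or charms_ceph would import. A condensed sketch of the removed idiom, reassembled from the deleted lines:

    import os
    import sys

    _path = os.path.dirname(os.path.realpath(__file__))
    _hooks = os.path.abspath(os.path.join(_path, "../hooks"))
    _lib = os.path.abspath(os.path.join(_path, "../lib"))


    def _add_path(path):
        # Insert at index 1 so the script's own directory keeps priority.
        if path not in sys.path:
            sys.path.insert(1, path)


    _add_path(_hooks)
    _add_path(_lib)

    # Only after the path surgery do the charm imports resolve:
    from charmhelpers.core.hookenv import log  # noqa: E402

With the charm now assembled by charmcraft and its libraries staged onto the interpreter's path (see the charmcraft.yaml hunk below), plain top-level imports suffice and this boilerplate can be dropped wholesale.
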
-import sys from subprocess import check_output, CalledProcessError -sys.path.append('hooks') - from charmhelpers.core.hookenv import log, action_set, action_get, action_fail if __name__ == '__main__': diff --git a/ceph-mon/actions/pool_set.py b/ceph-mon/actions/pool_set.py index 39ee9345..fafa6898 100755 --- a/ceph-mon/actions/pool_set.py +++ b/ceph-mon/actions/pool_set.py @@ -15,10 +15,6 @@ # limitations under the License. from subprocess import CalledProcessError -import sys - -sys.path.append('lib') -sys.path.append('hooks') from charmhelpers.core.hookenv import action_get, log, action_fail from charms_ceph.broker import handle_set_pool_value diff --git a/ceph-mon/actions/pool_statistics.py b/ceph-mon/actions/pool_statistics.py index 30635fb3..e6e8e796 100755 --- a/ceph-mon/actions/pool_statistics.py +++ b/ceph-mon/actions/pool_statistics.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import check_output, CalledProcessError from charmhelpers.core.hookenv import log, action_set, action_fail diff --git a/ceph-mon/actions/purge_osd.py b/ceph-mon/actions/purge_osd.py index 29328075..e884186f 100755 --- a/ceph-mon/actions/purge_osd.py +++ b/ceph-mon/actions/purge_osd.py @@ -26,16 +26,12 @@ CalledProcessError, ) -import sys -sys.path.append('lib') -sys.path.append('hooks') - - from charmhelpers.core.hookenv import ( function_get, log, function_fail ) + from charmhelpers.core.host import cmp_pkgrevno from charmhelpers.contrib.storage.linux import ceph from charms_ceph.utils import get_osd_weight diff --git a/ceph-mon/actions/remove_cache_tier.py b/ceph-mon/actions/remove_cache_tier.py index e0c3444f..18c816c5 100755 --- a/ceph-mon/actions/remove_cache_tier.py +++ b/ceph-mon/actions/remove_cache_tier.py @@ -15,9 +15,6 @@ # limitations under the License. from subprocess import CalledProcessError -import sys - -sys.path.append('hooks') from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists from charmhelpers.core.hookenv import action_get, log, action_fail diff --git a/ceph-mon/actions/remove_pool_snapshot.py b/ceph-mon/actions/remove_pool_snapshot.py index b451b99e..065f6f67 100755 --- a/ceph-mon/actions/remove_pool_snapshot.py +++ b/ceph-mon/actions/remove_pool_snapshot.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot diff --git a/ceph-mon/actions/rename_pool.py b/ceph-mon/actions/rename_pool.py index ba7f7ac2..7a759d15 100755 --- a/ceph-mon/actions/rename_pool.py +++ b/ceph-mon/actions/rename_pool.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys - -sys.path.append('hooks') from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import rename_pool diff --git a/ceph-mon/actions/security_checklist.py b/ceph-mon/actions/security_checklist.py index 23b1caf1..8bc1b27b 100755 --- a/ceph-mon/actions/security_checklist.py +++ b/ceph-mon/actions/security_checklist.py @@ -16,8 +16,6 @@ import sys -sys.path.append('hooks') - import charmhelpers.contrib.openstack.audits as audits from charmhelpers.contrib.openstack.audits import ( openstack_security_guide, diff --git a/ceph-mon/actions/set_noout.py b/ceph-mon/actions/set_noout.py index 145c6988..47ebad80 100755 --- a/ceph-mon/actions/set_noout.py +++ b/ceph-mon/actions/set_noout.py @@ -14,10 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys -sys.path.append('hooks') from charmhelpers.core.hookenv import action_set, action_fail -sys.path.append('lib') from charms_ceph.utils import osd_noout if __name__ == '__main__': diff --git a/ceph-mon/actions/set_pool_max_bytes.py b/ceph-mon/actions/set_pool_max_bytes.py index d5893c73..7ffc662a 100755 --- a/ceph-mon/actions/set_pool_max_bytes.py +++ b/ceph-mon/actions/set_pool_max_bytes.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import set_pool_quota diff --git a/ceph-mon/actions/show_disk_free.py b/ceph-mon/actions/show_disk_free.py index 1f38f094..1b372782 100755 --- a/ceph-mon/actions/show_disk_free.py +++ b/ceph-mon/actions/show_disk_free.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import check_output, CalledProcessError from charmhelpers.core.hookenv import log, action_get, action_set, action_fail diff --git a/ceph-mon/actions/snapshot_pool.py b/ceph-mon/actions/snapshot_pool.py index a147b755..251d3fe1 100755 --- a/ceph-mon/actions/snapshot_pool.py +++ b/ceph-mon/actions/snapshot_pool.py @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -sys.path.append('hooks') from subprocess import CalledProcessError from charmhelpers.core.hookenv import action_get, log, action_fail from charmhelpers.contrib.storage.linux.ceph import snapshot_pool diff --git a/ceph-mon/actions/unset_noout.py b/ceph-mon/actions/unset_noout.py index 36be4a69..30035cc9 100755 --- a/ceph-mon/actions/unset_noout.py +++ b/ceph-mon/actions/unset_noout.py @@ -14,10 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys -sys.path.append('hooks') from charmhelpers.core.hookenv import action_set, action_fail -sys.path.append('lib') from charms_ceph.utils import osd_noout if __name__ == '__main__': diff --git a/ceph-mon/charm-helpers-hooks.yaml b/ceph-mon/charm-helpers-hooks.yaml deleted file mode 100644 index df1e68a5..00000000 --- a/ceph-mon/charm-helpers-hooks.yaml +++ /dev/null @@ -1,21 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: hooks/charmhelpers -include: - - core - - osplatform - - cli - - fetch - - contrib.storage.linux - - payload.execd - - contrib.openstack - - contrib.network.ip - - contrib.hahelpers - - contrib.openstack: - - alternatives - - audits - - exceptions - - utils - - contrib.charmsupport - - contrib.hardening|inc=* - - fetch.python - - contrib.openstack.policyd diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index b3a85236..97af1335 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -2,23 +2,25 @@ type: charm parts: charm: - plugin: dump - source: . prime: - actions/* - - files/* - - hooks/* - lib/* - templates/* - - actions.yaml - - config.yaml - - copyright - - hardening.yaml - - icon.svg - - LICENSE - - Makefile - - metadata.yaml - - README.md + after: + - update-certificates + charm-python-packages: + # Use the updated version of setuptools (needed by jinja2). + - setuptools + build-packages: + - git + + update-certificates: + # Ensure that certificates in the base image are up-to-date. + plugin: nil + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates bases: - build-on: diff --git a/ceph-mon/hooks/__init__.py b/ceph-mon/hooks/__init__.py deleted file mode 100644 index 9b088de8..00000000 --- a/ceph-mon/hooks/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
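
Editor's note: the run of mode-120000 deletions that follows removes the classic hook entry points. Each hook name (admin-relation-changed, bootstrap-source-relation-changed, and so on) was a symlink to ceph_hooks.py, which branched on the name it was invoked under. A minimal, self-contained sketch of that dispatch style, for orientation only (the charm actually used charmhelpers' Hooks machinery, and the new src/charm.py presumably replaces all of it with framework event handlers):

    import os
    import sys


    def admin_relation_changed():
        """Handle the admin-relation-changed hook."""


    HOOKS = {
        'admin-relation-changed': admin_relation_changed,
    }

    if __name__ == '__main__':
        # Juju executes the symlink, so argv[0] carries the hook name.
        name = os.path.basename(sys.argv[0])
        try:
            HOOKS[name]()
        except KeyError:
            sys.exit('Unknown hook: {}'.format(name))

Once dispatch moves into src/charm.py the symlink farm serves no purpose, which is why so many 120000 entries disappear in one commit.
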
diff --git a/ceph-mon/hooks/admin-relation-changed b/ceph-mon/hooks/admin-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/admin-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/admin-relation-joined b/ceph-mon/hooks/admin-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/admin-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/bootstrap-source-relation-changed b/ceph-mon/hooks/bootstrap-source-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/bootstrap-source-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/bootstrap-source-relation-departed b/ceph-mon/hooks/bootstrap-source-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/bootstrap-source-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/charmhelpers/__init__.py b/ceph-mon/hooks/charmhelpers/__init__.py deleted file mode 100644 index ddf30450..00000000 --- a/ceph-mon/hooks/charmhelpers/__init__.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -import functools -import inspect -import subprocess - - -try: - import yaml # NOQA:F401 -except ImportError: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # NOQA:F401 - - -# Holds a list of mapping of mangled function names that have been deprecated -# using the @deprecate decorator below. This is so that the warning is only -# printed once for each usage of the function. -__deprecated_functions = {} - - -def deprecate(warning, date=None, log=None): - """Add a deprecation warning the first time the function is used. - - The date which is a string in semi-ISO8660 format indicates the year-month - that the function is officially going to be removed. - - usage: - - @deprecate('use core/fetch/add_source() instead', '2017-04') - def contributed_add_source_thing(...): - ... - - And it then prints to the log ONCE that the function is deprecated. - The reason for passing the logging function (log) is so that hookenv.log - can be used for a charm if needed. - - :param warning: String to indicate what is to be used instead. - :param date: Optional string in YYYY-MM format to indicate when the - function will definitely (probably) be removed. - :param log: The log function to call in order to log. 
If None, logs to - stdout - """ - def wrap(f): - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - try: - module = inspect.getmodule(f) - file = inspect.getsourcefile(f) - lines = inspect.getsourcelines(f) - f_name = "{}-{}-{}..{}-{}".format( - module.__name__, file, lines[0], lines[-1], f.__name__) - except (IOError, TypeError): - # assume it was local, so just use the name of the function - f_name = f.__name__ - if f_name not in __deprecated_functions: - __deprecated_functions[f_name] = True - s = "DEPRECATION WARNING: Function {} is being removed".format( - f.__name__) - if date: - s = "{} on/around {}".format(s, date) - if warning: - s = "{} : {}".format(s, warning) - if log: - log(s) - else: - print(s) - return f(*args, **kwargs) - return wrapped_f - return wrap diff --git a/ceph-mon/hooks/charmhelpers/cli/__init__.py b/ceph-mon/hooks/charmhelpers/cli/__init__.py deleted file mode 100644 index 2b0c4b7a..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/__init__.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import argparse -import sys - -import charmhelpers.core.unitdata - - -class OutputFormatter(object): - def __init__(self, outfile=sys.stdout): - self.formats = ( - "raw", - "json", - "py", - "yaml", - "csv", - "tab", - ) - self.outfile = outfile - - def add_arguments(self, argument_parser): - formatgroup = argument_parser.add_mutually_exclusive_group() - choices = self.supported_formats - formatgroup.add_argument("--format", metavar='FMT', - help="Select output format for returned data, " - "where FMT is one of: {}".format(choices), - choices=choices, default='raw') - for fmt in self.formats: - fmtfunc = getattr(self, fmt) - formatgroup.add_argument("-{}".format(fmt[0]), - "--{}".format(fmt), action='store_const', - const=fmt, dest='format', - help=fmtfunc.__doc__) - - @property - def supported_formats(self): - return self.formats - - def raw(self, output): - """Output data as raw string (default)""" - if isinstance(output, (list, tuple)): - output = '\n'.join(map(str, output)) - self.outfile.write(str(output)) - - def py(self, output): - """Output data as a nicely-formatted python data structure""" - import pprint - pprint.pprint(output, stream=self.outfile) - - def json(self, output): - """Output data in JSON format""" - import json - json.dump(output, self.outfile) - - def yaml(self, output): - """Output data in YAML format""" - import yaml - yaml.safe_dump(output, self.outfile) - - def csv(self, output): - """Output data as excel-compatible CSV""" - import csv - csvwriter = csv.writer(self.outfile) - csvwriter.writerows(output) - - def tab(self, output): - """Output data in excel-compatible tab-delimited format""" - import csv - csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) - csvwriter.writerows(output) - - def format_output(self, output, fmt='raw'): - fmtfunc = getattr(self, fmt) - fmtfunc(output) - - -class CommandLine(object): - argument_parser = None - subparsers 
= None - formatter = None - exit_code = 0 - - def __init__(self): - if not self.argument_parser: - self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') - if not self.formatter: - self.formatter = OutputFormatter() - self.formatter.add_arguments(self.argument_parser) - if not self.subparsers: - self.subparsers = self.argument_parser.add_subparsers(help='Commands') - - def subcommand(self, command_name=None): - """ - Decorate a function as a subcommand. Use its arguments as the - command-line arguments""" - def wrapper(decorated): - cmd_name = command_name or decorated.__name__ - subparser = self.subparsers.add_parser(cmd_name, - description=decorated.__doc__) - for args, kwargs in describe_arguments(decorated): - subparser.add_argument(*args, **kwargs) - subparser.set_defaults(func=decorated) - return decorated - return wrapper - - def test_command(self, decorated): - """ - Subcommand is a boolean test function, so bool return values should be - converted to a 0/1 exit code. - """ - decorated._cli_test_command = True - return decorated - - def no_output(self, decorated): - """ - Subcommand is not expected to return a value, so don't print a spurious None. - """ - decorated._cli_no_output = True - return decorated - - def subcommand_builder(self, command_name, description=None): - """ - Decorate a function that builds a subcommand. Builders should accept a - single argument (the subparser instance) and return the function to be - run as the command.""" - def wrapper(decorated): - subparser = self.subparsers.add_parser(command_name) - func = decorated(subparser) - subparser.set_defaults(func=func) - subparser.description = description or func.__doc__ - return wrapper - - def run(self): - "Run cli, processing arguments and executing subcommands." - arguments = self.argument_parser.parse_args() - argspec = inspect.getfullargspec(arguments.func) - vargs = [] - for arg in argspec.args: - vargs.append(getattr(arguments, arg)) - if argspec.varargs: - vargs.extend(getattr(arguments, argspec.varargs)) - output = arguments.func(*vargs) - if getattr(arguments.func, '_cli_test_command', False): - self.exit_code = 0 if output else 1 - output = '' - if getattr(arguments.func, '_cli_no_output', False): - output = '' - self.formatter.format_output(output, arguments.format) - if charmhelpers.core.unitdata._KV: - charmhelpers.core.unitdata._KV.flush() - - -cmdline = CommandLine() - - -def describe_arguments(func): - """ - Analyze a function's signature and return a data structure suitable for - passing in as arguments to an argparse parser's add_argument() method.""" - - argspec = inspect.getfullargspec(func) - # we should probably raise an exception somewhere if func includes **kwargs - if argspec.defaults: - positional_args = argspec.args[:-len(argspec.defaults)] - keyword_names = argspec.args[-len(argspec.defaults):] - for arg, default in zip(keyword_names, argspec.defaults): - yield ('--{}'.format(arg),), {'default': default} - else: - positional_args = argspec.args - - for arg in positional_args: - yield (arg,), {} - if argspec.varargs: - yield (argspec.varargs,), {'nargs': '*'} diff --git a/ceph-mon/hooks/charmhelpers/cli/benchmark.py b/ceph-mon/hooks/charmhelpers/cli/benchmark.py deleted file mode 100644 index 303af14b..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/benchmark.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
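
Editor's note, before the wrapper modules below: the deleted cli package is a small argparse-based subcommand framework, with CommandLine owning the parser and modules like host.py and benchmark.py registering functions through the @cmdline.subcommand decorator. A standalone sketch of the same decorator pattern (hypothetical names, not the charmhelpers API itself):

    import argparse

    parser = argparse.ArgumentParser(description='Perform common tasks')
    subparsers = parser.add_subparsers(dest='cmd', required=True)


    def subcommand(func):
        # Register func as a subcommand named after the function itself.
        sub = subparsers.add_parser(func.__name__, description=func.__doc__)
        sub.set_defaults(func=func)
        return func


    @subcommand
    def mounts():
        """List mounts"""
        print('would list mounts here')


    if __name__ == '__main__':
        args = parser.parse_args()
        args.func()
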
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import cmdline -from charmhelpers.contrib.benchmark import Benchmark - - -@cmdline.subcommand(command_name='benchmark-start') -def start(): - Benchmark.start() - - -@cmdline.subcommand(command_name='benchmark-finish') -def finish(): - Benchmark.finish() - - -@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") -def service(subparser): - subparser.add_argument("value", help="The composite score.") - subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") - subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") - return Benchmark.set_composite_score diff --git a/ceph-mon/hooks/charmhelpers/cli/commands.py b/ceph-mon/hooks/charmhelpers/cli/commands.py deleted file mode 100644 index b9310565..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/commands.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module loads sub-modules into the python runtime so they can be -discovered via the inspect module. In order to prevent flake8 from (rightfully) -telling us these are unused modules, throw a ' # noqa' at the end of each import -so that the warning is suppressed. -""" - -from . import CommandLine # noqa - -""" -Import the sub-modules which have decorated subcommands to register with chlp. -""" -from . import host # noqa -from . import benchmark # noqa -from . import unitdata # noqa -from . import hookenv # noqa diff --git a/ceph-mon/hooks/charmhelpers/cli/hookenv.py b/ceph-mon/hooks/charmhelpers/cli/hookenv.py deleted file mode 100644 index bd72f448..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/hookenv.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . 
import cmdline -from charmhelpers.core import hookenv - - -cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) -cmdline.subcommand('service-name')(hookenv.service_name) -cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/ceph-mon/hooks/charmhelpers/cli/host.py b/ceph-mon/hooks/charmhelpers/cli/host.py deleted file mode 100644 index 40396849..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/host.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import cmdline -from charmhelpers.core import host - - -@cmdline.subcommand() -def mounts(): - "List mounts" - return host.mounts() - - -@cmdline.subcommand_builder('service', description="Control system services") -def service(subparser): - subparser.add_argument("action", help="The action to perform (start, stop, etc...)") - subparser.add_argument("service_name", help="Name of the service to control") - return host.service diff --git a/ceph-mon/hooks/charmhelpers/cli/unitdata.py b/ceph-mon/hooks/charmhelpers/cli/unitdata.py deleted file mode 100644 index acce846f..00000000 --- a/ceph-mon/hooks/charmhelpers/cli/unitdata.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . 
import cmdline -from charmhelpers.core import unitdata - - -@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") -def unitdata_cmd(subparser): - nested = subparser.add_subparsers() - - get_cmd = nested.add_parser('get', help='Retrieve data') - get_cmd.add_argument('key', help='Key to retrieve the value of') - get_cmd.set_defaults(action='get', value=None) - - getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') - getrange_cmd.add_argument('key', metavar='prefix', - help='Prefix of the keys to retrieve') - getrange_cmd.set_defaults(action='getrange', value=None) - - set_cmd = nested.add_parser('set', help='Store data') - set_cmd.add_argument('key', help='Key to set') - set_cmd.add_argument('value', help='Value to store') - set_cmd.set_defaults(action='set') - - def _unitdata_cmd(action, key, value): - if action == 'get': - return unitdata.kv().get(key) - elif action == 'getrange': - return unitdata.kv().getrange(key) - elif action == 'set': - unitdata.kv().set(key, value) - unitdata.kv().flush() - return '' - return _unitdata_cmd diff --git a/ceph-mon/hooks/charmhelpers/contrib/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py deleted file mode 100644 index bad7a533..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright 2012-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Compatibility with the nrpe-external-master charm""" -# -# Authors: -# Matthew Wedgwood - -import glob -import grp -import os -import pwd -import re -import shlex -import shutil -import subprocess -import yaml - -from charmhelpers.core.hookenv import ( - application_name, - config, - hook_name, - local_unit, - log, - relation_get, - relation_ids, - relation_set, - relations_of_type, -) - -from charmhelpers.core.host import service -from charmhelpers.core import host - -# This module adds compatibility with the nrpe-external-master and plain nrpe -# subordinate charms. To use it in your charm: -# -# 1. Update metadata.yaml -# -# provides: -# (...) -# nrpe-external-master: -# interface: nrpe-external-master -# scope: container -# -# and/or -# -# provides: -# (...) -# local-monitors: -# interface: local-monitors -# scope: container - -# -# 2. Add the following to config.yaml -# -# nagios_context: -# default: "juju" -# type: string -# description: | -# Used by the nrpe subordinate charms. -# A string that will be prepended to instance name to set the host name -# in nagios. So for instance the hostname would be something like: -# juju-myservice-0 -# If you're running multiple environments with the same services in them -# this allows you to differentiate between them. -# nagios_servicegroups: -# default: "" -# type: string -# description: | -# A comma-separated list of nagios servicegroups. -# If left empty, the nagios_context will be used as the servicegroup -# -# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master -# -# 4. Update your hooks.py with something like this: -# -# from charmsupport.nrpe import NRPE -# (...) -# def update_nrpe_config(): -# nrpe_compat = NRPE() -# nrpe_compat.add_check( -# shortname = "myservice", -# description = "Check MyService", -# check_cmd = "check_http -w 2 -c 10 http://localhost" -# ) -# nrpe_compat.add_check( -# "myservice_other", -# "Check for widget failures", -# check_cmd = "/srv/myapp/scripts/widget_check" -# ) -# nrpe_compat.write() -# -# def config_changed(): -# (...) -# update_nrpe_config() -# -# def nrpe_external_master_relation_changed(): -# update_nrpe_config() -# -# def local_monitors_relation_changed(): -# update_nrpe_config() -# -# 4.a If your charm is a subordinate charm set primary=False -# -# from charmsupport.nrpe import NRPE -# (...) -# def update_nrpe_config(): -# nrpe_compat = NRPE(primary=False) -# -# 5. 
ln -s hooks.py nrpe-external-master-relation-changed -# ln -s hooks.py local-monitors-relation-changed - - -class CheckException(Exception): - pass - - -class Check(object): - shortname_re = '[A-Za-z0-9-_.@]+$' - service_template = (""" -#--------------------------------------------------- -# This file is Juju managed -#--------------------------------------------------- -define service {{ - use active-service - host_name {nagios_hostname} - service_description {nagios_hostname}[{shortname}] """ - """{description} - check_command check_nrpe!{command} - servicegroups {nagios_servicegroup} -{service_config_overrides} -}} -""") - - def __init__(self, shortname, description, check_cmd, max_check_attempts=None): - super(Check, self).__init__() - # XXX: could be better to calculate this from the service name - if not re.match(self.shortname_re, shortname): - raise CheckException("shortname must match {}".format( - Check.shortname_re)) - self.shortname = shortname - self.command = "check_{}".format(shortname) - # Note: a set of invalid characters is defined by the - # Nagios server config - # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= - self.description = description - self.check_cmd = self._locate_cmd(check_cmd) - self.max_check_attempts = max_check_attempts - - def _get_check_filename(self): - return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) - - def _get_service_filename(self, hostname): - return os.path.join(NRPE.nagios_exportdir, - 'service__{}_{}.cfg'.format(hostname, self.command)) - - def _locate_cmd(self, check_cmd): - search_path = ( - '/usr/lib/nagios/plugins', - '/usr/local/lib/nagios/plugins', - ) - parts = shlex.split(check_cmd) - for path in search_path: - if os.path.exists(os.path.join(path, parts[0])): - command = os.path.join(path, parts[0]) - if len(parts) > 1: - safe_args = [shlex.quote(arg) for arg in parts[1:]] - command += " " + " ".join(safe_args) - return command - log('Check command not found: {}'.format(parts[0])) - return '' - - def _remove_service_files(self): - if not os.path.exists(NRPE.nagios_exportdir): - return - for f in os.listdir(NRPE.nagios_exportdir): - if f.endswith('_{}.cfg'.format(self.command)): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) - - def remove(self, hostname): - nrpe_check_file = self._get_check_filename() - if os.path.exists(nrpe_check_file): - os.remove(nrpe_check_file) - self._remove_service_files() - - def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = self._get_check_filename() - with open(nrpe_check_file, 'w') as nrpe_check_config: - nrpe_check_config.write("# check {}\n".format(self.shortname)) - if nagios_servicegroups: - nrpe_check_config.write( - "# The following header was added automatically by juju\n") - nrpe_check_config.write( - "# Modifying it will affect nagios monitoring and alerting\n") - nrpe_check_config.write( - "# servicegroups: {}\n".format(nagios_servicegroups)) - nrpe_check_config.write("command[{}]={}\n".format( - self.command, self.check_cmd)) - - if not os.path.exists(NRPE.nagios_exportdir): - log('Not writing service config as {} is not accessible'.format( - NRPE.nagios_exportdir)) - else: - self.write_service_config(nagios_context, hostname, - nagios_servicegroups) - - def write_service_config(self, nagios_context, hostname, - nagios_servicegroups): - self._remove_service_files() - - if self.max_check_attempts: - service_config_overrides = ' max_check_attempts {}'.format( - self.max_check_attempts - ) # Note indentation is here rather 
than in the template to avoid trailing spaces - else: - service_config_overrides = '' # empty string to avoid printing 'None' - templ_vars = { - 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_servicegroups, - 'description': self.description, - 'shortname': self.shortname, - 'command': self.command, - 'service_config_overrides': service_config_overrides, - } - nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = self._get_service_filename(hostname) - with open(nrpe_service_file, 'w') as nrpe_service_config: - nrpe_service_config.write(str(nrpe_service_text)) - - def run(self): - subprocess.call(self.check_cmd) - - -class NRPE(object): - nagios_logdir = '/var/log/nagios' - nagios_exportdir = '/var/lib/nagios/export' - nrpe_confdir = '/etc/nagios/nrpe.d' - homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server - - def __init__(self, hostname=None, primary=True): - super(NRPE, self).__init__() - self.config = config() - self.primary = primary - self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: - self.nagios_servicegroups = self.config['nagios_servicegroups'] - else: - self.nagios_servicegroups = self.nagios_context - self.unit_name = local_unit().replace('/', '-') - if hostname: - self.hostname = hostname - else: - nagios_hostname = get_nagios_hostname() - if nagios_hostname: - self.hostname = nagios_hostname - else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) - self.checks = [] - # Iff in an nrpe-external-master relation hook, set primary status - relation = relation_ids('nrpe-external-master') - if relation: - log("Setting charm primary status {}".format(primary)) - for rid in relation: - relation_set(relation_id=rid, relation_settings={'primary': self.primary}) - self.remove_check_queue = set() - - @classmethod - def does_nrpe_conf_dir_exist(cls): - """Return True if th nrpe_confdif directory exists.""" - return os.path.isdir(cls.nrpe_confdir) - - def add_check(self, *args, **kwargs): - shortname = None - if kwargs.get('shortname') is None: - if len(args) > 0: - shortname = args[0] - else: - shortname = kwargs['shortname'] - - self.checks.append(Check(*args, **kwargs)) - try: - self.remove_check_queue.remove(shortname) - except KeyError: - pass - - def remove_check(self, *args, **kwargs): - if kwargs.get('shortname') is None: - raise ValueError('shortname of check must be specified') - - # Use sensible defaults if they're not specified - these are not - # actually used during removal, but they're required for constructing - # the Check object; check_disk is chosen because it's part of the - # nagios-plugins-basic package. - if kwargs.get('check_cmd') is None: - kwargs['check_cmd'] = 'check_disk' - if kwargs.get('description') is None: - kwargs['description'] = '' - - check = Check(*args, **kwargs) - check.remove(self.hostname) - self.remove_check_queue.add(kwargs['shortname']) - - def write(self): - try: - nagios_uid = pwd.getpwnam('nagios').pw_uid - nagios_gid = grp.getgrnam('nagios').gr_gid - except Exception: - log("Nagios user not set up, nrpe checks not updated") - return - - if not os.path.exists(NRPE.nagios_logdir): - os.mkdir(NRPE.nagios_logdir) - os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) - - nrpe_monitors = {} - monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} - - # check that the charm can write to the conf dir. If not, then nagios - # probably isn't installed, and we can defer. 
- if not self.does_nrpe_conf_dir_exist(): - return - - for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname, - self.nagios_servicegroups) - nrpe_monitors[nrpecheck.shortname] = { - "command": nrpecheck.command, - } - # If we were passed max_check_attempts, add that to the relation data - if nrpecheck.max_check_attempts is not None: - nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts - - # update-status hooks are configured to firing every 5 minutes by - # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unnecessary alerts. Let's not restart - # on update-status hooks. - if not hook_name() == 'update-status': - service('restart', 'nagios-nrpe-server') - - monitor_ids = relation_ids("local-monitors") + \ - relation_ids("nrpe-external-master") - for rid in monitor_ids: - reldata = relation_get(unit=local_unit(), rid=rid) - if 'monitors' in reldata: - # update the existing set of monitors with the new data - old_monitors = yaml.safe_load(reldata['monitors']) - old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] - # remove keys that are in the remove_check_queue - old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() - if k not in self.remove_check_queue} - # update/add nrpe_monitors - old_nrpe_monitors.update(nrpe_monitors) - old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors - # write back to the relation - relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) - else: - # write a brand new set of monitors, as no existing ones. - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) - - self.remove_check_queue.clear() - - -def get_nagios_hostcontext(relation_name='nrpe-external-master'): - """ - Query relation with nrpe subordinate, return the nagios_host_context - - :param str relation_name: Name of relation nrpe sub joined to - """ - for rel in relations_of_type(relation_name): - if 'nagios_host_context' in rel: - return rel['nagios_host_context'] - - -def get_nagios_hostname(relation_name='nrpe-external-master'): - """ - Query relation with nrpe subordinate, return the nagios_hostname - - :param str relation_name: Name of relation nrpe sub joined to - """ - for rel in relations_of_type(relation_name): - if 'nagios_hostname' in rel: - return rel['nagios_hostname'] - - -def get_nagios_unit_name(relation_name='nrpe-external-master'): - """ - Return the nagios unit name prepended with host_context if needed - - :param str relation_name: Name of relation nrpe sub joined to - """ - host_context = get_nagios_hostcontext(relation_name) - if host_context: - unit = "%s:%s" % (host_context, local_unit()) - else: - unit = local_unit() - return unit - - -def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): - """ - Add checks for each service in list - - :param NRPE nrpe: NRPE object to add check to - :param list services: List of services to check - :param str unit_name: Unit name to use in check description - :param bool immediate_check: For sysv init, run the service check immediately - """ - for svc in services: - # Don't add a check for these services from neutron-gateway - if svc in ['ext-port', 'os-charm-phy-nic-mtu']: - next - - upstart_init = '/etc/init/%s.conf' % svc - sysv_init = '/etc/init.d/%s' % svc - - if host.init_is_systemd(service_name=svc): - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_systemd.py %s' % svc - ) - elif 
os.path.exists(upstart_init): - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) - elif os.path.exists(sysv_init): - cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) - croncmd = ( - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-e -s /etc/init.d/%s status' % svc - ) - cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) - f = open(cronpath, 'w') - f.write(cron_file) - f.close() - nrpe.add_check( - shortname=svc, - description='service check {%s}' % unit_name, - check_cmd='check_status_file.py -f %s' % checkpath, - ) - # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail - # (LP: #1670223). - if immediate_check and os.path.isdir(nrpe.homedir): - f = open(checkpath, 'w') - subprocess.call( - croncmd.split(), - stdout=f, - stderr=subprocess.STDOUT - ) - f.close() - os.chmod(checkpath, 0o644) - - -def copy_nrpe_checks(nrpe_files_dir=None): - """ - Copy the nrpe checks into place - - """ - NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - if nrpe_files_dir is None: - # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks - for segment in ['.', 'hooks']: - nrpe_files_dir = os.path.abspath(os.path.join( - os.getenv('CHARM_DIR'), - segment, - 'charmhelpers', - 'contrib', - 'openstack', - 'files')) - if os.path.isdir(nrpe_files_dir): - break - else: - raise RuntimeError("Couldn't find charmhelpers directory") - if not os.path.exists(NAGIOS_PLUGINS): - os.makedirs(NAGIOS_PLUGINS) - for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): - if os.path.isfile(fname): - shutil.copy2(fname, - os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) - - -def add_haproxy_checks(nrpe, unit_name): - """ - Add checks for each service in list - - :param NRPE nrpe: NRPE object to add check to - :param str unit_name: Unit name to use in check description - """ - nrpe.add_check( - shortname='haproxy_servers', - description='Check HAProxy {%s}' % unit_name, - check_cmd='check_haproxy.sh') - nrpe.add_check( - shortname='haproxy_queue', - description='Check HAProxy queue depth {%s}' % unit_name, - check_cmd='check_haproxy_queue_depth.sh') - - -def remove_deprecated_check(nrpe, deprecated_services): - """ - Remove checks for deprecated services in list - - :param nrpe: NRPE object to remove check from - :type nrpe: NRPE - :param deprecated_services: List of deprecated services that are removed - :type deprecated_services: list - """ - for dep_svc in deprecated_services: - log('Deprecated service: {}'.format(dep_svc)) - nrpe.remove_check(shortname=dep_svc) - - -def add_deferred_restarts_check(nrpe): - """ - Add NRPE check for services with deferred restarts. - - :param NRPE nrpe: NRPE object to add check to - """ - unit_name = local_unit().replace('/', '-') - shortname = unit_name + '_deferred_restarts' - check_cmd = 'check_deferred_restarts.py --application {}'.format( - application_name()) - - log('Adding deferred restarts nrpe check: {}'.format(shortname)) - nrpe.add_check( - shortname=shortname, - description='Check deferred service restarts {}'.format(unit_name), - check_cmd=check_cmd) - - -def remove_deferred_restarts_check(nrpe): - """ - Remove NRPE check for services with deferred service restarts. 
- - :param NRPE nrpe: NRPE object to remove check from - """ - unit_name = local_unit().replace('/', '-') - shortname = unit_name + '_deferred_restarts' - check_cmd = 'check_deferred_restarts.py --application {}'.format( - application_name()) - - log('Removing deferred restarts nrpe check: {}'.format(shortname)) - nrpe.remove_check( - shortname=shortname, - description='Check deferred service restarts {}'.format(unit_name), - check_cmd=check_cmd) diff --git a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py b/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py deleted file mode 100644 index f7c6fbdc..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' -Functions for managing volumes in juju units. One volume is supported per unit. -Subordinates may have their own storage, provided it is on its own partition. - -Configuration stanzas:: - - volume-ephemeral: - type: boolean - default: true - description: > - If false, a volume is mounted as specified in "volume-map" - If true, ephemeral storage will be used, meaning that log data - will only exist as long as the machine. YOU HAVE BEEN WARNED. - volume-map: - type: string - default: {} - description: > - YAML map of units to device names, e.g: - "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" - Service units will raise a configure-error if volume-ephemeral - is 'true' and no volume-map value is set. Use 'juju set' to set a - value and 'juju resolved' to complete configuration. 
- -Usage:: - - from charmsupport.volumes import configure_volume, VolumeConfigurationError - from charmsupport.hookenv import log, ERROR - def pre_mount_hook(): - stop_service('myservice') - def post_mount_hook(): - start_service('myservice') - - if __name__ == '__main__': - try: - configure_volume(before_change=pre_mount_hook, - after_change=post_mount_hook) - except VolumeConfigurationError: - log('Storage could not be configured', ERROR) - -''' - -# XXX: Known limitations -# - fstab is neither consulted nor updated - -import os -from charmhelpers.core import hookenv -from charmhelpers.core import host -import yaml - - -MOUNT_BASE = '/srv/juju/volumes' - - -class VolumeConfigurationError(Exception): - '''Volume configuration data is missing or invalid''' - pass - - -def get_config(): - '''Gather and sanity-check volume configuration data''' - volume_config = {} - config = hookenv.config() - - errors = False - - if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): - volume_config['ephemeral'] = True - else: - volume_config['ephemeral'] = False - - # ensure volume_map is defined even if the YAML parse below fails - volume_map = None - try: - volume_map = yaml.safe_load(config.get('volume-map', '{}')) - except yaml.YAMLError as e: - hookenv.log("Error parsing YAML volume-map: {}".format(e), - hookenv.ERROR) - errors = True - if volume_map is None: - # probably an empty string, or the parse above failed - volume_map = {} - elif not isinstance(volume_map, dict): - hookenv.log("Volume-map should be a dictionary, not {}".format( - type(volume_map))) - errors = True - - volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) - if volume_config['device'] and volume_config['ephemeral']: - # asked for ephemeral storage but also defined a volume ID - hookenv.log('A volume is defined for this unit, but ephemeral ' - 'storage was requested', hookenv.ERROR) - errors = True - elif not volume_config['device'] and not volume_config['ephemeral']: - # asked for permanent storage but did not define volume ID - hookenv.log('Ephemeral storage was requested, but there is no volume ' - 'defined for this unit.', hookenv.ERROR) - errors = True - - unit_mount_name = hookenv.local_unit().replace('/', '-') - volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) - - if errors: - return None - return volume_config - - -def mount_volume(config): - if os.path.exists(config['mountpoint']): - if not os.path.isdir(config['mountpoint']): - hookenv.log('Not a directory: {}'.format(config['mountpoint'])) - raise VolumeConfigurationError() - else: - host.mkdir(config['mountpoint']) - if os.path.ismount(config['mountpoint']): - unmount_volume(config) - if not host.mount(config['device'], config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def unmount_volume(config): - if os.path.ismount(config['mountpoint']): - if not host.umount(config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def managed_mounts(): - '''List of all mounted managed volumes''' - return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) - - -def configure_volume(before_change=lambda: None, after_change=lambda: None): - '''Set up storage (or don't) according to the charm's volume configuration. - Returns the mount point or "ephemeral". before_change and after_change - are optional functions to be called if the volume configuration changes.
- ''' - - config = get_config() - if not config: - hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) - raise VolumeConfigurationError() - - if config['ephemeral']: - if os.path.ismount(config['mountpoint']): - before_change() - unmount_volume(config) - after_change() - return 'ephemeral' - else: - # persistent storage - if os.path.ismount(config['mountpoint']): - mounts = dict(managed_mounts()) - if mounts.get(config['mountpoint']) != config['device']: - before_change() - unmount_volume(config) - mount_volume(config) - after_change() - else: - before_change() - mount_volume(config) - after_change() - return config['mountpoint'] diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py deleted file mode 100644 index a54702bc..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/apache.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import os - -from charmhelpers.core import host -from charmhelpers.core.hookenv import ( - config as config_get, - relation_get, - relation_ids, - related_units as relation_list, - log, - INFO, -) - -# This file contains the CA cert from the charm's ssl_ca configuration -# option; in future the file name should be updated to reflect that.
-CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' - - -def get_cert(cn=None): - # TODO: deal with multiple https endpoints via charm config - cert = config_get('ssl_cert') - key = config_get('ssl_key') - if not (cert and key): - log("Inspecting identity-service relations for SSL certificate.", - level=INFO) - cert = key = None - if cn: - ssl_cert_attr = 'ssl_cert_{}'.format(cn) - ssl_key_attr = 'ssl_key_{}'.format(cn) - else: - ssl_cert_attr = 'ssl_cert' - ssl_key_attr = 'ssl_key' - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if not cert: - cert = relation_get(ssl_cert_attr, - rid=r_id, unit=unit) - if not key: - key = relation_get(ssl_key_attr, - rid=r_id, unit=unit) - return (cert, key) - - -def get_ca_cert(): - ca_cert = config_get('ssl_ca') - if ca_cert is None: - log("Inspecting identity-service relations for CA SSL certificate.", - level=INFO) - for r_id in (relation_ids('identity-service') + - relation_ids('identity-credentials')): - for unit in relation_list(r_id): - if ca_cert is None: - ca_cert = relation_get('ca_cert', - rid=r_id, unit=unit) - return ca_cert - - -def retrieve_ca_cert(cert_file): - cert = None - if os.path.isfile(cert_file): - with open(cert_file, 'rb') as crt: - cert = crt.read() - return cert - - -def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py deleted file mode 100644 index 146beba6..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# Authors: -# James Page -# Adam Gandelman -# - -""" -Helpers for clustering and determining "cluster leadership" and other -clustering-related helpers. -""" - -import functools -import subprocess -import os -import time - -from socket import gethostname as get_unit_hostname - -from charmhelpers.core.hookenv import ( - log, - relation_ids, - related_units as relation_list, - relation_get, - config as config_get, - INFO, - DEBUG, - WARNING, - unit_get, - is_leader as juju_is_leader, - status_set, -) -from charmhelpers.core.host import ( - modulo_distribution, -) -from charmhelpers.core.decorators import ( - retry_on_exception, -) -from charmhelpers.core.strutils import ( - bool_from_string, -) - -DC_RESOURCE_NAME = 'DC' - - -class HAIncompleteConfig(Exception): - pass - - -class HAIncorrectConfig(Exception): - pass - - -class CRMResourceNotFound(Exception): - pass - - -class CRMDCNotFound(Exception): - pass - - -def is_elected_leader(resource): - """ - Returns True if the charm executing this is the elected cluster leader. - - It relies on three mechanisms to determine leadership: - 1. If juju is sufficiently new and leadership election is supported, - the is_leader command will be used. - 2.
If the charm is part of a corosync cluster, call corosync to - determine leadership. - 3. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit number". In - other words, the oldest surviving unit. - """ - try: - return juju_is_leader() - except NotImplementedError: - log('Juju leadership election feature not enabled' - ', using fallback support', - level=WARNING) - - if is_clustered(): - if not is_crm_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True - - -def is_clustered(): - for r_id in (relation_ids('ha') or []): - for unit in (relation_list(r_id) or []): - clustered = relation_get('clustered', - rid=r_id, - unit=unit) - if clustered: - return True - return False - - -def is_crm_dc(): - """ - Determine leadership by querying the pacemaker Designated Controller - """ - cmd = ['crm', 'status'] - try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') - except subprocess.CalledProcessError as ex: - raise CRMDCNotFound(str(ex)) - - current_dc = '' - for line in status.split('\n'): - if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - # - partition with quorum - current_dc = line.split(':')[1].split()[0] - if current_dc == get_unit_hostname(): - return True - elif current_dc == 'NONE': - raise CRMDCNotFound('Current DC: NONE') - - return False - - -@retry_on_exception(5, base_delay=2, - exc_type=(CRMResourceNotFound, CRMDCNotFound)) -def is_crm_leader(resource, retry=False): - """ - Returns True if the charm calling this is the elected corosync leader, - as returned by calling the external "crm" command. - - We allow this operation to be retried to avoid the possibility of getting a - false negative. See LP #1396246 for more info. - """ - if resource == DC_RESOURCE_NAME: - return is_crm_dc() - cmd = ['crm', 'resource', 'show', resource] - try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') - except subprocess.CalledProcessError: - status = None - - if status and get_unit_hostname() in status: - return True - - if status and "resource %s is NOT running" % (resource) in status: - raise CRMResourceNotFound("CRM resource %s not found" % (resource)) - - return False - - -def is_leader(resource): - log("is_leader is deprecated. Please consider using is_crm_leader " - "instead.", level=WARNING) - return is_crm_leader(resource) - - -def peer_units(peer_relation="cluster"): - peers = [] - for r_id in (relation_ids(peer_relation) or []): - for unit in (relation_list(r_id) or []): - peers.append(unit) - return peers - - -def peer_ips(peer_relation='cluster', addr_key='private-address'): - '''Return a dict of peers and their private-address''' - peers = {} - for r_id in relation_ids(peer_relation): - for unit in relation_list(r_id): - peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) - return peers - - -def oldest_peer(peers): - """Determines who the oldest peer is by comparing unit numbers.""" - local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) - for peer in peers: - remote_unit_no = int(peer.split('/')[1]) - if remote_unit_no < local_unit_no: - return False - return True - - -def eligible_leader(resource): - log("eligible_leader is deprecated. 
Please consider using " - "is_elected_leader instead.", level=WARNING) - return is_elected_leader(resource) - - -def https(): - ''' - Determines whether enough data has been provided in configuration - or relation data to configure HTTPS. - - returns: boolean - ''' - use_https = config_get('use-https') - if use_https and bool_from_string(use_https): - return True - if config_get('ssl_cert') and config_get('ssl_key'): - return True - for r_id in relation_ids('certificates'): - for unit in relation_list(r_id): - ca = relation_get('ca', rid=r_id, unit=unit) - if ca: - return True - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN - rel_state = [ - relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ca_cert', rid=r_id, unit=unit), - ] - # NOTE: works around (LP: #1203241) - if (None not in rel_state) and ('' not in rel_state): - return True - return False - - -def determine_api_port(public_port, singlenode_mode=False): - ''' - Determine correct API server listening port based on - existence of HTTPS reverse proxy and/or haproxy. - - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the API service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - if https(): - i += 1 - return public_port - (i * 10) - - -def determine_apache_port(public_port, singlenode_mode=False): - ''' - Description: Determine correct apache listening port based on public port + - state of the cluster. - - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the HAProxy service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - return public_port - (i * 10) - - -determine_apache_port_single = functools.partial( - determine_apache_port, singlenode_mode=True) - - -def get_hacluster_config(exclude_keys=None): - ''' - Obtains all relevant configuration from charm configuration required - for initiating a relation to hacluster: - - ha-bindiface, ha-mcastport, vip, os-internal-hostname, - os-admin-hostname, os-public-hostname, os-access-hostname - - param: exclude_keys: list of setting key(s) to be excluded. - returns: dict: A dict containing settings keyed by setting name. - raises: HAIncompleteConfig if settings are missing or incorrect. - ''' - settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', - 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] - conf = {} - for setting in settings: - if exclude_keys and setting in exclude_keys: - continue - - conf[setting] = config_get(setting) - - if not valid_hacluster_config(): - raise HAIncorrectConfig('Insufficient or incorrect config data to ' - 'configure hacluster.') - return conf - - -def valid_hacluster_config(): - ''' - Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname - must be set. - - Note: ha-bindiface and ha-mcastport both have defaults and will always - be set. We only care that either vip or dns-ha is set. - - :returns: boolean: valid config returns true. - raises: HAIncorrectConfig if settings conflict. - raises: HAIncompleteConfig if settings are missing.
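The port arithmetic in determine_api_port() and determine_apache_port() above is easiest to see with concrete numbers. A small illustrative sketch follows; the service and its standard port are hypothetical, not taken from this charm:

    # Each layer sitting in front of the API service shifts a listening port
    # down by 10: haproxy keeps the public port, apache (TLS termination)
    # takes public_port - 10, and the API service ends up two steps down.
    public_port = 9696                 # hypothetical standard public port

    apache_port = public_port - 10    # determine_apache_port(9696) when
                                      # clustered or in singlenode_mode
    api_port = public_port - 20       # determine_api_port(9696) when, in
                                      # addition, https() is True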
- ''' - vip = config_get('vip') - dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): - msg = ('HA: Either vip or dns-ha must be set but not both in order to ' - 'use high availability') - status_set('blocked', msg) - raise HAIncorrectConfig(msg) - - # If dns-ha then one of os-*-hostname must be set - if dns: - dns_settings = ['os-internal-hostname', 'os-admin-hostname', - 'os-public-hostname', 'os-access-hostname'] - # At this point it is unknown if one or all of the possible - # network spaces are in HA. Validate at least one is set which is - # the minimum required. - for setting in dns_settings: - if config_get(setting): - log('DNS HA: At least one hostname is set {}: {}' - ''.format(setting, config_get(setting)), - level=DEBUG) - return True - - msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' - 'DNS HA') - status_set('blocked', msg) - raise HAIncompleteConfig(msg) - - log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) - return True - - -def canonical_url(configs, vip_setting='vip'): - ''' - Returns the correct HTTP URL to this host given the state of HTTPS - configuration and hacluster. - - :configs : OSTemplateRenderer: A config templating object to inspect for - a complete https context. - - :vip_setting: str: Setting in charm config that specifies - VIP address. - ''' - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' - if is_clustered(): - addr = config_get(vip_setting) - else: - addr = unit_get('private-address') - return '%s://%s' % (scheme, addr) - - -def distributed_wait(modulo=None, wait=None, operation_name='operation'): - ''' Distribute operations by waiting based on modulo_distribution - - If modulo and/or wait are not set, check config_get for those values. - If config values are not set, default to modulo=3 and wait=30. - - :param modulo: int The modulo number creates the group distribution - :param wait: int The constant time wait value - :param operation_name: string Operation name for status message - i.e. 'restart' - :side effect: Calls config_get() - :side effect: Calls log() - :side effect: Calls status_set() - :side effect: Calls time.sleep() - ''' - if modulo is None: - modulo = config_get('modulo-nodes') or 3 - if wait is None: - wait = config_get('known-wait') or 30 - if juju_is_leader(): - # The leader should never wait - calculated_wait = 0 - else: - # non_zero_wait=True guarantees the non-leader who gets modulo 0 - # will still wait - calculated_wait = modulo_distribution(modulo=modulo, wait=wait, - non_zero_wait=True) - msg = "Waiting {} seconds for {} ...".format(calculated_wait, - operation_name) - log(msg, DEBUG) - status_set('maintenance', msg) - time.sleep(calculated_wait) - - -def get_managed_services_and_ports(services, external_ports, - external_services=None, - port_conv_f=determine_apache_port_single): - """Get the services and ports managed by this charm. - - Return only the services and corresponding ports that are managed by this - charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsibility for stopping and starting - haproxy to hacluster. - - Similarly, if a relation with hacluster exists then the ports returned by - this method correspond to those managed by the apache server rather than - haproxy. - - :param services: List of services. - :type services: List[str] - :param external_ports: List of ports managed by external services.
- :type external_ports: List[int] - :param external_services: List of services to be removed if ha relation is - present. - :type external_services: List[str] - :param port_conv_f: Function to apply to ports to calculate the ports - managed by services controlled by this charm. - :type port_conv_f: f() - :returns: A tuple containing a list of services first followed by a list of - ports. - :rtype: Tuple[List[str], List[int]] - """ - if external_services is None: - external_services = ['haproxy'] - if relation_ids('ha'): - for svc in external_services: - try: - services.remove(svc) - except ValueError: - pass - external_ports = [port_conv_f(p) for p in external_ports] - return services, external_ports diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md b/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md deleted file mode 100644 index 91280c03..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/README.hardening.md +++ /dev/null @@ -1,38 +0,0 @@ -# Juju charm-helpers hardening library - -## Description - -This library provides multiple implementations of system and application -hardening that conform to the standards of http://hardening.io/. - -Current implementations include: - - * OS - * SSH - * MySQL - * Apache - -## Requirements - -* Juju Charms - -## Usage - -1. Synchronise this library into your charm and add the harden() decorator - (from contrib.hardening.harden) to any functions or methods you want to use - to trigger hardening of your application/system. - -2. Add a config option called 'harden' to your charm config.yaml and set it to - a space-delimited list of hardening modules you want to run e.g. "os ssh" - -3. Override any config defaults (contrib.hardening.defaults) by adding a file - called hardening.yaml to your charm root containing the name(s) of the - modules whose settings you want to override at root level and then any settings - with overrides e.g. - - os: - general: - desktop_enable: True - -4. Now just run your charm as usual and hardening will be applied each time the - hook runs. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py deleted file mode 100644 index 30a3e943..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
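To make step 1 of the README above concrete, here is a minimal sketch of a charm hook wired up for hardening; the hook name and its body are illustrative placeholders:

    # harden() runs the hardening modules selected by the charm's 'harden'
    # config option (e.g. "os ssh") before the decorated hook body executes.
    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def config_changed():
        pass  # normal hook logic runs only after hardening has been applied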
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py deleted file mode 100644 index 3bc2ebd4..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.apache.checks import config - - -def run_apache_checks(): - log("Starting Apache hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("Apache hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py deleted file mode 100644 index e81a5f0b..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -import subprocess - - -from charmhelpers.core.hookenv import ( - log, - INFO, -) -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - DirectoryPermissionAudit, - NoReadWriteForOther, - TemplatedFile, - DeletedFile -) -from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit -from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get Apache hardening config audits. 
- - :returns: list of audits - """ - if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: - log("Apache server does not appear to be installed on this node - " - "skipping apache hardening", level=INFO) - return [] - - context = ApacheConfContext() - settings = utils.get_settings('apache') - audits = [ - FilePermissionAudit(paths=os.path.join( - settings['common']['apache_dir'], 'apache2.conf'), - user='root', group='root', mode=0o0640), - - TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'mods-available/alias.conf'), - context, - TEMPLATES_DIR, - mode=0o0640, - user='root', - service_actions=[{'service': 'apache2', - 'actions': ['restart']}]), - - TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/99-hardening.conf'), - context, - TEMPLATES_DIR, - mode=0o0640, - user='root', - service_actions=[{'service': 'apache2', - 'actions': ['restart']}]), - - DirectoryPermissionAudit(settings['common']['apache_dir'], - user='root', - group='root', - mode=0o0750), - - DisabledModuleAudit(settings['hardening']['modules_to_disable']), - - NoReadWriteForOther(settings['common']['apache_dir']), - - DeletedFile(['/var/www/html/index.html']) - ] - - return audits - - -class ApacheConfContext(object): - """Defines the set of key/value pairs to set in an apache config file. - - This context, when called, will return a dictionary containing the - key/value pairs of settings to specify in the - /etc/apache2/conf-enabled/99-hardening.conf file. - """ - def __call__(self): - settings = utils.get_settings('apache') - ctxt = settings['hardening'] - - out = subprocess.check_output(['apache2', '-v']).decode('utf-8') - ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', - out).group(1) - ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf deleted file mode 100644 index 22b68041..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf +++ /dev/null @@ -1,32 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten.
-############################################################################### - - - - # http://httpd.apache.org/docs/2.4/upgrading.html - {% if apache_version > '2.2' -%} - Require all granted - {% else -%} - Order Allow,Deny - Deny from all - {% endif %} - - - - - Options -Indexes -FollowSymLinks - AllowOverride None - - - - Options -Indexes -FollowSymLinks - AllowOverride None - - -TraceEnable {{ traceenable }} -ServerTokens {{ servertokens }} - -SSLHonorCipherOrder {{ honor_cipher_order }} -SSLCipherSuite {{ cipher_suite }} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf deleted file mode 100644 index e46a58a3..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf +++ /dev/null @@ -1,31 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### - - # - # Aliases: Add here as many aliases as you need (with no limit). The format is - # Alias fakename realname - # - # Note that if you include a trailing / on fakename then the server will - # require it to be present in the URL. So "/icons" isn't aliased in this - # example, only "/icons/". If the fakename is slash-terminated, then the - # realname must also be slash terminated, and if the fakename omits the - # trailing slash, the realname must also omit it. - # - # We include the /icons/ alias for FancyIndexed directory listings. If - # you do not use FancyIndexing, you may comment this out. - # - Alias /icons/ "{{ apache_icondir }}/" - - - Options -Indexes -MultiViews -FollowSymLinks - AllowOverride None -{% if apache_version == '2.4' -%} - Require all granted -{% else -%} - Order allow,deny - Allow from all -{% endif %} - - diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py deleted file mode 100644 index 6dd5b05f..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class BaseAudit(object): # NO-QA - """Base class for hardening checks. - - The lifecycle of a hardening check is to first check to see if the system - is in compliance for the specified check. If it is not in compliance, the - check method will return a value which will be supplied to the. 
- """ - def __init__(self, *args, **kwargs): - self.unless = kwargs.get('unless', None) - super(BaseAudit, self).__init__() - - def ensure_compliance(self): - """Checks to see if the current hardening check is in compliance or - not. - - If the check that is performed is not in compliance, then an exception - should be raised. - """ - pass - - def _take_action(self): - """Determines whether to perform the action or not. - - Checks whether or not an action should be taken. This is determined by - the truthy value for the unless parameter. If unless is a callback - method, it will be invoked with no parameters in order to determine - whether or not the action should be taken. Otherwise, the truthy value - of the unless attribute will determine if the action should be - performed. - """ - # Do the action if there isn't an unless override. - if self.unless is None: - return True - - # Invoke the callback if there is one. - if hasattr(self.unless, '__call__'): - return not self.unless() - - return not self.unless diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py deleted file mode 100644 index 31db8f62..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import subprocess - -from charmhelpers.core.hookenv import ( - log, - INFO, - ERROR, -) - -from charmhelpers.contrib.hardening.audits import BaseAudit - - -class DisabledModuleAudit(BaseAudit): - """Audits Apache2 modules. - - Determines if the apache2 modules are enabled. If the modules are enabled - then they are removed in the ensure_compliance. - """ - def __init__(self, modules): - if modules is None: - self.modules = [] - elif isinstance(modules, str): - self.modules = [modules] - else: - self.modules = modules - - def ensure_compliance(self): - """Ensures that the modules are not loaded.""" - if not self.modules: - return - - try: - loaded_modules = self._get_loaded_modules() - non_compliant_modules = [] - for module in self.modules: - if module in loaded_modules: - log("Module '%s' is enabled but should not be." % - (module), level=INFO) - non_compliant_modules.append(module) - - if len(non_compliant_modules) == 0: - return - - for module in non_compliant_modules: - self._disable_module(module) - self._restart_apache() - except subprocess.CalledProcessError as e: - log('Error occurred auditing apache module compliance. ' - 'This may have been already reported. ' - 'Output is: %s' % e.output, level=ERROR) - - @staticmethod - def _get_loaded_modules(): - """Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') - modules = [] - for line in output.splitlines(): - # Each line of the enabled module output looks like: - # module_name (static|shared) - # Plus a header line at the top of the output which is stripped - # out by the regex. 
- matcher = re.search(r'^ (\S*)_module (\S*)', line) - if matcher: - modules.append(matcher.group(1)) - return modules - - @staticmethod - def _disable_module(module): - """Disables the specified module in Apache.""" - try: - subprocess.check_call(['a2dismod', module]) - except subprocess.CalledProcessError as e: - # Note: catch error here to allow the attempt of disabling - # multiple modules in one go rather than failing after the - # first module fails. - log('Error occurred disabling module %s. ' - 'Output is: %s' % (module, e.output), level=ERROR) - - @staticmethod - def _restart_apache(): - """Restarts the apache process""" - subprocess.check_output(['service', 'apache2', 'restart']) - - @staticmethod - def is_ssl_enabled(): - """Check if SSL module is enabled or not""" - return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py deleted file mode 100644 index 1b22925b..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.fetch import ( - apt_cache, - apt_purge -) -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, -) -from charmhelpers.contrib.hardening.audits import BaseAudit -from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg - - -class AptConfig(BaseAudit): - - def __init__(self, config, **kwargs): - self.config = config - - def verify_config(self): - apt_pkg.init() - for cfg in self.config: - value = apt_pkg.config.get(cfg['key'], cfg.get('default', '')) - if value and value != cfg['expected']: - log("APT config '%s' has unexpected value '%s' " - "(expected='%s')" % - (cfg['key'], value, cfg['expected']), level=WARNING) - - def ensure_compliance(self): - self.verify_config() - - -class RestrictedPackages(BaseAudit): - """Class used to audit restricted packages on the system.""" - - def __init__(self, pkgs, **kwargs): - super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): - self.pkgs = pkgs.split() - else: - self.pkgs = pkgs - - def ensure_compliance(self): - cache = apt_cache() - - for p in self.pkgs: - if p not in cache: - continue - - pkg = cache[p] - if not self.is_virtual_package(pkg): - if not pkg.current_ver: - log("Package '%s' is not installed." % pkg.name, - level=DEBUG) - continue - else: - log("Restricted package '%s' is installed" % pkg.name, - level=WARNING) - self.delete_package(cache, pkg) - else: - log("Checking restricted virtual package '%s' provides" % - pkg.name, level=DEBUG) - self.delete_package(cache, pkg) - - def delete_package(self, cache, pkg): - """Deletes the package from the system. - - Deletes the package from the system, properly handling virtual - packages.
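For illustration, this is roughly how the RestrictedPackages audit above would be driven from charm code; the package names are hypothetical examples, not defaults shipped with this library:

    # Purge legacy remote-shell packages if they are ever found installed.
    from charmhelpers.contrib.hardening.audits.apt import RestrictedPackages

    audit = RestrictedPackages(pkgs=['telnetd', 'rsh-server'])
    audit.ensure_compliance()  # logs each hit and purges it via apt_purge()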
- - :param cache: the apt cache - :param pkg: the package to remove - """ - if self.is_virtual_package(pkg): - log("Package '%s' appears to be virtual - purging provides" % - pkg.name, level=DEBUG) - for _p in pkg.provides_list: - self.delete_package(cache, _p[2].parent_pkg) - elif not pkg.current_ver: - log("Package '%s' not installed" % pkg.name, level=DEBUG) - return - else: - log("Purging package '%s'" % pkg.name, level=DEBUG) - apt_purge(pkg.name) - - def is_virtual_package(self, pkg): - return (pkg.get('has_provides', False) and - not pkg.get('has_versions', False)) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py deleted file mode 100644 index 84cc2494..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/audits/file.py +++ /dev/null @@ -1,549 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import grp -import os -import pwd -import re - -from subprocess import ( - CalledProcessError, - check_output, - check_call, -) -from traceback import format_exc -from stat import ( - S_ISGID, - S_ISUID -) - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core import unitdata -from charmhelpers.core.host import file_hash -from charmhelpers.contrib.hardening.audits import BaseAudit -from charmhelpers.contrib.hardening.templating import ( - get_template_path, - render_and_write, -) -from charmhelpers.contrib.hardening import utils - - -class BaseFileAudit(BaseAudit): - """Base class for file audits. - - Provides API stubs for the compliance check flow that must be implemented - by any class that extends this one. - """ - - def __init__(self, paths, always_comply=False, *args, **kwargs): - """ - :param paths: string path or list of paths of files we want to apply - compliance checks and criteria to. - :param always_comply: if true, compliance criteria are always applied; - else compliance is skipped for non-existent - paths. - """ - super(BaseFileAudit, self).__init__(*args, **kwargs) - self.always_comply = always_comply - if isinstance(paths, str) or not hasattr(paths, '__iter__'): - self.paths = [paths] - else: - self.paths = paths - - def ensure_compliance(self): - """Ensure that all registered files comply with the registered criteria. - """ - for p in self.paths: - if os.path.exists(p): - if self.is_compliant(p): - continue - - log('File %s is not in compliance.' % p, level=INFO) - else: - if not self.always_comply: - log("Non-existent path '%s' - skipping compliance check" - % (p), level=INFO) - continue - - if self._take_action(): - log("Applying compliance criteria to '%s'" % (p), level=INFO) - self.comply(p) - - def is_compliant(self, path): - """Audits the path to see if it is in compliance. - - :param path: the path to the file that should be checked. - """ - raise NotImplementedError - - def comply(self, path): - """Enforces the compliance of a path.
- - :param path: the path to the file that should be enforced. - """ - raise NotImplementedError - - @classmethod - def _get_stat(cls, path): - """Returns the Posix st_stat information for the specified file path. - - :param path: the path to get the st_stat information for. - :returns: an st_stat object for the path or None if the path doesn't - exist. - """ - return os.stat(path) - - -class FilePermissionAudit(BaseFileAudit): - """Implements an audit for file permissions and ownership for a user. - - This class implements functionality that ensures that a specific user/group - will own the file(s) specified and that the permissions specified are - applied properly to the file. - """ - def __init__(self, paths, user, group=None, mode=0o600, **kwargs): - self.user = user - self.group = group - self.mode = mode - super(FilePermissionAudit, self).__init__(paths, user, group, mode, - **kwargs) - - @property - def user(self): - return self._user - - @user.setter - def user(self, name): - try: - user = pwd.getpwnam(name) - except KeyError: - log('Unknown user %s' % name, level=ERROR) - user = None - self._user = user - - @property - def group(self): - return self._group - - @group.setter - def group(self, name): - try: - group = None - if name: - group = grp.getgrnam(name) - else: - group = grp.getgrgid(self.user.pw_gid) - except KeyError: - log('Unknown group %s' % name, level=ERROR) - self._group = group - - def is_compliant(self, path): - """Checks if the path is in compliance. - - Used to determine if the path specified meets the necessary - requirements to be in compliance with the check itself. - - :param path: the file path to check - :returns: True if the path is compliant, False otherwise. - """ - stat = self._get_stat(path) - user = self.user - group = self.group - - compliant = True - if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: - log('File %s is not owned by %s:%s.' % (path, user.pw_name, - group.gr_name), - level=INFO) - compliant = False - - # POSIX refers to the st_mode bits as corresponding to both the - # file type and file permission bits, where the least significant 12 - # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the - # file permission bits (8-0) - perms = stat.st_mode & 0o7777 - if perms != self.mode: - log('File %s has incorrect permissions, currently set to %s' % - (path, oct(stat.st_mode & 0o7777)), level=INFO) - compliant = False - - return compliant - - def comply(self, path): - """Issues a chown and chmod to the file paths specified.""" - utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, - self.mode) - - -class DirectoryPermissionAudit(FilePermissionAudit): - """Performs a permission check for the specified directory path.""" - - def __init__(self, paths, user, group=None, mode=0o600, - recursive=True, **kwargs): - super(DirectoryPermissionAudit, self).__init__(paths, user, group, - mode, **kwargs) - self.recursive = recursive - - def is_compliant(self, path): - """Checks if the directory is compliant. - - Used to determine if the path specified and all of its children - directories are in compliance with the check itself. - - :param path: the directory path to check - :returns: True if the directory tree is compliant, otherwise False. - """ - if not os.path.isdir(path): - log('Path specified %s is not a directory.' % path, level=ERROR) - raise ValueError("%s is not a directory." 
% path) - - if not self.recursive: - return super(DirectoryPermissionAudit, self).is_compliant(path) - - compliant = True - for root, dirs, _ in os.walk(path): - if len(dirs) > 0: - continue - - if not super(DirectoryPermissionAudit, self).is_compliant(root): - compliant = False - continue - - return compliant - - def comply(self, path): - for root, dirs, _ in os.walk(path): - if len(dirs) > 0: - super(DirectoryPermissionAudit, self).comply(root) - - -class ReadOnly(BaseFileAudit): - """Audits that files and folders are read only.""" - def __init__(self, paths, *args, **kwargs): - super(ReadOnly, self).__init__(paths=paths, *args, **kwargs) - - def is_compliant(self, path): - try: - output = check_output(['find', path, '-perm', '-go+w', - '-type', 'f']).strip() - - # The find above will find any files which have permission sets - # which allow too broad of write access. As such, the path is - # compliant if there is no output. - if output: - return False - - return True - except CalledProcessError as e: - log('Error occurred while finding writable files for %s. ' - 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc(e)), level=ERROR) - return False - - def comply(self, path): - try: - check_output(['chmod', 'go-w', '-R', path]) - except CalledProcessError as e: - log('Error occurred removing writable permissions for %s. ' - 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc(e)), level=ERROR) - - -class NoReadWriteForOther(BaseFileAudit): - """Ensures that the files found under the base path are not readable or - writable by anyone other than the owner or the group. - """ - def __init__(self, paths): - super(NoReadWriteForOther, self).__init__(paths) - - def is_compliant(self, path): - try: - cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o', - '-perm', '-o+w', '-type', 'f'] - output = check_output(cmd).strip() - - # The find above will find any files which have read or - # write permissions for other, meaning there is too broad of access - # to read/write the file. As such, the path is compliant if there's - # no output. - if output: - return False - - return True - except CalledProcessError as e: - log('Error occurred while finding files which are readable or ' - 'writable to the world in %s. ' - 'Command output is: %s.' % (path, e.output), level=ERROR) - - def comply(self, path): - try: - check_output(['chmod', '-R', 'o-rw', path]) - except CalledProcessError as e: - log('Error occurred attempting to change modes of files under ' - 'path %s. Output of command is: %s' % (path, e.output)) - - -class NoSUIDSGIDAudit(BaseFileAudit): - """Audits that specified files do not have SUID/SGID bits set.""" - def __init__(self, paths, *args, **kwargs): - super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs) - - def is_compliant(self, path): - stat = self._get_stat(path) - if (stat.st_mode & (S_ISGID | S_ISUID)) != 0: - return False - - return True - - def comply(self, path): - try: - log('Removing suid/sgid from %s.' % path, level=DEBUG) - check_output(['chmod', '-s', path]) - except CalledProcessError as e: - log('Error occurred removing suid/sgid from %s.'
- 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc(e)), level=ERROR) - - -class TemplatedFile(BaseFileAudit): - """The TemplatedFileAudit audits the contents of a templated file. - - This audit renders a file from a template, sets the appropriate file - permissions, then generates a hashsum with which to check the content - changed. - """ - def __init__(self, path, context, template_dir, mode, user='root', - group='root', service_actions=None, **kwargs): - self.context = context - self.user = user - self.group = group - self.mode = mode - self.template_dir = template_dir - self.service_actions = service_actions - super(TemplatedFile, self).__init__(paths=path, always_comply=True, - **kwargs) - - def is_compliant(self, path): - """Determines if the templated file is compliant. - - A templated file is only compliant if it has not changed (as - determined by its sha256 hashsum) AND its file permissions are set - appropriately. - - :param path: the path to check compliance. - """ - same_templates = self.templates_match(path) - same_content = self.contents_match(path) - same_permissions = self.permissions_match(path) - - if same_content and same_permissions and same_templates: - return True - - return False - - def run_service_actions(self): - """Run any actions on services requested.""" - if not self.service_actions: - return - - for svc_action in self.service_actions: - name = svc_action['service'] - actions = svc_action['actions'] - log("Running service '%s' actions '%s'" % (name, actions), - level=DEBUG) - for action in actions: - cmd = ['service', name, action] - try: - check_call(cmd) - except CalledProcessError as exc: - log("Service name='%s' action='%s' failed - %s" % - (name, action, exc), level=WARNING) - - def comply(self, path): - """Ensures the contents and the permissions of the file. - - :param path: the path to correct - """ - dirname = os.path.dirname(path) - if not os.path.exists(dirname): - os.makedirs(dirname) - - self.pre_write() - render_and_write(self.template_dir, path, self.context()) - utils.ensure_permissions(path, self.user, self.group, self.mode) - self.run_service_actions() - self.save_checksum(path) - self.post_write() - - def pre_write(self): - """Invoked prior to writing the template.""" - pass - - def post_write(self): - """Invoked after writing the template.""" - pass - - def templates_match(self, path): - """Determines if the template files are the same. - - The template file equality is determined by the hashsum of the - template files themselves. If there is no hashsum, then the content - cannot be sure to be the same so treat it as if they changed. - Otherwise, return whether or not the hashsums are the same. - - :param path: the path to check - :returns: boolean - """ - template_path = get_template_path(self.template_dir, path) - key = 'hardening:template:%s' % template_path - template_checksum = file_hash(template_path) - kv = unitdata.kv() - stored_tmplt_checksum = kv.get(key) - if not stored_tmplt_checksum: - kv.set(key, template_checksum) - kv.flush() - log('Saved template checksum for %s.' % template_path, - level=DEBUG) - # Since we don't have a template checksum, then assume it doesn't - # match and return that the template is different. - return False - elif stored_tmplt_checksum != template_checksum: - kv.set(key, template_checksum) - kv.flush() - log('Updated template checksum for %s.' 
% template_path, - level=DEBUG) - return False - - # Here the template hasn't changed based upon the calculated - # checksum of the template and what was previously stored. - return True - - def contents_match(self, path): - """Determines if the file content is the same. - - This is determined by comparing the hashsum of the file contents with - the saved hashsum. If there is no saved hashsum, the content cannot - be assumed to be the same, so treat the files as if they are not the - same. Otherwise, return True if the hashsums are the same, False if - they are not the same. - - :param path: the file to check. - """ - checksum = file_hash(path) - - kv = unitdata.kv() - stored_checksum = kv.get('hardening:%s' % path) - if not stored_checksum: - # If the checksum hasn't been generated, return False to ensure - # the file is written and the checksum stored. - log('Checksum for %s has not been calculated.' % path, level=DEBUG) - return False - elif stored_checksum != checksum: - log('Checksum mismatch for %s.' % path, level=DEBUG) - return False - - return True - - def permissions_match(self, path): - """Determines if the file owner and permissions match. - - :param path: the path to check. - """ - audit = FilePermissionAudit(path, self.user, self.group, self.mode) - return audit.is_compliant(path) - - def save_checksum(self, path): - """Calculates and saves the checksum for the path specified. - - :param path: the path of the file to save the checksum. - """ - checksum = file_hash(path) - kv = unitdata.kv() - kv.set('hardening:%s' % path, checksum) - kv.flush() - - -class DeletedFile(BaseFileAudit): - """Audit to ensure that a file is deleted.""" - def __init__(self, paths): - super(DeletedFile, self).__init__(paths) - - def is_compliant(self, path): - return not os.path.exists(path) - - def comply(self, path): - os.remove(path) - - -class FileContentAudit(BaseFileAudit): - """Audit the contents of a file.""" - def __init__(self, paths, cases, **kwargs): - # Cases we expect to pass - self.pass_cases = cases.get('pass', []) - # Cases we expect to fail - self.fail_cases = cases.get('fail', []) - super(FileContentAudit, self).__init__(paths, **kwargs) - - def is_compliant(self, path): - """ - Given a set of content matching cases i.e. tuple(regex, bool) where - bool value denotes whether or not regex is expected to match, check that - all cases match as expected with the contents of the file. Cases can be - expected to pass or fail. - - :param path: Path of file to check. - :returns: Boolean value representing whether or not all cases are - found to be compliant. - """ - log("Auditing contents of file '%s'" % (path), level=DEBUG) - with open(path, 'r') as fd: - contents = fd.read() - - matches = 0 - for pattern in self.pass_cases: - key = re.compile(pattern, flags=re.MULTILINE) - results = re.search(key, contents) - if results: - matches += 1 - else: - log("Pattern '%s' was expected to pass but instead it failed" - % (pattern), level=WARNING) - - for pattern in self.fail_cases: - key = re.compile(pattern, flags=re.MULTILINE) - results = re.search(key, contents) - if not results: - matches += 1 - else: - log("Pattern '%s' was expected to fail but instead it passed" - % (pattern), level=WARNING) - - total = len(self.pass_cases) + len(self.fail_cases) - log("Checked %s cases and %s passed" % (total, matches), level=DEBUG) - return matches == total - - def comply(self, *args, **kwargs): - """NOOP since we just issue warnings. This is to avoid the - NotImplementedError.
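To make the pass/fail case semantics above concrete, a short hypothetical FileContentAudit; the file path and patterns are illustrative:

    # sshd_config must disable password logins (a 'pass' case) and must not
    # permit root login (a 'fail' case); both must hold for compliance.
    from charmhelpers.contrib.hardening.audits.file import FileContentAudit

    audit = FileContentAudit(
        paths='/etc/ssh/sshd_config',
        cases={'pass': [r'^PasswordAuthentication\s+no'],
               'fail': [r'^PermitRootLogin\s+yes']})
    compliant = audit.is_compliant('/etc/ssh/sshd_config')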
-        """
-        log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
deleted file mode 100644
index 0f940d4c..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# NOTE: this file contains the default configuration for the 'apache' hardening
-#       code. If you want to override any settings you must add them to a file
-#       called hardening.yaml in the root directory of your charm using the
-#       name 'apache' as the root key followed by any of the following with new
-#       values.
-
-common:
-    apache_dir: '/etc/apache2'
-
-hardening:
-    traceenable: 'off'
-    allowed_http_methods: "GET POST"
-    modules_to_disable: [ cgi, cgid ]
-    servertokens: 'Prod'
-    honor_cipher_order: 'on'
-    cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
deleted file mode 100644
index c112137c..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
+++ /dev/null
@@ -1,12 +0,0 @@
-# NOTE: this schema must contain all valid keys from its associated defaults
-#       file. It is used to validate user-provided overrides.
-common:
-    apache_dir:
-    traceenable:
-
-hardening:
-    allowed_http_methods:
-    modules_to_disable:
-    servertokens:
-    honor_cipher_order:
-    cipher_suite:
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
deleted file mode 100644
index 682d22bf..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# NOTE: this file contains the default configuration for the 'mysql' hardening
-#       code. If you want to override any settings you must add them to a file
-#       called hardening.yaml in the root directory of your charm using the
-#       name 'mysql' as the root key followed by any of the following with new
-#       values.
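The FileContentAudit removed above drives its verdict from two regex lists: every 'pass' pattern must match the file and every 'fail' pattern must not. A minimal standalone sketch of the same idea, using only the standard library; the sshd_config path and patterns here are illustrative, not taken from the charm:

    import re

    def audit_contents(path, pass_cases, fail_cases):
        """Return True if every pass regex matches and no fail regex does."""
        with open(path) as fd:
            contents = fd.read()
        ok = all(re.search(p, contents, flags=re.MULTILINE) for p in pass_cases)
        bad = any(re.search(p, contents, flags=re.MULTILINE) for p in fail_cases)
        return ok and not bad

    # Hypothetical check that root login is disabled in sshd_config.
    compliant = audit_contents(
        '/etc/ssh/sshd_config',
        pass_cases=[r'^PermitRootLogin\s+no'],
        fail_cases=[r'^PermitRootLogin\s+yes'])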
- -hardening: - mysql-conf: /etc/mysql/my.cnf - hardening-conf: /etc/mysql/conf.d/hardening.cnf - -security: - # @see http://www.symantec.com/connect/articles/securing-mysql-step-step - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot - chroot: None - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create - safe-user-create: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth - secure-auth: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links - skip-symbolic-links: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database - skip-show-database: True - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile - local-infile: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs - allow-suspicious-udfs: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges - automatic-sp-privileges: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv - secure-file-priv: /tmp diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema deleted file mode 100644 index 2edf325c..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema +++ /dev/null @@ -1,15 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. -hardening: - mysql-conf: - hardening-conf: -security: - chroot: - safe-user-create: - secure-auth: - skip-symbolic-links: - skip-show-database: - local-infile: - allow-suspicious-udfs: - automatic-sp-privileges: - secure-file-priv: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml deleted file mode 100644 index 9a8627b5..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# NOTE: this file contains the default configuration for the 'os' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'os' as the root key followed by any of the following with new -# values. 
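Each of these defaults files carries the same NOTE: a charm may override individual keys by shipping a hardening.yaml in its root directory, keyed by module name. A hypothetical override, parsed here with PyYAML (assumed available) just to show the resulting structure the hardening code would merge over the defaults:

    import yaml

    # Hypothetical hardening.yaml placed in the charm root; keys mirror os.yaml.
    OVERRIDES = """
    os:
      auth:
        pw_max_age: 90
      security:
        packages_clean: false
    """

    print(yaml.safe_load(OVERRIDES))
    # {'os': {'auth': {'pw_max_age': 90}, 'security': {'packages_clean': False}}}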
- -general: - desktop_enable: False # (type:boolean) - -environment: - extra_user_paths: [] - umask: 027 - root_path: / - -auth: - pw_max_age: 60 - # discourage password cycling - pw_min_age: 7 - retries: 5 - lockout_time: 600 - timeout: 60 - allow_homeless: False # (type:boolean) - pam_passwdqc_enable: True # (type:boolean) - pam_passwdqc_options: 'min=disabled,disabled,16,12,8' - root_ttys: - console - tty1 - tty2 - tty3 - tty4 - tty5 - tty6 - uid_min: 1000 - gid_min: 1000 - sys_uid_min: 100 - sys_uid_max: 999 - sys_gid_min: 100 - sys_gid_max: 999 - chfn_restrict: - -security: - users_allow: [] - suid_sgid_enforce: True # (type:boolean) - # user-defined blacklist and whitelist - suid_sgid_blacklist: [] - suid_sgid_whitelist: [] - # if this is True, remove any suid/sgid bits from files that were not in the whitelist - suid_sgid_dry_run_on_unknown: False # (type:boolean) - suid_sgid_remove_from_unknown: False # (type:boolean) - # remove packages with known issues - packages_clean: True # (type:boolean) - packages_list: - xinetd - inetd - ypserv - telnet-server - rsh-server - rsync - kernel_enable_module_loading: True # (type:boolean) - kernel_enable_core_dump: False # (type:boolean) - ssh_tmout: 300 - -sysctl: - kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 - kernel_enable_sysrq: False # (type:boolean) - forwarding: False # (type:boolean) - ipv6_enable: False # (type:boolean) - arp_restricted: True # (type:boolean) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema deleted file mode 100644 index cc3b9c20..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ /dev/null @@ -1,43 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. -general: - desktop_enable: -environment: - extra_user_paths: - umask: - root_path: -auth: - pw_max_age: - pw_min_age: - retries: - lockout_time: - timeout: - allow_homeless: - pam_passwdqc_enable: - pam_passwdqc_options: - root_ttys: - uid_min: - gid_min: - sys_uid_min: - sys_uid_max: - sys_gid_min: - sys_gid_max: - chfn_restrict: -security: - users_allow: - suid_sgid_enforce: - suid_sgid_blacklist: - suid_sgid_whitelist: - suid_sgid_dry_run_on_unknown: - suid_sgid_remove_from_unknown: - packages_clean: - packages_list: - kernel_enable_module_loading: - kernel_enable_core_dump: - ssh_tmout: -sysctl: - kernel_secure_sysrq: - kernel_enable_sysrq: - forwarding: - ipv6_enable: - arp_restricted: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml deleted file mode 100644 index cd529bca..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# NOTE: this file contains the default configuration for the 'ssh' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'ssh' as the root key followed by any of the following with new -# values. 
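The kernel_secure_sysrq default of 244 above is a bitmask, and the inline comment spells out the sum. Per the kernel's sysrq documentation the bits decode roughly as follows; this is a sketch, and the bit meanings are assumptions worth double-checking against Documentation/admin-guide/sysrq.rst for your kernel:

    # Bit values taken from the kernel sysrq docs (not defined in the charm).
    SYSRQ_BITS = {
        4: 'keyboard control (SAK, unraw)',
        16: 'sync command',
        32: 'remount read-only',
        64: 'signal processes (term, kill, oom-kill)',
        128: 'reboot/poweroff',
    }

    value = 4 + 16 + 32 + 64 + 128
    assert value == 244
    for bit, meaning in sorted(SYSRQ_BITS.items()):
        if value & bit:
            print('%3d: %s' % (bit, meaning))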
- -common: - service_name: 'ssh' - network_ipv6_enable: False # (type:boolean) - ports: [22] - remote_hosts: [] - -client: - package: 'openssh-client' - cbc_required: False # (type:boolean) - weak_hmac: False # (type:boolean) - weak_kex: False # (type:boolean) - roaming: False - password_authentication: 'no' - -server: - host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', - '/etc/ssh/ssh_host_ecdsa_key'] - cbc_required: False # (type:boolean) - weak_hmac: False # (type:boolean) - weak_kex: False # (type:boolean) - allow_root_with_key: False # (type:boolean) - allow_tcp_forwarding: 'no' - allow_agent_forwarding: 'no' - allow_x11_forwarding: 'no' - use_privilege_separation: 'sandbox' - listen_to: ['0.0.0.0'] - use_pam: 'no' - package: 'openssh-server' - password_authentication: 'no' - alive_interval: '600' - alive_count: '3' - sftp_enable: False # (type:boolean) - sftp_group: 'sftponly' - sftp_chroot: '/home/%u' - deny_users: [] - allow_users: [] - deny_groups: [] - allow_groups: [] - print_motd: 'no' - print_last_log: 'no' - use_dns: 'no' - max_auth_tries: 2 - max_sessions: 10 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema deleted file mode 100644 index d05e054b..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema +++ /dev/null @@ -1,42 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. -common: - service_name: - network_ipv6_enable: - ports: - remote_hosts: -client: - package: - cbc_required: - weak_hmac: - weak_kex: - roaming: - password_authentication: -server: - host_key_files: - cbc_required: - weak_hmac: - weak_kex: - allow_root_with_key: - allow_tcp_forwarding: - allow_agent_forwarding: - allow_x11_forwarding: - use_privilege_separation: - listen_to: - use_pam: - package: - password_authentication: - alive_interval: - alive_count: - sftp_enable: - sftp_group: - sftp_chroot: - deny_users: - allow_users: - deny_groups: - allow_groups: - print_motd: - print_last_log: - use_dns: - max_auth_tries: - max_sessions: diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py deleted file mode 100644 index 45ad076d..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/harden.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from collections import OrderedDict - -from charmhelpers.core.hookenv import ( - config, - log, - DEBUG, - WARNING, -) -from charmhelpers.contrib.hardening.host.checks import run_os_checks -from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks -from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks -from charmhelpers.contrib.hardening.apache.checks import run_apache_checks - -_DISABLE_HARDENING_FOR_UNIT_TEST = False - - -def harden(overrides=None): - """Hardening decorator. - - This is the main entry point for running the hardening stack. In order to - run modules of the stack you must add this decorator to charm hook(s) and - ensure that your charm config.yaml contains the 'harden' option set to - one or more of the supported modules. Setting these will cause the - corresponding hardening code to be run when the hook fires. - - This decorator can and should be applied to more than one hook or function - such that hardening modules are called multiple times. This is because - subsequent calls will perform auditing checks that will report any changes - to resources hardened by the first run (and possibly perform compliance - actions as a result of any detected infractions). - - :param overrides: Optional list of stack modules used to override those - provided with 'harden' config. - :returns: Returns value returned by decorated function once executed. - """ - if overrides is None: - overrides = [] - - def _harden_inner1(f): - _logged = False - - def _harden_inner2(*args, **kwargs): - # knock out hardening via a config var; normally it won't get - # disabled. - nonlocal _logged - if _DISABLE_HARDENING_FOR_UNIT_TEST: - return f(*args, **kwargs) - if not _logged: - log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged = True - RUN_CATALOG = OrderedDict([('os', run_os_checks), - ('ssh', run_ssh_checks), - ('mysql', run_mysql_checks), - ('apache', run_apache_checks)]) - - enabled = overrides[:] or (config("harden") or "").split() - if enabled: - modules_to_run = [] - # modules will always be performed in the following order - for module, func in RUN_CATALOG.items(): - if module in enabled: - enabled.remove(module) - modules_to_run.append(func) - - if enabled: - log("Unknown hardening modules '%s' - ignoring" % - (', '.join(enabled)), level=WARNING) - - for hardener in modules_to_run: - log("Executing hardening module '%s'" % - (hardener.__name__), level=DEBUG) - hardener() - else: - log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) - - return f(*args, **kwargs) - return _harden_inner2 - - return _harden_inner1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
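The harden docstring above describes the intended wiring: apply the decorator to charm hooks and enable modules through the charm's 'harden' config option. A minimal sketch of that wiring; the hook names are illustrative and assume a charm whose config.yaml exposes the 'harden' option:

    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def config_changed():
        """Example hook body; enabled hardening modules run first."""
        pass

    @harden(overrides=['os'])
    def install():
        """Runs the 'os' module regardless of the 'harden' config value."""
        pass

With something like `juju config <app> harden="os ssh"` set, each decorated hook executes the 'os' and 'ssh' entries from RUN_CATALOG before its own body.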
- -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py deleted file mode 100644 index 0e7e409f..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.host.checks import ( - apt, - limits, - login, - minimize_access, - pam, - profile, - securetty, - suid_sgid, - sysctl -) - - -def run_os_checks(): - log("Starting OS hardening checks.", level=DEBUG) - checks = apt.get_audits() - checks.extend(limits.get_audits()) - checks.extend(login.get_audits()) - checks.extend(minimize_access.get_audits()) - checks.extend(pam.get_audits()) - checks.extend(profile.get_audits()) - checks.extend(securetty.get_audits()) - checks.extend(suid_sgid.get_audits()) - checks.extend(sysctl.get_audits()) - - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("OS hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py deleted file mode 100644 index 7ce41b00..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.utils import get_settings -from charmhelpers.contrib.hardening.audits.apt import ( - AptConfig, - RestrictedPackages, -) - - -def get_audits(): - """Get OS hardening apt audits. 
- - :returns: dictionary of audits - """ - audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', - 'expected': 'false'}])] - - settings = get_settings('os') - clean_packages = settings['security']['packages_clean'] - if clean_packages: - security_packages = settings['security']['packages_list'] - if security_packages: - audits.append(RestrictedPackages(security_packages)) - - return audits diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py deleted file mode 100644 index e94f5ebe..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import ( - DirectoryPermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening security limits audits. - - :returns: dictionary of audits - """ - audits = [] - settings = utils.get_settings('os') - - # Ensure that the /etc/security/limits.d directory is only writable - # by the root user, but others can execute and read. - audits.append(DirectoryPermissionAudit('/etc/security/limits.d', - user='root', group='root', - mode=0o755)) - - # If core dumps are not enabled, then don't allow core dumps to be - # created as they may contain sensitive information. - if not settings['security']['kernel_enable_core_dump']: - audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', - SecurityLimitsContext(), - template_dir=TEMPLATES_DIR, - user='root', group='root', mode=0o0440)) - return audits - - -class SecurityLimitsContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'disable_core_dump': - not settings['security']['kernel_enable_core_dump']} - return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py deleted file mode 100644 index fd500c8b..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
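Every checks module follows the same contract seen in run_os_checks above: get_audits() returns objects exposing ensure_compliance(), which the runner iterates over. A toy audit written against that contract, purely illustrative; the real audits subclass the base classes in charmhelpers.contrib.hardening.audits rather than starting from object:

    import os

    class StickyTmpAudit(object):
        """Toy audit: /tmp must be sticky world-writable (mode 1777)."""

        def ensure_compliance(self):
            mode = os.stat('/tmp').st_mode & 0o7777
            if mode != 0o1777:
                os.chmod('/tmp', 0o1777)

    def get_audits():
        return [StickyTmpAudit()]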
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
-    """Get OS hardening login.defs audits.
-
-    :returns: dictionary of audits
-    """
-    audits = [TemplatedFile('/etc/login.defs', LoginContext(),
-                            template_dir=TEMPLATES_DIR,
-                            user='root', group='root', mode=0o0444)]
-    return audits
-
-
-class LoginContext(object):
-
-    def __call__(self):
-        settings = utils.get_settings('os')
-
-        # Octal numbers in yaml end up being turned into decimal,
-        # so check if the umask is entered as a string (e.g. '027')
-        # or as an octal umask as we know it (e.g. 002). If it's not
-        # a string, assume it to be octal and turn it into an octal
-        # string.
-        umask = settings['environment']['umask']
-        if not isinstance(umask, str):
-            umask = '%s' % oct(umask)
-
-        ctxt = {
-            'additional_user_paths':
-            settings['environment']['extra_user_paths'],
-            'umask': umask,
-            'pwd_max_age': settings['auth']['pw_max_age'],
-            'pwd_min_age': settings['auth']['pw_min_age'],
-            'uid_min': settings['auth']['uid_min'],
-            'sys_uid_min': settings['auth']['sys_uid_min'],
-            'sys_uid_max': settings['auth']['sys_uid_max'],
-            'gid_min': settings['auth']['gid_min'],
-            'sys_gid_min': settings['auth']['sys_gid_min'],
-            'sys_gid_max': settings['auth']['sys_gid_max'],
-            'login_retries': settings['auth']['retries'],
-            'login_timeout': settings['auth']['timeout'],
-            'chfn_restrict': settings['auth']['chfn_restrict'],
-            'allow_login_without_home': settings['auth']['allow_homeless']
-        }
-
-        return ctxt
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py
deleted file mode 100644
index 6e64be00..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
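The octal/decimal dance in LoginContext above is easy to trip over: PyYAML implements YAML 1.1, where a leading zero marks an octal literal, so an unquoted umask comes back as a decimal int. A quick illustration of why quoting the value as a string in the YAML is the safer choice:

    import yaml

    print(yaml.safe_load('umask: 027'))    # {'umask': 23} - parsed as octal
    print(yaml.safe_load("umask: '027'"))  # {'umask': '027'} - kept verbatim
    print(oct(23))                         # '0o27' on Python 3, '027' on Python 2

Note that on Python 3 oct() yields '0o27', which is not a valid login.defs value, so the string form is the only portable spelling.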
- audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) - - if 'change_user' not in settings['security']['users_allow']: - # su should only be accessible to user and group root, unless it is - # expressly defined to allow users to change to root via the - # security_users_allow config option. - audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) - - return audits diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py deleted file mode 100644 index 9b38d5f0..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from subprocess import ( - check_output, - CalledProcessError, -) - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) -from charmhelpers.fetch import ( - apt_install, - apt_purge, - apt_update, -) -from charmhelpers.contrib.hardening.audits.file import ( - TemplatedFile, - DeletedFile, -) -from charmhelpers.contrib.hardening import utils -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR - - -def get_audits(): - """Get OS hardening PAM authentication audits. - - :returns: dictionary of audits - """ - audits = [] - - settings = utils.get_settings('os') - - if settings['auth']['pam_passwdqc_enable']: - audits.append(PasswdqcPAM('/etc/passwdqc.conf')) - - if settings['auth']['retries']: - audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) - else: - audits.append(DeletedFile('/usr/share/pam-configs/tally2')) - - return audits - - -class PasswdqcPAMContext(object): - - def __call__(self): - ctxt = {} - settings = utils.get_settings('os') - - ctxt['auth_pam_passwdqc_options'] = \ - settings['auth']['pam_passwdqc_options'] - - return ctxt - - -class PasswdqcPAM(TemplatedFile): - """The PAM Audit verifies the linux PAM settings.""" - def __init__(self, path): - super(PasswdqcPAM, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=PasswdqcPAMContext(), - user='root', - group='root', - mode=0o0640) - - def pre_write(self): - # Always remove? 
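The FilePermissionAudit checks used by minimize_access above boil down to comparing an expected owner, group, and mode against stat() results. The core of such a check, sketched with the standard library; the real audit class layers compliance actions on top:

    import grp
    import os
    import pwd
    import stat

    def permissions_ok(path, user, group, mode):
        """Return True if path is owned by user:group with exactly mode."""
        st = os.stat(path)
        return (st.st_uid == pwd.getpwnam(user).pw_uid and
                st.st_gid == grp.getgrnam(group).gr_gid and
                stat.S_IMODE(st.st_mode) == mode)

    # e.g. the shadow file check from minimize_access:
    # permissions_ok('/etc/shadow', 'root', 'root', 0o600)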
- for pkg in ['libpam-ccreds', 'libpam-cracklib']: - log("Purging package '%s'" % pkg, level=DEBUG), - apt_purge(pkg) - - apt_update(fatal=True) - for pkg in ['libpam-passwdqc']: - log("Installing package '%s'" % pkg, level=DEBUG), - apt_install(pkg) - - def post_write(self): - """Updates the PAM configuration after the file has been written""" - try: - check_output(['pam-auth-update', '--package']) - except CalledProcessError as e: - log('Error calling pam-auth-update: %s' % e, level=ERROR) - - -class Tally2PAMContext(object): - - def __call__(self): - ctxt = {} - settings = utils.get_settings('os') - - ctxt['auth_lockout_time'] = settings['auth']['lockout_time'] - ctxt['auth_retries'] = settings['auth']['retries'] - - return ctxt - - -class Tally2PAM(TemplatedFile): - """The PAM Audit verifies the linux PAM settings.""" - def __init__(self, path): - super(Tally2PAM, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=Tally2PAMContext(), - user='root', - group='root', - mode=0o0640) - - def pre_write(self): - # Always remove? - apt_purge('libpam-ccreds') - apt_update(fatal=True) - apt_install('libpam-modules') - - def post_write(self): - """Updates the PAM configuration after the file has been written""" - try: - check_output(['pam-auth-update', '--package']) - except CalledProcessError as e: - log('Error calling pam-auth-update: %s' % e, level=ERROR) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py deleted file mode 100644 index 2727428d..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import TemplatedFile -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening profile audits. - - :returns: dictionary of audits - """ - audits = [] - - settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be - # created as they may contain sensitive information. 
- if not settings['security']['kernel_enable_core_dump']: - audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', - ProfileContext(), - template_dir=TEMPLATES_DIR, - mode=0o0755, user='root', group='root')) - if settings['security']['ssh_tmout']: - audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', - ProfileContext(), - template_dir=TEMPLATES_DIR, - mode=0o0644, user='root', group='root')) - return audits - - -class ProfileContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'ssh_tmout': - settings['security']['ssh_tmout']} - return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py deleted file mode 100644 index 34cd0217..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import TemplatedFile -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening Secure TTY audits. - - :returns: dictionary of audits - """ - audits = [] - audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), - template_dir=TEMPLATES_DIR, - mode=0o0400, user='root', group='root')) - return audits - - -class SecureTTYContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'ttys': settings['auth']['root_ttys']} - return ctxt diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py deleted file mode 100644 index bcbe3fde..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
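PasswdqcPAM and Tally2PAM earlier show the intended extension points of TemplatedFile: package churn happens in pre_write(), then pam-auth-update runs in post_write() once the profile is on disk. A stripped-down subclass following the same shape; the template name and context are hypothetical and would need to exist under TEMPLATES_DIR:

    from subprocess import check_output

    from charmhelpers.fetch import apt_install, apt_update
    from charmhelpers.contrib.hardening.audits.file import TemplatedFile
    from charmhelpers.contrib.hardening.host import TEMPLATES_DIR

    class ExamplePAM(TemplatedFile):
        """Hypothetical PAM profile rendered from TEMPLATES_DIR."""

        def __init__(self, path, context):
            super(ExamplePAM, self).__init__(
                path=path, context=context, template_dir=TEMPLATES_DIR,
                user='root', group='root', mode=0o0640)

        def pre_write(self):
            # Make sure the PAM tooling is present before rendering.
            apt_update(fatal=True)
            apt_install('libpam-modules')

        def post_write(self):
            # Regenerate the PAM stack from installed profiles.
            check_output(['pam-auth-update', '--package'])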
-
-import subprocess
-
-from charmhelpers.core.hookenv import (
-    log,
-    INFO,
-)
-from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
-from charmhelpers.contrib.hardening import utils
-
-
-BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
-             '/usr/libexec/openssh/ssh-keysign',
-             '/usr/lib/openssh/ssh-keysign',
-             '/sbin/netreport',
-             '/usr/sbin/usernetctl',
-             '/usr/sbin/userisdnctl',
-             '/usr/sbin/pppd',
-             '/usr/bin/lockfile',
-             '/usr/bin/mail-lock',
-             '/usr/bin/mail-unlock',
-             '/usr/bin/mail-touchlock',
-             '/usr/bin/dotlockfile',
-             '/usr/bin/arping',
-             '/usr/sbin/uuidd',
-             '/usr/bin/mtr',
-             '/usr/lib/evolution/camel-lock-helper-1.2',
-             '/usr/lib/pt_chown',
-             '/usr/lib/eject/dmcrypt-get-device',
-             '/usr/lib/mc/cons.saver']
-
-WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
-             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
-             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
-             '/usr/bin/passwd', '/usr/bin/ssh-agent',
-             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
-             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
-             '/bin/ping6', '/usr/bin/traceroute6.iputils',
-             '/sbin/mount.nfs', '/sbin/umount.nfs',
-             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
-             '/usr/bin/crontab',
-             '/usr/bin/wall', '/usr/bin/write',
-             '/usr/bin/screen',
-             '/usr/bin/mlocate',
-             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
-             '/bin/fusermount',
-             '/usr/bin/pkexec',
-             '/usr/bin/sudo', '/usr/bin/sudoedit',
-             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
-             '/usr/sbin/suexec',
-             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
-             '/usr/kerberos/bin/ksu',
-             '/usr/sbin/ccreds_validate',
-             '/usr/bin/Xorg',
-             '/usr/bin/X',
-             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
-             '/usr/lib/vte/gnome-pty-helper',
-             '/usr/lib/libvte9/gnome-pty-helper',
-             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
-
-
-def get_audits():
-    """Get OS hardening suid/sgid audits.
-
-    :returns: dictionary of audits
-    """
-    checks = []
-    settings = utils.get_settings('os')
-    if not settings['security']['suid_sgid_enforce']:
-        log("Skipping suid/sgid hardening", level=INFO)
-        return checks
-
-    # Build the blacklist and whitelist of files for suid/sgid checks.
-    # There are a total of 4 lists:
-    #   1. the system blacklist
-    #   2. the system whitelist
-    #   3. the user blacklist
-    #   4. the user whitelist
-    #
-    # The blacklist is the set of paths which should NOT have the suid/sgid bit
-    # set and the whitelist is the set of paths which MAY have the suid/sgid
-    # bit set. The user whitelist/blacklist effectively override the system
-    # whitelist/blacklist.
-    u_b = settings['security']['suid_sgid_blacklist']
-    u_w = settings['security']['suid_sgid_whitelist']
-
-    blacklist = set(BLACKLIST) - set(u_w + u_b)
-    whitelist = set(WHITELIST) - set(u_b + u_w)
-
-    checks.append(NoSUIDSGIDAudit(blacklist))
-
-    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
-
-    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
-        # If the policy is a dry_run (e.g. complain only) or remove unknown
-        # suid/sgid bits then find all of the paths which have the suid/sgid
-        # bit set and then remove the whitelisted paths.
-        root_path = settings['environment']['root_path']
-        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
-        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
-
-    return checks
-
-
-def find_paths_with_suid_sgid(root_path):
-    """Finds all paths/files which have an suid/sgid bit enabled.
- - Starting with the root_path, this will recursively find all paths which - have an suid or sgid bit set. - """ - cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', - '-type', 'f', '!', '-path', '/proc/*', '-print'] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, _ = p.communicate() - return set(out.split('\n')) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py deleted file mode 100644 index 8a57d83d..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import platform -import re -import subprocess - -from charmhelpers.core.hookenv import ( - log, - INFO, - WARNING, -) -from charmhelpers.contrib.hardening import utils -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR - - -SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s -net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s -net.ipv4.conf.all.rp_filter=1 -net.ipv4.conf.default.rp_filter=1 -net.ipv4.icmp_echo_ignore_broadcasts=1 -net.ipv4.icmp_ignore_bogus_error_responses=1 -net.ipv4.icmp_ratelimit=100 -net.ipv4.icmp_ratemask=88089 -net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s -net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s -net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s -net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s -net.ipv4.tcp_rfc1337=1 -net.ipv4.tcp_syncookies=1 -net.ipv4.conf.all.shared_media=1 -net.ipv4.conf.default.shared_media=1 -net.ipv4.conf.all.accept_source_route=0 -net.ipv4.conf.default.accept_source_route=0 -net.ipv4.conf.all.accept_redirects=0 -net.ipv4.conf.default.accept_redirects=0 -net.ipv6.conf.all.accept_redirects=0 -net.ipv6.conf.default.accept_redirects=0 -net.ipv4.conf.all.secure_redirects=0 -net.ipv4.conf.default.secure_redirects=0 -net.ipv4.conf.all.send_redirects=0 -net.ipv4.conf.default.send_redirects=0 -net.ipv4.conf.all.log_martians=0 -net.ipv6.conf.default.router_solicitations=0 -net.ipv6.conf.default.accept_ra_rtr_pref=0 -net.ipv6.conf.default.accept_ra_pinfo=0 -net.ipv6.conf.default.accept_ra_defrtr=0 -net.ipv6.conf.default.autoconf=0 -net.ipv6.conf.default.dad_transmits=0 -net.ipv6.conf.default.max_addresses=1 -net.ipv6.conf.all.accept_ra=0 -net.ipv6.conf.default.accept_ra=0 -kernel.modules_disabled=%(kernel_modules_disabled)s -kernel.sysrq=%(kernel_sysrq)s -fs.suid_dumpable=%(fs_suid_dumpable)s -kernel.randomize_va_space=2 -""" - - -def get_audits(): - """Get OS hardening sysctl audits. - - :returns: dictionary of audits - """ - audits = [] - settings = utils.get_settings('os') - - # Apply the sysctl settings which are configured to be applied. 
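One caveat about the find(1) invocation above: find's implicit ANDs bind tighter than -o, so the -type, -path, and -print predicates only apply to the -perm -2000 branch, and setuid-only matches appear to be swallowed by the short-circuiting -o; grouping with \( ... \) would be needed for the intended behaviour. The same scan in pure Python, treating both bits uniformly, as a sketch:

    import os
    import stat

    def paths_with_suid_sgid(root_path):
        """Walk root_path and collect regular files with suid or sgid set."""
        found = set()
        for dirpath, _, filenames in os.walk(root_path):
            if dirpath.startswith('/proc'):
                continue
            for name in filenames:
                path = os.path.join(dirpath, name)
                try:
                    st = os.lstat(path)
                except OSError:
                    continue
                if stat.S_ISREG(st.st_mode) and \
                        st.st_mode & (stat.S_ISUID | stat.S_ISGID):
                    found.add(path)
        return found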
- audits.append(SysctlConf()) - # Make sure that only root has access to the sysctl.conf file, and - # that it is read-only. - audits.append(FilePermissionAudit('/etc/sysctl.conf', - user='root', - group='root', mode=0o0440)) - # If module loading is not enabled, then ensure that the modules - # file has the appropriate permissions and rebuild the initramfs - if not settings['security']['kernel_enable_module_loading']: - audits.append(ModulesTemplate()) - - return audits - - -class ModulesContext(object): - - def __call__(self): - settings = utils.get_settings('os') - with open('/proc/cpuinfo', 'r') as fd: - cpuinfo = fd.readlines() - - for line in cpuinfo: - match = re.search(r"^vendor_id\s+:\s+(.+)", line) - if match: - vendor = match.group(1) - - if vendor == "GenuineIntel": - vendor = "intel" - elif vendor == "AuthenticAMD": - vendor = "amd" - - ctxt = {'arch': platform.processor(), - 'cpuVendor': vendor, - 'desktop_enable': settings['general']['desktop_enable']} - - return ctxt - - -class ModulesTemplate(object): - - def __init__(self): - super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules', - ModulesContext(), - templates_dir=TEMPLATES_DIR, - user='root', group='root', - mode=0o0440) - - def post_write(self): - subprocess.check_call(['update-initramfs', '-u']) - - -class SysCtlHardeningContext(object): - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'sysctl': {}} - - log("Applying sysctl settings", level=INFO) - extras = {'net_ipv4_ip_forward': 0, - 'net_ipv6_conf_all_forwarding': 0, - 'net_ipv6_conf_all_disable_ipv6': 1, - 'net_ipv4_tcp_timestamps': 0, - 'net_ipv4_conf_all_arp_ignore': 0, - 'net_ipv4_conf_all_arp_announce': 0, - 'kernel_sysrq': 0, - 'fs_suid_dumpable': 0, - 'kernel_modules_disabled': 1} - - if settings['sysctl']['ipv6_enable']: - extras['net_ipv6_conf_all_disable_ipv6'] = 0 - - if settings['sysctl']['forwarding']: - extras['net_ipv4_ip_forward'] = 1 - extras['net_ipv6_conf_all_forwarding'] = 1 - - if settings['sysctl']['arp_restricted']: - extras['net_ipv4_conf_all_arp_ignore'] = 1 - extras['net_ipv4_conf_all_arp_announce'] = 2 - - if settings['security']['kernel_enable_module_loading']: - extras['kernel_modules_disabled'] = 0 - - if settings['sysctl']['kernel_enable_sysrq']: - sysrq_val = settings['sysctl']['kernel_secure_sysrq'] - extras['kernel_sysrq'] = sysrq_val - - if settings['security']['kernel_enable_core_dump']: - extras['fs_suid_dumpable'] = 1 - - settings.update(extras) - for d in (SYSCTL_DEFAULTS % settings).split(): - d = d.strip().partition('=') - key = d[0].strip() - path = os.path.join('/proc/sys', key.replace('.', '/')) - if not os.path.exists(path): - log("Skipping '%s' since '%s' does not exist" % (key, path), - level=WARNING) - continue - - ctxt['sysctl'][key] = d[2] or None - - return { - 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] - } - - -class SysctlConf(TemplatedFile): - """An audit check for sysctl settings.""" - def __init__(self): - self.conffile = '/etc/sysctl.d/99-juju-hardening.conf' - super(SysctlConf, self).__init__(self.conffile, - SysCtlHardeningContext(), - template_dir=TEMPLATES_DIR, - user='root', group='root', - mode=0o0440) - - def post_write(self): - try: - subprocess.check_call(['sysctl', '-p', self.conffile]) - except subprocess.CalledProcessError as e: - # NOTE: on some systems if sysctl cannot apply all settings it - # will return non-zero as well. 
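SysCtlHardeningContext above skips any key whose /proc/sys counterpart is missing, mapping dots to slashes. Reading the live value back the same way is a short helper, handy for verifying what `sysctl -p` actually applied; a sketch:

    import os

    def read_sysctl(key):
        """Return the current value of a sysctl key, or None if absent."""
        path = os.path.join('/proc/sys', key.replace('.', '/'))
        if not os.path.exists(path):
            return None
        with open(path) as fd:
            return fd.read().strip()

    # e.g. read_sysctl('net.ipv4.ip_forward') -> '0' or '1'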
-                log("sysctl command returned an error (maybe some "
-                    "keys could not be set) - %s" % (e),
-                    level=WARNING)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
deleted file mode 100644
index 0014191f..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-#          be overwritten.
-###############################################################################
-{% if disable_core_dump -%}
-# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
-* hard core 0
-{% endif %}
\ No newline at end of file
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
deleted file mode 100644
index 616cef46..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-TMOUT={{ ssh_tmout }}
-readonly TMOUT
-export TMOUT
-
-readonly HISTFILE
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
deleted file mode 100644
index 101f1e1d..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-#          be overwritten.
-###############################################################################
-{% for key, value in sysctl_settings -%}
-{{ key }}={{ value }}
-{% endfor -%}
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs
deleted file mode 100644
index 7d107637..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/login.defs
+++ /dev/null
@@ -1,349 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-#          be overwritten.
-###############################################################################
-#
-# /etc/login.defs - Configuration control definitions for the login package.
-#
-# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
-# If unspecified, some arbitrary (and possibly incorrect) value will
-# be assumed. All other items are optional - if not specified then
-# the described action or option will be inhibited.
-#
-# Comment lines (lines beginning with "#") and blank lines are ignored.
-#
-# Modified for Linux. --marekm
-
-# REQUIRED for useradd/userdel/usermod
-#   Directory where mailboxes reside, _or_ name of file, relative to the
-#   home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
-#   MAIL_DIR takes precedence.
-# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. -# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. -# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS yes - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". -# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. 
-
-TTYGROUP	tty
-TTYPERM		0600
-
-#
-# Login configuration initializations:
-#
-#	ERASECHAR	Terminal ERASE character ('\010' = backspace).
-#	KILLCHAR	Terminal KILL character ('\025' = CTRL/U).
-#	UMASK		Default "umask" value.
-#
-# The ERASECHAR and KILLCHAR are used only on System V machines.
-#
-# UMASK is the default umask value for pam_umask and is used by
-# useradd and newusers to set the mode of the new home directories.
-# 022 is the "historical" value in Debian for UMASK
-# 027, or even 077, could be considered better for privacy
-# There is no One True Answer here : each sysadmin must make up his/her
-# mind.
-#
-# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
-# for private user groups, i. e. the uid is the same as gid, and username is
-# the same as the primary group name: for these, the user permissions will be
-# used as group permissions, e. g. 022 will become 002.
-#
-# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
-#
-ERASECHAR	0177
-KILLCHAR	025
-UMASK		{{ umask }}
-
-# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
-# If set to yes, userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user.
-USERGROUPS_ENAB yes
-
-#
-# Password aging controls:
-#
-#	PASS_MAX_DAYS	Maximum number of days a password may be used.
-#	PASS_MIN_DAYS	Minimum number of days allowed between password changes.
-#	PASS_WARN_AGE	Number of days warning given before a password expires.
-#
-PASS_MAX_DAYS	{{ pwd_max_age }}
-PASS_MIN_DAYS	{{ pwd_min_age }}
-PASS_WARN_AGE	7
-
-#
-# Min/max values for automatic uid selection in useradd
-#
-UID_MIN			{{ uid_min }}
-UID_MAX			60000
-# System accounts
-SYS_UID_MIN		{{ sys_uid_min }}
-SYS_UID_MAX		{{ sys_uid_max }}
-
-# Min/max values for automatic gid selection in groupadd
-GID_MIN			{{ gid_min }}
-GID_MAX			60000
-# System accounts
-SYS_GID_MIN		{{ sys_gid_min }}
-SYS_GID_MAX		{{ sys_gid_max }}
-
-#
-# Max number of login retries if password is bad. This will most likely be
-# overridden by PAM, since the default pam_unix module has its own built-in
-# limit of 3 retries. However, this is a safe fallback in case you are using
-# an authentication module that does not enforce PAM_MAXTRIES.
-#
-LOGIN_RETRIES	{{ login_retries }}
-
-#
-# Max time in seconds for login
-#
-LOGIN_TIMEOUT	{{ login_timeout }}
-
-#
-# Which fields may be changed by regular users using chfn - use
-# any combination of letters "frwh" (full name, room number, work
-# phone, home phone). If not defined, no changes are allowed.
-# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
-#
-{% if chfn_restrict %}
-CHFN_RESTRICT	{{ chfn_restrict }}
-{% endif %}
-
-#
-# Should login be allowed if we can't cd to the home directory?
-# Default is no.
-#
-DEFAULT_HOME	{% if allow_login_without_home %} yes {% else %} no {% endif %}
-
-#
-# If defined, this command is run when removing a user.
-# It should remove any at/cron/print jobs etc. owned by
-# the user to be removed (passed as the first argument).
-#
-#USERDEL_CMD	/usr/sbin/userdel_local
-
-#
-# Enable setting of the umask group bits to be the same as owner bits
-# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
-# the same as gid, and username is the same as the primary group name.
-#
-# If set to yes, userdel will remove the user's group if it contains no
-# more members, and useradd will create by default a group with the name
-# of the user.
-#
-USERGROUPS_ENAB yes
-
-#
-# Instead of the real user shell, the program specified by this parameter
-# will be launched, although its visible name (argv[0]) will be the shell's.
-# The program may do whatever it wants (logging, additional authentication,
-# banner, ...) before running the actual shell.
-#
-# FAKE_SHELL /bin/fakeshell
-
-#
-# If defined, either full pathname of a file containing device names or
-# a ":" delimited list of device names. Root logins will be allowed only
-# upon these devices.
-#
-# This variable is used by login and su.
-#
-#CONSOLE	/etc/consoles
-#CONSOLE	console:tty01:tty02:tty03:tty04
-
-#
-# List of groups to add to the user's supplementary group set
-# when logging in on the console (as determined by the CONSOLE
-# setting). Default is none.
-#
-# Use with caution - it is possible for users to gain permanent
-# access to these groups, even when not logged in on the console.
-# How to do it is left as an exercise for the reader...
-#
-# This variable is used by login and su.
-#
-#CONSOLE_GROUPS		floppy:audio:cdrom
-
-#
-# If set to "yes", new passwords will be encrypted using the MD5-based
-# algorithm compatible with the one used by recent releases of FreeBSD.
-# It supports passwords of unlimited length and longer salt strings.
-# Set to "no" if you need to copy encrypted passwords to other systems
-# which don't understand the new algorithm. Default is "no".
-#
-# This variable is deprecated. You should use ENCRYPT_METHOD.
-#
-MD5_CRYPT_ENAB no
-
-#
-# If set to MD5, the MD5-based algorithm will be used to encrypt passwords
-# If set to SHA256, the SHA256-based algorithm will be used to encrypt passwords
-# If set to SHA512, the SHA512-based algorithm will be used to encrypt passwords
-# If set to DES, the DES-based algorithm will be used to encrypt passwords (default)
-# Overrides the MD5_CRYPT_ENAB option
-#
-# Note: It is recommended to use a value consistent with
-# the PAM modules configuration.
-#
-ENCRYPT_METHOD SHA512
-
-#
-# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
-#
-# Define the number of SHA rounds.
-# With a lot of rounds, it is more difficult to brute-force the password.
-# But note also that more CPU resources will be needed to authenticate
-# users.
-#
-# If not specified, the libc will choose the default number of rounds (5000).
-# The values must be inside the 1000-999999999 range.
-# If only one of the MIN or MAX values is set, then this value will be used.
-# If MIN > MAX, the highest value will be used.
-#
-# SHA_CRYPT_MIN_ROUNDS 5000
-# SHA_CRYPT_MAX_ROUNDS 5000
-
-################# OBSOLETED BY PAM ##############
-#                                               #
-# These options are now handled by PAM. Please  #
-# edit the appropriate file in /etc/pam.d/ to   #
-# enable the equivalents of them.
-#
-###############
-
-#MOTD_FILE
-#DIALUPS_CHECK_ENAB
-#LASTLOG_ENAB
-#MAIL_CHECK_ENAB
-#OBSCURE_CHECKS_ENAB
-#PORTTIME_CHECKS_ENAB
-#SU_WHEEL_ONLY
-#CRACKLIB_DICTPATH
-#PASS_CHANGE_TRIES
-#PASS_ALWAYS_WARN
-#ENVIRON_FILE
-#NOLOGINS_FILE
-#ISSUE_FILE
-#PASS_MIN_LEN
-#PASS_MAX_LEN
-#ULIMIT
-#ENV_HZ
-#CHFN_AUTH
-#CHSH_AUTH
-#FAIL_DELAY
-
-################# OBSOLETED #######################
-#                                                 #
-# These options are no longer handled by shadow.  #
-#                                                 #
-# Shadow utilities will display a warning if they #
-# still appear.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules
deleted file mode 100644
index ef0354ee..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/modules
+++ /dev/null
@@ -1,117 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-# Arch
-# ----
-#
-# Modules for certain builds; contains support modules and some CPU-specific optimizations.
-
-{% if arch == "x86_64" -%}
-# Optimize for x86_64 cryptographic features
-twofish-x86_64-3way
-twofish-x86_64
-aes-x86_64
-salsa20-x86_64
-blowfish-x86_64
-{% endif -%}
-
-{% if cpuVendor == "intel" -%}
-# Intel-specific optimizations
-ghash-clmulni-intel
-aesni-intel
-kvm-intel
-{% endif -%}
-
-{% if cpuVendor == "amd" -%}
-# AMD-specific optimizations
-kvm-amd
-{% endif -%}
-
-kvm
-
-
-# Crypto
-# ------
-
-# Some core modules which comprise strong cryptography.
-blowfish_common
-blowfish_generic
-ctr
-cts
-lrw
-lzo
-rmd160
-rmd256
-rmd320
-serpent
-sha512_generic
-twofish_common
-twofish_generic
-xts
-zlib
-
-
-# Drivers
-# -------
-
-# Basics
-lp
-rtc
-loop
-
-# Filesystems
-ext2
-btrfs
-
-{% if desktop_enable -%}
-# Desktop
-psmouse
-snd
-snd_ac97_codec
-snd_intel8x0
-snd_page_alloc
-snd_pcm
-snd_timer
-soundcore
-usbhid
-{% endif -%}
-
-# Lib
-# ---
-xz
-
-
-# Net
-# ---
-
-# All modules needed for netfilter rules (i.e. iptables, ebtables).
-ip_tables
-x_tables
-iptable_filter
-iptable_nat
-
-# Targets
-ipt_LOG
-ipt_REJECT
-
-# Modules
-xt_connlimit
-xt_tcpudp
-xt_recent
-xt_limit
-xt_conntrack
-nf_conntrack
-nf_conntrack_ipv4
-nf_defrag_ipv4
-xt_state
-nf_nat
-
-# Addons
-xt_pknock
\ No newline at end of file
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
deleted file mode 100644
index f98d14e5..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-############################################################################### -Name: passwdqc password strength enforcement -Default: yes -Priority: 1024 -Conflicts: cracklib -Password-Type: Primary -Password: - requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh deleted file mode 100644 index fd2de791..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# Disable core dumps via soft limits for all users. Compliance to this setting -# is voluntary and can be modified by users up to a hard limit. This setting is -# a sane default. -ulimit -S -c 0 > /dev/null 2>&1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty deleted file mode 100644 index 15b18d4e..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/securetty +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# A list of TTYs, from which root can log in -# see `man securetty` for reference -{% if ttys -%} -{% for tty in ttys -%} -{{ tty }} -{% endfor -%} -{% endif -%} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 deleted file mode 100644 index d9620299..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/host/templates/tally2 +++ /dev/null @@ -1,14 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -Name: tally2 lockout after failed attempts enforcement -Default: yes -Priority: 1024 -Conflicts: cracklib -Auth-Type: Primary -Auth-Initial: - required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} -Account-Type: Primary -Account-Initial: - required pam_tally2.so diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py deleted file mode 100644 index 1990d851..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.mysql.checks import config - - -def run_mysql_checks(): - log("Starting MySQL hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("MySQL hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py deleted file mode 100644 index 8bf9f36c..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess - -from charmhelpers.core.hookenv import ( - log, - WARNING, -) -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - DirectoryPermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get MySQL hardening config audits. 
- - :returns: dictionary of audits - """ - if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: - log("MySQL does not appear to be installed on this node - " - "skipping mysql hardening", level=WARNING) - return [] - - settings = utils.get_settings('mysql') - hardening_settings = settings['hardening'] - my_cnf = hardening_settings['mysql-conf'] - - audits = [ - FilePermissionAudit(paths=[my_cnf], user='root', - group='root', mode=0o0600), - - TemplatedFile(hardening_settings['hardening-conf'], - MySQLConfContext(), - TEMPLATES_DIR, - mode=0o0750, - user='mysql', - group='root', - service_actions=[{'service': 'mysql', - 'actions': ['restart']}]), - - # MySQL and Percona charms do not allow configuration of the - # data directory, so use the default. - DirectoryPermissionAudit('/var/lib/mysql', - user='mysql', - group='mysql', - recursive=False, - mode=0o755), - - DirectoryPermissionAudit('/etc/mysql', - user='root', - group='root', - recursive=False, - mode=0o700), - ] - - return audits - - -class MySQLConfContext(object): - """Defines the set of key/value pairs to set in a mysql config file. - - This context, when called, will return a dictionary containing the - key/value pairs of setting to specify in the - /etc/mysql/conf.d/hardening.cnf file. - """ - def __call__(self): - settings = utils.get_settings('mysql') - return { - 'mysql_settings': [(k, v) for k, v in settings['security'].items()] - } diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf deleted file mode 100644 index 8242586c..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -[mysqld] -{% for setting, value in mysql_settings -%} -{% if value == 'True' -%} -{{ setting }} -{% elif value != 'None' and value != None -%} -{{ setting }} = {{ value }} -{% endif -%} -{% endfor -%} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py deleted file mode 100644 index edaf484b..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.ssh.checks import config - - -def run_ssh_checks(): - log("Starting SSH hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("SSH hardening checks complete.", level=DEBUG) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py deleted file mode 100644 index 41bed2d1..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_iface_addr, - is_ip, -) -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.fetch import ( - apt_install, - apt_update, -) -from charmhelpers.core.host import ( - lsb_release, - CompareHostReleases, -) -from charmhelpers.contrib.hardening.audits.file import ( - TemplatedFile, - FileContentAudit, -) -from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get SSH hardening config audits. 
-
-    :returns: list of SSH hardening audits
-    """
-    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
-              SSHDConfigFileContentAudit()]
-    return audits
-
-
-class SSHConfigContext(object):
-
-    type = 'client'
-
-    def get_macs(self, allow_weak_mac):
-        if allow_weak_mac:
-            weak_macs = 'weak'
-        else:
-            weak_macs = 'default'
-
-        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
-        macs = {'default': default,
-                'weak': default + ',hmac-sha1'}
-
-        default = ('hmac-sha2-512-etm@openssh.com,'
-                   'hmac-sha2-256-etm@openssh.com,'
-                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
-                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
-        macs_66 = {'default': default,
-                   'weak': default + ',hmac-sha1'}
-
-        # Use newer MACs on Ubuntu Trusty and above
-        _release = lsb_release()['DISTRIB_CODENAME'].lower()
-        if CompareHostReleases(_release) >= 'trusty':
-            log("Detected Ubuntu 14.04 or newer, using new MACs", level=DEBUG)
-            macs = macs_66
-
-        return macs[weak_macs]
-
-    def get_kexs(self, allow_weak_kex):
-        if allow_weak_kex:
-            weak_kex = 'weak'
-        else:
-            weak_kex = 'default'
-
-        default = 'diffie-hellman-group-exchange-sha256'
-        weak = (default + ',diffie-hellman-group14-sha1,'
-                'diffie-hellman-group-exchange-sha1,'
-                'diffie-hellman-group1-sha1')
-        kex = {'default': default,
-               'weak': weak}
-
-        default = ('curve25519-sha256@libssh.org,'
-                   'diffie-hellman-group-exchange-sha256')
-        weak = (default + ',diffie-hellman-group14-sha1,'
-                'diffie-hellman-group-exchange-sha1,'
-                'diffie-hellman-group1-sha1')
-        kex_66 = {'default': default,
-                  'weak': weak}
-
-        # Use newer kex on Ubuntu Trusty and above
-        _release = lsb_release()['DISTRIB_CODENAME'].lower()
-        if CompareHostReleases(_release) >= 'trusty':
-            log('Detected Ubuntu 14.04 or newer, using new key exchange '
-                'algorithms', level=DEBUG)
-            kex = kex_66
-
-        return kex[weak_kex]
-
-    def get_ciphers(self, cbc_required):
-        if cbc_required:
-            weak_ciphers = 'weak'
-        else:
-            weak_ciphers = 'default'
-
-        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
-        cipher = {'default': default,
-                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
-
-        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
-                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
-        ciphers_66 = {'default': default,
-                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
-
-        # Use newer ciphers on Ubuntu Trusty and above
-        _release = lsb_release()['DISTRIB_CODENAME'].lower()
-        if CompareHostReleases(_release) >= 'trusty':
-            log('Detected Ubuntu 14.04 or newer, using new ciphers',
                level=DEBUG)
-            cipher = ciphers_66
-
-        return cipher[weak_ciphers]
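The three get_* helpers above share one pattern: build a modern algorithm list, switch to an extended list on Trusty and newer, and append legacy algorithms only when the operator explicitly opts in (weak_hmac, weak_kex, cbc_required). A stripped-down, standalone rendition of that pattern (hypothetical names, not the charm code):

MODERN = ['chacha20-poly1305@openssh.com', 'aes256-ctr', 'aes192-ctr', 'aes128-ctr']
LEGACY = ['aes256-cbc', 'aes192-cbc', 'aes128-cbc']

def cipher_list(cbc_required=False):
    """Return a comma-separated Ciphers value for ssh(d)_config."""
    algos = list(MODERN)
    if cbc_required:
        # Weak CBC modes are appended, never substituted, so modern
        # ciphers still take precedence during negotiation.
        algos += LEGACY
    return ','.join(algos)

print(cipher_list())                    # modern ciphers only
print(cipher_list(cbc_required=True))   # plus CBC fallbacks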
-
-    def get_listening(self, listen=['0.0.0.0']):
-        """Returns a list of addresses SSH can listen on.
-
-        Turns input into a sensible list of IPs SSH can listen on. Input
-        must be a python list of interface names, IPs and/or CIDRs.
-
-        :param listen: list of IPs, CIDRs, interface names
-
-        :returns: list of IPs available on the host
-        """
-        if listen == ['0.0.0.0']:
-            return listen
-
-        value = []
-        for network in listen:
-            try:
-                ip = get_address_in_network(network=network, fatal=True)
-            except ValueError:
-                if is_ip(network):
-                    ip = network
-                else:
-                    try:
-                        ip = get_iface_addr(iface=network, fatal=False)[0]
-                    except IndexError:
-                        continue
-            value.append(ip)
-        if value == []:
-            return ['0.0.0.0']
-        return value
-
-    def __call__(self):
-        settings = utils.get_settings('ssh')
-        if settings['common']['network_ipv6_enable']:
-            addr_family = 'any'
-        else:
-            addr_family = 'inet'
-
-        ctxt = {
-            'addr_family': addr_family,
-            'remote_hosts': settings['common']['remote_hosts'],
-            'password_auth_allowed':
-            settings['client']['password_authentication'],
-            'ports': settings['common']['ports'],
-            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
-            'macs': self.get_macs(settings['client']['weak_hmac']),
-            'kexs': self.get_kexs(settings['client']['weak_kex']),
-            'roaming': settings['client']['roaming'],
-        }
-        return ctxt
-
-
-class SSHConfig(TemplatedFile):
-    def __init__(self):
-        path = '/etc/ssh/ssh_config'
-        super(SSHConfig, self).__init__(path=path,
-                                        template_dir=TEMPLATES_DIR,
-                                        context=SSHConfigContext(),
-                                        user='root',
-                                        group='root',
-                                        mode=0o0644)
-
-    def pre_write(self):
-        settings = utils.get_settings('ssh')
-        apt_update(fatal=True)
-        apt_install(settings['client']['package'])
-        if not os.path.exists('/etc/ssh'):
-            os.makedirs('/etc/ssh')
-        # NOTE: don't recurse
-        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
-                                 maxdepth=0)
-
-    def post_write(self):
-        # NOTE: don't recurse
-        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
-                                 maxdepth=0)
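get_listening() above accepts a mixed list of literal IPs, CIDRs and interface names and reduces each entry to a bindable address, falling back to 0.0.0.0 when nothing resolves. A toy stdlib-only classifier showing the per-entry decision it makes (an assumption-laden sketch; the real helper resolves CIDRs and interfaces via the charmhelpers network functions, not ipaddress):

import ipaddress

def classify(entry):
    """Label a listen_to entry the way get_listening() dispatches on it."""
    try:
        ipaddress.ip_address(entry)
        return 'literal IP, passed through'
    except ValueError:
        pass
    try:
        ipaddress.ip_network(entry, strict=False)
        return 'CIDR, resolved against local interfaces'
    except ValueError:
        return 'assumed to be an interface name'

for entry in ['10.0.0.5', '10.0.0.0/24', 'eth0']:
    print(entry, '->', classify(entry))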
-
-
-class SSHDConfigContext(SSHConfigContext):
-
-    type = 'server'
-
-    def __call__(self):
-        settings = utils.get_settings('ssh')
-        if settings['common']['network_ipv6_enable']:
-            addr_family = 'any'
-        else:
-            addr_family = 'inet'
-
-        ctxt = {
-            'ssh_ip': self.get_listening(settings['server']['listen_to']),
-            'password_auth_allowed':
-            settings['server']['password_authentication'],
-            'ports': settings['common']['ports'],
-            'addr_family': addr_family,
-            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
-            'macs': self.get_macs(settings['server']['weak_hmac']),
-            'kexs': self.get_kexs(settings['server']['weak_kex']),
-            'host_key_files': settings['server']['host_key_files'],
-            'allow_root_with_key': settings['server']['allow_root_with_key'],
-            'password_authentication':
-            settings['server']['password_authentication'],
-            'use_priv_sep': settings['server']['use_privilege_separation'],
-            'use_pam': settings['server']['use_pam'],
-            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
-            'print_motd': settings['server']['print_motd'],
-            'print_last_log': settings['server']['print_last_log'],
-            'client_alive_interval':
-            settings['server']['alive_interval'],
-            'client_alive_count': settings['server']['alive_count'],
-            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
-            'allow_agent_forwarding':
-            settings['server']['allow_agent_forwarding'],
-            'deny_users': settings['server']['deny_users'],
-            'allow_users': settings['server']['allow_users'],
-            'deny_groups': settings['server']['deny_groups'],
-            'allow_groups': settings['server']['allow_groups'],
-            'use_dns': settings['server']['use_dns'],
-            'sftp_enable': settings['server']['sftp_enable'],
-            'sftp_group': settings['server']['sftp_group'],
-            'sftp_chroot': settings['server']['sftp_chroot'],
-            'max_auth_tries': settings['server']['max_auth_tries'],
-            'max_sessions': settings['server']['max_sessions'],
-        }
-        return ctxt
-
-
-class SSHDConfig(TemplatedFile):
-    def __init__(self):
-        path = '/etc/ssh/sshd_config'
-        super(SSHDConfig, self).__init__(path=path,
-                                         template_dir=TEMPLATES_DIR,
-                                         context=SSHDConfigContext(),
-                                         user='root',
-                                         group='root',
-                                         mode=0o0600,
-                                         service_actions=[{'service': 'ssh',
-                                                           'actions':
-                                                           ['restart']}])
-
-    def pre_write(self):
-        settings = utils.get_settings('ssh')
-        apt_update(fatal=True)
-        apt_install(settings['server']['package'])
-        if not os.path.exists('/etc/ssh'):
-            os.makedirs('/etc/ssh')
-        # NOTE: don't recurse
-        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
-                                 maxdepth=0)
-
-    def post_write(self):
-        # NOTE: don't recurse
-        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
-                                 maxdepth=0)
-
-
-class SSHConfigFileContentAudit(FileContentAudit):
-    def __init__(self):
-        self.path = '/etc/ssh/ssh_config'
-        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
-
-    def is_compliant(self, *args, **kwargs):
-        self.pass_cases = []
-        self.fail_cases = []
-        settings = utils.get_settings('ssh')
-
-        _release = lsb_release()['DISTRIB_CODENAME'].lower()
-        if CompareHostReleases(_release) >= 'trusty':
-            if not settings['client']['weak_hmac']:
-                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
-            else:
-                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
-            if settings['client']['weak_kex']:
-                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
-            else:
-                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
-                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
-
-            if settings['client']['cbc_required']:
-                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
-                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
-                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
-                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-            else:
-                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
-                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
-                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
-                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
-                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-        else:
-            if not settings['client']['weak_hmac']:
-                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
-            else:
-                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
-            if settings['client']['weak_kex']:
-                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
-            else:
-                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
-                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
-                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
-                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
-
-            if 
settings['client']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - - if settings['client']['roaming']: - self.pass_cases.append(r'^UseRoaming yes$') - else: - self.fail_cases.append(r'^UseRoaming yes$') - - return super(SSHConfigFileContentAudit, self).is_compliant(*args, - **kwargs) - - -class SSHDConfigFileContentAudit(FileContentAudit): - def __init__(self): - self.path = '/etc/ssh/sshd_config' - super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) - - def is_compliant(self, *args, **kwargs): - self.pass_cases = [] - self.fail_cases = [] - settings = utils.get_settings('ssh') - - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - if not settings['server']['weak_hmac']: - self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['server']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa - - if settings['server']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - if not settings['server']['weak_hmac']: - self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['server']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - - if settings['server']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - 
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - - if settings['server']['sftp_enable']: - self.pass_cases.append(r'^Subsystem\ssftp') - else: - self.fail_cases.append(r'^Subsystem\ssftp') - - return super(SSHDConfigFileContentAudit, self).is_compliant(*args, - **kwargs) diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config deleted file mode 100644 index 9742d8e2..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config +++ /dev/null @@ -1,70 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# This is the ssh client system-wide configuration file. See -# ssh_config(5) for more information. This file provides defaults for -# users, and the values can be changed in per-user configuration files -# or on the command line. - -# Configuration data is parsed as follows: -# 1. command line options -# 2. user-specific file -# 3. system-wide file -# Any configuration value is only changed the first time it is set. -# Thus, host-specific definitions should be at the beginning of the -# configuration file, and defaults at the end. - -# Site-wide defaults for some commonly used options. For a comprehensive -# list of available options, their meanings and defaults, please see the -# ssh_config(5) man page. - -# Restrict the following configuration to be limited to this Host. 
-{% if remote_hosts -%} -Host {{ ' '.join(remote_hosts) }} -{% endif %} -ForwardAgent no -ForwardX11 no -ForwardX11Trusted yes -RhostsRSAAuthentication no -RSAAuthentication yes -PasswordAuthentication {{ password_auth_allowed }} -HostbasedAuthentication no -GSSAPIAuthentication no -GSSAPIDelegateCredentials no -GSSAPIKeyExchange no -GSSAPITrustDNS no -BatchMode no -CheckHostIP yes -AddressFamily {{ addr_family }} -ConnectTimeout 0 -StrictHostKeyChecking ask -IdentityFile ~/.ssh/identity -IdentityFile ~/.ssh/id_rsa -IdentityFile ~/.ssh/id_dsa -# The port at the destination should be defined -{% for port in ports -%} -Port {{ port }} -{% endfor %} -Protocol 2 -Cipher 3des -{% if ciphers -%} -Ciphers {{ ciphers }} -{%- endif %} -{% if macs -%} -MACs {{ macs }} -{%- endif %} -{% if kexs -%} -KexAlgorithms {{ kexs }} -{%- endif %} -EscapeChar ~ -Tunnel no -TunnelDevice any:any -PermitLocalCommand no -VisualHostKey no -RekeyLimit 1G 1h -SendEnv LANG LC_* -HashKnownHosts yes -{% if roaming -%} -UseRoaming {{ roaming }} -{% endif %} diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config deleted file mode 100644 index 5f87298a..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config +++ /dev/null @@ -1,159 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# Package generated configuration file -# See the sshd_config(5) manpage for details - -# What ports, IPs and protocols we listen for -{% for port in ports -%} -Port {{ port }} -{% endfor -%} -AddressFamily {{ addr_family }} -# Use these options to restrict which interfaces/protocols sshd will bind to -{% if ssh_ip -%} -{% for ip in ssh_ip -%} -ListenAddress {{ ip }} -{% endfor %} -{%- else -%} -ListenAddress :: -ListenAddress 0.0.0.0 -{% endif -%} -Protocol 2 -{% if ciphers -%} -Ciphers {{ ciphers }} -{% endif -%} -{% if macs -%} -MACs {{ macs }} -{% endif -%} -{% if kexs -%} -KexAlgorithms {{ kexs }} -{% endif -%} -# HostKeys for protocol version 2 -{% for keyfile in host_key_files -%} -HostKey {{ keyfile }} -{% endfor -%} - -# Privilege Separation is turned on for security -{% if use_priv_sep -%} -UsePrivilegeSeparation {{ use_priv_sep }} -{% endif -%} - -# Lifetime and size of ephemeral version 1 server key -KeyRegenerationInterval 3600 -ServerKeyBits 1024 - -# Logging -SyslogFacility AUTH -LogLevel VERBOSE - -# Authentication: -LoginGraceTime 30s -{% if allow_root_with_key -%} -PermitRootLogin without-password -{% else -%} -PermitRootLogin no -{% endif %} -PermitTunnel no -PermitUserEnvironment no -StrictModes yes - -RSAAuthentication yes -PubkeyAuthentication yes -AuthorizedKeysFile %h/.ssh/authorized_keys - -# Don't read the user's ~/.rhosts and ~/.shosts files -IgnoreRhosts yes -# For this to work you will also need host keys in /etc/ssh_known_hosts -RhostsRSAAuthentication no -# similar for protocol version 2 -HostbasedAuthentication no -# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication -IgnoreUserKnownHosts yes - -# To enable empty passwords, change to yes (NOT RECOMMENDED) -PermitEmptyPasswords no - -# Change to yes to enable challenge-response passwords (beware issues with -# some PAM modules and threads) -ChallengeResponseAuthentication no - -# Change to no to 
disable tunnelled clear text passwords
-PasswordAuthentication {{ password_authentication }}
-
-# Kerberos options
-KerberosAuthentication no
-KerberosGetAFSToken no
-KerberosOrLocalPasswd no
-KerberosTicketCleanup yes
-
-# GSSAPI options
-GSSAPIAuthentication no
-GSSAPICleanupCredentials yes
-
-X11Forwarding {{ allow_x11_forwarding }}
-X11DisplayOffset 10
-X11UseLocalhost yes
-GatewayPorts no
-PrintMotd {{ print_motd }}
-PrintLastLog {{ print_last_log }}
-TCPKeepAlive no
-UseLogin no
-
-ClientAliveInterval {{ client_alive_interval }}
-ClientAliveCountMax {{ client_alive_count }}
-AllowTcpForwarding {{ allow_tcp_forwarding }}
-AllowAgentForwarding {{ allow_agent_forwarding }}
-
-MaxStartups 10:30:100
-#Banner /etc/issue.net
-
-# Allow client to pass locale environment variables
-AcceptEnv LANG LC_*
-
-# Set this to 'yes' to enable PAM authentication, account processing,
-# and session processing. If this is enabled, PAM authentication will
-# be allowed through the ChallengeResponseAuthentication and
-# PasswordAuthentication. Depending on your PAM configuration,
-# PAM authentication via ChallengeResponseAuthentication may bypass
-# the setting of "PermitRootLogin without-password".
-# If you just want the PAM account and session checks to run without
-# PAM authentication, then enable this but set PasswordAuthentication
-# and ChallengeResponseAuthentication to 'no'.
-UsePAM {{ use_pam }}
-
-{% if deny_users -%}
-DenyUsers {{ deny_users }}
-{% endif -%}
-{% if allow_users -%}
-AllowUsers {{ allow_users }}
-{% endif -%}
-{% if deny_groups -%}
-DenyGroups {{ deny_groups }}
-{% endif -%}
-{% if allow_groups -%}
-AllowGroups {{ allow_groups }}
-{% endif -%}
-UseDNS {{ use_dns }}
-MaxAuthTries {{ max_auth_tries }}
-MaxSessions {{ max_sessions }}
-
-{% if sftp_enable -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-Subsystem sftp internal-sftp -l VERBOSE
-
-## These lines must appear at the *end* of sshd_config
-Match Group {{ sftp_group }}
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory {{ sftp_chroot }}
-{% else -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-## These lines must appear at the *end* of sshd_config
-Match Group sftponly
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory /sftpchroot/home/%u
-{% endif %}
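The conditional SFTP section at the end of the sshd_config template above renders through Jinja2 like any other templated setting. A self-contained sketch of how the Match Group block expands (requires the jinja2 package; the snippet is trimmed to a few representative lines, not the full template):

from jinja2 import Template

SNIPPET = (
    "{% if sftp_enable -%}\n"
    "Subsystem sftp internal-sftp -l VERBOSE\n"
    "Match Group {{ sftp_group }}\n"
    "ChrootDirectory {{ sftp_chroot }}\n"
    "{% endif %}"
)

print(Template(SNIPPET).render(sftp_enable=True,
                               sftp_group='sftponly',
                               sftp_chroot='/sftpchroot/home/%u'))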
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py
deleted file mode 100644
index 4dee5465..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/templating.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-from charmhelpers.core.hookenv import (
-    log,
-    DEBUG,
-    WARNING,
-)
-
-try:
-    from jinja2 import FileSystemLoader, Environment
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    from charmhelpers.fetch import apt_update
-    apt_update(fatal=True)
-    apt_install('python3-jinja2', fatal=True)
-    from jinja2 import FileSystemLoader, Environment
-
-
-# NOTE: function separated from main rendering code to facilitate mocking
-# in unit tests.
-def write(path, data):
-    with open(path, 'wb') as out:
-        out.write(data)
-
-
-def get_template_path(template_dir, path):
-    """Returns the template file which would be used to render the path.
-
-    :param template_dir: the directory the templates are located in
-    :param path: the file path to be written to.
-    :returns: path to the template file
-    """
-    return os.path.join(template_dir, os.path.basename(path))
-
-
-def render_and_write(template_dir, path, context):
-    """Renders the specified template into the file.
-
-    :param template_dir: the directory to load the template from
-    :param path: the path to write the templated contents to
-    :param context: the parameters to pass to the rendering engine
-    """
-    env = Environment(loader=FileSystemLoader(template_dir))
-    template_file = os.path.basename(path)
-    template = env.get_template(template_file)
-    log('Rendering from template: %s' % template.name, level=DEBUG)
-    rendered_content = template.render(context)
-    if not rendered_content:
-        log("Render returned empty content - skipping '%s'" % path,
-            level=WARNING)
-        return
-
-    write(path, rendered_content.encode('utf-8').strip())
-    log('Wrote template %s' % path, level=DEBUG)
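A quick usage sketch for the render_and_write() pattern above. Note that the template file name is derived from the basename of the destination path; the directories here are temporary stand-ins, not paths the charm actually uses:

import os
import tempfile

from jinja2 import Environment, FileSystemLoader

tpl_dir = tempfile.mkdtemp()
out_dir = tempfile.mkdtemp()
with open(os.path.join(tpl_dir, 'login.defs'), 'w') as f:
    f.write('UMASK {{ umask }}\n')

# Same rendering steps as render_and_write(), trimmed to the happy path.
env = Environment(loader=FileSystemLoader(tpl_dir))
content = env.get_template('login.defs').render({'umask': '027'})
out_path = os.path.join(out_dir, 'login.defs')
with open(out_path, 'wb') as out:
    out.write(content.encode('utf-8').strip())

print(open(out_path).read())  # -> UMASK 027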
diff --git a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py b/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py
deleted file mode 100644
index f93851a9..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/hardening/utils.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2016-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import grp
-import os
-import pwd
-import yaml
-
-from charmhelpers.core.hookenv import (
-    log,
-    DEBUG,
-    INFO,
-    WARNING,
-    ERROR,
-)
-
-
-# Global settings cache. Since each hook fire entails a fresh module import it
-# is safe to hold this in memory and not risk missing config changes (since
-# they will result in a new hook fire and thus re-import).
-__SETTINGS__ = {}
-
-
-def _get_defaults(modules):
-    """Load the default config for the provided modules.
-
-    :param modules: stack modules config defaults to lookup.
-    :returns: modules default config dictionary.
-    """
-    default = os.path.join(os.path.dirname(__file__),
-                           'defaults/%s.yaml' % (modules))
-    return yaml.safe_load(open(default))
-
-
-def _get_schema(modules):
-    """Load the config schema for the provided modules.
-
-    NOTE: this schema is intended to have a 1-1 relationship with the keys
-    in the default config and is used as a means to verify valid overrides
-    provided by the user.
-
-    :param modules: stack modules config schema to lookup.
-    :returns: modules default schema dictionary.
-    """
-    schema = os.path.join(os.path.dirname(__file__),
-                          'defaults/%s.yaml.schema' % (modules))
-    return yaml.safe_load(open(schema))
-
-
-def _get_user_provided_overrides(modules):
-    """Load user-provided config overrides.
-
-    :param modules: stack modules to lookup in user overrides yaml file.
-    :returns: overrides dictionary.
-    """
-    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
-                             'hardening.yaml')
-    if os.path.exists(overrides):
-        log("Found user-provided config overrides file '%s'" %
-            (overrides), level=DEBUG)
-        settings = yaml.safe_load(open(overrides))
-        if settings and settings.get(modules):
-            log("Applying '%s' overrides" % (modules), level=DEBUG)
-            return settings.get(modules)
-
-        log("No overrides found for '%s'" % (modules), level=DEBUG)
-    else:
-        log("No hardening config overrides file '%s' found in charm "
-            "root dir" % (overrides), level=DEBUG)
-
-    return {}
-
-
-def _apply_overrides(settings, overrides, schema):
-    """Get overrides config overlaid onto modules defaults.
-
-    :param settings: modules default config.
-    :param overrides: user-provided overrides to apply.
-    :param schema: config schema used to validate override keys.
-    :returns: dictionary of modules config with user overrides applied.
-    """
-    if overrides:
-        for k, v in overrides.items():
-            if k in schema:
-                if schema[k] is None:
-                    settings[k] = v
-                elif type(schema[k]) is dict:
-                    settings[k] = _apply_overrides(settings[k], overrides[k],
-                                                   schema[k])
-                else:
-                    raise Exception("Unexpected type found in schema '%s'" %
                                    type(schema[k]))
-            else:
-                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
-
-    return settings
-
-
-def get_settings(modules):
-    global __SETTINGS__
-    if modules in __SETTINGS__:
-        return __SETTINGS__[modules]
-
-    schema = _get_schema(modules)
-    settings = _get_defaults(modules)
-    overrides = _get_user_provided_overrides(modules)
-    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
-    return __SETTINGS__[modules]
-
-
-def ensure_permissions(path, user, group, permissions, maxdepth=-1):
-    """Ensure permissions for path.
-
-    If path is a file, apply to file and return. If path is a directory,
-    apply recursively (if required) to directory contents and return.
-
-    :param path: path whose permissions are to be set
-    :param user: user name
-    :param group: group name
-    :param permissions: octal permissions
-    :param maxdepth: maximum recursion depth. A negative maxdepth allows
-                     infinite recursion and maxdepth=0 means no recursion.
-    :returns: None
-    """
-    if not os.path.exists(path):
-        log("File '%s' does not exist - cannot set permissions" % (path),
-            level=WARNING)
-        return
-
-    _user = pwd.getpwnam(user)
-    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
-    os.chmod(path, permissions)
-
-    if maxdepth == 0:
-        log("Max recursion depth reached - skipping further recursion",
-            level=DEBUG)
-        return
-    elif maxdepth > 0:
-        maxdepth -= 1
-
-    if os.path.isdir(path):
-        contents = glob.glob("%s/*" % (path))
-        for c in contents:
-            ensure_permissions(c, user=user, group=group,
-                               permissions=permissions, maxdepth=maxdepth)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py b/ceph-mon/hooks/charmhelpers/contrib/network/ip.py deleted file mode 100644 index de56584d..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/network/ip.py +++ /dev/null @@ -1,590 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import re -import subprocess -import socket - -from functools import partial - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import ( - config, - log, - network_get_primary_address, - unit_get, - WARNING, - NoNetworkBinding, -) - -from charmhelpers.core.host import ( - lsb_release, - CompareHostReleases, -) - -try: - import netifaces -except ImportError: - apt_update(fatal=True) - apt_install('python3-netifaces', fatal=True) - import netifaces - -try: - import netaddr -except ImportError: - apt_update(fatal=True) - apt_install('python3-netaddr', fatal=True) - import netaddr - - -def _validate_cidr(network): - try: - netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - -def no_ip_found_error_out(network): - errmsg = ("No IP address found in network(s): %s" % network) - raise ValueError(errmsg) - - -def _get_ipv6_network_from_address(address): - """Get an netaddr.IPNetwork for the given IPv6 address - :param address: a dict as returned by netifaces.ifaddresses - :returns netaddr.IPNetwork: None if the address is a link local or loopback - address - """ - if address['addr'].startswith('fe80') or address['addr'] == "::1": - return None - - prefix = address['netmask'].split("/") - if len(prefix) > 1: - netmask = prefix[1] - else: - netmask = address['netmask'] - return netaddr.IPNetwork("%s/%s" % (address['addr'], - netmask)) - - -def get_address_in_network(network, fallback=None, fatal=False): - """Get an IPv4 or IPv6 address within the network from the host. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. Supports multiple networks as a space-delimited list. - :param fallback (str): If no address is found, return fallback. - :param fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). 
- """ - if network is None: - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - else: - return None - - networks = network.split() or [network] - for network in networks: - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - try: - addresses = netifaces.ifaddresses(iface) - except ValueError: - # If an instance was deleted between - # netifaces.interfaces() run and now, its interfaces are gone - continue - if network.version == 4 and netifaces.AF_INET in addresses: - for addr in addresses[netifaces.AF_INET]: - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - cidr = _get_ipv6_network_from_address(addr) - if cidr and cidr in network: - return str(cidr.ip) - - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - - return None - - -def is_ipv6(address): - """Determine whether provided address is IPv6 or not.""" - try: - address = netaddr.IPAddress(address) - except netaddr.AddrFormatError: - # probably a hostname - so not an address at all! - return False - - return address.version == 6 - - -def is_address_in_network(network, address): - """ - Determine whether the provided address is within a network range. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - :param address: An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :returns boolean: Flag indicating whether address is in network. - """ - try: - network = netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - try: - address = netaddr.IPAddress(address) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Address (%s) is not in correct presentation format" % - address) - - if address in network: - return True - else: - return False - - -def _get_for_address(address, key): - """Retrieve an attribute of or the physical interface that - the IP address provided could be bound to. - - :param address (str): An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :param key: 'iface' for the physical interface name or an attribute - of the configured interface, for example 'netmask'. - :returns str: Requested attribute or None if address is not bindable. 
- """ - address = netaddr.IPAddress(address) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if address.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - else: - return addresses[netifaces.AF_INET][0][key] - - if address.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - network = _get_ipv6_network_from_address(addr) - if not network: - continue - - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - return None - - -get_iface_for_address = partial(_get_for_address, key='iface') - - -get_netmask_for_address = partial(_get_for_address, key='netmask') - - -def resolve_network_cidr(ip_address): - ''' - Resolves the full address cidr of an ip_address based on - configured network interfaces - ''' - netmask = get_netmask_for_address(ip_address) - return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) - - -def format_ipv6_addr(address): - """If address is IPv6, wrap it in '[]' otherwise return None. - - This is required by most configuration files when specifying IPv6 - addresses. - """ - if is_ipv6(address): - return "[%s]" % address - - return None - - -def is_ipv6_disabled(): - try: - result = subprocess.check_output( - ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT, - universal_newlines=True) - except subprocess.CalledProcessError: - return True - - return "net.ipv6.conf.all.disable_ipv6 = 1" in result - - -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, - fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any. - - :param iface: network interface on which address(es) are expected to - be found. - :param inet_type: inet address family - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :return: list of ip addresses - """ - # Extract nic if passed /dev/ethX - if '/' in iface: - iface = iface.split('/')[-1] - - if not exc_list: - exc_list = [] - - try: - inet_num = getattr(netifaces, inet_type) - except AttributeError: - raise Exception("Unknown inet type '%s'" % str(inet_type)) - - interfaces = netifaces.interfaces() - if inc_aliases: - ifaces = [] - for _iface in interfaces: - if iface == _iface or _iface.split(':')[0] == iface: - ifaces.append(_iface) - - if fatal and not ifaces: - raise Exception("Invalid interface '%s'" % iface) - - ifaces.sort() - else: - if iface not in interfaces: - if fatal: - raise Exception("Interface '%s' not found " % (iface)) - else: - return [] - - else: - ifaces = [iface] - - addresses = [] - for netiface in ifaces: - net_info = netifaces.ifaddresses(netiface) - if inet_num in net_info: - for entry in net_info[inet_num]: - if 'addr' in entry and entry['addr'] not in exc_list: - addresses.append(entry['addr']) - - if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." 
% - (iface, inet_type)) - - return sorted(addresses) - - -get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - - -def get_iface_from_addr(addr): - """Work out on which interface the provided address is configured.""" - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - for inet_type in addresses: - for _addr in addresses[inet_type]: - _addr = _addr['addr'] - # link local - ll_key = re.compile("(.+)%.*") - raw = re.match(ll_key, _addr) - if raw: - _addr = raw.group(1) - - if _addr == addr: - log("Address '%s' is configured on iface '%s'" % - (addr, iface)) - return iface - - msg = "Unable to infer net iface on which '%s' is configured" % (addr) - raise Exception(msg) - - -def sniff_iface(f): - """Ensure decorated function is called with a value for iface. - - If no iface provided, inject net iface inferred from unit private address. - """ - def iface_sniffer(*args, **kwargs): - if not kwargs.get('iface', None): - kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) - - return f(*args, **kwargs) - - return iface_sniffer - - -@sniff_iface -def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, - dynamic_only=True): - """Get assigned IPv6 address for a given interface. - - Returns list of addresses found. If no address found, returns empty list. - - If iface is None, we infer the current primary interface by doing a reverse - lookup on the unit private-address. - - We currently only support scope global IPv6 addresses i.e. non-temporary - addresses. If no global IPv6 address is found, return the first one found - in the ipv6 address list. - - :param iface: network interface on which ipv6 address(es) are expected to - be found. - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :param dynamic_only: only recognise dynamic addresses - :return: list of ipv6 addresses - """ - addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', - inc_aliases=inc_aliases, fatal=fatal, - exc_list=exc_list) - - if addresses: - global_addrs = [] - for addr in addresses: - key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") - m = re.match(key_scope_link_local, addr) - if m: - eui_64_mac = m.group(1) - iface = m.group(2) - else: - global_addrs.append(addr) - - if global_addrs: - # Make sure any found global addresses are not temporary - cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output( - cmd).decode('UTF-8', errors='replace') - if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") - else: - key = re.compile("inet6 (.+)/[0-9]+ scope global.*") - - addrs = [] - for line in out.split('\n'): - line = line.strip() - m = re.match(key, line) - if m and 'temporary' not in line: - # Return the first valid address we find - for addr in global_addrs: - if m.group(1) == addr: - if not dynamic_only or \ - m.group(1).endswith(eui_64_mac): - addrs.append(addr) - - if addrs: - return addrs - - if fatal: - raise Exception("Interface '%s' does not have a scope global " - "non-temporary ipv6 address." 
% iface) - - return [] - - -def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """Return a list of bridges on the system.""" - b_regex = "%s/*/bridge" % vnic_dir - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] - - -def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """Return a list of nics comprising a given bridge on the system.""" - brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_regex)] - - -def is_bridge_member(nic): - """Check if a given nic is a member of a bridge.""" - for bridge in get_bridges(): - if nic in get_bridge_nics(bridge): - return True - - return False - - -def is_ip(address): - """ - Returns True if address is a valid IP address. - """ - try: - # Test to see if already an IPv4/IPv6 address - address = netaddr.IPAddress(address) - return True - except (netaddr.AddrFormatError, ValueError): - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - apt_install('python3-dnspython', fatal=True) - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, str): - rtype = 'A' - else: - return None - - try: - answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: - return None - - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname, fallback=None): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - ip_addr = ns_query(hostname) - if not ip_addr: - try: - ip_addr = socket.gethostbyname(hostname) - except Exception: - log("Failed to resolve hostname '%s'" % (hostname), - level=WARNING) - return fallback - return ip_addr - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - apt_install("python3-dnspython", fatal=True) - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - - if not result: - try: - result = socket.gethostbyaddr(address)[0] - except Exception: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] - - -def port_has_listener(address, port): - """ - Returns True if the address:port is open and being listened to, - else False. - - @param address: an IP address or hostname - @param port: integer port - - Note calls 'zc' via a subprocess shell - """ - cmd = ['nc', '-z', address, str(port)] - result = subprocess.call(cmd) - return not(bool(result)) - - -def assert_charm_supports_ipv6(): - """Check whether we are able to support charms ipv6.""" - release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) < "trusty": - raise Exception("IPv6 is not supported in the charms for Ubuntu " - "versions less than Trusty 14.04") - - -def get_relation_ip(interface, cidr_network=None): - """Return this unit's IP for the given interface. - - Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including passed cidr network and - IPv6. - - Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') - - @param interface: string name of the relation. - @param cidr_network: string CIDR Network to select an address from. 
- @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. - @returns IPv6 or IPv4 address - """ - # Select the interface address first - # For possible use as a fallback below with get_address_in_network - try: - # Get the interface specific IP - address = network_get_primary_address(interface) - except NotImplementedError: - # If network-get is not available - address = get_host_ip(unit_get('private-address')) - except NoNetworkBinding: - log("No network binding for {}".format(interface), WARNING) - address = get_host_ip(unit_get('private-address')) - - if config('prefer-ipv6'): - # Currently IPv6 has priority, eventually we want IPv6 to just be - # another network space. - assert_charm_supports_ipv6() - return get_ipv6_addr()[0] - elif cidr_network: - # If a specific CIDR network is passed get the address from that - # network. - return get_address_in_network(cidr_network, address) - - # Return the interface address - return address diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py deleted file mode 100644 index 547de09c..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/alternatives.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
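For reference, the selection order implemented by the get_relation_ip() helper removed above can be sketched in isolation. This is a minimal sketch, not the charm's code: the hook-environment calls (network_get_primary_address, unit_get) are stubbed, and the CIDR and prefer-ipv6 branches are only noted in comments.

    # Sketch of get_relation_ip()'s fallback chain with stubbed hook calls.
    def network_get_primary_address(binding):
        # Stand-in: behave as if network-get is unavailable (older Juju).
        raise NotImplementedError

    def unit_get(key):
        # Stand-in for the unit's private-address.
        return '10.0.0.5'

    def select_relation_ip(interface, cidr_network=None):
        try:
            # Prefer the address bound to the named Juju endpoint.
            address = network_get_primary_address(interface)
        except NotImplementedError:
            # network-get unavailable: fall back to private-address.
            address = unit_get('private-address')
        # The real helper then honours prefer-ipv6 and, when a CIDR is
        # passed, picks the local address inside that network instead.
        return address

    print(select_relation_ip('amqp'))  # -> 10.0.0.5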
- -''' Helper for managing alternatives for file conflict resolution ''' - -import subprocess -import shutil -import os - - -def install_alternative(name, target, source, priority=50): - ''' Install alternative configuration ''' - if (os.path.exists(target) and not os.path.islink(target)): - # Move existing file/directory away before installing - shutil.move(target, '{}.bak'.format(target)) - cmd = [ - 'update-alternatives', '--force', '--install', - target, name, source, str(priority) - ] - subprocess.check_call(cmd) - - -def remove_alternative(name, source): - """Remove an installed alternative configuration file - - :param name: string name of the alternative to remove - :param source: string full path to alternative to remove - """ - cmd = [ - 'update-alternatives', '--remove', - name, source - ] - subprocess.check_call(cmd) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py deleted file mode 100644 index 7f7e5f79..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/__init__.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""OpenStack Security Audit code""" - -import collections -from enum import Enum -import traceback - -from charmhelpers.core.host import cmp_pkgrevno -import charmhelpers.contrib.openstack.utils as openstack_utils -import charmhelpers.core.hookenv as hookenv - - -class AuditType(Enum): - OpenStackSecurityGuide = 1 - - -_audits = {} - -Audit = collections.namedtuple('Audit', 'func filters') - - -def audit(*args): - """Decorator to register an audit. - - These are used to generate audits that can be run on a - deployed system that matches the given configuration - - :param args: List of functions to filter tests against - :type args: List[Callable[Dict]] - """ - def wrapper(f): - test_name = f.__name__ - if _audits.get(test_name): - raise RuntimeError( - "Test name '{}' used more than once" - .format(test_name)) - non_callables = [fn for fn in args if not callable(fn)] - if non_callables: - raise RuntimeError( - "Configuration includes non-callable filters: {}" - .format(non_callables)) - _audits[test_name] = Audit(func=f, filters=args) - return f - return wrapper - - -def is_audit_type(*args): - """This audit is included in the specified kinds of audits. - - :param *args: List of AuditTypes to include this audit in - :type args: List[AuditType] - :rtype: Callable[Dict] - """ - def _is_audit_type(audit_options): - if audit_options.get('audit_type') in args: - return True - else: - return False - return _is_audit_type - - -def since_package(pkg, pkg_version): - """This audit should be run after the specified package version (incl). 
- - :param pkg: Package name to compare - :type pkg: str - :param release: The package version - :type release: str - :rtype: Callable[Dict] - """ - def _since_package(audit_options=None): - return cmp_pkgrevno(pkg, pkg_version) >= 0 - - return _since_package - - -def before_package(pkg, pkg_version): - """This audit should be run before the specified package version (excl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The package version - :type release: str - :rtype: Callable[Dict] - """ - def _before_package(audit_options=None): - return not since_package(pkg, pkg_version)() - - return _before_package - - -def since_openstack_release(pkg, release): - """This audit should run after the specified OpenStack version (incl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The OpenStack release codename - :type release: str - :rtype: Callable[Dict] - """ - def _since_openstack_release(audit_options=None): - _release = openstack_utils.get_os_codename_package(pkg) - return openstack_utils.CompareOpenStackReleases(_release) >= release - - return _since_openstack_release - - -def before_openstack_release(pkg, release): - """This audit should run before the specified OpenStack version (excl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The OpenStack release codename - :type release: str - :rtype: Callable[Dict] - """ - def _before_openstack_release(audit_options=None): - return not since_openstack_release(pkg, release)() - - return _before_openstack_release - - -def it_has_config(config_key): - """This audit should be run based on specified config keys. - - :param config_key: Config key to look for - :type config_key: str - :rtype: Callable[Dict] - """ - def _it_has_config(audit_options): - return audit_options.get(config_key) is not None - - return _it_has_config - - -def run(audit_options): - """Run the configured audits with the specified audit_options. - - :param audit_options: Configuration for the audit - :type audit_options: Config - - :rtype: Dict[str, str] - """ - errors = {} - results = {} - for name, audit in sorted(_audits.items()): - result_name = name.replace('_', '-') - if result_name in audit_options.get('excludes', []): - print( - "Skipping {} because it is" - "excluded in audit config" - .format(result_name)) - continue - if all(p(audit_options) for p in audit.filters): - try: - audit.func(audit_options) - print("{}: PASS".format(name)) - results[result_name] = { - 'success': True, - } - except AssertionError as e: - print("{}: FAIL ({})".format(name, e)) - results[result_name] = { - 'success': False, - 'message': e, - } - except Exception as e: - print("{}: ERROR ({})".format(name, e)) - errors[name] = e - results[result_name] = { - 'success': False, - 'message': e, - } - for name, error in errors.items(): - print("=" * 20) - print("Error in {}: ".format(name)) - traceback.print_tb(error.__traceback__) - print() - return results - - -def action_parse_results(result): - """Parse the result of `run` in the context of an action. 
- - :param result: The result of running the security-checklist - action on a unit - :type result: Dict[str, Dict[str, str]] - :rtype: int - """ - passed = True - for test, result in result.items(): - if result['success']: - hookenv.action_set({test: 'PASS'}) - else: - hookenv.action_set({test: 'FAIL - {}'.format(result['message'])}) - passed = False - if not passed: - hookenv.action_fail("One or more tests failed") - return 0 if passed else 1 diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py deleted file mode 100644 index 79740ed0..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import configparser -import glob -import os.path -import subprocess - -from charmhelpers.contrib.openstack.audits import ( - audit, - AuditType, - # filters - is_audit_type, - it_has_config, -) - -from charmhelpers.core.hookenv import ( - cached, -) - -""" -The Security Guide suggests a specific list of files inside the -config directory for the service having 640 specifically, but -by ensuring the containing directory is 750, only the owner can -write, and only the group can read files within the directory. - -By restricting access to the containing directory, we can more -effectively ensure that there is no accidental leakage if a new -file is added to the service without being added to the security -guide, and to this check. 
-""" -FILE_ASSERTIONS = { - 'barbican': { - '/etc/barbican': {'group': 'barbican', 'mode': '750'}, - }, - 'ceph-mon': { - '/var/lib/charm/ceph-mon/ceph.conf': - {'owner': 'root', 'group': 'root', 'mode': '644'}, - '/etc/ceph/ceph.client.admin.keyring': - {'owner': 'ceph', 'group': 'ceph'}, - '/etc/ceph/rbdmap': {'mode': '644'}, - '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, - '/var/lib/ceph/bootstrap-*/ceph.keyring': - {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} - }, - 'ceph-osd': { - '/var/lib/charm/ceph-osd/ceph.conf': - {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, - '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, - '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, - '/var/lib/ceph/bootstrap-*/ceph.keyring': - {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, - '/var/lib/ceph/radosgw': - {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, - }, - 'cinder': { - '/etc/cinder': {'group': 'cinder', 'mode': '750'}, - }, - 'glance': { - '/etc/glance': {'group': 'glance', 'mode': '750'}, - }, - 'keystone': { - '/etc/keystone': - {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, - }, - 'manilla': { - '/etc/manila': {'group': 'manilla', 'mode': '750'}, - }, - 'neutron-gateway': { - '/etc/neutron': {'group': 'neutron', 'mode': '750'}, - }, - 'neutron-api': { - '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, - }, - 'nova-cloud-controller': { - '/etc/nova': {'group': 'nova', 'mode': '750'}, - }, - 'nova-compute': { - '/etc/nova/': {'group': 'nova', 'mode': '750'}, - }, - 'openstack-dashboard': { - # From security guide - '/etc/openstack-dashboard/local_settings.py': - {'group': 'horizon', 'mode': '640'}, - }, -} - -Ownership = collections.namedtuple('Ownership', 'owner group mode') - - -@cached -def _stat(file): - """ - Get the Ownership information from a file. - - :param file: The path to a file to stat - :type file: str - :returns: owner, group, and mode of the specified file - :rtype: Ownership - :raises subprocess.CalledProcessError: If the underlying stat fails - """ - out = subprocess.check_output( - ['stat', '-c', '%U %G %a', file]).decode('utf-8') - return Ownership(*out.strip().split(' ')) - - -@cached -def _config_ini(path): - """ - Parse an ini file - - :param path: The path to a file to parse - :type file: str - :returns: Configuration contained in path - :rtype: Dict - """ - # When strict is enabled, duplicate options are not allowed in the - # parsed INI; however, Oslo allows duplicate values. This change - # causes us to ignore the duplicate values which is acceptable as - # long as we don't validate any multi-value options - conf = configparser.ConfigParser(strict=False) - conf.read(path) - return dict(conf) - - -def _validate_file_ownership(owner, group, file_name, optional=False): - """ - Validate that a specified file is owned by `owner:group`. 
- - :param owner: Name of the owner - :type owner: str - :param group: Name of the group - :type group: str - :param file_name: Path to the file to verify - :type file_name: str - :param optional: Is this file optional, - ie: Should this test fail when it's missing - :type optional: bool - """ - try: - ownership = _stat(file_name) - except subprocess.CalledProcessError as e: - print("Error reading file: {}".format(e)) - if not optional: - assert False, "Specified file does not exist: {}".format(file_name) - assert owner == ownership.owner, \ - "{} has an incorrect owner: {} should be {}".format( - file_name, ownership.owner, owner) - assert group == ownership.group, \ - "{} has an incorrect group: {} should be {}".format( - file_name, ownership.group, group) - print("Validate ownership of {}: PASS".format(file_name)) - - -def _validate_file_mode(mode, file_name, optional=False): - """ - Validate that a specified file has the specified permissions. - - :param mode: file mode that is desires - :type owner: str - :param file_name: Path to the file to verify - :type file_name: str - :param optional: Is this file optional, - ie: Should this test fail when it's missing - :type optional: bool - """ - try: - ownership = _stat(file_name) - except subprocess.CalledProcessError as e: - print("Error reading file: {}".format(e)) - if not optional: - assert False, "Specified file does not exist: {}".format(file_name) - assert mode == ownership.mode, \ - "{} has an incorrect mode: {} should be {}".format( - file_name, ownership.mode, mode) - print("Validate mode of {}: PASS".format(file_name)) - - -@cached -def _config_section(config, section): - """Read the configuration file and return a section.""" - path = os.path.join(config.get('config_path'), config.get('config_file')) - conf = _config_ini(path) - return conf.get(section) - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide), - it_has_config('files')) -def validate_file_ownership(config): - """Verify that configuration files are owned by the correct user/group.""" - files = config.get('files', {}) - for file_name, options in files.items(): - for key in options.keys(): - if key not in ["owner", "group", "mode"]: - raise RuntimeError( - "Invalid ownership configuration: {}".format(key)) - owner = options.get('owner', config.get('owner', 'root')) - group = options.get('group', config.get('group', 'root')) - optional = options.get('optional', config.get('optional', False)) - if '*' in file_name: - for file in glob.glob(file_name): - if file not in files.keys(): - if os.path.isfile(file): - _validate_file_ownership(owner, group, file, optional) - else: - if os.path.isfile(file_name): - _validate_file_ownership(owner, group, file_name, optional) - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide), - it_has_config('files')) -def validate_file_permissions(config): - """Verify that permissions on configuration files are secure enough.""" - files = config.get('files', {}) - for file_name, options in files.items(): - for key in options.keys(): - if key not in ["owner", "group", "mode"]: - raise RuntimeError( - "Invalid ownership configuration: {}".format(key)) - mode = options.get('mode', config.get('permissions', '600')) - optional = options.get('optional', config.get('optional', False)) - if '*' in file_name: - for file in glob.glob(file_name): - if file not in files.keys(): - if os.path.isfile(file): - _validate_file_mode(mode, file, optional) - else: - if os.path.isfile(file_name): - _validate_file_mode(mode, file_name, optional) - - 
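Both validators above reduce to a single stat(1) call plus an assertion. A standalone sketch of that check follows; the asserted path and mode are illustrative only:

    # 'stat -c "%U %G %a"' prints owner, group and octal mode on one line,
    # which is what the _stat() helper above parses into a namedtuple.
    import collections
    import subprocess

    Ownership = collections.namedtuple('Ownership', 'owner group mode')

    def stat_file(path):
        out = subprocess.check_output(
            ['stat', '-c', '%U %G %a', path]).decode('utf-8')
        return Ownership(*out.strip().split(' '))

    info = stat_file('/etc/passwd')
    assert info.mode == '644', \
        '/etc/passwd has mode {}, expected 644'.format(info.mode)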
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_keystone(audit_options): - """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'api / DEFAULT'" - assert section.get('auth_strategy') == "keystone", \ - "Application is not using Keystone" - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_tls_for_keystone(audit_options): - """Verify that TLS is used to communicate with Keystone.""" - section = _config_section(audit_options, 'keystone_authtoken') - assert section is not None, "Missing section 'keystone_authtoken'" - assert not section.get('insecure') and \ - "https://" in section.get("auth_uri"), \ - "TLS is not used for Keystone" - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_tls_for_glance(audit_options): - """Verify that TLS is used to communicate with Glance.""" - section = _config_section(audit_options, 'glance') - assert section is not None, "Missing section 'glance'" - assert not section.get('insecure') and \ - "https://" in section.get("api_servers"), \ - "TLS is not used for Glance" diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py deleted file mode 100644 index 5c961c58..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Common python helper functions used for OpenStack charm certificates. 
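The Keystone and TLS audits above come down to parsing an Oslo-style INI file and asserting on one section. A minimal configparser sketch, with illustrative file contents:

    import configparser

    SAMPLE = '\n'.join([
        '[DEFAULT]',
        'auth_strategy = keystone',
        '[keystone_authtoken]',
        'auth_uri = https://keystone.example:5000/v3',
    ])

    # strict=False mirrors _config_ini() above: Oslo configs may repeat
    # options, which a strict parser would reject.
    conf = configparser.ConfigParser(strict=False)
    conf.read_string(SAMPLE)

    assert conf['DEFAULT'].get('auth_strategy') == 'keystone', \
        'Application is not using Keystone'
    assert 'https://' in conf['keystone_authtoken'].get('auth_uri', ''), \
        'TLS is not used for Keystone'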
- -import os -import json -from base64 import b64decode - -from charmhelpers.contrib.network.ip import ( - get_hostname, - resolve_network_cidr, -) -from charmhelpers.core.hookenv import ( - local_unit, - network_get_primary_address, - config, - related_units, - relation_get, - relation_ids, - remote_service_name, - NoNetworkBinding, - log, - WARNING, - INFO, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - get_vip_in_network, - ADDRESS_MAP, - get_default_api_bindings, - local_address, -) -from charmhelpers.contrib.network.ip import ( - get_relation_ip, -) - -from charmhelpers.core.host import ( - ca_cert_absolute_path, - install_ca_cert, - mkdir, - write_file, -) - -from charmhelpers.contrib.hahelpers.apache import ( - CONFIG_CA_CERT_FILE, -) - - -class CertRequest(object): - - """Create a request for certificates to be generated - """ - - def __init__(self, json_encode=True): - self.entries = [] - self.hostname_entry = None - self.json_encode = json_encode - - def add_entry(self, net_type, cn, addresses): - """Add a request to the batch - - :param net_type: str network space name request is for - :param cn: str Canonical Name for certificate - :param addresses: [] List of addresses to be used as SANs - """ - self.entries.append({ - 'cn': cn, - 'addresses': addresses}) - - def add_hostname_cn(self): - """Add a request for the hostname of the machine""" - ip = local_address(unit_get_fallback='private-address') - addresses = [ip] - # If a vip is being used without os-hostname config or - # network spaces then we need to ensure the local units - # cert has the appropriate vip in the SAN list - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - addresses.append(vip) - self.hostname_entry = { - 'cn': get_hostname(ip), - 'addresses': addresses} - - def add_hostname_cn_ip(self, addresses): - """Add an address to the SAN list for the hostname request - - :param addr: [] List of address to be added - """ - for addr in addresses: - if addr not in self.hostname_entry['addresses']: - self.hostname_entry['addresses'].append(addr) - - def get_request(self): - """Generate request from the batched up entries - - """ - if self.hostname_entry: - self.entries.append(self.hostname_entry) - request = {} - for entry in self.entries: - sans = sorted(list(set(entry['addresses']))) - request[entry['cn']] = {'sans': sans} - if self.json_encode: - req = {'cert_requests': json.dumps(request, sort_keys=True)} - else: - req = {'cert_requests': request} - req['unit_name'] = local_unit().replace('/', '_') - return req - - -def get_certificate_request(json_encode=True, bindings=None): - """Generate a certificate requests based on the network configuration - - :param json_encode: Encode request in JSON or not. Used for setting - directly on a relation. - :type json_encode: boolean - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: CertRequest request as dictionary or JSON string. 
- :rtype: Union[dict, json] - """ - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - req = CertRequest(json_encode=json_encode) - req.add_hostname_cn() - # Add os-hostname entries - _sans = get_certificate_sans(bindings=bindings) - - # Handle specific hostnames per binding - for binding in bindings: - try: - hostname_override = config(ADDRESS_MAP[binding]['override']) - except KeyError: - hostname_override = None - try: - try: - net_addr = resolve_address(endpoint_type=binding) - except KeyError: - net_addr = None - ip = network_get_primary_address(binding) - addresses = [net_addr, ip] - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - addresses.append(vip) - - # Clear any Nones or duplicates - addresses = list(set([i for i in addresses if i])) - # Add hostname certificate request - if hostname_override: - req.add_entry( - binding, - hostname_override, - addresses) - # Remove hostname specific addresses from _sans - for addr in addresses: - try: - _sans.remove(addr) - except (ValueError, KeyError): - pass - - except NoNetworkBinding: - log("Skipping request for certificate for ip in {} space, no " - "local address found".format(binding), WARNING) - # Guarantee all SANs are covered - # These are network addresses with no corresponding hostname. - # Add the ips to the hostname cert to allow for this. - req.add_hostname_cn_ip(_sans) - return req.get_request() - - -def get_certificate_sans(bindings=None): - """Get all possible IP addresses for certificate SANs. - - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: List of binding string names - :rtype: List[str] - """ - _sans = [local_address(unit_get_fallback='private-address')] - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - for binding in bindings: - # Check for config override - try: - net_config = config(ADDRESS_MAP[binding]['config']) - except KeyError: - # There is no configuration network for this binding name - net_config = None - # Using resolve_address is likely redundant. Keeping it here in - # case there is an edge case it handles. - try: - net_addr = resolve_address(endpoint_type=binding) - except KeyError: - net_addr = None - ip = get_relation_ip(binding, cidr_network=net_config) - _sans = _sans + [net_addr, ip] - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - _sans.append(vip) - # Clear any Nones and duplicates - return list(set([i for i in _sans if i])) - - -def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): - """Create symlinks for SAN records - - :param ssl_dir: str Directory to create symlinks in - :param custom_hostname_link: str Additional link to be created - :param bindings: List of bindings to check in addition to default api - bindings. 
- :type bindings: list of strings - """ - - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - # This includes the hostname cert and any specific bindng certs: - # admin, internal, public - req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"] - # Specific certs - for cert_req in req.keys(): - requested_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(cert_req)) - requested_key = os.path.join( - ssl_dir, - 'key_{}'.format(cert_req)) - for addr in req[cert_req]['sans']: - cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) - key = os.path.join(ssl_dir, 'key_{}'.format(addr)) - if os.path.isfile(requested_cert) and not os.path.isfile(cert): - os.symlink(requested_cert, cert) - os.symlink(requested_key, key) - - # Handle custom hostnames - hostname = get_hostname(local_address(unit_get_fallback='private-address')) - hostname_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(hostname)) - hostname_key = os.path.join( - ssl_dir, - 'key_{}'.format(hostname)) - if custom_hostname_link: - custom_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(custom_hostname_link)) - custom_key = os.path.join( - ssl_dir, - 'key_{}'.format(custom_hostname_link)) - if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): - os.symlink(hostname_cert, custom_cert) - os.symlink(hostname_key, custom_key) - - -def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): - """Install the certs passed into the ssl dir and append the chain if - provided. - - :param ssl_dir: str Directory to create symlinks in - :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} - :param chain: str Chain to be appended to certs - :param user: (Optional) Owner of certificate files. Defaults to 'root' - :type user: str - :param group: (Optional) Group of certificate files. Defaults to 'root' - :type group: str - """ - for cn, bundle in certs.items(): - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - cert_data = bundle['cert'] - if chain: - # Append chain file so that clients that trust the root CA will - # trust certs signed by an intermediate in the chain - cert_data = cert_data + os.linesep + chain - write_file( - path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, - content=cert_data, perms=0o640) - write_file( - path=os.path.join(ssl_dir, key_filename), owner=user, group=group, - content=bundle['key'], perms=0o640) - - -def get_cert_relation_ca_name(cert_relation_id=None): - """Determine CA certificate name as provided by relation. - - The filename on disk depends on the name chosen for the application on the - providing end of the certificates relation. - - :param cert_relation_id: (Optional) Relation id providing the certs - :type cert_relation_id: str - :returns: CA certificate filename without path nor extension - :rtype: str - """ - if cert_relation_id is None: - try: - cert_relation_id = relation_ids('certificates')[0] - except IndexError: - return '' - return '{}_juju_ca_cert'.format( - remote_service_name(relid=cert_relation_id)) - - -def _manage_ca_certs(ca, cert_relation_id): - """Manage CA certs. - - :param ca: CA Certificate from certificate relation. 
- :type ca: str - :param cert_relation_id: Relation id providing the certs - :type cert_relation_id: str - """ - config_ssl_ca = config('ssl_ca') - config_cert_file = ca_cert_absolute_path(CONFIG_CA_CERT_FILE) - if config_ssl_ca: - log("Installing CA certificate from charm ssl_ca config to {}".format( - config_cert_file), INFO) - install_ca_cert( - b64decode(config_ssl_ca).rstrip(), - name=CONFIG_CA_CERT_FILE) - elif os.path.exists(config_cert_file): - log("Removing CA certificate {}".format(config_cert_file), INFO) - os.remove(config_cert_file) - log("Installing CA certificate from certificate relation", INFO) - install_ca_cert( - ca.encode(), - name=get_cert_relation_ca_name(cert_relation_id)) - - -def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None, user='root', group='root', - bindings=None): - """Process the certificates supplied down the relation - - :param service_name: str Name of service the certificates are for. - :param relation_id: str Relation id providing the certs - :param unit: str Unit providing the certs - :param custom_hostname_link: str Name of custom link to create - :param user: (Optional) Owner of certificate files. Defaults to 'root' - :type user: str - :param group: (Optional) Group of certificate files. Defaults to 'root' - :type group: str - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: True if certificates processed for local unit or False - :rtype: bool - """ - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - data = relation_get(rid=relation_id, unit=unit) - ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) - mkdir(path=ssl_dir) - name = local_unit().replace('/', '_') - certs = data.get('{}.processed_requests'.format(name)) - chain = data.get('chain') - ca = data.get('ca') - if certs: - certs = json.loads(certs) - _manage_ca_certs(ca, relation_id) - install_certs(ssl_dir, certs, chain, user=user, group=group) - create_ip_cert_links( - ssl_dir, - custom_hostname_link=custom_hostname_link, - bindings=bindings) - return True - return False - - -def get_requests_for_local_unit(relation_name=None): - """Extract any certificates data targeted at this unit down relation_name. - - :param relation_name: str Name of relation to check for data. - :returns: List of bundles of certificates. - :rtype: List of dicts - """ - local_name = local_unit().replace('/', '_') - raw_certs_key = '{}.processed_requests'.format(local_name) - relation_name = relation_name or 'certificates' - bundles = [] - for rid in relation_ids(relation_name): - for unit in related_units(rid): - data = relation_get(rid=rid, unit=unit) - if data.get(raw_certs_key): - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': json.loads(data[raw_certs_key])}) - return bundles - - -def get_bundle_for_cn(cn, relation_name=None): - """Extract certificates for the given cn. - - :param cn: str Canonical Name on certificate. - :param relation_name: str Relation to check for certificates down. - :returns: Dictionary of certificate data, - :rtype: dict. 
- """ - entries = get_requests_for_local_unit(relation_name) - cert_bundle = {} - for entry in entries: - for _cn, bundle in entry['certs'].items(): - if _cn == cn: - cert_bundle = { - 'cert': bundle['cert'], - 'key': bundle['key'], - 'chain': entry['chain'], - 'ca': entry['ca']} - break - if cert_bundle: - break - return cert_bundle diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py deleted file mode 100644 index 32c69ff7..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/context.py +++ /dev/null @@ -1,3361 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import copy -import enum -import glob -import hashlib -import json -import math -import os -import re -import socket -import time - -from base64 import b64decode -from subprocess import ( - check_call, - check_output, - CalledProcessError) - -import charmhelpers.contrib.storage.linux.ceph as ch_ceph - -from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( - _config_ini as config_ini -) - -from charmhelpers.fetch import ( - apt_install, - filter_installed_packages, -) -from charmhelpers.core.hookenv import ( - NoNetworkBinding, - config, - is_relation_made, - local_unit, - log, - relation_get, - relation_ids, - related_units, - relation_set, - unit_private_ip, - charm_name, - DEBUG, - INFO, - ERROR, - status_set, - network_get_primary_address, - WARNING, - service_name, -) - -from charmhelpers.core.sysctl import create as sysctl_create -from charmhelpers.core.strutils import bool_from_string -from charmhelpers.contrib.openstack.exceptions import OSContextError - -from charmhelpers.core.host import ( - get_bond_master, - is_phy_iface, - list_nics, - get_nic_hwaddr, - mkdir, - write_file, - pwgen, - lsb_release, - CompareHostReleases, -) -from charmhelpers.contrib.hahelpers.cluster import ( - determine_apache_port, - determine_api_port, - https, - is_clustered, -) -from charmhelpers.contrib.hahelpers.apache import ( - get_cert, - get_ca_cert, - install_ca_cert, -) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, - parse_data_port_mappings, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - INTERNAL, - ADMIN, - PUBLIC, - ADDRESS_MAP, - local_address, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv4_addr, - get_ipv6_addr, - get_netmask_for_address, - format_ipv6_addr, - is_bridge_member, - is_ipv6_disabled, - get_relation_ip, -) -from charmhelpers.contrib.openstack.utils import ( - config_flags_parser, - get_os_codename_install_source, - enable_memcache, - CompareOpenStackReleases, - os_release, -) -from charmhelpers.core.unitdata import kv - -from charmhelpers.contrib.hardware import pci - -try: - import psutil -except ImportError: - apt_install('python3-psutil', fatal=True) - import psutil - -CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' 
-ADDRESS_TYPES = ['admin', 'internal', 'public'] -HAPROXY_RUN_DIR = '/var/run/haproxy/' -DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" - - -def ensure_packages(packages): - """Install but do not upgrade required plugin packages.""" - required = filter_installed_packages(packages) - if required: - apt_install(required, fatal=True) - - -def context_complete(ctxt): - _missing = [k for k, v in ctxt.items() if v is None or v == ''] - - if _missing: - log('Missing required data: %s' % ' '.join(_missing), level=INFO) - return False - - return True - - -class OSContextGenerator(object): - """Base class for all context generators.""" - interfaces = [] - related = False - complete = False - missing_data = [] - - def __call__(self): - raise NotImplementedError - - def context_complete(self, ctxt): - """Check for missing data for the required context data. - Set self.missing_data if it exists and return False. - Set self.complete if no missing data and return True. - """ - # Fresh start - self.complete = False - self.missing_data = [] - for k, v in ctxt.items(): - if v is None or v == '': - if k not in self.missing_data: - self.missing_data.append(k) - - if self.missing_data: - self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), - level=INFO) - else: - self.complete = True - return self.complete - - def get_related(self): - """Check if any of the context interfaces have relation ids. - Set self.related and return True if one of the interfaces - has relation ids. - """ - # Fresh start - self.related = False - try: - for interface in self.interfaces: - if relation_ids(interface): - self.related = True - return self.related - except AttributeError as e: - log("{} {}" - "".format(self, e), 'INFO') - return self.related - - -class SharedDBContext(OSContextGenerator): - interfaces = ['shared-db'] - - def __init__(self, database=None, user=None, relation_prefix=None, - ssl_dir=None, relation_id=None): - """Allows inspecting relation for settings prefixed with - relation_prefix. This is useful for parsing access for multiple - databases returned via the shared-db interface (eg, nova_password, - quantum_password) - """ - self.relation_prefix = relation_prefix - self.database = database - self.user = user - self.ssl_dir = ssl_dir - self.rel_name = self.interfaces[0] - self.relation_id = relation_id - - def __call__(self): - self.database = self.database or config('database') - self.user = self.user or config('database-user') - if None in [self.database, self.user]: - log("Could not generate shared_db context. Missing required charm " - "config options. (database name and user)", level=ERROR) - raise OSContextError - - ctxt = {} - - # NOTE(jamespage) if mysql charm provides a network upon which - # access to the database should be made, reconfigure relation - # with the service units local address and defer execution - access_network = relation_get('access-network') - if access_network is not None: - if self.relation_prefix is not None: - hostname_key = "{}_hostname".format(self.relation_prefix) - else: - hostname_key = "hostname" - access_hostname = get_address_in_network( - access_network, - local_address(unit_get_fallback='private-address')) - set_hostname = relation_get(attribute=hostname_key, - unit=local_unit()) - if set_hostname != access_hostname: - relation_set(relation_settings={hostname_key: access_hostname}) - return None # Defer any further hook execution for now.... 
- - password_setting = 'password' - if self.relation_prefix: - password_setting = self.relation_prefix + '_password' - - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.interfaces[0]) - - rel = (get_os_codename_install_source(config('openstack-origin')) or - 'icehouse') - for rid in rids: - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - host = rdata.get('db_host') - host = format_ipv6_addr(host) or host - ctxt = { - 'database_host': host, - 'database': self.database, - 'database_user': self.user, - 'database_password': rdata.get(password_setting), - 'database_type': 'mysql+pymysql' - } - # Port is being introduced with LP Bug #1876188 - # but it not currently required and may not be set in all - # cases, particularly in classic charms. - port = rdata.get('db_port') - if port: - ctxt['database_port'] = port - if CompareOpenStackReleases(rel) < 'queens': - ctxt['database_type'] = 'mysql' - if self.context_complete(ctxt): - db_ssl(rdata, ctxt, self.ssl_dir) - return ctxt - return {} - - -class PostgresqlDBContext(OSContextGenerator): - interfaces = ['pgsql-db'] - - def __init__(self, database=None): - self.database = database - - def __call__(self): - self.database = self.database or config('database') - if self.database is None: - log('Could not generate postgresql_db context. Missing required ' - 'charm config options. (database name)', level=ERROR) - raise OSContextError - - ctxt = {} - for rid in relation_ids(self.interfaces[0]): - self.related = True - for unit in related_units(rid): - rel_host = relation_get('host', rid=rid, unit=unit) - rel_user = relation_get('user', rid=rid, unit=unit) - rel_passwd = relation_get('password', rid=rid, unit=unit) - ctxt = {'database_host': rel_host, - 'database': self.database, - 'database_user': rel_user, - 'database_password': rel_passwd, - 'database_type': 'postgresql'} - if self.context_complete(ctxt): - return ctxt - - return {} - - -def db_ssl(rdata, ctxt, ssl_dir): - if 'ssl_ca' in rdata and ssl_dir: - ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_ca'])) - - ctxt['database_ssl_ca'] = ca_path - elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found", level=INFO) - return ctxt - - if 'ssl_cert' in rdata: - cert_path = os.path.join( - ssl_dir, 'db-client.cert') - if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity", level=INFO) - time.sleep(60) - - with open(cert_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_cert'])) - - ctxt['database_ssl_cert'] = cert_path - key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_key'])) - - ctxt['database_ssl_key'] = key_path - - return ctxt - - -class IdentityServiceContext(OSContextGenerator): - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-service'): - self.service = service - self.service_user = service_user - self.rel_name = rel_name - self.interfaces = [self.rel_name] - - def _setup_pki_cache(self): - if self.service and self.service_user: - # This is required for pki token signing if we don't want /tmp to - # be used. 
- cachedir = '/var/cache/%s' % (self.service) - if not os.path.isdir(cachedir): - log("Creating service cache dir %s" % (cachedir), level=DEBUG) - mkdir(path=cachedir, owner=self.service_user, - group=self.service_user, perms=0o700) - - return cachedir - return None - - def _get_pkg_name(self, python_name='keystonemiddleware'): - """Get corresponding distro installed package for python - package name. - - :param python_name: nameof the python package - :type: string - """ - pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) - - for pkg in pkg_names: - if not filter_installed_packages((pkg,)): - return pkg - - return None - - def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): - """Build Jinja2 context for full rendering of [keystone_authtoken] - section with variable names included. Re-constructed from former - template 'section-keystone-auth-mitaka'. - - :param ctxt: Jinja2 context returned from self.__call__() - :type: dict - :param keystonemiddleware_os_rel: OpenStack release name of - keystonemiddleware package installed - """ - c = collections.OrderedDict((('auth_type', 'password'),)) - - # 'www_authenticate_uri' replaced 'auth_uri' since Stein, - # see keystonemiddleware upstream sources for more info - if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': - c.update(( - ('www_authenticate_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) - else: - c.update(( - ('auth_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) - - c.update(( - ('auth_url', "{}://{}:{}/v3".format( - ctxt.get('auth_protocol', ''), - ctxt.get('auth_host', ''), - ctxt.get('auth_port', ''))), - ('project_domain_name', ctxt.get('admin_domain_name', '')), - ('user_domain_name', ctxt.get('admin_domain_name', '')), - ('project_name', ctxt.get('admin_tenant_name', '')), - ('username', ctxt.get('admin_user', '')), - ('password', ctxt.get('admin_password', '')), - ('signing_dir', ctxt.get('signing_dir', '')),)) - - if ctxt.get('service_type'): - c.update((('service_type', ctxt.get('service_type')),)) - - return c - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - keystonemiddleware_os_release = None - if self._get_pkg_name(): - keystonemiddleware_os_release = os_release(self._get_pkg_name()) - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') - serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - int_host = rdata.get('internal_host') - int_host = format_ipv6_addr(int_host) or int_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - int_protocol = rdata.get('internal_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'internal_host': int_host, - 'internal_port': rdata.get('internal_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': 
rdata.get('service_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'internal_protocol': int_protocol, - 'api_version': api_version}) - - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') - - if float(api_version) > 2: - ctxt.update({ - 'admin_domain_name': rdata.get('service_domain'), - 'service_project_id': rdata.get('service_tenant_id'), - 'service_domain_id': rdata.get('service_domain_id')}) - - # we keep all veriables in ctxt for compatibility and - # add nested dictionary for keystone_authtoken generic - # templating - if keystonemiddleware_os_release: - ctxt['keystone_authtoken'] = \ - self._get_keystone_authtoken_ctxt( - ctxt, keystonemiddleware_os_release) - - if self.context_complete(ctxt): - # NOTE(jamespage) this is required for >= icehouse - # so a missing value just indicates keystone needs - # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') - return ctxt - - return {} - - -class IdentityCredentialsContext(IdentityServiceContext): - '''Context for identity-credentials interface type''' - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-credentials'): - super(IdentityCredentialsContext, self).__init__(service, - service_user, - rel_name) - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - credentials_host = rdata.get('credentials_host') - credentials_host = ( - format_ipv6_addr(credentials_host) or credentials_host - ) - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - svc_protocol = rdata.get('credentials_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({ - 'service_port': rdata.get('credentials_port'), - 'service_host': credentials_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('credentials_project'), - 'admin_tenant_id': rdata.get('credentials_project_id'), - 'admin_user': rdata.get('credentials_username'), - 'admin_password': rdata.get('credentials_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'api_version': api_version - }) - - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') - - if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('domain')}) - - if self.context_complete(ctxt): - return ctxt - - return {} - - -class NovaVendorMetadataContext(OSContextGenerator): - """Context used for configuring nova vendor metadata on nova.conf file.""" - - def __init__(self, os_release_pkg, interfaces=None): - """Initialize the NovaVendorMetadataContext object. - - :param os_release_pkg: the package name to extract the OpenStack - release codename from. - :type os_release_pkg: str - :param interfaces: list of string values to be used as the Context's - relation interfaces. 
- :type interfaces: List[str] - """ - self.os_release_pkg = os_release_pkg - if interfaces is not None: - self.interfaces = interfaces - - def __call__(self): - cmp_os_release = CompareOpenStackReleases( - os_release(self.os_release_pkg)) - ctxt = {'vendor_data': False} - - vdata_providers = [] - vdata = config('vendor-data') - vdata_url = config('vendor-data-url') - - if vdata: - try: - # validate the JSON. If invalid, we do not set anything here - json.loads(vdata) - except (TypeError, ValueError) as e: - log('Error decoding vendor-data. {}'.format(e), level=ERROR) - else: - ctxt['vendor_data'] = True - # Mitaka does not support DynamicJSON - # so vendordata_providers is not needed - if cmp_os_release > 'mitaka': - vdata_providers.append('StaticJSON') - - if vdata_url: - if cmp_os_release > 'mitaka': - ctxt['vendor_data_url'] = vdata_url - vdata_providers.append('DynamicJSON') - else: - log('Dynamic vendor data unsupported' - ' for {}.'.format(cmp_os_release), level=ERROR) - if vdata_providers: - ctxt['vendordata_providers'] = ','.join(vdata_providers) - - return ctxt - - -class NovaVendorMetadataJSONContext(OSContextGenerator): - """Context used for writing nova vendor metadata json file.""" - - def __init__(self, os_release_pkg): - """Initialize the NovaVendorMetadataJSONContext object. - - :param os_release_pkg: the package name to extract the OpenStack - release codename from. - :type os_release_pkg: str - """ - self.os_release_pkg = os_release_pkg - - def __call__(self): - ctxt = {'vendor_data_json': '{}'} - - vdata = config('vendor-data') - if vdata: - try: - # validate the JSON. If invalid, we return empty. - json.loads(vdata) - except (TypeError, ValueError) as e: - log('Error decoding vendor-data. {}'.format(e), level=ERROR) - else: - ctxt['vendor_data_json'] = vdata - - return ctxt - - -class AMQPContext(OSContextGenerator): - - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, - relation_id=None): - self.ssl_dir = ssl_dir - self.rel_name = rel_name - self.relation_prefix = relation_prefix - self.interfaces = [rel_name] - self.relation_id = relation_id - - def __call__(self): - log('Generating template context for amqp', level=DEBUG) - conf = config() - if self.relation_prefix: - user_setting = '%s-rabbit-user' % (self.relation_prefix) - vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) - else: - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' - - try: - username = conf[user_setting] - vhost = conf[vhost_setting] - except KeyError as e: - log('Could not generate shared_db context. Missing required charm ' - 'config options: %s.' 
% e, level=ERROR) - raise OSContextError - - ctxt = {} - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.rel_name) - for rid in rids: - ha_vip_only = False - self.related = True - transport_hosts = None - rabbitmq_port = '5672' - for unit in related_units(rid): - if relation_get('clustered', rid=rid, unit=unit): - ctxt['clustered'] = True - vip = relation_get('vip', rid=rid, unit=unit) - vip = format_ipv6_addr(vip) or vip - ctxt['rabbitmq_host'] = vip - transport_hosts = [vip] - else: - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - ctxt['rabbitmq_host'] = host - transport_hosts = [host] - - ctxt.update({ - 'rabbitmq_user': username, - 'rabbitmq_password': relation_get('password', rid=rid, - unit=unit), - 'rabbitmq_virtual_host': vhost, - }) - - ssl_port = relation_get('ssl_port', rid=rid, unit=unit) - if ssl_port: - ctxt['rabbit_ssl_port'] = ssl_port - rabbitmq_port = ssl_port - - ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) - if ssl_ca: - ctxt['rabbit_ssl_ca'] = ssl_ca - - if relation_get('ha_queues', rid=rid, unit=unit) is not None: - ctxt['rabbitmq_ha_queues'] = True - - ha_vip_only = relation_get('ha-vip-only', - rid=rid, unit=unit) is not None - - if self.context_complete(ctxt): - if 'rabbit_ssl_ca' in ctxt: - if not self.ssl_dir: - log("Charm not setup for ssl support but ssl ca " - "found", level=INFO) - break - - ca_path = os.path.join( - self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(ctxt['rabbit_ssl_ca'])) - ctxt['rabbit_ssl_ca'] = ca_path - - # Sufficient information found = break out! - break - - # Used for active/active rabbitmq >= grizzly - if (('clustered' not in ctxt or ha_vip_only) and - len(related_units(rid)) > 1): - rabbitmq_hosts = [] - for unit in related_units(rid): - host = relation_get('private-address', rid=rid, unit=unit) - if not relation_get('password', rid=rid, unit=unit): - log( - ("Skipping {} password not sent which indicates " - "unit is not ready.".format(host)), - level=DEBUG) - continue - host = format_ipv6_addr(host) or host - rabbitmq_hosts.append(host) - - rabbitmq_hosts = sorted(rabbitmq_hosts) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) - transport_hosts = rabbitmq_hosts - - if transport_hosts: - transport_url_hosts = ','.join([ - "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], - ctxt['rabbitmq_password'], - host_, - rabbitmq_port) - for host_ in transport_hosts]) - ctxt['transport_url'] = "rabbit://{}/{}".format( - transport_url_hosts, vhost) - - oslo_messaging_flags = conf.get('oslo-messaging-flags', None) - if oslo_messaging_flags: - ctxt['oslo_messaging_flags'] = config_flags_parser( - oslo_messaging_flags) - - oslo_messaging_driver = conf.get( - 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) - if oslo_messaging_driver: - ctxt['oslo_messaging_driver'] = oslo_messaging_driver - - notification_format = conf.get('notification-format', None) - if notification_format: - ctxt['notification_format'] = notification_format - - notification_topics = conf.get('notification-topics', None) - if notification_topics: - ctxt['notification_topics'] = notification_topics - - send_notifications_to_logs = conf.get('send-notifications-to-logs', None) - if send_notifications_to_logs: - ctxt['send_notifications_to_logs'] = send_notifications_to_logs - - if not self.complete: - return {} - - return ctxt - - -class CephContext(OSContextGenerator): - """Generates context for /etc/ceph/ceph.conf templates.""" - 
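The transport_url assembled by AMQPContext above uses the oslo.messaging multi-host form, one user:password@host:port element per broker. A tiny sketch with illustrative credentials:

    user, password, vhost = 'openstack', 's3cr3t', 'openstack'
    hosts, port = ['10.0.0.11', '10.0.0.12'], 5672

    transport_url = 'rabbit://' + ','.join(
        '{}:{}@{}:{}'.format(user, password, h, port) for h in hosts
    ) + '/' + vhost
    print(transport_url)
    # rabbit://openstack:s3cr3t@10.0.0.11:5672,openstack:s3cr3t@10.0.0.12:5672/openstack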
interfaces = ['ceph'] - - def __call__(self): - if not relation_ids('ceph'): - return {} - - log('Generating template context for ceph', level=DEBUG) - mon_hosts = [] - ctxt = { - 'use_syslog': str(config('use-syslog')).lower() - } - for rid in relation_ids('ceph'): - for unit in related_units(rid): - if not ctxt.get('auth'): - ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) - if not ctxt.get('key'): - ctxt['key'] = relation_get('key', rid=rid, unit=unit) - if not ctxt.get('rbd_features'): - default_features = relation_get('rbd-features', rid=rid, unit=unit) - if default_features is not None: - ctxt['rbd_features'] = default_features - - ceph_addrs = relation_get('ceph-public-address', rid=rid, - unit=unit) - if ceph_addrs: - for addr in ceph_addrs.split(' '): - mon_hosts.append(format_ipv6_addr(addr) or addr) - else: - priv_addr = relation_get('private-address', rid=rid, - unit=unit) - mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) - - ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) - - if config('pool-type') and config('pool-type') == 'erasure-coded': - base_pool_name = config('rbd-pool') or config('rbd-pool-name') - if not base_pool_name: - base_pool_name = service_name() - ctxt['rbd_default_data_pool'] = base_pool_name - - if not os.path.isdir('/etc/ceph'): - os.mkdir('/etc/ceph') - - if not self.context_complete(ctxt): - return {} - - ensure_packages(['ceph-common']) - return ctxt - - def context_complete(self, ctxt): - """Overridden here to ensure the context is actually complete. - - We set `key` and `auth` to None here, by default, to ensure - that the context will always evaluate to incomplete until the - Ceph relation has actually sent these details; otherwise, - there is a potential race condition between the relation - appearing and the first unit actually setting this data on the - relation. - - :param ctxt: The current context members - :type ctxt: Dict[str, ANY] - :returns: True if the context is complete - :rtype: bool - """ - if 'auth' not in ctxt or 'key' not in ctxt: - return False - return super(CephContext, self).context_complete(ctxt) - - -class HAProxyContext(OSContextGenerator): - """Provides half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
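# A small sketch of the mon_hosts handling in CephContext above: addresses
# arrive space-separated per related unit, IPv6 literals are bracketed, and
# the result is sorted so the rendered ceph.conf stays stable. The inline
# bracketing here is a simplified stand-in for format_ipv6_addr().
def collect_mon_hosts(per_unit_addrs):
    """Flatten per-unit 'ceph-public-address' values into a sorted string."""
    hosts = []
    for addrs in per_unit_addrs:
        for addr in addrs.split():
            # bracket bare IPv6 literals so ports can be appended safely
            hosts.append('[{}]'.format(addr) if ':' in addr else addr)
    return ' '.join(sorted(hosts))

# collect_mon_hosts(['10.5.0.4', '10.5.0.3 10.5.0.5'])
# -> '10.5.0.3 10.5.0.4 10.5.0.5'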
- - :side effect: mkdir is called on HAPROXY_RUN_DIR - """ - interfaces = ['cluster'] - - def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): - self.address_types = address_types - self.singlenode_mode = singlenode_mode - - def __call__(self): - if not os.path.isdir(HAPROXY_RUN_DIR): - mkdir(path=HAPROXY_RUN_DIR) - if not relation_ids('cluster') and not self.singlenode_mode: - return {} - - l_unit = local_unit().replace('/', '-') - cluster_hosts = collections.OrderedDict() - - # NOTE(jamespage): build out map of configured network endpoints - # and associated backends - for addr_type in self.address_types: - cfg_opt = 'os-{}-network'.format(addr_type) - # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather - # than 'internal' - if addr_type == 'internal': - _addr_map_type = INTERNAL - else: - _addr_map_type = addr_type - # Network spaces aware - laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], - config(cfg_opt)) - if laddr: - netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = { - 'network': "{}/{}".format(laddr, - netmask), - 'backends': collections.OrderedDict([(l_unit, - laddr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set {addr_type}-address with - # get_relation_ip(addr_type) - _laddr = relation_get('{}-address'.format(addr_type), - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[laddr]['backends'][_unit] = _laddr - - # NOTE(jamespage) add backend based on get_relation_ip - this - # will either be the only backend or the fallback if no acls - # match in the frontend - # Network spaces aware - addr = get_relation_ip('cluster') - cluster_hosts[addr] = {} - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = { - 'network': "{}/{}".format(addr, netmask), - 'backends': collections.OrderedDict([(l_unit, - addr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set their private-address with - # get_relation_ip('cluster') - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr - - ctxt = { - 'frontends': cluster_hosts, - 'default_backend': addr - } - - if config('haproxy-server-timeout'): - ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') - - if config('haproxy-client-timeout'): - ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') - - if config('haproxy-queue-timeout'): - ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') - - if config('haproxy-connect-timeout'): - ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') - - if config('prefer-ipv6'): - ctxt['local_host'] = 'ip6-localhost' - ctxt['haproxy_host'] = '::' - else: - ctxt['local_host'] = '127.0.0.1' - ctxt['haproxy_host'] = '0.0.0.0' - - ctxt['ipv6_enabled'] = not is_ipv6_disabled() - - ctxt['stat_port'] = '8888' - - db = kv() - ctxt['stat_password'] = db.get('stat-password') - if not ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) - db.flush() - - for frontend in cluster_hosts: - if (len(cluster_hosts[frontend]['backends']) > 1 or - self.singlenode_mode): - # Enable haproxy when we have enough peers. 
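# The stat_password handling in HAProxyContext above generates the stats
# credential once and persists it, so every re-render of haproxy.cfg reuses
# the same value. A dict-backed stand-in for the unitdata kv() store makes
# the memoization pattern clear:
import secrets

_store = {}  # stand-in for the charm's persistent key/value store

def get_stat_password():
    """Return the persisted haproxy stats password, creating it once."""
    password = _store.get('stat-password')
    if not password:
        password = secrets.token_urlsafe(24)  # stand-in for pwgen(32)
        _store['stat-password'] = password
    return password

# Repeated calls return the same value:
# get_stat_password() == get_stat_password()  -> True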
- log('Ensuring haproxy enabled in /etc/default/haproxy.', - level=DEBUG) - with open('/etc/default/haproxy', 'w') as out: - out.write('ENABLED=1\n') - - return ctxt - - log('HAProxy context is incomplete, this unit has no peers.', - level=INFO) - return {} - - -class ImageServiceContext(OSContextGenerator): - interfaces = ['image-service'] - - def __call__(self): - """Obtains the glance API server from the image-service relation. - Useful in nova and cinder (currently). - """ - log('Generating template context for image-service.', level=DEBUG) - rids = relation_ids('image-service') - if not rids: - return {} - - for rid in rids: - for unit in related_units(rid): - api_server = relation_get('glance-api-server', - rid=rid, unit=unit) - if api_server: - return {'glance_api_servers': api_server} - - log("ImageService context is incomplete. Missing required relation " - "data.", level=INFO) - return {} - - -class ApacheSSLContext(OSContextGenerator): - """Generates a context for an apache vhost configuration that configures - HTTPS reverse proxying for one or many endpoints. Generated context - looks something like:: - - { - 'namespace': 'cinder', - 'private_address': 'iscsi.mycinderhost.com', - 'endpoints': [(8776, 8766), (8777, 8767)] - } - - The endpoints list consists of a tuples mapping external ports - to internal ports. - """ - interfaces = ['https'] - - # charms should inherit this context and set external ports - # and service namespace accordingly. - external_ports = [] - service_namespace = None - user = group = 'root' - - def enable_modules(self): - cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] - check_call(cmd) - - def configure_cert(self, cn=None): - ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - mkdir(path=ssl_dir) - cert, key = get_cert(cn) - if cert and key: - if cn: - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - else: - cert_filename = 'cert' - key_filename = 'key' - - write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert), owner=self.user, - group=self.group, perms=0o640) - write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key), owner=self.user, - group=self.group, perms=0o640) - - def configure_ca(self): - ca_cert = get_ca_cert() - if ca_cert: - install_ca_cert(b64decode(ca_cert)) - - def canonical_names(self): - """Figure out which canonical names clients will access this service. - """ - cns = [] - for r_id in relation_ids('identity-service'): - for unit in related_units(r_id): - rdata = relation_get(rid=r_id, unit=unit) - for k in rdata: - if k.startswith('ssl_key_'): - cns.append(k.lstrip('ssl_key_')) - - return sorted(list(set(cns))) - - def get_network_addresses(self): - """For each network configured, return corresponding address and - hostnamr or vip (if available). - - Returns a list of tuples of the form: - - [(address_in_net_a, hostname_in_net_a), - (address_in_net_b, hostname_in_net_b), - ...] - - or, if no hostnames(s) available: - - [(address_in_net_a, vip_in_net_a), - (address_in_net_b, vip_in_net_b), - ...] - - or, if no vip(s) available: - - [(address_in_net_a, address_in_net_a), - (address_in_net_b, address_in_net_b), - ...] - """ - addresses = [] - for net_type in [INTERNAL, ADMIN, PUBLIC]: - net_config = config(ADDRESS_MAP[net_type]['config']) - # NOTE(jamespage): Fallback must always be private address - # as this is used to bind services on the - # local unit. 
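# One subtlety worth noting in canonical_names() above: str.lstrip() removes
# a *set* of characters rather than a prefix, so
# 'ssl_key_keystone'.lstrip('ssl_key_') yields 'tone', not 'keystone'.
# A prefix slice avoids the surprise:
def strip_prefix(key, prefix='ssl_key_'):
    """Remove an exact leading prefix from key, if present."""
    return key[len(prefix):] if key.startswith(prefix) else key

# strip_prefix('ssl_key_keystone')            -> 'keystone'
# 'ssl_key_keystone'.lstrip('ssl_key_')       -> 'tone' (characters, not prefix)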
- fallback = local_address(unit_get_fallback="private-address") - if net_config: - addr = get_address_in_network(net_config, - fallback) - else: - try: - addr = network_get_primary_address( - ADDRESS_MAP[net_type]['binding'] - ) - except (NotImplementedError, NoNetworkBinding): - addr = fallback - - endpoint = resolve_address(net_type) - addresses.append((addr, endpoint)) - - # Log the set of addresses to have a trail log and capture if tuples - # change over time in the same unit (LP: #1952414). - sorted_addresses = sorted(set(addresses)) - log('get_network_addresses: {}'.format(sorted_addresses)) - return sorted_addresses - - def __call__(self): - if isinstance(self.external_ports, str): - self.external_ports = [self.external_ports] - - if not self.external_ports or not https(): - return {} - - use_keystone_ca = True - for rid in relation_ids('certificates'): - if related_units(rid): - use_keystone_ca = False - - if use_keystone_ca: - self.configure_ca() - - self.enable_modules() - - ctxt = {'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': []} - - if use_keystone_ca: - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) - - addresses = self.get_network_addresses() - for address, endpoint in addresses: - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port, - singlenode_mode=True) - int_port = determine_api_port(api_port, singlenode_mode=True) - portmap = (address, endpoint, int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) - ctxt['ext_ports'].append(int(ext_port)) - - ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) - return ctxt - - -class NeutronContext(OSContextGenerator): - interfaces = [] - - @property - def plugin(self): - return None - - @property - def network_manager(self): - return None - - @property - def packages(self): - return neutron_plugin_attribute(self.plugin, 'packages', - self.network_manager) - - @property - def neutron_security_groups(self): - return None - - def _ensure_packages(self): - for pkgs in self.packages: - ensure_packages(pkgs) - - def ovs_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return ovs_ctxt - - def nuage_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nuage_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'vsp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nuage_ctxt - - def nvp_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nvp_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nvp_ctxt - - def n1kv_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 
'driver', - self.network_manager) - n1kv_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - n1kv_user_config_flags = config('n1kv-config-flags') - restrict_policy_profiles = config('n1kv-restrict-policy-profiles') - n1kv_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': restrict_policy_profiles} - - if n1kv_user_config_flags: - flags = config_flags_parser(n1kv_user_config_flags) - n1kv_ctxt['user_config_flags'] = flags - - return n1kv_ctxt - - def calico_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - calico_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'Calico', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return calico_ctxt - - def neutron_ctxt(self): - if https(): - proto = 'https' - else: - proto = 'http' - - if is_clustered(): - host = config('vip') - else: - host = local_address(unit_get_fallback='private-address') - - ctxt = {'network_manager': self.network_manager, - 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} - return ctxt - - def pg_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'plumgrid', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - return ovs_ctxt - - def midonet_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - midonet_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - mido_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'midonet', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': midonet_config} - - return mido_ctxt - - def __call__(self): - if self.network_manager not in ['quantum', 'neutron']: - return {} - - if not self.plugin: - return {} - - ctxt = self.neutron_ctxt() - - if self.plugin == 'ovs': - ctxt.update(self.ovs_ctxt()) - elif self.plugin in ['nvp', 'nsx']: - ctxt.update(self.nvp_ctxt()) - elif self.plugin == 'n1kv': - ctxt.update(self.n1kv_ctxt()) - elif self.plugin == 'Calico': - ctxt.update(self.calico_ctxt()) - elif self.plugin == 'vsp': - ctxt.update(self.nuage_ctxt()) - elif self.plugin == 'plumgrid': - ctxt.update(self.pg_ctxt()) - elif self.plugin == 'midonet': - ctxt.update(self.midonet_ctxt()) - - alchemy_flags = config('neutron-alchemy-flags') - if alchemy_flags: - flags = config_flags_parser(alchemy_flags) - ctxt['neutron_alchemy_flags'] = flags - - return ctxt - - -class NeutronPortContext(OSContextGenerator): - - def resolve_ports(self, ports): - """Resolve NICs not yet bound to bridge(s) - - If hwaddress provided then returns resolved hwaddress otherwise NIC. 
- """ - if not ports: - return None - - hwaddr_to_nic = {} - hwaddr_to_ip = {} - extant_nics = list_nics() - - for nic in extant_nics: - # Ignore virtual interfaces (bond masters will be identified from - # their slaves) - if not is_phy_iface(nic): - continue - - _nic = get_bond_master(nic) - if _nic: - log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), - level=DEBUG) - nic = _nic - - hwaddr = get_nic_hwaddr(nic) - hwaddr_to_nic[hwaddr] = nic - addresses = get_ipv4_addr(nic, fatal=False) - addresses += get_ipv6_addr(iface=nic, fatal=False) - hwaddr_to_ip[hwaddr] = addresses - - resolved = [] - mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) - for entry in ports: - if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT have an IP address - if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: - # If the nic is part of a bridge then don't use it - if is_bridge_member(hwaddr_to_nic[entry]): - continue - - # Entry is a MAC address for a valid interface that doesn't - # have an IP address assigned yet. - resolved.append(hwaddr_to_nic[entry]) - elif entry in extant_nics: - # If the passed entry is not a MAC address and the interface - # exists, assume it's a valid interface, and that the user put - # it there on purpose (we can trust it to be the real external - # network). - resolved.append(entry) - - # Ensure no duplicates - return list(set(resolved)) - - -class OSConfigFlagContext(OSContextGenerator): - """Provides support for user-defined config flags. - - Users can define a comma-seperated list of key=value pairs - in the charm configuration and apply them at any point in - any file by using a template flag. - - Sometimes users might want config flags inserted within a - specific section so this class allows users to specify the - template flag name, allowing for multiple template flags - (sections) within the same context. - - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ - - def __init__(self, charm_flag='config-flags', - template_flag='user_config_flags'): - """ - :param charm_flag: config flags in charm configuration. - :param template_flag: insert point for user-defined flags in template - file. - """ - super(OSConfigFlagContext, self).__init__() - self._charm_flag = charm_flag - self._template_flag = template_flag - - def __call__(self): - config_flags = config(self._charm_flag) - if not config_flags: - return {} - - return {self._template_flag: - config_flags_parser(config_flags)} - - -class LibvirtConfigFlagsContext(OSContextGenerator): - """ - This context provides support for extending - the libvirt section through user-defined flags. - """ - def __call__(self): - ctxt = {} - libvirt_flags = config('libvirt-flags') - if libvirt_flags: - ctxt['libvirt_flags'] = config_flags_parser( - libvirt_flags) - return ctxt - - -class SubordinateConfigContext(OSContextGenerator): - - """ - Responsible for inspecting relations to subordinates that - may be exporting required config via a json blob. - - The subordinate interface allows subordinates to export their - configuration requirements to the principle for multiple config - files and multiple services. 
Ie, a subordinate that has interfaces - to both glance and nova may export to following yaml blob as json:: - - glance: - /etc/glance/glance-api.conf: - sections: - DEFAULT: - - [key1, value1] - /etc/glance/glance-registry.conf: - MYSECTION: - - [key2, value2] - nova: - /etc/nova/nova.conf: - sections: - DEFAULT: - - [key3, value3] - - - It is then up to the principle charms to subscribe this context to - the service+config file it is interestd in. Configuration data will - be available in the template context, in glance's case, as:: - - ctxt = { - ... other context ... - 'subordinate_configuration': { - 'DEFAULT': { - 'key1': 'value1', - }, - 'MYSECTION': { - 'key2': 'value2', - }, - } - } - """ - - def __init__(self, service, config_file, interface): - """ - :param service : Service name key to query in any subordinate - data found - :param config_file : Service's config file to query sections - :param interface : Subordinate interface to inspect - """ - self.config_file = config_file - if isinstance(service, list): - self.services = service - else: - self.services = [service] - if isinstance(interface, list): - self.interfaces = interface - else: - self.interfaces = [interface] - - def __call__(self): - ctxt = {'sections': {}} - rids = [] - for interface in self.interfaces: - rids.extend(relation_ids(interface)) - for rid in rids: - for unit in related_units(rid): - sub_config = relation_get('subordinate_configuration', - rid=rid, unit=unit) - if sub_config and sub_config != '': - try: - sub_config = json.loads(sub_config) - except Exception: - log('Could not parse JSON from ' - 'subordinate_configuration setting from %s' - % rid, level=ERROR) - continue - - for service in self.services: - if service not in sub_config: - log('Found subordinate_configuration on %s but it ' - 'contained nothing for %s service' - % (rid, service), level=INFO) - continue - - sub_config = sub_config[service] - if self.config_file not in sub_config: - log('Found subordinate_configuration on %s but it ' - 'contained nothing for %s' - % (rid, self.config_file), level=INFO) - continue - - sub_config = sub_config[self.config_file] - for k, v in sub_config.items(): - if k == 'sections': - for section, config_list in v.items(): - log("adding section '%s'" % (section), - level=DEBUG) - if ctxt[k].get(section): - ctxt[k][section].extend(config_list) - else: - ctxt[k][section] = config_list - else: - ctxt[k] = v - if self.context_complete(ctxt): - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) - return ctxt - else: - return {} - - def context_complete(self, ctxt): - """Overridden here to ensure the context is actually complete. 
-
- :param ctxt: The current context members
- :type ctxt: Dict[str, ANY]
- :returns: True if the context is complete
- :rtype: bool
- """
- if not ctxt.get('sections'):
- return False
- return super(SubordinateConfigContext, self).context_complete(ctxt)
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-MAX_DEFAULT_WORKERS = 4
-DEFAULT_MULTIPLIER = 2
-
-
-def _calculate_workers():
- '''
- Determine the number of worker processes based on the CPU
- count of the unit containing the application.
-
- Workers will be limited to MAX_DEFAULT_WORKERS in
- container environments where no worker-multiplier configuration
- option has been set.
-
- @returns int: number of worker processes to use
- '''
- multiplier = config('worker-multiplier')
-
- # distinguish between an unset config option and one explicitly set to 0.0
- if multiplier is None:
- multiplier = DEFAULT_MULTIPLIER
-
- count = int(_num_cpus() * multiplier)
- if count <= 0:
- # assign at least one worker
- count = 1
-
- if config('worker-multiplier') is None:
- # NOTE(jamespage): Limit unconfigured worker-multiplier
- # to MAX_DEFAULT_WORKERS to avoid insane
- # worker configuration on large servers
- # Reference: https://pad.lv/1665270
- count = min(count, MAX_DEFAULT_WORKERS)
-
- return count
-
-
-def _num_cpus():
- '''
- Compatibility wrapper for calculating the number of CPUs
- a unit has.
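# The sizing rule implemented by _calculate_workers() above, restated as a
# standalone sketch: cpu_count times multiplier, floored at one worker, and
# capped at MAX_DEFAULT_WORKERS only when the operator never set
# worker-multiplier themselves.
def calculate_workers(num_cpus, multiplier=None,
                      default_multiplier=2, max_default=4):
    """Return a worker count from CPU count and an optional multiplier."""
    effective = default_multiplier if multiplier is None else multiplier
    count = max(int(num_cpus * effective), 1)
    if multiplier is None:
        # unconfigured deployments are capped to avoid huge worker counts
        count = min(count, max_default)
    return count

# calculate_workers(24)        -> 4   (capped default)
# calculate_workers(24, 0.25)  -> 6   (explicit multiplier, no cap)
# calculate_workers(2, 0.0)    -> 1   (floor of one worker)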
- - @returns: int: number of CPU cores detected - ''' - try: - return psutil.cpu_count() - except AttributeError: - return psutil.NUM_CPUS - - -class WorkerConfigContext(OSContextGenerator): - - def __call__(self): - ctxt = {"workers": _calculate_workers()} - return ctxt - - -class WSGIWorkerConfigContext(WorkerConfigContext): - - def __init__(self, name=None, script=None, admin_script=None, - public_script=None, user=None, group=None, - process_weight=1.00, - admin_process_weight=0.25, public_process_weight=0.75): - self.service_name = name - self.user = user or name - self.group = group or name - self.script = script - self.admin_script = admin_script - self.public_script = public_script - self.process_weight = process_weight - self.admin_process_weight = admin_process_weight - self.public_process_weight = public_process_weight - - def __call__(self): - total_processes = _calculate_workers() - ctxt = { - "service_name": self.service_name, - "user": self.user, - "group": self.group, - "script": self.script, - "admin_script": self.admin_script, - "public_script": self.public_script, - "processes": int(math.ceil(self.process_weight * total_processes)), - "admin_processes": int(math.ceil(self.admin_process_weight * - total_processes)), - "public_processes": int(math.ceil(self.public_process_weight * - total_processes)), - "threads": 1, - } - return ctxt - - -class ZeroMQContext(OSContextGenerator): - interfaces = ['zeromq-configuration'] - - def __call__(self): - ctxt = {} - if is_relation_made('zeromq-configuration', 'host'): - for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) - - return ctxt - - -class NotificationDriverContext(OSContextGenerator): - - def __init__(self, zmq_relation='zeromq-configuration', - amqp_relation='amqp'): - """ - :param zmq_relation: Name of Zeromq relation to check - """ - self.zmq_relation = zmq_relation - self.amqp_relation = amqp_relation - - def __call__(self): - ctxt = {'notifications': 'False'} - if is_relation_made(self.amqp_relation): - ctxt['notifications'] = "True" - - return ctxt - - -class SysctlContext(OSContextGenerator): - """This context check if the 'sysctl' option exists on configuration - then creates a file with the loaded contents""" - def __call__(self): - sysctl_dict = config('sysctl') - if sysctl_dict: - sysctl_create(sysctl_dict, - '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) - return {'sysctl': sysctl_dict} - - -class NeutronAPIContext(OSContextGenerator): - ''' - Inspects current neutron-plugin-api relation for neutron settings. Return - defaults if it is not present. 
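# WSGIWorkerConfigContext above splits one worker budget into main, admin and
# public process counts using per-role weights and math.ceil, so every role
# receives at least one process. The same arithmetic in isolation:
import math

def split_processes(total, weight=1.00, admin_weight=0.25, public_weight=0.75):
    """Return (processes, admin_processes, public_processes)."""
    return (int(math.ceil(weight * total)),
            int(math.ceil(admin_weight * total)),
            int(math.ceil(public_weight * total)))

# split_processes(4) -> (4, 1, 3)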
- ''' - interfaces = ['neutron-plugin-api'] - - def __call__(self): - self.neutron_defaults = { - 'l2_population': { - 'rel_key': 'l2-population', - 'default': False, - }, - 'overlay_network_type': { - 'rel_key': 'overlay-network-type', - 'default': 'gre', - }, - 'neutron_security_groups': { - 'rel_key': 'neutron-security-groups', - 'default': False, - }, - 'network_device_mtu': { - 'rel_key': 'network-device-mtu', - 'default': None, - }, - 'enable_dvr': { - 'rel_key': 'enable-dvr', - 'default': False, - }, - 'enable_l3ha': { - 'rel_key': 'enable-l3ha', - 'default': False, - }, - 'dns_domain': { - 'rel_key': 'dns-domain', - 'default': None, - }, - 'polling_interval': { - 'rel_key': 'polling-interval', - 'default': 2, - }, - 'rpc_response_timeout': { - 'rel_key': 'rpc-response-timeout', - 'default': 60, - }, - 'report_interval': { - 'rel_key': 'report-interval', - 'default': 30, - }, - 'enable_qos': { - 'rel_key': 'enable-qos', - 'default': False, - }, - 'enable_nsg_logging': { - 'rel_key': 'enable-nsg-logging', - 'default': False, - }, - 'enable_nfg_logging': { - 'rel_key': 'enable-nfg-logging', - 'default': False, - }, - 'enable_port_forwarding': { - 'rel_key': 'enable-port-forwarding', - 'default': False, - }, - 'enable_fwaas': { - 'rel_key': 'enable-fwaas', - 'default': False, - }, - 'global_physnet_mtu': { - 'rel_key': 'global-physnet-mtu', - 'default': 1500, - }, - 'physical_network_mtus': { - 'rel_key': 'physical-network-mtus', - 'default': None, - }, - } - ctxt = self.get_neutron_options({}) - for rid in relation_ids('neutron-plugin-api'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - # The l2-population key is used by the context as a way of - # checking if the api service on the other end is sending data - # in a recent format. - if 'l2-population' in rdata: - ctxt.update(self.get_neutron_options(rdata)) - - extension_drivers = [] - - if ctxt['enable_qos']: - extension_drivers.append('qos') - - if ctxt['enable_nsg_logging']: - extension_drivers.append('log') - - ctxt['extension_drivers'] = ','.join(extension_drivers) - - l3_extension_plugins = [] - - if ctxt['enable_port_forwarding']: - l3_extension_plugins.append('port_forwarding') - - if ctxt['enable_fwaas']: - l3_extension_plugins.append('fwaas_v2') - if ctxt['enable_nfg_logging']: - l3_extension_plugins.append('fwaas_v2_log') - - ctxt['l3_extension_plugins'] = l3_extension_plugins - - return ctxt - - def get_neutron_options(self, rdata): - settings = {} - for nkey in self.neutron_defaults.keys(): - defv = self.neutron_defaults[nkey]['default'] - rkey = self.neutron_defaults[nkey]['rel_key'] - if rkey in rdata.keys(): - if type(defv) is bool: - settings[nkey] = bool_from_string(rdata[rkey]) - else: - settings[nkey] = rdata[rkey] - else: - settings[nkey] = defv - return settings - - -class ExternalPortContext(NeutronPortContext): - - def __call__(self): - ctxt = {} - ports = config('ext-port') - if ports: - ports = [p.strip() for p in ports.split()] - ports = self.resolve_ports(ports) - if ports: - ctxt = {"ext_port": ports[0]} - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - if mtu: - ctxt['ext_port_mtu'] = mtu - - return ctxt - - -class DataPortContext(NeutronPortContext): - - def __call__(self): - ports = config('data-port') - if ports: - # Map of {bridge:port/mac} - portmap = parse_data_port_mappings(ports) - ports = portmap.keys() - # Resolve provided ports or mac addresses and filter out those - # already attached to a bridge. 
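# get_neutron_options() above overlays relation data onto per-key defaults,
# coercing strings to booleans only when the default is boolean. A compact
# standalone version of the same merge (the truthy set here is a simplified
# stand-in for charmhelpers' bool_from_string):
def merge_neutron_options(defaults, rdata):
    """Overlay relation data onto defaults, respecting boolean defaults."""
    truthy = ('y', 'yes', 'true', 't', 'on')
    settings = {}
    for key, spec in defaults.items():
        if spec['rel_key'] in rdata:
            raw = rdata[spec['rel_key']]
            if isinstance(spec['default'], bool):
                settings[key] = str(raw).strip().lower() in truthy
            else:
                settings[key] = raw
        else:
            settings[key] = spec['default']
    return settings

# merge_neutron_options(
#     {'enable_dvr': {'rel_key': 'enable-dvr', 'default': False}},
#     {'enable-dvr': 'True'})
# -> {'enable_dvr': True}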
- resolved = self.resolve_ports(ports) - # Rebuild port index using resolved and filtered ports. - normalized = {get_nic_hwaddr(port): port for port in resolved - if port not in ports} - normalized.update({port: port for port in resolved - if port in ports}) - if resolved: - return { - normalized[port]: bridge - for port, bridge in portmap.items() - if port in normalized.keys() - } - - return None - - -class PhyNICMTUContext(DataPortContext): - - def __call__(self): - ctxt = {} - mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.keys(): - ports = sorted(mappings.keys()) - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - all_ports = set() - # If any of ports is a vlan device, its underlying device must have - # mtu applied first. - for port in ports: - for lport in glob.glob("/sys/class/net/%s/lower_*" % port): - lport = os.path.basename(lport) - all_ports.add(lport.split('_')[1]) - - all_ports = list(all_ports) - all_ports.extend(ports) - if mtu: - ctxt["devs"] = '\\n'.join(all_ports) - ctxt['mtu'] = mtu - - return ctxt - - -class NetworkServiceContext(OSContextGenerator): - - def __init__(self, rel_name='quantum-network-service'): - self.rel_name = rel_name - self.interfaces = [rel_name] - - def __call__(self): - for rid in relation_ids(self.rel_name): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - ctxt = { - 'keystone_host': rdata.get('keystone_host'), - 'service_port': rdata.get('service_port'), - 'auth_port': rdata.get('auth_port'), - 'service_tenant': rdata.get('service_tenant'), - 'service_username': rdata.get('service_username'), - 'service_password': rdata.get('service_password'), - 'quantum_host': rdata.get('quantum_host'), - 'quantum_port': rdata.get('quantum_port'), - 'quantum_url': rdata.get('quantum_url'), - 'region': rdata.get('region'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - 'api_version': - rdata.get('api_version') or '2.0', - } - if self.context_complete(ctxt): - return ctxt - return {} - - -class InternalEndpointContext(OSContextGenerator): - """Internal endpoint context. - - This context provides the endpoint type used for communication between - services e.g. between Nova and Cinder internally. Openstack uses Public - endpoints by default so this allows admins to optionally use internal - endpoints. - """ - def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} - - -class VolumeAPIContext(InternalEndpointContext): - """Volume API context. - - This context provides information regarding the volume endpoint to use - when communicating between services. It determines which version of the - API is appropriate for use. - - This value will be determined in the resulting context dictionary - returned from calling the VolumeAPIContext object. Information provided - by this context is as follows: - - volume_api_version: the volume api version to use, currently - 'v2' or 'v3' - volume_catalog_info: the information to use for a cinder client - configuration that consumes API endpoints from the keystone - catalog. This is defined as the type:name:endpoint_type string. - """ - # FIXME(wolsen) This implementation is based on the provider being able - # to specify the package version to check but does not guarantee that the - # volume service api version selected is available. 
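# DataPortContext above accepts pairs where the port side may be a NIC name
# or a MAC address, and normalizes everything back to NIC names before
# returning the nic -> bridge map. A sketch of that normalization with an
# illustrative MAC-to-NIC lookup standing in for the hwaddr resolution:
def normalize_data_ports(portmap, mac_to_nic):
    """Map resolved NIC names to bridges from a {port_or_mac: bridge} map."""
    normalized = {}
    for port, bridge in portmap.items():
        # MACs resolve through the lookup; plain NIC names pass through
        nic = mac_to_nic.get(port.lower(), port)
        normalized[nic] = bridge
    return normalized

# normalize_data_ports({'aa:bb:cc:dd:ee:ff': 'br-ex', 'eth2': 'br-data'},
#                      {'aa:bb:cc:dd:ee:ff': 'eth1'})
# -> {'eth1': 'br-ex', 'eth2': 'br-data'}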
In practice, it is - # quite likely the volume service *is* providing the v3 volume service. - # This should be resolved when the service-discovery spec is implemented. - def __init__(self, pkg): - """ - Creates a new VolumeAPIContext for use in determining which version - of the Volume API should be used for communication. A package codename - should be supplied for determining the currently installed OpenStack - version. - - :param pkg: the package codename to use in order to determine the - component version (e.g. nova-common). See - charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. - """ - super(VolumeAPIContext, self).__init__() - self._ctxt = None - if not pkg: - raise ValueError('package name must be provided in order to ' - 'determine current OpenStack version.') - self.pkg = pkg - - @property - def ctxt(self): - if self._ctxt is not None: - return self._ctxt - self._ctxt = self._determine_ctxt() - return self._ctxt - - def _determine_ctxt(self): - """Determines the Volume API endpoint information. - - Determines the appropriate version of the API that should be used - as well as the catalog_info string that would be supplied. Returns - a dict containing the volume_api_version and the volume_catalog_info. - """ - rel = os_release(self.pkg) - version = '2' - if CompareOpenStackReleases(rel) >= 'pike': - version = '3' - - service_type = 'volumev{version}'.format(version=version) - service_name = 'cinderv{version}'.format(version=version) - endpoint_type = 'publicURL' - if config('use-internal-endpoints'): - endpoint_type = 'internalURL' - catalog_info = '{type}:{name}:{endpoint}'.format( - type=service_type, name=service_name, endpoint=endpoint_type) - - return { - 'volume_api_version': version, - 'volume_catalog_info': catalog_info, - } - - def __call__(self): - return self.ctxt - - -class AppArmorContext(OSContextGenerator): - """Base class for apparmor contexts.""" - - def __init__(self, profile_name=None): - self._ctxt = None - self.aa_profile = profile_name - self.aa_utils_packages = ['apparmor-utils'] - - @property - def ctxt(self): - if self._ctxt is not None: - return self._ctxt - self._ctxt = self._determine_ctxt() - return self._ctxt - - def _determine_ctxt(self): - """ - Validate aa-profile-mode settings is disable, enforce, or complain. - - :return ctxt: Dictionary of the apparmor profile or None - """ - if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: - ctxt = {'aa_profile_mode': config('aa-profile-mode'), - 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} - if self.aa_profile: - ctxt['aa_profile'] = self.aa_profile - else: - ctxt = None - return ctxt - - def __call__(self): - return self.ctxt - - def install_aa_utils(self): - """ - Install packages required for apparmor configuration. - """ - log("Installing apparmor utils.") - ensure_packages(self.aa_utils_packages) - - def manually_disable_aa_profile(self): - """ - Manually disable an apparmor profile. - - If aa-profile-mode is set to disabled (default) this is required as the - template has been written but apparmor is yet unaware of the profile - and aa-disable aa-profile fails. Without this the profile would kick - into enforce mode on the next service restart. 
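# _determine_ctxt() above reduces to two decisions: API v3 from pike onward,
# and internalURL endpoints when use-internal-endpoints is set. Standalone:
def volume_catalog_info(release_is_pike_or_later, use_internal):
    """Return (api_version, catalog_info) for cinder client configuration."""
    version = '3' if release_is_pike_or_later else '2'
    endpoint = 'internalURL' if use_internal else 'publicURL'
    return version, 'volumev{v}:cinderv{v}:{e}'.format(v=version, e=endpoint)

# volume_catalog_info(True, True) -> ('3', 'volumev3:cinderv3:internalURL')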
- - """ - profile_path = '/etc/apparmor.d' - disable_path = '/etc/apparmor.d/disable' - if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): - os.symlink(os.path.join(profile_path, self.aa_profile), - os.path.join(disable_path, self.aa_profile)) - - def setup_aa_profile(self): - """ - Setup an apparmor profile. - The ctxt dictionary will contain the apparmor profile mode and - the apparmor profile name. - Makes calls out to aa-disable, aa-complain, or aa-enforce to setup - the apparmor profile. - """ - self() - if not self.ctxt: - log("Not enabling apparmor Profile") - return - self.install_aa_utils() - cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] - cmd.append(self.ctxt['aa_profile']) - log("Setting up the apparmor profile for {} in {} mode." - "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) - try: - check_call(cmd) - except CalledProcessError as e: - # If aa-profile-mode is set to disabled (default) manual - # disabling is required as the template has been written but - # apparmor is yet unaware of the profile and aa-disable aa-profile - # fails. If aa-disable learns to read profile files first this can - # be removed. - if self.ctxt['aa_profile_mode'] == 'disable': - log("Manually disabling the apparmor profile for {}." - "".format(self.ctxt['aa_profile'])) - self.manually_disable_aa_profile() - return - status_set('blocked', "Apparmor profile {} failed to be set to {}." - "".format(self.ctxt['aa_profile'], - self.ctxt['aa_profile_mode'])) - raise e - - -class MemcacheContext(OSContextGenerator): - """Memcache context - - This context provides options for configuring a local memcache client and - server for both IPv4 and IPv6 - """ - - def __init__(self, package=None): - """ - @param package: Package to examine to extrapolate OpenStack release. - Used when charms have no openstack-origin config - option (ie subordinates) - """ - self.package = package - - def __call__(self): - ctxt = {} - ctxt['use_memcache'] = enable_memcache(package=self.package) - if ctxt['use_memcache']: - # Trusty version of memcached does not support ::1 as a listen - # address so use host file entry instead - release = lsb_release()['DISTRIB_CODENAME'].lower() - if is_ipv6_disabled(): - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '127.0.0.1' - else: - ctxt['memcache_server'] = 'localhost' - ctxt['memcache_server_formatted'] = '127.0.0.1' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = '{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - else: - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '::1' - else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - return ctxt - - -class EnsureDirContext(OSContextGenerator): - ''' - Serves as a generic context to create a directory as a side-effect. - - Useful for software that supports drop-in files (.d) in conjunction - with config option-based templates. Examples include: - * OpenStack oslo.policy drop-in files; - * systemd drop-in config files; - * other software that supports overriding defaults with .d files - - Another use-case is when a subordinate generates a configuration for - primary to render in a separate directory. - - Some software requires a user to create a target directory to be - scanned for drop-in files with a specific format. 
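# MemcacheContext above picks listener and client addresses from two facts:
# whether IPv6 is disabled, and whether the release is newer than trusty
# (trusty's memcached cannot listen on ::1, hence the hosts-file aliases).
# The decision table restated as a small function:
def memcache_addresses(ipv6_disabled, newer_than_trusty):
    """Return (server, formatted_server, url) for memcached on port 11211."""
    if ipv6_disabled:
        server = '127.0.0.1' if newer_than_trusty else 'localhost'
        return server, '127.0.0.1', '127.0.0.1:11211'
    server = '::1' if newer_than_trusty else 'ip6-localhost'
    return server, '[::1]', 'inet6:[::1]:11211'

# memcache_addresses(False, True) -> ('::1', '[::1]', 'inet6:[::1]:11211')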
This is why this - context is needed to do that before rendering a template. - ''' - - def __init__(self, dirname, **kwargs): - '''Used merely to ensure that a given directory exists.''' - self.dirname = dirname - self.kwargs = kwargs - - def __call__(self): - mkdir(self.dirname, **self.kwargs) - return {} - - -class VersionsContext(OSContextGenerator): - """Context to return the openstack and operating system versions. - - """ - def __init__(self, pkg='python-keystone'): - """Initialise context. - - :param pkg: Package to extrapolate openstack version from. - :type pkg: str - """ - self.pkg = pkg - - def __call__(self): - ostack = os_release(self.pkg) - osystem = lsb_release()['DISTRIB_CODENAME'].lower() - return { - 'openstack_release': ostack, - 'operating_system_release': osystem} - - -class LogrotateContext(OSContextGenerator): - """Common context generator for logrotate.""" - - def __init__(self, location, interval, count): - """ - :param location: Absolute path for the logrotate config file - :type location: str - :param interval: The interval for the rotations. Valid values are - 'daily', 'weekly', 'monthly', 'yearly' - :type interval: str - :param count: The logrotate count option configures the 'count' times - the log files are being rotated before being - :type count: int - """ - self.location = location - self.interval = interval - self.count = 'rotate {}'.format(count) - - def __call__(self): - ctxt = { - 'logrotate_logs_location': self.location, - 'logrotate_interval': self.interval, - 'logrotate_count': self.count, - } - return ctxt - - -class HostInfoContext(OSContextGenerator): - """Context to provide host information.""" - - def __init__(self, use_fqdn_hint_cb=None): - """Initialize HostInfoContext - - :param use_fqdn_hint_cb: Callback whose return value used to populate - `use_fqdn_hint` - :type use_fqdn_hint_cb: Callable[[], bool] - """ - # Store callback used to get hint for whether FQDN should be used - - # Depending on the workload a charm manages, the use of FQDN vs. - # shortname may be a deploy-time decision, i.e. behaviour can not - # change on charm upgrade or post-deployment configuration change. - - # The hint is passed on as a flag in the context to allow the decision - # to be made in the Jinja2 configuration template. - self.use_fqdn_hint_cb = use_fqdn_hint_cb - - def _get_canonical_name(self, name=None): - """Get the official FQDN of the host - - The implementation of ``socket.getfqdn()`` in the standard Python - library does not exhaust all methods of getting the official name - of a host ref Python issue https://bugs.python.org/issue5004 - - This function mimics the behaviour of a call to ``hostname -f`` to - get the official FQDN but returns an empty string if it is - unsuccessful. - - :param name: Shortname to get FQDN on - :type name: Optional[str] - :returns: The official FQDN for host or empty string ('') - :rtype: str - """ - name = name or socket.gethostname() - fqdn = '' - - try: - addrs = socket.getaddrinfo( - name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) - except OSError: - pass - else: - for addr in addrs: - if addr[3]: - if '.' 
in addr[3]: - fqdn = addr[3] - break - return fqdn - - def __call__(self): - name = socket.gethostname() - ctxt = { - 'host_fqdn': self._get_canonical_name(name) or name, - 'host': name, - 'use_fqdn_hint': ( - self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) - } - return ctxt - - -def validate_ovs_use_veth(*args, **kwargs): - """Validate OVS use veth setting for dhcp agents - - The ovs_use_veth setting is considered immutable as it will break existing - deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It - turns out this is no longer necessary. Ideally, all new deployments would - have this set to False. - - This function validates that the config value does not conflict with - previously deployed settings in dhcp_agent.ini. - - See LP Bug#1831935 for details. - - :returns: Status state and message - :rtype: Union[(None, None), (string, string)] - """ - existing_ovs_use_veth = ( - DHCPAgentContext.get_existing_ovs_use_veth()) - config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth() - - # Check settings are set and not None - if existing_ovs_use_veth is not None and config_ovs_use_veth is not None: - # Check for mismatch between existing config ini and juju config - if existing_ovs_use_veth != config_ovs_use_veth: - # Stop the line to avoid breakage - msg = ( - "The existing setting for dhcp_agent.ini ovs_use_veth, {}, " - "does not match the juju config setting, {}. This may lead to " - "VMs being unable to receive a DHCP IP. Either change the " - "juju config setting or dhcp agents may need to be recreated." - .format(existing_ovs_use_veth, config_ovs_use_veth)) - log(msg, ERROR) - return ( - "blocked", - "Mismatched existing and configured ovs-use-veth. See log.") - - # Everything is OK - return None, None - - -class DHCPAgentContext(OSContextGenerator): - - def __call__(self): - """Return the DHCPAGentContext. - - Return all DHCP Agent INI related configuration. - ovs unit is attached to (as a subordinate) and the 'dns_domain' from - the neutron-plugin-api relations (if one is set). - - :returns: Dictionary context - :rtype: Dict - """ - - ctxt = {} - dnsmasq_flags = config('dnsmasq-flags') - if dnsmasq_flags: - ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags) - ctxt['dns_servers'] = config('dns-servers') - - neutron_api_settings = NeutronAPIContext()() - - ctxt['debug'] = config('debug') - ctxt['instance_mtu'] = config('instance-mtu') - ctxt['ovs_use_veth'] = self.get_ovs_use_veth() - - ctxt['enable_metadata_network'] = config('enable-metadata-network') - ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata') - - if neutron_api_settings.get('dns_domain'): - ctxt['dns_domain'] = neutron_api_settings.get('dns_domain') - - # Override user supplied config for these plugins as these settings are - # mandatory - if config('plugin') in ['nvp', 'nsx', 'n1kv']: - ctxt['enable_metadata_network'] = True - ctxt['enable_isolated_metadata'] = True - - ctxt['append_ovs_config'] = False - cmp_release = CompareOpenStackReleases( - os_release('neutron-common', base='icehouse')) - if cmp_release >= 'queens' and config('enable-dpdk'): - ctxt['append_ovs_config'] = True - - return ctxt - - @staticmethod - def get_existing_ovs_use_veth(): - """Return existing ovs_use_veth setting from dhcp_agent.ini. 
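# The FQDN lookup in HostInfoContext above relies on getaddrinfo with
# AI_CANONNAME and accepts the canonical name only when it contains a dot.
# The same probe in isolation (returns '' when resolution fails):
import socket

def canonical_fqdn(name=None):
    """Best-effort FQDN for name (default: this host), or ''."""
    name = name or socket.gethostname()
    try:
        addrs = socket.getaddrinfo(
            name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
    except OSError:
        return ''
    for _family, _type, _proto, canonname, _sockaddr in addrs:
        if canonname and '.' in canonname:
            return canonname
    return ''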
- - :returns: Boolean value of existing ovs_use_veth setting or None - :rtype: Optional[Bool] - """ - DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini" - existing_ovs_use_veth = None - # If there is a dhcp_agent.ini file read the current setting - if os.path.isfile(DHCP_AGENT_INI): - # config_ini does the right thing and returns None if the setting - # is commented. - existing_ovs_use_veth = ( - config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) - # Convert to Bool if necessary - if isinstance(existing_ovs_use_veth, str): - return bool_from_string(existing_ovs_use_veth) - return existing_ovs_use_veth - - @staticmethod - def parse_ovs_use_veth(): - """Parse the ovs-use-veth config setting. - - Parse the string config setting for ovs-use-veth and return a boolean - or None. - - bool_from_string will raise a ValueError if the string is not falsy or - truthy. - - :raises: ValueError for invalid input - :returns: Boolean value of ovs-use-veth or None - :rtype: Optional[Bool] - """ - _config = config("ovs-use-veth") - # An unset parameter returns None. Just in case we will also check for - # an empty string: "". Ironically, (the problem we are trying to avoid) - # "False" returns True and "" returns False. - if _config is None or not _config: - # Return None - return - # bool_from_string handles many variations of true and false strings - # as well as upper and lowercases including: - # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] - return bool_from_string(_config) - - def get_ovs_use_veth(self): - """Return correct ovs_use_veth setting for use in dhcp_agent.ini. - - Get the right value from config or existing dhcp_agent.ini file. - Existing has precedence. Attempt to default to "False" without - disrupting existing deployments. Handle existing deployments and - upgrades safely. See LP Bug#1831935 - - :returns: Value to use for ovs_use_veth setting - :rtype: Bool - """ - _existing = self.get_existing_ovs_use_veth() - if _existing is not None: - return _existing - - _config = self.parse_ovs_use_veth() - if _config is None: - # New better default - return False - else: - return _config - - -EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac']) - - -def resolve_pci_from_mapping_config(config_key): - """Resolve local PCI devices from MAC addresses in mapping config. - - Note that this function keeps record of mac->PCI address lookups - in the local unit db as the devices will disappaear from the system - once bound. - - :param config_key: Configuration option key to parse data from - :type config_key: str - :returns: PCI device address to Tuple(entity, mac) map - :rtype: collections.OrderedDict[str,Tuple[str,str]] - """ - devices = pci.PCINetDevices() - resolved_devices = collections.OrderedDict() - db = kv() - # Note that ``parse_data_port_mappings`` returns Dict regardless of input - for mac, entity in parse_data_port_mappings(config(config_key)).items(): - pcidev = devices.get_device_from_mac(mac) - if pcidev: - # NOTE: store mac->pci allocation as post binding - # it disappears from PCIDevices. - db.set(mac, pcidev.pci_address) - db.flush() - - pci_address = db.get(mac) - if pci_address: - resolved_devices[pci_address] = EntityMac(entity, mac) - - return resolved_devices - - -class DPDKDeviceContext(OSContextGenerator): - - def __init__(self, driver_key=None, bridges_key=None, bonds_key=None): - """Initialize DPDKDeviceContext. - - :param driver_key: Key to use when retrieving driver config. 
- :type driver_key: str - :param bridges_key: Key to use when retrieving bridge config. - :type bridges_key: str - :param bonds_key: Key to use when retrieving bonds config. - :type bonds_key: str - """ - self.driver_key = driver_key or 'dpdk-driver' - self.bridges_key = bridges_key or 'data-port' - self.bonds_key = bonds_key or 'dpdk-bond-mappings' - - def __call__(self): - """Populate context. - - :returns: context - :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] - """ - driver = config(self.driver_key) - if driver is None: - return {} - # Resolve PCI devices for both directly used devices (_bridges) - # and devices for use in dpdk bonds (_bonds) - pci_devices = resolve_pci_from_mapping_config(self.bridges_key) - pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) - return {'devices': pci_devices, - 'driver': driver} - - -class OVSDPDKDeviceContext(OSContextGenerator): - - def __init__(self, bridges_key=None, bonds_key=None): - """Initialize OVSDPDKDeviceContext. - - :param bridges_key: Key to use when retrieving bridge config. - :type bridges_key: str - :param bonds_key: Key to use when retrieving bonds config. - :type bonds_key: str - """ - self.bridges_key = bridges_key or 'data-port' - self.bonds_key = bonds_key or 'dpdk-bond-mappings' - - @staticmethod - def _parse_cpu_list(cpulist): - """Parses a linux cpulist for a numa node - - :returns: list of cores - :rtype: List[int] - """ - cores = [] - ranges = cpulist.split(',') - for cpu_range in ranges: - if "-" in cpu_range: - cpu_min_max = cpu_range.split('-') - cores += range(int(cpu_min_max[0]), - int(cpu_min_max[1]) + 1) - else: - cores.append(int(cpu_range)) - return cores - - def _numa_node_cores(self): - """Get map of numa node -> cpu core - - :returns: map of numa node -> cpu core - :rtype: Dict[str,List[int]] - """ - nodes = {} - node_regex = '/sys/devices/system/node/node*' - for node in glob.glob(node_regex): - index = node.lstrip('/sys/devices/system/node/node') - with open(os.path.join(node, 'cpulist')) as cpulist: - nodes[index] = self._parse_cpu_list(cpulist.read().strip()) - return nodes - - def cpu_mask(self): - """Get hex formatted CPU mask - - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit. - :returns: hex formatted CPU mask - :rtype: str - """ - return self.cpu_masks()['dpdk_lcore_mask'] - - def cpu_masks(self): - """Get hex formatted CPU masks - - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit, followed by the - next config:pmd-socket-cores - - :returns: Dict of hex formatted CPU masks - :rtype: Dict[str, str] - """ - num_lcores = config('dpdk-socket-cores') - pmd_cores = config('pmd-socket-cores') - lcore_mask = 0 - pmd_mask = 0 - for cores in self._numa_node_cores().values(): - for core in cores[:num_lcores]: - lcore_mask = lcore_mask | 1 << core - for core in cores[num_lcores:][:pmd_cores]: - pmd_mask = pmd_mask | 1 << core - return { - 'pmd_cpu_mask': format(pmd_mask, '#04x'), - 'dpdk_lcore_mask': format(lcore_mask, '#04x')} - - def socket_memory(self): - """Formatted list of socket memory configuration per socket. - - :returns: socket memory configuration per socket. - :rtype: str - """ - lscpu_out = check_output( - ['lscpu', '-p=socket']).decode('UTF-8').strip() - sockets = set() - for line in lscpu_out.split('\n'): - try: - sockets.add(int(line)) - except ValueError: - # lscpu output is headed by comments so ignore them. 
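# cpu_masks() above turns "first N cores of each NUMA node" into hex
# bitmasks by OR-ing 1 << core. A standalone reduction of the same
# arithmetic, taking a precomputed {node: [cores]} map:
def dpdk_masks(numa_cores, lcores_per_node, pmd_per_node):
    """Return (lcore_mask, pmd_mask) hex strings from {node: [cores]}."""
    lcore_mask = pmd_mask = 0
    for cores in numa_cores.values():
        for core in cores[:lcores_per_node]:
            lcore_mask |= 1 << core
        for core in cores[lcores_per_node:][:pmd_per_node]:
            pmd_mask |= 1 << core
    return format(lcore_mask, '#04x'), format(pmd_mask, '#04x')

# dpdk_masks({'0': [0, 1, 2, 3], '1': [4, 5, 6, 7]}, 1, 1)
# -> ('0x11', '0x22')  # lcore cores 0 and 4; PMD cores 1 and 5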
- pass - sm_size = config('dpdk-socket-memory') - mem_list = [str(sm_size) for _ in sockets] - if mem_list: - return ','.join(mem_list) - else: - return str(sm_size) - - def devices(self): - """List of PCI devices for use by DPDK - - :returns: List of PCI devices for use by DPDK - :rtype: collections.OrderedDict[str,str] - """ - pci_devices = resolve_pci_from_mapping_config(self.bridges_key) - pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) - return pci_devices - - def _formatted_whitelist(self, flag): - """Flag formatted list of devices to whitelist - - :param flag: flag format to use - :type flag: str - :rtype: str - """ - whitelist = [] - for device in self.devices(): - whitelist.append(flag.format(device=device)) - return ' '.join(whitelist) - - def device_whitelist(self): - """Formatted list of devices to whitelist for dpdk - - using the old style '-w' flag - - :returns: devices to whitelist prefixed by '-w ' - :rtype: str - """ - return self._formatted_whitelist('-w {device}') - - def pci_whitelist(self): - """Formatted list of devices to whitelist for dpdk - - using the new style '--pci-whitelist' flag - - :returns: devices to whitelist prefixed by '--pci-whitelist ' - :rtype: str - """ - return self._formatted_whitelist('--pci-whitelist {device}') - - def __call__(self): - """Populate context. - - :returns: context - :rtype: Dict[str,Union[bool,str]] - """ - ctxt = {} - whitelist = self.device_whitelist() - if whitelist: - ctxt['dpdk_enabled'] = config('enable-dpdk') - ctxt['device_whitelist'] = self.device_whitelist() - ctxt['socket_memory'] = self.socket_memory() - ctxt['cpu_mask'] = self.cpu_mask() - return ctxt - - -class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interfaces from charm configuration. - - NOTE: the handling of this detail in the charm is pre-deprecated. - - The long term goal is for network connectivity detail to be modelled in - the server provisioning layer (such as MAAS) which in turn will provide - a Netplan YAML description that will be used to drive Open vSwitch. - - Until we get to that reality the charm will need to configure this - detail based on application level configuration options. - - There is a established way of mapping interfaces to ports and bridges - in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we - will carry that forward. - - The relationship between bridge, port and interface(s). - +--------+ - | bridge | - +--------+ - | - +----------------+ - | port aka. bond | - +----------------+ - | | - +-+ +-+ - |i| |i| - |n| |n| - |t| |t| - |0| |N| - +-+ +-+ - """ - class interface_type(enum.Enum): - """Supported interface types. - - Supported interface types can be found in the ``iface_types`` column - in the ``Open_vSwitch`` table on a running system. - """ - dpdk = 'dpdk' - internal = 'internal' - system = 'system' - - def __str__(self): - """Return string representation of value. - - :returns: string representation of value. - :rtype: str - """ - return self.value - - def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, - global_mtu=None): - """Initialize map. 
-class BridgePortInterfaceMap(object):
-    """Build a map of bridge ports and interfaces from charm configuration.
-
-    NOTE: the handling of this detail in the charm is pre-deprecated.
-
-    The long term goal is for network connectivity detail to be modelled in
-    the server provisioning layer (such as MAAS) which in turn will provide
-    a Netplan YAML description that will be used to drive Open vSwitch.
-
-    Until we get to that reality the charm will need to configure this
-    detail based on application level configuration options.
-
-    There is an established way of mapping interfaces to ports and bridges
-    in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we
-    will carry that forward.
-
-    The relationship between bridge, port and interface(s):
-             +--------+
-             | bridge |
-             +--------+
-                 |
-         +----------------+
-         | port aka. bond |
-         +----------------+
-            |           |
-           +-+         +-+
-           |i|         |i|
-           |n|         |n|
-           |t|         |t|
-           |0|         |N|
-           +-+         +-+
-    """
-    class interface_type(enum.Enum):
-        """Supported interface types.
-
-        Supported interface types can be found in the ``iface_types`` column
-        in the ``Open_vSwitch`` table on a running system.
-        """
-        dpdk = 'dpdk'
-        internal = 'internal'
-        system = 'system'
-
-        def __str__(self):
-            """Return string representation of value.
-
-            :returns: string representation of value.
-            :rtype: str
-            """
-            return self.value
-
-    def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None,
-                 global_mtu=None):
-        """Initialize map.
-
-        :param bridges_key: Name of bridge:interface/port map config key
-                            (default: 'data-port')
-        :type bridges_key: Optional[str]
-        :param bonds_key: Name of port-name:interface map config key
-                          (default: 'dpdk-bond-mappings')
-        :type bonds_key: Optional[str]
-        :param enable_dpdk_key: Name of DPDK toggle config key
-                                (default: 'enable-dpdk')
-        :type enable_dpdk_key: Optional[str]
-        :param global_mtu: Set an MTU on all interfaces at map initialization.
-
-            The default is to have Open vSwitch get this from the underlying
-            interface as set up by bare metal provisioning.
-
-            Note that you can augment the MTU on an individual interface basis
-            like this:
-
-            ifdatamap = bpi.get_ifdatamap(bridge, port)
-            ifdatamap = {
-                port: {
-                    **ifdata,
-                    **{'mtu-request': my_individual_mtu_map[port]},
-                }
-                for port, ifdata in ifdatamap.items()
-            }
-        :type global_mtu: Optional[int]
-        """
-        bridges_key = bridges_key or 'data-port'
-        bonds_key = bonds_key or 'dpdk-bond-mappings'
-        enable_dpdk_key = enable_dpdk_key or 'enable-dpdk'
-        self._map = collections.defaultdict(
-            lambda: collections.defaultdict(dict))
-        self._ifname_mac_map = collections.defaultdict(list)
-        self._mac_ifname_map = {}
-        self._mac_pci_address_map = {}
-
-        # First we iterate over the list of physical interfaces visible to the
-        # system and update interface name to mac and mac to interface name map
-        for ifname in list_nics():
-            if not is_phy_iface(ifname):
-                continue
-            mac = get_nic_hwaddr(ifname)
-            self._ifname_mac_map[ifname] = [mac]
-            self._mac_ifname_map[mac] = ifname
-
-            # check if interface is part of a linux bond
-            _bond_name = get_bond_master(ifname)
-            if _bond_name and _bond_name != ifname:
-                log('Add linux bond "{}" to map for physical interface "{}" '
-                    'with mac "{}".'.format(_bond_name, ifname, mac),
-                    level=DEBUG)
-                # for bonds we want to be able to get a list of the mac
-                # addresses for the physical interfaces the bond is made up of.
-                if self._ifname_mac_map.get(_bond_name):
-                    self._ifname_mac_map[_bond_name].append(mac)
-                else:
-                    self._ifname_mac_map[_bond_name] = [mac]
-
-        # In light of the pre-deprecation notice in the docstring of this
-        # class we will expose the ability to configure OVS bonds as a
-        # DPDK-only feature, but generally use the data structures internally.
-        if config(enable_dpdk_key):
-            # resolve PCI address of interfaces listed in the bridges and bonds
-            # charm configuration options. Note that for already bound
-            # interfaces the helper will retrieve MAC address from the unit
-            # KV store as the information is no longer available in sysfs.
-            _pci_bridge_mac = resolve_pci_from_mapping_config(
-                bridges_key)
-            _pci_bond_mac = resolve_pci_from_mapping_config(
-                bonds_key)
-
-            for pci_address, bridge_mac in _pci_bridge_mac.items():
-                if bridge_mac.mac in self._mac_ifname_map:
-                    # if we already have the interface name in our map it is
-                    # visible to the system and therefore not bound to DPDK
-                    continue
-                ifname = 'dpdk-{}'.format(
-                    hashlib.sha1(
-                        pci_address.encode('UTF-8')).hexdigest()[:7])
-                self._ifname_mac_map[ifname] = [bridge_mac.mac]
-                self._mac_ifname_map[bridge_mac.mac] = ifname
-                self._mac_pci_address_map[bridge_mac.mac] = pci_address
-
-            for pci_address, bond_mac in _pci_bond_mac.items():
-                # for bonds we want to be able to get a list of macs from
-                # the bond name and also get at the interface name made up
-                # of the hash of the PCI address
-                ifname = 'dpdk-{}'.format(
-                    hashlib.sha1(
-                        pci_address.encode('UTF-8')).hexdigest()[:7])
-                self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac)
-                self._mac_ifname_map[bond_mac.mac] = ifname
-                self._mac_pci_address_map[bond_mac.mac] = pci_address
-
-        config_bridges = config(bridges_key) or ''
-        for bridge, ifname_or_mac in (
-                pair.split(':', 1)
-                for pair in config_bridges.split()):
-            if ':' in ifname_or_mac:
-                try:
-                    ifname = self.ifname_from_mac(ifname_or_mac)
-                except KeyError:
-                    # The interface is destined for a different unit in the
-                    # deployment.
-                    continue
-                macs = [ifname_or_mac]
-            else:
-                ifname = ifname_or_mac
-                macs = self.macs_from_ifname(ifname_or_mac)
-
-            portname = ifname
-            for mac in macs:
-                try:
-                    pci_address = self.pci_address_from_mac(mac)
-                    iftype = self.interface_type.dpdk
-                    ifname = self.ifname_from_mac(mac)
-                except KeyError:
-                    pci_address = None
-                    iftype = self.interface_type.system
-
-                self.add_interface(
-                    bridge, portname, ifname, iftype, pci_address, global_mtu)
-
-            if not macs:
-                # We have not mapped the interface and it is probably some sort
-                # of virtual interface. Our user has put it in the config with
-                # a purpose, so let's carry out their wish. LP: #1884743
-                log('Add unmapped interface from config: name "{}" bridge "{}"'
                    .format(ifname, bridge),
-                    level=DEBUG)
-                self.add_interface(
-                    bridge, ifname, ifname, self.interface_type.system, None,
-                    global_mtu)
-
-    def __getitem__(self, key):
-        """Provide a Dict-like interface, get value of item.
-
-        :param key: Key to look up value from.
-        :type key: any
-        :returns: Value
-        :rtype: any
-        """
-        return self._map.__getitem__(key)
-
-    def __iter__(self):
-        """Provide a Dict-like interface, iterate over keys.
-
-        :returns: Iterator
-        :rtype: Iterator[any]
-        """
-        return self._map.__iter__()
-
-    def __len__(self):
-        """Provide a Dict-like interface, measure the length of internal map.
-
-        :returns: Length
-        :rtype: int
-        """
-        return len(self._map)
-
-    def items(self):
-        """Provide a Dict-like interface, iterate over items.
-
-        :returns: Key Value pairs
-        :rtype: Iterator[any, any]
-        """
-        return self._map.items()
-
-    def keys(self):
-        """Provide a Dict-like interface, iterate over keys.
-
-        :returns: Iterator
-        :rtype: Iterator[any]
-        """
-        return self._map.keys()
-
-    def ifname_from_mac(self, mac):
-        """
-        :returns: Name of interface
-        :rtype: str
-        :raises: KeyError
-        """
-        return (get_bond_master(self._mac_ifname_map[mac]) or
-                self._mac_ifname_map[mac])
-
-    def macs_from_ifname(self, ifname):
-        """
-        :returns: List of hardware addresses (MAC) of interface
-        :rtype: List[str]
-        :raises: KeyError
-        """
-        return self._ifname_mac_map[ifname]
-
-    def pci_address_from_mac(self, mac):
-        """
-        :param mac: Hardware address (MAC) of interface
-        :type mac: str
-        :returns: PCI address of device associated with mac
-        :rtype: str
-        :raises: KeyError
-        """
-        return self._mac_pci_address_map[mac]
-
-    def add_interface(self, bridge, port, ifname, iftype,
-                      pci_address, mtu_request):
-        """Add an interface to the map.
-
-        :param bridge: Name of bridge on which the bond will be added
-        :type bridge: str
-        :param port: Name of port which will represent the bond on bridge
-        :type port: str
-        :param ifname: Name of interface that will make up the bonded port
-        :type ifname: str
-        :param iftype: Type of interface
-        :type iftype: BridgePortInterfaceMap.interface_type
-        :param pci_address: PCI address of interface
-        :type pci_address: Optional[str]
-        :param mtu_request: MTU to request for interface
-        :type mtu_request: Optional[int]
-        """
-        self._map[bridge][port][ifname] = {
-            'type': str(iftype),
-        }
-        if pci_address:
-            self._map[bridge][port][ifname].update({
-                'pci-address': pci_address,
-            })
-        if mtu_request is not None:
-            self._map[bridge][port][ifname].update({
-                'mtu-request': str(mtu_request)
-            })
-
-    def get_ifdatamap(self, bridge, port):
-        """Get structure suitable for charmhelpers.contrib.network.ovs helpers.
-
-        :param bridge: Name of bridge on which the port will be added
-        :type bridge: str
-        :param port: Name of port which will represent one or more interfaces
-        :type port: str
-        """
-        for _bridge, _ports in self.items():
-            for _port, _interfaces in _ports.items():
-                if _bridge == bridge and _port == port:
-                    ifdatamap = {}
-                    for name, data in _interfaces.items():
-                        ifdatamap.update({
-                            name: {
-                                'type': data['type'],
-                            },
-                        })
-                        if data.get('mtu-request') is not None:
-                            ifdatamap[name].update({
-                                'mtu_request': data['mtu-request'],
-                            })
-                        if data.get('pci-address'):
-                            ifdatamap[name].update({
-                                'options': {
-                                    'dpdk-devargs': data['pci-address'],
-                                },
-                            })
-                    return ifdatamap
-
-
-class BondConfig(object):
-    """Container and helpers for bond configuration options.
-
-    Data is put into a dictionary and a convenient config get interface is
-    provided.
-    """
-
-    DEFAULT_LACP_CONFIG = {
-        'mode': 'balance-tcp',
-        'lacp': 'active',
-        'lacp-time': 'fast'
-    }
-    ALL_BONDS = 'ALL_BONDS'
-
-    BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp']
-    BOND_LACP = ['active', 'passive', 'off']
-    BOND_LACP_TIME = ['fast', 'slow']
-
-    def __init__(self, config_key=None):
-        """Parse specified configuration option.
-
-        :param config_key: Configuration key to retrieve data from
-                           (default: ``dpdk-bond-config``)
-        :type config_key: Optional[str]
-        """
-        self.config_key = config_key or 'dpdk-bond-config'
-
-        self.lacp_config = {
-            self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
-        }
-
-        lacp_config = config(self.config_key)
-        if lacp_config:
-            lacp_config_map = lacp_config.split()
-            for entry in lacp_config_map:
-                bond, entry = entry.partition(':')[0:3:2]
-                if not bond:
-                    bond = self.ALL_BONDS
-
-                mode, entry = entry.partition(':')[0:3:2]
-                if not mode:
-                    mode = self.DEFAULT_LACP_CONFIG['mode']
-                assert mode in self.BOND_MODES, \
-                    "Bond mode {} is invalid".format(mode)
-
-                lacp, entry = entry.partition(':')[0:3:2]
-                if not lacp:
-                    lacp = self.DEFAULT_LACP_CONFIG['lacp']
-                assert lacp in self.BOND_LACP, \
-                    "Bond lacp {} is invalid".format(lacp)
-
-                lacp_time, entry = entry.partition(':')[0:3:2]
-                if not lacp_time:
-                    lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
-                assert lacp_time in self.BOND_LACP_TIME, \
-                    "Bond lacp-time {} is invalid".format(lacp_time)
-
-                self.lacp_config[bond] = {
-                    'mode': mode,
-                    'lacp': lacp,
-                    'lacp-time': lacp_time
-                }
-
-    def get_bond_config(self, bond):
-        """Get the LACP configuration for a bond
-
-        :param bond: the bond name
-        :return: a dictionary with the configuration of the bond
-        :rtype: Dict[str,Dict[str,str]]
-        """
-        return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])
-
-    def get_ovs_portdata(self, bond):
-        """Get structure suitable for charmhelpers.contrib.network.ovs helpers.
-
-        :param bond: the bond name
-        :return: a dictionary with the configuration of the bond
-        :rtype: Dict[str,Union[str,Dict[str,str]]]
-        """
-        bond_config = self.get_bond_config(bond)
-        return {
-            'bond_mode': bond_config['mode'],
-            'lacp': bond_config['lacp'],
-            'other_config': {
-                'lacp-time': bond_config['lacp-time'],
-            },
-        }
-
-
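``BondConfig`` peels colon-separated fields off each space-separated entry with ``str.partition``; the ``[0:3:2]`` slice keeps the head and tail of the resulting 3-tuple and drops the separator. A short sketch of that decomposition (the entry value is illustrative):

    # How one dpdk-bond-config entry decomposes, step by step.
    entry = 'bond1:active-backup:off:slow'
    bond, rest = entry.partition(':')[0:3:2]       # 'bond1', 'active-backup:off:slow'
    mode, rest = rest.partition(':')[0:3:2]        # 'active-backup', 'off:slow'
    lacp, lacp_time = rest.partition(':')[0:3:2]   # 'off', 'slow'
    print(bond, mode, lacp, lacp_time)
    # -> bond1 active-backup off slow

An entry with a leading colon (for example ``:balance-slb``) yields an empty bond name, which the parser maps to ``ALL_BONDS``.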
-class SRIOVContext(OSContextGenerator):
-    """Provide context for configuring SR-IOV devices."""
-
-    class sriov_config_mode(enum.Enum):
-        """Mode in which SR-IOV is configured.
-
-        The configuration option identified by the ``numvfs_key`` parameter
-        is overloaded and defines in which mode the charm should interpret
-        the other SR-IOV-related configuration options.
-        """
-        auto = 'auto'
-        blanket = 'blanket'
-        explicit = 'explicit'
-
-    PCIDeviceNumVFs = collections.namedtuple(
-        'PCIDeviceNumVFs', ['device', 'numvfs'])
-
-    def _determine_numvfs(self, device, sriov_numvfs):
-        """Determine number of Virtual Functions (VFs) configured for device.
-
-        :param device: Object describing a PCI Network interface card (NIC).
-        :type device: contrib.hardware.pci.PCINetDevice
-        :param sriov_numvfs: Number of VFs requested for blanket configuration.
-        :type sriov_numvfs: int
-        :returns: Number of VFs to configure for device
-        :rtype: Optional[int]
-        """
-
-        def _get_capped_numvfs(requested):
-            """Get a number of VFs that does not exceed individual card limits.
-
-            Depending on the make and model of NIC, the number of VFs
-            supported varies. Requesting more VFs than a card supports would
-            be a fatal error; cap the requested number at the total number of
-            VFs each individual card supports.
-
-            :param requested: Number of VFs requested
-            :type requested: int
-            :returns: Number of VFs allowed
-            :rtype: int
-            """
-            actual = min(int(requested), int(device.sriov_totalvfs))
-            if actual < int(requested):
-                log('Requested VFs ({}) too high for device {}. Falling back '
-                    'to value supported by device: {}'
-                    .format(requested, device.interface_name,
-                            device.sriov_totalvfs),
-                    level=WARNING)
-            return actual
-
-        if self._sriov_config_mode == self.sriov_config_mode.auto:
-            # auto-mode
-            #
-            # If device mapping configuration is present, return information
-            # on cards with mapping.
-            #
-            # If no device mapping configuration is present, return information
-            # for all cards.
-            #
-            # The maximum number of VFs supported by card will be used.
-            if (self._sriov_mapped_devices and
-                    device.interface_name not in self._sriov_mapped_devices):
-                log('SR-IOV configured in auto mode: No device mapping for {}'
-                    .format(device.interface_name),
-                    level=DEBUG)
-                return
-            return _get_capped_numvfs(device.sriov_totalvfs)
-        elif self._sriov_config_mode == self.sriov_config_mode.blanket:
-            # blanket-mode
-            #
-            # User has specified a number of VFs that should apply to all
-            # cards with support for VFs.
-            return _get_capped_numvfs(sriov_numvfs)
-        elif self._sriov_config_mode == self.sriov_config_mode.explicit:
-            # explicit-mode
-            #
-            # User has given a list of interface names and associated number of
-            # VFs
-            if device.interface_name not in self._sriov_config_devices:
-                log('SR-IOV configured in explicit mode: No device:numvfs '
-                    'pair for device {}, skipping.'
-                    .format(device.interface_name),
-                    level=DEBUG)
-                return
-            return _get_capped_numvfs(
-                self._sriov_config_devices[device.interface_name])
-        else:
-            raise RuntimeError('This should not be reached')
-
-    def __init__(self, numvfs_key=None, device_mappings_key=None):
-        """Initialize map from PCI devices and configuration options.
-
-        :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
-        :type numvfs_key: Optional[str]
-        :param device_mappings_key: Config key for device mappings
-                                    (default: 'sriov-device-mappings')
-        :type device_mappings_key: Optional[str]
-        :raises: RuntimeError
-        """
-        numvfs_key = numvfs_key or 'sriov-numvfs'
-        device_mappings_key = device_mappings_key or 'sriov-device-mappings'
-
-        devices = pci.PCINetDevices()
-        charm_config = config()
-        sriov_numvfs = charm_config.get(numvfs_key) or ''
-        sriov_device_mappings = charm_config.get(device_mappings_key) or ''
-
-        # create list of devices from sriov_device_mappings config option
-        self._sriov_mapped_devices = [
-            pair.split(':', 1)[1]
-            for pair in sriov_device_mappings.split()
-        ]
-
-        # create map of device:numvfs from sriov_numvfs config option
-        self._sriov_config_devices = {
-            ifname: numvfs for ifname, numvfs in (
-                pair.split(':', 1) for pair in sriov_numvfs.split()
-                if ':' in sriov_numvfs)
-        }
-
-        # determine configuration mode from contents of sriov_numvfs
-        if sriov_numvfs == 'auto':
-            self._sriov_config_mode = self.sriov_config_mode.auto
-        elif sriov_numvfs.isdigit():
-            self._sriov_config_mode = self.sriov_config_mode.blanket
-        elif ':' in sriov_numvfs:
-            self._sriov_config_mode = self.sriov_config_mode.explicit
-        else:
-            raise RuntimeError('Unable to determine mode of SR-IOV '
-                               'configuration.')
-
-        self._map = {
-            device.pci_address: self.PCIDeviceNumVFs(
-                device, self._determine_numvfs(device, sriov_numvfs))
-            for device in devices.pci_devices
-            if device.sriov and
-            self._determine_numvfs(device, sriov_numvfs) is not None
-        }
-
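The mode detection above keys off the shape of the ``sriov-numvfs`` value alone. A condensed sketch of the same decision (the sample values are illustrative):

    # Mirror of the sriov-numvfs mode detection in SRIOVContext.__init__.
    def detect_mode(sriov_numvfs):
        if sriov_numvfs == 'auto':
            return 'auto'      # mapped (or all) devices get their max VFs
        if sriov_numvfs.isdigit():
            return 'blanket'   # one VF count for every SR-IOV capable device
        if ':' in sriov_numvfs:
            return 'explicit'  # per-device 'ifname:numvfs' pairs
        raise RuntimeError('Unable to determine mode of SR-IOV configuration.')

    for value in ('auto', '16', 'eth0:8 eth1:4'):
        print(value, '->', detect_mode(value))
    # -> auto -> auto, 16 -> blanket, eth0:8 eth1:4 -> explicit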
-    def __call__(self):
-        """Provide backward compatible SR-IOV context.
-
-        :returns: Map interface name: min(configured, max) virtual functions.
-            Example:
-                {
-                    'eth0': 16,
-                    'eth1': 32,
-                    'eth2': 64,
-                }
-        :rtype: Dict[str,int]
-        """
-        return {
-            pcidnvfs.device.interface_name: pcidnvfs.numvfs
-            for _, pcidnvfs in self._map.items()
-        }
-
-    @property
-    def get_map(self):
-        """Provide map of configured SR-IOV capable PCI devices.
-
-        :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs.
-            Example:
-                {
-                    '0000:81:00.0': self.PCIDeviceNumVFs(, 32),
-                    '0000:81:00.1': self.PCIDeviceNumVFs(, 32),
-                }
-        :rtype: Dict[str, self.PCIDeviceNumVFs]
-        """
-        return self._map
-
-
-class CephBlueStoreCompressionContext(OSContextGenerator):
-    """Ceph BlueStore compression options."""
-
-    # Tuple with Tuples that map configuration option name to CephBrokerRq op
-    # property name
-    options = (
-        ('bluestore-compression-algorithm',
-         'compression-algorithm'),
-        ('bluestore-compression-mode',
-         'compression-mode'),
-        ('bluestore-compression-required-ratio',
-         'compression-required-ratio'),
-        ('bluestore-compression-min-blob-size',
-         'compression-min-blob-size'),
-        ('bluestore-compression-min-blob-size-hdd',
-         'compression-min-blob-size-hdd'),
-        ('bluestore-compression-min-blob-size-ssd',
-         'compression-min-blob-size-ssd'),
-        ('bluestore-compression-max-blob-size',
-         'compression-max-blob-size'),
-        ('bluestore-compression-max-blob-size-hdd',
-         'compression-max-blob-size-hdd'),
-        ('bluestore-compression-max-blob-size-ssd',
-         'compression-max-blob-size-ssd'),
-    )
-
-    def __init__(self):
-        """Initialize context by loading values from charm config.
-
-        We keep two maps, one suitable for use with CephBrokerRq's and one
-        suitable for template generation.
-        """
-        charm_config = config()
-
-        # CephBrokerRq op map
-        self.op = {}
-        # Context exposed for template generation
-        self.ctxt = {}
-        for config_key, op_key in self.options:
-            value = charm_config.get(config_key)
-            self.ctxt.update({config_key.replace('-', '_'): value})
-            self.op.update({op_key: value})
-
-    def __call__(self):
-        """Get context.
-
-        :returns: Context
-        :rtype: Dict[str,any]
-        """
-        return self.ctxt
-
-    def get_op(self):
-        """Get values for use in CephBrokerRq op.
-
-        :returns: Context values with CephBrokerRq op property name as key.
-        :rtype: Dict[str,any]
-        """
-        return self.op
-
-    def get_kwargs(self):
-        """Get values for use as keyword arguments.
-
-        :returns: Context values with key suitable for use as kwargs to
-                  CephBrokerRq add_op_create_*_pool methods.
-        :rtype: Dict[str,any]
-        """
-        return {
-            k.replace('-', '_'): v
-            for k, v in self.op.items()
-        }
-
-    def validate(self):
-        """Validate options.
-
-        :raises: AssertionError
-        """
-        # We slip in a dummy name on class instantiation to allow validation of
-        # the other options. It will not affect further use.
-        #
-        # NOTE: once we retire Python 3.5 we can fold this into an in-line
-        # dictionary comprehension in the call to the initializer.
-        dummy_op = {'name': 'dummy-name'}
-        dummy_op.update(self.op)
-        pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
-        pool.validate()
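``CephBlueStoreCompressionContext`` keeps the same set of values under three spellings: the charm config key, a template-context key (dashes swapped for underscores), and a CephBrokerRq op key. A sketch of the translation for one representative option (the value is illustrative):

    # One option, three spellings, as built in __init__/get_kwargs above.
    config_key = 'bluestore-compression-mode'
    op_key = 'compression-mode'            # CephBrokerRq op property name
    value = 'aggressive'                   # illustrative config value

    ctxt_key = config_key.replace('-', '_')   # key exposed to templates
    kwarg_key = op_key.replace('-', '_')      # kwarg for add_op_create_*_pool
    print(ctxt_key, op_key, kwarg_key)
    # -> bluestore_compression_mode compression-mode compression_mode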
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py
deleted file mode 100644
index 94eacf6c..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/deferred_events.py
+++ /dev/null
@@ -1,416 +0,0 @@
-# Copyright 2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module for managing deferred service events.
-
-This module is used to manage deferred service events from both charm actions
-and package actions.
-"""
-
-import datetime
-import glob
-import yaml
-import os
-import time
-import uuid
-
-import charmhelpers.contrib.openstack.policy_rcd as policy_rcd
-import charmhelpers.core.hookenv as hookenv
-import charmhelpers.core.host as host
-import charmhelpers.core.unitdata as unitdata
-
-import subprocess
-
-
-# Deferred events generated from the charm are stored alongside those
-# generated from packaging.
-DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR
-
-
-class ServiceEvent():
-
-    def __init__(self, timestamp, service, reason, action,
-                 policy_requestor_name=None, policy_requestor_type=None):
-        self.timestamp = timestamp
-        self.service = service
-        self.reason = reason
-        self.action = action
-        if policy_requestor_name:
-            self.policy_requestor_name = policy_requestor_name
-        else:
-            self.policy_requestor_name = hookenv.service_name()
-        if policy_requestor_type:
-            self.policy_requestor_type = policy_requestor_type
-        else:
-            self.policy_requestor_type = 'charm'
-
-    def __eq__(self, other):
-        for attr in vars(self):
-            if getattr(self, attr) != getattr(other, attr):
-                return False
-        return True
-
-    def matching_request(self, other):
-        for attr in ['service', 'action', 'reason']:
-            if getattr(self, attr) != getattr(other, attr):
-                return False
-        return True
-
-    @classmethod
-    def from_dict(cls, data):
-        return cls(
-            data['timestamp'],
-            data['service'],
-            data['reason'],
-            data['action'],
-            data.get('policy_requestor_name'),
-            data.get('policy_requestor_type'))
-
-
-def deferred_events_files():
-    """Deferred event files
-
-    Deferred event files that were generated by service_name() policy.
-
-    :returns: Deferred event files
-    :rtype: List[str]
-    """
-    return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
-
-
-def read_event_file(file_name):
-    """Read a file and return the corresponding objects.
-
-    :param file_name: Name of file to read.
-    :type file_name: str
-    :returns: ServiceEvent from file.
-    :rtype: ServiceEvent
-    """
-    with open(file_name, 'r') as f:
-        contents = yaml.safe_load(f)
-    event = ServiceEvent(
-        contents['timestamp'],
-        contents['service'],
-        contents['reason'],
-        contents['action'],
-        policy_requestor_name=contents.get('policy_requestor_name'),
-        policy_requestor_type=contents.get('policy_requestor_type'))
-    return event
-
-
-def deferred_events():
-    """Get list of deferred events.
-
-    List of deferred events. Events are represented by dicts of the form:
-
-        {
-            action: restart,
-            policy_requestor_name: neutron-openvswitch,
-            policy_requestor_type: charm,
-            reason: 'Pkg update',
-            service: openvswitch-switch,
-            timestamp: 1614328743}
-
-    :returns: List of deferred events.
-    :rtype: List[ServiceEvent]
-    """
-    events = []
-    for defer_file in deferred_events_files():
-        events.append((defer_file, read_event_file(defer_file)))
-    return events
-
-
-def duplicate_event_files(event):
-    """Get list of event files that have equivalent deferred events.
-
-    :param event: Event to compare
-    :type event: ServiceEvent
-    :returns: List of event files
-    :rtype: List[str]
-    """
-    duplicates = []
-    for event_file, existing_event in deferred_events():
-        if event.matching_request(existing_event):
-            duplicates.append(event_file)
-    return duplicates
-
-
-def get_event_record_file(policy_requestor_type, policy_requestor_name):
-    """Generate filename for storing a new event.
-
-    :param policy_requestor_type: System that blocked event
-    :type policy_requestor_type: str
-    :param policy_requestor_name: Name of application that blocked event
-    :type policy_requestor_name: str
-    :returns: File name
-    :rtype: str
-    """
-    file_name = '{}/{}-{}-{}.deferred'.format(
-        DEFERRED_EVENTS_DIR,
-        policy_requestor_type,
-        policy_requestor_name,
-        uuid.uuid1())
-    return file_name
-
-
-def save_event(event):
-    """Write deferred events to backend.
-
-    :param event: Event to save
-    :type event: ServiceEvent
-    """
-    requestor_name = hookenv.service_name()
-    requestor_type = 'charm'
-    init_policy_log_dir()
-    if duplicate_event_files(event):
-        hookenv.log(
-            "Not writing new event, existing event found. {} {} {}".format(
-                event.service,
-                event.action,
-                event.reason),
-            level="DEBUG")
-    else:
-        record_file = get_event_record_file(
-            policy_requestor_type=requestor_type,
-            policy_requestor_name=requestor_name)
-
-        with open(record_file, 'w') as f:
-            data = {
-                'timestamp': event.timestamp,
-                'service': event.service,
-                'action': event.action,
-                'reason': event.reason,
-                'policy_requestor_type': requestor_type,
-                'policy_requestor_name': requestor_name}
-            yaml.dump(data, f)
-
-
-def clear_deferred_events(svcs, action):
-    """Remove any outstanding deferred events.
-
-    Remove a deferred event if its service is in the services list and its
-    action matches.
-
-    :param svcs: List of services to remove.
-    :type svcs: List[str]
-    :param action: Action to remove
-    :type action: str
-    """
-    # XXX This function is not currently processing the action. It needs to
-    #     match the action and also take account of try-restart and the
-    #     equivalence of stop-start and restart.
-    for defer_file in deferred_events_files():
-        deferred_event = read_event_file(defer_file)
-        if deferred_event.service in svcs:
-            os.remove(defer_file)
-
-
-def init_policy_log_dir():
-    """Ensure directory to store events exists."""
-    if not os.path.exists(DEFERRED_EVENTS_DIR):
-        os.mkdir(DEFERRED_EVENTS_DIR)
-
-
-def get_deferred_events():
-    """Return a list of deferred events requested by the charm and packages.
-
-    :returns: List of deferred events
-    :rtype: List[ServiceEvent]
-    """
-    events = []
-    for _, event in deferred_events():
-        events.append(event)
-    return events
-
-
-def get_deferred_restarts():
-    """List of deferred restart events requested by the charm and packages.
-
-    :returns: List of deferred restarts
-    :rtype: List[ServiceEvent]
-    """
-    return [e for e in get_deferred_events() if e.action == 'restart']
-
-
-def clear_deferred_restarts(services):
-    """Clear deferred restart events targeted at `services`.
-
-    :param services: Services with deferred actions to clear.
-    :type services: List[str]
-    """
-    clear_deferred_events(services, 'restart')
-
-
-def process_svc_restart(service):
-    """Respond to a service restart having occurred.
-
-    :param service: Services that the action was performed against.
-    :type service: str
-    """
-    clear_deferred_restarts([service])
-
-
-def is_restart_permitted():
-    """Check whether restarts are permitted.
-
-    :returns: Whether restarts are permitted
-    :rtype: bool
-    """
-    if hookenv.config('enable-auto-restarts') is None:
-        return True
-    return hookenv.config('enable-auto-restarts')
-
-
-def check_and_record_restart_request(service, changed_files):
-    """Check if restarts are permitted, if they are not log the request.
-
-    :param service: Service to be restarted
-    :type service: str
-    :param changed_files: Files that have changed to trigger restarts.
-    :type changed_files: List[str]
-    :returns: Whether restarts are permitted
-    :rtype: bool
-    """
-    changed_files = sorted(list(set(changed_files)))
-    permitted = is_restart_permitted()
-    if not permitted:
-        save_event(ServiceEvent(
-            timestamp=round(time.time()),
-            service=service,
-            reason='File(s) changed: {}'.format(
-                ', '.join(changed_files)),
-            action='restart'))
-    return permitted
-
-
-def deferrable_svc_restart(service, reason=None):
-    """Restarts service if permitted, if not defer it.
-
-    :param service: Service to be restarted
-    :type service: str
-    :param reason: Reason for restart
-    :type reason: Union[str, None]
-    """
-    if is_restart_permitted():
-        host.service_restart(service)
-    else:
-        save_event(ServiceEvent(
-            timestamp=round(time.time()),
-            service=service,
-            reason=reason,
-            action='restart'))
-
-
-def configure_deferred_restarts(services):
-    """Set up deferred restarts.
-
-    :param services: Services to block restarts of.
-    :type services: List[str]
-    """
-    policy_rcd.install_policy_rcd()
-    if is_restart_permitted():
-        policy_rcd.remove_policy_file()
-    else:
-        blocked_actions = ['stop', 'restart', 'try-restart']
-        for svc in services:
-            policy_rcd.add_policy_block(svc, blocked_actions)
-
-
-def get_service_start_time(service):
-    """Find point in time when the systemd unit transitioned to active state.
-
-    :param service: Service to check timestamp of.
-    :type service: str
-    """
-    start_time = None
-    out = subprocess.check_output(
-        [
-            'systemctl',
-            'show',
-            service,
-            '--property=ActiveEnterTimestamp'])
-    str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '')
-    if str_time:
-        start_time = datetime.datetime.strptime(
-            str_time,
-            '%a %Y-%m-%d %H:%M:%S %Z')
-    return start_time
-
-
-def check_restart_timestamps():
-    """Check deferred restarts against systemd unit start times.
-
-    Check if a service has a deferred event and clear it if it has been
-    subsequently restarted.
-    """
-    for event in get_deferred_restarts():
-        start_time = get_service_start_time(event.service)
-        deferred_restart_time = datetime.datetime.fromtimestamp(
-            event.timestamp)
-        if start_time and start_time < deferred_restart_time:
-            hookenv.log(
-                ("Restart still required, {} was started at {}, restart was "
-                 "requested after that at {}").format(
-                    event.service,
-                    start_time,
-                    deferred_restart_time),
-                level='DEBUG')
-        else:
-            clear_deferred_restarts([event.service])
-
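``get_service_start_time`` leans on systemd's ``ActiveEnterTimestamp`` property, so the comparison in ``check_restart_timestamps`` only needs two datetimes. A sketch of the parsing step with a canned property line (the sample output is illustrative):

    # Parsing 'systemctl show <svc> --property=ActiveEnterTimestamp' output.
    import datetime

    out = 'ActiveEnterTimestamp=Mon 2021-03-01 10:04:23 UTC'
    str_time = out.rstrip().replace('ActiveEnterTimestamp=', '')
    start_time = datetime.datetime.strptime(str_time, '%a %Y-%m-%d %H:%M:%S %Z')
    print(start_time)
    # -> 2021-03-01 10:04:23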
-
-def set_deferred_hook(hookname):
-    """Record that a hook has been deferred.
-
-    :param hookname: Name of hook that was deferred.
-    :type hookname: str
-    """
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        deferred_hooks = kv.get('deferred-hooks', [])
-        if hookname not in deferred_hooks:
-            deferred_hooks.append(hookname)
-            kv.set('deferred-hooks', sorted(list(set(deferred_hooks))))
-
-
-def get_deferred_hooks():
-    """Get a list of deferred hooks.
-
-    :returns: List of hook names.
-    :rtype: List[str]
-    """
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        return kv.get('deferred-hooks', [])
-
-
-def clear_deferred_hooks():
-    """Clear any deferred hooks."""
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        kv.set('deferred-hooks', [])
-
-
-def clear_deferred_hook(hookname):
-    """Clear a specific deferred hook.
-
-    :param hookname: Name of hook to remove.
-    :type hookname: str
-    """
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        deferred_hooks = kv.get('deferred-hooks', [])
-        if hookname in deferred_hooks:
-            deferred_hooks.remove(hookname)
-            kv.set('deferred-hooks', deferred_hooks)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
deleted file mode 100644
index b2330637..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class OSContextError(Exception):
-    """Raised when an error occurs during context generation.
-
-    This exception is principally used in contrib.openstack.context
-    """
-    pass
-
-
-class ServiceActionError(Exception):
-    """Raised when a service action (stop/start/etc.) failed."""
-    pass
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 9df5f746..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py
deleted file mode 100755
index 5f392b3c..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2014-2022 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Checks for services with deferred restarts.
-
-This Nagios check will parse /var/lib/policy-rc.d/
-to find any restarts that are currently deferred.
-"""
-
-import argparse
-import glob
-import sys
-import yaml
-
-
-DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'
-
-
-def get_deferred_events():
-    """Return a list of deferred events dicts from policy-rc.d files.
-
-    Events are read from DEFERRED_EVENTS_DIR and are of the form:
-    {
-        action: restart,
-        policy_requestor_name: rabbitmq-server,
-        policy_requestor_type: charm,
-        reason: 'Pkg update',
-        service: rabbitmq-server,
-        timestamp: 1614328743
-    }
-
-    :raises OSError: Raised in case of a system error while reading a policy file
-    :raises yaml.YAMLError: Raised if parsing a policy file fails
-
-    :returns: List of deferred event dictionaries
-    :rtype: list
-    """
-    deferred_events_files = glob.glob(
-        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
-
-    deferred_events = []
-    for event_file in deferred_events_files:
-        with open(event_file, 'r') as f:
-            event = yaml.safe_load(f)
-            deferred_events.append(event)
-
-    return deferred_events
-
-
-def get_deferred_restart_services(application=None):
-    """Returns a list of services with deferred restarts.
-
-    :param str application: Name of the application that blocked the service restart.
-                            If application is None, all services with deferred restarts
-                            are returned. Services which are blocked by a non-charm
-                            requestor are always returned.
-
-    :raises OSError: Raised in case of a system error while reading a policy file
-    :raises yaml.YAMLError: Raised if parsing a policy file fails
-
-    :returns: List of services with deferred restarts belonging to application.
-    :rtype: list
-    """
-
-    deferred_restart_events = filter(
-        lambda e: e['action'] == 'restart', get_deferred_events())
-
-    deferred_restart_services = set()
-    for restart_event in deferred_restart_events:
-        if application:
-            if (
-                restart_event['policy_requestor_type'] != 'charm' or
-                restart_event['policy_requestor_type'] == 'charm' and
-                restart_event['policy_requestor_name'] == application
-            ):
-                deferred_restart_services.add(restart_event['service'])
-        else:
-            deferred_restart_services.add(restart_event['service'])
-
-    return list(deferred_restart_services)
-
-
-def main():
-    """Check for services with deferred restarts."""
-    parser = argparse.ArgumentParser(
-        description='Check for services with deferred restarts')
-    parser.add_argument(
-        '--application', help='Check services belonging to this application only')
-
-    args = parser.parse_args()
-
-    services = set(get_deferred_restart_services(args.application))
-
-    if len(services) == 0:
-        print('OK: No deferred service restarts.')
-        sys.exit(0)
-    else:
-        print(
-            'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services)))
-        sys.exit(1)
-
-
-if __name__ == '__main__':
-    try:
-        main()
-    except OSError as e:
-        print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror))
-        sys.exit(1)
-    except yaml.YAMLError as e:
-        print('CRITICAL: Failed to parse a policy file: {}'.format(str(e)))
-        sys.exit(1)
-    except Exception as e:
-        print('CRITICAL: An unknown error occurred: {}'.format(str(e)))
-        sys.exit(1)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
deleted file mode 100755
index 431e972b..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python3
-
-"""This script is an implementation of policy-rc.d
-
-For further information on policy-rc.d see *1
-
-*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
-"""
-import collections
-import glob
-import os
-import logging
-import sys
-import time
-import uuid
-import yaml
-
-
-SystemPolicy = collections.namedtuple(
-    'SystemPolicy',
-    [
-        'policy_requestor_name',
-        'policy_requestor_type',
-        'service',
-        'blocked_actions'])
-
-DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d'
-DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d'
-
-
-def read_policy_file(policy_file):
-    """Return system policies from given file.
-
-    :param policy_file: Name of file to read.
-    :type policy_file: str
-    :returns: Policy
-    :rtype: List[SystemPolicy]
-    """
-    policies = []
-    if os.path.exists(policy_file):
-        with open(policy_file, 'r') as f:
-            policy = yaml.safe_load(f)
-            for service, actions in policy['blocked_actions'].items():
-                service = service.replace('.service', '')
-                policies.append(SystemPolicy(
-                    policy_requestor_name=policy['policy_requestor_name'],
-                    policy_requestor_type=policy['policy_requestor_type'],
-                    service=service,
-                    blocked_actions=actions))
-    return policies
-
-
-def get_policies(policy_config_dir):
-    """Return all system policies in policy_config_dir.
-
-    :param policy_config_dir: Directory to read policy files from.
-    :type policy_config_dir: str
-    :returns: Policy
-    :rtype: List[SystemPolicy]
-    """
-    _policy = []
-    for f in glob.glob('{}/*.policy'.format(policy_config_dir)):
-        _policy.extend(read_policy_file(f))
-    return _policy
-
-
-def record_blocked_action(service, action, blocking_policies, policy_log_dir):
-    """Record that an action was requested but denied.
-
-    :param service: Service that was blocked
-    :type service: str
-    :param action: Action that was blocked.
-    :type action: str
-    :param blocking_policies: Policies that blocked the action on the service.
-    :type blocking_policies: List[SystemPolicy]
-    :param policy_log_dir: Directory to place the blocking action record.
-    :type policy_log_dir: str
-    """
-    if not os.path.exists(policy_log_dir):
-        os.mkdir(policy_log_dir)
-    seconds = round(time.time())
-    for policy in blocking_policies:
-        if not os.path.exists(policy_log_dir):
-            os.mkdir(policy_log_dir)
-        file_name = '{}/{}-{}-{}.deferred'.format(
-            policy_log_dir,
-            policy.policy_requestor_type,
-            policy.policy_requestor_name,
-            uuid.uuid1())
-        with open(file_name, 'w') as f:
-            data = {
-                'timestamp': seconds,
-                'service': service,
-                'action': action,
-                'reason': 'Package update',
-                'policy_requestor_type': policy.policy_requestor_type,
-                'policy_requestor_name': policy.policy_requestor_name}
-            yaml.dump(data, f)
-
-
-def get_blocking_policies(service, action, policy_config_dir):
-    """Return the policies that block the requested action on the service.
-
-    :param service: Service that action is requested against.
-    :type service: str
-    :param action: Action that is requested.
-    :type action: str
-    :param policy_config_dir: Directory that stores policy files.
-    :type policy_config_dir: str
-    :returns: Policies
-    :rtype: List[SystemPolicy]
-    """
-    service = service.replace('.service', '')
-    blocking_policies = [
-        policy
-        for policy in get_policies(policy_config_dir)
-        if policy.service == service and action in policy.blocked_actions]
-    return blocking_policies
-
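Matching is a straight filter over the loaded policies on service name (with any ``.service`` suffix stripped) and blocked action. A self-contained sketch of the same filter (the policy data is invented for illustration):

    # Filter a policy list the way get_blocking_policies() does.
    import collections

    SystemPolicy = collections.namedtuple(
        'SystemPolicy',
        ['policy_requestor_name', 'policy_requestor_type', 'service',
         'blocked_actions'])

    policies = [
        SystemPolicy('ceph-mon', 'charm', 'ceph-mon', ['stop', 'restart']),
    ]

    def blocking(service, action):
        service = service.replace('.service', '')
        return [p for p in policies
                if p.service == service and action in p.blocked_actions]

    print(bool(blocking('ceph-mon.service', 'restart')))  # -> True
    print(bool(blocking('ceph-mon.service', 'status')))   # -> False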
-
-def process_action_request(service, action, policy_config_dir, policy_log_dir):
-    """Take the requested action against service and check if it is permitted.
-
-    :param service: Service that action is requested against.
-    :type service: str
-    :param action: Action that is requested.
-    :type action: str
-    :param policy_config_dir: Directory that stores policy files.
-    :type policy_config_dir: str
-    :param policy_log_dir: Directory to place records of blocked actions.
-    :type policy_log_dir: str
-    :returns: Tuple of whether the action is permitted and explanation.
-    :rtype: (boolean, str)
-    """
-    blocking_policies = get_blocking_policies(
-        service,
-        action,
-        policy_config_dir)
-    if blocking_policies:
-        policy_msg = [
-            '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name)
-            for p in sorted(blocking_policies)]
-        message = '{} of {} blocked by {}'.format(
-            action,
-            service,
-            ', '.join(policy_msg))
-        record_blocked_action(
-            service,
-            action,
-            blocking_policies,
-            policy_log_dir)
-        action_permitted = False
-    else:
-        message = "Permitting {} {}".format(service, action)
-        action_permitted = True
-    return action_permitted, message
-
-
-def main():
-    logging.basicConfig(
-        filename='/var/log/policy-rc.d.log',
-        level=logging.DEBUG,
-        format='%(asctime)s %(message)s')
-
-    service = sys.argv[1]
-    action = sys.argv[2]
-
-    permitted, message = process_action_request(
-        service,
-        action,
-        DEFAULT_POLICY_CONFIG_DIR,
-        DEFAULT_POLICY_LOG_DIR)
-    logging.info(message)
-
-    # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
-    # Exit status codes:
-    #  0 - action allowed
-    #  1 - unknown action (therefore, undefined policy)
-    #  100 - unknown initscript id
-    #  101 - action forbidden by policy
-    #  102 - subsystem error
-    #  103 - syntax error
-    #  104 - [reserved]
-    #  105 - behaviour uncertain, policy undefined.
-    #  106 - action not allowed. Use the returned fallback actions
-    #        (which are implied to be "allowed") instead.
-
-    if permitted:
-        return 0
-    else:
-        return 101
-
-
-if __name__ == "__main__":
-    rc = main()
-    sys.exit(rc)
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
deleted file mode 100644
index 9b088de8..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
deleted file mode 100644
index a5cbdf53..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright 2014-2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2016 Canonical Ltd.
-#
-# Authors:
-#   Openstack Charmers <
-#
-
-"""
-Helpers for high availability.
-""" - -import hashlib -import json - -import re - -from charmhelpers.core.hookenv import ( - expected_related_units, - log, - relation_set, - charm_name, - config, - status_set, - DEBUG, -) - -from charmhelpers.core.host import ( - lsb_release -) - -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - is_ipv6, -) - -from charmhelpers.contrib.network.ip import ( - get_iface_for_address, - get_netmask_for_address, -) - -from charmhelpers.contrib.hahelpers.cluster import ( - get_hacluster_config -) - -JSON_ENCODE_OPTIONS = dict( - sort_keys=True, - allow_nan=False, - indent=None, - separators=(',', ':'), -) - -VIP_GROUP_NAME = 'grp_{service}_vips' -DNSHA_GROUP_NAME = 'grp_{service}_hostnames' - - -class DNSHAException(Exception): - """Raised when an error occurs setting up DNS HA - """ - - pass - - -def update_dns_ha_resource_params(resources, resource_params, - relation_id=None, - crm_ocf='ocf:maas:dns'): - """ Configure DNS-HA resources based on provided configuration and - update resource dictionaries for the HA relation. - - @param resources: Pointer to dictionary of resources. - Usually instantiated in ha_joined(). - @param resource_params: Pointer to dictionary of resource parameters. - Usually instantiated in ha_joined() - @param relation_id: Relation ID of the ha relation - @param crm_ocf: Corosync Open Cluster Framework resource agent to use for - DNS HA - """ - _relation_data = {'resources': {}, 'resource_params': {}} - update_hacluster_dns_ha(charm_name(), - _relation_data, - crm_ocf) - resources.update(_relation_data['resources']) - resource_params.update(_relation_data['resource_params']) - relation_set(relation_id=relation_id, groups=_relation_data['groups']) - - -def assert_charm_supports_dns_ha(): - """Validate prerequisites for DNS HA - The MAAS client is only available on Xenial or greater - - :raises DNSHAException: if release is < 16.04 - """ - if lsb_release().get('DISTRIB_RELEASE') < '16.04': - msg = ('DNS HA is only supported on 16.04 and greater ' - 'versions of Ubuntu.') - status_set('blocked', msg) - raise DNSHAException(msg) - return True - - -def expect_ha(): - """ Determine if the unit expects to be in HA - - Check juju goal-state if ha relation is expected, check for VIP or dns-ha - settings which indicate the unit should expect to be related to hacluster. - - @returns boolean - """ - ha_related_units = [] - try: - ha_related_units = list(expected_related_units(reltype='ha')) - except (NotImplementedError, KeyError): - pass - return len(ha_related_units) > 0 or config('vip') or config('dns-ha') - - -def generate_ha_relation_data(service, - extra_settings=None, - haproxy_enabled=True): - """ Generate relation data for ha relation - - Based on configuration options and unit interfaces, generate a json - encoded dict of relation data items for the hacluster relation, - providing configuration for DNS HA or VIP's + haproxy clone sets. 
-
-    Example of supplying additional settings::
-
-        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
-        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
-        AGENT_CA_PARAMS = 'op monitor interval="5s"'
-
-        ha_console_settings = {
-            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
-            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
-            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
-            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
-        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
-
-
-    @param service: Name of the service being configured
-    @param extra_settings: Dict of additional resource data
-    @returns dict: json encoded data for use with relation_set
-    """
-    _relation_data = {'resources': {}, 'resource_params': {}}
-
-    if haproxy_enabled:
-        _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"'
-        _haproxy_res = 'res_{}_haproxy'.format(service)
-        _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
-        _relation_data['resource_params'] = {
-            _haproxy_res: '{} op monitor interval="5s"'.format(_meta)
-        }
-        _relation_data['init_services'] = {_haproxy_res: 'haproxy'}
-        _relation_data['clones'] = {
-            'cl_{}_haproxy'.format(service): _haproxy_res
-        }
-
-    if extra_settings:
-        for k, v in extra_settings.items():
-            if _relation_data.get(k):
-                _relation_data[k].update(v)
-            else:
-                _relation_data[k] = v
-
-    if config('dns-ha'):
-        update_hacluster_dns_ha(service, _relation_data)
-    else:
-        update_hacluster_vip(service, _relation_data)
-
-    return {
-        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
-        for k, v in _relation_data.items() if v
-    }
-
-
-def update_hacluster_dns_ha(service, relation_data,
-                            crm_ocf='ocf:maas:dns'):
-    """ Configure DNS-HA resources based on provided configuration
-
-    @param service: Name of the service being configured
-    @param relation_data: Pointer to dictionary of relation data.
-    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
-                    DNS HA
-    """
-    # Validate the charm environment for DNS HA
-    assert_charm_supports_dns_ha()
-
-    settings = ['os-admin-hostname', 'os-internal-hostname',
-                'os-public-hostname', 'os-access-hostname']
-
-    # Check which DNS settings are set and update dictionaries
-    hostname_group = []
-    for setting in settings:
-        hostname = config(setting)
-        if hostname is None:
-            log('DNS HA: Hostname setting {} is None. Ignoring.'
-                ''.format(setting),
-                DEBUG)
-            continue
-        m = re.search('os-(.+?)-hostname', setting)
-        if m:
-            endpoint_type = m.group(1)
-            # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
-            if endpoint_type == 'internal':
-                endpoint_type = 'int'
-        else:
-            msg = ('Unexpected DNS hostname setting: {}. '
-                   'Cannot determine endpoint_type name'
-                   ''.format(setting))
-            status_set('blocked', msg)
-            raise DNSHAException(msg)
-
-        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
-        if hostname_key in hostname_group:
-            log('DNS HA: Resource {}: {} already exists in '
-                'hostname group - skipping'.format(hostname_key, hostname),
-                DEBUG)
-            continue
-
-        hostname_group.append(hostname_key)
-        relation_data['resources'][hostname_key] = crm_ocf
-        relation_data['resource_params'][hostname_key] = (
-            'params fqdn="{}" ip_address="{}"'
-            .format(hostname, resolve_address(endpoint_type=endpoint_type,
-                                              override=False)))
-
-    if len(hostname_group) >= 1:
-        log('DNS HA: Hostname group is set with {} as members. '
-            'Informing the ha relation'.format(' '.join(hostname_group)),
-            DEBUG)
-        relation_data['groups'] = {
-            DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
-        }
-    else:
-        msg = 'DNS HA: Hostname group has no members.'
-        status_set('blocked', msg)
-        raise DNSHAException(msg)
-
-
-def get_vip_settings(vip):
-    """Calculate which nic is on the correct network for the given vip.
-
-    If nic or netmask discovery fail then fallback to using charm supplied
-    config. If fallback is used this is indicated via the fallback variable.
-
-    @param vip: VIP to lookup nic and cidr for.
-    @returns (str, str, bool): eg (iface, netmask, fallback)
-    """
-    iface = get_iface_for_address(vip)
-    netmask = get_netmask_for_address(vip)
-    fallback = False
-    if iface is None:
-        iface = config('vip_iface')
-        fallback = True
-    if netmask is None:
-        netmask = config('vip_cidr')
-        fallback = True
-    return iface, netmask, fallback
-
-
-def update_hacluster_vip(service, relation_data):
-    """ Configure VIP resources based on provided configuration
-
-    @param service: Name of the service being configured
-    @param relation_data: Pointer to dictionary of relation data.
-    """
-    cluster_config = get_hacluster_config()
-    vip_group = []
-    vips_to_delete = []
-    for vip in cluster_config['vip'].split():
-        if is_ipv6(vip):
-            res_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'ipv6addr'
-        else:
-            res_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'ip'
-
-        iface, netmask, fallback = get_vip_settings(vip)
-
-        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
-        if iface is not None:
-            # NOTE(jamespage): Delete old VIP resources
-            # Old style naming encoding iface in name
-            # does not work well in environments where
-            # interface/subnet wiring is not consistent
-            vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vips_to_delete:
-                vip_key = '{}_{}'.format(vip_key, vip_params)
-            vips_to_delete.append(vip_key)
-
-            vip_key = 'res_{}_{}_vip'.format(
-                service,
-                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
-
-            relation_data['resources'][vip_key] = res_vip
-            # NOTE(jamespage):
-            # Use option provided vip params if these were used
-            # instead of auto-detected values
-            if fallback:
-                relation_data['resource_params'][vip_key] = (
-                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                    'nic="{iface}" {vip_monitoring}'.format(
-                        ip=vip_params,
-                        vip=vip,
-                        iface=iface,
-                        netmask=netmask,
-                        vip_monitoring=vip_monitoring))
-            else:
-                # NOTE(jamespage):
-                # let heartbeat figure out which interface and
-                # netmask to configure, which works nicely
-                # when network interface naming is not
-                # consistent across units.
-                relation_data['resource_params'][vip_key] = (
-                    'params {ip}="{vip}" {vip_monitoring}'.format(
-                        ip=vip_params,
-                        vip=vip,
-                        vip_monitoring=vip_monitoring))
-
-            vip_group.append(vip_key)
-
-    if vips_to_delete:
-        try:
-            relation_data['delete_resources'].extend(vips_to_delete)
-        except KeyError:
-            relation_data['delete_resources'] = vips_to_delete
-
-    if len(vip_group) >= 1:
-        key = VIP_GROUP_NAME.format(service=service)
-        try:
-            relation_data['groups'][key] = ' '.join(vip_group)
-        except KeyError:
-            relation_data['groups'] = {
-                key: ' '.join(vip_group)
-            }
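Naming the VIP resource after a hash of the VIP itself, rather than after the interface, keeps the resource key stable across units whose NIC names differ; the old interface-based keys are queued for deletion above. A sketch of the key derivation (the service name and VIP are illustrative):

    # Stable VIP resource key, as derived in update_hacluster_vip().
    import hashlib

    service = 'keystone'
    vip = '10.5.100.1'
    vip_key = 'res_{}_{}_vip'.format(
        service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
    print(vip_key)   # -> res_keystone_<first-7-hex-digits-of-sha1>_vip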
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index b8c94c56..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core.hookenv import (
-    NoNetworkBinding,
-    config,
-    unit_get,
-    service_name,
-    network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
-    get_address_in_network,
-    is_address_in_network,
-    is_ipv6,
-    get_ipv6_addr,
-    resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-ACCESS = 'access'
-
-# TODO: reconcile 'int' vs 'internal' binding names
-ADDRESS_MAP = {
-    PUBLIC: {
-        'binding': 'public',
-        'config': 'os-public-network',
-        'fallback': 'public-address',
-        'override': 'os-public-hostname',
-    },
-    INTERNAL: {
-        'binding': 'internal',
-        'config': 'os-internal-network',
-        'fallback': 'private-address',
-        'override': 'os-internal-hostname',
-    },
-    ADMIN: {
-        'binding': 'admin',
-        'config': 'os-admin-network',
-        'fallback': 'private-address',
-        'override': 'os-admin-hostname',
-    },
-    ACCESS: {
-        'binding': 'access',
-        'config': 'access-network',
-        'fallback': 'private-address',
-        'override': 'os-access-hostname',
-    },
-    # Note (thedac) bridge to begin the reconciliation between 'int' vs
-    # 'internal' binding names
-    'internal': {
-        'binding': 'internal',
-        'config': 'os-internal-network',
-        'fallback': 'private-address',
-        'override': 'os-internal-hostname',
-    },
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
-    """Returns the correct HTTP URL to this host given the state of HTTPS
-    configuration, hacluster and charm configuration.
-
-    :param configs: OSTemplateRenderer config templating object to inspect
-                    for a complete https context.
-    :param endpoint_type: str endpoint type to resolve.
-    :returns: str base URL for services on the current service unit.
-    """
-    scheme = _get_scheme(configs)
-
-    address = resolve_address(endpoint_type)
-    if is_ipv6(address):
-        address = "[{}]".format(address)
-
-    return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
-    """Returns the scheme to use for the url (either http or https)
-    depending upon whether https is in the configs value.
-
-    :param configs: OSTemplateRenderer config templating object to inspect
-                    for a complete https context.
-    :returns: either 'http' or 'https' depending on whether https is
-              configured within the configs context.
-    """
-    scheme = 'http'
-    if configs and 'https' in configs.complete_contexts():
-        scheme = 'https'
-    return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
-    """Returns any address overrides that the user has defined based on the
-    endpoint type.
-
-    Note: this function allows for the service name to be inserted into the
-    address if the user specifies {service_name}.somehost.org.
-
-    :param endpoint_type: the type of endpoint to retrieve the override
-                          value for.
-    :returns: any endpoint address or hostname that the user has overridden
-              or None if an override is not present.
- """ - override_key = ADDRESS_MAP[endpoint_type]['override'] - addr_override = config(override_key) - if not addr_override: - return None - else: - return addr_override.format(service_name=service_name()) - - -def local_address(unit_get_fallback='public-address'): - """Return a network address for this unit. - - Attempt to retrieve a 'default' IP address for this unit - from network-get. If this is running with an old version of Juju then - fallback to unit_get. - - Note on juju < 2.9 the binding to juju-info may not exist, so fall back to - the unit-get. - - :param unit_get_fallback: Either 'public-address' or 'private-address'. - Only used with old versions of Juju. - :type unit_get_fallback: str - :returns: IP Address - :rtype: str - """ - try: - return network_get_primary_address('juju-info') - except (NotImplementedError, NoNetworkBinding): - return unit_get(unit_get_fallback) - - -def resolve_address(endpoint_type=PUBLIC, override=True): - """Return unit address depending on net config. - - If unit is clustered with vip(s) and has net splits defined, return vip on - correct network. If clustered with no nets defined, return primary vip. - - If not clustered, return unit address ensuring address is on configured net - split if one is configured, or a Juju 2.0 extra-binding has been used. - - :param endpoint_type: Network endpoing type - :param override: Accept hostname overrides or not - """ - resolved_address = None - if override: - resolved_address = _get_address_override(endpoint_type) - if resolved_address: - return resolved_address - - vips = config('vip') - if vips: - vips = vips.split() - - net_type = ADDRESS_MAP[endpoint_type]['config'] - net_addr = config(net_type) - net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] - binding = ADDRESS_MAP[endpoint_type]['binding'] - clustered = is_clustered() - - if clustered and vips: - if net_addr: - for vip in vips: - if is_address_in_network(net_addr, vip): - resolved_address = vip - break - else: - # NOTE: endeavour to check vips against network space - # bindings - try: - bound_cidr = resolve_network_cidr( - network_get_primary_address(binding) - ) - for vip in vips: - if is_address_in_network(bound_cidr, vip): - resolved_address = vip - break - except (NotImplementedError, NoNetworkBinding): - # If no net-splits configured and no support for extra - # bindings/network spaces so we expect a single vip - resolved_address = vips[0] - else: - if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr(exc_list=vips)[0] - else: - fallback_addr = local_address(unit_get_fallback=net_fallback) - - if net_addr: - resolved_address = get_address_in_network(net_addr, fallback_addr) - else: - # NOTE: only try to use extra bindings if legacy network - # configuration is not in use - try: - resolved_address = network_get_primary_address(binding) - except (NotImplementedError, NoNetworkBinding): - resolved_address = fallback_addr - - if resolved_address is None: - raise ValueError("Unable to resolve a suitable IP address based on " - "charm state and configuration. 
(net_type=%s, " - "clustered=%s)" % (net_type, clustered)) - - return resolved_address - - -def get_vip_in_network(network): - matching_vip = None - vips = config('vip') - if vips: - for vip in vips.split(): - if is_address_in_network(network, vip): - matching_vip = vip - return matching_vip - - -def get_default_api_bindings(): - _default_bindings = [] - for binding in [INTERNAL, ADMIN, PUBLIC]: - _default_bindings.append(ADDRESS_MAP[binding]['binding']) - return _default_bindings diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py deleted file mode 100644 index 5775aa44..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/keystone.py +++ /dev/null @@ -1,170 +0,0 @@ -# -# Copyright 2017 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.fetch import apt_install -from charmhelpers.contrib.openstack.context import IdentityServiceContext -from charmhelpers.core.hookenv import ( - log, - ERROR, -) - - -def get_api_suffix(api_version): - """Return the formatted api suffix for the given version - @param api_version: version of the keystone endpoint - @returns the api suffix formatted according to the given api - version - """ - return 'v2.0' if api_version in (2, "2", "2.0") else 'v3' - - -def format_endpoint(schema, addr, port, api_version): - """Return a formatted keystone endpoint - @param schema: http or https - @param addr: ipv4/ipv6 host of the keystone service - @param port: port of the keystone service - @param api_version: 2 or 3 - @returns a fully formatted keystone endpoint - """ - return '{}://{}:{}/{}/'.format(schema, addr, port, - get_api_suffix(api_version)) - - -def get_keystone_manager(endpoint, api_version, **kwargs): - """Return a keystonemanager for the correct API version - - @param endpoint: the keystone endpoint to point client at - @param api_version: version of the keystone api the client should use - @param kwargs: token or username/tenant/password information - @returns keystonemanager class used for interrogating keystone - """ - if api_version == 2: - return KeystoneManager2(endpoint, **kwargs) - if api_version == 3: - return KeystoneManager3(endpoint, **kwargs) - raise ValueError('No manager found for api version {}'.format(api_version)) - - -def get_keystone_manager_from_identity_service_context(): - """Return a keystonmanager generated from a - instance of charmhelpers.contrib.openstack.context.IdentityServiceContext - @returns keystonamenager instance - """ - context = IdentityServiceContext()() - if not context: - msg = "Identity service context cannot be generated" - log(msg, level=ERROR) - raise ValueError(msg) - - endpoint = format_endpoint(context['service_protocol'], - context['service_host'], - context['service_port'], - context['api_version']) - - if context['api_version'] in (2, "2.0"): - api_version = 2 - else: - api_version = 3 - - return get_keystone_manager(endpoint, api_version, - username=context['admin_user'], - 
password=context['admin_password'], - tenant_name=context['admin_tenant_name']) - - -class KeystoneManager(object): - - def resolve_service_id(self, service_name=None, service_type=None): - """Find the service_id of a given service""" - services = [s._info for s in self.api.services.list()] - - service_name = service_name.lower() - for s in services: - name = s['name'].lower() - if service_type and service_name: - if (service_name == name and service_type == s['type']): - return s['id'] - elif service_name and service_name == name: - return s['id'] - elif service_type and service_type == s['type']: - return s['id'] - return None - - def service_exists(self, service_name=None, service_type=None): - """Determine if the given service exists on the service list""" - return self.resolve_service_id(service_name, service_type) is not None - - -class KeystoneManager2(KeystoneManager): - - def __init__(self, endpoint, **kwargs): - try: - from keystoneclient.v2_0 import client - from keystoneclient.auth.identity import v2 - from keystoneclient import session - except ImportError: - apt_install(["python3-keystoneclient"], fatal=True) - - from keystoneclient.v2_0 import client - from keystoneclient.auth.identity import v2 - from keystoneclient import session - - self.api_version = 2 - - token = kwargs.get("token", None) - if token: - api = client.Client(endpoint=endpoint, token=token) - else: - auth = v2.Password(username=kwargs.get("username"), - password=kwargs.get("password"), - tenant_name=kwargs.get("tenant_name"), - auth_url=endpoint) - sess = session.Session(auth=auth) - api = client.Client(session=sess) - - self.api = api - - -class KeystoneManager3(KeystoneManager): - - def __init__(self, endpoint, **kwargs): - try: - from keystoneclient.v3 import client - from keystoneclient.auth import token_endpoint - from keystoneclient import session - from keystoneclient.auth.identity import v3 - except ImportError: - apt_install(["python3-keystoneclient"], fatal=True) - - from keystoneclient.v3 import client - from keystoneclient.auth import token_endpoint - from keystoneclient import session - from keystoneclient.auth.identity import v3 - - self.api_version = 3 - - token = kwargs.get("token", None) - if token: - auth = token_endpoint.Token(endpoint=endpoint, - token=token) - sess = session.Session(auth=auth) - else: - auth = v3.Password(auth_url=endpoint, - user_id=kwargs.get("username"), - password=kwargs.get("password"), - project_id=kwargs.get("tenant_name")) - sess = session.Session(auth=auth) - - self.api = client.Client(session=sess) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py deleted file mode 100644 index 47772467..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/neutron.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Various utilities for dealing with Neutron and the renaming from Quantum. 
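For orientation, a short sketch of how charms consumed the keystone helpers removed above. The endpoint address and credentials here are made-up; only format_endpoint(), get_keystone_manager() and service_exists() come from the deleted module itself:

    # Build a versioned endpoint and query keystone for a service.
    endpoint = format_endpoint('https', '10.0.0.10', 5000, 3)
    # -> 'https://10.0.0.10:5000/v3/'
    manager = get_keystone_manager(endpoint, 3, username='admin',
                                   password='secret', tenant_name='admin')
    if manager.service_exists(service_name='glance', service_type='image'):
        log('glance is already registered')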
- -from subprocess import check_output - -from charmhelpers.core.hookenv import ( - config, - log, - ERROR, -) - -from charmhelpers.contrib.openstack.utils import ( - os_release, - CompareOpenStackReleases, -) - - -def headers_package(): - """Ensures correct linux-headers for running kernel are installed, - for building DKMS package""" - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - return 'linux-headers-%s' % kver - - -QUANTUM_CONF_DIR = '/etc/quantum' - - -def kernel_version(): - """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - kver = kver.split('.') - return (int(kver[0]), int(kver[1])) - - -def determine_dkms_package(): - """ Determine which DKMS package should be used based on kernel version """ - # NOTE: 3.13 kernels have support for GRE and VXLAN native - if kernel_version() >= (3, 13): - return [] - else: - return [headers_package(), 'openvswitch-datapath-dkms'] - - -# legacy - - -def quantum_plugins(): - return { - 'ovs': { - 'config': '/etc/quantum/plugins/openvswitch/' - 'ovs_quantum_plugin.ini', - 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' - 'OVSQuantumPluginV2', - 'contexts': [], - 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['quantum-plugin-openvswitch-agent']], - 'server_packages': ['quantum-server', - 'quantum-plugin-openvswitch'], - 'server_services': ['quantum-server'] - }, - 'nvp': { - 'config': '/etc/quantum/plugins/nicira/nvp.ini', - 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' - 'QuantumPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['quantum-server', - 'quantum-plugin-nicira'], - 'server_services': ['quantum-server'] - } - } - - -NEUTRON_CONF_DIR = '/etc/neutron' - - -def neutron_plugins(): - release = os_release('nova-common') - plugins = { - 'ovs': { - 'config': '/etc/neutron/plugins/openvswitch/' - 'ovs_neutron_plugin.ini', - 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' - 'OVSNeutronPluginV2', - 'contexts': [], - 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['neutron-plugin-openvswitch-agent']], - 'server_packages': ['neutron-server', - 'neutron-plugin-openvswitch'], - 'server_services': ['neutron-server'] - }, - 'nvp': { - 'config': '/etc/neutron/plugins/nicira/nvp.ini', - 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
- 'NeutronPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-nicira'], - 'server_services': ['neutron-server'] - }, - 'nsx': { - 'config': '/etc/neutron/plugins/vmware/nsx.ini', - 'driver': 'vmware', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-vmware'], - 'server_services': ['neutron-server'] - }, - 'n1kv': { - 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', - 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package(), - ['neutron-plugin-cisco']], - 'server_packages': ['neutron-server', - 'neutron-plugin-cisco'], - 'server_services': ['neutron-server'] - }, - 'Calico': { - 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [], - 'services': ['calico-felix', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd'], - 'packages': [determine_dkms_package(), - ['calico-compute', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd']], - 'server_packages': ['neutron-server', 'calico-control', 'etcd'], - 'server_services': ['neutron-server', 'etcd'] - }, - 'vsp': { - 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', - 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], - 'server_services': ['neutron-server'] - }, - 'plumgrid': { - 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' - '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [], - 'services': [], - 'packages': ['plumgrid-lxc', - 'iovisor-dkms'], - 'server_packages': ['neutron-server', - 'neutron-plugin-plumgrid'], - 'server_services': ['neutron-server'] - }, - 'midonet': { - 'config': '/etc/neutron/plugins/midonet/midonet.ini', - 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package()], - 'server_packages': ['neutron-server', - 'python-neutron-plugin-midonet'], - 'server_services': ['neutron-server'] - } - } - if CompareOpenStackReleases(release) >= 'icehouse': - # NOTE: patch in ml2 plugin for icehouse onwards - plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['ovs']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - # NOTE: patch in vmware renames nvp->nsx for icehouse onwards - plugins['nvp'] = plugins['nsx'] - if CompareOpenStackReleases(release) >= 'kilo': - plugins['midonet']['driver'] = ( - 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if CompareOpenStackReleases(release) >= 'liberty': - plugins['midonet']['driver'] = ( - 'midonet.neutron.plugin_v1.MidonetPluginV2') - plugins['midonet']['server_packages'].remove( - 'python-neutron-plugin-midonet') - plugins['midonet']['server_packages'].append( - 'python-networking-midonet') - plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins' - '.plugin.NeutronPluginPLUMgridV2') - plugins['plumgrid']['server_packages'].remove( - 'neutron-plugin-plumgrid') - if CompareOpenStackReleases(release) >= 'mitaka': - plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') - plugins['nsx']['server_packages'].append('python-vmware-nsx') - plugins['nsx']['config'] = 
'/etc/neutron/nsx.ini' - plugins['vsp']['driver'] = ( - 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') - if CompareOpenStackReleases(release) >= 'newton': - plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['vsp']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - return plugins - - -def neutron_plugin_attribute(plugin, attr, net_manager=None): - manager = net_manager or network_manager() - if manager == 'quantum': - plugins = quantum_plugins() - elif manager == 'neutron': - plugins = neutron_plugins() - else: - log("Network manager '%s' does not support plugins." % (manager), - level=ERROR) - raise Exception - - try: - _plugin = plugins[plugin] - except KeyError: - log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise Exception - - try: - return _plugin[attr] - except KeyError: - return None - - -def network_manager(): - ''' - Deals with the renaming of Quantum to Neutron in H and any situations - that require compatibility (eg, deploying H with network-manager=quantum, - upgrading from G). - ''' - release = os_release('nova-common') - manager = config('network-manager').lower() - - if manager not in ['quantum', 'neutron']: - return manager - - if release in ['essex']: - # E does not support neutron - log('Neutron networking not supported in Essex.', level=ERROR) - raise Exception - elif release in ['folsom', 'grizzly']: - # neutron is named quantum in F and G - return 'quantum' - else: - # ensure accurate naming for all releases post-H - return 'neutron' - - -def parse_mappings(mappings, key_rvalue=False): - """By default mappings are lvalue keyed. - - If key_rvalue is True, the mapping will be reversed to allow multiple - configs for the same lvalue. - """ - parsed = {} - if mappings: - mappings = mappings.split() - for m in mappings: - p = m.partition(':') - - if key_rvalue: - key_index = 2 - val_index = 0 - # if there is no rvalue skip to next - if not p[1]: - continue - else: - key_index = 0 - val_index = 2 - - key = p[key_index].strip() - parsed[key] = p[val_index].strip() - - return parsed - - -def parse_bridge_mappings(mappings): - """Parse bridge mappings. - - Mappings must be a space-delimited list of provider:bridge mappings. - - Returns dict of the form {provider:bridge}. - """ - return parse_mappings(mappings) - - -def parse_data_port_mappings(mappings, default_bridge='br-data'): - """Parse data port mappings. - - Mappings must be a space-delimited list of bridge:port. - - Returns dict of the form {port:bridge} where ports may be mac addresses or - interface names. - """ - - # NOTE(dosaboy): we use the rvalue as the key to allow multiple values to - # be proposed, since it may be a mac address which will differ across - # units, thus allowing first-known-good to be chosen. - _mappings = parse_mappings(mappings, key_rvalue=True) - if not _mappings or list(_mappings.values()) == ['']: - if not mappings: - return {} - - # For backwards-compatibility we need to support port-only provided in - # config. - _mappings = {mappings.split()[0]: default_bridge} - - ports = _mappings.keys() - if len(set(ports)) != len(ports): - raise Exception("It is not allowed to have the same port configured " - "on more than one bridge") - - return _mappings - - -def parse_vlan_range_mappings(mappings): - """Parse vlan range mappings. - - Mappings must be a space-delimited list of provider:start:end mappings. - - The start:end range is optional and may be omitted. 
- - Returns dict of the form {provider: (start, end)}. - """ - _mappings = parse_mappings(mappings) - return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py deleted file mode 100644 index ecffbc68..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policy_rcd.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Module for managing policy-rc.d script and associated files. - -This module manages the installation of /usr/sbin/policy-rc.d, the -policy files and the event files. When a package update occurs the -packaging system calls: - -policy-rc.d [options] <initscript ID> <actions> - -The return code of the script determines if the packaging system -will perform that action on the given service. The policy-rc.d -implementation installed by this module checks if an action is -permitted by checking policy files placed in /etc/policy-rc.d. -If a policy file exists which denies the requested action then -this is recorded in an event file which is placed in -/var/lib/policy-rc.d. -""" - -import os -import shutil -import tempfile -import yaml - -import charmhelpers.contrib.openstack.files as os_files -import charmhelpers.contrib.openstack.alternatives as alternatives -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as host - -POLICY_HEADER = """# Managed by juju\n""" -POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' -POLICY_CONFIG_DIR = '/etc/policy-rc.d' - - -def get_policy_file_name(): - """Get the name of the policy file for this application. - - :returns: Policy file name - :rtype: str - """ - application_name = hookenv.service_name() - return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) - - -def read_default_policy_file(): - """Return the policy file. - - A policy is in the form: - blocked_actions: - neutron-dhcp-agent: [restart, stop, try-restart] - neutron-l3-agent: [restart, stop, try-restart] - neutron-metadata-agent: [restart, stop, try-restart] - neutron-openvswitch-agent: [restart, stop, try-restart] - openvswitch-switch: [restart, stop, try-restart] - ovs-vswitchd: [restart, stop, try-restart] - ovs-vswitchd-dpdk: [restart, stop, try-restart] - ovsdb-server: [restart, stop, try-restart] - policy_requestor_name: neutron-openvswitch - policy_requestor_type: charm - - :returns: Policy - :rtype: Dict[str, Union[str, Dict[str, List[str]]] - """ - policy = {} - policy_file = get_policy_file_name() - if os.path.exists(policy_file): - with open(policy_file, 'r') as f: - policy = yaml.safe_load(f) - return policy - - -def write_policy_file(policy_file, policy): - """Write policy to disk. 
- - :param policy_file: Name of policy file - :type policy_file: str - :param policy: Policy - :type policy: Dict[str, Union[str, Dict[str, List[str]]]] - """ - with tempfile.NamedTemporaryFile('w', delete=False) as f: - f.write(POLICY_HEADER) - yaml.dump(policy, f) - tmp_file_name = f.name - shutil.move(tmp_file_name, policy_file) - - -def remove_policy_file(): - """Remove policy file.""" - try: - os.remove(get_policy_file_name()) - except FileNotFoundError: - pass - - -def install_policy_rcd(): - """Install policy-rc.d components.""" - source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) - policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( - hookenv.service_name()) - host.mkdir(os.path.dirname(policy_rcd_exec)) - shutil.copy2( - '{}/policy_rc_d_script.py'.format(source_file_dir), - policy_rcd_exec) - # policy-rc.d must be installed via the alternatives system: - # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt - if not os.path.exists('/usr/sbin/policy-rc.d'): - alternatives.install_alternative( - 'policy-rc.d', - '/usr/sbin/policy-rc.d', - policy_rcd_exec) - host.mkdir(POLICY_CONFIG_DIR) - - -def get_default_policy(): - """Return the default policy structure. - - :returns: Policy - :rtype: Dict[str, Union[str, Dict[str, List[str]]] - """ - policy = { - 'policy_requestor_name': hookenv.service_name(), - 'policy_requestor_type': 'charm', - 'blocked_actions': {}} - return policy - - -def add_policy_block(service, blocked_actions): - """Update a policy file with new list of actions. - - :param service: Service name - :type service: str - :param blocked_actions: Action to block - :type blocked_actions: List[str] - """ - policy = read_default_policy_file() or get_default_policy() - policy_file = get_policy_file_name() - if policy['blocked_actions'].get(service): - policy['blocked_actions'][service].extend(blocked_actions) - else: - policy['blocked_actions'][service] = blocked_actions - policy['blocked_actions'][service] = sorted( - list(set(policy['blocked_actions'][service]))) - write_policy_file(policy_file, policy) - - -def remove_policy_block(service, unblocked_actions): - """Remove list of actions from policy file. - - :param service: Service name - :type service: str - :param unblocked_actions: Action to unblock - :type unblocked_actions: List[str] - """ - policy_file = get_policy_file_name() - policy = read_default_policy_file() - for action in unblocked_actions: - try: - policy['blocked_actions'][service].remove(action) - except (KeyError, ValueError): - continue - write_policy_file(policy_file, policy) diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py deleted file mode 100644 index 767943c2..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/policyd.py +++ /dev/null @@ -1,763 +0,0 @@ -# Copyright 2019-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
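A sketch of how a charm would have driven the policy-rc.d helpers removed above; the service name and action list are illustrative, not from this patch (per the Debian policy-rc.d convention, a denied action returns 101 to the packaging system):

    # Deny packaging-triggered restarts of ceph-osd during maintenance.
    install_policy_rcd()
    add_policy_block('ceph-osd', ['restart', 'stop', 'try-restart'])
    # ... perform disruptive work; dpkg requests for those actions are
    # now refused and recorded under /var/lib/policy-rc.d ...
    remove_policy_block('ceph-osd', ['restart', 'stop', 'try-restart'])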
- -import collections -import contextlib -import os -import shutil -import yaml -import zipfile - -import charmhelpers -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as ch_host - -# Features provided by this module: - -""" -Policy.d helper functions -========================= - -The functions in this module are designed, as a set, to provide an easy-to-use -set of hooks for classic charms to add in /etc/<service-name>/policy.d/ -directory override YAML files. - -(For charms.openstack charms, a mixin class is provided for this -functionality). - -In order to "hook" this functionality into a (classic) charm, two functions are -provided: - - maybe_do_policyd_overrides(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None) - - maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None) - -(See the docstrings for details on the parameters) - -The functions should be called from the install and upgrade hooks in the charm. -The `maybe_do_policyd_overrides_on_config_changed` function is designed to be -called on the config-changed hook, in that it does an additional check to -ensure that a policy.d override already applied in an install or upgrade hook -isn't repeated. - -In order to *enable* this functionality, the charm's install, config_changed, -and upgrade_charm hooks need to be modified, and a new config option (see -below) needs to be added. The README for the charm should also be updated. - -Examples from the keystone charm are: - -@hooks.hook('install.real') -@harden() -def install(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides(os_release('keystone'), 'keystone') - - -@hooks.hook('config-changed') -@restart_on_change(restart_map(), restart_functions=restart_function_map()) -@harden() -def config_changed(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), - 'keystone') - -@hooks.hook('upgrade-charm') -@restart_on_change(restart_map(), stopstart=True) -@harden() -def upgrade_charm(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides(os_release('keystone'), 'keystone') - -Status Line -=========== - -The workload status code in charm-helpers has been modified to detect if -policy.d override code has been incorporated into the charm by checking for the -new config variable (in the config.yaml). If it has been, then the workload -status line will automatically show "PO:" at the beginning of the workload -status for that unit/service if the config option is set. If the policy -override is broken, "PO (broken):" will be shown. No changes to the charm -(apart from those already mentioned) are needed to enable this functionality. -(charms.openstack charms also get this functionality, but please see that -library for further details). -""" - -# The config.yaml for the charm should contain the following for the config -# option: - -""" - use-policyd-override: - type: boolean - default: False - description: | - If True then use the resource file named 'policyd-override' to install - override YAML files in the service's policy.d directory. The resource - file should be a ZIP file containing at least one yaml file with a .yaml - or .yml extension. 
If False then remove the overrides. -""" - -# The metadata.yaml for the charm should contain the following: -""" -resources: - policyd-override: - type: file - filename: policyd-override.zip - description: The policy.d overrides file -""" - -# The README for the charm should contain the following: -""" -Policy Overrides ----------------- - -This feature allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the OpenStack service supports -should be clearly and unambiguously understood before trying to override, or -add to, the default policies that the service uses. The charm also has some -policy defaults. They should also be understood before being overridden. - -> **Caution**: It is possible to break the system (for tenants and other - services) if policies are incorrectly applied to the service. - -Policy overrides are YAML files that contain rules that will add to, or -override, existing policy rules in the service. The `policy.d` directory is -a place to put the YAML override files. This charm owns the -`/etc/keystone/policy.d` directory, and as such, any manual changes to it will -be overwritten on charm upgrades. - -Overrides are provided to the charm using a Juju resource called -`policyd-override`. The resource is a ZIP file. This file, say -`overrides.zip`, is attached to the charm by: - - - juju attach-resource <charm-name> policyd-override=overrides.zip - -The policy override is enabled in the charm using: - - juju config <charm-name> use-policyd-override=true - -When `use-policyd-override` is `True` the status line of the charm will be -prefixed with `PO:` indicating that policies have been overridden. If the -installation of the policy override YAML files failed for any reason then the -status line will be prefixed with `PO (broken):`. The log file for the charm -will indicate the reason. No policy override files are installed if the `PO -(broken):` prefix is shown. The status line indicates that the overrides are -broken, not that the policy for the service has failed. The policy will be the -defaults for the charm and service. - -Policy overrides on one service may affect the functionality of another -service. Therefore, it may be necessary to provide policy overrides for -multiple service charms to achieve a consistent set of policies across the -OpenStack system. The charms for the other services that may need overrides -should be checked to ensure that they support overrides before proceeding. -""" - -POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] -POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] -POLICYD_RESOURCE_NAME = "policyd-override" -POLICYD_CONFIG_NAME = "use-policyd-override" -POLICYD_SUCCESS_FILENAME = "policyd-override-success" -POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO -POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") - - -class BadPolicyZipFile(Exception): - - def __init__(self, log_message): - self.log_message = log_message - - def __str__(self): - return self.log_message - - -class BadPolicyYamlFile(Exception): - - def __init__(self, log_message): - self.log_message = log_message - - def __str__(self): - return self.log_message - - -def is_policyd_override_valid_on_this_release(openstack_release): - """Check that the charm is running on at least Ubuntu Xenial, and at - least the queens release. - - :param openstack_release: the release codename that is installed. - :type openstack_release: str - :returns: True if okay - :rtype: bool - """ - # NOTE(ajkavanagh) circular import! 
This is because the status message - # generation code in utils has to call into this module, but this function - # needs the CompareOpenStackReleases() function. The only way to solve - # this is either to put ALL of this module into utils, or refactor one or - # other of the CompareOpenStackReleases or status message generation code - # into a 3rd module. - import charmhelpers.contrib.openstack.utils as ch_utils - return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' - - -def maybe_do_policyd_overrides(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None, - user=None, - group=None, - config_changed=False): - """If the config option is set, get the resource file and process it to - enable the policy.d overrides for the service passed. - - The param `openstack_release` is required as the policyd overrides feature - is only supported on openstack_release "queens" or later, and on ubuntu - "xenial" or later. Prior to these versions, this feature is a NOP. - - The optional template_function is a function that accepts a string and has - an opportunity to modify the loaded file prior to it being read by - yaml.safe_load(). This allows the charm to perform "templating" using - charm derived data. - - The param blacklist_paths are paths (that are in the service's policy.d - directory that should not be touched). - - The param blacklist_keys are keys that must not appear in the yaml file. - If they do, then the whole policy.d file fails. - - The yaml file extracted from the resource_file (which is a zipped file) has - its file path reconstructed. This, also, must not match any path in the - black list. - - The param restart_handler is an optional Callable that is called to perform - the service restart if the policy.d file is changed. This should normally - be None as oslo.policy automatically picks up changes in the policy.d - directory. However, for any services where this is buggy then a - restart_handler can be used to force the policy.d files to be read. - - If the config_changed param is True, then the handling is slightly - different: It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. - - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. - :type restart_handler: Union[None, Callable[]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - :param config_changed: Set to True for config_changed hook. 
- :type config_changed: bool - """ - _user = service if user is None else user - _group = service if group is None else group - if not is_policyd_override_valid_on_this_release(openstack_release): - return - hookenv.log("Running maybe_do_policyd_overrides", - level=POLICYD_LOG_LEVEL_DEFAULT) - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - if (os.path.isfile(_policy_success_file()) and - restart_handler is not None and - callable(restart_handler)): - restart_handler() - remove_policy_success_file() - return - except Exception as e: - hookenv.log("... ERROR: Exception is: {}".format(str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - import traceback - hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) - return - # if the policyd overrides have been performed when doing config_changed - # just return - if config_changed and is_policy_success_file_set(): - hookenv.log("... already setup, so skipping.", - level=POLICYD_LOG_LEVEL_DEFAULT) - return - # from now on it should succeed; if it doesn't then status line will show - # broken. - resource_filename = get_policy_resource_filename() - restart = process_policy_resource_file( - resource_filename, service, blacklist_paths, blacklist_keys, - template_function) - if restart and restart_handler is not None and callable(restart_handler): - restart_handler() - - -@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") -def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): - """This function is designed to be called from the config changed hook. - - DEPRECATED: please use maybe_do_policyd_overrides() with the param - `config_changed` as `True`. - - See maybe_do_policyd_overrides() for more details on the params. - """ - if 'config_changed' not in kwargs.keys(): - kwargs['config_changed'] = True - return maybe_do_policyd_overrides(*args, **kwargs) - - -def get_policy_resource_filename(): - """Function to extract the policy resource filename - - :returns: The filename of the resource, if set, otherwise, if an error - occurs, then None is returned. - :rtype: Union[str, None] - """ - try: - return hookenv.resource_get(POLICYD_RESOURCE_NAME) - except Exception: - return None - - -@contextlib.contextmanager -def open_and_filter_yaml_files(filepath, has_subdirs=False): - """Validate that the filepath provided is a zip file and contains at least - one (.yaml|.yml) file, and that the files are not duplicated when the zip - file is flattened. Note that the yaml files are not checked. This is the - first stage in validating the policy zipfile; individual yaml files are not - checked for validity or black listed keys. - - If the has_subdirs param is True, then the files are flattened to the first - directory, and the files in the root are ignored. - - An example of use is: - - with open_and_filter_yaml_files(some_path) as (zfp, g): - for zipinfo in g: - # do something with zipinfo ... - - :param filepath: a filepath object that can be opened by zipfile - :type filepath: Union[AnyStr, os.PathLike[AnyStr]] - :param has_subdirs: Keep first level of subdirectories in yaml file. - :type has_subdirs: bool - :returns: (zfp handle, - a generator of the (name, filename, ZipInfo object) tuples) as a - tuple. 
- :rtype: ContextManager[(zipfile.ZipFile, - Generator[(name, str, str, zipfile.ZipInfo)])] - :raises: zipfile.BadZipFile - :raises: BadPolicyZipFile if duplicated yaml or missing - :raises: IOError if the filepath is not found - """ - with zipfile.ZipFile(filepath, 'r') as zfp: - # first pass through; check for duplicates and at least one yaml file. - names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp, has_subdirs) - for name, _, _, _ in yamlfiles: - names[name] += 1 - # There must be at least 1 yaml file. - if len(names.keys()) == 0: - raise BadPolicyZipFile("contains no yaml files with {} extensions." - .format(", ".join(POLICYD_VALID_EXTS))) - # There must be no duplicates - duplicates = [n for n, c in names.items() if c > 1] - if duplicates: - raise BadPolicyZipFile("{} have duplicates in the zip file." - .format(", ".join(duplicates))) - # Finally, let's yield the generator - yield (zfp, yamlfiles) - - -def _yamlfiles(zipfile, has_subdirs=False): - """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) - and the infolist item from a zipfile. - - If the `has_subdirs` param is True, then only yaml files that have a - directory component are read, and the first part of the directory - component is kept, along with the filename in the name. e.g. an entry with - a filename of: - - compute/someotherdir/override.yaml - - is returned as: - - compute/override, yaml, override.yaml, <ZipInfo object> - - This is to help with the special, additional, processing that the dashboard - charm requires. - - :param zipfile: the zipfile to read zipinfo items from - :type zipfile: zipfile.ZipFile - :param has_subdirs: Keep first level of subdirectories in yaml file. - :type has_subdirs: bool - :returns: generator of (name, ext, filename, info item) for each - self-identified yaml file. - :rtype: List[(str, str, str, zipfile.ZipInfo)] - """ - files = [] - for infolist_item in zipfile.infolist(): - try: - if infolist_item.is_dir(): - continue - except AttributeError: - # fallback to "old" way to determine dir entry for pre-py36 - if infolist_item.filename.endswith('/'): - continue - _dir, name_ext = os.path.split(infolist_item.filename) - name, ext = os.path.splitext(name_ext) - if has_subdirs and _dir != "": - name = os.path.join(_dir.split(os.path.sep)[0], name) - ext = ext.lower() - if ext and ext in POLICYD_VALID_EXTS: - files.append((name, ext, name_ext, infolist_item)) - return files - - -def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): - """Read, validate and return the (first) yaml document from the stream. - - The doc is read, and checked for a yaml file. Then the top-level keys are - checked against the blacklist_keys provided. If there are problems then an - Exception is raised. Otherwise the yaml document is returned as a Python - object that can be dumped back as a yaml file on the system. - - The yaml file must only consist of a str:str mapping, and if not then the - yaml file is rejected. - - :param stream_or_doc: the file object to read the yaml from - :type stream_or_doc: Union[AnyStr, IO[AnyStr]] - :param blacklist_keys: Any keys, which if in the yaml file, should cause - an error. - :type blacklisted_keys: Union[None, List[str]] - :returns: the yaml file as a python document - :rtype: Dict[str, str] - :raises: yaml.YAMLError if there is a problem with the document - :raises: BadPolicyYamlFile if file doesn't look right or there are - blacklisted keys in the file. 
- """ - blacklist_keys = blacklist_keys or [] - blacklist_keys.append(POLICYD_ALWAYS_BLACKLISTED_KEYS) - doc = yaml.safe_load(stream_or_doc) - if not isinstance(doc, dict): - raise BadPolicyYamlFile("doesn't look like a policy file?") - keys = set(doc.keys()) - blacklisted_keys_present = keys.intersection(blacklist_keys) - if blacklisted_keys_present: - raise BadPolicyYamlFile("blacklisted keys {} present." - .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, str) for k in keys): - raise BadPolicyYamlFile("keys in yaml aren't all strings?") - # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, str) for v in doc.values()): - raise BadPolicyYamlFile("values in yaml aren't all strings?") - return doc - - -def policyd_dir_for(service): - """Return the policy directory for the named service. - - :param service: str - :returns: the policy.d override directory. - :rtype: os.PathLike[str] - """ - return os.path.join("/", "etc", service, "policy.d") - - -def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): - """Clean out the policyd directory except for items that should be kept. - - The keep_paths, if used, should be set to the full path of the files that - should be kept in the policyd directory for the service. Note that the - service name is passed in, and then the policyd_dir_for() function is used. - This is so that a coding error doesn't result in a sudden deletion of the - charm (say). - - :param service: the service name to use to construct the policy.d dir. - :type service: str - :param keep_paths: optional list of paths to not delete. - :type keep_paths: Union[None, List[str]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - """ - _user = service if user is None else user - _group = service if group is None else group - keep_paths = keep_paths or [] - path = policyd_dir_for(service) - hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) - if not os.path.exists(path): - ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - for direntry in os.scandir(path): - # see if the path should be kept. - if direntry.path in keep_paths: - continue - # we remove any directories; it's ours and there shouldn't be any - if direntry.is_dir(): - shutil.rmtree(direntry.path) - else: - os.remove(direntry.path) - - -def maybe_create_directory_for(path, user, group): - """For the filename 'path', ensure that the directory for that path exists. - - Note that if the directory already exists then the permissions are NOT - changed. - - :param path: the filename including the path to it. - :type path: str - :param user: the user to create the directory as - :param group: the group to create the directory as - """ - _dir, _ = os.path.split(path) - if not os.path.exists(_dir): - ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) - - -def path_for_policy_file(service, name): - """Return the full path for a policy.d file that will be written to the - service's policy.d directory. - - It is constructed using policyd_dir_for(), the name and the ".yaml" - extension. - - For horizon, for example, it's a bit more complicated. The name param is - actually "override_service_dir/a_name", where target_service needs to be - one the allowed horizon override services. This translation and check is - done in the _yamlfiles() function. 
- - :param service: the service name - :type service: str - :param name: the name for the policy override - :type name: str - :returns: the full path name for the file - :rtype: os.PathLike[str] - """ - return os.path.join(policyd_dir_for(service), name + ".yaml") - - -def _policy_success_file(): - """Return the file name for a successful drop of policy.d overrides - - :returns: the path name for the file. - :rtype: str - """ - return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) - - -def remove_policy_success_file(): - """Remove the file that indicates successful policyd override.""" - try: - os.remove(_policy_success_file()) - except Exception: - pass - - -def set_policy_success_file(): - """Set the file that indicates successful policyd override.""" - open(_policy_success_file(), "w").close() - - -def is_policy_success_file_set(): - """Returns True if the policy success file has been set. - - This indicates that policies are overridden and working properly. - - :returns: True if the policy file is set - :rtype: bool - """ - return os.path.isfile(_policy_success_file()) - - -def policyd_status_message_prefix(): - """Return the prefix str for the status line. - - "PO:" indicating that the policy overrides are in place, or "PO (broken):" - if the policy is supposed to be working but there is no success file. - - :returns: the prefix - :rtype: str - """ - if is_policy_success_file_set(): - return "PO:" - return "PO (broken):" - - -def process_policy_resource_file(resource_file, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - preserve_topdir=False, - preprocess_filename=None, - user=None, - group=None): - """Process the resource file (which should contain at least one yaml file) - and write those files to the service's policy.d directory. - - The optional template_function is a function that accepts a python - string and has an opportunity to modify the document - prior to it being read by the yaml.safe_load() function and written to - disk. Note that this function does *not* say how the templating is done - - this is up to the charm to implement its chosen method. - - The param blacklist_paths are paths (that are in the service's policy.d - directory) that should not be touched. - - The param blacklist_keys are keys that must not appear in the yaml file. - If they do, then the whole policy.d file fails. - - The yaml file extracted from the resource_file (which is a zipped file) has - its file path reconstructed. This, also, must not match any path in the - black list. - - The yaml filename can be modified in two ways. If the `preserve_topdir` - param is True, then files will be flattened to the top dir. This allows - for creating sets of files that can be grouped into a single level tree - structure. - - Secondly, if the `preprocess_filename` param is not None and callable() - then the name is passed to that function for preprocessing before being - converted to the end location. This is to allow munging of the filename - prior to being tested for a blacklist path. - - If any error occurs, then the policy.d directory is cleared, the error is - written to the log, and the status line will eventually show as failed. - - :param resource_file: The zipped file to open and extract yaml files from. - :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] - :param service: the service name to construct the policy.d directory for. 
- :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the yaml - document. - :type template_function: Union[None, Callable[[AnyStr], AnyStr]] - :param preserve_topdir: Keep the toplevel subdir - :type preserve_topdir: bool - :param preprocess_filename: Optional function to use to process filenames - extracted from the resource file. - :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - :returns: True if the processing was successful, False if not. - :rtype: boolean - """ - hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) - blacklist_paths = blacklist_paths or [] - completed = False - _preprocess = None - if preprocess_filename is not None and callable(preprocess_filename): - _preprocess = preprocess_filename - _user = service if user is None else user - _group = service if group is None else group - try: - with open_and_filter_yaml_files( - resource_file, preserve_topdir) as (zfp, gen): - # first clear out the policy.d directory and clear success - remove_policy_success_file() - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - for name, ext, filename, zipinfo in gen: - # See if the name should be preprocessed. - if _preprocess is not None: - name = _preprocess(name) - # construct a name for the output file. - yaml_filename = path_for_policy_file(service, name) - if yaml_filename in blacklist_paths: - raise BadPolicyZipFile("policy.d name {} is blacklisted" - .format(yaml_filename)) - with zfp.open(zipinfo) as fp: - doc = fp.read() - # if template_function is not None, then offer the document - # to the template function - if ext in POLICYD_TEMPLATE_EXTS: - if (template_function is None or not - callable(template_function)): - raise BadPolicyZipFile( - "Template {} but no template_function is " - "available".format(filename)) - doc = template_function(doc) - yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - # we may have to create the directory - maybe_create_directory_for(yaml_filename, _user, _group) - ch_host.write_file(yaml_filename, - yaml.dump(yaml_doc).encode('utf-8'), - _user, - _group) - # Every thing worked, so we mark up a success. - completed = True - except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: - hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - except IOError as e: - # technically this shouldn't happen; it would be a programming error as - # the filename comes from Juju and thus, should exist. - hookenv.log( - "File {} failed with IOError. 
This really shouldn't happen" - " -- error: {}".format(resource_file, str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - except Exception as e: - import traceback - hookenv.log("General Exception({}) during policyd processing" - .format(str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - hookenv.log(traceback.format_exc()) - finally: - if not completed: - hookenv.log("Processing {} failed: cleaning policy.d directory" - .format(resource_file), - level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - else: - # touch the success filename - hookenv.log("policy.d overrides installed.", - level=POLICYD_LOG_LEVEL_DEFAULT) - set_policy_success_file() - return completed diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py deleted file mode 100644 index 96b9f71d..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/ssh_migrations.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright 2018 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess - -from charmhelpers.core.hookenv import ( - ERROR, - log, - relation_get, -) -from charmhelpers.contrib.network.ip import ( - is_ipv6, - ns_query, -) -from charmhelpers.contrib.openstack.utils import ( - get_hostname, - get_host_ip, - is_ip, -) - -NOVA_SSH_DIR = '/etc/nova/compute_ssh/' - - -def ssh_directory_for_unit(application_name, user=None): - """Return the directory used to store ssh assets for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified directory path. - :rtype: str - """ - if user: - application_name = "{}_{}".format(application_name, user) - _dir = os.path.join(NOVA_SSH_DIR, application_name) - for d in [NOVA_SSH_DIR, _dir]: - if not os.path.isdir(d): - os.mkdir(d) - for f in ['authorized_keys', 'known_hosts']: - f = os.path.join(_dir, f) - if not os.path.isfile(f): - open(f, 'w').close() - return _dir - - -def known_hosts(application_name, user=None): - """Return the known hosts file for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified path to file. - :rtype: str - """ - return os.path.join( - ssh_directory_for_unit(application_name, user), - 'known_hosts') - - -def authorized_keys(application_name, user=None): - """Return the authorized keys file for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified path to file. 
- :rtype: str - """ - return os.path.join( - ssh_directory_for_unit(application_name, user), - 'authorized_keys') - - -def ssh_known_host_key(host, application_name, user=None): - """Return the first entry in known_hosts for host. - - :param host: hostname to lookup in file. - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Host key - :rtype: str or None - """ - cmd = [ - 'ssh-keygen', - '-f', known_hosts(application_name, user), - '-H', - '-F', - host] - try: - # The first line of output is like '# Host xx found: line 1 type RSA', - # which should be excluded. - output = subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - # RC of 1 seems to be legitimate for most ssh-keygen -F calls. - if e.returncode == 1: - output = e.output - else: - raise - output = output.strip() - - if output: - # Bug #1500589 cmd has 0 rc on precise if entry not present - lines = output.split('\n') - if len(lines) >= 1: - return lines[0] - - return None - - -def remove_known_host(host, application_name, user=None): - """Remove the entry in known_hosts for host. - - :param host: hostname to lookup in file. - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - log('Removing SSH known host entry for compute host at %s' % host) - cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] - subprocess.check_call(cmd) - - -def is_same_key(key_1, key_2): - """Extract the key from two host entries and compare them. - - :param key_1: Host key - :type key_1: str - :param key_2: Host key - :type key_2: str - """ - # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' - # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare - # the part start with 'ssh-rsa' followed with '= ', because the hash - # value in the beginning will change each time. - k_1 = key_1.split('= ')[1] - k_2 = key_2.split('= ')[1] - return k_1 == k_2 - - -def add_known_host(host, application_name, user=None): - """Add the given host key to the known hosts file. - - :param host: host name - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] - try: - remote_key = subprocess.check_output(cmd).strip() - except Exception as e: - log('Could not obtain SSH host key from %s' % host, level=ERROR) - raise e - - current_key = ssh_known_host_key(host, application_name, user) - if current_key and remote_key: - if is_same_key(remote_key, current_key): - log('Known host key for compute host %s up to date.' % host) - return - else: - remove_known_host(host, application_name, user) - - log('Adding SSH host key to known hosts for compute node at %s.' % host) - with open(known_hosts(application_name, user), 'a') as out: - out.write("{}\n".format(remote_key)) - - -def ssh_authorized_key_exists(public_key, application_name, user=None): - """Check if given key is in the authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. 
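# Editor's note: why is_same_key() above compares only the tail of each
# entry. Hashed known_hosts lines look like
#   |1|<salt>|<hash>= ssh-rsa AAAA...
# and the salt (hence the hash) changes on every scan, so only the key
# material after the hashed-hostname field is stable. A sketch, not
# charmhelpers API:
def same_hashed_entry(line_a, line_b):
    # Drop the '|1|<salt>|<hash>=' prefix, keep 'ssh-rsa AAAA...'.
    return line_a.split('= ', 1)[1] == line_b.split('= ', 1)[1]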
- :type user: str - :returns: Whether given key is in the authorized_key file. - :rtype: boolean - """ - with open(authorized_keys(application_name, user)) as keys: - return ('%s' % public_key) in keys.read() - - -def add_authorized_key(public_key, application_name, user=None): - """Add given key to the authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - """ - with open(authorized_keys(application_name, user), 'a') as keys: - keys.write("{}\n".format(public_key)) - - -def ssh_compute_add_host_and_key(public_key, hostname, private_address, - application_name, user=None): - """Add a compute node's ssh details to local cache. - - Collect various hostname variations and add the corresponding host keys to - the local known hosts file. Finally, add the supplied public key to the - authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param hostname: Hostname to collect host keys from. - :type hostname: str - :param private_address: Corresponding private address for hostname - :type private_address: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - """ - # If remote compute node hands us a hostname, ensure we have a - # known hosts entry for its IP, hostname and FQDN. - hosts = [private_address] - - if not is_ipv6(private_address): - if hostname: - hosts.append(hostname) - - if is_ip(private_address): - hn = get_hostname(private_address) - if hn: - hosts.append(hn) - short = hn.split('.')[0] - if ns_query(short): - hosts.append(short) - else: - hosts.append(get_host_ip(private_address)) - short = private_address.split('.')[0] - if ns_query(short): - hosts.append(short) - - for host in list(set(hosts)): - add_known_host(host, application_name, user) - - if not ssh_authorized_key_exists(public_key, application_name, user): - log('Saving SSH authorized key for compute host at %s.' % - private_address) - add_authorized_key(public_key, application_name, user) - - -def ssh_compute_add(public_key, application_name, rid=None, unit=None, - user=None): - """Add a compute node's ssh details to local cache. - - Collect various hostname variations and add the corresponding host keys to - the local known hosts file. Finally, add the supplied public key to the - authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param rid: Relation id of the relation between this charm and the app. If - none is supplied it is assumed it's the relation relating to - the current hook context. - :type rid: str - :param unit: Unit to add ssh assets for. If none is supplied it is assumed - it's the unit relating to the current hook context. - :type unit: str - :param user: The user that the ssh assets are for. - :type user: str - """ - relation_data = relation_get(rid=rid, unit=unit) - ssh_compute_add_host_and_key( - public_key, - relation_data.get('hostname'), - relation_data.get('private-address'), - application_name, - user=user) - - -def ssh_known_hosts_lines(application_name, user=None): - """Return contents of known_hosts file for given application. 
- - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - """ - known_hosts_list = [] - with open(known_hosts(application_name, user)) as hosts: - for hosts_line in hosts: - if hosts_line.rstrip(): - known_hosts_list.append(hosts_line.rstrip()) - return(known_hosts_list) - - -def ssh_authorized_keys_lines(application_name, user=None): - """Return contents of authorized_keys file for given application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - """ - authorized_keys_list = [] - - with open(authorized_keys(application_name, user)) as keys: - for authkey_line in keys: - if authkey_line.rstrip(): - authorized_keys_list.append(authkey_line.rstrip()) - return(authorized_keys_list) - - -def ssh_compute_remove(public_key, application_name, user=None): - """Remove given public key from authorized_keys file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - """ - if not (os.path.isfile(authorized_keys(application_name, user)) or - os.path.isfile(known_hosts(application_name, user))): - return - - keys = ssh_authorized_keys_lines(application_name, user=user) - keys = [k.strip() for k in keys] - - if public_key not in keys: - return - - [keys.remove(key) for key in keys if key == public_key] - - with open(authorized_keys(application_name, user), 'w') as _keys: - keys = '\n'.join(keys) - if not keys.endswith('\n'): - keys += '\n' - _keys.write(keys) - - -def get_ssh_settings(application_name, user=None): - """Retrieve the known host entries and public keys for application - - Retrieve the known host entries and public keys for application for all - units of the given application related to this application for the - app + user combination. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh assets are for. - :type user: str - :returns: Public keys + host keys for all units for app + user combination. 
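# Editor's note: an illustration of the flattened dict that
# get_ssh_settings() (whose docstring opens above) returns for use as
# relation data; the values are fabricated, the key scheme is the point.
example_settings = {
    'nova_known_hosts_0': '|1|c2FsdA==|aGFzaA== ssh-rsa AAAA...',
    'nova_known_hosts_1': '|1|c2FsdB==|aGFzaB== ssh-rsa AAAA...',
    'nova_known_hosts_max_index': 2,
    'nova_authorized_keys_0': 'ssh-rsa AAAA... root@compute-1',
    'nova_authorized_keys_max_index': 1,
}
# The 'nova_' prefix appears because user='nova' was passed; with no user
# the keys are plain 'known_hosts_0', 'authorized_keys_max_index', etc.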
- :rtype: dict - """ - settings = get_ssh_settings(application_name) - settings.update(get_ssh_settings(application_name, user='nova')) - return settings diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py deleted file mode 100644 index 9df5f746..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templates/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py deleted file mode 100644 index 3b7c6a9f..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/templating.py +++ /dev/null @@ -1,370 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import ( - log, - ERROR, - INFO, - TRACE -) -from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES - -try: - from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions -except ImportError: - apt_update(fatal=True) - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions - - -class OSConfigException(Exception): - pass - - -def get_loader(templates_dir, os_release): - """ - Create a jinja2.ChoiceLoader containing template dirs up to - and including os_release. If directory template directory - is missing at templates_dir, it will be omitted from the loader. - templates_dir is added to the bottom of the search list as a base - loading dir. - - A charm may also ship a templates dir with this module - and it will be appended to the bottom of the search list, eg:: - - hooks/charmhelpers/contrib/openstack/templates - - :param templates_dir (str): Base template directory containing release - sub-directories. - :param os_release (str): OpenStack release codename to construct template - loader. - :returns: jinja2.ChoiceLoader constructed with a list of - jinja2.FilesystemLoaders, ordered in descending - order by OpenStack release. - """ - tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.values()] - - if not os.path.isdir(templates_dir): - log('Templates directory not found @ %s.' 
% templates_dir, - level=ERROR) - raise OSConfigException - - # the bottom contains templates_dir and possibly a common templates dir - # shipped with the helper. - loaders = [FileSystemLoader(templates_dir)] - helper_templates = os.path.join(os.path.dirname(__file__), 'templates') - if os.path.isdir(helper_templates): - loaders.append(FileSystemLoader(helper_templates)) - - for rel, tmpl_dir in tmpl_dirs: - if os.path.isdir(tmpl_dir): - loaders.insert(0, FileSystemLoader(tmpl_dir)) - if rel == os_release: - break - # demote this log to the lowest level; we don't really need to see these - # logs in production even when debugging. - log('Creating choice loader with dirs: %s' % - [l.searchpath for l in loaders], level=TRACE) - return ChoiceLoader(loaders) - - -class OSConfigTemplate(object): - """ - Associates a config file template with a list of context generators. - Responsible for constructing a template context based on those generators. - """ - - def __init__(self, config_file, contexts, config_template=None): - self.config_file = config_file - - if hasattr(contexts, '__call__'): - self.contexts = [contexts] - else: - self.contexts = contexts - - self._complete_contexts = [] - - self.config_template = config_template - - def context(self): - ctxt = {} - for context in self.contexts: - _ctxt = context() - if _ctxt: - ctxt.update(_ctxt) - # track interfaces for every complete context. - [self._complete_contexts.append(interface) - for interface in context.interfaces - if interface not in self._complete_contexts] - return ctxt - - def complete_contexts(self): - ''' - Return a list of interfaces that have satisfied contexts. - ''' - if self._complete_contexts: - return self._complete_contexts - self.context() - return self._complete_contexts - - @property - def is_string_template(self): - """:returns: Boolean if this instance is a template initialised with a string""" - return self.config_template is not None - - -class OSConfigRenderer(object): - """ - This class provides a common templating system to be used by OpenStack - charms. It is intended to help charms share common code and templates, - and ease the burden of managing config templates across multiple OpenStack - releases. - - Basic usage:: - - # import some common context generators from charmhelpers - from charmhelpers.contrib.openstack import context - - # Create a renderer object for a specific OS release. - configs = OSConfigRenderer(templates_dir='/tmp/templates', - openstack_release='grizzly') - # register some config files with context generators. - configs.register(config_file='/etc/nova/nova.conf', - contexts=[context.SharedDBContext(), - context.AMQPContext()]) - configs.register(config_file='/etc/nova/api-paste.ini', - contexts=[context.IdentityServiceContext()]) - configs.register(config_file='/etc/haproxy/haproxy.conf', - contexts=[context.HAProxyContext()]) - configs.register(config_file='/etc/keystone/policy.d/extra.cfg', - contexts=[context.ExtraPolicyContext(), - context.KeystoneContext()], - config_template=hookenv.config('extra-policy')) - # write out a single config - configs.write('/etc/nova/nova.conf') - # write out all registered configs - configs.write_all() - - **OpenStack Releases and template loading** - - When the object is instantiated, it is associated with a specific OS - release. This dictates how the template loader will be constructed. 
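# Editor's note: a condensed model of the search order that get_loader()
# above builds, assuming a charm targeting 'havana'. The release list is a
# sample; the real code walks OPENSTACK_CODENAMES.
import os
from jinja2 import ChoiceLoader, FileSystemLoader

def toy_loader(templates_dir, os_release):
    releases = ['grizzly', 'havana', 'icehouse']  # oldest -> newest (sample)
    loaders = [FileSystemLoader(templates_dir)]   # base dir sits at the bottom
    for rel in releases:
        d = os.path.join(templates_dir, rel)
        if os.path.isdir(d):
            loaders.insert(0, FileSystemLoader(d))  # newer dirs win
        if rel == os_release:
            break  # never consult releases newer than the target
    return ChoiceLoader(loaders)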
- - The constructed loader attempts to load the template from several places - in the following order: - - from the most recent OS release-specific template dir (if one exists) - - the base templates_dir - - a template directory shipped in the charm with this helper file. - - For the example above, '/tmp/templates' contains the following structure:: - - /tmp/templates/nova.conf - /tmp/templates/api-paste.ini - /tmp/templates/grizzly/api-paste.ini - /tmp/templates/havana/api-paste.ini - - Since it was registered with the grizzly release, it first searches - the grizzly directory for nova.conf, then the templates dir. - - When writing api-paste.ini, it will find the template in the grizzly - directory. - - If the object were created with folsom, it would fall back to the - base templates dir for its api-paste.ini template. - - This system should help manage changes in config files through - openstack releases, allowing charms to fall back to the most recently - updated config template for a given release. - - The haproxy.conf, since it is not shipped in the templates dir, will - be loaded from the module directory's template directory, eg - $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows - us to ship common templates (haproxy, apache) with the helpers. - - **Context generators** - - Context generators are used to generate template contexts during hook - execution. Doing so may require inspecting service relations, charm - config, etc. When registered, a config file is associated with a list - of generators. When a template is rendered and written, all context - generators are called in a chain to generate the context dictionary - passed to the jinja2 template. See context.py for more info. - """ - def __init__(self, templates_dir, openstack_release): - if not os.path.isdir(templates_dir): - log('Could not locate templates dir %s' % templates_dir, - level=ERROR) - raise OSConfigException - - self.templates_dir = templates_dir - self.openstack_release = openstack_release - self.templates = {} - self._tmpl_env = None - - if None in [Environment, ChoiceLoader, FileSystemLoader]: - # if this code is running, the object is created pre-install hook. - # jinja2 shouldn't get touched until the module is reloaded on next - # hook execution, with proper jinja2 bits successfully imported. - apt_install('python3-jinja2') - - def register(self, config_file, contexts, config_template=None): - """ - Register a config file with a list of context generators to be called - during rendering. - config_template can be used to load a template from a string instead of - using template loaders and template files. - :param config_file (str): a path where a config file will be rendered - :param contexts (list): a list of context dictionaries with kv pairs - :param config_template (str): an optional template string to use - """ - self.templates[config_file] = OSConfigTemplate( - config_file=config_file, - contexts=contexts, - config_template=config_template - ) - log('Registered config file: {}'.format(config_file), - level=INFO) - - def _get_tmpl_env(self): - if not self._tmpl_env: - loader = get_loader(self.templates_dir, self.openstack_release) - self._tmpl_env = Environment(loader=loader) - - def _get_template(self, template): - self._get_tmpl_env() - template = self._tmpl_env.get_template(template) - log('Loaded template from {}'.format(template.filename), - level=INFO) - return template - - def _get_template_from_string(self, ostmpl): - ''' - Get a jinja2 template object from a string. 
- :param ostmpl: OSConfigTemplate to use as a data source. - ''' - self._get_tmpl_env() - template = self._tmpl_env.from_string(ostmpl.config_template) - log('Loaded a template from a string for {}'.format( - ostmpl.config_file), - level=INFO) - return template - - def render(self, config_file): - if config_file not in self.templates: - log('Config not registered: {}'.format(config_file), level=ERROR) - raise OSConfigException - - ostmpl = self.templates[config_file] - ctxt = ostmpl.context() - - if ostmpl.is_string_template: - template = self._get_template_from_string(ostmpl) - log('Rendering from a string template: ' - '{}'.format(config_file), - level=INFO) - else: - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking - # for it using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from {} by {} or {}.' - ''.format( - self.templates_dir, - os.path.basename(config_file), - _tmpl - ), - level=ERROR) - raise e - - log('Rendering from template: {}'.format(config_file), - level=INFO) - return template.render(ctxt) - - def write(self, config_file): - """ - Write a single config file, raises if config file is not registered. - """ - if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) - raise OSConfigException - - _out = self.render(config_file).encode('UTF-8') - - with open(config_file, 'wb') as out: - out.write(_out) - - log('Wrote template %s.' % config_file, level=INFO) - - def write_all(self): - """ - Write out all registered config files. - """ - for k in self.templates.keys(): - self.write(k) - - def set_release(self, openstack_release): - """ - Resets the template environment and generates a new template loader - based on a the new openstack release. - """ - self._tmpl_env = None - self.openstack_release = openstack_release - self._get_tmpl_env() - - def complete_contexts(self): - ''' - Returns a list of context interfaces that yield a complete context. - ''' - interfaces = [] - for i in self.templates.values(): - interfaces.extend(i.complete_contexts()) - return interfaces - - def get_incomplete_context_data(self, interfaces): - ''' - Return dictionary of relation status of interfaces and any missing - required context data. 
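# Editor's note: the "munged" fallback used by render() above when no
# template named after the file's basename exists; a worked example of the
# path transformation (illustrative helper, not charmhelpers API).
def munged_template_name(config_file):
    # '/etc/apache2/apache2.conf' -> 'etc_apache2_apache2.conf'
    return '_'.join(config_file.split('/')[1:])

assert munged_template_name('/etc/apache2/apache2.conf') == \
    'etc_apache2_apache2.conf'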
Example: - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}} - ''' - incomplete_context_data = {} - - for i in self.templates.values(): - for context in i.contexts: - for interface in interfaces: - related = False - if interface in context.interfaces: - related = context.get_related() - missing_data = context.missing_data - if missing_data: - incomplete_context_data[interface] = {'missing_data': missing_data} - if related: - if incomplete_context_data.get(interface): - incomplete_context_data[interface].update({'related': True}) - else: - incomplete_context_data[interface] = {'related': True} - else: - incomplete_context_data[interface] = {'related': False} - return incomplete_context_data diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py deleted file mode 100644 index c8747c16..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/openstack/utils.py +++ /dev/null @@ -1,2694 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Common python helper functions used for OpenStack charms. -from collections import OrderedDict, namedtuple -from functools import partial, wraps - -import subprocess -import json -import operator -import os -import sys -import re -import itertools -import functools - -import traceback -import uuid -import yaml - -from charmhelpers import deprecate - -from charmhelpers.contrib.network import ip - -from charmhelpers.core import decorators, unitdata - -import charmhelpers.contrib.openstack.deferred_events as deferred_events - -from charmhelpers.core.hookenv import ( - WORKLOAD_STATES, - action_fail, - action_get, - action_set, - config, - expected_peer_units, - expected_related_units, - log as juju_log, - charm_dir, - INFO, - ERROR, - metadata, - related_units, - relation_get, - relation_id, - relation_ids, - relation_set, - service_name as ch_service_name, - status_set, - hook_name, - application_version_set, - cached, - leader_set, - leader_get, - local_unit, -) - -from charmhelpers.core.strutils import ( - BasicStringComparator, - bool_from_string, -) - -from charmhelpers.contrib.storage.linux.lvm import ( - deactivate_lvm_volume_group, - is_lvm_physical_volume, - remove_lvm_physical_volume, -) - -from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, - is_ipv6, - port_has_listener, -) - -from charmhelpers.core.host import ( - lsb_release, - mounts, - umount, - service_running, - service_pause, - service_resume, - service_stop, - service_start, - restart_on_change_helper, -) - -from charmhelpers.fetch import ( - apt_cache, - apt_install, - import_key as fetch_import_key, - add_source as fetch_add_source, - SourceConfigError, - GPGKeyError, - get_upstream_version, - filter_installed_packages, - filter_missing_packages, - ubuntu_apt_pkg as apt, - OPENSTACK_RELEASES, - UBUNTU_OPENSTACK_RELEASE, -) - -from charmhelpers.fetch.snap import ( - snap_install, - snap_refresh, - 
valid_snap_channel, -) - -from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk -from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError -from charmhelpers.contrib.openstack.policyd import ( - policyd_status_message_prefix, - POLICYD_CONFIG_NAME, -) - -from charmhelpers.contrib.openstack.ha.utils import ( - expect_ha, -) - -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' - -DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' - 'restricted main multiverse universe') - -OPENSTACK_CODENAMES = OrderedDict([ - # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version - # number. This just means the i-th version of the year yyyy. - ('2011.2', 'diablo'), - ('2012.1', 'essex'), - ('2012.2', 'folsom'), - ('2013.1', 'grizzly'), - ('2013.2', 'havana'), - ('2014.1', 'icehouse'), - ('2014.2', 'juno'), - ('2015.1', 'kilo'), - ('2015.2', 'liberty'), - ('2016.1', 'mitaka'), - ('2016.2', 'newton'), - ('2017.1', 'ocata'), - ('2017.2', 'pike'), - ('2018.1', 'queens'), - ('2018.2', 'rocky'), - ('2019.1', 'stein'), - ('2019.2', 'train'), - ('2020.1', 'ussuri'), - ('2020.2', 'victoria'), - ('2021.1', 'wallaby'), - ('2021.2', 'xena'), - ('2022.1', 'yoga'), -]) - -# The ugly duckling - must list releases oldest to newest -SWIFT_CODENAMES = OrderedDict([ - ('diablo', - ['1.4.3']), - ('essex', - ['1.4.8']), - ('folsom', - ['1.7.4']), - ('grizzly', - ['1.7.6', '1.7.7', '1.8.0']), - ('havana', - ['1.9.0', '1.9.1', '1.10.0']), - ('icehouse', - ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), - ('juno', - ['2.0.0', '2.1.0', '2.2.0']), - ('kilo', - ['2.2.1', '2.2.2']), - ('liberty', - ['2.3.0', '2.4.0', '2.5.0']), - ('mitaka', - ['2.5.0', '2.6.0', '2.7.0']), - ('newton', - ['2.8.0', '2.9.0', '2.10.0']), - ('ocata', - ['2.11.0', '2.12.0', '2.13.0']), - ('pike', - ['2.13.0', '2.15.0']), - ('queens', - ['2.16.0', '2.17.0']), - ('rocky', - ['2.18.0', '2.19.0']), - ('stein', - ['2.20.0', '2.21.0']), - ('train', - ['2.22.0', '2.23.0']), - ('ussuri', - ['2.24.0', '2.25.0']), - ('victoria', - ['2.25.0', '2.26.0']), -]) - -# >= Liberty version->codename mapping -PACKAGE_CODENAMES = { - 'nova-common': OrderedDict([ - ('12', 'liberty'), - ('13', 'mitaka'), - ('14', 'newton'), - ('15', 'ocata'), - ('16', 'pike'), - ('17', 'queens'), - ('18', 'rocky'), - ('19', 'stein'), - ('20', 'train'), - ('21', 'ussuri'), - ('22', 'victoria'), - ]), - 'neutron-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ('14', 'stein'), - ('15', 'train'), - ('16', 'ussuri'), - ('17', 'victoria'), - ]), - 'cinder-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ('14', 'stein'), - ('15', 'train'), - ('16', 'ussuri'), - ('17', 'victoria'), - ]), - 'keystone': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('17', 'ussuri'), - ('18', 'victoria'), - ]), - 'horizon-common': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('18', 'ussuri'), # Note 
this was actually 17.0 - 18.3 - ('19', 'victoria'), # Note this is really 18.6 - ]), - 'ceilometer-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ('12', 'stein'), - ('13', 'train'), - ('14', 'ussuri'), - ('15', 'victoria'), - ]), - 'heat-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ('12', 'stein'), - ('13', 'train'), - ('14', 'ussuri'), - ('15', 'victoria'), - ]), - 'glance-common': OrderedDict([ - ('11', 'liberty'), - ('12', 'mitaka'), - ('13', 'newton'), - ('14', 'ocata'), - ('15', 'pike'), - ('16', 'queens'), - ('17', 'rocky'), - ('18', 'stein'), - ('19', 'train'), - ('20', 'ussuri'), - ('21', 'victoria'), - ]), - 'openstack-dashboard': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), - ]), -} - -DEFAULT_LOOPBACK_SIZE = '5G' - -DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' - -DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] - - -class CompareOpenStackReleases(BasicStringComparator): - """Provide comparisons of OpenStack releases. - - Use in the form of - - if CompareOpenStackReleases(release) > 'mitaka': - # do something with mitaka - """ - _list = OPENSTACK_RELEASES - - -def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') - sys.exit(1) - - -def get_installed_semantic_versioned_packages(): - '''Get a list of installed packages which have OpenStack semantic versioning - - :returns List of installed packages - :rtype: [pkg1, pkg2, ...] 
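# Editor's note: CompareOpenStackReleases above orders codenames by their
# position in OPENSTACK_RELEASES rather than lexically. A self-contained
# stand-in with a sample list (the real comparator derives from
# charmhelpers.core.strutils.BasicStringComparator):
RELEASES = ['liberty', 'mitaka', 'newton', 'ocata']  # sample, oldest first

class CompareReleases(str):
    def __lt__(self, other):
        return RELEASES.index(self) < RELEASES.index(other)
    def __gt__(self, other):
        return RELEASES.index(self) > RELEASES.index(other)

assert CompareReleases('ocata') > 'mitaka'
assert CompareReleases('liberty') < 'newton'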
- ''' - return filter_missing_packages(PACKAGE_CODENAMES.keys()) - - -def get_os_codename_install_source(src): - '''Derive OpenStack release codename from a given installation source.''' - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = '' - if src is None: - return rel - if src in OPENSTACK_RELEASES: - return src - if src in ['distro', 'distro-proposed', 'proposed']: - try: - rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] - except KeyError: - e = 'Could not derive openstack release for '\ - 'this Ubuntu release: %s' % ubuntu_rel - error_out(e) - return rel - - if src.startswith('cloud:'): - ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('-')[1].split('/')[0] - return ca_rel - - # Best guess match based on deb string provided - if (src.startswith('deb') or - src.startswith('ppa') or - src.startswith('snap')): - for v in OPENSTACK_CODENAMES.values(): - if v in src: - return v - - -def get_os_version_install_source(src): - codename = get_os_codename_install_source(src) - return get_os_version_codename(codename) - - -def get_os_codename_version(vers): - '''Determine OpenStack codename from version number.''' - try: - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): - '''Determine OpenStack version number from codename.''' - for k, v in version_map.items(): - if v == codename: - return k - e = 'Could not derive OpenStack version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): - if k == codename: - return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_swift_codename(version): - '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] - - if len(codenames) > 1: - # If more than one release codename contains this version we determine - # the actual codename based on the highest available install source. - for codename in reversed(codenames): - releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in releases.items() if codename in v] - ret = (subprocess - .check_output(['apt-cache', 'policy', 'swift']) - .decode('UTF-8')) - if codename in ret or release[0] in ret: - return codename - elif len(codenames) == 1: - return codenames[0] - - # NOTE: fallback - attempt to match with just major.minor version - match = re.match(r'^(\d+)\.(\d+)', version) - if match: - major_minor_version = match.group(0) - for codename, versions in SWIFT_CODENAMES.items(): - for release_version in versions: - if release_version.startswith(major_minor_version): - return codename - - return None - - -def get_os_codename_package(package, fatal=True): - """Derive OpenStack release codename from an installed package. - - Initially, see if the openstack-release pkg is available (by trying to - install it) and use it instead. - - If it isn't then it falls back to the existing method of checking the - version of the package passed and then resolving the version from that - using lookup tables. - - Note: if possible, charms should use get_installed_os_version() to - determine the version of the "openstack-release" pkg. - - :param package: the package to test for version information. 
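# Editor's note: a worked example of the 'cloud:' parsing in
# get_os_codename_install_source() above; 'cloud:focal-victoria/proposed'
# is a typical Ubuntu Cloud Archive pocket spec.
src = 'cloud:focal-victoria/proposed'
ca_rel = src.split(':')[1]                     # 'focal-victoria/proposed'
codename = ca_rel.split('-')[1].split('/')[0]  # -> 'victoria'
assert codename == 'victoria'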
- :type package: str - :param fatal: If True (default), then die via error_out() - :type fatal: bool - :returns: the OpenStack release codename (e.g. ussuri) - :rtype: str - """ - - codename = get_installed_os_version() - if codename: - return codename - - if snap_install_requested(): - cmd = ['snap', 'list', package] - try: - out = subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return None - lines = out.split('\n') - for line in lines: - if package in line: - # Second item in list is Version - return line.split()[1] - - cache = apt_cache() - - try: - pkg = cache[package] - except Exception: - if not fatal: - return None - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - if not fatal: - return None - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - if 'swift' in pkg.name: - # Fully x.y.z match for swift versions - match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) - else: - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match(r'^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - - # Generate a major version number for newer semantic - # versions of openstack projects - major_vers = vers.split('.')[0] - # >= Liberty independent project versions - if (package in PACKAGE_CODENAMES and - major_vers in PACKAGE_CODENAMES[package]): - return PACKAGE_CODENAMES[package][major_vers] - else: - # < Liberty co-ordinated project versions - try: - if 'swift' in pkg.name: - return get_swift_codename(vers) - else: - return OPENSTACK_CODENAMES[vers] - except KeyError: - if not fatal: - return None - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_package(pkg, fatal=True): - '''Derive OpenStack version number from an installed package.''' - codename = get_os_codename_package(pkg, fatal=fatal) - - if not codename: - return None - - if 'swift' in pkg: - vers_map = SWIFT_CODENAMES - for cname, version in vers_map.items(): - if cname == codename: - return version[-1] - else: - vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.items(): - if cname == codename: - return version - - -def get_installed_os_version(): - """Determine the OpenStack release code name from openstack-release pkg. - - This uses the "openstack-release" pkg (if it exists) to return the - OpenStack release codename (e.g. ussuri, mitaka, ocata, etc.) - - Note, it caches the result so that it is only done once per hook. - - :returns: the OpenStack release codename, if available - :rtype: Optional[str] - """ - @cached - def _do_install(): - apt_install(filter_installed_packages(['openstack-release']), - fatal=False, quiet=True) - - _do_install() - return openstack_release().get('OPENSTACK_CODENAME') - - -@cached -def openstack_release(): - """Return /etc/openstack-release in a dict.""" - d = {} - try: - with open('/etc/openstack-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - except FileNotFoundError: - pass - return d - - -# Module local cache variable for the os_release. 
-_os_rel = None - - -def reset_os_release(): - '''Unset the cached os_release version''' - global _os_rel - _os_rel = None - - -def os_release(package, base=None, reset_cache=False, source_key=None): - """Returns OpenStack release codename from a cached global. - - If reset_cache then unset the cached os_release version and return the - freshly determined version. - - If the codename can not be determined from either an installed package or - the installation source, the earliest release supported by the charm should - be returned. - - :param package: Name of package to determine release from - :type package: str - :param base: Fallback codename if endeavours to determine from package fail - :type base: Optional[str] - :param reset_cache: Reset any cached codename value - :type reset_cache: bool - :param source_key: Name of source configuration option - (default: 'openstack-origin') - :type source_key: Optional[str] - :returns: OpenStack release codename - :rtype: str - """ - source_key = source_key or 'openstack-origin' - if not base: - base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] - global _os_rel - if reset_cache: - reset_os_release() - if _os_rel: - return _os_rel - _os_rel = ( - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config(source_key)) or - base) - return _os_rel - - -@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) -def import_key(keyid): - """Import a key, either ASCII armored, or a GPG key id. - - @param keyid: the key in ASCII armor format, or a GPG key id. - @raises SystemExit() via sys.exit() on failure. - """ - try: - return fetch_import_key(keyid) - except GPGKeyError as e: - error_out("Could not import key: {}".format(str(e))) - - -def get_source_and_pgp_key(source_and_key): - """Look for a pgp key ID or ascii-armor key in the given input. - - :param source_and_key: String, "source_spec|keyid" where '|keyid' is - optional. - :returns: (source_spec, key_id OR None) as a tuple. Returns None for key_id - if there was no '|' in the source_and_key string. - """ - try: - source, key = source_and_key.split('|', 1) - return source, key or None - except ValueError: - return source_and_key, None - - -@deprecate("use charmhelpers.fetch.add_source() instead.", - "2017-07", log=juju_log) -def configure_installation_source(source_plus_key): - """Configure an installation source. - - The functionality is provided by charmhelpers.fetch.add_source() - The difference between the two functions is that add_source() signature - requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specification - 'source'. - - Another difference from add_source() is that the function calls sys.exit(1) - if the configuration fails, whereas add_source() raises - SourceConfigError(). Another difference is that add_source() - silently fails (with a juju_log command) if there is no matching source to - configure, whereas this function fails with a sys.exit(1). - - :param source_plus_key: String plus optional key -- see above for details. - - Note that the behaviour on error is to log the error to the juju log and - then call sys.exit(1). 
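# Editor's note: expected behaviour of get_source_and_pgp_key() above for
# the two common shapes of the option value; a minimal equivalent for
# illustration (the key id shown is made up).
def split_source_and_key(source_and_key):
    try:
        source, key = source_and_key.split('|', 1)
        return source, key or None
    except ValueError:
        return source_and_key, None

assert split_source_and_key('cloud:focal-victoria|DEADBEEF') == \
    ('cloud:focal-victoria', 'DEADBEEF')
assert split_source_and_key('distro') == ('distro', None)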
- """ - if source_plus_key.startswith('snap'): - # Do nothing for snap installs - return - # extract the key if there is one, denoted by a '|' in the rel - source, key = get_source_and_pgp_key(source_plus_key) - - # handle the ordinary sources via add_source - try: - fetch_add_source(source, key, fail_invalid=True) - except SourceConfigError as se: - error_out(str(se)) - - -def config_value_changed(option): - """ - Determine if config value changed since last call to this function. - """ - hook_data = unitdata.HookData() - with hook_data(): - db = unitdata.kv() - current = config(option) - saved = db.get(option) - db.set(option, current) - if saved is None: - return False - return current != saved - - -def get_endpoint_key(service_name, relation_id, unit_name): - """Return the key used to refer to an ep changed notification from a unit. - - :param service_name: Service name eg nova, neutron, placement etc - :type service_name: str - :param relation_id: The id of the relation the unit is on. - :type relation_id: str - :param unit_name: The name of the unit publishing the notification. - :type unit_name: str - :returns: The key used to refer to an ep changed notification from a unit - :rtype: str - """ - return '{}-{}-{}'.format( - service_name, - relation_id.replace(':', '_'), - unit_name.replace('/', '_')) - - -def get_endpoint_notifications(service_names, rel_name='identity-service'): - """Return all notifications for the given services. - - :param service_names: List of service name. - :type service_name: List - :param rel_name: Name of the relation to query - :type rel_name: str - :returns: A dict containing the source of the notification and its nonce. - :rtype: Dict[str, str] - """ - notifications = {} - for rid in relation_ids(rel_name): - for unit in related_units(relid=rid): - ep_changed_json = relation_get( - rid=rid, - unit=unit, - attribute='ep_changed') - if ep_changed_json: - ep_changed = json.loads(ep_changed_json) - for service in service_names: - if ep_changed.get(service): - key = get_endpoint_key(service, rid, unit) - notifications[key] = ep_changed[service] - return notifications - - -def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been received for an endpoint. - - :param service_name: Service name eg nova, neutron, placement etc - :type service_name: str - :param rel_name: Name of the relation to query - :type rel_name: str - :returns: Whether endpoint has changed - :rtype: bool - """ - changed = False - with unitdata.HookData()() as t: - db = t[0] - notifications = get_endpoint_notifications( - [service_name], - rel_name=rel_name) - for key, nonce in notifications.items(): - if db.get(key) != nonce: - juju_log(('New endpoint change notification found: ' - '{}={}').format(key, nonce), - 'INFO') - changed = True - break - return changed - - -def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the endpoint triggers in db so it can be tracked if they changed. - - :param service_names: List of service name. 
- :type service_name: List - :param rel_name: Name of the relation to query - :type rel_name: str - """ - with unitdata.HookData()() as t: - db = t[0] - notifications = get_endpoint_notifications( - service_names, - rel_name=rel_name) - for key, nonce in notifications.items(): - db.set(key, nonce) - - -def save_script_rc(script_path="scripts/scriptrc", **env_vars): - """ - Write an rc file in the charm-delivered directory containing - exported environment variables provided by env_vars. Any charm scripts run - outside the juju hook environment can source this scriptrc to obtain - updated config information necessary to perform health checks or - service changes. - """ - juju_rc_path = "%s/%s" % (charm_dir(), script_path) - if not os.path.exists(os.path.dirname(juju_rc_path)): - os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wt') as rc_script: - rc_script.write("#!/bin/bash\n") - for u, p in env_vars.items(): - if u != "script_path": - rc_script.write('export %s=%s\n' % (u, p)) - - -def openstack_upgrade_available(package): - """ - Determines if an OpenStack upgrade is available from installation - source, based on version of installed package. - - :param package: str: Name of installed package. - - :returns: bool: : Returns True if configured installation source offers - a newer version of package. - """ - - src = config('openstack-origin') - cur_vers = get_os_version_package(package) - if not cur_vers: - # The package has not been installed yet do not attempt upgrade - return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - try: - avail_vers = get_os_version_install_source(src) - except Exception: - avail_vers = cur_vers - apt.init() - return apt.version_compare(avail_vers, cur_vers) >= 1 - - -def ensure_block_device(block_device): - ''' - Confirm block_device, create as loopback if necessary. - - :param block_device: str: Full path of block device to ensure. - - :returns: str: Full path of ensured block device. - ''' - _none = ['None', 'none', None] - if (block_device in _none): - error_out('prepare_storage(): Missing required input: block_device=%s.' - % block_device) - - if block_device.startswith('/dev/'): - bdev = block_device - elif block_device.startswith('/'): - _bd = block_device.split('|') - if len(_bd) == 2: - bdev, size = _bd - else: - bdev = block_device - size = DEFAULT_LOOPBACK_SIZE - bdev = ensure_loopback_device(bdev, size) - else: - bdev = '/dev/%s' % block_device - - if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev) - - return bdev - - -def clean_storage(block_device): - ''' - Ensures a block device is clean. That is: - - unmounted - - any lvm volume groups are deactivated - - any lvm physical device signatures removed - - partition table wiped - - :param block_device: str: Full path to block device to clean. - ''' - for mp, d in mounts(): - if d == block_device: - juju_log('clean_storage(): %s is mounted @ %s, unmounting.' 
% - (d, mp), level=INFO) - umount(mp, persist=True) - - if is_lvm_physical_volume(block_device): - deactivate_lvm_volume_group(block_device) - remove_lvm_physical_volume(block_device) - else: - zap_disk(block_device) - - -is_ip = ip.is_ip -ns_query = ip.ns_query -get_host_ip = ip.get_host_ip -get_hostname = ip.get_hostname - - -def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): - mm_map = {} - if os.path.isfile(mm_file): - with open(mm_file, 'r') as f: - mm_map = json.load(f) - return mm_map - - -def sync_db_with_multi_ipv6_addresses(database, database_user, - relation_prefix=None): - hosts = get_ipv6_addr(dynamic_only=False) - - if config('vip'): - vips = config('vip').split() - for vip in vips: - if vip and is_ipv6(vip): - hosts.append(vip) - - kwargs = {'database': database, - 'username': database_user, - 'hostname': json.dumps(hosts)} - - if relation_prefix: - for key in list(kwargs.keys()): - kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] - del kwargs[key] - - for rid in relation_ids('shared-db'): - relation_set(relation_id=rid, **kwargs) - - -def os_requires_version(ostack_release, pkg): - """ - Decorator for hook to specify minimum supported release - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args): - if os_release(pkg) < ostack_release: - raise Exception("This hook is not supported on releases" - " before %s" % ostack_release) - f(*args) - return wrapped_f - return wrap - - -def os_workload_status(configs, required_interfaces, charm_func=None): - """ - Decorator to set workload status based on complete contexts - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args, **kwargs): - # Run the original function first - f(*args, **kwargs) - # Set workload status now that contexts have been - # acted on - set_os_workload_status(configs, required_interfaces, charm_func) - return wrapped_f - return wrap - - -def set_os_workload_status(configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Set the state of the workload status for the charm. - - This calls _determine_os_workload_status() to get the new state, message - and sets the status using status_set() - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _determine_os_workload_status( - configs, required_interfaces, charm_func, services, ports) - status_set(state, message) - - -def _determine_os_workload_status( - configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Determine the state of the workload status for the charm. - - This function returns the new workload status for the charm based - on the state of the interfaces, the paused state and whether the - services are actually running and any specified ports are open. - - This checks: - - 1. if the unit should be paused, that it is actually paused. If so the - state is 'maintenance' + message, else 'broken'. - 2. that the interfaces/relations are complete. If they are not then - it sets the state to either 'broken' or 'waiting' and an appropriate - message. - 3. If all the relation data is set, then it checks that the actual - services really are running. If not it sets the state to 'broken'. 
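# Editor's note: the shape of the os_requires_version() decorator defined
# above, as a self-contained equivalent. The original compares release
# codenames with plain string ordering, which holds because OpenStack
# codenames were alphabetical; 'mitaka'/'newton' below are illustrative.
from functools import wraps

def requires_release(minimum, current_release):
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            if current_release < minimum:
                raise Exception("This hook is not supported on releases"
                                " before %s" % minimum)
            return f(*args)
        return wrapped_f
    return wrap

@requires_release('mitaka', 'newton')  # hypothetical release values
def config_changed():
    pass

config_changed()  # runs; with current_release='liberty' it would raise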
- - If everything is okay then the state returns 'active'. - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _ows_check_if_paused(services, ports) - - if state is None: - state, message = _ows_check_generic_interfaces( - configs, required_interfaces) - - if state != 'maintenance' and charm_func: - # _ows_check_charm_func() may modify the state, message - state, message = _ows_check_charm_func( - state, message, lambda: charm_func(configs)) - - if state is None: - state, message = ows_check_services_running(services, ports) - - if state is None: - state = 'active' - message = "Unit is ready" - juju_log(message, 'INFO') - - try: - if config(POLICYD_CONFIG_NAME): - message = "{} {}".format(policyd_status_message_prefix(), message) - # Get deferred restarts events that have been triggered by a policy - # written by this charm. - deferred_restarts = list(set( - [e.service - for e in deferred_events.get_deferred_restarts() - if e.policy_requestor_name == ch_service_name()])) - if deferred_restarts: - svc_msg = "Services queued for restart: {}".format( - ', '.join(sorted(deferred_restarts))) - message = "{}. {}".format(message, svc_msg) - deferred_hooks = deferred_events.get_deferred_hooks() - if deferred_hooks: - svc_msg = "Hooks skipped due to disabled auto restarts: {}".format( - ', '.join(sorted(deferred_hooks))) - message = "{}. {}".format(message, svc_msg) - - except Exception: - pass - - return state, message - - -def _ows_check_if_paused(services=None, ports=None): - """Check if the unit is supposed to be paused, and if so check that the - services/ports (if passed) are actually stopped/not being listened to. - - If the unit isn't supposed to be paused, just return None, None - - If the unit is performing a series upgrade, return a message indicating - this. - - @param services: OPTIONAL services spec or list of service names. - @param ports: OPTIONAL list of port numbers. - @returns state, message or None, None - """ - if is_unit_upgrading_set(): - state, message = check_actually_paused(services=services, - ports=ports) - if state is None: - # we're paused okay, so set maintenance and return - state = "blocked" - message = ("Ready for do-release-upgrade and reboot. " - "Set complete when finished.") - return state, message - - if is_unit_paused_set(): - state, message = check_actually_paused(services=services, - ports=ports) - if state is None: - # we're paused okay, so set maintenance and return - state = "maintenance" - message = "Paused. Use 'resume' action to resume normal service." - return state, message - return None, None - - -def _ows_check_generic_interfaces(configs, required_interfaces): - """Check the complete contexts to determine the workload status. - - - Checks for missing or incomplete contexts - - juju log details of missing required data. - - determines the correct workload status - - creates an appropriate message for status_set(...) 
- - if there are no problems then the function returns None, None - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic_interface: [specific_interface], } - @returns state, message or None, None - """ - incomplete_rel_data = incomplete_relation_data(configs, - required_interfaces) - state = None - message = None - missing_relations = set() - incomplete_relations = set() - - for generic_interface, relations_states in incomplete_rel_data.items(): - related_interface = None - missing_data = {} - # Related or not? - for interface, relation_state in relations_states.items(): - if relation_state.get('related'): - related_interface = interface - missing_data = relation_state.get('missing_data') - break - # No relation ID for the generic_interface? - if not related_interface: - juju_log("{} relation is missing and must be related for " - "functionality. ".format(generic_interface), 'WARN') - state = 'blocked' - missing_relations.add(generic_interface) - else: - # Relation ID exists but no related unit - if not missing_data: - # Edge case - relation ID exists but unit is departing - _hook_name = hook_name() - if (('departed' in _hook_name or 'broken' in _hook_name) and - related_interface in _hook_name): - state = 'blocked' - missing_relations.add(generic_interface) - juju_log("{} relation's interface, {}, " - "relationship is departed or broken " - "and is required for functionality." - "".format(generic_interface, related_interface), - "WARN") - # Normal case relation ID exists but no related unit - # (joining) - else: - juju_log("{} relation's interface, {}, is related but has" - " no units in the relation." - "".format(generic_interface, related_interface), - "INFO") - # Related unit exists and data missing on the relation - else: - juju_log("{} relation's interface, {}, is related awaiting " - "the following data from the relationship: {}. " - "".format(generic_interface, related_interface, - ", ".join(missing_data)), "INFO") - if state != 'blocked': - state = 'waiting' - if generic_interface not in missing_relations: - incomplete_relations.add(generic_interface) - - if missing_relations: - message = "Missing relations: {}".format(", ".join(missing_relations)) - if incomplete_relations: - message += "; incomplete relations: {}" \ - "".format(", ".join(incomplete_relations)) - state = 'blocked' - elif incomplete_relations: - message = "Incomplete relations: {}" \ - "".format(", ".join(incomplete_relations)) - state = 'waiting' - - return state, message - - -def _ows_check_charm_func(state, message, charm_func_with_configs): - """Run a custom check function for the charm to see if it wants to - change the state. This is only run if not in 'maintenance' and - tests to see if the new state is more important than the previous - one determined by the interfaces/relations check. - - @param state: the previously determined state so far. - @param message: the user orientated message so far. - @param charm_func: a callable function that returns state, message - @returns state, message strings. 
- def _ows_check_charm_func(state, message, charm_func_with_configs):
-     """Run a custom check function for the charm to see if it wants to
-     change the state.  This is only run if not in 'maintenance' and
-     tests to see if the new state is more important than the previous
-     one determined by the interfaces/relations check.
- 
-     @param state: the previously determined state so far.
-     @param message: the user-oriented message so far.
-     @param charm_func: a callable function that returns state, message
-     @returns state, message strings.
-     """
-     if charm_func_with_configs:
-         charm_state, charm_message = charm_func_with_configs()
-         if (charm_state != 'active' and
-                 charm_state != 'unknown' and
-                 charm_state is not None):
-             state = workload_state_compare(state, charm_state)
-             if message:
-                 charm_message = charm_message.replace("Incomplete relations: ",
-                                                       "")
-                 message = "{}, {}".format(message, charm_message)
-             else:
-                 message = charm_message
-     return state, message
- 
- 
- @deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log)
- def _ows_check_services_running(services, ports):
-     return ows_check_services_running(services, ports)
- 
- 
- def ows_check_services_running(services, ports):
-     """Check that the services that should be running are actually running
-     and that any ports specified are being listened to.
- 
-     @param services: list of strings OR dictionary specifying services/ports
-     @param ports: list of ports
-     @returns state, message: strings or None, None
-     """
-     messages = []
-     state = None
-     if services is not None:
-         services = _extract_services_list_helper(services)
-         services_running, running = _check_running_services(services)
-         if not all(running):
-             messages.append(
-                 "Services not running that should be: {}"
-                 .format(", ".join(_filter_tuples(services_running, False))))
-             state = 'blocked'
-         # also verify that the ports that should be open are open
-         # NB, that ServiceManager objects only OPTIONALLY have ports
-         map_not_open, ports_open = (
-             _check_listening_on_services_ports(services))
-         if not all(ports_open):
-             # find which service has missing ports. They are in service
-             # order which makes it a bit easier.
-             message_parts = {service: ", ".join([str(v) for v in open_ports])
-                              for service, open_ports in map_not_open.items()}
-             message = ", ".join(
-                 ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
-             messages.append(
-                 "Services with ports not open that should be: {}"
-                 .format(message))
-             state = 'blocked'
- 
-     if ports is not None:
-         # and we can also check ports which we don't know the service for
-         ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
-         if not all(ports_open_bools):
-             messages.append(
-                 "Ports which should be open, but are not: {}"
-                 .format(", ".join([str(p) for p, v in ports_open
-                                    if not v])))
-             state = 'blocked'
- 
-     if state is not None:
-         message = "; ".join(messages)
-         return state, message
- 
-     return None, None
- 
- 
- def _extract_services_list_helper(services):
-     """Extract an OrderedDict of {service: [ports]} of the supplied services
-     for use by the other functions.
- 
-     The services object can either be:
-       - None : no services were passed (an empty dict is returned)
-       - a list of strings
-       - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
-       - An array of [{'service': service_name, ...}, ...]
- 
-     @param services: see above
-     @returns OrderedDict(service: [ports], ...)
-     """
-     if services is None:
-         return {}
-     if isinstance(services, dict):
-         services = services.values()
-     # either extract the list of services from the dictionary, or if
-     # it is a simple string, use that. i.e. works with mixed lists.
-     _s = OrderedDict()
-     for s in services:
-         if isinstance(s, dict) and 'service' in s:
-             _s[s['service']] = s.get('ports', [])
-         if isinstance(s, str):
-             _s[s] = []
-     return _s
- 
- 
- def _check_running_services(services):
-     """Check that the services dict provided is actually running and provide
-     a list of (service, boolean) tuples for each service.
- - Returns both a zipped list of (service, boolean) and a list of booleans - in the same order as the services. - - @param services: OrderedDict of strings: [ports], one for each service to - check. - @returns [(service, boolean), ...], : results for checks - [boolean] : just the result of the service checks - """ - services_running = [service_running(s) for s in services] - return list(zip(services, services_running)), services_running - - -def _check_listening_on_services_ports(services, test=False): - """Check that the unit is actually listening (has the port open) on the - ports that the service specifies are open. If test is True then the - function returns the services with ports that are open rather than - closed. - - Returns an OrderedDict of service: ports and a list of booleans - - @param services: OrderedDict(service: [port, ...], ...) - @param test: default=False, if False, test for closed, otherwise open. - @returns OrderedDict(service: [port-not-open, ...]...), [boolean] - """ - test = not(not(test)) # ensure test is True or False - all_ports = list(itertools.chain(*services.values())) - ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] - map_ports = OrderedDict() - matched_ports = [p for p, opened in zip(all_ports, ports_states) - if opened == test] # essentially opened xor test - for service, ports in services.items(): - set_ports = set(ports).intersection(matched_ports) - if set_ports: - map_ports[service] = set_ports - return map_ports, ports_states - - -def _check_listening_on_ports_list(ports): - """Check that the ports list given are being listened to - - Returns a list of ports being listened to and a list of the - booleans. - - @param ports: LIST of port numbers. - @returns [(port_num, boolean), ...], [boolean] - """ - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] - return zip(ports, ports_open), ports_open - - -def _filter_tuples(services_states, state): - """Return a simple list from a list of tuples according to the condition - - @param services_states: LIST of (string, boolean): service and running - state. - @param state: Boolean to match the tuple against. - @returns [LIST of strings] that matched the tuple RHS. - """ - return [s for s, b in services_states if b == state] - - -def workload_state_compare(current_workload_state, workload_state): - """ Return highest priority of two states""" - hierarchy = {'unknown': -1, - 'active': 0, - 'maintenance': 1, - 'waiting': 2, - 'blocked': 3, - } - - if hierarchy.get(workload_state) is None: - workload_state = 'unknown' - if hierarchy.get(current_workload_state) is None: - current_workload_state = 'unknown' - - # Set workload_state based on hierarchy of statuses - if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): - return current_workload_state - else: - return workload_state - - -def incomplete_relation_data(configs, required_interfaces): - """Check complete contexts against required_interfaces - Return dictionary of incomplete relation data. - - configs is an OSConfigRenderer object with configs registered - - required_interfaces is a dictionary of required general interfaces - with dictionary values of possible specific interfaces. - Example: - required_interfaces = {'database': ['shared-db', 'pgsql-db']} - - The interface is said to be satisfied if anyone of the interfaces in the - list has a complete context. - - Return dictionary of incomplete or missing required contexts with relation - status of interfaces and any missing data points. 
Example: - {'message': - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}}, - 'identity': - {'identity-service': {'related': False}}, - 'database': - {'pgsql-db': {'related': False}, - 'shared-db': {'related': True}}} - """ - complete_ctxts = configs.complete_contexts() - incomplete_relations = [ - svc_type - for svc_type, interfaces in required_interfaces.items() - if not set(interfaces).intersection(complete_ctxts)] - return { - i: configs.get_incomplete_context_data(required_interfaces[i]) - for i in incomplete_relations} - - -def do_action_openstack_upgrade(package, upgrade_callback, configs): - """Perform action-managed OpenStack upgrade. - - Upgrades packages to the configured openstack-origin version and sets - the corresponding action status as a result. - - For backwards compatibility a config flag (action-managed-upgrade) must - be set for this code to run, otherwise a full service level upgrade will - fire on config-changed. - - @param package: package name for determining if openstack upgrade available - @param upgrade_callback: function callback to charm's upgrade function - @param configs: templating object derived from OSConfigRenderer class - - @return: True if upgrade successful; False if upgrade failed or skipped - """ - ret = False - - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('upgrade callback resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade'}) - else: - action_set({'outcome': 'no upgrade available'}) - - return ret - - -def do_action_package_upgrade(package, upgrade_callback, configs): - """Perform package upgrade within the current OpenStack release. - - Upgrades packages only if there is not an openstack upgrade available, - and sets the corresponding action status as a result. 
- - @param package: package name for determining if openstack upgrade available - @param upgrade_callback: function callback to charm's upgrade function - @param configs: templating object derived from OSConfigRenderer class - - @return: True if upgrade successful; False if upgrade failed or skipped - """ - ret = False - - if not openstack_upgrade_available(package): - juju_log('Upgrading packages') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('upgrade callback resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'upgrade skipped because an openstack upgrade ' - 'is available'}) - - return ret - - -def remote_restart(rel_name, remote_service=None): - trigger = { - 'restart-trigger': str(uuid.uuid4()), - } - if remote_service: - trigger['remote-service'] = remote_service - for rid in relation_ids(rel_name): - # This subordinate can be related to two separate services using - # different subordinate relations so only issue the restart if - # the principle is connected down the relation we think it is - if related_units(relid=rid): - relation_set(relation_id=rid, - relation_settings=trigger, - ) - - -def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and ports - are actually closed (not listened to), to verify that the unit is - properly paused. - - @param services: See _extract_services_list_helper - @returns status, : string for status (None if okay) - message : string for problem for status_set - """ - state = None - message = None - messages = [] - if services is not None: - services = _extract_services_list_helper(services) - services_running, services_states = _check_running_services(services) - if any(services_states): - # there shouldn't be any running so this is a problem - messages.append("these services running: {}" - .format(", ".join( - _filter_tuples(services_running, True)))) - state = "blocked" - ports_open, ports_open_bools = ( - _check_listening_on_services_ports(services, True)) - if any(ports_open_bools): - message_parts = {service: ", ".join([str(v) for v in open_ports]) - for service, open_ports in ports_open.items()} - message = ", ".join( - ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) - messages.append( - "these service:ports are open: {}".format(message)) - state = 'blocked' - if ports is not None: - ports_open, bools = _check_listening_on_ports_list(ports) - if any(bools): - messages.append( - "these ports which should be closed, but are open: {}" - .format(", ".join([str(p) for p, v in ports_open if v]))) - state = 'blocked' - if messages: - message = ("Services should be paused but {}" - .format(", ".join(messages))) - return state, message - - -def set_unit_paused(): - """Set the unit to a paused state in the local kv() store. - This does NOT actually pause the unit - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', True) - - -def clear_unit_paused(): - """Clear the unit from a paused state in the local kv() store - This does NOT actually restart any services - it only clears the - local state. - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', False) - - -def is_unit_paused_set(): - """Return the state of the kv().get('unit-paused'). - This does NOT verify that the unit really is paused. 
- 
-     To help with units that don't have HookData() (testing):
-     if it raises an exception, return False.
-     """
-     try:
-         with unitdata.HookData()() as t:
-             kv = t[0]
-             # transform something truth-y into a Boolean.
-             return not(not(kv.get('unit-paused')))
-     except Exception:
-         return False
- 
- 
- def is_hook_allowed(hookname, check_deferred_restarts=True):
-     """Check if hook can run.
- 
-     :param hookname: Name of hook to check.
-     :type hookname: str
-     :param check_deferred_restarts: Whether to check deferred restarts.
-     :type check_deferred_restarts: bool
-     """
-     permitted = True
-     reasons = []
-     if is_unit_paused_set():
-         reasons.append(
-             "Unit is paused or upgrading. Skipping {}".format(hookname))
-         permitted = False
- 
-     if check_deferred_restarts:
-         if deferred_events.is_restart_permitted():
-             permitted = True
-             deferred_events.clear_deferred_hook(hookname)
-         else:
-             if not config().changed('enable-auto-restarts'):
-                 deferred_events.set_deferred_hook(hookname)
-             reasons.append("auto restarts are disabled")
-             permitted = False
-     return permitted, " and ".join(reasons)
- 
- 
- def manage_payload_services(action, services=None, charm_func=None):
-     """Run an action against all services.
- 
-     An optional charm_func() can be called. It should raise an Exception to
-     indicate that the function failed. If it was successful it should return
-     None or an optional message.
- 
-     The signature for charm_func is:
-     charm_func() -> message: str
- 
-     charm_func() is executed after any services are stopped, if supplied.
- 
-     The services object can either be:
-       - None : no services were passed (an empty dict is returned)
-       - a list of strings
-       - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
-       - An array of [{'service': service_name, ...}, ...]
- 
-     :param action: Action to run: pause, resume, start or stop.
-     :type action: str
-     :param services: See above
-     :type services: See above
-     :param charm_func: function to run for custom charm pausing.
-     :type charm_func: f()
-     :returns: Status boolean and list of messages
-     :rtype: (bool, [])
-     :raises: RuntimeError
-     """
-     actions = {
-         'pause': service_pause,
-         'resume': service_resume,
-         'start': service_start,
-         'stop': service_stop}
-     action = action.lower()
-     if action not in actions.keys():
-         raise RuntimeError(
-             "action: {} must be one of: {}".format(action,
-                                                    ', '.join(actions.keys())))
-     services = _extract_services_list_helper(services)
-     messages = []
-     success = True
-     if services:
-         for service in services.keys():
-             rc = actions[action](service)
-             if not rc:
-                 success = False
-                 messages.append("{} didn't {} cleanly.".format(service,
-                                                                action))
-     if charm_func:
-         try:
-             message = charm_func()
-             if message:
-                 messages.append(message)
-         except Exception as e:
-             success = False
-             messages.append(str(e))
-     return success, messages
- 
- 
- def make_wait_for_ports_barrier(ports, retry_count=5):
-     """Make a function to wait for port shutdowns.
- 
-     Create a function which closes over the provided ports. The function will
-     retry probing ports until they are closed or the retry count has been
-     reached.
-     """
-     @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1)
-     def retry_port_check():
-         _, ports_states = _check_listening_on_ports_list(ports)
-         juju_log("Probe ports {}, result: {}".format(ports, ports_states),
-                  level="DEBUG")
-         return any(ports_states)
-     return retry_port_check
- 
- 
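As a usage sketch (editorial; the service names and port are invented), the dispatcher above can drive any of the four actions, and the port barrier above gives pause-type code something to poll:

    success, messages = manage_payload_services(
        'stop', services=['apache2', 'haproxy'])
    if not success:
        raise RuntimeError('; '.join(messages))

    wait_for_closed = make_wait_for_ports_barrier([8080])
    wait_for_closed()  # re-probes until the ports close or retries run out
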
- def pause_unit(assess_status_func, services=None, ports=None,
-                charm_func=None):
-     """Pause a unit by stopping the services and setting 'unit-paused'
-     in the local kv() store.
- 
-     Also checks that the services have stopped and ports are no longer
-     being listened to.
- 
-     An optional charm_func() can be called that can either raise an
-     Exception or return a non-None message to indicate that the unit
-     didn't pause cleanly.
- 
-     The signature for charm_func is:
-     charm_func() -> message: string
- 
-     charm_func() is executed after any services are stopped, if supplied.
- 
-     The services object can either be:
-       - None : no services were passed (an empty dict is returned)
-       - a list of strings
-       - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
-       - An array of [{'service': service_name, ...}, ...]
- 
-     @param assess_status_func: (f() -> message: string | None) or None
-     @param services: OPTIONAL see above
-     @param ports: OPTIONAL list of ports
-     @param charm_func: function to run for custom charm pausing.
-     @returns None
-     @raises Exception(message) on an error for action_fail().
-     """
-     _, messages = manage_payload_services(
-         'pause',
-         services=services,
-         charm_func=charm_func)
-     set_unit_paused()
- 
-     if assess_status_func:
-         message = assess_status_func()
-         if message:
-             messages.append(message)
-     if messages and not is_unit_upgrading_set():
-         raise Exception("Couldn't pause: {}".format("; ".join(messages)))
- 
- 
- def resume_unit(assess_status_func, services=None, ports=None,
-                 charm_func=None):
-     """Resume a unit by starting the services and clearing 'unit-paused'
-     in the local kv() store.
- 
-     Also checks that the services have started and ports are being listened
-     to.
- 
-     An optional charm_func() can be called that can either raise an
-     Exception or return a non-None message to indicate that the unit
-     didn't resume cleanly.
- 
-     The signature for charm_func is:
-     charm_func() -> message: string
- 
-     charm_func() is executed after any services are started, if supplied.
- 
-     The services object can either be:
-       - None : no services were passed (an empty dict is returned)
-       - a list of strings
-       - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
-       - An array of [{'service': service_name, ...}, ...]
- 
-     @param assess_status_func: (f() -> message: string | None) or None
-     @param services: OPTIONAL see above
-     @param ports: OPTIONAL list of ports
-     @param charm_func: function to run for custom charm resuming.
-     @returns None
-     @raises Exception(message) on an error for action_fail().
-     """
-     _, messages = manage_payload_services(
-         'resume',
-         services=services,
-         charm_func=charm_func)
-     clear_unit_paused()
-     if assess_status_func:
-         message = assess_status_func()
-         if message:
-             messages.append(message)
-     if messages:
-         raise Exception("Couldn't resume: {}".format("; ".join(messages)))
- 
- 
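A hedged sketch of the pause/resume action handlers a charm typically wires up; assess_status_func would come from make_assess_status_func() just below, and the service list and port are illustrative placeholders:

    def pause(args):
        pause_unit(assess_status_func, services=['apache2'], ports=[8080])

    def resume(args):
        resume_unit(assess_status_func, services=['apache2'], ports=[8080])
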
- def restart_services_action(services=None, when_all_stopped_func=None,
-                             deferred_only=None):
-     """Manage a service restart request via charm action.
- 
-     :param services: Services to be restarted
-     :type services: List[str]
-     :param when_all_stopped_func: Function to call when all services are
-                                   stopped.
-     :type when_all_stopped_func: Callable[[], Optional[str]]
-     :param deferred_only: Only restart services which have a deferred
-                           restart event.
-     :type deferred_only: bool
-     """
-     if services and deferred_only:
-         raise ValueError(
-             "services and deferred_only are mutually exclusive")
-     if deferred_only:
-         services = list(set(
-             [a.service for a in deferred_events.get_deferred_restarts()]))
-     _, messages = manage_payload_services(
-         'stop',
-         services=services,
-         charm_func=when_all_stopped_func)
-     if messages:
-         raise ServiceActionError(
-             "Error processing service stop request: {}".format(
-                 "; ".join(messages)))
-     _, messages = manage_payload_services(
-         'start',
-         services=services)
-     if messages:
-         raise ServiceActionError(
-             "Error processing service start request: {}".format(
-                 "; ".join(messages)))
-     deferred_events.clear_deferred_restarts(services)
- 
- 
- def make_assess_status_func(*args, **kwargs):
-     """Creates an assess_status_func() suitable for handing to pause_unit()
-     and resume_unit().
- 
-     This uses the _determine_os_workload_status(...) function to determine
-     what the workload_status should be for the unit.  If the unit is
-     not in maintenance or active states, then the message is returned to
-     the caller.  This is so an action that doesn't result in either a
-     complete pause or complete resume can signal failure with an action_fail()
-     """
-     def _assess_status_func():
-         state, message = _determine_os_workload_status(*args, **kwargs)
-         status_set(state, message)
-         if state not in ['maintenance', 'active']:
-             return message
-         return None
- 
-     return _assess_status_func
- 
- 
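For example (editorial; CONFIGS and REQUIRED_INTERFACES are charm-side placeholders, and the service name is invented), the factory above pairs naturally with pause_unit()/resume_unit():

    assess_status_func = make_assess_status_func(
        CONFIGS, REQUIRED_INTERFACES, charm_func=None,
        services=['apache2'], ports=None)
    resume_unit(assess_status_func, services=['apache2'])
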
- def pausable_restart_on_change(restart_map, stopstart=False,
-                                restart_functions=None,
-                                can_restart_now_f=None,
-                                post_svc_restart_f=None,
-                                pre_restarts_wait_f=None):
-     """A restart_on_change decorator that checks to see if the unit is
-     paused.  If it is paused then the decorated function doesn't fire.
- 
-     This is provided as a helper, as the @restart_on_change(...) decorator
-     is in core.host, yet the openstack specific helpers are in this file
-     (contrib.openstack.utils).  Thus, this needs to be an optional feature
-     for openstack charms (or charms that wish to use the openstack
-     pause/resume type features).
- 
-     It is used as follows:
- 
-         from contrib.openstack.utils import (
-             pausable_restart_on_change as restart_on_change)
- 
-         @restart_on_change(restart_map, stopstart=<boolean>)
-         def some_hook(...):
-             pass
- 
-     see core.utils.restart_on_change() for more details.
- 
-     Note restart_map can be a callable, in which case, restart_map is only
-     evaluated at runtime.  This means that it is lazy and the underlying
-     function won't be called if the decorated function is never called.  Note,
-     retains backwards compatibility for passing a non-callable dictionary.
- 
-     :param f: function to decorate.
-     :type f: Callable
-     :param restart_map: Optionally callable, which then returns the
-                         restart_map, or the restart map {conf_file: [services]}
-     :type restart_map: Union[Callable[[], Dict[str, List[str]]],
-                              Dict[str, List[str]]]
-     :param stopstart: whether to stop, start or restart a service
-     :type stopstart: boolean
-     :param restart_functions: nonstandard functions to use to restart services
-                               {svc: func, ...}
-     :type restart_functions: Dict[str, Callable[[str], None]]
-     :param can_restart_now_f: A function used to check if the restart is
-                               permitted.
-     :type can_restart_now_f: Callable[[str, List[str]], bool]
-     :param post_svc_restart_f: A function run after a service has
-                                restarted.
-     :type post_svc_restart_f: Callable[[str], None]
-     :param pre_restarts_wait_f: A function called before any restarts.
-     :type pre_restarts_wait_f: Callable[[], None]
-     :returns: decorator to use a restart_on_change with pausability
-     :rtype: decorator
-     """
-     def wrap(f):
-         __restart_map_cache = None
- 
-         @functools.wraps(f)
-         def wrapped_f(*args, **kwargs):
-             nonlocal __restart_map_cache
-             if is_unit_paused_set():
-                 return f(*args, **kwargs)
-             if __restart_map_cache is None:
-                 __restart_map_cache = restart_map() \
-                     if callable(restart_map) else restart_map
-             # otherwise, normal restart_on_change functionality
-             return restart_on_change_helper(
-                 (lambda: f(*args, **kwargs)),
-                 __restart_map_cache,
-                 stopstart,
-                 restart_functions,
-                 can_restart_now_f,
-                 post_svc_restart_f,
-                 pre_restarts_wait_f)
-         return wrapped_f
-     return wrap
- 
- 
- def ordered(orderme):
-     """Converts the provided dictionary into a collections.OrderedDict.
- 
-     The items in the returned OrderedDict will be inserted based on the
-     natural sort order of the keys. Nested dictionaries will also be sorted
-     in order to ensure fully predictable ordering.
- 
-     :param orderme: the dict to order
-     :return: collections.OrderedDict
-     :raises: ValueError: if `orderme` isn't a dict instance.
-     """
-     if not isinstance(orderme, dict):
-         raise ValueError('argument must be a dict type')
- 
-     result = OrderedDict()
-     for k, v in sorted(orderme.items(), key=lambda x: x[0]):
-         if isinstance(v, dict):
-             result[k] = ordered(v)
-         else:
-             result[k] = v
- 
-     return result
- 
- 
- def config_flags_parser(config_flags):
-     """Parses config flags string into dict.
- 
-     This parsing method supports a few different formats for the config
-     flag values to be parsed:
- 
-       1. A string in the simple format of key=value pairs, with the
-          possibility of specifying multiple key value pairs within the same
-          string. For example, a string in the format of
-          'key1=value1, key2=value2' will return a dict of:
- 
-              {'key1': 'value1', 'key2': 'value2'}
- 
-       2. A string in the above format, but supporting a comma-delimited list
-          of values for the same key. For example, a string in the format of
-          'key1=value1, key2=value3,value4,value5' will return a dict of:
- 
-              {'key1': 'value1', 'key2': 'value3,value4,value5'}
- 
-       3. A string containing a colon character (:) prior to an equal
-          character (=) will be treated as yaml and parsed as such. This can
-          be used to specify more complex key value pairs. For example,
-          a string in the format of 'key1: subkey1=value1, subkey2=value2'
-          will return a dict of:
- 
-              {'key1': 'subkey1=value1, subkey2=value2'}
- 
-     The provided config_flags string may be a list of comma-separated values
-     which themselves may be comma-separated lists of values.
-     """
-     # If we find a colon before an equals sign then treat it as yaml.
-     # Note: limit it to finding the colon first since this indicates
-     # assignment for inline yaml.
-     colon = config_flags.find(':')
-     equals = config_flags.find('=')
-     if colon > 0:
-         if colon < equals or equals < 0:
-             return ordered(yaml.safe_load(config_flags))
- 
-     if config_flags.find('==') >= 0:
-         juju_log("config_flags is not in expected format (key=value)",
-                  level=ERROR)
-         raise OSContextError
- 
-     # strip the following from each value.
-     post_strippers = ' ,'
-     # we strip any leading/trailing '=' or ' ' from the string then
-     # split on '='.
- split = config_flags.strip(' =').split('=') - limit = len(split) - flags = OrderedDict() - for i in range(0, limit - 1): - current = split[i] - next = split[i + 1] - vindex = next.rfind(',') - if (i == limit - 2) or (vindex < 0): - value = next - else: - value = next[:vindex] - - if i == 0: - key = current - else: - # if this not the first entry, expect an embedded key. - index = current.rfind(',') - if index < 0: - juju_log("Invalid config value(s) at index %s" % (i), - level=ERROR) - raise OSContextError - key = current[index + 1:] - - # Add to collection. - flags[key.strip(post_strippers)] = value.rstrip(post_strippers) - - return flags - - -def os_application_version_set(package): - '''Set version of application for Juju 2.0 and later''' - application_version = get_upstream_version(package) - # NOTE(jamespage) if not able to figure out package version, fallback to - # openstack codename version detection. - if not application_version: - application_version_set(os_release(package)) - else: - application_version_set(application_version) - - -def os_application_status_set(check_function): - """Run the supplied function and set the application status accordingly. - - :param check_function: Function to run to get app states and messages. - :type check_function: function - """ - state, message = check_function() - status_set(state, message, application=True) - - -def enable_memcache(source=None, release=None, package=None): - """Determine if memcache should be enabled on the local unit - - @param release: release of OpenStack currently deployed - @param package: package to derive OpenStack version deployed - @returns boolean Whether memcache should be enabled - """ - _release = None - if release: - _release = release - else: - _release = os_release(package) - if not _release: - _release = get_os_codename_install_source(source) - - return CompareOpenStackReleases(_release) >= 'mitaka' - - -def token_cache_pkgs(source=None, release=None): - """Determine additional packages needed for token caching - - @param source: source string for charm - @param release: release of OpenStack currently deployed - @returns List of package to enable token caching - """ - packages = [] - if enable_memcache(source=source, release=release): - packages.extend(['memcached', 'python-memcache']) - return packages - - -def update_json_file(filename, items): - """Updates the json `filename` with a given dict. - :param filename: path to json file (e.g. /etc/glance/policy.json) - :param items: dict of items to update - """ - if not items: - return - - with open(filename) as fd: - policy = json.load(fd) - - # Compare before and after and if nothing has changed don't write the file - # since that could cause unnecessary service restarts. - before = json.dumps(policy, indent=4, sort_keys=True) - policy.update(items) - after = json.dumps(policy, indent=4, sort_keys=True) - if before == after: - return - - with open(filename, "w") as fd: - fd.write(after) - - -@cached -def snap_install_requested(): - """ Determine if installing from snaps - - If openstack-origin is of the form snap:track/channel[/branch] - and channel is in SNAPS_CHANNELS return True. 
- """ - origin = config('openstack-origin') or "" - if not origin.startswith('snap:'): - return False - - _src = origin[5:] - if '/' in _src: - channel = _src.split('/')[1] - else: - # Handle snap:track with no channel - channel = 'stable' - return valid_snap_channel(channel) - - -def get_snaps_install_info_from_origin(snaps, src, mode='classic'): - """Generate a dictionary of snap install information from origin - - @param snaps: List of snaps - @param src: String of openstack-origin or source of the form - snap:track/channel - @param mode: String classic, devmode or jailmode - @returns: Dictionary of snaps with channels and modes - """ - - if not src.startswith('snap:'): - juju_log("Snap source is not a snap origin", 'WARN') - return {} - - _src = src[5:] - channel = '--channel={}'.format(_src) - - return {snap: {'channel': channel, 'mode': mode} - for snap in snaps} - - -def install_os_snaps(snaps, refresh=False): - """Install OpenStack snaps from channel and with mode - - @param snaps: Dictionary of snaps with channels and modes of the form: - {'snap_name': {'channel': 'snap_channel', - 'mode': 'snap_mode'}} - Where channel is a snapstore channel and mode is --classic, --devmode - or --jailmode. - @param post_snap_install: Callback function to run after snaps have been - installed - """ - - def _ensure_flag(flag): - if flag.startswith('--'): - return flag - return '--{}'.format(flag) - - if refresh: - for snap in snaps.keys(): - snap_refresh(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) - else: - for snap in snaps.keys(): - snap_install(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) - - -def set_unit_upgrading(): - """Set the unit to a upgrading state in the local kv() store. - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-upgrading', True) - - -def clear_unit_upgrading(): - """Clear the unit from a upgrading state in the local kv() store - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-upgrading', False) - - -def is_unit_upgrading_set(): - """Return the state of the kv().get('unit-upgrading'). - - To help with units that don't have HookData() (testing) - if it excepts, return False - """ - try: - with unitdata.HookData()() as t: - kv = t[0] - # transform something truth-y into a Boolean. - return not(not(kv.get('unit-upgrading'))) - except Exception: - return False - - -def series_upgrade_prepare(pause_unit_helper=None, configs=None): - """ Run common series upgrade prepare tasks. - - :param pause_unit_helper: function: Function to pause unit - :param configs: OSConfigRenderer object: Configurations - :returns None: - """ - set_unit_upgrading() - if pause_unit_helper and configs: - if not is_unit_paused_set(): - pause_unit_helper(configs) - - -def series_upgrade_complete(resume_unit_helper=None, configs=None): - """ Run common series upgrade complete tasks. - - :param resume_unit_helper: function: Function to resume unit - :param configs: OSConfigRenderer object: Configurations - :returns None: - """ - clear_unit_paused() - clear_unit_upgrading() - if configs: - configs.write_all() - if resume_unit_helper: - resume_unit_helper(configs) - - -def is_db_initialised(): - """Check leader storage to see if database has been initialised. 
- - :returns: Whether DB has been initialised - :rtype: bool - """ - db_initialised = None - if leader_get('db-initialised') is None: - juju_log( - 'db-initialised key missing, assuming db is not initialised', - 'DEBUG') - db_initialised = False - else: - db_initialised = bool_from_string(leader_get('db-initialised')) - juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') - return db_initialised - - -def set_db_initialised(): - """Add flag to leader storage to indicate database has been initialised. - """ - juju_log('Setting db-initialised to True', 'DEBUG') - leader_set({'db-initialised': True}) - - -def is_db_maintenance_mode(relid=None): - """Check relation data from notifications of db in maintenance mode. - - :returns: Whether db has notified it is in maintenance mode. - :rtype: bool - """ - juju_log('Checking for maintenance notifications', 'DEBUG') - if relid: - r_ids = [relid] - else: - r_ids = relation_ids('shared-db') - rids_units = [(r, u) for r in r_ids for u in related_units(r)] - notifications = [] - for r_id, unit in rids_units: - settings = relation_get(unit=unit, rid=r_id) - for key, value in settings.items(): - if value and key in DB_MAINTENANCE_KEYS: - juju_log( - 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), - 'DEBUG') - try: - notifications.append(bool_from_string(value)) - except ValueError: - juju_log( - 'Could not discern bool from {}'.format(value), - 'WARN') - pass - return True in notifications - - -@cached -def container_scoped_relations(): - """Get all the container scoped relations - - :returns: List of relation names - :rtype: List - """ - md = metadata() - relations = [] - for relation_type in ('provides', 'requires', 'peers'): - for relation in md.get(relation_type, []): - if md[relation_type][relation].get('scope') == 'container': - relations.append(relation) - return relations - - -def container_scoped_relation_get(attribute=None): - """Get relation data from all container scoped relations. - - :param attribute: Name of attribute to get - :type attribute: Optional[str] - :returns: Iterator with relation data - :rtype: Iterator[Optional[any]] - """ - for endpoint_name in container_scoped_relations(): - for rid in relation_ids(endpoint_name): - for unit in related_units(rid): - yield relation_get( - attribute=attribute, - unit=unit, - rid=rid) - - -def is_db_ready(use_current_context=False, rel_name=None): - """Check remote database is ready to be used. - - Database relations are expected to provide a list of 'allowed' units to - confirm that the database is ready for use by those units. - - If db relation has provided this information and local unit is a member, - returns True otherwise False. - - :param use_current_context: Whether to limit checks to current hook - context. - :type use_current_context: bool - :param rel_name: Name of relation to check - :type rel_name: string - :returns: Whether remote db is ready. - :rtype: bool - :raises: Exception - """ - key = 'allowed_units' - - rel_name = rel_name or 'shared-db' - this_unit = local_unit() - - if use_current_context: - if relation_id() in relation_ids(rel_name): - rids_units = [(None, None)] - else: - raise Exception("use_current_context=True but not in {} " - "rel hook contexts (currently in {})." 
- .format(rel_name, relation_id())) - else: - rids_units = [(r_id, u) - for r_id in relation_ids(rel_name) - for u in related_units(r_id)] - - for rid, unit in rids_units: - allowed_units = relation_get(rid=rid, unit=unit, attribute=key) - if allowed_units and this_unit in allowed_units.split(): - juju_log("This unit ({}) is in allowed unit list from {}".format( - this_unit, - unit), 'DEBUG') - return True - - juju_log("This unit was not found in any allowed unit list") - return False - - -def is_expected_scale(peer_relation_name='cluster'): - """Query juju goal-state to determine whether our peer- and dependency- - relations are at the expected scale. - - Useful for deferring per unit per relation housekeeping work until we are - ready to complete it successfully and without unnecessary repetiton. - - Always returns True if version of juju used does not support goal-state. - - :param peer_relation_name: Name of peer relation - :type rel_name: string - :returns: True or False - :rtype: bool - """ - def _get_relation_id(rel_type): - return next((rid for rid in relation_ids(reltype=rel_type)), None) - - Relation = namedtuple('Relation', 'rel_type rel_id') - peer_rid = _get_relation_id(peer_relation_name) - # Units with no peers should still have a peer relation. - if not peer_rid: - juju_log('Not at expected scale, no peer relation found', 'DEBUG') - return False - expected_relations = [ - Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] - if expect_ha(): - expected_relations.append( - Relation( - rel_type='ha', - rel_id=_get_relation_id('ha'))) - juju_log( - 'Checking scale of {} relations'.format( - ','.join([r.rel_type for r in expected_relations])), - 'DEBUG') - try: - if (len(related_units(relid=peer_rid)) < - len(list(expected_peer_units()))): - return False - for rel in expected_relations: - if not rel.rel_id: - juju_log( - 'Expected to find {} relation, but it is missing'.format( - rel.rel_type), - 'DEBUG') - return False - # Goal state returns every unit even for container scoped - # relations but the charm only ever has a relation with - # the local unit. - if rel.rel_type in container_scoped_relations(): - expected_count = 1 - else: - expected_count = len( - list(expected_related_units(reltype=rel.rel_type))) - if len(related_units(relid=rel.rel_id)) < expected_count: - juju_log( - ('Not at expected scale, not enough units on {} ' - 'relation'.format(rel.rel_type)), - 'DEBUG') - return False - except NotImplementedError: - return True - juju_log('All checks have passed, unit is at expected scale', 'DEBUG') - return True - - -def get_peer_key(unit_name): - """Get the peer key for this unit. - - The peer key is the key a unit uses to publish its status down the peer - relation - - :param unit_name: Name of unit - :type unit_name: string - :returns: Peer key for given unit - :rtype: string - """ - return 'unit-state-{}'.format(unit_name.replace('/', '-')) - - -UNIT_READY = 'READY' -UNIT_NOTREADY = 'NOTREADY' -UNIT_UNKNOWN = 'UNKNOWN' -UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] - - -def inform_peers_unit_state(state, relation_name='cluster'): - """Inform peers of the state of this unit. 
- - :param state: State of unit to publish - :type state: string - :param relation_name: Name of relation to publish state on - :type relation_name: string - """ - if state not in UNIT_STATES: - raise ValueError( - "Setting invalid state {} for unit".format(state)) - this_unit = local_unit() - for r_id in relation_ids(relation_name): - juju_log('Telling peer behind relation {} that {} is {}'.format( - r_id, this_unit, state), 'DEBUG') - relation_set(relation_id=r_id, - relation_settings={ - get_peer_key(this_unit): state}) - - -def get_peers_unit_state(relation_name='cluster'): - """Get the state of all peers. - - :param relation_name: Name of relation to check peers on. - :type relation_name: string - :returns: Unit states keyed on unit name. - :rtype: dict - :raises: ValueError - """ - r_ids = relation_ids(relation_name) - rids_units = [(r, u) for r in r_ids for u in related_units(r)] - unit_states = {} - for r_id, unit in rids_units: - settings = relation_get(unit=unit, rid=r_id) - unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) - if unit_states[unit] not in UNIT_STATES: - raise ValueError( - "Unit in unknown state {}".format(unit_states[unit])) - return unit_states - - -def are_peers_ready(relation_name='cluster'): - """Check if all peers are ready. - - :param relation_name: Name of relation to check peers on. - :type relation_name: string - :returns: Whether all units are ready. - :rtype: bool - """ - unit_states = get_peers_unit_state(relation_name).values() - juju_log('{} peers are in the following states: {}'.format( - relation_name, unit_states), 'DEBUG') - return all(state == UNIT_READY for state in unit_states) - - -def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): - """Inform peers if this unit is ready. - - The check function should return a tuple (state, message). A state - of 'READY' indicates the unit is READY. - - :param check_unit_ready_func: Function to run to check readiness - :type check_unit_ready_func: function - :param relation_name: Name of relation to check peers on. - :type relation_name: string - """ - unit_ready, msg = check_unit_ready_func() - if unit_ready: - state = UNIT_READY - else: - state = UNIT_NOTREADY - juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') - inform_peers_unit_state(state, relation_name) - - -def check_api_unit_ready(check_db_ready=True): - """Check if this unit is ready. - - :param check_db_ready: Include checks of database readiness. - :type check_db_ready: bool - :returns: Whether unit state is ready and status message - :rtype: (bool, str) - """ - unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) - return unit_state == WORKLOAD_STATES.ACTIVE, msg - - -def get_api_unit_status(check_db_ready=True): - """Return a workload status and message for this unit. - - :param check_db_ready: Include checks of database readiness. - :type check_db_ready: bool - :returns: Workload state and message - :rtype: (bool, str) - """ - unit_state = WORKLOAD_STATES.ACTIVE - msg = 'Unit is ready' - if is_db_maintenance_mode(): - unit_state = WORKLOAD_STATES.MAINTENANCE - msg = 'Database in maintenance mode.' - elif is_unit_paused_set(): - unit_state = WORKLOAD_STATES.BLOCKED - msg = 'Unit paused.' 
-     elif check_db_ready and not is_db_ready():
-         unit_state = WORKLOAD_STATES.WAITING
-         msg = 'Allowed_units list provided but this unit not present'
-     elif not is_db_initialised():
-         unit_state = WORKLOAD_STATES.WAITING
-         msg = 'Database not initialised'
-     elif not is_expected_scale():
-         unit_state = WORKLOAD_STATES.WAITING
-         msg = 'Charm and its dependencies not yet at expected scale'
-     juju_log(msg, 'DEBUG')
-     return unit_state, msg
- 
- 
- def check_api_application_ready():
-     """Check if this application is ready.
- 
-     :returns: Whether application state is ready and status message
-     :rtype: (bool, str)
-     """
-     app_state, msg = get_api_application_status()
-     return app_state == WORKLOAD_STATES.ACTIVE, msg
- 
- 
- def get_api_application_status():
-     """Return a workload status and message for this application.
- 
-     :returns: Workload state and message
-     :rtype: (bool, str)
-     """
-     app_state, msg = get_api_unit_status()
-     if app_state == WORKLOAD_STATES.ACTIVE:
-         if are_peers_ready():
-             msg = 'Application Ready'
-         else:
-             app_state = WORKLOAD_STATES.WAITING
-             msg = 'Some units are not ready'
-     juju_log(msg, 'DEBUG')
-     return app_state, msg
- 
- 
- def sequence_status_check_functions(*functions):
-     """Sequence the functions passed so that they all get a chance to run as
-     the charm status check functions.
- 
-     :param *functions: a list of functions that return (state, message)
-     :type *functions: List[Callable[[OSConfigRenderer], (str, str)]]
-     :returns: the Callable that takes configs and returns (state, message)
-     :rtype: Callable[[OSConfigRenderer], (str, str)]
-     """
-     def _inner_sequenced_functions(configs):
-         state, message = 'unknown', ''
-         for f in functions:
-             new_state, new_message = f(configs)
-             state = workload_state_compare(state, new_state)
-             if message:
-                 message = "{}, {}".format(message, new_message)
-             else:
-                 message = new_message
-         return state, message
- 
-     return _inner_sequenced_functions
- 
- 
- SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge'])
- 
- 
- def get_subordinate_release_packages(os_release, package_type='deb'):
-     """Iterate over subordinate relations and get package information.
- 
-     :param os_release: OpenStack release to look for
-     :type os_release: str
-     :param package_type: Package type (one of 'deb' or 'snap')
-     :type package_type: str
-     :returns: Packages to install and packages to purge or None
-     :rtype: SubordinatePackages[set,set]
-     """
-     install = set()
-     purge = set()
- 
-     for rdata in container_scoped_relation_get('releases-packages-map'):
-         rp_map = json.loads(rdata or '{}')
-         # The map provided by subordinate has OpenStack release name as key.
-         # Find package information from subordinate matching requested release
-         # or the most recent release prior to requested release by sorting the
-         # keys in reverse order. This follows established patterns in our
-         # charms for templates and reactive charm implementations, i.e. as long
-         # as nothing has changed the definitions for the prior OpenStack
-         # release is still valid.
-         for release in sorted(rp_map.keys(), reverse=True):
-             if (CompareOpenStackReleases(release) <= os_release and
-                     package_type in rp_map[release]):
-                 for name, container in (
-                         ('install', install),
-                         ('purge', purge)):
-                     for pkg in rp_map[release][package_type].get(name, []):
-                         container.add(pkg)
-                 break
-     return SubordinatePackages(install, purge)
- 
- 
- def get_subordinate_services():
-     """Iterate over subordinate relations and get service information.
- 
-     In a similar fashion as with get_subordinate_release_packages(),
-     principal charms can retrieve a list of services advertised by their
-     subordinate charms. This is useful to know about subordinate services
-     when pausing, resuming or upgrading a principal unit.
- 
-     :returns: Name of all services advertised by all subordinates
-     :rtype: Set[str]
-     """
-     services = set()
-     for rdata in container_scoped_relation_get('services'):
-         services |= set(json.loads(rdata or '[]'))
-     return services
- 
- 
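A consumption sketch for the two subordinate helpers above (editorial; the release name is invented, and apt_install/apt_purge are assumed to be imported from charmhelpers.fetch):

    pkgs = get_subordinate_release_packages('ussuri')
    if pkgs.install:
        apt_install(sorted(pkgs.install), fatal=True)
    if pkgs.purge:
        apt_purge(sorted(pkgs.purge), fatal=True)
    services = get_subordinate_services()  # e.g. include in pause/resume
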
- os_restart_on_change = partial(
-     pausable_restart_on_change,
-     can_restart_now_f=deferred_events.check_and_record_restart_request,
-     post_svc_restart_f=deferred_events.process_svc_restart)
- 
- 
- def restart_services_action_helper(all_services):
-     """Helper to run the restart-services action.
- 
-     NOTE: all_services is all services that could be restarted but
-           depending on the action arguments it may be a subset of
-           these that are actually restarted.
- 
-     :param all_services: All services that could be restarted
-     :type all_services: List[str]
-     """
-     deferred_only = action_get("deferred-only")
-     services = action_get("services")
-     if services:
-         services = services.split()
-     else:
-         services = all_services
-     if deferred_only:
-         restart_services_action(deferred_only=True)
-     else:
-         restart_services_action(services=services)
- 
- 
- def show_deferred_events_action_helper():
-     """Helper to run the show-deferred-restarts action."""
-     restarts = []
-     for event in deferred_events.get_deferred_events():
-         restarts.append('{} {} {}'.format(
-             str(event.timestamp),
-             event.service.ljust(40),
-             event.reason))
-     restarts.sort()
-     output = {
-         'restarts': restarts,
-         'hooks': deferred_events.get_deferred_hooks()}
-     action_set({'output': "{}".format(
-         yaml.dump(output, default_flow_style=False))})
diff --git a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py
deleted file mode 100644
index e5418c39..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/openstack/vaultlocker.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2018-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- 
- import json
- import os
- 
- import charmhelpers.contrib.openstack.alternatives as alternatives
- import charmhelpers.contrib.openstack.context as context
- 
- import charmhelpers.core.hookenv as hookenv
- import charmhelpers.core.host as host
- import charmhelpers.core.templating as templating
- import charmhelpers.core.unitdata as unitdata
- 
- VAULTLOCKER_BACKEND = 'charm-vaultlocker'
- 
- 
- class VaultKVContext(context.OSContextGenerator):
-     """Vault KV context for interaction with vault-kv interfaces"""
-     interfaces = ['secrets-storage']
- 
-     def __init__(self, secret_backend=None):
-         super(context.OSContextGenerator, self).__init__()
-         self.secret_backend = (
-             secret_backend or 'charm-{}'.format(hookenv.service_name())
-         )
- 
-     def __call__(self):
-         try:
-             import hvac
-         except ImportError:
-             # BUG: #1862085 - if the relation is made to vault, but the
-             # 'encrypt' option is not made, then the charm errors with an
-             # import warning.  This catches that, logs a warning, and returns
-             # with an empty context.
-             hookenv.log("VaultKVContext: trying to use the hvac python module "
-                         "but it's not available.  Is the secrets-storage "
-                         "relation made, but the encrypt option not set?",
-                         level=hookenv.WARNING)
-             # return an empty context on hvac import error
-             return {}
-         ctxt = {}
-         # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
-         db = unitdata.kv()
-         # currently known-good secret-id
-         secret_id = db.get('secret-id')
- 
-         for relation_id in hookenv.relation_ids(self.interfaces[0]):
-             for unit in hookenv.related_units(relation_id):
-                 data = hookenv.relation_get(unit=unit,
-                                             rid=relation_id)
-                 vault_url = data.get('vault_url')
-                 role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
-                 token = data.get('{}_token'.format(hookenv.local_unit()))
- 
-                 if all([vault_url, role_id, token]):
-                     token = json.loads(token)
-                     vault_url = json.loads(vault_url)
- 
-                     # Tokens may change when secret_id's are being
-                     # reissued - if so use token to get new secret_id
-                     token_success = False
-                     try:
-                         secret_id = retrieve_secret_id(
-                             url=vault_url,
-                             token=token
-                         )
-                         token_success = True
-                     except hvac.exceptions.InvalidRequest:
-                         # Try next
-                         pass
- 
-                     if token_success:
-                         db.set('secret-id', secret_id)
-                         db.flush()
- 
-                         ctxt['vault_url'] = vault_url
-                         ctxt['role_id'] = json.loads(role_id)
-                         ctxt['secret_id'] = secret_id
-                         ctxt['secret_backend'] = self.secret_backend
-                         vault_ca = data.get('vault_ca')
-                         if vault_ca:
-                             ctxt['vault_ca'] = json.loads(vault_ca)
- 
-                         self.complete = True
-                         break
-                     else:
-                         if secret_id:
-                             ctxt['vault_url'] = vault_url
-                             ctxt['role_id'] = json.loads(role_id)
-                             ctxt['secret_id'] = secret_id
-                             ctxt['secret_backend'] = self.secret_backend
-                             vault_ca = data.get('vault_ca')
-                             if vault_ca:
-                                 ctxt['vault_ca'] = json.loads(vault_ca)
- 
-             if self.complete:
-                 break
- 
-         if ctxt:
-             self.complete = True
- 
-         return ctxt
- 
- 
- def write_vaultlocker_conf(context, priority=100):
-     """Write vaultlocker configuration to disk and install alternative
- 
-     :param context: Dict of data from vault-kv relation
-     :ptype: context: dict
-     :param priority: Priority of alternative configuration
-     :ptype: priority: int"""
-     charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
-         hookenv.service_name()
-     )
-     host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
-     templating.render(source='vaultlocker.conf.j2',
-                       target=charm_vl_path,
-                       context=context, perms=0o600)
-     alternatives.install_alternative('vaultlocker.conf',
-                                      '/etc/vaultlocker/vaultlocker.conf',
-                                      charm_vl_path, priority)
- 
- 
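An editorial usage sketch for the context and writer above (a hook execution context is assumed; this is not code from the patch):

    ctxt = VaultKVContext()()      # backend defaults to 'charm-<service-name>'
    if ctxt:                       # empty dict until vault supplies credentials
        write_vaultlocker_conf(ctxt)
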
- def vault_relation_complete(backend=None):
-     """Determine whether vault relation is complete
- 
-     :param backend: Name of secrets backend requested
-     :ptype backend: string
-     :returns: whether the relation to vault is complete
-     :rtype: bool"""
-     try:
-         import hvac
-     except ImportError:
-         return False
-     try:
-         vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
-         vault_kv()
-         return vault_kv.complete
-     except hvac.exceptions.InvalidRequest:
-         return False
- 
- 
- # TODO: contribute a high-level unwrap method to hvac that works
- def retrieve_secret_id(url, token):
-     """Retrieve a response-wrapped secret_id from Vault
- 
-     :param url: URL to Vault Server
-     :ptype url: str
-     :param token: One shot Token to use
-     :ptype token: str
-     :returns: secret_id to use for Vault Access
-     :rtype: str"""
-     import hvac
-     try:
-         # hvac 0.10.1 changed default adapter to JSONAdapter
-         client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request)
-     except AttributeError:
-         # hvac < 0.6.2 doesn't have adapter but uses the same response interface
-         client = hvac.Client(url=url, token=token)
-     else:
-         # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
-         if not isinstance(client.adapter, hvac.adapters.Request):
-             client.adapter = hvac.adapters.Request(base_uri=url, token=token)
-     response = client._post('/v1/sys/wrapping/unwrap')
-     if response.status_code == 200:
-         data = response.json()
-         return data['data']['secret_id']
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py
deleted file mode 100644
index 605991e1..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/bcache.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2017 Canonical Limited.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import json - -from charmhelpers.core.hookenv import log - -stats_intervals = ['stats_day', 'stats_five_minute', - 'stats_hour', 'stats_total'] - -SYSFS = '/sys' - - -class Bcache(object): - """Bcache behaviour - """ - - def __init__(self, cachepath): - self.cachepath = cachepath - - @classmethod - def fromdevice(cls, devname): - return cls('{}/block/{}/bcache'.format(SYSFS, devname)) - - def __str__(self): - return self.cachepath - - def get_stats(self, interval): - """Get cache stats - """ - intervaldir = 'stats_{}'.format(interval) - path = "{}/{}".format(self.cachepath, intervaldir) - out = dict() - for elem in os.listdir(path): - out[elem] = open('{}/{}'.format(path, elem)).read().strip() - return out - - -def get_bcache_fs(): - """Return all cache sets - """ - cachesetroot = "{}/fs/bcache".format(SYSFS) - try: - dirs = os.listdir(cachesetroot) - except OSError: - log("No bcache fs found") - return [] - cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) - return cacheset - - -def get_stats_action(cachespec, interval): - """Action for getting bcache statistics for a given cachespec. - Cachespec can either be a device name, eg. 'sdb', which will retrieve - cache stats for the given device, or 'global', which will retrieve stats - for all cachesets - """ - if cachespec == 'global': - caches = get_bcache_fs() - else: - caches = [Bcache.fromdevice(cachespec)] - res = dict((c.cachepath, c.get_stats(interval)) for c in caches) - return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py deleted file mode 100644 index 1b20b8fe..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ /dev/null @@ -1,2384 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import collections -import errno -import hashlib -import math - -import os -import shutil -import json -import time -import uuid - -from subprocess import ( - check_call, - check_output, - CalledProcessError, -) -from charmhelpers import deprecate -from charmhelpers.core.hookenv import ( - application_name, - config, - service_name, - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core.host import ( - mount, - mounts, - service_start, - service_stop, - service_running, - umount, - cmp_pkgrevno, -) -from charmhelpers.fetch import ( - apt_install, -) -from charmhelpers.core.unitdata import kv - -from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" - -# The number of placement groups per OSD to target for placement group -# calculations. This number is chosen as 100 due to the ceph PG Calc -# documentation recommending to choose 100 for clusters which are not -# expected to increase in the foreseeable future. Since the majority of the -# calculations are done on deployment, target the case of non-expanding -# clusters as the default. -DEFAULT_PGS_PER_OSD_TARGET = 100 -DEFAULT_POOL_WEIGHT = 10.0 -LEGACY_PG_COUNT = 200 -DEFAULT_MINIMUM_PGS = 2 -AUTOSCALER_DEFAULT_PGS = 32 - - -class OsdPostUpgradeError(Exception): - """Error class for OSD post-upgrade operations.""" - pass - - -class OSDSettingConflict(Exception): - """Error class for conflicting osd setting requests.""" - pass - - -class OSDSettingNotAllowed(Exception): - """Error class for a disallowed setting.""" - pass - - -OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) - -OSD_SETTING_WHITELIST = [ - 'osd heartbeat grace', - 'osd heartbeat interval', -] - - -def _order_dict_by_key(rdict): - """Convert a dictionary into an OrderedDict sorted by key. - - :param rdict: Dictionary to be ordered. - :type rdict: dict - :returns: Ordered Dictionary. - :rtype: collections.OrderedDict - """ - return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) - - -def get_osd_settings(relation_name): - """Consolidate requested osd settings from all clients. - - Consolidate requested osd settings from all clients. Check that the - requested setting is on the whitelist and it does not conflict with - any other requested settings. 
- - :returns: Dictionary of settings - :rtype: dict - - :raises: OSDSettingNotAllowed - :raises: OSDSettingConflict - """ - rel_ids = relation_ids(relation_name) - osd_settings = {} - for relid in rel_ids: - for unit in related_units(relid): - unit_settings = relation_get('osd-settings', unit, relid) or '{}' - unit_settings = json.loads(unit_settings) - for key, value in unit_settings.items(): - if key not in OSD_SETTING_WHITELIST: - msg = 'Illegal settings "{}"'.format(key) - raise OSDSettingNotAllowed(msg) - if key in osd_settings: - if osd_settings[key] != unit_settings[key]: - msg = 'Conflicting settings for "{}"'.format(key) - raise OSDSettingConflict(msg) - else: - osd_settings[key] = value - return _order_dict_by_key(osd_settings) - - -def send_application_name(relid=None): - """Send the application name down the relation. - - :param relid: Relation id to set application name in. - :type relid: str - """ - relation_set( - relation_id=relid, - relation_settings={'application-name': application_name()}) - - -def send_osd_settings(): - """Pass on requested OSD settings to osd units.""" - try: - settings = get_osd_settings('client') - except OSD_SETTING_EXCEPTIONS as e: - # There is a problem with the settings, not passing them on. Update - # status will notify the user. - log(e, level=ERROR) - return - data = { - 'osd-settings': json.dumps(settings, sort_keys=True)} - for relid in relation_ids('osd'): - relation_set(relation_id=relid, - relation_settings=data) - - -def validator(value, valid_type, valid_range=None): - """Helper function for type validation. - - Used to validate these: - https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate. - :type value: any - :param valid_type: The type that value should be. - :type valid_type: any - :param valid_range: A range of values that value can assume. - :type valid_range: Optional[Union[List,Tuple]] - :raises: AssertionError, ValueError - """ - assert isinstance(value, valid_type), ( - "{} is not a {}".format(value, valid_type)) - if valid_range is not None: - assert isinstance( - valid_range, list) or isinstance(valid_range, tuple), ( - "valid_range must be of type List or Tuple, " - "was given {} of type {}" - .format(valid_range, type(valid_range))) - # If we're dealing with strings - if isinstance(value, str): - assert value in valid_range, ( - "{} is not in the list {}".format(value, valid_range)) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError( - "Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], ( - "{} is less than minimum allowed value of {}" - .format(value, valid_range[0])) - assert value <= valid_range[1], ( - "{} is greater than maximum allowed value of {}" - .format(value, valid_range[1])) - - -class PoolCreationError(Exception): - """A custom exception to inform the caller that a pool creation failed. - - Provides an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class BasePool(object): - """An object oriented approach to Ceph pool creation. - - This base class is inherited by ReplicatedPool and ErasurePool. 
Do not call - create() on this base class as it will raise an exception. - - Instantiate a child class and call create(). - """ - # Dictionary that maps pool operation properties to Tuples with valid type - # and valid range - op_validation_map = { - 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), - 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), - 'compression-required-ratio': (float, None), - 'compression-min-blob-size': (int, None), - 'compression-min-blob-size-hdd': (int, None), - 'compression-min-blob-size-ssd': (int, None), - 'compression-max-blob-size': (int, None), - 'compression-max-blob-size-hdd': (int, None), - 'compression-max-blob-size-ssd': (int, None), - 'rbd-mirroring-mode': (str, ('image', 'pool')) - } - - def __init__(self, service, name=None, percent_data=None, app_name=None, - op=None): - """Initialize BasePool object. - - Pool information is either initialized from individual keyword - arguments or from an individual CephBrokerRq operation Dict. - - :param service: The Ceph user name to run commands under. - :type service: str - :param name: Name of pool to operate on. - :type name: str - :param percent_data: The expected pool size in relation to all - available resources in the Ceph cluster. Will be - used to set the ``target_size_ratio`` pool - property. (default: 10.0) - :type percent_data: Optional[float] - :param app_name: Ceph application name, usually one of: - ('cephfs', 'rbd', 'rgw') (default: 'unknown') - :type app_name: Optional[str] - :param op: Broker request Op to compile pool data from. - :type op: Optional[Dict[str,any]] - :raises: KeyError - """ - # NOTE: Do not perform initialization steps that require live data from - # a running cluster here. The *Pool classes may be used for validation. - self.service = service - self.op = op or {} - - if op: - # When initializing from op the `name` attribute is required and we - # will fail with KeyError if it is not provided. - self.name = op['name'] - self.percent_data = op.get('weight') - self.app_name = op.get('app-name') - else: - self.name = name - self.percent_data = percent_data - self.app_name = app_name - - # Set defaults for these if they are not provided - self.percent_data = self.percent_data or 10.0 - self.app_name = self.app_name or 'unknown' - - def validate(self): - """Check that the values of supplied operation parameters are valid. - - :raises: ValueError - """ - for op_key, op_value in self.op.items(): - if op_key in self.op_validation_map and op_value is not None: - valid_type, valid_range = self.op_validation_map[op_key] - try: - validator(op_value, valid_type, valid_range) - except (AssertionError, ValueError) as e: - # Normalize on ValueError, also add information about which - # variable we had an issue with. - raise ValueError("'{}': {}".format(op_key, str(e))) - - def _create(self): - """Perform the pool creation, method MUST be overridden by child class. - """ - raise NotImplementedError - - def _post_create(self): - """Perform common post pool creation tasks. - - Note that pool properties subject to change during the lifetime of a - pool / deployment should go into the ``update`` method. - - Do not add calls for a specific pool type here, those should go into - one of the pool specific classes.
- """ - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool( - client=self.service, - pool=self.name, - settings={ - 'target_size_ratio': str( - self.percent_data / 100.0), - }) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}' - .format(self.name), - level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}' - .format(self.name, e), - level=WARNING) - - def create(self): - """Create pool and perform any post pool creation tasks. - - To allow for sharing of common code among pool specific classes the - processing has been broken out into the private methods ``_create`` - and ``_post_create``. - - Do not add any pool type specific handling here, that should go into - one of the pool specific classes. - """ - if not pool_exists(self.service, self.name): - self.validate() - self._create() - self._post_create() - self.update() - - def set_quota(self): - """Set a quota if requested. - - :raises: CalledProcessError - """ - max_bytes = self.op.get('max-bytes') - max_objects = self.op.get('max-objects') - if max_bytes or max_objects: - set_pool_quota(service=self.service, pool_name=self.name, - max_bytes=max_bytes, max_objects=max_objects) - - def set_compression(self): - """Set compression properties if requested. - - :raises: CalledProcessError - """ - compression_properties = { - key.replace('-', '_'): value - for key, value in self.op.items() - if key in ( - 'compression-algorithm', - 'compression-mode', - 'compression-required-ratio', - 'compression-min-blob-size', - 'compression-min-blob-size-hdd', - 'compression-min-blob-size-ssd', - 'compression-max-blob-size', - 'compression-max-blob-size-hdd', - 'compression-max-blob-size-ssd') and value} - if compression_properties: - update_pool(self.service, self.name, compression_properties) - - def update(self): - """Update properties for an already existing pool. - - Do not add calls for a specific pool type here, those should go into - one of the pool specific classes. - """ - self.validate() - self.set_quota() - self.set_compression() - - def add_cache_tier(self, cache_pool, mode): - """Adds a new cache tier to an existing pool. - - :param cache_pool: The cache tier pool name to add. - :type cache_pool: str - :param mode: The caching mode to use for this pool. - valid range = ["readonly", "writeback"] - :type mode: str - """ - # Check the input types and values - validator(value=cache_pool, valid_type=str) - validator( - value=mode, valid_type=str, - valid_range=["readonly", "writeback"]) - - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'add', self.name, cache_pool, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'cache-mode', cache_pool, mode, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'set-overlay', self.name, cache_pool, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', - ]) - - def remove_cache_tier(self, cache_pool): - """Removes a cache tier from Ceph. - - Flushes all dirty objects from writeback pools and waits for that to - complete. - - :param cache_pool: The cache tier pool name to remove. 
- :type cache_pool: str - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - if mode == 'readonly': - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'cache-mode', cache_pool, 'none' - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove', self.name, cache_pool, - ]) - - elif mode == 'writeback': - pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'forward'] - if cmp_pkgrevno('ceph-common', '10.1') >= 0: - # Jewel added a mandatory flag - pool_forward_cmd.append('--yes-i-really-mean-it') - - check_call(pool_forward_cmd) - # Flush the cache and wait for it to return - check_call([ - 'rados', '--id', self.service, - '-p', cache_pool, 'cache-flush-evict-all']) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove-overlay', self.name]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove', self.name, cache_pool]) - - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, - device_class=None): - """Return the number of placement groups to use when creating the pool. - - Returns the number of placement groups which should be specified when - creating the pool. This is based upon the calculation guidelines - provided by the Ceph Placement Group Calculator (located online at - http://ceph.com/pgcalc/). - - The number of placement groups are calculated using the following: - - (Target PGs per OSD) * (OSD #) * (%Data) - ---------------------------------------- - (Pool size) - - Per the upstream guidelines, the OSD # should really be considered - based on the number of OSDs which are eligible to be selected by the - pool. Since the pool creation doesn't specify any of CRUSH set rules, - the default rule will be dependent upon the type of pool being - created (replicated or erasure). - - This code makes no attempt to determine the number of OSDs which can be - selected for the specific rule, rather it is left to the user to tune - in the form of 'expected-osd-count' config option. - - :param pool_size: pool_size is either the number of replicas for - replicated pools or the K+M sum for erasure coded pools - :type pool_size: int - :param percent_data: the percentage of data that is expected to - be contained in the pool for the specific OSD set. Default value - is to assume 10% of the data is for this pool, which is a - relatively low % of the data but allows for the pg_num to be - increased. NOTE: the default is primarily to handle the scenario - where related charms requiring pools has not been upgraded to - include an update to indicate their relative usage of the pools. - :type percent_data: float - :param device_class: class of storage to use for basis of pgs - calculation; ceph supports nvme, ssd and hdd by default based - on presence of devices of each type in the deployment. - :type device_class: str - :returns: The number of pgs to use. - :rtype: int - """ - - # Note: This calculation follows the approach that is provided - # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. - validator(value=pool_size, valid_type=int) - - # Ensure that percent data is set to something - even with a default - # it can be set to None, which would wreak havoc below. 
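# Worked example (assumed values, not from this charm): with the default
# target of 100 PGs per OSD, 12 OSDs, percent_data=10.0 and a replicated
# pool_size of 3, the code below computes (100 * 12 * 0.10) // 3 = 40 and
# then settles on 32, the nearest power of two (40 - 32 = 8 is within the
# 25% tolerance).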
- if percent_data is None: - percent_data = DEFAULT_POOL_WEIGHT - - # If the expected-osd-count is specified, then use the max between - # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service, device_class) - expected = config('expected-osd-count') or 0 - - if osd_list: - if device_class: - osd_count = len(osd_list) - else: - osd_count = max(expected, len(osd_list)) - - # Log a message to provide some insight if the calculations claim - # to be off because someone is setting the expected count and - # there are more OSDs in reality. Try to make a proper guess - # based upon the cluster itself. - if not device_class and expected and osd_count != expected: - log("Found more OSDs than provided expected count. " - "Using the actual count instead", INFO) - elif expected: - # Use the expected-osd-count in older ceph versions to allow for - # more accurate pg calculations - osd_count = expected - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - return LEGACY_PG_COUNT - - percent_data /= 100.0 - target_pgs_per_osd = config( - 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET - num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size - - # NOTE: ensure a sane minimum number of PGs otherwise we don't get any - # reasonable data distribution in minimal OSD configurations - if num_pg < DEFAULT_MINIMUM_PGS: - num_pg = DEFAULT_MINIMUM_PGS - - # The CRUSH algorithm has a slight optimization for placement groups - # with powers of 2 so find the nearest power of 2. If the nearest - # power of 2 is more than 25% below the original value, the next - # highest value is used. To do this, find the nearest power of 2 such - # that 2^n <= num_pg, check to see if it's within the 25% tolerance. - exponent = math.floor(math.log(num_pg, 2)) - nearest = 2 ** exponent - if (num_pg - nearest) > (num_pg * 0.25): - # Choose the next highest power of 2 since the nearest is more - # than 25% below the original value. - return int(nearest * 2) - else: - return int(nearest) - - -class Pool(BasePool): - """Compatibility shim for any descendants external to this library.""" - - @deprecate( - 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.') - def __init__(self, service, name): - super(Pool, self).__init__(service, name=name) - - def create(self): - pass - - -class ReplicatedPool(BasePool): - def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None, - profile_name='replicated_rule'): - """Initialize ReplicatedPool object. - - Pool information is either initialized from individual keyword - arguments or from an individual CephBrokerRq operation Dict. - - Please refer to the docstring of the ``BasePool`` class for - documentation of the common parameters. - - :param pg_num: Express a wish for the number of Placement Groups (this value - is subject to validation against a running cluster prior - to use to avoid creating a pool with too many PGs) - :type pg_num: int - :param replicas: Number of copies there should be of each object added - to this replicated pool. - :type replicas: int - :raises: KeyError - :param profile_name: Crush Profile to use - :type profile_name: Optional[str] - """ - # NOTE: Do not perform initialization steps that require live data from - # a running cluster here. The *Pool classes may be used for validation.
- - # The common parameters are handled in our parent's initializer - super(ReplicatedPool, self).__init__( - service=service, name=name, percent_data=percent_data, - app_name=app_name, op=op) - - if op: - # When initializing from op `replicas` is a required attribute, and - # we will fail with KeyError if it is not provided. - self.replicas = op['replicas'] - self.pg_num = op.get('pg_num') - self.profile_name = op.get('crush-profile') or profile_name - else: - self.replicas = replicas or 2 - self.pg_num = pg_num - self.profile_name = profile_name or 'replicated_rule' - - def _create(self): - # Validate if crush profile exists - if self.profile_name is None: - msg = ("Failed to discover crush profile named " - "{}".format(self.profile_name)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - # Do extra validation on pg_num with data from live cluster - if self.pg_num: - # Since the number of placement groups was specified, ensure - # that there aren't too many created. - max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(self.pg_num, max_pgs) - else: - self.pg_num = self.get_pgs(self.replicas, self.percent_data) - - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num), self.profile_name - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num), self.profile_name - ] - check_call(cmd) - - def _post_create(self): - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - # Perform other common post pool creation tasks - super(ReplicatedPool, self)._post_create() - - -class ErasurePool(BasePool): - """Default jerasure erasure coded pool.""" - - def __init__(self, service, name=None, erasure_code_profile=None, - percent_data=None, app_name=None, op=None, - allow_ec_overwrites=False): - """Initialize ErasurePool object. - - Pool information is either initialized from individual keyword - arguments or from an individual CephBrokerRq operation Dict. - - Please refer to the docstring of the ``BasePool`` class for - documentation of the common parameters. - - :param erasure_code_profile: EC Profile to use (default: 'default') - :type erasure_code_profile: Optional[str] - """ - # NOTE: Do not perform initialization steps that require live data from - # a running cluster here. The *Pool classes may be used for validation. - - # The common parameters are handled in our parent's initializer - super(ErasurePool, self).__init__( - service=service, name=name, percent_data=percent_data, - app_name=app_name, op=op) - - if op: - # Note that the different default when initializing from op stems - # from different handling of this in the `charms.ceph` library. - self.erasure_code_profile = op.get('erasure-profile', - 'default-canonical') - self.allow_ec_overwrites = op.get('allow-ec-overwrites') - else: - # We keep the class default when initialized from keyword arguments - # to not break the API for any other consumers. - self.erasure_code_profile = erasure_code_profile or 'default' - self.allow_ec_overwrites = allow_ec_overwrites - - def _create(self): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m.
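# Worked example for the erasure case (assumed values): a profile with
# k=4 and m=2 gives an effective pool_size of k + m = 6, so a 12-OSD
# cluster at the default 100 PGs per OSD and percent_data=10.0 would
# request (100 * 12 * 0.10) // 6 = 20 PGs, which get_pgs() rounds down
# to the nearest power of two, 16.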
- erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, pgs) - ), - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile - ] - check_call(cmd) - - def _post_create(self): - super(ErasurePool, self)._post_create() - if self.allow_ec_overwrites: - update_pool(self.service, self.name, - {'allow_ec_overwrites': 'true'}) - - -def enabled_manager_modules(): - """Return a list of enabled manager modules. - - :rtype: List[str] - """ - cmd = ['ceph', 'mgr', 'module', 'ls'] - quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 - if quincy_or_later: - cmd.append('--format=json') - try: - modules = check_output(cmd).decode('utf-8') - except CalledProcessError as e: - log("Failed to list ceph modules: {}".format(e), WARNING) - return [] - modules = json.loads(modules) - return modules['enabled_modules'] - - -def enable_pg_autoscale(service, pool_name): - """Enable Ceph's PG autoscaler for the specified pool. - - :param service: The Ceph user name to run the command under - :type service: str - :param pool_name: The name of the pool to enable autoscaling on - :type pool_name: str - :raises: CalledProcessError if the command fails - """ - check_call([ - 'ceph', '--id', service, - 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) - - -def get_mon_map(service): - """Return the current monitor map. - - :param service: The Ceph user name to run the command under - :type service: str - :returns: Dictionary with monitor map data - :rtype: Dict[str,any] - :raises: ValueError if the monmap fails to parse, CalledProcessError if our - ceph command fails. - """ - try: - octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 - mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' - mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, - '--format=json'])).decode('utf-8') - try: - return json.loads(mon_status) - except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}" - .format(mon_status, str(v))) - raise - except CalledProcessError as e: - log("mon_status command failed with message: {}" - .format(str(e))) - raise - - -def hash_monitor_names(service): - """Get a sorted list of monitor hashes in ascending order. - - Uses the get_mon_map() function to get information about the monitor - cluster. Hash the name of each monitor. - - :param service: The Ceph user name to run the command under. - :type service: str - :returns: a sorted list of monitor hashes in an ascending order.
- :rtype: List[str] - :raises: CalledProcessError, ValueError - """ - try: - hash_list = [] - monitor_list = get_mon_map(service=service) - if monitor_list['monmap']['mons']: - for mon in monitor_list['monmap']['mons']: - hash_list.append( - hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) - return sorted(hash_list) - else: - return None - except (ValueError, CalledProcessError): - raise - - -def monitor_key_delete(service, key): - """Delete a key and value pair from the monitor cluster. - - Deletes a key value pair on the monitor cluster. - - :param service: The Ceph user name to run the command under - :type service: str - :param key: The key to delete. - :type key: str - :raises: CalledProcessError - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'del', str(key)]) - except CalledProcessError as e: - log("Monitor config-key del failed with message: {}" - .format(e.output)) - raise - - -def monitor_key_set(service, key, value): - """Set a key value pair on the monitor cluster. - - :param service: The Ceph user name to run the command under. - :type service: str - :param key: The key to set. - :type key: str - :param value: The value to set. This will be coerced into a string. - :type value: str - :raises: CalledProcessError - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'put', str(key), str(value)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}" - .format(e.output)) - raise - - -def monitor_key_get(service, key): - """Get the value of an existing key in the monitor cluster. - - :param service: The Ceph user name to run the command under - :type service: str - :param key: The key to search for. - :type key: str - :return: Returns the value of that key or None if not found. - :rtype: Optional[str] - """ - try: - output = check_output( - ['ceph', '--id', service, - 'config-key', 'get', str(key)]).decode('UTF-8') - return output - except CalledProcessError as e: - log("Monitor config-key get failed with message: {}" - .format(e.output)) - return None - - -def monitor_key_exists(service, key): - """Search for existence of key in the monitor cluster. - - :param service: The Ceph user name to run the command under. - :type service: str - :param key: The key to search for. - :type key: str - :return: Returns True if the key exists, False if not. - :rtype: bool - :raises: CalledProcessError if an unknown error occurs. - """ - try: - check_call( - ['ceph', '--id', service, - 'config-key', 'exists', str(key)]) - # I can return true here regardless because Ceph returns - # ENOENT if the key wasn't found - return True - except CalledProcessError as e: - if e.returncode == errno.ENOENT: - return False - else: - log("Unknown error from ceph config-key exists: {} {}" - .format(e.returncode, e.output)) - raise - - -def get_erasure_profile(service, name): - """Get an existing erasure code profile if it exists. - - :param service: The Ceph user name to run the command under. - :type service: str - :param name: Name of profile. - :type name: str - :returns: Dictionary with profile data. - :rtype: Optional[Dict[str]] - """ - try: - out = check_output(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name, '--format=json']).decode('utf-8') - return json.loads(out) - except (CalledProcessError, OSError, ValueError): - return None - - -def pool_set(service, pool_name, key, value): - """Sets a value for a RADOS pool in ceph. - - :param service: The Ceph user name to run the command under.
- :type service: str - :param pool_name: Name of pool to set property on. - :type pool_name: str - :param key: Property key. - :type key: str - :param value: Value, will be coerced into str and shifted to lowercase. - :type value: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', pool_name, key, str(value).lower()] - check_call(cmd) - - -def snapshot_pool(service, pool_name, snapshot_name): - """Snapshots a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to snapshot. - :type pool_name: str - :param snapshot_name: Name of snapshot to create. - :type snapshot_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - check_call(cmd) - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """Remove a snapshot from a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to remove snapshot from. - :type pool_name: str - :param snapshot_name: Name of snapshot to remove. - :type snapshot_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - check_call(cmd) - - -def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): - """Set byte quota on a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under - :type service: str - :param pool_name: Name of pool - :type pool_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int - :raises: subprocess.CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set-quota', pool_name] - if max_bytes: - cmd = cmd + ['max_bytes', str(max_bytes)] - if max_objects: - cmd = cmd + ['max_objects', str(max_objects)] - check_call(cmd) - - -def remove_pool_quota(service, pool_name): - """Remove byte quota on a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to remove quota from. - :type pool_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - check_call(cmd) - - -def remove_erasure_profile(service, profile_name): - """Remove erasure code profile. - - :param service: The Ceph user name to run the command under - :type service: str - :param profile_name: Name of profile to remove. - :type profile_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'erasure-code-profile', 'rm', profile_name] - check_call(cmd) - - -def create_erasure_profile(service, profile_name, - erasure_plugin_name='jerasure', - failure_domain=None, - data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None, - helper_chunks=None, - scalar_mds=None, - crush_locality=None, - device_class=None, - erasure_plugin_technique=None): - """Create a new erasure code profile if one does not already exist for it. - - Profiles are considered immutable so will not be updated if the named - profile already exists. - - Please refer to [0] for more details. - - 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - - :param service: The Ceph user name to run the command under. 
- :type service: str - :param profile_name: Name of profile. - :type profile_name: str - :param erasure_plugin_name: Erasure code plugin. - :type erasure_plugin_name: str - :param failure_domain: Failure domain, one of: - ('chassis', 'datacenter', 'host', 'osd', 'pdu', - 'pod', 'rack', 'region', 'room', 'root', 'row'). - :type failure_domain: str - :param data_chunks: Number of data chunks. - :type data_chunks: int - :param coding_chunks: Number of coding chunks. - :type coding_chunks: int - :param locality: Locality. - :type locality: int - :param durability_estimator: Durability estimator. - :type durability_estimator: int - :param helper_chunks: Number of helper chunks. - :type helper_chunks: int - :param device_class: Restrict placement to devices of specific class. - :type device_class: str - :param scalar_mds: one of ['isa', 'jerasure', 'shec'] - :type scalar_mds: str - :param crush_locality: LRC locality failure domain, one of: - ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', - 'rack', 'region', 'room', 'root', 'row') or unset. - :type crush_locality: str - :param erasure_plugin_technique: Coding technique for EC plugin - :type erasure_plugin_technique: str - :return: None. Can raise CalledProcessError, ValueError or AssertionError - """ - if erasure_profile_exists(service, profile_name): - log('EC profile {} exists, skipping update'.format(profile_name), - level=WARNING) - return - - plugin_techniques = { - 'jerasure': [ - 'reed_sol_van', - 'reed_sol_r6_op', - 'cauchy_orig', - 'cauchy_good', - 'liberation', - 'blaum_roth', - 'liber8tion' - ], - 'lrc': [], - 'isa': [ - 'reed_sol_van', - 'cauchy', - ], - 'shec': [ - 'single', - 'multiple' - ], - 'clay': [], - } - failure_domains = [ - 'chassis', 'datacenter', - 'host', 'osd', - 'pdu', 'pod', - 'rack', 'region', - 'room', 'root', - 'row', - ] - device_classes = [ - 'ssd', - 'hdd', - 'nvme' - ] - - validator(erasure_plugin_name, str, list(plugin_techniques.keys())) - - cmd = [ - 'ceph', '--id', service, - 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin={}'.format(erasure_plugin_name), - 'k={}'.format(str(data_chunks)), - 'm={}'.format(str(coding_chunks)), - ] - - if erasure_plugin_technique: - validator(erasure_plugin_technique, str, - plugin_techniques[erasure_plugin_name]) - cmd.append('technique={}'.format(erasure_plugin_technique)) - - luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - - # Set failure domain from options if not provided in args - if not failure_domain and config('customize-failure-domain'): - # Defaults to 'host' so just need to deal with - # setting 'rack' if feature is enabled - failure_domain = 'rack' - - if failure_domain: - validator(failure_domain, str, failure_domains) - # failure_domain changed in luminous - if luminous_or_later: - cmd.append('crush-failure-domain={}'.format(failure_domain)) - else: - cmd.append('ruleset-failure-domain={}'.format(failure_domain)) - - # device class new in luminous - if luminous_or_later and device_class: - validator(device_class, str, device_classes) - cmd.append('crush-device-class={}'.format(device_class)) - else: - log('Skipping device class configuration (ceph < 12.0.0)', - level=DEBUG) - - # Add plugin specific information - if erasure_plugin_name == 'lrc': - # LRC mandatory configuration - if locality: - cmd.append('l={}'.format(str(locality))) - else: - raise ValueError("locality must be provided for lrc plugin") - # LRC optional configuration - if crush_locality: - validator(crush_locality, str, failure_domains) -
cmd.append('crush-locality={}'.format(crush_locality)) - - if erasure_plugin_name == 'shec': - # SHEC optional configuration - if durability_estimator: - cmd.append('c={}'.format((durability_estimator))) - - if erasure_plugin_name == 'clay': - # CLAY optional configuration - if helper_chunks: - cmd.append('d={}'.format(str(helper_chunks))) - if scalar_mds: - cmd.append('scalar-mds={}'.format(scalar_mds)) - - check_call(cmd) - - -def rename_pool(service, old_name, new_name): - """Rename a Ceph pool from old_name to new_name. - - :param service: The Ceph user name to run the command under. - :type service: str - :param old_name: Name of pool subject to rename. - :type old_name: str - :param new_name: Name to rename pool to. - :type new_name: str - """ - validator(value=old_name, valid_type=str) - validator(value=new_name, valid_type=str) - - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'rename', old_name, new_name] - check_call(cmd) - - -def erasure_profile_exists(service, name): - """Check to see if an Erasure code profile already exists. - - :param service: The Ceph user name to run the command under - :type service: str - :param name: Name of profile to look for. - :type name: str - :returns: True if it exists, False otherwise. - :rtype: bool - """ - validator(value=name, valid_type=str) - try: - check_call(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name]) - return True - except CalledProcessError: - return False - - -def get_cache_mode(service, pool_name): - """Find the current caching mode of the pool_name given. - - :param service: The Ceph user name to run the command under - :type service: str - :param pool_name: Name of pool. - :type pool_name: str - :returns: Current cache mode. - :rtype: Optional[int] - """ - validator(value=service, valid_type=str) - validator(value=pool_name, valid_type=str) - out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']).decode('utf-8') - try: - osd_json = json.loads(out) - for pool in osd_json['pools']: - if pool['pool_name'] == pool_name: - return pool['cache_mode'] - return None - except ValueError: - raise - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output( - ['rados', '--id', service, 'lspools']).decode('utf-8') - except CalledProcessError: - return False - - return name in out.split() - - -def get_osds(service, device_class=None): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster (optionally filtered by storage device class). 
- - :param device_class: Class of storage device for OSD's - :type device_class: str - """ - luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - if luminous_or_later and device_class: - out = check_output(['ceph', '--id', service, - 'osd', 'crush', 'class', - 'ls-osd', device_class, - '--format=json']).decode('utf-8') - else: - out = check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('utf-8') - return json.loads(out) - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('utf-8') - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - """Update pool properties. - - :param client: Client/User-name to authenticate with. - :type client: str - :param pool: Name of pool to operate on - :type pool: str - :param settings: Dictionary with key/value pairs to set. - :type settings: Dict[str, str] - :raises: CalledProcessError - """ - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in settings.items(): - check_call(cmd + [k, v]) - - -def set_app_name_for_pool(client, pool, name): - """Calls `osd pool application enable` for the specified pool name - - :param client: Name of the ceph client to use - :type client: str - :param pool: Pool to set app name for - :type pool: str - :param name: app name for the specified pool - :type name: str - - :raises: CalledProcessError if ceph call fails - """ - if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: - cmd = ['ceph', '--id', client, 'osd', 'pool', - 'application', 'enable', pool, name] - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def add_key(service, key): - """Add a key to a keyring. - - Creates the keyring if it doesn't already exist. - - Logs and returns if the key is already in the keyring. - """ - keyring = _keyring_path(service) - if os.path.exists(keyring): - with open(keyring, 'r') as ring: - if key in ring.read(): - log('Ceph keyring exists at %s and has not changed.' 
% keyring, - level=DEBUG) - return - log('Updating existing keyring %s.' % keyring, level=DEBUG) - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def create_keyring(service, key): - """Deprecated. Please use the more accurately named 'add_key'""" - return add_key(service, key) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' % keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - add_key(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']).decode('utf-8') - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesystem is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, - level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, - level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. 
- os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. - - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. - if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.' - .format(svc), level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, - relation='ceph', key=None): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - @returns boolean: Flag to indicate whether a key was successfully written - to disk based on either relation data or a supplied key - """ - if not key: - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - add_key(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None, raw_request_data=None): - """Initialize CephBrokerRq object. - - Builds a new empty request or rebuilds a request from on-wire JSON - data. - - :param api_version: API version for request (default: 1). 
- :type api_version: Optional[int] - :param request_id: Unique identifier for request. - (default: string representation of generated UUID) - :type request_id: Optional[str] - :param raw_request_data: JSON-encoded string to build request from. - :type raw_request_data: Optional[str] - :raises: KeyError - """ - if raw_request_data: - request_data = json.loads(raw_request_data) - self.api_version = request_data['api-version'] - self.request_id = request_data['request-id'] - self.set_ops(request_data['ops']) - else: - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op(self, op): - """Add an op if it is not already in the list. - - :param op: Operation to add. - :type op: dict - """ - if op not in self.ops: - self.ops.append(op) - - def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None, - object_prefix_permissions=None): - """ - Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools or - object prefixes. object_prefix_permissions should be a dictionary - keyed on the permission with the corresponding value being a list - of prefixes to apply that permission to. - { - 'rwx': ['prefix1', 'prefix2'], - 'class-read': ['prefix3']} - """ - self.add_op({ - 'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, - 'name': key_name or service_name(), - 'group-permission': permission, - 'object-prefix-permissions': object_prefix_permissions}) - - def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, max_objects=None): - """DEPRECATED: Use ``add_op_create_replicated_pool()`` or - ``add_op_create_erasure_pool()`` instead. - """ - return self.add_op_create_replicated_pool( - name, replica_count=replica_count, pg_num=pg_num, weight=weight, - group=group, namespace=namespace, app_name=app_name, - max_bytes=max_bytes, max_objects=max_objects) - - # Use function parameters and docstring to define types in a compatible - # manner. - # - # NOTE: Our caller should always use a kwarg Dict when calling us so - # no need to maintain fixed order/position for parameters. Please keep them - # sorted by name when adding new ones. - def _partial_build_common_op_create(self, - app_name=None, - compression_algorithm=None, - compression_mode=None, - compression_required_ratio=None, - compression_min_blob_size=None, - compression_min_blob_size_hdd=None, - compression_min_blob_size_ssd=None, - compression_max_blob_size=None, - compression_max_blob_size_hdd=None, - compression_max_blob_size_ssd=None, - group=None, - max_bytes=None, - max_objects=None, - namespace=None, - rbd_mirroring_mode='pool', - weight=None): - """Build common part of a create pool operation. - - :param app_name: Tag pool with application name. Note that there are - certain protocols emerging upstream with regard to - meaningful application names to use. - Examples are 'rbd' and 'rgw'.
- :type app_name: Optional[str] - :param compression_algorithm: Compressor to use, one of: - ('lz4', 'snappy', 'zlib', 'zstd') - :type compression_algorithm: Optional[str] - :param compression_mode: When to compress data, one of: - ('none', 'passive', 'aggressive', 'force') - :type compression_mode: Optional[str] - :param compression_required_ratio: Minimum compression ratio for data - chunk, if the requested ratio is not - achieved the compressed version will - be thrown away and the original - stored. - :type compression_required_ratio: Optional[float] - :param compression_min_blob_size: Chunks smaller than this are never - compressed (unit: bytes). - :type compression_min_blob_size: Optional[int] - :param compression_min_blob_size_hdd: Chunks smaller than this are not - compressed when destined to - rotational media (unit: bytes). - :type compression_min_blob_size_hdd: Optional[int] - :param compression_min_blob_size_ssd: Chunks smaller than this are not - compressed when destined to flash - media (unit: bytes). - :type compression_min_blob_size_ssd: Optional[int] - :param compression_max_blob_size: Chunks larger than this are broken - into N * compression_max_blob_size - chunks before being compressed - (unit: bytes). - :type compression_max_blob_size: Optional[int] - :param compression_max_blob_size_hdd: Chunks larger than this are - broken into - N * compression_max_blob_size_hdd - chunks before being compressed - when destined for rotational - media (unit: bytes) - :type compression_max_blob_size_hdd: Optional[int] - :param compression_max_blob_size_ssd: Chunks larger than this are - broken into - N * compression_max_blob_size_ssd - chunks before being compressed - when destined for flash media - (unit: bytes). - :type compression_max_blob_size_ssd: Optional[int] - :param group: Group to add pool to - :type group: Optional[str] - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: Optional[int] - :param max_objects: Maximum objects quota to apply - :type max_objects: Optional[int] - :param namespace: Group namespace - :type namespace: Optional[str] - :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD - mirroring is enabled. - :type rbd_mirroring_mode: Optional[str] - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: Optional[float] - :returns: Dictionary with kwarg name as key. - :rtype: Dict[str,any] - :raises: AssertionError - """ - return { - 'app-name': app_name, - 'compression-algorithm': compression_algorithm, - 'compression-mode': compression_mode, - 'compression-required-ratio': compression_required_ratio, - 'compression-min-blob-size': compression_min_blob_size, - 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, - 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, - 'compression-max-blob-size': compression_max_blob_size, - 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, - 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, - 'group': group, - 'max-bytes': max_bytes, - 'max-objects': max_objects, - 'group-namespace': namespace, - 'rbd-mirroring-mode': rbd_mirroring_mode, - 'weight': weight, - } - - def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - crush_profile=None, **kwargs): - """Adds an operation to create a replicated pool. 
-
-        Refer to docstring for ``_partial_build_common_op_create`` for
-        documentation of keyword arguments.
-
-        :param name: Name of pool to create
-        :type name: str
-        :param replica_count: Number of copies Ceph should keep of your data.
-        :type replica_count: int
-        :param pg_num: Request specific number of Placement Groups to create
-                       for pool.
-        :type pg_num: int
-        :param crush_profile: Name of crush profile to use. If not set the
-                              ceph-mon unit handling the broker request will
-                              set its default value.
-        :type crush_profile: Optional[str]
-        :raises: AssertionError if provided data is of invalid type/range
-        """
-        if pg_num and kwargs.get('weight'):
-            raise ValueError('pg_num and weight are mutually exclusive')
-
-        op = {
-            'op': 'create-pool',
-            'name': name,
-            'replicas': replica_count,
-            'pg_num': pg_num,
-            'crush-profile': crush_profile
-        }
-        op.update(self._partial_build_common_op_create(**kwargs))
-
-        # Initialize Pool-object to validate type and range of ops.
-        pool = ReplicatedPool('dummy-service', op=op)
-        pool.validate()
-
-        self.add_op(op)
-
-    def add_op_create_erasure_pool(self, name, erasure_profile=None,
-                                   allow_ec_overwrites=False, **kwargs):
-        """Adds an operation to create an erasure coded pool.
-
-        Refer to docstring for ``_partial_build_common_op_create`` for
-        documentation of keyword arguments.
-
-        :param name: Name of pool to create
-        :type name: str
-        :param erasure_profile: Name of erasure code profile to use. If not
-                                set the ceph-mon unit handling the broker
-                                request will set its default value.
-        :type erasure_profile: str
-        :param allow_ec_overwrites: allow EC pools to be overwritten
-        :type allow_ec_overwrites: bool
-        :raises: AssertionError if provided data is of invalid type/range
-        """
-        op = {
-            'op': 'create-pool',
-            'name': name,
-            'pool-type': 'erasure',
-            'erasure-profile': erasure_profile,
-            'allow-ec-overwrites': allow_ec_overwrites,
-        }
-        op.update(self._partial_build_common_op_create(**kwargs))
-
-        # Initialize Pool-object to validate type and range of ops.
-        pool = ErasurePool('dummy-service', op)
-        pool.validate()
-
-        self.add_op(op)
-
-    def add_op_create_erasure_profile(self, name,
-                                      erasure_type='jerasure',
-                                      erasure_technique=None,
-                                      k=None, m=None,
-                                      failure_domain=None,
-                                      lrc_locality=None,
-                                      shec_durability_estimator=None,
-                                      clay_helper_chunks=None,
-                                      device_class=None,
-                                      clay_scalar_mds=None,
-                                      lrc_crush_locality=None):
-        """Adds an operation to create an erasure coding profile.
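A minimal sketch of how these three helpers might be combined by a client charm; the pool, profile, and application names are hypothetical:

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
# pg_num and weight are mutually exclusive, so only weight is given here.
rq.add_op_create_replicated_pool(name='glance', replica_count=3,
                                 weight=20, app_name='rbd')
# An erasure profile must exist before an EC pool can reference it.
rq.add_op_create_erasure_profile(name='my-profile', k=4, m=2,
                                 failure_domain='host')
rq.add_op_create_erasure_pool(name='glance-ec',
                              erasure_profile='my-profile',
                              allow_ec_overwrites=True, app_name='rbd')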
-
-        :param name: Name of profile to create
-        :type name: str
-        :param erasure_type: Which of the erasure coding plugins should be used
-        :type erasure_type: string
-        :param erasure_technique: EC plugin technique to use
-        :type erasure_technique: string
-        :param k: Number of data chunks
-        :type k: int
-        :param m: Number of coding chunks
-        :type m: int
-        :param lrc_locality: Group the coding and data chunks into sets of
-                             size locality (lrc plugin)
-        :type lrc_locality: int
-        :param shec_durability_estimator: The number of parity chunks each of
-                                          which includes a data chunk in its
-                                          calculation range (shec plugin)
-        :type shec_durability_estimator: int
-        :param clay_helper_chunks: The number of helper chunks to use for
-                                   recovery operations (clay plugin)
-        :type clay_helper_chunks: int
-        :param failure_domain: Type of failure domain from Ceph bucket types
-                               to be used
-        :type failure_domain: string
-        :param device_class: Device class to use for profile (ssd, hdd)
-        :type device_class: string
-        :param clay_scalar_mds: Plugin to use for CLAY layered construction
-                                (jerasure|isa|shec)
-        :type clay_scalar_mds: string
-        :param lrc_crush_locality: Type of crush bucket in which set of chunks
-                                   defined by lrc_locality will be stored.
-        :type lrc_crush_locality: string
-        """
-        self.add_op({'op': 'create-erasure-profile',
-                     'name': name,
-                     'k': k,
-                     'm': m,
-                     'l': lrc_locality,
-                     'c': shec_durability_estimator,
-                     'd': clay_helper_chunks,
-                     'erasure-type': erasure_type,
-                     'erasure-technique': erasure_technique,
-                     'failure-domain': failure_domain,
-                     'device-class': device_class,
-                     'scalar-mds': clay_scalar_mds,
-                     'crush-locality': lrc_crush_locality})
-
-    def set_ops(self, ops):
-        """Set request ops to provided value.
-
-        Useful for injecting ops that come from a previous request
-        to allow comparisons to ensure validity.
-        """
-        self.ops = ops
-
-    @property
-    def request(self):
-        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
-                           'request-id': self.request_id})
-
-    def _ops_equal(self, other):
-        keys_to_compare = [
-            'replicas', 'name', 'op', 'pg_num', 'group-permission',
-            'object-prefix-permissions',
-        ]
-        keys_to_compare += list(self._partial_build_common_op_create().keys())
-        if len(self.ops) == len(other.ops):
-            for req_no in range(0, len(self.ops)):
-                for key in keys_to_compare:
-                    if self.ops[req_no].get(key) != other.ops[req_no].get(key):
-                        return False
-        else:
-            return False
-        return True
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            return False
-        if self.api_version == other.api_version and \
-                self._ops_equal(other):
-            return True
-        else:
-            return False
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
-    """Ceph broker response.
-
-    Response is json-decoded and contents provided as methods/properties.
-
-    The API is versioned and defaults to version 1.
-    """
-
-    def __init__(self, encoded_rsp):
-        self.api_version = None
-        self.rsp = json.loads(encoded_rsp)
-
-    @property
-    def request_id(self):
-        return self.rsp.get('request-id')
-
-    @property
-    def exit_code(self):
-        return self.rsp.get('exit-code')
-
-    @property
-    def exit_msg(self):
-        return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identify which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit, e.g. glance/0 will get a
-# CephBrokerRsp via the key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-#     send_request_if_needed,
-#     is_request_complete,
-#     CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-#     rq = CephBrokerRq()
-#     rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-#     if is_request_complete(rq):
-#         # request complete: the pool is ready to use
-#     else:
-#         send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully
-# processed:
-# 'ceph:8': {
-#     'ceph/0': {
-#         'auth': 'cephx',
-#         'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-#         'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-#         'ceph-public-address': '10.5.44.103',
-#         'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-#         'private-address': '10.5.44.103',
-#     },
-#     'glance/0': {
-#         'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-#                        '"ops": [{"replicas": 3, "name": "glance", '
-#                        '"op": "create-pool"}]}'),
-#         'private-address': '10.5.44.109',
-#     },
-# }
-
-def get_previous_request(rid):
-    """Return the last ceph broker request sent on a given relation
-
-    :param rid: Relation id to query for request
-    :type rid: str
-    :returns: CephBrokerRq object or None if relation data not found.
-    :rtype: Optional[CephBrokerRq]
-    """
-    broker_req = relation_get(attribute='broker_req', rid=rid,
-                              unit=local_unit())
-    if broker_req:
-        return CephBrokerRq(raw_request_data=broker_req)
-
-
-def get_request_states(request, relation='ceph'):
-    """Return a dict of requests per relation id with their corresponding
-    completion state.
-
-    This allows a charm, which has a request for ceph, to see whether there is
-    an equivalent request already being processed and if so what state that
-    request is in.
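As a rough illustration (the relation id and values shown are hypothetical), the mapping returned looks like:

from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    get_request_states,
)

rq = CephBrokerRq()
rq.add_op_create_replicated_pool(name='glance', replica_count=3)
states = get_request_states(rq)
# e.g. {'ceph:8': {'sent': True, 'complete': False}}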
-
-    @param request: A CephBrokerRq object
-    """
-    requests = {}
-    for rid in relation_ids(relation):
-        previous_request = get_previous_request(rid)
-        if request == previous_request:
-            sent = True
-            complete = is_request_complete_for_rid(previous_request, rid)
-        else:
-            sent = False
-            complete = False
-
-        requests[rid] = {
-            'sent': sent,
-            'complete': complete,
-        }
-
-    return requests
-
-
-def is_request_sent(request, relation='ceph'):
-    """Check to see if a functionally equivalent request has already been sent
-
-    Returns True if a similar request has been sent
-
-    @param request: A CephBrokerRq object
-    """
-    states = get_request_states(request, relation=relation)
-    for rid in states.keys():
-        if not states[rid]['sent']:
-            return False
-
-    return True
-
-
-def is_request_complete(request, relation='ceph'):
-    """Check to see if a functionally equivalent request has already been
-    completed
-
-    Returns True if a similar request has been completed
-
-    @param request: A CephBrokerRq object
-    """
-    states = get_request_states(request, relation=relation)
-    for rid in states.keys():
-        if not states[rid]['complete']:
-            return False
-
-    return True
-
-
-def is_request_complete_for_rid(request, rid):
-    """Check if a given request has been completed on the given relation
-
-    @param request: A CephBrokerRq object
-    @param rid: Relation ID
-    """
-    broker_key = get_broker_rsp_key()
-    for unit in related_units(rid):
-        rdata = relation_get(rid=rid, unit=unit)
-        if rdata.get(broker_key):
-            rsp = CephBrokerRsp(rdata.get(broker_key))
-            if rsp.request_id == request.request_id:
-                if not rsp.exit_code:
-                    return True
-        else:
-            # The remote unit sent no reply targeted at this unit so either the
-            # remote ceph cluster does not support unit targeted replies or it
-            # has not processed our request yet.
-            if rdata.get('broker_rsp'):
-                request_data = json.loads(rdata['broker_rsp'])
-                if request_data.get('request-id'):
-                    log('Ignoring legacy broker_rsp without unit key as remote '
-                        'service supports unit specific replies', level=DEBUG)
-                else:
-                    log('Using legacy broker_rsp as remote service does not '
-                        'support unit specific replies', level=DEBUG)
-                    rsp = CephBrokerRsp(rdata['broker_rsp'])
-                    if not rsp.exit_code:
-                        return True
-
-    return False
-
-
-def get_broker_rsp_key():
-    """Return broker response key for this unit
-
-    This is the key that ceph is going to use to pass request status
-    information back to this unit
-    """
-    return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request, relation='ceph'):
-    """Send broker request if an equivalent request has not already been sent
-
-    @param request: A CephBrokerRq object
-    """
-    if is_request_sent(request, relation=relation):
-        log('Request already sent but not complete, not sending new request',
-            level=DEBUG)
-    else:
-        for rid in relation_ids(relation):
-            log('Sending request {}'.format(request.request_id), level=DEBUG)
-            relation_set(relation_id=rid, broker_req=request.request)
-            relation_set(relation_id=rid,
-                         relation_settings={'unit-name': local_unit()})
-
-
-def has_broker_rsp(rid=None, unit=None):
-    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
-
-    :param rid: The relation to check (default of None means current relation)
-    :type rid: Union[str, None]
-    :param unit: The remote unit to check (default of None means current unit)
-    :type unit: Union[str, None]
-    :returns: True if broker key exists and is set to something 'truthy'
-    :rtype: bool
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    return True if broker_rsp else False
-
-
-def is_broker_action_done(action, rid=None, unit=None):
-    """Check whether broker action has completed yet.
-
-    @param action: name of action to be performed
-    @returns True if action complete otherwise False
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    if not broker_rsp:
-        return False
-
-    rsp = CephBrokerRsp(broker_rsp)
-    unit_name = local_unit().partition('/')[2]
-    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
-    kvstore = kv()
-    val = kvstore.get(key=key)
-    if val and val == rsp.request_id:
-        return True
-
-    return False
-
-
-def mark_broker_action_done(action, rid=None, unit=None):
-    """Mark action as having been completed.
-
-    @param action: name of action to be performed
-    @returns None
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    if not broker_rsp:
-        return
-
-    rsp = CephBrokerRsp(broker_rsp)
-    unit_name = local_unit().partition('/')[2]
-    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
-    kvstore = kv()
-    kvstore.set(key=key, value=rsp.request_id)
-    kvstore.flush()
-
-
-class CephConfContext(object):
-    """Ceph config (ceph.conf) context.
-
-    Supports user-provided Ceph configuration settings. Users can provide a
-    dictionary as the value for the config-flags charm option containing
-    Ceph configuration settings keyed by their section in ceph.conf.
-    """
-    def __init__(self, permitted_sections=None):
-        self.permitted_sections = permitted_sections or []
-
-    def __call__(self):
-        conf = config('config-flags')
-        if not conf:
-            return {}
-
-        conf = config_flags_parser(conf)
-        if not isinstance(conf, dict):
-            log("Provided config-flags is not a dictionary - ignoring",
-                level=WARNING)
-            return {}
-
-        permitted = self.permitted_sections
-        if permitted:
-            diff = set(conf.keys()).difference(set(permitted))
-            if diff:
-                log("Config-flags contains invalid keys '%s' - they will be "
-                    "ignored" % (', '.join(diff)), level=WARNING)
-
-        ceph_conf = {}
-        for key in conf:
-            if permitted and key not in permitted:
-                log("Ignoring key '%s'" % key, level=WARNING)
-                continue
-
-            ceph_conf[key] = conf[key]
-        return ceph_conf
-
-
-class CephOSDConfContext(CephConfContext):
-    """Ceph config (ceph.conf) context.
-
-    Consolidates settings from config-flags via CephConfContext with
-    settings provided by the mons. The config-flag values are preserved in
-    conf['osd'], settings from the mons which do not clash with config-flag
-    settings are in conf['osd_from_client'] and finally settings which do
-    clash are in conf['osd_from_client_conflict']. Rather than silently
-    dropping the conflicting settings, they are provided in the context so
-    they can be rendered commented out, giving the admin some visibility.
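A sketch of how a charm might consume CephConfContext when rendering ceph.conf; the config-flags value shown is hypothetical:

from charmhelpers.contrib.storage.linux.ceph import CephConfContext

# Assuming: juju config ceph-mon config-flags="{'global': {'debug mon': '1/5'}}"
ctxt = CephConfContext(permitted_sections=['global'])()
# ctxt -> {'global': {'debug mon': '1/5'}}; keys outside the permitted
# sections are logged at WARNING and dropped.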
-    """
-
-    def __init__(self, permitted_sections=None):
-        super(CephOSDConfContext, self).__init__(
-            permitted_sections=permitted_sections)
-        try:
-            self.settings_from_mons = get_osd_settings('mon')
-        except OSDSettingConflict:
-            log(
-                "OSD settings from mons are inconsistent, ignoring them",
-                level=WARNING)
-            self.settings_from_mons = {}
-
-    def filter_osd_from_mon_settings(self):
-        """Filter settings from client relation against config-flags.
-
-        :returns: A tuple of (config-flag values,
-                  client settings which do not conflict with config-flag
-                  values,
-                  client settings which conflict with config-flag values)
-        :rtype: (OrderedDict, OrderedDict, OrderedDict)
-        """
-        ceph_conf = super(CephOSDConfContext, self).__call__()
-        conflicting_entries = {}
-        clear_entries = {}
-        for key, value in self.settings_from_mons.items():
-            if key in ceph_conf.get('osd', {}):
-                if ceph_conf['osd'][key] != value:
-                    conflicting_entries[key] = value
-            else:
-                clear_entries[key] = value
-        clear_entries = _order_dict_by_key(clear_entries)
-        conflicting_entries = _order_dict_by_key(conflicting_entries)
-        return ceph_conf, clear_entries, conflicting_entries
-
-    def __call__(self):
-        """Construct OSD config context.
-
-        Standard context with two additional special keys.
-            osd_from_client_conflict: client settings which conflict with
-                                      config-flag values
-            osd_from_client: settings which do not conflict with config-flag
-                             values
-
-        :returns: OSD config context dict.
-        :rtype: dict
-        """
-        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
-        conf['osd_from_client_conflict'] = osd_conflict
-        conf['osd_from_client'] = osd_clear
-        return conf
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index 04daea29..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from subprocess import (
-    check_call,
-    check_output,
-)
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
-    '''
-    Parse through 'losetup -a' output to determine currently mapped
-    loopback devices. Output is expected to look like:
-
-        /dev/loop0: [0807]:961814 (/tmp/my.img)
-
-    or:
-
-        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
-
-    :returns: dict: a dict mapping {loopback_dev: backing_file}
-    '''
-    loopbacks = {}
-    cmd = ['losetup', '-a']
-    output = check_output(cmd).decode('utf-8')
-    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
-    for dev, _, f in devs:
-        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
-    return loopbacks
-
-
-def create_loopback(file_path):
-    '''
-    Create a loopback device for a given backing file.
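For example (the path and size are hypothetical), a charm wanting a file-backed test device could do:

from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

# Reuses an existing mapping if one exists; otherwise creates the backing
# file with truncate(1) and maps it with losetup.
dev = ensure_loopback_device('/srv/test.img', '10G')  # e.g. '/dev/loop0'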
-
-    :returns: str: Full path to new loopback device (eg, /dev/loop0)
-    '''
-    file_path = os.path.abspath(file_path)
-    check_call(['losetup', '--find', file_path])
-    for d, f in loopback_devices().items():
-        if f == file_path:
-            return d
-
-
-def ensure_loopback_device(path, size):
-    '''
-    Ensure a loopback device exists for a given backing file path and size.
-    If a loopback device is not already mapped to the file, a new one will
-    be created.
-
-    TODO: Confirm size of found loopback device.
-
-    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
-    '''
-    for d, f in loopback_devices().items():
-        if f == path:
-            return d
-
-    if not os.path.exists(path):
-        cmd = ['truncate', '--size', size, path]
-        check_call(cmd)
-
-    return create_loopback(path)
-
-
-def is_mapped_loopback_device(device):
-    """
-    Checks if a given device name is an existing/mapped loopback device.
-    :param device: str: Full path to the device (eg, /dev/loop1).
-    :returns: str: Path to the backing file if it is a loopback device,
-    empty string otherwise
-    """
-    return loopback_devices().get(device, "")
diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index d0a57211..00000000
--- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-from subprocess import (
-    CalledProcessError,
-    check_call,
-    check_output,
-    Popen,
-    PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
-    '''
-    Deactivate any volume group associated with an LVM physical volume.
-
-    :param block_device: str: Full path to LVM physical volume
-    '''
-    vg = list_lvm_volume_group(block_device)
-    if vg:
-        cmd = ['vgchange', '-an', vg]
-        check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
-    '''
-    Determine whether a block device is initialized as an LVM PV.
-
-    :param block_device: str: Full path of block device to inspect.
-
-    :returns: boolean: True if block device is a PV, False if not.
-    '''
-    try:
-        check_output(['pvdisplay', block_device])
-        return True
-    except CalledProcessError:
-        return False
-
-
-def remove_lvm_physical_volume(block_device):
-    '''
-    Remove LVM PV signatures from a given block device.
-
-    :param block_device: str: Full path of block device to scrub.
-    '''
-    p = Popen(['pvremove', '-ff', block_device],
-              stdin=PIPE)
-    p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
-    '''
-    List LVM volume group associated with a given block device.
-
-    Assumes block device is a valid LVM PV.
-
-    :param block_device: str: Full path of block device to inspect.
- - :returns: str: Name of volume group associated with block device or None - ''' - vg = None - pvd = check_output(['pvdisplay', block_device]).splitlines() - for lvm in pvd: - lvm = lvm.decode('UTF-8') - if lvm.strip().startswith('VG Name'): - vg = ' '.join(lvm.strip().split()[2:]) - return vg - - -def create_lvm_physical_volume(block_device): - ''' - Initialize a block device as an LVM physical volume. - - :param block_device: str: Full path of block device to initialize. - - ''' - check_call(['pvcreate', block_device]) - - -def create_lvm_volume_group(volume_group, block_device): - ''' - Create an LVM volume group backed by a given block device. - - Assumes block device has already been initialized as an LVM PV. - - :param volume_group: str: Name of volume group to create. - :block_device: str: Full path of PV-initialized block device. - ''' - check_call(['vgcreate', volume_group, block_device]) - - -def list_logical_volumes(select_criteria=None, path_mode=False): - ''' - List logical volumes - - :param select_criteria: str: Limit list to those volumes matching this - criteria (see 'lvs -S help' for more details) - :param path_mode: bool: return logical volume name in 'vg/lv' format, this - format is required for some commands like lvextend - :returns: [str]: List of logical volumes - ''' - lv_diplay_attr = 'lv_name' - if path_mode: - # Parsing output logic relies on the column order - lv_diplay_attr = 'vg_name,' + lv_diplay_attr - cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] - if select_criteria: - cmd.extend(['--select', select_criteria]) - lvs = [] - for lv in check_output(cmd).decode('UTF-8').splitlines(): - if not lv: - continue - if path_mode: - lvs.append('/'.join(lv.strip().split())) - else: - lvs.append(lv.strip()) - return lvs - - -list_thin_logical_volume_pools = functools.partial( - list_logical_volumes, - select_criteria='lv_attr =~ ^t') - -list_thin_logical_volumes = functools.partial( - list_logical_volumes, - select_criteria='lv_attr =~ ^V') - - -def extend_logical_volume_by_device(lv_name, block_device): - ''' - Extends the size of logical volume lv_name by the amount of free space on - physical volume block_device. - - :param lv_name: str: name of logical volume to be extended (vg/lv format) - :param block_device: str: name of block_device to be allocated to lv_name - ''' - cmd = ['lvextend', lv_name, block_device] - check_call(cmd) - - -def create_logical_volume(lv_name, volume_group, size=None): - ''' - Create a new logical volume in an existing volume group - - :param lv_name: str: name of logical volume to be created. - :param volume_group: str: Name of volume group to use for the new volume. - :param size: str: Size of logical volume to create (100% if not supplied) - :raises subprocess.CalledProcessError: in the event that the lvcreate fails. - ''' - if size: - check_call([ - 'lvcreate', - '--yes', - '-L', - '{}'.format(size), - '-n', lv_name, volume_group - ]) - # create the lv with all the space available, this is needed because the - # system call is different for LVM - else: - check_call([ - 'lvcreate', - '--yes', - '-l', - '100%FREE', - '-n', lv_name, volume_group - ]) diff --git a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py deleted file mode 100644 index a3561760..00000000 --- a/ceph-mon/hooks/charmhelpers/contrib/storage/linux/utils.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
-    CalledProcessError,
-    check_call,
-    check_output,
-    call
-)
-
-
-def _luks_uuid(dev):
-    """
-    Check to see if dev is a LUKS encrypted volume, returning the UUID
-    of volume if it is.
-
-    :param: dev: path to block device to check.
-    :returns: str. UUID of LUKS device or None if not a LUKS device
-    """
-    try:
-        cmd = ['cryptsetup', 'luksUUID', dev]
-        return check_output(cmd).decode('UTF-8').strip()
-    except CalledProcessError:
-        return None
-
-
-def is_luks_device(dev):
-    """
-    Determine if dev is a LUKS-formatted block device.
-
-    :param: dev: A full path to a block device to check for LUKS header
-    presence
-    :returns: boolean: True if the device has a LUKS header, False otherwise.
-    """
-    return True if _luks_uuid(dev) else False
-
-
-def is_mapped_luks_device(dev):
-    """
-    Determine if dev is a mapped LUKS device.
-    :param: dev: A full path to a block device to be checked
-    :returns: boolean: indicates whether a device is mapped
-    """
-    _, dirs, _ = next(os.walk(
-        '/sys/class/block/{}/holders/'
-        .format(os.path.basename(os.path.realpath(dev))))
-    )
-    is_held = len(dirs) > 0
-    return is_held and is_luks_device(dev)
-
-
-def is_block_device(path):
-    '''
-    Confirm device at path is a valid block device node.
-
-    :returns: boolean: True if path is a block device, False if not.
-    '''
-    if not os.path.exists(path):
-        return False
-    return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
-    '''
-    Clear a block device of partition table. Relies on sgdisk, which is
-    installed as part of the 'gdisk' package in Ubuntu.
-
-    :param block_device: str: Full path of block device to clean.
-    '''
-    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
-    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
-    call(['sgdisk', '--zap-all', '--', block_device])
-    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
-    dev_end = check_output(['blockdev', '--getsz',
-                            block_device]).decode('UTF-8')
-    gpt_end = int(dev_end.split()[0]) - 100
-    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
-                'bs=1M', 'count=1'])
-    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
-                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
-    '''Given a device path, return True if that device is mounted, and False
-    if it isn't.
-
-    :param device: str: Full path of the device to check.
-    :returns: boolean: True if the path represents a mounted device, False if
-    it doesn't.
-    '''
-    try:
-        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
-    except Exception:
-        return False
-    return bool(re.search(r'MOUNTPOINT=".+"', out))
-
-
-def mkfs_xfs(device, force=False, inode_size=1024):
-    """Format device with XFS filesystem.
-
-    By default this should fail if the device already has a filesystem on it.
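These helpers are typically combined to guard a destructive format; a minimal sketch, with the device path hypothetical:

from charmhelpers.contrib.storage.linux.utils import (
    is_block_device,
    is_device_mounted,
    mkfs_xfs,
    zap_disk,
)

dev = '/dev/sdb'  # hypothetical OSD device
if is_block_device(dev) and not is_device_mounted(dev):
    zap_disk(dev)  # destructive: wipes partition table and GPT headers
    mkfs_xfs(dev, force=True)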
-    :param device: Full path to device to format
-    :ptype device: str
-    :param force: Force operation
-    :ptype force: boolean
-    :param inode_size: XFS inode size in bytes
-    :ptype inode_size: int"""
-    cmd = ['mkfs.xfs']
-    if force:
-        cmd.append("-f")
-
-    cmd += ['-i', "size={}".format(inode_size), device]
-    check_call(cmd)
diff --git a/ceph-mon/hooks/charmhelpers/core/__init__.py b/ceph-mon/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/ceph-mon/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/ceph-mon/hooks/charmhelpers/core/decorators.py b/ceph-mon/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index e7e95d17..00000000
--- a/ceph-mon/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-#  Edward Hope-Morley
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
-    log,
-    INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
-    """If the decorated function raises exception exc_type, allow num_retries
-    retry attempts before raising the exception.
-    """
-    def _retry_on_exception_inner_1(f):
-        def _retry_on_exception_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            while True:
-                try:
-                    return f(*args, **kwargs)
-                except exc_type:
-                    if not retries:
-                        raise
-
-                    delay = base_delay * multiplier
-                    multiplier += 1
-                    log("Retrying '%s' %d more times (delay=%s)" %
-                        (f.__name__, retries, delay), level=INFO)
-                    retries -= 1
-                    if delay:
-                        time.sleep(delay)
-
-        return _retry_on_exception_inner_2
-
-    return _retry_on_exception_inner_1
-
-
-def retry_on_predicate(num_retries, predicate_fun, base_delay=0):
-    """Retry based on return value
-
-    The return value of the decorated function is passed to the given predicate_fun. If the
-    result of the predicate is False, retry the decorated function up to num_retries times
-
-    An exponential backoff up to base_delay^num_retries seconds can be introduced by setting
-    base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay
-
-    :param num_retries: Max. number of retries to perform
-    :type num_retries: int
-    :param predicate_fun: Predicate function to determine if a retry is necessary
-    :type predicate_fun: callable
-    :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay)
-    :type base_delay: float
-    """
-    def _retry_on_pred_inner_1(f):
-        def _retry_on_pred_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            delay = base_delay
-            while True:
-                result = f(*args, **kwargs)
-                if predicate_fun(result) or retries <= 0:
-                    return result
-                delay *= multiplier
-                multiplier += 1
-                log("Result {}, retrying '{}' {} more times (delay={})".format(
-                    result, f.__name__, retries, delay), level=INFO)
-                retries -= 1
-                if delay:
-                    time.sleep(delay)
-
-        return _retry_on_pred_inner_2
-
-    return _retry_on_pred_inner_1
diff --git a/ceph-mon/hooks/charmhelpers/core/files.py b/ceph-mon/hooks/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b75..00000000
--- a/ceph-mon/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski '
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
-    """
-    Search for and replace the given pattern in filename.
-
-    :param filename: relative or absolute file path.
-    :param before: expression to be replaced (see 'man sed')
-    :param after: expression to replace with (see 'man sed')
-    :param flags: sed-compatible regex flags; for example, to make
-    the search and replace case insensitive, specify ``flags="i"``.
-    The ``g`` flag is always specified regardless, so you do not
-    need to remember to include it when overriding this parameter.
-    :returns: If the sed command exit code was zero then return,
-    otherwise raise CalledProcessError.
-    """
-    expression = r's/{0}/{1}/{2}'.format(before,
-                                         after, flags)
-
-    return subprocess.check_call(["sed", "-i", "-r", "-e",
-                                  expression,
-                                  os.path.expanduser(filename)])
diff --git a/ceph-mon/hooks/charmhelpers/core/fstab.py b/ceph-mon/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa9152..00000000
--- a/ceph-mon/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R.
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/ceph-mon/hooks/charmhelpers/core/hookenv.py b/ceph-mon/hooks/charmhelpers/core/hookenv.py deleted file mode 100644 index 370c3e8f..00000000 --- a/ceph-mon/hooks/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1636 +0,0 @@ -# Copyright 2013-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
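Typical use of the Fstab helper above goes through its classmethods; the device and mountpoint here are hypothetical:

from charmhelpers.core.fstab import Fstab

# add() skips devices that already have an entry and returns the Entry
# on success; remove_by_mountpoint() returns True if an entry was removed.
Fstab.add('/dev/loop0', '/mnt/test', 'xfs', options='noatime')
Fstab.remove_by_mountpoint('/mnt/test')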
- -"Interactions with the Juju environment" -# -# Authors: -# Charm Helpers Developers - -import copy -from distutils.version import LooseVersion -from enum import Enum -from functools import wraps -from collections import namedtuple, UserDict -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -from charmhelpers import deprecate - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() -SH_MAX_ARG = 131071 - - -RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' - 'This may not be compatible with software you are ' - 'running in your shell.') - - -class WORKLOAD_STATES(Enum): - ACTIVE = 'active' - BLOCKED = 'blocked' - MAINTENANCE = 'maintenance' - WAITING = 'waiting' - - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, str): - message = repr(message) - command += [message[:SH_MAX_ARG]] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -def function_log(message): - """Write a function progress message""" - command = ['function-log'] - if not isinstance(message, str): - message = repr(message) - command += [message[:SH_MAX_ARG]] - # Missing function-log should not cause failures in unit tests - # Send function_log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - message = "function-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def departing_unit(): - """The departing unit for the current relation hook. - - Available since juju 2.8. - - :returns: the departing unit, or None if the information isn't available. - :rtype: Optional[str] - """ - return os.environ.get('JUJU_DEPARTING_UNIT', None) - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. 
- for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Found but was unable to parse previous config data, ' - 'ignoring which will report all values as changed - {}' - .format(str(e)), level=ERROR) - return - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. 
- - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. - :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None, app=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if app is not None: - if unit is not None: - raise ValueError("Cannot use both 'unit' and 'app'") - _args.append('--app') - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - # unit or application name - if unit or app: - _args.append(unit or app) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -@cached -def _relation_set_accepts_file(): - """Return True if the juju relation-set command accepts a file. - - Cache the result as it won't change during the execution of a hook, and - thus we can make relation_set() more efficient by only checking for the - first relation_set() call. - - :returns: True if relation_set accepts a file. - :rtype: bool - :raises: subprocess.CalledProcessError if the check fails. - """ - return "--file" in subprocess.check_output( - ["relation-set", "--help"], universal_newlines=True) - - -def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - if app: - relation_cmd_line.append('--app') - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if _relation_set_accepts_file(): - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -def expected_peer_units(): - """Get a generator for units we expect to join peer relation based on - goal-state. - - The local unit is excluded from the result to make it easy to gauge - completion of all peers joining the relation with existing hook tools. - - Example usage: - log('peer {} of {} joined peer relation' - .format(len(related_units()), - len(list(expected_peer_units())))) - - This function will raise NotImplementedError if used with juju versions - without goal-state support. - - :returns: iterator - :rtype: types.GeneratorType - :raises: NotImplementedError - """ - if not has_juju_version("2.4.0"): - # goal-state first appeared in 2.4.0. - raise NotImplementedError("goal-state") - _goal_state = goal_state() - return (key for key in _goal_state['units'] - if '/' in key and key != local_unit()) - - -def expected_related_units(reltype=None): - """Get a generator for units we expect to join relation based on - goal-state. - - Note that you can not use this function for the peer relation, take a look - at expected_peer_units() for that. - - This function will raise KeyError if you request information for a - relation type for which juju goal-state does not have information. It will - raise NotImplementedError if used with juju versions without goal-state - support. - - Example usage: - log('participant {} of {} joined relation {}' - .format(len(related_units()), - len(list(expected_related_units())), - relation_type())) - - :param reltype: Relation type to list data for, default is to list data for - the relation type we are currently executing a hook for. - :type reltype: str - :returns: iterator - :rtype: types.GeneratorType - :raises: KeyError, NotImplementedError - """ - if not has_juju_version("2.4.4"): - # goal-state existed in 2.4.0, but did not list individual units to - # join a relation in 2.4.1 through 2.4.3. 
(LP: #1794739) - raise NotImplementedError("goal-state relation unit count") - reltype = reltype or relation_type() - _goal_state = goal_state() - return (key for key in _goal_state['relations'][reltype] if '/' in key) - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json representation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. - - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
- """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. 
- if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. - - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. 
-        if config_save is not None:
-            config().implicit_save = config_save
-
-    def register(self, name, function):
-        """Register a hook"""
-        self._hooks[name] = function
-
-    def execute(self, args):
-        """Execute a registered hook based on args[0]"""
-        _run_atstart()
-        hook_name = os.path.basename(args[0])
-        if hook_name in self._hooks:
-            try:
-                self._hooks[hook_name]()
-            except SystemExit as x:
-                if x.code is None or x.code == 0:
-                    _run_atexit()
-                raise
-            _run_atexit()
-        else:
-            raise UnregisteredHookError(hook_name)
-
-    def hook(self, *hook_names):
-        """Decorator, registering the wrapped function under the given
-        hook names (and always under its own function name as well)."""
-        def wrapper(decorated):
-            for hook_name in hook_names:
-                self.register(hook_name, decorated)
-            self.register(decorated.__name__, decorated)
-            if '_' in decorated.__name__:
-                self.register(
-                    decorated.__name__.replace('_', '-'), decorated)
-            return decorated
-        return wrapper
-
-
-class NoNetworkBinding(Exception):
-    pass
-
-
-def charm_dir():
-    """Return the root directory of the current charm"""
-    d = os.environ.get('JUJU_CHARM_DIR')
-    if d is not None:
-        return d
-    return os.environ.get('CHARM_DIR')
-
-
-def cmd_exists(cmd):
-    """Return True if the specified cmd exists in the path"""
-    return any(
-        os.access(os.path.join(path, cmd), os.X_OK)
-        for path in os.environ["PATH"].split(os.pathsep)
-    )
-
-
-@cached
-def action_get(key=None):
-    """Gets the value of an action parameter, or all key/value param pairs."""
-    cmd = ['action-get']
-    if key is not None:
-        cmd.append(key)
-    cmd.append('--format=json')
-    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-    return action_data
-
-
-@cached
-@deprecate("moved to action_get()", log=log)
-def function_get(key=None):
-    """
-    .. deprecated::
-        Gets the value of an action parameter, or all key/value param pairs.
-    """
-    cmd = ['function-get']
-    # Fallback for older charms.
-    if not cmd_exists('function-get'):
-        cmd = ['action-get']
-
-    if key is not None:
-        cmd.append(key)
-    cmd.append('--format=json')
-    function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-    return function_data
-
-
-def action_set(values):
-    """Sets the values to be returned after the action finishes."""
-    cmd = ['action-set']
-    for k, v in list(values.items()):
-        cmd.append('{}={}'.format(k, v))
-    subprocess.check_call(cmd)
-
-
-@deprecate("moved to action_set()", log=log)
-def function_set(values):
-    """
-    .. deprecated::
-        Sets the values to be returned after the function finishes.
-    """
-    cmd = ['function-set']
-    # Fallback for older charms.
-    if not cmd_exists('function-set'):
-        cmd = ['action-set']
-
-    for k, v in list(values.items()):
-        cmd.append('{}={}'.format(k, v))
-    subprocess.check_call(cmd)
-
-
-def action_fail(message):
-    """
-    Sets the action status to failed and sets the error message.
-
-    The results set by action_set are preserved.
-    """
-    subprocess.check_call(['action-fail', message])
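A brief usage sketch of the action helpers above, written as a hypothetical action handler; the action parameter name "format" and the result keys are illustrative, not part of charmhelpers:

    from charmhelpers.core.hookenv import action_fail, action_get, action_set

    def report_health():
        fmt = action_get('format') or 'plain'   # hypothetical parameter
        try:
            action_set({'outcome': 'success', 'format': fmt})
        except Exception as exc:
            # Results already recorded via action_set are preserved.
            action_fail('health report failed: {}'.format(exc))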
-@deprecate("moved to action_fail()", log=log)
-def function_fail(message):
-    """
-    .. deprecated::
-        Sets the function status to failed and sets the error message.
-
-    The results set by function_set are preserved.
-    """
-    cmd = ['function-fail']
-    # Fallback for older charms.
-    if not cmd_exists('function-fail'):
-        cmd = ['action-fail']
-    cmd.append(message)
-
-    subprocess.check_call(cmd)
-
-
-def action_name():
-    """Get the name of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_NAME')
-
-
-def function_name():
-    """Get the name of the currently executing function."""
-    return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
-
-
-def action_uuid():
-    """Get the UUID of the currently executing action."""
-    return os.environ.get('JUJU_ACTION_UUID')
-
-
-def function_id():
-    """Get the ID of the currently executing function."""
-    return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
-
-
-def action_tag():
-    """Get the tag for the currently executing action."""
-    return os.environ.get('JUJU_ACTION_TAG')
-
-
-def function_tag():
-    """Get the tag for the currently executing function."""
-    return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
-
-
-def status_set(workload_state, message, application=False):
-    """Set the workload state with a message
-
-    Use status-set to set the workload state with a message which is visible
-    to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message instead.
-
-    workload_state -- valid juju workload state. str or WORKLOAD_STATES
-    message        -- status update message
-    application    -- Whether this is an application state set
-    """
-    bad_state_msg = '{!r} is not a valid workload state'
-
-    if isinstance(workload_state, str):
-        try:
-            # Convert string to enum.
-            workload_state = WORKLOAD_STATES[workload_state.upper()]
-        except KeyError:
-            raise ValueError(bad_state_msg.format(workload_state))
-
-    if workload_state not in WORKLOAD_STATES:
-        raise ValueError(bad_state_msg.format(workload_state))
-
-    cmd = ['status-set']
-    if application:
-        cmd.append('--application')
-    cmd.extend([workload_state.value, message])
-    try:
-        ret = subprocess.call(cmd)
-        if ret == 0:
-            return
-    except OSError as e:
-        if e.errno != errno.ENOENT:
-            raise
-    log_message = 'status-set failed: {} {}'.format(workload_state.value,
-                                                    message)
-    log(log_message, level='INFO')
-
-
-def status_get():
-    """Retrieve the previously set juju workload state and message
-
-    If the status-get command is not found then assume this is juju < 1.23
-    and return 'unknown', ""
-
-    """
-    cmd = ['status-get', "--format=json", "--include-data"]
-    try:
-        raw_status = subprocess.check_output(cmd)
-    except OSError as e:
-        if e.errno == errno.ENOENT:
-            return ('unknown', "")
-        else:
-            raise
-    else:
-        status = json.loads(raw_status.decode("UTF-8"))
-        return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
-    def inner_translate_exc1(f):
-        @wraps(f)
-        def inner_translate_exc2(*args, **kwargs):
-            try:
-                return f(*args, **kwargs)
-            except from_exc:
-                raise to_exc
-
-        return inner_translate_exc2
-
-    return inner_translate_exc1
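A minimal sketch of status_set()/status_get() above, as they might be called from a hook; the status messages are illustrative:

    from charmhelpers.core.hookenv import status_get, status_set

    status_set('maintenance', 'bootstrapping cluster')
    state, message = status_get()   # ('maintenance', 'bootstrapping cluster')
    if state != 'blocked':
        status_set('active', 'ready')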
""" - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -@cached -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. The and provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The and provided must match a payload that has been previously - registered with juju using payload-register. The must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. 
-@cached
-def juju_version():
-    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
-    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
-    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
-    return subprocess.check_output([jujud, 'version'],
-                                   universal_newlines=True).strip()
-
-
-def has_juju_version(minimum_version):
-    """Return True if the Juju version is at least the provided version"""
-    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
-    '''Schedule a callback to run before the main hook.
-
-    Callbacks are run in the order they were added.
-
-    This is useful for modules and classes to perform initialization
-    and inject behavior. In particular:
-
-        - Run common code before all of your hooks, such as logging
-          the hook name or interesting relation data.
-        - Defer object or module initialization that requires a hook
-          context until we know there actually is a hook context,
-          making testing easier.
-        - Rather than requiring charm authors to include boilerplate to
-          invoke your helper's behavior, have it run automatically if
-          your object is instantiated or module imported.
-
-    This is not at all useful after your hook framework has been launched.
-    '''
-    global _atstart
-    _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
-    '''Schedule a callback to run on successful hook completion.
-
-    Callbacks are run in the reverse order that they were added.'''
-    _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
-    '''Hook frameworks must invoke this before running the main hook body.'''
-    global _atstart
-    for callback, args, kwargs in _atstart:
-        callback(*args, **kwargs)
-    del _atstart[:]
-
-
-def _run_atexit():
-    '''Hook frameworks must invoke this after the main hook body has
-    successfully completed. Do not invoke it if the hook fails.'''
-    global _atexit
-    for callback, args, kwargs in reversed(_atexit):
-        callback(*args, **kwargs)
-    del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
-    '''
-    Deprecated since Juju 2.3; use network_get()
-
-    Retrieve the primary network address for a named binding
-
-    :param binding: string. The name of a relation or extra-binding
-    :return: string. The primary IP address for the named binding
-    :raise: NotImplementedError if run on Juju < 2.0
-    '''
-    cmd = ['network-get', '--primary-address', binding]
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        if 'no network config found for binding' in e.output.decode('UTF-8'):
-            raise NoNetworkBinding("No network binding for {}"
-                                   .format(binding))
-        else:
-            raise
-    return response
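A short sketch of network_get_primary_address() just above, with a fallback a charm might use when the binding has no network; the binding name "public" is illustrative:

    from charmhelpers.core.hookenv import (
        NoNetworkBinding, network_get_primary_address, unit_private_ip,
    )

    try:
        addr = network_get_primary_address('public')
    except NoNetworkBinding:
        addr = unit_private_ip()  # pre-binding fallback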
- """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. - - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. 
-def egress_subnets(rid=None, unit=None):
-    """
-    Retrieve the egress-subnets from a relation.
-
-    This function is to be used on the providing side of the
-    relation, and provides the ranges of addresses that client
-    connections may come from. The result is uninteresting on
-    the consuming side of a relation (unit == local_unit()).
-
-    Returns a stable list of subnets in CIDR format.
-    eg. ['192.168.1.0/24', '2001::F00F/128']
-
-    If egress-subnets is not available, falls back to using the published
-    ingress-address, or finally private-address.
-
-    :param rid: string relation id
-    :param unit: string unit name
-    :side effect: calls relation_get
-    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
-    """
-    def _to_range(addr):
-        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
-            addr += '/32'
-        elif ':' in addr and '/' not in addr:  # IPv6
-            addr += '/128'
-        return addr
-
-    settings = relation_get(rid=rid, unit=unit)
-    if 'egress-subnets' in settings:
-        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
-    if 'ingress-address' in settings:
-        return [_to_range(settings['ingress-address'])]
-    if 'private-address' in settings:
-        return [_to_range(settings['private-address'])]
-    return []  # Should never happen
-
-
-def unit_doomed(unit=None):
-    """Determines if the unit is being removed from the model
-
-    Requires Juju 2.4.1.
-
-    :param unit: string unit name, defaults to local_unit
-    :side effect: calls goal_state
-    :side effect: calls local_unit
-    :side effect: calls has_juju_version
-    :return: True if the unit is being removed, already gone, or never existed
-    """
-    if not has_juju_version("2.4.1"):
-        # We cannot risk blindly returning False for 'we don't know',
-        # because that could cause data loss; if call sites don't
-        # need an accurate answer, they likely don't need this helper
-        # at all.
-        # goal-state existed in 2.4.0, but did not handle removals
-        # correctly until 2.4.1.
-        raise NotImplementedError("is_doomed")
-    if unit is None:
-        unit = local_unit()
-    gs = goal_state()
-    units = gs.get('units', {})
-    if unit not in units:
-        return True
-    # I don't think 'dead' units ever show up in the goal-state, but
-    # check anyway in addition to 'dying'.
-    return units[unit]['status'] in ('dying', 'dead')
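A hedged sketch of egress_subnets() above on the providing side of a relation; the relation name is illustrative, and what a charm does with the CIDRs (here, just logging) is application-specific:

    from charmhelpers.core.hookenv import (
        egress_subnets, iter_units_for_relation_name, log,
    )

    for u in iter_units_for_relation_name('client'):
        for cidr in egress_subnets(rid=u.rid, unit=u.unit):
            log('allowing client traffic from {}'.format(cidr))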
-def env_proxy_settings(selected_settings=None):
-    """Get proxy settings from process environment variables.
-
-    Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) and juju-ftp-proxy in a format suitable for passing to
-    an application that reacts to proxy settings passed as environment
-    variables. Some applications support lowercase or uppercase notation
-    (e.g. curl), some support only lowercase (e.g. wget); there are also
-    subjectively rare cases of only uppercase notation support. no_proxy
-    CIDR and wildcard support also varies between runtimes and applications
-    as there is no enforced standard.
-
-    Some applications may connect to multiple destinations and expose config
-    options that would affect only proxy settings for a specific destination;
-    these should be handled in charms in an application-specific manner.
-
-    :param selected_settings: format only a subset of possible settings
-    :type selected_settings: list
-    :rtype: Optional[dict[str, str]]
-    """
-    SUPPORTED_SETTINGS = {
-        'http': 'HTTP_PROXY',
-        'https': 'HTTPS_PROXY',
-        'no_proxy': 'NO_PROXY',
-        'ftp': 'FTP_PROXY'
-    }
-    if selected_settings is None:
-        selected_settings = SUPPORTED_SETTINGS
-
-    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
-                     if k in selected_settings]
-    proxy_settings = {}
-    for var in selected_vars:
-        var_val = os.getenv(var)
-        if var_val:
-            proxy_settings[var] = var_val
-            proxy_settings[var.lower()] = var_val
-        # Now handle juju-prefixed environment variables. The legacy vs new
-        # environment variable usage is mutually exclusive
-        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
-        if charm_var_val:
-            proxy_settings[var] = charm_var_val
-            proxy_settings[var.lower()] = charm_var_val
-    if 'no_proxy' in proxy_settings:
-        if _contains_range(proxy_settings['no_proxy']):
-            log(RANGE_WARNING, level=WARNING)
-    return proxy_settings if proxy_settings else None
-
-
-def _contains_range(addresses):
-    """Check for cidr or wildcard domain in a string.
-
-    Given a string comprising a comma separated list of ip addresses
-    and domain names, determine whether the string contains IP ranges
-    or wildcard domains.
-
-    :param addresses: comma separated list of domains and ip addresses.
-    :type addresses: str
-    """
-    return (
-        # Test for cidr (e.g. 10.20.20.0/24)
-        "/" in addresses or
-        # Test for wildcard domains (*.foo.com or .foo.com)
-        "*" in addresses or
-        addresses.startswith(".") or
-        ",." in addresses or
-        " ." in addresses)
-
-
-def is_subordinate():
-    """Check whether charm is subordinate in unit metadata.
-
-    :returns: True if unit is subordinate, False otherwise.
-    :rtype: bool
-    """
-    return metadata().get('subordinate') is True
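Before the host.py hunk below, a small sketch of env_proxy_settings() defined above: merging Juju-provided proxy variables into a child-process environment. The curl invocation and URL are illustrative only:

    import os
    import subprocess

    from charmhelpers.core.hookenv import env_proxy_settings

    env = dict(os.environ)
    env.update(env_proxy_settings(selected_settings=['http', 'https']) or {})
    subprocess.check_call(['curl', '-sSLO', 'https://example.com/some-file'],
                          env=env)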
diff --git a/ceph-mon/hooks/charmhelpers/core/host.py b/ceph-mon/hooks/charmhelpers/core/host.py
deleted file mode 100644
index ad2cab46..00000000
--- a/ceph-mon/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,1304 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-#  Nick Moffitt
-#  Matthew Wedgwood
-
-import errno
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-
-from contextlib import contextmanager
-from collections import OrderedDict, defaultdict
-from .hookenv import log, INFO, DEBUG, local_unit, charm_name
-from .fstab import Fstab
-from charmhelpers.osplatform import get_platform
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
-    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-        get_distrib_codename,
-        arch
-    )  # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
-    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-    )  # flake8: noqa -- ignore F401 for this import
-
-UPDATEDB_PATH = '/etc/updatedb.conf'
-CA_CERT_DIR = '/usr/local/share/ca-certificates'
-
-
-def service_start(service_name, **kwargs):
-    """Start a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be started. The
-    following example starts the ceph-osd service for instance id=4:
-
-    service_start('ceph-osd', id=4)
-
-    :param service_name: the name of the service to start
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('start', service_name, **kwargs)
-
-
-def service_stop(service_name, **kwargs):
-    """Stop a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be stopped. The
-    following example stops the ceph-osd service for instance id=4:
-
-    service_stop('ceph-osd', id=4)
-
-    :param service_name: the name of the service to stop
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('stop', service_name, **kwargs)
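A tiny usage sketch of the start/stop helpers above, including the upstart instance-id case their docstrings describe:

    from charmhelpers.core.host import service_start, service_stop

    service_stop('ceph-osd', id=4)  # upstart instance, per the docstring
    service_start('ceph-mon')       # plain service on any init system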
-def service_enable(service_name, **kwargs):
-    """Enable a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be enabled. The
-    following example enables the ceph-osd service for instance id=4:
-
-    service_enable('ceph-osd', id=4)
-
-    :param service_name: the name of the service to enable
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    return service('enable', service_name, **kwargs)
-
-
-def service_restart(service_name, **kwargs):
-    """Restart a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be restarted. The
-    following example restarts the ceph-osd service for instance id=4:
-
-    service_restart('ceph-osd', id=4)
-
-    :param service_name: the name of the service to restart
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    return service('restart', service_name, **kwargs)
- """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. - """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(service_name=service_name): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Re-enable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(service_name=service_name): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. 
- """ - if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in kwargs.items(): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. - """ - if init_is_systemd(service_name=service_name): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in kwargs.items(): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(service_name=None): - """ - Returns whether the host uses systemd for the specified service. - - @param Optional[str] service_name: specific name of service - """ - if str(service_name).startswith("snap."): - return True - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. 
- - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - """Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. 
-    :param str inactive: Set the number of days of inactivity after a password
-        has expired before the account is locked.
-        -1 will remove an account's inactivity.
-    :param str mindays: Set the minimum number of days between password
-        changes to MIN_DAYS.
-        0 indicates the password can be changed anytime.
-    :param str maxdays: Set the maximum number of days during which a
-        password is valid.
-        -1 as MAX_DAYS will remove checking maxdays
-    :param str root: Apply changes in the CHROOT_DIR directory
-    :param str warndays: Set the number of days of warning before a password
-        change is required
-    :raises subprocess.CalledProcessError: if call to chage fails
-    """
-    cmd = ['chage']
-    if root:
-        cmd.extend(['--root', root])
-    if lastday:
-        cmd.extend(['--lastday', lastday])
-    if expiredate:
-        cmd.extend(['--expiredate', expiredate])
-    if inactive:
-        cmd.extend(['--inactive', inactive])
-    if mindays:
-        cmd.extend(['--mindays', mindays])
-    if maxdays:
-        cmd.extend(['--maxdays', maxdays])
-    if warndays:
-        cmd.extend(['--warndays', warndays])
-    cmd.append(username)
-    subprocess.check_call(cmd)
-
-
-remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
-
-
-def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
-    """Replicate the contents of a path"""
-    options = options or ['--delete', '--executability']
-    cmd = ['/usr/bin/rsync', flags]
-    if timeout:
-        cmd = ['timeout', str(timeout)] + cmd
-    cmd.extend(options)
-    cmd.append(from_path)
-    cmd.append(to_path)
-    log(" ".join(cmd))
-    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
-    """Create a symbolic link"""
-    log("Symlinking {} as {}".format(source, destination))
-    cmd = [
-        'ln',
-        '-sf',
-        source,
-        destination,
-    ]
-    subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
-    """Create a directory"""
-    log("Making dir {} {}:{} {:o}".format(path, owner, group,
-                                          perms))
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    realpath = os.path.abspath(path)
-    path_exists = os.path.exists(realpath)
-    if path_exists and force:
-        if not os.path.isdir(realpath):
-            log("Removing non-directory file {} prior to mkdir()".format(path))
-            os.unlink(realpath)
-            os.makedirs(realpath, perms)
-    elif not path_exists:
-        os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
-    os.chmod(realpath, perms)
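A quick sketch of the filesystem helpers above; the paths and ownership are illustrative:

    from charmhelpers.core.host import mkdir, symlink

    mkdir('/var/lib/example', owner='root', group='root', perms=0o755)
    symlink('/var/lib/example', '/srv/example')  # ln -sf semantics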
-def write_file(path, content, owner='root', group='root', perms=0o444):
-    """Create or overwrite a file with the contents of a byte string."""
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    # let's see if we can grab the file and compare the contents, to avoid
-    # doing a write.
-    existing_content = None
-    existing_uid, existing_gid, existing_perms = None, None, None
-    try:
-        with open(path, 'rb') as target:
-            existing_content = target.read()
-        stat = os.stat(path)
-        existing_uid, existing_gid, existing_perms = (
-            stat.st_uid, stat.st_gid, stat.st_mode
-        )
-    except Exception:
-        pass
-    if content != existing_content:
-        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
-            level=DEBUG)
-        with open(path, 'wb') as target:
-            os.fchown(target.fileno(), uid, gid)
-            os.fchmod(target.fileno(), perms)
-            if isinstance(content, str):
-                content = content.encode('UTF-8')
-            target.write(content)
-        return
-    # the contents were the same, but we might still need to change the
-    # ownership or permissions.
-    if existing_uid != uid:
-        log("Changing uid on already existing content: {} -> {}"
-            .format(existing_uid, uid), level=DEBUG)
-        os.chown(path, uid, -1)
-    if existing_gid != gid:
-        log("Changing gid on already existing content: {} -> {}"
-            .format(existing_gid, gid), level=DEBUG)
-        os.chown(path, -1, gid)
-    if existing_perms != perms:
-        log("Changing permissions on existing content: {} -> {}"
-            .format(existing_perms, perms), level=DEBUG)
-        os.chmod(path, perms)
-
-
-def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab"""
-    return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file"""
-    return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
-    """Mount a filesystem at a particular mountpoint"""
-    cmd_args = ['mount']
-    if options is not None:
-        cmd_args.extend(['-o', options])
-    cmd_args.extend([device, mountpoint])
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_add(device, mountpoint, filesystem, options=options)
-    return True
-
-
-def umount(mountpoint, persist=False):
-    """Unmount a filesystem"""
-    cmd_args = ['umount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_remove(mountpoint)
-    return True
-
-
-def mounts():
-    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
-    with open('/proc/mounts') as f:
-        # [['/mount/point','/dev/path'],[...]]
-        system_mounts = [m[1::-1] for m in [l.strip().split()
-                                            for l in f.readlines()]]
-    return system_mounts
-
-
-def fstab_mount(mountpoint):
-    """Mount filesystem using fstab"""
-    cmd_args = ['mount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {}\n{}'.format(mountpoint, e.output))
-        return False
-    return True
-
-
-def file_hash(path, hash_type='md5'):
-    """Generate a hash checksum of the contents of 'path' or None if not
-    found.
-
-    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
-        such as md5, sha1, sha256, sha512, etc.
-    """
-    if os.path.exists(path):
-        h = getattr(hashlib, hash_type)()
-        with open(path, 'rb') as source:
-            h.update(source.read())
-        return h.hexdigest()
-    else:
-        return None
-
-
-def path_hash(path):
-    """Generate a hash checksum of all files matching 'path'. Standard
-    wildcards like '*' and '?' are supported, see documentation for the
-    'glob' module for more information.
-
-    :return: dict: A { filename: hash } dictionary for all matched files.
-        Empty if none found.
-    """
-    return {
-        filename: file_hash(filename)
-        for filename in glob.iglob(path)
-    }
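A short sketch of the checksum helpers above; the path is illustrative, and check_hash() below raises ChecksumError on a mismatch:

    from charmhelpers.core.host import check_hash, file_hash

    digest = file_hash('/etc/example.conf', hash_type='sha256')
    if digest is not None:  # file_hash() returns None for a missing file
        check_hash('/etc/example.conf', digest, hash_type='sha256')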
-def check_hash(path, checksum, hash_type='md5'):
-    """Validate a file using a cryptographic checksum.
-
-    :param str checksum: Value of the checksum used to validate the file.
-    :param str hash_type: Hash algorithm used to generate `checksum`.
-        Can be any hash algorithm supported by :mod:`hashlib`,
-        such as md5, sha1, sha256, sha512, etc.
-    :raises ChecksumError: If the file fails the checksum
-
-    """
-    actual_checksum = file_hash(path, hash_type)
-    if checksum != actual_checksum:
-        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
-    """A class derived from ValueError to indicate the checksum failed."""
-    pass
-
-
-class restart_on_change(object):
-    """Decorator and context manager to handle restarts.
-
-    Usage:
-
-        @restart_on_change(restart_map, ...)
-        def function_that_might_trigger_a_restart(...)
-            ...
-
-    Or:
-
-        with restart_on_change(restart_map, ...):
-            do_stuff_that_might_trigger_a_restart()
-            ...
-    """
-
-    def __init__(self, restart_map, stopstart=False, restart_functions=None,
-                 can_restart_now_f=None, post_svc_restart_f=None,
-                 pre_restarts_wait_f=None):
-        """
-        :param restart_map: {file: [service, ...]}
-        :type restart_map: Dict[str, List[str,]]
-        :param stopstart: whether to stop, start or restart a service
-        :type stopstart: boolean
-        :param restart_functions: nonstandard functions to use to restart
-                                  services {svc: func, ...}
-        :type restart_functions: Dict[str, Callable[[str], None]]
-        :param can_restart_now_f: A function used to check if the restart is
-                                  permitted.
-        :type can_restart_now_f: Callable[[str, List[str]], boolean]
-        :param post_svc_restart_f: A function run after a service has
-                                   restarted.
-        :type post_svc_restart_f: Callable[[str], None]
-        :param pre_restarts_wait_f: A function called before any restarts.
-        :type pre_restarts_wait_f: Callable[None, None]
-        """
-        self.restart_map = restart_map
-        self.stopstart = stopstart
-        self.restart_functions = restart_functions
-        self.can_restart_now_f = can_restart_now_f
-        self.post_svc_restart_f = post_svc_restart_f
-        self.pre_restarts_wait_f = pre_restarts_wait_f
-
-    def __call__(self, f):
-        """Work like a decorator.
-
-        Returns a wrapped function that performs the restart if triggered.
-
-        :param f: The function that is being wrapped.
-        :type f: Callable[[Any], Any]
-        :returns: the wrapped function
-        :rtype: Callable[[Any], Any]
-        """
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)),
-                self.restart_map,
-                stopstart=self.stopstart,
-                restart_functions=self.restart_functions,
-                can_restart_now_f=self.can_restart_now_f,
-                post_svc_restart_f=self.post_svc_restart_f,
-                pre_restarts_wait_f=self.pre_restarts_wait_f)
-        return wrapped_f
-
-    def __enter__(self):
-        """Enter the runtime context related to this object."""
-        self.checksums = _pre_restart_on_change_helper(self.restart_map)
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        """Exit the runtime context related to this object.
-
-        The parameters describe the exception that caused the context to be
-        exited. If the context was exited without an exception, all three
-        arguments will be None.
-        """
-        if exc_type is None:
-            _post_restart_on_change_helper(
-                self.checksums,
-                self.restart_map,
-                stopstart=self.stopstart,
-                restart_functions=self.restart_functions,
-                can_restart_now_f=self.can_restart_now_f,
-                post_svc_restart_f=self.post_svc_restart_f,
-                pre_restarts_wait_f=self.pre_restarts_wait_f)
-        # All is good, so return False; any exceptions will propagate.
-        return False
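A minimal sketch of restart_on_change above in both decorator and context-manager form; the config path and service name are illustrative:

    from charmhelpers.core.host import restart_on_change, write_file

    @restart_on_change({'/etc/example/example.conf': ['example-service']})
    def render_config(content):
        write_file('/etc/example/example.conf', content)

    with restart_on_change({'/etc/example/example.conf': ['example-service']}):
        write_file('/etc/example/example.conf', 'updated contents')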
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
-                             restart_functions=None,
-                             can_restart_now_f=None,
-                             post_svc_restart_f=None,
-                             pre_restarts_wait_f=None):
-    """Helper function to perform the restart_on_change function.
-
-    This is provided for decorators to restart services if files described
-    in the restart_map have changed after an invocation of lambda_f().
-
-    This function allows for a number of helper functions to be passed.
-
-    `restart_functions` is a map with a service as the key and the
-    corresponding value being the function to call to restart the service.
-    For example if `restart_functions={'some-service': my_restart_func}`
-    then `my_restart_func` should be a function which takes one argument
-    which is the service name to be restarted.
-
-    `can_restart_now_f` is a function which checks that a restart is
-    permitted. It should return a bool which indicates if a restart is
-    allowed and should take a service name (str) and a list of changed
-    files (List[str]) as arguments.
-
-    `post_svc_restart_f` is a function which runs after a service has been
-    restarted. It takes the service name that was restarted as an argument.
-
-    `pre_restarts_wait_f` is a function which is called before any restarts
-    occur. The use case for this is an application which wants to try and
-    stagger restarts between units.
-
-    :param lambda_f: function to call.
-    :type lambda_f: Callable[[], ANY]
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :param stopstart: whether to stop, start or restart a service
-    :type stopstart: boolean
-    :param restart_functions: nonstandard functions to use to restart
-                              services {svc: func, ...}
-    :type restart_functions: Dict[str, Callable[[str], None]]
-    :param can_restart_now_f: A function used to check if the restart is
-                              permitted.
-    :type can_restart_now_f: Callable[[str, List[str]], boolean]
-    :param post_svc_restart_f: A function run after a service has
-                               restarted.
-    :type post_svc_restart_f: Callable[[str], None]
-    :param pre_restarts_wait_f: A function called before any restarts.
-    :type pre_restarts_wait_f: Callable[None, None]
-    :returns: result of lambda_f()
-    :rtype: ANY
-    """
-    checksums = _pre_restart_on_change_helper(restart_map)
-    r = lambda_f()
-    _post_restart_on_change_helper(checksums,
-                                   restart_map,
-                                   stopstart,
-                                   restart_functions,
-                                   can_restart_now_f,
-                                   post_svc_restart_f,
-                                   pre_restarts_wait_f)
-    return r
-
-
-def _pre_restart_on_change_helper(restart_map):
-    """Take a snapshot of file hashes.
-
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :returns: Dictionary of file paths and the files checksum.
-    :rtype: Dict[str, str]
-    """
-    return {path: path_hash(path) for path in restart_map}
-
-
-def _post_restart_on_change_helper(checksums,
-                                   restart_map,
-                                   stopstart=False,
-                                   restart_functions=None,
-                                   can_restart_now_f=None,
-                                   post_svc_restart_f=None,
-                                   pre_restarts_wait_f=None):
-    """Check whether files have changed.
-
-    :param checksums: Dictionary of file paths and the files checksum.
-    :type checksums: Dict[str, str]
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :param stopstart: whether to stop, start or restart a service
-    :type stopstart: boolean
-    :param restart_functions: nonstandard functions to use to restart
-                              services {svc: func, ...}
-    :type restart_functions: Dict[str, Callable[[str], None]]
-    :param can_restart_now_f: A function used to check if the restart is
-                              permitted.
-    :type can_restart_now_f: Callable[[str, List[str]], boolean]
-    :param post_svc_restart_f: A function run after a service has
-        restarted.
- :type post_svc_restart_f: Callable[[str], None] - :param pre_restarts_wait_f: A function called before any restarts. - :type pre_restarts_wait_f: Callable[None, None] - """ - if restart_functions is None: - restart_functions = {} - changed_files = defaultdict(list) - restarts = [] - # create a list of lists of the services to restart - for path, services in restart_map.items(): - if path_hash(path) != checksums[path]: - restarts.append(services) - for svc in services: - changed_files[svc].append(path) - # create a flat list of ordered services without duplicates from lists - services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) - if services_list: - if pre_restarts_wait_f: - pre_restarts_wait_f() - actions = ('stop', 'start') if stopstart else ('restart',) - for service_name in services_list: - if can_restart_now_f: - if not can_restart_now_f(service_name, - changed_files[service_name]): - continue - if service_name in restart_functions: - restart_functions[service_name](service_name) - else: - for action in actions: - service(action, service_name) - if post_svc_restart_f: - post_svc_restart_f(service_name) - - -def pwgen(length=None): - """Generate a random password.""" - if length is None: - # A random length is ok to use a weak PRNG - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def is_phy_iface(interface): - """Returns True if interface is not virtual, otherwise False.""" - if interface: - sys_net = '/sys/class/net' - if os.path.isdir(sys_net): - for iface in glob.glob(os.path.join(sys_net, '*')): - if '/virtual/' in os.path.realpath(iface): - continue - - if interface == os.path.basename(iface): - return True - - return False - - -def get_bond_master(interface): - """Returns bond master if interface is bond slave otherwise None. 
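The services_list construction in the post-restart helper above is worth a note: it flattens the per-file service lists while preserving first-seen order and dropping duplicates. A self-contained illustration:

    import itertools
    from collections import OrderedDict

    restarts = [['apache2', 'haproxy'], ['haproxy', 'memcached']]
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    # -> ['apache2', 'haproxy', 'memcached']: order kept, duplicate dropped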
- - NOTE: the provided interface is expected to be physical - """ - if interface: - iface_path = '/sys/class/net/%s' % (interface) - if os.path.exists(iface_path): - if '/virtual/' in os.path.realpath(iface_path): - return None - - master = os.path.join(iface_path, 'master') - if os.path.exists(master): - master = os.path.realpath(master) - # make sure it is a bond master - if os.path.exists(os.path.join(master, 'bonding')): - return os.path.basename(master) - - return None - - -def list_nics(nic_type=None): - """Return a list of nics of given type(s)""" - if isinstance(nic_type, str): - int_types = [nic_type] - else: - int_types = nic_type - - interfaces = [] - if nic_type: - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output( - cmd).decode('UTF-8', errors='replace') - ip_output = ip_output.split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + - r'[0-9]+\.[0-9]+)@.*', line) - if matched: - iface = matched.groups()[0] - else: - iface = line.split()[1].replace(":", "") - - if iface not in interfaces: - interfaces.append(iface) - else: - cmd = ['ip', 'a'] - ip_output = subprocess.check_output( - cmd).decode('UTF-8', errors='replace').split('\n') - ip_output = (line.strip() for line in ip_output if line) - - key = re.compile(r'^[0-9]+:\s+(.+):') - for line in ip_output: - matched = re.search(key, line) - if matched: - iface = matched.group(1) - iface = iface.partition("@")[0] - if iface not in interfaces: - interfaces.append(iface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - """Set the Maximum Transmission Unit (MTU) on a network interface.""" - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - """Return the Maximum Transmission Unit (MTU) for a network interface.""" - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output( - cmd).decode('UTF-8', errors='replace').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - """Return the Media Access Control (MAC) for a network interface.""" - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -@contextmanager -def chdir(directory): - """Change the current working directory to a different directory for a code - block and return the previous directory after the block exits. Useful to - run commands from a specified directory. - - :param str directory: The directory path to change to for this context. - """ - cur = os.getcwd() - try: - yield os.chdir(directory) - finally: - os.chdir(cur) - - -def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """Recursively change user and group ownership of files and directories - in given path. Doesn't chown path itself by default, only its children. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. 
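A usage sketch for the chdir() context manager above; the directory and command are examples only:

    import subprocess
    from charmhelpers.core.host import chdir

    with chdir('/var/lib/myapp'):                       # hypothetical path
        subprocess.check_call(['tar', 'xf', '/tmp/payload.tar'])
    # the previous working directory is restored here, even on error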
- :param bool follow_links: Also follow and chown links if True - :param bool chowntopdir: Also chown path itself if True - """ - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - if chowntopdir: - broken_symlink = os.path.lexists(path) and not os.path.exists(path) - if not broken_symlink: - chown(path, uid, gid) - for root, dirs, files in os.walk(path, followlinks=follow_links): - for name in dirs + files: - full = os.path.join(root, name) - try: - chown(full, uid, gid) - except (IOError, OSError) as e: - # Intended to ignore "file not found". - if e.errno == errno.ENOENT: - pass - - -def lchownr(path, owner, group): - """Recursively change user and group ownership of files and directories - in a given path, not following symbolic links. See the documentation for - 'os.lchown' for more information. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - """ - chownr(path, owner, group, follow_links=False) - - -def owner(path): - """Returns a tuple containing the username & groupname owning the path. - - :param str path: the string path to retrieve the ownership - :return tuple(str, str): A (username, groupname) tuple containing the - name of the user and group owning the path. - :raises OSError: if the specified path does not exist - """ - stat = os.stat(path) - username = pwd.getpwuid(stat.st_uid)[0] - groupname = grp.getgrgid(stat.st_gid)[0] - return username, groupname - - -def get_total_ram(): - """The total amount of system RAM in bytes. - - This is what is reported by the OS, and may be overcommitted when - there are multiple containers hosted on the same machine. - """ - with open('/proc/meminfo', 'r') as f: - for line in f.readlines(): - if line: - key, value, unit = line.split() - if key == 'MemTotal:': - assert unit == 'kB', 'Unknown unit' - return int(value) * 1024 # Classic, not KiB. - raise NotImplementedError() - - -UPSTART_CONTAINER_TYPE = '/run/container_type' - - -def is_container(): - """Determine whether unit is running in a container - - @return: boolean indicating if unit is in a container - """ - if init_is_systemd(): - # Detect using systemd-detect-virt - return subprocess.call(['systemd-detect-virt', - '--container']) == 0 - else: - # Detect using upstart container file marker - return os.path.exists(UPSTART_CONTAINER_TYPE) - - -def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): - """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. - - This method has no effect if the path specified by updatedb_path does not - exist or is not a file. 
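A short sketch pairing chownr() with owner() from above; the path, user, and group are hypothetical:

    from charmhelpers.core.host import chownr, owner

    # chowntopdir=True also chowns the directory itself, not just its
    # children (the default is children-only).
    chownr('/var/lib/myapp', 'myapp', 'myapp', chowntopdir=True)
    assert owner('/var/lib/myapp') == ('myapp', 'myapp')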
- - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time - - -def ca_cert_absolute_path(basename_without_extension): - """Returns absolute path to CA certificate. - - :param basename_without_extension: Filename without extension - :type basename_without_extension: str - :returns: Absolute full path - :rtype: str - """ - return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension) - - -def install_ca_cert(ca_cert, name=None): - """ - Install the given cert as a trusted CA. - - The ``name`` is the stem of the filename where the cert is written, and if - not provided, it will default to ``juju-{charm_name}``. - - If the cert is empty or None, or is unchanged, nothing is done. - """ - if not ca_cert: - return - if not isinstance(ca_cert, bytes): - ca_cert = ca_cert.encode('utf8') - if not name: - name = 'juju-{}'.format(charm_name()) - cert_file = ca_cert_absolute_path(name) - new_hash = hashlib.md5(ca_cert).hexdigest() - if file_hash(cert_file) == new_hash: - return - log("Installing new CA cert at: {}".format(cert_file), level=INFO) - write_file(cert_file, ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) - - -def get_system_env(key, default=None): - """Get data from system environment as represented in ``/etc/environment``. 
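Worked through for concrete unit numbers (note the docstring's time.wait is presumably intended to be time.sleep, which the sketch below uses):

    import time
    from charmhelpers.core.host import modulo_distribution

    # With modulo=3, wait=30 the delay is (unit_number % 3) * 30:
    #   myapp/0 -> 0s, myapp/1 -> 30s, myapp/2 -> 60s, myapp/3 -> 0s
    # With non_zero_wait=True the 0s cases become modulo * wait = 90s,
    # keeping those units clear of the (often leader) zero slot.
    time.sleep(modulo_distribution(modulo=3, wait=30, non_zero_wait=True))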
- - :param key: Key to look up - :type key: str - :param default: Value to return if key is not found - :type default: any - :returns: Value for key if found or contents of default parameter - :rtype: any - :raises: subprocess.CalledProcessError - """ - env_file = '/etc/environment' - # use the shell and env(1) to parse the global environments file. This is - # done to get the correct result even if the user has shell variable - # substitutions or other shell logic in that file. - output = subprocess.check_output( - ['env', '-i', '/bin/bash', '-c', - 'set -a && source {} && env'.format(env_file)], - universal_newlines=True) - for k, v in (line.split('=', 1) - for line in output.splitlines() if '=' in line): - if k == key: - return v - else: - return default diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py b/ceph-mon/hooks/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a396..00000000 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. 
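The env -i / set -a / source trick in get_system_env() above is worth spelling out: it lets bash perform any variable substitution or shell logic in /etc/environment and then dumps the resulting environment, instead of parsing the file naively. A standalone sketch of the same technique:

    import subprocess

    def shell_parsed_env(path='/etc/environment'):
        out = subprocess.check_output(
            ['env', '-i', '/bin/bash', '-c',
             'set -a && source {} && env'.format(path)],
            universal_newlines=True)
        return dict(line.split('=', 1)
                    for line in out.splitlines() if '=' in line)

    shell_parsed_env().get('PATH')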
- """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index 0906c5c0..00000000 --- a/ceph-mon/hooks/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,122 +0,0 @@ -import subprocess - -from charmhelpers.core.hookenv import cached -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', - 'disco', - 'eoan', - 'focal', - 'groovy', - 'hirsute', - 'impish', - 'jammy', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def get_distrib_codename(): - """Return the codename of the distribution - :returns: The codename - :rtype: str - """ - return lsb_release()['DISTRIB_CODENAME'].lower() - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - from charmhelpers.fetch import apt_pkg, get_installed_version - if not pkgcache: - current_ver = get_installed_version(package) - else: - pkg = pkgcache[package] - current_ver = pkg.current_ver - - return apt_pkg.version_compare(current_ver.ver_str, revno) - - -@cached -def arch(): - """Return the package architecture as a string. - - :returns: the architecture - :rtype: str - :raises: subprocess.CalledProcessError if dpkg command fails - """ - return subprocess.check_output( - ['dpkg', '--print-architecture'] - ).rstrip().decode('UTF-8') diff --git a/ceph-mon/hooks/charmhelpers/core/hugepage.py b/ceph-mon/hooks/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/ceph-mon/hooks/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. - - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel.py b/ceph-mon/hooks/charmhelpers/core/kernel.py deleted file mode 100644 index e01f4f8b..00000000 --- a/ceph-mon/hooks/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
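A sketch of what a hugepage_support() call from above ends up writing, with hypothetical numbers; the user name is illustrative:

    from charmhelpers.core.hugepage import hugepage_support

    # Reserve 512 x 2MB pages for a hypothetical 'qemu' user. Since
    # 2 * 512 < 65536 the default max_map_count is kept, and
    # /etc/sysctl.d/10-hugepage.conf ends up containing:
    #   vm.nr_hugepages=512
    #   vm.max_map_count=65536
    #   vm.hugetlb_shm_group=<gid of the 'hugetlb' group>
    # plus an fstab entry mounting hugetlbfs at /run/hugepages/kvm.
    hugepage_support('qemu', nr_hugepages=512)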
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/ceph-mon/hooks/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/ceph-mon/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/ceph-mon/hooks/charmhelpers/core/services/__init__.py b/ceph-mon/hooks/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/ceph-mon/hooks/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
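Typical usage of the kernel helpers above in a Ceph context; the module name is illustrative:

    from charmhelpers.core.kernel import modprobe, is_module_loaded

    if not is_module_loaded('rbd'):
        # loads the module now and, via the Ubuntu kernel_factory shown
        # above, appends it to /etc/modules so it returns after a reboot
        modprobe('rbd')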
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/ceph-mon/hooks/charmhelpers/core/services/base.py b/ceph-mon/hooks/charmhelpers/core/services/base.py deleted file mode 100644 index 7c37c65c..00000000 --- a/ceph-mon/hooks/charmhelpers/core/services/base.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -import inspect -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": , - "required_data": , - "provided_data": , - "data_ready": , - "data_lost": , - "start": , - "stop": , - "ports": , - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. 
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = inspect.getfullargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/ceph-mon/hooks/charmhelpers/core/services/helpers.py b/ceph-mon/hooks/charmhelpers/core/services/helpers.py deleted file mode 100644 index 5bf62dd5..00000000 --- a/ceph-mon/hooks/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. 
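A sketch of a concrete RelationContext subclass; the relation name, interface, and keys are invented for illustration, and instantiation only works inside a hook (get_data() queries relation state):

    from charmhelpers.core.services.helpers import RelationContext

    class MonRelation(RelationContext):
        name = 'mon'                 # hypothetical relation name
        interface = 'ceph-mon'       # hypothetical interface type
        required_keys = ['fsid', 'mon_hosts']

    ctx = MonRelation()
    if ctx:   # truthy once some unit has supplied all required_keys
        fsid = ctx['mon'][0]['fsid']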
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.safe_load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.safe_load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/ceph-mon/hooks/charmhelpers/core/strutils.py b/ceph-mon/hooks/charmhelpers/core/strutils.py deleted file mode 100644 index 31366871..00000000 --- a/ceph-mon/hooks/charmhelpers/core/strutils.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re - -TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} -FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} - - -def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. - """ - if isinstance(value, str): - value = str(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in truthy_strings: - return True - elif value in falsey_strings or assume_false: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, str): - value = str(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/ceph-mon/hooks/charmhelpers/core/sysctl.py b/ceph-mon/hooks/charmhelpers/core/sysctl.py deleted file mode 100644 index 386428d6..00000000 --- a/ceph-mon/hooks/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
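A few worked conversions for the strutils helpers above:

    from charmhelpers.core.strutils import (bool_from_string,
                                            bytes_from_string)

    bool_from_string('Yes')      # -> True
    bool_from_string('off')      # -> False
    bytes_from_string('512M')    # -> 512 * 1024**2 = 536870912
    bytes_from_string('1G')      # -> 1073741824
    bytes_from_string('2048')    # -> 2048 (bare numbers are taken as bytes)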
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml - -from subprocess import check_call, CalledProcessError - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, - WARNING, -) - -from charmhelpers.core.host import is_container - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file, ignore=False): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :param ignore: If True, ignore "unknown variable" errors. - :type ignore: bool - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: {} values: {}".format(sysctl_file, - sysctl_dict_parsed), - level=DEBUG) - - call = ["sysctl", "-p", sysctl_file] - if ignore: - call.append("-e") - - try: - check_call(call) - except CalledProcessError as e: - if is_container(): - log("Error setting some sysctl keys in this container: {}".format(e.output), - level=WARNING) - else: - raise e diff --git a/ceph-mon/hooks/charmhelpers/core/templating.py b/ceph-mon/hooks/charmhelpers/core/templating.py deleted file mode 100644 index cb0213dc..00000000 --- a/ceph-mon/hooks/charmhelpers/core/templating.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. 
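A usage sketch for create() above; the sysctl key and file name are illustrative:

    from charmhelpers.core.sysctl import create

    create({'kernel.pid_max': 2097152},         # a dict or a YAML string
           '/etc/sysctl.d/50-myapp.conf',
           ignore=True)   # adds -e so unknown keys are not fatal, which
                          # matters inside containers as handled above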
- - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python3-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. - """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/ceph-mon/hooks/charmhelpers/core/unitdata.py b/ceph-mon/hooks/charmhelpers/core/unitdata.py deleted file mode 100644 index d9b8d0b0..00000000 --- a/ceph-mon/hooks/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
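A sketch of rendering a charm template with render() above; the template name and context values are invented:

    from charmhelpers.core.templating import render

    content = render(
        'ceph.conf.j2',                 # looked up under templates/
        '/etc/ceph/ceph.conf',
        {'fsid': '1234-abcd', 'mon_hosts': ['10.0.0.1', '10.0.0.2']},
        owner='root', group='ceph', perms=0o640)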
-
-Here's a fully worked integration example using hookenv.Hooks::
-
-    from charmhelpers.core import hookenv, unitdata
-
-    hook_data = unitdata.HookData()
-    db = unitdata.kv()
-    hooks = hookenv.Hooks()
-
-    @hooks.hook
-    def config_changed():
-        # Print all changes to configuration from previously seen
-        # values.
-        for changed, (prev, cur) in hook_data.conf.items():
-            print('config changed', changed,
-                  'previous value', prev,
-                  'current value', cur)
-
-        # Get some unit specific bookkeeping
-        if not db.get('pkg_key'):
-            key = urllib.urlopen('https://example.com/pkg_key').read()
-            db.set('pkg_key', key)
-
-        # Directly access all charm config as a mapping.
-        conf = db.getrange('config', True)
-
-        # Directly access all relation data as a mapping
-        rels = db.getrange('rels', True)
-
-    if __name__ == '__main__':
-        with hook_data():
-            hooks.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
-    >>> from unitdata import kv
-    >>> db = kv()
-    >>> with db.hook_scope('install'):
-    ...    # do work, in transactional scope.
-    ...    db.set('x', 1)
-    >>> db.get('x')
-    1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
-    >>> kv.set('y', True)
-    >>> kv.get('y')
-    True
-
-    # We can set complex values (dicts, lists) as a single key.
-    >>> kv.set('config', {'a': 1, 'b': True})
-
-    # Also supports returning dictionaries as a record which
-    # provides attribute access.
-    >>> config = kv.get('config', record=True)
-    >>> config.b
-    True
-
-
-Groups of keys can be manipulated with update/getrange::
-
-    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
-    >>> kv.getrange('gui.', strip=True)
-    {'z': 1, 'y': 2}
-
-When updating values, it's very helpful to understand which values
-have actually changed and how they have changed. The storage
-provides a delta method for this::
-
-    >>> data = {'debug': True, 'option': 2}
-    >>> delta = kv.delta(data, 'config.')
-    >>> delta.debug.previous
-    None
-    >>> delta.debug.current
-    True
-    >>> delta
-    {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change; it needs to
-be explicitly saved via the 'update' method::
-
-    >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated with the hook name::
-
-    >>> with db.hook_scope('config-changed'):
-    ...    db.set('x', 42)
-    >>> db.gethistory('x')
-    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
-     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu '
-
-
-class Storage(object):
-    """Simple key value database for local unit state within charms.
-
-    Modifications are not persisted unless :meth:`flush` is called.
-
-    To support dicts, lists, integers, floats, and booleans, values
-    are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
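As the note above says, ':memory:' keeps the sqlite database out of the
filesystem, which makes the class easy to exercise in tests; a small
self-contained sketch::

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')        # no .unit-state.db file is created
    with db.hook_scope('test'):     # mutations are recorded against this name
        db.set('x', 1)
    assert db.get('x') == 1         # committed because the scope exited cleanly
    db.close()
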
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
-
-    Records all unit information, and stores deltas for processing
-    by the hook.
-
-    Sample::
-
-       from charmhelpers.core import hookenv, unitdata
-
-       changes = unitdata.HookData()
-       db = unitdata.kv()
-       hooks = hookenv.Hooks()
-
-       @hooks.hook
-       def config_changed():
-           # View all changes to configuration
-           for changed, (prev, cur) in changes.conf.items():
-              print('config changed', changed,
-                    'previous value', prev,
-                    'current value', cur)
-
-           # Get some unit specific bookkeeping
-           if not db.get('pkg_key'):
-              key = urllib.urlopen('https://example.com/pkg_key').read()
-              db.set('pkg_key', key)
-
-       if __name__ == '__main__':
-           with changes():
-               hooks.execute()
-
-    """
-    def __init__(self):
-        self.kv = kv()
-        self.conf = None
-        self.rels = None
-
-    @contextlib.contextmanager
-    def __call__(self):
-        from charmhelpers.core import hookenv
-        hook_name = hookenv.hook_name()
-
-        with self.kv.hook_scope(hook_name):
-            self._record_charm_version(hookenv.charm_dir())
-            delta_config, delta_relation = self._record_hook(hookenv)
-            yield self.kv, delta_config, delta_relation
-
-    def _record_charm_version(self, charm_dir):
-        # Record revisions.. charm revisions are meaningless
-        # to charm authors as they don't control the revision.
-        # so logic dependent on revision is not particularly
-        # useful, however it is useful for debugging analysis.
-        charm_rev = open(
-            os.path.join(charm_dir, 'revision')).read().strip()
-        charm_rev = charm_rev or '0'
-        revs = self.kv.get('charm_revisions', [])
-        if charm_rev not in revs:
-            revs.append(charm_rev.strip() or '0')
-            self.kv.set('charm_revisions', revs)
-
-    def _record_hook(self, hookenv):
-        data = hookenv.execution_environment()
-        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
-        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
-        self.kv.set('env', dict(data['env']))
-        self.kv.set('unit', data['unit'])
-        self.kv.set('relid', data.get('relid'))
-        return conf_delta, rels_delta
-
-
-class Record(dict):
-
-    __slots__ = ()
-
-    def __getattr__(self, k):
-        if k in self:
-            return self[k]
-        raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
-    __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
-    global _KV
-    if _KV is None:
-        _KV = Storage()
-    return _KV
diff --git a/ceph-mon/hooks/charmhelpers/fetch/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1283f25b..00000000
--- a/ceph-mon/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import importlib
-from charmhelpers.osplatform import get_platform
-from yaml import safe_load
-from charmhelpers.core.hookenv import (
-    config,
-    log,
-)
-
-from urllib.parse import urlparse, urlunparse
-
-
-# The order of this list is very important. Handlers should be listed in
-# order from least- to most-specific URL matching.
-FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', -) - - -class SourceConfigError(Exception): - pass - - -class UnhandledSource(Exception): - pass - - -class AptLockError(Exception): - pass - - -class GPGKeyError(Exception): - """Exception occurs when a GPG key cannot be fetched or used. The message - indicates what the problem is. - """ - pass - - -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - -__platform__ = get_platform() -module = "charmhelpers.fetch.%s" % __platform__ -fetch = importlib.import_module(module) - -filter_installed_packages = fetch.filter_installed_packages -filter_missing_packages = fetch.filter_missing_packages -install = fetch.apt_install -upgrade = fetch.apt_upgrade -update = _fetch_update = fetch.apt_update -purge = fetch.apt_purge -add_source = fetch.add_source - -if __platform__ == "ubuntu": - apt_cache = fetch.apt_cache - apt_install = fetch.apt_install - apt_update = fetch.apt_update - apt_upgrade = fetch.apt_upgrade - apt_purge = fetch.apt_purge - apt_autoremove = fetch.apt_autoremove - apt_mark = fetch.apt_mark - apt_hold = fetch.apt_hold - apt_unhold = fetch.apt_unhold - import_key = fetch.import_key - get_upstream_version = fetch.get_upstream_version - apt_pkg = fetch.ubuntu_apt_pkg - get_apt_dpkg_env = fetch.get_apt_dpkg_env - get_installed_version = fetch.get_installed_version - OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES - UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE -elif __platform__ == "centos": - yum_search = fetch.yum_search - - -def configure_sources(update=False, - sources_var='install_sources', - keys_var='install_keys'): - """Configure multiple sources from charm configuration. - - The lists are encoded as yaml fragments in the configuration. - The fragment needs to be included as a string. Sources and their - corresponding keys are of the types supported by add_source(). - - Example config: - install_sources: | - - "ppa:foo" - - "http://example.com/repo precise main" - install_keys: | - - null - - "a1b2c3d4" - - Note that 'null' (a.k.a. None) should not be quoted. - """ - sources = safe_load((config(sources_var) or '').strip()) or [] - keys = safe_load((config(keys_var) or '').strip()) or None - - if isinstance(sources, str): - sources = [sources] - - if keys is None: - for source in sources: - add_source(source, None) - else: - if isinstance(keys, str): - keys = [keys] - - if len(sources) != len(keys): - raise SourceConfigError( - 'Install sources and keys lists are different lengths') - for source, key in zip(sources, keys): - add_source(source, key) - if update: - _fetch_update(fatal=True) - - -def install_remote(source, *args, **kwargs): - """Install a file tree from a remote source. 
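In practice charms call configure_sources() (defined above) once from the
install hook; a sketch assuming the conventional install_sources and
install_keys config option names::

    from charmhelpers.fetch import configure_sources

    # Parses the YAML fragments from charm config, calls add_source() for
    # each source/key pair, then runs a fatal apt-get update when asked to.
    configure_sources(update=True)
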
- - The specified source should be a url of the form: - scheme://[host]/path[#[option=value][&...]] - - Schemes supported are based on this modules submodules. - Options supported are submodule-specific. - Additional arguments are passed through to the submodule. - - For example:: - - dest = install_remote('http://example.com/archive.tgz', - checksum='deadbeef', - hash_type='sha1') - - This will download `archive.tgz`, validate it using SHA1 and, if - the file is ok, extract it and return the directory in which it - was extracted. If the checksum fails, it will raise - :class:`charmhelpers.core.host.ChecksumError`. - """ - # We ONLY check for True here because can_handle may return a string - # explaining why it can't handle a given source. - handlers = [h for h in plugins() if h.can_handle(source) is True] - for handler in handlers: - try: - return handler.install(source, *args, **kwargs) - except UnhandledSource as e: - log('Install source attempt unsuccessful: {}'.format(e), - level='WARNING') - raise UnhandledSource("No handler found for source {}".format(source)) - - -def install_from_config(config_var_name): - """Install a file from config.""" - charm_config = config() - source = charm_config[config_var_name] - return install_remote(source) - - -def plugins(fetch_handlers=None): - if not fetch_handlers: - fetch_handlers = FETCH_HANDLERS - plugin_list = [] - for handler_name in fetch_handlers: - package, classname = handler_name.rsplit('.', 1) - try: - handler_class = getattr( - importlib.import_module(package), - classname) - plugin_list.append(handler_class()) - except NotImplementedError: - # Skip missing plugins so that they can be omitted from - # installation if desired - log("FetchHandler {} not found, skipping plugin".format( - handler_name)) - return plugin_list diff --git a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py b/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py deleted file mode 100644 index 2cb2e88b..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/archiveurl.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
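The handler-selection idiom used by install_remote() above is worth spelling
out: can_handle() returns either True or a string describing the refusal,
which is why the filter compares against True with "is" rather than relying
on truthiness. A sketch (run on a charm unit, since the handler modules may
install bzr or git when imported); the URL is illustrative::

    from charmhelpers.fetch import plugins

    url = 'http://example.com/archive.tgz'
    for handler in plugins():
        verdict = handler.can_handle(url)
        print(type(handler).__name__, verdict)  # True, False, or a reason string
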
- -import os -import hashlib -import re - -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - -from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, -) -from urllib.parse import urlparse, urlunparse, parse_qs -from urllib.error import URLError - - -def splituser(host): - _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: - return match.group(1, 2) - return None, host - - -def splitpasswd(user): - _passwdprog = re.compile('^([^:]*):(.*)$', re.S) - match = _passwdprog.match(user) - if match: - return match.group(1, 2) - return user, None - - -class ArchiveUrlFetchHandler(BaseFetchHandler): - """ - Handler to download archive files from arbitrary URLs. - - Can fetch from http, https, ftp, and file URLs. - - Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. - - Installs the contents of the archive in $CHARM_DIR/fetched/. - """ - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): - # XXX: Why is this returning a boolean and a string? It's - # doomed to fail since "bool(can_handle('foo://'))" will be True. - return "Wrong source type" - if get_archive_handler(self.base_url(source)): - return True - return False - - def download(self, source, dest): - """ - Download an archive file. - - :param str source: URL pointing to an archive file. - :param str dest: Local path location to download archive file to. - """ - # propagate all exceptions - # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse(source) - if proto in ('http', 'https'): - auth, barehost = splituser(netloc) - if auth is not None: - source = urlunparse((proto, barehost, path, params, query, fragment)) - username, password = splitpasswd(auth) - passman = HTTPPasswordMgrWithDefaultRealm() - # Realm is set to None in add_password to force the username and password - # to be used whatever the realm - passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) - try: - with open(dest, 'wb') as dest_file: - dest_file.write(response.read()) - except Exception as e: - if os.path.isfile(dest): - os.unlink(dest) - raise e - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - tempfile, headers = urlretrieve(url) - check_hash(tempfile, hashsum, validate) - return tempfile - - def install(self, source, dest=None, checksum=None, hash_type='sha1'): - """ - Download and install an archive file, with optional checksum validation. - - The checksum can also be given on the `source` URL's fragment. - For example:: - - handler.install('http://example.com/file.tgz#sha1=deadbeef') - - :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. - :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
- - """ - url_parts = self.parse_url(source) - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) - try: - self.download(source, dld_file) - except URLError as e: - raise UnhandledSource(e.reason) - except OSError as e: - raise UnhandledSource(e.strerror) - options = parse_qs(url_parts.fragment) - for key, value in options.items(): - algorithms = hashlib.algorithms_available - if key in algorithms: - if len(value) != 1: - raise TypeError( - "Expected 1 hash value, not %d" % len(value)) - expected = value[0] - check_hash(dld_file, expected, key) - if checksum: - check_hash(dld_file, checksum, hash_type) - return extract(dld_file, dest) diff --git a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py b/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py deleted file mode 100644 index c4ab3ff1..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/bzrurl.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from subprocess import STDOUT, check_output -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) -from charmhelpers.core.host import mkdir - - -if filter_installed_packages(['bzr']) != []: - install(['bzr']) - if filter_installed_packages(['bzr']) != []: - raise NotImplementedError('Unable to install bzr') - - -class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.bzr')) - else: - return True - - def branch(self, source, dest, revno=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - cmd_opts = [] - if revno: - cmd_opts += ['-r', str(revno)] - if os.path.exists(dest): - cmd = ['bzr', 'pull'] - cmd += cmd_opts - cmd += ['--overwrite', '-d', dest, source] - else: - cmd = ['bzr', 'branch'] - cmd += cmd_opts - cmd += [source, dest] - check_output(cmd, stderr=STDOUT) - - def install(self, source, dest=None, revno=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - - if dest and not os.path.exists(dest): - mkdir(dest, perms=0o755) - - try: - self.branch(source, dest_dir, revno) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/ceph-mon/hooks/charmhelpers/fetch/centos.py b/ceph-mon/hooks/charmhelpers/fetch/centos.py deleted file mode 100644 index f8492018..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/centos.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
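A usage sketch for ArchiveUrlFetchHandler.install() as completed above; the
URL and digest are placeholders. The checksum can ride along in the URL
fragment (any hashlib algorithm name as the key) or be passed explicitly::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    # Fragment form: validated via check_hash() before extraction.
    path = handler.install('http://example.com/pkg.tgz#sha256=placeholderdigest')
    # Explicit form:
    path = handler.install('http://example.com/pkg.tgz',
                           checksum='placeholderdigest', hash_type='sha256')
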
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess -import os -import time -import yum - -from tempfile import NamedTemporaryFile -from charmhelpers.core.hookenv import log - -YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. -YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - - -def filter_installed_packages(packages): - """Return a list of packages that require installation.""" - yb = yum.YumBase() - package_list = yb.doPackageLists() - temp_cache = {p.base_package_name: 1 for p in package_list['installed']} - - _pkgs = [p for p in packages if not temp_cache.get(p, False)] - return _pkgs - - -def install(packages, options=None, fatal=False): - """Install one or more packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('install') - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_yum_command(cmd, fatal) - - -def upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_yum_command(cmd, fatal) - - -def update(fatal=False): - """Update local yum cache.""" - cmd = ['yum', '--assumeyes', 'update'] - log("Update with fatal: {}".format(fatal)) - _run_yum_command(cmd, fatal) - - -def purge(packages, fatal=False): - """Purge one or more packages.""" - cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_yum_command(cmd, fatal) - - -def yum_search(packages): - """Search for a package.""" - output = {} - cmd = ['yum', 'search'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Searching for {}".format(packages)) - result = subprocess.check_output(cmd) - for package in list(packages): - output[package] = package in result - return output - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL with a rpm package - - @param key: A key to be added to the system's keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. - """ - if source is None: - log('Source is not present. 
Skipping') - return - - if source.startswith('http'): - directory = '/etc/yum.repos.d/' - for filename in os.listdir(directory): - with open(directory + filename, 'r') as rpm_file: - if source in rpm_file.read(): - break - else: - log("Add source: {!r}".format(source)) - # write in the charms.repo - with open(directory + 'Charms.repo', 'a') as rpm_file: - rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) - rpm_file.write('name=%s\n' % source[7:]) - rpm_file.write('baseurl=%s\n\n' % source) - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file.name]) - else: - subprocess.check_call(['rpm', '--import', key]) - - -def _run_yum_command(cmd, fatal=False): - """Run an YUM command. - - Checks the output and retry if the fatal flag is set to True. - - :param: cmd: str: The yum command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the yum - # lock was not acquired. - - while result is None or result == YUM_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > YUM_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire YUM lock. Will retry in {} seconds." - "".format(YUM_NO_LOCK_RETRY_DELAY)) - time.sleep(YUM_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/ceph-mon/hooks/charmhelpers/fetch/giturl.py b/ceph-mon/hooks/charmhelpers/fetch/giturl.py deleted file mode 100644 index 070ca9bb..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/giturl.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
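For the CentOS backend above, add_source() only understands http(s) repo URLs
and GPG keys; a hedged sketch with placeholder values::

    from charmhelpers.fetch.centos import add_source

    # Appends a [section] for the URL to /etc/yum.repos.d/Charms.repo unless
    # an existing repo file already mentions it, then imports the key with
    # "rpm --import" (inline ASCII-armoured block or a key path/URL).
    add_source('http://mirror.example.com/extras',
               key='/path/to/RPM-GPG-KEY-example')
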
- -import os -from subprocess import check_output, CalledProcessError, STDOUT -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) - -if filter_installed_packages(['git']) != []: - install(['git']) - if filter_installed_packages(['git']) != []: - raise NotImplementedError('Unable to install git') - - -class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.git')) - else: - return True - - def clone(self, source, dest, branch="master", depth=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - - if os.path.exists(dest): - cmd = ['git', '-C', dest, 'pull', source, branch] - else: - cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) - check_output(cmd, stderr=STDOUT) - - def install(self, source, branch="master", dest=None, depth=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - try: - self.clone(source, dest_dir, branch, depth) - except CalledProcessError as e: - raise UnhandledSource(e) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/__init__.py b/ceph-mon/hooks/charmhelpers/fetch/python/__init__.py deleted file mode 100644 index bff99dc9..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/python/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/debug.py b/ceph-mon/hooks/charmhelpers/fetch/python/debug.py deleted file mode 100644 index dd5cca80..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/python/debug.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
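GitUrlFetchHandler (above) follows the same shape as the bzr handler; a
sketch with an illustrative repository URL::

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    # Clones into $CHARM_DIR/fetched/<last path component> when no dest is
    # given; an existing checkout is updated with "git pull" instead. depth
    # is passed to git verbatim, so give it as a string.
    path = handler.install('https://github.com/juju/charm-helpers',
                           branch='master', depth='1')
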
- -import atexit -import sys - -from charmhelpers.fetch.python.rpdb import Rpdb -from charmhelpers.core.hookenv import ( - open_port, - close_port, - ERROR, - log -) - -__author__ = "Jorge Niedbalski " - -DEFAULT_ADDR = "0.0.0.0" -DEFAULT_PORT = 4444 - - -def _error(message): - log(message, level=ERROR) - - -def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): - """ - Set a trace point using the remote debugger - """ - atexit.register(close_port, port) - try: - log("Starting a remote python debugger session on %s:%s" % (addr, - port)) - open_port(port) - debugger = Rpdb(addr=addr, port=port) - debugger.set_trace(sys._getframe().f_back) - except Exception: - _error("Cannot start a remote debug session on %s:%s" % (addr, - port)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py b/ceph-mon/hooks/charmhelpers/fetch/python/packages.py deleted file mode 100644 index 93f1fa3f..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/python/packages.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess -import sys - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import charm_dir, log - -__author__ = "Jorge Niedbalski " - - -def pip_execute(*args, **kwargs): - """Overridden pip_execute() to stop sys.path being changed. - - The act of importing main from the pip module seems to cause add wheels - from the /usr/share/python-wheels which are installed by various tools. - This function ensures that sys.path remains the same after the call is - executed. - """ - try: - _path = sys.path - try: - from pip import main as _pip_execute - except ImportError: - apt_update() - apt_install('python3-pip') - from pip import main as _pip_execute - _pip_execute(*args, **kwargs) - finally: - sys.path = _path - - -def parse_options(given, available): - """Given a set of options, check if available""" - for key, value in sorted(given.items()): - if not value: - continue - if key in available: - yield "--{0}={1}".format(key, value) - - -def pip_install_requirements(requirements, constraints=None, **options): - """Install a requirements file. - - :param constraints: Path to pip constraints file. 
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files - """ - command = ["install"] - - available_options = ('proxy', 'src', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - command.append("-r {0}".format(requirements)) - if constraints: - command.append("-c {0}".format(constraints)) - log("Installing from file: {} with constraints {} " - "and options: {}".format(requirements, constraints, command)) - else: - log("Installing from file: {} with options: {}".format(requirements, - command)) - pip_execute(command) - - -def pip_install(package, fatal=False, upgrade=False, venv=None, - constraints=None, **options): - """Install a python package""" - if venv: - venv_python = os.path.join(venv, 'bin/pip') - command = [venv_python, "install"] - else: - command = ["install"] - - available_options = ('proxy', 'src', 'log', 'index-url', ) - for option in parse_options(options, available_options): - command.append(option) - - if upgrade: - command.append('--upgrade') - - if constraints: - command.extend(['-c', constraints]) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Installing {} package with options: {}".format(package, - command)) - if venv: - subprocess.check_call(command) - else: - pip_execute(command) - - -def pip_uninstall(package, **options): - """Uninstall a python package""" - command = ["uninstall", "-q", "-y"] - - available_options = ('proxy', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Uninstalling {} package with options: {}".format(package, - command)) - pip_execute(command) - - -def pip_list(): - """Returns the list of current python installed packages - """ - return pip_execute(["list"]) - - -def pip_create_virtualenv(path=None): - """Create an isolated Python environment.""" - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] - - if path: - venv_path = path - else: - venv_path = os.path.join(charm_dir(), 'venv') - - if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py b/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py deleted file mode 100644 index 9b31610c..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/python/rpdb.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
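A sketch of the pip helpers defined above; the virtualenv path and proxy are
illustrative. Keyword arguments named in parse_options() are rendered as
--name=value flags::

    from charmhelpers.fetch.python.packages import (
        pip_create_virtualenv,
        pip_install,
    )

    pip_create_virtualenv('/srv/charm-venv')      # no-op if it already exists
    pip_install('netaddr',
                venv='/srv/charm-venv',           # uses <venv>/bin/pip
                upgrade=True,
                proxy='http://proxy.example.com:3128')
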
- -"""Remote Python Debugger (pdb wrapper).""" - -import pdb -import socket -import sys - -__author__ = "Bertrand Janin " -__version__ = "0.1.3" - - -class Rpdb(pdb.Pdb): - - def __init__(self, addr="127.0.0.1", port=4444): - """Initialize the socket and initialize pdb.""" - - # Backup stdin and stdout before replacing them by the socket handle - self.old_stdout = sys.stdout - self.old_stdin = sys.stdin - - # Open a 'reusable' socket to let the webapp reload on the same port - self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) - self.skt.bind((addr, port)) - self.skt.listen(1) - (clientsocket, address) = self.skt.accept() - handle = clientsocket.makefile('rw') - pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) - sys.stdout = sys.stdin = handle - - def shutdown(self): - """Revert stdin and stdout, close the socket.""" - sys.stdout = self.old_stdout - sys.stdin = self.old_stdin - self.skt.close() - self.set_continue() - - def do_continue(self, arg): - """Stop all operation on ``continue``.""" - self.shutdown() - return 1 - - do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/ceph-mon/hooks/charmhelpers/fetch/python/version.py b/ceph-mon/hooks/charmhelpers/fetch/python/version.py deleted file mode 100644 index 3eb42103..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/python/version.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -__author__ = "Jorge Niedbalski " - - -def current_version(): - """Current system python version""" - return sys.version_info - - -def current_version_string(): - """Current system python version as string major.minor.micro""" - return "{0}.{1}.{2}".format(sys.version_info.major, - sys.version_info.minor, - sys.version_info.micro) diff --git a/ceph-mon/hooks/charmhelpers/fetch/snap.py b/ceph-mon/hooks/charmhelpers/fetch/snap.py deleted file mode 100644 index 36d6bce9..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/snap.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Charm helpers snap for classic charms. 
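The Rpdb class above is what the set_trace() helper in fetch.python.debug
wraps; a sketch of the usual entry point, using its defaults::

    from charmhelpers.fetch.python.debug import set_trace

    # Opens unit port 4444, starts Rpdb bound to 0.0.0.0, and registers
    # close_port(4444) to run at interpreter exit; attach with, e.g.,
    # "telnet <unit-address> 4444".
    set_trace()
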
- -If writing reactive charms, use the snap layer: -https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html -""" -import subprocess -import os -from time import sleep -from charmhelpers.core.hookenv import log - -__author__ = 'Joseph Borg ' - -# The return code for "couldn't acquire lock" in Snap -# (hopefully this will be improved). -SNAP_NO_LOCK = 1 -SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. -SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. -SNAP_CHANNELS = [ - 'edge', - 'beta', - 'candidate', - 'stable', -] - - -class CouldNotAcquireLockException(Exception): - pass - - -class InvalidSnapChannel(Exception): - pass - - -def _snap_exec(commands): - """ - Execute snap commands. - - :param commands: List commands - :return: Integer exit code - """ - assert type(commands) == list - - retry_count = 0 - return_code = None - - while return_code is None or return_code == SNAP_NO_LOCK: - try: - return_code = subprocess.check_call(['snap'] + commands, - env=os.environ) - except subprocess.CalledProcessError as e: - retry_count += + 1 - if retry_count > SNAP_NO_LOCK_RETRY_COUNT: - raise CouldNotAcquireLockException( - 'Could not acquire lock after {} attempts' - .format(SNAP_NO_LOCK_RETRY_COUNT)) - return_code = e.returncode - log('Snap failed to acquire lock, trying again in {} seconds.' - .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') - sleep(SNAP_NO_LOCK_RETRY_DELAY) - - return return_code - - -def snap_install(packages, *flags): - """ - Install a snap package. - - :param packages: String or List String package name - :param flags: List String flags to pass to install command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Installing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with option(s) "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['install'] + flags + packages) - - -def snap_remove(packages, *flags): - """ - Remove a snap package. - - :param packages: String or List String package name - :param flags: List String flags to pass to remove command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Removing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with options "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['remove'] + flags + packages) - - -def snap_refresh(packages, *flags): - """ - Refresh / Update snap package. 
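The snap helpers above all funnel through _snap_exec(), which retries while
the snap state lock is held; a usage sketch with an illustrative snap name
and channel::

    from charmhelpers.fetch.snap import snap_install

    # Extra flags are passed straight through to "snap install"; the call
    # retries every 10 seconds, up to 30 times, if another snap operation
    # holds the lock.
    snap_install('vault', '--channel=1.8/stable')
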
- - :param packages: String or List String package name - :param flags: List String flags to pass to refresh command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Refreshing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with options "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['refresh'] + flags + packages) - - -def valid_snap_channel(channel): - """ Validate snap channel exists - - :raises InvalidSnapChannel: When channel does not exist - :return: Boolean - """ - if channel.lower() in SNAP_CHANNELS: - return True - else: - raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py deleted file mode 100644 index e6f8a0ad..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu.py +++ /dev/null @@ -1,1003 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict -import platform -import re -import subprocess -import sys -import time - -from charmhelpers import deprecate -from charmhelpers.core.host import get_distrib_codename, get_system_env - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, - env_proxy_settings, -) -from charmhelpers.fetch import SourceConfigError, GPGKeyError -from charmhelpers.fetch import ubuntu_apt_pkg - -PROPOSED_POCKET = ( - "# Proposed\n" - "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " - "multiverse restricted\n") -PROPOSED_PORTS_POCKET = ( - "# Proposed\n" - "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " - "multiverse restricted\n") -# Only supports 64bit and ppc64 at the moment. 
-ARCH_TO_PROPOSED_POCKET = { - 'x86_64': PROPOSED_POCKET, - 'ppc64le': PROPOSED_PORTS_POCKET, - 'aarch64': PROPOSED_PORTS_POCKET, - 's390x': PROPOSED_PORTS_POCKET, -} -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', - # Ocata - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'xenial-ocata': 'xenial-updates/ocata', - 'xenial-ocata/updates': 'xenial-updates/ocata', - 'xenial-updates/ocata': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'xenial-ocata/proposed': 'xenial-proposed/ocata', - 'xenial-proposed/ocata': 'xenial-proposed/ocata', - # Pike - 'pike': 'xenial-updates/pike', - 'xenial-pike': 'xenial-updates/pike', - 'xenial-pike/updates': 'xenial-updates/pike', - 'xenial-updates/pike': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'xenial-pike/proposed': 'xenial-proposed/pike', - 'xenial-proposed/pike': 'xenial-proposed/pike', - # Queens - 'queens': 'xenial-updates/queens', - 'xenial-queens': 'xenial-updates/queens', - 'xenial-queens/updates': 'xenial-updates/queens', - 'xenial-updates/queens': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - 'xenial-queens/proposed': 'xenial-proposed/queens', - 'xenial-proposed/queens': 'xenial-proposed/queens', - # Rocky - 'rocky': 'bionic-updates/rocky', - 'bionic-rocky': 'bionic-updates/rocky', - 'bionic-rocky/updates': 'bionic-updates/rocky', - 'bionic-updates/rocky': 'bionic-updates/rocky', - 'rocky/proposed': 'bionic-proposed/rocky', - 'bionic-rocky/proposed': 'bionic-proposed/rocky', - 'bionic-proposed/rocky': 'bionic-proposed/rocky', - # Stein - 'stein': 'bionic-updates/stein', - 'bionic-stein': 'bionic-updates/stein', - 'bionic-stein/updates': 'bionic-updates/stein', - 'bionic-updates/stein': 'bionic-updates/stein', - 'stein/proposed': 'bionic-proposed/stein', - 'bionic-stein/proposed': 'bionic-proposed/stein', - 'bionic-proposed/stein': 'bionic-proposed/stein', - # Train - 'train': 'bionic-updates/train', - 'bionic-train': 'bionic-updates/train', - 'bionic-train/updates': 'bionic-updates/train', - 'bionic-updates/train': 'bionic-updates/train', - 'train/proposed': 'bionic-proposed/train', - 'bionic-train/proposed': 'bionic-proposed/train', - 'bionic-proposed/train': 'bionic-proposed/train', - # Ussuri - 'ussuri': 'bionic-updates/ussuri', - 'bionic-ussuri': 'bionic-updates/ussuri', - 'bionic-ussuri/updates': 'bionic-updates/ussuri', - 'bionic-updates/ussuri': 'bionic-updates/ussuri', - 'ussuri/proposed': 'bionic-proposed/ussuri', - 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', - 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', - # Victoria - 'victoria': 'focal-updates/victoria', - 'focal-victoria': 'focal-updates/victoria', - 'focal-victoria/updates': 'focal-updates/victoria', - 'focal-updates/victoria': 'focal-updates/victoria', - 'victoria/proposed': 'focal-proposed/victoria', - 'focal-victoria/proposed': 'focal-proposed/victoria', - 'focal-proposed/victoria': 'focal-proposed/victoria', - # Wallaby - 'wallaby': 'focal-updates/wallaby', - 'focal-wallaby': 'focal-updates/wallaby', - 'focal-wallaby/updates': 'focal-updates/wallaby', - 'focal-updates/wallaby': 'focal-updates/wallaby', - 'wallaby/proposed': 'focal-proposed/wallaby', - 'focal-wallaby/proposed': 
'focal-proposed/wallaby', - 'focal-proposed/wallaby': 'focal-proposed/wallaby', - # Xena - 'xena': 'focal-updates/xena', - 'focal-xena': 'focal-updates/xena', - 'focal-xena/updates': 'focal-updates/xena', - 'focal-updates/xena': 'focal-updates/xena', - 'xena/proposed': 'focal-proposed/xena', - 'focal-xena/proposed': 'focal-proposed/xena', - 'focal-proposed/xena': 'focal-proposed/xena', - # Yoga - 'yoga': 'focal-updates/yoga', - 'focal-yoga': 'focal-updates/yoga', - 'focal-yoga/updates': 'focal-updates/yoga', - 'focal-updates/yoga': 'focal-updates/yoga', - 'yoga/proposed': 'focal-proposed/yoga', - 'focal-yoga/proposed': 'focal-proposed/yoga', - 'focal-proposed/yoga': 'focal-proposed/yoga', -} - - -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', - 'xena', - 'yoga', -) - - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), - ('impish', 'xena'), - ('jammy', 'yoga'), -]) - - -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. - - -def filter_installed_packages(packages): - """Return a list of packages that require installation.""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def filter_missing_packages(packages): - """Return a list of packages that are installed. - - :param packages: list of packages to evaluate. - :returns list: Packages that are installed. - """ - return list( - set(packages) - - set(filter_installed_packages(packages)) - ) - - -def apt_cache(*_, **__): - """Shim returning an object simulating the apt_pkg Cache. - - :param _: Accept arguments for compatibility, not used. - :type _: any - :param __: Accept keyword arguments for compatibility, not used. - :type __: any - :returns:Object used to interrogate the system apt and dpkg databases. - :rtype:ubuntu_apt_pkg.Cache - """ - if 'apt_pkg' in sys.modules: - # NOTE(fnordahl): When our consumer use the upstream ``apt_pkg`` module - # in conjunction with the apt_cache helper function, they may expect us - # to call ``apt_pkg.init()`` for them. - # - # Detect this situation, log a warning and make the call to - # ``apt_pkg.init()`` to avoid the consumer Python interpreter from - # crashing with a segmentation fault. 
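For context, filter_installed_packages() and filter_missing_packages() above are complements: the former returns the subset of names that still need installing, the latter the subset already installed. A minimal usage sketch (package names illustrative):

    # Install only what is absent; fatal=True makes a failure raise
    # subprocess.CalledProcessError rather than being ignored.
    missing = filter_installed_packages(['ceph', 'gdisk'])
    if missing:
        apt_install(missing, fatal=True)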
- @deprecate( - 'Support for use of upstream ``apt_pkg`` module in conjunction ' - 'with charm-helpers is deprecated since 2019-06-25', - date=None, log=lambda x: log(x, level=WARNING)) - def one_shot_log(): - pass - - one_shot_log() - sys.modules['apt_pkg'].init() - return ubuntu_apt_pkg.Cache() - - -def apt_install(packages, options=None, fatal=False, quiet=False): - """Install one or more packages. - - :param packages: Package(s) to install - :type packages: Option[str, List[str]] - :param options: Options to pass on to apt-get - :type options: Option[None, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param quiet: if True, suppress log messages to stdout/stderr - :type quiet: bool - :raises: subprocess.CalledProcessError - """ - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - if not quiet: - log("Installing {} with options: {}" - .format(packages, options)) - _run_apt_command(cmd, fatal, quiet=quiet) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages. - - :param options: Options to pass on to apt-get - :type options: Option[None, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` - :type dist: bool - :raises: subprocess.CalledProcessError - """ - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache.""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages. - - :param packages: Package(s) to purge - :type packages: Option[str, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :raises: subprocess.CalledProcessError - """ - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_autoremove(purge=True, fatal=False): - """Remove packages that are no longer required. - - :param purge: Whether the ``--purge`` option should be passed on or not. - :type purge: bool - :param fatal: Whether the command's output should be checked and - retried.
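For reference, with the default options above, apt_install(['ceph'], fatal=True) is executed by _run_apt_command as roughly the following argv (a sketch; the environment comes from get_apt_dpkg_env()):

    # DEBIAN_FRONTEND=noninteractive apt-get --assume-yes \
    #     --option=Dpkg::Options::=--force-confold install ceph
    cmd = ['apt-get', '--assume-yes',
           '--option=Dpkg::Options::=--force-confold',
           'install', 'ceph']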
- :type fatal: bool - :raises: subprocess.CalledProcessError - """ - cmd = ['apt-get', '--assume-yes', 'autoremove'] - if purge: - cmd.append('--purge') - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark.""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - - -def import_key(key): - """Import an ASCII Armor key. - - A Radix64 format keyid is also supported for backwards - compatibility. In this case Ubuntu keyserver will be - queried for a key via HTTPS by its keyid. This method - is less preferable because https proxy servers may - require traffic decryption which is equivalent to a - man-in-the-middle attack (a proxy server impersonates - keyserver TLS certificates and has to be explicitly - trusted by the system). - - :param key: A GPG key in ASCII armor format, - including BEGIN and END markers or a keyid. - :type key: (bytes, str) - :raises: GPGKeyError if the key could not be imported - """ - key = key.strip() - if '-' in key or '\n' in key: - # Send everything not obviously a keyid to GPG to import, as - # we trust its validation better than our own. eg. handling - # comments before the key. - log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and - '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Writing provided PGP key in the binary format", level=DEBUG) - key_bytes = key.encode('utf-8') - key_name = _get_keyid_by_gpg_key(key_bytes) - key_gpg = _dearmor_gpg_key(key_bytes) - _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) - else: - raise GPGKeyError("ASCII armor markers missing from GPG key") - else: - log("PGP key found (looks like Radix64 format)", level=WARNING) - log("SECURELY importing PGP key from keyserver; " - "full key not provided.", level=WARNING) - # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL - # to retrieve GPG keys. `apt-key adv` command is deprecated as is - # apt-key in general as noted in its manpage. See lp:1433761 for more - # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop - # gpg - key_asc = _get_key_by_keyid(key) - # write the key in GPG format so that apt-key list shows it - key_gpg = _dearmor_gpg_key(key_asc) - _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) - - -def _get_keyid_by_gpg_key(key_material): - """Get a GPG key fingerprint by GPG key material. - Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded - or binary GPG key material. Can be used, for example, to generate file - names for keys passed via charm options. 
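A sketch of the two input forms accepted by import_key() above (key material and keyid illustrative):

    # 1. Full ASCII-armored key: dearmored and written to
    #    /etc/apt/trusted.gpg.d/<fingerprint>.gpg
    import_key('-----BEGIN PGP PUBLIC KEY BLOCK-----\n'
               '...\n'
               '-----END PGP PUBLIC KEY BLOCK-----')
    # 2. Bare Radix64 keyid: the key is first fetched from
    #    keyserver.ubuntu.com over HTTPS
    import_key('6E85A86E4652B4E6')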
- - :param key_material: ASCII armor-encoded or binary GPG key material - :type key_material: bytes - :raises: GPGKeyError if invalid key material has been provided - :returns: A GPG key fingerprint - :rtype: str - """ - # Use the same gpg command for both Xenial and Bionic - cmd = 'gpg --with-colons --with-fingerprint' - ps = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE) - out, err = ps.communicate(input=key_material) - out = out.decode('utf-8') - err = err.decode('utf-8') - if 'gpg: no valid OpenPGP data found.' in err: - raise GPGKeyError('Invalid GPG key material provided') - # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) - return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) - - -def _get_key_by_keyid(keyid): - """Get a key via HTTPS from the Ubuntu keyserver. - Different key ID formats are supported by SKS keyservers (the longer ones - are more secure, see "dead beef attack" and https://evil32.com/). Since - HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will - impersonate keyserver.ubuntu.com and generate a certificate with - keyserver.ubuntu.com in the CN field or in SubjAltName fields of a - certificate. If such proxy behavior is expected it is necessary to add the - CA certificate chain containing the intermediate CA of the SSLBump proxy to - every machine that this code runs on via ca-certs cloud-init directive (via - cloudinit-userdata model-config) or via other means (such as through a - custom charm option). Also note that DNS resolution for the hostname in a - URL is done at a proxy server - not at the client side. - - 8-digit (32 bit) key ID - https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 - 16-digit (64 bit) key ID - https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 - 40-digit key ID: - https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 - - :param keyid: An 8, 16 or 40 hex digit keyid to find a key for - :type keyid: (bytes, str) - :returns: A key material for the specified GPG key id - :rtype: (str, bytes) - :raises: subprocess.CalledProcessError - """ - # options=mr - machine-readable output (disables html wrappers) - keyserver_url = ('https://keyserver.ubuntu.com' - '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') - curl_cmd = ['curl', keyserver_url.format(keyid)] - # use proxy server settings in order to retrieve the key - return subprocess.check_output(curl_cmd, - env=env_proxy_settings(['https'])) - - -def _dearmor_gpg_key(key_asc): - """Converts a GPG key in the ASCII armor format to the binary format. - - :param key_asc: A GPG key in ASCII armor format. - :type key_asc: (str, bytes) - :returns: A GPG key in binary format - :rtype: (str, bytes) - :raises: GPGKeyError - """ - ps = subprocess.Popen(['gpg', '--dearmor'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE) - out, err = ps.communicate(input=key_asc) - # no need to decode output as it is binary (invalid utf-8), only error - err = err.decode('utf-8') - if 'gpg: no valid OpenPGP data found.' in err: - raise GPGKeyError('Invalid GPG key material. Check your network setup' - ' (MTU, routing, DNS) and/or proxy server settings' - ' as well as destination keyserver status.') - else: - return out - - -def _write_apt_gpg_keyfile(key_name, key_material): - """Writes GPG key material into a file at a provided path. 
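The fingerprint extraction in _get_keyid_by_gpg_key() relies on the colon-delimited gpg output; a self-contained sketch of the match (using the fingerprint from the example URL above):

    import re
    out = 'fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6:\n'
    m = re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE)
    print(m.group(1))  # the 40-hex-digit fingerprint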
- - :param key_name: A key name to use for a key file (could be a fingerprint) - :type key_name: str - :param key_material: A GPG key material (binary) - :type key_material: (str, bytes) - """ - with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), - 'wb') as keyf: - keyf.write(key_material) - - -def add_source(source, key=None, fail_invalid=False): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - Full list of source specifications supported by the function are: - - 'distro': A NOP; i.e. it has no effect. - 'proposed': the proposed deb spec [2] is written to - /etc/apt/sources.list.d/proposed.list - 'distro-proposed': adds <version>-proposed to the debs [2] - 'ppa:<ppa-name>': add-apt-repository --yes <ppa-name> - 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec> - 'http://....': add-apt-repository --yes http://... - 'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec> - 'cloud:<series>-<release>[/staging]': specify a Cloud Archive pocket with - optional staging version. If staging is used then the staging PPA [2] - will be used. If staging is NOT used then the cloud archive [3] will be - added, and the 'ubuntu-cloud-keyring' package will be added for the - current distro. - '<openstack-version>': translate to cloud:<series>-<openstack-version> - based on the current distro version (i.e. for 'ussuri' this will either - be 'bionic-ussuri' or 'distro'). - '<openstack-version>/proposed': as above, but for proposed. - - Otherwise the source is not recognised and this is logged to the juju log. - However, no error is raised, unless fail_invalid is True. - - [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main - where {} is replaced with the derived pocket name. - [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ - main universe multiverse restricted - where {} is replaced with the lsb_release codename (e.g. xenial) - [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket> - written to /etc/apt/sources.list.d/cloud-archive.list - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automatically, so should not be provided. - - @param fail_invalid: (boolean) if True, then the function raises a - SourceConfigError if there is no matching installation source. - - @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a - valid pocket in CLOUD_ARCHIVE_POCKETS - """ - # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use - # the list in contrib.openstack.utils as it might not be included in - # classic charms and would break everything. Having OpenStack specific - # code in this file is a bit of an antipattern, anyway.
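To make the regex dispatch that follows concrete, a few example sources and the handler each one reaches (a sketch; series and release names illustrative):

    add_source('distro')                        # NOP
    add_source('ppa:charmers/example')          # _add_apt_repository
    add_source('cloud:focal-victoria')          # _add_cloud_distro_check
    add_source('cloud:focal-victoria/staging')  # _add_cloud_staging
    add_source('victoria')                      # _add_bare_openstack
    add_source('victoria/proposed')             # _add_bare_openstack_proposed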
- os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES)) - - _mapping = OrderedDict([ - (r"^distro$", lambda: None), # This is a NOP - (r"^(?:proposed|distro-proposed)$", _add_proposed), - (r"^cloud-archive:(.*)$", _add_apt_repository), - (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), - (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), - (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), - (r"^cloud:(.*)$", _add_cloud_pocket), - (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), - (r"^{}\/proposed$".format(os_versions_regex), - _add_bare_openstack_proposed), - (r"^{}$".format(os_versions_regex), _add_bare_openstack), - ]) - if source is None: - source = '' - for r, fn in _mapping.items(): - m = re.match(r, source) - if m: - if key: - # Import key before adding the source which depends on it, - # as refreshing packages could fail otherwise. - try: - import_key(key) - except GPGKeyError as e: - raise SourceConfigError(str(e)) - # call the associated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) - break - else: - # nothing matched. log an error and maybe sys.exit - err = "Unknown source: {!r}".format(source) - log(err) - if fail_invalid: - raise SourceConfigError(err) - - -def _add_proposed(): - """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list - - Uses get_distrib_codename to determine the correct stanza for - the deb line. - - For Intel architectures PROPOSED_POCKET is used for the release, but for - other architectures PROPOSED_PORTS_POCKET is used for the release. - """ - release = get_distrib_codename() - arch = platform.machine() - if arch not in ARCH_TO_PROPOSED_POCKET.keys(): - raise SourceConfigError("Arch {} not supported for (distro-)proposed" - .format(arch)) - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) - - -def _add_apt_repository(spec): - """Add the spec using add_apt_repository - - :param spec: the parameter to pass to add_apt_repository - :type spec: str - """ - if '{series}' in spec: - series = get_distrib_codename() - spec = spec.replace('{series}', series) - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http', 'no_proxy']) - ) - - -def _add_cloud_pocket(pocket): - """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list - - Note that this overwrites the existing file if there is one. - - This function also converts the simple pocket into the actual pocket using - the CLOUD_ARCHIVE_POCKETS mapping. - - :param pocket: string representing the pocket to add a deb spec for. - :raises: SourceConfigError if the cloud pocket doesn't exist or the - requested release doesn't match the current distro version. - """ - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - - -def _add_cloud_staging(cloud_archive_release, openstack_release): - """Add the cloud staging repository which is in - ppa:ubuntu-cloud-archive/<openstack-release>-staging - - This function checks that the cloud_archive_release matches the current - codename for the distro that charm is being installed on. - - :param cloud_archive_release: string, codename for the release.
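For context, _add_cloud_pocket() above resolves aliases through CLOUD_ARCHIVE_POCKETS before writing the deb line; a sketch using an alias from the mapping at the top of this module:

    # CLOUD_ARCHIVE_POCKETS['xenial-queens'] -> 'xenial-updates/queens',
    # so cloud-archive.list ends up containing:
    # deb http://ubuntu-cloud.archive.canonical.com/ubuntu \
    #     xenial-updates/queens main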
- :param openstack_release: String, codename for the openstack release. - :raises: SourceConfigError if the cloud_archive_release doesn't match the - current version of the os. - """ - _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) - ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) - cmd = 'add-apt-repository -y {}'.format(ppa) - _run_with_retries(cmd.split(' ')) - - -def _add_cloud_distro_check(cloud_archive_release, openstack_release): - """Add the cloud pocket, but also check the cloud_archive_release against - the current distro, and use the openstack_release as the full lookup. - - This just calls _add_cloud_pocket() with the openstack_release as pocket - to get the correct cloud-archive.list for dpkg to work with. - - :param cloud_archive_release: String, codename for the distro release. - :param openstack_release: String, spec for the release to look up in the - CLOUD_ARCHIVE_POCKETS - :raises: SourceConfigError if this is the wrong distro, or the pocket spec - doesn't exist. - """ - _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) - _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) - - -def _verify_is_ubuntu_rel(release, os_release): - """Verify that the release is the same as the current Ubuntu release. - - :param release: String, lowercase for the release. - :param os_release: String, the os_release being asked for - :raises: SourceConfigError if the release is not the same as the ubuntu - release. - """ - ubuntu_rel = get_distrib_codename() - if release != ubuntu_rel: - raise SourceConfigError( - 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu ' - 'version ({})'.format(release, os_release, ubuntu_rel)) - - -def _add_bare_openstack(openstack_release): - """Add cloud or distro based on the release given. - - The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri - or 'distro' depending on whether the ubuntu release is bionic or focal. - - :param openstack_release: the OpenStack codename to determine the release - for. - :type openstack_release: str - :raises: SourceConfigError - """ - # TODO(ajkavanagh) - surely this means we should be removing cloud archives - # if they exist? - __add_bare_helper(openstack_release, "{}-{}", lambda: None) - - -def _add_bare_openstack_proposed(openstack_release): - """Add cloud or distro, but with proposed. - - The spec given is, say, 'ussuri' but this could apply - cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the - ubuntu release is bionic or focal. - - :param openstack_release: the OpenStack codename to determine the release - for. - :type openstack_release: str - :raises: SourceConfigError - """ - __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed) - - -def __add_bare_helper(openstack_release, pocket_format, final_function): - """Helper for _add_bare_openstack[_proposed] - - The bulk of the work between the two functions is exactly the same except - for the pocket format and the function that is run if it's the distro - version. - - :param openstack_release: the OpenStack codename. e.g. ussuri - :type openstack_release: str - :param pocket_format: the pocket formatter string to construct a pocket str - from the openstack_release and the current ubuntu version. - :type pocket_format: str - :param final_function: the function to call if it is the distro version.
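A sketch of the resolution the helper below performs for bare release specs (series and releases illustrative):

    # On bionic: 'ussuri' -> 'bionic-ussuri' is in CLOUD_ARCHIVE_POCKETS,
    #            so the cloud archive pocket is added.
    # On focal:  'ussuri' -> 'focal-ussuri' is not a known pocket, but
    #            UBUNTU_OPENSTACK_RELEASE['focal'] == 'ussuri', so this is
    #            the distro version and only final_function() runs
    #            (e.g. _add_proposed for the '/proposed' variants).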
- :type final_function: Callable - :raises: SourceConfigError on error - """ - ubuntu_version = get_distrib_codename() - possible_pocket = pocket_format.format(ubuntu_version, openstack_release) - if possible_pocket in CLOUD_ARCHIVE_POCKETS: - _add_cloud_pocket(possible_pocket) - return - # Otherwise it's almost certainly the distro version; verify that it - # exists. - try: - assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release - except KeyError: - raise SourceConfigError( - "Ubuntu version {} isn't known to this library" - .format(ubuntu_version)) - except AssertionError: - raise SourceConfigError( - 'Invalid OpenStack release specified: {} for Ubuntu version {}' - .format(openstack_release, ubuntu_version)) - final_function() - - -def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), - retry_message="", cmd_env=None, quiet=False): - """Run a command and retry until success or max_retries is reached. - - :param cmd: The apt command to run, as a list of arguments. - :type cmd: List[str] - :param max_retries: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :type max_retries: int - :param retry_exitcodes: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :type retry_exitcodes: tuple - :param retry_message: Optional log prefix emitted during retries. - :type retry_message: str - :param cmd_env: Environment variables to add to the command run. - :type cmd_env: Option[None, Dict[str, str]] - :param quiet: if True, silence the output of the command from stdout and - stderr - :type quiet: bool - """ - env = get_apt_dpkg_env() - if cmd_env: - env.update(cmd_env) - - kwargs = {} - if quiet: - kwargs['stdout'] = subprocess.DEVNULL - kwargs['stderr'] = subprocess.DEVNULL - - if not retry_message: - retry_message = "Failed executing '{}'".format(" ".join(cmd)) - retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - - retry_count = 0 - result = None - - retry_results = (None,) + retry_exitcodes - while result in retry_results: - try: - result = subprocess.check_call(cmd, env=env, **kwargs) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > max_retries: - raise - result = e.returncode - log(retry_message) - time.sleep(CMD_RETRY_DELAY) - - -def _run_apt_command(cmd, fatal=False, quiet=False): - """Run an apt command with optional retries. - - :param cmd: The apt command to run, as a list of arguments. - :type cmd: List[str] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param quiet: if True, silence the output of the command from stdout and - stderr - :type quiet: bool - """ - if fatal: - _run_with_retries( - cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock", - quiet=quiet) - else: - kwargs = {} - if quiet: - kwargs['stdout'] = subprocess.DEVNULL - kwargs['stderr'] = subprocess.DEVNULL - subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) - - -def get_upstream_version(package): - """Determine upstream version based on installed package - - @returns None (if not installed) or the upstream version - """ - cache = apt_cache() - try: - pkg = cache[package] - except Exception: - # the package is unknown to the current apt cache. - return None - - if not pkg.current_ver: - # package is known, but no version is currently installed.
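For reference, a direct call of the retry helper above, mirroring what _run_apt_command() does for fatal commands:

    _run_with_retries(['apt-get', 'update'],
                      retry_exitcodes=(1, APT_NO_LOCK),
                      retry_message="Couldn't acquire DPKG lock")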
- return None - - return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) - - -def get_installed_version(package): - """Determine installed version of a package - - @returns None (if not installed) or the installed version as - Version object - """ - cache = apt_cache() - dpkg_result = cache.dpkg_list([package]).get(package, {}) - current_ver = None - installed_version = dpkg_result.get('version') - - if installed_version: - current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) - return current_ver - - -def get_apt_dpkg_env(): - """Get environment suitable for execution of APT and DPKG tools. - - We keep this in a helper function instead of in a global constant to - avoid execution on import of the library. - - :returns: Environment suitable for execution of APT and DPKG tools. - :rtype: Dict[str, str] - """ - # The fallback is used in the event of ``/etc/environment`` not containing - # a valid PATH variable. - return {'DEBIAN_FRONTEND': 'noninteractive', - 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py deleted file mode 100644 index 6da355fd..00000000 --- a/ceph-mon/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2019-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Provide a subset of the ``python-apt`` module API. - -Data collection is done through subprocess calls to ``apt-cache`` and -``dpkg-query`` commands. - -The main purpose for this module is to avoid dependency on the -``python-apt`` python module. - -The indicated python module is a wrapper around the ``apt`` C++ library -which is tightly connected to the version of the distribution it was -shipped on. It is not developed in a backward/forward compatible manner. - -This in turn makes it incredibly hard to distribute as a wheel for a piece -of python software that supports a span of distro releases [0][1]. - -Upstream feedback like [2] does not give confidence that this will ever -change, so with this we get rid of the dependency.
- -0: https://github.com/juju-solutions/layer-basic/pull/135 -1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 -2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 -""" - -import locale -import os -import subprocess -import sys - -from charmhelpers import deprecate -from charmhelpers.core.hookenv import log - - -class _container(dict): - """Simple container for attributes.""" - __getattr__ = dict.__getitem__ - __setattr__ = dict.__setitem__ - - -class Package(_container): - """Simple container for package attributes.""" - - -class Version(_container): - """Simple container for version attributes.""" - - -class Cache(object): - """Simulation of ``apt_pkg`` Cache object.""" - def __init__(self, progress=None): - pass - - def __contains__(self, package): - try: - pkg = self.__getitem__(package) - return pkg is not None - except KeyError: - return False - - def __getitem__(self, package): - """Get information about a package from apt and dpkg databases. - - :param package: Name of package - :type package: str - :returns: Package object - :rtype: object - :raises: KeyError, subprocess.CalledProcessError - """ - apt_result = self._apt_cache_show([package])[package] - apt_result['name'] = apt_result.pop('package') - pkg = Package(apt_result) - dpkg_result = self.dpkg_list([package]).get(package, {}) - current_ver = None - installed_version = dpkg_result.get('version') - if installed_version: - current_ver = Version({'ver_str': installed_version}) - pkg.current_ver = current_ver - pkg.architecture = dpkg_result.get('architecture') - return pkg - - @deprecate("use dpkg_list() instead.", "2022-05", log=log) - def _dpkg_list(self, packages): - return self.dpkg_list(packages) - - def dpkg_list(self, packages): - """Get data from system dpkg database for package. - - Note that this method is also useful for querying package names - containing wildcards, for example - - apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) - - may return - - { - 'nvidia-vgpu-ubuntu-470': { - 'name': 'nvidia-vgpu-ubuntu-470', - 'version': '470.68', - 'architecture': 'amd64', - 'description': 'NVIDIA vGPU driver - version 470.68' - } - } - - :param packages: Packages to get data from - :type packages: List[str] - :returns: Structured data about installed packages, keys like - ``dpkg-query --list`` - :rtype: dict - :raises: subprocess.CalledProcessError - """ - pkgs = {} - cmd = ['dpkg-query', '--list'] - cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. 
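A usage sketch of the shim Cache above (package name illustrative):

    cache = Cache()
    if 'ceph-common' in cache:
        pkg = cache['ceph-common']
        # current_ver is None when the package is known but not installed
        ver = pkg.current_ver.ver_str if pkg.current_ver else None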
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') - try: - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - except subprocess.CalledProcessError as cp: - # ``dpkg-query`` may return error and at the same time have - # produced useful output, for example when asked for multiple - # packages where some are not installed - if cp.returncode != 1: - raise - output = cp.output - headings = [] - for line in output.splitlines(): - if line.startswith('||/'): - headings = line.split() - headings.pop(0) - continue - elif (line.startswith('|') or line.startswith('+') or - line.startswith('dpkg-query:')): - continue - else: - data = line.split(None, 4) - status = data.pop(0) - if status not in ('ii', 'hi'): - continue - pkg = {} - pkg.update({k.lower(): v for k, v in zip(headings, data)}) - if 'name' in pkg: - pkgs.update({pkg['name']: pkg}) - return pkgs - - def _apt_cache_show(self, packages): - """Get data from system apt cache for package. - - :param packages: Packages to get data from - :type packages: List[str] - :returns: Structured data about package, keys like - ``apt-cache show`` - :rtype: dict - :raises: subprocess.CalledProcessError - """ - pkgs = {} - cmd = ['apt-cache', 'show', '--no-all-versions'] - cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. - locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') - try: - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - previous = None - pkg = {} - for line in output.splitlines(): - if not line: - if 'package' in pkg: - pkgs.update({pkg['package']: pkg}) - pkg = {} - continue - if line.startswith(' '): - if previous and previous in pkg: - pkg[previous] += os.linesep + line.lstrip() - continue - if ':' in line: - kv = line.split(':', 1) - key = kv[0].lower() - if key == 'n': - continue - previous = key - pkg.update({key: kv[1].lstrip()}) - except subprocess.CalledProcessError as cp: - # ``apt-cache`` returns 100 if none of the packages asked for - # exist in the apt cache. - if cp.returncode != 100: - raise - return pkgs - - -class Config(_container): - def __init__(self): - super(Config, self).__init__(self._populate()) - - def _populate(self): - cfgs = {} - cmd = ['apt-config', 'dump'] - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - for line in output.splitlines(): - if not line.startswith("CommandLine"): - k, v = line.split(" ", 1) - cfgs[k] = v.strip(";").strip("\"") - - return cfgs - - -# Backwards compatibility with old apt_pkg module -sys.modules[__name__].config = Config() - - -def init(): - """Compatibility shim that does nothing.""" - pass - - -def upstream_version(version): - """Extracts upstream version from a version string. - - Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ - apt-pkg/deb/debversion.cc#L259 - - :param version: Version string - :type version: str - :returns: Upstream version - :rtype: str - """ - if version: - version = version.split(':')[-1] - version = version.split('-')[0] - return version - - -def version_compare(a, b): - """Compare the given versions. - - Call out to ``dpkg`` to make sure the code doing the comparison is - compatible with what the ``apt`` library would do. Mimic the return - values. 
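upstream_version() above strips the epoch and the Debian revision; a sketch on a typical version string (version illustrative):

    upstream_version('2:12.2.13-0ubuntu0.18.04.4')  # -> '12.2.13'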
- - Upstream reference: - https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html - ?highlight=version_compare#apt_pkg.version_compare - - :param a: version string - :type a: str - :param b: version string - :type b: str - :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, - <0 if ``a`` is smaller than ``b`` - :rtype: int - :raises: subprocess.CalledProcessError, RuntimeError - """ - for op in ('gt', 1), ('eq', 0), ('lt', -1): - try: - subprocess.check_call(['dpkg', '--compare-versions', - a, op[0], b], - stderr=subprocess.STDOUT, - universal_newlines=True) - return op[1] - except subprocess.CalledProcessError as cp: - if cp.returncode == 1: - continue - raise - else: - raise RuntimeError('Unable to compare "{}" and "{}", according to ' - 'our logic they are neither greater, equal nor ' - 'less than each other.'.format(a, b)) - - -class PkgVersion(): - """Allow package versions to be compared. - - For example:: - - >>> import charmhelpers.fetch as fetch - >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < - ... fetch.apt_pkg.PkgVersion('2:20.5.0')) - True - >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), - ... fetch.apt_pkg.PkgVersion('2:21.4.0'), - ... fetch.apt_pkg.PkgVersion('2:17.4.0')] - >>> pkgs.sort() - >>> pkgs - [2:17.4.0, 2:20.4.0, 2:21.4.0] - """ - - def __init__(self, version): - self.version = version - - def __lt__(self, other): - return version_compare(self.version, other.version) == -1 - - def __le__(self, other): - return self.__lt__(other) or self.__eq__(other) - - def __gt__(self, other): - return version_compare(self.version, other.version) == 1 - - def __ge__(self, other): - return self.__gt__(other) or self.__eq__(other) - - def __eq__(self, other): - return version_compare(self.version, other.version) == 0 - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return self.version - - def __hash__(self): - return hash(repr(self)) diff --git a/ceph-mon/hooks/charmhelpers/osplatform.py b/ceph-mon/hooks/charmhelpers/osplatform.py deleted file mode 100644 index 1ace468f..00000000 --- a/ceph-mon/hooks/charmhelpers/osplatform.py +++ /dev/null @@ -1,49 +0,0 @@ -import platform -import os - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warnings *not* disabled, as we certainly need to fix this. - if hasattr(platform, 'linux_distribution'): - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - else: - current_platform = _get_platform_from_fs() - - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - elif "elementary" in current_platform: - # ElementaryOS fails to run tests locally without this. - return "ubuntu" - elif "Pop!_OS" in current_platform: - # Pop!_OS also fails to run tests locally without this. - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) - - -def _get_platform_from_fs(): - """Get Platform from /etc/os-release.""" - with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: - content = dict( - line.split('=', 1) - for line in fin.read().splitlines() - if '=' in line - ) - for k, v in content.items(): - content[k] = v.strip('"') - return content["NAME"] diff --git a/ceph-mon/hooks/charmhelpers/payload/__init__.py b/ceph-mon/hooks/charmhelpers/payload/__init__.py deleted file mode 100644 index ee55cb3d..00000000 --- a/ceph-mon/hooks/charmhelpers/payload/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Tools for working with files injected into a charm just before deployment." diff --git a/ceph-mon/hooks/charmhelpers/payload/execd.py b/ceph-mon/hooks/charmhelpers/payload/execd.py deleted file mode 100644 index 1502aa0b..00000000 --- a/ceph-mon/hooks/charmhelpers/payload/execd.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import subprocess -from charmhelpers.core import hookenv - - -def default_execd_dir(): - return os.path.join(os.environ['CHARM_DIR'], 'exec.d') - - -def execd_module_paths(execd_dir=None): - """Generate a list of full paths to modules within execd_dir.""" - if not execd_dir: - execd_dir = default_execd_dir() - - if not os.path.exists(execd_dir): - return - - for subpath in os.listdir(execd_dir): - module = os.path.join(execd_dir, subpath) - if os.path.isdir(module): - yield module - - -def execd_submodule_paths(command, execd_dir=None): - """Generate a list of full paths to the specified command within exec_dir. - """ - for module_path in execd_module_paths(execd_dir): - path = os.path.join(module_path, command) - if os.access(path, os.X_OK) and os.path.isfile(path): - yield path - - -def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): - """Run command for each module within execd_dir which defines it.""" - for submodule_path in execd_submodule_paths(command, execd_dir): - try: - subprocess.check_output(submodule_path, stderr=stderr, - universal_newlines=True) - except subprocess.CalledProcessError as e: - hookenv.log("Error ({}) running {}. 
Output: {}".format( - e.returncode, e.cmd, e.output)) - if die_on_error: - sys.exit(e.returncode) - - -def execd_preinstall(execd_dir=None): - """Run charm-pre-install for each module within execd_dir.""" - execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/ceph-mon/hooks/client-relation-changed b/ceph-mon/hooks/client-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/client-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/client-relation-joined b/ceph-mon/hooks/client-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/client-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/config-changed b/ceph-mon/hooks/config-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/config-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/dashboard-relation-joined b/ceph-mon/hooks/dashboard-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/dashboard-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/install b/ceph-mon/hooks/install deleted file mode 100755 index e8ad54b4..00000000 --- a/ceph-mon/hooks/install +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -e -# ensure that the python3 bits are installed, whichever version of ubuntu -# is being installed. - -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') - -check_and_install() { - pkg="${1}-${2}" - if ! dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python3" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done - -./hooks/install_deps -exec ./hooks/install.real diff --git a/ceph-mon/hooks/install.real b/ceph-mon/hooks/install.real deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/install.real +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/install_deps b/ceph-mon/hooks/install_deps deleted file mode 100755 index c480f29e..00000000 --- a/ceph-mon/hooks/install_deps +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -e -# Wrapper to ensure that python dependencies are installed before we get into -# the python part of the hook execution - -declare -a DEPS=('dnspython' 'pyudev') - -check_and_install() { - pkg="${1}-${2}" - if !
dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python3" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done diff --git a/ceph-mon/hooks/leader-settings-changed b/ceph-mon/hooks/leader-settings-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/leader-settings-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mds-relation-changed b/ceph-mon/hooks/mds-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/mds-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mds-relation-joined b/ceph-mon/hooks/mds-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/mds-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-changed b/ceph-mon/hooks/mon-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/mon-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-departed b/ceph-mon/hooks/mon-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/mon-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/mon-relation-joined b/ceph-mon/hooks/mon-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/mon-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/nrpe-external-master-relation-changed b/ceph-mon/hooks/nrpe-external-master-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/nrpe-external-master-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/nrpe-external-master-relation-joined b/ceph-mon/hooks/nrpe-external-master-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/nrpe-external-master-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-relation-changed b/ceph-mon/hooks/osd-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/osd-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/osd-relation-joined b/ceph-mon/hooks/osd-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/osd-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/post-series-upgrade b/ceph-mon/hooks/post-series-upgrade deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/post-series-upgrade +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/pre-series-upgrade b/ceph-mon/hooks/pre-series-upgrade deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/pre-series-upgrade +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/prometheus-relation-changed b/ceph-mon/hooks/prometheus-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/prometheus-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git 
a/ceph-mon/hooks/prometheus-relation-departed b/ceph-mon/hooks/prometheus-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/prometheus-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/prometheus-relation-joined b/ceph-mon/hooks/prometheus-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/prometheus-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/radosgw-relation-changed b/ceph-mon/hooks/radosgw-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/radosgw-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/radosgw-relation-joined b/ceph-mon/hooks/radosgw-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/radosgw-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/rbd-mirror-relation-changed b/ceph-mon/hooks/rbd-mirror-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/rbd-mirror-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/rbd-mirror-relation-joined b/ceph-mon/hooks/rbd-mirror-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/rbd-mirror-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/start b/ceph-mon/hooks/start deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/start +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/stop b/ceph-mon/hooks/stop deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/update-status b/ceph-mon/hooks/update-status deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/update-status +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/hooks/upgrade-charm b/ceph-mon/hooks/upgrade-charm deleted file mode 100755 index a454f76f..00000000 --- a/ceph-mon/hooks/upgrade-charm +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -e -# Wrapper to ensure that old python bytecode isn't hanging around -# after we upgrade the charm with newer libraries -rm -rf **/*.pyc - -./hooks/install_deps -exec ./hooks/upgrade-charm.real diff --git a/ceph-mon/hooks/upgrade-charm.real b/ceph-mon/hooks/upgrade-charm.real deleted file mode 120000 index 52d96630..00000000 --- a/ceph-mon/hooks/upgrade-charm.real +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/ceph-mon/lib/charms_ceph/__init__.py b/ceph-mon/lib/charms_ceph/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceph-mon/lib/charms_ceph/broker.py b/ceph-mon/lib/charms_ceph/broker.py deleted file mode 100644 index 90b536fb..00000000 --- a/ceph-mon/lib/charms_ceph/broker.py +++ /dev/null @@ -1,913 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import json -import os - -from subprocess import check_call, check_output, CalledProcessError -from tempfile import NamedTemporaryFile - -from charms_ceph.utils import ( - get_cephfs, - get_osd_weight -) -from charms_ceph.crush_utils import Crushmap - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - INFO, - ERROR, -) -from charmhelpers.contrib.storage.linux.ceph import ( - create_erasure_profile, - delete_pool, - erasure_profile_exists, - get_osds, - monitor_key_get, - monitor_key_set, - pool_exists, - pool_set, - remove_pool_snapshot, - rename_pool, - snapshot_pool, - validator, - ErasurePool, - BasePool, - ReplicatedPool, -) - -# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ -# This should do a decent job of preventing people from passing in bad values. -# It will give a useful error message - -POOL_KEYS = { - # "Ceph Key Name": [Python type, [Valid Range]] - "size": [int], - "min_size": [int], - "crash_replay_interval": [int], - "pgp_num": [int], # = or < pg_num - "crush_ruleset": [int], - "hashpspool": [bool], - "nodelete": [bool], - "nopgchange": [bool], - "nosizechange": [bool], - "write_fadvise_dontneed": [bool], - "noscrub": [bool], - "nodeep-scrub": [bool], - "hit_set_type": [str, ["bloom", "explicit_hash", - "explicit_object"]], - "hit_set_count": [int, [1, 1]], - "hit_set_period": [int], - "hit_set_fpp": [float, [0.0, 1.0]], - "cache_target_dirty_ratio": [float], - "cache_target_dirty_high_ratio": [float], - "cache_target_full_ratio": [float], - "target_max_bytes": [int], - "target_max_objects": [int], - "cache_min_flush_age": [int], - "cache_min_evict_age": [int], - "fast_read": [bool], - "allow_ec_overwrites": [bool], - "compression_mode": [str, ["none", "passive", "aggressive", "force"]], - "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], - "compression_required_ratio": [float, [0.0, 1.0]], - "crush_rule": [str], -} - -CEPH_BUCKET_TYPES = [ - 'osd', - 'host', - 'chassis', - 'rack', - 'row', - 'pdu', - 'pod', - 'room', - 'datacenter', - 'region', - 'root' -] - - -def decode_req_encode_rsp(f): - """Decorator to decode incoming requests and encode responses.""" - - def decode_inner(req): - return json.dumps(f(json.loads(req))) - - return decode_inner - - -@decode_req_encode_rsp -def process_requests(reqs): - """Process Ceph broker request(s). - - This is a versioned api. API version must be supplied by the client making - the request. - - :param reqs: dict of request parameters. - :returns: dict. 
exit-code and reason if not 0 - """ - request_id = reqs.get('request-id') - try: - version = reqs.get('api-version') - if version == 1: - log('Processing request {}'.format(request_id), level=DEBUG) - resp = process_requests_v1(reqs['ops']) - if request_id: - resp['request-id'] = request_id - - return resp - - except Exception as exc: - log(str(exc), level=ERROR) - msg = ("Unexpected error occurred while processing requests: %s" % - reqs) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - msg = ("Missing or invalid api version ({})".format(version)) - resp = {'exit-code': 1, 'stderr': msg} - if request_id: - resp['request-id'] = request_id - - return resp - - -def handle_create_erasure_profile(request, service): - """Create an erasure profile. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure" - erasure_type = request.get('erasure-type') - # dependent on erasure coding type - erasure_technique = request.get('erasure-technique') - # "host" | "rack" | ... - failure_domain = request.get('failure-domain') - name = request.get('name') - # Binary Distribution Matrix (BDM) parameters - bdm_k = request.get('k') - bdm_m = request.get('m') - # LRC parameters - bdm_l = request.get('l') - crush_locality = request.get('crush-locality') - # SHEC parameters - bdm_c = request.get('c') - # CLAY parameters - bdm_d = request.get('d') - scalar_mds = request.get('scalar-mds') - # Device Class - device_class = request.get('device-class') - - if failure_domain and failure_domain not in CEPH_BUCKET_TYPES: - msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - create_erasure_profile(service=service, - erasure_plugin_name=erasure_type, - profile_name=name, - failure_domain=failure_domain, - data_chunks=bdm_k, - coding_chunks=bdm_m, - locality=bdm_l, - durability_estimator=bdm_d, - helper_chunks=bdm_c, - scalar_mds=scalar_mds, - crush_locality=crush_locality, - device_class=device_class, - erasure_plugin_technique=erasure_technique) - - return {'exit-code': 0} - - -def handle_add_permissions_to_key(request, service): - """Groups are defined by the key cephx.groups.(namespace-)?-(name). This - key will contain a dict serialized to JSON with data about the group, - including pools and members. - - A group can optionally have a namespace defined that will be used to - further restrict pool access. 
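For context, a request consumed by process_requests() above arrives as a JSON string of roughly this shape (a sketch; the 'create-pool' op name is an assumption here, as the op dispatch table sits further down in broker.py):

    req = json.dumps({
        'api-version': 1,              # only v1 is handled
        'request-id': 'abc123',        # illustrative
        'ops': [{'op': 'create-pool',  # assumed op name
                 'name': 'glance',
                 'replicas': 3}],
    })
    rsp = process_requests(req)        # JSON string in, JSON string out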
- """ - resp = {'exit-code': 0} - - service_name = request.get('name') - group_name = request.get('group') - group_namespace = request.get('group-namespace') - if group_namespace: - group_name = "{}-{}".format(group_namespace, group_name) - group = get_group(group_name=group_name) - service_obj = get_service_groups(service=service_name, - namespace=group_namespace) - if request.get('object-prefix-permissions'): - service_obj['object_prefix_perms'] = request.get( - 'object-prefix-permissions') - log("Service object: {}".format(service_obj), level=DEBUG) - permission = request.get('group-permission') or "rwx" - if service_name not in group['services']: - group['services'].append(service_name) - save_group(group=group, group_name=group_name) - if permission not in service_obj['group_names']: - service_obj['group_names'][permission] = [] - if group_name not in service_obj['group_names'][permission]: - service_obj['group_names'][permission].append(group_name) - save_service(service=service_obj, service_name=service_name) - service_obj['groups'] = _build_service_groups(service_obj, - group_namespace) - update_service_permissions(service_name, service_obj, group_namespace) - - return resp - - -def handle_set_key_permissions(request, service): - """Ensure the key has the requested permissions.""" - permissions = request.get('permissions') - client = request.get('client') - call = ['ceph', '--id', service, 'auth', 'caps', - 'client.{}'.format(client)] + permissions - try: - check_call(call) - except CalledProcessError as e: - log("Error updating key capabilities: {}".format(e), level=ERROR) - - -def update_service_permissions(service, service_obj=None, namespace=None): - """Update the key permissions for the named client in Ceph""" - if not service_obj: - service_obj = get_service_groups(service=service, namespace=namespace) - permissions = pool_permission_list_for_service(service_obj) - call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions - try: - check_call(call) - except CalledProcessError as e: - log("Error updating key capabilities: {}".format(e)) - - -def add_pool_to_group(pool, group, namespace=None): - """Add a named pool to a named group""" - group_name = group - if namespace: - group_name = "{}-{}".format(namespace, group_name) - group = get_group(group_name=group_name) - if pool not in group['pools']: - group["pools"].append(pool) - save_group(group, group_name=group_name) - for service in group['services']: - update_service_permissions(service, namespace=namespace) - - -def pool_permission_list_for_service(service): - """Build the permission string for Ceph for a given service""" - permissions = [] - permission_types = collections.OrderedDict() - for permission, group in sorted(service["group_names"].items()): - if permission not in permission_types: - permission_types[permission] = [] - for item in group: - permission_types[permission].append(item) - for permission, groups in permission_types.items(): - permission = "allow {}".format(permission) - for group in groups: - for pool in service['groups'][group].get('pools', []): - permissions.append("{} pool={}".format(permission, pool)) - for permission, prefixes in sorted( - service.get("object_prefix_perms", {}).items()): - for prefix in prefixes: - permissions.append("allow {} object_prefix {}".format(permission, - prefix)) - return ['mon', ('allow r, allow command "osd blacklist"' - ', allow command "osd blocklist"'), - 'osd', ', '.join(permissions)] - - -def get_service_groups(service, namespace=None): - """Services are
objects stored with some metadata; they look like (for a - service named "nova"): - { - group_names: {'rwx': ['images']}, - groups: {} - } - After populating the group, it looks like: - { - group_names: {'rwx': ['images']}, - groups: { - 'images': { - pools: ['glance'], - services: ['nova'] - } - } - } - """ - service_json = monitor_key_get(service='admin', - key="cephx.services.{}".format(service)) - try: - service = json.loads(service_json) - except (TypeError, ValueError): - service = None - if service: - service['groups'] = _build_service_groups(service, namespace) - else: - service = {'group_names': {}, 'groups': {}} - return service - - -def _build_service_groups(service, namespace=None): - """Rebuild the 'groups' dict for a service group - - :returns: dict: dictionary keyed by group name of the following - format: - - { - 'images': { - pools: ['glance'], - services: ['nova', 'glance'] - }, - 'vms':{ - pools: ['nova'], - services: ['nova'] - } - } - """ - all_groups = {} - for groups in service['group_names'].values(): - for group in groups: - name = group - if namespace: - name = "{}-{}".format(namespace, name) - all_groups[group] = get_group(group_name=name) - return all_groups - - -def get_group(group_name): - """A group is a structure to hold data about a named group, structured as: - { - pools: ['glance'], - services: ['nova'] - } - """ - group_key = get_group_key(group_name=group_name) - group_json = monitor_key_get(service='admin', key=group_key) - try: - group = json.loads(group_json) - except (TypeError, ValueError): - group = None - if not group: - group = { - 'pools': [], - 'services': [] - } - return group - - -def save_service(service_name, service): - """Persist a service in the monitor cluster""" - service['groups'] = {} - return monitor_key_set(service='admin', - key="cephx.services.{}".format(service_name), - value=json.dumps(service, sort_keys=True)) - - -def save_group(group, group_name): - """Persist a group in the monitor cluster""" - group_key = get_group_key(group_name=group_name) - return monitor_key_set(service='admin', - key=group_key, - value=json.dumps(group, sort_keys=True)) - - -def get_group_key(group_name): - """Build group key""" - return 'cephx.groups.{}'.format(group_name) - - -def handle_erasure_pool(request, service): - """Create a new erasure coded pool. - - :param request: dict of request operations and params. - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0. - """ - pool_name = request.get('name') - erasure_profile = request.get('erasure-profile') - group_name = request.get('group') - - if erasure_profile is None: - erasure_profile = "default-canonical" - - if group_name: - group_namespace = request.get('group-namespace') - # Add the pool to the group named "group_name" - add_pool_to_group(pool=pool_name, - group=group_name, - namespace=group_namespace) - - # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds - if not erasure_profile_exists(service=service, name=erasure_profile): - # TODO: Fail and tell them to create the profile or default - msg = ("erasure-profile {} does not exist. Please create it with: " - "create-erasure-profile".format(erasure_profile)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - try: - pool = ErasurePool(service=service, - op=request) - except KeyError: - msg = "Missing parameter."
- log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # OK, make the erasure pool - if not pool_exists(service=service, name=pool_name): - log("Creating pool '{}' (erasure_profile={})" - .format(pool.name, erasure_profile), level=INFO) - pool.create() - - # Set/update properties that are allowed to change after pool creation. - pool.update() - - -def handle_replicated_pool(request, service): - """Create a new replicated pool. - - :param request: dict of request operations and params. - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0. - """ - pool_name = request.get('name') - group_name = request.get('group') - - # Optional params - # NOTE: Check this against the handling in the Pool classes, reconcile and - # remove. - pg_num = request.get('pg_num') - replicas = request.get('replicas') - if pg_num and replicas: - # Cap pg_num to max allowed just in case. - osds = get_osds(service) - if osds: - pg_num = min(pg_num, (len(osds) * 100 // replicas)) - request.update({'pg_num': pg_num}) - - if group_name: - group_namespace = request.get('group-namespace') - # Add the pool to the group named "group_name" - add_pool_to_group(pool=pool_name, - group=group_name, - namespace=group_namespace) - - try: - pool = ReplicatedPool(service=service, - op=request) - except KeyError: - msg = "Missing parameter." - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if not pool_exists(service=service, name=pool_name): - log("Creating pool '{}' (replicas={})".format(pool.name, replicas), - level=INFO) - pool.create() - else: - log("Pool '{}' already exists - skipping create".format(pool.name), - level=DEBUG) - - # Set/update properties that are allowed to change after pool creation. - pool.update() - - -def handle_create_cache_tier(request, service): - """Create a cache tier on a cold pool. Modes supported are - "writeback" and "readonly". - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - # mode = "writeback" | "readonly" - storage_pool = request.get('cold-pool') - cache_pool = request.get('hot-pool') - cache_mode = request.get('mode') - - if cache_mode is None: - cache_mode = "writeback" - - # cache and storage pool must exist first - if not pool_exists(service=service, name=storage_pool) or not pool_exists( - service=service, name=cache_pool): - msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " - "them first".format(storage_pool, cache_pool)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - p = BasePool(service=service, name=storage_pool) - p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) - - -def handle_remove_cache_tier(request, service): - """Remove a cache tier from the cold pool. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - storage_pool = request.get('cold-pool') - cache_pool = request.get('hot-pool') - # cache and storage pool must exist first - if not pool_exists(service=service, name=storage_pool) or not pool_exists( - service=service, name=cache_pool): - msg = ("cold-pool: {} or hot-pool: {} doesn't exist. 
Not " - "deleting cache tier".format(storage_pool, cache_pool)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - pool = BasePool(name=storage_pool, service=service) - pool.remove_cache_tier(cache_pool=cache_pool) - - -def handle_set_pool_value(request, service, coerce=False): - """Sets an arbitrary pool value. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :param coerce: Try to parse/coerce the value into the correct type. - Used by the action code that only gets Str from Juju - :returns: dict. exit-code and reason if not 0 - """ - # Set arbitrary pool values - params = {'pool': request.get('name'), - 'key': request.get('key'), - 'value': request.get('value')} - if params['key'] not in POOL_KEYS: - msg = "Invalid key '{}'".format(params['key']) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Get the validation method - validator_params = POOL_KEYS[params['key']] - # BUG: #1838650 - the function needs to try to coerce the value param to - # the type required for the validator to pass. Note, if this blows, then - # the param isn't parsable to the correct type. - if coerce: - try: - params['value'] = validator_params[0](params['value']) - except ValueError: - raise RuntimeError("Value {} isn't of type {}" - .format(params['value'], validator_params[0])) - # end of BUG: #1838650 - if len(validator_params) == 1: - # Validate that what the user passed is actually legal per Ceph's rules - validator(params['value'], validator_params[0]) - else: - # Validate that what the user passed is actually legal per Ceph's rules - validator(params['value'], validator_params[0], validator_params[1]) - - # Set the value - pool_set(service=service, pool_name=params['pool'], key=params['key'], - value=params['value']) - - -def handle_rgw_regionmap_update(request, service): - """Change the radosgw region map. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - name = request.get('client-name') - if not name: - msg = "Missing rgw-region or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - check_output(['radosgw-admin', - '--id', service, - 'regionmap', 'update', '--name', name]) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_regionmap_default(request, service): - """Create a radosgw region map. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - region = request.get('rgw-region') - name = request.get('client-name') - if not region or not name: - msg = "Missing rgw-region or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'regionmap', - 'default', - '--rgw-region', region, - '--name', name]) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_zone_set(request, service): - """Create a radosgw zone. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. 
exit-code and reason if not 0 - """ - json_file = request.get('zone-json') - name = request.get('client-name') - region_name = request.get('region-name') - zone_name = request.get('zone-name') - if not json_file or not name or not region_name or not zone_name: - msg = "Missing json-file or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - infile = NamedTemporaryFile(delete=False) - with open(infile.name, 'w') as infile_handle: - infile_handle.write(json_file) - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'zone', - 'set', - '--rgw-zone', zone_name, - '--infile', infile.name, - '--name', name, - ] - ) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - os.unlink(infile.name) - - -def handle_put_osd_in_bucket(request, service): - """Move an osd into a specified crush bucket. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - osd_id = request.get('osd') - target_bucket = request.get('bucket') - if not osd_id or not target_bucket: - msg = "Missing OSD ID or Bucket" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - crushmap = Crushmap() - try: - crushmap.ensure_bucket_is_present(target_bucket) - check_output( - [ - 'ceph', - '--id', service, - 'osd', - 'crush', - 'set', - str(osd_id), - str(get_osd_weight(osd_id)), - "root={}".format(target_bucket) - ] - ) - - except Exception as exc: - msg = "Failed to move OSD " \ - "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - -def handle_rgw_create_user(request, service): - """Create a new rados gateway user. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - user_id = request.get('rgw-uid') - display_name = request.get('display-name') - name = request.get('client-name') - if not name or not display_name or not user_id: - msg = "Missing client-name, display-name or rgw-uid" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - create_output = check_output( - [ - 'radosgw-admin', - '--id', service, - 'user', - 'create', - '--uid', user_id, - '--display-name', display_name, - '--name', name, - '--system' - ] - ) - try: - user_json = json.loads(str(create_output.decode('UTF-8'))) - return {'exit-code': 0, 'user': user_json} - except ValueError as err: - log(err, level=ERROR) - return {'exit-code': 1, 'stderr': err} - - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_create_cephfs(request, service): - """Create a new cephfs. - - :param request: The broker request - :param service: The ceph client to run the command under. - :returns: dict. 
exit-code and reason if not 0 - """ - cephfs_name = request.get('mds_name') - data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', None) or [] - metadata_pool = request.get('metadata_pool') - # Check if the user params were provided - if not cephfs_name or not data_pool or not metadata_pool: - msg = "Missing mds_name, data_pool or metadata_pool params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Sanity check that the required pools exist - for pool_name in [data_pool, metadata_pool] + extra_pools: - if not pool_exists(service=service, name=pool_name): - msg = "CephFS pool {} does not exist. Cannot create CephFS".format( - pool_name) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if get_cephfs(service=service): - # CephFS new has already been called - log("CephFS already created") - return - - # Finally create CephFS - try: - check_output(["ceph", - '--id', service, - "fs", "new", cephfs_name, - metadata_pool, - data_pool]) - except CalledProcessError as err: - if err.returncode == 22: - log("CephFS already created") - return - else: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - for pool_name in extra_pools: - cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, - pool_name] - try: - check_output(cmd) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_region_set(request, service): - # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 - """Set the rados gateway region. - - :param request: dict. The broker request. - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - json_file = request.get('region-json') - name = request.get('client-name') - region_name = request.get('region-name') - zone_name = request.get('zone-name') - if not json_file or not name or not region_name or not zone_name: - msg = "Missing json-file or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - infile = NamedTemporaryFile(delete=False) - with open(infile.name, 'w') as infile_handle: - infile_handle.write(json_file) - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'region', - 'set', - '--rgw-zone', zone_name, - '--infile', infile.name, - '--name', name, - ] - ) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - os.unlink(infile.name) - - -def process_requests_v1(reqs): - """Process v1 requests. - - Takes a list of requests (dicts) and processes each one. If an error is - found, processing stops and the client is notified in the response. - - Returns a response dict containing the exit code (non-zero if any - operation failed along with an explanation). - """ - ret = None - log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) - for req in reqs: - op = req.get('op') - log("Processing op='{}'".format(op), level=DEBUG) - # Use admin client since we do not have other client key locations - # setup to use them for these operations. 
- svc = 'admin' - if op == "create-pool": - pool_type = req.get('pool-type') # "replicated" | "erasure" - - # Default to replicated if pool_type isn't given - if pool_type == 'erasure': - ret = handle_erasure_pool(request=req, service=svc) - else: - ret = handle_replicated_pool(request=req, service=svc) - elif op == "create-cephfs": - ret = handle_create_cephfs(request=req, service=svc) - elif op == "create-cache-tier": - ret = handle_create_cache_tier(request=req, service=svc) - elif op == "remove-cache-tier": - ret = handle_remove_cache_tier(request=req, service=svc) - elif op == "create-erasure-profile": - ret = handle_create_erasure_profile(request=req, service=svc) - elif op == "delete-pool": - pool = req.get('name') - ret = delete_pool(service=svc, name=pool) - elif op == "rename-pool": - old_name = req.get('name') - new_name = req.get('new-name') - ret = rename_pool(service=svc, old_name=old_name, - new_name=new_name) - elif op == "snapshot-pool": - pool = req.get('name') - snapshot_name = req.get('snapshot-name') - ret = snapshot_pool(service=svc, pool_name=pool, - snapshot_name=snapshot_name) - elif op == "remove-pool-snapshot": - pool = req.get('name') - snapshot_name = req.get('snapshot-name') - ret = remove_pool_snapshot(service=svc, pool_name=pool, - snapshot_name=snapshot_name) - elif op == "set-pool-value": - ret = handle_set_pool_value(request=req, service=svc) - elif op == "rgw-region-set": - ret = handle_rgw_region_set(request=req, service=svc) - elif op == "rgw-zone-set": - ret = handle_rgw_zone_set(request=req, service=svc) - elif op == "rgw-regionmap-update": - ret = handle_rgw_regionmap_update(request=req, service=svc) - elif op == "rgw-regionmap-default": - ret = handle_rgw_regionmap_default(request=req, service=svc) - elif op == "rgw-create-user": - ret = handle_rgw_create_user(request=req, service=svc) - elif op == "move-osd-to-bucket": - ret = handle_put_osd_in_bucket(request=req, service=svc) - elif op == "add-permissions-to-key": - ret = handle_add_permissions_to_key(request=req, service=svc) - elif op == 'set-key-permissions': - ret = handle_set_key_permissions(request=req, service=svc) - else: - msg = "Unknown operation '{}'".format(op) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if type(ret) == dict and 'exit-code' in ret: - return ret - - return {'exit-code': 0} diff --git a/ceph-mon/lib/charms_ceph/crush_utils.py b/ceph-mon/lib/charms_ceph/crush_utils.py deleted file mode 100644 index 37084bf1..00000000 --- a/ceph-mon/lib/charms_ceph/crush_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2014 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
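- 
- # Overview (illustrative sketch, not part of the public API): the
- # Crushmap class below automates a decompile/edit/recompile round trip,
- # roughly equivalent to:
- #
- #     crush_bin = check_output(['ceph', 'osd', 'getcrushmap'])
- #     crush_txt = check_output(['crushtool', '-d', '-'],
- #                              input=crush_bin).decode('UTF-8')
- #     # ... append bucket/rule stanzas to crush_txt ...
- #     new_bin = check_output(['crushtool', '-c', '/dev/stdin',
- #                             '-o', '/dev/stdout'],
- #                            input=crush_txt.encode('UTF-8'))
- #     check_output(['ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'],
- #                  input=new_bin)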
- -import re - -from subprocess import check_output, CalledProcessError - -from charmhelpers.core.hookenv import ( - log, - ERROR, -) - -CRUSH_BUCKET = """root {name} {{ - id {id} # do not change unnecessarily - # weight 0.000 - alg straw2 - hash 0 # rjenkins1 -}} - -rule {name} {{ - ruleset 0 - type replicated - min_size 1 - max_size 10 - step take {name} - step chooseleaf firstn 0 type host - step emit -}}""" - -# This regular expression looks for a string like: -# root NAME { -# id NUMBER -# so that we can extract NAME and ID from the crushmap -CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") - -# This regular expression looks for ID strings in the crushmap like: -# id NUMBER -# so that we can extract the IDs from a crushmap -CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") - - -class Crushmap(object): - """An object oriented approach to Ceph crushmap management.""" - - def __init__(self): - self._crushmap = self.load_crushmap() - roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) - buckets = [] - ids = list(map( - lambda x: int(x), - re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids = sorted(ids) - if roots != []: - for root in roots: - buckets.append(CRUSHBucket(root[0], root[1], True)) - - self._buckets = buckets - if ids != []: - self._ids = ids - else: - self._ids = [0] - - def load_crushmap(self): - try: - crush = check_output(['ceph', 'osd', 'getcrushmap']) - return str(check_output(['crushtool', '-d', '-'], - input=crush) - .decode('UTF-8')) - except CalledProcessError as e: - log("Error occurred while loading and decompiling CRUSH map: " - "{}".format(e), ERROR) - raise - - def ensure_bucket_is_present(self, bucket_name): - if bucket_name not in [bucket.name for bucket in self.buckets()]: - self.add_bucket(bucket_name) - self.save() - - def buckets(self): - """Return a list of buckets that are in the Crushmap.""" - return self._buckets - - def add_bucket(self, bucket_name): - """Add a named bucket to Ceph""" - new_id = min(self._ids) - 1 - self._ids.append(new_id) - self._buckets.append(CRUSHBucket(bucket_name, new_id)) - - def save(self): - """Persist Crushmap to Ceph""" - try: - crushmap = self.build_crushmap() - compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o', - '/dev/stdout'], - input=crushmap.encode('UTF-8')) - ceph_output = str(check_output(['ceph', 'osd', 'setcrushmap', '-i', - '/dev/stdin'], input=compiled) - .decode('UTF-8')) - return ceph_output - except CalledProcessError as e: - log("save error: {}".format(e)) - raise - - def build_crushmap(self): - """Modifies the current CRUSH map to include the new buckets""" - tmp_crushmap = self._crushmap - for bucket in self._buckets: - if not bucket.default: - tmp_crushmap = "{}\n\n{}".format( - tmp_crushmap, - Crushmap.bucket_string(bucket.name, bucket.id)) - - return tmp_crushmap - - @staticmethod - def bucket_string(name, id): - return CRUSH_BUCKET.format(name=name, id=id) - - -class CRUSHBucket(object): - """CRUSH bucket description object.""" - - def __init__(self, name, id, default=False): - self.name = name - self.id = int(id) - self.default = default - - def __repr__(self): - return "Bucket {{Name: {name}, ID: {id}}}".format( - name=self.name, id=self.id) - - def __eq__(self, other): - """Override the default Equals behavior""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __ne__(self, other): - """Define a non-equality test""" - if isinstance(other, self.__class__): - return not 
self.__eq__(other) - return NotImplemented diff --git a/ceph-mon/lib/charms_ceph/utils.py b/ceph-mon/lib/charms_ceph/utils.py deleted file mode 100644 index e6adcb82..00000000 --- a/ceph-mon/lib/charms_ceph/utils.py +++ /dev/null @@ -1,3583 +0,0 @@ -# Copyright 2017-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import glob -import itertools -import json -import os -import pyudev -import random -import re -import socket -import subprocess -import sys -import time -import uuid -import functools - -from contextlib import contextmanager -from datetime import datetime - -from charmhelpers.core import hookenv -from charmhelpers.core import templating -from charmhelpers.core.host import ( - chownr, - cmp_pkgrevno, - lsb_release, - mkdir, - owner, - service_restart, - service_start, - service_stop, - CompareHostReleases, - write_file, - is_container, -) -from charmhelpers.core.hookenv import ( - cached, - config, - log, - status_set, - DEBUG, - ERROR, - WARNING, - storage_get, - storage_list, -) -from charmhelpers.fetch import ( - add_source, - apt_install, - apt_purge, - apt_update, - filter_missing_packages, - get_installed_version -) -from charmhelpers.contrib.storage.linux.ceph import ( - get_mon_map, - monitor_key_set, - monitor_key_exists, - monitor_key_get, -) -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - is_device_mounted, -) -from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source, -) -from charmhelpers.contrib.storage.linux import lvm -from charmhelpers.core.unitdata import kv - -CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') -OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') -HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', - 'radosgw', 'xfsprogs', - 'lvm2', 'parted', 'smartmontools'] - -REMOVE_PACKAGES = [] -CHRONY_PACKAGE = 'chrony' - -CEPH_KEY_MANAGER = 'ceph' -VAULT_KEY_MANAGER = 'vault' -KEY_MANAGERS = [ - CEPH_KEY_MANAGER, - VAULT_KEY_MANAGER, -] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 
'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -class Partition(object): - def __init__(self, name, number, size, start, end, sectors, uuid): - """A block device partition. - - :param name: Name of block device - :param number: Partition number - :param size: Capacity of the device - :param start: Starting block - :param end: Ending block - :param sectors: Number of blocks - :param uuid: UUID of the partition - """ - self.name = name - self.number = number - self.size = size - self.start = start - self.end = end - self.sectors = sectors - self.uuid = uuid - - def __str__(self): - return "number: {} start: {} end: {} sectors: {} size: {} " \ - "name: {} uuid: {}".format(self.number, self.start, - self.end, - self.sectors, self.size, - self.name, self.uuid) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -def unmounted_disks(): - """List of unmounted block devices on the current host.""" - disks = [] - context = pyudev.Context() - for device in context.list_devices(DEVTYPE='disk'): - if device['SUBSYSTEM'] == 'block': - if device.device_node is None: - continue - - matched = False - for block_type in [u'dm-', u'loop', u'ram', u'nbd']: - if block_type in device.device_node: - matched = True - if matched: - continue - - disks.append(device.device_node) - log("Found disks: {}".format(disks)) - return [disk for disk in disks if not is_device_mounted(disk)] - - -def save_sysctls(sysctl_dict, save_location): - """Persist the sysctls to the hard drive. - - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raises: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e), level=ERROR) - raise - - -def tune_nic(network_interface): - """This will set optimal sysctls for the particular network adapter. - - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """This will find the link speed for a given network device. Returns None - if an error occurs. 
- :param network_interface: string The network adapter interface. - :returns: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? - if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e), level=ERROR) - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """This will persist the hard drive settings to the /etc/hdparm.conf file - - The settings_dict should be in the form of {"uuid": {"key":"value"}} - - :param settings_dict: dict of settings to save - """ - if not settings_dict: - return - - try: - templating.render(source='hdparm.conf', target=HDPARM_FILE, - context=settings_dict) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err), level=ERROR) - except Exception as e: - # The templating.render can raise a jinja2 exception if the - # template is not found. Rather than polluting the import - # space of this charm, simply catch Exception - log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """This function sets the max_sectors_kb size of a given block device. - - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, - 'queue', 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(str(max_sectors_size)) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """This function gets the max_sectors_kb size of a given block device. - - :param dev_name: Name of the block device to query - :returns: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, - 'queue', 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb from {}. Error: {}'.format( - max_sectors_kb_path, e), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """This function gets the max_hw_sectors_kb for a given block device. - - :param dev_name: Name of the block device to query - :returns: int which is either the max_hw_sectors_kb or 0 on error. 
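- 
-     For example (hypothetical device name), for dev_name "sdb" this is
-     equivalent to:
- 
-         with open('/sys/block/sdb/queue/max_hw_sectors_kb') as f:
-             limit = int(f.read().strip())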
- """ - max_hw_sectors_kb_path = os.path.join('sys', 'block', dev_name, 'queue', - 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb to {}. Error: {}'.format( - max_hw_sectors_kb_path, e), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """This function sets the hard drive read ahead. - - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. - """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - subprocess.check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """This queries blkid to get the uuid for a block device. - - :param block_dev: Name of the block device to query. - :returns: The UUID of the device or None on Error. - """ - try: - block_info = str(subprocess - .check_output(['blkid', '-o', 'export', block_dev]) - .decode('UTF-8')) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """Tune the max_hw_sectors if needed. - - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. 
No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - - This function will change the read ahead sectors and the max write - sectors for each block device. - - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - return - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - return 'ceph' - - -class CrushLocation(object): - def __init__(self, identifier, name, osd="", host="", chassis="", - rack="", row="", pdu="", pod="", room="", - datacenter="", zone="", region="", root=""): - self.identifier = identifier - self.name = name - self.osd = osd - self.host = host - self.chassis = chassis - self.rack = rack - self.row = row - self.pdu = pdu - self.pod = pod - self.room = room - self.datacenter = datacenter - self.zone = zone - self.region = region - self.root = root - - def __str__(self): - return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ - "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ - "region: {} root: {}".format(self.name, self.identifier, - self.osd, self.host, self.chassis, - self.rack, self.row, self.pdu, - self.pod, self.room, - self.datacenter, self.zone, - self.region, self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_weight(osd_id): - """Returns the weight of the specified OSD. - - :returns: Float - :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our Ceph command fails. - """ - try: - tree = str(subprocess - .check_output(['ceph', 'osd', 'tree', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - # Make sure children are present in the JSON - if not json_tree['nodes']: - return None - for device in json_tree['nodes']: - if device['type'] == 'osd' and device['name'] == osd_id: - return device['crush_weight'] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) - raise - - -def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): - """Get all nodes of the desired type, with all their attributes. - - These attributes can be direct or inherited from ancestors. 
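- 
-     Example (hypothetical tree): given nodes
- 
-         [{'id': -1, 'name': 'default', 'type': 'root', 'children': [-2]},
-          {'id': -2, 'name': 'node1', 'type': 'host', 'children': [0]},
-          {'id': 0, 'name': 'osd.0', 'type': 'osd'}]
- 
-     a lookup for type 'host' flattens (via _flatten_roots) to
- 
-         [{'root': 'default', 'host': 'node1',
-           'name': 'node1', 'identifier': -2}]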
- """ - attribute_dict = {node['type']: node['name']} - if node['type'] == lookup_type: - attribute_dict['name'] = node['name'] - attribute_dict['identifier'] = node['id'] - return [attribute_dict] - elif not node.get('children'): - return [attribute_dict] - else: - descendant_attribute_dicts = [ - _filter_nodes_and_set_attributes(node_lookup_map[node_id], - node_lookup_map, lookup_type) - for node_id in node.get('children', []) - ] - return [dict(attribute_dict, **descendant_attribute_dict) - for descendant_attribute_dict - in itertools.chain.from_iterable(descendant_attribute_dicts)] - - -def _flatten_roots(nodes, lookup_type='host'): - """Get a flattened list of nodes of the desired type. - - :param nodes: list of nodes defined as a dictionary of attributes and - children - :type nodes: List[Dict[int, Any]] - :param lookup_type: type of searched node - :type lookup_type: str - :returns: flattened list of nodes - :rtype: List[Dict[str, Any]] - """ - lookup_map = {node['id']: node for node in nodes} - root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, - lookup_type) - for node in nodes if node['type'] == 'root'] - # get a flattened list of roots. - return list(itertools.chain.from_iterable(root_attributes_dicts)) - - -def get_osd_tree(service): - """Returns the current OSD map in JSON. - - :returns: List. - :rtype: List[CrushLocation] - :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our Ceph command fails - """ - try: - tree = str(subprocess - .check_output(['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - roots = _flatten_roots(json_tree["nodes"]) - return [CrushLocation(**host) for host in roots] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format(e)) - raise - - -def _get_child_dirs(path): - """Returns a list of directory names in the specified path. - - :param path: a full path listing of the parent directory to return child - directory names - :returns: list. A list of child directories under the parent directory - :raises: ValueError if the specified path does not exist or is not a - directory, - OSError if an error occurs reading the directory listing - """ - if not os.path.exists(path): - raise ValueError('Specified path "%s" does not exist' % path) - if not os.path.isdir(path): - raise ValueError('Specified path "%s" is not a directory' % path) - - files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] - return list(filter(os.path.isdir, files_in_dir)) - - -def _get_osd_num_from_dirname(dirname): - """Parses the dirname and returns the OSD id. - - Parses a string in the form of 'ceph-{osd#}' and returns the OSD number - from the directory name. - - :param dirname: the directory name to return the OSD number from - :return int: the OSD number the directory name corresponds to - :raises ValueError: if the OSD number cannot be parsed from the provided - directory name. - """ - match = re.search(r'ceph-(?P\d+)', dirname) - if not match: - raise ValueError("dirname not in correct format: {}".format(dirname)) - - return match.group('osd_id') - - -def get_local_osd_ids(): - """This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list. - - :returns: list. 
A list of OSD identifiers - :raises: OSError if something goes wrong with listing the directory. - """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if (_is_int(osd_id) and - filesystem_mounted(os.path.join( - os.sep, osd_path, osd_dir))): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list. - - :returns: list. A list of monitor identifiers - :raises: OSError if something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search(r'ceph-(?P<mon_id>.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - package = "ceph" - - current_ver = get_installed_version(package) - if not current_ver: - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match(r'^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: {}".format(msg), - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(str(subprocess - .check_output(cmd) - .decode('UTF-8'))) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(str(subprocess - .check_output(cmd) - .decode('UTF-8'))) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def manager_available(): - # if manager daemon isn't on this release, just say it is fine - if cmp_pkgrevno('ceph', '11.0.0') < 0: - return True - cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] - try: - result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return result['available'] - except subprocess.CalledProcessError as e: - log("'{}' failed: {}".format(" ".join(cmd), str(e))) - return False - except Exception: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for 
quorum to be reached") - time.sleep(3) - - -def wait_for_manager(): - while not manager_available(): - log("Waiting for manager to be available") - time.sleep(5) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation -] - - -def get_partition_list(dev): - """Lists the partitions of a block device. - - :param dev: Path to a block device. ex: /dev/sda - :returns: Returns a list of Partition objects. - :raises: CalledProcessException if lsblk fails - """ - partitions_list = [] - try: - partitions = get_partitions(dev) - # For each line of output - for partition in partitions: - parts = partition.split() - try: - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) - except IndexError: - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name="", - uuid=parts[5]) - ) - - return partitions_list - except subprocess.CalledProcessError: - raise - - -def is_pristine_disk(dev): - """ - Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it - is actually all zeros and safe for us to use. - - Existing partitioning tools does not discern between a failure to read from - block device, failure to understand a partition table and the fact that a - block device has no partition table. Since we need to be positive about - which is which we need to read the device directly and confirm ourselves. - - :param dev: Path to block device - :type dev: str - :returns: True all 2048 bytes == 0x0, False if not - :rtype: bool - """ - want_bytes = 2048 - - try: - f = open(dev, 'rb') - except OSError as e: - log(e) - return False - - data = f.read(want_bytes) - read_bytes = len(data) - if read_bytes != want_bytes: - log('{}: short read, got {} bytes expected {}.' - .format(dev, read_bytes, want_bytes), level=WARNING) - return False - - return all(byte == 0x0 for byte in data) - - -def is_osd_disk(dev): - db = kv() - osd_devices = db.get('osd-devices', []) - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return True - - partitions = get_partition_list(dev) - for partition in partitions: - try: - info = str(subprocess - .check_output(['sgdisk', '-i', partition.number, dev]) - .decode('UTF-8')) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError as e: - log("sgdisk inspection of partition {} on {} failed with " - "error: {}. 
Skipping".format(partition.minor, dev, e), - level=ERROR) - return False - - -def start_osds(devices): - # Scan for Ceph block devices - rescan_osd_devices() - if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and - cmp_pkgrevno('ceph', '14.2.0') < 0): - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call( - ['ceph-disk', 'activate', dev_or_path]) - - -def udevadm_settle(): - cmd = ['udevadm', 'settle'] - subprocess.call(cmd) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - udevadm_settle() - - -_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' - - -def is_bootstrapped(): - return os.path.exists( - '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = str(subprocess.check_output(cmd).decode('UTF-8')) - - return "{}==".format(res.split('=')[1].strip()) - - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - return element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except Exception: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(pool_list=None, name=None): - return get_named_key(name=name or 'radosgw.gateway', - caps=_radosgw_caps, - pool_list=pool_list) - - -def get_mds_key(name): - return create_named_keyring(entity='mds', - name=name, - caps=mds_caps) - - -_mds_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-mds' - ] -} - - -def get_mds_bootstrap_key(): - return get_named_key('bootstrap-mds', - _mds_bootstrap_caps_profile) - - -_default_caps = collections.OrderedDict([ - ('mon', ['allow r', - 'allow command "osd blacklist"', - 'allow command "osd blocklist"']), - ('osd', ['allow rwx']), -]) - -admin_caps = collections.OrderedDict([ - ('mds', ['allow *']), - ('mgr', ['allow *']), - ('mon', ['allow *']), - ('osd', ['allow *']) -]) - -mds_caps = collections.OrderedDict([ - 
('osd', ['allow *']), - ('mds', ['allow']), - ('mon', ['allow rwx']), -]) - -osd_upgrade_caps = collections.OrderedDict([ - ('mon', ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ]) -]) - -rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['allow profile rbd-mirror-peer', - 'allow command "service dump"', - 'allow command "service status"' - ]), - ('osd', ['profile rbd']), - ('mgr', ['allow r']), -]) - - -def get_rbd_mirror_key(name): - return get_named_key(name=name, caps=rbd_mirror_caps) - - -def create_named_keyring(entity, name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, - name=name), - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return (parse_key(str(subprocess - .check_output(cmd) - .decode('UTF-8')) - .strip())) # IGNORE:E1103 - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None, pool_list=None): - """Retrieve a specific named cephx key. - - :param name: String Name of key to get. - :param pool_list: The list of pools to give access to - :param caps: dict of cephx capabilities - :returns: Returns a cephx key - """ - key_name = 'client.{}'.format(name) - try: - # Does the key already exist? - output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - key_name, - ]).decode('UTF-8')).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! 
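- # (Illustration, hypothetical key material: parse_key() above copes with
- #  both output shapes -- a bare single-line key such as
- #      'AQD1eOtfAAAAABAAxGklNdMX8uJ5DDLOvgTDgg=='
- #  from 'create', and the verbose 'get' form
- #      [client.glance]
- #          key = AQD1eOtfAAAAABAAxGklNdMX8uJ5DDLOvgTDgg==
- #  from which the 'key = ' line is extracted.)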
- log("Creating new key for {}".format(name), level=DEBUG) - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', key_name, - ] - # Add capabilities - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(str(subprocess - .check_output(cmd) - .decode('UTF-8')) - .strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps, pool_list=None): - """Upgrade key to have capabilities caps""" - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' - - -def use_bluestore(): - """Determine whether bluestore should be used for OSD's - - :returns: whether bluestore disk format should be used - :rtype: bool""" - if cmp_pkgrevno('ceph', '12.2.0') < 0: - return False - return config('bluestore') - - -def bootstrap_monitor_cluster(secret): - """Bootstrap local Ceph mon into the Ceph cluster - - :param secret: cephx secret to use for monitor authentication - :type secret: str - :raises: Exception if Ceph mon cannot be bootstrapped - """ - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user(), - perms=0o755) - # end changes for Ceph >= 0.61.3 - try: - _create_monitor(keyring, - secret, - hostname, - path, - done, - init_marker) - except Exception: - raise - finally: - os.unlink(keyring) - - -def _create_monitor(keyring, secret, hostname, path, done, init_marker): - """Create monitor filesystem and enable and start ceph-mon process - - :param keyring: path to temporary keyring on disk - :type keyring: str - :param secret: cephx secret to use for monitor authentication - :type: secret: str - :param hostname: hostname of the local unit - :type hostname: str - :param path: full path to Ceph mon directory - :type path: str - :param done: full path to 'done' marker for Ceph mon - :type done: str - :param init_marker: full path to 'init' marker for Ceph mon - :type init_marker: str - """ - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', 
hostname,
-                            '--keyring', keyring])
-    chownr('/var/log/ceph', ceph_user(), ceph_user())
-    chownr(path, ceph_user(), ceph_user())
-    with open(done, 'w'):
-        pass
-    with open(init_marker, 'w'):
-        pass
-
-    if systemd():
-        if cmp_pkgrevno('ceph', '14.0.0') >= 0:
-            systemd_unit = 'ceph-mon@{}'.format(socket.gethostname())
-        else:
-            systemd_unit = 'ceph-mon'
-        subprocess.check_call(['systemctl', 'enable', systemd_unit])
-        service_restart(systemd_unit)
-    else:
-        service_restart('ceph-mon-all')
-
-
-def create_keyrings():
-    """Create keyrings for operation of ceph-mon units
-
-    NOTE: Quorum should be reached before this function is executed.
-
-    :raises: Exception if keyrings cannot be created
-    """
-    if cmp_pkgrevno('ceph', '14.0.0') >= 0:
-        # NOTE(jamespage): At Nautilus, keys are created by the
-        #                  monitors automatically and just need
-        #                  exporting.
-        output = str(subprocess.check_output(
-            [
-                'sudo',
-                '-u', ceph_user(),
-                'ceph',
-                '--name', 'mon.',
-                '--keyring',
-                '/var/lib/ceph/mon/ceph-{}/keyring'.format(
-                    socket.gethostname()
-                ),
-                'auth', 'get', 'client.admin',
-            ]).decode('UTF-8')).strip()
-        if not output:
-            # NOTE: key not yet created, raise exception and retry
-            raise Exception
-        # NOTE: octopus wants newline at end of file LP: #1864706
-        output += '\n'
-        write_file(_client_admin_keyring, output,
-                   owner=ceph_user(), group=ceph_user(),
-                   perms=0o400)
-    else:
-        # NOTE(jamespage): Later Ceph releases require an explicit
-        #                  call to ceph-create-keys to set up the
-        #                  admin keys for the cluster; this command
-        #                  will wait for quorum in the cluster before
-        #                  returning.
-        # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
-        #                 Ceph releases too. This improves bootstrap
-        #                 resilience as the charm will wait for
-        #                 presence of peer units before attempting
-        #                 to bootstrap. Note that charms deploying the
-        #                 ceph-mon service should disable running of
-        #                 the `ceph-create-keys` service in the init system.
-        cmd = ['ceph-create-keys', '--id', socket.gethostname()]
-        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
-            # NOTE(fnordahl): The default timeout in ceph-create-keys of 600
-            #                 seconds is not adequate. Increase timeout when
-            #                 timeout parameter available. For older releases
-            #                 we rely on retry_on_exception decorator.
-            #                 LP#1719436
-            cmd.extend(['--timeout', '1800'])
-        subprocess.check_call(cmd)
-        osstat = os.stat(_client_admin_keyring)
-        if not osstat.st_size:
-            # NOTE(fnordahl): Retry will fail as long as this file exists.
-            #                 LP#1719436
-            os.remove(_client_admin_keyring)
-            raise Exception
-
-
-def update_monfs():
-    hostname = socket.gethostname()
-    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
-    if systemd():
-        init_marker = '{}/systemd'.format(monfs)
-    else:
-        init_marker = '{}/upstart'.format(monfs)
-    if os.path.exists(monfs) and not os.path.exists(init_marker):
-        # Mark mon as managed by upstart so that
-        # it gets started correctly on reboot
-        with open(init_marker, 'w'):
-            pass
-
-
-def get_partitions(dev):
-    cmd = ['partx', '--raw', '--noheadings', dev]
-    try:
-        out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines()
-        log("get partitions: {}".format(out), level=DEBUG)
-        return out
-    except subprocess.CalledProcessError as e:
-        log("Can't get info for {0}: {1}".format(dev, e.output))
-        return []
-
-
-def get_lvs(dev):
-    """
-    List logical volumes for the provided block device
-
-    :param: dev: Full path to block device.
-    :raises subprocess.CalledProcessError: in the event that any supporting
-                                           operation failed.
- :returns: list: List of logical volumes provided by the block device - """ - if not lvm.is_lvm_physical_volume(dev): - return [] - vg_name = lvm.list_lvm_volume_group(dev) - return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) - - -def find_least_used_utility_device(utility_devices, lvs=False): - """ - Find a utility device which has the smallest number of partitions - among other devices in the supplied list. - - :utility_devices: A list of devices to be used for filestore journal - or bluestore wal or db. - :lvs: flag to indicate whether inspection should be based on LVM LV's - :return: string device name - """ - if lvs: - usages = map(lambda a: (len(get_lvs(a)), a), utility_devices) - else: - usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def get_devices(name): - """Merge config and Juju storage based devices - - :name: The name of the device type, e.g.: wal, osd, journal - :returns: Set(device names), which are strings - """ - if config(name): - devices = [dev.strip() for dev in config(name).split(' ')] - else: - devices = [] - storage_ids = storage_list(name) - devices.extend((storage_get('location', sid) for sid in storage_ids)) - devices = filter(os.path.exists, devices) - - return set(devices) - - -def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): - if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - ignore_errors, encrypt, - bluestore, key_manager, osd_id) - else: - if cmp_pkgrevno('ceph', '14.0.0') >= 0: - log("Directory backed OSDs can not be created on Nautilus", - level=WARNING) - return - osdize_dir(dev, encrypt, bluestore) - - -def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, - osd_id=None): - """ - Prepare a block device for use as a Ceph OSD - - A block device will only be prepared once during the lifetime - of the calling charm unit; future executions will be skipped. 
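get_devices above merges the space-separated device list from charm config with any Juju storage attachments, keeping only paths that exist. A self-contained sketch of the same merge, with the charm's config() and storage lookups replaced by plain arguments (illustrative only):

    import os

    def merge_devices(config_value, storage_locations):
        """Merge a space-separated device string with storage-provided
        paths, keeping only devices present on this machine."""
        devices = [d.strip() for d in config_value.split()] if config_value else []
        devices.extend(storage_locations)
        return set(filter(os.path.exists, devices))

    # On a machine where only /dev/sdb exists:
    #   merge_devices('/dev/sdb /dev/sdc', ['/dev/sdd']) -> {'/dev/sdb'}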
- - :param: dev: Full path to block device to use - :param: osd_format: Format for OSD filesystem - :param: osd_journal: List of block devices to use for OSD journals - :param: ignore_errors: Don't fail in the event of any errors during - processing - :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native Ceph block device format - :param: key_manager: Key management approach for encryption keys - :raises subprocess.CalledProcessError: in the event that any supporting - subprocess operation failed - :raises ValueError: if an invalid key_manager is provided - """ - if key_manager not in KEY_MANAGERS: - raise ValueError('Unsupported key manager: {}'.format(key_manager)) - - db = kv() - osd_devices = db.get('osd-devices', []) - try: - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return - - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev): - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - if is_device_mounted(dev): - osd_devices.append(dev) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - if is_active_bluestore_device(dev): - log('{} is in use as an active bluestore block device,' - ' skipping.'.format(dev)) - osd_devices.append(dev) - return - - if is_mapped_luks_device(dev): - log('{} is a mapped LUKS device,' - ' skipping.'.format(dev)) - return - - if cmp_pkgrevno('ceph', '12.2.4') >= 0: - cmd = _ceph_volume(dev, - osd_journal, - encrypt, - bluestore, - key_manager, - osd_id) - else: - cmd = _ceph_disk(dev, - osd_format, - osd_journal, - encrypt, - bluestore) - - try: - status_set('maintenance', 'Initializing device {}'.format(dev)) - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - try: - lsblk_output = subprocess.check_output( - ['lsblk', '-P']).decode('UTF-8') - except subprocess.CalledProcessError as e: - log("Couldn't get lsblk output: {}".format(e), ERROR) - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), DEBUG) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), WARNING) - raise - - # NOTE: Record processing of device only on success to ensure that - # the charm only tries to initialize a device of OSD usage - # once during its lifetime. - osd_devices.append(dev) - finally: - db.set('osd-devices', osd_devices) - db.flush() - - -def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): - """ - Prepare a device for usage as a Ceph OSD using ceph-disk - - :param: dev: Full path to use for OSD block device setup, - The function looks up realpath of the device - :param: osd_journal: List of block devices to use for OSD journals - :param: encrypt: Use block device encryption (unsupported) - :param: bluestore: Use bluestore storage for OSD - :returns: list. 
'ceph-disk' command and required parameters for - execution by check_call - """ - cmd = ['ceph-disk', 'prepare'] - - if encrypt: - cmd.append('--dmcrypt') - - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - # NOTE(jamespage): enable experimental bluestore support - if use_bluestore(): - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') - - cmd.append(os.path.realpath(dev)) - - if osd_journal: - least_used = find_least_used_utility_device(osd_journal) - cmd.append(least_used) - - return cmd - - -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): - """ - Prepare and activate a device for usage as a Ceph OSD using ceph-volume. - - This also includes creation of all PV's, VG's and LV's required to - support the initialization of the OSD. - - :param: dev: Full path to use for OSD block device setup - :param: osd_journal: List of block devices to use for OSD journals - :param: encrypt: Use block device encryption - :param: bluestore: Use bluestore storage for OSD - :param: key_manager: dm-crypt Key Manager to use - :param: osd_id: The OSD-id to recycle, or None to create a new one - :raises subprocess.CalledProcessError: in the event that any supporting - LVM operation failed. - :returns: list. 'ceph-volume' command and required parameters for - execution by check_call - """ - cmd = ['ceph-volume', 'lvm', 'create'] - - osd_fsid = str(uuid.uuid4()) - cmd.append('--osd-fsid') - cmd.append(osd_fsid) - - if bluestore: - cmd.append('--bluestore') - main_device_type = 'block' - else: - cmd.append('--filestore') - main_device_type = 'data' - - if encrypt and key_manager == CEPH_KEY_MANAGER: - cmd.append('--dmcrypt') - - if osd_id is not None: - cmd.extend(['--osd-id', str(osd_id)]) - - # On-disk journal volume creation - if not osd_journal and not bluestore: - journal_lv_type = 'journal' - cmd.append('--journal') - cmd.append(_allocate_logical_volume( - dev=dev, - lv_type=journal_lv_type, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - encrypt=encrypt, - key_manager=key_manager) - ) - - cmd.append('--data') - cmd.append(_allocate_logical_volume(dev=dev, - lv_type=main_device_type, - osd_fsid=osd_fsid, - encrypt=encrypt, - key_manager=key_manager)) - - if bluestore: - for extra_volume in ('wal', 'db'): - devices = get_devices('bluestore-{}'.format(extra_volume)) - if devices: - cmd.append('--block.{}'.format(extra_volume)) - least_used = find_least_used_utility_device(devices, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type=extra_volume, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - elif osd_journal: - cmd.append('--journal') - least_used = find_least_used_utility_device(osd_journal, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type='journal', - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - return cmd - - -def _partition_name(dev): - """ - Derive the 
first partition name for a block device - - :param: dev: Full path to block device. - :returns: str: Full path to first partition on block device. - """ - if dev[-1].isdigit(): - return '{}p1'.format(dev) - else: - return '{}1'.format(dev) - - -def is_active_bluestore_device(dev): - """ - Determine whether provided device is part of an active - bluestore based OSD (as its block component). - - :param: dev: Full path to block device to check for Bluestore usage. - :returns: boolean: indicating whether device is in active use. - """ - if not lvm.is_lvm_physical_volume(dev): - return False - - vg_name = lvm.list_lvm_volume_group(dev) - try: - lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] - except IndexError: - return False - - block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') - for block_candidate in block_symlinks: - if os.path.islink(block_candidate): - target = os.readlink(block_candidate) - if target.endswith(lv_name): - return True - - return False - - -def is_luks_device(dev): - """ - Determine if dev is a LUKS-formatted block device. - - :param: dev: A full path to a block device to check for LUKS header - presence - :returns: boolean: indicates whether a device is used based on LUKS header. - """ - return True if _luks_uuid(dev) else False - - -def is_mapped_luks_device(dev): - """ - Determine if dev is a mapped LUKS device - :param: dev: A full path to a block device to be checked - :returns: boolean: indicates whether a device is mapped - """ - _, dirs, _ = next(os.walk( - '/sys/class/block/{}/holders/' - .format(os.path.basename(os.path.realpath(dev)))) - ) - is_held = len(dirs) > 0 - return is_held and is_luks_device(dev) - - -def get_conf(variable): - """ - Get the value of the given configuration variable from the - cluster. - - :param variable: Ceph configuration variable - :returns: str. configured value for provided variable - - """ - return subprocess.check_output([ - 'ceph-osd', - '--show-config-value={}'.format(variable), - '--no-mon-config', - ]).strip() - - -def calculate_volume_size(lv_type): - """ - Determine the configured size for Bluestore DB/WAL or - Filestore Journal devices - - :param lv_type: volume type (db, wal or journal) - :raises KeyError: if invalid lv_type is supplied - :returns: int. Configured size in megabytes for volume type - """ - # lv_type -> Ceph configuration option - _config_map = { - 'db': 'bluestore_block_db_size', - 'wal': 'bluestore_block_wal_size', - 'journal': 'osd_journal_size', - } - - # default sizes in MB - _default_size = { - 'db': 1024, - 'wal': 576, - 'journal': 1024, - } - - # conversion of Ceph config units to MB - _units = { - 'db': 1048576, # Bytes -> MB - 'wal': 1048576, # Bytes -> MB - 'journal': 1, # Already in MB - } - - configured_size = get_conf(_config_map[lv_type]) - - if configured_size is None or int(configured_size) == 0: - return _default_size[lv_type] - else: - return int(configured_size) / _units[lv_type] - - -def _luks_uuid(dev): - """ - Check to see if dev is a LUKS encrypted volume, returning the UUID - of volume if it is. - - :param: dev: path to block device to check. - :returns: str. UUID of LUKS device or None if not a LUKS device - """ - try: - cmd = ['cryptsetup', 'luksUUID', dev] - return subprocess.check_output(cmd).decode('UTF-8').strip() - except subprocess.CalledProcessError: - return None - - -def _initialize_disk(dev, dev_uuid, encrypt=False, - key_manager=CEPH_KEY_MANAGER): - """ - Initialize a raw block device consuming 100% of the available - disk space. 
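calculate_volume_size above normalises three differently-scaled Ceph options to megabytes: the bluestore DB/WAL sizes are reported in bytes, while osd_journal_size is already in MB. A worked example of the arithmetic (values are illustrative):

    # bluestore_block_db_size is reported by ceph-osd in bytes:
    configured = 2147483648                    # 2 GiB expressed in bytes
    assert int(configured) / 1048576 == 2048   # -> 2048 MB

    # osd_journal_size is already expressed in MB, so its unit factor is 1:
    assert int('1024') / 1 == 1024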
- - Function assumes that block device has already been wiped. - - :param: dev: path to block device to initialize - :param: dev_uuid: UUID to use for any dm-crypt operations - :param: encrypt: Encrypt OSD devices using dm-crypt - :param: key_manager: Key management approach for dm-crypt keys - :raises: subprocess.CalledProcessError: if any parted calls fail - :returns: str: Full path to new partition. - """ - use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER - - if use_vaultlocker: - # NOTE(jamespage): Check to see if already initialized as a LUKS - # volume, which indicates this is a shared block - # device for journal, db or wal volumes. - luks_uuid = _luks_uuid(dev) - if luks_uuid: - return '/dev/mapper/crypt-{}'.format(luks_uuid) - - dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) - - if use_vaultlocker and not os.path.exists(dm_crypt): - subprocess.check_call([ - 'vaultlocker', - 'encrypt', - '--uuid', dev_uuid, - dev, - ]) - subprocess.check_call([ - 'dd', - 'if=/dev/zero', - 'of={}'.format(dm_crypt), - 'bs=512', - 'count=1', - ]) - - if use_vaultlocker: - return dm_crypt - else: - return dev - - -def _allocate_logical_volume(dev, lv_type, osd_fsid, - size=None, shared=False, - encrypt=False, - key_manager=CEPH_KEY_MANAGER): - """ - Allocate a logical volume from a block device, ensuring any - required initialization and setup of PV's and VG's to support - the LV. - - :param: dev: path to block device to allocate from. - :param: lv_type: logical volume type to create - (data, block, journal, wal, db) - :param: osd_fsid: UUID of the OSD associate with the LV - :param: size: Size in LVM format for the device; - if unset 100% of VG - :param: shared: Shared volume group (journal, wal, db) - :param: encrypt: Encrypt OSD devices using dm-crypt - :param: key_manager: dm-crypt Key Manager to use - :raises subprocess.CalledProcessError: in the event that any supporting - LVM or parted operation fails. - :returns: str: String in the format 'vg_name/lv_name'. - """ - lv_name = "osd-{}-{}".format(lv_type, osd_fsid) - current_volumes = lvm.list_logical_volumes() - if shared: - dev_uuid = str(uuid.uuid4()) - else: - dev_uuid = osd_fsid - pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) - - vg_name = None - if not lvm.is_lvm_physical_volume(pv_dev): - lvm.create_lvm_physical_volume(pv_dev) - if not os.path.exists(pv_dev): - # NOTE: trigger rescan to work around bug 1878752 - rescan_osd_devices() - if shared: - vg_name = 'ceph-{}-{}'.format(lv_type, - str(uuid.uuid4())) - else: - vg_name = 'ceph-{}'.format(osd_fsid) - lvm.create_lvm_volume_group(vg_name, pv_dev) - else: - vg_name = lvm.list_lvm_volume_group(pv_dev) - - if lv_name not in current_volumes: - lvm.create_logical_volume(lv_name, vg_name, size) - - return "{}/{}".format(vg_name, lv_name) - - -def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an OSD. - - :param path: str. The directory to osdize - :param encrypt: bool. 
Should the OSD directory be encrypted at rest
-    :param bluestore: bool. Use a bluestore filesystem for the OSD
-    :returns: None
-    """
-
-    db = kv()
-    osd_devices = db.get('osd-devices', [])
-    if path in osd_devices:
-        log('Device {} already processed by charm,'
-            ' skipping'.format(path))
-        return
-
-    for t in ['upstart', 'systemd']:
-        if os.path.exists(os.path.join(path, t)):
-            log('Path {} is already used as an OSD dir - bailing'.format(path))
-            return
-
-    if cmp_pkgrevno('ceph', "0.56.6") < 0:
-        log('Unable to use directories for OSDs with ceph < 0.56.6',
-            level=ERROR)
-        return
-
-    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
-    chownr('/var/lib/ceph', ceph_user(), ceph_user())
-    cmd = [
-        'sudo', '-u', ceph_user(),
-        'ceph-disk',
-        'prepare',
-        '--data-dir',
-        path
-    ]
-    if cmp_pkgrevno('ceph', '0.60') >= 0:
-        if encrypt:
-            cmd.append('--dmcrypt')
-
-    # NOTE(icey): enable experimental bluestore support
-    if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
-        cmd.append('--bluestore')
-    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
-        cmd.append('--filestore')
-    log("osdize dir cmd: {}".format(cmd))
-    subprocess.check_call(cmd)
-
-    # NOTE: Record processing of device only on success to ensure that
-    #       the charm only tries to initialize a device of OSD usage
-    #       once during its lifetime.
-    osd_devices.append(path)
-    db.set('osd-devices', osd_devices)
-    db.flush()
-
-
-def filesystem_mounted(fs):
-    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
-
-
-def get_running_osds():
-    """Returns a list of the pids of the current running OSD daemons"""
-    cmd = ['pgrep', 'ceph-osd']
-    try:
-        result = str(subprocess.check_output(cmd).decode('UTF-8'))
-        return result.split()
-    except subprocess.CalledProcessError:
-        return []
-
-
-def get_cephfs(service):
-    """List the Ceph Filesystems that exist.
-
-    :param service: The service name to run the Ceph command under
-    :returns: list. Returns a list of the Ceph filesystems
-    """
-    if get_version() < 0.86:
-        # This command wasn't introduced until 0.86 Ceph
-        return []
-    try:
-        output = str(subprocess
-                     .check_output(["ceph", '--id', service, "fs", "ls"])
-                     .decode('UTF-8'))
-        if not output:
-            return []
-        """
-        Example subprocess output:
-        'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
-        data pools: [ip-172-31-23-165_data ]\n'
-        output: filesystems: ['ip-172-31-23-165']
-        """
-        filesystems = []
-        for line in output.splitlines():
-            parts = line.split(',')
-            for part in parts:
-                if "name" in part:
-                    filesystems.append(part.split(' ')[1])
-        return filesystems
-    except subprocess.CalledProcessError:
-        return []
-
-
-def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
-    """Wait for all monitors in the cluster to upgrade, or return
-    after a timeout period has expired.
-
-    :param new_version: str of the version to watch
-    :param upgrade_key: the cephx key name to use
-    """
-    done = False
-    start_time = time.time()
-    monitor_list = []
-
-    mon_map = get_mon_map('admin')
-    if mon_map['monmap']['mons']:
-        for mon in mon_map['monmap']['mons']:
-            monitor_list.append(mon['name'])
-    while not done:
-        try:
-            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
-                "mon", mon, new_version
-            )) for mon in monitor_list)
-            current_time = time.time()
-            if current_time > (start_time + 10 * 60):
-                raise Exception("Timed out waiting for monitors to upgrade")
-            else:
-                # Wait 30 seconds and test again if all monitors are upgraded
-                time.sleep(30)
-        except subprocess.CalledProcessError:
-            raise
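The `ceph fs ls` output consumed by get_cephfs above is plain text, one filesystem per line. A standalone sketch of the same extraction, run against the example output quoted in the function:

    def parse_fs_ls(output):
        """Pull filesystem names out of 'ceph fs ls' plain-text output."""
        filesystems = []
        for line in output.splitlines():
            for part in line.split(','):
                if 'name' in part:
                    # 'name: ip-172-31-23-165' -> 'ip-172-31-23-165'
                    filesystems.append(part.split(' ')[1])
        return filesystems

    sample = ('name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata, '
              'data pools: [ip-172-31-23-165_data ]\n')
    assert parse_fs_ls(sample) == ['ip-172-31-23-165']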
-
-
-# Edge cases:
-# 1. Previous node dies on upgrade, can we retry?
-def roll_monitor_cluster(new_version, upgrade_key):
-    """This is tricky to get right so here's what we're going to do.
-
-    There are two possible cases: either I'm first in line or not.
-    If I'm not first in line I'll wait a random time between 5-30 seconds
-    and test to see if the previous monitor is upgraded yet.
-
-    :param new_version: str of the version to upgrade to
-    :param upgrade_key: the cephx key name to use when upgrading
-    """
-    log('roll_monitor_cluster called with {}'.format(new_version))
-    my_name = socket.gethostname()
-    monitor_list = []
-    mon_map = get_mon_map('admin')
-    if mon_map['monmap']['mons']:
-        for mon in mon_map['monmap']['mons']:
-            monitor_list.append(mon['name'])
-    else:
-        status_set('blocked', 'Unable to get monitor cluster information')
-        sys.exit(1)
-    log('monitor_list: {}'.format(monitor_list))
-
-    # A sorted list of monitor unit names
-    mon_sorted_list = sorted(monitor_list)
-
-    # Install packages immediately but defer restarts to when it's our time.
-    upgrade_monitor(new_version, restart_daemons=False)
-    try:
-        position = mon_sorted_list.index(my_name)
-        log("upgrade position: {}".format(position))
-        if position == 0:
-            # I'm first! Roll
-            # First set a key to inform others I'm about to roll
-            lock_and_roll(upgrade_key=upgrade_key,
-                          service='mon',
-                          my_name=my_name,
-                          version=new_version)
-        else:
-            # Check if the previous node has finished
-            status_set('waiting',
-                       'Waiting on {} to finish upgrading'.format(
-                           mon_sorted_list[position - 1]))
-            wait_on_previous_node(upgrade_key=upgrade_key,
-                                  service='mon',
-                                  previous_node=mon_sorted_list[position - 1],
-                                  version=new_version)
-            lock_and_roll(upgrade_key=upgrade_key,
-                          service='mon',
-                          my_name=my_name,
-                          version=new_version)
-        # NOTE(jamespage):
-        # Wait until all monitors have upgraded before bootstrapping
-        # the ceph-mgr daemons due to use of new mgr keyring profiles
-        if new_version == 'luminous':
-            wait_for_all_monitors_to_upgrade(new_version=new_version,
-                                             upgrade_key=upgrade_key)
-            bootstrap_manager()
-
-        # NOTE(jmcvaughn):
-        # Nautilus and later binaries use msgr2 by default, but existing
-        # clusters that have been upgraded from pre-Nautilus will not
-        # automatically have msgr2 enabled. Without this, Ceph will show
-        # a warning only (with no impact to operations), but newly added units
-        # will not be able to join the cluster. Therefore, we ensure it is
-        # enabled on upgrade for all versions including and after Nautilus
-        # (to cater for previous charm versions that will not have done this).
-        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0
-        if nautilus_or_later:
-            wait_for_all_monitors_to_upgrade(new_version=new_version,
-                                             upgrade_key=upgrade_key)
-            enable_msgr2()
-    except ValueError:
-        log("Failed to find {} in list {}.".format(
-            my_name, mon_sorted_list))
-        status_set('blocked', 'failed to upgrade monitor')
-
-
-# For E731 we can't assign a lambda; pass this function instead.
-def noop():
-    pass
-
-
-def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
-    """Upgrade the current Ceph monitor to the new version
-
-    :param new_version: String version to upgrade to.
- """ - if kick_function is None: - kick_function = noop - current_version = get_version() - status_set("maintenance", "Upgrading monitor") - log("Current Ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - # Needed to determine if whether to stop/start ceph-mgr - luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - # Needed to differentiate between systemd unit names - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 - kick_function() - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the Ceph source failed with message: {}".format( - err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - kick_function() - - try: - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) - except subprocess.CalledProcessError as err: - log("Upgrading packages failed " - "with message: {}".format(err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - if not restart_daemons: - log("Packages upgraded but not restarting daemons yet.") - return - - try: - if systemd(): - if nautilus_or_later: - systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) - else: - systemd_unit = 'ceph-mon' - service_stop(systemd_unit) - log("restarting ceph-mgr.target maybe: {}" - .format(luminous_or_later)) - if luminous_or_later: - service_stop('ceph-mgr.target') - else: - service_stop('ceph-mon-all') - - kick_function() - - owner = ceph_user() - - # Ensure the files and directories under /var/lib/ceph is chowned - # properly as part of the move to the Jewel release, which moved the - # ceph daemons to running as ceph:ceph instead of root:root. - if new_version == 'jewel': - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=owner, - group=owner, - follow_links=True) - - kick_function() - - # Ensure that mon directory is user writable - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - mkdir(path, owner=ceph_user(), group=ceph_user(), - perms=0o755) - - if systemd(): - if nautilus_or_later: - systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) - else: - systemd_unit = 'ceph-mon' - service_restart(systemd_unit) - log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) - if luminous_or_later: - # due to BUG: #1849874 we have to force a restart to get it to - # drop the previous version of ceph-manager and start the new - # one. - service_restart('ceph-mgr.target') - else: - service_start('ceph-mon-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the Ceph monitor cluster and upgrade. - - :param upgrade_key: str. The cephx key to use - :param service: str. The cephx id to use - :param my_name: str. The current hostname - :param version: str. 
The version we are upgrading to - """ - start_timestamp = time.time() - - log('monitor_key_set {}_{}_{}_start {}'.format( - service, - my_name, - version, - start_timestamp)) - monitor_key_set(upgrade_key, "{}_{}_{}_start".format( - service, my_name, version), start_timestamp) - - # alive indication: - alive_function = ( - lambda: monitor_key_set( - upgrade_key, "{}_{}_{}_alive" - .format(service, my_name, version), time.time())) - dog = WatchDog(kick_interval=3 * 60, - kick_function=alive_function) - - log("Rolling") - - # This should be quick - if service == 'osd': - upgrade_osd(version, kick_function=dog.kick_the_dog) - elif service == 'mon': - upgrade_monitor(version, kick_function=dog.kick_the_dog) - else: - log("Unknown service {}. Unable to upgrade".format(service), - level=ERROR) - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_{}_done {}'.format(service, - my_name, - version, - stop_timestamp)) - status_set('maintenance', 'Finishing upgrade') - monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, - my_name, - version), - stop_timestamp) - - -def wait_on_previous_node(upgrade_key, service, previous_node, version): - """A lock that sleeps the current thread while waiting for the previous - node to finish upgrading. - - :param upgrade_key: - :param service: str. the cephx id to use - :param previous_node: str. The name of the previous node to wait on - :param version: str. The version we are upgrading to - :returns: None - """ - log("Previous node is: {}".format(previous_node)) - - previous_node_started_f = ( - lambda: monitor_key_exists( - upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version))) - previous_node_finished_f = ( - lambda: monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version))) - previous_node_alive_time_f = ( - lambda: monitor_key_get( - upgrade_key, - "{}_{}_{}_alive".format(service, previous_node, version))) - - # wait for 30 minutes until the previous node starts. We don't proceed - # unless we get a start condition. - try: - WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) - except WatchDog.WatchDogTimeoutException: - log("Waited for previous node to start for 30 minutes. " - "It didn't start, so may have a serious issue. Continuing with " - "upgrade of this node.", - level=WARNING) - return - - # keep the time it started from this nodes' perspective. - previous_node_started_at = time.time() - log("Detected that previous node {} has started. Time now: {}" - .format(previous_node, previous_node_started_at)) - - # Now wait for the node to complete. The node may optionally be kicking - # with the *_alive key, which allows this node to wait longer as it 'knows' - # the other node is proceeding. - try: - WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, - complete_function=previous_node_finished_f, - wait_time=30 * 60, - compatibility_wait_time=10 * 60, - max_kick_interval=5 * 60) - except WatchDog.WatchDogDeadException: - # previous node was kicking, but timed out; log this condition and move - # on. - now = time.time() - waited = int((now - previous_node_started_at) / 60) - log("Previous node started, but has now not ticked for 5 minutes. " - "Waited total of {} mins on node {}. current time: {} > " - "previous node start time: {}. " - "Continuing with upgrade of this node." 
-            .format(waited, previous_node, now, previous_node_started_at),
-            level=WARNING)
-    except WatchDog.WatchDogTimeoutException:
-        # previous node never kicked, or simply took too long; log this
-        # condition and move on.
-        now = time.time()
-        waited = int((now - previous_node_started_at) / 60)
-        log("Previous node is taking too long; assuming it has died. "
-            "Waited {} mins on node {}. current time: {} > "
-            "previous node start time: {}. "
-            "Continuing with upgrade of this node."
-            .format(waited, previous_node, now, previous_node_started_at),
-            level=WARNING)
-
-
-class WatchDog(object):
-    """Watch a dog; basically a kickable timer with a timeout between two async
-    units.
-
-    The idea is that you have an overall timeout and then can kick that timeout
-    with intermediary hits, with a max time between those kicks allowed.
-
-    Note that this watchdog doesn't rely on the clock of the other side; it
-    only uses, roughly, the time at which it detects that the other side
-    started. All timings are based on the local clock.
-
-    The kicker will not 'kick' more often than a set interval, regardless of
-    how often the kick_the_dog() function is called. The kicker provides a
-    function (lambda: -> None) that is called when the kick interval is
-    reached.
-
-    The waiter calls the static method with a check function
-    (lambda: -> Boolean) that indicates when the wait should be over and the
-    maximum interval to wait, e.g. 30 minutes with a 5 minute kick interval.
-
-    So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick
-    interval, or however long it is expected for the key to propagate and to
-    allow for other delays.
-
-    There is a compatibility mode where, if the other side never kicks, the
-    waiter simply waits for the compatibility timer.
-    """
-
-    class WatchDogDeadException(Exception):
-        pass
-
-    class WatchDogTimeoutException(Exception):
-        pass
-
-    def __init__(self, kick_interval=3 * 60, kick_function=None):
-        """Initialise a new WatchDog
-
-        :param kick_interval: the interval when this side kicks the other in
-            seconds.
-        :type kick_interval: Int
-        :param kick_function: The function to call that does the kick.
-        :type kick_function: Callable[]
-        """
-        self.start_time = time.time()
-        self.last_run_func = None
-        self.last_kick_at = None
-        self.kick_interval = kick_interval
-        self.kick_f = kick_function
-
-    def kick_the_dog(self):
-        """Might call the kick_function if it's time.
-
-        This function can be called as frequently as needed, but will run the
-        self.kick_function after kick_interval seconds have passed.
-        """
-        now = time.time()
-        if (self.last_run_func is None or
-                (now - self.last_run_func > self.kick_interval)):
-            if self.kick_f is not None:
-                self.kick_f()
-            self.last_run_func = now
-            self.last_kick_at = now
-
-    @staticmethod
-    def wait_until(wait_f, timeout=10 * 60):
-        """Wait for timeout seconds until the passed function returns True.
-
-        :param wait_f: The function to call that will end the wait.
-        :type wait_f: Callable[[], Boolean]
-        :param timeout: The time to wait in seconds.
-        :type timeout: int
-        """
-        start_time = time.time()
-        while not wait_f():
-            now = time.time()
-            if now > start_time + timeout:
-                raise WatchDog.WatchDogTimeoutException()
-            wait_time = random.randrange(5, 30)
-            log('wait_until: waiting for {} seconds'.format(wait_time))
-            time.sleep(wait_time)
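Taken together, the two halves of this class cooperate as follows: the upgrading unit instantiates a WatchDog and threads kick_the_dog into its long-running work, while the waiting unit combines wait_until (above) with timed_wait (below). A condensed usage sketch, with the monitor_key_get/monitor_key_set persistence replaced by a plain dict purely for illustration:

    import time

    store = {}  # stand-in for the monitor key/value store

    # Upgrading side: kicks are rate-limited to one per kick_interval.
    dog = WatchDog(kick_interval=3 * 60,
                   kick_function=lambda: store.update(alive=str(time.time())))
    # ... call dog.kick_the_dog() as often as convenient during the upgrade,
    # then finally: store['done'] = True

    # Waiting side (would block, so shown commented out):
    # WatchDog.timed_wait(kicked_at_function=lambda: store.get('alive'),
    #                     complete_function=lambda: store.get('done', False),
    #                     wait_time=30 * 60,
    #                     compatibility_wait_time=10 * 60,
    #                     max_kick_interval=5 * 60)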
-
-    @staticmethod
-    def timed_wait(kicked_at_function,
-                   complete_function,
-                   wait_time=30 * 60,
-                   compatibility_wait_time=10 * 60,
-                   max_kick_interval=5 * 60):
-        """Wait a maximum time with an intermediate 'kick' time.
-
-        This function will wait for max_kick_interval seconds unless the
-        kicked_at_function() call returns a time that is not older than
-        max_kick_interval (in seconds). i.e. the other side can signal that it
-        is still doing things during the max_kick_interval as long as it kicks
-        at least every max_kick_interval seconds.
-
-        The maximum wait is "wait_time", but the other side must keep kicking
-        during this period.
-
-        The "compatibility_wait_time" is used if the other side never kicks
-        (i.e. the kicked_at_function() always returns None). In this case the
-        function waits up to "compatibility_wait_time".
-
-        Note that the type of the return from the kicked_at_function is an
-        Optional[str], not a float. The function will coerce this to a float
-        for the comparison. This represents the return value of
-        time.time() at the "other side". It's a string to simplify the
-        function obtaining the time value from the other side.
-
-        The function raises WatchDogTimeoutException if either the
-        compatibility_wait_time or the wait_time are exceeded.
-
-        The function raises WatchDogDeadException if the max_kick_interval is
-        exceeded.
-
-        Note that it is possible that the first kick interval is extended to
-        compatibility_wait_time if the "other side" doesn't kick immediately.
-        The best solution is for the other side to kick early and often.
-
-        :param kicked_at_function: The function to call to retrieve the time
-            that the other side 'kicked' at. None if the other side hasn't
-            kicked.
-        :type kicked_at_function: Callable[[], Optional[str]]
-        :param complete_function: The callable that returns True when done.
-        :type complete_function: Callable[[], Boolean]
-        :param wait_time: the maximum time to wait, even with kicks, in
-            seconds.
-        :type wait_time: int
-        :param compatibility_wait_time: The time to wait if no kicks are
-            received, in seconds.
-        :type compatibility_wait_time: int
-        :param max_kick_interval: The maximum time allowed between kicks
-            before the wait is over, in seconds.
-        :type max_kick_interval: int
-        :raises: WatchDog.WatchDogTimeoutException,
-            WatchDog.WatchDogDeadException
-        """
-        start_time = time.time()
-        while True:
-            if complete_function():
-                break
-            # the time at which the unit we are waiting on last kicked.
-            kicked_at = kicked_at_function()
-            now = time.time()
-            if kicked_at is None:
-                # assume other end doesn't do alive kicks
-                if (now - start_time > compatibility_wait_time):
-                    raise WatchDog.WatchDogTimeoutException()
-            else:
-                # other side is participating in kicks; must kick at least
-                # every 'max_kick_interval' to stay alive.
-                if (now - float(kicked_at) > max_kick_interval):
-                    raise WatchDog.WatchDogDeadException()
-                if (now - start_time > wait_time):
-                    raise WatchDog.WatchDogTimeoutException()
-            delay_time = random.randrange(5, 30)
-            log('waiting for {} seconds'.format(delay_time))
-            time.sleep(delay_time)
-
-
-def get_upgrade_position(osd_sorted_list, match_name):
-    """Return the upgrade position for the given OSD.
-
-    :param osd_sorted_list: OSDs sorted
-    :type osd_sorted_list: [str]
-    :param match_name: The OSD name to match
-    :type match_name: str
-    :returns: The position of the name
-    :rtype: int
-    :raises: ValueError if name is not found
-    """
-    for index, item in enumerate(osd_sorted_list):
-        if item.name == match_name:
-            return index
-    raise ValueError("OSD name '{}' not found in get_upgrade_position list"
-                     .format(match_name))
-
-
-# Edge cases:
-# 1. Previous node dies on upgrade, can we retry?
-# 2. This assumes that the OSD failure domain is not set to OSD.
-#    It rolls an entire server at a time.
-def roll_osd_cluster(new_version, upgrade_key):
-    """This is tricky to get right so here's what we're going to do.
-
-    There are two possible cases: either I'm first in line or not.
-    If I'm not first in line I'll wait a random time between 5-30 seconds
-    and test to see if the previous OSD is upgraded yet.
-
-    TODO: If you're not in the same failure domain it's safe to upgrade
-     1. Examine all pools and adopt the most strict failure domain policy
-        Example: Pool 1: Failure domain = rack
-        Pool 2: Failure domain = host
-        Pool 3: Failure domain = row
-
-        outcome: Failure domain = host
-
-    :param new_version: str of the version to upgrade to
-    :param upgrade_key: the cephx key name to use when upgrading
-    """
-    log('roll_osd_cluster called with {}'.format(new_version))
-    my_name = socket.gethostname()
-    osd_tree = get_osd_tree(service=upgrade_key)
-    # A sorted list of OSD unit names
-    osd_sorted_list = sorted(osd_tree)
-    log("osd_sorted_list: {}".format(osd_sorted_list))
-
-    try:
-        position = get_upgrade_position(osd_sorted_list, my_name)
-        log("upgrade position: {}".format(position))
-        if position == 0:
-            # I'm first! Roll
-            # First set a key to inform others I'm about to roll
-            lock_and_roll(upgrade_key=upgrade_key,
-                          service='osd',
-                          my_name=my_name,
-                          version=new_version)
-        else:
-            # Check if the previous node has finished
-            status_set('waiting',
-                       'Waiting on {} to finish upgrading'.format(
-                           osd_sorted_list[position - 1].name))
-            wait_on_previous_node(
-                upgrade_key=upgrade_key,
-                service='osd',
-                previous_node=osd_sorted_list[position - 1].name,
-                version=new_version)
-            lock_and_roll(upgrade_key=upgrade_key,
-                          service='osd',
-                          my_name=my_name,
-                          version=new_version)
-    except ValueError:
-        log("Failed to find name {} in list {}".format(
-            my_name, osd_sorted_list))
-        status_set('blocked', 'failed to upgrade osd')
-
-
-def upgrade_osd(new_version, kick_function=None):
-    """Upgrades the current OSD
-
-    :param new_version: str. The new version to upgrade to
-    """
-    if kick_function is None:
-        kick_function = noop
-
-    current_version = get_version()
-    status_set("maintenance", "Upgrading OSD")
-    log("Current Ceph version is {}".format(current_version))
-    log("Upgrading to: {}".format(new_version))
-
-    try:
-        add_source(config('source'), config('key'))
-        apt_update(fatal=True)
-    except subprocess.CalledProcessError as err:
-        log("Adding the Ceph sources failed with message: {}".format(
-            err))
-        status_set("blocked", "Upgrade to {} failed".format(new_version))
-        sys.exit(1)
-
-    kick_function()
-
-    try:
-        # Upgrade the packages before restarting the daemons.
-        status_set('maintenance', 'Upgrading packages to %s' % new_version)
-        apt_install(packages=determine_packages(), fatal=True)
-        kick_function()
-
-        # If the upgrade does not need an ownership update of any of the
-        # directories in the OSD service directory, then simply restart
-        # all of the OSDs at the same time as this will be the fastest
-        # way to update the code on the node.
-        if not dirs_need_ownership_update('osd'):
-            log('Restarting all OSDs to load new binaries', DEBUG)
-            with maintain_all_osd_states():
-                if systemd():
-                    service_restart('ceph-osd.target')
-                else:
-                    service_restart('ceph-osd-all')
-            return
-
-        # Need to change the ownership of all directories which are not OSD
-        # directories as well.
-        # TODO - this should probably be moved to the general upgrade function
-        #        and done before mon/OSD.
-        update_owner(CEPH_BASE_DIR, recurse_dirs=False)
-        non_osd_dirs = filter(lambda x: not x == 'osd',
-                              os.listdir(CEPH_BASE_DIR))
-        non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x),
-                           non_osd_dirs)
-        for i, path in enumerate(non_osd_dirs):
-            if i % 100 == 0:
-                kick_function()
-            update_owner(path)
-
-        # Fast service restart wasn't an option because each of the OSD
-        # directories need the ownership updated for all the files on
-        # the OSD. Walk through the OSDs one-by-one upgrading the OSD.
-        for osd_dir in _get_child_dirs(OSD_BASE_DIR):
-            kick_function()
-            try:
-                osd_num = _get_osd_num_from_dirname(osd_dir)
-                _upgrade_single_osd(osd_num, osd_dir)
-            except ValueError as ex:
-                # Directory could not be parsed - junk directory?
-                log('Could not parse OSD directory %s: %s' % (osd_dir, ex),
-                    WARNING)
-                continue
-
-    except (subprocess.CalledProcessError, IOError) as err:
-        log("Stopping Ceph and upgrading packages failed "
-            "with message: {}".format(err))
-        status_set("blocked", "Upgrade to {} failed".format(new_version))
-        sys.exit(1)
-
-
-def _upgrade_single_osd(osd_num, osd_dir):
-    """Upgrades a single OSD directory.
-
-    :param osd_num: the num of the OSD
-    :param osd_dir: the directory of the OSD to upgrade
-    :raises CalledProcessError: if an error occurs in a command issued as part
-                                of the upgrade process
-    :raises IOError: if an error occurs reading/writing to a file as part
-                     of the upgrade process
-    """
-    with maintain_osd_state(osd_num):
-        stop_osd(osd_num)
-        disable_osd(osd_num)
-        update_owner(osd_dir)
-        enable_osd(osd_num)
-        start_osd(osd_num)
-
-
-def stop_osd(osd_num):
-    """Stops the specified OSD number.
-
-    :param osd_num: the OSD number to stop
-    """
-    if systemd():
-        service_stop('ceph-osd@{}'.format(osd_num))
-    else:
-        service_stop('ceph-osd', id=osd_num)
-
-
-def start_osd(osd_num):
-    """Starts the specified OSD number.
-
-    :param osd_num: the OSD number to start.
-    """
-    if systemd():
-        service_start('ceph-osd@{}'.format(osd_num))
-    else:
-        service_start('ceph-osd', id=osd_num)
-
-
-def disable_osd(osd_num):
-    """Disables the specified OSD number.
-
-    Ensures that the specified OSD will not be automatically started at the
-    next reboot of the system. Due to differences between init systems,
-    this method cannot make any guarantees that the specified OSD cannot be
-    started manually.
-
-    :param osd_num: the OSD id which should be disabled.
-    :raises CalledProcessError: if an error occurs invoking the systemd cmd
-                                to disable the OSD
-    :raises IOError, OSError: if the attempt to read/remove the ready file in
-                              an upstart enabled system fails
-    """
-    if systemd():
-        # When running under systemd, the individual ceph-osd daemons run as
-        # templated units and can be directly addressed by referring to the
-        # templated service name ceph-osd@<osd_num>. Additionally, systemd
-        # allows one to disable a specific templated unit by running the
-        # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the
-        # OSD should remain disabled until re-enabled via systemd.
-        # Note: disabling an already disabled service in systemd returns 0, so
-        # no need to check whether it is enabled or not.
-        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
-        subprocess.check_call(cmd)
-    else:
-        # Neither upstart nor the ceph-osd upstart script provides for
-        # disabling the starting of an OSD automatically. The specific OSD
-        # cannot be prevented from running manually, however it can be
-        # prevented from running automatically on reboot by removing the
-        # 'ready' file in the OSD's root directory. This is due to the
-        # ceph-osd-all upstart script checking for the presence of this file
-        # before starting the OSD.
-        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
-                                  'ready')
-        if os.path.exists(ready_file):
-            os.unlink(ready_file)
-
-
-def enable_osd(osd_num):
-    """Enables the specified OSD number.
-
-    Ensures that the specified osd_num will be enabled and ready to start
-    automatically in the event of a reboot.
-
-    :param osd_num: the osd id which should be enabled.
-    :raises CalledProcessError: if the call to the systemd command issued
-                                fails when enabling the service
-    :raises IOError: if the attempt to write the ready file in an upstart
-                     enabled system fails
-    """
-    if systemd():
-        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
-        subprocess.check_call(cmd)
-    else:
-        # When running on upstart, the OSDs are started via the ceph-osd-all
-        # upstart script which will only start the OSD if it has a 'ready'
-        # file. Make sure that file exists.
-        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
-                                  'ready')
-        with open(ready_file, 'w') as f:
-            f.write('ready')
-
-        # Make sure the correct user owns the file. It shouldn't be necessary
-        # as the upstart script should run with root privileges, but it's
-        # better to have all the files matching ownership.
-        update_owner(ready_file)
-
-
-def update_owner(path, recurse_dirs=True):
-    """Changes the ownership of the specified path.
-
-    Changes the ownership of the specified path to the new ceph daemon user
-    using the system's native chown functionality. This may take a while,
-    so this method will issue a set_status for any changes of ownership which
-    recurse into directory structures.
-
-    :param path: the path to recursively change ownership for
-    :param recurse_dirs: boolean indicating whether to recursively change the
-                         ownership of all the files in a path's subtree or to
-                         simply change the ownership of the path.
-    :raises CalledProcessError: if an error occurs issuing the chown system
-                                command
-    """
-    user = ceph_user()
-    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
-    cmd = ['chown', user_group, path]
-    if os.path.isdir(path) and recurse_dirs:
-        status_set('maintenance', ('Updating ownership of %s to %s' %
-                                   (path, user)))
-        cmd.insert(1, '-R')
-
-    log('Changing ownership of {path} to {user}'.format(
-        path=path, user=user_group), DEBUG)
-    start = datetime.now()
-    subprocess.check_call(cmd)
-    elapsed_time = (datetime.now() - start)
-
-    log('Took {secs} seconds to change the ownership of path: {path}'.format(
-        secs=elapsed_time.total_seconds(), path=path), DEBUG)
-
-
-def get_osd_state(osd_num, osd_goal_state=None):
-    """Get OSD state or loop until OSD state matches OSD goal state.
-
-    If osd_goal_state is None, just return the current OSD state.
-    If osd_goal_state is not None, loop until the current OSD state matches
-    the OSD goal state.
-
-    :param osd_num: the OSD id to get state for
-    :param osd_goal_state: (Optional) string indicating state to wait for
-                           Defaults to None
-    :returns: Returns a str, the OSD state.
- :rtype: str - """ - while True: - asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num) - cmd = [ - 'ceph', - 'daemon', - asok, - 'status' - ] - try: - result = json.loads(str(subprocess - .check_output(cmd) - .decode('UTF-8'))) - except (subprocess.CalledProcessError, ValueError) as e: - log("{}".format(e), level=DEBUG) - continue - osd_state = result['state'] - log("OSD {} state: {}, goal state: {}".format( - osd_num, osd_state, osd_goal_state), level=DEBUG) - if not osd_goal_state: - return osd_state - if osd_state == osd_goal_state: - return osd_state - time.sleep(3) - - -def get_all_osd_states(osd_goal_states=None): - """Get all OSD states or loop until all OSD states match OSD goal states. - - If osd_goal_states is None, just return a dictionary of current OSD states. - If osd_goal_states is not None, loop until the current OSD states match - the OSD goal states. - - :param osd_goal_states: (Optional) dict indicating states to wait for - Defaults to None - :returns: Returns a dictionary of current OSD states. - :rtype: dict - """ - osd_states = {} - for osd_num in get_local_osd_ids(): - if not osd_goal_states: - osd_states[osd_num] = get_osd_state(osd_num) - else: - osd_states[osd_num] = get_osd_state( - osd_num, - osd_goal_state=osd_goal_states[osd_num]) - return osd_states - - -@contextmanager -def maintain_osd_state(osd_num): - """Ensure the state of an OSD is maintained. - - Ensures the state of an OSD is the same at the end of a block nested - in a with statement as it was at the beginning of the block. - - :param osd_num: the OSD id to maintain state for - """ - osd_state = get_osd_state(osd_num) - try: - yield - finally: - get_osd_state(osd_num, osd_goal_state=osd_state) - - -@contextmanager -def maintain_all_osd_states(): - """Ensure all local OSD states are maintained. - - Ensures the states of all local OSDs are the same at the end of a - block nested in a with statement as they were at the beginning of - the block. - """ - osd_states = get_all_osd_states() - try: - yield - finally: - get_all_osd_states(osd_goal_states=osd_states) - - -def list_pools(client='admin'): - """This will list the current pools that Ceph has - - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Returns a list of available pools. - :rtype: list - :raises: subprocess.CalledProcessError if the subprocess fails to run. - """ - try: - pool_list = [] - pools = subprocess.check_output(['rados', '--id', client, 'lspools'], - universal_newlines=True, - stderr=subprocess.STDOUT) - for pool in pools.splitlines(): - pool_list.append(pool) - return pool_list - except subprocess.CalledProcessError as err: - log("rados lspools failed with error: {}".format(err.output)) - raise - - -def get_pool_param(pool, param, client='admin'): - """Get parameter from pool. 
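maintain_osd_state and maintain_all_osd_states above encode a small 'capture, mutate, wait to converge' pattern: record the daemon state on entry, then on exit poll until that state is reached again. A generic sketch of the same pattern, independent of Ceph:

    from contextlib import contextmanager

    @contextmanager
    def maintain_state(get_state, wait_for_state):
        """Capture state on entry; on exit, block until it is restored."""
        saved = get_state()
        try:
            yield saved
        finally:
            wait_for_state(saved)

    # with maintain_state(query_daemon, wait_until_daemon_reports):
    #     restart_daemon()   # names here are hypothetical stand-ins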
- - :param pool: Name of pool to get variable from - :type pool: str - :param param: Name of variable to get - :type param: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Value of variable on pool or None - :rtype: str or None - :raises: subprocess.CalledProcessError - """ - try: - output = subprocess.check_output( - ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param], - universal_newlines=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as cp: - if cp.returncode == 2 and 'ENOENT: option' in cp.output: - return None - raise - if ':' in output: - return output.split(':')[1].lstrip().rstrip() - - -def get_pool_erasure_profile(pool, client='admin'): - """Get erasure code profile for pool. - - :param pool: Name of pool to get variable from - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Erasure code profile of pool or None - :rtype: str or None - :raises: subprocess.CalledProcessError - """ - try: - return get_pool_param(pool, 'erasure_code_profile', client=client) - except subprocess.CalledProcessError as cp: - if cp.returncode == 13 and 'EACCES: pool' in cp.output: - # Not a Erasure coded pool - return None - raise - - -def get_pool_quota(pool, client='admin'): - """Get pool quota. - - :param pool: Name of pool to get variable from - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Dictionary with quota variables - :rtype: dict - :raises: subprocess.CalledProcessError - """ - output = subprocess.check_output( - ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool], - universal_newlines=True, stderr=subprocess.STDOUT) - rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)') - result = {} - for line in output.splitlines(): - m = rc.match(line) - if m: - result.update({'max_{}'.format(m.group(1)): m.group(2)}) - return result - - -def get_pool_applications(pool='', client='admin'): - """Get pool applications. - - :param pool: (Optional) Name of pool to get applications for - Defaults to get for all pools - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Dictionary with pool name as key - :rtype: dict - :raises: subprocess.CalledProcessError - """ - - cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get'] - if pool: - cmd.append(pool) - try: - output = subprocess.check_output(cmd, - universal_newlines=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as cp: - if cp.returncode == 2 and 'ENOENT' in cp.output: - return {} - raise - return json.loads(output) - - -def list_pools_detail(): - """Get detailed information about pools. - - Structure: - {'pool_name_1': {'applications': {'application': {}}, - 'parameters': {'pg_num': '42', 'size': '42'}, - 'quota': {'max_bytes': '1000', - 'max_objects': '10'}, - }, - 'pool_name_2': ... - } - - :returns: Dictionary with detailed pool information. 
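get_pool_quota above scrapes the plain-text `ceph osd pool get-quota` output with a small regex. For reference, this is how a typical response maps onto the returned dict (the sample text is an approximation of the CLI output):

    import re

    sample = ("quotas for pool 'mypool':\n"
              "  max objects: 10\n"
              "  max bytes  : 1000\n")
    rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)')
    result = {}
    for line in sample.splitlines():
        m = rc.match(line)
        if m:
            result['max_{}'.format(m.group(1))] = m.group(2)
    assert result == {'max_objects': '10', 'max_bytes': '1000'}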
-    :rtype: dict
-    :raises: subprocess.CalledProcessError
-    """
-    get_params = ['pg_num', 'size']
-    result = {}
-    applications = get_pool_applications()
-    for pool in list_pools():
-        result[pool] = {
-            'applications': applications.get(pool, {}),
-            'parameters': {},
-            'quota': get_pool_quota(pool),
-        }
-        for param in get_params:
-            result[pool]['parameters'].update({
-                param: get_pool_param(pool, param)})
-        erasure_profile = get_pool_erasure_profile(pool)
-        if erasure_profile:
-            result[pool]['parameters'].update({
-                'erasure_code_profile': erasure_profile})
-    return result
-
-
-def dirs_need_ownership_update(service):
-    """Determines if directories still need change of ownership.
-
-    Examines the set of directories under the /var/lib/ceph/{service} directory
-    and determines if they have the correct ownership or not. This is
-    necessary due to the upgrade from Hammer to Jewel where the daemon user
-    changes from root:root to ceph:ceph.
-
-    :param service: the name of the service folder to check (e.g. osd, mon)
-    :returns: boolean. True if the directories need a change of ownership,
-              False otherwise.
-    :raises IOError: if an error occurs reading the file stats from one of
-                     the child directories.
-    :raises OSError: if the specified path does not exist or some other error
-    """
-    expected_owner = expected_group = ceph_user()
-    path = os.path.join(CEPH_BASE_DIR, service)
-    for child in _get_child_dirs(path):
-        curr_owner, curr_group = owner(child)
-
-        if (curr_owner == expected_owner) and (curr_group == expected_group):
-            continue
-
-        # NOTE(lathiat): when config_changed runs on reboot, the OSD might not
-        # yet be mounted or started, and the underlying directory the OSD is
-        # mounted to is expected to be owned by root. So skip the check. This
-        # may also happen for OSD directories for OSDs that were removed.
-        if (service == 'osd' and
-                not os.path.exists(os.path.join(child, 'magic'))):
-            continue
-
-        log('Directory "%s" needs its ownership updated' % child, DEBUG)
-        return True
-
-    # All child directories had the expected ownership
-    return False
-
-
-# A dict of valid Ceph upgrade paths. Mapping is old -> new
-UPGRADE_PATHS = collections.OrderedDict([
-    ('firefly', 'hammer'),
-    ('hammer', 'jewel'),
-    ('jewel', 'luminous'),
-    ('luminous', 'mimic'),
-    ('mimic', 'nautilus'),
-    ('nautilus', 'octopus'),
-    ('octopus', 'pacific'),
-    ('pacific', 'quincy'),
-])
-
-# Map UCA codenames to Ceph codenames
-UCA_CODENAME_MAP = {
-    'icehouse': 'firefly',
-    'juno': 'firefly',
-    'kilo': 'hammer',
-    'liberty': 'hammer',
-    'mitaka': 'jewel',
-    'newton': 'jewel',
-    'ocata': 'jewel',
-    'pike': 'luminous',
-    'queens': 'luminous',
-    'rocky': 'mimic',
-    'stein': 'mimic',
-    'train': 'nautilus',
-    'ussuri': 'octopus',
-    'victoria': 'octopus',
-    'wallaby': 'pacific',
-    'xena': 'pacific',
-    'yoga': 'quincy',
-}
-
-
-def pretty_print_upgrade_paths():
-    """Pretty print supported upgrade paths for Ceph"""
-    return ["{} -> {}".format(key, value)
-            for key, value in UPGRADE_PATHS.items()]
-
-
-def resolve_ceph_version(source):
-    """Resolve the Ceph release from the charm's source configuration,
-    using Ubuntu Cloud Archive pockets.
-
-    :param source: source configuration option of charm
-    :returns: Ceph release codename or None if not resolvable
-    """
-    os_release = get_os_codename_install_source(source)
-    return UCA_CODENAME_MAP.get(os_release)
-
-
-def get_ceph_pg_stat():
-    """Returns the result of 'ceph pg stat'.
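resolve_ceph_version above chains two lookups: the charm's source option is resolved to an OpenStack codename (via charmhelpers' get_os_codename_install_source), which UCA_CODENAME_MAP then translates to a Ceph release; UPGRADE_PATHS describes the supported hops between releases. An illustrative walk-through, assuming the usual charmhelpers behaviour for a 'cloud:bionic-train' source:

    os_release = 'train'                               # from the source option
    ceph_release = UCA_CODENAME_MAP.get(os_release)    # -> 'nautilus'
    assert UPGRADE_PATHS.get('mimic') == ceph_release  # mimic -> nautilus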
- - :returns: dict - """ - try: - tree = str(subprocess - .check_output(['ceph', 'pg', 'stat', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - if not json_tree['num_pg_by_state']: - return None - return json_tree - except ValueError as v: - log("Unable to parse ceph pg stat json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph pg stat command failed with message: {}".format(e)) - raise - - -def get_ceph_health(): - """Returns the health of the cluster from a 'ceph status' - - :returns: dict tree of ceph status - :raises: CalledProcessError if our ceph command fails to get the overall - status, use get_ceph_health()['overall_status']. - """ - try: - tree = str(subprocess - .check_output(['ceph', 'status', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - # Make sure children are present in the JSON - if not json_tree['overall_status']: - return None - - return json_tree - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph status command failed with message: {}".format(e)) - raise - - -def reweight_osd(osd_num, new_weight): - """Changes the crush weight of an OSD to the value specified. - - :param osd_num: the OSD id which should be changed - :param new_weight: the new weight for the OSD - :returns: bool. True if output looks right, else false. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - """ - try: - cmd_result = str(subprocess - .check_output(['ceph', 'osd', 'crush', - 'reweight', "osd.{}".format(osd_num), - new_weight], - stderr=subprocess.STDOUT) - .decode('UTF-8')) - expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format( - ID=osd_num) + " to {}".format(new_weight) - log(cmd_result) - if expected_result in cmd_result: - return True - return False - except subprocess.CalledProcessError as e: - log("ceph osd crush reweight command failed" - " with message: {}".format(e)) - raise - - -def determine_packages(): - """Determines packages for installation. - - :returns: list of Ceph packages - """ - packages = PACKAGES.copy() - if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': - btrfs_package = 'btrfs-progs' - else: - btrfs_package = 'btrfs-tools' - packages.append(btrfs_package) - return packages - - -def determine_packages_to_remove(): - """Determines packages for removal - - Note: if in a container, then the CHRONY_PACKAGE is removed. 
- - :returns: list of packages to be removed - :rtype: List[str] - """ - rm_packages = REMOVE_PACKAGES.copy() - if is_container(): - rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE])) - return rm_packages - - -def bootstrap_manager(): - hostname = socket.gethostname() - path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) - keyring = os.path.join(path, 'keyring') - - if os.path.exists(keyring): - log('bootstrap_manager: mgr already initialized.') - else: - mkdir(path, owner=ceph_user(), group=ceph_user()) - subprocess.check_call(['ceph', 'auth', 'get-or-create', - 'mgr.{}'.format(hostname), 'mon', - 'allow profile mgr', 'osd', 'allow *', - 'mds', 'allow *', '--out-file', - keyring]) - chownr(path, ceph_user(), ceph_user()) - - unit = 'ceph-mgr@{}'.format(hostname) - subprocess.check_call(['systemctl', 'enable', unit]) - service_restart(unit) - - -def enable_msgr2(): - """ - Enables msgr2 - - :raises: subprocess.CalledProcessError if the command fails - """ - cmd = ['ceph', 'mon', 'enable-msgr2'] - subprocess.check_call(cmd) - - -def osd_noout(enable): - """Sets or unsets 'noout' - - :param enable: bool. True to set noout, False to unset. - :returns: bool. True if output looks right. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - """ - operation = { - True: 'set', - False: 'unset', - } - try: - subprocess.check_call(['ceph', '--id', 'admin', - 'osd', operation[enable], - 'noout']) - log('running ceph osd {} noout'.format(operation[enable])) - return True - except subprocess.CalledProcessError as e: - log(e) - raise - - -class OSDConfigSetError(Exception): - """Error occurred applying OSD settings.""" - pass - - -def apply_osd_settings(settings): - """Applies the provided OSD settings - - Apply the provided settings to all local OSD unless settings are already - present. Settings stop being applied on encountering an error. - - :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran successfully. - :raises: OSDConfigSetError - """ - current_settings = {} - base_cmd = 'ceph daemon osd.{osd_id} config --format=json' - get_cmd = base_cmd + ' get {key}' - set_cmd = base_cmd + ' set {key} {value}' - - def _get_cli_key(key): - return(key.replace(' ', '_')) - # Retrieve the current values to check keys are correct and to make this a - # noop if setting are already applied. - for osd_id in get_local_osd_ids(): - for key, value in sorted(settings.items()): - cli_key = _get_cli_key(key) - cmd = get_cmd.format(osd_id=osd_id, key=cli_key) - out = json.loads( - subprocess.check_output(cmd.split()).decode('UTF-8')) - if 'error' in out: - log("Error retrieving OSD setting: {}".format(out['error']), - level=ERROR) - return False - current_settings[key] = out[cli_key] - settings_diff = { - k: v - for k, v in settings.items() - if str(v) != str(current_settings[k])} - for key, value in sorted(settings_diff.items()): - log("Setting {} to {}".format(key, value), level=DEBUG) - cmd = set_cmd.format( - osd_id=osd_id, - key=_get_cli_key(key), - value=value) - out = json.loads( - subprocess.check_output(cmd.split()).decode('UTF-8')) - if 'error' in out: - log("Error applying OSD setting: {}".format(out['error']), - level=ERROR) - raise OSDConfigSetError - return True - - -def enabled_manager_modules(): - """Return a list of enabled manager modules. 
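The heart of apply_osd_settings() above is the settings_diff comprehension: values coming back from the ceph daemon CLI are strings, so both sides are stringified before comparing and only genuine differences are pushed back. That step in isolation, with made-up settings:

def settings_diff(desired, current):
    # Compare as strings, mirroring how the ceph CLI reports values.
    return {k: v for k, v in desired.items()
            if str(v) != str(current.get(k))}

desired = {'osd heartbeat grace': 20, 'osd heartbeat interval': 5}
current = {'osd heartbeat grace': '20', 'osd heartbeat interval': '6'}
print(settings_diff(desired, current))  # {'osd heartbeat interval': 5}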
- - :rtype: List[str] - """ - cmd = ['ceph', 'mgr', 'module', 'ls'] - quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 - if quincy_or_later: - cmd.append('--format=json') - try: - modules = subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError as e: - log("Failed to list ceph modules: {}".format(e), WARNING) - return [] - modules = json.loads(modules) - return modules['enabled_modules'] - - -def is_mgr_module_enabled(module): - """Is a given manager module enabled. - - :param module: - :type module: str - :returns: Whether the named module is enabled - :rtype: bool - """ - return module in enabled_manager_modules() - - -is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard') - - -def mgr_enable_module(module): - """Enable a Ceph Manager Module. - - :param module: The module name to enable - :type module: str - - :raises: subprocess.CalledProcessError - """ - if not is_mgr_module_enabled(module): - subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module]) - return True - return False - - -mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard') - - -def mgr_disable_module(module): - """Enable a Ceph Manager Module. - - :param module: The module name to enable - :type module: str - - :raises: subprocess.CalledProcessError - """ - if is_mgr_module_enabled(module): - subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module]) - return True - return False - - -mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard') - - -def ceph_config_set(name, value, who): - """Set a Ceph config option - - :param name: key to set - :type name: str - :param value: value corresponding to key - :type value: str - :param who: Config area the key is associated with (e.g. 'dashboard') - :type who: str - - :raises: subprocess.CalledProcessError - """ - subprocess.check_call(['ceph', 'config', 'set', who, name, value]) - - -mgr_config_set = functools.partial(ceph_config_set, who='mgr') - - -def ceph_config_get(name, who): - """Retrieve the value of a Ceph config option - - :param name: key to lookup - :type name: str - :param who: Config area the key is associated with (e.g. 'dashboard') - :type who: str - :returns: Value associated with key - :rtype: str - :raises: subprocess.CalledProcessError - """ - return subprocess.check_output( - ['ceph', 'config', 'get', who, name]).decode('UTF-8') - - -mgr_config_get = functools.partial(ceph_config_get, who='mgr') - - -def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None): - """Set SSL dashboard config option. - - :param path: Path to file - :type path: str - :param artifact_name: Option name for setting the artifact - :type artifact_name: str - :param hostname: If hostname is set artifact will only be associated with - the dashboard on that host. 
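mgr_enable_dashboard, mgr_config_set and the other one-liners above are all built the same way: one generic function plus functools.partial to pin an argument. The same pattern with the subprocess call stubbed out so the sketch runs anywhere:

import functools

def config_set(name, value, who, _run=print):
    # _run stands in for subprocess.check_call in this sketch.
    _run(['ceph', 'config', 'set', who, name, value])

mgr_config_set = functools.partial(config_set, who='mgr')
mgr_config_set('mgr/dashboard/ssl', 'true')
# prints: ['ceph', 'config', 'set', 'mgr', 'mgr/dashboard/ssl', 'true']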
- :type hostname: str - :raises: subprocess.CalledProcessError - """ - cmd = ['ceph', 'dashboard', artifact_name] - if hostname: - cmd.append(hostname) - cmd.extend(['-i', path]) - log(cmd, level=DEBUG) - subprocess.check_call(cmd) - - -dashboard_set_ssl_certificate = functools.partial( - _dashboard_set_ssl_artifact, - artifact_name='set-ssl-certificate') - - -dashboard_set_ssl_certificate_key = functools.partial( - _dashboard_set_ssl_artifact, - artifact_name='set-ssl-certificate-key') diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index f2ffe001..6970c873 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -8,3 +8,15 @@ needs_charm_build: true charm_build_name: ceph-mon build_type: charmcraft + check: + jobs: + - new-install-focal-yoga +- job: + name: new-install-focal-yoga + parent: func-target + dependencies: + - osci-lint + - charm-build + - tox-py38 + vars: + tox_extra_args: install:local-focal-yoga diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index ead6e89a..d9dd8416 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -1,24 +1,14 @@ -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of *requirements.txt files for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# TODO: Distill the func test requirements from the lint/unit test -# requirements. They are intertwined. Also, Zaza itself should specify -# all of its own requirements and if it doesn't, fix it there. -# -pbr==5.6.0 -simplejson>=2.2.0 -netifaces>=0.10.4 - -# Strange import error with newer netaddr: -netaddr>0.7.16,<0.8.0 - -Jinja2>=2.6 # BSD License (3 clause) -six>=1.9.0 - -# dnspython 2.0.0 dropped py3.5 support -dnspython<2.0.0; python_version < '3.6' -dnspython; python_version >= '3.6' - -psutil>=1.1.1,<2.0.0 +importlib-resources +ops >= 1.2.0 +tenacity +pyudev +dnspython +netaddr +netifaces +pyyaml +git+https://github.com/openstack/charms.ceph#egg=charms_ceph +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack +git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates +git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access +git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer +git+https://github.com/juju/charm-helpers#egg=charm-helpers diff --git a/ceph-mon/hooks/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py similarity index 100% rename from ceph-mon/hooks/ceph_hooks.py rename to ceph-mon/src/ceph_hooks.py diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py new file mode 100755 index 00000000..9003aa52 --- /dev/null +++ b/ceph-mon/src/charm.py @@ -0,0 +1,134 @@ +#! /usr/bin/python3 + +from ops.main import main + +import ops_openstack.core + +import ceph_hooks as hooks + + +class CephMonCharm(ops_openstack.core.OSBaseCharm): + + # General charm control callbacks. + def on_install(self, event): + hooks.install() + + def on_config(self, event): + hooks.config_changed() + + def on_pre_series_upgrade(self, event): + hooks.pre_series_upgrade() + + def on_upgrade(self, event): + hooks.upgrade_charm() + + def on_post_series_upgrade(self, event): + hooks.post_series_upgrade() + + # Relations. 
+ def on_mon_relation_joined(self, event): + hooks.mon_relation_joined() + + def on_bootstrap_source_relation_changed(self, event): + hooks.bootstrap_source_relation_changed() + + def on_prometheus_relation_joined_or_changed(self, event): + hooks.prometheus_relation() + + def on_prometheus_relation_departed(self, event): + hooks.prometheus_left() + + def on_mon_relation(self, event): + hooks.mon_relation() + + def on_osd_relation(self, event): + hooks.osd_relation() + + def on_dashboard_relation_joined(self, event): + hooks.dashboard_relation() + + def on_radosgw_relation(self, event): + hooks.radosgw_relation() + + def on_rbd_mirror_relation(self, event): + hooks.rbd_mirror_relation() + + def on_mds_relation(self, event): + hooks.mds_relation_joined() + + def on_admin_relation(self, event): + hooks.admin_relation_joined() + + def on_client_relation(self, event): + hooks.client_relation() + + def on_nrpe_relation(self, event): + hooks.upgrade_nrpe_config() + + def __init__(self, *args): + super().__init__(*args) + self._stored.is_started = True + fw = self.framework + + fw.observe(self.on.install, self.on_install) + fw.observe(self.on.config_changed, self.on_config) + fw.observe(self.on.pre_series_upgrade, self.on_pre_series_upgrade) + fw.observe(self.on.upgrade_charm, self.on_upgrade) + fw.observe(self.on.post_series_upgrade, self.on_post_series_upgrade) + + fw.observe(self.on.mon_relation_joined, self.on_mon_relation_joined) + fw.observe(self.on.bootstrap_source_relation_changed, + self.on_bootstrap_source_relation_changed) + fw.observe(self.on.prometheus_relation_joined, + self.on_prometheus_relation_joined_or_changed) + fw.observe(self.on.prometheus_relation_changed, + self.on_prometheus_relation_joined_or_changed) + fw.observe(self.on.prometheus_relation_departed, + self.on_prometheus_relation_departed) + + for key in ('mon_relation_departed', 'mon_relation_changed', + 'leader_settings_changed', + 'bootstrap_source_relation_departed'): + fw.observe(getattr(self.on, key), self.on_mon_relation) + + fw.observe(self.on.osd_relation_joined, + self.on_osd_relation) + fw.observe(self.on.osd_relation_changed, + self.on_osd_relation) + + fw.observe(self.on.dashboard_relation_joined, + self.on_dashboard_relation_joined) + + fw.observe(self.on.radosgw_relation_changed, + self.on_radosgw_relation) + fw.observe(self.on.radosgw_relation_joined, + self.on_radosgw_relation) + + fw.observe(self.on.rbd_mirror_relation_changed, + self.on_rbd_mirror_relation) + fw.observe(self.on.rbd_mirror_relation_joined, + self.on_rbd_mirror_relation) + + fw.observe(self.on.mds_relation_changed, + self.on_mds_relation) + fw.observe(self.on.mds_relation_joined, + self.on_mds_relation) + + fw.observe(self.on.admin_relation_changed, + self.on_admin_relation) + fw.observe(self.on.admin_relation_joined, + self.on_admin_relation) + + fw.observe(self.on.client_relation_changed, + self.on_client_relation) + fw.observe(self.on.client_relation_joined, + self.on_client_relation) + + fw.observe(self.on.nrpe_external_master_relation_joined, + self.on_nrpe_relation) + fw.observe(self.on.nrpe_external_master_relation_changed, + self.on_nrpe_relation) + + +if __name__ == '__main__': + main(CephMonCharm) diff --git a/ceph-mon/hooks/utils.py b/ceph-mon/src/utils.py similarity index 98% rename from ceph-mon/hooks/utils.py rename to ceph-mon/src/utils.py index 2781eefd..759ae9d7 100644 --- a/ceph-mon/hooks/utils.py +++ b/ceph-mon/src/utils.py @@ -32,10 +32,7 @@ status_set, unit_get, ) -from charmhelpers.fetch import ( - apt_install, - 
filter_installed_packages -) + from charmhelpers.core.host import ( lsb_release, CompareHostReleases, @@ -47,12 +44,7 @@ ) from charmhelpers.contrib.storage.linux import ceph -try: - import dns.resolver -except ImportError: - apt_install(filter_installed_packages(['python-dnspython']), - fatal=True) - import dns.resolver +import dns.resolver class OsdPostUpgradeError(Exception): diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index d515cae9..eee61b69 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -45,7 +45,7 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' -tempest<31.0.0;python_version<'3.8' +tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 06b51cc4..7b0f7309 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -9,9 +9,17 @@ smoke_bundles: - focal-yoga configure: - - zaza.openstack.charm_tests.glance.setup.add_lts_image + - install: + - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: + - install: + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephRelationTest diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index f4e8a47c..5fd9c98d 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -85,7 +85,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python3 deps = flake8==3.9.2 charm-tools==2.8.3 -commands = flake8 {posargs} hooks unit_tests tests actions lib files +commands = flake8 {posargs} unit_tests tests actions files src charm-proof [testenv:cover] diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py index 70342765..f439d3f1 100644 --- a/ceph-mon/unit_tests/__init__.py +++ b/ceph-mon/unit_tests/__init__.py @@ -17,3 +17,4 @@ sys.path.append('lib') sys.path.append('unit_tests') sys.path.append('actions') +sys.path.append('src') diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 9a82ff1d..5da722fd 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -16,7 +16,7 @@ import test_utils -from hooks import utils +import utils class CephUtilsTestCase(test_utils.CharmTestCase): From dbe3b94f25043ad52a0a4b5ea5bab2b0c31c9a99 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Sat, 20 Aug 2022 00:30:42 +0530 Subject: [PATCH 2411/2699] Adds existence verification for config values Multisite config values (realm, zonegroup, zone) are written to ceph.conf as the defaults without verifying that they exist, which causes failures for commands that use the default values.
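In outline, the context-side change below amounts to a guard of this shape (a sketch only; plain_list() is the helper this patch adds to multisite.py):

def multisite_ctxt(config, plain_list):
    # Only publish a value into ceph.conf when the named entity
    # actually exists according to `radosgw-admin <kind> list`.
    ctxt = {}
    for ctxt_key, kind in (('rgw_zone', 'zone'),
                           ('rgw_zonegroup', 'zonegroup'),
                           ('rgw_realm', 'realm')):
        value = config(kind)
        if value in plain_list(kind):
            ctxt[ctxt_key] = value
    return ctxt

opts = {'zone': 'default', 'zonegroup': 'zg1', 'realm': 'realmX'}
listed = {'zone': ['default'], 'zonegroup': [], 'realm': ['realmX']}
print(multisite_ctxt(opts.get, listed.get))
# {'rgw_zone': 'default', 'rgw_realm': 'realmX'} -- unknown 'zg1' is dropped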
Closes-Bug: #1987127 Change-Id: I0ab4df34f0000339227e5d5b80352355ea7bd36e --- ceph-radosgw/hooks/ceph_radosgw_context.py | 15 +++++++-- ceph-radosgw/hooks/hooks.py | 2 ++ ceph-radosgw/hooks/multisite.py | 33 +++++++++++++++++-- .../unit_tests/test_ceph_radosgw_context.py | 17 ++++++++++ 4 files changed, 62 insertions(+), 5 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index be991ca1..c951fb61 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -18,6 +18,7 @@ import tempfile import shutil +import multisite from charmhelpers.contrib.openstack import context from charmhelpers.contrib.hahelpers.cluster import ( determine_api_port, @@ -307,9 +308,17 @@ def __call__(self): if self.context_complete(ctxt): # Multi-site zone configuration is optional, # so add after assessment - ctxt['rgw_zone'] = config('zone') - ctxt['rgw_zonegroup'] = config('zonegroup') - ctxt['rgw_realm'] = config('realm') + zone = config('zone') + zonegroup = config('zonegroup') + realm = config('realm') + log("config: zone {} zonegroup {} realm {}" + .format(zone, zonegroup, realm), level=DEBUG) + if zone in multisite.plain_list('zone'): + ctxt['rgw_zone'] = zone + if zonegroup in multisite.plain_list('zonegroup'): + ctxt['rgw_zonegroup'] = zonegroup + if realm in multisite.plain_list('realm'): + ctxt['rgw_realm'] = realm return ctxt return {} diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 2f563098..eaea048e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -810,6 +810,7 @@ def master_relation_joined(relation_id=None): 'Mutation detected. Restarting {}.'.format(service_name()), 'INFO') multisite.update_period(zonegroup=zonegroup, zone=zone) + CONFIGS.write_all() service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) else: @@ -898,6 +899,7 @@ def slave_relation_changed(relation_id=None, unit=None): 'Mutation detected. Restarting {}.'.format(service_name()), 'INFO') multisite.update_period(zonegroup=zonegroup, zone=zone) + CONFIGS.write_all() service_restart(service_name()) leader_set(restart_nonce=str(uuid.uuid4())) else: diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index 5815cba7..590fdeb0 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -81,6 +81,35 @@ def _list(key): return [] +def plain_list(key): + """Simple Implementation for list_*, where execution may fail expectedly. + + On failure, retries are not attempted and empty list is returned. 
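As the body of plain_list() below shows, radosgw-admin 'list' output is not uniform: some kinds return a bare JSON list, others a dict whose '<kind>s' key holds the list. That normalisation step on its own, with sample payloads:

import json

def normalise(kind, raw):
    result = json.loads(raw)
    if isinstance(result, dict):
        return result['{}s'.format(kind)]  # e.g. 'zone' -> 'zones'
    return result

print(normalise('zone', '{"default_info": "", "zones": ["default"]}'))  # ['default']
print(normalise('realm', '["realmX"]'))  # ['realmX']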
+ + :param key: string for required resource (zone, zonegroup, realm, user) + :type key: str + :return: list of specified entities found + :rtype: list + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + key, 'list' + ] + try: + result = json.loads(subprocess.check_output( + cmd, stderr=subprocess.PIPE + ).decode('UTF-8')) + hookenv.log("Results: {}".format(result), level=hookenv.DEBUG) + if isinstance(result, dict): + return result['{}s'.format(key)] + else: + return result + except subprocess.CalledProcessError: + return [] + except TypeError: + return [] + + @decorators.retry_on_exception(num_retries=5, base_delay=3, exc_type=ValueError) def list_zones(retry_on_empty=False): @@ -646,7 +675,7 @@ def get_local_zone(zonegroup): if zonegroup_info is None: hookenv.log("Failed to fetch zonegroup ({}) info".format(zonegroup), level=hookenv.ERROR) - return None + return None, None # zonegroup info always contains self name and zones list so fetching # directly is safe. @@ -660,7 +689,7 @@ def get_local_zone(zonegroup): "No local zone configured for zonegroup ({})".format(zonegroup), level=hookenv.ERROR ) - return None + return None, None def rename_multisite_config(zonegroups, new_zonegroup_name, diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index 9cad52e4..cfd07e07 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -33,6 +33,7 @@ 'determine_api_port', 'cmp_pkgrevno', 'leader_get', + 'multisite', 'utils', ] @@ -83,6 +84,17 @@ def setUp(self): self.test_config.set('zonegroup', 'zonegroup1') self.test_config.set('realm', 'realmX') + @staticmethod + def plain_list_stub(key): + if key == "zone": + return ["default"] + if key == "zonegroup": + return ["zonegroup1"] + if key == "realm": + return ["realmX"] + else: + return [] + @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') @@ -104,6 +116,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.multisite.plain_list = self.plain_list_stub self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'cephx', @@ -156,6 +169,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] + self.multisite.plain_list = self.plain_list_stub self.related_units.return_value = ['ceph-proxy/0'] self.determine_api_port.return_value = 70 expect = { @@ -219,6 +233,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.multisite.plain_list = self.plain_list_stub self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'none', @@ -264,6 +279,7 @@ def _relation_get(attr, unit, rid): self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] self.determine_api_port.return_value = 70 + self.multisite.plain_list = self.plain_list_stub expect = { 'auth_supported': 'cephx', 'hostname': 'testhost', @@ -365,6 +381,7 @@ def _relation_get(attr, unit, rid): self.relation_get.side_effect = _relation_get self.relation_ids.return_value = ['mon:6'] self.related_units.return_value = ['ceph/0', 
'ceph/1', 'ceph/2'] + self.multisite.plain_list = self.plain_list_stub self.determine_api_port.return_value = 70 expect = { 'auth_supported': 'cephx', From 76ae65489a67ab6f4f5145ca318dcd9ec3266cc9 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 14 Jun 2022 20:31:09 +0000 Subject: [PATCH 2412/2699] Add Kinetic and Zed support * sync charm-helpers to classic charms * change openstack-origin/source default to zed * align testing with zed * add new zed bundles * add zed bundles to tests.yaml * add zed tests to osci.yaml and .zuul.yaml * update build-on and run-on bases * add bindep.txt for py310 * sync tox.ini and requirements.txt for ruamel * use charmcraft_channel 2.0/stable * drop reactive plugin overrides * move interface/layer env vars to charmcraft.yaml Change-Id: I9dda207bbd35f8641a6aac03e99503bc26071407 Depends-On: https://review.opendev.org/c/openstack/charm-ops-openstack/+/853306 --- ceph-iscsi/.zuul.yaml | 2 +- ceph-iscsi/charmcraft.yaml | 7 +--- ceph-iscsi/config.yaml | 2 +- ceph-iscsi/metadata.yaml | 3 +- ceph-iscsi/osci.yaml | 42 +++++++++++++------ .../{focal-ec.yaml => kinetic-ec.yaml} | 4 +- .../bundles/{focal.yaml => kinetic.yaml} | 4 +- ceph-iscsi/tests/tests.yaml | 8 ++-- ceph-iscsi/tox.ini | 2 +- .../unit_tests/test_ceph_iscsi_charm.py | 5 ++- 10 files changed, 46 insertions(+), 33 deletions(-) rename ceph-iscsi/tests/bundles/{focal-ec.yaml => kinetic-ec.yaml} (94%) rename ceph-iscsi/tests/bundles/{focal.yaml => kinetic.yaml} (93%) diff --git a/ceph-iscsi/.zuul.yaml b/ceph-iscsi/.zuul.yaml index 7ffc71cb..23bf5f62 100644 --- a/ceph-iscsi/.zuul.yaml +++ b/ceph-iscsi/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-zed-jobs - openstack-cover-jobs diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 23219175..070bf49d 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -23,13 +23,10 @@ parts: bases: - build-on: - name: ubuntu - channel: "20.04" + channel: "22.04" architectures: - amd64 run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index ba1120f1..9a9b79e2 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. 
source: type: string - default: yoga + default: zed description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 04c1bde3..78cef03f 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -10,8 +10,7 @@ tags: - storage - misc series: - - focal - - jammy +- jammy subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index 04fd9c79..6f7edc1b 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -1,29 +1,47 @@ - project: templates: - - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 check: jobs: - - ceph-iscsi-focal-octopus - - ceph-iscsi-focal-octopus-ec + - ceph-iscsi-jammy-quincy + - ceph-iscsi-jammy-quincy-ec + - ceph-iscsi-kinetic-quincy: + voting: false + - ceph-iscsi-kinetic-quincy-ec: + voting: false vars: needs_charm_build: true charm_build_name: ceph-iscsi build_type: charmcraft + charmcraft_channel: 2.0/stable - job: - name: ceph-iscsi-focal-octopus + name: ceph-iscsi-jammy-quincy parent: func-target dependencies: - charm-build - osci-lint - - tox-py38 - - tox-py39 + - name: tox-py310 + soft: true vars: - tox_extra_args: focal + tox_extra_args: jammy - job: - name: ceph-iscsi-focal-octopus-ec + name: ceph-iscsi-jammy-quincy-ec parent: func-target - dependencies: &smoke-jobs - - ceph-iscsi-focal-octopus + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: jammy-ec +- job: + name: ceph-iscsi-kinetic-quincy + parent: func-target + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: kinetic +- job: + name: ceph-iscsi-kinetic-quincy-ec + parent: func-target + dependencies: + - ceph-iscsi-jammy-quincy vars: - tox_extra_args: focal-ec + tox_extra_args: kinetic-ec diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/kinetic-ec.yaml similarity index 94% rename from ceph-iscsi/tests/bundles/focal-ec.yaml rename to ceph-iscsi/tests/bundles/kinetic-ec.yaml index a381c85d..d2fc1a7f 100644 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ b/ceph-iscsi/tests/bundles/kinetic-ec.yaml @@ -1,5 +1,5 @@ local_overlay_enabled: False -series: focal +series: kinetic machines: '0': '1': @@ -36,8 +36,6 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 - # Use proposed until fix for #1883112 is backported - source: distro-proposed to: - '0' - '1' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/kinetic.yaml similarity index 93% rename from ceph-iscsi/tests/bundles/focal.yaml rename to ceph-iscsi/tests/bundles/kinetic.yaml index ee42dbf4..e674318d 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/kinetic.yaml @@ -1,5 +1,5 @@ local_overlay_enabled: False -series: focal +series: kinetic machines: '0': '1': @@ -35,8 +35,6 @@ applications: num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata - # Use proposed until fix for #1883112 is backported - source: distro-proposed to: - '0' - '1' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 6f0ce1bd..a33948e8 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -1,12 +1,14 @@ charm_name: ceph-iscsi gate_bundles: - - focal-ec - - focal + - jammy-ec + - jammy smoke_bundles: - - focal + - jammy dev_bundles: - jammy-ec - jammy + - kinetic-ec + - kinetic configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - 
zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index c50ea0f3..c8550616 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -118,7 +118,7 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh [testenv:func-noop] diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 850e9997..b5d30839 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -173,8 +173,9 @@ def network_get(self, endpoint_name, relation_id=None): 'egress-subnets': ['10.0.0.0/24']} return network_data + config_ = self.harness._get_config(charm_config=None) self.harness._backend = _TestingOPSModelBackend( - self.harness._unit_name, self.harness._meta) + self.harness._unit_name, self.harness._meta, config_) self.harness._model = model.Model( self.harness._meta, self.harness._backend) @@ -398,7 +399,7 @@ def test_on_pools_available(self): rel_id, 'ceph-iscsi', {'admin_password': 'existing password', - 'gateway_ready': False}) + 'gateway_ready': 'False'}) self.harness.begin() self.harness.charm.ceph_client._stored.pools_available = True with patch.object(Path, 'mkdir') as mock_mkdir: From dd6e2e3d4ac3c965228c60aaf10d65b387a1d1dc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 10 Jun 2022 20:14:46 +0000 Subject: [PATCH 2413/2699] Add Kinetic and Zed support * sync charm-helpers to classic charms * change openstack-origin/source default to zed * align testing with zed * add new zed bundles * add zed bundles to tests.yaml * add zed tests to osci.yaml and .zuul.yaml * update build-on and run-on bases * add bindep.txt for py310 * sync tox.ini and requirements.txt for ruamel * use charmcraft_channel 2.0/stable * drop reactive plugin overrides * move interface/layer env vars to charmcraft.yaml Change-Id: I78b0720e75891a41364ba0ddb82add89c3b77ca1 --- ceph-proxy/.zuul.yaml | 2 +- ceph-proxy/bindep.txt | 3 ++ ceph-proxy/charmcraft.yaml | 7 ++-- ceph-proxy/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/utils.py | 6 +++- ceph-proxy/charmhelpers/core/host.py | 6 ++-- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/core/services/base.py | 3 +- ceph-proxy/charmhelpers/fetch/archiveurl.py | 31 +++++++++++++++--- ceph-proxy/charmhelpers/fetch/ubuntu.py | 10 ++++++ ceph-proxy/config.yaml | 2 +- ceph-proxy/lib/charms_ceph/broker.py | 3 +- ceph-proxy/lib/charms_ceph/utils.py | 11 +++++-- ceph-proxy/metadata.yaml | 1 - ceph-proxy/osci.yaml | 32 +++++++------------ ceph-proxy/requirements.txt | 11 +++++-- ceph-proxy/test-requirements.txt | 16 +--------- ceph-proxy/tests/bundles/jammy-yoga-ec.yaml | 16 +++++----- ceph-proxy/tests/bundles/jammy-yoga.yaml | 16 +++++----- .../{focal-xena-ec.yaml => jammy-zed-ec.yaml} | 20 ++++++------ .../{focal-yoga.yaml => jammy-zed.yaml} | 20 ++++++------ ...focal-yoga-ec.yaml => kinetic-zed-ec.yaml} | 20 ++++++------ .../{focal-xena.yaml => kinetic-zed.yaml} | 20 ++++++------ ceph-proxy/tests/tests.yaml | 19 ++++++++--- ceph-proxy/tox.ini | 28 +++------------- 25 files changed, 163 insertions(+), 143 deletions(-) create mode 100644 ceph-proxy/bindep.txt rename ceph-proxy/tests/bundles/{focal-xena-ec.yaml => jammy-zed-ec.yaml} (93%) rename ceph-proxy/tests/bundles/{focal-yoga.yaml => jammy-zed.yaml} (92%) rename ceph-proxy/tests/bundles/{focal-yoga-ec.yaml => 
kinetic-zed-ec.yaml} (93%) rename ceph-proxy/tests/bundles/{focal-xena.yaml => kinetic-zed.yaml} (92%) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 7dd3db96..75fc2a78 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-zed-jobs diff --git a/ceph-proxy/bindep.txt b/ceph-proxy/bindep.txt new file mode 100644 index 00000000..bdbe8d56 --- /dev/null +++ b/ceph-proxy/bindep.txt @@ -0,0 +1,3 @@ +libffi-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 68b9a010..09f03428 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -24,13 +24,10 @@ parts: bases: - build-on: - name: ubuntu - channel: "20.04" + channel: "22.04" architectures: - amd64 run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index de56584d..f8edf37a 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -467,7 +467,7 @@ def ns_query(address): try: answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: + except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): return None if answers: diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index c8747c16..1fa2814a 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -158,6 +158,7 @@ ('2021.1', 'wallaby'), ('2021.2', 'xena'), ('2022.1', 'yoga'), + ('2022.2', 'zed'), ]) # The ugly duckling - must list releases oldest to newest @@ -400,13 +401,16 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, + raise_exception=False): '''Determine OpenStack version number from codename.''' for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ 'codename: %s' % codename + if raise_exception: + raise ValueError(str(e)) error_out(e) diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index ad2cab46..ef6c8eca 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -277,7 +277,7 @@ def service_resume(service_name, init_dir="/etc/init", return started -def service(action, service_name, **kwargs): +def service(action, service_name=None, **kwargs): """Control a system service. :param action: the action to take on the service @@ -286,7 +286,9 @@ def service(action, service_name, **kwargs): the form of key=value. 
""" if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action, service_name] + cmd = ['systemctl', action] + if service_name is not None: + cmd.append(service_name) else: cmd = ['service', service_name, action] for key, value in kwargs.items(): diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index 0906c5c0..cc2d89fe 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -30,6 +30,7 @@ 'hirsute', 'impish', 'jammy', + 'kinetic', ) diff --git a/ceph-proxy/charmhelpers/core/services/base.py b/ceph-proxy/charmhelpers/core/services/base.py index 7c37c65c..8d217b59 100644 --- a/ceph-proxy/charmhelpers/core/services/base.py +++ b/ceph-proxy/charmhelpers/core/services/base.py @@ -15,7 +15,8 @@ import os import json import inspect -from collections import Iterable, OrderedDict +from collections import OrderedDict +from collections.abc import Iterable from charmhelpers.core import host from charmhelpers.core import hookenv diff --git a/ceph-proxy/charmhelpers/fetch/archiveurl.py b/ceph-proxy/charmhelpers/fetch/archiveurl.py index 2cb2e88b..0e35c901 100644 --- a/ceph-proxy/charmhelpers/fetch/archiveurl.py +++ b/ceph-proxy/charmhelpers/fetch/archiveurl.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import os import hashlib import re @@ -24,11 +25,15 @@ get_archive_handler, extract, ) +from charmhelpers.core.hookenv import ( + env_proxy_settings, +) from charmhelpers.core.host import mkdir, check_hash from urllib.request import ( build_opener, install_opener, urlopen, urlretrieve, HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ProxyHandler ) from urllib.parse import urlparse, urlunparse, parse_qs from urllib.error import URLError @@ -50,6 +55,20 @@ def splitpasswd(user): return user, None +@contextlib.contextmanager +def proxy_env(): + """ + Creates a context which temporarily modifies the proxy settings in os.environ. + """ + restore = {**os.environ} # Copy the current os.environ + juju_proxies = env_proxy_settings() or {} + os.environ.update(**juju_proxies) # Insert or Update the os.environ + yield os.environ + for key in juju_proxies: + del os.environ[key] # remove any keys which were added or updated + os.environ.update(**restore) # restore any original values + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -80,6 +99,7 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) + handlers = [] if proto in ('http', 'https'): auth, barehost = splituser(netloc) if auth is not None: @@ -89,10 +109,13 @@ def download(self, source, dest): # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) + handlers.append(HTTPBasicAuthHandler(passman)) + + with proxy_env(): + handlers.append(ProxyHandler()) + opener = build_opener(*handlers) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'wb') as dest_file: dest_file.write(response.read()) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index e6f8a0ad..93b92765 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -222,6 +222,14 @@ 'yoga/proposed': 'focal-proposed/yoga', 'focal-yoga/proposed': 'focal-proposed/yoga', 'focal-proposed/yoga': 'focal-proposed/yoga', + # Zed + 'zed': 'jammy-updates/zed', + 'jammy-zed': 'jammy-updates/zed', + 'jammy-zed/updates': 'jammy-updates/zed', + 'jammy-updates/zed': 'jammy-updates/zed', + 'zed/proposed': 'jammy-proposed/zed', + 'jammy-zed/proposed': 'jammy-proposed/zed', + 'jammy-proposed/zed': 'jammy-proposed/zed', } @@ -248,6 +256,7 @@ 'wallaby', 'xena', 'yoga', + 'zed', ) @@ -274,6 +283,7 @@ ('hirsute', 'wallaby'), ('impish', 'xena'), ('jammy', 'yoga'), + ('kinetic', 'zed'), ]) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 533dac62..6f6c9daf 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -10,7 +10,7 @@ options: Setting this to True will allow supporting services to log to syslog. source: type: string - default: "" + default: zed description: | Repository from which to install. 
May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-proxy/lib/charms_ceph/broker.py b/ceph-proxy/lib/charms_ceph/broker.py index d00baedc..90b536fb 100644 --- a/ceph-proxy/lib/charms_ceph/broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -291,7 +291,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ['mon', 'allow r, allow command "osd blacklist"', + return ['mon', ('allow r, allow command "osd blacklist"' + ', allow command "osd blocklist"'), 'osd', ', '.join(permissions)] diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index a22462ec..e6adcb82 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -1134,7 +1134,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ ('mon', ['allow r', - 'allow command "osd blacklist"']), + 'allow command "osd blacklist"', + 'allow command "osd blocklist"']), ('osd', ['allow rwx']), ]) @@ -1166,7 +1167,10 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd; allow r']), + ('mon', ['allow profile rbd-mirror-peer', + 'allow command "service dump"', + 'allow command "service status"' + ]), ('osd', ['profile rbd']), ('mgr', ['allow r']), ]) @@ -3453,6 +3457,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError as e: diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 3ffef3fd..22689a74 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -10,7 +10,6 @@ tags: - file-servers - misc series: -- focal - jammy extra-bindings: public: diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index d08a33e9..81056ab1 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -1,39 +1,31 @@ - project: templates: - - charm-unit-jobs-py38 - - charm-unit-jobs-py39 - - charm-yoga-functional-jobs - - charm-xena-functional-jobs + - charm-unit-jobs-py310 + - charm-zed-functional-jobs check: jobs: - - focal-xena-ec - - focal-yoga-ec - jammy-yoga-ec + - kinetic-zed-ec: + voting: false vars: needs_charm_build: true charm_build_name: ceph-proxy build_type: charmcraft + charmcraft_channel: 2.0/stable - job: - name: focal-xena-ec + name: jammy-yoga-ec parent: func-target dependencies: - osci-lint - charm-build - - tox-py38 - - tox-py39 - vars: - tox_extra_args: erasure-coded:focal-xena-ec -- job: - name: focal-yoga-ec - parent: func-target - dependencies: - - focal-xena-ec + - name: tox-py310 + soft: true vars: - tox_extra_args: erasure-coded:focal-yoga-ec + tox_extra_args: erasure-coded:jammy-yoga-ec - job: - name: jammy-yoga-ec + name: kinetic-zed-ec parent: func-target dependencies: - - focal-xena-ec + - jammy-yoga-ec vars: - tox_extra_args: erasure-coded:jammy-yoga-ec + tox_extra_args: erasure-coded:kinetic-zed-ec diff --git a/ceph-proxy/requirements.txt b/ceph-proxy/requirements.txt index ead6e89a..3b1cb7b1 100644 --- a/ceph-proxy/requirements.txt +++ b/ceph-proxy/requirements.txt @@ -11,14 +11,19 @@ pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * 
https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 + # Strange import error with newer netaddr: netaddr>0.7.16,<0.8.0 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -# dnspython 2.0.0 dropped py3.5 support -dnspython<2.0.0; python_version < '3.6' -dnspython; python_version >= '3.6' +dnspython psutil>=1.1.1,<2.0.0 diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 0aabe171..4ef87dc5 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -8,7 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 @@ -19,25 +18,12 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 -# Dependencies of stestr. Newer versions use keywords that didn't exist in -# python 3.5 yet (e.g. "ModuleNotFoundError") -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' - -# Some Zuul nodes sometimes pull newer versions of these dependencies which -# dropped support for python 3.5: -osprofiler<2.7.0;python_version<'3.6' -stevedore<1.31.0;python_version<'3.6' -debtcollector<1.22.0;python_version<'3.6' -oslo.utils<=3.41.0;python_version<'3.6' - coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' -tempest<24.0.0;python_version<'3.6' +git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml index 26ae8716..0cf89d79 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml @@ -62,7 +62,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -78,7 +78,7 @@ applications: - '16' - '17' - '18' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -98,7 +98,7 @@ applications: ec-profile-m: 2 to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -112,7 +112,7 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph @@ -123,7 +123,7 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -134,7 +134,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -157,7 +157,7 @@ applications: ec-profile-plugin: jerasure to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -171,7 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/bundles/jammy-yoga.yaml 
b/ceph-proxy/tests/bundles/jammy-yoga.yaml index 0a5a4b19..8677eee2 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga.yaml @@ -59,7 +59,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -72,7 +72,7 @@ applications: - '6' - '7' - '8' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -103,13 +103,13 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph options: restrict-ceph-pools: True - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -120,7 +120,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -139,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -148,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-xena-ec.yaml b/ceph-proxy/tests/bundles/jammy-zed-ec.yaml similarity index 93% rename from ceph-proxy/tests/bundles/focal-xena-ec.yaml rename to ceph-proxy/tests/bundles/jammy-zed-ec.yaml index d5632805..0cf89d79 100644 --- a/ceph-proxy/tests/bundles/focal-xena-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-zed-ec.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-xena + openstack-origin: &openstack-origin distro -series: focal +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' @@ -62,7 +62,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -78,7 +78,7 @@ applications: - '16' - '17' - '18' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -98,7 +98,7 @@ applications: ec-profile-m: 2 to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -112,7 +112,7 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph @@ -123,7 +123,7 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -134,7 +134,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -157,7 +157,7 @@ applications: ec-profile-plugin: jerasure to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -171,7 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-yoga.yaml b/ceph-proxy/tests/bundles/jammy-zed.yaml similarity index 92% rename from ceph-proxy/tests/bundles/focal-yoga.yaml rename to ceph-proxy/tests/bundles/jammy-zed.yaml index b6315472..041dc19f 100644 --- a/ceph-proxy/tests/bundles/focal-yoga.yaml +++ b/ceph-proxy/tests/bundles/jammy-zed.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin cloud:jammy-zed -series: focal +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -59,7 +59,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -72,7 +72,7 @@ applications: - '6' - '7' - '8' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -103,13 +103,13 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph options: restrict-ceph-pools: True - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -120,7 +120,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -139,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -148,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-yoga-ec.yaml b/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml similarity index 93% rename from ceph-proxy/tests/bundles/focal-yoga-ec.yaml rename to ceph-proxy/tests/bundles/kinetic-zed-ec.yaml index c9cd5b4e..479b391c 100644 --- a/ceph-proxy/tests/bundles/focal-yoga-ec.yaml +++ b/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin distro -series: focal +series: kinetic comment: - 'machines section to decide order of deployment. 
database sooner = faster' @@ -62,7 +62,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -78,7 +78,7 @@ applications: - '16' - '17' - '18' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -98,7 +98,7 @@ applications: ec-profile-m: 2 to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -112,7 +112,7 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph @@ -123,7 +123,7 @@ applications: ec-profile-m: 2 ec-profile-plugin: lrc ec-profile-locality: 3 - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -134,7 +134,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -157,7 +157,7 @@ applications: ec-profile-plugin: jerasure to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -171,7 +171,7 @@ applications: libvirt-image-backend: rbd to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/bundles/focal-xena.yaml b/ceph-proxy/tests/bundles/kinetic-zed.yaml similarity index 92% rename from ceph-proxy/tests/bundles/focal-xena.yaml rename to ceph-proxy/tests/bundles/kinetic-zed.yaml index f9b5c376..ca1731c5 100644 --- a/ceph-proxy/tests/bundles/focal-xena.yaml +++ b/ceph-proxy/tests/bundles/kinetic-zed.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-xena + openstack-origin: &openstack-origin distro -series: focal +series: kinetic comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -59,7 +59,7 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -72,7 +72,7 @@ applications: - '6' - '7' - '8' - channel: quincy/edge + channel: latest/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -89,7 +89,7 @@ applications: source: *openstack-origin to: - '10' - channel: quincy/edge + channel: latest/edge cinder: charm: ch:cinder @@ -103,13 +103,13 @@ applications: constraints: mem=2048 to: - '11' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph options: restrict-ceph-pools: True - channel: yoga/edge + channel: latest/edge keystone: charm: ch:keystone @@ -120,7 +120,7 @@ applications: constraints: mem=1024 to: - '12' - channel: yoga/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -139,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -148,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: latest/edge relations: diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 75fe4043..ad74db78 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -12,15 +12,19 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes gate_bundles: - - focal-xena - - erasure-coded: focal-xena-ec - - focal-yoga - - erasure-coded: focal-yoga-ec - jammy-yoga - erasure-coded: jammy-yoga-ec +dev_bundles: + - jammy-yoga + - erasure-coded: jammy-yoga-ec + - jammy-zed + - erasure-coded: jammy-zed-ec + - kinetic-zed + - erasure-coded: kinetic-zed-ec + smoke_bundles: - - focal-xena + - jammy-yoga target_deploy_status: ceph-proxy: @@ -41,3 +45,8 @@ 
target_deploy_status: glance: workload-status: waiting workload-status-message: "Incomplete relations: storage-backend" + +tests_options: + force_deploy: + - kinetic-zed + - kinetic-zed-ec diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index 81fd2492..bddbd1f2 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -48,31 +48,11 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py36] -basepython = python3.6 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py38] -basepython = python3.8 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py39] -basepython = python3.9 +[testenv:py310] +basepython = python3.10 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt @@ -84,7 +64,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + git+https://github.com/juju/charm-tools.git commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From a95e54ea94a068f4ed4b436a467f973317905500 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 10 Jun 2022 20:14:46 +0000 Subject: [PATCH 2414/2699] Add Kinetic and Zed support * sync charm-helpers to classic charms * change openstack-origin/source default to zed * align testing with zed * add new zed bundles * add zed bundles to tests.yaml * add zed tests to osci.yaml and .zuul.yaml * update build-on and run-on bases * add bindep.txt for py310 * sync tox.ini and requirements.txt for ruamel * use charmcraft_channel 2.0/stable * drop reactive plugin overrides * move interface/layer env vars to charmcraft.yaml Change-Id: Ieb1ef7b7ab76775f5769621a6a7cbcfb18c40b7f --- ceph-radosgw/.zuul.yaml | 2 +- ceph-radosgw/bindep.txt | 3 + ceph-radosgw/charmcraft.yaml | 7 +- ceph-radosgw/config.yaml | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 61 ++++++------ .../charmhelpers/contrib/openstack/utils.py | 6 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 6 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/core/services/base.py | 3 +- .../hooks/charmhelpers/fetch/archiveurl.py | 31 ++++++- .../hooks/charmhelpers/fetch/ubuntu.py | 10 ++ ceph-radosgw/lib/charms_ceph/broker.py | 3 +- ceph-radosgw/lib/charms_ceph/utils.py | 11 ++- ceph-radosgw/metadata.yaml | 1 - ceph-radosgw/osci.yaml | 92 +++++++++---------- ceph-radosgw/requirements.txt | 11 ++- ceph-radosgw/test-requirements.txt | 17 +--- ...ultisite.yaml => jammy-zed-multisite.yaml} | 4 +- ...espaced.yaml => jammy-zed-namespaced.yaml} | 4 +- .../{focal-xena.yaml => jammy-zed.yaml} | 4 +- ...tisite.yaml => kinetic-zed-multisite.yaml} | 5 +- ...paced.yaml => kinetic-zed-namespaced.yaml} | 4 +- .../{focal-yoga.yaml => kinetic-zed.yaml} | 4 +- ceph-radosgw/tests/tests.yaml | 23 +++-- ceph-radosgw/tox.ini | 29 +----- 26 files changed, 179 insertions(+), 167 deletions(-) create mode 100644 ceph-radosgw/bindep.txt rename ceph-radosgw/tests/bundles/{focal-yoga-multisite.yaml => jammy-zed-multisite.yaml} (97%) rename ceph-radosgw/tests/bundles/{focal-yoga-namespaced.yaml => jammy-zed-namespaced.yaml} 
(97%) rename ceph-radosgw/tests/bundles/{focal-xena.yaml => jammy-zed.yaml} (97%) rename ceph-radosgw/tests/bundles/{focal-xena-multisite.yaml => kinetic-zed-multisite.yaml} (97%) rename ceph-radosgw/tests/bundles/{focal-xena-namespaced.yaml => kinetic-zed-namespaced.yaml} (97%) rename ceph-radosgw/tests/bundles/{focal-yoga.yaml => kinetic-zed.yaml} (97%) diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index 7dd3db96..75fc2a78 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-zed-jobs diff --git a/ceph-radosgw/bindep.txt b/ceph-radosgw/bindep.txt new file mode 100644 index 00000000..bdbe8d56 --- /dev/null +++ b/ceph-radosgw/bindep.txt @@ -0,0 +1,3 @@ +libffi-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index dca60a09..3d5d4c80 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -23,13 +23,10 @@ parts: bases: - build-on: - name: ubuntu - channel: "20.04" + channel: "22.04" architectures: - amd64 run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 4160b6f8..2cfd3e08 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: yoga + default: zed description: | Optional repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index de56584d..f8edf37a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -467,7 +467,7 @@ def ns_query(address): try: answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: + except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): return None if answers: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 32c69ff7..970a657b 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -2560,14 +2560,18 @@ def _parse_cpu_list(cpulist): :rtype: List[int] """ cores = [] - ranges = cpulist.split(',') - for cpu_range in ranges: - if "-" in cpu_range: - cpu_min_max = cpu_range.split('-') - cores += range(int(cpu_min_max[0]), - int(cpu_min_max[1]) + 1) - else: - cores.append(int(cpu_range)) + if cpulist and re.match(r"^[0-9,\-^]*$", cpulist): + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + elif "^" in cpu_range: + cpu_rm = cpu_range.split('^') + cores.remove(int(cpu_rm[1])) + else: + cores.append(int(cpu_range)) return cores def _numa_node_cores(self): @@ -2586,36 +2590,32 @@ def _numa_node_cores(self): def cpu_mask(self): """Get hex formatted CPU mask - The mask is based on using the first config:dpdk-socket-cores cores of each NUMA node in the 
unit. :returns: hex formatted CPU mask :rtype: str """ - return self.cpu_masks()['dpdk_lcore_mask'] - - def cpu_masks(self): - """Get hex formatted CPU masks + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit, followed by the - next config:pmd-socket-cores + @classmethod + def pmd_cpu_mask(cls): + """Get hex formatted pmd CPU mask - :returns: Dict of hex formatted CPU masks - :rtype: Dict[str, str] + The mask is based on config:pmd-cpu-set. + :returns: hex formatted CPU mask + :rtype: str """ - num_lcores = config('dpdk-socket-cores') - pmd_cores = config('pmd-socket-cores') - lcore_mask = 0 - pmd_mask = 0 - for cores in self._numa_node_cores().values(): - for core in cores[:num_lcores]: - lcore_mask = lcore_mask | 1 << core - for core in cores[num_lcores:][:pmd_cores]: - pmd_mask = pmd_mask | 1 << core - return { - 'pmd_cpu_mask': format(pmd_mask, '#04x'), - 'dpdk_lcore_mask': format(lcore_mask, '#04x')} + mask = 0 + cpu_list = cls._parse_cpu_list(config('pmd-cpu-set')) + if cpu_list: + for core in cpu_list: + mask = mask | 1 << core + return format(mask, '#x') def socket_memory(self): """Formatted list of socket memory configuration per socket. @@ -2694,6 +2694,7 @@ def __call__(self): ctxt['device_whitelist'] = self.device_whitelist() ctxt['socket_memory'] = self.socket_memory() ctxt['cpu_mask'] = self.cpu_mask() + ctxt['pmd_cpu_mask'] = self.pmd_cpu_mask() return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index c8747c16..1fa2814a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -158,6 +158,7 @@ ('2021.1', 'wallaby'), ('2021.2', 'xena'), ('2022.1', 'yoga'), + ('2022.2', 'zed'), ]) # The ugly duckling - must list releases oldest to newest @@ -400,13 +401,16 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, + raise_exception=False): '''Determine OpenStack version number from codename.''' for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ 'codename: %s' % codename + if raise_exception: + raise ValueError(str(e)) error_out(e) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index ad2cab46..ef6c8eca 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -277,7 +277,7 @@ def service_resume(service_name, init_dir="/etc/init", return started -def service(action, service_name, **kwargs): +def service(action, service_name=None, **kwargs): """Control a system service. :param action: the action to take on the service @@ -286,7 +286,9 @@ def service(action, service_name, **kwargs): the form of key=value. 
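
        For example (illustrative usage; assumes a systemd host, where the
        service name may now be omitted for unit-less actions):

            service('daemon-reload')    # runs: systemctl daemon-reload
            service('restart', 'cron')  # runs: systemctl restart cron
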
""" if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action, service_name] + cmd = ['systemctl', action] + if service_name is not None: + cmd.append(service_name) else: cmd = ['service', service_name, action] for key, value in kwargs.items(): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index 0906c5c0..cc2d89fe 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -30,6 +30,7 @@ 'hirsute', 'impish', 'jammy', + 'kinetic', ) diff --git a/ceph-radosgw/hooks/charmhelpers/core/services/base.py b/ceph-radosgw/hooks/charmhelpers/core/services/base.py index 7c37c65c..8d217b59 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/services/base.py +++ b/ceph-radosgw/hooks/charmhelpers/core/services/base.py @@ -15,7 +15,8 @@ import os import json import inspect -from collections import Iterable, OrderedDict +from collections import OrderedDict +from collections.abc import Iterable from charmhelpers.core import host from charmhelpers.core import hookenv diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py index 2cb2e88b..0e35c901 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/archiveurl.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import os import hashlib import re @@ -24,11 +25,15 @@ get_archive_handler, extract, ) +from charmhelpers.core.hookenv import ( + env_proxy_settings, +) from charmhelpers.core.host import mkdir, check_hash from urllib.request import ( build_opener, install_opener, urlopen, urlretrieve, HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ProxyHandler ) from urllib.parse import urlparse, urlunparse, parse_qs from urllib.error import URLError @@ -50,6 +55,20 @@ def splitpasswd(user): return user, None +@contextlib.contextmanager +def proxy_env(): + """ + Creates a context which temporarily modifies the proxy settings in os.environ. + """ + restore = {**os.environ} # Copy the current os.environ + juju_proxies = env_proxy_settings() or {} + os.environ.update(**juju_proxies) # Insert or Update the os.environ + yield os.environ + for key in juju_proxies: + del os.environ[key] # remove any keys which were added or updated + os.environ.update(**restore) # restore any original values + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -80,6 +99,7 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) + handlers = [] if proto in ('http', 'https'): auth, barehost = splituser(netloc) if auth is not None: @@ -89,10 +109,13 @@ def download(self, source, dest): # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) + handlers.append(HTTPBasicAuthHandler(passman)) + + with proxy_env(): + handlers.append(ProxyHandler()) + opener = build_opener(*handlers) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'wb') as dest_file: dest_file.write(response.read()) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index e6f8a0ad..93b92765 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -222,6 +222,14 @@ 'yoga/proposed': 'focal-proposed/yoga', 'focal-yoga/proposed': 'focal-proposed/yoga', 'focal-proposed/yoga': 'focal-proposed/yoga', + # Zed + 'zed': 'jammy-updates/zed', + 'jammy-zed': 'jammy-updates/zed', + 'jammy-zed/updates': 'jammy-updates/zed', + 'jammy-updates/zed': 'jammy-updates/zed', + 'zed/proposed': 'jammy-proposed/zed', + 'jammy-zed/proposed': 'jammy-proposed/zed', + 'jammy-proposed/zed': 'jammy-proposed/zed', } @@ -248,6 +256,7 @@ 'wallaby', 'xena', 'yoga', + 'zed', ) @@ -274,6 +283,7 @@ ('hirsute', 'wallaby'), ('impish', 'xena'), ('jammy', 'yoga'), + ('kinetic', 'zed'), ]) diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index d00baedc..90b536fb 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -291,7 +291,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ['mon', 'allow r, allow command "osd blacklist"', + return ['mon', ('allow r, allow command "osd blacklist"' + ', allow command "osd blocklist"'), 'osd', ', '.join(permissions)] diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index a22462ec..e6adcb82 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1134,7 +1134,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ ('mon', ['allow r', - 'allow command "osd blacklist"']), + 'allow command "osd blacklist"', + 'allow command "osd blocklist"']), ('osd', ['allow rwx']), ]) @@ -1166,7 +1167,10 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd; allow r']), + ('mon', ['allow profile rbd-mirror-peer', + 'allow command "service dump"', + 'allow command "service status"' + ]), ('osd', ['profile rbd']), ('mgr', ['allow r']), ]) @@ -3453,6 +3457,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError as e: diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index cabe88dd..c89cd043 100644 --- 
a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -13,7 +13,6 @@ tags: - file-servers - misc series: -- focal - jammy extra-bindings: public: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 9d065483..ebd411b1 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -1,101 +1,95 @@ - project: templates: - - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 check: jobs: - - focal-xena-multisite - - vault-focal-xena_rgw - - vault-focal-xena-namespaced - - focal-yoga-multisite: + - jammy-yoga-multisite + - jammy-zed-multisite: voting: false - - vault-focal-yoga_rgw: + - kinetic-zed-multisite: voting: false - - vault-focal-yoga-namespaced: + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced + - vault-jammy-zed_rgw: voting: false - - jammy-yoga-multisite: + - vault-jammy-zed-namespaced: voting: false - - vault-jammy-yoga_rgw: + - vault-kinetic-zed_rgw: voting: false - - vault-jammy-yoga-namespaced: + - vault-kinetic-zed-namespaced: voting: false vars: needs_charm_build: true charm_build_name: ceph-radosgw build_type: charmcraft + charmcraft_channel: 2.0/stable - job: - name: focal-xena-multisite + name: jammy-yoga-multisite parent: func-target dependencies: - osci-lint - charm-build - - tox-py38 - - tox-py39 + - name: tox-py310 + soft: true vars: - tox_extra_args: focal-xena-multisite + tox_extra_args: jammy-yoga-multisite - job: - name: vault-focal-xena_rgw + name: jammy-zed-multisite parent: func-target dependencies: - - osci-lint - - charm-build - - tox-py38 - - tox-py39 + - jammy-yoga-multisite vars: - tox_extra_args: vault:focal-xena + tox_extra_args: jammy-zed-multisite - job: - name: vault-focal-xena-namespaced + name: kinetic-zed-multisite parent: func-target dependencies: - - osci-lint - - tox-py38 - - tox-py39 + - jammy-yoga-multisite vars: - tox_extra_args: vault:focal-xena-namespaced - -- job: - name: jammy-yoga-multisite - parent: func-target - dependencies: - - focal-xena-multisite - vars: - tox_extra_args: jammy-yoga-multisite + tox_extra_args: kinetic-zed-multisite - job: name: vault-jammy-yoga_rgw parent: func-target dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced + - jammy-yoga-multisite vars: tox_extra_args: vault:jammy-yoga - job: name: vault-jammy-yoga-namespaced parent: func-target dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced + - jammy-yoga-multisite vars: tox_extra_args: vault:jammy-yoga-namespaced - job: - name: focal-yoga-multisite + name: vault-jammy-zed_rgw + parent: func-target + dependencies: + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced + vars: + tox_extra_args: vault:jammy-zed +- job: + name: vault-jammy-zed-namespaced parent: func-target dependencies: - - focal-xena-multisite + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced vars: - tox_extra_args: focal-yoga-multisite + tox_extra_args: vault:jammy-zed-namespaced - job: - name: vault-focal-yoga_rgw + name: vault-kinetic-zed_rgw parent: func-target dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:focal-yoga + tox_extra_args: vault:kinetic-zed - job: - name: vault-focal-yoga-namespaced + name: vault-kinetic-zed-namespaced parent: func-target dependencies: - - vault-focal-xena_rgw - - vault-focal-xena-namespaced + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:focal-yoga-namespaced + tox_extra_args: vault:kinetic-zed-namespaced diff 
--git a/ceph-radosgw/requirements.txt b/ceph-radosgw/requirements.txt index ead6e89a..3b1cb7b1 100644 --- a/ceph-radosgw/requirements.txt +++ b/ceph-radosgw/requirements.txt @@ -11,14 +11,19 @@ pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 + # Strange import error with newer netaddr: netaddr>0.7.16,<0.8.0 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -# dnspython 2.0.0 dropped py3.5 support -dnspython<2.0.0; python_version < '3.6' -dnspython; python_version >= '3.6' +dnspython psutil>=1.1.1,<2.0.0 diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 45508cd0..4ef87dc5 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -8,7 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 @@ -19,26 +18,12 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 -# Dependencies of stestr. Newer versions use keywords that didn't exist in -# python 3.5 yet (e.g. "ModuleNotFoundError") -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' - -# Some Zuul nodes sometimes pull newer versions of these dependencies which -# dropped support for python 3.5: -osprofiler<2.7.0;python_version<'3.6' -stevedore<1.31.0;python_version<'3.6' -debtcollector<1.22.0;python_version<'3.6' -oslo.utils<=3.41.0;python_version<'3.6' - coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' -tempest<30.0.0;python_version<'3.8' and python_version>='3.6' -tempest<24.0.0;python_version<'3.6' +git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests diff --git a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml rename to ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml index 8c1a1cfd..602e11f2 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-yoga + source: &source cloud:jammy-zed -series: focal +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml rename to ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml index 7d05aa82..946b826b 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-yoga + source: &source cloud:jammy-zed -series: focal +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/focal-xena.yaml b/ceph-radosgw/tests/bundles/jammy-zed.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-xena.yaml rename to ceph-radosgw/tests/bundles/jammy-zed.yaml index e0a1e1c9..bb97a58b 100644 --- a/ceph-radosgw/tests/bundles/focal-xena.yaml +++ b/ceph-radosgw/tests/bundles/jammy-zed.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-xena + source: &source cloud:jammy-zed -series: focal +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/focal-xena-multisite.yaml b/ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-xena-multisite.yaml rename to ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml index 2de95cca..ea03d4c6 100644 --- a/ceph-radosgw/tests/bundles/focal-xena-multisite.yaml +++ b/ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-xena + source: &source distro -series: focal +series: kinetic comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -96,3 +96,4 @@ relations: - - 'secondary-ceph-radosgw:mon' - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml b/ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml rename to ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml index 022d5620..862e0e18 100644 --- a/ceph-radosgw/tests/bundles/focal-xena-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-xena + source: &source distro -series: focal +series: kinetic comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/kinetic-zed.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/focal-yoga.yaml rename to ceph-radosgw/tests/bundles/kinetic-zed.yaml index 697a9be8..8431a762 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga.yaml +++ b/ceph-radosgw/tests/bundles/kinetic-zed.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:focal-yoga + source: &source distro -series: focal +series: kinetic comment: - 'machines section to decide order of deployment. 
database sooner = faster' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 76f71201..4ccd8cb0 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,21 +1,24 @@ charm_name: ceph-radosgw gate_bundles: - - focal-xena-multisite - - vault: focal-xena - - vault: focal-xena-namespaced + - jammy-yoga-multisite + - vault: jammy-yoga + - vault: jammy-yoga-namespaced smoke_bundles: - - focal-xena-multisite - - vault: focal-xena + - jammy-yoga-multisite + - vault: jammy-yoga dev_bundles: - - focal-yoga-multisite - jammy-yoga-multisite - - vault: focal-yoga - - vault: focal-yoga-namespaced + - jammy-zed-multisite + - kinetic-zed-multisite - vault: jammy-yoga - vault: jammy-yoga-namespaced + - vault: jammy-zed + - vault: jammy-zed-namespaced + - vault: kinetic-zed + - vault: kinetic-zed-namespaced target_deploy_status: vault: @@ -36,5 +39,5 @@ tests: tests_options: force_deploy: - - jammy-yoga - - jammy-yoga-namespaced + - kinetic-zed + - kinetic-zed-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index acbcb1f1..bddbd1f2 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -48,34 +48,9 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py36] -basepython = python3.6 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py38] -basepython = python3.8 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py39] -basepython = python3.9 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - [testenv:py310] basepython = python3.10 deps = -r{toxinidir}/requirements.txt @@ -89,7 +64,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + git+https://github.com/juju/charm-tools.git commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From c673d02d9bca0f1df68da069ca16126442ca8402 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 9 Aug 2022 09:33:09 -0400 Subject: [PATCH 2415/2699] Add functional test support for Jammy Change-Id: I712f289d4c1cfec84fdc64aab920ba1aba0b3192 --- ceph-nfs/osci.yaml | 11 +++++- ceph-nfs/tests/bundles/jammy-quincy.yaml | 47 ++++++++++++++++++++++++ ceph-nfs/tests/tests.yaml | 1 + 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 ceph-nfs/tests/bundles/jammy-quincy.yaml diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index 3cb71493..54cd9b07 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -5,6 +5,8 @@ check: jobs: - focal-pacific + - focal-quincy + - jammy-quincy vars: needs_charm_build: true charm_build_name: ceph-nfs @@ -28,4 +30,11 @@ - tox-py38 - tox-py39 vars: - tox_extra_args: focal-quincy \ No newline at end of file + tox_extra_args: focal-quincy +- job: + name: jammy-quincy + parent: func-target + dependencies: + - focal-quincy + vars: + tox_extra_args: jammy-quincy \ No newline at end of file diff --git a/ceph-nfs/tests/bundles/jammy-quincy.yaml b/ceph-nfs/tests/bundles/jammy-quincy.yaml new file mode 100644 index 00000000..669cb915 --- /dev/null +++ b/ceph-nfs/tests/bundles/jammy-quincy.yaml @@ -0,0 +1,47 
@@ +local_overlay_enabled: False +series: jammy +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + options: + source: distro + ceph-osd: + charm: ch:ceph-osd + channel: quincy/edge + num_units: 3 + storage: + osd-devices: '2,10G' + options: + source: distro + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + monitor-count: '3' + expected-osd-count: 6 + source: distro + ceph-fs: + charm: ch:ceph-fs + channel: quincy/edge + num_units: 2 + options: + source: distro + hacluster: + charm: ch:hacluster + channel: 2.4/edge + options: + cluster_count: 2 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' + - - 'ceph-nfs:ha' + - 'hacluster:ha' diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 37d4d29b..fe66ba01 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -2,6 +2,7 @@ charm_name: ceph-nfs gate_bundles: - focal-quincy - focal-pacific + - jammy-pacific smoke_bundles: - focal-pacific configure: [] From 9dc2f58e87dbd44a827e2de766fce8831650ac23 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 25 Aug 2022 14:12:38 -0400 Subject: [PATCH 2416/2699] Fix ceph-osd disk removal on Jammy Closes-Bug: #1987695 Change-Id: I7523e8fa0f6b62329eefcce6daf250881812943c --- ceph-osd/actions/remove_disk.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ceph-osd/actions/remove_disk.py b/ceph-osd/actions/remove_disk.py index 7a48cba1..7e1cae25 100755 --- a/ceph-osd/actions/remove_disk.py +++ b/ceph-osd/actions/remove_disk.py @@ -18,6 +18,7 @@ import errno import json from math import ceil +import os import subprocess import sys import time @@ -240,6 +241,14 @@ def remove(self, purge, timeout, force): # Stop the OSD service. hookenv.log('Stopping the OSD service', hookenv.DEBUG) charms_ceph.utils.stop_osd(self.osd_id[4:]) + charms_ceph.utils.disable_osd(self.osd_id[4:]) + unit_filename = \ + '/run/systemd/system/ceph-osd.target.wants/ceph-osd@{}.service' \ + .format(self.osd_id[4:]) + if os.path.exists(unit_filename): + os.remove(unit_filename) + + subprocess.check_call(['systemctl', 'daemon-reload']) # Remove the OSD from the cluster. 
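        # (By this point the cleanup added above has stopped and disabled
        # the per-OSD systemd unit, removed any stale ceph-osd@N.service
        # wants-symlink from /run, and reloaded systemd, so the unit is not
        # restarted underneath us while the OSD is destroyed below; see
        # LP#1987695.)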
hookenv.log('Destroying the OSD', hookenv.DEBUG) From 21a83605360daf0241c634b816c33f6d910862f8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 30 Aug 2022 13:08:52 -0400 Subject: [PATCH 2417/2699] Remove small OpenStack deployment from functional tests func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/864 Change-Id: Ic143b99503a396d7ae1484a1a3e68bdd6e7ce525 --- ceph-fs/src/tests/bundles/focal-xena.yaml | 203 +--------------------- ceph-fs/src/tests/bundles/focal-yoga.yaml | 203 +--------------------- ceph-fs/src/tests/bundles/jammy-yoga.yaml | 203 +--------------------- ceph-fs/src/tests/tests.yaml | 16 +- 4 files changed, 19 insertions(+), 606 deletions(-) diff --git a/ceph-fs/src/tests/bundles/focal-xena.yaml b/ceph-fs/src/tests/bundles/focal-xena.yaml index 6de4b967..a1c359ce 100644 --- a/ceph-fs/src/tests/bundles/focal-xena.yaml +++ b/ceph-fs/src/tests/bundles/focal-xena.yaml @@ -6,45 +6,10 @@ local_overlay_enabled: False series: &series focal -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - neutron-api-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 ceph-fs: charm: ../../../ceph-fs.charm num_units: 1 @@ -53,8 +18,6 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 - to: - - '3' ceph-osd: charm: ch:ceph-osd @@ -74,169 +37,9 @@ applications: source: *openstack-origin channel: latest/edge - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-api: - charm: ch:neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-openvswitch: - charm: ch:neutron-openvswitch - channel: latest/edge - - neutron-gateway: - charm: ch:neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - channel: latest/edge - relations: - - - 'ceph-mon:mds' - 
'ceph-fs:ceph-mds' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/bundles/focal-yoga.yaml b/ceph-fs/src/tests/bundles/focal-yoga.yaml index b6177072..718084ba 100644 --- a/ceph-fs/src/tests/bundles/focal-yoga.yaml +++ b/ceph-fs/src/tests/bundles/focal-yoga.yaml @@ -5,45 +5,10 @@ local_overlay_enabled: False series: &series focal -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - neutron-api-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 ceph-fs: charm: ../../../ceph-fs.charm num_units: 1 @@ -52,8 +17,6 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 - to: - - '3' ceph-osd: charm: ch:ceph-osd @@ -73,169 +36,9 @@ applications: source: *openstack-origin channel: latest/edge - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - 
keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-api: - charm: ch:neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-openvswitch: - charm: ch:neutron-openvswitch - channel: latest/edge - - neutron-gateway: - charm: ch:neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - channel: latest/edge - relations: - - - 'ceph-mon:mds' - 'ceph-fs:ceph-mds' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git 
a/ceph-fs/src/tests/bundles/jammy-yoga.yaml b/ceph-fs/src/tests/bundles/jammy-yoga.yaml index f49df651..2a428f1d 100644 --- a/ceph-fs/src/tests/bundles/jammy-yoga.yaml +++ b/ceph-fs/src/tests/bundles/jammy-yoga.yaml @@ -5,45 +5,10 @@ local_overlay_enabled: False series: &series jammy -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - '3': - - applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - neutron-api-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 ceph-fs: charm: ../../../ceph-fs.charm num_units: 1 @@ -52,8 +17,6 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 - to: - - '3' ceph-osd: charm: ch:ceph-osd @@ -73,169 +36,9 @@ applications: source: *openstack-origin channel: latest/edge - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - network-manager: Neutron - openstack-origin: *openstack-origin - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 2 - constraints: mem=8G - options: - config-flags: default_ephemeral_format=ext4 - enable-live-migration: true - enable-resize: true - migration-auth-type: ssh - openstack-origin: *openstack-origin - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-api: - charm: ch:neutron-api - num_units: 1 - options: - manage-neutron-plugin-legacy-mode: true - neutron-plugin: ovs - flat-network-providers: physnet1 - neutron-security-groups: true - openstack-origin: *openstack-origin - channel: latest/edge - - neutron-openvswitch: - charm: ch:neutron-openvswitch - channel: latest/edge - - neutron-gateway: - charm: ch:neutron-gateway - num_units: 1 - options: - bridge-mappings: physnet1:br-ex - openstack-origin: *openstack-origin - channel: latest/edge - relations: - - - 'ceph-mon:mds' - 'ceph-fs:ceph-mds' - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 
'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'neutron-api:shared-db' - - 'neutron-api-mysql-router:shared-db' - - - 'neutron-api-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'neutron-api:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-api:neutron-api' - - 'nova-cloud-controller:neutron-api' - - - - 'neutron-api:neutron-plugin-api' - - 'neutron-gateway:neutron-plugin-api' - - - - 'neutron-api:identity-service' - - 'keystone:identity-service' - - - - 'nova-compute:neutron-plugin' - - 'neutron-openvswitch:neutron-plugin' - - - - 'neutron-gateway:amqp' - - 'rabbitmq-server:amqp' - - - - 'neutron-openvswitch:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:quantum-network-service' - - 'neutron-gateway:quantum-network-service' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 42aec0ba..08aef01e 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -5,14 +5,18 @@ gate_bundles: - jammy-yoga smoke_bundles: - focal-xena -configure: - - zaza.openstack.charm_tests.glance.setup.add_lts_image - - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network - - zaza.openstack.charm_tests.nova.setup.create_flavors - - zaza.openstack.charm_tests.nova.setup.manage_ssh_key - - zaza.openstack.charm_tests.keystone.setup.add_demo_user +# configure: +# - zaza.openstack.charm_tests.glance.setup.add_lts_image +# - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network +# - zaza.openstack.charm_tests.nova.setup.create_flavors +# - zaza.openstack.charm_tests.nova.setup.manage_ssh_key +# - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation +target_deploy_status: + ubuntu: + workload-status: active + workload-status-message-prefix: '' \ No newline at end of file From 03321d42be091d44cfc363a86fddc8855c97bdda Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 23 Aug 2022 09:53:37 -0400 Subject: [PATCH 2418/2699] Migrate install handling to operator native Change-Id: I0d06db0c8ac15b3a1fd3e9c4b4f0d2243fb1875e --- .../lib/charms/operator_libs_linux/v0/apt.py | 1329 +++++++++++++++++ .../charms/operator_libs_linux/v1/systemd.py | 219 +++ ceph-mon/src/ceph_hooks.py | 22 - ceph-mon/src/charm.py | 26 +- 4 files changed, 1573 insertions(+), 23 deletions(-) create mode 100644 ceph-mon/lib/charms/operator_libs_linux/v0/apt.py create mode 100644 ceph-mon/lib/charms/operator_libs_linux/v1/systemd.py diff --git a/ceph-mon/lib/charms/operator_libs_linux/v0/apt.py b/ceph-mon/lib/charms/operator_libs_linux/v0/apt.py new file mode 100644 index 00000000..2b5c8f2e --- /dev/null +++ b/ceph-mon/lib/charms/operator_libs_linux/v0/apt.py @@ -0,0 +1,1329 @@ +# Copyright 2021 Canonical Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abstractions for the system's Debian/Ubuntu package information and repositories. + +This module contains abstractions and wrappers around Debian/Ubuntu-style repositories and +packages, in order to easily provide an idiomatic and Pythonic mechanism for adding packages and/or +repositories to systems for use in machine charms. + +A sane default configuration is attainable through nothing more than instantiation of the +appropriate classes. `DebianPackage` objects provide information about the architecture, version, +name, and status of a package. + +`DebianPackage` will try to look up a package either from `dpkg -L` or from `apt-cache` when +provided with a string indicating the package name. If it cannot be located, `PackageNotFoundError` +will be returned, as `apt` and `dpkg` otherwise return `100` for all errors, and a meaningful error +message if the package is not known is desirable. + +To install packages with convenience methods: + +```python +try: + # Run `apt-get update` + apt.update() + apt.add_package("zsh") + apt.add_package(["vim", "htop", "wget"]) +except PackageNotFoundError: + logger.error("a specified package not found in package cache or on system") +except PackageError as e: + logger.error("could not install package. Reason: %s", e.message) +```` + +To find details of a specific package: + +```python +try: + vim = apt.DebianPackage.from_system("vim") + + # To find from the apt cache only + # apt.DebianPackage.from_apt_cache("vim") + + # To find from installed packages only + # apt.DebianPackage.from_installed_package("vim") + + vim.ensure(PackageState.Latest) + logger.info("updated vim to version: %s", vim.fullversion) +except PackageNotFoundError: + logger.error("a specified package not found in package cache or on system") +except PackageError as e: + logger.error("could not install package. Reason: %s", e.message) +``` + + +`RepositoryMapping` will return a dict-like object containing enabled system repositories +and their properties (available groups, baseuri. gpg key). This class can add, disable, or +manipulate repositories. Items can be retrieved as `DebianRepository` objects. + +In order add a new repository with explicit details for fields, a new `DebianRepository` can +be added to `RepositoryMapping` + +`RepositoryMapping` provides an abstraction around the existing repositories on the system, +and can be accessed and iterated over like any `Mapping` object, to retrieve values by key, +iterate, or perform other operations. + +Keys are constructed as `{repo_type}-{}-{release}` in order to uniquely identify a repository. + +Repositories can be added with explicit values through a Python constructor. 
+ +Example: + +```python +repositories = apt.RepositoryMapping() + +if "deb-example.com-focal" not in repositories: + repositories.add(DebianRepository(enabled=True, repotype="deb", + uri="https://example.com", release="focal", groups=["universe"])) +``` + +Alternatively, any valid `sources.list` line may be used to construct a new +`DebianRepository`. + +Example: + +```python +repositories = apt.RepositoryMapping() + +if "deb-us.archive.ubuntu.com-xenial" not in repositories: + line = "deb http://us.archive.ubuntu.com/ubuntu xenial main restricted" + repo = DebianRepository.from_repo_line(line) + repositories.add(repo) +``` +""" + +import fileinput +import glob +import logging +import os +import re +import subprocess +from collections.abc import Mapping +from enum import Enum +from subprocess import PIPE, CalledProcessError, check_call, check_output +from typing import Iterable, List, Optional, Tuple, Union +from urllib.parse import urlparse + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "7c3dbc9c2ad44a47bd6fcb25caa270e5" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 7 + + +VALID_SOURCE_TYPES = ("deb", "deb-src") +OPTIONS_MATCHER = re.compile(r"\[.*?\]") + + +class Error(Exception): + """Base class of most errors raised by this library.""" + + def __repr__(self): + """String representation of Error.""" + return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) + + @property + def name(self): + """Return a string representation of the model plus class.""" + return "<{}.{}>".format(type(self).__module__, type(self).__name__) + + @property + def message(self): + """Return the message passed as an argument.""" + return self.args[0] + + +class PackageError(Error): + """Raised when there's an error installing or removing a package.""" + + +class PackageNotFoundError(Error): + """Raised when a requested package is not known to the system.""" + + +class PackageState(Enum): + """A class to represent possible package states.""" + + Present = "present" + Absent = "absent" + Latest = "latest" + Available = "available" + + +class DebianPackage: + """Represents a traditional Debian package and its utility functions. + + `DebianPackage` wraps information and functionality around a known package, whether installed + or available. The version, epoch, name, and architecture can be easily queried and compared + against other `DebianPackage` objects to determine the latest version or to install a specific + version. + + The representation of this object as a string mimics the output from `dpkg` for familiarity. + + Installation and removal of packages is handled through the `state` property or `ensure` + method, with the following options: + + apt.PackageState.Absent + apt.PackageState.Available + apt.PackageState.Present + apt.PackageState.Latest + + When `DebianPackage` is initialized, the state of a given `DebianPackage` object will be set to + `Available`, `Present`, or `Latest`, with `Absent` implemented as a convenience for removal + (though it operates essentially the same as `Available`). 
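+
+    For example (an illustrative sketch; "vim" stands in for any package
+    name known to apt):
+
+        pkg = DebianPackage.from_system("vim")
+        pkg.ensure(PackageState.Latest)    # install/upgrade the package
+        pkg.state = PackageState.Absent    # remove it again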
+ """ + + def __init__( + self, name: str, version: str, epoch: str, arch: str, state: PackageState + ) -> None: + self._name = name + self._arch = arch + self._state = state + self._version = Version(version, epoch) + + def __eq__(self, other) -> bool: + """Equality for comparison. + + Args: + other: a `DebianPackage` object for comparison + + Returns: + A boolean reflecting equality + """ + return isinstance(other, self.__class__) and ( + self._name, + self._version.number, + ) == (other._name, other._version.number) + + def __hash__(self): + """A basic hash so this class can be used in Mappings and dicts.""" + return hash((self._name, self._version.number)) + + def __repr__(self): + """A representation of the package.""" + return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) + + def __str__(self): + """A human-readable representation of the package.""" + return "<{}: {}-{}.{} -- {}>".format( + self.__class__.__name__, + self._name, + self._version, + self._arch, + str(self._state), + ) + + @staticmethod + def _apt( + command: str, + package_names: Union[str, List], + optargs: Optional[List[str]] = None, + ) -> None: + """Wrap package management commands for Debian/Ubuntu systems. + + Args: + command: the command given to `apt-get` + package_names: a package name or list of package names to operate on + optargs: an (Optional) list of additioanl arguments + + Raises: + PackageError if an error is encountered + """ + optargs = optargs if optargs is not None else [] + if isinstance(package_names, str): + package_names = [package_names] + _cmd = ["apt-get", "-y", *optargs, command, *package_names] + try: + check_call(_cmd, stderr=PIPE, stdout=PIPE) + except CalledProcessError as e: + raise PackageError( + "Could not {} package(s) [{}]: {}".format(command, [*package_names], e.output) + ) from None + + def _add(self) -> None: + """Add a package to the system.""" + self._apt( + "install", + "{}={}".format(self.name, self.version), + optargs=["--option=Dpkg::Options::=--force-confold"], + ) + + def _remove(self) -> None: + """Removes a package from the system. Implementation-specific.""" + return self._apt("remove", "{}={}".format(self.name, self.version)) + + @property + def name(self) -> str: + """Returns the name of the package.""" + return self._name + + def ensure(self, state: PackageState): + """Ensures that a package is in a given state. + + Args: + state: a `PackageState` to reconcile the package to + + Raises: + PackageError from the underlying call to apt + """ + if self._state is not state: + if state not in (PackageState.Present, PackageState.Latest): + self._remove() + else: + self._add() + self._state = state + + @property + def present(self) -> bool: + """Returns whether or not a package is present.""" + return self._state in (PackageState.Present, PackageState.Latest) + + @property + def latest(self) -> bool: + """Returns whether the package is the most recent version.""" + return self._state is PackageState.Latest + + @property + def state(self) -> PackageState: + """Returns the current package state.""" + return self._state + + @state.setter + def state(self, state: PackageState) -> None: + """Sets the package state to a given value. 
+
+        Args:
+            state: a `PackageState` to reconcile the package to
+
+        Raises:
+            PackageError from the underlying call to apt
+        """
+        if state in (PackageState.Latest, PackageState.Present):
+            self._add()
+        else:
+            self._remove()
+        self._state = state
+
+    @property
+    def version(self) -> "Version":
+        """Returns the version for a package."""
+        return self._version
+
+    @property
+    def epoch(self) -> str:
+        """Returns the epoch for a package. May be unset."""
+        return self._version.epoch
+
+    @property
+    def arch(self) -> str:
+        """Returns the architecture for a package."""
+        return self._arch
+
+    @property
+    def fullversion(self) -> str:
+        """Returns the name+epoch for a package."""
+        return "{}.{}".format(self._version, self._arch)
+
+    @staticmethod
+    def _get_epoch_from_version(version: str) -> Tuple[str, str]:
+        """Pull the epoch, if any, out of a version string."""
+        epoch_matcher = re.compile(r"^((?P<epoch>\d+):)?(?P<version>.*)")
+        matches = epoch_matcher.search(version).groupdict()
+        return matches.get("epoch", ""), matches.get("version")
+
+    @classmethod
+    def from_system(
+        cls, package: str, version: Optional[str] = "", arch: Optional[str] = ""
+    ) -> "DebianPackage":
+        """Locates a package, either on the system or known to apt, and serializes the information.
+
+        Args:
+            package: a string representing the package
+            version: an optional string if a specific version is requested
+            arch: an optional architecture, defaulting to `dpkg --print-architecture`. If an
+                architecture is not specified, this will be used for selection.
+
+        """
+        try:
+            return DebianPackage.from_installed_package(package, version, arch)
+        except PackageNotFoundError:
+            logger.debug(
+                "package '%s' is not currently installed or has the wrong architecture.", package
+            )
+
+        # Ok, try `apt-cache ...`
+        try:
+            return DebianPackage.from_apt_cache(package, version, arch)
+        except (PackageNotFoundError, PackageError):
+            # If we get here, it's not known to the system.
+            # This seems unnecessary, but virtually all `apt` commands have a return code of `100`,
+            # and providing meaningful error messages without this is ugly.
+            raise PackageNotFoundError(
+                "Package '{}{}' could not be found on the system or in the apt cache!".format(
+                    package, ".{}".format(arch) if arch else ""
+                )
+            ) from None
+
+    @classmethod
+    def from_installed_package(
+        cls, package: str, version: Optional[str] = "", arch: Optional[str] = ""
+    ) -> "DebianPackage":
+        """Check whether the package is already installed and return an instance.
+
+        Args:
+            package: a string representing the package
+            version: an optional string if a specific version is requested
+            arch: an optional architecture, defaulting to `dpkg --print-architecture`.
+                If an architecture is not specified, this will be used for selection.
+        """
+        system_arch = check_output(
+            ["dpkg", "--print-architecture"], universal_newlines=True
+        ).strip()
+        arch = arch if arch else system_arch
+
+        # Regexps are a really terrible way to do this. Thanks dpkg
+        output = ""
+        try:
+            output = check_output(["dpkg", "-l", package], stderr=PIPE, universal_newlines=True)
+        except CalledProcessError:
+            raise PackageNotFoundError("Package is not installed: {}".format(package)) from None
+
+        # Pop off the header rows from the `dpkg -l` output, since there's no
+        # flag to omit them
+        lines = str(output).splitlines()[5:]
+
+        dpkg_matcher = re.compile(
+            r"""
+        ^(?P<package_status>\w+?)\s+
+        (?P<package_name>.*?)(?P<throwaway_arch>:\w+?)?\s+
+        (?P<version>.*?)\s+
+        (?P<arch>\w+?)\s+
+        (?P<description>.*)
+        """,
+            re.VERBOSE,
+        )
+
+        for line in lines:
+            try:
+                matches = dpkg_matcher.search(line).groupdict()
+                package_status = matches["package_status"]
+
+                if not package_status.endswith("i"):
+                    logger.debug(
+                        "package '%s' in dpkg output but not installed, status: '%s'",
+                        package,
+                        package_status,
+                    )
+                    break
+
+                epoch, split_version = DebianPackage._get_epoch_from_version(matches["version"])
+                pkg = DebianPackage(
+                    matches["package_name"],
+                    split_version,
+                    epoch,
+                    matches["arch"],
+                    PackageState.Present,
+                )
+                if (pkg.arch == "all" or pkg.arch == arch) and (
+                    version == "" or str(pkg.version) == version
+                ):
+                    return pkg
+            except AttributeError:
+                logger.warning("dpkg matcher could not parse line: %s", line)
+
+        # If we didn't find it, fail through
+        raise PackageNotFoundError("Package {}.{} is not installed!".format(package, arch))
+
+    @classmethod
+    def from_apt_cache(
+        cls, package: str, version: Optional[str] = "", arch: Optional[str] = ""
+    ) -> "DebianPackage":
+        """Check whether the package is in the apt cache and return an instance.
+
+        Args:
+            package: a string representing the package
+            version: an optional string if a specific version is requested
+            arch: an optional architecture, defaulting to `dpkg --print-architecture`.
+                If an architecture is not specified, this will be used for selection.
+        """
+        system_arch = check_output(
+            ["dpkg", "--print-architecture"], universal_newlines=True
+        ).strip()
+        arch = arch if arch else system_arch
+
+        # Regexps are a really terrible way to do this. Thanks dpkg
+        keys = ("Package", "Architecture", "Version")
+
+        try:
+            output = check_output(
+                ["apt-cache", "show", package], stderr=PIPE, universal_newlines=True
+            )
+        except CalledProcessError as e:
+            raise PackageError(
+                "Could not list packages in apt-cache: {}".format(e.output)
+            ) from None
+
+        pkg_groups = output.strip().split("\n\n")
+
+        for pkg_raw in pkg_groups:
+            lines = str(pkg_raw).splitlines()
+            vals = {}
+            for line in lines:
+                if line.startswith(keys):
+                    items = line.split(":", 1)
+                    vals[items[0]] = items[1].strip()
+                else:
+                    continue
+
+            epoch, split_version = DebianPackage._get_epoch_from_version(vals["Version"])
+            pkg = DebianPackage(
+                vals["Package"],
+                split_version,
+                epoch,
+                vals["Architecture"],
+                PackageState.Available,
+            )
+
+            if (pkg.arch == "all" or pkg.arch == arch) and (
+                version == "" or str(pkg.version) == version
+            ):
+                return pkg
+
+        # If we didn't find it, fail through
+        raise PackageNotFoundError("Package {}.{} is not in the apt cache!".format(package, arch))
+
+
+class Version:
+    """An abstraction around package versions.
+
+    This seems like it should be strictly unnecessary, except that `apt_pkg` is not usable inside a
+    venv, and wedging version comparisons into `DebianPackage` would overcomplicate it.
+
+    This class implements the algorithm found here:
+    https://www.debian.org/doc/debian-policy/ch-controlfields.html#version
+    """
+
+    def __init__(self, version: str, epoch: str):
+        self._version = version
+        self._epoch = epoch or ""
+
+    def __repr__(self):
+        """A representation of the package."""
+        return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__)
+
+    def __str__(self):
+        """A human-readable representation of the package."""
+        return "{}{}".format("{}:".format(self._epoch) if self._epoch else "", self._version)
+
+    @property
+    def epoch(self):
+        """Returns the epoch for a package. May be empty."""
+        return self._epoch
+
+    @property
+    def number(self) -> str:
+        """Returns the version number for a package."""
+        return self._version
+
+    def _get_parts(self, version: str) -> Tuple[str, str]:
+        """Separate the version into component upstream and Debian pieces."""
+        try:
+            version.rindex("-")
+        except ValueError:
+            # No hyphens means no Debian version
+            return version, "0"
+
+        upstream, debian = version.rsplit("-", 1)
+        return upstream, debian
+
+    def _listify(self, revision: str) -> List[str]:
+        """Split a revision string into a list.
+
+        The list alternates between strings and numbers, padded on either
+        end to always be "str, int, str, int..." and always be of even
+        length. This allows us to trivially implement the comparison
+        algorithm described.
+        """
+        result = []
+        while revision:
+            rev_1, remains = self._get_alphas(revision)
+            rev_2, remains = self._get_digits(remains)
+            result.extend([rev_1, rev_2])
+            revision = remains
+        return result
+
+    def _get_alphas(self, revision: str) -> Tuple[str, str]:
+        """Return a tuple of the first non-digit characters of a revision."""
+        # get the index of the first digit
+        for i, char in enumerate(revision):
+            if char.isdigit():
+                if i == 0:
+                    return "", revision
+                return revision[0:i], revision[i:]
+        # string is entirely alphas
+        return revision, ""
+
+    def _get_digits(self, revision: str) -> Tuple[int, str]:
+        """Return a tuple of the first integer characters of a revision."""
+        # If the string is empty, return (0,'')
+        if not revision:
+            return 0, ""
+        # get the index of the first non-digit
+        for i, char in enumerate(revision):
+            if not char.isdigit():
+                if i == 0:
+                    return 0, revision
+                return int(revision[0:i]), revision[i:]
+        # string is entirely digits
+        return int(revision), ""
+
+    def _dstringcmp(self, a, b):  # noqa: C901
+        """Debian package version string section lexical sort algorithm.
+
+        The lexical comparison is a comparison of ASCII values modified so
+        that all the letters sort earlier than all the non-letters and so that
+        a tilde sorts before anything, even the end of a part.
+        """
+        if a == b:
+            return 0
+        try:
+            for i, char in enumerate(a):
+                if char == b[i]:
+                    continue
+                # "a tilde sorts before anything, even the end of a part"
+                # (emptiness)
+                if char == "~":
+                    return -1
+                if b[i] == "~":
+                    return 1
+                # "all the letters sort earlier than all the non-letters"
+                if char.isalpha() and not b[i].isalpha():
+                    return -1
+                if not char.isalpha() and b[i].isalpha():
+                    return 1
+                # otherwise lexical sort
+                if ord(char) > ord(b[i]):
+                    return 1
+                if ord(char) < ord(b[i]):
+                    return -1
+        except IndexError:
+            # a is longer than b but otherwise equal, greater unless there are tildes
+            if char == "~":
+                return -1
+            return 1
+        # if we get here, a is shorter than b but otherwise equal, so check for tildes...
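+        # (e.g. a="abc", b="abc~": the trailing "~" makes b sort before a,
+        # so a is the greater here; per Debian policy "~" sorts before
+        # everything, even the end of a part)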
+        if b[len(a)] == "~":
+            return 1
+        return -1
+
+    def _compare_revision_strings(self, first: str, second: str):  # noqa: C901
+        """Compare two debian revision strings."""
+        if first == second:
+            return 0
+
+        # listify pads results so that we will always be comparing ints to ints
+        # and strings to strings (at least until we fall off the end of a list)
+        first_list = self._listify(first)
+        second_list = self._listify(second)
+        if first_list == second_list:
+            return 0
+        try:
+            for i, item in enumerate(first_list):
+                # explicitly raise IndexError if we've fallen off the edge of list2
+                if i >= len(second_list):
+                    raise IndexError
+                # if the items are equal, next
+                if item == second_list[i]:
+                    continue
+                # numeric comparison
+                if isinstance(item, int):
+                    if item > second_list[i]:
+                        return 1
+                    if item < second_list[i]:
+                        return -1
+                else:
+                    # string comparison
+                    return self._dstringcmp(item, second_list[i])
+        except IndexError:
+            # rev1 is longer than rev2 but otherwise equal, hence greater
+            # ...except for goddamn tildes
+            if first_list[len(second_list)][0][0] == "~":
+                return -1
+            return 1
+        # rev1 is shorter than rev2 but otherwise equal, hence lesser
+        # ...except for goddamn tildes
+        if second_list[len(first_list)][0][0] == "~":
+            return 1
+        return -1
+
+    def _compare_version(self, other) -> int:
+        if (self.number, self.epoch) == (other.number, other.epoch):
+            return 0
+
+        if self.epoch < other.epoch:
+            return -1
+        if self.epoch > other.epoch:
+            return 1
+
+        # If none of these are true, follow the algorithm
+        upstream_version, debian_version = self._get_parts(self.number)
+        other_upstream_version, other_debian_version = self._get_parts(other.number)
+
+        upstream_cmp = self._compare_revision_strings(upstream_version, other_upstream_version)
+        if upstream_cmp != 0:
+            return upstream_cmp
+
+        debian_cmp = self._compare_revision_strings(debian_version, other_debian_version)
+        if debian_cmp != 0:
+            return debian_cmp
+
+        return 0
+
+    def __lt__(self, other) -> bool:
+        """Less than magic method impl."""
+        return self._compare_version(other) < 0
+
+    def __eq__(self, other) -> bool:
+        """Equality magic method impl."""
+        return self._compare_version(other) == 0
+
+    def __gt__(self, other) -> bool:
+        """Greater than magic method impl."""
+        return self._compare_version(other) > 0
+
+    def __le__(self, other) -> bool:
+        """Less than or equal to magic method impl."""
+        return self.__eq__(other) or self.__lt__(other)
+
+    def __ge__(self, other) -> bool:
+        """Greater than or equal to magic method impl."""
+        return self.__gt__(other) or self.__eq__(other)
+
+    def __ne__(self, other) -> bool:
+        """Not equal to magic method impl."""
+        return not self.__eq__(other)
+
+
+def add_package(
+    package_names: Union[str, List[str]],
+    version: Optional[str] = "",
+    arch: Optional[str] = "",
+    update_cache: Optional[bool] = False,
+) -> Union[DebianPackage, List[DebianPackage]]:
+    """Add a package or list of packages to the system.
+
+    Args:
+        package_names: the name(s) of the package(s)
+        version: an (Optional) version as a string. Defaults to the latest known
+        arch: an optional architecture for the package
+        update_cache: whether or not to run `apt-get update` prior to operating
+
+    Raises:
+        PackageNotFoundError if the package is not in the cache.
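+
+    Example (a sketch; assumes this module is imported as `apt`):
+
+        # install vim at the latest known version, refreshing the cache first
+        vim = apt.add_package("vim", update_cache=True)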
+    """
+    cache_refreshed = False
+    if update_cache:
+        update()
+        cache_refreshed = True
+
+    packages = {"success": [], "retry": [], "failed": []}
+
+    package_names = [package_names] if type(package_names) is str else package_names
+    if not package_names:
+        raise TypeError("Expected at least one package name to add, received zero!")
+
+    if len(package_names) != 1 and version:
+        raise TypeError(
+            "Explicit version should not be set if more than one package is being added!"
+        )
+
+    for p in package_names:
+        pkg, success = _add(p, version, arch)
+        if success:
+            packages["success"].append(pkg)
+        else:
+            logger.warning("failed to locate and install/update '%s'", pkg)
+            packages["retry"].append(p)
+
+    if packages["retry"] and not cache_refreshed:
+        logger.info("updating the apt-cache and retrying installation of failed packages.")
+        update()
+
+        for p in packages["retry"]:
+            pkg, success = _add(p, version, arch)
+            if success:
+                packages["success"].append(pkg)
+            else:
+                packages["failed"].append(p)
+
+    if packages["failed"]:
+        raise PackageError("Failed to install packages: {}".format(", ".join(packages["failed"])))
+
+    return packages["success"] if len(packages["success"]) > 1 else packages["success"][0]
+
+
+def _add(
+    name: str,
+    version: Optional[str] = "",
+    arch: Optional[str] = "",
+) -> Tuple[Union[DebianPackage, str], bool]:
+    """Adds a package.
+
+    Args:
+        name: the name of the package
+        version: an (Optional) version as a string. Defaults to the latest known
+        arch: an optional architecture for the package
+
+    Returns: a tuple of the `DebianPackage` if found, or the package name as a
+        string if it is not, along with a boolean indicating success
+    """
+    try:
+        pkg = DebianPackage.from_system(name, version, arch)
+        pkg.ensure(state=PackageState.Present)
+        return pkg, True
+    except PackageNotFoundError:
+        return name, False
+
+
+def remove_package(
+    package_names: Union[str, List[str]]
+) -> Union[DebianPackage, List[DebianPackage]]:
+    """Removes a package from the system.
+
+    Args:
+        package_names: the name(s) of the package(s) to remove
+
+    Note:
+        Packages that are not installed are logged and skipped rather than
+        raising an error.
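+
+    Example (a sketch; assumes this module is imported as `apt`):
+
+        # returns the removed package, or an empty list if nothing was installed
+        removed = apt.remove_package("vim")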
+    """
+    packages = []
+
+    package_names = [package_names] if type(package_names) is str else package_names
+    if not package_names:
+        raise TypeError("Expected at least one package name to remove, received zero!")
+
+    for p in package_names:
+        try:
+            pkg = DebianPackage.from_installed_package(p)
+            pkg.ensure(state=PackageState.Absent)
+            packages.append(pkg)
+        except PackageNotFoundError:
+            logger.info("package '%s' was requested for removal, but it was not installed.", p)
+
+    # the list of packages will be empty when no package is removed
+    logger.debug("packages: '%s'", packages)
+    return packages[0] if len(packages) == 1 else packages
+
+
+def update() -> None:
+    """Updates the apt cache via `apt-get update`."""
+    check_call(["apt-get", "update"], stderr=PIPE, stdout=PIPE)
+
+
+class InvalidSourceError(Error):
+    """Exceptions for invalid source entries."""
+
+
+class GPGKeyError(Error):
+    """Exceptions for GPG keys."""
+
+
+class DebianRepository:
+    """An abstraction to represent a repository."""
+
+    def __init__(
+        self,
+        enabled: bool,
+        repotype: str,
+        uri: str,
+        release: str,
+        groups: List[str],
+        filename: Optional[str] = "",
+        gpg_key_filename: Optional[str] = "",
+        options: Optional[dict] = None,
+    ):
+        self._enabled = enabled
+        self._repotype = repotype
+        self._uri = uri
+        self._release = release
+        self._groups = groups
+        self._filename = filename
+        self._gpg_key_filename = gpg_key_filename
+        self._options = options
+
+    @property
+    def enabled(self):
+        """Return whether or not the repository is enabled."""
+        return self._enabled
+
+    @property
+    def repotype(self):
+        """Return whether it is binary or source."""
+        return self._repotype
+
+    @property
+    def uri(self):
+        """Return the URI."""
+        return self._uri
+
+    @property
+    def release(self):
+        """Return which Debian/Ubuntu releases it is valid for."""
+        return self._release
+
+    @property
+    def groups(self):
+        """Return the enabled package groups."""
+        return self._groups
+
+    @property
+    def filename(self):
+        """Returns the filename for a repository."""
+        return self._filename
+
+    @filename.setter
+    def filename(self, fname: str) -> None:
+        """Sets the filename used when a repo is written back to disk.
+
+        Args:
+            fname: a filename to write the repository information to.
+        """
+        if not fname.endswith(".list"):
+            raise InvalidSourceError("apt source filenames should end in .list!")
+
+        self._filename = fname
+
+    @property
+    def gpg_key(self):
+        """Returns the path to the GPG key for this repository."""
+        return self._gpg_key_filename
+
+    @property
+    def options(self):
+        """Returns any additional repo options which are set."""
+        return self._options
+
+    def make_options_string(self) -> str:
+        """Generate the complete options string for a repository.
+
+        Combines `gpg_key`, if set, with the rest of the options to form a
+        complete options string.
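+
+        For example, a repository with a key at a hypothetical path would
+        yield "[signed-by=/etc/apt/trusted.gpg.d/example.gpg] " (note the
+        trailing space).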
+        """
+        options = self._options if self._options else {}
+        if self._gpg_key_filename:
+            options["signed-by"] = self._gpg_key_filename
+
+        return (
+            "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in options.items()]))
+            if options
+            else ""
+        )
+
+    @staticmethod
+    def prefix_from_uri(uri: str) -> str:
+        """Get a repo list prefix from the uri, depending on whether a path is set."""
+        uridetails = urlparse(uri)
+        path = (
+            uridetails.path.lstrip("/").replace("/", "-") if uridetails.path else uridetails.netloc
+        )
+        return "/etc/apt/sources.list.d/{}".format(path)
+
+    @staticmethod
+    def from_repo_line(repo_line: str, write_file: Optional[bool] = True) -> "DebianRepository":
+        """Instantiate a new `DebianRepository` from a `sources.list` entry line.
+
+        Args:
+            repo_line: a string representing a repository entry
+            write_file: boolean to enable writing the new repo to disk
+        """
+        repo = RepositoryMapping._parse(repo_line, "UserInput")
+        fname = "{}-{}.list".format(
+            DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-")
+        )
+        repo.filename = fname
+
+        options = repo.options if repo.options else {}
+        if repo.gpg_key:
+            options["signed-by"] = repo.gpg_key
+
+        # For Python 3.5 it's required to use sorted in the options dict in order to not have
+        # different results in the order of the options between executions.
+        options_str = (
+            "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in sorted(options.items())]))
+            if options
+            else ""
+        )
+
+        if write_file:
+            with open(fname, "wb") as f:
+                f.write(
+                    (
+                        "{}".format("#" if not repo.enabled else "")
+                        + "{} {}{} ".format(repo.repotype, options_str, repo.uri)
+                        + "{} {}\n".format(repo.release, " ".join(repo.groups))
+                    ).encode("utf-8")
+                )
+
+        return repo
+
+    def disable(self) -> None:
+        """Remove this repository from consideration.
+
+        Disable it instead of removing it from the repository file.
+        """
+        searcher = "{} {}{} {}".format(
+            self.repotype, self.make_options_string(), self.uri, self.release
+        )
+        for line in fileinput.input(self._filename, inplace=True):
+            if re.match(r"^{}\s".format(re.escape(searcher)), line):
+                print("# {}".format(line), end="")
+            else:
+                print(line, end="")
+
+    def import_key(self, key: str) -> None:
+        """Import an ASCII Armor key.
+
+        A Radix64 format keyid is also supported for backwards
+        compatibility. In this case Ubuntu keyserver will be
+        queried for a key via HTTPS by its keyid. This method
+        is less preferable because https proxy servers may
+        require traffic decryption which is equivalent to a
+        man-in-the-middle attack (a proxy server impersonates
+        keyserver TLS certificates and has to be explicitly
+        trusted by the system).
+
+        Args:
+            key: A GPG key in ASCII armor format,
+                including BEGIN and END markers or a keyid.
+
+        Raises:
+            GPGKeyError if the key could not be imported
+        """
+        key = key.strip()
+        if "-" in key or "\n" in key:
+            # Send everything not obviously a keyid to GPG to import, as
+            # we trust its validation better than our own. eg. handling
+            # comments before the key.
+ logger.debug("PGP key found (looks like ASCII Armor format)") + if ( + "-----BEGIN PGP PUBLIC KEY BLOCK-----" in key + and "-----END PGP PUBLIC KEY BLOCK-----" in key + ): + logger.debug("Writing provided PGP key in the binary format") + key_bytes = key.encode("utf-8") + key_name = self._get_keyid_by_gpg_key(key_bytes) + key_gpg = self._dearmor_gpg_key(key_bytes) + self._gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key_name) + self._write_apt_gpg_keyfile(key_name=self._gpg_key_filename, key_material=key_gpg) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + logger.warning( + "PGP key found (looks like Radix64 format). " + "SECURELY importing PGP key from keyserver; " + "full key not provided." + ) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = self._get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = self._dearmor_gpg_key(key_asc.encode("utf-8")) + self._gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key) + self._write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + @staticmethod + def _get_keyid_by_gpg_key(key_material: bytes) -> str: + """Get a GPG key fingerprint by GPG key material. + + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + """ + # Use the same gpg command for both Xenial and Bionic + cmd = ["gpg", "--with-colons", "--with-fingerprint"] + ps = subprocess.run( + cmd, + stdout=PIPE, + stderr=PIPE, + input=key_material, + ) + out, err = ps.stdout.decode(), ps.stderr.decode() + if "gpg: no valid OpenPGP data found." in err: + raise GPGKeyError("Invalid GPG key material provided") + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + @staticmethod + def _get_key_by_keyid(keyid: str) -> str: + """Get a key via HTTPS from the Ubuntu keyserver. + + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. 
+        8-digit (32 bit) key ID
+        https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+        16-digit (64 bit) key ID
+        https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+        40-digit key ID:
+        https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+        Args:
+            keyid: An 8, 16 or 40 hex digit keyid to find a key for
+
+        Returns:
+            A string containing key material for the specified GPG key id
+
+        Raises:
+            subprocess.CalledProcessError
+        """
+        # options=mr - machine-readable output (disables html wrappers)
+        keyserver_url = (
+            "https://keyserver.ubuntu.com" "/pks/lookup?op=get&options=mr&exact=on&search=0x{}"
+        )
+        curl_cmd = ["curl", keyserver_url.format(keyid)]
+        # use proxy server settings in order to retrieve the key
+        return check_output(curl_cmd).decode()
+
+    @staticmethod
+    def _dearmor_gpg_key(key_asc: bytes) -> bytes:
+        """Converts a GPG key in the ASCII armor format to the binary format.
+
+        Args:
+            key_asc: A GPG key in ASCII armor format.
+
+        Returns:
+            A GPG key in binary format as a string
+
+        Raises:
+            GPGKeyError
+        """
+        ps = subprocess.run(["gpg", "--dearmor"], stdout=PIPE, stderr=PIPE, input=key_asc)
+        out, err = ps.stdout, ps.stderr.decode()
+        if "gpg: no valid OpenPGP data found." in err:
+            raise GPGKeyError(
+                "Invalid GPG key material. Check your network setup"
+                " (MTU, routing, DNS) and/or proxy server settings"
+                " as well as destination keyserver status."
+            )
+        else:
+            return out
+
+    @staticmethod
+    def _write_apt_gpg_keyfile(key_name: str, key_material: bytes) -> None:
+        """Writes GPG key material into a file at a provided path.
+
+        Args:
+            key_name: A key name to use for a key file (could be a fingerprint)
+            key_material: A GPG key material (binary)
+        """
+        with open(key_name, "wb") as keyf:
+            keyf.write(key_material)
+
+
+class RepositoryMapping(Mapping):
+    """A representation of known repositories.
+
+    Instantiation of `RepositoryMapping` will iterate through the
+    filesystem, parse out repository files in `/etc/apt/...`, and create
+    `DebianRepository` objects in this list.
+
+    Typical usage:
+
+        repositories = apt.RepositoryMapping()
+        repositories.add(DebianRepository(
+            enabled=True, repotype="deb", uri="https://example.com", release="focal",
+            groups=["universe"]
+        ))
+    """
+
+    def __init__(self):
+        self._repository_map = {}
+        # Repositories that we're adding -- used to implement mode param
+        self.default_file = "/etc/apt/sources.list"
+
+        # read sources.list if it exists
+        if os.path.isfile(self.default_file):
+            self.load(self.default_file)
+
+        # read sources.list.d
+        for file in glob.iglob("/etc/apt/sources.list.d/*.list"):
+            self.load(file)
+
+    def __contains__(self, key: str) -> bool:
+        """Magic method for checking presence of repo in mapping."""
+        return key in self._repository_map
+
+    def __len__(self) -> int:
+        """Return number of repositories in map."""
+        return len(self._repository_map)
+
+    def __iter__(self) -> Iterable[DebianRepository]:
+        """Iterator magic method for RepositoryMapping."""
+        return iter(self._repository_map.values())
+
+    def __getitem__(self, repository_uri: str) -> DebianRepository:
+        """Return a given `DebianRepository`."""
+        return self._repository_map[repository_uri]
+
+    def __setitem__(self, repository_uri: str, repository: DebianRepository) -> None:
+        """Add a `DebianRepository` to the cache."""
+        self._repository_map[repository_uri] = repository
+
+    def load(self, filename: str):
+        """Load a repository source file into the cache.
+
+        Args:
+            filename: the path to the repository file
+        """
+        parsed = []
+        skipped = []
+        with open(filename, "r") as f:
+            for n, line in enumerate(f):
+                try:
+                    repo = self._parse(line, filename)
+                except InvalidSourceError:
+                    skipped.append(n)
+                else:
+                    repo_identifier = "{}-{}-{}".format(repo.repotype, repo.uri, repo.release)
+                    self._repository_map[repo_identifier] = repo
+                    parsed.append(n)
+                    logger.debug("parsed repo: '%s'", repo_identifier)
+
+        if skipped:
+            skip_list = ", ".join(str(s) for s in skipped)
+            logger.debug("skipped the following lines in file '%s': %s", filename, skip_list)
+
+        if parsed:
+            logger.info("parsed %d apt package repositories", len(parsed))
+        else:
+            raise InvalidSourceError("all repository lines in '{}' were invalid!".format(filename))
+
+    @staticmethod
+    def _parse(line: str, filename: str) -> DebianRepository:
+        """Parse a line in a sources.list file.
+
+        Args:
+            line: a single line from `load` to parse
+            filename: the filename being read
+
+        Raises:
+            InvalidSourceError if the source type is unknown
+        """
+        enabled = True
+        repotype = uri = release = gpg_key = ""
+        options = {}
+        groups = []
+
+        line = line.strip()
+        if line.startswith("#"):
+            enabled = False
+            line = line[1:]
+
+        # Check for "#" in the line, treat anything after it as a comment, and strip it off.
+        i = line.find("#")
+        if i > 0:
+            line = line[:i]
+
+        # Split a source into substrings to initialize a new repo.
+        source = line.strip()
+        if source:
+            # Match any repo options, and get a dict representation.
+            for v in re.findall(OPTIONS_MATCHER, source):
+                opts = dict(o.split("=") for o in v.strip("[]").split())
+                # Extract the 'signed-by' option for the gpg_key
+                gpg_key = opts.pop("signed-by", "")
+                options = opts
+
+            # Remove any options from the source string and split the string into chunks
+            source = re.sub(OPTIONS_MATCHER, "", source)
+            chunks = source.split()
+
+            # Check we've got a valid list of chunks
+            if len(chunks) < 3 or chunks[0] not in VALID_SOURCE_TYPES:
+                raise InvalidSourceError(
+                    "An invalid sources line was found in {}!".format(filename)
+                )
+
+            repotype = chunks[0]
+            uri = chunks[1]
+            release = chunks[2]
+            groups = chunks[3:]
+
+            return DebianRepository(
+                enabled, repotype, uri, release, groups, filename, gpg_key, options
+            )
+        else:
+            raise InvalidSourceError("An invalid sources line was found in {}!".format(filename))
+
+    def add(self, repo: DebianRepository, default_filename: Optional[bool] = False) -> None:
+        """Add a new repository to the system.
+
+        Args:
+            repo: a `DebianRepository` object
+            default_filename: an (Optional) filename if the default is not desirable
+        """
+        new_filename = "{}-{}.list".format(
+            DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-")
+        )
+
+        fname = repo.filename or new_filename
+
+        options = repo.options if repo.options else {}
+        if repo.gpg_key:
+            options["signed-by"] = repo.gpg_key
+
+        with open(fname, "wb") as f:
+            f.write(
+                (
+                    "{}".format("#" if not repo.enabled else "")
+                    + "{} {}{} ".format(repo.repotype, repo.make_options_string(), repo.uri)
+                    + "{} {}\n".format(repo.release, " ".join(repo.groups))
+                ).encode("utf-8")
+            )
+
+        self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo
+
+    def disable(self, repo: DebianRepository) -> None:
+        """Disable a repository in its source file rather than removing it.
+
+        Args:
+            repo: a `DebianRepository` to disable
+        """
+        searcher = "{} {}{} {}".format(
+            repo.repotype, repo.make_options_string(), repo.uri, repo.release
+        )
+
+        for line in fileinput.input(repo.filename, inplace=True):
+            if re.match(r"^{}\s".format(re.escape(searcher)), line):
+                print("# {}".format(line), end="")
+            else:
+                print(line, end="")
+
+        self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo
diff --git a/ceph-mon/lib/charms/operator_libs_linux/v1/systemd.py b/ceph-mon/lib/charms/operator_libs_linux/v1/systemd.py
new file mode 100644
index 00000000..5be34c17
--- /dev/null
+++ b/ceph-mon/lib/charms/operator_libs_linux/v1/systemd.py
@@ -0,0 +1,219 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Abstractions for stopping, starting and managing system services via systemd.
+
+This library assumes that your charm is running on a platform that uses systemd. E.g.,
+CentOS 7 or later, Ubuntu Xenial (16.04) or later.
+
+For the most part, we transparently provide an interface to a commonly used selection of
+systemd commands, with a few shortcuts baked in. For example, service_pause and
+service_resume will run the mask/unmask and enable/disable invocations.
+
+Example usage:
+```python
+from charms.operator_libs_linux.v1.systemd import (
+    service_reload,
+    service_running,
+    service_start,
+)
+
+# Start a service
+if not service_running("mysql"):
+    success = service_start("mysql")
+
+# Attempt to reload a service, restarting if necessary
+success = service_reload("nginx", restart_on_failure=True)
+```
+"""
+
+import logging
+import subprocess
+
+__all__ = [  # Don't export `_systemctl`. (It's not the intended way of using this lib.)
+    "service_pause",
+    "service_reload",
+    "service_restart",
+    "service_resume",
+    "service_running",
+    "service_start",
+    "service_stop",
+    "daemon_reload",
+]
+
+logger = logging.getLogger(__name__)
+
+# The unique Charmhub library identifier, never change it
+LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf"
+
+# Increment this major API version when introducing breaking changes
+LIBAPI = 1
+
+# Increment this PATCH version before using `charmcraft publish-lib` or reset
+# to 0 if you are raising the major API version
+LIBPATCH = 0
+
+
+class SystemdError(Exception):
+    """Raised when a systemctl invocation fails."""
+
+
+def _popen_kwargs():
+    return dict(
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        bufsize=1,
+        universal_newlines=True,
+        encoding="utf-8",
+    )
+
+
+def _systemctl(
+    sub_cmd: str, service_name: str = None, now: bool = None, quiet: bool = None
+) -> bool:
+    """Control a system service.
+
+    Args:
+        sub_cmd: the systemctl subcommand to issue
+        service_name: the name of the service to perform the action on
+        now: passes the --now flag to the shell invocation.
+        quiet: passes the --quiet flag to the shell invocation.
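+
+    For example, `_systemctl("restart", "nginx")` runs `systemctl restart nginx`
+    (shown for illustration only; charm code should use the public wrappers
+    exported above).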
+    """
+    cmd = ["systemctl", sub_cmd]
+
+    if service_name is not None:
+        cmd.append(service_name)
+    if now is not None:
+        cmd.append("--now")
+    if quiet is not None:
+        cmd.append("--quiet")
+    if sub_cmd != "is-active":
+        logger.debug("Attempting to {} '{}' with command {}.".format(sub_cmd, service_name, cmd))
+    else:
+        logger.debug("Checking if '{}' is active".format(service_name))
+
+    proc = subprocess.Popen(cmd, **_popen_kwargs())
+    last_line = ""
+    for line in iter(proc.stdout.readline, ""):
+        last_line = line
+        logger.debug(line)
+
+    proc.wait()
+
+    if sub_cmd == "is-active":
+        # If we are just checking whether a service is running, return True/False, rather
+        # than raising an error.
+        if proc.returncode < 1:
+            return True
+        if proc.returncode == 3:  # Code returned when service is not active.
+            return False
+
+    if proc.returncode < 1:
+        return True
+
+    raise SystemdError(
+        "Could not {}{}: systemd output: {}".format(
+            sub_cmd, " {}".format(service_name) if service_name else "", last_line
+        )
+    )
+
+
+def service_running(service_name: str) -> bool:
+    """Determine whether a system service is running.
+
+    Args:
+        service_name: the name of the service
+    """
+    return _systemctl("is-active", service_name, quiet=True)
+
+
+def service_start(service_name: str) -> bool:
+    """Start a system service.
+
+    Args:
+        service_name: the name of the service to start
+    """
+    return _systemctl("start", service_name)
+
+
+def service_stop(service_name: str) -> bool:
+    """Stop a system service.
+
+    Args:
+        service_name: the name of the service to stop
+    """
+    return _systemctl("stop", service_name)
+
+
+def service_restart(service_name: str) -> bool:
+    """Restart a system service.
+
+    Args:
+        service_name: the name of the service to restart
+    """
+    return _systemctl("restart", service_name)
+
+
+def service_reload(service_name: str, restart_on_failure: bool = False) -> bool:
+    """Reload a system service, optionally falling back to restart if reload fails.
+
+    Args:
+        service_name: the name of the service to reload
+        restart_on_failure: boolean indicating whether to fall back to a restart if the
+            reload fails.
+    """
+    try:
+        return _systemctl("reload", service_name)
+    except SystemdError:
+        if restart_on_failure:
+            return _systemctl("restart", service_name)
+        else:
+            raise
+
+
+def service_pause(service_name: str) -> bool:
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot.
+
+    Args:
+        service_name: the name of the service to pause
+    """
+    _systemctl("disable", service_name, now=True)
+    _systemctl("mask", service_name)
+
+    if not service_running(service_name):
+        return True
+
+    raise SystemdError("Attempted to pause '{}', but it is still running.".format(service_name))
+
+
+def service_resume(service_name: str) -> bool:
+    """Resume a system service.
+
+    Re-enable starting again at boot. Start the service.
+
+    Args:
+        service_name: the name of the service to resume
+    """
+    _systemctl("unmask", service_name)
+    _systemctl("enable", service_name, now=True)
+
+    if service_running(service_name):
+        return True
+
+    raise SystemdError("Attempted to resume '{}', but it is not running.".format(service_name))
+
+
+def daemon_reload() -> bool:
+    """Reload systemd manager configuration."""
+    return _systemctl("daemon-reload")
diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py
index 1cc4c4bd..5c65b36d 100755
--- a/ceph-mon/src/ceph_hooks.py
+++ b/ceph-mon/src/ceph_hooks.py
@@ -58,13 +58,10 @@ cmp_pkgrevno)
 from charmhelpers.fetch import (
     apt_install,
-    apt_update,
-    apt_purge,
     filter_installed_packages,
     add_source,
     get_upstream_version,
 )
-from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 from charmhelpers.contrib.openstack.utils import (
     clear_unit_paused,
@@ -158,25 +155,6 @@ def check_for_upgrade():
                   level=ERROR)
 
 
-@hooks.hook('install.real')
-@harden()
-def install():
-    execd_preinstall()
-    add_source(config('source'), config('key'))
-    apt_update(fatal=True)
-    apt_install(packages=ceph.determine_packages(), fatal=True)
-    rm_packages = ceph.determine_packages_to_remove()
-    if rm_packages:
-        apt_purge(packages=rm_packages, fatal=True)
-    try:
-        # we defer and explicitly run `ceph-create-keys` from
-        # add_keyring_to_ceph() as part of bootstrap process
-        # LP: #1719436.
-        service_pause('ceph-create-keys')
-    except ValueError:
-        pass
-
-
 def get_ceph_context():
     networks = get_networks('ceph-public-network')
     public_network = ', '.join(networks)
diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py
index 9003aa52..4acadc72 100755
--- a/ceph-mon/src/charm.py
+++ b/ceph-mon/src/charm.py
@@ -2,16 +2,40 @@
 
 from ops.main import main
 
+import charms.operator_libs_linux.v0.apt as apt
+import charms.operator_libs_linux.v1.systemd as systemd
+
 import ops_openstack.core
+import charms_ceph.utils as ceph
 import ceph_hooks as hooks
 
 
 class CephMonCharm(ops_openstack.core.OSBaseCharm):
 
+    release = 'quincy'
+
+    PACKAGES = [
+        'ceph', 'gdisk',
+        'radosgw', 'lvm2', 'parted', 'smartmontools',
+    ]
+
     # General charm control callbacks.
+
+    # TODO: Figure out how to do hardening in an operator-framework
+    # world
     def on_install(self, event):
-        hooks.install()
+        self.install_pkgs()
+        rm_packages = ceph.determine_packages_to_remove()
+        if rm_packages:
+            apt.remove_package(package_names=rm_packages)
+        try:
+            # we defer and explicitly run `ceph-create-keys` from
+            # add_keyring_to_ceph() as part of bootstrap process
+            # LP: #1719436.
+            systemd.service_pause('ceph-create-keys')
+        except systemd.SystemdError:
+            pass
 
     def on_config(self, event):
         hooks.config_changed()

From b69c73c2c7d3ead6a4b3d6a9c1e458d2e1770fb3 Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Fri, 2 Sep 2022 17:44:42 +0200
Subject: [PATCH 2419/2699] Add support for prometheus-k8s

Add support for the metrics-endpoint relation. This allows
relating ceph-mon to prometheus-k8s which is being used in the
COS Lite observability stack. Upon relation, the ceph prometheus
module will be enabled and a corresponding scrape job configured
for prometheus-k8s.

Drive-by test improvement for the utils module

Change-Id: Iaeee57aaa6f3678fdaef35f2582b4b4c974acb2a
---
 ceph-mon/README.md                            |    4 +
 .../observability_libs/v0/juju_topology.py    |  306 +++
 .../prometheus_k8s/v0/prometheus_scrape.py    | 2282 +++++++++++++++++
 ceph-mon/metadata.yaml                        |    2 +
 ceph-mon/src/ceph_metrics.py                  |   51 +
 ceph-mon/src/charm.py                         |    3 +
 ceph-mon/tox.ini                              |    2 +-
 ceph-mon/unit_tests/test_ceph_metrics.py      |   91 +
 ceph-mon/unit_tests/test_ceph_utils.py        |    3 +-
 9 files changed, 2741 insertions(+), 3 deletions(-)
 create mode 100644 ceph-mon/lib/charms/observability_libs/v0/juju_topology.py
 create mode 100644 ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
 create mode 100644 ceph-mon/src/ceph_metrics.py
 create mode 100644 ceph-mon/unit_tests/test_ceph_metrics.py
diff --git a/ceph-mon/README.md b/ceph-mon/README.md
index ab45dc2b..ded17a27 100644
--- a/ceph-mon/README.md
+++ b/ceph-mon/README.md
@@ -140,6 +140,9 @@ The charm supports Ceph metric monitoring with Prometheus. Add relations to the
 
 > **Note**: Prometheus support is available starting with Ceph Luminous (xenial-queens UCA pocket).
 
+Alternatively, integration with the [COS Lite][cos-lite] observability
+stack is available via the metrics-endpoint relation.
+
 ## Actions
 
 This section lists Juju [actions][juju-docs-actions] supported by the charm.
@@ -224,3 +227,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg].
 [cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA
 [upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets
 [jq]: https://stedolan.github.io/jq/
+[cos-lite]: https://charmhub.io/cos-lite
diff --git a/ceph-mon/lib/charms/observability_libs/v0/juju_topology.py b/ceph-mon/lib/charms/observability_libs/v0/juju_topology.py
new file mode 100644
index 00000000..c985b1e7
--- /dev/null
+++ b/ceph-mon/lib/charms/observability_libs/v0/juju_topology.py
@@ -0,0 +1,306 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""## Overview.
+
+This document explains how to use the `JujuTopology` class to
+create and consume topology information from Juju in a consistent manner.
+
+The goal of the Juju topology is to uniquely identify a piece
+of software running across any of your Juju-managed deployments.
+This is achieved by combining the following four elements:
+
+- Model name
+- Model UUID
+- Application name
+- Unit identifier
+
+For a more in-depth description of the concept, as well as a
+walk-through of its use case in observability, see
+[this blog post](https://juju.is/blog/model-driven-observability-part-2-juju-topology-metrics)
+on the Juju blog.
+
+## Library Usage
+
+This library may be used to create and consume `JujuTopology` objects.
+The `JujuTopology` class provides three ways to create instances:
+
+### Using the `from_charm` method
+
+Enables instantiation by supplying the charm as an argument. When
+creating topology objects for the current charm, this is the recommended
+approach.
+
+```python
+topology = JujuTopology.from_charm(self)
+```
+
+### Using the `from_dict` method
+
+Allows for instantiation using a dictionary of relation data, like the
+`scrape_metadata` from Prometheus or the labels of an alert rule. When
+creating topology objects for remote charms, this is the recommended
+approach.
+
+```python
+scrape_metadata = json.loads(relation.data[relation.app].get("scrape_metadata", "{}"))
+topology = JujuTopology.from_dict(scrape_metadata)
+```
+
+### Using the class constructor
+
+Enables instantiation using whatever values you want. While this
+is useful in some very specific cases, this is almost certainly not
+what you are looking for, as setting these values manually may
+result in observability metrics which do not uniquely identify a
+charm in order to provide accurate usage reporting, alerting,
+horizontal scaling, or other use cases.
+
+```python
+topology = JujuTopology(
+    model="some-juju-model",
+    model_uuid="00000000-0000-0000-0000-000000000001",
+    application="fancy-juju-application",
+    unit="fancy-juju-application/0",
+    charm_name="fancy-juju-application-k8s",
+)
+```
+"""
+
+import re
+from collections import OrderedDict
+from typing import Dict, List, Optional
+
+# The unique Charmhub library identifier, never change it
+LIBID = "bced1658f20f49d28b88f61f83c2d232"
+
+LIBAPI = 0
+LIBPATCH = 2
+
+
+class InvalidUUIDError(Exception):
+    """Invalid UUID was provided."""
+
+    def __init__(self, uuid: str):
+        self.message = "'{}' is not a valid UUID.".format(uuid)
+        super().__init__(self.message)
+
+
+class JujuTopology:
+    """JujuTopology is used for storing, generating and formatting juju topology information."""
+
+    def __init__(
+        self,
+        model: str,
+        model_uuid: str,
+        application: str,
+        unit: str = None,
+        charm_name: str = None,
+    ):
+        """Build a JujuTopology object.
+
+        A `JujuTopology` object is used for storing and transforming
+        Juju topology information. This information is used to
+        annotate Prometheus scrape jobs and alert rules. Such
+        annotation, when applied to scrape jobs, helps in identifying
+        the source of the scraped metrics. On the other hand, when
+        applied to alert rules, topology information ensures that
+        evaluation of alert expressions is restricted to the source
+        (charm) from which the alert rules were obtained.
+
+        Args:
+            model: a string name of the Juju model
+            model_uuid: a globally unique string identifier for the Juju model
+            application: an application name as a string
+            unit: a unit name as a string
+            charm_name: name of charm as a string
+        """
+        if not self.is_valid_uuid(model_uuid):
+            raise InvalidUUIDError(model_uuid)
+
+        self._model = model
+        self._model_uuid = model_uuid
+        self._application = application
+        self._charm_name = charm_name
+        self._unit = unit
+
+    def is_valid_uuid(self, uuid):
+        """Validate the supplied UUID against the Juju Model UUID pattern."""
+        # TODO:
+        # Harness is hardcoding a UUID that is v1, not v4: f2c1b2a6-e006-11eb-ba80-0242ac130004
+        # See: https://github.com/canonical/operator/issues/779
+        #
+        # >>> uuid.UUID("f2c1b2a6-e006-11eb-ba80-0242ac130004").version
+        # 1
+        #
+        # we changed the validation of the 3rd UUID block: 4[a-f0-9]{3} -> [a-f0-9]{4}
+        # See: https://github.com/canonical/operator/blob/main/ops/testing.py#L1094
+        #
+        # Juju in fact generates a UUID v4: https://github.com/juju/utils/blob/master/uuid.go#L62
+        # but does not validate it is actually v4:
+        # See:
+        # - https://github.com/juju/utils/blob/master/uuid.go#L22
+        # - https://github.com/juju/schema/blob/master/strings.go#L79
+        #
+        # Once Harness fixes this, we should remove this comment and refactor the regex or
+        # the entire method using the uuid module to validate UUIDs
+        regex = re.compile(
+            "^[a-f0-9]{8}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}$"
+        )
+        return bool(regex.match(uuid))
+
+    @classmethod
+    def from_charm(cls, charm):
+        """Creates a JujuTopology instance by using the model data available on a charm object.
+
+        Args:
+            charm: a `CharmBase` object for which the `JujuTopology` will be constructed
+        Returns:
+            a `JujuTopology` object.
+        """
+        return cls(
+            model=charm.model.name,
+            model_uuid=charm.model.uuid,
+            application=charm.model.app.name,
+            unit=charm.model.unit.name,
+            charm_name=charm.meta.name,
+        )
+
+    @classmethod
+    def from_dict(cls, data: dict):
+        """Factory method for creating `JujuTopology` children from a dictionary.
+
+        Args:
+            data: a dictionary with five keys providing topology information. The keys are
+                - "model"
+                - "model_uuid"
+                - "application"
+                - "unit"
+                - "charm_name"
+                `unit` and `charm_name` may be empty, but will result in more limited
+                labels. However, this allows us to support charms without workloads.
+
+        Returns:
+            a `JujuTopology` object.
+        """
+        return cls(
+            model=data["model"],
+            model_uuid=data["model_uuid"],
+            application=data["application"],
+            unit=data.get("unit", ""),
+            charm_name=data.get("charm_name", ""),
+        )
+
+    def as_dict(
+        self, *, remapped_keys: Dict[str, str] = None, excluded_keys: List[str] = None
+    ) -> OrderedDict:
+        """Format the topology information into an ordered dict.
+
+        Keeping the dictionary ordered is important to be able to
+        compare dicts without having to resort to deep comparisons.
+
+        Args:
+            remapped_keys: A dictionary mapping old key names to new key names,
+                which will be substituted when invoked.
+            excluded_keys: A list of key names to exclude from the returned dict.
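+
+        For example (a sketch; `topology` is any `JujuTopology` instance):
+
+            topology.as_dict(remapped_keys={"charm_name": "charm"},
+                             excluded_keys=["unit"])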
+ """ + ret = OrderedDict( + [ + ("model", self.model), + ("model_uuid", self.model_uuid), + ("application", self.application), + ("unit", self.unit), + ("charm_name", self.charm_name), + ] + ) + if excluded_keys: + ret = OrderedDict({k: v for k, v in ret.items() if k not in excluded_keys}) + + if remapped_keys: + ret = OrderedDict( + (remapped_keys.get(k), v) if remapped_keys.get(k) else (k, v) for k, v in ret.items() # type: ignore + ) + + return ret + + @property + def identifier(self) -> str: + """Format the topology information into a terse string. + + This crops the model UUID, making it unsuitable for comparisons against + anything but other identifiers. Mainly to be used as a display name or file + name where long strings might become an issue. + + >>> JujuTopology( \ + model = "a-model", \ + model_uuid = "00000000-0000-4000-8000-000000000000", \ + application = "some-app", \ + unit = "some-app/1" \ + ).identifier + 'a-model_00000000_some-app' + """ + parts = self.as_dict( + excluded_keys=["unit", "charm_name"], + ) + + parts["model_uuid"] = self.model_uuid_short + values = parts.values() + + return "_".join([str(val) for val in values]).replace("/", "_") + + @property + def label_matcher_dict(self) -> Dict[str, str]: + """Format the topology information into a dict with keys having 'juju_' as prefix. + + Relabelled topology never includes the unit as it would then only match + the leader unit (ie. the unit that produced the dict). + """ + items = self.as_dict( + remapped_keys={"charm_name": "charm"}, + excluded_keys=["unit"], + ).items() + + return {"juju_{}".format(key): value for key, value in items if value} + + @property + def label_matchers(self) -> str: + """Format the topology information into a promql/logql label matcher string. + + Topology label matchers should never include the unit as it + would then only match the leader unit (ie. the unit that + produced the matchers). + """ + items = self.label_matcher_dict.items() + return ", ".join(['{}="{}"'.format(key, value) for key, value in items if value]) + + @property + def model(self) -> str: + """Getter for the juju model value.""" + return self._model + + @property + def model_uuid(self) -> str: + """Getter for the juju model uuid value.""" + return self._model_uuid + + @property + def model_uuid_short(self) -> str: + """Getter for the juju model value, truncated to the first eight letters.""" + return self._model_uuid[:8] + + @property + def application(self) -> str: + """Getter for the juju application value.""" + return self._application + + @property + def charm_name(self) -> Optional[str]: + """Getter for the juju charm name value.""" + return self._charm_name + + @property + def unit(self) -> Optional[str]: + """Getter for the juju unit value.""" + return self._unit diff --git a/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py new file mode 100644 index 00000000..85e922a9 --- /dev/null +++ b/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -0,0 +1,2282 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. +"""## Overview. + +This document explains how to integrate with the Prometheus charm +for the purpose of providing a metrics endpoint to Prometheus. It +also explains how alternative implementations of the Prometheus charms +may maintain the same interface and be backward compatible with all +currently integrated charms. 
Finally, this document is the
+authoritative reference on the structure of relation data that is
+shared between Prometheus charms and any other charm that intends to
+provide a scrape target for Prometheus.
+
+## Provider Library Usage
+
+This Prometheus charm interacts with its scrape targets using its
+charm library. Charms seeking to expose metric endpoints for the
+Prometheus charm must do so using the `MetricsEndpointProvider`
+object from this charm library. For the simplest use cases, using the
+`MetricsEndpointProvider` object only requires instantiating it,
+typically in the constructor of your charm (the one which exposes a
+metrics endpoint). The `MetricsEndpointProvider` constructor requires
+the name of the relation over which a scrape target (metrics endpoint)
+is exposed to the Prometheus charm. This relation must use the
+`prometheus_scrape` interface. By default, the address of the metrics
+endpoint is set to the unit IP address by each unit of the
+`MetricsEndpointProvider` charm. These units set their address in
+response to the `PebbleReady` event of each container in the unit,
+since container restarts of Kubernetes charms can result in change of
+IP addresses. The default name for the metrics endpoint relation is
+`metrics-endpoint`. It is strongly recommended to use the same
+relation name for consistency across charms, and doing so obviates the
+need for an additional constructor argument. The
+`MetricsEndpointProvider` object may be instantiated as follows
+
+    from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        ...
+        self.metrics_endpoint = MetricsEndpointProvider(self)
+        ...
+
+Note that the first argument (`self`) to `MetricsEndpointProvider` is
+always a reference to the parent (scrape target) charm.
+
+An instantiated `MetricsEndpointProvider` object will ensure that each
+unit of its parent charm is a scrape target for the
+`MetricsEndpointConsumer` (Prometheus) charm. By default,
+`MetricsEndpointProvider` assumes each unit of the consumer charm
+exports its metrics at a path given by `/metrics` on port 80. These
+defaults may be changed by providing the `MetricsEndpointProvider`
+constructor an optional argument (`jobs`) that represents a
+Prometheus scrape job specification using Python standard data
+structures. This job specification is a subset of Prometheus' own
+[scrape
+configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
+format but represented using Python data structures. More than one job
+may be provided using the `jobs` argument. Hence `jobs` accepts a list
+of dictionaries where each dictionary represents one `<scrape_config>`
+object as described in the Prometheus documentation. The currently
+supported configuration subset is: `job_name`, `metrics_path`,
+`static_configs`
+
+Suppose it is required to change the port on which scraped metrics are
+exposed to 8000. This may be done by providing the following data
+structure as the value of `jobs`.
+
+```
+[
+    {
+        "static_configs": [
+            {
+                "targets": ["*:8000"]
+            }
+        ]
+    }
+]
+```
+
+The wildcard ("*") host specification implies that the scrape targets
+will automatically be set to the host addresses advertised by each
+unit of the consumer charm.
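+
+For example, the port-8000 job above might be wired into the provider as
+follows (a sketch; the `jobs` value is the list-of-dicts structure shown
+above):
+
+    self.metrics_endpoint = MetricsEndpointProvider(
+        self, jobs=[{"static_configs": [{"targets": ["*:8000"]}]}]
+    )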
+
+It is also possible to change the metrics path and scrape multiple
+ports, for example
+
+```
+[
+    {
+        "metrics_path": "/my-metrics-path",
+        "static_configs": [
+            {
+                "targets": ["*:8000", "*:8081"],
+            }
+        ]
+    }
+]
+```
+
+More complex scrape configurations are possible. For example
+
+```
+[
+    {
+        "static_configs": [
+            {
+                "targets": ["10.1.32.215:7000", "*:8000"],
+                "labels": {
+                    "some-key": "some-value"
+                }
+            }
+        ]
+    }
+]
+```
+
+This example scrapes the target "10.1.32.215" at port 7000 in addition
+to scraping each unit at port 8000. There is however one difference
+between wildcard targets (specified using "*") and fully qualified
+targets (such as "10.1.32.215"). The Prometheus charm automatically
+associates labels with metrics generated by each target. These labels
+localise the source of metrics within the Juju topology by specifying
+its "model name", "model UUID", "application name" and "unit
+name". However, unit name is associated only with wildcard targets and
+not with fully qualified targets.
+
+Multiple jobs with different metrics paths and labels are allowed, but
+each job must be given a unique name:
+
+```
+[
+    {
+        "job_name": "my-first-job",
+        "metrics_path": "one-path",
+        "static_configs": [
+            {
+                "targets": ["*:7000"],
+                "labels": {
+                    "some-key": "some-value"
+                }
+            }
+        ]
+    },
+    {
+        "job_name": "my-second-job",
+        "metrics_path": "another-path",
+        "static_configs": [
+            {
+                "targets": ["*:8000"],
+                "labels": {
+                    "some-other-key": "some-other-value"
+                }
+            }
+        ]
+    }
+]
+```
+
+**Important:** `job_name` should be a fixed string (e.g. a hardcoded literal).
+For instance, if you include variable elements, like your `unit.name`, it may break
+the continuity of the metrics time series gathered by Prometheus when the leader unit
+changes (e.g. on upgrade or rescale).
+
+Additionally, it is also technically possible, but **strongly discouraged**, to
+configure the following scrape-related settings, which behave as described by the
+[Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config):
+
+- `static_configs`
+- `scrape_interval`
+- `scrape_timeout`
+- `proxy_url`
+- `relabel_configs`
+- `metrics_relabel_configs`
+- `sample_limit`
+- `label_limit`
+- `label_name_length_limit`
+- `label_value_length_limit`
+
+The settings above are supported by the `prometheus_scrape` library only for the sake of
+specialized facilities like the [Prometheus Scrape Config](https://charmhub.io/prometheus-scrape-config-k8s)
+charm. Virtually no charms should use these settings, and charmers definitely **should not**
+expose them to the Juju administrator via configuration options.
+
+## Consumer Library Usage
+
+The `MetricsEndpointConsumer` object may be used by Prometheus
+charms to manage relations with their scrape targets. For this
+purpose a Prometheus charm needs to do two things
+
+1. Instantiate the `MetricsEndpointConsumer` object by providing it a
+reference to the parent (Prometheus) charm and optionally the name of
+the relation that the Prometheus charm uses to interact with scrape
+targets. This relation must conform to the `prometheus_scrape`
+interface, and it is strongly recommended that this relation be named
+`metrics-endpoint`, which is its default value.
+
+For example a Prometheus charm may instantiate the
+`MetricsEndpointConsumer` in its constructor as follows
+
+    from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointConsumer
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        ...
+        self.metrics_consumer = MetricsEndpointConsumer(self)
+        ...
+
+2. A Prometheus charm also needs to respond to the
+`TargetsChangedEvent` event of the `MetricsEndpointConsumer` by adding itself as
+an observer for these events, as in
+
+    self.framework.observe(
+        self.metrics_consumer.on.targets_changed,
+        self._on_scrape_targets_changed,
+    )
+
+In responding to the `TargetsChangedEvent` event, the Prometheus
+charm must update the Prometheus configuration so that any new scrape
+targets are added and/or old ones removed from the list of scraped
+endpoints. For this purpose the `MetricsEndpointConsumer` object
+exposes a `jobs()` method that returns a list of scrape jobs. Each
+element of this list is the Prometheus scrape configuration for that
+job. In order to update the Prometheus configuration, the Prometheus
+charm needs to replace the current list of jobs with the list provided
+by `jobs()` as follows
+
+    def _on_scrape_targets_changed(self, event):
+        ...
+        scrape_jobs = self.metrics_consumer.jobs()
+        for job in scrape_jobs:
+            prometheus_scrape_config.append(job)
+        ...
+
+## Alerting Rules
+
+This charm library also supports gathering alerting rules from all
+related `MetricsEndpointProvider` charms and enabling corresponding alerts within the
+Prometheus charm. Alert rules are automatically gathered by `MetricsEndpointProvider`
+charms when using this library, from a directory conventionally named
+`prometheus_alert_rules`. This directory must reside at the top level
+in the `src` folder of the provider charm. Each file in this directory
+is assumed to be in one of two formats:
+- the official Prometheus alert rule format, conforming to the
+[Prometheus docs](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
+- a single rule format, which is a simplified subset of the official format,
+comprising a single alert rule per file, using the same YAML fields.
+
+The file name must have the `.rule` extension.
+
+An example of the contents of such a file in the custom single rule
+format is shown below.
+
+```
+alert: HighRequestLatency
+expr: job:request_latency_seconds:mean5m{my_key="my_value"} > 0.5
+for: 10m
+labels:
+  severity: Medium
+  type: HighLatency
+annotations:
+  summary: High request latency for {{ $labels.instance }}.
+```
+
+The `MetricsEndpointProvider` will read all available alert rules and
+also inject "filtering labels" into the alert expressions. The
+filtering labels ensure that alert rules are localised to the metrics
+provider charm's Juju topology (application, model and its UUID). Such
+a topology filter is essential to ensure that alert rules submitted by
+one provider charm generate alerts only for that same charm. When
+alert rules are embedded in a charm, and the charm is deployed as a
+Juju application, the alert rules from that application have their
+expressions automatically updated to filter for metrics coming from
+the units of that application alone. This removes the risk of spurious
+evaluation, e.g., when you have multiple deployments of the same charm
+monitored by the same Prometheus.
+
+Not all alerts one may want to specify can be embedded in a
+charm. Some alert rules will be specific to a user's use case. This is
+the case, for example, of alert rules that are based on business
+constraints, like expecting a certain number of requests to a specific
+API every five minutes.
Such alert rules can be specified via the
+[COS Config Charm](https://charmhub.io/cos-configuration-k8s),
+which allows importing alert rules and other settings like dashboards
+from a Git repository.
+
+Gathering alert rules and generating rule files within the Prometheus
+charm is easily done using the `alerts()` method of
+`MetricsEndpointConsumer`. Alerts generated by Prometheus will
+automatically include Juju topology labels in the alerts. These labels
+indicate the source of the alert. The following labels are
+automatically included with each alert:
+
+- `juju_model`
+- `juju_model_uuid`
+- `juju_application`
+
+## Relation Data
+
+The Prometheus charm uses both application and unit relation data to
+obtain information regarding its scrape jobs, alert rules and scrape
+targets. This relation data is in JSON format and it closely resembles
+the YAML structure of Prometheus
+[scrape configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
+
+Units of metrics provider charms advertise their names and addresses
+over unit relation data using the `prometheus_scrape_unit_name` and
+`prometheus_scrape_unit_address` keys, while the `scrape_metadata`,
+`scrape_jobs` and `alert_rules` keys in the application relation data
+of metrics provider charms hold the eponymous information.
+
+""" # noqa: W505
+
+import copy
+import hashlib
+import ipaddress
+import json
+import logging
+import os
+import platform
+import re
+import socket
+import subprocess
+import tempfile
+import uuid
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Union
+
+import yaml
+from charms.observability_libs.v0.juju_topology import JujuTopology
+from ops.charm import CharmBase, RelationRole
+from ops.framework import BoundEvent, EventBase, EventSource, Object, ObjectEvents
+
+# The unique Charmhub library identifier, never change it
+LIBID = "bc84295fef5f4049878f07b131968ee2"
+
+# Increment this major API version when introducing breaking changes
+LIBAPI = 0
+
+# Increment this PATCH version before using `charmcraft publish-lib` or reset
+# to 0 if you are raising the major API version
+LIBPATCH = 21
+
+logger = logging.getLogger(__name__)
+
+
+ALLOWED_KEYS = {
+    "job_name",
+    "metrics_path",
+    "static_configs",
+    "scrape_interval",
+    "scrape_timeout",
+    "proxy_url",
+    "relabel_configs",
+    "metrics_relabel_configs",
+    "sample_limit",
+    "label_limit",
+    "label_name_length_limit",
+    "label_value_length_limit",
+}
+DEFAULT_JOB = {
+    "metrics_path": "/metrics",
+    "static_configs": [{"targets": ["*:80"]}],
+}
+
+
+DEFAULT_RELATION_NAME = "metrics-endpoint"
+RELATION_INTERFACE_NAME = "prometheus_scrape"
+
+DEFAULT_ALERT_RULES_RELATIVE_PATH = "./src/prometheus_alert_rules"
+
+
+class RelationNotFoundError(Exception):
+    """Raised if no relation with the given name is found."""
+
+    def __init__(self, relation_name: str):
+        self.relation_name = relation_name
+        self.message = "No relation named '{}' found".format(relation_name)
+
+        super().__init__(self.message)
+
+
+class RelationInterfaceMismatchError(Exception):
+    """Raised if the relation with the given name has a different interface."""
+
+    def __init__(
+        self,
+        relation_name: str,
+        expected_relation_interface: str,
+        actual_relation_interface: str,
+    ):
+        self.relation_name = relation_name
+        self.expected_relation_interface = expected_relation_interface
+        self.actual_relation_interface = actual_relation_interface
+        self.message = (
+            "The '{}' relation has '{}' as interface rather than the expected 
'{}'".format(
+                relation_name, actual_relation_interface, expected_relation_interface
+            )
+        )
+
+        super().__init__(self.message)
+
+
+class RelationRoleMismatchError(Exception):
+    """Raised if the relation with the given name has a different role."""
+
+    def __init__(
+        self,
+        relation_name: str,
+        expected_relation_role: RelationRole,
+        actual_relation_role: RelationRole,
+    ):
+        self.relation_name = relation_name
+        self.expected_relation_role = expected_relation_role
+        self.actual_relation_role = actual_relation_role
+        self.message = "The '{}' relation has role '{}' rather than the expected '{}'".format(
+            relation_name, repr(actual_relation_role), repr(expected_relation_role)
+        )
+
+        super().__init__(self.message)
+
+
+class InvalidAlertRuleEvent(EventBase):
+    """Event emitted when alert rule files are not parsable.
+
+    Enables us to set a clear status on the provider.
+    """
+
+    def __init__(self, handle, errors: str = "", valid: bool = False):
+        super().__init__(handle)
+        self.errors = errors
+        self.valid = valid
+
+    def snapshot(self) -> Dict:
+        """Save alert rule information."""
+        return {
+            "valid": self.valid,
+            "errors": self.errors,
+        }
+
+    def restore(self, snapshot):
+        """Restore alert rule information."""
+        self.valid = snapshot["valid"]
+        self.errors = snapshot["errors"]
+
+
+class MetricsEndpointProviderEvents(ObjectEvents):
+    """Event descriptor for :class:`InvalidAlertRuleEvent`s raised by `MetricsEndpointProvider`."""
+
+    alert_rule_status_changed = EventSource(InvalidAlertRuleEvent)
+
+
+def _validate_relation_by_interface_and_direction(
+    charm: CharmBase,
+    relation_name: str,
+    expected_relation_interface: str,
+    expected_relation_role: RelationRole,
+):
+    """Verifies that a relation has the necessary characteristics.
+
+    Verifies that the `relation_name` provided: (1) exists in metadata.yaml,
+    (2) declares as interface the interface name passed as
+    `expected_relation_interface` and (3) has the right "direction", i.e., it
+    is a relation that `charm` provides or requires.
+
+    Args:
+        charm: a `CharmBase` object to scan for the matching relation.
+        relation_name: the name of the relation to be verified.
+        expected_relation_interface: the interface name to be matched by the
+            relation named `relation_name`.
+        expected_relation_role: whether the `relation_name` must be either
+            provided or required by `charm`.
+
+    Raises:
+        RelationNotFoundError: If there is no relation in the charm's metadata.yaml
+            with the same name as provided via `relation_name` argument.
+        RelationInterfaceMismatchError: The relation with the same name as provided
+            via `relation_name` argument does not have the same relation interface
+            as specified via the `expected_relation_interface` argument.
+        RelationRoleMismatchError: If the relation with the same name as provided
+            via `relation_name` argument does not have the same role as specified
+            via the `expected_relation_role` argument.
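+
+    For example (a sketch; it assumes the charm's metadata.yaml declares a
+    `metrics-endpoint` relation with the `prometheus_scrape` interface under
+    `provides`):
+
+        _validate_relation_by_interface_and_direction(
+            charm, "metrics-endpoint", "prometheus_scrape", RelationRole.provides
+        )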
+    """
+    if relation_name not in charm.meta.relations:
+        raise RelationNotFoundError(relation_name)
+
+    relation = charm.meta.relations[relation_name]
+
+    actual_relation_interface = relation.interface_name
+    if actual_relation_interface != expected_relation_interface:
+        raise RelationInterfaceMismatchError(
+            relation_name, expected_relation_interface, actual_relation_interface
+        )
+
+    if expected_relation_role == RelationRole.provides:
+        if relation_name not in charm.meta.provides:
+            raise RelationRoleMismatchError(
+                relation_name, RelationRole.provides, RelationRole.requires
+            )
+    elif expected_relation_role == RelationRole.requires:
+        if relation_name not in charm.meta.requires:
+            raise RelationRoleMismatchError(
+                relation_name, RelationRole.requires, RelationRole.provides
+            )
+    else:
+        raise Exception("Unexpected RelationRole: {}".format(expected_relation_role))
+
+
+def _sanitize_scrape_configuration(job) -> dict:
+    """Restrict permissible scrape configuration options.
+
+    If job is empty then a default job is returned. The
+    default job is
+
+    ```
+    {
+        "metrics_path": "/metrics",
+        "static_configs": [{"targets": ["*:80"]}],
+    }
+    ```
+
+    Args:
+        job: a dict containing a single Prometheus job
+            specification.
+
+    Returns:
+        a dictionary containing a sanitized job specification.
+    """
+    sanitized_job = DEFAULT_JOB.copy()
+    sanitized_job.update({key: value for key, value in job.items() if key in ALLOWED_KEYS})
+    return sanitized_job
+
+
+class InvalidAlertRulePathError(Exception):
+    """Raised if the alert rules folder cannot be found or is otherwise invalid."""
+
+    def __init__(
+        self,
+        alert_rules_absolute_path: Path,
+        message: str,
+    ):
+        self.alert_rules_absolute_path = alert_rules_absolute_path
+        self.message = message
+
+        super().__init__(self.message)
+
+
+def _is_official_alert_rule_format(rules_dict: dict) -> bool:
+    """Check whether alert rules are in the upstream format supported by Prometheus.
+
+    Alert rules in dictionary format are in "official" form if they
+    contain a "groups" key, since this implies they contain a list of
+    alert rule groups.
+
+    Args:
+        rules_dict: a set of alert rules in Python dictionary format
+
+    Returns:
+        True if alert rules are in official Prometheus file format.
+    """
+    return "groups" in rules_dict
+
+
+def _is_single_alert_rule_format(rules_dict: dict) -> bool:
+    """Check whether alert rules are in the single rule format.
+
+    The Prometheus charm library supports reading of alert rules in a
+    custom format that consists of a single alert rule per file. This
+    does not conform to the official Prometheus alert rule file format
+    which requires that each alert rules file consists of a list of
+    alert rule groups and each group consists of a list of alert
+    rules.
+
+    Alert rules in dictionary form are considered to be in single rule
+    format if, at the least, they contain the two keys corresponding to
+    the alert rule name and alert expression.
+
+    Returns:
+        True if alert rule is in single rule file format.
+    """
+    # one alert rule per file
+    return set(rules_dict) >= {"alert", "expr"}
+
+
+class AlertRules:
+    """Utility class for amalgamating Prometheus alert rule files and injecting Juju topology.
+
+    An `AlertRules` object supports aggregating alert rules from files and directories in both
+    official and single rule file formats using the `add_path()` method. All the alert rules
+    read are annotated with Juju topology labels and amalgamated into a single data structure
+    in the form of a Python dictionary using the `as_dict()` method.
Such a dictionary can be
+    easily dumped into JSON format and exchanged over relation data. The dictionary can also
+    be dumped into YAML format and written directly into an alert rules file that is read by
+    Prometheus. Note that multiple `AlertRules` objects must not be written into the same file,
+    since Prometheus allows only a single list of alert rule groups per alert rules file.
+
+    The official Prometheus format is a YAML file conforming to the Prometheus documentation
+    (https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+    The custom single rule format is a subsection of the official YAML, having a single alert
+    rule, effectively "one alert per file".
+    """
+
+    # This class uses the following terminology for the various parts of a rule file:
+    # - alert rules file: the entire groups[] yaml, including the "groups:" key.
+    # - alert groups (plural): the list of groups[] (a list, i.e. no "groups:" key) - it is a list
+    #   of dictionaries that have the "name" and "rules" keys.
+    # - alert group (singular): a single dictionary that has the "name" and "rules" keys.
+    # - alert rules (plural): all the alerts in a given alert group - a list of dictionaries with
+    #   the "alert" and "expr" keys.
+    # - alert rule (singular): a single dictionary that has the "alert" and "expr" keys.
+
+    def __init__(self, topology: Optional[JujuTopology] = None):
+        """Build an alert rules object.
+
+        Args:
+            topology: an optional `JujuTopology` instance that is used to annotate all alert rules.
+        """
+        self.topology = topology
+        self.tool = CosTool(None)
+        self.alert_groups = []  # type: List[dict]
+
+    def _from_file(self, root_path: Path, file_path: Path) -> List[dict]:
+        """Read a rules file from path, injecting Juju topology.
+
+        Args:
+            root_path: full path to the root rules folder (used only for generating group name)
+            file_path: full path to a *.rule file.
+
+        Returns:
+            A list of dictionaries representing the rules file, if file is valid (the structure is
+            formed by `yaml.safe_load` of the file); an empty list otherwise.
+        """
+        with file_path.open() as rf:
+            # Load a list of rules from file then add labels and filters
+            try:
+                rule_file = yaml.safe_load(rf)
+
+            except Exception as e:
+                logger.error("Failed to read alert rules from %s: %s", file_path.name, e)
+                return []
+
+            if _is_official_alert_rule_format(rule_file):
+                alert_groups = rule_file["groups"]
+            elif _is_single_alert_rule_format(rule_file):
+                # convert to list of alert groups
+                # group name is made up from the file name
+                alert_groups = [{"name": file_path.stem, "rules": [rule_file]}]
+            else:
+                # invalid/unsupported
+                logger.error("Invalid rules file: %s", file_path.name)
+                return []
+
+            # update rules with additional metadata
+            for alert_group in alert_groups:
+                # update group name with topology and sub-path
+                alert_group["name"] = self._group_name(
+                    str(root_path),
+                    str(file_path),
+                    alert_group["name"],
+                )
+
+                # add "juju_" topology labels
+                for alert_rule in alert_group["rules"]:
+                    if "labels" not in alert_rule:
+                        alert_rule["labels"] = {}
+
+                    if self.topology:
+                        alert_rule["labels"].update(self.topology.label_matcher_dict)
+                        # insert juju topology filters into a prometheus alert rule
+                        alert_rule["expr"] = self.tool.inject_label_matchers(
+                            re.sub(r"%%juju_topology%%,?", "", alert_rule["expr"]),
+                            self.topology.label_matcher_dict,
+                        )
+
+        return alert_groups
+
+    def _group_name(self, root_path: str, file_path: str, group_name: str) -> str:
+        """Generate group name from path and topology.
+
+        The group name is made up of the relative path between the root dir_path, the file path,
+        and topology identifier.
+
+        Args:
+            root_path: path to the root rules dir.
+            file_path: path to rule file.
+            group_name: original group name to keep as part of the new augmented group name
+
+        Returns:
+            New group name, augmented by juju topology and relative path.
+        """
+        rel_path = os.path.relpath(os.path.dirname(file_path), root_path)
+        rel_path = "" if rel_path == "." else rel_path.replace(os.path.sep, "_")
+
+        # Generate group name:
+        #  - name, from juju topology
+        #  - suffix, from the relative path of the rule file;
+        group_name_parts = [self.topology.identifier] if self.topology else []
+        group_name_parts.extend([rel_path, group_name, "alerts"])
+        # filter to remove empty strings
+        return "_".join(filter(None, group_name_parts))
+
+    @classmethod
+    def _multi_suffix_glob(
+        cls, dir_path: Path, suffixes: List[str], recursive: bool = True
+    ) -> list:
+        """Helper function for getting all files in a directory that have a matching suffix.
+
+        Args:
+            dir_path: path to the directory to glob from.
+            suffixes: list of suffixes to include in the glob (items should begin with a period).
+            recursive: a flag indicating whether a glob is recursive (nested) or not.
+
+        Returns:
+            List of files in `dir_path` that have one of the suffixes specified in `suffixes`.
+        """
+        all_files_in_dir = dir_path.glob("**/*" if recursive else "*")
+        return list(filter(lambda f: f.is_file() and f.suffix in suffixes, all_files_in_dir))
+
+    def _from_dir(self, dir_path: Path, recursive: bool) -> List[dict]:
+        """Read all rule files in a directory.
+
+        All rules from files for the same directory are loaded into a single
+        group. The generated name of this group includes juju topology.
+        By default, only the top directory is scanned; for nested scanning, pass `recursive=True`.
+
+        Args:
+            dir_path: directory containing *.rule files (alert rules without groups).
+            recursive: flag indicating whether to scan for rule files recursively.
+
+        Returns:
+            a list of dictionaries representing prometheus alert rule groups, each dictionary
+            representing an alert group (structure determined by `yaml.safe_load`).
+        """
+        alert_groups = []  # type: List[dict]
+
+        # Gather all alerts into a list of groups
+        for file_path in self._multi_suffix_glob(dir_path, [".rule", ".rules"], recursive):
+            alert_groups_from_file = self._from_file(dir_path, file_path)
+            if alert_groups_from_file:
+                logger.debug("Reading alert rule from %s", file_path)
+                alert_groups.extend(alert_groups_from_file)
+
+        return alert_groups
+
+    def add_path(self, path: str, *, recursive: bool = False) -> None:
+        """Add rules from a dir path.
+
+        All rules from files are aggregated into a data structure representing a single rule file.
+        All group names are augmented with juju topology.
+
+        Args:
+            path: either a rules file or a dir of rules files.
+            recursive: whether to read files recursively or not (no impact if `path` is a file).
+        """
+        path = Path(path)  # type: Path
+        if path.is_dir():
+            self.alert_groups.extend(self._from_dir(path, recursive))
+        elif path.is_file():
+            self.alert_groups.extend(self._from_file(path.parent, path))
+        else:
+            logger.debug("Alert rules path does not exist: %s", path)
+
+    def as_dict(self) -> dict:
+        """Return standard alert rules file in dict representation.
+
+        Returns:
+            a dictionary containing a single list of alert rule groups.
+            The list of alert rule groups is provided as the value of the
+            "groups" dictionary key.
+        """
+        return {"groups": self.alert_groups} if self.alert_groups else {}
+
+
+class TargetsChangedEvent(EventBase):
+    """Event emitted when Prometheus scrape targets change."""
+
+    def __init__(self, handle, relation_id):
+        super().__init__(handle)
+        self.relation_id = relation_id
+
+    def snapshot(self):
+        """Save scrape target relation information."""
+        return {"relation_id": self.relation_id}
+
+    def restore(self, snapshot):
+        """Restore scrape target relation information."""
+        self.relation_id = snapshot["relation_id"]
+
+
+class MonitoringEvents(ObjectEvents):
+    """Event descriptor for events raised by `MetricsEndpointConsumer`."""
+
+    targets_changed = EventSource(TargetsChangedEvent)
+
+
+class MetricsEndpointConsumer(Object):
+    """A Prometheus-based monitoring service."""
+
+    on = MonitoringEvents()
+
+    def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
+        """A Prometheus-based monitoring service.
+
+        Args:
+            charm: a `CharmBase` instance that manages this
+                instance of the Prometheus service.
+            relation_name: an optional string name of the relation between `charm`
+                and the Prometheus charmed service. The default is "metrics-endpoint".
+                It is strongly advised not to change the default, so that people
+                deploying your charm will have a consistent experience with all
+                other charms that consume metrics endpoints.
+
+        Raises:
+            RelationNotFoundError: If there is no relation in the charm's metadata.yaml
+                with the same name as provided via `relation_name` argument.
+            RelationInterfaceMismatchError: The relation with the same name as provided
+                via `relation_name` argument does not have the `prometheus_scrape` relation
+                interface.
+            RelationRoleMismatchError: If the relation with the same name as provided
+                via `relation_name` argument does not have the `RelationRole.requires`
+                role.
+        """
+        _validate_relation_by_interface_and_direction(
+            charm, relation_name, RELATION_INTERFACE_NAME, RelationRole.requires
+        )
+
+        super().__init__(charm, relation_name)
+        self._charm = charm
+        self._relation_name = relation_name
+        self._tool = CosTool(self._charm)
+        events = self._charm.on[relation_name]
+        self.framework.observe(events.relation_changed, self._on_metrics_provider_relation_changed)
+        self.framework.observe(
+            events.relation_departed, self._on_metrics_provider_relation_departed
+        )
+
+    def _on_metrics_provider_relation_changed(self, event):
+        """Handle changes with related metrics providers.
+
+        Any time there are changes in relations between Prometheus
+        and metrics provider charms, the Prometheus charm is informed
+        through a `TargetsChangedEvent` event. The Prometheus charm can
+        then choose to update its scrape configuration.
+
+        Args:
+            event: a `CharmEvent` in response to which the Prometheus
+                charm must update its scrape configuration.
+        """
+        rel_id = event.relation.id
+
+        self.on.targets_changed.emit(relation_id=rel_id)
+
+    def _on_metrics_provider_relation_departed(self, event):
+        """Update job config when a metrics provider departs.
+
+        When a metrics provider departs, the Prometheus charm is informed
+        through a `TargetsChangedEvent` event so that it can update its
+        scrape configuration to ensure that the departed metrics provider
+        is removed from the list of scrape jobs.
+
+        Args:
+            event: a `CharmEvent` that indicates a metrics provider
+                unit has departed.
+        """
+        rel_id = event.relation.id
+        self.on.targets_changed.emit(relation_id=rel_id)
+
+    def jobs(self) -> list:
+        """Fetch the list of scrape jobs.
+
+        Returns:
+            A list consisting of all the static scrape configurations
+            for each related `MetricsEndpointProvider` that has specified
+            its scrape targets.
+        """
+        scrape_jobs = []
+
+        for relation in self._charm.model.relations[self._relation_name]:
+            static_scrape_jobs = self._static_scrape_config(relation)
+            if static_scrape_jobs:
+                scrape_jobs.extend(static_scrape_jobs)
+
+        scrape_jobs = _dedupe_job_names(scrape_jobs)
+
+        return scrape_jobs
+
+    def alerts(self) -> dict:
+        """Fetch alerts for all relations.
+
+        A Prometheus alert rules file consists of a list of "groups". Each
+        group consists of a list of alerts (`rules`) that are sequentially
+        executed. This method returns all the alert rules provided by each
+        related metrics provider charm. These rules may be used to generate a
+        separate alert rules file for each relation, since the returned lists
+        of alert groups are indexed by each relation's Juju topology identifier.
+        The Juju topology identifier string includes substrings that identify
+        alert rule related metadata such as the Juju model, model UUID and the
+        application name from where the alert rule originates. Since this
+        topology identifier is globally unique, it may be used for instance as
+        the name for the file into which the list of alert rule groups is
+        written. For each relation, the structure of data returned is a dictionary
+        representation of a standard Prometheus rules file:
+
+        {"groups": [{"name": ...}, ...]}
+
+        per the official Prometheus documentation
+        https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
+
+        The value of the `groups` key is such that it may be used to generate
+        a Prometheus alert rules file directly using `yaml.dump` but the
+        `groups` key itself must be included as this is required by Prometheus.
+
+        For example, the list of alert rule groups returned by this method may
+        be written into files consumed by Prometheus as follows
+
+        ```
+        for topology_identifier, alert_rule_groups in self.metrics_consumer.alerts().items():
+            filename = "juju_" + topology_identifier + ".rules"
+            path = os.path.join(PROMETHEUS_RULES_DIR, filename)
+            rules = yaml.dump(alert_rule_groups)
+            container.push(path, rules, make_dirs=True)
+        ```
+
+        Returns:
+            A dictionary mapping the Juju topology identifier of the source charm to
+            its list of alert rule groups.
+        """
+        alerts = {}  # type: Dict[str, dict] # mapping b/w juju identifiers and alert rule files
+        for relation in self._charm.model.relations[self._relation_name]:
+            if not relation.units or not relation.app:
+                continue
+
+            alert_rules = json.loads(relation.data[relation.app].get("alert_rules", "{}"))
+            if not alert_rules:
+                continue
+
+            identifier = None
+            try:
+                scrape_metadata = json.loads(relation.data[relation.app]["scrape_metadata"])
+                identifier = JujuTopology.from_dict(scrape_metadata).identifier
+                alerts[identifier] = self._tool.apply_label_matchers(alert_rules)
+
+            except KeyError as e:
+                logger.debug(
+                    "Relation %s has no 'scrape_metadata': %s",
+                    relation.id,
+                    e,
+                )
+                identifier = self._get_identifier_by_alert_rules(alert_rules)
+
+                if not identifier:
+                    logger.error(
+                        "Alert rules were found but no usable group or identifier was present"
+                    )
+                    continue
+
+                # store the rules unmodified only in this fallback path, so the
+                # label-matched rules set above are not overwritten
+                alerts[identifier] = alert_rules
+
+            _, errmsg = self._tool.validate_alert_rules(alert_rules)
+            if errmsg:
+                if alerts[identifier]:
+                    del alerts[identifier]
+                relation.data[self._charm.app]["event"] = json.dumps({"errors": errmsg})
+                continue
+
+        return alerts
+
+    def _get_identifier_by_alert_rules(self, rules: dict) -> Union[str, None]:
+        """Determine an appropriate dict key for alert rules.
+
+        The key is used as the filename when writing alerts to disk, so the structure
+        and uniqueness are important.
+
+        Args:
+            rules: a dict of alert rules
+        """
+        if "groups" not in rules:
+            logger.debug("No alert groups were found in relation data")
+            return None
+
+        # Construct an ID based on what's in the alert rules if they have labels
+        for group in rules["groups"]:
+            try:
+                labels = group["rules"][0]["labels"]
+                identifier = "{}_{}_{}".format(
+                    labels["juju_model"],
+                    labels["juju_model_uuid"],
+                    labels["juju_application"],
+                )
+                return identifier
+            except KeyError:
+                logger.debug("Alert rules were found but no usable labels were present")
+                continue
+
+        logger.warning(
+            "No labeled alert rules were found, and no 'scrape_metadata' "
+            "was available. Using the alert group name as filename."
+        )
+        try:
+            for group in rules["groups"]:
+                return group["name"]
+        except KeyError:
+            logger.debug("No group name was found to use as identifier")
+
+        return None
+
+    def _static_scrape_config(self, relation) -> list:
+        """Generate the static scrape configuration for a single relation.
+
+        If the relation data includes `scrape_metadata` then the value
+        of this key is used to annotate the scrape jobs with Juju
+        Topology labels before returning them.
+
+        Args:
+            relation: an `ops.model.Relation` object whose static
+                scrape configuration is required.
+
+        Returns:
+            A list (possibly empty) of scrape jobs. Each job is a
+            valid Prometheus scrape configuration for that job,
+            represented as a Python dictionary.
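+
+        For example, given application relation data (a sketch) of
+
+            {"scrape_jobs": '[{"static_configs": [{"targets": ["*:8080"]}]}]'}
+
+        this method returns a single job whose `job_name` is prefixed with
+        the provider's Juju topology identifier and whose targets are the
+        advertised unit addresses on port 8080.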
+        """
+        if not relation.units:
+            return []
+
+        scrape_jobs = json.loads(relation.data[relation.app].get("scrape_jobs", "[]"))
+
+        if not scrape_jobs:
+            return []
+
+        scrape_metadata = json.loads(relation.data[relation.app].get("scrape_metadata", "{}"))
+
+        if not scrape_metadata:
+            return scrape_jobs
+
+        job_name_prefix = "juju_{}_prometheus_scrape".format(
+            JujuTopology.from_dict(scrape_metadata).identifier
+        )
+        hosts = self._relation_hosts(relation)
+
+        labeled_job_configs = []
+        for job in scrape_jobs:
+            config = self._labeled_static_job_config(
+                _sanitize_scrape_configuration(job),
+                job_name_prefix,
+                hosts,
+                scrape_metadata,
+            )
+            labeled_job_configs.append(config)
+
+        return labeled_job_configs
+
+    def _relation_hosts(self, relation) -> dict:
+        """Fetch unit names and addresses of all metrics provider units for a single relation.
+
+        Args:
+            relation: An `ops.model.Relation` object for which the unit name to
+                address mapping is required.
+
+        Returns:
+            A dictionary that maps unit names to unit addresses for
+            the specified relation.
+        """
+        hosts = {}
+        for unit in relation.units:
+            # TODO deprecate and remove unit.name
+            unit_name = relation.data[unit].get("prometheus_scrape_unit_name") or unit.name
+            # TODO deprecate and remove "prometheus_scrape_host"
+            unit_address = relation.data[unit].get(
+                "prometheus_scrape_unit_address"
+            ) or relation.data[unit].get("prometheus_scrape_host")
+            if unit_name and unit_address:
+                hosts.update({unit_name: unit_address})
+
+        return hosts
+
+    def _labeled_static_job_config(self, job, job_name_prefix, hosts, scrape_metadata) -> dict:
+        """Construct labeled job configuration for a single job.
+
+        Args:
+            job: a dictionary representing the job configuration as obtained from
+                `MetricsEndpointProvider` over relation data.
+            job_name_prefix: a string used as the job name if the job has no
+                associated name, or as a prefix for the job name if it does.
+            hosts: a dictionary mapping host names to host addresses for
+                all units of the relation for which this job configuration
+                must be constructed.
+            scrape_metadata: scrape configuration metadata obtained
+                from `MetricsEndpointProvider` from the same relation for
+                which this job configuration is being constructed.
+
+        Returns:
+            A dictionary representing a Prometheus job configuration
+            for a single job.
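+
+        For example (a sketch), a job with static config
+        `{"targets": ["*:8080"]}` and two provider units yields a config
+        whose targets are the two unit addresses on port 8080, each
+        labeled with `juju_model`, `juju_model_uuid`, `juju_application`
+        and `juju_unit`.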
+        """
+        name = job.get("job_name")
+        job_name = "{}_{}".format(job_name_prefix, name) if name else job_name_prefix
+
+        labeled_job = job.copy()
+        labeled_job["job_name"] = job_name
+
+        static_configs = job.get("static_configs")
+        labeled_job["static_configs"] = []
+
+        # relabel instance labels so that instance identifiers are globally unique and
+        # stable over unit recreation
+        instance_relabel_config = {
+            "source_labels": ["juju_model", "juju_model_uuid", "juju_application"],
+            "separator": "_",
+            "target_label": "instance",
+            "regex": "(.*)",
+        }
+
+        # label all static configs in the Prometheus job
+        # labeling inserts Juju topology information and
+        # sets a relabel config for instance labels
+        for static_config in static_configs:
+            labels = static_config.get("labels", {}) if static_configs else {}
+            all_targets = static_config.get("targets", [])
+
+            # split all targets into those which will have unit labels
+            # and those which will not
+            ports = []
+            unitless_targets = []
+            for target in all_targets:
+                host, port = self._target_parts(target)
+                if host.strip() == "*":
+                    ports.append(port.strip())
+                else:
+                    unitless_targets.append(target)
+
+            # label scrape targets that do not have unit labels
+            if unitless_targets:
+                unitless_config = self._labeled_unitless_config(
+                    unitless_targets, labels, scrape_metadata
+                )
+                labeled_job["static_configs"].append(unitless_config)
+
+            # label scrape targets that do have unit labels
+            for host_name, host_address in hosts.items():
+                static_config = self._labeled_unit_config(
+                    host_name, host_address, ports, labels, scrape_metadata
+                )
+                labeled_job["static_configs"].append(static_config)
+                if "juju_unit" not in instance_relabel_config["source_labels"]:
+                    instance_relabel_config["source_labels"].append("juju_unit")  # type: ignore
+
+        # ensure topology relabeling of instance label is last in order of relabelings
+        relabel_configs = job.get("relabel_configs", [])
+        relabel_configs.append(instance_relabel_config)
+        labeled_job["relabel_configs"] = relabel_configs
+
+        return labeled_job
+
+    def _target_parts(self, target) -> list:
+        """Extract host and port from a wildcard target.
+
+        Args:
+            target: a string specifying a scrape target. A
+                scrape target is expected to have the format
+                "host:port". The host part may be a wildcard
+                "*" and the port part can be missing (along
+                with ":") in which case port is set to 80.
+
+        Returns:
+            a list with target host and port as in [host, port]
+        """
+        if ":" in target:
+            parts = target.split(":")
+        else:
+            parts = [target, "80"]
+
+        return parts
+
+    def _set_juju_labels(self, labels, scrape_metadata) -> dict:
+        """Create a copy of metric labels with Juju topology information.
+
+        Args:
+            labels: a dictionary containing Prometheus metric labels.
+            scrape_metadata: scrape related metadata provided by
+                `MetricsEndpointProvider`.
+
+        Returns:
+            a copy of the `labels` dictionary augmented with Juju
+            topology information with the exception of unit name.
+        """
+        juju_labels = labels.copy()  # deep copy not needed
+        juju_labels.update(JujuTopology.from_dict(scrape_metadata).label_matcher_dict)
+
+        return juju_labels
+
+    def _labeled_unitless_config(self, targets, labels, scrape_metadata) -> dict:
+        """Static scrape configuration for fully qualified host addresses.
+
+        Fully qualified hosts are those scrape targets for which the
+        addresses are specified by the `MetricsEndpointProvider` as part
+        of the scrape job specification set in application relation data.
+        The addresses specified need not belong to any unit of the
+        `MetricsEndpointProvider` charm. As a result there is no reliable
+        way to determine the name (Juju topology unit name) for such a
+        target.
+
+        Args:
+            targets: a list of addresses of fully qualified hosts.
+            labels: labels specified by `MetricsEndpointProvider` clients
+                which are associated with `targets`.
+            scrape_metadata: scrape related metadata provided by `MetricsEndpointProvider`.
+
+        Returns:
+            A dictionary containing the static scrape configuration
+            for a list of fully qualified hosts.
+        """
+        juju_labels = self._set_juju_labels(labels, scrape_metadata)
+        unitless_config = {"targets": targets, "labels": juju_labels}
+        return unitless_config
+
+    def _labeled_unit_config(
+        self, unit_name, host_address, ports, labels, scrape_metadata
+    ) -> dict:
+        """Static scrape configuration for a wildcard host.
+
+        Wildcard hosts are those scrape targets whose name (Juju unit
+        name) and address (unit IP address) are set into unit relation
+        data by the `MetricsEndpointProvider` charm, which sets this
+        data for ALL its units.
+
+        Args:
+            unit_name: a string representing the unit name of the wildcard host.
+            host_address: a string representing the address of the wildcard host.
+            ports: list of ports on which this wildcard host exposes its metrics.
+            labels: a dictionary of labels provided by
+                `MetricsEndpointProvider` intended to be associated with
+                this wildcard host.
+            scrape_metadata: scrape related metadata provided by `MetricsEndpointProvider`.
+
+        Returns:
+            A dictionary containing the static scrape configuration
+            for a single wildcard host.
+        """
+        juju_labels = self._set_juju_labels(labels, scrape_metadata)
+
+        juju_labels["juju_unit"] = unit_name
+
+        static_config = {"labels": juju_labels}
+
+        if ports:
+            targets = []
+            for port in ports:
+                targets.append("{}:{}".format(host_address, port))
+            static_config["targets"] = targets  # type: ignore
+        else:
+            static_config["targets"] = [host_address]  # type: ignore
+
+        return static_config
+
+
+def _dedupe_job_names(jobs: List[dict]):
+    """Deduplicate a list of dicts by appending a hash to the value of the 'job_name' key.
+
+    Additionally, fully deduplicate any identical jobs.
+
+    Args:
+        jobs: A list of prometheus scrape jobs
+    """
+    jobs_copy = copy.deepcopy(jobs)
+
+    # Convert to a dict with job names as keys
+    # I think this line is O(n^2) but it should be okay given the list sizes
+    jobs_dict = {
+        job["job_name"]: list(filter(lambda x: x["job_name"] == job["job_name"], jobs_copy))
+        for job in jobs_copy
+    }
+
+    # If multiple jobs have the same name, convert the name to "name_<hash>"
+    for key in jobs_dict:
+        if len(jobs_dict[key]) > 1:
+            for job in jobs_dict[key]:
+                job_json = json.dumps(job)
+                hashed = hashlib.sha256(job_json.encode()).hexdigest()
+                job["job_name"] = "{}_{}".format(job["job_name"], hashed)
+    new_jobs = []
+    for key in jobs_dict:
+        new_jobs.extend([i for i in jobs_dict[key]])
+
+    # Deduplicate jobs which are equal
+    # Again this is O(n^2) but it should be okay
+    deduped_jobs = []
+    seen = []
+    for job in new_jobs:
+        job_json = json.dumps(job)
+        hashed = hashlib.sha256(job_json.encode()).hexdigest()
+        if hashed in seen:
+            continue
+        seen.append(hashed)
+        deduped_jobs.append(job)
+
+    return deduped_jobs
+
+
+def _resolve_dir_against_charm_path(charm: CharmBase, *path_elements: str) -> str:
+    """Resolve the provided path items against the directory of the main file.
+
+    Look up the directory of the `main.py` file being executed.
This is normally
+    going to be the charm.py file of the charm including this library. Then, resolve
+    the provided path elements and, if the result path exists and is a directory,
+    return its absolute path; otherwise, raise an exception.
+
+    Raises:
+        InvalidAlertRulePathError, if the path does not exist or is not a directory.
+    """
+    charm_dir = Path(str(charm.charm_dir))
+    if not charm_dir.exists() or not charm_dir.is_dir():
+        # Operator Framework does not currently expose a robust
+        # way to determine the top level charm source directory
+        # that is consistent across deployed charms and unit tests
+        # Hence for unit tests the current working directory is used
+        # TODO: update this logic when the following ticket is resolved
+        # https://github.com/canonical/operator/issues/643
+        charm_dir = Path(os.getcwd())
+
+    alerts_dir_path = charm_dir.absolute().joinpath(*path_elements)
+
+    if not alerts_dir_path.exists():
+        raise InvalidAlertRulePathError(alerts_dir_path, "directory does not exist")
+    if not alerts_dir_path.is_dir():
+        raise InvalidAlertRulePathError(alerts_dir_path, "is not a directory")
+
+    return str(alerts_dir_path)
+
+
+class MetricsEndpointProvider(Object):
+    """A metrics endpoint for Prometheus."""
+
+    on = MetricsEndpointProviderEvents()
+
+    def __init__(
+        self,
+        charm,
+        relation_name: str = DEFAULT_RELATION_NAME,
+        jobs=None,
+        alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH,
+        refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
+    ):
+        """Construct a metrics provider for a Prometheus charm.
+
+        If your charm exposes a Prometheus metrics endpoint, the
+        `MetricsEndpointProvider` object enables your charm to easily
+        communicate how to reach that metrics endpoint.
+
+        By default, a charm instantiating this object has the metrics
+        endpoints of each of its units scraped by the related Prometheus
+        charms. The scraped metrics are automatically tagged by the
+        Prometheus charms with Juju topology data via the
+        `juju_model`, `juju_model_uuid`, `juju_application`
+        and `juju_unit` labels. To support such tagging `MetricsEndpointProvider`
+        automatically forwards scrape metadata to a `MetricsEndpointConsumer`
+        (Prometheus charm).
+
+        Scrape targets provided by `MetricsEndpointProvider` can be
+        customized when instantiating this object. For example, in the
+        case of a charm exposing the metrics endpoint for each of its
+        units on port 8080 and the `/metrics` path, the
+        `MetricsEndpointProvider` can be instantiated as follows:
+
+            self.metrics_endpoint_provider = MetricsEndpointProvider(
+                self,
+                jobs=[{
+                    "static_configs": [{"targets": ["*:8080"]}],
+                }])
+
+        The notation `*:<port>` means "scrape each unit of this charm on port
+        `<port>`".
+
+        In case the metrics endpoints are not on the standard `/metrics` path,
+        a custom path can be specified as follows:
+
+            self.metrics_endpoint_provider = MetricsEndpointProvider(
+                self,
+                jobs=[{
+                    "metrics_path": "/my/strange/metrics/path",
+                    "static_configs": [{"targets": ["*:8080"]}],
+                }])
+
+        Note how the `jobs` argument is a list: this allows you to expose multiple
+        combinations of paths "metrics_path" and "static_configs" in case your charm
+        exposes multiple endpoints, which could happen, for example, when you have
+        multiple workload containers, with applications in each needing to be scraped.
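+
+        For example (a sketch; job names and ports are illustrative), a charm
+        with two workload containers exposing metrics on different ports might
+        pass:
+
+            self.metrics_endpoint_provider = MetricsEndpointProvider(
+                self,
+                jobs=[
+                    {"job_name": "web", "static_configs": [{"targets": ["*:8080"]}]},
+                    {"job_name": "worker", "static_configs": [{"targets": ["*:9090"]}]},
+                ])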
+
+        The structure of the objects in the `jobs` list is one-to-one with the
+        `scrape_config` configuration item of Prometheus' own configuration (see
+        https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
+        ), but with only a subset of the fields allowed. The permitted fields are
+        listed in the `ALLOWED_KEYS` object in this charm library module.
+
+        It is also possible to specify alert rules. By default, this library will look
+        into the directory given by `alert_rules_path`, which in a standard charm
+        layout resolves to `src/prometheus_alert_rules`. Each alert rule goes into a
+        separate `*.rule` file. If the syntax of a rule is invalid,
+        the `MetricsEndpointProvider` logs an error and does not load the particular
+        rule.
+
+        To avoid false positives and negatives in the evaluation of alert rules,
+        all ingested alert rule expressions are automatically qualified using Juju
+        Topology filters. This ensures that alert rules provided by your charm trigger
+        alerts based only on data scraped from your charm. For example, an alert rule
+        such as the following
+
+            alert: UnitUnavailable
+            expr: up < 1
+            for: 0m
+
+        will be automatically transformed into something along the lines of the following
+
+            alert: UnitUnavailable
+            expr: up{juju_model=<model_name>, juju_model_uuid=<model_uuid>, juju_application=<app_name>} < 1
+            for: 0m
+
+        An attempt will be made to validate alert rules prior to loading them into Prometheus.
+        If they are invalid, an event will be emitted from this object which charms can respond
+        to in order to set a meaningful status for administrators.
+
+        This can be observed via the provider's `on.alert_rule_status_changed` event, which contains:
+            - The error(s) encountered when validating as `errors`
+            - A `valid` attribute, which can be used to reset the state of charms if alert rules
+              are updated via another mechanism (e.g. `cos-config`) and refreshed.
+
+        Args:
+            charm: a `CharmBase` object that manages this
+                `MetricsEndpointProvider` object. Typically this is
+                `self` in the instantiating class.
+            relation_name: an optional string name of the relation between `charm`
+                and the Prometheus charmed service. The default is "metrics-endpoint".
+                It is strongly advised not to change the default, so that people
+                deploying your charm will have a consistent experience with all
+                other charms that provide metrics endpoints.
+            jobs: an optional list of dictionaries where each
+                dictionary represents the Prometheus scrape
+                configuration for a single job. When not provided, a
+                default scrape configuration is provided for the
+                `/metrics` endpoint polling all units of the charm on port `80`
+                using the `MetricsEndpointProvider` object.
+            alert_rules_path: an optional path for the location of alert rules
+                files. Defaults to "./src/prometheus_alert_rules",
+                resolved relative to the directory hosting the charm entry file.
+                The alert rules are automatically updated on charm upgrade.
+            refresh_event: an optional bound event or list of bound events which
+                will be observed to re-set scrape job data (IP address and others).
+
+        Raises:
+            RelationNotFoundError: If there is no relation in the charm's metadata.yaml
+                with the same name as provided via `relation_name` argument.
+            RelationInterfaceMismatchError: The relation with the same name as provided
+                via `relation_name` argument does not have the `prometheus_scrape` relation
+                interface.
+            RelationRoleMismatchError: If the relation with the same name as provided
+                via `relation_name` argument does not have the `RelationRole.provides`
+                role.
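+
+        For example, the parent charm may react to rule-validation feedback
+        as follows (a sketch; the handler name is illustrative):
+
+            self.framework.observe(
+                self.metrics_endpoint_provider.on.alert_rule_status_changed,
+                self._on_alert_rule_status_changed,
+            )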
+        """
+        _validate_relation_by_interface_and_direction(
+            charm, relation_name, RELATION_INTERFACE_NAME, RelationRole.provides
+        )
+
+        try:
+            alert_rules_path = _resolve_dir_against_charm_path(charm, alert_rules_path)
+        except InvalidAlertRulePathError as e:
+            logger.debug(
+                "Invalid Prometheus alert rules folder at %s: %s",
+                e.alert_rules_absolute_path,
+                e.message,
+            )
+
+        super().__init__(charm, relation_name)
+        self.topology = JujuTopology.from_charm(charm)
+
+        self._charm = charm
+        self._alert_rules_path = alert_rules_path
+        self._relation_name = relation_name
+        # sanitize job configurations to the supported subset of parameters
+        jobs = [] if jobs is None else jobs
+        self._jobs = [_sanitize_scrape_configuration(job) for job in jobs]
+
+        events = self._charm.on[self._relation_name]
+        self.framework.observe(events.relation_joined, self._set_scrape_job_spec)
+        self.framework.observe(events.relation_changed, self._on_relation_changed)
+
+        if not refresh_event:
+            if len(self._charm.meta.containers) == 1:
+                if "kubernetes" in self._charm.meta.series:
+                    # This is a podspec charm
+                    refresh_event = [self._charm.on.update_status]
+                else:
+                    # This is a sidecar/pebble charm
+                    container = list(self._charm.meta.containers.values())[0]
+                    refresh_event = [self._charm.on[container.name.replace("-", "_")].pebble_ready]
+            else:
+                logger.warning(
+                    "%d containers are present in metadata.yaml and "
+                    "refresh_event was not specified. Defaulting to update_status. "
+                    "Metrics IP may not be set in a timely fashion.",
+                    len(self._charm.meta.containers),
+                )
+                refresh_event = [self._charm.on.update_status]
+
+        else:
+            if not isinstance(refresh_event, list):
+                refresh_event = [refresh_event]
+
+        for ev in refresh_event:
+            self.framework.observe(ev, self._set_unit_ip)
+
+        self.framework.observe(self._charm.on.upgrade_charm, self._set_scrape_job_spec)
+
+        # If there is no leader during relation_joined we will still need to set alert rules.
+        self.framework.observe(self._charm.on.leader_elected, self._set_scrape_job_spec)
+
+    def _on_relation_changed(self, event):
+        """Check for alert rule messages in the relation data before moving on."""
+        if self._charm.unit.is_leader():
+            ev = json.loads(event.relation.data[event.app].get("event", "{}"))
+
+            if ev:
+                valid = bool(ev.get("valid", True))
+                errors = ev.get("errors", "")
+
+                if valid and not errors:
+                    self.on.alert_rule_status_changed.emit(valid=valid)
+                else:
+                    self.on.alert_rule_status_changed.emit(valid=valid, errors=errors)
+
+        self._set_scrape_job_spec(event)
+
+    def _set_scrape_job_spec(self, event):
+        """Ensure scrape target information is made available to Prometheus.
+
+        When a metrics provider charm is related to a Prometheus charm, the
+        metrics provider sets specification and metadata related to its own
+        scrape configuration. This information is set using Juju application
+        data. In addition, each unit of the metrics provider charm also sets
+        its own host address in Juju unit relation data.
+        """
+        self._set_unit_ip(event)
+
+        if not self._charm.unit.is_leader():
+            return
+
+        alert_rules = AlertRules(topology=self.topology)
+        alert_rules.add_path(self._alert_rules_path, recursive=True)
+        alert_rules_as_dict = alert_rules.as_dict()
+
+        for relation in self._charm.model.relations[self._relation_name]:
+            relation.data[self._charm.app]["scrape_metadata"] = json.dumps(self._scrape_metadata)
+            relation.data[self._charm.app]["scrape_jobs"] = json.dumps(self._scrape_jobs)
+
+            if alert_rules_as_dict:
+                # Update relation data with the string representation of the rule file.
+                # Juju topology is already included in the "scrape_metadata" field above.
+                # The consumer side of the relation uses this information to name the rules file
+                # that is written to the filesystem.
+                relation.data[self._charm.app]["alert_rules"] = json.dumps(alert_rules_as_dict)
+
+    def _set_unit_ip(self, _):
+        """Set unit host address.
+
+        Each time a metrics provider charm container is restarted it updates its own
+        host address in the unit relation data for the Prometheus charm.
+
+        The only argument is an event, and it is ignored. This is for expediency,
+        to be able to use this method as an event handler, although no access to the
+        event is actually needed.
+        """
+        for relation in self._charm.model.relations[self._relation_name]:
+            unit_ip = str(self._charm.model.get_binding(relation).network.bind_address)
+            relation.data[self._charm.unit]["prometheus_scrape_unit_address"] = (
+                unit_ip if self._is_valid_unit_address(unit_ip) else socket.getfqdn()
+            )
+
+            relation.data[self._charm.unit]["prometheus_scrape_unit_name"] = str(
+                self._charm.model.unit.name
+            )
+
+    def _is_valid_unit_address(self, address: str) -> bool:
+        """Validate a unit address.
+
+        At present only IP address validation is supported, but
+        this may be extended to DNS addresses also, as needed.
+
+        Args:
+            address: a string representing a unit address
+        """
+        try:
+            _ = ipaddress.ip_address(address)
+        except ValueError:
+            return False
+
+        return True
+
+    @property
+    def _scrape_jobs(self) -> list:
+        """Fetch list of scrape jobs.
+
+        Returns:
+            A list of dictionaries, where each dictionary specifies a
+            single scrape job for Prometheus.
+        """
+        return self._jobs if self._jobs else [DEFAULT_JOB]
+
+    @property
+    def _scrape_metadata(self) -> dict:
+        """Generate scrape metadata.
+
+        Returns:
+            Scrape configuration metadata for this metrics provider charm.
+        """
+        return self.topology.as_dict()
+
+
+class PrometheusRulesProvider(Object):
+    """Forward rules to Prometheus.
+
+    This object may be used to forward rules to Prometheus. At present it only supports
+    forwarding alert rules. This is unlike :class:`MetricsEndpointProvider`, which
+    is used for forwarding both scrape targets and associated alert rules. This object
+    is typically used when there is a desire to forward rules that apply globally (across
+    all deployed charms and units) rather than to a single charm. All rule files are
+    forwarded using the same 'prometheus_scrape' interface that is also used by
+    `MetricsEndpointProvider`.
+
+    Args:
+        charm: A charm instance that `provides` a relation with the `prometheus_scrape` interface.
+        relation_name: Name of the relation in `metadata.yaml` that
+            has the `prometheus_scrape` interface.
+        dir_path: Root directory for the collection of rule files.
+        recursive: Whether or not to scan for rule files recursively.
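+
+    For example (a sketch; the path shown is the library default):
+
+        self.rules_provider = PrometheusRulesProvider(
+            self, dir_path="./src/prometheus_alert_rules", recursive=True
+        )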
+    """
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str = DEFAULT_RELATION_NAME,
+        dir_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH,
+        recursive=True,
+    ):
+        super().__init__(charm, relation_name)
+        self._charm = charm
+        self._relation_name = relation_name
+        self._recursive = recursive
+
+        try:
+            dir_path = _resolve_dir_against_charm_path(charm, dir_path)
+        except InvalidAlertRulePathError as e:
+            logger.debug(
+                "Invalid Prometheus alert rules folder at %s: %s",
+                e.alert_rules_absolute_path,
+                e.message,
+            )
+        self.dir_path = dir_path
+
+        events = self._charm.on[self._relation_name]
+        event_sources = [
+            events.relation_joined,
+            events.relation_changed,
+            self._charm.on.leader_elected,
+            self._charm.on.upgrade_charm,
+        ]
+
+        for event_source in event_sources:
+            self.framework.observe(event_source, self._update_relation_data)
+
+    def _reinitialize_alert_rules(self):
+        """Reload alert rules and update all relations."""
+        self._update_relation_data(None)
+
+    def _update_relation_data(self, _):
+        """Update application relation data with alert rules for all relations."""
+        if not self._charm.unit.is_leader():
+            return
+
+        alert_rules = AlertRules()
+        alert_rules.add_path(self.dir_path, recursive=self._recursive)
+        alert_rules_as_dict = alert_rules.as_dict()
+
+        logger.info("Updating relation data with rule files from disk")
+        for relation in self._charm.model.relations[self._relation_name]:
+            relation.data[self._charm.app]["alert_rules"] = json.dumps(
+                alert_rules_as_dict,
+                sort_keys=True,  # sort, to prevent unnecessary relation_changed events
+            )
+
+
+class MetricsEndpointAggregator(Object):
+    """Aggregate metrics from multiple scrape targets.
+
+    `MetricsEndpointAggregator` collects scrape target information from one
+    or more related charms and forwards this to a `MetricsEndpointConsumer`
+    charm, which may be in a different Juju model. However, it is
+    essential that `MetricsEndpointAggregator` itself resides in the same
+    model as its scrape targets, as this is currently the only way to
+    ensure in Juju that the `MetricsEndpointAggregator` will be able to
+    determine the model name and uuid of the scrape targets.
+
+    `MetricsEndpointAggregator` should be used in place of
+    `MetricsEndpointProvider` in the following two use cases:
+
+    1. Integrating one or more scrape targets that do not support the
+    `prometheus_scrape` interface.
+
+    2. Integrating one or more scrape targets through cross model
+    relations, although the
+    [Prometheus Scrape Config](https://charmhub.io/prometheus-scrape-config-k8s)
+    charm may also be used for the purpose of supporting cross model
+    relations.
+
+    Using `MetricsEndpointAggregator` to build a Prometheus charm client
+    only requires instantiating it. Instantiating
+    `MetricsEndpointAggregator` is similar to `MetricsEndpointProvider` except
+    that it requires specifying the names of three relations: the
+    relation with scrape targets, the relation for alert rules, and
+    that with the Prometheus charms. For example
+
+    ```python
+    self._aggregator = MetricsEndpointAggregator(
+        self,
+        {
+            "prometheus": "monitoring",
+            "scrape_target": "prometheus-target",
+            "alert_rules": "prometheus-rules"
+        }
+    )
+    ```
+
+    `MetricsEndpointAggregator` assumes that each unit of a scrape target
+    sets in its unit-level relation data two entries with keys
+    "hostname" and "port".
If it is required to integrate with charms
+    that do not honor these assumptions, it is always possible to
+    derive from `MetricsEndpointAggregator`, overriding the `_get_targets()`
+    method, which is responsible for aggregating the unit name, host
+    address ("hostname") and port of the scrape target.
+
+    `MetricsEndpointAggregator` also assumes that each unit of a
+    scrape target sets in its unit-level relation data a key named
+    "groups". The value of this key is expected to be the string
+    representation of a list of Prometheus alert rules in YAML format.
+    An example of a single such alert rule is
+
+    ```yaml
+    - alert: HighRequestLatency
+      expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
+      for: 10m
+      labels:
+        severity: page
+      annotations:
+        summary: High request latency
+    ```
+
+    Once again, if it is required to integrate with charms that do not
+    honor these assumptions about alert rules, then an object derived
+    from `MetricsEndpointAggregator` may be used, overriding the
+    `_get_alert_rules()` method.
+
+    `MetricsEndpointAggregator` ensures that Prometheus scrape job
+    specifications and alert rules are annotated with Juju topology
+    information, just like `MetricsEndpointProvider` and
+    `MetricsEndpointConsumer` do.
+
+    By default `MetricsEndpointAggregator` ensures that Prometheus
+    "instance" labels refer to Juju topology. This ensures that
+    instance labels are stable over unit recreation. While it is not
+    advisable to change this option, if required it can be done by
+    setting the "relabel_instance" keyword argument to `False` when
+    constructing an aggregator object.
+    """
+
+    def __init__(self, charm, relation_names, relabel_instance=True):
+        """Construct a `MetricsEndpointAggregator`.
+
+        Args:
+            charm: a `CharmBase` object that manages this
+                `MetricsEndpointAggregator` object. Typically this is
+                `self` in the instantiating class.
+            relation_names: a dictionary with three keys. The values
+                of the "scrape_target" and "alert_rules" keys are
+                the relation names over which scrape job and alert rule
+                information is gathered by this `MetricsEndpointAggregator`.
+                The value of the "prometheus" key is the name of
+                the relation with a `MetricsEndpointConsumer` such as
+                the Prometheus charm.
+            relabel_instance: A boolean flag indicating if Prometheus
+                scrape job "instance" labels must refer to Juju Topology.
+        """
+        super().__init__(charm, relation_names["prometheus"])
+
+        self._charm = charm
+        self._target_relation = relation_names["scrape_target"]
+        self._prometheus_relation = relation_names["prometheus"]
+        self._alert_rules_relation = relation_names["alert_rules"]
+        self._relabel_instance = relabel_instance
+
+        # manage Prometheus charm relation events
+        prometheus_events = self._charm.on[self._prometheus_relation]
+        self.framework.observe(prometheus_events.relation_joined, self._set_prometheus_data)
+
+        # manage list of Prometheus scrape jobs from related scrape targets
+        target_events = self._charm.on[self._target_relation]
+        self.framework.observe(target_events.relation_changed, self._update_prometheus_jobs)
+        self.framework.observe(target_events.relation_departed, self._remove_prometheus_jobs)
+
+        # manage alert rules for Prometheus from related scrape targets
+        alert_rule_events = self._charm.on[self._alert_rules_relation]
+        self.framework.observe(alert_rule_events.relation_changed, self._update_alert_rules)
+        self.framework.observe(alert_rule_events.relation_departed, self._remove_alert_rules)
+
+    def _set_prometheus_data(self, event):
+        """Ensure every new Prometheus instance is updated.
+
+        Any time a new Prometheus unit joins the relation with
+        `MetricsEndpointAggregator`, that Prometheus unit is provided
+        with the complete set of existing scrape jobs and alert rules.
+        """
+        jobs = []  # list of scrape jobs, one per relation
+        for relation in self.model.relations[self._target_relation]:
+            targets = self._get_targets(relation)
+            if targets and relation.app:
+                jobs.append(self._static_scrape_job(targets, relation.app.name))
+
+        groups = []  # list of alert rule groups, one group per relation
+        for relation in self.model.relations[self._alert_rules_relation]:
+            unit_rules = self._get_alert_rules(relation)
+            if unit_rules and relation.app:
+                appname = relation.app.name
+                rules = self._label_alert_rules(unit_rules, appname)
+                group = {"name": self._group_name(appname), "rules": rules}
+                groups.append(group)
+
+        event.relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
+        event.relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+
+    def _set_target_job_data(self, targets: dict, app_name: str, **kwargs) -> None:
+        """Update scrape jobs in response to scrape target changes.
+
+        When there is any change in relation data with any scrape
+        target, the Prometheus scrape job for that specific target is
+        updated. Additionally, if this method is called manually, it
+        does the same.
+
+        Args:
+            targets: a `dict` containing target information
+            app_name: a `str` identifying the application
+        """
+        # new scrape job for the relation that has changed
+        updated_job = self._static_scrape_job(targets, app_name, **kwargs)
+
+        for relation in self.model.relations[self._prometheus_relation]:
+            jobs = json.loads(relation.data[self._charm.app].get("scrape_jobs", "[]"))
+            # list of scrape jobs that have not changed
+            jobs = [job for job in jobs if updated_job["job_name"] != job["job_name"]]
+            jobs.append(updated_job)
+            relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
+
+    def _update_prometheus_jobs(self, event):
+        """Update scrape jobs in response to scrape target changes.
+
+        When there is any change in relation data with any scrape
+        target, the Prometheus scrape job for that specific target is
+        updated.
+        """
+        targets = self._get_targets(event.relation)
+        if not targets:
+            return
+
+        # new scrape job for the relation that has changed
+        updated_job = self._static_scrape_job(targets, event.relation.app.name)
+
+        for relation in self.model.relations[self._prometheus_relation]:
+            jobs = json.loads(relation.data[self._charm.app].get("scrape_jobs", "[]"))
+            # list of scrape jobs that have not changed
+            jobs = [job for job in jobs if updated_job["job_name"] != job["job_name"]]
+            jobs.append(updated_job)
+            relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
+
+    def _remove_prometheus_jobs(self, event):
+        """Remove scrape jobs when a target departs.
+
+        Any time a scrape target departs, any Prometheus scrape job
+        associated with that specific scrape target is removed.
+        """
+        job_name = self._job_name(event.relation.app.name)
+        unit_name = event.unit.name
+
+        for relation in self.model.relations[self._prometheus_relation]:
+            jobs = json.loads(relation.data[self._charm.app].get("scrape_jobs", "[]"))
+            if not jobs:
+                continue
+
+            changed_job = [j for j in jobs if j.get("job_name") == job_name]
+            if not changed_job:
+                continue
+            changed_job = changed_job[0]
+
+            # list of scrape jobs that have not changed
+            jobs = [job for job in jobs if job.get("job_name") != job_name]
+
+            # list of scrape jobs for units of the same application that still exist
+            configs_kept = [
+                config
+                for config in changed_job["static_configs"]  # type: ignore
+                if config.get("labels", {}).get("juju_unit") != unit_name
+            ]
+
+            if configs_kept:
+                changed_job["static_configs"] = configs_kept  # type: ignore
+                jobs.append(changed_job)
+
+            relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
+
+    def _update_alert_rules(self, event):
+        """Update alert rules in response to scrape target changes.
+
+        When there is any change in alert rule relation data for any
+        scrape target, the list of alert rules for that specific
+        target is updated.
+        """
+        unit_rules = self._get_alert_rules(event.relation)
+        if not unit_rules:
+            return
+
+        appname = event.relation.app.name
+        rules = self._label_alert_rules(unit_rules, appname)
+        # the alert rule group that has changed
+        updated_group = {"name": self._group_name(appname), "rules": rules}
+
+        for relation in self.model.relations[self._prometheus_relation]:
+            alert_rules = json.loads(relation.data[self._charm.app].get("alert_rules", "{}"))
+            groups = alert_rules.get("groups", [])
+            # list of alert rule groups that have not changed
+            groups = [group for group in groups if updated_group["name"] != group["name"]]
+            groups.append(updated_group)
+            relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+
+    def _remove_alert_rules(self, event):
+        """Remove alert rules for departed targets.
+
+        Any time a scrape target departs, any alert rules associated
+        with that specific scrape target are removed.
+        """
+        group_name = self._group_name(event.relation.app.name)
+        unit_name = event.unit.name
+
+        for relation in self.model.relations[self._prometheus_relation]:
+            alert_rules = json.loads(relation.data[self._charm.app].get("alert_rules", "{}"))
+            if not alert_rules:
+                continue
+
+            groups = alert_rules.get("groups", [])
+            if not groups:
+                continue
+
+            changed_group = [group for group in groups if group["name"] == group_name]
+            if not changed_group:
+                continue
+            changed_group = changed_group[0]
+
+            # list of alert rule groups that have not changed
+            groups = [group for group in groups if group["name"] != group_name]
+
+            # list of alert rules not associated with departing unit
+            rules_kept = [
+                rule
+                for rule in changed_group.get("rules")  # type: ignore
+                if rule.get("labels").get("juju_unit") != unit_name
+            ]
+
+            if rules_kept:
+                changed_group["rules"] = rules_kept  # type: ignore
+                groups.append(changed_group)
+
+            relation.data[self._charm.app]["alert_rules"] = (
+                json.dumps({"groups": groups}) if groups else "{}"
+            )
+
+    def _get_targets(self, relation) -> dict:
+        """Fetch scrape targets for a relation.
+
+        Scrape target information is returned for each unit in the
+        relation. This information contains the unit name, network
+        hostname (or address) for that unit, and port on which a
+        metrics endpoint is exposed in that unit.
+
+        Args:
+            relation: an `ops.model.Relation` object for which scrape
+                targets are required.
+
+        Returns:
+            a dictionary whose keys are names of the units in the
+            relation. The value associated with each key is itself
+            a dictionary of the form
+            ```
+            {"hostname": hostname, "port": port}
+            ```
+        """
+        targets = {}
+        for unit in relation.units:
+            port = relation.data[unit].get("port", 80)
+            hostname = relation.data[unit].get("hostname")
+            if hostname:
+                targets.update({unit.name: {"hostname": hostname, "port": port}})
+
+        return targets
+
+    def _get_alert_rules(self, relation) -> dict:
+        """Fetch alert rules for a relation.
+
+        Each unit of the related scrape target may have its own
+        associated alert rules. Alert rules for all units are returned
+        indexed by unit name.
+
+        Args:
+            relation: an `ops.model.Relation` object for which alert
+                rules are required.
+
+        Returns:
+            a dictionary whose keys are names of the units in the
+            relation. The value associated with each key is a list
+            of alert rules. Each rule is in dictionary format, and
+            each such "rule dictionary" corresponds to a single
+            Prometheus alert rule.
+        """
+        rules = {}
+        for unit in relation.units:
+            unit_rules = yaml.safe_load(relation.data[unit].get("groups", ""))
+            if unit_rules:
+                rules.update({unit.name: unit_rules})
+
+        return rules
+
+    def _job_name(self, appname) -> str:
+        """Construct a scrape job name.
+
+        Each relation has its own unique scrape job name. All units in
+        the relation are scraped as part of the same scrape job.
+
+        Args:
+            appname: string name of a related application.
+
+        Returns:
+            a string Prometheus scrape job name for the application.
+        """
+        return "juju_{}_{}_{}_prometheus_scrape".format(
+            self.model.name, self.model.uuid[:7], appname
+        )
+
+    def _group_name(self, appname) -> str:
+        """Construct name for an alert rule group.
+
+        Each unit in a relation may define its own alert rules. All
+        rules, for all units in a relation, are grouped together and
+        given a single alert rule group name.
+
+        Args:
+            appname: string name of a related application.
+
+        Returns:
+            a string Prometheus alert rules group name for the application.
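+
+            For example, an application named "rgw" may yield a group
+            name such as ``juju_mymodel_1234567_rgw_alert_rules``,
+            where the model name and truncated uuid are illustrative.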
+        """
+        return "juju_{}_{}_{}_alert_rules".format(self.model.name, self.model.uuid[:7], appname)
+
+    def _label_alert_rules(self, unit_rules, appname) -> list:
+        """Apply Juju topology labels to alert rules.
+
+        Args:
+            unit_rules: a list of alert rules, where each rule is in
+                dictionary format.
+            appname: a string name of the application to which the
+                alert rules belong.
+
+        Returns:
+            a list of alert rules with Juju topology labels.
+        """
+        labeled_rules = []
+        for unit_name, rules in unit_rules.items():
+            for rule in rules:
+                # the new JujuTopology removed this, so build it up by hand
+                matchers = {
+                    "juju_{}".format(k): v
+                    for k, v in JujuTopology(self.model.name, self.model.uuid, appname, unit_name)
+                    .as_dict(excluded_keys=["charm_name"])
+                    .items()
+                }
+                rule["labels"].update(matchers.items())
+                labeled_rules.append(rule)
+
+        return labeled_rules
+
+    def _static_scrape_job(self, targets, application_name, **kwargs) -> dict:
+        """Construct a static scrape job for an application.
+
+        Args:
+            targets: a dictionary providing hostname and port for all
+                scrape targets. The keys of this dictionary are unit
+                names. Values corresponding to these keys are
+                themselves a dictionary with keys "hostname" and
+                "port".
+            application_name: a string name of the application for
+                which this static scrape job is being constructed.
+
+        Returns:
+            A dictionary corresponding to a Prometheus static scrape
+            job configuration for one application. The returned
+            dictionary may be transformed into YAML and appended to
+            any existing list of Prometheus static configs.
+        """
+        juju_model = self.model.name
+        juju_model_uuid = self.model.uuid
+        job = {
+            "job_name": self._job_name(application_name),
+            "static_configs": [
+                {
+                    "targets": ["{}:{}".format(target["hostname"], target["port"])],
+                    "labels": {
+                        "juju_model": juju_model,
+                        "juju_model_uuid": juju_model_uuid,
+                        "juju_application": application_name,
+                        "juju_unit": unit_name,
+                        "host": target["hostname"],
+                    },
+                }
+                for unit_name, target in targets.items()
+            ],
+            "relabel_configs": self._relabel_configs + kwargs.get("relabel_configs", []),
+        }
+        job.update(kwargs.get("updates", {}))
+
+        return job
+
+    @property
+    def _relabel_configs(self) -> list:
+        """Create Juju topology relabeling configuration.
+
+        Using Juju topology for instance labels ensures that these
+        labels are stable across unit recreation.
+
+        Returns:
+            a list of Prometheus relabeling configurations. Each item in
+            this list is one relabel configuration.
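+
+            With the default separator this yields "instance" labels of
+            the form ``<model>_<model uuid>_<application>_<unit>``, an
+            illustrative rendering of the configuration returned below.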
+ """ + return ( + [ + { + "source_labels": [ + "juju_model", + "juju_model_uuid", + "juju_application", + "juju_unit", + ], + "separator": "_", + "target_label": "instance", + "regex": "(.*)", + } + ] + if self._relabel_instance + else [] + ) + + +class CosTool: + """Uses cos-tool to inject label matchers into alert rule expressions and validate rules.""" + + _path = None + _disabled = False + + def __init__(self, charm): + self._charm = charm + + @property + def path(self): + """Lazy lookup of the path of cos-tool.""" + if self._disabled: + return None + if not self._path: + self._path = self._get_tool_path() + if not self._path: + logger.debug("Skipping injection of juju topology as label matchers") + self._disabled = True + return self._path + + def apply_label_matchers(self, rules) -> dict: + """Will apply label matchers to the expression of all alerts in all supplied groups.""" + if not self.path: + return rules + for group in rules["groups"]: + rules_in_group = group.get("rules", []) + for rule in rules_in_group: + topology = {} + # if the user for some reason has provided juju_unit, we'll need to honor it + # in most cases, however, this will be empty + for label in [ + "juju_model", + "juju_model_uuid", + "juju_application", + "juju_charm", + "juju_unit", + ]: + if label in rule["labels"]: + topology[label] = rule["labels"][label] + + rule["expr"] = self.inject_label_matchers(rule["expr"], topology) + return rules + + def validate_alert_rules(self, rules: dict) -> Tuple[bool, str]: + """Will validate correctness of alert rules, returning a boolean and any errors.""" + if not self.path: + logger.debug("`cos-tool` unavailable. Not validating alert correctness.") + return True, "" + + with tempfile.TemporaryDirectory() as tmpdir: + rule_path = Path(tmpdir + "/validate_rule.yaml") + + # Smash "our" rules format into what upstream actually uses, which is more like: + # + # groups: + # - name: foo + # rules: + # - alert: SomeAlert + # expr: up + # - alert: OtherAlert + # expr: up + transformed_rules = {"groups": []} # type: ignore + for rule in rules["groups"]: + transformed = {"name": str(uuid.uuid4()), "rules": [rule]} + transformed_rules["groups"].append(transformed) + + rule_path.write_text(yaml.dump(transformed_rules)) + + args = [str(self.path), "validate", str(rule_path)] + # noinspection PyBroadException + try: + self._exec(args) + return True, "" + except subprocess.CalledProcessError as e: + logger.debug("Validating the rules failed: %s", e.output) + return False, ", ".join([line for line in e.output if "error validating" in line]) + + def inject_label_matchers(self, expression, topology) -> str: + """Add label matchers to an expression.""" + if not topology: + return expression + if not self.path: + logger.debug("`cos-tool` unavailable. 
Leaving expression unchanged: %s", expression) + return expression + args = [str(self.path), "transform"] + args.extend( + ["--label-matcher={}={}".format(key, value) for key, value in topology.items()] + ) + + args.extend(["{}".format(expression)]) + # noinspection PyBroadException + try: + return self._exec(args) + except subprocess.CalledProcessError as e: + logger.debug('Applying the expression failed: "%s", falling back to the original', e) + return expression + + def _get_tool_path(self) -> Optional[Path]: + arch = platform.machine() + arch = "amd64" if arch == "x86_64" else arch + res = "cos-tool-{}".format(arch) + try: + path = Path(res).resolve() + path.chmod(0o777) + return path + except NotImplementedError: + logger.debug("System lacks support for chmod") + except FileNotFoundError: + logger.debug('Could not locate cos-tool at: "{}"'.format(res)) + return None + + def _exec(self, cmd) -> str: + result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE) + output = result.stdout.decode("utf-8").strip() + return output diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index cbc8e4d6..d10de73b 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -36,6 +36,8 @@ provides: interface: ceph-rbd-mirror prometheus: interface: http + metrics-endpoint: + interface: prometheus_scrape dashboard: interface: ceph-dashboard requires: diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py new file mode 100644 index 00000000..cd715528 --- /dev/null +++ b/ceph-mon/src/ceph_metrics.py @@ -0,0 +1,51 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Provide ceph metrics to prometheus + +Configure prometheus scrape jobs via the metrics-endpoint relation. +""" +import logging +from typing import Optional, Union, List + +from charms.prometheus_k8s.v0 import prometheus_scrape +from charms_ceph import utils as ceph_utils +from ops.framework import BoundEvent + + +logger = logging.getLogger(__name__) + +DEFAULT_CEPH_JOB = { + "metrics_path": "/metrics", + "static_configs": [{"targets": ["*:9283"]}], +} + + +class CephMetricsEndpointProvider(prometheus_scrape.MetricsEndpointProvider): + def __init__( + self, + charm, + relation_name: str = prometheus_scrape.DEFAULT_RELATION_NAME, + jobs=None, + alert_rules_path: str = prometheus_scrape.DEFAULT_ALERT_RULES_RELATIVE_PATH, # noqa + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + ): + if jobs is None: + jobs = [DEFAULT_CEPH_JOB] + super().__init__( + charm, + relation_name=relation_name, + jobs=jobs, + alert_rules_path=alert_rules_path, + refresh_event=refresh_event, + ) + + def _on_relation_changed(self, event): + """Enable prometheus on relation change""" + if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): + logger.debug( + "is_leader and is_bootstrapped, running rel changed: %s", event + ) + ceph_utils.mgr_enable_module("prometheus") + logger.debug("module_enabled") + super()._on_relation_changed(event) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 9003aa52..6a3cf8ac 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -5,6 +5,7 @@ import ops_openstack.core import ceph_hooks as hooks +import ceph_metrics class CephMonCharm(ops_openstack.core.OSBaseCharm): @@ -70,6 +71,8 @@ def __init__(self, *args): self._stored.is_started = True fw = self.framework + self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) + fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, 
self.on_config) fw.observe(self.on.pre_series_upgrade, self.on_pre_series_upgrade) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 5fd9c98d..597f5d1e 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -84,7 +84,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + charm-tools==2.8.4 commands = flake8 {posargs} unit_tests tests actions files src charm-proof diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py new file mode 100644 index 00000000..e767472a --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +from unittest.mock import patch +import unittest + +from ops import storage, model, framework +from ops.testing import Harness, _TestingModelBackend + +import charm + + +class TestCephMetrics(unittest.TestCase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + + # BEGIN: Workaround until network_get is implemented + class _TestingOPSModelBackend(_TestingModelBackend): + def network_get(self, endpoint_name, relation_id=None): + network_data = { + "bind-addresses": [ + { + "addresses": [{"value": "10.0.0.10"}], + } + ], + } + return network_data + + self.harness._backend = _TestingOPSModelBackend( + self.harness._unit_name, self.harness._meta + ) + self.harness._model = model.Model( + self.harness._meta, self.harness._backend + ) + self.harness._framework = framework.Framework( + storage.SQLiteStorage(":memory:"), + self.harness._charm_dir, + self.harness._meta, + self.harness._model, + ) + # END Workaround + self.addCleanup(self.harness.cleanup) + self.harness.begin() + self.harness.set_leader(True) + + def test_init(self): + self.assertEqual( + self.harness.charm.metrics_endpoint._relation_name, + "metrics-endpoint", + ) + + @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) + @patch("ceph_metrics.ceph_utils.is_mgr_module_enabled", return_value=False) + @patch("ceph_metrics.ceph_utils.mgr_enable_module") + def test_add_rel( + self, + mgr_enable_module, + _is_mgr_module_enable, + _is_bootstrapped, + ): + rel_id = self.harness.add_relation("metrics-endpoint", "prometheus") + self.harness.add_relation_unit(rel_id, "prometheus/0") + + unit_rel_data = self.harness.get_relation_data( + rel_id, self.harness.model.unit + ) + self.assertEqual( + unit_rel_data["prometheus_scrape_unit_address"], "10.0.0.10" + ) + + # Trigger relation change event as a side effect + self.harness.update_relation_data( + rel_id, "prometheus/0", {"foo": "bar"} + ) + + mgr_enable_module.assert_called_once() + + app_rel_data = self.harness.get_relation_data( + rel_id, self.harness.model.app + ) + jobs = app_rel_data["scrape_jobs"] + self.assertEqual( + jobs, + ( + '[{"metrics_path": "/metrics", ' + '"static_configs": [{"targets": ["*:9283"]}]}]' + ), + ) diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 5da722fd..2cf59996 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -297,8 +297,7 @@ def test_get_ceph_osd_releases_two_releases( releases = utils.get_ceph_osd_releases() self.assertEqual(len(releases), 2) - self.assertEqual(releases[0], ceph_release_1) - self.assertEqual(releases[1], ceph_release_2) + self.assertEqual(sorted(releases), [ceph_release_1, ceph_release_2]) @mock.patch.object(utils.subprocess, 'check_output') 
    @mock.patch.object(utils.json, 'loads')

From 42731bb61c85c04fe8cd8834389fc8696554bde8 Mon Sep 17 00:00:00 2001
From: utkarshbhatthere
Date: Tue, 30 Aug 2022 10:36:53 +0000
Subject: [PATCH 2420/2699] Add support for scaling down a multisite RGW system

Add an implementation of the relation-departed hooks to cleanly remove
participant sites from the multisite system. Replication between the
zones is stopped, and the two zones then continue independently, each
as a separate master zone.

Change-Id: I420f7933db55f3004f752949b5c09b1b79774f64
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/863
---
 ceph-radosgw/hooks/hooks.py               | 47 ++++++++++++++
 ceph-radosgw/hooks/multisite.py           | 75 ++++++++++++++++++++---
 ceph-radosgw/hooks/utils.py               |  4 +-
 ceph-radosgw/unit_tests/test_hooks.py     | 41 ++++++++++++-
 ceph-radosgw/unit_tests/test_multisite.py | 30 +++++++++
 5 files changed, 188 insertions(+), 9 deletions(-)

diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index eaea048e..7c5e9cd0 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -821,6 +821,53 @@ def master_relation_joined(relation_id=None):
                             secret=secret)
 
 
+@hooks.hook('master-relation-departed')
+@hooks.hook('slave-relation-departed')
+def multisite_relation_departed():
+    if not is_leader():
+        log('Cannot remove multisite relation, this unit is not the leader')
+        return
+
+    if not ready_for_service(legacy=False):
+        raise RuntimeError("Leader unit not ready for service.")
+
+    zone = config('zone')
+    zonegroup = config('zonegroup')
+    realm = config('realm')
+
+    # If the zone/zonegroup config is not present on this site,
+    # remove-relation was called prematurely.
+    if not multisite.is_multisite_configured(zone=zone,
+                                             zonegroup=zonegroup):
+        log('Multisite is not configured, skipping scaledown.')
+        return
+
+    zonegroup_info = multisite.get_zonegroup_info(zonegroup)
+    # remove other zones from the zonegroup
+    for zone_info in zonegroup_info['zones']:
+        if zone_info['name'] != zone:
+            multisite.remove_zone_from_zonegroup(
+                zone_info['name'], zonegroup
+            )
+
+    # modify self as master zone.
+    multisite.modify_zone(zone, default=True, master=True,
+                          zonegroup=zonegroup)
+
+    # Update period.
+    multisite.update_period(
+        fatal=True, zonegroup=zonegroup,
+        zone=zone, realm=realm
+    )
+
+    # Verify multisite is not configured.
+    if multisite.is_multisite_configured(zone=zone,
+                                         zonegroup=zonegroup):
+        status_set(WORKLOAD_STATES.BLOCKED,
+                   "Failed to do a clean scaledown.")
+        raise RuntimeError("Residual multisite config at local site.")
+
+
 @hooks.hook('slave-relation-changed')
 def slave_relation_changed(relation_id=None, unit=None):
     if not is_leader():
diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py
index 590fdeb0..fc4200f6 100644
--- a/ceph-radosgw/hooks/multisite.py
+++ b/ceph-radosgw/hooks/multisite.py
@@ -370,7 +370,60 @@ def modify_zone(name, endpoints=None, default=False, master=False,
     return None
 
 
-def update_period(fatal=True, zonegroup=None, zone=None):
+def remove_zone_from_zonegroup(zone, zonegroup):
+    """Remove a RADOS Gateway zone from the provided parent zonegroup.
+
+    Removal is different from deletion: this operation removes the
+    zone/zonegroup affiliation but does not delete the actual zone.
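+
+    This is roughly equivalent to running the following by hand
+    (illustrative only; the real invocation below also passes the
+    gateway's key id):
+
+        radosgw-admin zonegroup remove \
+            --rgw-zonegroup=<zonegroup> --rgw-zone=<zone>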
+
+    :param zonegroup: parent zonegroup name
+    :type zonegroup: str
+    :param zone: zone name
+    :type zone: str
+    :return: modified zonegroup config
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'zonegroup', 'remove',
+        '--rgw-zonegroup={}'.format(zonegroup),
+        '--rgw-zone={}'.format(zone),
+    ]
+    try:
+        result = _check_output(cmd)
+        return json.loads(result)
+    except (TypeError, subprocess.CalledProcessError) as exc:
+        raise RuntimeError(
+            "Error removing zone {} from zonegroup {}"
+            .format(zone, zonegroup)) from exc
+
+
+def add_zone_to_zonegroup(zone, zonegroup):
+    """Add a RADOS Gateway zone to the provided zonegroup.
+
+    :param zonegroup: parent zonegroup name
+    :type zonegroup: str
+    :param zone: zone name
+    :type zone: str
+    :return: modified zonegroup config
+    :rtype: dict
+    """
+    cmd = [
+        RGW_ADMIN, '--id={}'.format(_key_name()),
+        'zonegroup', 'add',
+        '--rgw-zonegroup={}'.format(zonegroup),
+        '--rgw-zone={}'.format(zone),
+    ]
+    try:
+        result = _check_output(cmd)
+        return json.loads(result)
+    except (TypeError, subprocess.CalledProcessError) as exc:
+        raise RuntimeError(
+            "Error adding zone {} to zonegroup {}"
+            .format(zone, zonegroup)) from exc
+
+
+def update_period(fatal=True, zonegroup=None, zone=None, realm=None):
     """Update RADOS Gateway configuration period
 
     :param fatal: In failure case, whether CalledProcessError is to be raised.
@@ -379,6 +432,8 @@ def update_period(fatal=True, zonegroup=None, zone=None):
     :type zonegroup: str
     :param zone: zone name
     :type zone: str
+    :param realm: realm name
+    :type realm: str
     """
     cmd = [
         RGW_ADMIN, '--id={}'.format(_key_name()),
@@ -388,6 +443,8 @@ def update_period(fatal=True, zonegroup=None, zone=None):
         cmd.append('--rgw-zonegroup={}'.format(zonegroup))
     if zone is not None:
         cmd.append('--rgw-zone={}'.format(zone))
+    if realm is not None:
+        cmd.append('--rgw-realm={}'.format(realm))
     if fatal:
         _check_call(cmd)
     else:
@@ -641,17 +698,21 @@ def is_multisite_configured(zone, zonegroup):
     :rtype: Boolean
     """
-    if zone not in list_zones():
-        hookenv.log("No local zone found with name ({})".format(zonegroup),
-                    level=hookenv.ERROR)
+    local_zones = list_zones()
+    if zone not in local_zones:
+        hookenv.log("zone {} not found in local zones {}"
+                    .format(zone, local_zones), level=hookenv.ERROR)
         return False
 
-    if zonegroup not in list_zonegroups():
-        hookenv.log("No zonegroup found with name ({})".format(zonegroup),
-                    level=hookenv.ERROR)
+    local_zonegroups = list_zonegroups()
+    if zonegroup not in local_zonegroups:
+        hookenv.log("zonegroup {} not found in local zonegroups {}"
+                    .format(zonegroup, local_zonegroups), level=hookenv.ERROR)
         return False
 
     sync_status = get_sync_status()
+    hookenv.log("Multisite sync status {}".format(sync_status),
+                level=hookenv.DEBUG)
     if sync_status is not None:
         return ('data sync source:' in sync_status)
 
diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py
index 00695154..80da68fc 100644
--- a/ceph-radosgw/hooks/utils.py
+++ b/ceph-radosgw/hooks/utils.py
@@ -219,7 +219,9 @@ def check_optional_config_and_relations(configs):
                 leader_get('restart_nonce'))
 
     # Any realm or zonegroup config is present, multisite checks can be done.
-    if (config('realm') or config('zonegroup')):
+    # The 'zone' option cannot be used for this check as it is set by default.
+    if config('realm') or config('zonegroup') or relation_ids('master') \
+            or relation_ids('slave'):
         # All of Realm, zonegroup, and zone must be configured.
if not all(multisite_config): return ('blocked', diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index 416ea84f..cec082cf 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import json from unittest.mock import ( patch, call, MagicMock, ANY ) @@ -75,6 +75,25 @@ ] +# Stub Methods +def get_zonegroup_stub(): + # populate dummy zones info + zone_one = {} + zone_one['id'] = "test_zone_id_one" + zone_one['name'] = "testzone" + + zone_two = {} + zone_two['id'] = "test_zone_id_two" + zone_two['name'] = "testzone_two" + + # populate dummy zonegroup info + zonegroup = {} + zonegroup['name'] = "testzonegroup" + zonegroup['master_zone'] = "test_zone_id_one" + zonegroup['zones'] = [zone_one, zone_two] + return zonegroup + + class CephRadosGWTests(CharmTestCase): def setUp(self): @@ -793,6 +812,26 @@ def test_master_relation_joined_not_leader(self): ) self.multisite.list_realms.assert_not_called() + @patch.object(json, 'loads') + def test_multisite_relation_departed(self, json_loads): + for k, v in self._complete_config.items(): + self.test_config.set(k, v) + self.is_leader.return_value = True + # Multisite is configured at first but then disabled. + self.multisite.is_multisite_configured.side_effect = [True, False] + self.multisite.get_zonegroup_info.return_value = get_zonegroup_stub() + # json.loads() raises TypeError for mock objects. + json_loads.returnvalue = [] + ceph_hooks.multisite_relation_departed() + + self.multisite.modify_zone.assert_called_once_with( + 'testzone', default=True, master=True, zonegroup='testzonegroup' + ) + self.multisite.update_period.assert_called_once_with( + fatal=True, zonegroup='testzonegroup', + zone='testzone', realm='testrealm' + ) + class SlaveMultisiteTests(CephRadosMultisiteTests): diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 5374e422..cb030bec 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -14,6 +14,7 @@ import inspect import os +import json from unittest import mock import multisite @@ -34,6 +35,7 @@ def get_zonegroup_stub(): # populate dummy zonegroup info zonegroup = {} zonegroup['name'] = "test_zonegroup" + zonegroup['master_zone'] = "test_zone_id" zonegroup['zones'] = [zone] return zonegroup @@ -441,6 +443,34 @@ def test_rename_multisite_config_zone_fail(self, mock_rename_zonegroup): '--rgw-zonegroup=test_zonegroup', ]) + @mock.patch.object(json, 'loads') + def test_remove_zone_from_zonegroup(self, json_loads): + # json.loads() raises TypeError for mock objects. + json_loads.returnvalue = [] + multisite.remove_zone_from_zonegroup( + 'test_zone', 'test_zonegroup', + ) + + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'remove', '--rgw-zonegroup=test_zonegroup', + '--rgw-zone=test_zone', + ], stderr=mock.ANY) + + @mock.patch.object(json, 'loads') + def test_add_zone_from_zonegroup(self, json_loads): + # json.loads() raises TypeError for mock objects. 
+ json_loads.returnvalue = [] + multisite.add_zone_to_zonegroup( + 'test_zone', 'test_zonegroup', + ) + + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zonegroup', 'add', '--rgw-zonegroup=test_zonegroup', + '--rgw-zone=test_zone', + ], stderr=mock.ANY) + @mock.patch.object(multisite, 'list_zonegroups') @mock.patch.object(multisite, 'get_local_zone') @mock.patch.object(multisite, 'list_buckets') From 7801205a82b0ea2087f9d065379512777925c567 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 23 Aug 2022 11:23:34 -0300 Subject: [PATCH 2421/2699] Rewrite the 'change-osd-weight' to use the op framework This patchset changes a single action, 'change-osd-weight' so that it's implemented with the operator framework. Change-Id: Ia11885a2096b6e4b1ecda5caea38939e17098e1d --- ceph-mon/actions/__init__.py | 2 +- ceph-mon/actions/change-osd-weight | 1 - ceph-mon/src/charm.py | 21 ++++++++++++++++ ceph-mon/src/ops_actions/__init__.py | 15 +++++++++++ .../ops_actions}/change_osd_weight.py | 25 ++++++++++--------- ceph-mon/tox.ini | 1 - .../test_action_change_osd_weight.py | 24 ++++++++++-------- ceph-mon/unit_tests/test_utils.py | 10 +++++++- 8 files changed, 73 insertions(+), 26 deletions(-) delete mode 120000 ceph-mon/actions/change-osd-weight create mode 100644 ceph-mon/src/ops_actions/__init__.py rename ceph-mon/{actions => src/ops_actions}/change_osd_weight.py (61%) mode change 100755 => 100644 diff --git a/ceph-mon/actions/__init__.py b/ceph-mon/actions/__init__.py index 9b088de8..26092e0f 100644 --- a/ceph-mon/actions/__init__.py +++ b/ceph-mon/actions/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Ltd +# Copyright 2022 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ceph-mon/actions/change-osd-weight b/ceph-mon/actions/change-osd-weight deleted file mode 120000 index 07705325..00000000 --- a/ceph-mon/actions/change-osd-weight +++ /dev/null @@ -1 +0,0 @@ -change_osd_weight.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 6a3cf8ac..18d99131 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -7,6 +7,8 @@ import ceph_hooks as hooks import ceph_metrics +import ops_actions + class CephMonCharm(ops_openstack.core.OSBaseCharm): @@ -66,12 +68,31 @@ def on_client_relation(self, event): def on_nrpe_relation(self, event): hooks.upgrade_nrpe_config() + # Actions. + + def _observe_action(self, on_action, callable): + def _make_method(fn): + return lambda _, event: fn(event) + + method_name = 'on_' + str(on_action.event_kind) + method = _make_method(callable) + # In addition to being a method, the action callbacks _must_ have + # the same '__name__' as their attribute name (this is how lookups + # work in the operator framework world). 
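+        # For example, for the 'change-osd-weight' action the event kind
+        # is 'change_osd_weight_action', so the callback must be attached
+        # as 'on_change_osd_weight_action'.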
+ method.__name__ = method_name + + inst = type(self) + setattr(inst, method_name, method) + self.framework.observe(on_action, getattr(self, method_name)) + def __init__(self, *args): super().__init__(*args) self._stored.is_started = True fw = self.framework self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) + self._observe_action(self.on.change_osd_weight_action, + ops_actions.change_osd_weight.change_osd_weight) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py new file mode 100644 index 00000000..b8d2de33 --- /dev/null +++ b/ceph-mon/src/ops_actions/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2022 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import change_osd_weight # noqa: F401 diff --git a/ceph-mon/actions/change_osd_weight.py b/ceph-mon/src/ops_actions/change_osd_weight.py old mode 100755 new mode 100644 similarity index 61% rename from ceph-mon/actions/change_osd_weight.py rename to ceph-mon/src/ops_actions/change_osd_weight.py index 1732f010..cc12cf94 --- a/ceph-mon/actions/change_osd_weight.py +++ b/ceph-mon/src/ops_actions/change_osd_weight.py @@ -16,25 +16,26 @@ """Changes the crush weight of an OSD.""" -from charmhelpers.core.hookenv import function_fail, function_get, log -from charms_ceph.utils import reweight_osd +import charms_ceph.utils as ceph_utils +import logging -def crush_reweight(osd_num, new_weight): +logger = logging.getLogger(__name__) + + +def change_osd_weight(event) -> None: """Run reweight_osd to change OSD weight.""" + osd_num = event.params.get("osd") + new_weight = event.params.get("weight") try: - result = reweight_osd(str(osd_num), str(new_weight)) + result = ceph_utils.reweight_osd(str(osd_num), str(new_weight)) except Exception as e: - log(e) - function_fail("Reweight failed due to exception") + logger.warn(e) + event.fail("Reweight failed due to exception") return if not result: - function_fail("Reweight failed to complete") + event.fail("Reweight failed to complete") return - -if __name__ == "__main__": - osd_num = function_get("osd") - new_weight = function_get("weight") - crush_reweight(osd_num, new_weight) + event.set_results({'message': 'success'}) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 597f5d1e..b22b7bb2 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -86,7 +86,6 @@ basepython = python3 deps = flake8==3.9.2 charm-tools==2.8.4 commands = flake8 {posargs} unit_tests tests actions files src - charm-proof [testenv:cover] # Technique based heavily upon diff --git a/ceph-mon/unit_tests/test_action_change_osd_weight.py b/ceph-mon/unit_tests/test_action_change_osd_weight.py index e0bff653..49db6796 100644 --- a/ceph-mon/unit_tests/test_action_change_osd_weight.py +++ b/ceph-mon/unit_tests/test_action_change_osd_weight.py @@ -13,25 +13,29 @@ """Tests for reweight_osd action.""" -from actions import change_osd_weight as action import unittest.mock as mock 
-from test_utils import CharmTestCase
+from test_utils import CharmTestCase, MockActionEvent
+from ops.testing import Harness
+
+with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    # src.charm imports ceph_hooks, so we need to work around the
+    # inclusion of the 'harden' decorator.
+    from src.charm import CephMonCharm
 
 
 class ReweightTestCase(CharmTestCase):
     """Run tests for action."""
 
     def setUp(self):
-        """Init mocks for test cases."""
-        super(ReweightTestCase, self).setUp(
-            action, ["function_get", "function_fail"]
-        )
+        self.harness = Harness(CephMonCharm)
 
-    @mock.patch("actions.change_osd_weight.reweight_osd")
+    @mock.patch("ops_actions.change_osd_weight.ceph_utils.reweight_osd")
     def test_reweight_osd(self, _reweight_osd):
         """Test reweight_osd action has correct calls."""
         _reweight_osd.return_value = True
-        osd_num = 4
-        new_weight = 1.2
-        action.crush_reweight(osd_num, new_weight)
+        self.harness.begin()
+        self.harness.charm.on_change_osd_weight_action(
+            MockActionEvent({'osd': 4, 'weight': 1.2}))
         _reweight_osd.assert_has_calls([mock.call("4", "1.2")])
diff --git a/ceph-mon/unit_tests/test_utils.py b/ceph-mon/unit_tests/test_utils.py
index ce139de5..538aec0d 100644
--- a/ceph-mon/unit_tests/test_utils.py
+++ b/ceph-mon/unit_tests/test_utils.py
@@ -17,7 +17,7 @@
 import os
 import yaml
 
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
 
 
 def load_config():
@@ -157,3 +157,11 @@ def get(self, attr=None):
         elif attr in self.settings:
             return self.settings[attr]
         return None
+
+
+class MockActionEvent:
+
+    def __init__(self, params=None):
+        self.params = params or {}
+        self.fail = MagicMock()
+        self.set_results = MagicMock()

From 950932bfce71259254c0fa1230a78c988f9bb74d Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Tue, 23 Aug 2022 17:14:12 -0400
Subject: [PATCH 2422/2699] Rewrite actions/copy_pool into the operator framework

In addition to trivial changes (passing `event` into the `copy_pool`
function), this change introduces an update to actions/__init__.py that
allows succinct import and use from the main charm.py.

An apparently unrelated change is the removal of charm-proof from the
lint job, as it fails with the removal of actions/copy-pool.
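The resulting pattern, sketched here for reference (the charm.py and
ops_actions/__init__.py hunks below carry the actual change):

    # src/ops_actions/__init__.py
    from . import (  # noqa: F401
        change_osd_weight,
        copy_pool,
    )

    # src/charm.py, in CephMonCharm.__init__
    self._observe_action(self.on.copy_pool_action,
                         ops_actions.copy_pool.copy_pool)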
Change-Id: I66a5590ddf0f0bb5ca073a91b451f8c78598609a func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/866 --- ceph-mon/actions/copy-pool | 1 - ceph-mon/src/charm.py | 2 ++ ceph-mon/src/ops_actions/__init__.py | 5 +++- .../{actions => src/ops_actions}/copy_pool.py | 14 +++------ ceph-mon/tests/tests.yaml | 1 + ceph-mon/unit_tests/test_ceph_actions.py | 30 ++++++++++--------- 6 files changed, 27 insertions(+), 26 deletions(-) delete mode 120000 ceph-mon/actions/copy-pool rename ceph-mon/{actions => src/ops_actions}/copy_pool.py (74%) mode change 100755 => 100644 diff --git a/ceph-mon/actions/copy-pool b/ceph-mon/actions/copy-pool deleted file mode 120000 index 97ffd8cb..00000000 --- a/ceph-mon/actions/copy-pool +++ /dev/null @@ -1 +0,0 @@ -copy_pool.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 18d99131..02bc5275 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -93,6 +93,8 @@ def __init__(self, *args): self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) self._observe_action(self.on.change_osd_weight_action, ops_actions.change_osd_weight.change_osd_weight) + self._observe_action(self.on.copy_pool_action, + ops_actions.copy_pool.copy_pool) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index b8d2de33..0afa6266 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -12,4 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import change_osd_weight # noqa: F401 +from . import ( # noqa: F401 + change_osd_weight, + copy_pool, +) diff --git a/ceph-mon/actions/copy_pool.py b/ceph-mon/src/ops_actions/copy_pool.py old mode 100755 new mode 100644 similarity index 74% rename from ceph-mon/actions/copy_pool.py rename to ceph-mon/src/ops_actions/copy_pool.py index 84723c8a..722b167f --- a/ceph-mon/actions/copy_pool.py +++ b/ceph-mon/src/ops_actions/copy_pool.py @@ -16,20 +16,14 @@ import subprocess -import charmhelpers.core.hookenv as hookenv - -def copy_pool(): +def copy_pool(event) -> None: try: - source = hookenv.action_get("source") - target = hookenv.action_get("target") + source = event.params.get("source") + target = event.params.get("target") subprocess.check_call([ 'rados', 'cppool', source, target ]) except subprocess.CalledProcessError as e: - hookenv.action_fail("Error copying pool: {}".format(str(e))) - - -if __name__ == '__main__': - copy_pool() + event.fail("Error copying pool: {}".format(str(e))) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 7b0f7309..6e1fa867 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -36,3 +36,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest # Tests from quincy. - zaza.openstack.charm_tests.ceph.tests.CephAuthTest + - zaza.openstack.charm_tests.ceph.tests.CephMonActionsTest diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index 81f31e7c..c202deec 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -12,24 +12,24 @@ # limitations under the License. 
import unittest.mock as mock +from ops.testing import Harness import subprocess import test_utils import create_crush_rule -import copy_pool +with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + # src.charm imports ceph_hooks, so we need to workaround the inclusion + # of the 'harden' decorator. + from src.charm import CephMonCharm -class CopyPoolTestCase(test_utils.CharmTestCase): - TO_PATCH = [ - 'hookenv', - ] +class CopyPoolTestCase(test_utils.CharmTestCase): def setUp(self): - super(CopyPoolTestCase, self).setUp( - copy_pool, - self.TO_PATCH - ) + self.harness = Harness(CephMonCharm) @mock.patch.object(create_crush_rule.subprocess, 'check_call') def test_copy_pool(self, mock_check_call): @@ -37,8 +37,9 @@ def test_copy_pool(self, mock_check_call): 'source': 'source-pool', 'target': 'target-pool', } - self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) - copy_pool.copy_pool() + self.harness.begin() + self.harness.charm.on_copy_pool_action( + test_utils.MockActionEvent(_action_data)) mock_check_call.assert_called_with([ 'rados', 'cppool', 'source-pool', 'target-pool', @@ -50,14 +51,15 @@ def test_copy_pool_failed(self, mock_check_call): 'source': 'source-pool', 'target': 'target-pool', } - self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) + self.harness.begin() mock_check_call.side_effect = subprocess.CalledProcessError(1, 'rados') - copy_pool.copy_pool() + event = test_utils.MockActionEvent(_action_data) + self.harness.charm.on_copy_pool_action(event) mock_check_call.assert_called_with([ 'rados', 'cppool', 'source-pool', 'target-pool', ]) - self.hookenv.action_fail.assert_called_once_with(mock.ANY) + event.fail.assert_called_once_with(mock.ANY) class CreateCrushRuleTestCase(test_utils.CharmTestCase): From 010d7814a3a3560eedf27c49547cde8a43bf2c00 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 9 Sep 2022 12:45:22 +0200 Subject: [PATCH 2423/2699] Deprecate legacy prometheus2 Support for relating to the prometheus2 machine charm is deprecated and will be removed at some point in the future. 
Change-Id: Ib0d55ffa03aaacf0d5b6108a6641929a4442eaf1 --- ceph-mon/src/ceph_hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 1cc4c4bd..5a4d4b33 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -440,10 +440,13 @@ def bootstrap_source_relation_changed(): 'prometheus-relation-changed') def prometheus_relation(relid=None, unit=None, prometheus_permitted=None, module_enabled=None): + log("DEPRECATION warning: relating to the prometheus2 machine charm is " + "deprecated in favor of COS Lite", level=INFO) if not ceph.is_bootstrapped(): return if prometheus_permitted is None: prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0 + if module_enabled is None: module_enabled = (is_mgr_module_enabled('prometheus') or mgr_enable_module('prometheus')) From 215bb1570aef64015d39897d16d56e5bfb3c3755 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 16 Aug 2022 17:01:25 -0400 Subject: [PATCH 2424/2699] Remove -proposed from jammy functional test bundle Change-Id: I0e93840e8dd8d84078caf7591a081abf8942c33c --- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index 81635bb8..0e652340 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -14,7 +14,6 @@ applications: num_units: 3 options: monitor-count: '3' - source: distro-proposed channel: latest/edge vault: num_units: 1 From 975668da6e8eb1efa33606fcb40646867584e4f5 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 9 Sep 2022 15:15:38 +0200 Subject: [PATCH 2425/2699] Fix: disable prometheus module on relation depart Disable the ceph prometheus module on relation departure Change-Id: I44f906aa17407c19fa2bbb9b4fbaa86964837b9a --- ceph-mon/src/ceph_metrics.py | 14 ++++++++++++++ ceph-mon/unit_tests/test_ceph_metrics.py | 7 ++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index cd715528..4320543e 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -39,6 +39,10 @@ def __init__( alert_rules_path=alert_rules_path, refresh_event=refresh_event, ) + events = charm.on[relation_name] + self.framework.observe( + events.relation_departed, self._on_relation_departed + ) def _on_relation_changed(self, event): """Enable prometheus on relation change""" @@ -49,3 +53,13 @@ def _on_relation_changed(self, event): ceph_utils.mgr_enable_module("prometheus") logger.debug("module_enabled") super()._on_relation_changed(event) + + def _on_relation_departed(self, event): + """Disable prometheus on depart of relation""" + if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): + logger.debug( + "is_leader and is_bootstrapped, running rel departed: %s", + event, + ) + ceph_utils.mgr_disable_module("prometheus") + logger.debug("module_disabled") diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index e767472a..4bd16af3 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -55,8 +55,10 @@ def test_init(self): @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) @patch("ceph_metrics.ceph_utils.is_mgr_module_enabled", return_value=False) @patch("ceph_metrics.ceph_utils.mgr_enable_module") - def test_add_rel( + 
@patch("ceph_metrics.ceph_utils.mgr_disable_module") + def test_add_remove_rel( self, + mgr_disable_module, mgr_enable_module, _is_mgr_module_enable, _is_bootstrapped, @@ -89,3 +91,6 @@ def test_add_rel( '"static_configs": [{"targets": ["*:9283"]}]}]' ), ) + + self.harness.remove_relation(rel_id) + mgr_disable_module.assert_called_once() From 9f7ccf902eaa4be29fde3ba7ec8f22cde808b2f9 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 9 Sep 2022 18:05:16 +0200 Subject: [PATCH 2426/2699] Fix nrpe relation handling Fix nrpe relation handling, also re-add the nrpe check plugin and script Change-Id: Id81f04f2e2702d8489c0ed1daa85d9f29c7fcc36 --- ceph-mon/charmcraft.yaml | 1 + ceph-mon/src/charm.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index 97af1335..ed560c4e 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -6,6 +6,7 @@ parts: - actions/* - lib/* - templates/* + - files/* after: - update-certificates charm-python-packages: diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 6a3cf8ac..6e6634b5 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -64,7 +64,7 @@ def on_client_relation(self, event): hooks.client_relation() def on_nrpe_relation(self, event): - hooks.upgrade_nrpe_config() + hooks.update_nrpe_config() def __init__(self, *args): super().__init__(*args) From 30e41859b1e5437496fb26038410477061f220b4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 15 Mar 2019 14:06:02 +0100 Subject: [PATCH 2427/2699] Ensure add-disk and hooks handle disks the same way Depends-On: I2ea119f5a1b2a36ccd36df4db094f208a1db100e Depends-On: Ie19e5318ea35c38e5d02963260b85fec0f233df6 Change-Id: Idebe45504233fb5559a3e9ddd9b2d6534cba7bb2 Closes-Bug: #1820271 --- ceph-osd/actions/add_disk.py | 2 +- ceph-osd/hooks/ceph_hooks.py | 14 ++------------ ceph-osd/unit_tests/test_actions_add_disk.py | 5 ++++- ceph-osd/unit_tests/test_ceph_hooks.py | 15 ++++++++++++++- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 57d49fcf..d7ca969d 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -68,7 +68,7 @@ def add_device(request, device_path, bucket=None, ceph_hooks.get_journal_devices(), hookenv.config('ignore-device-errors'), hookenv.config('osd-encrypt'), - hookenv.config('bluestore'), + charms_ceph.utils.use_bluestore(), hookenv.config('osd-encrypt-keymanager'), osd_id) # Make it fast! 
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 1e31de0c..351d7d51 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -271,16 +271,6 @@ def use_vaultlocker(): return False -def use_bluestore(): - """Determine whether bluestore should be used for OSD's - - :returns: whether bluestore disk format should be used - :rtype: bool""" - if cmp_pkgrevno('ceph', '12.2.0') < 0: - return False - return config('bluestore') - - def install_apparmor_profile(): """ Install ceph apparmor profiles and configure @@ -402,7 +392,7 @@ def get_ceph_context(upgrading=False): 'dio': str(config('use-direct-io')).lower(), 'short_object_len': use_short_objects(), 'upgrade_in_progress': upgrading, - 'bluestore': use_bluestore(), + 'bluestore': ceph.use_bluestore(), 'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0, 'bluestore_block_wal_size': config('bluestore-block-wal-size'), 'bluestore_block_db_size': config('bluestore-block-db-size'), @@ -547,8 +537,8 @@ def prepare_disks_and_activate(): if is_osd_bootstrap_ready(): log('ceph bootstrapped, rescanning disks') emit_cephconf() - bluestore = use_bluestore() ceph.udevadm_settle() + bluestore = ceph.use_bluestore() for dev in get_devices(): ceph.osdize(dev, config('osd-format'), osd_journal, diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index 1d06394f..a3129f8d 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -25,9 +25,11 @@ def setUp(self): add_disk, ['hookenv', 'kv']) self.kv.return_value = self.kv + @mock.patch.object(add_disk.charms_ceph.utils, 'use_bluestore') @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices') @mock.patch.object(add_disk.charms_ceph.utils, 'osdize') - def test_add_device(self, mock_osdize, mock_get_journal_devices): + def test_add_device(self, mock_osdize, mock_get_journal_devices, + mock_use_bluestore): def fake_config(key): return { @@ -41,6 +43,7 @@ def fake_config(key): self.hookenv.config.side_effect = fake_config mock_get_journal_devices.return_value = '' self.hookenv.relation_ids.return_value = ['ceph:0'] + mock_use_bluestore.return_value = True db = mock.MagicMock() self.kv.return_value = db diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 5d13f86c..2d332bf1 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -26,6 +26,8 @@ lambda *args, **kwargs: f(*args, **kwargs)) import ceph_hooks +import charms_ceph.utils as ceph_utils + CHARM_CONFIG = {'config-flags': '', 'loglevel': 1, 'use-syslog': True, @@ -63,6 +65,7 @@ def setUp(self): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -110,6 +113,7 @@ def test_get_ceph_context(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') def test_get_ceph_context_invalid_bdev_enable_discard(self, 
mock_config, @@ -154,6 +158,7 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda pkg, ver: -1 if ver == '12.1.0' else 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -198,6 +203,7 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: True) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -250,6 +256,7 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda pkg, ver: -1 if ver == '12.1.0' else 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: True) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -298,6 +305,7 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -344,6 +352,7 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -384,6 +393,7 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'bluestore_block_db_size': 0} self.assertEqual(ctxt, expected) + @patch.object(ceph_utils, 'cmp_pkgrevno', lambda *args: 1) @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext') @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') @@ -394,13 +404,16 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_utils, 'config') @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') def test_get_ceph_context_bluestore_compression( - self, mock_config, mock_config2, mock_bluestore_compression): + self, mock_config, mock_config2, mock_config3, + mock_bluestore_compression): config = copy.deepcopy(CHARM_CONFIG) mock_config.side_effect = lambda key: config[key] mock_config2.side_effect = lambda key: config[key] + 
mock_config3.side_effect = lambda key: config[key] mock_bluestore_compression().return_value = { 'fake-bluestore-compression-key': 'fake-value'} ctxt = ceph_hooks.get_ceph_context() From 021f03956240697de5e4595ba5ccdf65b3da83f8 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 12 Sep 2022 14:24:29 -0400 Subject: [PATCH 2428/2699] Add mock to make unit tests work on Kinetic Change-Id: I0740c4ad27da89b9dbfbc49af4aaf3e3443af012 --- ceph-osd/unit_tests/test_actions_remove_disk.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-osd/unit_tests/test_actions_remove_disk.py b/ceph-osd/unit_tests/test_actions_remove_disk.py index 369d3f1f..8482290d 100644 --- a/ceph-osd/unit_tests/test_actions_remove_disk.py +++ b/ceph-osd/unit_tests/test_actions_remove_disk.py @@ -69,6 +69,7 @@ def test_action_osd_constructor(self, bcache_names): obj = remove_disk.ActionOSD(dev_map, osd_id='1') self.assertEqual(obj.device, '/dev/sdx1') + @mock.patch.object(remove_disk.charms_ceph.utils, 'disable_osd') @mock.patch.object(remove_disk, 'device_size') @mock.patch.object(remove_disk.charms_ceph.utils, 'stop_osd') @mock.patch.object(remove_disk, 'bcache_remove') @@ -76,7 +77,8 @@ def test_action_osd_constructor(self, bcache_names): @mock.patch.object(remove_disk.subprocess, 'check_call') @mock.patch.object(remove_disk, 'get_bcache_names') def test_action_osd_remove(self, get_bcache_names, check_call, - call, bcache_remove, stop_osd, device_size): + call, bcache_remove, stop_osd, device_size, + disable_osd): call.return_value = 0 get_bcache_names.return_value = ('/dev/backing', '/dev/caching') device_size.side_effect = lambda x: 1 if x == '/dev/caching' else 0 From 084163c2fc0613c563138a448cd464f86445e76a Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 22 Sep 2022 10:28:06 +0200 Subject: [PATCH 2429/2699] Fix: run main procedures in ops charm Before the conversion to the operator framework, the charm ran its default processes on every hook run via the __main__ block in the ceph_hooks module. This patch resurrects those default processes for the operator charm. Change-Id: Iffaec5287f248f61b737d79a1ea945e3125255d9 --- ceph-mon/src/charm.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index d0e3ebd4..4b10e003 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -1,4 +1,5 @@ #!
/usr/bin/python3 +import logging from ops.main import main @@ -109,9 +110,21 @@ def _make_method(fn): setattr(inst, method_name, method) self.framework.observe(on_action, getattr(self, method_name)) + def is_blocked_insecure_cmr(self): + remote_block = False + remote_unit_name = hooks.remote_unit() + if remote_unit_name and hooks.is_cmr_unit(remote_unit_name): + remote_block = not self.config['permit-insecure-cmr'] + return remote_block + def __init__(self, *args): super().__init__(*args) self._stored.is_started = True + + if self.is_blocked_insecure_cmr(): + logging.error("Not running hook, CMR detected and not supported") + return + fw = self.framework self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) @@ -182,3 +195,4 @@ def __init__(self, *args): if __name__ == '__main__': main(CephMonCharm) + hooks.assess_status() From 014c9384eaf49734eda6d876fc900bfb4d832cde Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 22 Sep 2022 10:28:06 +0200 Subject: [PATCH 2430/2699] Implement prometheus alert rules Alert rules can be attached as a resource and will be transmitted via the metrics-endpoint relation. Default alert rules taken from upstream ceph have been added for reference. Change-Id: I6a3c6f06e9b9d911b35c8ced1968becc6471b362 --- ceph-mon/README.md | 11 + .../prometheus_alerts.yml.default | 635 ++++++++++++++++++ .../prometheus_k8s/v0/prometheus_scrape.py | 91 +-- ceph-mon/metadata.yaml | 5 + ceph-mon/src/ceph_hooks.py | 5 +- ceph-mon/src/ceph_metrics.py | 78 ++- ceph-mon/src/charm.py | 20 +- ceph-mon/unit_tests/test_ceph_metrics.py | 64 ++ 8 files changed, 862 insertions(+), 47 deletions(-) create mode 100644 ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default diff --git a/ceph-mon/README.md b/ceph-mon/README.md index ded17a27..ccb57d38 100644 --- a/ceph-mon/README.md +++ b/ceph-mon/README.md @@ -143,6 +143,16 @@ The charm supports Ceph metric monitoring with Prometheus. Add relations to the Alternatively, integration with the [COS Lite][cos-lite] observability stack is available via the metrics-endpoint relation. +Relating to prometheus-k8s via the metrics-endpoint interface (as is +found in the [COS Lite][cos-lite] bundle) will send metrics to +prometheus and also configure alerting rules for it. Alerting rules +are supplied as a resource `alert-rules`; the default rules are taken +from [upstream ceph rules][ceph-rules]. It is possible to replace the +default with customized rules by attaching a resource: + + juju attach ceph-mon alert-rules=./my-prom-alerts.yaml.rules + ## Actions This section lists Juju [actions][juju-docs-actions] supported by the charm. @@ -228,3 +238,4 @@ For general charm questions refer to the OpenStack [Charm Guide][cg].
[upstream-ceph-buckets]: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets [jq]: https://stedolan.github.io/jq/ [cos-lite]: https://charmhub.io/cos-lite +[ceph-rules]: https://github.com/ceph/ceph/blob/351e1ac63950164ea5f08a6bfc7c14af586bb208/monitoring/ceph-mixin/prometheus_alerts.yml diff --git a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default new file mode 100644 index 00000000..a544d41e --- /dev/null +++ b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default @@ -0,0 +1,635 @@ +groups: + - name: "cluster health" + rules: + - alert: "CephHealthError" + annotations: + description: "The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information." + summary: "Ceph is in the ERROR state" + expr: "ceph_health_status == 2" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.2.1" + severity: "critical" + type: "ceph_default" + - alert: "CephHealthWarning" + annotations: + description: "The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information." + summary: "Ceph is in the WARNING state" + expr: "ceph_health_status == 1" + for: "15m" + labels: + severity: "warning" + type: "ceph_default" + - name: "mon" + rules: + - alert: "CephMonDownQuorumAtRisk" + annotations: + description: "{{ $min := query \"floor(count(ceph_mon_metadata) / 2) + 1\" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: {{- range query \"(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down" + summary: "Monitor quorum is at risk" + expr: | + ( + (ceph_health_detail{name="MON_DOWN"} == 1) * on() ( + count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1) + ) + ) == 1 + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.3.1" + severity: "critical" + type: "ceph_default" + - alert: "CephMonDown" + annotations: + description: | + {{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down" + summary: "One or more monitors down" + expr: | + count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1) + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephMonDiskspaceCritical" + annotations: + description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. 
Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem; /var/log and /var/tmp are often culprits. Your monitor hosts are: {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit" + summary: "Filesystem space on at least one monitor is critically low" + expr: "ceph_health_detail{name=\"MON_DISK_CRIT\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.3.2" + severity: "critical" + type: "ceph_default" + - alert: "CephMonDiskspaceLow" + annotations: + description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem; /var/log and /var/tmp are often culprits. Your monitor hosts are: {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low" + summary: "Drive space on at least one monitor is approaching full" + expr: "ceph_health_detail{name=\"MON_DISK_LOW\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephMonClockSkew" + annotations: + description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew" + summary: "Clock skew detected among monitors" + expr: "ceph_health_detail{name=\"MON_CLOCK_SKEW\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - name: "osd" + rules: + - alert: "CephOSDDownHigh" + annotations: + description: "{{ $value | humanize }}% or {{ with query \"count(ceph_osd_up == 0)\" }}{{ . | first | value }}{{ end }} of {{ with query \"count(ceph_osd_up)\" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%).
The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}" + summary: "More than 10% of OSDs are down" + expr: "count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.1" + severity: "critical" + type: "ceph_default" + - alert: "CephOSDHostDown" + annotations: + description: "The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} {{- end }}" + summary: "An OSD host is offline" + expr: "ceph_health_detail{name=\"OSD_HOST_DOWN\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.8" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDDown" + annotations: + description: | + {{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down" + summary: "An OSD has been marked down" + expr: "ceph_health_detail{name=\"OSD_DOWN\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.2" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDNearFull" + annotations: + description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull" + summary: "OSD(s) running low on free space (NEARFULL)" + expr: "ceph_health_detail{name=\"OSD_NEARFULL\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.3" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDFull" + annotations: + description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full" + summary: "OSD full, writes blocked" + expr: "ceph_health_detail{name=\"OSD_FULL\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.6" + severity: "critical" + type: "ceph_default" + - alert: "CephOSDBackfillFull" + annotations: + description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull" + summary: "OSD(s) too full for backfill operations" + expr: "ceph_health_detail{name=\"OSD_BACKFILLFULL\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTooManyRepairs" + annotations: + description: "Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs" + summary: "OSD reports a high number of read errors" + expr: "ceph_health_detail{name=\"OSD_TOO_MANY_REPAIRS\"} == 1" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTimeoutsPublicNetwork" + annotations: + description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs." + summary: "Network issues delaying OSD heartbeats (public network)" + expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_FRONT\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTimeoutsClusterNetwork" + annotations: + description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs." + summary: "Network issues delaying OSD heartbeats (cluster network)" + expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_BACK\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDInternalDiskSizeMismatch" + annotations: + description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in the future. You should redeploy the affected OSDs." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch" + summary: "OSD size inconsistency error" + expr: "ceph_health_detail{name=\"BLUESTORE_DISK_SIZE_MISMATCH\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephDeviceFailurePredicted" + annotations: + description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info '. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#id2" + summary: "Device(s) predicted to fail soon" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephDeviceFailurePredictionTooHigh" + annotations: + description: "The device health module has determined that devices predicted to fail cannot be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated."
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany" + summary: "Too many devices are predicted to fail, unable to resolve" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.7" + severity: "critical" + type: "ceph_default" + - alert: "CephDeviceFailureRelocationIncomplete" + annotations: + description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use" + summary: "Device failure is predicted, but unable to relocate data" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH_IN_USE\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDFlapping" + annotations: + description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up {{ $value | humanize }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)." + documentation: "https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds" + summary: "Network issues are causing OSDs to flap (mark each other down)" + expr: "(rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.4" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDReadErrors" + annotations: + description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors" + summary: "Device read errors detected" + expr: "ceph_health_detail{name=\"BLUESTORE_SPURIOUS_READ_ERRORS\"} == 1" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGImbalance" + annotations: + description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count." + summary: "PGs are not balanced across OSDs" + expr: | + abs( + ((ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) by (job)) / + on (job) group_left avg(ceph_osd_numpg > 0) by (job) + ) * on (ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30 + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.5" + severity: "warning" + type: "ceph_default" + - name: "mds" + rules: + - alert: "CephFilesystemDamaged" + annotations: + description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages" + summary: "CephFS filesystem is damaged." + expr: "ceph_health_detail{name=\"MDS_DAMAGE\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.1" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemOffline" + annotations: + description: "All MDS ranks are unavailable. 
The MDS daemons managing metadata are down, rendering the filesystem offline." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down" + summary: "CephFS filesystem is offline" + expr: "ceph_health_detail{name=\"MDS_ALL_DOWN\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.3" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemDegraded" + annotations: + description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded" + summary: "CephFS filesystem is degraded" + expr: "ceph_health_detail{name=\"FS_DEGRADED\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.4" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemMDSRanksLow" + annotations: + description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max" + summary: "Ceph MDS daemon count is lower than configured" + expr: "ceph_health_detail{name=\"MDS_UP_LESS_THAN_MAX\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephFilesystemInsufficientStandby" + annotations: + description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby" + summary: "Ceph filesystem standby daemons too few" + expr: "ceph_health_detail{name=\"MDS_INSUFFICIENT_STANDBY\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephFilesystemFailureNoStandby" + annotations: + description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds" + summary: "MDS daemon failed, no further standby available" + expr: "ceph_health_detail{name=\"FS_WITH_FAILED_MDS\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.5" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemReadOnly" + annotations: + description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages" + summary: "CephFS filesystem in read only mode due to write error(s)" + expr: "ceph_health_detail{name=\"MDS_HEALTH_READ_ONLY\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.2" + severity: "critical" + type: "ceph_default" + - name: "mgr" + rules: + - alert: "CephMgrModuleCrash" + annotations: + description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash" + summary: "A manager module has recently crashed" + expr: "ceph_health_detail{name=\"RECENT_MGR_MODULE_CRASH\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.6.1" + severity: "critical" + type: "ceph_default" + - alert: "CephMgrPrometheusModuleInactive" + annotations: + description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module, metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active. If the mgr is not active, restart it; otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'." + summary: "The mgr/prometheus module is not available" + expr: "up{job=\"ceph\"} == 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.6.2" + severity: "critical" + type: "ceph_default" + - name: "pgs" + rules: + - alert: "CephPGsInactive" + annotations: + description: "{{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. Inactive placement groups are not able to serve read/write requests." + summary: "One or more placement groups are inactive" + expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.1" + severity: "critical" + type: "ceph_default" + - alert: "CephPGsUnclean" + annotations: + description: "{{ $value }} PGs have been unclean for more than 15 minutes in pool {{ $labels.name }}. Unclean PGs have not recovered from a previous failure." + summary: "One or more placement groups are marked unclean" + expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0" + for: "15m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.2" + severity: "warning" + type: "ceph_default" + - alert: "CephPGsDamaged" + annotations: + description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use the 'ceph pg repair ' command." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged" + summary: "Placement group damaged, manual intervention needed" + expr: "ceph_health_detail{name=~\"PG_DAMAGED|OSD_SCRUB_ERRORS\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.4" + severity: "critical" + type: "ceph_default" + - alert: "CephPGRecoveryAtRisk" + annotations: + description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full" + summary: "OSDs are too full for recovery" + expr: "ceph_health_detail{name=\"PG_RECOVERY_FULL\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.5" + severity: "critical" + type: "ceph_default" + - alert: "CephPGUnavilableBlockingIO" + annotations: + description: "Data availability is reduced, impacting the cluster's ability to service I/O.
One or more placement groups (PGs) are in a state that blocks I/O." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability" + summary: "PG is unavailable, blocking I/O" + expr: "((ceph_health_detail{name=\"PG_AVAILABILITY\"} == 1) - scalar(ceph_health_detail{name=\"OSD_DOWN\"})) == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.3" + severity: "critical" + type: "ceph_default" + - alert: "CephPGBackfillAtRisk" + annotations: + description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full" + summary: "Backfill operations are blocked due to lack of free space" + expr: "ceph_health_detail{name=\"PG_BACKFILL_FULL\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.6" + severity: "critical" + type: "ceph_default" + - alert: "CephPGNotScrubbed" + annotations: + description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub " + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed" + summary: "Placement group(s) have not been scrubbed" + expr: "ceph_health_detail{name=\"PG_NOT_SCRUBBED\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGsHighPerOSD" + annotations: + description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs" + summary: "Placement groups per OSD is too high" + expr: "ceph_health_detail{name=\"TOO_MANY_PGS\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGNotDeepScrubbed" + annotations: + description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed" + summary: "Placement group(s) have not been deep scrubbed" + expr: "ceph_health_detail{name=\"PG_NOT_DEEP_SCRUBBED\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - name: "nodes" + rules: + - alert: "CephNodeRootFilesystemFull" + annotations: + description: "Root volume is dangerously full: {{ $value | humanize }}% free." 
+ summary: "Root filesystem is dangerously full" + expr: "node_filesystem_avail_bytes{mountpoint=\"/\"} / node_filesystem_size_bytes{mountpoint=\"/\"} * 100 < 5" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.8.1" + severity: "critical" + type: "ceph_default" + - alert: "CephNodeNetworkPacketDrops" + annotations: + description: "Node {{ $labels.instance }} experiences packet drop > 0.5% or > 10 packets/s on interface {{ $labels.device }}." + summary: "One or more NICs reports packet drops" + expr: | + ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) + ) / ( + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0050000000000000001 and ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) + ) >= 10 + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.8.2" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeNetworkPacketErrors" + annotations: + description: "Node {{ $labels.instance }} experiences packet errors > 0.01% or > 10 packets/s on interface {{ $labels.device }}." + summary: "One or more NICs reports packet errors" + expr: | + ( + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) + ) / ( + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) + ) >= 10 + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.8.3" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeDiskspaceWarning" + annotations: + description: "Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate." + summary: "Host filesystem free space is getting low" + expr: "predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5) *on(instance) group_left(nodename) node_uname_info < 0" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.8.4" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeInconsistentMTU" + annotations: + description: "Node {{ $labels.instance }} has a different MTU size ({{ $value }}) than the median of devices named {{ $labels.device }}." + summary: "MTU settings across Ceph hosts are inconsistent" + expr: "node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( max by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )or node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( min by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )" + labels: + severity: "warning" + type: "ceph_default" + - name: "pools" + rules: + - alert: "CephPoolGrowthWarning" + annotations: + description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours." 
+ summary: "Pool growth rate may soon exceed capacity" + expr: "(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right ceph_pool_metadata) >= 95" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.9.2" + severity: "warning" + type: "ceph_default" + - alert: "CephPoolBackfillFull" + annotations: + description: "A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity." + summary: "Free space in a pool is too low for recovery/backfill" + expr: "ceph_health_detail{name=\"POOL_BACKFILLFULL\"} > 0" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPoolFull" + annotations: + description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" }} - {{ .Labels.name }} at {{ .Value }}% {{- end }} Increase the pool's quota, or add capacity to the cluster first, then increase the pool's quota (e.g. ceph osd pool set quota max_bytes )" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full" + summary: "Pool is full - writes are blocked" + expr: "ceph_health_detail{name=\"POOL_FULL\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.9.1" + severity: "critical" + type: "ceph_default" + - alert: "CephPoolNearFull" + annotations: + description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first, then increase the pool's quota (e.g. ceph osd pool set quota max_bytes ). Also ensure that the balancer is active." + summary: "One or more Ceph pools are nearly full" + expr: "ceph_health_detail{name=\"POOL_NEAR_FULL\"} > 0" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - name: "healthchecks" + rules: + - alert: "CephSlowOps" + annotations: + description: "{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops" + summary: "OSD operations are slow to complete" + expr: "ceph_healthcheck_slow_ops > 0" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - name: "cephadm" + rules: + - alert: "CephadmUpgradeFailed" + annotations: + description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs to understand the nature of the issue." + summary: "Ceph version upgrade has failed" + expr: "ceph_health_detail{name=\"UPGRADE_EXCEPTION\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.11.2" + severity: "critical" + type: "ceph_default" + - alert: "CephadmDaemonFailed" + annotations: + description: "A daemon managed by cephadm is no longer active. Determine which daemon is down with 'ceph health detail'.
You may start daemons with the 'ceph orch daemon start '." + summary: "A ceph daemon managed by cephadm is down" + expr: "ceph_health_detail{name=\"CEPHADM_FAILED_DAEMON\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.11.1" + severity: "critical" + type: "ceph_default" + - alert: "CephadmPaused" + annotations: + description: "Cluster management has been paused manually. This prevents the orchestrator from performing service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'." + documentation: "https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused" + summary: "Orchestration tasks via cephadm are PAUSED" + expr: "ceph_health_detail{name=\"CEPHADM_PAUSED\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - name: "PrometheusServer" + rules: + - alert: "PrometheusJobMissing" + annotations: + description: "The prometheus job that scrapes from Ceph is no longer defined; this effectively means you will have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance." + summary: "The scrape job for Ceph is missing from Prometheus" + expr: "absent(up{job=\"ceph\"})" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.12.1" + severity: "critical" + type: "ceph_default" + - name: "rados" + rules: + - alert: "CephObjectMissing" + annotations: + description: "The latest version of a RADOS object cannot be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and then verified." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound" + summary: "Object(s) marked UNFOUND" + expr: "(ceph_health_detail{name=\"OBJECT_UNFOUND\"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.1" + severity: "critical" + type: "ceph_default" + - name: "generic" + rules: + - alert: "CephDaemonCrash" + annotations: + description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive ' command."
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash" + summary: "One or more Ceph daemons have crashed, and are pending acknowledgement" + expr: "ceph_health_detail{name=\"RECENT_CRASH\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.1.2" + severity: "critical" + type: "ceph_default" diff --git a/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index 85e922a9..6d8b9f8c 100644 --- a/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/ceph-mon/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -322,7 +322,6 @@ def _on_scrape_targets_changed(self, event): import socket import subprocess import tempfile -import uuid from pathlib import Path from typing import Dict, List, Optional, Tuple, Union @@ -339,7 +338,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 21 +LIBPATCH = 22 logger = logging.getLogger(__name__) @@ -356,7 +355,10 @@ def _on_scrape_targets_changed(self, event): "sample_limit", "label_limit", "label_name_length_limit", - "label_value_lenght_limit", + "label_value_length_limit", + "scheme", + "basic_auth", + "tls_config", } DEFAULT_JOB = { "metrics_path": "/metrics", @@ -639,6 +641,12 @@ def _from_file(self, root_path: Path, file_path: Path) -> List[dict]: logger.error("Failed to read alert rules from %s: %s", file_path.name, e) return [] + if not rule_file: + logger.warning("Empty rules file: %s", file_path.name) + return [] + if not isinstance(rule_file, dict): + logger.error("Invalid rules file (must be a dict): %s", file_path.name) + return [] if _is_official_alert_rule_format(rule_file): alert_groups = rule_file["groups"] elif _is_single_alert_rule_format(rule_file): @@ -920,7 +928,7 @@ def alerts(self) -> dict: for topology_identifier, alert_rule_groups in self.metrics_consumer.alerts().items(): filename = "juju_" + topology_identifier + ".rules" path = os.path.join(PROMETHEUS_RULES_DIR, filename) - rules = yaml.dump(alert_rule_groups) + rules = yaml.safe_dump(alert_rule_groups) container.push(path, rules, make_dirs=True) ``` @@ -937,7 +945,6 @@ def alerts(self) -> dict: if not alert_rules: continue - identifier = None try: scrape_metadata = json.loads(relation.data[relation.app]["scrape_metadata"]) identifier = JujuTopology.from_dict(scrape_metadata).identifier @@ -1118,7 +1125,7 @@ def _labeled_static_job_config(self, job, job_name_prefix, hosts, scrape_metadat # label all static configs in the Prometheus job # labeling inserts Juju topology information and - # sets a relable config for instance labels + # sets a relabeling config for instance labels for static_config in static_configs: labels = static_config.get("labels", {}) if static_configs else {} all_targets = static_config.get("targets", []) @@ -1187,7 +1194,7 @@ def _set_juju_labels(self, labels, scrape_metadata) -> dict: Returns: a copy of the `labels` dictionary augmented with Juju - topology information with the exception of unit name. + topology information except for unit name. """ juju_labels = labels.copy() # deep copy not needed juju_labels.update(JujuTopology.from_dict(scrape_metadata).label_matcher_dict) @@ -1262,7 +1269,7 @@ def _labeled_unit_config( def _dedupe_job_names(jobs: List[dict]): """Deduplicate a list of dicts by appending a hash to the value of the 'job_name' key. - Additionally fully dedeuplicate any identical jobs. 
+ Additionally, fully de-duplicate any identical jobs. Args: jobs: A list of prometheus scrape jobs @@ -1345,6 +1352,7 @@ def __init__( jobs=None, alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + external_hostname: str = None, ): """Construct a metrics provider for a Prometheus charm. @@ -1430,7 +1438,7 @@ Args: charm: a `CharmBase` object that manages this - `MetricsEndpointProvider` object. Typically this is + `MetricsEndpointProvider` object. Typically, this is `self` in the instantiating class. relation_name: an optional string name of the relation between `charm` and the Prometheus charmed service. The default is "metrics-endpoint". @@ -1449,6 +1457,8 @@ The alert rules are automatically updated on charm upgrade. refresh_event: an optional bound event or list of bound events which will be observed to re-set scrape job data (IP address and others) + external_hostname: an optional argument that represents an external hostname that + can be generated by an Ingress or a Proxy. Raises: RelationNotFoundError: If there is no relation in the charm's metadata.yaml @@ -1482,7 +1492,7 @@ # sanitize job configurations to the supported subset of parameters jobs = [] if jobs is None else jobs self._jobs = [_sanitize_scrape_configuration(job) for job in jobs] - + self.external_hostname = external_hostname events = self._charm.on[self._relation_name] self.framework.observe(events.relation_joined, self._set_scrape_job_spec) self.framework.observe(events.relation_changed, self._on_relation_changed) @@ -1510,7 +1520,7 @@ def __init__( refresh_event = [refresh_event] for ev in refresh_event: - self.framework.observe(ev, self._set_unit_ip) + self.framework.observe(ev, self._set_scrape_job_spec) self.framework.observe(self._charm.on.upgrade_charm, self._set_scrape_job_spec) @@ -1539,7 +1549,7 @@ def _set_scrape_job_spec(self, event): When a metrics provider charm is related to a prometheus charm, the metrics provider sets specification and metadata related to its own scrape configuration. This information is set using Juju application - data. In addition each of the consumer units also sets its own + data. In addition, each of the consumer units also sets its own host address in Juju unit relation data. """ self._set_unit_ip(event) @@ -1568,16 +1578,21 @@ def _set_unit_ip(self, _): Each time a metrics provider charm container is restarted it updates its own host address in the unit relation data for the prometheus charm. - The only argument specified is an event and it ignored. this is for expediency + The only argument specified is an event, and it is ignored. This is for expediency to be able to use this method as an event handler, although no access to the event is actually needed.
""" for relation in self._charm.model.relations[self._relation_name]: unit_ip = str(self._charm.model.get_binding(relation).network.bind_address) - relation.data[self._charm.unit]["prometheus_scrape_unit_address"] = ( - unit_ip if self._is_valid_unit_address(unit_ip) else socket.getfqdn() - ) + if self.external_hostname: + unit_address = self.external_hostname + elif self._is_valid_unit_address(unit_ip): + unit_address = unit_ip + else: + unit_address = socket.getfqdn() + + relation.data[self._charm.unit]["prometheus_scrape_unit_address"] = unit_address relation.data[self._charm.unit]["prometheus_scrape_unit_name"] = str( self._charm.model.unit.name ) @@ -1634,7 +1649,7 @@ class PrometheusRulesProvider(Object): relation_name: Name of the relation in `metadata.yaml` that has the `prometheus_scrape` interface. dir_path: Root directory for the collection of rule files. - recursive: Whether or not to scan for rule files recursively. + recursive: Whether to scan for rule files recursively. """ def __init__( @@ -1696,7 +1711,7 @@ class MetricsEndpointAggregator(Object): `MetricsEndpointAggregator` collects scrape target information from one or more related charms and forwards this to a `MetricsEndpointConsumer` - charm, which may be in a different Juju model. However it is + charm, which may be in a different Juju model. However, it is essential that `MetricsEndpointAggregator` itself resides in the same model as its scrape targets, as this is currently the only way to ensure in Juju that the `MetricsEndpointAggregator` will be able to @@ -1765,7 +1780,7 @@ class MetricsEndpointAggregator(Object): information, just like `MetricsEndpointProvider` and `MetricsEndpointConsumer` do. - By default `MetricsEndpointAggregator` ensures that Prometheus + By default, `MetricsEndpointAggregator` ensures that Prometheus "instance" labels refer to Juju topology. This ensures that instance labels are stable over unit recreation. While it is not advisable to change this option, if required it can be done by @@ -1778,7 +1793,7 @@ def __init__(self, charm, relation_names, relabel_instance=True): Args: charm: a `CharmBase` object that manages this - `MetricsEndpointAggregator` object. Typically this is + `MetricsEndpointAggregator` object. Typically, this is `self` in the instantiating class. relation_names: a dictionary with three keys. The value of the "scrape_target" and "alert_rules" keys are @@ -1843,7 +1858,7 @@ def _set_target_job_data(self, targets: dict, app_name: str, **kwargs) -> None: When there is any change in relation data with any scrape target, the Prometheus scrape job, for that specific target is updated. Additionally, if this method is called manually, do the - sameself. + same. Args: targets: a `dict` containing target information @@ -1985,7 +2000,7 @@ def _get_targets(self, relation) -> dict: Scrape target information is returned for each unit in the relation. This information contains the unit name, network - hostname (or address) for that unit, and port on which an + hostname (or address) for that unit, and port on which a metrics endpoint is exposed in that unit. Args: @@ -2142,7 +2157,7 @@ def _relabel_configs(self) -> list: labels are stable across unit recreation. Returns: - a list of Prometheus relabling configurations. Each item in + a list of Prometheus relabeling configurations. Each item in this list is one relabel configuration. 
""" return ( @@ -2216,22 +2231,7 @@ def validate_alert_rules(self, rules: dict) -> Tuple[bool, str]: with tempfile.TemporaryDirectory() as tmpdir: rule_path = Path(tmpdir + "/validate_rule.yaml") - - # Smash "our" rules format into what upstream actually uses, which is more like: - # - # groups: - # - name: foo - # rules: - # - alert: SomeAlert - # expr: up - # - alert: OtherAlert - # expr: up - transformed_rules = {"groups": []} # type: ignore - for rule in rules["groups"]: - transformed = {"name": str(uuid.uuid4()), "rules": [rule]} - transformed_rules["groups"].append(transformed) - - rule_path.write_text(yaml.dump(transformed_rules)) + rule_path.write_text(yaml.dump(rules)) args = [str(self.path), "validate", str(rule_path)] # noinspection PyBroadException @@ -2240,7 +2240,13 @@ def validate_alert_rules(self, rules: dict) -> Tuple[bool, str]: return True, "" except subprocess.CalledProcessError as e: logger.debug("Validating the rules failed: %s", e.output) - return False, ", ".join([line for line in e.output if "error validating" in line]) + return False, ", ".join( + [ + line + for line in e.output.decode("utf8").splitlines() + if "error validating" in line + ] + ) def inject_label_matchers(self, expression, topology) -> str: """Add label matchers to an expression.""" @@ -2277,6 +2283,5 @@ def _get_tool_path(self) -> Optional[Path]: return None def _exec(self, cmd) -> str: - result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE) - output = result.stdout.decode("utf-8").strip() - return output + result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + return result.stdout.decode("utf-8").strip() diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index d10de73b..18b9e0ab 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -43,3 +43,8 @@ provides: requires: bootstrap-source: interface: ceph-bootstrap +resources: + alert-rules: + type: file + filename: alert.yaml.rules + description: "Alerting rules" diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 0beac650..9b01164d 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -1235,7 +1235,7 @@ def is_unsupported_cmr(unit_name): return unsupported -def assess_status(): +def assess_status(charm=None): '''Assess status of current unit''' application_version_set(get_upstream_version(VERSION_PACKAGE)) if not config('permit-insecure-cmr'): @@ -1291,6 +1291,9 @@ def assess_status(): status_set('blocked', str(e)) return + if charm is not None and charm.metrics_endpoint.assess_alert_rule_errors(): + return + # active - bootstrapped + quorum status check if ceph.is_bootstrapped() and ceph.is_quorum(): expected_osd_count = config('expected-osd-count') or 3 diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index 4320543e..910879bf 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -5,9 +5,16 @@ Configure prometheus scrape jobs via the metrics-endpoint relation. 
""" +import json import logging +import os.path +import pathlib from typing import Optional, Union, List +import ops.model +from ops.model import BlockedStatus + +import charm from charms.prometheus_k8s.v0 import prometheus_scrape from charms_ceph import utils as ceph_utils from ops.framework import BoundEvent @@ -19,15 +26,16 @@ "metrics_path": "/metrics", "static_configs": [{"targets": ["*:9283"]}], } +DEFAULT_ALERT_RULES_RELATIVE_PATH = "files/prometheus_alert_rules" class CephMetricsEndpointProvider(prometheus_scrape.MetricsEndpointProvider): def __init__( self, - charm, + charm: charm.CephMonCharm, relation_name: str = prometheus_scrape.DEFAULT_RELATION_NAME, jobs=None, - alert_rules_path: str = prometheus_scrape.DEFAULT_ALERT_RULES_RELATIVE_PATH, # noqa + alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, ): if jobs is None: @@ -43,6 +51,11 @@ def __init__( self.framework.observe( events.relation_departed, self._on_relation_departed ) + self.framework.observe( + self.on.alert_rule_status_changed, + self._on_alert_rule_status_changed, + ) + charm._stored.set_default(alert_rule_errors=None) def _on_relation_changed(self, event): """Enable prometheus on relation change""" @@ -63,3 +76,64 @@ def _on_relation_departed(self, event): ) ceph_utils.mgr_disable_module("prometheus") logger.debug("module_disabled") + # We're not related to prom, don't care about alert rules + self._charm._stored.alert_rule_errors = None + + def assess_alert_rule_errors(self): + if self._charm._stored.alert_rule_errors: + self._charm.unit.status = BlockedStatus( + "invalid alert rules, check unit logs" + ) + return True + + def _on_alert_rule_status_changed(self, event): + logger.debug( + "alert rule status changed: %s, %s, %s", + event, + event.valid, + event.errors, + ) + if event.errors: + logger.warning("invalid alert rules: %s", event.errors) + self._charm._stored.alert_rule_errors = event.errors + else: + self._charm._stored.alert_rule_errors = None + + def get_alert_rules_resource(self): + try: + return self._charm.model.resources.fetch("alert-rules") + except ops.model.ModelError as e: + logger.warning("can't get alert-rules resource: %s", e) + + def _set_alert_rules(self, rules_dict): + logger.debug("set alert rules: %s", rules_dict) + # alert rules seem ok locally, clear any errors + # prometheus may still signal alert rule errors + # via the relation though + self._charm._stored.alert_rule_errors = None + + for relation in self._charm.model.relations[self._relation_name]: + relation.data[self._charm.app]["alert_rules"] = json.dumps( + rules_dict + ) + + def update_alert_rules(self): + if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): + resource = self.get_alert_rules_resource() + if resource is None or not os.path.getsize(resource): + logger.debug("empty rules resource, clearing alert rules") + self._set_alert_rules({}) + return + sink = pathlib.Path(self._alert_rules_path) / "alert.yaml.rules" + if sink.exists(): + sink.unlink() + sink.symlink_to(resource) + alert_rules = prometheus_scrape.AlertRules(topology=self.topology) + alert_rules.add_path(str(sink), recursive=True) + alert_rules_as_dict = alert_rules.as_dict() + if not alert_rules_as_dict: + msg = "invalid alert rules: {}".format(sink.open().read()) + logger.warning(msg) + self._charm._stored.alert_rule_errors = msg + return + self._set_alert_rules(alert_rules_as_dict) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 4b10e003..f3779a99 
100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -40,58 +40,77 @@ def on_install(self, event): systemd.service_pause('ceph-create-keys') except systemd.SystemdError: pass + hooks.assess_status(self) def on_config(self, event): hooks.config_changed() + hooks.assess_status(self) def on_pre_series_upgrade(self, event): hooks.pre_series_upgrade() + hooks.assess_status(self) def on_upgrade(self, event): + self.metrics_endpoint.update_alert_rules() hooks.upgrade_charm() + hooks.assess_status(self) def on_post_series_upgrade(self, event): hooks.post_series_upgrade() + hooks.assess_status(self) # Relations. def on_mon_relation_joined(self, event): hooks.mon_relation_joined() + hooks.assess_status(self) def on_bootstrap_source_relation_changed(self, event): hooks.bootstrap_source_relation_changed() + hooks.assess_status(self) def on_prometheus_relation_joined_or_changed(self, event): hooks.prometheus_relation() + hooks.assess_status(self) def on_prometheus_relation_departed(self, event): hooks.prometheus_left() + hooks.assess_status(self) def on_mon_relation(self, event): hooks.mon_relation() + hooks.assess_status(self) def on_osd_relation(self, event): hooks.osd_relation() + hooks.assess_status(self) def on_dashboard_relation_joined(self, event): hooks.dashboard_relation() + hooks.assess_status(self) def on_radosgw_relation(self, event): hooks.radosgw_relation() + hooks.assess_status(self) def on_rbd_mirror_relation(self, event): hooks.rbd_mirror_relation() + hooks.assess_status(self) def on_mds_relation(self, event): hooks.mds_relation_joined() + hooks.assess_status(self) def on_admin_relation(self, event): hooks.admin_relation_joined() + hooks.assess_status(self) def on_client_relation(self, event): hooks.client_relation() + hooks.assess_status(self) def on_nrpe_relation(self, event): hooks.update_nrpe_config() + hooks.assess_status(self) # Actions. @@ -195,4 +214,3 @@ def __init__(self, *args): if __name__ == '__main__': main(CephMonCharm) - hooks.assess_status() diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index 4bd16af3..16e89735 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -1,4 +1,8 @@ #!/usr/bin/env python3 +import json +import pathlib +import tempfile +import textwrap # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. @@ -9,10 +13,30 @@ from ops import storage, model, framework from ops.testing import Harness, _TestingModelBackend +import ceph_metrics # noqa: avoid circ. 
import import charm class TestCephMetrics(unittest.TestCase): + @classmethod + def setUpClass(cls): + """Run once before tests begin.""" + cls.tempdir = tempfile.TemporaryDirectory() + cls.tmp = pathlib.Path(cls.tempdir.name) + cls.rules_dir = cls.tmp / "rules" + cls.rules_dir.mkdir() + cls.rules = textwrap.dedent( + """ + groups: + - name: "testgroup" + rules: [] + """ + ) + + @classmethod + def tearDownClass(cls): + cls.tempdir.cleanup() + def setUp(self): super().setUp() self.harness = Harness(charm.CephMonCharm) @@ -42,9 +66,12 @@ def network_get(self, endpoint_name, relation_id=None): self.harness._model, ) # END Workaround + self.addCleanup(self.harness.cleanup) + self.harness.begin() self.harness.set_leader(True) + self.harness.charm.metrics_endpoint._alert_rules_path = self.rules_dir def test_init(self): self.assertEqual( @@ -94,3 +121,40 @@ def test_add_remove_rel( self.harness.remove_relation(rel_id) mgr_disable_module.assert_called_once() + + def get_alert_rules(self, rel_id): + app_rel_data = self.harness.get_relation_data( + rel_id, self.harness.model.app + ) + return json.loads(app_rel_data["alert_rules"]) + + @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) + @patch("ceph_metrics.CephMetricsEndpointProvider._set_alert_rules") + def test_update_alert_rules_empty( + self, set_alert_rules, _is_bootstrapped, + ): + """Test: no alert rules created with empty alert rules file.""" + rel_id = self.harness.add_relation("metrics-endpoint", "prometheus") + self.harness.add_relation_unit(rel_id, "prometheus/0") + self.harness.add_resource("alert-rules", "") + self.harness.charm.metrics_endpoint.update_alert_rules() + set_alert_rules.assert_called_with({}) + + @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) + def test_update_alert_rules_invalid(self, _is_bootstrapped): + rel_id = self.harness.add_relation("metrics-endpoint", "prometheus") + self.harness.add_relation_unit(rel_id, "prometheus/0") + self.harness.add_resource("alert-rules", "not-a-rule") + self.harness.charm.metrics_endpoint.update_alert_rules() + self.assertTrue( + self.harness.charm.metrics_endpoint.assess_alert_rule_errors() + ) + + @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) + def test_update_alert_rules(self, _is_bootstrapped): + rel_id = self.harness.add_relation("metrics-endpoint", "prometheus") + self.harness.add_relation_unit(rel_id, "prometheus/0") + self.harness.add_resource("alert-rules", self.rules) + self.harness.charm.metrics_endpoint.update_alert_rules() + alert_rules = self.get_alert_rules(rel_id) + self.assertTrue(alert_rules.get("groups")) From 47a6df6ea8b9bc816d1fd3283c6500b72b0c5f2a Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Thu, 22 Sep 2022 11:39:37 +0000 Subject: [PATCH 2431/2699] Adds primary/secondary multisite relation A new relation with primary/secondary nomenclature is added and the old master/slave relation is marked as *Deprecated*. In future, master/slave relation would be completely removed. 
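The hook-level mapping introduced by this change, summarised as a plain Python dict for illustration (this dict is documentation only, not charm code):

    # Deprecated hook            -> handler it now delegates to
    DEPRECATED_HOOKS = {
        "master-relation-joined":   "primary_relation_joined",
        "slave-relation-changed":   "secondary_relation_changed",
        "master-relation-departed": "multisite_relation_departed",
        "slave-relation-departed":  "multisite_relation_departed",
    }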
Change-Id: I9cda48b74a20aaa9a41baedc79332bfaf13951d3
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/926
---
 ceph-radosgw/hooks/hooks.py                   | 57 +++++++++++++---
 ...ation-broken => primary-relation-departed} |  0
 ...lation-changed => primary-relation-joined} |  0
 ...tion-broken => secondary-relation-changed} |  0
 ...ion-joined => secondary-relation-departed} |  0
 ceph-radosgw/hooks/utils.py                   | 20 +++---
 ceph-radosgw/metadata.yaml                    |  4 ++
 ceph-radosgw/unit_tests/test_hooks.py         | 68 +++++++++----------
 8 files changed, 96 insertions(+), 53 deletions(-)
 rename ceph-radosgw/hooks/{master-relation-broken => primary-relation-departed} (100%)
 rename ceph-radosgw/hooks/{master-relation-changed => primary-relation-joined} (100%)
 rename ceph-radosgw/hooks/{slave-relation-broken => secondary-relation-changed} (100%)
 rename ceph-radosgw/hooks/{slave-relation-joined => secondary-relation-departed} (100%)

diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py
index 7c5e9cd0..f7abed61 100755
--- a/ceph-radosgw/hooks/hooks.py
+++ b/ceph-radosgw/hooks/hooks.py
@@ -340,7 +340,7 @@ def _mon_relation():
         # NOTE(jamespage):
         # Multi-site deployments need to defer restart as the
-        # zone is not created until the master relation is
+        # zone is not created until the primary relation is
         # joined; restarting here will cause a restart burst
         # in systemd and stop the process restarting once
         # zone configuration is complete.
@@ -690,8 +690,8 @@ def radosgw_user_changed(relation_id=None):
                  'daemon-id': socket.gethostname()})


-@hooks.hook('master-relation-joined')
-def master_relation_joined(relation_id=None):
+@hooks.hook('primary-relation-joined')
+def primary_relation_joined(relation_id=None):
     if not ready_for_service(legacy=False):
         log('unit not ready, deferring multisite configuration')
         return
@@ -742,9 +742,9 @@ def master_relation_joined(relation_id=None):
         multisite.create_realm(realm, default=True)
         mutation = True

-    # Migration if master site has buckets configured.
+    # Migration if primary site has buckets configured.
     # Migration involves renaming existing zone/zonegroups such that existing
-    # buckets and their objects can be preserved on the master site.
+    # buckets and their objects can be preserved on the primary site.
if multisite.check_cluster_has_buckets() is True: log('Migrating to multisite with zone ({}) and zonegroup ({})' .format(zone, zonegroup), level=DEBUG) @@ -821,8 +821,8 @@ def master_relation_joined(relation_id=None): secret=secret) -@hooks.hook('master-relation-departed') -@hooks.hook('slave-relation-departed') +@hooks.hook('primary-relation-departed') +@hooks.hook('secondary-relation-departed') def multisite_relation_departed(): if not is_leader(): log('Cannot remove multisite relation, this unit is not the leader') @@ -868,8 +868,8 @@ def multisite_relation_departed(): raise RuntimeError("Residual multisite config at local site.") -@hooks.hook('slave-relation-changed') -def slave_relation_changed(relation_id=None, unit=None): +@hooks.hook('secondary-relation-changed') +def secondary_relation_changed(relation_id=None, unit=None): if not is_leader(): log('Cannot setup multisite configuration, this unit is not the ' 'leader') @@ -884,7 +884,7 @@ def slave_relation_changed(relation_id=None, unit=None): master_data.get('access_key'), master_data.get('secret'), master_data.get('url'))): - log("Defer processing until master RGW has provided required data") + log("Defer processing until primary RGW has provided required data") return public_url = '{}:{}'.format( @@ -933,6 +933,9 @@ def slave_relation_changed(relation_id=None, unit=None): if zone not in multisite.list_zones(): log('zone {} not found, creating now'.format(zone)) + multisite.pull_period(url=master_data['url'], + access_key=master_data['access_key'], + secret=master_data['secret']) multisite.create_zone(zone, endpoints=endpoints, default=False, master=False, @@ -953,6 +956,27 @@ def slave_relation_changed(relation_id=None, unit=None): log('No mutation detected.', 'INFO') +@hooks.hook('master-relation-departed') +@hooks.hook('slave-relation-departed') +def master_slave_relation_departed(): + log("departed relation is deprecated", "WARN") + multisite_relation_departed() + + +@hooks.hook('master-relation-joined') +def master_relation_joined(relation_id=None): + log("This relation is deprecated, use primary-secondary relation instead", + "WARN") + primary_relation_joined(relation_id) + + +@hooks.hook('slave-relation-changed') +def slave_relation_changed(relation_id=None, unit=None): + log("This relation is deprecated, use primary-secondary relation instead", + "WARN") + secondary_relation_changed(relation_id, unit) + + @hooks.hook('leader-settings-changed') def leader_settings_changed(): # NOTE: leader unit will only ever set leader storage @@ -962,19 +986,30 @@ def leader_settings_changed(): if restart_nonce_changed(leader_get('restart_nonce')): service_restart(service_name()) if not is_leader(): + # Deprecated Master/Slave relation for r_id in relation_ids('master'): master_relation_joined(r_id) + # Primary/Secondary relation + for r_id in relation_ids('primary'): + primary_relation_joined(r_id) for r_id in relation_ids('radosgw-user'): radosgw_user_changed(r_id) def process_multisite_relations(): - """Re-trigger any pending master/slave relations""" + """Re-trigger any pending multisite relations""" + # Deprecated Master/Slave relation for r_id in relation_ids('master'): master_relation_joined(r_id) for r_id in relation_ids('slave'): for unit in related_units(r_id): slave_relation_changed(r_id, unit) + # Primary/Secondary relation + for r_id in relation_ids('primary'): + primary_relation_joined(r_id) + for r_id in relation_ids('secondary'): + for unit in related_units(r_id): + secondary_relation_changed(r_id, unit) if __name__ == 
'__main__': diff --git a/ceph-radosgw/hooks/master-relation-broken b/ceph-radosgw/hooks/primary-relation-departed similarity index 100% rename from ceph-radosgw/hooks/master-relation-broken rename to ceph-radosgw/hooks/primary-relation-departed diff --git a/ceph-radosgw/hooks/master-relation-changed b/ceph-radosgw/hooks/primary-relation-joined similarity index 100% rename from ceph-radosgw/hooks/master-relation-changed rename to ceph-radosgw/hooks/primary-relation-joined diff --git a/ceph-radosgw/hooks/slave-relation-broken b/ceph-radosgw/hooks/secondary-relation-changed similarity index 100% rename from ceph-radosgw/hooks/slave-relation-broken rename to ceph-radosgw/hooks/secondary-relation-changed diff --git a/ceph-radosgw/hooks/slave-relation-joined b/ceph-radosgw/hooks/secondary-relation-departed similarity index 100% rename from ceph-radosgw/hooks/slave-relation-joined rename to ceph-radosgw/hooks/secondary-relation-departed diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 80da68fc..97d71d9f 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -218,10 +218,14 @@ def check_optional_config_and_relations(configs): leader_get('secret'), leader_get('restart_nonce')) + # An operator may have deployed both relations + primary_rids = relation_ids('master') + relation_ids('primary') + secondary_rids = relation_ids('slave') + relation_ids('secondary') + multisite_rids = primary_rids + secondary_rids + # Any realm or zonegroup config is present, multisite checks can be done. # zone config can't be used because it's used by default. - if config('realm') or config('zonegroup') or relation_ids('master') \ - or relation_ids('slave'): + if config('realm') or config('zonegroup') or multisite_rids: # All of Realm, zonegroup, and zone must be configured. if not all(multisite_config): return ('blocked', @@ -229,14 +233,14 @@ def check_optional_config_and_relations(configs): '(realm={realm}, zonegroup={zonegroup}' ', zone={zone})'.format(**config())) - # Master/Slave Relation should be configured. - if not (relation_ids('master') or relation_ids('slave')): + # Primary/Secondary Relation should be configured. + if not multisite_rids: return ('blocked', - 'multi-site configuration but master/slave ' + 'multi-site configuration but primary/secondary ' 'relation missing') # Primary site status check - if relation_ids('master'): + if primary_rids: # Migration: The system is not multisite already. if not multisite.is_multisite_configured(config('zone'), config('zonegroup')): @@ -261,7 +265,7 @@ def check_optional_config_and_relations(configs): 'waiting for configuration of master zone') # Secondary site status check - if relation_ids('slave'): + if secondary_rids: # Migration: The system is not multisite already. 
if not multisite.is_multisite_configured(config('zone'), config('zonegroup')): @@ -270,7 +274,7 @@ def check_optional_config_and_relations(configs): "Non-Pristine RGW site can't be used as secondary") multisite_ready = False - for rid in relation_ids('slave'): + for rid in secondary_rids: for unit in related_units(rid): if relation_get('url', unit=unit, rid=rid): multisite_ready = True diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index c89cd043..a0f590d3 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -30,6 +30,8 @@ requires: interface: tls-certificates slave: interface: radosgw-multisite + secondary: + interface: radosgw-multisite provides: nrpe-external-master: interface: nrpe-external-master @@ -38,6 +40,8 @@ provides: interface: http master: interface: radosgw-multisite + primary: + interface: radosgw-multisite object-store: interface: swift-proxy radosgw-user: diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index cec082cf..e0fed1aa 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -609,21 +609,21 @@ class MiscMultisiteTests(CharmTestCase): 'related_units', 'leader_get', 'is_leader', - 'master_relation_joined', - 'slave_relation_changed', + 'primary_relation_joined', + 'secondary_relation_changed', 'service_restart', 'service_name', 'multisite' ] _relation_ids = { - 'master': ['master:1'], - 'slave': ['slave:1'], + 'primary': ['primary:1'], + 'secondary': ['secondary:1'], } _related_units = { - 'master:1': ['rgw/0', 'rgw/1'], - 'slave:1': ['rgw-s/0', 'rgw-s/1'], + 'primary:1': ['rgw/0', 'rgw/1'], + 'secondary:1': ['rgw-s/0', 'rgw-s/1'], } def setUp(self): @@ -642,14 +642,14 @@ def test_leader_settings_changed(self): self.is_leader.return_value = False ceph_hooks.leader_settings_changed() self.service_restart.assert_called_once_with('rgw@hostname') - self.master_relation_joined.assert_called_once_with('master:1') + self.primary_relation_joined.assert_called_once_with('primary:1') def test_process_multisite_relations(self): ceph_hooks.process_multisite_relations() - self.master_relation_joined.assert_called_once_with('master:1') - self.slave_relation_changed.assert_has_calls([ - call('slave:1', 'rgw-s/0'), - call('slave:1', 'rgw-s/1'), + self.primary_relation_joined.assert_called_once_with('primary:1') + self.secondary_relation_changed.assert_has_calls([ + call('secondary:1', 'rgw-s/0'), + call('secondary:1', 'rgw-s/1'), ]) @@ -684,7 +684,7 @@ def setUp(self): self.systemd_based_radosgw.return_value = True -class MasterMultisiteTests(CephRadosMultisiteTests): +class PrimaryMultisiteTests(CephRadosMultisiteTests): _complete_config = { 'realm': 'testrealm', @@ -703,8 +703,8 @@ class MasterMultisiteTests(CephRadosMultisiteTests): 'restart_nonce': 'foobar', } - def test_master_relation_joined_missing_config(self): - ceph_hooks.master_relation_joined('master:1') + def test_primary_relation_joined_missing_config(self): + ceph_hooks.primary_relation_joined('primary:1') self.config.assert_has_calls([ call('realm'), call('zonegroup'), @@ -712,7 +712,7 @@ def test_master_relation_joined_missing_config(self): ]) self.relation_set.assert_not_called() - def test_master_relation_joined_create_everything(self): + def test_primary_relation_joined_create_everything(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.listen_port.return_value = 80 @@ -725,7 +725,7 @@ def test_master_relation_joined_create_everything(self): 
self.multisite.create_system_user.return_value = ( 'mykey', 'mysecret', ) - ceph_hooks.master_relation_joined('master:1') + ceph_hooks.primary_relation_joined('primary:1') self.config.assert_has_calls([ call('realm'), call('zonegroup'), @@ -768,12 +768,12 @@ def test_master_relation_joined_create_everything(self): call(restart_nonce=ANY), ]) self.relation_set.assert_called_with( - relation_id='master:1', + relation_id='primary:1', access_key='mykey', secret='mysecret', ) - def test_master_relation_joined_create_nothing(self): + def test_primary_relation_joined_create_nothing(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.is_leader.return_value = True @@ -786,7 +786,7 @@ def test_master_relation_joined_create_nothing(self): self.multisite.list_users.return_value = [ ceph_hooks.MULTISITE_SYSTEM_USER ] - ceph_hooks.master_relation_joined('master:1') + ceph_hooks.primary_relation_joined('primary:1') self.multisite.create_realm.assert_not_called() self.multisite.create_zonegroup.assert_not_called() self.multisite.create_zone.assert_not_called() @@ -795,15 +795,15 @@ def test_master_relation_joined_create_nothing(self): self.service_restart.assert_not_called() self.leader_set.assert_not_called() - def test_master_relation_joined_not_leader(self): + def test_primary_relation_joined_not_leader(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.listen_port.return_value = 80 self.is_leader.return_value = False self.leader_get.side_effect = lambda attr: self._leader_data.get(attr) - ceph_hooks.master_relation_joined('master:1') + ceph_hooks.primary_relation_joined('primary:1') self.relation_set.assert_called_once_with( - relation_id='master:1', + relation_id='primary:1', realm='testrealm', zonegroup='testzonegroup', url='http://rgw:80', @@ -833,7 +833,7 @@ def test_multisite_relation_departed(self, json_loads): ) -class SlaveMultisiteTests(CephRadosMultisiteTests): +class SecondaryMultisiteTests(CephRadosMultisiteTests): _complete_config = { 'realm': 'testrealm', @@ -846,7 +846,7 @@ class SlaveMultisiteTests(CephRadosMultisiteTests): 'zonegroup': 'testzonegroup', 'access_key': 'anotherkey', 'secret': 'anothersecret', - 'url': 'http://master:80' + 'url': 'http://primary:80' } _test_bad_relation = { @@ -854,10 +854,10 @@ class SlaveMultisiteTests(CephRadosMultisiteTests): 'zonegroup': 'anotherzg', 'access_key': 'anotherkey', 'secret': 'anothersecret', - 'url': 'http://master:80' + 'url': 'http://primary:80' } - def test_slave_relation_changed(self): + def test_secondary_relation_changed(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.is_leader.return_value = True @@ -867,7 +867,7 @@ def test_slave_relation_changed(self): self.multisite.list_realms.return_value = [] self.multisite.list_zones.return_value = [] self.multisite.check_cluster_has_buckets.return_value = False - ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.config.assert_has_calls([ call('realm'), call('zonegroup'), @@ -878,7 +878,7 @@ def test_slave_relation_changed(self): access_key=self._test_relation['access_key'], secret=self._test_relation['secret'], ) - self.multisite.pull_period.assert_called_once_with( + self.multisite.pull_period.assert_called_with( url=self._test_relation['url'], access_key=self._test_relation['access_key'], secret=self._test_relation['secret'], @@ -902,20 +902,20 @@ def test_slave_relation_changed(self): 
self.service_restart.assert_called_once() self.leader_set.assert_called_once_with(restart_nonce=ANY) - def test_slave_relation_changed_incomplete_relation(self): + def test_secondary_relation_changed_incomplete_relation(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.is_leader.return_value = True self.relation_get.return_value = {} - ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.config.assert_not_called() - def test_slave_relation_changed_mismatching_config(self): + def test_secondary_relation_changed_mismatching_config(self): for k, v in self._complete_config.items(): self.test_config.set(k, v) self.is_leader.return_value = True self.relation_get.return_value = self._test_bad_relation - ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.config.assert_has_calls([ call('realm'), call('zonegroup'), @@ -923,9 +923,9 @@ def test_slave_relation_changed_mismatching_config(self): ]) self.multisite.list_realms.assert_not_called() - def test_slave_relation_changed_not_leader(self): + def test_secondary_relation_changed_not_leader(self): self.is_leader.return_value = False - ceph_hooks.slave_relation_changed('slave:1', 'rgw/0') + ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.relation_get.assert_not_called() @patch.object(ceph_hooks, 'apt_install') From 8bb747a914541d180d4f2ddd0f043ed10413840f Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Mon, 26 Sep 2022 16:13:50 +0000 Subject: [PATCH 2432/2699] Sync charm-helpers for zed support Change-Id: I266226c771282194f809120be0271012705cb4bd --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++ .../charmhelpers/contrib/hahelpers/cluster.py | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 158 +++++++++++++----- .../contrib/openstack/ha/utils.py | 29 ++++ .../charmhelpers/contrib/openstack/ip.py | 25 +++ .../contrib/openstack/ssh_migrations.py | 4 +- .../charmhelpers/contrib/openstack/utils.py | 12 +- .../contrib/storage/linux/utils.py | 21 ++- ceph-osd/hooks/charmhelpers/core/host.py | 8 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/core/services/base.py | 3 +- .../hooks/charmhelpers/fetch/archiveurl.py | 31 +++- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 28 +++- ceph-osd/test-requirements.txt | 1 + 15 files changed, 278 insertions(+), 65 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py index bad7a533..ac002bc6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-osd/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -19,6 +19,7 @@ import glob import grp +import json import os import pwd import re @@ -30,6 +31,7 @@ from charmhelpers.core.hookenv import ( application_name, config, + ERROR, hook_name, local_unit, log, @@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): :param str unit_name: Unit name to use in check description :param bool immediate_check: For sysv init, run the service check immediately """ + # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details. + # just remove check_haproxy if haproxy is added as a lsb resource in hacluster. 
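+    # Illustrative note: hacluster publishes "json_resources" as a JSON map
+    # of resource name to resource agent, e.g. '{"res_haproxy": "lsb:haproxy"}',
+    # which is why the check below looks for "lsb:haproxy" in the values.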
+ for rid in relation_ids("ha"): + ha_resources = relation_get("json_resources", rid=rid, unit=local_unit()) + if ha_resources: + try: + ha_resources_parsed = json.loads(ha_resources) + except ValueError as e: + log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR) + raise + if "lsb:haproxy" in ha_resources_parsed.values(): + if "haproxy" in services: + log("removed check_haproxy. This service will be monitored by check_crm") + services.remove("haproxy") for svc in services: # Don't add a check for these services from neutron-gateway if svc in ['ext-port', 'os-charm-phy-nic-mtu']: diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index 146beba6..ffda5fe1 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -324,7 +324,7 @@ def valid_hacluster_config(): ''' vip = config_get('vip') dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): + if not (bool(vip) ^ bool(dns)): msg = ('HA: Either vip or dns-ha must be set but not both in order to ' 'use high availability') status_set('blocked', msg) diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index de56584d..cf9926b9 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -467,7 +467,7 @@ def ns_query(address): try: answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: + except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): return None if answers: @@ -539,7 +539,7 @@ def port_has_listener(address, port): """ cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) - return not(bool(result)) + return not (bool(result)) def assert_charm_supports_ipv6(): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 32c69ff7..5e33d188 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,7 @@ import time from base64 import b64decode +from distutils.version import LooseVersion from subprocess import ( check_call, check_output, @@ -39,6 +40,7 @@ from charmhelpers.fetch import ( apt_install, filter_installed_packages, + get_installed_version, ) from charmhelpers.core.hookenv import ( NoNetworkBinding, @@ -59,6 +61,7 @@ network_get_primary_address, WARNING, service_name, + remote_service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -130,6 +133,7 @@ ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" +DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404 def ensure_packages(packages): @@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): + _forward_compat_remaps = { + 'admin_user': 'admin-user-name', + 'service_username': 'service-user-name', + 'service_tenant': 'service-project-name', + 'service_tenant_id': 'service-project-id', + 'service_domain': 'service-domain-name', + } + def __init__(self, service=None, service_user=None, @@ -397,11 +409,16 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): # 'www_authenticate_uri' replaced 'auth_uri' since Stein, # see keystonemiddleware upstream sources for more info if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 
'stein': - c.update(( - ('www_authenticate_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) + if 'public_auth_url' in ctxt: + c.update(( + ('www_authenticate_uri', '{}/v3'.format( + ctxt.get('public_auth_url'))),)) + else: + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) else: c.update(( ('auth_uri', "{}://{}:{}/v3".format( @@ -409,11 +426,17 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): ctxt.get('service_host', ''), ctxt.get('service_port', ''))),)) + if 'internal_auth_url' in ctxt: + c.update(( + ('auth_url', ctxt.get('internal_auth_url')),)) + else: + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))),)) + c.update(( - ('auth_url', "{}://{}:{}/v3".format( - ctxt.get('auth_protocol', ''), - ctxt.get('auth_host', ''), - ctxt.get('auth_port', ''))), ('project_domain_name', ctxt.get('admin_domain_name', '')), ('user_domain_name', ctxt.get('admin_domain_name', '')), ('project_name', ctxt.get('admin_tenant_name', '')), @@ -441,7 +464,27 @@ def __call__(self): for rid in relation_ids(self.rel_name): self.related = True for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) + rdata = {} + # NOTE(jamespage): + # forwards compat with application data + # bag driven approach to relation. + _adata = relation_get(rid=rid, app=remote_service_name(rid)) + if _adata: + # New app data bag uses - instead of _ + # in key names - remap for compat with + # existing relation data keys + for key, value in _adata.items(): + if key == 'api-version': + rdata[key.replace('-', '_')] = value.strip('v') + else: + rdata[key.replace('-', '_')] = value + # Re-map some keys for backwards compatibility + for target, source in self._forward_compat_remaps.items(): + rdata[target] = _adata.get(source) + else: + # No app data bag presented - fallback + # to legacy unit based relation data + rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') @@ -475,6 +518,19 @@ def __call__(self): 'service_project_id': rdata.get('service_tenant_id'), 'service_domain_id': rdata.get('service_domain_id')}) + # NOTE: + # keystone-k8s operator presents full URLS + # for all three endpoints - public and internal are + # externally addressable for machine based charm + if 'public_auth_url' in rdata: + ctxt.update({ + 'public_auth_url': rdata.get('public_auth_url'), + }) + if 'internal_auth_url' in rdata: + ctxt.update({ + 'internal_auth_url': rdata.get('internal_auth_url'), + }) + # we keep all veriables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic # templating @@ -860,9 +916,14 @@ class HAProxyContext(OSContextGenerator): interfaces = ['cluster'] def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): + address_types=None, + exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT): + if address_types is None: + address_types = ADDRESS_TYPES[:] + self.address_types = address_types self.singlenode_mode = singlenode_mode + self.exporter_stats_port = exporter_stats_port def __call__(self): if not os.path.isdir(HAPROXY_RUN_DIR): @@ -957,10 +1018,20 @@ def __call__(self): db = kv() ctxt['stat_password'] = db.get('stat-password') if not 
ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) + ctxt['stat_password'] = db.set('stat-password', pwgen(32)) db.flush() + # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0 + # New bind will be created and a prometheus-exporter + # will be used for path /metrics. At the same time, + # prometheus-exporter avoids using auth. + haproxy_version = get_installed_version("haproxy") + if (haproxy_version and + haproxy_version.ver_str >= LooseVersion("2.0.0") and + is_relation_made("haproxy-exporter")): + ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter") + ctxt["stats_exporter_port"] = self.exporter_stats_port + for frontend in cluster_hosts: if (len(cluster_hosts[frontend]['backends']) > 1 or self.singlenode_mode): @@ -2560,14 +2631,18 @@ def _parse_cpu_list(cpulist): :rtype: List[int] """ cores = [] - ranges = cpulist.split(',') - for cpu_range in ranges: - if "-" in cpu_range: - cpu_min_max = cpu_range.split('-') - cores += range(int(cpu_min_max[0]), - int(cpu_min_max[1]) + 1) - else: - cores.append(int(cpu_range)) + if cpulist and re.match(r"^[0-9,\-^]*$", cpulist): + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + elif "^" in cpu_range: + cpu_rm = cpu_range.split('^') + cores.remove(int(cpu_rm[1])) + else: + cores.append(int(cpu_range)) return cores def _numa_node_cores(self): @@ -2586,36 +2661,32 @@ def _numa_node_cores(self): def cpu_mask(self): """Get hex formatted CPU mask - The mask is based on using the first config:dpdk-socket-cores cores of each NUMA node in the unit. :returns: hex formatted CPU mask :rtype: str """ - return self.cpu_masks()['dpdk_lcore_mask'] - - def cpu_masks(self): - """Get hex formatted CPU masks + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit, followed by the - next config:pmd-socket-cores + @classmethod + def pmd_cpu_mask(cls): + """Get hex formatted pmd CPU mask - :returns: Dict of hex formatted CPU masks - :rtype: Dict[str, str] + The mask is based on config:pmd-cpu-set. + :returns: hex formatted CPU mask + :rtype: str """ - num_lcores = config('dpdk-socket-cores') - pmd_cores = config('pmd-socket-cores') - lcore_mask = 0 - pmd_mask = 0 - for cores in self._numa_node_cores().values(): - for core in cores[:num_lcores]: - lcore_mask = lcore_mask | 1 << core - for core in cores[num_lcores:][:pmd_cores]: - pmd_mask = pmd_mask | 1 << core - return { - 'pmd_cpu_mask': format(pmd_mask, '#04x'), - 'dpdk_lcore_mask': format(lcore_mask, '#04x')} + mask = 0 + cpu_list = cls._parse_cpu_list(config('pmd-cpu-set')) + if cpu_list: + for core in cpu_list: + mask = mask | 1 << core + return format(mask, '#x') def socket_memory(self): """Formatted list of socket memory configuration per socket. 
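To make the cpulist grammar concrete, a few example inputs and the values _parse_cpu_list() above would return (worked out from its branches; these are not test cases from the tree):

    _parse_cpu_list("0-2,7")   # -> [0, 1, 2, 7]
    _parse_cpu_list("0-3,^1")  # -> [0, 2, 3]; "^n" removes a core added earlier
    # pmd_cpu_mask() then ORs 1 << core for each entry: [0, 2, 3] -> 0xd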
@@ -2694,6 +2765,7 @@ def __call__(self): ctxt['device_whitelist'] = self.device_whitelist() ctxt['socket_memory'] = self.socket_memory() ctxt['cpu_mask'] = self.cpu_mask() + ctxt['pmd_cpu_mask'] = self.pmd_cpu_mask() return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py index a5cbdf53..b4912c42 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -25,6 +25,7 @@ import hashlib import json +import os import re @@ -36,6 +37,7 @@ config, status_set, DEBUG, + application_name, ) from charmhelpers.core.host import ( @@ -65,6 +67,7 @@ VIP_GROUP_NAME = 'grp_{service}_vips' DNSHA_GROUP_NAME = 'grp_{service}_hostnames' +HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard" class DNSHAException(Exception): @@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data): relation_data['groups'] = { key: ' '.join(vip_group) } + + +def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard): + """Load grafana dashboard json model and insert prometheus datasource. + + :param prometheus_app_name: name of the 'prometheus' application that will + be used as datasource in grafana dashboard + :type prometheus_app_name: str + :param haproxy_dashboard: path to haproxy dashboard + :type haproxy_dashboard: str + :return: Grafana dashboard json model as a str. + :rtype: str + """ + from charmhelpers.contrib.templating import jinja + + dashboard_template = os.path.basename(haproxy_dashboard) + dashboard_template_dir = os.path.dirname(haproxy_dashboard) + app_name = application_name() + datasource = "{} - Juju generated source".format(prometheus_app_name) + return jinja.render(dashboard_template, + {"datasource": datasource, + "app_name": app_name, + "prometheus_app_name": prometheus_app_name}, + template_dir=dashboard_template_dir, + jinja_env_args={"variable_start_string": "<< ", + "variable_end_string": " >>"}) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py index b8c94c56..2afad369 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ip.py @@ -25,6 +25,7 @@ is_ipv6, get_ipv6_addr, resolve_network_cidr, + get_iface_for_address ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'): return unit_get(unit_get_fallback) +def get_invalid_vips(): + """Check if any of the provided vips are invalid. + A vip is invalid if it doesn't belong to the subnet in any interface. + If all vips are valid, this returns an empty list. + + :returns: A list of strings, where each string is an invalid vip address. + :rtype: list + """ + + clustered = is_clustered() + vips = config('vip') + if vips: + vips = vips.split() + invalid_vips = [] + + if clustered and vips: + for vip in vips: + iface_for_vip = get_iface_for_address(vip) + if iface_for_vip is None: + invalid_vips.append(vip) + + return invalid_vips + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. 
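A sketch of how a charm might act on get_invalid_vips() (the status handling shown is illustrative, not from this sync):

    invalid_vips = get_invalid_vips()
    if invalid_vips:
        # Block rather than configure a VIP no local interface can carry.
        status_set('blocked',
                   'Invalid VIPs: {}'.format(' '.join(invalid_vips)))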
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py index 96b9f71d..0512e3a5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/ssh_migrations.py @@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None): for hosts_line in hosts: if hosts_line.rstrip(): known_hosts_list.append(hosts_line.rstrip()) - return(known_hosts_list) + return known_hosts_list def ssh_authorized_keys_lines(application_name, user=None): @@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None): for authkey_line in keys: if authkey_line.rstrip(): authorized_keys_list.append(authkey_line.rstrip()) - return(authorized_keys_list) + return authorized_keys_list def ssh_compute_remove(public_key, application_name, user=None): diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index c8747c16..47e700e8 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -158,6 +158,7 @@ ('2021.1', 'wallaby'), ('2021.2', 'xena'), ('2022.1', 'yoga'), + ('2022.2', 'zed'), ]) # The ugly duckling - must list releases oldest to newest @@ -400,13 +401,16 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, + raise_exception=False): '''Determine OpenStack version number from codename.''' for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ 'codename: %s' % codename + if raise_exception: + raise ValueError(str(e)) error_out(e) @@ -1323,7 +1327,7 @@ def _check_listening_on_services_ports(services, test=False): @param test: default=False, if False, test for closed, otherwise open. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ - test = not(not(test)) # ensure test is True or False + test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] map_ports = OrderedDict() @@ -1579,7 +1583,7 @@ def is_unit_paused_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) + return not (not (kv.get('unit-paused'))) except Exception: return False @@ -2177,7 +2181,7 @@ def is_unit_upgrading_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. - return not(not(kv.get('unit-upgrading'))) + return not (not (kv.get('unit-upgrading'))) except Exception: return False diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py index a3561760..4d05b121 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -23,6 +23,12 @@ call ) +from charmhelpers.core.hookenv import ( + log, + WARNING, + INFO +) + def _luks_uuid(dev): """ @@ -110,7 +116,7 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False, inode_size=1024): +def mkfs_xfs(device, force=False, inode_size=None): """Format device with XFS filesystem. 
By default this should fail if the device already has a filesystem on it. @@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024): :ptype device: tr :param force: Force operation :ptype: force: boolean - :param inode_size: XFS inode size in bytes + :param inode_size: XFS inode size in bytes; if set to 0 or None, + the value used will be the XFS system default :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', "size={}".format(inode_size), device] + if inode_size: + if inode_size >= 256 and inode_size <= 2048: + cmd += ['-i', "size={}".format(inode_size)] + else: + log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING) + else: + log("Using XFS filesystem with system default inode size.", level=INFO) + + cmd += [device] check_call(cmd) diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index ad2cab46..70dde6a5 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -277,7 +277,7 @@ def service_resume(service_name, init_dir="/etc/init", return started -def service(action, service_name, **kwargs): +def service(action, service_name=None, **kwargs): """Control a system service. :param action: the action to take on the service @@ -286,7 +286,9 @@ def service(action, service_name, **kwargs): the form of key=value. """ if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action, service_name] + cmd = ['systemctl', action] + if service_name is not None: + cmd.append(service_name) else: cmd = ['service', service_name, action] for key, value in kwargs.items(): @@ -952,7 +954,7 @@ def pwgen(length=None): random_generator = random.SystemRandom() random_chars = [ random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) + return ''.join(random_chars) def is_phy_iface(interface): diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index 0906c5c0..cc2d89fe 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -30,6 +30,7 @@ 'hirsute', 'impish', 'jammy', + 'kinetic', ) diff --git a/ceph-osd/hooks/charmhelpers/core/services/base.py b/ceph-osd/hooks/charmhelpers/core/services/base.py index 7c37c65c..8d217b59 100644 --- a/ceph-osd/hooks/charmhelpers/core/services/base.py +++ b/ceph-osd/hooks/charmhelpers/core/services/base.py @@ -15,7 +15,8 @@ import os import json import inspect -from collections import Iterable, OrderedDict +from collections import OrderedDict +from collections.abc import Iterable from charmhelpers.core import host from charmhelpers.core import hookenv diff --git a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py index 2cb2e88b..0e35c901 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py +++ b/ceph-osd/hooks/charmhelpers/fetch/archiveurl.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
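# For context: the proxy_env() context manager added below scopes
# Juju-model proxy settings to the duration of a download, mirroring
# the pattern used in download() further on (URL here is illustrative):
#     with proxy_env():
#         install_opener(build_opener(ProxyHandler()))
#         response = urlopen('https://example.com/src.tar.gz')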
+import contextlib import os import hashlib import re @@ -24,11 +25,15 @@ get_archive_handler, extract, ) +from charmhelpers.core.hookenv import ( + env_proxy_settings, +) from charmhelpers.core.host import mkdir, check_hash from urllib.request import ( build_opener, install_opener, urlopen, urlretrieve, HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ProxyHandler ) from urllib.parse import urlparse, urlunparse, parse_qs from urllib.error import URLError @@ -50,6 +55,20 @@ def splitpasswd(user): return user, None +@contextlib.contextmanager +def proxy_env(): + """ + Creates a context which temporarily modifies the proxy settings in os.environ. + """ + restore = {**os.environ} # Copy the current os.environ + juju_proxies = env_proxy_settings() or {} + os.environ.update(**juju_proxies) # Insert or Update the os.environ + yield os.environ + for key in juju_proxies: + del os.environ[key] # remove any keys which were added or updated + os.environ.update(**restore) # restore any original values + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. @@ -80,6 +99,7 @@ def download(self, source, dest): # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) + handlers = [] if proto in ('http', 'https'): auth, barehost = splituser(netloc) if auth is not None: @@ -89,10 +109,13 @@ def download(self, source, dest): # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) + handlers.append(HTTPBasicAuthHandler(passman)) + + with proxy_env(): + handlers.append(ProxyHandler()) + opener = build_opener(*handlers) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'wb') as dest_file: dest_file.write(response.read()) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index e6f8a0ad..fcf09675 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -222,6 +222,18 @@ 'yoga/proposed': 'focal-proposed/yoga', 'focal-yoga/proposed': 'focal-proposed/yoga', 'focal-proposed/yoga': 'focal-proposed/yoga', + # Zed + 'zed': 'jammy-updates/zed', + 'jammy-zed': 'jammy-updates/zed', + 'jammy-zed/updates': 'jammy-updates/zed', + 'jammy-updates/zed': 'jammy-updates/zed', + 'zed/proposed': 'jammy-proposed/zed', + 'jammy-zed/proposed': 'jammy-proposed/zed', + 'jammy-proposed/zed': 'jammy-proposed/zed', + + # OVN + 'focal-ovn-22.03': 'focal-updates/ovn-22.03', + 'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03', } @@ -248,6 +260,7 @@ 'wallaby', 'xena', 'yoga', + 'zed', ) @@ -274,6 +287,7 @@ ('hirsute', 'wallaby'), ('impish', 'xena'), ('jammy', 'yoga'), + ('kinetic', 'zed'), ]) @@ -353,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :type quiet: bool :raises: subprocess.CalledProcessError """ + if not packages: + log("Nothing to install", level=DEBUG) + return if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -677,6 +694,7 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud-archive:(.*)$", _add_apt_repository), (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), + (r"^cloud:(.*)-(ovn-.*)$", 
_add_cloud_distro_check), (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), @@ -740,6 +758,11 @@ def _add_apt_repository(spec): ) +def __write_sources_list_d_actual_pocket(file, actual_pocket): + with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + def _add_cloud_pocket(pocket): """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list @@ -759,8 +782,9 @@ def _add_cloud_pocket(pocket): 'Unsupported cloud: source option %s' % pocket) actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + __write_sources_list_d_actual_pocket( + 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'), + actual_pocket) def _add_cloud_staging(cloud_archive_release, openstack_release): diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 636508fc..71172e38 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -43,3 +43,4 @@ tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. +pyopenssl<=22.0.0 From 57abb31ee69b76b588b3cbcedad3da2454ec827f Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Wed, 28 Sep 2022 07:54:41 +0000 Subject: [PATCH 2433/2699] Fixes openssl dependency issue for tests Change-Id: I2cfaf1de8d2096522cb435751be874df0d985578 --- ceph-radosgw/test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 4ef87dc5..5539f038 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -26,4 +26,5 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest +pyopenssl<=22.0.0 croniter # needed for charm-rabbitmq-server unit tests From 6dd2878fa634088b0a13667d9c81878374ca5bed Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 27 Sep 2022 14:01:43 -0400 Subject: [PATCH 2434/2699] Update tox.ini for charmcraft 2 Also, this fixes an issue with pyopenssl Change-Id: I8cb2d6ffeb27f58d97a2d4f8b32ced3c527a5f5e --- ceph-mon/osci.yaml | 1 + ceph-mon/test-requirements.txt | 4 ++++ ceph-mon/tox.ini | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 6970c873..0a254da2 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -8,6 +8,7 @@ needs_charm_build: true charm_build_name: ceph-mon build_type: charmcraft + charmcraft_channel: 2.0/stable check: jobs: - new-install-focal-yoga diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index eee61b69..20e94a7c 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -49,3 +49,7 @@ tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests + +# icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test +# failures. Pin pyopenssl to resolve the failure. 
+pyopenssl<=22.0.0 \ No newline at end of file diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index b22b7bb2..483e561a 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -48,7 +48,7 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh [testenv:py36] From 722846436dbe4fab013dc33d82832aba98075889 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 28 Sep 2022 22:44:29 +0200 Subject: [PATCH 2435/2699] Fix: make ceph_metrics test more robust Instead of messing with the harness' construction patch the missing network-get in place Change-Id: I162a0b73d76a3ed18689c2baf258372efe5f2ec4 --- ceph-mon/unit_tests/helpers.py | 23 +++++++++++++ ceph-mon/unit_tests/test_ceph_metrics.py | 43 +++++------------------- 2 files changed, 32 insertions(+), 34 deletions(-) create mode 100644 ceph-mon/unit_tests/helpers.py diff --git a/ceph-mon/unit_tests/helpers.py b/ceph-mon/unit_tests/helpers.py new file mode 100644 index 00000000..f0970674 --- /dev/null +++ b/ceph-mon/unit_tests/helpers.py @@ -0,0 +1,23 @@ +# Copyright 2020 Canonical Ltd. +# See LICENSE file for licensing details. + +from typing import Callable +from unittest.mock import patch + + +def patch_network_get(private_address="10.0.0.10") -> Callable: + def network_get(*args, **kwargs) -> dict: + """Patch for the not-yet-implemented testing backend needed for `bind_address`. + + This patch decorator can be used for cases such as: + self.model.get_binding(event.relation).network.bind_address + """ + return { + "bind-addresses": [ + { + "addresses": [{"value": private_address}], + } + ], + } + + return patch("ops.testing._TestingModelBackend.network_get", network_get) diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index 16e89735..8a5e3eba 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -1,22 +1,23 @@ #!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + import json import pathlib import tempfile import textwrap -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. - from unittest.mock import patch import unittest -from ops import storage, model, framework -from ops.testing import Harness, _TestingModelBackend +from ops.testing import Harness import ceph_metrics # noqa: avoid circ. 
import import charm +import helpers +@helpers.patch_network_get() class TestCephMetrics(unittest.TestCase): @classmethod def setUpClass(cls): @@ -40,35 +41,7 @@ def tearDownClass(cls): def setUp(self): super().setUp() self.harness = Harness(charm.CephMonCharm) - - # BEGIN: Workaround until network_get is implemented - class _TestingOPSModelBackend(_TestingModelBackend): - def network_get(self, endpoint_name, relation_id=None): - network_data = { - "bind-addresses": [ - { - "addresses": [{"value": "10.0.0.10"}], - } - ], - } - return network_data - - self.harness._backend = _TestingOPSModelBackend( - self.harness._unit_name, self.harness._meta - ) - self.harness._model = model.Model( - self.harness._meta, self.harness._backend - ) - self.harness._framework = framework.Framework( - storage.SQLiteStorage(":memory:"), - self.harness._charm_dir, - self.harness._meta, - self.harness._model, - ) - # END Workaround - self.addCleanup(self.harness.cleanup) - self.harness.begin() self.harness.set_leader(True) self.harness.charm.metrics_endpoint._alert_rules_path = self.rules_dir @@ -131,7 +104,9 @@ def get_alert_rules(self, rel_id): @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) @patch("ceph_metrics.CephMetricsEndpointProvider._set_alert_rules") def test_update_alert_rules_empty( - self, set_alert_rules, _is_bootstrapped, + self, + set_alert_rules, + _is_bootstrapped, ): """Test: no alert rules created with empty alert rules file.""" rel_id = self.harness.add_relation("metrics-endpoint", "prometheus") From 15c6e8fe7bec121116081fa428628ac63c2d092c Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 3 Oct 2022 11:13:58 -0400 Subject: [PATCH 2436/2699] bump testing to yoga alignment Change-Id: I2d0d3d799f0d48f2785dd9a6cf8b27d518665395 --- ceph-mon/.zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index 0eed1965..7ffc71cb 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs - openstack-cover-jobs From bd0f020c380fbff86ad9477fd9251e51ab6ff03b Mon Sep 17 00:00:00 2001 From: "Chi Wai, Chan" Date: Wed, 5 Oct 2022 14:56:23 +0800 Subject: [PATCH 2437/2699] Make check_ceph_status.py a bit more "noisy" by default. Closes-Bug: #1989154 Change-Id: Ie0d73f14698e4f3ba4e7231920a622f587b4330f --- ceph-mon/config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index c2509362..83248ffc 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -216,15 +216,15 @@ options: A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup. 
nagios_degraded_thresh: - default: 1.0 + default: 0.1 type: float description: "Threshold for degraded ratio (0.1 = 10%)" nagios_misplaced_thresh: - default: 1.0 + default: 0.1 type: float description: "Threshold for misplaced ratio (0.1 = 10%)" nagios_recovery_rate: - default: '1' + default: '100' type: string description: | Recovery rate (in objects/s) below which we consider recovery From 76ccca55ca47822ee1658199bb0d5e79c2d613c5 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 5 Oct 2022 10:20:17 +0200 Subject: [PATCH 2438/2699] Fix: ceph metrics alert rule symlinking Really remove existing symlinks, also improve type hinting Change-Id: I28f3ac85f22971bac63f58825842d0f5d712fad9 --- ceph-mon/src/ceph_metrics.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index 910879bf..baffbcf6 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -9,12 +9,14 @@ import logging import os.path import pathlib -from typing import Optional, Union, List +from typing import Optional, Union, List, TYPE_CHECKING import ops.model from ops.model import BlockedStatus -import charm +if TYPE_CHECKING: + import charm + from charms.prometheus_k8s.v0 import prometheus_scrape from charms_ceph import utils as ceph_utils from ops.framework import BoundEvent @@ -32,7 +34,7 @@ class CephMetricsEndpointProvider(prometheus_scrape.MetricsEndpointProvider): def __init__( self, - charm: charm.CephMonCharm, + charm: "charm.CephMonCharm", relation_name: str = prometheus_scrape.DEFAULT_RELATION_NAME, jobs=None, alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, @@ -125,7 +127,7 @@ def update_alert_rules(self): self._set_alert_rules({}) return sink = pathlib.Path(self._alert_rules_path) / "alert.yaml.rules" - if sink.exists(): + if sink.exists() or sink.is_symlink(): sink.unlink() sink.symlink_to(resource) alert_rules = prometheus_scrape.AlertRules(topology=self.topology) From e04886d8d24e09b99caac07f217c6ef7c7d5088b Mon Sep 17 00:00:00 2001 From: Edin Sarajlic Date: Fri, 17 Dec 2021 08:45:02 +0800 Subject: [PATCH 2439/2699] Add nagios check for expected number of OSDs This check does not require manually setting the number of expected OSDs. Initially, the charm sets the count (per-host) to that of what's present in the OSD tree. The count will be updated (on a per-host basis) when the number of OSDs grows, but not when it shrinks. There is a charm action to reset the expected count using information from the OSD tree. 
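For illustration (hypothetical values), given a stored report of
{"host1": [0, 1]}, the check stays OK while host1 keeps both OSDs and
goes CRITICAL once a listed OSD disappears from the osd tree:

    report:   {"host1": [0, 1]}
    osd tree: {"host1": [0, 1, 2]}  -> OK (the charm later grows the report)
    osd tree: {"host1": [1]}        -> CRITICAL (osd 0 missing)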
Closes-Bug: #1952985 Change-Id: Ia6a060bf151908c1d4159e6bdffa7bfe1f0a7988 --- ceph-mon/actions.yaml | 2 + ceph-mon/actions/reset-osd-count-report | 1 + ceph-mon/actions/reset_osd_count_report.py | 28 +++ ceph-mon/files/nagios/check_ceph_osd_count.py | 121 ++++++++++ ceph-mon/files/nagios/collect_ceph_status.sh | 9 + ceph-mon/src/ceph_hooks.py | 58 ++++- .../unit_tests/test_check_ceph_osd_count.py | 216 ++++++++++++++++++ 7 files changed, 434 insertions(+), 1 deletion(-) create mode 120000 ceph-mon/actions/reset-osd-count-report create mode 100755 ceph-mon/actions/reset_osd_count_report.py create mode 100755 ceph-mon/files/nagios/check_ceph_osd_count.py create mode 100644 ceph-mon/unit_tests/test_check_ceph_osd_count.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 9655a527..2ba87287 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -443,3 +443,5 @@ delete-user: required: [username] pg-repair: description: "Repair inconsistent placement groups, if safe to do so." +reset-osd-count-report: + description: "Update report of osds present in osd tree. Used for monitoring." diff --git a/ceph-mon/actions/reset-osd-count-report b/ceph-mon/actions/reset-osd-count-report new file mode 120000 index 00000000..ce265d1e --- /dev/null +++ b/ceph-mon/actions/reset-osd-count-report @@ -0,0 +1 @@ +reset_osd_count_report.py \ No newline at end of file diff --git a/ceph-mon/actions/reset_osd_count_report.py b/ceph-mon/actions/reset_osd_count_report.py new file mode 100755 index 00000000..0334c441 --- /dev/null +++ b/ceph-mon/actions/reset_osd_count_report.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path.append("hooks") +from ceph_hooks import update_host_osd_count_report + + +def reset_osd_count_report(): + update_host_osd_count_report(reset=True) + + +if __name__ == '__main__': + reset_osd_count_report() diff --git a/ceph-mon/files/nagios/check_ceph_osd_count.py b/ceph-mon/files/nagios/check_ceph_osd_count.py new file mode 100755 index 00000000..0703bfd7 --- /dev/null +++ b/ceph-mon/files/nagios/check_ceph_osd_count.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2021 Canonical +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
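+# Example of the two inputs compared below (illustrative values only).
+# The expected report maintained by the charm is a plain host -> OSD-id
+# map:
+#
+#     {"host1": [0, 1], "host2": [2]}
+#
+# The current state is the "nodes" list emitted by
+# ``ceph osd tree --format json``, e.g.:
+#
+#     {"nodes": [{"type": "host", "name": "host1", "children": [0, 1]}]}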
+ +import json +import os +import sys +import time + + +EXIT_OK = 0 +EXIT_WARN = 1 +EXIT_CRIT = 2 +EXIT_UNKNOWN = 3 +EXIT_CODE_TEXT = ["OK", "WARN", "CRITICAL", "UNKNOWN"] + +CURRENT_OSD_COUNT_FILE = "/var/lib/nagios/current-ceph-osd-count.json" + + +class CriticalError(Exception): + """This indicates a critical error.""" + + +def check_file_freshness(filename, newer_than=3600): + """Check a file exists, is readable and is newer than `newer_than` seconds. + + :param filename: The filename to check + :type filename: str + :param newer_than: The file should be newer than n seconds, default 3600 + :type newer_than: int + :raises CriticalError: If file is not readable or older than `newer_than` seconds + """ + # First check the file exists and is readable + if not os.path.exists(filename): + raise CriticalError("%s: does not exist." % (filename)) + if os.access(filename, os.R_OK) == 0: + raise CriticalError("%s: is not readable." % (filename)) + + # Then ensure the file is up-to-date enough + mtime = os.stat(filename).st_mtime + last_modified = time.time() - mtime + if last_modified > newer_than: + raise CriticalError("%s: was last modified on %s and is too old " + "(> %s seconds)." + % (filename, time.ctime(mtime), newer_than)) + if last_modified < 0: + raise CriticalError("%s: was last modified on %s which is in the " + "future." + % (filename, time.ctime(mtime))) + + +def check_ceph_osd_count(host_osd_count_report): + + with open(host_osd_count_report, "r") as f: + expected_osd_map = json.load(f) + + current_osd_map = get_osd_tree() + + exit_code = EXIT_OK + err_msgs = [] + for host, osd_list in expected_osd_map.items(): + if host not in current_osd_map: + err_msgs.append("Missing host {}".format(host)) + current_osd_map[host] = {} + + if len(osd_list) <= len(current_osd_map[host]): + continue + + missing_osds = list(set(osd_list) - set(current_osd_map[host])) + if missing_osds: + osd_ids = [str(osd) for osd in missing_osds] + err_msgs.append("Missing osds on " + "{}: {}".format(host, + ", ".join(osd_ids))) + exit_code = EXIT_CRIT + + return (exit_code, err_msgs) + + +def get_osd_tree(): + """Read CURRENT_OSD_COUNT_FILE to get the host osd map. + + :return: The map of node and osd ids.
+ :rtype: Dict[str: List[str]] + """ + check_file_freshness(CURRENT_OSD_COUNT_FILE) + with open(CURRENT_OSD_COUNT_FILE, "r") as f: + current_osd_counts = json.load(f) + + host_osd_map = {} + for node in current_osd_counts["nodes"]: + if node["type"] != "host": + continue + + host_osd_map[node["name"]] = node["children"] + + return host_osd_map + + +if __name__ == "__main__": + host_osd_report = sys.argv[1] + if not os.path.isfile(host_osd_report): + print("UNKNOWN: report file missing: {}".format(host_osd_report)) + sys.exit(EXIT_UNKNOWN) + + (exit_code, err_msgs) = check_ceph_osd_count(host_osd_report) + print("{} {}".format(EXIT_CODE_TEXT[exit_code], + ", ".join(err_msgs))) + sys.exit(exit_code) diff --git a/ceph-mon/files/nagios/collect_ceph_status.sh b/ceph-mon/files/nagios/collect_ceph_status.sh index a2e284e2..514c219c 100755 --- a/ceph-mon/files/nagios/collect_ceph_status.sh +++ b/ceph-mon/files/nagios/collect_ceph_status.sh @@ -22,3 +22,12 @@ ceph status --format json >${TMP_FILE} chown root:nagios ${TMP_FILE} chmod 0640 ${TMP_FILE} mv ${TMP_FILE} ${DATA_FILE} + +DATA_FILE="${DATA_DIR}/current-ceph-osd-count.json" +TMP_FILE=$(mktemp -p ${DATA_DIR}) + +ceph osd tree --format json > ${TMP_FILE} + +chown root:nagios ${TMP_FILE} +chmod 0640 ${TMP_FILE} +mv ${TMP_FILE} ${DATA_FILE} diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 9b01164d..e365bcaa 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -20,6 +20,7 @@ import subprocess import sys import uuid +import pathlib sys.path.append('lib') import charms_ceph.utils as ceph @@ -109,9 +110,11 @@ hooks = Hooks() NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' +NAGIOS_FILE_FOLDER = '/var/lib/nagios' SCRIPTS_DIR = '/usr/local/bin' -STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt' +STATUS_FILE = '{}/cat-ceph-status.txt'.format(NAGIOS_FILE_FOLDER) STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' +HOST_OSD_COUNT_REPORT = '{}/host-osd-report.json'.format(NAGIOS_FILE_FOLDER) def check_for_upgrade(): @@ -215,6 +218,44 @@ def emit_cephconf(): JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped' +def update_host_osd_count_report(reset=False): + """Update report showing hosts->osds. Used for monitoring.""" + current_osd_tree = ceph.get_osd_tree('admin') + + # Convert [CrushLocation,...] 
-> {<host>: [osdid],...} for easy comparison + current_host_osd_map = {} + for osd in current_osd_tree: + osd_list = current_host_osd_map.get(osd.host, []) + osd_list.append(osd.identifier) + current_host_osd_map[osd.host] = osd_list + + pathlib.Path(NAGIOS_FILE_FOLDER).mkdir(parents=True, exist_ok=True) + if not os.path.isfile(HOST_OSD_COUNT_REPORT) or reset: + write_file(HOST_OSD_COUNT_REPORT, '{}') + + with open(HOST_OSD_COUNT_REPORT, "r") as f: + expected_host_osd_map = json.load(f) + + if current_host_osd_map == expected_host_osd_map: + return + + for host, osd_list in current_host_osd_map.items(): + if host not in expected_host_osd_map: + expected_host_osd_map[host] = osd_list + + if len(osd_list) > len(expected_host_osd_map[host]): + # osd list is growing, add them to the expected + expected_host_osd_map[host] = osd_list + + if len(osd_list) == len(expected_host_osd_map[host]) and \ + osd_list != expected_host_osd_map[host]: + # different osd ids, maybe hdd swap, refresh + expected_host_osd_map[host] = osd_list + + write_file(HOST_OSD_COUNT_REPORT, + json.dumps(expected_host_osd_map)) + + @hooks.hook('config-changed') @harden() def config_changed(): @@ -884,6 +925,9 @@ def osd_relation(relid=None, unit=None): for relid in relation_ids('dashboard'): dashboard_relation(relid) + if ready_for_service(): + update_host_osd_count_report() + else: log('mon cluster not in quorum - deferring fsid provision') @@ -1143,6 +1187,10 @@ def update_nrpe_config(): 'check_ceph_status.py'), os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py')) + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', + 'check_ceph_osd_count.py'), + os.path.join(NAGIOS_PLUGINS, 'check_ceph_osd_count.py')) + script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh') rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', 'collect_ceph_status.sh'), @@ -1168,6 +1216,14 @@ def update_nrpe_config(): check_cmd=check_cmd ) + check_cmd = 'check_ceph_osd_count.py {} '.format( + HOST_OSD_COUNT_REPORT) + nrpe_setup.add_check( + shortname='ceph_osd_count', + description='Check if osd count matches expected count', + check_cmd=check_cmd + ) + if config('nagios_additional_checks'): additional_critical = config('nagios_additional_checks_critical') x = ast.literal_eval(config('nagios_additional_checks')) diff --git a/ceph-mon/unit_tests/test_check_ceph_osd_count.py b/ceph-mon/unit_tests/test_check_ceph_osd_count.py new file mode 100644 index 00000000..22aa382c --- /dev/null +++ b/ceph-mon/unit_tests/test_check_ceph_osd_count.py @@ -0,0 +1,216 @@ +# Copyright 2021 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
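+# A worked example (hypothetical values) of the grow-only report
+# semantics exercised below: an osd tree of {"host1": [0, 1]} against a
+# stored report of {"host1": [0]} grows the report, while an osd tree of
+# {"host1": [0]} against a report of {"host1": [0, 1]} leaves the report
+# untouched, so the nagios check keeps alerting on the vanished OSD until
+# the reset-osd-count-report action rebuilds it.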
+ +import os +import sys +import unittest + +from unittest.mock import patch, mock_open +from src.ceph_hooks import update_host_osd_count_report + +os.sys.path.insert(1, os.path.join(sys.path[0], 'lib')) +os.sys.path.insert(1, os.path.join(sys.path[0], 'files/nagios')) + +import check_ceph_osd_count + +from charms_ceph.utils import CrushLocation + + +class CheckCephOsdCountTestCase(unittest.TestCase): + + @patch("check_ceph_osd_count.get_osd_tree") + def test_check_equal_ceph_osd_trees(self, mock_get_osd_tree): + """Check that if current and expected osd trees match return OK exit""" + + current_osd_tree = {"host1": [0]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [0]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_OK) + + # change osd order + current_osd_tree = {"host1": [0, 1]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [1, 0]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_OK) + + @patch("check_ceph_osd_count.get_osd_tree") + def test_check_missing_expected_osd(self, mock_get_osd_tree): + """Check that missing expected osd returns appropriate exit code.""" + current_osd_tree = {"host1": [0]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [0, 1]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_CRIT) + + @patch("check_ceph_osd_count.get_osd_tree") + def test_check_missing_expected_host(self, + mock_get_osd_tree): + """Check that missing expected host returns appropriate exit code.""" + current_osd_tree = {"host1": [0]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [0], "host2": [1]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_CRIT) + + @patch("check_ceph_osd_count.get_osd_tree") + def test_check_change_osd_ids(self, mock_get_osd_tree): + """Check that a change in osd ids (of same length) is OK.""" + current_osd_tree = {"host1": [1], "host2": [3]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [0], "host2": [1]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_OK) + + @patch("check_ceph_osd_count.get_osd_tree") + def test_osd_tree_current_gt_expected(self, mock_get_osd_tree): + """Check that growing osd list is added to expected.""" + current_osd_tree = {"host1": [0, 1], "host2": [2]} + mock_get_osd_tree.return_value = current_osd_tree + expected_osd_tree = """{"host1": [0]}""" + with patch( + "check_ceph_osd_count.open", + mock_open(read_data=expected_osd_tree), + ) as file: + (exit_code, _) = check_ceph_osd_count.check_ceph_osd_count(file) + self.assertEqual(exit_code, check_ceph_osd_count.EXIT_OK) + + 
@patch("json.dumps") + @patch("src.ceph_hooks.write_file") + @patch("src.ceph_hooks.pathlib") + @patch("charms_ceph.utils.get_osd_tree") + def test_update_report_fresh_tree(self, + mock_get_osd_tree, + mock_pathlib, + mock_write_file, + mock_json_dumps): + """Check that an empty expected tree triggers an update to expected.""" + new_osd_tree = [CrushLocation(0, "osd.0", osd="osd.0", host="host1"), + CrushLocation(1, "osd.1", osd="osd.1", host="host1")] + new_osd_dict = {"host1": [0, 1]} + mock_get_osd_tree.return_value = new_osd_tree + + with patch( + "src.ceph_hooks.open", + mock_open(read_data="{}"), + ): + update_host_osd_count_report() + mock_json_dumps.assert_called_with(new_osd_dict) + + @patch("json.dumps") + @patch("src.ceph_hooks.write_file") + @patch("src.ceph_hooks.pathlib") + @patch("charms_ceph.utils.get_osd_tree") + def test_update_report_new_host(self, + mock_get_osd_tree, + mock_pathlib, + mock_write_file, + mock_json_dumps): + """Check that adding new host adds new host to expected tree.""" + new_osd_tree = [CrushLocation(0, "osd.0", osd="osd.0", host="host1"), + CrushLocation(1, "osd.1", osd="osd.1", host="host1"), + CrushLocation(2, "osd.2", osd="osd.2", host="host2")] + mock_get_osd_tree.return_value = new_osd_tree + with patch( + "src.ceph_hooks.open", + mock_open(read_data="""{"host1": [0, 1]}"""), + ): + update_host_osd_count_report() + mock_json_dumps.assert_called_with( + {"host1": [0, 1], "host2": [2]}) + + @patch("json.dumps") + @patch("src.ceph_hooks.write_file") + @patch("src.ceph_hooks.pathlib") + @patch("charms_ceph.utils.get_osd_tree") + def test_update_report_missing_host(self, + mock_get_osd_tree, + mock_pathlib, + mock_write_file, + mock_json_dumps): + """Check that missing host is not removed from expected tree.""" + new_osd_tree = [CrushLocation(0, "osd.0", osd="osd.0", host="host1"), + CrushLocation(2, "osd.2", osd="osd.2", host="host1")] + mock_get_osd_tree.return_value = new_osd_tree + with patch( + "src.ceph_hooks.open", + mock_open(read_data="""{"host1": [0], "host2": [1]}"""), + ): + update_host_osd_count_report() + mock_json_dumps.assert_called_with( + {"host1": [0, 2], "host2": [1]}) + + @patch("json.dumps") + @patch("src.ceph_hooks.write_file") + @patch("src.ceph_hooks.pathlib") + @patch("charms_ceph.utils.get_osd_tree") + def test_update_report_fewer_osds(self, + mock_get_osd_tree, + mock_pathlib, + mock_write_file, + mock_json_dumps): + """Check that report isn't updated when osd list shrinks.""" + new_osd_tree = [CrushLocation(0, "osd.0", osd="osd.0", host="host1")] + mock_get_osd_tree.return_value = new_osd_tree + with patch( + "src.ceph_hooks.open", + mock_open(read_data="""{"host1": [0, 1]}"""), + ): + update_host_osd_count_report() + mock_json_dumps.assert_called_with( + {"host1": [0, 1]}) + + @patch("json.dumps") + @patch("src.ceph_hooks.write_file") + @patch("src.ceph_hooks.pathlib") + @patch("charms_ceph.utils.get_osd_tree") + def test_update_report_diff_osd_ids(self, + mock_get_osd_tree, + mock_write_file, + mock_pathlib, + mock_json_dumps): + """Check that new osdid list (of same length) becomes new expected.""" + new_osd_tree = [CrushLocation(2, "osd.2", osd="osd.2", host="host1"), + CrushLocation(3, "osd.3", osd="osd.3", host="host1")] + mock_get_osd_tree.return_value = new_osd_tree + with patch( + "src.ceph_hooks.open", + mock_open(read_data="""{"host1": [0, 1]}"""), + ): + update_host_osd_count_report() + mock_json_dumps.assert_called_with( + {"host1": [2, 3]}) From 1daad67187e8a742ae841978832dcf2d2501cec9 Mon Sep 17 00:00:00 
2001 From: Chris MacNaughton Date: Thu, 29 Sep 2022 08:42:40 -0400 Subject: [PATCH 2440/2699] Rewrite the create-crush-rule action with the ops framework Change-Id: Ifaccd20ba4a0f148a38d14edf0c26bd4a4d5d655 --- ceph-mon/actions/create-crush-rule | 1 - ceph-mon/src/charm.py | 2 + ceph-mon/src/ops_actions/__init__.py | 1 + .../ops_actions}/create_crush_rule.py | 25 +++--- ceph-mon/unit_tests/test_ceph_actions.py | 82 ++++++++++--------- 5 files changed, 60 insertions(+), 51 deletions(-) delete mode 120000 ceph-mon/actions/create-crush-rule rename ceph-mon/{actions => src/ops_actions}/create_crush_rule.py (67%) mode change 100755 => 100644 diff --git a/ceph-mon/actions/create-crush-rule b/ceph-mon/actions/create-crush-rule deleted file mode 120000 index e4607fb7..00000000 --- a/ceph-mon/actions/create-crush-rule +++ /dev/null @@ -1 +0,0 @@ -create_crush_rule.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index f3779a99..7ea17a3d 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -151,6 +151,8 @@ def __init__(self, *args): ops_actions.change_osd_weight.change_osd_weight) self._observe_action(self.on.copy_pool_action, ops_actions.copy_pool.copy_pool) + self._observe_action(self.on.create_crush_rule_action, + ops_actions.create_crush_rule.create_crush_rule) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 0afa6266..571988ef 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -15,4 +15,5 @@ from . import ( # noqa: F401 change_osd_weight, copy_pool, + create_crush_rule, ) diff --git a/ceph-mon/actions/create_crush_rule.py b/ceph-mon/src/ops_actions/create_crush_rule.py old mode 100755 new mode 100644 similarity index 67% rename from ceph-mon/actions/create_crush_rule.py rename to ceph-mon/src/ops_actions/create_crush_rule.py index 65781132..453ac1ef --- a/ceph-mon/actions/create_crush_rule.py +++ b/ceph-mon/src/ops_actions/create_crush_rule.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2019 Canonical Ltd +# Copyright 2022 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,16 +14,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
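+# For example (illustrative parameter values, not from this patch), an
+# action invocation with {'name': 'replicated_nvme', 'failure-domain':
+# 'host', 'device-class': 'nvme'} ends up running:
+#
+#     ceph osd crush rule create-replicated replicated_nvme default host nvme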
+"""Creates a new CRUSH rule.""" + +import logging import subprocess -import charmhelpers.core.hookenv as hookenv +logger = logging.getLogger(__name__) -def create_crush_rule(): +def create_crush_rule(event) -> None: """Create a new CRUSH rule.""" - rule_name = hookenv.action_get('name') - failure_domain = hookenv.action_get('failure-domain') - device_class = hookenv.action_get('device-class') + + rule_name = event.params.get('name') + failure_domain = event.params.get('failure-domain') + device_class = event.params.get('device-class') + cmd = [ 'ceph', 'osd', 'crush', 'rule', 'create-replicated', @@ -36,8 +41,8 @@ def create_crush_rule(): try: subprocess.check_call(cmd) except subprocess.CalledProcessError as e: - hookenv.action_fail(str(e)) - + logger.warn(e) + event.fail("rule creation failed due to exception") + return -if __name__ == '__main__': - create_crush_rule() + event.set_results({'message': 'success'}) diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index c202deec..9b7d84fc 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -16,7 +16,7 @@ import subprocess import test_utils -import create_crush_rule +import ops_actions.copy_pool as copy_pool with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -31,7 +31,7 @@ class CopyPoolTestCase(test_utils.CharmTestCase): def setUp(self): self.harness = Harness(CephMonCharm) - @mock.patch.object(create_crush_rule.subprocess, 'check_call') + @mock.patch.object(copy_pool.subprocess, 'check_call') def test_copy_pool(self, mock_check_call): _action_data = { 'source': 'source-pool', @@ -45,7 +45,7 @@ def test_copy_pool(self, mock_check_call): 'source-pool', 'target-pool', ]) - @mock.patch.object(create_crush_rule.subprocess, 'check_call') + @mock.patch.object(copy_pool.subprocess, 'check_call') def test_copy_pool_failed(self, mock_check_call): _action_data = { 'source': 'source-pool', @@ -63,67 +63,69 @@ def test_copy_pool_failed(self, mock_check_call): class CreateCrushRuleTestCase(test_utils.CharmTestCase): - - TO_PATCH = [ - 'hookenv', - ] + """Run tests for action.""" def setUp(self): - super(CreateCrushRuleTestCase, self).setUp( - create_crush_rule, - self.TO_PATCH - ) + self.harness = Harness(CephMonCharm) + self.addCleanup(self.harness.cleanup) - @mock.patch.object(create_crush_rule.subprocess, 'check_call') + @mock.patch("ops_actions.create_crush_rule.subprocess.check_call") def test_create_crush_rule(self, mock_check_call): - _action_data = { - 'name': 'replicated_nvme', - 'failure-domain': 'host', - 'device-class': 'nvme', - } - self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) - create_crush_rule.create_crush_rule() - mock_check_call.assert_called_with([ + """Test reweight_osd action has correct calls.""" + self.harness.begin() + self.harness.charm.on_create_crush_rule_action( + test_utils.MockActionEvent({ + 'name': 'replicated_nvme', + 'failure-domain': 'host', + 'device-class': 'nvme', + })) + expected = [ 'ceph', 'osd', 'crush', 'rule', 'create-replicated', 'replicated_nvme', 'default', 'host', 'nvme', - ]) + ] + mock_check_call.assert_called_once_with(expected) - @mock.patch.object(create_crush_rule.subprocess, 'check_call') + @mock.patch("ops_actions.create_crush_rule.subprocess.check_call") def test_create_crush_rule_no_class(self, mock_check_call): - _action_data = { - 'name': 'replicated_whoknows', - 'failure-domain': 
'disk', - } - self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) - create_crush_rule.create_crush_rule() - mock_check_call.assert_called_with([ + """Test reweight_osd action has correct calls.""" + self.harness.begin() + self.harness.charm.on_create_crush_rule_action( + test_utils.MockActionEvent({ + 'name': 'replicated_whoknows', + 'failure-domain': 'disk', + })) + expected = [ 'ceph', 'osd', 'crush', 'rule', 'create-replicated', 'replicated_whoknows', 'default', - 'disk', - ]) + 'disk' + ] + mock_check_call.assert_called_once_with(expected) - @mock.patch.object(create_crush_rule.subprocess, 'check_call') + @mock.patch("ops_actions.create_crush_rule.subprocess.check_call") def test_create_crush_rule_failed(self, mock_check_call): - _action_data = { + """Test reweight_osd action has correct calls.""" + self.harness.begin() + mock_check_call.side_effect = subprocess.CalledProcessError(1, 'test') + event = test_utils.MockActionEvent({ 'name': 'replicated_nvme', 'failure-domain': 'host', 'device-class': 'nvme', - } - self.hookenv.action_get.side_effect = lambda k: _action_data.get(k) - mock_check_call.side_effect = subprocess.CalledProcessError(1, 'test') - create_crush_rule.create_crush_rule() - mock_check_call.assert_called_with([ + }) + self.harness.charm.on_create_crush_rule_action(event) + expected = [ 'ceph', 'osd', 'crush', 'rule', 'create-replicated', 'replicated_nvme', 'default', 'host', 'nvme', - ]) - self.hookenv.action_fail.assert_called_once_with(mock.ANY) + ] + mock_check_call.assert_called_once_with(expected) + event.fail.assert_called_once_with( + 'rule creation failed due to exception') From ab16fbba0aba6b017f3a6b0423acc1d7d6131f37 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 29 Sep 2022 09:56:49 -0400 Subject: [PATCH 2441/2699] rewrite create-erasure-profile with ops famework Change-Id: I27b0e926865ecb39ad4f5ad25de8266e9db75695 --- ceph-mon/actions/create-erasure-profile | 1 - ceph-mon/src/charm.py | 3 + ceph-mon/src/ops_actions/__init__.py | 1 + .../ops_actions}/create_erasure_profile.py | 70 ++++++------ ceph-mon/unit_tests/test_ceph_actions.py | 101 ++++++++++++++++++ 5 files changed, 139 insertions(+), 37 deletions(-) delete mode 120000 ceph-mon/actions/create-erasure-profile rename ceph-mon/{actions => src/ops_actions}/create_erasure_profile.py (69%) diff --git a/ceph-mon/actions/create-erasure-profile b/ceph-mon/actions/create-erasure-profile deleted file mode 120000 index e7625474..00000000 --- a/ceph-mon/actions/create-erasure-profile +++ /dev/null @@ -1 +0,0 @@ -create_erasure_profile.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 7ea17a3d..c16cffaa 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -153,6 +153,9 @@ def __init__(self, *args): ops_actions.copy_pool.copy_pool) self._observe_action(self.on.create_crush_rule_action, ops_actions.create_crush_rule.create_crush_rule) + self._observe_action( + self.on.create_erasure_profile_action, + ops_actions.create_erasure_profile.create_erasure_profile_action) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 571988ef..8711e196 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -16,4 +16,5 @@ change_osd_weight, copy_pool, create_crush_rule, + create_erasure_profile, ) diff --git a/ceph-mon/actions/create_erasure_profile.py 
b/ceph-mon/src/ops_actions/create_erasure_profile.py similarity index 69% rename from ceph-mon/actions/create_erasure_profile.py rename to ceph-mon/src/ops_actions/create_erasure_profile.py index 40673d7e..d84285be 100755 --- a/ceph-mon/actions/create_erasure_profile.py +++ b/ceph-mon/src/ops_actions/create_erasure_profile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2016 Canonical Ltd +# Copyright 2022 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,16 +17,18 @@ from subprocess import CalledProcessError from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile -from charmhelpers.core.hookenv import action_get, log, action_fail +import logging +logger = logging.getLogger(__name__) -def make_erasure_profile(): - name = action_get("name") - plugin = action_get("plugin") - failure_domain = action_get("failure-domain") - device_class = action_get("device-class") - k = action_get("data-chunks") - m = action_get("coding-chunks") + +def create_erasure_profile_action(event): + name = event.params.get("name") + plugin = event.params.get("plugin") + failure_domain = event.params.get("failure-domain") + device_class = event.params.get("device-class") + k = event.params.get("data-chunks") + m = event.params.get("coding-chunks") # jerasure requires k+m # isa requires k+m @@ -43,9 +45,9 @@ def make_erasure_profile(): failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(str(e))) + logger.warning(e) + event.fail("Create erasure profile failed with " + "message: {}".format(str(e))) elif plugin == "isa": try: create_erasure_profile(service='admin', @@ -56,12 +58,12 @@ def make_erasure_profile(): failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(str(e))) + logger.warning(e) + event.fail("Create erasure profile failed with " + "message: {}".format(str(e))) elif plugin == "lrc": - locality_chunks = action_get("locality-chunks") - crush_locality = action_get('crush-locality') + locality_chunks = event.params.get("locality-chunks") + crush_locality = event.params.get('crush-locality') try: create_erasure_profile(service='admin', erasure_plugin_name=plugin, @@ -73,11 +75,11 @@ def make_erasure_profile(): failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(str(e))) + logger.warning(e) + event.fail("Create erasure profile failed with " + "message: {}".format(str(e))) elif plugin == "shec": - c = action_get("durability-estimator") + c = event.params.get("durability-estimator") try: create_erasure_profile(service='admin', erasure_plugin_name=plugin, @@ -88,12 +90,12 @@ def make_erasure_profile(): failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(str(e))) + logger.warning(e) + event.fail("Create erasure profile failed with " + "message: {}".format(str(e))) elif plugin == "clay": - d = action_get("helper-chunks") - scalar_mds = action_get('scalar-mds') + d = event.params.get("helper-chunks") + scalar_mds = event.params.get('scalar-mds') try: create_erasure_profile(service='admin', 
erasure_plugin_name=plugin, @@ -105,15 +107,11 @@ def make_erasure_profile(): failure_domain=failure_domain, device_class=device_class) except CalledProcessError as e: - log(e) - action_fail("Create erasure profile failed with " - "message: {}".format(str(e))) + logger.warning(e) + event.fail("Create erasure profile failed with " + "message: {}".format(str(e))) else: # Unknown erasure plugin - action_fail("Unknown erasure-plugin type of {}. " - "Only jerasure, isa, lrc, shec or clay is " - "allowed".format(plugin)) - - -if __name__ == '__main__': - make_erasure_profile() + event.fail("Unknown erasure-plugin type of {}. " + "Only jerasure, isa, lrc, shec or clay is " + "allowed".format(plugin)) diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index 9b7d84fc..514b5dbb 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -129,3 +129,104 @@ def test_create_crush_rule_failed(self, mock_check_call): mock_check_call.assert_called_once_with(expected) event.fail.assert_called_once_with( 'rule creation failed due to exception') + + +class CreateErasureProfileTestCase(test_utils.CharmTestCase): + """Run tests for action.""" + + def setUp(self): + self.harness = Harness(CephMonCharm) + self.addCleanup(self.harness.cleanup) + + @mock.patch('ops_actions.create_erasure_profile.create_erasure_profile') + def test_create_jerasure_profile(self, mock_create_erasure_profile): + self.harness.begin() + self.harness.charm.on_create_erasure_profile_action( + test_utils.MockActionEvent({ + 'name': 'erasure', + 'plugin': 'jerasure', + 'failure-domain': 'disk', + 'k': 6, + 'm': 3, + })) + mock_create_erasure_profile.assert_called_once_with( + service='admin', erasure_plugin_name='jerasure', + profile_name='erasure', data_chunks=None, + coding_chunks=None, failure_domain='disk', device_class=None + ) + + @mock.patch('ops_actions.create_erasure_profile.create_erasure_profile') + def test_create_isa_profile(self, mock_create_erasure_profile): + self.harness.begin() + self.harness.charm.on_create_erasure_profile_action( + test_utils.MockActionEvent({ + 'name': 'erasure', + 'plugin': 'isa', + 'failure-domain': 'disk', + 'k': 6, + 'm': 3, + })) + mock_create_erasure_profile.assert_called_once_with( + service='admin', erasure_plugin_name='isa', + profile_name='erasure', data_chunks=None, + coding_chunks=None, failure_domain='disk', device_class=None + ) + + @mock.patch('ops_actions.create_erasure_profile.create_erasure_profile') + def test_create_lrc_profile(self, mock_create_erasure_profile): + self.harness.begin() + self.harness.charm.on_create_erasure_profile_action( + test_utils.MockActionEvent({ + 'name': 'erasure', + 'plugin': 'lrc', + 'failure-domain': 'disk', + 'k': 6, + 'm': 3, + 'locality-chunks': 2, + 'crush-locality': 'host', + })) + mock_create_erasure_profile.assert_called_once_with( + service='admin', erasure_plugin_name='lrc', + profile_name='erasure', data_chunks=None, + coding_chunks=None, locality=2, crush_locality='host', + failure_domain='disk', device_class=None + ) + + @mock.patch('ops_actions.create_erasure_profile.create_erasure_profile') + def test_create_shec_profile(self, mock_create_erasure_profile): + self.harness.begin() + self.harness.charm.on_create_erasure_profile_action( + test_utils.MockActionEvent({ + 'name': 'erasure', + 'plugin': 'shec', + 'failure-domain': 'disk', + 'k': 6, + 'm': 3, + 'durability-estimator': 2 + })) + mock_create_erasure_profile.assert_called_once_with( + 
service='admin', erasure_plugin_name='shec', + profile_name='erasure', data_chunks=None, + coding_chunks=None, durability_estimator=2, + failure_domain='disk', device_class=None + ) + + @mock.patch('ops_actions.create_erasure_profile.create_erasure_profile') + def test_create_clay_profile(self, mock_create_erasure_profile): + self.harness.begin() + self.harness.charm.on_create_erasure_profile_action( + test_utils.MockActionEvent({ + 'name': 'erasure', + 'plugin': 'clay', + 'failure-domain': 'disk', + 'k': 6, + 'm': 3, + 'helper-chunks': 2, + 'scalar-mds': 'jerasure' + })) + mock_create_erasure_profile.assert_called_once_with( + service='admin', erasure_plugin_name='clay', + profile_name='erasure', data_chunks=None, + coding_chunks=None, helper_chunks=2, + scalar_mds='jerasure', failure_domain='disk', device_class=None + ) From 210d6dcc21f91776785450973cf72b2ba9008dc5 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 30 Sep 2022 08:05:17 -0400 Subject: [PATCH 2442/2699] Rewrite get_health action with the Operator framework Change-Id: I68645a3d00c0622c7701c8177bcd510c3092afe4 --- ceph-mon/actions/ceph_ops.py | 14 ---------- ceph-mon/actions/get-health | 1 - ceph-mon/src/charm.py | 2 ++ ceph-mon/src/ops_actions/__init__.py | 1 + .../ops_actions}/get_health.py | 17 +++++++----- ceph-mon/unit_tests/test_actions_mon.py | 5 ---- ceph-mon/unit_tests/test_ceph_actions.py | 26 +++++++++++++++++++ 7 files changed, 39 insertions(+), 27 deletions(-) delete mode 120000 ceph-mon/actions/get-health rename ceph-mon/{actions => src/ops_actions}/get_health.py (71%) diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index a71c6869..10cc8ba0 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -83,20 +83,6 @@ def get_versions_report(): return json.dumps(report, indent=4) -def get_health(): - """ - Returns the output of 'ceph health'. - - On error, 'unknown' is returned. - """ - try: - value = check_output(['ceph', 'health']).decode('UTF-8') - return value - except CalledProcessError as e: - action_fail(str(e)) - return 'Getting health failed, health unknown' - - def pool_get(): """ Returns a key from a pool using 'ceph osd pool get'. 
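The rewritten action tests in this series construct events with
test_utils.MockActionEvent, which comes from the charm's pre-existing test
helpers rather than from these patches; a minimal stand-in with the assumed
shape would be:

    from unittest import mock

    class MockActionEvent:
        # Assumed sketch: ``params`` mirrors ops.ActionEvent.params;
        # ``set_results`` and ``fail`` are inspectable mocks.
        def __init__(self, params=None):
            self.params = params or {}
            self.set_results = mock.MagicMock()
            self.fail = mock.MagicMock()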
diff --git a/ceph-mon/actions/get-health b/ceph-mon/actions/get-health deleted file mode 120000 index 9c8a8000..00000000 --- a/ceph-mon/actions/get-health +++ /dev/null @@ -1 +0,0 @@ -get_health.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index c16cffaa..6a07a3b8 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -156,6 +156,8 @@ def __init__(self, *args): self._observe_action( self.on.create_erasure_profile_action, ops_actions.create_erasure_profile.create_erasure_profile_action) + self._observe_action(self.on.get_health_action, + ops_actions.get_health.get_health_action) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 8711e196..54aaec57 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -17,4 +17,5 @@ copy_pool, create_crush_rule, create_erasure_profile, + get_health, ) diff --git a/ceph-mon/actions/get_health.py b/ceph-mon/src/ops_actions/get_health.py similarity index 71% rename from ceph-mon/actions/get_health.py rename to ceph-mon/src/ops_actions/get_health.py index d1e0da48..b148c954 100755 --- a/ceph-mon/actions/get_health.py +++ b/ceph-mon/src/ops_actions/get_health.py @@ -14,15 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from subprocess import CalledProcessError +from subprocess import check_output, CalledProcessError +import logging -from ceph_ops import get_health -from charmhelpers.core.hookenv import log, action_set, action_fail -if __name__ == '__main__': +logger = logging.getLogger(__name__) + + +def get_health_action(event): try: - action_set({'message': get_health()}) + event.set_results( + {'message': check_output(['ceph', 'health']).decode('UTF-8')}) except CalledProcessError as e: - log(e) - action_fail( + logger.warning(e) + event.fail( "ceph health failed with message: {}".format(str(e))) diff --git a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py index ff54db0f..a09a7b0a 100644 --- a/ceph-mon/unit_tests/test_actions_mon.py +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -49,11 +49,6 @@ def setUp(self): "action_fail", "open"]) - def test_get_health(self): - actions.get_health() - cmd = ['ceph', 'health'] - self.check_output.assert_called_once_with(cmd) - def test_get_version_report_ok(self): def _call_rslt(): with open('unit_tests/ceph_ls_node.json') as f: diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index 514b5dbb..21520390 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -230,3 +230,29 @@ def test_create_clay_profile(self, mock_create_erasure_profile): coding_chunks=None, helper_chunks=2, scalar_mds='jerasure', failure_domain='disk', device_class=None ) + + +class GetHealthTestCase(test_utils.CharmTestCase): + """Run tests for action.""" + + def setUp(self): + self.harness = Harness(CephMonCharm) + self.harness.begin() + self.addCleanup(self.harness.cleanup) + + @mock.patch('ops_actions.get_health.check_output') + def test_get_health_action(self, mock_check_output): + mock_check_output.return_value = b'yay' + event = test_utils.MockActionEvent({}) + self.harness.charm.on_get_health_action(event) + event.set_results.assert_called_once_with(({'message': 'yay'})) + + 
@mock.patch('ops_actions.get_health.check_output') + def test_get_health_action_error(self, mock_check_output): + mock_check_output.side_effect = subprocess.CalledProcessError( + 1, 'test') + event = test_utils.MockActionEvent({}) + self.harness.charm.on_get_health_action(event) + event.fail.assert_called_once_with( + 'ceph health failed with message: ' + "Command 'test' returned non-zero exit status 1.") From e16b046fd97266c259c4e21bb472b565c88cf300 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 5 Oct 2022 10:31:50 +0200 Subject: [PATCH 2443/2699] Rewrite update status machinery with the ops framework Add a new module ceph_status for checking ceph-mon status. Provide the ceph_shared helpers for querying current status of ceph-mon units. Also add some initial testing for the charm module. Change-Id: I5079023ca692f0a2b7bfda96bb1834b8e9b1f0cc --- ceph-mon/src/ceph_hooks.py | 114 +--------- ceph-mon/src/ceph_metrics.py | 9 +- ceph-mon/src/ceph_shared.py | 88 ++++++++ ceph-mon/src/ceph_status.py | 147 +++++++++++++ ceph-mon/src/charm.py | 27 +-- ceph-mon/unit_tests/test_ceph_metrics.py | 2 +- ceph-mon/unit_tests/test_ceph_shared.py | 55 +++++ ceph-mon/unit_tests/test_ceph_status.py | 120 +++++++++++ ceph-mon/unit_tests/test_charm.py | 62 ++++++ ceph-mon/unit_tests/test_status.py | 251 ----------------------- 10 files changed, 485 insertions(+), 390 deletions(-) create mode 100644 ceph-mon/src/ceph_shared.py create mode 100644 ceph-mon/src/ceph_status.py create mode 100644 ceph-mon/unit_tests/test_ceph_shared.py create mode 100644 ceph-mon/unit_tests/test_ceph_status.py create mode 100644 ceph-mon/unit_tests/test_charm.py delete mode 100644 ceph-mon/unit_tests/test_status.py diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 9b01164d..bc80bd33 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -43,13 +43,12 @@ leader_set, leader_get, is_leader, remote_unit, - Hooks, UnregisteredHookError, + Hooks, service_name, relations_of_type, - relations, status_set, local_unit, - application_version_set) +) from charmhelpers.core.host import ( service_pause, mkdir, @@ -60,14 +59,12 @@ apt_install, filter_installed_packages, add_source, - get_upstream_version, ) from charmhelpers.contrib.openstack.alternatives import install_alternative from charmhelpers.contrib.openstack.utils import ( clear_unit_paused, clear_unit_upgrading, get_os_codename_install_source, - is_unit_upgrading_set, set_unit_paused, set_unit_upgrading, ) @@ -81,19 +78,15 @@ from charmhelpers.contrib.storage.linux.ceph import ( CephBrokerRq, CephConfContext, - OSD_SETTING_EXCEPTIONS, enable_pg_autoscale, - get_osd_settings, send_osd_settings, ) from utils import ( - add_rbd_mirror_features, assert_charm_supports_ipv6, get_cluster_addr, get_networks, get_public_addr, get_rbd_features, - has_rbd_mirrors, get_ceph_osd_releases, execute_post_osd_upgrade_steps, mgr_disable_module, @@ -1235,94 +1228,6 @@ def is_unsupported_cmr(unit_name): return unsupported -def assess_status(charm=None): - '''Assess status of current unit''' - application_version_set(get_upstream_version(VERSION_PACKAGE)) - if not config('permit-insecure-cmr'): - units = [unit - for rtype in relations() - for relid in relation_ids(reltype=rtype) - for unit in related_units(relid=relid) - if is_cmr_unit(unit)] - if units: - status_set("blocked", "Unsupported CMR relation") - return - if is_unit_upgrading_set(): - status_set("blocked", - "Ready for do-release-upgrade and reboot. 
" - "Set complete when finished.") - return - - # Check that the no-bootstrap config option is set in conjunction with - # having the bootstrap-source relation established - if not config('no-bootstrap') and is_relation_made('bootstrap-source'): - status_set('blocked', 'Cannot join the bootstrap-source relation when ' - 'no-bootstrap is False') - return - - moncount = int(config('monitor-count')) - units = get_peer_units() - # not enough peers and mon_count > 1 - if len(units.keys()) < moncount: - status_set('blocked', 'Insufficient peer units to bootstrap' - ' cluster (require {})'.format(moncount)) - return - - # mon_count > 1, peers, but no ceph-public-address - ready = sum(1 for unit_ready in units.values() if unit_ready) - if ready < moncount: - status_set('waiting', 'Peer units detected, waiting for addresses') - return - - configured_rbd_features = config('default-rbd-features') - if has_rbd_mirrors() and configured_rbd_features: - if add_rbd_mirror_features( - configured_rbd_features) != configured_rbd_features: - # The configured RBD features bitmap does not contain the features - # required for RBD Mirroring - status_set('blocked', 'Configuration mismatch: RBD Mirroring ' - 'enabled but incorrect value set for ' - '``default-rbd-features``') - return - - try: - get_osd_settings('client') - except OSD_SETTING_EXCEPTIONS as e: - status_set('blocked', str(e)) - return - - if charm is not None and charm.metrics_endpoint.assess_alert_rule_errors(): - return - - # active - bootstrapped + quorum status check - if ceph.is_bootstrapped() and ceph.is_quorum(): - expected_osd_count = config('expected-osd-count') or 3 - if sufficient_osds(expected_osd_count): - status_set('active', 'Unit is ready and clustered') - elif not relation_ids('osd'): - status_set('blocked', 'Missing relation: OSD') - else: - status_set( - 'waiting', - 'Monitor bootstrapped but waiting for number of' - ' OSDs to reach expected-osd-count ({})' - .format(expected_osd_count) - ) - else: - # Unit should be running and clustered, but no quorum - # TODO: should this be blocked or waiting? - status_set('blocked', 'Unit not clustered (no quorum)') - # If there's a pending lock for this unit, - # can i get the lock? - # reboot the ceph-mon process - - -@hooks.hook('update-status') -@harden() -def update_status(): - log('Updating status.') - - @hooks.hook('pre-series-upgrade') def pre_series_upgrade(): log("Running prepare series upgrade hook", "INFO") @@ -1342,18 +1247,3 @@ def post_series_upgrade(): # upgrading states. 
clear_unit_paused() clear_unit_upgrading() - - -if __name__ == '__main__': - remote_block = False - remote_unit_name = remote_unit() - if remote_unit_name and is_cmr_unit(remote_unit_name): - remote_block = not config('permit-insecure-cmr') - if remote_block: - log("Not running hook, CMR detected and not supported", "ERROR") - else: - try: - hooks.execute(sys.argv) - except UnregisteredHookError as e: - log('Unknown hook {} - skipping.'.format(e)) - assess_status() diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index baffbcf6..472f8fff 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -12,7 +12,6 @@ from typing import Optional, Union, List, TYPE_CHECKING import ops.model -from ops.model import BlockedStatus if TYPE_CHECKING: import charm @@ -81,12 +80,8 @@ def _on_relation_departed(self, event): # We're not related to prom, don't care about alert rules self._charm._stored.alert_rule_errors = None - def assess_alert_rule_errors(self): - if self._charm._stored.alert_rule_errors: - self._charm.unit.status = BlockedStatus( - "invalid alert rules, check unit logs" - ) - return True + def have_alert_rule_errors(self): + return bool(self._charm._stored.alert_rule_errors) def _on_alert_rule_status_changed(self, event): logger.debug( diff --git a/ceph-mon/src/ceph_shared.py b/ceph-mon/src/ceph_shared.py new file mode 100644 index 00000000..4b52d372 --- /dev/null +++ b/ceph-mon/src/ceph_shared.py @@ -0,0 +1,88 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Shared operator framework code + +Provide helpers for querying current status of ceph-mon units +""" +import logging +from typing import Mapping, List, Dict, TYPE_CHECKING + +from ops import model, framework + + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + import charm + + +class CephMonInfo(framework.Object): + """Provide status information about ceph-mon. + + Information about + - Relations + - Peer information + - CMR units + """ + + def __init__(self, charm: "charm.CephMonCharm"): + super().__init__(charm, "moninfo") + self.charm = charm + + @property + def relations(self) -> Mapping[str, List[model.Relation]]: + return self.charm.model.relations + + def get_peer_mons(self) -> Dict[model.Unit, model.RelationDataContent]: + """Retrieve information about ceph-mon peer units.""" + return self._get_related_unit_data("mon") + + def get_osd_units(self) -> Dict[model.Unit, model.RelationDataContent]: + """Retrieve information about related osd units.""" + return self._get_related_unit_data("osd") + + def _get_related_unit_data( + self, reltype: str + ) -> Dict[model.Unit, model.RelationDataContent]: + rel_units = [ + unit for rel in self.relations[reltype] for unit in rel.units + ] + rel_data = {} + for rel in self.relations[reltype]: + for unit in rel_units: + rel_data[unit] = rel.data.get(unit, {}) + return rel_data + + def remote_units(self) -> List[model.Unit]: + """Retrieve related CMR units.""" + remotes = [ + unit + for reltype in self.relations.values() + for rel in reltype + for unit in rel.units + if unit.name.startswith("remote-") + ] + return remotes + + def sufficient_osds(self, minimum_osds: int = 3) -> bool: + """ + Determine if the minimum number of OSDs has been + bootstrapped into the cluster. + + :param minimum_osds: The minimum number of OSDs required + :return: boolean indicating whether the required number of + OSDs was detected.
+ """ + osds = self.get_osd_units() + bootstrapped_osds = sum( + int(osd.get("bootstrapped-osds")) + for osd in osds.values() + if osd.get("bootstrapped-osds") + ) + if bootstrapped_osds >= minimum_osds: + return True + return False + + def have_osd_relation(self) -> bool: + return bool(self.relations["osd"]) diff --git a/ceph-mon/src/ceph_status.py b/ceph-mon/src/ceph_status.py new file mode 100644 index 00000000..e854b20f --- /dev/null +++ b/ceph-mon/src/ceph_status.py @@ -0,0 +1,147 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Provide status checking for the ceph-mon charm""" + +import logging +from typing import Union, TYPE_CHECKING + +from charmhelpers.core.hookenv import ( + application_version_set, + is_relation_made, +) +from charmhelpers.fetch import get_upstream_version +from ops import model + +import utils + +if TYPE_CHECKING: + import charm + +from charmhelpers.contrib.storage.linux import ceph as ch_ceph + +import charms_ceph.utils as ceph_utils +import ceph_shared + +logger = logging.getLogger(__name__) + +VERSION_PACKAGE = "ceph-common" + + +class StatusAssessor(ceph_shared.CephMonInfo): + """Status checking for ceph-mon charms + + Takes a ceph-mon charm object as a client, registers checking methods for + the charm object and updates status. + """ + + def __init__(self, charm: "charm.CephMonCharm"): + super().__init__(charm) + self.framework.observe( + self.framework.on.commit, self.assess_status + ) + self.register_checks() + + def config(self, key) -> Union[str, int, float, bool, None]: + return self.charm.model.config.get(key) + + def check_insecure_cmr(self) -> model.StatusBase: + if not self.config("permit-insecure-cmr") and self.remote_units(): + return model.BlockedStatus("Unsupported CMR relation") + return model.ActiveStatus() + + def check_bootstrap_source(self) -> model.StatusBase: + if not self.config("no-bootstrap") and is_relation_made( + "bootstrap-source" + ): + return model.BlockedStatus( + "Cannot join the bootstrap-source relation when " + "no-bootstrap is False", + ) + return model.ActiveStatus() + + def check_moncount(self) -> model.StatusBase: + moncount = self.config("monitor-count") + if ( + len(self.get_peer_mons()) + 1 < moncount + ): # we're including ourselves + return model.BlockedStatus( + "Insufficient peer units to bootstrap" + " cluster (require {})".format(moncount) + ) + return model.ActiveStatus() + + def check_ready_mons(self) -> model.StatusBase: + moncount = self.config("monitor-count") + mons = self.get_peer_mons() + ready = sum( + 1 for mon in mons.values() if mon.get("ceph-public-address") + ) + if ready + 1 < moncount: # "this" mon is ready presumably + return model.WaitingStatus( + "Peer units detected, waiting for addresses" + ) + return model.ActiveStatus() + + def check_rbd_features(self) -> model.StatusBase: + configured_rbd_features = self.config("default-rbd-features") + if utils.has_rbd_mirrors() and configured_rbd_features: + if ( + utils.add_rbd_mirror_features(configured_rbd_features) + != configured_rbd_features + ): + # The configured RBD features bitmap does not contain the + # features required for RBD Mirroring + return model.BlockedStatus( + "Configuration mismatch: RBD Mirroring " + "enabled but incorrect value set for " + "``default-rbd-features``", + ) + return model.ActiveStatus() + + def check_get_osd_settings(self): + try: + ch_ceph.get_osd_settings("client") + except ch_ceph.OSD_SETTING_EXCEPTIONS as e: + return model.BlockedStatus(str(e)) + return 
model.ActiveStatus() + + def check_alert_rule_errors(self): + if self.charm.metrics_endpoint.have_alert_rule_errors(): + return model.BlockedStatus("invalid alert rules, check unit logs") + return model.ActiveStatus() + + def check_expected_osd_count(self): + if ceph_utils.is_bootstrapped() and ceph_utils.is_quorum(): + expected_osd_count = self.config("expected-osd-count") or 3 + if self.sufficient_osds(expected_osd_count): + return model.ActiveStatus("Unit is ready and clustered") + elif not self.have_osd_relation(): + return model.BlockedStatus("Missing relation: OSD") + else: + return model.WaitingStatus( + "Monitor bootstrapped but waiting for number of" + " OSDs to reach expected-osd-count ({})".format( + expected_osd_count + ) + ) + else: + return model.BlockedStatus("Unit not clustered (no quorum)") + + def register_checks(self): + checkers = [ + self.check_insecure_cmr, + self.check_bootstrap_source, + self.check_moncount, + self.check_ready_mons, + self.check_rbd_features, + self.check_alert_rule_errors, + self.check_expected_osd_count, + ] + for check in checkers: + self.charm.register_status_check(check) + + def assess_status(self, _event): + logger.debug("Running assess_status() for %s", self.charm) + application_version_set(get_upstream_version(VERSION_PACKAGE)) + self.charm.update_status() diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index f3779a99..0491d54e 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -3,6 +3,7 @@ from ops.main import main +import ceph_status import charms.operator_libs_linux.v0.apt as apt import charms.operator_libs_linux.v1.systemd as systemd @@ -40,77 +41,62 @@ def on_install(self, event): systemd.service_pause('ceph-create-keys') except systemd.SystemdError: pass - hooks.assess_status(self) def on_config(self, event): hooks.config_changed() - hooks.assess_status(self) def on_pre_series_upgrade(self, event): hooks.pre_series_upgrade() - hooks.assess_status(self) def on_upgrade(self, event): self.metrics_endpoint.update_alert_rules() hooks.upgrade_charm() - hooks.assess_status(self) def on_post_series_upgrade(self, event): hooks.post_series_upgrade() - hooks.assess_status(self) # Relations. def on_mon_relation_joined(self, event): hooks.mon_relation_joined() - hooks.assess_status(self) def on_bootstrap_source_relation_changed(self, event): hooks.bootstrap_source_relation_changed() - hooks.assess_status(self) def on_prometheus_relation_joined_or_changed(self, event): hooks.prometheus_relation() - hooks.assess_status(self) def on_prometheus_relation_departed(self, event): hooks.prometheus_left() - hooks.assess_status(self) def on_mon_relation(self, event): hooks.mon_relation() - hooks.assess_status(self) def on_osd_relation(self, event): hooks.osd_relation() - hooks.assess_status(self) def on_dashboard_relation_joined(self, event): hooks.dashboard_relation() - hooks.assess_status(self) def on_radosgw_relation(self, event): hooks.radosgw_relation() - hooks.assess_status(self) def on_rbd_mirror_relation(self, event): hooks.rbd_mirror_relation() - hooks.assess_status(self) def on_mds_relation(self, event): hooks.mds_relation_joined() - hooks.assess_status(self) def on_admin_relation(self, event): hooks.admin_relation_joined() - hooks.assess_status(self) def on_client_relation(self, event): hooks.client_relation() - hooks.assess_status(self) def on_nrpe_relation(self, event): hooks.update_nrpe_config() - hooks.assess_status(self) + + def on_commit(self, _event): + self.ceph_status.assess_status() # Actions. 
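StatusAssessor relies on two charm-side helpers, register_status_check()
and update_status(), that are outside this diff; a plausible minimal
sketch (assumed implementation, where the first non-active result wins)
looks like:

    def register_status_check(self, check):
        # Checks run in registration order on every framework commit.
        self.custom_status_checks.append(check)

    def update_status(self):
        for check in self.custom_status_checks:
            result = check()
            if not isinstance(result, model.ActiveStatus):
                self.unit.status = result
                return
        self.unit.status = model.ActiveStatus()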
@@ -141,12 +127,15 @@ def __init__(self, *args): self._stored.is_started = True if self.is_blocked_insecure_cmr(): - logging.error("Not running hook, CMR detected and not supported") + logging.error( + "Not running hook, CMR detected and not supported") return fw = self.framework self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) + self.ceph_status = ceph_status.StatusAssessor(self) + self._observe_action(self.on.change_osd_weight_action, ops_actions.change_osd_weight.change_osd_weight) self._observe_action(self.on.copy_pool_action, diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index 8a5e3eba..0468d28d 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -122,7 +122,7 @@ def test_update_alert_rules_invalid(self, _is_bootstrapped): self.harness.add_resource("alert-rules", "not-a-rule") self.harness.charm.metrics_endpoint.update_alert_rules() self.assertTrue( - self.harness.charm.metrics_endpoint.assess_alert_rule_errors() + self.harness.charm.metrics_endpoint.have_alert_rule_errors() ) @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) diff --git a/ceph-mon/unit_tests/test_ceph_shared.py b/ceph-mon/unit_tests/test_ceph_shared.py new file mode 100644 index 00000000..9279d1b7 --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_shared.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +from unittest.mock import patch +import unittest + +from ops.testing import Harness + +import ceph_shared +import charm + + +@patch("charm.hooks") +class TestCephShared(unittest.TestCase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + self.addCleanup(self.harness.cleanup) + + def test_init(self, _hooks): + self.harness.begin() + ceph_info = ceph_shared.CephMonInfo(self.harness.charm) + self.assertTrue(ceph_info.relations) + + def test_get_peer_mons(self, _hooks): + self.harness.begin() + self.harness.set_leader(True) + ceph_info = ceph_shared.CephMonInfo(self.harness.charm) + self.harness.add_relation_unit( + self.harness.add_relation("mon", "ceph-mon"), "ceph-mon/0" + ) + peer_mons = ceph_info.get_peer_mons() + self.assertEqual(len(peer_mons), 1) + peer = list(peer_mons.keys())[0] + self.assertEqual(peer.name, "ceph-mon/0") + + def test_not_sufficient_osds(self, _hooks): + self.harness.begin() + ceph_info = ceph_shared.CephMonInfo(self.harness.charm) + rel_id = self.harness.add_relation("osd", "ceph-osd") + self.harness.add_relation_unit(rel_id, "ceph-osd/0") + have_enough = ceph_info.sufficient_osds(minimum_osds=77) + self.assertFalse(have_enough) + + def test_sufficient_osds(self, _hooks): + self.harness.begin() + ceph_info = ceph_shared.CephMonInfo(self.harness.charm) + rel_id = self.harness.add_relation("osd", "ceph-osd") + self.harness.add_relation_unit(rel_id, "ceph-osd/0") + self.harness.update_relation_data( + rel_id, "ceph-osd/0", {"bootstrapped-osds": "77"} + ) + have_enough = ceph_info.sufficient_osds(minimum_osds=77) + self.assertTrue(have_enough) diff --git a/ceph-mon/unit_tests/test_ceph_status.py b/ceph-mon/unit_tests/test_ceph_status.py new file mode 100644 index 00000000..f5e7960e --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_status.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +from unittest.mock import patch +import unittest + +from ops import model +from ops.testing import Harness + +import ceph_status +import charm + +from charmhelpers.contrib.storage.linux import ceph as ch_ceph + + +@patch("charm.hooks") +class TestCephStatus(unittest.TestCase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + self.addCleanup(self.harness.cleanup) + + def test_init(self, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + self.assertTrue(status.charm.custom_status_checks) + + def test_check_insecure_cmr(self, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + result = status.check_insecure_cmr() + self.assertIsInstance(result, model.ActiveStatus) + self.harness.add_relation_unit( + self.harness.add_relation("client", "remote"), "remote-foo/0" + ) + result = status.check_insecure_cmr() + self.assertIsInstance(result, model.BlockedStatus) + + def test_check_moncount(self, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + result = status.check_moncount() + self.assertIsInstance(result, model.BlockedStatus) + rel_id = self.harness.add_relation("mon", "ceph-mon") + for n in (0, 1, 2): + self.harness.add_relation_unit(rel_id, "ceph-mon/{}".format(n)) + result = status.check_moncount() + self.assertIsInstance(result, model.ActiveStatus) + + def test_check_ready_mons(self, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + result = status.check_ready_mons() + self.assertIsInstance(result, model.WaitingStatus) + rel_id = self.harness.add_relation("mon", "ceph-mon") + for n in (0, 1, 2): + self.harness.add_relation_unit(rel_id, "ceph-mon/{}".format(n)) + self.harness.update_relation_data( + rel_id, "ceph-mon/{}".format(n), {"ceph-public-address": "foo"} + ) + result = status.check_ready_mons() + self.assertIsInstance(result, model.ActiveStatus) + + @patch("ceph_status.ch_ceph.get_osd_settings") + def test_check_get_osd_settings(self, get_osd_settings, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + result = status.check_get_osd_settings() + self.assertIsInstance(result, model.ActiveStatus) + get_osd_settings.side_effect = ch_ceph.OSDSettingConflict( + "testexception" + ) + result = status.check_get_osd_settings() + self.assertIsInstance(result, model.BlockedStatus) + + def test_check_alert_rule_errors(self, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + with patch.object( + self.harness.charm, + "metrics_endpoint", + create=True, + ) as metrics_endpoint: + metrics_endpoint.have_alert_rule_errors.return_value = True + result = status.check_alert_rule_errors() + self.assertIsInstance(result, model.BlockedStatus) + + metrics_endpoint.have_alert_rule_errors.return_value = False + result = status.check_alert_rule_errors() + self.assertIsInstance(result, model.ActiveStatus) + + @patch("ceph_status.ceph_utils") + def test_check_expected_osd_count(self, ceph_utils, _hooks): + self.harness.begin() + status = ceph_status.StatusAssessor(self.harness.charm) + + # not bootstrapped + ceph_utils.is_bootstrapped.return_value = False + ceph_utils.is_quorum.return_value = False + result = status.check_expected_osd_count() + self.assertIsInstance(result, model.BlockedStatus) + self.assertEqual(result.message, "Unit not clustered (no quorum)") + + # bootstrapped, no osd rel + ceph_utils.is_bootstrapped.return_value = 
True + ceph_utils.is_quorum.return_value = True + result = status.check_expected_osd_count() + self.assertIsInstance(result, model.BlockedStatus) + self.assertEqual(result.message, "Missing relation: OSD") + + # bootstrapped, enough osds + rel_id = self.harness.add_relation("osd", "ceph-osd") + for n in (0, 1, 2): + self.harness.add_relation_unit(rel_id, "ceph-osd/{}".format(n)) + self.harness.update_relation_data( + rel_id, "ceph-osd/{}".format(n), {"bootstrapped-osds": "1"} + ) + result = status.check_expected_osd_count() + self.assertIsInstance(result, model.ActiveStatus) diff --git a/ceph-mon/unit_tests/test_charm.py b/ceph-mon/unit_tests/test_charm.py new file mode 100644 index 00000000..dc004fcb --- /dev/null +++ b/ceph-mon/unit_tests/test_charm.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +from unittest.mock import patch +import unittest + +from ops.testing import Harness + +import ceph_metrics # noqa: avoid circ. import +import charm + + +class TestCephCharm(unittest.TestCase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + self.harness.begin() + self.addCleanup(self.harness.cleanup) + + def test_init(self): + self.assertTrue(self.harness.charm.framework) + self.assertTrue(self.harness.charm.metrics_endpoint) + self.assertTrue(self.harness.charm.ceph_status) + + @patch("charm.hooks") + def test_on_config_changed(self, hooks): + self.harness.update_config({"permit-insecure-cmr": None}) + hooks.config_changed.assert_called() + + @patch("charm.ops_openstack.core.apt_install") + @patch("charm.ops_openstack.core.apt_update") + @patch("charm.ops_openstack.core.add_source") + @patch("charm.ops_openstack.core.OSBaseCharm.update_status") + @patch("charm.hooks") + @patch("charm.systemd") + @patch("charm.apt") + def test_on_install( + self, + _apt, + _systemd, + _hooks, + _update_status, + _add_source, + apt_update, + apt_install, + ): + self.harness.update_config({"permit-insecure-cmr": None}) + self.harness.charm.on.install.emit() + apt_install.assert_called_with( + [ + "ceph", + "gdisk", + "radosgw", + "lvm2", + "parted", + "smartmontools", + ], + fatal=True, + ) + apt_update.assert_called() diff --git a/ceph-mon/unit_tests/test_status.py b/ceph-mon/unit_tests/test_status.py deleted file mode 100644 index fffe17ff..00000000 --- a/ceph-mon/unit_tests/test_status.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest.mock as mock -import sys - -import test_utils - -import charmhelpers.contrib.storage.linux.ceph as ch_ceph - -# python-apt is not installed as part of test-requirements but is imported by -# some charmhelpers modules so create a fake import. 
-mock_apt = mock.MagicMock() -sys.modules['apt'] = mock_apt -mock_apt.apt_pkg = mock.MagicMock() - -with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: - mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: - lambda *args, **kwargs: f(*args, **kwargs)) - import ceph_hooks as hooks - -TO_PATCH = [ - 'status_set', - 'config', - 'ceph', - 'is_relation_made', - 'relations', - 'relation_ids', - 'relation_get', - 'related_units', - 'local_unit', - 'application_version_set', - 'get_upstream_version', -] - -NO_PEERS = { - 'ceph-mon1': True -} - -ENOUGH_PEERS_INCOMPLETE = { - 'ceph-mon1': True, - 'ceph-mon2': False, - 'ceph-mon3': False, -} - -ENOUGH_PEERS_COMPLETE = { - 'ceph-mon1': True, - 'ceph-mon2': True, - 'ceph-mon3': True, -} - - -class ServiceStatusTestCase(test_utils.CharmTestCase): - def setUp(self): - super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH) - self.config.side_effect = self.test_config.get - self.test_config.set('monitor-count', 3) - self.local_unit.return_value = 'ceph-mon1' - self.get_upstream_version.return_value = '10.2.2' - self.is_relation_made.return_value = False - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_no_peers(self, _peer_units): - _peer_units.return_value = NO_PEERS - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_incomplete(self, _peer_units): - _peer_units.return_value = ENOUGH_PEERS_INCOMPLETE - hooks.assess_status() - self.status_set.assert_called_with('waiting', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'get_osd_settings') - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_complete_active(self, _peer_units, - _sufficient_osds, - _has_rbd_mirrors, - _get_osd_settings): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = True - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - _has_rbd_mirrors.return_value = False - _get_osd_settings.return_value = {} - hooks.assess_status() - self.status_set.assert_called_with('active', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'relation_ids') - @mock.patch.object(hooks, 'get_osd_settings') - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_no_osd_relation( - self, - _peer_units, - _sufficient_osds, - _has_rbd_mirrors, - _get_osd_settings, - _relation_ids - ): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = False - _relation_ids.return_value = [] - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - _has_rbd_mirrors.return_value = False - _get_osd_settings.return_value = {} - hooks.assess_status() - self.status_set.assert_called_with('blocked', 'Missing relation: OSD') - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'relation_ids') - @mock.patch.object(hooks, 'get_osd_settings') - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def 
test_assess_status_osd_relation_but_insufficient_osds( - self, - _peer_units, - _sufficient_osds, - _has_rbd_mirrors, - _get_osd_settings, - _relation_ids - ): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = False - _relation_ids.return_value = ['osd:1'] - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - _has_rbd_mirrors.return_value = False - _get_osd_settings.return_value = {} - hooks.assess_status() - self.status_set.assert_called_with('waiting', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'get_osd_settings') - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_invalid_osd_settings(self, _peer_units, - _sufficient_osds, - _has_rbd_mirrors, - _get_osd_settings): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = True - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - _has_rbd_mirrors.return_value = False - _get_osd_settings.side_effect = ch_ceph.OSDSettingConflict( - 'conflict in setting foo') - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - - @mock.patch.object(hooks, 'get_osd_settings') - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_peers_complete_down(self, _peer_units, - _sufficient_osds, - _has_rbd_mirrors, - _get_osd_settings): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = True - self.ceph.is_bootstrapped.return_value = False - self.ceph.is_quorum.return_value = False - _has_rbd_mirrors.return_value = False - _get_osd_settings.return_value = {} - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - @mock.patch.object(hooks, 'has_rbd_mirrors') - @mock.patch.object(hooks, 'sufficient_osds') - @mock.patch.object(hooks, 'get_peer_units') - def test_assess_status_rbd_feature_mismatch(self, _peer_units, - _sufficient_osds, - _has_rbd_mirrors): - _peer_units.return_value = ENOUGH_PEERS_COMPLETE - _sufficient_osds.return_value = True - self.ceph.is_bootstrapped.return_value = True - self.ceph.is_quorum.return_value = True - _has_rbd_mirrors.return_value = True - self.test_config.set('default-rbd-features', 61) - hooks.assess_status() - self.status_set.assert_called_once_with('blocked', mock.ANY) - - def test_get_peer_units_no_peers(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = [] - self.assertEqual({'ceph-mon1': True}, - hooks.get_peer_units()) - - def test_get_peer_units_peers_incomplete(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = ['ceph-mon2', - 'ceph-mon3'] - self.relation_get.return_value = None - self.assertEqual({'ceph-mon1': True, - 'ceph-mon2': False, - 'ceph-mon3': False}, - hooks.get_peer_units()) - - def test_get_peer_units_peers_complete(self): - self.relation_ids.return_value = ['mon:1'] - self.related_units.return_value = ['ceph-mon2', - 'ceph-mon3'] - self.relation_get.side_effect = ['ceph-mon2', - 'ceph-mon3'] - self.assertEqual({'ceph-mon1': True, - 'ceph-mon2': True, - 'ceph-mon3': True}, - hooks.get_peer_units()) - - def test_no_bootstrap_not_set(self): - self.is_relation_made.return_value = 
True - hooks.assess_status() - self.status_set.assert_called_with('blocked', mock.ANY) - self.application_version_set.assert_called_with('10.2.2') - - def test_cmr_remote_unit(self): - self.test_config.set('permit-insecure-cmr', False) - self.relations.return_value = ['client'] - self.relation_ids.return_value = ['client:1'] - self.related_units.return_value = ['remote-1'] - hooks.assess_status() - self.status_set.assert_called_with( - 'blocked', - 'Unsupported CMR relation') - self.status_set.reset_mock() - self.test_config.set('permit-insecure-cmr', True) - hooks.assess_status() - self.assertFalse( - mock.call('blocked', 'Unsupported CMR relation') in - self.status_set.call_args_list) From aad8e62b8bd92f0b6cdda420b174bac01b0b0656 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 27 Sep 2022 14:14:46 -0400 Subject: [PATCH 2444/2699] Add operator-native ceph-client library Change-Id: Id9caf3b385094b9bc4010893034185d0a47c45d4 --- ceph-mon/src/ceph_client.py | 208 ++++++++++++++++ ceph-mon/src/ceph_hooks.py | 130 +++------- ceph-mon/src/charm.py | 82 ++++++- ceph-mon/unit_tests/manage_test_relations.py | 53 +++++ .../unit_tests/test_ceph_client_interface.py | 158 ++++++++++++ ceph-mon/unit_tests/test_ceph_hooks.py | 225 +----------------- ceph-mon/unit_tests/test_charm.py | 5 +- 7 files changed, 527 insertions(+), 334 deletions(-) create mode 100644 ceph-mon/src/ceph_client.py create mode 100644 ceph-mon/unit_tests/manage_test_relations.py create mode 100644 ceph-mon/unit_tests/test_ceph_client_interface.py diff --git a/ceph-mon/src/ceph_client.py b/ceph-mon/src/ceph_client.py new file mode 100644 index 00000000..b9881380 --- /dev/null +++ b/ceph-mon/src/ceph_client.py @@ -0,0 +1,208 @@ +"""Ceph client library +""" + +import json +import logging + +from ops.framework import Object +from ops.framework import StoredState + +from charmhelpers.contrib.storage.linux.ceph import ( + send_osd_settings, +) +import charms_ceph.utils as ceph + + +from utils import ( + get_public_addr, + get_rbd_features, +) + + +logger = logging.getLogger(__name__) + + +class CephClientProvides(Object): + """ + Encapsulate the Provides side of the Ceph Client relation. + + Hook events observed: + - relation-joined + - relation-changed + """ + + charm = None + _stored = StoredState() + + def __init__(self, charm, relation_name='client'): + super().__init__(charm, relation_name) + + self._stored.set_default(processed=[]) + self.charm = charm + self.this_unit = self.model.unit + self.relation_name = relation_name + self.framework.observe( + charm.on[self.relation_name].relation_joined, + self._on_relation_changed + ) + self.framework.observe( + charm.on[self.relation_name].relation_changed, + self._on_relation_changed + ) + + def notify_all(self): + send_osd_settings() + if not self.charm.ready_for_service(): + return + for relation in self.model.relations[self.relation_name]: + for unit in relation.units: + self._handle_client_relation(relation, unit) + + def _on_relation_changed(self, event): + """Prepare relation for data from requiring side.""" + send_osd_settings() + if not self.charm.ready_for_service(): + return + self._handle_client_relation(event.relation, event.unit) + + def _get_ceph_info_from_configs(self): + """Create dictionary of ceph information required to set client relation. 
+
+        :returns: Dictionary of ceph configurations needed for client relation
+        :rtype: dict
+        """
+        public_addr = get_public_addr()
+        rbd_features = get_rbd_features()
+        data = {
+            'auth': 'cephx',
+            'ceph-public-address': public_addr
+        }
+        if rbd_features:
+            data['rbd-features'] = rbd_features
+        return data
+
+    def _get_custom_relation_init_data(self):
+        """Information required for a specialised relation.
+
+        :returns: Ceph configurations needed for a specialised relation
+        :rtype: dict
+        """
+        return {}
+
+    def _get_client_application_name(self, relation, unit):
+        """Retrieve client application name from relation data."""
+        return relation.data[unit].get(
+            'application-name',
+            relation.app.name)
+
+    def _handle_client_relation(self, relation, unit):
+        """Handle broker request and set the relation data.
+
+        :param relation: Operator relation
+        :type relation: Relation
+        :param unit: Unit to handle
+        :type unit: Unit
+        """
+
+        # if is_unsupported_cmr(unit):
+        #     return
+
+        logger.debug(
+            'mon cluster in quorum and osds bootstrapped '
+            '- providing client with keys, processing broker requests')
+
+        service_name = self._get_client_application_name(relation, unit)
+        data = self._get_ceph_info_from_configs()
+        data.update(self._get_custom_relation_init_data())
+        data.update({'key': ceph.get_named_key(service_name)})
+
+        data.update(
+            self._handle_broker_request(
+                relation, unit, add_legacy_response=True))
+        for k, v in data.items():
+            relation.data[self.this_unit][k] = str(v)
+
+    def _req_already_treated(self, request_id):
+        """Check if broker request already handled.
+
+        The local relation data holds all the broker request/responses that
+        are handled as a dictionary. There will be a single entry for each
+        unit that makes a broker request, in the form of
+        broker-rsp-<unit>: {request-id: <rsp>, ..}. Verify if request_id
+        exists in the relation data broker response for the requested unit.
+
+        :param request_id: Request ID
+        :type request_id: str
+        :returns: Whether request is already handled
+        :rtype: bool
+        """
+        return request_id in self._stored.processed
+
+    def _handle_broker_request(
+            self, relation, unit, add_legacy_response=False):
+        """Retrieve broker request from relation, process, return response data.
+
+        :param relation: Operator relation carrying the broker request
+        :param unit: Unit that issued the broker request
+        :param add_legacy_response: (Optional) Adds the legacy ``broker_rsp``
+                                    key to the response in addition to the
+                                    new way.
+        :type add_legacy_response: bool
+        :returns: Dictionary of response data ready to be set on the relation.
+        :rtype: dict
+        """
+        def _get_broker_req_id(request):
+            try:
+                if isinstance(request, str):
+                    try:
+                        req_key = json.loads(request)['request-id']
+                    except (TypeError, json.decoder.JSONDecodeError):
+                        logger.warning(
+                            'Not able to decode request '
+                            'id for broker request {}'.
+                            format(request))
+                        req_key = None
+                else:
+                    req_key = request['request-id']
+            except KeyError:
+                logger.warning(
+                    'Not able to decode request id for broker request {}'.
+                    format(request))
+                req_key = None
+
+            return req_key
+
+        response = {}
+
+        settings = relation.data[unit]
+        if 'broker_req' in settings:
+            broker_req_id = _get_broker_req_id(settings['broker_req'])
+            if broker_req_id is None:
+                return {}
+
+            if not ceph.is_leader():
+                logger.debug(
+                    "Not leader - ignoring broker request {}".format(
+                        broker_req_id))
+                return {}
+
+            if self._req_already_treated(broker_req_id):
+                logger.debug(
+                    "Ignoring already executed broker request {}".format(
+                        broker_req_id))
+                return {}
+
+            rsp = self.charm.process_broker_request(
+                broker_req_id, settings['broker_req'])
+            unit_id = settings.get(
+                'unit-name', unit.name).replace('/', '-')
+            unit_response_key = 'broker-rsp-' + unit_id
+            response.update({unit_response_key: rsp})
+            if add_legacy_response:
+                response.update({'broker_rsp': rsp})
+            processed = self._stored.processed
+            processed.append(broker_req_id)
+            self._stored.processed = processed
+        else:
+            logger.warning('broker_req not in settings: {}'.format(settings))
+        return response
diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py
index 6499567b..38d3ccfc 100755
--- a/ceph-mon/src/ceph_hooks.py
+++ b/ceph-mon/src/ceph_hooks.py
@@ -252,6 +252,12 @@ def update_host_osd_count_report(reset=False):
 @hooks.hook('config-changed')
 @harden()
 def config_changed():
+    '''
+    Handle config-changed.
+
+    :returns: Whether or not relations should be notified after completion.
+    :rtype: bool
+    '''
     # Get the cfg object so we can see if the no-bootstrap value has changed
     # and triggered this hook invocation
     cfg = config()
@@ -341,8 +347,7 @@ def config_changed():
     try_disable_insecure_reclaim()
     for relid in relation_ids('dashboard'):
         dashboard_relation(relid)
-    # Update client relations
-    notify_client()
+    return True
 
 
 def get_mon_hosts():
@@ -393,6 +398,9 @@ def bootstrap_source_relation_changed():
     the ceph-mon charm. This relation is used to exchange the remote
     ceph-public-addresses which are used for the mon's, the fsid, and the
     monitor-secret.
+
+    :returns: Whether or not relations should be notified after completion.
+    :rtype: bool
     """
     if not config('no-bootstrap'):
         status_set('blocked', 'Cannot join the bootstrap-source relation when '
@@ -445,7 +454,7 @@ def bootstrap_source_relation_changed():
     # The leader unit needs to bootstrap itself as it won't receive the
     # leader-settings-changed hook elsewhere.
     if curr_fsid:
-        mon_relation()
+        return mon_relation()
 
 
 @hooks.hook('prometheus-relation-joined',
@@ -488,6 +497,12 @@ def prometheus_left():
             'leader-settings-changed',
             'bootstrap-source-relation-departed')
 def mon_relation():
+    '''
+    Handle the mon relation.
+
+    :returns: Whether or not relations should be notified after completion.
+    :rtype: bool
+    '''
     if leader_get('monitor-secret') is None:
         log('still waiting for leader to setup keys')
         status_set('waiting', 'Waiting for leader to setup keys')
@@ -503,9 +518,11 @@ def mon_relation():
             # the unit handling the broker request will update a nonce on the
             # mon relation.
             notify_relations()
+            return True
         else:
             if attempt_mon_cluster_bootstrap():
                 notify_relations()
+                return True
             else:
                 log('Not enough mons ({}), punting.'
.format(len(get_mon_hosts()))) @@ -578,7 +595,6 @@ def attempt_mon_cluster_bootstrap(): def notify_relations(): notify_osds() notify_radosgws() - notify_client() notify_rbd_mirrors() notify_prometheus() @@ -613,79 +629,6 @@ def notify_rbd_mirrors(): rbd_mirror_relation(relid=relid, unit=unit, recurse=False) -def _get_ceph_info_from_configs(): - """Create dictionary of ceph information required to set client relation. - - :returns: Dictionary of ceph configurations needed for client relation - :rtpe: dict - """ - public_addr = get_public_addr() - rbd_features = get_rbd_features() - data = { - 'auth': 'cephx', - 'ceph-public-address': public_addr - } - if rbd_features: - data['rbd-features'] = rbd_features - return data - - -def _handle_client_relation(relid, unit, data=None): - """Handle broker request and set the relation data - - :param relid: Relation ID - :type relid: str - :param unit: Unit name - :type unit: str - :param data: Initial relation data - :type data: dict - """ - if data is None: - data = {} - - if is_unsupported_cmr(unit): - return - data.update( - handle_broker_request(relid, unit, add_legacy_response=True)) - relation_set(relation_id=relid, relation_settings=data) - - -def notify_client(): - send_osd_settings() - if not ready_for_service(): - log("mon cluster is not in quorum, skipping notify_client", - level=WARNING) - return - - for relid in relation_ids('client'): - data = _get_ceph_info_from_configs() - - service_name = None - # Loop through all related units until client application name is found - # This is done in seperate loop to avoid calling ceph to retreive named - # key for every unit - for unit in related_units(relid): - service_name = get_client_application_name(relid, unit) - if service_name: - data.update({'key': ceph.get_named_key(service_name)}) - break - - if not service_name: - log('Unable to determine remote service name, deferring processing' - ' of broker requests for relation {} '.format(relid)) - # continue with next relid - continue - - for unit in related_units(relid): - _handle_client_relation(relid, unit, data) - - for relid in relation_ids('admin'): - admin_relation_joined(relid) - for relid in relation_ids('mds'): - for unit in related_units(relid): - mds_relation_joined(relid=relid, unit=unit) - - def req_already_treated(request_id, relid, req_unit): """Check if broker request already handled. @@ -911,7 +854,6 @@ def osd_relation(relid=None, unit=None): # NOTE: radosgw key provision is gated on presence of OSD # units so ensure that any deferred hooks are processed notify_radosgws() - notify_client() notify_rbd_mirrors() send_osd_settings() @@ -1036,6 +978,16 @@ def radosgw_relation(relid=None, unit=None): @hooks.hook('rbd-mirror-relation-joined') @hooks.hook('rbd-mirror-relation-changed') def rbd_mirror_relation(relid=None, unit=None, recurse=True): + ''' + Handle the rbd mirror relation + + :param recurse: Whether we should call out to update relation functions or + not. Mainly used to handle recursion when called from + notify_rbd_mirrors() + :type recurse: bool + :returns: Whether or not relations should be notified after completion. + :rtype: bool + ''' if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- providing rbd-mirror client with keys') @@ -1071,7 +1023,7 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True): # make sure clients are updated with the appropriate RBD features # bitmap. 
if recurse: - notify_client() + return True @hooks.hook('mds-relation-changed') @@ -1117,26 +1069,6 @@ def admin_relation_joined(relid=None): relation_settings=data) -@hooks.hook('client-relation-changed') -@hooks.hook('client-relation-joined') -def client_relation(relid=None, unit=None): - send_osd_settings() - if ready_for_service(): - log('mon cluster in quorum and osds bootstrapped ' - '- providing client with keys, processing broker requests') - if not unit: - unit = remote_unit() - service_name = get_client_application_name(relid, unit) - if not service_name: - log('Unable to determine remote service name, deferring ' - 'processing of broker requests for relation {} ' - 'remote unit {}'.format(relid, unit)) - return - data = _get_ceph_info_from_configs() - data.update({'key': ceph.get_named_key(service_name)}) - _handle_client_relation(relid, unit, data) - - @hooks.hook('upgrade-charm.real') @harden() def upgrade_charm(): diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 0491d54e..341854d9 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -7,15 +7,35 @@ import charms.operator_libs_linux.v0.apt as apt import charms.operator_libs_linux.v1.systemd as systemd +from ops.charm import CharmEvents +from ops.framework import EventBase, EventSource + import ops_openstack.core import charms_ceph.utils as ceph - +from charms_ceph.broker import ( + process_requests +) import ceph_hooks as hooks +import ceph_client import ceph_metrics import ops_actions +logger = logging.getLogger(__name__) + + +class NotifyClientEvent(EventBase): + def __init__(self, handle): + super().__init__(handle) + + +class CephCharmEvents(CharmEvents): + """Custom charm events.""" + + notify_clients = EventSource(NotifyClientEvent) + + class CephMonCharm(ops_openstack.core.OSBaseCharm): release = 'quincy' @@ -25,6 +45,8 @@ class CephMonCharm(ops_openstack.core.OSBaseCharm): 'radosgw', 'lvm2', 'parted', 'smartmontools', ] + on = CephCharmEvents() + # General charm control callbacks. 
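# An aside on the EventSource machinery used above (a minimal illustrative
# sketch, not part of this commit; all names below are hypothetical): a
# custom event is declared once on a CharmEvents subclass and can then be
# emitted and observed like any built-in hook event.
from ops.charm import CharmBase, CharmEvents
from ops.framework import EventBase, EventSource


class RefreshEvent(EventBase):
    pass


class MyEvents(CharmEvents):
    refresh = EventSource(RefreshEvent)


class MyCharm(CharmBase):
    on = MyEvents()

    def __init__(self, *args):
        super().__init__(*args)
        # Observers run synchronously when self.on.refresh.emit() is called.
        self.framework.observe(self.on.refresh, self._on_refresh)

    def _on_refresh(self, event):
        pass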
# TODO: Figure out how to do hardening in an operator-framework @@ -43,7 +65,8 @@ def on_install(self, event): pass def on_config(self, event): - hooks.config_changed() + if hooks.config_changed(): + self.on.notify_clients.emit() def on_pre_series_upgrade(self, event): hooks.pre_series_upgrade() @@ -51,6 +74,7 @@ def on_pre_series_upgrade(self, event): def on_upgrade(self, event): self.metrics_endpoint.update_alert_rules() hooks.upgrade_charm() + self.on.notify_clients.emit() def on_post_series_upgrade(self, event): hooks.post_series_upgrade() @@ -60,7 +84,8 @@ def on_mon_relation_joined(self, event): hooks.mon_relation_joined() def on_bootstrap_source_relation_changed(self, event): - hooks.bootstrap_source_relation_changed() + if hooks.bootstrap_source_relation_changed(): + self.on.notify_clients.emit() def on_prometheus_relation_joined_or_changed(self, event): hooks.prometheus_relation() @@ -69,10 +94,12 @@ def on_prometheus_relation_departed(self, event): hooks.prometheus_left() def on_mon_relation(self, event): - hooks.mon_relation() + if hooks.mon_relation(): + self.on.notify_clients.emit() def on_osd_relation(self, event): hooks.osd_relation() + self.on.notify_clients.emit() def on_dashboard_relation_joined(self, event): hooks.dashboard_relation() @@ -81,7 +108,8 @@ def on_radosgw_relation(self, event): hooks.radosgw_relation() def on_rbd_mirror_relation(self, event): - hooks.rbd_mirror_relation() + if hooks.rbd_mirror_relation(): + self.on.notify_clients.emit() def on_mds_relation(self, event): hooks.mds_relation_joined() @@ -89,9 +117,6 @@ def on_mds_relation(self, event): def on_admin_relation(self, event): hooks.admin_relation_joined() - def on_client_relation(self, event): - hooks.client_relation() - def on_nrpe_relation(self, event): hooks.update_nrpe_config() @@ -122,6 +147,16 @@ def is_blocked_insecure_cmr(self): remote_block = not self.config['permit-insecure-cmr'] return remote_block + def notify_clients(self, _event): + self.clients.notify_all() + for relation in self.model.relations['admin']: + hooks.admin_relation_joined(str(relation.id)) + + for relation in self.model.relations['mds']: + for unit in relation.units: + hooks.mds_relation_joined( + relid=str(relation.id), unit=unit.name) + def __init__(self, *args): super().__init__(*args) self._stored.is_started = True @@ -133,6 +168,7 @@ def __init__(self, *args): fw = self.framework + self.clients = ceph_client.CephClientProvides(self) self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) self.ceph_status = ceph_status.StatusAssessor(self) @@ -190,16 +226,36 @@ def __init__(self, *args): fw.observe(self.on.admin_relation_joined, self.on_admin_relation) - fw.observe(self.on.client_relation_changed, - self.on_client_relation) - fw.observe(self.on.client_relation_joined, - self.on_client_relation) - fw.observe(self.on.nrpe_external_master_relation_joined, self.on_nrpe_relation) fw.observe(self.on.nrpe_external_master_relation_changed, self.on_nrpe_relation) + fw.observe(self.on.notify_clients, self.notify_clients) + + def ready_for_service(self): + return hooks.ready_for_service() + + def process_broker_request(self, broker_req_id, requests, recurse=True): + broker_result = process_requests(requests) + if hooks.relation_ids('rbd-mirror'): + # NOTE(fnordahl): juju relation level data candidate + # notify mons to flag that the other mon units should update + # their ``rbd-mirror`` relations with information about new + # pools. 
+            logger.debug('Notifying peers after processing broker '
+                         'request {}.'.format(broker_req_id))
+            hooks.notify_mons()
+        # recurse is False only when called from notify_rbd_mirrors
+        if recurse:
+            # update ``rbd-mirror`` relations for this unit with
+            # information about new pools.
+            logger.debug(
+                "Notifying this unit's rbd-mirror relations after "
+                'processing broker request {}.'.format(broker_req_id))
+            hooks.notify_rbd_mirrors()
+        return broker_result
+
 
 if __name__ == '__main__':
     main(CephMonCharm)
diff --git a/ceph-mon/unit_tests/manage_test_relations.py b/ceph-mon/unit_tests/manage_test_relations.py
new file mode 100644
index 00000000..c9ca72a8
--- /dev/null
+++ b/ceph-mon/unit_tests/manage_test_relations.py
@@ -0,0 +1,53 @@
+# Copyright 2022 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest.mock as mock
+from ops.testing import Harness
+with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    # src.charm imports ceph_hooks, so we need to work around the inclusion
+    # of the 'harden' decorator.
+    from src.charm import CephMonCharm
+
+
+relation_id = int
+
+
+def add_ceph_client_relation(harness: Harness[CephMonCharm]) -> relation_id:
+    rel_id = harness.add_relation(
+        'client',
+        'glance')
+    harness.add_relation_unit(
+        rel_id,
+        'glance/0')
+    harness.update_relation_data(
+        rel_id,
+        'glance/0',
+        {'ingress-address': '10.0.0.3'})
+    return rel_id
+
+
+def add_ceph_mds_relation(harness: Harness[CephMonCharm]) -> relation_id:
+    rel_id = harness.add_relation(
+        'mds',
+        'ceph-fs')
+    harness.add_relation_unit(
+        rel_id,
+        'ceph-fs/0')
+    harness.update_relation_data(
+        rel_id,
+        'ceph-fs/0',
+        {'ingress-address': '10.0.0.3'})
+    return rel_id
diff --git a/ceph-mon/unit_tests/test_ceph_client_interface.py b/ceph-mon/unit_tests/test_ceph_client_interface.py
new file mode 100644
index 00000000..d517dbaa
--- /dev/null
+++ b/ceph-mon/unit_tests/test_ceph_client_interface.py
@@ -0,0 +1,158 @@
+# Copyright 2022 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for the ceph-client provides interface."""
+
+import unittest.mock as mock
+from test_utils import CharmTestCase
+from ops.testing import Harness
+from manage_test_relations import (
+    add_ceph_client_relation,
+    add_ceph_mds_relation,
+)
+
+with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
+    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
+                            lambda *args, **kwargs: f(*args, **kwargs))
+    # src.charm imports ceph_hooks, so we need to work around the inclusion
+    # of the 'harden' decorator.
+    from src.charm import CephMonCharm
+
+
+class CephClientTestCase(CharmTestCase):
+    """Run tests for the ceph-client interface."""
+
+    def setUp(self):
+        self.harness = Harness(CephMonCharm)
+        self.addCleanup(self.harness.cleanup)
+
+    @mock.patch("src.charm.ceph_client.ceph.get_named_key")
+    @mock.patch("src.charm.ceph_client.get_rbd_features")
+    @mock.patch("src.charm.ceph_client.get_public_addr")
+    @mock.patch.object(CephMonCharm, "ready_for_service")
+    @mock.patch("src.charm.ceph_client.send_osd_settings")
+    def test_client_relation(
+            self, _send_osd_settings, mock_ready_for_service,
+            mock_get_public_addr, mock_get_rbd_features, mock_get_named_key):
+        mock_get_public_addr.return_value = '127.0.0.1'
+        mock_ready_for_service.return_value = True
+        mock_get_rbd_features.return_value = 42
+        mock_get_named_key.return_value = 'test key'
+        self.harness.begin()
+        self.harness.set_leader()
+        rel_id = add_ceph_client_relation(self.harness)
+        unit_rel_data = self.harness.get_relation_data(
+            rel_id,
+            'ceph-mon/0')
+        self.assertEqual(
+            unit_rel_data,
+            {
+                'auth': 'cephx',
+                'ceph-public-address': '127.0.0.1',
+                'key': 'test key',
+                'rbd-features': '42',
+            })
+
+    @mock.patch("src.charm.ceph_client.ceph.is_leader")
+    @mock.patch.object(CephMonCharm, "process_broker_request")
+    @mock.patch("src.charm.ceph_client.ceph.get_named_key")
+    @mock.patch("src.charm.ceph_client.get_rbd_features")
+    @mock.patch("src.charm.ceph_client.get_public_addr")
+    @mock.patch.object(CephMonCharm, "ready_for_service")
+    @mock.patch("src.charm.ceph_client.send_osd_settings")
+    def test_client_relation_broker(
+            self, _send_osd_settings, mock_ready_for_service,
+            mock_get_public_addr, mock_get_rbd_features, mock_get_named_key,
+            mock_process_broker_request, mock_is_leader):
+        mock_get_public_addr.return_value = '127.0.0.1'
+        mock_ready_for_service.return_value = True
+        mock_get_rbd_features.return_value = 42
+        mock_get_named_key.return_value = 'test key'
+        mock_process_broker_request.return_value = 'AOK'
+        mock_is_leader.return_value = True
+        self.harness.begin()
+        self.harness.set_leader()
+        rel_id = add_ceph_client_relation(self.harness)
+        self.harness.update_relation_data(
+            rel_id,
+            'glance/0',
+            {'broker_req': '{"request-id": "req"}'})
+        mock_process_broker_request.assert_called_once_with(
+            'req', '{"request-id": "req"}'
+        )
+        unit_rel_data = self.harness.get_relation_data(
+            rel_id,
+            'ceph-mon/0')
+        self.assertEqual(
+            unit_rel_data,
+            {
+                'auth': 'cephx',
+                'ceph-public-address': '127.0.0.1',
+                'key': 'test key',
+                'rbd-features': '42',
+                'broker-rsp-glance-0': 'AOK',
+                'broker_rsp': 'AOK'
+            })
+        mock_process_broker_request.reset_mock()
+        self.harness.update_relation_data(
+            rel_id,
+            'glance/0',
+            {'broker_req': '{"request-id": "req"}'})
+        mock_process_broker_request.assert_not_called()
+
+    @mock.patch("src.charm.hooks.mds_relation_joined")
+    @mock.patch("src.charm.ceph_client.ceph.get_named_key")
+    @mock.patch("src.charm.ceph_client.get_rbd_features")
+    
@mock.patch("src.charm.ceph_client.get_public_addr") + @mock.patch.object(CephMonCharm, "ready_for_service") + @mock.patch("src.charm.ceph_client.send_osd_settings") + def test_notify_clients( + self, _send_osd_settings, mock_ready_for_service, + mock_get_public_addr, mock_get_rbd_features, mock_get_named_key, + mock_mds_relation_joined): + mock_get_public_addr.return_value = '127.0.0.1' + mock_ready_for_service.return_value = True + mock_get_rbd_features.return_value = None + mock_get_named_key.return_value = 'test key' + self.harness.begin() + self.harness.set_leader() + rel_id = add_ceph_client_relation(self.harness) + add_ceph_mds_relation(self.harness) + + unit_rel_data = self.harness.get_relation_data( + rel_id, + 'ceph-mon/0') + self.assertEqual( + unit_rel_data, + { + 'auth': 'cephx', + 'ceph-public-address': '127.0.0.1', + 'key': 'test key', + }) + mock_get_rbd_features.return_value = 42 + self.harness.charm.on.notify_clients.emit() + unit_rel_data = self.harness.get_relation_data( + rel_id, + 'ceph-mon/0') + self.assertEqual( + unit_rel_data, + { + 'auth': 'cephx', + 'ceph-public-address': '127.0.0.1', + 'key': 'test key', + 'rbd-features': '42', + }) + + mock_mds_relation_joined.assert_called_with( + relid='1', unit='ceph-fs/0') diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index a2c653ce..f5a07a56 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -214,12 +214,10 @@ def test_nrpe_dependency_installed(self, mock_config): @patch.object(ceph_hooks, 'service_pause') @patch.object(ceph_hooks, 'notify_radosgws') @patch.object(ceph_hooks, 'ceph') - @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks, 'config') def test_upgrade_charm_with_nrpe_relation_installs_dependencies( self, mock_config, - mock_notify_client, mock_ceph, mock_notify_radosgws, mock_service_pause, @@ -242,88 +240,11 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( ceph_hooks.upgrade_charm() mocks["apt_install"].assert_called_with( ["python-dbus", "lockfile-progs"]) - mock_notify_client.assert_called_once_with() mock_notify_radosgws.assert_called_once_with() mock_ceph.update_monfs.assert_called_once_with() mock_notify_prometheus.assert_called_once_with() mock_service_pause.assert_called_with('ceph-create-keys') - @patch.object(ceph_hooks, 'relation_get') - @patch.object(ceph_hooks, 'mds_relation_joined') - @patch.object(ceph_hooks, 'admin_relation_joined') - @patch.object(ceph_hooks, 'relation_set') - @patch.object(ceph_hooks, 'handle_broker_request') - @patch.object(ceph_hooks, 'config') - @patch.object(ceph_hooks, 'related_units') - @patch.object(ceph_hooks.ceph, 'get_named_key') - @patch.object(ceph_hooks.hookenv, 'remote_service_name') - @patch.object(ceph_hooks, 'relation_ids') - @patch.object(ceph_hooks.ceph, 'is_leader') - @patch.object(ceph_hooks, 'get_rbd_features') - @patch.object(ceph_hooks, 'get_public_addr') - @patch.object(ceph_hooks, 'ready_for_service') - @patch.object(ceph_hooks, 'send_osd_settings') - def test_notify_client(self, - _send_osd_settings, - _ready_for_service, - _get_public_addr, - _get_rbd_features, - _is_leader, - _relation_ids, - _remote_service_name, - _get_named_key, - _related_units, - _config, - _handle_broker_request, - _relation_set, - _admin_relation_joined, - _mds_relation_joined, - _relation_get): - _relation_ids.return_value = ['arelid'] - _related_units.return_value = ['aunit/0'] - _relation_get.return_value = {'application-name': 'aunit'} - 
_remote_service_name.return_value = 'aunit' - _is_leader.return_value = True - config = copy.deepcopy(CHARM_CONFIG) - _config.side_effect = lambda key: config[key] - _handle_broker_request.return_value = {} - _get_rbd_features.return_value = None - - ceph_hooks.notify_client() - _send_osd_settings.assert_called_once_with() - _ready_for_service.assert_called_once_with() - _get_public_addr.assert_called_once_with() - _get_named_key.assert_called_once_with('aunit') - _handle_broker_request.assert_called_once_with( - 'arelid', 'aunit/0', add_legacy_response=True) - _relation_set.assert_called_once_with( - relation_id='arelid', - relation_settings={ - 'key': _get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': _get_public_addr() - }) - - _relation_ids.assert_has_calls([ - call('admin'), - call('mds'), - ]) - _admin_relation_joined.assert_called_once_with('arelid') - _mds_relation_joined.assert_called_once_with(relid='arelid', - unit='aunit/0') - - _get_rbd_features.return_value = 42 - _relation_set.reset_mock() - ceph_hooks.notify_client() - _relation_set.assert_called_once_with( - relation_id='arelid', - relation_settings={ - 'key': _get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': _get_public_addr(), - 'rbd-features': 42, - }) - @patch.object(ceph_hooks, 'rbd_mirror_relation') @patch.object(ceph_hooks, 'related_units') @patch.object(ceph_hooks, 'relation_ids') @@ -389,7 +310,6 @@ def test_get_client_application_name(self, remote_unit, relation_get, ceph_hooks.get_client_application_name('rel:1', None), 'glance') - @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -406,15 +326,13 @@ def test_config_changed_no_autotune(self, create_sysctl, emit_ceph_conf, mgr_enable_module, - list_pools, - notify_client): + list_pools): relations_of_type.return_value = False self.test_config.set('pg-autotune', 'false') self.test_config.set('balancer-mode', '') ceph_hooks.config_changed() mgr_enable_module.assert_not_called() - @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'monitor_key_set') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @@ -435,8 +353,7 @@ def test_config_changed_with_autotune(self, emit_ceph_conf, mgr_enable_module, list_pools, - monitor_key_set, - notify_client): + monitor_key_set): relations_of_type.return_value = False cmp_pkgrevno.return_value = 1 self.test_config.set('pg-autotune', 'true') @@ -445,7 +362,6 @@ def test_config_changed_with_autotune(self, mgr_enable_module.assert_called_once_with('pg_autoscaler') monitor_key_set.assert_called_once_with('admin', 'autotune', 'true') - @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -464,8 +380,7 @@ def test_config_changed_with_default_autotune(self, create_sysctl, emit_ceph_conf, mgr_enable_module, - list_pools, - notify_client): + list_pools): relations_of_type.return_value = False cmp_pkgrevno.return_value = 1 self.test_config.set('pg-autotune', 'auto') @@ -593,130 +508,6 @@ def test_related_osd_multi_relation(self, call('osd:23') ]) - @patch.object(ceph_hooks, 'send_osd_settings') - @patch.object(ceph_hooks, 'get_rbd_features') - @patch.object(ceph_hooks, 'relation_set') - @patch.object(ceph_hooks, 'handle_broker_request') - @patch.object(ceph_hooks, 'config') - 
@patch.object(ceph_hooks.ceph, 'get_named_key') - @patch.object(ceph_hooks, 'get_public_addr') - @patch.object(ceph_hooks, 'get_client_application_name') - @patch.object(ceph_hooks, 'ready_for_service') - def test_client_relation(self, - _ready_for_service, - _get_client_application_name, - _get_public_addr, - _get_named_key, - _config, - _handle_broker_request, - _relation_set, - _get_rbd_features, - _send_osd_settings): - _get_client_application_name.return_value = 'glance' - config = copy.deepcopy(CHARM_CONFIG) - _config.side_effect = lambda key: config[key] - _handle_broker_request.return_value = {} - _get_rbd_features.return_value = None - ceph_hooks.client_relation(relid='rel1', unit='glance/0') - _ready_for_service.assert_called_once_with() - _send_osd_settings.assert_called_once_with() - _get_public_addr.assert_called_once_with() - _get_named_key.assert_called_once_with('glance') - _handle_broker_request.assert_called_once_with( - 'rel1', 'glance/0', add_legacy_response=True) - _relation_set.assert_called_once_with( - relation_id='rel1', - relation_settings={ - 'key': _get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': _get_public_addr() - }) - _get_rbd_features.return_value = 42 - _relation_set.reset_mock() - ceph_hooks.client_relation(relid='rel1', unit='glance/0') - _relation_set.assert_called_once_with( - relation_id='rel1', - relation_settings={ - 'key': _get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': _get_public_addr(), - 'rbd-features': 42, - }) - - @patch.object(ceph_hooks, 'req_already_treated') - @patch.object(ceph_hooks, 'send_osd_settings') - @patch.object(ceph_hooks, 'get_rbd_features') - @patch.object(ceph_hooks, 'config') - @patch.object(ceph_hooks.ceph, 'get_named_key') - @patch.object(ceph_hooks, 'get_public_addr') - @patch.object(ceph_hooks.hookenv, 'remote_service_name') - @patch.object(ceph_hooks, 'relation_ids', return_value=[]) - @patch.object(ceph_hooks, 'ready_for_service') - @patch.object(ceph_hooks.ceph, 'is_quorum') - @patch.object(ceph_hooks, 'remote_unit') - @patch.object(ceph_hooks, 'relation_get') - @patch.object(ceph_hooks.ceph, 'is_leader') - @patch.object(ceph_hooks, 'process_requests') - @patch.object(ceph_hooks, 'relation_set') - def test_client_relation_non_rel_hook(self, relation_set, - process_requests, - is_leader, - relation_get, - remote_unit, - is_quorum, - ready_for_service, - relation_ids, - remote_service_name, - get_public_addr, - get_named_key, - _config, - _get_rbd_features, - _send_osd_settings, - req_already_treated): - # Check for LP #1738154 - ready_for_service.return_value = True - process_requests.return_value = 'AOK' - is_leader.return_value = True - relation_get.return_value = {'broker_req': '{"request-id": "req"}'} - remote_unit.return_value = None - is_quorum.return_value = True - config = copy.deepcopy(CHARM_CONFIG) - _config.side_effect = lambda key: config[key] - _get_rbd_features.return_value = None - req_already_treated.return_value = False - ceph_hooks.client_relation(relid='rel1', unit='glance/0') - _send_osd_settings.assert_called_once_with() - relation_set.assert_called_once_with( - relation_id='rel1', - relation_settings={ - 'key': get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': get_public_addr(), - 'broker-rsp-glance-0': 'AOK', - 'broker_rsp': 'AOK'}) - relation_set.reset_mock() - remote_unit.return_value = 'glance/0' - ceph_hooks.client_relation() - relation_set.assert_called_once_with( - relation_id=None, - relation_settings={ - 'key': get_named_key(), - 'auth': 'cephx', - 
'ceph-public-address': get_public_addr(), - 'broker-rsp-glance-0': 'AOK', - 'broker_rsp': 'AOK'}) - - # Verify relation_set when broker request is already treated - relation_set.reset_mock() - req_already_treated.return_value = True - ceph_hooks.client_relation(relid='rel1', unit='glance/0') - relation_set.assert_called_once_with( - relation_id='rel1', - relation_settings={ - 'key': get_named_key(), - 'auth': 'cephx', - 'ceph-public-address': get_public_addr()}) - @patch.object(ceph_hooks, 'req_already_treated') @patch.object(ceph_hooks, 'relation_ids') @patch.object(ceph_hooks, 'notify_mons') @@ -880,7 +671,6 @@ def test_bootstrap_source_different_fsid_secret(self): self.assertRaises(AssertionError, ceph_hooks.bootstrap_source_relation_changed) - @patch.object(ceph_hooks, 'notify_client') @patch.object(ceph_hooks.ceph, 'is_bootstrapped') @patch.object(ceph_hooks, 'emit_cephconf') @patch.object(ceph_hooks, 'leader_get') @@ -897,8 +687,7 @@ def test_config_changed(self, _is_leader, _leader_get, _emit_cephconf, - _is_bootstrapped, - _notify_client): + _is_bootstrapped): config = copy.deepcopy(CHARM_CONFIG) _config.side_effect = \ lambda key=None: config.get(key, None) if key else config @@ -915,7 +704,6 @@ def test_config_changed(self, ]) _emit_cephconf.assert_called_once_with() _is_bootstrapped.assert_called_once_with() - _notify_client.assert_called_once_with() @patch.object(ceph_hooks, 'emit_cephconf') @patch.object(ceph_hooks, 'create_sysctl') @@ -1086,9 +874,7 @@ def setUp(self): self.ceph.list_pools_detail.return_value = {'pool': {}} @patch.object(ceph_hooks, 'retrieve_client_broker_requests') - @patch.object(ceph_hooks, 'notify_client') def test_rbd_mirror_relation(self, - _notify_client, _retrieve_client_broker_requests): self.handle_broker_request.return_value = {} base_relation_settings = { @@ -1112,8 +898,6 @@ def test_rbd_mirror_relation(self, relation_settings=base_relation_settings) self.test_relation.set( {'unique_id': None}) - _notify_client.assert_called_once_with() - _notify_client.reset_mock() ceph_hooks.rbd_mirror_relation('rbd-mirror:52', 'ceph-rbd-mirror/0', recurse=False) self.relation_set.assert_called_with( @@ -1121,7 +905,6 @@ def test_rbd_mirror_relation(self, relation_settings=base_relation_settings) self.test_relation.set( {'unique_id': json.dumps('otherSideIsReactiveEndpoint')}) - self.assertFalse(_notify_client.called) ceph_hooks.rbd_mirror_relation('rbd-mirror:53', 'ceph-rbd-mirror/0') self.ceph.get_rbd_mirror_key.assert_called_once_with( 'rbd-mirror.otherSideIsReactiveEndpoint') diff --git a/ceph-mon/unit_tests/test_charm.py b/ceph-mon/unit_tests/test_charm.py index dc004fcb..ee313778 100644 --- a/ceph-mon/unit_tests/test_charm.py +++ b/ceph-mon/unit_tests/test_charm.py @@ -24,11 +24,13 @@ def test_init(self): self.assertTrue(self.harness.charm.metrics_endpoint) self.assertTrue(self.harness.charm.ceph_status) + @patch.object(charm.ceph_client.CephClientProvides, 'notify_all') @patch("charm.hooks") - def test_on_config_changed(self, hooks): + def test_on_config_changed(self, hooks, _notify_all): self.harness.update_config({"permit-insecure-cmr": None}) hooks.config_changed.assert_called() + @patch.object(charm.ceph_client.CephClientProvides, 'notify_all') @patch("charm.ops_openstack.core.apt_install") @patch("charm.ops_openstack.core.apt_update") @patch("charm.ops_openstack.core.add_source") @@ -45,6 +47,7 @@ def test_on_install( _add_source, apt_update, apt_install, + _notify_all ): self.harness.update_config({"permit-insecure-cmr": None}) 
self.harness.charm.on.install.emit() From 481952b6c3de1fc5be30908043e30830d431f184 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 7 Oct 2022 08:13:26 -0400 Subject: [PATCH 2445/2699] Add ceph-fs to test bundles to ensure relation works Change-Id: Ifc5e382e44f3dfcddfda3c526e07e9bb5892fbc3 --- ceph-mon/tests/bundles/focal-xena.yaml | 13 +++++++++++++ ceph-mon/tests/bundles/focal-yoga.yaml | 13 +++++++++++++ ceph-mon/tests/bundles/jammy-yoga.yaml | 13 +++++++++++++ ceph-mon/tests/bundles/local-focal-yoga.yaml | 13 +++++++++++++ 4 files changed, 52 insertions(+) diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml index 592d7eaf..d9a6550c 100644 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ b/ceph-mon/tests/bundles/focal-xena.yaml @@ -26,6 +26,7 @@ machines: '14': '15': '16': + '17': applications: @@ -83,6 +84,15 @@ applications: - '7' - '8' + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 @@ -218,6 +228,9 @@ relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'nova-cloud-controller:shared-db' - 'nova-cloud-controller-mysql-router:shared-db' - - 'nova-cloud-controller-mysql-router:db-router' diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/focal-yoga.yaml index 3e75ee5b..bb475bc1 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/focal-yoga.yaml @@ -26,6 +26,7 @@ machines: '14': '15': '16': + '17': applications: @@ -83,6 +84,15 @@ applications: - '7' - '8' + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 @@ -218,6 +228,9 @@ relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'nova-cloud-controller:shared-db' - 'nova-cloud-controller-mysql-router:shared-db' - - 'nova-cloud-controller-mysql-router:db-router' diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml index 28932094..24818960 100644 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ b/ceph-mon/tests/bundles/jammy-yoga.yaml @@ -27,6 +27,7 @@ machines: '15': '16': series: focal + '17': applications: @@ -84,6 +85,15 @@ applications: - '7' - '8' + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 @@ -220,6 +230,9 @@ relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'nova-cloud-controller:shared-db' - 'nova-cloud-controller-mysql-router:shared-db' - - 'nova-cloud-controller-mysql-router:db-router' diff --git a/ceph-mon/tests/bundles/local-focal-yoga.yaml b/ceph-mon/tests/bundles/local-focal-yoga.yaml index 823cebdc..98c46c8b 100644 --- a/ceph-mon/tests/bundles/local-focal-yoga.yaml +++ b/ceph-mon/tests/bundles/local-focal-yoga.yaml @@ -26,6 +26,7 @@ machines: '14': '15': '16': + '17': applications: @@ -82,6 +83,15 @@ applications: - '7' - '8' + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 @@ -217,6 +227,9 @@ relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'nova-cloud-controller:shared-db' - 
'nova-cloud-controller-mysql-router:shared-db' - - 'nova-cloud-controller-mysql-router:db-router' From 73d0abd239cd425c61bd7ac41e32d76949f25bb9 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 12 Oct 2022 09:14:00 -0400 Subject: [PATCH 2446/2699] python 3.8 should still be tested Change-Id: I60fae36a08cd717c7db1d622af45cb9cd53d6a4c --- ceph-proxy/.zuul.yaml | 2 ++ ceph-proxy/osci.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 75fc2a78..168494f5 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -1,3 +1,5 @@ - project: templates: - openstack-python3-charm-zed-jobs + - openstack-python3-charm-yoga-jobs + diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index 81056ab1..0f563793 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -1,5 +1,7 @@ - project: templates: + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-unit-jobs-py310 - charm-zed-functional-jobs check: From 379e55248b65550c7f687ddd3ea3f8b4e1c3e61e Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 5 Sep 2022 17:13:55 -0300 Subject: [PATCH 2447/2699] Enable users to start/stop Crimson OSD's This patchset modifies the add-disk action so that it now can optionally start a Crimson OSD daemon. Change-Id: I59bf4e41f1f56c6bda2352b5613289ff73113342 Depends-On: If58bde4d5445ed5de420abc007db6bf8b8e43269 --- ceph-osd/actions.yaml | 10 ++++ ceph-osd/actions/add_disk.py | 61 +++++++++++++++++++- ceph-osd/files/systemd/crimson-osd@.service | 9 +++ ceph-osd/tox.ini | 2 +- ceph-osd/unit_tests/test_actions_add_disk.py | 19 ++++++ 5 files changed, 98 insertions(+), 3 deletions(-) create mode 100644 ceph-osd/files/systemd/crimson-osd@.service diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 56a22c6c..7d620907 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -59,6 +59,16 @@ add-disk: The size of the partitions to create for the caching devices. If left unspecified, then the full size of the devices will be split evenly across partitions. + use-crimson: + type: boolean + description: | + Whether to use the Crimson implementation for the new OSD. Note that + this is an experimental feature, and the charm doesn't provide any + lifecycle support for OSD's that run on Crimson. + i-really-mean-it: + type: boolean + description: | + Must be set when 'use-crimson' is True. required: - osd-devices blacklist-add-disk: diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 57d49fcf..e87acff2 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -14,8 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
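# The imports added just below (json, shutil, subprocess) support the new
# Crimson helpers in this action: get_osd_from_device() parses the output of
# `ceph-volume lvm list --format=json`, a mapping of OSD ids to device lists
# such as {"1": [{"devices": ["/dev/vdc"]}]}, while shutil and subprocess are
# used to install, enable and start the crimson-osd systemd unit.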
+import json import os import psutil +import shutil +import subprocess import sys sys.path.append('lib') @@ -24,6 +27,7 @@ import charmhelpers.contrib.storage.linux.ceph as ch_ceph import charmhelpers.core.hookenv as hookenv from charmhelpers.core.hookenv import function_fail +from charmhelpers.fetch import apt_install from charmhelpers.core.unitdata import kv from utils import (PartitionIter, device_size, DeviceError) @@ -32,8 +36,51 @@ import charms_ceph.utils +CRIMSON_PACKAGES = ['crimson-osd', 'libc-ares2', 'libcrypto++-dev', + 'libyaml-cpp-dev'] +CRIMSON_SYSTEMD_FILE = '/lib/systemd/system/crimson-osd@.service' + + +def get_osd_from_device(device): + """Given a device, return the OSD ID that it maps to.""" + output = subprocess.check_output(['ceph-volume', 'lvm', 'list', + '--format=json']) + devmap = json.loads(output.decode('utf8')) + for osd_id, data in devmap.items(): + for elem in data: + if device in elem.get('devices', []): + return osd_id + + +def start_crimson_osd(osd_id, device): + """An OSD was started with the classic daemon, but Crimson was + requested. As such, stop the current one and launch the correct daemon.""" + + if osd_id is None: + osd_id = get_osd_from_device(device) + + charms_ceph.utils.stop_osd(osd_id) + charms_ceph.utils.disable_osd(osd_id) + unit_name = ( + '/run/systemd/system/ceph-osd.target.wants/ceph-osd@{}.service' + .format(osd_id)) + + if os.path.exists(unit_name): + os.remove(unit_name) + + if not os.path.exists(CRIMSON_SYSTEMD_FILE): + apt_install(CRIMSON_PACKAGES, fatal=True) + shutil.copy('files/systemd/crimson-osd@.service', CRIMSON_SYSTEMD_FILE) + subprocess.check_call(['systemctl', 'daemon-reload']) + + subprocess.check_call(['systemctl', 'enable', + 'crimson-osd@{}'.format(osd_id)]) + subprocess.check_call(['systemctl', 'start', + 'crimson-osd@{}'.format(osd_id)]) + + def add_device(request, device_path, bucket=None, - osd_id=None, part_iter=None): + osd_id=None, part_iter=None, use_crimson=False): """Add a new device to be used by the OSD unit. :param request: A broker request to notify monitors of changes. @@ -71,6 +118,10 @@ def add_device(request, device_path, bucket=None, hookenv.config('bluestore'), hookenv.config('osd-encrypt-keymanager'), osd_id) + + if use_crimson: + start_crimson_osd(osd_id, effective_dev) + # Make it fast! 
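# Illustrative invocation of the extended action (hypothetical device path;
# assumes the Juju 2.x run-action syntax). 'i-really-mean-it' must accompany
# 'use-crimson', as enforced in the __main__ block further down:
#
#   juju run-action --wait ceph-osd/0 add-disk \
#       osd-devices=/dev/vdc use-crimson=true i-really-mean-it=true
#
# (The autotune check that follows is independent of the Crimson path.)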
if hookenv.config('autotune'): charms_ceph.utils.tune_dev(device_path) @@ -152,6 +203,11 @@ def validate_partition_size(psize, devices, caches): if __name__ == "__main__": + crimson = hookenv.action_get('use-crimson') + if crimson and not hookenv.action_get('i-really-mean-it'): + function_fail('Need to pass i-really-mean-it for Crimson OSDs') + sys.exit(1) + request = ch_ceph.CephBrokerRq() devices = get_devices('osd-devices') caches = get_devices('cache-devices') or cache_storage() @@ -184,7 +240,8 @@ def validate_partition_size(psize, devices, caches): request = add_device(request=request, device_path=dev, bucket=hookenv.action_get("bucket"), - osd_id=osd_id, part_iter=part_iter) + osd_id=osd_id, part_iter=part_iter, + use_crimson=crimson) except Exception: errors.append(dev) diff --git a/ceph-osd/files/systemd/crimson-osd@.service b/ceph-osd/files/systemd/crimson-osd@.service new file mode 100644 index 00000000..3982fc97 --- /dev/null +++ b/ceph-osd/files/systemd/crimson-osd@.service @@ -0,0 +1,9 @@ +[Unit] +Description=Ceph object storage daemon crimson-osd.%i + +[Service] +Environment=CLUSTER=ceph +ExecStart=/usr/bin/crimson-osd -i %i +ExecStop=/usr/bin/kill -QUIT $MAINPID +User=ceph +Group=ceph diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 1b8f281b..fe9af0a2 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -79,7 +79,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + charm-tools==2.8.4 commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index 1d06394f..de037129 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -103,3 +103,22 @@ def test_validate_osd_id(self): self.assertTrue(add_disk.validate_osd_id(elem)) for elem in ('osd.-1', '-3', '???', -100, 3.4, {}): self.assertFalse(add_disk.validate_osd_id(elem)) + + @mock.patch.object(add_disk.charms_ceph.utils, 'disable_osd') + @mock.patch.object(add_disk.charms_ceph.utils, 'stop_osd') + @mock.patch.object(add_disk.subprocess, 'check_output') + @mock.patch.object(add_disk.subprocess, 'check_call') + @mock.patch.object(add_disk, 'apt_install') + @mock.patch.object(add_disk.shutil, 'copy') + @mock.patch.object(add_disk.os.path, 'exists') + def test_crimson_osd(self, os_path_exists, shcopy, apt_install, + check_call, check_output, stop_osd, disable_osd): + os_path_exists.return_value = False + check_output.return_value = b'{"1": [{"devices": ["/dev/vdc"]}]}' + self.assertIsNone(add_disk.get_osd_from_device("/dev/vda")) + + add_disk.start_crimson_osd(None, '/dev/vdc') + stop_osd.assert_called_with("1") + check_call.assert_any_call(['systemctl', 'start', 'crimson-osd@1']) + shcopy.assert_called() + apt_install.assert_called() From c2a5dc4f3724869d02d4475ff6a8fc5e1adbb400 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 19 Oct 2022 11:38:45 -0400 Subject: [PATCH 2448/2699] fix .gitreview repo reference Change-Id: I235eeef8ffc7f102e319eeeb4c12181e34f5a069 --- ceph-dashboard/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-dashboard/.gitreview b/ceph-dashboard/.gitreview index 1d6df895..4b2e1139 100644 --- a/ceph-dashboard/.gitreview +++ b/ceph-dashboard/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.opendev.org port=29418 -project=openstack/charm-ceph-dashboard +project=openstack/charm-ceph-dashboard.git From 
1e05e1cc8e0e4d616d2d05762fc68a5ad6b36710 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 19 Oct 2022 11:43:20 -0400 Subject: [PATCH 2449/2699] fix .gitreview repo reference Change-Id: I825e3838b6ea488b2af848c58726e0c3580d767a --- ceph-iscsi/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/.gitreview b/ceph-iscsi/.gitreview index bcc8f8ba..577c572c 100644 --- a/ceph-iscsi/.gitreview +++ b/ceph-iscsi/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.opendev.org port=29418 -project=openstack/charm-ceph-iscsi +project=openstack/charm-ceph-iscsi.git From 8c6bbab9051d2343f6c2c4f1897ec471f052cde3 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 19 Oct 2022 17:38:29 +0200 Subject: [PATCH 2450/2699] Pin operator library The operator 1.5.3 testing harness doesn't work with this charms tests; pinning operator until we have a proper fix. Closes-Bug: #1993092 Change-Id: I95c02baa4869db4ef64a5c09a8b442b5fc07f4da --- ceph-dashboard/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-dashboard/requirements.txt b/ceph-dashboard/requirements.txt index 991628aa..2b5ec510 100644 --- a/ceph-dashboard/requirements.txt +++ b/ceph-dashboard/requirements.txt @@ -1,5 +1,5 @@ importlib-resources -ops >= 1.2.0 +ops >= 1.2.0, <= 1.5.2 tenacity git+https://github.com/openstack/charms.ceph#egg=charms_ceph git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack From 89cb38348cc8722b92c1b4305953a4ecde24f87b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 19 Oct 2022 11:48:52 -0400 Subject: [PATCH 2451/2699] fix .gitreview repo reference Change-Id: Ie8b504a779eef131f72ac8ecb559f35ddd5e70e3 --- ceph-proxy/.gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview index 14a8e185..134c66ec 100644 --- a/ceph-proxy/.gitreview +++ b/ceph-proxy/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.opendev.org port=29418 -project=openstack/charm-ceph-proxy +project=openstack/charm-ceph-proxy.git From f5773277d699b7e00c5c571783e1077c2e5e536b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 27 Sep 2022 14:14:46 -0400 Subject: [PATCH 2452/2699] Adds operator-native mds provides library Change-Id: Id9783ca8f7091d9f6fb9419642d08383685bffb3 --- ceph-mon/src/ceph_mds.py | 66 +++++++++++++++ ceph-mon/src/charm.py | 17 +--- .../unit_tests/test_ceph_client_interface.py | 12 ++- ceph-mon/unit_tests/test_ceph_mds_relation.py | 81 +++++++++++++++++++ 4 files changed, 156 insertions(+), 20 deletions(-) create mode 100644 ceph-mon/src/ceph_mds.py create mode 100644 ceph-mon/unit_tests/test_ceph_mds_relation.py diff --git a/ceph-mon/src/ceph_mds.py b/ceph-mon/src/ceph_mds.py new file mode 100644 index 00000000..30d66f89 --- /dev/null +++ b/ceph-mon/src/ceph_mds.py @@ -0,0 +1,66 @@ +"""Ceph mds library +""" + +import logging +from typing import Dict + +from charmhelpers.core.hookenv import leader_get +from ops import model + +import charms_ceph.utils as ceph + + +logger = logging.getLogger(__name__) + +import ceph_client + + +class CephMdsProvides(ceph_client.CephClientProvides): + """Encapsulate the provides side of the Ceph MDS relation. 
+ + Observes the mds-relation-joined hook event + """ + + charm = None + _mds_name = None + + def __init__(self, charm): + super().__init__(charm, "mds") + self.charm = charm + + def _get_mds_name(self, relation: model.Relation, unit: model.Unit) -> str: + """Retrieve mds-name from relation data.""" + unit_data = relation.data[unit] + return unit_data.get("mds-name", relation.app.name) + + def _get_custom_relation_init_data(self) -> Dict: + """Information required for the mds relation. + + :returns: Ceph configuration needed for the mds relation + :rtype: dict + """ + return { + "fsid": leader_get("fsid"), + "{}_mds_key".format(self._mds_name): ceph.get_mds_key( + name=self._mds_name + ), + } + + def _handle_client_relation( + self, relation: model.Relation, unit: model.Unit + ) -> None: + """Handle broker request and set the relation data + + :param relation: Operator relation + :type relation: Relation + :param unit: Unit to handle + :type unit: Unit + """ + + self._mds_name = self._get_mds_name(relation, unit) + + logger.debug( + "mon cluster in quorum and osds bootstrapped" + " - providing mds client with keys" + ) + super()._handle_client_relation(relation, unit) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index b180df24..33398fcd 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -4,6 +4,8 @@ from ops.main import main import ceph_status +import ceph_mds + import charms.operator_libs_linux.v0.apt as apt import charms.operator_libs_linux.v1.systemd as systemd @@ -111,9 +113,6 @@ def on_rbd_mirror_relation(self, event): if hooks.rbd_mirror_relation(): self.on.notify_clients.emit() - def on_mds_relation(self, event): - hooks.mds_relation_joined() - def on_admin_relation(self, event): hooks.admin_relation_joined() @@ -149,14 +148,10 @@ def is_blocked_insecure_cmr(self): def notify_clients(self, _event): self.clients.notify_all() + self.mds.notify_all() for relation in self.model.relations['admin']: hooks.admin_relation_joined(str(relation.id)) - for relation in self.model.relations['mds']: - for unit in relation.units: - hooks.mds_relation_joined( - relid=str(relation.id), unit=unit.name) - def __init__(self, *args): super().__init__(*args) self._stored.is_started = True @@ -171,6 +166,7 @@ def __init__(self, *args): self.clients = ceph_client.CephClientProvides(self) self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) self.ceph_status = ceph_status.StatusAssessor(self) + self.mds = ceph_mds.CephMdsProvides(self) self._observe_action(self.on.change_osd_weight_action, ops_actions.change_osd_weight.change_osd_weight) @@ -223,11 +219,6 @@ def __init__(self, *args): fw.observe(self.on.rbd_mirror_relation_joined, self.on_rbd_mirror_relation) - fw.observe(self.on.mds_relation_changed, - self.on_mds_relation) - fw.observe(self.on.mds_relation_joined, - self.on_mds_relation) - fw.observe(self.on.admin_relation_changed, self.on_admin_relation) fw.observe(self.on.admin_relation_joined, diff --git a/ceph-mon/unit_tests/test_ceph_client_interface.py b/ceph-mon/unit_tests/test_ceph_client_interface.py index d517dbaa..41b2b4ec 100644 --- a/ceph-mon/unit_tests/test_ceph_client_interface.py +++ b/ceph-mon/unit_tests/test_ceph_client_interface.py @@ -111,16 +111,16 @@ def test_client_relation_broker( {'broker_req': '{"request-id": "req"}'}) mock_process_broker_request.assert_not_called() - @mock.patch("src.charm.hooks.mds_relation_joined") @mock.patch("src.charm.ceph_client.ceph.get_named_key") @mock.patch("src.charm.ceph_client.get_rbd_features") 
@mock.patch("src.charm.ceph_client.get_public_addr") @mock.patch.object(CephMonCharm, "ready_for_service") @mock.patch("src.charm.ceph_client.send_osd_settings") + @mock.patch("src.charm.ceph_mds.leader_get", return_value="testfsid") + @mock.patch("src.charm.ceph_mds.ceph") def test_notify_clients( - self, _send_osd_settings, mock_ready_for_service, - mock_get_public_addr, mock_get_rbd_features, mock_get_named_key, - mock_mds_relation_joined): + self, _ceph, _leader, _send_osd_settings, mock_ready_for_service, + mock_get_public_addr, mock_get_rbd_features, mock_get_named_key): mock_get_public_addr.return_value = '127.0.0.1' mock_ready_for_service.return_value = True mock_get_rbd_features.return_value = None @@ -153,6 +153,4 @@ def test_notify_clients( 'key': 'test key', 'rbd-features': '42', }) - - mock_mds_relation_joined.assert_called_with( - relid='1', unit='ceph-fs/0') + self.assertEqual(self.harness.charm.mds._mds_name, "ceph-fs") diff --git a/ceph-mon/unit_tests/test_ceph_mds_relation.py b/ceph-mon/unit_tests/test_ceph_mds_relation.py new file mode 100644 index 00000000..dbcdc9cd --- /dev/null +++ b/ceph-mon/unit_tests/test_ceph_mds_relation.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +from unittest import mock +from unittest.mock import patch +import unittest +from ops.testing import Harness + +import ceph_mds +import charm +from manage_test_relations import ( + add_ceph_mds_relation, +) + + +@patch("charm.hooks") +class TestCephShared(unittest.TestCase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + self.addCleanup(self.harness.cleanup) + + def test_init(self, _hooks): + self.harness.begin() + mds = ceph_mds.CephMdsProvides(self.harness.charm) + self.assertTrue(mds.this_unit) + + @mock.patch("src.charm.ceph_client.ceph.is_leader") + @mock.patch("src.charm.ceph_mds.leader_get", return_value="test-fsid") + @mock.patch("src.charm.ceph_mds.ceph") + @mock.patch.object(charm.CephMonCharm, "process_broker_request") + @mock.patch("src.charm.ceph_client.ceph.get_named_key") + @mock.patch("src.charm.ceph_client.get_rbd_features") + @mock.patch("src.charm.ceph_client.get_public_addr") + @mock.patch.object(charm.CephMonCharm, "ready_for_service") + @mock.patch("src.charm.ceph_client.send_osd_settings") + def test_client_relation_broker( + self, + _send_osd_settings, + mock_ready_for_service, + mock_get_public_addr, + mock_get_rbd_features, + mock_get_named_key, + mock_process_broker_request, + mock_ceph_utils, + mock_leader_get, + mock_is_leader, + _hooks, + ): + mock_get_public_addr.return_value = "127.0.0.1" + mock_ready_for_service.return_value = True + mock_get_rbd_features.return_value = 42 + mock_get_named_key.return_value = "test key" + mock_process_broker_request.return_value = "AOK" + mock_ceph_utils.get_mds_key.return_value = "test-mds-key" + mock_is_leader.return_value = True + self.harness.begin() + self.harness.set_leader() + mds = ceph_mds.CephMdsProvides(self.harness.charm) + rel_id = add_ceph_mds_relation(self.harness) + self.harness.update_relation_data( + rel_id, "ceph-fs/0", {"broker_req": '{"request-id": "req"}'} + ) + self.assertEqual(mds._mds_name, "ceph-fs") + mock_leader_get.assert_called_with("fsid") + unit_rel_data = self.harness.get_relation_data(rel_id, "ceph-mon/0") + self.assertEqual( + unit_rel_data, + { + "auth": "cephx", + "ceph-public-address": "127.0.0.1", + "key": "test key", + "rbd-features": "42", + "broker-rsp-ceph-fs-0": "AOK", + 
"broker_rsp": "AOK", + 'ceph-fs_mds_key': 'test-mds-key', + 'fsid': 'test-fsid', + + }, + ) + mock_process_broker_request.reset_mock() From a99e3a5ef0e6b42cc0955ddb692df79277229d07 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Oct 2022 10:54:45 +0200 Subject: [PATCH 2453/2699] Rewrite the get-erasure-profile action with the ops framework Change-Id: I07cb5838c446ba08469e1d0f22d75d74c40ef29c --- ceph-mon/actions/get-erasure-profile | 1 - ceph-mon/src/charm.py | 2 ++ ceph-mon/src/ops_actions/__init__.py | 1 + .../ops_actions}/get_erasure_profile.py | 17 +++++------- ceph-mon/test-requirements.txt | 2 +- ceph-mon/unit_tests/test_ceph_actions.py | 27 +++++++++++++++++++ 6 files changed, 38 insertions(+), 12 deletions(-) delete mode 120000 ceph-mon/actions/get-erasure-profile rename ceph-mon/{actions => src/ops_actions}/get_erasure_profile.py (60%) diff --git a/ceph-mon/actions/get-erasure-profile b/ceph-mon/actions/get-erasure-profile deleted file mode 120000 index 97cea7a5..00000000 --- a/ceph-mon/actions/get-erasure-profile +++ /dev/null @@ -1 +0,0 @@ -get_erasure_profile.py \ No newline at end of file diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index b180df24..df868401 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -183,6 +183,8 @@ def __init__(self, *args): ops_actions.create_erasure_profile.create_erasure_profile_action) self._observe_action(self.on.get_health_action, ops_actions.get_health.get_health_action) + self._observe_action(self.on.get_erasure_profile_action, + ops_actions.get_erasure_profile.erasure_profile) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 54aaec57..2513e9ee 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -18,4 +18,5 @@ create_crush_rule, create_erasure_profile, get_health, + get_erasure_profile, ) diff --git a/ceph-mon/actions/get_erasure_profile.py b/ceph-mon/src/ops_actions/get_erasure_profile.py similarity index 60% rename from ceph-mon/actions/get_erasure_profile.py rename to ceph-mon/src/ops_actions/get_erasure_profile.py index 9038f2b0..e53fdfaf 100755 --- a/ceph-mon/actions/get_erasure_profile.py +++ b/ceph-mon/src/ops_actions/get_erasure_profile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2016 Canonical Ltd +# Copyright 2022 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,15 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile -from charmhelpers.core.hookenv import action_get, action_set +"""Get an erasure profile given a profile name.""" +from charmhelpers.contrib.storage.linux import ceph -def make_erasure_profile(): - name = action_get("name") - out = get_erasure_profile(service='admin', name=name) - action_set({'message': out}) - -if __name__ == '__main__': - make_erasure_profile() +def erasure_profile(event) -> None: + profile_name = event.params.get("name") + out = ceph.get_erasure_profile(service="admin", name=profile_name) + event.set_results({"message": out}) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 20e94a7c..0e30a1e5 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -52,4 +52,4 @@ croniter # needed for charm-rabbitmq-server unit tests # icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test # failures. Pin pyopenssl to resolve the failure. -pyopenssl<=22.0.0 \ No newline at end of file +pyopenssl<=22.0.0 diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index 21520390..86e34773 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -256,3 +256,30 @@ def test_get_health_action_error(self, mock_check_output): event.fail.assert_called_once_with( 'ceph health failed with message: ' "Command 'test' returned non-zero exit status 1.") + + +class GetErasureProfile(test_utils.CharmTestCase): + """Run tests for action.""" + + def setUp(self): + self.harness = Harness(CephMonCharm) + self.harness.begin() + self.addCleanup(self.harness.cleanup) + + @mock.patch('ops_actions.get_erasure_profile.ceph') + def test_get_erasure_profile_ok(self, mock_ceph): + mock_ceph.get_erasure_profile.return_value = "foo-erasure-params" + event = test_utils.MockActionEvent({"name": "foo-profile"}) + self.harness.charm.on_get_erasure_profile_action(event) + event.set_results.assert_called_once_with(( + {"message": "foo-erasure-params"} + )) + + @mock.patch('ops_actions.get_erasure_profile.ceph') + def test_get_erasure_profile_notfound(self, mock_ceph): + mock_ceph.get_erasure_profile.return_value = None + event = test_utils.MockActionEvent({"name": "notfound-profile"}) + self.harness.charm.on_get_erasure_profile_action(event) + event.set_results.assert_called_once_with(( + {"message": None} + )) From abc70f71bf93f1ebca0af96610a8e79a1564c437 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 26 Oct 2022 08:08:53 -0400 Subject: [PATCH 2454/2699] Add kinetic support Change-Id: I42acfcd1ec49e1a9181f92a46d522ce3a6e92514 --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 18b9e0ab..8418e6b0 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,6 +12,7 @@ tags: series: - focal - jammy +- kinetic peers: mon: interface: ceph From 1c0738dec9a24ce878ba5e917fa2bd1341421e17 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 31 Oct 2022 14:20:36 +0100 Subject: [PATCH 2455/2699] Partial revert[1] to re-enable Focal support 1: a95e54ea94a068f4ed4b436a467f973317905500 Change-Id: I565e36b94d249d2cf0beccf1480189a19da008d9 --- ceph-radosgw/config.yaml | 2 +- ceph-radosgw/metadata.yaml | 1 + ceph-radosgw/osci.yaml | 31 +++++ .../tests/bundles/focal-yoga-multisite.yaml | 99 ++++++++++++++ .../tests/bundles/focal-yoga-namespaced.yaml | 124 ++++++++++++++++++ 
ceph-radosgw/tests/bundles/focal-yoga.yaml | 123 +++++++++++++++++ ceph-radosgw/tests/tests.yaml | 3 + ceph-radosgw/tox.ini | 9 ++ 8 files changed, 391 insertions(+), 1 deletion(-) create mode 100644 ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/focal-yoga.yaml diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 2cfd3e08..3c621136 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: zed + default: yoga description: | Optional repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index a0f590d3..195d9e38 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -13,6 +13,7 @@ tags: - file-servers - misc series: +- focal - jammy extra-bindings: public: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index ebd411b1..cb19e21d 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -1,8 +1,13 @@ - project: templates: + - charm-unit-jobs-py38 + - charm-unit-jobs-py39 - charm-unit-jobs-py310 check: jobs: + - vault-focal-yoga_rgw + - vault-focal-yoga-namespaced + - focal-yoga-multisite - jammy-yoga-multisite - jammy-zed-multisite: voting: false @@ -23,6 +28,18 @@ charm_build_name: ceph-radosgw build_type: charmcraft charmcraft_channel: 2.0/stable +- job: + name: focal-yoga-multisite + parent: func-target + dependencies: + - osci-lint + - charm-build + - tox-py38 + - tox-py39 + - name: tox-py310 + soft: true + vars: + tox_extra_args: focal-yoga-multisite - job: name: jammy-yoga-multisite parent: func-target @@ -47,6 +64,13 @@ - jammy-yoga-multisite vars: tox_extra_args: kinetic-zed-multisite +- job: + name: vault-focal-yoga_rgw + parent: func-target + dependencies: + - focal-yoga-multisite + vars: + tox_extra_args: vault:focal-yoga - job: name: vault-jammy-yoga_rgw parent: func-target @@ -54,6 +78,13 @@ - jammy-yoga-multisite vars: tox_extra_args: vault:jammy-yoga +- job: + name: vault-focal-yoga-namespaced + parent: func-target + dependencies: + - focal-yoga-multisite + vars: + tox_extra_args: vault:focal-yoga-namespaced - job: name: vault-jammy-yoga-namespaced parent: func-target diff --git a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml new file mode 100644 index 00000000..8c1a1cfd --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml @@ -0,0 +1,99 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml new file mode 100644 index 00000000..7d05aa82 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml @@ -0,0 +1,124 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: latest/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 
'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml new file mode 100644 index 00000000..697a9be8 --- /dev/null +++ b/ceph-radosgw/tests/bundles/focal-yoga.yaml @@ -0,0 +1,123 @@ +options: + source: &source cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: latest/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 4ccd8cb0..d45160cd 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -4,6 +4,9 @@ gate_bundles: - jammy-yoga-multisite - vault: jammy-yoga - vault: jammy-yoga-namespaced + - focal-yoga-multisite + - vault: focal-yoga + - vault: focal-yoga-namespaced smoke_bundles: - jammy-yoga-multisite diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index bddbd1f2..45b39294 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -51,6 +51,15 @@ commands = charmcraft -v pack {toxinidir}/rename.sh +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py310] basepython = python3.10 deps = -r{toxinidir}/requirements.txt From 87e524ebf30551264890ccf8765fe11271622198 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 2 Nov 2022 14:38:03 +0100 Subject: [PATCH 2456/2699] Work around config initialisation behaviour change The previous (classic) version of the charm initialised a Config object in the install hook and let it go out of scope. Initialise a config object explicitly in the install and upgrade charm hooks. 
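A minimal sketch of the charmhelpers semantics involved (assuming the
charmhelpers.core.hookenv Config API):

    from charmhelpers.core.hookenv import config

    c = config()   # Config dict that also carries the previous hook's values
    c.save()       # persist current values so the next hook can diff them

    # A later config-changed hook can then consult c.previous('source') or
    # c.changed('source') instead of treating every option as newly set;
    # that comparison is what otherwise made config_changed attempt an
    # upgrade from distro to the configured release.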
Change-Id: Ic389c840cc4253adaddcaa50d184db6ca66cb397 --- ceph-mon/src/charm.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 788c1f73..8a1786b4 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -53,7 +53,20 @@ class CephMonCharm(ops_openstack.core.OSBaseCharm): # TODO: Figure out how to do hardening in an operator-framework # world + + def _initialise_config(self): + # The following two lines are a horrible hack to deal with the + # lifecycle of a charm changing compared to the classic charm. + # The previous (classic) version of the charm initialised a + # Config object in the install hook and let it go out of scope. + # As a result of this, the config_changed processing attempts + # to upgrade Ceph from distro to the configured release when it + # runs during the install or upgrade-charm hooks. + c = hooks.config() + c.save() + def on_install(self, event): + self._initialise_config() self.install_pkgs() rm_packages = ceph.determine_packages_to_remove() if rm_packages: @@ -74,6 +87,7 @@ def on_pre_series_upgrade(self, event): hooks.pre_series_upgrade() def on_upgrade(self, event): + self._initialise_config() self.metrics_endpoint.update_alert_rules() hooks.upgrade_charm() self.on.notify_clients.emit() From 188c597c77cc2250d9b3a7ddd11f35dfc4a0daf1 Mon Sep 17 00:00:00 2001 From: "Chi Wai, Chan" Date: Fri, 4 Nov 2022 14:04:48 +0800 Subject: [PATCH 2457/2699] Add job matcher. This allows query to distinguish between ceph clusters using job label. Closes-Bug: #1990248 Change-Id: I8c14d6ab03fab3830d6da632b5dec1065d9068b2 --- .../src/dashboards/ceph-cluster.json | 89 +++++++++++------- .../src/dashboards/cephfs-overview.json | 27 +++++- .../src/dashboards/host-details.json | 71 +++++++++----- .../src/dashboards/hosts-overview.json | 37 ++++++-- .../src/dashboards/osd-device-details.json | 57 ++++++++---- .../src/dashboards/osds-overview.json | 93 ++++++++++++------- .../src/dashboards/pool-detail.json | 37 ++++++-- .../src/dashboards/pool-overview.json | 67 ++++++++----- .../src/dashboards/radosgw-detail.json | 45 ++++++--- .../src/dashboards/radosgw-overview.json | 37 ++++++-- .../src/dashboards/radosgw-sync-overview.json | 29 +++++- .../src/dashboards/rbd-details.json | 47 +++++++--- .../src/dashboards/rbd-overview.json | 39 ++++++-- 13 files changed, 474 insertions(+), 201 deletions(-) diff --git a/ceph-dashboard/src/dashboards/ceph-cluster.json b/ceph-dashboard/src/dashboards/ceph-cluster.json index d683696e..d376e242 100644 --- a/ceph-dashboard/src/dashboards/ceph-cluster.json +++ b/ceph-dashboard/src/dashboards/ceph-cluster.json @@ -113,7 +113,7 @@ "tableColumn": "", "targets": [ { - "expr": "ceph_health_status", + "expr": "ceph_health_status{job=~\"$job\"}", "format": "time_series", "instant": true, "interval": "$interval", @@ -186,7 +186,7 @@ "displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "count(ceph_osd_metadata)", + "expr": "count(ceph_osd_metadata{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "All", @@ -201,7 +201,7 @@ "displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "sum(ceph_osds_in)", + "expr": "sum(ceph_osds_in{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "In", @@ -216,7 +216,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", 
- "expr": "sum(ceph_osd_in == bool 0)", + "expr": "sum(ceph_osd_in{job=~\"$job\"} == bool 0)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -233,7 +233,7 @@ "displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "sum(ceph_osd_up)", + "expr": "sum(ceph_osd_up{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Up", @@ -249,7 +249,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "sum(ceph_osd_up == bool 0)", + "expr": "sum(ceph_osd_up{job=~\"$job\"} == bool 0)", "format": "time_series", "intervalFactor": 1, "legendFormat": "Down", @@ -330,7 +330,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes_used)/sum(ceph_osd_stat_bytes)", + "expr": "sum(ceph_osd_stat_bytes_used{job=~\"$job\"})/sum(ceph_osd_stat_bytes{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Used", @@ -433,49 +433,49 @@ "steppedLine": false, "targets": [ { - "expr": "sum(ceph_pg_total)", + "expr": "sum(ceph_pg_total{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Total", "refId": "A" }, { - "expr": "sum(ceph_pg_active)", + "expr": "sum(ceph_pg_active{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Active", "refId": "B" }, { - "expr": "sum(ceph_pg_total - ceph_pg_active)", + "expr": "sum(ceph_pg_total{job=~\"$job\"} - ceph_pg_active{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Inactive", "refId": "G" }, { - "expr": "sum(ceph_pg_undersized)", + "expr": "sum(ceph_pg_undersized{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Undersized", "refId": "F" }, { - "expr": "sum(ceph_pg_degraded)", + "expr": "sum(ceph_pg_degraded{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Degraded", "refId": "C" }, { - "expr": "sum(ceph_pg_inconsistent)", + "expr": "sum(ceph_pg_inconsistent{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Inconsistent", "refId": "D" }, { - "expr": "sum(ceph_pg_down)", + "expr": "sum(ceph_pg_down{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Down", @@ -577,28 +577,28 @@ "steppedLine": false, "targets": [ { - "expr": "quantile(0.95, ceph_osd_apply_latency_ms)", + "expr": "quantile(0.95, ceph_osd_apply_latency_ms{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Apply Latency P_95", "refId": "A" }, { - "expr": "quantile(0.95, ceph_osd_commit_latency_ms)", + "expr": "quantile(0.95, ceph_osd_commit_latency_ms{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Commit Latency P_95", "refId": "B" }, { - "expr": "avg(ceph_osd_apply_latency_ms)", + "expr": "avg(ceph_osd_apply_latency_ms{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Avg Apply Latency", "refId": "C" }, { - "expr": "avg(ceph_osd_commit_latency_ms)", + "expr": "avg(ceph_osd_commit_latency_ms{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Avg Commit Latency", @@ -687,7 +687,7 @@ "displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "sum(ceph_mon_quorum_status)", + "expr": "sum(ceph_mon_quorum_status{job=~\"$job\"})", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -704,7 +704,7 @@ 
"displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "count(ceph_mon_quorum_status)", + "expr": "count(ceph_mon_quorum_status{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Total", @@ -721,7 +721,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Annotation", "displayValueWithAlias": "Never", - "expr": "count(ceph_mon_quorum_status) / sum(ceph_mon_quorum_status)", + "expr": "count(ceph_mon_quorum_status{job=~\"$job\"}) / sum(ceph_mon_quorum_status{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "MONs out of Quorum", @@ -774,7 +774,7 @@ "displayAliasType": "Always", "displayType": "Regular", "displayValueWithAlias": "When Alias Displayed", - "expr": "ceph_mds_server_handle_client_session", + "expr": "ceph_mds_server_handle_client_session{job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "Clients", @@ -841,7 +841,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(ceph_osd_op_w_in_bytes[1m]))", + "expr": "sum(irate(ceph_osd_op_w_in_bytes{job=~\"$job\"}[1m]))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -849,7 +849,7 @@ "refId": "A" }, { - "expr": "sum(irate(ceph_osd_op_r_out_bytes[1m]))", + "expr": "sum(irate(ceph_osd_op_r_out_bytes{job=~\"$job\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", @@ -948,7 +948,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(deriv(ceph_pool_stored[1m]))", + "expr": "sum(deriv(ceph_pool_stored{job=~\"$job\"}[1m]))", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -1046,14 +1046,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(ceph_osd_op_w[1m]))", + "expr": "sum(rate(ceph_osd_op_w{job=~\"$job\"}[1m]))", "interval": "", "legendFormat": "Write", "queryType": "randomWalk", "refId": "A" }, { - "expr": "sum(irate(ceph_osd_op_r[1m]))", + "expr": "sum(irate(ceph_osd_op_r{job=~\"$job\"}[1m]))", "hide": false, "interval": "", "legendFormat": "Read", @@ -1159,14 +1159,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(ceph_pool_objects)", + "expr": "sum(ceph_pool_objects{job=~\"$job\"})", "interval": "", "legendFormat": "Total", "queryType": "randomWalk", "refId": "A" }, { - "expr": "ceph_num_objects_degraded", + "expr": "ceph_num_objects_degraded{job=~\"$job\"}", "hide": false, "interval": "", "legendFormat": "Degraded", @@ -1174,7 +1174,7 @@ "refId": "B" }, { - "expr": "ceph_num_objects_misplaced", + "expr": "ceph_num_objects_misplaced{job=~\"$job\"}", "hide": false, "interval": "", "legendFormat": "Misplaced", @@ -1182,7 +1182,7 @@ "refId": "C" }, { - "expr": "ceph_num_objects_unfound", + "expr": "ceph_num_objects_unfound{job=~\"$job\"}", "hide": false, "interval": "", "legendFormat": "Unfound", @@ -1272,7 +1272,7 @@ "span": 12, "targets": [ { - "expr": "ceph_osd_stat_bytes_used / ceph_osd_stat_bytes", + "expr": "ceph_osd_stat_bytes_used{job=~\"$job\"} / ceph_osd_stat_bytes{job=~\"$job\"}", "format": "time_series", "interval": "1m", "intervalFactor": 1, @@ -1343,7 +1343,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "ceph_osd_numpg", + "expr": "ceph_osd_numpg{job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "#PGs", @@ -1423,7 +1423,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(ceph_osd_recovery_ops[1m]))", + "expr": "sum(irate(ceph_osd_recovery_ops{job=~\"$job\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Op/s", 
@@ -1565,6 +1565,27 @@ "query": "1m,10m,30m,1h,6h,12h,1d,7d,14d,30d", "refresh": 2, "type": "interval" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/cephfs-overview.json b/ceph-dashboard/src/dashboards/cephfs-overview.json index 57922f55..bb07e5ce 100644 --- a/ceph-dashboard/src/dashboards/cephfs-overview.json +++ b/ceph-dashboard/src/dashboards/cephfs-overview.json @@ -89,14 +89,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"})", + "expr": "sum(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\", job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read Ops", "refId": "A" }, { - "expr": "sum(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"})", + "expr": "sum(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\", job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write Ops", @@ -180,7 +180,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}", + "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{ceph_daemon}}", @@ -269,6 +269,27 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/host-details.json b/ceph-dashboard/src/dashboards/host-details.json index 46fd31a7..91ba1d25 100644 --- a/ceph-dashboard/src/dashboards/host-details.json +++ b/ceph-dashboard/src/dashboards/host-details.json @@ -114,7 +114,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", + "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts', job=~\"$job\"}))", "format": "time_series", "intervalFactor": 2, "refId": "A", @@ -182,7 +182,7 @@ "steppedLine": false, "targets": [ { - "expr": "cpu_usage_user{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_user{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -191,7 +191,7 @@ "step": 2 }, { - "expr": "cpu_usage_iowait{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_iowait{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -200,7 +200,7 @@ "step": 2 }, { - "expr": "cpu_usage_nice{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_nice{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -209,7 +209,7 @@ "step": 2 }, { - "expr": "cpu_usage_softirq{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": 
"cpu_usage_softirq{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -218,7 +218,7 @@ "step": 2 }, { - "expr": "cpu_usage_irq{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_irq{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -227,7 +227,7 @@ "step": 2 }, { - "expr": "cpu_usage_system{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_system{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -236,7 +236,7 @@ "step": 2 }, { - "expr": "cpu_usage_idle{cpu=\"cpu-total\", host='$ceph_hosts'}", + "expr": "cpu_usage_idle{cpu=\"cpu-total\", host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -337,14 +337,14 @@ "steppedLine": false, "targets": [ { - "expr": "mem_used{host='$ceph_hosts'}", + "expr": "mem_used{host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "used", "refId": "D" }, { - "expr": "mem_free{host='$ceph_hosts'}", + "expr": "mem_free{host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -352,7 +352,7 @@ "refId": "A" }, { - "expr": "mem_buffered{host='$ceph_hosts'} + mem_cached{host='$ceph_hosts'}", + "expr": "mem_buffered{host='$ceph_hosts', job=~\"$job\"} + mem_cached{host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -360,7 +360,7 @@ "refId": "C" }, { - "expr": "mem_total{host='$ceph_hosts'}", + "expr": "mem_total{host='$ceph_hosts', job=~\"$job\"}", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -455,7 +455,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (device) (\n irate(net_bytes_recv{host='$ceph_hosts',device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(net_bytes_recv{host='$ceph_hosts',device!=\"lo\", job=~\"$job\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", @@ -464,7 +464,7 @@ "textEditor": true }, { - "expr": "sum by (device) (\n irate(net_bytes_sent{host='$ceph_hosts',device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(net_bytes_sent{host='$ceph_hosts',device!=\"lo\", job=~\"$job\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -555,7 +555,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(net_drop_in{host='$ceph_hosts'}[1m])", + "expr": "irate(net_drop_in{host='$ceph_hosts', job=~\"$job\"}[1m])", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -563,7 +563,7 @@ "refId": "A" }, { - "expr": "irate(net_drop_out{host='$ceph_hosts'}[1m])", + "expr": "irate(net_drop_out{host='$ceph_hosts', job=~\"$job\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -675,7 +675,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", + "expr": "sum(ceph_osd_stat_bytes{job=~\"$job\"} and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\", job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "refId": "A", @@ -739,7 +739,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(net_err_in{host='$ceph_hosts'}[1m])", + "expr": "irate(net_err_in{host='$ceph_hosts', job=~\"$job\"}[1m])", "format": "time_series", "instant": false, 
"intervalFactor": 1, @@ -747,7 +747,7 @@ "refId": "A" }, { - "expr": "irate(net_err_out{host='$ceph_hosts'}[1m])", + "expr": "irate(net_err_out{host='$ceph_hosts', job=~\"$job\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -852,7 +852,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(label_replace((irate(diskio_writes{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr": "label_replace(label_replace((irate(diskio_writes{host='$ceph_hosts', job=~\"$job\"}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) writes", @@ -861,7 +861,7 @@ "textEditor": true }, { - "expr": "label_replace(label_replace((irate(diskio_reads{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr": "label_replace(label_replace((irate(diskio_reads{host='$ceph_hosts', job=~\"$job\"}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -953,14 +953,14 @@ "steppedLine": false, "targets": [ { - "expr" : "label_replace(label_replace((irate(diskio_write_bytes[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr" : "label_replace(label_replace((irate(diskio_write_bytes{job=~\"$job\"}[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) write", "refId": "B" }, { - "expr" : "label_replace(label_replace((irate(diskio_read_bytes[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr" : "label_replace(label_replace((irate(diskio_read_bytes{job=~\"$job\"}[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', 
job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) read", @@ -1046,7 +1046,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(label_replace((irate(diskio_weighted_io_time{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr": "label_replace(label_replace((irate(diskio_weighted_io_time{host='$ceph_hosts', job=~\"$job\"}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1137,7 +1137,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(label_replace((irate(diskio_io_time{host='$ceph_hosts'}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts'}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", + "expr": "label_replace(label_replace((irate(diskio_io_time{host='$ceph_hosts', job=~\"$job\"}[5m])), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{exported_instance='$ceph_hosts', job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\")", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1230,6 +1230,27 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/hosts-overview.json b/ceph-dashboard/src/dashboards/hosts-overview.json index 9c1a3729..85880eb1 100644 --- a/ceph-dashboard/src/dashboards/hosts-overview.json +++ b/ceph-dashboard/src/dashboards/hosts-overview.json @@ -101,7 +101,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(sum by (hostname) (ceph_osd_metadata))", + "expr": "count(sum by (hostname) (ceph_osd_metadata{job=~\"$job\"}))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -184,7 +184,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg(\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100)))", + "expr": "avg(\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total', job=~\"$job\"} / 100)))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -267,7 +267,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg (((mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) - (\n (mem_free{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) 
+ \n (mem_cached{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_buffered{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (mem_slab{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}))", + "expr": "avg (((mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", job=~\"$job\"}) - (\n (mem_free{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (mem_cached{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", job=~\"$job\"}) + \n (mem_buffered{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", job=~\"$job\"}) +\n (mem_slab{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", job=~\"$job\"})\n )) /\n (mem_total{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", job=~\"$job\"}))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -349,7 +349,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum ((irate(diskio_reads{dns_name=~\"($osd_hosts|$mds_hosts).*\"}[5m])) + \n(irate(diskio_writes{dns_name=~\"($osd_hosts|$mds_hosts).*\"}[5m])))", + "expr": "sum ((irate(diskio_reads{dns_name=~\"($osd_hosts|$mds_hosts).*\", job=~\"$job\"}[5m])) + \n(irate(diskio_writes{dns_name=~\"($osd_hosts|$mds_hosts).*\", job=~\"$job\"}[5m])))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -431,7 +431,7 @@ "tableColumn": "", "targets": [ { - "expr" : "avg (label_replace(label_replace((irate(diskio_io_time[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{dns_name=~\"($osd_hosts|$mds_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"))", + "expr" : "avg (label_replace(label_replace((irate(diskio_io_time{job=~\"$job\"}[5m]) / 10 ), \"device\", \"$1\", \"name\", \"(.+)\"), \"exported_instance\", \"$1\", \"host\", \"(.+)\") * on(exported_instance, device) group_right(ceph_daemon) label_replace(ceph_disk_occupation{dns_name=~\"($osd_hosts|$mds_hosts).*\", job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -514,7 +514,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum (\n irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\nsum (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]))", + "expr": "sum (\n irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\", job=~\"$job\"}[1m])\n) +\nsum (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\", job=~\"$job\"}[1m]))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -572,7 +572,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,100 * (\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total'} / 100))))", + "expr": "topk(10,100 * (\n 1 - (\n avg by(dns_name) \n (cpu_usage_idle{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\", cpu='cpu-total', job=~\"$job\"} / 100))))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{dns_name}}", @@ -659,7 +659,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, (sum by(dns_name) (\n (\n 
irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\n (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ))\n )\n)", + "expr": "topk(10, (sum by(dns_name) (\n (\n irate(net_bytes_recv{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\", job=~\"$job\"}[1m])\n ) +\n (\n irate(net_bytes_sent{dns_name=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\", job=~\"$job\"}[1m])\n ))\n )\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{dns_name}}", @@ -813,6 +813,27 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/osd-device-details.json b/ceph-dashboard/src/dashboards/osd-device-details.json index f4f4cbe9..e2ee7e28 100644 --- a/ceph-dashboard/src/dashboards/osd-device-details.json +++ b/ceph-dashboard/src/dashboards/osd-device-details.json @@ -130,7 +130,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "Never", - "expr": "ceph_osd_up{ceph_daemon=~\"$osd\"}", + "expr": "ceph_osd_up{job=~\"$job\", ceph_daemon=~\"$osd\"}", "format": "table", "instant": false, "interval": "", @@ -229,7 +229,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "Never", - "expr": "ceph_osd_in{ceph_daemon=~\"$osd\"}", + "expr": "ceph_osd_in{job=~\"$job\", ceph_daemon=~\"$osd\"}", "format": "table", "instant": false, "interval": "", @@ -315,7 +315,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_osd_numpg{ceph_daemon=~\"$osd\"}", + "expr": "ceph_osd_numpg{job=~\"$job\", ceph_daemon=~\"$osd\"}", "format": "time_series", "instant": false, "interval": "", @@ -436,7 +436,7 @@ "pluginVersion": "7.4.1", "targets": [ { - "expr": "(ceph_osd_stat_bytes_used{ceph_daemon=\"$osd\"}/ceph_osd_stat_bytes{ceph_daemon=\"$osd\"})*100", + "expr": "(ceph_osd_stat_bytes_used{job=~\"$job\", ceph_daemon=~\"$osd\"}/ceph_osd_stat_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"})*100", "interval": "", "legendFormat": "", "queryType": "randomWalk", @@ -514,14 +514,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])", + "expr": "irate(ceph_osd_op_r_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "read", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])", + "expr": "irate(ceph_osd_op_w_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "write", @@ -623,14 +623,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])", + "expr": 
"irate(ceph_osd_op_r{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])", + "expr": "irate(ceph_osd_op_w{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", @@ -732,14 +732,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])", + "expr": "irate(ceph_osd_op_r_out_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read Bytes", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])", + "expr": "irate(ceph_osd_op_w_in_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write Bytes", @@ -855,14 +855,14 @@ "steppedLine": false, "targets": [ { - "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}/{{device}} Reads", "refId": "A" }, { - "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}/{{device}} Writes", @@ -964,14 +964,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}} Writes", "refId": "A" }, { - "expr": 
"label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}} Reads", @@ -1073,14 +1073,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}} {{device}} Reads", "refId": "A" }, { - "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}} {{device}} Writes", @@ -1177,7 +1177,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}}", @@ -1246,6 +1246,27 @@ "skipUrlSync": false, "type": "datasource" }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": 
"", + "type": "query", + "useTags": false + }, { "allValue": null, "current": {}, @@ -1256,7 +1277,7 @@ "multi": false, "name": "osd", "options": [], - "query": "label_values(ceph_osd_metadata,ceph_daemon)", + "query": "label_values(ceph_osd_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "(.*)", "skipUrlSync": false, diff --git a/ceph-dashboard/src/dashboards/osds-overview.json b/ceph-dashboard/src/dashboards/osds-overview.json index 4b91df9e..95bac3c4 100644 --- a/ceph-dashboard/src/dashboards/osds-overview.json +++ b/ceph-dashboard/src/dashboards/osds-overview.json @@ -85,21 +85,21 @@ "steppedLine": false, "targets": [ { - "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "expr": "avg (irate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count{job=~\"$job\"}[1m]) * 1000)", "format": "time_series", "intervalFactor": 1, "legendFormat": "AVG read", "refId": "A" }, { - "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "expr": "max (irate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count{job=~\"$job\"}[1m]) * 1000)", "format": "time_series", "intervalFactor": 1, "legendFormat": "MAX read", "refId": "B" }, { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)", + "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count{job=~\"$job\"}[1m]) * 1000)\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "@95%ile", @@ -211,7 +211,7 @@ ], "targets": [ { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n", + "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count{job=~\"$job\"}[1m]) * 1000)\n ))\n)\n\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -262,21 +262,21 @@ "steppedLine": false, "targets": [ { - "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "expr": "avg (irate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count{job=~\"$job\"}[1m]) * 1000)", "format": "time_series", "intervalFactor": 1, "legendFormat": "AVG write", "refId": "A" }, { - "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "expr": "max (irate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count{job=~\"$job\"}[1m]) * 1000)", "format": "time_series", "intervalFactor": 1, "legendFormat": "MAX write", "refId": "B" }, { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)", + "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count{job=~\"$job\"}[1m]) * 1000)\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "@95%ile write", @@ -388,7 +388,7 @@ ], "targets": [ { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n", + "expr": 
"topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count{job=~\"$job\"}[1m]) * 1000)\n ))\n)\n\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -431,7 +431,7 @@ "strokeWidth": 1, "targets": [ { - "expr": "count by (device_class) (ceph_osd_metadata)", + "expr": "count by (device_class) (ceph_osd_metadata{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device_class}}", @@ -479,7 +479,7 @@ "strokeWidth": 1, "targets": [ { - "expr": "count(ceph_bluefs_wal_total_bytes)", + "expr": "count(ceph_bluefs_wal_total_bytes{job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "bluestore", @@ -487,7 +487,7 @@ "step": 240 }, { - "expr": "count(ceph_osd_metadata) - count(ceph_bluefs_wal_total_bytes)", + "expr": "count(ceph_osd_metadata{job=~\"$job\"}) - count(ceph_bluefs_wal_total_bytes{job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "filestore", @@ -495,7 +495,7 @@ "step": 240 }, { - "expr": "absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)", + "expr": "absent(ceph_bluefs_wal_total_bytes{job=~\"$job\"})*count(ceph_osd_metadata{job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "filestore", @@ -548,7 +548,7 @@ "strokeWidth": "1", "targets": [ { - "expr": "count(ceph_osd_stat_bytes < 1099511627776)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} < 1099511627776)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<1 TB", @@ -556,7 +556,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 1099511627776 < 2199023255552)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<2 TB", @@ -564,7 +564,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 2199023255552 < 3298534883328)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<3TB", @@ -572,7 +572,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 3298534883328 < 4398046511104)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<4TB", @@ -580,7 +580,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 4398046511104 < 6597069766656)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<6TB", @@ -588,7 +588,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 6597069766656 < 8796093022208)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<8TB", @@ -596,7 +596,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 8796093022208 < 10995116277760)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<10TB", @@ -604,7 +604,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 10995116277760 < 13194139533312)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<12TB", @@ -612,7 +612,7 @@ "step": 2 }, { - "expr": "count(ceph_osd_stat_bytes >= 13194139533312)", + "expr": 
"count(ceph_osd_stat_bytes{job=~\"$job\"} >= 13194139533312)", "format": "time_series", "intervalFactor": 2, "legendFormat": "12TB+", @@ -668,7 +668,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_osd_numpg\n", + "expr": "ceph_osd_numpg{job=~\"$job\"}\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -766,14 +766,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_pool_rd[30s])))", + "expr": "round(sum(irate(ceph_pool_rd{job=~\"$job\"}[30s])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", "refId": "A" }, { - "expr": "round(sum(irate(ceph_pool_wr[30s])))", + "expr": "round(sum(irate(ceph_pool_wr{job=~\"$job\"}[30s])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", @@ -823,21 +823,42 @@ "tags": [], "templating": { "list": [ - { - "current": { + { + "current": { "tags": [], "text": "default", "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - } + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } ] }, "time": { diff --git a/ceph-dashboard/src/dashboards/pool-detail.json b/ceph-dashboard/src/dashboards/pool-detail.json index dd6bc392..056592f8 100644 --- a/ceph-dashboard/src/dashboards/pool-detail.json +++ b/ceph-dashboard/src/dashboards/pool-detail.json @@ -102,7 +102,7 @@ "tableColumn": "", "targets": [ { - "expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "(ceph_pool_stored{job=~\"$job\"} / (ceph_pool_stored{job=~\"$job\"} + ceph_pool_max_avail{job=~\"$job\"})) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -183,7 +183,7 @@ "tableColumn": "", "targets": [ { - "expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0", + "expr": "(ceph_pool_max_avail{job=~\"$job\"} / deriv(ceph_pool_stored{job=~\"$job\"}[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"} > 0", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -248,7 +248,7 @@ "steppedLine": false, "targets": [ { - "expr": "deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "deriv(ceph_pool_objects{job=~\"$job\"}[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "Objects per second", @@ -341,14 +341,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "irate(ceph_pool_rd{job=~\"$job\"}[1m]) * on(pool_id) group_left(instance,name) 
ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "reads", "refId": "B" }, { - "expr": "irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "irate(ceph_pool_wr{job=~\"$job\"}[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "writes", @@ -441,14 +441,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "irate(ceph_pool_rd_bytes{job=~\"$job\"}[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "reads", "refId": "A" }, { - "expr": "irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "irate(ceph_pool_wr_bytes{job=~\"$job\"}[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "writes", @@ -536,7 +536,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "ceph_pool_objects{job=~\"$job\"} * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\", job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "Number of Objects", @@ -625,6 +625,27 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/pool-overview.json b/ceph-dashboard/src/dashboards/pool-overview.json index c405f607..1751b8a0 100644 --- a/ceph-dashboard/src/dashboards/pool-overview.json +++ b/ceph-dashboard/src/dashboards/pool-overview.json @@ -82,7 +82,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(ceph_pool_metadata)", + "expr": "count(ceph_pool_metadata{job=~\"$job\"})", "format": "table", "instant": true, "interval": "", @@ -170,7 +170,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(ceph_pool_metadata{compression_mode!=\"none\"})", + "expr": "count(ceph_pool_metadata{compression_mode!=\"none\", job=~\"$job\"})", "interval": "", "legendFormat": "", "refId": "A" @@ -256,7 +256,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes)", + "expr": "sum(ceph_osd_stat_bytes{job=~\"$job\"})", "interval": "", "legendFormat": "", "refId": "A" @@ -341,7 +341,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_bytes_used)", + "expr": "sum(ceph_pool_bytes_used{job=~\"$job\"})", "instant": true, "interval": "", "legendFormat": "", @@ -429,7 +429,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_stored)", + "expr": "sum(ceph_pool_stored{job=~\"$job\"})", "instant": true, "interval": "", "legendFormat": "", @@ -516,7 +516,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_compress_under_bytes - 
ceph_pool_compress_bytes_used)", + "expr": "sum(ceph_pool_compress_under_bytes{job=~\"$job\"} - ceph_pool_compress_bytes_used{job=~\"$job\"})", "interval": "", "legendFormat": "", "refId": "A" @@ -601,7 +601,7 @@ "tableColumn": "", "targets": [ { - "expr": "(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100", + "expr": "(sum(ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) / sum(ceph_pool_stored_raw{job=~\"$job\"} and ceph_pool_compress_under_bytes{job=~\"$job\"} > 0)) * 100", "format": "table", "hide": false, "interval": "", @@ -688,7 +688,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)", + "expr": "sum(ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) / sum(ceph_pool_compress_bytes_used{job=~\"$job\"} > 0)", "interval": "", "legendFormat": "", "refId": "A" @@ -1080,7 +1080,7 @@ ], "targets": [ { - "expr": "(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)", + "expr": "(ceph_pool_percent_used{job=~\"$job\"} * on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"})", "format": "table", "hide": false, "instant": true, @@ -1090,7 +1090,7 @@ "refId": "D" }, { - "expr": "ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata", + "expr": "ceph_pool_stored{job=~\"$job\"} * on(pool_id) group_left ceph_pool_metadata{job=~\"$job\"}", "format": "table", "instant": true, "interval": "", @@ -1098,7 +1098,7 @@ "refId": "J" }, { - "expr": "ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata", + "expr": "ceph_pool_max_avail{job=~\"$job\"} * on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"}", "format": "table", "instant": true, "interval": "", @@ -1106,7 +1106,7 @@ "refId": "B" }, { - "expr": "delta(ceph_pool_stored[5d])", + "expr": "delta(ceph_pool_stored{job=~\"$job\"}[5d])", "format": "table", "instant": true, "interval": "", @@ -1114,7 +1114,7 @@ "refId": "F" }, { - "expr": "ceph_pool_metadata", + "expr": "ceph_pool_metadata{job=~\"$job\"}", "format": "table", "instant": true, "interval": "", @@ -1122,7 +1122,7 @@ "refId": "I" }, { - "expr": "ceph_pool_metadata{compression_mode!=\"none\"}", + "expr": "ceph_pool_metadata{compression_mode!=\"none\", job=~\"$job\"}", "format": "table", "instant": true, "interval": "", @@ -1130,7 +1130,7 @@ "refId": "K" }, { - "expr": "(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)", + "expr": "(ceph_pool_compress_under_bytes{job=~\"$job\"} / ceph_pool_compress_bytes_used{job=~\"$job\"} > 0) and on(pool_id) (((ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) / ceph_pool_stored_raw{job=~\"$job\"}) * 100 > 0.5)", "format": "table", "hide": false, "instant": true, @@ -1140,7 +1140,7 @@ "refId": "A" }, { - "expr": "((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100", + "expr": "((ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) / ceph_pool_stored_raw{job=~\"$job\"}) * 100", "format": "table", "instant": true, "interval": "", @@ -1148,7 +1148,7 @@ "refId": "C" }, { - "expr": "(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)", + "expr": "(ceph_pool_compress_under_bytes{job=~\"$job\"} - ceph_pool_compress_bytes_used{job=~\"$job\"} > 0)", "format": "table", "instant": true, "interval": "", @@ -1156,7 +1156,7 @@ "refId": "E" }, { - "expr": "rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])", + "expr": 
"rate(ceph_pool_rd{job=~\"$job\"}[30s]) + rate(ceph_pool_wr{job=~\"$job\"}[30s])", "format": "table", "instant": true, "interval": "", @@ -1164,7 +1164,7 @@ "refId": "G" }, { - "expr": "rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])", + "expr": "rate(ceph_pool_rd_bytes{job=~\"$job\"}[30s]) + rate(ceph_pool_wr_bytes{job=~\"$job\"}[30s])", "format": "table", "instant": true, "interval": "", @@ -1226,7 +1226,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "expr": "topk($topk,round((rate(ceph_pool_rd{job=~\"$job\"}[30s]) + rate(ceph_pool_wr{job=~\"$job\"}[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"}) ", "format": "time_series", "hide": false, "interval": "", @@ -1235,7 +1235,7 @@ "refId": "F" }, { - "expr": "topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "expr": "topk($topk,rate(ceph_pool_wr{job=~\"$job\"}[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"}) ", "format": "time_series", "hide": true, "interval": "", @@ -1330,7 +1330,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)", + "expr": "topk($topk,(rate(ceph_pool_rd_bytes{job=~\"$job\"}[30s]) + rate(ceph_pool_wr_bytes{job=~\"$job\"}[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"})", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -1422,7 +1422,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata", + "expr": "ceph_pool_bytes_used{job=~\"$job\"} * on(pool_id) group_right ceph_pool_metadata{job=~\"$job\"}", "interval": "", "legendFormat": "{{name}}", "refId": "A" @@ -1521,6 +1521,27 @@ "query": "15", "skipUrlSync": false, "type": "textbox" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/radosgw-detail.json b/ceph-dashboard/src/dashboards/radosgw-detail.json index bf5b16b8..5a5c64ae 100644 --- a/ceph-dashboard/src/dashboards/radosgw-detail.json +++ b/ceph-dashboard/src/dashboards/radosgw-detail.json @@ -90,14 +90,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "expr": "sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\", ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{job=~\"$job\", ceph_daemon=~\"($rgw_servers)\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "GET {{ceph_daemon}}", "refId": "A" }, { - "expr": "sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "expr": "sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\", 
ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{job=~\"$job\", ceph_daemon=~\"($rgw_servers)\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUT {{ceph_daemon}}", @@ -179,14 +179,14 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_get_b{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "B" }, { - "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_put_b{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", @@ -275,28 +275,28 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Requests Failed {{ceph_daemon}}", "refId": "B" }, { - "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_get{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "C" }, { - "expr": "rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_put{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", "refId": "D" }, { - "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "expr": "rate(ceph_rgw_req{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Other {{ceph_daemon}}", @@ -376,28 +376,28 @@ "strokeWidth": 1, "targets": [ { - "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Failures {{ceph_daemon}}", "refId": "A" }, { - "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_get{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "B" }, { - "expr": "rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "expr": "rate(ceph_rgw_put{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", "refId": "C" }, { - "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "expr": "rate(ceph_rgw_req{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", @@ -451,6 +451,27 @@ "tagsQuery": "", "type": "query", 
"useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/radosgw-overview.json b/ceph-dashboard/src/dashboards/radosgw-overview.json index 487d736b..e1f7cf22 100644 --- a/ceph-dashboard/src/dashboards/radosgw-overview.json +++ b/ceph-dashboard/src/dashboards/radosgw-overview.json @@ -83,14 +83,14 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])", + "expr": "rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "GET AVG", "refId": "A" }, { - "expr": "rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])", + "expr": "rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUT AVG", @@ -170,7 +170,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", + "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req{job=~\"$job\"}[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -252,7 +252,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -334,14 +334,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(ceph_rgw_get_b[30s]))", + "expr": "sum(rate(ceph_rgw_get_b{job=~\"$job\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs", "refId": "A" }, { - "expr": "sum(rate(ceph_rgw_put_b[30s]))", + "expr": "sum(rate(ceph_rgw_put_b{job=~\"$job\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs", @@ -422,7 +422,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)", + "expr": "sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b{job=~\"$job\"}[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b{job=~\"$job\"}[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -503,7 +503,7 @@ "steppedLine": false, "targets": [ { - "expr": 
"label_replace(rate(ceph_rgw_put_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -590,6 +590,27 @@ "refresh": 1, "regex": "", "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/radosgw-sync-overview.json b/ceph-dashboard/src/dashboards/radosgw-sync-overview.json index e9136d78..66775da6 100644 --- a/ceph-dashboard/src/dashboards/radosgw-sync-overview.json +++ b/ceph-dashboard/src/dashboards/radosgw-sync-overview.json @@ -70,7 +70,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum{job=~\"$job\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -151,7 +151,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count{job=~\"$job\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -232,7 +232,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum[30s]) * 1000)", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum{job=~\"$job\"}[30s]) * 1000)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -313,7 +313,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors{job=~\"$job\"}[30s]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -400,6 +400,27 @@ "refresh": 1, "regex": "", "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/rbd-details.json b/ceph-dashboard/src/dashboards/rbd-details.json index 59932a5e..86851c5f 100644 --- a/ceph-dashboard/src/dashboards/rbd-details.json +++ b/ceph-dashboard/src/dashboards/rbd-details.json @@ -40,7 +40,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -74,14 +74,14 @@ 
"steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_write_ops{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_read_ops{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read", @@ -133,7 +133,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -165,14 +165,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_write_bytes{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_read_bytes{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read", @@ -224,7 +224,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -256,14 +256,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_write_latency_sum{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "irate(ceph_rbd_read_latency_sum{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{job=~\"$job\", pool=\"$Pool\", image=\"$Image\"}[30s])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read", @@ -320,8 +320,8 @@ { "current": {}, "hide": 0, - "label": null, - "name": "Datasource", + "label": "Data Source", + "name": "datasource", "options": [], "query": "prometheus", "refresh": 1, @@ -332,7 +332,7 @@ { "allValue": null, "current": {}, - "datasource": "$Datasource", + "datasource": "$datasource", "hide": 0, "includeAll": false, "label": null, @@ -353,7 +353,7 @@ { "allValue": null, "current": {}, - "datasource": "$Datasource", + "datasource": "$datasource", "hide": 0, "includeAll": false, "label": null, @@ -370,6 +370,27 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph-dashboard/src/dashboards/rbd-overview.json b/ceph-dashboard/src/dashboards/rbd-overview.json index eb15fbcb..a415e1d5 100644 --- a/ceph-dashboard/src/dashboards/rbd-overview.json 
+++ b/ceph-dashboard/src/dashboards/rbd-overview.json @@ -83,14 +83,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_ops[30s])))", + "expr": "round(sum(irate(ceph_rbd_write_ops{job=~\"$job\"}[30s])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_ops[30s])))", + "expr": "round(sum(irate(ceph_rbd_read_ops{job=~\"$job\"}[30s])))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -176,7 +176,7 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_bytes[30s])))", + "expr": "round(sum(irate(ceph_rbd_write_bytes{job=~\"$job\"}[30s])))", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -184,7 +184,7 @@ "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_bytes[30s])))", + "expr": "round(sum(irate(ceph_rbd_read_bytes{job=~\"$job\"}[30s])))", "format": "time_series", "instant": false, "interval": "", @@ -271,14 +271,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))", + "expr": "round(sum(irate(ceph_rbd_write_latency_sum{job=~\"$job\"}[30s])) / sum(irate(ceph_rbd_write_latency_count{job=~\"$job\"}[30s])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))", + "expr": "round(sum(irate(ceph_rbd_read_latency_sum{job=~\"$job\"}[30s])) / sum(irate(ceph_rbd_read_latency_count{job=~\"$job\"}[30s])))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -416,7 +416,7 @@ ], "targets": [ { - "expr": "topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))", + "expr": "topk(10, (sort((irate(ceph_rbd_write_ops{job=~\"$job\"}[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops{job=~\"$job\"}[30s])))))", "format": "table", "instant": true, "intervalFactor": 1, @@ -513,7 +513,7 @@ ], "targets": [ { - "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))", + "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes{job=~\"$job\"}[30s]) + irate(ceph_rbd_write_bytes{job=~\"$job\"}[30s])) by (pool, image, namespace)))", "format": "table", "instant": true, "intervalFactor": 1, @@ -611,7 +611,7 @@ ], "targets": [ { - "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)", + "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum{job=~\"$job\"}[30s]) / clamp_min(irate(ceph_rbd_write_latency_count{job=~\"$job\"}[30s]), 1) +\n irate(ceph_rbd_read_latency_sum{job=~\"$job\"}[30s]) / clamp_min(irate(ceph_rbd_read_latency_count{job=~\"$job\"}[30s]), 1)\n ) by (pool, image, namespace)\n)", "format": "table", "instant": true, "intervalFactor": 1, @@ -645,6 +645,27 @@ "regex": "", "skipUrlSync": false, "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Job", + "multi": false, + "name": "job", + "options": [], + "query": "label_values(ceph_osd_metadata, job)", + "refresh": 1, + "regex": "(.*)", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": 
[], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, From 081f3eaccae27845230ad0c3c0a8b4a111b30e2b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 30 Nov 2022 12:41:41 +0100 Subject: [PATCH 2458/2699] Fix: init alert rules on rel change Check for alert rules early, on first metrics-endpoint rel change Change-Id: Iea39c33c614d204ee39ad39da68c31d213ed19e6 --- ceph-mon/bindep.txt | 1 + ceph-mon/src/ceph_metrics.py | 1 + 2 files changed, 2 insertions(+) diff --git a/ceph-mon/bindep.txt b/ceph-mon/bindep.txt index ba2ccb4b..9cce56b5 100644 --- a/ceph-mon/bindep.txt +++ b/ceph-mon/bindep.txt @@ -2,3 +2,4 @@ libxml2-dev [platform:dpkg test] libxslt1-dev [platform:dpkg test] build-essential [platform:dpkg test] zlib1g-dev [platform:dpkg test] +libffi-dev [platform:dpkg test] diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index 472f8fff..0c320451 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -66,6 +66,7 @@ def _on_relation_changed(self, event): ) ceph_utils.mgr_enable_module("prometheus") logger.debug("module_enabled") + self.update_alert_rules() super()._on_relation_changed(event) def _on_relation_departed(self, event): From 62cff8e3fcbba9c3d42699d6d6614a87d216880a Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Tue, 6 Dec 2022 12:33:10 +1030 Subject: [PATCH 2459/2699] Fix typo in requesting loadbalancer ceph-dashboard requires the `http-check expect status 200` configured in haproxy. openstack-loadbalancer only sets this up if the check_type is set to 'http'. Closes-Bug: #1998871 Change-Id: Iea74aff1205813749cee71436af14ee3579db41a --- ceph-dashboard/src/charm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index a85b2df3..a5d7fc14 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -227,7 +227,7 @@ def _request_loadbalancer(self, _) -> None: self.TLS_PORT, self.TLS_PORT, self._get_bind_ip(), - 'httpd') + 'http') def _register_dashboards(self) -> None: """Register all dashboards with grafana""" From 990516154d3f70f6a986aa4aeeb489e973e8de5d Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 13 Dec 2022 12:29:45 -0300 Subject: [PATCH 2460/2699] Unpin tox version This unpinning is meant to solve the issues with tox 4.x breaking all the virtualenv dependencies. Also, re-sync from release-tools. Change-Id: I673c4b96de74f83d8fd3c0657c7ab0477fc4e9dc --- ceph-osd/pip.sh | 18 ---------------- ceph-osd/requirements.txt | 11 +++++++--- ceph-osd/test-requirements.txt | 20 +----------------- ceph-osd/tox.ini | 38 +++++++++++++++------------------- 4 files changed, 26 insertions(+), 61 deletions(-) delete mode 100755 ceph-osd/pip.sh diff --git a/ceph-osd/pip.sh b/ceph-osd/pip.sh deleted file mode 100755 index 9a7e6b09..00000000 --- a/ceph-osd/pip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# setuptools 58.0 dropped the support for use_2to3=true which is needed to -# install blessings (an indirect dependency of charm-tools). 
-# -# More details on the behavior of tox and virtualenv creation can be found at -# https://github.com/tox-dev/tox/issues/448 -# -# This script is a wrapper to force the use of the pinned versions early in the -# process when the virtualenv was created and upgraded before installing the -# dependencies declared in the target. -pip install 'pip<20.3' 'setuptools<50.0.0' -pip "$@" diff --git a/ceph-osd/requirements.txt b/ceph-osd/requirements.txt index ead6e89a..3b1cb7b1 100644 --- a/ceph-osd/requirements.txt +++ b/ceph-osd/requirements.txt @@ -11,14 +11,19 @@ pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 + # Strange import error with newer netaddr: netaddr>0.7.16,<0.8.0 Jinja2>=2.6 # BSD License (3 clause) six>=1.9.0 -# dnspython 2.0.0 dropped py3.5 support -dnspython<2.0.0; python_version < '3.6' -dnspython; python_version >= '3.6' +dnspython psutil>=1.1.1,<2.0.0 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 71172e38..40d87f30 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -8,8 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 @@ -19,28 +17,12 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 -# Dependencies of stestr. Newer versions use keywords that didn't exist in -# python 3.5 yet (e.g. "ModuleNotFoundError") -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' - -# Some Zuul nodes sometimes pull newer versions of these dependencies which -# dropped support for python 3.5: -osprofiler<2.7.0;python_version<'3.6' -stevedore<1.31.0;python_version<'3.6' -debtcollector<1.22.0;python_version<'3.6' -oslo.utils<=3.41.0;python_version<'3.6' - coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6' -tempest<24.0.0;python_version<'3.6' +git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests -pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -pyopenssl<=22.0.0 diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index fe9af0a2..2431a75d 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -9,47 +9,43 @@ # all of its own requirements and if it doesn't, fix it there. [tox] envlist = pep8,py3 -skipsdist = True # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 - -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} -install_command = - {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft rename.sh -passenv = HOME TERM CS_* OS_* TEST_* +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt [testenv:build] basepython = python3 deps = -r{toxinidir}/build-requirements.txt +# charmcraft clean is done to ensure that +# `tox -e build` always performs a clean, repeatable build. +# For faster rebuilds during development, +# directly run `charmcraft -v pack && ./rename.sh`. commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh + charmcraft clean [testenv:py36] basepython = python3.6 @@ -79,7 +75,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.4 + git+https://github.com/juju/charm-tools.git commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From 7a4a575c2afcb85800e4feb8c2856814f4db07ad Mon Sep 17 00:00:00 2001 From: alitvinov Date: Mon, 12 Dec 2022 16:44:48 +0400 Subject: [PATCH 2461/2699] Tweak apparmor profile to access OSD volumes. Plus add aa-profile-mode enforce option to the test bundles. Closes-Bug: #1860801 Change-Id: I8264ad760d92da3faa384c8edca5566fc622c57d --- ceph-osd/config.yaml | 5 +++++ ceph-osd/files/apparmor/usr.bin.ceph-osd | 2 +- ceph-osd/tests/bundles/focal-xena.yaml | 1 + ceph-osd/tests/bundles/focal-yoga.yaml | 1 + ceph-osd/tests/bundles/jammy-yoga.yaml | 1 + 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 8345428a..2f0f94d9 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -67,6 +67,9 @@ options: For ceph < 14.2.0 (Nautilus) these can also be directories instead of devices. If the value does not start with "/dev" then it will be interpreted as a directory. + NOTE: if the value does not start with "/dev" then apparmor + "enforce" profile is not supported. + bdev-enable-discard: type: string default: auto @@ -344,6 +347,8 @@ options: cluster as all ceph-osd processes must be restarted as part of changing the apparmor profile enforcement mode. Always test in pre-production before enabling AppArmor on a live cluster. 
+ NOTE: apparmor 'enforce' profile is supported only if osd-device + name starts with "/dev" bluestore-compression-algorithm: type: string default: lz4 diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd index 14084ab8..95846077 100644 --- a/ceph-osd/files/apparmor/usr.bin.ceph-osd +++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd @@ -37,7 +37,7 @@ /{,var/}tmp/ r, /dev/ r, - /dev/** rw, + /dev/** rwk, /sys/devices/** r, /run/blkid/blkid.tab r, diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml index 5fb13313..fb86f5a5 100644 --- a/ceph-osd/tests/bundles/focal-xena.yaml +++ b/ceph-osd/tests/bundles/focal-xena.yaml @@ -63,6 +63,7 @@ applications: options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + aa-profile-mode: enforce to: - '3' - '4' diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml index 4a8d004c..839e8189 100644 --- a/ceph-osd/tests/bundles/focal-yoga.yaml +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -63,6 +63,7 @@ applications: options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + aa-profile-mode: enforce to: - '3' - '4' diff --git a/ceph-osd/tests/bundles/jammy-yoga.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml index 77c5d22c..0355b2eb 100644 --- a/ceph-osd/tests/bundles/jammy-yoga.yaml +++ b/ceph-osd/tests/bundles/jammy-yoga.yaml @@ -63,6 +63,7 @@ applications: options: osd-devices: '/dev/test-non-existent' source: *openstack-origin + aa-profile-mode: enforce to: - '3' - '4' From f7a3b1ef70d05e09b08a1f3de72d0b666fdd6c05 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 16 Jan 2023 20:48:46 +0000 Subject: [PATCH 2462/2699] Fix charm for tox4 compatibility Related-Bug: 2002788 Change-Id: I49c3864dbe7ad476e67c6ade54d352e0520682b9 --- ceph-radosgw/osci.yaml | 26 +++++++++--------- ceph-radosgw/pip.sh | 18 ------------- ceph-radosgw/test-requirements.txt | 3 +-- ceph-radosgw/tox.ini | 43 +++++++++++------------------- 4 files changed, 29 insertions(+), 61 deletions(-) delete mode 100755 ceph-radosgw/pip.sh diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index cb19e21d..b511577b 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -27,7 +27,7 @@ needs_charm_build: true charm_build_name: ceph-radosgw build_type: charmcraft - charmcraft_channel: 2.0/stable + charmcraft_channel: 2.1/stable - job: name: focal-yoga-multisite parent: func-target @@ -39,7 +39,7 @@ - name: tox-py310 soft: true vars: - tox_extra_args: focal-yoga-multisite + tox_extra_args: '-- focal-yoga-multisite' - job: name: jammy-yoga-multisite parent: func-target @@ -49,49 +49,49 @@ - name: tox-py310 soft: true vars: - tox_extra_args: jammy-yoga-multisite + tox_extra_args: '-- jammy-yoga-multisite' - job: name: jammy-zed-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: jammy-zed-multisite + tox_extra_args: '-- jammy-zed-multisite' - job: name: kinetic-zed-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: kinetic-zed-multisite + tox_extra_args: '-- kinetic-zed-multisite' - job: name: vault-focal-yoga_rgw parent: func-target dependencies: - focal-yoga-multisite vars: - tox_extra_args: vault:focal-yoga + tox_extra_args: '-- vault:focal-yoga' - job: name: vault-jammy-yoga_rgw parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: vault:jammy-yoga + tox_extra_args: '-- vault:jammy-yoga' - job: name: 
vault-focal-yoga-namespaced parent: func-target dependencies: - focal-yoga-multisite vars: - tox_extra_args: vault:focal-yoga-namespaced + tox_extra_args: '-- vault:focal-yoga-namespaced' - job: name: vault-jammy-yoga-namespaced parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: vault:jammy-yoga-namespaced + tox_extra_args: '-- vault:jammy-yoga-namespaced' - job: name: vault-jammy-zed_rgw parent: func-target @@ -99,7 +99,7 @@ - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:jammy-zed + tox_extra_args: '-- vault:jammy-zed' - job: name: vault-jammy-zed-namespaced parent: func-target @@ -107,7 +107,7 @@ - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:jammy-zed-namespaced + tox_extra_args: '-- vault:jammy-zed-namespaced' - job: name: vault-kinetic-zed_rgw parent: func-target @@ -115,7 +115,7 @@ - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:kinetic-zed + tox_extra_args: '-- vault:kinetic-zed' - job: name: vault-kinetic-zed-namespaced parent: func-target @@ -123,4 +123,4 @@ - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: vault:kinetic-zed-namespaced + tox_extra_args: '-- vault:kinetic-zed-namespaced' diff --git a/ceph-radosgw/pip.sh b/ceph-radosgw/pip.sh deleted file mode 100755 index 9a7e6b09..00000000 --- a/ceph-radosgw/pip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# setuptools 58.0 dropped the support for use_2to3=true which is needed to -# install blessings (an indirect dependency of charm-tools). -# -# More details on the beahvior of tox and virtualenv creation can be found at -# https://github.com/tox-dev/tox/issues/448 -# -# This script is wrapper to force the use of the pinned versions early in the -# process when the virtualenv was created and upgraded before installing the -# depedencies declared in the target. -pip install 'pip<20.3' 'setuptools<50.0.0' -pip "$@" diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 5539f038..e972406e 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -8,7 +8,6 @@ # all of its own requirements and if it doesn't, fix it there. # pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 requests>=2.18.4 @@ -26,5 +25,5 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest -pyopenssl<=22.0.0 croniter # needed for charm-rabbitmq-server unit tests +psutil diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 45b39294..ae4d124c 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -9,56 +9,43 @@ # all of its own requirements and if it doesn't, fix it there. [tox] envlist = pep8,py3 -skipsdist = True # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 - -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} -install_command = - {toxinidir}/pip.sh install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft rename.sh -passenv = HOME TERM CS_* OS_* TEST_* +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt [testenv:build] basepython = python3 deps = -r{toxinidir}/build-requirements.txt +# charmcraft clean is done to ensure that +# `tox -e build` always performs a clean, repeatable build. +# For faster rebuilds during development, +# directly run `charmcraft -v pack && ./rename.sh`. commands = charmcraft clean charmcraft -v pack {toxinidir}/rename.sh - -[testenv:py38] -basepython = python3.8 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -[testenv:py39] -basepython = python3.9 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt + charmcraft clean [testenv:py310] basepython = python3.10 From e74b95f6d753dbb64c4ba9a36055e319376f669c Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 13 Dec 2022 12:18:09 -0300 Subject: [PATCH 2463/2699] Unpin tox version This unpinning is meant to solve the issues with tox 4.x breaking all the virtualenv dependencies. Change-Id: Ifc3381b2f2e4e41ebf6676080bf1831baffb0d42 --- ceph-mon/osci.yaml | 2 +- ceph-mon/pip.sh | 18 ------------------ ceph-mon/requirements.txt | 2 +- ceph-mon/tox.ini | 21 +++++++-------------- 4 files changed, 9 insertions(+), 34 deletions(-) delete mode 100755 ceph-mon/pip.sh diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 0a254da2..462e2476 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -20,4 +20,4 @@ - charm-build - tox-py38 vars: - tox_extra_args: install:local-focal-yoga + tox_extra_args: '-- install:local-focal-yoga' diff --git a/ceph-mon/pip.sh b/ceph-mon/pip.sh deleted file mode 100755 index 9a7e6b09..00000000 --- a/ceph-mon/pip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# setuptools 58.0 dropped the support for use_2to3=true which is needed to -# install blessings (an indirect dependency of charm-tools). 
-# -# More details on the beahvior of tox and virtualenv creation can be found at -# https://github.com/tox-dev/tox/issues/448 -# -# This script is wrapper to force the use of the pinned versions early in the -# process when the virtualenv was created and upgraded before installing the -# depedencies declared in the target. -pip install 'pip<20.3' 'setuptools<50.0.0' -pip "$@" diff --git a/ceph-mon/requirements.txt b/ceph-mon/requirements.txt index d9dd8416..64959dc5 100644 --- a/ceph-mon/requirements.txt +++ b/ceph-mon/requirements.txt @@ -11,4 +11,4 @@ git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer -git+https://github.com/juju/charm-helpers#egg=charm-helpers +git+https://github.com/juju/charm-helpers#egg=charmhelpers diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 483e561a..75e28749 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -14,18 +14,6 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.18.0 @@ -35,12 +23,17 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - {toxinidir}/pip.sh install {opts} {packages} + pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft rename.sh -passenv = HOME TERM CS_* OS_* TEST_* +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt [testenv:build] From 968bb32bcd66caed2aee47b1447be44303bcfe49 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Tue, 17 Jan 2023 17:09:48 +0900 Subject: [PATCH 2464/2699] Make sure lockfile-progs package is installed Also, drop python-dbus for simplicity since "check_upstart_job" in nrpe is not enabled any longer. And the python-dbus package is no longer available on jammy either. 
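For reference, the check definitions actually deployed on a current
systemd-based unit confirm that no upstart job check remains: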
[on focal with systemd] $ ls -1 /etc/nagios/nrpe.d/ check_ceph.cfg check_conntrack.cfg check_reboot.cfg check_systemd_scopes.cfg Closes-Bug: #1998163 Change-Id: I30bc22ae8509367207004b90eb2c38ad0fae9ffe --- ceph-mon/src/ceph_hooks.py | 3 +-- ceph-mon/unit_tests/test_ceph_hooks.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 38d3ccfc..b2dd97db 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -1104,8 +1104,7 @@ def upgrade_charm(): @hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-changed') def update_nrpe_config(): - # python-dbus is used by check_upstart_job - apt_install(['python-dbus', 'lockfile-progs']) + apt_install('lockfile-progs', fatal=True) log('Refreshing nagios checks') if os.path.isdir(NAGIOS_PLUGINS): rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index f5a07a56..f7a4f17c 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -206,8 +206,8 @@ def test_nrpe_dependency_installed(self, mock_config): write_file=DEFAULT, nrpe=DEFAULT) as mocks: ceph_hooks.update_nrpe_config() - mocks["apt_install"].assert_called_once_with( - ["python-dbus", "lockfile-progs"]) + mocks["apt_install"].assert_called_with( + "lockfile-progs", fatal=True) @patch.object(ceph_hooks, 'notify_prometheus') @patch.object(ceph_hooks, 'notify_rbd_mirrors') @@ -239,7 +239,7 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( mocks["is_relation_made"].return_value = True ceph_hooks.upgrade_charm() mocks["apt_install"].assert_called_with( - ["python-dbus", "lockfile-progs"]) + "lockfile-progs", fatal=True) mock_notify_radosgws.assert_called_once_with() mock_ceph.update_monfs.assert_called_once_with() mock_notify_prometheus.assert_called_once_with() From dbcb211ed520dabfb50e2ce3c856d4339993d6ff Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Wed, 4 Jan 2023 15:59:04 +1030 Subject: [PATCH 2465/2699] Save the crash module auth key Read the key set on the mon relation, and use ceph-authtool to save it to a keyring, for use by the crash module for crash reporting. When this auth key is set, the crash module (enabled by default) will update ceph-mon with a report. It also results in a neat summary of recent crashes that can be viewed by `ceph health detail`. For example: ``` $ juju ssh ceph-mon/leader -- sudo ceph health detail HEALTH_WARN 1 daemons have recently crashed [WRN] RECENT_CRASH: 1 daemons have recently crashed osd.1 crashed on host node-3 at 2023-01-04T05:25:18.218628Z ``` ref. https://docs.ceph.com/en/latest/mgr/crash/ See also https://review.opendev.org/c/openstack/charm-ceph-mon/+/869138 for where the client_crash_key relation data set is implemented. 
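As a rough reference, persisting such a key with ceph-authtool can look
like the sketch below. The charm routes this through its existing
_import_key helper, whose body is not part of this diff, so the function
body here is an illustrative assumption rather than the helper's actual
code:

```
import os
import subprocess

def import_client_crash_key(key):
    # Illustrative sketch only: create the client.crash keyring if it
    # does not exist yet and register the cephx key received over the
    # mon relation.
    keyring = '/var/lib/ceph/osd/ceph.client.crash.keyring'
    if not os.path.exists(keyring):
        subprocess.check_call([
            'ceph-authtool', keyring,
            '--create-keyring',
            '--name=client.crash',
            '--add-key={}'.format(key),
        ])
```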
Depends-On: https://review.opendev.org/c/openstack/charm-ceph-mon/+/869138 Closes-Bug: #2000630 Change-Id: I77c84c368e6665e4988ebe9a735f000f99d0b78e --- ceph-osd/hooks/ceph_hooks.py | 4 ++++ ceph-osd/hooks/utils.py | 11 +++++++++++ ceph-osd/lib/charms_ceph/utils.py | 2 +- ceph-osd/templates/ceph.conf | 3 +++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 351d7d51..7d33f45a 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -80,6 +80,7 @@ import_osd_bootstrap_key, import_osd_upgrade_key, import_osd_removal_key, + import_client_crash_key, get_host_ip, get_networks, assert_charm_supports_ipv6, @@ -653,6 +654,7 @@ def mon_relation(): bootstrap_key = relation_get('osd_bootstrap_key') upgrade_key = relation_get('osd_upgrade_key') removal_key = relation_get('osd_disk_removal_key') + client_crash_key = relation_get('client_crash_key') if get_fsid() and get_auth() and bootstrap_key: log('mon has provided conf- scanning disks') emit_cephconf() @@ -664,6 +666,8 @@ def mon_relation(): _, settings, _ = (ch_ceph.CephOSDConfContext() .filter_osd_from_mon_settings()) ceph.apply_osd_settings(settings) + if client_crash_key: + import_client_crash_key(client_crash_key) else: log('mon cluster has not yet provided conf') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index 44f96c62..0a14d1bb 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -73,6 +73,7 @@ _bootstrap_keyring = "/var/lib/ceph/bootstrap-osd/ceph.keyring" _upgrade_keyring = "/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring" _removal_keyring = "/var/lib/ceph/osd/ceph.client.osd-removal.keyring" +_client_crash_keyring = "/var/lib/ceph/osd/ceph.client.crash.keyring" def is_osd_bootstrap_ready(): @@ -129,6 +130,16 @@ def import_osd_removal_key(key): _import_key(key, _removal_keyring, 'client.osd-removal') +def import_client_crash_key(key): + """ + Ensure that the client.crash keyring is set up. + + :param key: The cephx key to add to the client.crash keyring + :type key: str + :raises: subprocess.CalledProcessError""" + _import_key(key, _client_crash_keyring, 'client.crash') + + def render_template(template_name, context, template_dir=TEMPLATES_DIR): """Render Jinja2 template. 
diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index a22462ec..3633dd4c 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -695,7 +695,7 @@ def get_local_osd_ids(): try: dirs = os.listdir(osd_path) for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] + osd_id = osd_dir.split('-')[1] if '-' in osd_dir else '' if (_is_int(osd_id) and filesystem_mounted(os.path.join( os.sep, osd_path, osd_dir))): diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 782a231d..2966ce58 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -54,6 +54,9 @@ keyring = /var/lib/ceph/osd/ceph.client.osd-upgrade.keyring [client.osd-removal] keyring = /var/lib/ceph/osd/ceph.client.osd-removal.keyring +[client.crash] +keyring = /var/lib/ceph/osd/ceph.client.crash.keyring + [mon] keyring = /var/lib/ceph/mon/$cluster-$id/keyring From 32e9751f6ce92d4e140951eb6e2636d8936ece02 Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Wed, 4 Jan 2023 15:59:34 +1030 Subject: [PATCH 2466/2699] Create a key for ceph-osd for crash module auth This will be set on the osd relation, so the ceph-osd charm can use this key for auth by the crash reporting module. ref. https://docs.ceph.com/en/latest/mgr/crash/ See https://review.opendev.org/c/openstack/charm-ceph-osd/+/869139 for how this key is used by ceph-osd. Closes-Bug: #2000630 Change-Id: Ic95aae6b5981a6df1e0b3c310bcef8018c494a24 --- ceph-mon/src/ceph_hooks.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index b2dd97db..ab2dd1c4 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -839,6 +839,16 @@ def osd_relation(relid=None, unit=None): 'allow command "osd destroy"', ] } + ), + # Provide a key to the osd for use by the crash module: + # https://docs.ceph.com/en/latest/mgr/crash/ + 'client_crash_key': ceph.create_named_keyring( + 'client', + 'crash', + caps={ + 'mon': ['profile crash'], + 'mgr': ['profile crash'], + } ) } From 7b0969af61b169cd9809ceb8077c3da2b9e3aab2 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Jan 2023 12:17:22 +0000 Subject: [PATCH 2467/2699] Ensure crushtool --test called correctly Later Ceph releases require that the --test function of crushtool is called with replica information for validation. Pass in "--num-rep 3" as a basic check plus "--show-statistics" to silence a non-fatal warning message. This can be clean cherry-picked back at least as far as Ceph 12.2.x. Change-Id: I76d21ddd9da79535f68490b4231ae13705e27edb Closes-Bug: 2003690 --- ceph-mon/src/ceph_hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index b2dd97db..77023968 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -573,7 +573,8 @@ def attempt_mon_cluster_bootstrap(): "chooseleaf firstn 0 type rack/' > " "/tmp/crush.decompiled", "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map", - "crushtool -i /tmp/crush.map --test", + "crushtool -i /tmp/crush.map --test " + "--num-rep 3 --show-statistics", "ceph osd setcrushmap -i /tmp/crush.map" ] for cmd in cmds: From c593179d51393ec918f09c0109850db2b63ffaff Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 25 Jan 2023 08:51:57 -0500 Subject: [PATCH 2468/2699] Add kinetic support Add 22.10 run-on base. 
Change-Id: I2de43d9d547849ffea6df502a249c771a77a78aa --- ceph-mon/charmcraft.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index ed560c4e..2ebc1eac 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -36,4 +36,6 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] From a3758401db4dcb42c95bdaa3ec122fbd4ddabc87 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 25 Jan 2023 08:52:18 -0500 Subject: [PATCH 2469/2699] Add kinetic support Add 22.10 run-on base and add kinetic to metadata.yaml. Change-Id: I519e59794d1e046d0bf985d8a22129270c33c10a --- ceph-osd/charmcraft.yaml | 3 +++ ceph-osd/metadata.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index 4190b63d..7ac40aca 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -33,3 +33,6 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 42aee0be..ecce1ed7 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,6 +13,7 @@ tags: series: - focal - jammy +- kinetic description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. From b0093f78d9a044393324de5f0f15ff72af658c65 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 1 Feb 2023 03:11:22 +0000 Subject: [PATCH 2470/2699] Revert "Create NRPE check to verify ceph daemons versions" This reverts commit c32f4675c0ff825f170b50b99eb59454c0a208a3. Reason for revert: The Ceph version check seems to be missing a consideration of users to execute the nrpe check. It actually fails to get keyrings to execute the command as it's run by a non-root user. 
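For example: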
$ juju run-action --wait nrpe/0 run-nrpe-check name=check-ceph-daemons-versions unit-nrpe-0: UnitId: nrpe/0 id: "20" results: Stderr: | 2023-02-01T03:03:09.556+0000 7f4677361700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory 2023-02-01T03:03:09.556+0000 7f4677361700 -1 AuthRegistry(0x7f467005f540) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx 2023-02-01T03:03:09.556+0000 7f4677361700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory 2023-02-01T03:03:09.556+0000 7f4677361700 -1 AuthRegistry(0x7f4670064d88) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx 2023-02-01T03:03:09.560+0000 7f4677361700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory 2023-02-01T03:03:09.560+0000 7f4677361700 -1 AuthRegistry(0x7f4677360000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx [errno 2] RADOS object not found (error connecting to the cluster) check-output: 'UNKNOWN: could not determine OSDs versions, error: Command ''[''ceph'', ''versions'']'' returned non-zero exit status 1.' status: completed timing: completed: 2023-02-01 03:03:10 +0000 UTC enqueued: 2023-02-01 03:03:09 +0000 UTC started: 2023-02-01 03:03:09 +0000 UTC Related-Bug: #1943628 Change-Id: I84b306e84661e6664e8a69fa93dfdb02fa4f1e7e --- ceph-mon/actions.yaml | 2 - ceph-mon/actions/ceph_ops.py | 51 ----------- ceph-mon/actions/get-versions-report | 1 - ceph-mon/actions/get_versions_report.py | 26 ------ ceph-mon/files/nagios/check_ceph_status.py | 75 ---------------- ceph-mon/src/ceph_hooks.py | 8 -- ceph-mon/unit_tests/ceph_ls_node.json | 35 -------- .../unit_tests/ceph_versions_alligned.json | 15 ---- .../unit_tests/ceph_versions_diverged.json | 19 ----- ceph-mon/unit_tests/test_actions_mon.py | 40 --------- ceph-mon/unit_tests/test_check_ceph_status.py | 85 ------------------- 11 files changed, 357 deletions(-) delete mode 120000 ceph-mon/actions/get-versions-report delete mode 100755 ceph-mon/actions/get_versions_report.py delete mode 100644 ceph-mon/unit_tests/ceph_ls_node.json delete mode 100644 ceph-mon/unit_tests/ceph_versions_alligned.json delete mode 100644 ceph-mon/unit_tests/ceph_versions_diverged.json diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 2ba87287..09056bb4 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -4,8 +4,6 @@ resume-health: description: "Resume ceph health operations across the entire ceph cluster" get-health: description: "Output the current cluster health reported by `ceph health`" -get-versions-report: - description: "Outputs running daemon versions for all cluster members" create-cache-tier: description: "Create a new cache tier" params: diff --git a/ceph-mon/actions/ceph_ops.py b/ceph-mon/actions/ceph_ops.py index 10cc8ba0..ac8c1464 100755 --- a/ceph-mon/actions/ceph_ops.py +++ b/ceph-mon/actions/ceph_ops.py @@ -23,11 +23,6 @@ set_pool_quota, snapshot_pool, remove_pool_snapshot -class CephReportError(Exception): - """This indicates a critical error.""" - pass - - def 
list_pools(): """Return a list of all Ceph pools.""" try: @@ -37,52 +32,6 @@ def list_pools(): action_fail(str(e)) -def get_versions_report(): - """ - Return a mapping of hosts and their related ceph daemon versions. - - On error, raise a CephReportError. - """ - report = dict() - try: - output = check_output(['ceph', 'node', 'ls']).decode('UTF-8') - except CalledProcessError as e: - action_fail(str(e)) - raise(CephReportError("Getting nodes list fail")) - nodes_list = json.loads(output) - - # osd versions - for osd_host, osds in nodes_list['osd'].items(): - report.setdefault(osd_host, []) - for osd in osds: - try: - output = check_output(['ceph', 'tell', - "osd.{}".format(osd), - 'version']).decode('UTF-8') - except CalledProcessError: - raise( - CephReportError("Getting osd.{} version fail".format(osd)) - ) - report[osd_host].append(json.loads(output)['version']) - - # mon versions - for mon_host, mons in nodes_list['mon'].items(): - report.setdefault(mon_host, []) - for mon in mons: - try: - output = check_output(['ceph', 'tell', - "mon.{}".format(mon), - 'version']).decode('UTF-8') - except CalledProcessError as e: - action_fail(str(e)) - raise( - CephReportError("Getting mon.{} version fail".format(mon)) - ) - report[mon_host].append(json.loads(output)['version']) - - return json.dumps(report, indent=4) - - def pool_get(): """ Returns a key from a pool using 'ceph osd pool get'. diff --git a/ceph-mon/actions/get-versions-report b/ceph-mon/actions/get-versions-report deleted file mode 120000 index b50dd0fe..00000000 --- a/ceph-mon/actions/get-versions-report +++ /dev/null @@ -1 +0,0 @@ -get_versions_report.py \ No newline at end of file diff --git a/ceph-mon/actions/get_versions_report.py b/ceph-mon/actions/get_versions_report.py deleted file mode 100755 index d1ea92b6..00000000 --- a/ceph-mon/actions/get_versions_report.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2022 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ceph_ops import get_versions_report, CephReportError -from charmhelpers.core.hookenv import log, action_set, action_fail - -if __name__ == '__main__': - try: - action_set({'message': get_versions_report()}) - except CephReportError as e: - log(e) - action_fail( - "get versions report failed with message: {}".format(str(e))) diff --git a/ceph-mon/files/nagios/check_ceph_status.py b/ceph-mon/files/nagios/check_ceph_status.py index 074efec5..11e32595 100755 --- a/ceph-mon/files/nagios/check_ceph_status.py +++ b/ceph-mon/files/nagios/check_ceph_status.py @@ -86,32 +86,6 @@ def get_ceph_version(): return out_version -def get_daemons_versions(): - """ - Uses CLI to get the ceph versions - - :returns: set containing tuple of integers, - all the differents versions encountered in the cluster - :raises: UnknownError - """ - try: - tree = subprocess.check_output(['ceph', - 'versions']).decode('UTF-8') - except subprocess.CalledProcessError as e: - raise UnknownError( - "UNKNOWN: could not determine OSDs versions, error: {}".format(e)) - ceph_versions = json.loads(tree) - # ceph version command return a json output - # containing version of all daemons connected to the cluster - # here we parse the overall field, - # to get a set of all versions seen by the cluster - daemons_versions = set(map( - lambda x: tuple(int(i) for i in - x.split(' ')[2].split('.')), - ceph_versions['overall'].keys())) - return daemons_versions - - def get_status_and_messages(status_data): """ Used to get general status of a Ceph cluster as well as a list of @@ -161,50 +135,6 @@ def check_ceph_status(args): """ status_critical = False - # if it is just --check_daemons_versions_consistency, - # deal with it and ignore overall health - if args.check_daemons_versions_consistency: - daemons_versions = get_daemons_versions() - # we check that the osds have same versions - num_of_versions = len(daemons_versions) - if num_of_versions == 1: - message_ok = "OK: All versions alligned" - return message_ok - else: - # version diverged - # we check if major release are the same - # by parsing version number in the daemon_version set - # and keeping major version number or coverting the minor - # version number if major version is 0 - num_of_releases = set(map(lambda x: x[0], daemons_versions)) - if len(num_of_releases) == 1: - msg = 'WARNING: Components minor versions diverged.' - 'Run get-versions-report to know more' - raise WarnError(msg) - else: - # Releases diverged - major, _minor, _patch = get_ceph_version() - release_versions_diff = list(map(lambda x: major - x, - num_of_releases)) - if max(release_versions_diff) >= 2: - msg = "CRITICAL: A component is " \ - "{} version behind osd leader" \ - ". Run get-versions-report to know more".format( - max(release_versions_diff)) - raise CriticalError(msg) - if min(release_versions_diff) <= -1: - msg = "CRITICAL: A component is " \ - "{} version ahead osd leader" \ - ". Run get-versions-report to know more".format( - abs(min(release_versions_diff))) - raise CriticalError(msg) - if max(release_versions_diff) == 1: - msg = "WARNING: A component is " \ - "{} version behind osd leader" \ - ". 
Run get-versions-report to know more".format( - max(release_versions_diff)) - raise WarnError(msg) - if args.status_file: check_file_freshness(args.status_file) with open(args.status_file) as f: @@ -357,11 +287,6 @@ def parse_args(args): dest='check_num_osds', default=False, action='store_true', help="Check whether all OSDs are up and in") - parser.add_argument('--check_daemons_versions_consistency', - dest='check_daemons_versions_consistency', - default=False, - action='store_true', - help="Check all OSDs versions") return parser.parse_args(args) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 2f52fb8c..01cca64e 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -1185,14 +1185,6 @@ def update_nrpe_config(): description='Check whether all OSDs are up and in', check_cmd=check_cmd ) - if is_leader(): - check_cmd = 'check_ceph_status.py -f {}' \ - ' --check_daemons_versions'.format(STATUS_FILE) - nrpe_setup.add_check( - shortname='ceph_daemons_versions', - description='Check wheter all ceph daemons versions are alligned', - check_cmd=check_cmd - ) nrpe_setup.write() diff --git a/ceph-mon/unit_tests/ceph_ls_node.json b/ceph-mon/unit_tests/ceph_ls_node.json deleted file mode 100644 index 556cf2e3..00000000 --- a/ceph-mon/unit_tests/ceph_ls_node.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "mon": { - "juju-c8b0a2-3-lxd-0": [ - "juju-c8b0a2-3-lxd-0" - ], - "juju-c8b0a2-4-lxd-0": [ - "juju-c8b0a2-4-lxd-0" - ], - "juju-c8b0a2-5-lxd-0": [ - "juju-c8b0a2-5-lxd-0" - ] - }, - "osd": { - "aware-bee": [ - 1 - ], - "grand-ape": [ - 0 - ], - "lucky-muskox": [ - 2 - ] - }, - "mgr": { - "juju-c8b0a2-3-lxd-0": [ - "juju-c8b0a2-3-lxd-0" - ], - "juju-c8b0a2-4-lxd-0": [ - "juju-c8b0a2-4-lxd-0" - ], - "juju-c8b0a2-5-lxd-0": [ - "juju-c8b0a2-5-lxd-0" - ] - } -} diff --git a/ceph-mon/unit_tests/ceph_versions_alligned.json b/ceph-mon/unit_tests/ceph_versions_alligned.json deleted file mode 100644 index 3acae499..00000000 --- a/ceph-mon/unit_tests/ceph_versions_alligned.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "mon": { - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3 - }, - "mgr": { - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3 - }, - "osd": { - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 2 - }, - "mds": {}, - "overall": { - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 8 - } -} diff --git a/ceph-mon/unit_tests/ceph_versions_diverged.json b/ceph-mon/unit_tests/ceph_versions_diverged.json deleted file mode 100644 index 4dd5c5af..00000000 --- a/ceph-mon/unit_tests/ceph_versions_diverged.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "mon": { - "ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 1, - "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 - }, - "mgr": { - "ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 3 - }, - "osd": { - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3, - "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 - }, - "mds": {}, - "overall": { - "ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)": 4, - "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 3, - "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 - } -} diff --git 
a/ceph-mon/unit_tests/test_actions_mon.py b/ceph-mon/unit_tests/test_actions_mon.py index a09a7b0a..54c44fff 100644 --- a/ceph-mon/unit_tests/test_actions_mon.py +++ b/ceph-mon/unit_tests/test_actions_mon.py @@ -13,7 +13,6 @@ import json import sys import unittest.mock as mock -from subprocess import CalledProcessError from test_utils import CharmTestCase @@ -49,45 +48,6 @@ def setUp(self): "action_fail", "open"]) - def test_get_version_report_ok(self): - def _call_rslt(): - with open('unit_tests/ceph_ls_node.json') as f: - tree = f.read() - yield tree.encode('UTF-8') - while True: - yield ('{' - ' "version": "16.2.7",' - ' "release": "pacific",' - ' "release_type": "stable"' - '}').encode('UTF-8') - self.check_output.side_effect = _call_rslt() - result = actions.get_versions_report() - self.assertEqual('{\n' - ' "aware-bee": [\n' - ' "16.2.7"\n' - ' ],\n' - ' "grand-ape": [\n' - ' "16.2.7"\n' - ' ],\n' - ' "lucky-muskox": [\n' - ' "16.2.7"\n' - ' ],\n' - ' "juju-c8b0a2-3-lxd-0": [\n' - ' "16.2.7"\n' - ' ],\n' - ' "juju-c8b0a2-4-lxd-0": [\n' - ' "16.2.7"\n' - ' ],\n' - ' "juju-c8b0a2-5-lxd-0": [\n' - ' "16.2.7"\n' - ' ]\n' - '}', result) - - def test_get_version_report_fail(self): - self.check_output.side_effect = CalledProcessError(1, 'ceph node ls') - self.assertRaises(actions.CephReportError, - lambda: actions.get_versions_report()) - @mock.patch('socket.gethostname') def test_get_quorum_status(self, mock_hostname): mock_hostname.return_value = 'mockhost' diff --git a/ceph-mon/unit_tests/test_check_ceph_status.py b/ceph-mon/unit_tests/test_check_ceph_status.py index e6984884..5342ce55 100644 --- a/ceph-mon/unit_tests/test_check_ceph_status.py +++ b/ceph-mon/unit_tests/test_check_ceph_status.py @@ -17,7 +17,6 @@ import sys from unittest.mock import patch -from subprocess import CalledProcessError # import the module we want to test os.sys.path.insert(1, os.path.join(sys.path[0], 'files/nagios')) @@ -26,90 +25,6 @@ @patch('subprocess.check_output') class NagiosTestCase(unittest.TestCase): - def test_get_daemons_versions_alligned(self, mock_subprocess): - with open('unit_tests/ceph_versions_alligned.json', 'rb') as f: - mock_subprocess.return_value = f.read() - osds_versions = check_ceph_status.get_daemons_versions() - self.assertEqual(osds_versions, set([(16, 2, 7)])) - - def test_get_daemons_versions_diverged(self, mock_subprocess): - with open('unit_tests/ceph_versions_diverged.json', 'rb') as f: - mock_subprocess.return_value = f.read() - osds_versions = check_ceph_status.get_daemons_versions() - self.assertEqual(osds_versions, set([(16, 2, 7), (17, 2, 0), - (15, 2, 16)])) - - def test_get_daemons_versions_exeption(self, mock_subprocess): - mock_subprocess.side_effect = CalledProcessError(1, 'ceph versions') - self.assertRaises(check_ceph_status.UnknownError, - lambda: check_ceph_status.get_daemons_versions()) - - # Version Alligned - @patch('check_ceph_status.get_daemons_versions') - def test_versions_alligned(self, mock_daemons_versions, mock_subprocess): - mock_subprocess.return_value = 'ceph version 16.2.7 ' \ - '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(16, 2, 7)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - check_output = check_ceph_status.check_ceph_status(args) - self.assertRegex(check_output, r"^OK: All versions alligned$") - - # Minor version diverged - @patch('check_ceph_status.get_daemons_versions') - def test_min_versions_diverged(self, mock_daemons_versions, - 
mock_subprocess): - mock_subprocess.return_value = 'ceph version 16.2.7 ' \ - '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(16, 2, 7), (16, 1, 7)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - self.assertRaises(check_ceph_status.WarnError, - lambda: check_ceph_status.check_ceph_status(args)) - - # Major version ahead - @patch('check_ceph_status.get_daemons_versions') - def test_one_version_ahead(self, mock_daemons_versions, mock_subprocess): - mock_subprocess.return_value = 'ceph version 16.2.7 ' \ - '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(16, 2, 7), (17, 2, 0)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - self.assertRaises(check_ceph_status.CriticalError, - lambda: check_ceph_status.check_ceph_status(args)) - - # Two major version ahead - @patch('check_ceph_status.get_daemons_versions') - def test_two_version_ahead(self, mock_daemons_versions, mock_subprocess): - mock_subprocess.return_value = 'ceph version 15.2.16 ' \ - '(d46a73d6d0a67a79558054a3a5a72cb561724974)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(15, 2, 16), (17, 2, 0)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - self.assertRaises(check_ceph_status.CriticalError, - lambda: check_ceph_status.check_ceph_status(args)) - - # Major version behind - @patch('check_ceph_status.get_daemons_versions') - def test_version_behind(self, mock_daemons_versions, mock_subprocess): - mock_subprocess.return_value = 'ceph version 16.2.7 ' \ - '(dd0603118f56ab514f133c8d2e3adfc983942503)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(15, 2, 16), (16, 2, 7)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - self.assertRaises(check_ceph_status.WarnError, - lambda: check_ceph_status.check_ceph_status(args)) - - # Two major version behind - @patch('check_ceph_status.get_daemons_versions') - def test_two_version_behind(self, mock_daemons_versions, mock_subprocess): - mock_subprocess.return_value = 'ceph version 17.2.0 ' \ - '(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e)'.encode('UTF-8') - mock_daemons_versions.return_value = set([(15, 2, 16), (17, 2, 0)]) - args = check_ceph_status.parse_args([ - '--check_daemons_versions_consistency']) - self.assertRaises(check_ceph_status.CriticalError, - lambda: check_ceph_status.check_ceph_status(args)) def test_get_ceph_version(self, mock_subprocess): mock_subprocess.return_value = 'ceph version 10.2.9 ' \ From 7a2a7efe73b48b2377de621506a8288a263416cc Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Mon, 5 Dec 2022 13:57:06 +0530 Subject: [PATCH 2471/2699] Removes stderr pipe from _check_output Change-Id: Ia6e838d607fecb9b391ebc450d611af1865b2eab --- ceph-radosgw/hooks/hooks.py | 11 +++---- ceph-radosgw/hooks/multisite.py | 2 +- ceph-radosgw/metadata.yaml | 1 + ceph-radosgw/unit_tests/test_multisite.py | 36 +++++++++++------------ 4 files changed, 24 insertions(+), 26 deletions(-) diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index f7abed61..f4db085e 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -373,13 +373,10 @@ def _mon_relation(): endpoints=endpoints, default=True, master=True, zonegroup=zonegroup) - except subprocess.CalledProcessError as e: - if 'File exists' in e.stderr.decode('UTF-8'): - # NOTE(lourot): may have been created in the - # 
background by the Rados Gateway daemon, see - # lp:1856106 - log("zone '{}' existed already after all".format( - zone)) + except subprocess.CalledProcessError: + if zone in multisite.list_zones(retry_on_empty=True): + log("zone '{}' existed already after all" + .format(zone)) else: raise diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index fc4200f6..18a33410 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -30,7 +30,7 @@ def _check_output(cmd): """Logging wrapper for subprocess.check_ouput""" hookenv.log("Executing: {}".format(' '.join(cmd)), level=hookenv.DEBUG) - return subprocess.check_output(cmd, stderr=subprocess.PIPE).decode('UTF-8') + return subprocess.check_output(cmd).decode('UTF-8') @decorators.retry_on_exception(num_retries=5, base_delay=3, diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 195d9e38..9c1574a4 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,6 +15,7 @@ tags: series: - focal - jammy +- kinetic extra-bindings: public: admin: diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index cb030bec..403935fa 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -77,7 +77,7 @@ def test_create_realm(self): 'radosgw-admin', '--id=rgw.testhost', 'realm', 'create', '--rgw-realm=beedata', '--default' - ], stderr=mock.ANY) + ]) def test_list_realms(self): with open(self._testdata(whoami()), 'rb') as f: @@ -110,7 +110,7 @@ def test_create_user(self): 'user', 'create', '--uid=mrbees', '--display-name=Synchronization User', - ], stderr=mock.ANY) + ]) def test_create_system_user(self): with open(self._testdata(whoami()), 'rb') as f: @@ -130,7 +130,7 @@ def test_create_system_user(self): '--uid=mrbees', '--display-name=Synchronization User', '--system' - ], stderr=mock.ANY) + ]) def test_create_zonegroup(self): with open(self._testdata(whoami()), 'rb') as f: @@ -151,7 +151,7 @@ def test_create_zonegroup(self): '--rgw-realm=beedata', '--default', '--master' - ], stderr=mock.ANY) + ]) def test_list_zonegroups(self): with open(self._testdata(whoami()), 'rb') as f: @@ -182,7 +182,7 @@ def test_create_zone(self): '--access-key=mykey', '--secret=mypassword', '--read-only=0', - ], stderr=mock.ANY) + ]) def test_modify_zone(self): multisite.modify_zone( @@ -199,7 +199,7 @@ def test_modify_zone(self): '--endpoints=http://localhost:80,https://localhost:443', '--access-key=mykey', '--secret=secret', '--read-only=1', - ], stderr=mock.ANY) + ]) def test_modify_zone_promote_master(self): multisite.modify_zone( @@ -214,7 +214,7 @@ def test_modify_zone_promote_master(self): '--master', '--default', '--read-only=0', - ], stderr=mock.ANY) + ]) def test_modify_zone_partial_credentials(self): multisite.modify_zone( @@ -228,7 +228,7 @@ def test_modify_zone_partial_credentials(self): '--rgw-zone=brundall-east', '--endpoints=http://localhost:80,https://localhost:443', '--read-only=0', - ], stderr=mock.ANY) + ]) def test_list_zones(self): with open(self._testdata(whoami()), 'rb') as f: @@ -288,7 +288,7 @@ def test_pull_realm(self): 'realm', 'pull', '--url=http://master:80', '--access-key=testkey', '--secret=testsecret', - ], stderr=mock.ANY) + ]) def test_pull_period(self): multisite.pull_period(url='http://master:80', @@ -299,7 +299,7 @@ def test_pull_period(self): 'period', 'pull', '--url=http://master:80', '--access-key=testkey', '--secret=testsecret', - ], stderr=mock.ANY) + ]) def 
test_list_buckets(self): self.subprocess.CalledProcessError = BaseException @@ -308,7 +308,7 @@ def test_list_buckets(self): 'radosgw-admin', '--id=rgw.testhost', 'bucket', 'list', '--rgw-zone=default', '--rgw-zonegroup=default' - ], stderr=mock.ANY) + ]) def test_rename_zonegroup(self): multisite.rename_zonegroup('default', 'test_zone_group') @@ -332,7 +332,7 @@ def test_get_zonegroup(self): self.subprocess.check_output.assert_called_once_with([ 'radosgw-admin', '--id=rgw.testhost', 'zonegroup', 'get', '--rgw-zonegroup=test_zone' - ], stderr=mock.ANY) + ]) def test_modify_zonegroup_migrate(self): multisite.modify_zonegroup('test_zonegroup', @@ -344,7 +344,7 @@ def test_modify_zonegroup_migrate(self): 'zonegroup', 'modify', '--rgw-zonegroup=test_zonegroup', '--rgw-realm=test_realm', '--endpoints=http://localhost:80', '--default', '--master', - ], stderr=mock.ANY) + ]) def test_modify_zone_migrate(self): multisite.modify_zone('test_zone', default=True, master=True, @@ -357,7 +357,7 @@ def test_modify_zone_migrate(self): '--rgw-zonegroup=test_zonegroup', '--endpoints=http://localhost:80', '--master', '--default', '--read-only=0', - ], stderr=mock.ANY) + ]) @mock.patch.object(multisite, 'list_zones') @mock.patch.object(multisite, 'get_zonegroup_info') @@ -401,7 +401,7 @@ def test_modify_multisite_config_zonegroup_fail(self): '--rgw-realm=test_realm', '--endpoints=http://localhost:80', '--default', '--master', - ], stderr=mock.ANY) + ]) @mock.patch.object(multisite, 'modify_zonegroup') def test_modify_multisite_config_zone_fail(self, mock_modify_zonegroup): @@ -423,7 +423,7 @@ def test_modify_multisite_config_zone_fail(self, mock_modify_zonegroup): '--rgw-zonegroup=test_zonegroup', '--endpoints=http://localhost:80', '--master', '--default', '--read-only=0', - ], stderr=mock.ANY) + ]) @mock.patch.object(multisite, 'rename_zonegroup') def test_rename_multisite_config_zone_fail(self, mock_rename_zonegroup): @@ -455,7 +455,7 @@ def test_remove_zone_from_zonegroup(self, json_loads): 'radosgw-admin', '--id=rgw.testhost', 'zonegroup', 'remove', '--rgw-zonegroup=test_zonegroup', '--rgw-zone=test_zone', - ], stderr=mock.ANY) + ]) @mock.patch.object(json, 'loads') def test_add_zone_from_zonegroup(self, json_loads): @@ -469,7 +469,7 @@ def test_add_zone_from_zonegroup(self, json_loads): 'radosgw-admin', '--id=rgw.testhost', 'zonegroup', 'add', '--rgw-zonegroup=test_zonegroup', '--rgw-zone=test_zone', - ], stderr=mock.ANY) + ]) @mock.patch.object(multisite, 'list_zonegroups') @mock.patch.object(multisite, 'get_local_zone') From ad69ff72503623c3469d7e92aaf6acf8f10805f6 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 25 Jan 2023 08:51:04 -0500 Subject: [PATCH 2472/2699] Add kinetic support Add 22.10 run-on base and add kinetic to metadata.yaml. 
Additional changes: * change CHARMCRAFT_* vars to CRAFT_* and add entrypoint * pin tox < 4.0.0 for now * switch to charmcraft pack * add standard bindep.txt * switch charm-tools to 2.8.4 to remove py310 ruamel requirement * use charmcraft_channel 2.0/stable Change-Id: I549e8382e4c079eb9eefec13f2a72e994e2b86f0 --- ceph-fs/bindep.txt | 4 ++++ ceph-fs/charmcraft.yaml | 15 ++++++++++----- ceph-fs/osci.yaml | 1 + ceph-fs/requirements.txt | 2 +- ceph-fs/src/metadata.yaml | 1 + ceph-fs/tox.ini | 5 +++-- 6 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 ceph-fs/bindep.txt diff --git a/ceph-fs/bindep.txt b/ceph-fs/bindep.txt new file mode 100644 index 00000000..17575d9f --- /dev/null +++ b/ceph-fs/bindep.txt @@ -0,0 +1,4 @@ +libffi-dev [platform:dpkg] +libpq-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 883fc9b4..b5409f37 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -2,21 +2,23 @@ type: charm parts: charm: + charm-entrypoint: "hooks/install" build-packages: - tox - git - python3-dev + - libffi-dev override-build: | apt-get install ca-certificates -y tox -e build-reactive override-stage: | - echo "Copying charm to staging area: $CHARMCRAFT_STAGE" - NAME=$(ls $CHARMCRAFT_PART_BUILD/build/builds) - cp -r $CHARMCRAFT_PART_BUILD/build/builds/$NAME/* $CHARMCRAFT_STAGE/ + echo "Copying charm to staging area: $CRAFT_STAGE" + NAME=$(ls $CRAFT_PART_BUILD/build/builds) + cp -r $CRAFT_PART_BUILD/build/builds/$NAME/* $CRAFT_STAGE/ override-prime: | # For some reason, the normal priming chokes on the fact that there's a # hooks directory. - cp -r $CHARMCRAFT_STAGE/* . + cp -r $CRAFT_STAGE/* . bases: - build-on: @@ -30,4 +32,7 @@ bases: architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index e84a9f3e..6fb7c566 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -8,3 +8,4 @@ needs_charm_build: true charm_build_name: ceph-fs build_type: charmcraft + charmcraft_channel: 2.0/stable diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index a68620f6..c539e82b 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -10,7 +10,7 @@ setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb # Build requirements cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
-charm-tools==2.8.3 +charm-tools==2.8.4 simplejson diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 1bdda048..44087365 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,6 +12,7 @@ tags: series: - focal - jammy +- kinetic subordinate: false requires: ceph-mds: diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 2d60b8a4..9ae404dc 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -23,6 +23,7 @@ requires = pip < 20.3 virtualenv < 20.0 setuptools<50.0.0 + tox < 4.0.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.18.0 @@ -50,7 +51,7 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh [testenv:build-reactive] @@ -96,7 +97,7 @@ commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + charm-tools==2.8.4 commands = flake8 {posargs} src unit_tests [testenv:func-target] From e9b5792056a6066dcbb96feffb2550235eaa2b61 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 21 Feb 2023 14:55:07 -0300 Subject: [PATCH 2473/2699] Add kinetic support Add 22.10 run-on base and add kinetic to metadata.yaml. Change-Id: Ie057238ac1d3640047480cd243a9b4fb9e0eae30 --- ceph-radosgw/charmcraft.yaml | 3 +++ ceph-radosgw/metadata.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index 3d5d4c80..102ded4c 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -30,3 +30,6 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 195d9e38..9c1574a4 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,6 +15,7 @@ tags: series: - focal - jammy +- kinetic extra-bindings: public: admin: From b4ba1bf425815361e6321a05f70f5de958a9e697 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 28 Feb 2023 09:25:28 -0500 Subject: [PATCH 2474/2699] Remove note about tech-preview state Change-Id: I975bbe00d27d10900503deb3e031b9b906ee8c70 --- ceph-dashboard/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index dbc301c5..933b5d6b 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -5,8 +5,6 @@ a built-in web-based Ceph management and monitoring application. It works in conjunction with the [openstack-loadbalancer][loadbalancer-charm] charm, which in turn utilises the [hacluster][hacluster-charm] charm. -> **Note**: The ceph-dashboard charm is currently in tech-preview. - # Usage ## Configuration From 748bdf954fda3aa2f0f61c404732c7a1295a21a2 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Tue, 7 Feb 2023 13:44:49 +0530 Subject: [PATCH 2475/2699] Adds cinder volume in test bundles to replace loopback devices in test. 
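The bundles below use the storage directive form
`osd-devices: 'cinder,10G,2'`, i.e. two 10G Cinder-backed volumes per
unit rather than a single loopback-style device. Outside a bundle, the
equivalent deployment would be along the lines of
`juju deploy ceph-osd --storage osd-devices=cinder,10G,2`
(illustrative command, not part of this change).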
Change-Id: I96b52dda0698d8942a836bd5df0baa3a0e3500ad func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/989 --- ceph-osd/tests/bundles/focal-xena.yaml | 2 +- ceph-osd/tests/bundles/focal-yoga.yaml | 2 +- ceph-osd/tests/bundles/jammy-yoga.yaml | 2 +- ceph-osd/tox.ini | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml index fb86f5a5..4cf0cc70 100644 --- a/ceph-osd/tests/bundles/focal-xena.yaml +++ b/ceph-osd/tests/bundles/focal-xena.yaml @@ -59,7 +59,7 @@ applications: charm: ../../ceph-osd.charm num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml index 839e8189..41b6acbb 100644 --- a/ceph-osd/tests/bundles/focal-yoga.yaml +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -59,7 +59,7 @@ applications: charm: ../../ceph-osd.charm num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin diff --git a/ceph-osd/tests/bundles/jammy-yoga.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml index 0355b2eb..a4a640e8 100644 --- a/ceph-osd/tests/bundles/jammy-yoga.yaml +++ b/ceph-osd/tests/bundles/jammy-yoga.yaml @@ -59,7 +59,7 @@ applications: charm: ../../ceph-osd.charm num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 2431a75d..98bf885f 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -25,7 +25,7 @@ setenv = VIRTUAL_ENV={envdir} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh + {toxinidir}/rename.sh passenv = HOME TERM From 3c51dd8472cf2c63ed8e0d46c2a1eaf80dd24d35 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Thu, 9 Mar 2023 16:55:29 +0530 Subject: [PATCH 2476/2699] Adds timeout for ceph command calls. 
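For illustration, a minimal sketch of the semantics these changes rely on (the OSD id and the 300-second value mirror the defaults added below): if the command exceeds the timeout, subprocess kills the child and raises subprocess.TimeoutExpired instead of blocking the hook indefinitely.

    # Sketch only, not part of this patch; error handling is left to the caller.
    import subprocess

    try:
        subprocess.check_call(
            ['ceph', '--id', 'osd-removal', 'osd', 'ok-to-stop', 'osd.1'],
            timeout=300)
    except subprocess.TimeoutExpired:
        # The ceph CLI hung (e.g. lost mon quorum); fail fast instead.
        raise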
Change-Id: I8c81b1f0042181d814d5f268282b082c8a5fc217 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/989 --- ceph-osd/actions/remove_disk.py | 34 ++++++++++++------- .../unit_tests/test_actions_remove_disk.py | 19 ++++++++--- 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/ceph-osd/actions/remove_disk.py b/ceph-osd/actions/remove_disk.py index 7e1cae25..154290b0 100755 --- a/ceph-osd/actions/remove_disk.py +++ b/ceph-osd/actions/remove_disk.py @@ -103,35 +103,43 @@ def map_id_to_device(dev_map, osd_id): return elem['path'] -def safe_to_destroy(osd_id): +def safe_to_destroy(osd_id, timeout=300): """Test whether an OSD id is safe to destroy per the Ceph cluster.""" - ret = subprocess.call(['ceph', '--id', 'osd-removal', - 'osd', 'safe-to-destroy', osd_id]) + ret = subprocess.call([ + 'ceph', '--id', 'osd-removal', + 'osd', 'safe-to-destroy', osd_id + ], timeout=timeout) return ret == 0 -def safe_to_stop(osd_id): +def safe_to_stop(osd_id, timeout=300): """Test whether an OSD is safe to stop.""" - ret = subprocess.call(['ceph', '--id', 'osd-removal', - 'osd', 'ok-to-stop', osd_id]) + ret = subprocess.call([ + 'ceph', '--id', 'osd-removal', + 'osd', 'ok-to-stop', osd_id + ], timeout=timeout) return ret == 0 -def reweight_osd(osd_id): +def reweight_osd(osd_id, timeout=300): """Set the weight of the OSD id to zero.""" - subprocess.check_call(['ceph', '--id', 'osd-removal', - 'osd', 'crush', 'reweight', osd_id, '0']) + subprocess.check_call([ + 'ceph', '--id', 'osd-removal', + 'osd', 'crush', 'reweight', osd_id, '0' + ], timeout=timeout) -def destroy(osd_id, purge=False): +def destroy(osd_id, purge=False, timeout=300): """Destroy or purge an OSD id.""" for _ in range(10): # We might get here before the OSD is marked as down. As such, # retry if the error code is EBUSY. 
try: - subprocess.check_call(['ceph', '--id', 'osd-removal', 'osd', - 'purge' if purge else 'destroy', - osd_id, '--yes-i-really-mean-it']) + subprocess.check_call([ + 'ceph', '--id', 'osd-removal', 'osd', + 'purge' if purge else 'destroy', + osd_id, '--yes-i-really-mean-it' + ], timeout=timeout) return except subprocess.CalledProcessError as e: if e.returncode != errno.EBUSY: diff --git a/ceph-osd/unit_tests/test_actions_remove_disk.py b/ceph-osd/unit_tests/test_actions_remove_disk.py index 369d3f1f..b729ab1c 100644 --- a/ceph-osd/unit_tests/test_actions_remove_disk.py +++ b/ceph-osd/unit_tests/test_actions_remove_disk.py @@ -87,11 +87,20 @@ def test_action_osd_remove(self, get_bcache_names, check_call, obj = remove_disk.ActionOSD(dev_map, osd_id='1') obj.remove(True, 1, True) - call.assert_any_call(prefix_args + ['osd', 'safe-to-destroy', 'osd.1']) - check_call.assert_any_call(prefix_args + ['osd', 'purge', 'osd.1', - '--yes-i-really-mean-it']) - check_call.assert_any_call(prefix_args + ['osd', 'crush', 'reweight', - 'osd.1', '0']) + + # Subprocess Call checks + call.assert_any_call( + prefix_args + ['osd', 'safe-to-destroy', 'osd.1'], timeout=300 + ) + check_call.assert_any_call( + prefix_args + ['osd', 'purge', 'osd.1', '--yes-i-really-mean-it'], + timeout=300 + ) + check_call.assert_any_call( + prefix_args + ['osd', 'crush', 'reweight', 'osd.1', '0'], + timeout=300 + ) + bcache_remove.assert_called_with( '/dev/bcache0', '/dev/backing', '/dev/caching') report = obj.report From 666937b62efd444f10737dba8e0d5d9b777d0afa Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 10 Mar 2023 12:55:27 -0500 Subject: [PATCH 2477/2699] Fix issue with ceph-client relation handling A bug was introduced when changing ceph-client to an operator framework library that caused the fallback application_name handling to present a class name rather than a remote application name. This change updates the handling to use `app.name` rather than `app`. As a drive-by, this also allow-lists the fully-qualified rename.sh. Closes-Bug: #1995086 Change-Id: I57b685cb78ba5c4930eb0fa73d7ef09d39d73743 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1022 --- ceph-mon/src/ceph_client.py | 2 +- ceph-mon/tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-mon/src/ceph_client.py b/ceph-mon/src/ceph_client.py index b9881380..2a122d8d 100644 --- a/ceph-mon/src/ceph_client.py +++ b/ceph-mon/src/ceph_client.py @@ -93,7 +93,7 @@ def _get_client_application_name(self, relation, unit): """Retrieve client application name from relation data.""" return relation.data[unit].get( 'application-name', - relation.app) + relation.app.name) def _handle_client_relation(self, relation, unit): """Handle broker request and set the relation data diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 75e28749..cdfdb44c 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -27,7 +27,7 @@ install_command = commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh + {toxinidir}/rename.sh passenv = HOME TERM From 2fb856dbf5edce33719206a939713e16f8e42d7a Mon Sep 17 00:00:00 2001 From: Facundo Ciccioli Date: Fri, 10 Mar 2023 18:02:34 +0100 Subject: [PATCH 2478/2699] Fix Nagios additional checks functionality Commit 40b22e3d on the juju/charm-helpers repo introduced shell quoting of each argument passed to the check, rendering the quoting of the double quotes done here not only unnecessary but also damaging to the final command.
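To make the breakage concrete, a rough sketch (assuming the charm-helpers change quotes each argument with shlex.quote(); '5 degraded' is an invented check value):

    # Sketch only: how per-argument quoting interacts with pre-escaped quotes.
    from shlex import quote

    print(quote('\\"5 degraded\\"'))  # old pre-escaped form
    # -> '\"5 degraded\"'   (the backslashes reach the plugin verbatim)
    print(quote('"5 degraded"'))      # new plain form
    # -> '"5 degraded"'     (the quoting layer now does all the escaping)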
Closes-Bug: #2008784 Change-Id: Ifedd5875d27e72a857b01a48afcd058476734695 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1022 --- ceph-mon/src/ceph_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 01cca64e..5f9b2a11 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -1167,7 +1167,7 @@ def update_nrpe_config(): name = "ceph-{}".format(key.replace(" ", "")) log("Adding check {}".format(name)) check_cmd = 'check_ceph_status.py -f {}' \ - ' --additional_check \\\"{}\\\"' \ + ' --additional_check \"{}\"' \ ' {}'.format(STATUS_FILE, value, "--additional_check_critical" if additional_critical is True else "") From 232df2b90f91dcb52930ecbfb7e5676e61a7ff8a Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 8 Mar 2023 19:31:35 -0300 Subject: [PATCH 2479/2699] Use a different name for the local key/value store The operator framework and charmhelpers use the same path for the local K/V store, which causes problems when running certain hooks like 'pre-series-upgrade'. In order to work around this issue, this patchset makes the charmhelpers lib use a different path, while migrating the DB file before doing so. Closes-Bug: #2005137 Change-Id: Ic2e024371ff431888731753d29fff8538232009a --- ceph-mon/src/charm.py | 26 ++++++++++++++++++++++++++ ceph-mon/tox.ini | 2 +- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 8a1786b4..66021235 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -1,5 +1,7 @@ #! /usr/bin/python3 import logging +import os +import shutil from ops.main import main @@ -47,6 +49,8 @@ class CephMonCharm(ops_openstack.core.OSBaseCharm): 'radosgw', 'lvm2', 'parted', 'smartmontools', ] + NEW_DB_PATH = '.charmhelpers-unit-state.db' + on = CephCharmEvents() # General charm control callbacks. @@ -83,12 +87,29 @@ def on_config(self, event): if hooks.config_changed(): self.on.notify_clients.emit() + def make_db_path(self, suffix): + return os.path.join(os.environ.get('CHARM_DIR', ''), suffix) + + def migrate_db(self): + """ + Migrate the Key/Value database into a new location. + This is done to avoid conflicts between charmhelpers and + the ops library, since they both use the same path and + with excluding lock semantics. + """ + db_path = self.make_db_path('.unit-state.db') + new_db_path = self.make_db_path(self.NEW_DB_PATH) + if os.path.exists(db_path) and not os.path.exists(new_db_path): + # The new DB doesn't exist yet. Copy it over. + shutil.copy(db_path, new_db_path) + def on_pre_series_upgrade(self, event): hooks.pre_series_upgrade() def on_upgrade(self, event): self._initialise_config() self.metrics_endpoint.update_alert_rules() + self.migrate_db() hooks.upgrade_charm() self.on.notify_clients.emit() @@ -175,6 +196,11 @@ def __init__(self, *args): "Not running hook, CMR detected and not supported") return + # Make the charmhelpers lib use a different DB path. This is done + # so as to avoid conflicts with what the ops framework uses. 
+ # See: https://bugs.launchpad.net/charm-ceph-mon/+bug/2005137 + os.environ['UNIT_STATE_DB'] = self.make_db_path(self.NEW_DB_PATH) + fw = self.framework self.clients = ceph_client.CephClientProvides(self) diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index 75e28749..cdfdb44c 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -27,7 +27,7 @@ install_command = commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh + {toxinidir}/rename.sh passenv = HOME TERM From 30d96e8006977f7816d9845e7451cb6f22bf22f3 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 15 Mar 2023 19:52:36 -0300 Subject: [PATCH 2480/2699] Add a notice indicating that the charm is deprecated Change-Id: I768cca3c55c636a14d0eb6f026d8d745e6f70a5d --- ceph-iscsi/README.md | 2 ++ ceph-iscsi/osci.yaml | 8 ++++---- ceph-iscsi/requirements.txt | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index c5518605..4dc4ff75 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -4,6 +4,8 @@ The ceph-iscsi charm deploys the [Ceph iSCSI gateway service][ceph-iscsi-upstream]. The charm is intended to be used in conjunction with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms. +**NOTE**: This charm is deprecated. No new features will be introduced. + # Usage ## Configuration diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index 6f7edc1b..fce5cb16 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -23,25 +23,25 @@ - name: tox-py310 soft: true vars: - tox_extra_args: jammy + tox_extra_args: -- jammy - job: name: ceph-iscsi-jammy-quincy-ec parent: func-target dependencies: - ceph-iscsi-jammy-quincy vars: - tox_extra_args: jammy-ec + tox_extra_args: -- jammy-ec - job: name: ceph-iscsi-kinetic-quincy parent: func-target dependencies: - ceph-iscsi-jammy-quincy vars: - tox_extra_args: kinetic + tox_extra_args: -- kinetic - job: name: ceph-iscsi-kinetic-quincy-ec parent: func-target dependencies: - ceph-iscsi-jammy-quincy vars: - tox_extra_args: kinetic-ec + tox_extra_args: -- kinetic-ec diff --git a/ceph-iscsi/requirements.txt b/ceph-iscsi/requirements.txt index 160abfd1..abcff2cc 100644 --- a/ceph-iscsi/requirements.txt +++ b/ceph-iscsi/requirements.txt @@ -1,6 +1,6 @@ # requirements +ops <= 1.6.0 git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates From 4c6224393c1d153f701c9907a612351275549fb4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 16 Mar 2023 14:18:42 -0400 Subject: [PATCH 2481/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include lunar - update snapcraft to allow run-on for lunar Change-Id: I351ec2e549ef9ce1b25d4a57643c5f2e5e151d66 --- ceph-fs/charmcraft.yaml | 3 ++ ceph-fs/osci.yaml | 2 + ceph-fs/src/metadata.yaml | 1 + ceph-fs/src/tests/bundles/jammy-antelope.yaml | 44 +++++++++++++++++++ ceph-fs/src/tests/bundles/jammy-zed.yaml | 44 +++++++++++++++++++ ceph-fs/src/tests/bundles/kinetic-zed.yaml | 44 +++++++++++++++++++ ceph-fs/src/tests/bundles/lunar-antelope.yaml | 44 +++++++++++++++++++ 7 files changed, 182 insertions(+) create mode 100644 
ceph-fs/src/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-fs/src/tests/bundles/jammy-zed.yaml create mode 100644 ceph-fs/src/tests/bundles/kinetic-zed.yaml create mode 100644 ceph-fs/src/tests/bundles/lunar-antelope.yaml diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index b5409f37..25b2873b 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -36,3 +36,6 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 6fb7c566..f8e2ead4 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -4,6 +4,8 @@ - charm-unit-jobs-py39 - charm-xena-functional-jobs - charm-yoga-functional-jobs + - charm-zed-functional-jobs + - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-fs diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 44087365..cb2dca34 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -13,6 +13,7 @@ series: - focal - jammy - kinetic +- lunar subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/bundles/jammy-antelope.yaml b/ceph-fs/src/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..b75c3506 --- /dev/null +++ b/ceph-fs/src/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,44 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope + +local_overlay_enabled: False + +series: &series jammy + +applications: + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + ceph-fs: + charm: ../../../ceph-fs.charm + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: latest/edge + +relations: + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' diff --git a/ceph-fs/src/tests/bundles/jammy-zed.yaml b/ceph-fs/src/tests/bundles/jammy-zed.yaml new file mode 100644 index 00000000..04bfb37c --- /dev/null +++ b/ceph-fs/src/tests/bundles/jammy-zed.yaml @@ -0,0 +1,44 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-zed + +local_overlay_enabled: False + +series: &series jammy + +applications: + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + ceph-fs: + charm: ../../../ceph-fs.charm + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: latest/edge + +relations: + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' diff --git a/ceph-fs/src/tests/bundles/kinetic-zed.yaml b/ceph-fs/src/tests/bundles/kinetic-zed.yaml new file mode 100644 index 00000000..c976a4a5 --- /dev/null +++ b/ceph-fs/src/tests/bundles/kinetic-zed.yaml @@ -0,0 +1,44 @@ +variables: + openstack-origin: &openstack-origin distro + +local_overlay_enabled: False + +series: &series 
kinetic + +applications: + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + ceph-fs: + charm: ../../../ceph-fs.charm + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: latest/edge + +relations: + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' diff --git a/ceph-fs/src/tests/bundles/lunar-antelope.yaml b/ceph-fs/src/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..35d64bcb --- /dev/null +++ b/ceph-fs/src/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,44 @@ +variables: + openstack-origin: &openstack-origin distro + +local_overlay_enabled: False + +series: &series lunar + +applications: + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + ceph-fs: + charm: ../../../ceph-fs.charm + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: latest/edge + +relations: + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' From 6b76235c68a86dc0f551d690b5627d1d10fe7fbd Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 17 Mar 2023 08:55:55 -0400 Subject: [PATCH 2482/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I6b229b502dd4ee9f1d219240b86f7826abf0c25d --- ceph-mon/.zuul.yaml | 1 + ceph-mon/charmcraft.yaml | 3 + ceph-mon/osci.yaml | 2 + ceph-mon/tests/bundles/jammy-antelope.yaml | 263 ++++++++++++++++++++ ceph-mon/tests/bundles/jammy-zed.yaml | 263 ++++++++++++++++++++ ceph-mon/tests/bundles/kinetic-zed.yaml | 265 +++++++++++++++++++++ ceph-mon/tests/bundles/lunar-antelope.yaml | 265 +++++++++++++++++++++ 7 files changed, 1062 insertions(+) create mode 100644 ceph-mon/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-mon/tests/bundles/jammy-zed.yaml create mode 100644 ceph-mon/tests/bundles/kinetic-zed.yaml create mode 100644 ceph-mon/tests/bundles/lunar-antelope.yaml diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index 7ffc71cb..69974080 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,4 +1,5 @@ - project: templates: - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-jobs - openstack-cover-jobs diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index 2ebc1eac..8c6755c6 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -39,3 +39,6 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 462e2476..4349fa91 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -4,6 +4,8 @@ - charm-unit-jobs-py310 - 
charm-xena-functional-jobs - charm-yoga-functional-jobs + - charm-zed-functional-jobs + - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-mon diff --git a/ceph-mon/tests/bundles/jammy-antelope.yaml b/ceph-mon/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..bb475bc1 --- /dev/null +++ b/ceph-mon/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,263 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + channel: quincy/edge + + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + + prometheus2: + charm: ch:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - 
- 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/jammy-zed.yaml b/ceph-mon/tests/bundles/jammy-zed.yaml new file mode 100644 index 00000000..bb475bc1 --- /dev/null +++ b/ceph-mon/tests/bundles/jammy-zed.yaml @@ -0,0 +1,263 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + channel: quincy/edge + + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + + prometheus2: + charm: ch:prometheus2 + num_units: 1 + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + 
- - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/kinetic-zed.yaml b/ceph-mon/tests/bundles/kinetic-zed.yaml new file mode 100644 index 00000000..24818960 --- /dev/null +++ b/ceph-mon/tests/bundles/kinetic-zed.yaml @@ -0,0 +1,265 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + series: focal + '17': + + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + channel: quincy/edge + + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + 
charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + + prometheus2: + charm: ch:prometheus2 + num_units: 1 + series: focal + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/lunar-antelope.yaml b/ceph-mon/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..24818960 --- /dev/null +++ b/ceph-mon/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,265 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + series: focal + '17': + + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + source: *openstack-origin + osd-devices: '/dev/test-non-existent' + to: + - '3' + - '4' + - '5' + channel: quincy/edge + + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + to: + - '6' + - '7' + - '8' + + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + options: + source: *openstack-origin + channel: quincy/edge + to: + - '17' + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + libvirt-image-backend: rbd + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + block-device: 'None' + glance-api-version: '2' + openstack-origin: *openstack-origin + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + + prometheus2: + charm: ch:prometheus2 + num_units: 1 + series: focal + to: + - '16' + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - nova-compute:ceph-access + - cinder-ceph:ceph-access + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' 
+ - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'ceph-mon:prometheus' + - 'prometheus2:target' From 68270beef2313804830aae584097fc7da27844e5 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 15 Mar 2023 19:52:36 -0300 Subject: [PATCH 2483/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I70e902b52058b1e3ed6e3918b33ccb571eb0aa65 --- ceph-iscsi/.zuul.yaml | 3 +- ceph-iscsi/charmcraft.yaml | 6 ++ ceph-iscsi/config.yaml | 2 +- ceph-iscsi/metadata.yaml | 2 + ceph-iscsi/osci.yaml | 21 ++++++ ceph-iscsi/tests/bundles/lunar-ec.yaml | 94 +++++++++++++++++++++++++ ceph-iscsi/tests/bundles/lunar.yaml | 95 ++++++++++++++++++++++++++ 7 files changed, 221 insertions(+), 2 deletions(-) create mode 100644 ceph-iscsi/tests/bundles/lunar-ec.yaml create mode 100644 ceph-iscsi/tests/bundles/lunar.yaml diff --git a/ceph-iscsi/.zuul.yaml b/ceph-iscsi/.zuul.yaml index 23bf5f62..69974080 100644 --- a/ceph-iscsi/.zuul.yaml +++ b/ceph-iscsi/.zuul.yaml @@ -1,4 +1,5 @@ - project: templates: - - openstack-python3-charm-zed-jobs + - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-jobs - openstack-cover-jobs diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 070bf49d..5d665764 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -30,3 +30,9 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index 9a9b79e2..870c9239 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. 
source: type: string - default: zed + default: distro description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 78cef03f..f67f4d3a 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -11,6 +11,8 @@ tags: - misc series: - jammy +- kinetic +- lunar subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index fce5cb16..a6b65fb5 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -9,6 +9,10 @@ voting: false - ceph-iscsi-kinetic-quincy-ec: voting: false + - ceph-iscsi-lunar-quincy: + voting: false + - ceph-iscsi-lunar-quincy-ec: + voting: false vars: needs_charm_build: true charm_build_name: ceph-iscsi @@ -45,3 +49,20 @@ - ceph-iscsi-jammy-quincy vars: tox_extra_args: -- kinetic-ec + +- job: + name: ceph-iscsi-lunar-quincy + parent: func-target + voting: false + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: -- lunar +- job: + name: ceph-iscsi-lunar-quincy-ec + parent: func-target + voting: false + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: -- lunar-ec diff --git a/ceph-iscsi/tests/bundles/lunar-ec.yaml b/ceph-iscsi/tests/bundles/lunar-ec.yaml new file mode 100644 index 00000000..b9c97710 --- /dev/null +++ b/ceph-iscsi/tests/bundles/lunar-ec.yaml @@ -0,0 +1,94 @@ +local_overlay_enabled: False +series: lunar +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '0' + - '1' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/lunar.yaml b/ceph-iscsi/tests/bundles/lunar.yaml new file mode 100644 index 00000000..f9123b3c --- /dev/null +++ b/ceph-iscsi/tests/bundles/lunar.yaml @@ -0,0 +1,95 @@ +local_overlay_enabled: False +series: lunar +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': + '16': + '17': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 4 + options: + gateway-metadata-pool: 
iscsi-foo-metadata + to: + - '0' + - '1' + - '16' + - '17' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' From ef7aed60a71313f97d3cb753ccad6aae8b8e1f3b Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 17 Mar 2023 10:28:54 -0400 Subject: [PATCH 2484/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include lunar - update snapcraft to allow run-on for lunar Change-Id: I16b9d724930eacca42b9410c73931caceca8cca8 --- ceph-osd/.zuul.yaml | 3 +- ceph-osd/charmcraft.yaml | 3 + ceph-osd/metadata.yaml | 1 + ceph-osd/osci.yaml | 2 + ceph-osd/tests/bundles/jammy-antelope.yaml | 238 +++++++++++++++++++++ ceph-osd/tests/bundles/jammy-zed.yaml | 238 +++++++++++++++++++++ ceph-osd/tests/bundles/kinetic-zed.yaml | 238 +++++++++++++++++++++ ceph-osd/tests/bundles/lunar-antelope.yaml | 238 +++++++++++++++++++++ 8 files changed, 960 insertions(+), 1 deletion(-) create mode 100644 ceph-osd/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-osd/tests/bundles/jammy-zed.yaml create mode 100644 ceph-osd/tests/bundles/kinetic-zed.yaml create mode 100644 ceph-osd/tests/bundles/lunar-antelope.yaml diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index 1ffc530a..69974080 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,4 +1,5 @@ - project: templates: - openstack-python3-charm-yoga-jobs - - openstack-cover-jobs \ No newline at end of file + - openstack-python3-charm-jobs + - openstack-cover-jobs diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index 7ac40aca..68b48b28 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -36,3 +36,6 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index ecce1ed7..097d3302 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -14,6 +14,7 @@ series: - focal - jammy - kinetic +- lunar description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. 
diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index f366b6c4..672db7d6 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -4,6 +4,8 @@ - charm-unit-jobs-py310 - charm-xena-functional-jobs - charm-yoga-functional-jobs + - charm-zed-functional-jobs + - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-osd diff --git a/ceph-osd/tests/bundles/jammy-antelope.yaml b/ceph-osd/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..41b6acbb --- /dev/null +++ b/ceph-osd/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,238 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ../../ceph-osd.charm + num_units: 3 + storage: + osd-devices: 'cinder,10G,2' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + aa-profile-mode: enforce + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: quincy/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/jammy-zed.yaml b/ceph-osd/tests/bundles/jammy-zed.yaml new file mode 100644 index 00000000..41b6acbb --- /dev/null +++ b/ceph-osd/tests/bundles/jammy-zed.yaml @@ -0,0 +1,238 @@ +variables: + openstack-origin: &openstack-origin cloud:focal-yoga + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ../../ceph-osd.charm + num_units: 3 + storage: + osd-devices: 'cinder,10G,2' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + aa-profile-mode: enforce + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: quincy/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + 
charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/kinetic-zed.yaml b/ceph-osd/tests/bundles/kinetic-zed.yaml new file mode 100644 index 00000000..a4a640e8 --- /dev/null +++ b/ceph-osd/tests/bundles/kinetic-zed.yaml @@ -0,0 +1,238 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ../../ceph-osd.charm + num_units: 3 + storage: + osd-devices: 'cinder,10G,2' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + aa-profile-mode: enforce + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: quincy/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 
'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/lunar-antelope.yaml b/ceph-osd/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..a4a640e8 --- /dev/null +++ b/ceph-osd/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,238 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ../../ceph-osd.charm + num_units: 3 + storage: + osd-devices: 'cinder,10G,2' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + aa-profile-mode: enforce + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: quincy/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: yoga/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: yoga/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: yoga/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + channel: yoga/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: yoga/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: yoga/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: yoga/edge + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 
'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' From 458179aa79e9964b15923249fcc6bfbd45537f48 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Fri, 17 Mar 2023 10:21:11 -0400 Subject: [PATCH 2485/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I8e8fdcaec9b51aeb16ffc255fdc94ad991980504 --- ceph-nfs/charmcraft.yaml | 8 +++- ceph-nfs/metadata.yaml | 2 + ceph-nfs/osci.yaml | 24 +++++++++-- ceph-nfs/requirements.txt | 2 +- ceph-nfs/tests/bundles/kinetic-quincy.yaml | 47 ++++++++++++++++++++++ ceph-nfs/tests/bundles/lunar-quincy.yaml | 47 ++++++++++++++++++++++ 6 files changed, 125 insertions(+), 5 deletions(-) create mode 100644 ceph-nfs/tests/bundles/kinetic-quincy.yaml create mode 100644 ceph-nfs/tests/bundles/lunar-quincy.yaml diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml index 23219175..ae4e297d 100644 --- a/ceph-nfs/charmcraft.yaml +++ b/ceph-nfs/charmcraft.yaml @@ -32,4 +32,10 @@ bases: architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 051ed1f9..9a583a1e 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -10,6 +10,8 @@ tags: series: - focal - jammy + - kinetic + - lunar subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index 54cd9b07..8002eaf5 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ 
-7,6 +7,8 @@ - focal-pacific - focal-quincy - jammy-quincy + - kinetic-quincy + - lunar-quincy vars: needs_charm_build: true charm_build_name: ceph-nfs @@ -20,7 +22,7 @@ - tox-py38 - tox-py39 vars: - tox_extra_args: focal-pacific + tox_extra_args: -- focal-pacific - job: name: focal-quincy parent: func-target @@ -30,11 +32,27 @@ - tox-py38 - tox-py39 vars: - tox_extra_args: focal-quincy + tox_extra_args: -- focal-quincy - job: name: jammy-quincy parent: func-target dependencies: - focal-quincy vars: - tox_extra_args: jammy-quincy \ No newline at end of file + tox_extra_args: -- jammy-quincy +- job: + name: kinetic-quincy + parent: func-target + dependencies: + - focal-quincy + vars: + tox_extra_args: -- kinetic-quincy +- job: + name: lunar-quincy + parent: func-target + voting: false + dependencies: + - focal-quincy + vars: + tox_extra_args: -- lunar-quincy + \ No newline at end of file diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt index cda466ad..6e7ef1d8 100644 --- a/ceph-nfs/requirements.txt +++ b/ceph-nfs/requirements.txt @@ -1,6 +1,6 @@ # requirements +ops <= 1.6.0 git+https://github.com/juju/charm-helpers.git#egg=charmhelpers -git+https://github.com/canonical/operator.git#egg=ops git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack git+https://opendev.org/openstack/charm-interface-hacluster#egg=interface_hacluster diff --git a/ceph-nfs/tests/bundles/kinetic-quincy.yaml b/ceph-nfs/tests/bundles/kinetic-quincy.yaml new file mode 100644 index 00000000..669cb915 --- /dev/null +++ b/ceph-nfs/tests/bundles/kinetic-quincy.yaml @@ -0,0 +1,47 @@ +local_overlay_enabled: False +series: jammy +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + options: + source: distro + ceph-osd: + charm: ch:ceph-osd + channel: quincy/edge + num_units: 3 + storage: + osd-devices: '2,10G' + options: + source: distro + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + monitor-count: '3' + expected-osd-count: 6 + source: distro + ceph-fs: + charm: ch:ceph-fs + channel: quincy/edge + num_units: 2 + options: + source: distro + hacluster: + charm: ch:hacluster + channel: 2.4/edge + options: + cluster_count: 2 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' + - - 'ceph-nfs:ha' + - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/lunar-quincy.yaml b/ceph-nfs/tests/bundles/lunar-quincy.yaml new file mode 100644 index 00000000..669cb915 --- /dev/null +++ b/ceph-nfs/tests/bundles/lunar-quincy.yaml @@ -0,0 +1,47 @@ +local_overlay_enabled: False +series: jammy +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + options: + source: distro + ceph-osd: + charm: ch:ceph-osd + channel: quincy/edge + num_units: 3 + storage: + osd-devices: '2,10G' + options: + source: distro + ceph-mon: + charm: ch:ceph-mon + channel: quincy/edge + num_units: 3 + options: + monitor-count: '3' + expected-osd-count: 6 + source: distro + ceph-fs: + charm: ch:ceph-fs + channel: quincy/edge + num_units: 2 + options: + source: distro + hacluster: + charm: ch:hacluster + channel: 2.4/edge + options: + cluster_count: 2 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 
'ceph-fs' + - 'ceph-mon' + - - 'ceph-nfs:ha' + - 'hacluster:ha' From 583a1e154094eb9ad17c8bca7b16a1d1085149ce Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 20 Mar 2023 14:01:27 +0100 Subject: [PATCH 2486/2699] Fix: increase timeout for remove-disk The `ceph osd purge` command used for the remove-disk action can take several minutes to run. Increase the timeout for this to avoid spurious errors. Change-Id: I3a7fafa42c4a2ecaf45ba476e0157937e468ca33 --- ceph-osd/actions/remove_disk.py | 2 +- ceph-osd/unit_tests/test_actions_remove_disk.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ceph-osd/actions/remove_disk.py b/ceph-osd/actions/remove_disk.py index 154290b0..beced7d9 100755 --- a/ceph-osd/actions/remove_disk.py +++ b/ceph-osd/actions/remove_disk.py @@ -129,7 +129,7 @@ def reweight_osd(osd_id, timeout=300): ], timeout=timeout) -def destroy(osd_id, purge=False, timeout=300): +def destroy(osd_id, purge=False, timeout=600): """Destroy or purge an OSD id.""" for _ in range(10): # We might get here before the OSD is marked as down. As such, diff --git a/ceph-osd/unit_tests/test_actions_remove_disk.py b/ceph-osd/unit_tests/test_actions_remove_disk.py index b729ab1c..6a47c6c4 100644 --- a/ceph-osd/unit_tests/test_actions_remove_disk.py +++ b/ceph-osd/unit_tests/test_actions_remove_disk.py @@ -94,7 +94,7 @@ def test_action_osd_remove(self, get_bcache_names, check_call, ) check_call.assert_any_call( prefix_args + ['osd', 'purge', 'osd.1', '--yes-i-really-mean-it'], - timeout=300 + timeout=600 ) check_call.assert_any_call( prefix_args + ['osd', 'crush', 'reweight', 'osd.1', '0'], From 8209231fd214de072dee624b0070393c11c88c39 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 20 Mar 2023 08:42:13 -0400 Subject: [PATCH 2487/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I05ce01c13efdd453c06e3c8d615d64bad6e27727 --- ceph-radosgw/.zuul.yaml | 1 + ceph-radosgw/charmcraft.yaml | 3 + ceph-radosgw/osci.yaml | 64 +++++++++ .../bundles/jammy-antelope-multisite.yaml | 99 ++++++++++++++ .../bundles/jammy-antelope-namespaced.yaml | 124 ++++++++++++++++++ .../tests/bundles/jammy-antelope.yaml | 123 +++++++++++++++++ .../tests/bundles/lunar-antelope.yaml | 123 +++++++++++++++++ ceph-radosgw/tests/tests.yaml | 8 ++ ceph-radosgw/tox.ini | 2 +- 9 files changed, 546 insertions(+), 1 deletion(-) create mode 100644 ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml create mode 100644 ceph-radosgw/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-radosgw/tests/bundles/lunar-antelope.yaml diff --git a/ceph-radosgw/.zuul.yaml b/ceph-radosgw/.zuul.yaml index 75fc2a78..77259668 100644 --- a/ceph-radosgw/.zuul.yaml +++ b/ceph-radosgw/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - openstack-python3-charm-zed-jobs + - openstack-python3-charm-jobs diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index 102ded4c..f6121727 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -33,3 +33,6 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index b511577b..da15f095 100644 --- 
a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -11,18 +11,30 @@ - jammy-yoga-multisite - jammy-zed-multisite: voting: false + - jammy-antelope-multisite: + voting: false - kinetic-zed-multisite: voting: false + - lunar-antelope-multisite: + voting: false - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced - vault-jammy-zed_rgw: voting: false - vault-jammy-zed-namespaced: voting: false + - vault-jammy-antelope_rgw: + voting: false + - vault-jammy-antelope-namespaced: + voting: false - vault-kinetic-zed_rgw: voting: false - vault-kinetic-zed-namespaced: voting: false + - vault-lunar-antelope_rgw: + voting: false + - vault-lunar-antelope-namespaced: + voting: false vars: needs_charm_build: true charm_build_name: ceph-radosgw @@ -57,6 +69,13 @@ - jammy-yoga-multisite vars: tox_extra_args: '-- jammy-zed-multisite' +- job: + name: jammy-antelope-multisite + parent: func-target + dependencies: + - jammy-yoga-multisite + vars: + tox_extra_args: '-- jammy-antelope-multisite' - job: name: kinetic-zed-multisite parent: func-target @@ -64,6 +83,13 @@ - jammy-yoga-multisite vars: tox_extra_args: '-- kinetic-zed-multisite' +- job: + name: lunar-antelope-multisite + parent: func-target + dependencies: + - jammy-yoga-multisite + vars: + tox_extra_args: '-- lunar-antelope-multisite' - job: name: vault-focal-yoga_rgw parent: func-target @@ -92,6 +118,20 @@ - jammy-yoga-multisite vars: tox_extra_args: '-- vault:jammy-yoga-namespaced' +- job: + name: vault-jammy-zed-namespaced + parent: func-target + dependencies: + - jammy-yoga-multisite + vars: + tox_extra_args: '-- vault:jammy-zed-namespaced' +- job: + name: vault-jammy-antelope-namespaced + parent: func-target + dependencies: + - jammy-yoga-multisite + vars: + tox_extra_args: '-- vault:jammy-antelope-namespaced' - job: name: vault-jammy-zed_rgw parent: func-target @@ -124,3 +164,27 @@ - vault-jammy-yoga-namespaced vars: tox_extra_args: '-- vault:kinetic-zed-namespaced' +- job: + name: vault-jammy-antelope_rgw + parent: func-target + dependencies: + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced + vars: + tox_extra_args: '-- vault:jammy-antelope' +- job: + name: vault-lunar-antelope_rgw + parent: func-target + dependencies: + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced + vars: + tox_extra_args: '-- vault:lunar-antelope' +- job: + name: vault-lunar-antelope-namespaced + parent: func-target + dependencies: + - vault-jammy-yoga_rgw + - vault-jammy-yoga-namespaced + vars: + tox_extra_args: '-- vault:lunar-antelope-namespaced' diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml new file mode 100644 index 00000000..bf9daa5b --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml @@ -0,0 +1,99 @@ +options: + source: &source cloud:jammy-antelope + +series: jammy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml new file mode 100644 index 00000000..41d9c1c1 --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml @@ -0,0 +1,124 @@ +options: + source: &source cloud:jammy-antelope + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: latest/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + 
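+  # vault issues TLS certificates to keystone (above) and ceph-radosgw
+  # (below) over the certificates relation, so this namespaced bundle is
+  # exercised with HTTPS endpoints.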
- - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/jammy-antelope.yaml b/ceph-radosgw/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..e6cd4982 --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,123 @@ +options: + source: &source cloud:jammy-antelope + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: latest/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/lunar-antelope.yaml b/ceph-radosgw/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..6d55ab44 --- /dev/null +++ b/ceph-radosgw/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,123 @@ +options: + source: &source cloud:lunar-antelope + +series: lunar + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: latest/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index d45160cd..03bf199c 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -15,13 +15,19 @@ smoke_bundles: dev_bundles: - jammy-yoga-multisite - jammy-zed-multisite + - lunar-antelope-multisite - kinetic-zed-multisite + - jammy-antelope-multisite - vault: jammy-yoga - vault: jammy-yoga-namespaced - vault: jammy-zed + - vault: lunar-antelope - vault: jammy-zed-namespaced + - vault: lunar-antelope-namespaced - vault: kinetic-zed + - vault: jammy-antelope - vault: kinetic-zed-namespaced + - vault: jammy-antelope-namespaced target_deploy_status: vault: @@ -43,4 +49,6 @@ tests: tests_options: force_deploy: - kinetic-zed + - jammy-antelope - kinetic-zed-namespaced + - jammy-antelope-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index ae4d124c..2cb6ca16 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -25,7 +25,7 @@ setenv = VIRTUAL_ENV={envdir} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh + {toxinidir}/rename.sh passenv = HOME TERM From 67f3739a7e0141d07eefe37c588eacf526f2a64a Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 14 Mar 2023 10:55:08 +0100 Subject: [PATCH 2488/2699] Fix pristine status Only check configured devices instead of all system devices and don't check already processed devices when computing pristine status Closes-Bug: #1988088 Change-Id: Ia6bf7a5b7abddb72c3ec61fd9e02daf42e94c2da func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1025 --- ceph-osd/hooks/ceph_hooks.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py 
b/ceph-osd/hooks/ceph_hooks.py index 7d33f45a..6622896a 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -849,8 +849,12 @@ def assess_status(): 'Unit is ready ({} OSD)'.format(len(running_osds))) else: pristine = True - osd_journals = get_journal_devices() - for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)): + # Check unmounted disks that should be configured but don't check + # journals or already processed devices + config_devices = (set(get_devices()) & set(ceph.unmounted_disks())) + osd_journals = set(get_journal_devices()) + touched_devices = set(kv().get('osd-devices', [])) + for dev in config_devices - osd_journals - touched_devices: if (not ceph.is_active_bluestore_device(dev) and not ceph.is_pristine_disk(dev) and not ceph.is_mapped_luks_device(dev)): From 33d5bea06a959f28ecbb95c65e727d94e11164c4 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 20 Mar 2023 14:01:08 -0400 Subject: [PATCH 2489/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I583679055cd8add80321282edd7ec12eaa5826fc --- ceph-proxy/.zuul.yaml | 1 + ceph-proxy/charmcraft.yaml | 6 + ceph-proxy/config.yaml | 2 +- ceph-proxy/metadata.yaml | 2 + ceph-proxy/osci.yaml | 14 +- ceph-proxy/tests/bundles/jammy-antelope.yaml | 199 +++++++++++++++ .../tests/bundles/lunar-antelope-ec.yaml | 228 ++++++++++++++++++ ceph-proxy/tests/bundles/lunar-antelope.yaml | 199 +++++++++++++++ ceph-proxy/tests/tests.yaml | 6 + 9 files changed, 654 insertions(+), 3 deletions(-) create mode 100644 ceph-proxy/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-proxy/tests/bundles/lunar-antelope-ec.yaml create mode 100644 ceph-proxy/tests/bundles/lunar-antelope.yaml diff --git a/ceph-proxy/.zuul.yaml b/ceph-proxy/.zuul.yaml index 168494f5..d6c1104b 100644 --- a/ceph-proxy/.zuul.yaml +++ b/ceph-proxy/.zuul.yaml @@ -2,4 +2,5 @@ templates: - openstack-python3-charm-zed-jobs - openstack-python3-charm-yoga-jobs + - openstack-python3-charm-jobs diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 09f03428..37cacbce 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -31,3 +31,9 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 6f6c9daf..80f8564c 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -10,7 +10,7 @@ options: Setting this to True will allow supporting services to log to syslog. source: type: string - default: zed + default: antelope description: | Repository from which to install. 
May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 22689a74..7854a2dc 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,6 +11,8 @@ tags: - misc series: - jammy +- kinetic +- lunar extra-bindings: public: cluster: diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index 0f563793..3614568e 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -4,11 +4,14 @@ - charm-unit-jobs-py39 - charm-unit-jobs-py310 - charm-zed-functional-jobs + - charm-functional-jobs check: jobs: - jammy-yoga-ec - kinetic-zed-ec: voting: false + - lunar-antelope-ec: + voting: false vars: needs_charm_build: true charm_build_name: ceph-proxy @@ -23,11 +26,18 @@ - name: tox-py310 soft: true vars: - tox_extra_args: erasure-coded:jammy-yoga-ec + tox_extra_args: -- erasure-coded:jammy-yoga-ec - job: name: kinetic-zed-ec parent: func-target dependencies: - jammy-yoga-ec vars: - tox_extra_args: erasure-coded:kinetic-zed-ec + tox_extra_args: -- erasure-coded:kinetic-zed-ec +- job: + name: lunar-antelope-ec + parent: func-target + dependencies: + - jammy-yoga-ec + vars: + tox_extra_args: -- erasure-coded:lunar-antelope-ec diff --git a/ceph-proxy/tests/bundles/jammy-antelope.yaml b/ceph-proxy/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..8677eee2 --- /dev/null +++ b/ceph-proxy/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,199 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + channel: latest/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: latest/edge + + ceph-proxy: + charm: ../../ceph-proxy.charm + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + channel: latest/edge + + cinder-ceph: + charm: ch:cinder-ceph + options: + restrict-ceph-pools: True + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + channel: latest/edge + + 
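+  # ceph-proxy stands in for a pre-existing external Ceph cluster in this
+  # bundle: cinder-ceph and ceph-radosgw relate to ceph-proxy rather than
+  # to ceph-mon directly (see the relations section below).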
glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: latest/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: latest/edge + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml b/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml new file mode 100644 index 00000000..f4fd5f4c --- /dev/null +++ b/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml @@ -0,0 +1,228 @@ +variables: + openstack-origin: &openstack-origin distro + +series: lunar + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + +applications: + + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + channel: latest/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + - '16' + - '17' + - '18' + channel: latest/edge + + ceph-proxy: + charm: ../../ceph-proxy.charm + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '10' + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + channel: latest/edge + + cinder-ceph: + charm: ch:cinder-ceph + options: + restrict-ceph-pools: True + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: lrc + ec-profile-locality: 3 + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: 
*openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: jerasure + to: + - '14' + channel: latest/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + ec-profile-plugin: isa + libvirt-image-backend: rbd + to: + - '15' + channel: latest/edge + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:ceph' + - 'ceph-proxy:client' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:ceph' + - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/lunar-antelope.yaml b/ceph-proxy/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..bf65396d --- /dev/null +++ b/ceph-proxy/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,199 @@ +variables: + openstack-origin: &openstack-origin cloud:lunar-antelope + +series: lunar + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + to: + - '3' + - '4' + - '5' + channel: latest/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 10G + options: + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: latest/edge + + ceph-proxy: + charm: ../../ceph-proxy.charm + num_units: 1 + options: + source: *openstack-origin + to: + - '9' + + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 1 + options: + source: *openstack-origin + to: + - '10' + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: "" + ephemeral-unmount: "" + glance-api-version: 2 + overwrite: "false" + constraints: mem=2048 + to: + - '11' + channel: latest/edge + + cinder-ceph: + charm: ch:cinder-ceph + options: + restrict-ceph-pools: True + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + admin-password: openstack + constraints: mem=1024 + to: + - '12' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + constraints: mem=1024 + options: + source: *openstack-origin + to: + - '13' + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: latest/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: latest/edge + + +relations: + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-proxy:radosgw' + - 'ceph-radosgw:mon' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-proxy:client' + + - - 'glance:image-service' + - 'nova-compute:image-service' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index ad74db78..ee84d3e2 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -19,9 +19,13 @@ dev_bundles: - jammy-yoga - erasure-coded: jammy-yoga-ec - jammy-zed + - lunar-antelope - erasure-coded: jammy-zed-ec + - erasure-coded: 
lunar-antelope-ec - kinetic-zed + - jammy-antelope - erasure-coded: kinetic-zed-ec + - erasure-coded: jammy-antelope-ec smoke_bundles: - jammy-yoga @@ -49,4 +53,6 @@ target_deploy_status: tests_options: force_deploy: - kinetic-zed + - jammy-antelope - kinetic-zed-ec + - jammy-antelope-ec From 45ac5f74b31f4b59e40a2a0ba8972352f0e04bbf Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Tue, 28 Mar 2023 09:01:40 -0300 Subject: [PATCH 2490/2699] Charm-helpers sync Sync to get Antelope support. Change-Id: I850bf37fa395949d6df9ad2c5157b75012ee8ba2 --- .../charmhelpers/contrib/openstack/context.py | 76 +++++++++++-------- .../contrib/openstack/deferred_events.py | 4 +- .../charmhelpers/contrib/openstack/utils.py | 1 + .../contrib/openstack/vaultlocker.py | 7 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/core/unitdata.py | 11 +-- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 18 ++++- 7 files changed, 78 insertions(+), 40 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 5e33d188..d894b6a6 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -469,66 +469,80 @@ def __call__(self): # forwards compat with application data # bag driven approach to relation. _adata = relation_get(rid=rid, app=remote_service_name(rid)) + adata = {} + # if no app data bag presented - fallback + # to legacy unit based relation data + rdata = relation_get(rid=rid, unit=unit) if _adata: # New app data bag uses - instead of _ # in key names - remap for compat with # existing relation data keys for key, value in _adata.items(): if key == 'api-version': - rdata[key.replace('-', '_')] = value.strip('v') + adata[key.replace('-', '_')] = value.strip('v') else: - rdata[key.replace('-', '_')] = value + adata[key.replace('-', '_')] = value # Re-map some keys for backwards compatibility for target, source in self._forward_compat_remaps.items(): - rdata[target] = _adata.get(source) - else: - # No app data bag presented - fallback - # to legacy unit based relation data - rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') + adata[target] = _adata.get(source) + # Now preferentially get data from the app data bag, but if + # it's not available, get it from the legacy based relation + # data. 
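+            # For example (hypothetical values): given
+            #   adata = {'service_host': '10.0.0.10'}
+            #   rdata = {'service_host': '10.0.0.2', 'service_port': '5000'}
+            # _resolve('service_host') -> '10.0.0.10' (app data bag wins),
+            # _resolve('service_port') -> '5000' (legacy unit data fallback).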
+ + def _resolve(key): + return adata.get(key) or rdata.get(key) + + serv_host = _resolve('service_host') serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') + auth_host = _resolve('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - int_host = rdata.get('internal_host') + int_host = _resolve('internal_host',) int_host = format_ipv6_addr(int_host) or int_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - int_protocol = rdata.get('internal_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), + svc_protocol = _resolve('service_protocol') or 'http' + auth_protocol = _resolve('auth_protocol') or 'http' + admin_role = _resolve('admin_role') or 'Admin' + int_protocol = _resolve('internal_protocol') or 'http' + api_version = _resolve('api_version') or '2.0' + ctxt.update({'service_port': _resolve('service_port'), 'service_host': serv_host, 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), + 'auth_port': _resolve('auth_port'), 'internal_host': int_host, - 'internal_port': rdata.get('internal_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), + 'internal_port': _resolve('internal_port'), + 'admin_tenant_name': _resolve('service_tenant'), + 'admin_user': _resolve('service_username'), + 'admin_password': _resolve('service_password'), + 'admin_role': admin_role, 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, 'internal_protocol': int_protocol, 'api_version': api_version}) - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') + service_type = _resolve('service_type') + if service_type: + ctxt['service_type'] = service_type if float(api_version) > 2: ctxt.update({ - 'admin_domain_name': rdata.get('service_domain'), - 'service_project_id': rdata.get('service_tenant_id'), - 'service_domain_id': rdata.get('service_domain_id')}) + 'admin_domain_name': _resolve('service_domain'), + 'service_project_id': _resolve('service_tenant_id'), + 'service_domain_id': _resolve('service_domain_id')}) # NOTE: # keystone-k8s operator presents full URLS # for all three endpoints - public and internal are # externally addressable for machine based charm - if 'public_auth_url' in rdata: + public_auth_url = _resolve('public_auth_url') + # if 'public_auth_url' in rdata: + if public_auth_url: ctxt.update({ - 'public_auth_url': rdata.get('public_auth_url'), + 'public_auth_url': public_auth_url, }) - if 'internal_auth_url' in rdata: + internal_auth_url = _resolve('internal_auth_url') + # if 'internal_auth_url' in rdata: + if internal_auth_url: ctxt.update({ - 'internal_auth_url': rdata.get('internal_auth_url'), + 'internal_auth_url': internal_auth_url, }) # we keep all veriables in ctxt for compatibility and @@ -543,8 +557,8 @@ def __call__(self): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') + ctxt['admin_tenant_id'] = _resolve('service_tenant_id') + ctxt['admin_domain_id'] = _resolve('service_domain_id') return ctxt return {} diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py index 
94eacf6c..4c46e41a 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -127,7 +127,9 @@ def deferred_events(): """ events = [] for defer_file in deferred_events_files(): - events.append((defer_file, read_event_file(defer_file))) + event = read_event_file(defer_file) + if event.policy_requestor_name == hookenv.service_name(): + events.append((defer_file, event)) return events diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 47e700e8..3d52eb16 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -159,6 +159,7 @@ ('2021.2', 'xena'), ('2022.1', 'yoga'), ('2022.2', 'zed'), + ('2023.1', 'antelope'), ]) # The ugly duckling - must list releases oldest to newest diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py index e5418c39..002bc579 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -173,7 +173,12 @@ def retrieve_secret_id(url, token): # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate if not isinstance(client.adapter, hvac.adapters.Request): client.adapter = hvac.adapters.Request(base_uri=url, token=token) - response = client._post('/v1/sys/wrapping/unwrap') + try: + # hvac == 1.0.0 has an API to unwrap with the user token + response = client.sys.unwrap() + except AttributeError: + # fallback to hvac < 1.0.0 + response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() return data['data']['secret_id'] diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index cc2d89fe..a279d5be 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -31,6 +31,7 @@ 'impish', 'jammy', 'kinetic', + 'lunar', ) diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index d9b8d0b0..8f4bbc61 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -171,8 +171,9 @@ class Storage(object): path parameter which causes sqlite3 to only build the db in memory. This should only be used for testing purposes. """ - def __init__(self, path=None): + def __init__(self, path=None, keep_revisions=False): self.db_path = path + self.keep_revisions = keep_revisions if path is None: if 'UNIT_STATE_DB' in os.environ: self.db_path = os.environ['UNIT_STATE_DB'] @@ -242,7 +243,7 @@ def unset(self, key): Remove a key from the database entirely. 
""" self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) @@ -259,14 +260,14 @@ def unsetrange(self, keys=None, prefix=""): if keys is not None: keys = ['%s%s' % (prefix, key) for key in keys] self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) else: self.cursor.execute('delete from kv where key like ?', ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) @@ -299,7 +300,7 @@ def set(self, key, value): where key = ?''', [serialized, key]) # Save - if not self.revision: + if (not self.keep_revisions) or (not self.revision): return value self.cursor.execute( diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index fcf09675..effc884a 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -230,6 +230,14 @@ 'zed/proposed': 'jammy-proposed/zed', 'jammy-zed/proposed': 'jammy-proposed/zed', 'jammy-proposed/zed': 'jammy-proposed/zed', + # antelope + 'antelope': 'jammy-updates/antelope', + 'jammy-antelope': 'jammy-updates/antelope', + 'jammy-antelope/updates': 'jammy-updates/antelope', + 'jammy-updates/antelope': 'jammy-updates/antelope', + 'antelope/proposed': 'jammy-proposed/antelope', + 'jammy-antelope/proposed': 'jammy-proposed/antelope', + 'jammy-proposed/antelope': 'jammy-proposed/antelope', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -261,6 +269,7 @@ 'xena', 'yoga', 'zed', + 'antelope', ) @@ -288,6 +297,7 @@ ('impish', 'xena'), ('jammy', 'yoga'), ('kinetic', 'zed'), + ('lunar', 'antelope'), ]) @@ -945,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), try: result = subprocess.check_call(cmd, env=env, **kwargs) except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 + result = e.returncode + if result not in retry_results: + # a non-retriable exitcode was produced + raise + retry_count += 1 if retry_count > max_retries: + # a retriable exitcode was produced more than {max_retries} times raise - result = e.returncode log(retry_message) time.sleep(CMD_RETRY_DELAY) From 28a633d42bf18d777152decc34607f2b0565d4fb Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 6 Apr 2023 15:56:27 -0400 Subject: [PATCH 2491/2699] Charm-helpers sync charm-helpers sync to pick up Antelope UCA support Change-Id: Ie649be98ecd338b6441a59a0ad32aa696fc8ca99 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++ .../charmhelpers/contrib/hahelpers/cluster.py | 2 +- .../hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 149 ++++++++++++++---- .../contrib/openstack/deferred_events.py | 4 +- .../contrib/openstack/ha/utils.py | 29 ++++ .../charmhelpers/contrib/openstack/ip.py | 25 +++ .../contrib/openstack/ssh_migrations.py | 4 +- 
.../contrib/openstack/templates/haproxy.cfg | 5 + .../templates/section-keystone-authtoken | 2 + .../section-keystone-authtoken-mitaka | 2 + .../openstack/templates/section-service-user | 11 ++ .../charmhelpers/contrib/openstack/utils.py | 7 +- .../contrib/openstack/vaultlocker.py | 7 +- .../contrib/storage/linux/utils.py | 21 ++- ceph-radosgw/hooks/charmhelpers/core/host.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/core/unitdata.py | 11 +- .../hooks/charmhelpers/fetch/ubuntu.py | 36 ++++- 19 files changed, 282 insertions(+), 54 deletions(-) create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py index bad7a533..ac002bc6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -19,6 +19,7 @@ import glob import grp +import json import os import pwd import re @@ -30,6 +31,7 @@ from charmhelpers.core.hookenv import ( application_name, config, + ERROR, hook_name, local_unit, log, @@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): :param str unit_name: Unit name to use in check description :param bool immediate_check: For sysv init, run the service check immediately """ + # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details. + # just remove check_haproxy if haproxy is added as a lsb resource in hacluster. + for rid in relation_ids("ha"): + ha_resources = relation_get("json_resources", rid=rid, unit=local_unit()) + if ha_resources: + try: + ha_resources_parsed = json.loads(ha_resources) + except ValueError as e: + log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR) + raise + if "lsb:haproxy" in ha_resources_parsed.values(): + if "haproxy" in services: + log("removed check_haproxy. 
This service will be monitored by check_crm") + services.remove("haproxy") for svc in services: # Don't add a check for these services from neutron-gateway if svc in ['ext-port', 'os-charm-phy-nic-mtu']: diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index 146beba6..ffda5fe1 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -324,7 +324,7 @@ def valid_hacluster_config(): ''' vip = config_get('vip') dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): + if not (bool(vip) ^ bool(dns)): msg = ('HA: Either vip or dns-ha must be set but not both in order to ' 'use high availability') status_set('blocked', msg) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index f8edf37a..cf9926b9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -539,7 +539,7 @@ def port_has_listener(address, port): """ cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) - return not(bool(result)) + return not (bool(result)) def assert_charm_supports_ipv6(): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 970a657b..d894b6a6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,7 @@ import time from base64 import b64decode +from distutils.version import LooseVersion from subprocess import ( check_call, check_output, @@ -39,6 +40,7 @@ from charmhelpers.fetch import ( apt_install, filter_installed_packages, + get_installed_version, ) from charmhelpers.core.hookenv import ( NoNetworkBinding, @@ -59,6 +61,7 @@ network_get_primary_address, WARNING, service_name, + remote_service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -130,6 +133,7 @@ ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" +DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404 def ensure_packages(packages): @@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): + _forward_compat_remaps = { + 'admin_user': 'admin-user-name', + 'service_username': 'service-user-name', + 'service_tenant': 'service-project-name', + 'service_tenant_id': 'service-project-id', + 'service_domain': 'service-domain-name', + } + def __init__(self, service=None, service_user=None, @@ -397,11 +409,16 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): # 'www_authenticate_uri' replaced 'auth_uri' since Stein, # see keystonemiddleware upstream sources for more info if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': - c.update(( - ('www_authenticate_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) + if 'public_auth_url' in ctxt: + c.update(( + ('www_authenticate_uri', '{}/v3'.format( + ctxt.get('public_auth_url'))),)) + else: + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) else: c.update(( ('auth_uri', "{}://{}:{}/v3".format( @@ -409,11 +426,17 @@ def 
_get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): ctxt.get('service_host', ''), ctxt.get('service_port', ''))),)) + if 'internal_auth_url' in ctxt: + c.update(( + ('auth_url', ctxt.get('internal_auth_url')),)) + else: + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))),)) + c.update(( - ('auth_url', "{}://{}:{}/v3".format( - ctxt.get('auth_protocol', ''), - ctxt.get('auth_host', ''), - ctxt.get('auth_port', ''))), ('project_domain_name', ctxt.get('admin_domain_name', '')), ('user_domain_name', ctxt.get('admin_domain_name', '')), ('project_name', ctxt.get('admin_tenant_name', '')), @@ -441,39 +464,86 @@ def __call__(self): for rid in relation_ids(self.rel_name): self.related = True for unit in related_units(rid): + rdata = {} + # NOTE(jamespage): + # forwards compat with application data + # bag driven approach to relation. + _adata = relation_get(rid=rid, app=remote_service_name(rid)) + adata = {} + # if no app data bag presented - fallback + # to legacy unit based relation data rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') + if _adata: + # New app data bag uses - instead of _ + # in key names - remap for compat with + # existing relation data keys + for key, value in _adata.items(): + if key == 'api-version': + adata[key.replace('-', '_')] = value.strip('v') + else: + adata[key.replace('-', '_')] = value + # Re-map some keys for backwards compatibility + for target, source in self._forward_compat_remaps.items(): + adata[target] = _adata.get(source) + # Now preferentially get data from the app data bag, but if + # it's not available, get it from the legacy based relation + # data. + + def _resolve(key): + return adata.get(key) or rdata.get(key) + + serv_host = _resolve('service_host') serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') + auth_host = _resolve('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - int_host = rdata.get('internal_host') + int_host = _resolve('internal_host',) int_host = format_ipv6_addr(int_host) or int_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - int_protocol = rdata.get('internal_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), + svc_protocol = _resolve('service_protocol') or 'http' + auth_protocol = _resolve('auth_protocol') or 'http' + admin_role = _resolve('admin_role') or 'Admin' + int_protocol = _resolve('internal_protocol') or 'http' + api_version = _resolve('api_version') or '2.0' + ctxt.update({'service_port': _resolve('service_port'), 'service_host': serv_host, 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), + 'auth_port': _resolve('auth_port'), 'internal_host': int_host, - 'internal_port': rdata.get('internal_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), + 'internal_port': _resolve('internal_port'), + 'admin_tenant_name': _resolve('service_tenant'), + 'admin_user': _resolve('service_username'), + 'admin_password': _resolve('service_password'), + 'admin_role': admin_role, 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, 'internal_protocol': int_protocol, 'api_version': api_version}) - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') 
+ service_type = _resolve('service_type') + if service_type: + ctxt['service_type'] = service_type if float(api_version) > 2: ctxt.update({ - 'admin_domain_name': rdata.get('service_domain'), - 'service_project_id': rdata.get('service_tenant_id'), - 'service_domain_id': rdata.get('service_domain_id')}) + 'admin_domain_name': _resolve('service_domain'), + 'service_project_id': _resolve('service_tenant_id'), + 'service_domain_id': _resolve('service_domain_id')}) + + # NOTE: + # keystone-k8s operator presents full URLS + # for all three endpoints - public and internal are + # externally addressable for machine based charm + public_auth_url = _resolve('public_auth_url') + # if 'public_auth_url' in rdata: + if public_auth_url: + ctxt.update({ + 'public_auth_url': public_auth_url, + }) + internal_auth_url = _resolve('internal_auth_url') + # if 'internal_auth_url' in rdata: + if internal_auth_url: + ctxt.update({ + 'internal_auth_url': internal_auth_url, + }) # we keep all veriables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic @@ -487,8 +557,8 @@ def __call__(self): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') + ctxt['admin_tenant_id'] = _resolve('service_tenant_id') + ctxt['admin_domain_id'] = _resolve('service_domain_id') return ctxt return {} @@ -860,9 +930,14 @@ class HAProxyContext(OSContextGenerator): interfaces = ['cluster'] def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): + address_types=None, + exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT): + if address_types is None: + address_types = ADDRESS_TYPES[:] + self.address_types = address_types self.singlenode_mode = singlenode_mode + self.exporter_stats_port = exporter_stats_port def __call__(self): if not os.path.isdir(HAPROXY_RUN_DIR): @@ -957,10 +1032,20 @@ def __call__(self): db = kv() ctxt['stat_password'] = db.get('stat-password') if not ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) + ctxt['stat_password'] = db.set('stat-password', pwgen(32)) db.flush() + # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0 + # New bind will be created and a prometheus-exporter + # will be used for path /metrics. At the same time, + # prometheus-exporter avoids using auth. 
+ haproxy_version = get_installed_version("haproxy") + if (haproxy_version and + haproxy_version.ver_str >= LooseVersion("2.0.0") and + is_relation_made("haproxy-exporter")): + ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter") + ctxt["stats_exporter_port"] = self.exporter_stats_port + for frontend in cluster_hosts: if (len(cluster_hosts[frontend]['backends']) > 1 or self.singlenode_mode): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py index 94eacf6c..4c46e41a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/deferred_events.py @@ -127,7 +127,9 @@ def deferred_events(): """ events = [] for defer_file in deferred_events_files(): - events.append((defer_file, read_event_file(defer_file))) + event = read_event_file(defer_file) + if event.policy_requestor_name == hookenv.service_name(): + events.append((defer_file, event)) return events diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py index a5cbdf53..b4912c42 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -25,6 +25,7 @@ import hashlib import json +import os import re @@ -36,6 +37,7 @@ config, status_set, DEBUG, + application_name, ) from charmhelpers.core.host import ( @@ -65,6 +67,7 @@ VIP_GROUP_NAME = 'grp_{service}_vips' DNSHA_GROUP_NAME = 'grp_{service}_hostnames' +HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard" class DNSHAException(Exception): @@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data): relation_data['groups'] = { key: ' '.join(vip_group) } + + +def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard): + """Load grafana dashboard json model and insert prometheus datasource. + + :param prometheus_app_name: name of the 'prometheus' application that will + be used as datasource in grafana dashboard + :type prometheus_app_name: str + :param haproxy_dashboard: path to haproxy dashboard + :type haproxy_dashboard: str + :return: Grafana dashboard json model as a str. + :rtype: str + """ + from charmhelpers.contrib.templating import jinja + + dashboard_template = os.path.basename(haproxy_dashboard) + dashboard_template_dir = os.path.dirname(haproxy_dashboard) + app_name = application_name() + datasource = "{} - Juju generated source".format(prometheus_app_name) + return jinja.render(dashboard_template, + {"datasource": datasource, + "app_name": app_name, + "prometheus_app_name": prometheus_app_name}, + template_dir=dashboard_template_dir, + jinja_env_args={"variable_start_string": "<< ", + "variable_end_string": " >>"}) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py index b8c94c56..2afad369 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ip.py @@ -25,6 +25,7 @@ is_ipv6, get_ipv6_addr, resolve_network_cidr, + get_iface_for_address ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'): return unit_get(unit_get_fallback) +def get_invalid_vips(): + """Check if any of the provided vips are invalid. + A vip is invalid if it doesn't belong to the subnet in any interface. 
+ If all vips are valid, this returns an empty list. + + :returns: A list of strings, where each string is an invalid vip address. + :rtype: list + """ + + clustered = is_clustered() + vips = config('vip') + if vips: + vips = vips.split() + invalid_vips = [] + + if clustered and vips: + for vip in vips: + iface_for_vip = get_iface_for_address(vip) + if iface_for_vip is None: + invalid_vips.append(vip) + + return invalid_vips + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py index 96b9f71d..0512e3a5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/ssh_migrations.py @@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None): for hosts_line in hosts: if hosts_line.rstrip(): known_hosts_list.append(hosts_line.rstrip()) - return(known_hosts_list) + return known_hosts_list def ssh_authorized_keys_lines(application_name, user=None): @@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None): for authkey_line in keys: if authkey_line.rstrip(): authorized_keys_list.append(authkey_line.rstrip()) - return(authorized_keys_list) + return authorized_keys_list def ssh_compute_remove(public_key, application_name, user=None): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 626ecbab..da2522f6 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -49,6 +49,11 @@ defaults listen stats bind {{ local_host }}:{{ stat_port }} +{%- if stats_exporter_host and stats_exporter_port %} + bind {{ stats_exporter_host }}:{{ stats_exporter_port }} + option http-use-htx + http-request use-service prometheus-exporter if { path /metrics } +{%- endif %} mode http stats enable stats hide-version diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index c9b01528..dbad506f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -12,4 +12,6 @@ signing_dir = {{ signing_dir }} {% if service_type -%} service_type = {{ service_type }} {% endif -%} +service_token_roles = {{ admin_role }} +service_token_roles_required = True {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 14c25b4d..139a0512 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -22,4 +22,6 @@ signing_dir = {{ signing_dir }} {% if use_memcache == true %} memcached_servers = {{ memcache_url }} {% endif -%} +service_token_roles = {{ admin_role }} +service_token_roles_required = True {% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user new file mode 100644 index 00000000..c740cc28 --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user @@ -0,0 +1,11 @@ +{% if auth_host -%} +[service_user] +send_service_user_token = true +auth_type = password +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +project_domain_id = default +user_domain_id = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 1fa2814a..3d52eb16 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -159,6 +159,7 @@ ('2021.2', 'xena'), ('2022.1', 'yoga'), ('2022.2', 'zed'), + ('2023.1', 'antelope'), ]) # The ugly duckling - must list releases oldest to newest @@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False): @param test: default=False, if False, test for closed, otherwise open. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ - test = not(not(test)) # ensure test is True or False + test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] map_ports = OrderedDict() @@ -1583,7 +1584,7 @@ def is_unit_paused_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) + return not (not (kv.get('unit-paused'))) except Exception: return False @@ -2181,7 +2182,7 @@ def is_unit_upgrading_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. 
- return not(not(kv.get('unit-upgrading'))) + return not (not (kv.get('unit-upgrading'))) except Exception: return False diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py index e5418c39..002bc579 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -173,7 +173,12 @@ def retrieve_secret_id(url, token): # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate if not isinstance(client.adapter, hvac.adapters.Request): client.adapter = hvac.adapters.Request(base_uri=url, token=token) - response = client._post('/v1/sys/wrapping/unwrap') + try: + # hvac == 1.0.0 has an API to unwrap with the user token + response = client.sys.unwrap() + except AttributeError: + # fallback to hvac < 1.0.0 + response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() return data['data']['secret_id'] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py index a3561760..4d05b121 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -23,6 +23,12 @@ call ) +from charmhelpers.core.hookenv import ( + log, + WARNING, + INFO +) + def _luks_uuid(dev): """ @@ -110,7 +116,7 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False, inode_size=1024): +def mkfs_xfs(device, force=False, inode_size=None): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. @@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024): :ptype device: tr :param force: Force operation :ptype: force: boolean - :param inode_size: XFS inode size in bytes + :param inode_size: XFS inode size in bytes; if set to 0 or None, + the value used will be the XFS system default :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', "size={}".format(inode_size), device] + if inode_size: + if inode_size >= 256 and inode_size <= 2048: + cmd += ['-i', "size={}".format(inode_size)] + else: + log("Config value xfs-inode-size={} is invalid. 
Using system default.".format(inode_size), level=WARNING) + else: + log("Using XFS filesystem with system default inode size.", level=INFO) + + cmd += [device] check_call(cmd) diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index ef6c8eca..70dde6a5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -954,7 +954,7 @@ def pwgen(length=None): random_generator = random.SystemRandom() random_chars = [ random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) + return ''.join(random_chars) def is_phy_iface(interface): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index cc2d89fe..a279d5be 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -31,6 +31,7 @@ 'impish', 'jammy', 'kinetic', + 'lunar', ) diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index d9b8d0b0..8f4bbc61 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -171,8 +171,9 @@ class Storage(object): path parameter which causes sqlite3 to only build the db in memory. This should only be used for testing purposes. """ - def __init__(self, path=None): + def __init__(self, path=None, keep_revisions=False): self.db_path = path + self.keep_revisions = keep_revisions if path is None: if 'UNIT_STATE_DB' in os.environ: self.db_path = os.environ['UNIT_STATE_DB'] @@ -242,7 +243,7 @@ def unset(self, key): Remove a key from the database entirely. """ self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) @@ -259,14 +260,14 @@ def unsetrange(self, keys=None, prefix=""): if keys is not None: keys = ['%s%s' % (prefix, key) for key in keys] self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) else: self.cursor.execute('delete from kv where key like ?', ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) @@ -299,7 +300,7 @@ def set(self, key, value): where key = ?''', [serialized, key]) # Save - if not self.revision: + if (not self.keep_revisions) or (not self.revision): return value self.cursor.execute( diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 93b92765..effc884a 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -230,6 +230,18 @@ 'zed/proposed': 'jammy-proposed/zed', 'jammy-zed/proposed': 'jammy-proposed/zed', 'jammy-proposed/zed': 'jammy-proposed/zed', + # antelope + 'antelope': 
'jammy-updates/antelope',
+    'jammy-antelope': 'jammy-updates/antelope',
+    'jammy-antelope/updates': 'jammy-updates/antelope',
+    'jammy-updates/antelope': 'jammy-updates/antelope',
+    'antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-proposed/antelope': 'jammy-proposed/antelope',
+
+    # OVN
+    'focal-ovn-22.03': 'focal-updates/ovn-22.03',
+    'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
 }

@@ -257,6 +269,7 @@
     'xena',
     'yoga',
     'zed',
+    'antelope',
 )

@@ -284,6 +297,7 @@
     ('impish', 'xena'),
     ('jammy', 'yoga'),
     ('kinetic', 'zed'),
+    ('lunar', 'antelope'),
 ])

@@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
     :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
+    if not packages:
+        log("Nothing to install", level=DEBUG)
+        return
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']

@@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False):
         (r"^cloud-archive:(.*)$", _add_apt_repository),
         (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
         (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)$", _add_cloud_pocket),
         (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@@ -750,6 +768,11 @@ def _add_apt_repository(spec):
     )


+def __write_sources_list_d_actual_pocket(file, actual_pocket):
+    with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
 def _add_cloud_pocket(pocket):
     """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list

@@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket):
             'Unsupported cloud: source option %s' % pocket)
     actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+    __write_sources_list_d_actual_pocket(
+        'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
+        actual_pocket)


 def _add_cloud_staging(cloud_archive_release, openstack_release):

@@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
         try:
             result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
-            retry_count = retry_count + 1
+            result = e.returncode
+            if result not in retry_results:
+                # a non-retriable exitcode was produced
+                raise
+            retry_count += 1
             if retry_count > max_retries:
+                # a retriable exitcode was produced more than {max_retries} times
                 raise
-            result = e.returncode
             log(retry_message)
             time.sleep(CMD_RETRY_DELAY)

From de5c76611fbf638dc5fc3ba2b9f8e7986450e8bf Mon Sep 17 00:00:00 2001
From: Luciano Lo Giudice
Date: Thu, 6 Apr 2023 19:14:11 -0300
Subject: [PATCH 2492/2699] Remove relation test

The CephRelationTest class wasn't of much use and the test was rather
flaky, since it compared public IP addresses.
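To illustrate why such a comparison is flaky, a minimal sketch of the failing
pattern (hypothetical helper names, not the actual zaza test code):

    # Hypothetical sketch: the test effectively asserted that a unit's public
    # address matched the 'ceph-public-address' it advertised on the relation.
    # Behind NAT or with multiple network spaces the two values can
    # legitimately differ, so the assertion fails intermittently.
    public_ip = get_unit_public_address('ceph-mon/0')  # hypothetical helper
    advertised = get_relation_value('ceph-mon/0', 'ceph-public-address')  # hypothetical helper
    assert public_ip == advertised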
Change-Id: I9a77f4a86412f9bf4d27c0d7e0a7fe34d5a403ff
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1034
---
 ceph-osd/tests/tests.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml
index e9490a78..a1c0d6c6 100644
--- a/ceph-osd/tests/tests.yaml
+++ b/ceph-osd/tests/tests.yaml
@@ -13,7 +13,6 @@ configure:
 tests:
   - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
-  - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
   - zaza.openstack.charm_tests.ceph.tests.CephTest
   - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
   - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest

From 51484dd91f7dac1dba603268f1828badebaa773b Mon Sep 17 00:00:00 2001
From: Luciano Lo Giudice
Date: Thu, 6 Apr 2023 19:15:49 -0300
Subject: [PATCH 2493/2699] Remove relation test

The CephRelationTest class wasn't of much use and the test was rather
flaky, since it compared public IP addresses.

Change-Id: Iba5aad1d895ba8b28ce364899a1e41275dc3003b
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1034
---
 ceph-mon/tests/tests.yaml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml
index 6e1fa867..075a4548 100644
--- a/ceph-mon/tests/tests.yaml
+++ b/ceph-mon/tests/tests.yaml
@@ -16,13 +16,11 @@ tests:
 - install:
   - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
   - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
-  - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
   - zaza.openstack.charm_tests.ceph.tests.CephTest
   - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
   - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest
   - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
   - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
-  - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
   - zaza.openstack.charm_tests.ceph.tests.CephTest
   - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
   - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest
@@ -30,7 +28,6 @@ tests:
   - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-mon
   - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
   - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
-  - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
   - zaza.openstack.charm_tests.ceph.tests.CephTest
   - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
   - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest

From 4cb17e3809a890c4b4579999be97e142310e44bf Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Mon, 8 May 2023 20:40:03 +0200
Subject: [PATCH 2494/2699] Fix: testing bundles for jammy and lunar were off

Change-Id: I314fef8551e896ab35678bc78f0233cb42030413
---
 ceph-mon/tests/bundles/jammy-antelope.yaml | 20 ++++++++-------
 ceph-mon/tests/bundles/jammy-zed.yaml      | 20 ++++++++-------
 ceph-mon/tests/bundles/lunar-antelope.yaml | 30 +++++++++++-----------
 3 files changed, 37 insertions(+), 33 deletions(-)

diff --git a/ceph-mon/tests/bundles/jammy-antelope.yaml b/ceph-mon/tests/bundles/jammy-antelope.yaml
index bb475bc1..026a81db 100644
--- a/ceph-mon/tests/bundles/jammy-antelope.yaml
+++ b/ceph-mon/tests/bundles/jammy-antelope.yaml
@@ -1,7 +1,9 @@
 variables:
-  openstack-origin: &openstack-origin cloud:focal-yoga
+  openstack-origin: &openstack-origin cloud:jammy-antelope

-series: focal
+local_overlay_enabled: False
+
+series: jammy

 comment:
 - 'machines section to decide order of deployment.
database sooner = faster' @@ -110,7 +112,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -120,7 +122,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: yoga/edge + channel: 2023.1/edge glance: expose: True @@ -130,7 +132,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: 2023.1/edge cinder: expose: True @@ -142,11 +144,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: yoga/edge + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -156,7 +158,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: 2023.1/edge placement: charm: ch:placement @@ -165,7 +167,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: 2023.1/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/tests/bundles/jammy-zed.yaml b/ceph-mon/tests/bundles/jammy-zed.yaml index bb475bc1..071ac6e4 100644 --- a/ceph-mon/tests/bundles/jammy-zed.yaml +++ b/ceph-mon/tests/bundles/jammy-zed.yaml @@ -1,7 +1,9 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin cloud:jammy-zed -series: focal +local_overlay_enabled: False + +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -110,7 +112,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: zed/edge nova-compute: charm: ch:nova-compute @@ -120,7 +122,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: yoga/edge + channel: zed/edge glance: expose: True @@ -130,7 +132,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: zed/edge cinder: expose: True @@ -142,11 +144,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: yoga/edge + channel: zed/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: zed/edge nova-cloud-controller: expose: True @@ -156,7 +158,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: zed/edge placement: charm: ch:placement @@ -165,7 +167,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: zed/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/tests/bundles/lunar-antelope.yaml b/ceph-mon/tests/bundles/lunar-antelope.yaml index 24818960..23f7a744 100644 --- a/ceph-mon/tests/bundles/lunar-antelope.yaml +++ b/ceph-mon/tests/bundles/lunar-antelope.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: jammy +series: lunar comment: - 'machines section to decide order of deployment. 
database sooner = faster' @@ -34,19 +34,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: latest/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: latest/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: latest/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: latest/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: latest/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -57,7 +57,7 @@ applications: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -101,7 +101,7 @@ applications: source: *openstack-origin to: - '9' - channel: 3.9/edge + channel: latest/edge keystone: expose: True @@ -111,7 +111,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -121,7 +121,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: yoga/edge + channel: 2023.1/edge glance: expose: True @@ -131,7 +131,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: 2023.1/edge cinder: expose: True @@ -143,11 +143,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: yoga/edge + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -157,7 +157,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: 2023.1/edge placement: charm: ch:placement @@ -166,7 +166,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: 2023.1/edge prometheus2: charm: ch:prometheus2 From 70cb655aaaa865cb02fc1e9d602011bb45d89581 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 11 May 2023 15:35:27 +0200 Subject: [PATCH 2495/2699] Testing: use mysql and rabbitmq from LTS For better stability use LTS series for rabbitmq and mysql when testing instead of interim releases. Also remove xena (non-lts) from tests and yoga as a source default Change-Id: Ie443c55dc4cc1b7f63eacfee79b28f210f1277e4 --- ceph-mon/config.yaml | 2 +- ceph-mon/osci.yaml | 1 - ceph-mon/tests/bundles/jammy-antelope.yaml | 30 +++++------ ceph-mon/tests/bundles/jammy-zed.yaml | 16 +++--- ceph-mon/tests/bundles/lunar-antelope.yaml | 58 +++++++++++----------- 5 files changed, 50 insertions(+), 57 deletions(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 83248ffc..fb69ac2e 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -10,7 +10,7 @@ options: If set to True, supporting services will log to syslog. source: type: string - default: yoga + default: distro description: | Optional configuration to support use of additional sources such as: . 
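For reference, the values accepted by `source` follow the patterns handled by
charmhelpers.fetch.ubuntu earlier in this series (add_source and
CLOUD_ARCHIVE_POCKETS); a minimal sketch of plausible settings, illustrative
only and not an exhaustive list:

    source: distro                         # the new default: stay on the Ubuntu archive
    source: cloud:jammy-antelope           # an Ubuntu Cloud Archive pocket
    source: cloud:jammy-antelope/proposed  # the matching proposed pocket
    source: ppa:example-team/example-ppa   # hypothetical PPA; any ppa: spec matches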
diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 4349fa91..f97c553f 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -2,7 +2,6 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - - charm-xena-functional-jobs - charm-yoga-functional-jobs - charm-zed-functional-jobs - charm-functional-jobs diff --git a/ceph-mon/tests/bundles/jammy-antelope.yaml b/ceph-mon/tests/bundles/jammy-antelope.yaml index 026a81db..1be00105 100644 --- a/ceph-mon/tests/bundles/jammy-antelope.yaml +++ b/ceph-mon/tests/bundles/jammy-antelope.yaml @@ -35,30 +35,35 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: 8.0/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: 3.9/edge ceph-osd: charm: ch:ceph-osd @@ -95,15 +100,6 @@ applications: to: - '17' - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - keystone: expose: True charm: ch:keystone diff --git a/ceph-mon/tests/bundles/jammy-zed.yaml b/ceph-mon/tests/bundles/jammy-zed.yaml index 071ac6e4..a5d6e807 100644 --- a/ceph-mon/tests/bundles/jammy-zed.yaml +++ b/ceph-mon/tests/bundles/jammy-zed.yaml @@ -35,30 +35,28 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: 8.0/edge ceph-osd: charm: ch:ceph-osd @@ -98,8 +96,6 @@ applications: rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin to: - '9' channel: 3.9/edge diff --git a/ceph-mon/tests/bundles/lunar-antelope.yaml b/ceph-mon/tests/bundles/lunar-antelope.yaml index 23f7a744..134abb9e 100644 --- a/ceph-mon/tests/bundles/lunar-antelope.yaml +++ b/ceph-mon/tests/bundles/lunar-antelope.yaml @@ -1,5 +1,7 @@ variables: openstack-origin: &openstack-origin distro + # use infra (mysql, rabbit) from lts for stability + infra-series: &infra-series jammy series: lunar @@ -8,11 +10,15 @@ comment: machines: '0': constraints: mem=3072M + series: *infra-series '1': constraints: mem=3072M + series: *infra-series '2': constraints: mem=3072M + series: *infra-series '3': + series: *infra-series '4': '5': '6': @@ -26,38 +32,43 @@ machines: '14': '15': '16': - series: focal '17': + series: focal applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: 
charm: ch:mysql-router
-    channel: latest/edge
+    channel: 8.0/edge
   nova-cloud-controller-mysql-router:
     charm: ch:mysql-router
-    channel: latest/edge
+    channel: 8.0/edge
   placement-mysql-router:
     charm: ch:mysql-router
-    channel: latest/edge
+    channel: 8.0/edge

   mysql-innodb-cluster:
     charm: ch:mysql-innodb-cluster
     num_units: 3
-    options:
-      source: *openstack-origin
     to:
       - '0'
       - '1'
       - '2'
-    channel: latest/edge
+    channel: 8.0/edge
+
+  rabbitmq-server:
+    charm: ch:rabbitmq-server
+    num_units: 1
+    to:
+      - '3'
+    channel: 3.9/edge

   ceph-osd:
     charm: ch:ceph-osd
@@ -68,9 +79,9 @@ applications:
       source: *openstack-origin
       osd-devices: '/dev/test-non-existent'
     to:
-      - '3'
-      - '4'
-      - '5'
+      - '4'
+      - '5'
+      - '6'
     channel: quincy/edge

   ceph-mon:
@@ -81,9 +92,9 @@ applications:
       source: *openstack-origin
       monitor-count: '3'
     to:
-      - '6'
-      - '7'
-      - '8'
+      - '7'
+      - '8'
+      - '9'

   ceph-fs:
     charm: ch:ceph-fs
@@ -92,16 +103,7 @@ applications:
       source: *openstack-origin
     channel: quincy/edge
     to:
-      - '17'
-
-  rabbitmq-server:
-    charm: ch:rabbitmq-server
-    num_units: 1
-    options:
-      source: *openstack-origin
-    to:
-      - '9'
-    channel: latest/edge
+      - '10'

   keystone:
     expose: True
@@ -110,7 +112,7 @@ applications:
     options:
       openstack-origin: *openstack-origin
     to:
-      - '10'
+      - '11'
     channel: 2023.1/edge

   nova-compute:
@@ -120,7 +122,7 @@ applications:
       openstack-origin: *openstack-origin
       libvirt-image-backend: rbd
     to:
-      - '11'
+      - '12'
     channel: 2023.1/edge

   glance:
@@ -130,7 +132,7 @@ applications:
     options:
       openstack-origin: *openstack-origin
     to:
-      - '12'
+      - '13'
     channel: 2023.1/edge

   cinder:
@@ -142,7 +144,7 @@ applications:
       glance-api-version: '2'
       openstack-origin: *openstack-origin
     to:
-      - '13'
+      - '14'
     channel: 2023.1/edge

   cinder-ceph:
@@ -156,7 +158,7 @@ applications:
     options:
       openstack-origin: *openstack-origin
     to:
-      - '14'
+      - '15'
     channel: 2023.1/edge

   placement:
@@ -165,7 +167,7 @@ applications:
     options:
       openstack-origin: *openstack-origin
     to:
-      - '15'
+      - '16'
     channel: 2023.1/edge

   prometheus2:
@@ -173,7 +175,7 @@
     charm: ch:prometheus2
     num_units: 1
     series: focal
     to:
-      - '16'
+      - '17'

 relations:
   - - 'nova-compute:amqp'

From c05f78bc88065234bfb9e06de21bf8324fc32213 Mon Sep 17 00:00:00 2001
From: jneo8
Date: Thu, 27 Apr 2023 14:31:39 +0800
Subject: [PATCH 2496/2699] Fix persistent config file not update bug

When Ceph does a version upgrade, it checks the previous release via the
`source` config variable, which is stored in a persistent file. But the
persistent file update is broken: we use hookenv.Config from the ops
framework, but hookenv._run_atexit, which saves the change to the file,
is never called.

Partial-Bug: #2007976
Change-Id: Ibf12a2b87736cb1d32788672fb390e027f15b936
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1047
---
 ceph-mon/src/charm.py             | 9 +++++++++
 ceph-mon/tests/tests.yaml         | 1 +
 ceph-mon/unit_tests/test_charm.py | 5 +++++
 3 files changed, 15 insertions(+)

diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py
index 66021235..940b8779 100755
--- a/ceph-mon/src/charm.py
+++ b/ceph-mon/src/charm.py
@@ -157,6 +157,13 @@ def on_nrpe_relation(self, event):
     def on_commit(self, _event):
         self.ceph_status.assess_status()

+    def on_pre_commit(self, _event):
+        # Fix bug: https://bugs.launchpad.net/charm-ceph-mon/+bug/2007976
+        # The persistent config file doesn't update because the config save
+        # function handled by atexit is not triggered.
+        # Trigger it manually here.
+        hooks.hookenv._run_atexit()

     # Actions.
def _observe_action(self, on_action, callable): @@ -273,6 +280,8 @@ def __init__(self, *args): fw.observe(self.on.notify_clients, self.notify_clients) + fw.observe(self.on.framework.on.pre_commit, self.on_pre_commit) + def ready_for_service(self): return hooks.ready_for_service() diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 075a4548..197e9205 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -26,6 +26,7 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest # Charm upgrade, then re-run tests - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-mon + - zaza.openstack.charm_tests.ceph.tests.CephMonJujuPersistent - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephTest diff --git a/ceph-mon/unit_tests/test_charm.py b/ceph-mon/unit_tests/test_charm.py index ee313778..edd009a4 100644 --- a/ceph-mon/unit_tests/test_charm.py +++ b/ceph-mon/unit_tests/test_charm.py @@ -63,3 +63,8 @@ def test_on_install( fatal=True, ) apt_update.assert_called() + + @patch("charm.hooks") + def test_on_pre_commit(self, hooks): + self.harness.charm.on.framework.on.pre_commit.emit() + hooks.hookenv._run_atexit.assert_called() From 29ad43ef8b9ad2d6fe9ed4f4d78b4e01035c8e7d Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 24 May 2023 12:54:45 +0200 Subject: [PATCH 2497/2699] Testing: remove unused grafana plugin Due to a Grafana bug[0] installing zip plugins has issues. We are also not testing plugins, therefore removing plugin installation from functest. [0] https://bugs.launchpad.net/charm-grafana/+bug/2017810 Change-Id: I4de5e911754cc5d99cdf94e394c81a70269c50e8 --- ceph-dashboard/tests/bundles/focal-yoga.yaml | 1 - ceph-dashboard/tests/bundles/focal.yaml | 1 - ceph-dashboard/tests/bundles/jammy-yoga.yaml | 1 - ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 | 1 - 4 files changed, 4 deletions(-) diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml index 54037750..faf66c50 100644 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -47,7 +47,6 @@ applications: num_units: 1 options: anonymous: True - install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip install_method: snap allow_embedding: True telegraf: diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 34a95591..3097ee77 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -41,7 +41,6 @@ applications: num_units: 1 options: anonymous: True - install_plugins: https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip install_method: snap allow_embedding: True telegraf: diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index 0e652340..4bd82824 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -43,7 +43,6 @@ applications: series: focal options: anonymous: True - install_plugins: 
https://storage.googleapis.com/plugins-community/vonage-status-panel/release/1.0.11/vonage-status-panel-1.0.11.zip,https://storage.googleapis.com/plugins-community/grafana-piechart-panel/release/1.6.2/grafana-piechart-panel-1.6.2.zip install_method: snap allow_embedding: True #telegraf: diff --git a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 index 3539227a..3248e700 100644 --- a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 +++ b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 @@ -2,4 +2,3 @@ applications: grafana: options: http_proxy: '{{ TEST_HTTP_PROXY }}' - install_plugins: {{ TEST_GRAFANA_PLUGIN_VONAGE_URL }},{{ TEST_GRAFANA_PLUGIN_PIECHART_URL }} From e4e780395b2c03b6bc9847146a00e373ae9e72fa Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Wed, 31 May 2023 12:23:11 +0930 Subject: [PATCH 2498/2699] Enable rgw trust forwarded https when https proxy This option is required for server-side encryption to be allowed if radosgw is behind a reverse proxy, such as here when certificates are configured and apache2 is running. ref. https://docs.ceph.com/en/latest/radosgw/encryption/ It is safe to always enable when https is configured in the charm, because it will be securely behind the reverse proxy in the unit. This option must not be enabled when https is not configured in the charm, because this would allow clients to spoof headers. Closes-Bug: #2021560 Change-Id: I940f9b2f424a3d98936b5f185bf8f87b71091317 --- ceph-radosgw/hooks/ceph_radosgw_context.py | 1 + ceph-radosgw/templates/ceph.conf | 3 + .../unit_tests/test_ceph_radosgw_context.py | 112 ++++++++++++++++-- 3 files changed, 105 insertions(+), 11 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index c951fb61..e57ceef2 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -295,6 +295,7 @@ def __call__(self): 'rgw_swift_versioning': config('rgw-swift-versioning-enabled'), 'relaxed_s3_bucket_names': config('relaxed-s3-bucket-names'), 'frontend': http_frontend, + 'behind_https_proxy': https(), } # NOTE(dosaboy): these sections must correspond to what is supported in diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index d728ac03..d126b20b 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -16,6 +16,9 @@ ms bind ipv6 = true {% endif %} rgw swift versioning enabled = {{ rgw_swift_versioning }} rgw relaxed s3 bucket names = {{ relaxed_s3_bucket_names }} +{% if behind_https_proxy -%} +rgw trust forwarded https = true +{% endif %} {% if global -%} # The following are user-provided options provided via the config-flags charm option. 
# User-provided [global] section config diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index cfd07e07..f3f9553a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -74,6 +74,7 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, class MonContextTest(CharmTestCase): + maxDiff = None def setUp(self): super(MonContextTest, self).setUp(context, TO_PATCH) @@ -95,10 +96,16 @@ def plain_list_stub(key): else: return [] + @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') - def test_ctxt(self, mock_ensure_rsv_v6): + def test_ctxt( + self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids + ): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] @@ -136,7 +143,8 @@ def _relation_get(attr, unit, rid): 'frontend': 'beast', 'relaxed_s3_bucket_names': False, 'rgw_zonegroup': 'zonegroup1', - 'rgw_realm': 'realmX' + 'rgw_realm': 'realmX', + 'behind_https_proxy': False, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -148,10 +156,72 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch('ceph_radosgw_context.https') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') - def test_list_of_addresses_from_ceph_proxy(self, mock_ensure_rsv_v6): + def test_ctxt_with_https_proxy(self, mock_ensure_rsv_v6, mock_https): + mock_https.return_value = True + self.socket.gethostname.return_value = 'testhost' + mon_ctxt = context.MonContext() + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + + def _relation_get(attr, unit, rid): + if attr == 'ceph-public-address': + return addresses.pop() + elif attr == 'auth': + return 'cephx' + elif attr == 'rgw.testhost_key': + return 'testkey' + elif attr == 'fsid': + return 'testfsid' + + self.relation_get.side_effect = _relation_get + self.relation_ids.return_value = ['mon:6'] + self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] + self.multisite.plain_list = self.plain_list_stub + self.determine_api_port.return_value = 70 + expect = { + 'auth_supported': 'cephx', + 'hostname': 'testhost', + 'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3', + 'old_auth': False, + 'systemd_rgw': True, + 'unit_public_ip': '10.255.255.255', + 'use_syslog': 'false', + 'loglevel': 1, + 'port': 70, + 'client_radosgw_gateway': {'rgw init timeout': 60}, + 'ipv6': False, + 'rgw_zone': 'default', + 'fsid': 'testfsid', + 'rgw_swift_versioning': False, + 'frontend': 'beast', + 'relaxed_s3_bucket_names': False, + 'rgw_zonegroup': 'zonegroup1', + 'rgw_realm': 'realmX', + 'behind_https_proxy': True, + } + self.assertEqual(expect, mon_ctxt()) + self.assertFalse(mock_ensure_rsv_v6.called) + + self.test_config.set('prefer-ipv6', True) + addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] + expect['ipv6'] = True + expect['port'] = "[::]:%s" % (70) + self.assertEqual(expect, mon_ctxt()) + self.assertTrue(mock_ensure_rsv_v6.called) + + 
@patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') + @patch.object(ceph, 'config', lambda *args: + '{"client.radosgw.gateway": {"rgw init timeout": 60}}') + @patch.object(context, 'ensure_host_resolvable_v6') + def test_list_of_addresses_from_ceph_proxy( + self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids + ): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1 10.5.4.2 10.5.4.3'] @@ -190,7 +260,8 @@ def _relation_get(attr, unit, rid): 'frontend': 'beast', 'relaxed_s3_bucket_names': False, 'rgw_zonegroup': 'zonegroup1', - 'rgw_realm': 'realmX' + 'rgw_realm': 'realmX', + 'behind_https_proxy': False, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -202,9 +273,13 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_missing_data(self): + def test_ctxt_missing_data(self, mock_config_get, mock_relation_ids): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() self.relation_get.return_value = None @@ -212,9 +287,13 @@ def test_ctxt_missing_data(self): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] self.assertEqual({}, mon_ctxt()) + @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_inconsistent_auths(self): + def test_ctxt_inconsistent_auths(self, mock_config_get, mock_relation_ids): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] @@ -253,13 +332,18 @@ def _relation_get(attr, unit, rid): 'frontend': 'beast', 'relaxed_s3_bucket_names': False, 'rgw_zonegroup': 'zonegroup1', - 'rgw_realm': 'realmX' + 'rgw_realm': 'realmX', + 'behind_https_proxy': False, } self.assertEqual(expect, mon_ctxt()) + @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_consistent_auths(self): + def test_ctxt_consistent_auths(self, mock_config_get, mock_relation_ids): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] @@ -298,7 +382,8 @@ def _relation_get(attr, unit, rid): 'frontend': 'beast', 'relaxed_s3_bucket_names': False, 'rgw_zonegroup': 'zonegroup1', - 'rgw_realm': 'realmX' + 'rgw_realm': 'realmX', + 'behind_https_proxy': False, } self.assertEqual(expect, mon_ctxt()) @@ -360,9 +445,13 @@ def _compare_version(package, version): _test_version = '16.2.0' context.validate_http_frontend('beast') + 
@patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') + @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_inconsistent_fsids(self): + def test_ctxt_inconsistent_fsids(self, mock_config_get, mock_relation_ids): + mock_relation_ids.return_value = [] + mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' mon_ctxt = context.MonContext() addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3'] @@ -401,7 +490,8 @@ def _relation_get(attr, unit, rid): 'frontend': 'beast', 'relaxed_s3_bucket_names': False, 'rgw_zonegroup': 'zonegroup1', - 'rgw_realm': 'realmX' + 'rgw_realm': 'realmX', + 'behind_https_proxy': False, } self.assertEqual(expect, mon_ctxt()) From 7359be9fe39dc9f4dcf2195c899e5be2a404993a Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Thu, 16 Mar 2023 14:04:08 -0400 Subject: [PATCH 2499/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: I72ab8b2ec96cf78df8a84e0686a33229a6270fb3 --- ceph-dashboard/charmcraft.yaml | 8 +- ceph-dashboard/metadata.yaml | 2 + ceph-dashboard/osci.yaml | 2 + .../tests/bundles/jammy-antelope.yaml | 118 ++++++++++++++++++ ceph-dashboard/tests/bundles/jammy-zed.yaml | 118 ++++++++++++++++++ ceph-dashboard/tests/bundles/kinetic-zed.yaml | 104 +++++++++++++++ .../tests/bundles/lunar-antelope.yaml | 104 +++++++++++++++ .../tests/bundles/overlays/focal.yaml.j2 | 4 - 8 files changed, 455 insertions(+), 5 deletions(-) create mode 100644 ceph-dashboard/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-dashboard/tests/bundles/jammy-zed.yaml create mode 100644 ceph-dashboard/tests/bundles/kinetic-zed.yaml create mode 100644 ceph-dashboard/tests/bundles/lunar-antelope.yaml delete mode 100644 ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml index a77199e1..75c5e371 100644 --- a/ceph-dashboard/charmcraft.yaml +++ b/ceph-dashboard/charmcraft.yaml @@ -31,4 +31,10 @@ bases: architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index 8e6db9f8..25311625 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -16,6 +16,8 @@ subordinate: true series: - focal - jammy +- kinetic +- lunar requires: dashboard: interface: ceph-dashboard diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index ffe2619e..9fdcb42b 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -3,6 +3,8 @@ - charm-unit-jobs-py38 - charm-unit-jobs-py310 - charm-yoga-functional-jobs + - charm-zed-functional-jobs + - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-dashboard diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..10d53b62 --- /dev/null +++ b/ceph-dashboard/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,118 @@ 
+local_overlay_enabled: False +series: jammy +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: quincy/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + channel: quincy/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: ch:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # October 2021 + charm: ch:grafana + num_units: 1 + options: + anonymous: True + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: ch:prometheus-alertmanager + num_units: 1 + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: quincy/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: quincy/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: quincy/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/jammy-zed.yaml b/ceph-dashboard/tests/bundles/jammy-zed.yaml new file mode 100644 index 00000000..694d84d1 --- /dev/null +++ b/ceph-dashboard/tests/bundles/jammy-zed.yaml @@ -0,0 +1,118 @@ +local_overlay_enabled: False +series: jammy +variables: + openstack-origin: &openstack-origin cloud:jammy-zed +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + channel: quincy/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: 
*openstack-origin + channel: quincy/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: ch:prometheus2 + num_units: 1 + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # October 2021 + charm: ch:grafana + num_units: 1 + options: + anonymous: True + install_method: snap + allow_embedding: True + telegraf: + charm: telegraf + channel: stable + options: + hostname: "{host}" + prometheus-alertmanager: + charm: ch:prometheus-alertmanager + num_units: 1 + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: quincy/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: quincy/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: quincy/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-osd:juju-info' + - 'telegraf:juju-info' + - - 'ceph-mon:juju-info' + - 'telegraf:juju-info' + - - 'telegraf:prometheus-client' + - 'prometheus:target' + - - 'telegraf:dashboards' + - 'grafana:dashboards' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/kinetic-zed.yaml b/ceph-dashboard/tests/bundles/kinetic-zed.yaml new file mode 100644 index 00000000..93095327 --- /dev/null +++ b/ceph-dashboard/tests/bundles/kinetic-zed.yaml @@ -0,0 +1,104 @@ +local_overlay_enabled: False +series: kinetic +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: ch:prometheus2 + num_units: 1 + series: focal + grafana: + # SSL and allow_embedding are not 
released into cs:grafana yet, due + # Octrober 2021 + charm: ch:grafana + num_units: 1 + series: focal + options: + anonymous: True + install_method: snap + allow_embedding: True + prometheus-alertmanager: + charm: ch:prometheus-alertmanager + num_units: 1 + series: focal + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: latest/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: latest/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: latest/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 'ceph-dashboard:dashboard' + - 'ceph-mon:dashboard' + - - 'ceph-dashboard:certificates' + - 'vault:certificates' + - - 'ceph-mon:prometheus' + - 'prometheus:target' + - - 'grafana:grafana-source' + - 'prometheus:grafana-source' + - - 'grafana:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:grafana-dashboard' + - 'grafana:dashboards' + - - 'ceph-dashboard:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-dashboard:prometheus' + - 'prometheus:website' + - - 'prometheus:alertmanager-service' + - 'prometheus-alertmanager:alertmanager-service' + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + - - 'ceph-radosgw:certificates' + - 'vault:certificates' + - - 'ceph-dashboard:radosgw-dashboard' + - 'ceph-radosgw:radosgw-user' + - - 'ceph-mon:mds' + - 'ceph-fs:ceph-mds' + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-dashboard:iscsi-dashboard' + - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..9ce0237b --- /dev/null +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,104 @@ +local_overlay_enabled: False +series: lunar +applications: + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + channel: latest/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + constraints: mem=3072M + num_units: 3 + channel: latest/edge + vault-mysql-router: + charm: ch:mysql-router + channel: latest/edge + ceph-dashboard: + charm: ../../ceph-dashboard.charm + options: + public-hostname: 'ceph-dashboard.zaza.local' + prometheus: + charm: ch:prometheus2 + num_units: 1 + series: focal + grafana: + # SSL and allow_embedding are not released into cs:grafana yet, due + # Octrober 2021 + charm: ch:grafana + num_units: 1 + series: focal + options: + anonymous: True + install_method: snap + allow_embedding: True + prometheus-alertmanager: + charm: ch:prometheus-alertmanager + num_units: 1 + series: focal + ceph-radosgw: + charm: ch:ceph-radosgw + num_units: 3 + channel: latest/edge + ceph-fs: + charm: ch:ceph-fs + num_units: 1 + channel: latest/edge + ceph-iscsi: + charm: ch:ceph-iscsi + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + channel: latest/edge +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + - - 
'ceph-dashboard:dashboard'
+    - 'ceph-mon:dashboard'
+  - - 'ceph-dashboard:certificates'
+    - 'vault:certificates'
+  - - 'ceph-mon:prometheus'
+    - 'prometheus:target'
+  - - 'grafana:grafana-source'
+    - 'prometheus:grafana-source'
+  - - 'grafana:certificates'
+    - 'vault:certificates'
+  - - 'ceph-dashboard:grafana-dashboard'
+    - 'grafana:dashboards'
+  - - 'ceph-dashboard:alertmanager-service'
+    - 'prometheus-alertmanager:alertmanager-service'
+  - - 'ceph-dashboard:prometheus'
+    - 'prometheus:website'
+  - - 'prometheus:alertmanager-service'
+    - 'prometheus-alertmanager:alertmanager-service'
+  - - 'ceph-radosgw:mon'
+    - 'ceph-mon:radosgw'
+  - - 'ceph-radosgw:certificates'
+    - 'vault:certificates'
+  - - 'ceph-dashboard:radosgw-dashboard'
+    - 'ceph-radosgw:radosgw-user'
+  - - 'ceph-mon:mds'
+    - 'ceph-fs:ceph-mds'
+  - - 'ceph-mon:client'
+    - 'ceph-iscsi:ceph-client'
+  - - 'vault:certificates'
+    - 'ceph-iscsi:certificates'
+  - - 'ceph-dashboard:iscsi-dashboard'
+    - 'ceph-iscsi:admin-access'
diff --git a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2 b/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2
deleted file mode 100644
index 3248e700..00000000
--- a/ceph-dashboard/tests/bundles/overlays/focal.yaml.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-applications:
-  grafana:
-    options:
-      http_proxy: '{{ TEST_HTTP_PROXY }}'

From 72b9eb1205b39176ea8e7bbcb1dedcec1ed845f4 Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Wed, 31 May 2023 13:07:11 +0200
Subject: [PATCH 2500/2699] rbd mirror relation: be persistent in getting pool
 info

Auth for getting pool details can fail initially if we set up an rbd
mirror relation at cloud bootstrap. Add a retry to give it another
chance.

Change-Id: I2f5ac561120b1abe52ea0621bb472bc78495fa97
Partial-Bug: #2021967
---
 ceph-mon/src/ceph_hooks.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py
index 5f9b2a11..85922148 100755
--- a/ceph-mon/src/ceph_hooks.py
+++ b/ceph-mon/src/ceph_hooks.py
@@ -22,6 +22,8 @@ import uuid
 import pathlib
 
+import tenacity
+
 sys.path.append('lib')
 import charms_ceph.utils as ceph
 from charms_ceph.broker import (
@@ -1006,12 +1008,19 @@ def rbd_mirror_relation(relid=None, unit=None, recurse=True):
         unit = remote_unit()
     if is_unsupported_cmr(unit):
         return
+
+    # Add some tenacity when getting pool details
+    @tenacity.retry(wait=tenacity.wait_exponential(max=20),
+                    reraise=True)
+    def get_pool_details():
+        return ceph.list_pools_detail()
+
     # handle broker requests first to get an updated pool map
     data = (handle_broker_request(relid, unit, recurse=recurse))
     data.update({
         'auth': 'cephx',
         'ceph-public-address': get_public_addr(),
-        'pools': json.dumps(ceph.list_pools_detail(), sort_keys=True),
+        'pools': json.dumps(get_pool_details(), sort_keys=True),
         'broker_requests': json.dumps(
             [rq.request for rq in retrieve_client_broker_requests()],
             sort_keys=True),

From 51113d2c405b9da4ee1ef0272a750b370d35bb1d Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 14 Apr 2022 13:48:32 +0200
Subject: [PATCH 2501/2699] Bypass charm going into blocked due to a bug

After Octopus, reporting of image states fails because of a permission
issue in Ceph. This change disables that status reporting to allow a
deployment to be healthy even when some tools cannot query status.
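
A note on the retry added in PATCH 2500 above: tenacity's wait_exponential backs off roughly 1s, 2s, 4s, ... capped here at 20s, and because the decorator sets no stop condition it keeps retrying until the call succeeds, so reraise=True only takes effect once a stop is configured. A runnable sketch of the same pattern, with an illustrative stop_after_attempt added and a stand-in for ceph.list_pools_detail():

    import tenacity

    calls = {'n': 0}

    def flaky_list_pools():
        """Stand-in for ceph.list_pools_detail(); fails twice, then succeeds."""
        calls['n'] += 1
        if calls['n'] < 3:
            raise RuntimeError('auth not ready yet')
        return [{'name': 'rbd', 'size': 3}]

    @tenacity.retry(wait=tenacity.wait_exponential(max=20),  # 1s, 2s, 4s, ... capped at 20s
                    stop=tenacity.stop_after_attempt(5),     # illustrative; the patch sets no stop
                    reraise=True)                            # surface the last error, not RetryError
    def get_pool_details():
        return flaky_list_pools()

    print(get_pool_details())  # succeeds on the third attempt
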
Also modernize build and func testing: remove python 3.9 and xena from tests Related-Bug: #1879749 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1063 Change-Id: Id86fc043495b89609cf6873ec58aee1e2e388578 --- ceph-rbd-mirror/bindep.txt | 3 + ceph-rbd-mirror/charmcraft.yaml | 31 +-- ceph-rbd-mirror/osci.yaml | 9 +- ceph-rbd-mirror/requirements.txt | 19 +- .../lib/charm/openstack/ceph_rbd_mirror.py | 8 +- ceph-rbd-mirror/src/metadata.yaml | 2 +- .../src/tests/bundles/focal-xena.yaml | 187 ------------------ .../src/tests/bundles/focal-yoga.yaml | 4 +- .../src/tests/bundles/impish-xena.yaml | 186 ----------------- .../src/tests/bundles/jammy-yoga.yaml | 4 +- ceph-rbd-mirror/src/tox.ini | 26 +-- ceph-rbd-mirror/tox.ini | 66 ++----- ...est_lib_charm_openstack_ceph_rbd_mirror.py | 5 +- 13 files changed, 76 insertions(+), 474 deletions(-) create mode 100644 ceph-rbd-mirror/bindep.txt delete mode 100644 ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml delete mode 100644 ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml diff --git a/ceph-rbd-mirror/bindep.txt b/ceph-rbd-mirror/bindep.txt new file mode 100644 index 00000000..bdbe8d56 --- /dev/null +++ b/ceph-rbd-mirror/bindep.txt @@ -0,0 +1,3 @@ +libffi-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 49682169..0b706795 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -2,24 +2,25 @@ type: charm parts: charm: + source: src/ + plugin: reactive + build-snaps: + - charm build-packages: - tox - git - python3-dev - override-build: | - apt-get install ca-certificates -y - tox -e build-reactive - override-stage: | - echo "Copying charm to staging area: $CHARMCRAFT_STAGE" - NAME=$(ls $CHARMCRAFT_PART_BUILD/build/builds) - cp -r $CHARMCRAFT_PART_BUILD/build/builds/$NAME/* $CHARMCRAFT_STAGE/ - override-prime: | - # For some reason, the normal priming chokes on the fact that there's a - # hooks directory. - cp -r $CHARMCRAFT_STAGE/* . + build-environment: + - CHARM_INTERFACES_DIR: /root/project/interfaces/ + - CHARM_LAYERS_DIR: /root/project/layers/ bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "22.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index 4f039701..d196797b 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -1,17 +1,14 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 check: jobs: - - focal-xena - - focal-yoga: - voting: false - - impish-xena: - voting: false + - focal-yoga - jammy-yoga: voting: false vars: needs_charm_build: true charm_build_name: ceph-rbd-mirror build_type: charmcraft + charmcraft_channel: 2.1/stable diff --git a/ceph-rbd-mirror/requirements.txt b/ceph-rbd-mirror/requirements.txt index a68620f6..b3dc23f7 100644 --- a/ceph-rbd-mirror/requirements.txt +++ b/ceph-rbd-mirror/requirements.txt @@ -8,16 +8,13 @@ # requirements.txt setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -# Build requirements -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
-charm-tools==2.8.3 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 -simplejson +git+https://github.com/juju/charm-tools.git -# Newer versions use keywords that didn't exist in python 3.5 yet (e.g. -# "ModuleNotFoundError") -# NOTE(lourot): This might look like a duplication of test-requirements.txt but -# some tox targets use only test-requirements.txt whereas charm-build uses only -# requirements.txt -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' +simplejson diff --git a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py index c20ea48d..3446c619 100644 --- a/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/src/lib/charm/openstack/ceph_rbd_mirror.py @@ -97,8 +97,12 @@ def custom_assess_status_check(self): if not pool_msg: pool_msg = 'Pools ' pool_msg += '{} ({}) '.format(health, count) - if health != 'OK': - status = 'blocked' + + # Disabling blocked state until + # https://bugs.launchpad.net/charm-ceph-rbd-mirror/+bug/1879749 + # is resolved + # if health != 'OK': + # status = 'blocked' for state, count in stats['image_states'].items(): if not image_msg: image_msg = 'Images ' diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index a07cf96b..51dfde77 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -16,7 +16,7 @@ tags: - misc series: - focal -- impish +- jammy extra-bindings: public: cluster: diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml deleted file mode 100644 index 3460aaeb..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/focal-xena.yaml +++ /dev/null @@ -1,187 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-xena - -local_overlay_enabled: False - -series: &series focal - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - num_units: 0 - channel: yoga/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: 
- osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - channel: quincy/edge - - ceph-osd-b: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml index 2564d9c7..9b8f51b6 100644 --- a/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/focal-yoga.yaml @@ -152,9 +152,9 @@ relations: - - rabbitmq-server - cinder -- - keystone +- - 'keystone:identity-service' - cinder -- - keystone +- - 'keystone:identity-service' - glance - - cinder diff --git a/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml b/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml deleted file mode 100644 index ddf2861f..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,186 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -local_overlay_enabled: False - -series: &series impish - -machines: - '0': - constraints: "mem=3072M" - '1': - constraints: "mem=3072M" - '2': - constraints: "mem=3072M" - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - channel: yoga/edge - - cinder-ceph: - 
charm: ch:cinder-ceph - num_units: 0 - channel: yoga/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: yoga/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - - ceph-mon-b: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - channel: quincy/edge - - ceph-osd-b: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - keystone - - cinder -- - keystone - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml index a3fd3f80..8e85d4aa 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-yoga.yaml @@ -151,9 +151,9 @@ relations: - - rabbitmq-server - cinder -- - keystone +- - 'keystone:identity-service' - cinder -- - keystone +- - 'keystone:identity-service' - glance - - cinder diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index b40d2952..8ffff0f9 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -6,32 +6,26 @@ [tox] envlist = pep8 -skipsdist = True # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 allowlist_externals = juju -passenv = HOME TERM CS_* OS_* TEST_* +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt -install_command = - pip install {opts} {packages} [testenv:pep8] basepython = python3 diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index 2d60b8a4..c028e9a5 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -5,53 +5,51 @@ # https://github.com/openstack-charmers/release-tools [tox] -skipsdist = True envlist = pep8,py3 # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools<50.0.0 - -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux LAYER_PATH={toxinidir}/layers INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY -install_command = - {toxinidir}/pip.sh install {opts} {packages} + CHARM_LAYERS_DIR={toxinidir}/layers + CHARM_INTERFACES_DIR={toxinidir}/interfaces +passenv = + no_proxy + http_proxy + https_proxy + CHARM_INTERFACES_DIR + CHARM_LAYERS_DIR + JUJU_REPOSITORY allowlist_externals = charmcraft bash tox - rename.sh + {toxinidir}/rename.sh deps = -r{toxinidir}/requirements.txt [testenv:build] basepython = python3 -deps = -r{toxinidir}/build-requirements.txt +# charmcraft clean is done to ensure that +# `tox -e build` always performs a clean, repeatable build. +# For faster rebuilds during development, +# directly run `charmcraft -v pack && ./rename.sh`. 
commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh + charmcraft clean [testenv:build-reactive] basepython = python3 @@ -68,11 +66,6 @@ basepython = python3 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/test-requirements.txt @@ -88,29 +81,12 @@ basepython = python3.8 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py39] -basepython = python3.9 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.3 + charm-tools==2.8.4 commands = flake8 {posargs} src unit_tests -[testenv:func-target] -# Hack to get functional tests working in the charmcraft -# world. We should fix this. -basepython = python3 -passenv = HOME TERM CS_* OS_* TEST_* -deps = -r{toxinidir}/src/test-requirements.txt -changedir = {toxinidir}/src -commands = - bash -c "if [ ! -f ../*.charm ]; then echo 'Charm does not exist. Run tox -e build'; exit 1; fi" - tox --version - tox -e func-target {posargs} - [testenv:cover] # Technique based heavily upon # https://github.com/openstack/nova/blob/master/tox.ini diff --git a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py index e0c16452..49039fad 100644 --- a/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py +++ b/ceph-rbd-mirror/unit_tests/test_lib_charm_openstack_ceph_rbd_mirror.py @@ -51,7 +51,10 @@ def test_custom_assess_status_check(self): {'stopped': 2, 'replaying': 2}), }) result = crmc.custom_assess_status_check() - self.assertTrue('blocked' in result[0]) + # Disabling blocked state until + # https://bugs.launchpad.net/charm-ceph-rbd-mirror/+bug/1879749 + # is resolved + # self.assertTrue('blocked' in result[0]) # the order of which the statuses appear in the string is undefined self.assertTrue('OK (1)' in result[1]) self.assertTrue('WARN (1)' in result[1]) From 3e4fb47ebdc17987b51eb80b853421ae2ef7d6e5 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 2 Jun 2023 17:38:39 -0300 Subject: [PATCH 2502/2699] Add Kinetic and Zed support * sync charm-helpers to classic charms * change openstack-origin/source default to zed * align testing with zed * add new zed bundles * add zed bundles to tests.yaml * add zed tests to osci.yaml and .zuul.yaml * update build-on and run-on bases * add bindep.txt for py310 * sync tox.ini and requirements.txt for ruamel * use charmcraft_channel 2.0/stable * drop reactive plugin overrides * move interface/layer env vars to charmcraft.yaml * fix poetry-core addition in wheelhouse Change-Id: Ia2ad44152cf620e9812bfbd58276735a7491949c --- ceph-rbd-mirror/.zuul.yaml | 2 +- ceph-rbd-mirror/bindep.txt | 1 + ceph-rbd-mirror/build-requirements.txt | 1 + ceph-rbd-mirror/charmcraft.yaml | 5 +- ceph-rbd-mirror/osci.yaml | 7 +- .../src/tests/bundles/jammy-zed.yaml | 186 ++++++++++++++++++ .../src/tests/bundles/kinetic-zed.yaml | 186 ++++++++++++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 10 +- ceph-rbd-mirror/src/wheelhouse.txt | 2 +- ceph-rbd-mirror/test-requirements.txt | 18 +- ceph-rbd-mirror/tox.ini | 14 +- 11 files changed, 395 insertions(+), 37 deletions(-) create mode 100644 
ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml index fd189e2f..75fc2a78 100644 --- a/ceph-rbd-mirror/.zuul.yaml +++ b/ceph-rbd-mirror/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-zed-jobs diff --git a/ceph-rbd-mirror/bindep.txt b/ceph-rbd-mirror/bindep.txt index bdbe8d56..17575d9f 100644 --- a/ceph-rbd-mirror/bindep.txt +++ b/ceph-rbd-mirror/bindep.txt @@ -1,3 +1,4 @@ libffi-dev [platform:dpkg] +libpq-dev [platform:dpkg] libxml2-dev [platform:dpkg] libxslt1-dev [platform:dpkg] diff --git a/ceph-rbd-mirror/build-requirements.txt b/ceph-rbd-mirror/build-requirements.txt index b6d2452f..ff4b3577 100644 --- a/ceph-rbd-mirror/build-requirements.txt +++ b/ceph-rbd-mirror/build-requirements.txt @@ -5,3 +5,4 @@ # * `tox -e build` successfully validated with charmcraft 1.2.1 cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. +markupsafe<=2 diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 0b706795..366bcbe0 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -17,10 +17,13 @@ parts: bases: - build-on: - name: ubuntu - channel: "22.04" + channel: "20.04" architectures: - amd64 run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index d196797b..5caf4703 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -2,11 +2,8 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - check: - jobs: - - focal-yoga - - jammy-yoga: - voting: false + - charm-yoga-functional-jobs + - charm-zed-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-rbd-mirror diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml new file mode 100644 index 00000000..4c21f316 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-zed + +local_overlay_enabled: False + +series: &series jammy + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + channel: latest/edge + + cinder-ceph: + charm: ch:cinder-ceph + num_units: 0 + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: latest/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: 
latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd-b: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - 'keystone:identity-service' + - cinder +- - 'keystone:identity-service' + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml new file mode 100644 index 00000000..e093f8f1 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml @@ -0,0 +1,186 @@ +variables: + openstack-origin: &openstack-origin distro + +local_overlay_enabled: False + +series: &series kinetic + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + channel: latest/edge + + cinder-ceph: + charm: ch:cinder-ceph + num_units: 0 + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: latest/edge + + nova-compute: + 
charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd-b: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - 'keystone:identity-service' + - cinder +- - 'keystone:identity-service' + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 117d828c..c318138b 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -14,6 +14,10 @@ dev_bundles: - focal-yoga-image-mirroring - jammy-yoga - jammy-yoga-image-mirroring +- jammy-zed +- jammy-zed-image-mirroring +- kinetic-zed +- kinetic-zed-image-mirroring configure: - zaza.openstack.charm_tests.glance.setup.add_cirros_image - zaza.openstack.charm_tests.glance.setup.add_lts_image @@ -23,7 +27,5 @@ tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest tests_options: force_deploy: - - impish-xena - - impish-xena-image-mirroring - - jammy-yoga - - jammy-yoga-image-mirroring + - kinetic-zed + - kinetic-zed-image-mirroring diff --git a/ceph-rbd-mirror/src/wheelhouse.txt b/ceph-rbd-mirror/src/wheelhouse.txt index 10f9a4e5..04ab38c3 100644 --- a/ceph-rbd-mirror/src/wheelhouse.txt +++ b/ceph-rbd-mirror/src/wheelhouse.txt @@ -1,4 +1,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers psutil - +poetry-core git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index a11a7d07..a7936e65 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ 
b/ceph-rbd-mirror/test-requirements.txt @@ -4,7 +4,6 @@ # https://github.com/openstack-charmers/release-tools # pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 stestr>=2.2.0 @@ -13,25 +12,10 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 -# Dependencies of stestr. Newer versions use keywords that didn't exist in -# python 3.5 yet (e.g. "ModuleNotFoundError") -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' - -# Some Zuul nodes sometimes pull newer versions of these dependencies which -# dropped support for python 3.5: -osprofiler<2.7.0;python_version<'3.6' -stevedore<1.31.0;python_version<'3.6' -debtcollector<1.22.0;python_version<'3.6' -oslo.utils<=3.41.0;python_version<'3.6' - requests>=2.18.4 charms.reactive -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' +mock>=1.2 nose>=1.3.7 coverage>=3.6 diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index c028e9a5..af776db4 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -19,8 +19,6 @@ skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux - LAYER_PATH={toxinidir}/layers - INTERFACE_PATH={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build CHARM_LAYERS_DIR={toxinidir}/layers CHARM_INTERFACES_DIR={toxinidir}/interfaces @@ -66,18 +64,18 @@ basepython = python3 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py36] -basepython = python3.6 +[testenv:py38] +basepython = python3.8 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py37] -basepython = python3.7 +[testenv:py39] +basepython = python3.9 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py38] -basepython = python3.8 +[testenv:py310] +basepython = python3.10 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} From 7fd6025e1408d9688649ee0ced496b3ee7e71302 Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Thu, 12 Jan 2023 16:20:03 +1030 Subject: [PATCH 2503/2699] Configure ceph with osd-memory-target from ceph-osd charm Change-Id: Id3f21f8ab68fb88529b6cbd78217e27772c2739c --- ceph-mon/src/ceph_hooks.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 5f9b2a11..793e78fd 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -868,12 +868,30 @@ def osd_relation(relid=None, unit=None): notify_rbd_mirrors() send_osd_settings() - for relid in relation_ids('dashboard'): - dashboard_relation(relid) + for dashboard_relid in relation_ids('dashboard'): + dashboard_relation(dashboard_relid) if ready_for_service(): update_host_osd_count_report() + if is_leader(): + osd_host = relation_get(rid=relid, unit=unit, attribute='osd-host') + osd = f"osd/host:{osd_host}" + osd_memory_target = relation_get( + rid=relid, unit=unit, attribute='osd-memory-target' + ) + if osd_host: + if osd_memory_target: + ceph.ceph_config_set( + "osd_memory_target", + osd_memory_target, + osd, + ) + else: + subprocess.check_call( + ["ceph", 
"config", "rm", osd, "osd_memory_target"] + ) + else: log('mon cluster not in quorum - deferring fsid provision') From 411453dfd68dc934cc5ea386334cc8ed11665f19 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Mon, 20 Mar 2023 14:18:16 -0400 Subject: [PATCH 2504/2699] Add support for interim Ubuntu releases - update bundles to include UCA pocket tests - update test configuration - update metadata to include kinetic and lunar - update snapcraft to allow run-on for kinetic and lunar Change-Id: Ic9c1aef2b8c81bf53be3a18ca2806c69ce618f90 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1074 Change-Id: Ie39cef6965c30f2adb78b4be568ced17a140115b --- ceph-rbd-mirror/.zuul.yaml | 1 + ceph-rbd-mirror/build-requirements.txt | 8 - ceph-rbd-mirror/charmcraft.yaml | 7 + ceph-rbd-mirror/osci.yaml | 3 +- ceph-rbd-mirror/pip.sh | 18 -- ceph-rbd-mirror/src/metadata.yaml | 2 + ceph-rbd-mirror/src/test-requirements.txt | 3 - .../src/tests/bundles/jammy-antelope.yaml | 184 +++++++++++++ .../src/tests/bundles/jammy-zed.yaml | 25 +- .../src/tests/bundles/kinetic-zed.yaml | 22 +- .../src/tests/bundles/lunar-antelope.yaml | 246 ++++++++++++++++++ 11 files changed, 462 insertions(+), 57 deletions(-) delete mode 100644 ceph-rbd-mirror/build-requirements.txt delete mode 100755 ceph-rbd-mirror/pip.sh create mode 100644 ceph-rbd-mirror/src/tests/bundles/jammy-antelope.yaml create mode 100644 ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml diff --git a/ceph-rbd-mirror/.zuul.yaml b/ceph-rbd-mirror/.zuul.yaml index 75fc2a78..77259668 100644 --- a/ceph-rbd-mirror/.zuul.yaml +++ b/ceph-rbd-mirror/.zuul.yaml @@ -1,3 +1,4 @@ - project: templates: - openstack-python3-charm-zed-jobs + - openstack-python3-charm-jobs diff --git a/ceph-rbd-mirror/build-requirements.txt b/ceph-rbd-mirror/build-requirements.txt deleted file mode 100644 index ff4b3577..00000000 --- a/ceph-rbd-mirror/build-requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -# NOTES(lourot): -# * We don't install charmcraft via pip anymore because it anyway spins up a -# container and scp the system's charmcraft snap inside it. So the charmcraft -# snap is necessary on the system anyway. -# * `tox -e build` successfully validated with charmcraft 1.2.1 - -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -markupsafe<=2 diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 366bcbe0..47444bf4 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -27,3 +27,10 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.10" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] + diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index 5caf4703..19eaacf0 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -2,8 +2,7 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - - charm-yoga-functional-jobs - - charm-zed-functional-jobs + - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-rbd-mirror diff --git a/ceph-rbd-mirror/pip.sh b/ceph-rbd-mirror/pip.sh deleted file mode 100755 index 9a7e6b09..00000000 --- a/ceph-rbd-mirror/pip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. 
See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# setuptools 58.0 dropped the support for use_2to3=true which is needed to -# install blessings (an indirect dependency of charm-tools). -# -# More details on the beahvior of tox and virtualenv creation can be found at -# https://github.com/tox-dev/tox/issues/448 -# -# This script is wrapper to force the use of the pinned versions early in the -# process when the virtualenv was created and upgraded before installing the -# depedencies declared in the target. -pip install 'pip<20.3' 'setuptools<50.0.0' -pip "$@" diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 51dfde77..8bcc0f7a 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -17,6 +17,8 @@ tags: series: - focal - jammy +- kinetic +- lunar extra-bindings: public: cluster: diff --git a/ceph-rbd-mirror/src/test-requirements.txt b/ceph-rbd-mirror/src/test-requirements.txt index 9c7afb7f..e7710236 100644 --- a/ceph-rbd-mirror/src/test-requirements.txt +++ b/ceph-rbd-mirror/src/test-requirements.txt @@ -4,9 +4,6 @@ # https://github.com/openstack-charmers/release-tools # -# Need tox to be available from tox... inception yes, but its a workaround for now -tox - # Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-antelope.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-antelope.yaml new file mode 100644 index 00000000..f8a08dea --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,184 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope + series: &series jammy + +local_overlay_enabled: False + +series: *series + +machines: + '0': + constraints: "mem=3072M" + '1': + constraints: "mem=3072M" + '2': + constraints: "mem=3072M" + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: 2023.1/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + channel: 3.9/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + openstack-origin: *openstack-origin + channel: 2023.1/edge + + cinder-ceph: + charm: ch:cinder-ceph + num_units: 0 + channel: 2023.1/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: 2023.1/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + channel: 2023.1/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror: + 
series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + + ceph-mon-b: + charm: ch:ceph-mon + num_units: 3 + options: + expected-osd-count: 3 + source: *openstack-origin + channel: quincy/edge + + ceph-osd-b: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + channel: quincy/edge + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + +relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - 'keystone:identity-service' + - cinder +- - 'keystone:identity-service' + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml index 4c21f316..e9ab0c36 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml @@ -17,38 +17,34 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge keystone: charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin - channel: latest/edge + channel: 3.9/edge cinder: charm: ch:cinder @@ -56,26 +52,27 @@ applications: options: block-device: None glance-api-version: 2 - channel: latest/edge + openstack-origin: *openstack-origin + channel: zed/edge cinder-ceph: charm: ch:cinder-ceph num_units: 0 - channel: latest/edge + channel: zed/edge glance: charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge nova-compute: charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge ceph-mon: charm: ch:ceph-mon diff --git a/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml index e093f8f1..1631de19 100644 --- a/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml @@ -17,13 +17,13 @@ applications: 
keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -34,21 +34,19 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge keystone: charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin - channel: latest/edge + channel: 3.9/edge cinder: charm: ch:cinder @@ -56,26 +54,26 @@ applications: options: block-device: None glance-api-version: 2 - channel: latest/edge + channel: zed/edge cinder-ceph: charm: ch:cinder-ceph num_units: 0 - channel: latest/edge + channel: zed/edge glance: charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge nova-compute: charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: zed/edge ceph-mon: charm: ch:ceph-mon diff --git a/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml b/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml new file mode 100644 index 00000000..cd0d8d71 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml @@ -0,0 +1,246 @@ +variables: + openstack-origin: &openstack-origin distro + series: &series lunar + infra-series: &infra-series jammy + +local_overlay_enabled: False + +series: *series + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + series: *infra-series + '1': + constraints: mem=3072M + series: *infra-series + '2': + constraints: mem=3072M + series: *infra-series + '3': + series: *infra-series + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + '20': + '21': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + # Reduce chances of spurious "super-read-only" failures, see lp:1882205 + expel-timeout: 20 + to: + - '0' + - '1' + - '2' + channel: 8.0.19/edge + series: *infra-series + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '3' + channel: 3.9/edge + series: *infra-series + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '4' + channel: 2023.1/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + block-device: None + glance-api-version: 2 + to: + - '5' + channel: 2023.1/edge + + cinder-ceph: + charm: ch:cinder-ceph + num_units: 0 + channel: 2023.1/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '6' + channel: 2023.1/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '7' + channel: 2023.1/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + expected-osd-count: 3 + to: + - '8' + - '9' + - '10' + channel: quincy/edge + + ceph-osd: + charm: ch:ceph-osd + 
num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + to: + - '11' + - '12' + - '13' + channel: quincy/edge + + ceph-rbd-mirror: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + to: + - '14' + + ceph-mon-b: + charm: ch:ceph-mon + num_units: 3 + options: + source: *openstack-origin + monitor-count: '3' + expected-osd-count: 3 + to: + - '15' + - '16' + - '17' + channel: quincy/edge + + ceph-osd-b: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + bluestore: False + use-direct-io: False + osd-devices: '/dev/test-non-existent' + to: + - '18' + - '19' + - '20' + channel: quincy/edge + + ceph-rbd-mirror-b: + series: *series + charm: ../../../ceph-rbd-mirror.charm + num_units: 1 + options: + source: *openstack-origin + to: + - '21' +
+relations: + +- - keystone:shared-db + - keystone-mysql-router:shared-db +- - keystone-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - glance:shared-db + - glance-mysql-router:shared-db +- - glance-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - cinder:shared-db + - cinder-mysql-router:shared-db +- - cinder-mysql-router:db-router + - mysql-innodb-cluster:db-router + +- - rabbitmq-server + - cinder + +- - 'keystone:identity-service' + - cinder +- - 'keystone:identity-service' + - glance + +- - cinder + - cinder-ceph +- - cinder-ceph:ceph + - ceph-mon:client + +- - nova-compute:ceph-access + - cinder-ceph:ceph-access +- - nova-compute:amqp + - rabbitmq-server:amqp + +- - glance:image-service + - nova-compute:image-service +- - glance + - ceph-mon + +- - ceph-mon:osd + - ceph-osd:mon +- - ceph-mon + - ceph-rbd-mirror:ceph-local +- - ceph-mon + - ceph-rbd-mirror-b:ceph-remote + +- - ceph-mon-b:osd + - ceph-osd-b:mon +- - ceph-mon-b + - ceph-rbd-mirror-b:ceph-local +- - ceph-mon-b + - ceph-rbd-mirror:ceph-remote
From 90044e7fb15b28af369a4d8c8a63ec09e4cd4cd3 Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Fri, 16 Jun 2023 21:21:39 +0900
Subject: [PATCH 2505/2699] Don't clear osd_memory_target unconditionally
The charm can now set osd_memory_target, but by the nature of how the charm works it cannot do so per device class or type. Always resetting osd_memory_target when it is not passed over the relation is risky, since operators may have set osd_memory_target by hand with the `ceph config` command outside of the charm. Let's be less disruptive on charm upgrade.
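For reviewers, a minimal sketch of the resulting behaviour (illustrative only: the function name is invented, `osd` stands for the config entity computed earlier in the real handler, and the hedged imports assume the helpers already used by ceph_hooks.py, whose actual hunk follows below):

    # Sketch, not the verbatim charm code. relation_get() comes from
    # charmhelpers; the charms_ceph utils import mirrors what the hooks
    # file is assumed to use as `ceph`.
    from charmhelpers.core.hookenv import relation_get
    import charms_ceph.utils as ceph

    def sync_osd_memory_target(relid, unit, osd):
        osd_host = relation_get(rid=relid, unit=unit, attribute='osd-host')
        osd_memory_target = relation_get(
            rid=relid, unit=unit, attribute='osd-memory-target')
        # Only act when the related unit actually provides a value.
        if all([osd_host, osd_memory_target]):
            ceph.ceph_config_set('osd_memory_target', osd_memory_target, osd)
        # Deliberately no `else: ceph config rm ...` branch any more, so a
        # value set by an operator out of band survives charm upgrades.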
Closes-Bug: #1934143 Change-Id: I34dd33e54193a9ebdbc9571d153aa6206c85a067 --- ceph-mon/src/ceph_hooks.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 1ade1c6a..857b9675 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -882,17 +882,12 @@ def osd_relation(relid=None, unit=None): osd_memory_target = relation_get( rid=relid, unit=unit, attribute='osd-memory-target' ) - if osd_host: - if osd_memory_target: - ceph.ceph_config_set( - "osd_memory_target", - osd_memory_target, - osd, - ) - else: - subprocess.check_call( - ["ceph", "config", "rm", osd, "osd_memory_target"] - ) + if all([osd_host, osd_memory_target]): + ceph.ceph_config_set( + "osd_memory_target", + osd_memory_target, + osd, + ) else: log('mon cluster not in quorum - deferring fsid provision') From b6de88b191b74d6f31c51d714cdba5fc3de4d789 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 13 Dec 2022 13:23:41 -0300 Subject: [PATCH 2506/2699] Unpin tox version This unpinning is meant to solve the issues with tox 4.x breaking all the virtualenv dependencies. Change-Id: Idd02728e33be4931c7729c218629b7a8430d0f41 --- ceph-proxy/osci.yaml | 2 +- ceph-proxy/pip.sh | 18 ------------------ ceph-proxy/tox.ini | 24 +++++++++--------------- 3 files changed, 10 insertions(+), 34 deletions(-) delete mode 100755 ceph-proxy/pip.sh diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index 3614568e..e5d780a5 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -26,7 +26,7 @@ - name: tox-py310 soft: true vars: - tox_extra_args: -- erasure-coded:jammy-yoga-ec + tox_extra_args: '-- erasure-coded:jammy-yoga-ec' - job: name: kinetic-zed-ec parent: func-target diff --git a/ceph-proxy/pip.sh b/ceph-proxy/pip.sh deleted file mode 100755 index 9a7e6b09..00000000 --- a/ceph-proxy/pip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of tox.ini for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# setuptools 58.0 dropped the support for use_2to3=true which is needed to -# install blessings (an indirect dependency of charm-tools). -# -# More details on the beahvior of tox and virtualenv creation can be found at -# https://github.com/tox-dev/tox/issues/448 -# -# This script is wrapper to force the use of the pinned versions early in the -# process when the virtualenv was created and upgraded before installing the -# depedencies declared in the target. -pip install 'pip<20.3' 'setuptools<50.0.0' -pip "$@" diff --git a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index bddbd1f2..ebf24210 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -14,18 +14,6 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.18.0 @@ -35,12 +23,18 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} install_command = - {toxinidir}/pip.sh install {opts} {packages} + pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh -passenv = HOME TERM CS_* OS_* TEST_* + pip + {toxinidir}/rename.sh +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt [testenv:build] From 054ca881615ee2450e9a7d429d2b0f6f1066f257 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 16 Jun 2023 18:10:22 -0400 Subject: [PATCH 2507/2699] Charm-helpers sync Change-Id: I0053f28aaaecc6b9d60dfbccbb7c308c929cb046 --- .../charmhelpers/contrib/charmsupport/nrpe.py | 16 ++++++++ .../charmhelpers/contrib/hahelpers/cluster.py | 9 ++++- ceph-proxy/charmhelpers/contrib/network/ip.py | 2 +- .../contrib/openstack/deferred_events.py | 4 +- .../contrib/openstack/ha/utils.py | 29 ++++++++++++++ .../charmhelpers/contrib/openstack/ip.py | 25 ++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 9 +++-- .../contrib/storage/linux/ceph.py | 23 ++++++++--- .../contrib/storage/linux/utils.py | 21 ++++++++-- ceph-proxy/charmhelpers/core/host.py | 2 +- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/core/unitdata.py | 11 +++--- ceph-proxy/charmhelpers/fetch/ubuntu.py | 38 ++++++++++++++++--- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 36 +++++++----------- 14 files changed, 178 insertions(+), 48 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py index bad7a533..ac002bc6 100644 --- a/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py +++ b/ceph-proxy/charmhelpers/contrib/charmsupport/nrpe.py @@ -19,6 +19,7 @@ import glob import grp +import json import os import pwd import re @@ -30,6 +31,7 @@ from charmhelpers.core.hookenv import ( application_name, config, + ERROR, hook_name, local_unit, log, @@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): :param str unit_name: Unit name to use in check description :param bool immediate_check: For sysv init, run the service check immediately """ + # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details. + # just remove check_haproxy if haproxy is added as a lsb resource in hacluster. + for rid in relation_ids("ha"): + ha_resources = relation_get("json_resources", rid=rid, unit=local_unit()) + if ha_resources: + try: + ha_resources_parsed = json.loads(ha_resources) + except ValueError as e: + log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR) + raise + if "lsb:haproxy" in ha_resources_parsed.values(): + if "haproxy" in services: + log("removed check_haproxy. 
This service will be monitored by check_crm") + services.remove("haproxy") for svc in services: # Don't add a check for these services from neutron-gateway if svc in ['ext-port', 'os-charm-phy-nic-mtu']: diff --git a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py index 146beba6..7b309256 100644 --- a/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-proxy/charmhelpers/contrib/hahelpers/cluster.py @@ -221,6 +221,13 @@ def https(): return True if config_get('ssl_cert') and config_get('ssl_key'): return True + # Local import to avoid ciruclar dependency. + import charmhelpers.contrib.openstack.cert_utils as cert_utils + if ( + cert_utils.get_certificate_request() and not + cert_utils.get_requests_for_local_unit("certificates") + ): + return False for r_id in relation_ids('certificates'): for unit in relation_list(r_id): ca = relation_get('ca', rid=r_id, unit=unit) @@ -324,7 +331,7 @@ def valid_hacluster_config(): ''' vip = config_get('vip') dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): + if not (bool(vip) ^ bool(dns)): msg = ('HA: Either vip or dns-ha must be set but not both in order to ' 'use high availability') status_set('blocked', msg) diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index f8edf37a..cf9926b9 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -539,7 +539,7 @@ def port_has_listener(address, port): """ cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) - return not(bool(result)) + return not (bool(result)) def assert_charm_supports_ipv6(): diff --git a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py index 94eacf6c..4c46e41a 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/deferred_events.py @@ -127,7 +127,9 @@ def deferred_events(): """ events = [] for defer_file in deferred_events_files(): - events.append((defer_file, read_event_file(defer_file))) + event = read_event_file(defer_file) + if event.policy_requestor_name == hookenv.service_name(): + events.append((defer_file, event)) return events diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py index a5cbdf53..b4912c42 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/ha/utils.py @@ -25,6 +25,7 @@ import hashlib import json +import os import re @@ -36,6 +37,7 @@ config, status_set, DEBUG, + application_name, ) from charmhelpers.core.host import ( @@ -65,6 +67,7 @@ VIP_GROUP_NAME = 'grp_{service}_vips' DNSHA_GROUP_NAME = 'grp_{service}_hostnames' +HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard" class DNSHAException(Exception): @@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data): relation_data['groups'] = { key: ' '.join(vip_group) } + + +def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard): + """Load grafana dashboard json model and insert prometheus datasource. + + :param prometheus_app_name: name of the 'prometheus' application that will + be used as datasource in grafana dashboard + :type prometheus_app_name: str + :param haproxy_dashboard: path to haproxy dashboard + :type haproxy_dashboard: str + :return: Grafana dashboard json model as a str. 
+ :rtype: str + """ + from charmhelpers.contrib.templating import jinja + + dashboard_template = os.path.basename(haproxy_dashboard) + dashboard_template_dir = os.path.dirname(haproxy_dashboard) + app_name = application_name() + datasource = "{} - Juju generated source".format(prometheus_app_name) + return jinja.render(dashboard_template, + {"datasource": datasource, + "app_name": app_name, + "prometheus_app_name": prometheus_app_name}, + template_dir=dashboard_template_dir, + jinja_env_args={"variable_start_string": "<< ", + "variable_end_string": " >>"}) diff --git a/ceph-proxy/charmhelpers/contrib/openstack/ip.py b/ceph-proxy/charmhelpers/contrib/openstack/ip.py index b8c94c56..2afad369 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/ip.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/ip.py @@ -25,6 +25,7 @@ is_ipv6, get_ipv6_addr, resolve_network_cidr, + get_iface_for_address ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'): return unit_get(unit_get_fallback) +def get_invalid_vips(): + """Check if any of the provided vips are invalid. + A vip is invalid if it doesn't belong to the subnet in any interface. + If all vips are valid, this returns an empty list. + + :returns: A list of strings, where each string is an invalid vip address. + :rtype: list + """ + + clustered = is_clustered() + vips = config('vip') + if vips: + vips = vips.split() + invalid_vips = [] + + if clustered and vips: + for vip in vips: + iface_for_vip = get_iface_for_address(vip) + if iface_for_vip is None: + invalid_vips.append(vip) + + return invalid_vips + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 1fa2814a..83b6884b 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -159,6 +159,7 @@ ('2021.2', 'xena'), ('2022.1', 'yoga'), ('2022.2', 'zed'), + ('2023.1', 'antelope'), ]) # The ugly duckling - must list releases oldest to newest @@ -956,7 +957,7 @@ def os_requires_version(ostack_release, pkg): def wrap(f): @wraps(f) def wrapped_f(*args): - if os_release(pkg) < ostack_release: + if CompareOpenStackReleases(os_release(pkg)) < ostack_release: raise Exception("This hook is not supported on releases" " before %s" % ostack_release) f(*args) @@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False): @param test: default=False, if False, test for closed, otherwise open. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ - test = not(not(test)) # ensure test is True or False + test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] map_ports = OrderedDict() @@ -1583,7 +1584,7 @@ def is_unit_paused_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) + return not (not (kv.get('unit-paused'))) except Exception: return False @@ -2181,7 +2182,7 @@ def is_unit_upgrading_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. 
- return not(not(kv.get('unit-upgrading'))) + return not (not (kv.get('unit-upgrading'))) except Exception: return False diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 1b20b8fe..2e1fc1b5 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -28,7 +28,6 @@ import shutil import json import time -import uuid from subprocess import ( check_call, @@ -1677,6 +1676,10 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + # The below hash is the result of running + # `hashlib.sha1('[]'.encode()).hexdigest()` + EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c' + def __init__(self, api_version=1, request_id=None, raw_request_data=None): """Initialize CephBrokerRq object. @@ -1685,8 +1688,12 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): :param api_version: API version for request (default: 1). :type api_version: Optional[int] - :param request_id: Unique identifier for request. - (default: string representation of generated UUID) + :param request_id: Unique identifier for request. The identifier will + be updated as ops are added or removed from the + broker request. This ensures that Ceph will + correctly process requests where operations are + added after the initial request is processed. + (default: sha1 of operations) :type request_id: Optional[str] :param raw_request_data: JSON-encoded string to build request from. :type raw_request_data: Optional[str] @@ -1695,16 +1702,20 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): if raw_request_data: request_data = json.loads(raw_request_data) self.api_version = request_data['api-version'] - self.request_id = request_data['request-id'] self.set_ops(request_data['ops']) + self.request_id = request_data['request-id'] else: self.api_version = api_version if request_id: self.request_id = request_id else: - self.request_id = str(uuid.uuid1()) + self.request_id = CephBrokerRq.EMPTY_LIST_SHA self.ops = [] + def _hash_ops(self): + """Return the sha1 of the requested Broker ops.""" + return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest() + def add_op(self, op): """Add an op if it is not already in the list. @@ -1713,6 +1724,7 @@ def add_op(self, op): """ if op not in self.ops: self.ops.append(op) + self.request_id = self._hash_ops() def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, @@ -1991,6 +2003,7 @@ def set_ops(self, ops): to allow comparisons to ensure validity. """ self.ops = ops + self.request_id = self._hash_ops() @property def request(self): diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py index a3561760..4d05b121 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/utils.py @@ -23,6 +23,12 @@ call ) +from charmhelpers.core.hookenv import ( + log, + WARNING, + INFO +) + def _luks_uuid(dev): """ @@ -110,7 +116,7 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False, inode_size=1024): +def mkfs_xfs(device, force=False, inode_size=None): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. 
@@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024): :ptype device: tr :param force: Force operation :ptype: force: boolean - :param inode_size: XFS inode size in bytes + :param inode_size: XFS inode size in bytes; if set to 0 or None, + the value used will be the XFS system default :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', "size={}".format(inode_size), device] + if inode_size: + if inode_size >= 256 and inode_size <= 2048: + cmd += ['-i', "size={}".format(inode_size)] + else: + log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING) + else: + log("Using XFS filesystem with system default inode size.", level=INFO) + + cmd += [device] check_call(cmd) diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index ef6c8eca..70dde6a5 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -954,7 +954,7 @@ def pwgen(length=None): random_generator = random.SystemRandom() random_chars = [ random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) + return ''.join(random_chars) def is_phy_iface(interface): diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index cc2d89fe..a279d5be 100644 --- a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -31,6 +31,7 @@ 'impish', 'jammy', 'kinetic', + 'lunar', ) diff --git a/ceph-proxy/charmhelpers/core/unitdata.py b/ceph-proxy/charmhelpers/core/unitdata.py index d9b8d0b0..8f4bbc61 100644 --- a/ceph-proxy/charmhelpers/core/unitdata.py +++ b/ceph-proxy/charmhelpers/core/unitdata.py @@ -171,8 +171,9 @@ class Storage(object): path parameter which causes sqlite3 to only build the db in memory. This should only be used for testing purposes. """ - def __init__(self, path=None): + def __init__(self, path=None, keep_revisions=False): self.db_path = path + self.keep_revisions = keep_revisions if path is None: if 'UNIT_STATE_DB' in os.environ: self.db_path = os.environ['UNIT_STATE_DB'] @@ -242,7 +243,7 @@ def unset(self, key): Remove a key from the database entirely. 
""" self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) @@ -259,14 +260,14 @@ def unsetrange(self, keys=None, prefix=""): if keys is not None: keys = ['%s%s' % (prefix, key) for key in keys] self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) else: self.cursor.execute('delete from kv where key like ?', ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) @@ -299,7 +300,7 @@ def set(self, key, value): where key = ?''', [serialized, key]) # Save - if not self.revision: + if (not self.keep_revisions) or (not self.revision): return value self.cursor.execute( diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 93b92765..1bad0db8 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -230,6 +230,18 @@ 'zed/proposed': 'jammy-proposed/zed', 'jammy-zed/proposed': 'jammy-proposed/zed', 'jammy-proposed/zed': 'jammy-proposed/zed', + # antelope + 'antelope': 'jammy-updates/antelope', + 'jammy-antelope': 'jammy-updates/antelope', + 'jammy-antelope/updates': 'jammy-updates/antelope', + 'jammy-updates/antelope': 'jammy-updates/antelope', + 'antelope/proposed': 'jammy-proposed/antelope', + 'jammy-antelope/proposed': 'jammy-proposed/antelope', + 'jammy-proposed/antelope': 'jammy-proposed/antelope', + + # OVN + 'focal-ovn-22.03': 'focal-updates/ovn-22.03', + 'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03', } @@ -257,6 +269,7 @@ 'xena', 'yoga', 'zed', + 'antelope', ) @@ -284,6 +297,7 @@ ('impish', 'xena'), ('jammy', 'yoga'), ('kinetic', 'zed'), + ('lunar', 'antelope'), ]) @@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :type quiet: bool :raises: subprocess.CalledProcessError """ + if not packages: + log("Nothing to install", level=DEBUG) + return if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -574,7 +591,7 @@ def _get_key_by_keyid(keyid): curl_cmd = ['curl', keyserver_url.format(keyid)] # use proxy server settings in order to retrieve the key return subprocess.check_output(curl_cmd, - env=env_proxy_settings(['https'])) + env=env_proxy_settings(['https', 'no_proxy'])) def _dearmor_gpg_key(key_asc): @@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud-archive:(.*)$", _add_apt_repository), (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), + (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check), (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), @@ -750,6 +768,11 @@ def _add_apt_repository(spec): ) +def __write_sources_list_d_actual_pocket(file, actual_pocket): + with 
open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + def _add_cloud_pocket(pocket): """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list @@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket): 'Unsupported cloud: source option %s' % pocket) actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + __write_sources_list_d_actual_pocket( + 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'), + actual_pocket) def _add_cloud_staging(cloud_archive_release, openstack_release): @@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), try: result = subprocess.check_call(cmd, env=env, **kwargs) except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 + result = e.returncode + if result not in retry_results: + # a non-retriable exitcode was produced + raise + retry_count += 1 if retry_count > max_retries: + # a retriable exitcode was produced more than {max_retries} times raise - result = e.returncode log(retry_message) time.sleep(CMD_RETRY_DELAY) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py index 6da355fd..f4dde4a9 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -122,13 +122,12 @@ def dpkg_list(self, packages): :raises: subprocess.CalledProcessError """ pkgs = {} - cmd = ['dpkg-query', '--list'] + cmd = [ + 'dpkg-query', '--show', + '--showformat', + r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n' + ] cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. 
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, @@ -140,24 +139,17 @@ def dpkg_list(self, packages): if cp.returncode != 1: raise output = cp.output - headings = [] for line in output.splitlines(): - if line.startswith('||/'): - headings = line.split() - headings.pop(0) - continue - elif (line.startswith('|') or line.startswith('+') or - line.startswith('dpkg-query:')): + # only process lines for successfully installed packages + if not (line.startswith('ii ') or line.startswith('hi ')): continue - else: - data = line.split(None, 4) - status = data.pop(0) - if status not in ('ii', 'hi'): - continue - pkg = {} - pkg.update({k.lower(): v for k, v in zip(headings, data)}) - if 'name' in pkg: - pkgs.update({pkg['name']: pkg}) + status, name, version, arch, desc = line.split('\t', 4) + pkgs[name] = { + 'name': name, + 'version': version, + 'architecture': arch, + 'description': desc, + } return pkgs def _apt_cache_show(self, packages): From 523ce327f91614f53bd66fdcf0f5b6c1a941cdc2 Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Fri, 16 Jun 2023 17:37:07 -0400 Subject: [PATCH 2508/2699] Normalize testing bundles Summary of changes: - Drop 'source' override in mysql-innodb-cluster and rabbitmq-server - Fix 'openstack-origin' Change-Id: I19faecbcfebdaf5dcf0d0346c5d0b0eed02866aa --- ceph-proxy/tests/bundles/jammy-antelope.yaml | 6 +----- ceph-proxy/tests/bundles/jammy-yoga-ec.yaml | 4 ---- ceph-proxy/tests/bundles/jammy-yoga.yaml | 4 ---- ceph-proxy/tests/bundles/jammy-zed-ec.yaml | 6 +----- ceph-proxy/tests/bundles/jammy-zed.yaml | 4 ---- ceph-proxy/tests/bundles/kinetic-zed-ec.yaml | 4 ---- ceph-proxy/tests/bundles/kinetic-zed.yaml | 4 ---- ceph-proxy/tests/bundles/lunar-antelope-ec.yaml | 4 ---- ceph-proxy/tests/bundles/lunar-antelope.yaml | 6 +----- 9 files changed, 3 insertions(+), 39 deletions(-) diff --git a/ceph-proxy/tests/bundles/jammy-antelope.yaml b/ceph-proxy/tests/bundles/jammy-antelope.yaml index 8677eee2..76c08a8f 100644 --- a/ceph-proxy/tests/bundles/jammy-antelope.yaml +++ b/ceph-proxy/tests/bundles/jammy-antelope.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin distro + openstack-origin: &openstack-origin cloud:jammy-antelope series: jammy @@ -41,8 +41,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -126,8 +124,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml index 0cf89d79..b24cecfe 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga-ec.yaml @@ -44,8 +44,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -140,8 +138,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/jammy-yoga.yaml b/ceph-proxy/tests/bundles/jammy-yoga.yaml index 8677eee2..87d4b5ae 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga.yaml @@ -41,8 +41,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -126,8 +124,6 @@ 
applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/jammy-zed-ec.yaml b/ceph-proxy/tests/bundles/jammy-zed-ec.yaml index 0cf89d79..d449fdef 100644 --- a/ceph-proxy/tests/bundles/jammy-zed-ec.yaml +++ b/ceph-proxy/tests/bundles/jammy-zed-ec.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin distro + openstack-origin: &openstack-origin cloud:jammy-zed series: jammy @@ -44,8 +44,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -140,8 +138,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/jammy-zed.yaml b/ceph-proxy/tests/bundles/jammy-zed.yaml index 041dc19f..94e65f8e 100644 --- a/ceph-proxy/tests/bundles/jammy-zed.yaml +++ b/ceph-proxy/tests/bundles/jammy-zed.yaml @@ -41,8 +41,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -126,8 +124,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml b/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml index 479b391c..42b0d69a 100644 --- a/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml +++ b/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml @@ -44,8 +44,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -140,8 +138,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/kinetic-zed.yaml b/ceph-proxy/tests/bundles/kinetic-zed.yaml index ca1731c5..4d031149 100644 --- a/ceph-proxy/tests/bundles/kinetic-zed.yaml +++ b/ceph-proxy/tests/bundles/kinetic-zed.yaml @@ -41,8 +41,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -126,8 +124,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml b/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml index f4fd5f4c..9b5f7f84 100644 --- a/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml +++ b/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml @@ -44,8 +44,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -140,8 +138,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge diff --git a/ceph-proxy/tests/bundles/lunar-antelope.yaml b/ceph-proxy/tests/bundles/lunar-antelope.yaml index bf65396d..a7b7de2e 100644 --- a/ceph-proxy/tests/bundles/lunar-antelope.yaml +++ b/ceph-proxy/tests/bundles/lunar-antelope.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin cloud:lunar-antelope + openstack-origin: &openstack-origin distro series: lunar @@ -41,8 +41,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: 
*openstack-origin to: - '0' - '1' @@ -126,8 +124,6 @@ applications: charm: ch:rabbitmq-server num_units: 1 constraints: mem=1024 - options: - source: *openstack-origin to: - '13' channel: latest/edge From 98b1fc940e9994d4209d72b5758b6c4bf517fb61 Mon Sep 17 00:00:00 2001 From: Fulvio Galeazzi Date: Thu, 1 Apr 2021 08:55:53 +0000 Subject: [PATCH 2509/2699] Add support for CephFS. Extend ceph-proxy to implement ceph-mds interface, this allows the ceph-fs to be related. The testing is made reusing the CephFSTests testing class. Usage example: juju add-relation ceph-proxy:mds ceph-fs:ceph-mds Co-Authored-By: Felipe Reyes Closes-Bug: #1922195 Func-Test-PR: https://github.com/openstack-charmers/zaza-openstack-tests/pull/558 Change-Id: I437dbac9fe018eb2d0ffb87052d61a08aa014473 --- ceph-proxy/README.md | 10 +++- ceph-proxy/hooks/ceph.py | 36 +++++++++++-- ceph-proxy/hooks/ceph_hooks.py | 43 +++++++++++++++ ceph-proxy/hooks/mds-relation-changed | 1 + ceph-proxy/hooks/mds-relation-joined | 1 + ceph-proxy/metadata.yaml | 2 + ceph-proxy/tests/bundles/jammy-antelope.yaml | 19 +++++++ ceph-proxy/tests/bundles/jammy-yoga.yaml | 19 +++++++ ceph-proxy/tests/bundles/jammy-zed.yaml | 19 +++++++ ceph-proxy/tests/bundles/kinetic-zed.yaml | 19 +++++++ ceph-proxy/tests/bundles/lunar-antelope.yaml | 19 +++++++ ceph-proxy/tests/tests.yaml | 23 +++++--- ceph-proxy/unit_tests/test_ceph_hooks.py | 56 +++++++++++++++++++- 13 files changed, 251 insertions(+), 16 deletions(-) create mode 120000 ceph-proxy/hooks/mds-relation-changed create mode 120000 ceph-proxy/hooks/mds-relation-joined diff --git a/ceph-proxy/README.md b/ceph-proxy/README.md index ad00a340..b586e27a 100644 --- a/ceph-proxy/README.md +++ b/ceph-proxy/README.md @@ -7,6 +7,10 @@ The ceph-proxy charm deploys a proxy that acts as a [ceph-mon][ceph-mon-charm] application for an external Ceph cluster. It joins a non-charmed Ceph cluster to a Juju model. +The charm works with traditional Ceph charm clients (e.g. cinder, glance, +nova-compute) but it also supports the [ceph-radosgw][ceph-radosgw-charm] and +[ceph-fs][ceph-fs-charm] charms. + # Usage ## Configuration @@ -66,7 +70,9 @@ For general charm questions refer to the [OpenStack Charm Guide][cg]. 
[ceph-upstream]: https://ceph.io [cg]: https://docs.openstack.org/charm-guide -[ceph-mon-charm]: https://jaas.ai/ceph-mon -[juju-docs-actions]: https://jaas.ai/docs/actions +[ceph-mon-charm]: https://charmhub.io/ceph-mon +[ceph-fs-charm]: https://charmhub.io/ceph-fs +[ceph-radosgw-charm]: https://charmhub.io/ceph-radosgw +[juju-docs-actions]: https://charmhub.io/docs/actions [juju-docs-config-apps]: https://juju.is/docs/configuring-applications [lp-bugs-charm-ceph-proxy]: https://bugs.launchpad.net/charm-ceph-proxy/+filebug
diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index d6213a70..7e57155f 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py
@@ -351,6 +351,12 @@ def get_radosgw_key(name='radosgw.gateway'): return get_named_key(name, _radosgw_caps) +def get_mds_key(name): + return get_named_entity_key(entity='mds', + name=name, + caps=mds_caps) + + _default_caps = collections.OrderedDict([ ('mon', ['allow r', 'allow command "osd blacklist"']),
@@ -363,6 +369,12 @@ def get_radosgw_key(name='radosgw.gateway'): 'osd': ['allow *'] } +mds_caps = collections.OrderedDict([ + ('osd', ['allow *']), + ('mds', ['allow']), + ('mon', ['allow rwx']), +]) + osd_upgrade_caps = { 'mon': ['allow command "config-key"', 'allow command "osd tree"',
@@ -390,15 +402,17 @@ def _config_user_key(name): return k -def get_named_key(name, caps=None, pool_list=None): +def get_named_entity_key(name, caps=None, pool_list=None, + entity='client'): """Retrieve a specific named cephx key. - :param name: String Name of key to get. - :param pool_list: The list of pools to give access to + :param name: String Name of key to get. EXACT MATCH :param caps: dict of cephx capabilities + :param pool_list: The list of pools to give access to + :param entity: String Name of type to get. :returns: Returns a cephx key """ - key_name = 'client.{}'.format(name) + key_name = '{}.{}'.format(entity, name) try: # Does the key already exist? output = str(subprocess.check_output(
@@ -424,7 +438,8 @@ def get_named_key(name, caps=None, pool_list=None): return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! - log("Creating new key for {}".format(name), level=DEBUG) + log("Creating new key for {}".format(key_name), level=DEBUG) + caps = caps or _default_caps cmd = [ "sudo",
@@ -455,6 +470,17 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 +def get_named_key(name, caps=None, pool_list=None): + """Retrieve a specific named cephx key. + + :param name: String Name of key to get.
+ :param caps: dict of cephx capabilities + :param pool_list: The list of pools to give access to + :returns: Returns a cephx key + """ + return get_named_entity_key(name, caps, pool_list, entity='client') + + def upgrade_key_caps(key, caps, pool_list=None): """ Upgrade key to have capabilities caps """ if not is_leader(): diff --git a/ceph-proxy/hooks/ceph_hooks.py b/ceph-proxy/hooks/ceph_hooks.py index 0c72f294..682c7b32 100755 --- a/ceph-proxy/hooks/ceph_hooks.py +++ b/ceph-proxy/hooks/ceph_hooks.py @@ -31,6 +31,7 @@ def _add_path(path): from charmhelpers.core.hookenv import ( log, DEBUG, + INFO, config, is_leader, relation_ids, @@ -137,6 +138,7 @@ def emit_cephconf(): notify_radosgws() notify_client() + notify_cephfs_mds() @hooks.hook('config-changed') @@ -160,6 +162,12 @@ def notify_client(): client_relation_joined(relid=relid, unit=unit) +def notify_cephfs_mds(): + for relid in relation_ids('mds'): + for unit in related_units(relid): + mds_relation_joined(relid=relid, unit=unit) + + @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') def radosgw_relation(relid=None, unit=None): @@ -203,6 +211,41 @@ def radosgw_relation(relid=None, unit=None): log('FSID or admin key not provided, please configure them') +@hooks.hook('mds-relation-joined') +@hooks.hook('mds-relation-changed') +def mds_relation_joined(relid=None, unit=None): + if not ready(): + log('MDS: FSID or admin key not provided, please configure them', + level=INFO) + return + + log('ceph-proxy config ok - providing mds client with keys') + if not unit: + unit = remote_unit() + + mds_name = relation_get(attribute='mds-name', + rid=relid, unit=unit) + ceph_addrs = config('monitor-hosts') + data = { + 'fsid': config('fsid'), + 'auth': config('auth-supported'), + 'ceph-public-address': ceph_addrs, + } + if mds_name: + data['{}_mds_key'.format(mds_name)] = ( + ceph.get_mds_key(name=mds_name) + ) + + settings = relation_get(rid=relid, unit=unit) or {} + if 'broker_req' in settings: + rsp = process_requests(settings['broker_req']) + unit_id = unit.replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + data[unit_response_key] = rsp + log('MDS: relation_set (%s): %s' % (relid, str(data)), level=DEBUG) + relation_set(relation_id=relid, relation_settings=data) + + @hooks.hook('client-relation-joined') def client_relation_joined(relid=None, unit=None): if ready(): diff --git a/ceph-proxy/hooks/mds-relation-changed b/ceph-proxy/hooks/mds-relation-changed new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/mds-relation-changed @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/hooks/mds-relation-joined b/ceph-proxy/hooks/mds-relation-joined new file mode 120000 index 00000000..52d96630 --- /dev/null +++ b/ceph-proxy/hooks/mds-relation-joined @@ -0,0 +1 @@ +ceph_hooks.py \ No newline at end of file diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 7854a2dc..24f04fcb 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -21,3 +21,5 @@ provides: interface: ceph-client radosgw: interface: ceph-radosgw + mds: + interface: ceph-mds diff --git a/ceph-proxy/tests/bundles/jammy-antelope.yaml b/ceph-proxy/tests/bundles/jammy-antelope.yaml index 76c08a8f..1e4e54e6 100644 --- a/ceph-proxy/tests/bundles/jammy-antelope.yaml +++ b/ceph-proxy/tests/bundles/jammy-antelope.yaml @@ -25,6 +25,9 @@ machines: '13': '14': '15': + '16': + '17': + '18': applications: @@ -146,6 +149,19 @@ applications: - '15' channel: latest/edge + 
ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + to: + - '16' + - '17' + + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + to: + - '18' relations: @@ -193,3 +209,6 @@ relations: - - 'nova-compute:amqp' - 'rabbitmq-server:amqp' + + - - 'ceph-proxy:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-proxy/tests/bundles/jammy-yoga.yaml b/ceph-proxy/tests/bundles/jammy-yoga.yaml index 87d4b5ae..12ee6212 100644 --- a/ceph-proxy/tests/bundles/jammy-yoga.yaml +++ b/ceph-proxy/tests/bundles/jammy-yoga.yaml @@ -25,6 +25,9 @@ machines: '13': '14': '15': + '16': + '17': + '18': applications: @@ -146,6 +149,19 @@ applications: - '15' channel: latest/edge + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + to: + - '16' + - '17' + + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + to: + - '18' relations: @@ -193,3 +209,6 @@ relations: - - 'nova-compute:amqp' - 'rabbitmq-server:amqp' + + - - 'ceph-proxy:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-proxy/tests/bundles/jammy-zed.yaml b/ceph-proxy/tests/bundles/jammy-zed.yaml index 94e65f8e..80969e47 100644 --- a/ceph-proxy/tests/bundles/jammy-zed.yaml +++ b/ceph-proxy/tests/bundles/jammy-zed.yaml @@ -25,6 +25,9 @@ machines: '13': '14': '15': + '16': + '17': + '18': applications: @@ -146,6 +149,19 @@ applications: - '15' channel: latest/edge + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + to: + - '16' + - '17' + + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + to: + - '18' relations: @@ -193,3 +209,6 @@ relations: - - 'nova-compute:amqp' - 'rabbitmq-server:amqp' + + - - 'ceph-proxy:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-proxy/tests/bundles/kinetic-zed.yaml b/ceph-proxy/tests/bundles/kinetic-zed.yaml index 4d031149..376648af 100644 --- a/ceph-proxy/tests/bundles/kinetic-zed.yaml +++ b/ceph-proxy/tests/bundles/kinetic-zed.yaml @@ -25,6 +25,9 @@ machines: '13': '14': '15': + '16': + '17': + '18': applications: @@ -146,6 +149,19 @@ applications: - '15' channel: latest/edge + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + to: + - '16' + - '17' + + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + to: + - '18' relations: @@ -193,3 +209,6 @@ relations: - - 'nova-compute:amqp' - 'rabbitmq-server:amqp' + + - - 'ceph-proxy:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-proxy/tests/bundles/lunar-antelope.yaml b/ceph-proxy/tests/bundles/lunar-antelope.yaml index a7b7de2e..e6cdff99 100644 --- a/ceph-proxy/tests/bundles/lunar-antelope.yaml +++ b/ceph-proxy/tests/bundles/lunar-antelope.yaml @@ -25,6 +25,9 @@ machines: '13': '14': '15': + '16': + '17': + '18': applications: @@ -146,6 +149,19 @@ applications: - '15' channel: latest/edge + ubuntu: # used to test mounts + charm: ch:ubuntu + num_units: 2 + to: + - '16' + - '17' + + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + to: + - '18' relations: @@ -193,3 +209,6 @@ relations: - - 'nova-compute:amqp' - 'rabbitmq-server:amqp' + + - - 'ceph-proxy:mds' + - 'ceph-fs:ceph-mds' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index ee84d3e2..bfa452d3 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -7,6 +7,7 @@ configure: tests: - zaza.openstack.charm_tests.ceph.tests.CephProxyTest + - zaza.openstack.charm_tests.ceph.fs.tests.CephFSWithCephProxyTests - erasure-coded: - zaza.openstack.charm_tests.ceph.tests.CephProxyTest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes @@ -33,22 +34,28 @@ 
smoke_bundles: target_deploy_status: ceph-proxy: workload-status: blocked - workload-status-message: Ensure FSID and admin-key are set + workload-status-message-prefix: "Ensure FSID and admin-key are set" ceph-radosgw: workload-status: waiting - workload-status-message: "Incomplete relations: mon" + workload-status-message-prefix: "Incomplete relations: mon" keystone: workload-status: active - workload-status-message: "Unit is ready" - nova-compute: - workload-status: waiting - workload-status-message: "Incomplete relations: storage-backend" + workload-status-message-prefix: "Unit is ready" cinder-ceph: workload-status: waiting - workload-status-message: "Ceph broker request incomplete" + workload-status-message-prefix: "Ceph broker request incomplete" + ceph-fs: + workload-status: waiting + workload-status-message-prefix: "'ceph-mds' incomplete" + nova-compute: + workload-status: waiting + workload-status-message-prefix: "Incomplete relations: storage-backend" glance: workload-status: waiting - workload-status-message: "Incomplete relations: storage-backend" + workload-status-message-prefix: "Incomplete relations: storage-backend" + ubuntu: + workload-status: active + workload-status-message-prefix: '' tests_options: force_deploy: diff --git a/ceph-proxy/unit_tests/test_ceph_hooks.py b/ceph-proxy/unit_tests/test_ceph_hooks.py index 2af966a1..8706b4cb 100644 --- a/ceph-proxy/unit_tests/test_ceph_hooks.py +++ b/ceph-proxy/unit_tests/test_ceph_hooks.py @@ -74,10 +74,11 @@ def test_radosgw_relation(self, mock_apt_install, mock_check_output, mock_apt_install.assert_called_with(packages=[]) @mock.patch('ceph.ceph_user') + @mock.patch.object(hooks, 'mds_relation_joined', autospec=True) @mock.patch.object(hooks, 'radosgw_relation') @mock.patch.object(hooks, 'client_relation_joined') def test_emit_cephconf(self, mock_client_rel, mock_rgw_rel, - mock_ceph_user): + mock_mds_rel, mock_ceph_user): mock_ceph_user.return_value = 'ceph-user' self.test_config.set('monitor-hosts', '127.0.0.1:1234') self.test_config.set('fsid', 'abc123') @@ -89,6 +90,8 @@ def c(k): 'client': ['client:1'], 'rados:1': ['rados/1'], 'client:1': ['client/1'], + 'mds': ['mds:2'], + 'mds:2': ['mds/3'], } return x[k] @@ -127,6 +130,7 @@ def c(k): mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1') mock_client_rel.assert_called_with(relid='client:1', unit='client/1') + mock_mds_rel.assert_called_with(relid='mds:2', unit='mds/3') @mock.patch.object(hooks.ceph, 'ceph_user') @mock.patch('subprocess.check_output') @@ -162,6 +166,56 @@ def test_config_get_skips_package_update(self, mock_package_install.assert_not_called() mock_emit_cephconf.assert_any_call() + @mock.patch('subprocess.check_output', autospec=True) + @mock.patch('ceph.config', autospec=True) + @mock.patch('ceph.get_mds_key', autospec=True) + @mock.patch('ceph.ceph_user', autospec=True) + def test_mds_relation_joined(self, ceph_user, get_mds_key, ceph_config, + check_output): + my_mds_key = '1234-key' + mds_name = 'adjusted-mayfly' + rid = 'mds:1' + ceph_user.return_value = 'ceph' + get_mds_key.return_value = my_mds_key + ceph_config.side_effect = self.test_config.get + + settings = {'ceph-public-address': '127.0.0.1:1234 [::1]:4321', + 'auth': 'cephx', + 'fsid': 'some-fsid'} + + rel_data_get = {'broker_req': 'my-uuid', + 'mds-name': mds_name} + rel_data_set = {'broker-rsp-client-0': 'foobar', + '%s_mds_key' % mds_name: my_mds_key} + rel_data_set.update(settings) + + def fake_relation_get(attribute=None, rid=None, unit=None): + if attribute: + return 
rel_data_get[attribute] + else: + return rel_data_get + + self.relation_get.side_effect = fake_relation_get + + # unconfigured ceph-proxy + with mock.patch.object(hooks, 'log') as log: + hooks.mds_relation_joined() + log.assert_called_with( + 'MDS: FSID or admin key not provided, please configure them', + level='INFO') + + # Configure ceph-proxy with the ceph details. + self.test_config.set('monitor-hosts', settings['ceph-public-address']) + self.test_config.set('fsid', settings['fsid']) + self.test_config.set('admin-key', 'some-admin-key') + + with mock.patch.object(hooks, 'process_requests') as process_requests: + process_requests.return_value = 'foobar' + hooks.mds_relation_joined(relid=rid) + process_requests.assert_called_with('my-uuid') + self.relation_set.assert_called_with( + relation_id=rid, relation_settings=rel_data_set) + @mock.patch('ceph_hooks.emit_cephconf') @mock.patch('ceph_hooks.package_install') def test_update_apt_source(self, mock_package_install, mock_emit_cephconf): From 5bb929612d3021106e12355ef94cb9a8f1543549 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Tue, 27 Jun 2023 13:02:18 +0530 Subject: [PATCH 2510/2699] Adds focal to charmcraft Change-Id: Id74fa86d80e6e7bf16ee048bb6277f082705e8d7 Signed-off-by: utkarshbhatthere --- ceph-radosgw/charmcraft.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index f6121727..ac8ec1e4 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -27,6 +27,9 @@ bases: architectures: - amd64 run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] From 67ffcba41abe077ff675ff1244bf945ba4c53bd8 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Thu, 22 Jun 2023 14:22:05 +0530 Subject: [PATCH 2511/2699] Adds source for application in test bundles Also set channels Change-Id: I21d7865c41898551f3f1b1859388139b4cff2bd1 --- ceph-dashboard/tests/bundles/focal-yoga.yaml | 9 +++++-- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 24 +++++++++++++------ ceph-dashboard/tests/bundles/kinetic-zed.yaml | 16 +++++++++++++ .../tests/bundles/lunar-antelope.yaml | 16 +++++++++++++ 4 files changed, 56 insertions(+), 9 deletions(-) diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml index faf66c50..fb7448a5 100644 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -9,15 +9,15 @@ applications: storage: osd-devices: 'cinder,10G' options: - osd-devices: '/dev/test-non-existent' source: *openstack-origin + osd-devices: '/dev/test-non-existent' channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: - monitor-count: '3' source: *openstack-origin + monitor-count: '3' channel: quincy/edge vault: num_units: 1 @@ -60,15 +60,20 @@ applications: ceph-radosgw: charm: ch:ceph-radosgw num_units: 3 + options: + source: *openstack-origin channel: latest/edge ceph-fs: charm: ch:ceph-fs num_units: 1 + options: + source: *openstack-origin channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi num_units: 2 options: + source: *openstack-origin gateway-metadata-pool: iscsi-foo-metadata channel: latest/edge relations: diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index 4bd82824..541cd412 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ 
b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -1,3 +1,6 @@ +variables: + openstack-origin: &openstack-origin distro + local_overlay_enabled: False series: jammy applications: @@ -7,14 +10,16 @@ applications: storage: osd-devices: 'cinder,10G' options: + source: *openstack-origin osd-devices: '/dev/test-non-existent' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: + source: *openstack-origin monitor-count: '3' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault @@ -23,10 +28,10 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -57,17 +62,22 @@ applications: ceph-radosgw: charm: ch:ceph-radosgw num_units: 3 - channel: latest/edge + channel: quincy/edge + options: + source: *openstack-origin ceph-fs: charm: ch:ceph-fs num_units: 1 - channel: latest/edge + channel: quincy/edge + options: + source: *openstack-origin ceph-iscsi: charm: ch:ceph-iscsi num_units: 2 options: + source: *openstack-origin gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge + channel: quincy/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-dashboard/tests/bundles/kinetic-zed.yaml b/ceph-dashboard/tests/bundles/kinetic-zed.yaml index 93095327..300db748 100644 --- a/ceph-dashboard/tests/bundles/kinetic-zed.yaml +++ b/ceph-dashboard/tests/bundles/kinetic-zed.yaml @@ -1,18 +1,24 @@ local_overlay_enabled: False series: kinetic +variables: + source: &source distro applications: ceph-osd: charm: ch:ceph-osd + series: kinetic num_units: 6 storage: osd-devices: 'cinder,10G' options: + source: *source osd-devices: '/dev/test-non-existent' channel: latest/edge ceph-mon: charm: ch:ceph-mon + series: kinetic num_units: 3 options: + source: *source monitor-count: '3' channel: latest/edge vault: @@ -23,6 +29,8 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 + options: + source: *source channel: latest/edge vault-mysql-router: charm: ch:mysql-router @@ -51,16 +59,24 @@ applications: series: focal ceph-radosgw: charm: ch:ceph-radosgw + series: kinetic num_units: 3 + options: + source: *source channel: latest/edge ceph-fs: charm: ch:ceph-fs + series: kinetic num_units: 1 + options: + source: *source channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi + series: kinetic num_units: 2 options: + source: *source gateway-metadata-pool: iscsi-foo-metadata channel: latest/edge relations: diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml index 9ce0237b..209d56e6 100644 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -1,18 +1,24 @@ local_overlay_enabled: False series: lunar +variables: + source: &source distro applications: ceph-osd: charm: ch:ceph-osd + series: lunar num_units: 6 storage: osd-devices: 'cinder,10G' options: + source: *source osd-devices: '/dev/test-non-existent' channel: latest/edge ceph-mon: charm: ch:ceph-mon + series: lunar num_units: 3 options: + source: *source monitor-count: '3' channel: latest/edge vault: @@ -23,6 +29,8 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 + options: + source: *source channel: latest/edge vault-mysql-router: charm: ch:mysql-router @@ -51,16 +59,24 @@ 
applications: series: focal ceph-radosgw: charm: ch:ceph-radosgw + series: lunar num_units: 3 + options: + source: *source channel: latest/edge ceph-fs: charm: ch:ceph-fs + series: lunar num_units: 1 + options: + source: *source channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi + series: lunar num_units: 2 options: + source: *source gateway-metadata-pool: iscsi-foo-metadata channel: latest/edge relations: From fd054bf5d1cb6ea69247f8237f52f15fbe35dcb5 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Thu, 1 Jun 2023 15:57:49 +0530 Subject: [PATCH 2512/2699] Adds skip logic to non-leader units Non leader units will skip the event handling if Ceph dashboard is not enabled by the leader yet. Some test bundle fixes. Partial-Bug: #1952282 Change-Id: I743e50663ee85c91af4962d7d100e2fd48efa48c --- ceph-dashboard/src/charm.py | 11 ++++++++--- ceph-dashboard/tests/bundles/focal-yoga.yaml | 12 ++++++------ ceph-dashboard/tests/bundles/focal.yaml | 2 +- ceph-dashboard/tests/bundles/jammy-antelope.yaml | 6 +++--- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 2 +- ceph-dashboard/tests/bundles/jammy-zed.yaml | 4 ++-- ceph-dashboard/tests/bundles/kinetic-zed.yaml | 2 +- ceph-dashboard/tests/bundles/lunar-antelope.yaml | 16 ++++++++-------- 8 files changed, 30 insertions(+), 25 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index a5d7fc14..4fa7aa0e 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -415,14 +415,19 @@ def _apply_ceph_config_from_charm_config(self) -> None: "Skipping charm option {}, not supported".format( option.charm_option_name)) - def _configure_dashboard(self, _) -> None: + def _configure_dashboard(self, event) -> None: """Configure dashboard""" self.request_certificates() if not self.mon.mons_ready: logging.info("Not configuring dashboard, mons not ready") return - if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled(): - ceph_utils.mgr_enable_dashboard() + if not ceph_utils.is_dashboard_enabled(): + if self.unit.is_leader(): + ceph_utils.mgr_enable_dashboard() + else: + logging.info("Dashboard not enabled, deferring event.") + return + self._apply_ceph_config_from_charm_config() self._configure_tls() ceph_utils.mgr_config_set( diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml index fb7448a5..4fea6771 100644 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -7,7 +7,7 @@ applications: charm: ch:ceph-osd num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' @@ -29,10 +29,10 @@ applications: num_units: 3 options: source: *openstack-origin - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -62,20 +62,20 @@ applications: num_units: 3 options: source: *openstack-origin - channel: latest/edge + channel: quincy/edge ceph-fs: charm: ch:ceph-fs num_units: 1 options: source: *openstack-origin - channel: latest/edge + channel: quincy/edge ceph-iscsi: charm: ch:ceph-iscsi num_units: 2 options: source: *openstack-origin gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge + channel: quincy/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 3097ee77..3c3cde65 
100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ b/ceph-dashboard/tests/bundles/focal.yaml @@ -5,7 +5,7 @@ applications: charm: ch:ceph-osd num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' channel: latest/edge diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml index 10d53b62..dfb6f2e8 100644 --- a/ceph-dashboard/tests/bundles/jammy-antelope.yaml +++ b/ceph-dashboard/tests/bundles/jammy-antelope.yaml @@ -7,7 +7,7 @@ applications: charm: ch:ceph-osd num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin @@ -27,10 +27,10 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index 541cd412..dd81eb90 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -8,7 +8,7 @@ applications: charm: ch:ceph-osd num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' diff --git a/ceph-dashboard/tests/bundles/jammy-zed.yaml b/ceph-dashboard/tests/bundles/jammy-zed.yaml index 694d84d1..90ba7203 100644 --- a/ceph-dashboard/tests/bundles/jammy-zed.yaml +++ b/ceph-dashboard/tests/bundles/jammy-zed.yaml @@ -7,7 +7,7 @@ applications: charm: ch:ceph-osd num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin @@ -30,7 +30,7 @@ applications: channel: latest/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: diff --git a/ceph-dashboard/tests/bundles/kinetic-zed.yaml b/ceph-dashboard/tests/bundles/kinetic-zed.yaml index 300db748..a7df8a7e 100644 --- a/ceph-dashboard/tests/bundles/kinetic-zed.yaml +++ b/ceph-dashboard/tests/bundles/kinetic-zed.yaml @@ -8,7 +8,7 @@ applications: series: kinetic num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: source: *source osd-devices: '/dev/test-non-existent' diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml index 209d56e6..e2a1ba28 100644 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -8,11 +8,11 @@ applications: series: lunar num_units: 6 storage: - osd-devices: 'cinder,10G' + osd-devices: 'cinder,10G,2' options: source: *source osd-devices: '/dev/test-non-existent' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon series: lunar @@ -20,7 +20,7 @@ applications: options: source: *source monitor-count: '3' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault @@ -31,10 +31,10 @@ applications: num_units: 3 options: source: *source - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -63,14 +63,14 @@ applications: num_units: 3 
options: source: *source - channel: latest/edge + channel: quincy/edge ceph-fs: charm: ch:ceph-fs series: lunar num_units: 1 options: source: *source - channel: latest/edge + channel: quincy/edge ceph-iscsi: charm: ch:ceph-iscsi series: lunar num_units: 2 options: source: *source gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge + channel: quincy/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' From 8b0d7edd00e070cc213f1c2724334e7c60030dda Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Wed, 29 Jun 2022 15:08:15 -0500 Subject: [PATCH 2513/2699] Ensure broker requests are re-processed on upgrade-charm When broker-request caching was added, it broke functionality that ensured that clients were updated on charm-upgrade. This change enables a bypass of that cache functionality and uses it to re-process broker requests in the upgrade-charm hook. Depends-On: https://review.opendev.org/c/openstack/charms.ceph/+/848311 Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1066 Closes-Bug: #1968369 Change-Id: Ibdad1fd5976fdf2d5f3384f1b120b0d5dda34947 --- ceph-mon/src/ceph_client.py | 6 ++- ceph-mon/src/ceph_hooks.py | 60 +++++++++++++++++--------- ceph-mon/tests/tests.yaml | 2 + ceph-mon/unit_tests/test_ceph_hooks.py | 13 +++--- 4 files changed, 53 insertions(+), 28 deletions(-) diff --git a/ceph-mon/src/ceph_client.py b/ceph-mon/src/ceph_client.py index 2a122d8d..900b6327 100644 --- a/ceph-mon/src/ceph_client.py +++ b/ceph-mon/src/ceph_client.py @@ -139,7 +139,7 @@ def _req_already_treated(self, request_id): return request_id in self._stored.processed def _handle_broker_request( - self, relation, unit, add_legacy_response=False): + self, relation, unit, add_legacy_response=False, force=False): """Retrieve broker request from relation, process, return response data. :param event: Operator event for the relation :param add_legacy_response: Whether to add the legacy response as well as the new way. :type add_legacy_response: bool :returns: Dictionary of response data ready for use with relation_set. + :param force: Whether to re-process broker requests.
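+ Set on charm upgrade to re-apply requests that were already answered, e.g. so that cephx key permission changes take effect.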
+ :type force: bool :rtype: dict """ def _get_broker_req_id(request): @@ -186,7 +188,7 @@ def _get_broker_req_id(request): broker_req_id)) return {} - if self._req_already_treated(broker_req_id): + if self._req_already_treated(broker_req_id) and not force: logger.debug( "Ignoring already executed broker request {}".format( broker_req_id)) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 857b9675..ed4935ac 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -595,10 +595,10 @@ def attempt_mon_cluster_bootstrap(): return True -def notify_relations(): - notify_osds() - notify_radosgws() - notify_rbd_mirrors() +def notify_relations(reprocess_broker_requests=False): + notify_osds(reprocess_broker_requests=reprocess_broker_requests) + notify_radosgws(reprocess_broker_requests=reprocess_broker_requests) + notify_rbd_mirrors(reprocess_broker_requests=reprocess_broker_requests) notify_prometheus() @@ -614,22 +614,29 @@ def notify_prometheus(): module_enabled=module_enabled) -def notify_osds(): +def notify_osds(reprocess_broker_requests=False): for relid in relation_ids('osd'): for unit in related_units(relid): - osd_relation(relid=relid, unit=unit) + osd_relation( + relid=relid, unit=unit, + reprocess_broker_requests=reprocess_broker_requests) -def notify_radosgws(): +def notify_radosgws(reprocess_broker_requests=False): for relid in relation_ids('radosgw'): for unit in related_units(relid): - radosgw_relation(relid=relid, unit=unit) + radosgw_relation( + relid=relid, unit=unit, + reprocess_broker_requests=reprocess_broker_requests) -def notify_rbd_mirrors(): +def notify_rbd_mirrors(reprocess_broker_requests=False): for relid in relation_ids('rbd-mirror'): for unit in related_units(relid): - rbd_mirror_relation(relid=relid, unit=unit, recurse=False) + rbd_mirror_relation( + relid=relid, unit=unit, + recurse=False, + reprocess_broker_requests=reprocess_broker_requests) def req_already_treated(request_id, relid, req_unit): @@ -738,7 +745,7 @@ def _get_request(relation_data): def handle_broker_request(relid, unit, add_legacy_response=False, - recurse=True): + recurse=True, force=False): """Retrieve broker request from relation, process, return response data. :param relid: Relation ID @@ -752,6 +759,9 @@ def handle_broker_request(relid, unit, add_legacy_response=False, not. Mainly used to handle recursion when called from notify_rbd_mirrors() :type recurse: bool + :param force: Process broker requests even if they have already been + processed. + :type force: bool :returns: Dictionary of response data ready for use with relation_set.
:rtype: dict """ @@ -784,7 +794,7 @@ def _get_broker_req_id(request): level=DEBUG) return {} - if req_already_treated(broker_req_id, relid, unit): + if req_already_treated(broker_req_id, relid, unit) and not force: log("Ignoring already executed broker request {}".format( broker_req_id), level=DEBUG) @@ -820,7 +830,7 @@ def _get_broker_req_id(request): @hooks.hook('osd-relation-joined') @hooks.hook('osd-relation-changed') -def osd_relation(relid=None, unit=None): +def osd_relation(relid=None, unit=None, reprocess_broker_requests=False): if ceph.is_quorum(): log('mon cluster in quorum - providing fsid & keys') public_addr = get_public_addr() @@ -855,7 +865,8 @@ def osd_relation(relid=None, unit=None): ) } - data.update(handle_broker_request(relid, unit)) + data.update(handle_broker_request( + relid, unit, force=reprocess_broker_requests)) relation_set(relation_id=relid, relation_settings=data) @@ -968,7 +979,7 @@ def dashboard_relation(relid=None): @hooks.hook('radosgw-relation-changed') @hooks.hook('radosgw-relation-joined') -def radosgw_relation(relid=None, unit=None): +def radosgw_relation(relid=None, unit=None, reprocess_broker_requests=False): # Install radosgw for admin tools apt_install(packages=filter_installed_packages(['radosgw'])) if not unit: @@ -997,13 +1008,16 @@ def radosgw_relation(relid=None, unit=None): # Old style global radosgw key data['radosgw_key'] = ceph.get_radosgw_key() - data.update(handle_broker_request(relid, unit)) + data.update(handle_broker_request( + relid, unit, force=reprocess_broker_requests)) relation_set(relation_id=relid, relation_settings=data) @hooks.hook('rbd-mirror-relation-joined') @hooks.hook('rbd-mirror-relation-changed') -def rbd_mirror_relation(relid=None, unit=None, recurse=True): +def rbd_mirror_relation( + relid=None, unit=None, recurse=True, + reprocess_broker_requests=False): ''' Handle the rbd mirror relation @@ -1029,7 +1043,8 @@ def get_pool_details(): return ceph.list_pools_detail() # handle broker requests first to get an updated pool map - data = (handle_broker_request(relid, unit, recurse=recurse)) + data = (handle_broker_request( + relid, unit, recurse=recurse, force=reprocess_broker_requests)) data.update({ 'auth': 'cephx', 'ceph-public-address': get_public_addr(), @@ -1061,7 +1076,8 @@ def get_pool_details(): @hooks.hook('mds-relation-changed') @hooks.hook('mds-relation-joined') -def mds_relation_joined(relid=None, unit=None): +def mds_relation_joined( + relid=None, unit=None, reprocess_broker_requests=False): if ready_for_service(): log('mon cluster in quorum and osds bootstrapped ' '- providing mds client with keys') @@ -1078,7 +1094,9 @@ def mds_relation_joined( ceph.get_mds_key(name=mds_name), 'auth': 'cephx', 'ceph-public-address': public_addr} - data.update(handle_broker_request(relid, unit)) + data.update( + handle_broker_request( + relid, unit, force=reprocess_broker_requests)) relation_set(relation_id=relid, relation_settings=data) @@ -1131,7 +1149,7 @@ def upgrade_charm(): # NOTE(jamespage): # Reprocess broker requests to ensure that any cephx # key permission changes are applied - notify_relations() + notify_relations(reprocess_broker_requests=True) @hooks.hook('nrpe-external-master-relation-joined') diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 197e9205..f90e6757 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -19,6 +19,7 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest -
zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest + - zaza.openstack.charm_tests.ceph.mon.tests.CephPermissionUpgradeTest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephTest @@ -35,3 +36,4 @@ tests: # Tests from quincy. - zaza.openstack.charm_tests.ceph.tests.CephAuthTest - zaza.openstack.charm_tests.ceph.tests.CephMonActionsTest + - zaza.openstack.charm_tests.ceph.mon.tests.CephPermissionUpgradeTest diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index f7a4f17c..b11bde57 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -240,7 +240,8 @@ def test_upgrade_charm_with_nrpe_relation_installs_dependencies( ceph_hooks.upgrade_charm() mocks["apt_install"].assert_called_with( "lockfile-progs", fatal=True) - mock_notify_radosgws.assert_called_once_with() + mock_notify_radosgws.assert_called_once_with( + reprocess_broker_requests=True) mock_ceph.update_monfs.assert_called_once_with() mock_notify_prometheus.assert_called_once_with() mock_service_pause.assert_called_with('ceph-create-keys') @@ -255,9 +256,11 @@ def test_notify_rbd_mirrors(self, mock_relation_ids, mock_related_units, ceph_hooks.notify_rbd_mirrors() mock_relation_ids.assert_called_once_with('rbd-mirror') mock_related_units.assert_called_once_with('arelid') - mock_rbd_mirror_relation.assert_called_once_with(relid='arelid', - unit='aunit', - recurse=False) + mock_rbd_mirror_relation.assert_called_once_with( + relid='arelid', + unit='aunit', + recurse=False, + reprocess_broker_requests=False) @patch.object(ceph_hooks, 'uuid') @patch.object(ceph_hooks, 'relation_set') @@ -892,7 +895,7 @@ def test_rbd_mirror_relation(self, ] ceph_hooks.rbd_mirror_relation('rbd-mirror:51', 'ceph-rbd-mirror/0') self.handle_broker_request.assert_called_with( - 'rbd-mirror:51', 'ceph-rbd-mirror/0', recurse=True) + 'rbd-mirror:51', 'ceph-rbd-mirror/0', recurse=True, force=False) self.relation_set.assert_called_with( relation_id='rbd-mirror:51', relation_settings=base_relation_settings) From 5bc76a462f56be6975a185725595d3975c3c71f3 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 5 Jul 2023 16:23:31 -0300 Subject: [PATCH 2514/2699] Fix ceph-mon upgrade path This PR makes some small changes in the upgrade path logic by providing a fallback method of fetching the current ceph-mon version and adding additional checks to see if the upgrade can be done in a sane way. Closes-Bug: #2024253 Change-Id: I1ca4316aaf4f0b855a12aa582a8188c88e926fa6 --- ceph-mon/src/ceph_hooks.py | 29 ++++++++++++++++- ceph-mon/unit_tests/test_upgrade.py | 49 +++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index ed4935ac..a04fc7bd 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -112,6 +112,19 @@ HOST_OSD_COUNT_REPORT = '{}/host-osd-report.json'.format(NAGIOS_FILE_FOLDER) +def get_current_ceph_version(): + try: + out = subprocess.check_output(['ceph-mon', '-v']).decode('utf-8') + except subprocess.CalledProcessError as exc: + log(("failed to get ceph version: %s. 
check that the ceph-mon " + "binary is installed and runs correctly") % str(exc), + level=ERROR) + return '' + + # ceph version X.Y.Z (HASH) version-name (stable) + return out.split()[4] + + def check_for_upgrade(): if not ceph.is_bootstrapped(): log("Ceph is not bootstrapped, skipping upgrade checks.") @@ -120,9 +133,21 @@ def check_for_upgrade(): c = hookenv.config() old_version = ceph.resolve_ceph_version(c.previous('source') or 'distro') + + if not old_version: + old_version = get_current_ceph_version() + if not old_version: + log(("failed to get ceph version. check that the ceph-mon " + "binary is installed and runs correctly"), level=ERROR) + return + log('old_version: {}'.format(old_version)) - # Strip all whitespace + new_version = ceph.resolve_ceph_version(hookenv.config('source')) + if not new_version: + log(("new version not found. make sure the 'source' option has " + "been set and try again (using 'distro' may help)"), level=WARNING) + return old_version_os = get_os_codename_install_source(c.previous('source') or 'distro') @@ -137,6 +162,8 @@ def check_for_upgrade(): ceph.roll_monitor_cluster(new_version=new_version, upgrade_key='admin') elif (old_version == new_version and + old_version_os is not None and + new_version_os is not None and old_version_os < new_version_os): # See LP: #1778823 add_source(hookenv.config('source'), hookenv.config('key')) diff --git a/ceph-mon/unit_tests/test_upgrade.py b/ceph-mon/unit_tests/test_upgrade.py index f60bb43b..9c3c6335 100644 --- a/ceph-mon/unit_tests/test_upgrade.py +++ b/ceph-mon/unit_tests/test_upgrade.py @@ -1,6 +1,7 @@ from unittest.mock import patch from ceph_hooks import check_for_upgrade from test_utils import CharmTestCase +from charms_ceph.utils import resolve_ceph_version as resolve_ceph_version_orig __author__ = 'Chris Holcombe ' @@ -76,3 +77,51 @@ def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, check_for_upgrade() roll_monitor_cluster.assert_not_called() add_source.assert_called_with('cloud:bionic-stein', 'some-key') + + @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.subprocess.check_output') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_no_current_version(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source, check_output, + resolve_ceph_version): + _resolve_first = True + + def _resolve_version(arg): + nonlocal _resolve_first + if _resolve_first: + _resolve_first = False + return None + return resolve_ceph_version_orig(arg) + + resolve_ceph_version.side_effect = _resolve_version + check_output.return_value = b""" +ceph version 16.2.13 (123) pacific (stable)""" + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + self.test_config.set('source', 'cloud:focal-yoga') + check_for_upgrade() + roll_monitor_cluster.assert_called() + add_source.assert_not_called() + + @patch('ceph_hooks.ceph.resolve_ceph_version') + @patch('ceph_hooks.subprocess.check_output') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + def test_check_for_upgrade_no_versions(self, roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source, check_output, + resolve_ceph_version): + resolve_ceph_version.return_value = None + check_output.return_value = b""" +ceph version 17.2.5 (456) quincy (stable)""" + is_bootstrapped.return_value = True + hookenv.config.side_effect = self.test_config + check_for_upgrade() + roll_monitor_cluster.assert_not_called() + add_source.assert_not_called()
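The fallback parsing exercised above reduces to a one-line split. A minimal standalone sketch (the sample string mirrors the unit-test data; the field at index 4 is the release codename, which is also what ceph.resolve_ceph_version() yields for a configured source):

    out = 'ceph version 16.2.13 (123) pacific (stable)'
    # tokens: 0='ceph' 1='version' 2='16.2.13' 3='(123)' 4='pacific' 5='(stable)'
    codename = out.split()[4]
    assert codename == 'pacific'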
From 5196b62a74772f4e13740c12f3860cc7303ee48c Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 10 Jul 2023 09:36:25 +0200 Subject: [PATCH 2515/2699] Set consistent source Avoid the unintuitive situation where users are deploying from channel=quincy but get an older ceph due to deploying series=focal by explicitly setting source=quincy, which is what most users want anyway; those that do not can still explicitly set source. Change-Id: I9428e93ba6107ba5e2ebcc667995b3d88eb03d27 --- ceph-mon/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index fb69ac2e..1a5375de 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -10,7 +10,7 @@ options: If set to True, supporting services will log to syslog. source: type: string - default: distro + default: quincy description: | Optional configuration to support use of additional sources such as: . From da0d3c9a1d1fd33cfdbba8b6e521587ad1e4d5b2 Mon Sep 17 00:00:00 2001 From: Jadon Naas Date: Tue, 18 Jul 2023 13:54:09 -0400 Subject: [PATCH 2516/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents.
When the new charm revision is released to the Charmhub, this Discourse-based content will be displayed there. In the absence of this new key, the Charmhub's default behaviour is to display the value of the charm's 'description' key. Change-Id: I173cadb5a8208283883e1119dbfc5d661809cc5f --- ceph-mon/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 8418e6b0..f94c0360 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -4,6 +4,7 @@ maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. +docs: https://discourse.charmhub.io/t/ceph-mon-docs-index/10544 tags: - openstack - storage From 0c3227beea135ca5537b1f3e62c024208b4e8709 Mon Sep 17 00:00:00 2001 From: Jadon Naas Date: Tue, 18 Jul 2023 13:58:31 -0400 Subject: [PATCH 2517/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents. When the new charm revision is released to the Charmhub, this Discourse-based content will be displayed there. In the absence of this new key, the Charmhub's default behaviour is to display the value of the charm's 'description' key. Change-Id: I0fb5e14bd2e58d4abdcce6e230d5c4c1a1d00af7 --- ceph-osd/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 097d3302..9d81d0f9 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -21,6 +21,7 @@ description: | . This charm provides the Ceph OSD personality for expanding storage capacity within a ceph deployment. +docs: https://discourse.charmhub.io/t/ceph-osd-docs-index/10545 extra-bindings: public: cluster: From 8fa699513aa561e5c5428e730e19d2418abeafeb Mon Sep 17 00:00:00 2001 From: Jadon Naas Date: Tue, 18 Jul 2023 14:14:32 -0400 Subject: [PATCH 2518/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents.
Also deploy supporting charms for lunar on jammy for stability Change-Id: I805b28bf625d69626fcf663530ac93073d298732 --- ceph-osd/tests/bundles/jammy-antelope.yaml | 36 ++++++++--------- ceph-osd/tests/bundles/jammy-zed.yaml | 36 ++++++++--------- ceph-osd/tests/bundles/lunar-antelope.yaml | 46 +++++++++++++--------- 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/ceph-osd/tests/bundles/jammy-antelope.yaml b/ceph-osd/tests/bundles/jammy-antelope.yaml index 41b6acbb..48a8b36a 100644 --- a/ceph-osd/tests/bundles/jammy-antelope.yaml +++ b/ceph-osd/tests/bundles/jammy-antelope.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin cloud:jammy-antelope -series: focal +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -30,30 +30,28 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-osd: charm: ../../ceph-osd.charm @@ -84,11 +82,9 @@ applications: rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True @@ -98,7 +94,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -107,7 +103,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: yoga/edge + channel: 2023.1/edge glance: expose: True @@ -117,7 +113,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: 2023.1/edge cinder: expose: True @@ -129,11 +125,11 @@ applications: glance-api-version: '2' to: - '13' - channel: yoga/edge + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -143,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: 2023.1/edge placement: charm: ch:placement @@ -152,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: 2023.1/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/jammy-zed.yaml b/ceph-osd/tests/bundles/jammy-zed.yaml index 41b6acbb..9b4960bf 100644 --- a/ceph-osd/tests/bundles/jammy-zed.yaml +++ b/ceph-osd/tests/bundles/jammy-zed.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin cloud:jammy-zed -series: focal +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' @@ -30,30 +30,28 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-osd: charm: ../../ceph-osd.charm @@ -84,11 +82,9 @@ applications: rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True @@ -98,7 +94,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: zed/edge nova-compute: charm: ch:nova-compute @@ -107,7 +103,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: yoga/edge + channel: zed/edge glance: expose: True @@ -117,7 +113,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: zed/edge cinder: expose: True @@ -129,11 +125,11 @@ applications: glance-api-version: '2' to: - '13' - channel: yoga/edge + channel: zed/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: zed/edge nova-cloud-controller: expose: True @@ -143,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: zed/edge placement: charm: ch:placement @@ -152,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: zed/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/lunar-antelope.yaml b/ceph-osd/tests/bundles/lunar-antelope.yaml index a4a640e8..85692211 100644 --- a/ceph-osd/tests/bundles/lunar-antelope.yaml +++ b/ceph-osd/tests/bundles/lunar-antelope.yaml @@ -1,17 +1,22 @@ variables: openstack-origin: &openstack-origin distro + # use infra (mysql, rabbit) from lts for stability + infra-series: &infra-series jammy -series: jammy +series: lunar comment: - 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': constraints: mem=3072M + series: *infra-series '1': constraints: mem=3072M + series: *infra-series '2': constraints: mem=3072M + series: *infra-series '3': '4': '5': @@ -19,41 +24,46 @@ machines: '7': '8': '9': + series: *infra-series '10': + series: *infra-series '11': + series: *infra-series '12': + series: *infra-series '13': + series: *infra-series '14': + series: *infra-series '15': + series: *infra-series applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-osd: charm: ../../ceph-osd.charm @@ -84,11 +94,9 @@ applications: rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True @@ -98,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -107,7 +115,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: yoga/edge + channel: 2023.1/edge glance: expose: True @@ -117,7 +125,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: 2023.1/edge cinder: expose: True @@ -129,11 +137,11 @@ applications: glance-api-version: '2' to: - '13' - channel: yoga/edge + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -143,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: 2023.1/edge placement: charm: ch:placement @@ -152,7 +160,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: 2023.1/edge relations: - - 'nova-compute:amqp' From fd1d066b54199cabb2c7142a2af73a42cf448ea3 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2522/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic Change-Id: I2cc4d9bb350d86b356d9ffe2bc62987c23f747fd --- ceph-rbd-mirror/charmcraft.yaml | 4 +- ceph-rbd-mirror/src/metadata.yaml | 2 +- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 31 ++--- .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 112 ++++++++++++++---- ceph-rbd-mirror/src/tests/tests.yaml | 9 +- 5 files changed, 109 insertions(+), 49 deletions(-) rename ceph-rbd-mirror/src/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (88%) rename ceph-rbd-mirror/src/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (69%) diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 47444bf4..8e9445e5 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -28,9 +28,9 @@ bases: 
channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index 8bcc0f7a..4caab2ee 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -17,8 +17,8 @@ tags: series: - focal - jammy -- kinetic - lunar +- mantic extra-bindings: public: cluster: diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml similarity index 88% rename from ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml rename to ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml index e9ab0c36..ae4f6f43 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-zed.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml @@ -1,9 +1,10 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin cloud:jammy-bobcat + series: &series jammy local_overlay_enabled: False -series: &series jammy +series: *series machines: '0': @@ -17,13 +18,13 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -32,14 +33,14 @@ applications: - '0' - '1' - '2' - channel: 8.0/edge + channel: 8.0.19/edge keystone: charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -53,26 +54,26 @@ applications: block-device: None glance-api-version: 2 openstack-origin: *openstack-origin - channel: zed/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph num_units: 0 - channel: zed/edge + channel: latest/edge glance: charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge + channel: latest/edge nova-compute: charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon @@ -80,7 +81,7 @@ applications: options: expected-osd-count: 3 source: *openstack-origin - channel: quincy/edge + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -92,7 +93,7 @@ applications: bluestore: False use-direct-io: False osd-devices: '/dev/test-non-existent' - channel: quincy/edge + channel: latest/edge ceph-rbd-mirror: series: *series @@ -107,7 +108,7 @@ applications: options: expected-osd-count: 3 source: *openstack-origin - channel: quincy/edge + channel: latest/edge ceph-osd-b: charm: ch:ceph-osd @@ -119,7 +120,7 @@ applications: bluestore: False use-direct-io: False osd-devices: '/dev/test-non-existent' - channel: quincy/edge + channel: latest/edge ceph-rbd-mirror-b: series: *series diff --git a/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml b/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml similarity index 69% rename from ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml rename to ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml index 1631de19..ab2f69d3 100644 --- a/ceph-rbd-mirror/src/tests/bundles/kinetic-zed.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml @@ -1,52 +1,86 @@ variables: openstack-origin: 
&openstack-origin distro + series: &series mantic + infra-series: &infra-series jammy local_overlay_enabled: False -series: &series kinetic +series: *series +comment: +- 'machines section to decide order of deployment. database sooner = faster' machines: '0': - constraints: "mem=3072M" + constraints: mem=3072M + series: *infra-series '1': - constraints: "mem=3072M" + constraints: mem=3072M + series: *infra-series '2': - constraints: "mem=3072M" + constraints: mem=3072M + series: *infra-series + '3': + series: *infra-series + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + '20': + '21': applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 options: - source: *openstack-origin + # Reduce chances of spurious "super-read-only" failures, see lp:1882205 + expel-timeout: 20 to: - '0' - '1' - '2' - channel: 8.0/edge + channel: 8.0.19/edge + series: *infra-series + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '3' + channel: 3.9/edge + series: *infra-series keystone: charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - channel: 3.9/edge + to: + - '4' + channel: latest/edge cinder: charm: ch:cinder @@ -54,34 +88,45 @@ applications: options: block-device: None glance-api-version: 2 - channel: zed/edge + to: + - '5' + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph num_units: 0 - channel: zed/edge + channel: latest/edge glance: charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge + to: + - '6' + channel: latest/edge nova-compute: charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin - channel: zed/edge + to: + - '7' + channel: latest/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: - expected-osd-count: 3 source: *openstack-origin - channel: quincy/edge + monitor-count: '3' + expected-osd-count: 3 + to: + - '8' + - '9' + - '10' + channel: latest/edge ceph-osd: charm: ch:ceph-osd @@ -93,7 +138,11 @@ applications: bluestore: False use-direct-io: False osd-devices: '/dev/test-non-existent' - channel: quincy/edge + to: + - '11' + - '12' + - '13' + channel: latest/edge ceph-rbd-mirror: series: *series @@ -101,14 +150,21 @@ applications: num_units: 1 options: source: *openstack-origin + to: + - '14' ceph-mon-b: charm: ch:ceph-mon num_units: 3 options: - expected-osd-count: 3 source: *openstack-origin - channel: quincy/edge + monitor-count: '3' + expected-osd-count: 3 + to: + - '15' + - '16' + - '17' + channel: latest/edge ceph-osd-b: charm: ch:ceph-osd @@ -120,7 +176,11 @@ applications: bluestore: False use-direct-io: False osd-devices: '/dev/test-non-existent' - channel: quincy/edge + to: + - '18' + - '19' + - '20' + channel: latest/edge ceph-rbd-mirror-b: series: *series @@ -128,6 +188,8 @@ applications: num_units: 1 options: source: *openstack-origin + to: + - '21' relations: diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index c318138b..fca4a427 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -6,6 +6,8 @@ gate_bundles: - 
focal-xena-image-mirroring - impish-xena - impish-xena-image-mirroring +- jammy-bobcat +- mantic-bobcat comment: | The e2e bundles are useful for development but add no additional value to the functional tests. dev_bundles: - focal-yoga - focal-yoga-image-mirroring - jammy-yoga - jammy-yoga-image-mirroring -- jammy-zed -- jammy-zed-image-mirroring -- kinetic-zed -- kinetic-zed-image-mirroring configure: - zaza.openstack.charm_tests.glance.setup.add_cirros_image - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest tests_options: force_deploy: - - kinetic-zed - - kinetic-zed-image-mirroring + - mantic-bobcat From 5dbc93ca4b3da90a4cdd94003834f58ed38e212e Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 25 Jul 2023 16:48:34 -0400 Subject: [PATCH 2523/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents. When the new charm revision is released to the Charmhub, this Discourse-based content will be displayed there. In the absence of this new key, the Charmhub's default behaviour is to display the value of the charm's 'description' key. Change-Id: Ia414ab7d30db9443cf0d9d26071ca48b772f8d1e --- ceph-iscsi/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index f67f4d3a..6c69da50 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -5,6 +5,7 @@ description: | The iSCSI gateway is integrating Ceph Storage with the iSCSI standard to provide a Highly Available (HA) iSCSI target that exports RADOS Block Device (RBD) images as SCSI disks. +docs: https://discourse.charmhub.io/t/ceph-iscsi-docs-index/11222 tags: - openstack - storage From adcf333598a3680725d7172942ecf5d14eeb437e Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 25 Jul 2023 17:12:49 -0400 Subject: [PATCH 2524/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents.
+docs: https://discourse.charmhub.io/t/ceph-nfs-docs-index/11224 tags: - storage - misc @@ -24,4 +25,4 @@ requires: scope: container peers: cluster: - interface: ceph-nfs-peer \ No newline at end of file + interface: ceph-nfs-peer From 723eacda52652aef9607f2556da2b85c54c2412b Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2525/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic Change-Id: I7449eba63107b43525359fb92ae1a0ad9e648bab --- ceph-osd/charmcraft.yaml | 4 +- ceph-osd/config.yaml | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 7 ++ .../contrib/openstack/cert_utils.py | 11 +++ .../charmhelpers/contrib/openstack/context.py | 4 + .../charmhelpers/contrib/openstack/utils.py | 3 +- .../contrib/storage/linux/ceph.py | 23 ++++- .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 12 ++- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 36 +++---- ceph-osd/lib/charms_ceph/broker.py | 3 +- ceph-osd/lib/charms_ceph/utils.py | 93 +++++++++++++------ ceph-osd/metadata.yaml | 2 +- ceph-osd/osci.yaml | 1 - .../{kinetic-zed.yaml => jammy-bobcat.yaml} | 22 ++--- .../{jammy-zed.yaml => mantic-bobcat.yaml} | 46 +++++---- ceph-osd/tests/tests.yaml | 2 + 17 files changed, 179 insertions(+), 93 deletions(-) rename ceph-osd/tests/bundles/{kinetic-zed.yaml => jammy-bobcat.yaml} (93%) rename ceph-osd/tests/bundles/{jammy-zed.yaml => mantic-bobcat.yaml} (83%) diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index 68b48b28..777dc20f 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -34,8 +34,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 2f0f94d9..a2b2ed75 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -5,7 +5,7 @@ options: description: OSD debug level. Max is 20. source: type: string - default: yoga + default: quincy description: | Optional configuration to support use of additional sources such as: . diff --git a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py index ffda5fe1..7b309256 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-osd/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -221,6 +221,13 @@ def https(): return True if config_get('ssl_cert') and config_get('ssl_key'): return True + # Local import to avoid ciruclar dependency. 
+ import charmhelpers.contrib.openstack.cert_utils as cert_utils + if ( + cert_utils.get_certificate_request() and not + cert_utils.get_requests_for_local_unit("certificates") + ): + return False for r_id in relation_ids('certificates'): for unit in relation_list(r_id): ca = relation_get('ca', rid=r_id, unit=unit) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index 5c961c58..a25ca995 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -409,6 +409,9 @@ def get_requests_for_local_unit(relation_name=None): relation_name = relation_name or 'certificates' bundles = [] for rid in relation_ids(relation_name): + sent = relation_get(rid=rid, unit=local_unit()) + legacy_keys = ['certificate_name', 'common_name'] + is_legacy_request = set(sent).intersection(legacy_keys) for unit in related_units(rid): data = relation_get(rid=rid, unit=unit) if data.get(raw_certs_key): @@ -416,6 +419,14 @@ def get_requests_for_local_unit(relation_name=None): 'ca': data['ca'], 'chain': data.get('chain'), 'certs': json.loads(data[raw_certs_key])}) + elif is_legacy_request: + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': {sent['common_name']: + {'cert': data.get(local_name + '.server.cert'), + 'key': data.get(local_name + '.server.key')}}}) + return bundles diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index d894b6a6..24a13d0d 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1748,6 +1748,9 @@ def __init__(self, name=None, script=None, admin_script=None, def __call__(self): total_processes = _calculate_workers() + enable_wsgi_rotation = config('wsgi-rotation') + if enable_wsgi_rotation is None: + enable_wsgi_rotation = True ctxt = { "service_name": self.service_name, "user": self.user, @@ -1761,6 +1764,7 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, + "wsgi_rotation": enable_wsgi_rotation, } return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 3d52eb16..e98be2c5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -160,6 +160,7 @@ ('2022.1', 'yoga'), ('2022.2', 'zed'), ('2023.1', 'antelope'), + ('2023.2', 'bobcat'), ]) # The ugly duckling - must list releases oldest to newest @@ -957,7 +958,7 @@ def os_requires_version(ostack_release, pkg): def wrap(f): @wraps(f) def wrapped_f(*args): - if os_release(pkg) < ostack_release: + if CompareOpenStackReleases(os_release(pkg)) < ostack_release: raise Exception("This hook is not supported on releases" " before %s" % ostack_release) f(*args) diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b20b8fe..2e1fc1b5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,7 +28,6 @@ import shutil import json import time -import uuid from subprocess import ( check_call, @@ -1677,6 +1676,10 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. 
""" + # The below hash is the result of running + # `hashlib.sha1('[]'.encode()).hexdigest()` + EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c' + def __init__(self, api_version=1, request_id=None, raw_request_data=None): """Initialize CephBrokerRq object. @@ -1685,8 +1688,12 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): :param api_version: API version for request (default: 1). :type api_version: Optional[int] - :param request_id: Unique identifier for request. - (default: string representation of generated UUID) + :param request_id: Unique identifier for request. The identifier will + be updated as ops are added or removed from the + broker request. This ensures that Ceph will + correctly process requests where operations are + added after the initial request is processed. + (default: sha1 of operations) :type request_id: Optional[str] :param raw_request_data: JSON-encoded string to build request from. :type raw_request_data: Optional[str] @@ -1695,16 +1702,20 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): if raw_request_data: request_data = json.loads(raw_request_data) self.api_version = request_data['api-version'] - self.request_id = request_data['request-id'] self.set_ops(request_data['ops']) + self.request_id = request_data['request-id'] else: self.api_version = api_version if request_id: self.request_id = request_id else: - self.request_id = str(uuid.uuid1()) + self.request_id = CephBrokerRq.EMPTY_LIST_SHA self.ops = [] + def _hash_ops(self): + """Return the sha1 of the requested Broker ops.""" + return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest() + def add_op(self, op): """Add an op if it is not already in the list. @@ -1713,6 +1724,7 @@ def add_op(self, op): """ if op not in self.ops: self.ops.append(op) + self.request_id = self._hash_ops() def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, @@ -1991,6 +2003,7 @@ def set_ops(self, ops): to allow comparisons to ensure validity. 
""" self.ops = ops + self.request_id = self._hash_ops() @property def request(self): diff --git a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py index a279d5be..732d76c3 100644 --- a/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -32,6 +32,7 @@ 'jammy', 'kinetic', 'lunar', + 'mantic', ) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index effc884a..1be992c4 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -238,6 +238,14 @@ 'antelope/proposed': 'jammy-proposed/antelope', 'jammy-antelope/proposed': 'jammy-proposed/antelope', 'jammy-proposed/antelope': 'jammy-proposed/antelope', + # bobcat + 'bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat/updates': 'jammy-updates/bobcat', + 'jammy-updates/bobcat': 'jammy-updates/bobcat', + 'bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -270,6 +278,7 @@ 'yoga', 'zed', 'antelope', + 'bobcat', ) @@ -298,6 +307,7 @@ ('jammy', 'yoga'), ('kinetic', 'zed'), ('lunar', 'antelope'), + ('mantic', 'bobcat'), ]) @@ -591,7 +601,7 @@ def _get_key_by_keyid(keyid): curl_cmd = ['curl', keyserver_url.format(keyid)] # use proxy server settings in order to retrieve the key return subprocess.check_output(curl_cmd, - env=env_proxy_settings(['https'])) + env=env_proxy_settings(['https', 'no_proxy'])) def _dearmor_gpg_key(key_asc): diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 6da355fd..f4dde4a9 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -122,13 +122,12 @@ def dpkg_list(self, packages): :raises: subprocess.CalledProcessError """ pkgs = {} - cmd = ['dpkg-query', '--list'] + cmd = [ + 'dpkg-query', '--show', + '--showformat', + r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n' + ] cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. 
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, @@ -140,24 +139,17 @@ def dpkg_list(self, packages): if cp.returncode != 1: raise output = cp.output - headings = [] for line in output.splitlines(): - if line.startswith('||/'): - headings = line.split() - headings.pop(0) - continue - elif (line.startswith('|') or line.startswith('+') or - line.startswith('dpkg-query:')): + # only process lines for successfully installed packages + if not (line.startswith('ii ') or line.startswith('hi ')): continue - else: - data = line.split(None, 4) - status = data.pop(0) - if status not in ('ii', 'hi'): - continue - pkg = {} - pkg.update({k.lower(): v for k, v in zip(headings, data)}) - if 'name' in pkg: - pkgs.update({pkg['name']: pkg}) + status, name, version, arch, desc = line.split('\t', 4) + pkgs[name] = { + 'name': name, + 'version': version, + 'architecture': arch, + 'description': desc, + } return pkgs def _apt_cache_show(self, packages): diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index d00baedc..90b536fb 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -291,7 +291,8 @@ def pool_permission_list_for_service(service): for prefix in prefixes: permissions.append("allow {} object_prefix {}".format(permission, prefix)) - return ['mon', 'allow r, allow command "osd blacklist"', + return ['mon', ('allow r, allow command "osd blacklist"' + ', allow command "osd blocklist"'), 'osd', ', '.join(permissions)] diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 3633dd4c..01fb9ac9 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -681,15 +681,29 @@ def _get_osd_num_from_dirname(dirname): return match.group('osd_id') +def get_crimson_osd_ids(): + """Return a set of the OSDs that are running with the Crimson backend.""" + rv = set() + try: + out = subprocess.check_output(['pgrep', 'crimson-osd', '-a']) + for line in out.decode('utf8').splitlines(): + rv.add(line.split()[-1]) + except Exception: + pass + + return rv + + def get_local_osd_ids(): """This will list the /var/lib/ceph/osd/* directories and try to split the ID off of the directory name and return it in - a list. + a list. Excludes crimson OSDs from the returned list. :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory.
""" osd_ids = [] + crimson_osds = get_crimson_osd_ids() osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') if os.path.exists(osd_path): try: @@ -698,7 +712,8 @@ def get_local_osd_ids(): osd_id = osd_dir.split('-')[1] if '-' in osd_dir else '' if (_is_int(osd_id) and filesystem_mounted(os.path.join( - os.sep, osd_path, osd_dir))): + os.sep, osd_path, osd_dir)) and + osd_id not in crimson_osds): osd_ids.append(osd_id) except OSError: raise @@ -1134,7 +1149,8 @@ def get_mds_bootstrap_key(): _default_caps = collections.OrderedDict([ ('mon', ['allow r', - 'allow command "osd blacklist"']), + 'allow command "osd blacklist"', + 'allow command "osd blocklist"']), ('osd', ['allow rwx']), ]) @@ -1166,7 +1182,10 @@ def get_mds_bootstrap_key(): ]) rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['profile rbd; allow r']), + ('mon', ['allow profile rbd-mirror-peer', + 'allow command "service dump"', + 'allow command "service status"' + ]), ('osd', ['profile rbd']), ('mgr', ['allow r']), ]) @@ -1212,28 +1231,15 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :returns: Returns a cephx key """ - key_name = 'client.{}'.format(name) - try: - # Does the key already exist? - output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - key_name, - ]).decode('UTF-8')).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! - log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps + key_name = 'client.{}'.format(name) + + key = ceph_auth_get(key_name) + if key: + upgrade_key_caps(key_name, caps) + return key + + log("Creating new key for {}".format(name), level=DEBUG) cmd = [ "sudo", "-u", @@ -1255,6 +1261,7 @@ def get_named_key(name, caps=None, pool_list=None): pools = " ".join(['pool={0}'.format(i) for i in pool_list]) subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) + ceph_auth_get.cache_clear() log("Calling check_output: {}".format(cmd), level=DEBUG) return parse_key(str(subprocess @@ -1263,6 +1270,30 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 +@functools.lru_cache() +def ceph_auth_get(key_name): + try: + # Does the key already exist? 
+ output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key + pass + + def upgrade_key_caps(key, caps, pool_list=None): """Upgrade key to have capabilities caps""" if not is_leader(): @@ -2063,7 +2094,7 @@ def filesystem_mounted(fs): def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] + cmd = ['pgrep', 'ceph-osd|crimson-osd'] try: result = str(subprocess.check_output(cmd).decode('UTF-8')) return result.split() @@ -2514,7 +2545,7 @@ def wait_until(wait_f, timeout=10 * 60): :type timeout: int """ start_time = time.time() - while(not wait_f()): + while not wait_f(): now = time.time() if now > start_time + timeout: raise WatchDog.WatchDogTimeoutException() @@ -3215,6 +3246,9 @@ def dirs_need_ownership_update(service): 'wallaby': 'pacific', 'xena': 'pacific', 'yoga': 'quincy', + 'zed': 'quincy', + 'antelope': 'quincy', + 'bobcat': 'quincy', } @@ -3414,7 +3448,7 @@ def apply_osd_settings(settings): set_cmd = base_cmd + ' set {key} {value}' def _get_cli_key(key): - return(key.replace(' ', '_')) + return key.replace(' ', '_') # Retrieve the current values to check keys are correct and to make this a # noop if setting are already applied. for osd_id in get_local_osd_ids(): @@ -3453,6 +3487,9 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: modules = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError as e: diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml index 097d3302..ede8ec00 100644 --- a/ceph-osd/metadata.yaml +++ b/ceph-osd/metadata.yaml @@ -13,8 +13,8 @@ tags: series: - focal - jammy -- kinetic - lunar +- mantic description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. 
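The request-id change to contrib/storage/linux/ceph.py above replaces the uuid1-based identifier with a deterministic hash: a broker request's id is now the sha1 of its JSON-encoded, key-sorted op list, so identical requests carry identical ids and the id moves whenever an op is added. A minimal stdlib sketch of the scheme (hash_ops is a stand-in name for the _hash_ops method in the hunk):

    import hashlib
    import json

    def hash_ops(ops):
        # Deterministic request id: sha1 over the canonical JSON form
        # of the op list, so equal ops always hash to the same id.
        return hashlib.sha1(
            json.dumps(ops, sort_keys=True).encode()).hexdigest()

    # An empty op list hashes to the EMPTY_LIST_SHA constant in the patch.
    assert hash_ops([]) == '97d170e1550eee4afc0af065b78cda302a97674c'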
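Similarly, the get_named_key rework in charms_ceph/utils.py splits the read path into a memoized ceph_auth_get helper: repeated lookups during a hook run are served by functools.lru_cache instead of shelling out to "ceph auth get" each time, and the cache is dropped with cache_clear() once a new key is created. A small sketch of that memoize-and-invalidate pattern, with fetch_key standing in for the actual ceph call:

    import functools

    @functools.lru_cache()
    def fetch_key(key_name):
        # Expensive lookup; the result is cached per key_name.
        print('fetching', key_name)
        return 'key-for-' + key_name

    fetch_key('client.rgw')   # performs the lookup
    fetch_key('client.rgw')   # answered from the cache
    fetch_key.cache_clear()   # invalidate after auth state changes
    fetch_key('client.rgw')   # looks up again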
diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 672db7d6..2538297b 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -4,7 +4,6 @@ - charm-unit-jobs-py310 - charm-xena-functional-jobs - charm-yoga-functional-jobs - - charm-zed-functional-jobs - charm-functional-jobs vars: needs_charm_build: true diff --git a/ceph-osd/tests/bundles/kinetic-zed.yaml b/ceph-osd/tests/bundles/jammy-bobcat.yaml similarity index 93% rename from ceph-osd/tests/bundles/kinetic-zed.yaml rename to ceph-osd/tests/bundles/jammy-bobcat.yaml index a4a640e8..426fdef0 100644 --- a/ceph-osd/tests/bundles/kinetic-zed.yaml +++ b/ceph-osd/tests/bundles/jammy-bobcat.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin distro + openstack-origin: &openstack-origin cloud:jammy-bobcat series: jammy @@ -47,8 +47,6 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' @@ -79,13 +77,11 @@ applications: - '6' - '7' - '8' - channel: quincy/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 - options: - source: *openstack-origin to: - '9' channel: latest/edge @@ -98,7 +94,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -107,7 +103,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: yoga/edge + channel: latest/edge glance: expose: True @@ -117,7 +113,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: latest/edge cinder: expose: True @@ -129,11 +125,11 @@ applications: glance-api-version: '2' to: - '13' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: latest/edge nova-cloud-controller: expose: True @@ -143,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: latest/edge placement: charm: ch:placement @@ -152,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/jammy-zed.yaml b/ceph-osd/tests/bundles/mantic-bobcat.yaml similarity index 83% rename from ceph-osd/tests/bundles/jammy-zed.yaml rename to ceph-osd/tests/bundles/mantic-bobcat.yaml index 9b4960bf..3dc07363 100644 --- a/ceph-osd/tests/bundles/jammy-zed.yaml +++ b/ceph-osd/tests/bundles/mantic-bobcat.yaml @@ -1,17 +1,22 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin distro + # use infra (mysql, rabbit) from lts for stability + infra-series: &infra-series jammy -series: jammy +series: mantic comment: - 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': constraints: mem=3072M + series: *infra-series '1': constraints: mem=3072M + series: *infra-series '2': constraints: mem=3072M + series: *infra-series '3': '4': '5': @@ -19,30 +24,37 @@ machines: '7': '8': '9': + series: *infra-series '10': + series: *infra-series '11': + series: *infra-series '12': + series: *infra-series '13': + series: *infra-series '14': + series: *infra-series '15': + series: *infra-series applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -51,7 +63,7 @@ applications: - '0' - '1' - '2' - channel: 8.0/edge + channel: latest/edge ceph-osd: charm: ../../ceph-osd.charm @@ -77,14 +89,14 @@ applications: - '6' - '7' - '8' - channel: quincy/edge + channel: latest/edge rabbitmq-server: charm: ch:rabbitmq-server num_units: 1 to: - '9' - channel: 3.9/edge + channel: latest/edge keystone: expose: True @@ -94,7 +106,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: zed/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -103,7 +115,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: zed/edge + channel: latest/edge glance: expose: True @@ -113,7 +125,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: zed/edge + channel: latest/edge cinder: expose: True @@ -125,11 +137,11 @@ applications: glance-api-version: '2' to: - '13' - channel: zed/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: zed/edge + channel: latest/edge nova-cloud-controller: expose: True @@ -139,7 +151,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: zed/edge + channel: latest/edge placement: charm: ch:placement @@ -148,7 +160,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: zed/edge + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index a1c0d6c6..af1f66cf 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -4,6 +4,8 @@ gate_bundles: - focal-xena - focal-yoga - jammy-yoga + - jammy-bobcat + - mantic-bobcat smoke_bundles: - focal-xena From ac70df5c051652be0ab0011e918ec7d80b504cfb Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Mon, 31 Jul 2023 15:02:57 +0930 Subject: [PATCH 2526/2699] Clarify that osd-devices not present are ignored This means that for cases where servers may have a different number of disks, the same application can be deployed across all, listing all disks in the osd-devices option. Any devices in the list that aren't found on the server will simply be ignored. Change-Id: I7d0e32571845f790bb1ec42aa6eef72cc9b57b38 --- ceph-osd/README.md | 6 ++++-- ceph-osd/config.yaml | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index ea2d5798..16b12f9a 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -46,8 +46,10 @@ for both charms. #### `osd-devices` The `osd-devices` option lists what block devices can be used for OSDs across -the cluster. 
See section 'Storage devices' for an elaboration on this -fundamental topic. +the cluster. Devices that are listed in this option, but do not exist, will +be ignored. + +See section 'Storage devices' for an elaboration on this fundamental topic. #### `osd-format` diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 2f0f94d9..1498f4c2 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -63,6 +63,7 @@ options: These devices are the range of devices that will be checked for and used across all service units, in addition to any volumes attached via the --storage flag during deployment. + Any devices not found will be ignored. . For ceph < 14.2.0 (Nautilus) these can also be directories instead of devices. If the value does not start with "/dev" then it will be From 53dd066c56f2eec0e8624a93bff533087bb65d03 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2527/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic * add additional unit test https mocks needed since charm-helpers commit 6064a34627882d1c8acf74644c48d05db67ee3b4 * update charmcraft_channel to 2.x/stable Change-Id: I2d9c41c294668c3bb7fcba253adb8bc0c939d150 --- ceph-radosgw/charmcraft.yaml | 4 +- ceph-radosgw/config.yaml | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 7 + .../contrib/openstack/cert_utils.py | 11 ++ .../charmhelpers/contrib/openstack/context.py | 4 + .../templates/section-keystone-authtoken | 2 + .../section-keystone-authtoken-mitaka | 2 + .../openstack/templates/section-service-user | 4 +- .../templates/wsgi-openstack-api.conf | 6 + .../templates/wsgi-openstack-metadata.conf | 6 + .../charmhelpers/contrib/openstack/utils.py | 3 +- .../contrib/storage/linux/ceph.py | 23 +++- .../charmhelpers/core/host_factory/ubuntu.py | 1 + .../hooks/charmhelpers/fetch/ubuntu.py | 12 +- .../charmhelpers/fetch/ubuntu_apt_pkg.py | 36 ++--- ceph-radosgw/lib/charms_ceph/utils.py | 84 ++++++++---- ceph-radosgw/metadata.yaml | 3 +- ceph-radosgw/osci.yaml | 70 +++++----- ...isite.yaml => jammy-bobcat-multisite.yaml} | 2 +- ...aced.yaml => jammy-bobcat-namespaced.yaml} | 2 +- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 2 +- .../tests/bundles/kinetic-zed-multisite.yaml | 99 -------------- .../tests/bundles/kinetic-zed-namespaced.yaml | 124 ------------------ .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 4 +- ceph-radosgw/tests/tests.yaml | 16 +-- .../unit_tests/test_ceph_radosgw_context.py | 35 ++++- 26 files changed, 220 insertions(+), 344 deletions(-) rename ceph-radosgw/tests/bundles/{jammy-zed-multisite.yaml => jammy-bobcat-multisite.yaml} (97%) rename ceph-radosgw/tests/bundles/{jammy-zed-namespaced.yaml => jammy-bobcat-namespaced.yaml} (98%) rename ceph-radosgw/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (98%) delete mode 100644 ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml rename ceph-radosgw/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (97%) diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index ac8ec1e4..d5329498 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -34,8 +34,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, 
arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 3c621136..a9a9eed3 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: yoga + default: quincy description: | Optional repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py index ffda5fe1..7b309256 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -221,6 +221,13 @@ def https(): return True if config_get('ssl_cert') and config_get('ssl_key'): return True + # Local import to avoid circular dependency. + import charmhelpers.contrib.openstack.cert_utils as cert_utils + if ( + cert_utils.get_certificate_request() and not + cert_utils.get_requests_for_local_unit("certificates") + ): + return False for r_id in relation_ids('certificates'): for unit in relation_list(r_id): ca = relation_get('ca', rid=r_id, unit=unit) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index 5c961c58..a25ca995 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -409,6 +409,9 @@ def get_requests_for_local_unit(relation_name=None): relation_name = relation_name or 'certificates' bundles = [] for rid in relation_ids(relation_name): + sent = relation_get(rid=rid, unit=local_unit()) + legacy_keys = ['certificate_name', 'common_name'] + is_legacy_request = set(sent).intersection(legacy_keys) for unit in related_units(rid): data = relation_get(rid=rid, unit=unit) if data.get(raw_certs_key): @@ -416,6 +419,14 @@ def get_requests_for_local_unit(relation_name=None): 'ca': data['ca'], 'chain': data.get('chain'), 'certs': json.loads(data[raw_certs_key])}) + elif is_legacy_request: + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': {sent['common_name']: + {'cert': data.get(local_name + '.server.cert'), + 'key': data.get(local_name + '.server.key')}}}) + return bundles diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index d894b6a6..24a13d0d 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1748,6 +1748,9 @@ def __init__(self, name=None, script=None, admin_script=None, def __call__(self): total_processes = _calculate_workers() + enable_wsgi_rotation = config('wsgi-rotation') + if enable_wsgi_rotation is None: + enable_wsgi_rotation = True ctxt = { "service_name": self.service_name, "user": self.user, @@ -1761,6 +1764,7 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, + "wsgi_rotation": enable_wsgi_rotation, } return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index dbad506f..aef5edd8 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -12,6 +12,8 @@ signing_dir = {{ signing_dir }} {% if service_type -%} service_type = {{ service_type }} {% endif -%} +{% if admin_role -%} service_token_roles = {{ admin_role }} service_token_roles_required = True {% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 139a0512..31c21b4a 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -22,6 +22,8 @@ signing_dir = {{ signing_dir }} {% if use_memcache == true %} memcached_servers = {{ memcache_url }} {% endif -%} +{% if admin_role -%} service_token_roles = {{ admin_role }} service_token_roles_required = True {% endif -%} +{% endif -%} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user index c740cc28..ff454086 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-service-user @@ -3,8 +3,8 @@ send_service_user_token = true auth_type = password auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} -project_domain_id = default -user_domain_id = default +project_domain_name = service_domain +user_domain_name = service_domain project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index 6c4e37e4..2cb735e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -12,6 +12,12 @@ Listen {{ admin_port }} Listen {{ public_port }} {% endif -%} +{% if wsgi_rotation -%} +WSGISocketRotation On +{% else -%} +WSGISocketRotation Off +{% endif -%} + {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf index 6c4e37e4..2cb735e9 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -12,6 +12,12 @@ Listen {{ admin_port }} Listen {{ public_port }} {% endif -%} +{% if wsgi_rotation -%} +WSGISocketRotation On +{% else -%} +WSGISocketRotation Off +{% endif -%} + {% if port -%} WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py 
b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index 3d52eb16..e98be2c5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -160,6 +160,7 @@ ('2022.1', 'yoga'), ('2022.2', 'zed'), ('2023.1', 'antelope'), + ('2023.2', 'bobcat'), ]) # The ugly duckling - must list releases oldest to newest @@ -957,7 +958,7 @@ def os_requires_version(ostack_release, pkg): def wrap(f): @wraps(f) def wrapped_f(*args): - if os_release(pkg) < ostack_release: + if CompareOpenStackReleases(os_release(pkg)) < ostack_release: raise Exception("This hook is not supported on releases" " before %s" % ostack_release) f(*args) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1b20b8fe..2e1fc1b5 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,7 +28,6 @@ import shutil import json import time -import uuid from subprocess import ( check_call, @@ -1677,6 +1676,10 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ + # The below hash is the result of running + # `hashlib.sha1('[]'.encode()).hexdigest()` + EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c' + def __init__(self, api_version=1, request_id=None, raw_request_data=None): """Initialize CephBrokerRq object. @@ -1685,8 +1688,12 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): :param api_version: API version for request (default: 1). :type api_version: Optional[int] - :param request_id: Unique identifier for request. - (default: string representation of generated UUID) + :param request_id: Unique identifier for request. The identifier will + be updated as ops are added or removed from the + broker request. This ensures that Ceph will + correctly process requests where operations are + added after the initial request is processed. + (default: sha1 of operations) :type request_id: Optional[str] :param raw_request_data: JSON-encoded string to build request from. :type raw_request_data: Optional[str] @@ -1695,16 +1702,20 @@ def __init__(self, api_version=1, request_id=None, raw_request_data=None): if raw_request_data: request_data = json.loads(raw_request_data) self.api_version = request_data['api-version'] - self.request_id = request_data['request-id'] self.set_ops(request_data['ops']) + self.request_id = request_data['request-id'] else: self.api_version = api_version if request_id: self.request_id = request_id else: - self.request_id = str(uuid.uuid1()) + self.request_id = CephBrokerRq.EMPTY_LIST_SHA self.ops = [] + def _hash_ops(self): + """Return the sha1 of the requested Broker ops.""" + return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest() + def add_op(self, op): """Add an op if it is not already in the list. @@ -1713,6 +1724,7 @@ def add_op(self, op): """ if op not in self.ops: self.ops.append(op) + self.request_id = self._hash_ops() def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, @@ -1991,6 +2003,7 @@ def set_ops(self, ops): to allow comparisons to ensure validity. 
""" self.ops = ops + self.request_id = self._hash_ops() @property def request(self): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py index a279d5be..732d76c3 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -32,6 +32,7 @@ 'jammy', 'kinetic', 'lunar', + 'mantic', ) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index effc884a..1be992c4 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -238,6 +238,14 @@ 'antelope/proposed': 'jammy-proposed/antelope', 'jammy-antelope/proposed': 'jammy-proposed/antelope', 'jammy-proposed/antelope': 'jammy-proposed/antelope', + # bobcat + 'bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat/updates': 'jammy-updates/bobcat', + 'jammy-updates/bobcat': 'jammy-updates/bobcat', + 'bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -270,6 +278,7 @@ 'yoga', 'zed', 'antelope', + 'bobcat', ) @@ -298,6 +307,7 @@ ('jammy', 'yoga'), ('kinetic', 'zed'), ('lunar', 'antelope'), + ('mantic', 'bobcat'), ]) @@ -591,7 +601,7 @@ def _get_key_by_keyid(keyid): curl_cmd = ['curl', keyserver_url.format(keyid)] # use proxy server settings in order to retrieve the key return subprocess.check_output(curl_cmd, - env=env_proxy_settings(['https'])) + env=env_proxy_settings(['https', 'no_proxy'])) def _dearmor_gpg_key(key_asc): diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 6da355fd..f4dde4a9 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -122,13 +122,12 @@ def dpkg_list(self, packages): :raises: subprocess.CalledProcessError """ pkgs = {} - cmd = ['dpkg-query', '--list'] + cmd = [ + 'dpkg-query', '--show', + '--showformat', + r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n' + ] cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. 
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') try: output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, @@ -140,24 +139,17 @@ def dpkg_list(self, packages): if cp.returncode != 1: raise output = cp.output - headings = [] for line in output.splitlines(): - if line.startswith('||/'): - headings = line.split() - headings.pop(0) - continue - elif (line.startswith('|') or line.startswith('+') or - line.startswith('dpkg-query:')): + # only process lines for successfully installed packages + if not (line.startswith('ii ') or line.startswith('hi ')): continue - else: - data = line.split(None, 4) - status = data.pop(0) - if status not in ('ii', 'hi'): - continue - pkg = {} - pkg.update({k.lower(): v for k, v in zip(headings, data)}) + status, name, version, arch, desc = line.split('\t', 4) + pkgs[name] = { + 'name': name, + 'version': version, + 'architecture': arch, + 'description': desc, + } return pkgs def _apt_cache_show(self, packages): diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index e6adcb82..01fb9ac9 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -681,24 +681,39 @@ def _get_osd_num_from_dirname(dirname): return match.group('osd_id') +def get_crimson_osd_ids(): + """Return a set of the OSDs that are running with the Crimson backend.""" + rv = set() + try: + out = subprocess.check_output(['pgrep', 'crimson-osd', '-a']) + for line in out.decode('utf8').splitlines(): + rv.add(line.split()[-1]) + except Exception: + pass + + return rv + + def get_local_osd_ids(): """This will list the /var/lib/ceph/osd/* directories and try to split the ID off of the directory name and return it in - a list. + a list. Excludes crimson OSDs from the returned list. :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. """ osd_ids = [] + crimson_osds = get_crimson_osd_ids() osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') if os.path.exists(osd_path): try: dirs = os.listdir(osd_path) for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] + osd_id = osd_dir.split('-')[1] if '-' in osd_dir else '' if (_is_int(osd_id) and filesystem_mounted(os.path.join( - os.sep, osd_path, osd_dir))): + os.sep, osd_path, osd_dir)) and + osd_id not in crimson_osds): osd_ids.append(osd_id) except OSError: raise @@ -1216,28 +1231,15 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :returns: Returns a cephx key """ - key_name = 'client.{}'.format(name) - try: - # Does the key already exist? - output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - key_name, - ]).decode('UTF-8')).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it!
- log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps + key_name = 'client.{}'.format(name) + + key = ceph_auth_get(key_name) + if key: + upgrade_key_caps(key_name, caps) + return key + + log("Creating new key for {}".format(name), level=DEBUG) cmd = [ "sudo", "-u", @@ -1259,6 +1261,7 @@ def get_named_key(name, caps=None, pool_list=None): pools = " ".join(['pool={0}'.format(i) for i in pool_list]) subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) + ceph_auth_get.cache_clear() log("Calling check_output: {}".format(cmd), level=DEBUG) return parse_key(str(subprocess @@ -1267,6 +1270,30 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 +@functools.lru_cache() +def ceph_auth_get(key_name): + try: + # Does the key already exist? + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key + pass + + def upgrade_key_caps(key, caps, pool_list=None): """Upgrade key to have capabilities caps""" if not is_leader(): @@ -2067,7 +2094,7 @@ def filesystem_mounted(fs): def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] + cmd = ['pgrep', 'ceph-osd|crimson-osd'] try: result = str(subprocess.check_output(cmd).decode('UTF-8')) return result.split() @@ -2518,7 +2545,7 @@ def wait_until(wait_f, timeout=10 * 60): :type timeout: int """ start_time = time.time() - while(not wait_f()): + while not wait_f(): now = time.time() if now > start_time + timeout: raise WatchDog.WatchDogTimeoutException() @@ -3219,6 +3246,9 @@ def dirs_need_ownership_update(service): 'wallaby': 'pacific', 'xena': 'pacific', 'yoga': 'quincy', + 'zed': 'quincy', + 'antelope': 'quincy', + 'bobcat': 'quincy', } @@ -3418,7 +3448,7 @@ def apply_osd_settings(settings): set_cmd = base_cmd + ' set {key} {value}' def _get_cli_key(key): - return(key.replace(' ', '_')) + return key.replace(' ', '_') # Retrieve the current values to check keys are correct and to make this a # noop if setting are already applied. 
for osd_id in get_local_osd_ids(): diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 9c1574a4..448eeea7 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -15,7 +15,8 @@ tags: series: - focal - jammy -- kinetic +- lunar +- mantic extra-bindings: public: admin: diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index da15f095..9566e5e0 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -9,37 +9,37 @@ - vault-focal-yoga-namespaced - focal-yoga-multisite - jammy-yoga-multisite - - jammy-zed-multisite: - voting: false - jammy-antelope-multisite: voting: false - - kinetic-zed-multisite: + - jammy-bobcat-multisite: voting: false - lunar-antelope-multisite: voting: false + - mantic-bobcat-multisite: + voting: false - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced - - vault-jammy-zed_rgw: - voting: false - - vault-jammy-zed-namespaced: - voting: false - vault-jammy-antelope_rgw: voting: false - vault-jammy-antelope-namespaced: voting: false - - vault-kinetic-zed_rgw: + - vault-jammy-bobcat_rgw: voting: false - - vault-kinetic-zed-namespaced: + - vault-jammy-bobcat-namespaced: voting: false - vault-lunar-antelope_rgw: voting: false - vault-lunar-antelope-namespaced: voting: false + - vault-mantic-bobcat_rgw: + voting: false + - vault-mantic-bobcat-namespaced: + voting: false vars: needs_charm_build: true charm_build_name: ceph-radosgw build_type: charmcraft - charmcraft_channel: 2.1/stable + charmcraft_channel: 2.x/stable - job: name: focal-yoga-multisite parent: func-target @@ -63,33 +63,33 @@ vars: tox_extra_args: '-- jammy-yoga-multisite' - job: - name: jammy-zed-multisite + name: jammy-antelope-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: '-- jammy-zed-multisite' + tox_extra_args: '-- jammy-antelope-multisite' - job: - name: jammy-antelope-multisite + name: jammy-bobcat-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: '-- jammy-antelope-multisite' + tox_extra_args: '-- jammy-bobcat-multisite' - job: - name: kinetic-zed-multisite + name: lunar-antelope-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: '-- kinetic-zed-multisite' + tox_extra_args: '-- lunar-antelope-multisite' - job: - name: lunar-antelope-multisite + name: mantic-bobcat-multisite parent: func-target dependencies: - jammy-yoga-multisite vars: - tox_extra_args: '-- lunar-antelope-multisite' + tox_extra_args: '-- mantic-bobcat-multisite' - job: name: vault-focal-yoga_rgw parent: func-target @@ -118,13 +118,6 @@ - jammy-yoga-multisite vars: tox_extra_args: '-- vault:jammy-yoga-namespaced' -- job: - name: vault-jammy-zed-namespaced - parent: func-target - dependencies: - - jammy-yoga-multisite - vars: - tox_extra_args: '-- vault:jammy-zed-namespaced' - job: name: vault-jammy-antelope-namespaced parent: func-target @@ -133,45 +126,44 @@ vars: tox_extra_args: '-- vault:jammy-antelope-namespaced' - job: - name: vault-jammy-zed_rgw + name: vault-jammy-bobcat-namespaced parent: func-target dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced + - jammy-yoga-multisite vars: - tox_extra_args: '-- vault:jammy-zed' + tox_extra_args: '-- vault:jammy-bobcat-namespaced' - job: - name: vault-jammy-zed-namespaced + name: vault-jammy-antelope_rgw parent: func-target dependencies: - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: '-- vault:jammy-zed-namespaced' + tox_extra_args: '-- 
vault:jammy-antelope' - job: - name: vault-kinetic-zed_rgw + name: vault-jammy-bobcat_rgw parent: func-target dependencies: - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: '-- vault:kinetic-zed' + tox_extra_args: '-- vault:jammy-bobcat' - job: - name: vault-kinetic-zed-namespaced + name: vault-lunar-antelope-namespaced parent: func-target dependencies: - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: '-- vault:kinetic-zed-namespaced' + tox_extra_args: '-- vault:lunar-antelope-namespaced' - job: - name: vault-jammy-antelope_rgw + name: vault-mantic-bobcat-namespaced parent: func-target dependencies: - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: '-- vault:jammy-antelope' + tox_extra_args: '-- vault:mantic-bobcat-namespaced' - job: name: vault-lunar-antelope_rgw parent: func-target @@ -181,10 +173,10 @@ vars: tox_extra_args: '-- vault:lunar-antelope' - job: - name: vault-lunar-antelope-namespaced + name: vault-mantic-bobcat_rgw parent: func-target dependencies: - vault-jammy-yoga_rgw - vault-jammy-yoga-namespaced vars: - tox_extra_args: '-- vault:lunar-antelope-namespaced' + tox_extra_args: '-- vault:mantic-bobcat' diff --git a/ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml rename to ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml index 602e11f2..48f1b9c4 100644 --- a/ceph-radosgw/tests/bundles/jammy-zed-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml @@ -1,5 +1,5 @@ options: - source: &source cloud:jammy-zed + source: &source cloud:jammy-bobcat series: jammy diff --git a/ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml similarity index 98% rename from ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml rename to ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml index 946b826b..0ebcf4b3 100644 --- a/ceph-radosgw/tests/bundles/jammy-zed-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml @@ -1,5 +1,5 @@ options: - source: &source cloud:jammy-zed + source: &source cloud:jammy-bobcat series: jammy diff --git a/ceph-radosgw/tests/bundles/jammy-zed.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat.yaml similarity index 98% rename from ceph-radosgw/tests/bundles/jammy-zed.yaml rename to ceph-radosgw/tests/bundles/jammy-bobcat.yaml index bb97a58b..bcef92af 100644 --- a/ceph-radosgw/tests/bundles/jammy-zed.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat.yaml @@ -1,5 +1,5 @@ options: - source: &source cloud:jammy-zed + source: &source cloud:jammy-bobcat series: jammy diff --git a/ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml b/ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml deleted file mode 100644 index ea03d4c6..00000000 --- a/ceph-radosgw/tests/bundles/kinetic-zed-multisite.yaml +++ /dev/null @@ -1,99 +0,0 @@ -options: - source: &source distro - -series: kinetic - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - '9': - -applications: - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '0' - - secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '1' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '2' - - '6' - - '7' - channel: latest/edge - - secondary-ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '3' - - '8' - - '9' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '4' - channel: latest/edge - - secondary-ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '5' - channel: latest/edge - -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'secondary-ceph-osd:mon' - - 'secondary-ceph-mon:osd' - - - - 'secondary-ceph-radosgw:mon' - - 'secondary-ceph-mon:radosgw' - diff --git a/ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml b/ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml deleted file mode 100644 index 862e0e18..00000000 --- a/ceph-radosgw/tests/bundles/kinetic-zed-namespaced.yaml +++ /dev/null @@ -1,124 +0,0 @@ -options: - source: &source distro - -series: kinetic - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 
'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/kinetic-zed.yaml b/ceph-radosgw/tests/bundles/mantic-bobcat.yaml similarity index 97% rename from ceph-radosgw/tests/bundles/kinetic-zed.yaml rename to ceph-radosgw/tests/bundles/mantic-bobcat.yaml index 8431a762..9e0b78e1 100644 --- a/ceph-radosgw/tests/bundles/kinetic-zed.yaml +++ b/ceph-radosgw/tests/bundles/mantic-bobcat.yaml @@ -1,7 +1,7 @@ options: - source: &source distro + source: &source cloud:mantic-bobcat -series: kinetic +series: mantic comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 03bf199c..c92410b9 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -14,20 +14,20 @@ smoke_bundles: dev_bundles: - jammy-yoga-multisite - - jammy-zed-multisite - lunar-antelope-multisite - - kinetic-zed-multisite + - mantic-bobcat-multisite - jammy-antelope-multisite + - jammy-bobcat-multisite - vault: jammy-yoga - vault: jammy-yoga-namespaced - - vault: jammy-zed - vault: lunar-antelope - - vault: jammy-zed-namespaced + - vault: mantic-bobcat - vault: lunar-antelope-namespaced - - vault: kinetic-zed + - vault: mantic-bobcat-namespaced - vault: jammy-antelope - - vault: kinetic-zed-namespaced + - vault: jammy-bobcat - vault: jammy-antelope-namespaced + - vault: jammy-bobcat-namespaced target_deploy_status: vault: @@ -48,7 +48,7 @@ tests: tests_options: force_deploy: - - kinetic-zed - jammy-antelope - - kinetic-zed-namespaced + - jammy-bobcat - jammy-antelope-namespaced + - jammy-bobcat-namespaced diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index f3f9553a..b259a7f6 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -46,6 +46,7 @@ def setUp(self): self.cmp_pkgrevno.return_value = 1 self.arch.return_value = 'amd64' + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') @patch('charmhelpers.contrib.openstack.context.local_unit') @@ -54,7 +55,9 @@ def setUp(self): @patch('charmhelpers.contrib.openstack.context.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig, - _ctxtconfig, _local_unit, _mkdir, _get_relation_ip): + _ctxtconfig, _local_unit, _mkdir, _get_relation_ip, + _mock_https): + _mock_https.return_value = False _get_relation_ip.return_value = '10.0.0.10' _ctxtconfig.side_effect = self.test_config.get _haconfig.side_effect = self.test_config.get @@ -96,14 +99,17 @@ def plain_list_stub(key): else: return [] + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') def test_ctxt( - self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids + self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids, + mock_https, ): + mock_https.return_value = False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' @@ -212,14 +218,17 @@ def _relation_get(attr, unit, rid): 
self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') @patch.object(context, 'ensure_host_resolvable_v6') def test_list_of_addresses_from_ceph_proxy( - self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids + self, mock_ensure_rsv_v6, mock_config_get, mock_relation_ids, + mock_https, ): + mock_https.return_value = False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' @@ -273,11 +282,14 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_missing_data(self, mock_config_get, mock_relation_ids): + def test_ctxt_missing_data(self, mock_config_get, mock_relation_ids, + mock_https): + mock_https.return_value = False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' @@ -287,11 +299,14 @@ def test_ctxt_missing_data(self, mock_config_get, mock_relation_ids): self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2'] self.assertEqual({}, mon_ctxt()) + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_inconsistent_auths(self, mock_config_get, mock_relation_ids): + def test_ctxt_inconsistent_auths(self, mock_config_get, mock_relation_ids, + mock_https): + mock_https.return_value = False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' @@ -337,11 +352,14 @@ def _relation_get(attr, unit, rid): } self.assertEqual(expect, mon_ctxt()) + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_consistent_auths(self, mock_config_get, mock_relation_ids): + def test_ctxt_consistent_auths(self, mock_config_get, mock_relation_ids, + mock_https): + mock_https.return_value = False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' @@ -445,11 +463,14 @@ def _compare_version(package, version): _test_version = '16.2.0' context.validate_http_frontend('beast') + @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') @patch.object(ceph, 'config', lambda *args: '{"client.radosgw.gateway": {"rgw init timeout": 60}}') - def test_ctxt_inconsistent_fsids(self, mock_config_get, mock_relation_ids): + def test_ctxt_inconsistent_fsids(self, mock_config_get, mock_relation_ids, + mock_https): + mock_https.return_value = 
False mock_relation_ids.return_value = [] mock_config_get.side_effect = self.test_config.get self.socket.gethostname.return_value = 'testhost' From 628913babaf34b0f942f34b22e3647c3d0fc75cc Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2528/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic Change-Id: Ifc896089989c0658ea751689e2c9689b0a382527 --- ceph-dashboard/charmcraft.yaml | 4 ++-- ceph-dashboard/metadata.yaml | 2 +- ceph-dashboard/osci.yaml | 1 - .../tests/bundles/jammy-antelope.yaml | 6 ++---- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 16 +++++++------- .../tests/bundles/lunar-antelope.yaml | 13 ++---------- .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 21 ++++++------------- ceph-dashboard/tests/tests.yaml | 5 +++++ 8 files changed, 25 insertions(+), 43 deletions(-) rename ceph-dashboard/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (91%) rename ceph-dashboard/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (90%) diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml index 75c5e371..0b55422f 100644 --- a/ceph-dashboard/charmcraft.yaml +++ b/ceph-dashboard/charmcraft.yaml @@ -33,8 +33,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index 25311625..0bb9485f 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -16,8 +16,8 @@ subordinate: true series: - focal - jammy -- kinetic - lunar +- mantic requires: dashboard: interface: ceph-dashboard diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 9fdcb42b..df2d7b99 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -3,7 +3,6 @@ - charm-unit-jobs-py38 - charm-unit-jobs-py310 - charm-yoga-functional-jobs - - charm-zed-functional-jobs - charm-functional-jobs vars: needs_charm_build: true diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml index dfb6f2e8..b8def797 100644 --- a/ceph-dashboard/tests/bundles/jammy-antelope.yaml +++ b/ceph-dashboard/tests/bundles/jammy-antelope.yaml @@ -10,14 +10,12 @@ applications: osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' - source: *openstack-origin channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' - source: *openstack-origin channel: quincy/edge vault: num_units: 1 @@ -27,10 +25,10 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 - channel: 8.0/edge + channel: latest/edge vault-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: diff --git a/ceph-dashboard/tests/bundles/jammy-zed.yaml b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml similarity index 91% rename from ceph-dashboard/tests/bundles/jammy-zed.yaml rename to ceph-dashboard/tests/bundles/jammy-bobcat.yaml index 90ba7203..42462d15 100644 --- a/ceph-dashboard/tests/bundles/jammy-zed.yaml +++ 
b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml @@ -1,7 +1,7 @@ local_overlay_enabled: False series: jammy variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin cloud:jammy-bobcat applications: ceph-osd: charm: ch:ceph-osd @@ -10,15 +10,13 @@ applications: osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' - source: *openstack-origin - channel: quincy/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' - source: *openstack-origin - channel: quincy/edge + channel: latest/edge vault: num_units: 1 charm: ch:vault @@ -30,7 +28,7 @@ applications: channel: latest/edge vault-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -58,17 +56,17 @@ applications: ceph-radosgw: charm: ch:ceph-radosgw num_units: 3 - channel: quincy/edge + channel: latest/edge ceph-fs: charm: ch:ceph-fs num_units: 1 - channel: quincy/edge + channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi num_units: 2 options: gateway-metadata-pool: iscsi-foo-metadata - channel: quincy/edge + channel: latest/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml index e2a1ba28..fcfa66d3 100644 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -10,7 +10,6 @@ applications: storage: osd-devices: 'cinder,10G,2' options: - source: *source osd-devices: '/dev/test-non-existent' channel: quincy/edge ceph-mon: @@ -18,7 +17,6 @@ applications: series: lunar num_units: 3 options: - source: *source monitor-count: '3' channel: quincy/edge vault: @@ -29,12 +27,10 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 - options: - source: *source - channel: 8.0/edge + channel: latest/edge vault-mysql-router: charm: ch:mysql-router - channel: 8.0/edge + channel: latest/edge ceph-dashboard: charm: ../../ceph-dashboard.charm options: @@ -61,22 +57,17 @@ applications: charm: ch:ceph-radosgw series: lunar num_units: 3 - options: - source: *source channel: quincy/edge ceph-fs: charm: ch:ceph-fs series: lunar num_units: 1 - options: - source: *source channel: quincy/edge ceph-iscsi: charm: ch:ceph-iscsi series: lunar num_units: 2 options: - source: *source gateway-metadata-pool: iscsi-foo-metadata channel: quincy/edge relations: diff --git a/ceph-dashboard/tests/bundles/kinetic-zed.yaml b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml similarity index 90% rename from ceph-dashboard/tests/bundles/kinetic-zed.yaml rename to ceph-dashboard/tests/bundles/mantic-bobcat.yaml index a7df8a7e..2f4d4f97 100644 --- a/ceph-dashboard/tests/bundles/kinetic-zed.yaml +++ b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml @@ -1,24 +1,22 @@ local_overlay_enabled: False -series: kinetic +series: mantic variables: source: &source distro applications: ceph-osd: charm: ch:ceph-osd - series: kinetic + series: mantic num_units: 6 storage: osd-devices: 'cinder,10G,2' options: - source: *source osd-devices: '/dev/test-non-existent' channel: latest/edge ceph-mon: charm: ch:ceph-mon - series: kinetic + series: mantic num_units: 3 options: - source: *source monitor-count: '3' channel: latest/edge vault: @@ -29,8 +27,6 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 - options: - source: *source channel: latest/edge vault-mysql-router: charm: 
ch:mysql-router @@ -59,24 +55,19 @@ applications: series: focal ceph-radosgw: charm: ch:ceph-radosgw - series: kinetic + series: mantic num_units: 3 - options: - source: *source channel: latest/edge ceph-fs: charm: ch:ceph-fs - series: kinetic + series: mantic num_units: 1 - options: - source: *source channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi - series: kinetic + series: mantic num_units: 2 options: - source: *source gateway-metadata-pool: iscsi-foo-metadata channel: latest/edge relations: diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index f19bf66a..11721de7 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -2,6 +2,8 @@ charm_name: ceph-dasboard gate_bundles: - focal - jammy + - jammy-bobcat + - mantic-bobcat smoke_bundles: - focal configure: @@ -27,3 +29,6 @@ target_deploy_status: telegraf: workload-status: active workload-status-message-prefix: Monitoring +tests_options: + force_deploy: + - mantic-bobcat From 051c4afa47a041df09b2ccde3f0783d20f708475 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2529/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic * update charmcraft_channel to 2.x/stable Change-Id: If59e6c4db7688c0819da2b3feb0c7bda89de6780 --- ceph-proxy/charmcraft.yaml | 4 +- .../charmhelpers/contrib/openstack/utils.py | 1 + .../charmhelpers/core/host_factory/ubuntu.py | 1 + ceph-proxy/charmhelpers/fetch/ubuntu.py | 10 + ceph-proxy/config.yaml | 2 +- ceph-proxy/lib/charms_ceph/utils.py | 84 ++++--- ceph-proxy/metadata.yaml | 2 +- ceph-proxy/osci.yaml | 15 +- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 2 +- ceph-proxy/tests/bundles/jammy-zed-ec.yaml | 224 ------------------ ...etic-zed-ec.yaml => mantic-bobcat-ec.yaml} | 2 +- .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 2 +- ceph-proxy/tests/tests.yaml | 12 +- 13 files changed, 89 insertions(+), 272 deletions(-) rename ceph-proxy/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (98%) delete mode 100644 ceph-proxy/tests/bundles/jammy-zed-ec.yaml rename ceph-proxy/tests/bundles/{kinetic-zed-ec.yaml => mantic-bobcat-ec.yaml} (99%) rename ceph-proxy/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (99%) diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 37cacbce..62a8f5b8 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -32,8 +32,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index 83b6884b..e98be2c5 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -160,6 +160,7 @@ ('2022.1', 'yoga'), ('2022.2', 'zed'), ('2023.1', 'antelope'), + ('2023.2', 'bobcat'), ]) # The ugly duckling - must list releases oldest to newest diff --git a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py index a279d5be..732d76c3 100644 --- 
a/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py +++ b/ceph-proxy/charmhelpers/core/host_factory/ubuntu.py @@ -32,6 +32,7 @@ 'jammy', 'kinetic', 'lunar', + 'mantic', ) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 1bad0db8..1be992c4 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -238,6 +238,14 @@ 'antelope/proposed': 'jammy-proposed/antelope', 'jammy-antelope/proposed': 'jammy-proposed/antelope', 'jammy-proposed/antelope': 'jammy-proposed/antelope', + # bobcat + 'bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat': 'jammy-updates/bobcat', + 'jammy-bobcat/updates': 'jammy-updates/bobcat', + 'jammy-updates/bobcat': 'jammy-updates/bobcat', + 'bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', + 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -270,6 +278,7 @@ 'yoga', 'zed', 'antelope', + 'bobcat', ) @@ -298,6 +307,7 @@ ('jammy', 'yoga'), ('kinetic', 'zed'), ('lunar', 'antelope'), + ('mantic', 'bobcat'), ]) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 80f8564c..720fb347 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -10,7 +10,7 @@ options: Setting this to True will allow supporting services to log to syslog. source: type: string - default: antelope + default: quincy description: | Repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index e6adcb82..01fb9ac9 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -681,24 +681,39 @@ def _get_osd_num_from_dirname(dirname): return match.group('osd_id') +def get_crimson_osd_ids(): + """Return a set of the OSDs that are running with the Crimson backend.""" + rv = set() + try: + out = subprocess.check_output(['pgrep', 'crimson-osd', '-a']) + for line in out.decode('utf8').splitlines(): + rv.add(line.split()[-1]) + except Exception: + pass + + return rv + + def get_local_osd_ids(): """This will list the /var/lib/ceph/osd/* directories and try to split the ID off of the directory name and return it in - a list. + a list. Excludes crimson OSD's from the returned list. :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. """ osd_ids = [] + crimson_osds = get_crimson_osd_ids() osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') if os.path.exists(osd_path): try: dirs = os.listdir(osd_path) for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] + osd_id = osd_dir.split('-')[1] if '-' in osd_dir else '' if (_is_int(osd_id) and filesystem_mounted(os.path.join( - os.sep, osd_path, osd_dir))): + os.sep, osd_path, osd_dir)) and + osd_id not in crimson_osds): osd_ids.append(osd_id) except OSError: raise @@ -1216,28 +1231,15 @@ def get_named_key(name, caps=None, pool_list=None): :param caps: dict of cephx capabilities :returns: Returns a cephx key """ - key_name = 'client.{}'.format(name) - try: - # Does the key already exist? 
- output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - key_name, - ]).decode('UTF-8')).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! - log("Creating new key for {}".format(name), level=DEBUG) caps = caps or _default_caps + key_name = 'client.{}'.format(name) + + key = ceph_auth_get(key_name) + if key: + upgrade_key_caps(key_name, caps) + return key + + log("Creating new key for {}".format(name), level=DEBUG) cmd = [ "sudo", "-u", @@ -1259,6 +1261,7 @@ def get_named_key(name, caps=None, pool_list=None): pools = " ".join(['pool={0}'.format(i) for i in pool_list]) subcaps[0] = subcaps[0] + " " + pools cmd.extend([subsystem, '; '.join(subcaps)]) + ceph_auth_get.cache_clear() log("Calling check_output: {}".format(cmd), level=DEBUG) return parse_key(str(subprocess @@ -1267,6 +1270,30 @@ def get_named_key(name, caps=None, pool_list=None): .strip()) # IGNORE:E1103 +@functools.lru_cache() +def ceph_auth_get(key_name): + try: + # Does the key already exist? + output = str(subprocess.check_output( + [ + 'sudo', + '-u', ceph_user(), + 'ceph', + '--name', 'mon.', + '--keyring', + '/var/lib/ceph/mon/ceph-{}/keyring'.format( + socket.gethostname() + ), + 'auth', + 'get', + key_name, + ]).decode('UTF-8')).strip() + return parse_key(output) + except subprocess.CalledProcessError: + # Couldn't get the key + pass + + def upgrade_key_caps(key, caps, pool_list=None): """Upgrade key to have capabilities caps""" if not is_leader(): @@ -2067,7 +2094,7 @@ def filesystem_mounted(fs): def get_running_osds(): """Returns a list of the pids of the current running OSD daemons""" - cmd = ['pgrep', 'ceph-osd'] + cmd = ['pgrep', 'ceph-osd|crimson-osd'] try: result = str(subprocess.check_output(cmd).decode('UTF-8')) return result.split() @@ -2518,7 +2545,7 @@ def wait_until(wait_f, timeout=10 * 60): :type timeout: int """ start_time = time.time() - while(not wait_f()): + while not wait_f(): now = time.time() if now > start_time + timeout: raise WatchDog.WatchDogTimeoutException() @@ -3219,6 +3246,9 @@ def dirs_need_ownership_update(service): 'wallaby': 'pacific', 'xena': 'pacific', 'yoga': 'quincy', + 'zed': 'quincy', + 'antelope': 'quincy', + 'bobcat': 'quincy', } @@ -3418,7 +3448,7 @@ def apply_osd_settings(settings): set_cmd = base_cmd + ' set {key} {value}' def _get_cli_key(key): - return(key.replace(' ', '_')) + return key.replace(' ', '_') # Retrieve the current values to check keys are correct and to make this a # noop if setting are already applied. 
for osd_id in get_local_osd_ids(): diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 24f04fcb..c2e2d9a1 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -11,8 +11,8 @@ tags: - misc series: - jammy -- kinetic - lunar +- mantic extra-bindings: public: cluster: diff --git a/ceph-proxy/osci.yaml b/ceph-proxy/osci.yaml index e5d780a5..3478aab2 100644 --- a/ceph-proxy/osci.yaml +++ b/ceph-proxy/osci.yaml @@ -3,20 +3,19 @@ - charm-unit-jobs-py38 - charm-unit-jobs-py39 - charm-unit-jobs-py310 - - charm-zed-functional-jobs - charm-functional-jobs check: jobs: - jammy-yoga-ec - - kinetic-zed-ec: - voting: false - lunar-antelope-ec: voting: false + - mantic-bobcat-ec: + voting: false vars: needs_charm_build: true charm_build_name: ceph-proxy build_type: charmcraft - charmcraft_channel: 2.0/stable + charmcraft_channel: 2.x/stable - job: name: jammy-yoga-ec parent: func-target @@ -28,16 +27,16 @@ vars: tox_extra_args: '-- erasure-coded:jammy-yoga-ec' - job: - name: kinetic-zed-ec + name: lunar-antelope-ec parent: func-target dependencies: - jammy-yoga-ec vars: - tox_extra_args: -- erasure-coded:kinetic-zed-ec + tox_extra_args: -- erasure-coded:lunar-antelope-ec - job: - name: lunar-antelope-ec + name: mantic-bobcat-ec parent: func-target dependencies: - jammy-yoga-ec vars: - tox_extra_args: -- erasure-coded:lunar-antelope-ec + tox_extra_args: -- erasure-coded:mantic-bobcat-ec diff --git a/ceph-proxy/tests/bundles/jammy-zed.yaml b/ceph-proxy/tests/bundles/jammy-bobcat.yaml similarity index 98% rename from ceph-proxy/tests/bundles/jammy-zed.yaml rename to ceph-proxy/tests/bundles/jammy-bobcat.yaml index 80969e47..b431f527 100644 --- a/ceph-proxy/tests/bundles/jammy-zed.yaml +++ b/ceph-proxy/tests/bundles/jammy-bobcat.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin cloud:jammy-bobcat series: jammy diff --git a/ceph-proxy/tests/bundles/jammy-zed-ec.yaml b/ceph-proxy/tests/bundles/jammy-zed-ec.yaml deleted file mode 100644 index d449fdef..00000000 --- a/ceph-proxy/tests/bundles/jammy-zed-ec.yaml +++ /dev/null @@ -1,224 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:jammy-zed - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: latest/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - channel: latest/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - channel: latest/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 
'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml b/ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml similarity index 99% rename from ceph-proxy/tests/bundles/kinetic-zed-ec.yaml rename to ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml index 42b0d69a..a70a2ad7 100644 --- a/ceph-proxy/tests/bundles/kinetic-zed-ec.yaml +++ b/ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: kinetic +series: mantic comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-proxy/tests/bundles/kinetic-zed.yaml b/ceph-proxy/tests/bundles/mantic-bobcat.yaml similarity index 99% rename from ceph-proxy/tests/bundles/kinetic-zed.yaml rename to ceph-proxy/tests/bundles/mantic-bobcat.yaml index 376648af..a07c9bfc 100644 --- a/ceph-proxy/tests/bundles/kinetic-zed.yaml +++ b/ceph-proxy/tests/bundles/mantic-bobcat.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: kinetic +series: mantic comment: - 'machines section to decide order of deployment. database sooner = faster' diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index bfa452d3..67f4a342 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -19,14 +19,14 @@ gate_bundles: dev_bundles: - jammy-yoga - erasure-coded: jammy-yoga-ec - - jammy-zed - lunar-antelope - - erasure-coded: jammy-zed-ec + - mantic-bobcat - erasure-coded: lunar-antelope-ec - - kinetic-zed + - erasure-coded: mantic-bobcat-ec - jammy-antelope - - erasure-coded: kinetic-zed-ec + - jammy-bobcat - erasure-coded: jammy-antelope-ec + - erasure-coded: jammy-bobcat-ec smoke_bundles: - jammy-yoga @@ -59,7 +59,7 @@ target_deploy_status: tests_options: force_deploy: - - kinetic-zed - jammy-antelope - - kinetic-zed-ec + - jammy-bobcat - jammy-antelope-ec + - jammy-bobcat-ec From 0d36846f5b6466446e9d0fe7000b73da23af755a Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2530/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic * update charmcraft_channel to 2.x/stable Change-Id: I4c9d7fc9f3f3588fa777b5ecb14971ff923f2d11 --- ceph-mon/charmcraft.yaml | 4 +- ceph-mon/metadata.yaml | 3 +- ceph-mon/osci.yaml | 3 +- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 30 ++++---- .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 74 ++++++++++--------- ceph-mon/tests/tests.yaml | 2 + 6 files changed, 60 insertions(+), 56 deletions(-) rename ceph-mon/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (95%) rename ceph-mon/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (89%) diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index 8c6755c6..b0562382 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -37,8 +37,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 
8418e6b0..c03fff00 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -12,7 +12,8 @@ tags: series: - focal - jammy -- kinetic +- lunar +- mantic peers: mon: interface: ceph diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index f97c553f..4954c571 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -3,13 +3,12 @@ - charm-unit-jobs-py38 - charm-unit-jobs-py310 - charm-yoga-functional-jobs - - charm-zed-functional-jobs - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-mon build_type: charmcraft - charmcraft_channel: 2.0/stable + charmcraft_channel: 2.x/stable check: jobs: - new-install-focal-yoga diff --git a/ceph-mon/tests/bundles/jammy-zed.yaml b/ceph-mon/tests/bundles/jammy-bobcat.yaml similarity index 95% rename from ceph-mon/tests/bundles/jammy-zed.yaml rename to ceph-mon/tests/bundles/jammy-bobcat.yaml index a5d6e807..ad65cb90 100644 --- a/ceph-mon/tests/bundles/jammy-zed.yaml +++ b/ceph-mon/tests/bundles/jammy-bobcat.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin cloud:jammy-bobcat local_overlay_enabled: False @@ -58,6 +58,13 @@ applications: - '2' channel: 8.0/edge + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: 3.9/edge + ceph-osd: charm: ch:ceph-osd num_units: 3 @@ -93,13 +100,6 @@ applications: to: - '17' - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '9' - channel: 3.9/edge - keystone: expose: True charm: ch:keystone @@ -108,7 +108,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: zed/edge + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -118,7 +118,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: zed/edge + channel: 2023.1/edge glance: expose: True @@ -128,7 +128,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: zed/edge + channel: 2023.1/edge cinder: expose: True @@ -140,11 +140,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: zed/edge + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: zed/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: zed/edge + channel: 2023.1/edge placement: charm: ch:placement @@ -163,7 +163,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: zed/edge + channel: 2023.1/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/tests/bundles/kinetic-zed.yaml b/ceph-mon/tests/bundles/mantic-bobcat.yaml similarity index 89% rename from ceph-mon/tests/bundles/kinetic-zed.yaml rename to ceph-mon/tests/bundles/mantic-bobcat.yaml index 24818960..4fe93205 100644 --- a/ceph-mon/tests/bundles/kinetic-zed.yaml +++ b/ceph-mon/tests/bundles/mantic-bobcat.yaml @@ -1,18 +1,24 @@ variables: openstack-origin: &openstack-origin distro + # use infra (mysql, rabbit) from lts for stability + infra-series: &infra-series jammy -series: jammy +series: mantic comment: - 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': constraints: mem=3072M + series: *infra-series '1': constraints: mem=3072M + series: *infra-series '2': constraints: mem=3072M + series: *infra-series '3': + series: *infra-series '4': '5': '6': @@ -26,38 +32,43 @@ machines: '14': '15': '16': - series: focal '17': + series: focal applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: 8.0/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '3' + channel: 3.9/edge ceph-osd: charm: ch:ceph-osd @@ -68,9 +79,9 @@ applications: source: *openstack-origin osd-devices: '/dev/test-non-existent' to: - - '3' - '4' - '5' + - '6' channel: quincy/edge ceph-mon: @@ -81,9 +92,9 @@ applications: source: *openstack-origin monitor-count: '3' to: - - '6' - '7' - '8' + - '9' ceph-fs: charm: ch:ceph-fs @@ -92,16 +103,7 @@ applications: source: *openstack-origin channel: quincy/edge to: - - '17' - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge + - '10' keystone: expose: True @@ -110,8 +112,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '10' - channel: yoga/edge + - '11' + channel: 2023.1/edge nova-compute: charm: ch:nova-compute @@ -120,8 +122,8 @@ applications: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - - '11' - channel: yoga/edge + - '12' + channel: 2023.1/edge glance: expose: True @@ -130,8 +132,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '12' - channel: yoga/edge + - '13' + channel: 2023.1/edge cinder: expose: True @@ -142,12 +144,12 @@ applications: glance-api-version: '2' openstack-origin: *openstack-origin to: - - '13' - channel: yoga/edge + - '14' + channel: 2023.1/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: 2023.1/edge nova-cloud-controller: expose: True @@ -156,8 +158,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '14' - channel: yoga/edge + - '15' + channel: 2023.1/edge placement: charm: ch:placement @@ -165,15 +167,15 @@ applications: options: openstack-origin: *openstack-origin to: - - '15' - channel: yoga/edge + - '16' + channel: 2023.1/edge prometheus2: charm: ch:prometheus2 num_units: 1 series: focal to: - - '16' + - '17' relations: - - 'nova-compute:amqp' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index f90e6757..18134ca0 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -4,6 +4,8 @@ gate_bundles: - focal-xena - focal-yoga - jammy-yoga + - jammy-bobcat + - mantic-bobcat smoke_bundles: - focal-yoga From 60c4052202013aab772b4225c40a66baa0157737 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2531/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add 
bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic * update charmcraft_channel to 2.x/stable Change-Id: Ibb97b427f29a061adc2d67e55ae3976387d332c7 --- ceph-fs/charmcraft.yaml | 4 ++-- ceph-fs/osci.yaml | 3 +-- ceph-fs/requirements.txt | 19 ++++++++----------- ceph-fs/src/config.yaml | 2 +- ceph-fs/src/metadata.yaml | 2 +- .../{jammy-zed.yaml => jammy-bobcat.yaml} | 2 +- .../{kinetic-zed.yaml => mantic-bobcat.yaml} | 2 +- ceph-fs/src/tests/tests.yaml | 4 +++- ceph-fs/tox.ini | 1 - 9 files changed, 18 insertions(+), 21 deletions(-) rename ceph-fs/src/tests/bundles/{jammy-zed.yaml => jammy-bobcat.yaml} (93%) rename ceph-fs/src/tests/bundles/{kinetic-zed.yaml => mantic-bobcat.yaml} (97%) diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 25b2873b..3683b391 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -34,8 +34,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index f8e2ead4..3d15efd1 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -4,10 +4,9 @@ - charm-unit-jobs-py39 - charm-xena-functional-jobs - charm-yoga-functional-jobs - - charm-zed-functional-jobs - charm-functional-jobs vars: needs_charm_build: true charm_build_name: ceph-fs build_type: charmcraft - charmcraft_channel: 2.0/stable + charmcraft_channel: 2.x/stable diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index c539e82b..b3dc23f7 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -8,16 +8,13 @@ # requirements.txt setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 -# Build requirements -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -charm-tools==2.8.4 +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 -simplejson +git+https://github.com/juju/charm-tools.git -# Newer versions use keywords that didn't exist in python 3.5 yet (e.g. -# "ModuleNotFoundError") -# NOTE(lourot): This might look like a duplication of test-requirements.txt but -# some tox targets use only test-requirements.txt whereas charm-build uses only -# requirements.txt -importlib-metadata<3.0.0; python_version < '3.6' -importlib-resources<3.0.0; python_version < '3.6' +simplejson diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index f2af93ac..58ce4db8 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. source: type: string - default: yoga + default: quincy description: | Optional configuration to support use of additional sources such as: . 
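The net effect of the source default change above, mirrored across the sibling charms in this series, is that a deployment leaving 'source' unset now tracks the Quincy packages, while a Bobcat-on-Jammy cloud opts in through the new cloud-archive pocket entries added to charmhelpers/fetch/ubuntu.py earlier in the series. A minimal bundle fragment sketching both cases; the application layout is illustrative and not copied from any one bundle in these patches:

applications:
  ceph-fs:
    charm: ch:ceph-fs
    num_units: 1
    # 'source' left unset: falls back to the new default, quincy
  ceph-mon:
    charm: ch:ceph-mon
    num_units: 3
    options:
      # explicit opt-in to 2023.2 Bobcat via the cloud archive pocket
      source: cloud:jammy-bobcat

Either value ends up resolved through the same pocket map extended in the fetch/ubuntu.py hunk, so 'cloud:jammy-bobcat' lands on jammy-updates/bobcat.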
diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index cb2dca34..94bc2f88 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -12,8 +12,8 @@ tags: series: - focal - jammy -- kinetic - lunar +- mantic subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/bundles/jammy-zed.yaml b/ceph-fs/src/tests/bundles/jammy-bobcat.yaml similarity index 93% rename from ceph-fs/src/tests/bundles/jammy-zed.yaml rename to ceph-fs/src/tests/bundles/jammy-bobcat.yaml index 04bfb37c..09ab5319 100644 --- a/ceph-fs/src/tests/bundles/jammy-zed.yaml +++ b/ceph-fs/src/tests/bundles/jammy-bobcat.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-zed + openstack-origin: &openstack-origin cloud:jammy-bobcat local_overlay_enabled: False diff --git a/ceph-fs/src/tests/bundles/kinetic-zed.yaml b/ceph-fs/src/tests/bundles/mantic-bobcat.yaml similarity index 97% rename from ceph-fs/src/tests/bundles/kinetic-zed.yaml rename to ceph-fs/src/tests/bundles/mantic-bobcat.yaml index c976a4a5..3456d8cb 100644 --- a/ceph-fs/src/tests/bundles/kinetic-zed.yaml +++ b/ceph-fs/src/tests/bundles/mantic-bobcat.yaml @@ -3,7 +3,7 @@ variables: local_overlay_enabled: False -series: &series kinetic +series: &series mantic applications: ubuntu: # used to test mounts diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 08aef01e..a554e97b 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -3,6 +3,8 @@ gate_bundles: - focal-xena - focal-yoga - jammy-yoga + - jammy-bobcat + - mantic-bobcat smoke_bundles: - focal-xena # configure: @@ -19,4 +21,4 @@ tests: target_deploy_status: ubuntu: workload-status: active - workload-status-message-prefix: '' \ No newline at end of file + workload-status-message-prefix: '' diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 9ae404dc..50527f59 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -97,7 +97,6 @@ commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 deps = flake8==3.9.2 - charm-tools==2.8.4 commands = flake8 {posargs} src unit_tests [testenv:func-target] From f417e36b15810e3b5a770300b615a64cf5d22de3 Mon Sep 17 00:00:00 2001 From: Jadon Naas Date: Tue, 8 Aug 2023 10:30:15 -0400 Subject: [PATCH 2532/2699] Fix typo in actions.yaml There was a typo in actions.yaml for the resume action that was breaking builds. This change fixes that typo. The charm will build. Closes-Bug: 2030677 Change-Id: I4501c3142620dc6bff87f554a9f26b39ac4c927d --- ceph-radosgw/actions.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml index 90a05216..abe46ad2 100644 --- a/ceph-radosgw/actions.yaml +++ b/ceph-radosgw/actions.yaml @@ -1,7 +1,7 @@ pause: description: Pause the ceph-radosgw unit. resume: - descrpition: Resume the ceph-radosgw unit. + description: Resume the ceph-radosgw unit. promote: description: Promote the zone associated with the local units to master/default (multi-site). readonly: From da6f655e7857fef11effcf6ad28ef3cdeb0b246c Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 25 Jul 2023 17:03:36 -0400 Subject: [PATCH 2533/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents. When the new charm revision is released to the Charmhub, this Discourse-based content will be displayed there. 
In the absence of this new key, the Charmhub's default behaviour is to display the value of the charm's 'description' key. Change-Id: I4b117272f68a27732809d0dddc31ac9cf69c60d9 --- ceph-fs/src/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index 94bc2f88..e20f31f3 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -4,6 +4,7 @@ maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. +docs: https://discourse.charmhub.io/t/ceph-fs-docs-index/11223 tags: - openstack - storage From 287a75a54df93e087ddb055a8d4defa6e07f9cce Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 7 Aug 2023 19:09:50 -0300 Subject: [PATCH 2534/2699] Fix pool names in RadosGW charm The latest Ceph versions forbid pool names that start with a dot. Since the RadosGW charm makes extensive use of such pool names, this patchset fixes that issue. In addition, the Ceph libraries are synced, since they were outdated. Change-Id: I50112480bb3669de08ee85a9bf9a594b379e9ec3 --- ceph-radosgw/actions.yaml | 2 +- ceph-radosgw/hooks/ceph_rgw.py | 13 +++++-- .../contrib/openstack/cert_utils.py | 33 ++++++++++------ .../hooks/charmhelpers/core/unitdata.py | 38 ++++++++++++++++++- ceph-radosgw/lib/charms_ceph/utils.py | 8 +++- ceph-radosgw/unit_tests/test_ceph.py | 16 +------- 6 files changed, 77 insertions(+), 33 deletions(-) diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml index 90a05216..abe46ad2 100644 --- a/ceph-radosgw/actions.yaml +++ b/ceph-radosgw/actions.yaml @@ -1,7 +1,7 @@ pause: description: Pause the ceph-radosgw unit. resume: - descrpition: Resume the ceph-radosgw unit. + description: Resume the ceph-radosgw unit. promote: description: Promote the zone associated with the local units to master/default (multi-site). readonly: diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index 463c281f..d3a98604 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -79,6 +79,10 @@ def import_radosgw_key(key, name=None): return False +def normalize_pool_name(pool): + return pool[1:] if pool.startswith('.') else pool + + def get_create_rgw_pools_rq(prefix=None): """Pre-create RGW pools so that they have the correct settings. @@ -101,6 +105,8 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): w = weights.get(pool, 0.10) if prefix: pool = "{prefix}{pool}".format(prefix=prefix, pool=pool) + + pool = normalize_pool_name(pool) if pg_num > 0: rq.add_op_create_pool(name=pool, replica_count=replicas, pg_num=pg_num, group='objects', @@ -162,7 +168,7 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): # the function arguments. Until then we need to build the dict # prior to the function call. kwargs = { - 'name': pool, + 'name': normalize_pool_name(pool), 'erasure_profile': profile_name, 'weight': bucket_weight, 'group': "objects", @@ -178,7 +184,7 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): # the function arguments. Until then we need to build the dict # prior to the function call. 
kwargs = { - 'name': pool, + 'name': normalize_pool_name(pool), 'replica_count': replicas, 'weight': bucket_weight, 'group': 'objects', @@ -209,7 +215,8 @@ def _add_light_pool(rq, pool, pg_num, prefix=None): for pool in light: _add_light_pool(rq, pool, pg_num, prefix) - _add_light_pool(rq, '.rgw.root', pg_num) + # RadosGW creates this pool automatically from Quincy on. + # _add_light_pool(rq, '.rgw.root', pg_num) if config('restrict-ceph-pools'): rq.add_op_request_access_to_group(name="objects", diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py index a25ca995..6620f59f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -414,18 +414,27 @@ def get_requests_for_local_unit(relation_name=None): is_legacy_request = set(sent).intersection(legacy_keys) for unit in related_units(rid): data = relation_get(rid=rid, unit=unit) - if data.get(raw_certs_key): - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': json.loads(data[raw_certs_key])}) - elif is_legacy_request: - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': {sent['common_name']: - {'cert': data.get(local_name + '.server.cert'), - 'key': data.get(local_name + '.server.key')}}}) + # Note: Bug#2028683 - data may not be available if the certificates + # relation hasn't been populated by the providing charm. If no 'ca' + # in the data then don't attempt the bundle at all. + if data.get('ca'): + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key]) + }) + elif is_legacy_request: + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': { + sent['common_name']: { + 'cert': data.get(local_name + '.server.cert'), + 'key': data.get(local_name + '.server.key') + } + } + }) return bundles diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index 8f4bbc61..dac757f1 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -151,6 +151,7 @@ def config_changed(): import datetime import itertools import json +import logging import os import pprint import sqlite3 @@ -521,6 +522,41 @@ class DeltaSet(Record): def kv(): global _KV + + # If we are running unit tests, it is useful to go into memory-backed KV store to + # avoid concurrency issues when running multiple tests. This is not a + # problem when juju is running normally. 
+ + env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower() + if env_var not in ["auto", "no", "yes"]: + logging.warning(f"Unknown value for CHARM_HELPERS_TESTMODE '{env_var}', assuming 'no'") + env_var = "no" + + if env_var == "no": + in_memory_db = False + elif env_var == "yes": + in_memory_db = True + elif env_var == "auto": + # If UNIT_STATE_DB is set, respect this request + if "UNIT_STATE_DB" in os.environ: + in_memory_db = False + # Autodetect normal juju execution by looking for juju variables + elif "JUJU_CHARM_DIR" in os.environ or "JUJU_UNIT_NAME" in os.environ: + in_memory_db = False + else: + # We are probably running in unit test mode + logging.warning("Auto-detected unit test environment for KV store.") + in_memory_db = True + else: + # Help the linter realise that in_memory_db is always set + raise Exception("Cannot reach this line") + if _KV is None: - _KV = Storage() + if in_memory_db: + _KV = Storage(":memory:") + else: + _KV = Storage() + else: + if in_memory_db and _KV.db_path != ":memory:": + logging.warning("Running with in_memory_db and KV is not set to :memory:") return _KV diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 01fb9ac9..41eff9b4 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1223,6 +1223,11 @@ def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) +def is_internal_client(name): + keys = ('osd-upgrade', 'osd-removal', 'admin', 'rbd-mirror', 'mds') + return any(name.startswith(key) for key in keys) + + def get_named_key(name, caps=None, pool_list=None): """Retrieve a specific named cephx key. @@ -1236,7 +1241,8 @@ def get_named_key(name, caps=None, pool_list=None): key = ceph_auth_get(key_name) if key: - upgrade_key_caps(key_name, caps) + if is_internal_client(name): + upgrade_key_caps(key_name, caps) return key log("Creating new key for {}".format(name), level=DEBUG) diff --git a/ceph-radosgw/unit_tests/test_ceph.py b/ceph-radosgw/unit_tests/test_ceph.py index 28e73402..98e3d37a 100644 --- a/ceph-radosgw/unit_tests/test_ceph.py +++ b/ceph-radosgw/unit_tests/test_ceph.py @@ -107,9 +107,6 @@ def test_create_rgw_pools_rq_with_prefix( call('us-east.rgw.buckets.index', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), - call('.rgw.root', replica_count=3, pg_num=10, weight=None, - group='objects', namespace=None, app_name='rgw', - max_bytes=None, max_objects=None), ]) # confirm operation with bluestore compression @@ -163,9 +160,6 @@ def test_create_rgw_pools_rq_with_prefix( call('us-east.rgw.buckets.index', replica_count=3, pg_num=10, weight=None, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), - call('.rgw.root', replica_count=3, pg_num=10, weight=None, - group='objects', namespace=None, app_name='rgw', - max_bytes=None, max_objects=None), ]) @patch.object(utils.context, 'CephBlueStoreCompressionContext') @@ -228,9 +222,6 @@ def test_create_rgw_pools_rq_no_prefix_post_jewel( call('default.rgw.buckets.index', replica_count=3, pg_num=None, weight=3.0, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), - call('.rgw.root', replica_count=3, pg_num=None, weight=0.1, - group='objects', namespace=None, app_name='rgw', - max_bytes=None, max_objects=None), ]) mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', @@ -287,9 +278,6 @@ def 
test_create_rgw_pools_rq_no_prefix_post_jewel( call('default.rgw.buckets.index', replica_count=3, pg_num=None, weight=3.0, group='objects', namespace=None, app_name='rgw', max_bytes=None, max_objects=None), - call('.rgw.root', replica_count=3, pg_num=None, weight=0.1, - group='objects', namespace=None, app_name='rgw', - max_bytes=None, max_objects=None), ]) @patch.object(utils.context, 'CephBlueStoreCompressionContext') @@ -365,9 +353,7 @@ def test_create_rgw_pools_rq_no_prefix_ec(self, mock_broker, call(weight=3.00, replica_count=3, name='default.rgw.buckets.index', group='objects', app_name='rgw'), - call(weight=0.10, replica_count=3, name='.rgw.root', - group='objects', app_name='rgw')], - ) + ]) mock_request_access.assert_called_with(key_name='radosgw.gateway', name='objects', permission='rwx') From 9be6a2c0f8a060550a74f34cfbaf0cf26e4b618e Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 10 Aug 2023 16:56:19 -0400 Subject: [PATCH 2535/2699] Use charmcraft 2.x to build Change-Id: Ifbbbc1e8c525ef16f589bfc2e3b4fcef5f74135f --- ceph-dashboard/osci.yaml | 1 + ceph-dashboard/tox.ini | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index df2d7b99..9cb187a6 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -8,3 +8,4 @@ needs_charm_build: true charm_build_name: ceph-dashboard build_type: charmcraft + charmcraft_channel: 2.x/stable diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index cb9fdfba..70a0d4b6 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -121,7 +121,7 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh [testenv:func-noop] From 8640ff4787e21130836d4f5be9cff56e211e54ce Mon Sep 17 00:00:00 2001 From: Felipe Reyes Date: Thu, 10 Aug 2023 17:00:17 -0400 Subject: [PATCH 2536/2699] Use charmcraft 2.x to build Change-Id: I02cdc32404868bde1456672f43edb525d3aaf59c --- ceph-osd/osci.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 2538297b..84a63029 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -9,3 +9,4 @@ needs_charm_build: true charm_build_name: ceph-osd build_type: charmcraft + charmcraft_channel: 2.x/stable From b5422ef2e3366a06f51115863879f11a43b1af2a Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Tue, 10 Jan 2023 12:01:32 +1030 Subject: [PATCH 2537/2699] Add config option for tuning osd memory target Closes-Bug: #1934143 Depends-On: https://review.opendev.org/c/openstack/charm-ceph-mon/+/869896 Change-Id: I22dfc25c4ac2737f5d872ca2bdab3c533533dbff --- ceph-osd/config.yaml | 24 ++++++ ceph-osd/hooks/ceph_hooks.py | 72 +++++++++++++++- ceph-osd/unit_tests/test_ceph_hooks.py | 112 +++++++++++++++++++++++++ 3 files changed, 207 insertions(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 10340c26..22908119 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -223,6 +223,30 @@ options: . Setting this option on a running Ceph OSD node will not affect running OSD devices, but will add the setting to ceph.conf for the next restart. + tune-osd-memory-target: + type: string + default: + description: | + Set to tune the value of osd_memory_target. + + If unset or set to an empty string, + the charm will not update the value for ceph. + This means that a new deployment with this value unset will default to ceph's default (4GB). 
+ And if a value was set, but then later unset, ceph will remain configured with the last set value. + This is to allow for manually configuring this value in ceph without interference from the charm. + + If set to "{n}%" (where n is an integer), the value will be set as follows: + + total ram * (n/100) / number of osds on the host + + If set to "{n}GB" (n is an integer), osd_memory_target will be set per OSD directly. + + Take care when choosing a value that it both provides enough memory for ceph + and leave enough memory for the system and other workloads to function. + For common cases, + it is recommended to stay within the bounds of 4GB < value < 90% of system memory. + If these bounds are broken, a warning will be emitted by the charm, + but the value will still be set. ignore-device-errors: type: boolean default: False diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 6622896a..474389a2 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -19,6 +19,7 @@ import json import netifaces import os +import re import shutil import socket import subprocess @@ -54,6 +55,7 @@ add_to_updatedb_prunepath, cmp_pkgrevno, is_container, + get_total_ram, lsb_release, mkdir, service_reload, @@ -360,6 +362,63 @@ def use_short_objects(): return False +def warn_if_memory_outside_bounds(value): + """ + Log a warning if value < 4GB or (value * osds) > 90% total memory. + + :param value: int - proposed value for osd_memory_target in bytes + """ + ninety_percent = int(0.9 * get_total_ram()) + four_GB = 4 * 1024 * 1024 * 1024 + num_osds = len(kv().get("osd-devices", [])) + + # 4GB is the default value; we don't want to go lower than that, + # otherwise performance will be impacted. + if value < four_GB: + log("tune-osd-memory-target results in value < 4GB. " + "This is not recommended.", level=WARNING) + + # 90% is a somewhat arbitrary upper limit, + # that should allow enough memory for the OS to function, + # while not limiting ceph too much. + elif (value * num_osds) > ninety_percent: + log("tune-osd-memory-target results in value > 90% of system ram. " + "This is not recommended.", level=WARNING) + + +def get_osd_memory_target(): + """ + Processes the config value of tune-osd-memory-target. + + Returns a safe value for osd_memory_target. + + :returns: integer value for osd_memory_target, converted to a string. 
+ :rtype: string + """ + tune_osd_memory_target = config('tune-osd-memory-target') + + if not tune_osd_memory_target: + return "" + + match = re.match(r"(\d+)GB$", tune_osd_memory_target) + if match: + osd_memory_target = int(match.group(1)) * 1024 * 1024 * 1024 + warn_if_memory_outside_bounds(osd_memory_target) + return str(osd_memory_target) + + match = re.match(r"(\d+)%$", tune_osd_memory_target) + if match: + percentage = int(match.group(1)) / 100 + num_osds = len(kv().get("osd-devices", [])) + osd_memory_target = int(get_total_ram() * percentage / num_osds) + warn_if_memory_outside_bounds(osd_memory_target) + return str(osd_memory_target) + + log("tune-osd-memory-target value invalid," + " leaving the OSD memory target unchanged", level=ERROR) + return "" + + def get_ceph_context(upgrading=False): """Returns the current context dictionary for generating ceph.conf @@ -475,6 +534,15 @@ def config_changed(): if sysctl_dict: create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf') + for r_id in hookenv.relation_ids('mon'): + hookenv.relation_set( + relation_id=r_id, + relation_settings={ + 'osd-host': socket.gethostname(), + 'osd-memory-target': get_osd_memory_target(), + } + ) + e_mountpoint = config('ephemeral-unmount') if e_mountpoint and ceph.filesystem_mounted(e_mountpoint): umount(e_mountpoint) @@ -563,7 +631,9 @@ def prepare_disks_and_activate(): 'bootstrapped-osds': len(db.get('osd-devices', [])), 'ceph_release': ceph.resolve_ceph_version( hookenv.config('source') or 'distro' - ) + ), + 'osd-host': socket.gethostname(), + 'osd-memory-target': get_osd_memory_target(), } ) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 2d332bf1..2bfa2d3c 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -34,6 +34,7 @@ 'osd-journal-size': 1024, 'osd-max-backfills': 1, 'osd-recovery-max-active': 2, + 'tune-osd-memory-target': '', 'use-direct-io': True, 'osd-format': 'ext4', 'prefer-ipv6': False, @@ -54,6 +55,8 @@ class CephHooksTestCase(unittest.TestCase): + maxDiff = None + def setUp(self): super(CephHooksTestCase, self).setUp() @@ -707,6 +710,115 @@ def test_get_bdev_enable_discard(self, mock_config, config['bdev-enable-discard'] = value self.assertEqual(ceph_hooks.get_bdev_enable_discard(), expected) + @patch.object(ceph_hooks, "get_total_ram") + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks, "log") + def test_warn_memory_bounds( + self, mock_log, mock_kv, mock_total_ram + ): + mock_total_ram.return_value = 16 * 1024 * 1024 * 1024 # 16GB + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + ceph_hooks.warn_if_memory_outside_bounds(5 * 1024 * 1024 * 1024) # 5GB + mock_log.assert_not_called() + + mock_kv.return_value = {"osd-devices": ["osd1", "osd2", "osd3"]} + ceph_hooks.warn_if_memory_outside_bounds(5 * 1024 * 1024 * 1024) # 5GB + mock_log.assert_called_with( + "tune-osd-memory-target results in value > 90% of system ram. " + "This is not recommended.", + level=ceph_hooks.WARNING + ) + + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + ceph_hooks.warn_if_memory_outside_bounds(2 * 1024 * 1024 * 1024) # 2GB + mock_log.assert_called_with( + "tune-osd-memory-target results in value < 4GB. 
" + "This is not recommended.", + level=ceph_hooks.WARNING + ) + + @patch.object(ceph_hooks, "config") + @patch.object(ceph_hooks, "get_total_ram") + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks, "log") + def test_get_osd_memory_target_gb( + self, mock_log, mock_kv, mock_total_ram, + mock_config, + ): + mock_total_ram.return_value = 16 * 1024 * 1024 * 1024 # 16GB + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + + def config_func(k): + if k == "tune-osd-memory-target": + return "5GB" + raise ValueError + mock_config.side_effect = config_func + + target = ceph_hooks.get_osd_memory_target() + self.assertEqual(target, str(5 * 1024 * 1024 * 1024)) # 5GB + + @patch.object(ceph_hooks, "config") + @patch.object(ceph_hooks, "get_total_ram") + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks, "log") + def test_get_osd_memory_target_percentage( + self, mock_log, mock_kv, mock_total_ram, + mock_config, + ): + mock_total_ram.return_value = 16 * 1024 * 1024 * 1024 # 16GB + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + + def config_func(k): + if k == "tune-osd-memory-target": + return "50%" + raise ValueError + mock_config.side_effect = config_func + + target = ceph_hooks.get_osd_memory_target() + # should be 50% of 16GB / 2 osd devices = 4GB + self.assertEqual(target, str(4 * 1024 * 1024 * 1024)) # 4GB + + @patch.object(ceph_hooks, "config") + @patch.object(ceph_hooks, "get_total_ram") + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks, "log") + def test_get_osd_memory_target_empty( + self, mock_log, mock_kv, mock_total_ram, + mock_config, + ): + mock_total_ram.return_value = 16 * 1024 * 1024 * 1024 # 16GB + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + + mock_config.side_effect = lambda _: None + + target = ceph_hooks.get_osd_memory_target() + self.assertEqual(target, "") + + @patch.object(ceph_hooks, "config") + @patch.object(ceph_hooks, "get_total_ram") + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks, "log") + def test_get_osd_memory_target_invalid( + self, mock_log, mock_kv, mock_total_ram, + mock_config, + ): + mock_total_ram.return_value = 16 * 1024 * 1024 * 1024 # 16GB + mock_kv.return_value = {"osd-devices": ["osd1", "osd2"]} + + def config_func(k): + if k == "tune-osd-memory-target": + return "foo" + raise ValueError + mock_config.side_effect = config_func + + target = ceph_hooks.get_osd_memory_target() + self.assertEqual(target, "") + mock_log.assert_called_with( + "tune-osd-memory-target value invalid," + " leaving the OSD memory target unchanged", + level=ceph_hooks.ERROR, + ) + @patch.object(ceph_hooks, 'local_unit') @patch.object(ceph_hooks, 'relation_get') From 55456ab01e6fa136a8b0113a27551b076bf1d0f6 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Jul 2023 16:47:18 -0400 Subject: [PATCH 2538/2699] Add 2023.2 Bobcat support * sync charm-helpers to classic charms * change openstack-origin/source default to quincy * add mantic to metadata series * align testing with bobcat * add new bobcat bundles * add bobcat bundles to tests.yaml * add bobcat tests to osci.yaml * update build-on and run-on bases * drop kinetic Change-Id: Ia2b1ab2a1bb0de5c46e22a5348c6530ff13e83d0 --- ceph-nfs/charmcraft.yaml | 4 ++-- ceph-nfs/config.yaml | 2 +- ceph-nfs/metadata.yaml | 4 ++-- ceph-nfs/osci.yaml | 13 +++++++------ .../{kinetic-quincy.yaml => mantic-quincy.yaml} | 9 +++++---- ceph-nfs/tests/tests.yaml | 1 + ceph-nfs/tox.ini | 4 ++-- 7 files changed, 20 insertions(+), 17 deletions(-) rename 
ceph-nfs/tests/bundles/{kinetic-quincy.yaml => mantic-quincy.yaml} (89%) diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml index ae4e297d..71f50dd0 100644 --- a/ceph-nfs/charmcraft.yaml +++ b/ceph-nfs/charmcraft.yaml @@ -34,8 +34,8 @@ bases: channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "22.10" + channel: "23.04" architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu - channel: "23.04" + channel: "23.10" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index 00c53bcb..fd6c3337 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -10,7 +10,7 @@ options: source: type: string - default: yoga + default: quincy description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index 9a583a1e..76f85c29 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -10,8 +10,8 @@ tags: series: - focal - jammy - - kinetic - lunar + - mantic subordinate: false min-juju-version: 2.7.6 extra-bindings: @@ -24,4 +24,4 @@ requires: scope: container peers: cluster: - interface: ceph-nfs-peer \ No newline at end of file + interface: ceph-nfs-peer diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index 8002eaf5..7500386e 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -7,12 +7,13 @@ - focal-pacific - focal-quincy - jammy-quincy - - kinetic-quincy - lunar-quincy + - mantic-quincy vars: needs_charm_build: true charm_build_name: ceph-nfs build_type: charmcraft + charmcraft_channel: 2.x/stable - job: name: focal-pacific parent: func-target @@ -41,18 +42,18 @@ vars: tox_extra_args: -- jammy-quincy - job: - name: kinetic-quincy + name: lunar-quincy parent: func-target + voting: false dependencies: - focal-quincy vars: - tox_extra_args: -- kinetic-quincy + tox_extra_args: -- lunar-quincy - job: - name: lunar-quincy + name: mantic-quincy parent: func-target voting: false dependencies: - focal-quincy vars: - tox_extra_args: -- lunar-quincy - \ No newline at end of file + tox_extra_args: -- mantic-quincy diff --git a/ceph-nfs/tests/bundles/kinetic-quincy.yaml b/ceph-nfs/tests/bundles/mantic-quincy.yaml similarity index 89% rename from ceph-nfs/tests/bundles/kinetic-quincy.yaml rename to ceph-nfs/tests/bundles/mantic-quincy.yaml index 669cb915..0f8616ce 100644 --- a/ceph-nfs/tests/bundles/kinetic-quincy.yaml +++ b/ceph-nfs/tests/bundles/mantic-quincy.yaml @@ -1,5 +1,5 @@ local_overlay_enabled: False -series: jammy +series: mantic applications: ubuntu: charm: cs:ubuntu @@ -11,7 +11,7 @@ applications: source: distro ceph-osd: charm: ch:ceph-osd - channel: quincy/edge + channel: latest/edge num_units: 3 storage: osd-devices: '2,10G' @@ -19,7 +19,7 @@ source: distro ceph-mon: charm: ch:ceph-mon - channel: quincy/edge + channel: latest/edge num_units: 3 options: monitor-count: '3' @@ -27,7 +27,7 @@ source: distro ceph-fs: charm: ch:ceph-fs - channel: quincy/edge + channel: latest/edge num_units: 2 options: source: distro diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index fe66ba01..cd8f59ec 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -3,6 +3,7 @@ gate_bundles: - focal-quincy - focal-pacific - jammy-pacific + - mantic-quincy smoke_bundles: - focal-pacific configure: [] diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini index c278ef61..c8550616 100644 --- a/ceph-nfs/tox.ini +++ b/ceph-nfs/tox.ini @@ 
-118,7 +118,7 @@ basepython = python3 deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean - charmcraft -v build + charmcraft -v pack {toxinidir}/rename.sh [testenv:func-noop] @@ -148,4 +148,4 @@ commands = [flake8] # Ignore E902 because the unit_tests directory is missing in the built charm. -ignore = E402,E226,E902 \ No newline at end of file +ignore = E402,E226,E902 From b9ac2d739c1a6aae78db0380785147ab10ba1897 Mon Sep 17 00:00:00 2001 From: utkarshbhatthere Date: Fri, 26 May 2023 16:47:16 +0530 Subject: [PATCH 2539/2699] Fixes SSL conflicts between relation and config data. The fix adds event-based handling of SSL configuration via charm config, and cleanup of SSL key/certs whether they were supplied by the relation or by config. It also adds logical abstractions to analyse the SSL setup and emit relevant events. Closes-Bug: 1952282 Change-Id: Ic486434526f639f5985cfe355e303c1d6ff5fa0d Signed-off-by: utkarshbhatthere func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1090 --- ceph-dashboard/src/ceph_dashboard_commands.py | 152 +++++ ceph-dashboard/src/charm.py | 619 +++++++++++------- ceph-dashboard/src/charm_option.py | 108 +++ ceph-dashboard/tests/bundles/focal-yoga.yaml | 2 +- ceph-dashboard/tests/bundles/focal.yaml | 2 +- .../tests/bundles/jammy-antelope.yaml | 2 +- .../tests/bundles/jammy-bobcat.yaml | 2 +- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 2 +- .../tests/bundles/lunar-antelope.yaml | 4 +- .../tests/bundles/mantic-bobcat.yaml | 2 +- .../unit_tests/test_ceph_dashboard_charm.py | 95 ++- 11 files changed, 701 insertions(+), 289 deletions(-) create mode 100644 ceph-dashboard/src/ceph_dashboard_commands.py create mode 100644 ceph-dashboard/src/charm_option.py diff --git a/ceph-dashboard/src/ceph_dashboard_commands.py b/ceph-dashboard/src/ceph_dashboard_commands.py new file mode 100644 index 00000000..d047e899 --- /dev/null +++ b/ceph-dashboard/src/ceph_dashboard_commands.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +import json +import socket +from typing import List +from functools import partial + +import subprocess +import logging + +from charm_option import CharmCephOption + +logger = logging.getLogger(__name__) + + +def _run_cmd(cmd: List[str]): + """Run command in subprocess + + `cmd` The command to run + """ + return subprocess.check_output( + cmd, stderr=subprocess.STDOUT + ).decode('UTF-8') + + +def exec_option_ceph_cmd(option: CharmCephOption, value: str) -> None: + """Execute internal ceph command for the CharmCephOption""" + _run_cmd(option.ceph_command(value)) + + +def ceph_dashboard_delete_user(user: str) -> None: + """Delete Ceph dashboard user.""" + cmd = ['ceph', 'dashboard', 'ac-user-delete', user] + _run_cmd(cmd) + + +def ceph_dashboard_add_user(user: str, filename: str, role: str) -> str: + """Create Ceph dashboard user.""" + cmd = [ + 'ceph', 'dashboard', 'ac-user-create', '--enabled', + '-i', filename, user, role + ] + return _run_cmd(cmd) + + +def ceph_dashboard_config_saml( + base_url: str, idp_meta: str, + username_attr: str, idp_entity_id: str +) -> None: + """Configure SSO SAML2""" + cmd = [ + 'ceph', 'dashboard', 'sso', 'setup', 'saml2', + base_url, idp_meta + ] + if username_attr: + cmd.append(username_attr) + + if idp_entity_id: + cmd.append(idp_entity_id) + _run_cmd(cmd) + + +def ceph_config_get(key: str) -> str: + "Fetch the value for a particular ceph-config key."
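+ # A missing key makes `ceph config-key get` exit non-zero, raising + # CalledProcessError; it is caught below and logged, so the function + # implicitly returns None (the SSL check helpers rely on this).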
+ cmd = [ + "ceph", "config-key", "get", key + ] + try: + return _run_cmd(cmd) + except subprocess.CalledProcessError: + logger.error("Failed to fetch key %s", key) + + +def ceph_config_list() -> list: + "Fetch list of ceph-config keys." + cmd = [ + "ceph", "config-key", "ls" + ] + + # CLI returns empty list if no config-key is configured. + return json.loads(_run_cmd(cmd)) + + +def ceph_config_set(key: str, value: str) -> None: + "Set the provided key/value pair" + cmd = ["ceph", "config-key", "set", key, value] + + logging.debug("Setting config-key: %s", key) + _run_cmd(cmd) + + +def ceph_config_reset(key: str) -> None: + "Remove the provided key/value pair" + cmd = ["ceph", "config-key", "rm", key] + + logging.debug("Removing config-key: %s", key) + _run_cmd(cmd) + + +def dashboard_set(prop: str, value: str) -> str: + "Configure ceph dashboard properties" + logger.debug("Setting Dashboard %s as %s", prop, value) + return _run_cmd(["ceph", "dashboard", prop, value]) + + +def apply_setting(ceph_setting: str, value: List[str]) -> str: + """Apply a dashboard setting""" + cmd = ["ceph", "dashboard", ceph_setting] + cmd.extend(value) + return _run_cmd(cmd) + + +get_ceph_dashboard_ssl_key = partial(ceph_config_get, "mgr/dashboard/key") +get_ceph_dashboard_ssl_crt = partial(ceph_config_get, "mgr/dashboard/crt") +get_ceph_dashboard_host_ssl_key = partial( + ceph_config_get, f"mgr/dashboard/{socket.gethostname()}/key" +) +get_ceph_dashboard_host_ssl_crt = partial( + ceph_config_get, f"mgr/dashboard/{socket.gethostname()}/crt" +) + + +def check_ceph_dashboard_ssl_enabled() -> bool: + """Check if ssl config-key is set to true""" + ssl_status = ceph_config_get("config/mgr/mgr/dashboard/ssl") + return ssl_status == "true" + + +def check_ceph_dashboard_ssl_configured( + is_check_host_key: bool = False) -> bool: + """Check if SSL key and certificate are configured on ceph dashboard.""" + if is_check_host_key: + keys = [ + f"mgr/dashboard/{socket.gethostname()}/crt", + f"mgr/dashboard/{socket.gethostname()}/key", + ] + else: + keys = [ # List of keys to check for ssl configuration + "mgr/dashboard/crt", + "mgr/dashboard/key" + ] + + for key in keys: + value = ceph_config_get(key) + if value is None: + return False + + return True diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 4fa7aa0e..5ef66ecc 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -7,41 +7,60 @@ """Charm for the Ceph Dashboard.""" import json -import logging -import tempfile - -from ops.framework import StoredState -from ops.main import main -from ops.model import ActiveStatus, BlockedStatus, StatusBase -from ops.charm import ActionEvent -from typing import List, Union, Tuple - import base64 -import interface_tls_certificates.ca_client as ca_client -import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface +import logging import re import secrets import socket import string import subprocess -import tenacity -import ops_openstack.plugins.classes +import tempfile +from pathlib import Path +from typing import List, Tuple, Union + +import charmhelpers.core.host as ch_host +import charms_ceph.utils as ceph_utils +import cryptography.hazmat.primitives.serialization as serialization import interface_ceph_iscsi_admin_access.admin_access as admin_access import interface_dashboard -import interface_grafana_dashboard +import interface_grafana_dashboard as grafana_interface import interface_http +import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface import
interface_radosgw_user -import cryptography.hazmat.primitives.serialization as serialization -import charms_ceph.utils as ceph_utils -import charmhelpers.core.host as ch_host +import interface_tls_certificates.ca_client as ca_client +import ops_openstack.plugins.classes +import tenacity -from pathlib import Path +from ops.charm import ActionEvent, CharmEvents +from ops.framework import EventBase, EventSource, StoredState +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, StatusBase + +# Charm Src +import ceph_dashboard_commands as cmds +from charm_option import CharmCephOptionList logger = logging.getLogger(__name__) TLS_Config = Tuple[Union[bytes, None], Union[bytes, None], Union[bytes, None]] +# Maintenance Events +class DisableSSL(EventBase): + """Charm Event to disable SSL and clean certificates.""" + + +class EnableSSLFromConfig(EventBase): + """Charm Event to configure SSL using Charm config values.""" + + +class CephCharmEvents(CharmEvents): + """Custom charm events.""" + + disable_ssl = EventSource(DisableSSL) + enable_ssl_from_config = EventSource(EnableSSLFromConfig) + + class CephDashboardCharm(ops_openstack.core.OSBaseCharm): """Ceph Dashboard charm.""" @@ -59,183 +78,106 @@ class CephDashboardCharm(ops_openstack.core.OSBaseCharm): DASH_DIR = Path('src/dashboards') LB_SERVICE_NAME = "ceph-dashboard" - class CharmCephOption(): - """Manage a charm option to ceph command to manage that option""" + # Charm Events + on = CephCharmEvents() - def __init__(self, charm_option_name, ceph_option_name, - min_version=None): - self.charm_option_name = charm_option_name - self.ceph_option_name = ceph_option_name - self.min_version = min_version - - def is_supported(self) -> bool: - """Is the option supported on this unit""" - if self.min_version: - return self.minimum_supported(self.min_version) - return True - - def minimum_supported(self, supported_version: str) -> bool: - """Check if installed Ceph release is >= to supported_version""" - return ch_host.cmp_pkgrevno('ceph-common', supported_version) >= 0 - - def convert_option(self, value: Union[bool, str, int]) -> List[str]: - """Convert a value to the corresponding value part of the ceph - dashboard command""" - return [str(value)] - - def ceph_command(self, value: List[str]) -> List[str]: - """Shell command to set option to desired value""" - cmd = ['ceph', 'dashboard', self.ceph_option_name] - cmd.extend(self.convert_option(value)) - return cmd - - class DebugOption(CharmCephOption): - - def convert_option(self, value): - """Convert charm True/False to enable/disable""" - if value: - return ['enable'] - else: - return ['disable'] - - class MOTDOption(CharmCephOption): - - def convert_option(self, value): - """Split motd charm option into ['severity', 'time', 'message']""" - if value: - return value.split('|') - else: - return ['clear'] - - CHARM_TO_CEPH_OPTIONS = [ - DebugOption('debug', 'debug'), - CharmCephOption( - 'enable-password-policy', - 'set-pwd-policy-enabled'), - CharmCephOption( - 'password-policy-check-length', - 'set-pwd-policy-check-length-enabled'), - CharmCephOption( - 'password-policy-check-oldpwd', - 'set-pwd-policy-check-oldpwd-enabled'), - CharmCephOption( - 'password-policy-check-username', - 'set-pwd-policy-check-username-enabled'), - CharmCephOption( - 'password-policy-check-exclusion-list', - 'set-pwd-policy-check-exclusion-list-enabled'), - CharmCephOption( - 'password-policy-check-complexity', - 'set-pwd-policy-check-complexity-enabled'), - CharmCephOption( - 
'password-policy-check-sequential-chars', - 'set-pwd-policy-check-sequential-chars-enabled'), - CharmCephOption( - 'password-policy-check-repetitive-chars', - 'set-pwd-policy-check-repetitive-chars-enabled'), - CharmCephOption( - 'password-policy-min-length', - 'set-pwd-policy-min-length'), - CharmCephOption( - 'password-policy-min-complexity', - 'set-pwd-policy-min-complexity'), - CharmCephOption( - 'audit-api-enabled', - 'set-audit-api-enabled'), - CharmCephOption( - 'audit-api-log-payload', - 'set-audit-api-log-payload'), - MOTDOption( - 'motd', - 'motd', - min_version='15.2.14') - ] + CHARM_TO_CEPH_OPTIONS = CharmCephOptionList().get() def __init__(self, *args) -> None: """Setup adapters and observers.""" super().__init__(*args) super().register_status_check(self.check_dashboard) self.framework.observe( - self.on.config_changed, - self._configure_dashboard) - self.mon = interface_dashboard.CephDashboardRequires( - self, - 'dashboard') - self.ca_client = ca_client.CAClient( - self, - 'certificates') + self.on.config_changed, self._configure_dashboard + ) + self.mon = interface_dashboard.CephDashboardRequires(self, "dashboard") self.radosgw_user = interface_radosgw_user.RadosGWUserRequires( - self, - 'radosgw-dashboard', - request_system_role=True) + self, "radosgw-dashboard", request_system_role=True + ) self.iscsi_user = admin_access.CephISCSIAdminAccessRequires( - self, - 'iscsi-dashboard') - self.framework.observe( - self.mon.on.mon_ready, - self._configure_dashboard) - self.framework.observe( - self.ca_client.on.ca_available, - self._configure_dashboard) + self, "iscsi-dashboard" + ) self.framework.observe( - self.ca_client.on.tls_server_config_ready, - self._configure_dashboard) + self.mon.on.mon_ready, self._configure_dashboard + ) self.framework.observe( - self.radosgw_user.on.gw_user_ready, - self._configure_dashboard) + self.radosgw_user.on.gw_user_ready, self._configure_dashboard + ) self.framework.observe( - self.iscsi_user.on.admin_access_ready, - self._configure_dashboard) + self.iscsi_user.on.admin_access_ready, self._configure_dashboard + ) self.framework.observe(self.on.add_user_action, self._add_user_action) self.framework.observe( - self.on.delete_user_action, - self._delete_user_action) + self.on.delete_user_action, self._delete_user_action + ) self.ingress = ops_lb_interface.OSLoadbalancerRequires( - self, - 'loadbalancer') - self.grafana_dashboard = \ - interface_grafana_dashboard.GrafanaDashboardProvides( - self, - 'grafana-dashboard') + self, "loadbalancer" + ) + self.grafana_dashboard = grafana_interface.GrafanaDashboardProvides( + self, "grafana-dashboard" + ) self.alertmanager = interface_http.HTTPRequires( - self, - 'alertmanager-service') - self.prometheus = interface_http.HTTPRequires( - self, - 'prometheus') + self, "alertmanager-service" + ) + self.prometheus = interface_http.HTTPRequires(self, "prometheus") + self.framework.observe( + self.grafana_dashboard.on.dash_ready, self._configure_dashboard + ) + self.framework.observe( + self.alertmanager.on.http_ready, self._configure_dashboard + ) + self.framework.observe( + self.prometheus.on.http_ready, self._configure_dashboard + ) self.framework.observe( - self.grafana_dashboard.on.dash_ready, - self._configure_dashboard) + self.ingress.on.lb_relation_ready, self._request_loadbalancer + ) self.framework.observe( - self.alertmanager.on.http_ready, - self._configure_dashboard) + self.ingress.on.lb_configured, self._configure_dashboard + ) + + # Certificates Relation + self.ca_client = ca_client.CAClient(self, 
"certificates") self.framework.observe( - self.prometheus.on.http_ready, - self._configure_dashboard) + self.ca_client.on.ca_available, self._request_certificates + ) self.framework.observe( - self.ingress.on.lb_relation_ready, - self._request_loadbalancer) + self.ca_client.on.tls_server_config_ready, + self._enable_ssl_from_relation + ) self.framework.observe( - self.ingress.on.lb_configured, - self._configure_dashboard) + self.on["certificates"].relation_departed, + self._certificates_relation_departed, + ) + + # Charm Custom Events + self.framework.observe(self.on.disable_ssl, self._clean_ssl_conf) + self.framework.observe( + self.on.enable_ssl_from_config, self._enable_ssl_from_config + ) + self._stored.set_default(is_started=False) - def _request_loadbalancer(self, _) -> None: + def _request_loadbalancer(self, _event) -> None: """Send request to create loadbalancer""" self.ingress.request_loadbalancer( self.LB_SERVICE_NAME, self.TLS_PORT, self.TLS_PORT, self._get_bind_ip(), - 'http') + 'http', + ) def _register_dashboards(self) -> None: """Register all dashboards with grafana""" + if not self.unit.is_leader(): + return # Do nothing on non leader units. + for dash_file in self.DASH_DIR.glob("*.json"): self.grafana_dashboard.register_dashboard( dash_file.stem, json.loads(dash_file.read_text())) - logging.info( + logging.debug( "register_grafana_dashboard: {}".format(dash_file)) def _update_legacy_radosgw_creds(self, access_key: str, @@ -285,11 +227,10 @@ def _manage_radosgw(self) -> None: creds[0]['access_key'], creds[0]['secret_key']) - def request_certificates(self) -> None: + def _request_certificates(self, event) -> None: """Request TLS certificates.""" if not self.ca_client.is_joined: - logging.debug( - "Cannot request certificates, relation not present.") + logging.debug("Cannot request certificates, relation not present.") return addresses = set() if self.ingress.relations: @@ -302,6 +243,7 @@ def request_certificates(self) -> None: logging.debug( ("Defering certificate request until loadbalancer has " "responded.")) + event.defer() return for binding_name in ['public']: binding = self.model.get_binding(binding_name) @@ -358,45 +300,32 @@ def check_dashboard(self) -> StatusBase: 'charm config')), (self._check_grafana_config, 'Charm config option grafana-api-url ' 'not set'), - (self._check_dashboard_responding, 'Dashboard not responding')] + (self._check_dashboard_responding, 'Dashboard not responding') + ] for check_f, msg in checks: if not check_f(): return BlockedStatus(msg) - return ActiveStatus() + + # Check if both relation based and config based certs are supplied. 
+ return self._status_check_conflicting_ssl_sources() def kick_dashboard(self) -> None: """Disable and re-enable dashboard""" ceph_utils.mgr_disable_dashboard() ceph_utils.mgr_enable_dashboard() - def _run_cmd(self, cmd: List[str]) -> str: - """Run command in subprocess - - `cmd` The command to run - """ - try: - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - return output.decode('UTF-8') - except subprocess.CalledProcessError as exc: - logging.exception("Command failed: {}".format(exc.output)) - - def _apply_setting(self, ceph_setting: str, value: List[str]) -> str: - """Apply a dashboard setting""" - cmd = ['ceph', 'dashboard', ceph_setting] - cmd.extend(value) - return self._run_cmd(cmd) - - def _apply_file_setting(self, ceph_setting: str, - file_contents: str, - extra_args: List[str] = None) -> None: + def _apply_file_setting( + self, ceph_setting: str, file_contents: str, + extra_args: List[str] = None + ) -> None: """Apply a setting via a file""" - with tempfile.NamedTemporaryFile(mode='w', delete=True) as _file: + with tempfile.NamedTemporaryFile(mode="w", delete=True) as _file: _file.write(file_contents) _file.flush() - settings = ['-i', _file.name] + settings = ["-i", _file.name] if extra_args: settings.extend(extra_args) - self._apply_setting(ceph_setting, settings) + cmds.apply_setting(ceph_setting, settings) def _apply_ceph_config_from_charm_config(self) -> None: """Read charm config and apply settings to dashboard config""" @@ -409,18 +338,44 @@ def _apply_ceph_config_from_charm_config(self) -> None: option.charm_option_name)) continue if option.is_supported(): - self._run_cmd(option.ceph_command(value)) + cmds.exec_option_ceph_cmd(option, value) else: logging.warning( "Skipping charm option {}, not supported".format( option.charm_option_name)) + def _configure_service_apis(self) -> None: + """Configure related service APIs in ceph dashboard""" + if self.unit.is_leader(): + grafana_ep = self.config.get("grafana-api-url") + if grafana_ep: + cmds.dashboard_set("set-grafana-api-url", grafana_ep) + + alertmanager_conn = self.alertmanager.get_service_ep_data() + if alertmanager_conn: + cmds.dashboard_set( + "set-alertmanager-api-host", + "http://{}:{}".format( + alertmanager_conn["hostname"], + alertmanager_conn["port"] + ), + ) + + prometheus_conn = self.prometheus.get_service_ep_data() + if prometheus_conn: + cmds.dashboard_set( + "set-prometheus-api-host", + "http://{}:{}".format( + prometheus_conn["hostname"], prometheus_conn["port"] + ), + ) + def _configure_dashboard(self, event) -> None: """Configure dashboard""" - self.request_certificates() if not self.mon.mons_ready: logging.info("Not configuring dashboard, mons not ready") return + if not ceph_utils.is_dashboard_enabled(): if self.unit.is_leader(): ceph_utils.mgr_enable_dashboard() @@ -428,34 +383,34 @@ def _configure_dashboard(self, event) -> None: logging.info("Dashboard not enabled, deferring event.") return - self._apply_ceph_config_from_charm_config() - self._configure_tls() - ceph_utils.mgr_config_set( - 'mgr/dashboard/{hostname}/server_addr'.format( - hostname=socket.gethostname()), - str(self._get_bind_ip())) if self.unit.is_leader(): - grafana_ep = self.config.get('grafana-api-url') - if grafana_ep: - self._run_cmd([ - 'ceph', 'dashboard', 'set-grafana-api-url', grafana_ep]) - alertmanager_conn = self.alertmanager.get_service_ep_data() - if alertmanager_conn: - alertmanager_ep = 'http://{}:{}'.format( - alertmanager_conn['hostname'], - alertmanager_conn['port']) - self._run_cmd([ - 'ceph', 
'dashboard', 'set-alertmanager-api-host', - alertmanager_ep]) - prometheus_conn = self.prometheus.get_service_ep_data() - if prometheus_conn: - prometheus_ep = 'http://{}:{}'.format( - prometheus_conn['hostname'], - prometheus_conn['port']) - self._run_cmd([ - 'ceph', 'dashboard', 'set-prometheus-api-host', - prometheus_ep]) - self._configure_saml() + # If charm config ssl is present. + if self._is_charm_ssl_from_config(): + if not cmds.check_ceph_dashboard_ssl_configured(): + # Configure SSL using charm config. + self.on.enable_ssl_from_config.emit() + else: # charm config is not present. + # Since certificates relation can provide unique certs to each + # unit, the below check should only be performed on leader as + # the central key/cert pair matches leader unit. + key, cert, _ = self._get_tls_from_relation() + if not self.is_ceph_dashboard_ssl_key_cert_same(key, cert): + # clean SSL if not configured using relation + self.on.disable_ssl.emit() + # apply charm config + self._apply_ceph_config_from_charm_config() + + self._configure_saml() + + ceph_utils.mgr_config_set( + "mgr/dashboard/{hostname}/server_addr".format( + hostname=socket.gethostname() + ), + str(self._get_bind_ip()), + ) + + # configure grafana, prometheus and alertmanager API endpoints + self._configure_service_apis() self._register_dashboards() self._manage_radosgw() @@ -468,6 +423,64 @@ def _get_bind_ip(self) -> str: binding = self.model.get_binding('public') return str(binding.network.ingress_address) + def _clean_ssl_conf(self, _event) -> None: + """Clean ssl conf for ceph-dashboard.""" + + # NOTE: Clearing up of SSL key/cert is done centrally so that it can + # be performed with consistency for all units at once. + if self.unit.is_leader(): + # Disable ssl + cmds.ceph_config_set("config/mgr/mgr/dashboard/ssl", "false") + + config_keys = cmds.ceph_config_list() + for config in config_keys: + # clear all certificates. + if re.match("mgr/dashboard.*/crt", config): + cmds.ceph_config_reset(config) + # clear all keys. + if re.match("mgr/dashboard.*/key", config): + cmds.ceph_config_reset(config) + + def is_ceph_dashboard_ssl_key_cert_same( + self, key: str, cert: str, check_host: bool = False + ) -> Union[bool, None]: + """Check if the provided ssl key/cert match the configured key/cert. + + Note that this method can return falsy values even when the provided + pair is empty (None), so take care when using its result in boolean + checks. + + :returns: None if ssl is not configured or provided key/cert are empty. + """ + if not cmds.check_ceph_dashboard_ssl_configured(): + # Ceph Dashboard SSL not configured. + return None + + # Provided key/crt from param + if key is None or cert is None: + logger.debug("Empty key/cert pair : \n" + "Key %s, \nCerts: %s", (key is None), (cert is None)) + return None + + # Decode to ascii strings if bytes.
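+ # (the certificates relation supplies key/cert as bytes, while values + # read back from the ceph config-key store are str, so normalise + # before comparing).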
+ if isinstance(key, bytes): + key = key.decode() + if isinstance(cert, bytes): + cert = cert.decode() + + # Configured key/crt from ceph-dashboard + if not check_host: + ssl_key = cmds.get_ceph_dashboard_ssl_key() + ssl_crt = cmds.get_ceph_dashboard_ssl_crt() + else: + ssl_key = cmds.get_ceph_dashboard_host_ssl_key() + ssl_crt = cmds.get_ceph_dashboard_host_ssl_crt() + + if ssl_key == key and ssl_crt == cert: + return True + else: + return False + def _get_tls_from_config(self) -> TLS_Config: """Extract TLS config from charm config.""" raw_key = self.config.get("ssl_key") @@ -475,6 +488,7 @@ def _get_tls_from_config(self) -> TLS_Config: raw_ca_cert = self.config.get("ssl_ca") if not (raw_key and raw_key): return None, None, None + key = base64.b64decode(raw_key) cert = base64.b64decode(raw_cert) if raw_ca_cert: @@ -483,8 +497,18 @@ def _get_tls_from_config(self) -> TLS_Config: ca_cert = None return key, cert, ca_cert + def _is_relation_active(self, relation_name: str) -> bool: + """Check if any instance of the relation is present.""" + return any( + relation.id for relation in self.model.relations[relation_name] + ) + def _get_tls_from_relation(self) -> TLS_Config: - """Extract TLS config from certificatees relation.""" + """Extract TLS config from certificates relation.""" + # If 'certificates' relation is not present return None. + if not self._is_relation_active('certificates'): + return None, None, None + if not self.ca_client.is_server_cert_ready: return None, None, None key = self.ca_client.server_key.private_bytes( @@ -507,8 +531,8 @@ def _get_tls_from_relation(self) -> TLS_Config: root_ca_chain = bytes() ca_cert = ( self.ca_client.ca_certificate.public_bytes( - encoding=serialization.Encoding.PEM) + - root_ca_chain) + encoding=serialization.Encoding.PEM + ) + root_ca_chain) return key, cert, ca_cert def _update_iscsigw_creds(self, creds): @@ -533,20 +557,22 @@ def _manage_iscsigw(self) -> None: for c in creds: self._update_iscsigw_creds(c) - def _configure_tls(self) -> None: - """Configure TLS.""" - logging.debug("Attempting to collect TLS config from relation") - key, cert, ca_cert = self._get_tls_from_relation() - ca_cert_path = self.TLS_VAULT_CA_CERT_PATH - if not (key and cert): - logging.debug("Attempting to collect TLS config from charm " - "config") - key, cert, ca_cert = self._get_tls_from_config() - ca_cert_path = self.TLS_CHARM_CA_CERT_PATH - if not (key and cert): - logging.warn( - "Not configuring TLS, not all data present") - return + def _certificates_relation_departed(self, event) -> None: + """Handle departure of the certificates relation.""" + if self.unit.is_leader(): + # Clear SSL if not configured using charm config. + # NOTE: Since certificates relation has departed, check has to be + # done using the charm config key/certs. + key, cert, _ = self._get_tls_from_config() + if not self.is_ceph_dashboard_ssl_key_cert_same(key, cert): + self._clean_ssl_conf(event) + + # Possible handover to charm-config SSL.
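+ # If the operator also supplied a key/cert pair via charm config, + # re-enable SSL from that source now that the relation is gone.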
+ if self._is_charm_ssl_from_config(): + self.on.enable_ssl_from_config.emit() + + def _configure_tls(self, key, cert, ca_cert, ca_cert_path) -> None: + """Configure TLS using provided credentials""" self.TLS_KEY_PATH.write_bytes(key) self.TLS_CERT_PATH.write_bytes(cert) if ca_cert: @@ -576,21 +602,20 @@ def _configure_tls(self) -> None: self.kick_dashboard() def _configure_saml(self) -> None: - base_url = self.config.get('saml-base-url') - idp_metadata = self.config.get('saml-idp-metadata') - if not base_url or not idp_metadata: + if not self.unit.is_leader(): + logger.debug("Unit not leader, skipping saml config") return - cmd = ['ceph', 'dashboard', 'sso', 'setup', 'saml2', - base_url, idp_metadata] + base_url = self.config.get('saml-base-url') + idp_metadata = self.config.get('saml-idp-metadata') username_attr = self.config.get('saml-username-attribute') - if username_attr: - cmd.append(username_attr) idp_entity_id = self.config.get('saml-idp-entity-id') - if idp_entity_id: - cmd.append(idp_entity_id) + if not base_url or not idp_metadata: + return - self._run_cmd(cmd) + cmds.ceph_dashboard_config_saml( + base_url, idp_metadata, username_attr, idp_entity_id + ) def _gen_user_password(self, length: int = 12) -> str: """Generate a password""" @@ -607,12 +632,10 @@ def _add_user_action(self, event: ActionEvent) -> None: event.fail("Config missing") else: password = self._gen_user_password() - with tempfile.NamedTemporaryFile(mode='w', delete=True) as fp: + with tempfile.NamedTemporaryFile(mode="w", delete=True) as fp: fp.write(password) fp.flush() - cmd_out = subprocess.check_output([ - 'ceph', 'dashboard', 'ac-user-create', '--enabled', - '-i', fp.name, username, role]).decode('UTF-8') + cmd_out = cmds.ceph_dashboard_add_user(username, fp.name, role) if re.match('User.*already exists', cmd_out): event.fail("User already exists") else: @@ -622,11 +645,111 @@ def _delete_user_action(self, event: ActionEvent) -> None: """Delete a user""" username = event.params["username"] try: - self._run_cmd(['ceph', 'dashboard', 'ac-user-delete', username]) + cmds.ceph_dashboard_delete_user(username) event.set_results({"message": "User {} deleted".format(username)}) except subprocess.CalledProcessError as exc: event.fail(exc.output) + def _is_charm_ssl_from_relation(self) -> bool: + """Check if ssl cert/key are provided by certificates relation.""" + key, cert, _ = self._get_tls_from_relation() + # True if both key and cert are present, False otherwise. + return bool(key and cert) + + def _is_charm_ssl_from_config(self) -> bool: + """Check if ssl cert/key are configured in charm config.""" + key, cert, _ = self._get_tls_from_config() + # True if both key and cert are present, False otherwise. + return bool(key and cert) + + def _is_charm_ssl_multiple_sources(self) -> bool: + """Check if SSL key/cert are available from multiple sources.""" + return self._is_charm_ssl_from_config() \ + and self._is_charm_ssl_from_relation() + + def _status_check_conflicting_ssl_sources(self): + """Generate status check message for multiple ssl key/cert scenario.""" + # If conflicting SSL source is not present + if not self._is_charm_ssl_multiple_sources(): + return ActiveStatus() + + # SSL has not yet been configured from either source. + if not cmds.check_ceph_dashboard_ssl_configured(): + return BlockedStatus( + "Conflict: SSL configuration available from 'certificates' " + "relation and Charm config, refusing to guess. " + "Remove conflicting source to proceed."
+ ) + + key, cert, _ = self._get_tls_from_config() + if self.is_ceph_dashboard_ssl_key_cert_same(key, cert): + # SSL currently configured from charm config. + return BlockedStatus( + "Conflict: Active SSL from Charm config, 'certificates' " + "relation is ignored. Remove conflicting source to proceed." + ) + + key, cert, _ = self._get_tls_from_relation() + # 'Certificates' relation provides unique key/cert to each host. + # Hence cert check is performed for host. + if self.is_ceph_dashboard_ssl_key_cert_same( + key, cert, check_host=True + ): + # SSL currently configured from relation. + return BlockedStatus( + "Conflict: Active SSL from 'certificates' relation, Charm " + "config is ignored. Remove conflicting source to proceed." + ) + + return BlockedStatus("Unknown SSL source.") + + def _configure_tls_from_charm_config(self) -> None: + """Configure TLS using charm config values.""" + logging.debug("Attempting to collect TLS config from charm config") + key, cert, ca_cert = self._get_tls_from_config() + if not (key and cert): + logging.error("Not configuring, not all config data present") + return + + # Configure TLS + self._configure_tls(key, cert, ca_cert, self.TLS_CHARM_CA_CERT_PATH) + + def _configure_tls_from_relation(self) -> None: + """Configure TLS from certificates relation""" + logging.debug("Attempting to collect TLS config from relation") + key, cert, ca_cert = self._get_tls_from_relation() + if not (key and cert): + logging.error("Not configuring TLS, not all relation data present") + return + + # Configure TLS + self._configure_tls(key, cert, ca_cert, self.TLS_VAULT_CA_CERT_PATH) + + # Custom SSL Event Handlers + def _enable_ssl_from_config(self, _event) -> None: + """Configure Ceph Dashboard SSL with available key/cert from charm.""" + if all([ + cmds.check_ceph_dashboard_ssl_configured(), + cmds.check_ceph_dashboard_ssl_configured(is_check_host_key=True) + ]): + # SSL is already configured for both central and host key/cert. + return + + self._configure_tls_from_charm_config() + + # Certificates relation handler. + def _enable_ssl_from_relation(self, event) -> None: + """Configure Ceph Dashboard SSL using key/cert from relation.""" + if cmds.check_ceph_dashboard_ssl_configured(): + key, cert, _ = self._get_tls_from_config() + if self.is_ceph_dashboard_ssl_key_cert_same(key, cert): + # Charm relation event deferred until conflicting charm config + # ssl is removed. Operator is informed through unit status. + event.defer() + return # SSL is already configured. + + self._configure_tls_from_relation() + if __name__ == "__main__": main(CephDashboardCharm) diff --git a/ceph-dashboard/src/charm_option.py b/ceph-dashboard/src/charm_option.py new file mode 100644 index 00000000..08f72af7 --- /dev/null +++ b/ceph-dashboard/src/charm_option.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical +# See LICENSE file for licensing details.
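+# +# This module holds the CharmCephOption hierarchy that previously lived +# inline in charm.py (removed in the hunks above); it maps charm config +# options to their corresponding 'ceph dashboard' commands.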
+# +# Learn more at: https://juju.is/docs/sdk + +import charmhelpers.core.host as ch_host +from typing import List, Union + + +class CharmCephOption(): + """Manage a charm option to ceph command to manage that option""" + + def __init__( + self, charm_option_name, ceph_option_name, min_version=None + ): + self.charm_option_name = charm_option_name + self.ceph_option_name = ceph_option_name + self.min_version = min_version + + def is_supported(self) -> bool: + """Is the option supported on this unit""" + if self.min_version: + return self.minimum_supported(self.min_version) + return True + + def minimum_supported(self, supported_version: str) -> bool: + """Check if installed Ceph release is >= to supported_version""" + return ch_host.cmp_pkgrevno('ceph-common', supported_version) >= 0 + + def convert_option(self, value: Union[bool, str, int]) -> List[str]: + """Convert a value to the corresponding value part of the ceph + dashboard command""" + return [str(value)] + + def ceph_command(self, value: List[str]) -> List[str]: + """Shell command to set option to desired value""" + cmd = ['ceph', 'dashboard', self.ceph_option_name] + cmd.extend(self.convert_option(value)) + return cmd + + +class DebugOption(CharmCephOption): + + def convert_option(self, value): + """Convert charm True/False to enable/disable""" + if value: + return ['enable'] + else: + return ['disable'] + + +class MOTDOption(CharmCephOption): + + def convert_option(self, value): + """Split motd charm option into ['severity', 'time', 'message']""" + if value: + return value.split('|') + else: + return ['clear'] + + +class CharmCephOptionList(): + def get(self) -> List: + """Get Charm options list""" + return [ + DebugOption('debug', 'debug'), + CharmCephOption( + 'enable-password-policy', + 'set-pwd-policy-enabled'), + CharmCephOption( + 'password-policy-check-length', + 'set-pwd-policy-check-length-enabled'), + CharmCephOption( + 'password-policy-check-oldpwd', + 'set-pwd-policy-check-oldpwd-enabled'), + CharmCephOption( + 'password-policy-check-username', + 'set-pwd-policy-check-username-enabled'), + CharmCephOption( + 'password-policy-check-exclusion-list', + 'set-pwd-policy-check-exclusion-list-enabled'), + CharmCephOption( + 'password-policy-check-complexity', + 'set-pwd-policy-check-complexity-enabled'), + CharmCephOption( + 'password-policy-check-sequential-chars', + 'set-pwd-policy-check-sequential-chars-enabled'), + CharmCephOption( + 'password-policy-check-repetitive-chars', + 'set-pwd-policy-check-repetitive-chars-enabled'), + CharmCephOption( + 'password-policy-min-length', + 'set-pwd-policy-min-length'), + CharmCephOption( + 'password-policy-min-complexity', + 'set-pwd-policy-min-complexity'), + CharmCephOption( + 'audit-api-enabled', + 'set-audit-api-enabled'), + CharmCephOption( + 'audit-api-log-payload', + 'set-audit-api-log-payload'), + MOTDOption( + 'motd', + 'motd', + min_version='15.2.14') + ] diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml index 4fea6771..7d7d1bf8 100644 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ b/ceph-dashboard/tests/bundles/focal-yoga.yaml @@ -22,7 +22,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml index 3c3cde65..fedc7011 100644 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ 
b/ceph-dashboard/tests/bundles/focal.yaml @@ -18,7 +18,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml index b8def797..4263cbe3 100644 --- a/ceph-dashboard/tests/bundles/jammy-antelope.yaml +++ b/ceph-dashboard/tests/bundles/jammy-antelope.yaml @@ -20,7 +20,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml index 42462d15..62c023f7 100644 --- a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml @@ -20,7 +20,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml index dd81eb90..5a38b13c 100644 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ b/ceph-dashboard/tests/bundles/jammy-yoga.yaml @@ -23,7 +23,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml index fcfa66d3..160e6833 100644 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -22,7 +22,8 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable + series: jammy mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M @@ -31,6 +32,7 @@ applications: vault-mysql-router: charm: ch:mysql-router channel: latest/edge + series: jammy ceph-dashboard: charm: ../../ceph-dashboard.charm options: diff --git a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml index 2f4d4f97..bed13e0b 100644 --- a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml @@ -22,7 +22,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 96c749c4..904b861e 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -151,6 +151,12 @@ class _CephDashboardCharm(charm.CephDashboardCharm): def _get_bind_ip(self): return '10.0.0.10' + def _clean_ssl_conf(self, _event): + return # empty stub + + def _is_relation_active(self, _event): + return True + class TestCephDashboardCharmBase(CharmTestCase): @@ -158,7 +164,7 @@ class TestCephDashboardCharmBase(CharmTestCase): 'ceph_utils', 'ch_host', 'socket', - 'subprocess', + 'subprocess', # charm's subprocess import 'ch_host', ] @@ -207,9 +213,11 @@ def test_init(self): self.harness.begin() self.assertFalse(self.harness.charm._stored.is_started) - def test_charm_config(self): + @patch('ceph_dashboard_commands.subprocess') + 
@patch('charm_option.ch_host') + def test_charm_config(self, option_ch_host, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True - self.ch_host.cmp_pkgrevno.return_value = 0 + option_ch_host.cmp_pkgrevno.return_value = 0 basic_boolean = [ ('enable-password-policy', 'set-pwd-policy-enabled'), ('password-policy-check-length', @@ -254,13 +262,16 @@ def test_charm_config(self): { 'mon-ready': 'True'}) _harness.begin() + _harness.set_leader(True) + _harness.charm.is_ceph_dashboard_ssl_key_cert_same \ + = lambda *_: True expected_cmd = base_cmd + expected_options - self.subprocess.check_output.reset_mock() + subprocess.check_output.reset_mock() _harness.update_config( key_values={charm_option: charm_value}) - self.subprocess.check_output.assert_called_once_with( + subprocess.check_output.assert_called_once_with( expected_cmd, - stderr=self.subprocess.STDOUT) + stderr=subprocess.STDOUT) def test__on_ca_available(self): rel_id = self.harness.add_relation('certificates', 'vault') @@ -332,7 +343,8 @@ def test_kick_dashboard(self): self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() - def test__configure_dashboard(self): + @patch('ceph_dashboard_commands.subprocess') + def test_configure_dashboard(self, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True rel_id = self.harness.add_relation('dashboard', 'ceph-mon') self.harness.begin() @@ -377,9 +389,11 @@ def test__get_bind_ip(self): self.harness.charm._get_bind_ip(), '10.0.0.10') + @patch('ceph_dashboard_commands.check_ceph_dashboard_ssl_configured') @patch('socket.gethostname') - def test_certificates_relation(self, _gethostname): + def test_certificates_relation(self, _gethostname, ssl_configured): self.ceph_utils.is_dashboard_enabled.return_value = True + ssl_configured.return_value = False mock_TLS_KEY_PATH = MagicMock() mock_TLS_CERT_PATH = MagicMock() mock_TLS_VAULT_CA_CERT_PATH = MagicMock() @@ -434,6 +448,8 @@ def test_certificates_relation(self, _gethostname): 'ip': ['10.10.0.101'], 'port': 8443, 'protocol': 'http'}}})}) + # Reemit deferred events. 
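+ # (the 'certificates' handler deferred itself while the loadbalancer + # response was pending; reemitting lets it complete now that the + # loadbalancer relation data is in place).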
+ self.harness.framework.reemit() self.assertNotEqual( self.harness.get_relation_data( cert_rel_id, @@ -464,8 +480,10 @@ def test_certificates_relation(self, _gethostname): self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() - def test_certificates_from_config(self): + @patch('ceph_dashboard_commands.check_ceph_dashboard_ssl_configured') + def test_certificates_from_config(self, ssl_configured): self.ceph_utils.is_dashboard_enabled.return_value = True + ssl_configured.return_value = False mock_TLS_KEY_PATH = MagicMock() mock_TLS_CERT_PATH = MagicMock() mock_TLS_CHARM_CA_CERT_PATH = MagicMock() @@ -503,7 +521,8 @@ def test_certificates_from_config(self): self.ceph_utils.mgr_disable_dashboard.assert_called_once_with() self.ceph_utils.mgr_enable_dashboard.assert_called_once_with() - def test_rados_gateway(self): + @patch('ceph_dashboard_commands.subprocess') + def test_rados_gateway(self, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True self.ch_host.cmp_pkgrevno.return_value = 1 mon_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') @@ -541,16 +560,17 @@ def test_rados_gateway(self): 'access-key': 'XNUZVPL364U0BL1OXWJZ', 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', 'uid': 'radosgw-user-9'}) - self.subprocess.check_output.assert_has_calls([ + subprocess.check_output.assert_has_calls([ call(['ceph', 'dashboard', 'set-rgw-api-access-key', '-i', ANY], - stderr=self.subprocess.STDOUT), + stderr=subprocess.STDOUT), call().decode('UTF-8'), call(['ceph', 'dashboard', 'set-rgw-api-secret-key', '-i', ANY], - stderr=self.subprocess.STDOUT), + stderr=subprocess.STDOUT), call().decode('UTF-8'), ]) - def test_rados_gateway_multi_relations_pacific(self): + @patch('ceph_dashboard_commands.subprocess') + def test_rados_gateway_multi_relations_pacific(self, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True self.ch_host.cmp_pkgrevno.return_value = 1 rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') @@ -589,7 +609,7 @@ def test_rados_gateway_multi_relations_pacific(self): 'access-key': 'XNUZVPL364U0BL1OXWJZ', 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', 'uid': 'radosgw-user-9'}) - self.subprocess.check_output.reset_mock() + subprocess.check_output.reset_mock() self.harness.update_relation_data( rel_id2, 'ceph-us', @@ -597,16 +617,17 @@ def test_rados_gateway_multi_relations_pacific(self): 'access-key': 'JGHKJGDKJGJGJHGYYYYM', 'secret-key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44', 'uid': 'radosgw-user-10'}) - self.subprocess.check_output.assert_has_calls([ + subprocess.check_output.assert_has_calls([ call(['ceph', 'dashboard', 'set-rgw-api-access-key', '-i', ANY], - stderr=self.subprocess.STDOUT), + stderr=subprocess.STDOUT), call().decode('UTF-8'), call(['ceph', 'dashboard', 'set-rgw-api-secret-key', '-i', ANY], - stderr=self.subprocess.STDOUT), + stderr=subprocess.STDOUT), call().decode('UTF-8'), ]) - def test_rados_gateway_multi_relations_octopus(self): + @patch('ceph_dashboard_commands.subprocess') + def test_rados_gateway_multi_relations_octopus(self, subprocess): self.ch_host.cmp_pkgrevno.return_value = -1 rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') @@ -635,7 +656,7 @@ def test_rados_gateway_multi_relations_octopus(self): 'access-key': 'XNUZVPL364U0BL1OXWJZ', 'secret-key': 'SgBo115xJcW90nkQ5EaNQ6fPeyeUUT0GxhwQbLFo', 'uid': 'radosgw-user-9'}) - 
self.subprocess.check_output.reset_mock() + subprocess.check_output.reset_mock() self.harness.update_relation_data( rel_id2, 'ceph-us', { 'access-key': 'JGHKJGDKJGJGJHGYYYYM', 'secret-key': 'iljkdfhHKHKd88LKxNLSKDiijfjfjfldjfjlf44', 'uid': 'radosgw-user-10'}) - self.assertFalse(self.subprocess.check_output.called) + self.assertFalse(subprocess.check_output.called) @patch.object(charm.secrets, 'choice') def test__gen_user_password(self, _choice): _choice.return_value = 'r' self.harness.begin() self.assertEqual( self.harness.charm._gen_user_password(), 'rrrrrrrrrrrr') + @patch('ceph_dashboard_commands.subprocess') @patch.object(charm.tempfile, 'NamedTemporaryFile') @patch.object(charm.secrets, 'choice') - def test__add_user_action(self, _choice, _NTFile): - self.subprocess.check_output.return_value = b'' + def test_add_user_action(self, _choice, _NTFile, subprocess): + subprocess.check_output.return_value = b'Byte String' _NTFile.return_value.__enter__.return_value.name = 'tempfilename' _choice.return_value = 'r' self.harness.begin() action_event = MagicMock() action_event.params = { 'username': 'auser', 'role': 'administrator'} self.harness.charm._add_user_action(action_event) - self.subprocess.check_output.assert_called_once_with( - ['ceph', 'dashboard', 'ac-user-create', '--enabled', - '-i', 'tempfilename', 'auser', 'administrator']) + subprocess.check_output.assert_called_once_with( + ['ceph', 'dashboard', 'ac-user-create', '--enabled', '-i', + 'tempfilename', 'auser', 'administrator'], + stderr=subprocess.STDOUT + ) - def test__delete_user_action(self): - self.subprocess.check_output.return_value = b'' + @patch('ceph_dashboard_commands.subprocess') + def test__delete_user_action(self, subprocess): + subprocess.check_output.return_value = b'' self.harness.begin() action_event = MagicMock() action_event.params = { 'username': 'auser'} self.harness.charm._delete_user_action(action_event) - self.subprocess.check_output.assert_called_once_with( + subprocess.check_output.assert_called_once_with( ['ceph', 'dashboard', 'ac-user-delete', 'auser'], - stderr=self.subprocess.STDOUT) + stderr=subprocess.STDOUT) - def test_saml(self): - self.subprocess.check_output.return_value = b'' + @patch('ceph_dashboard_commands.subprocess') + def test_saml(self, subprocess): + subprocess.check_output.return_value = b'' self.harness.begin() self.harness.charm.PACKAGES.append('python3-onelogin-saml2') self.harness.charm._configure_saml() - self.subprocess.check_output.assert_not_called() + subprocess.check_output.assert_not_called() base_url = 'https://saml-base' idp_meta = 'file://idp.xml' @@ -701,8 +727,9 @@ } ) + self.harness.set_leader() self.harness.charm._configure_saml() - self.subprocess.check_output.assert_called_with( + subprocess.check_output.assert_called_with( ['ceph', 'dashboard', 'sso', 'setup', 'saml2', base_url, idp_meta, username_attr, entity_id], stderr=ANY From 8486876855ac2593bc2b45e17ff4e4dabd7f0403 Mon Sep 17 00:00:00 2001 From: Alex Kavanagh Date: Fri, 4 Aug 2023 17:16:42 +0100 Subject: [PATCH 2540/2699] Ensure get_requests_for_local_unit doesn't fail on incomplete relation This is a rebuild/make sync for charms to pick up the fix in charmhelpers to fix any inadvertent accesses of ['ca'] in the relation data before it is available from vault in the certificates relation. Fix in charmhelpers is in [1].
[1] https://github.com/juju/charm-helpers/pull/824 Closes-Bug: #2028683 Change-Id: Ie05a9ff536700282dc0c66816b50efee5da62767 --- ceph-radosgw/hooks/charmhelpers/core/unitdata.py | 3 ++- ceph-radosgw/hooks/charmhelpers/fetch/snap.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py index dac757f1..65153f1f 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/unitdata.py +++ b/ceph-radosgw/hooks/charmhelpers/core/unitdata.py @@ -529,7 +529,8 @@ def kv(): env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower() if env_var not in ["auto", "no", "yes"]: - logging.warning(f"Unknown value for CHARM_HELPERS_TESTMODE '{env_var}', assuming 'no'") + logging.warning("Unknown value for CHARM_HELPERS_TESTMODE '%s'" + ", assuming 'no'", env_var) env_var = "no" if env_var == "no": diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py index 36d6bce9..7ab7ce3e 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/snap.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/snap.py @@ -52,7 +52,7 @@ def _snap_exec(commands): :param commands: List commands :return: Integer exit code """ - assert type(commands) == list + assert isinstance(commands, list) retry_count = 0 return_code = None From 537468cfa6f3a3f2bc6e3387f2cce0ab1a7c5133 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 16 Aug 2023 17:08:47 -0300 Subject: [PATCH 2541/2699] Return previous result of processed broker requests Instead of returning an empty dict for already processed broker requests, store the result and return it. This works around issues in charms like ceph-fs that spin indefinitely waiting for the response to a request that never arrives. Closes-Bug: #2031414 Change-Id: Ie86f007d76fe75cc07cf7a973eff3f535a11dbe7 --- ceph-mon/src/ceph_client.py | 41 +++++++++++++++---------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/ceph-mon/src/ceph_client.py b/ceph-mon/src/ceph_client.py index 900b6327..65a3ebca 100644 --- a/ceph-mon/src/ceph_client.py +++ b/ceph-mon/src/ceph_client.py @@ -37,7 +37,7 @@ class CephClientProvides(Object): def __init__(self, charm, relation_name='client'): super().__init__(charm, relation_name) - self._stored.set_default(processed=[]) + self._stored.set_default(processed=[], processed_map={}) self.charm = charm self.this_unit = self.model.unit self.relation_name = relation_name @@ -50,6 +50,8 @@ def __init__(self, charm, relation_name='client'): self._on_relation_changed ) + self._stored.processed_map = {} + def notify_all(self): send_osd_settings() if not self.charm.ready_for_service(): @@ -122,22 +124,6 @@ def _handle_client_relation(self, relation, unit): for k, v in data.items(): relation.data[self.this_unit][k] = str(v) - def _req_already_treated(self, request_id): - """Check if broker request already handled. - - The local relation data holds all the broker request/responses that - are handled as a dictionary. There will be a single entry for each - unit that makes broker request in the form of broker-rsp-: - {reqeust-id: , ..}. Verify if request_id exists in the relation - data broker response for the requested unit. 
- - :param request_id: Request ID - :type request_id: str - :returns: Whether request is already handled - :rtype: bool - """ - return request_id in self._stored.processed - - def _handle_broker_request( self, relation, unit, add_legacy_response=False, force=False): """Retrieve broker request from relation, process, return response data. @@ -188,23 +174,28 @@ def _get_broker_req_id(request): broker_req_id)) return {} - if self._req_already_treated(broker_req_id) and not force: + unit_id = settings.get( + 'unit-name', unit.name).replace('/', '-') + unit_response_key = 'broker-rsp-' + unit_id + prev_result = self._stored.processed_map.get(broker_req_id) + if prev_result is not None and not force: + # The broker request has been processed already and we have + # stored the result. Log it so that the users may know and + # return the cached value, with the unit key. logger.debug( "Ignoring already executed broker request {}".format( broker_req_id)) - return {} + rsp = {unit_response_key: prev_result} + if add_legacy_response: + rsp.update({'broker_rsp': prev_result}) + return rsp rsp = self.charm.process_broker_request( broker_req_id, settings['broker_req']) - unit_id = settings.get( - 'unit-name', unit.name).replace('/', '-') - unit_response_key = 'broker-rsp-' + unit_id response.update({unit_response_key: rsp}) if add_legacy_response: response.update({'broker_rsp': rsp}) - processed = self._stored.processed - processed.append(broker_req_id) - self._stored.processed = processed + self._stored.processed_map[broker_req_id] = rsp else: logger.warn('broker_req not in settings: {}'.format(settings)) return response
From 6ad1781f84bd5c74ff41a8cf94efdc8198e59f94 Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Fri, 9 Dec 2022 14:02:01 +0530 Subject: [PATCH 2542/2699] Fixes testcase coverage for charm Change-Id: I6f778f4b34e48c06d7ac4e9ef2f9c6633bf04ba0 --- ceph-radosgw/.coveragerc | 7 ------- ceph-radosgw/unit_tests/test_actions.py | 1 + 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 ceph-radosgw/.coveragerc diff --git a/ceph-radosgw/.coveragerc b/ceph-radosgw/.coveragerc deleted file mode 100644 index 61e98080..00000000 --- a/ceph-radosgw/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - if __name__ == .__main__.: -include= - hooks/ceph.py - hooks/hooks.py diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index cebb77bb..1978b68b 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -88,6 +88,7 @@ class MultisiteActionsTestCase(CharmTestCase): 'is_leader', 'leader_set', 'service_name', + 'service_restart', ] def setUp(self): From ad8b4f971eee4a65f7beced7972ed5f8e42768e2 Mon Sep 17 00:00:00 2001 From: Peter Matulis Date: Tue, 25 Jul 2023 15:50:24 -0400 Subject: [PATCH 2543/2699] Add docs key and point at Discourse Add the 'docs' key and point it at a Discourse topic previously populated with the charm's README contents. When the new charm revision is released to the Charmhub, this Discourse-based content will be displayed there. In the absence of this new key, the Charmhub's default behaviour is to display the value of the charm's 'description' key.
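As a rough sketch of where the new key sits among the existing top-level fields of a charm's metadata.yaml (surrounding values abbreviated; the actual ceph-proxy change follows in the diff below):

    name: ceph-proxy
    maintainer: OpenStack Charmers
    description: |
      Ceph is a distributed storage and network file system designed to
      provide excellent performance, reliability, and scalability.
    docs: https://discourse.charmhub.io/t/ceph-proxy-docs-index/11218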
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1108 Change-Id: I782890f44628a634c7adcf9cdace753c62d6c262 --- ceph-proxy/metadata.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index 24f04fcb..56e0a24f 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -4,6 +4,7 @@ maintainer: OpenStack Charmers description: | Ceph is a distributed storage and network file system designed to provide excellent performance, reliability, and scalability. +docs: https://discourse.charmhub.io/t/ceph-proxy-docs-index/11218 tags: - openstack - storage From 98f353389f1e1b82c5aa6f51331aa498116108ed Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 4 Sep 2023 16:26:25 +0200 Subject: [PATCH 2544/2699] Prune CI test jobs and test bundles Change-Id: I1be06ec2901ac414388f4875c95631e4ed50145e --- ceph-mon/osci.yaml | 1 - ceph-mon/tests/bundles/focal-xena.yaml | 263 ------------------------ ceph-mon/tests/bundles/jammy-yoga.yaml | 265 ------------------------- 3 files changed, 529 deletions(-) delete mode 100644 ceph-mon/tests/bundles/focal-xena.yaml delete mode 100644 ceph-mon/tests/bundles/jammy-yoga.yaml diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index 4954c571..d1262adf 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -2,7 +2,6 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - - charm-yoga-functional-jobs - charm-functional-jobs vars: needs_charm_build: true diff --git a/ceph-mon/tests/bundles/focal-xena.yaml b/ceph-mon/tests/bundles/focal-xena.yaml deleted file mode 100644 index d9a6550c..00000000 --- a/ceph-mon/tests/bundles/focal-xena.yaml +++ /dev/null @@ -1,263 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-xena - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: 8.0.19/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: quincy/edge - to: - - '17' - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - 
- - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/jammy-yoga.yaml b/ceph-mon/tests/bundles/jammy-yoga.yaml deleted file mode 100644 index 24818960..00000000 --- a/ceph-mon/tests/bundles/jammy-yoga.yaml +++ /dev/null @@ -1,265 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: jammy - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - series: focal - '17': - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: 8.0.19/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: quincy/edge - to: - - '17' - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - 
charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - series: focal - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' From f084ea313bfa62819f8bb913419caaf6db0af199 Mon Sep 17 00:00:00 2001 From: Chris MacNaughton Date: Tue, 30 Aug 2022 18:25:26 -0400 Subject: [PATCH 2545/2699] Partially revert previous change to re-enable Focal as a gating target Change-Id: I140a076a89384adf0916e62059a49b035a8b0fbd --- ceph-iscsi/charmcraft.yaml | 3 + ceph-iscsi/metadata.yaml | 1 + ceph-iscsi/osci.yaml | 17 +++++ ceph-iscsi/tests/bundles/focal-ec.yaml | 94 +++++++++++++++++++++++++ ceph-iscsi/tests/bundles/focal.yaml | 95 ++++++++++++++++++++++++++ ceph-iscsi/tests/tests.yaml | 2 + 6 files changed, 212 insertions(+) create mode 100644 ceph-iscsi/tests/bundles/focal-ec.yaml create mode 100644 ceph-iscsi/tests/bundles/focal.yaml diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 5d665764..52f92540 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -27,6 +27,9 @@ bases: architectures: - amd64 run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index f67f4d3a..945985da 100644 --- a/ceph-iscsi/metadata.yaml +++ 
b/ceph-iscsi/metadata.yaml @@ -10,6 +10,7 @@ tags: - storage - misc series: +- focal - jammy - kinetic - lunar diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index a6b65fb5..4150888f 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -3,6 +3,8 @@ - charm-unit-jobs-py310 check: jobs: + - ceph-iscsi-focal-quincy + - ceph-iscsi-focal-quincy-ec - ceph-iscsi-jammy-quincy - ceph-iscsi-jammy-quincy-ec - ceph-iscsi-kinetic-quincy: @@ -18,6 +20,21 @@ charm_build_name: ceph-iscsi build_type: charmcraft charmcraft_channel: 2.0/stable +- job: + name: ceph-iscsi-focal-quincy + parent: func-target + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: -- focal +- job: + name: ceph-iscsi-focal-quincy-ec + parent: func-target + dependencies: + - ceph-iscsi-jammy-quincy + vars: + tox_extra_args: -- focal-ec + - job: name: ceph-iscsi-jammy-quincy parent: func-target diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml new file mode 100644 index 00000000..8b017fb5 --- /dev/null +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -0,0 +1,94 @@ +local_overlay_enabled: False +series: focal +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '0' + - '1' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: 1.7/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: 8.0.19/edge + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml new file mode 100644 index 00000000..d95d685b --- /dev/null +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -0,0 +1,95 @@ +local_overlay_enabled: False +series: focal +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': + '16': + '17': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 4 + options: + gateway-metadata-pool: iscsi-foo-metadata + to: + - '0' + - '1' + - '16' + - '17' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 
3 + options: + monitor-count: '3' + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: 1.7/edge + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: 8.0.19/edge + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0.19/edge +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index a33948e8..4114bbb5 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -1,5 +1,7 @@ charm_name: ceph-iscsi gate_bundles: + - focal-ec + - focal - jammy-ec - jammy smoke_bundles: From 15b0eed5d918a019f8c519608791ac51cf1c13ec Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Thu, 7 Sep 2023 11:12:10 -0300 Subject: [PATCH 2546/2699] Remove some CI jobs and test bundles Change-Id: I526f090426d72842a709adc39ff796fafa17dcbb --- ceph-osd/osci.yaml | 2 - ceph-osd/tests/bundles/focal-xena.yaml | 238 ------------------------- ceph-osd/tests/bundles/jammy-yoga.yaml | 238 ------------------------- 3 files changed, 478 deletions(-) delete mode 100644 ceph-osd/tests/bundles/focal-xena.yaml delete mode 100644 ceph-osd/tests/bundles/jammy-yoga.yaml diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 84a63029..2a379fd0 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -2,8 +2,6 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - - charm-xena-functional-jobs - - charm-yoga-functional-jobs - charm-functional-jobs vars: needs_charm_build: true diff --git a/ceph-osd/tests/bundles/focal-xena.yaml b/ceph-osd/tests/bundles/focal-xena.yaml deleted file mode 100644 index 4cf0cc70..00000000 --- a/ceph-osd/tests/bundles/focal-xena.yaml +++ /dev/null @@ -1,238 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-xena - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-osd: - charm: ../../ceph-osd.charm - num_units: 3 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - aa-profile-mode: enforce - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 
'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/jammy-yoga.yaml b/ceph-osd/tests/bundles/jammy-yoga.yaml deleted file mode 100644 index a4a640e8..00000000 --- a/ceph-osd/tests/bundles/jammy-yoga.yaml +++ /dev/null @@ -1,238 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: jammy - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: latest/edge - placement-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-osd: - charm: ../../ceph-osd.charm - num_units: 3 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - aa-profile-mode: enforce - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - 
- - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' From f0dd2b250dbea2b2c126b6449dd56c57fa5d874b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 11 Sep 2023 12:31:22 +0200 Subject: [PATCH 2547/2699] Prune CI jobs Change-Id: I4260d8c4ea6e5a7cedca4231dec42d0700e25638 --- ceph-radosgw/osci.yaml | 82 ++---------- .../tests/bundles/jammy-yoga-multisite.yaml | 99 -------------- .../tests/bundles/jammy-yoga-namespaced.yaml | 124 ------------------ ceph-radosgw/tests/bundles/jammy-yoga.yaml | 123 ----------------- .../tests/bundles/lunar-antelope.yaml | 123 ----------------- ceph-radosgw/tests/tests.yaml | 10 +- 6 files changed, 11 insertions(+), 550 deletions(-) delete mode 100644 ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-yoga.yaml delete mode 100644 ceph-radosgw/tests/bundles/lunar-antelope.yaml diff --git a/ceph-radosgw/osci.yaml b/ceph-radosgw/osci.yaml index 9566e5e0..7173e4b2 100644 --- a/ceph-radosgw/osci.yaml +++ b/ceph-radosgw/osci.yaml @@ -1,24 +1,18 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 - charm-unit-jobs-py310 check: jobs: - vault-focal-yoga_rgw - vault-focal-yoga-namespaced - focal-yoga-multisite - - jammy-yoga-multisite - jammy-antelope-multisite: voting: false - jammy-bobcat-multisite: voting: false - - lunar-antelope-multisite: - voting: false - mantic-bobcat-multisite: voting: false - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced - vault-jammy-antelope_rgw: voting: false - vault-jammy-antelope-namespaced: @@ -27,10 +21,6 @@ voting: false - vault-jammy-bobcat-namespaced: voting: false - - vault-lunar-antelope_rgw: - voting: false - - vault-lunar-antelope-namespaced: - voting: false - vault-mantic-bobcat_rgw: voting: false - vault-mantic-bobcat-namespaced: @@ -47,47 +37,27 @@ - osci-lint - 
charm-build - tox-py38 - - tox-py39 - - name: tox-py310 - soft: true vars: tox_extra_args: '-- focal-yoga-multisite' -- job: - name: jammy-yoga-multisite - parent: func-target - dependencies: - - osci-lint - - charm-build - - name: tox-py310 - soft: true - vars: - tox_extra_args: '-- jammy-yoga-multisite' - job: name: jammy-antelope-multisite parent: func-target dependencies: - - jammy-yoga-multisite + - focal-yoga-multisite vars: tox_extra_args: '-- jammy-antelope-multisite' - job: name: jammy-bobcat-multisite parent: func-target dependencies: - - jammy-yoga-multisite + - jammy-antelope-multisite vars: tox_extra_args: '-- jammy-bobcat-multisite' -- job: - name: lunar-antelope-multisite - parent: func-target - dependencies: - - jammy-yoga-multisite - vars: - tox_extra_args: '-- lunar-antelope-multisite' - job: name: mantic-bobcat-multisite parent: func-target dependencies: - - jammy-yoga-multisite + - jammy-antelope-multisite vars: tox_extra_args: '-- mantic-bobcat-multisite' - job: @@ -97,13 +67,6 @@ - focal-yoga-multisite vars: tox_extra_args: '-- vault:focal-yoga' -- job: - name: vault-jammy-yoga_rgw - parent: func-target - dependencies: - - jammy-yoga-multisite - vars: - tox_extra_args: '-- vault:jammy-yoga' - job: name: vault-focal-yoga-namespaced parent: func-target @@ -111,72 +74,45 @@ - focal-yoga-multisite vars: tox_extra_args: '-- vault:focal-yoga-namespaced' -- job: - name: vault-jammy-yoga-namespaced - parent: func-target - dependencies: - - jammy-yoga-multisite - vars: - tox_extra_args: '-- vault:jammy-yoga-namespaced' - job: name: vault-jammy-antelope-namespaced parent: func-target dependencies: - - jammy-yoga-multisite + - jammy-antelope-multisite vars: tox_extra_args: '-- vault:jammy-antelope-namespaced' - job: name: vault-jammy-bobcat-namespaced parent: func-target dependencies: - - jammy-yoga-multisite + - jammy-antelope-multisite vars: tox_extra_args: '-- vault:jammy-bobcat-namespaced' - job: name: vault-jammy-antelope_rgw parent: func-target dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced + - jammy-antelope-multisite vars: tox_extra_args: '-- vault:jammy-antelope' - job: name: vault-jammy-bobcat_rgw parent: func-target dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced + - vault-jammy-antelope_rgw vars: tox_extra_args: '-- vault:jammy-bobcat' -- job: - name: vault-lunar-antelope-namespaced - parent: func-target - dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced - vars: - tox_extra_args: '-- vault:lunar-antelope-namespaced' - job: name: vault-mantic-bobcat-namespaced parent: func-target dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced + - vault-jammy-antelope_rgw vars: tox_extra_args: '-- vault:mantic-bobcat-namespaced' -- job: - name: vault-lunar-antelope_rgw - parent: func-target - dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced - vars: - tox_extra_args: '-- vault:lunar-antelope' - job: name: vault-mantic-bobcat_rgw parent: func-target dependencies: - - vault-jammy-yoga_rgw - - vault-jammy-yoga-namespaced + - vault-jammy-antelope_rgw vars: tox_extra_args: '-- vault:mantic-bobcat' diff --git a/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml deleted file mode 100644 index 2536b5ad..00000000 --- a/ceph-radosgw/tests/bundles/jammy-yoga-multisite.yaml +++ /dev/null @@ -1,99 +0,0 @@ -options: - source: &source distro - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - '9': - -applications: - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '0' - - secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '1' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '2' - - '6' - - '7' - channel: latest/edge - - secondary-ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '3' - - '8' - - '9' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '4' - channel: latest/edge - - secondary-ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '5' - channel: latest/edge - -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'secondary-ceph-osd:mon' - - 'secondary-ceph-mon:osd' - - - - 'secondary-ceph-radosgw:mon' - - 'secondary-ceph-mon:radosgw' - diff --git a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml deleted file mode 100644 index 64629ae1..00000000 --- a/ceph-radosgw/tests/bundles/jammy-yoga-namespaced.yaml +++ /dev/null @@ -1,124 +0,0 @@ -options: - source: &source distro - -series: jammy - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 
'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/jammy-yoga.yaml b/ceph-radosgw/tests/bundles/jammy-yoga.yaml deleted file mode 100644 index 45ae1af8..00000000 --- a/ceph-radosgw/tests/bundles/jammy-yoga.yaml +++ /dev/null @@ -1,123 +0,0 @@ -options: - source: &source distro - -series: jammy - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/lunar-antelope.yaml b/ceph-radosgw/tests/bundles/lunar-antelope.yaml deleted file mode 100644 index 6d55ab44..00000000 --- a/ceph-radosgw/tests/bundles/lunar-antelope.yaml +++ /dev/null @@ -1,123 +0,0 @@ -options: - source: &source cloud:lunar-antelope - -series: lunar - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: latest/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: latest/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index c92410b9..c205b603 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,25 +1,19 @@ charm_name: ceph-radosgw gate_bundles: - - jammy-yoga-multisite - - vault: jammy-yoga - - vault: jammy-yoga-namespaced - focal-yoga-multisite - vault: focal-yoga - vault: focal-yoga-namespaced smoke_bundles: - - jammy-yoga-multisite - - vault: jammy-yoga + - jammy-antelope-multisite + - vault: jammy-antelope dev_bundles: - - jammy-yoga-multisite - lunar-antelope-multisite - mantic-bobcat-multisite - jammy-antelope-multisite - jammy-bobcat-multisite - - vault: jammy-yoga - - vault: jammy-yoga-namespaced - vault: lunar-antelope - vault: mantic-bobcat - vault: lunar-antelope-namespaced From 679153da21792a5903bcc1a3cae58b2cbaba86ca Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Fri, 1 Sep 2023 13:11:41 +0530 Subject: [PATCH 2548/2699] Adds dashboard enablement/deferral logic improvements Closes-Bug: 2033886 Change-Id: I2f4d4dfa890fc79f7f5d5b1273a11907b60e8255 Signed-off-by: Utkarsh Bhatt --- ceph-dashboard/src/charm.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 5ef66ecc..53a306ca 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -370,7 +370,7 @@ def _configure_service_apis(self) -> None: ), ) - def _configure_dashboard(self, event) -> None: + def _configure_dashboard(self, _event) -> None: """Configure dashboard""" if not self.mon.mons_ready: logging.info("Not configuring dashboard, mons not ready") @@ -726,8 +726,15 @@ def _configure_tls_from_relation(self) -> None: self._configure_tls(key, 
cert, ca_cert, self.TLS_VAULT_CA_CERT_PATH) # Custom SSL Event Handles - def _enable_ssl_from_config(self, _event) -> None: + def _enable_ssl_from_config(self, event) -> None: """Configure Ceph Dashboard SSL with available key/cert from charm.""" + if not ceph_utils.is_dashboard_enabled(): + if self.unit.is_leader(): + ceph_utils.mgr_enable_dashboard() + else: + event.defer() + return + if all([ cmds.check_ceph_dashboard_ssl_configured(), cmds.check_ceph_dashboard_ssl_configured(is_check_host_key=True) @@ -740,6 +747,13 @@ def _enable_ssl_from_config(self, _event) -> None: # Certificates relation handle. def _enable_ssl_from_relation(self, event) -> None: """Configure Ceph Dashboard SSL using key/cert from relation.""" + if not ceph_utils.is_dashboard_enabled(): + if self.unit.is_leader(): + ceph_utils.mgr_enable_dashboard() + else: + event.defer() + return + if cmds.check_ceph_dashboard_ssl_configured(): key, cert, _ = self._get_tls_from_config() if self.is_ceph_dashboard_ssl_key_cert_same(key, cert): From 562d4bb3157e6dfaad494d8277d30838fc9cf6de Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 12 Sep 2023 19:23:43 -0300 Subject: [PATCH 2549/2699] Sync charm libraries Change-Id: Ie6ae6fa1369db537fa606fa04df538f9a5587a0c --- .../contrib/openstack/cert_utils.py | 33 ++++++++++------ .../charmhelpers/contrib/openstack/context.py | 8 ++-- .../contrib/storage/linux/ceph.py | 8 +++- ceph-osd/hooks/charmhelpers/core/unitdata.py | 39 ++++++++++++++++++- ceph-osd/hooks/charmhelpers/fetch/snap.py | 2 +- ceph-osd/lib/charms_ceph/utils.py | 8 +++- 6 files changed, 77 insertions(+), 21 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py index a25ca995..6620f59f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/cert_utils.py @@ -414,18 +414,27 @@ def get_requests_for_local_unit(relation_name=None): is_legacy_request = set(sent).intersection(legacy_keys) for unit in related_units(rid): data = relation_get(rid=rid, unit=unit) - if data.get(raw_certs_key): - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': json.loads(data[raw_certs_key])}) - elif is_legacy_request: - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': {sent['common_name']: - {'cert': data.get(local_name + '.server.cert'), - 'key': data.get(local_name + '.server.key')}}}) + # Note: Bug#2028683 - data may not be available if the certificates + # relation hasn't been populated by the providing charm. If no 'ca' + # in the data then don't attempt the bundle at all. 
+ if data.get('ca'): + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key]) + }) + elif is_legacy_request: + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': { + sent['common_name']: { + 'cert': data.get(local_name + '.server.cert'), + 'key': data.get(local_name + '.server.key') + } + } + }) return bundles diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 24a13d0d..42f15032 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -1748,9 +1748,9 @@ def __init__(self, name=None, script=None, admin_script=None, def __call__(self): total_processes = _calculate_workers() - enable_wsgi_rotation = config('wsgi-rotation') - if enable_wsgi_rotation is None: - enable_wsgi_rotation = True + enable_wsgi_socket_rotation = config('wsgi-socket-rotation') + if enable_wsgi_socket_rotation is None: + enable_wsgi_socket_rotation = True ctxt = { "service_name": self.service_name, "user": self.user, @@ -1764,7 +1764,7 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, - "wsgi_rotation": enable_wsgi_rotation, + "wsgi_socket_rotation": enable_wsgi_socket_rotation, } return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2e1fc1b5..6ec67cba 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -158,15 +158,19 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) -def send_application_name(relid=None): +def send_application_name(relid=None, app_name=None): """Send the application name down the relation. :param relid: Relation id to set application name in. :type relid: str + :param app_name: Application name to send in the relation. + :type app_name: str """ + if app_name is None: + app_name = application_name() relation_set( relation_id=relid, - relation_settings={'application-name': application_name()}) + relation_settings={'application-name': app_name}) def send_osd_settings(): diff --git a/ceph-osd/hooks/charmhelpers/core/unitdata.py b/ceph-osd/hooks/charmhelpers/core/unitdata.py index 8f4bbc61..65153f1f 100644 --- a/ceph-osd/hooks/charmhelpers/core/unitdata.py +++ b/ceph-osd/hooks/charmhelpers/core/unitdata.py @@ -151,6 +151,7 @@ def config_changed(): import datetime import itertools import json +import logging import os import pprint import sqlite3 @@ -521,6 +522,42 @@ class DeltaSet(Record): def kv(): global _KV + + # If we are running unit tests, it is useful to go into memory-backed KV store to + # avoid concurrency issues when running multiple tests. This is not a + # problem when juju is running normally. 
+ + env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower() + if env_var not in ["auto", "no", "yes"]: + logging.warning("Unknown value for CHARM_HELPERS_TESTMODE '%s'" + ", assuming 'no'", env_var) + env_var = "no" + + if env_var == "no": + in_memory_db = False + elif env_var == "yes": + in_memory_db = True + elif env_var == "auto": + # If UNIT_STATE_DB is set, respect this request + if "UNIT_STATE_DB" in os.environ: + in_memory_db = False + # Autodetect normal juju execution by looking for juju variables + elif "JUJU_CHARM_DIR" in os.environ or "JUJU_UNIT_NAME" in os.environ: + in_memory_db = False + else: + # We are probably running in unit test mode + logging.warning("Auto-detected unit test environment for KV store.") + in_memory_db = True + else: + # Help the linter realise that in_memory_db is always set + raise Exception("Cannot reach this line") + if _KV is None: - _KV = Storage() + if in_memory_db: + _KV = Storage(":memory:") + else: + _KV = Storage() + else: + if in_memory_db and _KV.db_path != ":memory:": + logging.warning("Running with in_memory_db and KV is not set to :memory:") return _KV diff --git a/ceph-osd/hooks/charmhelpers/fetch/snap.py b/ceph-osd/hooks/charmhelpers/fetch/snap.py index 36d6bce9..7ab7ce3e 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/snap.py +++ b/ceph-osd/hooks/charmhelpers/fetch/snap.py @@ -52,7 +52,7 @@ def _snap_exec(commands): :param commands: List commands :return: Integer exit code """ - assert type(commands) == list + assert isinstance(commands, list) retry_count = 0 return_code = None diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 01fb9ac9..41eff9b4 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1223,6 +1223,11 @@ def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) +def is_internal_client(name): + keys = ('osd-upgrade', 'osd-removal', 'admin', 'rbd-mirror', 'mds') + return any(name.startswith(key) for key in keys) + + def get_named_key(name, caps=None, pool_list=None): """Retrieve a specific named cephx key. @@ -1236,7 +1241,8 @@ def get_named_key(name, caps=None, pool_list=None): key = ceph_auth_get(key_name) if key: - upgrade_key_caps(key_name, caps) + if is_internal_client(name): + upgrade_key_caps(key_name, caps) return key log("Creating new key for {}".format(name), level=DEBUG) From 88e46d34391ca2b2ff23e08080bad8591fbeb860 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 12 Sep 2023 19:28:19 -0300 Subject: [PATCH 2550/2699] Sync charm libraries Change-Id: Ic2916be468271eb3ff37b305429f3e4133e734c5 --- .../contrib/storage/linux/ceph.py | 8 +++- ceph-proxy/charmhelpers/core/unitdata.py | 39 ++++++++++++++++++- ceph-proxy/charmhelpers/fetch/snap.py | 2 +- ceph-proxy/lib/charms_ceph/broker.py | 2 +- ceph-proxy/lib/charms_ceph/utils.py | 11 +++++- 5 files changed, 55 insertions(+), 7 deletions(-) diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py index 2e1fc1b5..6ec67cba 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/ceph.py @@ -158,15 +158,19 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) -def send_application_name(relid=None): +def send_application_name(relid=None, app_name=None): """Send the application name down the relation. :param relid: Relation id to set application name in. 
:type relid: str + :param app_name: Application name to send in the relation. + :type app_name: str """ + if app_name is None: + app_name = application_name() relation_set( relation_id=relid, - relation_settings={'application-name': application_name()}) + relation_settings={'application-name': app_name}) def send_osd_settings(): diff --git a/ceph-proxy/charmhelpers/core/unitdata.py b/ceph-proxy/charmhelpers/core/unitdata.py index 8f4bbc61..65153f1f 100644 --- a/ceph-proxy/charmhelpers/core/unitdata.py +++ b/ceph-proxy/charmhelpers/core/unitdata.py @@ -151,6 +151,7 @@ def config_changed(): import datetime import itertools import json +import logging import os import pprint import sqlite3 @@ -521,6 +522,42 @@ class DeltaSet(Record): def kv(): global _KV + + # If we are running unit tests, it is useful to go into memory-backed KV store to + # avoid concurrency issues when running multiple tests. This is not a + # problem when juju is running normally. + + env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower() + if env_var not in ["auto", "no", "yes"]: + logging.warning("Unknown value for CHARM_HELPERS_TESTMODE '%s'" + ", assuming 'no'", env_var) + env_var = "no" + + if env_var == "no": + in_memory_db = False + elif env_var == "yes": + in_memory_db = True + elif env_var == "auto": + # If UNIT_STATE_DB is set, respect this request + if "UNIT_STATE_DB" in os.environ: + in_memory_db = False + # Autodetect normal juju execution by looking for juju variables + elif "JUJU_CHARM_DIR" in os.environ or "JUJU_UNIT_NAME" in os.environ: + in_memory_db = False + else: + # We are probably running in unit test mode + logging.warning("Auto-detected unit test environment for KV store.") + in_memory_db = True + else: + # Help the linter realise that in_memory_db is always set + raise Exception("Cannot reach this line") + if _KV is None: - _KV = Storage() + if in_memory_db: + _KV = Storage(":memory:") + else: + _KV = Storage() + else: + if in_memory_db and _KV.db_path != ":memory:": + logging.warning("Running with in_memory_db and KV is not set to :memory:") return _KV diff --git a/ceph-proxy/charmhelpers/fetch/snap.py b/ceph-proxy/charmhelpers/fetch/snap.py index 36d6bce9..7ab7ce3e 100644 --- a/ceph-proxy/charmhelpers/fetch/snap.py +++ b/ceph-proxy/charmhelpers/fetch/snap.py @@ -52,7 +52,7 @@ def _snap_exec(commands): :param commands: List commands :return: Integer exit code """ - assert type(commands) == list + assert isinstance(commands, list) retry_count = 0 return_code = None diff --git a/ceph-proxy/lib/charms_ceph/broker.py b/ceph-proxy/lib/charms_ceph/broker.py index 90b536fb..71f85f45 100644 --- a/ceph-proxy/lib/charms_ceph/broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -907,7 +907,7 @@ def process_requests_v1(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - if type(ret) == dict and 'exit-code' in ret: + if isinstance(ret, dict) and 'exit-code' in ret: return ret return {'exit-code': 0} diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 01fb9ac9..94bfb9e4 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -1223,6 +1223,11 @@ def get_upgrade_key(): return get_named_key('upgrade-osd', _upgrade_caps) +def is_internal_client(name): + keys = ('osd-upgrade', 'osd-removal', 'admin', 'rbd-mirror', 'mds') + return any(name.startswith(key) for key in keys) + + def get_named_key(name, caps=None, pool_list=None): """Retrieve a specific named cephx key. 
@@ -1236,7 +1241,8 @@ def get_named_key(name, caps=None, pool_list=None): key = ceph_auth_get(key_name) if key: - upgrade_key_caps(key_name, caps) + if is_internal_client(name): + upgrade_key_caps(key_name, caps) return key log("Creating new key for {}".format(name), level=DEBUG) @@ -3225,6 +3231,7 @@ def dirs_need_ownership_update(service): ('nautilus', 'octopus'), ('octopus', 'pacific'), ('pacific', 'quincy'), + ('quincy', 'reef'), ]) # Map UCA codenames to Ceph codenames @@ -3248,7 +3255,7 @@ def dirs_need_ownership_update(service): 'yoga': 'quincy', 'zed': 'quincy', 'antelope': 'quincy', - 'bobcat': 'quincy', + 'bobcat': 'reef', } From cccd27942708617ecc888c7b2f038c8d7fc58f8f Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 12 Sep 2023 19:26:02 -0300 Subject: [PATCH 2551/2699] Sync charm libraries Change-Id: I3cc5a774f0d4fec2eb7fb719579df6fce24167ef --- .../hooks/charmhelpers/contrib/openstack/context.py | 8 ++++---- .../contrib/openstack/templates/wsgi-openstack-api.conf | 2 +- .../openstack/templates/wsgi-openstack-metadata.conf | 2 +- .../hooks/charmhelpers/contrib/storage/linux/ceph.py | 8 ++++++-- ceph-radosgw/lib/charms_ceph/broker.py | 2 +- ceph-radosgw/lib/charms_ceph/utils.py | 3 ++- 6 files changed, 15 insertions(+), 10 deletions(-) diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 24a13d0d..42f15032 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -1748,9 +1748,9 @@ def __init__(self, name=None, script=None, admin_script=None, def __call__(self): total_processes = _calculate_workers() - enable_wsgi_rotation = config('wsgi-rotation') - if enable_wsgi_rotation is None: - enable_wsgi_rotation = True + enable_wsgi_socket_rotation = config('wsgi-socket-rotation') + if enable_wsgi_socket_rotation is None: + enable_wsgi_socket_rotation = True ctxt = { "service_name": self.service_name, "user": self.user, @@ -1764,7 +1764,7 @@ def __call__(self): "public_processes": int(math.ceil(self.public_process_weight * total_processes)), "threads": 1, - "wsgi_rotation": enable_wsgi_rotation, + "wsgi_socket_rotation": enable_wsgi_socket_rotation, } return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index 2cb735e9..de5f603f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -12,7 +12,7 @@ Listen {{ admin_port }} Listen {{ public_port }} {% endif -%} -{% if wsgi_rotation -%} +{% if wsgi_socket_rotation -%} WSGISocketRotation On {% else -%} WSGISocketRotation Off diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf index 2cb735e9..de5f603f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -12,7 +12,7 @@ Listen {{ admin_port }} Listen {{ public_port }} {% endif -%} -{% if wsgi_rotation -%} +{% if wsgi_socket_rotation -%} WSGISocketRotation On {% else -%} WSGISocketRotation Off diff --git 
a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py index 2e1fc1b5..6ec67cba 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -158,15 +158,19 @@ def get_osd_settings(relation_name): return _order_dict_by_key(osd_settings) -def send_application_name(relid=None): +def send_application_name(relid=None, app_name=None): """Send the application name down the relation. :param relid: Relation id to set application name in. :type relid: str + :param app_name: Application name to send in the relation. + :type app_name: str """ + if app_name is None: + app_name = application_name() relation_set( relation_id=relid, - relation_settings={'application-name': application_name()}) + relation_settings={'application-name': app_name}) def send_osd_settings(): diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 90b536fb..71f85f45 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -907,7 +907,7 @@ def process_requests_v1(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - if type(ret) == dict and 'exit-code' in ret: + if isinstance(ret, dict) and 'exit-code' in ret: return ret return {'exit-code': 0} diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 41eff9b4..94bfb9e4 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -3231,6 +3231,7 @@ def dirs_need_ownership_update(service): ('nautilus', 'octopus'), ('octopus', 'pacific'), ('pacific', 'quincy'), + ('quincy', 'reef'), ]) # Map UCA codenames to Ceph codenames @@ -3254,7 +3255,7 @@ def dirs_need_ownership_update(service): 'yoga': 'quincy', 'zed': 'quincy', 'antelope': 'quincy', - 'bobcat': 'quincy', + 'bobcat': 'reef', } From 19c06c95c904a4e5f6b1ec4c62b63e093a4a0d36 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 13 Sep 2023 14:11:36 -0300 Subject: [PATCH 2552/2699] Sync libraries again (Reef mapping was missing) Change-Id: I41b5abba9c29a99c16c67ee0d4a8c10e760c76fa --- ceph-osd/lib/charms_ceph/broker.py | 2 +- ceph-osd/lib/charms_ceph/utils.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 90b536fb..71f85f45 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -907,7 +907,7 @@ def process_requests_v1(reqs): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - if type(ret) == dict and 'exit-code' in ret: + if isinstance(ret, dict) and 'exit-code' in ret: return ret return {'exit-code': 0} diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 41eff9b4..94bfb9e4 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -3231,6 +3231,7 @@ def dirs_need_ownership_update(service): ('nautilus', 'octopus'), ('octopus', 'pacific'), ('pacific', 'quincy'), + ('quincy', 'reef'), ]) # Map UCA codenames to Ceph codenames @@ -3254,7 +3255,7 @@ def dirs_need_ownership_update(service): 'yoga': 'quincy', 'zed': 'quincy', 'antelope': 'quincy', - 'bobcat': 'quincy', + 'bobcat': 'reef', } From 00f5a18d95240346ebb01dd3261529cb07ddb2ff Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 19 Sep 2023 08:48:39 +0200 Subject: [PATCH 2553/2699] Doc: fix functest link Also prune CI jobs 
Signed-off-by: Peter Sabaini Change-Id: I334174afe00eab1556779b7aea24710fa8caae05 --- ceph-dashboard/osci.yaml | 1 - ceph-dashboard/tests/README.md | 2 +- ceph-dashboard/tests/bundles/jammy-yoga.yaml | 127 ------------------- 3 files changed, 1 insertion(+), 129 deletions(-) delete mode 100644 ceph-dashboard/tests/bundles/jammy-yoga.yaml diff --git a/ceph-dashboard/osci.yaml b/ceph-dashboard/osci.yaml index 9cb187a6..92e37937 100644 --- a/ceph-dashboard/osci.yaml +++ b/ceph-dashboard/osci.yaml @@ -2,7 +2,6 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py310 - - charm-yoga-functional-jobs - charm-functional-jobs vars: needs_charm_build: true diff --git a/ceph-dashboard/tests/README.md b/ceph-dashboard/tests/README.md index d002a1e4..3e0ccefb 100644 --- a/ceph-dashboard/tests/README.md +++ b/ceph-dashboard/tests/README.md @@ -14,5 +14,5 @@ tox -e func-smoke ``` For full details on functional testing of OpenStack charms please refer to -the [functional testing](https://docs.openstack.org/charm-guide/latest/reference/testing.html#functional-testing) +the [testing](https://docs.openstack.org/charm-guide/latest/community/software-contrib/testing.html) section of the OpenStack Charm Guide. diff --git a/ceph-dashboard/tests/bundles/jammy-yoga.yaml b/ceph-dashboard/tests/bundles/jammy-yoga.yaml deleted file mode 100644 index 5a38b13c..00000000 --- a/ceph-dashboard/tests/bundles/jammy-yoga.yaml +++ /dev/null @@ -1,127 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -local_overlay_enabled: False -series: jammy -applications: - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: ch:prometheus2 - num_units: 1 - series: focal - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # Octrober 2021 - charm: ch:grafana - num_units: 1 - series: focal - options: - anonymous: True - install_method: snap - allow_embedding: True - #telegraf: - # charm: telegraf - # channel: stable - # options: - # hostname: "{host}" - prometheus-alertmanager: - charm: ch:prometheus-alertmanager - num_units: 1 - series: focal - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - channel: quincy/edge - options: - source: *openstack-origin - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - channel: quincy/edge - options: - source: *openstack-origin - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - source: *openstack-origin - gateway-metadata-pool: iscsi-foo-metadata - channel: quincy/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 
'grafana:certificates' - - 'vault:certificates' - #- - 'ceph-osd:juju-info' - #- 'telegraf:juju-info' - #- - 'ceph-mon:juju-info' - # - 'telegraf:juju-info' - #- - 'telegraf:prometheus-client' - # - 'prometheus:target' - #- - 'telegraf:dashboards' - # - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' From 9c9a6f2856aa75b04ba283dd67face5d0711a46d Mon Sep 17 00:00:00 2001 From: Jake Nabasny Date: Sat, 5 Aug 2023 17:46:42 -0400 Subject: [PATCH 2554/2699] Update command references to juju 3.x syntax Change-Id: I4f1e1bbcf43a55b465382af914a0951aedf8c62b Change "relate" to "integrate" in README juju cmd Change-Id: I4f1e1bbcf43a55b465382af914a0951aedf8c62b --- ceph-osd/README.md | 24 +++++++++---------- ceph-osd/actions/remove_disk.py | 2 +- ceph-osd/actions/zap_disk.py | 2 +- .../unit_tests/test_actions_remove_disk.py | 2 +- ceph-osd/unit_tests/test_actions_zap_disk.py | 6 ++--- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/ceph-osd/README.md b/ceph-osd/README.md index 16b12f9a..f7704ecb 100644 --- a/ceph-osd/README.md +++ b/ceph-osd/README.md @@ -130,7 +130,7 @@ three OSDs (one per ceph-osd unit) and three MONs: juju deploy -n 3 --config ceph-osd.yaml ceph-osd juju deploy -n 3 --to lxd:0,lxd:1,lxd:2 ceph-mon - juju add-relation ceph-osd:mon ceph-mon:osd + juju integrate ceph-osd:mon ceph-mon:osd Here, a containerised MON is running alongside each storage node. We've assumed that the machines spawned in the first command are assigned IDs of 0, 1, and 2. @@ -281,10 +281,10 @@ completely (e.g. the storage hardware is reaching EOL). Examples: # Set OSDs '0' and '1' to 'out' on unit `ceph-osd/4` - juju run-action --wait ceph-osd/4 osd-out osds=osd.0,osd.1 + juju run ceph-osd/4 osd-out osds=osd.0,osd.1 # Set all OSDs to 'out' on unit `ceph-osd/2` - juju run-action --wait ceph-osd/2 osd-out osds=all + juju run ceph-osd/2 osd-out osds=all ### Set OSDs to 'in' @@ -297,10 +297,10 @@ with the cluster 'noout' flag. Examples: # Set OSDs '0' and '1' to 'in' on unit `ceph-osd/4` - juju run-action --wait ceph-osd/4 osd-in osds=osd.0,osd.1 + juju run ceph-osd/4 osd-in osds=osd.0,osd.1 # Set all OSDs to 'in' on unit `ceph-osd/2` - juju run-action --wait ceph-osd/2 osd-in osds=all + juju run ceph-osd/2 osd-in osds=all ### Stop and start OSDs @@ -312,10 +312,10 @@ Use the `stop` and `start` actions to stop and start OSD daemons on a unit. Examples: # Stop services 'ceph-osd@0' and 'ceph-osd@1' on unit `ceph-osd/4` - juju run-action --wait ceph-osd/4 stop osds=0,1 + juju run ceph-osd/4 stop osds=0,1 # Start all ceph-osd services on unit `ceph-osd/2` - juju run-action --wait ceph-osd/2 start osds=all + juju run ceph-osd/2 start osds=all > **Note**: Stopping an OSD daemon will put the associated unit into a blocked state. 
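For context, a minimal, hypothetical sketch (not part of this patch) of the
charm-helpers pattern the action scripts documented above follow when
resolving their parameters:

    from charmhelpers.core import hookenv

    def osd_out_example():
        # "osds" arrives exactly as typed after "juju run",
        # e.g. "osd.0,osd.1" or "all"
        osds = hookenv.action_get("osds")
        if not osds:
            hookenv.action_fail("no osds specified")
            return
        hookenv.action_set({"message": "handled {}".format(osds)})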
@@ -339,7 +339,7 @@ The action lists the unit's block devices by categorising them in three ways: Example: # List disks on unit `ceph-osd/4` - juju run-action --wait ceph-osd/4 list-disks + juju run ceph-osd/4 list-disks ### Add a disk @@ -365,7 +365,7 @@ operator to manually add OSD volumes (for disks that are not listed by Example: # Add disk /dev/vde on unit `ceph-osd/4` - juju run-action --wait ceph-osd/4 add-disk osd-devices=/dev/vde + juju run ceph-osd/4 add-disk osd-devices=/dev/vde ### Blacklist a disk @@ -392,7 +392,7 @@ Use the `list-disks` action to list the unit's blacklist entries. Example: # Blacklist disks /dev/vda and /dev/vdf on unit `ceph-osd/0` - juju run-action --wait ceph-osd/0 \ + juju run ceph-osd/0 \ blacklist-add-disk osd-devices='/dev/vda /dev/vdf' ### Un-blacklist a disk @@ -413,7 +413,7 @@ Each device should have an existing entry in the unit's blacklist. Use the Example: # Un-blacklist disk /dev/vdb on unit `ceph-osd/1` - juju run-action --wait ceph-osd/1 \ + juju run ceph-osd/1 \ blacklist-remove-disk osd-devices=/dev/vdb ### Zap a disk @@ -441,7 +441,7 @@ action. Example: # Zap disk /dev/vdc on unit `ceph-osd/3` - juju run-action --wait ceph-osd/3 \ + juju run ceph-osd/3 \ zap-disk i-really-mean-it=true devices=/dev/vdc > **Note**: The `zap-disk` action cannot be run on a mounted device, an active diff --git a/ceph-osd/actions/remove_disk.py b/ceph-osd/actions/remove_disk.py index beced7d9..ec9e546f 100755 --- a/ceph-osd/actions/remove_disk.py +++ b/ceph-osd/actions/remove_disk.py @@ -300,7 +300,7 @@ def write_report(report, ftype): for device, action_args in report.items(): args = json.dumps(action_args, separators=(' ', '=')) args = args.replace('{', '').replace('}', '').replace('"', '') - msg += 'juju run-action {} add-disk {} {}'.format( + msg += 'juju run {} add-disk {} {}'.format( hookenv.local_unit(), 'osd-devices=' + device, args) else: msg = json.dumps(report) diff --git a/ceph-osd/actions/zap_disk.py b/ceph-osd/actions/zap_disk.py index 22733449..ec5ca1f2 100755 --- a/ceph-osd/actions/zap_disk.py +++ b/ceph-osd/actions/zap_disk.py @@ -109,7 +109,7 @@ def zap(): db.flush() hookenv.action_set({ 'message': "{} disk(s) have been zapped, to use them as OSDs, run: \n" - "juju run-action {} add-disk osd-devices=\"{}\"".format( + "juju run {} add-disk osd-devices=\"{}\"".format( len(devices), hookenv.local_unit(), " ".join(devices)) diff --git a/ceph-osd/unit_tests/test_actions_remove_disk.py b/ceph-osd/unit_tests/test_actions_remove_disk.py index cf96bc22..2fb7148d 100644 --- a/ceph-osd/unit_tests/test_actions_remove_disk.py +++ b/ceph-osd/unit_tests/test_actions_remove_disk.py @@ -131,7 +131,7 @@ def test_write_report(self, action_set, local_unit): remove_disk.write_report(report, 'text') self.assertIn('message', output) msg = output['message'] - self.assertIn('juju run-action ceph-osd/0 add-disk', msg) + self.assertIn('juju run ceph-osd/0 add-disk', msg) self.assertIn('osd-devices=dev@', msg) self.assertIn('osd-ids=osd.1', msg) self.assertIn('cache-devices=cache@', msg) diff --git a/ceph-osd/unit_tests/test_actions_zap_disk.py b/ceph-osd/unit_tests/test_actions_zap_disk.py index 00ce17f3..fa7c1eaf 100644 --- a/ceph-osd/unit_tests/test_actions_zap_disk.py +++ b/ceph-osd/unit_tests/test_actions_zap_disk.py @@ -57,7 +57,7 @@ def side_effect(arg): self.hookenv.action_set.assert_called_with({ 'message': "1 disk(s) have been zapped, to use " "them as OSDs, run: \njuju " - "run-action ceph-osd-test/0 add-disk " + "run ceph-osd-test/0 add-disk " 
"osd-devices=\"/dev/vdb\"" }) @@ -84,7 +84,7 @@ def side_effect(arg): self.hookenv.action_set.assert_called_with({ 'message': "2 disk(s) have been zapped, to use " "them as OSDs, run: \njuju " - "run-action ceph-osd-test/0 add-disk " + "run ceph-osd-test/0 add-disk " "osd-devices=\"/dev/vdb /dev/vdc\"" }) @@ -182,7 +182,7 @@ def side_effect(arg): self.hookenv.action_set.assert_called_with({ 'message': "1 disk(s) have been zapped, to use " "them as OSDs, run: \njuju " - "run-action ceph-osd-test/0 add-disk " + "run ceph-osd-test/0 add-disk " "osd-devices=\"/dev/vdb\"" }) From 7ef6a0452c504b034d2a253536dd7f4ca10bc296 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 21 Sep 2023 11:12:13 +0200 Subject: [PATCH 2555/2699] Remove unused tox target The func-dev target is unused and indeed fails since we don't define any dev bundles. func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1137 Signed-off-by: Peter Sabaini Change-Id: I9993b8d8bc55fa1b4c7d61931bfdb6ee4ee3c7b4 --- ceph-dashboard/tox.ini | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 70a0d4b6..aa5eec86 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -139,11 +139,6 @@ basepython = python3 commands = functest-run-suite --keep-model --smoke -[testenv:func-dev] -basepython = python3 -commands = - functest-run-suite --keep-model --dev - [testenv:func-target] basepython = python3 commands = From d2269165834c0ef1b282128d72100b10a368b46e Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 27 Sep 2023 12:52:32 +0200 Subject: [PATCH 2556/2699] Add upgrade func testing Change functional testing to also test upgrades from charmhub version to the locally built charm. Include a new test for installing a locally built charm from scratch. Also pin some support charms to specific tracks. 
Change-Id: Id7de1723a4bbfcff02dd606be650b4410f897913 --- ceph-osd/osci.yaml | 12 + ceph-osd/tests/bundles/focal-yoga.yaml | 17 +- ceph-osd/tests/bundles/jammy-antelope.yaml | 3 +- ceph-osd/tests/bundles/jammy-bobcat.yaml | 17 +- .../tests/bundles/local-jammy-antelope.yaml | 234 ++++++++++++++++++ ceph-osd/tests/bundles/lunar-antelope.yaml | 3 +- ceph-osd/tests/bundles/mantic-bobcat.yaml | 3 +- ceph-osd/tests/tests.yaml | 16 +- 8 files changed, 284 insertions(+), 21 deletions(-) create mode 100644 ceph-osd/tests/bundles/local-jammy-antelope.yaml diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index 2a379fd0..d78147f4 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -8,3 +8,15 @@ charm_build_name: ceph-osd build_type: charmcraft charmcraft_channel: 2.x/stable + check: + jobs: + - new-install-jammy-antelope +- job: + name: new-install-jammy-antelope + parent: func-target + dependencies: + - osci-lint + - charm-build + - tox-py38 + vars: + tox_extra_args: '-- install:local-jammy-antelope' diff --git a/ceph-osd/tests/bundles/focal-yoga.yaml b/ceph-osd/tests/bundles/focal-yoga.yaml index 41b6acbb..6fe81b77 100644 --- a/ceph-osd/tests/bundles/focal-yoga.yaml +++ b/ceph-osd/tests/bundles/focal-yoga.yaml @@ -30,19 +30,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0.19/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0.19/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0.19/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0.19/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0.19/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -53,10 +53,11 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0.19/edge ceph-osd: - charm: ../../ceph-osd.charm + charm: ch:ceph-osd + channel: quincy/edge num_units: 3 storage: osd-devices: 'cinder,10G,2' @@ -88,7 +89,7 @@ applications: source: *openstack-origin to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True diff --git a/ceph-osd/tests/bundles/jammy-antelope.yaml b/ceph-osd/tests/bundles/jammy-antelope.yaml index 48a8b36a..6474006a 100644 --- a/ceph-osd/tests/bundles/jammy-antelope.yaml +++ b/ceph-osd/tests/bundles/jammy-antelope.yaml @@ -54,7 +54,8 @@ applications: channel: 8.0/edge ceph-osd: - charm: ../../ceph-osd.charm + charm: ch:ceph-osd + channel: quincy/edge num_units: 3 storage: osd-devices: 'cinder,10G,2' diff --git a/ceph-osd/tests/bundles/jammy-bobcat.yaml b/ceph-osd/tests/bundles/jammy-bobcat.yaml index 426fdef0..ac468730 100644 --- a/ceph-osd/tests/bundles/jammy-bobcat.yaml +++ b/ceph-osd/tests/bundles/jammy-bobcat.yaml @@ -30,19 +30,19 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -51,10 +51,11 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-osd: - charm: ../../ceph-osd.charm + charm: ch:ceph-osd + channel: latest/edge num_units: 3 storage: osd-devices: 
'cinder,10G,2' @@ -84,7 +85,7 @@ applications: num_units: 1 to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True diff --git a/ceph-osd/tests/bundles/local-jammy-antelope.yaml b/ceph-osd/tests/bundles/local-jammy-antelope.yaml new file mode 100644 index 00000000..48a8b36a --- /dev/null +++ b/ceph-osd/tests/bundles/local-jammy-antelope.yaml @@ -0,0 +1,234 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + glance-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + placement-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '0' + - '1' + - '2' + channel: 8.0/edge + + ceph-osd: + charm: ../../ceph-osd.charm + num_units: 3 + storage: + osd-devices: 'cinder,10G,2' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + aa-profile-mode: enforce + to: + - '3' + - '4' + - '5' + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: quincy/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: 3.9/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '10' + channel: 2023.1/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: 2023.1/edge + + glance: + expose: True + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: 2023.1/edge + + cinder: + expose: True + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: 'None' + glance-api-version: '2' + to: + - '13' + channel: 2023.1/edge + + cinder-ceph: + charm: ch:cinder-ceph + channel: 2023.1/edge + + nova-cloud-controller: + expose: True + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: 2023.1/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: 2023.1/edge + +relations: + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:ceph' + - 'ceph-mon:client' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 
'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'cinder-ceph:ceph' + - 'ceph-mon:client' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement' + - 'keystone' + + - - 'placement' + - 'nova-cloud-controller' + + - - 'cinder-ceph:ceph-access' + - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/bundles/lunar-antelope.yaml b/ceph-osd/tests/bundles/lunar-antelope.yaml index 85692211..8d04e764 100644 --- a/ceph-osd/tests/bundles/lunar-antelope.yaml +++ b/ceph-osd/tests/bundles/lunar-antelope.yaml @@ -66,7 +66,8 @@ applications: channel: 8.0/edge ceph-osd: - charm: ../../ceph-osd.charm + charm: ch:ceph-osd + channel: quincy/edge num_units: 3 storage: osd-devices: 'cinder,10G,2' diff --git a/ceph-osd/tests/bundles/mantic-bobcat.yaml b/ceph-osd/tests/bundles/mantic-bobcat.yaml index 3dc07363..9c2e41c6 100644 --- a/ceph-osd/tests/bundles/mantic-bobcat.yaml +++ b/ceph-osd/tests/bundles/mantic-bobcat.yaml @@ -66,7 +66,8 @@ applications: channel: latest/edge ceph-osd: - charm: ../../ceph-osd.charm + charm: ch:ceph-osd + channel: latest/edge num_units: 3 storage: osd-devices: 'cinder,10G,2' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index af1f66cf..6701ea2e 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -8,12 +8,24 @@ gate_bundles: - mantic-bobcat smoke_bundles: - - focal-xena + - jammy-antelope configure: - - zaza.openstack.charm_tests.glance.setup.add_lts_image + - install: + - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: + - install: + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest + # Charm upgrade, then re-run tests + - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-osd - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephTest - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest From 60a1fe1297030bba04f4f0f830167bc1e71ed393 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 28 Sep 2023 21:47:28 +0200 Subject: [PATCH 2557/2699] Add upgrade func testing Change functional testing to also test upgrades from charmhub version to the locally built charm. Include a new test for installing a locally built charm from scratch. Also pin some support charms to specific tracks. 
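Roughly, the UpgradeCharmsToPath step listed in tests.yaml below boils down
to refreshing the deployed application to the locally built charm before the
suite re-runs; an illustrative sketch only, as the actual zaza implementation
differs:

    import subprocess

    # upgrade the running application in place to the local build
    subprocess.check_call(
        ["juju", "refresh", "ceph-radosgw", "--path", "./ceph-radosgw.charm"])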
Change-Id: I57937f55526f531cb807a876fd074f5f589b00ed --- .../tests/bundles/jammy-antelope.yaml | 13 +- .../tests/bundles/local-jammy-antelope.yaml | 121 ++++++++++++++++++ ceph-radosgw/tests/tests.yaml | 6 + 3 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/local-jammy-antelope.yaml diff --git a/ceph-radosgw/tests/bundles/jammy-antelope.yaml b/ceph-radosgw/tests/bundles/jammy-antelope.yaml index e6cd4982..ad5c78e8 100644 --- a/ceph-radosgw/tests/bundles/jammy-antelope.yaml +++ b/ceph-radosgw/tests/bundles/jammy-antelope.yaml @@ -26,21 +26,20 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *source to: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source @@ -85,14 +84,14 @@ applications: vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge vault: charm: ch:vault num_units: 1 to: - '11' - channel: latest/edge + channel: 1.8/stable relations: diff --git a/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml b/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml new file mode 100644 index 00000000..2029ec51 --- /dev/null +++ b/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml @@ -0,0 +1,121 @@ +options: + source: &source cloud:jammy-antelope + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '0' + - '1' + - '2' + channel: 8.0/edge + + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: 1.8/stable + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - - 'ceph-radosgw:certificates' + - 'vault:certificates' diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index c205b603..1fe9b0fc 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml 
@@ -39,6 +39,12 @@ tests:
   - zaza.openstack.charm_tests.swift.tests.S3APITest
   - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
   - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
+  # Charm upgrade, then re-run tests
+  - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-radosgw
+  - zaza.openstack.charm_tests.ceph.tests.CephRGWTest
+  - zaza.openstack.charm_tests.swift.tests.S3APITest
+  - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
+  - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation

 tests_options:
   force_deploy:

From 788b7a5957ef069f8d7a9f1faa9c833c7c685ad9 Mon Sep 17 00:00:00 2001
From: Samuel Walladge
Date: Fri, 29 Sep 2023 13:30:32 +0930
Subject: [PATCH 2558/2699] Warn in status if tune-osd-memory-target invalid

This is useful because, if an invalid value is set, the value is
ignored rather than overridden, and an error is logged. So now we also
warn about this in the unit's status, to make the problem more obvious
to the user.

Change-Id: Idc4a7706f30cbcea8aee83a1406fa84139fe510d
---
 ceph-osd/hooks/ceph_hooks.py           | 17 ++++++++++++++
 ceph-osd/unit_tests/test_ceph_hooks.py | 32 ++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)

diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index 474389a2..d2ce9b5b 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -386,6 +386,18 @@ def warn_if_memory_outside_bounds(value):
             "This is not recommended.", level=WARNING)


+def is_tune_osd_memory_target_valid() -> bool:
+    """
+    Check if the tune-osd-memory-target value is valid
+
+    :returns: True if valid, else False
+    :rtype: bool
+    """
+    # NOTE: keep this logic in sync with get_osd_memory_target()
+    value = config('tune-osd-memory-target')
+    return not value or bool(re.match(r"\d+(?:GB|%)$", value))
+
+
 def get_osd_memory_target():
     """
     Processes the config value of tune-osd-memory-target.
@@ -868,6 +880,11 @@ def secrets_storage_changed():

 def assess_status():
     """Assess status of current unit"""
+
+    if not is_tune_osd_memory_target_valid():
+        status_set('blocked', 'tune-osd-memory-target config value is invalid')
+        return
+
     # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
     if is_unit_upgrading_set():
diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py
index 2bfa2d3c..711f46e4 100644
--- a/ceph-osd/unit_tests/test_ceph_hooks.py
+++ b/ceph-osd/unit_tests/test_ceph_hooks.py
@@ -737,6 +737,38 @@ def test_warn_memory_bounds(
             level=ceph_hooks.WARNING
         )

+    @patch.object(ceph_hooks, "config")
+    def test_is_tune_osd_memory_target_valid(self, mock_config):
+        def tune(value):
+            return lambda k: (
+                value if k == "tune-osd-memory-target" else KeyError
+            )
+
+        # value, is_valid
+        scenarios = [
+            ("", True),
+            ("5GB", True),
+            ("020GB", True),
+            ("34GB", True),
+            ("5%", True),
+            ("05%", True),
+            ("50%", True),
+            ("test", False),
+            (" ", False),
+            ("5", False),
+            ("GB", False),
+            ("%", False),
+            ("test5GB", False),
+            ("50%%", False),
+        ]
+        for value, expected_valid in scenarios:
+            mock_config.side_effect = tune(value)
+            print(f"testing tune-osd-memory-target set to {value}")
+            self.assertEqual(
+                ceph_hooks.is_tune_osd_memory_target_valid(),
+                expected_valid
+            )
+
     @patch.object(ceph_hooks, "config")
     @patch.object(ceph_hooks, "get_total_ram")
     @patch.object(ceph_hooks, "kv")

From bc26ab5ff268ad39ca9170193858e8f9e179f99c Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Fri, 29 Sep 2023 15:30:00 +0200
Subject: [PATCH 2559/2699] Fix version retrieval

During cluster deployment a situation can arise where there are
already osd relations but osds are not yet fully added to the
cluster. This can make version retrieval fail for osds. Retry version
retrieval to give the cluster a chance to settle.

Also update tests to install OpenStack from latest/edge

Change-Id: I12a1bcd32be2ed8a8e5ee0e304f716f5a190bd57
---
 ceph-mon/src/utils.py                    | 29 ++++++++++++++++++++----
 ceph-mon/tests/bundles/jammy-bobcat.yaml | 20 ++++++++--------
 ceph-mon/unit_tests/test_ceph_utils.py   |  6 ++---
 3 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py
index 759ae9d7..2bb145c9 100644
--- a/ceph-mon/src/utils.py
+++ b/ceph-mon/src/utils.py
@@ -18,6 +18,7 @@ import subprocess

 import errno
+import tenacity
 from charmhelpers.core.hookenv import (
     DEBUG,
     cached,
@@ -327,10 +328,16 @@ def execute_post_osd_upgrade_steps(ceph_osd_release):
         log(message=msg, level='ERROR')


-def _all_ceph_versions_same():
-    """Checks that ceph-mon and ceph-osd have converged to the same version.
+@tenacity.retry(
+    wait=tenacity.wait_exponential(multiplier=1, max=10),
+    reraise=True,
+    stop=tenacity.stop_after_attempt(8))
+def _get_versions():
+    """Gets the ceph versions.

-    :return boolean: True if all same, false if not or command failed.
+    Retry if the command fails, to give the cluster time to settle.
+
+    :return tuple: (bool, dict) success flag and the parsed versions dict
     """
     try:
         versions_command = 'ceph versions'
@@ -340,11 +347,25 @@
         if call_error.returncode == errno.EINVAL:
             log('Calling "ceph versions" failed. Command requires '
                 'luminous and above.', level='WARNING')
-            return False
+            return False, {}
         else:
             log('Calling "ceph versions" failed.', level='ERROR')
             raise OsdPostUpgradeError(call_error)
+    log('Versions: {}'.format(versions_str), level='DEBUG')
     versions_dict = json.loads(versions_str)
+    # Provoke a KeyError if we don't have OSD versions yet, to cause a retry
+    _ = versions_dict['osd']
+    return True, versions_dict
+
+
+def _all_ceph_versions_same():
+    """Checks that ceph-mon and ceph-osd have converged to the same version.
+ + :return boolean: True if all same, false if not or command failed. + """ + ok, versions_dict = _get_versions() + if not ok: + return False if len(versions_dict['overall']) > 1: log('All upgrades of mon and osd have not completed.') return False diff --git a/ceph-mon/tests/bundles/jammy-bobcat.yaml b/ceph-mon/tests/bundles/jammy-bobcat.yaml index ad65cb90..2227b406 100644 --- a/ceph-mon/tests/bundles/jammy-bobcat.yaml +++ b/ceph-mon/tests/bundles/jammy-bobcat.yaml @@ -77,11 +77,11 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon - channel: quincy/edge + channel: latest/edge num_units: 3 options: source: *openstack-origin @@ -96,7 +96,7 @@ applications: num_units: 1 options: source: *openstack-origin - channel: quincy/edge + channel: latest/edge to: - '17' @@ -108,7 +108,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: 2023.1/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -118,7 +118,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: 2023.1/edge + channel: latest/edge glance: expose: True @@ -128,7 +128,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: 2023.1/edge + channel: latest/edge cinder: expose: True @@ -140,11 +140,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: 2023.1/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: 2023.1/edge + channel: latest/edge nova-cloud-controller: expose: True @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: 2023.1/edge + channel: latest/edge placement: charm: ch:placement @@ -163,7 +163,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: 2023.1/edge + channel: latest/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index 2cf59996..ba3da7f3 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -172,7 +172,7 @@ def test_all_ceph_versions_same_one_overall_one_osd_true( self.assertTrue( return_bool, msg='all_ceph_versions_same returned False but should be True') - log.assert_not_called() + log.assert_called_once() @mock.patch.object(utils.subprocess, 'check_output') @mock.patch.object(utils.json, 'loads') @@ -190,7 +190,7 @@ def test_all_ceph_versions_same_two_overall_returns_false( self.assertFalse( return_bool, msg='all_ceph_versions_same returned True but should be False') - log.assert_called_once() + self.assertEquals(log.call_count, 2) @mock.patch.object(utils.subprocess, 'check_output') @mock.patch.object(utils.json, 'loads') @@ -208,7 +208,7 @@ def test_all_ceph_versions_same_one_overall_no_osd_returns_false( self.assertFalse( return_bool, msg='all_ceph_versions_same returned True but should be False') - log.assert_called_once() + self.assertEquals(log.call_count, 2) @mock.patch.object(utils.subprocess, 'check_output') @mock.patch.object(utils, 'log') From abf26471d342d219fae63d3f36dc6c6394a1d7ef Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 2 Oct 2023 12:34:31 +0200 Subject: [PATCH 2560/2699] Fix: increase timeout for get versions Change-Id: Iee13e9a88f047f5835aee8e5a308ce2035d28891 --- ceph-mon/src/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py index 2bb145c9..9e41ea82 100644 --- a/ceph-mon/src/utils.py +++ b/ceph-mon/src/utils.py @@ -331,7 +331,7 @@ def 
execute_post_osd_upgrade_steps(ceph_osd_release): @tenacity.retry( wait=tenacity.wait_exponential(multiplier=1, max=10), reraise=True, - stop=tenacity.stop_after_attempt(8)) + stop=tenacity.stop_after_attempt(30)) def _get_versions(): """Gets the ceph versions. From c84624fe99c239fcd1cbf6fa3be5da588819e306 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 3 Oct 2023 09:02:43 +0200 Subject: [PATCH 2561/2699] Remove kinetic support Change-Id: I92d02b1385dec1a047ed67d162ef860ccb15b061 --- ceph-iscsi/metadata.yaml | 1 - ceph-iscsi/osci.yaml | 18 ----- ceph-iscsi/tests/bundles/jammy-ec.yaml | 7 +- ceph-iscsi/tests/bundles/jammy.yaml | 7 +- ceph-iscsi/tests/bundles/kinetic-ec.yaml | 94 ----------------------- ceph-iscsi/tests/bundles/kinetic.yaml | 95 ------------------------ ceph-iscsi/tests/tests.yaml | 2 - 7 files changed, 8 insertions(+), 216 deletions(-) delete mode 100644 ceph-iscsi/tests/bundles/kinetic-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/kinetic.yaml diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index a9d1c77d..452d26e7 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -13,7 +13,6 @@ tags: series: - focal - jammy -- kinetic - lunar subordinate: false min-juju-version: 2.7.6 diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index 4150888f..d502701a 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -7,10 +7,6 @@ - ceph-iscsi-focal-quincy-ec - ceph-iscsi-jammy-quincy - ceph-iscsi-jammy-quincy-ec - - ceph-iscsi-kinetic-quincy: - voting: false - - ceph-iscsi-kinetic-quincy-ec: - voting: false - ceph-iscsi-lunar-quincy: voting: false - ceph-iscsi-lunar-quincy-ec: @@ -52,20 +48,6 @@ - ceph-iscsi-jammy-quincy vars: tox_extra_args: -- jammy-ec -- job: - name: ceph-iscsi-kinetic-quincy - parent: func-target - dependencies: - - ceph-iscsi-jammy-quincy - vars: - tox_extra_args: -- kinetic -- job: - name: ceph-iscsi-kinetic-quincy-ec - parent: func-target - dependencies: - - ceph-iscsi-jammy-quincy - vars: - tox_extra_args: -- kinetic-ec - job: name: ceph-iscsi-lunar-quincy diff --git a/ceph-iscsi/tests/bundles/jammy-ec.yaml b/ceph-iscsi/tests/bundles/jammy-ec.yaml index 61132188..0bbabc23 100644 --- a/ceph-iscsi/tests/bundles/jammy-ec.yaml +++ b/ceph-iscsi/tests/bundles/jammy-ec.yaml @@ -69,7 +69,7 @@ applications: charm: ch:vault to: - '6' - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 @@ -77,10 +77,11 @@ applications: - '8' - '9' - '10' - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge + relations: - - 'ceph-mon:client' - 'ceph-iscsi:ceph-client' diff --git a/ceph-iscsi/tests/bundles/jammy.yaml b/ceph-iscsi/tests/bundles/jammy.yaml index 11fac588..6a57b0f7 100644 --- a/ceph-iscsi/tests/bundles/jammy.yaml +++ b/ceph-iscsi/tests/bundles/jammy.yaml @@ -70,7 +70,7 @@ applications: charm: ch:vault to: - '6' - channel: latest/edge + channel: 1.8/stable mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 @@ -78,10 +78,11 @@ applications: - '8' - '9' - '10' - channel: latest/edge + channel: 8.0/edge vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge + relations: - - 'ceph-mon:client' - 'ceph-iscsi:ceph-client' diff --git a/ceph-iscsi/tests/bundles/kinetic-ec.yaml b/ceph-iscsi/tests/bundles/kinetic-ec.yaml deleted file mode 100644 index d2fc1a7f..00000000 --- a/ceph-iscsi/tests/bundles/kinetic-ec.yaml +++ /dev/null @@ -1,94 +0,0 @@ 
-local_overlay_enabled: False -series: kinetic -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: latest/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/kinetic.yaml b/ceph-iscsi/tests/bundles/kinetic.yaml deleted file mode 100644 index e674318d..00000000 --- a/ceph-iscsi/tests/bundles/kinetic.yaml +++ /dev/null @@ -1,95 +0,0 @@ -local_overlay_enabled: False -series: kinetic -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': - '16': - '17': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 4 - options: - gateway-metadata-pool: iscsi-foo-metadata - to: - - '0' - - '1' - - '16' - - '17' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: latest/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index 4114bbb5..b4f83fff 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -9,8 +9,6 @@ smoke_bundles: dev_bundles: - jammy-ec - jammy - - kinetic-ec - - kinetic configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup From 
7a44f37f22714187609b8da4e95b8ba08fe9c5e4 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 3 Oct 2023 17:56:54 +0200 Subject: [PATCH 2562/2699] Add reef functests Reef functests, use jammy-bobcat as pkg source Change-Id: I28ac0868b1e40153d06ccb353ea833f11c19a0b9 --- ceph-iscsi/osci.yaml | 20 ++++ ceph-iscsi/tests/bundles/jammy-reef-ec.yaml | 100 +++++++++++++++++++ ceph-iscsi/tests/bundles/jammy-reef.yaml | 101 ++++++++++++++++++++ ceph-iscsi/tests/tests.yaml | 2 + 4 files changed, 223 insertions(+) create mode 100644 ceph-iscsi/tests/bundles/jammy-reef-ec.yaml create mode 100644 ceph-iscsi/tests/bundles/jammy-reef.yaml diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index d502701a..237382b6 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -7,6 +7,8 @@ - ceph-iscsi-focal-quincy-ec - ceph-iscsi-jammy-quincy - ceph-iscsi-jammy-quincy-ec + - ceph-iscsi-jammy-reef + - ceph-iscsi-jammy-reef-ec - ceph-iscsi-lunar-quincy: voting: false - ceph-iscsi-lunar-quincy-ec: @@ -49,6 +51,24 @@ vars: tox_extra_args: -- jammy-ec +- job: + name: ceph-iscsi-jammy-reef + parent: func-target + dependencies: + - charm-build + - osci-lint + - name: tox-py310 + soft: true + vars: + tox_extra_args: -- jammy-reef +- job: + name: ceph-iscsi-jammy-reef-ec + parent: func-target + dependencies: + - ceph-iscsi-jammy-reef + vars: + tox_extra_args: -- jammy-reef-ec + - job: name: ceph-iscsi-lunar-quincy parent: func-target diff --git a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml new file mode 100644 index 00000000..45b749c0 --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml @@ -0,0 +1,100 @@ +variables: + source: &source cloud:jammy-bobcat + +local_overlay_enabled: False +series: jammy +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + to: + - '0' + - '1' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *source + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *source + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: 1.8/stable + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: 8.0/edge + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-reef.yaml b/ceph-iscsi/tests/bundles/jammy-reef.yaml new file mode 100644 index 00000000..93fc7fe0 --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy-reef.yaml @@ -0,0 +1,101 @@ +options: + source: &source cloud:jammy-bobcat + +local_overlay_enabled: False +series: jammy +machines: + '0': + 
'1':
+  '2':
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+    constraints: mem=3072M
+  '9':
+    constraints: mem=3072M
+  '10':
+    constraints: mem=3072M
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+  '17':
+applications:
+  ubuntu:
+    charm: cs:ubuntu
+    num_units: 3
+    to:
+      - '7'
+      - '14'
+      - '15'
+  ceph-iscsi:
+    charm: ../../ceph-iscsi.charm
+    num_units: 4
+    options:
+      gateway-metadata-pool: iscsi-foo-metadata
+    to:
+      - '0'
+      - '1'
+      - '16'
+      - '17'
+  ceph-osd:
+    charm: ch:ceph-osd
+    num_units: 6
+    storage:
+      osd-devices: 'cinder,10G'
+    options:
+      osd-devices: '/dev/test-non-existent'
+      source: *source
+    to:
+      - '0'
+      - '1'
+      - '2'
+      - '11'
+      - '12'
+      - '13'
+    channel: latest/edge
+  ceph-mon:
+    charm: ch:ceph-mon
+    num_units: 3
+    options:
+      monitor-count: '3'
+      source: *source
+    to:
+      - '3'
+      - '4'
+      - '5'
+    channel: latest/edge
+  vault:
+    num_units: 1
+    charm: ch:vault
+    to:
+      - '6'
+    channel: 1.8/stable
+  mysql-innodb-cluster:
+    charm: ch:mysql-innodb-cluster
+    num_units: 3
+    to:
+      - '8'
+      - '9'
+      - '10'
+    channel: 8.0/edge
+  vault-mysql-router:
+    charm: ch:mysql-router
+    channel: 8.0/edge
+
+relations:
+  - - 'ceph-mon:client'
+    - 'ceph-iscsi:ceph-client'
+  - - 'vault:certificates'
+    - 'ceph-iscsi:certificates'
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'vault:shared-db'
+    - 'vault-mysql-router:shared-db'
+  - - 'vault-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml
index b4f83fff..fd844339 100644
--- a/ceph-iscsi/tests/tests.yaml
+++ b/ceph-iscsi/tests/tests.yaml
@@ -4,6 +4,8 @@ gate_bundles:
   - focal
   - jammy-ec
   - jammy
+  - jammy-reef
+  - jammy-reef-ec
 smoke_bundles:
   - jammy
 dev_bundles:

From d2993eb6be7e0129b603e3b868c2f99a96a8bde3 Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Thu, 5 Oct 2023 14:33:07 +0200
Subject: [PATCH 2563/2699] Remove FileStore support

Remove support for creating FileStore OSDs. Also prevent upgrade
attempts to Reef if a FileStore OSD is detected.

Change-Id: I9609bc0222365cb1f4059312b466a12ef4e0397f
---
 ceph-osd/actions/add_disk.py                 |   1 -
 ceph-osd/config.yaml                         |   9 --
 ceph-osd/hooks/ceph_hooks.py                 |  13 +-
 ceph-osd/hooks/utils.py                      |  23 ++++
 ceph-osd/lib/charms_ceph/utils.py            | 136 ++++++-------------
 ceph-osd/templates/ceph.conf                 |   9 +-
 ceph-osd/unit_tests/test_actions_add_disk.py |   8 +-
 ceph-osd/unit_tests/test_ceph_hooks.py       |  16 ---
 ceph-osd/unit_tests/test_upgrade.py          |  40 +++++-
 9 files changed, 111 insertions(+), 144 deletions(-)

diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py
index 98af2def..15de3478 100755
--- a/ceph-osd/actions/add_disk.py
+++ b/ceph-osd/actions/add_disk.py
@@ -115,7 +115,6 @@ def add_device(request, device_path, bucket=None,
                   ceph_hooks.get_journal_devices(),
                   hookenv.config('ignore-device-errors'),
                   hookenv.config('osd-encrypt'),
-                  charms_ceph.utils.use_bluestore(),
                   hookenv.config('osd-encrypt-keymanager'),
                   osd_id)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index 22908119..de77e1af 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -90,15 +90,6 @@ options:
       where the specified journal device does not exist on a node.
       .
       Only supported with ceph >= 0.48.3.
-  bluestore:
-    type: boolean
-    default: True
-    description: |
-      Enable BlueStore storage backend for OSD devices.
-      .
-      Only supported with ceph >= 12.2.0.
-      .
-      Setting to 'False' will use FileStore as the storage format.
bluestore-wal:
     type: string
     default:
diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py
index d2ce9b5b..993166a7 100755
--- a/ceph-osd/hooks/ceph_hooks.py
+++ b/ceph-osd/hooks/ceph_hooks.py
@@ -26,6 +26,8 @@ import sys
 import traceback

+import utils
+
 sys.path.append('lib')
 import charms_ceph.utils as ceph
 from charmhelpers.core import hookenv
@@ -147,6 +149,14 @@ def check_for_upgrade():
                                                  'distro')
     new_version_os = get_os_codename_install_source(hookenv.config('source'))

+    # If the new version is reef and we detect that we are running FileStore,
+    # bail out with an error message
+    filestore_osds = utils.find_filestore_osds()
+    if new_version == 'reef' and filestore_osds:
+        log("Refuse to upgrade to reef with FileStore OSDs present: {}".format(
+            filestore_osds), level=ERROR)
+        return
+
     # May be in a previous upgrade that was failed if the directories
     # still need an ownership update. Check this condition.
     resuming_upgrade = ceph.dirs_need_ownership_update('osd')
@@ -464,7 +474,6 @@ def get_ceph_context(upgrading=False):
             'dio': str(config('use-direct-io')).lower(),
             'short_object_len': use_short_objects(),
             'upgrade_in_progress': upgrading,
-            'bluestore': ceph.use_bluestore(),
             'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
             'bluestore_block_wal_size': config('bluestore-block-wal-size'),
             'bluestore_block_db_size': config('bluestore-block-db-size'),
@@ -619,13 +628,11 @@ def prepare_disks_and_activate():
         log('ceph bootstrapped, rescanning disks')
         emit_cephconf()
         ceph.udevadm_settle()
-        bluestore = ceph.use_bluestore()
         for dev in get_devices():
             ceph.osdize(dev, config('osd-format'),
                         osd_journal,
                         config('ignore-device-errors'),
                         config('osd-encrypt'),
-                        bluestore,
                         config('osd-encrypt-keymanager'))
         # Make it fast!
         if config('autotune'):
diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py
index 0a14d1bb..a86b99eb 100644
--- a/ceph-osd/hooks/utils.py
+++ b/ceph-osd/hooks/utils.py
@@ -692,3 +692,26 @@ def get_parent_device(dev):
             return '/dev/' + child['name']

     return dev
+
+
+def find_filestore_osds():
+    # Path to the Ceph OSD directories
+    osd_path = '/var/lib/ceph/osd'
+
+    # Search through OSD directories in path starting with 'ceph-'
+    dirs = [d for d in os.listdir(osd_path)
+            if d.startswith('ceph-')
+            and os.path.isdir(os.path.join(osd_path, d))]
+
+    found = []
+    for dir in dirs:
+        # Construct the full path to this OSD's 'type' marker file
+        type_file_path = os.path.join(osd_path, dir, 'type')
+        # Open and read the type file
+        with open(type_file_path, 'r') as f:
+            content = f.read()
+        # Check if the content includes 'filestore'
+        if 'filestore' in content:
+            found.append(dir)
+
+    return found
diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py
index 94bfb9e4..756dd9f1 100644
--- a/ceph-osd/lib/charms_ceph/utils.py
+++ b/ceph-osd/lib/charms_ceph/utils.py
@@ -1324,16 +1324,6 @@ def systemd():
     return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid'


-def use_bluestore():
-    """Determine whether bluestore should be used for OSD's
-
-    :returns: whether bluestore disk format should be used
-    :rtype: bool"""
-    if cmp_pkgrevno('ceph', '12.2.0') < 0:
-        return False
-    return config('bluestore')
-
-
 def bootstrap_monitor_cluster(secret):
     """Bootstrap local Ceph mon into the Ceph cluster

@@ -1551,21 +1541,21 @@ def get_devices(name):


 def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
-           bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None):
+           key_manager=CEPH_KEY_MANAGER, osd_id=None):
     if dev.startswith('/dev'):
         osdize_dev(dev,
osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager, osd_id) + key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", level=WARNING) return - osdize_dir(dev, encrypt, bluestore) + osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + encrypt=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1579,7 +1569,6 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1630,15 +1619,13 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, cmd = _ceph_volume(dev, osd_journal, encrypt, - bluestore, key_manager, osd_id) else: cmd = _ceph_disk(dev, osd_format, osd_journal, - encrypt, - bluestore) + encrypt) try: status_set('maintenance', 'Initializing device {}'.format(dev)) @@ -1669,7 +1656,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, db.flush() -def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False): """ Prepare a device for usage as a Ceph OSD using ceph-disk @@ -1677,7 +1664,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): The function looks up realpath of the device :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption (unsupported) - :param: bluestore: Use bluestore storage for OSD :returns: list. 'ceph-disk' command and required parameters for execution by check_call """ @@ -1686,25 +1672,17 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): if encrypt: cmd.append('--dmcrypt') - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - # NOTE(jamespage): enable experimental bluestore support - if use_bluestore(): - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) cmd.append(os.path.realpath(dev)) @@ -1715,8 +1693,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): return cmd -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): +def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. 
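With the FileStore branch removed, the command this helper assembles always takes the BlueStore shape. For orientation, a sketch of the resulting invocation for a plain unencrypted device, assuming the base command is `ceph-volume lvm create` and using a made-up VG/LV name in place of the `_allocate_logical_volume()` result:

    import uuid

    osd_fsid = str(uuid.uuid4())
    # Hypothetical volume group/logical volume standing in for the value
    # returned by _allocate_logical_volume().
    data_lv = 'ceph-{}/osd-block-{}'.format(osd_fsid, osd_fsid)

    cmd = [
        'ceph-volume', 'lvm', 'create',
        '--osd-fsid', osd_fsid,
        '--bluestore',
        '--data', data_lv,
    ]

`--block.wal` and `--block.db` arguments are appended only when `bluestore-wal`/`bluestore-db` devices are configured, as the hunks below show.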
@@ -1726,7 +1704,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: dev: Full path to use for OSD block device setup :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption - :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting @@ -1739,13 +1716,8 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, osd_fsid = str(uuid.uuid4()) cmd.append('--osd-fsid') cmd.append(osd_fsid) - - if bluestore: - cmd.append('--bluestore') - main_device_type = 'block' - else: - cmd.append('--filestore') - main_device_type = 'data' + cmd.append('--bluestore') + main_device_type = 'block' if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') @@ -1753,19 +1725,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if osd_id is not None: cmd.extend(['--osd-id', str(osd_id)]) - # On-disk journal volume creation - if not osd_journal and not bluestore: - journal_lv_type = 'journal' - cmd.append('--journal') - cmd.append(_allocate_logical_volume( - dev=dev, - lv_type=journal_lv_type, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - encrypt=encrypt, - key_manager=key_manager) - ) - cmd.append('--data') cmd.append(_allocate_logical_volume(dev=dev, lv_type=main_device_type, @@ -1773,36 +1732,21 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, encrypt=encrypt, key_manager=key_manager)) - if bluestore: - for extra_volume in ('wal', 'db'): - devices = get_devices('bluestore-{}'.format(extra_volume)) - if devices: - cmd.append('--block.{}'.format(extra_volume)) - least_used = find_least_used_utility_device(devices, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type=extra_volume, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - elif osd_journal: - cmd.append('--journal') - least_used = find_least_used_utility_device(osd_journal, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type='journal', - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) return cmd @@ -2040,7 +1984,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, return "{}/{}".format(vg_name, lv_name) -def osdize_dir(path, encrypt=False, bluestore=False): +def osdize_dir(path, encrypt=False): """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. 
The directory to osdize @@ -2077,12 +2021,8 @@ def osdize_dir(path, encrypt=False, bluestore=False): if cmp_pkgrevno('ceph', '0.60') >= 0: if encrypt: cmd.append('--dmcrypt') + cmd.append('--bluestore') - # NOTE(icey): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 2966ce58..931ff8c2 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -44,7 +44,7 @@ osd crush initial weight = {{ crush_initial_weight }} {% endfor %} {% endif %} -{% if bluestore_experimental and bluestore -%} +{% if bluestore_experimental -%} enable experimental unrecoverable data corrupting features = bluestore rocksdb {%- endif %} @@ -66,7 +66,6 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring [osd] keyring = /var/lib/ceph/osd/$cluster-$id/keyring -{% if bluestore -%} {% if not bluestore_experimental -%} osd objectstore = bluestore {%- endif %} @@ -77,11 +76,7 @@ bluestore block wal size = {{ bluestore_block_wal_size }} bluestore block db size = {{ bluestore_block_db_size }} {%- endif %} {% include 'section-ceph-bluestore-compression' %} -{%- else %} -osd journal size = {{ osd_journal_size }} -filestore xattr use omap = true -journal dio = {{ dio }} -{%- endif %} + bdev enable discard = {{ bdev_discard }} bdev async discard = {{ bdev_discard }} diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index 6e1618c8..6e04308f 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -25,17 +25,14 @@ def setUp(self): add_disk, ['hookenv', 'kv']) self.kv.return_value = self.kv - @mock.patch.object(add_disk.charms_ceph.utils, 'use_bluestore') @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices') @mock.patch.object(add_disk.charms_ceph.utils, 'osdize') - def test_add_device(self, mock_osdize, mock_get_journal_devices, - mock_use_bluestore): + def test_add_device(self, mock_osdize, mock_get_journal_devices): def fake_config(key): return { 'ignore-device-errors': True, 'osd-encrypt': True, - 'bluestore': True, 'osd-encrypt-keymanager': True, 'autotune': False, }.get(key) @@ -43,7 +40,6 @@ def fake_config(key): self.hookenv.config.side_effect = fake_config mock_get_journal_devices.return_value = '' self.hookenv.relation_ids.return_value = ['ceph:0'] - mock_use_bluestore.return_value = True db = mock.MagicMock() self.kv.return_value = db @@ -56,7 +52,7 @@ def fake_config(key): relation_settings={'bootstrapped-osds': 1}) self.hookenv.relation_set.assert_has_calls([call]) mock_osdize.assert_has_calls([mock.call('/dev/myosddev', - None, '', True, True, True, + None, '', True, True, True, None)]) piter = add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev']) diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 711f46e4..b4cceea0 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -68,7 +68,6 @@ def setUp(self): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) 
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -99,7 +98,6 @@ def test_get_ceph_context(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0} @@ -116,7 +114,6 @@ def test_get_ceph_context(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") - @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph, 'config') @patch.object(ceph_hooks, 'config') def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, @@ -146,7 +143,6 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': False, - 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0} @@ -161,7 +157,6 @@ def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config, @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda pkg, ver: -1 if ver == '12.1.0' else 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -192,7 +187,6 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': False, 'bluestore_experimental': True, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0} @@ -206,7 +200,6 @@ def test_get_ceph_context_filestore_old(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: True) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -244,7 +237,6 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': True, 'bluestore_experimental': False, 'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE, 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} @@ -259,7 +251,6 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda pkg, ver: -1 if ver == '12.1.0' else 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: True) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -268,7 +259,6 @@ def test_get_ceph_context_bluestore(self, mock_config, mock_config2): def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): self.maxDiff = None config = copy.deepcopy(CHARM_CONFIG) - config['bluestore'] = True config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE config['bluestore-block-db-size'] = BLUESTORE_DB_TEST_SIZE mock_config.side_effect = 
lambda key: config[key] @@ -294,7 +284,6 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': True, 'bluestore_experimental': True, 'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE, 'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE} @@ -308,7 +297,6 @@ def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -341,7 +329,6 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0} @@ -355,7 +342,6 @@ def test_get_ceph_context_w_config_flags(self, mock_config, mock_config2): @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1") @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1") @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) - @patch.object(ceph_utils, 'use_bluestore', lambda *args: False) @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1', '10.0.0.2']) @patch.object(ceph_hooks, 'get_networks', lambda *args: "") @@ -390,7 +376,6 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0} @@ -440,7 +425,6 @@ def test_get_ceph_context_bluestore_compression( 'upgrade_in_progress': False, 'use_syslog': 'true', 'bdev_discard': True, - 'bluestore': False, 'bluestore_experimental': False, 'bluestore_block_wal_size': 0, 'bluestore_block_db_size': 0, diff --git a/ceph-osd/unit_tests/test_upgrade.py b/ceph-osd/unit_tests/test_upgrade.py index dd3f223b..1e7be80d 100644 --- a/ceph-osd/unit_tests/test_upgrade.py +++ b/ceph-osd/unit_tests/test_upgrade.py @@ -15,7 +15,9 @@ class UpgradeRollingTestCase(CharmTestCase): @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_osd_cluster') - def test_check_for_upgrade(self, roll_osd_cluster, hookenv, + @patch('utils.find_filestore_osds') + def test_check_for_upgrade(self, find_filestore_osds, + roll_osd_cluster, hookenv, emit_cephconf, version, exists, dirs_need_ownership_update, notify_mon_of_upgrade): @@ -47,7 +49,9 @@ def test_check_for_upgrade(self, roll_osd_cluster, hookenv, @patch('ceph_hooks.emit_cephconf') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_osd_cluster') - def test_resume_failed_upgrade(self, roll_osd_cluster, + @patch('utils.find_filestore_osds') + def test_resume_failed_upgrade(self, find_filestore_osds, + roll_osd_cluster, hookenv, emit_cephconf, version, exists, dirs_need_ownership_update, @@ -94,7 +98,9 @@ def test_check_for_upgrade_not_bootstrapped(self, roll_monitor_cluster, @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_monitor_cluster') - def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster, + 
@patch('utils.find_filestore_osds') + def test_check_for_upgrade_from_pike_to_queens(self, find_filestore_osds, + roll_monitor_cluster, hookenv, is_bootstrapped, add_source, dirs_need_ownership_update, @@ -116,7 +122,9 @@ def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster, @patch('ceph_hooks.ceph.is_bootstrapped') @patch('ceph_hooks.hookenv') @patch('ceph_hooks.ceph.roll_monitor_cluster') - def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, + @patch('utils.find_filestore_osds') + def test_check_for_upgrade_from_rocky_to_stein(self, find_filestore_osds, + roll_monitor_cluster, hookenv, is_bootstrapped, add_source, dirs_need_ownership_update, @@ -132,6 +140,30 @@ def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster, roll_monitor_cluster.assert_not_called() add_source.assert_called_with('cloud:bionic-stein', 'some-key') + @patch('ceph_hooks.os.path.exists') + @patch('ceph_hooks.ceph.dirs_need_ownership_update') + @patch('ceph_hooks.add_source') + @patch('ceph_hooks.ceph.is_bootstrapped') + @patch('ceph_hooks.hookenv') + @patch('ceph_hooks.ceph.roll_monitor_cluster') + @patch('utils.find_filestore_osds') + def test_check_for_upgrade_reef_filestore(self, find_filestore_osds, + roll_monitor_cluster, + hookenv, is_bootstrapped, + add_source, + dirs_need_ownership_update, + exists): + exists.return_value = True + is_bootstrapped.return_value = True + find_filestore_osds.return_value = ['ceph-0'] + hookenv.config.side_effect = self.test_config + self.test_config.set('key', 'some-key') + self.test_config.set_previous('source', 'cloud:jammy-antelope') + self.test_config.set('source', 'cloud:jammy-bobcat') + check_for_upgrade() + roll_monitor_cluster.assert_not_called() + dirs_need_ownership_update.assert_not_called() + class UpgradeUtilTestCase(CharmTestCase): @patch('ceph_hooks.relation_ids') From 92e791f552e4bb342b61bf096b830479b8b17d76 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 3 Oct 2023 12:38:12 +0200 Subject: [PATCH 2564/2699] Add reef functests to CI Via the jammy-bobcat UCA Don't rely on feature branch, vendor in context adapter instead Change-Id: Ifc5f04ce2259d19cb120a1434a2f80b178bd4dc3 --- ceph-nfs/osci.yaml | 8 +++++ ceph-nfs/requirements.txt | 2 +- ceph-nfs/src/charm.py | 47 +++++++++++++++++++++++- ceph-nfs/tests/bundles/jammy-reef.yaml | 50 ++++++++++++++++++++++++++ ceph-nfs/tests/tests.yaml | 3 +- 5 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 ceph-nfs/tests/bundles/jammy-reef.yaml diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index 7500386e..c3ff84ec 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -7,6 +7,7 @@ - focal-pacific - focal-quincy - jammy-quincy + - jammy-reef - lunar-quincy - mantic-quincy vars: @@ -41,6 +42,13 @@ - focal-quincy vars: tox_extra_args: -- jammy-quincy +- job: + name: jammy-reef + parent: func-target + dependencies: + - focal-quincy + vars: + tox_extra_args: -- jammy-reef - job: name: lunar-quincy parent: func-target diff --git a/ceph-nfs/requirements.txt b/ceph-nfs/requirements.txt index 6e7ef1d8..1a8dbfad 100644 --- a/ceph-nfs/requirements.txt +++ b/ceph-nfs/requirements.txt @@ -2,5 +2,5 @@ ops <= 1.6.0 git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client -git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack 
+git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack git+https://opendev.org/openstack/charm-interface-hacluster#egg=interface_hacluster diff --git a/ceph-nfs/src/charm.py b/ceph-nfs/src/charm.py index 01caa849..10061250 100755 --- a/ceph-nfs/src/charm.py +++ b/ceph-nfs/src/charm.py @@ -101,8 +101,53 @@ def hostname(self): return socket.gethostname() -class CephNFSAdapters( +class OpenStackContextAdapters( ops_openstack.adapters.OpenStackRelationAdapters): + """ + Augmentation of OpenStackRelationAdapters that also includes contexts. + Proposed for upstreaming + https://review.opendev.org/c/openstack/charm-ops-openstack/+/897238 + """ + + relation_adapters = {} + + def __init__(self, relations, charm_instance, + options_instance=None, contexts=None): + """ + :param relations: List of instances of relation classes + :param options: Configuration class to use (DEPRECATED) + :param options_instance: Instance of Configuration class to use + :param charm_instance: optional charm_instance that is captured as a + weakref for use on the adapter. + :param contexts: Optional list of contexts + """ + super().__init__( + relations, charm_instance, + options_instance=options_instance + ) + if contexts is None: + contexts = () + self._contexts = set() + for context in contexts: + self.add_context(context) + + def __iter__(self): + """ + Iterate over the relations and contexts presented to the charm. + """ + for ref in self._relations.union(self._contexts): + yield ref, getattr(self, ref) + + def add_context(self, context): + """Add the context to this adapters instance. + + :param relation: a RAW context + """ + setattr(self, context.name, context) + self._contexts.add(context.name) + + +class CephNFSAdapters(OpenStackContextAdapters): """Collection of relation adapters.""" relation_adapters = { diff --git a/ceph-nfs/tests/bundles/jammy-reef.yaml b/ceph-nfs/tests/bundles/jammy-reef.yaml new file mode 100644 index 00000000..e0c186cf --- /dev/null +++ b/ceph-nfs/tests/bundles/jammy-reef.yaml @@ -0,0 +1,50 @@ +options: + source: &source cloud:jammy-bobcat + +local_overlay_enabled: False +series: jammy +applications: + ubuntu: + charm: cs:ubuntu + num_units: 2 + ceph-nfs: + charm: ../../ceph-nfs.charm + num_units: 2 + options: + source: *source + ceph-osd: + charm: ch:ceph-osd + channel: latest/edge + num_units: 3 + storage: + osd-devices: '2,10G' + options: + source: *source + ceph-mon: + charm: ch:ceph-mon + channel: latest/edge + num_units: 3 + options: + monitor-count: '3' + expected-osd-count: 6 + source: *source + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 2 + options: + source: *source + hacluster: + charm: ch:hacluster + channel: 2.4/edge + options: + cluster_count: 2 +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' + - - 'ceph-nfs:ha' + - 'hacluster:ha' diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index cd8f59ec..4660722e 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -2,7 +2,8 @@ charm_name: ceph-nfs gate_bundles: - focal-quincy - focal-pacific - - jammy-pacific + - jammy-quincy + - jammy-reef - mantic-quincy smoke_bundles: - focal-pacific From 49eec5877fc9a85ad662f6c0b0cb96a65951feeb Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Mon, 9 Oct 2023 11:17:09 +1030 Subject: [PATCH 2565/2699] Document how multiple devices must be provided osd-devices, bluestore-db, bluestore-wal, and osd-journal accept multiple devices. 
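The device-list options are parsed with `str.split(' ')`; a minimal illustration, with hypothetical device paths, of why the separator matters:

    # str.split(' ') splits on single spaces only, so a newline-separated
    # value survives as one mangled entry:
    "/dev/sdb /dev/sdc".split(' ')     # ['/dev/sdb', '/dev/sdc']
    "/dev/sdb\n/dev/sdc".split(' ')    # ['/dev/sdb\n/dev/sdc']

    # str.split() with no argument would accept any whitespace, but that
    # is not what the charm uses:
    "/dev/sdb\n/dev/sdc".split()       # ['/dev/sdb', '/dev/sdc']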
Note these must be strictly space separated (not newlines), due to how .split(' ') is used. Change-Id: Ic1b883b791fbd1801bbda4d9b9330117d6aea516 --- ceph-osd/config.yaml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 22908119..484d53a9 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -58,7 +58,7 @@ options: type: string default: description: | - The devices to format and set up as OSD volumes. + The devices to format and set up as OSD volumes, space separated. . These devices are the range of devices that will be checked for and used across all service units, in addition to any volumes attached @@ -84,8 +84,8 @@ options: type: string default: description: | - The device to use as a shared journal drive for all OSDs on a node. By - default a journal partition will be created on each OSD volume device for + The devices to use as shared journal drives for all OSDs on a node, space separated. + By default a journal partition will be created on each OSD volume device for use by that OSD. The default behaviour is also the fallback for the case where the specified journal device does not exist on a node. . @@ -103,7 +103,8 @@ options: type: string default: description: | - Path to a BlueStore WAL block device or file. Should only be set if using + Path to BlueStore WAL block devices or files, space separated. + Should only be set if using a separate physical device that is faster than the DB device (such as an NVDIMM or faster SSD). Otherwise BlueStore automatically maintains the WAL inside of the DB device. This block device is used as an LVM PV and @@ -113,7 +114,8 @@ options: type: string default: description: | - Path to a BlueStore WAL db block device or file. If you have a separate + Path to BlueStore WAL db block devices or files, space separated. + If you have a separate physical device faster than the block device this will store all of the filesystem metadata (RocksDB) there and also integrates the Write Ahead Log (WAL) unless a further separate bluestore-wal device is configured From 4ef0fe137c3563d3e62cd997d133a2b7088cb01c Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Tue, 17 Oct 2023 08:29:49 +1030 Subject: [PATCH 2566/2699] Document rationale for erasure coded pools This addresses the questions: - Why doesn't the charm let me use an erasure coded pool as the default? - How do I use the secondary erasure coded pool once it is configured? Change-Id: Ia885ce41043a4cb04a6d92993474c44e9b994c55 --- ceph-fs/src/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ceph-fs/src/README.md b/ceph-fs/src/README.md index 6acd4310..5c94b7e9 100644 --- a/ceph-fs/src/README.md +++ b/ceph-fs/src/README.md @@ -71,6 +71,14 @@ created: a replicated pool (for storing MDS metadata) and an erasure coded pool `ceph-osd-replication-count` configuration option only applies to the metadata (replicated) pool. +Note that the replicated pool will be the default pool for all data. +The user must manually configure the secondary erasure coded pool for use, +for example by using [file layouts][file-layouts]. + +It's not recommended to use an erasure coded data pool as the default data pool; +see [createfs docs][createfs] for more explanation. +Thus, the charm does not support this case. + Erasure coded pools can be configured via options whose names begin with the `ec-` prefix. @@ -135,3 +143,5 @@ For general charm questions refer to the OpenStack [Charm Guide][cg]. 
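As a usage note for the file-layout approach referenced above: a client typically points a CephFS directory at the erasure coded pool by setting an extended attribute, after which new files under that directory are stored in it. A sketch, assuming a mounted filesystem, a data pool named `ecpool` already attached to the filesystem, and Python's `os.setxattr`:

    import os

    # New files created under this directory will have their data placed
    # in 'ecpool'; existing files keep their current layout.
    os.setxattr('/mnt/cephfs/bulk-data', 'ceph.dir.layout.pool', b'ecpool')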
[cloud-archive-ceph]: https://wiki.ubuntu.com/OpenStack/CloudArchive#Ceph_and_the_UCA [cdg-ceph-erasure-coding]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-erasure-coding.html [ceph-bluestore-compression]: https://docs.ceph.com/en/latest/rados/configuration/bluestore-config-ref/#inline-compression +[createfs]: https://docs.ceph.com/en/latest/cephfs/createfs/ +[file-layouts]: https://docs.ceph.com/en/latest/cephfs/file-layouts/ From 778a8bdd546431473c728285b6004d8c86fe3b6c Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 19 Oct 2023 12:15:40 +0200 Subject: [PATCH 2567/2699] Fix status handling for non-leaders Closes-bug: 2039763 Change-Id: Ic24b5885c1289c2653f4280f7fe2815b1c2afb15 --- ceph-dashboard/src/charm.py | 23 ++++++++++++++++--- .../unit_tests/test_ceph_dashboard_charm.py | 9 -------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 53a306ca..100099d4 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -370,17 +370,32 @@ def _configure_service_apis(self) -> None: ), ) - def _configure_dashboard(self, _event) -> None: + def _configure_dashboard(self, event) -> None: """Configure dashboard""" if not self.mon.mons_ready: logging.info("Not configuring dashboard, mons not ready") return - if not ceph_utils.is_dashboard_enabled(): + if ceph_utils.is_dashboard_enabled(): + if not self.unit.is_leader(): + # leader already enabled the dashboard and also handles config, + # we don't need to do anything except set ourselves as ready + logging.debug("Dashboard already enabled, setting ready.") + self._stored.is_started = True + self.update_status() + return + else: if self.unit.is_leader(): + # we're the leader, enable dashboard and continue + # configuration below + logging.debug("Enabling dashboard as leader.") ceph_utils.mgr_enable_dashboard() else: - logging.info("Dashboard not enabled, deferring event.") + # non-leader, defer event until leader has enabled and + # configured the dashboard + logging.info("Dashboard not enabled, deferring event on " + "non-leader") + event.defer() return if self.unit.is_leader(): @@ -743,6 +758,7 @@ def _enable_ssl_from_config(self, event) -> None: return self._configure_tls_from_charm_config() + self.update_status() # Certificates relation handle. def _enable_ssl_from_relation(self, event) -> None: @@ -763,6 +779,7 @@ def _enable_ssl_from_relation(self, event) -> None: return # SSL is already configured. 
self._configure_tls_from_relation() + self.update_status() if __name__ == "__main__": diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 904b861e..99028ca8 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -356,15 +356,6 @@ def test_configure_dashboard(self, subprocess): 'ceph-mon/0', { 'mon-ready': 'True'}) - - self.ceph_utils.mgr_config_set.reset_mock() - self.harness.set_leader(False) - self.harness.charm._configure_dashboard(None) - self.assertFalse(self.ceph_utils.mgr_enable_dashboard.called) - self.ceph_utils.mgr_config_set.assert_called_once_with( - 'mgr/dashboard/server1/server_addr', - '10.0.0.10') - self.ceph_utils.mgr_config_set.reset_mock() self.ceph_utils.is_dashboard_enabled.return_value = True self.harness.set_leader() From 7843e36143cbb7355572b837062377abfbe15356 Mon Sep 17 00:00:00 2001 From: Samuel Walladge Date: Thu, 26 Oct 2023 10:19:02 +1030 Subject: [PATCH 2568/2699] Fix charm for tox4 compatibility Also remove python 3.5 targets as this is long not supported. And add the mocking for charmhelpers - see https://review.opendev.org/c/openstack/charm-heat/+/899195 for example Related-Bug: 2002788 Change-Id: Ic70092a45d595d337ea26505139c2f6515cb1d38 --- ceph-iscsi/build-requirements.txt | 7 ------ ceph-iscsi/osci.yaml | 2 +- ceph-iscsi/test-requirements.txt | 3 --- ceph-iscsi/tox.ini | 42 ++++++++++++------------------- ceph-iscsi/unit_tests/__init__.py | 12 +++++++++ 5 files changed, 29 insertions(+), 37 deletions(-) delete mode 100644 ceph-iscsi/build-requirements.txt diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt deleted file mode 100644 index b6d2452f..00000000 --- a/ceph-iscsi/build-requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -# NOTES(lourot): -# * We don't install charmcraft via pip anymore because it anyway spins up a -# container and scp the system's charmcraft snap inside it. So the charmcraft -# snap is necessary on the system anyway. -# * `tox -e build` successfully validated with charmcraft 1.2.1 - -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index d502701a..26525448 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -15,7 +15,7 @@ needs_charm_build: true charm_build_name: ceph-iscsi build_type: charmcraft - charmcraft_channel: 2.0/stable + charmcraft_channel: 2.2/stable - job: name: ceph-iscsi-focal-quincy parent: func-target diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index 358e1bc3..552381d2 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -8,9 +8,6 @@ flake8>=2.2.4,<=2.4.1 stestr>=2.2.0 requests>=2.18.4 psutil -# oslo.i18n dropped py35 support -oslo.i18n<4.0.0 git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack -pytz # workaround for 14.04 pip/tox pyudev # for ceph-* charm unit tests (not mocked?) diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index c8550616..a7703d60 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -7,44 +7,30 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -# * It is necessary to declare setuptools as a dependency otherwise tox will -# fail very early at not being able to load it. The version pinning is in -# line with `pip.sh`. -requires = pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 CHARM_DIR={envdir} -install_command = - pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = git add-to-archive.py bash charmcraft - rename.sh -passenv = HOME TERM CS_* OS_* TEST_* + {toxinidir}/rename.sh +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt -[testenv:py35] -basepython = python3.5 -# python3.5 is irrelevant on a focal+ charm. -commands = /bin/true - [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt @@ -70,6 +56,11 @@ basepython = python3.10 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py311] +basepython = python3.11 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -115,7 +106,6 @@ commands = {posargs} [testenv:build] basepython = python3 -deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean charmcraft -v pack diff --git a/ceph-iscsi/unit_tests/__init__.py b/ceph-iscsi/unit_tests/__init__.py index 577ab7e9..7c565f59 100644 --- a/ceph-iscsi/unit_tests/__init__.py +++ b/ceph-iscsi/unit_tests/__init__.py @@ -17,3 +17,15 @@ # Mock out secrets to make py35 happy. sys.modules['secrets'] = mock.MagicMock() + +# Patch out lsb_release() and get_platform() as unit tests should be fully +# insulated from the underlying platform. Unit tests assume that the system is +# ubuntu jammy. +mock.patch( + 'charmhelpers.osplatform.get_platform', return_value='ubuntu' +).start() +mock.patch( + 'charmhelpers.core.host.lsb_release', + return_value={ + 'DISTRIB_CODENAME': 'jammy' + }).start() From 8fa2e8f737ff94ddaffde36638cdd8aa3be06076 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 25 Oct 2023 11:54:55 -0300 Subject: [PATCH 2569/2699] Reject invalid username and password This patchset checks that the username and password parameters comply with ceph-iscsi requirements. 
Change-Id: Ida720e5381d0ff3446cf88405452f8a3778c7efc Fixes-bug: #2040328 --- ceph-iscsi/src/charm.py | 31 +++++++++++++++++-- .../unit_tests/test_ceph_iscsi_charm.py | 8 ++--- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index c5eb3fdd..165e9c26 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -521,10 +521,37 @@ def calculate_target_pools(self, event): rbd_pool_name = self.data_pool_name return rbd_pool_name, ec_rbd_metadata_pool + def _validate_str(self, value, allowed, min_len, max_len, typ): + if any(s for s in value if s not in allowed): + raise ValueError('%s can only contain: %s' % (typ, allowed)) + elif len(value) < min_len or len(value) > max_len: + raise ValueError('%s must be between %d and %d characters long' % + (typ, min_len, max_len)) + + def _validate_username(self, value): + self._validate_str(value, string.ascii_letters + string.digits + + '.@-_:', 8, 64, 'username') + + def _validate_password(self, value): + self._validate_str(value, string.ascii_letters + string.digits + + '@-_/', 12, 16, 'password') + def on_create_target_action(self, event): """Create an iSCSI target.""" gw_client = gwcli_client.GatewayClient() target = event.params.get('iqn', self.DEFAULT_TARGET) + username = event.params['client-username'] + passwd = event.params['client-password'] + try: + self._validate_username(username) + self._validate_password(passwd) + except ValueError as exc: + logging.error(str(exc)) + fail_str = 'invalid username or password: %s' % str(exc) + event.fail(fail_str) + event.set_results({'err': fail_str}) + return + gateway_units = event.params.get( 'gateway-units', [u for u in self.peers.ready_peer_details.keys()]) @@ -567,8 +594,8 @@ def on_create_target_action(self, event): gw_client.add_client_auth( target, event.params['client-initiatorname'], - event.params['client-username'], - event.params['client-password']) + username, + passwd) gw_client.add_disk_to_client( target, event.params['client-initiatorname'], diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index b5d30839..02e29abf 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -237,7 +237,7 @@ def test_on_create_target_action(self, _getfqdn): 'image-size': '5G', 'client-initiatorname': 'client-initiator', 'client-username': 'myusername', - 'client-password': 'mypassword'} + 'client-password': 'mypassword123'} self.harness.charm.on_create_target_action(action_event) self.gwc.add_gateway_to_target.assert_has_calls([ call( @@ -260,7 +260,7 @@ def test_on_create_target_action(self, _getfqdn): 'iqn.mock.iscsi-gw:iscsi-igw', 'client-initiator', 'myusername', - 'mypassword') + 'mypassword123') self.gwc.add_disk_to_client.assert_called_once_with( 'iqn.mock.iscsi-gw:iscsi-igw', 'client-initiator', @@ -283,7 +283,7 @@ def test_on_create_target_action_ec(self, _getfqdn): 'image-size': '5G', 'client-initiatorname': 'client-initiator', 'client-username': 'myusername', - 'client-password': 'mypassword'} + 'client-password': 'mypassword123'} self.harness.charm.on_create_target_action(action_event) self.subprocess.check_call.assert_called_once_with( [ @@ -315,7 +315,7 @@ def test_on_create_target_action_ec(self, _getfqdn): 'iqn.mock.iscsi-gw:iscsi-igw', 'client-initiator', 'myusername', - 'mypassword') + 'mypassword123') self.gwc.add_disk_to_client.assert_called_once_with( 'iqn.mock.iscsi-gw:iscsi-igw', 'client-initiator', 
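Taken out of the charm, the validation added by this patch amounts to a character-set check followed by a length check. A standalone sketch mirroring `_validate_str()` and its two wrappers:

    import string

    USERNAME_CHARS = string.ascii_letters + string.digits + '.@-_:'
    PASSWORD_CHARS = string.ascii_letters + string.digits + '@-_/'

    def validate_str(value, allowed, min_len, max_len, typ):
        # Reject characters outside the whitelist first, then enforce the
        # length bounds, as the charm does.
        if any(ch not in allowed for ch in value):
            raise ValueError('%s can only contain: %s' % (typ, allowed))
        if not (min_len <= len(value) <= max_len):
            raise ValueError('%s must be between %d and %d characters long'
                             % (typ, min_len, max_len))

    validate_str('myusername', USERNAME_CHARS, 8, 64, 'username')      # ok
    validate_str('mypassword123', PASSWORD_CHARS, 12, 16, 'password')  # ok
    validate_str('short', PASSWORD_CHARS, 12, 16, 'password')          # ValueError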
From 5e4ded29f9139003d23eae0c1bae62da7fe45c00 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 24 Oct 2023 20:29:33 -0300 Subject: [PATCH 2570/2699] Set iscsi pools' application This patchset sets the default application name for the charm-created pools to 'rbd'. Closes-bug: #2040274 Change-Id: I781a2e33b3de9f55b97fddd661214f601fc6134e --- ceph-iscsi/src/charm.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index c5eb3fdd..59b2595f 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -312,7 +312,8 @@ def request_ceph_pool(self, event): 'configuration?: "{}"'.format(str(e))) return self.ceph_client.create_replicated_pool( - self.config_get('gateway-metadata-pool')) + self.config_get('gateway-metadata-pool'), + app_name='rbd') weight = self.config_get('ceph-pool-weight') replicas = self.config_get('ceph-osd-replication-count') if self.config_get('pool-type') == 'erasure-coded': @@ -361,10 +362,12 @@ def request_ceph_pool(self, event): erasure_profile=profile_name, weight=weight, allow_ec_overwrites=True, + app_name='rbd', **bcomp_kwargs ) self.ceph_client.create_replicated_pool( name=self.metadata_pool_name, + app_name='rbd', weight=metadata_weight ) else: @@ -372,6 +375,7 @@ def request_ceph_pool(self, event): name=self.data_pool_name, replicas=replicas, weight=weight, + app_name='rbd', **bcomp_kwargs) logging.info("Requesting permissions") self.ceph_client.request_ceph_permissions( From 41cfd9479c4ae301302fcb92665668a42c44eb81 Mon Sep 17 00:00:00 2001 From: Ionut Balutoiu Date: Mon, 30 Oct 2023 16:15:55 +0200 Subject: [PATCH 2571/2699] Improve platform mocking Patch out charmhelpers.osplatform.get_platform() and charmhelpers.core.host.lsb_release() globally in the unit tests to insulate the unit tests from the platform that the unit tests are being run on. Also, add mock for `charmhelpers.contrib.openstack.context.is_ipv6_disabled` in the `HAProxyContextTests.test_ctxt` unit test. The charmhelpers function `is_ipv6_disabled` calls `sysctl` and this doesn't exist on Debian container images. This fixes the following unit test failure: ``` FileNotFoundError: [Errno 2] No such file or directory: 'sysctl' ``` Signed-off-by: Ionut Balutoiu Co-authored-by: Alex Kavanagh Change-Id: I60bc9550a83eb342e78d3c1916d98bfaa8035572 --- ceph-radosgw/unit_tests/__init__.py | 13 +++++++++++++ .../unit_tests/test_ceph_radosgw_context.py | 4 +++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-radosgw/unit_tests/__init__.py b/ceph-radosgw/unit_tests/__init__.py index ed0779fb..e9479e96 100644 --- a/ceph-radosgw/unit_tests/__init__.py +++ b/ceph-radosgw/unit_tests/__init__.py @@ -13,8 +13,21 @@ # limitations under the License. import sys +from unittest import mock sys.path.append('actions') sys.path.append('hooks') sys.path.append('lib') sys.path.append('unit_tests') + +# Patch out lsb_release() and get_platform() as unit tests should be fully +# insulated from the underlying platform. Unit tests assume that the system is +# ubuntu jammy. 
+mock.patch(
+    'charmhelpers.osplatform.get_platform', return_value='ubuntu'
+).start()
+mock.patch(
+    'charmhelpers.core.host.lsb_release',
+    return_value={
+        'DISTRIB_CODENAME': 'jammy'
+    }).start()
diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
index b259a7f6..b426c2ae 100644
--- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
+++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py
@@ -47,6 +47,7 @@ def setUp(self):
         self.arch.return_value = 'amd64'

     @patch('ceph_radosgw_context.https')
+    @patch('charmhelpers.contrib.openstack.context.is_ipv6_disabled')
     @patch('charmhelpers.contrib.openstack.context.get_relation_ip')
     @patch('charmhelpers.contrib.openstack.context.mkdir')
     @patch('charmhelpers.contrib.openstack.context.local_unit')
@@ -56,7 +57,7 @@ def setUp(self):
     @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids')
     def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig,
                   _ctxtconfig, _local_unit, _mkdir, _get_relation_ip,
-                  _mock_https):
+                  _is_ipv6_disabled, _mock_https):
         _mock_https.return_value = False
         _get_relation_ip.return_value = '10.0.0.10'
         _ctxtconfig.side_effect = self.test_config.get
@@ -74,6 +75,7 @@ def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig,
             'https': False
         }
         self.assertEqual(expect, haproxy_context())
+        _is_ipv6_disabled.assert_called_once_with()

 class MonContextTest(CharmTestCase):

From 68ef72f362e111b637e1860d54fbd1ab1b77d3ab Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Wed, 1 Nov 2023 21:45:59 +0900
Subject: [PATCH 2572/2699] Don't set the default pool as "iscsi"

"iscsi" is the name of the metadata pool by default in the charm and is
not meant to be used for tenant data. The charm can fill out the data
pool name automatically with `def data_pool_name` or can accept a
user-specified value.

After fixing the issue, the "iscsi" pool will be used only for hosting
gateway.conf as a rados object, so reflect that purpose in the
application name for the pool too.

Also, update the command syntax so it does not print the deprecation
warning:
"rbd: --user is deprecated, use --id"

Closes-Bug: #2042419
Related-Bug: #2040274
Change-Id: Iec275190854edcc85915d93db233f444c92fbb17
---
 ceph-iscsi/actions.yaml                        | 2 --
 ceph-iscsi/src/charm.py                        | 4 ++--
 ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml
index a474f3c3..683e66e9 100644
--- a/ceph-iscsi/actions.yaml
+++ b/ceph-iscsi/actions.yaml
@@ -42,11 +42,9 @@ create-target:
     description: "Image name "
   rbd-pool-name:
     type: string
-    default: iscsi
     description: "Name of ceph pool to use to back target "
   ec-rbd-metadata-pool:
     type: string
-    default: iscsi
     description: "Name of the metadata pool to use with rbd-pool-name if rbd-pool-name is erasure coded."
   client-initiatorname:
     type: string
diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py
index 747ee21d..9ace9cc1 100755
--- a/ceph-iscsi/src/charm.py
+++ b/ceph-iscsi/src/charm.py
@@ -313,7 +313,7 @@ def request_ceph_pool(self, event):
             return
         self.ceph_client.create_replicated_pool(
             self.config_get('gateway-metadata-pool'),
-            app_name='rbd')
+            app_name='rados')
         weight = self.config_get('ceph-pool-weight')
         replicas = self.config_get('ceph-osd-replication-count')
         if self.config_get('pool-type') == 'erasure-coded':
@@ -566,7 +566,7 @@ def on_create_target_action(self, event):
         # as the gwcli does not currently handle the creation.
         cmd = [
             'rbd',
-            '--user', 'ceph-iscsi',
+            '--id', 'ceph-iscsi',
             '--conf', str(self.CEPH_CONF),
             'create',
             '--size', event.params['image-size'],
diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
index 02e29abf..f4d6c3a2 100644
--- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
+++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
@@ -288,7 +288,7 @@ def test_on_create_target_action_ec(self, _getfqdn):
         self.subprocess.check_call.assert_called_once_with(
             [
                 'rbd',
-                '--user', 'ceph-iscsi',
+                '--id', 'ceph-iscsi',
                 '--conf', '/etc/ceph/iscsi/ceph.conf',
                 'create',
                 '--size', '5G',

From eece895a9eba32fd6b353ad0fdbce06684c1059c Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Mon, 6 Nov 2023 13:09:30 +0000
Subject: [PATCH 2573/2699] Fix key errors for rbd-pool-name and ec-rbd-metadata-pool

The existing code assumes that event.params['rbd-pool-name'] and
event.params['ec-rbd-metadata-pool'] always have a key. Set an empty
string as the default, to be filled in by other parts of the code.

Follow-up of Iec275190854edcc85915d93db233f444c92fbb17

Closes-Bug: #2042419
Change-Id: I819f0d4456530ace8162456dec1f2fcb7fa95e7f
---
 ceph-iscsi/actions.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ceph-iscsi/actions.yaml b/ceph-iscsi/actions.yaml
index 683e66e9..87205b90 100644
--- a/ceph-iscsi/actions.yaml
+++ b/ceph-iscsi/actions.yaml
@@ -42,9 +42,11 @@ create-target:
     description: "Image name "
   rbd-pool-name:
     type: string
+    default: ""
     description: "Name of ceph pool to use to back target "
   ec-rbd-metadata-pool:
     type: string
+    default: ""
     description: "Name of the metadata pool to use with rbd-pool-name if rbd-pool-name is erasure coded."
   client-initiatorname:
     type: string

From a16c28e82986e90d596c4e08a9e485a2a96f5927 Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Fri, 20 Oct 2023 23:12:10 +0900
Subject: [PATCH 2574/2699] Allow ceph device scrape-health-metrics

Ceph has a built-in facility to collect device health metrics through
the smartctl and nvme commands. It relies on sudo spawned from the
ceph-osd process, so this needs to be accounted for in the AppArmor
policy.
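Once the policy permits the collection, the gathered metrics are visible through Ceph's device-health commands; a hedged sketch of querying them from a mon node (the device id below is hypothetical):

    import subprocess

    # List the devices Ceph knows about, then fetch the SMART/NVMe health
    # metrics collected via the sudo-spawned smartctl/nvme calls.
    print(subprocess.check_output(['ceph', 'device', 'ls'], text=True))
    print(subprocess.check_output(
        ['ceph', 'device', 'get-health-metrics',
         'SEAGATE_ST4000NM0023_Z1Z0ABCD'],  # hypothetical device id
        text=True))

The sudoers entries that allow those calls are quoted next: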
[/etc/sudoers.d/ceph-smartctl in ceph-base package] > ## allow ceph daemons (which run as user ceph) to collect device > ## health metrics > > ceph ALL=NOPASSWD: /usr/sbin/smartctl -x --json=o /dev/* > ceph ALL=NOPASSWD: /usr/sbin/nvme * smart-log-add --json /dev/* Also sync charmhelpers and mock platform info Closes-Bug: #2031637 Change-Id: I981a5db0fd49eca83aa8a619f0cbd0d34a533842 --- ceph-osd/files/apparmor/usr.bin.ceph-osd | 56 +++++++++++++++++++ .../charmhelpers/contrib/openstack/utils.py | 24 ++------ ceph-osd/test-requirements.txt | 1 + ceph-osd/unit_tests/__init__.py | 13 +++++ 4 files changed, 74 insertions(+), 20 deletions(-) diff --git a/ceph-osd/files/apparmor/usr.bin.ceph-osd b/ceph-osd/files/apparmor/usr.bin.ceph-osd index 95846077..ac11e4d9 100644 --- a/ceph-osd/files/apparmor/usr.bin.ceph-osd +++ b/ceph-osd/files/apparmor/usr.bin.ceph-osd @@ -4,6 +4,7 @@ /usr/bin/ceph-osd { #include #include + #include #include /usr/bin/ceph-osd mr, @@ -36,8 +37,12 @@ /{,var/}run/ceph/* rwk, /{,var/}tmp/ r, + / r, /dev/ r, /dev/** rwk, + /run/udev/data/* r, + /sys/bus/nd/devices/ r, + /sys/bus/nd/devices/** r, /sys/devices/** r, /run/blkid/blkid.tab r, @@ -48,4 +53,55 @@ /usr/share/distro-info/** r, /etc/lsb-release r, /etc/debian_version r, + + /usr/bin/sudo Px -> ceph-osd-sudo, +} + +profile ceph-osd-sudo flags=(attach_disconnected) { + #include + #include + #include + #include + + capability audit_write, + capability setgid, + capability setuid, + capability sys_resource, + + /usr/bin/sudo r, + /usr/libexec/sudo/* mr, + + /etc/default/locale r, + /etc/environment r, + /etc/security/limits.d/ r, + /etc/security/limits.d/* r, + /etc/sudo.conf r, + /etc/sudoers r, + /etc/sudoers.d/ r, + /etc/sudoers.d/* r, + + owner @{PROC}/1/limits r, + owner @{PROC}/@{pids}/stat r, + + /usr/sbin/nvme Cx, + /usr/sbin/smartctl Cx, + + profile /usr/sbin/nvme { + #include + + /usr/sbin/nvme r, + } + + profile /usr/sbin/smartctl { + #include + + capability sys_admin, + capability sys_rawio, + + /usr/sbin/smartctl r, + /var/lib/smartmontools/** r, + + /dev/* r, + /sys/devices/** r, + } } diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index e98be2c5..429b09e5 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -416,17 +416,6 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, error_out(e) -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): - if k == codename: - return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] @@ -585,7 +574,6 @@ def _do_install(): return openstack_release().get('OPENSTACK_CODENAME') -@cached def openstack_release(): """Return /etc/os-release in a dict.""" d = {} @@ -847,14 +835,10 @@ def openstack_upgrade_available(package): if not cur_vers: # The package has not been installed yet do not attempt upgrade return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - try: - avail_vers = get_os_version_install_source(src) - except Exception: - avail_vers = cur_vers 
+ try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 diff --git a/ceph-osd/test-requirements.txt b/ceph-osd/test-requirements.txt index 40d87f30..e972406e 100644 --- a/ceph-osd/test-requirements.txt +++ b/ceph-osd/test-requirements.txt @@ -26,3 +26,4 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests +psutil diff --git a/ceph-osd/unit_tests/__init__.py b/ceph-osd/unit_tests/__init__.py index b8024d8a..f460572a 100644 --- a/ceph-osd/unit_tests/__init__.py +++ b/ceph-osd/unit_tests/__init__.py @@ -14,6 +14,7 @@ import sys from unittest.mock import MagicMock +from unittest import mock sys.path.append('hooks') sys.path.append('lib') @@ -21,3 +22,15 @@ sys.path.append('unit_tests') sys.modules["tabulate"] = MagicMock() + +# Patch out lsb_release() and get_platform() as unit tests should be fully +# insulated from the underlying platform. Unit tests assume that the system is +# ubuntu jammy. +mock.patch( + 'charmhelpers.osplatform.get_platform', return_value='ubuntu' +).start() +mock.patch( + 'charmhelpers.core.host.lsb_release', + return_value={ + 'DISTRIB_CODENAME': 'jammy' + }).start() From 8539db317f0436a706ff6dec68f492f6efb3c6b7 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 10 Nov 2023 12:06:39 +0200 Subject: [PATCH 2575/2699] Doc: add contribution links Change-Id: I6b9e7499604f360b7a7ec13696afde03e0817096 --- ceph-dashboard/README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/ceph-dashboard/README.md b/ceph-dashboard/README.md index 933b5d6b..4b8b6bcf 100644 --- a/ceph-dashboard/README.md +++ b/ceph-dashboard/README.md @@ -176,9 +176,20 @@ The OpenStack Charms project maintains two documentation guides: and support notes * [OpenStack Charms Deployment Guide][cdg]: for charm usage information +# Contributing + +Please see the [OpenStack Charm Guide community section][cgc] for contribution guidelines. Specifically, see the [software contributions section][swc] for software contribution guidelines, and the [documentation contributions section][docc] for guidelines on documentation contribution. + + # Bugs -Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. +Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. Note the [bug submission section][bugc] on guidelines for reporting bugs. + + +# License + +The ceph-dashboard charm is free software, distributed under the Apache 2.0 software licence. See the LICENSE file for more information. + @@ -188,6 +199,10 @@ Please report bugs on [Launchpad][lp-bugs-charm-ceph-dashboard]. 
[cg]: https://docs.openstack.org/charm-guide
[cdg]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide
[cdg-tls]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-certificate-management.html
[cgc]: https://docs.openstack.org/charm-guide/latest/community/
[swc]: https://docs.openstack.org/charm-guide/latest/community/software-contrib/
[docc]: https://docs.openstack.org/charm-guide/latest/community/doc-contrib/
[bugc]: https://docs.openstack.org/charm-guide/latest/community/software-bug.html
[lp-bugs-charm-ceph-dashboard]: https://bugs.launchpad.net/charm-ceph-dashboard
[anchor-grafana-dashboards]: #embedded-grafana-dashboards
[loadbalancer-charm]: https://jaas.ai/u/openstack-charmers/openstack-loadbalancer

From fae50774b49d03d481c4d3b7018f9d8c8b4565fd Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Fri, 10 Nov 2023 12:46:44 +0200
Subject: [PATCH 2576/2699] Tox: add Python 3.11 section to tox.ini

Also improve mocking in the unit tests.

Change-Id: Ie4356c23e97cec48f5731323bc90d63335ecc753
---
 ceph-mon/tox.ini                |  5 +++++
 ceph-mon/unit_tests/__init__.py | 17 +++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini
index cdfdb44c..ec2ce221 100644
--- a/ceph-mon/tox.ini
+++ b/ceph-mon/tox.ini
@@ -69,6 +69,11 @@ basepython = python3.10
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt

+[testenv:py311]
+basepython = python3.11
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py3]
 basepython = python3
 deps = -r{toxinidir}/requirements.txt
diff --git a/ceph-mon/unit_tests/__init__.py b/ceph-mon/unit_tests/__init__.py
index f439d3f1..91c4fa66 100644
--- a/ceph-mon/unit_tests/__init__.py
+++ b/ceph-mon/unit_tests/__init__.py
@@ -13,8 +13,25 @@
 # limitations under the License.

 import sys
+
+from unittest import mock
+
+
 sys.path.append('hooks')
 sys.path.append('lib')
 sys.path.append('unit_tests')
 sys.path.append('actions')
 sys.path.append('src')
+
+
+# Patch out lsb_release() and get_platform() as unit tests should be fully
+# insulated from the underlying platform. Unit tests assume that the system is
+# ubuntu jammy.
+mock.patch(
+    'charmhelpers.osplatform.get_platform', return_value='ubuntu'
+).start()
+mock.patch(
+    'charmhelpers.core.host.lsb_release',
+    return_value={
+        'DISTRIB_CODENAME': 'jammy'
+    }).start()

From 1f4dc7b32dcc5ae3951d4b0cce452562fd52af52 Mon Sep 17 00:00:00 2001
From: Samuel Walladge
Date: Mon, 6 Nov 2023 17:11:19 +1030
Subject: [PATCH 2577/2699] Add config option for rbd_stats_pools

This allows configuring RBD IO statistics collection for RBD pools.

Co-authored-by: Yoshi Kadokawa
Closes-Bug: #2042405
Related-Bug: #1989648
Change-Id: I2252163533a312f0f53165f946711ab20bb0e3c9
---
 ceph-mon/config.yaml                     | 10 +++++
 ceph-mon/src/ceph_hooks.py               |  5 +++
 ceph-mon/src/ceph_metrics.py             |  2 +
 ceph-mon/src/utils.py                    | 11 +++++
 ceph-mon/unit_tests/test_ceph_hooks.py   | 55 +++++++++++++++++++++++-
 ceph-mon/unit_tests/test_ceph_metrics.py |  1 +
 6 files changed, 83 insertions(+), 1 deletion(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 1a5375de..882e9b81 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -326,3 +326,13 @@ options:
     description: |
       The balancer mode used by the Ceph manager. Can only be set for
       Luminous or later versions, and only when the balancer module is
      enabled.
+  rbd-stats-pools:
+    type: string
+    default: ""
+    description: |
+      Set pools to collect RBD per-image IO statistics by enabling dynamic OSD performance counters.
+      It can be set to:
+        - a comma separated list of RBD pools to enable (e.g. "pool1,pool2,poolN")
+        - "*" to enable for all RBD pools
+        - "" to disable statistics
+      For more information: https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics
diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py
index a04fc7bd..6310e60f 100755
--- a/ceph-mon/src/ceph_hooks.py
+++ b/ceph-mon/src/ceph_hooks.py
@@ -92,6 +92,7 @@
     get_rbd_features,
     get_ceph_osd_releases,
     execute_post_osd_upgrade_steps,
+    mgr_config_set_rbd_stats_pools,
     mgr_disable_module,
     mgr_enable_module,
     is_mgr_module_enabled,
@@ -376,6 +377,9 @@ def config_changed():
         try_disable_insecure_reclaim()
         for relid in relation_ids('dashboard'):
             dashboard_relation(relid)
+
+    mgr_config_set_rbd_stats_pools()
+
     return True

@@ -502,6 +506,7 @@ def prometheus_relation(relid=None, unit=None, prometheus_permitted=None,
                        mgr_enable_module('prometheus'))
     log("checking if prometheus module is enabled")
     if prometheus_permitted and module_enabled:
+        mgr_config_set_rbd_stats_pools()
         log("Updating prometheus")
         data = {
             'hostname': get_relation_ip('prometheus'),
diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py
index 0c320451..25077782 100644
--- a/ceph-mon/src/ceph_metrics.py
+++ b/ceph-mon/src/ceph_metrics.py
@@ -19,6 +19,7 @@
 from charms.prometheus_k8s.v0 import prometheus_scrape
 from charms_ceph import utils as ceph_utils
 from ops.framework import BoundEvent
+from utils import mgr_config_set_rbd_stats_pools

 logger = logging.getLogger(__name__)

@@ -64,6 +65,7 @@ def _on_relation_changed(self, event):
         logger.debug(
             "is_leader and is_bootstrapped, running rel changed: %s", event
         )
+        mgr_config_set_rbd_stats_pools()
         ceph_utils.mgr_enable_module("prometheus")
         logger.debug("module_enabled")
         self.update_alert_rules()
diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py
index 9e41ea82..03fdb9de 100644
--- a/ceph-mon/src/utils.py
+++ b/ceph-mon/src/utils.py
@@ -19,6 +19,7 @@
 import errno
 import tenacity

+from charms_ceph import utils as ceph_utils
 from charmhelpers.core.hookenv import (
     DEBUG,
     cached,
@@ -417,3 +418,13 @@ def _set_require_osd_release(release):
         msg = 'Unable to execute command <{}>'.format(call_error.cmd)
         log(message=msg, level='ERROR')
         raise OsdPostUpgradeError(call_error)
+
+
+def mgr_config_set_rbd_stats_pools():
+    """Update ceph mgr config with the value from rbd-stats-pools config
+    """
+    if is_leader() and ceph_utils.is_bootstrapped():
+        ceph_utils.mgr_config_set(
+            'mgr/prometheus/rbd_stats_pools',
+            config('rbd-stats-pools')
+        )
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index b11bde57..497bb212 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -18,6 +18,7 @@
     mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
                             lambda *args, **kwargs: f(*args, **kwargs))
     import ceph_hooks
+    import utils

 TO_PATCH = [
     'config',
@@ -55,7 +56,8 @@
     'nagios_additional_checks': "",
     'nagios_additional_checks_critical': False,
     'nagios_check_num_osds': False,
-    'disable-pg-max-object-skew': False}
+    'disable-pg-max-object-skew': False,
+    'rbd-stats-pools': 'foo'}

 class CephHooksTestCase(test_utils.CharmTestCase):
@@ -313,6 +315,8 @@ def test_get_client_application_name(self, remote_unit, relation_get,
             ceph_hooks.get_client_application_name('rel:1', None),
'glance') + @patch.object(utils, 'is_leader', lambda: False) + @patch.object(ceph_hooks.ceph, 'mgr_config_set', lambda _key, _value: None) @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -336,6 +340,8 @@ def test_config_changed_no_autotune(self, ceph_hooks.config_changed() mgr_enable_module.assert_not_called() + @patch.object(utils, 'is_leader', lambda: False) + @patch.object(ceph_hooks.ceph, 'mgr_config_set', lambda _key, _value: None) @patch.object(ceph_hooks.ceph, 'monitor_key_set') @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @@ -365,6 +371,8 @@ def test_config_changed_with_autotune(self, mgr_enable_module.assert_called_once_with('pg_autoscaler') monitor_key_set.assert_called_once_with('admin', 'autotune', 'true') + @patch.object(utils, 'is_leader', lambda: False) + @patch.object(ceph_hooks.ceph, 'mgr_config_set', lambda _key, _value: None) @patch.object(ceph_hooks.ceph, 'list_pools') @patch.object(ceph_hooks, 'mgr_enable_module') @patch.object(ceph_hooks, 'emit_cephconf') @@ -674,6 +682,7 @@ def test_bootstrap_source_different_fsid_secret(self): self.assertRaises(AssertionError, ceph_hooks.bootstrap_source_relation_changed) + @patch.object(utils, 'is_leader', lambda: False) @patch.object(ceph_hooks.ceph, 'is_bootstrapped') @patch.object(ceph_hooks, 'emit_cephconf') @patch.object(ceph_hooks, 'leader_get') @@ -708,6 +717,50 @@ def test_config_changed(self, _emit_cephconf.assert_called_once_with() _is_bootstrapped.assert_called_once_with() + @patch.object(utils, 'is_leader', lambda: True) + @patch.object(utils, 'config', lambda _: 'pool1') + @patch.object(utils.ceph_utils, 'mgr_config_set') + @patch.object(ceph_hooks.ceph, 'is_bootstrapped') + @patch.object(ceph_hooks, 'emit_cephconf') + @patch.object(ceph_hooks, 'leader_get') + @patch.object(ceph_hooks, 'is_leader') + @patch.object(ceph_hooks, 'relations_of_type') + @patch.object(ceph_hooks, 'get_mon_hosts') + @patch.object(ceph_hooks, 'check_for_upgrade') + @patch.object(ceph_hooks, 'config') + def test_config_changed_leader( + self, + _config, + _check_for_upgrade, + _get_mon_hosts, + _relations_of_type, + _is_leader, + _leader_get, + _emit_cephconf, + _is_bootstrapped, + _mgr_config_set + ): + config = copy.deepcopy(CHARM_CONFIG) + _config.side_effect = \ + lambda key=None: config.get(key, None) if key else config + _relations_of_type.return_value = False + _is_leader.return_value = True + _leader_get.side_effect = ['fsid', 'monsec', 'fsid', 'monsec'] + _is_bootstrapped.return_value = True + ceph_hooks.config_changed() + _check_for_upgrade.assert_called_once_with() + _get_mon_hosts.assert_called_once_with() + _leader_get.assert_has_calls([ + call('fsid'), + call('monitor-secret'), + ]) + _emit_cephconf.assert_called_once_with() + _is_bootstrapped.assert_has_calls([call(), call()]) + _mgr_config_set.assert_called_once_with( + 'mgr/prometheus/rbd_stats_pools', 'pool1' + ) + + @patch.object(utils, 'is_leader', lambda: False) @patch.object(ceph_hooks, 'emit_cephconf') @patch.object(ceph_hooks, 'create_sysctl') @patch.object(ceph_hooks, 'check_for_upgrade') diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index 0468d28d..f93141f1 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -52,6 +52,7 @@ def test_init(self): "metrics-endpoint", ) + @patch("ceph_metrics.mgr_config_set_rbd_stats_pools", lambda: None) 
@patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) @patch("ceph_metrics.ceph_utils.is_mgr_module_enabled", return_value=False) @patch("ceph_metrics.ceph_utils.mgr_enable_module") From 3bfe28dba2fe07b9742c27d2f4fbb38d70d589c3 Mon Sep 17 00:00:00 2001 From: Pedro Castillo Date: Thu, 9 Nov 2023 15:50:44 -0600 Subject: [PATCH 2578/2699] Refactor cache validation for the ceph-osd NRPE check Closes-Bug: #2019251 Closes-Bug: #2021507 Change-Id: Ib50414756165f2587f0127e572675c7ca8e31ef9 --- .../files/nagios/check_ceph_osd_services.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/ceph-osd/files/nagios/check_ceph_osd_services.py b/ceph-osd/files/nagios/check_ceph_osd_services.py index 7f53b2d7..cff66541 100755 --- a/ceph-osd/files/nagios/check_ceph_osd_services.py +++ b/ceph-osd/files/nagios/check_ceph_osd_services.py @@ -6,9 +6,11 @@ import os import sys +from datetime import datetime, timedelta CRON_CHECK_TMPFILE = 'ceph-osd-checks' NAGIOS_HOME = '/var/lib/nagios' +CACHE_MAX_AGE = timedelta(minutes=10) STATE_OK = 0 STATE_WARNING = 1 @@ -31,6 +33,16 @@ def run_main(): print("File '{}' doesn't exist".format(_tmp_file)) return STATE_UNKNOWN + try: + s = os.stat(_tmp_file) + if datetime.now() - datetime.fromtimestamp(s.st_mtime) > CACHE_MAX_AGE: + print("Status file is older than {}".format(CACHE_MAX_AGE)) + return STATE_CRITICAL + except Exception as e: + print("Something went wrong grabbing stats for the file: {}".format( + str(e))) + return STATE_UNKNOWN + try: with open(_tmp_file, 'rt') as f: lines = f.readlines() @@ -38,12 +50,6 @@ def run_main(): print("Something went wrong reading the file: {}".format(str(e))) return STATE_UNKNOWN - # now remove the file in case the next check fails. - try: - os.remove(_tmp_file) - except Exception: - pass - if not lines: print("checked status file is empty: {}".format(_tmp_file)) return STATE_UNKNOWN From 2797931036ebe09c73b762dc954f2949766330c4 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 14 Nov 2023 11:40:36 +0100 Subject: [PATCH 2579/2699] Functest: don't use LTS image To make tests faster and more robust, avoid creating an additional LTS image. 
Also remove obsolete test bundle options Change-Id: Icea61ae71980a7fcc18b3898c2913d47339e05f6 --- ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml | 4 ---- ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml | 4 ---- ceph-rbd-mirror/src/tests/tests.yaml | 1 - 3 files changed, 9 deletions(-) diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml index ae4f6f43..c3f2d5bd 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml @@ -90,8 +90,6 @@ applications: osd-devices: 'cinder,10G' options: source: *openstack-origin - bluestore: False - use-direct-io: False osd-devices: '/dev/test-non-existent' channel: latest/edge @@ -117,8 +115,6 @@ applications: osd-devices: 'cinder,10G' options: source: *openstack-origin - bluestore: False - use-direct-io: False osd-devices: '/dev/test-non-existent' channel: latest/edge diff --git a/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml b/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml index ab2f69d3..672966fa 100644 --- a/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml @@ -135,8 +135,6 @@ applications: osd-devices: 'cinder,10G' options: source: *openstack-origin - bluestore: False - use-direct-io: False osd-devices: '/dev/test-non-existent' to: - '11' @@ -173,8 +171,6 @@ applications: osd-devices: 'cinder,10G' options: source: *openstack-origin - bluestore: False - use-direct-io: False osd-devices: '/dev/test-non-existent' to: - '18' diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index fca4a427..7dc42c54 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -18,7 +18,6 @@ dev_bundles: - jammy-yoga-image-mirroring configure: - zaza.openstack.charm_tests.glance.setup.add_cirros_image -- zaza.openstack.charm_tests.glance.setup.add_lts_image tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest From fb7eceba792b2f9ecadfa7093dda261025d42437 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 3 Oct 2023 10:43:04 +0200 Subject: [PATCH 2580/2699] Functests: pin support channels, cleanup Change-Id: Ied88fe505855bcc0f0e01d00e5f671f629002e1c --- ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml | 8 ++++---- ceph-rbd-mirror/src/tests/tests.yaml | 6 +----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml index c3f2d5bd..3bae7f3d 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml @@ -18,13 +18,13 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -33,7 +33,7 @@ applications: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: 8.0/edge keystone: charm: ch:keystone diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 7dc42c54..7b040009 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,11 +1,7 @@ charm_name: 
ceph-rbd-mirror smoke_bundles: -- focal-xena +- jammy-antelope gate_bundles: -- focal-xena -- focal-xena-image-mirroring -- impish-xena -- impish-xena-image-mirroring - jammy-bobcat - mantic-bobcat comment: | From 02412328250a9c23048e21a06f1673e7b9d043eb Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 24 Nov 2023 21:00:04 -0300 Subject: [PATCH 2581/2699] Revert default source to 'yoga' The Openstack libs don't recognize Ceph releases when specifying the charm source. Instead, we have to use an Openstack release. Since it was set to quincy, reset it to yoga. Change-Id: Ie9d485e89bd97d10774912691d657428758300ae Closes-Bug: #2044052 --- ceph-osd/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index de77e1af..699afe9e 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -5,7 +5,7 @@ options: description: OSD debug level. Max is 20. source: type: string - default: quincy + default: yoga description: | Optional configuration to support use of additional sources such as: . From 2f93f92fcfed34d469305aa46caafb93e5af63c9 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 7 Dec 2023 09:04:20 +0100 Subject: [PATCH 2582/2699] Fix: enable GW services once we have config Closes-Bug: #2045828 Change-Id: Ia473b2793a60172645713b722cbab80dcdbf93b6 --- ceph-iscsi/src/charm.py | 5 +++++ ceph-iscsi/test-requirements.txt | 1 + ceph-iscsi/tests/bundles/focal.yaml | 6 +----- ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py | 3 +++ 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/ceph-iscsi/src/charm.py b/ceph-iscsi/src/charm.py index 9ace9cc1..2352fa17 100755 --- a/ceph-iscsi/src/charm.py +++ b/ceph-iscsi/src/charm.py @@ -421,6 +421,11 @@ def _render_configs(): self.adapters) logging.info("Rendering config") _render_configs() + # Make sure the gateway services are enabled after rendering the + # configurations and starting those. Those are disabled by + # default in the package. LP: #2045828 + for service_name in self.GW_SERVICES: + ch_host.service_enable(service_name) logging.info("Setting started state") self.peers.announce_ready() self._stored.is_started = True diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index 552381d2..cff01fc9 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -11,3 +11,4 @@ psutil git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack pyudev # for ceph-* charm unit tests (not mocked?) 
+protobuf<3.21.0 # https://github.com/juju/python-libjuju/issues/914
diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml
index d95d685b..5293c2a3 100644
--- a/ceph-iscsi/tests/bundles/focal.yaml
+++ b/ceph-iscsi/tests/bundles/focal.yaml
@@ -20,8 +20,6 @@ machines:
   '13':
   '14':
   '15':
-  '16':
-  '17':
 applications:
   ubuntu:
     charm: cs:ubuntu
@@ -32,14 +30,12 @@ applications:
       - '15'
   ceph-iscsi:
     charm: ../../ceph-iscsi.charm
-    num_units: 4
+    num_units: 2
     options:
       gateway-metadata-pool: iscsi-foo-metadata
     to:
       - '0'
       - '1'
-      - '16'
-      - '17'
   ceph-osd:
     charm: ch:ceph-osd
     num_units: 6
diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
index f4d6c3a2..99f08af5 100644
--- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
+++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py
@@ -158,6 +158,9 @@ def setUp(self):
         self.test_admin_password = 'rrrrrrrr'
         self.gwc = MagicMock()
         self.gwcli_client.GatewayClient.return_value = self.gwc
+        patch_srv_enable = patch.object(charm.ch_host, 'service_enable')
+        patch_srv_enable.start()
+        self.addCleanup(patch_srv_enable.stop)

         # BEGIN: Workaround until network_get is implemented
         class _TestingOPSModelBackend(_TestingModelBackend):

From 115ff114b4a445009d516ef96e8e99eba23df05e Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Fri, 15 Dec 2023 19:49:09 +0100
Subject: [PATCH 2583/2699] Reef default source bobcat

For the reef track, we want to default to having bobcat as a source,
as this will give reef packages.

Change-Id: I3b5434cffc7e324c676ecbb6a146d29e2f553e5b
---
 ceph-osd/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml
index d6cb8532..7d6954bb 100644
--- a/ceph-osd/config.yaml
+++ b/ceph-osd/config.yaml
@@ -5,7 +5,7 @@ options:
     description: OSD debug level. Max is 20.
   source:
     type: string
-    default: yoga
+    default: bobcat
     description: |
       Optional configuration to support use of additional sources such as:
       .

From bcfbbbba36131f4965ec2534febd93f6d39af96e Mon Sep 17 00:00:00 2001
From: Luciano Lo Giudice
Date: Fri, 15 Dec 2023 13:09:17 -0300
Subject: [PATCH 2584/2699] Revert default source to 'bobcat'

The OpenStack libs don't recognize Ceph releases when specifying the
charm source. Instead, we have to use an OpenStack release. Since it
was set to quincy, reset it to bobcat.

Closes-Bug: #2026651
Change-Id: Ibac09d2bf77eeba69789434eaa6112c2028fbf64
---
 ceph-mon/config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml
index 882e9b81..83e5462e 100644
--- a/ceph-mon/config.yaml
+++ b/ceph-mon/config.yaml
@@ -10,7 +10,7 @@ options:
       If set to True, supporting services will log to syslog.
   source:
     type: string
-    default: quincy
+    default: bobcat
     description: |
       Optional configuration to support use of additional sources such as:
       .

From 45cbdb94663de740f6578910bd0653fec520a049 Mon Sep 17 00:00:00 2001
From: Utkarsh Bhatt
Date: Thu, 14 Dec 2023 16:19:28 +0530
Subject: [PATCH 2585/2699] Use single credential for configuring object
 storage gw in dashboard.

The radosgw relation provides the same credential mapped to the
hostnames of all the related radosgw application units. This extra
information is not needed. Even when multiple radosgw applications
are present in the same Juju model, a single instance of credentials
is sufficient for the dashboard to present the Object GW UI.
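To illustrate the data shape involved (a hypothetical example based only
on the keys the removed multi-gateway code consumed; the real relation
payload may carry additional fields):

    # Illustrative credential list as provided over the radosgw-dashboard
    # relation: every unit reports the same access/secret pair keyed by
    # its daemon_id, so consuming creds[0] loses no information.
    creds = [
        {'daemon_id': 'rgw-unit-0', 'access_key': 'AKEX1', 'secret_key': 'SKEX1'},
        {'daemon_id': 'rgw-unit-1', 'access_key': 'AKEX1', 'secret_key': 'SKEX1'},
    ]
    access_key = creds[0]['access_key']
    secret_key = creds[0]['secret_key']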
Closes-Bug: #1986593 Change-Id: Ib56e0bd4fe588219d6b5728d9a71cf09abdd25de Signed-off-by: Utkarsh Bhatt --- ceph-dashboard/src/charm.py | 41 +++++-------------- .../tests/bundles/jammy-antelope.yaml | 3 ++ .../tests/bundles/jammy-bobcat.yaml | 3 ++ .../tests/bundles/lunar-antelope.yaml | 1 + .../tests/bundles/mantic-bobcat.yaml | 3 +- .../unit_tests/test_ceph_dashboard_charm.py | 5 --- 6 files changed, 20 insertions(+), 36 deletions(-) diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index 100099d4..fe8eb69f 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -18,7 +18,6 @@ from pathlib import Path from typing import List, Tuple, Union -import charmhelpers.core.host as ch_host import charms_ceph.utils as ceph_utils import cryptography.hazmat.primitives.serialization as serialization import interface_ceph_iscsi_admin_access.admin_access as admin_access @@ -180,8 +179,9 @@ def _register_dashboards(self) -> None: logging.debug( "register_grafana_dashboard: {}".format(dash_file)) - def _update_legacy_radosgw_creds(self, access_key: str, - secret_key: str) -> None: + def _update_radosgw_creds( + self, access_key: str, secret_key: str + ) -> None: """Update dashboard db with access & secret key for rados gateways. This method uses the legacy format which only supports one gateway. @@ -189,21 +189,6 @@ def _update_legacy_radosgw_creds(self, access_key: str, self._apply_file_setting('set-rgw-api-access-key', access_key) self._apply_file_setting('set-rgw-api-secret-key', secret_key) - def _update_multi_radosgw_creds(self, creds: str) -> None: - """Update dashboard db with access & secret key for rados gateway.""" - access_keys = {c['daemon_id']: c['access_key'] for c in creds} - secret_keys = {c['daemon_id']: c['secret_key'] for c in creds} - self._apply_file_setting( - 'set-rgw-api-access-key', - json.dumps(access_keys)) - self._apply_file_setting( - 'set-rgw-api-secret-key', - json.dumps(secret_keys)) - - def _support_multiple_gateways(self) -> bool: - """Check if version of dashboard supports multiple rados gateways""" - return ch_host.cmp_pkgrevno('ceph-common', '16.0') > 0 - def _manage_radosgw(self) -> None: """Register rados gateways in dashboard db""" if self.unit.is_leader(): @@ -214,18 +199,14 @@ def _manage_radosgw(self) -> None: if cred_count < 1: logging.info("No object gateway creds found") return - if self._support_multiple_gateways(): - self._update_multi_radosgw_creds(creds) - else: - if cred_count > 1: - logging.error( - "Cannot enable object gateway support. Ceph release " - "does not support multiple object gateways in the " - "dashboard") - else: - self._update_legacy_radosgw_creds( - creds[0]['access_key'], - creds[0]['secret_key']) + # Update the provided creds for radosgw. + # NOTE(utkarshbhatthere): Having multiple credentials is not + # required even where there are multiple radosgw applications + # in the juju model. Therefore, first available creds are + # populated in dashboard. 
+ self._update_radosgw_creds( + creds[0]['access_key'], + creds[0]['secret_key']) def _request_certificates(self, event) -> None: """Request TLS certificates.""" diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml index 4263cbe3..b2497e93 100644 --- a/ceph-dashboard/tests/bundles/jammy-antelope.yaml +++ b/ceph-dashboard/tests/bundles/jammy-antelope.yaml @@ -40,6 +40,8 @@ applications: # SSL and allow_embedding are not released into cs:grafana yet, due # October 2021 charm: ch:grafana + series: focal + channel: latest/stable num_units: 1 options: anonymous: True @@ -52,6 +54,7 @@ applications: hostname: "{host}" prometheus-alertmanager: charm: ch:prometheus-alertmanager + series: focal num_units: 1 ceph-radosgw: charm: ch:ceph-radosgw diff --git a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml index 62c023f7..fcdd5727 100644 --- a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml @@ -41,6 +41,8 @@ applications: # October 2021 charm: ch:grafana num_units: 1 + series: focal + channel: latest/stable options: anonymous: True install_method: snap @@ -52,6 +54,7 @@ applications: hostname: "{host}" prometheus-alertmanager: charm: ch:prometheus-alertmanager + series: focal num_units: 1 ceph-radosgw: charm: ch:ceph-radosgw diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml index 160e6833..dd2f39a2 100644 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ b/ceph-dashboard/tests/bundles/lunar-antelope.yaml @@ -47,6 +47,7 @@ applications: charm: ch:grafana num_units: 1 series: focal + channel: latest/stable options: anonymous: True install_method: snap diff --git a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml index bed13e0b..70e356e5 100644 --- a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/mantic-bobcat.yaml @@ -65,7 +65,8 @@ applications: channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi - series: mantic + # ceph-iscsi is deprecated therefore using older series. 
+ series: jammy num_units: 2 options: gateway-metadata-pool: iscsi-foo-metadata diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py index 99028ca8..0c62fb79 100644 --- a/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_charm.py @@ -162,10 +162,8 @@ class TestCephDashboardCharmBase(CharmTestCase): PATCHES = [ 'ceph_utils', - 'ch_host', 'socket', 'subprocess', # charm's subprocess import - 'ch_host', ] def setUp(self): @@ -515,7 +513,6 @@ def test_certificates_from_config(self, ssl_configured): @patch('ceph_dashboard_commands.subprocess') def test_rados_gateway(self, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True - self.ch_host.cmp_pkgrevno.return_value = 1 mon_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') rel_id = self.harness.add_relation('radosgw-dashboard', 'ceph-radosgw') self.harness.begin() @@ -563,7 +560,6 @@ def test_rados_gateway(self, subprocess): @patch('ceph_dashboard_commands.subprocess') def test_rados_gateway_multi_relations_pacific(self, subprocess): self.ceph_utils.is_dashboard_enabled.return_value = True - self.ch_host.cmp_pkgrevno.return_value = 1 rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') mon_rel_id = self.harness.add_relation('dashboard', 'ceph-mon') @@ -619,7 +615,6 @@ def test_rados_gateway_multi_relations_pacific(self, subprocess): @patch('ceph_dashboard_commands.subprocess') def test_rados_gateway_multi_relations_octopus(self, subprocess): - self.ch_host.cmp_pkgrevno.return_value = -1 rel_id1 = self.harness.add_relation('radosgw-dashboard', 'ceph-eu') rel_id2 = self.harness.add_relation('radosgw-dashboard', 'ceph-us') self.harness.begin() From fc8741087eec9224ffb1bb2240b6cb43aacf911f Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 3 Jan 2024 18:10:30 -0300 Subject: [PATCH 2586/2699] Retry setting rbd_stats_pools prometheus config Setting the 'mgr/prometheus/rbd_stats_pools' option can fail if we arrive too early, even if the cluster is bootstrapped. This is particularly seen in ceph-radosgw test runs. This patchset thus adds a retry decorator to work around this issue. Change-Id: Id9b7b903e67154e7d2bb6fecbeef7fac126804a8 --- ceph-mon/src/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py index 03fdb9de..e539ea5a 100644 --- a/ceph-mon/src/utils.py +++ b/ceph-mon/src/utils.py @@ -420,6 +420,10 @@ def _set_require_osd_release(release): raise OsdPostUpgradeError(call_error) +@tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, max=10), + reraise=True, + stop=tenacity.stop_after_attempt(30)) def mgr_config_set_rbd_stats_pools(): """Update ceph mgr config with the value from rbd-status-pools config """ From 68c139c65e658e9dff8777f8a42e21c16d20f3e6 Mon Sep 17 00:00:00 2001 From: peppepetra86 Date: Thu, 19 May 2022 10:13:13 +0200 Subject: [PATCH 2587/2699] Allow URLs which contain encoded path separators This is to resolve the issue with objects containing slash in the name not correctly synced in multisite environments. 
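For background (an illustrative sketch, not part of the patch): S3 object
keys may legitimately contain slashes, and multisite sync requests carry
the percent-encoded key in the request path, so the fronting proxy must
pass "%2F" through unmodified:

    from urllib.parse import quote

    # An object key containing path separators, as commonly used in S3.
    key = 'photos/2023/snapshot.jpg'
    # Encoding with safe='' turns '/' into '%2F'. With Apache's default
    # of AllowEncodedSlashes Off, such request paths are rejected before
    # they ever reach the radosgw backend.
    print(quote(key, safe=''))  # photos%2F2023%2Fsnapshot.jpg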
Closes-Bug: #1974138 Change-Id: I71ac000bb4754c9cb987d703f145dc2a5ff032ad --- ceph-radosgw/templates/openstack_https_frontend.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-radosgw/templates/openstack_https_frontend.conf b/ceph-radosgw/templates/openstack_https_frontend.conf index 6463f415..5df76315 100644 --- a/ceph-radosgw/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/templates/openstack_https_frontend.conf @@ -18,6 +18,7 @@ Listen {{ ext_port }} # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + AllowEncodedSlashes On ProxyPass / http://localhost:{{ int }}/ nocanon ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on From a71e3407cbd1ead3ed0ea5ffff9bf3642c21391d Mon Sep 17 00:00:00 2001 From: Danny Cocks Date: Wed, 25 Oct 2023 14:50:53 +1100 Subject: [PATCH 2588/2699] Add nagios check for radosgw-admin sync status This duplicates the check performed for ceph status and specialises it for radosgw-admin sync status instead. The config options available are: - nagios_rgw_zones: this is which zones are expected to be connected - nagios_rgw_additional_checks: this is equivalent to nagios_additional_checks and allows for a configurable set of strings to grep for as critical alerts. Change-Id: Ideb35587693feaf1cc0736e981005332e91ca861 --- ceph-mon/config.yaml | 23 +++ .../files/nagios/check_radosgw_sync_status.py | 191 ++++++++++++++++++ ceph-mon/files/nagios/collect_ceph_status.sh | 11 + ceph-mon/src/ceph_hooks.py | 21 ++ ceph-mon/unit_tests/test_ceph_hooks.py | 2 + 5 files changed, 248 insertions(+) create mode 100755 ceph-mon/files/nagios/check_radosgw_sync_status.py diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 1a5375de..0e514f24 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -260,6 +260,29 @@ options: description: | Whether additional checks report warning or error when their checks are positive. + nagios_rgw_zones: + default: "" + type: string + description: | + Comma-separated list of zones that are expected to be connected to this + radosgw. These will be checked by the line "data sync source... + (zone-name)" in the output of `radosgw-admin sync status`. + . + Example: + . + zone1,zone2 + nagios_rgw_additional_checks: + default: "" + type: string + description: | + List describing additional checks. Each item is a regular expression to + search in the output of radosgw-admin sync status. Note, this is a + list unlike `nagios_additional_checks` which uses a dictionary. + . + Example: + . + ['data is behind on'] + . use-direct-io: type: boolean default: True diff --git a/ceph-mon/files/nagios/check_radosgw_sync_status.py b/ceph-mon/files/nagios/check_radosgw_sync_status.py new file mode 100755 index 00000000..01edfa24 --- /dev/null +++ b/ceph-mon/files/nagios/check_radosgw_sync_status.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2005, 2006, 2007, 2012 James Troup +# Copyright (C) 2014, 2017 Canonical +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors: Danny Cocks
+# Based on check_ceph_status.py and authors therein
+
+import re
+import argparse
+import os
+import subprocess
+import sys
+import time
+import traceback
+
+
+class CriticalError(Exception):
+    """This indicates a critical error."""
+    pass
+
+
+class UnknownError(Exception):
+    """This indicates an unknown error was encountered."""
+    pass
+
+
+def check_file_freshness(filename, newer_than=3600):
+    """
+    Check a file exists, is readable and is newer than <n> seconds (where
+    <n> defaults to 3600).
+    """
+    # First check the file exists and is readable
+    if not os.path.exists(filename):
+        raise CriticalError("%s: does not exist." % (filename))
+    if os.access(filename, os.R_OK) == 0:
+        raise CriticalError("%s: is not readable." % (filename))
+
+    # Then ensure the file is up-to-date enough
+    mtime = os.stat(filename).st_mtime
+    last_modified = time.time() - mtime
+    if last_modified > newer_than:
+        raise CriticalError("%s: was last modified on %s and is too old "
+                            "(> %s seconds)."
+                            % (filename, time.ctime(mtime), newer_than))
+    if last_modified < 0:
+        raise CriticalError("%s: was last modified on %s which is in the "
+                            "future."
+                            % (filename, time.ctime(mtime)))
+
+
+def check_radosgw_status(args):
+    """
+    Used to check the status of multizone RadosGW Ceph. Uses the output of
+    'radosgw-admin sync status', generated during the separate cronjob, to
+    determine if health is OK, and if not, should we alert on that situation.
+
+    As this is the first iteration of this function, we will only do a very
+    basic check and will rely on the charm config option
+    `nagios_rgw_additional_checks` which is passed to this script via
+    `args.additional_check`
+
+    :param args: argparse object formatted in the convention of generic Nagios
+    checks
+    :returns string, describing the status of the ceph cluster.
+    :raises: UnknownError, CriticalError
+    """
+
+    if args.status_file:
+        check_file_freshness(args.status_file)
+        with open(args.status_file) as f:
+            status_data = f.read()
+    else:
+        try:
+            status_data = (subprocess.check_output(['radosgw-admin',
+                                                    'sync',
+                                                    'status'])
+                           .decode('UTF-8'))
+        except subprocess.CalledProcessError as e:
+            raise UnknownError(
+                "UNKNOWN: radosgw-admin sync status command "
+                "failed with error: {}".format(e))
+
+    # If the realm name is empty, i.e. the first line is
+    #   realm  ()
+    # then we assume this means this is not multizone, so exit early.
+    lines = status_data.split('\n')
+    if len(lines) >= 1 and re.match(r"realm .* \(\)", lines[0].strip()):
+        return "No multizone detected"
+
+    # This is a hangover from check_ceph_status.py and not directly applicable
+    # here. I include it for an additional check.
+    required_strings = ['realm', 'zonegroup', 'zone']
+    if not all(s in status_data for s in required_strings):
+        raise UnknownError('UNKNOWN: status data is incomplete')
+
+    # The default message if we end up with no alerts
+    message_all_ok = "All OK"
+    # The list to collect messages
+    msgs = []
+
+    # The always-done checks go here.
+    # Currently none
+
+    # Handle checks to do with given expected zones that should be connected.
+ if args.zones: + for zone in args.zones.split(','): + search_regex = r"data sync source:.*\(" + zone + r"\)" + if re.search(search_regex, status_data) is None: + msg = ("CRITICAL: Missing expected sync source '{}'" + .format(zone)) + msgs.append(msg) + + # For additional checks, also test these things + if args.additional_checks: + for check in args.additional_checks: + m = re.search(check, status_data) + if m is not None: + msgs.append("CRITICAL: {}".format(m.group(0))) + + complete_output = '\n'.join(msgs) + if any(msg.startswith("CRITICAL") for msg in msgs): + raise CriticalError(complete_output) + elif len(msgs) >= 1: + raise UnknownError(complete_output) + else: + return message_all_ok + + +def parse_args(args): + parser = argparse.ArgumentParser(description='Check ceph status') + parser.add_argument('-f', '--file', dest='status_file', + default=False, + help='Optional file with "ceph status" output. ' + 'Generally useful for testing, and if the Nagios ' + 'user account does not have rights for the Ceph ' + 'config files.') + parser.add_argument('--zones', dest='zones', + default=None, + help="Check if the given zones, as a comma-separated " + "list, are present in the output. If they are " + "missing report critical.") + parser.add_argument('--additional_check', dest='additional_checks', + action='append', + help="Check if a given pattern exists in any status" + "message. If it does, report critical") + + return parser.parse_args(args) + + +def main(args): + # Note: leaving "warning" in here, as a reminder for the expected NRPE + # returncodes, even though this script doesn't output any warnings. + EXIT_CODES = {'ok': 0, 'warning': 1, 'critical': 2, 'unknown': 3} + exitcode = 'unknown' + try: + output_msg = check_radosgw_status(args) + print(output_msg) + exitcode = 'ok' + except UnknownError as msg: + print(msg) + exitcode = 'unknown' + except CriticalError as msg: + print(msg) + exitcode = 'critical' + except Exception: + print("%s raised unknown exception '%s'" % ('check_ceph_status', + sys.exc_info()[0])) + print('=' * 60) + traceback.print_exc(file=sys.stdout) + print('=' * 60) + exitcode = 'unknown' + return EXIT_CODES[exitcode] + + +if __name__ == '__main__': + args = parse_args(sys.argv[1:]) + status = main(args) + sys.exit(status) diff --git a/ceph-mon/files/nagios/collect_ceph_status.sh b/ceph-mon/files/nagios/collect_ceph_status.sh index 514c219c..962687a3 100755 --- a/ceph-mon/files/nagios/collect_ceph_status.sh +++ b/ceph-mon/files/nagios/collect_ceph_status.sh @@ -31,3 +31,14 @@ ceph osd tree --format json > ${TMP_FILE} chown root:nagios ${TMP_FILE} chmod 0640 ${TMP_FILE} mv ${TMP_FILE} ${DATA_FILE} + + +# Note: radosgw-admin sync status doesn't support outputting in json at time of writing +DATA_FILE="${DATA_DIR}/current-radosgw-admin-sync-status.raw" +TMP_FILE=$(mktemp -p ${DATA_DIR}) + +radosgw-admin sync status > ${TMP_FILE} + +chown root:nagios ${TMP_FILE} +chmod 0640 ${TMP_FILE} +mv ${TMP_FILE} ${DATA_FILE} diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index a04fc7bd..31e4c199 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -108,6 +108,8 @@ NAGIOS_FILE_FOLDER = '/var/lib/nagios' SCRIPTS_DIR = '/usr/local/bin' STATUS_FILE = '{}/cat-ceph-status.txt'.format(NAGIOS_FILE_FOLDER) +RADOSGW_STATUS_FILE = ('{}/current-radosgw-admin-sync-status.raw' + .format(NAGIOS_FILE_FOLDER)) STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health' HOST_OSD_COUNT_REPORT = '{}/host-osd-report.json'.format(NAGIOS_FILE_FOLDER) @@ -1193,6 +1195,10 
@@ def update_nrpe_config(): 'check_ceph_osd_count.py'), os.path.join(NAGIOS_PLUGINS, 'check_ceph_osd_count.py')) + rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', + 'check_radosgw_sync_status.py'), + os.path.join(NAGIOS_PLUGINS, 'check_radosgw_sync_status.py')) + script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh') rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', 'collect_ceph_status.sh'), @@ -1252,6 +1258,21 @@ def update_nrpe_config(): description='Check whether all OSDs are up and in', check_cmd=check_cmd ) + + check_cmd = ('check_radosgw_sync_status.py -f {}' + .format(RADOSGW_STATUS_FILE)) + if config('nagios_rgw_zones'): + check_cmd += ' --zones "{}"'.format(config('nagios_rgw_zones')) + if config('nagios_rgw_additional_checks'): + x = ast.literal_eval(config('nagios_rgw_additional_checks')) + for check in x: + check_cmd += ' --additional_check \"{}\"'.format(check) + nrpe_setup.add_check( + shortname='radosgw_multizone', + description='Check multizone radosgw health', + check_cmd=check_cmd + ) + nrpe_setup.write() diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index b11bde57..de584d91 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -54,6 +54,8 @@ 'nagios_raise_nodeepscrub': True, 'nagios_additional_checks': "", 'nagios_additional_checks_critical': False, + 'nagios_rgw_zones': "", + 'nagios_rgw_additional_checks': "", 'nagios_check_num_osds': False, 'disable-pg-max-object-skew': False} From 8db290a73e0aff1486892eff09e4082c8e029b8d Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 15 Dec 2023 13:21:10 -0300 Subject: [PATCH 2589/2699] Revert default source to 'bobcat' The Openstack libs don't recognize Ceph releases when specifying the charm source. Instead, we have to use an Openstack release. Since it was set to quincy, reset it to bobcat. Change-Id: I5b82c876184025a790e2b0cab84348c71a8a4a55 --- ceph-radosgw/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index a9a9eed3..2b6f48af 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: quincy + default: bobcat description: | Optional repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, From a62a3bbb6f219f0ed3d5a8bb3b49836b61844ecc Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 16 Jan 2024 11:21:07 +0100 Subject: [PATCH 2590/2699] Don't error out on missing OSDs Ceph reef has a behaviour change where it doesn't always return version keys for all components. In I12a1bcd32be2ed8a8e5ee0e304f716f5a190bd57 an attempt was made to fix this by retrying, however this code path can also be hit when a component such as OSDs are absent. While a cluster without OSDs wouldn't be functional it still should not cause the charm to error. As a fix, just make the OSD component optional when querying for a version instead of retrying. 
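For context, a sketch of the JSON structure returned by "ceph versions"
that this code parses; on a cluster where no OSDs have registered yet the
"osd" key can be absent entirely (abbreviated, illustrative values):

    # Abbreviated `ceph versions` output on an OSD-less cluster: note
    # the missing "osd" key, which is why the parsing code must use
    # versions_dict.get('osd', ...) rather than indexing directly.
    versions_dict = {
        'mon': {'ceph version 18.2.0 (...) reef (stable)': 3},
        'mgr': {'ceph version 18.2.0 (...) reef (stable)': 1},
        'overall': {'ceph version 18.2.0 (...) reef (stable)': 4},
    }
    osd_versions = versions_dict.get('osd', {})  # {} instead of a KeyError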
Change-Id: I5524896c7ad944f6f22fb1498ab0069397b52418 --- ceph-mon/src/utils.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py index e539ea5a..b7c49b9d 100644 --- a/ceph-mon/src/utils.py +++ b/ceph-mon/src/utils.py @@ -329,10 +329,6 @@ def execute_post_osd_upgrade_steps(ceph_osd_release): log(message=msg, level='ERROR') -@tenacity.retry( - wait=tenacity.wait_exponential(multiplier=1, max=10), - reraise=True, - stop=tenacity.stop_after_attempt(30)) def _get_versions(): """Gets the ceph versions. @@ -354,8 +350,6 @@ def _get_versions(): raise OsdPostUpgradeError(call_error) log('Versions: {}'.format(versions_str), level='DEBUG') versions_dict = json.loads(versions_str) - # provoke keyerror if we don't have osd versions yet to cause a retry - _ = versions_dict['osd'] return True, versions_dict @@ -370,7 +364,7 @@ def _all_ceph_versions_same(): if len(versions_dict['overall']) > 1: log('All upgrades of mon and osd have not completed.') return False - if len(versions_dict['osd']) < 1: + if len(versions_dict.get('osd', [])) < 1: log('Monitors have converged but no osd versions found.', level='WARNING') return False From 1405c67fd98fab02d489d6e7d6d49417eb786c27 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 17 Jan 2024 16:50:37 +0100 Subject: [PATCH 2591/2699] Add alerting rules for RGW multisite deployments Add default prometheus alerting rules for RadosGW multisite deployments based on the built-in Ceph RGW multisite metrics. Note that the included prometheus_alerts.yml.default rule file is included for reference only. The ceph-mon charm will utilize the resource file from https://charmhub.io/ceph-mon/resources/alert-rules for deployment so that operators can easily customize these rules. Change-Id: I5a12162d73686963132a952bddd85ec205964de4 --- .../prometheus_alerts.yml.default | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default index a544d41e..e0914e49 100644 --- a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default +++ b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default @@ -633,3 +633,50 @@ groups: oid: "1.3.6.1.4.1.50495.1.2.1.1.2" severity: "critical" type: "ceph_default" + - name: "rgwmultisite" + rules: + - alert: "CephRGWMultisiteFetchError" + annotations: + description: "Unsuccessful Object Replications from source zone threshold has been exceeded. The threshold is defined as 2 errors per 15min" + summary: "Unsuccessful Object Replications from Source Zone Threshold Exceeded" + expr: "increase(ceph_data_sync_from_zone_fetch_errors[15m]) > 2" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephRGWMultisitePollError" + annotations: + description: "Unsuccessful Object Replications from Source Zone Threshold Exceeded. The threshold is defined as 2 errors per 15min" + summary: "Unsuccessful Object Replications from Source Zone Threshold Exceeded" + expr: "increase(ceph_data_sync_from_zone_poll_errors[15m]) > 2" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephRGWMultisiteFetchErrorCritical" + annotations: + description: "Critical: Unsuccessful Object Replications from source zone threshold has been exceeded. 
The threshold is defined as 50 errors per 15min" + summary: "Critical: Unsuccessful Object Replications from Source Zone Threshold Exceeded" + expr: "increase(ceph_data_sync_from_zone_fetch_errors[15m]) > 50" + for: "5m" + labels: + severity: "critical" + type: "ceph_default" + - alert: "CephRGWMultisitePollErrorCritical" + annotations: + description: "Critical: Unsuccessful Object Replications from source zone threshold has been exceeded. The threshold is defined as 50 errors per 15min" + summary: "Critical: Unsuccessful Object Replications from Source Zone Threshold Exceeded" + expr: "increase(ceph_data_sync_from_zone_poll_errors[15m]) > 50" + for: "5m" + labels: + severity: "critical" + type: "ceph_default" + - alert: "CephRGWMultisitePollLatency" + annotations: + description: "Latency for poll request threshold exceeded. The threshold is defined as 600s latency per 15min" + summary: "Poll Request Latency Threshold Exceeded" + expr: "increase(ceph_data_sync_from_zone_poll_latency_sum[15m]) > 600" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" From a2d136f5d6f66eb5c921a41e5fe713475001c457 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 19 Jan 2024 19:12:40 +0100 Subject: [PATCH 2592/2699] Fixup: multisite alert rule help texts Change-Id: I558804c8bbd162a15bd97a023ac612d32fd96b02 --- .../prometheus_alert_rules/prometheus_alerts.yml.default | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default index e0914e49..df2fcf57 100644 --- a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default +++ b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default @@ -646,8 +646,8 @@ groups: type: "ceph_default" - alert: "CephRGWMultisitePollError" annotations: - description: "Unsuccessful Object Replications from Source Zone Threshold Exceeded. The threshold is defined as 2 errors per 15min" - summary: "Unsuccessful Object Replications from Source Zone Threshold Exceeded" + description: "Unsuccessful replication log request errors threshold has been exceeded. The threshold is defined as 2 errors per 15min" + summary: "Unsuccessful Replication Log Request Errors Threshold Exceeded" expr: "increase(ceph_data_sync_from_zone_poll_errors[15m]) > 2" for: "5m" labels: @@ -664,8 +664,8 @@ groups: type: "ceph_default" - alert: "CephRGWMultisitePollErrorCritical" annotations: - description: "Critical: Unsuccessful Object Replications from source zone threshold has been exceeded. The threshold is defined as 50 errors per 15min" - summary: "Critical: Unsuccessful Object Replications from Source Zone Threshold Exceeded" + description: "Critical: Unsuccessful replication log request errors threshold has been exceeded. 
The threshold is defined as 50 errors per 15min" + summary: "Critical: Unsuccessful Replication Log Request Errors Threshold Exceeded" expr: "increase(ceph_data_sync_from_zone_poll_errors[15m]) > 50" for: "5m" labels: From 4ff52f76e3df50ec8c04cb17fe59ac280da19b4a Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 13 Mar 2023 16:06:03 +0100 Subject: [PATCH 2593/2699] Initial support for the s3 interface Implement initial support for the s3 interface here: https://github.com/canonical/charm-relation-interfaces/tree/main/interfaces/s3/v0 Drive-by: fully qualify rename.sh in allowlist_externals Change-Id: I8a78c41840c529cf2c35f487739c0397e4374f97 --- ceph-radosgw/hooks/hooks.py | 181 +++++++++++++++++- ceph-radosgw/hooks/install_deps | 2 +- ceph-radosgw/hooks/s3-relation-changed | 1 + ceph-radosgw/hooks/s3-relation-departed | 1 + ceph-radosgw/hooks/utils.py | 46 +++++ ceph-radosgw/metadata.yaml | 2 + .../unit_tests/test_ceph_radosgw_utils.py | 16 ++ ceph-radosgw/unit_tests/test_hooks.py | 77 ++++++++ 8 files changed, 323 insertions(+), 3 deletions(-) create mode 120000 ceph-radosgw/hooks/s3-relation-changed create mode 120000 ceph-radosgw/hooks/s3-relation-departed diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index f4db085e..6a7db9a2 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# + # Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,12 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 +import json import os import subprocess import sys import socket import uuid +import utils + sys.path.append('lib') import ceph_rgw as ceph @@ -89,6 +93,7 @@ ) from utils import ( assess_status, + boto_client, disable_unused_apache_sites, listen_port, multisite_deployment, @@ -99,8 +104,11 @@ restart_map, restart_nonce_changed, resume_unit_helper, + s3_app, service_name, services, + set_s3_app, + clear_s3_app, setup_ipv6, systemd_based_radosgw, ) @@ -118,7 +126,7 @@ PACKAGES = [ 'haproxy', 'radosgw', - 'apache2' + 'apache2', ] APACHE_PACKAGES = [ @@ -284,6 +292,11 @@ def _config_changed(): (opened_port_number, port)) _config_changed() + # Update s3 apps with ssl-ca, if available + ssl_ca = config('ssl-ca') + if ssl_ca: + update_s3_ca_info([ssl_ca]) + @hooks.hook('mon-relation-departed', 'mon-relation-changed') @@ -600,6 +613,10 @@ def _certs_changed(): _certs_changed() for r_id in relation_ids('identity-service'): identity_joined(relid=r_id) + # Update s3 apps with ca material, if available + ca_chains = cert_rel_ca() + if ca_chains: + update_s3_ca_info(ca_chains) def get_radosgw_username(r_id): @@ -1009,6 +1026,166 @@ def process_multisite_relations(): secondary_relation_changed(r_id, unit) +def cert_rel_ca(): + """Get ca material from the certificates relation. 
+ + Returns a list of base64 encoded strings + """ + data = None + for r_id in relation_ids('certificates'): + # First check for app data + remote_app = remote_service_name(r_id) + data = relation_get(rid=r_id, app=remote_app) + if data: + break + # No app data, check for unit data + for unit in related_units(r_id): + data = relation_get(rid=r_id, unit=unit) + if data: + break + if not data: + log('No certificates rel data found', level=DEBUG) + return + ca_chain = [base64.b64encode(d.encode('utf-8')).decode() + for d in (data.get('chain'), data.get('ca')) if d] + return ca_chain + + +def update_s3_ca_info(ca_chains): + """Update tls ca info for s3 connected apps. + + Takes a list of base64 encoded ca chains and sets them on the s3 + relations + """ + apps = utils.all_s3_apps() + if not apps: + return + for app, s3_info in apps.items(): + s3_info['tls-ca-chain'] = ca_chains + for rid in relation_ids('s3'): + relation_set(rid=rid, app=app, relation_settings=s3_info) + + +def get_relation_info(relation_id): + rid = relation_id or ch_relation_id() + remote_app = remote_service_name(rid) + bucket = relation_get(app=remote_app, attribute='bucket') + return rid, remote_app, bucket + + +def create_new_s3_user(remote_app): + user = f"{remote_app}-{uuid.uuid4()}" + access_key, secret_key = multisite.create_user(user) + if not access_key or not secret_key: + raise RuntimeError("Failed to create user: {}".format(user)) + return user, access_key, secret_key + + +def handle_existing_s3_info( + rid, remote_app, + bucket, endpoint, ca_chains, + existing_s3_info): + log( + "s3 info found, not adding new user/bucket: {}".format(rid), + level=DEBUG + ) + # Pass back previously computed data for convenience, but omit the + # secret key + update = { + "bucket": bucket, + "access-key": existing_s3_info['access-key'], + "endpoint": endpoint, + "tls-ca-chain": json.dumps(ca_chains) + } + relation_set(rid=rid, app=remote_app, relation_settings=update) + + +def create_bucket(user, access_key, secret_key, bucket, endpoint, ca_chains): + client = boto_client(access_key, secret_key, endpoint) + try: + client.create_bucket(Bucket=bucket) + # Ignore already existing bucket, just log it + except client.meta.client.exceptions.BucketAlreadyExists as e: + log("Bucket {} already exists: {}".format(bucket, e)) + log( + "s3: added user={}, bucket: {}".format(user, bucket), + level=DEBUG + ) + + +@hooks.hook('s3-relation-changed') +def s3_relation_changed(relation_id=None): + """ + Handle the s3 relation changed hook. + + If this unit is the leader, the charm will set up a user, secret and access + key and bucket, then set this data on the relation. It will also set + endpoint info on the relation as well. 
+ """ + if not is_leader(): + log('Not leader, defer s3 relation changed hook') + return + + if not ready_for_service(legacy=False): + log('Not ready for service, defer s3 relation changed hook') + return + + rid, remote_app, bucket = get_relation_info(relation_id) + if not bucket: + # Non-leader remote unit or otherwise missing bucket info + log( + 'No bucket app={}, rid={}, skip s3 rel'.format(remote_app, rid), + level=DEBUG + ) + return + + endpoint = '{}:{}'.format( + canonical_url(CONFIGS, PUBLIC), + listen_port(), + ) + + ssl_ca = config('ssl-ca') + if ssl_ca: + ca_chains = [ssl_ca] + else: + ca_chains = cert_rel_ca() + + existing_s3_info = s3_app(remote_app) + if existing_s3_info: + handle_existing_s3_info( + rid, remote_app, bucket, endpoint, ca_chains, existing_s3_info) + return + + # This is a new request, create user and bucket + user, access_key, secret_key = create_new_s3_user(remote_app) + create_bucket(user, access_key, secret_key, bucket, endpoint, ca_chains) + + # Store bucket, creds, endpoint in the app databag + update = { + "bucket": bucket, + "access-key": access_key, + "secret-key": secret_key, + "endpoint": endpoint, + "tls-ca-chain": json.dumps(ca_chains) + } + relation_set(app=remote_app, relation_settings=update) + set_s3_app(remote_app, bucket, access_key, secret_key) + log("Added new s3 app update: {}".format(update), level=DEBUG) + + +@hooks.hook("s3-relation-departed") +def s3_relation_departed(relation_id=None): + """Handle the s3 relation departed hook.""" + if not is_leader() or not ready_for_service(legacy=False): + log('Not leader or not ready, skip depart s3 rel') + return + + remote_app = remote_service_name() + clear_s3_app(remote_app) + log("Removed s3 app for: {}, {}".format( + relation_id, remote_app), level=DEBUG) + + if __name__ == '__main__': try: hooks.execute(sys.argv) diff --git a/ceph-radosgw/hooks/install_deps b/ceph-radosgw/hooks/install_deps index 0f116166..9d4f7646 100755 --- a/ceph-radosgw/hooks/install_deps +++ b/ceph-radosgw/hooks/install_deps @@ -1,7 +1,7 @@ #!/bin/bash -e # Install required dependencies for charm runtime -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython' 'pyudev') +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython' 'pyudev' 'boto3') check_and_install() { pkg="${1}-${2}" diff --git a/ceph-radosgw/hooks/s3-relation-changed b/ceph-radosgw/hooks/s3-relation-changed new file mode 120000 index 00000000..f94593a0 --- /dev/null +++ b/ceph-radosgw/hooks/s3-relation-changed @@ -0,0 +1 @@ +./hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/s3-relation-departed b/ceph-radosgw/hooks/s3-relation-departed new file mode 120000 index 00000000..f94593a0 --- /dev/null +++ b/ceph-radosgw/hooks/s3-relation-departed @@ -0,0 +1 @@ +./hooks.py \ No newline at end of file diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 97d71d9f..4c7a21a1 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import json import os import socket import subprocess @@ -19,6 +20,8 @@ from collections import OrderedDict from copy import deepcopy +import boto3 + import ceph_radosgw_context import multisite @@ -29,6 +32,7 @@ application_version_set, config, leader_get, + leader_set, log, ) from charmhelpers.contrib.openstack import ( @@ -507,3 +511,45 @@ def multisite_deployment(): return all((config('zone'), config('zonegroup'), config('realm'))) + + +def boto_client(access_key, secret_key, endpoint): + return boto3.resource("s3", + verify=False, + endpoint_url=endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + +def set_s3_app(app, bucket, access_key, secret_key): + """Store known s3 app info.""" + apps = all_s3_apps() + if app not in apps: + apps[app] = { + "bucket": bucket, + "access-key": access_key, + "secret-key": secret_key, + } + leader_set({"s3-apps": json.dumps(apps)}) + + +def s3_app(app): + """Return s3 app info.""" + apps = all_s3_apps() + return apps.get(app) + + +def all_s3_apps(): + """Return all s3 app info.""" + apps = leader_get("s3-apps") + if not apps: + return {} + return json.loads(apps) + + +def clear_s3_app(app): + """Delete s3 app info if present.""" + apps = all_s3_apps() + if app in apps: + del apps[app] + leader_set({"s3-apps": json.dumps(apps)}) diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 536e1cf3..40f0b25e 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -50,6 +50,8 @@ provides: interface: swift-proxy radosgw-user: interface: radosgw-user + s3: + interface: s3 peers: cluster: interface: swift-ha diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 9d39758f..56315aef 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -33,6 +33,8 @@ 'init_is_systemd', 'unitdata', 'config', + 'leader_get', + 'leader_set', ] @@ -308,3 +310,17 @@ def test_listen_port(self): self.assertEquals(443, utils.listen_port()) self.test_config.set('port', 42) self.assertEquals(42, utils.listen_port()) + + def test_set_s3_app(self): + self.leader_get.return_value = None + utils.set_s3_app('myapp', 'b', 'a', 's') + self.leader_set.assert_called_once_with({ + 's3-apps': + '{"myapp": {"bucket": "b", "access-key": "a", "secret-key": "s"}}' + }) + + def test_s3_app(self): + self.leader_get.return_value = '{"myapp": "a"}' + s3_info = utils.s3_app('myapp') + self.assertEqual(s3_info, 'a') + self.leader_get.assert_called_once_with('s3-apps') diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index e0fed1aa..bb399cb9 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import base64 import json from unittest.mock import ( patch, call, MagicMock, ANY @@ -34,6 +35,7 @@ 'apt_update', 'apt_install', 'apt_purge', + 'boto_client', 'config', 'cmp_pkgrevno', 'execd_preinstall', @@ -72,6 +74,7 @@ 'multisite_deployment', 'multisite', 'ready_for_service', + 'utils', ] @@ -600,6 +603,80 @@ def test_radosgw_user_changed(self, is_leader, canonical_url): expected, any_order=True) + @patch.object(ceph_hooks, 'canonical_url') + @patch.object(ceph_hooks, 'is_leader') + @patch.object(ceph_hooks, 's3_app') + @patch.object(ceph_hooks, 'set_s3_app') + def test_s3_relation_changed( + self, set_s3_app, s3_app, is_leader, canonical_url + ): + self.ready_for_service.return_value = True + self.remote_service_name.return_value = 'mys3app' + is_leader.return_value = True + canonical_url.return_value = 'http://radosgw' + self.multisite.create_user.return_value = ('access1', 'secret1') + s3_app.return_value = None + ceph_hooks.s3_relation_changed('mys3app:1') + + self.relation_set.assert_called_once_with( + app='mys3app', relation_settings=ANY + ) + self.boto_client.return_value.create_bucket.assert_called_once_with( + Bucket=ANY + ) + + def test_cert_rel_ca_app(self): + """Test getting back ca material from the certificates relation. + + This tests the case that the certificates relation has an app + databag with ca material. + """ + relation_data = { + 'certificates/0': { + 'ca': 'ca material', + 'chain': 'chain material', + } + } + self.relation_ids.return_value = relation_data.keys() + self.relation_get.side_effect = lambda rid, app: relation_data[rid] + ca_chain = ceph_hooks.cert_rel_ca() + self.assertEqual(len(ca_chain), 2) + self.assertEqual(base64.b64decode(ca_chain[0]), b'chain material') + self.assertEqual(base64.b64decode(ca_chain[1]), b'ca material') + + def test_cert_rel_ca_unit(self): + """Test getting back ca material from the certificates relation. + + This tests the case that the certificates relation has its ca + material in the unit relation, and only sets ca but not chain. + """ + relation_data = { + 'certificates/0': { + 'ca': 'ca material', + } + } + self.relation_ids.return_value = relation_data.keys() + self.related_units.return_value = ['certificates/0', 'certificates/1'] + self.relation_get.side_effect = [None, relation_data['certificates/0']] + ca_chain = ceph_hooks.cert_rel_ca() + self.assertEqual(len(ca_chain), 1) + self.assertEqual(base64.b64decode(ca_chain[0]), b'ca material') + + def test_update_s3_ca_info(self): + """Test updating the ca info for the s3 relation.""" + self.utils.all_s3_apps.return_value = { + 's3app': {'bucket': 'bucketname'}, + } + self.relation_ids.return_value = ['s3/0'] + ceph_hooks.update_s3_ca_info('foo_ca') + self.relation_set.assert_called_once_with( + rid='s3/0', app='s3app', + relation_settings={ + 'bucket': 'bucketname', + 'tls-ca-chain': 'foo_ca' + } + ) + class MiscMultisiteTests(CharmTestCase): From 28df66fc3e055a70d4392956df8348dc2176ba68 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 6 Feb 2024 18:19:30 +0100 Subject: [PATCH 2594/2699] Doc: describe the s3 interface Change-Id: I9379ca1bdc29eb2ef7cbd16255c457f37e30021f --- ceph-radosgw/README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index d5299108..335ae407 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -173,6 +173,28 @@ Corosync and Pacemaker backend HA functionality. 
See [OpenStack high availability][cdg-ha-apps] in the
[OpenStack Charms Deployment Guide][cdg] for details.

+## S3 Interface Support
+
+This charm provides [s3 charm interface support][s3spec]. This means
+it can act as a provider for applications wishing to make use of S3
+object storage via this relation. An application that implements the
+s3 requirer side of this relation can be related to ceph-radosgw.
+Using the mysql-operator charm as an example:
+
+    juju add-relation ceph-radosgw:s3 mysql:s3-parameters
+
+Upon forming that relation, ceph-radosgw will create a bucket for use
+by the requirer and transmit access information back to it. The
+requirer can then use this information to connect to the S3 endpoint
+and store application data.
+
+Only a single bucket will be created per requirer application. If an
+application relation is removed, the bucket *will* be preserved. If
+the application subsequently re-establishes the relation, the bucket
+will be reused.
+
+
 ## Network spaces

 This charm supports the use of Juju [network spaces][juju-docs-spaces] (Juju
@@ -249,3 +271,4 @@ Please report bugs on [Launchpad][lp-bugs-charm-ceph-radosgw].
 [juju-docs-spaces]: https://jaas.ai/docs/spaces
 [cdg-ceph-radosgw-multisite]: https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-rgw-multisite.html
 [ceph-bluestore-compression]: https://docs.ceph.com/en/latest/rados/configuration/bluestore-config-ref/#inline-compression
+[s3spec]: https://github.com/canonical/charm-relation-interfaces/tree/main/interfaces/s3/v0

From f1c195dffb030e474c9e34c2385791c03a9abe41 Mon Sep 17 00:00:00 2001
From: Billy Olsen
Date: Wed, 7 Feb 2024 18:20:54 -0700
Subject: [PATCH 2595/2699] Update bundles to specify source config option

The ceph-mon and ceph-osd charms changed the default source config
option to be bobcat in the master branches. This breaks focal tests as
the focal-bobcat cloud archive is not a valid source repository.

Update the config options for the test bundles to be explicit about
the source of package updates.
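The property the updated bundles guarantee is easy to check mechanically. A
small standalone script can flag applications that do not pin `source` (a
sketch only, assuming PyYAML is available; `apps_missing_source` is a
hypothetical helper, not part of the charms or their test suites):

    import sys
    import yaml

    def apps_missing_source(path):
        # YAML anchors such as &source/*source are resolved by safe_load,
        # so each application's options carry the final 'source' value.
        with open(path) as f:
            bundle = yaml.safe_load(f)
        return [name for name, app in bundle.get('applications', {}).items()
                if 'source' not in (app.get('options') or {})]

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            missing = apps_missing_source(path)
            if missing:
                print('{}: no explicit source for: {}'.format(
                    path, ', '.join(missing)))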
Change-Id: Ia535b0500d39409f939f0aa58271866efbcd55fb --- ceph-iscsi/tests/bundles/focal-ec.yaml | 6 ++++++ ceph-iscsi/tests/bundles/focal.yaml | 6 ++++++ ceph-iscsi/tests/bundles/jammy-ec.yaml | 6 ++++++ ceph-iscsi/tests/bundles/jammy-reef-ec.yaml | 1 + ceph-iscsi/tests/bundles/jammy-reef.yaml | 1 + ceph-iscsi/tests/bundles/jammy.yaml | 6 ++++++ ceph-iscsi/tests/bundles/lunar-ec.yaml | 6 ++++++ ceph-iscsi/tests/bundles/lunar.yaml | 6 ++++++ 8 files changed, 38 insertions(+) diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml index 8b017fb5..f27597f1 100644 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -1,3 +1,6 @@ +variables: + source: &source cloud:focal-yoga + local_overlay_enabled: False series: focal machines: @@ -36,6 +39,7 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + source: *source to: - '0' - '1' @@ -46,6 +50,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -59,6 +64,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 5293c2a3..951cfaa8 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -1,3 +1,6 @@ +variables: + source: &source cloud:focal-yoga + local_overlay_enabled: False series: focal machines: @@ -33,6 +36,7 @@ applications: num_units: 2 options: gateway-metadata-pool: iscsi-foo-metadata + source: *source to: - '0' - '1' @@ -43,6 +47,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -56,6 +61,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' diff --git a/ceph-iscsi/tests/bundles/jammy-ec.yaml b/ceph-iscsi/tests/bundles/jammy-ec.yaml index 0bbabc23..9de7d4fe 100644 --- a/ceph-iscsi/tests/bundles/jammy-ec.yaml +++ b/ceph-iscsi/tests/bundles/jammy-ec.yaml @@ -1,3 +1,6 @@ +variables: + source: &source distro + local_overlay_enabled: False series: jammy machines: @@ -36,6 +39,7 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + source: *source to: - '0' - '1' @@ -46,6 +50,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -59,6 +64,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' diff --git a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml index 45b749c0..646f8504 100644 --- a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml +++ b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml @@ -39,6 +39,7 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + source: *source to: - '0' - '1' diff --git a/ceph-iscsi/tests/bundles/jammy-reef.yaml b/ceph-iscsi/tests/bundles/jammy-reef.yaml index 93fc7fe0..70d043da 100644 --- a/ceph-iscsi/tests/bundles/jammy-reef.yaml +++ b/ceph-iscsi/tests/bundles/jammy-reef.yaml @@ -38,6 +38,7 @@ applications: num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata + source: *source to: - '0' - '1' diff --git a/ceph-iscsi/tests/bundles/jammy.yaml b/ceph-iscsi/tests/bundles/jammy.yaml index 6a57b0f7..4c26c99a 100644 --- a/ceph-iscsi/tests/bundles/jammy.yaml +++ b/ceph-iscsi/tests/bundles/jammy.yaml @@ -1,3 +1,6 @@ +variables: + source: &source distro + 
local_overlay_enabled: False series: jammy machines: @@ -35,6 +38,7 @@ applications: num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata + source: *source to: - '0' - '1' @@ -47,6 +51,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -60,6 +65,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' diff --git a/ceph-iscsi/tests/bundles/lunar-ec.yaml b/ceph-iscsi/tests/bundles/lunar-ec.yaml index b9c97710..d4519832 100644 --- a/ceph-iscsi/tests/bundles/lunar-ec.yaml +++ b/ceph-iscsi/tests/bundles/lunar-ec.yaml @@ -1,3 +1,6 @@ +variables: + source: &source distro + local_overlay_enabled: False series: lunar machines: @@ -36,6 +39,7 @@ applications: pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + source: *source to: - '0' - '1' @@ -46,6 +50,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -59,6 +64,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' diff --git a/ceph-iscsi/tests/bundles/lunar.yaml b/ceph-iscsi/tests/bundles/lunar.yaml index f9123b3c..041f75fe 100644 --- a/ceph-iscsi/tests/bundles/lunar.yaml +++ b/ceph-iscsi/tests/bundles/lunar.yaml @@ -1,3 +1,6 @@ +variables: + source: &source distro + local_overlay_enabled: False series: lunar machines: @@ -35,6 +38,7 @@ applications: num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata + source: *source to: - '0' - '1' @@ -47,6 +51,7 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' + source: *source to: - '0' - '1' @@ -60,6 +65,7 @@ applications: num_units: 3 options: monitor-count: '3' + source: *source to: - '3' - '4' From cd0784a1114f2fa7e2c1f57fff06d50f88f31247 Mon Sep 17 00:00:00 2001 From: Trent Lloyd Date: Thu, 29 Feb 2024 15:53:08 +0800 Subject: [PATCH 2596/2699] Fix failing functional tests Update tox.ini to support the new TEST_CONSTRAINTS_FILE Additionally pin netaddr in test-requirements.txt. The latest netaddr 1.2.1 release throws an error when passed None, which it previously didn't. This causes MonContextTest.test_ctxt_missing_data to error out. This should probably be fixed in charmhelpers later. Change-Id: I5bf8900c426395421c73ec3d52ebd691cc5496f8 --- ceph-radosgw/build-requirements.txt | 7 ------- ceph-radosgw/test-requirements.txt | 3 +++ ceph-radosgw/tox.ini | 30 +++++++++++++++++++---------- 3 files changed, 23 insertions(+), 17 deletions(-) delete mode 100644 ceph-radosgw/build-requirements.txt diff --git a/ceph-radosgw/build-requirements.txt b/ceph-radosgw/build-requirements.txt deleted file mode 100644 index b6d2452f..00000000 --- a/ceph-radosgw/build-requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -# NOTES(lourot): -# * We don't install charmcraft via pip anymore because it anyway spins up a -# container and scp the system's charmcraft snap inside it. So the charmcraft -# snap is necessary on the system anyway. -# * `tox -e build` successfully validated with charmcraft 1.2.1 - -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. 
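The netaddr pin added to test-requirements.txt below works around newer
netaddr releases raising when handed None, where 0.7.x failed softly. A
defensive guard of the kind charmhelpers could later adopt might look like
this (a sketch under that assumption, not the actual charmhelpers code):

    import netaddr

    def is_ipv6(address):
        # Guard the input first: netaddr >= 1.0 raises on None instead
        # of treating it as an unparseable address.
        if address is None:
            return False
        try:
            return netaddr.IPAddress(address).version == 6
        except (netaddr.AddrFormatError, ValueError, TypeError):
            return False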
diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt
index e972406e..59f3cbdb 100644
--- a/ceph-radosgw/test-requirements.txt
+++ b/ceph-radosgw/test-requirements.txt
@@ -22,6 +22,9 @@ pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack

+# netaddr is pinned in requirements.txt, but tempest below sometimes pulls in a newer version
+netaddr>0.7.16,<0.8.0
+
 # Needed for charm-glance:
 git+https://opendev.org/openstack/tempest.git#egg=tempest

diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini
index 2cb6ca16..31118e12 100644
--- a/ceph-radosgw/tox.ini
+++ b/ceph-radosgw/tox.ini
@@ -32,11 +32,13 @@ passenv =
 CS_*
 OS_*
 TEST_*
-deps = -r{toxinidir}/test-requirements.txt
+deps =
+    -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt}
+    -r{toxinidir}/test-requirements.txt

 [testenv:build]
 basepython = python3
-deps = -r{toxinidir}/build-requirements.txt
+deps =
 # charmcraft clean is done to ensure that
 # `tox -e build` always performs a clean, repeatable build.
 # For faster rebuilds during development,
@@ -49,18 +51,24 @@ commands =

 [testenv:py310]
 basepython = python3.10
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
+deps =
+    -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt}
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/test-requirements.txt

 [testenv:py3]
 basepython = python3
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
+deps =
+    -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt}
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/test-requirements.txt

 [testenv:pep8]
 basepython = python3
-deps = flake8==3.9.2
-       git+https://github.com/juju/charm-tools.git
+deps =
+    -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt}
+    flake8==3.9.2
+    git+https://github.com/juju/charm-tools.git
 commands = flake8 {posargs} hooks unit_tests tests actions lib files
     charm-proof

 [testenv:cover]
@@ -68,8 +76,10 @@
 # Technique based heavily upon
 # https://github.com/openstack/nova/blob/master/tox.ini
 basepython = python3
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
+deps =
+    -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt}
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/test-requirements.txt
 setenv = {[testenv]setenv}
          PYTHON=coverage run

From 669c9056c3fe2538a6dac05e76206ec2c67fcb34 Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Fri, 1 Mar 2024 09:57:09 +0100
Subject: [PATCH 2597/2699] Fix: defer cos-prometheus for bootstrap

If a COS prometheus changed event is processed but bootstrap hasn't
completed yet, we need to retry the event at a later time.
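The mechanism relied on is the operator framework's event deferral: a
deferred event is re-emitted on the next hook dispatch instead of being
dropped. A minimal sketch of the idiom (generic names such as
`precondition_met` are placeholders, not this charm's actual API):

    import logging

    from ops.charm import CharmBase

    logger = logging.getLogger(__name__)

    class ExampleCharm(CharmBase):
        def precondition_met(self):
            return False  # stand-in for e.g. ceph_utils.is_bootstrapped()

        def configure(self):
            logger.debug("configuring")

        def _on_changed(self, event):
            if not self.precondition_met():
                # defer() re-queues the event; it is re-emitted at the
                # start of the next dispatch rather than lost.
                logger.debug("not ready, deferring %s", event)
                event.defer()
                return
            self.configure()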
Closes-bug: #2042891
Change-Id: I3d274c09522f9d7ef56bc66f68d8488150c125d8
---
 ceph-mon/src/ceph_metrics.py | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py
index 25077782..a1745b31 100644
--- a/ceph-mon/src/ceph_metrics.py
+++ b/ceph-mon/src/ceph_metrics.py
@@ -61,15 +61,22 @@ def __init__(

     def _on_relation_changed(self, event):
         """Enable prometheus on relation change"""
-        if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped():
-            logger.debug(
-                "is_leader and is_bootstrapped, running rel changed: %s", event
-            )
-            mgr_config_set_rbd_stats_pools()
-            ceph_utils.mgr_enable_module("prometheus")
-            logger.debug("module_enabled")
-            self.update_alert_rules()
-            super()._on_relation_changed(event)
+        if not self._charm.unit.is_leader():
+            return
+
+        if not ceph_utils.is_bootstrapped():
+            logger.debug("not bootstrapped, defer rel changed: %s", event)
+            event.defer()
+            return
+
+        logger.debug(
+            "is_leader and is_bootstrapped, running rel changed: %s", event
+        )
+        mgr_config_set_rbd_stats_pools()
+        ceph_utils.mgr_enable_module("prometheus")
+        logger.debug("module_enabled")
+        self.update_alert_rules()
+        super()._on_relation_changed(event)

     def _on_relation_departed(self, event):
         """Disable prometheus on depart of relation"""

From 1ad57f6a3b167a0e2145accec7fa08c4ba2168cf Mon Sep 17 00:00:00 2001
From: Nobuto Murata
Date: Sun, 17 Mar 2024 22:55:39 +0900
Subject: [PATCH 2598/2699] Don't expect a static job name

A job name passed via the prometheus_scrape library doesn't end up as
a static job name in the prometheus configuration file in the COS
world even though COS expects a fixed string. Practically we cannot
have a static job name like job=ceph in any of the alert rules in COS
since the charms will convert the string "ceph" into:
> juju_MODELNAME_ID_APPNAME_prometheus_scrape_JOBNAME(ceph)-N

Let's give up the possibility of the static job name and use "up{}"
so it will be annotated with the model name/ID, etc. without any
specific job-related condition.

This will break the alert rules when one unit has more than one
scraping endpoint, because there will be no way to distinguish
multiple scraping jobs. Ceph MON only has one prometheus endpoint for
the time being, so this change shouldn't cause an immediate issue.
Overall, it's not ideal, but at least better than the current status,
which is an alert error out of the box.

The following alert rule:
> up{} == 0

will be converted and annotated as:
> up{juju_application="ceph-mon",juju_model="ceph",juju_model_uuid="UUID"} == 0

Closes-Bug: #2044062
Change-Id: I0df8bc0238349b5f03179dfb8f4da95da48140c7
---
 .../prometheus_alert_rules/prometheus_alerts.yml.default | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default
index df2fcf57..6e662928 100644
--- a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default
+++ b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default
@@ -343,7 +343,7 @@ groups:
       annotations:
         description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active.
If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'." summary: "The mgr/prometheus module is not available" - expr: "up{job=\"ceph\"} == 0" + expr: "up{} == 0" for: "1m" labels: oid: "1.3.6.1.4.1.50495.1.2.1.6.2" @@ -601,7 +601,7 @@ groups: annotations: description: "The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance." summary: "The scrape job for Ceph is missing from Prometheus" - expr: "absent(up{job=\"ceph\"})" + expr: "absent(up{})" for: "30s" labels: oid: "1.3.6.1.4.1.50495.1.2.1.12.1" From a66994b726239db023efaf81ed6b3a6b698b866c Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 26 Mar 2024 18:10:36 -0300 Subject: [PATCH 2599/2699] Implement the 'list-entities' action This action is the first step needed to implement key rotation in charmed Ceph. Change-Id: I59012621a0d9a2a1197fd7f8f0155cf85a37a056 --- ceph-mon/actions.yaml | 11 +++++ ceph-mon/src/charm.py | 2 + ceph-mon/src/ops_actions/__init__.py | 1 + ceph-mon/src/ops_actions/list_entities.py | 53 +++++++++++++++++++++++ ceph-mon/unit_tests/test_ceph_actions.py | 24 ++++++++++ 5 files changed, 91 insertions(+) create mode 100644 ceph-mon/src/ops_actions/list_entities.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 09056bb4..9d4bf08b 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -443,3 +443,14 @@ pg-repair: description: "Repair inconsistent placement groups, if safe to do so." reset-osd-count-report: description: "Update report of osds present in osd tree. Used for monitoring." +list-entities: + description: "Returns a list of entities recognized by the Ceph cluster." + params: + format: + type: string + enum: + - json + - yaml + - text + default: text + description: "The output format, either json, yaml or text (default)" diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 940b8779..0c9e4e8d 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -228,6 +228,8 @@ def __init__(self, *args): ops_actions.get_health.get_health_action) self._observe_action(self.on.get_erasure_profile_action, ops_actions.get_erasure_profile.erasure_profile) + self._observe_action(self.on.list_entities_action, + ops_actions.list_entities.list_entities) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 2513e9ee..1563577d 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -19,4 +19,5 @@ create_erasure_profile, get_health, get_erasure_profile, + list_entities, ) diff --git a/ceph-mon/src/ops_actions/list_entities.py b/ceph-mon/src/ops_actions/list_entities.py new file mode 100644 index 00000000..4b22b701 --- /dev/null +++ b/ceph-mon/src/ops_actions/list_entities.py @@ -0,0 +1,53 @@ +#! /usr/bin/env python3 +# +# Copyright 2024 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Retrieve a list of entities recognized by the Ceph cluster.""" + +import json +import logging +import subprocess +import yaml + + +logger = logging.getLogger(__name__) + + +def list_entities(event): + try: + # NOTE(lmlg): Don't bother passing --format=json or the likes, + # since it sometimes contain escaped strings that are incompatible + # with python's json module. This method of fetching entities is + # simple enough and portable across Ceph versions. + out = subprocess.check_call(['sudo', 'ceph', 'auth', 'ls']) + ret = [] + + for line in out.decode('utf-8').split('\n'): + if line and not (line.startswith(' ') or line.startswith('\t') or + line.startswith('\n')): + ret.append(line) + + fmt = event.params.get('format', 'text') + if fmt == 'json': + msg = json.dumps(str(ret)) + elif fmt == 'yaml': + msg = yaml.safe_dump(str(ret)) + else: + msg = '\n'.join(ret) + + event.set_results({'message': msg}) + except Exception as e: + logger.warning(e) + event.fail('failed to list entities: {}'.format(str(e))) diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index 86e34773..f5cf1fb8 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -17,6 +17,7 @@ import test_utils import ops_actions.copy_pool as copy_pool +import ops_actions.list_entities as list_entities with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -283,3 +284,26 @@ def test_get_erasure_profile_notfound(self, mock_ceph): event.set_results.assert_called_once_with(( {"message": None} )) + + +class ListEntities(test_utils.CharmTestCase): + """Run tests for action.""" + + def setUp(self): + self.harness = Harness(CephMonCharm) + self.harness.begin() + self.addCleanup(self.harness.cleanup) + + @mock.patch.object(list_entities.subprocess, 'check_call') + def test_list_entities(self, check_call): + check_call.return_value = b""" +client.admin + key: AQAOwwFmTR3TNxAAIsdYgastd0uKntPtEnoWug== +mgr.0 + key: AQAVwwFm/CmaJhAAdacns6DdFe4xZE1iwj8izg== +""" + event = test_utils.MockActionEvent({}) + self.harness.charm.on_list_entities_action(event) + event.set_results.assert_called_once_with( + {"message": "client.admin\nmgr.0"} + ) From aef9cd274e375542b8cf9f99321aa56dbc2530a9 Mon Sep 17 00:00:00 2001 From: Shunde Zhang Date: Wed, 3 Jan 2024 04:44:03 +0000 Subject: [PATCH 2600/2699] Add a config option for virtual hosted bucket func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1187 Closes-Bug: #1871745 Change-Id: I295baab496d1eb95daaa8073d4119d01b90d0b38 --- ceph-radosgw/config.yaml | 10 +++ ceph-radosgw/hooks/ceph_radosgw_context.py | 13 +++- ceph-radosgw/hooks/hooks.py | 14 ++++- ceph-radosgw/hooks/utils.py | 5 ++ ceph-radosgw/templates/ceph.conf | 4 ++ .../templates/openstack_https_frontend.conf | 3 + .../unit_tests/test_ceph_radosgw_context.py | 62 +++++++++++++++++++ 7 files changed, 109 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index 2b6f48af..a64a8f18 100644 --- 
a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -527,3 +527,13 @@ options: later this defaults to 'beast' and for older releases (and on architectures where beast is not supported) 'civetweb'. Civetweb support is removed at Ceph Quincy. + virtual-hosted-bucket-enabled: + type: boolean + default: false + description: | + If true, radosgw is configured to allow the use of virtual hosted bucket + name. This also requires the creation of a DNS CNAME to point all wildcard + subdomains (*.radosgw.domain) to the radosgw IP (or VIP). The host name part + (radosgw.domain) is taken from os-public-hostname so it must have a value too. + + https://docs.ceph.com/en/latest/radosgw/s3/commons/ diff --git a/ceph-radosgw/hooks/ceph_radosgw_context.py b/ceph-radosgw/hooks/ceph_radosgw_context.py index e57ceef2..babc0a74 100644 --- a/ceph-radosgw/hooks/ceph_radosgw_context.py +++ b/ceph-radosgw/hooks/ceph_radosgw_context.py @@ -62,7 +62,10 @@ class ApacheSSLContext(context.ApacheSSLContext): def __call__(self): self.external_ports = [utils.listen_port()] - return super(ApacheSSLContext, self).__call__() + ctx = super(ApacheSSLContext, self).__call__() + ctx['virtual_hosted_bucket_enabled'] = \ + config('virtual-hosted-bucket-enabled') + return ctx class HAProxyContext(context.HAProxyContext): @@ -285,6 +288,8 @@ def __call__(self): 'loglevel': config('loglevel'), 'port': port, 'ipv6': config('prefer-ipv6'), + 'virtual_hosted_bucket_enabled': + config('virtual-hosted-bucket-enabled'), # The public unit IP is only used in case the authentication is # *Not* keystone - in which case it is used to make sure the # storage endpoint returned by the built-in auth is the HAproxy @@ -297,6 +302,12 @@ def __call__(self): 'frontend': http_frontend, 'behind_https_proxy': https(), } + if config('virtual-hosted-bucket-enabled'): + if config('os-public-hostname'): + ctxt['public_hostname'] = config('os-public-hostname') + else: + log("When virtual_hosted_bucket_enabled is true, " + "os_public_hostname must have a value.", level=WARNING) # NOTE(dosaboy): these sections must correspond to what is supported in # the config template. 
diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index 6a7db9a2..ffecae3a 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -599,9 +599,21 @@ def post_series_upgrade(): @hooks.hook('certificates-relation-joined') def certs_joined(relation_id=None): + cert_req_obj = get_certificate_request() + if config('virtual-hosted-bucket-enabled'): + import json + cert_req = json.loads(cert_req_obj["cert_requests"]) + for cn in cert_req.keys(): + if cn == config('os-public-hostname'): + log("Adding wildcard hostname for virtual hosted buckets", + "INFO") + cert_req[cn]["sans"].append("*."+config('os-public-hostname')) + cert_req_obj['cert_requests'] = json.dumps(cert_req, + sort_keys=True) + log("Cert request: {}".format(cert_req_obj), "INFO") relation_set( relation_id=relation_id, - relation_settings=get_certificate_request()) + relation_settings=cert_req_obj) @hooks.hook('certificates-relation-changed') diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 4c7a21a1..3a7a6d97 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -294,6 +294,11 @@ def check_optional_config_and_relations(configs): except ValueError as e: return ('blocked', 'Invalid configuration: {}'.format(str(e))) + if (config('virtual-hosted-bucket-enabled') and + not config('os-public-hostname')): + return ('blocked', "os-public-hostname must have a value " + "when virtual hosted bucket is enabled") + # return 'unknown' as the lowest priority to not clobber an existing # status. return 'unknown', '' diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index d126b20b..03fa83b2 100644 --- a/ceph-radosgw/templates/ceph.conf +++ b/ceph-radosgw/templates/ceph.conf @@ -38,6 +38,10 @@ rgw socket path = /tmp/radosgw.sock log file = /var/log/ceph/radosgw.log {% endif %} +{% if virtual_hosted_bucket_enabled -%} +rgw_dns_name = {{ public_hostname }} +{% endif %} + {% if rgw_zone -%} rgw_zone = {{ rgw_zone }} {% endif %} diff --git a/ceph-radosgw/templates/openstack_https_frontend.conf b/ceph-radosgw/templates/openstack_https_frontend.conf index 5df76315..2f061654 100644 --- a/ceph-radosgw/templates/openstack_https_frontend.conf +++ b/ceph-radosgw/templates/openstack_https_frontend.conf @@ -5,6 +5,9 @@ Listen {{ ext_port }} {% for address, endpoint, ext, int in endpoints -%} ServerName {{ endpoint }} +{% if virtual_hosted_bucket_enabled and address != endpoint %} + ServerAlias *.{{ endpoint }} +{% endif %} SSLEngine on # This section is based on Mozilla's recommendation diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index b426c2ae..f3b3289a 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -153,6 +153,7 @@ def _relation_get(attr, unit, rid): 'rgw_zonegroup': 'zonegroup1', 'rgw_realm': 'realmX', 'behind_https_proxy': False, + 'virtual_hosted_bucket_enabled': False, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -209,6 +210,7 @@ def _relation_get(attr, unit, rid): 'rgw_zonegroup': 'zonegroup1', 'rgw_realm': 'realmX', 'behind_https_proxy': True, + 'virtual_hosted_bucket_enabled': False, } self.assertEqual(expect, mon_ctxt()) self.assertFalse(mock_ensure_rsv_v6.called) @@ -273,6 +275,7 @@ def _relation_get(attr, unit, rid): 'rgw_zonegroup': 'zonegroup1', 'rgw_realm': 'realmX', 'behind_https_proxy': False, + 
'virtual_hosted_bucket_enabled': False,
         }
         self.assertEqual(expect, mon_ctxt())
         self.assertFalse(mock_ensure_rsv_v6.called)
@@ -351,6 +354,7 @@ def _relation_get(attr, unit, rid):
             'rgw_zonegroup': 'zonegroup1',
             'rgw_realm': 'realmX',
             'behind_https_proxy': False,
+            'virtual_hosted_bucket_enabled': False,
         }
         self.assertEqual(expect, mon_ctxt())
@@ -404,6 +408,7 @@ def _relation_get(attr, unit, rid):
             'rgw_zonegroup': 'zonegroup1',
             'rgw_realm': 'realmX',
             'behind_https_proxy': False,
+            'virtual_hosted_bucket_enabled': False,
         }
         self.assertEqual(expect, mon_ctxt())
@@ -515,6 +520,63 @@ def _relation_get(attr, unit, rid):
             'rgw_zonegroup': 'zonegroup1',
             'rgw_realm': 'realmX',
             'behind_https_proxy': False,
+            'virtual_hosted_bucket_enabled': False,
+        }
+        self.assertEqual(expect, mon_ctxt())
+
+    @patch('ceph_radosgw_context.https')
+    @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids')
+    @patch('charmhelpers.contrib.hahelpers.cluster.config_get')
+    @patch.object(ceph, 'config', lambda *args:
+                  '{"client.radosgw.gateway": {"rgw init timeout": 60}}')
+    def test_ctxt_virtual_hosted_bucket(self, mock_config_get,
+                                        mock_relation_ids, mock_https):
+        mock_https.return_value = False
+        mock_relation_ids.return_value = []
+        self.test_config.set('virtual-hosted-bucket-enabled', True)
+        self.test_config.set('os-public-hostname', 'rgw.example.com')
+        mock_config_get.side_effect = self.test_config.get
+        self.socket.gethostname.return_value = 'testhost'
+        mon_ctxt = context.MonContext()
+        addresses = ['10.5.4.1', '10.5.4.2', '10.5.4.3']
+
+        def _relation_get(attr, unit, rid):
+            if attr == 'ceph-public-address':
+                return addresses.pop()
+            elif attr == 'auth':
+                return 'cephx'
+            elif attr == 'rgw.testhost_key':
+                return 'testkey'
+            elif attr == 'fsid':
+                return 'testfsid'
+
+        self.relation_get.side_effect = _relation_get
+        self.relation_ids.return_value = ['mon:6']
+        self.related_units.return_value = ['ceph/0', 'ceph/1', 'ceph/2']
+        self.multisite.plain_list = self.plain_list_stub
+        self.determine_api_port.return_value = 70
+        expect = {
+            'auth_supported': 'cephx',
+            'hostname': 'testhost',
+            'mon_hosts': '10.5.4.1 10.5.4.2 10.5.4.3',
+            'old_auth': False,
+            'systemd_rgw': True,
+            'unit_public_ip': '10.255.255.255',
+            'use_syslog': 'false',
+            'loglevel': 1,
+            'port': 70,
+            'client_radosgw_gateway': {'rgw init timeout': 60},
+            'ipv6': False,
+            'rgw_zone': 'default',
+            'fsid': 'testfsid',
+            'rgw_swift_versioning': False,
+            'frontend': 'beast',
+            'relaxed_s3_bucket_names': False,
+            'rgw_zonegroup': 'zonegroup1',
+            'rgw_realm': 'realmX',
+            'behind_https_proxy': False,
+            'virtual_hosted_bucket_enabled': True,
+            'public_hostname': 'rgw.example.com',
+        }
+        self.assertEqual(expect, mon_ctxt())

From 86a9211e58dd9bdf23917e22ca07d847876d5fcc Mon Sep 17 00:00:00 2001
From: Luciano Lo Giudice
Date: Thu, 4 Apr 2024 15:50:59 -0300
Subject: [PATCH 2601/2699] Implement the 'rotate-key' action for managers

This patchset implements key rotation for managers only. The user can
specify either the full entity name (i.e.: 'mgr.XXXX') or simply
'mgr', which stands for the local manager.

After the entity's directory is located, a new pending key is
generated, the keyring file is mutated to include the new key and then
replaced in situ. Lastly, the manager service is restarted.

Note that Ceph only has one active manager at any given time, so it
only makes sense to call this action on _every_ mon unit.
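The rotation builds on Ceph's pending-key support: `ceph auth
get-or-create-pending` returns a fresh key that coexists with the current
one until the daemon first authenticates with it, which is what makes the
in-place keyring swap safe. Stripped of the charm plumbing, the core step
looks roughly like this (a condensed sketch of the flow the diff below
implements; error handling omitted):

    import json
    import subprocess

    def pending_key(entity):
        # The old key stays valid until the daemon presents the new
        # one, so rotation does not interrupt the service.
        out = subprocess.check_output(
            ['sudo', 'ceph', 'auth', 'get-or-create-pending',
             entity, '--format=json'])
        return json.loads(out)[0]['pending_key']

An operator would then trigger the rotation with something like `juju
run-action --wait ceph-mon/0 rotate-key entity=mgr` (exact invocation
depends on the Juju version in use).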
Change-Id: Ie24b3f30922fa5be6641e37635440891614539d5 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1195 --- ceph-mon/actions.yaml | 7 ++ ceph-mon/src/charm.py | 2 + ceph-mon/src/ops_actions/__init__.py | 1 + ceph-mon/src/ops_actions/list_entities.py | 2 +- ceph-mon/src/ops_actions/rotate_key.py | 103 ++++++++++++++++++++++ ceph-mon/tests/tests.yaml | 1 + ceph-mon/unit_tests/test_ceph_actions.py | 55 +++++++++++- 7 files changed, 167 insertions(+), 4 deletions(-) create mode 100644 ceph-mon/src/ops_actions/rotate_key.py diff --git a/ceph-mon/actions.yaml b/ceph-mon/actions.yaml index 9d4bf08b..215c7051 100644 --- a/ceph-mon/actions.yaml +++ b/ceph-mon/actions.yaml @@ -454,3 +454,10 @@ list-entities: - text default: text description: "The output format, either json, yaml or text (default)" +rotate-key: + description: "Rotate the key of an entity in the Ceph cluster" + params: + entity: + type: string + description: The entity for which to rotate the key + required: [entity] diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 0c9e4e8d..89b6df11 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -230,6 +230,8 @@ def __init__(self, *args): ops_actions.get_erasure_profile.erasure_profile) self._observe_action(self.on.list_entities_action, ops_actions.list_entities.list_entities) + self._observe_action(self.on.rotate_key_action, + ops_actions.rotate_key.rotate_key) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/__init__.py b/ceph-mon/src/ops_actions/__init__.py index 1563577d..3a2c227a 100644 --- a/ceph-mon/src/ops_actions/__init__.py +++ b/ceph-mon/src/ops_actions/__init__.py @@ -20,4 +20,5 @@ get_health, get_erasure_profile, list_entities, + rotate_key, ) diff --git a/ceph-mon/src/ops_actions/list_entities.py b/ceph-mon/src/ops_actions/list_entities.py index 4b22b701..8726a9c2 100644 --- a/ceph-mon/src/ops_actions/list_entities.py +++ b/ceph-mon/src/ops_actions/list_entities.py @@ -31,7 +31,7 @@ def list_entities(event): # since it sometimes contain escaped strings that are incompatible # with python's json module. This method of fetching entities is # simple enough and portable across Ceph versions. - out = subprocess.check_call(['sudo', 'ceph', 'auth', 'ls']) + out = subprocess.check_output(['sudo', 'ceph', 'auth', 'ls']) ret = [] for line in out.decode('utf-8').split('\n'): diff --git a/ceph-mon/src/ops_actions/rotate_key.py b/ceph-mon/src/ops_actions/rotate_key.py new file mode 100644 index 00000000..68b14277 --- /dev/null +++ b/ceph-mon/src/ops_actions/rotate_key.py @@ -0,0 +1,103 @@ +#! /usr/bin/env python3 +# +# Copyright 2024 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Rotate the key of one or more entities.""" + +import configparser +import json +import logging +import os +import subprocess + +import charms.operator_libs_linux.v1.systemd as systemd + + +logger = logging.getLogger(__name__) +MGR_DIR = "/var/lib/ceph/mgr" + + +def _find_mgr_path(base): + name = "ceph-" + base + try: + if name in os.listdir(MGR_DIR): + return MGR_DIR + "/" + name + except FileNotFoundError as exc: + logger.exception(exc) + return None + + +def _create_key(entity, event): + try: + cmd = ["sudo", "ceph", "auth", "get-or-create-pending", + entity, "--format=json"] + out = subprocess.check_output(cmd).decode("utf-8") + return json.loads(out)[0]["pending_key"] + except (subprocess.SubprocessError, json.decoder.JSONDecodeError) as exc: + logger.exception(exc) + event.fail("Failed to create key: %s" % str(exc)) + raise + + +def _replace_keyring_file(path, entity, key, event): + path += "/keyring" + try: + c = configparser.ConfigParser(default_section=None) + c.read(path) + c[entity]["key"] = key + + with open(path, "w") as file: + c.write(file) + except (KeyError, IOError) as exc: + logger.exception(exc) + event.fail("Failed to replace keyring file: %s" % str(exc)) + raise + + +def _restart_daemon(entity, event): + try: + systemd.service_restart(entity) + except systemd.SystemdError as exc: + logger.exception(exc) + event.fail("Failed to reload daemon: %s" % str(exc)) + raise + + +def rotate_key(event) -> None: + """Rotate the key of the specified entity.""" + entity = event.params.get("entity") + if entity.startswith("mgr"): + if len(entity) > 3: + if entity[3] != '.': + event.fail("Invalid entity name: %s" % entity) + return + path = _find_mgr_path(entity[4:]) + if path is None: + event.fail("Entity %s not found" % entity) + return + else: # just 'mgr' + try: + path = MGR_DIR + "/" + os.listdir(MGR_DIR)[0] + entity = "mgr." 
+ os.path.basename(path)[5:] # skip 'ceph-' + except Exception: + event.fail("No managers found") + return + + key = _create_key(entity, event) + _replace_keyring_file(path, entity, key, event) + _restart_daemon("ceph-mgr@%s.service" % entity[4:], event) + event.set_results({"message": "success"}) + else: + event.fail("Unknown entity: %s" % entity) diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 18134ca0..e2aef903 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -39,3 +39,4 @@ tests: - zaza.openstack.charm_tests.ceph.tests.CephAuthTest - zaza.openstack.charm_tests.ceph.tests.CephMonActionsTest - zaza.openstack.charm_tests.ceph.mon.tests.CephPermissionUpgradeTest + - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index f5cf1fb8..b3be8164 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -18,6 +18,7 @@ import test_utils import ops_actions.copy_pool as copy_pool import ops_actions.list_entities as list_entities +import ops_actions.rotate_key as rotate_key with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: @@ -294,9 +295,9 @@ def setUp(self): self.harness.begin() self.addCleanup(self.harness.cleanup) - @mock.patch.object(list_entities.subprocess, 'check_call') - def test_list_entities(self, check_call): - check_call.return_value = b""" + @mock.patch.object(list_entities.subprocess, 'check_output') + def test_list_entities(self, check_output): + check_output.return_value = b""" client.admin key: AQAOwwFmTR3TNxAAIsdYgastd0uKntPtEnoWug== mgr.0 @@ -307,3 +308,51 @@ def test_list_entities(self, check_call): event.set_results.assert_called_once_with( {"message": "client.admin\nmgr.0"} ) + + +# Needs to be outside as the decorator wouldn't find it otherwise. 
+MGR_KEYRING_FILE = """ +[mgr.host-1] + key = old-key +""" + + +class RotateKey(test_utils.CharmTestCase): + """Run tests for action.""" + + def setUp(self): + self.harness = Harness(CephMonCharm) + self.harness.begin() + self.addCleanup(self.harness.cleanup) + + def test_invalid_entity(self): + event = test_utils.MockActionEvent({'entity': '???'}) + self.harness.charm.on_rotate_key_action(event) + event.fail.assert_called_once() + + def test_invalid_mgr(self): + event = test_utils.MockActionEvent({'entity': 'mgr-123'}) + self.harness.charm.on_rotate_key_action(event) + event.fail.assert_called_once() + + @mock.patch('builtins.open', new_callable=mock.mock_open, + read_data=MGR_KEYRING_FILE) + @mock.patch.object(rotate_key.systemd, 'service_restart') + @mock.patch.object(rotate_key.subprocess, 'check_output') + @mock.patch.object(rotate_key.os, 'listdir') + def test_rotate_mgr_key(self, listdir, check_output, service_restart, + _open): + listdir.return_value = ['ceph-host-1'] + check_output.return_value = b'[{"pending_key": "new-key"}]' + + event = test_utils.MockActionEvent({'entity': 'mgr.host-1'}) + self.harness.charm.on_rotate_key_action(event) + + event.set_results.assert_called_with({'message': 'success'}) + listdir.assert_called_once_with('/var/lib/ceph/mgr') + check_output.assert_called_once() + service_restart.assert_called_once_with('ceph-mgr@host-1.service') + + calls = any(x for x in _open.mock_calls + if any(p is not None and 'new-key' in p for p in x.args)) + self.assertTrue(calls) From f7674c9a78f2a97e33337961add338a7565c7d7b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 9 Apr 2024 09:51:10 +0200 Subject: [PATCH 2602/2699] Sync charmhelpers Prep for noble/caracal, unit test fix Change-Id: I5ad6b618bb3a5660a61ac3c536430c8e380e655c --- .../charmhelpers/contrib/openstack/context.py | 3 +- .../charmhelpers/contrib/openstack/utils.py | 1 + .../charmhelpers/contrib/storage/linux/lvm.py | 6 +--- ceph-osd/hooks/charmhelpers/core/host.py | 7 +++-- ceph-osd/hooks/charmhelpers/fetch/ubuntu.py | 10 +++++++ ceph-osd/hooks/charmhelpers/osplatform.py | 28 +++++++++++++------ ceph-osd/unit_tests/test_status.py | 1 + 7 files changed, 40 insertions(+), 16 deletions(-) diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py index 42f15032..1e667fb0 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py @@ -545,7 +545,7 @@ def _resolve(key): 'internal_auth_url': internal_auth_url, }) - # we keep all veriables in ctxt for compatibility and + # we keep all variables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic # templating if keystonemiddleware_os_release: @@ -557,6 +557,7 @@ def _resolve(key): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading + ctxt['admin_user_id'] = _resolve('service_user_id') ctxt['admin_tenant_id'] = _resolve('service_tenant_id') ctxt['admin_domain_id'] = _resolve('service_domain_id') return ctxt diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py index 429b09e5..da711c65 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py @@ -161,6 +161,7 @@ ('2022.2', 'zed'), ('2023.1', 'antelope'), ('2023.2', 'bobcat'), + ('2024.1', 'caracal'), ]) # The ugly duckling - must list 
releases oldest to newest diff --git a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py index d0a57211..0d294c79 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-osd/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -17,8 +17,6 @@ CalledProcessError, check_call, check_output, - Popen, - PIPE, ) @@ -58,9 +56,7 @@ def remove_lvm_physical_volume(block_device): :param block_device: str: Full path of block device to scrub. ''' - p = Popen(['pvremove', '-ff', block_device], - stdin=PIPE) - p.communicate(input='y\n') + check_call(['pvremove', '-ff', '--yes', block_device]) def list_lvm_volume_group(block_device): diff --git a/ceph-osd/hooks/charmhelpers/core/host.py b/ceph-osd/hooks/charmhelpers/core/host.py index 70dde6a5..def403c5 100644 --- a/ceph-osd/hooks/charmhelpers/core/host.py +++ b/ceph-osd/hooks/charmhelpers/core/host.py @@ -256,8 +256,11 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(service_name=service_name): - service('unmask', service_name) - service('enable', service_name) + if service('is-enabled', service_name): + log('service {} already enabled'.format(service_name), level=DEBUG) + else: + service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py index 1be992c4..d0089eb7 100644 --- a/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-osd/hooks/charmhelpers/fetch/ubuntu.py @@ -246,6 +246,14 @@ 'bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', + # caracal + 'caracal': 'jammy-updates/caracal', + 'jammy-caracal': 'jammy-updates/caracal', + 'jammy-caracal/updates': 'jammy-updates/caracal', + 'jammy-updates/caracal': 'jammy-updates/caracal', + 'caracal/proposed': 'jammy-proposed/caracal', + 'jammy-caracal/proposed': 'jammy-proposed/caracal', + 'jammy-proposed/caracal': 'jammy-proposed/caracal', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -279,6 +287,7 @@ 'zed', 'antelope', 'bobcat', + 'caracal', ) @@ -308,6 +317,7 @@ ('kinetic', 'zed'), ('lunar', 'antelope'), ('mantic', 'bobcat'), + ('noble', 'caracal'), ]) diff --git a/ceph-osd/hooks/charmhelpers/osplatform.py b/ceph-osd/hooks/charmhelpers/osplatform.py index 1ace468f..5d121866 100644 --- a/ceph-osd/hooks/charmhelpers/osplatform.py +++ b/ceph-osd/hooks/charmhelpers/osplatform.py @@ -9,19 +9,13 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warnings *not* disabled, as we certainly need to fix this. 
- if hasattr(platform, 'linux_distribution'): - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - else: - current_platform = _get_platform_from_fs() + current_platform = _get_current_platform() if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" - elif "debian" in current_platform: + elif "debian" in current_platform or "Debian" in current_platform: # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" @@ -36,6 +30,24 @@ def get_platform(): .format(current_platform)) +def _get_current_platform(): + """Return the current platform information for the OS. + + Attempts to lookup linux distribution information from the platform + module for releases of python < 3.7. For newer versions of python, + the platform is determined from the /etc/os-release file. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + return current_platform + + def _get_platform_from_fs(): """Get Platform from /etc/os-release.""" with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: diff --git a/ceph-osd/unit_tests/test_status.py b/ceph-osd/unit_tests/test_status.py index 433f92e6..98a87a69 100644 --- a/ceph-osd/unit_tests/test_status.py +++ b/ceph-osd/unit_tests/test_status.py @@ -43,6 +43,7 @@ ] +@patch.object(hooks, 'get_mon_hosts', new=MagicMock(return_value=['1.1.1.1'])) class ServiceStatusTestCase(test_utils.CharmTestCase): def setUp(self): From 23af03a31a281e04134da63965312e0b9bb27c41 Mon Sep 17 00:00:00 2001 From: Federico Bosi Date: Wed, 6 Mar 2024 16:34:29 +0100 Subject: [PATCH 2603/2699] Don't bind to ipv4 if prefer-ipv6 is true Following https://tracker.ceph.com/issues/52867 we need to tell the OSD which address family to use via the ms_bind_ipv4/6 config flags. I added them to the ceph.conf template and updated the config hook. 
Launchpad: https://bugs.launchpad.net/charm-ceph-osd/+bug/2056337 Change-Id: Ifbd59c204a82109e2b71991078f59537f6db42d3 --- ceph-osd/hooks/ceph_hooks.py | 2 ++ ceph-osd/templates/ceph.conf | 6 ++++ ceph-osd/unit_tests/test_ceph_hooks.py | 50 ++++++++++++++++++++++++++ 3 files changed, 58 insertions(+) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 993166a7..ae5815a5 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -488,6 +488,8 @@ def get_ceph_context(upgrading=False): cephcontext['bdev_discard'] = False if config('prefer-ipv6'): + cephcontext['ms_bind_ipv4'] = False + cephcontext['ms_bind_ipv6'] = True dynamic_ipv6_address = get_ipv6_addr()[0] if not public_network: cephcontext['public_addr'] = dynamic_ipv6_address diff --git a/ceph-osd/templates/ceph.conf b/ceph-osd/templates/ceph.conf index 931ff8c2..81b9ea1a 100644 --- a/ceph-osd/templates/ceph.conf +++ b/ceph-osd/templates/ceph.conf @@ -15,6 +15,12 @@ err to syslog = {{ use_syslog }} clog to syslog = {{ use_syslog }} debug osd = {{ loglevel }}/5 +{% if ms_bind_ipv6 %} +ms_bind_ipv6 = true +{%- endif %} +{%- if ms_bind_ipv4 == false %} +ms_bind_ipv4 = false +{% endif %} {% if ceph_public_network is string %} public network = {{ ceph_public_network }} {%- endif %} diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index b4cceea0..34b0bbeb 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -431,6 +431,56 @@ def test_get_ceph_context_bluestore_compression( 'fake-bluestore-compression-key': 'fake-value'} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext', + lambda: lambda: {}) + @patch.object(ceph_hooks, 'get_mon_hosts', + lambda *args: ['2a01:348:2f4:0:685e:5748:ae62:209f', + '2a01:348:2f4:0:685e:5748:ae62:20a0']) + @patch.object(ceph_hooks, 'get_ipv6_addr', + lambda *args: ['2a01:348:2f4:0:685e:5748:ae62:209f']) + @patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {}) + @patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234') + @patch.object(ceph_hooks, 'get_auth', lambda *args: False) + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_ipv6_only_env_bindings(self, mock_config, mock_config2): + config = copy.deepcopy(CHARM_CONFIG) + config['prefer-ipv6'] = True + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = { + 'auth_supported': False, + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'old_auth': False, + 'crush_initial_weight': '0', + 'osd_journal_size': 1024, + 'osd_max_backfills': 1, + 'osd_recovery_max_active': 2, + 'osd_from_client': OrderedDict(), + 'osd_from_client_conflict': OrderedDict(), + 'short_object_len': True, + 'upgrade_in_progress': False, + 'use_syslog': 'true', + 'bdev_discard': True, + 'bluestore_experimental': False, + 'bluestore_block_wal_size': 0, + 'bluestore_block_db_size': 0, + 'cluster_addr': '2a01:348:2f4:0:685e:5748:ae62:209f', + 'public_addr': '2a01:348:2f4:0:685e:5748:ae62:209f', + 'mon_hosts': '2a01:348:2f4:0:685e:5748:ae62:209f ' + '2a01:348:2f4:0:685e:5748:ae62:20a0', + 'ms_bind_ipv4': False, + 'ms_bind_ipv6': True, + } + self.assertEqual(ctxt, expected) + 
@patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'service_restart') @patch.object(ceph_hooks, 'service_reload') From fd07bb35bd494d2042b22bc4496a741ed67fdb50 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 9 Apr 2024 12:00:25 -0300 Subject: [PATCH 2604/2699] Implement key rotation for RadosGW daemons This patchset implements the needed functionality on the ceph-mon charm to rotate the key of a specified RadosGW daemon. func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1195 Change-Id: I6dbbf6ca1292a34f5d3b4ff8f2966c8b77f53f48 --- ceph-mon/src/charm.py | 7 ++++++- ceph-mon/src/ops_actions/rotate_key.py | 28 +++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 89b6df11..1458e3c3 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -194,6 +194,11 @@ def notify_clients(self, _event): for relation in self.model.relations['admin']: hooks.admin_relation_joined(str(relation.id)) + def on_rotate_key_action(self, event): + ops_actions.rotate_key.rotate_key( + event, self.framework.model + ) + def __init__(self, *args): super().__init__(*args) self._stored.is_started = True @@ -231,7 +236,7 @@ def __init__(self, *args): self._observe_action(self.on.list_entities_action, ops_actions.list_entities.list_entities) self._observe_action(self.on.rotate_key_action, - ops_actions.rotate_key.rotate_key) + self.on_rotate_key_action) fw.observe(self.on.install, self.on_install) fw.observe(self.on.config_changed, self.on_config) diff --git a/ceph-mon/src/ops_actions/rotate_key.py b/ceph-mon/src/ops_actions/rotate_key.py index 68b14277..28592af9 100644 --- a/ceph-mon/src/ops_actions/rotate_key.py +++ b/ceph-mon/src/ops_actions/rotate_key.py @@ -75,7 +75,31 @@ def _restart_daemon(entity, event): raise -def rotate_key(event) -> None: +def _handle_rgw_key_rotation(entity, event, model): + rgw_name = entity[7:] # Skip 'client.' + relations = model.relations.get('radosgw') + if not relations: + event.fail('No RadosGW relations found') + return + + for relation in relations: + for unit in relation.units: + try: + data = relation.data + if data[unit]["key_name"] != rgw_name: + continue + except KeyError: + logger.exception('key name not found in relation data bag') + continue + + data[model.unit][rgw_name + "_key"] = _create_key(entity, event) + event.set_results({"message": "success"}) + return + + event.fail("Entity %s not found" % entity) + + +def rotate_key(event, model=None) -> None: """Rotate the key of the specified entity.""" entity = event.params.get("entity") if entity.startswith("mgr"): @@ -99,5 +123,7 @@ def rotate_key(event) -> None: _replace_keyring_file(path, entity, key, event) _restart_daemon("ceph-mgr@%s.service" % entity[4:], event) event.set_results({"message": "success"}) + elif entity.startswith('client.rgw.'): + _handle_rgw_key_rotation(entity, event, model) else: event.fail("Unknown entity: %s" % entity) From a4a84b7586e3afe6d444ace94b437bafc629f54b Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 9 Apr 2024 12:02:05 -0300 Subject: [PATCH 2605/2699] Implement key rotation on the ceph-radosgw charm This patchset implements key rotation in the ceph-radosgw charm, by replacing the keyring file if it exists and the ceph-mon relation reports a new key. 
Change-Id: I447b5f827e39118e7dbd430b1c63b3ec4ea3e176 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1195 --- ceph-radosgw/hooks/ceph_rgw.py | 26 +++++++++++++------------- ceph-radosgw/hooks/hooks.py | 4 +--- ceph-radosgw/tests/tests.yaml | 1 + 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/ceph-radosgw/hooks/ceph_rgw.py b/ceph-radosgw/hooks/ceph_rgw.py index d3a98604..85067dd6 100644 --- a/ceph-radosgw/hooks/ceph_rgw.py +++ b/ceph-radosgw/hooks/ceph_rgw.py @@ -50,19 +50,20 @@ def import_radosgw_key(key, name=None): link_path = None owner = group = 'root' - if not os.path.exists(keyring_path): + exists = os.path.exists(keyring_path) + if not exists: mkdir(path=os.path.dirname(keyring_path), owner=owner, group=group, perms=0o750) - cmd = [ - 'ceph-authtool', - keyring_path, - '--create-keyring', - '--name=client.{}'.format( - name or 'radosgw.gateway' - ), - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) + + cmd = ['ceph-authtool', keyring_path] + if not exists: + cmd.append('--create-keyring') + cmd.extend([ + '--name=client.{}'.format(name or 'radosgw.gateway'), + '--add-key={}'.format(key) + ]) + subprocess.check_call(cmd) + if not exists: cmd = [ 'chown', '{}:{}'.format(owner, group), @@ -74,9 +75,8 @@ def import_radosgw_key(key, name=None): # operations for multi-site configuration if link_path: symlink(keyring_path, link_path) - return True - return False + return not exists def normalize_pool_name(pool): diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index ffecae3a..d362a9bb 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -335,8 +335,7 @@ def _mon_relation(): key_name = None if key: - new_keyring = ceph.import_radosgw_key(key, - name=key_name) + ceph.import_radosgw_key(key, name=key_name) # NOTE(jamespage): # Deal with switch from radosgw init script to # systemd named units for radosgw instances by @@ -358,7 +357,6 @@ def _mon_relation(): # in systemd and stop the process restarting once # zone configuration is complete. if (not is_unit_paused_set() and - new_keyring and not multisite_deployment()): log('Resume service "{}" as we now have keys for it.' .format(service_name()), level=DEBUG) diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 1fe9b0fc..6b2be35c 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -45,6 +45,7 @@ tests: - zaza.openstack.charm_tests.swift.tests.S3APITest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation + - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests tests_options: force_deploy: From 3510058b19338ef30e9f70d7d96d2490165677cb Mon Sep 17 00:00:00 2001 From: Federico Bosi Date: Wed, 17 Apr 2024 09:39:32 +0200 Subject: [PATCH 2606/2699] Add ipv6 bind flags Following https://tracker.ceph.com/issues/52867 we need to tell ceph which address family to use via the ms_bind_ipv4/6 config flags. I added them to the ceph.conf template and updated the config hook. 
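With `prefer-ipv6` set, the rendered ceph.conf should then pin the messenger to IPv6, per the template change below:

```
ms_bind_ipv6 = true
ms_bind_ipv4 = false
```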
Closes-Bug: #2056337 Change-Id: Ib735bd4876b6909762288b97857bccaa597c2b80 --- ceph-mon/src/ceph_hooks.py | 2 ++ ceph-mon/templates/ceph.conf | 6 +++++ ceph-mon/unit_tests/test_ceph_hooks.py | 36 ++++++++++++++++++++++++++ 3 files changed, 44 insertions(+) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 6c0bf919..101b1066 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -206,6 +206,8 @@ def get_ceph_context(): if config('prefer-ipv6'): dynamic_ipv6_address = get_ipv6_addr()[0] + cephcontext['ms_bind_ipv4'] = False + cephcontext['ms_bind_ipv6'] = True if not public_network: cephcontext['public_addr'] = dynamic_ipv6_address if not cluster_network: diff --git a/ceph-mon/templates/ceph.conf b/ceph-mon/templates/ceph.conf index 3ad2bc07..52c1cb8f 100644 --- a/ceph-mon/templates/ceph.conf +++ b/ceph-mon/templates/ceph.conf @@ -23,6 +23,12 @@ debug osd = {{ loglevel }}/5 # skew calculation. mon pg warn max object skew = -1 +{% if ms_bind_ipv6 %} +ms_bind_ipv6 = true +{%- endif %} +{%- if ms_bind_ipv4 == false %} +ms_bind_ipv4 = false +{% endif %} {% if ceph_public_network is string %} public network = {{ ceph_public_network }} {%- endif %} diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 16972402..b8407818 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -199,6 +199,42 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config, 'use_syslog': 'true'} self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'get_rbd_features', return_value=None) + @patch.object(ceph_hooks, 'get_ipv6_addr', + lambda **kwargs: ["2a01:348:2f4:0:685e:5748:ae62:209f"]) + @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1) + @patch.object(ceph_hooks, 'get_mon_hosts', + lambda *args: ['2a01:348:2f4:0:685e:5748:ae62:209f', + '2a01:348:2f4:0:685e:5748:ae62:20a0']) + @patch.object(ceph_hooks, 'get_networks', lambda *args: "") + @patch.object(ceph_hooks, 'leader_get', lambda *args: '1234') + @patch.object(ceph, 'config') + @patch.object(ceph_hooks, 'config') + def test_get_ceph_context_prefer_ipv6(self, mock_config, mock_config2, + _get_rbd_features): + config = copy.deepcopy(CHARM_CONFIG) + config['prefer-ipv6'] = True + mock_config.side_effect = lambda key: config[key] + mock_config2.side_effect = lambda key: config[key] + ctxt = ceph_hooks.get_ceph_context() + expected = {'auth_supported': 'cephx', + 'ceph_cluster_network': '', + 'ceph_public_network': '', + 'cluster_addr': '2a01:348:2f4:0:685e:5748:ae62:209f', + 'dio': 'true', + 'fsid': '1234', + 'loglevel': 1, + 'mon_hosts': '2a01:348:2f4:0:685e:5748:ae62:209f ' + '2a01:348:2f4:0:685e:5748:ae62:20a0', + 'mon_data_avail_warn': 30, + 'mon_data_avail_crit': 5, + 'old_auth': False, + 'public_addr': '2a01:348:2f4:0:685e:5748:ae62:209f', + 'use_syslog': 'true', + 'ms_bind_ipv4': False, + 'ms_bind_ipv6': True} + self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, 'config') def test_nrpe_dependency_installed(self, mock_config): config = copy.deepcopy(CHARM_CONFIG) From 6e7caaa8ee4332a15339b5e8ba1770ef66d301e6 Mon Sep 17 00:00:00 2001 From: Ionut Balutoiu Date: Thu, 19 Oct 2023 13:40:26 +0300 Subject: [PATCH 2607/2699] Fix scale-out in the multi-site replication scenario If the multi-site relation is established, the `ceph-radosgw` application cannot be scaled out. 
This is happening because the multi-site functions are part of `check_optional_config_and_relations`, which is called by `assess_status` after every successful hook in the main hook entrypoint: ``` if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) except ValueError as e: # Handle any invalid configuration values status_set(WORKLOAD_STATES.BLOCKED, str(e)) else: assess_status(CONFIGS) ``` The multi-site functions (for example: `is_multisite_configured` or `check_cluster_has_buckets`) will fail since the unit is not yet ready for service. This change ensures that the unit is ready for service before calling any multi-site functions. Closes-Bug: #2062405 Change-Id: I63c21a0b545bb456df9b09d8c16cc43cd7eec2f3 Signed-off-by: Ionut Balutoiu --- ceph-radosgw/hooks/utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 3a7a6d97..8397ac85 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -246,8 +246,9 @@ def check_optional_config_and_relations(configs): # Primary site status check if primary_rids: # Migration: The system is not multisite already. - if not multisite.is_multisite_configured(config('zone'), - config('zonegroup')): + if (ready_for_service(legacy=False) and + not multisite.is_multisite_configured(config('zone'), + config('zonegroup'))): if multisite.check_cluster_has_buckets(): zones, zonegroups = get_zones_zonegroups() status_msg = "Multiple zone or zonegroup configured, " \ @@ -271,8 +272,9 @@ def check_optional_config_and_relations(configs): # Secondary site status check if secondary_rids: # Migration: The system is not multisite already. - if not multisite.is_multisite_configured(config('zone'), - config('zonegroup')): + if (ready_for_service(legacy=False) and + not multisite.is_multisite_configured(config('zone'), + config('zonegroup'))): if multisite.check_cluster_has_buckets(): return ('blocked', "Non-Pristine RGW site can't be used as secondary") From 277244f3796830bd4a4c92951e89ebe97086be19 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 22 Apr 2024 20:52:09 -0300 Subject: [PATCH 2608/2699] Implement key rotation for MDS daemons This patchset implements key rotation for MDS daemons, which essentially involves the ceph-fs charm. It works in a very similar fashion to RGW units.
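As with RGW daemons, rotation is driven through the ceph-mon `rotate-key` action; a typical invocation (Juju 2.9-style syntax, hypothetical MDS name) would be:

```
juju run-action --wait ceph-mon/leader rotate-key entity=mds.ceph-fs-0
```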
Change-Id: I06570d9602137b804af56e358cabf552d6f1e9fd --- ceph-mon/src/ops_actions/rotate_key.py | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/ceph-mon/src/ops_actions/rotate_key.py b/ceph-mon/src/ops_actions/rotate_key.py index 28592af9..e500f159 100644 --- a/ceph-mon/src/ops_actions/rotate_key.py +++ b/ceph-mon/src/ops_actions/rotate_key.py @@ -99,6 +99,34 @@ def _handle_rgw_key_rotation(entity, event, model): event.fail("Entity %s not found" % entity) +def _find_mds_unit(relations, mds_name): + for relation in relations: + for unit in relation.units: + try: + if mds_name == relation.data[unit]['mds-name']: + return relation.data + except KeyError: + logger.exception('mds name not found in relation data bag') + + +def _handle_mds_key_rotation(entity, event, model): + mds_name = entity[4:] + relations = model.relations.get('mds') + if not relations: + event.fail('No mds relations found') + return + + key_name = mds_name + '_mds_key' + bag = _find_mds_unit(relations, key_name) + if bag is None: + event.fail('No unit found for entity: %s' % entity) + return + + pending_key = _create_key(entity, event) + bag[model.unit]['pending_key'] = json.dumps({mds_name: pending_key}) + event.set_results({'message': 'success'}) + + def rotate_key(event, model=None) -> None: """Rotate the key of the specified entity.""" entity = event.params.get("entity") @@ -125,5 +153,7 @@ event.set_results({"message": "success"}) elif entity.startswith('client.rgw.'): _handle_rgw_key_rotation(entity, event, model) + elif entity.startswith('mds.'): + _handle_mds_key_rotation(entity, event, model) else: event.fail("Unknown entity: %s" % entity) From 35eebb3d1314a9f98b5b591a3957dc02973833c6 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 23 Apr 2024 13:50:20 -0300 Subject: [PATCH 2609/2699] Improve MDS key rotation Instead of adding a new field that must later be deleted when no longer needed, simply reset the already-passed key and let ceph-fs handle the rest. Change-Id: I5a9adff9777ab1441ea50eb881a5334a69b087d2 --- ceph-mon/src/ops_actions/rotate_key.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ceph-mon/src/ops_actions/rotate_key.py b/ceph-mon/src/ops_actions/rotate_key.py index e500f159..8e473bba 100644 --- a/ceph-mon/src/ops_actions/rotate_key.py +++ b/ceph-mon/src/ops_actions/rotate_key.py @@ -116,14 +116,13 @@ def _handle_mds_key_rotation(entity, event, model): event.fail('No mds relations found') return - key_name = mds_name + '_mds_key' - bag = _find_mds_unit(relations, key_name) + bag = _find_mds_unit(relations, mds_name) if bag is None: event.fail('No unit found for entity: %s' % entity) return pending_key = _create_key(entity, event) - bag[model.unit]['pending_key'] = json.dumps({mds_name: pending_key}) + bag[model.unit][mds_name + "_mds_key"] = pending_key event.set_results({'message': 'success'}) From fd31c2fb6e6474e0a1b5b319a0f36a6b688ea4dc Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Mon, 22 Apr 2024 21:00:35 -0300 Subject: [PATCH 2610/2699] Implement key rotation for ceph-fs This patchset implements key rotation for the ceph-fs charm by receiving the new pending key from the ceph-mon charm and manually rotating it via Ceph's authtool. It makes use of the 'ceph-mds-relation-changed' hook for this.
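On the ceph-fs side (implemented in the follow-up patch below), a pre-existing keyring marks the incoming key as a rotation, after which the daemon is bounced roughly as follows (unit hostname illustrative):

```
sudo systemctl reset-failed ceph-mds@<hostname>.service
sudo systemctl restart ceph-mds@<hostname>.service
```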
Change-Id: I773f389f56d78cd7ce58f9f2b5e7d7695164acb1 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1202 --- ceph-fs/osci.yaml | 1 - ceph-fs/src/reactive/ceph_fs.py | 24 ++++++++++- ceph-fs/src/tests/bundles/focal-xena.yaml | 45 --------------------- ceph-fs/src/tests/tests.yaml | 1 + ceph-fs/unit_tests/test_reactive_ceph_fs.py | 2 + 5 files changed, 26 insertions(+), 47 deletions(-) delete mode 100644 ceph-fs/src/tests/bundles/focal-xena.yaml diff --git a/ceph-fs/osci.yaml b/ceph-fs/osci.yaml index 3d15efd1..ec739f37 100644 --- a/ceph-fs/osci.yaml +++ b/ceph-fs/osci.yaml @@ -2,7 +2,6 @@ templates: - charm-unit-jobs-py38 - charm-unit-jobs-py39 - - charm-xena-functional-jobs - charm-yoga-functional-jobs - charm-functional-jobs vars: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index a9bbe94f..8dc98980 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -1,4 +1,4 @@ -# Copyright 2016 Canonical Ltd +# Copyright 2024 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,9 @@ import charms_openstack.bus import charms_openstack.charm as charm +import os +import subprocess + charms_openstack.bus.discover() @@ -41,6 +44,9 @@ def config_changed(): ceph_mds = reactive.endpoint_from_flag('ceph-mds.pools.available') with charm.provide_charm_instance() as cephfs_charm: + host = cephfs_charm.hostname + exists = os.path.exists('/var/lib/ceph/mds/ceph-%s/keyring' % host) + cephfs_charm.configure_ceph_keyring(ceph_mds.mds_key()) cephfs_charm.render_with_interfaces([ceph_mds]) if reactive.is_flag_set('config.changed.source'): @@ -52,6 +58,22 @@ def config_changed(): reactive.set_flag('config.rendered') cephfs_charm.assess_status() + # If the keyring file existed before this call, then the new + # provided key implies a rotation. + if exists: + svc = 'ceph-mds@%s.service' % host + try: + # Reset the failure count first, as the service may fail + # to come up due to the way the restart-map is handled. + subprocess.check_call(['sudo', 'systemctl', + 'reset-failed', svc]) + subprocess.check_call(['sudo', 'systemctl', 'restart', svc]) + except subprocess.CalledProcessError as exc: + # The service can be temporarily masked when booting, so + # skip that class of errors. 
+ ch_core.hookenv.log('Failed to restart MDS service: %s' % + str(exc)) + @reactive.when('ceph-mds.connected') def storage_ceph_connected(ceph): diff --git a/ceph-fs/src/tests/bundles/focal-xena.yaml b/ceph-fs/src/tests/bundles/focal-xena.yaml deleted file mode 100644 index a1c359ce..00000000 --- a/ceph-fs/src/tests/bundles/focal-xena.yaml +++ /dev/null @@ -1,45 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-xena - -local_overlay_enabled: False - - -series: &series focal - -applications: - ubuntu: # used to test mounts - charm: ch:ubuntu - num_units: 2 - ceph-fs: - charm: ../../../ceph-fs.charm - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - channel: latest/edge - -relations: - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index a554e97b..a3c820ab 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -17,6 +17,7 @@ tests: - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation + - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests target_deploy_status: ubuntu: diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py index 8b9be2a8..2840376b 100644 --- a/ceph-fs/unit_tests/test_reactive_ceph_fs.py +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -61,6 +61,8 @@ def test_config_changed(self): self.patch_object(handlers.reactive, 'is_flag_set') self.patch_object(handlers.reactive, 'clear_flag') self.patch_object(handlers.reactive, 'set_flag') + self.patch_object(handlers.os.path, 'exists') + handlers.os.path.exists.return_value = False ceph_mds = mock.MagicMock() ceph_mds.mds_key.return_value = 'fakekey' self.endpoint_from_flag.return_value = ceph_mds From 0dddd11d80129cf69acde38dc4734183827e8e49 Mon Sep 17 00:00:00 2001 From: Ionut Balutoiu Date: Thu, 19 Oct 2023 13:16:04 +0300 Subject: [PATCH 2611/2699] Add group policy configuration Allow configuration of a zone group default sync policy. This is useful in scenarios where we want selective buckets sync; it is especially valuable with the new `cloud-sync` relation. This is based on Ceph multisite sync policy: https://docs.ceph.com/en/latest/radosgw/multisite-sync-policy/ Additionally, three more Juju actions are added to selectively enable, disable, or reset buckets sync: * `enable-buckets-sync` * `disable-buckets-sync` * `reset-buckets-sync` These new actions are meant to be used in conjunction with a default zone group sync policy that allows syncing but leaves it disabled by default.
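A typical selective-sync workflow with these changes (hypothetical bucket names, Juju 2.9-style action syntax) keeps the zone group policy at `allowed` and opts buckets in explicitly:

```
juju config ceph-radosgw sync-policy-state=allowed
juju run-action --wait ceph-radosgw/leader enable-buckets-sync buckets=images,logs
```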
Change-Id: I4a8076192269aaeaca50668ebcebc0a52c6d2c84 func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1193 Signed-off-by: Ionut Balutoiu --- ceph-radosgw/README.md | 3 + ceph-radosgw/actions.yaml | 26 ++ ceph-radosgw/actions/actions.py | 178 ++++++++ ceph-radosgw/actions/disable-buckets-sync | 1 + ceph-radosgw/actions/enable-buckets-sync | 1 + ceph-radosgw/actions/reset-buckets-sync | 1 + ceph-radosgw/config.yaml | 28 ++ ceph-radosgw/hooks/hooks.py | 91 ++++ ceph-radosgw/hooks/multisite.py | 401 ++++++++++++++++++ ceph-radosgw/hooks/primary-relation-changed | 1 + ceph-radosgw/unit_tests/test_actions.py | 175 ++++++++ .../unit_tests/test_ceph_radosgw_context.py | 1 + .../unit_tests/test_ceph_radosgw_utils.py | 6 +- ceph-radosgw/unit_tests/test_hooks.py | 105 +++++ ceph-radosgw/unit_tests/test_multisite.py | 230 ++++++++++ .../testdata/test_create_sync_group_flow.json | 20 + .../testdata/test_create_sync_group_pipe.json | 49 +++ .../testdata/test_get_sync_group.json | 45 ++ .../testdata/test_list_sync_groups.json | 45 ++ 19 files changed, 1404 insertions(+), 3 deletions(-) create mode 120000 ceph-radosgw/actions/disable-buckets-sync create mode 120000 ceph-radosgw/actions/enable-buckets-sync create mode 120000 ceph-radosgw/actions/reset-buckets-sync create mode 120000 ceph-radosgw/hooks/primary-relation-changed create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_sync_group_flow.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_create_sync_group_pipe.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_get_sync_group.json create mode 100644 ceph-radosgw/unit_tests/testdata/test_list_sync_groups.json diff --git a/ceph-radosgw/README.md b/ceph-radosgw/README.md index 335ae407..b6dd248c 100644 --- a/ceph-radosgw/README.md +++ b/ceph-radosgw/README.md @@ -243,6 +243,9 @@ not deployed then see file `actions.yaml`. * `readwrite` * `resume` * `tidydefaults` +* `enable-buckets-sync` +* `disable-buckets-sync` +* `reset-buckets-sync` # Documentation diff --git a/ceph-radosgw/actions.yaml b/ceph-radosgw/actions.yaml index abe46ad2..5b24635a 100644 --- a/ceph-radosgw/actions.yaml +++ b/ceph-radosgw/actions.yaml @@ -19,3 +19,29 @@ force-enable-multisite: zonegroup: type: string description: Existing Zonegroup to be reconfigured as the 'zonegroup' config value. +enable-buckets-sync: + description: | + Enable buckets sync in the multi-site replication. This is meant to be + used only when the default zonegroup sync policy is not "enabled", but it is + "allowed". + params: + buckets: + type: string + description: Comma-separated list of buckets' names to enable syncing. +disable-buckets-sync: + description: | + Forbid buckets sync in the multi-site replication. This is useful when you + want to disable syncing for some buckets, but you want to sync all the + other buckets. + params: + buckets: + type: string + description: Comma-separated list of buckets' names to disable syncing. +reset-buckets-sync: + description: | + Reset buckets sync policy. After this is executed, the buckets will be + synced according to the default zone group sync policy. + params: + buckets: + type: string + description: Comma-separated list of buckets' names to reset sync policy. 
diff --git a/ceph-radosgw/actions/actions.py b/ceph-radosgw/actions/actions.py index db0aa548..bced130b 100755 --- a/ceph-radosgw/actions/actions.py +++ b/ceph-radosgw/actions/actions.py @@ -49,6 +49,8 @@ service_restart, ) +DEFAULT_SYNC_POLICY_ID = 'default' + def pause(args): """Pause the Ceilometer services. @@ -227,6 +229,179 @@ def force_enable_multisite(args): action_fail(message + " : {}".format(cpe.output)) +def is_multisite_sync_policy_action_allowed(): + """Check if the current Juju unit is allowed to run sync policy actions. + + This method checks if the current Juju unit is allowed to execute + the Juju actions to configure Multisite sync policies: + * enable-buckets-sync + * disable-buckets-sync + * reset-buckets-sync + These Juju actions are allowed to run only on the leader unit of the + primary RGW zone. + + :return: Whether the current Juju unit is allowed to run the Multisite + sync policy Juju actions. + :rtype: Boolean + """ + if not is_leader(): + action_fail("This action can only be executed on leader unit.") + return False + + realm = config('realm') + zone = config('zone') + zonegroup = config('zonegroup') + + if not all((realm, zonegroup, zone)): + action_fail("Missing required charm configurations realm({}), " + "zonegroup({}) and zone({}).".format( + realm, zonegroup, zone + )) + return False + + if not multisite.is_multisite_configured(zone=zone, zonegroup=zonegroup): + action_fail("Multisite is not configured") + return False + + zonegroup_info = multisite.get_zonegroup_info(zonegroup) + if zonegroup_info is None: + action_fail("Failed to fetch zonegroup ({}) info".format(zonegroup)) + return False + + zone_info = multisite.get_zone_info(zone) + if zone_info is None: + action_fail("Failed to fetch zone ({}) info".format(zone)) + return False + + if zonegroup_info['master_zone'] != zone_info['id']: + action_fail('This action can only be executed on primary RGW ' + 'application units.') + return False + + return True + + +def update_buckets_sync_policy(buckets, sync_policy_state): + """Update the sync policy state for all the given buckets. + + This method gets a list of bucket names and a sync policy state to set + for all of them. The sync policy state can be one of the following: + "allowed", "enabled", or "forbidden". Validation for the sync policy + state is done in the "multisite.create_sync_group" module method. + + The sync policy state is set by creating a bucket-level sync group with + the given state, followed by a sync group pipe that match all the source + and destination buckets. If the bucket already has a sync group, it is + updated with the new state. + + :param buckets: List of bucket names. + :type buckets: list + :param sync_policy_state: The sync policy state to set for the buckets. 
+ :type sync_policy_state: str + """ + zone = config('zone') + zonegroup = config('zonegroup') + existing_buckets = multisite.list_buckets(zonegroup=zonegroup, zone=zone) + messages = [] + for bucket in buckets: + if bucket in existing_buckets: + multisite.create_sync_group( + bucket=bucket, + group_id=DEFAULT_SYNC_POLICY_ID, + status=sync_policy_state) + multisite.create_sync_group_pipe( + bucket=bucket, + group_id=DEFAULT_SYNC_POLICY_ID, + pipe_id=DEFAULT_SYNC_POLICY_ID, + source_zones=['*'], + dest_zones=['*']) + message = 'Updated "{}" bucket sync policy to "{}"'.format( + bucket, sync_policy_state) + else: + message = ('Bucket "{}" does not exist in the zonegroup "{}" and ' + 'zone "{}"'.format(bucket, zonegroup, zone)) + log(message) + messages.append(message) + action_set( + values={ + 'message': '\n'.join(messages) + } + ) + + +def reset_buckets_sync_policy(buckets): + """Reset the sync policy state for all the given buckets. + + For every bucket in the given list, this method resets the sync policy + state. This is done by removing the bucket-level sync group. + + :param buckets: List of bucket names. + :type buckets: list + """ + zone = config('zone') + zonegroup = config('zonegroup') + existing_buckets = multisite.list_buckets(zonegroup=zonegroup, zone=zone) + messages = [] + for bucket in buckets: + if bucket in existing_buckets: + multisite.remove_sync_group( + bucket=bucket, + group_id=DEFAULT_SYNC_POLICY_ID) + message = 'Reset "{}" bucket sync policy'.format(bucket) + else: + message = ('Bucket "{}" does not exist in the zonegroup "{}" and ' + 'zone "{}"'.format(bucket, zonegroup, zone)) + log(message) + messages.append(message) + action_set( + values={ + 'message': '\n'.join(messages) + } + ) + + +def enable_buckets_sync(args): + """Enable sync for the given buckets""" + if not is_multisite_sync_policy_action_allowed(): + return + try: + update_buckets_sync_policy( + buckets=action_get('buckets').split(','), + sync_policy_state=multisite.SYNC_POLICY_ENABLED, + ) + except subprocess.CalledProcessError as cpe: + message = "Failed to enable sync for the given buckets" + log(message, level=ERROR) + action_fail(message + " : {}".format(cpe.output)) + + +def disable_buckets_sync(args): + """Disable sync for the given buckets""" + if not is_multisite_sync_policy_action_allowed(): + return + try: + update_buckets_sync_policy( + buckets=action_get('buckets').split(','), + sync_policy_state=multisite.SYNC_POLICY_FORBIDDEN, + ) + except subprocess.CalledProcessError as cpe: + message = "Failed to disable sync for the given buckets" + log(message, level=ERROR) + action_fail(message + " : {}".format(cpe.output)) + + +def reset_buckets_sync(args): + """Reset sync policy for the given buckets""" + if not is_multisite_sync_policy_action_allowed(): + return + try: + reset_buckets_sync_policy(buckets=action_get('buckets').split(',')) + except subprocess.CalledProcessError as cpe: + message = "Failed to reset sync for the given buckets" + log(message, level=ERROR) + action_fail(message + " : {}".format(cpe.output)) + + # A dictionary of all the defined actions to callables (which take # parsed arguments). 
ACTIONS = { @@ -237,6 +412,9 @@ def force_enable_multisite(args): "readwrite": readwrite, "tidydefaults": tidydefaults, "force-enable-multisite": force_enable_multisite, + "enable-buckets-sync": enable_buckets_sync, + "disable-buckets-sync": disable_buckets_sync, + "reset-buckets-sync": reset_buckets_sync, } diff --git a/ceph-radosgw/actions/disable-buckets-sync b/ceph-radosgw/actions/disable-buckets-sync new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/disable-buckets-sync @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/enable-buckets-sync b/ceph-radosgw/actions/enable-buckets-sync new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/enable-buckets-sync @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/actions/reset-buckets-sync b/ceph-radosgw/actions/reset-buckets-sync new file mode 120000 index 00000000..405a394e --- /dev/null +++ b/ceph-radosgw/actions/reset-buckets-sync @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index a64a8f18..c931a840 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -429,6 +429,34 @@ options: description: | Name of RADOS Gateway Zone to create for multi-site replication. This option must be specific to the local site e.g. us-west or us-east. + sync-policy-state: + type: string + default: enabled + description: | + This setting is used by the primary ceph-radosgw in multi-site + replication. + + By default, all the buckets are synced from a primary RGW zone to the + secondary zone. This config option allows us to have selective buckets + sync. If this is set, it will be used as the default policy state for + all the buckets in the zonegroup. + + Valid values are: + * enabled - sync is allowed and enabled + * allowed - sync is allowed + * forbidden - sync is not allowed + sync-policy-flow-type: + type: string + default: symmetrical + description: | + This setting is used by the secondary ceph-radosgw in multi-site + replication, and it's effective only when 'sync-policy-state' config is + set on the primary ceph-radosgw. + + Valid values are: + * directional - data is only synced in one direction, from primary to + secondary. + * symmetrical - data is synced in both directions. namespace-tenants: type: boolean default: False diff --git a/ceph-radosgw/hooks/hooks.py b/ceph-radosgw/hooks/hooks.py index d362a9bb..5d54a4f4 100755 --- a/ceph-radosgw/hooks/hooks.py +++ b/ceph-radosgw/hooks/hooks.py @@ -43,6 +43,7 @@ relation_set, log, DEBUG, + WARNING, Hooks, UnregisteredHookError, status_set, is_leader, @@ -134,6 +135,7 @@ ] MULTISITE_SYSTEM_USER = 'multisite-sync' +MULTISITE_DEFAULT_SYNC_GROUP_ID = 'default' def upgrade_available(): @@ -845,6 +847,86 @@ def primary_relation_joined(relation_id=None): secret=secret) +@hooks.hook('primary-relation-changed') +def primary_relation_changed(relation_id=None, unit=None): + if not is_leader(): + log('Cannot setup multisite configuration, this unit is not the ' + 'leader') + return + if not ready_for_service(legacy=False): + log('unit not ready, deferring multisite configuration') + return + + sync_policy_state = config('sync-policy-state') + if not sync_policy_state: + log("The config sync-policy-state is not set. 
Skipping zone group " + "default sync policy configuration") + return + + secondary_data = relation_get(rid=relation_id, unit=unit) + if not all((secondary_data.get('zone'), + secondary_data.get('sync_policy_flow_type'))): + log("Defer processing until secondary RGW has provided required data") + return + + zonegroup = config('zonegroup') + primary_zone = config('zone') + secondary_zone = secondary_data['zone'] + sync_flow_type = secondary_data['sync_policy_flow_type'] + + if (secondary_data.get('zone_tier_type') == 'cloud' and + sync_flow_type != multisite.SYNC_FLOW_DIRECTIONAL): + log("The secondary zone is set with cloud tier type. Ignoring " + "configured {} sync policy flow, and using {}.".format( + sync_flow_type, + multisite.SYNC_FLOW_DIRECTIONAL), + level=WARNING) + sync_flow_type = multisite.SYNC_FLOW_DIRECTIONAL + + flow_id = '{}-{}'.format(primary_zone, secondary_zone) + pipe_id = '{}-{}'.format(primary_zone, secondary_zone) + + mutation = multisite.is_sync_group_update_needed( + group_id=MULTISITE_DEFAULT_SYNC_GROUP_ID, + flow_id=flow_id, + pipe_id=pipe_id, + source_zone=primary_zone, + dest_zone=secondary_zone, + desired_status=sync_policy_state, + desired_flow_type=sync_flow_type, + ) + + if mutation: + multisite.create_sync_group( + group_id=MULTISITE_DEFAULT_SYNC_GROUP_ID, + status=sync_policy_state) + multisite.create_sync_group_flow( + group_id=MULTISITE_DEFAULT_SYNC_GROUP_ID, + flow_id=flow_id, + flow_type=sync_flow_type, + source_zone=primary_zone, + dest_zone=secondary_zone) + source_zones = [primary_zone, secondary_zone] + dest_zones = [primary_zone, secondary_zone] + if sync_flow_type == multisite.SYNC_FLOW_DIRECTIONAL: + source_zones = [primary_zone] + dest_zones = [secondary_zone] + multisite.create_sync_group_pipe( + group_id=MULTISITE_DEFAULT_SYNC_GROUP_ID, + pipe_id=pipe_id, + source_zones=source_zones, + dest_zones=dest_zones) + log( + 'Mutation detected. Restarting {}.'.format(service_name()), + 'INFO') + multisite.update_period(zonegroup=zonegroup, zone=primary_zone) + CONFIGS.write_all() + service_restart(service_name()) + leader_set(restart_nonce=str(uuid.uuid4())) + else: + log('No mutation detected.', 'INFO') + + @hooks.hook('primary-relation-departed') @hooks.hook('secondary-relation-departed') def multisite_relation_departed(): @@ -935,6 +1017,9 @@ def secondary_relation_changed(relation_id=None, unit=None): # this operation but a period update will force it to be created. 
multisite.update_period(fatal=False) + relation_set(relation_id=relation_id, + sync_policy_flow_type=config('sync-policy-flow-type')) + mutation = False # NOTE(utkarshbhatthere): @@ -979,6 +1064,8 @@ def secondary_relation_changed(relation_id=None, unit=None): else: log('No mutation detected.', 'INFO') + relation_set(relation_id=relation_id, zone=zone) @hooks.hook('master-relation-departed') @hooks.hook('slave-relation-departed') @@ -1016,6 +1103,8 @@ def leader_settings_changed(): # Primary/Secondary relation for r_id in relation_ids('primary'): primary_relation_joined(r_id) + for unit in related_units(r_id): + primary_relation_changed(r_id, unit) for r_id in relation_ids('radosgw-user'): radosgw_user_changed(r_id) @@ -1031,6 +1120,8 @@ def process_multisite_relations(): # Primary/Secondary relation for r_id in relation_ids('primary'): primary_relation_joined(r_id) + for unit in related_units(r_id): + primary_relation_changed(r_id, unit) for r_id in relation_ids('secondary'): for unit in related_units(r_id): secondary_relation_changed(r_id, unit) diff --git a/ceph-radosgw/hooks/multisite.py b/ceph-radosgw/hooks/multisite.py index 18a33410..57f8878f 100644 --- a/ceph-radosgw/hooks/multisite.py +++ b/ceph-radosgw/hooks/multisite.py @@ -24,6 +24,31 @@ RGW_ADMIN = 'radosgw-admin' +SYNC_POLICY_ENABLED = 'enabled' +SYNC_POLICY_ALLOWED = 'allowed' +SYNC_POLICY_FORBIDDEN = 'forbidden' +SYNC_POLICY_STATES = [ + SYNC_POLICY_ENABLED, + SYNC_POLICY_ALLOWED, + SYNC_POLICY_FORBIDDEN +] +SYNC_FLOW_DIRECTIONAL = 'directional' +SYNC_FLOW_SYMMETRICAL = 'symmetrical' +SYNC_FLOW_TYPES = [ + SYNC_FLOW_DIRECTIONAL, + SYNC_FLOW_SYMMETRICAL, +] + + +class UnknownSyncPolicyState(Exception): + """Raised when an unknown sync policy state is encountered""" + pass + + +class UnknownSyncFlowType(Exception): + """Raised when an unknown sync flow type is encountered""" + pass + @decorators.retry_on_exception(num_retries=10, base_delay=5, exc_type=subprocess.CalledProcessError) @@ -370,6 +395,28 @@ def modify_zone(name, endpoints=None, default=False, master=False, return None +def get_zone_info(name, zonegroup=None): + """Fetch detailed info for the provided zone + + :param name: zone name + :type name: str + :param zonegroup: parent zonegroup name + :type zonegroup: str + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'zone', 'get', + '--rgw-zone={}'.format(name), + ] + if zonegroup: + cmd.append('--rgw-zonegroup={}'.format(zonegroup)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + def remove_zone_from_zonegroup(zone, zonegroup): """Remove RADOS Gateway zone from provided parent zonegroup @@ -888,3 +935,357 @@ def check_cluster_has_buckets(): if check_zonegroup_has_buckets(zonegroup): return True return False + + +def list_sync_groups(bucket=None): + """List sync policy groups. + + :param bucket: Bucket name. If this is given, the bucket level group + policies are listed. + :type bucket: str + + :return: List of sync policy groups. + :rtype: list + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'get', + ] + if bucket: + cmd.append('--bucket={}'.format(bucket)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return [] + + +def sync_group_exists(group_id, bucket=None): + """Check if the sync policy group exists. + + :param group_id: Sync policy group id. + :type group_id: str + :param bucket: Bucket name. If this is given, the bucket level group + policy is checked.
+ :type bucket: str + + :rtype: Boolean + """ + for group in list_sync_groups(bucket=bucket): + if group['key'] == group_id: + return True + return False + + +def get_sync_group(group_id, bucket=None): + """Get the sync policy group configuration. + + :param group_id: Sync policy group id. + :type group_id: str + :param bucket: Bucket name. If this is given, the bucket level group + policy is returned. + :type bucket: str + + :return: Sync policy group configuration. + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'get', + '--group-id={}'.format(group_id), + ] + if bucket: + cmd.append('--bucket={}'.format(bucket)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def create_sync_group(group_id, status, bucket=None): + """Create a sync policy group. + + :param group_id: ID of the sync policy group to be created. + :type group_id: str + :param status: Status of the sync policy group to be created. Must be one + of the following: 'enabled', 'allowed', 'forbidden'. + :type status: str + :param bucket: Bucket name. If this is given, the bucket level group + policy is created. + :type bucket: str + + :raises UnknownSyncPolicyState: if the provided status is not one of the + allowed values. + + :return: Sync policy group configuration. + :rtype: dict + """ + if status not in SYNC_POLICY_STATES: + raise UnknownSyncPolicyState( + 'Unknown sync policy state: {}'.format(status)) + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'create', + '--group-id={}'.format(group_id), + '--status={}'.format(status), + ] + if bucket: + cmd.append('--bucket={}'.format(bucket)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def remove_sync_group(group_id, bucket=None): + """Remove a sync group with the given group ID and optional bucket. + + :param group_id: The ID of the sync group to remove. + :type group_id: str + :param bucket: Bucket name. If this is given, the bucket level group + policy is removed. + :type bucket: str + + :return: The output of the command as a dict. + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'remove', + '--group-id={}'.format(group_id), + ] + if bucket: + cmd.append('--bucket={}'.format(bucket)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def is_sync_group_update_needed(group_id, flow_id, pipe_id, source_zone, + dest_zone, desired_status, desired_flow_type): + """Check if the sync group (with the given ID) needs updating. + + :param group_id: The ID of the sync group to check. + :type group_id: str + :param flow_id: The ID of the sync group flow to check. + :type flow_id: str + :param pipe_id: The ID of the sync group pipe to check. + :type pipe_id: str + :param source_zone: Source zone of the sync group flow to check. + :type source_zone: str + :param dest_zone: Dest zone of the sync group flow to check. + :type dest_zone: str + :param desired_status: Desired status of the sync group. + :type desired_status: str + :param desired_flow_type: Desired flow type of the sync group data flow. + :type desired_flow_type: str + + :rtype: Boolean + """ + # Check if sync group exists. + if not sync_group_exists(group_id): + hookenv.log('Sync group "{}" not configured yet'.format(group_id)) + return True + group = get_sync_group(group_id) + + # Check sync group status.
+ if group.get('status') != desired_status: + hookenv.log('Sync group "{}" status changed to "{}"'.format( + group["id"], desired_status)) + return True + + # Check if data flow needs to be created or updated. + if is_sync_group_flow_update_needed(group=group, + flow_id=flow_id, + source_zone=source_zone, + dest_zone=dest_zone, + desired_flow_type=desired_flow_type): + return True + + # Check if data pipe needs to be created. + pipes = group.get('pipes', []) + pipes_ids = [pipe['id'] for pipe in pipes] + if pipe_id not in pipes_ids: + hookenv.log('Sync group pipe "{}" not created yet'.format(pipe_id)) + return True + + # Sync group configuration is up-to-date. + return False + + +def create_sync_group_flow(group_id, flow_id, flow_type, source_zone, + dest_zone): + """Create a new sync group data flow with the given parameters. + + :param group_id: The ID of the sync group to create the data flow for. + :type group_id: str + :param flow_id: The ID of the new data flow. + :type flow_id: str + :param flow_type: The type of the new data flow. + :type flow_type: str + :param source_zone: The source zone for the new data flow. + :type source_zone: str + :param dest_zone: The destination zone for the new data flow. + :type dest_zone: str + + :raises UnknownSyncFlowType: If an unknown sync flow type is provided. + + :return: Sync group data flow configuration. + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'flow', 'create', + '--group-id={}'.format(group_id), + '--flow-id={}'.format(flow_id), + '--flow-type={}'.format(flow_type), + ] + if flow_type == SYNC_FLOW_SYMMETRICAL: + cmd.append('--zones={},{}'.format(source_zone, dest_zone)) + elif flow_type == SYNC_FLOW_DIRECTIONAL: + cmd.append('--source-zone={}'.format(source_zone)) + cmd.append('--dest-zone={}'.format(dest_zone)) + else: + raise UnknownSyncFlowType( + 'Unknown sync flow type {}'.format(flow_type)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def remove_sync_group_flow(group_id, flow_id, flow_type, source_zone=None, + dest_zone=None): + """Remove a sync group data flow. + + :param group_id: The ID of the sync group. + :type group_id: str + :param flow_id: The ID of the flow to remove. + :type flow_id: str + :param flow_type: The type of the flow to remove. + :type flow_type: str + :param source_zone: The source zone of the flow to remove (only for + directional flows). + :type source_zone: str + :param dest_zone: The destination zone of the flow to remove (only for + directional flows). + :type dest_zone: str + + :return: The output of the command as a dict. + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'flow', 'remove', + '--group-id={}'.format(group_id), + '--flow-id={}'.format(flow_id), + '--flow-type={}'.format(flow_type), + ] + if flow_type == SYNC_FLOW_DIRECTIONAL: + cmd.append('--source-zone={}'.format(source_zone)) + cmd.append('--dest-zone={}'.format(dest_zone)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def create_sync_group_pipe(group_id, pipe_id, source_zones, dest_zones, + source_bucket='*', dest_bucket='*', bucket=None): + """Create a sync group pipe between source and destination zones. + + :param group_id: The ID of the sync group. + :type group_id: str + :param pipe_id: The ID of the sync group pipe. + :type pipe_id: str + :param source_zones: A list of source zones. + :type source_zones: list + :param dest_zones: A list of destination zones. 
+ :type dest_zones: list + :param source_bucket: The source bucket name. Default is '*'. + :type source_bucket: str + :param dest_bucket: The destination bucket name. Default is '*'. + :type dest_bucket: str + :param bucket: The bucket name. If specified, the sync group pipe will be + created for this bucket only. + :type bucket: str + + :return: Sync group pipe configuration. + :rtype: dict + """ + cmd = [ + RGW_ADMIN, '--id={}'.format(_key_name()), + 'sync', 'group', 'pipe', 'create', + '--group-id={}'.format(group_id), + '--pipe-id={}'.format(pipe_id), + '--source-zones={}'.format(','.join(source_zones)), + '--source-bucket={}'.format(source_bucket), + '--dest-zones={}'.format(','.join(dest_zones)), + '--dest-bucket={}'.format(dest_bucket), + ] + if bucket: + cmd.append('--bucket={}'.format(bucket)) + try: + return json.loads(_check_output(cmd)) + except TypeError: + return None + + +def is_sync_group_flow_update_needed(group, flow_id, source_zone, dest_zone, + desired_flow_type): + """Check if the given sync group flow needs updating. + + :param group: The sync policy group configuration. + :type group: dict + :param flow_id: The ID of the sync group flow to check. + :type flow_id: str + :param source_zone: Source zone of the sync group flow to check. + :type source_zone: str + :param dest_zone: Dest zone of the sync group flow to check. + :type dest_zone: str + :param desired_flow_type: Desired flow type of the sync group data flow. + :type desired_flow_type: str + + :rtype: Boolean + """ + symmetrical_flows = group['data_flow'].get('symmetrical', []) + symmetrical_flows_ids = [flow['id'] for flow in symmetrical_flows] + + directional_flows = group['data_flow'].get('directional', []) + directional_flows_ids = [ + # NOTE: Directional flow IDs are not present in the sync group + # configuration. We assume that the ID is a concatenation of the source + # zone and destination zone, as currently configured by the charm code. + # This is a safe assumption, because there are unique directional + # flows for each pair of zones. + "{}-{}".format(flow['source_zone'], flow['dest_zone']) + for flow in directional_flows + ] + + data_flows_ids = symmetrical_flows_ids + directional_flows_ids + if flow_id not in data_flows_ids: + hookenv.log('Data flow "{}" not configured yet'.format(flow_id)) + return True + + # Check if the flow type is consistent with the current configuration. + is_symmetrical = (desired_flow_type == SYNC_FLOW_SYMMETRICAL and + flow_id in symmetrical_flows_ids) + is_directional = (desired_flow_type == SYNC_FLOW_DIRECTIONAL and + flow_id in directional_flows_ids) + if is_symmetrical or is_directional: + # Data flow is consistent with the current configuration. + return False + + # Data flow type has changed. We need to remove the old data flow.
+ hookenv.log('Data flow "{}" type changed to "{}"'.format( + flow_id, desired_flow_type)) + old_flow_type = ( + SYNC_FLOW_SYMMETRICAL if desired_flow_type == SYNC_FLOW_DIRECTIONAL + else SYNC_FLOW_DIRECTIONAL) + hookenv.log( + 'Removing old data flow "{}" before configuring the new one'.format( + flow_id)) + remove_sync_group_flow( + group_id=group["id"], flow_id=flow_id, flow_type=old_flow_type, + source_zone=source_zone, dest_zone=dest_zone) + return True diff --git a/ceph-radosgw/hooks/primary-relation-changed b/ceph-radosgw/hooks/primary-relation-changed new file mode 120000 index 00000000..9416ca6a --- /dev/null +++ b/ceph-radosgw/hooks/primary-relation-changed @@ -0,0 +1 @@ +hooks.py \ No newline at end of file diff --git a/ceph-radosgw/unit_tests/test_actions.py b/ceph-radosgw/unit_tests/test_actions.py index 1978b68b..01d7407b 100644 --- a/ceph-radosgw/unit_tests/test_actions.py +++ b/ceph-radosgw/unit_tests/test_actions.py @@ -82,6 +82,7 @@ class MultisiteActionsTestCase(CharmTestCase): TO_PATCH = [ 'action_fail', + 'action_get', 'action_set', 'multisite', 'config', @@ -89,6 +90,7 @@ class MultisiteActionsTestCase(CharmTestCase): 'leader_set', 'service_name', 'service_restart', + 'log', ] def setUp(self): @@ -154,3 +156,176 @@ def test_tidydefaults_unconfigured(self): self.test_config.set('zone', None) actions.tidydefaults([]) self.action_fail.assert_called_once() + + def test_enable_buckets_sync(self): + self.multisite.is_multisite_configured.return_value = True + self.multisite.get_zonegroup_info.return_value = { + 'master_zone': 'test-zone-id', + } + self.multisite.get_zone_info.return_value = { + 'id': 'test-zone-id', + } + self.is_leader.return_value = True + self.action_get.return_value = 'testbucket1,testbucket2,non-existent' + self.test_config.set('zone', 'testzone') + self.test_config.set('zonegroup', 'testzonegroup') + self.test_config.set('realm', 'testrealm') + self.multisite.list_buckets.return_value = ['testbucket1', + 'testbucket2'] + + actions.enable_buckets_sync([]) + + self.multisite.is_multisite_configured.assert_called_once() + self.multisite.get_zonegroup_info.assert_called_once_with( + 'testzonegroup', + ) + self.multisite.get_zone_info.assert_called_once_with( + 'testzone', + ) + self.action_get.assert_called_once_with('buckets') + self.multisite.list_buckets.assert_called_once_with( + zonegroup='testzonegroup', zone='testzone', + ) + self.assertEqual(self.multisite.create_sync_group.call_count, 2) + self.multisite.create_sync_group.assert_has_calls([ + mock.call(bucket='testbucket1', + group_id='default', + status=self.multisite.SYNC_POLICY_ENABLED), + mock.call(bucket='testbucket2', + group_id='default', + status=self.multisite.SYNC_POLICY_ENABLED), + ]) + self.assertEqual(self.multisite.create_sync_group_pipe.call_count, 2) + self.multisite.create_sync_group_pipe.assert_has_calls([ + mock.call(bucket='testbucket1', + group_id='default', + pipe_id='default', + source_zones=['*'], + dest_zones=['*']), + mock.call(bucket='testbucket2', + group_id='default', + pipe_id='default', + source_zones=['*'], + dest_zones=['*']), + ]) + expected_messages = [ + 'Updated "testbucket1" bucket sync policy to "{}"'.format( + self.multisite.SYNC_POLICY_ENABLED), + 'Updated "testbucket2" bucket sync policy to "{}"'.format( + self.multisite.SYNC_POLICY_ENABLED), + ('Bucket "non-existent" does not exist in the zonegroup ' + '"testzonegroup" and zone "testzone"'), + ] + self.assertEqual(self.log.call_count, 3) + self.log.assert_has_calls([ + mock.call(expected_messages[0]), + 
mock.call(expected_messages[1]), + mock.call(expected_messages[2]), + ]) + self.action_set.assert_called_once_with( + values={ + 'message': '\n'.join(expected_messages), + }) + + def test_disable_buckets_sync(self): + self.multisite.is_multisite_configured.return_value = True + self.multisite.get_zonegroup_info.return_value = { + 'master_zone': 'test-zone-id', + } + self.multisite.get_zone_info.return_value = { + 'id': 'test-zone-id', + } + self.is_leader.return_value = True + self.action_get.return_value = 'testbucket1,non-existent' + self.test_config.set('zone', 'testzone') + self.test_config.set('zonegroup', 'testzonegroup') + self.test_config.set('realm', 'testrealm') + self.multisite.list_buckets.return_value = ['testbucket1'] + + actions.disable_buckets_sync([]) + + self.multisite.is_multisite_configured.assert_called_once() + self.multisite.get_zonegroup_info.assert_called_once_with( + 'testzonegroup', + ) + self.multisite.get_zone_info.assert_called_once_with( + 'testzone', + ) + self.action_get.assert_called_once_with('buckets') + self.multisite.list_buckets.assert_called_once_with( + zonegroup='testzonegroup', zone='testzone', + ) + self.multisite.create_sync_group.assert_called_once_with( + bucket='testbucket1', + group_id='default', + status=self.multisite.SYNC_POLICY_FORBIDDEN, + ) + self.multisite.create_sync_group_pipe.assert_called_once_with( + bucket='testbucket1', + group_id='default', + pipe_id='default', + source_zones=['*'], + dest_zones=['*'], + ) + expected_messages = [ + 'Updated "testbucket1" bucket sync policy to "{}"'.format( + self.multisite.SYNC_POLICY_FORBIDDEN), + ('Bucket "non-existent" does not exist in the zonegroup ' + '"testzonegroup" and zone "testzone"'), + ] + self.assertEqual(self.log.call_count, 2) + self.log.assert_has_calls([ + mock.call(expected_messages[0]), + mock.call(expected_messages[1]), + ]) + self.action_set.assert_called_once_with( + values={ + 'message': '\n'.join(expected_messages), + }) + + def test_reset_buckets_sync(self): + self.multisite.is_multisite_configured.return_value = True + self.multisite.get_zonegroup_info.return_value = { + 'master_zone': 'test-zone-id', + } + self.multisite.get_zone_info.return_value = { + 'id': 'test-zone-id', + } + self.is_leader.return_value = True + self.action_get.return_value = 'testbucket1,non-existent' + self.test_config.set('zone', 'testzone') + self.test_config.set('zonegroup', 'testzonegroup') + self.test_config.set('realm', 'testrealm') + self.multisite.list_buckets.return_value = ['testbucket1'] + + actions.reset_buckets_sync([]) + + self.multisite.is_multisite_configured.assert_called_once() + self.multisite.get_zonegroup_info.assert_called_once_with( + 'testzonegroup', + ) + self.multisite.get_zone_info.assert_called_once_with( + 'testzone', + ) + self.action_get.assert_called_once_with('buckets') + self.multisite.list_buckets.assert_called_once_with( + zonegroup='testzonegroup', zone='testzone', + ) + self.multisite.remove_sync_group.assert_called_once_with( + bucket='testbucket1', + group_id='default', + ) + expected_messages = [ + 'Reset "testbucket1" bucket sync policy', + ('Bucket "non-existent" does not exist in the zonegroup ' + '"testzonegroup" and zone "testzone"'), + ] + self.assertEqual(self.log.call_count, 2) + self.log.assert_has_calls([ + mock.call(expected_messages[0]), + mock.call(expected_messages[1]), + ]) + self.action_set.assert_called_once_with( + values={ + 'message': '\n'.join(expected_messages), + }) diff --git 
a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py index f3b3289a..32d6a962 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_context.py @@ -287,6 +287,7 @@ def _relation_get(attr, unit, rid): self.assertEqual(expect, mon_ctxt()) self.assertTrue(mock_ensure_rsv_v6.called) + @patch.object(context, 'format_ipv6_addr', lambda *_: None) @patch('ceph_radosgw_context.https') @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids') @patch('charmhelpers.contrib.hahelpers.cluster.config_get') diff --git a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py index 56315aef..ceaf761e 100644 --- a/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py +++ b/ceph-radosgw/unit_tests/test_ceph_radosgw_utils.py @@ -305,11 +305,11 @@ def test_multisite_deployment(self): def test_listen_port(self): self.https.return_value = False - self.assertEquals(80, utils.listen_port()) + self.assertEqual(80, utils.listen_port()) self.https.return_value = True - self.assertEquals(443, utils.listen_port()) + self.assertEqual(443, utils.listen_port()) self.test_config.set('port', 42) - self.assertEquals(42, utils.listen_port()) + self.assertEqual(42, utils.listen_port()) def test_set_s3_app(self): self.leader_get.return_value = None diff --git a/ceph-radosgw/unit_tests/test_hooks.py b/ceph-radosgw/unit_tests/test_hooks.py index bb399cb9..a12dd0ac 100644 --- a/ceph-radosgw/unit_tests/test_hooks.py +++ b/ceph-radosgw/unit_tests/test_hooks.py @@ -13,6 +13,7 @@ # limitations under the License. import base64 import json +import os from unittest.mock import ( patch, call, MagicMock, ANY ) @@ -338,6 +339,8 @@ def test_object_store_relation(self, _canonical_url): @patch.object(ceph_hooks, 'leader_get') @patch('charmhelpers.contrib.openstack.ip.service_name', lambda *args: 'ceph-radosgw') + @patch('charmhelpers.contrib.openstack.ip.resolve_address', + lambda *args: 'myserv') @patch('charmhelpers.contrib.openstack.ip.config') def test_identity_joined_early_version(self, _config, _leader_get): self.cmp_pkgrevno.return_value = -1 @@ -687,6 +690,7 @@ class MiscMultisiteTests(CharmTestCase): 'leader_get', 'is_leader', 'primary_relation_joined', + 'primary_relation_changed', 'secondary_relation_changed', 'service_restart', 'service_name', @@ -724,6 +728,12 @@ def test_leader_settings_changed(self): def test_process_multisite_relations(self): ceph_hooks.process_multisite_relations() self.primary_relation_joined.assert_called_once_with('primary:1') + self.assertEqual(self.primary_relation_changed.call_count, 2) + self.primary_relation_changed.assert_has_calls([ + call('primary:1', 'rgw/0'), + call('primary:1', 'rgw/1'), + ]) + self.assertEqual(self.secondary_relation_changed.call_count, 2) self.secondary_relation_changed.assert_has_calls([ call('secondary:1', 'rgw-s/0'), call('secondary:1', 'rgw-s/1'), @@ -889,6 +899,87 @@ def test_primary_relation_joined_not_leader(self): ) self.multisite.list_realms.assert_not_called() + def test_primary_relation_changed_sync_policy_state_unset(self): + self.is_leader.return_value = True + self.test_config.set('sync-policy-state', '') + + ceph_hooks.primary_relation_changed('primary:1') + + self.is_leader.assert_called_once() + self.ready_for_service.assert_called_once_with(legacy=False) + self.config.assert_called_once_with('sync-policy-state') + + def test_primary_relation_changed_sync_rel_data_incomplete(self): + 
self.is_leader.return_value = True + self.test_config.set('sync-policy-state', 'allowed') + self.relation_get.return_value = {'zone': 'secondary'} + + ceph_hooks.primary_relation_changed('primary:1', 'rgw/0') + + self.is_leader.assert_called_once() + self.ready_for_service.assert_called_once_with(legacy=False) + self.config.assert_called_once_with('sync-policy-state') + self.relation_get.assert_called_once_with(rid='primary:1', + unit='rgw/0') + + def test_primary_relation_changed(self): + self.is_leader.return_value = True + configs = { + 'sync-policy-state': 'allowed', + 'zonegroup': 'testzonegroup', + 'zone': 'zone_a', + } + for k, v in configs.items(): + self.test_config.set(k, v) + self.relation_get.return_value = { + 'zone': 'zone_b', + 'sync_policy_flow_type': 'symmetrical', + # this should force flow type to directional, and ignore the value + # from the relation data. + 'zone_tier_type': 'cloud', + } + self.multisite.is_sync_group_update_needed.return_value = True + group_test_data_file = os.path.join( + os.path.dirname(__file__), 'testdata', 'test_get_sync_group.json') + with open(group_test_data_file, 'r') as f: + self.multisite.get_sync_group.return_value = json.loads(f.read()) + + ceph_hooks.primary_relation_changed('primary:1', 'rgw/0') + + self.is_leader.assert_called_once() + self.ready_for_service.assert_called_once_with(legacy=False) + self.config.assert_has_calls([ + call('sync-policy-state'), + call('zonegroup'), + call('zone'), + ]) + self.relation_get.assert_called_once_with(rid='primary:1', + unit='rgw/0') + self.multisite.is_sync_group_update_needed.assert_called_once_with( + group_id=ceph_hooks.MULTISITE_DEFAULT_SYNC_GROUP_ID, + flow_id='zone_a-zone_b', + pipe_id='zone_a-zone_b', + source_zone='zone_a', + dest_zone='zone_b', + desired_status='allowed', + desired_flow_type=self.multisite.SYNC_FLOW_DIRECTIONAL) + self.multisite.create_sync_group.assert_called_once_with( + group_id=ceph_hooks.MULTISITE_DEFAULT_SYNC_GROUP_ID, + status='allowed') + self.multisite.create_sync_group_flow.assert_called_once_with( + group_id=ceph_hooks.MULTISITE_DEFAULT_SYNC_GROUP_ID, + flow_id='zone_a-zone_b', + flow_type=self.multisite.SYNC_FLOW_DIRECTIONAL, + source_zone='zone_a', dest_zone='zone_b') + self.multisite.create_sync_group_pipe.assert_called_once_with( + group_id=ceph_hooks.MULTISITE_DEFAULT_SYNC_GROUP_ID, + pipe_id='zone_a-zone_b', + source_zones=['zone_a'], dest_zones=['zone_b']) + self.multisite.update_period.assert_called_once_with( + zonegroup='testzonegroup', zone='zone_a') + self.service_restart.assert_called_once_with('rgw@hostname') + self.leader_set.assert_called_once_with(restart_nonce=ANY) + @patch.object(json, 'loads') def test_multisite_relation_departed(self, json_loads): for k, v in self._complete_config.items(): @@ -916,6 +1007,7 @@ class SecondaryMultisiteTests(CephRadosMultisiteTests): 'realm': 'testrealm', 'zonegroup': 'testzonegroup', 'zone': 'testzone2', + 'sync-policy-flow-type': 'symmetrical', } _test_relation = { @@ -978,6 +1070,16 @@ def test_secondary_relation_changed(self): ]) self.service_restart.assert_called_once() self.leader_set.assert_called_once_with(restart_nonce=ANY) + self.relation_set.assert_has_calls([ + call( + relation_id='secondary:1', + sync_policy_flow_type='symmetrical', + ), + call( + relation_id='secondary:1', + zone='testzone2', + ), + ]) def test_secondary_relation_changed_incomplete_relation(self): for k, v in self._complete_config.items(): @@ -986,6 +1088,7 @@ def test_secondary_relation_changed_incomplete_relation(self): 
self.relation_get.return_value = {} ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.config.assert_not_called() + self.relation_set.assert_not_called() def test_secondary_relation_changed_mismatching_config(self): for k, v in self._complete_config.items(): @@ -999,11 +1102,13 @@ def test_secondary_relation_changed_mismatching_config(self): call('zone'), ]) self.multisite.list_realms.assert_not_called() + self.relation_set.assert_not_called() def test_secondary_relation_changed_not_leader(self): self.is_leader.return_value = False ceph_hooks.secondary_relation_changed('secondary:1', 'rgw/0') self.relation_get.assert_not_called() + self.relation_set.assert_not_called() @patch.object(ceph_hooks, 'apt_install') @patch.object(ceph_hooks, 'services') diff --git a/ceph-radosgw/unit_tests/test_multisite.py b/ceph-radosgw/unit_tests/test_multisite.py index 403935fa..afc7756f 100644 --- a/ceph-radosgw/unit_tests/test_multisite.py +++ b/ceph-radosgw/unit_tests/test_multisite.py @@ -484,3 +484,233 @@ def test_check_zone_has_buckets(self, mock_list_zonegroups, multisite.check_cluster_has_buckets(), True ) + + def test_get_zone_info(self): + multisite.get_zone_info('test_zone', 'test_zonegroup') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'zone', 'get', + '--rgw-zone=test_zone', '--rgw-zonegroup=test_zonegroup', + ]) + + def test_sync_group_exists(self): + groups = [ + {'key': 'group1'}, + {'key': 'group2'}, + ] + self.subprocess.check_output.return_value = json.dumps(groups).encode() + self.assertTrue(multisite.sync_group_exists('group1')) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'get', + ]) + + def test_bucket_sync_group_exists(self): + with open(self._testdata('test_list_sync_groups'), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + self.assertTrue(multisite.sync_group_exists('default', + bucket='test')) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'get', + '--bucket=test', + ]) + + def test_sync_group_does_not_exists(self): + with open(self._testdata('test_list_sync_groups'), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + self.assertFalse(multisite.sync_group_exists('group-non-existent')) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'get', + ]) + + def test_get_sync_group(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.get_sync_group('default') + self.assertEqual(result['id'], 'default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'get', + '--group-id=default', + ]) + + def test_create_sync_group(self): + test_group_json = json.dumps({"id": "default"}).encode() + self.subprocess.check_output.return_value = test_group_json + result = multisite.create_sync_group( + group_id='default', + status=multisite.SYNC_POLICY_ENABLED, + ) + self.assertEqual(result['id'], 'default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'create', + '--group-id=default', + '--status={}'.format(multisite.SYNC_POLICY_ENABLED), + ]) + + def test_create_sync_group_wrong_status(self): + self.assertRaises( + multisite.UnknownSyncPolicyState, + multisite.create_sync_group, "default", "wrong_status", + ) + + 
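# Hedged editorial sketch: the multisite helpers exercised in the tests
# above all follow one pattern -- build a radosgw-admin argument list,
# run it, and parse the JSON it prints. The helper below is illustrative
# only; its name, shape and the '--id' value are inferred from the
# assertions and testdata/test_list_sync_groups.json, not taken from the
# real module.
import json
import subprocess

RGW_ADMIN = ['radosgw-admin', '--id=rgw.testhost']


def sync_group_exists(group_id, bucket=None):
    cmd = RGW_ADMIN + ['sync', 'group', 'get']
    if bucket:
        # bucket-scoped sync groups are fetched with --bucket
        cmd.append('--bucket={}'.format(bucket))
    # the listing is a list of {'key': <group-id>, 'val': {...}} entries
    groups = json.loads(subprocess.check_output(cmd))
    return any(group['key'] == group_id for group in groups)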
def test_remove_sync_group(self): + multisite.remove_sync_group('default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'remove', + '--group-id=default', + ]) + + @mock.patch.object(multisite, 'get_sync_group') + @mock.patch.object(multisite, 'sync_group_exists') + def test_is_sync_group_update_needed(self, mock_sync_group_exists, + mock_get_sync_group): + mock_sync_group_exists.return_value = True + with open(self._testdata('test_get_sync_group'), 'r') as f: + mock_get_sync_group.return_value = json.loads(f.read()) + + result = multisite.is_sync_group_update_needed( + group_id='default', + flow_id='zone_a-zone_b', + pipe_id='zone_a-zone_b', + source_zone='zone_a', + dest_zone='zone_b', + desired_status=multisite.SYNC_POLICY_ALLOWED, + desired_flow_type=multisite.SYNC_FLOW_SYMMETRICAL, + ) + + mock_sync_group_exists.assert_called_with('default') + mock_get_sync_group.assert_called_with('default') + self.assertFalse(result) + + def test_is_sync_group_flow_update_needed(self): + with open(self._testdata('test_get_sync_group'), 'r') as f: + sync_group = json.loads(f.read()) + result = multisite.is_sync_group_flow_update_needed( + sync_group, + flow_id='zone_a-zone_b', + source_zone='zone_a', dest_zone='zone_b', + desired_flow_type=multisite.SYNC_FLOW_SYMMETRICAL, + ) + self.assertFalse(result) + + @mock.patch.object(multisite, 'remove_sync_group_flow') + def test_is_sync_group_flow_update_needed_flow_type_change( + self, mock_remove_sync_group_flow): + with open(self._testdata('test_get_sync_group'), 'r') as f: + sync_group = json.loads(f.read()) + result = multisite.is_sync_group_flow_update_needed( + sync_group, + flow_id='zone_a-zone_b', + source_zone='zone_a', dest_zone='zone_b', + desired_flow_type=multisite.SYNC_FLOW_DIRECTIONAL, + ) + mock_remove_sync_group_flow.assert_called_with( + group_id='default', + flow_id='zone_a-zone_b', + flow_type=multisite.SYNC_FLOW_SYMMETRICAL, + source_zone='zone_a', dest_zone='zone_b', + ) + self.assertTrue(result) + + def test_create_sync_group_flow_symmetrical(self): + with open(self._testdata('test_create_sync_group_flow'), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_sync_group_flow( + group_id='default', + flow_id='flow_id', + flow_type=multisite.SYNC_FLOW_SYMMETRICAL, + source_zone='zone_a', + dest_zone='zone_b', + ) + self.assertEqual(result['groups'][0]['id'], 'default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'flow', 'create', + '--group-id=default', + '--flow-id=flow_id', + '--flow-type=symmetrical', + '--zones=zone_a,zone_b', + ]) + + def test_create_sync_group_flow_directional(self): + with open(self._testdata('test_create_sync_group_flow'), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_sync_group_flow( + group_id='default', + flow_id='flow_id', + flow_type=multisite.SYNC_FLOW_DIRECTIONAL, + source_zone='zone_a', + dest_zone='zone_b', + ) + self.assertEqual(result['groups'][0]['id'], 'default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'flow', 'create', + '--group-id=default', + '--flow-id=flow_id', + '--flow-type=directional', + '--source-zone=zone_a', '--dest-zone=zone_b', + ]) + + def test_create_sync_group_flow_wrong_type(self): + self.assertRaises( + multisite.UnknownSyncFlowType, + multisite.create_sync_group_flow, + group_id='default', 
flow_id='flow_id', flow_type='wrong_type', + source_zone='zone_a', dest_zone='zone_b', + ) + + def test_remove_sync_group_flow_symmetrical(self): + multisite.remove_sync_group_flow( + group_id='default', + flow_id='flow_id', + flow_type=multisite.SYNC_FLOW_SYMMETRICAL, + ) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'flow', 'remove', + '--group-id=default', + '--flow-id=flow_id', + '--flow-type=symmetrical', + ]) + + def test_remove_sync_group_flow_directional(self): + multisite.remove_sync_group_flow( + group_id='default', + flow_id='flow_id', + flow_type=multisite.SYNC_FLOW_DIRECTIONAL, + source_zone='zone_a', + dest_zone='zone_b', + ) + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'flow', 'remove', + '--group-id=default', + '--flow-id=flow_id', + '--flow-type=directional', + '--source-zone=zone_a', '--dest-zone=zone_b', + ]) + + def test_create_sync_group_pipe(self): + with open(self._testdata(whoami()), 'rb') as f: + self.subprocess.check_output.return_value = f.read() + result = multisite.create_sync_group_pipe( + group_id='default', + pipe_id='pipe_id', + source_zones=['zone_a', 'zone_b'], + dest_zones=['zone_c', 'zone_d'], + ) + self.assertEqual(result['groups'][0]['id'], 'default') + self.subprocess.check_output.assert_called_with([ + 'radosgw-admin', '--id=rgw.testhost', + 'sync', 'group', 'pipe', 'create', + '--group-id=default', + '--pipe-id=pipe_id', + '--source-zones=zone_a,zone_b', '--source-bucket=*', + '--dest-zones=zone_c,zone_d', '--dest-bucket=*', + ]) diff --git a/ceph-radosgw/unit_tests/testdata/test_create_sync_group_flow.json b/ceph-radosgw/unit_tests/testdata/test_create_sync_group_flow.json new file mode 100644 index 00000000..363ecf60 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_sync_group_flow.json @@ -0,0 +1,20 @@ +{ + "groups": [ + { + "id": "default", + "data_flow": { + "symmetrical": [ + { + "id": "zone_a-zone_b", + "zones": [ + "zone_a", + "zone_b" + ] + } + ] + }, + "pipes": [], + "status": "allowed" + } + ] +} diff --git a/ceph-radosgw/unit_tests/testdata/test_create_sync_group_pipe.json b/ceph-radosgw/unit_tests/testdata/test_create_sync_group_pipe.json new file mode 100644 index 00000000..6d2b6630 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_create_sync_group_pipe.json @@ -0,0 +1,49 @@ +{ + "groups": [ + { + "id": "default", + "data_flow": { + "symmetrical": [ + { + "id": "zone_a-zone_b", + "zones": [ + "zone_a", + "zone_b" + ] + } + ] + }, + "pipes": [ + { + "id": "zone_a-zone_b", + "source": { + "bucket": "*", + "zones": [ + "zone_a", + "zone_b" + ] + }, + "dest": { + "bucket": "*", + "zones": [ + "zone_a", + "zone_b" + ] + }, + "params": { + "source": { + "filter": { + "tags": [] + } + }, + "dest": {}, + "priority": 0, + "mode": "system", + "user": "" + } + } + ], + "status": "allowed" + } + ] +} diff --git a/ceph-radosgw/unit_tests/testdata/test_get_sync_group.json b/ceph-radosgw/unit_tests/testdata/test_get_sync_group.json new file mode 100644 index 00000000..0a3f43d8 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_get_sync_group.json @@ -0,0 +1,45 @@ +{ + "id": "default", + "data_flow": { + "symmetrical": [ + { + "id": "zone_a-zone_b", + "zones": [ + "zone_a", + "zone_b" + ] + } + ] + }, + "pipes": [ + { + "id": "zone_a-zone_b", + "source": { + "bucket": "*", + "zones": [ + "zone_a", + "zone_b" + ] + }, + "dest": { + "bucket": "*", + "zones": [ + "zone_a", + "zone_b" + ] + }, + 
"params": { + "source": { + "filter": { + "tags": [] + } + }, + "dest": {}, + "priority": 0, + "mode": "system", + "user": "" + } + } + ], + "status": "allowed" +} diff --git a/ceph-radosgw/unit_tests/testdata/test_list_sync_groups.json b/ceph-radosgw/unit_tests/testdata/test_list_sync_groups.json new file mode 100644 index 00000000..b80c2999 --- /dev/null +++ b/ceph-radosgw/unit_tests/testdata/test_list_sync_groups.json @@ -0,0 +1,45 @@ +[ + { + "key": "default", + "val": { + "id": "default", + "data_flow": { + "directional": [ + { + "source_zone": "zone_a", + "dest_zone": "zone_b" + } + ] + }, + "pipes": [ + { + "id": "zone_a-zone_b", + "source": { + "bucket": "*", + "zones": [ + "zone_a" + ] + }, + "dest": { + "bucket": "*", + "zones": [ + "zone_b" + ] + }, + "params": { + "source": { + "filter": { + "tags": [] + } + }, + "dest": {}, + "priority": 0, + "mode": "system", + "user": "" + } + } + ], + "status": "allowed" + } + } +] From 27bb09de9e62a82313f65b5cb919e9079b3fe2aa Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Thu, 2 May 2024 16:30:06 +0530 Subject: [PATCH 2612/2699] Caracal-Squid Enablement Change-Id: Ib5d16975cf4729a21ee91f7cb3d5f2bcbb522530 Signed-off-by: Utkarsh Bhatt --- ceph-osd/charmcraft.yaml | 6 - .../hooks/charmhelpers/contrib/network/ip.py | 48 +++- .../charmhelpers/contrib/openstack/context.py | 15 ++ .../charmhelpers/contrib/openstack/utils.py | 24 +- ceph-osd/lib/charms_ceph/broker.py | 70 +++++ ceph-osd/lib/charms_ceph/utils.py | 2 + ceph-osd/metadata.yaml | 2 - ceph-osd/tests/bundles/jammy-bobcat.yaml | 18 +- ...{mantic-bobcat.yaml => jammy-caracal.yaml} | 30 +-- ceph-osd/tests/bundles/lunar-antelope.yaml | 247 ------------------ ceph-osd/tests/tests.yaml | 2 +- 11 files changed, 166 insertions(+), 298 deletions(-) rename ceph-osd/tests/bundles/{mantic-bobcat.yaml => jammy-caracal.yaml} (88%) delete mode 100644 ceph-osd/tests/bundles/lunar-antelope.yaml diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index 777dc20f..4190b63d 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -33,9 +33,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py index cf9926b9..f3b4864f 100644 --- a/ceph-osd/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-osd/hooks/charmhelpers/contrib/network/ip.py @@ -16,6 +16,7 @@ import re import subprocess import socket +import ssl from functools import partial @@ -527,19 +528,56 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] -def port_has_listener(address, port): +class SSLPortCheckInfo(object): + + def __init__(self, key, cert, ca_cert, check_hostname=False): + self.key = key + self.cert = cert + self.ca_cert = ca_cert + # NOTE: by default we do not check hostname since the port check is + # typically performed using 0.0.0.0 which will not match the + # certificate. Hence the default for this is False. 
+        self.check_hostname = check_hostname
+
+    @property
+    def ssl_context(self):
+        context = ssl.create_default_context()
+        context.check_hostname = self.check_hostname
+        context.load_cert_chain(self.cert, self.key)
+        context.load_verify_locations(self.ca_cert)
+        return context
+
+
+def port_has_listener(address, port, sslinfo=None):
     """
     Returns True if the address:port is open and being listened to,
-    else False.
+    else False. By default uses netcat to check ports but if sslinfo is
+    provided will use an SSL connection instead.

     @param address: an IP address or hostname
     @param port: integer port
+    @param sslinfo: optional SSLPortCheckInfo object.
+                    If provided, the check is performed using an ssl
+                    connection. Note calls 'nc' via a subprocess shell
     """
-    cmd = ['nc', '-z', address, str(port)]
-    result = subprocess.call(cmd)
-    return not (bool(result))
+    if not sslinfo:
+        cmd = ['nc', '-z', address, str(port)]
+        result = subprocess.call(cmd)
+        return not (bool(result))
+
+    try:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
+            ssock = sslinfo.ssl_context.wrap_socket(sock,
+                                                    server_hostname=address)
+            ssock.connect((address, port))
+            # this bit is crucial to ensure tls close_notify is sent
+            ssock.unwrap()
+
+            return True
+    except ConnectionRefusedError:
+        return False


 def assert_charm_supports_ipv6():
diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py
index 1e667fb0..cd70b55c 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/context.py
@@ -202,6 +202,21 @@ def get_related(self):
         return self.related


+class KeystoneAuditMiddleware(OSContextGenerator):
+    def __init__(self, service: str) -> None:
+        self.service_name = service
+
+    def __call__(self):
+        """Return context dictionary containing configuration status of
+        audit-middleware and the charm service name.
+        """
+        ctxt = {
+            'audit_middleware': config('audit-middleware') or False,
+            'service_name': self.service_name
+        }
+        return ctxt
+
+
 class SharedDBContext(OSContextGenerator):
     interfaces = ['shared-db']

diff --git a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
index da711c65..82c28d8e 100644
--- a/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/ceph-osd/hooks/charmhelpers/contrib/openstack/utils.py
@@ -1207,12 +1207,14 @@ def _ows_check_services_running(services, ports):
     return ows_check_services_running(services, ports)


-def ows_check_services_running(services, ports):
+def ows_check_services_running(services, ports, ssl_check_info=None):
     """Check that the services that should be running are actually running
     and that any ports specified are being listened to.

     @param services: list of strings OR dictionary specifying services/ports
     @param ports: list of ports
+    @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks
+                           will be done using an SSL connection.
     @returns state, message: strings or None, None
     """
     messages = []
@@ -1228,7 +1230,7 @@ def ows_check_services_running(services, ports):
         # also verify that the ports that should be open are open
         # NB, that ServiceManager objects only OPTIONALLY have ports
         map_not_open, ports_open = (
-            _check_listening_on_services_ports(services))
+            _check_listening_on_services_ports(services, ssl_check_info))
         if not all(ports_open):
             # find which service has missing ports. They are in service
             # order which makes it a bit easier.
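# Hedged usage sketch for the SSL-aware port check threaded through the
# helpers above. SSLPortCheckInfo and port_has_listener are the functions
# from charmhelpers.contrib.network.ip in this sync; the certificate paths
# below are placeholders, not values from the patch.
from charmhelpers.contrib.network.ip import (
    SSLPortCheckInfo,
    port_has_listener,
)

ssl_info = SSLPortCheckInfo(key='/path/to/server.key',    # placeholder
                            cert='/path/to/server.crt',   # placeholder
                            ca_cert='/path/to/ca.crt')    # placeholder

# Without sslinfo the check shells out to 'nc -z'; with it, a TLS
# handshake is completed and close_notify sent before the port is
# reported as open.
if port_has_listener('0.0.0.0', 443, ssl_info):
    print('TLS listener answered on port 443')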
@@ -1243,7 +1245,8 @@ def ows_check_services_running(services, ports): if ports is not None: # and we can also check ports which we don't know the service for - ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + ports_open, ports_open_bools = \ + _check_listening_on_ports_list(ports, ssl_check_info) if not all(ports_open_bools): messages.append( "Ports which should be open, but are not: {}" @@ -1302,7 +1305,8 @@ def _check_running_services(services): return list(zip(services, services_running)), services_running -def _check_listening_on_services_ports(services, test=False): +def _check_listening_on_services_ports(services, test=False, + ssl_check_info=None): """Check that the unit is actually listening (has the port open) on the ports that the service specifies are open. If test is True then the function returns the services with ports that are open rather than @@ -1312,11 +1316,14 @@ def _check_listening_on_services_ports(services, test=False): @param services: OrderedDict(service: [port, ...], ...) @param test: default=False, if False, test for closed, otherwise open. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) - ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + ports_states = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in all_ports] map_ports = OrderedDict() matched_ports = [p for p, opened in zip(all_ports, ports_states) if opened == test] # essentially opened xor test @@ -1327,16 +1334,19 @@ def _check_listening_on_services_ports(services, test=False): return map_ports, ports_states -def _check_listening_on_ports_list(ports): +def _check_listening_on_ports_list(ports, ssl_check_info=None): """Check that the ports list given are being listened to Returns a list of ports being listened to and a list of the booleans. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + ports_open = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in ports] return zip(ports, ports_open), ports_open diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 71f85f45..7f453ec8 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -106,6 +106,8 @@ def decode_req_encode_rsp(f): """Decorator to decode incoming requests and encode responses.""" def decode_inner(req): + if isinstance(req, bytes): + req = req.decode('utf-8') return json.dumps(f(json.loads(req))) return decode_inner @@ -833,6 +835,72 @@ def handle_rgw_region_set(request, service): os.unlink(infile.name) +def handle_create_cephfs_client(request, service): + """Creates a new CephFS client for a filesystem. + + :param request: The broker request + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ + fs_name = request.get('fs_name') + client_id = request.get('client_id') + # TODO: fs allows setting write permissions for a list of paths. 
+    path = request.get('path')
+    perms = request.get('perms')
+    # Need all parameters
+    if not fs_name or not client_id or not path or not perms:
+        msg = "Missing fs_name, client_id, path or perms params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Check that the provided fs_name exists
+    if fs_name not in get_cephfs(service=service):
+        msg = ("Ceph filesystem {} does not exist. " +
+               "Cannot authorize client").format(
+            fs_name)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Check that the provided client does NOT exist.
+    try:
+        cmd = ["ceph", "--id", service, "auth", "ls", "-f", "json"]
+        auth_ls = json.loads(check_output(cmd, encoding="utf-8"))
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+    except ValueError as err:
+        log(str(err), level=ERROR)
+        return {'exit-code': 1, 'stderr': str(err)}
+
+    client = "client.{}".format(client_id)
+    if client in (elem["entity"] for elem in auth_ls["auth_dump"]):
+        msg = "Client {} already exists".format(client)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Try to authorize the client
+    try:
+        cmd = [
+            "ceph",
+            "--id", service,
+            "fs", "authorize",
+            fs_name,
+            client,
+            path,
+            perms,
+            "-f", "json"
+        ]
+        fs_auth = json.loads(check_output(cmd, encoding="utf-8"))
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+    except ValueError as err:
+        log(str(err), level=ERROR)
+        return {'exit-code': 1, 'stderr': str(err)}
+
+    return {'exit-code': 0, 'key': fs_auth[0]["key"]}
+
+
 def process_requests_v1(reqs):
     """Process v1 requests.

@@ -902,6 +970,8 @@ def process_requests_v1(reqs):
             ret = handle_add_permissions_to_key(request=req, service=svc)
         elif op == 'set-key-permissions':
             ret = handle_set_key_permissions(request=req, service=svc)
+        elif op == "create-cephfs-client":
+            ret = handle_create_cephfs_client(request=req, service=svc)
         else:
             msg = "Unknown operation '{}'".format(op)
             log(msg, level=ERROR)
diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py
index 756dd9f1..57cb1d7b 100644
--- a/ceph-osd/lib/charms_ceph/utils.py
+++ b/ceph-osd/lib/charms_ceph/utils.py
@@ -3172,6 +3172,7 @@ def dirs_need_ownership_update(service):
     ('octopus', 'pacific'),
     ('pacific', 'quincy'),
     ('quincy', 'reef'),
+    ('reef', 'squid'),
 ])

 # Map UCA codenames to Ceph codenames
@@ -3196,6 +3197,7 @@ def dirs_need_ownership_update(service):
     'zed': 'quincy',
     'antelope': 'quincy',
     'bobcat': 'reef',
+    'caracal': 'squid',
 }


diff --git a/ceph-osd/metadata.yaml b/ceph-osd/metadata.yaml
index 6a3d801e..379f88d1 100644
--- a/ceph-osd/metadata.yaml
+++ b/ceph-osd/metadata.yaml
@@ -13,8 +13,6 @@ tags:
 series:
 - focal
 - jammy
-- lunar
-- mantic
 description: |
   Ceph is a distributed storage and network file system designed to provide
   excellent performance, reliability, and scalability.
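The charms_ceph/utils.py hunk above extends two lookup tables: the ordered
upgrade paths between Ceph releases, and the map from UCA codenames to Ceph
codenames. A minimal sketch of how such tables resolve a configured source;
the function names are illustrative only and not part of the module's API:

# Shapes mirror the entries touched above; contents abbreviated.
UPGRADE_PATHS = dict([
    ('quincy', 'reef'),
    ('reef', 'squid'),
])

UCA_CODENAME_MAP = {
    'bobcat': 'reef',
    'caracal': 'squid',
}


def ceph_codename_for(source):
    # e.g. 'cloud:jammy-caracal' -> 'caracal' -> 'squid'
    return UCA_CODENAME_MAP.get(source.rsplit('-', 1)[-1])


def next_ceph_release(current):
    # e.g. 'reef' -> 'squid'; None when no further upgrade edge is known
    return UPGRADE_PATHS.get(current)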
diff --git a/ceph-osd/tests/bundles/jammy-bobcat.yaml b/ceph-osd/tests/bundles/jammy-bobcat.yaml index ac468730..84bc99de 100644 --- a/ceph-osd/tests/bundles/jammy-bobcat.yaml +++ b/ceph-osd/tests/bundles/jammy-bobcat.yaml @@ -55,7 +55,7 @@ applications: ceph-osd: charm: ch:ceph-osd - channel: latest/edge + channel: reef/edge num_units: 3 storage: osd-devices: 'cinder,10G,2' @@ -78,7 +78,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: reef/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -95,7 +95,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: 2023.2/edge nova-compute: charm: ch:nova-compute @@ -104,7 +104,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: latest/edge + channel: 2023.2/edge glance: expose: True @@ -114,7 +114,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: 2023.2/edge cinder: expose: True @@ -126,11 +126,11 @@ applications: glance-api-version: '2' to: - '13' - channel: latest/edge + channel: 2023.2/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: 2023.2/edge nova-cloud-controller: expose: True @@ -140,7 +140,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: 2023.2/edge placement: charm: ch:placement @@ -149,7 +149,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: 2023.2/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/bundles/mantic-bobcat.yaml b/ceph-osd/tests/bundles/jammy-caracal.yaml similarity index 88% rename from ceph-osd/tests/bundles/mantic-bobcat.yaml rename to ceph-osd/tests/bundles/jammy-caracal.yaml index 9c2e41c6..b9fbbeaf 100644 --- a/ceph-osd/tests/bundles/mantic-bobcat.yaml +++ b/ceph-osd/tests/bundles/jammy-caracal.yaml @@ -1,22 +1,17 @@ variables: - openstack-origin: &openstack-origin distro - # use infra (mysql, rabbit) from lts for stability - infra-series: &infra-series jammy + openstack-origin: &openstack-origin cloud:jammy-caracal -series: mantic +series: jammy comment: - 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': constraints: mem=3072M - series: *infra-series '1': constraints: mem=3072M - series: *infra-series '2': constraints: mem=3072M - series: *infra-series '3': '4': '5': @@ -24,37 +19,30 @@ machines: '7': '8': '9': - series: *infra-series '10': - series: *infra-series '11': - series: *infra-series '12': - series: *infra-series '13': - series: *infra-series '14': - series: *infra-series '15': - series: *infra-series applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -63,7 +51,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-osd: charm: ch:ceph-osd @@ -97,7 +85,7 @@ applications: num_units: 1 to: - '9' - channel: latest/edge + channel: 3.9/edge keystone: expose: True diff --git a/ceph-osd/tests/bundles/lunar-antelope.yaml b/ceph-osd/tests/bundles/lunar-antelope.yaml deleted file mode 100644 index 8d04e764..00000000 --- a/ceph-osd/tests/bundles/lunar-antelope.yaml +++ /dev/null @@ -1,247 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - # use infra (mysql, rabbit) from lts for stability - infra-series: &infra-series jammy - -series: lunar - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - series: *infra-series - '1': - constraints: mem=3072M - series: *infra-series - '2': - constraints: mem=3072M - series: *infra-series - '3': - '4': - '5': - '6': - '7': - '8': - '9': - series: *infra-series - '10': - series: *infra-series - '11': - series: *infra-series - '12': - series: *infra-series - '13': - series: *infra-series - '14': - series: *infra-series - '15': - series: *infra-series - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: 8.0/edge - - ceph-osd: - charm: ch:ceph-osd - channel: quincy/edge - num_units: 3 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - aa-profile-mode: enforce - to: - - '3' - - '4' - - '5' - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: 2023.1/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: 2023.1/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: 
*openstack-origin - to: - - '12' - channel: 2023.1/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - channel: 2023.1/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: 2023.1/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: 2023.1/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: 2023.1/edge - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 6701ea2e..7a5802ab 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -5,7 +5,7 @@ gate_bundles: - focal-yoga - jammy-yoga - jammy-bobcat - - mantic-bobcat + - jammy-caracal smoke_bundles: - jammy-antelope From 7d84e737d0b2703210f50788dc49b8793f873c8b Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Thu, 2 May 2024 16:50:20 +0530 Subject: [PATCH 2613/2699] Caracal-Squid Enablement Change-Id: If6ac4379eaa222f0243d78caf47acaaf2b95f0f2 Signed-off-by: Utkarsh Bhatt --- ceph-rbd-mirror/charmcraft.yaml | 7 - ceph-rbd-mirror/osci.yaml | 2 +- ceph-rbd-mirror/src/metadata.yaml | 2 - .../src/tests/bundles/jammy-bobcat.yaml | 18 +- ...{mantic-bobcat.yaml => jammy-caracal.yaml} | 90 +------ .../src/tests/bundles/lunar-antelope.yaml | 246 ------------------ ceph-rbd-mirror/src/tests/tests.yaml | 3 +- 7 files changed, 25 insertions(+), 343 deletions(-) rename ceph-rbd-mirror/src/tests/bundles/{mantic-bobcat.yaml => jammy-caracal.yaml} (74%) delete mode 
100644 ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 8e9445e5..366bcbe0 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -27,10 +27,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] - diff --git a/ceph-rbd-mirror/osci.yaml b/ceph-rbd-mirror/osci.yaml index 19eaacf0..a2050c2b 100644 --- a/ceph-rbd-mirror/osci.yaml +++ b/ceph-rbd-mirror/osci.yaml @@ -7,4 +7,4 @@ needs_charm_build: true charm_build_name: ceph-rbd-mirror build_type: charmcraft - charmcraft_channel: 2.1/stable + charmcraft_channel: 2.x/stable diff --git a/ceph-rbd-mirror/src/metadata.yaml b/ceph-rbd-mirror/src/metadata.yaml index f2991fb0..1ed8ab6f 100644 --- a/ceph-rbd-mirror/src/metadata.yaml +++ b/ceph-rbd-mirror/src/metadata.yaml @@ -18,8 +18,6 @@ tags: series: - focal - jammy -- lunar -- mantic extra-bindings: public: cluster: diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml index 3bae7f3d..a6a37e0b 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-bobcat.yaml @@ -40,7 +40,7 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: 2023.2/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -54,26 +54,26 @@ applications: block-device: None glance-api-version: 2 openstack-origin: *openstack-origin - channel: latest/edge + channel: 2023.2/edge cinder-ceph: charm: ch:cinder-ceph num_units: 0 - channel: latest/edge + channel: 2023.2/edge glance: charm: ch:glance num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: 2023.2/edge nova-compute: charm: ch:nova-compute num_units: 1 options: openstack-origin: *openstack-origin - channel: latest/edge + channel: 2023.2/edge ceph-mon: charm: ch:ceph-mon @@ -81,7 +81,7 @@ applications: options: expected-osd-count: 3 source: *openstack-origin - channel: latest/edge + channel: reef/edge ceph-osd: charm: ch:ceph-osd @@ -91,7 +91,7 @@ applications: options: source: *openstack-origin osd-devices: '/dev/test-non-existent' - channel: latest/edge + channel: reef/edge ceph-rbd-mirror: series: *series @@ -106,7 +106,7 @@ applications: options: expected-osd-count: 3 source: *openstack-origin - channel: latest/edge + channel: reef/edge ceph-osd-b: charm: ch:ceph-osd @@ -116,7 +116,7 @@ applications: options: source: *openstack-origin osd-devices: '/dev/test-non-existent' - channel: latest/edge + channel: reef/edge ceph-rbd-mirror-b: series: *series diff --git a/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml similarity index 74% rename from ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml rename to ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml index 672966fa..67f89686 100644 --- a/ceph-rbd-mirror/src/tests/bundles/mantic-bobcat.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml @@ -1,44 +1,18 @@ variables: - openstack-origin: &openstack-origin distro - series: &series mantic - infra-series: &infra-series jammy + openstack-origin: &openstack-origin cloud:jammy-caracal + series: &series jammy local_overlay_enabled: False series: *series -comment: -- 'machines section to 
decide order of deployment. database sooner = faster' machines: '0': - constraints: mem=3072M - series: *infra-series + constraints: "mem=3072M" '1': - constraints: mem=3072M - series: *infra-series + constraints: "mem=3072M" '2': - constraints: mem=3072M - series: *infra-series - '3': - series: *infra-series - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - '19': - '20': - '21': + constraints: "mem=3072M" applications: @@ -55,31 +29,22 @@ applications: mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - # Reduce chances of spurious "super-read-only" failures, see lp:1882205 - expel-timeout: 20 to: - '0' - '1' - '2' - channel: 8.0.19/edge - series: *infra-series - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '3' - channel: 3.9/edge - series: *infra-series + channel: latest/edge keystone: charm: ch:keystone num_units: 1 options: openstack-origin: *openstack-origin - to: - - '4' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 channel: latest/edge cinder: @@ -88,8 +53,7 @@ applications: options: block-device: None glance-api-version: 2 - to: - - '5' + openstack-origin: *openstack-origin channel: latest/edge cinder-ceph: @@ -102,8 +66,6 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin - to: - - '6' channel: latest/edge nova-compute: @@ -111,21 +73,14 @@ applications: num_units: 1 options: openstack-origin: *openstack-origin - to: - - '7' channel: latest/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: - source: *openstack-origin - monitor-count: '3' expected-osd-count: 3 - to: - - '8' - - '9' - - '10' + source: *openstack-origin channel: latest/edge ceph-osd: @@ -136,10 +91,6 @@ applications: options: source: *openstack-origin osd-devices: '/dev/test-non-existent' - to: - - '11' - - '12' - - '13' channel: latest/edge ceph-rbd-mirror: @@ -148,20 +99,13 @@ applications: num_units: 1 options: source: *openstack-origin - to: - - '14' ceph-mon-b: charm: ch:ceph-mon num_units: 3 options: - source: *openstack-origin - monitor-count: '3' expected-osd-count: 3 - to: - - '15' - - '16' - - '17' + source: *openstack-origin channel: latest/edge ceph-osd-b: @@ -172,10 +116,6 @@ applications: options: source: *openstack-origin osd-devices: '/dev/test-non-existent' - to: - - '18' - - '19' - - '20' channel: latest/edge ceph-rbd-mirror-b: @@ -184,8 +124,6 @@ applications: num_units: 1 options: source: *openstack-origin - to: - - '21' relations: diff --git a/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml b/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml deleted file mode 100644 index cd0d8d71..00000000 --- a/ceph-rbd-mirror/src/tests/bundles/lunar-antelope.yaml +++ /dev/null @@ -1,246 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - series: &series lunar - infra-series: &infra-series jammy - -local_overlay_enabled: False - -series: *series - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - series: *infra-series - '1': - constraints: mem=3072M - series: *infra-series - '2': - constraints: mem=3072M - series: *infra-series - '3': - series: *infra-series - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - '19': - '20': - '21': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - # Reduce chances of spurious "super-read-only" failures, see lp:1882205 - expel-timeout: 20 - to: - - '0' - - '1' - - '2' - channel: 8.0.19/edge - series: *infra-series - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '3' - channel: 3.9/edge - series: *infra-series - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '4' - channel: 2023.1/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - to: - - '5' - channel: 2023.1/edge - - cinder-ceph: - charm: ch:cinder-ceph - num_units: 0 - channel: 2023.1/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '6' - channel: 2023.1/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '7' - channel: 2023.1/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - expected-osd-count: 3 - to: - - '8' - - '9' - - '10' - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - to: - - '11' - - '12' - - '13' - channel: quincy/edge - - ceph-rbd-mirror: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '14' - - ceph-mon-b: - charm: ch:ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - expected-osd-count: 3 - to: - - '15' - - '16' - - '17' - channel: quincy/edge - - ceph-osd-b: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 'cinder,10G' - options: - source: *openstack-origin - bluestore: False - use-direct-io: False - osd-devices: '/dev/test-non-existent' - to: - - '18' - - '19' - - '20' - channel: quincy/edge - - ceph-rbd-mirror-b: - series: *series - charm: ../../../ceph-rbd-mirror.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '21' - -relations: - -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - 'keystone:identity-service' - - cinder -- - 'keystone:identity-service' - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - 
glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - -- - ceph-mon:osd - - ceph-osd:mon -- - ceph-mon - - ceph-rbd-mirror:ceph-local -- - ceph-mon - - ceph-rbd-mirror-b:ceph-remote - -- - ceph-mon-b:osd - - ceph-osd-b:mon -- - ceph-mon-b - - ceph-rbd-mirror-b:ceph-local -- - ceph-mon-b - - ceph-rbd-mirror:ceph-remote diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 7b040009..62b39dbb 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -3,7 +3,6 @@ smoke_bundles: - jammy-antelope gate_bundles: - jammy-bobcat -- mantic-bobcat comment: | The e2e bundles are useful for development but adds no additional value to the functional tests. @@ -20,4 +19,4 @@ tests: - zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest tests_options: force_deploy: - - mantic-bobcat + - jammy-caracal \ No newline at end of file From 515ef67bad499bb4715ce5bad1a0c32f46898e0b Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Thu, 2 May 2024 17:14:53 +0530 Subject: [PATCH 2614/2699] Caracal-Squid Enablement Signed-off-by: Utkarsh Bhatt Change-Id: Iac40c02356ca4f5d453a149cd7ae83bef754b273 --- ceph-mon/charmcraft.yaml | 6 - ceph-mon/metadata.yaml | 4 +- ceph-mon/tests/bundles/jammy-bobcat.yaml | 20 +- ...lunar-antelope.yaml => jammy-caracal.yaml} | 56 ++-- ceph-mon/tests/bundles/mantic-bobcat.yaml | 267 ------------------ ceph-mon/tests/tests.yaml | 2 +- 6 files changed, 37 insertions(+), 318 deletions(-) rename ceph-mon/tests/bundles/{lunar-antelope.yaml => jammy-caracal.yaml} (88%) delete mode 100644 ceph-mon/tests/bundles/mantic-bobcat.yaml diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index b0562382..a58f4124 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -36,9 +36,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 85e088fb..9a02b26e 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -13,8 +13,6 @@ tags: series: - focal - jammy -- lunar -- mantic peers: mon: interface: ceph @@ -50,4 +48,4 @@ resources: alert-rules: type: file filename: alert.yaml.rules - description: "Alerting rules" + description: Alerting rules diff --git a/ceph-mon/tests/bundles/jammy-bobcat.yaml b/ceph-mon/tests/bundles/jammy-bobcat.yaml index 2227b406..b9c1033f 100644 --- a/ceph-mon/tests/bundles/jammy-bobcat.yaml +++ b/ceph-mon/tests/bundles/jammy-bobcat.yaml @@ -77,11 +77,11 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon - channel: latest/edge + channel: reef/edge num_units: 3 options: source: *openstack-origin @@ -96,7 +96,7 @@ applications: num_units: 1 options: source: *openstack-origin - channel: latest/edge + channel: reef/edge to: - '17' @@ -108,7 +108,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: latest/edge + channel: 2023.2/edge nova-compute: charm: ch:nova-compute @@ -118,7 +118,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: latest/edge + channel: 2023.2/edge glance: expose: True @@ -128,7 +128,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: latest/edge + channel: 2023.2/edge cinder: expose: True @@ -140,11 +140,11 @@ 
applications: openstack-origin: *openstack-origin to: - '13' - channel: latest/edge + channel: 2023.2/edge cinder-ceph: charm: ch:cinder-ceph - channel: latest/edge + channel: 2023.2/edge nova-cloud-controller: expose: True @@ -154,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: 2023.2/edge placement: charm: ch:placement @@ -163,7 +163,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: 2023.2/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/tests/bundles/lunar-antelope.yaml b/ceph-mon/tests/bundles/jammy-caracal.yaml similarity index 88% rename from ceph-mon/tests/bundles/lunar-antelope.yaml rename to ceph-mon/tests/bundles/jammy-caracal.yaml index 134abb9e..e6e587c0 100644 --- a/ceph-mon/tests/bundles/lunar-antelope.yaml +++ b/ceph-mon/tests/bundles/jammy-caracal.yaml @@ -1,24 +1,20 @@ variables: - openstack-origin: &openstack-origin distro - # use infra (mysql, rabbit) from lts for stability - infra-series: &infra-series jammy + openstack-origin: &openstack-origin cloud:jammy-caracal -series: lunar +local_overlay_enabled: False + +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' machines: '0': constraints: mem=3072M - series: *infra-series '1': constraints: mem=3072M - series: *infra-series '2': constraints: mem=3072M - series: *infra-series '3': - series: *infra-series '4': '5': '6': @@ -33,7 +29,6 @@ machines: '15': '16': '17': - series: focal applications: @@ -67,7 +62,7 @@ applications: charm: ch:rabbitmq-server num_units: 1 to: - - '3' + - '9' channel: 3.9/edge ceph-osd: @@ -79,31 +74,31 @@ applications: source: *openstack-origin osd-devices: '/dev/test-non-existent' to: + - '3' - '4' - '5' - - '6' - channel: quincy/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon - channel: quincy/edge + channel: latest/edge num_units: 3 options: source: *openstack-origin monitor-count: '3' to: + - '6' - '7' - '8' - - '9' ceph-fs: charm: ch:ceph-fs num_units: 1 options: source: *openstack-origin - channel: quincy/edge + channel: latest/edge to: - - '10' + - '17' keystone: expose: True @@ -112,8 +107,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '11' - channel: 2023.1/edge + - '10' + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -122,8 +117,8 @@ applications: openstack-origin: *openstack-origin libvirt-image-backend: rbd to: - - '12' - channel: 2023.1/edge + - '11' + channel: latest/edge glance: expose: True @@ -132,8 +127,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '13' - channel: 2023.1/edge + - '12' + channel: latest/edge cinder: expose: True @@ -144,12 +139,12 @@ applications: glance-api-version: '2' openstack-origin: *openstack-origin to: - - '14' - channel: 2023.1/edge + - '13' + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: 2023.1/edge + channel: latest/edge nova-cloud-controller: expose: True @@ -158,8 +153,8 @@ applications: options: openstack-origin: *openstack-origin to: - - '15' - channel: 2023.1/edge + - '14' + channel: latest/edge placement: charm: ch:placement @@ -167,15 +162,14 @@ applications: options: openstack-origin: *openstack-origin to: - - '16' - channel: 2023.1/edge + - '15' + channel: latest/edge prometheus2: charm: ch:prometheus2 num_units: 1 - series: focal to: - - '17' + - '16' relations: - - 'nova-compute:amqp' diff --git a/ceph-mon/tests/bundles/mantic-bobcat.yaml 
b/ceph-mon/tests/bundles/mantic-bobcat.yaml deleted file mode 100644 index 4fe93205..00000000 --- a/ceph-mon/tests/bundles/mantic-bobcat.yaml +++ /dev/null @@ -1,267 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - # use infra (mysql, rabbit) from lts for stability - infra-series: &infra-series jammy - -series: mantic - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - series: *infra-series - '1': - constraints: mem=3072M - series: *infra-series - '2': - constraints: mem=3072M - series: *infra-series - '3': - series: *infra-series - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - series: focal - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: 8.0/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '3' - channel: 3.9/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '7' - - '8' - - '9' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: quincy/edge - to: - - '10' - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: 2023.1/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '12' - channel: 2023.1/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '13' - channel: 2023.1/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '14' - channel: 2023.1/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: 2023.1/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: 2023.1/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '16' - channel: 2023.1/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - series: focal - to: - - '17' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 
'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index e2aef903..af26df09 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -5,7 +5,7 @@ gate_bundles: - focal-yoga - jammy-yoga - jammy-bobcat - - mantic-bobcat + - jammy-caracal smoke_bundles: - focal-yoga From b59271e537a9a7ee996832f223b9864f8915b960 Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Thu, 2 May 2024 16:15:29 +0530 Subject: [PATCH 2615/2699] Caracal-Squid enablement contains: - Bundle updates - charmhelpers sync - drop lunar/mantic Change-Id: I6ca31ca3b0cc8aedabaeaf2a2e64dede1248e388 Signed-off-by: Utkarsh Bhatt --- ceph-radosgw/charmcraft.yaml | 6 - .../hooks/charmhelpers/contrib/network/ip.py | 48 +++++- .../charmhelpers/contrib/openstack/context.py | 18 ++- .../section-audit-middleware-notifications | 4 + .../openstack/templates/section-filter-audit | 6 + .../charmhelpers/contrib/openstack/utils.py | 49 +++---- .../charmhelpers/contrib/storage/linux/lvm.py | 6 +- ceph-radosgw/hooks/charmhelpers/core/host.py | 7 +- .../hooks/charmhelpers/fetch/ubuntu.py | 10 ++ ceph-radosgw/hooks/charmhelpers/osplatform.py | 28 +++- ceph-radosgw/lib/charms_ceph/broker.py | 70 +++++++++ ceph-radosgw/lib/charms_ceph/utils.py | 138 +++++------------- ceph-radosgw/metadata.yaml | 2 - .../tests/bundles/focal-yoga-multisite.yaml | 8 +- .../tests/bundles/focal-yoga-namespaced.yaml | 6 +- ceph-radosgw/tests/bundles/focal-yoga.yaml | 6 +- .../bundles/jammy-antelope-multisite.yaml | 8 +- .../bundles/jammy-antelope-namespaced.yaml | 9 +- .../tests/bundles/jammy-antelope.yaml | 8 +- .../tests/bundles/jammy-bobcat-multisite.yaml | 8 +- .../bundles/jammy-bobcat-namespaced.yaml | 9 +- ceph-radosgw/tests/bundles/jammy-bobcat.yaml | 9 +- .../bundles/jammy-caracal-multisite.yaml | 99 +++++++++++++ .../bundles/jammy-caracal-namespaced.yaml | 125 ++++++++++++++++ ...{mantic-bobcat.yaml => jammy-caracal.yaml} | 15 +- .../tests/bundles/local-jammy-antelope.yaml | 6 +- ceph-radosgw/tests/tests.yaml | 9 +- 27 files changed, 513 insertions(+), 204 deletions(-) create mode 100644 
ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-audit-middleware-notifications create mode 100644 ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-filter-audit create mode 100644 ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml create mode 100644 ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml rename ceph-radosgw/tests/bundles/{mantic-bobcat.yaml => jammy-caracal.yaml} (91%) diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index d5329498..e54ca1af 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -33,9 +33,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py index cf9926b9..f3b4864f 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/network/ip.py @@ -16,6 +16,7 @@ import re import subprocess import socket +import ssl from functools import partial @@ -527,19 +528,56 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] -def port_has_listener(address, port): +class SSLPortCheckInfo(object): + + def __init__(self, key, cert, ca_cert, check_hostname=False): + self.key = key + self.cert = cert + self.ca_cert = ca_cert + # NOTE: by default we do not check hostname since the port check is + # typically performed using 0.0.0.0 which will not match the + # certificate. Hence the default for this is False. + self.check_hostname = check_hostname + + @property + def ssl_context(self): + context = ssl.create_default_context() + context.check_hostname = self.check_hostname + context.load_cert_chain(self.cert, self.key) + context.load_verify_locations(self.ca_cert) + return context + + +def port_has_listener(address, port, sslinfo=None): """ Returns True if the address:port is open and being listened to, - else False. + else False. By default uses netcat to check ports but if sslinfo is + provided will use an SSL connection instead. @param address: an IP address or hostname @param port: integer port + @param sslinfo: optional SSLPortCheckInfo object. + If provided, the check is performed using an ssl + connection. 
Note calls 'nc' via a subprocess """ - cmd = ['nc', '-z', address, str(port)] - result = subprocess.call(cmd) - return not (bool(result)) + if not sslinfo: + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not (bool(result)) + + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock: + ssock = sslinfo.ssl_context.wrap_socket(sock, + server_hostname=address) + ssock.connect((address, port)) + # this bit is crucial to ensure tls close_notify is sent + ssock.unwrap() + + return True + except ConnectionRefusedError: + return False def assert_charm_supports_ipv6(): diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py index 42f15032..cd70b55c 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/context.py @@ -202,6 +202,21 @@ def get_related(self): return self.related +class KeystoneAuditMiddleware(OSContextGenerator): + def __init__(self, service: str) -> None: + self.service_name = service + + def __call__(self): + """Return context dictionary containing configuration status of + audit-middleware and the charm service name. + """ + ctxt = { + 'audit_middleware': config('audit-middleware') or False, + 'service_name': self.service_name + } + return ctxt + + class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] @@ -545,7 +560,7 @@ def _resolve(key): 'internal_auth_url': internal_auth_url, }) - # we keep all veriables in ctxt for compatibility and + # we keep all variables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic # templating if keystonemiddleware_os_release: @@ -557,6 +572,7 @@ def _resolve(key): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading + ctxt['admin_user_id'] = _resolve('service_user_id') ctxt['admin_tenant_id'] = _resolve('service_tenant_id') ctxt['admin_domain_id'] = _resolve('service_domain_id') return ctxt diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-audit-middleware-notifications b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-audit-middleware-notifications new file mode 100644 index 00000000..1f88014f --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-audit-middleware-notifications @@ -0,0 +1,4 @@ +{% if audit_middleware -%} +[audit_middleware_notifications] +driver = log +{% endif -%} \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-filter-audit b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-filter-audit new file mode 100644 index 00000000..11512aee --- /dev/null +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/templates/section-filter-audit @@ -0,0 +1,6 @@ +{% if audit_middleware and service_name -%} +[filter:audit] +paste.filter_factory = keystonemiddleware.audit:filter_factory +audit_map_file = /etc/{{ service_name }}/api_audit_map.conf +service_name = {{ service_name }} +{% endif -%} \ No newline at end of file diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py index e98be2c5..82c28d8e 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/openstack/utils.py @@ -161,6 +161,7 @@ ('2022.2', 'zed'), ('2023.1',
'antelope'), ('2023.2', 'bobcat'), + ('2024.1', 'caracal'), ]) # The ugly duckling - must list releases oldest to newest @@ -416,17 +417,6 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, error_out(e) -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): - if k == codename: - return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] @@ -585,7 +575,6 @@ def _do_install(): return openstack_release().get('OPENSTACK_CODENAME') -@cached def openstack_release(): """Return /etc/os-release in a dict.""" d = {} @@ -847,14 +836,10 @@ def openstack_upgrade_available(package): if not cur_vers: # The package has not been installed yet do not attempt upgrade return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - try: - avail_vers = get_os_version_install_source(src) - except Exception: - avail_vers = cur_vers + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1222,12 +1207,14 @@ def _ows_check_services_running(services, ports): return ows_check_services_running(services, ports) -def ows_check_services_running(services, ports): +def ows_check_services_running(services, ports, ssl_check_info=None): """Check that the services that should be running are actually running and that any ports specified are being listened to. @param services: list of strings OR dictionary specifying services/ports @param ports: list of ports + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @returns state, message: strings or None, None """ messages = [] @@ -1243,7 +1230,7 @@ def ows_check_services_running(services, ports): # also verify that the ports that should be open are open # NB, that ServiceManager objects only OPTIONALLY have ports map_not_open, ports_open = ( - _check_listening_on_services_ports(services)) + _check_listening_on_services_ports(services, ssl_check_info)) if not all(ports_open): # find which service has missing ports. They are in service # order which makes it a bit easier. @@ -1258,7 +1245,8 @@ def ows_check_services_running(services, ports): if ports is not None: # and we can also check ports which we don't know the service for - ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + ports_open, ports_open_bools = \ + _check_listening_on_ports_list(ports, ssl_check_info) if not all(ports_open_bools): messages.append( "Ports which should be open, but are not: {}" @@ -1317,7 +1305,8 @@ def _check_running_services(services): return list(zip(services, services_running)), services_running -def _check_listening_on_services_ports(services, test=False): +def _check_listening_on_services_ports(services, test=False, + ssl_check_info=None): """Check that the unit is actually listening (has the port open) on the ports that the service specifies are open. 
If test is True then the function returns the services with ports that are open rather than @@ -1327,11 +1316,14 @@ def _check_listening_on_services_ports(services, test=False): @param services: OrderedDict(service: [port, ...], ...) @param test: default=False, if False, test for closed, otherwise open. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) - ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + ports_states = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in all_ports] map_ports = OrderedDict() matched_ports = [p for p, opened in zip(all_ports, ports_states) if opened == test] # essentially opened xor test @@ -1342,16 +1334,19 @@ def _check_listening_on_services_ports(services, test=False): return map_ports, ports_states -def _check_listening_on_ports_list(ports): +def _check_listening_on_ports_list(ports, ssl_check_info=None): """Check that the ports list given are being listened to Returns a list of ports being listened to and a list of the booleans. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + ports_open = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in ports] return zip(ports, ports_open), ports_open diff --git a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py index d0a57211..0d294c79 100644 --- a/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-radosgw/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -17,8 +17,6 @@ CalledProcessError, check_call, check_output, - Popen, - PIPE, ) @@ -58,9 +56,7 @@ def remove_lvm_physical_volume(block_device): :param block_device: str: Full path of block device to scrub. 
''' - p = Popen(['pvremove', '-ff', block_device], - stdin=PIPE) - p.communicate(input='y\n') + check_call(['pvremove', '-ff', '--yes', block_device]) def list_lvm_volume_group(block_device): diff --git a/ceph-radosgw/hooks/charmhelpers/core/host.py b/ceph-radosgw/hooks/charmhelpers/core/host.py index 70dde6a5..def403c5 100644 --- a/ceph-radosgw/hooks/charmhelpers/core/host.py +++ b/ceph-radosgw/hooks/charmhelpers/core/host.py @@ -256,8 +256,11 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(service_name=service_name): - service('unmask', service_name) - service('enable', service_name) + if service('is-enabled', service_name): + log('service {} already enabled'.format(service_name), level=DEBUG) + else: + service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py index 1be992c4..d0089eb7 100644 --- a/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py +++ b/ceph-radosgw/hooks/charmhelpers/fetch/ubuntu.py @@ -246,6 +246,14 @@ 'bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', + # caracal + 'caracal': 'jammy-updates/caracal', + 'jammy-caracal': 'jammy-updates/caracal', + 'jammy-caracal/updates': 'jammy-updates/caracal', + 'jammy-updates/caracal': 'jammy-updates/caracal', + 'caracal/proposed': 'jammy-proposed/caracal', + 'jammy-caracal/proposed': 'jammy-proposed/caracal', + 'jammy-proposed/caracal': 'jammy-proposed/caracal', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -279,6 +287,7 @@ 'zed', 'antelope', 'bobcat', + 'caracal', ) @@ -308,6 +317,7 @@ ('kinetic', 'zed'), ('lunar', 'antelope'), ('mantic', 'bobcat'), + ('noble', 'caracal'), ]) diff --git a/ceph-radosgw/hooks/charmhelpers/osplatform.py b/ceph-radosgw/hooks/charmhelpers/osplatform.py index 1ace468f..5d121866 100644 --- a/ceph-radosgw/hooks/charmhelpers/osplatform.py +++ b/ceph-radosgw/hooks/charmhelpers/osplatform.py @@ -9,19 +9,13 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warnings *not* disabled, as we certainly need to fix this. - if hasattr(platform, 'linux_distribution'): - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - else: - current_platform = _get_platform_from_fs() + current_platform = _get_current_platform() if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" - elif "debian" in current_platform: + elif "debian" in current_platform or "Debian" in current_platform: # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" @@ -36,6 +30,24 @@ def get_platform(): .format(current_platform)) +def _get_current_platform(): + """Return the current platform information for the OS. + + Attempts to lookup linux distribution information from the platform + module for releases of python < 3.7. For newer versions of python, + the platform is determined from the /etc/os-release file. 
+ """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + return current_platform + + def _get_platform_from_fs(): """Get Platform from /etc/os-release.""" with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: diff --git a/ceph-radosgw/lib/charms_ceph/broker.py b/ceph-radosgw/lib/charms_ceph/broker.py index 71f85f45..7f453ec8 100644 --- a/ceph-radosgw/lib/charms_ceph/broker.py +++ b/ceph-radosgw/lib/charms_ceph/broker.py @@ -106,6 +106,8 @@ def decode_req_encode_rsp(f): """Decorator to decode incoming requests and encode responses.""" def decode_inner(req): + if isinstance(req, bytes): + req = req.decode('utf-8') return json.dumps(f(json.loads(req))) return decode_inner @@ -833,6 +835,72 @@ def handle_rgw_region_set(request, service): os.unlink(infile.name) +def handle_create_cephfs_client(request, service): + """Creates a new CephFS client for a filesystem. + + :param request: The broker request + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ + fs_name = request.get('fs_name') + client_id = request.get('client_id') + # TODO: fs allows setting write permissions for a list of paths. + path = request.get('path') + perms = request.get('perms') + # Need all parameters + if not fs_name or not client_id or not path or not perms: + msg = "Missing fs_name, client_id, path or perms params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Check that the provided fs_name exists + if fs_name not in get_cephfs(service=service): + msg = ("Ceph filesystem {} does not exist. " + + "Cannot authorize client").format( + fs_name) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Check that the provided client does NOT exist. + try: + cmd = ["ceph", "--id", service, "auth", "ls", "-f", "json"] + auth_ls = json.loads(check_output(cmd, encoding="utf-8")) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + except ValueError as err: + log(str(err), level=ERROR) + return {'exit-code': 1, 'stderr': str(err)} + + client = "client.{}".format(client_id) + if client in (elem["entity"] for elem in auth_ls["auth_dump"]): + msg = "Client {} already exists".format(client) + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Try to authorize the client + try: + cmd = [ + "ceph", + "--id", service, + "fs", "authorize", + fs_name, + client, + path, + perms, + "-f", "json" + ] + fs_auth = json.loads(check_output(cmd, encoding="utf-8")) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + except ValueError as err: + log(str(err), level=ERROR) + return {'exit-code': 1, 'stderr': str(err)} + + return {'exit-code': 0, 'key': fs_auth[0]["key"]} + + def process_requests_v1(reqs): """Process v1 requests.
@@ -902,6 +970,8 @@ def process_requests_v1(reqs): ret = handle_add_permissions_to_key(request=req, service=svc) elif op == 'set-key-permissions': ret = handle_set_key_permissions(request=req, service=svc) + elif op == "create-cephfs-client": + ret = handle_create_cephfs_client(request=req, service=svc) else: msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) diff --git a/ceph-radosgw/lib/charms_ceph/utils.py b/ceph-radosgw/lib/charms_ceph/utils.py index 94bfb9e4..57cb1d7b 100644 --- a/ceph-radosgw/lib/charms_ceph/utils.py +++ b/ceph-radosgw/lib/charms_ceph/utils.py @@ -1324,16 +1324,6 @@ def systemd(): return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' -def use_bluestore(): - """Determine whether bluestore should be used for OSD's - - :returns: whether bluestore disk format should be used - :rtype: bool""" - if cmp_pkgrevno('ceph', '12.2.0') < 0: - return False - return config('bluestore') - - def bootstrap_monitor_cluster(secret): """Bootstrap local Ceph mon into the Ceph cluster @@ -1551,21 +1541,21 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): + key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager, osd_id) + key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", level=WARNING) return - osdize_dir(dev, encrypt, bluestore) + osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + encrypt=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1579,7 +1569,6 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1630,15 +1619,13 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, cmd = _ceph_volume(dev, osd_journal, encrypt, - bluestore, key_manager, osd_id) else: cmd = _ceph_disk(dev, osd_format, osd_journal, - encrypt, - bluestore) + encrypt) try: status_set('maintenance', 'Initializing device {}'.format(dev)) @@ -1669,7 +1656,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, db.flush() -def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False): """ Prepare a device for usage as a Ceph OSD using ceph-disk @@ -1677,7 +1664,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): The function looks up realpath of the device :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption (unsupported) - :param: bluestore: Use bluestore storage for OSD :returns: list. 
'ceph-disk' command and required parameters for execution by check_call """ @@ -1686,25 +1672,17 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): if encrypt: cmd.append('--dmcrypt') - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - # NOTE(jamespage): enable experimental bluestore support - if use_bluestore(): - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) cmd.append(os.path.realpath(dev)) @@ -1715,8 +1693,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): return cmd -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): +def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1726,7 +1704,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: dev: Full path to use for OSD block device setup :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption - :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting @@ -1739,13 +1716,8 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, osd_fsid = str(uuid.uuid4()) cmd.append('--osd-fsid') cmd.append(osd_fsid) - - if bluestore: - cmd.append('--bluestore') - main_device_type = 'block' - else: - cmd.append('--filestore') - main_device_type = 'data' + cmd.append('--bluestore') + main_device_type = 'block' if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') @@ -1753,19 +1725,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if osd_id is not None: cmd.extend(['--osd-id', str(osd_id)]) - # On-disk journal volume creation - if not osd_journal and not bluestore: - journal_lv_type = 'journal' - cmd.append('--journal') - cmd.append(_allocate_logical_volume( - dev=dev, - lv_type=journal_lv_type, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - encrypt=encrypt, - key_manager=key_manager) - ) - cmd.append('--data') cmd.append(_allocate_logical_volume(dev=dev, lv_type=main_device_type, @@ -1773,36 +1732,21 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, encrypt=encrypt, key_manager=key_manager)) - if bluestore: - for extra_volume in ('wal', 'db'): - devices = get_devices('bluestore-{}'.format(extra_volume)) - if devices: - cmd.append('--block.{}'.format(extra_volume)) - least_used = find_least_used_utility_device(devices, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type=extra_volume, - 
osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - elif osd_journal: - cmd.append('--journal') - least_used = find_least_used_utility_device(osd_journal, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type='journal', - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) return cmd @@ -2040,7 +1984,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, return "{}/{}".format(vg_name, lv_name) -def osdize_dir(path, encrypt=False, bluestore=False): +def osdize_dir(path, encrypt=False): """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize @@ -2077,12 +2021,8 @@ def osdize_dir(path, encrypt=False, bluestore=False): if cmp_pkgrevno('ceph', '0.60') >= 0: if encrypt: cmd.append('--dmcrypt') + cmd.append('--bluestore') - # NOTE(icey): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) @@ -3232,6 +3172,7 @@ def dirs_need_ownership_update(service): ('octopus', 'pacific'), ('pacific', 'quincy'), ('quincy', 'reef'), + ('reef', 'squid'), ]) # Map UCA codenames to Ceph codenames @@ -3256,6 +3197,7 @@ def dirs_need_ownership_update(service): 'zed': 'quincy', 'antelope': 'quincy', 'bobcat': 'reef', + 'caracal': 'squid', } diff --git a/ceph-radosgw/metadata.yaml b/ceph-radosgw/metadata.yaml index 40f0b25e..16fc00bb 100644 --- a/ceph-radosgw/metadata.yaml +++ b/ceph-radosgw/metadata.yaml @@ -16,8 +16,6 @@ tags: series: - focal - jammy -- lunar -- mantic extra-bindings: public: admin: diff --git a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml index 8c1a1cfd..006ed1d0 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml @@ -47,7 +47,7 @@ applications: - '2' - '6' - '7' - channel: latest/edge + channel: quincy/edge secondary-ceph-osd: charm: ch:ceph-osd @@ -62,7 +62,7 @@ applications: - '3' - '8' - '9' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +72,7 @@ applications: source: *source to: - '4' - channel: latest/edge + channel: quincy/edge secondary-ceph-mon: charm: ch:ceph-mon @@ -82,7 +82,7 @@ applications: source: *source to: - '5' - channel: latest/edge + channel: quincy/edge relations: - - 'ceph-osd:mon' diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml index 7d05aa82..e7d6ebd2 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml @@ -61,7 +61,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +72,7 @@ 
applications: - '7' - '8' - '9' - channel: latest/edge + channel: quincy/edge keystone: expose: True @@ -82,7 +82,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: yoga/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml index 697a9be8..eac9de1f 100644 --- a/ceph-radosgw/tests/bundles/focal-yoga.yaml +++ b/ceph-radosgw/tests/bundles/focal-yoga.yaml @@ -60,7 +60,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -71,7 +71,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: quincy/edge keystone: expose: True @@ -81,7 +81,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: yoga/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml index bf9daa5b..27d2a8b3 100644 --- a/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml @@ -47,7 +47,7 @@ applications: - '2' - '6' - '7' - channel: latest/edge + channel: quincy/edge secondary-ceph-osd: charm: ch:ceph-osd @@ -62,7 +62,7 @@ applications: - '3' - '8' - '9' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +72,7 @@ applications: source: *source to: - '4' - channel: latest/edge + channel: quincy/edge secondary-ceph-mon: charm: ch:ceph-mon @@ -82,7 +82,7 @@ applications: source: *source to: - '5' - channel: latest/edge + channel: quincy/edge relations: - - 'ceph-osd:mon' diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml index 41d9c1c1..3c5e57d8 100644 --- a/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml @@ -40,7 +40,8 @@ applications: channel: latest/edge ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: quincy/edge num_units: 1 options: source: *source @@ -61,7 +62,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +73,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: quincy/edge keystone: expose: True @@ -82,7 +83,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: 2023.1/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/jammy-antelope.yaml b/ceph-radosgw/tests/bundles/jammy-antelope.yaml index ad5c78e8..02979ee6 100644 --- a/ceph-radosgw/tests/bundles/jammy-antelope.yaml +++ b/ceph-radosgw/tests/bundles/jammy-antelope.yaml @@ -39,7 +39,7 @@ applications: ceph-radosgw: charm: ch:ceph-radosgw - channel: latest/edge + channel: quincy/edge num_units: 1 options: source: *source @@ -59,7 +59,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -70,7 +70,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: quincy/edge keystone: expose: True @@ -80,7 +80,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: 2023.1/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml index 48f1b9c4..19b5b68a 100644 --- 
a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml @@ -47,7 +47,7 @@ applications: - '2' - '6' - '7' - channel: latest/edge + channel: reef/edge secondary-ceph-osd: charm: ch:ceph-osd @@ -62,7 +62,7 @@ applications: - '3' - '8' - '9' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +72,7 @@ applications: source: *source to: - '4' - channel: latest/edge + channel: reef/edge secondary-ceph-mon: charm: ch:ceph-mon @@ -82,7 +82,7 @@ applications: source: *source to: - '5' - channel: latest/edge + channel: reef/edge relations: - - 'ceph-osd:mon' diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml index 0ebcf4b3..8c385280 100644 --- a/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml @@ -40,7 +40,8 @@ applications: channel: latest/edge ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: reef/edge num_units: 1 options: source: *source @@ -61,7 +62,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon @@ -72,7 +73,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: reef/edge keystone: expose: True @@ -82,7 +83,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: 2023.2/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat.yaml index bcef92af..0e6c26da 100644 --- a/ceph-radosgw/tests/bundles/jammy-bobcat.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat.yaml @@ -40,7 +40,8 @@ applications: channel: latest/edge ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: reef/edge num_units: 1 options: source: *source @@ -60,7 +61,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon @@ -71,7 +72,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: reef/edge keystone: expose: True @@ -81,7 +82,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: 2023.2/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml new file mode 100644 index 00000000..116737b5 --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml @@ -0,0 +1,99 @@ +options: + source: &source cloud:jammy-caracal + +series: jammy + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + '9': + +applications: + ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '0' + + secondary-ceph-radosgw: + charm: ../../ceph-radosgw.charm + num_units: 1 + options: + source: *source + to: + - '1' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '2' + - '6' + - '7' + channel: latest/edge + + secondary-ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '3' + - '8' + - '9' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '4' + channel: latest/edge + + secondary-ceph-mon: + charm: ch:ceph-mon + num_units: 1 + options: + monitor-count: 1 + source: *source + to: + - '5' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'secondary-ceph-osd:mon' + - 'secondary-ceph-mon:osd' + + - - 'secondary-ceph-radosgw:mon' + - 'secondary-ceph-mon:radosgw' + diff --git a/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml new file mode 100644 index 00000000..b65b26e1 --- /dev/null +++ b/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml @@ -0,0 +1,125 @@ +options: + source: &source cloud:jammy-caracal + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + '5': + '6': + '7': + '8': + '9': + '10': + '11': + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *source + to: + - '0' + - '1' + - '2' + channel: 8.0/edge + + ceph-radosgw: + charm: ch:ceph-radosgw + channel: latest/edge + num_units: 1 + options: + source: *source + namespace-tenants: True + to: + - '3' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + constraints: "mem=2048" + storage: + osd-devices: 'cinder,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '4' + - '5' + - '6' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *source + to: + - '7' + - '8' + - '9' + channel: latest/edge + + keystone: + expose: True + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *source + to: + - '10' + channel: latest/edge + + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + + vault: + charm: ch:vault + num_units: 1 + to: + - '11' + channel: 1.8/edge + +relations: + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' + + - - 'ceph-radosgw:identity-service' + - 'keystone:identity-service' + + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + + - - 'keystone:certificates' + - 'vault:certificates' + + - 
- 'ceph-radosgw:certificates' - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/mantic-bobcat.yaml b/ceph-radosgw/tests/bundles/jammy-caracal.yaml similarity index 91% rename from ceph-radosgw/tests/bundles/mantic-bobcat.yaml rename to ceph-radosgw/tests/bundles/jammy-caracal.yaml index 9e0b78e1..59d66de1 100644 --- a/ceph-radosgw/tests/bundles/mantic-bobcat.yaml +++ b/ceph-radosgw/tests/bundles/jammy-caracal.yaml @@ -1,7 +1,7 @@ options: - source: &source cloud:mantic-bobcat + source: &source cloud:jammy-caracal -series: mantic +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -26,7 +26,7 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -37,10 +37,11 @@ - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source @@ -85,14 +86,14 @@ applications: vault-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge vault: charm: ch:vault num_units: 1 to: - '11' - channel: latest/edge + channel: 1.8/edge relations: diff --git a/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml b/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml index 2029ec51..a9dbdbdb 100644 --- a/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml +++ b/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml @@ -58,7 +58,7 @@ applications: - '4' - '5' - '6' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon @@ -69,7 +69,7 @@ applications: - '7' - '8' - '9' - channel: latest/edge + channel: quincy/edge keystone: expose: True @@ -79,7 +79,7 @@ applications: openstack-origin: *source to: - '10' - channel: latest/edge + channel: 2023.1/edge vault-mysql-router: charm: ch:mysql-router diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index 6b2be35c..f0c59360 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -10,18 +10,15 @@ smoke_bundles: - vault: jammy-antelope dev_bundles: - - lunar-antelope-multisite - - mantic-bobcat-multisite - jammy-antelope-multisite - jammy-bobcat-multisite - - vault: lunar-antelope - - vault: mantic-bobcat - - vault: lunar-antelope-namespaced - - vault: mantic-bobcat-namespaced + - jammy-caracal-multisite - vault: jammy-antelope - vault: jammy-bobcat + - vault: jammy-caracal - vault: jammy-antelope-namespaced - vault: jammy-bobcat-namespaced + - vault: jammy-caracal-namespaced target_deploy_status: vault: From 5aeb28ed3265c4b30475df8a2f3f0d88c2e128e5 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 12 Apr 2024 13:26:45 -0300 Subject: [PATCH 2616/2699] Implement key rotation for OSDs This patchset implements key rotation for OSD units. The monitor unit on which this action is run sets the 'pending_key' field in the relation databag, which specifies the OSD id and its new key. On their side, OSD units check this field and compare it against the OSD ids they manage to decide whether they need to rotate a key.
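For illustration only, the consuming side of 'pending_key' (which lives in the ceph-osd charm and is not part of this change) can be sketched roughly as follows; local_osd_ids and rotate_osd_key are hypothetical stand-ins for the helpers the OSD charm actually uses:

    import json

    def handle_pending_key(relation_data, local_osd_ids, rotate_osd_key):
        # 'pending_key' maps OSD ids to fresh keys, e.g. '{"1": "some-key"}',
        # as written by the monitor in rotate_key.py below.
        pending = relation_data.get('pending_key')
        if not pending:
            return
        for osd_id, key in json.loads(pending).items():
            # JSON object keys are strings, so cast before comparing.
            if int(osd_id) in local_osd_ids:
                rotate_osd_key(int(osd_id), key)

With the databag produced in the unit test below ('{"1": "some-key"}'), a unit that manages OSD id 1 would rotate exactly that key and ignore all others.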
Change-Id: Ief5afdea2b8449adbe14c7e838330e2f2be1cfd2 --- ceph-mon/src/ceph_hooks.py | 1 + ceph-mon/src/ops_actions/rotate_key.py | 111 ++++++++++++++++++++++- ceph-mon/unit_tests/test_ceph_actions.py | 49 +++++++++- 3 files changed, 159 insertions(+), 2 deletions(-) diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py index 101b1066..6eab0936 100755 --- a/ceph-mon/src/ceph_hooks.py +++ b/ceph-mon/src/ceph_hooks.py @@ -871,6 +871,7 @@ def osd_relation(relid=None, unit=None, reprocess_broker_requests=False): log('mon cluster in quorum - providing fsid & keys') public_addr = get_public_addr() data = { + 'pending_key': '', 'fsid': leader_get('fsid'), 'osd_bootstrap_key': ceph.get_osd_bootstrap_key(), 'auth': 'cephx', diff --git a/ceph-mon/src/ops_actions/rotate_key.py b/ceph-mon/src/ops_actions/rotate_key.py index 8e473bba..1e4ba829 100644 --- a/ceph-mon/src/ops_actions/rotate_key.py +++ b/ceph-mon/src/ops_actions/rotate_key.py @@ -126,6 +126,111 @@ def _handle_mds_key_rotation(entity, event, model): event.set_results({'message': 'success'}) +def _get_osd_tree(): + out = subprocess.check_output(["sudo", "ceph", "osd", "dump", + "--format=json"]) + return json.loads(out.decode("utf8")).get("osds", ()) + + +def _clean_address(addr): + ix = addr.find(":") + return addr if ix < 0 else addr[0:ix] + + +def _get_osd_addrs(osd_id, tree=None): + if tree is None: + tree = _get_osd_tree() + + for osd in tree: + if osd.get("osd") != osd_id: + continue + + return [_clean_address(osd[x]) + for x in ("public_addr", "cluster_addr") + if x in osd] + + +def _get_unit_addr(unit, rel_id): + out = subprocess.check_output(["relation-get", "--format=json", + "-r", str(rel_id), "private-address", unit]) + return out.decode("utf8").replace('"', '').strip() + + +def _find_osd_unit(relations, model, osd_id, tree): + addrs = _get_osd_addrs(osd_id, tree) + if not addrs: + return None + + for relation in relations: + for unit in relation.units: + if _get_unit_addr(unit.name, relation.id) in addrs: + return relation.data[model.unit] + + +def _handle_osd_key_rotation(entity, event, model, tree=None): + osd_rels = model.relations.get("osd") + if not osd_rels: + event.fail("No OSD relations found") + return + + if tree is None: + tree = _get_osd_tree() + + osd_id = int(entity[4:]) + bag = _find_osd_unit(osd_rels, model, osd_id, tree) + if bag is not None: + key = _create_key(entity, event) + bag["pending_key"] = json.dumps({osd_id: key}) + event.set_results({"message": "success"}) + else: + event.fail("No OSD matching entity %s found" % entity) + + +def _add_osd_rotation(rotations, new_bag, osd_id, new_key): + # NOTE(lmlg): We can't use sets or dicts for relation databags, as they + # are mutable and don't implement a __hash__ method. So we use a simple + # (bag, dict) array to map the rotations. + elem = {osd_id: new_key} + for bag, data in rotations: + if bag is new_bag: + data.update(elem) + return + + rotations.append((new_bag, elem)) + + +def _get_osd_ids(): + ret = subprocess.check_output(["sudo", "ceph", "osd", "ls"]) + return ret.decode("utf8").split("\n") + + +def _rotate_all_osds(event, model): + tree = _get_osd_tree() + osd_rels = model.relations.get("osd") + ret = [] + + if not osd_rels: + event.fail("No OSD relations found") + return + + for osd_id in _get_osd_ids(): + osd_id = osd_id.strip() + if not osd_id: + continue + + bag = _find_osd_unit(osd_rels, model, int(osd_id), tree) + if bag is None: + continue + + key = _create_key("osd." 
+ osd_id, event) + _add_osd_rotation(ret, bag, osd_id, key) + + for bag, elem in ret: + bag["pending_key"] = json.dumps(elem) + + event.set_results({"message": "success"}) + + def rotate_key(event, model=None) -> None: """Rotate the key of the specified entity.""" entity = event.params.get("entity") @@ -150,9 +255,13 @@ def rotate_key(event, model=None) -> None: _replace_keyring_file(path, entity, key, event) _restart_daemon("ceph-mgr@%s.service" % entity[4:], event) event.set_results({"message": "success"}) - elif entity.startswith('client.rgw.'): + elif entity.startswith("client.rgw."): _handle_rgw_key_rotation(entity, event, model) elif entity.startswith('mds.'): _handle_mds_key_rotation(entity, event, model) + elif entity == "osd": + _rotate_all_osds(event, model) + elif entity.startswith("osd."): + _handle_osd_key_rotation(entity, event, model) else: event.fail("Unknown entity: %s" % entity) diff --git a/ceph-mon/unit_tests/test_ceph_actions.py b/ceph-mon/unit_tests/test_ceph_actions.py index b3be8164..6a4b77db 100644 --- a/ceph-mon/unit_tests/test_ceph_actions.py +++ b/ceph-mon/unit_tests/test_ceph_actions.py @@ -316,6 +316,21 @@ def test_list_entities(self, check_output): key = old-key """ +OSD_DUMP = b""" +{ + "osds": [ + { + "osd": 0, + "public_addr": "10.5.2.40:6801/13869" + }, + { + "osd": 1, + "public_addr": "10.5.0.160:6801/9017" + } + ] +} +""" + class RotateKey(test_utils.CharmTestCase): """Run tests for action.""" @@ -346,7 +361,7 @@ def test_rotate_mgr_key(self, listdir, check_output, service_restart, check_output.return_value = b'[{"pending_key": "new-key"}]' event = test_utils.MockActionEvent({'entity': 'mgr.host-1'}) - self.harness.charm.on_rotate_key_action(event) + rotate_key.rotate_key(event) event.set_results.assert_called_with({'message': 'success'}) listdir.assert_called_once_with('/var/lib/ceph/mgr') @@ -356,3 +371,35 @@ def test_rotate_mgr_key(self, listdir, check_output, service_restart, calls = any(x for x in _open.mock_calls if any(p is not None and 'new-key' in p for p in x.args)) self.assertTrue(calls) + + @mock.patch.object(rotate_key, '_create_key') + @mock.patch.object(rotate_key.subprocess, 'check_output') + def test_rotate_osd_key(self, check_output, create_key): + def _check_output_inner(args): + if args == ['sudo', 'ceph', 'osd', 'dump', '--format=json']: + return OSD_DUMP + elif args[5] == 'ceph-osd/0': + return b'10.5.2.40' + else: + return b'10.5.0.160' + + check_output.side_effect = _check_output_inner + create_key.return_value = 'some-key' + + unit0 = mock.MagicMock() + unit0.name = 'ceph-osd/0' + unit1 = mock.MagicMock() + unit1.name = 'ceph-osd/1' + + relations = mock.MagicMock() + relations.units = [unit0, unit1] + relations.data = {'ceph-mon/0': {}} + + model = mock.MagicMock() + model.relations = {'osd': [relations]} + model.unit = 'ceph-mon/0' + + event = test_utils.MockActionEvent({'entity': 'osd.1'}) + rotate_key.rotate_key(event, model) + self.assertEqual(relations.data['ceph-mon/0'], + {'pending_key': '{"1": "some-key"}'}) From 868addef2c0cd5fb34bdd9908a024f12604ac7af Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 22 Nov 2023 14:53:54 +0100 Subject: [PATCH 2617/2699] Add support for the cos_agent relation This supports grafana dashboards and metrics scraping from the ceph mgr prometheus module. 
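For reference, the provider side of the cos_agent relation is wired up from charm code roughly as in the sketch below. This follows the grafana-agent cos_agent v0 library interface and is not a copy of this change (the actual wiring lives in ceph-mon/src/ceph_metrics.py); 9283 is the default port of the ceph-mgr prometheus module:

    import ops
    from charms.grafana_agent.v0.cos_agent import COSAgentProvider

    class CephMonCharm(ops.CharmBase):
        def __init__(self, *args):
            super().__init__(*args)
            # Expose mgr metrics, alert rules and the dashboards shipped in
            # this change to a related grafana-agent subordinate.
            self.cos_agent = COSAgentProvider(
                self,
                metrics_endpoints=[{'path': '/metrics', 'port': 9283}],
                metrics_rules_dir='./files/prometheus_alert_rules',
                dashboard_dirs=['./files/grafana_dashboards'],
            )

The grafana-agent charm then relays the scraped metrics, alert rules and dashboards to Prometheus and Grafana in the COS model.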
Change-Id: I8b2f132a4997d205119f7afe2a1ab6b2ae4c0134 --- .../ceph-cluster-advanced.json | 3792 +++++++++++++++++ .../grafana_dashboards/ceph-cluster.json | 1269 ++++++ .../grafana_dashboards/cephfs-overview.json | 348 ++ .../grafana_dashboards/host-details.json | 1314 ++++++ .../grafana_dashboards/hosts-overview.json | 880 ++++ .../osd-device-details.json | 857 ++++ .../grafana_dashboards/osds-overview.json | 1028 +++++ .../files/grafana_dashboards/pool-detail.json | 694 +++ .../grafana_dashboards/pool-overview.json | 1711 ++++++++ .../grafana_dashboards/radosgw-detail.json | 522 +++ .../grafana_dashboards/radosgw-overview.json | 695 +++ .../radosgw-sync-overview.json | 490 +++ .../files/grafana_dashboards/rbd-details.json | 444 ++ .../grafana_dashboards/rbd-overview.json | 723 ++++ ...rts.yml.default => prometheus_alerts.yaml} | 0 .../lib/charms/grafana_agent/v0/cos_agent.py | 842 ++++ ceph-mon/metadata.yaml | 3 + ceph-mon/src/ceph_metrics.py | 81 + ceph-mon/src/charm.py | 2 +- ceph-mon/test-requirements.txt | 3 + ceph-mon/unit_tests/test_ceph_metrics.py | 66 +- 21 files changed, 15761 insertions(+), 3 deletions(-) create mode 100644 ceph-mon/files/grafana_dashboards/ceph-cluster-advanced.json create mode 100644 ceph-mon/files/grafana_dashboards/ceph-cluster.json create mode 100644 ceph-mon/files/grafana_dashboards/cephfs-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/host-details.json create mode 100644 ceph-mon/files/grafana_dashboards/hosts-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/osd-device-details.json create mode 100644 ceph-mon/files/grafana_dashboards/osds-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/pool-detail.json create mode 100644 ceph-mon/files/grafana_dashboards/pool-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/radosgw-detail.json create mode 100644 ceph-mon/files/grafana_dashboards/radosgw-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/radosgw-sync-overview.json create mode 100644 ceph-mon/files/grafana_dashboards/rbd-details.json create mode 100644 ceph-mon/files/grafana_dashboards/rbd-overview.json rename ceph-mon/files/prometheus_alert_rules/{prometheus_alerts.yml.default => prometheus_alerts.yaml} (100%) create mode 100644 ceph-mon/lib/charms/grafana_agent/v0/cos_agent.py diff --git a/ceph-mon/files/grafana_dashboards/ceph-cluster-advanced.json b/ceph-mon/files/grafana_dashboards/ceph-cluster-advanced.json new file mode 100644 index 00000000..db61f332 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/ceph-cluster-advanced.json @@ -0,0 +1,3792 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "heatmap", + "name": "Heatmap", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "Ceph cluster overview", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + 
"y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CLUSTER STATE", + "titleSize": "h6", + "type": "row" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "0": { + "text": "HEALTHY" + }, + "1": { + "text": "WARNING" + }, + "2": { + "text": "ERROR" + } + }, + "type": "value" + }, + { + "id": 1, + "options": { + "match": null, + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#9ac48a" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 2 + } + ] + }, + "unit": "none" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 3, + "interval": "1m", + "links": [ ], + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_health_status{}", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "title": "Ceph health status", + "transparent": true, + "type": "stat" + }, + { + "datasource": "${prometheusds}", + "description": "", + "fieldConfig": { + "defaults": { + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "match": null, + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(245, 54, 54, 0.9)" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.10000000000000001 + }, + { + "color": "rgba(50, 172, 45, 0.97)", + "value": 0.29999999999999999 + } + ] + }, + "unit": "percentunit" + } + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 4, + "interval": "1m", + "links": [ ], + "maxDataPoints": 100, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_cluster_total_bytes{}-ceph_cluster_total_used_bytes{})/ceph_cluster_total_bytes{}", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "title": "Available Capacity", + "transparent": false, + "type": "gauge" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "fieldConfig": { + "defaults": { + "decimals": 2, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "match": null, + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.025000000000000001 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 1 + } + ] + }, + "unit": "decbytes" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 6, + "y": 1 + }, + "id": 5, + "interval": "1m", + "links": [ ], + "options": { + "colorMode": "none", + 
"graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_cluster_total_bytes{}", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 300 + } + ], + "title": "Cluster Capacity", + "transparent": false, + "type": "stat" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "fieldConfig": { + "defaults": { + "decimals": 1, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "match": null, + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "Bps" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 6, + "links": [ ], + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_w_in_bytes{}[5m]))", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Write Throughput", + "transparent": false, + "type": "stat" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "fieldConfig": { + "defaults": { + "decimals": 1, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "match": null, + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0 + }, + { + "color": "#9ac48a", + "value": 0 + } + ] + }, + "unit": "Bps" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 1 + }, + "id": 7, + "links": [ ], + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_r_out_bytes{}[5m]))", + "format": "time_series", + "instant": true, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Read Throughput", + "transparent": false, + "type": "stat" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgb(255, 0, 0)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 0, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + } + }, + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 8, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + 
"isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [ ], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "aggregation": "Last", + "alias": "All", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_osd_metadata)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "All", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "In", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_osd_in)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "In", + "refId": "B", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Out", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_in == bool 0)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Out", + "refId": "C", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1 + }, + { + "aggregation": "Last", + "alias": "Up", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_up)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Up", + "refId": "D", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Down", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_osd_up == bool 0)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Down", + "refId": "E", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1 + } + ], + "title": "OSDs", + "transparent": false, + "type": "stat" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 1, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + } + }, + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 9, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [ ], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + 
"lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "aggregation": "Last", + "alias": "Active", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mgr_status == 1) or vector(0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "Active", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Standby", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mgr_status == 0) or vector(0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "Standby", + "refId": "B", + "units": "none", + "valueHandler": "Number Threshold" + } + ], + "title": "MGRs", + "transparent": false, + "type": "stat" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 1, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Critical" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Warning" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#987d24", + "mode": "fixed" + } + } + ] + } + ] + }, + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 4 + }, + "id": 10, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [ ], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "aggregation": "Last", + "alias": "Active", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ALERTS{alertstate=\"firing\",alertname=~\"^Ceph.+\", severity=\"critical\"}) OR vector(0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "Critical", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Standby", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ALERTS{alertstate=\"firing\",alertname=~\"^Ceph.+\", severity=\"warning\"}) OR vector(0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "Warning", + "refId": "B", + "units": "none", + "valueHandler": "Number Threshold" 
+ } + ], + "title": "Firing Alerts", + "transparent": false, + "type": "stat" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.025000000000000001 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 0.10000000000000001 + } + ] + }, + "unit": "decbytes" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 6, + "y": 4 + }, + "id": 11, + "links": [ ], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_cluster_total_used_bytes{}", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Used Capacity", + "transparent": false, + "type": "stat" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 4 + }, + "id": 12, + "links": [ ], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_w{}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Write IOPS", + "transparent": false, + "type": "stat" + }, + { + "colors": null, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ + { + "id": 0, + "options": { + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0 + }, + { + "color": "#9ac48a", + "value": 0 + } + ] + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 4 + }, + "id": 13, + "links": [ ], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_r{}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + 
"title": "Read IOPS", + "transparent": false, + "type": "stat" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 1, + "datasource": "${prometheusds}", + "description": "", + "displayName": "", + "fieldConfig": { + "defaults": { + "decimals": 0, + "links": [ ], + "mappings": [ ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + } + }, + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 6, + "x": 15, + "y": 4 + }, + "id": 14, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [ ], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "aggregation": "Last", + "alias": "In Quorum", + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "sum(ceph_mon_quorum_status)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "In Quorum", + "refId": "A", + "units": "none", + "valueHandler": "Text Only" + }, + { + "aggregation": "Last", + "alias": "Total", + "crit": 1, + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mon_quorum_status)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "B", + "units": "none", + "valueHandler": "Text Only", + "warn": 2 + }, + { + "aggregation": "Last", + "alias": "MONs out of Quorum", + "crit": 1.6000000000000001, + "datasource": "${prometheusds}", + "decimals": 2, + "displayAliasType": "Warning / Critical", + "displayType": "Annotation", + "displayValueWithAlias": "Never", + "expr": "count(ceph_mon_quorum_status) - sum(ceph_mon_quorum_status)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MONs out of Quorum", + "range": true, + "refId": "C", + "units": "none", + "valueHandler": "Number Threshold", + "warn": 1.1000000000000001 + } + ], + "title": "Monitors", + "transparent": false, + "type": "stat" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 15, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CLUSTER STATS", + "titleSize": "h6", + "type": "row" + }, + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 16, + "limit": 10, + "onlyAlertsOnDashboard": true, + "options": { + "alertInstanceLabelFilter": "{alertname=~\"^Ceph.+\"}", + "alertName": "", + "dashboardAlerts": false, + "groupBy": [ ], + "groupMode": "default", + "maxItems": 20, + "sortOrder": 1, + "stateFilter": { + "error": true, + "firing": true, + "noData": false, + "normal": false, + "pending": true + }, + "viewMode": "list" + }, + "show": "current", + "sortOrder": 1, + "stateFilter": [ ], + "title": "Alerts", + "type": 
"alertlist" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 40, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 0, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#c0921f", + "value": 75 + }, + { + "color": "#E02F44", + "value": 85 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Total Capacity" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Used" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + }, + { + "id": "custom.thresholdsStyle", + "value": { + "mode": "dashed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 17, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Last", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_cluster_total_bytes{}", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Total Capacity", + "range": true, + "refId": "A", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "ceph_cluster_total_used_bytes{}", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Used", + "range": true, + "refId": "B", + "step": 300 + } + ], + "title": "Capacity", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 85 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 18, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max", + "min" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": 
"9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_w_in_bytes{}[5m]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Write", + "range": true, + "refId": "A", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_r_out_bytes{}[5m]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Read", + "range": true, + "refId": "B", + "step": 300 + } + ], + "title": "Cluster Throughput", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "id": 19, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max", + "min" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_w{}[$__rate_interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Write", + "range": true, + "refId": "A", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_op_r{}[$__rate_interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Read", + "range": true, + "refId": "B", + "step": 300 + } + ], + "title": "IOPS", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "id": 20, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.3", + 
"targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_pool_bytes_used{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A", + "step": 300 + } + ], + "title": "Pool Used Bytes", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "rbd Stored" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "transparent", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "id": 21, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_pool_stored_raw{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "range": true, + "refId": "A", + "step": 300 + } + ], + "title": "Pool Used RAW Bytes", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 24 + }, + "id": 22, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_pool_quota_objects{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A", + "step": 300 + } + ], + "title": "Pool Objects Quota", + "type": "timeseries" 
+ }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 24 + }, + "id": 23, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_pool_quota_bytes{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A", + "step": 300 + } + ], + "title": "Pool Quota Bytes", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 24 + }, + "id": 24, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "(ceph_pool_objects{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "title": "Objects Per Pool", + "type": "timeseries" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 25, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OBJECTS", + "titleSize": "h6", + "type": "row" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/^Total.*$/" + }, + "properties": [ + { + "id": "custom.stacking", + "value": { + "group": false, + "mode": "normal" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 6, + "x": 0, + "y": 32 + }, + "id": 26, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "asc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pool_objects)", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Total", + "range": true, + "refId": "A", + "step": 200 + } + ], + "title": "OSD Type Count", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/^Total.*$/" + }, + "properties": [ + { + "id": "custom.stacking", + "value": { + "group": false, + "mode": "normal" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 8, + "x": 6, + "y": 32 + }, + "id": 27, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "asc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_active{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Active", + "range": true, + "refId": "A" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_clean{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Clean", + "range": true, + "refId": "B" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_peering{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Peering", + "range": true, + "refId": "C" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_degraded{})", + "format": "time_series", + "interval": 
"$interval", + "intervalFactor": 1, + "legendFormat": "Degraded", + "range": true, + "refId": "D", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_stale{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Stale", + "range": true, + "refId": "E", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_unclean_pgs{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Unclean", + "range": true, + "refId": "F", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_undersized{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Undersized", + "range": true, + "refId": "G", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_incomplete{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Incomplete", + "range": true, + "refId": "H" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_forced_backfill{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Forced Backfill", + "range": true, + "refId": "I" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_forced_recovery{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Forced Recovery", + "range": true, + "refId": "J" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_creating{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Creating", + "range": true, + "refId": "K" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_wait_backfill{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Wait Backfill", + "range": true, + "refId": "L" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_deep{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Deep", + "range": true, + "refId": "M" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_scrubbing{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Scrubbing", + "range": true, + "refId": "N" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_recovering{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Recovering", + "range": true, + "refId": "O" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_repair{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Repair", + "range": true, + "refId": "P" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_down{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Down", + "range": true, + "refId": "Q" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_peered{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Peered", + "range": true, + "refId": "R" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_backfill{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Backfill", + "range": true, + "refId": "S" + }, + { + "datasource": "${prometheusds}", + "expr": 
"sum(ceph_pg_remapped{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Remapped", + "range": true, + "refId": "T" + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_backfill_toofull{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Backfill Toofull", + "range": true, + "refId": "U" + } + ], + "title": "PGs State", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/^Total.*$/" + }, + "properties": [ + { + "id": "custom.stacking", + "value": { + "group": false, + "mode": "normal" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 10, + "x": 14, + "y": 32 + }, + "id": 28, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "asc" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_degraded{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Degraded", + "range": true, + "refId": "A", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_stale{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Stale", + "range": true, + "refId": "B", + "step": 300 + }, + { + "datasource": "${prometheusds}", + "expr": "sum(ceph_pg_undersized{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "Undersized", + "range": true, + "refId": "C", + "step": 300 + } + ], + "title": "Stuck PGs", + "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 6, + "w": 10, + "x": 14, + "y": 38 + }, + "id": 29, + "interval": "$interval", 
+ "options": { + "legend": { + "calcs": [ ], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "sum(irate(ceph_osd_recovery_ops{}[$interval]))", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "OPS", + "refId": "A", + "step": 300 + } + ], + "title": "Recovery Operations", + "type": "timeseries" + }, + { + "collapse": false, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 44 + }, + "id": 30, + "panels": [ + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "mode": "opacity" + }, + "dataFormat": "timeseries", + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "heatmap": { }, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 31, + "legend": { + "show": true + }, + "options": { + "calculate": true, + "calculation": { + "yBuckets": { + "mode": "count", + "scale": { + "log": 2, + "type": "log" + }, + "value": "1" + } + }, + "cellGap": 2, + "cellValues": { }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1.0000000000000001e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "min": "0", + "reverse": false, + "unit": "ms" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_osd_apply_latency_ms{}", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "OSD Apply Latency Distribution", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "ms", + "logBase": 2, + "max": null, + "min": "0", + "show": true, + "splitFactor": 1 + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": 10 + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#65c5db", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "mode": "opacity" + }, + "dataFormat": "timeseries", + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 42 + }, + "heatmap": { }, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 32, + "legend": { + "show": true + }, + "options": { + "calculate": true, + "calculation": { + "yBuckets": { + "mode": "count", + "scale": { + "log": 2, + "type": "log" + } + } + }, + "cellGap": 2, + "cellValues": { }, + "color": { + 
"exponent": 0.5, + "fill": "#65c5db", + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1.0000000000000001e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "min": "0", + "reverse": false, + "unit": "ms" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "ceph_osd_commit_latency_ms{}", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "OSD Commit Latency Distribution", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "ms", + "logBase": 2, + "max": null, + "min": "0", + "show": true, + "splitFactor": 1 + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": 10 + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#806eb7", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "mode": "opacity" + }, + "dataFormat": "timeseries", + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 50 + }, + "heatmap": { }, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 33, + "legend": { + "show": true + }, + "options": { + "calculate": true, + "calculation": { + "yBuckets": { + "mode": "count", + "scale": { + "log": 2, + "type": "log" + } + } + }, + "cellGap": 2, + "cellValues": { }, + "color": { + "exponent": 0.5, + "fill": "#806eb7", + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1.0000000000000001e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 2, + "min": "0", + "reverse": false, + "unit": "ms" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "rate(ceph_osd_op_r_latency_sum{}[5m]) / rate(ceph_osd_op_r_latency_count{}[5m]) >= 0", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "OSD Read Op Latency Distribution", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "ms", + "logBase": 2, + "max": null, + "min": "0", + "show": true, + "splitFactor": 1 + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#f9934e", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "mode": "opacity" + }, + "dataFormat": "timeseries", + 
"datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 50 + }, + "heatmap": { }, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 34, + "legend": { + "show": true + }, + "options": { + "calculate": true, + "calculation": { + "yBuckets": { + "mode": "count", + "scale": { + "log": 2, + "type": "log" + } + } + }, + "cellGap": 2, + "cellValues": { }, + "color": { + "exponent": 0.5, + "fill": "#f9934e", + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1.0000000000000001e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 2, + "min": "0", + "reverse": false, + "unit": "ms" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m]) >= 0", + "format": "time_series", + "instant": false, + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "OSD Write Op Latency Distribution", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "ms", + "logBase": 2, + "max": null, + "min": "0", + "show": true, + "splitFactor": 1 + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 58 + }, + "id": 35, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "avg(rate(ceph_osd_op_r_latency_sum{}[5m]) / rate(ceph_osd_op_r_latency_count{}[5m]) >= 0)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "A" + }, + { + "datasource": "${prometheusds}", + "expr": "avg(rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m]) >= 0)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "B" + } + ], + "title": "Recovery Operations", 
+ "type": "timeseries" + }, + { + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 58 + }, + "id": 36, + "interval": "$interval", + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.1.3", + "targets": [ + { + "datasource": "${prometheusds}", + "expr": "avg(ceph_osd_apply_latency_ms{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "apply", + "metric": "ceph_osd_perf_apply_latency_seconds", + "refId": "A", + "step": 4 + }, + { + "datasource": "${prometheusds}", + "expr": "avg(ceph_osd_commit_latency_ms{})", + "format": "time_series", + "interval": "$interval", + "intervalFactor": 1, + "legendFormat": "commit", + "metric": "ceph_osd_perf_commit_latency_seconds", + "refId": "B", + "step": 4 + } + ], + "title": "AVG OSD Apply + Commit Latency", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "LATENCY", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": true, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 37, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "filterable": false, + "inspect": false + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 46 + }, + "id": 38, + "links": [ ], + "options": { + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 1, + "showHeader": true + }, + "pluginVersion": "9.4.7", + "styles": "", + "targets": [ + { + "datasource": "${prometheusds}", + "exemplar": false, + "expr": "count by (ceph_version)(ceph_osd_metadata{})", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "OSD Services", + "range": false, + "refId": "A" + }, + { + "datasource": "${prometheusds}", + "exemplar": false, + "expr": "count by (ceph_version)(ceph_mon_metadata{})", + "format": "table", + "hide": false, + "instant": 
true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "Mon Services",
+ "range": false,
+ "refId": "B"
+ },
+ {
+ "datasource": "${prometheusds}",
+ "exemplar": false,
+ "expr": "count by (ceph_version)(ceph_mds_metadata{})",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "intervalFactor": 1,
+ "legendFormat": "MDS Services",
+ "range": false,
+ "refId": "C"
+ },
+ {
+ "datasource": "${prometheusds}",
+ "exemplar": false,
+ "expr": "count by (ceph_version)(ceph_rgw_metadata{})",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "RGW Services",
+ "range": false,
+ "refId": "D"
+ },
+ {
+ "datasource": "${prometheusds}",
+ "exemplar": false,
+ "expr": "count by (ceph_version)(ceph_mgr_metadata{})",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "MGR Services",
+ "range": false,
+ "refId": "E"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Ceph Versions",
+ "transformations": [
+ {
+ "id": "merge",
+ "options": { }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": { },
+ "indexByName": { },
+ "renameByName": {
+ "Time": "",
+ "Value #A": "OSD Services",
+ "Value #B": "Mon Services",
+ "Value #C": "MDS Services",
+ "Value #D": "RGW Services",
+ "Value #E": "MGR Services",
+ "ceph_version": "Ceph Version"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "refresh": "1m",
+ "rows": [ ],
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [
+ "ceph-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": "Data Source",
+ "name": "prometheusds",
+ "options": [ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "auto": true,
+ "auto_count": 10,
+ "auto_min": "1m",
+ "current": {
+ "text": "$__auto_interval_interval",
+ "value": "$__auto_interval_interval"
+ },
+ "hide": 0,
+ "label": "Interval",
+ "name": "interval",
+ "options": [
+ {
+ "selected": true,
+ "text": "auto",
+ "value": "$__auto_interval_interval"
+ },
+ {
+ "selected": false,
+ "text": "5s",
+ "value": "5s"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "5s,10s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "refresh": 2,
+ "type": "interval",
+ "valuelabels": { }
+ }
+ ]
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Ceph Cluster - Advanced",
+ "version": 0
+}
diff --git a/ceph-mon/files/grafana_dashboards/ceph-cluster.json b/ceph-mon/files/grafana_dashboards/ceph-cluster.json
new file mode 100644
index 00000000..9d1fbd73
--- /dev/null
+++ b/ceph-mon/files/grafana_dashboards/ceph-cluster.json
@@ -0,0 +1,1269 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "5.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": "5.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": "5.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": "5.0.0"
+ }
+ ],
+ "annotations": {
+ "list": []
+ },
+ "description": "Ceph cluster overview",
+ "editable": false,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": null,
+ "iteration": 1525415495309,
+ "links": [],
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 128, 45, 0.9)",
+ "rgba(237, 129, 40, 0.9)",
+ "rgb(255, 0, 0)"
+ ],
+ "datasource": "${prometheusds}",
+ "editable": false,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 0,
+ "y": 0
+ },
+ "hideTimeOverride": true,
+ "id": 21,
+ "interval": "1m",
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "ceph_health_status",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "thresholds": "1,2",
+ "timeFrom": null,
+ "title": "Health Status",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "OK",
+ "value": "0"
+ },
+ {
+ "op": "=",
+ "text": "WARN",
+ "value": "1"
+ },
+ {
+ "op": "=",
+ "text": "ERR",
+ "value": "2"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "colorMode": "Panel",
+ "colors": {
+ "crit": "rgb(255, 0, 0)",
+ "disable": "rgba(128, 128, 128, 0.9)",
+ "ok": "rgba(50, 128, 45, 0.9)",
+ "warn": "rgba(237, 129, 40, 0.9)"
+ },
+ "cornerRadius": 0,
+ "datasource": "${prometheusds}",
+ "displayName": "",
+ "flipCard": false,
+ "flipTime": 5,
+ "fontFormat": "Regular",
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ "id": 43,
+ "isAutoScrollOnOverflow": false,
+ "isGrayOnNoData": false,
+ "isHideAlertsOnDisable": false,
+ "isIgnoreOKColors": false,
+ "links": [],
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ }
+ },
+ "targets": [
+ {
+ "aggregation": "Last",
+ "alias": "All",
+ "decimals": 2,
+ "displayAliasType": "Always",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "count(ceph_osd_metadata)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "All",
+ "refId": "A",
+ "units": "none",
+ "valueHandler": "Number Threshold"
+ },
+ {
+ "aggregation": "Last",
+ "alias": "In",
+ "decimals": 2,
+ "displayAliasType": "Always",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "sum(ceph_osd_in)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "In",
+ "refId": "B",
+ "units": "none",
+ "valueHandler": "Number Threshold"
+ },
+ {
+ "aggregation": "Last",
+ "alias": "Out",
+ "decimals": 2,
+ "displayAliasType": "Warning / Critical",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "sum(ceph_osd_in == bool 0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "Out",
+ "refId": "C",
+ "units": "none",
+ "valueHandler": "Number Threshold",
+ "warn": 1
+ },
+ {
+ "aggregation": "Last",
+ "alias": "Up",
+ "decimals": 2,
+ "displayAliasType": "Always",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "sum(ceph_osd_up)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Up",
+ "refId": "D",
+ "units": "none",
+ "valueHandler": "Number Threshold"
+ },
+ {
+ "aggregation": "Last",
+ "alias": "Down",
+ "crit": 2,
+ "decimals": 2,
+ "displayAliasType": "Warning / Critical",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "sum(ceph_osd_up == bool 0)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Down",
+ "refId": "E",
+ "units": "none",
+ "valueHandler": "Number Threshold",
+ "warn": 1
+ }
+ ],
+ "title": "OSDs",
+ "type": "stat"
+ },
+ {
+ "clusterName": "",
+ "colorMode": "Panel",
+ "colors": {
+ "crit": "rgba(245, 54, 54, 0.9)",
+ "disable": "rgba(128, 128, 128, 0.9)",
+ "ok": "rgba(50, 128, 45, 0.9)",
+ "warn": "rgba(237, 129, 40, 0.9)"
+ },
+ "cornerRadius": 1,
+ "datasource": "${prometheusds}",
+ "displayName": "",
+ "flipCard": false,
+ "flipTime": 5,
+ "fontFormat": "Regular",
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 12,
+ "y": 0
+ },
+ "id": 41,
+ "isAutoScrollOnOverflow": false,
+ "isGrayOnNoData": false,
+ "isHideAlertsOnDisable": false,
+ "isIgnoreOKColors": false,
+ "links": [],
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ }
+ },
+ "targets": [
+ {
+ "aggregation": "Last",
+ "alias": "In Quorum",
+ "decimals": 2,
+ "displayAliasType": "Always",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "sum(ceph_mon_quorum_status)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "In Quorum",
+ "refId": "A",
+ "units": "none",
+ "valueHandler": "Text Only"
+ },
+ {
+ "aggregation": "Last",
+ "alias": "Total",
+ "crit": 1,
+ "decimals": 2,
+ "displayAliasType": "Always",
+ "displayType": "Regular",
+ "displayValueWithAlias": "When Alias Displayed",
+ "expr": "count(ceph_mon_quorum_status)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Total",
+ "refId": "B",
+ "units": "none",
+ "valueHandler": "Text Only",
+ "warn": 2
+ },
+ {
+ "aggregation": "Last",
+ "alias": "MONs out of Quorum",
+ "crit": 1.6,
+ "decimals": 2,
+ "displayAliasType": "Warning / Critical",
+ "displayType": "Annotation",
+ "displayValueWithAlias": "Never",
+ "expr": "count(ceph_mon_quorum_status) - sum(ceph_mon_quorum_status)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "MONs out of Quorum",
+ "refId": "C",
+ "units": "none",
+
"valueHandler": "Number Threshold", + "warn": 1.1 + } + ], + "title": "Monitors", + "type": "stat" + }, + { + "colorMode": "Panel", + "colors": { + "crit": "rgba(245, 54, 54, 0.9)", + "disable": "rgba(128, 128, 128, 0.9)", + "ok": "rgba(50, 128, 45, 0.9)", + "warn": "rgba(237, 129, 40, 0.9)" + }, + "cornerRadius": 1, + "datasource": "${prometheusds}", + "displayName": "", + "flipCard": false, + "flipTime": 5, + "fontFormat": "Regular", + "gridPos": { + "h": 3, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 68, + "isAutoScrollOnOverflow": false, + "isGrayOnNoData": false, + "isHideAlertsOnDisable": false, + "isIgnoreOKColors": false, + "links": [], + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + } + }, + "targets": [ + { + "aggregation": "Last", + "alias": "Active", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mgr_status == 1) or vector(0)", + "format": "time_series", + "intervalFactor": 1, + "instant": true, + "legendFormat": "Active", + "refId": "A", + "units": "none", + "valueHandler": "Number Threshold" + }, + { + "aggregation": "Last", + "alias": "Standby", + "decimals": 2, + "displayAliasType": "Always", + "displayType": "Regular", + "displayValueWithAlias": "When Alias Displayed", + "expr": "count(ceph_mgr_status == 0) or vector(0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "Standby", + "refId": "B", + "units": "none", + "valueHandler": "Number Threshold" + } + ], + "title": "MGRs", + "type": "stat" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "decimals": 2, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 6 + }, + "id": 47, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes_used)/sum(ceph_osd_stat_bytes)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used", + "refId": "A" + } + ], + "thresholds": "0.7,0.8", + "title": "Capacity used", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 0, + "gridPos": { + "h": 6, + "w": 9, + "x": 6, + "y": 6 + }, + "id": 53, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": 
"null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Active", + "color": "#508642", + "fill": 1, + "stack": "A" + }, + { + "alias": "Total", + "color": "#f9e2d2" + }, + { + "alias": "Degraded", + "color": "#eab839" + }, + { + "alias": "Undersized", + "color": "#f9934e" + }, + { + "alias": "Inconsistent", + "color": "#e24d42" + }, + { + "alias": "Down", + "color": "#bf1b00" + }, + { + "alias": "Inactive", + "color": "#bf1b00", + "fill": 4, + "linewidth": 0, + "stack": "A" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(ceph_pg_total)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "A" + }, + { + "expr": "sum(ceph_pg_active)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Active", + "refId": "B" + }, + { + "expr": "sum(ceph_pg_total - ceph_pg_active)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Inactive", + "refId": "G" + }, + { + "expr": "sum(ceph_pg_undersized)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Undersized", + "refId": "F" + }, + { + "expr": "sum(ceph_pg_degraded)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Degraded", + "refId": "C" + }, + { + "expr": "sum(ceph_pg_inconsistent)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Inconsistent", + "refId": "D" + }, + { + "expr": "sum(ceph_pg_down)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Down", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "PG States", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 0, + "gridPos": { + "h": 6, + "w": 9, + "x": 15, + "y": 6 + }, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Avg Apply Latency", + "color": "#7eb26d" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "quantile(0.95, ceph_osd_apply_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Apply Latency P_95", + "refId": "A" + }, + { + "expr": "quantile(0.95, ceph_osd_commit_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Commit Latency P_95", + "refId": "B" + }, + { + "expr": "avg(ceph_osd_apply_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Avg Apply Latency", + "refId": "C" + }, + { + "expr": "avg(ceph_osd_commit_latency_ms)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Avg Commit Latency", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "OSD 
Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 0.5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(ceph_osd_op_w_in_bytes[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "A" + }, + { + "expr": "sum(irate(ceph_osd_op_r_out_bytes[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Cluster I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 62, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(deriv(ceph_pool_stored[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "In-/Egress", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": " Egress (-) / Ingress (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "cards": { + "cardPadding": null, + "cardRound": 1 + }, + "color": { + "cardColor": "rgb(0, 254, 255)", + "colorScale": "sqrt", + "colorScheme": "interpolateBlues", + "exponent": 0.5, + "min": null, + "mode": "spectrum" + }, + "dataFormat": "timeseries", + 
"datasource": "${prometheusds}", + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 15 + }, + "heatmap": {}, + "highlightCards": true, + "id": 55, + "legend": { + "show": true + }, + "links": [], + "span": 12, + "targets": [ + { + "expr": "ceph_osd_stat_bytes_used / ceph_osd_stat_bytes", + "format": "time_series", + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "Util (%)", + "refId": "A", + "step": 60 + } + ], + "timeFrom": null, + "title": "OSD Capacity Utilization", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": 2, + "format": "percentunit", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketNumber": null, + "yBucketSize": null + }, + { + "cards": { + "cardPadding": null, + "cardRound": 1 + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateBlues", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "timeseries", + "datasource": "${prometheusds}", + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 15 + }, + "heatmap": {}, + "highlightCards": true, + "id": 59, + "legend": { + "show": true + }, + "links": [], + "targets": [ + { + "expr": "ceph_osd_numpg", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "#PGs", + "refId": "A" + } + ], + "title": "PGs per OSD", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": "", + "yAxis": { + "decimals": null, + "format": "none", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 64, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(ceph_osd_recovery_ops[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Op/s", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Recovery Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": "Recovery Ops/s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "30s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph", + "cluster" + ], + "templating": { + "list": [ + { + "auto": true, + "auto_count": 10, + "auto_min": "1m", + "current": { + "text": "auto", + "value": "$__auto_interval_interval" + }, + "datasource": null, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, 
+ "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "1m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "refresh": 2, + "type": "interval" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph - Cluster", + "version": 13 + } diff --git a/ceph-mon/files/grafana_dashboards/cephfs-overview.json b/ceph-mon/files/grafana_dashboards/cephfs-overview.json new file mode 100644 index 00000000..dd3a025f --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/cephfs-overview.json @@ -0,0 +1,348 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "MDS Performance", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_objecter_op_r{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Ops", + "refId": "A" + }, + { + "expr": "sum(rate(ceph_objecter_op_w{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write Ops", + "refId": 
"B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "MDS Workload - $mds_servers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": "Reads(-) / Writes (+)", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_mds_server_handle_client_request{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ceph_daemon}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Client Request Load - $mds_servers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": "Client Requests", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "MDS Server", + "multi": false, + "name": "mds_servers", + "options": [ ], + "query": "label_values(ceph_mds_inodes{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", 
+ "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MDS Performance", + "uid": "tbO9LAiZz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/host-details.json b/ceph-mon/files/grafana_dashboards/host-details.json new file mode 100644 index 00000000..5d6a3060 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/host-details.json @@ -0,0 +1,1314 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "$ceph_hosts System Overview", + "titleSize": "h6", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{job=~\"$job\", instance='$ceph_hosts'}))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "OSDs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + "interrupt": "#447EBC", + "steal": "#6D1F62", + "system": "#890F02", + "user": "#3F6833", + "wait": "#C15C17" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Shows the CPU breakdown. 
When multiple servers are selected, only the first host's cpu data is shown", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (mode) (\n rate(node_cpu{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[$__rate_interval]) or\n rate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[$__rate_interval])\n) / (\n scalar(\n sum(rate(node_cpu{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]))\n ) * 100\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{mode}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": "% Utilization", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "Available": "#508642", + "Free": "#508642", + "Total": "#bf1b00", + "Used": "#bf1b00", + "total": "#bf1b00", + "used": "#0a50a1" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 6, + "x": 9, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "total", + "color": "#bf1b00", + "fill": 0, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node_memory_MemFree{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Free", + "refId": "A" + }, + { + "expr": "node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "total", + "refId": "B" + }, + { + "expr": "(\n node_memory_Cached{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) + (\n 
node_memory_Buffers{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) + (\n node_memory_Slab{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "buffers/cache", + "refId": "C" + }, + { + "expr": "(\n node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) - (\n (\n node_memory_MemFree{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) + (\n node_memory_Cached{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) + (\n node_memory_Buffers{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) +\n (\n node_memory_Slab{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "used", + "refId": "D" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "RAM used", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (device) (\n rate(\n node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "sum by (device) (\n rate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval])\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network Load", + "tooltip": { + "shared": 
true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "decbytes", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_receive_drop{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_receive_drop_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "rate(node_network_transmit_drop{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_transmit_drop_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network drop rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Each OSD consists of a Journal/WAL partition and a data partition. 
The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 6 + }, + "id": 8, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n ceph_osd_stat_bytes{job=~\"$job\"} and\n on (ceph_daemon) ceph_disk_occupation{job=~\"$job\", instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 6 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_receive_errs{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_receive_errs_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "rate(node_network_transmit_errs{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_transmit_errs_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network error rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 10, + "panels": [ ], + "repeat": null, + "repeatIteration": 
null, + "repeatRowId": null, + "showTitle": true, + "title": "OSD Disk Performance Statistics", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "For any OSD devices on the host, this chart shows the iops per physical device. Each device is shown by it's name and corresponding OSD id value", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 12 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n (\n rate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) writes", + "refId": "A" + }, + { + "expr": "label_replace(\n (\n rate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\"},\"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. 
Each device is shown by device name, and corresponding OSD id", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 12 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*read/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n (\n rate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device)\n group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) write", + "refId": "A" + }, + { + "expr": "label_replace(\n (\n rate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device)\n group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Throughput by Disk", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "For OSD hosts, this chart shows the latency at the physical drive. 
Each drive is shown by device name, with its corresponding OSD id", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 21 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max by(instance, device) (label_replace(\n (rate(node_disk_write_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])) /\n clamp_min(rate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]), 0.001) or\n (rate(node_disk_read_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])) /\n clamp_min(rate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]), 0.001),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 21 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n (\n (rate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) / 10) or\n rate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) * 100\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\", instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"),
\"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": "%Util", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "This table shows the 10 hosts with the highest number of slow ops", + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 40 + }, + "id": 15, + "links": [ ], + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "Instance", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Slow Ops", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (sum by (instance)(ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"}))\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top Slow Ops per Host", + "transform": "table", + "type": "table" + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin", + "overview" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": false, + "label": "Hostname", + "multi": false, + "name": "ceph_hosts", + "options": [ ], + "query": 
"label_values(ceph_disk_occupation, instance)", + "refresh": 1, + "regex": "(.*)", + "sort": 3, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph OSD Host Details", + "uid": "rtOg0AiWz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/hosts-overview.json b/ceph-mon/files/grafana_dashboards/hosts-overview.json new file mode 100644 index 00000000..d9250e5b --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/hosts-overview.json @@ -0,0 +1,880 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (instance) (ceph_osd_metadata{job=~\"$job\"}))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "OSD Hosts", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 4, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": 
"value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(1 - (\n avg by(instance) (\n rate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval]) or\n rate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval])\n )\n))\n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG CPU Busy", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 4, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg ((\n (\n node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) - ((\n node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (\n node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) + (\n node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) + (\n node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n )\n )\n) / (\n node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"}\n))\n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG RAM Utilization", + "type": 
"singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "IOPS Load at the device as reported by the OS on all OSD hosts", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 12, + "y": 0 + }, + "id": 5, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ((\n rate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[$__rate_interval]) or\n rate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[$__rate_interval])\n) + (\n rate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[$__rate_interval]) or\n rate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[$__rate_interval])\n))\n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Physical IOPS", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Average Disk utilization for all OSD data devices (i.e. 
excludes journal/WAL)", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 16, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg (\n label_replace(\n (rate(node_disk_io_time_ms[$__rate_interval]) / 10 ) or\n (rate(node_disk_io_time_seconds_total[$__rate_interval]) * 100),\n \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", instance=~\"($osd_hosts).*\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n )\n)\n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG Disk Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Total send/receive network load across all hosts in the Ceph cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 7, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (\n (\n rate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n ) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n) +\nsum (\n (\n rate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n ) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n)\n", +
"format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Network Load", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Show the top 10 busiest hosts by cpu", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10,\n 100 * (\n 1 - (\n avg by(instance) (\n rate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval]) or\n rate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval])\n )\n )\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Busy - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Top 10 hosts by network load", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10, (sum by(instance) (\n(\n rate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n) +\n(\n rate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\"))\n))\n", + "format": 
"time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network Load - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "osd_hosts", + "options": [ ], + "query": "label_values(ceph_disk_occupation{job=~\"$job\"}, exported_instance)", + "refresh": 1, + "regex": "([^.]*).*", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "mon_hosts", + "options": [ ], + "query": "label_values(ceph_mon_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "mon.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "mds_hosts", + "options": [ ], + "query": "label_values(ceph_mds_inodes{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "mds.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_hosts", + "options": [ ], + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "rgw.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph OSD 
Host Overview", + "uid": "y0KGL0iZz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/osd-device-details.json b/ceph-mon/files/grafana_dashboards/osd-device-details.json new file mode 100644 index 00000000..2e817476 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/osd-device-details.json @@ -0,0 +1,857 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OSD Performance", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "read", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_osd_op_r_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "read", + "refId": "A" + }, + { + "expr": "rate(ceph_osd_op_w_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "write", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + 
"show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "Reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_osd_op_r{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "rate(ceph_osd_op_w{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "Read Bytes", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_osd_op_r_out_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Bytes", + "refId": "A" + }, + { + "expr": "rate(ceph_osd_op_w_in_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write Bytes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 6, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Physical Device Performance", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + 
"description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 11 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(\n label_replace(\n rate(node_disk_read_time_seconds_total{}[$__rate_interval]) /\n rate(node_disk_reads_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n ) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Reads", + "refId": "A" + }, + { + "expr": "(\n label_replace(\n rate(node_disk_write_time_seconds_total{}[$__rate_interval]) /\n rate(node_disk_writes_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device)\n label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n )\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Latency for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 11 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(node_disk_writes_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", 
\"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Writes", + "refId": "A" + }, + { + "expr": "label_replace(\n rate(node_disk_reads_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W IOPS for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 11 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(node_disk_read_bytes_total{}[$__rate_interval]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Reads", + "refId": "A" + }, + { + "expr": "label_replace(\n rate(node_disk_written_bytes_total{}[$__rate_interval]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W Bytes for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 11 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(node_disk_io_time_seconds_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Util% for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": false, + "label": "OSD", + "multi": false, + "name": "osd", + "options": [ ], + "query": "label_values(ceph_osd_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "OSD device details", + "uid": 
"CrAHE0iZz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/osds-overview.json b/ceph-mon/files/grafana_dashboards/osds-overview.json new file mode 100644 index 00000000..50d0254f --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/osds-overview.json @@ -0,0 +1,1028 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "table", + "name": "Table", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { + "@95%ile": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) * 1000\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG read", + "refId": "A" + }, + { + "expr": "max(\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) * 1000\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX read", + "refId": "B" + }, + { + "expr": "quantile(0.95,\n (\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval])\n * 1000\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "@95%ile", + "refId": "C" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Read Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ] + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 3, + "links": [ ], + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": 
[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "ceph_daemon", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n (\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n )\n ))\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest READ Latencies", + "transform": "table", + "type": "table" + }, + { + "aliasColors": { + "@95%ile write": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 12, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval])\n * 1000\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG write", + "refId": "A" + }, + { + "expr": "max(\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX write", + "refId": "B" + }, + { + "expr": "quantile(0.95, (\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "@95%ile write", + "refId": "C" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Write Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + 
"max": null, + "min": "0", + "show": true + } + ] + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 5, + "links": [ ], + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "ceph_daemon", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n (rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000)\n ))\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest WRITE Latencies", + "transform": "table", + "type": "table" + }, + { + "aliasColors": { }, + "datasource": "${prometheusds}", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 8 + }, + "id": 6, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "count by (device_class) (ceph_osd_metadata{job=~\"$job\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device_class}}", + "refId": "A" + } + ], + "title": "OSD Types Summary", + "type": "piechart", + "valueName": "current" + }, + { + "aliasColors": { + "Non-Encrypted": "#E5AC0E" + }, + "datasource": "${prometheusds}", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 4, + "y": 8 + }, + "id": 7, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "count(ceph_bluefs_wal_total_bytes{job=~\"$job\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "bluestore", + "refId": "A" + }, + { + "expr": "absent(ceph_bluefs_wal_total_bytes{job=~\"$job\"}) * count(ceph_osd_metadata{job=~\"$job\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "filestore", + "refId": "B" + } + ], + "title": "OSD Objectstore Types", + "type": "piechart", + "valueName": "current" + }, + { + "aliasColors": { }, + "datasource": "${prometheusds}", + "description": "The pie chart shows the various OSD sizes used within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 8 + }, + "id": 8, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": 
[ + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} < 1099511627776)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<1TB", + "refId": "A" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 1099511627776 < 2199023255552)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<2TB", + "refId": "B" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 2199023255552 < 3298534883328)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<3TB", + "refId": "C" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 3298534883328 < 4398046511104)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<4TB", + "refId": "D" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 4398046511104 < 6597069766656)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<6TB", + "refId": "E" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 6597069766656 < 8796093022208)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<8TB", + "refId": "F" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 8796093022208 < 10995116277760)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<10TB", + "refId": "G" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 10995116277760 < 13194139533312)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<12TB", + "refId": "H" + }, + { + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 13194139533312)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "<12TB+", + "refId": "I" + } + ], + "title": "OSD Size Summary", + "type": "piechart", + "valueName": "current" + }, + { + "aliasColors": { }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 12, + "y": 8 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_osd_numpg{job=~\"$job\"}", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "PGs per OSD", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Distribution of PGs per OSD", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 20, + "mode": "histogram", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "# of OSDs", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "This gauge panel shows onode Hits ratio to help determine if increasing RAM per OSD could help improve the performance of the cluster", + "format": 
"percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 8 + }, + "id": 10, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_bluestore_onode_hits{job=~\"$job\"}) / (\n sum(ceph_bluestore_onode_hits{job=~\"$job\"}) +\n sum(ceph_bluestore_onode_misses{job=~\"$job\"})\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": ".75", + "title": "OSD onode Hits Ratio", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 11, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "R/W Profile", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Show the read/write workload profile overtime", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "round(sum(rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Read/Write Profile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "This table shows the 10 OSDs with the highest number of slow ops", + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 20 + }, + "id": 13, + "links": [ ], + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + 
"alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "ceph_daemon", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Slow Ops", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"})\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top Slow Ops", + "transform": "table", + "type": "table" + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "OSD Overview", + "uid": "lo02I1Aiz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/pool-detail.json b/ceph-mon/files/grafana_dashboards/pool-detail.json new file mode 100644 index 00000000..9e4cf744 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/pool-detail.json @@ -0,0 +1,694 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + 
"hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "(ceph_pool_stored{job=~\"$job\"} / (ceph_pool_stored{job=~\"$job\"} + ceph_pool_max_avail{job=~\"$job\"})) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": ".7,.8", + "title": "Capacity used", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": 100, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Time till pool is full assuming the average fill rate of the last 6 hours", + "format": "s", + "gauge": { + "maxValue": false, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 7, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": "" + }, + "tableColumn": "", + "targets": [ + { + "expr": "(ceph_pool_max_avail{job=~\"$job\"} / deriv(ceph_pool_stored{job=~\"$job\"}[6h])) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"} > 0\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "current", + "title": "Time till full", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": false + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": 
false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "deriv(ceph_pool_objects{job=~\"$job\"}[$__rate_interval]) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Objects per second", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Object Ingress/Egress", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ops", + "label": "Objects out(-) / in(+) ", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval]) *\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "A" + }, + { + "expr": "rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval]) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "iops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 7 + }, + 
"id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "A" + }, + { + "expr": "rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_objects{job=~\"$job\"} *\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Number of Objects", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Objects", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Objects", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 22, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + 
"multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": false, + "label": "Pool Name", + "multi": false, + "name": "pool_name", + "options": [ ], + "query": "label_values(ceph_pool_metadata{job=~\"$job\"}, name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pool Details", + "uid": "-xyV8KCiz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/pool-overview.json b/ceph-mon/files/grafana_dashboards/pool-overview.json new file mode 100644 index 00000000..6316d4c5 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/pool-overview.json @@ -0,0 +1,1711 @@ +{ + "__inputs": [ ], + "__requires": [ ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata{job=~\"$job\"})", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Pools", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": 
false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Count of the pools that have compression enabled", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 3, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata{job=~\"$job\", compression_mode!=\"none\"})", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Pools with Compression", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Total raw capacity available to the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 6, + "y": 0 + }, + "id": 4, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes{job=~\"$job\"})", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Total Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Total raw capacity consumed by user data and associated overheads (metadata + redundancy)", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 5, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_bytes_used{job=~\"$job\"})", + "format": "", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Raw Capacity Consumed", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Total of client data stored in the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_stored{job=~\"$job\"})", + "format": "", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Logical Stored ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 15, + "y": 0 + }, + "id": 7, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n ceph_pool_compress_under_bytes{job=~\"$job\"} -\n ceph_pool_compress_bytes_used{job=~\"$job\"}\n)\n", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", 
+ "title": "Compression Savings", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 18, + "y": 0 + }, + "id": 8, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(\n sum(ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n sum(ceph_pool_stored_raw{job=~\"$job\"} and ceph_pool_compress_under_bytes{job=~\"$job\"} > 0)\n) * 100\n", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Compression Eligibility", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${prometheusds}", + "description": "This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. 
It does not account for data written that was ineligible for compression (too small, or compression yield too low)", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 9, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n ceph_pool_compress_under_bytes{job=~\"$job\"} > 0)\n / sum(ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n)\n", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Compression Factor", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "fieldConfig": { + "defaults": { + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "filterable": true, + "inspect": false + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "name" + }, + "properties": [ + { + "id": "displayName", + "value": "Pool Name" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "pool_id" + }, + "properties": [ + { + "id": "displayName", + "value": "Pool ID" + }, + { + "id": "unit", + "value": "none" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Compression Factor" + }, + { + "id": "unit", + "value": "none" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "% Used" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.cellOptions", + "value": { + "type": "color-text" + } + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(245, 54, 54, 0.9)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "rgba(50, 172, 45, 0.97)", + "value": 85 + } + ] + } + } + ] + }, + { + 
"matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Usable Free" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Compression Eligibility" + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Compression Savings" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Growth (5d)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.cellOptions", + "value": { + "type": "color-text" + } + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(245, 54, 54, 0.9)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "rgba(50, 172, 45, 0.97)", + "value": 85 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "IOPS" + }, + { + "id": "unit", + "value": "none" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Bandwidth" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "type" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "compression_mode" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "description" + }, + "properties": [ + { + "id": "displayName", + "value": "Type" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #J" + }, + "properties": [ + { + "id": "displayName", + "value": "Stored" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #I" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #K" + }, + "properties": [ + { + "id": "displayName", + "value": "Compression" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 10, + "links": [ ], + "options": { + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 1, + "showHeader": true + }, + "pluginVersion": "10.4.0", + "styles": "", + "targets": [ + { + "expr": "(\n 
ceph_pool_compress_under_bytes{job=~\"$job\"} /\n ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n) and on(pool_id) (\n (\n (ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n ceph_pool_stored_raw{job=~\"$job\"}\n ) * 100 > 0.5\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "A", + "refId": "A" + }, + { + "expr": "ceph_pool_max_avail{job=~\"$job\"} *\n on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"}\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "B", + "refId": "B" + }, + { + "expr": "(\n (ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n ceph_pool_stored_raw{job=~\"$job\"}\n) * 100\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "C", + "refId": "C" + }, + { + "expr": "ceph_pool_percent_used{job=~\"$job\"} *\n on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"}\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "D", + "refId": "D" + }, + { + "expr": "ceph_pool_compress_under_bytes{job=~\"$job\"} -\n ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "E", + "refId": "E" + }, + { + "expr": "delta(ceph_pool_stored{job=~\"$job\"}[5d])", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "F", + "refId": "F" + }, + { + "expr": "rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval])\n + rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "G", + "refId": "G" + }, + { + "expr": "rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval])\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "H", + "refId": "H" + }, + { + "expr": "ceph_pool_metadata{job=~\"$job\"}", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "I", + "refId": "I" + }, + { + "expr": "ceph_pool_stored{job=~\"$job\"} * on(pool_id) group_left ceph_pool_metadata{job=~\"$job\"}", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "J", + "refId": "J" + }, + { + "expr": "ceph_pool_metadata{job=~\"$job\", compression_mode!=\"none\"}", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "K", + "refId": "K" + }, + { + "expr": "", + "format": "", + "intervalFactor": "", + "legendFormat": "L", + "refId": "L" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Pool Overview", + "transformations": [ + { + "id": "merge", + "options": { } + }, + { + "id": "seriesToRows", + "options": { } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value #A": true, + "Value #B": false, + "Value #C": true, + "Value #D": false, + "Value #E": true, + "Value #I": true, + "Value #K": true, + "__name__": true, + "cluster": true, + "compression_mode": true, + "instance": true, + "job": true, + "pool_id": true, + "type": true + }, + "includeByName": { }, + "indexByName": { }, + "renameByName": { } + } + } + ], + "type": "table" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "This chart shows the sum of read and write IOPS from all clients by pool", + "fieldConfig": { + "defaults": { + "custom": { + "fillOpacity": 8, + "showPoints": "never" + }, + "unit": "short" + } + }, + 
"fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,\n round(\n (\n rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])\n ), 1\n ) * on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"})\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}} ", + "refId": "A" + }, + { + "expr": "topk($topk,\n rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"}\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}} - write", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Top $topk Client IOPS by Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "timeseries", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "IOPS", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "The chart shows the sum of read and write bytes from all clients, by pool", + "fieldConfig": { + "defaults": { + "custom": { + "fillOpacity": 8, + "showPoints": "never" + }, + "unit": "Bps" + } + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,\n (\n rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval])\n ) * on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\"}\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Top $topk Client Bandwidth by Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "timeseries", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Throughput", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Historical view of capacity usage, to help identify growth and trends in pool consumption", + "fieldConfig": { + "defaults": { + "custom": { + "fillOpacity": 8, + "showPoints": "never" + }, + "unit": "bytes" + } + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_bytes_used{job=~\"$job\"} * on(pool_id) group_right ceph_pool_metadata{job=~\"$job\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Pool Capacity Usage (RAW)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "timeseries", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Capacity Used", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 22, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "15", + "value": "15" + }, + "hide": 0, + "includeAll": false, + "label": "TopK", + "multi": false, + "name": "topk", + "options": [ + { + "text": "15", + "value": "15" + } + ], + "query": "15", + "refresh": 0, + "type": "custom" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pools Overview", + "uid": 
"z99hzWtmk", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/radosgw-detail.json b/ceph-mon/files/grafana_dashboards/radosgw-detail.json new file mode 100644 index 00000000..58d17389 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/radosgw-detail.json @@ -0,0 +1,522 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "RGW Host Detail : $rgw_servers", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (instance_id) (\n rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "sum by (instance_id) (\n rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUT {{ceph_daemon}}", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$rgw_servers GET/PUT Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 7, + "x": 6, + "y": 1 + 
}, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon)\n ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth by HTTP Operation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + "GETs": "#7eb26d", + "Other": "#447ebc", + "PUTs": "#eab839", + "Requests": "#3f2b5b", + "Requests Failed": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 7, + "x": 13, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\",ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Requests Failed {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) -\n (\n rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) +\n 
rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval])\n )\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other {{ceph_daemon}}", + "refId": "D" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "HTTP Request Breakdown", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + "Failures": "#bf1b00", + "GETs": "#7eb26d", + "Other (HEAD,POST,DELETE)": "#447ebc", + "PUTs": "#eab839", + "Requests": "#3f2b5b" + }, + "datasource": "${prometheusds}", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 6, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Failures {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) -\n (\n rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval])\n )\n) * on (instance_id) group_left (ceph_daemon)\n ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", + "refId": "D" + } + ], + "title": "Workload Breakdown", + "type": "piechart", + "valueName": "current" + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin", + "overview" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + 
"type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", + "options": [ ], + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Instance Detail", + "uid": "x5ARzZtmk", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/radosgw-overview.json b/ceph-mon/files/grafana_dashboards/radosgw-overview.json new file mode 100644 index 00000000..12c56b6e --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/radosgw-overview.json @@ -0,0 +1,695 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "RGW Overview - All Gateways", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET {{rgw_host}}", + "refId": "A" + }, + { + "expr": "label_replace(\n rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", + "format": "time_series", + 
"intervalFactor": 1, + "legendFormat": "PUT {{rgw_host}}", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average GET/PUT Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 7, + "x": 8, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (rgw_host) (\n label_replace(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n )\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests/sec by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], 
+ "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "GET Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Total bytes transferred in/out of all radosgw instances within the cluster", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs", + "refId": "A" + }, + { + "expr": "sum(rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth Consumed by Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Total bytes transferred in/out through get/put operations, by radosgw instance", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 7, + "x": 8, + "y": 8 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(sum by (instance_id) (\n rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval])) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth 
by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 15, + "y": 8 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "PUT Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin", + "overview" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", + "options": [ ], + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "RGW Server", + "sort": 1, + "tagValuesQuery": "", + "tags": [ 
], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Overview", + "uid": "WAkugZpiz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/radosgw-sync-overview.json b/ceph-mon/files/grafana_dashboards/radosgw-sync-overview.json new file mode 100644 index 00000000..49db9e00 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/radosgw-sync-overview.json @@ -0,0 +1,490 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Replication (throughput) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 
10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Replication (objects) from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Objects/s", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Polling Request Latency from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors{job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{source_zone}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Unsuccessful Object Replications from Source Zone", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Count/s", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin", + "overview" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", + "options": [ ], + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", + "refresh": 1, + "regex": "RGW Server", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Sync Overview", + "uid": "rgw-sync-overview", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/rbd-details.json b/ceph-mon/files/grafana_dashboards/rbd-details.json new file mode 100644 index 00000000..8efc8f47 --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/rbd-details.json @@ -0,0 +1,444 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.3" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "Detailed Performance of RBD Images (IOPS/Throughput/Latency)", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, 
+ "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rbd_write_ops{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Write", + "refId": "A" + }, + { + "expr": "rate(ceph_rbd_read_ops{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rbd_write_bytes{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Write", + "refId": "A" + }, + { + "expr": "rate(ceph_rbd_read_bytes{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those 
optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rbd_write_latency_sum{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval]) /\n rate(ceph_rbd_write_latency_count{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Write", + "refId": "A" + }, + { + "expr": "rate(ceph_rbd_read_latency_sum{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval]) /\n rate(ceph_rbd_read_latency_count{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pool}} Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "pool", + "options": [ ], + "query": "label_values(pool)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "image", + "options": [ ], + "query": "label_values(image)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + 
"10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Details", + "uid": "YhCYGcuZz", + "version": 0 +} diff --git a/ceph-mon/files/grafana_dashboards/rbd-overview.json b/ceph-mon/files/grafana_dashboards/rbd-overview.json new file mode 100644 index 00000000..d46297fa --- /dev/null +++ b/ceph-mon/files/grafana_dashboards/rbd-overview.json @@ -0,0 +1,723 @@ +{ + "__inputs": [ ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.4.2" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + }, + { + "id": "prometheus", + "name": "Prometheus", + "type": "datasource", + "version": "5.0.0" + }, + { + "id": "table", + "name": "Table", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(rate(ceph_rbd_write_ops{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "A" + }, + { + "expr": "round(sum(rate(ceph_rbd_read_ops{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 
8, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(rate(ceph_rbd_write_bytes{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(sum(rate(ceph_rbd_read_bytes{job=~\"$job\"}[$__rate_interval])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(\n sum(rate(ceph_rbd_write_latency_sum{job=~\"$job\"}[$__rate_interval])) /\n sum(rate(ceph_rbd_write_latency_count{job=~\"$job\"}[$__rate_interval]))\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(\n sum(rate(ceph_rbd_read_latency_sum{job=~\"$job\"}[$__rate_interval])) /\n sum(rate(ceph_rbd_read_latency_count{job=~\"$job\"}[$__rate_interval]))\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by 
default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "id": 5, + "links": [ ], + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "IOPS", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "iops", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (\n sort((\n rate(ceph_rbd_write_ops{job=~\"$job\"}[$__rate_interval]) +\n on (image, pool, namespace) rate(ceph_rbd_read_ops{job=~\"$job\"}[$__rate_interval])\n ))\n )\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest IOPS", + "transform": "table", + "type": "table" + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "id": 6, + "links": [ ], + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Throughput", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "Bps", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + 
"rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n sort(\n sum(\n rate(ceph_rbd_read_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rbd_write_bytes{job=~\"$job\"}[$__rate_interval])\n ) by (pool, image, namespace)\n )\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest Throughput", + "transform": "table", + "type": "table" + }, + { + "columns": [ ], + "datasource": "${prometheusds}", + "description": "RBD per-image IO statistics are disabled by default.\n\nPlease refer to https://docs.ceph.com/en/latest/mgr/prometheus/#rbd-io-statistics for information about how to enable those optionally.", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "id": 7, + "links": [ ], + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "ns", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n sum(\n rate(ceph_rbd_write_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n clamp_min(rate(ceph_rbd_write_latency_count{job=~\"$job\"}[$__rate_interval]), 1) +\n rate(ceph_rbd_read_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n clamp_min(rate(ceph_rbd_read_latency_count{job=~\"$job\"}[$__rate_interval]), 1)\n ) by (pool, image, namespace)\n)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest Latency", + "transform": "table", + "type": "table" + } + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "ceph-mixin", + "overview" + ], + "templating": { + "list": [ + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + 
"tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "${prometheusds}", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Overview", + "uid": "41FrpeUiz", + "version": 0 +} diff --git a/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default b/ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yaml similarity index 100% rename from ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yml.default rename to ceph-mon/files/prometheus_alert_rules/prometheus_alerts.yaml diff --git a/ceph-mon/lib/charms/grafana_agent/v0/cos_agent.py b/ceph-mon/lib/charms/grafana_agent/v0/cos_agent.py new file mode 100644 index 00000000..d3130b2b --- /dev/null +++ b/ceph-mon/lib/charms/grafana_agent/v0/cos_agent.py @@ -0,0 +1,842 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +r"""## Overview. + +This library can be used to manage the cos_agent relation interface: + +- `COSAgentProvider`: Use in machine charms that need to have a workload's metrics + or logs scraped, or forward rule files or dashboards to Prometheus, Loki or Grafana through + the Grafana Agent machine charm. + +- `COSAgentConsumer`: Used in the Grafana Agent machine charm to manage the requirer side of + the `cos_agent` interface. + + +## COSAgentProvider Library Usage + +Grafana Agent machine Charmed Operator interacts with its clients using the cos_agent library. +Charms seeking to send telemetry, must do so using the `COSAgentProvider` object from +this charm library. + +Using the `COSAgentProvider` object only requires instantiating it, +typically in the `__init__` method of your charm (the one which sends telemetry). + +The constructor of `COSAgentProvider` has only one required and nine optional parameters: + +```python + def __init__( + self, + charm: CharmType, + relation_name: str = DEFAULT_RELATION_NAME, + metrics_endpoints: Optional[List[_MetricsEndpointDict]] = None, + metrics_rules_dir: str = "./src/prometheus_alert_rules", + logs_rules_dir: str = "./src/loki_alert_rules", + recurse_rules_dirs: bool = False, + log_slots: Optional[List[str]] = None, + dashboard_dirs: Optional[List[str]] = None, + refresh_events: Optional[List] = None, + scrape_configs: Optional[Union[List[Dict], Callable]] = None, + ): +``` + +### Parameters + +- `charm`: The instance of the charm that instantiates `COSAgentProvider`, typically `self`. + +- `relation_name`: If your charmed operator uses a relation name other than `cos-agent` to use + the `cos_agent` interface, this is where you have to specify that. + +- `metrics_endpoints`: In this parameter you can specify the metrics endpoints that Grafana Agent + machine Charmed Operator will scrape. The configs of this list will be merged with the configs + from `scrape_configs`. + +- `metrics_rules_dir`: The directory in which the Charmed Operator stores its metrics alert rules + files. 
+
+- `logs_rules_dir`: The directory in which the Charmed Operator stores its logs alert rules files.
+
+- `recurse_rules_dirs`: This parameter sets whether the Grafana Agent machine Charmed Operator
+  searches the two rules directories above recursively for alert rules files.
+
+- `log_slots`: Snap slots to connect to for scraping logs in the form ["snap-name:slot", ...].
+
+- `dashboard_dirs`: List of directories where the dashboards are stored in the Charmed Operator.
+
+- `refresh_events`: List of events on which to refresh relation data.
+
+- `scrape_configs`: List of standard scrape_configs dicts or a callable that returns the list in
+  case the configs need to be generated dynamically. The contents of this list will be merged
+  with the configs from `metrics_endpoints`.
+
+
+### Example 1 - Minimal instrumentation:
+
+In order to use this object, the following should be in the `charm.py` file.
+
+```python
+from charms.grafana_agent.v0.cos_agent import COSAgentProvider
+...
+class TelemetryProviderCharm(CharmBase):
+    def __init__(self, *args):
+        ...
+        self._grafana_agent = COSAgentProvider(self)
+```
+
+### Example 2 - Full instrumentation:
+
+In order to use this object, the following should be in the `charm.py` file.
+
+```python
+from charms.grafana_agent.v0.cos_agent import COSAgentProvider
+...
+class TelemetryProviderCharm(CharmBase):
+    def __init__(self, *args):
+        ...
+        self._grafana_agent = COSAgentProvider(
+            self,
+            relation_name="custom-cos-agent",
+            metrics_endpoints=[
+                # specify "path" and "port" to scrape from localhost
+                {"path": "/metrics", "port": 9000},
+                {"path": "/metrics", "port": 9001},
+                {"path": "/metrics", "port": 9002},
+            ],
+            metrics_rules_dir="./src/alert_rules/prometheus",
+            logs_rules_dir="./src/alert_rules/loki",
+            recurse_rules_dirs=True,
+            log_slots=["my-app:slot"],
+            dashboard_dirs=["./src/dashboards_1", "./src/dashboards_2"],
+            refresh_events=["update-status", "upgrade-charm"],
+            scrape_configs=[
+                {
+                    "job_name": "custom_job",
+                    "metrics_path": "/metrics",
+                    "authorization": {"credentials": "bearer-token"},
+                    "static_configs": [
+                        {
+                            "targets": ["localhost:9003"],
+                            "labels": {"key": "value"},
+                        },
+                    ],
+                },
+            ]
+        )
+```
+
+### Example 3 - Dynamic scrape configs generation:
+
+Pass a function to the `scrape_configs` argument to decouple the generation of the configs
+from the instantiation of the COSAgentProvider object.
+
+```python
+from charms.grafana_agent.v0.cos_agent import COSAgentProvider
+...
+
+class TelemetryProviderCharm(CharmBase):
+    def generate_scrape_configs(self):
+        return [
+            {
+                "job_name": "custom",
+                "metrics_path": "/metrics",
+                "static_configs": [{"targets": ["localhost:9000"]}],
+            },
+        ]
+
+    def __init__(self, *args):
+        ...
+        self._grafana_agent = COSAgentProvider(
+            self,
+            scrape_configs=self.generate_scrape_configs,
+        )
+```
+
+## COSAgentConsumer Library Usage
+
+This object may be used by any Charmed Operator which gathers telemetry data by
+implementing the consumer side of the `cos_agent` interface, for instance the
+Grafana Agent machine Charmed Operator.
+
+For this purpose the charm needs to instantiate the `COSAgentConsumer` object with one mandatory
+and two optional arguments.
+
+### Parameters
+
+- `charm`: A reference to the parent (Grafana Agent machine) charm.
+
+- `relation_name`: The name of the relation that the charm uses to interact
+  with its clients that provide telemetry data using the `COSAgentProvider` object.
+  If provided, this relation name must match a provided relation in metadata.yaml with the
+  `cos_agent` interface.
+  The default value of this argument is "cos-agent".
+
+- `refresh_events`: List of events on which to refresh relation data.
+
+
+### Example 1 - Minimal instrumentation:
+
+In order to use this object, the following should be in the `charm.py` file.
+
+```python
+from charms.grafana_agent.v0.cos_agent import COSAgentRequirer
+...
+class GrafanaAgentMachineCharm(GrafanaAgentCharm):
+    def __init__(self, *args):
+        ...
+        self._cos = COSAgentRequirer(self)
+```
+
+
+### Example 2 - Full instrumentation:
+
+In order to use this object, the following should be in the `charm.py` file.
+
+```python
+from charms.grafana_agent.v0.cos_agent import COSAgentRequirer
+...
+class GrafanaAgentMachineCharm(GrafanaAgentCharm):
+    def __init__(self, *args):
+        ...
+        self._cos = COSAgentRequirer(
+            self,
+            relation_name="cos-agent-consumer",
+            refresh_events=["update-status", "upgrade-charm"],
+        )
+```
+"""
+
+import base64
+import json
+import logging
+import lzma
+from collections import namedtuple
+from itertools import chain
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Set, Union
+
+import pydantic
+from cosl import JujuTopology
+from cosl.rules import AlertRules
+from ops.charm import RelationChangedEvent
+from ops.framework import EventBase, EventSource, Object, ObjectEvents
+from ops.model import Relation, Unit
+from ops.testing import CharmType
+
+if TYPE_CHECKING:
+    try:
+        from typing import TypedDict
+
+        class _MetricsEndpointDict(TypedDict):
+            path: str
+            port: int
+
+    except ModuleNotFoundError:
+        _MetricsEndpointDict = Dict  # pyright: ignore
+
+LIBID = "dc15fa84cef84ce58155fb84f6c6213a"
+LIBAPI = 0
+LIBPATCH = 6
+
+PYDEPS = ["cosl", "pydantic < 2"]
+
+DEFAULT_RELATION_NAME = "cos-agent"
+DEFAULT_PEER_RELATION_NAME = "peers"
+DEFAULT_SCRAPE_CONFIG = {
+    "static_configs": [{"targets": ["localhost:80"]}],
+    "metrics_path": "/metrics",
+}
+
+logger = logging.getLogger(__name__)
+SnapEndpoint = namedtuple("SnapEndpoint", "owner, name")
+
+
+class GrafanaDashboard(str):
+    """Grafana Dashboard encoded json; lzma-compressed."""
+
+    # TODO Replace this with a custom type when pydantic v2 released (end of 2023 Q1?)
+    # https://github.com/pydantic/pydantic/issues/4887
+    @staticmethod
+    def _serialize(raw_json: Union[str, bytes]) -> "GrafanaDashboard":
+        if not isinstance(raw_json, bytes):
+            raw_json = raw_json.encode("utf-8")
+        encoded = base64.b64encode(lzma.compress(raw_json)).decode("utf-8")
+        return GrafanaDashboard(encoded)
+
+    def _deserialize(self) -> Dict:
+        try:
+            raw = lzma.decompress(base64.b64decode(self.encode("utf-8"))).decode()
+            return json.loads(raw)
+        except json.decoder.JSONDecodeError as e:
+            logger.error("Invalid Dashboard format: %s", e)
+            return {}
+
+    def __repr__(self):
+        """Return string representation of self."""
+        return "<GrafanaDashboard>"
+
+
+class CosAgentProviderUnitData(pydantic.BaseModel):
+    """Unit databag model for `cos-agent` relation."""
+
+    # The following entries are the same for all units of the same principal.
+    # Note that the same grafana agent subordinate may be related to several apps.
+    # This needs to make its way to the gagent leader.
+    metrics_alert_rules: dict
+    log_alert_rules: dict
+    dashboards: List[GrafanaDashboard]
+    subordinate: Optional[bool]
+
+    # The following entries may vary across units of the same principal app.
+ # this data does not need to be forwarded to the gagent leader + metrics_scrape_jobs: List[Dict] + log_slots: List[str] + + # when this whole datastructure is dumped into a databag, it will be nested under this key. + # while not strictly necessary (we could have it 'flattened out' into the databag), + # this simplifies working with the model. + KEY: ClassVar[str] = "config" + + +class CosAgentPeersUnitData(pydantic.BaseModel): + """Unit databag model for `peers` cos-agent machine charm peer relation.""" + + # We need the principal unit name and relation metadata to be able to render identifiers + # (e.g. topology) on the leader side, after all the data moves into peer data (the grafana + # agent leader can only see its own principal, because it is a subordinate charm). + principal_unit_name: str + principal_relation_id: str + principal_relation_name: str + + # The only data that is forwarded to the leader is data that needs to go into the app databags + # of the outgoing o11y relations. + metrics_alert_rules: Optional[dict] + log_alert_rules: Optional[dict] + dashboards: Optional[List[GrafanaDashboard]] + + # when this whole datastructure is dumped into a databag, it will be nested under this key. + # while not strictly necessary (we could have it 'flattened out' into the databag), + # this simplifies working with the model. + KEY: ClassVar[str] = "config" + + @property + def app_name(self) -> str: + """Parse out the app name from the unit name. + + TODO: Switch to using `model_post_init` when pydantic v2 is released? + https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214 + """ + return self.principal_unit_name.split("/")[0] + + +class COSAgentProvider(Object): + """Integration endpoint wrapper for the provider side of the cos_agent interface.""" + + def __init__( + self, + charm: CharmType, + relation_name: str = DEFAULT_RELATION_NAME, + metrics_endpoints: Optional[List["_MetricsEndpointDict"]] = None, + metrics_rules_dir: str = "./src/prometheus_alert_rules", + logs_rules_dir: str = "./src/loki_alert_rules", + recurse_rules_dirs: bool = False, + log_slots: Optional[List[str]] = None, + dashboard_dirs: Optional[List[str]] = None, + refresh_events: Optional[List] = None, + *, + scrape_configs: Optional[Union[List[dict], Callable]] = None, + ): + """Create a COSAgentProvider instance. + + Args: + charm: The `CharmBase` instance that is instantiating this object. + relation_name: The name of the relation to communicate over. + metrics_endpoints: List of endpoints in the form [{"path": path, "port": port}, ...]. + This argument is a simplified form of the `scrape_configs`. + The contents of this list will be merged with the contents of `scrape_configs`. + metrics_rules_dir: Directory where the metrics rules are stored. + logs_rules_dir: Directory where the logs rules are stored. + recurse_rules_dirs: Whether to recurse into rule paths. + log_slots: Snap slots to connect to for scraping logs + in the form ["snap-name:slot", ...]. + dashboard_dirs: Directory where the dashboards are stored. + refresh_events: List of events on which to refresh relation data. + scrape_configs: List of standard scrape_configs dicts or a callable + that returns the list in case the configs need to be generated dynamically. + The contents of this list will be merged with the contents of `metrics_endpoints`. 
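Taken together, the pieces above define the wire format: dashboards are lzma-compressed and base64-encoded by `GrafanaDashboard`, and the resulting strings ride inside the pydantic models' JSON dumps in the relation databags. A standalone sketch of the full round trip, using a hypothetical trimmed-down stand-in for CosAgentProviderUnitData (pydantic v1 semantics, per the `pydantic < 2` pin):

```python
import base64
import json
import lzma

import pydantic  # v1 API, per the "pydantic < 2" pin


class UnitData(pydantic.BaseModel):
    """Hypothetical, trimmed-down stand-in for CosAgentProviderUnitData."""

    dashboards: list = []
    metrics_scrape_jobs: list = []


# Dashboard JSON is lzma-compressed then base64-encoded (GrafanaDashboard._serialize):
dashboard = {"title": "RBD Overview", "uid": "41FrpeUiz"}
encoded = base64.b64encode(lzma.compress(json.dumps(dashboard).encode("utf-8"))).decode("utf-8")

# The provider stores the model's JSON dump under KEY ("config") in its unit databag:
data = UnitData(dashboards=[encoded], metrics_scrape_jobs=[{"job_name": "demo"}])
databag = {"config": data.json()}

# The requirer side reverses both steps (GrafanaDashboard._deserialize):
recovered = json.loads(databag["config"])
decoded = json.loads(lzma.decompress(base64.b64decode(recovered["dashboards"][0])).decode())
assert decoded == dashboard
```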
+ """ + super().__init__(charm, relation_name) + dashboard_dirs = dashboard_dirs or ["./src/grafana_dashboards"] + + self._charm = charm + self._relation_name = relation_name + self._metrics_endpoints = metrics_endpoints or [] + self._scrape_configs = scrape_configs or [] + self._metrics_rules = metrics_rules_dir + self._logs_rules = logs_rules_dir + self._recursive = recurse_rules_dirs + self._log_slots = log_slots or [] + self._dashboard_dirs = dashboard_dirs + self._refresh_events = refresh_events or [self._charm.on.config_changed] + + events = self._charm.on[relation_name] + self.framework.observe(events.relation_joined, self._on_refresh) + self.framework.observe(events.relation_changed, self._on_refresh) + for event in self._refresh_events: + self.framework.observe(event, self._on_refresh) + + def _on_refresh(self, event): + """Trigger the class to update relation data.""" + relations = self._charm.model.relations[self._relation_name] + + for relation in relations: + # Before a principal is related to the grafana-agent subordinate, we'd get + # ModelError: ERROR cannot read relation settings: unit "zk/2": settings not found + # Add a guard to make sure it doesn't happen. + if relation.data and self._charm.unit in relation.data: + # Subordinate relations can communicate only over unit data. + try: + data = CosAgentProviderUnitData( + metrics_alert_rules=self._metrics_alert_rules, + log_alert_rules=self._log_alert_rules, + dashboards=self._dashboards, + metrics_scrape_jobs=self._scrape_jobs, + log_slots=self._log_slots, + subordinate=self._charm.meta.subordinate, + ) + relation.data[self._charm.unit][data.KEY] = data.json() + except ( + pydantic.ValidationError, + json.decoder.JSONDecodeError, + ) as e: + logger.error("Invalid relation data provided: %s", e) + + @property + def _scrape_jobs(self) -> List[Dict]: + """Return a prometheus_scrape-like data structure for jobs. 
+ + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + """ + if callable(self._scrape_configs): + scrape_configs = self._scrape_configs() + else: + # Create a copy of the user scrape_configs, since we will mutate this object + scrape_configs = self._scrape_configs.copy() + + # Convert "metrics_endpoints" to standard scrape_configs, and add them in + for endpoint in self._metrics_endpoints: + scrape_configs.append( + { + "metrics_path": endpoint["path"], + "static_configs": [{"targets": [f"localhost:{endpoint['port']}"]}], + } + ) + + scrape_configs = scrape_configs or [DEFAULT_SCRAPE_CONFIG] + + # Augment job name to include the app name and a unique id (index) + for idx, scrape_config in enumerate(scrape_configs): + scrape_config["job_name"] = "_".join( + [self._charm.app.name, str(idx), scrape_config.get("job_name", "default")] + ) + + return scrape_configs + + @property + def _metrics_alert_rules(self) -> Dict: + """Use (for now) the prometheus_scrape AlertRules to initialize this.""" + alert_rules = AlertRules( + query_type="promql", topology=JujuTopology.from_charm(self._charm) + ) + alert_rules.add_path(self._metrics_rules, recursive=self._recursive) + return alert_rules.as_dict() + + @property + def _log_alert_rules(self) -> Dict: + """Use (for now) the loki_push_api AlertRules to initialize this.""" + alert_rules = AlertRules(query_type="logql", topology=JujuTopology.from_charm(self._charm)) + alert_rules.add_path(self._logs_rules, recursive=self._recursive) + return alert_rules.as_dict() + + @property + def _dashboards(self) -> List[GrafanaDashboard]: + dashboards: List[GrafanaDashboard] = [] + for d in self._dashboard_dirs: + for path in Path(d).glob("*"): + dashboard = GrafanaDashboard._serialize(path.read_bytes()) + dashboards.append(dashboard) + return dashboards + + +class COSAgentDataChanged(EventBase): + """Event emitted by `COSAgentRequirer` when relation data changes.""" + + +class COSAgentValidationError(EventBase): + """Event emitted by `COSAgentRequirer` when there is an error in the relation data.""" + + def __init__(self, handle, message: str = ""): + super().__init__(handle) + self.message = message + + def snapshot(self) -> Dict: + """Save COSAgentValidationError source information.""" + return {"message": self.message} + + def restore(self, snapshot): + """Restore COSAgentValidationError source information.""" + self.message = snapshot["message"] + + +class COSAgentRequirerEvents(ObjectEvents): + """`COSAgentRequirer` events.""" + + data_changed = EventSource(COSAgentDataChanged) + validation_error = EventSource(COSAgentValidationError) + + +class MultiplePrincipalsError(Exception): + """Custom exception for when there are multiple principal applications.""" + + pass + + +class COSAgentRequirer(Object): + """Integration endpoint wrapper for the Requirer side of the cos_agent interface.""" + + on = COSAgentRequirerEvents() # pyright: ignore + + def __init__( + self, + charm: CharmType, + *, + relation_name: str = DEFAULT_RELATION_NAME, + peer_relation_name: str = DEFAULT_PEER_RELATION_NAME, + refresh_events: Optional[List[str]] = None, + ): + """Create a COSAgentRequirer instance. + + Args: + charm: The `CharmBase` instance that is instantiating this object. + relation_name: The name of the relation to communicate over. + peer_relation_name: The name of the peer relation to communicate over. + refresh_events: List of events on which to refresh relation data. 
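As a concrete illustration of the provider's `_scrape_jobs` merging above: endpoints given as `{"path", "port"}` pairs become standard scrape configs, and every config's job name is prefixed with the app name plus an index so jobs stay unique. A standalone approximation of that logic (`ceph-mon` is a hypothetical app name standing in for `self._charm.app.name`):

```python
app_name = "ceph-mon"  # hypothetical; the real code uses self._charm.app.name
metrics_endpoints = [{"path": "/metrics", "port": 9283}]

# Convert simplified endpoints into standard scrape_configs entries.
scrape_configs = [
    {
        "metrics_path": ep["path"],
        "static_configs": [{"targets": [f"localhost:{ep['port']}"]}],
    }
    for ep in metrics_endpoints
]

# Augment each job name with the app name and a unique index.
for idx, cfg in enumerate(scrape_configs):
    cfg["job_name"] = "_".join([app_name, str(idx), cfg.get("job_name", "default")])

print(scrape_configs[0]["job_name"])  # -> ceph-mon_0_default
```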
+ """ + super().__init__(charm, relation_name) + self._charm = charm + self._relation_name = relation_name + self._peer_relation_name = peer_relation_name + self._refresh_events = refresh_events or [self._charm.on.config_changed] + + events = self._charm.on[relation_name] + self.framework.observe( + events.relation_joined, self._on_relation_data_changed + ) # TODO: do we need this? + self.framework.observe(events.relation_changed, self._on_relation_data_changed) + for event in self._refresh_events: + self.framework.observe(event, self.trigger_refresh) # pyright: ignore + + # Peer relation events + # A peer relation is needed as it is the only mechanism for exchanging data across + # subordinate units. + # self.framework.observe( + # self.on[self._peer_relation_name].relation_joined, self._on_peer_relation_joined + # ) + peer_events = self._charm.on[peer_relation_name] + self.framework.observe(peer_events.relation_changed, self._on_peer_relation_changed) + + @property + def peer_relation(self) -> Optional["Relation"]: + """Helper function for obtaining the peer relation object. + + Returns: peer relation object + (NOTE: would return None if called too early, e.g. during install). + """ + return self.model.get_relation(self._peer_relation_name) + + def _on_peer_relation_changed(self, _): + # Peer data is used for forwarding data from principal units to the grafana agent + # subordinate leader, for updating the app data of the outgoing o11y relations. + if self._charm.unit.is_leader(): + self.on.data_changed.emit() # pyright: ignore + + def _on_relation_data_changed(self, event: RelationChangedEvent): + # Peer data is the only means of communication between subordinate units. + if not self.peer_relation: + event.defer() + return + + cos_agent_relation = event.relation + if not event.unit or not cos_agent_relation.data.get(event.unit): + return + principal_unit = event.unit + + # Coherence check + units = cos_agent_relation.units + if len(units) > 1: + # should never happen + raise ValueError( + f"unexpected error: subordinate relation {cos_agent_relation} " + f"should have exactly one unit" + ) + + if not (raw := cos_agent_relation.data[principal_unit].get(CosAgentProviderUnitData.KEY)): + return + + if not (provider_data := self._validated_provider_data(raw)): + return + + # Copy data from the principal relation to the peer relation, so the leader could + # follow up. + # Save the originating unit name, so it could be used for topology later on by the leader. + data = CosAgentPeersUnitData( # peer relation databag model + principal_unit_name=event.unit.name, + principal_relation_id=str(event.relation.id), + principal_relation_name=event.relation.name, + metrics_alert_rules=provider_data.metrics_alert_rules, + log_alert_rules=provider_data.log_alert_rules, + dashboards=provider_data.dashboards, + ) + self.peer_relation.data[self._charm.unit][ + f"{CosAgentPeersUnitData.KEY}-{event.unit.name}" + ] = data.json() + + # We can't easily tell if the data that was changed is limited to only the data + # that goes into peer relation (in which case, if this is not a leader unit, we wouldn't + # need to emit `on.data_changed`), so we're emitting `on.data_changed` either way. 
+        self.on.data_changed.emit()  # pyright: ignore
+
+    def _validated_provider_data(self, raw) -> Optional[CosAgentProviderUnitData]:
+        try:
+            return CosAgentProviderUnitData(**json.loads(raw))
+        except (pydantic.ValidationError, json.decoder.JSONDecodeError) as e:
+            self.on.validation_error.emit(message=str(e))  # pyright: ignore
+            return None
+
+    def trigger_refresh(self, _):
+        """Trigger a refresh of relation data."""
+        # FIXME: Figure out what we should do here
+        self.on.data_changed.emit()  # pyright: ignore
+
+    @property
+    def _principal_unit(self) -> Optional[Unit]:
+        """Return the principal unit for a relation.
+
+        Assumes that the relation is of type subordinate.
+        Relies on the fact that, for subordinate relations, the only remote unit visible to
+        *this unit* is the principal unit that this unit is attached to.
+        """
+        if relations := self._principal_relations:
+            # Technically it's a list, but for subordinates there can only be one relation
+            principal_relation = next(iter(relations))
+            if units := principal_relation.units:
+                # Technically it's a list, but for subordinates there can only be one
+                return next(iter(units))
+
+        return None
+
+    @property
+    def _principal_relations(self):
+        relations = []
+        for relation in self._charm.model.relations[self._relation_name]:
+            if not json.loads(relation.data[next(iter(relation.units))]["config"]).get(
+                "subordinate", False
+            ):
+                relations.append(relation)
+        if len(relations) > 1:
+            logger.error(
+                "Multiple applications claiming to be principal. Update the cos-agent library in the client application charms."
+            )
+            raise MultiplePrincipalsError("Multiple principal applications.")
+        return relations
+
+    @property
+    def _remote_data(self) -> List[CosAgentProviderUnitData]:
+        """Return a list of remote data from each of the related units.
+
+        Assumes that the relation is of type subordinate.
+        Relies on the fact that, for subordinate relations, the only remote unit visible to
+        *this unit* is the principal unit that this unit is attached to.
+        """
+        all_data = []
+
+        for relation in self._charm.model.relations[self._relation_name]:
+            if not relation.units:
+                continue
+            unit = next(iter(relation.units))
+            if not (raw := relation.data[unit].get(CosAgentProviderUnitData.KEY)):
+                continue
+            if not (provider_data := self._validated_provider_data(raw)):
+                continue
+            all_data.append(provider_data)
+
+        return all_data
+
+    def _gather_peer_data(self) -> List[CosAgentPeersUnitData]:
+        """Collect data from the peers.
+
+        Returns a trimmed-down list of CosAgentPeersUnitData.
+        """
+        relation = self.peer_relation
+
+        # Ensure that whatever context we're running this in, we take the necessary precautions:
+        if not relation or not relation.data or not relation.app:
+            return []
+
+        # Iterate over all peer unit data and only collect every principal once.
+        peer_data: List[CosAgentPeersUnitData] = []
+        app_names: Set[str] = set()
+
+        for unit in chain((self._charm.unit,), relation.units):
+            if not relation.data.get(unit):
+                continue
+
+            for unit_name in relation.data.get(unit):  # pyright: ignore
+                if not unit_name.startswith(CosAgentPeersUnitData.KEY):
+                    continue
+                raw = relation.data[unit].get(unit_name)
+                if raw is None:
+                    continue
+                data = CosAgentPeersUnitData(**json.loads(raw))
+                # Have we already seen this principal app?
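+                # (If so, skip it: units of the same app are expected to ship
+                # identical rules and dashboards, so one copy per app is kept.)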
+ if (app_name := data.app_name) in app_names: + continue + peer_data.append(data) + app_names.add(app_name) + + return peer_data + + @property + def metrics_alerts(self) -> Dict[str, Any]: + """Fetch metrics alerts.""" + alert_rules = {} + + seen_apps: List[str] = [] + for data in self._gather_peer_data(): + if rules := data.metrics_alert_rules: + app_name = data.app_name + if app_name in seen_apps: + continue # dedup! + seen_apps.append(app_name) + # This is only used for naming the file, so be as specific as we can be + identifier = JujuTopology( + model=self._charm.model.name, + model_uuid=self._charm.model.uuid, + application=app_name, + # For the topology unit, we could use `data.principal_unit_name`, but that unit + # name may not be very stable: `_gather_peer_data` de-duplicates by app name so + # the exact unit name that turns up first in the iterator may vary from time to + # time. So using the grafana-agent unit name instead. + unit=self._charm.unit.name, + ).identifier + + alert_rules[identifier] = rules + + return alert_rules + + @property + def metrics_jobs(self) -> List[Dict]: + """Parse the relation data contents and extract the metrics jobs.""" + scrape_jobs = [] + for data in self._remote_data: + for job in data.metrics_scrape_jobs: + # In #220, relation schema changed from a simplified dict to the standard + # `scrape_configs`. + # This is to ensure backwards compatibility with Providers older than v0.5. + if "path" in job and "port" in job and "job_name" in job: + job = { + "job_name": job["job_name"], + "metrics_path": job["path"], + "static_configs": [{"targets": [f"localhost:{job['port']}"]}], + } + + scrape_jobs.append(job) + + return scrape_jobs + + @property + def snap_log_endpoints(self) -> List[SnapEndpoint]: + """Fetch logging endpoints exposed by related snaps.""" + plugs = [] + for data in self._remote_data: + targets = data.log_slots + if targets: + for target in targets: + if target in plugs: + logger.warning( + f"plug {target} already listed. " + "The same snap is being passed from multiple " + "endpoints; this should not happen." + ) + else: + plugs.append(target) + + endpoints = [] + for plug in plugs: + if ":" not in plug: + logger.error(f"invalid plug definition received: {plug}. Ignoring...") + else: + endpoint = SnapEndpoint(*plug.split(":")) + endpoints.append(endpoint) + return endpoints + + @property + def logs_alerts(self) -> Dict[str, Any]: + """Fetch log alerts.""" + alert_rules = {} + seen_apps: List[str] = [] + + for data in self._gather_peer_data(): + if rules := data.log_alert_rules: + # This is only used for naming the file, so be as specific as we can be + app_name = data.app_name + if app_name in seen_apps: + continue # dedup! + seen_apps.append(app_name) + + identifier = JujuTopology( + model=self._charm.model.name, + model_uuid=self._charm.model.uuid, + application=app_name, + # For the topology unit, we could use `data.principal_unit_name`, but that unit + # name may not be very stable: `_gather_peer_data` de-duplicates by app name so + # the exact unit name that turns up first in the iterator may vary from time to + # time. So using the grafana-agent unit name instead. + unit=self._charm.unit.name, + ).identifier + + alert_rules[identifier] = rules + + return alert_rules + + @property + def dashboards(self) -> List[Dict[str, str]]: + """Fetch dashboards as encoded content. + + Dashboards are assumed not to vary across units of the same primary. 
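+
+        Each returned entry is a dict of the shape (sketch of the code below):
+
+            {
+                "relation_id": <principal relation id>,
+                "charm": "<principal relation name>-<app name>",
+                "content": <deserialized dashboard dict>,
+                "title": <dashboard title>,
+            }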
+ """ + dashboards: List[Dict[str, Any]] = [] + + seen_apps: List[str] = [] + for data in self._gather_peer_data(): + app_name = data.app_name + if app_name in seen_apps: + continue # dedup! + seen_apps.append(app_name) + + for encoded_dashboard in data.dashboards or (): + content = GrafanaDashboard(encoded_dashboard)._deserialize() + + title = content.get("title", "no_title") + + dashboards.append( + { + "relation_id": data.principal_relation_id, + # We have the remote charm name - use it for the identifier + "charm": f"{data.principal_relation_name}-{app_name}", + "content": content, + "title": title, + } + ) + + return dashboards diff --git a/ceph-mon/metadata.yaml b/ceph-mon/metadata.yaml index 85e088fb..956a6aa2 100644 --- a/ceph-mon/metadata.yaml +++ b/ceph-mon/metadata.yaml @@ -43,6 +43,9 @@ provides: interface: prometheus_scrape dashboard: interface: ceph-dashboard + cos-agent: + interface: cos_agent + requires: bootstrap-source: interface: ceph-bootstrap diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index a1745b31..e720c846 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -9,6 +9,8 @@ import logging import os.path import pathlib +import socket + from typing import Optional, Union, List, TYPE_CHECKING import ops.model @@ -17,6 +19,7 @@ import charm from charms.prometheus_k8s.v0 import prometheus_scrape +from charms.grafana_agent.v0 import cos_agent from charms_ceph import utils as ceph_utils from ops.framework import BoundEvent from utils import mgr_config_set_rbd_stats_pools @@ -28,6 +31,10 @@ "metrics_path": "/metrics", "static_configs": [{"targets": ["*:9283"]}], } +DEFAULT_CEPH_METRICS_ENDPOINT = { + "path": "/metrics", + "port": 9283, +} DEFAULT_ALERT_RULES_RELATIVE_PATH = "files/prometheus_alert_rules" @@ -144,3 +151,77 @@ def update_alert_rules(self): self._charm._stored.alert_rule_errors = msg return self._set_alert_rules(alert_rules_as_dict) + + +class CephCOSAgentProvider(cos_agent.COSAgentProvider): + + def __init__(self, charm): + super().__init__( + charm, + metrics_rules_dir="./files/prometheus_alert_rules", + dashboard_dirs=["./files/grafana_dashboards"], + scrape_configs=self._custom_scrape_configs, + ) + events = self._charm.on[cos_agent.DEFAULT_RELATION_NAME] + self.framework.observe( + events.relation_departed, self._on_relation_departed + ) + + def _on_refresh(self, event): + """Enable prometheus on relation change""" + if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): + logger.debug("refreshing cos_agent relation") + mgr_config_set_rbd_stats_pools() + ceph_utils.mgr_enable_module("prometheus") + super()._on_refresh(event) + + def _on_relation_departed(self, event): + """Disable prometheus on depart of relation""" + if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): + logger.debug( + "is_leader and is_bootstrapped, running rel departed: %s", + event, + ) + ceph_utils.mgr_disable_module("prometheus") + logger.debug("module_disabled") + + def _custom_scrape_configs(self): + fqdn = socket.getfqdn() + fqdn_parts = fqdn.split('.') + domain = '.'.join(fqdn_parts[1:]) if len(fqdn_parts) > 1 else fqdn + return [ + { + "metrics_path": "/metrics", + "static_configs": [{"targets": ["localhost:9283"]}], + "honor_labels": True, + "metric_relabel_configs": [ + { + # localhost:9283 is the generic default instance label + # added by grafana-agent which is kinda useless. 
+ # Replace it with a somewhat more meaningful label + "source_labels": ["instance"], + "regex": "^localhost:9283$", + "target_label": "instance", + "action": "replace", + "replacement": "ceph_cluster", + }, + { # if we have a non-empty hostname label, use it as the + # instance label + "source_labels": ["hostname"], + "regex": "(.+)", + "target_label": "instance", + "action": "replace", + "replacement": "${1}", + }, + { # tack on the domain to the instance label to make it + # conform to grafana-agent's node-exporter expectations + "source_labels": ["instance"], + "regex": "(.*)", + "target_label": "instance", + "action": "replace", + "replacement": "${1}." + domain, + }, + ] + }, + + ] diff --git a/ceph-mon/src/charm.py b/ceph-mon/src/charm.py index 1458e3c3..e433f071 100755 --- a/ceph-mon/src/charm.py +++ b/ceph-mon/src/charm.py @@ -25,7 +25,6 @@ import ops_actions - logger = logging.getLogger(__name__) @@ -217,6 +216,7 @@ def __init__(self, *args): self.clients = ceph_client.CephClientProvides(self) self.metrics_endpoint = ceph_metrics.CephMetricsEndpointProvider(self) + self.cos_agent = ceph_metrics.CephCOSAgentProvider(self) self.ceph_status = ceph_status.StatusAssessor(self) self.mds = ceph_mds.CephMdsProvides(self) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 0e30a1e5..9ba127ae 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -53,3 +53,6 @@ croniter # needed for charm-rabbitmq-server unit tests # icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test # failures. Pin pyopenssl to resolve the failure. pyopenssl<=22.0.0 + +pydantic < 2 +cosl diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index f93141f1..6e11c3b2 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -17,8 +17,7 @@ import helpers -@helpers.patch_network_get() -class TestCephMetrics(unittest.TestCase): +class CephMetricsTestBase(unittest.TestCase): @classmethod def setUpClass(cls): """Run once before tests begin.""" @@ -33,11 +32,17 @@ def setUpClass(cls): rules: [] """ ) + rules_file = cls.rules_dir / "alert-rules.yaml" + with rules_file.open("w") as f: + f.write(cls.rules) @classmethod def tearDownClass(cls): cls.tempdir.cleanup() + +@helpers.patch_network_get() +class TestCephMetrics(CephMetricsTestBase): def setUp(self): super().setUp() self.harness = Harness(charm.CephMonCharm) @@ -134,3 +139,60 @@ def test_update_alert_rules(self, _is_bootstrapped): self.harness.charm.metrics_endpoint.update_alert_rules() alert_rules = self.get_alert_rules(rel_id) self.assertTrue(alert_rules.get("groups")) + + +class TestCephCOSAgentProvider(CephMetricsTestBase): + def setUp(self): + super().setUp() + self.harness = Harness(charm.CephMonCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + self.harness.set_leader(True) + self.harness.charm.cos_agent._metrics_rules = self.rules_dir + + def test_init(self): + self.assertEqual( + self.harness.charm.cos_agent._relation_name, + "cos-agent", + ) + + @patch("ceph_metrics.mgr_config_set_rbd_stats_pools", lambda: None) + @patch("ceph_metrics.ceph_utils.is_bootstrapped", return_value=True) + @patch("ceph_metrics.ceph_utils.is_mgr_module_enabled", return_value=False) + @patch("ceph_metrics.ceph_utils.mgr_enable_module") + @patch("ceph_metrics.ceph_utils.mgr_disable_module") + def test_add_remove_rel( + self, + mgr_disable_module, + mgr_enable_module, + _is_mgr_module_enable, + 
_is_bootstrapped,
+    ):
+        rel_id = self.harness.add_relation("cos-agent", "grafana-agent")
+        self.harness.add_relation_unit(rel_id, "grafana-agent/0")
+
+        unit_rel_data = self.harness.get_relation_data(
+            rel_id, self.harness.model.unit
+        )
+        data = json.loads(unit_rel_data["config"])
+        self.assertTrue("metrics_scrape_jobs" in data)
+        self.assertEqual(
+            data["metrics_scrape_jobs"][0]["metrics_path"], "/metrics"
+        )
+        self.assertTrue("metrics_alert_rules" in data)
+        self.assertTrue("groups" in data["metrics_alert_rules"])
+        mgr_enable_module.assert_called_once()
+
+        self.harness.remove_relation(rel_id)
+        mgr_disable_module.assert_called_once()
+
+    @patch("socket.getfqdn", return_value="node1.ceph.example.com")
+    def test_custom_scrape_configs(self, _mock_getfqdn):
+        configs = self.harness.charm.cos_agent._custom_scrape_configs()
+        self.assertEqual(
+            configs[0]["static_configs"][0]["targets"], ["localhost:9283"]
+        )
+        self.assertEqual(
+            configs[0]["metric_relabel_configs"][0]["replacement"],
+            "ceph_cluster",
+        )

From 33b99d664f5aeb808c8a1352f105a18e68de1f01 Mon Sep 17 00:00:00 2001
From: Federico Bosi
Date: Tue, 30 Apr 2024 18:45:45 +0200
Subject: [PATCH 2618/2699] Allow static ipv6 addresses & binding check

The charm used to allow only dynamic ipv6 addresses. Following some
testing (see the LP bug below), static addresses are now verified to
work as well.

The charm also didn't check whether the first local address returned by
get_ipv6_addr was the same address that juju provided for the public
endpoint. This could leave ceph bound to an arbitrary local address
while juju's address was still the one advertised to the other mons, so
nothing was listening on the address they connected to. We now log a
warning when this happens, but default to the previous behavior.

Closes-Bug: #2061836
Change-Id: I63eefba2714ccc3189f5260012b376c531052b86
---
 ceph-mon/src/ceph_hooks.py             | 26 +++++++++++++++++++++++---
 ceph-mon/src/utils.py                  |  2 +-
 ceph-mon/unit_tests/test_ceph_hooks.py |  9 ++++++++-
 3 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/ceph-mon/src/ceph_hooks.py b/ceph-mon/src/ceph_hooks.py
index 101b1066..e8896ac5 100755
--- a/ceph-mon/src/ceph_hooks.py
+++ b/ceph-mon/src/ceph_hooks.py
@@ -205,13 +205,33 @@ def get_ceph_context():
     }
 
     if config('prefer-ipv6'):
-        dynamic_ipv6_address = get_ipv6_addr()[0]
         cephcontext['ms_bind_ipv4'] = False
         cephcontext['ms_bind_ipv6'] = True
+
+        local_addresses = get_ipv6_addr(dynamic_only=False)
+        public_addr = get_public_addr()
+        cluster_addr = get_cluster_addr()
+        # try binding to the address that juju will pass to other charms
+        if public_addr not in local_addresses:
+            log(f"Couldn't find a match for our assigned "
+                f"public ip {public_addr} "
+                f"out of {local_addresses}, "
+                f"using default {local_addresses[0]}",
+                level=WARNING)
+            public_addr = local_addresses[0]
+
+        if cluster_addr not in local_addresses:
+            log(f"Couldn't find a match for our assigned "
+                f"cluster ip {cluster_addr} "
+                f"out of {local_addresses}, "
+                f"using default {local_addresses[0]}",
+                level=WARNING)
+            cluster_addr = local_addresses[0]
+
         if not public_network:
-            cephcontext['public_addr'] = dynamic_ipv6_address
+            cephcontext['public_addr'] = public_addr
         if not cluster_network:
-            cephcontext['cluster_addr'] = dynamic_ipv6_address
+            cephcontext['cluster_addr'] = cluster_addr
     else:
         cephcontext['public_addr'] = get_public_addr()
         cephcontext['cluster_addr'] = get_cluster_addr()
diff --git a/ceph-mon/src/utils.py b/ceph-mon/src/utils.py
index b7c49b9d..1829f9c8 100644
--- a/ceph-mon/src/utils.py
+++ b/ceph-mon/src/utils.py
@@ -130,7 +130,7 @@ def
get_unit_hostname():
 @cached
 def get_host_ip(hostname=None):
     if config('prefer-ipv6'):
-        return get_ipv6_addr()[0]
+        return get_ipv6_addr(dynamic_only=False)[0]
 
     hostname = hostname or unit_get('private-address')
     try:
diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py
index b8407818..713583bf 100644
--- a/ceph-mon/unit_tests/test_ceph_hooks.py
+++ b/ceph-mon/unit_tests/test_ceph_hooks.py
@@ -200,8 +200,15 @@ def test_get_ceph_context_w_config_flags_invalid(self, mock_config,
         self.assertEqual(ctxt, expected)
 
     @patch.object(ceph_hooks, 'get_rbd_features', return_value=None)
+    # Provide multiple local addresses;
+    # we'll check that the right (second) one is used
    @patch.object(ceph_hooks, 'get_ipv6_addr',
-                  lambda **kwargs: ["2a01:348:2f4:0:685e:5748:ae62:209f"])
+                  lambda **kwargs: ["2a01:348:2f4:0:bad:bad:bad:bad",
+                                    "2a01:348:2f4:0:685e:5748:ae62:209f"])
+    @patch.object(ceph_hooks, 'get_public_addr',
+                  lambda *args: "2a01:348:2f4:0:685e:5748:ae62:209f")
+    @patch.object(ceph_hooks, 'get_cluster_addr',
+                  lambda *args: "2a01:348:2f4:0:685e:5748:ae62:209f")
    @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
    @patch.object(ceph_hooks, 'get_mon_hosts',
                  lambda *args: ['2a01:348:2f4:0:685e:5748:ae62:209f',

From 693c9f08af3ace53116051b9648e8982a8642847 Mon Sep 17 00:00:00 2001
From: Luciano Lo Giudice
Date: Thu, 11 Apr 2024 13:37:19 -0300
Subject: [PATCH 2619/2699] Implement key rotation for OSD

This patchset implements key rotation for OSDs. It does so by receiving
an update in the relation data bag from the ceph-mon units (where the
action is initiated), telling each OSD unit which OSD's key needs to be
rotated, along with the new key itself. The OSD units then check
whether they manage the specified ID and, if so, proceed to rotate the
key.
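
For illustration (a sketch; the key value below is a placeholder, not
real key material), the mons publish a JSON map of OSD id to new key:

    pending_key = '{"0": "<new cephx key>"}'

and for every OSD id present under /var/lib/ceph/osd on a unit, that
unit re-keys the keyring (via ceph-authtool --add-key) and restarts
ceph-osd@<id>.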
Change-Id: I382a0a657b31c172a036ce7ca62facbbce32b4a0 --- ceph-osd/hooks/ceph_hooks.py | 14 ++++++++++++++ ceph-osd/hooks/utils.py | 23 +++++++++++++++++++---- ceph-osd/unit_tests/test_ceph_hooks.py | 12 ++++++++++++ ceph-osd/unit_tests/test_ceph_utils.py | 12 ++++++++++++ 4 files changed, 57 insertions(+), 4 deletions(-) diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index 993166a7..174e0f45 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -85,6 +85,7 @@ import_osd_upgrade_key, import_osd_removal_key, import_client_crash_key, + import_pending_key, get_host_ip, get_networks, assert_charm_supports_ipv6, @@ -738,8 +739,21 @@ def get_bdev_enable_discard(): "bdev-enable-discard: %s") % bdev_enable_discard) +def handle_pending_key(pending_key): + for osd_id, key in json.loads(pending_key).items(): + if not os.path.exists('/var/lib/ceph/osd/ceph-%s' % osd_id): + continue + import_pending_key(key, osd_id) + service_restart('ceph-osd@%s' % osd_id) + + @hooks.hook('mon-relation-changed') def mon_relation(): + pending_key = relation_get('pending_key') + if pending_key: + handle_pending_key(pending_key) + return + bootstrap_key = relation_get('osd_bootstrap_key') upgrade_key = relation_get('osd_upgrade_key') removal_key = relation_get('osd_disk_removal_key') diff --git a/ceph-osd/hooks/utils.py b/ceph-osd/hooks/utils.py index a86b99eb..b293e6a0 100644 --- a/ceph-osd/hooks/utils.py +++ b/ceph-osd/hooks/utils.py @@ -85,15 +85,17 @@ def is_osd_bootstrap_ready(): return os.path.exists(_bootstrap_keyring) -def _import_key(key, path, name): - if not os.path.exists(path): +def _import_key(key, path, name, override=False): + exists = os.path.exists(path) + if not exists or override: + create = ['--create-keyring'] if not exists else [] cmd = [ 'sudo', '-u', ceph.ceph_user(), 'ceph-authtool', - path, - '--create-keyring', + path + ] + create + [ '--name={}'.format(name), '--add-key={}'.format(key) ] @@ -140,6 +142,19 @@ def import_client_crash_key(key): _import_key(key, _client_crash_keyring, 'client.crash') +def import_pending_key(key, osd_id): + """ + Import a pending key, used for key rotation. + + :param key: The pending cephx key that will replace the current one. + :type key: str + :param osd_id: The OSD id whose key will be replaced. + :type osd_id: str + :raises: subprocess.CalledProcessError""" + _import_key(key, '/var/lib/ceph/osd/ceph-%s/keyring' % osd_id, + 'osd.%s' % osd_id, override=True) + + def render_template(template_name, context, template_dir=TEMPLATES_DIR): """Render Jinja2 template. 
diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index b4cceea0..3d272789 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -835,6 +835,18 @@ def config_func(k): level=ceph_hooks.ERROR, ) + @patch.object(ceph_hooks, 'service_restart') + @patch.object(ceph_hooks, 'import_pending_key') + @patch.object(ceph_hooks.os.path, 'exists') + def test_handle_pending_key(self, exists, import_pending_key, + service_restart): + exists.return_value = True + pending_key = '{"0":"some-key"}' + ceph_hooks.handle_pending_key(pending_key) + exists.assert_called_with('/var/lib/ceph/osd/ceph-0') + import_pending_key.assert_called_with('some-key', '0') + service_restart.assert_called_with('ceph-osd@0') + @patch.object(ceph_hooks, 'local_unit') @patch.object(ceph_hooks, 'relation_get') diff --git a/ceph-osd/unit_tests/test_ceph_utils.py b/ceph-osd/unit_tests/test_ceph_utils.py index f0fbabd6..2ae572aa 100644 --- a/ceph-osd/unit_tests/test_ceph_utils.py +++ b/ceph-osd/unit_tests/test_ceph_utils.py @@ -352,3 +352,15 @@ def test_parent_device(self, check_output): }] }''' self.assertEqual(utils.get_parent_device('/dev/loop1p1'), '/dev/loop1') + + @patch.object(utils.ceph, 'ceph_user') + @patch.object(utils.subprocess, 'check_call') + @patch.object(utils.os.path, 'exists') + def test_import_pending_key(self, exists, check_call, ceph_user): + ceph_user.return_value = 'ceph' + exists.return_value = True + utils.import_pending_key('some-key', '0') + exists.assert_called_with('/var/lib/ceph/osd/ceph-0/keyring') + check_call.assert_called_with(['sudo', '-u', 'ceph', 'ceph-authtool', + '/var/lib/ceph/osd/ceph-0/keyring', + '--name=osd.0', '--add-key=some-key']) From a3512b3c22fa4e1a81b79ff0653288d6d4efd8f7 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 9 May 2024 14:55:02 +0200 Subject: [PATCH 2620/2699] Update for Ceph Squid Change default origin to caracal to pick Squid packages. Add noble bundle, remove old focal bundles and update associated test configuration files. Change-Id: I3f09703a25b364f974b5fc10c61662d4a5c0af56 --- ceph-mon/.zuul.yaml | 1 - ceph-mon/config.yaml | 2 +- ceph-mon/osci.yaml | 13 - ceph-mon/tests/bundles/local-focal-yoga.yaml | 262 ------------------ .../{focal-yoga.yaml => noble-caracal.yaml} | 56 ++-- ceph-mon/tests/tests.yaml | 5 +- 6 files changed, 30 insertions(+), 309 deletions(-) delete mode 100644 ceph-mon/tests/bundles/local-focal-yoga.yaml rename ceph-mon/tests/bundles/{focal-yoga.yaml => noble-caracal.yaml} (89%) diff --git a/ceph-mon/.zuul.yaml b/ceph-mon/.zuul.yaml index 69974080..fd20909e 100644 --- a/ceph-mon/.zuul.yaml +++ b/ceph-mon/.zuul.yaml @@ -1,5 +1,4 @@ - project: templates: - - openstack-python3-charm-yoga-jobs - openstack-python3-charm-jobs - openstack-cover-jobs diff --git a/ceph-mon/config.yaml b/ceph-mon/config.yaml index 23dff176..9be01566 100644 --- a/ceph-mon/config.yaml +++ b/ceph-mon/config.yaml @@ -10,7 +10,7 @@ options: If set to True, supporting services will log to syslog. source: type: string - default: bobcat + default: caracal description: | Optional configuration to support use of additional sources such as: . 
diff --git a/ceph-mon/osci.yaml b/ceph-mon/osci.yaml index d1262adf..c02b3896 100644 --- a/ceph-mon/osci.yaml +++ b/ceph-mon/osci.yaml @@ -1,6 +1,5 @@ - project: templates: - - charm-unit-jobs-py38 - charm-unit-jobs-py310 - charm-functional-jobs vars: @@ -8,15 +7,3 @@ charm_build_name: ceph-mon build_type: charmcraft charmcraft_channel: 2.x/stable - check: - jobs: - - new-install-focal-yoga -- job: - name: new-install-focal-yoga - parent: func-target - dependencies: - - osci-lint - - charm-build - - tox-py38 - vars: - tox_extra_args: '-- install:local-focal-yoga' diff --git a/ceph-mon/tests/bundles/local-focal-yoga.yaml b/ceph-mon/tests/bundles/local-focal-yoga.yaml deleted file mode 100644 index 98c46c8b..00000000 --- a/ceph-mon/tests/bundles/local-focal-yoga.yaml +++ /dev/null @@ -1,262 +0,0 @@ -variables: - openstack-origin: &openstack-origin cloud:focal-yoga - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: 8.0.19/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-mon: - charm: ../../ceph-mon.charm - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: quincy/edge - to: - - '17' - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: yoga/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: yoga/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: yoga/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 
'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/bundles/focal-yoga.yaml b/ceph-mon/tests/bundles/noble-caracal.yaml similarity index 89% rename from ceph-mon/tests/bundles/focal-yoga.yaml rename to ceph-mon/tests/bundles/noble-caracal.yaml index bb475bc1..b0b80b50 100644 --- a/ceph-mon/tests/bundles/focal-yoga.yaml +++ b/ceph-mon/tests/bundles/noble-caracal.yaml @@ -1,7 +1,9 @@ variables: - openstack-origin: &openstack-origin cloud:focal-yoga + openstack-origin: &openstack-origin distro -series: focal +local_overlay_enabled: False + +series: noble comment: - 'machines section to decide order of deployment. 
database sooner = faster' @@ -33,30 +35,35 @@ applications: keystone-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge cinder-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge nova-cloud-controller-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge placement-mysql-router: charm: ch:mysql-router - channel: 8.0.19/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster num_units: 3 - options: - source: *openstack-origin to: - '0' - '1' - '2' - channel: 8.0.19/edge + channel: 8.0/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: 3.9/edge ceph-osd: charm: ch:ceph-osd @@ -70,11 +77,11 @@ applications: - '3' - '4' - '5' - channel: quincy/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon - channel: quincy/edge + channel: latest/edge num_units: 3 options: source: *openstack-origin @@ -89,19 +96,10 @@ applications: num_units: 1 options: source: *openstack-origin - channel: quincy/edge + channel: latest/edge to: - '17' - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - channel: 3.9/edge - keystone: expose: True charm: ch:keystone @@ -110,7 +108,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: yoga/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -120,7 +118,7 @@ applications: libvirt-image-backend: rbd to: - '11' - channel: yoga/edge + channel: latest/edge glance: expose: True @@ -130,7 +128,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: yoga/edge + channel: latest/edge cinder: expose: True @@ -142,11 +140,11 @@ applications: openstack-origin: *openstack-origin to: - '13' - channel: yoga/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: yoga/edge + channel: latest/edge nova-cloud-controller: expose: True @@ -156,7 +154,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: yoga/edge + channel: latest/edge placement: charm: ch:placement @@ -165,7 +163,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: yoga/edge + channel: latest/edge prometheus2: charm: ch:prometheus2 diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index af26df09..08d034d3 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,14 +1,13 @@ charm_name: ceph-mon gate_bundles: - - focal-xena - - focal-yoga - jammy-yoga - jammy-bobcat - jammy-caracal + - noble-caracal smoke_bundles: - - focal-yoga + - jammy-caracal configure: - install: From 7f750c93b4934801d3ab50ad1679702c5d873951 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 10 May 2024 11:13:34 +0200 Subject: [PATCH 2621/2699] Fixes for ceph squid Make caracal the default source. Add python3-packaging package as a tactical fix for bug #2064717. Add a local caracal functest. 
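
The tactical fix just appends the extra package at install time, e.g.
(mirroring the hook change below):

    packages = ceph.determine_packages()
    packages.append('python3-packaging')  # temp. fix for bug #2064717
    apt_install(packages=packages, fatal=True)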
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1209 Change-Id: I1328d2c6221c77bfe0f2420af770cc5064394c57 --- ceph-osd/.zuul.yaml | 1 - ceph-osd/config.yaml | 2 +- ceph-osd/hooks/ceph_hooks.py | 6 +++++- ceph-osd/osci.yaml | 6 +++--- ...my-antelope.yaml => local-jammy-caracal.yaml} | 16 ++++++++-------- ceph-osd/tests/tests.yaml | 4 +--- 6 files changed, 18 insertions(+), 17 deletions(-) rename ceph-osd/tests/bundles/{local-jammy-antelope.yaml => local-jammy-caracal.yaml} (94%) diff --git a/ceph-osd/.zuul.yaml b/ceph-osd/.zuul.yaml index 69974080..fd20909e 100644 --- a/ceph-osd/.zuul.yaml +++ b/ceph-osd/.zuul.yaml @@ -1,5 +1,4 @@ - project: templates: - - openstack-python3-charm-yoga-jobs - openstack-python3-charm-jobs - openstack-cover-jobs diff --git a/ceph-osd/config.yaml b/ceph-osd/config.yaml index 7d6954bb..838261fb 100644 --- a/ceph-osd/config.yaml +++ b/ceph-osd/config.yaml @@ -5,7 +5,7 @@ options: description: OSD debug level. Max is 20. source: type: string - default: bobcat + default: caracal description: | Optional configuration to support use of additional sources such as: . diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index ae5815a5..d37c4d16 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -321,7 +321,11 @@ def install_udev_rules(): def install(): add_source(config('source'), config('key')) apt_update(fatal=True) - apt_install(packages=ceph.determine_packages(), fatal=True) + packages = ceph.determine_packages() + # TODO(chrome0): temp. fix for bug #2064717; remove once this has been + # fixed + packages.append('python3-packaging') + apt_install(packages=packages, fatal=True) if config('autotune'): log('The autotune config is deprecated and planned ' 'for removal in the next release.', level=WARNING) diff --git a/ceph-osd/osci.yaml b/ceph-osd/osci.yaml index d78147f4..26d8e8b8 100644 --- a/ceph-osd/osci.yaml +++ b/ceph-osd/osci.yaml @@ -10,13 +10,13 @@ charmcraft_channel: 2.x/stable check: jobs: - - new-install-jammy-antelope + - new-install-jammy-caracal - job: - name: new-install-jammy-antelope + name: new-install-jammy-caracal parent: func-target dependencies: - osci-lint - charm-build - tox-py38 vars: - tox_extra_args: '-- install:local-jammy-antelope' + tox_extra_args: '-- install:local-jammy-caracal' diff --git a/ceph-osd/tests/bundles/local-jammy-antelope.yaml b/ceph-osd/tests/bundles/local-jammy-caracal.yaml similarity index 94% rename from ceph-osd/tests/bundles/local-jammy-antelope.yaml rename to ceph-osd/tests/bundles/local-jammy-caracal.yaml index 48a8b36a..a7f59e04 100644 --- a/ceph-osd/tests/bundles/local-jammy-antelope.yaml +++ b/ceph-osd/tests/bundles/local-jammy-caracal.yaml @@ -1,5 +1,5 @@ variables: - openstack-origin: &openstack-origin cloud:jammy-antelope + openstack-origin: &openstack-origin cloud:jammy-caracal series: jammy @@ -94,7 +94,7 @@ applications: openstack-origin: *openstack-origin to: - '10' - channel: 2023.1/edge + channel: latest/edge nova-compute: charm: ch:nova-compute @@ -103,7 +103,7 @@ applications: openstack-origin: *openstack-origin to: - '11' - channel: 2023.1/edge + channel: latest/edge glance: expose: True @@ -113,7 +113,7 @@ applications: openstack-origin: *openstack-origin to: - '12' - channel: 2023.1/edge + channel: latest/edge cinder: expose: True @@ -125,11 +125,11 @@ applications: glance-api-version: '2' to: - '13' - channel: 2023.1/edge + channel: latest/edge cinder-ceph: charm: ch:cinder-ceph - channel: 2023.1/edge + channel: 
latest/edge nova-cloud-controller: expose: True @@ -139,7 +139,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: 2023.1/edge + channel: latest/edge placement: charm: ch:placement @@ -148,7 +148,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: 2023.1/edge + channel: latest/edge relations: - - 'nova-compute:amqp' diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index 7a5802ab..ec5877dd 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,14 +1,12 @@ charm_name: ceph-osd gate_bundles: - - focal-xena - - focal-yoga - jammy-yoga - jammy-bobcat - jammy-caracal smoke_bundles: - - jammy-antelope + - jammy-caracal configure: - install: From c3c2f06a117192ef82c1bea9c29d4f31dfd8d653 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Fri, 17 May 2024 12:46:00 -0300 Subject: [PATCH 2622/2699] Caracal-Squid enablement for ceph-iscsi Change-Id: Iba3a572d7429d8fa62bf52b4e5393a765efdfbc0 --- ceph-iscsi/charmcraft.yaml | 3 --- ceph-iscsi/metadata.yaml | 1 - ceph-iscsi/tests/bundles/focal-ec.yaml | 4 ++-- ceph-iscsi/tests/bundles/focal.yaml | 4 ++-- ceph-iscsi/tests/bundles/jammy-ec.yaml | 4 ++-- ceph-iscsi/tests/bundles/jammy-reef-ec.yaml | 4 ++-- ceph-iscsi/tests/bundles/jammy-reef.yaml | 4 ++-- ceph-iscsi/tests/bundles/jammy.yaml | 4 ++-- 8 files changed, 12 insertions(+), 16 deletions(-) diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 52f92540..2edbf324 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -36,6 +36,3 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-iscsi/metadata.yaml b/ceph-iscsi/metadata.yaml index 452d26e7..e98e9c21 100644 --- a/ceph-iscsi/metadata.yaml +++ b/ceph-iscsi/metadata.yaml @@ -13,7 +13,6 @@ tags: series: - focal - jammy -- lunar subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml index f27597f1..d72f97a1 100644 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ b/ceph-iscsi/tests/bundles/focal-ec.yaml @@ -58,7 +58,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -69,7 +69,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml index 951cfaa8..cab5a7d8 100644 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ b/ceph-iscsi/tests/bundles/focal.yaml @@ -55,7 +55,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -66,7 +66,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault diff --git a/ceph-iscsi/tests/bundles/jammy-ec.yaml b/ceph-iscsi/tests/bundles/jammy-ec.yaml index 9de7d4fe..3bcc745a 100644 --- a/ceph-iscsi/tests/bundles/jammy-ec.yaml +++ b/ceph-iscsi/tests/bundles/jammy-ec.yaml @@ -58,7 +58,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -69,7 +69,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault diff --git a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml 
b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml index 646f8504..a611c260 100644 --- a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml +++ b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml @@ -58,7 +58,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -69,7 +69,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: reef/edge vault: num_units: 1 charm: ch:vault diff --git a/ceph-iscsi/tests/bundles/jammy-reef.yaml b/ceph-iscsi/tests/bundles/jammy-reef.yaml index 70d043da..fafd0467 100644 --- a/ceph-iscsi/tests/bundles/jammy-reef.yaml +++ b/ceph-iscsi/tests/bundles/jammy-reef.yaml @@ -59,7 +59,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -70,7 +70,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: reef/edge vault: num_units: 1 charm: ch:vault diff --git a/ceph-iscsi/tests/bundles/jammy.yaml b/ceph-iscsi/tests/bundles/jammy.yaml index 4c26c99a..0a63de9f 100644 --- a/ceph-iscsi/tests/bundles/jammy.yaml +++ b/ceph-iscsi/tests/bundles/jammy.yaml @@ -59,7 +59,7 @@ applications: - '11' - '12' - '13' - channel: latest/edge + channel: quincy/edge ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -70,7 +70,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: quincy/edge vault: num_units: 1 charm: ch:vault From 81a612fe97f39955758a9839d7cb59bfdf8c1dde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Juli=C3=A1n=20Espina?= Date: Fri, 3 May 2024 18:00:57 -0600 Subject: [PATCH 2623/2699] feat: implement support for the `cephfs-share` relation This allows using the `cephfs-client` charm to connect to the CephFS share without having to do a manual setup. Change-Id: I1823dc0624c66232cd282ed098c7a58ea7456fa8 --- ceph-fs/.gitignore | 2 +- .../interfaces/cephfs_share/interface.yaml | 3 + ceph-fs/interfaces/cephfs_share/provides.py | 99 +++++++++++++++++++ ceph-fs/src/layer.yaml | 2 +- ceph-fs/src/metadata.yaml | 4 + ceph-fs/src/reactive/ceph_fs.py | 38 ++++++- ceph-fs/unit_tests/test_reactive_ceph_fs.py | 75 +++++++++++++- 7 files changed, 216 insertions(+), 7 deletions(-) create mode 100644 ceph-fs/interfaces/cephfs_share/interface.yaml create mode 100644 ceph-fs/interfaces/cephfs_share/provides.py diff --git a/ceph-fs/.gitignore b/ceph-fs/.gitignore index 231d85f4..3c71ec16 100644 --- a/ceph-fs/.gitignore +++ b/ceph-fs/.gitignore @@ -1,10 +1,10 @@ build .tox layers -interfaces .testrepository __pycache__ *.pyc .idea .stestr *.charm +.vscode diff --git a/ceph-fs/interfaces/cephfs_share/interface.yaml b/ceph-fs/interfaces/cephfs_share/interface.yaml new file mode 100644 index 00000000..f02fe2df --- /dev/null +++ b/ceph-fs/interfaces/cephfs_share/interface.yaml @@ -0,0 +1,3 @@ +name: cephfs_share +summary: CephFS Share provider interface +version: 1 \ No newline at end of file diff --git a/ceph-fs/interfaces/cephfs_share/provides.py b/ceph-fs/interfaces/cephfs_share/provides.py new file mode 100644 index 00000000..3ae7d8fc --- /dev/null +++ b/ceph-fs/interfaces/cephfs_share/provides.py @@ -0,0 +1,99 @@ +from charms.reactive import scopes, when, set_flag, clear_flag +from charms.reactive.endpoints import Endpoint + +from charmhelpers.core import hookenv + +from typing import Iterable, Dict, Set + +import json + +class _Transaction: + """Store transaction information between data mappings.""" + + def __init__(self, added: Set, changed: Set, deleted: Set): + self.added: Set = added + self.changed: Set = changed + 
self.deleted: Set = deleted
+
+def _eval(relation) -> _Transaction:
+    """Evaluate the difference between the cached and current relation databag contents.
+
+    Args:
+        relation: Relation with the written data.
+
+    Returns:
+        _Transaction:
+            Transaction info containing the added, deleted, and changed
+            keys from the relation databag.
+    """
+    # Retrieve the old data from the "cache" key in the unit databag.
+    old_data = json.loads(relation.to_publish_raw.get("cache", "{}"))
+    # Retrieve the new data from the relation databag.
+    new_data = {
+        key: value for key, value in relation.received_app.items() if key != "cache"
+    }
+    # These are the keys that were added to the databag and triggered this event.
+    added = new_data.keys() - old_data.keys()
+    # These are the keys that were removed from the databag and triggered this event.
+    deleted = old_data.keys() - new_data.keys()
+    # These are the keys that were added, or that already existed but had their values changed.
+    changed = added.union(
+        {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
+    )
+    # Serialize the new data and cache it for the next diff check.
+    relation.to_publish_raw.update({
+        "cache": json.dumps(new_data)
+    })
+
+    # Return the transaction with all possible changes.
+    return _Transaction(added, changed, deleted)
+
+class CephFSProvides(Endpoint):
+
+    @when('endpoint.{endpoint_name}.changed')
+    def changed(self):
+        if hookenv.is_leader():
+            for relation in self.relations:
+                transaction = _eval(relation)
+                if "name" in transaction.added:
+                    set_flag(self.expand_name('{endpoint_name}.available'))
+
+    def manage_flags(self):
+        if not self.is_joined:
+            clear_flag(
+                self.expand_name('{endpoint_name}.available')
+            )
+
+    def set_share(self, share_info: Dict, auth_info: Dict) -> None:
+        """Set info for mounting a CephFS share.
+
+        Args:
+            share_info: Dictionary with the information required to mount the CephFS share.
+                - fsid: ID of the Ceph cluster.
+                - name: Name of the exported Ceph filesystem.
+                - path: Exported path of the Ceph filesystem.
+                - monitor_hosts: Address list of the available Ceph MON nodes.
+            auth_info: Dictionary with the information required to authenticate against the Ceph cluster.
+                - username: Name of the user authorized to access the Ceph filesystem.
+                - key: Cephx key for the authorized user.
+
+        Notes:
+            Only the application leader unit can set the CephFS share data.
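+
+        Example (values are illustrative only):
+
+            endpoint.set_share(
+                share_info={
+                    "fsid": "354ca7c4-f10d-11ee-93f8-1f85f87b7845",
+                    "name": "ceph-fs",
+                    "path": "/",
+                    "monitor_hosts": ["10.5.0.80:6789", "10.5.2.23:6789"],
+                },
+                auth_info={"username": "ceph-fs-client", "key": "<cephx key>"},
+            )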
+ """ + if hookenv.is_leader(): + share_info = json.dumps({ + 'fsid': share_info['fsid'], + 'name': share_info['name'], + 'path': share_info['path'], + 'monitor_hosts': share_info['monitor_hosts'] + }) + auth_info = json.dumps({ + 'username': auth_info['username'], + 'key': auth_info['key'] + }) + for relation in self.relations: + relation.to_publish_app_raw.update({ + "share_info": share_info, + "auth": f"plain:{auth_info}", + }) diff --git a/ceph-fs/src/layer.yaml b/ceph-fs/src/layer.yaml index ae53d22d..146b2947 100644 --- a/ceph-fs/src/layer.yaml +++ b/ceph-fs/src/layer.yaml @@ -1,4 +1,4 @@ -includes: ['layer:ceph', 'interface:ceph-mds'] +includes: ['layer:ceph', 'interface:ceph-mds', 'interface:cephfs_share'] options: basic: use_venv: True diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index e20f31f3..19cb99a8 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -19,5 +19,9 @@ subordinate: false requires: ceph-mds: interface: ceph-mds +provides: + cephfs-share: + interface: cephfs_share + extra-bindings: public: diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 8dc98980..2059a2d2 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -17,8 +17,10 @@ import charmhelpers.core as ch_core from charmhelpers.core.hookenv import ( - service_name, + service_name, application_name, + is_leader, config) +from charmhelpers.contrib.storage.linux import ceph import charms_openstack.bus import charms_openstack.charm as charm @@ -205,3 +207,37 @@ def storage_ceph_connected(ceph): weight=metadata_weight, app_name=ceph_mds.ceph_pool_app_name) ceph_mds.request_cephfs(service, extra_pools=extra_pools) + # Must have a current request thanks to the call above + rq = ceph_mds.get_current_request() + rq.add_op({ + 'op': 'create-cephfs-client', + 'fs_name': service, + 'client_id': '{}-client'.format(service), + 'path': "/", + 'perms': 'rw', + }) + ceph_mds.send_request_if_needed(rq) + + +@reactive.when_none('charm.paused', 'run-default-update-status') +@reactive.when('cephfs.configured', 'ceph-mds.pools.available', + 'cephfs-share.available') +def cephfs_share_available(): + cephfs_share = reactive.endpoint_from_flag('cephfs-share.available') + ceph_mds = reactive.endpoint_from_flag('ceph-mds.pools.available') + service = application_name() + if is_leader(): + response_key = ceph.get_broker_rsp_key() + # After the `create-cephfs-client` request completes, the + # databag must contain the generated key for that user. 
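+            # (The broker response is a dict roughly like
+            #  {"exit-code": 0, "key": "<cephx key>", "request-id": "..."},
+            #  as exercised by the unit tests.)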
+ key = ceph_mds.all_joined_units.received[response_key]["key"] + + cephfs_share.set_share(share_info={ + "fsid": ceph_mds.fsid, + "name": service, + "path": "/", + "monitor_hosts": ceph_mds.mon_hosts(), + }, auth_info={ + "username": '{}-client'.format(service), + "key": key + }) diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py index 2840376b..4a8fce14 100644 --- a/ceph-fs/unit_tests/test_reactive_ceph_fs.py +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -32,12 +32,27 @@ def test_hooks(self): ] hook_set = { 'when': { - 'config_changed': ('ceph-mds.pools.available',), - 'storage_ceph_connected': ('ceph-mds.connected',), + 'config_changed': ( + 'ceph-mds.pools.available', + ), + 'storage_ceph_connected': ( + 'ceph-mds.connected', + ), + 'cephfs_share_available': ( + 'cephfs.configured', + 'ceph-mds.pools.available', + 'cephfs-share.available', + ), }, 'when_none': { - 'config_changed': ('charm.paused', - 'run-default-update-status',), + 'config_changed': ( + 'charm.paused', + 'run-default-update-status', + ), + 'cephfs_share_available': ( + 'charm.paused', + 'run-default-update-status', + ), }, } # test that the hooks were registered via the reactive.ceph_fs module @@ -83,3 +98,55 @@ def test_config_changed(self): handlers.config_changed() self.target.install.assert_called_once_with() self.target.upgrade_if_available.assert_called_once_with([ceph_mds]) + + def test_cephfs_share_available(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + handlers.ch_core.hookenv.application_name.return_value = "ceph-fs" + handlers.ceph.get_broker_rsp_key.return_value = 'broker-rsp-ceph-fs-0' + + ceph_mds = mock.MagicMock() + ceph_mds.fsid = "354ca7c4-f10d-11ee-93f8-1f85f87b7845" + ceph_mds.mon_hosts.return_value = [ + "10.5.0.80:6789", "10.5.2.23:6789", "10.5.2.17:6789"] + ceph_mds.all_joined_units.received = { + "auth": "cephx", + "broker-rsp-ceph-fs-0": { + "exit-code": 0, + "key": "AQDvOE5mUfBIKxAAYT73/v7NzwWx2ovLB4nnOg==", + "request-id": "22dd9c7d8c7d392d44866b35219a654006fd90f0"}, + "ceph-public-address": "10.143.60.15", + "fsid": "354ca7c4-f10d-11ee-93f8-1f85f87b7845", + "juju-2ffa43-1_mds_key": + "AQDwOE5mmkQ1LBAAVrx4OXWwWM+XmK/KjnJcdA==", + } + + cephfs_share = mock.MagicMock() + + def mock_eff(flag): + if flag == "ceph-mds.pools.available": + return ceph_mds + elif flag == "cephfs-share.available": + return cephfs_share + else: + raise Exception("invalid input") + + self.endpoint_from_flag.side_effect = mock_eff + + handlers.cephfs_share_available() + + cephfs_share.set_share.assert_called_once_with( + share_info={ + "fsid": "354ca7c4-f10d-11ee-93f8-1f85f87b7845", + "name": "ceph-fs", + "path": "/", + "monitor_hosts": [ + "10.5.0.80:6789", + "10.5.2.23:6789", + "10.5.2.17:6789" + ], + }, + auth_info={ + "username": "ceph-fs-client", + "key": "AQDvOE5mUfBIKxAAYT73/v7NzwWx2ovLB4nnOg==" + } + ) From 7cc17128fc08046ce6fec339238e972a51655cae Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Thu, 2 May 2024 14:44:39 -0300 Subject: [PATCH 2624/2699] Caracal-Squid enablement for ceph-dashboard Change-Id: Iae146f2772ce80e541966791f05e979578f62e86 --- ceph-dashboard/charmcraft.yaml | 6 - ceph-dashboard/metadata.yaml | 2 - .../tests/bundles/jammy-bobcat.yaml | 10 +- ...{mantic-bobcat.yaml => jammy-caracal.yaml} | 22 ++-- .../tests/bundles/lunar-antelope.yaml | 114 ------------------ ceph-dashboard/tests/tests.yaml | 5 +- 6 files changed, 19 insertions(+), 140 deletions(-) rename 
ceph-dashboard/tests/bundles/{mantic-bobcat.yaml => jammy-caracal.yaml} (89%) delete mode 100644 ceph-dashboard/tests/bundles/lunar-antelope.yaml diff --git a/ceph-dashboard/charmcraft.yaml b/ceph-dashboard/charmcraft.yaml index 0b55422f..bea920df 100644 --- a/ceph-dashboard/charmcraft.yaml +++ b/ceph-dashboard/charmcraft.yaml @@ -32,9 +32,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-dashboard/metadata.yaml b/ceph-dashboard/metadata.yaml index 2311a695..bea2d947 100644 --- a/ceph-dashboard/metadata.yaml +++ b/ceph-dashboard/metadata.yaml @@ -17,8 +17,6 @@ subordinate: true series: - focal - jammy -- lunar -- mantic requires: dashboard: interface: ceph-dashboard diff --git a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml index fcdd5727..35bb4fe2 100644 --- a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml @@ -10,13 +10,13 @@ applications: osd-devices: 'cinder,10G,2' options: osd-devices: '/dev/test-non-existent' - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' - channel: latest/edge + channel: reef/edge vault: num_units: 1 charm: ch:vault @@ -59,17 +59,17 @@ applications: ceph-radosgw: charm: ch:ceph-radosgw num_units: 3 - channel: latest/edge + channel: reef/edge ceph-fs: charm: ch:ceph-fs num_units: 1 - channel: latest/edge + channel: reef/edge ceph-iscsi: charm: ch:ceph-iscsi num_units: 2 options: gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge + channel: reef/edge relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' diff --git a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml b/ceph-dashboard/tests/bundles/jammy-caracal.yaml similarity index 89% rename from ceph-dashboard/tests/bundles/mantic-bobcat.yaml rename to ceph-dashboard/tests/bundles/jammy-caracal.yaml index 70e356e5..1ce3e1e1 100644 --- a/ceph-dashboard/tests/bundles/mantic-bobcat.yaml +++ b/ceph-dashboard/tests/bundles/jammy-caracal.yaml @@ -1,22 +1,23 @@ local_overlay_enabled: False -series: mantic +series: jammy variables: + openstack-origin: &openstack-origin cloud:jammy-caracal source: &source distro applications: ceph-osd: charm: ch:ceph-osd - series: mantic num_units: 6 storage: osd-devices: 'cinder,10G,2' options: + source: *openstack-origin osd-devices: '/dev/test-non-existent' channel: latest/edge ceph-mon: charm: ch:ceph-mon - series: mantic num_units: 3 options: + source: *openstack-origin monitor-count: '3' channel: latest/edge vault: @@ -27,6 +28,8 @@ applications: charm: ch:mysql-innodb-cluster constraints: mem=3072M num_units: 3 + options: + source: *openstack-origin channel: latest/edge vault-mysql-router: charm: ch:mysql-router @@ -38,37 +41,38 @@ applications: prometheus: charm: ch:prometheus2 num_units: 1 - series: focal grafana: # SSL and allow_embedding are not released into cs:grafana yet, due # Octrober 2021 charm: ch:grafana num_units: 1 series: focal + channel: latest/stable options: anonymous: True install_method: snap allow_embedding: True prometheus-alertmanager: + series: focal charm: ch:prometheus-alertmanager num_units: 1 - series: focal ceph-radosgw: charm: ch:ceph-radosgw - series: mantic num_units: 3 + options: + source: *openstack-origin channel: latest/edge ceph-fs: charm: ch:ceph-fs - 
series: mantic num_units: 1 + options: + source: *openstack-origin channel: latest/edge ceph-iscsi: charm: ch:ceph-iscsi - # ceph-iscsi is deprecated therefore using older series. - series: jammy num_units: 2 options: + source: *openstack-origin gateway-metadata-pool: iscsi-foo-metadata channel: latest/edge relations: diff --git a/ceph-dashboard/tests/bundles/lunar-antelope.yaml b/ceph-dashboard/tests/bundles/lunar-antelope.yaml deleted file mode 100644 index dd2f39a2..00000000 --- a/ceph-dashboard/tests/bundles/lunar-antelope.yaml +++ /dev/null @@ -1,114 +0,0 @@ -local_overlay_enabled: False -series: lunar -variables: - source: &source distro -applications: - ceph-osd: - charm: ch:ceph-osd - series: lunar - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - series: lunar - num_units: 3 - options: - monitor-count: '3' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - series: jammy - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - series: jammy - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: ch:prometheus2 - num_units: 1 - series: focal - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # Octrober 2021 - charm: ch:grafana - num_units: 1 - series: focal - channel: latest/stable - options: - anonymous: True - install_method: snap - allow_embedding: True - prometheus-alertmanager: - charm: ch:prometheus-alertmanager - num_units: 1 - series: focal - ceph-radosgw: - charm: ch:ceph-radosgw - series: lunar - num_units: 3 - channel: quincy/edge - ceph-fs: - charm: ch:ceph-fs - series: lunar - num_units: 1 - channel: quincy/edge - ceph-iscsi: - charm: ch:ceph-iscsi - series: lunar - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - channel: quincy/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 11721de7..8852f622 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -3,7 +3,7 @@ gate_bundles: - focal - jammy - jammy-bobcat - - 
mantic-bobcat + - jammy-caracal smoke_bundles: - focal configure: @@ -29,6 +29,3 @@ target_deploy_status: telegraf: workload-status: active workload-status-message-prefix: Monitoring -tests_options: - force_deploy: - - mantic-bobcat From 04d768d4ab0f52cde0c0f7c6857a3101abe58c26 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Thu, 2 May 2024 14:53:35 -0300 Subject: [PATCH 2625/2699] Caracal-Squid enablement for ceph-fs Change-Id: If2190d60c5709c9e2b6117254f9436be95a8a89e --- ceph-fs/charmcraft.yaml | 6 - ceph-fs/src/build.lock | 294 ++++++++++++++++++ ceph-fs/src/metadata.yaml | 2 - ceph-fs/src/tests/bundles/jammy-antelope.yaml | 4 +- ...lunar-antelope.yaml => jammy-caracal.yaml} | 4 +- ceph-fs/src/tests/bundles/mantic-bobcat.yaml | 44 --- 6 files changed, 298 insertions(+), 56 deletions(-) create mode 100644 ceph-fs/src/build.lock rename ceph-fs/src/tests/bundles/{lunar-antelope.yaml => jammy-caracal.yaml} (90%) delete mode 100644 ceph-fs/src/tests/bundles/mantic-bobcat.yaml diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 3683b391..242452c5 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -33,9 +33,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-fs/src/build.lock b/ceph-fs/src/build.lock new file mode 100644 index 00000000..220ef7cc --- /dev/null +++ b/ceph-fs/src/build.lock @@ -0,0 +1,294 @@ +{ + "locks": [ + { + "type": "layer", + "item": "layer:options", + "url": "https://github.com/juju-solutions/layer-options.git", + "vcs": null, + "branch": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", + "commit": "fcdcea4e5de3e1556c24e6704607862d0ba00a56" + }, + { + "type": "layer", + "item": "layer:basic", + "url": "https://github.com/juju-solutions/layer-basic.git", + "vcs": null, + "branch": "1edd5ccd3fb6240ca734b64a7dae9f1cb30b66f6", + "commit": "1edd5ccd3fb6240ca734b64a7dae9f1cb30b66f6" + }, + { + "type": "layer", + "item": "layer:openstack", + "url": "https://github.com/openstack/charm-layer-openstack", + "vcs": null, + "branch": "7c671b0696977f455616565d956895b2f890464b", + "commit": "7c671b0696977f455616565d956895b2f890464b" + }, + { + "type": "layer", + "item": "layer:openstack-principle", + "url": "https://github.com/openstack/charm-layer-openstack-principle", + "vcs": null, + "branch": "2541283a7f9c3fa923c0fccd7257e217e11dadcd", + "commit": "2541283a7f9c3fa923c0fccd7257e217e11dadcd" + }, + { + "type": "layer", + "item": "layer:openstack-api", + "url": "https://github.com/openstack/charm-layer-openstack-api", + "vcs": null, + "branch": "34311a62e963d0ce903b7ddb9d70b8f071f71651", + "commit": "34311a62e963d0ce903b7ddb9d70b8f071f71651" + }, + { + "type": "layer", + "item": "aodh", + "url": null, + "vcs": null, + "branch": null, + "commit": "" + }, + { + "type": "layer", + "item": "interface:tls-certificates", + "url": "https://github.com/juju-solutions/interface-tls-certificates", + "vcs": null, + "branch": "da891c403864482688ec767a964218e5857f0e49", + "commit": "da891c403864482688ec767a964218e5857f0e49" + }, + { + "type": "layer", + "item": "interface:mysql-shared", + "url": "https://github.com/openstack/charm-interface-mysql-shared", + "vcs": null, + "branch": "f90f8c6edce21e4da3a872f0f9d6761730ce025d", + "commit": "f90f8c6edce21e4da3a872f0f9d6761730ce025d" + }, + { + "type": "layer", + "item": "interface:rabbitmq", + 
"url": "https://github.com/openstack/charm-interface-rabbitmq", + "vcs": null, + "branch": "383121fc584d2d3bf9d233eba0d3708398a4c468", + "commit": "383121fc584d2d3bf9d233eba0d3708398a4c468" + }, + { + "type": "layer", + "item": "interface:keystone", + "url": "https://github.com/openstack/charm-interface-keystone", + "vcs": null, + "branch": "36ea7b385a569ebabca7184ed4ce8420bb8fa61a", + "commit": "36ea7b385a569ebabca7184ed4ce8420bb8fa61a" + }, + { + "type": "layer", + "item": "interface:hacluster", + "url": "https://github.com/openstack/charm-interface-hacluster.git", + "vcs": null, + "branch": "d91e83a1fa59a13f4160febfe536c68dc762e436", + "commit": "d91e83a1fa59a13f4160febfe536c68dc762e436" + }, + { + "type": "layer", + "item": "interface:openstack-ha", + "url": "https://github.com/openstack/charm-interface-openstack-ha", + "vcs": null, + "branch": "a3b00e5b5fd857d130698c5cfe4b918877f0ab80", + "commit": "a3b00e5b5fd857d130698c5cfe4b918877f0ab80" + }, + { + "type": "layer", + "item": "interface:mongodb", + "url": "https://github.com/cloud-green/juju-relation-mongodb", + "vcs": null, + "branch": "708b6ade852794b17447024a28e5ec2bb883538c", + "commit": "708b6ade852794b17447024a28e5ec2bb883538c" + }, + { + "type": "layer", + "item": "interface:nrpe-external-master", + "url": "https://github.com/canonical/nrpe-external-master-interface", + "vcs": null, + "branch": "c58fe7b01a151d933681b5bf67e47ad3de65eeaa", + "commit": "c58fe7b01a151d933681b5bf67e47ad3de65eeaa" + }, + { + "type": "python_module", + "package": "trove-classifiers", + "vcs": null, + "version": "2024.4.10" + }, + { + "type": "python_module", + "package": "netifaces", + "vcs": null, + "version": "0.11.0" + }, + { + "type": "python_module", + "package": "dnspython", + "vcs": null, + "version": "2.6.1" + }, + { + "type": "python_module", + "package": "netaddr", + "vcs": null, + "version": "0.7.19" + }, + { + "type": "python_module", + "package": "packaging", + "vcs": null, + "version": "24.0" + }, + { + "type": "python_module", + "package": "pluggy", + "vcs": null, + "version": "1.5.0" + }, + { + "type": "python_module", + "package": "anyio", + "vcs": null, + "version": "3.6.2" + }, + { + "type": "python_module", + "package": "tomli", + "vcs": null, + "version": "2.0.1" + }, + { + "type": "python_module", + "package": "pyaml", + "vcs": null, + "version": "21.10.1" + }, + { + "type": "python_module", + "package": "idna", + "vcs": null, + "version": "3.7" + }, + { + "type": "python_module", + "package": "calver", + "vcs": null, + "version": "2022.6.26" + }, + { + "type": "python_module", + "package": "wheel", + "vcs": null, + "version": "0.43.0" + }, + { + "type": "python_module", + "package": "pip", + "vcs": null, + "version": "22.0.4" + }, + { + "type": "python_module", + "package": "setuptools", + "vcs": null, + "version": "62.1.0" + }, + { + "type": "python_module", + "package": "psutil", + "vcs": null, + "version": "5.9.8" + }, + { + "type": "python_module", + "package": "charms.openstack", + "url": "git+https://github.com/openstack/charms.openstack.git", + "branch": "stable/caracal", + "version": "018b72d734223db274b59f524b7887d8153cdb6d", + "vcs": "git" + }, + { + "type": "python_module", + "package": "charmhelpers", + "url": "git+https://github.com/juju/charm-helpers.git", + "branch": "stable/caracal", + "version": "807f705d1d132bedb62b2eb743403e1d5867360f", + "vcs": "git" + }, + { + "type": "python_module", + "package": "pathspec", + "vcs": null, + "version": "0.12.1" + }, + { + "type": "python_module", + "package": "hatchling", 
+ "vcs": null, + "version": "1.24.2" + }, + { + "type": "python_module", + "package": "Cython", + "vcs": null, + "version": "0.29.37" + }, + { + "type": "python_module", + "package": "MarkupSafe", + "vcs": null, + "version": "2.1.5" + }, + { + "type": "python_module", + "package": "PyYAML", + "vcs": null, + "version": "6.0.1" + }, + { + "type": "python_module", + "package": "charms.reactive", + "vcs": null, + "version": "1.5.2" + }, + { + "type": "python_module", + "package": "sniffio", + "vcs": null, + "version": "1.3.0" + }, + { + "type": "python_module", + "package": "setuptools_scm", + "vcs": null, + "version": "6.4.2" + }, + { + "type": "python_module", + "package": "Jinja2", + "vcs": null, + "version": "3.1.3" + }, + { + "type": "python_module", + "package": "pbr", + "vcs": null, + "version": "6.0.0" + }, + { + "type": "python_module", + "package": "flit_scm", + "vcs": null, + "version": "1.7.0" + }, + { + "type": "python_module", + "package": "flit_core", + "vcs": null, + "version": "3.9.0" + } + ] +} \ No newline at end of file diff --git a/ceph-fs/src/metadata.yaml b/ceph-fs/src/metadata.yaml index e20f31f3..919002e1 100644 --- a/ceph-fs/src/metadata.yaml +++ b/ceph-fs/src/metadata.yaml @@ -13,8 +13,6 @@ tags: series: - focal - jammy -- lunar -- mantic subordinate: false requires: ceph-mds: diff --git a/ceph-fs/src/tests/bundles/jammy-antelope.yaml b/ceph-fs/src/tests/bundles/jammy-antelope.yaml index b75c3506..dfab58f3 100644 --- a/ceph-fs/src/tests/bundles/jammy-antelope.yaml +++ b/ceph-fs/src/tests/bundles/jammy-antelope.yaml @@ -26,7 +26,7 @@ applications: options: osd-devices: '/dev/test-non-existent' source: *openstack-origin - channel: latest/edge + channel: reef/edge ceph-mon: charm: ch:ceph-mon @@ -34,7 +34,7 @@ applications: options: monitor-count: '3' source: *openstack-origin - channel: latest/edge + channel: reef/edge relations: - - 'ceph-mon:mds' diff --git a/ceph-fs/src/tests/bundles/lunar-antelope.yaml b/ceph-fs/src/tests/bundles/jammy-caracal.yaml similarity index 90% rename from ceph-fs/src/tests/bundles/lunar-antelope.yaml rename to ceph-fs/src/tests/bundles/jammy-caracal.yaml index 35d64bcb..82ad470b 100644 --- a/ceph-fs/src/tests/bundles/lunar-antelope.yaml +++ b/ceph-fs/src/tests/bundles/jammy-caracal.yaml @@ -1,9 +1,9 @@ variables: - openstack-origin: &openstack-origin distro + openstack-origin: &openstack-origin cloud:jammy-caracal local_overlay_enabled: False -series: &series lunar +series: &series jammy applications: ubuntu: # used to test mounts diff --git a/ceph-fs/src/tests/bundles/mantic-bobcat.yaml b/ceph-fs/src/tests/bundles/mantic-bobcat.yaml deleted file mode 100644 index 3456d8cb..00000000 --- a/ceph-fs/src/tests/bundles/mantic-bobcat.yaml +++ /dev/null @@ -1,44 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -local_overlay_enabled: False - -series: &series mantic - -applications: - ubuntu: # used to test mounts - charm: ch:ubuntu - num_units: 2 - ceph-fs: - charm: ../../../ceph-fs.charm - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *openstack-origin - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - channel: latest/edge - -relations: - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' From 
852e3623d41919951779acc126d7818a6e84c240 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 29 May 2024 10:35:00 +0200 Subject: [PATCH 2626/2699] Caracal-Squid enablement for ceph-nfs Also remove focal-pacific, as there is a backward-incompatible change in the charmhelpers ceph client broker protocol which added a crush-profile parameter to pool creation. Change-Id: I16433b24c7584b22b2561fee60cbbb5fd3066e64 --- ceph-nfs/README.md | 2 + ceph-nfs/charmcraft.yaml | 6 --- ceph-nfs/metadata.yaml | 2 - ceph-nfs/osci.yaml | 30 ++---------- ceph-nfs/tests/bundles/focal-pacific.yaml | 47 ------------------- ceph-nfs/tests/bundles/jammy-reef.yaml | 6 +-- .../{mantic-quincy.yaml => jammy-squid.yaml} | 14 +++--- ceph-nfs/tests/bundles/lunar-quincy.yaml | 47 ------------------- ceph-nfs/tests/tests.yaml | 2 +- 9 files changed, 18 insertions(+), 138 deletions(-) delete mode 100644 ceph-nfs/tests/bundles/focal-pacific.yaml rename ceph-nfs/tests/bundles/{mantic-quincy.yaml => jammy-squid.yaml} (84%) delete mode 100644 ceph-nfs/tests/bundles/lunar-quincy.yaml diff --git a/ceph-nfs/README.md b/ceph-nfs/README.md index 0887907c..547ad129 100644 --- a/ceph-nfs/README.md +++ b/ceph-nfs/README.md @@ -51,6 +51,8 @@ Please report bugs on [Launchpad][lp-bugs-charm-ceph-fs]. For general charm questions refer to the OpenStack [Charm Guide][cg]. +Note that starting with the squid track of the ceph-nfs charm, deployment of Ceph Pacific and older clusters is no longer supported. + [lp-bugs-charm-ceph-fs]: https://bugs.launchpad.net/charm-ceph-fs/+filebug diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml index 71f50dd0..f87b7111 100644 --- a/ceph-nfs/charmcraft.yaml +++ b/ceph-nfs/charmcraft.yaml @@ -33,9 +33,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-nfs/metadata.yaml b/ceph-nfs/metadata.yaml index ccab695a..a2527159 100644 --- a/ceph-nfs/metadata.yaml +++ b/ceph-nfs/metadata.yaml @@ -11,8 +11,6 @@ tags: series: - focal - jammy - - lunar - - mantic subordinate: false min-juju-version: 2.7.6 extra-bindings: diff --git a/ceph-nfs/osci.yaml b/ceph-nfs/osci.yaml index c3ff84ec..04f68b6c 100644 --- a/ceph-nfs/osci.yaml +++ b/ceph-nfs/osci.yaml @@ -1,30 +1,18 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 check: jobs: - - focal-pacific - focal-quincy - jammy-quincy - jammy-reef - - lunar-quincy - - mantic-quincy + - jammy-squid vars: needs_charm_build: true charm_build_name: ceph-nfs build_type: charmcraft charmcraft_channel: 2.x/stable -- job: - name: focal-pacific - parent: func-target - dependencies: - - charm-build - - osci-lint - - tox-py38 - - tox-py39 - vars: - tox_extra_args: -- focal-pacific - job: name: focal-quincy parent: func-target @@ -32,7 +20,6 @@ - charm-build - osci-lint - tox-py38 - - tox-py39 vars: tox_extra_args: -- focal-quincy - job: @@ -50,18 +37,9 @@ vars: tox_extra_args: -- jammy-reef - job: - name: lunar-quincy - parent: func-target - voting: false - dependencies: - - focal-quincy - vars: - tox_extra_args: -- lunar-quincy -- job: - name: mantic-quincy + name: jammy-squid parent: func-target - voting: false dependencies: - focal-quincy vars: - tox_extra_args: -- mantic-quincy + tox_extra_args: -- jammy-squid diff --git a/ceph-nfs/tests/bundles/focal-pacific.yaml 
b/ceph-nfs/tests/bundles/focal-pacific.yaml deleted file mode 100644 index faad3034..00000000 --- a/ceph-nfs/tests/bundles/focal-pacific.yaml +++ /dev/null @@ -1,47 +0,0 @@ -local_overlay_enabled: False -series: focal -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: proposed - ceph-osd: - charm: ch:ceph-osd - channel: pacific/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: cloud:focal-wallaby - ceph-mon: - charm: ch:ceph-mon - channel: pacific/edge - num_units: 3 - options: - monitor-count: '3' - expected-osd-count: 6 - source: cloud:focal-wallaby - ceph-fs: - charm: ch:ceph-fs - channel: pacific/edge - num_units: 2 - options: - source: cloud:focal-wallaby - hacluster: - charm: ch:hacluster - channel: 2.0.3/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/jammy-reef.yaml b/ceph-nfs/tests/bundles/jammy-reef.yaml index e0c186cf..ff05761b 100644 --- a/ceph-nfs/tests/bundles/jammy-reef.yaml +++ b/ceph-nfs/tests/bundles/jammy-reef.yaml @@ -14,7 +14,7 @@ applications: source: *source ceph-osd: charm: ch:ceph-osd - channel: latest/edge + channel: reef/edge num_units: 3 storage: osd-devices: '2,10G' @@ -22,7 +22,7 @@ applications: source: *source ceph-mon: charm: ch:ceph-mon - channel: latest/edge + channel: reef/edge num_units: 3 options: monitor-count: '3' @@ -30,7 +30,7 @@ applications: source: *source ceph-fs: charm: ch:ceph-fs - channel: latest/edge + channel: reef/edge num_units: 2 options: source: *source diff --git a/ceph-nfs/tests/bundles/mantic-quincy.yaml b/ceph-nfs/tests/bundles/jammy-squid.yaml similarity index 84% rename from ceph-nfs/tests/bundles/mantic-quincy.yaml rename to ceph-nfs/tests/bundles/jammy-squid.yaml index 0f8616ce..2270da22 100644 --- a/ceph-nfs/tests/bundles/mantic-quincy.yaml +++ b/ceph-nfs/tests/bundles/jammy-squid.yaml @@ -1,6 +1,8 @@ +options: + source: &source cloud:jammy-caracal + local_overlay_enabled: False -series: mantic -jammy +series: jammy applications: ubuntu: charm: cs:ubuntu @@ -9,7 +11,7 @@ applications: charm: ../../ceph-nfs.charm num_units: 2 options: - source: distro + source: *source ceph-osd: charm: ch:ceph-osd channel: latest/edge @@ -17,7 +19,7 @@ applications: storage: osd-devices: '2,10G' options: - source: distro + source: *source ceph-mon: charm: ch:ceph-mon channel: latest/edge @@ -25,13 +27,13 @@ applications: options: monitor-count: '3' expected-osd-count: 6 - source: distro + source: *source ceph-fs: charm: ch:ceph-fs channel: latest/edge num_units: 2 options: - source: distro + source: *source hacluster: charm: ch:hacluster channel: 2.4/edge diff --git a/ceph-nfs/tests/bundles/lunar-quincy.yaml b/ceph-nfs/tests/bundles/lunar-quincy.yaml deleted file mode 100644 index 669cb915..00000000 --- a/ceph-nfs/tests/bundles/lunar-quincy.yaml +++ /dev/null @@ -1,47 +0,0 @@ -local_overlay_enabled: False -series: jammy -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: distro - ceph-osd: - charm: ch:ceph-osd - channel: quincy/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: distro - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - monitor-count: '3' - expected-osd-count: 6 - source: distro - ceph-fs: 
- charm: ch:ceph-fs - channel: quincy/edge - num_units: 2 - options: - source: distro - hacluster: - charm: ch:hacluster - channel: 2.4/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 4660722e..b435eff8 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -4,7 +4,7 @@ gate_bundles: - focal-pacific - jammy-quincy - jammy-reef - - mantic-quincy + - jammy-squid smoke_bundles: - focal-pacific configure: [] From d13cf257c313d8cc0691cb31bbf18e6daf97dce6 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Jun 2024 17:45:50 +0200 Subject: [PATCH 2627/2699] Squid support: make caracal the default source Change-Id: I6dfb458dee644eb56e0a59e0049706e7867bd47c --- ceph-fs/src/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-fs/src/config.yaml b/ceph-fs/src/config.yaml index 58ce4db8..b503b9c4 100644 --- a/ceph-fs/src/config.yaml +++ b/ceph-fs/src/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. source: type: string - default: quincy + default: caracal description: | Optional configuration to support use of additional sources such as: . From 7e94bf6f060b06e760232a7c14c96f69a220175c Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Jun 2024 18:12:54 +0200 Subject: [PATCH 2628/2699] Squid support: make caracal the default source Change-Id: I7ac8163ddcfe739799ef2728a2c9ffd2feace22a --- ceph-iscsi/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-iscsi/config.yaml b/ceph-iscsi/config.yaml index 870c9239..4274c8b6 100644 --- a/ceph-iscsi/config.yaml +++ b/ceph-iscsi/config.yaml @@ -5,7 +5,7 @@ options: description: Mon and OSD debug level. Max is 20. source: type: string - default: distro + default: caracal description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa From 9efe794d5a4821f85140a5e09ca5ef2eba48a471 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Jun 2024 18:12:54 +0200 Subject: [PATCH 2629/2699] Squid support: make caracal the default source Change-Id: Ic1789cab4bf989ef54e2c45e6e3e5345890786fb --- ceph-nfs/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-nfs/config.yaml b/ceph-nfs/config.yaml index fd6c3337..5dfe1c9b 100644 --- a/ceph-nfs/config.yaml +++ b/ceph-nfs/config.yaml @@ -10,7 +10,7 @@ options: source: type: string - default: quincy + default: caracal description: | Optional configuration to support use of additional sources such as: - ppa:myteam/ppa From 1e532088bac53cf621ed9e44aefdd79fdc087582 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Jun 2024 18:12:54 +0200 Subject: [PATCH 2630/2699] Squid support: make caracal the default source Change-Id: I6d34cb67bf9a419d870d143c856a1a4bee34dd8e --- ceph-radosgw/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/config.yaml b/ceph-radosgw/config.yaml index c931a840..45be3aae 100644 --- a/ceph-radosgw/config.yaml +++ b/ceph-radosgw/config.yaml @@ -5,7 +5,7 @@ options: description: RadosGW debug level. Max is 20. source: type: string - default: bobcat + default: caracal description: | Optional repository from which to install. 
May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, From 7d75a96cd4459d57552e928710feb4b93a6bc995 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Thu, 2 May 2024 15:02:09 -0300 Subject: [PATCH 2631/2699] Caracal-Squid enablement for ceph-proxy Change-Id: I1eecf717ed945faaa5b3bf6a9db2bd9f80e9fa73 --- ceph-proxy/charmcraft.yaml | 6 - ceph-proxy/charmhelpers/contrib/network/ip.py | 48 +++- .../charmhelpers/contrib/openstack/utils.py | 49 ++-- .../charmhelpers/contrib/storage/linux/lvm.py | 6 +- ceph-proxy/charmhelpers/core/host.py | 7 +- ceph-proxy/charmhelpers/fetch/ubuntu.py | 10 + ceph-proxy/charmhelpers/osplatform.py | 28 ++- ceph-proxy/lib/charms_ceph/broker.py | 69 +++++- ceph-proxy/lib/charms_ceph/utils.py | 139 ++++------- ceph-proxy/metadata.yaml | 2 - ceph-proxy/tests/bundles/jammy-antelope.yaml | 28 +-- ceph-proxy/tests/bundles/jammy-bobcat.yaml | 28 +-- ...lunar-antelope.yaml => jammy-caracal.yaml} | 14 +- .../tests/bundles/lunar-antelope-ec.yaml | 224 ------------------ .../tests/bundles/mantic-bobcat-ec.yaml | 224 ------------------ ceph-proxy/tests/bundles/mantic-bobcat.yaml | 214 ----------------- 16 files changed, 245 insertions(+), 851 deletions(-) rename ceph-proxy/tests/bundles/{lunar-antelope.yaml => jammy-caracal.yaml} (95%) delete mode 100644 ceph-proxy/tests/bundles/lunar-antelope-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml delete mode 100644 ceph-proxy/tests/bundles/mantic-bobcat.yaml diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 62a8f5b8..09f03428 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -31,9 +31,3 @@ bases: - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "23.10" - architectures: [amd64, s390x, ppc64el, arm64] diff --git a/ceph-proxy/charmhelpers/contrib/network/ip.py b/ceph-proxy/charmhelpers/contrib/network/ip.py index cf9926b9..f3b4864f 100644 --- a/ceph-proxy/charmhelpers/contrib/network/ip.py +++ b/ceph-proxy/charmhelpers/contrib/network/ip.py @@ -16,6 +16,7 @@ import re import subprocess import socket +import ssl from functools import partial @@ -527,19 +528,56 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] -def port_has_listener(address, port): +class SSLPortCheckInfo(object): + + def __init__(self, key, cert, ca_cert, check_hostname=False): + self.key = key + self.cert = cert + self.ca_cert = ca_cert + # NOTE: by default we do not check hostname since the port check is + # typically performed using 0.0.0.0 which will not match the + # certificate. Hence the default for this is False. + self.check_hostname = check_hostname + + @property + def ssl_context(self): + context = ssl.create_default_context() + context.check_hostname = self.check_hostname + context.load_cert_chain(self.cert, self.key) + context.load_verify_locations(self.ca_cert) + return context + + +def port_has_listener(address, port, sslinfo=None): """ Returns True if the address:port is open and being listened to, - else False. + else False. By default uses netcat to check ports but if sslinfo is + provided will use an SSL connection instead. @param address: an IP address or hostname @param port: integer port + @param sslinfo: optional SSLPortCheckInfo object. + If provided, the check is performed using an ssl + connection. 
Note calls 'zc' via a subprocess shell """ - cmd = ['nc', '-z', address, str(port)] - result = subprocess.call(cmd) - return not (bool(result)) + if not sslinfo: + cmd = ['nc', '-z', address, str(port)] + result = subprocess.call(cmd) + return not (bool(result)) + + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock: + ssock = sslinfo.ssl_context.wrap_socket(sock, + server_hostname=address) + ssock.connect((address, port)) + # this bit is crucial to ensure tls close_notify is sent + ssock.unwrap() + + return True + except ConnectionRefusedError: + return False def assert_charm_supports_ipv6(): diff --git a/ceph-proxy/charmhelpers/contrib/openstack/utils.py b/ceph-proxy/charmhelpers/contrib/openstack/utils.py index e98be2c5..82c28d8e 100644 --- a/ceph-proxy/charmhelpers/contrib/openstack/utils.py +++ b/ceph-proxy/charmhelpers/contrib/openstack/utils.py @@ -161,6 +161,7 @@ ('2022.2', 'zed'), ('2023.1', 'antelope'), ('2023.2', 'bobcat'), + ('2024.1', 'caracal'), ]) # The ugly duckling - must list releases oldest to newest @@ -416,17 +417,6 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, error_out(e) -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): - if k == codename: - return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] @@ -585,7 +575,6 @@ def _do_install(): return openstack_release().get('OPENSTACK_CODENAME') -@cached def openstack_release(): """Return /etc/os-release in a dict.""" d = {} @@ -847,14 +836,10 @@ def openstack_upgrade_available(package): if not cur_vers: # The package has not been installed yet do not attempt upgrade return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - try: - avail_vers = get_os_version_install_source(src) - except Exception: - avail_vers = cur_vers + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1222,12 +1207,14 @@ def _ows_check_services_running(services, ports): return ows_check_services_running(services, ports) -def ows_check_services_running(services, ports): +def ows_check_services_running(services, ports, ssl_check_info=None): """Check that the services that should be running are actually running and that any ports specified are being listened to. @param services: list of strings OR dictionary specifying services/ports @param ports: list of ports + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @returns state, message: strings or None, None """ messages = [] @@ -1243,7 +1230,7 @@ def ows_check_services_running(services, ports): # also verify that the ports that should be open are open # NB, that ServiceManager objects only OPTIONALLY have ports map_not_open, ports_open = ( - _check_listening_on_services_ports(services)) + _check_listening_on_services_ports(services, ssl_check_info)) if not all(ports_open): # find which service has missing ports. They are in service # order which makes it a bit easier. 
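# Editor's sketch (not part of the patch): minimal usage of the SSL-aware
# port check added above in charmhelpers/contrib/network/ip.py. The
# certificate and key paths below are hypothetical placeholders; the
# signatures of SSLPortCheckInfo(key, cert, ca_cert, check_hostname=False)
# and port_has_listener(address, port, sslinfo=None) are taken from the hunk
# above.
from charmhelpers.contrib.network.ip import SSLPortCheckInfo, port_has_listener

# Plain TCP probe: unchanged behaviour, shells out to netcat ('nc -z').
listening = port_has_listener('0.0.0.0', 8443)

# TLS probe: performs a full SSL handshake with the given client key,
# certificate and CA bundle, then sends close_notify via unwrap() before
# reporting the port as open.
sslinfo = SSLPortCheckInfo(key='/etc/certs/unit.key',
                           cert='/etc/certs/unit.crt',
                           ca_cert='/etc/certs/ca.crt')
listening_tls = port_has_listener('0.0.0.0', 8443, sslinfo=sslinfo)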
@@ -1258,7 +1245,8 @@ def ows_check_services_running(services, ports): if ports is not None: # and we can also check ports which we don't know the service for - ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + ports_open, ports_open_bools = \ + _check_listening_on_ports_list(ports, ssl_check_info) if not all(ports_open_bools): messages.append( "Ports which should be open, but are not: {}" @@ -1317,7 +1305,8 @@ def _check_running_services(services): return list(zip(services, services_running)), services_running -def _check_listening_on_services_ports(services, test=False): +def _check_listening_on_services_ports(services, test=False, + ssl_check_info=None): """Check that the unit is actually listening (has the port open) on the ports that the service specifies are open. If test is True then the function returns the services with ports that are open rather than @@ -1327,11 +1316,14 @@ def _check_listening_on_services_ports(services, test=False): @param services: OrderedDict(service: [port, ...], ...) @param test: default=False, if False, test for closed, otherwise open. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) - ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + ports_states = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in all_ports] map_ports = OrderedDict() matched_ports = [p for p, opened in zip(all_ports, ports_states) if opened == test] # essentially opened xor test @@ -1342,16 +1334,19 @@ def _check_listening_on_services_ports(services, test=False): return map_ports, ports_states -def _check_listening_on_ports_list(ports): +def _check_listening_on_ports_list(ports, ssl_check_info=None): """Check that the ports list given are being listened to Returns a list of ports being listened to and a list of the booleans. + @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks + will be done using an SSL connection. @param ports: LIST of port numbers. @returns [(port_num, boolean), ...], [boolean] """ - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + ports_open = [port_has_listener('0.0.0.0', p, ssl_check_info) + for p in ports] return zip(ports, ports_open), ports_open diff --git a/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py b/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py index d0a57211..0d294c79 100644 --- a/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py +++ b/ceph-proxy/charmhelpers/contrib/storage/linux/lvm.py @@ -17,8 +17,6 @@ CalledProcessError, check_call, check_output, - Popen, - PIPE, ) @@ -58,9 +56,7 @@ def remove_lvm_physical_volume(block_device): :param block_device: str: Full path of block device to scrub. 
''' - p = Popen(['pvremove', '-ff', block_device], - stdin=PIPE) - p.communicate(input='y\n') + check_call(['pvremove', '-ff', '--yes', block_device]) def list_lvm_volume_group(block_device): diff --git a/ceph-proxy/charmhelpers/core/host.py b/ceph-proxy/charmhelpers/core/host.py index 70dde6a5..def403c5 100644 --- a/ceph-proxy/charmhelpers/core/host.py +++ b/ceph-proxy/charmhelpers/core/host.py @@ -256,8 +256,11 @@ def service_resume(service_name, init_dir="/etc/init", upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) sysv_file = os.path.join(initd_dir, service_name) if init_is_systemd(service_name=service_name): - service('unmask', service_name) - service('enable', service_name) + if service('is-enabled', service_name): + log('service {} already enabled'.format(service_name), level=DEBUG) + else: + service('unmask', service_name) + service('enable', service_name) elif os.path.exists(upstart_file): override_path = os.path.join( init_dir, '{}.override'.format(service_name)) diff --git a/ceph-proxy/charmhelpers/fetch/ubuntu.py b/ceph-proxy/charmhelpers/fetch/ubuntu.py index 1be992c4..d0089eb7 100644 --- a/ceph-proxy/charmhelpers/fetch/ubuntu.py +++ b/ceph-proxy/charmhelpers/fetch/ubuntu.py @@ -246,6 +246,14 @@ 'bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-bobcat/proposed': 'jammy-proposed/bobcat', 'jammy-proposed/bobcat': 'jammy-proposed/bobcat', + # caracal + 'caracal': 'jammy-updates/caracal', + 'jammy-caracal': 'jammy-updates/caracal', + 'jammy-caracal/updates': 'jammy-updates/caracal', + 'jammy-updates/caracal': 'jammy-updates/caracal', + 'caracal/proposed': 'jammy-proposed/caracal', + 'jammy-caracal/proposed': 'jammy-proposed/caracal', + 'jammy-proposed/caracal': 'jammy-proposed/caracal', # OVN 'focal-ovn-22.03': 'focal-updates/ovn-22.03', @@ -279,6 +287,7 @@ 'zed', 'antelope', 'bobcat', + 'caracal', ) @@ -308,6 +317,7 @@ ('kinetic', 'zed'), ('lunar', 'antelope'), ('mantic', 'bobcat'), + ('noble', 'caracal'), ]) diff --git a/ceph-proxy/charmhelpers/osplatform.py b/ceph-proxy/charmhelpers/osplatform.py index 1ace468f..5d121866 100644 --- a/ceph-proxy/charmhelpers/osplatform.py +++ b/ceph-proxy/charmhelpers/osplatform.py @@ -9,19 +9,13 @@ def get_platform(): will be returned (which is the name of the module). This string is used to decide which platform module should be imported. """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warnings *not* disabled, as we certainly need to fix this. - if hasattr(platform, 'linux_distribution'): - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - else: - current_platform = _get_platform_from_fs() + current_platform = _get_current_platform() if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: return "centos" - elif "debian" in current_platform: + elif "debian" in current_platform or "Debian" in current_platform: # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" @@ -36,6 +30,24 @@ def get_platform(): .format(current_platform)) +def _get_current_platform(): + """Return the current platform information for the OS. + + Attempts to lookup linux distribution information from the platform + module for releases of python < 3.7. For newer versions of python, + the platform is determined from the /etc/os-release file. 
+ """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + return current_platform + + def _get_platform_from_fs(): """Get Platform from /etc/os-release.""" with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: diff --git a/ceph-proxy/lib/charms_ceph/broker.py b/ceph-proxy/lib/charms_ceph/broker.py index 71f85f45..7ca96922 100644 --- a/ceph-proxy/lib/charms_ceph/broker.py +++ b/ceph-proxy/lib/charms_ceph/broker.py @@ -106,6 +106,8 @@ def decode_req_encode_rsp(f): """Decorator to decode incoming requests and encode responses.""" def decode_inner(req): + if isinstance(req, bytes): + req = req.decode('utf-8') return json.dumps(f(json.loads(req))) return decode_inner @@ -767,7 +769,7 @@ def handle_create_cephfs(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - if get_cephfs(service=service): + if cephfs_name in get_cephfs(service=service): # CephFS new has already been called log("CephFS already created") return @@ -833,6 +835,69 @@ def handle_rgw_region_set(request, service): os.unlink(infile.name) +def handle_create_cephfs_client(request, service): + """Creates a new CephFS client for a filesystem. + + :param request: The broker request + :param service: The ceph client to run the command under. + :returns: dict. exit-code and reason if not 0. + """ + fs_name = request.get('fs_name') + client_id = request.get('client_id') + # TODO: fs allows setting write permissions for a list of paths. + path = request.get('path') + perms = request.get('perms') + # Need all parameters + if not fs_name or not client_id or not path or not perms: + msg = "Missing fs_name, client_id, path or perms params" + log(msg, level=ERROR) + return {'exit-code': 1, 'stderr': msg} + + # Skip creation if the request has already been called + # This makes it a bit more compatible with older Ceph versions + # that throw when trying to authorize a user with the same + # capabilites that it currently has. + try: + cmd = ["ceph", "--id", service, "auth", "ls", "-f", "json"] + auth_ls = json.loads(check_output(cmd, encoding="utf-8")) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + except ValueError as err: + log(str(err), level=ERROR) + return {'exit-code': 1, 'stderr': str(err)} + + client = "client.{}".format(client_id) + for elem in auth_ls["auth_dump"]: + if client == elem["entity"]: + log("Client {} has already been created".format(client)) + return {'exit-code': 0, 'key': elem["key"]} + + # Try to authorize the client + # `ceph fs authorize` already returns the correct error + # message if the filesystem doesn't exist. + try: + cmd = [ + "ceph", + "--id", service, + "fs", "authorize", + fs_name, + client, + path, + perms, + "-f", "json" + ] + fs_auth = json.loads(check_output(cmd, encoding="utf-8")) + except CalledProcessError as err: + log(err.output, level=ERROR) + return {'exit-code': 1, 'stderr': err.output} + except ValueError as err: + log(str(err), level=ERROR) + return {'exit-code': 1, 'stderr': str(err)} + + return {'exit-code': 0, 'key': fs_auth[0]["key"]} + + def process_requests_v1(reqs): """Process v1 requests. 
@@ -902,6 +967,8 @@ def process_requests_v1(reqs): ret = handle_add_permissions_to_key(request=req, service=svc) elif op == 'set-key-permissions': ret = handle_set_key_permissions(request=req, service=svc) + elif op == "create-cephfs-client": + ret = handle_create_cephfs_client(request=req, service=svc) else: msg = "Unknown operation '{}'".format(op) log(msg, level=ERROR) diff --git a/ceph-proxy/lib/charms_ceph/utils.py b/ceph-proxy/lib/charms_ceph/utils.py index 94bfb9e4..63dd1fae 100644 --- a/ceph-proxy/lib/charms_ceph/utils.py +++ b/ceph-proxy/lib/charms_ceph/utils.py @@ -1324,16 +1324,6 @@ def systemd(): return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' -def use_bluestore(): - """Determine whether bluestore should be used for OSD's - - :returns: whether bluestore disk format should be used - :rtype: bool""" - if cmp_pkgrevno('ceph', '12.2.0') < 0: - return False - return config('bluestore') - - def bootstrap_monitor_cluster(secret): """Bootstrap local Ceph mon into the Ceph cluster @@ -1551,21 +1541,21 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): + key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager, osd_id) + key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", level=WARNING) return - osdize_dir(dev, encrypt, bluestore) + osdize_dir(dev, encrypt) def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + encrypt=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1579,7 +1569,6 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1630,15 +1619,13 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, cmd = _ceph_volume(dev, osd_journal, encrypt, - bluestore, key_manager, osd_id) else: cmd = _ceph_disk(dev, osd_format, osd_journal, - encrypt, - bluestore) + encrypt) try: status_set('maintenance', 'Initializing device {}'.format(dev)) @@ -1669,7 +1656,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, db.flush() -def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): +def _ceph_disk(dev, osd_format, osd_journal, encrypt=False): """ Prepare a device for usage as a Ceph OSD using ceph-disk @@ -1677,7 +1664,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): The function looks up realpath of the device :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption (unsupported) - :param: bluestore: Use bluestore storage for OSD :returns: list. 
'ceph-disk' command and required parameters for execution by check_call """ @@ -1686,25 +1672,17 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): if encrypt: cmd.append('--dmcrypt') - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - # NOTE(jamespage): enable experimental bluestore support - if use_bluestore(): - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') + cmd.append('--bluestore') + wal = get_devices('bluestore-wal') + if wal: + cmd.append('--block.wal') + least_used_wal = find_least_used_utility_device(wal) + cmd.append(least_used_wal) + db = get_devices('bluestore-db') + if db: + cmd.append('--block.db') + least_used_db = find_least_used_utility_device(db) + cmd.append(least_used_db) cmd.append(os.path.realpath(dev)) @@ -1715,8 +1693,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): return cmd -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): +def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1726,7 +1704,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: dev: Full path to use for OSD block device setup :param: osd_journal: List of block devices to use for OSD journals :param: encrypt: Use block device encryption - :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting @@ -1739,13 +1716,8 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, osd_fsid = str(uuid.uuid4()) cmd.append('--osd-fsid') cmd.append(osd_fsid) - - if bluestore: - cmd.append('--bluestore') - main_device_type = 'block' - else: - cmd.append('--filestore') - main_device_type = 'data' + cmd.append('--bluestore') + main_device_type = 'block' if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') @@ -1753,19 +1725,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if osd_id is not None: cmd.extend(['--osd-id', str(osd_id)]) - # On-disk journal volume creation - if not osd_journal and not bluestore: - journal_lv_type = 'journal' - cmd.append('--journal') - cmd.append(_allocate_logical_volume( - dev=dev, - lv_type=journal_lv_type, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - encrypt=encrypt, - key_manager=key_manager) - ) - cmd.append('--data') cmd.append(_allocate_logical_volume(dev=dev, lv_type=main_device_type, @@ -1773,36 +1732,21 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, encrypt=encrypt, key_manager=key_manager)) - if bluestore: - for extra_volume in ('wal', 'db'): - devices = get_devices('bluestore-{}'.format(extra_volume)) - if devices: - cmd.append('--block.{}'.format(extra_volume)) - least_used = find_least_used_utility_device(devices, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type=extra_volume, - 
osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - elif osd_journal: - cmd.append('--journal') - least_used = find_least_used_utility_device(osd_journal, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type='journal', - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) + for extra_volume in ('wal', 'db'): + devices = get_devices('bluestore-{}'.format(extra_volume)) + if devices: + cmd.append('--block.{}'.format(extra_volume)) + least_used = find_least_used_utility_device(devices, + lvs=True) + cmd.append(_allocate_logical_volume( + dev=least_used, + lv_type=extra_volume, + osd_fsid=osd_fsid, + size='{}M'.format(calculate_volume_size(extra_volume)), + shared=True, + encrypt=encrypt, + key_manager=key_manager) + ) return cmd @@ -2040,7 +1984,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, return "{}/{}".format(vg_name, lv_name) -def osdize_dir(path, encrypt=False, bluestore=False): +def osdize_dir(path, encrypt=False): """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize @@ -2077,12 +2021,8 @@ def osdize_dir(path, encrypt=False, bluestore=False): if cmp_pkgrevno('ceph', '0.60') >= 0: if encrypt: cmd.append('--dmcrypt') + cmd.append('--bluestore') - # NOTE(icey): enable experimental bluestore support - if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore: - cmd.append('--bluestore') - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) @@ -2135,6 +2075,7 @@ def get_cephfs(service): for part in parts: if "name" in part: filesystems.append(part.split(' ')[1]) + return filesystems except subprocess.CalledProcessError: return [] @@ -3232,6 +3173,7 @@ def dirs_need_ownership_update(service): ('octopus', 'pacific'), ('pacific', 'quincy'), ('quincy', 'reef'), + ('reef', 'squid'), ]) # Map UCA codenames to Ceph codenames @@ -3256,6 +3198,7 @@ def dirs_need_ownership_update(service): 'zed': 'quincy', 'antelope': 'quincy', 'bobcat': 'reef', + 'caracal': 'squid', } diff --git a/ceph-proxy/metadata.yaml b/ceph-proxy/metadata.yaml index f79ee554..efff7340 100644 --- a/ceph-proxy/metadata.yaml +++ b/ceph-proxy/metadata.yaml @@ -12,8 +12,6 @@ tags: - misc series: - jammy -- lunar -- mantic extra-bindings: public: cluster: diff --git a/ceph-proxy/tests/bundles/jammy-antelope.yaml b/ceph-proxy/tests/bundles/jammy-antelope.yaml index 1e4e54e6..89e3da79 100644 --- a/ceph-proxy/tests/bundles/jammy-antelope.yaml +++ b/ceph-proxy/tests/bundles/jammy-antelope.yaml @@ -33,13 +33,13 @@ applications: cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -48,7 +48,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-mon: charm: ch:ceph-mon @@ -60,7 +60,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: reef/edge ceph-osd: charm: ch:ceph-osd @@ -73,7 +73,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: reef/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '10' - 
channel: latest/edge + channel: reef/edge cinder: charm: ch:cinder @@ -104,13 +104,13 @@ applications: constraints: mem=2048 to: - '11' - channel: latest/edge + channel: 2023.2/edge cinder-ceph: charm: ch:cinder-ceph options: restrict-ceph-pools: True - channel: latest/edge + channel: 2023.2/edge keystone: charm: ch:keystone @@ -121,7 +121,7 @@ applications: constraints: mem=1024 to: - '12' - channel: latest/edge + channel: 2023.2/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -129,7 +129,7 @@ applications: constraints: mem=1024 to: - '13' - channel: latest/edge + channel: 3.9/edge glance: charm: ch:glance @@ -138,7 +138,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: 2023.2/edge nova-compute: charm: ch:nova-compute @@ -147,7 +147,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: 2023.2/edge ubuntu: # used to test mounts charm: ch:ubuntu @@ -158,7 +158,7 @@ applications: ceph-fs: charm: ch:ceph-fs - channel: latest/edge + channel: reef/edge num_units: 1 to: - '18' diff --git a/ceph-proxy/tests/bundles/jammy-bobcat.yaml b/ceph-proxy/tests/bundles/jammy-bobcat.yaml index b431f527..1b9ce6ba 100644 --- a/ceph-proxy/tests/bundles/jammy-bobcat.yaml +++ b/ceph-proxy/tests/bundles/jammy-bobcat.yaml @@ -33,13 +33,13 @@ applications: cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -48,7 +48,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-mon: charm: ch:ceph-mon @@ -60,7 +60,7 @@ applications: - '3' - '4' - '5' - channel: latest/edge + channel: reef/edge ceph-osd: charm: ch:ceph-osd @@ -73,7 +73,7 @@ applications: - '6' - '7' - '8' - channel: latest/edge + channel: reef/edge ceph-proxy: charm: ../../ceph-proxy.charm @@ -90,7 +90,7 @@ applications: source: *openstack-origin to: - '10' - channel: latest/edge + channel: reef/edge cinder: charm: ch:cinder @@ -104,13 +104,13 @@ applications: constraints: mem=2048 to: - '11' - channel: latest/edge + channel: 2023.2/edge cinder-ceph: charm: ch:cinder-ceph options: restrict-ceph-pools: True - channel: latest/edge + channel: 2023.2/edge keystone: charm: ch:keystone @@ -121,7 +121,7 @@ applications: constraints: mem=1024 to: - '12' - channel: latest/edge + channel: 2023.2/edge rabbitmq-server: charm: ch:rabbitmq-server @@ -129,7 +129,7 @@ applications: constraints: mem=1024 to: - '13' - channel: latest/edge + channel: 3.9/edge glance: charm: ch:glance @@ -138,7 +138,7 @@ applications: openstack-origin: *openstack-origin to: - '14' - channel: latest/edge + channel: 2023.2/edge nova-compute: charm: ch:nova-compute @@ -147,7 +147,7 @@ applications: openstack-origin: *openstack-origin to: - '15' - channel: latest/edge + channel: 2023.2/edge ubuntu: # used to test mounts charm: ch:ubuntu @@ -158,7 +158,7 @@ applications: ceph-fs: charm: ch:ceph-fs - channel: latest/edge + channel: reef/edge num_units: 1 to: - '18' diff --git a/ceph-proxy/tests/bundles/lunar-antelope.yaml b/ceph-proxy/tests/bundles/jammy-caracal.yaml similarity index 95% rename from ceph-proxy/tests/bundles/lunar-antelope.yaml rename to ceph-proxy/tests/bundles/jammy-caracal.yaml index e6cdff99..4f16483b 100644 --- a/ceph-proxy/tests/bundles/lunar-antelope.yaml +++ 
b/ceph-proxy/tests/bundles/jammy-caracal.yaml @@ -1,7 +1,7 @@ variables: - openstack-origin: &openstack-origin distro + openstack-origin: &openstack-origin cloud:jammy-caracal -series: lunar +series: jammy comment: - 'machines section to decide order of deployment. database sooner = faster' @@ -33,13 +33,13 @@ applications: cinder-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge glance-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge keystone-mysql-router: charm: ch:mysql-router - channel: latest/edge + channel: 8.0/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster @@ -48,7 +48,7 @@ applications: - '0' - '1' - '2' - channel: latest/edge + channel: 8.0/edge ceph-mon: charm: ch:ceph-mon @@ -129,7 +129,7 @@ applications: constraints: mem=1024 to: - '13' - channel: latest/edge + channel: 3.9/edge glance: charm: ch:glance diff --git a/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml b/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml deleted file mode 100644 index 9b5f7f84..00000000 --- a/ceph-proxy/tests/bundles/lunar-antelope-ec.yaml +++ /dev/null @@ -1,224 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: lunar - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: latest/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - channel: latest/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - 
'14' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - channel: latest/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml b/ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml deleted file mode 100644 index a70a2ad7..00000000 --- a/ceph-proxy/tests/bundles/mantic-bobcat-ec.yaml +++ /dev/null @@ -1,224 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: mantic - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: latest/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - channel: latest/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - channel: latest/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 
'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/ceph-proxy/tests/bundles/mantic-bobcat.yaml b/ceph-proxy/tests/bundles/mantic-bobcat.yaml deleted file mode 100644 index a07c9bfc..00000000 --- a/ceph-proxy/tests/bundles/mantic-bobcat.yaml +++ /dev/null @@ -1,214 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: mantic - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: latest/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: latest/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: latest/edge - - ubuntu: # used to test mounts - charm: ch:ubuntu - num_units: 2 - to: - - '16' - - '17' - - ceph-fs: - charm: ch:ceph-fs - channel: latest/edge - num_units: 1 - to: - - '18' - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 
'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'ceph-proxy:mds' - - 'ceph-fs:ceph-mds' From 50390de77cc224f1ece0fd9662048c8f87cda5a2 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Jun 2024 18:12:54 +0200 Subject: [PATCH 2632/2699] Squid support: make caracal the default source Change-Id: Ic53636714dbe4b96f7106c93f6d7eafa9a018cde --- ceph-proxy/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-proxy/config.yaml b/ceph-proxy/config.yaml index 720fb347..ff510ed9 100644 --- a/ceph-proxy/config.yaml +++ b/ceph-proxy/config.yaml @@ -10,7 +10,7 @@ options: Setting this to True will allow supporting services to log to syslog. source: type: string - default: quincy + default: caracal description: | Repository from which to install. May be one of the following: distro (default), ppa:somecustom/ppa, a deb url sources entry, From 30081e9c0e89d9576b6e0de4fc55557012b49f48 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 5 Jun 2024 19:17:27 -0300 Subject: [PATCH 2633/2699] Add tests for jammy-caracal Change-Id: I8f5143835f12a49451bff4000a641bf0a06bf296 --- ceph-iscsi/osci.yaml | 46 ++------ .../tests/bundles/jammy-caracal-ec.yaml | 101 +++++++++++++++++ ceph-iscsi/tests/bundles/jammy-caracal.yaml | 102 ++++++++++++++++++ 3 files changed, 210 insertions(+), 39 deletions(-) create mode 100644 ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml create mode 100644 ceph-iscsi/tests/bundles/jammy-caracal.yaml diff --git a/ceph-iscsi/osci.yaml b/ceph-iscsi/osci.yaml index 591bacce..54ddc196 100644 --- a/ceph-iscsi/osci.yaml +++ b/ceph-iscsi/osci.yaml @@ -5,14 +5,10 @@ jobs: - ceph-iscsi-focal-quincy - ceph-iscsi-focal-quincy-ec - - ceph-iscsi-jammy-quincy - - ceph-iscsi-jammy-quincy-ec - ceph-iscsi-jammy-reef - ceph-iscsi-jammy-reef-ec - - ceph-iscsi-lunar-quincy: - voting: false - - ceph-iscsi-lunar-quincy-ec: - voting: false + - ceph-iscsi-jammy-caracal + - ceph-iscsi-jammy-caracal-ec vars: needs_charm_build: true charm_build_name: ceph-iscsi @@ -21,36 +17,13 @@ - job: name: ceph-iscsi-focal-quincy parent: func-target - dependencies: - - ceph-iscsi-jammy-quincy vars: tox_extra_args: -- focal - job: name: ceph-iscsi-focal-quincy-ec parent: func-target - dependencies: - - ceph-iscsi-jammy-quincy vars: tox_extra_args: -- focal-ec - -- job: - name: ceph-iscsi-jammy-quincy - parent: func-target - dependencies: - - charm-build - - osci-lint - - name: tox-py310 - soft: true - vars: - tox_extra_args: -- jammy -- job: - name: ceph-iscsi-jammy-quincy-ec - parent: func-target - dependencies: - - ceph-iscsi-jammy-quincy - vars: - tox_extra_args: -- jammy-ec - - job: name: ceph-iscsi-jammy-reef parent: func-target @@ -68,20 +41,15 @@ - ceph-iscsi-jammy-reef vars: tox_extra_args: -- jammy-reef-ec - - job: - name: ceph-iscsi-lunar-quincy + name: ceph-iscsi-jammy-caracal parent: func-target - voting: false - dependencies: - - ceph-iscsi-jammy-quincy vars: - tox_extra_args: -- lunar + tox_extra_args: -- jammy-caracal - job: - name: ceph-iscsi-lunar-quincy-ec + name: ceph-iscsi-jammy-caracal-ec parent: func-target - voting: false dependencies: - - ceph-iscsi-jammy-quincy + - ceph-iscsi-jammy-caracal vars: - tox_extra_args: -- lunar-ec + tox_extra_args: -- jammy-caracal-ec diff --git a/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml 
b/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml new file mode 100644 index 00000000..f887904f --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml @@ -0,0 +1,101 @@ +variables: + source: &source cloud:jammy-caracal + +local_overlay_enabled: False +series: jammy +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 2 + options: + gateway-metadata-pool: iscsi-foo-metadata + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + source: *source + to: + - '0' + - '1' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *source + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *source + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: 1.8/stable + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: 8.0/edge + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-caracal.yaml b/ceph-iscsi/tests/bundles/jammy-caracal.yaml new file mode 100644 index 00000000..a4dd5869 --- /dev/null +++ b/ceph-iscsi/tests/bundles/jammy-caracal.yaml @@ -0,0 +1,102 @@ +options: + source: &source cloud:jammy-caracal + +local_overlay_enabled: False +series: jammy +machines: + '0': + '1': + '2': + '3': + '4': + '5': + '6': + '7': + '8': + constraints: mem=3072M + '9': + constraints: mem=3072M + '10': + constraints: mem=3072M + '11': + '12': + '13': + '14': + '15': + '16': + '17': +applications: + ubuntu: + charm: cs:ubuntu + num_units: 3 + to: + - '7' + - '14' + - '15' + ceph-iscsi: + charm: ../../ceph-iscsi.charm + num_units: 4 + options: + gateway-metadata-pool: iscsi-foo-metadata + source: *source + to: + - '0' + - '1' + - '16' + - '17' + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: 'cinder,10G' + options: + osd-devices: '/dev/test-non-existent' + source: *source + to: + - '0' + - '1' + - '2' + - '11' + - '12' + - '13' + channel: latest/edge + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *source + to: + - '3' + - '4' + - '5' + channel: latest/edge + vault: + num_units: 1 + charm: ch:vault + to: + - '6' + channel: 1.8/stable + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + to: + - '8' + - '9' + - '10' + channel: 8.0/edge + vault-mysql-router: + charm: ch:mysql-router + channel: 8.0/edge + +relations: + - - 'ceph-mon:client' + - 'ceph-iscsi:ceph-client' + - - 'vault:certificates' + - 'ceph-iscsi:certificates' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'vault:shared-db' + - 'vault-mysql-router:shared-db' + - - 'vault-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' From 3e5f519554b6b0767248c7a0adc527e41dbfb306 
Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Tue, 11 Jun 2024 19:53:49 -0300 Subject: [PATCH 2634/2699] Allow the user to skip Bluestore features This patchset allows users to skip either the WAL device, the DB device, or both Bluestore devices when adding an OSD. It does so by modifying the 'add-disk' action to take an additional parameter, 'bluestore-skip', which specifies which features to avoid using. Change-Id: I744316656a2570950c42b1c3898a41a4185ffbd7 --- ceph-osd/actions.yaml | 5 ++++ ceph-osd/actions/add_disk.py | 17 ++++++++++--- ceph-osd/lib/charms_ceph/broker.py | 25 +++++++++----------- ceph-osd/lib/charms_ceph/utils.py | 21 +++++++++++----- ceph-osd/unit_tests/test_actions_add_disk.py | 2 +- 5 files changed, 46 insertions(+), 24 deletions(-) diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 7d620907..47f86ff3 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -69,6 +69,11 @@ add-disk: type: boolean description: | Must be set when 'use-crimson' is True. + bluestore-skip: + type: string + description: | + A comma-separated list of what Bluestore features to omit. This can + be the WAL and DB devices (for example - "wal,db"). required: - osd-devices blacklist-add-disk: diff --git a/ceph-osd/actions/add_disk.py b/ceph-osd/actions/add_disk.py index 15de3478..184c796e 100755 --- a/ceph-osd/actions/add_disk.py +++ b/ceph-osd/actions/add_disk.py @@ -79,8 +79,8 @@ def start_crimson_osd(osd_id, device): 'crimson-osd@{}'.format(osd_id)]) -def add_device(request, device_path, bucket=None, - osd_id=None, part_iter=None, use_crimson=False): +def add_device(request, device_path, bucket=None, osd_id=None, + part_iter=None, use_crimson=False, bluestore_skip=None): """Add a new device to be used by the OSD unit. :param request: A broker request to notify monitors of changes. @@ -99,6 +99,12 @@ def add_device(request, device_path, bucket=None, demand, to service bcache creation, or None, if no partitions need to be created. :type part_iter: Option[PartitionIter, None] + + :param use_crimson: Whether to use Crimson for the OSD (Experimental). + :type use_crimson: bool + + :param bluestore_skip: Which Bluestore features to avoid.
+ :type bluestore_skip: Option[str, None] """ if part_iter is not None: effective_dev = part_iter.create_bcache(device_path) @@ -111,12 +117,15 @@ def add_device(request, device_path, bucket=None, if osd_id is not None and osd_id.startswith('osd.'): osd_id = osd_id[4:] + if bluestore_skip: + bluestore_skip = bluestore_skip.split(',') + charms_ceph.utils.osdize(effective_dev, hookenv.config('osd-format'), ceph_hooks.get_journal_devices(), hookenv.config('ignore-device-errors'), hookenv.config('osd-encrypt'), hookenv.config('osd-encrypt-keymanager'), - osd_id) + osd_id, bluestore_skip) if use_crimson: start_crimson_osd(osd_id, effective_dev) @@ -233,6 +242,8 @@ def validate_partition_size(psize, devices, caches): else: osd_ids = [None] * len(devices) + bluestore_skip = hookenv.action_get('bluestore-skip') + errors = [] for dev, osd_id in zip(devices, osd_ids): try: diff --git a/ceph-osd/lib/charms_ceph/broker.py b/ceph-osd/lib/charms_ceph/broker.py index 7f453ec8..7ca96922 100644 --- a/ceph-osd/lib/charms_ceph/broker.py +++ b/ceph-osd/lib/charms_ceph/broker.py @@ -769,7 +769,7 @@ def handle_create_cephfs(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - if get_cephfs(service=service): + if cephfs_name in get_cephfs(service=service): # CephFS new has already been called log("CephFS already created") return @@ -853,15 +853,10 @@ def handle_create_cephfs_client(request, service): log(msg, level=ERROR) return {'exit-code': 1, 'stderr': msg} - # Check that the provided fs_name exists - if fs_name not in get_cephfs(service=service): - msg = ("Ceph filesystem {} does not exist." - + "Cannot authorize client").format( - fs_name) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Check that the provided client does NOT exist. + # Skip creation if the request has already been called + # This makes it a bit more compatible with older Ceph versions + # that throw when trying to authorize a user with the same + # capabilities that it currently has. try: cmd = ["ceph", "--id", service, "auth", "ls", "-f", "json"] auth_ls = json.loads(check_output(cmd, encoding="utf-8")) @@ -873,12 +868,14 @@ def handle_create_cephfs_client(request, service): return {'exit-code': 1, 'stderr': str(err)} client = "client.{}".format(client_id) - if client in (elem["entity"] for elem in auth_ls["auth_dump"]): - msg = "Client {} already exists".format(client) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} + for elem in auth_ls["auth_dump"]: + if client == elem["entity"]: + log("Client {} has already been created".format(client)) + return {'exit-code': 0, 'key': elem["key"]} # Try to authorize the client + # `ceph fs authorize` already returns the correct error + # message if the filesystem doesn't exist.
try: cmd = [ "ceph", diff --git a/ceph-osd/lib/charms_ceph/utils.py b/ceph-osd/lib/charms_ceph/utils.py index 57cb1d7b..85e6249b 100644 --- a/ceph-osd/lib/charms_ceph/utils.py +++ b/ceph-osd/lib/charms_ceph/utils.py @@ -1541,11 +1541,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): + key_manager=CEPH_KEY_MANAGER, osd_id=None, bluestore_skip=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - key_manager, osd_id) + key_manager, osd_id, bluestore_skip) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1556,7 +1556,7 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, key_manager=CEPH_KEY_MANAGER, - osd_id=None): + osd_id=None, bluestore_skip=None): """ Prepare a block device for use as a Ceph OSD @@ -1570,6 +1570,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, processing :param: encrypt: Encrypt block devices using 'key_manager' :param: key_manager: Key management approach for encryption keys + :param: osd_id: The ID for the newly created OSD + :param: bluestore_skip: Bluestore parameters to skip ('wal' and/or 'db') :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed :raises ValueError: if an invalid key_manager is provided @@ -1620,7 +1622,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, key_manager, - osd_id) + osd_id, + bluestore_skip) else: cmd = _ceph_disk(dev, osd_format, @@ -1694,7 +1697,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False): def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, - osd_id=None): + osd_id=None, bluestore_skip=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1706,6 +1709,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, :param: encrypt: Use block device encryption :param: key_manager: dm-crypt Key Manager to use :param: osd_id: The OSD-id to recycle, or None to create a new one + :param: bluestore_skip: Bluestore parameters to skip ('wal' and/or 'db') :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 
'ceph-volume' command and required parameters for @@ -1732,7 +1736,11 @@ def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER, encrypt=encrypt, key_manager=key_manager)) - for extra_volume in ('wal', 'db'): + extras = ('wal', 'db') + if bluestore_skip: + extras = tuple(set(extras) - set(bluestore_skip)) + + for extra_volume in extras: devices = get_devices('bluestore-{}'.format(extra_volume)) if devices: cmd.append('--block.{}'.format(extra_volume)) @@ -2075,6 +2083,7 @@ def get_cephfs(service): for part in parts: if "name" in part: filesystems.append(part.split(' ')[1]) + return filesystems except subprocess.CalledProcessError: return [] diff --git a/ceph-osd/unit_tests/test_actions_add_disk.py b/ceph-osd/unit_tests/test_actions_add_disk.py index 6e04308f..781ba427 100644 --- a/ceph-osd/unit_tests/test_actions_add_disk.py +++ b/ceph-osd/unit_tests/test_actions_add_disk.py @@ -53,7 +53,7 @@ def fake_config(key): self.hookenv.relation_set.assert_has_calls([call]) mock_osdize.assert_has_calls([mock.call('/dev/myosddev', None, '', True, True, - True, None)]) + True, None, None)]) piter = add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev']) mock_create_bcache = mock.MagicMock(side_effect=lambda b: '/dev/cache') From d218614c6e7fd624f70ee67e4748068d0360c573 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 18:14:29 +0200 Subject: [PATCH 2635/2699] Create squid-jammy branch Change-Id: I225fecdf2d9882d23101f7314e3c9fb1fe1a79f5 --- ceph-fs/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-fs/.gitreview b/ceph-fs/.gitreview index 245b1ad2..d8e1269c 100644 --- a/ceph-fs/.gitreview +++ b/ceph-fs/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-fs.git + +defaultbranch=stable/squid-jammy From b9ddebff70751b10158112a5d9a70948be780fdb Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:37:02 +0200 Subject: [PATCH 2636/2699] Create squid-jammy branch Change-Id: Icf7ae0937fe05966abc3a7234595e25769a58d65 --- ceph-dashboard/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-dashboard/.gitreview b/ceph-dashboard/.gitreview index 4b2e1139..7568dc50 100644 --- a/ceph-dashboard/.gitreview +++ b/ceph-dashboard/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-dashboard.git + +defaultbranch=stable/squid-jammy From 6f26076bdcd884989413f4fbdf34bc9b7e4ad15f Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:37:14 +0200 Subject: [PATCH 2637/2699] Create squid-jammy branch Change-Id: Ifb0ce27b7229c1a1e469c6c92cdeb75246cf1843 --- ceph-iscsi/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-iscsi/.gitreview b/ceph-iscsi/.gitreview index 577c572c..3525ef61 100644 --- a/ceph-iscsi/.gitreview +++ b/ceph-iscsi/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-iscsi.git + +defaultbranch=stable/squid-jammy From bb6fea1ac6ce0759c94b7f9126fd7aba2d1f1773 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:37:24 +0200 Subject: [PATCH 2638/2699] Create squid-jammy branch Change-Id: I5dda7a459c946eae1cdac0c8ae76b6083383b741 --- ceph-mon/.gitreview | 3 +- ceph-mon/tests/bundles/noble-caracal.yaml | 261 ---------------------- ceph-mon/tests/tests.yaml | 1 - 3 files changed, 2 insertions(+), 263 deletions(-) delete mode 100644 ceph-mon/tests/bundles/noble-caracal.yaml diff --git a/ceph-mon/.gitreview b/ceph-mon/.gitreview index 5c2f5880..6ebc9abf 
100644 --- a/ceph-mon/.gitreview +++ b/ceph-mon/.gitreview @@ -1,4 +1,5 @@ [gerrit] host=review.opendev.org port=29418 -project=openstack/charm-ceph-mon.git \ No newline at end of file +project=openstack/charm-ceph-mon.git +defaultbranch=stable/squid-jammy diff --git a/ceph-mon/tests/bundles/noble-caracal.yaml b/ceph-mon/tests/bundles/noble-caracal.yaml deleted file mode 100644 index b0b80b50..00000000 --- a/ceph-mon/tests/bundles/noble-caracal.yaml +++ /dev/null @@ -1,261 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -local_overlay_enabled: False - -series: noble - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: 8.0/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '9' - channel: 3.9/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: '10G' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - to: - - '3' - - '4' - - '5' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - channel: latest/edge - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - to: - - '17' - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: latest/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: latest/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - to: - - '16' - -relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 
'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 08d034d3..913c74d1 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -4,7 +4,6 @@ gate_bundles: - jammy-yoga - jammy-bobcat - jammy-caracal - - noble-caracal smoke_bundles: - jammy-caracal From 3b045a78f645718122aae3e0f6c21e2643e8e265 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:37:48 +0200 Subject: [PATCH 2639/2699] Create squid-jammy branch Change-Id: If300d270c26c02bbc9d3ca616f6e483fd3c03fb6 --- ceph-nfs/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-nfs/.gitreview b/ceph-nfs/.gitreview index 1156baba..10ffb19c 100644 --- a/ceph-nfs/.gitreview +++ b/ceph-nfs/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-nfs.git + +defaultbranch=stable/squid-jammy From 7685beca2086af7bed40886d134661b48450e209 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:38:00 +0200 Subject: [PATCH 2640/2699] Create squid-jammy branch Change-Id: I35a71ef09eeddcd1913bb7fd02f9108fe8c54610 --- ceph-osd/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-osd/.gitreview b/ceph-osd/.gitreview index 8c3d120d..0e144d01 100644 --- a/ceph-osd/.gitreview +++ b/ceph-osd/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-osd.git + +defaultbranch=stable/squid-jammy From af85b2e39ed23ac5f49d4f178a66141ad2653661 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:38:11 +0200 Subject: [PATCH 2641/2699] Create squid-jammy branch Change-Id: I324dde12f5107d902774c871ebc3e61259128aa7 --- ceph-proxy/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-proxy/.gitreview b/ceph-proxy/.gitreview index 134c66ec..d1e8ee12 100644 --- a/ceph-proxy/.gitreview +++ b/ceph-proxy/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-proxy.git + +defaultbranch=stable/squid-jammy From 
8fad6c4727dc8a9a5faa58c2530fd91b51c5e132 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:38:22 +0200 Subject: [PATCH 2642/2699] Create squid-jammy branch Change-Id: I2f52310d8802b5df387ef01edd1b964c36293ed9 --- ceph-radosgw/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-radosgw/.gitreview b/ceph-radosgw/.gitreview index 496586e6..fb258f86 100644 --- a/ceph-radosgw/.gitreview +++ b/ceph-radosgw/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-radosgw.git + +defaultbranch=stable/squid-jammy From 20a9f2d40650b0e89017aa05255e00bacea6c10a Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 21 Jun 2024 19:38:33 +0200 Subject: [PATCH 2643/2699] Create squid-jammy branch Change-Id: Id677722b3a99652747c187194d2eabc306edb0ad --- ceph-rbd-mirror/.gitreview | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ceph-rbd-mirror/.gitreview b/ceph-rbd-mirror/.gitreview index 4a1c91d6..66ebbc9a 100644 --- a/ceph-rbd-mirror/.gitreview +++ b/ceph-rbd-mirror/.gitreview @@ -2,3 +2,5 @@ host=review.opendev.org port=29418 project=openstack/charm-ceph-rbd-mirror.git + +defaultbranch=stable/squid-jammy From 4b7098ca96e88039fbe08b93be5ec80b64318f62 Mon Sep 17 00:00:00 2001 From: Rodrigo Barbieri Date: Mon, 10 Jun 2024 16:26:14 -0300 Subject: [PATCH 2644/2699] Defer restart of services on AppArmor change AppArmor changes cause restarts of all OSD services in all units at the same time, which can cause a ceph-cluster outage. Defer the actual update and set a status message prompting the operator to run an action on each unit separately. Related-bug: #2068020 Change-Id: I32398a0525b7098503de36d72e593c14207102a1 (cherry picked from commit b4a870af2c74638f139d73a09dc61cd43865c4f5) --- ceph-osd/actions.yaml | 5 + ceph-osd/actions/service.py | 4 +- .../actions/update-apparmor-and-restart-osds | 1 + ceph-osd/hooks/ceph_hooks.py | 77 +++++++-- ceph-osd/unit_tests/test_ceph_hooks.py | 161 +++++++++++++----- 5 files changed, 192 insertions(+), 56 deletions(-) create mode 120000 ceph-osd/actions/update-apparmor-and-restart-osds diff --git a/ceph-osd/actions.yaml b/ceph-osd/actions.yaml index 47f86ff3..3fe3c29d 100644 --- a/ceph-osd/actions.yaml +++ b/ceph-osd/actions.yaml @@ -149,6 +149,11 @@ stop: - osds security-checklist: description: Validate the running configuration against the OpenStack security guides checklist +update-apparmor-and-restart-osds: + description: | + Invoke pending continuation of update of AppArmor profiles followed by restarting OSD + services. Make sure to run this action separately in each unit at different times + to avoid simultaneous restart of OSDs.
get-availability-zone: description: | Obtain information about the availability zone, which will contain information about the CRUSH diff --git a/ceph-osd/actions/service.py b/ceph-osd/actions/service.py index 1768930d..b00f0cf5 100755 --- a/ceph-osd/actions/service.py +++ b/ceph-osd/actions/service.py @@ -28,7 +28,8 @@ function_fail, log, ) -from ceph_hooks import assess_status + +from ceph_hooks import assess_status, update_apparmor from utils import parse_osds_arguments, ALL START = 'start' @@ -138,6 +139,7 @@ def start(): ACTIONS = {'stop': stop, 'start': start, + 'update-apparmor-and-restart-osds': update_apparmor, } diff --git a/ceph-osd/actions/update-apparmor-and-restart-osds b/ceph-osd/actions/update-apparmor-and-restart-osds new file mode 120000 index 00000000..12afe70c --- /dev/null +++ b/ceph-osd/actions/update-apparmor-and-restart-osds @@ -0,0 +1 @@ +service.py \ No newline at end of file diff --git a/ceph-osd/hooks/ceph_hooks.py b/ceph-osd/hooks/ceph_hooks.py index bf81528c..37e58307 100755 --- a/ceph-osd/hooks/ceph_hooks.py +++ b/ceph-osd/hooks/ceph_hooks.py @@ -132,6 +132,10 @@ CRON_CEPH_CHECK_FILE = '/etc/cron.d/check-osd-services' +class AppArmorProfileNeverInstalledException(Exception): + pass + + def check_for_upgrade(): if not os.path.exists(_upgrade_keyring): @@ -211,18 +215,45 @@ def tune_network_adapters(): ceph.tune_nic(interface) +def check_aa_profile_needs_update(): + """ + Compares the hash of a new AA profile and the previously installed one, + if one exists. + """ + db = kv() + for x in glob.glob('files/apparmor/*'): + db_key = 'hash:{}'.format(x) + previous_hash = db.get(db_key) + if previous_hash is None: + raise AppArmorProfileNeverInstalledException() + new_hash = file_hash(x) + if new_hash != previous_hash: + return True + return False + + +def _set_pending_apparmor_update_status(): + # Setting to active to avoid impact of other workflows + status_set('active', + ('Pending update-apparmor-and-restart-osds action required,' + ' please refer to the action documentation.')) + + def aa_profile_changed(service_name='ceph-osd-all'): """ Reload AA profile and restart OSD processes. """ log("Loading new AppArmor profile") service_reload('apparmor') + if config('aa-profile-mode') == 'disable': + # No need to restart services if AppArmor is not enabled + return log("Restarting ceph-osd services with new AppArmor profile") if ceph.systemd(): - for osd_id in ceph.get_local_osd_ids(): - service_restart('ceph-osd@{}'.format(osd_id)) + service_restart('ceph-osd.target') else: service_restart(service_name) + assess_status() def copy_profile_into_place(): @@ -285,12 +316,8 @@ def use_vaultlocker(): return False -def install_apparmor_profile(): - """ - Install ceph apparmor profiles and configure - based on current setting of 'aa-profile-mode' - configuration option. - """ +def update_apparmor(): + """Action: Proceed to updating the profile and restarting OSDs.""" changes = copy_profile_into_place() # NOTE(jamespage): If any profiles where changed or # freshly installed then force @@ -302,6 +329,28 @@ def install_apparmor_profile(): aa_profile_changed() +def install_apparmor_profile(): + """ + Install ceph apparmor profiles and configure + based on current setting of 'aa-profile-mode' + configuration option. 
+ """ + changes = False + try: + changes = check_aa_profile_needs_update() + except AppArmorProfileNeverInstalledException: + update_apparmor() + return + if not changes: + return + if config('aa-profile-mode') != 'disable': + log("Deferring update of AppArmor profiles to avoid " + "restarting ceph-osd services all at the same time.") + _set_pending_apparmor_update_status() + else: + update_apparmor() + + def install_udev_rules(): """ Install and reload udev rules for ceph-volume LV @@ -959,8 +1008,16 @@ def assess_status(): status_set('blocked', 'No block devices detected using current configuration') else: - status_set('active', - 'Unit is ready ({} OSD)'.format(len(running_osds))) + aa_needs_update = False + try: + aa_needs_update = check_aa_profile_needs_update() + except AppArmorProfileNeverInstalledException: + pass + if aa_needs_update and config('aa-profile-mode') != 'disable': + _set_pending_apparmor_update_status() + else: + status_set('active', + 'Unit is ready ({} OSD)'.format(len(running_osds))) else: pristine = True # Check unmounted disks that should be configured but don't check diff --git a/ceph-osd/unit_tests/test_ceph_hooks.py b/ceph-osd/unit_tests/test_ceph_hooks.py index 2b604f59..e6d37c23 100644 --- a/ceph-osd/unit_tests/test_ceph_hooks.py +++ b/ceph-osd/unit_tests/test_ceph_hooks.py @@ -481,19 +481,104 @@ def test_ipv6_only_env_bindings(self, mock_config, mock_config2): } self.assertEqual(ctxt, expected) + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks.glob, "glob") + @patch.object(ceph_hooks, "file_hash") + def test_check_aa_profile_needs_update_True( + self, mock_hash, mock_glob, mock_kv): + mock_glob.return_value = ['file1', 'file2', 'file3'] + mock_hash.side_effect = ['hash1', 'hash2'] + mock_kv.return_value = {'hash:file1': 'hash1', + 'hash:file2': 'hash2_old'} + result = ceph_hooks.check_aa_profile_needs_update() + self.assertTrue(result) + mock_hash.assert_has_calls([call('file1'), call('file2')]) + + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks.glob, "glob") + @patch.object(ceph_hooks, "file_hash") + def test_check_aa_profile_needs_update_False( + self, mock_hash, mock_glob, mock_kv): + mock_glob.return_value = ['file1', 'file2', 'file3'] + mock_hash.side_effect = ['hash1', 'hash2', 'hash3'] + mock_kv.return_value = {'hash:file1': 'hash1', + 'hash:file2': 'hash2', + 'hash:file3': 'hash3'} + result = ceph_hooks.check_aa_profile_needs_update() + self.assertFalse(result) + mock_hash.assert_has_calls( + [call('file1'), call('file2'), call('file3')]) + + @patch.object(ceph_hooks, "kv") + @patch.object(ceph_hooks.glob, "glob") + @patch.object(ceph_hooks, "file_hash") + def test_check_aa_profile_needs_update_never_installed( + self, mock_hash, mock_glob, mock_kv): + mock_glob.return_value = ['file1', 'file2', 'file3'] + mock_kv.return_value = {} + self.assertRaises(ceph_hooks.AppArmorProfileNeverInstalledException, + ceph_hooks.check_aa_profile_needs_update) + mock_hash.assert_not_called() + + @patch.object(ceph_hooks, 'check_aa_profile_needs_update') + @patch.object(ceph_hooks, 'update_apparmor') + @patch.object(ceph_hooks, '_set_pending_apparmor_update_status') + def test_install_apparmor_profile_no_change( + self, mock_set, mock_update, mock_check): + mock_check.return_value = False + ceph_hooks.install_apparmor_profile() + mock_set.assert_not_called() + mock_update.assert_not_called() + + @patch.object(ceph_hooks, 'check_aa_profile_needs_update') + @patch.object(ceph_hooks, 'update_apparmor') + @patch.object(ceph_hooks, 
'_set_pending_apparmor_update_status') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile_disable( + self, mock_config, mock_set, mock_update, mock_check): + mock_check.return_value = True + mock_config.return_value = 'disable' + ceph_hooks.install_apparmor_profile() + mock_set.assert_not_called() + mock_update.assert_called_once_with() + + @patch.object(ceph_hooks, 'check_aa_profile_needs_update') + @patch.object(ceph_hooks, 'update_apparmor') + @patch.object(ceph_hooks, '_set_pending_apparmor_update_status') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile_never_installed( + self, mock_config, mock_set, mock_update, mock_check): + mock_check.side_effect = ( + ceph_hooks.AppArmorProfileNeverInstalledException) + ceph_hooks.install_apparmor_profile() + mock_config.assert_not_called() + mock_set.assert_not_called() + mock_update.assert_called_once_with() + + @patch.object(ceph_hooks, 'check_aa_profile_needs_update') + @patch.object(ceph_hooks, 'update_apparmor') + @patch.object(ceph_hooks, '_set_pending_apparmor_update_status') + @patch.object(ceph_hooks, 'config') + def test_install_apparmor_profile_enforce( + self, mock_config, mock_set, mock_update, mock_check): + mock_check.return_value = True + mock_config.return_value = 'enforce' + ceph_hooks.install_apparmor_profile() + mock_set.assert_called_once_with() + mock_update.assert_not_called() + + @patch.object(ceph_hooks, 'assess_status') @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'service_restart') @patch.object(ceph_hooks, 'service_reload') @patch.object(ceph_hooks, 'copy_profile_into_place') @patch.object(ceph_hooks, 'CephOsdAppArmorContext') @patch.object(ceph_hooks, 'config') - def test_install_apparmor_profile(self, mock_config, - mock_apparmor_context, - mock_copy_profile_into_place, - mock_service_reload, - mock_service_restart, - mock_ceph): - '''Apparmor profile reloaded when config changes (upstart)''' + def test_update_apparmor_upstart_config_changed( + self, mock_config, mock_apparmor_context, + mock_copy_profile_into_place, mock_service_reload, + mock_service_restart, mock_ceph, + mock_assess_status): m_config = MagicMock() m_config.changed.return_value = True mock_config.return_value = m_config @@ -502,81 +587,67 @@ def test_install_apparmor_profile(self, mock_config, mock_ceph.systemd.return_value = False mock_copy_profile_into_place.return_value = False - ceph_hooks.install_apparmor_profile() + ceph_hooks.update_apparmor() m_aa_context.setup_aa_profile.assert_called() mock_copy_profile_into_place.assert_called() mock_service_restart.assert_called_with('ceph-osd-all') m_config.changed.assert_called_with('aa-profile-mode') mock_service_reload.assert_called_with('apparmor') + mock_assess_status.assert_called_once_with() + @patch.object(ceph_hooks, 'assess_status') @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'service_restart') @patch.object(ceph_hooks, 'service_reload') @patch.object(ceph_hooks, 'copy_profile_into_place') @patch.object(ceph_hooks, 'CephOsdAppArmorContext') @patch.object(ceph_hooks, 'config') - def test_install_apparmor_profile_systemd(self, mock_config, - mock_apparmor_context, - mock_copy_profile_into_place, - mock_service_reload, - mock_service_restart, - mock_ceph): - '''Apparmor profile reloaded when config changes (systemd)''' - m_config = MagicMock() - m_config.changed.return_value = True - mock_config.return_value = m_config + def test_update_apparmor_systemd_profile_changed( + self, mock_config, mock_apparmor_context, + 
mock_copy_profile_into_place, mock_service_reload, + mock_service_restart, mock_ceph, + mock_assess_status): m_aa_context = MagicMock() mock_apparmor_context.return_value = m_aa_context mock_ceph.systemd.return_value = True - mock_ceph.get_local_osd_ids.return_value = [0, 1, 2] - mock_copy_profile_into_place.return_value = False + mock_copy_profile_into_place.return_value = True - ceph_hooks.install_apparmor_profile() + ceph_hooks.update_apparmor() m_aa_context.setup_aa_profile.assert_called() mock_copy_profile_into_place.assert_called() - m_config.changed.assert_called_with('aa-profile-mode') + mock_config.changed.assert_not_called() mock_service_reload.assert_called_with('apparmor') - mock_service_restart.assert_has_calls([ - call('ceph-osd@0'), - call('ceph-osd@1'), - call('ceph-osd@2'), - ]) + mock_service_restart.assert_called_once_with('ceph-osd.target') + mock_assess_status.assert_called_once_with() + @patch.object(ceph_hooks, 'assess_status') @patch.object(ceph_hooks, 'ceph') @patch.object(ceph_hooks, 'service_restart') @patch.object(ceph_hooks, 'service_reload') @patch.object(ceph_hooks, 'copy_profile_into_place') @patch.object(ceph_hooks, 'CephOsdAppArmorContext') @patch.object(ceph_hooks, 'config') - def test_install_apparmor_profile_new_install(self, mock_config, - mock_apparmor_context, - mock_copy_profile_into_place, - mock_service_reload, - mock_service_restart, - mock_ceph): - '''Apparmor profile always reloaded on fresh install''' - m_config = MagicMock() - m_config.changed.return_value = True - mock_config.return_value = m_config + def test_update_apparmor_disable( + self, mock_config, mock_apparmor_context, + mock_copy_profile_into_place, + mock_service_reload, mock_service_restart, + mock_ceph, mock_assess_status): + mock_config.return_value = 'disable' m_aa_context = MagicMock() mock_apparmor_context.return_value = m_aa_context mock_ceph.systemd.return_value = True - mock_ceph.get_local_osd_ids.return_value = [0, 1, 2] mock_copy_profile_into_place.return_value = True - ceph_hooks.install_apparmor_profile() + ceph_hooks.update_apparmor() m_aa_context.setup_aa_profile.assert_called() mock_copy_profile_into_place.assert_called() - m_config.changed.assert_not_called() + mock_config.changed.assert_not_called() mock_service_reload.assert_called_with('apparmor') - mock_service_restart.assert_has_calls([ - call('ceph-osd@0'), - call('ceph-osd@1'), - call('ceph-osd@2'), - ]) + mock_service_restart.assert_not_called() + mock_assess_status.assert_not_called() @patch.object(ceph_hooks, 'is_block_device') @patch.object(ceph_hooks, 'storage_list') From 8092b516ea53915e246f4be8ccc45bb0aa28d76c Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Mon, 29 Jul 2024 22:36:14 +0900 Subject: [PATCH 2645/2699] Don't make any changes during update-status hooks Previously, the config_changed function was invoked during update-status hooks and made unnecessary changes to the system. Guard reactive functions properly. > INFO unit.ceph-fs/0.juju-log Invoking reactive handler: > reactive/ceph_fs.py:42:config_changed Also, pin `path` to work around the charm-tools issue temporarily.
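For illustration, a minimal sketch of the guard pattern this patch applies, assuming the standard charms.reactive decorator API; the 'is-update-status-hook' flag and handler name mirror the diff that follows:

    import charms.reactive as reactive

    @reactive.when_none('charm.paused', 'is-update-status-hook')
    @reactive.when('ceph-mds.pools.available')
    def config_changed():
        # Listing 'is-update-status-hook' under when_none means this
        # handler no longer fires while the update-status hook runs,
        # so periodic status checks cannot mutate the system.
        ...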
Closes-Bug: #2074349 Related-Bug: #2071780 Change-Id: If6cd061fef4c3625d6d498942949e31f243622df (cherry picked from commit 032949448fc6d1a472ff5d3040b0862b893c85d8) --- ceph-fs/requirements.txt | 3 +++ ceph-fs/src/reactive/ceph_fs.py | 5 +++-- ceph-fs/unit_tests/test_reactive_ceph_fs.py | 8 ++++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index b3dc23f7..0c07cdd1 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -18,3 +18,6 @@ cryptography<3.4 git+https://github.com/juju/charm-tools.git simplejson + +# https://github.com/juju/charm-tools/issues/674 +path<17.0.0 diff --git a/ceph-fs/src/reactive/ceph_fs.py b/ceph-fs/src/reactive/ceph_fs.py index 2059a2d2..e58b3e45 100644 --- a/ceph-fs/src/reactive/ceph_fs.py +++ b/ceph-fs/src/reactive/ceph_fs.py @@ -41,7 +41,7 @@ ) -@reactive.when_none('charm.paused', 'run-default-update-status') +@reactive.when_none('charm.paused', 'is-update-status-hook') @reactive.when('ceph-mds.pools.available') def config_changed(): ceph_mds = reactive.endpoint_from_flag('ceph-mds.pools.available') @@ -77,6 +77,7 @@ def config_changed(): str(exc)) +@reactive.when_none('charm.paused', 'is-update-status-hook') @reactive.when('ceph-mds.connected') def storage_ceph_connected(ceph): ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected') @@ -219,7 +220,7 @@ def storage_ceph_connected(ceph): ceph_mds.send_request_if_needed(rq) -@reactive.when_none('charm.paused', 'run-default-update-status') +@reactive.when_none('charm.paused', 'is-update-status-hook') @reactive.when('cephfs.configured', 'ceph-mds.pools.available', 'cephfs-share.available') def cephfs_share_available(): diff --git a/ceph-fs/unit_tests/test_reactive_ceph_fs.py b/ceph-fs/unit_tests/test_reactive_ceph_fs.py index 4a8fce14..e165d8f8 100644 --- a/ceph-fs/unit_tests/test_reactive_ceph_fs.py +++ b/ceph-fs/unit_tests/test_reactive_ceph_fs.py @@ -47,11 +47,15 @@ def test_hooks(self): 'when_none': { 'config_changed': ( 'charm.paused', - 'run-default-update-status', + 'is-update-status-hook', + ), + 'storage_ceph_connected': ( + 'charm.paused', + 'is-update-status-hook', ), 'cephfs_share_available': ( 'charm.paused', - 'run-default-update-status', + 'is-update-status-hook', ), }, } From d8a3751c988819e968abcc6ccf4d17b2bc856aa6 Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Wed, 14 Aug 2024 13:32:21 +0900 Subject: [PATCH 2646/2699] Defer cos-prometheus for bootstrap When the cluster is not yet bootstrapped, we need to defer the event of enabling the prometheus module. This is the same logic as I3d274c09522f9d7ef56bc66f68d8488150c125d8. 
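For illustration, a minimal sketch of the defer-until-bootstrapped pattern, assuming the ops framework event API; the names mirror the diff that follows:

    def _on_refresh(self, event):
        if not ceph_utils.is_bootstrapped():
            # The cluster cannot accept mgr module changes yet, so
            # re-queue the event with defer(); it is replayed on a
            # later hook invocation instead of being dropped.
            event.defer()
            return
        ceph_utils.mgr_enable_module("prometheus")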
Closes-bug: #2074337 Related-bug: #2042891 Change-Id: Id9fd3c8bad504bfe7610de856798114f2b8c0fd3 (cherry picked from commit 8f59007236bae09cfaf1df38c650c4144ca84ee7) --- ceph-mon/src/ceph_metrics.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index e720c846..59298bdc 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -169,10 +169,17 @@ def __init__(self, charm): def _on_refresh(self, event): """Enable prometheus on relation change""" - if self._charm.unit.is_leader() and ceph_utils.is_bootstrapped(): - logger.debug("refreshing cos_agent relation") - mgr_config_set_rbd_stats_pools() - ceph_utils.mgr_enable_module("prometheus") + if not self._charm.unit.is_leader(): + return + + if not ceph_utils.is_bootstrapped(): + logger.debug("not bootstrapped, defer _on_refresh: %s", event) + event.defer() + return + + logger.debug("refreshing cos_agent relation") + mgr_config_set_rbd_stats_pools() + ceph_utils.mgr_enable_module("prometheus") super()._on_refresh(event) def _on_relation_departed(self, event): From a780e3f9ee8d7339045c952e86de6925455d0726 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 16 Oct 2024 12:23:19 +0200 Subject: [PATCH 2647/2699] Fix: add scrape configs on non-leaders as well Also remove deprecated network_get patch in unit testing Closes-bug: #2084663 Change-Id: If1b0595446d78fe451ce331d968b5d653f027291 Signed-off-by: Peter Sabaini --- ceph-mon/src/ceph_metrics.py | 9 +++------ ceph-mon/unit_tests/test_ceph_metrics.py | 3 +-- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ceph-mon/src/ceph_metrics.py b/ceph-mon/src/ceph_metrics.py index 59298bdc..363bdc81 100644 --- a/ceph-mon/src/ceph_metrics.py +++ b/ceph-mon/src/ceph_metrics.py @@ -169,17 +169,14 @@ def __init__(self, charm): def _on_refresh(self, event): """Enable prometheus on relation change""" - if not self._charm.unit.is_leader(): - return - if not ceph_utils.is_bootstrapped(): logger.debug("not bootstrapped, defer _on_refresh: %s", event) event.defer() return - logger.debug("refreshing cos_agent relation") - mgr_config_set_rbd_stats_pools() - ceph_utils.mgr_enable_module("prometheus") + if self._charm.unit.is_leader(): + mgr_config_set_rbd_stats_pools() + ceph_utils.mgr_enable_module("prometheus") super()._on_refresh(event) def _on_relation_departed(self, event): diff --git a/ceph-mon/unit_tests/test_ceph_metrics.py b/ceph-mon/unit_tests/test_ceph_metrics.py index 6e11c3b2..d684c322 100644 --- a/ceph-mon/unit_tests/test_ceph_metrics.py +++ b/ceph-mon/unit_tests/test_ceph_metrics.py @@ -14,7 +14,6 @@ import ceph_metrics # noqa: avoid circ. import import charm -import helpers class CephMetricsTestBase(unittest.TestCase): @@ -41,7 +40,6 @@ def tearDownClass(cls): cls.tempdir.cleanup() -@helpers.patch_network_get() class TestCephMetrics(CephMetricsTestBase): def setUp(self): super().setUp() @@ -50,6 +48,7 @@ def setUp(self): self.harness.begin() self.harness.set_leader(True) self.harness.charm.metrics_endpoint._alert_rules_path = self.rules_dir + self.harness.add_network('10.0.0.10') def test_init(self): self.assertEqual( From 4a3c0a3dd7b86a3c45c92a0e218bc8d91a1032fd Mon Sep 17 00:00:00 2001 From: Ponnuvel Palaniyappan Date: Thu, 29 Aug 2024 12:58:12 +0100 Subject: [PATCH 2648/2699] Fix a bug in cephfs subvolume size calculation The charm is documented to take the 'size' in gigabytes. But when passing it down to 'ceph fs subvolume', the size in bytes is calculated incorrectly.
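To make the error concrete: a requested 3 GiB share was converted with the old factor as 3 * 1024 * 1024 = 3145728 bytes (3 MiB), a 1024x under-allocation, while the corrected factor gives 3 * 1024 * 1024 * 1024 = 3221225472 bytes. A minimal sketch of the corrected conversion (the helper name is illustrative, not from the charm):

    GIB = 1024 ** 3  # bytes per gibibyte

    def gib_to_bytes(size_gib: int) -> int:
        # e.g. 3 -> 3221225472; the buggy 1024 ** 2 factor yielded
        # 3145728 (3 MiB) instead.
        return size_gib * GIB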
Closes-Bug: #2078019 Change-Id: I94ebe1bf506ef7741dbf9d2975a7ba82405a41ff Signed-off-by: Ponnuvel Palaniyappan (cherry picked from commit 28a1bb3f3c184991da255e0780e784f14b2f3b3f) --- ceph-nfs/src/ganesha.py | 4 +-- ceph-nfs/unit_tests/test_ganesha.py | 40 +++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/ceph-nfs/src/ganesha.py b/ceph-nfs/src/ganesha.py index 5c54c11a..0cd71d03 100644 --- a/ceph-nfs/src/ganesha.py +++ b/ceph-nfs/src/ganesha.py @@ -126,7 +126,7 @@ def create_share(self, name: str = None, size: int = None, if existing_shares: return existing_shares[0].path if size is not None: - size_in_bytes = size * 1024 * 1024 + size_in_bytes = size * 1024 * 1024 * 1024 if access_ips is None: access_ips = ['0.0.0.0'] # Ganesha deals with networks just fine, except when the network is @@ -188,7 +188,7 @@ def list_shares(self) -> List[Export]: return exports def resize_share(self, name: str, size: int): - size_in_bytes = size * 1024 * 1024 + size_in_bytes = size * 1024 * 1024 * 1024 self._ceph_subvolume_command('resize', 'ceph-fs', name, str(size_in_bytes), '--no_shrink') diff --git a/ceph-nfs/unit_tests/test_ganesha.py b/ceph-nfs/unit_tests/test_ganesha.py index ba2dc19c..be8d0fee 100644 --- a/ceph-nfs/unit_tests/test_ganesha.py +++ b/ceph-nfs/unit_tests/test_ganesha.py @@ -78,3 +78,43 @@ def test_remove_client(self): [ {'Access_Type': 'rw', 'Clients': '10.0.0.0/8, 192.168.0.0/16'}, ]) + + +class TestGaneshaNFS(unittest.TestCase): + + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_ceph_subvolume_command') + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_ganesha_add_export') + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_get_next_export_id') + @unittest.mock.patch.object(ganesha.GaneshaNFS, 'list_shares') + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_ceph_auth_key') + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_rados_get') + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_rados_put') + @unittest.mock.patch.object(ganesha.Export, 'to_export') + def test_create_share(self, mock_export, + mock_rados_put, + mock_rados_get, + mock_auth_key, + mock_list_shares, + mock_export_id, + mock_add_export, + mock_subvolume_command): + mock_subvolume_command.return_value = b'mock-volume' + mock_list_shares.return_value = [] + mock_export_id.return_value = 1 + mock_auth_key.return_value = b'mock-auth-key' + + inst = ganesha.GaneshaNFS('ceph-client', 'mypool') + inst.create_share('test-create-share', size=3, access_ips=None) + + mock_subvolume_command.assert_any_call('create', 'ceph-fs', + 'test-create-share', + str(3 * 1024 * 1024 * 1024)) + + @unittest.mock.patch.object(ganesha.GaneshaNFS, '_ceph_subvolume_command') + def test_resize_share(self, mock_subvolume_command): + inst = ganesha.GaneshaNFS('ceph-client', 'mypool') + inst.resize_share('test-resize-share', 5) + mock_subvolume_command.assert_any_call('resize', 'ceph-fs', + 'test-resize-share', + str(5 * 1024 * 1024 * 1024), + '--no_shrink') From 51408b82c0553e7423c6eef1d1deb029ea98d61e Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 15 Nov 2024 19:16:48 +0100 Subject: [PATCH 2649/2699] Keystone auth: try external auth first Closes-bug: #2083831 Signed-off-by: Peter Sabaini Change-Id: I948b0ba6601463569183743ce6157c913532f793 --- ceph-radosgw/templates/ceph.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ceph-radosgw/templates/ceph.conf b/ceph-radosgw/templates/ceph.conf index 03fa83b2..738bbb14 100644 --- a/ceph-radosgw/templates/ceph.conf +++ 
b/ceph-radosgw/templates/ceph.conf @@ -75,7 +75,7 @@ rgw keystone token cache size = {{ cache_size }} rgw keystone revocation interval = 0 {% endif -%} rgw s3 auth use keystone = true -rgw s3 auth order = local, external +rgw s3 auth order = external, local {% if namespace_tenants %} rgw swift account in url = true rgw keystone implicit tenants = true From a69fd62848544c4e37a62203f76b3255dc14672e Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 20 Dec 2024 17:54:28 +0100 Subject: [PATCH 2650/2699] SSL handling: check cert/key before attempting to set Also update some dependencies and fix tox config Change-Id: Ief48e2f67130f45b4aa83840636b11313a44bb96 Signed-off-by: Peter Sabaini --- ceph-dashboard/src/ceph_dashboard_commands.py | 71 ++++++++- ceph-dashboard/src/charm.py | 10 ++ ceph-dashboard/test-requirements.txt | 2 +- ceph-dashboard/tox.ini | 13 +- .../test_ceph_dashboard_commands.py | 136 ++++++++++++++++++ 5 files changed, 226 insertions(+), 6 deletions(-) create mode 100644 ceph-dashboard/unit_tests/test_ceph_dashboard_commands.py diff --git a/ceph-dashboard/src/ceph_dashboard_commands.py b/ceph-dashboard/src/ceph_dashboard_commands.py index d047e899..6dfbf494 100644 --- a/ceph-dashboard/src/ceph_dashboard_commands.py +++ b/ceph-dashboard/src/ceph_dashboard_commands.py @@ -5,8 +5,10 @@ # Learn more at: https://juju.is/docs/sdk import json +import os import socket -from typing import List +import tempfile +from typing import List, Tuple from functools import partial import subprocess @@ -150,3 +152,70 @@ def check_ceph_dashboard_ssl_configured( return False return True + + +def validate_ssl_keypair(cert: bytes, key: bytes) -> Tuple[bool, str]: + """Validates if a private key matches a certificate + + Args: + cert, key (bytes): SSL material + + Returns: + Tuple[bool, str]: bool for validity and an error message + """ + try: + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as cert_temp: + cert_temp.write(cert) + cert_path = cert_temp.name + + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as key_temp: + key_temp.write(key) + key_path = key_temp.name + except IOError as e: + return False, f"Failed to create temporary files: {str(e)}" + + try: + # check if pubkeys from cert and key match + try: + cert_pubkey_cmd = subprocess.run( + ["openssl", "x509", "-in", cert_path, "-noout", "-pubkey"], + capture_output=True, + text=True, + check=True, + ) + cert_pubkey = cert_pubkey_cmd.stdout.strip() + except subprocess.CalledProcessError as e: + return ( + False, + f"Failed to extract pubkey from cert: {e.stderr.strip()}", + ) + + try: + key_pubkey_cmd = subprocess.run( + ["openssl", "rsa", "-in", key_path, "-pubout"], + capture_output=True, + text=True, + check=True, + ) + key_pubkey = key_pubkey_cmd.stdout.strip() + except subprocess.CalledProcessError as e: + return ( + False, + f"Failed to extract pubkey from priv key: {e.stderr.strip()}", + ) + + if cert_pubkey != key_pubkey: + return False, "Certificate and private key do not match" + + return ( + True, + "Certificate and private key match and certificate is valid", + ) + + finally: + # Best effort clean up + try: + os.unlink(cert_path) + os.unlink(key_path) + except Exception: + pass diff --git a/ceph-dashboard/src/charm.py b/ceph-dashboard/src/charm.py index fe8eb69f..9313f5c9 100755 --- a/ceph-dashboard/src/charm.py +++ b/ceph-dashboard/src/charm.py @@ -569,6 +569,10 @@ def _certificates_relation_departed(self, event) -> None: def _configure_tls(self, key, cert, ca_cert, ca_cert_path) -> None: """Configure TLS using
provided credentials""" + is_valid, msg = cmds.validate_ssl_keypair(cert, key) + if not is_valid: + logging.error("Invalid SSL key/cert: %s", msg) + return self.TLS_KEY_PATH.write_bytes(key) self.TLS_CERT_PATH.write_bytes(cert) if ca_cert: @@ -697,6 +701,12 @@ def _status_check_conflicting_ssl_sources(self): "config is ignored. Remove conflicting source to proceed." ) + # Check for ssl material validity. + is_valid, msg = cmds.validate_ssl_keypair(cert, key) + if not is_valid: + return BlockedStatus( + "Invalid SSL key/cert: {}".format(msg) + ) return BlockedStatus("Unknown SSL source.") def _configure_tls_from_charm_config(self) -> None: diff --git a/ceph-dashboard/test-requirements.txt b/ceph-dashboard/test-requirements.txt index 90c36911..63b72495 100644 --- a/ceph-dashboard/test-requirements.txt +++ b/ceph-dashboard/test-requirements.txt @@ -3,7 +3,7 @@ # requirements management in charms via bot-control. Thank you. charm-tools>=2.4.4 coverage>=3.6 -flake8>=2.2.4,<=2.4.1 +flake8 pyflakes==2.1.1 stestr>=2.2.0 requests>=2.18.4 diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index aa5eec86..5483caa7 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -18,9 +18,9 @@ skip_missing_interpreters = False # * It is necessary to declare setuptools as a dependency otherwise tox will # fail very early at not being able to load it. The version pinning is in # line with `pip.sh`. -requires = pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 +requires = pip + virtualenv + setuptools # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci minversion = 3.2.0 @@ -40,7 +40,12 @@ allowlist_externals = rename.sh ls pwd -passenv = HOME TERM CS_* OS_* TEST_* +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt [testenv:py35] diff --git a/ceph-dashboard/unit_tests/test_ceph_dashboard_commands.py b/ceph-dashboard/unit_tests/test_ceph_dashboard_commands.py new file mode 100644 index 00000000..aa595a31 --- /dev/null +++ b/ceph-dashboard/unit_tests/test_ceph_dashboard_commands.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical +# See LICENSE file for licensing details. 
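+# Note on the function under test: it compares the PEM public keys that
+# openssl extracts from the certificate and from the private key. A minimal
+# sketch of an equivalent in-process check, assuming the 'cryptography'
+# package were used instead of shelling out (the charm does not do this):
+#
+#   from cryptography import x509
+#   from cryptography.hazmat.primitives import serialization
+#
+#   def keypair_matches(cert_pem: bytes, key_pem: bytes) -> bool:
+#       cert = x509.load_pem_x509_certificate(cert_pem)
+#       key = serialization.load_pem_private_key(key_pem, password=None)
+#
+#       def spki(public_key):
+#           # Serialize to SubjectPublicKeyInfo PEM for a byte-wise compare.
+#           return public_key.public_bytes(
+#               serialization.Encoding.PEM,
+#               serialization.PublicFormat.SubjectPublicKeyInfo)
+#
+#       return spki(cert.public_key()) == spki(key.public_key())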
+# +# Learn more at: https://juju.is/docs/sdk +import unittest +import subprocess +import tempfile +import os + +from ceph_dashboard_commands import validate_ssl_keypair + + +class TestSSLValidation(unittest.TestCase): + @classmethod + def setUpClass(cls): + """Generate test certificates and keys for all test cases""" + cls.valid_cert, cls.valid_key = cls._generate_cert_key_pair() + cls.another_cert, cls.another_key = cls._generate_cert_key_pair() + cls.malformed_cert = ( + b"-----BEGIN CERTIFICATE-----\nMalform\n-----END CERTIFICATE-----" + ) + cls.malformed_key = ( + b"-----BEGIN PRIVATE KEY-----\nMalform\n-----END PRIVATE KEY-----" + ) + + @staticmethod + def _generate_cert_key_pair(days=1): + """Generate a test certificate and private key pair""" + # create a key tmpfile + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as key_file: + subprocess.run( + [ + "openssl", + "genpkey", + "-algorithm", + "RSA", + "-out", + key_file.name, + ], + check=True, + capture_output=True, + ) + # openssl config file + with tempfile.NamedTemporaryFile( + mode="w", delete=False + ) as config_file: + config_content = """ + [req] + default_bits = 2048 + prompt = no + default_md = sha256 + distinguished_name = dn + x509_extensions = v3_req + + [dn] + CN = test.local + + [v3_req] + basicConstraints = CA:FALSE + keyUsage = nonRepudiation, digitalSignature, keyEncipherment + subjectAltName = @alt_names + + [alt_names] + DNS.1 = test.local + """ + config_file.write(config_content) + config_file.flush() + + # create certificate with config file + with tempfile.NamedTemporaryFile( + delete=False, mode="wb" + ) as cert_file: + subprocess.run( + [ + "openssl", + "req", + "-new", + "-x509", + "-key", + key_file.name, + "-out", + cert_file.name, + "-config", + config_file.name, + ], + check=True, + capture_output=True, + ) + with open(cert_file.name, "rb") as cert_f: + cert_content = cert_f.read() + with open(key_file.name, "rb") as key_f: + key_content = key_f.read() + + os.unlink(cert_file.name) + os.unlink(config_file.name) + os.unlink(key_file.name) + + return cert_content, key_content + + def test_valid_cert_key_pair(self): + """Test validation of a valid certificate and key pair""" + is_valid, message = validate_ssl_keypair( + self.valid_cert, self.valid_key + ) + self.assertTrue(is_valid) + + def test_mismatched_pair(self): + """Test validation with mismatched certificate and key""" + is_valid, message = validate_ssl_keypair( + self.valid_cert, self.another_key + ) + self.assertFalse(is_valid) + + def test_malformed_cert(self): + """Test validation with malformed certificate""" + is_valid, message = validate_ssl_keypair( + self.malformed_cert, self.valid_key + ) + self.assertFalse(is_valid) + + def test_malformed_key(self): + """Test validation with malformed key""" + is_valid, message = validate_ssl_keypair( + self.valid_cert, self.malformed_key + ) + self.assertFalse(is_valid) + + def test_empty_inputs(self): + """Test validation with empty inputs""" + is_valid, message = validate_ssl_keypair(b"", b"") + self.assertFalse(is_valid) + + +if __name__ == "__main__": + unittest.main() From bcd5d61be27a401c9df04fc482b18eb766e211d1 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 23 Jan 2025 12:34:26 +0100 Subject: [PATCH 2651/2699] Tests: pin boto3 For https://github.com/boto/boto3/issues/4398 Also lint updates Signed-off-by: Peter Sabaini Change-Id: Ieb248434b482f513060ca8b453e2068d51fde9cf Cherry picked from 525245830f9bfef332cf3ea2e505ead98cfe6240 --- ceph-radosgw/hooks/utils.py | 4 ++-- 
ceph-radosgw/test-requirements.txt | 3 +++ ceph-radosgw/tox.ini | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ceph-radosgw/hooks/utils.py b/ceph-radosgw/hooks/utils.py index 8397ac85..b861162c 100644 --- a/ceph-radosgw/hooks/utils.py +++ b/ceph-radosgw/hooks/utils.py @@ -256,10 +256,10 @@ def check_optional_config_and_relations(configs): "resolve." if (len(zonegroups) > 1 and config('zonegroup') not in zonegroups): - return('blocked', status_msg) + return ('blocked', status_msg) if len(zones) > 1 and config('zone') not in zones: - return('blocked', status_msg) + return ('blocked', status_msg) if not all(master_configured): return ('blocked', "Failure in Multisite migration, " diff --git a/ceph-radosgw/test-requirements.txt b/ceph-radosgw/test-requirements.txt index 59f3cbdb..c2b3d818 100644 --- a/ceph-radosgw/test-requirements.txt +++ b/ceph-radosgw/test-requirements.txt @@ -30,3 +30,6 @@ git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests psutil + +# https://github.com/boto/boto3/issues/4392 +boto3<1.36.0 diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 31118e12..b8a4b629 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -67,7 +67,7 @@ deps = basepython = python3 deps = -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt} - flake8==3.9.2 + flake8 git+https://github.com/juju/charm-tools.git commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof From f398d72c5b4bcd31ba3cb97992e33605edfaff87 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 11 Feb 2025 15:14:05 +0100 Subject: [PATCH 2652/2699] Migration: stable/squid-jammy updates Updates and fixes for the squid-jammy branch migration Signed-off-by: Peter Sabaini --- ceph-dashboard/test-requirements.txt | 4 - ceph-dashboard/tests/bundles/focal-yoga.yaml | 125 - ceph-dashboard/tests/bundles/focal.yaml | 114 - .../tests/bundles/jammy-antelope.yaml | 119 - .../tests/bundles/jammy-bobcat.yaml | 119 - .../tests/bundles/jammy-caracal.yaml | 82 +- ceph-dashboard/tests/target.py | 475 ++++ ceph-dashboard/tests/tests.yaml | 17 +- ceph-dashboard/tox.ini | 6 + ceph-fs/src/tests/target.py | 274 +++ ceph-fs/src/tests/tests.yaml | 23 +- ceph-fs/tox.ini | 70 +- ceph-iscsi/test-requirements.txt | 6 +- ceph-iscsi/tests/target.py | 321 +++ ceph-iscsi/tests/tests.yaml | 16 +- ceph-iscsi/tox.ini | 3 +- ceph-mon/src/ceph_client.py | 6 +- ceph-mon/tests/target.py | 2091 +++++++++++++++++ ceph-mon/tests/tests.yaml | 45 +- ceph-mon/tox.ini | 11 +- ceph-mon/unit_tests/helpers.py | 4 +- ceph-nfs/tests/nfs_ganesha.py | 109 +- ceph-nfs/tests/tests.yaml | 8 +- ceph-nfs/tox.ini | 83 +- ceph-osd/tests/target.py | 922 ++++++++ ceph-osd/tests/tests.yaml | 32 +- ceph-osd/tox.ini | 3 +- ceph-proxy/tests/target.py | 268 +++ ceph-proxy/tests/tests.yaml | 53 +- ceph-proxy/tox.ini | 40 +- ceph-radosgw/tests/target.py | 1084 +++++++++ ceph-radosgw/tests/tests.yaml | 49 +- ceph-radosgw/tox.ini | 1 + ceph-rbd-mirror/src/tests/target.py | 859 +++++++ ceph-rbd-mirror/src/tests/tests.yaml | 33 +- ceph-rbd-mirror/tox.ini | 33 +- 36 files changed, 6570 insertions(+), 938 deletions(-) delete mode 100644 ceph-dashboard/tests/bundles/focal-yoga.yaml delete mode 100644 ceph-dashboard/tests/bundles/focal.yaml delete mode 100644 ceph-dashboard/tests/bundles/jammy-antelope.yaml delete mode 100644 ceph-dashboard/tests/bundles/jammy-bobcat.yaml create mode 100644 
ceph-dashboard/tests/target.py create mode 100644 ceph-fs/src/tests/target.py create mode 100644 ceph-iscsi/tests/target.py create mode 100644 ceph-mon/tests/target.py create mode 100644 ceph-osd/tests/target.py create mode 100644 ceph-proxy/tests/target.py create mode 100644 ceph-radosgw/tests/target.py create mode 100644 ceph-rbd-mirror/src/tests/target.py diff --git a/ceph-dashboard/test-requirements.txt b/ceph-dashboard/test-requirements.txt index 63b72495..740ef811 100644 --- a/ceph-dashboard/test-requirements.txt +++ b/ceph-dashboard/test-requirements.txt @@ -1,7 +1,3 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. -charm-tools>=2.4.4 coverage>=3.6 flake8 pyflakes==2.1.1 diff --git a/ceph-dashboard/tests/bundles/focal-yoga.yaml b/ceph-dashboard/tests/bundles/focal-yoga.yaml deleted file mode 100644 index 7d7d1bf8..00000000 --- a/ceph-dashboard/tests/bundles/focal-yoga.yaml +++ /dev/null @@ -1,125 +0,0 @@ -local_overlay_enabled: False -series: focal -variables: - openstack-origin: &openstack-origin cloud:focal-yoga -applications: - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - source: *openstack-origin - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - options: - source: *openstack-origin - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: ch:prometheus2 - num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # October 2021 - charm: ch:grafana - num_units: 1 - options: - anonymous: True - install_method: snap - allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" - prometheus-alertmanager: - charm: ch:prometheus-alertmanager - num_units: 1 - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - options: - source: *openstack-origin - channel: quincy/edge - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: quincy/edge - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - source: *openstack-origin - gateway-metadata-pool: iscsi-foo-metadata - channel: quincy/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 
'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/focal.yaml b/ceph-dashboard/tests/bundles/focal.yaml deleted file mode 100644 index fedc7011..00000000 --- a/ceph-dashboard/tests/bundles/focal.yaml +++ /dev/null @@ -1,114 +0,0 @@ -local_overlay_enabled: False -series: focal -applications: - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: cs:prometheus2 - num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # Octrober 2021 - charm: ch:grafana - num_units: 1 - options: - anonymous: True - install_method: snap - allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" - prometheus-alertmanager: - charm: cs:prometheus-alertmanager - num_units: 1 - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - channel: latest/edge - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - channel: latest/edge - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 
'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/jammy-antelope.yaml b/ceph-dashboard/tests/bundles/jammy-antelope.yaml deleted file mode 100644 index b2497e93..00000000 --- a/ceph-dashboard/tests/bundles/jammy-antelope.yaml +++ /dev/null @@ -1,119 +0,0 @@ -local_overlay_enabled: False -series: jammy -variables: - openstack-origin: &openstack-origin cloud:jammy-antelope -applications: - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: ch:prometheus2 - num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # October 2021 - charm: ch:grafana - series: focal - channel: latest/stable - num_units: 1 - options: - anonymous: True - install_method: snap - allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" - prometheus-alertmanager: - charm: ch:prometheus-alertmanager - series: focal - num_units: 1 - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - channel: quincy/edge - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - channel: quincy/edge - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - channel: quincy/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml b/ceph-dashboard/tests/bundles/jammy-bobcat.yaml deleted file mode 100644 index 35bb4fe2..00000000 --- a/ceph-dashboard/tests/bundles/jammy-bobcat.yaml +++ /dev/null @@ -1,119 +0,0 @@ -local_overlay_enabled: False -series: jammy -variables: - openstack-origin: 
&openstack-origin cloud:jammy-bobcat -applications: - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G,2' - options: - osd-devices: '/dev/test-non-existent' - channel: reef/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - channel: reef/edge - vault: - num_units: 1 - charm: ch:vault - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - constraints: mem=3072M - num_units: 3 - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - ceph-dashboard: - charm: ../../ceph-dashboard.charm - options: - public-hostname: 'ceph-dashboard.zaza.local' - prometheus: - charm: ch:prometheus2 - num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # October 2021 - charm: ch:grafana - num_units: 1 - series: focal - channel: latest/stable - options: - anonymous: True - install_method: snap - allow_embedding: True - telegraf: - charm: telegraf - channel: stable - options: - hostname: "{host}" - prometheus-alertmanager: - charm: ch:prometheus-alertmanager - series: focal - num_units: 1 - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - channel: reef/edge - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - channel: reef/edge - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - channel: reef/edge -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - 'ceph-dashboard:dashboard' - - 'ceph-mon:dashboard' - - - 'ceph-dashboard:certificates' - - 'vault:certificates' - - - 'ceph-mon:prometheus' - - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-osd:juju-info' - - 'telegraf:juju-info' - - - 'ceph-mon:juju-info' - - 'telegraf:juju-info' - - - 'telegraf:prometheus-client' - - 'prometheus:target' - - - 'telegraf:dashboards' - - 'grafana:dashboards' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-dashboard:prometheus' - - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/bundles/jammy-caracal.yaml b/ceph-dashboard/tests/bundles/jammy-caracal.yaml index 1ce3e1e1..f9a9f93c 100644 --- a/ceph-dashboard/tests/bundles/jammy-caracal.yaml +++ b/ceph-dashboard/tests/bundles/jammy-caracal.yaml @@ -2,17 +2,30 @@ local_overlay_enabled: False series: jammy variables: openstack-origin: &openstack-origin cloud:jammy-caracal - source: &source distro + source: &source cloud:jammy-caracal + +machines: + '0': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '1': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '2': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + applications: ceph-osd: charm: 
ch:ceph-osd - num_units: 6 + num_units: 3 storage: - osd-devices: 'cinder,10G,2' + osd-devices: 'loop,10G' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' channel: latest/edge + to: + - '0' + - '1' + - '2' ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -23,7 +36,7 @@ applications: vault: num_units: 1 charm: ch:vault - channel: 1.8/stable + channel: latest/edge mysql-innodb-cluster: charm: ch:mysql-innodb-cluster constraints: mem=3072M @@ -35,46 +48,13 @@ applications: charm: ch:mysql-router channel: latest/edge ceph-dashboard: - charm: ../../ceph-dashboard.charm + charm: ch:ceph-dashboard options: public-hostname: 'ceph-dashboard.zaza.local' prometheus: charm: ch:prometheus2 num_units: 1 - grafana: - # SSL and allow_embedding are not released into cs:grafana yet, due - # Octrober 2021 - charm: ch:grafana - num_units: 1 - series: focal - channel: latest/stable - options: - anonymous: True - install_method: snap - allow_embedding: True - prometheus-alertmanager: - series: focal - charm: ch:prometheus-alertmanager - num_units: 1 - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 3 - options: - source: *openstack-origin - channel: latest/edge - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - ceph-iscsi: - charm: ch:ceph-iscsi - num_units: 2 - options: - source: *openstack-origin - gateway-metadata-pool: iscsi-foo-metadata - channel: latest/edge + series: jammy relations: - - 'ceph-osd:mon' - 'ceph-mon:osd' @@ -88,29 +68,5 @@ relations: - 'vault:certificates' - - 'ceph-mon:prometheus' - 'prometheus:target' - - - 'grafana:grafana-source' - - 'prometheus:grafana-source' - - - 'grafana:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:grafana-dashboard' - - 'grafana:dashboards' - - - 'ceph-dashboard:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - 'ceph-dashboard:prometheus' - 'prometheus:website' - - - 'prometheus:alertmanager-service' - - 'prometheus-alertmanager:alertmanager-service' - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - 'ceph-radosgw:certificates' - - 'vault:certificates' - - - 'ceph-dashboard:radosgw-dashboard' - - 'ceph-radosgw:radosgw-user' - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-dashboard:iscsi-dashboard' - - 'ceph-iscsi:admin-access' diff --git a/ceph-dashboard/tests/target.py b/ceph-dashboard/tests/target.py new file mode 100644 index 00000000..90e764ab --- /dev/null +++ b/ceph-dashboard/tests/target.py @@ -0,0 +1,475 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
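+# The helpers below poll the deployment with tenacity rather than sleeping.
+# The recurring pattern, reduced to a sketch (wait/stop values illustrative):
+#
+#   import tenacity
+#
+#   @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1,
+#                                                  min=5, max=10),
+#                   stop=tenacity.stop_after_attempt(10), reraise=True)
+#   def poll_once():
+#       ...  # raise to trigger a retry; return normally to stop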
+ +"""Encapsulating `ceph-dashboard` testing.""" + +import json +import uuid +import logging +import collections +from base64 import b64encode +import requests +import tenacity +import trustme + +import zaza +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +X509_CERT = ''' +MIICZDCCAg6gAwIBAgICBr8wDQYJKoZIhvcNAQEEBQAwgZIxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRQwEgYDVQQHEwtTYW50YSBDbGFyYTEeMBwGA1UEChMVU3VuIE1pY3Jvc3lz +dGVtcyBJbmMuMRowGAYDVQQLExFJZGVudGl0eSBTZXJ2aWNlczEcMBoGA1UEAxMTQ2VydGlmaWNh +dGUgTWFuYWdlcjAeFw0wNzAzMDcyMTUwMDVaFw0xMDEyMDEyMTUwMDVaMDsxFDASBgNVBAoTC2V4 +YW1wbGUuY29tMSMwIQYDVQQDExpMb2FkQmFsYW5jZXItMy5leGFtcGxlLmNvbTCBnzANBgkqhkiG +9w0BAQEFAAOBjQAwgYkCgYEAlOhN9HddLMpE3kCjkPSOFpCkDxTNuhMhcgBkYmSEF/iJcQsLX/ga +pO+W1SIpwqfsjzR5ZvEdtc/8hGumRHqcX3r6XrU0dESM6MW5AbNNJsBnwIV6xZ5QozB4wL4zREhw +zwwYejDVQ/x+8NRESI3ym17tDLEuAKyQBueubgjfic0CAwEAAaNgMF4wEQYJYIZIAYb4QgEBBAQD +AgZAMA4GA1UdDwEB/wQEAwIE8DAfBgNVHSMEGDAWgBQ7oCE35Uwn7FsjS01w5e3DA1CrrjAYBgNV +HREEETAPgQ1tYWxsYUBzdW4uY29tMA0GCSqGSIb3DQEBBAUAA0EAGhJhep7X2hqWJWQoXFcdU7eQ +''' + +X509_DATA = ''' +EwpDYWxpZm9ybmlhMRQwEgYDVQQHEwtTYW50YSBDbGFyYTEeMBwGA1UEChMVU3VuIE1pY3Jvc3lz +dGVtcyBJbmMuMRowGAYDVQQLExFJZGVudGl0eSBTZXJ2aWNlczEcMBoGA1UEAxMTQ2VydGlmaWNh +dGUgTWFuYWdlcjAeFw0wNzAzMDcyMjAxMTVaFw0xMDEyMDEyMjAxMTVaMDsxFDASBgNVBAoTC2V4 +YW1wbGUuY29tMSMwIQYDVQQDExpMb2FkQmFsYW5jZXItMy5leGFtcGxlLmNvbTCBnzANBgkqhkiG +HREEETAPgQ1tYWxsYUBzdW4uY29tMA0GCSqGSIb3DQEBBAUAA0EAEgbmnOz2Rvpj9bludb9lEeVa +OA46zRiyt4BPlbgIaFyG6P7GWSddMi/14EimQjjDbr4ZfvlEdPJmimHExZY3KQ== +''' + +SAML_IDP_METADATA = ''' + + + + + + + {cert} + + + + + + + + {data} + + + + + + urn:oasis:names:tc:SAML:2.0:nameid-format:persistent + + + urn:oasis:names:tc:SAML:2.0:nameid-format:transient + + + + +''' + + +def check_dashboard_cert(model_name=None): + """Wait for Dashboard to be ready. + + :param model_name: Name of model to query. + :type model_name: str + """ + logging.info("Check dashbaord Waiting for cacert") + openstack_utils.block_until_ca_exists( + 'ceph-dashboard', + 'CERTIFICATE', + model_name=model_name) + zaza.model.block_until_all_units_idle(model_name=model_name) + + +def set_grafana_url(model_name=None): + """Set the url for the grafana api. + + :param model_name: Name of model to query. + :type model_name: str + """ + try: + unit = zaza.model.get_units('grafana')[0] + except KeyError: + return + zaza.model.set_application_config( + 'ceph-dashboard', + { + 'grafana-api-url': "https://{}:3000".format( + zaza.model.get_unit_public_address(unit)) + }) + + +class CephDashboardTest(test_utils.BaseCharmTest): + """Class for `ceph-dashboard` tests.""" + + REMOTE_CERT_FILE = ('/usr/local/share/ca-certificates/' + 'vault_ca_cert_dashboard.crt') + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph dashboard tests.""" + super().setUpClass() + cls.application_name = 'ceph-dashboard' + cls.local_ca_cert = openstack_utils.get_remote_ca_cert_file( + cls.application_name) + + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, + min=5, max=10), + retry=tenacity.retry_if_exception_type( + requests.exceptions.ConnectionError), + reraise=True) + def _run_request_get(self, url, verify, allow_redirects): + """Run a GET request against `url` with tenacity retries. + + :param url: url to access + :type url: str + :param verify: Path to a CA_BUNDLE file or directory with certificates + of trusted CAs or False to ignore verifying the SSL + certificate. 
+ :type verify: Union[str, bool] + :param allow_redirects: Set to True if redirect following is allowed. + :type allow_redirects: bool + :returns: Request response + :rtype: requests.models.Response + """ + return requests.get( + url, + verify=verify, + allow_redirects=allow_redirects, + timeout=120) + + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, + min=5, max=10), + retry=tenacity.retry_if_exception_type( + requests.exceptions.ConnectionError), + reraise=True) + def _run_request_post(self, url, verify, data, headers): + """Run a POST request against `url` with tenacity retries. + + :param url: url to access + :type url: str + :param verify: Path to a CA_BUNDLE file or directory with certificates + of trusted CAs or False to ignore verifying the SSL + certificate. + :type verify: Union[str, bool] + :param data: Data to post to url + :type data: str + :param headers: Headers to set when posting + :type headers: dict + :returns: Request response + :rtype: requests.models.Response + """ + return requests.post( + url, + data=data, + headers=headers, + verify=verify, + timeout=120) + + @tenacity.retry(wait=tenacity.wait_fixed(2), reraise=True, + stop=tenacity.stop_after_attempt(90)) + def get_master_dashboard_url(self): + """Get the url of the dashboard servicing requests. + + Only one unit serves requests at any one time, the other units + redirect to that unit. + + :returns: URL of dashboard on unit + :rtype: Union[str, None] + """ + output = zaza.model.run_on_leader( + 'ceph-mon', + 'ceph mgr services')['Stdout'] + url = json.loads(output).get('dashboard') + if url is None: + raise tenacity.RetryError(None) + return url.strip('/') + + def test_001_dashboard_units(self): + """Check dashboard units are configured correctly.""" + self.verify_ssl_config(self.local_ca_cert) + + def create_user(self, username, role='administrator'): + """Create a dashboard user. + + :param username: Username to create. + :type username: str + :param role: Role to grant to user. + :type role: str + :returns: Results from action. + :rtype: juju.action.Action + """ + action = zaza.model.run_action_on_leader( + 'ceph-dashboard', + 'add-user', + action_params={ + 'username': username, + 'role': role}) + return action + + def get_random_username(self): + """Generate a username to use in tests. + + :returns: Username + :rtype: str + """ + return "zazauser-{}".format(uuid.uuid1()) + + def test_002_create_user(self): + """Test create user action.""" + test_user = self.get_random_username() + action = self.create_user(test_user) + self.assertEqual(action.status, "completed") + self.assertTrue(action.data['results']['password']) + action = self.create_user(test_user) + # Action should fail as the user already exists + self.assertEqual(action.status, "failed") + + def access_dashboard(self, dashboard_url): + """Test logging via a dashboard url. 
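+
+        Creates a throwaway dashboard user via the 'add-user' action and
+        logs in by POSTing its credentials to the api/auth endpoint.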
+ + :param dashboard_url: Base url to use to login to + :type dashboard_url: str + """ + user = self.get_random_username() + action = self.create_user(username=user) + self.assertEqual(action.status, "completed") + password = action.data['results']['password'] + path = "api/auth" + headers = { + 'Content-type': 'application/json', + 'Accept': 'application/vnd.ceph.api.v1.0+json'} + payload = {"username": user, "password": password} + verify = self.local_ca_cert + r = self._run_request_post( + "{}/{}".format(dashboard_url, path), + verify=verify, + data=json.dumps(payload), + headers=headers) + self.assertEqual(r.status_code, requests.codes.created) + + def test_003_access_dashboard(self): + """Test logging in to the dashboard.""" + self.access_dashboard(self.get_master_dashboard_url()) + + def test_004_ceph_keys(self): + """Check that ceph services are properly registered.""" + status = zaza.model.get_status() + applications = status.applications.keys() + dashboard_keys = [] + ceph_keys = [] + if 'ceph-radosgw' in applications: + dashboard_keys.extend(['RGW_API_ACCESS_KEY', 'RGW_API_SECRET_KEY']) + if 'grafana' in applications: + dashboard_keys.append('GRAFANA_API_URL') + if 'prometheus' in applications: + dashboard_keys.append('PROMETHEUS_API_HOST') + ceph_keys.extend( + ['config/mgr/mgr/dashboard/{}'.format(k) for k in dashboard_keys]) + if 'ceph-iscsi' in applications: + ceph_keys.append('mgr/dashboard/_iscsi_config') + for key in ceph_keys: + logging.info("Checking key {} exists".format(key)) + check_out = zaza.model.run_on_leader( + 'ceph-dashboard', + 'ceph config-key exists {}'.format(key)) + self.assertEqual(check_out['Code'], '0') + + @tenacity.retry(wait=tenacity.wait_fixed(2), reraise=True, + stop=tenacity.stop_after_attempt(20)) + def wait_for_saml_dashboard(self): + """Wait until the Ceph dashboard is enabled.""" + output = zaza.model.run_on_leader( + 'ceph-mon', + 'ceph dashboard sso status')['Stdout'] + if 'enabled' in output: + return + raise tenacity.RetryError(None) + + def test_005_saml(self): + """Check that the dashboard is accessible with SAML enabled.""" + url = self.get_master_dashboard_url() + idp_meta = SAML_IDP_METADATA.format( + cert=X509_CERT, + data=X509_DATA, + host=url) + + zaza.model.set_application_config( + 'ceph-dashboard', + {'saml-base-url': url, 'saml-idp-metadata': idp_meta} + ) + + self.wait_for_saml_dashboard() + + # Check that both login and metadata are accesible. + resp = self._run_request_get( + url + '/auth/saml2/login', + verify=self.local_ca_cert, + allow_redirects=False) + self.assertTrue(resp.status_code, requests.codes.ok) + + resp = self._run_request_get( + url + '/auth/saml2/metadata', + verify=self.local_ca_cert, + allow_redirects=False) + self.assertEqual(resp.status_code, requests.codes.ok) + + def is_app_deployed(self, app_name) -> bool: + """Check if the provided app is deployed in the zaza model.""" + try: + zaza.model.get_application(app_name) + return True + except KeyError: + return False + + def _get_wait_for_dashboard_assert_state( + self, state, message_prefix) -> dict: + """Generate a assert state for ceph-dashboard charm blocked state.""" + assert_state = { + 'ceph-dashboard': { + "workload-status": state, + "workload-status-message-prefix": message_prefix + } + } + # Telegraf has a non-standard active state message. 
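+        # (its active workload status reads "Monitoring ceph" rather than
+        # "Unit is ready", so the expected message prefix is special-cased)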
+ if self.is_app_deployed('telegraf'): + assert_state['telegraf'] = { + "workload-status": "active", + "workload-status-message-prefix": "Monitoring ceph" + } + + return assert_state + + def verify_ssl_config(self, ca_file): + """Check if request validates the configured SSL cert.""" + units = zaza.model.get_units('ceph-mon') + + for attempt in tenacity.Retrying( + wait=tenacity.wait_exponential(max=60), + reraise=True, stop=tenacity.stop_after_attempt(10) + ): + with attempt: + rcs = collections.defaultdict(list) + for unit in units: + req = self._run_request_get( + 'https://{}:8443'.format( + zaza.model.get_unit_public_address(unit)), + verify=ca_file, + allow_redirects=False) + rcs[req.status_code].append( + zaza.model.get_unit_public_address(unit) + ) + self.assertEqual(len(rcs[requests.codes.ok]), 1) + self.assertEqual( + len(rcs[requests.codes.see_other]), + len(units) - 1) + + def _get_dashboard_hostnames_sans(self): + """Get a generator for Dashboard unit public addresses.""" + yield 'ceph-dashboard' # Include hostname in san as well. + # Since Ceph-Dashboard is a subordinate application, + # we use the principle application to get public addresses. + for unit in zaza.model.get_units('ceph-mon'): + addr = zaza.model.get_unit_public_address(unit) + if addr: + yield addr + + def test_006_charm_config_ssl(self): + """Config charm SSL certs to test the Ceph dashboard application.""" + # Use RSA keys not ECSDA + local_ca = trustme.CA(key_type=trustme.KeyType.RSA) + server_cert = local_ca.issue_cert( + *self._get_dashboard_hostnames_sans(), + key_type=trustme.KeyType.RSA + ) + + ssl_cert = b64encode(server_cert.cert_chain_pems[0].bytes()).decode() + ssl_key = b64encode(server_cert.private_key_pem.bytes()).decode() + ssl_ca = b64encode(local_ca.cert_pem.bytes()).decode() + + # Configure local certs in charm config + zaza.model.set_application_config( + 'ceph-dashboard', + { + 'ssl_cert': ssl_cert, 'ssl_key': ssl_key, + 'ssl_ca': ssl_ca + } + ) + + # Check application status message. + assert_state = self._get_wait_for_dashboard_assert_state( + "blocked", "Conflict: Active SSL from 'certificates' relation" + ) + zaza.model.wait_for_application_states( + states=assert_state, timeout=500 + ) + + # Remove certificates relation to trigger configured certs. + zaza.model.remove_relation( + 'ceph-dashboard', 'ceph-dashboard:certificates', + 'vault:certificates' + ) + + # Wait for status to clear + assert_state = self._get_wait_for_dashboard_assert_state( + "active", "Unit is ready" + ) + zaza.model.wait_for_application_states( + states=assert_state, timeout=500 + ) + + # Verify Certificates. + with local_ca.cert_pem.tempfile() as ca_temp_file: + self.verify_ssl_config(ca_temp_file) + + # Re-add certificates relation + zaza.model.add_relation( + 'ceph-dashboard', 'ceph-dashboard:certificates', + 'vault:certificates' + ) + + # Check blocked status message + assert_state = self._get_wait_for_dashboard_assert_state( + "blocked", "Conflict: Active SSL from Charm config" + ) + zaza.model.wait_for_application_states( + states=assert_state, timeout=500 + ) + + # Remove SSL config + zaza.model.set_application_config( + 'ceph-dashboard', + {'ssl_cert': "", 'ssl_key': "", 'ssl_ca': ""} + ) + + # Wait for status to clear + assert_state = self._get_wait_for_dashboard_assert_state( + "active", "Unit is ready" + ) + zaza.model.wait_for_application_states( + states=assert_state, timeout=500 + ) + + # Verify Relation SSL certs. 
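+        # (self.local_ca_cert is the CA bundle fetched in setUpClass via
+        # get_remote_ca_cert_file, i.e. the relation-provided certificate)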
+ self.verify_ssl_config(self.local_ca_cert) diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 8852f622..af876141 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -1,17 +1,17 @@ charm_name: ceph-dasboard gate_bundles: - - focal - - jammy - - jammy-bobcat - jammy-caracal smoke_bundles: - - focal + - jammy-caracal +dev_bundles: + - jammy-caracal configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - - zaza.openstack.charm_tests.ceph.dashboard.setup.check_dashboard_cert - - zaza.openstack.charm_tests.ceph.dashboard.setup.set_grafana_url + - tests.target.setup.check_dashboard_cert + - tests.target.setup.set_grafana_url tests: - - zaza.openstack.charm_tests.ceph.dashboard.tests.CephDashboardTest + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephDashboardTest - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest target_deploy_status: ceph-dashboard: @@ -26,6 +26,3 @@ target_deploy_status: prometheus2: workload-status: active workload-status-message-prefix: Ready - telegraf: - workload-status: active - workload-status-message-prefix: Monitoring diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 5483caa7..3428801d 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -27,6 +27,7 @@ minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} ZAZA_FEATURE_BUG472=1 install_command = @@ -139,6 +140,11 @@ basepython = python3 commands = functest-run-suite --keep-model +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + [testenv:func-smoke] basepython = python3 commands = diff --git a/ceph-fs/src/tests/target.py b/ceph-fs/src/tests/target.py new file mode 100644 index 00000000..c4e135bc --- /dev/null +++ b/ceph-fs/src/tests/target.py @@ -0,0 +1,274 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
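+# A note on the charm-config SSL test in ceph-dashboard/tests/target.py
+# above: trustme mints a throwaway CA and server certificate, which are
+# base64-encoded into charm config. The core of that flow, reduced to a
+# sketch (the hostname is a placeholder):
+#
+#   import trustme
+#   from base64 import b64encode
+#
+#   ca = trustme.CA(key_type=trustme.KeyType.RSA)
+#   server = ca.issue_cert('ceph-dashboard', key_type=trustme.KeyType.RSA)
+#   ssl_cert = b64encode(server.cert_chain_pems[0].bytes()).decode()
+#   ssl_key = b64encode(server.private_key_pem.bytes()).decode()
+#   ssl_ca = b64encode(ca.cert_pem.bytes()).decode()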
+ +"""Encapsulate CephFS testing.""" + +import logging +import json +import subprocess +from tenacity import ( + retry, Retrying, stop_after_attempt, wait_exponential, + retry_if_exception_type) +import unittest +import zaza +import zaza.model as model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as zaza_utils + + +class CephFSTests(unittest.TestCase): + """Encapsulate CephFS tests.""" + + mounts_share = False + mount_dir = '/mnt/cephfs' + CEPH_MON = 'ceph-mon' + + def tearDown(self): + """Cleanup after running tests.""" + if self.mounts_share: + for unit in ['ceph-osd/0', 'ceph-osd/1']: + try: + zaza.utilities.generic.run_via_ssh( + unit_name=unit, + cmd='sudo fusermount -u {0} && sudo rmdir {0}'.format( + self.mount_dir)) + except subprocess.CalledProcessError: + logging.warning( + "Failed to cleanup mounts on {}".format(unit)) + + def _mount_share(self, unit_name: str, + retry: bool = True): + self._install_dependencies(unit_name) + self._install_keyring(unit_name) + ssh_cmd = ( + 'sudo mkdir -p {0} && ' + 'sudo ceph-fuse {0}'.format(self.mount_dir) + ) + if retry: + for attempt in Retrying( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=3, + min=2, max=10)): + with attempt: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + else: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + self.mounts_share = True + + def _install_keyring(self, unit_name: str): + + keyring = model.run_on_leader( + self.CEPH_MON, 'cat /etc/ceph/ceph.client.admin.keyring')['Stdout'] + config = model.run_on_leader( + self.CEPH_MON, 'cat /etc/ceph/ceph.conf')['Stdout'] + commands = [ + 'sudo mkdir -p /etc/ceph', + "echo '{}' | sudo tee /etc/ceph/ceph.conf".format(config), + "echo '{}' | " + 'sudo tee /etc/ceph/ceph.client.admin.keyring'.format(keyring) + ] + for cmd in commands: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=cmd) + + def _install_dependencies(self, unit: str): + zaza.utilities.generic.run_via_ssh( + unit_name=unit, + cmd='sudo apt-get install -yq ceph-fuse') + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(CephFSTests, cls).setUpClass() + + @retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=3, min=2, max=10)) + def _write_testing_file_on_instance(self, instance_name: str): + zaza.utilities.generic.run_via_ssh( + unit_name=instance_name, + cmd='echo -n "test" | sudo tee {}/test'.format(self.mount_dir)) + + @retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=3, min=2, max=10)) + def _verify_testing_file_on_instance(self, instance_name: str): + output = zaza.model.run_on_unit( + instance_name, 'sudo cat {}/test'.format(self.mount_dir))['Stdout'] + self.assertEqual('test', output.strip()) + + def test_cephfs_share(self): + """Test that CephFS shares can be accessed on two instances. + + 1. Spawn two servers + 2. mount it on both + 3. write a file on one + 4. read it on the other + 5. profit + """ + self._mount_share('ceph-osd/0') + self._mount_share('ceph-osd/1') + + self._write_testing_file_on_instance('ceph-osd/0') + self._verify_testing_file_on_instance('ceph-osd/1') + + def test_conf(self): + """Test ceph to ensure juju config options are properly set.""" + self.TESTED_UNIT = 'ceph-fs/0' + + def _get_conf(): + """get/parse ceph daemon response into dict. 
+ + :returns dict: Current configuration of the Ceph MDS daemon + :rtype: dict + """ + cmd = "sudo ceph daemon mds.$HOSTNAME config show" + conf = model.run_on_unit(self.TESTED_UNIT, cmd) + return json.loads(conf['Stdout']) + + @retry(wait=wait_exponential(multiplier=1, min=4, max=10), + stop=stop_after_attempt(10)) + def _change_conf_check(mds_config): + """Change configs, then assert to ensure config was set. + + Doesn't return a value. + """ + model.set_application_config('ceph-fs', mds_config) + results = _get_conf() + self.assertEqual( + results['mds_cache_memory_limit'], + mds_config['mds-cache-memory-limit']) + self.assertAlmostEqual( + float(results['mds_cache_reservation']), + float(mds_config['mds-cache-reservation'])) + self.assertAlmostEqual( + float(results['mds_health_cache_threshold']), + float(mds_config['mds-health-cache-threshold'])) + + # ensure defaults are set + mds_config = {'mds-cache-memory-limit': '4294967296', + 'mds-cache-reservation': '0.05', + 'mds-health-cache-threshold': '1.5'} + _change_conf_check(mds_config) + + # change defaults + mds_config = {'mds-cache-memory-limit': '8589934592', + 'mds-cache-reservation': '0.10', + 'mds-health-cache-threshold': '2'} + _change_conf_check(mds_config) + + # Restore config to keep tests idempotent + mds_config = {'mds-cache-memory-limit': '4294967296', + 'mds-cache-reservation': '0.05', + 'mds-health-cache-threshold': '1.5'} + _change_conf_check(mds_config) + + +class CharmOperationTest(test_utils.BaseCharmTest): + """CephFS Charm operation tests.""" + + def test_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped, then resume and check + they are started. + """ + services = ['ceph-mds'] + with self.pause_resume(services): + logging.info('Testing pause resume (services="{}")' + .format(services)) + + +class CephKeyRotationTests(test_utils.BaseCharmTest): + """Tests for the rotate-key action.""" + + def _get_all_keys(self, unit, entity_filter): + cmd = 'sudo ceph auth ls' + result = model.run_on_unit(unit, cmd) + # Don't use json formatting, as it's buggy upstream. + data = result['Stdout'].split() + ret = set() + + for ix, line in enumerate(data): + # Structure: + # $ENTITY + # key: + # key contents + # That's why we need to move one position ahead. + if 'key:' in line and entity_filter(data[ix - 1]): + ret.add((data[ix - 1], data[ix + 1])) + return ret + + def _check_key_rotation(self, entity, unit): + def entity_filter(name): + return name.startswith(entity) + + old_keys = self._get_all_keys(unit, entity_filter) + action_obj = model.run_action( + unit_name=unit, + action_name='rotate-key', + action_params={'entity': entity} + ) + zaza_utils.assertActionRanOK(action_obj) + # NOTE(lmlg): There's a nasty race going on here. Essentially, + # since this action involves 2 different applications, what + # happens is as follows: + # (1) (2) (3) (4) + # ceph-mon rotates key | (idle) | remote-unit rotates key | (idle) + # Between (2) and (3), there's a window where all units are + # idle, _but_ the key hasn't been rotated in the other unit. + # As such, we retry a few times instead of using the + # `wait_for_application_states` interface. 
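+        # Each retry below re-runs 'ceph auth ls' and succeeds only once the
+        # key set differs from the one captured before the action ran.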
+ + for attempt in Retrying( + wait=wait_exponential(multiplier=2, max=32), + reraise=True, stop=stop_after_attempt(20), + retry=retry_if_exception_type(AssertionError) + ): + with attempt: + new_keys = self._get_all_keys(unit, entity_filter) + self.assertNotEqual(old_keys, new_keys) + + diff = new_keys - old_keys + self.assertEqual(len(diff), 1) + first = next(iter(diff)) + # Check that the entity matches. The 'entity_filter' + # callable will return a true-like value if it + # matches the type of entity we're after (i.e: 'mgr') + self.assertTrue(entity_filter(first[0])) + + def _get_fs_client(self, unit): + def _filter_fs(name): + return (name.startswith('mds.') and + name not in ('mds.ceph-fs', 'mds.None')) + + ret = self._get_all_keys(unit, _filter_fs) + if not ret: + return None + return next(iter(ret))[0] + + def test_key_rotate(self): + """Test that rotating the keys actually changes them.""" + unit = 'ceph-mon/0' + fs_svc = self._get_fs_client(unit) + + if fs_svc is not None: + self._check_key_rotation(fs_svc, unit) + else: + logging.info('ceph-fs units present, but no MDS service') diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index a3c820ab..4f3d041f 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,24 +1,9 @@ charm_name: ceph-fs -gate_bundles: - - focal-xena - - focal-yoga - - jammy-yoga - - jammy-bobcat - - mantic-bobcat -smoke_bundles: - - focal-xena -# configure: -# - zaza.openstack.charm_tests.glance.setup.add_lts_image -# - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network -# - zaza.openstack.charm_tests.nova.setup.create_flavors -# - zaza.openstack.charm_tests.nova.setup.manage_ssh_key -# - zaza.openstack.charm_tests.keystone.setup.add_demo_user tests: - - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests - - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest - - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation - - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests - + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephFSTests + - tests.target.CharmOperationTest + - tests.target.CephKeyRotationTests target_deploy_status: ubuntu: workload-status: active diff --git a/ceph-fs/tox.ini b/ceph-fs/tox.ini index 50527f59..291604e4 100644 --- a/ceph-fs/tox.ini +++ b/ceph-fs/tox.ini @@ -5,50 +5,37 @@ # https://github.com/openstack-charmers/release-tools [tox] -skipsdist = True envlist = pep8,py3 # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. 
skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = - pip < 20.3 - virtualenv < 20.0 - setuptools<50.0.0 - tox < 4.0.0 - -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux - LAYER_PATH={toxinidir}/layers - INTERFACE_PATH={toxinidir}/interfaces + CHARM_LAYERS_DIR={toxinidir}/layers + CHARM_INTERFACES_DIR={toxinidir}/interfaces JUJU_REPOSITORY={toxinidir}/build -passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY -install_command = - {toxinidir}/pip.sh install {opts} {packages} + TEST_JUJU3=1 +passenv = + no_proxy + http_proxy + https_proxy + CHARM_INTERFACES_DIR + CHARM_LAYERS_DIR + JUJU_REPOSITORY allowlist_externals = charmcraft bash tox - rename.sh + {toxinidir}/rename.sh deps = -r{toxinidir}/requirements.txt [testenv:build] basepython = python3 -deps = -r{toxinidir}/build-requirements.txt commands = charmcraft clean charmcraft -v pack @@ -57,7 +44,7 @@ commands = [testenv:build-reactive] basepython = python3 commands = - charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} + charm-build --log-level DEBUG --use-lock-file-branches --binary-wheels-from-source -o {toxinidir}/build/builds src {posargs} [testenv:add-build-lock-file] basepython = python3 @@ -66,44 +53,25 @@ commands = [testenv:py3] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py36] -basepython = python3.6 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py38] -basepython = python3.8 -deps = -r{toxinidir}/test-requirements.txt +deps = + -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:py39] -basepython = python3.9 -deps = -r{toxinidir}/test-requirements.txt +[testenv:py310] +basepython = python3.10 +deps = + -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 -deps = flake8==3.9.2 +deps = flake8 commands = flake8 {posargs} src unit_tests [testenv:func-target] # Hack to get functional tests working in the charmcraft # world. We should fix this. basepython = python3 -passenv = HOME TERM CS_* OS_* TEST_* deps = -r{toxinidir}/src/test-requirements.txt changedir = {toxinidir}/src commands = diff --git a/ceph-iscsi/test-requirements.txt b/ceph-iscsi/test-requirements.txt index cff01fc9..e47e81d8 100644 --- a/ceph-iscsi/test-requirements.txt +++ b/ceph-iscsi/test-requirements.txt @@ -1,10 +1,6 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. Intead, consult #openstack-charms and ask about -# requirements management in charms via bot-control. Thank you. 
-charm-tools>=2.4.4 coverage>=3.6 mock>=1.2 -flake8>=2.2.4,<=2.4.1 +flake8 stestr>=2.2.0 requests>=2.18.4 psutil diff --git a/ceph-iscsi/tests/target.py b/ceph-iscsi/tests/target.py new file mode 100644 index 00000000..ad3b57b1 --- /dev/null +++ b/ceph-iscsi/tests/target.py @@ -0,0 +1,321 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulating `ceph-iscsi` testing.""" + +import logging +import tempfile + +import zaza +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils + + +class SimpleISCSITest(test_utils.BaseCharmTest): + + def test_pause_resume(self): + """Test pausing and resuming a unit.""" + with self.pause_resume( + ['rbd-target-api', 'rbd-target-gw'], + pgrep_full=True): + logging.info("Testing pause resume") + + +class CephISCSIGatewayTest(SimpleISCSITest): + """Class for `ceph-iscsi` tests.""" + + GW_IQN = "iqn.2003-03.com.canonical.iscsi-gw:iscsi-igw" + DATA_POOL_NAME = 'zaza_rep_pool' + EC_PROFILE_NAME = 'zaza_iscsi' + EC_DATA_POOL = 'zaza_ec_data_pool' + EC_METADATA_POOL = 'zaza_ec_metadata_pool' + + def get_client_initiatorname(self, unit): + """Return the initiatorname for the given unit. + + :param unit_name: Name of unit to match + :type unit: str + :returns: Initiator name + :rtype: str + """ + generic_utils.assertRemoteRunOK(zaza.model.run_on_unit( + unit, + ('cp /etc/iscsi/initiatorname.iscsi /tmp; ' + 'chmod 644 /tmp/initiatorname.iscsi'))) + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = '{}/{}'.format(tmpdirname, 'initiatorname.iscsi') + zaza.model.scp_from_unit( + unit, + '/tmp/initiatorname.iscsi', + tmp_file) + with open(tmp_file, 'r') as stream: + contents = stream.readlines() + initiatorname = None + for line in contents: + if line.startswith('InitiatorName'): + initiatorname = line.split('=')[1].rstrip() + return initiatorname + + def get_base_ctxt(self): + """Generate a context for running gwcli commands to create a target. + + :returns: Base gateway context + :rtype: Dict + """ + gw_units = zaza.model.get_units('ceph-iscsi') + host_names = generic_utils.get_unit_hostnames(gw_units, fqdn=True) + client_entity_ids = [ + u.entity_id for u in zaza.model.get_units('ubuntu')] + ctxt = { + 'client_entity_ids': sorted(client_entity_ids), + 'gw_iqn': self.GW_IQN, + 'chap_creds': 'username={chap_username} password={chap_password}', + 'gwcli_gw_dir': '/iscsi-targets/{gw_iqn}/gateways', + 'gwcli_hosts_dir': '/iscsi-targets/{gw_iqn}/hosts', + 'gwcli_disk_dir': '/disks', + 'gwcli_client_dir': '{gwcli_hosts_dir}/{client_initiatorname}', + } + ctxt['gateway_units'] = [ + { + 'entity_id': u.entity_id, + 'ip': zaza.model.get_unit_public_address(u), + 'hostname': host_names[u.entity_id]} + for u in zaza.model.get_units('ceph-iscsi')] + ctxt['gw_ip'] = sorted([g['ip'] for g in ctxt['gateway_units']])[0] + return ctxt + + def run_commands(self, unit_name, commands, ctxt): + """Run commands on unit. 
+ + Iterate over each command and apply the context to the command, then + run the command on the supplied unit. + + :param unit_name: Name of unit to match + :type unit: str + :param commands: List of commands to run. + :type commands: List[str] + :param ctxt: Context to apply to each command. + :type ctxt: Dict + :raises: AssertionError + """ + for _cmd in commands: + cmd = _cmd.format(**ctxt) + generic_utils.assertRemoteRunOK(zaza.model.run_on_unit( + unit_name, + cmd)) + + def create_iscsi_target(self, ctxt): + """Create target on gateway. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-iscsi', + 'create-target', + action_params={ + 'gateway-units': ' '.join([g['entity_id'] + for g in ctxt['gateway_units']]), + 'iqn': self.GW_IQN, + 'rbd-pool-name': ctxt.get('pool_name', ''), + 'ec-rbd-metadata-pool': ctxt.get('ec_meta_pool_name', ''), + 'image-size': ctxt['img_size'], + 'image-name': ctxt['img_name'], + 'client-initiatorname': ctxt['client_initiatorname'], + 'client-username': ctxt['chap_username'], + 'client-password': ctxt['chap_password'] + })) + + def login_iscsi_target(self, ctxt): + """Login to the iscsi target on client. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + logging.info("Logging in to iscsi target") + base_op_cmd = ('iscsiadm --mode node --targetname {gw_iqn} ' + '--op=update ').format(**ctxt) + setup_cmds = [ + 'iscsiadm -m discovery -t st -p {gw_ip}', + base_op_cmd + '-n node.session.auth.authmethod -v CHAP', + base_op_cmd + '-n node.session.auth.username -v {chap_username}', + base_op_cmd + '-n node.session.auth.password -v {chap_password}', + 'iscsiadm --mode node --targetname {gw_iqn} --login'] + self.run_commands(ctxt['client_entity_id'], setup_cmds, ctxt) + + def logout_iscsi_targets(self, ctxt): + """Logout of iscsi target on client. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + logging.info("Logging out of iscsi target") + logout_cmds = [ + 'iscsiadm --mode node --logoutall=all'] + self.run_commands(ctxt['client_entity_id'], logout_cmds, ctxt) + + def check_client_device(self, ctxt, init_client=True): + """Wait for multipath device to appear on client and test access. + + :param ctxt: Base gateway context + :type ctxt: Dict + :param init_client: Initialise client if this is the first time it has + been used. 
+        :type init_client: bool
+        """
+        logging.info("Checking multipath device is present.")
+        device_ctxt = {
+            'bdevice': '/dev/dm-0',
+            'mount_point': '/mnt/iscsi',
+            'test_file': '/mnt/iscsi/test.data'}
+        ls_bdevice_cmd = 'ls -l {bdevice}'
+        mkfs_cmd = 'mke2fs {bdevice}'
+        mkdir_cmd = 'mkdir {mount_point}'
+        mount_cmd = 'mount {bdevice} {mount_point}'
+        umount_cmd = 'umount {mount_point}'
+        check_mounted_cmd = 'mountpoint {mount_point}'
+        write_cmd = 'truncate -s 1M {test_file}'
+        check_file = 'ls -l {test_file}'
+        if init_client:
+            commands = [
+                mkfs_cmd,
+                mkdir_cmd,
+                mount_cmd,
+                check_mounted_cmd,
+                write_cmd,
+                check_file,
+                umount_cmd]
+        else:
+            commands = [
+                mount_cmd,
+                check_mounted_cmd,
+                check_file,
+                umount_cmd]
+
+        async def check_device_present():
+            run = await zaza.model.async_run_on_unit(
+                ctxt['client_entity_id'],
+                ls_bdevice_cmd.format(bdevice=device_ctxt['bdevice']))
+            return device_ctxt['bdevice'] in run['stdout']
+
+        logging.info("Checking {} is present on {}".format(
+            device_ctxt['bdevice'],
+            ctxt['client_entity_id']))
+        zaza.model.block_until(check_device_present)
+        logging.info("Checking mounting device and access")
+        self.run_commands(ctxt['client_entity_id'], commands, device_ctxt)
+
+    def create_data_pool(self):
+        """Create data pool to back iscsi targets."""
+        generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
+            'ceph-mon',
+            'create-pool',
+            action_params={
+                'name': self.DATA_POOL_NAME}))
+
+    def create_ec_data_pool(self):
+        """Create erasure-coded data pool to back iscsi targets."""
+        generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
+            'ceph-mon',
+            'create-erasure-profile',
+            action_params={
+                'name': self.EC_PROFILE_NAME,
+                'coding-chunks': 2,
+                'data-chunks': 4,
+                'plugin': 'jerasure'}))
+        generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
+            'ceph-mon',
+            'create-pool',
+            action_params={
+                'name': self.EC_DATA_POOL,
+                'pool-type': 'erasure-coded',
+                'allow-ec-overwrites': True,
+                'erasure-profile-name': self.EC_PROFILE_NAME}))
+        generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
+            'ceph-mon',
+            'create-pool',
+            action_params={
+                'name': self.EC_METADATA_POOL}))
+
+    def refresh_partitions(self, ctxt):
+        """Refresh kernel partition tables in client."""
+        self.run_commands(ctxt['client_entity_id'], ('partprobe', ), ctxt)
+
+    def run_client_checks(self, test_ctxt):
+        """Check access to multipath device.
+
+        Write a filesystem to the device, mount it and write data. Then
+        unmount and log out of the iscsi target; finally reconnect,
+        remount and check the data is still present.
+
+        :param test_ctxt: Test context.
+ :type test_ctxt: Dict + """ + self.create_iscsi_target(test_ctxt) + self.login_iscsi_target(test_ctxt) + self.refresh_partitions(test_ctxt) + self.check_client_device(test_ctxt, init_client=True) + self.logout_iscsi_targets(test_ctxt) + self.login_iscsi_target(test_ctxt) + self.refresh_partitions(test_ctxt) + self.check_client_device(test_ctxt, init_client=False) + self.refresh_partitions(test_ctxt) + + def test_create_and_mount_volume(self): + """Test creating a target and mounting it on a client.""" + self.create_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][0] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'pool_name': self.DATA_POOL_NAME, + 'chap_username': 'myiscsiusername1', + 'chap_password': 'myiscsipassword1', + 'img_size': '1G', + 'img_name': 'disk_rep_1'}) + self.run_client_checks(ctxt) + + def test_create_and_mount_ec_backed_volume(self): + """Test creating an EC backed target and mounting it on a client.""" + self.create_ec_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][1] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'pool_name': self.EC_DATA_POOL, + 'ec_meta_pool_name': self.EC_METADATA_POOL, + 'chap_username': 'myiscsiusername2', + 'chap_password': 'myiscsipassword2', + 'img_size': '2G', + 'img_name': 'disk_ec_1'}) + self.run_client_checks(ctxt) + + def test_create_and_mount_volume_default_pool(self): + """Test creating a target and mounting it on a client.""" + self.create_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][2] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'chap_username': 'myiscsiusername3', + 'chap_password': 'myiscsipassword3', + 'img_size': '3G', + 'img_name': 'disk_default_1'}) + self.run_client_checks(ctxt) diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index fd844339..ef6812e6 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -1,21 +1,15 @@ charm_name: ceph-iscsi gate_bundles: - - focal-ec - - focal - - jammy-ec - - jammy - - jammy-reef - - jammy-reef-ef + - jammy-caracal smoke_bundles: - - jammy + - jammy-caracal dev_bundles: - - jammy-ec - - jammy + - jammy-caracal configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup tests: - - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.SimpleISCSITest - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation target_deploy_status: ubuntu: diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index a7703d60..23c8fab8 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -15,6 +15,7 @@ skip_missing_interpreters = False skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} commands = stestr run --slowest {posargs} allowlist_externals = @@ -138,4 +139,4 @@ commands = [flake8] # Ignore E902 because the unit_tests directory is missing in the built charm. 
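+# W504 is listed because an explicit "ignore" replaces flake8's default
+# ignore list, so the warning for line breaks after binary operators has
+# to be re-suppressed to keep the existing wrapping style passing.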
-ignore = E402,E226,E902 +ignore = E402,E226,E902,W504 diff --git a/ceph-mon/src/ceph_client.py b/ceph-mon/src/ceph_client.py index 65a3ebca..e29e6a11 100644 --- a/ceph-mon/src/ceph_client.py +++ b/ceph-mon/src/ceph_client.py @@ -68,7 +68,8 @@ def _on_relation_changed(self, event): self._handle_client_relation(event.relation, event.unit) def _get_ceph_info_from_configs(self): - """Create dictionary of ceph information required to set client relation. + """ + Create dictionary of ceph information required to set client relation. :returns: Dictionary of ceph configurations needed for client relation :rtype: dict @@ -126,7 +127,8 @@ def _handle_client_relation(self, relation, unit): def _handle_broker_request( self, relation, unit, add_legacy_response=False, force=False): - """Retrieve broker request from relation, process, return response data. + """ + Retrieve broker request from relation, process, return response data. :param event: Operator event for the relation :type relid: Event diff --git a/ceph-mon/tests/target.py b/ceph-mon/tests/target.py new file mode 100644 index 00000000..c3b0da6d --- /dev/null +++ b/ceph-mon/tests/target.py @@ -0,0 +1,2091 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ceph Testing.""" + +import unittest +import json +import logging +from os import ( + listdir, + path +) +import requests +import tempfile +import boto3 +import botocore.exceptions +import urllib3 + +import tenacity + +import zaza.charm_lifecycle.utils as lifecycle_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.model as zaza_model +import zaza.openstack.utilities.ceph as zaza_ceph +import zaza.openstack.utilities.exceptions as zaza_exceptions +import zaza.openstack.utilities.generic as zaza_utils +import zaza.utilities.juju as juju_utils +import zaza.openstack.utilities.openstack as zaza_openstack +import zaza.openstack.utilities.generic as generic_utils + +# Disable warnings for ssl_verify=false +urllib3.disable_warnings( + urllib3.exceptions.InsecureRequestWarning +) + + +class CephLowLevelTest(test_utils.BaseCharmTest): + """Ceph Low Level Test Class.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph low level tests.""" + super(CephLowLevelTest, cls).setUpClass() + + def test_processes(self): + """Verify Ceph processes. + + Verify that the expected service processes are running + on each ceph unit. 
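+
+        One ceph-mon and one ceph-mgr process are expected on every mon
+        unit, and between one and three ceph-osd daemons on every osd
+        unit, as encoded in the expectation tables below.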
+ """ + logging.info('Checking ceph-mon and ceph-osd processes...') + # Process name and quantity of processes to expect on each unit + ceph_mon_processes = { + 'ceph-mon': 1, + 'ceph-mgr': 1, + } + + ceph_osd_processes = { + 'ceph-osd': [1, 2, 3] + } + + # Units with process names and PID quantities expected + expected_processes = { + 'ceph-mon/0': ceph_mon_processes, + 'ceph-mon/1': ceph_mon_processes, + 'ceph-mon/2': ceph_mon_processes, + 'ceph-osd/0': ceph_osd_processes, + 'ceph-osd/1': ceph_osd_processes, + 'ceph-osd/2': ceph_osd_processes + } + + actual_pids = zaza_utils.get_unit_process_ids(expected_processes) + ret = zaza_utils.validate_unit_process_ids(expected_processes, + actual_pids) + self.assertTrue(ret) + + def test_services(self): + """Verify the ceph services. + + Verify the expected services are running on the service units. + """ + logging.info('Checking ceph-osd and ceph-mon services...') + services = {} + ceph_services = ['ceph-mon', 'ceph-mgr'] + services['ceph-osd/0'] = ['ceph-osd'] + + services['ceph-mon/0'] = ceph_services + services['ceph-mon/1'] = ceph_services + services['ceph-mon/2'] = ceph_services + + for unit_name, unit_services in services.items(): + zaza_model.block_until_service_status( + unit_name=unit_name, + services=unit_services, + target_status='running' + ) + + @test_utils.skipUntilVersion('ceph-mon', 'ceph', '14.2.0') + def test_pg_tuning(self): + """Verify that auto PG tuning is enabled for Nautilus+.""" + unit_name = 'ceph-mon/0' + cmd = "ceph osd pool autoscale-status --format=json" + result = zaza_model.run_on_unit(unit_name, cmd) + self.assertEqual(result['Code'], '0') + for pool in json.loads(result['Stdout']): + self.assertEqual(pool['pg_autoscale_mode'], 'on') + + +class CephTest(test_utils.BaseCharmTest): + """Ceph common functional tests.""" + + @classmethod + def setUpClass(cls): + """Run the ceph's common class setup.""" + super(CephTest, cls).setUpClass() + + def osd_out_in(self, services): + """Run OSD out and OSD in tests. + + Remove OSDs and then add them back in on a unit checking that services + are in the required state after each action + + :param services: Services expected to be restarted when config_file is + changed. + :type services: list + """ + zaza_model.block_until_service_status( + self.lead_unit, + services, + 'running', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'active', + model_name=self.model_name) + zaza_model.run_action( + self.lead_unit, + 'osd-out', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'maintenance', + model_name=self.model_name) + zaza_model.block_until_all_units_idle(model_name=self.model_name) + zaza_model.run_action( + self.lead_unit, + 'osd-in', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'active', + model_name=self.model_name) + zaza_model.block_until_all_units_idle(model_name=self.model_name) + zaza_model.block_until_service_status( + self.lead_unit, + services, + 'running', + model_name=self.model_name) + + def test_ceph_check_osd_pools(self): + """Check OSD pools. + + Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present. 
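+
+        The expected pool list comes from zaza's get_expected_pools()
+        helper, so it follows the deployed bundle rather than a
+        hard-coded set of pool names.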
+ """ + try: + zaza_model.get_application('cinder-ceph') + except KeyError: + raise unittest.SkipTest("Skipping OpenStack dependent test") + logging.info('Checking pools on ceph units...') + + expected_pools = zaza_ceph.get_expected_pools() + results = [] + unit_name = 'ceph-mon/0' + + # Check for presence of expected pools on each unit + logging.debug('Expected pools: {}'.format(expected_pools)) + pools = zaza_ceph.get_ceph_pools(unit_name) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: + msg = ('{} does not have pool: ' + '{}'.format(unit_name, expected_pool)) + raise zaza_exceptions.CephPoolNotFound(msg) + logging.debug('{} has (at least) the expected ' + 'pools.'.format(unit_name)) + + # Check that all units returned the same pool name:id data + for i, result in enumerate(results): + for other in results[i+1:]: + logging.debug('result: {}, other: {}'.format(result, other)) + self.assertEqual(result, other) + + def test_ceph_pool_creation_with_text_file(self): + """Check the creation of a pool and a text file. + + Create a pool, add a text file to it and retrieve its content. + Verify that the content matches the original file. + """ + unit_name = 'ceph-mon/0' + cmd = 'sudo ceph osd pool create test {PG_NUM}; \ + echo 123456789 > /tmp/input.txt; \ + rados put -p test test_input /tmp/input.txt; \ + rados get -p test test_input /dev/stdout' + cmd = cmd.format(PG_NUM=32) + logging.debug('Creating test pool and putting test file in pool...') + result = zaza_model.run_on_unit(unit_name, cmd) + code = result.get('Code') + if code != '0': + raise zaza_model.CommandRunFailed(cmd, result) + output = result.get('Stdout').strip() + logging.debug('Output received: {}'.format(output)) + self.assertEqual(output, '123456789') + + def test_ceph_encryption(self): + """Test Ceph encryption. + + Verify that the new disk is added with encryption by checking for + Ceph's encryption keys directory. 
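+
+        The key material is expected under /etc/ceph/dmcrypt-keys/ on the
+        ceph-osd unit; a key file there newer than the config change is
+        taken as evidence that the new device was set up with dm-crypt.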
+        """
+        current_release = zaza_openstack.get_os_release(application='ceph-mon')
+        trusty_mitaka = zaza_openstack.get_os_release('trusty_mitaka')
+        if current_release >= trusty_mitaka:
+            logging.warn("Skipping encryption test for Mitaka and higher")
+            return
+        unit_name = 'ceph-osd/0'
+        set_default = {
+            'osd-encrypt': 'False',
+            'osd-devices': '/dev/vdb /srv/ceph',
+        }
+        set_alternate = {
+            'osd-encrypt': 'True',
+            'osd-devices': '/dev/vdb /srv/ceph /srv/ceph_encrypted',
+        }
+        juju_service = 'ceph-osd'
+        logging.info('Making config change on {}...'.format(juju_service))
+        mtime = zaza_model.get_unit_time(unit_name)
+
+        file_mtime = None
+
+        folder_name = '/etc/ceph/dmcrypt-keys/'
+        with self.config_change(set_default, set_alternate,
+                                application_name=juju_service):
+            with tempfile.TemporaryDirectory() as tempdir:
+                # Creating a temp dir to copy keys
+                temp_folder = '/tmp/dmcrypt-keys'
+                cmd = 'mkdir {}'.format(temp_folder)
+                ret = zaza_model.run_on_unit(unit_name, cmd)
+                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
+                # Copy keys from /etc to /tmp
+                cmd = 'sudo cp {}* {}'.format(folder_name, temp_folder)
+                ret = zaza_model.run_on_unit(unit_name, cmd)
+                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
+                # Changing permissions to be able to SCP the files
+                cmd = 'sudo chown -R ubuntu:ubuntu {}'.format(temp_folder)
+                ret = zaza_model.run_on_unit(unit_name, cmd)
+                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
+                # SCP to retrieve all files in folder
+                # -p: preserve timestamps
+                source = '/tmp/dmcrypt-keys/*'
+                zaza_model.scp_from_unit(unit_name=unit_name,
+                                         source=source,
+                                         destination=tempdir,
+                                         scp_opts='-p')
+                for elt in listdir(tempdir):
+                    file_path = '/'.join([tempdir, elt])
+                    if path.isfile(file_path):
+                        file_mtime = path.getmtime(file_path)
+                        if file_mtime:
+                            break
+
+        if not file_mtime:
+            logging.warn('Could not determine mtime, assuming '
+                         'folder does not exist')
+            raise FileNotFoundError('folder does not exist')
+
+        if file_mtime >= mtime:
+            logging.info('Folder mtime is newer than provided mtime '
+                         '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                    mtime, unit_name))
+        else:
+            logging.warn('Folder mtime is older than provided mtime '
+                         '(%s < %s) on %s' % (file_mtime,
+                                              mtime, unit_name))
+            raise Exception('Folder mtime is older than provided mtime')
+
+    def test_blocked_when_non_pristine_disk_appears(self):
+        """Test blocked state with non-pristine disk.
+
+        Validate that the charm goes into blocked state when it is presented
+        with new block devices that have foreign data on them.
+        Instances used in UOSCI have a flavour with ephemeral storage in
+        addition to the bootable instance storage.  The ephemeral storage
+        device is partitioned, formatted and mounted early in the boot process
+        by cloud-init.
+        As long as the device is mounted the charm will not attempt to use it.
+        If we unmount it and trigger the config-changed hook the block device
+        will appear as a new and previously untouched device for the charm.
+        One of the first steps of device eligibility checks should be to make
+        sure we are seeing a pristine and empty device before doing any
+        further processing.
+        As the ephemeral device will have data on it we can use it to validate
+        that these checks work as intended.
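+
+        A rough sketch of the sequence driven below, in Juju 2.x CLI
+        terms (the device name is the ServerStack ephemeral disk)::
+
+            juju config ceph-osd ephemeral-unmount=/mnt osd-devices=/dev/vdb
+            # charm blocks with "Non-pristine ..."
+            juju run-action ceph-osd/0 zap-disk devices=/dev/vdb \
+                i-really-mean-it=true
+            juju run-action ceph-osd/0 add-disk osd-devices=/dev/vdb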
+ """ + current_release = zaza_openstack.get_os_release(application='ceph-mon') + focal_ussuri = zaza_openstack.get_os_release('focal_ussuri') + if current_release >= focal_ussuri: + # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb + # and so this test can't pass: LP#1842751 discusses the issue, but + # basically the snapd daemon along with lxcfs results in /dev/vdb + # being mounted in the lxcfs process namespace. If the charm + # 'tries' to umount it, it can (as root), but the mount is still + # 'held' by lxcfs and thus nothing else can be done with it. This + # is only a problem in serverstack with images with a default + # /dev/vdb ephemeral + logging.warn("Skipping pristine disk test for focal and higher") + return + logging.info('Checking behaviour when non-pristine disks appear...') + logging.info('Configuring ephemeral-unmount...') + alternate_conf = { + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb' + } + juju_service = 'ceph-osd' + zaza_model.set_application_config(juju_service, alternate_conf) + ceph_osd_states = { + 'ceph-osd': { + 'workload-status': 'blocked', + 'workload-status-message': 'Non-pristine' + } + } + zaza_model.wait_for_application_states(states=ceph_osd_states) + logging.info('Units now in blocked state, running zap-disk action...') + unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2'] + for unit_name in unit_names: + zap_disk_params = { + 'devices': '/dev/vdb', + 'i-really-mean-it': True, + } + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='zap-disk', + action_params=zap_disk_params + ) + logging.debug('Result of action: {}'.format(action_obj)) + + logging.info('Running add-disk action...') + for unit_name in unit_names: + add_disk_params = { + 'osd-devices': '/dev/vdb', + } + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='add-disk', + action_params=add_disk_params + ) + logging.debug('Result of action: {}'.format(action_obj)) + + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states() + + logging.info('OK') + + set_default = { + 'ephemeral-unmount': '', + 'osd-devices': '/dev/vdb', + } + + bionic_train = zaza_openstack.get_os_release('bionic_train') + if current_release < bionic_train: + set_default['osd-devices'] = '/dev/vdb /srv/ceph' + + logging.info('Restoring to default configuration...') + zaza_model.set_application_config(juju_service, set_default) + + zaza_model.wait_for_application_states() + + def test_pause_and_resume(self): + """The services can be paused and resumed.""" + logging.info('Checking pause and resume actions...') + self.pause_resume(['ceph-osd']) + + def get_device_for_blacklist(self, unit): + """Return a device to be used by the blacklist tests.""" + cmd = "mount | grep 'on / ' | awk '{print $1}'" + obj = zaza_model.run_on_unit(unit, cmd) + return obj.get('Stdout').strip() + + def test_blacklist(self): + """Check the blacklist action. + + The blacklist actions execute and behave as expected. 
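+
+        Roughly the CLI equivalent of (device path illustrative)::
+
+            juju run-action ceph-osd/0 blacklist-add-disk \
+                osd-devices=/dev/vda
+            juju run-action ceph-osd/0 blacklist-remove-disk \
+                osd-devices=/dev/vda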
+        """
+        logging.info('Checking blacklist-add-disk and '
+                     'blacklist-remove-disk actions...')
+        unit_name = 'ceph-osd/0'
+
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+
+        # Attempt to add device with non-absolute path should fail
+        action_obj = zaza_model.run_action(
+            unit_name=unit_name,
+            action_name='blacklist-add-disk',
+            action_params={'osd-devices': 'vda'}
+        )
+        self.assertTrue(action_obj.status != 'completed')
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+
+        # Attempt to add device with non-existent path should fail
+        action_obj = zaza_model.run_action(
+            unit_name=unit_name,
+            action_name='blacklist-add-disk',
+            action_params={'osd-devices': '/non-existent'}
+        )
+        self.assertTrue(action_obj.status != 'completed')
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+
+        # Attempt to add device with existent path should succeed
+        device = self.get_device_for_blacklist(unit_name)
+        if not device:
+            raise unittest.SkipTest(
+                "Skipping test because no device was found")
+
+        action_obj = zaza_model.run_action(
+            unit_name=unit_name,
+            action_name='blacklist-add-disk',
+            action_params={'osd-devices': device}
+        )
+        self.assertEqual('completed', action_obj.status)
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+
+        # Attempt to remove listed device should always succeed
+        action_obj = zaza_model.run_action(
+            unit_name=unit_name,
+            action_name='blacklist-remove-disk',
+            action_params={'osd-devices': device}
+        )
+        self.assertEqual('completed', action_obj.status)
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+        logging.debug('OK')
+
+    def test_list_disks(self):
+        """Test the list-disks action.
+
+        The list-disks action executes.
+        """
+        logging.info('Checking list-disks action...')
+        unit_name = 'ceph-osd/0'
+
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+
+        action_obj = zaza_model.run_action(
+            unit_name=unit_name,
+            action_name='list-disks',
+        )
+        self.assertEqual('completed', action_obj.status)
+        zaza_model.block_until_unit_wl_status(
+            unit_name,
+            'active'
+        )
+        logging.debug('OK')
+
+    def get_local_osd_id(self, unit):
+        """Get the OSD id for a unit."""
+        ret = zaza_model.run_on_unit(unit,
+                                     'ceph-volume lvm list --format=json')
+        local = list(json.loads(ret['Stdout']))[-1]
+        return local if local.startswith('osd.') else 'osd.' + local
+
+    def get_num_osds(self, osd, is_up_only=False):
+        """Compute the number of active OSDs."""
+        result = zaza_model.run_on_unit(osd, 'ceph osd stat --format=json')
+        result = json.loads(result['Stdout'])
+        if is_up_only:
+            return int(result['num_up_osds'])
+        else:
+            return int(result['num_osds'])
+
+    def get_osd_devices_on_unit(self, unit_name):
+        """Get information for osd devices present on a particular unit.
+
+        :param unit_name: Unit name to be queried for osd device info.
+        :type unit_name: str
+        """
+        osd_devices = json.loads(
+            zaza_model.run_on_unit(
+                unit_name, 'ceph-volume lvm list --format=json'
+            ).get('Stdout', '')
+        )
+
+        return osd_devices
+
+    def remove_disk_from_osd_unit(self, unit, osd_id, is_purge=False):
+        """Remove osd device with provided osd_id from unit.
+
+        :param unit: Unit name where the osd device is to be removed from.
+        :type unit: str
+
+        :param osd_id: osd-id for the osd device to be removed.
+        :type osd_id: str
+
+        :param is_purge: whether to purge the osd device
+        :type is_purge: bool
+        """
+        action_obj = zaza_model.run_action(
+            unit_name=unit,
+            action_name='remove-disk',
+            action_params={
+                'osd-ids': osd_id,
+                'timeout': 10,
+                'format': 'json',
+                'purge': is_purge
+            }
+        )
+        zaza_utils.assertActionRanOK(action_obj)
+        results = json.loads(action_obj.data['results']['message'])
+        results = results[next(iter(results))]
+        self.assertEqual(results['osd-ids'], osd_id)
+        zaza_model.run_on_unit(unit, 'partprobe')
+
+    def remove_one_osd(self, unit, block_devs):
+        """Remove one device from osd unit.
+
+        :param unit: Unit name where the osd device is to be removed from.
+        :type unit: str
+        :param block_devs: list of block devices on the specified unit
+        :type block_devs: list[str]
+        """
+        # Should have more than 1 OSD to take one out and test.
+        self.assertGreater(len(block_devs), 1)
+
+        # Get complete device details for an OSD.
+        key = list(block_devs)[-1]
+        device = {
+            'osd-id': key if key.startswith('osd.') else 'osd.' + key,
+            'block-device': block_devs[key][0]['devices'][0]
+        }
+
+        self.remove_disk_from_osd_unit(unit, device['osd-id'], is_purge=True)
+        return device
+
+    def test_cache_device(self):
+        """Test replacing a disk in use."""
+        logging.info('Running add-disk action with a caching device')
+        mon = next(iter(zaza_model.get_units('ceph-mon'))).entity_id
+        osds = [x.entity_id for x in zaza_model.get_units('ceph-osd')]
+        osd_info = dict()
+
+        # Remove one of the two disks.
+        logging.info('Removing single disk from each OSD')
+        for unit in osds:
+            block_devs = self.get_osd_devices_on_unit(unit)
+            if len(block_devs) < 2:
+                continue
+            device_info = self.remove_one_osd(unit, block_devs)
+            block_dev = device_info['block-device']
+            logging.info("Removing device %s from unit %s" % (block_dev, unit))
+            osd_info[unit] = device_info
+        if not osd_info:
+            raise unittest.SkipTest(
+                'Skipping OSD replacement Test, no spare devices added')
+
+        logging.debug('Removed OSD Info: {}'.format(osd_info))
+        zaza_model.wait_for_application_states()
+
+        logging.info('Recycling previously removed disks')
+        for unit, device_info in osd_info.items():
+            osd_id = device_info['osd-id']
+            block_dev = device_info['block-device']
+            logging.info("Found device %s on unit %s" % (block_dev, unit))
+            self.assertNotEqual(block_dev, None)
+            action_obj = zaza_model.run_action(
+                unit_name=unit,
+                action_name='add-disk',
+                action_params={'osd-devices': block_dev,
+                               'osd-ids': osd_id,
+                               'partition-size': 5}
+            )
+            zaza_utils.assertActionRanOK(action_obj)
+        zaza_model.wait_for_application_states()
+
+        logging.info('Removing previously added OSDs')
+        for unit, device_info in osd_info.items():
+            osd_id = device_info['osd-id']
+            block_dev = device_info['block-device']
+            logging.info(
+                "Removing block device %s from unit %s" %
+                (block_dev, unit)
+            )
+            self.remove_disk_from_osd_unit(unit, osd_id, is_purge=False)
+        zaza_model.wait_for_application_states()
+
+        logging.info('Finally adding back OSDs')
+        for unit, device_info in osd_info.items():
+            block_dev = device_info['block-device']
+            action_obj = zaza_model.run_action(
+                unit_name=unit,
+                action_name='add-disk',
+                action_params={'osd-devices': block_dev,
+                               'partition-size': 5}
+            )
+            zaza_utils.assertActionRanOK(action_obj)
+        zaza_model.wait_for_application_states()
+
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=2, max=32),
+            reraise=True, stop=tenacity.stop_after_attempt(10),
+            retry=tenacity.retry_if_exception_type(AssertionError)
+        ):
+            with attempt:
+                self.assertEqual(
+                    len(osds) * 2, self.get_num_osds(mon, is_up_only=True)
+                )
+
+
+class CephRGWTest(test_utils.BaseCharmTest):
+    """Ceph RADOS Gateway Daemons Test Class.
+
+    This test set is not idempotent, because we don't support scale down from
+    multisite to singlesite (yet). Tests can be performed independently.
+    However, if test_100 has completed migration, re-running the test set
+    would cause a time-out in test_003.
+    """
+
+    # String Resources
+    primary_rgw_app = 'ceph-radosgw'
+    primary_rgw_unit = 'ceph-radosgw/0'
+    secondary_rgw_app = 'secondary-ceph-radosgw'
+    secondary_rgw_unit = 'secondary-ceph-radosgw/0'
+
+    @classmethod
+    def setUpClass(cls):
+        """Run class setup for running ceph RGW tests."""
+        super(CephRGWTest, cls).setUpClass(application_name='ceph-radosgw')
+
+    @property
+    def expected_apps(self):
+        """Determine application names for ceph-radosgw apps."""
+        _apps = [
+            self.primary_rgw_app
+        ]
+        try:
+            zaza_model.get_application(self.secondary_rgw_app)
+            _apps.append(self.secondary_rgw_app)
+        except KeyError:
+            pass
+        return _apps
+
+    @property
+    def multisite(self):
+        """Determine whether deployment is multi-site."""
+        try:
+            zaza_model.get_application(self.secondary_rgw_app)
+            return True
+        except KeyError:
+            return False
+
+    def get_rgwadmin_cmd_skeleton(self, unit_name):
+        """
+        Get radosgw-admin cmd skeleton with rgw.hostname populated key.
+
+        :param unit_name: Unit on which the complete command would be run.
+        :type unit_name: str
+        :returns: hostname filled basic command skeleton
+        :rtype: str
+        """
+        app_name = unit_name.split('/')[0]
+        juju_units = zaza_model.get_units(app_name)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        hostname = unit_hostnames[unit_name]
+        return 'radosgw-admin --id=rgw.{} '.format(hostname)
+
+    def purge_bucket(self, application, bucket_name):
+        """Remove a bucket and all its objects.
+
+        :param application: RGW application name
+        :type application: str
+        :param bucket_name: Name for RGW bucket to be deleted
+        :type bucket_name: str
+        """
+        juju_units = zaza_model.get_units(application)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        for unit_name, hostname in unit_hostnames.items():
+            key_name = "rgw.{}".format(hostname)
+            cmd = 'radosgw-admin --id={} bucket rm --bucket={}' \
+                  ' --purge-objects'.format(key_name, bucket_name)
+            zaza_model.run_on_unit(unit_name, cmd)
+
+    def wait_for_status(self, application,
+                        is_primary=False, sync_expected=True):
+        """Wait for required RGW endpoint to finish sync for data and metadata.
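+
+        Sync state is read with ``radosgw-admin sync status`` on every
+        unit of the application and matched against the expected data and
+        metadata markers, retrying with exponential backoff.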
+
+        :param application: RGW application which has to be waited for
+        :type application: str
+        :param is_primary: whether RGW application is primary or secondary
+        :type is_primary: boolean
+        :param sync_expected: whether sync details should be expected in status
+        :type sync_expected: boolean
+        """
+        juju_units = zaza_model.get_units(application)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        data_check = 'data is caught up with source'
+        meta_primary = 'metadata sync no sync (zone is master)'
+        meta_secondary = 'metadata is caught up with master'
+        meta_check = meta_primary if is_primary else meta_secondary
+
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=10, max=300),
+            reraise=True, stop=tenacity.stop_after_attempt(12),
+            retry=tenacity.retry_if_exception_type(AssertionError)
+        ):
+            with attempt:
+                for unit_name, hostname in unit_hostnames.items():
+                    key_name = "rgw.{}".format(hostname)
+                    cmd = 'radosgw-admin --id={} sync status'.format(key_name)
+                    stdout = zaza_model.run_on_unit(
+                        unit_name, cmd
+                    ).get('Stdout', '')
+                    if sync_expected:
+                        # Both data and meta sync.
+                        self.assertIn(data_check, stdout)
+                        self.assertIn(meta_check, stdout)
+                    else:
+                        # Expect Primary's Meta Status and no Data sync status
+                        self.assertIn(meta_primary, stdout)
+                        self.assertNotIn(data_check, stdout)
+
+    def fetch_rgw_object(self, target_client, container_name, object_name):
+        """Fetch RGW object content.
+
+        :param target_client: boto3 client object configured for an endpoint.
+        :type target_client: str
+        :param container_name: RGW bucket name for desired object.
+        :type container_name: str
+        :param object_name: Object name for desired object.
+        :type object_name: str
+        """
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=1, max=60),
+            reraise=True, stop=tenacity.stop_after_attempt(12)
+        ):
+            with attempt:
+                return target_client.Object(
+                    container_name, object_name
+                ).get()['Body'].read().decode('UTF-8')
+
+    def promote_rgw_to_primary(self, app_name: str):
+        """Promote provided app to Primary and update period at new secondary.
+
+        :param app_name: Secondary site rgw Application to be promoted.
+        :type app_name: str
+        """
+        if app_name == self.primary_rgw_app:
+            new_secondary = self.secondary_rgw_unit
+        else:
+            new_secondary = self.primary_rgw_unit
+
+        # Promote to Primary
+        zaza_model.run_action_on_leader(
+            app_name,
+            'promote',
+            action_params={},
+        )
+
+        # Commit a period update on the new secondary.
+        cmd = self.get_rgwadmin_cmd_skeleton(new_secondary)
+        zaza_model.run_on_unit(
+            new_secondary, cmd + 'period update --commit'
+        )
+
+    def get_client_keys(self, rgw_app_name=None):
+        """Create access_key and secret_key for boto3 client.
+
+        :param rgw_app_name: RGW application for which keys are required.
+        :type rgw_app_name: str
+        """
+        unit_name = self.primary_rgw_unit
+        if rgw_app_name is not None:
+            unit_name = rgw_app_name + '/0'
+        user_name = 'botoclient'
+        cmd = self.get_rgwadmin_cmd_skeleton(unit_name)
+        users = json.loads(zaza_model.run_on_unit(
+            unit_name, cmd + 'user list'
+        ).get('Stdout', ''))
+        # Fetch boto3 user keys if user exists.
+        if user_name in users:
+            output = json.loads(zaza_model.run_on_unit(
+                unit_name, cmd + 'user info --uid={}'.format(user_name)
+            ).get('Stdout', ''))
+            keys = output['keys'][0]
+            return keys['access_key'], keys['secret_key']
+        # Create boto3 user if it does not exist.
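+        # The assembled command is roughly:
+        #   radosgw-admin --id=rgw.<hostname> user create \
+        #       --uid=botoclient --display-name=botoclient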
+ create_cmd = cmd + 'user create --uid={} --display-name={}'.format( + user_name, user_name + ) + output = json.loads( + zaza_model.run_on_unit(unit_name, create_cmd).get('Stdout', '') + ) + keys = output['keys'][0] + return keys['access_key'], keys['secret_key'] + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda ret: ret is None), + wait=tenacity.wait_fixed(10), + stop=tenacity.stop_after_attempt(5) + ) + def get_rgw_endpoint(self, unit_name: str): + """Fetch Application endpoint for RGW unit. + + :param unit_name: Unit name for which RGW endpoint is required. + :type unit_name: str + """ + # Get address "public" network binding. + unit_address = zaza_model.run_on_unit( + unit_name, "network-get public --bind-address" + ).get('Stdout', '').strip() + + logging.info("Unit: {}, Endpoint: {}".format(unit_name, unit_address)) + if unit_address is None: + return None + # Evaluate port + try: + zaza_model.get_application("vault") + return "https://{}:443".format(unit_address) + except KeyError: + return "http://{}:80".format(unit_address) + + def configure_rgw_apps_for_multisite(self): + """Configure Multisite values on primary and secondary apps.""" + realm = 'zaza_realm' + zonegroup = 'zaza_zg' + + zaza_model.set_application_config( + self.primary_rgw_app, + { + 'realm': realm, + 'zonegroup': zonegroup, + 'zone': 'zaza_primary' + } + ) + zaza_model.set_application_config( + self.secondary_rgw_app, + { + 'realm': realm, + 'zonegroup': zonegroup, + 'zone': 'zaza_secondary' + } + ) + + def configure_rgw_multisite_relation(self): + """Configure multi-site relation between primary and secondary apps.""" + multisite_relation = zaza_model.get_relation_id( + self.primary_rgw_app, self.secondary_rgw_app, + remote_interface_name='secondary' + ) + if multisite_relation is None: + logging.info('Configuring Multisite') + self.configure_rgw_apps_for_multisite() + zaza_model.add_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + zaza_model.block_until_unit_wl_status( + self.secondary_rgw_unit, "waiting" + ) + + zaza_model.block_until_unit_wl_status( + self.secondary_rgw_unit, "active" + ) + zaza_model.block_until_unit_wl_status( + self.primary_rgw_unit, "active" + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + def clean_rgw_multisite_config(self, app_name): + """Clear Multisite Juju config values to default. + + :param app_name: App for which config values are to be cleared + :type app_name: str + """ + unit_name = app_name + "/0" + zaza_model.set_application_config( + app_name, + { + 'realm': "", + 'zonegroup': "", + 'zone': "default" + } + ) + # Commit changes to period. 
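+        # i.e. approximately:
+        #   radosgw-admin --id=rgw.<hostname> period update --commit \
+        #       --rgw-zone=default --rgw-zonegroup=default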
+        cmd = self.get_rgwadmin_cmd_skeleton(unit_name)
+        zaza_model.run_on_unit(
+            unit_name, cmd + 'period update --commit --rgw-zone=default '
+            '--rgw-zonegroup=default'
+        )
+
+    def enable_virtual_hosted_bucket(self):
+        """Enable virtual hosted bucket on primary rgw app."""
+        zaza_model.set_application_config(
+            self.primary_rgw_app,
+            {
+                'virtual-hosted-bucket-enabled': "true"
+            }
+        )
+
+    def set_os_public_hostname(self):
+        """Set os-public-hostname on primary rgw app."""
+        zaza_model.set_application_config(
+            self.primary_rgw_app,
+            {
+                'os-public-hostname': "rgw.example.com",
+            }
+        )
+
+    def clean_virtual_hosted_bucket(self):
+        """Clear virtual hosted bucket on primary app."""
+        zaza_model.set_application_config(
+            self.primary_rgw_app,
+            {
+                'os-public-hostname': "",
+                'virtual-hosted-bucket-enabled': "false"
+            }
+        )
+
+    def test_001_processes(self):
+        """Verify Ceph processes.
+
+        Verify that the expected service processes are running
+        on each ceph-radosgw unit.
+        """
+        logging.info('Checking radosgw processes...')
+        # Process name and quantity of processes to expect on each unit
+        ceph_radosgw_processes = {
+            'radosgw': 1,
+        }
+
+        # Units with process names and PID quantities expected
+        expected_processes = {}
+        for app in self.expected_apps:
+            for unit in zaza_model.get_units(app):
+                expected_processes[unit.entity_id] = ceph_radosgw_processes
+
+        actual_pids = zaza_utils.get_unit_process_ids(expected_processes)
+        ret = zaza_utils.validate_unit_process_ids(expected_processes,
+                                                   actual_pids)
+        self.assertTrue(ret)
+
+    def test_002_services(self):
+        """Verify the ceph services.
+
+        Verify the expected services are running on the service units.
+        """
+        logging.info('Checking radosgw services...')
+        services = ['radosgw', 'haproxy']
+        for app in self.expected_apps:
+            for unit in zaza_model.get_units(app):
+                zaza_model.block_until_service_status(
+                    unit_name=unit.entity_id,
+                    services=services,
+                    target_status='running'
+                )
+
+    def test_003_object_storage_and_secondary_block(self):
+        """Verify Object Storage API and Secondary Migration block."""
+        container_name = 'zaza-container'
+        obj_data = 'Test data from Zaza'
+        obj_name = 'prefile'
+
+        logging.info('Checking Object Storage API for Primary Cluster')
+        # 1. Fetch Primary Endpoint Details
+        primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit)
+        self.assertNotEqual(primary_endpoint, None)
+
+        # 2. Create RGW Client and perform IO
+        access_key, secret_key = self.get_client_keys()
+        primary_client = boto3.resource("s3",
+                                        verify=False,
+                                        endpoint_url=primary_endpoint,
+                                        aws_access_key_id=access_key,
+                                        aws_secret_access_key=secret_key)
+        primary_client.Bucket(container_name).create()
+        primary_object_one = primary_client.Object(
+            container_name,
+            obj_name
+        )
+        primary_object_one.put(Body=obj_data)
+
+        # 3. Fetch Object and Perform Data Integrity check.
+        content = primary_object_one.get()['Body'].read().decode('UTF-8')
+        self.assertEqual(content, obj_data)
+
+        # Skip multisite tests if not compatible with bundle.
+        if not self.multisite:
+            logging.info('Skipping Secondary Object gateway verification')
+            return
+
+        logging.info('Checking Object Storage API for Secondary Cluster')
+        # 1. Fetch Secondary Endpoint Details
+        secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit)
+        self.assertNotEqual(secondary_endpoint, None)
+
+        # 2.
Create RGW Client and perform IO + access_key, secret_key = self.get_client_keys(self.secondary_rgw_app) + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + secondary_client.Bucket(container_name).create() + secondary_object = secondary_client.Object( + container_name, + obj_name + ) + secondary_object.put(Body=obj_data) + + # 3. Fetch Object and Perform Data Integrity check. + content = secondary_object.get()['Body'].read().decode('UTF-8') + self.assertEqual(content, obj_data) + + logging.info('Checking Secondary Migration Block') + # 1. Migrate to multisite + if zaza_model.get_relation_id( + self.primary_rgw_app, self.secondary_rgw_app, + remote_interface_name='secondary' + ) is not None: + logging.info('Skipping Test, Multisite relation already present.') + return + + logging.info('Configuring Multisite') + self.configure_rgw_apps_for_multisite() + zaza_model.add_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + + # 2. Verify secondary fails migration due to existing Bucket. + assert_state = { + self.secondary_rgw_app: { + "workload-status": "blocked", + "workload-status-message-prefix": + "Non-Pristine RGW site can't be used as secondary" + } + } + zaza_model.wait_for_application_states(states=assert_state, + timeout=900) + + # 3. Perform Secondary Cleanup + logging.info('Perform cleanup at secondary') + self.clean_rgw_multisite_config(self.secondary_rgw_app) + zaza_model.remove_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + + # Make secondary pristine. + self.purge_bucket(self.secondary_rgw_app, container_name) + + zaza_model.block_until_unit_wl_status(self.secondary_rgw_unit, + 'active') + + def test_004_multisite_directional_sync_policy(self): + """Verify Multisite Directional Sync Policy.""" + # Skip multisite tests if not compatible with bundle. + if not self.multisite: + logging.info('Skipping multisite sync policy verification') + return + + container_name = 'zaza-container' + primary_obj_name = 'primary-testfile' + primary_obj_data = 'Primary test data' + secondary_directional_obj_name = 'secondary-directional-testfile' + secondary_directional_obj_data = 'Secondary directional test data' + secondary_symmetrical_obj_name = 'secondary-symmetrical-testfile' + secondary_symmetrical_obj_data = 'Secondary symmetrical test data' + + logging.info('Verifying multisite directional sync policy') + + # Set default sync policy to "allowed", which allows buckets to sync, + # but the sync is disabled by default in the zone group. Also, set the + # secondary zone sync policy flow type policy to "directional". + zaza_model.set_application_config( + self.primary_rgw_app, + { + "sync-policy-state": "allowed", + } + ) + zaza_model.set_application_config( + self.secondary_rgw_app, + { + "sync-policy-flow-type": "directional", + } + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + # Setup multisite relation. + self.configure_rgw_multisite_relation() + + logging.info('Waiting for Data and Metadata to Synchronize') + # NOTE: We only check the secondary zone, because the sync policy flow + # type is set to "directional" between the primary and secondary zones. + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + + # Create bucket on primary RGW zone. 
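+        # With the zone group sync policy at "allowed", the bucket itself
+        # becomes visible on both zones, but object data is not replicated
+        # until sync is explicitly enabled on the bucket further below.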
+ logging.info('Creating bucket on primary zone') + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + primary_client.Bucket(container_name).create() + + # Enable sync on the bucket. + logging.info('Enabling sync on the bucket from the primary zone') + zaza_model.run_action_on_leader( + self.primary_rgw_app, + 'enable-buckets-sync', + action_params={ + 'buckets': container_name, + }, + raise_on_failure=True, + ) + + # Check that sync cannot be enabled using secondary Juju RGW app. + with self.assertRaises(zaza_model.ActionFailed): + zaza_model.run_action_on_leader( + self.secondary_rgw_app, + 'enable-buckets-sync', + action_params={ + 'buckets': container_name, + }, + raise_on_failure=True, + ) + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + + # Perform IO on primary zone bucket. + logging.info('Performing IO on primary zone bucket') + primary_object = primary_client.Object( + container_name, + primary_obj_name + ) + primary_object.put(Body=primary_obj_data) + + # Verify that the object is replicated to the secondary zone. + logging.info('Verifying that the object is replicated to the ' + 'secondary zone') + secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit) + self.assertNotEqual(secondary_endpoint, None) + + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + secondary_data = self.fetch_rgw_object( + secondary_client, + container_name, + primary_obj_name + ) + self.assertEqual(secondary_data, primary_obj_data) + + # Write object to the secondary zone bucket, when the sync policy + # flow type is set to "directional" between the zones. + logging.info('Writing object to the secondary zone bucket, which ' + 'should not be replicated to the primary zone') + secondary_object = secondary_client.Object( + container_name, + secondary_directional_obj_name + ) + secondary_object.put(Body=secondary_directional_obj_data) + + # Verify that the object is not replicated to the primary zone. + logging.info('Verifying that the object is not replicated to the ' + 'primary zone') + with self.assertRaises(botocore.exceptions.ClientError): + self.fetch_rgw_object( + primary_client, + container_name, + secondary_directional_obj_name + ) + + logging.info('Setting sync policy flow to "symmetrical" on the ' + 'secondary RGW zone') + zaza_model.set_application_config( + self.secondary_rgw_app, + { + "sync-policy-flow-type": "symmetrical", + } + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + # Write another object to the secondary zone bucket. + logging.info('Writing another object to the secondary zone bucket.') + secondary_object = secondary_client.Object( + container_name, + secondary_symmetrical_obj_name + ) + secondary_object.put(Body=secondary_symmetrical_obj_data) + + logging.info('Waiting for Data and Metadata to Synchronize') + # NOTE: This time, we check both the primary and secondary zones, + # because the sync policy flow type is set to "symmetrical" between + # the zones. 
+ self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + # Verify that all objects are replicated to the primary zone. + logging.info('Verifying that all objects are replicated to the ' + 'primary zone (including older objects).') + test_cases = [ + { + 'obj_name': primary_obj_name, + 'obj_data': primary_obj_data, + }, + { + 'obj_name': secondary_directional_obj_name, + 'obj_data': secondary_directional_obj_data, + }, + { + 'obj_name': secondary_symmetrical_obj_name, + 'obj_data': secondary_symmetrical_obj_data, + }, + ] + for tc in test_cases: + logging.info('Verifying that object "{}" is replicated'.format( + tc['obj_name'])) + primary_data = self.fetch_rgw_object( + primary_client, + container_name, + tc['obj_name'] + ) + self.assertEqual(primary_data, tc['obj_data']) + + # Cleanup. + logging.info('Cleaning up buckets after test case') + self.purge_bucket(self.primary_rgw_app, container_name) + self.purge_bucket(self.secondary_rgw_app, container_name) + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + # Set multisite sync policy state to "enabled" on the primary RGW app. + # Paired with "symmetrical" sync policy flow on the secondary RGW app, + # this enables bidirectional sync between the zones (which is the + # default behaviour without multisite sync policies configured). + logging.info('Setting sync policy state to "enabled".') + zaza_model.set_application_config( + self.primary_rgw_app, + { + "sync-policy-state": "enabled", + } + ) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + def test_100_migration_and_multisite_failover(self): + """Perform multisite migration and verify failover.""" + container_name = 'zaza-container' + obj_data = 'Test data from Zaza' + # Skip multisite tests if not compatible with bundle. + if not self.multisite: + raise unittest.SkipTest('Skipping Migration Test') + + logging.info('Perform Pre-Migration IO') + # 1. Fetch Endpoint Details + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + # 2. Create primary client and add pre-migration object. + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + primary_client.Bucket(container_name).create() + primary_client.Object( + container_name, + 'prefile' + ).put(Body=obj_data) + + # If Primary/Secondary relation does not exist, add it. + self.configure_rgw_multisite_relation() + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + logging.info('Performing post migration IO tests.') + # Add another object at primary + primary_client.Object( + container_name, + 'postfile' + ).put(Body=obj_data) + + # 1. Fetch Endpoint Details + secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit) + self.assertNotEqual(secondary_endpoint, None) + + # 2. Create secondary client and fetch synchronised objects. + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + # 3. 
Verify Data Integrity
+        # fetch_rgw_object has internal retry so waiting for sync beforehand
+        # is not required for post migration object sync.
+        pre_migration_data = self.fetch_rgw_object(
+            secondary_client, container_name, 'prefile'
+        )
+        post_migration_data = self.fetch_rgw_object(
+            secondary_client, container_name, 'postfile'
+        )
+
+        # 4. Verify synchronisation works and objects are replicated
+        self.assertEqual(pre_migration_data, obj_data)
+        self.assertEqual(post_migration_data, obj_data)
+
+        logging.info('Checking multisite failover/failback')
+        # Failover Scenario, Promote Secondary-Ceph-RadosGW to Primary
+        self.promote_rgw_to_primary(self.secondary_rgw_app)
+
+        # Wait for Sites to be synchronised.
+        self.wait_for_status(self.primary_rgw_app, is_primary=False)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=True)
+
+        # IO Test
+        container = 'failover-container'
+        test_data = 'Test data from Zaza on Secondary'
+        secondary_client.Bucket(container).create()
+        secondary_object = secondary_client.Object(container, 'testfile')
+        secondary_object.put(
+            Body=test_data
+        )
+        secondary_content = secondary_object.get()[
+            'Body'
+        ].read().decode('UTF-8')
+
+        # Wait for Sites to be synchronised.
+        self.wait_for_status(self.primary_rgw_app, is_primary=False)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=True)
+
+        # Recovery scenario, reset ceph-rgw as primary.
+        self.promote_rgw_to_primary(self.primary_rgw_app)
+        self.wait_for_status(self.primary_rgw_app, is_primary=True)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=False)
+
+        # Fetch synchronised copy of testfile from primary site.
+        primary_content = self.fetch_rgw_object(
+            primary_client, container, 'testfile'
+        )
+
+        # Verify Data Integrity.
+        self.assertEqual(secondary_content, primary_content)
+
+        # Scaledown and verify replication has stopped.
+        logging.info('Checking multisite scaledown')
+        zaza_model.remove_relation(
+            self.primary_rgw_app,
+            self.primary_rgw_app + ":primary",
+            self.secondary_rgw_app + ":secondary"
+        )
+
+        # Wait for sync to stop.
+        self.wait_for_status(self.primary_rgw_app, sync_expected=False)
+        self.wait_for_status(self.secondary_rgw_app, sync_expected=False)
+
+        # Refresh client and verify objects are not replicating.
+        primary_client = boto3.resource("s3",
+                                        verify=False,
+                                        endpoint_url=primary_endpoint,
+                                        aws_access_key_id=access_key,
+                                        aws_secret_access_key=secret_key)
+        secondary_client = boto3.resource("s3",
+                                          verify=False,
+                                          endpoint_url=secondary_endpoint,
+                                          aws_access_key_id=access_key,
+                                          aws_secret_access_key=secret_key)
+
+        # IO Test
+        container = 'scaledown-container'
+        test_data = 'Scaledown Test data'
+        secondary_client.Bucket(container).create()
+        secondary_object = secondary_client.Object(container, 'scaledown')
+        secondary_object.put(
+            Body=test_data
+        )
+
+        # Since the bucket is not replicated, the fetch must fail.
+        with self.assertRaises(botocore.exceptions.ClientError):
+            primary_content = self.fetch_rgw_object(
+                primary_client, container, 'scaledown'
+            )
+
+        # Cleanup of scaledown resources and synced resources.
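+        # (purge_bucket wraps "radosgw-admin bucket rm --bucket=<name>
+        # --purge-objects" on each unit of the application.)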
+ self.purge_bucket(self.secondary_rgw_app, container) + self.purge_bucket(self.secondary_rgw_app, 'zaza-container') + self.purge_bucket(self.secondary_rgw_app, 'failover-container') + + def test_101_virtual_hosted_bucket(self): + """Test virtual hosted bucket.""" + # skip if quincy or older + current_release = zaza_openstack.get_os_release( + application='ceph-mon') + reef = zaza_openstack.get_os_release('jammy_bobcat') + if current_release < reef: + raise unittest.SkipTest( + 'Virtual hosted bucket not supported in quincy or older') + + primary_rgw_unit = zaza_model.get_unit_from_name(self.primary_rgw_unit) + if primary_rgw_unit.workload_status != "active": + logging.info('Skipping virtual hosted bucket test since ' + 'primary rgw unit is not in active state') + return + + logging.info('Testing virtual hosted bucket') + + # 0. Configure virtual hosted bucket + self.enable_virtual_hosted_bucket() + zaza_model.block_until_wl_status_info_starts_with( + self.primary_rgw_app, + 'os-public-hostname must have a value', + timeout=900 + ) + self.set_os_public_hostname() + zaza_model.block_until_all_units_idle(self.model_name) + container_name = 'zaza-bucket' + obj_data = 'Test content from Zaza' + obj_name = 'testfile' + + # 1. Fetch Primary Endpoint Details + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + # 2. Create RGW Client and perform IO + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + # We may not have certs for the pub hostname yet, so retry a few times. + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(10), + wait=tenacity.wait_fixed(4), + ): + with attempt: + primary_client.Bucket(container_name).create() + primary_object_one = primary_client.Object( + container_name, + obj_name + ) + primary_object_one.put(Body=obj_data) + primary_client.Bucket(container_name).Acl().put(ACL='public-read') + primary_client.Object(container_name, obj_name).Acl().put( + ACL='public-read' + ) + + # 3. Test if we can get content via virtual hosted bucket name + public_hostname = zaza_model.get_application_config( + self.primary_rgw_app + )["os-public-hostname"]["value"] + url = f"{primary_endpoint}/{obj_name}" + headers = {'host': f"{container_name}.{public_hostname}"} + f = requests.get(url, headers=headers, verify=False) + self.assertEqual(f.text, obj_data) + + # 4. 
Cleanup and de-configure virtual hosted bucket + self.clean_virtual_hosted_bucket() + zaza_model.block_until_all_units_idle(self.model_name) + self.purge_bucket(self.primary_rgw_app, container_name) + + +class CephProxyTest(unittest.TestCase): + """Test ceph via proxy.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(CephProxyTest, cls).setUpClass() + + test_config = lifecycle_utils.get_charm_config(fatal=False) + cls.target_deploy_status = test_config.get('target_deploy_status', {}) + + def test_ceph_health(self): + """Make sure ceph-proxy can communicate with ceph.""" + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states( + states=self.target_deploy_status) + + self.assertEqual( + zaza_model.run_on_leader("ceph-proxy", "sudo ceph health")["Code"], + "0" + ) + + def test_cinder_ceph_restrict_pool_setup(self): + """Make sure cinder-ceph restrict pool was created successfully.""" + try: + zaza_model.get_application('cinder-ceph') + except KeyError: + raise unittest.SkipTest("Skipping OpenStack dependent test") + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states( + states=self.target_deploy_status) + + for attempt in tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=2, max=32), + reraise=True, stop=tenacity.stop_after_attempt(8), + ): + with attempt: + pools = zaza_ceph.get_ceph_pools('ceph-mon/0') + if 'cinder-ceph' not in pools: + msg = ('cinder-ceph pool not found querying ceph-mon/0,' + 'got: {}'.format(pools)) + raise zaza_exceptions.CephPoolNotFound(msg) + + # Checking for cinder-ceph specific permissions makes + # the test more rugged when we add additional relations + # to ceph for other applications (such as glance and nova). + expected_permissions = [ + "allow rwx pool=cinder-ceph", + "allow class-read object_prefix rbd_children", + ] + cmd = "sudo ceph auth get client.cinder-ceph" + result = zaza_model.run_on_unit('ceph-mon/0', cmd) + output = result.get('Stdout').strip() + + for expected in expected_permissions: + if expected not in output: + msg = ('cinder-ceph pool restriction ({}) was not' + ' configured correctly.' 
+ ' Found: {}'.format(expected, output)) + raise zaza_exceptions.CephPoolNotConfigured(msg) + + +class CephPrometheusTest(unittest.TestCase): + """Test the Ceph <-> Prometheus relation.""" + + def test_prometheus_metrics(self): + """Validate that Prometheus has Ceph metrics.""" + try: + zaza_model.get_application( + 'prometheus2') + except KeyError: + raise unittest.SkipTest('Prometheus not present, skipping test') + unit = zaza_model.get_unit_from_name( + zaza_model.get_lead_unit_name('prometheus2')) + prometheus_mon_count = _get_mon_count_from_prometheus( + zaza_model.get_unit_public_address(unit)) + self.assertTrue(0 < int(prometheus_mon_count)) + + +class CephPoolConfig(Exception): + """Custom Exception for bad Ceph pool config.""" + + pass + + +class CheckPoolTypes(unittest.TestCase): + """Test the ceph pools created for clients are of the expected type.""" + + def test_check_pool_types(self): + """Check type of pools created for clients.""" + app_pools = [ + ('glance', 'glance'), + ('nova-compute', 'nova'), + ('cinder-ceph', 'cinder-ceph')] + runtime_pool_details = zaza_ceph.get_ceph_pool_details() + for app, pool_name in app_pools: + try: + app_config = zaza_model.get_application_config(app) + except KeyError: + logging.info( + 'Skipping pool check of %s, application %s not present', + pool_name, + app) + continue + rel_id = zaza_model.get_relation_id( + app, + 'ceph-mon', + remote_interface_name='client') + if not rel_id: + logging.info( + 'Skipping pool check of %s, ceph relation not present', + app) + continue + juju_pool_config = app_config.get('pool-type') + if juju_pool_config: + expected_pool_type = juju_pool_config['value'] + else: + # If the pool-type option is absent assume the default of + # replicated. + expected_pool_type = zaza_ceph.REPLICATED_POOL_TYPE + for pool_config in runtime_pool_details: + if pool_config['pool_name'] == pool_name: + logging.info('Checking {} is {}'.format( + pool_name, + expected_pool_type)) + expected_pool_code = -1 + if expected_pool_type == zaza_ceph.REPLICATED_POOL_TYPE: + expected_pool_code = zaza_ceph.REPLICATED_POOL_CODE + elif expected_pool_type == zaza_ceph.ERASURE_POOL_TYPE: + expected_pool_code = zaza_ceph.ERASURE_POOL_CODE + self.assertEqual( + pool_config['type'], + expected_pool_code) + break + else: + raise CephPoolConfig( + "Failed to find config for {}".format(pool_name)) + + +# NOTE: We might query before prometheus has fetch data +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, + min=5, max=10), + reraise=True) +def _get_mon_count_from_prometheus(prometheus_ip): + url = ('http://{}:9090/api/v1/query?query=' + 'count(ceph_mon_metadata)'.format(prometheus_ip)) + client = requests.session() + response = client.get(url) + logging.debug("Prometheus response: {}".format(response.json())) + return response.json()['data']['result'][0]['value'][1] + + +class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest): + """Test charm handling of bluestore compression configuration options.""" + + @classmethod + def setUpClass(cls): + """Perform class one time initialization.""" + super(BlueStoreCompressionCharmOperation, cls).setUpClass() + release_application = 'keystone' + try: + zaza_model.get_application(release_application) + except KeyError: + release_application = 'ceph-mon' + cls.current_release = zaza_openstack.get_os_release( + application=release_application) + cls.bionic_rocky = zaza_openstack.get_os_release('bionic_rocky') + + def setUp(self): + """Perform common per test initialization steps.""" + 
super(BlueStoreCompressionCharmOperation, self).setUp()
+
+        # determine if the tests should be run or not
+        logging.debug('os_release: {} >= {} = {}'
+                      .format(self.current_release,
+                              self.bionic_rocky,
+                              self.current_release >= self.bionic_rocky))
+        self.mimic_or_newer = self.current_release >= self.bionic_rocky
+
+    def _assert_pools_properties(self, pools, pools_detail,
+                                 expected_properties, log_func=logging.info):
+        """Check properties on a set of pools.
+
+        :param pools: List of pool names to check.
+        :type pools: List[str]
+        :param pools_detail: List of dictionaries with pool detail
+        :type pools_detail: List[Dict[str,any]]
+        :param expected_properties: Properties to check and their expected
+                                    values.
+        :type expected_properties: Dict[str,any]
+        :returns: Nothing
+        :raises: AssertionError
+        """
+        for pool in pools:
+            for pd in pools_detail:
+                if pd['pool_name'] == pool:
+                    if 'options' in expected_properties:
+                        for k, v in expected_properties['options'].items():
+                            self.assertEqual(pd['options'][k], v)
+                            log_func("['options']['{}'] == {}".format(k, v))
+                    for k, v in expected_properties.items():
+                        if k == 'options':
+                            continue
+                        self.assertEqual(pd[k], v)
+                        log_func("{} == {}".format(k, v))
+
+    def test_configure_compression(self):
+        """Enable compression and validate properties flush through to pool."""
+        if not self.mimic_or_newer:
+            logging.info('Skipping test, Mimic or newer required.')
+            return
+        if self.application_name == 'ceph-osd':
+            # The ceph-osd charm itself does not request pools, nor do the
+            # BlueStore compression configuration options it exposes affect
+            # pool properties.
+            logging.info('test does not apply to ceph-osd charm.')
+            return
+        elif self.application_name == 'ceph-radosgw':
+            # The Ceph RadosGW creates many lightweight pools to keep track
+            # of metadata; we only compress the pool containing actual data.
+            app_pools = ['.rgw.buckets.data']
+        else:
+            # Retrieve which pools the charm under test has requested,
+            # skipping metadata pools as they are deliberately not compressed.
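+            # get_pools_from_broker_req presumably derives this list from
+            # the broker request the charm sent over its Ceph relation, so
+            # it should match exactly the pools the charm asked for.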
+ app_pools = [ + pool + for pool in zaza_ceph.get_pools_from_broker_req( + self.application_name, model_name=self.model_name) + if 'metadata' not in pool + ] + + ceph_pools_detail = zaza_ceph.get_ceph_pool_details( + model_name=self.model_name) + + logging.debug('BEFORE: {}'.format(ceph_pools_detail)) + try: + logging.info('Checking Ceph pool compression_mode prior to change') + self._assert_pools_properties( + app_pools, ceph_pools_detail, + {'options': {'compression_mode': 'none'}}) + except KeyError: + logging.info('property does not exist on pool, which is OK.') + logging.info('Changing "bluestore-compression-mode" to "force" on {}' + .format(self.application_name)) + with self.config_change( + {'bluestore-compression-mode': 'none'}, + {'bluestore-compression-mode': 'force'}): + logging.info('Checking Ceph pool compression_mode after to change') + self._check_pool_compression_mode(app_pools, 'force') + + logging.info('Checking Ceph pool compression_mode after ' + 'restoring config to previous value') + self._check_pool_compression_mode(app_pools, 'none') + + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=2, max=10), + stop=tenacity.stop_after_attempt(10), + reraise=True, + retry=tenacity.retry_if_exception_type(AssertionError) + ) + def _check_pool_compression_mode(self, app_pools, mode): + ceph_pools_detail = zaza_ceph.get_ceph_pool_details( + model_name=self.model_name) + logging.debug('ceph_pools_details: %s', ceph_pools_detail) + logging.debug(juju_utils.get_relation_from_unit( + 'ceph-mon', self.application_name, None, + model_name=self.model_name)) + self._assert_pools_properties( + app_pools, ceph_pools_detail, + {'options': {'compression_mode': mode}}) + + def test_invalid_compression_configuration(self): + """Set invalid configuration and validate charm response.""" + if not self.mimic_or_newer: + logging.info('Skipping test, Mimic or newer required.') + return + stored_target_deploy_status = self.test_config.get( + 'target_deploy_status', {}) + new_target_deploy_status = stored_target_deploy_status.copy() + new_target_deploy_status[self.application_name] = { + 'workload-status': 'blocked', + 'workload-status-message': 'Invalid configuration', + } + if 'target_deploy_status' in self.test_config: + self.test_config['target_deploy_status'].update( + new_target_deploy_status) + else: + self.test_config['target_deploy_status'] = new_target_deploy_status + + with self.config_change( + {'bluestore-compression-mode': 'none'}, + {'bluestore-compression-mode': 'PEBCAK'}): + logging.info('Charm went into blocked state as expected, restore ' + 'configuration') + self.test_config[ + 'target_deploy_status'] = stored_target_deploy_status + + +class CephAuthTest(unittest.TestCase): + """Ceph auth tests (user creation and deletion).""" + + def test_ceph_auth(self): + """Test creating and deleting user.""" + logging.info('Creating user and exported keyring...') + action_obj = zaza_model.run_action_on_leader( + 'ceph-mon', + 'get-or-create-user', + action_params={'username': 'sandbox', + 'mon-caps': 'allow r', + 'osd-caps': 'allow r'} + ) + logging.debug('Result of action: {}'.format(action_obj)) + create_results = json.loads(action_obj.data['results']['message']) + + logging.info('Getting existing user and exported keyring...') + action_obj = zaza_model.run_action_on_leader( + 'ceph-mon', + 'get-or-create-user', + action_params={'username': 'sandbox'} + ) + logging.debug('Result of action: {}'.format(action_obj)) + get_results = 
json.loads(action_obj.data['results']['message']) + + self.assertEqual(get_results, create_results) + + logging.info('Deleting existing user...') + action_obj = zaza_model.run_action_on_leader( + 'ceph-mon', + 'delete-user', + action_params={'username': 'sandbox'} + ) + logging.debug('Result of action: {}'.format(action_obj)) + + logging.info('Verify user is deleted...') + result = zaza_model.run_on_leader( + 'ceph-mon', + 'sudo ceph auth get client.sandbox', + ) + logging.debug('ceph auth get: {}'.format(result)) + self.assertIn("failed to find client.sandbox", result.get('Stderr')) + + +class CephMonActionsTest(test_utils.BaseCharmTest): + """Test miscellaneous actions of the ceph-mon charm.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph-mon actions.""" + super(CephMonActionsTest, cls).setUpClass() + # Allow mons to delete pools. + zaza_model.run_on_unit( + 'ceph-mon/0', + "ceph tell mon.\\* injectargs '--mon-allow-pool-delete=true'" + ) + + def _get_osd_weight(self, osd, unit): + """Fetch the CRUSH weight of an OSD.""" + cmd = 'sudo ceph osd crush tree --format=json' + result = zaza_model.run_on_unit(unit, cmd) + self.assertEqual(int(result.get('Code')), 0) + + tree = json.loads(result.get('Stdout')) + for node in tree['nodes']: + if node.get('name') == osd: + return node['crush_weight'] + + def test_reweight_osd(self): + """Test the change-osd-weight action.""" + unit = 'ceph-mon/0' + osd = 0 + osd_str = 'osd.' + str(osd) + weight = 700 + prev_weight = self._get_osd_weight(osd_str, unit) + try: + action_obj = zaza_model.run_action( + unit_name=unit, + action_name='change-osd-weight', + action_params={'osd': osd, 'weight': 700} + ) + zaza_utils.assertActionRanOK(action_obj) + self.assertEqual(weight, self._get_osd_weight(osd_str, unit)) + finally: + # Reset the weight. + zaza_model.run_action( + unit_name=unit, + action_name='change-osd-weight', + action_params={'osd': osd, 'weight': prev_weight} + ) + + def test_copy_pool(self): + """Test the copy-pool (and list-pool) action.""" + unit = 'ceph-mon/0' + logging.debug('Creating secondary test pool') + cmd = 'sudo ceph osd pool create test2 32' + cmd2 = 'sudo ceph osd pool create test3 32' + try: + result = zaza_model.run_on_unit(unit, cmd) + self.assertEqual(int(result.get('Code')), 0) + result = zaza_model.run_on_unit(unit, cmd2) + self.assertEqual(int(result.get('Code')), 0) + + action_obj = zaza_model.run_action( + unit_name=unit, + action_name='list-pools', + action_params={} + ) + zaza_utils.assertActionRanOK(action_obj) + self.assertIn('test2', action_obj.data['results']['message']) + self.assertIn('test3', action_obj.data['results']['message']) + + logging.debug('Copying test pool') + action_obj = zaza_model.run_action( + unit_name=unit, + action_name='copy-pool', + action_params={'source': 'test2', 'target': 'test3'} + ) + zaza_utils.assertActionRanOK(action_obj) + finally: + # Clean up our mess. 
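+            # 'ceph osd pool delete' requires the pool name twice plus the
+            # --yes-i-really-really-mean-it flag, and only succeeds because
+            # setUpClass injected mon-allow-pool-delete=true on the mons.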
+ zaza_model.run_on_unit( + unit, + ('sudo ceph osd pool delete test2 test2 ' + '--yes-i-really-really-mean-it') + ) + zaza_model.run_on_unit( + unit, + ('sudo ceph osd pool delete test3 test3 ' + '--yes-i-really-really-mean-it') + ) + + +class CephMonJujuPersistent(test_utils.BaseCharmTest): + """Check juju persistent config is working.""" + + def test_persistent_config(self): + """Check persistent config will update if config change.""" + set_default = { + 'loglevel': 1, + } + set_alternate = { + 'loglevel': 2, + } + unit = 'ceph-mon/0' + cmd = ( + 'cat /var/lib/juju/agents' + '/unit-ceph-mon-0/charm/.juju-persistent-config' + ) + with self.config_change( + default_config=set_default, + alternate_config=set_alternate, + application_name='ceph-mon', + ): + result = zaza_model.run_on_unit( + unit, + cmd, + ) + data = json.loads(result['Stdout']) + assert data['loglevel'] == 2 + + +class CephMonKeyRotationTests(test_utils.BaseCharmTest): + """Tests for the rotate-key action.""" + + def setUp(self): + """Initialize key rotation test class.""" + super(CephMonKeyRotationTests, self).setUp() + try: + # Workaround for ubuntu units that don't play nicely with zaza. + zaza_model.get_application('ubuntu') + self.app_states = { + 'ubuntu': { + 'workload-status-message': '' + } + } + except KeyError: + self.app_states = None + + def _get_all_keys(self, unit, entity_filter): + cmd = 'sudo ceph auth ls' + result = zaza_model.run_on_unit(unit, cmd) + # Don't use json formatting, as it's buggy upstream. + data = result['Stdout'].split() + ret = set() + + for ix, line in enumerate(data): + # Structure: + # $ENTITY + # key: + # key contents + # That's why we need to move one position ahead. + if 'key:' in line and entity_filter(data[ix - 1]): + ret.add((data[ix - 1], data[ix + 1])) + return ret + + def _check_key_rotation(self, entity, unit): + def entity_filter(name): + return name.startswith(entity) + + old_keys = self._get_all_keys(unit, entity_filter) + action_obj = zaza_model.run_action( + unit_name=unit, + action_name='rotate-key', + action_params={'entity': entity} + ) + zaza_utils.assertActionRanOK(action_obj) + # NOTE(lmlg): There's a nasty race going on here. Essentially, + # since this action involves 2 different applications, what + # happens is as follows: + # (1) (2) (3) (4) + # ceph-mon rotates key | (idle) | remote-unit rotates key | (idle) + # Between (2) and (3), there's a window where all units are + # idle, _but_ the key hasn't been rotated in the other unit. + # As such, we retry a few times instead of using the + # `wait_for_application_states` interface. + + for attempt in tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=2, max=32), + reraise=True, stop=tenacity.stop_after_attempt(20), + retry=tenacity.retry_if_exception_type(AssertionError) + ): + with attempt: + new_keys = self._get_all_keys(unit, entity_filter) + self.assertNotEqual(old_keys, new_keys) + + diff = new_keys - old_keys + self.assertEqual(len(diff), 1) + first = next(iter(diff)) + # Check that the entity matches. 
The 'entity_filter' + # callable will return a true-like value if it + # matches the type of entity we're after (i.e: 'mgr') + self.assertTrue(entity_filter(first[0])) + + def _get_rgw_client(self, unit): + ret = self._get_all_keys(unit, lambda x: x.startswith('client.rgw')) + if not ret: + return None + return next(iter(ret))[0] + + def _get_fs_client(self, unit): + def _filter_fs(name): + return (name.startswith('mds.') and + name not in ('mds.ceph-fs', 'mds.None')) + + ret = self._get_all_keys(unit, _filter_fs) + if not ret: + return None + return next(iter(ret))[0] + + def test_key_rotate(self): + """Test that rotating the keys actually changes them.""" + unit = 'ceph-mon/0' + self._check_key_rotation('osd.0', unit) + + try: + zaza_model.get_application('ceph-radosgw') + rgw_client = self._get_rgw_client(unit) + if rgw_client: + self._check_key_rotation(rgw_client, unit) + else: + logging.info('ceph-radosgw units present, but no RGW service') + except KeyError: + pass + + try: + zaza_model.get_application('ceph-fs') + fs_svc = self._get_fs_client(unit) + if fs_svc is not None: + # Only wait for ceph-fs, as this model includes 'ubuntu' + # units, and those don't play nice with zaza (they don't + # set the workload-status-message correctly). + self._check_key_rotation(fs_svc, unit) + else: + logging.info('ceph-fs units present, but no MDS service') + except KeyError: + pass diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index 913c74d1..a6fe3e8e 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,40 +1,11 @@ charm_name: ceph-mon -gate_bundles: - - jammy-yoga - - jammy-bobcat - - jammy-caracal - -smoke_bundles: - - jammy-caracal - -configure: - - install: - - zaza.openstack.charm_tests.glance.setup.add_lts_image - tests: - - install: - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - - zaza.openstack.charm_tests.ceph.mon.tests.CephPermissionUpgradeTest - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - # Charm upgrade, then re-run tests - - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-mon - - zaza.openstack.charm_tests.ceph.tests.CephMonJujuPersistent - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - # Tests from quincy. 
- - zaza.openstack.charm_tests.ceph.tests.CephAuthTest - - zaza.openstack.charm_tests.ceph.tests.CephMonActionsTest - - zaza.openstack.charm_tests.ceph.mon.tests.CephPermissionUpgradeTest - - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephLowLevelTest + - tests.target.CephTest + - tests.target.CephPrometheusTest + - tests.target.CephAuthTest + - tests.target.CephMonActionsTest + - tests.target.CephMonKeyRotationTests + - tests.target.CephMonJujuPersistent diff --git a/ceph-mon/tox.ini b/ceph-mon/tox.ini index ec2ce221..82202a8f 100644 --- a/ceph-mon/tox.ini +++ b/ceph-mon/tox.ini @@ -21,7 +21,9 @@ minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} @@ -74,6 +76,11 @@ basepython = python3.11 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py312] +basepython = python3.12 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -81,8 +88,8 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = flake8==3.9.2 - charm-tools==2.8.4 +deps = flake8 + charm-tools commands = flake8 {posargs} unit_tests tests actions files src [testenv:cover] diff --git a/ceph-mon/unit_tests/helpers.py b/ceph-mon/unit_tests/helpers.py index f0970674..70dc272f 100644 --- a/ceph-mon/unit_tests/helpers.py +++ b/ceph-mon/unit_tests/helpers.py @@ -7,7 +7,9 @@ def patch_network_get(private_address="10.0.0.10") -> Callable: def network_get(*args, **kwargs) -> dict: - """Patch for the not-yet-implemented testing backend needed for `bind_address`. + """ + Patch for the not-yet-implemented testing backend needed + for `bind_address`. 
This patch decorator can be used for cases such as: self.model.get_binding(event.relation).network.bind_address diff --git a/ceph-nfs/tests/nfs_ganesha.py b/ceph-nfs/tests/nfs_ganesha.py index 845700b4..e1e9ebf1 100644 --- a/ceph-nfs/tests/nfs_ganesha.py +++ b/ceph-nfs/tests/nfs_ganesha.py @@ -21,23 +21,37 @@ import unittest import yaml import zaza +import zaza.model as model import zaza.utilities.installers +from tenacity import stop_after_attempt, wait_exponential, retry_if_result class NfsGaneshaTest(unittest.TestCase): mount_dir = '/mnt/test' share_protocol = 'nfs' - mounts_share = False - created_share = None + + def setUp(self): + super(NfsGaneshaTest, self).setUp() + self.created_share = None + self.mounts_share = False + ip1 = zaza.model.get_unit_public_address( + zaza.model.get_unit_from_name('ceph-nfs/0') + ) + ip2 = zaza.model.get_unit_public_address( + zaza.model.get_unit_from_name('ceph-nfs/1') + ) + zaza.model.set_application_config( + 'ceph-nfs', + {'vip': ' '.join([str(ip1), str(ip2)])}) def tearDown(self): if self.mounts_share: try: zaza.utilities.generic.run_via_ssh( - unit_name='ubuntu/0', + unit_name='ceph-osd/0', cmd='sudo umount /mnt/test && sudo rmdir /mnt/test') zaza.utilities.generic.run_via_ssh( - unit_name='ubuntu/1', + unit_name='ceph-osd/1', cmd='sudo umount /mnt/test && sudo rmdir /mnt/test') except subprocess.CalledProcessError: logging.warning("Failed to cleanup mounts") @@ -52,6 +66,7 @@ def tearDown(self): def _create_share(self, name: str, size: int = 10, access_ip: str = '0.0.0.0') -> Dict[str, str]: + logging.info(f"create share {name}, access_ip {access_ip}") action = zaza.model.run_action_on_leader( 'ceph-nfs', 'create-share', @@ -63,7 +78,7 @@ def _create_share(self, name: str, size: int = 10, self.assertEqual(action.status, 'completed') self.created_share = name results = action.results - logging.debug("Action results: {}".format(results)) + logging.info("create-share action: {}".format(results)) return results def _grant_access(self, share_name: str, access_ip: str): @@ -77,28 +92,31 @@ def _grant_access(self, share_name: str, access_ip: str): self.assertEqual(action.status, 'completed') def _mount_share(self, unit_name: str, share_ip: str, - export_path: str, retry: bool = True): + export_path: str, perform_retry: bool = True): self._install_dependencies(unit_name) - ssh_cmd = ( + cmd = ( 'sudo mkdir -p {0} && ' 'sudo mount -t {1} -o nfsvers=4.1,proto=tcp {2}:{3} {0}'.format( self.mount_dir, self.share_protocol, share_ip, export_path)) - if retry: - for attempt in tenacity.Retrying( - stop=tenacity.stop_after_attempt(5), - wait=tenacity.wait_exponential(multiplier=3, - min=2, max=10)): - with attempt: - zaza.utilities.generic.run_via_ssh( - unit_name=unit_name, - cmd=ssh_cmd) + if perform_retry: + @tenacity.retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=3, min=2, max=10), + retry=retry_if_result(lambda res: res.get('Code') != '0') + ) + def _do_mount(): + logging.info(f"Mounting CephFS on {unit_name}: {cmd}") + res = model.run_on_unit(unit_name, cmd) + logging.info(f"Mount result: {res}") + return res + + _do_mount() else: - zaza.utilities.generic.run_via_ssh( - unit_name=unit_name, - cmd=ssh_cmd) + model.run_on_unit(unit_name, cmd) + self.mounts_share = True def _install_dependencies(self, unit: str): @@ -120,46 +138,53 @@ def _write_testing_file_on_instance(self, instance_name: str): wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) def _verify_testing_file_on_instance(self, instance_name: str): 
run_with_juju_ssh = zaza.utilities.installers.make_juju_ssh_fn( - 'ubuntu/1', sudo=True + 'ceph-osd/1', sudo=True ) output = run_with_juju_ssh( 'sudo cat {}/test'.format(self.mount_dir)) logging.info("Verification output: {}".format(output)) self.assertEqual('test', output.strip()) + def _get_ipaddr(self, unit): + """Run ssh cmd on unit to get ipaddresses""" + cmd = ('''ip -o addr show | \ + awk '$2 != "lo" && ($3 == "inet" || $3 == "inet6")''' + '''{ sub("/.*","",$4); print $4 }' + ''') + res = model.run_on_unit(unit, cmd) + return res['Stdout'].strip().splitlines() + def test_create_share(self): logging.info("Creating a share") # Todo - enable ACL testing - ubuntu_0_ip = zaza.model.get_unit_public_address( - zaza.model.get_unit_from_name('ubuntu/0') - ) - ubuntu_1_ip = zaza.model.get_unit_public_address( - zaza.model.get_unit_from_name('ubuntu/1') - ) - share = self._create_share('test_ganesha_share', access_ip=ubuntu_0_ip) - # share = self._create_share('test_ganesha_share') - zaza.model.wait_for_application_states(states={ - 'ubuntu': { - "workload-status-message-regex": "^$", - } - }) + osd_0_ip = ','.join(self._get_ipaddr('ceph-osd/0')) + osd_1_ip = ','.join(self._get_ipaddr('ceph-osd/1')) + share = self._create_share('test_ganesha_share', access_ip=osd_0_ip) + sharelist = zaza.model.run_action_on_leader( + 'ceph-nfs', + 'list-shares', + action_params={}) + logging.info("sharelist: {}".format(sharelist.results)) + export_path = share['path'] ip = share['ip'] - logging.info("Mounting share on ubuntu units") - self._mount_share('ubuntu/0', ip, export_path) - logging.info("writing to the share on ubuntu/0") - self._write_testing_file_on_instance('ubuntu/0') + logging.info("Mounting {} on ceph-osd units".format(export_path)) + self._mount_share('ceph-osd/0', ip, export_path) + logging.info("writing to the share on ceph-osd/0") + self._write_testing_file_on_instance('ceph-osd/0') # Todo - enable ACL testing try: - self._mount_share('ubuntu/1', ip, export_path, retry=False) + self._mount_share( + 'ceph-osd/1', ip, export_path, perform_retry=False + ) self.fail('Mounting should not have succeeded') except: # noqa: E722 pass - self._grant_access('test_ganesha_share', access_ip=ubuntu_1_ip) + self._grant_access('test_ganesha_share', access_ip=osd_1_ip) - self._mount_share('ubuntu/1', ip, export_path) - logging.info("reading from the share on ubuntu/1") - self._verify_testing_file_on_instance('ubuntu/1') + self._mount_share('ceph-osd/1', ip, export_path) + logging.info("reading from the share on ceph-osd/1") + self._verify_testing_file_on_instance('ceph-osd/1') def test_list_shares(self): self._create_share('test_ganesha_list_share') diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index b435eff8..0aec63fc 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -1,12 +1,8 @@ charm_name: ceph-nfs gate_bundles: - - focal-quincy - - focal-pacific - - jammy-quincy - - jammy-reef - - jammy-squid + - jammy-caracal smoke_bundles: - - focal-pacific + - jammy-caracal configure: [] tests: - tests.nfs_ganesha.NfsGaneshaTest diff --git a/ceph-nfs/tox.ini b/ceph-nfs/tox.ini index c8550616..80452bfa 100644 --- a/ceph-nfs/tox.ini +++ b/ceph-nfs/tox.ini @@ -1,5 +1,12 @@ -# Operator charm (with zaza): tox.ini - +# Classic charm (with zaza): ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] envlist = pep8,py3 skipsdist = True @@ -7,43 +14,37 @@ skipsdist = True sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -# * It is necessary to declare setuptools as a dependency otherwise tox will -# fail very early at not being able to load it. The version pinning is in -# line with `pip.sh`. -requires = pip < 20.3 - virtualenv < 20.0 - setuptools < 50.0.0 + # NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.2.0 +minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. install_command = - pip install {opts} {packages} + pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = - git - add-to-archive.py - bash charmcraft - rename.sh -passenv = HOME TERM CS_* OS_* TEST_* + {toxinidir}/rename.sh +passenv = + HOME + TERM + CS_* + OS_* + TEST_* deps = -r{toxinidir}/test-requirements.txt -[testenv:py35] -basepython = python3.5 -# python3.5 is irrelevant on a focal+ charm. -commands = /bin/true +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v pack + {toxinidir}/rename.sh [testenv:py36] basepython = python3.6 @@ -70,6 +71,16 @@ basepython = python3.10 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py311] +basepython = python3.11 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py312] +basepython = python3.12 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -77,9 +88,9 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} src unit_tests tests +deps = flake8 + charm-tools +commands = flake8 {posargs} unit_tests tests src [testenv:cover] # Technique based heavily upon @@ -113,14 +124,6 @@ omit = basepython = python3 commands = {posargs} -[testenv:build] -basepython = python3 -deps = -r{toxinidir}/build-requirements.txt -commands = - charmcraft clean - charmcraft -v pack - {toxinidir}/rename.sh - [testenv:func-noop] basepython = python3 commands = @@ -147,5 +150,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -# Ignore E902 because the unit_tests directory is missing in the built charm. 
-ignore = E402,E226,E902 +ignore = E402,E226,W503,W504 +exclude = */charmhelpers diff --git a/ceph-osd/tests/target.py b/ceph-osd/tests/target.py new file mode 100644 index 00000000..6b9e68f6 --- /dev/null +++ b/ceph-osd/tests/target.py @@ -0,0 +1,922 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ceph Testing.""" + +import unittest +from copy import deepcopy +import json +import logging +from os import ( + listdir, + path +) +import re +import tempfile +import urllib3 + +import tenacity + +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.model as zaza_model +import zaza.openstack.utilities.ceph as zaza_ceph +import zaza.openstack.utilities.exceptions as zaza_exceptions +import zaza.openstack.utilities.generic as zaza_utils +import zaza.openstack.utilities.openstack as zaza_openstack + +# Disable warnings for ssl_verify=false +urllib3.disable_warnings( + urllib3.exceptions.InsecureRequestWarning +) + + +class CephLowLevelTest(test_utils.BaseCharmTest): + """Ceph Low Level Test Class.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph low level tests.""" + super(CephLowLevelTest, cls).setUpClass() + + def test_processes(self): + """Verify Ceph processes. + + Verify that the expected service processes are running + on each ceph unit. + """ + logging.info('Checking ceph-mon and ceph-osd processes...') + # Process name and quantity of processes to expect on each unit + ceph_mon_processes = { + 'ceph-mon': 1, + 'ceph-mgr': 1, + } + + ceph_osd_processes = { + 'ceph-osd': [1, 2, 3] + } + + # Units with process names and PID quantities expected + expected_processes = { + 'ceph-mon/0': ceph_mon_processes, + 'ceph-mon/1': ceph_mon_processes, + 'ceph-mon/2': ceph_mon_processes, + 'ceph-osd/0': ceph_osd_processes, + 'ceph-osd/1': ceph_osd_processes, + 'ceph-osd/2': ceph_osd_processes + } + + actual_pids = zaza_utils.get_unit_process_ids(expected_processes) + ret = zaza_utils.validate_unit_process_ids(expected_processes, + actual_pids) + self.assertTrue(ret) + + def test_services(self): + """Verify the ceph services. + + Verify the expected services are running on the service units. 
+ """ + logging.info('Checking ceph-osd and ceph-mon services...') + services = {} + ceph_services = ['ceph-mon', 'ceph-mgr'] + services['ceph-osd/0'] = ['ceph-osd'] + + services['ceph-mon/0'] = ceph_services + services['ceph-mon/1'] = ceph_services + services['ceph-mon/2'] = ceph_services + + for unit_name, unit_services in services.items(): + zaza_model.block_until_service_status( + unit_name=unit_name, + services=unit_services, + target_status='running' + ) + + @test_utils.skipUntilVersion('ceph-mon', 'ceph', '14.2.0') + def test_pg_tuning(self): + """Verify that auto PG tuning is enabled for Nautilus+.""" + unit_name = 'ceph-mon/0' + cmd = "ceph osd pool autoscale-status --format=json" + result = zaza_model.run_on_unit(unit_name, cmd) + self.assertEqual(result['Code'], '0') + for pool in json.loads(result['Stdout']): + self.assertEqual(pool['pg_autoscale_mode'], 'on') + + +class CephTest(test_utils.BaseCharmTest): + """Ceph common functional tests.""" + + @classmethod + def setUpClass(cls): + """Run the ceph's common class setup.""" + super(CephTest, cls).setUpClass() + + def osd_out_in(self, services): + """Run OSD out and OSD in tests. + + Remove OSDs and then add them back in on a unit checking that services + are in the required state after each action + + :param services: Services expected to be restarted when config_file is + changed. + :type services: list + """ + zaza_model.block_until_service_status( + self.lead_unit, + services, + 'running', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'active', + model_name=self.model_name) + zaza_model.run_action( + self.lead_unit, + 'osd-out', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'maintenance', + model_name=self.model_name) + zaza_model.block_until_all_units_idle(model_name=self.model_name) + zaza_model.run_action( + self.lead_unit, + 'osd-in', + model_name=self.model_name) + zaza_model.block_until_unit_wl_status( + self.lead_unit, + 'active', + model_name=self.model_name) + zaza_model.block_until_all_units_idle(model_name=self.model_name) + zaza_model.block_until_service_status( + self.lead_unit, + services, + 'running', + model_name=self.model_name) + + def test_ceph_check_osd_pools(self): + """Check OSD pools. + + Check osd pools on all ceph units, expect them to be + identical, and expect specific pools to be present. + """ + try: + zaza_model.get_application('cinder-ceph') + except KeyError: + raise unittest.SkipTest("Skipping OpenStack dependent test") + logging.info('Checking pools on ceph units...') + + expected_pools = zaza_ceph.get_expected_pools() + results = [] + unit_name = 'ceph-mon/0' + + # Check for presence of expected pools on each unit + logging.debug('Expected pools: {}'.format(expected_pools)) + pools = zaza_ceph.get_ceph_pools(unit_name) + results.append(pools) + + for expected_pool in expected_pools: + if expected_pool not in pools: + msg = ('{} does not have pool: ' + '{}'.format(unit_name, expected_pool)) + raise zaza_exceptions.CephPoolNotFound(msg) + logging.debug('{} has (at least) the expected ' + 'pools.'.format(unit_name)) + + # Check that all units returned the same pool name:id data + for i, result in enumerate(results): + for other in results[i+1:]: + logging.debug('result: {}, other: {}'.format(result, other)) + self.assertEqual(result, other) + + def test_ceph_pool_creation_with_text_file(self): + """Check the creation of a pool and a text file. 
+ + Create a pool, add a text file to it and retrieve its content. + Verify that the content matches the original file. + """ + unit_name = 'ceph-mon/0' + cmd = 'sudo ceph osd pool create test {PG_NUM}; \ + echo 123456789 > /tmp/input.txt; \ + rados put -p test test_input /tmp/input.txt; \ + rados get -p test test_input /dev/stdout' + cmd = cmd.format(PG_NUM=32) + logging.debug('Creating test pool and putting test file in pool...') + result = zaza_model.run_on_unit(unit_name, cmd) + code = result.get('Code') + if code != '0': + raise zaza_model.CommandRunFailed(cmd, result) + output = result.get('Stdout').strip() + logging.debug('Output received: {}'.format(output)) + self.assertEqual(output, '123456789') + + def test_ceph_encryption(self): + """Test Ceph encryption. + + Verify that the new disk is added with encryption by checking for + Ceph's encryption keys directory. + """ + current_release = zaza_openstack.get_os_release(application='ceph-mon') + trusty_mitaka = zaza_openstack.get_os_release('trusty_mitaka') + if current_release >= trusty_mitaka: + logging.warn("Skipping encryption test for Mitaka and higher") + return + unit_name = 'ceph-osd/0' + set_default = { + 'osd-encrypt': 'False', + 'osd-devices': '/dev/vdb /srv/ceph', + } + set_alternate = { + 'osd-encrypt': 'True', + 'osd-devices': '/dev/vdb /srv/ceph /srv/ceph_encrypted', + } + juju_service = 'ceph-osd' + logging.info('Making config change on {}...'.format(juju_service)) + mtime = zaza_model.get_unit_time(unit_name) + + file_mtime = None + + folder_name = '/etc/ceph/dmcrypt-keys/' + with self.config_change(set_default, set_alternate, + application_name=juju_service): + with tempfile.TemporaryDirectory() as tempdir: + # Creating a temp dir to copy keys + temp_folder = '/tmp/dmcrypt-keys' + cmd = 'mkdir {}'.format(temp_folder) + ret = zaza_model.run_on_unit(unit_name, cmd) + logging.debug('Ret for cmd {} is {}'.format(cmd, ret)) + # Copy keys from /etc to /tmp + cmd = 'sudo cp {}* {}'.format(folder_name, temp_folder) + ret = zaza_model.run_on_unit(unit_name, cmd) + logging.debug('Ret for cmd {} is {}'.format(cmd, ret)) + # Changing permissions to be able to SCP the files + cmd = 'sudo chown -R ubuntu:ubuntu {}'.format(temp_folder) + ret = zaza_model.run_on_unit(unit_name, cmd) + logging.debug('Ret for cmd {} is {}'.format(cmd, ret)) + # SCP to retrieve all files in folder + # -p: preserve timestamps + source = '/tmp/dmcrypt-keys/*' + zaza_model.scp_from_unit(unit_name=unit_name, + source=source, + destination=tempdir, + scp_opts='-p') + for elt in listdir(tempdir): + file_path = '/'.join([tempdir, elt]) + if path.isfile(file_path): + file_mtime = path.getmtime(file_path) + if file_mtime: + break + + if not file_mtime: + logging.warn('Could not determine mtime, assuming ' + 'folder does not exist') + raise FileNotFoundError('folder does not exist') + + if file_mtime >= mtime: + logging.info('Folder mtime is newer than provided mtime ' + '(%s >= %s) on %s (OK)' % (file_mtime, + mtime, unit_name)) + else: + logging.warn('Folder mtime is older than provided mtime' + '(%s < on %s) on %s' % (file_mtime, + mtime, unit_name)) + raise Exception('Folder mtime is older than provided mtime') + + def test_blocked_when_non_pristine_disk_appears(self): + """Test blocked state with non-pristine disk. + + Validate that charm goes into blocked state when it is presented with + new block devices that have foreign data on them. + Instances used in UOSCI has a flavour with ephemeral storage in + addition to the bootable instance storage. 
The ephemeral storage + device is partitioned, formatted and mounted early in the boot process + by cloud-init. + As long as the device is mounted the charm will not attempt to use it. + If we unmount it and trigger the config-changed hook the block device + will appear as a new and previously untouched device for the charm. + One of the first steps of device eligibility checks should be to make + sure we are seeing a pristine and empty device before doing any + further processing. + As the ephemeral device will have data on it we can use it to validate + that these checks work as intended. + """ + current_release = zaza_openstack.get_os_release(application='ceph-mon') + focal_ussuri = zaza_openstack.get_os_release('focal_ussuri') + if current_release >= focal_ussuri: + # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb + # and so this test can't pass: LP#1842751 discusses the issue, but + # basically the snapd daemon along with lxcfs results in /dev/vdb + # being mounted in the lxcfs process namespace. If the charm + # 'tries' to umount it, it can (as root), but the mount is still + # 'held' by lxcfs and thus nothing else can be done with it. This + # is only a problem in serverstack with images with a default + # /dev/vdb ephemeral + logging.warn("Skipping pristine disk test for focal and higher") + return + logging.info('Checking behaviour when non-pristine disks appear...') + logging.info('Configuring ephemeral-unmount...') + alternate_conf = { + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb' + } + juju_service = 'ceph-osd' + zaza_model.set_application_config(juju_service, alternate_conf) + ceph_osd_states = { + 'ceph-osd': { + 'workload-status': 'blocked', + 'workload-status-message': 'Non-pristine' + } + } + zaza_model.wait_for_application_states(states=ceph_osd_states) + logging.info('Units now in blocked state, running zap-disk action...') + unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2'] + for unit_name in unit_names: + zap_disk_params = { + 'devices': '/dev/vdb', + 'i-really-mean-it': True, + } + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='zap-disk', + action_params=zap_disk_params + ) + logging.debug('Result of action: {}'.format(action_obj)) + + logging.info('Running add-disk action...') + for unit_name in unit_names: + add_disk_params = { + 'osd-devices': '/dev/vdb', + } + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='add-disk', + action_params=add_disk_params + ) + logging.debug('Result of action: {}'.format(action_obj)) + + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states() + + logging.info('OK') + + set_default = { + 'ephemeral-unmount': '', + 'osd-devices': '/dev/vdb', + } + + bionic_train = zaza_openstack.get_os_release('bionic_train') + if current_release < bionic_train: + set_default['osd-devices'] = '/dev/vdb /srv/ceph' + + logging.info('Restoring to default configuration...') + zaza_model.set_application_config(juju_service, set_default) + + zaza_model.wait_for_application_states() + + def test_pause_and_resume(self): + """The services can be paused and resumed.""" + logging.info('Checking pause and resume actions...') + self.pause_resume(['ceph-osd']) + + def get_device_for_blacklist(self, unit): + """Return a device to be used by the blacklist tests.""" + cmd = "mount | grep 'on / ' | awk '{print $1}'" + obj = zaza_model.run_on_unit(unit, cmd) + return obj.get('Stdout').strip() + + def test_blacklist(self): + """Check the blacklist 
action. + + The blacklist actions execute and behave as expected. + """ + logging.info('Checking blacklist-add-disk and ' + 'blacklist-remove-disk actions...') + unit_name = 'ceph-osd/0' + + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + + # Attempt to add device with non-absolute path should fail + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='blacklist-add-disk', + action_params={'osd-devices': 'vda'} + ) + self.assertTrue(action_obj.status != 'completed') + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + + # Attempt to add device with non-existent path should fail + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='blacklist-add-disk', + action_params={'osd-devices': '/non-existent'} + ) + self.assertTrue(action_obj.status != 'completed') + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + + # Attempt to add device with existent path should succeed + device = self.get_device_for_blacklist(unit_name) + if not device: + raise unittest.SkipTest( + "Skipping test because no device was found") + + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='blacklist-add-disk', + action_params={'osd-devices': device} + ) + self.assertEqual('completed', action_obj.status) + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + + # Attempt to remove listed device should always succeed + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='blacklist-remove-disk', + action_params={'osd-devices': device} + ) + self.assertEqual('completed', action_obj.status) + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + logging.debug('OK') + + def test_list_disks(self): + """Test the list-disks action. + + The list-disks action execute. + """ + logging.info('Checking list-disks action...') + unit_name = 'ceph-osd/0' + + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + + action_obj = zaza_model.run_action( + unit_name=unit_name, + action_name='list-disks', + ) + self.assertEqual('completed', action_obj.status) + zaza_model.block_until_unit_wl_status( + unit_name, + 'active' + ) + logging.debug('OK') + + def get_local_osd_id(self, unit): + """Get the OSD id for a unit.""" + ret = zaza_model.run_on_unit(unit, + 'ceph-volume lvm list --format=json') + local = list(json.loads(ret['Stdout']))[-1] + return local if local.startswith('osd.') else 'osd.' + local + + def get_num_osds(self, osd, is_up_only=False): + """Compute the number of active OSD's.""" + result = zaza_model.run_on_unit(osd, 'ceph osd stat --format=json') + result = json.loads(result['Stdout']) + if is_up_only: + return int(result['num_up_osds']) + else: + return int(result['num_osds']) + + def get_osd_devices_on_unit(self, unit_name): + """Get information for osd devices present on a particular unit. + + :param unit: Unit name to be queried for osd device info. + :type unit: str + """ + osd_devices = json.loads( + zaza_model.run_on_unit( + unit_name, 'ceph-volume lvm list --format=json' + ).get('Stdout', '') + ) + + return osd_devices + + def remove_disk_from_osd_unit(self, unit, osd_id, is_purge=False): + """Remove osd device with provided osd_id from unit. + + :param unit: Unit name where the osd device is to be removed from. + :type unit: str + + :param osd_id: osd-id for the osd device to be removed. 
+        :type osd_id: str
+
+        :param is_purge: whether to purge the osd device
+        :type is_purge: bool
+        """
+        action_obj = zaza_model.run_action(
+            unit_name=unit,
+            action_name='remove-disk',
+            action_params={
+                'osd-ids': osd_id,
+                'timeout': 10,
+                'format': 'json',
+                'purge': is_purge
+            }
+        )
+        zaza_utils.assertActionRanOK(action_obj)
+        results = json.loads(action_obj.data['results']['message'])
+        results = results[next(iter(results))]
+        self.assertEqual(results['osd-ids'], osd_id)
+        zaza_model.run_on_unit(unit, 'partprobe')
+
+    def remove_one_osd(self, unit, block_devs):
+        """Remove one device from osd unit.
+
+        :param unit: Unit name where the osd device is to be removed from.
+        :type unit: str
+        :param block_devs: list of block devices on the specified unit
+        :type block_devs: list[str]
+        """
+        # Should have more than 1 OSD so we can take one out and test.
+        self.assertGreater(len(block_devs), 1)
+
+        # Get complete device details for an OSD.
+        key = list(block_devs)[-1]
+        device = {
+            'osd-id': key if key.startswith('osd.') else 'osd.' + key,
+            'block-device': block_devs[key][0]['devices'][0]
+        }
+
+        self.remove_disk_from_osd_unit(unit, device['osd-id'], is_purge=True)
+        return device
+
+    def test_cache_device(self):
+        """Test replacing a disk in use."""
+        logging.info('Running add-disk action with a caching device')
+        mon = next(iter(zaza_model.get_units('ceph-mon'))).entity_id
+        osds = [x.entity_id for x in zaza_model.get_units('ceph-osd')]
+        osd_info = dict()
+
+        # Remove one of the two disks.
+        logging.info('Removing single disk from each OSD')
+        for unit in osds:
+            block_devs = self.get_osd_devices_on_unit(unit)
+            if len(block_devs) < 2:
+                continue
+            device_info = self.remove_one_osd(unit, block_devs)
+            block_dev = device_info['block-device']
+            logging.info("Removing device %s from unit %s" % (block_dev, unit))
+            osd_info[unit] = device_info
+        if not osd_info:
+            raise unittest.SkipTest(
+                'Skipping OSD replacement Test, no spare devices added')
+
+        logging.debug('Removed OSD Info: {}'.format(osd_info))
+        zaza_model.wait_for_application_states()
+
+        logging.info('Recycling previously removed disks')
+        for unit, device_info in osd_info.items():
+            osd_id = device_info['osd-id']
+            block_dev = device_info['block-device']
+            logging.info("Found device %s on unit %s" % (block_dev, unit))
+            self.assertNotEqual(block_dev, None)
+            action_obj = zaza_model.run_action(
+                unit_name=unit,
+                action_name='add-disk',
+                action_params={'osd-devices': block_dev,
+                               'osd-ids': osd_id,
+                               'partition-size': 5}
+            )
+            zaza_utils.assertActionRanOK(action_obj)
+        zaza_model.wait_for_application_states()
+
+        logging.info('Removing previously added OSDs')
+        for unit, device_info in osd_info.items():
+            osd_id = device_info['osd-id']
+            block_dev = device_info['block-device']
+            logging.info(
+                "Removing block device %s from unit %s" %
+                (block_dev, unit)
+            )
+            self.remove_disk_from_osd_unit(unit, osd_id, is_purge=False)
+        zaza_model.wait_for_application_states()
+
+        logging.info('Finally adding back OSDs')
+        for unit, device_info in osd_info.items():
+            block_dev = device_info['block-device']
+            action_obj = zaza_model.run_action(
+                unit_name=unit,
+                action_name='add-disk',
+                action_params={'osd-devices': block_dev,
+                               'partition-size': 5}
+            )
+            zaza_utils.assertActionRanOK(action_obj)
+        zaza_model.wait_for_application_states()
+
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=2, max=32),
+            reraise=True, stop=tenacity.stop_after_attempt(10),
+
retry=tenacity.retry_if_exception_type(AssertionError) + ): + with attempt: + self.assertEqual( + len(osds) * 2, self.get_num_osds(mon, is_up_only=True) + ) + + +class SecurityTest(unittest.TestCase): + """Ceph Security Tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph security tests.""" + super(SecurityTest, cls).setUpClass() + + def test_osd_security_checklist(self): + """Verify expected state with security-checklist.""" + expected_failures = [] + expected_passes = [ + 'validate-file-ownership', + 'validate-file-permissions', + ] + + logging.info('Running `security-checklist` action' + ' on Ceph OSD leader unit') + test_utils.audit_assertions( + zaza_model.run_action_on_leader( + 'ceph-osd', + 'security-checklist', + action_params={}), + expected_passes, + expected_failures, + expected_to_pass=True) + + +class OsdService: + """Simple representation of ceph-osd systemd service.""" + + def __init__(self, id_): + """ + Init service using its ID. + + e.g.: id_=1 -> ceph-osd@1 + """ + self.id = id_ + self.name = 'ceph-osd@{}'.format(id_) + + +async def async_wait_for_service_status(unit_name, services, target_status, + model_name=None, timeout=2700): + """Wait for all services on the unit to be in the desired state. + + Note: This function emulates the + `zaza.model.async_block_until_service_status` function, but it's using + `systemctl is-active` command instead of `pidof/pgrep` of the original + function. + + :param unit_name: Name of unit to run action on + :type unit_name: str + :param services: List of services to check + :type services: List[str] + :param target_status: State services must be in (stopped or running) + :type target_status: str + :param model_name: Name of model to query. + :type model_name: str + :param timeout: Time to wait for status to be achieved + :type timeout: int + """ + async def _check_service(): + services_ok = True + for service in services: + command = r"systemctl is-active '{}'".format(service) + out = await zaza_model.async_run_on_unit( + unit_name, + command, + model_name=model_name, + timeout=timeout) + response = out['Stdout'].strip() + + if target_status == "running" and response == 'active': + continue + elif target_status == "stopped" and response == 'inactive': + continue + else: + services_ok = False + break + + return services_ok + + accepted_states = ('stopped', 'running') + if target_status not in accepted_states: + raise RuntimeError('Invalid target state "{}". 
Accepted states: '
+                           '{}'.format(target_status, accepted_states))
+
+    async with zaza_model.run_in_model(model_name):
+        await zaza_model.async_block_until(_check_service, timeout=timeout)
+
+
+wait_for_service = zaza_model.sync_wrapper(async_wait_for_service_status)
+
+
+class ServiceTest(unittest.TestCase):
+    """ceph-osd systemd service tests."""
+
+    TESTED_UNIT = 'ceph-osd/0'  # This can be any ceph-osd unit in the model
+    SERVICE_PATTERN = re.compile(r'ceph-osd@(?P<service_id>\d+)\.service')
+
+    def __init__(self, methodName='runTest'):
+        """Initialize Test Case."""
+        super(ServiceTest, self).__init__(methodName)
+        self._available_services = None
+
+    @classmethod
+    def setUpClass(cls):
+        """Run class setup for running ceph service tests."""
+        super(ServiceTest, cls).setUpClass()
+
+    def setUp(self):
+        """Run test setup."""
+        # Skip 'service' action tests on systems without systemd
+        result = zaza_model.run_on_unit(self.TESTED_UNIT, 'which systemctl')
+        if not result['Stdout']:
+            raise unittest.SkipTest("'service' action is not supported on "
+                                    "systems without 'systemd'. Skipping "
+                                    "tests.")
+        # Note(mkalcok): This counter reset is needed because ceph-osd service
+        #                is limited to 3 restarts per 30 mins which is
+        #                insufficient when running functional tests for
+        #                'service' action. This limitation is defined in
+        #                /lib/systemd/system/ceph-osd@.service in section
+        #                [Service] with options 'StartLimitInterval' and
+        #                'StartLimitBurst'
+        reset_counter = 'systemctl reset-failed'
+        zaza_model.run_on_unit(self.TESTED_UNIT, reset_counter)
+
+    def tearDown(self):
+        """Start ceph-osd services after each test.
+
+        This ensures that the environment is ready for the next tests.
+        """
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
+                                       action_params={'osds': 'all'},
+                                       raise_on_failure=True)
+
+    @property
+    def available_services(self):
+        """Return list of all ceph-osd services present on the TESTED_UNIT."""
+        if self._available_services is None:
+            self._available_services = self._fetch_osd_services()
+        return self._available_services
+
+    def _fetch_osd_services(self):
+        """Fetch all ceph-osd services present on the TESTED_UNIT."""
+        service_list = []
+        service_list_cmd = 'systemctl list-units --full --all ' \
+                           '--no-pager -t service'
+        result = zaza_model.run_on_unit(self.TESTED_UNIT, service_list_cmd)
+        for line in result['Stdout'].split('\n'):
+            service_name = self.SERVICE_PATTERN.search(line)
+            if service_name:
+                service_id = int(service_name.group('service_id'))
+                service_list.append(OsdService(service_id))
+        return service_list
+
+    def test_start_stop_all_by_keyword(self):
+        """Start and Stop all ceph-osd services using keyword 'all'."""
+        service_list = [service.name for service in self.available_services]
+
+        logging.info("Running 'service stop=all' action on {} "
+                     "unit".format(self.TESTED_UNIT))
+        zaza_model.run_action_on_units([self.TESTED_UNIT], 'stop',
+                                       action_params={'osds': 'all'})
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=service_list,
+                         target_status='stopped')
+
+        logging.info("Running 'service start=all' action on {} "
+                     "unit".format(self.TESTED_UNIT))
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
+                                       action_params={'osds': 'all'})
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=service_list,
+                         target_status='running')
+
+    def test_start_stop_all_by_list(self):
+        """Start and Stop all ceph-osd services using explicit list."""
+        service_list = [service.name for service in self.available_services]
+        service_ids = [str(service.id) for 
service in self.available_services]
+        action_params = ','.join(service_ids)
+
+        logging.info("Running 'service stop={}' action on {} "
+                     "unit".format(action_params, self.TESTED_UNIT))
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
+                                       action_params={'osds': action_params})
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=service_list,
+                         target_status='stopped')
+
+        logging.info("Running 'service start={}' action on {} "
+                     "unit".format(action_params, self.TESTED_UNIT))
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
+                                       action_params={'osds': action_params})
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=service_list,
+                         target_status='running')
+
+    def test_stop_specific(self):
+        """Stop only specified ceph-osd service."""
+        if len(self.available_services) < 2:
+            raise unittest.SkipTest('This test can be performed only if '
+                                    'there\'s more than one ceph-osd service '
+                                    'present on the tested unit')
+
+        should_run = deepcopy(self.available_services)
+        to_stop = should_run.pop()
+        should_run = [service.name for service in should_run]
+
+        logging.info("Running 'service stop={}' on {} "
+                     "unit".format(to_stop.id, self.TESTED_UNIT))
+
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
+                                       action_params={'osds': to_stop.id})
+
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=[to_stop.name, ],
+                         target_status='stopped')
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=should_run,
+                         target_status='running')
+
+    def test_start_specific(self):
+        """Start only specified ceph-osd service."""
+        if len(self.available_services) < 2:
+            raise unittest.SkipTest('This test can be performed only if '
+                                    'there\'s more than one ceph-osd service '
+                                    'present on the tested unit')
+
+        service_names = [service.name for service in self.available_services]
+        should_stop = deepcopy(self.available_services)
+        to_start = should_stop.pop()
+        should_stop = [service.name for service in should_stop]
+
+        # Note: can't stop ceph-osd.target as restarting a single OSD will
+        # cause this to start all of the OSDs when a single one starts.
+        logging.info("Stopping all running ceph-osd services")
+        service_stop_cmd = '; '.join(['systemctl stop {}'.format(service)
+                                      for service in service_names])
+        zaza_model.run_on_unit(self.TESTED_UNIT, service_stop_cmd)
+
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=service_names,
+                         target_status='stopped')
+
+        logging.info("Running 'service start={}' on {} "
+                     "unit".format(to_start.id, self.TESTED_UNIT))
+
+        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
+                                       action_params={'osds': to_start.id})
+
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=[to_start.name, ],
+                         target_status='running')
+
+        wait_for_service(unit_name=self.TESTED_UNIT,
+                         services=should_stop,
+                         target_status='stopped')
+
+    def test_active_after_pristine_block(self):
+        """Test if we can get back to active state after pristine block.
+
+        Set a non-pristine status, then trigger update-status to see if it
+        clears. 
+ """ + logging.info('Setting Non-pristine status') + zaza_model.run_on_leader( + "ceph-osd", + "status-set blocked 'Non-pristine'" + ) + ceph_osd_states = { + 'ceph-osd': { + 'workload-status': 'blocked', + 'workload-status-message-prefix': 'Non-pristine' + } + } + zaza_model.wait_for_application_states(states=ceph_osd_states) + logging.info('Running update-status action') + zaza_model.run_on_leader('ceph-osd', 'hooks/update-status') + logging.info('Wait for idle/ready status') + zaza_model.wait_for_application_states() diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index ec5877dd..df0b2b26 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,30 +1,8 @@ charm_name: ceph-osd -gate_bundles: - - jammy-yoga - - jammy-bobcat - - jammy-caracal - -smoke_bundles: - - jammy-caracal - -configure: - - install: - - zaza.openstack.charm_tests.glance.setup.add_lts_image - tests: - - install: - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest - # Charm upgrade, then re-run tests - - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-osd - - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - - zaza.openstack.charm_tests.ceph.tests.CephTest - - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest - - zaza.openstack.charm_tests.ceph.osd.tests.ServiceTest + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephLowLevelTest + - tests.target.CephTest + - tests.target.SecurityTest + - tests.target.ServiceTest diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 98bf885f..5d5938f5 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -21,6 +21,7 @@ skip_missing_interpreters = False skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} commands = stestr run --slowest {posargs} allowlist_externals = @@ -74,7 +75,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = flake8==3.9.2 +deps = flake8 git+https://github.com/juju/charm-tools.git commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof diff --git a/ceph-proxy/tests/target.py b/ceph-proxy/tests/target.py new file mode 100644 index 00000000..bbb5a35a --- /dev/null +++ b/ceph-proxy/tests/target.py @@ -0,0 +1,268 @@ +# Copyright 2024 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import unittest
+import logging
+import tenacity
+import json
+import subprocess
+
+import zaza
+import zaza.charm_lifecycle.utils as lifecycle_utils
+import zaza.model as zaza_model
+import zaza.openstack.utilities.ceph as zaza_ceph
+import zaza.openstack.utilities.exceptions as zaza_exceptions
+
+
+def setup_ceph_proxy():
+    """
+    Configure ceph proxy with ceph metadata.
+
+    Fetches admin_keyring and FSID from ceph-mon and
+    uses those to configure ceph-proxy.
+    """
+    raw_admin_keyring = zaza_model.run_on_leader(
+        "ceph-mon", 'cat /etc/ceph/ceph.client.admin.keyring')["Stdout"]
+    admin_keyring = [
+        line for line in raw_admin_keyring.split("\n") if "key" in line
+    ][0].split(' = ')[-1].rstrip()
+    fsid = zaza_model.run_on_leader("ceph-mon", "leader-get fsid")["Stdout"]
+    cluster_ips = zaza_model.get_app_ips("ceph-mon")
+
+    proxy_config = {
+        'auth-supported': 'cephx',
+        'admin-key': admin_keyring,
+        'fsid': fsid,
+        'monitor-hosts': ' '.join(cluster_ips)
+    }
+
+    logging.debug('Config: {}'.format(proxy_config))
+
+    zaza_model.set_application_config("ceph-proxy", proxy_config)
+
+
+class CephProxyTest(unittest.TestCase):
+    """Test ceph via proxy."""
+
+    @classmethod
+    def setUpClass(cls):
+        """Run class setup for running tests."""
+        super(CephProxyTest, cls).setUpClass()
+
+        test_config = lifecycle_utils.get_charm_config(fatal=False)
+        cls.target_deploy_status = test_config.get('target_deploy_status', {})
+
+    def test_ceph_health(self):
+        """Make sure ceph-proxy can communicate with ceph."""
+        logging.info('Wait for idle/ready status...')
+        zaza_model.wait_for_application_states(
+            states=self.target_deploy_status)
+
+        self.assertEqual(
+            zaza_model.run_on_leader("ceph-proxy", "sudo ceph health")["Code"],
+            "0"
+        )
+
+    def test_cinder_ceph_restrict_pool_setup(self):
+        """Make sure cinder-ceph restrict pool was created successfully."""
+        try:
+            zaza_model.get_application('cinder-ceph')
+        except KeyError:
+            raise unittest.SkipTest("Skipping OpenStack dependent test")
+        logging.info('Wait for idle/ready status...')
+        zaza_model.wait_for_application_states(
+            states=self.target_deploy_status)
+
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=2, max=32),
+            reraise=True, stop=tenacity.stop_after_attempt(8),
+        ):
+            with attempt:
+                pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
+                if 'cinder-ceph' not in pools:
+                    msg = ('cinder-ceph pool not found querying ceph-mon/0,'
+                           ' got: {}'.format(pools))
+                    raise zaza_exceptions.CephPoolNotFound(msg)
+
+        # Checking for cinder-ceph specific permissions makes
+        # the test more rugged when we add additional relations
+        # to ceph for other applications (such as glance and nova).
+        expected_permissions = [
+            "allow rwx pool=cinder-ceph",
+            "allow class-read object_prefix rbd_children",
+        ]
+        cmd = "sudo ceph auth get client.cinder-ceph"
+        result = zaza_model.run_on_unit('ceph-mon/0', cmd)
+        output = result.get('Stdout').strip()
+
+        for expected in expected_permissions:
+            if expected not in output:
+                msg = ('cinder-ceph pool restriction ({}) was not'
+                       ' configured correctly.' 
+ ' Found: {}'.format(expected, output)) + raise zaza_exceptions.CephPoolNotConfigured(msg) + + +class CephFSWithCephProxyTests(unittest.TestCase): + """Encapsulate CephFS tests.""" + + mounts_share = False + mount_dir = '/mnt/cephfs' + CEPH_MON = 'ceph-proxy' + + def tearDown(self): + """Cleanup after running tests.""" + if self.mounts_share: + for unit in ['ceph-osd/0', 'ceph-osd/1']: + try: + zaza.utilities.generic.run_via_ssh( + unit_name=unit, + cmd='sudo fusermount -u {0} && sudo rmdir {0}'.format( + self.mount_dir)) + except subprocess.CalledProcessError: + logging.warning( + "Failed to cleanup mounts on {}".format(unit)) + + def _mount_share(self, unit_name: str, + retry: bool = True): + self._install_dependencies(unit_name) + self._install_keyring(unit_name) + ssh_cmd = ( + 'sudo mkdir -p {0} && ' + 'sudo ceph-fuse {0}'.format(self.mount_dir) + ) + if retry: + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, + min=2, max=10)): + with attempt: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + else: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=ssh_cmd) + self.mounts_share = True + + def _install_keyring(self, unit_name: str): + + keyring = zaza_model.run_on_leader( + self.CEPH_MON, 'cat /etc/ceph/ceph.client.admin.keyring')['Stdout'] + config = zaza_model.run_on_leader( + self.CEPH_MON, 'cat /etc/ceph/ceph.conf')['Stdout'] + commands = [ + 'sudo mkdir -p /etc/ceph', + "echo '{}' | sudo tee /etc/ceph/ceph.conf".format(config), + "echo '{}' | " + 'sudo tee /etc/ceph/ceph.client.admin.keyring'.format(keyring) + ] + for cmd in commands: + zaza.utilities.generic.run_via_ssh( + unit_name=unit_name, + cmd=cmd) + + def _install_dependencies(self, unit: str): + zaza.utilities.generic.run_via_ssh( + unit_name=unit, + cmd='sudo apt-get install -yq ceph-fuse') + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(CephFSWithCephProxyTests, cls).setUpClass() + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _write_testing_file_on_instance(self, instance_name: str): + zaza.utilities.generic.run_via_ssh( + unit_name=instance_name, + cmd='echo -n "test" | sudo tee {}/test'.format(self.mount_dir)) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _verify_testing_file_on_instance(self, instance_name: str): + output = zaza_model.run_on_unit( + instance_name, 'sudo cat {}/test'.format(self.mount_dir))['Stdout'] + self.assertEqual('test', output.strip()) + + def test_cephfs_share(self): + """Test that CephFS shares can be accessed on two instances. + + 1. Spawn two servers + 2. mount it on both + 3. write a file on one + 4. read it on the other + 5. profit + """ + self._mount_share('ceph-osd/0') + self._mount_share('ceph-osd/1') + + self._write_testing_file_on_instance('ceph-osd/0') + self._verify_testing_file_on_instance('ceph-osd/1') + + def test_conf(self): + """Test ceph to ensure juju config options are properly set.""" + self.TESTED_UNIT = 'ceph-fs/0' + + def _get_conf(): + """get/parse ceph daemon response into dict. 
+ + :returns dict: Current configuration of the Ceph MDS daemon + :rtype: dict + """ + cmd = "sudo ceph daemon mds.$HOSTNAME config show" + conf = zaza_model.run_on_unit(self.TESTED_UNIT, cmd) + return json.loads(conf['Stdout']) + + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=4, max=10), + stop=tenacity.stop_after_attempt(10)) + def _change_conf_check(mds_config): + """Change configs, then assert to ensure config was set. + + Doesn't return a value. + """ + zaza_model.set_application_config('ceph-fs', mds_config) + results = _get_conf() + self.assertEqual( + results['mds_cache_memory_limit'], + mds_config['mds-cache-memory-limit']) + self.assertAlmostEqual( + float(results['mds_cache_reservation']), + float(mds_config['mds-cache-reservation'])) + self.assertAlmostEqual( + float(results['mds_health_cache_threshold']), + float(mds_config['mds-health-cache-threshold'])) + + # ensure defaults are set + mds_config = {'mds-cache-memory-limit': '4294967296', + 'mds-cache-reservation': '0.05', + 'mds-health-cache-threshold': '1.5'} + _change_conf_check(mds_config) + + # change defaults + mds_config = {'mds-cache-memory-limit': '8589934592', + 'mds-cache-reservation': '0.10', + 'mds-health-cache-threshold': '2'} + _change_conf_check(mds_config) + + # Restore config to keep tests idempotent + mds_config = {'mds-cache-memory-limit': '4294967296', + 'mds-cache-reservation': '0.05', + 'mds-health-cache-threshold': '1.5'} + _change_conf_check(mds_config) diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 67f4a342..6a327566 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,35 +1,12 @@ charm_name: ceph-proxy configure: - - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy - - erasure-coded: - - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy + - tests.target.setup_ceph_proxy tests: - - zaza.openstack.charm_tests.ceph.tests.CephProxyTest - - zaza.openstack.charm_tests.ceph.fs.tests.CephFSWithCephProxyTests - - erasure-coded: - - zaza.openstack.charm_tests.ceph.tests.CephProxyTest - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - -gate_bundles: - - jammy-yoga - - erasure-coded: jammy-yoga-ec - -dev_bundles: - - jammy-yoga - - erasure-coded: jammy-yoga-ec - - lunar-antelope - - mantic-bobcat - - erasure-coded: lunar-antelope-ec - - erasure-coded: mantic-bobcat-ec - - jammy-antelope - - jammy-bobcat - - erasure-coded: jammy-antelope-ec - - erasure-coded: jammy-bobcat-ec - -smoke_bundles: - - jammy-yoga + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephProxyTest + - tests.target.CephFSWithCephProxyTests target_deploy_status: ceph-proxy: @@ -38,28 +15,6 @@ target_deploy_status: ceph-radosgw: workload-status: waiting workload-status-message-prefix: "Incomplete relations: mon" - keystone: - workload-status: active - workload-status-message-prefix: "Unit is ready" - cinder-ceph: - workload-status: waiting - workload-status-message-prefix: "Ceph broker request incomplete" ceph-fs: workload-status: waiting workload-status-message-prefix: "'ceph-mds' incomplete" - nova-compute: - workload-status: waiting - workload-status-message-prefix: "Incomplete relations: storage-backend" - glance: - workload-status: waiting - workload-status-message-prefix: "Incomplete relations: storage-backend" - ubuntu: - workload-status: active - workload-status-message-prefix: '' - -tests_options: - force_deploy: - - jammy-antelope - - jammy-bobcat - - jammy-antelope-ec - - jammy-bobcat-ec diff --git 
a/ceph-proxy/tox.ini b/ceph-proxy/tox.ini index ebf24210..41e25414 100644 --- a/ceph-proxy/tox.ini +++ b/ceph-proxy/tox.ini @@ -21,13 +21,14 @@ minversion = 3.18.0 [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - pip {toxinidir}/rename.sh passenv = HOME @@ -45,11 +46,41 @@ commands = charmcraft -v pack {toxinidir}/rename.sh +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py310] basepython = python3.10 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py311] +basepython = python3.11 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py312] +basepython = python3.12 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -57,10 +88,9 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = flake8==3.9.2 - git+https://github.com/juju/charm-tools.git -commands = flake8 {posargs} hooks unit_tests tests actions lib files - charm-proof +deps = flake8 + charm-tools +commands = flake8 {posargs} unit_tests tests actions files [testenv:cover] # Technique based heavily upon diff --git a/ceph-radosgw/tests/target.py b/ceph-radosgw/tests/target.py new file mode 100644 index 00000000..8eb10ff4 --- /dev/null +++ b/ceph-radosgw/tests/target.py @@ -0,0 +1,1084 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ceph Testing.""" + +import unittest +import json +import logging +import requests +import boto3 +import botocore.exceptions +import urllib3 + +import tenacity + +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.model as zaza_model +import zaza.openstack.utilities.ceph as zaza_ceph +import zaza.openstack.utilities.generic as zaza_utils +import zaza.utilities.juju as juju_utils +import zaza.openstack.utilities.openstack as zaza_openstack +import zaza.openstack.utilities.generic as generic_utils + +# Disable warnings for ssl_verify=false +urllib3.disable_warnings( + urllib3.exceptions.InsecureRequestWarning +) + + +class CephRGWTest(test_utils.BaseCharmTest): + """Ceph RADOS Gateway Daemons Test Class. + + This Testset is not idempotent, because we don't support scale down from + multisite to singlesite (yet). Tests can be performed independently. 
+    However, if test_100 has completed migration, retriggering the test-set
+    would cause a time-out in test_003.
+    """
+
+    # String Resources
+    primary_rgw_app = 'ceph-radosgw'
+    primary_rgw_unit = 'ceph-radosgw/0'
+    secondary_rgw_app = 'secondary-ceph-radosgw'
+    secondary_rgw_unit = 'secondary-ceph-radosgw/0'
+
+    @classmethod
+    def setUpClass(cls):
+        """Run class setup for running ceph low level tests."""
+        super(CephRGWTest, cls).setUpClass(application_name='ceph-radosgw')
+
+    @property
+    def expected_apps(self):
+        """Determine application names for ceph-radosgw apps."""
+        _apps = [
+            self.primary_rgw_app
+        ]
+        try:
+            zaza_model.get_application(self.secondary_rgw_app)
+            _apps.append(self.secondary_rgw_app)
+        except KeyError:
+            pass
+        return _apps
+
+    @property
+    def multisite(self):
+        """Determine whether deployment is multi-site."""
+        try:
+            zaza_model.get_application(self.secondary_rgw_app)
+            return True
+        except KeyError:
+            return False
+
+    def get_rgwadmin_cmd_skeleton(self, unit_name):
+        """
+        Get radosgw-admin cmd skeleton with rgw.hostname populated key.
+
+        :param unit_name: Unit on which the complete command would be run.
+        :type unit_name: str
+        :returns: hostname filled basic command skeleton
+        :rtype: str
+        """
+        app_name = unit_name.split('/')[0]
+        juju_units = zaza_model.get_units(app_name)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        hostname = unit_hostnames[unit_name]
+        return 'radosgw-admin --id=rgw.{} '.format(hostname)
+
+    def purge_bucket(self, application, bucket_name):
+        """Remove a bucket and all its objects.
+
+        :param application: RGW application name
+        :type application: str
+        :param bucket_name: Name for RGW bucket to be deleted
+        :type bucket_name: str
+        """
+        juju_units = zaza_model.get_units(application)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        for unit_name, hostname in unit_hostnames.items():
+            key_name = "rgw.{}".format(hostname)
+            cmd = 'radosgw-admin --id={} bucket rm --bucket={}' \
+                  ' --purge-objects'.format(key_name, bucket_name)
+            zaza_model.run_on_unit(unit_name, cmd)
+
+    def wait_for_status(self, application,
+                        is_primary=False, sync_expected=True):
+        """Wait for required RGW endpoint to finish sync for data and metadata.
+
+        :param application: RGW application which has to be waited for
+        :type application: str
+        :param is_primary: whether RGW application is primary or secondary
+        :type is_primary: boolean
+        :param sync_expected: whether sync details should be expected in status
+        :type sync_expected: boolean
+        """
+        juju_units = zaza_model.get_units(application)
+        unit_hostnames = generic_utils.get_unit_hostnames(juju_units)
+        data_check = 'data is caught up with source'
+        meta_primary = 'metadata sync no sync (zone is master)'
+        meta_secondary = 'metadata is caught up with master'
+        meta_check = meta_primary if is_primary else meta_secondary
+
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=10, max=300),
+            reraise=True, stop=tenacity.stop_after_attempt(12),
+            retry=tenacity.retry_if_exception_type(AssertionError)
+        ):
+            with attempt:
+                for unit_name, hostname in unit_hostnames.items():
+                    key_name = "rgw.{}".format(hostname)
+                    cmd = 'radosgw-admin --id={} sync status'.format(key_name)
+                    stdout = zaza_model.run_on_unit(
+                        unit_name, cmd
+                    ).get('Stdout', '')
+                    if sync_expected:
+                        # Both data and meta sync. 
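+                        # For reference, an abridged (illustrative) excerpt of
+                        # `radosgw-admin sync status` output that these
+                        # substring checks match against:
+                        #
+                        #   metadata sync no sync (zone is master)
+                        #   data sync source: ... (zaza_secondary)
+                        #       data is caught up with source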
+                        self.assertIn(data_check, stdout)
+                        self.assertIn(meta_check, stdout)
+                    else:
+                        # Expect Primary's Meta Status and no Data sync status
+                        self.assertIn(meta_primary, stdout)
+                        self.assertNotIn(data_check, stdout)
+
+    def fetch_rgw_object(self, target_client, container_name, object_name):
+        """Fetch RGW object content.
+
+        :param target_client: boto3 client object configured for an endpoint.
+        :type target_client: boto3.resources.base.ServiceResource
+        :param container_name: RGW bucket name for desired object.
+        :type container_name: str
+        :param object_name: Object name for desired object.
+        :type object_name: str
+        """
+        for attempt in tenacity.Retrying(
+            wait=tenacity.wait_exponential(multiplier=1, max=60),
+            reraise=True, stop=tenacity.stop_after_attempt(12)
+        ):
+            with attempt:
+                return target_client.Object(
+                    container_name, object_name
+                ).get()['Body'].read().decode('UTF-8')
+
+    def promote_rgw_to_primary(self, app_name: str):
+        """Promote provided app to Primary and update period at new secondary.
+
+        :param app_name: Secondary site rgw Application to be promoted.
+        :type app_name: str
+        """
+        if app_name == self.primary_rgw_app:
+            new_secondary = self.secondary_rgw_unit
+        else:
+            new_secondary = self.primary_rgw_unit
+
+        # Promote to Primary
+        zaza_model.run_action_on_leader(
+            app_name,
+            'promote',
+            action_params={},
+        )
+
+        # Period Update Commit new secondary.
+        cmd = self.get_rgwadmin_cmd_skeleton(new_secondary)
+        zaza_model.run_on_unit(
+            new_secondary, cmd + 'period update --commit'
+        )
+
+    def get_client_keys(self, rgw_app_name=None):
+        """Create access_key and secret_key for boto3 client.
+
+        :param rgw_app_name: RGW application for which keys are required.
+        :type rgw_app_name: str
+        """
+        unit_name = self.primary_rgw_unit
+        if rgw_app_name is not None:
+            unit_name = rgw_app_name + '/0'
+        user_name = 'botoclient'
+        cmd = self.get_rgwadmin_cmd_skeleton(unit_name)
+        users = json.loads(zaza_model.run_on_unit(
+            unit_name, cmd + 'user list'
+        ).get('Stdout', ''))
+        # Fetch boto3 user keys if user exists.
+        if user_name in users:
+            output = json.loads(zaza_model.run_on_unit(
+                unit_name, cmd + 'user info --uid={}'.format(user_name)
+            ).get('Stdout', ''))
+            keys = output['keys'][0]
+            return keys['access_key'], keys['secret_key']
+        # Create boto3 user if it does not exist.
+        create_cmd = cmd + 'user create --uid={} --display-name={}'.format(
+            user_name, user_name
+        )
+        output = json.loads(
+            zaza_model.run_on_unit(unit_name, create_cmd).get('Stdout', '')
+        )
+        keys = output['keys'][0]
+        return keys['access_key'], keys['secret_key']
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_result(lambda ret: ret is None),
+        wait=tenacity.wait_fixed(10),
+        stop=tenacity.stop_after_attempt(5)
+    )
+    def get_rgw_endpoint(self, unit_name: str):
+        """Fetch Application endpoint for RGW unit.
+
+        :param unit_name: Unit name for which RGW endpoint is required.
+        :type unit_name: str
+        """
+        # Get address "public" network binding. 
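+        # `network-get` is a Juju hook tool; with `--bind-address` it prints
+        # the address of the unit's "public" endpoint binding, e.g.
+        # "10.5.0.21" (example address for illustration only).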
+ unit_address = zaza_model.run_on_unit( + unit_name, "network-get public --bind-address" + ).get('Stdout', '').strip() + + logging.info("Unit: {}, Endpoint: {}".format(unit_name, unit_address)) + if unit_address is None: + return None + # Evaluate port + try: + zaza_model.get_application("vault") + return "https://{}:443".format(unit_address) + except KeyError: + return "http://{}:80".format(unit_address) + + def configure_rgw_apps_for_multisite(self): + """Configure Multisite values on primary and secondary apps.""" + realm = 'zaza_realm' + zonegroup = 'zaza_zg' + + zaza_model.set_application_config( + self.primary_rgw_app, + { + 'realm': realm, + 'zonegroup': zonegroup, + 'zone': 'zaza_primary' + } + ) + zaza_model.set_application_config( + self.secondary_rgw_app, + { + 'realm': realm, + 'zonegroup': zonegroup, + 'zone': 'zaza_secondary' + } + ) + + def configure_rgw_multisite_relation(self): + """Configure multi-site relation between primary and secondary apps.""" + multisite_relation = zaza_model.get_relation_id( + self.primary_rgw_app, self.secondary_rgw_app, + remote_interface_name='secondary' + ) + if multisite_relation is None: + logging.info('Configuring Multisite') + self.configure_rgw_apps_for_multisite() + zaza_model.add_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + zaza_model.block_until_unit_wl_status( + self.secondary_rgw_unit, "waiting" + ) + + zaza_model.block_until_unit_wl_status( + self.secondary_rgw_unit, "active" + ) + zaza_model.block_until_unit_wl_status( + self.primary_rgw_unit, "active" + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + def clean_rgw_multisite_config(self, app_name): + """Clear Multisite Juju config values to default. + + :param app_name: App for which config values are to be cleared + :type app_name: str + """ + unit_name = app_name + "/0" + zaza_model.set_application_config( + app_name, + { + 'realm': "", + 'zonegroup': "", + 'zone': "default" + } + ) + # Commit changes to period. + cmd = self.get_rgwadmin_cmd_skeleton(unit_name) + zaza_model.run_on_unit( + unit_name, cmd + 'period update --commit --rgw-zone=default ' + '--rgw-zonegroup=default' + ) + + def enable_virtual_hosted_bucket(self): + """Enable virtual hosted bucket on primary rgw app.""" + zaza_model.set_application_config( + self.primary_rgw_app, + { + 'virtual-hosted-bucket-enabled': "true" + } + ) + + def set_os_public_hostname(self): + """Set os-public-hostname on primary rgw app.""" + zaza_model.set_application_config( + self.primary_rgw_app, + { + 'os-public-hostname': "rgw.example.com", + } + ) + + def clean_virtual_hosted_bucket(self): + """Clear virtual hosted bucket on primary app.""" + zaza_model.set_application_config( + self.primary_rgw_app, + { + 'os-public-hostname': "", + 'virtual-hosted-bucket-enabled': "false" + } + ) + + def test_001_processes(self): + """Verify Ceph processes. + + Verify that the expected service processes are running + on each ceph unit. 
+ """ + logging.info('Checking radosgw processes...') + # Process name and quantity of processes to expect on each unit + ceph_radosgw_processes = { + 'radosgw': 1, + } + + # Units with process names and PID quantities expected + expected_processes = {} + for app in self.expected_apps: + for unit in zaza_model.get_units(app): + expected_processes[unit.entity_id] = ceph_radosgw_processes + + actual_pids = zaza_utils.get_unit_process_ids(expected_processes) + ret = zaza_utils.validate_unit_process_ids(expected_processes, + actual_pids) + self.assertTrue(ret) + + def test_002_services(self): + """Verify the ceph services. + + Verify the expected services are running on the service units. + """ + logging.info('Checking radosgw services...') + services = ['radosgw', 'haproxy'] + for app in self.expected_apps: + for unit in zaza_model.get_units(app): + zaza_model.block_until_service_status( + unit_name=unit.entity_id, + services=services, + target_status='running' + ) + + def test_003_object_storage_and_secondary_block(self): + """Verify Object Storage API and Secondary Migration block.""" + container_name = 'zaza-container' + obj_data = 'Test data from Zaza' + obj_name = 'prefile' + + logging.info('Checking Object Storage API for Primary Cluster') + # 1. Fetch Primary Endpoint Details + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + # 2. Create RGW Client and perform IO + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + primary_client.Bucket(container_name).create() + primary_object_one = primary_client.Object( + container_name, + obj_name + ) + primary_object_one.put(Body=obj_data) + + # 3. Fetch Object and Perform Data Integrity check. + content = primary_object_one.get()['Body'].read().decode('UTF-8') + self.assertEqual(content, obj_data) + + # Skip multisite tests if not compatible with bundle. + if not self.multisite: + logging.info('Skipping Secondary Object gatewaty verification') + return + + logging.info('Checking Object Storage API for Secondary Cluster') + # 1. Fetch Secondary Endpoint Details + secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit) + self.assertNotEqual(secondary_endpoint, None) + + # 2. Create RGW Client and perform IO + access_key, secret_key = self.get_client_keys(self.secondary_rgw_app) + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + secondary_client.Bucket(container_name).create() + secondary_object = secondary_client.Object( + container_name, + obj_name + ) + secondary_object.put(Body=obj_data) + + # 3. Fetch Object and Perform Data Integrity check. + content = secondary_object.get()['Body'].read().decode('UTF-8') + self.assertEqual(content, obj_data) + + logging.info('Checking Secondary Migration Block') + # 1. Migrate to multisite + if zaza_model.get_relation_id( + self.primary_rgw_app, self.secondary_rgw_app, + remote_interface_name='secondary' + ) is not None: + logging.info('Skipping Test, Multisite relation already present.') + return + + logging.info('Configuring Multisite') + self.configure_rgw_apps_for_multisite() + zaza_model.add_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + + # 2. 
Verify secondary fails migration due to existing Bucket. + assert_state = { + self.secondary_rgw_app: { + "workload-status": "blocked", + "workload-status-message-prefix": + "Non-Pristine RGW site can't be used as secondary" + } + } + zaza_model.wait_for_application_states(states=assert_state, + timeout=900) + + # 3. Perform Secondary Cleanup + logging.info('Perform cleanup at secondary') + self.clean_rgw_multisite_config(self.secondary_rgw_app) + zaza_model.remove_relation( + self.primary_rgw_app, + self.primary_rgw_app + ":primary", + self.secondary_rgw_app + ":secondary" + ) + + # Make secondary pristine. + self.purge_bucket(self.secondary_rgw_app, container_name) + + zaza_model.block_until_unit_wl_status(self.secondary_rgw_unit, + 'active') + + def test_004_multisite_directional_sync_policy(self): + """Verify Multisite Directional Sync Policy.""" + # Skip multisite tests if not compatible with bundle. + if not self.multisite: + logging.info('Skipping multisite sync policy verification') + return + + container_name = 'zaza-container' + primary_obj_name = 'primary-testfile' + primary_obj_data = 'Primary test data' + secondary_directional_obj_name = 'secondary-directional-testfile' + secondary_directional_obj_data = 'Secondary directional test data' + secondary_symmetrical_obj_name = 'secondary-symmetrical-testfile' + secondary_symmetrical_obj_data = 'Secondary symmetrical test data' + + logging.info('Verifying multisite directional sync policy') + + # Set default sync policy to "allowed", which allows buckets to sync, + # but the sync is disabled by default in the zone group. Also, set the + # secondary zone sync policy flow type policy to "directional". + zaza_model.set_application_config( + self.primary_rgw_app, + { + "sync-policy-state": "allowed", + } + ) + zaza_model.set_application_config( + self.secondary_rgw_app, + { + "sync-policy-flow-type": "directional", + } + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + # Setup multisite relation. + self.configure_rgw_multisite_relation() + + logging.info('Waiting for Data and Metadata to Synchronize') + # NOTE: We only check the secondary zone, because the sync policy flow + # type is set to "directional" between the primary and secondary zones. + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + + # Create bucket on primary RGW zone. + logging.info('Creating bucket on primary zone') + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + primary_client.Bucket(container_name).create() + + # Enable sync on the bucket. + logging.info('Enabling sync on the bucket from the primary zone') + zaza_model.run_action_on_leader( + self.primary_rgw_app, + 'enable-buckets-sync', + action_params={ + 'buckets': container_name, + }, + raise_on_failure=True, + ) + + # Check that sync cannot be enabled using secondary Juju RGW app. 
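+        # With a "directional" flow configured on the secondary zone, sync
+        # toggling is driven from the primary site only, so the same action
+        # against the secondary app should fail (hence ActionFailed below).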
+ with self.assertRaises(zaza_model.ActionFailed): + zaza_model.run_action_on_leader( + self.secondary_rgw_app, + 'enable-buckets-sync', + action_params={ + 'buckets': container_name, + }, + raise_on_failure=True, + ) + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + + # Perform IO on primary zone bucket. + logging.info('Performing IO on primary zone bucket') + primary_object = primary_client.Object( + container_name, + primary_obj_name + ) + primary_object.put(Body=primary_obj_data) + + # Verify that the object is replicated to the secondary zone. + logging.info('Verifying that the object is replicated to the ' + 'secondary zone') + secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit) + self.assertNotEqual(secondary_endpoint, None) + + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + secondary_data = self.fetch_rgw_object( + secondary_client, + container_name, + primary_obj_name + ) + self.assertEqual(secondary_data, primary_obj_data) + + # Write object to the secondary zone bucket, when the sync policy + # flow type is set to "directional" between the zones. + logging.info('Writing object to the secondary zone bucket, which ' + 'should not be replicated to the primary zone') + secondary_object = secondary_client.Object( + container_name, + secondary_directional_obj_name + ) + secondary_object.put(Body=secondary_directional_obj_data) + + # Verify that the object is not replicated to the primary zone. + logging.info('Verifying that the object is not replicated to the ' + 'primary zone') + with self.assertRaises(botocore.exceptions.ClientError): + self.fetch_rgw_object( + primary_client, + container_name, + secondary_directional_obj_name + ) + + logging.info('Setting sync policy flow to "symmetrical" on the ' + 'secondary RGW zone') + zaza_model.set_application_config( + self.secondary_rgw_app, + { + "sync-policy-flow-type": "symmetrical", + } + ) + zaza_model.wait_for_unit_idle(self.secondary_rgw_unit) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + # Write another object to the secondary zone bucket. + logging.info('Writing another object to the secondary zone bucket.') + secondary_object = secondary_client.Object( + container_name, + secondary_symmetrical_obj_name + ) + secondary_object.put(Body=secondary_symmetrical_obj_data) + + logging.info('Waiting for Data and Metadata to Synchronize') + # NOTE: This time, we check both the primary and secondary zones, + # because the sync policy flow type is set to "symmetrical" between + # the zones. + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + # Verify that all objects are replicated to the primary zone. 
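+        # Under a "symmetrical" flow both zones back-fill each other, so the
+        # object written earlier, while the flow was still directional, should
+        # now appear on the primary as well.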
+ logging.info('Verifying that all objects are replicated to the ' + 'primary zone (including older objects).') + test_cases = [ + { + 'obj_name': primary_obj_name, + 'obj_data': primary_obj_data, + }, + { + 'obj_name': secondary_directional_obj_name, + 'obj_data': secondary_directional_obj_data, + }, + { + 'obj_name': secondary_symmetrical_obj_name, + 'obj_data': secondary_symmetrical_obj_data, + }, + ] + for tc in test_cases: + logging.info('Verifying that object "{}" is replicated'.format( + tc['obj_name'])) + primary_data = self.fetch_rgw_object( + primary_client, + container_name, + tc['obj_name'] + ) + self.assertEqual(primary_data, tc['obj_data']) + + # Cleanup. + logging.info('Cleaning up buckets after test case') + self.purge_bucket(self.primary_rgw_app, container_name) + self.purge_bucket(self.secondary_rgw_app, container_name) + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + # Set multisite sync policy state to "enabled" on the primary RGW app. + # Paired with "symmetrical" sync policy flow on the secondary RGW app, + # this enables bidirectional sync between the zones (which is the + # default behaviour without multisite sync policies configured). + logging.info('Setting sync policy state to "enabled".') + zaza_model.set_application_config( + self.primary_rgw_app, + { + "sync-policy-state": "enabled", + } + ) + zaza_model.wait_for_unit_idle(self.primary_rgw_unit) + + def test_100_migration_and_multisite_failover(self): + """Perform multisite migration and verify failover.""" + container_name = 'zaza-container' + obj_data = 'Test data from Zaza' + # Skip multisite tests if not compatible with bundle. + if not self.multisite: + raise unittest.SkipTest('Skipping Migration Test') + + logging.info('Perform Pre-Migration IO') + # 1. Fetch Endpoint Details + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + # 2. Create primary client and add pre-migration object. + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + primary_client.Bucket(container_name).create() + primary_client.Object( + container_name, + 'prefile' + ).put(Body=obj_data) + + # If Primary/Secondary relation does not exist, add it. + self.configure_rgw_multisite_relation() + + logging.info('Waiting for Data and Metadata to Synchronize') + self.wait_for_status(self.secondary_rgw_app, is_primary=False) + self.wait_for_status(self.primary_rgw_app, is_primary=True) + + logging.info('Performing post migration IO tests.') + # Add another object at primary + primary_client.Object( + container_name, + 'postfile' + ).put(Body=obj_data) + + # 1. Fetch Endpoint Details + secondary_endpoint = self.get_rgw_endpoint(self.secondary_rgw_unit) + self.assertNotEqual(secondary_endpoint, None) + + # 2. Create secondary client and fetch synchronised objects. + secondary_client = boto3.resource("s3", + verify=False, + endpoint_url=secondary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + # 3. Verify Data Integrity + # fetch_rgw_object has internal retry so waiting for sync beforehand + # is not required for post migration object sync. 
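+        # (fetch_rgw_object, defined above, already retries with exponential
+        # backoff for up to 12 attempts, so short replication lag is
+        # tolerated here.)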
+        pre_migration_data = self.fetch_rgw_object(
+            secondary_client, container_name, 'prefile'
+        )
+        post_migration_data = self.fetch_rgw_object(
+            secondary_client, container_name, 'postfile'
+        )
+
+        # 4. Verify synchronisation works and objects are replicated
+        self.assertEqual(pre_migration_data, obj_data)
+        self.assertEqual(post_migration_data, obj_data)
+
+        logging.info('Checking multisite failover/failback')
+        # Failover Scenario, Promote Secondary-Ceph-RadosGW to Primary
+        self.promote_rgw_to_primary(self.secondary_rgw_app)
+
+        # Wait for sites to be synchronised.
+        self.wait_for_status(self.primary_rgw_app, is_primary=False)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=True)
+
+        # IO Test
+        container = 'failover-container'
+        test_data = 'Test data from Zaza on Secondary'
+        secondary_client.Bucket(container).create()
+        secondary_object = secondary_client.Object(container, 'testfile')
+        secondary_object.put(
+            Body=test_data
+        )
+        secondary_content = secondary_object.get()[
+            'Body'
+        ].read().decode('UTF-8')
+
+        # Wait for sites to be synchronised.
+        self.wait_for_status(self.primary_rgw_app, is_primary=False)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=True)
+
+        # Recovery scenario, reset ceph-rgw as primary.
+        self.promote_rgw_to_primary(self.primary_rgw_app)
+        self.wait_for_status(self.primary_rgw_app, is_primary=True)
+        self.wait_for_status(self.secondary_rgw_app, is_primary=False)
+
+        # Fetch synchronised copy of testfile from primary site.
+        primary_content = self.fetch_rgw_object(
+            primary_client, container, 'testfile'
+        )
+
+        # Verify Data Integrity.
+        self.assertEqual(secondary_content, primary_content)
+
+        # Scaledown and verify replication has stopped.
+        logging.info('Checking multisite scaledown')
+        zaza_model.remove_relation(
+            self.primary_rgw_app,
+            self.primary_rgw_app + ":primary",
+            self.secondary_rgw_app + ":secondary"
+        )
+
+        # wait for sync stop
+        self.wait_for_status(self.primary_rgw_app, sync_expected=False)
+        self.wait_for_status(self.secondary_rgw_app, sync_expected=False)
+
+        # Refresh client and verify objects are not replicating.
+        primary_client = boto3.resource("s3",
+                                        verify=False,
+                                        endpoint_url=primary_endpoint,
+                                        aws_access_key_id=access_key,
+                                        aws_secret_access_key=secret_key)
+        secondary_client = boto3.resource("s3",
+                                          verify=False,
+                                          endpoint_url=secondary_endpoint,
+                                          aws_access_key_id=access_key,
+                                          aws_secret_access_key=secret_key)
+
+        # IO Test
+        container = 'scaledown-container'
+        test_data = 'Scaledown Test data'
+        secondary_client.Bucket(container).create()
+        secondary_object = secondary_client.Object(container, 'scaledown')
+        secondary_object.put(
+            Body=test_data
+        )
+
+        # Since bucket is not replicated.
+        with self.assertRaises(botocore.exceptions.ClientError):
+            primary_content = self.fetch_rgw_object(
+                primary_client, container, 'scaledown'
+            )
+
+        # Cleanup of scaledown resources and synced resources. 
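+        # purge_bucket (helper above) issues `radosgw-admin bucket rm
+        # --purge-objects` on each unit of the given application.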
+ self.purge_bucket(self.secondary_rgw_app, container) + self.purge_bucket(self.secondary_rgw_app, 'zaza-container') + self.purge_bucket(self.secondary_rgw_app, 'failover-container') + + def test_101_virtual_hosted_bucket(self): + """Test virtual hosted bucket.""" + # skip if quincy or older + current_release = zaza_openstack.get_os_release( + application='ceph-mon') + reef = zaza_openstack.get_os_release('jammy_bobcat') + if current_release < reef: + raise unittest.SkipTest( + 'Virtual hosted bucket not supported in quincy or older') + + primary_rgw_unit = zaza_model.get_unit_from_name(self.primary_rgw_unit) + if primary_rgw_unit.workload_status != "active": + logging.info('Skipping virtual hosted bucket test since ' + 'primary rgw unit is not in active state') + return + + logging.info('Testing virtual hosted bucket') + + # 0. Configure virtual hosted bucket + self.enable_virtual_hosted_bucket() + zaza_model.block_until_wl_status_info_starts_with( + self.primary_rgw_app, + 'os-public-hostname must have a value', + timeout=900 + ) + self.set_os_public_hostname() + zaza_model.block_until_all_units_idle(self.model_name) + container_name = 'zaza-bucket' + obj_data = 'Test content from Zaza' + obj_name = 'testfile' + + # 1. Fetch Primary Endpoint Details + primary_endpoint = self.get_rgw_endpoint(self.primary_rgw_unit) + self.assertNotEqual(primary_endpoint, None) + + # 2. Create RGW Client and perform IO + access_key, secret_key = self.get_client_keys() + primary_client = boto3.resource("s3", + verify=False, + endpoint_url=primary_endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + # We may not have certs for the pub hostname yet, so retry a few times. + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(10), + wait=tenacity.wait_fixed(4), + ): + with attempt: + primary_client.Bucket(container_name).create() + primary_object_one = primary_client.Object( + container_name, + obj_name + ) + primary_object_one.put(Body=obj_data) + primary_client.Bucket(container_name).Acl().put(ACL='public-read') + primary_client.Object(container_name, obj_name).Acl().put( + ACL='public-read' + ) + + # 3. Test if we can get content via virtual hosted bucket name + public_hostname = zaza_model.get_application_config( + self.primary_rgw_app + )["os-public-hostname"]["value"] + url = f"{primary_endpoint}/{obj_name}" + headers = {'host': f"{container_name}.{public_hostname}"} + f = requests.get(url, headers=headers, verify=False) + self.assertEqual(f.text, obj_data) + + # 4. Cleanup and de-configure virtual hosted bucket + self.clean_virtual_hosted_bucket() + zaza_model.block_until_all_units_idle(self.model_name) + self.purge_bucket(self.primary_rgw_app, container_name) + + +class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest): + """Test charm handling of bluestore compression configuration options.""" + + def _assert_pools_properties(self, pools, pools_detail, + expected_properties, log_func=logging.info): + """Check properties on a set of pools. + + :param pools: List of pool names to check. + :type pools: List[str] + :param pools_detail: List of dictionaries with pool detail + :type pools_detail List[Dict[str,any]] + :param expected_properties: Properties to check and their expected + values. 
+        :type expected_properties: Dict[str,any]
+        :returns: Nothing
+        :raises: AssertionError
+        """
+        for pool in pools:
+            for pd in pools_detail:
+                if pd['pool_name'] == pool:
+                    if 'options' in expected_properties:
+                        for k, v in expected_properties['options'].items():
+                            self.assertEqual(pd['options'][k], v)
+                            log_func("['options']['{}'] == {}".format(k, v))
+                    for k, v in expected_properties.items():
+                        if k == 'options':
+                            continue
+                        self.assertEqual(pd[k], v)
+                        log_func("{} == {}".format(k, v))
+
+    def test_configure_compression(self):
+        """Enable compression and validate properties flush through to pool."""
+        # The Ceph RadosGW creates many lightweight pools to keep track of
+        # metadata; we only compress the pool containing actual data.
+        app_pools = ['.rgw.buckets.data']
+
+        ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
+            model_name=self.model_name)
+
+        logging.debug('BEFORE: {}'.format(ceph_pools_detail))
+        try:
+            logging.info('Checking Ceph pool compression_mode prior to change')
+            self._assert_pools_properties(
+                app_pools, ceph_pools_detail,
+                {'options': {'compression_mode': 'none'}})
+        except KeyError:
+            logging.info('property does not exist on pool, which is OK.')
+        logging.info('Changing "bluestore-compression-mode" to "force" on {}'
+                     .format(self.application_name))
+        with self.config_change(
+                {'bluestore-compression-mode': 'none'},
+                {'bluestore-compression-mode': 'force'}):
+            logging.info('Checking Ceph pool compression_mode after the '
+                         'change')
+            self._check_pool_compression_mode(app_pools, 'force')
+
+        logging.info('Checking Ceph pool compression_mode after '
+                     'restoring config to previous value')
+        self._check_pool_compression_mode(app_pools, 'none')
+
+    @tenacity.retry(
+        wait=tenacity.wait_exponential(multiplier=1, min=2, max=10),
+        stop=tenacity.stop_after_attempt(10),
+        reraise=True,
+        retry=tenacity.retry_if_exception_type(AssertionError)
+    )
+    def _check_pool_compression_mode(self, app_pools, mode):
+        ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
+            model_name=self.model_name)
+        logging.debug('ceph_pools_details: %s', ceph_pools_detail)
+        logging.debug(juju_utils.get_relation_from_unit(
+            'ceph-mon', self.application_name, None,
+            model_name=self.model_name))
+        self._assert_pools_properties(
+            app_pools, ceph_pools_detail,
+            {'options': {'compression_mode': mode}})
+
+    def test_invalid_compression_configuration(self):
+        """Set invalid configuration and validate charm response."""
+        stored_target_deploy_status = self.test_config.get(
+            'target_deploy_status', {})
+        new_target_deploy_status = stored_target_deploy_status.copy()
+        new_target_deploy_status[self.application_name] = {
+            'workload-status': 'blocked',
+            'workload-status-message': 'Invalid configuration',
+        }
+        if 'target_deploy_status' in self.test_config:
+            self.test_config['target_deploy_status'].update(
+                new_target_deploy_status)
+        else:
+            self.test_config['target_deploy_status'] = new_target_deploy_status
+
+        with self.config_change(
+                {'bluestore-compression-mode': 'none'},
+                {'bluestore-compression-mode': 'PEBCAK'}):
+            logging.info('Charm went into blocked state as expected, restore '
+                         'configuration')
+            self.test_config[
+                'target_deploy_status'] = stored_target_deploy_status
+
+
+class CephKeyRotationTests(test_utils.BaseCharmTest):
+    """Tests for the rotate-key action."""
+
+    def _get_all_keys(self, unit, entity_filter):
+        cmd = 'sudo ceph auth ls'
+        result = zaza_model.run_on_unit(unit, cmd)
+        # Don't use json formatting, as it's buggy upstream. 
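+        # Raw `ceph auth ls` output has roughly this shape (illustrative):
+        #
+        #   osd.0
+        #           key: AQDAbsZh...
+        #           caps: [osd] allow *
+        #
+        # so after splitting on whitespace the entity name is the token
+        # right before 'key:' and the key itself the token right after it.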
+ data = result['Stdout'].split() + ret = set() + + for ix, line in enumerate(data): + # Structure: + # $ENTITY + # key: + # key contents + # That's why we need to move one position ahead. + if 'key:' in line and entity_filter(data[ix - 1]): + ret.add((data[ix - 1], data[ix + 1])) + return ret + + def _check_key_rotation(self, entity, unit): + def entity_filter(name): + return name.startswith(entity) + + old_keys = self._get_all_keys(unit, entity_filter) + action_obj = zaza_model.run_action( + unit_name=unit, + action_name='rotate-key', + action_params={'entity': entity} + ) + zaza_utils.assertActionRanOK(action_obj) + # NOTE(lmlg): There's a nasty race going on here. Essentially, + # since this action involves 2 different applications, what + # happens is as follows: + # (1) (2) (3) (4) + # ceph-mon rotates key | (idle) | remote-unit rotates key | (idle) + # Between (2) and (3), there's a window where all units are + # idle, _but_ the key hasn't been rotated in the other unit. + # As such, we retry a few times instead of using the + # `wait_for_application_states` interface. + + for attempt in tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=2, max=32), + reraise=True, stop=tenacity.stop_after_attempt(20), + retry=tenacity.retry_if_exception_type(AssertionError) + ): + with attempt: + new_keys = self._get_all_keys(unit, entity_filter) + self.assertNotEqual(old_keys, new_keys) + + diff = new_keys - old_keys + self.assertEqual(len(diff), 1) + first = next(iter(diff)) + # Check that the entity matches. The 'entity_filter' + # callable will return a true-like value if it + # matches the type of entity we're after (i.e: 'mgr') + self.assertTrue(entity_filter(first[0])) + + def _get_rgw_client(self, unit): + ret = self._get_all_keys(unit, lambda x: x.startswith('client.rgw')) + if not ret: + return None + return next(iter(ret))[0] + + def test_key_rotate(self): + """Test that rotating the keys actually changes them.""" + unit = 'ceph-mon/0' + rgw_client = self._get_rgw_client(unit) + + if rgw_client: + self._check_key_rotation(rgw_client, unit) + else: + logging.info('ceph-radosgw units present, but no RGW service') diff --git a/ceph-radosgw/tests/tests.yaml b/ceph-radosgw/tests/tests.yaml index f0c59360..32f1fedd 100644 --- a/ceph-radosgw/tests/tests.yaml +++ b/ceph-radosgw/tests/tests.yaml @@ -1,52 +1,17 @@ charm_name: ceph-radosgw gate_bundles: - - focal-yoga-multisite - - vault: focal-yoga - - vault: focal-yoga-namespaced + - jammy-caracal smoke_bundles: - - jammy-antelope-multisite - - vault: jammy-antelope + - jammy-caracal dev_bundles: - - jammy-antelope-multisite - - jammy-bobcat-multisite - - jammy-caracal-multisite - - vault: jammy-antelope - - vault: jammy-bobcat - - vault: jammy-caracal - - vault: jammy-antelope-namespaced - - vault: jammy-bobcat-namespaced - - vault: jammy-caracal-namespaced - -target_deploy_status: - vault: - workload-status: blocked - workload-status-message-prefix: Vault needs to be initialized - -configure: - - vault: - - zaza.openstack.charm_tests.vault.setup.auto_initialize + - jammy-caracal tests: - - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - - vault: - - zaza.openstack.charm_tests.ceph.tests.CephRGWTest - - zaza.openstack.charm_tests.swift.tests.S3APITest - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation - # Charm upgrade, then re-run tests - - zaza.charm_tests.lifecycle.tests.UpgradeCharmsToPath;ceph-radosgw - - 
zaza.openstack.charm_tests.ceph.tests.CephRGWTest - - zaza.openstack.charm_tests.swift.tests.S3APITest - - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation - - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll + - tests.target.CephRGWTest + - tests.target.BlueStoreCompressionCharmOperation + - tests.target.CephKeyRotationTests -tests_options: - force_deploy: - - jammy-antelope - - jammy-bobcat - - jammy-antelope-namespaced - - jammy-bobcat-namespaced diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index b8a4b629..9e509733 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -21,6 +21,7 @@ skip_missing_interpreters = False skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 CHARM_DIR={envdir} commands = stestr run --slowest {posargs} allowlist_externals = diff --git a/ceph-rbd-mirror/src/tests/target.py b/ceph-rbd-mirror/src/tests/target.py new file mode 100644 index 00000000..c8aece63 --- /dev/null +++ b/ceph-rbd-mirror/src/tests/target.py @@ -0,0 +1,859 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate ``ceph-rbd-mirror`` testing.""" +import json +import logging +import re +import time +import unittest + +import cinderclient.exceptions as cinder_exceptions + +import zaza.openstack.charm_tests.test_utils as test_utils + +import zaza.model +import zaza.openstack.utilities.ceph +import zaza.openstack.utilities.openstack as openstack +import zaza.openstack.utilities.generic as zaza_utils + +from zaza.openstack.charm_tests.glance.setup import ( + LTS_IMAGE_NAME, + CIRROS_IMAGE_NAME) + + +DEFAULT_CINDER_RBD_MIRRORING_MODE = 'pool' + + +def get_cinder_rbd_mirroring_mode(cinder_ceph_app_name='cinder-ceph'): + """Get the RBD mirroring mode for the Cinder Ceph pool. + + :param cinder_ceph_app_name: Cinder Ceph Juju application name. + :type cinder_ceph_app_name: str + :returns: A string representing the RBD mirroring mode. It can be + either 'pool' or 'image'. + :rtype: str + """ + rbd_mirroring_mode_config = zaza.model.get_application_config( + cinder_ceph_app_name).get('rbd-mirroring-mode') + if rbd_mirroring_mode_config: + rbd_mirroring_mode = rbd_mirroring_mode_config.get( + 'value', DEFAULT_CINDER_RBD_MIRRORING_MODE).lower() + else: + rbd_mirroring_mode = DEFAULT_CINDER_RBD_MIRRORING_MODE + + return rbd_mirroring_mode + + +def get_glance_image(glance): + """Get the Glance image object to be used by the Ceph tests. + + It looks for the Cirros Glance image, and it's returned if it's found. + If the Cirros image is not found, it will try and find the Ubuntu + LTS image. 
+
+ :param glance: Authenticated glanceclient
+ :type glance: glanceclient.Client
+ :returns: Glance image object
+ :rtype: glanceclient.image
+ """
+ images = openstack.get_images_by_name(glance, CIRROS_IMAGE_NAME)
+ if images:
+ return images[0]
+ logging.info("Failed to find {} image, falling back to {}".format(
+ CIRROS_IMAGE_NAME,
+ LTS_IMAGE_NAME))
+ return openstack.get_images_by_name(glance, LTS_IMAGE_NAME)[0]
+
+
+def setup_cinder_repl_volume_type(cinder, type_name='repl',
+ backend_name='cinder-ceph'):
+ """Set up the Cinder volume replication type.
+
+ :param cinder: Authenticated cinderclient
+ :type cinder: cinder.Client
+ :param type_name: Cinder volume type name
+ :type type_name: str
+ :param backend_name: Cinder volume backend name with replication enabled.
+ :type backend_name: str
+ :returns: Cinder volume type object
+ :rtype: cinderclient.VolumeType
+ """
+ try:
+ vol_type = cinder.volume_types.find(name=type_name)
+ except cinder_exceptions.NotFound:
+ vol_type = cinder.volume_types.create(type_name)
+
+ vol_type.set_keys(metadata={
+ 'volume_backend_name': backend_name,
+ 'replication_enabled': ' True',
+ })
+ return vol_type
+
+
+# TODO: This function should be incorporated into
+# 'zaza.openstack.utilities.openstack.create_volume' helper, once the below
+# flakiness comments are addressed.
+def create_cinder_volume(cinder, name='zaza', image_id=None, type_id=None):
+ """Create a new Cinder volume.
+
+ :param cinder: Authenticated cinderclient.
+ :type cinder: cinder.Client
+ :param name: Volume name.
+ :type name: str
+ :param image_id: Glance image id, if the volume is created from an image.
+ :type image_id: str
+ :param type_id: Cinder Volume type id, if the volume needs to use an
+ explicit volume type.
+ :type type_id: str
+ :returns: Cinder volume
+ :rtype: :class:`Volume`.
+ """
+ # NOTE(fnordahl): for some reason create volume from image often fails
+ # when run just after deployment is finished. We should figure out
+ # why, resolve the underlying issue and then remove this.
+ #
+ # We do not use tenacity here as it will interfere with tenacity used
+ # in ``resource_reaches_status``
+ def create_volume(cinder, volume_params, retry=20):
+ if retry < 1:
+ return
+ volume = cinder.volumes.create(**volume_params)
+ try:
+ # Note(coreycb): stop_after_attempt is increased because using
+ # juju storage for ceph-osd backed by cinder on undercloud
+ # takes longer than the prior method of directory-backed OSD
+ # devices.
+ openstack.resource_reaches_status(
+ cinder.volumes, volume.id, msg='volume',
+ stop_after_attempt=20)
+ return volume
+ except AssertionError:
+ logging.info('retrying')
+ volume.delete()
+ return create_volume(cinder, volume_params, retry=retry - 1)
+
+ volume_params = {
+ 'size': 8,
+ 'name': name,
+ }
+ if image_id:
+ volume_params['imageRef'] = image_id
+ if type_id:
+ volume_params['volume_type'] = type_id
+
+ return create_volume(cinder, volume_params)
+
+
+def setup_rbd_mirror():
+ """Set up an RBD pool in case Cinder isn't present."""
+ def setup(suffix):
+ zaza.model.run_action_on_leader(
+ 'ceph-mon' + suffix,
+ 'create-pool',
+ action_params={
+ 'name': 'zaza-boot',
+ 'app-name': 'rbd',
+ }
+ )
+ zaza.model.run_action_on_leader(
+ 'ceph-rbd-mirror' + suffix,
+ 'refresh-pools',
+ action_params={}
+ )
+
+ setup('')
+ setup('-b')
+
+
+class CephRBDMirrorBase(test_utils.BaseCharmTest):
+ """Base class for ``ceph-rbd-mirror`` tests."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Run setup for ``ceph-rbd-mirror`` tests."""
+ super().setUpClass()
+ cls.cinder_ceph_app_name = 'cinder-ceph'
+ cls.test_cinder_volume_name = 'test-cinder-ceph-volume'
+ # get ready for multi-model Zaza
+ cls.site_a_model = cls.site_b_model = zaza.model.get_juju_model()
+ cls.site_b_app_suffix = '-b'
+
+ def test_if_cinder_present(self):
+ """Test if the cinder-ceph application is present."""
+ try:
+ zaza.model.get_application(self.cinder_ceph_app_name)
+ return True
+ except KeyError:
+ return False
+
+ def skip_test_if_cinder_not_present(self, caller):
+ """Skip a test if Cinder isn't present."""
+ if not self.test_if_cinder_present():
+ raise unittest.SkipTest('Skipping %s due to lack of Cinder'
+ % caller)
+
+ def run_status_action(self, application_name=None, model_name=None,
+ pools=[]):
+ """Run status action, decode and return response."""
+ action_params = {
+ 'verbose': True,
+ 'format': 'json',
+ }
+ if len(pools) > 0:
+ action_params['pools'] = ','.join(pools)
+ result = zaza.model.run_action_on_leader(
+ application_name or self.application_name,
+ 'status',
+ model_name=model_name,
+ action_params=action_params)
+ if result.status == "failed":
+ logging.error("status action failed: %s", result.message)
+ return
+ return json.loads(result.results['output'])
+
+ def get_pools(self):
+ """Retrieve list of pools from both sites.
+
+ :returns: Tuple with list of pools on each side.
+ :rtype: tuple
+ """
+ site_a_pools = zaza.openstack.utilities.ceph.get_ceph_pools(
+ zaza.model.get_lead_unit_name(
+ 'ceph-mon', model_name=self.site_a_model),
+ model_name=self.site_a_model)
+ site_b_pools = zaza.openstack.utilities.ceph.get_ceph_pools(
+ zaza.model.get_lead_unit_name(
+ 'ceph-mon' + self.site_b_app_suffix,
+ model_name=self.site_b_model),
+ model_name=self.site_b_model)
+ return sorted(site_a_pools.keys()), sorted(site_b_pools.keys())
+
+ def get_failover_pools(self):
+ """Get the failover Ceph pools' names, from both sites.
+
+ If the Cinder RBD mirroring mode is 'image', the 'cinder-ceph' pool
+ needs to be excluded, since Cinder orchestrates the failover in that
+ case.
+
+ Also remove the .mgr pools, as they are not failed over.
+
+ :returns: Tuple with site-a pools and site-b pools.
+ :rtype: Tuple[List[str], List[str]]
+ """
+ site_a_pools, site_b_pools = self.get_pools()
+ if (self.test_if_cinder_present() and
+ get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) ==
+ 'image'):
+ site_a_pools.remove(self.cinder_ceph_app_name)
+ site_b_pools.remove(self.cinder_ceph_app_name)
+
+ site_a_pools.remove(".mgr")
+ site_b_pools.remove(".mgr")
+
+ return site_a_pools, site_b_pools
+
+ def wait_for_mirror_state(self, state, application_name=None,
+ model_name=None,
+ check_entries_behind_master=False,
+ require_images_in=[],
+ pools=[]):
+ """Wait until all images reach requested state.
+
+ This function runs the ``status`` action and examines the data it
+ returns.
+
+ :param state: State to expect all images to be in
+ :type state: str
+ :param application_name: Application to run action on
+ :type application_name: str
+ :param model_name: Model to run in
+ :type model_name: str
+ :param check_entries_behind_master: Wait for ``entries_behind_primary``
+ to become '0'. Only makes sense
+ when used with state
+ ``up+replaying``.
+ :type check_entries_behind_master: bool
+ :param require_images_in: List of pools to require images in
+ :type require_images_in: list of str
+ :param pools: List of pools to run status on. If this is empty, the
+ status action will run on all the pools.
+ :type pools: list of str
+ :returns: True on success, never returns on failure
+ """
+ rep = re.compile(r'.*"entries_behind_primary":(\d+),')
+ while True:
+ pool_status = self.run_status_action(
+ application_name=application_name, model_name=model_name,
+ pools=pools)
+ if pool_status is None:
+ logging.debug("status action failed, retrying")
+ time.sleep(5)  # don't spam juju run-action
+ continue
+ for pool, status in pool_status.items():
+ images = status.get('images', [])
+ logging.debug("checking pool %s, images: %s", pool, images)
+ if not len(images) and pool in require_images_in:
+ break
+ for image in images:
+ if image['state'] and image['state'] != state:
+ break
+ if check_entries_behind_master:
+ m = rep.match(image['description'])
+ # NOTE(fnordahl): Tactical fix for upstream Ceph
+ # Luminous bug https://tracker.ceph.com/issues/23516
+ if m and int(m.group(1)) > 42:
+ logging.info('entries_behind_primary:{}'
+ .format(m.group(1)))
+ break
+ else:
+ # not found here, check next pool
+ continue
+ # found here, pass on to outer loop
+ break
+ else:
+ # all images with a state have the expected state
+ return True
+ time.sleep(5)  # don't spam juju run-action
+
+ def setup_test_cinder_volume(self):
+ """Set up the test Cinder volume into the Ceph RBD mirror environment.
+
+ If the volume already exists, then it's returned.
+
+ Also, if the Cinder RBD mirroring mode is 'image', the volume will
+ use an explicit volume type with the appropriate replication flags.
+ Otherwise, it is just a simple Cinder volume using the default backend.
+
+ :returns: Cinder volume
+ :rtype: :class:`Volume`.
+ """
+ session = openstack.get_overcloud_keystone_session()
+ cinder = openstack.get_cinder_session_client(session, version=3)
+
+ try:
+ return cinder.volumes.find(name=self.test_cinder_volume_name)
+ except cinder_exceptions.NotFound:
+ logging.info("Test Cinder volume doesn't exist.
Creating it") + + glance = openstack.get_glance_session_client(session) + image = get_glance_image(glance) + kwargs = { + 'cinder': cinder, + 'name': self.test_cinder_volume_name, + 'image_id': image.id, + } + if get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) == 'image': + volume_type = setup_cinder_repl_volume_type( + cinder, + backend_name=self.cinder_ceph_app_name) + kwargs['type_id'] = volume_type.id + + return create_cinder_volume(**kwargs) + + +class CephRBDMirrorTest(CephRBDMirrorBase): + """Encapsulate ``ceph-rbd-mirror`` tests.""" + + def test_pause_resume(self): + """Run pause and resume tests.""" + self.pause_resume(['rbd-mirror']) + + def test_pool_broker_synced(self): + """Validate that pools created with broker protocol are synced. + + The functional test bundle includes the ``cinder``, ``cinder-ceph`` and + ``glance`` charms. The ``cinder-ceph`` and ``glance`` charms will + create pools using the ceph charms broker protocol at deploy time. + """ + site_a_pools, site_b_pools = self.get_pools() + self.assertEqual(site_a_pools, site_b_pools) + + def test_pool_manual_synced(self): + """Validate that manually created pools are synced after refresh. + + The ``ceph-rbd-mirror`` charm does not get notified when the operator + creates a pool manually without using the ceph charms broker protocol. + + To alleviate this the charm has a ``refresh-pools`` action the operator + can call to have it discover such pools. Validate its operation. + """ + # use action on ceph-mon to create a pool directly in the Ceph cluster + # without using the broker protocol + zaza.model.run_action_on_leader( + 'ceph-mon', + 'create-pool', + model_name=self.site_a_model, + action_params={ + 'name': 'zaza', + 'app-name': 'rbd', + }) + # tell ceph-rbd-mirror unit on site_a to refresh list of pools + zaza.model.run_action_on_leader( + 'ceph-rbd-mirror', + 'refresh-pools', + model_name=self.site_a_model, + action_params={ + }) + # wait for execution to start + zaza.model.wait_for_agent_status(model_name=self.site_a_model) + zaza.model.wait_for_agent_status(model_name=self.site_b_model) + # wait for execution to finish + zaza.model.wait_for_application_states(model_name=self.site_a_model) + zaza.model.wait_for_application_states(model_name=self.site_b_model) + # make sure everything is idle before we test + zaza.model.block_until_all_units_idle(model_name=self.site_a_model) + zaza.model.block_until_all_units_idle(model_name=self.site_b_model) + # validate result + site_a_pools, site_b_pools = self.get_pools() + self.assertEqual(site_a_pools, site_b_pools) + + def test_cinder_volume_mirrored(self): + """Validate that a volume created through Cinder is mirrored. + + For RBD Mirroring to work clients must enable the correct set of + features when creating images. + + The RBD image feature settings are announced by the ``ceph-mon`` charm + over the client relation when it has units related on its + ``rbd-mirror`` endpoint. + + By creating a volume through cinder on site A, checking for presence on + site B and subsequently comparing the contents we get a full end to end + test. 
+ """ + self.skip_test_if_cinder_not_present('test_cinder_volume_mirrored') + volume = self.setup_test_cinder_volume() + site_a_hash = zaza.openstack.utilities.ceph.get_rbd_hash( + zaza.model.get_lead_unit_name('ceph-mon', + model_name=self.site_a_model), + 'cinder-ceph', + 'volume-{}'.format(volume.id), + model_name=self.site_a_model) + self.wait_for_mirror_state( + 'up+replaying', + check_entries_behind_master=True, + application_name=self.application_name + self.site_b_app_suffix, + model_name=self.site_b_model) + logging.info('Checking the Ceph RBD hashes of the primary and ' + 'the secondary Ceph images') + site_b_hash = zaza.openstack.utilities.ceph.get_rbd_hash( + zaza.model.get_lead_unit_name('ceph-mon' + self.site_b_app_suffix, + model_name=self.site_b_model), + 'cinder-ceph', + 'volume-{}'.format(volume.id), + model_name=self.site_b_model) + logging.info(site_a_hash) + logging.info(site_b_hash) + self.assertEqual(site_a_hash, site_b_hash) + + +class CephRBDMirrorControlledFailoverTest(CephRBDMirrorBase): + """Encapsulate ``ceph-rbd-mirror`` controlled failover tests.""" + + def execute_failover_juju_actions(self, + primary_site_app_name, + primary_site_model, + primary_site_pools, + secondary_site_app_name, + secondary_site_model, + secondary_site_pools): + """Execute the failover Juju actions. + + The failover / failback via Juju actions shares the same workflow. The + failback is just a failover with sites in reversed order. + + This function encapsulates the tasks to failover a primary site to + a secondary site: + 1. Demote primary site + 2. Validation of the primary site demotion + 3. Promote secondary site + 4. Validation of the secondary site promotion + + :param primary_site_app_name: Primary site Ceph RBD mirror app name. + :type primary_site_app_name: str + :param primary_site_model: Primary site Juju model name. + :type primary_site_model: str + :param primary_site_pools: Primary site pools. + :type primary_site_pools: List[str] + :param secondary_site_app_name: Secondary site Ceph RBD mirror + app name. + :type secondary_site_app_name: str + :param secondary_site_model: Secondary site Juju model name. + :type secondary_site_model: str + :param secondary_site_pools: Secondary site pools. + :type secondary_site_pools: List[str] + """ + # Check if primary and secondary pools sizes are the same. + self.assertEqual(len(primary_site_pools), len(secondary_site_pools)) + + # Run the 'demote' Juju action against the primary site pools. + logging.info('Demoting {} from model {}.'.format( + primary_site_app_name, primary_site_model)) + result = zaza.model.run_action_on_leader( + primary_site_app_name, + 'demote', + model_name=primary_site_model, + action_params={ + 'pools': ','.join(primary_site_pools) + }) + logging.info(result) + zaza_utils.assertActionRanOK(result) + + # Validate that the demoted pools count matches the total primary site + # pools count. + n_pools_demoted = len(result.results.get('output').split('\n')) + self.assertEqual(len(primary_site_pools), n_pools_demoted) + + # At this point, both primary and secondary sites are demoted. Validate + # that the Ceph images, from both sites, report 'up+unknown', since + # there isn't a primary site at the moment. 
+ logging.info('Waiting until {} is demoted.'.format(
+ primary_site_app_name))
+ self.wait_for_mirror_state(
+ 'up+unknown',
+ application_name=primary_site_app_name,
+ model_name=primary_site_model,
+ pools=primary_site_pools)
+ self.wait_for_mirror_state(
+ 'up+unknown',
+ application_name=secondary_site_app_name,
+ model_name=secondary_site_model,
+ pools=secondary_site_pools)
+
+ # Run the 'promote' Juju action against the secondary site.
+ logging.info('Promoting {} from model {}.'.format(
+ secondary_site_app_name, secondary_site_model))
+ result = zaza.model.run_action_on_leader(
+ secondary_site_app_name,
+ 'promote',
+ model_name=secondary_site_model,
+ action_params={
+ 'pools': ','.join(secondary_site_pools)
+ })
+ zaza_utils.assertActionRanOK(result)
+
+ # Validate that the promoted pools count matches the total secondary
+ # site pools count.
+ n_pools_promoted = len(result.results.get('output').split('\n'))
+ self.assertEqual(len(secondary_site_pools), n_pools_promoted)
+
+ # Validate that the Ceph images from the newly promoted site
+ # report 'up+stopped' state (which is reported by primary Ceph images).
+ logging.info('Waiting until {} is promoted.'.format(
+ secondary_site_app_name))
+ self.wait_for_mirror_state(
+ 'up+stopped',
+ application_name=secondary_site_app_name,
+ model_name=secondary_site_model,
+ pools=secondary_site_pools)
+
+ # Validate that the Ceph images from site-a report 'up+replaying'
+ # (which is reported by secondary Ceph images).
+ self.wait_for_mirror_state(
+ 'up+replaying',
+ check_entries_behind_master=True,
+ application_name=primary_site_app_name,
+ model_name=primary_site_model,
+ pools=primary_site_pools)
+
+ def test_100_cinder_failover(self):
+ """Validate controlled failover via the Cinder API.
+
+ This test only makes sense if Cinder RBD mirroring mode is 'image'.
+ It will return early if this is not the case.
+ """
+ self.skip_test_if_cinder_not_present('test_100_cinder_failover')
+ cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(
+ self.cinder_ceph_app_name)
+ if cinder_rbd_mirroring_mode != 'image':
+ logging.warning(
+ "Skipping 'test_100_cinder_failover' since Cinder RBD "
+ "mirroring mode is {}.".format(cinder_rbd_mirroring_mode))
+ return
+
+ session = openstack.get_overcloud_keystone_session()
+ cinder = openstack.get_cinder_session_client(session, version=3)
+
+ # Check if the Cinder volume host is available with replication
+ # enabled.
+ host = 'cinder@{}'.format(self.cinder_ceph_app_name)
+ svc = cinder.services.list(host=host, binary='cinder-volume')[0]
+ self.assertEqual(svc.replication_status, 'enabled')
+ self.assertEqual(svc.status, 'enabled')
+
+ # Set up the test Cinder volume
+ volume = self.setup_test_cinder_volume()
+
+ # Check if the volume is properly mirrored
+ self.wait_for_mirror_state(
+ 'up+replaying',
+ check_entries_behind_master=True,
+ application_name=self.application_name + self.site_b_app_suffix,
+ model_name=self.site_b_model,
+ pools=[self.cinder_ceph_app_name])
+
+ # Execute the Cinder volume failover
+ openstack.failover_cinder_volume_host(
+ cinder=cinder,
+ backend_name=self.cinder_ceph_app_name,
+ target_backend_id='ceph',
+ target_status='disabled',
+ target_replication_status='failed-over')
+
+ # Check if the test volume is still available after failover
+ self.assertEqual(cinder.volumes.get(volume.id).status, 'available')
+
+ def test_101_cinder_failback(self):
+ """Validate controlled failback via the Cinder API.
+
+ This test only makes sense if Cinder RBD mirroring mode is 'image'.
+ It will return early if this is not the case.
+
+ The test needs to be executed when the Cinder volume host is already
+ failed-over with the test volume on it.
+ """
+ self.skip_test_if_cinder_not_present('test_101_cinder_failback')
+ cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(
+ self.cinder_ceph_app_name)
+ if cinder_rbd_mirroring_mode != 'image':
+ logging.warning(
+ "Skipping 'test_101_cinder_failback' since Cinder RBD "
+ "mirroring mode is {}.".format(cinder_rbd_mirroring_mode))
+ return
+
+ session = openstack.get_overcloud_keystone_session()
+ cinder = openstack.get_cinder_session_client(session, version=3)
+
+ # Check if the Cinder volume host is already failed-over
+ host = 'cinder@{}'.format(self.cinder_ceph_app_name)
+ svc = cinder.services.list(host=host, binary='cinder-volume')[0]
+ self.assertEqual(svc.replication_status, 'failed-over')
+ self.assertEqual(svc.status, 'disabled')
+
+ # Check if the test Cinder volume is already present. The method
+ # 'cinder.volumes.find' raises 404 if the volume is not found.
+ volume = cinder.volumes.find(name=self.test_cinder_volume_name)
+
+ # Execute the Cinder volume failback
+ openstack.failover_cinder_volume_host(
+ cinder=cinder,
+ backend_name=self.cinder_ceph_app_name,
+ target_backend_id='default',
+ target_status='enabled',
+ target_replication_status='enabled')
+
+ # Check if the test volume is still available after failback
+ self.assertEqual(cinder.volumes.get(volume.id).status, 'available')
+
+ def test_200_juju_failover(self):
+ """Validate controlled failover via Juju actions."""
+ # Get the Ceph pools needed to fail over
+ site_a_pools, site_b_pools = self.get_failover_pools()
+
+ # Execute the failover Juju actions with the appropriate parameters.
+ site_b_app_name = self.application_name + self.site_b_app_suffix
+ self.execute_failover_juju_actions(
+ primary_site_app_name=self.application_name,
+ primary_site_model=self.site_a_model,
+ primary_site_pools=site_a_pools,
+ secondary_site_app_name=site_b_app_name,
+ secondary_site_model=self.site_b_model,
+ secondary_site_pools=site_b_pools)
+
+ def test_201_juju_failback(self):
+ """Validate controlled failback via Juju actions."""
+ # Get the Ceph pools needed to fail back
+ site_a_pools, site_b_pools = self.get_failover_pools()
+
+ # Execute the failover Juju actions with the appropriate parameters.
+ # The failback operation is just a failover with sites in reverse
+ # order.
+ site_b_app_name = self.application_name + self.site_b_app_suffix
+ self.execute_failover_juju_actions(
+ primary_site_app_name=site_b_app_name,
+ primary_site_model=self.site_b_model,
+ primary_site_pools=site_b_pools,
+ secondary_site_app_name=self.application_name,
+ secondary_site_model=self.site_a_model,
+ secondary_site_pools=site_a_pools)
+
+ def test_203_juju_resync(self):
+ """Validate the 'resync-pools' Juju action.
+
+ The 'resync-pools' Juju action is meant to flag Ceph images from the
+ secondary site to re-sync against the Ceph images from the primary
+ site.
+
+ This is useful when the secondary Ceph images are out of sync.
+ """
+ # Get the Ceph pools needed to fail back
+ _, site_b_pools = self.get_failover_pools()
+
+ # Run the 'resync-pools' Juju action against the pools from site-b.
+ # This will make sure that the Ceph images from site-b are properly
+ # synced with the primary images from site-a.
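+ # (This is assumed to be roughly equivalent to running
+ # `rbd mirror image resync <pool>/<image>` per image on site-b;
+ # mentioned here only as an illustration of what the action does.)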
+ site_b_app_name = self.application_name + self.site_b_app_suffix
+ logging.info('Re-syncing {} from model {}'.format(
+ site_b_app_name, self.site_b_model))
+ result = zaza.model.run_action_on_leader(
+ site_b_app_name,
+ 'resync-pools',
+ model_name=self.site_b_model,
+ action_params={
+ 'pools': ','.join(site_b_pools),
+ 'i-really-mean-it': True,
+ })
+ zaza_utils.assertActionRanOK(result)
+
+ # Validate that the Ceph images from site-b report 'up+replaying'
+ # (which is reported by secondary Ceph images). And check that images
+ # exist in Cinder and Glance pools.
+ self.wait_for_mirror_state(
+ 'up+replaying',
+ check_entries_behind_master=True,
+ application_name=site_b_app_name,
+ model_name=self.site_b_model,
+ require_images_in=[self.cinder_ceph_app_name, 'glance'],
+ pools=site_b_pools)
+
+
+class CephRBDMirrorDisasterFailoverTest(CephRBDMirrorBase):
+ """Encapsulate ``ceph-rbd-mirror`` destructive tests."""
+
+ def apply_cinder_ceph_workaround(self):
+ """Set minimal timeouts / retries to the Cinder Ceph backend.
+
+ This is needed because the failover via Cinder API will try to do a
+ demotion of site-a. However, when site-a is down, and with the
+ default timeouts / retries, the operation takes an unreasonable amount
+ of time (or sometimes it never finishes).
+ """
+ # These new config options need to be set under the Cinder Ceph backend
+ # section in the main Cinder config file.
+ # At the moment, we don't have the possibility of using Juju config to
+ # set these options. It's also not good practice to have them set in
+ # production.
+ # These should be set only to do the Ceph failover via Cinder API, and
+ # they need to be removed after.
+ configs = {
+ 'rados_connect_timeout': '1',
+ 'rados_connection_retries': '1',
+ 'rados_connection_interval': '0',
+ 'replication_connect_timeout': '1',
+ }
+
+ # Small Python script that will be executed via Juju run to update
+ # the Cinder config file.
+ update_cinder_conf_script = (
+ "import configparser; "
+ "config = configparser.ConfigParser(); "
+ "config.read('/etc/cinder/cinder.conf'); "
+ "{}"
+ "f = open('/etc/cinder/cinder.conf', 'w'); "
+ "config.write(f); "
+ "f.close()")
+ set_cmd = ''
+ for cfg_name in configs:
+ set_cmd += "config.set('{0}', '{1}', '{2}'); ".format(
+ self.cinder_ceph_app_name, cfg_name, configs[cfg_name])
+ script = update_cinder_conf_script.format(set_cmd)
+
+ # Run the workaround script via Juju run
+ zaza.model.run_on_leader(
+ self.cinder_ceph_app_name,
+ 'python3 -c "{}"; systemctl restart cinder-volume'.format(script))
+
+ def kill_primary_site(self):
+ """Simulate an unexpected primary site shutdown."""
+ logging.info('Killing the Ceph primary site')
+ for application in ['ceph-rbd-mirror', 'ceph-mon', 'ceph-osd']:
+ zaza.model.remove_application(
+ application,
+ model_name=self.site_a_model,
+ forcefully_remove_machines=True)
+
+ def test_100_forced_juju_failover(self):
+ """Validate Ceph failover via Juju when the primary site is down.
+
+ * Kill the primary site
+ * Execute the forced failover via Juju actions
+ """
+ # Get the site-b Ceph pools that need to be promoted
+ _, site_b_pools = self.get_failover_pools()
+ site_b_app_name = self.application_name + self.site_b_app_suffix
+
+ # Simulate primary site unexpected shutdown
+ self.kill_primary_site()
+
+ # Try to promote site-b to primary.
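+ # A non-forced promotion is expected to be refused for images whose
+ # primary peer is unreachable (see the notes below); the retry with
+ # 'force' afterwards covers that case.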
+ result = zaza.model.run_action_on_leader( + site_b_app_name, + 'promote', + model_name=self.site_b_model, + action_params={ + 'pools': ','.join(site_b_pools), + }) + zaza_utils.assertActionRanOK(result) + + # The action may not show up as 'failed' if there are no pools that + # needed to be promoted. + # self.assertEqual(result.status, 'failed') + + # Retry to promote site-b using the 'force' Juju action parameter. + result = zaza.model.run_action_on_leader( + site_b_app_name, + 'promote', + model_name=self.site_b_model, + action_params={ + 'force': True, + 'pools': ','.join(site_b_pools), + }) + + # Validate successful Juju action execution + self.assertEqual(result.status, 'completed') + + def test_200_forced_cinder_failover(self): + """Validate Ceph failover via Cinder when the primary site is down. + + This test only makes sense if Cinder RBD mirroring mode is 'image'. + It will return early, if this is not the case. + + This assumes that the primary site is already killed. + """ + self.skip_test_if_cinder_not_present('test_200_forced_cinder_failover') + cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode( + self.cinder_ceph_app_name) + if cinder_rbd_mirroring_mode != 'image': + logging.warning( + "Skipping 'test_200_cinder_failover_without_primary_site' " + "since Cinder RBD mirroring mode is {}.".format( + cinder_rbd_mirroring_mode)) + return + + # Make sure that the Cinder Ceph backend workaround is applied. + self.apply_cinder_ceph_workaround() + + session = openstack.get_overcloud_keystone_session() + cinder = openstack.get_cinder_session_client(session, version=3) + openstack.failover_cinder_volume_host( + cinder=cinder, + backend_name=self.cinder_ceph_app_name, + target_backend_id='ceph', + target_status='disabled', + target_replication_status='failed-over') + + # Check that the Cinder volumes are still available after forced + # failover. + for volume in cinder.volumes.list(): + self.assertEqual(volume.status, 'available') diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index 62b39dbb..b5c5ad9a 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,22 +1,17 @@ charm_name: ceph-rbd-mirror -smoke_bundles: -- jammy-antelope -gate_bundles: -- jammy-bobcat -comment: | - The e2e bundles are useful for development but adds no additional value to - the functional tests. 
-dev_bundles: -- focal-yoga -- focal-yoga-image-mirroring -- jammy-yoga -- jammy-yoga-image-mirroring configure: -- zaza.openstack.charm_tests.glance.setup.add_cirros_image + - tests.target.setup_rbd_mirror tests: -- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorTest -- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorControlledFailoverTest -- zaza.openstack.charm_tests.ceph.rbd_mirror.tests.CephRBDMirrorDisasterFailoverTest -tests_options: - force_deploy: - - jammy-caracal \ No newline at end of file +- zaza.charm_tests.lifecycle.refresh.CharmRefreshAll +- tests.target.CephRBDMirrorTest +- tests.target.CephRBDMirrorControlledFailoverTest +- tests.target.CephRBDMirrorDisasterFailoverTest + +target_deploy_status: + ceph-rbd-mirror: + workload-status: waiting + workload-status-message: 'Waiting for pools to be created' + + ceph-rbd-mirror-b: + workload-status: waiting + workload-status-message: 'Waiting for pools to be created' diff --git a/ceph-rbd-mirror/tox.ini b/ceph-rbd-mirror/tox.ini index af776db4..721bd6be 100644 --- a/ceph-rbd-mirror/tox.ini +++ b/ceph-rbd-mirror/tox.ini @@ -12,16 +12,13 @@ sitepackages = False skip_missing_interpreters = False [testenv] -# We use tox mainly for virtual environment management for test requirements -# and do not install the charm code as a Python package into that environment. -# Ref: https://tox.wiki/en/latest/config.html#skip_install -skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TERM=linux - JUJU_REPOSITORY={toxinidir}/build CHARM_LAYERS_DIR={toxinidir}/layers CHARM_INTERFACES_DIR={toxinidir}/interfaces + JUJU_REPOSITORY={toxinidir}/build + TEST_JUJU3=1 passenv = no_proxy http_proxy @@ -39,10 +36,6 @@ deps = [testenv:build] basepython = python3 -# charmcraft clean is done to ensure that -# `tox -e build` always performs a clean, repeatable build. -# For faster rebuilds during development, -# directly run `charmcraft -v pack && ./rename.sh`. 
commands = charmcraft clean charmcraft -v pack @@ -52,7 +45,7 @@ commands = [testenv:build-reactive] basepython = python3 commands = - charm-build --log-level DEBUG --use-lock-file-branches -o {toxinidir}/build/builds src {posargs} + charm-build --log-level DEBUG --use-lock-file-branches --binary-wheels-from-source -o {toxinidir}/build/builds src {posargs} [testenv:add-build-lock-file] basepython = python3 @@ -61,28 +54,20 @@ commands = [testenv:py3] basepython = python3 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py38] -basepython = python3.8 -deps = -r{toxinidir}/test-requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:py39] -basepython = python3.9 -deps = -r{toxinidir}/test-requirements.txt +deps = + -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} [testenv:py310] basepython = python3.10 -deps = -r{toxinidir}/test-requirements.txt +deps = + -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3 -deps = flake8==3.9.2 - charm-tools==2.8.4 +deps = flake8 + charm-tools commands = flake8 {posargs} src unit_tests [testenv:cover] From f4725ac9ac2629d7aae9cbcaa4d52dc2db6a9c90 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 12 Feb 2025 10:29:41 +0100 Subject: [PATCH 2653/2699] Fix lint / unit test issues Signed-off-by: Peter Sabaini --- ceph-fs/test-requirements.txt | 45 ++++++-------- .../test_lib_charm_openstack_ceph_fs.py | 18 +++--- ceph-mon/test-requirements.txt | 8 --- ceph-mon/unit_tests/test_ceph_hooks.py | 2 +- ceph-mon/unit_tests/test_ceph_utils.py | 26 ++++---- ceph-proxy/test-requirements.txt | 35 ++++++++--- ceph-rbd-mirror/test-requirements.txt | 62 ++++++++++++------- ceph-rbd-mirror/unit_tests/test_actions.py | 4 +- 8 files changed, 111 insertions(+), 89 deletions(-) diff --git a/ceph-fs/test-requirements.txt b/ceph-fs/test-requirements.txt index a11a7d07..35d38c5b 100644 --- a/ceph-fs/test-requirements.txt +++ b/ceph-fs/test-requirements.txt @@ -2,10 +2,15 @@ # within individual charm repos. See the 'global' dir contents for available # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools + +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html # -pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 +cryptography<3.4 + +requests>=2.18.4 stestr>=2.2.0 @@ -25,30 +30,16 @@ stevedore<1.31.0;python_version<'3.6' debtcollector<1.22.0;python_version<'3.6' oslo.utils<=3.41.0;python_version<'3.6' -requests>=2.18.4 -charms.reactive +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) -# Newer mock seems to have some syntax which is newer than python3.5 (e.g. -# f'{something}' -mock>=1.2,<4.0.0; python_version < '3.6' -mock>=1.2; python_version >= '3.6' +# icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test +# failures. Pin pyopenssl to resolve the failure. 
+pyopenssl<=22.0.0 -nose>=1.3.7 -coverage>=3.6 +pydantic < 2 +cosl + +netifaces git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack -# -# Revisit for removal / mock improvement: -# -# NOTE(lourot): newer versions of cryptography require a Rust compiler to build, -# see -# * https://github.com/openstack-charmers/zaza/issues/421 -# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html -# -netifaces # vault -psycopg2-binary # vault -tenacity # vault -pbr==5.6.0 # vault -cryptography<3.4 # vault, keystone-saml-mellon -lxml # keystone-saml-mellon -hvac # vault, barbican-vault -psutil # cinder-lvm +charms.reactive diff --git a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py index 6873aaef..a60cbf2e 100644 --- a/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py +++ b/ceph-fs/unit_tests/test_lib_charm_openstack_ceph_fs.py @@ -32,7 +32,7 @@ def test_packages(self): # Package list is the only difference between the past version and # future versions of this charm, see ``TestCephFsCharm`` for the rest # of the tests - self.assertEquals(self.target.packages, [ + self.assertEqual(self.target.packages, [ 'ceph-mds', 'gdisk', 'btrfs-tools', 'xfsprogs']) @@ -54,30 +54,30 @@ def patch_target(self, attr, return_value=None): setattr(self, attr, started) def test___init__(self): - self.assertEquals(self.target.services, [ + self.assertEqual(self.target.services, [ 'ceph-mds@somehost']) self.assertDictEqual(self.target.restart_map, { '/etc/ceph/ceph.conf': ['ceph-mds@somehost']}) - self.assertEquals(self.target.packages, [ + self.assertEqual(self.target.packages, [ 'ceph-mds', 'gdisk', 'btrfs-progs', 'xfsprogs']) def test_configuration_class(self): - self.assertEquals(self.target.options.hostname, 'somehost') - self.assertEquals(self.target.options.mds_name, 'somehost') + self.assertEqual(self.target.options.hostname, 'somehost') + self.assertEqual(self.target.options.mds_name, 'somehost') self.patch_target('get_networks') self.get_networks.return_value = ['fakeaddress'] - self.assertEquals(self.target.options.networks, ['fakeaddress']) + self.assertEqual(self.target.options.networks, ['fakeaddress']) self.patch_object(ceph_fs.ch_core.hookenv, 'config') self.config.side_effect = lambda x: {'prefer-ipv6': False}.get(x) self.patch_object(ceph_fs, 'get_ipv6_addr') self.get_ipv6_addr.return_value = ['2001:db8::fake'] self.patch_target('get_public_addr') self.get_public_addr.return_value = '192.0.2.42' - self.assertEquals( + self.assertEqual( self.target.options.public_addr, '192.0.2.42') self.config.side_effect = lambda x: {'prefer-ipv6': True}.get(x) - self.assertEquals( + self.assertEqual( self.target.options.public_addr, '2001:db8::fake') self.patch_target('get_mds_cache') @@ -85,7 +85,7 @@ def test_configuration_class(self): 'mds-cache-memory-limit': '4Gi', 'mds-cache-reservation': 0.05, 'mds-health-cache-threshold': 1.5} - self.assertEquals(self.target.options.mds_cache, { + self.assertEqual(self.target.options.mds_cache, { 'mds-cache-memory-limit': '4Gi', 'mds-cache-reservation': 0.05, 'mds-health-cache-threshold': 1.5}) diff --git a/ceph-mon/test-requirements.txt b/ceph-mon/test-requirements.txt index 9ba127ae..43248e4c 100644 --- a/ceph-mon/test-requirements.txt +++ b/ceph-mon/test-requirements.txt @@ -2,14 +2,6 @@ # within individual charm repos. 
See the 'global' dir contents for available # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools -# -# TODO: Distill the func test requirements from the lint/unit test -# requirements. They are intertwined. Also, Zaza itself should specify -# all of its own requirements and if it doesn't, fix it there. -# -pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 # NOTE: newer versions of cryptography require a Rust compiler to build, # see diff --git a/ceph-mon/unit_tests/test_ceph_hooks.py b/ceph-mon/unit_tests/test_ceph_hooks.py index 713583bf..5426d00c 100644 --- a/ceph-mon/unit_tests/test_ceph_hooks.py +++ b/ceph-mon/unit_tests/test_ceph_hooks.py @@ -705,7 +705,7 @@ def test_bootstrap_source_relation_data_not_ready(self): for unit in ('ceph/0', 'ceph/1', 'ceph/2'): expected_calls.append(call('monitor-secret', unit, relid)) expected_calls.append(call('fsid', unit, relid)) - self.relation_get.has_calls(expected_calls) + self.relation_get.assert_has_calls(expected_calls) self.assertEqual(self.leader_set.call_count, 0) self.assertEqual(self.mon_relation.call_count, 0) diff --git a/ceph-mon/unit_tests/test_ceph_utils.py b/ceph-mon/unit_tests/test_ceph_utils.py index ba3da7f3..ff05b3b1 100644 --- a/ceph-mon/unit_tests/test_ceph_utils.py +++ b/ceph-mon/unit_tests/test_ceph_utils.py @@ -78,7 +78,7 @@ def test_get_default_rbd_features(self, _check_output): {'a': 'b', 'rbd_default_features': '61', 'c': 'd'}) - self.assertEquals( + self.assertEqual( utils.get_default_rbd_features(), 61) _check_output.assert_called_once_with( @@ -101,13 +101,13 @@ def test_get_rbd_features(self, _config, _has_rbd_mirrors, _get_default_rbd_features): _config.side_effect = \ lambda key: {'default-rbd-features': 42}.get(key, None) - self.assertEquals(utils.get_rbd_features(), 42) + self.assertEqual(utils.get_rbd_features(), 42) _has_rbd_mirrors.return_value = True _get_default_rbd_features.return_value = 61 _config.side_effect = lambda key: {}.get(key, None) - self.assertEquals(utils.get_rbd_features(), 125) + self.assertEqual(utils.get_rbd_features(), 125) _has_rbd_mirrors.return_value = False - self.assertEquals(utils.get_rbd_features(), None) + self.assertEqual(utils.get_rbd_features(), None) @mock.patch.object(utils, '_is_required_osd_release') @mock.patch.object(utils, '_all_ceph_versions_same') @@ -190,7 +190,7 @@ def test_all_ceph_versions_same_two_overall_returns_false( self.assertFalse( return_bool, msg='all_ceph_versions_same returned True but should be False') - self.assertEquals(log.call_count, 2) + self.assertEqual(log.call_count, 2) @mock.patch.object(utils.subprocess, 'check_output') @mock.patch.object(utils.json, 'loads') @@ -208,7 +208,7 @@ def test_all_ceph_versions_same_one_overall_no_osd_returns_false( self.assertFalse( return_bool, msg='all_ceph_versions_same returned True but should be False') - self.assertEquals(log.call_count, 2) + self.assertEqual(log.call_count, 2) @mock.patch.object(utils.subprocess, 'check_output') @mock.patch.object(utils, 'log') @@ -241,9 +241,10 @@ def test_set_require_osd_release_success(self, log, check_call): release = 'luminous' utils._set_require_osd_release(release) expected_call = mock.call( - ['ceph', 'osd', 'require-osd-release', release] + ['ceph', 'osd', 
'require-osd-release', release, + '--yes-i-really-mean-it'] ) - check_call.has_calls(expected_call) + check_call.assert_has_calls([expected_call]) @mock.patch.object(utils.subprocess, 'check_call') @mock.patch.object(utils, 'log') @@ -252,14 +253,15 @@ def test_set_require_osd_release_raise_call_error(self, log, check_call): check_call.side_effect = utils.subprocess.CalledProcessError( 0, mock.MagicMock() ) - expected_call = mock.call( - ['ceph', 'osd', 'require-osd-release', release] - ) + expected_call = mock.call([ + 'ceph', 'osd', 'require-osd-release', release, + '--yes-i-really-mean-it' + ]) with self.assertRaises(utils.OsdPostUpgradeError): utils._set_require_osd_release(release) - check_call.has_calls(expected_call) + check_call.assert_has_calls([expected_call]) log.assert_called_once() @mock.patch.object(utils, 'relation_ids') diff --git a/ceph-proxy/test-requirements.txt b/ceph-proxy/test-requirements.txt index 4ef87dc5..43248e4c 100644 --- a/ceph-proxy/test-requirements.txt +++ b/ceph-proxy/test-requirements.txt @@ -2,13 +2,13 @@ # within individual charm repos. See the 'global' dir contents for available # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools + +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html # -# TODO: Distill the func test requirements from the lint/unit test -# requirements. They are intertwined. Also, Zaza itself should specify -# all of its own requirements and if it doesn't, fix it there. -# -pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 +cryptography<3.4 requests>=2.18.4 @@ -18,12 +18,33 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' + coverage>=4.5.2 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack # Needed for charm-glance: -git+https://opendev.org/openstack/tempest.git#egg=tempest +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' +tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' +tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests + +# icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test +# failures. Pin pyopenssl to resolve the failure. +pyopenssl<=22.0.0 + +pydantic < 2 +cosl diff --git a/ceph-rbd-mirror/test-requirements.txt b/ceph-rbd-mirror/test-requirements.txt index a7936e65..9e3c89dd 100644 --- a/ceph-rbd-mirror/test-requirements.txt +++ b/ceph-rbd-mirror/test-requirements.txt @@ -2,9 +2,15 @@ # within individual charm repos. 
See the 'global' dir contents for available # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools + +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html # -pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here. -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 +cryptography<3.4 + +requests>=2.18.4 stestr>=2.2.0 @@ -12,27 +18,37 @@ stestr>=2.2.0 # https://github.com/mtreinish/stestr/issues/145 cliff<3.0.0 -requests>=2.18.4 -charms.reactive +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' + +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' + +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) +git+https://github.com/openstack-charmers/zaza.git#egg=zaza +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack -mock>=1.2 +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' +tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' +tempest<24.0.0;python_version<'3.6' -nose>=1.3.7 -coverage>=3.6 +croniter # needed for charm-rabbitmq-server unit tests + +# icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test +# failures. Pin pyopenssl to resolve the failure. 
+pyopenssl<=22.0.0 + +pydantic < 2 +cosl + +netifaces git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack -# -# Revisit for removal / mock improvement: -# -# NOTE(lourot): newer versions of cryptography require a Rust compiler to build, -# see -# * https://github.com/openstack-charmers/zaza/issues/421 -# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html -# -netifaces # vault -psycopg2-binary # vault -tenacity # vault -pbr==5.6.0 # vault -cryptography<3.4 # vault, keystone-saml-mellon -lxml # keystone-saml-mellon -hvac # vault, barbican-vault -psutil # cinder-lvm +charms.reactive diff --git a/ceph-rbd-mirror/unit_tests/test_actions.py b/ceph-rbd-mirror/unit_tests/test_actions.py index 270eb1a3..9c0964cb 100644 --- a/ceph-rbd-mirror/unit_tests/test_actions.py +++ b/ceph-rbd-mirror/unit_tests/test_actions.py @@ -73,7 +73,7 @@ def test_rbd_mirror_action(self): # the order the pools has in the output string is undefined self.action_set.assert_called_once_with( {'output': mock.ANY}) - self.assertEquals( + self.assertEqual( sorted(self.action_set.call_args[0][0]['output'].split('\n')), ['apool: Promoted 0 mirrored images', 'bpool: Promoted 0 mirrored images']) @@ -171,7 +171,7 @@ def test_resync_pools(self): mock.call('i-really-mean-it'), mock.call('pools'), ]) - self.assertEquals( + self.assertEqual( sorted(self.action_set.call_args[0][0]['output'].split('\n')), ['apool/imagea: resync flagged for imagea']) From efb71d9bff05f68ef7e1a76cbd09fcdd94776ffc Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 7 Feb 2025 17:22:29 +0100 Subject: [PATCH 2654/2699] ceph-fs: fix test mount Signed-off-by: Peter Sabaini (cherry picked from commit d2ba29067bafd4f5ad929439b929e253f278db8f) --- ceph-fs/src/tests/target.py | 39 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/ceph-fs/src/tests/target.py b/ceph-fs/src/tests/target.py index c4e135bc..933f3c50 100644 --- a/ceph-fs/src/tests/target.py +++ b/ceph-fs/src/tests/target.py @@ -19,7 +19,7 @@ import subprocess from tenacity import ( retry, Retrying, stop_after_attempt, wait_exponential, - retry_if_exception_type) + retry_if_exception_type, retry_if_result) import unittest import zaza import zaza.model as model @@ -47,27 +47,28 @@ def tearDown(self): logging.warning( "Failed to cleanup mounts on {}".format(unit)) - def _mount_share(self, unit_name: str, - retry: bool = True): + def _mount_share(self, unit_name: str, perform_retry: bool = True): self._install_dependencies(unit_name) self._install_keyring(unit_name) - ssh_cmd = ( - 'sudo mkdir -p {0} && ' - 'sudo ceph-fuse {0}'.format(self.mount_dir) - ) - if retry: - for attempt in Retrying( - stop=stop_after_attempt(5), - wait=wait_exponential(multiplier=3, - min=2, max=10)): - with attempt: - zaza.utilities.generic.run_via_ssh( - unit_name=unit_name, - cmd=ssh_cmd) + cmd = 'sudo mkdir -p {0} && sudo ceph-fuse {0}'.format( + self.mount_dir) + + if perform_retry: + @retry( + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=3, min=2, max=10), + retry=retry_if_result(lambda res: res.get('Code') != '0') + ) + def _do_mount(): + logging.info(f"Mounting CephFS on {unit_name}") + res = model.run_on_unit(unit_name, cmd) + logging.info(f"Mount result: {res}") + return res + + _do_mount() else: - zaza.utilities.generic.run_via_ssh( - unit_name=unit_name, - cmd=ssh_cmd) + model.run_on_unit(unit_name, cmd) + self.mounts_share = True def _install_keyring(self, unit_name: str): From 
0a4f34c9384c452b08de7cd58fcd5178fbce0252 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 22 Jan 2025 19:05:56 -0300 Subject: [PATCH 2655/2699] Add git-related files --- .github/workflows/build-and-test.yml | 152 +++++++++++++++++++++++++++ .github/workflows/pr.yaml | 8 ++ .gitignore | 12 +++ 3 files changed, 172 insertions(+) create mode 100644 .github/workflows/build-and-test.yml create mode 100644 .github/workflows/pr.yaml create mode 100644 .gitignore diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 00000000..433a2c84 --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,152 @@ +name: Build/Test + +on: + workflow_call: + +jobs: + modifiedparts: + runs-on: ubuntu-latest + outputs: + parts: ${{steps.changed-parts.outputs.parts }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Get modified files + id: changed-files + uses: tj-actions/changed-files@v35 + + - name: Set output + id: changed-parts + run: | + components=() + # Retrieve components with a 'tox.ini' file. + for file in ${{ steps.changed-files.outputs.all_changed_files }}; do + component=$(echo "$file" | cut -d "/" -f1) + if [[ -f "./$component/tox.ini" ]]; then + components="$components $component" + fi + done + + components=($components) + components=`echo "${components[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '` + components=($components) + modified_parts=`jq --compact-output --null-input '$ARGS.positional' --args -- "${components[@]}"` + echo "Modified parts: $modified_parts" + echo "parts=$modified_parts" >> $GITHUB_OUTPUT + + build: + needs: modifiedparts + name: Build the charm + runs-on: ubuntu-latest + if: ${{ needs.modifiedparts.output.parts != '[]' }} + strategy: + matrix: + part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo apt-get -qq install libxslt-dev libxml2-dev python3-lxml tox + + - name: Run linters + run: tox -c ${{ matrix.part }} -e pep8 + + - name: Run unit tests + run: tox -c ${{ matrix.part }} -e py3 + + - name: Setup LXD + uses: canonical/setup-lxd@v0.1.1 + with: + channel: 5.21/stable + + - name: Build charm(s) + id: builder + run: | + sudo snap install charmcraft --classic + tox -c ${{ matrix.part }} -e build + + - name: Upload built charm + uses: actions/upload-artifact@v3 + with: + name: charms + path: "./${{ matrix.part }}/*.charm" + + functional-test: + needs: + - modifiedparts + - build + name: Functional tests + runs-on: self-hosted + if: ${{ needs.modifiedparts.output.parts != '[]' }} + strategy: + matrix: + part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} + steps: + - name: Download charm + uses: actions/download-artifact@v3 + with: + name: charms + path: ~/artifacts/ + + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup LXD + run: | + if [[ "$(snap list | grep -c lxd)" -eq 0 ]]; then + sudo snap install lxd --channel=5.21/stable + sudo usermod -aG lxd "$USER" + newgrp lxd + lxd init --minimal + fi + + - name: Install dependencies + run: | + sudo apt -y install tox + if [ ! 
-d "$HOME/.local/share/juju" ]; then + sudo snap install juju --channel=3.4/stable + mkdir -p ~/.local/share/juju + juju bootstrap localhost localhost + fi + + sudo snap install --classic juju-crashdump + + - name: Run the tests + run: | + date + mv ~/artifacts/*.charm ./ + if [[ -f "./${{ matrix.part }}/src/tox.ini" ]]; then + tox -c ${{ matrix.part }}/src -e func-target -- noble-caracal + else + tox -c ${{ matrix.part}} -e func-target -- noble-caracal + fi + + - name: Generate crash dumps + if: failure() + run: | + models=$(juju models | grep zaza | awk '{print $1}' | tr -d '*') + rm -rf ./crashdumps + mkdir ./crashdumps + for model in $models; do + juju-crashdump -m $model -o ./crashdumps + done + + - name: Upload artifacts on failure + uses: actions/upload-artifact@v3 + with: + name: crashdumps + path: "./crashdumps/*" + if: failure() + + - name: Tear down models + if: always() + run: | + models=$(juju models | grep zaza | awk '{print $1}' | tr -d '*') + for model in $models; do + juju destroy-model --no-prompt --force --destroy-storage $model + done diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000..6e045d55 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,8 @@ +name: PR + +on: + pull_request: + +jobs: + build: + uses: ./.github/workflows/build-and-test.yml diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..901e8bd5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +bin +.idea +.coverage +.testrepository +.tox +*.sw[nop] +*.charm +.idea +*.pyc +func-results.json +.stestr +__pycache__ From d25b41b2e13a76bf3151d82489782b31f8a2b243 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 4 Feb 2025 09:12:57 +0100 Subject: [PATCH 2656/2699] Workflow: modernize up/download action, fix empty matrix The matrix strategy doesn't work with an empty parts field, skipping in that case. Also add debugging Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 433a2c84..4c2ce345 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -39,11 +39,12 @@ jobs: needs: modifiedparts name: Build the charm runs-on: ubuntu-latest - if: ${{ needs.modifiedparts.output.parts != '[]' }} + if: ${{ needs.modifiedparts.outputs.parts != '[]' }} strategy: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} steps: + - name: Checkout uses: actions/checkout@v4 @@ -69,9 +70,9 @@ jobs: tox -c ${{ matrix.part }} -e build - name: Upload built charm - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: charms + name: charm-${{ matrix.part }} path: "./${{ matrix.part }}/*.charm" functional-test: @@ -79,16 +80,16 @@ jobs: - modifiedparts - build name: Functional tests - runs-on: self-hosted + runs-on: [self-hosted, linux, amd64, X64, large, noble] if: ${{ needs.modifiedparts.output.parts != '[]' }} strategy: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} steps: - name: Download charm - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: charms + name: charm-${{ matrix.part }} path: ~/artifacts/ - name: Checkout code @@ -109,7 +110,7 @@ jobs: run: | sudo apt -y install tox if [ ! 
-d "$HOME/.local/share/juju" ]; then - sudo snap install juju --channel=3.4/stable + sudo snap install juju --channel=3.6/stable mkdir -p ~/.local/share/juju juju bootstrap localhost localhost fi @@ -137,12 +138,16 @@ jobs: done - name: Upload artifacts on failure - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: crashdumps + name: crashdumps-${{ matrix.part }} path: "./crashdumps/*" if: failure() + - name: Setup tmate session + if: ${{ failure() && runner.debug }} + uses: canonical/action-tmate@main + - name: Tear down models if: always() run: | From acaa30049d92966c87a2afa5d22c0ad05e242658 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Tue, 4 Feb 2025 21:09:44 +0100 Subject: [PATCH 2657/2699] CI: clear out docker iptables rules (#8) Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 6 ++++++ tests/scripts/actionutils.sh | 15 +++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 tests/scripts/actionutils.sh diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 4c2ce345..4c75077e 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -97,6 +97,12 @@ jobs: with: fetch-depth: 0 + - name: Copy utils + run: cp tests/scripts/actionutils.sh $HOME + + - name: Clear FORWARD firewall rules + run: ~/actionutils.sh cleaript + - name: Setup LXD run: | if [[ "$(snap list | grep -c lxd)" -eq 0 ]]; then diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh new file mode 100644 index 00000000..dd3976be --- /dev/null +++ b/tests/scripts/actionutils.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +function cleaript() { + # Docker can inject rules causing firewall conflicts + sudo iptables -P FORWARD ACCEPT || true + sudo ip6tables -P FORWARD ACCEPT || true + sudo iptables -F FORWARD || true + sudo ip6tables -F FORWARD || true + +} + +run="${1}" +shift + +$run "$@" From 98298c18876ec07f31fe047927ab7c3c82b1442f Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 31 Jan 2025 19:30:45 +0100 Subject: [PATCH 2658/2699] ceph-proxy: remove ntp systemd-timesyncd should be sufficient Signed-off-by: Peter Sabaini --- ceph-proxy/hooks/ceph.py | 4 ++-- tests/scripts/actionutils.sh | 0 2 files changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 tests/scripts/actionutils.sh diff --git a/ceph-proxy/hooks/ceph.py b/ceph-proxy/hooks/ceph.py index 7e57155f..04485faf 100644 --- a/ceph-proxy/hooks/ceph.py +++ b/ceph-proxy/hooks/ceph.py @@ -46,8 +46,8 @@ PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'] -PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'] +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'xfsprogs'] +PACKAGES_FOCAL = ['ceph', 'gdisk', 'btrfs-progs', 'xfsprogs'] def ceph_user(): diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh old mode 100644 new mode 100755 From d02474d8b19546b676e97227abb7287350e8cada Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Wed, 5 Feb 2025 15:31:07 +0100 Subject: [PATCH 2659/2699] Workflow: add manual trigger Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 64 +++++++++++++++++++--------- 1 file changed, 44 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 4c75077e..8c5f5308 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -2,38 +2,63 @@ name: Build/Test on: 
workflow_call: + workflow_dispatch: + inputs: + part: + description: 'Name of the charm to build/test manually. Defaults to all charms' + required: false + default: '' jobs: modifiedparts: runs-on: ubuntu-latest outputs: - parts: ${{steps.changed-parts.outputs.parts }} + parts: ${{ steps.determine-parts.outputs.parts }} steps: - name: Checkout repository uses: actions/checkout@v4 + # For non-manual triggered runs - name: Get modified files id: changed-files + if: ${{ github.event_name != 'workflow_dispatch' }} uses: tj-actions/changed-files@v35 - - name: Set output - id: changed-parts + - name: Determine charms to build/test + id: determine-parts + env: + INPUT_PART: ${{ inputs.part }} + GITHUB_EVENT_NAME: ${{ github.event_name }} run: | - components=() - # Retrieve components with a 'tox.ini' file. - for file in ${{ steps.changed-files.outputs.all_changed_files }}; do - component=$(echo "$file" | cut -d "/" -f1) - if [[ -f "./$component/tox.ini" ]]; then - components="$components $component" + if [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then + if [ -n "$INPUT_PART" ]; then + # Manual run with a specified charm + components=($INPUT_PART) + else + # Manual run, no charm specified -> run all + components=($(find . -maxdepth 1 -type d ! -path '.' -exec bash -c '[[ -f "$0/charmcraft.yaml" ]] && basename "$0"' {} \; | sort)) fi - done - - components=($components) - components=`echo "${components[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '` - components=($components) - modified_parts=`jq --compact-output --null-input '$ARGS.positional' --args -- "${components[@]}"` - echo "Modified parts: $modified_parts" - echo "parts=$modified_parts" >> $GITHUB_OUTPUT + else + # Automatic run: use changed-files to determine modified charms + components=() + # Retrieve components with a 'tox.ini' file. + for file in ${{ steps.changed-files.outputs.all_changed_files }}; do + component=$(echo "$file" | cut -d "/" -f1) + if [[ -f "./$component/charmcraft.yaml" ]]; then + # This is a charm. + components+=("$component") + elif [[ -f "./$component/tox.ini" ]]; then + # Assume this is a library. + # TODO: Add dependent charms here. 
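+                # One possible shape for that TODO (hypothetical mapping,
+                # not a real dependency list for this repo):
+                #   case "$component" in
+                #     some-shared-lib) components+=("ceph-mon" "ceph-osd");;
+                #   esac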
+ : + fi + done + # Remove dups + components=($(echo "${components[@]}" | tr ' ' '\n' | sort -u)) + fi + json_output=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${components[@]}") + echo "Modified parts: $json_output" + echo "parts=$json_output" >> $GITHUB_OUTPUT build: needs: modifiedparts @@ -44,7 +69,6 @@ jobs: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} steps: - - name: Checkout uses: actions/checkout@v4 @@ -81,7 +105,7 @@ jobs: - build name: Functional tests runs-on: [self-hosted, linux, amd64, X64, large, noble] - if: ${{ needs.modifiedparts.output.parts != '[]' }} + if: ${{ needs.modifiedparts.outputs.parts != '[]' }} strategy: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} @@ -130,7 +154,7 @@ jobs: if [[ -f "./${{ matrix.part }}/src/tox.ini" ]]; then tox -c ${{ matrix.part }}/src -e func-target -- noble-caracal else - tox -c ${{ matrix.part}} -e func-target -- noble-caracal + tox -c ${{ matrix.part }} -e func-target -- noble-caracal fi - name: Generate crash dumps From 3dceac361cd0882b6acbad1aa530a3efa5ae3c93 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 6 Feb 2025 18:45:55 +0100 Subject: [PATCH 2660/2699] ceph-nfs: remove overlay Not needed anymore Signed-off-by: Peter Sabaini --- ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 diff --git a/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 deleted file mode 100644 index a8bed22b..00000000 --- a/ceph-nfs/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ /dev/null @@ -1,4 +0,0 @@ -applications: - ceph-nfs: - options: - vip: '{{ TEST_VIP00 }}' From 4a12a251657bef253ab8933ba5cfc13a613b935f Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 6 Feb 2025 19:14:42 +0100 Subject: [PATCH 2661/2699] Workflow: check deleted files as well Also add some debugging Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8c5f5308..28f04a9d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -29,6 +29,7 @@ jobs: env: INPUT_PART: ${{ inputs.part }} GITHUB_EVENT_NAME: ${{ github.event_name }} + ALL_MOD_FILES: ${{ steps.changed-files.outputs.all_modified_files }} run: | if [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then if [ -n "$INPUT_PART" ]; then @@ -40,9 +41,10 @@ jobs: fi else # Automatic run: use changed-files to determine modified charms + echo "Modified files to eval: ${ALL_MOD_FILES}" components=() # Retrieve components with a 'tox.ini' file. - for file in ${{ steps.changed-files.outputs.all_changed_files }}; do + for file in ${ALL_MOD_FILES} ; do component=$(echo "$file" | cut -d "/" -f1) if [[ -f "./$component/charmcraft.yaml" ]]; then # This is a charm. From f0226c0f23ce57c56ae38aa4172beb92b4177f02 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 12 Feb 2025 13:40:55 -0300 Subject: [PATCH 2662/2699] Misc. improvements to the github workflow This PR makes it so that: - A single failure in a test run doesn't automatically cancel all the other test runs. - The upload task has been modified so that the name is equal for all jobs. This was necessary because otherwise the charms would only download their own built files. 
- Before deploying via juju for functional testing, the images for both containers and VMs are cached. --- .github/workflows/build-and-test.yml | 7 +++++-- tests/scripts/actionutils.sh | 10 ++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 28f04a9d..37f34741 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -70,6 +70,7 @@ jobs: strategy: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} + fail-fast: false steps: - name: Checkout uses: actions/checkout@v4 @@ -98,7 +99,7 @@ jobs: - name: Upload built charm uses: actions/upload-artifact@v4 with: - name: charm-${{ matrix.part }} + name: charms path: "./${{ matrix.part }}/*.charm" functional-test: @@ -111,11 +112,12 @@ jobs: strategy: matrix: part: ${{ fromJson(needs.modifiedparts.outputs.parts) }} + fail-fast: false steps: - name: Download charm uses: actions/download-artifact@v4 with: - name: charm-${{ matrix.part }} + name: charms path: ~/artifacts/ - name: Checkout code diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index dd3976be..7885558e 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -9,6 +9,16 @@ function cleaript() { } +function cacheimgs() { + lxc launch $1 ctemp 2>&1 >/dev/null + lxc launch $1 vmtemp --vm -c limits.cpu=2 -c limits.memory=4GiB -d root,size=25GiB 2>&1 >/dev/null + lxc stop ctemp + lxc delete ctemp + sleep 60 + lxc stop vmtemp + lxc delete vmtemp +} + run="${1}" shift From e0ef5e7984ee68da7a4363a5fadfc4b7d4259dd5 Mon Sep 17 00:00:00 2001 From: Luciano Lo Giudice Date: Wed, 12 Feb 2025 14:41:12 -0300 Subject: [PATCH 2663/2699] Update build-and-test.yml Fix artifact uploading.
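With the v4 artifact actions an artifact name must be unique per workflow run, so the upload goes back to a per-part name and the download side collects by pattern. A minimal sketch of the v4 pattern (step layout is illustrative; only the name/pattern/merge-multiple keys matter):

    - uses: actions/upload-artifact@v4
      with:
        name: charm-artifact-${{ matrix.part }}
        path: "./${{ matrix.part }}/*.charm"

    - uses: actions/download-artifact@v4
      with:
        pattern: charm-artifact-*
        merge-multiple: true
        path: ~/artifacts/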
--- .github/workflows/build-and-test.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 37f34741..d5abd609 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -99,7 +99,7 @@ jobs: - name: Upload built charm uses: actions/upload-artifact@v4 with: - name: charms + name: charm-artifact-${{ matrix.part }} path: "./${{ matrix.part }}/*.charm" functional-test: @@ -117,7 +117,9 @@ jobs: - name: Download charm uses: actions/download-artifact@v4 with: - name: charms + name: charm-artifact-${{ matrix.part }} + pattern: charm-artifact-* + merge-multiple: true path: ~/artifacts/ - name: Checkout code From 0dec56711274a24d34a3e0144e2359c2f2479b7e Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 13 Feb 2025 12:22:29 +0100 Subject: [PATCH 2664/2699] Add global test constraints Add global test constraints with boto3 restriction Signed-off-by: Peter Sabaini --- ceph-radosgw/tox.ini | 1 + constraints/test-constraints.txt | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 constraints/test-constraints.txt diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index 9e509733..aa415332 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -35,6 +35,7 @@ passenv = TEST_* deps = -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt} + -c {toxinidir}/../constraints/test-constraints.txt -r{toxinidir}/test-requirements.txt [testenv:build] diff --git a/constraints/test-constraints.txt b/constraints/test-constraints.txt new file mode 100644 index 00000000..31e245a8 --- /dev/null +++ b/constraints/test-constraints.txt @@ -0,0 +1,2 @@ +# https://github.com/boto/boto3/issues/4392 +boto3<1.36.0 From ef65d33eebfcda96762b39a32c1adbb942f1d710 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 13 Feb 2025 18:05:32 +0100 Subject: [PATCH 2665/2699] Testing: pre-heat juju machine alloc Trying to make spawning more robust by first creating two dummy machines. Also cf.
https://github.com/juju/juju/issues/18900 Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 2 +- tests/scripts/actionutils.sh | 16 +++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index d5abd609..ead1ec15 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -156,7 +156,7 @@ jobs: - name: Run the tests run: | date - ~/actionutils.sh cacheimgs "ubuntu:24.04" + ~/actionutils.sh cacheimgs "ubuntu@24.04" mv ~/artifacts/*.charm ./ if [[ -f "./${{ matrix.part }}/src/tox.ini" ]]; then tox -c ${{ matrix.part }}/src -e func-target -- noble-caracal diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index 7885558e..16a55154 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -10,13 +10,15 @@ function cleaript() { } function cacheimgs() { - lxc launch $1 ctemp 2>&1 >/dev/null - lxc launch $1 vmtemp --vm -c limits.cpu=2 -c limits.memory=4GiB -d root,size=25GiB 2>&1 >/dev/null - lxc stop ctemp - lxc delete ctemp - sleep 60 - lxc stop vmtemp - lxc delete vmtemp + local base="${1?missing}" + juju add-machine --base "$base" + sleep 10 + juju add-machine --base "$base" --constraints "virt-type=virtual-machine" + while [ "$(juju machines | egrep -wc 'started')" -ne 2 ]; do + sleep 2 + done + juju machines | awk '/started/{ print $1 }' | while read n; do juju remove-machine --force --no-prompt $n ; done + sleep 5 } run="${1}" From b12129d35e7bf34190af9616bdcafd5c176ab03b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 13 Feb 2025 21:21:41 +0100 Subject: [PATCH 2666/2699] Tests: add juju model configs Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 14 ++++---------- tests/configs/model-defaults.yaml | 4 ++++ tests/scripts/actionutils.sh | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 tests/configs/model-defaults.yaml diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index ead1ec15..9246d30d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -142,21 +142,15 @@ jobs: lxd init --minimal fi - - name: Install dependencies + - name: Install and configure tests run: | - sudo apt -y install tox - if [ ! -d "$HOME/.local/share/juju" ]; then - sudo snap install juju --channel=3.6/stable - mkdir -p ~/.local/share/juju - juju bootstrap localhost localhost - fi - - sudo snap install --classic juju-crashdump + date + ~/actionutils.sh setup_functest + ~/actionutils.sh cacheimgs "ubuntu@24.04" - name: Run the tests run: | date - ~/actionutils.sh cacheimgs "ubuntu@24.04" mv ~/artifacts/*.charm ./ if [[ -f "./${{ matrix.part }}/src/tox.ini" ]]; then tox -c ${{ matrix.part }}/src -e func-target -- noble-caracal diff --git a/tests/configs/model-defaults.yaml b/tests/configs/model-defaults.yaml new file mode 100644 index 00000000..25a325c9 --- /dev/null +++ b/tests/configs/model-defaults.yaml @@ -0,0 +1,4 @@ +test-mode: true +automatically-retry-hooks: true +logging-config: "=DEBUG" +enable-os-upgrade: false diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index 16a55154..71fd6410 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -21,6 +21,20 @@ function cacheimgs() { sleep 5 } +function setup_functest() { + sudo apt -y install tox + if [ ! 
-d "$HOME/.local/share/juju" ]; then + sudo snap install juju --channel=3.6/stable + mkdir -p ~/.local/share/juju + juju bootstrap \ + --auto-upgrade=false \ + --model-default=tests/configs/model-defaults.yaml \ + localhost localhost + fi + sudo snap install --classic juju-crashdump + +} + run="${1}" shift From c02a6bd80ccb37b1c452751c24d417f9233e036c Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 14 Feb 2025 14:05:22 +0100 Subject: [PATCH 2667/2699] Tests: add zaza conf, normalize test bundles (#27) Signed-off-by: Peter Sabaini --- .../tests/bundles/jammy-caracal.yaml | 1 + .../bundles/jammy-caracal-multisite.yaml | 6 +- ceph-radosgw/tests/bundles/noble-caracal.yaml | 61 +++++++++++++++++++ tests/configs/dot.zaza.yaml | 6 ++ tests/scripts/actionutils.sh | 2 +- 5 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 ceph-radosgw/tests/bundles/noble-caracal.yaml create mode 100644 tests/configs/dot.zaza.yaml diff --git a/ceph-dashboard/tests/bundles/jammy-caracal.yaml b/ceph-dashboard/tests/bundles/jammy-caracal.yaml index f9a9f93c..1920ac4b 100644 --- a/ceph-dashboard/tests/bundles/jammy-caracal.yaml +++ b/ceph-dashboard/tests/bundles/jammy-caracal.yaml @@ -49,6 +49,7 @@ applications: channel: latest/edge ceph-dashboard: charm: ch:ceph-dashboard + channel: latest/edge options: public-hostname: 'ceph-dashboard.zaza.local' prometheus: diff --git a/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml index 116737b5..4a5c3cb5 100644 --- a/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml @@ -19,7 +19,8 @@ machines: applications: ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source @@ -27,7 +28,8 @@ applications: - '0' secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source diff --git a/ceph-radosgw/tests/bundles/noble-caracal.yaml b/ceph-radosgw/tests/bundles/noble-caracal.yaml new file mode 100644 index 00000000..de1cbf2d --- /dev/null +++ b/ceph-radosgw/tests/bundles/noble-caracal.yaml @@ -0,0 +1,61 @@ +options: + source: &source distro + +series: noble + +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + '1': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '2': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '3': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '4': + '5': + '6': + +applications: + ceph-radosgw: + charm: ch:ceph-radosgw + channel: latest/edge + num_units: 1 + options: + source: *source + to: + - '0' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'loop,10G' + options: + source: *source + osd-devices: '/srv/ceph /dev/test-non-existent' + to: + - '1' + - '2' + - '3' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: 3 + source: *source + to: + - '4' + - '5' + - '6' + channel: latest/edge + +relations: + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + + - - 'ceph-radosgw:mon' + - 'ceph-mon:radosgw' diff --git a/tests/configs/dot.zaza.yaml b/tests/configs/dot.zaza.yaml new file mode 100644 index 00000000..4f0579f7 --- /dev/null +++ b/tests/configs/dot.zaza.yaml @@ -0,0 +1,6 @@ +--- +model_settings: + logging-config: "=INFO;unit=DEBUG" + +runtime_config: + TEST_MAX_RESOLVE_COUNT: 5 diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index 71fd6410..0867f7c8 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -32,7 +32,7 @@ function setup_functest() { localhost localhost fi sudo snap install --classic juju-crashdump - + cp tests/configs/dot.zaza.yaml ~/.zaza.yaml } run="${1}" From de05519e48dc4528f234159ca1cbdd2576e203c9 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 20 Feb 2025 11:13:34 +0100 Subject: [PATCH 2668/2699] Migration: test bundles Signed-off-by: Peter Sabaini --- ceph-dashboard/tests/tests.yaml | 7 +- ceph-dashboard/tox.ini | 2 +- ceph-fs/src/tests/bundles/jammy-caracal.yaml | 33 ++- ceph-iscsi/tests/bundles/jammy-caracal.yaml | 3 +- ceph-mon/tests/bundles/jammy-caracal.yaml | 242 +----------------- ceph-nfs/tests/bundles/jammy-caracal.yaml | 72 ++++++ ceph-osd/tests/bundles/jammy-caracal.yaml | 211 +-------------- ceph-proxy/tests/bundles/jammy-caracal.yaml | 171 +------------ .../src/tests/bundles/jammy-caracal.yaml | 148 +++-------- 9 files changed, 178 insertions(+), 711 deletions(-) create mode 100644 ceph-nfs/tests/bundles/jammy-caracal.yaml diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index af876141..56237b66 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -7,12 +7,10 @@ dev_bundles: - jammy-caracal configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - - tests.target.setup.check_dashboard_cert - - tests.target.setup.set_grafana_url + - tests.target.check_dashboard_cert tests: - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - tests.target.CephDashboardTest - - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest target_deploy_status: ceph-dashboard: workload-status: blocked @@ -20,9 +18,6 @@ target_deploy_status: vault: workload-status: blocked workload-status-message-prefix: Vault needs to be initialized - grafana: - workload-status: active - workload-status-message-prefix: Started prometheus2: workload-status: active workload-status-message-prefix: Ready diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 3428801d..6c690f69 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -38,7 +38,7 @@ allowlist_externals = add-to-archive.py bash charmcraft - rename.sh + 
{toxinidir}/rename.sh ls pwd passenv = diff --git a/ceph-fs/src/tests/bundles/jammy-caracal.yaml b/ceph-fs/src/tests/bundles/jammy-caracal.yaml index 82ad470b..a1bbd92c 100644 --- a/ceph-fs/src/tests/bundles/jammy-caracal.yaml +++ b/ceph-fs/src/tests/bundles/jammy-caracal.yaml @@ -1,32 +1,45 @@ variables: openstack-origin: &openstack-origin cloud:jammy-caracal -local_overlay_enabled: False - series: &series jammy +machines: + '0': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine + '1': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine + '2': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine + '3': + '4': + '5': + applications: - ubuntu: # used to test mounts - charm: ch:ubuntu - num_units: 2 ceph-fs: - charm: ../../../ceph-fs.charm + charm: ch:ceph-fs + channel: latest/edge num_units: 1 options: source: *openstack-origin pool-type: erasure-coded ec-profile-k: 4 ec-profile-m: 2 + to: + - '2' ceph-osd: charm: ch:ceph-osd - num_units: 6 + num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'loop,10G' options: osd-devices: '/dev/test-non-existent' source: *openstack-origin channel: latest/edge + to: + - '0' + - '1' + - '2' ceph-mon: charm: ch:ceph-mon @@ -35,6 +48,10 @@ applications: monitor-count: '3' source: *openstack-origin channel: latest/edge + to: + - '3' + - '4' + - '5' relations: - - 'ceph-mon:mds' diff --git a/ceph-iscsi/tests/bundles/jammy-caracal.yaml b/ceph-iscsi/tests/bundles/jammy-caracal.yaml index a4dd5869..01dc7763 100644 --- a/ceph-iscsi/tests/bundles/jammy-caracal.yaml +++ b/ceph-iscsi/tests/bundles/jammy-caracal.yaml @@ -34,7 +34,8 @@ applications: - '14' - '15' ceph-iscsi: - charm: ../../ceph-iscsi.charm + charm: ch:ceph-iscsi + channel: latest/edge num_units: 4 options: gateway-metadata-pool: iscsi-foo-metadata diff --git a/ceph-mon/tests/bundles/jammy-caracal.yaml b/ceph-mon/tests/bundles/jammy-caracal.yaml index e6e587c0..4209121d 100644 --- a/ceph-mon/tests/bundles/jammy-caracal.yaml +++ b/ceph-mon/tests/bundles/jammy-caracal.yaml @@ -1,261 +1,45 @@ variables: openstack-origin: &openstack-origin cloud:jammy-caracal -local_overlay_enabled: False +series: &series jammy -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': - constraints: mem=3072M '1': - constraints: mem=3072M '2': - constraints: mem=3072M '3': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine '4': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster + ceph-mon: + charm: ch:ceph-mon + channel: latest/edge num_units: 3 + options: + monitor-count: 3 + source: *openstack-origin to: - '0' - '1' - '2' - channel: 8.0/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '9' - channel: 3.9/edge ceph-osd: charm: ch:ceph-osd num_units: 3 - storage: - osd-devices: '10G' + channel: latest/edge options: source: *openstack-origin - osd-devices: '/dev/test-non-existent' + storage: + osd-devices: 'loop,10G' to: - '3' - '4' - '5' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - channel: latest/edge - num_units: 3 - options: - source: *openstack-origin - monitor-count: '3' - to: - - '6' - - '7' - - '8' - - ceph-fs: - charm: ch:ceph-fs - num_units: 1 - options: - source: *openstack-origin - channel: latest/edge - to: - - '17' - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - libvirt-image-backend: rbd - to: - - '11' - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: latest/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - block-device: 'None' - glance-api-version: '2' - openstack-origin: *openstack-origin - to: - - '13' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: latest/edge - - prometheus2: - charm: ch:prometheus2 - num_units: 1 - to: - - '16' relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - nova-compute:ceph-access - - cinder-ceph:ceph-access - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' 
- - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-mon:client' - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - - 'ceph-mon:mds' - - 'ceph-fs:ceph-mds' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'ceph-mon:prometheus' - - 'prometheus2:target' diff --git a/ceph-nfs/tests/bundles/jammy-caracal.yaml b/ceph-nfs/tests/bundles/jammy-caracal.yaml new file mode 100644 index 00000000..b4185ea1 --- /dev/null +++ b/ceph-nfs/tests/bundles/jammy-caracal.yaml @@ -0,0 +1,72 @@ +options: + source: &source cloud:jammy-caracal + +machines: + '0': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '1': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '2': + constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine + '3': + '4': + '5': + +local_overlay_enabled: False +series: noble +applications: + ceph-nfs: + charm: ch:ceph-nfs + channel: latest/edge + num_units: 2 + options: + source: *source + to: + - '3' + - '4' + ceph-osd: + charm: ch:ceph-osd + channel: latest/edge + num_units: 3 + storage: + osd-devices: 'loop,10G' + options: + source: *source + to: + - '0' + - '1' + - '2' + ceph-mon: + charm: ch:ceph-mon + channel: latest/edge + num_units: 3 + options: + monitor-count: '3' + source: *source + to: + - '3' + - '4' + - '5' + ceph-fs: + charm: ch:ceph-fs + channel: latest/edge + num_units: 1 + options: + source: *source + to: + - '2' + hacluster: + charm: ch:hacluster + channel: 2.4/edge + options: + cluster_count: 2 + +relations: + - - 'ceph-mon:client' + - 'ceph-nfs:ceph-client' + - - 'ceph-osd:mon' + - 'ceph-mon:osd' + - - 'ceph-fs' + - 'ceph-mon' + - - 'ceph-nfs:ha' + - 'hacluster:ha' diff --git a/ceph-osd/tests/bundles/jammy-caracal.yaml b/ceph-osd/tests/bundles/jammy-caracal.yaml index b9fbbeaf..5d34f4cd 100644 --- a/ceph-osd/tests/bundles/jammy-caracal.yaml +++ b/ceph-osd/tests/bundles/jammy-caracal.yaml @@ -1,235 +1,44 @@ variables: openstack-origin: &openstack-origin cloud:jammy-caracal -series: jammy +series: &series jammy -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' machines: '0': - constraints: mem=3072M '1': - constraints: mem=3072M '2': - constraints: mem=3072M '3': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '4': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - nova-cloud-controller-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - placement-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster + ceph-mon: + charm: ch:ceph-mon + channel: latest/edge num_units: 3 + options: + monitor-count: 3 to: - '0' - '1' - '2' - channel: 8.0/edge ceph-osd: charm: ch:ceph-osd channel: latest/edge num_units: 3 storage: - osd-devices: 'cinder,10G,2' + osd-devices: 'loop,5G,2' options: - osd-devices: '/dev/test-non-existent' source: *openstack-origin - aa-profile-mode: enforce to: - '3' - '4' - '5' - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - to: - - '9' - channel: 3.9/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '10' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '11' - channel: latest/edge - - glance: - expose: True - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '12' - channel: latest/edge - - cinder: - expose: True - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: 'None' - glance-api-version: '2' - to: - - '13' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - channel: latest/edge - - nova-cloud-controller: - expose: True - charm: ch:nova-cloud-controller - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: latest/edge - - placement: - charm: ch:placement - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: latest/edge - relations: - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:image-service' - - 'glance:image-service' - - - - 'nova-compute:ceph' - - 'ceph-mon:client' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:amqp' - - 'rabbitmq-server:amqp' - - - - 'glance:ceph' - - 'ceph-mon:client' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:image-service' - - 'glance:image-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 
'cinder-ceph:ceph' - - 'ceph-mon:client' - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - - 'nova-cloud-controller:shared-db' - - 'nova-cloud-controller-mysql-router:shared-db' - - - 'nova-cloud-controller-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-cloud-controller:identity-service' - - 'keystone:identity-service' - - - - 'nova-cloud-controller:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-cloud-controller:cloud-compute' - - 'nova-compute:cloud-compute' - - - - 'nova-cloud-controller:image-service' - - 'glance:image-service' - - - - 'placement:shared-db' - - 'placement-mysql-router:shared-db' - - - 'placement-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'placement' - - 'keystone' - - - - 'placement' - - 'nova-cloud-controller' - - - - 'cinder-ceph:ceph-access' - - 'nova-compute:ceph-access' diff --git a/ceph-proxy/tests/bundles/jammy-caracal.yaml b/ceph-proxy/tests/bundles/jammy-caracal.yaml index 4f16483b..cc8b24e9 100644 --- a/ceph-proxy/tests/bundles/jammy-caracal.yaml +++ b/ceph-proxy/tests/bundles/jammy-caracal.yaml @@ -3,212 +3,67 @@ variables: series: jammy -comment: -- 'machines section to decide order of deployment. database sooner = faster' machines: '0': - constraints: mem=3072M + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '1': - constraints: mem=3072M + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '2': - constraints: mem=3072M + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '3': '4': '5': '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - glance-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster + ceph-osd: + charm: ch:ceph-osd num_units: 3 + storage: + osd-devices: 'loop,10G' to: - '0' - '1' - '2' - channel: 8.0/edge + channel: latest/edge ceph-mon: charm: ch:ceph-mon num_units: 3 options: expected-osd-count: 3 - source: *openstack-origin + channel: latest/edge to: - '3' - '4' - '5' - channel: latest/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: latest/edge ceph-proxy: - charm: ../../ceph-proxy.charm + charm: ../../../ceph-proxy.charm num_units: 1 options: - source: *openstack-origin + source: distro to: - - '9' + - '6' ceph-radosgw: charm: ch:ceph-radosgw num_units: 1 - options: - source: *openstack-origin - to: - - '10' channel: latest/edge - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - to: - - '13' - channel: 3.9/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 
- options: - openstack-origin: *openstack-origin - to: - - '15' - channel: latest/edge - - ubuntu: # used to test mounts - charm: ch:ubuntu - num_units: 2 - to: - - '16' - - '17' - ceph-fs: charm: ch:ceph-fs channel: latest/edge num_units: 1 to: - - '18' + - '2' relations: - - - 'ceph-osd:mon' - 'ceph-mon:osd' - - 'ceph-proxy:radosgw' - 'ceph-radosgw:mon' - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - 'ceph-proxy:mds' - 'ceph-fs:ceph-mds' diff --git a/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml b/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml index 67f89686..0009b692 100644 --- a/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml +++ b/ceph-rbd-mirror/src/tests/bundles/jammy-caracal.yaml @@ -2,79 +2,29 @@ variables: openstack-origin: &openstack-origin cloud:jammy-caracal series: &series jammy -local_overlay_enabled: False - series: *series machines: '0': - constraints: "mem=3072M" + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine '1': - constraints: "mem=3072M" + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine '2': - constraints: "mem=3072M" + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine + '3': + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine + '4': + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine + '5': + constraints: cores=2 mem=4G root-disk=16G virt-type=virtual-machine + '6': + '7': + '8': + '9': + '10': + '11': applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: latest/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - channel: latest/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - block-device: None - glance-api-version: 2 - openstack-origin: *openstack-origin - channel: latest/edge - - cinder-ceph: - charm: ch:cinder-ceph - num_units: 0 - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - channel: latest/edge - ceph-mon: charm: ch:ceph-mon num_units: 3 @@ -82,23 +32,34 @@ applications: expected-osd-count: 3 source: *openstack-origin channel: latest/edge + to: + - '6' + - '7' + - '8' 
ceph-osd: charm: ch:ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'loop,4G' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' channel: latest/edge + to: + - '0' + - '1' + - '2' ceph-rbd-mirror: series: *series - charm: ../../../ceph-rbd-mirror.charm + charm: ch:ceph-rbd-mirror + channel: latest/edge num_units: 1 options: source: *openstack-origin + to: + - '0' ceph-mon-b: charm: ch:ceph-mon @@ -107,64 +68,37 @@ applications: expected-osd-count: 3 source: *openstack-origin channel: latest/edge + to: + - '9' + - '10' + - '11' ceph-osd-b: charm: ch:ceph-osd num_units: 3 storage: - osd-devices: 'cinder,10G' + osd-devices: 'loop,4G' options: source: *openstack-origin osd-devices: '/dev/test-non-existent' channel: latest/edge + to: + - '3' + - '4' + - '5' ceph-rbd-mirror-b: series: *series - charm: ../../../ceph-rbd-mirror.charm + charm: ch:ceph-rbd-mirror + channel: latest/edge num_units: 1 options: source: *openstack-origin + to: + - '3' relations: -- - keystone:shared-db - - keystone-mysql-router:shared-db -- - keystone-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - glance:shared-db - - glance-mysql-router:shared-db -- - glance-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - cinder:shared-db - - cinder-mysql-router:shared-db -- - cinder-mysql-router:db-router - - mysql-innodb-cluster:db-router - -- - rabbitmq-server - - cinder - -- - 'keystone:identity-service' - - cinder -- - 'keystone:identity-service' - - glance - -- - cinder - - cinder-ceph -- - cinder-ceph:ceph - - ceph-mon:client - -- - nova-compute:ceph-access - - cinder-ceph:ceph-access -- - nova-compute:amqp - - rabbitmq-server:amqp - -- - glance:image-service - - nova-compute:image-service -- - glance - - ceph-mon - - - ceph-mon:osd - ceph-osd:mon - - ceph-mon From 0b687d3a7a3bd40e7e52f5fd4e91b973f2f607cf Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 20 Feb 2025 16:00:39 +0100 Subject: [PATCH 2669/2699] Testing: fix ipv6 rendering Signed-off-by: Peter Sabaini --- ceph-dashboard/tests/target.py | 6 +++++- ceph-radosgw/tests/target.py | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ceph-dashboard/tests/target.py b/ceph-dashboard/tests/target.py index 90e764ab..63b05daf 100644 --- a/ceph-dashboard/tests/target.py +++ b/ceph-dashboard/tests/target.py @@ -26,6 +26,7 @@ import zaza import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.utilities.openstack as openstack_utils +import zaza.utilities.networking as network_utils X509_CERT = ''' @@ -372,9 +373,12 @@ def verify_ssl_config(self, ca_file): with attempt: rcs = collections.defaultdict(list) for unit in units: + ipaddr = network_utils.format_addr( + zaza.model.get_unit_public_address(unit) + ) req = self._run_request_get( 'https://{}:8443'.format( - zaza.model.get_unit_public_address(unit)), + ipaddr), verify=ca_file, allow_redirects=False) rcs[req.status_code].append( diff --git a/ceph-radosgw/tests/target.py b/ceph-radosgw/tests/target.py index 8eb10ff4..f00d818d 100644 --- a/ceph-radosgw/tests/target.py +++ b/ceph-radosgw/tests/target.py @@ -28,6 +28,7 @@ import zaza.model as zaza_model import zaza.openstack.utilities.ceph as zaza_ceph import zaza.openstack.utilities.generic as zaza_utils +import zaza.utilities.networking as network_utils import zaza.utilities.juju as juju_utils import zaza.openstack.utilities.openstack as zaza_openstack import zaza.openstack.utilities.generic as generic_utils @@ -243,6 +244,7 @@ def get_rgw_endpoint(self, 
unit_name: str): logging.info("Unit: {}, Endpoint: {}".format(unit_name, unit_address)) if unit_address is None: return None + unit_address = network_utils.format_addr(unit_address) # Evaluate port try: zaza_model.get_application("vault") From 9f9ab4b5e34ffea002d60013bbb76a38b0617a92 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 3 Mar 2025 13:43:19 +0100 Subject: [PATCH 2670/2699] ceph-fs: squid-jammy migration updates Signed-off-by: Peter Sabaini --- ceph-fs/charmcraft.yaml | 50 +++---- ceph-fs/requirements.txt | 13 -- ceph-fs/src/build.lock | 218 +++++++++++++----------------- ceph-fs/src/test-requirements.txt | 44 +++++- ceph-fs/src/tox.ini | 36 ++--- 5 files changed, 181 insertions(+), 180 deletions(-) diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 242452c5..00963764 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -2,34 +2,34 @@ type: charm parts: charm: - charm-entrypoint: "hooks/install" + plugin: reactive + reactive-charm-build-arguments: + - --binary-wheels-from-source + - -v build-packages: - tox - git - python3-dev - libffi-dev - override-build: | - apt-get install ca-certificates -y - tox -e build-reactive - override-stage: | - echo "Copying charm to staging area: $CRAFT_STAGE" - NAME=$(ls $CRAFT_PART_BUILD/build/builds) - cp -r $CRAFT_PART_BUILD/build/builds/$NAME/* $CRAFT_STAGE/ - override-prime: | - # For some reason, the normal priming chokes on the fact that there's a - # hooks directory. - cp -r $CRAFT_STAGE/* . + source: src/ + build-snaps: + - charm/latest/edge + build-environment: + - CHARM_INTERFACES_DIR: $CRAFT_PROJECT_DIR/interfaces/ + - CHARM_LAYERS_DIR: $CRAFT_PROJECT_DIR/layers/ -bases: - - build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@24.04 +build-base: ubuntu@24.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + s390x: + build-on: s390x + build-for: s390x + ppc64el: + build-on: ppc64el + build-for: ppc64el diff --git a/ceph-fs/requirements.txt b/ceph-fs/requirements.txt index 0c07cdd1..29081289 100644 --- a/ceph-fs/requirements.txt +++ b/ceph-fs/requirements.txt @@ -1,13 +1,3 @@ -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. 
See the 'global' dir contents for available -# choices of *requirements.txt files for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# NOTE(lourot): This might look like a duplication of test-requirements.txt but -# some tox targets use only test-requirements.txt whereas charm-build uses only -# requirements.txt -setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85 - # NOTE: newer versions of cryptography require a Rust compiler to build, # see # * https://github.com/openstack-charmers/zaza/issues/421 @@ -18,6 +8,3 @@ cryptography<3.4 git+https://github.com/juju/charm-tools.git simplejson - -# https://github.com/juju/charm-tools/issues/674 -path<17.0.0 diff --git a/ceph-fs/src/build.lock b/ceph-fs/src/build.lock index 220ef7cc..a41bbd2a 100644 --- a/ceph-fs/src/build.lock +++ b/ceph-fs/src/build.lock @@ -13,8 +13,8 @@ "item": "layer:basic", "url": "https://github.com/juju-solutions/layer-basic.git", "vcs": null, - "branch": "1edd5ccd3fb6240ca734b64a7dae9f1cb30b66f6", - "commit": "1edd5ccd3fb6240ca734b64a7dae9f1cb30b66f6" + "branch": "33526bd6aaa01ffe717a5c66ed62bc4790344ef2", + "commit": "33526bd6aaa01ffe717a5c66ed62bc4790344ef2" }, { "type": "layer", @@ -26,27 +26,19 @@ }, { "type": "layer", - "item": "layer:openstack-principle", - "url": "https://github.com/openstack/charm-layer-openstack-principle", + "item": "layer:ceph", + "url": "https://github.com/openstack/charm-layer-ceph.git", "vcs": null, - "branch": "2541283a7f9c3fa923c0fccd7257e217e11dadcd", - "commit": "2541283a7f9c3fa923c0fccd7257e217e11dadcd" + "branch": "17d40abd8d9ec3b8c32756ca981c80c4733c016f", + "commit": "17d40abd8d9ec3b8c32756ca981c80c4733c016f" }, { "type": "layer", - "item": "layer:openstack-api", - "url": "https://github.com/openstack/charm-layer-openstack-api", - "vcs": null, - "branch": "34311a62e963d0ce903b7ddb9d70b8f071f71651", - "commit": "34311a62e963d0ce903b7ddb9d70b8f071f71651" - }, - { - "type": "layer", - "item": "aodh", + "item": "ceph-fs", "url": null, "vcs": null, - "branch": null, - "commit": "" + "branch": "e6c6f13cde785174cee1a48a8df1c581e394fc3b", + "commit": "e6c6f13cde785174cee1a48a8df1c581e394fc3b" }, { "type": "layer", @@ -58,131 +50,107 @@ }, { "type": "layer", - "item": "interface:mysql-shared", - "url": "https://github.com/openstack/charm-interface-mysql-shared", - "vcs": null, - "branch": "f90f8c6edce21e4da3a872f0f9d6761730ce025d", - "commit": "f90f8c6edce21e4da3a872f0f9d6761730ce025d" - }, - { - "type": "layer", - "item": "interface:rabbitmq", - "url": "https://github.com/openstack/charm-interface-rabbitmq", - "vcs": null, - "branch": "383121fc584d2d3bf9d233eba0d3708398a4c468", - "commit": "383121fc584d2d3bf9d233eba0d3708398a4c468" - }, - { - "type": "layer", - "item": "interface:keystone", - "url": "https://github.com/openstack/charm-interface-keystone", - "vcs": null, - "branch": "36ea7b385a569ebabca7184ed4ce8420bb8fa61a", - "commit": "36ea7b385a569ebabca7184ed4ce8420bb8fa61a" - }, - { - "type": "layer", - "item": "interface:hacluster", - "url": "https://github.com/openstack/charm-interface-hacluster.git", + "item": "interface:ceph-mds", + "url": "https://opendev.org/openstack/charm-interface-ceph-client.git", "vcs": null, - "branch": "d91e83a1fa59a13f4160febfe536c68dc762e436", - "commit": "d91e83a1fa59a13f4160febfe536c68dc762e436" + "branch": "d9f3b53ca0cf30e47347a68beab59da5c03ce4c7", + "commit": "d9f3b53ca0cf30e47347a68beab59da5c03ce4c7" }, { "type": "layer", - "item": "interface:openstack-ha", - "url": 
"https://github.com/openstack/charm-interface-openstack-ha", + "item": "interface:cephfs_share", + "url": null, "vcs": null, - "branch": "a3b00e5b5fd857d130698c5cfe4b918877f0ab80", - "commit": "a3b00e5b5fd857d130698c5cfe4b918877f0ab80" + "branch": "e6c6f13cde785174cee1a48a8df1c581e394fc3b", + "commit": "e6c6f13cde785174cee1a48a8df1c581e394fc3b" }, { - "type": "layer", - "item": "interface:mongodb", - "url": "https://github.com/cloud-green/juju-relation-mongodb", + "type": "python_module", + "package": "dnspython3", "vcs": null, - "branch": "708b6ade852794b17447024a28e5ec2bb883538c", - "commit": "708b6ade852794b17447024a28e5ec2bb883538c" + "version": "1.12.0" }, { - "type": "layer", - "item": "interface:nrpe-external-master", - "url": "https://github.com/canonical/nrpe-external-master-interface", + "type": "python_module", + "package": "netifaces", "vcs": null, - "branch": "c58fe7b01a151d933681b5bf67e47ad3de65eeaa", - "commit": "c58fe7b01a151d933681b5bf67e47ad3de65eeaa" + "version": "0.11.0" }, { "type": "python_module", - "package": "trove-classifiers", + "package": "packaging", "vcs": null, - "version": "2024.4.10" + "version": "24.1" }, { "type": "python_module", - "package": "netifaces", + "package": "setuptools", "vcs": null, - "version": "0.11.0" + "version": "71.1.0" }, { "type": "python_module", - "package": "dnspython", + "package": "pyaml", "vcs": null, - "version": "2.6.1" + "version": "21.10.1" }, { "type": "python_module", - "package": "netaddr", + "package": "flit_scm", "vcs": null, - "version": "0.7.19" + "version": "1.7.0" }, { "type": "python_module", - "package": "packaging", - "vcs": null, - "version": "24.0" + "package": "charms.reactive", + "url": "git+https://github.com/canonical/charms.reactive.git", + "branch": "0dc82abb7ac01f288042ee44b56a9d428c8fc46c", + "version": "0dc82abb7ac01f288042ee44b56a9d428c8fc46c", + "vcs": "git" }, { "type": "python_module", - "package": "pluggy", + "package": "psutil", "vcs": null, - "version": "1.5.0" + "version": "6.0.0" }, { "type": "python_module", - "package": "anyio", + "package": "pyxattr", "vcs": null, - "version": "3.6.2" + "version": "0.8.1" }, { "type": "python_module", - "package": "tomli", + "package": "MarkupSafe", "vcs": null, - "version": "2.0.1" + "version": "2.1.5" }, { "type": "python_module", - "package": "pyaml", + "package": "trove_classifiers", "vcs": null, - "version": "21.10.1" + "version": "2024.7.2" }, { "type": "python_module", - "package": "idna", + "package": "flit_core", "vcs": null, - "version": "3.7" + "version": "3.9.0" }, { "type": "python_module", - "package": "calver", + "package": "PyYAML", "vcs": null, - "version": "2022.6.26" + "version": "6.0.1" }, { "type": "python_module", - "package": "wheel", - "vcs": null, - "version": "0.43.0" + "package": "charmhelpers", + "url": "git+https://github.com/juju/charm-helpers.git", + "branch": "1b2d4dc8f8effd79d782241a32a0485af1f01e73", + "version": "1b2d4dc8f8effd79d782241a32a0485af1f01e73", + "vcs": "git" }, { "type": "python_module", @@ -192,31 +160,27 @@ }, { "type": "python_module", - "package": "setuptools", + "package": "calver", "vcs": null, - "version": "62.1.0" + "version": "2022.6.26" }, { "type": "python_module", - "package": "psutil", + "package": "pluggy", "vcs": null, - "version": "5.9.8" + "version": "1.5.0" }, { "type": "python_module", - "package": "charms.openstack", - "url": "git+https://github.com/openstack/charms.openstack.git", - "branch": "stable/caracal", - "version": "018b72d734223db274b59f524b7887d8153cdb6d", - "vcs": "git" + 
"package": "pyudev", + "vcs": null, + "version": "0.24.3" }, { "type": "python_module", - "package": "charmhelpers", - "url": "git+https://github.com/juju/charm-helpers.git", - "branch": "stable/caracal", - "version": "807f705d1d132bedb62b2eb743403e1d5867360f", - "vcs": "git" + "package": "six", + "vcs": null, + "version": "1.16.0" }, { "type": "python_module", @@ -226,69 +190,79 @@ }, { "type": "python_module", - "package": "hatchling", + "package": "jinja2", "vcs": null, - "version": "1.24.2" + "version": "3.1.4" }, { "type": "python_module", - "package": "Cython", + "package": "pbr", "vcs": null, - "version": "0.29.37" + "version": "6.0.0" }, { "type": "python_module", - "package": "MarkupSafe", - "vcs": null, - "version": "2.1.5" + "package": "charms.ceph", + "url": "git+https://github.com/openstack/charms.ceph.git", + "branch": "64f3c1b12b14545a76321469478fb456b379832d", + "version": "64f3c1b12b14545a76321469478fb456b379832d", + "vcs": "git" }, { "type": "python_module", - "package": "PyYAML", + "package": "looseversion", "vcs": null, - "version": "6.0.1" + "version": "1.3.0" }, { "type": "python_module", - "package": "charms.reactive", + "package": "hatchling", "vcs": null, - "version": "1.5.2" + "version": "1.25.0" }, { "type": "python_module", - "package": "sniffio", + "package": "netaddr", "vcs": null, - "version": "1.3.0" + "version": "0.7.19" }, { "type": "python_module", - "package": "setuptools_scm", + "package": "Cython", "vcs": null, - "version": "6.4.2" + "version": "0.29.37" }, { "type": "python_module", - "package": "Jinja2", + "package": "charms.openstack", + "url": "git+https://github.com/openstack/charms.openstack.git", + "branch": "355d65f64cc1dac133d885aa7cfc58b1804a0c30", + "version": "355d65f64cc1dac133d885aa7cfc58b1804a0c30", + "vcs": "git" + }, + { + "type": "python_module", + "package": "wheel", "vcs": null, - "version": "3.1.3" + "version": "0.43.0" }, { "type": "python_module", - "package": "pbr", + "package": "dnspython", "vcs": null, - "version": "6.0.0" + "version": "2.6.1" }, { "type": "python_module", - "package": "flit_scm", + "package": "ceph_api", "vcs": null, - "version": "1.7.0" + "version": "0.4.0" }, { "type": "python_module", - "package": "flit_core", + "package": "setuptools_scm", "vcs": null, - "version": "3.9.0" + "version": "8.1.0" } ] } \ No newline at end of file diff --git a/ceph-fs/src/test-requirements.txt b/ceph-fs/src/test-requirements.txt index 9c7afb7f..43248e4c 100644 --- a/ceph-fs/src/test-requirements.txt +++ b/ceph-fs/src/test-requirements.txt @@ -2,11 +2,49 @@ # within individual charm repos. See the 'global' dir contents for available # choices of *requirements.txt files for OpenStack Charms: # https://github.com/openstack-charmers/release-tools + +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html # +cryptography<3.4 + +requests>=2.18.4 + +stestr>=2.2.0 + +# Dependency of stestr. Workaround for +# https://github.com/mtreinish/stestr/issues/145 +cliff<3.0.0 + +# Dependencies of stestr. Newer versions use keywords that didn't exist in +# python 3.5 yet (e.g. "ModuleNotFoundError") +importlib-metadata<3.0.0; python_version < '3.6' +importlib-resources<3.0.0; python_version < '3.6' -# Need tox to be available from tox... 
inception yes, but its a workaround for now -tox +# Some Zuul nodes sometimes pull newer versions of these dependencies which +# dropped support for python 3.5: +osprofiler<2.7.0;python_version<'3.6' +stevedore<1.31.0;python_version<'3.6' +debtcollector<1.22.0;python_version<'3.6' +oslo.utils<=3.41.0;python_version<'3.6' -# Functional Test Requirements (let Zaza's dependencies solve all dependencies here!) +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack + +# Needed for charm-glance: +git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' +tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' +tempest<24.0.0;python_version<'3.6' + +croniter # needed for charm-rabbitmq-server unit tests + +# icey: pyopenssl 22 introduces a requirement on newer OpenSSL which causes test +# failures. Pin pyopenssl to resolve the failure. +pyopenssl<=22.0.0 + +pydantic < 2 +cosl diff --git a/ceph-fs/src/tox.ini b/ceph-fs/src/tox.ini index b40d2952..6f3c7f9c 100644 --- a/ceph-fs/src/tox.ini +++ b/ceph-fs/src/tox.ini @@ -6,32 +6,29 @@ [tox] envlist = pep8 -skipsdist = True # NOTE: Avoid build/test env pollution by not enabling sitepackages. sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False -# NOTES: -# * We avoid the new dependency resolver by pinning pip < 20.3, see -# https://github.com/pypa/pip/issues/9187 -# * Pinning dependencies requires tox >= 3.2.0, see -# https://tox.readthedocs.io/en/latest/config.html#conf-requires -# * It is also necessary to pin virtualenv as a newer virtualenv would still -# lead to fetching the latest pip in the func* tox targets, see -# https://stackoverflow.com/a/38133283 -requires = pip < 20.3 - virtualenv < 20.0 -# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci -minversion = 3.18.0 [testenv] +# We use tox mainly for virtual environment management for test requirements +# and do not install the charm code as a Python package into that environment. +# Ref: https://tox.wiki/en/latest/config.html#skip_install +skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 + CHARMS_ARTIFACT_DIR={toxinidir}/../.. allowlist_externals = juju -passenv = HOME TERM CS_* OS_* TEST_* -deps = -r{toxinidir}/test-requirements.txt -install_command = - pip install {opts} {packages} +passenv = + HOME + TERM + CS_* + OS_* + TEST_* +deps = + -r{toxinidir}/test-requirements.txt [testenv:pep8] basepython = python3 @@ -52,6 +49,11 @@ basepython = python3 commands = functest-run-suite --keep-model --smoke +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + [testenv:func-target] basepython = python3 commands = From 76b2df6e641b44da2c084aa61310af0119fb5822 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 31 Jan 2025 11:53:49 +0100 Subject: [PATCH 2671/2699] Functest targets Make the workflow CI use the func-dev target. Add gate and dev targets to test config. This allows for more flexibility in configuring which tests should be run in CI and integration testing. 
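In practice the new targets map onto zaza bundle lists roughly as follows (a sketch: the --smoke and --dev flags are confirmed by the tox.ini hunks below, while the gate mapping is zaza's conventional default and is assumed here):

```sh
tox -e func-smoke                    # functest-run-suite --keep-model --smoke  -> smoke_bundles
tox -e func-dev                      # functest-run-suite --keep-model --dev    -> dev_bundles
tox -e func-target -- jammy-caracal  # run the suite against one named bundle
# a plain gate run (no flag) deploys gate_bundles
```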
Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 4 ++-- ceph-dashboard/tests/tests.yaml | 3 +++ ceph-fs/src/tests/tests.yaml | 10 ++++++++++ ceph-mon/tests/tests.yaml | 9 +++++++++ ceph-nfs/tests/tests.yaml | 3 +++ ceph-osd/tests/tests.yaml | 9 +++++++++ ceph-proxy/tests/tests.yaml | 9 +++++++++ ceph-rbd-mirror/src/tests/tests.yaml | 10 ++++++++++ ceph-rbd-mirror/src/tox.ini | 5 +++++ 9 files changed, 60 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 9246d30d..eff3a575 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -153,9 +153,9 @@ jobs: date mv ~/artifacts/*.charm ./ if [[ -f "./${{ matrix.part }}/src/tox.ini" ]]; then - tox -c ${{ matrix.part }}/src -e func-target -- noble-caracal + tox -c ${{ matrix.part }}/src -e func-dev else - tox -c ${{ matrix.part }} -e func-target -- noble-caracal + tox -c ${{ matrix.part }} -e func-dev fi - name: Generate crash dumps diff --git a/ceph-dashboard/tests/tests.yaml b/ceph-dashboard/tests/tests.yaml index 56237b66..9a2b09f2 100644 --- a/ceph-dashboard/tests/tests.yaml +++ b/ceph-dashboard/tests/tests.yaml @@ -5,6 +5,9 @@ smoke_bundles: - jammy-caracal dev_bundles: - jammy-caracal +dev_bundles: + - jammy-caracal + configure: - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation - tests.target.check_dashboard_cert diff --git a/ceph-fs/src/tests/tests.yaml b/ceph-fs/src/tests/tests.yaml index 4f3d041f..0742bfde 100644 --- a/ceph-fs/src/tests/tests.yaml +++ b/ceph-fs/src/tests/tests.yaml @@ -1,4 +1,14 @@ charm_name: ceph-fs + +gate_bundles: + - jammy-caracal + +smoke_bundles: + - jammy-caracal + +dev_bundles: + - jammy-caracal + tests: - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - tests.target.CephFSTests diff --git a/ceph-mon/tests/tests.yaml b/ceph-mon/tests/tests.yaml index a6fe3e8e..6925db91 100644 --- a/ceph-mon/tests/tests.yaml +++ b/ceph-mon/tests/tests.yaml @@ -1,5 +1,14 @@ charm_name: ceph-mon +gate_bundles: + - jammy-caracal + +smoke_bundles: + - jammy-caracal + +dev_bundles: + - jammy-caracal + tests: - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - tests.target.CephLowLevelTest diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index 0aec63fc..bde4618a 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -3,6 +3,9 @@ gate_bundles: - jammy-caracal smoke_bundles: - jammy-caracal +dev_bundles: + - jammy-caracal + configure: [] tests: - tests.nfs_ganesha.NfsGaneshaTest diff --git a/ceph-osd/tests/tests.yaml b/ceph-osd/tests/tests.yaml index df0b2b26..3b2c91b6 100644 --- a/ceph-osd/tests/tests.yaml +++ b/ceph-osd/tests/tests.yaml @@ -1,5 +1,14 @@ charm_name: ceph-osd +gate_bundles: + - jammy-caracal + +smoke_bundles: + - jammy-caracal + +dev_bundles: + - jammy-caracal + tests: - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - tests.target.CephLowLevelTest diff --git a/ceph-proxy/tests/tests.yaml b/ceph-proxy/tests/tests.yaml index 6a327566..04cfb599 100644 --- a/ceph-proxy/tests/tests.yaml +++ b/ceph-proxy/tests/tests.yaml @@ -1,5 +1,14 @@ charm_name: ceph-proxy +gate_bundles: + - jammy-caracal + +smoke_bundles: + - jammy-caracal + +dev_bundles: + - jammy-caracal + configure: - tests.target.setup_ceph_proxy diff --git a/ceph-rbd-mirror/src/tests/tests.yaml b/ceph-rbd-mirror/src/tests/tests.yaml index b5c5ad9a..46adf75b 100644 --- a/ceph-rbd-mirror/src/tests/tests.yaml +++ b/ceph-rbd-mirror/src/tests/tests.yaml @@ -1,4 +1,14 @@ 
charm_name: ceph-rbd-mirror + +gate_bundles: + - jammy-caracal + +smoke_bundles: + - jammy-caracal + +dev_bundles: + - jammy-caracal + configure: - tests.target.setup_rbd_mirror tests: diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index 8ffff0f9..815a499a 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -46,6 +46,11 @@ basepython = python3 commands = functest-run-suite --keep-model --smoke +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + [testenv:func-target] basepython = python3 commands = From 7515c29406c17ff021ed8819d5a4844b8bae9672 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 14 Feb 2025 09:24:45 +0100 Subject: [PATCH 2672/2699] Tests: fix ceph-nfs Signed-off-by: Peter Sabaini --- ceph-nfs/tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ceph-nfs/tests/tests.yaml b/ceph-nfs/tests/tests.yaml index bde4618a..f2ae0073 100644 --- a/ceph-nfs/tests/tests.yaml +++ b/ceph-nfs/tests/tests.yaml @@ -8,6 +8,7 @@ dev_bundles: configure: [] tests: + - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - tests.nfs_ganesha.NfsGaneshaTest target_deploy_status: ubuntu: From 5349adc624177a80240e99bf480ebed0ca0aa22f Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 14 Feb 2025 14:18:21 +0100 Subject: [PATCH 2673/2699] Central tox.ini model config Signed-off-by: Peter Sabaini --- ceph-dashboard/tox.ini | 3 ++- ceph-osd/tox.ini | 2 ++ ceph-radosgw/tox.ini | 2 ++ ceph-rbd-mirror/src/tox.ini | 3 +++ 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ceph-dashboard/tox.ini b/ceph-dashboard/tox.ini index 6c690f69..57b83c31 100644 --- a/ceph-dashboard/tox.ini +++ b/ceph-dashboard/tox.ini @@ -29,7 +29,8 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TEST_JUJU3=1 CHARM_DIR={envdir} - ZAZA_FEATURE_BUG472=1 + CHARMS_ARTIFACT_DIR={toxinidir}/.. + install_command = pip install {opts} {packages} commands = stestr run --slowest {posargs} diff --git a/ceph-osd/tox.ini b/ceph-osd/tox.ini index 5d5938f5..44ffd2eb 100644 --- a/ceph-osd/tox.ini +++ b/ceph-osd/tox.ini @@ -23,6 +23,8 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. + commands = stestr run --slowest {posargs} allowlist_externals = charmcraft diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini index aa415332..8d07014f 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -23,6 +23,8 @@ setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. + commands = stestr run --slowest {posargs} allowlist_externals = charmcraft diff --git a/ceph-rbd-mirror/src/tox.ini b/ceph-rbd-mirror/src/tox.ini index 815a499a..cf00338c 100644 --- a/ceph-rbd-mirror/src/tox.ini +++ b/ceph-rbd-mirror/src/tox.ini @@ -18,6 +18,9 @@ skip_missing_interpreters = False skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 + TEST_JUJU3=1 + CHARMS_ARTIFACT_DIR={toxinidir}/../.. 
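+    # CHARMS_ARTIFACT_DIR points zaza at the repository root ({toxinidir}/../.. from src/),
+    # where the CI workflow drops the locally built *.charm files before running tests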
+ allowlist_externals = juju passenv = HOME From f99b92ce823f9989b69e7da33309d3045f584a8c Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Thu, 13 Feb 2025 18:42:54 +0100 Subject: [PATCH 2674/2699] Testing: pre-heat in dummy model Signed-off-by: Peter Sabaini --- tests/scripts/actionutils.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index 0867f7c8..ae48ac97 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -11,13 +11,14 @@ function cleaript() { function cacheimgs() { local base="${1?missing}" + juju add-model dummy juju add-machine --base "$base" sleep 10 juju add-machine --base "$base" --constraints "virt-type=virtual-machine" while [ "$(juju machines | egrep -wc 'started')" -ne 2 ]; do sleep 2 done - juju machines | awk '/started/{ print $1 }' | while read n; do juju remove-machine --force --no-prompt $n ; done + juju destroy-model --force --timeout 20s --no-prompt dummy sleep 5 } From 3ecec8011ab36cb2815eaa3dcf6df55f1563e17b Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 14 Feb 2025 14:05:22 +0100 Subject: [PATCH 2675/2699] Tests: add zaza conf, normalize test bundles (#27) Signed-off-by: Peter Sabaini --- ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml index 19b5b68a..fee2fb98 100644 --- a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml +++ b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml @@ -19,7 +19,8 @@ machines: applications: ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source @@ -27,7 +28,8 @@ applications: - '0' secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm + charm: ch:ceph-radosgw + channel: latest/edge num_units: 1 options: source: *source From 7f874dcffe3b9814fdcbbc6fd974cf3b273f064d Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 17:32:18 +0100 Subject: [PATCH 2676/2699] Add Terraform modules for ceph-mon and ceph-osd --- .github/workflows/plan-terraform.yml | 59 +++++++++++++++++ ceph-mon/terraform/README.md | 96 ++++++++++++++++++++++++++++ ceph-mon/terraform/main.tf | 19 ++++++ ceph-mon/terraform/outputs.tf | 14 ++++ ceph-mon/terraform/variables.tf | 55 ++++++++++++++++ ceph-mon/terraform/versions.tf | 12 ++++ ceph-osd/terraform/README.md | 96 ++++++++++++++++++++++++++++ ceph-osd/terraform/main.tf | 20 ++++++ ceph-osd/terraform/outputs.tf | 13 ++++ ceph-osd/terraform/variables.tf | 61 ++++++++++++++++++ ceph-osd/terraform/versions.tf | 13 ++++ tests/terraform/default.yaml | 16 +++++ 12 files changed, 474 insertions(+) create mode 100644 .github/workflows/plan-terraform.yml create mode 100644 ceph-mon/terraform/README.md create mode 100644 ceph-mon/terraform/main.tf create mode 100644 ceph-mon/terraform/outputs.tf create mode 100644 ceph-mon/terraform/variables.tf create mode 100644 ceph-mon/terraform/versions.tf create mode 100644 ceph-osd/terraform/README.md create mode 100644 ceph-osd/terraform/main.tf create mode 100644 ceph-osd/terraform/outputs.tf create mode 100644 ceph-osd/terraform/variables.tf create mode 100644 ceph-osd/terraform/versions.tf create mode 100644 tests/terraform/default.yaml diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml new file mode 100644 index 00000000..1e357118 
--- /dev/null +++ b/.github/workflows/plan-terraform.yml @@ -0,0 +1,59 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +name: Plan Terraform tests + +on: + push: + branches: + - main + pull_request: + +jobs: + plan-terraform: + name: Plan Terraform with Juju + runs-on: ubuntu-latest + strategy: + matrix: + test: + - name: default + yaml: test/default.yaml + env: + TF_VAR_model: test + TF_VAR_manifest_yaml: ${{ matrix.test.yaml }} + steps: + - uses: actions/checkout@v4 + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + - uses: charmed-kubernetes/actions-operator@main + with: + provider: lxd + channel: latest/stable + - name: Prepare juju tf provider environment + run: | + CONTROLLER=$(juju whoami | yq .Controller) + JUJU_CONTROLLER_ADDRESSES="$(juju show-controller | yq '.[$CONTROLLER]'.details.\"api-endpoints\" | tr -d "[]' "|tr -d '"'|tr -d '\n')" + JUJU_USERNAME="$(cat ~/.local/share/juju/accounts.yaml | yq .controllers.$CONTROLLER.user|tr -d '"')" + JUJU_PASSWORD="$(cat ~/.local/share/juju/accounts.yaml | yq .controllers.$CONTROLLER.password|tr -d '"')" + + echo "JUJU_CONTROLLER_ADDRESSES=$JUJU_CONTROLLER_ADDRESSES" >> "$GITHUB_ENV" + echo "JUJU_USERNAME=$JUJU_USERNAME" >> "$GITHUB_ENV" + echo "JUJU_PASSWORD=$JUJU_PASSWORD" >> "$GITHUB_ENV" + { + echo 'JUJU_CA_CERT<<EOF' + juju show-controller | yq '.[$CONTROLLER]'.details.\"ca-cert\" + echo 'EOF' + } >> "$GITHUB_ENV" + - uses: hashicorp/setup-terraform@v3 + - run: terraform init + working-directory: ${{github.WORKSPACE}} + - run: terraform plan -out=tfplan + working-directory: ${{github.WORKSPACE}} + - run: terraform show tfplan + working-directory: ${{github.WORKSPACE}} + - uses: actions/upload-artifact@v4 + with: + name: ${{matrix.test.name}}-terraform-plan + path: ${{github.WORKSPACE}}/tfplan diff --git a/ceph-mon/terraform/README.md b/ceph-mon/terraform/README.md new file mode 100644 index 00000000..2f617ef5 --- /dev/null +++ b/ceph-mon/terraform/README.md @@ -0,0 +1,96 @@ +# Terraform Manifest Module + +This module reads a yaml configuration file and exports the values into terraform variables that +can be passed down into other modules. It is specifically tailored for working with +modules for charms defined with the +[juju terraform provider](https://registry.terraform.io/providers/juju/juju/latest/docs). It +simplifies having to pass every individual charm input as a variable in the product level +module for a given product. + +## Inputs + +| Name | Type | Description | Required | +|------------|--------|------------------------------------------------------------------------|----------| +| `manifest` | string | Absolute path to the yaml file with the config for a Juju application. | true | +| `app` | string | Name of the application to load the config for. | true | + +## Outputs + +All outputs are under `config` as a map of values below: + +| Name | Description | +|---------------|-------------------------------------------------------------------------------| +| `app_name` | Name of the application in Juju. | +| `base` | Base to deploy the charm as eg. ubuntu@24.04. | +| `channel` | Channel of the application being deployed. | +| `config` | Map of the config for the charm, values can be found under the specific charm | +| `constraints` | String of constraints when deploying the charm `cores=2 mem=4096M` | +| `resources` | List of resources to deploy with the charm. | +| `revision` | Specific revision of this charm to deploy.
| | `units` | Number of units of a charm to deploy | | `storage` | Storage configuration of a charm to deploy | + +## Usage + +This module is meant to be used as a helper for product modules. It is meant to allow the +user to have one manifest yaml file that can hold all the configuration for a solution +or deployment while also allowing the developer to avoid maintaining the configuration +between each charm and the overall product. + +### Defining a `manifest` in terraform + +The manifest module will have to be defined for each charm in question. Terraform will +load the config under the app key and output the values. If the key is not found in the +manifest, then the module will return `null` and terraform will ignore the configuration. + +``` +module "ceph_mon_config" { + source = "git::https://github.com/canonical/k8s-bundles//terraform/manifest?ref=main" + manifest = var.manifest_yaml + app = "ceph_mon" +} +``` + +These values can then be passed into a resource for a specific charm: + +``` +module "ceph_mon" { + source = "git::https://github.com/canonical/ceph-charms//ceph-mon/terraform?ref=main" + app_name = module.ceph_mon_config.config.app_name + channel = module.ceph_mon_config.config.channel + config = module.ceph_mon_config.config.config + constraints = module.ceph_mon_config.config.constraints + model = var.model + resources = module.ceph_mon_config.config.resources + revision = module.ceph_mon_config.config.revision + base = module.ceph_mon_config.config.base + units = module.ceph_mon_config.config.units +} +``` + +### Defining a manifest.yaml + +In the implementation of the product module, the user can specify their configuration using +a single manifest file similar to the one below: + +``` yaml +ceph_mon: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 1 + config: + monitor-count: 1 + expected-osd-count: 2 +ceph_osd: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 2 + storage: + [ + { type: "osd-devices", count: 1, size: "1G" }, + { type: "osd-journals", count: 1, size: "1G" }, + ] +``` + +Using the terraform in the above section, the `units`, `base`, `constraints`, and `channel` +forward into the `ceph-mon` deployment. diff --git a/ceph-mon/terraform/main.tf b/ceph-mon/terraform/main.tf new file mode 100644 index 00000000..c800e77a --- /dev/null +++ b/ceph-mon/terraform/main.tf @@ -0,0 +1,19 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +resource "juju_application" "ceph_mon" { + name = var.app_name + model = var.model + + charm { + name = "ceph-mon" + channel = var.channel + revision = var.revision + base = var.base + } + + config = var.config + constraints = var.constraints + units = var.units + resources = var.resources +} diff --git a/ceph-mon/terraform/outputs.tf b/ceph-mon/terraform/outputs.tf new file mode 100644 index 00000000..a994bec7 --- /dev/null +++ b/ceph-mon/terraform/outputs.tf @@ -0,0 +1,14 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +output "app_name" { + description = "Name of the deployed application." + value = juju_application.ceph_mon.name +} + +output "provides" { + value = { + osd = "osd" + client = "client" + } +} diff --git a/ceph-mon/terraform/variables.tf b/ceph-mon/terraform/variables.tf new file mode 100644 index 00000000..322aa49f --- /dev/null +++ b/ceph-mon/terraform/variables.tf @@ -0,0 +1,55 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details.
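+# Note: Terraform treats a null module input as unset, so null values coming
+# from the manifest fall back to the defaults declared below.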
+ +variable "app_name" { + description = "Name of the application in the Juju model." + type = string + default = "ceph-mon" +} + +variable "base" { + description = "Ubuntu bases to deploy the charm onto" + type = string + default = "ubuntu@24.04" +} + +variable "channel" { + description = "The channel to use when deploying a charm." + type = string + default = "squid/beta" +} + +variable "resources" { + description = "Resources to use with the application." + type = map(string) + default = {} +} + +variable "revision" { + description = "Revision number of the charm" + type = number + default = null +} + +variable "units" { + description = "Number of units to deploy" + type = number + default = 1 +} + +variable "config" { + description = "Application config. Details about available options can be found at https://charmhub.io/ceph-mon/configurations." + type = map(string) + default = {} +} + +variable "constraints" { + description = "Juju constraints to apply for this application." + type = string + default = "arch=amd64" +} + +variable "model" { + description = "Reference to a `juju_model`." + type = string +} diff --git a/ceph-mon/terraform/versions.tf b/ceph-mon/terraform/versions.tf new file mode 100644 index 00000000..f076a31e --- /dev/null +++ b/ceph-mon/terraform/versions.tf @@ -0,0 +1,12 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +terraform { + required_version = ">= 1.6" + required_providers { + juju = { + source = "juju/juju" + version = "~> 0.14.0" + } + } +} diff --git a/ceph-osd/terraform/README.md b/ceph-osd/terraform/README.md new file mode 100644 index 00000000..5edc649b --- /dev/null +++ b/ceph-osd/terraform/README.md @@ -0,0 +1,96 @@ +# Terraform Manifest Module + +This module reads a yaml configuration file and exports the values into terraform variables that +can be passed down into other modules. It is specifically tailored for working with +modules for charms defined with the +[juju terraform provider](https://registry.terraform.io/providers/juju/juju/latest/docs). It +simplifies having to pass every individual charm input as a variable in the product level +module for a given product. + +## Inputs + +| Name | Type | Description | Required | +|------------|--------|------------------------------------------------------------------------|----------| +| `manifest` | string | Absolute path to the yaml file with the config for a Juju application. | true | +| `app` | string | Name of the application to load the config for. | true | + +## Outputs + +All outputs are under `config` as a map of values below: + +| Name | Description | +|---------------|-------------------------------------------------------------------------------| +| `app_name` | Name of the application in Juju. | +| `base` | Base to deploy the charm as eg. ubuntu@24.04. | +| `channel` | Channel of the application being deployed. | +| `config` | Map of the config for the charm, values can be found under the specific charm | +| `constraints` | String of constraints when deploying the charm `cores=2 mem=4069M` | +| `resources` | List of resources to deploy with the charm. | +| `revision` | Specific revision of this charm to deploy. | +| `units` | Number of units of a charm to deploy | +| `storage` | Storage configuration of a charm to deploy | + +## Usage + +This module is meant to be use as a helper for product modules. 
It is meant to allow the +user to have one manifest yaml file that can hold all the configuration for a solution +or deployment while also allowing the developer to avoid maintaining the configuration +between each charm and the overall product. + +### Defining a `manifest` in terraform + +The manifest module will have to be defined for each charm in question. Terraform will +load the config under the app key and output the values. If the key is not found in the +manifest, then the module will return `null` and terraform will ignore the configuration. + +``` +module "ceph_osd_config" { + source = "git::https://github.com/canonical/k8s-bundles//terraform/manifest?ref=main" + manifest = var.manifest_yaml + app = "ceph_osd" +} +``` + +These values can then be passed into a resource for a specific charm: + +``` +module "ceph_osd" { + source = "git::https://github.com/canonical/ceph-charms//ceph-osd/terraform?ref=main" + app_name = module.ceph_osd_config.config.app_name + channel = module.ceph_osd_config.config.channel + config = module.ceph_osd_config.config.config + constraints = module.ceph_osd_config.config.constraints + model = var.model + resources = module.ceph_osd_config.config.resources + revision = module.ceph_osd_config.config.revision + base = module.ceph_osd_config.config.base + units = module.ceph_osd_config.config.units +} +``` + +### Defining a manifest.yaml + +In the implementation of the product module, the user can specify their configuration using +a single manifest file similar to the one below: + +``` yaml +ceph_mon: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 1 + config: + monitor-count: 1 + expected-osd-count: 2 +ceph_osd: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 2 + storage: + [ + { type: "osd-devices", count: 1, size: "1G" }, + { type: "osd-journals", count: 1, size: "1G" }, + ] +``` + +Using the terraform in the above section, the `units`, `base`, `constraints`, and `channel` +forward into the `ceph-osd` deployment. diff --git a/ceph-osd/terraform/main.tf b/ceph-osd/terraform/main.tf new file mode 100644 index 00000000..e6c5b939 --- /dev/null +++ b/ceph-osd/terraform/main.tf @@ -0,0 +1,20 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +resource "juju_application" "ceph_osd" { + name = var.app_name + model = var.model + + charm { + name = "ceph-osd" + channel = var.channel + revision = var.revision + base = var.base + } + + config = var.config + constraints = var.constraints + units = var.units + resources = var.resources + storage_directives = var.storage +} diff --git a/ceph-osd/terraform/outputs.tf b/ceph-osd/terraform/outputs.tf new file mode 100644 index 00000000..8b155a32 --- /dev/null +++ b/ceph-osd/terraform/outputs.tf @@ -0,0 +1,13 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +output "app_name" { + description = "Name of the deployed application." + value = juju_application.ceph_osd.name +} + +output "requires" { + value = { + mon = "mon" + } +} diff --git a/ceph-osd/terraform/variables.tf b/ceph-osd/terraform/variables.tf new file mode 100644 index 00000000..100c3b09 --- /dev/null +++ b/ceph-osd/terraform/variables.tf @@ -0,0 +1,61 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +variable "app_name" { + description = "Name of the application in the Juju model."
+ type = string + default = "ceph-osd" +} + +variable "base" { + description = "Ubuntu bases to deploy the charm onto" + type = string + default = "ubuntu@24.04" +} + +variable "channel" { + description = "The channel to use when deploying a charm." + type = string + default = "squid/beta" +} + +variable "resources" { + description = "Resources to use with the application." + type = map(string) + default = {} +} + +variable "revision" { + description = "Revision number of the charm" + type = number + default = null +} + +variable "units" { + description = "Number of units to deploy" + type = number + default = 1 +} + +variable "config" { + description = "Application config. Details about available options can be found at https://charmhub.io/ceph-osd/configurations." + type = map(string) + default = {} +} + +variable "storage" { + description = "Storage configuration for this application." + type = map(string) + default = {} +} + +variable "constraints" { + description = "Juju constraints to apply for this application." + type = string + default = "arch=amd64" +} + +variable "model" { + description = "Reference to a `juju_model`." + type = string +} diff --git a/ceph-osd/terraform/versions.tf b/ceph-osd/terraform/versions.tf new file mode 100644 index 00000000..56d51db6 --- /dev/null +++ b/ceph-osd/terraform/versions.tf @@ -0,0 +1,13 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +terraform { + + required_version = ">= 1.6" + required_providers { + juju = { + source = "juju/juju" + version = "~> 0.14.0" + } + } +} diff --git a/tests/terraform/default.yaml b/tests/terraform/default.yaml new file mode 100644 index 00000000..5c3d0b18 --- /dev/null +++ b/tests/terraform/default.yaml @@ -0,0 +1,16 @@ +ceph-mon: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 1 + config: + monitor-count: 1 + expected-osd-count: 2 +ceph-osd: + channel: quincy/stable + constraints: cores=2 mem=4G root-disk=16G + num_units: 2 + storage: + [ + { type: "osd-devices", count: 1, size: "1G" }, + { type: "osd-journals", count: 1, size: "1G" }, + ] From 5d5f991a478ea08b8e250a8adc995da876639845 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:10:53 +0100 Subject: [PATCH 2677/2699] add applications module --- terraform/applications.tf | 31 +++++++++++++++++++++++++++++++ terraform/configs.tf | 14 ++++++++++++++ terraform/integrations.tf | 14 ++++++++++++++ terraform/outputs.tf | 12 ++++++++++++ terraform/variables.tf | 12 ++++++++++++ terraform/versions.tf | 12 ++++++++++++ tests/terraform/main.tf | 31 +++++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+) create mode 100644 terraform/applications.tf create mode 100644 terraform/configs.tf create mode 100644 terraform/integrations.tf create mode 100644 terraform/outputs.tf create mode 100644 terraform/variables.tf create mode 100644 terraform/versions.tf create mode 100644 tests/terraform/main.tf diff --git a/terraform/applications.tf b/terraform/applications.tf new file mode 100644 index 00000000..0d612498 --- /dev/null +++ b/terraform/applications.tf @@ -0,0 +1,31 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. 
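+# Instantiates the per-charm modules with values resolved by the manifest
+# config modules in configs.tf; the mon/osd relation is declared in integrations.tf.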
+ +module "ceph_mon" { + source = "./ceph_mon" + + model = var.model + base = coalesce(module.ceph_mon_config.config.base, var.k8s.config.base) + constraints = coalesce(module.ceph_mon_config.config.constraints, var.k8s.config.constraints) + channel = coalesce(module.ceph_mon_config.config.channel, var.k8s.config.channel) + + config = coalesce(module.ceph_mon_config.config.config, {}) + resources = module.ceph_mon_config.config.resources + revision = module.ceph_mon_config.config.revision + units = module.ceph_mon_config.config.units +} + +module "ceph_osd" { + source = "./ceph_osd" + + model = var.model + base = coalesce(module.ceph_osd_config.config.base, var.k8s.config.base) + constraints = coalesce(module.ceph_osd_config.config.constraints, var.k8s.config.constraints) + channel = coalesce(module.ceph_osd_config.config.channel, var.k8s.config.channel) + + config = coalesce(module.ceph_osd_config.config.config, {}) + resources = module.ceph_osd_config.config.resources + storage = coalesce(module.ceph_osd_config.config.storage, {}) + revision = module.ceph_osd_config.config.revision + units = module.ceph_osd_config.config.units +} diff --git a/terraform/configs.tf b/terraform/configs.tf new file mode 100644 index 00000000..fd2899d1 --- /dev/null +++ b/terraform/configs.tf @@ -0,0 +1,14 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +module "ceph_mon_config" { + source = "../manifest" + manifest = var.manifest_yaml + app = "ceph_mon" +} + +module "ceph_osd_config" { + source = "../manifest" + manifest = var.manifest_yaml + app = "ceph_osd" +} diff --git a/terraform/integrations.tf b/terraform/integrations.tf new file mode 100644 index 00000000..03455214 --- /dev/null +++ b/terraform/integrations.tf @@ -0,0 +1,14 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +resource "juju_integration" "ceph_mon" { + model = var.model + application { + name = module.ceph_mon.app_name + endpoint = module.ceph_mon.provides.osd + } + application { + name = module.ceph_osd.app_name + endpoint = module.ceph_osd.requires.mon + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 00000000..a3c831cd --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,12 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +output "ceph_mon" { + description = "Object of the ceph_mon application." + value = module.ceph_mon +} + +output "ceph_osd" { + description = "Object of the ceph_osd application." + value = module.ceph_osd +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 00000000..60c709f0 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,12 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +variable "manifest_yaml" { + description = "Absolute path to the manifest yaml file for the charm configurations." + type = string +} + +variable "model" { + description = "Name of the Juju model to deploy to." + type = string +} diff --git a/terraform/versions.tf b/terraform/versions.tf new file mode 100644 index 00000000..e25f4383 --- /dev/null +++ b/terraform/versions.tf @@ -0,0 +1,12 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +terraform { + required_version = ">= 1.6" + required_providers { + juju = { + source = "juju/juju" + version = "~> 0.14.0" + } + } +} diff --git a/tests/terraform/main.tf b/tests/terraform/main.tf new file mode 100644 index 00000000..bc360716 --- /dev/null +++ b/tests/terraform/main.tf @@ -0,0 +1,31 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +terraform { + required_version = ">= 1.6" + required_providers { + juju = { + source = "juju/juju" + version = "~> 0.14.0" + } + } +} + +provider "juju" {} + +variable "manifest_yaml" { + description = "Path to the manifest YAML file" + type = string +} + +variable "model" { + description = "Name of the model to deploy to" + type = string + default = "my-canonical-k8s" +} + +module "ceph" { + source = "../../terraform" + model = var.model + manifest_yaml = var.manifest_yaml +} From 13cf6c95001e952843fe567d8514b2a39d7efaea Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:12:35 +0100 Subject: [PATCH 2678/2699] fix github ci --- .github/workflows/plan-terraform.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 1e357118..632762f2 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -21,6 +21,7 @@ jobs: env: TF_VAR_model: test TF_VAR_manifest_yaml: ${{ matrix.test.yaml }} + WORKING_DIR: 'terraform' steps: - uses: actions/checkout@v4 - name: Install Python @@ -48,12 +49,12 @@ jobs: } >> "$GITHUB_ENV" - uses: hashicorp/setup-terraform@v3 - run: terraform init - working-directory: ${{github.WORKSPACE}} + working-directory: ${{env.WORKING_DIR}} - run: terraform plan -out=tfplan - working-directory: ${{github.WORKSPACE}} + working-directory: ${{env.WORKING_DIR}} - run: terraform show tfplan - working-directory: ${{github.WORKSPACE}} + working-directory: ${{env.WORKING_DIR}} - uses: actions/upload-artifact@v4 with: name: ${{matrix.test.name}}-terraform-plan - path: ${{github.WORKSPACE}}/tfplan + path: ${{env.WORKING_DIR}}/tfplan From f75ae53d0aedf3ccd6a39470de52f2ec45535aff Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:19:53 +0100 Subject: [PATCH 2679/2699] add manifest module --- terraform/configs.tf | 4 ++-- terraform/manifest/main.tf | 6 ++++++ terraform/manifest/outputs.tf | 16 ++++++++++++++++ terraform/manifest/variables.tf | 12 ++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) create mode 100644 terraform/manifest/main.tf create mode 100644 terraform/manifest/outputs.tf create mode 100644 terraform/manifest/variables.tf diff --git a/terraform/configs.tf b/terraform/configs.tf index fd2899d1..13f50685 100644 --- a/terraform/configs.tf +++ b/terraform/configs.tf @@ -2,13 +2,13 @@ # See LICENSE file for licensing details. module "ceph_mon_config" { - source = "../manifest" + source = "./manifest" manifest = var.manifest_yaml app = "ceph_mon" } module "ceph_osd_config" { - source = "../manifest" + source = "./manifest" manifest = var.manifest_yaml app = "ceph_osd" } diff --git a/terraform/manifest/main.tf b/terraform/manifest/main.tf new file mode 100644 index 00000000..390769ed --- /dev/null +++ b/terraform/manifest/main.tf @@ -0,0 +1,6 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
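+# yamldecode(file(var.manifest)) parses the whole manifest once; the {} fallback
+# in lookup() means a missing app key yields null for every value in outputs.tf.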
+ +locals { + yaml_data = lookup(yamldecode(file("${var.manifest}")), var.app, {}) +} diff --git a/terraform/manifest/outputs.tf b/terraform/manifest/outputs.tf new file mode 100644 index 00000000..09464e0f --- /dev/null +++ b/terraform/manifest/outputs.tf @@ -0,0 +1,16 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +output "config" { + value = { + app_name = lookup(local.yaml_data, "app_name", null) + base = lookup(local.yaml_data, "base", null) + channel = lookup(local.yaml_data, "channel", null) + config = lookup(local.yaml_data, "config", null) + constraints = lookup(local.yaml_data, "constraints", null) + resources = lookup(local.yaml_data, "resources", null) + revision = lookup(local.yaml_data, "revision", null) + units = lookup(local.yaml_data, "units", null) + storage = lookup(local.yaml_data, "storage", null) + } +} diff --git a/terraform/manifest/variables.tf b/terraform/manifest/variables.tf new file mode 100644 index 00000000..b7b3fc44 --- /dev/null +++ b/terraform/manifest/variables.tf @@ -0,0 +1,12 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +variable "manifest" { + description = "Absolute path to a yaml file with config for a Juju application." + type = string +} + +variable "app" { + description = "Name of the application to load config for." + type = string +} From 1fc49e140d08bc6e0d74a8d6041a0e81a3585ff0 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:28:10 +0100 Subject: [PATCH 2680/2699] Fix source path --- terraform/applications.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/terraform/applications.tf b/terraform/applications.tf index 0d612498..794e8fdd 100644 --- a/terraform/applications.tf +++ b/terraform/applications.tf @@ -2,7 +2,7 @@ # See LICENSE file for licensing details.
module "ceph_mon" { - source = "./ceph_mon" + source = "../ceph-mon/terraform" model = var.model base = coalesce(module.ceph_mon_config.config.base, var.k8s.config.base) @@ -16,7 +16,7 @@ module "ceph_mon" { } module "ceph_osd" { - source = "./ceph_osd" + source = "../ceph-osd/terraform" model = var.model base = coalesce(module.ceph_osd_config.config.base, var.k8s.config.base) From 339345fda94c671e4d0984900b959fe27430773a Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:33:25 +0100 Subject: [PATCH 2681/2699] Remove obsolete k8s code --- terraform/applications.tf | 18 +++++++++--------- tests/terraform/main.tf | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/terraform/applications.tf b/terraform/applications.tf index 794e8fdd..8e404961 100644 --- a/terraform/applications.tf +++ b/terraform/applications.tf @@ -5,11 +5,11 @@ module "ceph_mon" { source = "../ceph-mon/terraform" model = var.model - base = coalesce(module.ceph_mon_config.config.base, var.k8s.config.base) - constraints = coalesce(module.ceph_mon_config.config.constraints, var.k8s.config.constraints) - channel = coalesce(module.ceph_mon_config.config.channel, var.k8s.config.channel) + base = module.ceph_mon_config.config.base + constraints = module.ceph_mon_config.config.constraints + channel = module.ceph_mon_config.config.channel - config = coalesce(module.ceph_mon_config.config.config, {}) + config = module.ceph_mon_config.config.config resources = module.ceph_mon_config.config.resources revision = module.ceph_mon_config.config.revision units = module.ceph_mon_config.config.units @@ -19,13 +19,13 @@ module "ceph_osd" { source = "../ceph-osd/terraform" model = var.model - base = coalesce(module.ceph_osd_config.config.base, var.k8s.config.base) - constraints = coalesce(module.ceph_osd_config.config.constraints, var.k8s.config.constraints) - channel = coalesce(module.ceph_osd_config.config.channel, var.k8s.config.channel) + base = module.ceph_osd_config.config.base + constraints = module.ceph_osd_config.config.constraints + channel = module.ceph_osd_config.config.channel - config = coalesce(module.ceph_osd_config.config.config, {}) + config = module.ceph_osd_config.config.config resources = module.ceph_osd_config.config.resources - storage = coalesce(module.ceph_osd_config.config.storage, {}) + storage = module.ceph_osd_config.config.storage revision = module.ceph_osd_config.config.revision units = module.ceph_osd_config.config.units } diff --git a/tests/terraform/main.tf b/tests/terraform/main.tf index bc360716..a7d0a5ee 100644 --- a/tests/terraform/main.tf +++ b/tests/terraform/main.tf @@ -21,7 +21,7 @@ variable "manifest_yaml" { variable "model" { description = "Name of the model to deploy to" type = string - default = "my-canonical-k8s" + default = "ceph-model" } module "ceph" { From 86532469ee59d2020352df3434a359dec29761e7 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Tue, 18 Feb 2025 18:39:38 +0100 Subject: [PATCH 2682/2699] Fix path --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 632762f2..8b602902 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -17,7 +17,7 @@ jobs: matrix: test: - name: default - yaml: test/default.yaml + yaml: ../tests/terraform/default.yaml env: TF_VAR_model: test TF_VAR_manifest_yaml: ${{ matrix.test.yaml }} From cc89c68a12fcf48816cd7243d1577d10e7ad3682 Mon Sep 17 
00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 14:33:55 +0100 Subject: [PATCH 2683/2699] Add terraform apply step --- .github/workflows/plan-terraform.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 8b602902..de7edcd9 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -54,6 +54,20 @@ jobs: working-directory: ${{env.WORKING_DIR}} - run: terraform show tfplan working-directory: ${{env.WORKING_DIR}} + - run: | + terraform apply -auto-approve + for i in {1..30}; do + if juju status --format=json | jq -e '.applications | all(.status.current == "active")' >/dev/null; then + echo "All applications are active" + exit 0 + fi + juju status + echo "Waiting for applications to become active..." + sleep 10 + done + echo "Timeout waiting for applications to become active" + exit 1 + working-directory: ${{env.WORKING_DIR}} - uses: actions/upload-artifact@v4 with: name: ${{matrix.test.name}}-terraform-plan From c658bb1ba27402e7e200addafaf6d5f876077623 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 14:40:35 +0100 Subject: [PATCH 2684/2699] add model --- .github/workflows/plan-terraform.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index de7edcd9..6cee19dc 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -55,6 +55,7 @@ jobs: - run: terraform show tfplan working-directory: ${{env.WORKING_DIR}} - run: | + juju add-model test terraform apply -auto-approve for i in {1..30}; do if juju status --format=json | jq -e '.applications | all(.status.current == "active")' >/dev/null; then From 36fb7514f82831a6fabebd752ff08a541e6ee2a1 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 14:52:16 +0100 Subject: [PATCH 2685/2699] fix example --- ceph-mon/terraform/README.md | 14 ++++++-------- ceph-osd/terraform/README.md | 14 ++++++-------- tests/terraform/default.yaml | 18 ++++++++---------- 3 files changed, 20 insertions(+), 26 deletions(-) diff --git a/ceph-mon/terraform/README.md b/ceph-mon/terraform/README.md index 2f617ef5..88248606 100644 --- a/ceph-mon/terraform/README.md +++ b/ceph-mon/terraform/README.md @@ -76,20 +76,18 @@ a single manifest file similar to the one below: ``` yaml ceph_mon: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 1 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 1 config: monitor-count: 1 expected-osd-count: 2 ceph_osd: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 2 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 2 storage: - [ - { type: "osd-devices", count: 1, size: "1G" }, - { type: "osd-journals", count: 1, size: "1G" }, - ] + osd-devices: 1G,1 + osd-journals: 1G,1 ``` Using the terraform in the above section, the `units`, `base`, `constraints`, and `channel` diff --git a/ceph-osd/terraform/README.md b/ceph-osd/terraform/README.md index 5edc649b..fe5edbe4 100644 --- a/ceph-osd/terraform/README.md +++ b/ceph-osd/terraform/README.md @@ -76,20 +76,18 @@ a single manifest file similar to the one below: ``` yaml ceph_mon: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 1 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 1 
config: monitor-count: 1 expected-osd-count: 2 ceph_osd: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 2 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 2 storage: - [ - { type: "osd-devices", count: 1, size: "1G" }, - { type: "osd-journals", count: 1, size: "1G" }, - ] + osd-devices: 1G,1 + osd-journals: 1G,1 ``` Using the terraform in the above section, the `units`, `base`, `constraints`, and `channel` diff --git a/tests/terraform/default.yaml b/tests/terraform/default.yaml index 5c3d0b18..3e4af66f 100644 --- a/tests/terraform/default.yaml +++ b/tests/terraform/default.yaml @@ -1,16 +1,14 @@ -ceph-mon: +ceph_mon: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 1 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 1 config: monitor-count: 1 expected-osd-count: 2 -ceph-osd: +ceph_osd: channel: quincy/stable - constraints: cores=2 mem=4G root-disk=16G - num_units: 2 + constraints: arch=amd64 cores=2 mem=8192M root-disk=16384M virt-type=virtual-machine + units: 2 storage: - [ - { type: "osd-devices", count: 1, size: "1G" }, - { type: "osd-journals", count: 1, size: "1G" }, - ] + osd-devices: 1G,1 + osd-journals: 1G,1 From 8385e1f7dba97d0e3b790a67960b6ae737e951e7 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 15:19:20 +0100 Subject: [PATCH 2686/2699] debug --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 6cee19dc..c2667ac1 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -58,7 +58,7 @@ jobs: juju add-model test terraform apply -auto-approve for i in {1..30}; do - if juju status --format=json | jq -e '.applications | all(.status.current == "active")' >/dev/null; then + if juju status --format=json | jq -e '.applications | all(.status.current == "active")'; then echo "All applications are active" exit 0 fi From 227be8d9b664b3ae671755677d0fd331b6761443 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 15:31:40 +0100 Subject: [PATCH 2687/2699] debug --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index c2667ac1..43a69150 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -62,7 +62,7 @@ jobs: echo "All applications are active" exit 0 fi - juju status + juju status --format=json | jq -e '.applications echo "Waiting for applications to become active..." sleep 10 done From 32484239d5ac12c6ef12cb9f20a5507bd4227612 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 15:40:04 +0100 Subject: [PATCH 2688/2699] debug --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 43a69150..74e1fb48 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -62,7 +62,7 @@ jobs: echo "All applications are active" exit 0 fi - juju status --format=json | jq -e '.applications + juju status --format=json | jq -e '.applications' echo "Waiting for applications to become active..." 
sleep 10 done From 51a77c19dbd1d84c476f7c0249d5a35e020e69f9 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 15:55:31 +0100 Subject: [PATCH 2689/2699] fix test --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 74e1fb48..647fc26d 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -58,7 +58,7 @@ jobs: juju add-model test terraform apply -auto-approve for i in {1..30}; do - if juju status --format=json | jq -e '.applications | all(.status.current == "active")'; then + if juju status --format=json | jq -e '.applications | all(.application-status.current == "active")'; then echo "All applications are active" exit 0 fi From cf208b8d1e66e28b4c0d3eaacbb49c08deea7454 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 17:17:12 +0100 Subject: [PATCH 2690/2699] fix test --- .github/workflows/plan-terraform.yml | 29 ++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 647fc26d..ae93e64b 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -56,17 +56,30 @@ jobs: working-directory: ${{env.WORKING_DIR}} - run: | juju add-model test - terraform apply -auto-approve - for i in {1..30}; do - if juju status --format=json | jq -e '.applications | all(.application-status.current == "active")'; then - echo "All applications are active" - exit 0 + set -e # Exit on error + + # Apply Terraform changes + terraform apply -auto-approve || { echo "Terraform apply failed"; exit 1; } + + # Wait for Juju applications to become active + MAX_RETRIES=30 + for i in $(seq 1 $MAX_RETRIES); do + echo "Checking Juju application statuses... Attempt $i/$MAX_RETRIES" + + # Fetch status JSON once and store it + STATUS_JSON=$(juju status --format=json) + + # Check if all applications are active + if echo "$STATUS_JSON" | jq -e '.applications | all(.["application-status"].current == "active")' > /dev/null; then + echo "✅ All applications are active" + exit 0 fi - juju status --format=json | jq -e '.applications' - echo "Waiting for applications to become active..." + + echo "ⳠWaiting for applications to become active..." 
sleep 10 done From 51a77c19dbd1d84c476f7c0249d5a35e020e69f9 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 15:55:31 +0100 Subject: [PATCH 2689/2699] fix test --- .github/workflows/plan-terraform.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 74e1fb48..647fc26d 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -58,7 +58,7 @@ jobs: juju add-model test terraform apply -auto-approve for i in {1..30}; do - if juju status --format=json | jq -e '.applications | all(.status.current == "active")'; then + if juju status --format=json | jq -e '.applications | all(.application-status.current == "active")'; then echo "All applications are active" exit 0 fi From cf208b8d1e66e28b4c0d3eaacbb49c08deea7454 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Fri, 21 Feb 2025 17:17:12 +0100 Subject: [PATCH 2690/2699] fix test --- .github/workflows/plan-terraform.yml | 29 ++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/.github/workflows/plan-terraform.yml b/.github/workflows/plan-terraform.yml index 647fc26d..ae93e64b 100644 --- a/.github/workflows/plan-terraform.yml +++ b/.github/workflows/plan-terraform.yml @@ -56,17 +56,30 @@ jobs: - run: | juju add-model test - terraform apply -auto-approve - for i in {1..30}; do - if juju status --format=json | jq -e '.applications | all(.application-status.current == "active")'; then - echo "All applications are active" - exit 0 + set -e # Exit on error + + # Apply Terraform changes + terraform apply -auto-approve || { echo "Terraform apply failed"; exit 1; } + + # Wait for Juju applications to become active + MAX_RETRIES=30 + for i in $(seq 1 $MAX_RETRIES); do + echo "Checking Juju application statuses... Attempt $i/$MAX_RETRIES" + + # Fetch status JSON once and store it + STATUS_JSON=$(juju status --format=json) + + # Check if all applications are active + if echo "$STATUS_JSON" | jq -e '.applications | all(.["application-status"].current == "active")' > /dev/null; then + echo "✅ All applications are active" + exit 0 fi - juju status --format=json | jq -e '.applications' - echo "Waiting for applications to become active..." + + echo "⏳ Waiting for applications to become active..."
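+ # (30 retries x 10s sleep: roughly five minutes for the deployment to settle)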
build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-fs/charmcraft.yaml b/ceph-fs/charmcraft.yaml index 00963764..326d045b 100644 --- a/ceph-fs/charmcraft.yaml +++ b/ceph-fs/charmcraft.yaml @@ -18,8 +18,8 @@ parts: - CHARM_INTERFACES_DIR: $CRAFT_PROJECT_DIR/interfaces/ - CHARM_LAYERS_DIR: $CRAFT_PROJECT_DIR/layers/ -base: ubuntu@24.04 -build-base: ubuntu@24.04 +base: ubuntu@22.04 +build-base: ubuntu@22.04 platforms: amd64: build-on: amd64 diff --git a/ceph-iscsi/charmcraft.yaml b/ceph-iscsi/charmcraft.yaml index 2edbf324..6eca8b72 100644 --- a/ceph-iscsi/charmcraft.yaml +++ b/ceph-iscsi/charmcraft.yaml @@ -20,19 +20,17 @@ parts: apt install -y ca-certificates update-ca-certificates -bases: - - build-on: - - name: ubuntu - channel: "22.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.10" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-mon/charmcraft.yaml b/ceph-mon/charmcraft.yaml index a58f4124..e36a47bf 100644 --- a/ceph-mon/charmcraft.yaml +++ b/ceph-mon/charmcraft.yaml @@ -2,18 +2,13 @@ type: charm parts: charm: - prime: - - actions/* - - lib/* - - templates/* - - files/* after: - - update-certificates + - update-certificates charm-python-packages: # Use the updated version of setuptools (needed by jinja2). - - setuptools + - setuptools build-packages: - - git + - git update-certificates: # Ensure that certificates in the base image are up-to-date. 
@@ -23,16 +18,17 @@ parts: apt install -y ca-certificates update-ca-certificates -bases: - - build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-nfs/charmcraft.yaml b/ceph-nfs/charmcraft.yaml index f87b7111..fe0f4b66 100644 --- a/ceph-nfs/charmcraft.yaml +++ b/ceph-nfs/charmcraft.yaml @@ -4,11 +4,6 @@ parts: charm: after: - update-certificates - charm-python-packages: - # NOTE(lourot): see - # * https://github.com/canonical/charmcraft/issues/551 - # * https://github.com/canonical/charmcraft/issues/632 - - setuptools < 58 build-packages: - git @@ -20,16 +15,18 @@ parts: apt install -y ca-certificates update-ca-certificates -bases: - - build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +build-base: ubuntu@24.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + s390x: + build-on: s390x + build-for: s390x + ppc64el: + build-on: ppc64el + build-for: ppc64el diff --git a/ceph-osd/charmcraft.yaml b/ceph-osd/charmcraft.yaml index 4190b63d..20675a9e 100644 --- a/ceph-osd/charmcraft.yaml +++ b/ceph-osd/charmcraft.yaml @@ -4,32 +4,18 @@ parts: charm: plugin: dump source: . - prime: - - actions/* - - files/* - - hooks/* - - lib/* - - templates/* - - actions.yaml - - config.yaml - - copyright - - hardening.yaml - - icon.svg - - LICENSE - - Makefile - - metadata.yaml - - README.md -bases: - - build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-proxy/charmcraft.yaml b/ceph-proxy/charmcraft.yaml index 09f03428..daa66338 100644 --- a/ceph-proxy/charmcraft.yaml +++ b/ceph-proxy/charmcraft.yaml @@ -21,13 +21,17 @@ parts: - metadata.yaml - README.md -bases: - - build-on: - - name: ubuntu - channel: "22.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-radosgw/charmcraft.yaml b/ceph-radosgw/charmcraft.yaml index e54ca1af..20675a9e 100644 --- a/ceph-radosgw/charmcraft.yaml +++ b/ceph-radosgw/charmcraft.yaml @@ -4,32 +4,18 @@ parts: charm: plugin: dump source: . 
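One quick way to confirm that a repacked charm really targets ubuntu@22.04 is to read the manifest charmcraft embeds in the zip; a sketch (the artifact name is illustrative):

    unzip -p ceph-osd_amd64.charm manifest.yaml | grep -A3 bases
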
- prime: - - actions/* - - files/* - - hooks/* - - lib/* - - templates/* - - actions.yaml - - config.yaml - - copyright - - hardening.yaml - - icon.svg - - LICENSE - - Makefile - - metadata.yaml - - README.md -bases: - - build-on: - - name: ubuntu - channel: "22.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x diff --git a/ceph-rbd-mirror/charmcraft.yaml b/ceph-rbd-mirror/charmcraft.yaml index 366bcbe0..6abcaad5 100644 --- a/ceph-rbd-mirror/charmcraft.yaml +++ b/ceph-rbd-mirror/charmcraft.yaml @@ -14,16 +14,17 @@ parts: - CHARM_INTERFACES_DIR: /root/project/interfaces/ - CHARM_LAYERS_DIR: /root/project/layers/ -bases: - - build-on: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 - run-on: - - name: ubuntu - channel: "20.04" - architectures: [amd64, s390x, ppc64el, arm64] - - name: ubuntu - channel: "22.04" - architectures: [amd64, s390x, ppc64el, arm64] +base: ubuntu@22.04 +platforms: + amd64: + build-on: amd64 + build-for: amd64 + arm64: + build-on: arm64 + build-for: arm64 + ppc64el: + build-on: ppc64el + build-for: ppc64el + s390x: + build-on: s390x + build-for: s390x From 34e4868f52f34eabfcf6473b05d167fda09c18ad Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 7 Mar 2025 11:52:07 +0100 Subject: [PATCH 2694/2699] ceph-iscsi: migration updates Signed-off-by: Peter Sabaini --- ceph-iscsi/README.md | 10 +- ceph-iscsi/build-requirements.txt | 7 ++ ceph-iscsi/rename.sh | 1 + ceph-iscsi/tests/bundles/focal-ec.yaml | 100 ------------------ ceph-iscsi/tests/bundles/focal.yaml | 97 ----------------- ceph-iscsi/tests/target.py | 39 ++++--- ceph-iscsi/tests/tests.yaml | 17 ++- ceph-iscsi/tox.ini | 56 ++++++---- .../unit_tests/test_ceph_iscsi_charm.py | 2 +- .../test_interface_ceph_iscsi_peer.py | 4 +- 10 files changed, 84 insertions(+), 249 deletions(-) create mode 100644 ceph-iscsi/build-requirements.txt delete mode 100644 ceph-iscsi/tests/bundles/focal-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/focal.yaml diff --git a/ceph-iscsi/README.md b/ceph-iscsi/README.md index 4dc4ff75..b2970374 100644 --- a/ceph-iscsi/README.md +++ b/ceph-iscsi/README.md @@ -4,7 +4,15 @@ The ceph-iscsi charm deploys the [Ceph iSCSI gateway service][ceph-iscsi-upstream]. The charm is intended to be used in conjunction with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms. -**NOTE**: This charm is deprecated. No new features will be introduced. +# Notice for developers +The functional tests are expected to fail for this charm. This is because the +kernel module needed for the charm to function properly (iscsi_tcp) seems to +no longer be available on the virtual machines that are deployed by default +on LXD. + +In order to debug this issue, developers should install the `linux-modules` and +`linux-modules-extra` packages that are native on the running kernel, and then +load the module and test that it's running (`modprobe iscsi_tcp` and `lsmod | grep iscsi`). 
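Spelled out as commands on an affected machine, the debugging recipe above looks roughly like this (package names follow standard Ubuntu kernel packaging; sudo assumed):

    # Install module packages matching the running kernel
    sudo apt install -y "linux-modules-$(uname -r)" "linux-modules-extra-$(uname -r)"

    # Load the transport module and confirm it is present
    sudo modprobe iscsi_tcp
    lsmod | grep iscsi
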
# Usage diff --git a/ceph-iscsi/build-requirements.txt b/ceph-iscsi/build-requirements.txt new file mode 100644 index 00000000..b6d2452f --- /dev/null +++ b/ceph-iscsi/build-requirements.txt @@ -0,0 +1,7 @@ +# NOTES(lourot): +# * We don't install charmcraft via pip anymore because it anyway spins up a +# container and scp the system's charmcraft snap inside it. So the charmcraft +# snap is necessary on the system anyway. +# * `tox -e build` successfully validated with charmcraft 1.2.1 + +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. diff --git a/ceph-iscsi/rename.sh b/ceph-iscsi/rename.sh index d0c35c97..528d20f8 100755 --- a/ceph-iscsi/rename.sh +++ b/ceph-iscsi/rename.sh @@ -11,3 +11,4 @@ then fi echo "Renaming charm here." mv ${charm}_*.charm ${charm}.charm +cp ${charm}.charm ../ diff --git a/ceph-iscsi/tests/bundles/focal-ec.yaml b/ceph-iscsi/tests/bundles/focal-ec.yaml deleted file mode 100644 index d72f97a1..00000000 --- a/ceph-iscsi/tests/bundles/focal-ec.yaml +++ /dev/null @@ -1,100 +0,0 @@ -variables: - source: &source cloud:focal-yoga - -local_overlay_enabled: False -series: focal -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.7/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0.19/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/focal.yaml b/ceph-iscsi/tests/bundles/focal.yaml deleted file mode 100644 index cab5a7d8..00000000 --- a/ceph-iscsi/tests/bundles/focal.yaml +++ /dev/null @@ -1,97 +0,0 @@ -variables: - source: &source cloud:focal-yoga - -local_overlay_enabled: False -series: focal -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' 
- - '12' - - '13' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.7/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0.19/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0.19/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/target.py b/ceph-iscsi/tests/target.py index ad3b57b1..7dde7029 100644 --- a/ceph-iscsi/tests/target.py +++ b/ceph-iscsi/tests/target.py @@ -12,27 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. + """Encapsulating `ceph-iscsi` testing.""" import logging import tempfile import zaza +import zaza.model import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.utilities.generic as generic_utils -class SimpleISCSITest(test_utils.BaseCharmTest): - - def test_pause_resume(self): - """Test pausing and resuming a unit.""" - with self.pause_resume( - ['rbd-target-api', 'rbd-target-gw'], - pgrep_full=True): - logging.info("Testing pause resume") +def basic_guest_setup(): + """Run basic setup for iscsi guest.""" + for unit in zaza.model.get_units('ceph-osd'): + setup_cmds = [ + "apt install --yes open-iscsi multipath-tools", + "systemctl start iscsi", + "systemctl start iscsid"] + for cmd in setup_cmds: + zaza.model.run_on_unit( + unit.entity_id, + cmd) -class CephISCSIGatewayTest(SimpleISCSITest): +class CephISCSIGatewayTest(test_utils.BaseCharmTest): """Class for `ceph-iscsi` tests.""" GW_IQN = "iqn.2003-03.com.canonical.iscsi-gw:iscsi-igw" @@ -76,7 +81,7 @@ def get_base_ctxt(self): gw_units = zaza.model.get_units('ceph-iscsi') host_names = generic_utils.get_unit_hostnames(gw_units, fqdn=True) client_entity_ids = [ - u.entity_id for u in zaza.model.get_units('ubuntu')] + u.entity_id for u in zaza.model.get_units('ceph-osd')] ctxt = { 'client_entity_ids': sorted(client_entity_ids), 'gw_iqn': self.GW_IQN, @@ -248,10 +253,6 @@ def create_ec_data_pool(self): action_params={ 'name': self.EC_METADATA_POOL})) - def refresh_partitions(self, ctxt): - """Refresh kernel partition tables in client.""" - self.run_commands(ctxt['client_entity_id'], ('partprobe', ), ctxt) - def run_client_checks(self, test_ctxt): """Check access to mulipath device. 
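The basic_guest_setup helper above just shells out to each ceph-osd unit; the same preparation can be done by hand with the Juju CLI when iterating on a deployed model (juju exec is the Juju 3.x spelling; on Juju 2.9 the equivalent command is juju run):

    juju exec --application ceph-osd -- apt install --yes open-iscsi multipath-tools
    juju exec --application ceph-osd -- systemctl start iscsi iscsid
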
@@ -264,13 +265,10 @@ def run_client_checks(self, test_ctxt): """ self.create_iscsi_target(test_ctxt) self.login_iscsi_target(test_ctxt) - self.refresh_partitions(test_ctxt) self.check_client_device(test_ctxt, init_client=True) self.logout_iscsi_targets(test_ctxt) self.login_iscsi_target(test_ctxt) - self.refresh_partitions(test_ctxt) self.check_client_device(test_ctxt, init_client=False) - self.refresh_partitions(test_ctxt) def test_create_and_mount_volume(self): """Test creating a target and mounting it on a client.""" @@ -319,3 +317,10 @@ def test_create_and_mount_volume_default_pool(self): 'img_size': '3G', 'img_name': 'disk_default_1'}) self.run_client_checks(ctxt) + + def test_pause_resume(self): + """Test pausing and resuming a unit.""" + with self.pause_resume( + ['rbd-target-api', 'rbd-target-gw'], + pgrep_full=True): + logging.info("Testing pause resume") diff --git a/ceph-iscsi/tests/tests.yaml b/ceph-iscsi/tests/tests.yaml index ef6812e6..d5408139 100644 --- a/ceph-iscsi/tests/tests.yaml +++ b/ceph-iscsi/tests/tests.yaml @@ -1,20 +1,17 @@ charm_name: ceph-iscsi + gate_bundles: - jammy-caracal + smoke_bundles: - jammy-caracal + dev_bundles: - jammy-caracal + configure: - - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation + - tests.target.basic_guest_setup tests: - zaza.charm_tests.lifecycle.refresh.CharmRefreshAll - - tests.target.SimpleISCSITest - - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation -target_deploy_status: - ubuntu: - workload-status: active - workload-status-message-prefix: '' - vault: - workload-status: blocked - workload-status-message-prefix: Vault needs to be initialized + - tests.target.CephISCSIGatewayTest + diff --git a/ceph-iscsi/tox.ini b/ceph-iscsi/tox.ini index 23c8fab8..89e845ee 100644 --- a/ceph-iscsi/tox.ini +++ b/ceph-iscsi/tox.ini @@ -1,5 +1,12 @@ -# Operator charm (with zaza): tox.ini - +# Classic charm (with zaza): ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] envlist = pep8,py3 skipsdist = True @@ -8,20 +15,21 @@ sitepackages = False # NOTE: Avoid false positives by not skipping missing interpreters. skip_missing_interpreters = False +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.18.0 + [testenv] -# We use tox mainly for virtual environment management for test requirements -# and do not install the charm code as a Python package into that environment. -# Ref: https://tox.wiki/en/latest/config.html#skip_install -skip_install = True setenv = VIRTUAL_ENV={envdir} PYTHONHASHSEED=0 TEST_JUJU3=1 CHARM_DIR={envdir} + CHARMS_ARTIFACT_DIR={toxinidir}/.. 
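With tests.yaml now pointing gate, smoke and dev at the single jammy-caracal bundle, the zaza suites are driven through tox as usual; a sketch, assuming the standard OpenStack-charm func env names that this tox.ini follows:

    tox -e func-smoke                      # deploy the smoke bundle, run tests.target
    tox -e func-target -- jammy-caracal    # one bundle, model kept for debugging
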
+ TEST_MODEL_SETTINGS = automatically-retry-hooks=true + TEST_MAX_RESOLVE_COUNT = 5 +install_command = + pip install {opts} {packages} commands = stestr run --slowest {posargs} allowlist_externals = - git - add-to-archive.py - bash charmcraft {toxinidir}/rename.sh passenv = @@ -32,6 +40,14 @@ passenv = TEST_* deps = -r{toxinidir}/test-requirements.txt +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft clean + charmcraft -v pack + {toxinidir}/rename.sh + [testenv:py36] basepython = python3.6 deps = -r{toxinidir}/requirements.txt @@ -62,6 +78,11 @@ basepython = python3.11 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +[testenv:py312] +basepython = python3.12 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -69,9 +90,9 @@ deps = -r{toxinidir}/requirements.txt [testenv:pep8] basepython = python3 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} src unit_tests tests +deps = flake8 + charm-tools +commands = flake8 {posargs} unit_tests tests src [testenv:cover] # Technique based heavily upon @@ -105,13 +126,6 @@ omit = basepython = python3 commands = {posargs} -[testenv:build] -basepython = python3 -commands = - charmcraft clean - charmcraft -v pack - {toxinidir}/rename.sh - [testenv:func-noop] basepython = python3 commands = @@ -138,5 +152,5 @@ commands = functest-run-suite --keep-model --bundle {posargs} [flake8] -# Ignore E902 because the unit_tests directory is missing in the built charm. -ignore = E402,E226,E902,W504 +ignore = E402,E226,W503,W504 +exclude = */charmhelpers diff --git a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py index 99f08af5..e9ee7e8e 100644 --- a/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py +++ b/ceph-iscsi/unit_tests/test_ceph_iscsi_charm.py @@ -23,7 +23,7 @@ sys.path.append('lib') # noqa sys.path.append('src') # noqa -from mock import call, patch, MagicMock, ANY, Mock +from unittest.mock import call, patch, MagicMock, ANY, Mock from ops.testing import Harness, _TestingModelBackend from ops.model import ( diff --git a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py index db00964a..a6acb942 100644 --- a/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py +++ b/ceph-iscsi/unit_tests/test_interface_ceph_iscsi_peer.py @@ -8,8 +8,8 @@ import interface_ceph_iscsi_peer -from unittest import mock -from mock import PropertyMock +import unittest.mock as mock +from unittest.mock import PropertyMock from ops import framework from ops.testing import Harness From 857cee5b75dabf50d34b346c2e3404c335d1d3e1 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Fri, 7 Mar 2025 12:15:03 +0100 Subject: [PATCH 2695/2699] Migration updates Signed-off-by: Peter Sabaini --- .github/workflows/build-and-test.yml | 2 +- ceph-radosgw/tox.ini | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index eff3a575..f4193307 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -146,7 +146,7 @@ jobs: run: | date ~/actionutils.sh setup_functest - ~/actionutils.sh cacheimgs "ubuntu@24.04" + ~/actionutils.sh cacheimgs "ubuntu@22.04" - name: Run the tests run: | diff --git a/ceph-radosgw/tox.ini b/ceph-radosgw/tox.ini 
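Taken together with the rename.sh change earlier in this patch, the [testenv:build] environment gives a one-command build whose artifact lands both in the charm directory and one level up, where CHARMS_ARTIFACT_DIR points:

    cd ceph-iscsi
    tox -e build                 # charmcraft clean, charmcraft -v pack, then rename.sh
    ls ceph-iscsi.charm ../ceph-iscsi.charm
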
index 8d07014f..f555757a 100644 --- a/ceph-radosgw/tox.ini +++ b/ceph-radosgw/tox.ini @@ -36,7 +36,6 @@ passenv = OS_* TEST_* deps = - -c {env:TEST_CONSTRAINTS_FILE:https://raw.githubusercontent.com/openstack-charmers/zaza-openstack-tests/master/constraints/constraints-2024.1.txt} -c {toxinidir}/../constraints/test-constraints.txt -r{toxinidir}/test-requirements.txt From e3239b7c7b9a4c4d94b21ad8da87f3b1f71eb955 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 10 Mar 2025 10:01:31 +0100 Subject: [PATCH 2696/2699] ceph-iscsi: update bundles Signed-off-by: Peter Sabaini --- .../tests/bundles/jammy-caracal-ec.yaml | 101 ----------------- ceph-iscsi/tests/bundles/jammy-caracal.yaml | 90 +++++----------- ceph-iscsi/tests/bundles/jammy-ec.yaml | 101 ----------------- ceph-iscsi/tests/bundles/jammy-reef-ec.yaml | 101 ----------------- ceph-iscsi/tests/bundles/jammy-reef.yaml | 102 ------------------ ceph-iscsi/tests/bundles/jammy.yaml | 102 ------------------ ceph-iscsi/tests/bundles/lunar-ec.yaml | 100 ----------------- ceph-iscsi/tests/bundles/lunar.yaml | 101 ----------------- 8 files changed, 24 insertions(+), 774 deletions(-) delete mode 100644 ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/jammy-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/jammy-reef-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/jammy-reef.yaml delete mode 100644 ceph-iscsi/tests/bundles/jammy.yaml delete mode 100644 ceph-iscsi/tests/bundles/lunar-ec.yaml delete mode 100644 ceph-iscsi/tests/bundles/lunar.yaml diff --git a/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml b/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml deleted file mode 100644 index f887904f..00000000 --- a/ceph-iscsi/tests/bundles/jammy-caracal-ec.yaml +++ /dev/null @@ -1,101 +0,0 @@ -variables: - source: &source cloud:jammy-caracal - -local_overlay_enabled: False -series: jammy -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-caracal.yaml b/ceph-iscsi/tests/bundles/jammy-caracal.yaml index 01dc7763..d0d96c98 100644 --- a/ceph-iscsi/tests/bundles/jammy-caracal.yaml 
+++ b/ceph-iscsi/tests/bundles/jammy-caracal.yaml @@ -1,50 +1,35 @@ -options: - source: &source cloud:jammy-caracal - local_overlay_enabled: False series: jammy + +variables: + openstack-origin: &openstack-origin cloud:jammy-caracal + machines: '0': '1': '2': '3': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '4': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '5': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '6': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': - '16': - '17': + constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine + applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' ceph-iscsi: charm: ch:ceph-iscsi - channel: latest/edge - num_units: 4 + num_units: 2 + channel: squid/edge options: gateway-metadata-pool: iscsi-foo-metadata - source: *source to: - - '0' - - '1' - - '16' - - '17' + - '6' + - '7' + ceph-osd: charm: ch:ceph-osd num_units: 6 @@ -52,52 +37,25 @@ applications: osd-devices: 'cinder,10G' options: osd-devices: '/dev/test-non-existent' - source: *source to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge + - '3' + - '4' + - '5' + channel: squid/edge + ceph-mon: charm: ch:ceph-mon num_units: 3 options: monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 to: - - '8' - - '9' - - '10' - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge + - '0' + - '1' + - '2' + channel: quid/edge relations: - - 'ceph-mon:client' - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - 'ceph-osd:mon' - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-ec.yaml b/ceph-iscsi/tests/bundles/jammy-ec.yaml deleted file mode 100644 index 3bcc745a..00000000 --- a/ceph-iscsi/tests/bundles/jammy-ec.yaml +++ /dev/null @@ -1,101 +0,0 @@ -variables: - source: &source distro - -local_overlay_enabled: False -series: jammy -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0/edge - 
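The constraints added above (virt-type=virtual-machine) put the osd and iscsi-gateway machines into LXD virtual machines rather than containers, which is what allows the iscsi_tcp kernel module to be loaded at all. Deploying the bundle directly for manual testing might look like this (model name illustrative, LXD cloud assumed):

    juju add-model iscsi-smoke
    juju deploy ./ceph-iscsi/tests/bundles/jammy-caracal.yaml
    juju status --watch 5s
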
vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml b/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml deleted file mode 100644 index a611c260..00000000 --- a/ceph-iscsi/tests/bundles/jammy-reef-ec.yaml +++ /dev/null @@ -1,101 +0,0 @@ -variables: - source: &source cloud:jammy-bobcat - -local_overlay_enabled: False -series: jammy -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: reef/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: reef/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy-reef.yaml b/ceph-iscsi/tests/bundles/jammy-reef.yaml deleted file mode 100644 index fafd0467..00000000 --- a/ceph-iscsi/tests/bundles/jammy-reef.yaml +++ /dev/null @@ -1,102 +0,0 @@ -options: - source: &source cloud:jammy-bobcat - -local_overlay_enabled: False -series: jammy -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': - '16': - '17': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 4 - options: - gateway-metadata-pool: iscsi-foo-metadata - source: *source - to: - - '0' - - '1' - - '16' - - '17' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: reef/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: reef/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0/edge - 
vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/jammy.yaml b/ceph-iscsi/tests/bundles/jammy.yaml deleted file mode 100644 index 0a63de9f..00000000 --- a/ceph-iscsi/tests/bundles/jammy.yaml +++ /dev/null @@ -1,102 +0,0 @@ -variables: - source: &source distro - -local_overlay_enabled: False -series: jammy -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': - '16': - '17': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 4 - options: - gateway-metadata-pool: iscsi-foo-metadata - source: *source - to: - - '0' - - '1' - - '16' - - '17' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: quincy/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: quincy/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: 1.8/stable - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: 8.0/edge - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/lunar-ec.yaml b/ceph-iscsi/tests/bundles/lunar-ec.yaml deleted file mode 100644 index d4519832..00000000 --- a/ceph-iscsi/tests/bundles/lunar-ec.yaml +++ /dev/null @@ -1,100 +0,0 @@ -variables: - source: &source distro - -local_overlay_enabled: False -series: lunar -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 2 - options: - gateway-metadata-pool: iscsi-foo-metadata - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - source: *source - to: - - '0' - - '1' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: latest/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - 
channel: latest/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' diff --git a/ceph-iscsi/tests/bundles/lunar.yaml b/ceph-iscsi/tests/bundles/lunar.yaml deleted file mode 100644 index 041f75fe..00000000 --- a/ceph-iscsi/tests/bundles/lunar.yaml +++ /dev/null @@ -1,101 +0,0 @@ -variables: - source: &source distro - -local_overlay_enabled: False -series: lunar -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - constraints: mem=3072M - '9': - constraints: mem=3072M - '10': - constraints: mem=3072M - '11': - '12': - '13': - '14': - '15': - '16': - '17': -applications: - ubuntu: - charm: cs:ubuntu - num_units: 3 - to: - - '7' - - '14' - - '15' - ceph-iscsi: - charm: ../../ceph-iscsi.charm - num_units: 4 - options: - gateway-metadata-pool: iscsi-foo-metadata - source: *source - to: - - '0' - - '1' - - '16' - - '17' - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 'cinder,10G' - options: - osd-devices: '/dev/test-non-existent' - source: *source - to: - - '0' - - '1' - - '2' - - '11' - - '12' - - '13' - channel: latest/edge - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - monitor-count: '3' - source: *source - to: - - '3' - - '4' - - '5' - channel: latest/edge - vault: - num_units: 1 - charm: ch:vault - to: - - '6' - channel: latest/edge - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '8' - - '9' - - '10' - channel: latest/edge - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge -relations: - - - 'ceph-mon:client' - - 'ceph-iscsi:ceph-client' - - - 'vault:certificates' - - 'ceph-iscsi:certificates' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' From 7a0d763443313da3954f90608f2378ebb60f4028 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 10 Mar 2025 10:07:50 +0100 Subject: [PATCH 2697/2699] ceph-nfs: bundle updates Signed-off-by: Peter Sabaini --- ceph-nfs/tests/bundles/focal-quincy.yaml | 47 --------------------- ceph-nfs/tests/bundles/jammy-caracal.yaml | 1 + ceph-nfs/tests/bundles/jammy-quincy.yaml | 47 --------------------- ceph-nfs/tests/bundles/jammy-reef.yaml | 50 ----------------------- ceph-nfs/tests/bundles/jammy-squid.yaml | 50 ----------------------- 5 files changed, 1 insertion(+), 194 deletions(-) delete mode 100644 ceph-nfs/tests/bundles/focal-quincy.yaml delete mode 100644 ceph-nfs/tests/bundles/jammy-quincy.yaml delete mode 100644 ceph-nfs/tests/bundles/jammy-reef.yaml delete mode 100644 ceph-nfs/tests/bundles/jammy-squid.yaml diff --git a/ceph-nfs/tests/bundles/focal-quincy.yaml b/ceph-nfs/tests/bundles/focal-quincy.yaml deleted file mode 100644 index 07c78965..00000000 --- a/ceph-nfs/tests/bundles/focal-quincy.yaml +++ /dev/null @@ -1,47 +0,0 @@ -local_overlay_enabled: False -series: focal -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: proposed - ceph-osd: - charm: ch:ceph-osd - channel: quincy/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: cloud:focal-yoga - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - monitor-count: '3' - 
expected-osd-count: 6 - source: cloud:focal-yoga - ceph-fs: - charm: ch:ceph-fs - channel: quincy/edge - num_units: 2 - options: - source: cloud:focal-yoga - hacluster: - charm: ch:hacluster - channel: 2.0.3/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/jammy-caracal.yaml b/ceph-nfs/tests/bundles/jammy-caracal.yaml index b4185ea1..448bd7cd 100644 --- a/ceph-nfs/tests/bundles/jammy-caracal.yaml +++ b/ceph-nfs/tests/bundles/jammy-caracal.yaml @@ -1,5 +1,6 @@ options: source: &source cloud:jammy-caracal +series: &series jammy machines: '0': diff --git a/ceph-nfs/tests/bundles/jammy-quincy.yaml b/ceph-nfs/tests/bundles/jammy-quincy.yaml deleted file mode 100644 index 669cb915..00000000 --- a/ceph-nfs/tests/bundles/jammy-quincy.yaml +++ /dev/null @@ -1,47 +0,0 @@ -local_overlay_enabled: False -series: jammy -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: distro - ceph-osd: - charm: ch:ceph-osd - channel: quincy/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: distro - ceph-mon: - charm: ch:ceph-mon - channel: quincy/edge - num_units: 3 - options: - monitor-count: '3' - expected-osd-count: 6 - source: distro - ceph-fs: - charm: ch:ceph-fs - channel: quincy/edge - num_units: 2 - options: - source: distro - hacluster: - charm: ch:hacluster - channel: 2.4/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/jammy-reef.yaml b/ceph-nfs/tests/bundles/jammy-reef.yaml deleted file mode 100644 index ff05761b..00000000 --- a/ceph-nfs/tests/bundles/jammy-reef.yaml +++ /dev/null @@ -1,50 +0,0 @@ -options: - source: &source cloud:jammy-bobcat - -local_overlay_enabled: False -series: jammy -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: *source - ceph-osd: - charm: ch:ceph-osd - channel: reef/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: *source - ceph-mon: - charm: ch:ceph-mon - channel: reef/edge - num_units: 3 - options: - monitor-count: '3' - expected-osd-count: 6 - source: *source - ceph-fs: - charm: ch:ceph-fs - channel: reef/edge - num_units: 2 - options: - source: *source - hacluster: - charm: ch:hacluster - channel: 2.4/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' diff --git a/ceph-nfs/tests/bundles/jammy-squid.yaml b/ceph-nfs/tests/bundles/jammy-squid.yaml deleted file mode 100644 index 2270da22..00000000 --- a/ceph-nfs/tests/bundles/jammy-squid.yaml +++ /dev/null @@ -1,50 +0,0 @@ -options: - source: &source cloud:jammy-caracal - -local_overlay_enabled: False -series: jammy -applications: - ubuntu: - charm: cs:ubuntu - num_units: 2 - ceph-nfs: - charm: ../../ceph-nfs.charm - num_units: 2 - options: - source: *source - ceph-osd: - charm: ch:ceph-osd - channel: latest/edge - num_units: 3 - storage: - osd-devices: '2,10G' - options: - source: *source - ceph-mon: - charm: ch:ceph-mon - channel: latest/edge - num_units: 3 - options: - 
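After this much bundle pruning, a cheap local check that the surviving bundles still parse (anchors such as &source and &series included) saves a CI round-trip; a sketch assuming PyYAML is available, as it is wherever zaza runs:

    for b in ceph-nfs/tests/bundles/*.yaml; do
        python3 -c 'import sys, yaml; yaml.safe_load(open(sys.argv[1])); print("ok", sys.argv[1])' "$b"
    done
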
monitor-count: '3' - expected-osd-count: 6 - source: *source - ceph-fs: - charm: ch:ceph-fs - channel: latest/edge - num_units: 2 - options: - source: *source - hacluster: - charm: ch:hacluster - channel: 2.4/edge - options: - cluster_count: 2 -relations: - - - 'ceph-mon:client' - - 'ceph-nfs:ceph-client' - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - 'ceph-fs' - - 'ceph-mon' - - - 'ceph-nfs:ha' - - 'hacluster:ha' From 7fd4539ede7b2cd13e7de2f0194d33f80e647004 Mon Sep 17 00:00:00 2001 From: Peter Sabaini Date: Mon, 10 Mar 2025 10:08:13 +0100 Subject: [PATCH 2698/2699] ceph-radosgw: bundle updates Signed-off-by: Peter Sabaini --- .../tests/bundles/focal-yoga-multisite.yaml | 99 -------------- .../tests/bundles/focal-yoga-namespaced.yaml | 124 ----------------- ceph-radosgw/tests/bundles/focal-yoga.yaml | 123 ----------------- .../bundles/jammy-antelope-multisite.yaml | 99 -------------- .../bundles/jammy-antelope-namespaced.yaml | 125 ------------------ .../tests/bundles/jammy-antelope.yaml | 122 ----------------- .../tests/bundles/jammy-bobcat-multisite.yaml | 101 -------------- .../bundles/jammy-bobcat-namespaced.yaml | 125 ------------------ ceph-radosgw/tests/bundles/jammy-bobcat.yaml | 124 ----------------- .../bundles/jammy-caracal-multisite.yaml | 101 -------------- .../bundles/jammy-caracal-namespaced.yaml | 125 ------------------ ceph-radosgw/tests/bundles/jammy-caracal.yaml | 2 +- .../tests/bundles/local-jammy-antelope.yaml | 121 ----------------- ceph-radosgw/tests/bundles/noble-caracal.yaml | 61 --------- 14 files changed, 1 insertion(+), 1451 deletions(-) delete mode 100644 ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/focal-yoga.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-antelope.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-bobcat.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml delete mode 100644 ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml delete mode 100644 ceph-radosgw/tests/bundles/local-jammy-antelope.yaml delete mode 100644 ceph-radosgw/tests/bundles/noble-caracal.yaml diff --git a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml b/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml deleted file mode 100644 index 006ed1d0..00000000 --- a/ceph-radosgw/tests/bundles/focal-yoga-multisite.yaml +++ /dev/null @@ -1,99 +0,0 @@ -options: - source: &source cloud:focal-yoga - -series: focal - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - '9': - -applications: - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '0' - - secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '1' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '2' - - '6' - - '7' - channel: quincy/edge - - secondary-ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '3' - - '8' - - '9' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '4' - channel: quincy/edge - - secondary-ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '5' - channel: quincy/edge - -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'secondary-ceph-osd:mon' - - 'secondary-ceph-mon:osd' - - - - 'secondary-ceph-radosgw:mon' - - 'secondary-ceph-mon:radosgw' - diff --git a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml b/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml deleted file mode 100644 index e7d6ebd2..00000000 --- a/ceph-radosgw/tests/bundles/focal-yoga-namespaced.yaml +++ /dev/null @@ -1,124 +0,0 @@ -options: - source: &source cloud:focal-yoga - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: quincy/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: yoga/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 
'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/focal-yoga.yaml b/ceph-radosgw/tests/bundles/focal-yoga.yaml deleted file mode 100644 index eac9de1f..00000000 --- a/ceph-radosgw/tests/bundles/focal-yoga.yaml +++ /dev/null @@ -1,123 +0,0 @@ -options: - source: &source cloud:focal-yoga - -series: focal - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: quincy/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: yoga/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml deleted file mode 100644 index 27d2a8b3..00000000 --- a/ceph-radosgw/tests/bundles/jammy-antelope-multisite.yaml +++ /dev/null @@ -1,99 +0,0 @@ -options: - source: &source cloud:jammy-antelope - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - '9': - -applications: - ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '0' - - secondary-ceph-radosgw: - charm: ../../ceph-radosgw.charm - num_units: 1 - options: - source: *source - to: - - '1' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '2' - - '6' - - '7' - channel: quincy/edge - - secondary-ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '3' - - '8' - - '9' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '4' - channel: quincy/edge - - secondary-ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '5' - channel: quincy/edge - -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'secondary-ceph-osd:mon' - - 'secondary-ceph-mon:osd' - - - - 'secondary-ceph-radosgw:mon' - - 'secondary-ceph-mon:radosgw' - diff --git a/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml deleted file mode 100644 index 3c5e57d8..00000000 --- a/ceph-radosgw/tests/bundles/jammy-antelope-namespaced.yaml +++ /dev/null @@ -1,125 +0,0 @@ -options: - source: &source cloud:jammy-antelope - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *source - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-radosgw: - charm: ch:ceph-radosgw - channel: quincy/edge - num_units: 1 - options: - source: *source - namespace-tenants: True - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: quincy/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: 2023.1/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: latest/edge - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/jammy-antelope.yaml b/ceph-radosgw/tests/bundles/jammy-antelope.yaml deleted file mode 100644 index 02979ee6..00000000 --- a/ceph-radosgw/tests/bundles/jammy-antelope.yaml +++ /dev/null @@ -1,122 +0,0 @@ -options: - source: &source cloud:jammy-antelope - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - -applications: - - keystone-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - to: - - '0' - - '1' - - '2' - channel: 8.0/edge - - ceph-radosgw: - charm: ch:ceph-radosgw - channel: quincy/edge - num_units: 1 - options: - source: *source - to: - - '3' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '4' - - '5' - - '6' - channel: quincy/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - source: *source - to: - - '7' - - '8' - - '9' - channel: quincy/edge - - keystone: - expose: True - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *source - to: - - '10' - channel: 2023.1/edge - - vault-mysql-router: - charm: ch:mysql-router - channel: 8.0/edge - - vault: - charm: ch:vault - num_units: 1 - to: - - '11' - channel: 1.8/stable - -relations: - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'ceph-radosgw:identity-service' - - 'keystone:identity-service' - - - - 'vault-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'vault:shared-db' - - 'vault-mysql-router:shared-db' - - - - 'keystone:certificates' - - 'vault:certificates' - - - - 'ceph-radosgw:certificates' - - 'vault:certificates' diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml deleted file mode 100644 index fee2fb98..00000000 --- a/ceph-radosgw/tests/bundles/jammy-bobcat-multisite.yaml +++ /dev/null @@ -1,101 +0,0 @@ -options: - source: &source cloud:jammy-bobcat - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
database sooner = faster' -machines: - '0': - '1': - '2': - '3': - '4': - '5': - '6': - '7': - '8': - '9': - -applications: - ceph-radosgw: - charm: ch:ceph-radosgw - channel: latest/edge - num_units: 1 - options: - source: *source - to: - - '0' - - secondary-ceph-radosgw: - charm: ch:ceph-radosgw - channel: latest/edge - num_units: 1 - options: - source: *source - to: - - '1' - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '2' - - '6' - - '7' - channel: reef/edge - - secondary-ceph-osd: - charm: ch:ceph-osd - num_units: 3 - constraints: "mem=2048" - storage: - osd-devices: 'cinder,10G' - options: - source: *source - osd-devices: '/srv/ceph /dev/test-non-existent' - to: - - '3' - - '8' - - '9' - channel: reef/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '4' - channel: reef/edge - - secondary-ceph-mon: - charm: ch:ceph-mon - num_units: 1 - options: - monitor-count: 1 - source: *source - to: - - '5' - channel: reef/edge - -relations: - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-radosgw:mon' - - 'ceph-mon:radosgw' - - - - 'secondary-ceph-osd:mon' - - 'secondary-ceph-mon:osd' - - - - 'secondary-ceph-radosgw:mon' - - 'secondary-ceph-mon:radosgw' - diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml deleted file mode 100644 index 8c385280..00000000 --- a/ceph-radosgw/tests/bundles/jammy-bobcat-namespaced.yaml +++ /dev/null @@ -1,125 +0,0 @@ -options: - source: &source cloud:jammy-bobcat - -series: jammy - -comment: -- 'machines section to decide order of deployment. 
-  database sooner = faster'
-machines:
-  '0':
-    constraints: mem=3072M
-  '1':
-    constraints: mem=3072M
-  '2':
-    constraints: mem=3072M
-  '3':
-  '4':
-  '5':
-  '6':
-  '7':
-  '8':
-  '9':
-  '10':
-  '11':
-
-applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: latest/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '0'
-      - '1'
-      - '2'
-    channel: latest/edge
-
-  ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: reef/edge
-    num_units: 1
-    options:
-      source: *source
-      namespace-tenants: True
-    to:
-      - '3'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '4'
-      - '5'
-      - '6'
-    channel: reef/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '7'
-      - '8'
-      - '9'
-    channel: reef/edge
-
-  keystone:
-    expose: True
-    charm: ch:keystone
-    num_units: 1
-    options:
-      openstack-origin: *source
-    to:
-      - '10'
-    channel: 2023.2/edge
-
-  vault-mysql-router:
-    charm: ch:mysql-router
-    channel: latest/edge
-
-  vault:
-    charm: ch:vault
-    num_units: 1
-    to:
-      - '11'
-    channel: latest/edge
-
-relations:
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-
-  - - 'ceph-radosgw:mon'
-    - 'ceph-mon:radosgw'
-
-  - - 'ceph-radosgw:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'vault-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'vault:shared-db'
-    - 'vault-mysql-router:shared-db'
-
-  - - 'keystone:certificates'
-    - 'vault:certificates'
-
-  - - 'ceph-radosgw:certificates'
-    - 'vault:certificates'
diff --git a/ceph-radosgw/tests/bundles/jammy-bobcat.yaml b/ceph-radosgw/tests/bundles/jammy-bobcat.yaml
deleted file mode 100644
index 0e6c26da..00000000
--- a/ceph-radosgw/tests/bundles/jammy-bobcat.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-options:
-  source: &source cloud:jammy-bobcat
-
-series: jammy
-
-comment:
-- 'machines section to decide order of deployment.
-  database sooner = faster'
-machines:
-  '0':
-    constraints: mem=3072M
-  '1':
-    constraints: mem=3072M
-  '2':
-    constraints: mem=3072M
-  '3':
-  '4':
-  '5':
-  '6':
-  '7':
-  '8':
-  '9':
-  '10':
-  '11':
-
-applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: latest/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '0'
-      - '1'
-      - '2'
-    channel: latest/edge
-
-  ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: reef/edge
-    num_units: 1
-    options:
-      source: *source
-    to:
-      - '3'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '4'
-      - '5'
-      - '6'
-    channel: reef/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '7'
-      - '8'
-      - '9'
-    channel: reef/edge
-
-  keystone:
-    expose: True
-    charm: ch:keystone
-    num_units: 1
-    options:
-      openstack-origin: *source
-    to:
-      - '10'
-    channel: 2023.2/edge
-
-  vault-mysql-router:
-    charm: ch:mysql-router
-    channel: latest/edge
-
-  vault:
-    charm: ch:vault
-    num_units: 1
-    to:
-      - '11'
-    channel: latest/edge
-
-relations:
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-
-  - - 'ceph-radosgw:mon'
-    - 'ceph-mon:radosgw'
-
-  - - 'ceph-radosgw:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'vault-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'vault:shared-db'
-    - 'vault-mysql-router:shared-db'
-
-  - - 'keystone:certificates'
-    - 'vault:certificates'
-
-  - - 'ceph-radosgw:certificates'
-    - 'vault:certificates'
diff --git a/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml b/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml
deleted file mode 100644
index 4a5c3cb5..00000000
--- a/ceph-radosgw/tests/bundles/jammy-caracal-multisite.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-options:
-  source: &source cloud:jammy-caracal
-
-series: jammy
-
-comment:
-- 'machines section to decide order of deployment.
-  database sooner = faster'
-machines:
-  '0':
-  '1':
-  '2':
-  '3':
-  '4':
-  '5':
-  '6':
-  '7':
-  '8':
-  '9':
-
-applications:
-  ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: latest/edge
-    num_units: 1
-    options:
-      source: *source
-    to:
-      - '0'
-
-  secondary-ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: latest/edge
-    num_units: 1
-    options:
-      source: *source
-    to:
-      - '1'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '2'
-      - '6'
-      - '7'
-    channel: latest/edge
-
-  secondary-ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '3'
-      - '8'
-      - '9'
-    channel: latest/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 1
-    options:
-      monitor-count: 1
-      source: *source
-    to:
-      - '4'
-    channel: latest/edge
-
-  secondary-ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 1
-    options:
-      monitor-count: 1
-      source: *source
-    to:
-      - '5'
-    channel: latest/edge
-
-relations:
-- - 'ceph-osd:mon'
-  - 'ceph-mon:osd'
-
-- - 'ceph-radosgw:mon'
-  - 'ceph-mon:radosgw'
-
-- - 'secondary-ceph-osd:mon'
-  - 'secondary-ceph-mon:osd'
-
-- - 'secondary-ceph-radosgw:mon'
-  - 'secondary-ceph-mon:radosgw'
diff --git a/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml b/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml
deleted file mode 100644
index b65b26e1..00000000
--- a/ceph-radosgw/tests/bundles/jammy-caracal-namespaced.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-options:
-  source: &source cloud:jammy-caracal
-
-series: jammy
-
-comment:
-- 'machines section to decide order of deployment.
-  database sooner = faster'
-machines:
-  '0':
-    constraints: mem=3072M
-  '1':
-    constraints: mem=3072M
-  '2':
-    constraints: mem=3072M
-  '3':
-  '4':
-  '5':
-  '6':
-  '7':
-  '8':
-  '9':
-  '10':
-  '11':
-
-applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '0'
-      - '1'
-      - '2'
-    channel: 8.0/edge
-
-  ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: latest/edge
-    num_units: 1
-    options:
-      source: *source
-      namespace-tenants: True
-    to:
-      - '3'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '4'
-      - '5'
-      - '6'
-    channel: latest/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '7'
-      - '8'
-      - '9'
-    channel: latest/edge
-
-  keystone:
-    expose: True
-    charm: ch:keystone
-    num_units: 1
-    options:
-      openstack-origin: *source
-    to:
-      - '10'
-    channel: latest/edge
-
-  vault-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  vault:
-    charm: ch:vault
-    num_units: 1
-    to:
-      - '11'
-    channel: 1.8/edge
-
-relations:
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-
-  - - 'ceph-radosgw:mon'
-    - 'ceph-mon:radosgw'
-
-  - - 'ceph-radosgw:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'vault-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'vault:shared-db'
-    - 'vault-mysql-router:shared-db'
-
-  - - 'keystone:certificates'
-    - 'vault:certificates'
-
-  - - 'ceph-radosgw:certificates'
-    - 'vault:certificates'
diff --git a/ceph-radosgw/tests/bundles/jammy-caracal.yaml b/ceph-radosgw/tests/bundles/jammy-caracal.yaml
index 59d66de1..7d9abdbd 100644
--- a/ceph-radosgw/tests/bundles/jammy-caracal.yaml
+++ b/ceph-radosgw/tests/bundles/jammy-caracal.yaml
@@ -53,7 +53,7 @@ applications:
     num_units: 3
     constraints: "mem=2048"
     storage:
-      osd-devices: 'cinder,10G'
+      osd-devices: 'loop,10G'
     options:
       source: *source
       osd-devices: '/srv/ceph /dev/test-non-existent'
diff --git a/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml b/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml
deleted file mode 100644
index a9dbdbdb..00000000
--- a/ceph-radosgw/tests/bundles/local-jammy-antelope.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-options:
-  source: &source cloud:jammy-antelope
-
-series: jammy
-
-comment:
-- 'machines section to decide order of deployment.
-  database sooner = faster'
-machines:
-  '0':
-    constraints: mem=3072M
-  '1':
-    constraints: mem=3072M
-  '2':
-    constraints: mem=3072M
-  '3':
-  '4':
-  '5':
-  '6':
-  '7':
-  '8':
-  '9':
-  '10':
-  '11':
-
-applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    to:
-      - '0'
-      - '1'
-      - '2'
-    channel: 8.0/edge
-
-  ceph-radosgw:
-    charm: ../../ceph-radosgw.charm
-    num_units: 1
-    options:
-      source: *source
-    to:
-      - '3'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    constraints: "mem=2048"
-    storage:
-      osd-devices: 'cinder,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '4'
-      - '5'
-      - '6'
-    channel: quincy/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '7'
-      - '8'
-      - '9'
-    channel: quincy/edge
-
-  keystone:
-    expose: True
-    charm: ch:keystone
-    num_units: 1
-    options:
-      openstack-origin: *source
-    to:
-      - '10'
-    channel: 2023.1/edge
-
-  vault-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  vault:
-    charm: ch:vault
-    num_units: 1
-    to:
-      - '11'
-    channel: 1.8/stable
-
-relations:
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-
-  - - 'ceph-radosgw:mon'
-    - 'ceph-mon:radosgw'
-
-  - - 'ceph-radosgw:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'vault-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'vault:shared-db'
-    - 'vault-mysql-router:shared-db'
-
-  - - 'keystone:certificates'
-    - 'vault:certificates'
-
-  - - 'ceph-radosgw:certificates'
-    - 'vault:certificates'
diff --git a/ceph-radosgw/tests/bundles/noble-caracal.yaml b/ceph-radosgw/tests/bundles/noble-caracal.yaml
deleted file mode 100644
index de1cbf2d..00000000
--- a/ceph-radosgw/tests/bundles/noble-caracal.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-options:
-  source: &source distro
-
-series: noble
-
-comment:
-- 'machines section to decide order of deployment.
-  database sooner = faster'
-machines:
-  '0':
-  '1':
-    constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine
-  '2':
-    constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine
-  '3':
-    constraints: cores=2 mem=6G root-disk=40G virt-type=virtual-machine
-  '4':
-  '5':
-  '6':
-
-applications:
-  ceph-radosgw:
-    charm: ch:ceph-radosgw
-    channel: latest/edge
-    num_units: 1
-    options:
-      source: *source
-    to:
-      - '0'
-
-  ceph-osd:
-    charm: ch:ceph-osd
-    num_units: 3
-    storage:
-      osd-devices: 'loop,10G'
-    options:
-      source: *source
-      osd-devices: '/srv/ceph /dev/test-non-existent'
-    to:
-      - '1'
-      - '2'
-      - '3'
-    channel: latest/edge
-
-  ceph-mon:
-    charm: ch:ceph-mon
-    num_units: 3
-    options:
-      monitor-count: 3
-      source: *source
-    to:
-      - '4'
-      - '5'
-      - '6'
-    channel: latest/edge
-
-relations:
-  - - 'ceph-osd:mon'
-    - 'ceph-mon:osd'
-  - - 'ceph-radosgw:mon'
-    - 'ceph-mon:radosgw'

From f33b1d84fd4baafa7d871a6e431d02836ca6bbbd Mon Sep 17 00:00:00 2001
From: Peter Sabaini
Date: Mon, 10 Mar 2025 11:25:13 +0100
Subject: [PATCH 2699/2699] test bundle fix

Signed-off-by: Peter Sabaini
---
 ceph-iscsi/tests/bundles/jammy-caracal.yaml   |  6 +-
 ceph-nfs/tests/bundles/jammy-caracal.yaml     |  2 +-
 ceph-radosgw/tests/bundles/jammy-caracal.yaml | 91 +++----------------
 3 files changed, 18 insertions(+), 81 deletions(-)

diff --git a/ceph-iscsi/tests/bundles/jammy-caracal.yaml b/ceph-iscsi/tests/bundles/jammy-caracal.yaml
index d0d96c98..2fba5d22 100644
--- a/ceph-iscsi/tests/bundles/jammy-caracal.yaml
+++ b/ceph-iscsi/tests/bundles/jammy-caracal.yaml
@@ -32,9 +32,9 @@ applications:
 
   ceph-osd:
     charm: ch:ceph-osd
-    num_units: 6
+    num_units: 3
     storage:
-      osd-devices: 'cinder,10G'
+      osd-devices: 'loop,10G'
    options:
      osd-devices: '/dev/test-non-existent'
    to:
@@ -52,7 +52,7 @@ applications:
       - '0'
       - '1'
       - '2'
-    channel: quid/edge
+    channel: squid/edge
 
 relations:
   - - 'ceph-mon:client'
diff --git a/ceph-nfs/tests/bundles/jammy-caracal.yaml b/ceph-nfs/tests/bundles/jammy-caracal.yaml
index 448bd7cd..a0b9692d 100644
--- a/ceph-nfs/tests/bundles/jammy-caracal.yaml
+++ b/ceph-nfs/tests/bundles/jammy-caracal.yaml
@@ -14,7 +14,7 @@ machines:
   '5':
 
 local_overlay_enabled: False
-series: noble
+
 applications:
   ceph-nfs:
     charm: ch:ceph-nfs
diff --git a/ceph-radosgw/tests/bundles/jammy-caracal.yaml b/ceph-radosgw/tests/bundles/jammy-caracal.yaml
index 7d9abdbd..4a0bc34b 100644
--- a/ceph-radosgw/tests/bundles/jammy-caracal.yaml
+++ b/ceph-radosgw/tests/bundles/jammy-caracal.yaml
@@ -7,118 +7,55 @@ comment:
 - 'machines section to decide order of deployment.
   database sooner = faster'
 machines:
   '0':
-    constraints: mem=3072M
   '1':
-    constraints: mem=3072M
+    constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine
   '2':
-    constraints: mem=3072M
+    constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine
   '3':
+    constraints: cores=2 mem=4G root-disk=25G virt-type=virtual-machine
   '4':
   '5':
   '6':
-  '7':
-  '8':
-  '9':
-  '10':
-  '11':
 
 applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *source
-    to:
-      - '0'
-      - '1'
-      - '2'
-    channel: 8.0/edge
-
   ceph-radosgw:
     charm: ch:ceph-radosgw
-    channel: latest/edge
+    channel: squid/edge
     num_units: 1
     options:
       source: *source
     to:
-      - '3'
+      - '0'
 
   ceph-osd:
     charm: ch:ceph-osd
     num_units: 3
-    constraints: "mem=2048"
     storage:
       osd-devices: 'loop,10G'
     options:
       source: *source
       osd-devices: '/srv/ceph /dev/test-non-existent'
     to:
-      - '4'
-      - '5'
-      - '6'
-    channel: latest/edge
+      - '1'
+      - '2'
+      - '3'
+    channel: squid/edge
 
   ceph-mon:
     charm: ch:ceph-mon
     num_units: 3
     options:
+      monitor-count: 3
       source: *source
     to:
-      - '7'
-      - '8'
-      - '9'
-    channel: latest/edge
-
-  keystone:
-    expose: True
-    charm: ch:keystone
-    num_units: 1
-    options:
-      openstack-origin: *source
-    to:
-      - '10'
-    channel: latest/edge
-
-  vault-mysql-router:
-    charm: ch:mysql-router
-    channel: 8.0/edge
-
-  vault:
-    charm: ch:vault
-    num_units: 1
-    to:
-      - '11'
-    channel: 1.8/edge
+      - '4'
+      - '5'
+      - '6'
+    channel: squid/edge
 
 relations:
-
-  - - 'keystone:shared-db'
-    - 'keystone-mysql-router:shared-db'
-  - - 'keystone-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
   - - 'ceph-osd:mon'
     - 'ceph-mon:osd'
-
   - - 'ceph-radosgw:mon'
     - 'ceph-mon:radosgw'
-
-  - - 'ceph-radosgw:identity-service'
-    - 'keystone:identity-service'
-
-  - - 'vault-mysql-router:db-router'
-    - 'mysql-innodb-cluster:db-router'
-
-  - - 'vault:shared-db'
-    - 'vault-mysql-router:shared-db'
-
-  - - 'keystone:certificates'
-    - 'vault:certificates'
-
-  - - 'ceph-radosgw:certificates'
-    - 'vault:certificates'